From d1c7b952b3c182fd4f1c91ff2240cec13225062e Mon Sep 17 00:00:00 2001 From: Brian Ginsburg Date: Sun, 14 Jan 2024 14:06:06 -0800 Subject: [PATCH 01/75] feat: Add TaggedSchema trait Adds a tag as a const field to a schema. --- homestar-runtime/src/api.rs | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) create mode 100644 homestar-runtime/src/api.rs diff --git a/homestar-runtime/src/api.rs b/homestar-runtime/src/api.rs new file mode 100644 index 00000000..da503e59 --- /dev/null +++ b/homestar-runtime/src/api.rs @@ -0,0 +1,16 @@ +use schemars::{ + gen::SchemaGenerator, + schema::{Schema, SchemaObject}, + JsonSchema, +}; +use serde_json::json; + +pub(crate) trait TaggedSchema { + fn tag() -> String; + + fn make_tag_schema(gen: &mut SchemaGenerator) -> Schema { + let mut schema: SchemaObject = ::json_schema(gen).into(); + schema.const_value = Some(json!(Self::tag())); + schema.into() + } +} From aa934683a1d52788c292c1924e822730ac1c8dc7 Mon Sep 17 00:00:00 2001 From: Brian Ginsburg Date: Tue, 16 Jan 2024 14:13:56 -0800 Subject: [PATCH 02/75] feat: Add OpenRPC document generator --- homestar-runtime/schemas/openrpc/document.rs | 641 +++++++++++++++++++ homestar-runtime/schemas/openrpc/mod.rs | 1 + 2 files changed, 642 insertions(+) create mode 100644 homestar-runtime/schemas/openrpc/document.rs create mode 100644 homestar-runtime/schemas/openrpc/mod.rs diff --git a/homestar-runtime/schemas/openrpc/document.rs b/homestar-runtime/schemas/openrpc/document.rs new file mode 100644 index 00000000..db447c9f --- /dev/null +++ b/homestar-runtime/schemas/openrpc/document.rs @@ -0,0 +1,641 @@ +#![allow(dead_code)] + +//! OpenRPC API document generator +//! +//! OpenRPC spec: https://github.com/open-rpc/spec +//! 
Module adapted from: https://github.com/austbot/rust-open-rpc-macros/tree/master/open-rpc-schema + +use schemars::{gen::SchemaSettings, schema::RootSchema, JsonSchema}; +use serde::{Deserialize, Serialize}; +use std::collections::HashMap; + +extern crate serde_json; + +#[derive(Serialize, Deserialize, Clone)] +pub enum Openrpc { + #[serde(rename = "1.2.6")] + V26, + #[serde(rename = "1.2.5")] + V25, + #[serde(rename = "1.2.4")] + V24, + #[serde(rename = "1.2.3")] + V23, + #[serde(rename = "1.2.2")] + V22, + #[serde(rename = "1.2.1")] + V21, + #[serde(rename = "1.2.0")] + V20, + #[serde(rename = "1.1.12")] + V112, + #[serde(rename = "1.1.11")] + V111, + #[serde(rename = "1.1.10")] + V110, + #[serde(rename = "1.1.9")] + V19, + #[serde(rename = "1.1.8")] + V18, + #[serde(rename = "1.1.7")] + V17, + #[serde(rename = "1.1.6")] + V16, + #[serde(rename = "1.1.5")] + V15, + #[serde(rename = "1.1.4")] + V14, + #[serde(rename = "1.1.3")] + V13, + #[serde(rename = "1.1.2")] + V12, + #[serde(rename = "1.1.1")] + V11, + #[serde(rename = "1.1.0")] + V10, + #[serde(rename = "1.0.0")] + V00, + #[serde(rename = "1.0.0-rc1")] + V00Rc1, + #[serde(rename = "1.0.0-rc0")] + V00Rc0, +} + +pub type InfoObjectProperties = String; +pub type InfoObjectDescription = String; +pub type InfoObjectTermsOfService = String; +pub type InfoObjectVersion = String; +pub type ContactObjectName = String; +pub type ContactObjectEmail = String; +pub type ContactObjectUrl = String; +pub type SpecificationExtension = serde_json::Value; + +#[derive(Serialize, Deserialize, Clone)] +pub struct ContactObject { + pub name: Option, + pub email: Option, + pub url: Option, +} + +pub type LicenseObjectName = String; +pub type LicenseObjectUrl = String; + +#[derive(Serialize, Deserialize, Clone)] +pub struct LicenseObject { + pub name: Option, + pub url: Option, +} + +#[derive(Serialize, Deserialize, Clone)] +pub struct InfoObject { + pub title: InfoObjectProperties, + #[serde(skip_serializing_if = "Option::is_none")] 
+ pub description: Option, + #[serde(rename = "termsOfService")] + #[serde(skip_serializing_if = "Option::is_none")] + pub terms_of_service: Option, + pub version: InfoObjectVersion, + #[serde(skip_serializing_if = "Option::is_none")] + pub contact: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub license: Option, +} + +pub type ExternalDocumentationObjectDescription = String; +pub type ExternalDocumentationObjectUrl = String; + +/// ExternalDocumentationObject +/// +/// information about external documentation +/// +#[derive(Serialize, Deserialize, Clone)] +pub struct ExternalDocumentationObject { + pub description: Option, + pub url: ExternalDocumentationObjectUrl, +} + +pub type ServerObjectUrl = String; +pub type ServerObjectName = String; +pub type ServerObjectDescription = String; +pub type ServerObjectSummary = String; +pub type ServerObjectVariableDefault = String; +pub type ServerObjectVariableDescription = String; +pub type ServerObjectVariableEnumItem = String; +pub type ServerObjectVariableEnum = Vec; + +#[derive(Serialize, Deserialize, Clone)] +pub struct ServerObjectVariable { + pub default: ServerObjectVariableDefault, + pub description: Option, + #[serde(rename = "enum")] + pub variable_enum: Option, +} + +pub type ServerObjectVariables = HashMap>; + +#[derive(Serialize, Deserialize, Clone)] +pub struct ServerObject { + pub url: ServerObjectUrl, + #[serde(skip_serializing_if = "Option::is_none")] + pub name: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub description: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub summary: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub variables: Option, +} + +pub type Servers = Vec; +/// MethodObjectName +/// +/// The cannonical name for the method. The name MUST be unique within the methods array. +/// +pub type MethodObjectName = String; +/// MethodObjectDescription +/// +/// A verbose explanation of the method behavior. 
GitHub Flavored Markdown syntax MAY be used for rich text representation. +/// +pub type MethodObjectDescription = String; +/// MethodObjectSummary +/// +/// A short summary of what the method does. +/// +pub type MethodObjectSummary = String; +pub type TagObjectName = String; +pub type TagObjectDescription = String; + +#[derive(Serialize, Deserialize, Clone)] +pub struct TagObject { + pub name: TagObjectName, + #[serde(skip_serializing_if = "Option::is_none")] + pub description: Option, + #[serde(rename = "externalDocs")] + #[serde(skip_serializing_if = "Option::is_none")] + pub external_docs: Option, +} + +#[derive(Serialize, Deserialize, Clone)] +pub struct ReferenceObject { + #[serde(rename = "$ref")] + pub reference: String, +} + +#[derive(Serialize, Deserialize, Clone)] +#[serde(untagged)] +pub enum TagOrReference { + TagObject(TagObject), + ReferenceObject(ReferenceObject), +} + +pub type MethodObjectTags = Vec; + +/// MethodObjectParamStructure +/// +/// Format the server expects the params. Defaults to 'either'. 
+/// +/// # Default +/// +/// either +/// +#[derive(Serialize, Deserialize, Clone)] +pub enum MethodObjectParamStructure { + #[serde(rename = "by-position")] + ByPosition, + #[serde(rename = "by-name")] + ByName, + #[serde(rename = "either")] + Either, +} + +pub type ContentDescriptorObjectName = String; +pub type ContentDescriptorObjectDescription = String; +pub type ContentDescriptorObjectSummary = String; +pub type Id = String; +pub type Schema = String; +pub type Comment = String; +pub type Title = String; +pub type Description = String; +type AlwaysTrue = serde_json::Value; +pub type ReadOnly = bool; +pub type Examples = Vec; +pub type MultipleOf = f64; +pub type Maximum = f64; +pub type ExclusiveMaximum = f64; +pub type Minimum = f64; +pub type ExclusiveMinimum = f64; +pub type NonNegativeInteger = i64; +pub type NonNegativeIntegerDefaultZero = i64; +pub type Pattern = String; +pub type SchemaArray = Vec; + +#[derive(Serialize, Deserialize, Clone)] +#[serde(untagged)] +pub enum Items { + JSONSchema(JSONSchema), + SchemaArray(SchemaArray), +} + +pub type UniqueItems = bool; +pub type StringDoaGddGA = String; +/// StringArray +/// +/// # Default +/// +/// [] +/// +pub type StringArray = Vec; +/// Definitions +/// +/// # Default +/// +/// {} +/// +pub type Definitions = HashMap>; +/// Properties +/// +/// # Default +/// +/// {} +/// +pub type Properties = HashMap>; +/// PatternProperties +/// +/// # Default +/// +/// {} +/// +pub type PatternProperties = HashMap>; + +#[derive(Serialize, Deserialize, Clone)] +#[serde(untagged)] +pub enum DependenciesSet { + JSONSchema(JSONSchema), + StringArray(StringArray), +} + +pub type Dependencies = HashMap>; +pub type Enum = Vec; +pub type SimpleTypes = serde_json::Value; +pub type ArrayOfSimpleTypes = Vec; + +#[derive(Serialize, Deserialize, Clone)] +#[serde(untagged)] +pub enum Type { + SimpleTypes(SimpleTypes), + ArrayOfSimpleTypes(ArrayOfSimpleTypes), +} + +pub type Format = String; +pub type ContentMediaType = String; 
+pub type ContentEncoding = String; + +/// JSONSchemaBoolean +/// +/// Always valid if true. Never valid if false. Is constant. +/// +#[derive(Serialize, Deserialize, Clone)] +#[serde(untagged)] +pub enum JSONSchema { + JsonSchemaObject(RootSchema), + JSONSchemaBoolean(bool), +} + +pub type ContentDescriptorObjectRequired = bool; +pub type ContentDescriptorObjectDeprecated = bool; + +#[derive(Serialize, Deserialize, Clone)] +pub struct ContentDescriptorObject { + pub name: ContentDescriptorObjectName, + #[serde(skip_serializing_if = "Option::is_none")] + pub description: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub summary: Option, + pub schema: JSONSchema, + #[serde(skip_serializing_if = "Option::is_none")] + pub required: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub deprecated: Option, +} + +#[derive(Serialize, Deserialize, Clone)] +#[serde(untagged)] +pub enum ContentDescriptorOrReference { + ContentDescriptorObject(ContentDescriptorObject), + ReferenceObject(ReferenceObject), +} + +pub type MethodObjectParams = Vec; + +/// ErrorObjectCode +/// +/// A Number that indicates the error type that occurred. This MUST be an integer. The error codes from and including -32768 to -32000 are reserved for pre-defined errors. These pre-defined errors SHOULD be assumed to be returned from any JSON-RPC api. +/// +pub type ErrorObjectCode = i64; +/// ErrorObjectMessage +/// +/// A String providing a short description of the error. The message SHOULD be limited to a concise single sentence. +/// +pub type ErrorObjectMessage = String; +/// ErrorObjectData +/// +/// A Primitive or Structured value that contains additional information about the error. This may be omitted. The value of this member is defined by the Server (e.g. detailed error information, nested errors etc.). +/// +pub type ErrorObjectData = serde_json::Value; + +/// ErrorObject +/// +/// Defines an application level error. 
+/// +#[derive(Serialize, Deserialize, Clone)] +pub struct ErrorObject { + pub code: ErrorObjectCode, + pub message: ErrorObjectMessage, + #[serde(skip_serializing_if = "Option::is_none")] + pub data: Option, +} + +#[derive(Serialize, Deserialize, Clone)] +#[serde(untagged)] +pub enum ErrorOrReference { + ErrorObject(ErrorObject), + ReferenceObject(ReferenceObject), +} + +/// MethodObjectErrors +/// +/// Defines an application level error. +/// +pub type MethodObjectErrors = Vec; +pub type LinkObjectName = String; +pub type LinkObjectSummary = String; +pub type LinkObjectMethod = String; +pub type LinkObjectDescription = String; +pub type LinkObjectParams = serde_json::Value; + +#[derive(Serialize, Deserialize, Clone)] +pub struct LinkObjectServer { + pub url: ServerObjectUrl, + #[serde(skip_serializing_if = "Option::is_none")] + pub name: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub description: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub summary: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub variables: Option, +} + +#[derive(Serialize, Deserialize, Clone)] +pub struct LinkObject { + #[serde(skip_serializing_if = "Option::is_none")] + pub name: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub summary: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub method: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub description: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub params: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub server: Option, +} + +#[derive(Serialize, Deserialize, Clone)] +#[serde(untagged)] +pub enum LinkOrReference { + LinkObject(LinkObject), + ReferenceObject(ReferenceObject), +} + +pub type MethodObjectLinks = Vec; +pub type ExamplePairingObjectName = String; +pub type ExamplePairingObjectDescription = String; +pub type ExampleObjectSummary = String; +pub type ExampleObjectValue = serde_json::Value; 
+pub type ExampleObjectDescription = String; +pub type ExampleObjectName = String; + +#[derive(Serialize, Deserialize, Clone)] +pub struct ExampleObject { + #[serde(skip_serializing_if = "Option::is_none")] + pub summary: Option, + pub value: ExampleObjectValue, + #[serde(skip_serializing_if = "Option::is_none")] + pub description: Option, + pub name: ExampleObjectName, +} + +#[derive(Serialize, Deserialize, Clone)] +#[serde(untagged)] +pub enum ExampleOrReference { + ExampleObject(ExampleObject), + ReferenceObject(ReferenceObject), +} + +pub type ExamplePairingObjectParams = Vec; + +#[derive(Serialize, Deserialize, Clone)] +#[serde(untagged)] +pub enum ExamplePairingObjectResult { + ExampleObject(ExampleObject), + ReferenceObject(ReferenceObject), +} + +#[derive(Serialize, Deserialize, Clone)] +pub struct ExamplePairingObject { + pub name: ExamplePairingObjectName, + #[serde(skip_serializing_if = "Option::is_none")] + pub description: Option, + pub params: ExamplePairingObjectParams, + pub result: ExamplePairingObjectResult, +} + +#[derive(Serialize, Deserialize, Clone)] +#[serde(untagged)] +pub enum ExamplePairingOrReference { + ExampleObject(ExampleObject), + ReferenceObject(ReferenceObject), +} + +pub type MethodObjectExamples = Vec; +pub type MethodObjectDeprecated = bool; + +#[derive(Serialize, Deserialize, Clone)] +pub struct MethodObject { + pub name: MethodObjectName, + #[serde(skip_serializing_if = "Option::is_none")] + pub description: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub summary: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub servers: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub tags: Option, + #[serde(rename = "paramStructure")] + #[serde(skip_serializing_if = "Option::is_none")] + pub param_structure: Option, + pub params: MethodObjectParams, + pub result: ContentDescriptorOrReference, + #[serde(skip_serializing_if = "Option::is_none")] + pub errors: Option, + 
#[serde(skip_serializing_if = "Option::is_none")] + pub links: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub examples: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub deprecated: Option, + #[serde(skip_serializing_if = "Option::is_none")] + #[serde(rename = "externalDocs")] + pub external_docs: Option, +} + +pub type Methods = Vec; +pub type SchemaComponents = HashMap>; +pub type LinkComponents = HashMap>; +pub type ErrorComponents = HashMap>; +pub type ExampleComponents = HashMap>; +pub type ExamplePairingComponents = HashMap>; +pub type ContentDescriptorComponents = HashMap>; +pub type TagComponents = HashMap>; + +#[derive(Serialize, Deserialize, Clone)] +pub struct Components { + #[serde(skip_serializing_if = "Option::is_none")] + pub schemas: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub links: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub errors: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub examples: Option, + #[serde(rename = "examplePairings")] + pub example_pairings: Option, + #[serde(rename = "contentDescriptors")] + #[serde(skip_serializing_if = "Option::is_none")] + pub content_descriptors: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub tags: Option, +} + +#[derive(Serialize, Deserialize, Clone)] +pub struct OpenrpcDocument { + pub openrpc: Openrpc, + pub info: InfoObject, + #[serde(rename = "externalDocs")] + #[serde(skip_serializing_if = "Option::is_none")] + pub external_docs: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub servers: Option, + pub methods: Methods, + #[serde(skip_serializing_if = "Option::is_none")] + pub components: Option, +} + +impl Default for OpenrpcDocument { + fn default() -> Self { + OpenrpcDocument { + openrpc: Openrpc::V26, + info: InfoObject { + title: "".to_string(), + description: None, + terms_of_service: None, + version: "".to_string(), + contact: None, + license: None, + }, + 
external_docs: None, + servers: None, + methods: vec![], + components: None, + } + } +} + +impl OpenrpcDocument { + pub fn set_info(mut self, info: InfoObject) -> Self { + self.info = info; + self + } + pub fn add_object_method(&mut self, method: MethodObject) { + self.methods.push(method) + } +} + +impl ContentDescriptorOrReference { + pub fn new_content_descriptor( + name: ContactObjectName, + description: Option, + ) -> Self { + let mut setting = SchemaSettings::draft07(); + setting.inline_subschemas = true; + let schema = schemars::gen::SchemaGenerator::new(setting).into_root_schema_for::(); + let json_schema = JSONSchema::JsonSchemaObject(schema); + ContentDescriptorOrReference::ContentDescriptorObject(ContentDescriptorObject { + name, + description, + summary: None, + schema: json_schema, + required: None, + deprecated: None, + }) + } +} + +impl MethodObject { + pub fn new(name: MethodObjectName, description: Option) -> Self { + Self { + name, + description, + summary: None, + servers: None, + tags: None, + param_structure: None, + params: vec![], + result: ContentDescriptorOrReference::ReferenceObject(ReferenceObject { + reference: "".to_string(), + }), + errors: None, + links: None, + examples: None, + deprecated: None, + external_docs: None, + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + #[derive(JsonSchema)] + pub struct MyType([u8; 8]); + + #[derive(JsonSchema)] + pub struct MyParam { + pub my_int: i32, + pub my_bool: bool, + pub my_type: Box, + } + + #[derive(JsonSchema)] + pub struct MyRet { + pub success: Box, + } + + #[test] + fn test_openrpc_document() { + let mut document = OpenrpcDocument::default(); + let mut method = MethodObject::new("method1".to_string(), None); + let param = ContentDescriptorOrReference::new_content_descriptor::( + "first_param".to_string(), + Some("no desc".to_string()), + ); + method.params.push(param); + method.result = + ContentDescriptorOrReference::new_content_descriptor::("ret".to_string(), None); + 
document.add_object_method(method); + let j = serde_json::to_string_pretty(&document).unwrap(); + println!("{}", j); + } +} diff --git a/homestar-runtime/schemas/openrpc/mod.rs b/homestar-runtime/schemas/openrpc/mod.rs new file mode 100644 index 00000000..ef6324cb --- /dev/null +++ b/homestar-runtime/schemas/openrpc/mod.rs @@ -0,0 +1 @@ +pub(crate) mod document; From 42e560c86f9d9bc6e4863443449cd3bf82106598 Mon Sep 17 00:00:00 2001 From: Brian Ginsburg Date: Tue, 16 Jan 2024 15:31:48 -0800 Subject: [PATCH 03/75] feat: Add API and schema generator --- Cargo.lock | 36 +++++++++ homestar-runtime/Cargo.toml | 8 ++ homestar-runtime/schemas/docs/.gitkeep | 0 homestar-runtime/schemas/generate.rs | 78 +++++++++++++++++++ .../src/event_handler/notification/swarm.rs | 55 ++++++++++++- homestar-runtime/src/lib.rs | 3 + 6 files changed, 179 insertions(+), 1 deletion(-) create mode 100644 homestar-runtime/schemas/docs/.gitkeep create mode 100644 homestar-runtime/schemas/generate.rs diff --git a/Cargo.lock b/Cargo.lock index b40c7dff..8630e685 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2585,6 +2585,7 @@ dependencies = [ "reqwest", "retry", "rm_rf", + "schemars", "sec1", "semver", "serde", @@ -5869,6 +5870,30 @@ dependencies = [ "parking_lot", ] +[[package]] +name = "schemars" +version = "0.8.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "45a28f4c49489add4ce10783f7911893516f15afe45d015608d41faca6bc4d29" +dependencies = [ + "dyn-clone", + "schemars_derive", + "serde", + "serde_json", +] + +[[package]] +name = "schemars_derive" +version = "0.8.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c767fd6fa65d9ccf9cf026122c1b555f2ef9a4f0cea69da4d7dbc3e258d30967" +dependencies = [ + "proc-macro2", + "quote", + "serde_derive_internals", + "syn 1.0.109", +] + [[package]] name = "scopeguard" version = "1.2.0" @@ -5972,6 +5997,17 @@ dependencies = [ "syn 2.0.48", ] +[[package]] +name = "serde_derive_internals" +version = 
"0.26.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85bf8229e7920a9f636479437026331ce11aa132b4dde37d121944a44d6e5f3c" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", +] + [[package]] name = "serde_ipld_dagcbor" version = "0.4.2" diff --git a/homestar-runtime/Cargo.toml b/homestar-runtime/Cargo.toml index abc672b9..c06169e7 100644 --- a/homestar-runtime/Cargo.toml +++ b/homestar-runtime/Cargo.toml @@ -24,6 +24,13 @@ path = "src/main.rs" doc = false bench = false +[[bin]] +name = "schemas" +path = "schemas/generate.rs" +bench = false +doc = false +test = false + [[test]] name = "integration" path = "tests/main.rs" @@ -127,6 +134,7 @@ reqwest = { version = "0.11", default-features = false, features = [ "blocking", "json", ] } +schemars = "0.8.16" sec1 = { version = "0.7", default-features = false, features = ["pem"] } semver = { version = "1.0", default-features = false } serde = { workspace = true } diff --git a/homestar-runtime/schemas/docs/.gitkeep b/homestar-runtime/schemas/docs/.gitkeep new file mode 100644 index 00000000..e69de29b diff --git a/homestar-runtime/schemas/generate.rs b/homestar-runtime/schemas/generate.rs new file mode 100644 index 00000000..e934aa20 --- /dev/null +++ b/homestar-runtime/schemas/generate.rs @@ -0,0 +1,78 @@ +//! Standalone binary to generate OpenRPC API docs and +//! JSON Schemas for method params and notifications. 
+ +use homestar_runtime::NetworkNotification; +use schemars::{schema::RootSchema, schema_for}; +use std::{fs, io::Write}; + +mod openrpc; +use openrpc::document::{ + ContactObject, ContentDescriptorObject, ContentDescriptorOrReference, + ExternalDocumentationObject, InfoObject, JSONSchema, LicenseObject, MethodObject, + MethodObjectParamStructure, Openrpc, OpenrpcDocument, +}; + +// Generate docs with `cargo run --bin schemas` +fn main() { + let network_schema = schema_for!(NetworkNotification); + let _ = fs::File::create("schemas/docs/network.json") + .unwrap() + .write_all(&serde_json::to_vec_pretty(&network_schema).unwrap()); + + let api_doc = generate_api_doc(network_schema); + let _ = fs::File::create("schemas/docs/api.json") + .unwrap() + .write_all(&serde_json::to_vec_pretty(&api_doc).unwrap()); +} + +// Spec: https://github.com/open-rpc/spec/blob/1.2.6/spec.md +fn generate_api_doc(network_schema: RootSchema) -> OpenrpcDocument { + let network: MethodObject = MethodObject { + name: "network".to_string(), + description: None, + summary: None, + servers: None, + tags: None, + param_structure: Some(MethodObjectParamStructure::ByName), + params: vec![], + result: ContentDescriptorOrReference::ContentDescriptorObject(ContentDescriptorObject { + name: "network".to_string(), + summary: None, + description: None, + required: Some(true), + schema: JSONSchema::JsonSchemaObject(network_schema), + deprecated: Some(false), + }), + external_docs: None, + errors: None, + links: None, + examples: None, + deprecated: Some(false), + }; + + OpenrpcDocument { + openrpc: Openrpc::V26, // TODO Should we upgrade to latest spec at 1.3.2? 
+ info: InfoObject { + title: "homestar".to_string(), + description: Some(env!("CARGO_PKG_DESCRIPTION").into()), + terms_of_service: None, + version: "0.10.0".to_string(), + contact: Some(ContactObject { + name: None, + url: Some(env!("CARGO_PKG_REPOSITORY").into()), + email: None, + }), + license: Some(LicenseObject { + name: Some(env!("CARGO_PKG_LICENSE").into()), + url: None, + }), + }, + external_docs: Some(ExternalDocumentationObject { + description: None, + url: "https://docs.everywhere.computer/homestar/what-is-homestar/".to_string(), + }), + servers: None, + methods: vec![network], + components: None, + } +} diff --git a/homestar-runtime/src/event_handler/notification/swarm.rs b/homestar-runtime/src/event_handler/notification/swarm.rs index 517fe3ba..2a0c8883 100644 --- a/homestar-runtime/src/event_handler/notification/swarm.rs +++ b/homestar-runtime/src/event_handler/notification/swarm.rs @@ -2,9 +2,16 @@ // // [swarm]: libp2p_swarm::Swarm +use crate::api::TaggedSchema; use anyhow::anyhow; +use chrono::prelude::Utc; +use homestar_core::ipld::DagJson; +use libipld::{serde::from_ipld, Ipld}; +use libp2p::{Multiaddr, PeerId}; +use schemars::JsonSchema; use serde::{Deserialize, Serialize}; -use std::{fmt, str::FromStr}; + +use std::{collections::BTreeMap, fmt, str::FromStr}; // Swarm notification types sent to clients #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] @@ -106,3 +113,49 @@ impl FromStr for SwarmNotification { } } } + +/// Network notification type. +#[derive(Clone, JsonSchema, Debug)] +#[schemars(rename = "network")] +pub enum NetworkNotification { + /// Connection established notification. + #[schemars(rename = "connectionEstablished")] + ConnnectionEstablished(ConnectionEstablished), + /// Connection closed notification. 
+ #[schemars(rename = "connectionClosed")] + ConnnectionClosed(ConnectionClosed), +} + +#[derive(JsonSchema, Debug, Clone)] +#[schemars(rename = "connectionEstablished")] +pub struct ConnectionEstablished { + #[schemars(schema_with = "ConnectionEstablished::make_tag_schema")] + tag: String, + timestamp: i64, + #[schemars(rename = "peerId")] + peer_id: String, + address: String, +} + +impl TaggedSchema for ConnectionEstablished { + fn tag() -> String { + "network:connectionEstablished".to_string() + } +} + +#[derive(JsonSchema, Debug, Clone)] +#[schemars(rename = "connectionClosed")] +pub struct ConnectionClosed { + #[schemars(schema_with = "ConnectionClosed::make_tag_schema")] + tag: String, + timestamp: i64, + #[schemars(rename = "peerId")] + peer_id: String, + address: String, +} + +impl TaggedSchema for ConnectionClosed { + fn tag() -> String { + "network:connectionClosed".to_string() + } +} diff --git a/homestar-runtime/src/lib.rs b/homestar-runtime/src/lib.rs index 6a431ea1..56f9c3b3 100644 --- a/homestar-runtime/src/lib.rs +++ b/homestar-runtime/src/lib.rs @@ -49,6 +49,7 @@ //! [tokio console]: https://github.com/tokio-rs/console/tree/main/tokio-console //! 
[Wasmtime]: https://github.com/bytecodealliance/wasmtime +mod api; pub mod channel; pub mod cli; pub mod daemon; @@ -68,10 +69,12 @@ pub mod test_utils; mod worker; pub mod workflow; +pub(crate) use api::TaggedSchema; pub use db::Db; pub(crate) mod libp2p; pub use logger::*; pub(crate) mod metrics; +pub use event_handler::notification::swarm::NetworkNotification; #[allow(unused_imports)] pub(crate) use event_handler::EventHandler; pub use receipt::{Receipt, RECEIPT_TAG, VERSION_KEY}; From e07dca591640c51051ae15f3cf26999d9c6d8359 Mon Sep 17 00:00:00 2001 From: Brian Ginsburg Date: Wed, 17 Jan 2024 18:50:56 -0800 Subject: [PATCH 04/75] feat: Add health schema --- homestar-runtime/schemas/generate.rs | 7 ++++++- homestar-runtime/src/db.rs | 5 +++-- homestar-runtime/src/db/utils.rs | 9 +++++++++ homestar-runtime/src/lib.rs | 3 +-- homestar-runtime/src/network/webserver/rpc.rs | 4 ++-- 5 files changed, 21 insertions(+), 7 deletions(-) diff --git a/homestar-runtime/schemas/generate.rs b/homestar-runtime/schemas/generate.rs index e934aa20..911a4c4b 100644 --- a/homestar-runtime/schemas/generate.rs +++ b/homestar-runtime/schemas/generate.rs @@ -1,7 +1,7 @@ //! Standalone binary to generate OpenRPC API docs and //! JSON Schemas for method params and notifications. 
-use homestar_runtime::NetworkNotification; +use homestar_runtime::{Health, NetworkNotification}; use schemars::{schema::RootSchema, schema_for}; use std::{fs, io::Write}; @@ -14,6 +14,11 @@ use openrpc::document::{ // Generate docs with `cargo run --bin schemas` fn main() { + let health_schema = schema_for!(Health); + let _ = fs::File::create("schemas/docs/health.json") + .unwrap() + .write_all(&serde_json::to_vec_pretty(&health_schema).unwrap()); + let network_schema = schema_for!(NetworkNotification); let _ = fs::File::create("schemas/docs/network.json") .unwrap() diff --git a/homestar-runtime/src/db.rs b/homestar-runtime/src/db.rs index 428d03d2..4a8acf4e 100644 --- a/homestar-runtime/src/db.rs +++ b/homestar-runtime/src/db.rs @@ -1,6 +1,7 @@ //! (Default) sqlite database integration and setup. use crate::{ + db::utils::Health, settings, workflow::{self, StoredReceipt}, Receipt, @@ -123,9 +124,9 @@ pub trait Database: Send + Sync + Clone { } /// Check if the database is up. - fn health_check(conn: &mut Connection) -> Result<(), diesel::result::Error> { + fn health_check(conn: &mut Connection) -> Result { diesel::sql_query("SELECT 1").execute(conn)?; - Ok(()) + Ok(Health { healthy: true }) } /// Commit a receipt to the database, updating two tables diff --git a/homestar-runtime/src/db/utils.rs b/homestar-runtime/src/db/utils.rs index 08d76e36..c4cd6a25 100644 --- a/homestar-runtime/src/db/utils.rs +++ b/homestar-runtime/src/db/utils.rs @@ -1,6 +1,8 @@ //! Utility functions Database interaction. use chrono::NaiveDateTime; +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; /// Trait for converting nanoseconds to a timestamp. pub(crate) trait Timestamp { @@ -14,3 +16,10 @@ impl Timestamp for i64 { NaiveDateTime::from_timestamp_opt(seconds, nanos as u32) } } + +/// Health status of the server and database connection. +#[derive(Debug, Serialize, Deserialize, JsonSchema)] +pub struct Health { + /// Health status. 
+ pub healthy: bool, +} diff --git a/homestar-runtime/src/lib.rs b/homestar-runtime/src/lib.rs index 56f9c3b3..aef8eca2 100644 --- a/homestar-runtime/src/lib.rs +++ b/homestar-runtime/src/lib.rs @@ -69,8 +69,7 @@ pub mod test_utils; mod worker; pub mod workflow; -pub(crate) use api::TaggedSchema; -pub use db::Db; +pub use db::{utils::Health, Db}; pub(crate) mod libp2p; pub use logger::*; pub(crate) mod metrics; diff --git a/homestar-runtime/src/network/webserver/rpc.rs b/homestar-runtime/src/network/webserver/rpc.rs index c0d646d3..eac6cadc 100644 --- a/homestar-runtime/src/network/webserver/rpc.rs +++ b/homestar-runtime/src/network/webserver/rpc.rs @@ -155,8 +155,8 @@ where module.register_async_method(HEALTH_ENDPOINT, |_, ctx| async move { match ctx.db.conn() { Ok(mut conn) => { - if DB::health_check(&mut conn).is_ok() { - Ok(serde_json::json!({"healthy": true})) + if let Ok(health) = DB::health_check(&mut conn) { + Ok(serde_json::json!(health)) } else { Err(internal_err("database query is unreachable".to_string())) } From c2d1a217d1eb40b63112fa6c1d403bcb4c8fe288 Mon Sep 17 00:00:00 2001 From: Brian Ginsburg Date: Thu, 18 Jan 2024 11:59:32 -0800 Subject: [PATCH 05/75] refactor: Remove internal tagging --- homestar-runtime/src/api.rs | 16 ---------------- .../src/event_handler/notification/swarm.rs | 16 ---------------- homestar-runtime/src/lib.rs | 1 - 3 files changed, 33 deletions(-) delete mode 100644 homestar-runtime/src/api.rs diff --git a/homestar-runtime/src/api.rs b/homestar-runtime/src/api.rs deleted file mode 100644 index da503e59..00000000 --- a/homestar-runtime/src/api.rs +++ /dev/null @@ -1,16 +0,0 @@ -use schemars::{ - gen::SchemaGenerator, - schema::{Schema, SchemaObject}, - JsonSchema, -}; -use serde_json::json; - -pub(crate) trait TaggedSchema { - fn tag() -> String; - - fn make_tag_schema(gen: &mut SchemaGenerator) -> Schema { - let mut schema: SchemaObject = ::json_schema(gen).into(); - schema.const_value = Some(json!(Self::tag())); - 
schema.into() - } -} diff --git a/homestar-runtime/src/event_handler/notification/swarm.rs b/homestar-runtime/src/event_handler/notification/swarm.rs index 2a0c8883..6dce1d22 100644 --- a/homestar-runtime/src/event_handler/notification/swarm.rs +++ b/homestar-runtime/src/event_handler/notification/swarm.rs @@ -129,33 +129,17 @@ pub enum NetworkNotification { #[derive(JsonSchema, Debug, Clone)] #[schemars(rename = "connectionEstablished")] pub struct ConnectionEstablished { - #[schemars(schema_with = "ConnectionEstablished::make_tag_schema")] - tag: String, timestamp: i64, #[schemars(rename = "peerId")] peer_id: String, address: String, } -impl TaggedSchema for ConnectionEstablished { - fn tag() -> String { - "network:connectionEstablished".to_string() - } -} - #[derive(JsonSchema, Debug, Clone)] #[schemars(rename = "connectionClosed")] pub struct ConnectionClosed { - #[schemars(schema_with = "ConnectionClosed::make_tag_schema")] - tag: String, timestamp: i64, #[schemars(rename = "peerId")] peer_id: String, address: String, } - -impl TaggedSchema for ConnectionClosed { - fn tag() -> String { - "network:connectionClosed".to_string() - } -} diff --git a/homestar-runtime/src/lib.rs b/homestar-runtime/src/lib.rs index aef8eca2..b482e7d2 100644 --- a/homestar-runtime/src/lib.rs +++ b/homestar-runtime/src/lib.rs @@ -49,7 +49,6 @@ //! [tokio console]: https://github.com/tokio-rs/console/tree/main/tokio-console //! 
[Wasmtime]: https://github.com/bytecodealliance/wasmtime -mod api; pub mod channel; pub mod cli; pub mod daemon; From a648159f5aac0633c9ffbb7100dd8267ead2c5e5 Mon Sep 17 00:00:00 2001 From: Brian Ginsburg Date: Thu, 18 Jan 2024 15:45:15 -0800 Subject: [PATCH 06/75] feat: Add initial network notification implementations --- homestar-runtime/src/db/utils.rs | 1 + .../src/event_handler/notification/swarm.rs | 218 +++++++++++++++++- 2 files changed, 211 insertions(+), 8 deletions(-) diff --git a/homestar-runtime/src/db/utils.rs b/homestar-runtime/src/db/utils.rs index c4cd6a25..ef2e7d4e 100644 --- a/homestar-runtime/src/db/utils.rs +++ b/homestar-runtime/src/db/utils.rs @@ -19,6 +19,7 @@ impl Timestamp for i64 { /// Health status of the server and database connection. #[derive(Debug, Serialize, Deserialize, JsonSchema)] +#[schemars(rename = "health")] pub struct Health { /// Health status. pub healthy: bool, diff --git a/homestar-runtime/src/event_handler/notification/swarm.rs b/homestar-runtime/src/event_handler/notification/swarm.rs index 6dce1d22..8be9683c 100644 --- a/homestar-runtime/src/event_handler/notification/swarm.rs +++ b/homestar-runtime/src/event_handler/notification/swarm.rs @@ -2,7 +2,6 @@ // // [swarm]: libp2p_swarm::Swarm -use crate::api::TaggedSchema; use anyhow::anyhow; use chrono::prelude::Utc; use homestar_core::ipld::DagJson; @@ -10,9 +9,10 @@ use libipld::{serde::from_ipld, Ipld}; use libp2p::{Multiaddr, PeerId}; use schemars::JsonSchema; use serde::{Deserialize, Serialize}; - use std::{collections::BTreeMap, fmt, str::FromStr}; +const TIMESTAMP_KEY: &str = "timestamp"; + // Swarm notification types sent to clients #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] pub(crate) enum SwarmNotification { @@ -119,27 +119,229 @@ impl FromStr for SwarmNotification { #[schemars(rename = "network")] pub enum NetworkNotification { /// Connection established notification. 
- #[schemars(rename = "connectionEstablished")] + #[schemars(rename = "connection_established")] ConnnectionEstablished(ConnectionEstablished), /// Connection closed notification. - #[schemars(rename = "connectionClosed")] + #[schemars(rename = "connection_closed")] ConnnectionClosed(ConnectionClosed), } +impl DagJson for NetworkNotification {} + +impl From for Ipld { + fn from(notification: NetworkNotification) -> Self { + match notification { + NetworkNotification::ConnnectionEstablished(n) => Ipld::Map(BTreeMap::from([( + "connection_established".into(), + n.into(), + )])), + NetworkNotification::ConnnectionClosed(n) => { + Ipld::Map(BTreeMap::from([("connection_closed".into(), n.into())])) + } + } + } +} + +impl TryFrom for NetworkNotification { + type Error = anyhow::Error; + + fn try_from(ipld: Ipld) -> Result { + let map = from_ipld::>(ipld)?; + + if let Some((key, val)) = map.first_key_value() { + match key.as_str() { + "connection_established" => Ok(NetworkNotification::ConnnectionEstablished( + ConnectionEstablished::try_from(val.to_owned())?, + )), + "connection_closed" => Ok(NetworkNotification::ConnnectionClosed( + ConnectionClosed::try_from(val.to_owned())?, + )), + _ => Err(anyhow!("Unknown network notification tag type")), + } + } else { + Err(anyhow!("Network notification was an empty map")) + } + } +} + #[derive(JsonSchema, Debug, Clone)] -#[schemars(rename = "connectionEstablished")] +#[schemars(rename = "connection_established")] pub struct ConnectionEstablished { timestamp: i64, - #[schemars(rename = "peerId")] peer_id: String, address: String, } +impl ConnectionEstablished { + pub(crate) fn new(peer_id: PeerId, address: Multiaddr) -> ConnectionEstablished { + ConnectionEstablished { + peer_id: peer_id.to_string(), + timestamp: Utc::now().timestamp_millis(), + address: address.to_string(), + } + } +} + +impl DagJson for ConnectionEstablished {} + +impl From for Ipld { + fn from(notification: ConnectionEstablished) -> Self { + 
Ipld::Map(BTreeMap::from([ + ("timestamp".into(), notification.timestamp.into()), + ("peer_id".into(), notification.peer_id.into()), + ("address".into(), notification.address.into()), + ])) + } +} + +impl TryFrom for ConnectionEstablished { + type Error = anyhow::Error; + + fn try_from(ipld: Ipld) -> Result { + let peer_key: &str = "peer_id"; + let address_key: &str = "address"; + + let map = from_ipld::>(ipld)?; + + let peer_id = from_ipld( + map.get(peer_key) + .ok_or_else(|| anyhow!("missing {peer_key}"))? + .to_owned(), + )?; + + let address = from_ipld( + map.get(address_key) + .ok_or_else(|| anyhow!("missing {address_key}"))? + .to_owned(), + )?; + + let timestamp = from_ipld( + map.get(TIMESTAMP_KEY) + .ok_or_else(|| anyhow!("missing {TIMESTAMP_KEY}"))? + .to_owned(), + )?; + + Ok(ConnectionEstablished { + timestamp, + peer_id, + address, + }) + } +} + #[derive(JsonSchema, Debug, Clone)] -#[schemars(rename = "connectionClosed")] +#[schemars(rename = "connection_closed")] pub struct ConnectionClosed { timestamp: i64, - #[schemars(rename = "peerId")] peer_id: String, address: String, } + +impl ConnectionClosed { + pub(crate) fn new(peer_id: PeerId, address: Multiaddr) -> ConnectionClosed { + ConnectionClosed { + timestamp: Utc::now().timestamp_millis(), + peer_id: peer_id.to_string(), + address: address.to_string(), + } + } +} + +impl DagJson for ConnectionClosed {} + +impl From for Ipld { + fn from(notification: ConnectionClosed) -> Self { + Ipld::Map(BTreeMap::from([ + ("timestamp".into(), notification.timestamp.into()), + ("peer_id".into(), notification.peer_id.into()), + ("address".into(), notification.address.into()), + ])) + } +} + +impl TryFrom for ConnectionClosed { + type Error = anyhow::Error; + + fn try_from(ipld: Ipld) -> Result { + let peer_key: &str = "peer_id"; + let address_key: &str = "address"; + + let map = from_ipld::>(ipld)?; + + let peer_id = from_ipld( + map.get(peer_key) + .ok_or_else(|| anyhow!("missing {peer_key}"))? 
+ .to_owned(), + )?; + + let address = from_ipld( + map.get(address_key) + .ok_or_else(|| anyhow!("missing {address_key}"))? + .to_owned(), + )?; + + let timestamp = from_ipld( + map.get(TIMESTAMP_KEY) + .ok_or_else(|| anyhow!("missing {TIMESTAMP_KEY}"))? + .to_owned(), + )?; + + Ok(ConnectionClosed { + timestamp, + peer_id, + address, + }) + } +} + +#[cfg(test)] +mod test { + use super::*; + use libp2p::PeerId; + + #[test] + fn notification_bytes_rountrip() { + let peer_id = PeerId::random(); + let address = Multiaddr::from_str("/ip4/127.0.0.1/tcp/7000").unwrap(); + let inner = ConnectionEstablished::new(peer_id, address.clone()); + + let notification = NetworkNotification::ConnnectionEstablished(inner.clone()); + let bytes = notification.to_json().unwrap(); + let parsed = NetworkNotification::from_json(bytes.as_ref()).unwrap(); + + match parsed { + NetworkNotification::ConnnectionEstablished(n) => { + let parsed_peer_id = PeerId::from_str(&n.peer_id).unwrap(); + let parsed_address = Multiaddr::from_str(&n.address).unwrap(); + + assert_eq!(parsed_peer_id, peer_id); + assert_eq!(parsed_address, address); + assert_eq!(n.timestamp, inner.timestamp); + } + _ => panic!("Parsed notification did not matched expected variant"), + } + } + + #[test] + fn notification_json_string_rountrip() { + let peer_id = PeerId::random(); + let address = Multiaddr::from_str("/ip4/127.0.0.1/tcp/7000").unwrap(); + let inner = ConnectionEstablished::new(peer_id, address.clone()); + + let notification = NetworkNotification::ConnnectionEstablished(inner.clone()); + let json_string = notification.to_json_string().unwrap(); + let parsed = NetworkNotification::from_json_string(json_string).unwrap(); + + match parsed { + NetworkNotification::ConnnectionEstablished(n) => { + let parsed_peer_id = PeerId::from_str(&n.peer_id).unwrap(); + let parsed_address = Multiaddr::from_str(&n.address).unwrap(); + + assert_eq!(parsed_peer_id, peer_id); + assert_eq!(parsed_address, address); + 
assert_eq!(n.timestamp, inner.timestamp); + } + _ => panic!("Parsed notification did not matched expected variant"), + } + } +} From 5eb7318bd36c8857803cce4362d5ca9fb90d608d Mon Sep 17 00:00:00 2001 From: Brian Ginsburg Date: Thu, 18 Jan 2024 16:13:51 -0800 Subject: [PATCH 07/75] feat: Add OpenRPC x-messages extension --- homestar-runtime/schemas/generate.rs | 10 +++++++++- homestar-runtime/schemas/openrpc/document.rs | 4 ++++ 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/homestar-runtime/schemas/generate.rs b/homestar-runtime/schemas/generate.rs index 911a4c4b..ebc74d6d 100644 --- a/homestar-runtime/schemas/generate.rs +++ b/homestar-runtime/schemas/generate.rs @@ -45,7 +45,7 @@ fn generate_api_doc(network_schema: RootSchema) -> OpenrpcDocument { summary: None, description: None, required: Some(true), - schema: JSONSchema::JsonSchemaObject(network_schema), + schema: JSONSchema::JsonSchemaObject(schema_for!(String)), deprecated: Some(false), }), external_docs: None, @@ -53,6 +53,14 @@ fn generate_api_doc(network_schema: RootSchema) -> OpenrpcDocument { links: None, examples: None, deprecated: Some(false), + x_messages: Some(ContentDescriptorObject { + name: "network subscription messages".to_string(), + summary: None, + description: None, + required: Some(true), + schema: JSONSchema::JsonSchemaObject(network_schema), + deprecated: Some(false), + }), }; OpenrpcDocument { diff --git a/homestar-runtime/schemas/openrpc/document.rs b/homestar-runtime/schemas/openrpc/document.rs index db447c9f..d4585fc8 100644 --- a/homestar-runtime/schemas/openrpc/document.rs +++ b/homestar-runtime/schemas/openrpc/document.rs @@ -489,6 +489,9 @@ pub struct MethodObject { #[serde(skip_serializing_if = "Option::is_none")] #[serde(rename = "externalDocs")] pub external_docs: Option, + #[serde(skip_serializing_if = "Option::is_none")] + #[serde(rename = "x-messages")] + pub x_messages: Option, } pub type Methods = Vec; @@ -601,6 +604,7 @@ impl MethodObject { examples: None, 
deprecated: None, external_docs: None, + x_messages: None, } } } From 0e55222f3da2b56da6c3b87993dbb536061b8cff Mon Sep 17 00:00:00 2001 From: Brian Ginsburg Date: Thu, 18 Jan 2024 16:22:47 -0800 Subject: [PATCH 08/75] feat: Add health API method --- homestar-runtime/schemas/generate.rs | 30 +++++++++++++++++++++++++--- 1 file changed, 27 insertions(+), 3 deletions(-) diff --git a/homestar-runtime/schemas/generate.rs b/homestar-runtime/schemas/generate.rs index ebc74d6d..6deeda31 100644 --- a/homestar-runtime/schemas/generate.rs +++ b/homestar-runtime/schemas/generate.rs @@ -24,14 +24,38 @@ fn main() { .unwrap() .write_all(&serde_json::to_vec_pretty(&network_schema).unwrap()); - let api_doc = generate_api_doc(network_schema); + let api_doc = generate_api_doc(health_schema, network_schema); let _ = fs::File::create("schemas/docs/api.json") .unwrap() .write_all(&serde_json::to_vec_pretty(&api_doc).unwrap()); } // Spec: https://github.com/open-rpc/spec/blob/1.2.6/spec.md -fn generate_api_doc(network_schema: RootSchema) -> OpenrpcDocument { +fn generate_api_doc(health_schema: RootSchema, network_schema: RootSchema) -> OpenrpcDocument { + let health: MethodObject = MethodObject { + name: "health".to_string(), + description: None, + summary: None, + servers: None, + tags: None, + param_structure: Some(MethodObjectParamStructure::ByName), + params: vec![], + result: ContentDescriptorOrReference::ContentDescriptorObject(ContentDescriptorObject { + name: "health".to_string(), + summary: None, + description: None, + required: Some(true), + schema: JSONSchema::JsonSchemaObject(health_schema), + deprecated: Some(false), + }), + external_docs: None, + errors: None, + links: None, + examples: None, + deprecated: Some(false), + x_messages: None, + }; + let network: MethodObject = MethodObject { name: "network".to_string(), description: None, @@ -85,7 +109,7 @@ fn generate_api_doc(network_schema: RootSchema) -> OpenrpcDocument { url: 
"https://docs.everywhere.computer/homestar/what-is-homestar/".to_string(), }), servers: None, - methods: vec![network], + methods: vec![health, network], components: None, } } From d0cb6040639996c6a2cb3535f274258421a2fd4b Mon Sep 17 00:00:00 2001 From: Brian Ginsburg Date: Tue, 23 Jan 2024 12:59:15 -0800 Subject: [PATCH 09/75] chore: Add IPLD schema generator --- Cargo.lock | 19 +++-- Cargo.toml | 4 + homestar-invocation/Cargo.toml | 2 + homestar-invocation/src/ipld/mod.rs | 1 + homestar-invocation/src/ipld/schema.rs | 102 +++++++++++++++++++++++++ 5 files changed, 123 insertions(+), 5 deletions(-) create mode 100644 homestar-invocation/src/ipld/schema.rs diff --git a/Cargo.lock b/Cargo.lock index 8630e685..f953a38c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -200,6 +200,12 @@ version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6b4930d2cb77ce62f89ee5d5289b4ac049559b1c45539271f5ed4fdc7db34545" +[[package]] +name = "arrayvec" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "23b62fc65de8e4e7f52534fb52b0f3ed04746ae267519eef2a83941e8085068b" + [[package]] name = "arrayvec" version = "0.7.4" @@ -564,7 +570,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "23285ad32269793932e830392f2fe2f83e26488fd3ec778883a93c8323735780" dependencies = [ "arrayref", - "arrayvec", + "arrayvec 0.7.4", "constant_time_eq", ] @@ -575,7 +581,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "94230421e395b9920d23df13ea5d77a20e1725331f90fbbf6df6040b33f756ae" dependencies = [ "arrayref", - "arrayvec", + "arrayvec 0.7.4", "constant_time_eq", ] @@ -586,7 +592,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0231f06152bf547e9c2b5194f247cd97aacf6dcd8b15d8e5ec0663f64580da87" dependencies = [ "arrayref", - "arrayvec", + "arrayvec 0.7.4", "cc", "cfg-if", "constant_time_eq", @@ -2508,6 +2514,7 @@ dependencies = [ 
"libipld", "libsqlite3-sys", "rand", + "schemars", "serde", "serde_json", "signature", @@ -3704,7 +3711,7 @@ version = "0.45.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5cc5767727d062c4eac74dd812c998f0e488008e82cce9c33b463d38423f9ad2" dependencies = [ - "arrayvec", + "arrayvec 0.7.4", "asynchronous-codec 0.7.0", "bytes", "either", @@ -5662,7 +5669,7 @@ version = "1.33.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "06676aec5ccb8fc1da723cc8c0f9a46549f21ebb8753d3915c6c41db1e7f1dc4" dependencies = [ - "arrayvec", + "arrayvec 0.7.4", "num-traits", ] @@ -5876,10 +5883,12 @@ version = "0.8.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "45a28f4c49489add4ce10783f7911893516f15afe45d015608d41faca6bc4d29" dependencies = [ + "arrayvec 0.5.2", "dyn-clone", "schemars_derive", "serde", "serde_json", + "url", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index 15f6dd0b..404451a8 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -51,7 +51,11 @@ libsqlite3-sys = { version = "0.27", default-features = false, features = [ "bundled", ] } rand = { version = "0.8", default-features = false } +schemars = { version = "0.8.16", features = ["arrayvec", "url"] } serde = { version = "1.0", default-features = false, features = ["derive"] } +serde_json = { version = "1.0", default-features = false, features = [ + "raw_value", +] } serde_ipld_dagcbor = { version = "0.4", default-features = false, features = [ "std", ] } diff --git a/homestar-invocation/Cargo.toml b/homestar-invocation/Cargo.toml index 8c8b6bb6..04fda5de 100644 --- a/homestar-invocation/Cargo.toml +++ b/homestar-invocation/Cargo.toml @@ -36,7 +36,9 @@ homestar-workspace-hack = { workspace = true } libipld = { workspace = true } libsqlite3-sys = { workspace = true, optional = true } rand = { workspace = true } +schemars = { workspace = true } serde = { workspace = true } +serde_json = { workspace = true } signature = "2.2" thiserror 
= { workspace = true } tracing = { workspace = true } diff --git a/homestar-invocation/src/ipld/mod.rs b/homestar-invocation/src/ipld/mod.rs index d7868e8e..5a28bc09 100644 --- a/homestar-invocation/src/ipld/mod.rs +++ b/homestar-invocation/src/ipld/mod.rs @@ -3,6 +3,7 @@ mod dag_cbor; mod dag_json; mod link; +pub mod schema; pub use dag_cbor::*; pub use dag_json::*; diff --git a/homestar-invocation/src/ipld/schema.rs b/homestar-invocation/src/ipld/schema.rs new file mode 100644 index 00000000..4378d36e --- /dev/null +++ b/homestar-invocation/src/ipld/schema.rs @@ -0,0 +1,102 @@ +//! JSON Schema generation for DAG-JSON encoded Ipld. + +use libipld::Ipld; +use schemars::{ + gen::SchemaGenerator, + schema::{InstanceType, Metadata, ObjectValidation, Schema, SchemaObject, SingleOrVec}, + JsonSchema, +}; +use std::{borrow::Cow, collections::BTreeMap}; + +/// Ipld stub for JSON Schema generation +#[derive(Debug)] +#[doc(hidden)] +pub struct IpldStub(Ipld); + +// The Ipld stub exists solely to implement a JSON Schema +// represenation of Ipld. Should libipld provide an implementation +// in the future, this can be removed. 
+impl JsonSchema for IpldStub { + fn schema_name() -> String { + "ipld".to_owned() + } + + fn schema_id() -> Cow<'static, str> { + Cow::Borrowed("homestar-invocation::ipld::schema::IpldSchema") + } + + fn json_schema(gen: &mut SchemaGenerator) -> Schema { + let mut schema = SchemaObject { + instance_type: None, + metadata: Some(Box::new(Metadata { + title: Some("Ipld".to_string()), + description: Some("DAG-JSON encoded IPLD: https://github.com/ipld/ipld/blob/master/specs/codecs/dag-json/spec.md".to_string()), + ..Default::default() + })), + ..Default::default() + }; + + let number_schema = SchemaObject { + instance_type: Some(SingleOrVec::Single(InstanceType::Number.into())), + ..Default::default() + }; + let bytes_schema = SchemaObject { + instance_type: Some(SingleOrVec::Single(InstanceType::Object.into())), + object: Some(Box::new(ObjectValidation { + properties: BTreeMap::from([( + '/'.to_string(), + Schema::Object(SchemaObject { + instance_type: Some(SingleOrVec::Single(InstanceType::Object.into())), + object: Some(Box::new(ObjectValidation { + properties: BTreeMap::from([( + "bytes".to_string(), + ::json_schema(gen), + )]), + ..Default::default() + })), + ..Default::default() + }), + )]), + ..Default::default() + })), + metadata: Some(Box::new(Metadata { + description: Some("Base64 encoded binary".to_string()), + ..Default::default() + })), + ..Default::default() + }; + let array_schema = SchemaObject { + instance_type: Some(SingleOrVec::Single(InstanceType::Array.into())), + ..Default::default() + }; + let object_schema = SchemaObject { + instance_type: Some(SingleOrVec::Single(InstanceType::Object.into())), + ..Default::default() + }; + let link_schema = SchemaObject { + instance_type: Some(SingleOrVec::Single(InstanceType::Object.into())), + object: Some(Box::new(ObjectValidation { + properties: BTreeMap::from([('/'.to_string(), ::json_schema(gen))]), + ..Default::default() + })), + metadata: Some(Box::new(Metadata { + description: Some("CID link that 
points to some IPLD data".to_string()), + ..Default::default() + })), + ..Default::default() + }; + + schema.subschemas().one_of = Some(vec![ + <()>::json_schema(gen), + ::json_schema(gen), + Schema::Object(number_schema), + ::json_schema(gen), + Schema::Object(bytes_schema), + Schema::Object(array_schema), + Schema::Object(object_schema), + Schema::Object(link_schema), + ]); + + schema.into() + } +} From ef12a0b9de400c9ef076f0b15f93977cde68e246 Mon Sep 17 00:00:00 2001 From: Brian Ginsburg Date: Tue, 23 Jan 2024 13:01:01 -0800 Subject: [PATCH 10/75] chore: Update import --- homestar-runtime/src/event_handler/notification/swarm.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/homestar-runtime/src/event_handler/notification/swarm.rs b/homestar-runtime/src/event_handler/notification/swarm.rs index 8be9683c..ae9f8d91 100644 --- a/homestar-runtime/src/event_handler/notification/swarm.rs +++ b/homestar-runtime/src/event_handler/notification/swarm.rs @@ -4,7 +4,7 @@ use anyhow::anyhow; use chrono::prelude::Utc; -use homestar_core::ipld::DagJson; +use homestar_invocation::ipld::DagJson; use libipld::{serde::from_ipld, Ipld}; use libp2p::{Multiaddr, PeerId}; use schemars::JsonSchema; From 9b88f19ace278d62d37fa9212aed0cfd9f2c7f0a Mon Sep 17 00:00:00 2001 From: Brian Ginsburg Date: Tue, 23 Jan 2024 13:27:11 -0800 Subject: [PATCH 11/75] feat: Add workflow schema --- Cargo.lock | 1 + homestar-invocation/src/authority/prf.rs | 32 ++++ homestar-invocation/src/pointer.rs | 98 ++++++++++- homestar-invocation/src/task.rs | 11 +- homestar-invocation/src/task/config.rs | 9 +- homestar-invocation/src/task/instruction.rs | 152 +++++++++++++++++- .../src/task/instruction/nonce.rs | 31 +++- homestar-runtime/Cargo.toml | 6 +- homestar-runtime/schemas/generate.rs | 60 ++++++- homestar-workflow/Cargo.toml | 1 + homestar-workflow/src/workflow.rs | 4 +- 11 files changed, 389 insertions(+), 16 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index f953a38c..9cc7b202 100644 
--- a/Cargo.lock +++ b/Cargo.lock @@ -2675,6 +2675,7 @@ dependencies = [ "indexmap 2.1.0", "json", "libipld", + "schemars", "serde", "serde_json", "thiserror", diff --git a/homestar-invocation/src/authority/prf.rs b/homestar-invocation/src/authority/prf.rs index 2a95a226..202fded3 100644 --- a/homestar-invocation/src/authority/prf.rs +++ b/homestar-invocation/src/authority/prf.rs @@ -16,7 +16,13 @@ use diesel::{ #[cfg(feature = "diesel")] use libipld::{cbor::DagCborCodec, prelude::Codec}; use libipld::{serde::from_ipld, Ipld}; +use schemars::{ + gen::SchemaGenerator, + schema::{ArrayValidation, InstanceType, Metadata, Schema, SchemaObject, SingleOrVec}, + JsonSchema, +}; use serde::{Deserialize, Serialize}; +use std::borrow::Cow; use ucan::ipld::UcanIpld; /// Proof container, with links to UCANs for a particular [Task] or @@ -89,6 +95,32 @@ impl TryFrom<&Ipld> for UcanPrf { } } +impl JsonSchema for UcanPrf { + fn schema_name() -> String { + "prf".to_owned() + } + + fn schema_id() -> Cow<'static, str> { + Cow::Borrowed("homestar-invocation::authority::prf::UcanPrf") + } + + fn json_schema(gen: &mut SchemaGenerator) -> Schema { + let schema = SchemaObject { + instance_type: Some(SingleOrVec::Vec(vec![InstanceType::Array.into()])), + array: Some(Box::new(ArrayValidation { + items: Some(gen.subschema_for::().into()), + ..Default::default() + })), + metadata: Some(Box::new(Metadata { + description: Some("CIDs referencing UCAN proofs".to_string()), + ..Default::default() + })), + ..Default::default() + }; + schema.into() + } +} + #[cfg(feature = "diesel")] #[cfg_attr(docsrs, doc(cfg(feature = "diesel")))] impl ToSql for UcanPrf { diff --git a/homestar-invocation/src/pointer.rs b/homestar-invocation/src/pointer.rs index 20b9818c..6b54ed36 100644 --- a/homestar-invocation/src/pointer.rs +++ b/homestar-invocation/src/pointer.rs @@ -20,6 +20,11 @@ use diesel::{ }; use enum_assoc::Assoc; use libipld::{cid::Cid, serde::from_ipld, Ipld, Link}; +use schemars::{ + 
gen::SchemaGenerator, + schema::{InstanceType, Metadata, ObjectValidation, Schema, SchemaObject, SingleOrVec}, + JsonSchema, +}; use serde::{Deserialize, Serialize}; #[cfg(feature = "diesel")] use std::str::FromStr; @@ -74,6 +79,69 @@ impl fmt::Display for AwaitResult { } } +impl JsonSchema for AwaitResult { + fn schema_name() -> String { + "await_result".to_owned() + } + + fn schema_id() -> Cow<'static, str> { + Cow::Borrowed("homestar-invocation::pointer::AwaitResult") + } + + fn json_schema(gen: &mut SchemaGenerator) -> Schema { + let mut schema = SchemaObject { + instance_type: None, + metadata: Some(Box::new(Metadata { + title: Some("Await result".to_string()), + description: Some("Branches of a promise that is awaited".to_string()), + ..Default::default() + })), + ..Default::default() + }; + + let await_ok = SchemaObject { + instance_type: Some(SingleOrVec::Single(InstanceType::Object.into())), + object: Some(Box::new(ObjectValidation { + properties: BTreeMap::from([( + OK_BRANCH.to_string(), + gen.subschema_for::(), + )]), + ..Default::default() + })), + ..Default::default() + }; + let await_err = SchemaObject { + instance_type: Some(SingleOrVec::Single(InstanceType::Object.into())), + object: Some(Box::new(ObjectValidation { + properties: BTreeMap::from([( + ERR_BRANCH.to_string(), + gen.subschema_for::(), + )]), + ..Default::default() + })), + ..Default::default() + }; + let await_ptr = SchemaObject { + instance_type: Some(SingleOrVec::Single(InstanceType::Object.into())), + object: Some(Box::new(ObjectValidation { + properties: BTreeMap::from([( + PTR_BRANCH.to_string(), + gen.subschema_for::(), + )]), + ..Default::default() + })), + ..Default::default() + }; + + schema.subschemas().one_of = Some(vec![ + Schema::Object(await_ok), + Schema::Object(await_err), + Schema::Object(await_ptr), + ]); + schema.into() + } +} + /// Describes the eventual output of the referenced [Instruction] as a /// [Pointer], either resolving to a tagged [OK_BRANCH], 
[ERR_BRANCH], or direct /// result of a [PTR_BRANCH]. @@ -131,7 +199,7 @@ impl TryFrom for Await { ensure!( map.len() == 1, Error::ConditionNotMet( - "await promise must jave only a single key ain a map".to_string() + "await promise must have only a single key in a map".to_string() ) ); @@ -278,6 +346,34 @@ where } } +impl JsonSchema for Pointer { + fn schema_name() -> String { + "pointer".to_owned() + } + + fn schema_id() -> Cow<'static, str> { + Cow::Borrowed("homestar-invocation::pointer::Pointer") + } + + fn json_schema(gen: &mut SchemaGenerator) -> Schema { + let schema = SchemaObject { + instance_type: Some(SingleOrVec::Single(InstanceType::Object.into())), + object: Some(Box::new(ObjectValidation { + properties: BTreeMap::from([('/'.to_string(), ::json_schema(gen))]), + ..Default::default() + })), + metadata: Some(Box::new(Metadata { + description: Some( + "CID reference to an invocation, task, instruction, or receipt".to_string(), + ), + ..Default::default() + })), + ..Default::default() + }; + schema.into() + } +} + #[cfg(test)] mod test { use super::*; diff --git a/homestar-invocation/src/task.rs b/homestar-invocation/src/task.rs index 6617c453..36ba29ce 100644 --- a/homestar-invocation/src/task.rs +++ b/homestar-invocation/src/task.rs @@ -6,6 +6,7 @@ use crate::{ Error, Pointer, Unit, }; use libipld::{cid::Cid, serde::from_ipld, Ipld}; +use schemars::JsonSchema; use serde::{Deserialize, Serialize}; use std::collections::BTreeMap; @@ -28,11 +29,19 @@ const PROOF_KEY: &str = "prf"; /// /// [Instruction]: Instruction /// [Receipt]: super::Receipt -#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema)] +#[schemars( + rename = "task", + description = "Contains a run instruction, configuration, optional reference to receipt that caused task to run, and authorization" +)] pub struct Task<'a, T> { + #[schemars(with = "Instruction<'a, T>", rename = "run", title = "Run instruction")] run: 
RunInstruction<'a, T>, + #[schemars(title = "Receipt reference")] cause: Option, + #[schemars(with = "Resources", title = "Task Configuration")] meta: Ipld, + #[schemars(title = "UCAN Authorization")] prf: UcanPrf, } diff --git a/homestar-invocation/src/task/config.rs b/homestar-invocation/src/task/config.rs index f90e5792..c9cfad0e 100644 --- a/homestar-invocation/src/task/config.rs +++ b/homestar-invocation/src/task/config.rs @@ -4,6 +4,7 @@ use crate::{consts, Error, Unit}; use libipld::{serde::from_ipld, Ipld}; +use schemars::JsonSchema; use serde::{Deserialize, Serialize}; use std::{collections::BTreeMap, default::Default, time::Duration}; @@ -12,10 +13,16 @@ const MEMORY_KEY: &str = "memory"; const TIMEOUT_KEY: &str = "time"; /// Resource configuration for defining fuel quota, timeout, etc. -#[derive(Clone, Debug, PartialEq, Eq, Deserialize, Serialize)] +#[derive(Clone, Debug, PartialEq, Eq, Deserialize, Serialize, JsonSchema)] +#[schemars( + rename = "resources", + description = "Resource configuration for fuel quota, memory allowance, and timeout" +)] pub struct Resources { fuel: Option, + #[schemars(description = "Memory in bytes")] memory: Option, + #[schemars(with = "Option", description = "Timeout in milliseconds")] time: Option, } diff --git a/homestar-invocation/src/task/instruction.rs b/homestar-invocation/src/task/instruction.rs index b1d847e7..849f2a92 100644 --- a/homestar-invocation/src/task/instruction.rs +++ b/homestar-invocation/src/task/instruction.rs @@ -1,10 +1,27 @@ //! An [Instruction] is the smallest unit of work that can be requested from a //! UCAN, described via `resource`, `ability`. 
-use crate::{ipld::DagCbor, Error, Pointer, Unit}; +use crate::{ + ipld::{self, DagCbor}, + pointer::AwaitResult, + Error, Pointer, Unit, +}; use libipld::{cid::multibase::Base, serde::from_ipld, Ipld}; +use schemars::{ + gen::SchemaGenerator, + schema::{ + ArrayValidation, InstanceType, Metadata, ObjectValidation, Schema, SchemaObject, + SingleOrVec, + }, + JsonSchema, +}; use serde::{Deserialize, Serialize}; -use std::{borrow::Cow, collections::BTreeMap, fmt}; +use serde_json::json; +use std::{ + borrow::Cow, + collections::{BTreeMap, BTreeSet}, + fmt, +}; use url::Url; const RESOURCE_KEY: &str = "rsc"; @@ -320,6 +337,137 @@ where impl<'a, T> DagCbor for Instruction<'a, T> where Ipld: From {} +impl<'a, T> JsonSchema for Instruction<'a, T> { + fn schema_name() -> String { + "run".to_owned() + } + + fn schema_id() -> Cow<'static, str> { + Cow::Borrowed("homestar-invocation::task::Instruction") + } + + fn json_schema(gen: &mut SchemaGenerator) -> Schema { + struct InputConditional { + if_schema: Schema, + then_schema: Schema, + else_schema: Schema, + } + + fn input_conditional(gen: &mut SchemaGenerator) -> InputConditional { + let if_schema = SchemaObject { + instance_type: None, + object: Some(Box::new(ObjectValidation { + properties: BTreeMap::from([( + "op".to_owned(), + Schema::Object(SchemaObject { + instance_type: Some(SingleOrVec::Single(InstanceType::String.into())), + const_value: Some(json!("wasm/run")), + ..Default::default() + }), + )]), + ..Default::default() + })), + ..Default::default() + }; + + let func_schema = SchemaObject { + instance_type: Some(SingleOrVec::Single(InstanceType::String.into())), + metadata: Some(Box::new(Metadata { + description: Some("The function to call on the Wasm resource".to_string()), + ..Default::default() + })), + ..Default::default() + }; + + let args_schema = SchemaObject { + instance_type: Some(SingleOrVec::Single(InstanceType::Array.into())), + metadata: Some(Box::new(Metadata { + description: Some( + "Arguments to 
the function. May await a result from another task." + .to_string(), + ), + ..Default::default() + })), + array: Some(Box::new(ArrayValidation { + items: Some(SingleOrVec::Vec(vec![ + gen.subschema_for::(), + gen.subschema_for::(), + ])), + ..Default::default() + })), + ..Default::default() + }; + + let input_schema = SchemaObject { + instance_type: Some(SingleOrVec::Single(InstanceType::Object.into())), + object: Some(Box::new(ObjectValidation { + properties: BTreeMap::from([ + ("func".to_string(), Schema::Object(func_schema)), + ("args".to_string(), Schema::Object(args_schema)), + ]), + required: BTreeSet::from(["func".to_string(), "args".to_string()]), + ..Default::default() + })), + ..Default::default() + }; + + let then_schema = SchemaObject { + instance_type: None, + object: Some(Box::new(ObjectValidation { + properties: BTreeMap::from([( + "input".to_string(), + Schema::Object(input_schema), + )]), + ..Default::default() + })), + ..Default::default() + }; + + InputConditional { + if_schema: Schema::Object(if_schema), + then_schema: Schema::Object(then_schema), + else_schema: Schema::Bool(false), + } + } + + let op_schema = SchemaObject { + instance_type: Some(SingleOrVec::Single(InstanceType::String.into())), + metadata: Some(Box::new(Metadata { + description: Some("Function executor".to_string()), + ..Default::default() + })), + enum_values: Some(vec![json!("wasm/run")]), + ..Default::default() + }; + + let mut schema = SchemaObject { + instance_type: Some(SingleOrVec::Single(InstanceType::Object.into())), + metadata: Some(Box::new(Metadata { + title: Some("Run instruction".to_string()), + description: Some("An instruction that runs a function from a resource, executor that will run the function, inputs to the executor, and optional nonce".to_string()), + ..Default::default() + })), + object: Some(Box::new(ObjectValidation { + properties: BTreeMap::from([ + ("rsc".to_owned(), ::json_schema(gen)), + ("op".to_owned(), Schema::Object(op_schema)), + 
("nnc".to_owned(), ::json_schema(gen)) + ]), + required: BTreeSet::from(["rsc".to_string(), "op".to_string(), "input".to_string(), "nnc".to_string()]), + ..Default::default() + })), + ..Default::default() + }; + + let input = input_conditional(gen); + schema.subschemas().if_schema = Some(Box::new(input.if_schema)); + schema.subschemas().then_schema = Some(Box::new(input.then_schema)); + schema.subschemas().else_schema = Some(Box::new(input.else_schema)); + + schema.into() + } +} + #[cfg(test)] mod test { use super::*; diff --git a/homestar-invocation/src/task/instruction/nonce.rs b/homestar-invocation/src/task/instruction/nonce.rs index 2f807e93..acc4efd0 100644 --- a/homestar-invocation/src/task/instruction/nonce.rs +++ b/homestar-invocation/src/task/instruction/nonce.rs @@ -9,8 +9,13 @@ use generic_array::{ GenericArray, }; use libipld::{multibase::Base::Base32HexLower, Ipld}; +use schemars::{ + gen::SchemaGenerator, + schema::{InstanceType, Metadata, Schema, SchemaObject, SingleOrVec}, + JsonSchema, +}; use serde::{Deserialize, Serialize}; -use std::fmt; +use std::{borrow::Cow, fmt}; use uuid::Uuid; type Nonce96 = GenericArray; @@ -88,6 +93,30 @@ impl TryFrom<&Ipld> for Nonce { } } +impl JsonSchema for Nonce { + fn schema_name() -> String { + "nonce".to_owned() + } + + fn schema_id() -> Cow<'static, str> { + Cow::Borrowed("homestar-invocation::task::instruction::Nonce") + } + + fn json_schema(_gen: &mut SchemaGenerator) -> Schema { + let schema = SchemaObject { + instance_type: Some(SingleOrVec::Single(InstanceType::String.into())), + metadata: Some(Box::new(Metadata { + description: Some( + "A 12-byte or 16-byte nonce. 
Use empty string for no nonce.".to_string(), + ), + ..Default::default() + })), + ..Default::default() + }; + schema.into() + } +} + #[cfg(test)] mod test { use super::*; diff --git a/homestar-runtime/Cargo.toml b/homestar-runtime/Cargo.toml index c06169e7..accd615e 100644 --- a/homestar-runtime/Cargo.toml +++ b/homestar-runtime/Cargo.toml @@ -134,14 +134,12 @@ reqwest = { version = "0.11", default-features = false, features = [ "blocking", "json", ] } -schemars = "0.8.16" +schemars = { workspace = true } sec1 = { version = "0.7", default-features = false, features = ["pem"] } semver = { version = "1.0", default-features = false } serde = { workspace = true } serde_ipld_dagcbor = { workspace = true } -serde_json = { version = "1.0", default-features = false, features = [ - "raw_value", -] } +serde_json = { workspace = true } serde_with = { version = "3.5", default-features = false, features = [ "base64", "macros", diff --git a/homestar-runtime/schemas/generate.rs b/homestar-runtime/schemas/generate.rs index 6deeda31..64208de1 100644 --- a/homestar-runtime/schemas/generate.rs +++ b/homestar-runtime/schemas/generate.rs @@ -2,6 +2,7 @@ //! JSON Schemas for method params and notifications. 
use homestar_runtime::{Health, NetworkNotification}; +use homestar_workflow::Workflow; use schemars::{schema::RootSchema, schema_for}; use std::{fs, io::Write}; @@ -24,14 +25,23 @@ fn main() { .unwrap() .write_all(&serde_json::to_vec_pretty(&network_schema).unwrap()); - let api_doc = generate_api_doc(health_schema, network_schema); + let workflow_schema = schema_for!(Workflow<'static, ()>); + let _ = fs::File::create("schemas/docs/workflow.json") + .unwrap() + .write_all(&serde_json::to_vec_pretty(&workflow_schema).unwrap()); + + let api_doc = generate_api_doc(health_schema, network_schema, workflow_schema); let _ = fs::File::create("schemas/docs/api.json") .unwrap() .write_all(&serde_json::to_vec_pretty(&api_doc).unwrap()); } // Spec: https://github.com/open-rpc/spec/blob/1.2.6/spec.md -fn generate_api_doc(health_schema: RootSchema, network_schema: RootSchema) -> OpenrpcDocument { +fn generate_api_doc( + health_schema: RootSchema, + network_schema: RootSchema, + workflow_schema: RootSchema, +) -> OpenrpcDocument { let health: MethodObject = MethodObject { name: "health".to_string(), description: None, @@ -57,7 +67,7 @@ fn generate_api_doc(health_schema: RootSchema, network_schema: RootSchema) -> Op }; let network: MethodObject = MethodObject { - name: "network".to_string(), + name: "subscribe_network_events".to_string(), description: None, summary: None, servers: None, @@ -65,7 +75,7 @@ fn generate_api_doc(health_schema: RootSchema, network_schema: RootSchema) -> Op param_structure: Some(MethodObjectParamStructure::ByName), params: vec![], result: ContentDescriptorOrReference::ContentDescriptorObject(ContentDescriptorObject { - name: "network".to_string(), + name: "subscription_id".to_string(), summary: None, description: None, required: Some(true), @@ -87,6 +97,46 @@ fn generate_api_doc(health_schema: RootSchema, network_schema: RootSchema) -> Op }), }; + let workflow: MethodObject = MethodObject { + name: "subscribe_run_workflow".to_string(), + description: 
None, + summary: None, + servers: None, + tags: None, + param_structure: Some(MethodObjectParamStructure::ByName), + params: vec![ContentDescriptorOrReference::ContentDescriptorObject( + ContentDescriptorObject { + name: "workflow".to_string(), + summary: None, + description: None, + required: Some(true), + schema: JSONSchema::JsonSchemaObject(workflow_schema), + deprecated: Some(false), + }, + )], + result: ContentDescriptorOrReference::ContentDescriptorObject(ContentDescriptorObject { + name: "subscription_id".to_string(), + summary: None, + description: None, + required: Some(true), + schema: JSONSchema::JsonSchemaObject(schema_for!(String)), + deprecated: Some(false), + }), + external_docs: None, + errors: None, + links: None, + examples: None, + deprecated: Some(false), + x_messages: None, // x_messages: Some(ContentDescriptorObject { + // name: "network subscription messages".to_string(), + // summary: None, + // description: None, + // required: Some(true), + // schema: JSONSchema::JsonSchemaObject(network_schema), + // deprecated: Some(false), + // }), + }; + OpenrpcDocument { openrpc: Openrpc::V26, // TODO Should we upgrade to latest spec at 1.3.2? 
info: InfoObject { @@ -109,7 +159,7 @@ fn generate_api_doc(health_schema: RootSchema, network_schema: RootSchema) -> Op url: "https://docs.everywhere.computer/homestar/what-is-homestar/".to_string(), }), servers: None, - methods: vec![health, network], + methods: vec![health, network, workflow], components: None, } } diff --git a/homestar-workflow/Cargo.toml b/homestar-workflow/Cargo.toml index 53dccd64..32228129 100644 --- a/homestar-workflow/Cargo.toml +++ b/homestar-workflow/Cargo.toml @@ -23,6 +23,7 @@ homestar-invocation = { version = "0.1", path = "../homestar-invocation" } homestar-workspace-hack = { workspace = true } indexmap = { version = "2.1", default-features = false } libipld = { workspace = true } +schemars = { workspace = true } serde = { workspace = true } thiserror = { workspace = true } diff --git a/homestar-workflow/src/workflow.rs b/homestar-workflow/src/workflow.rs index 2d44175c..b50d47d9 100644 --- a/homestar-workflow/src/workflow.rs +++ b/homestar-workflow/src/workflow.rs @@ -9,6 +9,7 @@ use homestar_invocation::{ Task, Unit, }; use libipld::{serde::from_ipld, Ipld}; +use schemars::JsonSchema; use serde::{Deserialize, Serialize}; use std::collections::BTreeMap; @@ -17,7 +18,8 @@ const TASKS_KEY: &str = "tasks"; /// Workflow composed of [tasks]. 
/// /// [tasks]: Task -#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +#[derive(Debug, Clone, JsonSchema, PartialEq, Serialize, Deserialize)] +#[schemars(title = "Workflow", description = "Workflow composed of tasks")] pub struct Workflow<'a, T> { tasks: Vec>, } From 6b8e09ea765b7e629e8d2c105396c20aa5cc7200 Mon Sep 17 00:00:00 2001 From: Brian Ginsburg Date: Wed, 24 Jan 2024 11:28:52 -0800 Subject: [PATCH 12/75] chore: Add network event emitter --- homestar-invocation/src/authority/prf.rs | 2 +- .../src/event_handler/notification.rs | 95 +++++++------------ .../src/event_handler/swarm_event.rs | 32 ++++--- homestar-runtime/tests/network/dht.rs | 22 +++-- homestar-runtime/tests/network/gossip.rs | 2 +- .../tests/network/notification.rs | 4 +- 6 files changed, 69 insertions(+), 88 deletions(-) diff --git a/homestar-invocation/src/authority/prf.rs b/homestar-invocation/src/authority/prf.rs index 202fded3..44ed7131 100644 --- a/homestar-invocation/src/authority/prf.rs +++ b/homestar-invocation/src/authority/prf.rs @@ -106,7 +106,7 @@ impl JsonSchema for UcanPrf { fn json_schema(gen: &mut SchemaGenerator) -> Schema { let schema = SchemaObject { - instance_type: Some(SingleOrVec::Vec(vec![InstanceType::Array.into()])), + instance_type: Some(SingleOrVec::Vec(vec![InstanceType::Array])), array: Some(Box::new(ArrayValidation { items: Some(gen.subschema_for::().into()), ..Default::default() diff --git a/homestar-runtime/src/event_handler/notification.rs b/homestar-runtime/src/event_handler/notification.rs index 209ca367..6b580ebf 100644 --- a/homestar-runtime/src/event_handler/notification.rs +++ b/homestar-runtime/src/event_handler/notification.rs @@ -19,7 +19,9 @@ use tracing::{debug, warn}; pub(crate) mod receipt; pub(crate) mod swarm; pub(crate) use receipt::ReceiptNotification; -pub(crate) use swarm::SwarmNotification; +pub(crate) use swarm::{ + ConnectionClosed, ConnectionEstablished, NetworkNotification, SwarmNotification, +}; const TYPE_KEY: &str = 
"type"; const DATA_KEY: &str = "data"; @@ -90,6 +92,36 @@ pub(crate) fn emit_event( } } +/// Send network event notification as bytes. +pub(crate) fn emit_network_event( + notifier: Notifier, + notification: NetworkNotification, +) { + let header = Header::new( + SubscriptionTyp::EventSub(SUBSCRIBE_NETWORK_EVENTS_ENDPOINT.to_string()), + None, + ); + + if let Ok(json) = notification.to_json() { + if let Err(err) = notifier.notify(Message::new(header, json)) { + debug!( + subject = "notification.err", + category = "notification", + err=?err, + "unable to send notification {:?}", + notification, + ) + }; + } else { + debug!( + subject = "notification.err", + category = "notification", + "unable to serialize event notification as bytes: {:?}", + notification + ); + } +} + /// Notification sent to clients. #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] pub(crate) struct EventNotification { @@ -113,7 +145,7 @@ impl EventNotification { } } -impl DagJson for EventNotification where Ipld: From {} +impl DagJson for EventNotification {} impl From for Ipld { fn from(notification: EventNotification) -> Self { @@ -186,7 +218,7 @@ impl fmt::Display for EventNotificationTyp { } } -impl DagJson for EventNotificationTyp where Ipld: From {} +impl DagJson for EventNotificationTyp {} impl From for Ipld { fn from(typ: EventNotificationTyp) -> Self { @@ -216,60 +248,3 @@ impl TryFrom for EventNotificationTyp { } } } - -#[cfg(test)] -mod test { - use super::*; - use libp2p::PeerId; - use maplit::btreemap; - - #[test] - fn notification_bytes_rountrip() { - let peer_id = PeerId::random().to_string(); - let address: String = "/ip4/127.0.0.1/tcp/7000".to_string(); - - let notification = EventNotification::new( - EventNotificationTyp::SwarmNotification(SwarmNotification::ConnnectionEstablished), - btreemap! 
{ - "peerId" => Ipld::String(peer_id.clone()), - "address" => Ipld::String(address.clone()) - }, - ); - let bytes = notification.to_json().unwrap(); - - let parsed = EventNotification::from_json(bytes.as_ref()).unwrap(); - let data: BTreeMap = from_ipld(parsed.data).unwrap(); - - assert_eq!( - parsed.typ, - EventNotificationTyp::SwarmNotification(SwarmNotification::ConnnectionEstablished) - ); - assert_eq!(data.get("peerId").unwrap(), &peer_id); - assert_eq!(data.get("address").unwrap(), &address); - } - - #[test] - fn notification_json_string_rountrip() { - let peer_id = PeerId::random().to_string(); - let address: String = "/ip4/127.0.0.1/tcp/7000".to_string(); - - let notification = EventNotification::new( - EventNotificationTyp::SwarmNotification(SwarmNotification::ConnnectionEstablished), - btreemap! { - "peerId" => Ipld::String(peer_id.clone()), - "address" => Ipld::String(address.clone()), - }, - ); - let json_string = notification.to_json_string().unwrap(); - - let parsed = EventNotification::from_json_string(json_string).unwrap(); - let data: BTreeMap = from_ipld(parsed.data).unwrap(); - - assert_eq!( - parsed.typ, - EventNotificationTyp::SwarmNotification(SwarmNotification::ConnnectionEstablished) - ); - assert_eq!(data.get("peerId").unwrap(), &peer_id); - assert_eq!(data.get("address").unwrap(), &address); - } -} diff --git a/homestar-runtime/src/event_handler/swarm_event.rs b/homestar-runtime/src/event_handler/swarm_event.rs index 5fb0162c..c453dcac 100644 --- a/homestar-runtime/src/event_handler/swarm_event.rs +++ b/homestar-runtime/src/event_handler/swarm_event.rs @@ -2,7 +2,9 @@ use super::EventHandler; #[cfg(feature = "websocket-notify")] -use crate::event_handler::notification::{self, EventNotificationTyp, SwarmNotification}; +use crate::event_handler::notification::{ + self, EventNotificationTyp, NetworkNotification, SwarmNotification, +}; #[cfg(feature = "ipfs")] use crate::network::IpfsCli; use crate::{ @@ -1107,14 +1109,15 @@ async fn 
handle_swarm_event( .insert(peer_id, endpoint.clone()); #[cfg(feature = "websocket-notify")] - notification::emit_event( + notification::emit_network_event( event_handler.ws_evt_sender(), - EventNotificationTyp::SwarmNotification(SwarmNotification::ConnnectionEstablished), - btreemap! { - "peerId" => Ipld::String(peer_id.to_string()), - "address" => Ipld::String(endpoint.get_remote_address().to_string()) - }, - ); + NetworkNotification::ConnnectionEstablished( + notification::ConnectionEstablished::new( + peer_id, + endpoint.get_remote_address().to_owned(), + ), + ), + ) } SwarmEvent::ConnectionClosed { peer_id, @@ -1186,14 +1189,13 @@ async fn handle_swarm_event( } #[cfg(feature = "websocket-notify")] - notification::emit_event( + notification::emit_network_event( event_handler.ws_evt_sender(), - EventNotificationTyp::SwarmNotification(SwarmNotification::ConnnectionClosed), - btreemap! { - "peerId" => Ipld::String(peer_id.to_string()), - "address" => Ipld::String(endpoint.get_remote_address().to_string()) - }, - ); + NetworkNotification::ConnnectionClosed(notification::ConnectionClosed::new( + peer_id, + endpoint.get_remote_address().to_owned(), + )), + ) } SwarmEvent::OutgoingConnectionError { connection_id, diff --git a/homestar-runtime/tests/network/dht.rs b/homestar-runtime/tests/network/dht.rs index 22308276..4d6bf33c 100644 --- a/homestar-runtime/tests/network/dht.rs +++ b/homestar-runtime/tests/network/dht.rs @@ -181,7 +181,7 @@ fn test_libp2p_dht_records_integration() -> Result<()> { let json: serde_json::Value = serde_json::from_slice(&msg.unwrap().unwrap()).unwrap(); - if json["type"].as_str().unwrap() == "network:connectionEstablished" { + if json["connection_established"].is_object() { break; } } else { @@ -496,7 +496,7 @@ fn test_libp2p_dht_quorum_failure_intregration() -> Result<()> { let json: serde_json::Value = serde_json::from_slice(&msg.unwrap().unwrap()).unwrap(); - if json["type"].as_str().unwrap() == "network:connectionEstablished" { + if 
json["connection_established"].is_object() { break; } } else { @@ -715,7 +715,7 @@ fn test_libp2p_dht_workflow_info_provider_integration() -> Result<()> { let json: serde_json::Value = serde_json::from_slice(&msg.unwrap().unwrap()).unwrap(); - if json["type"].as_str().unwrap() == "network:connectionEstablished" { + if json["connection_established"].is_object() { break; } } else { @@ -1098,10 +1098,11 @@ fn test_libp2p_dht_workflow_info_provider_recursive_integration() -> Result<()> let json: serde_json::Value = serde_json::from_slice(&msg.unwrap().unwrap()).unwrap(); - println!("node1: {json}"); - - if json["type"].as_str().unwrap() == "network:connectionEstablished" { - assert_eq!(json["data"]["peerId"], SECP256K1MULTIHASH.to_string()); + if json["connection_established"].is_object() { + assert_eq!( + json["connection_established"]["peer_id"], + SECP256K1MULTIHASH.to_string() + ); break; } @@ -1118,8 +1119,11 @@ fn test_libp2p_dht_workflow_info_provider_recursive_integration() -> Result<()> println!("node1: {json}"); - if json["type"].as_str().unwrap() == "network:connectionEstablished" { - assert_eq!(json["data"]["peerId"], ED25519MULTIHASH2.to_string()); + if json["connection_established"].is_object() { + assert_eq!( + json["connection_established"]["peerId"], + ED25519MULTIHASH2.to_string() + ); break; } diff --git a/homestar-runtime/tests/network/gossip.rs b/homestar-runtime/tests/network/gossip.rs index 0ee08fba..1a8e42c1 100644 --- a/homestar-runtime/tests/network/gossip.rs +++ b/homestar-runtime/tests/network/gossip.rs @@ -148,7 +148,7 @@ fn test_libp2p_receipt_gossip_integration() -> Result<()> { let json: serde_json::Value = serde_json::from_slice(&msg.unwrap().unwrap()).unwrap(); - if json["type"].as_str().unwrap() == "network:connectionEstablished" { + if json["connection_established"].is_object() { break; } } else { diff --git a/homestar-runtime/tests/network/notification.rs b/homestar-runtime/tests/network/notification.rs index 993bf0d5..a076e701 
100644 --- a/homestar-runtime/tests/network/notification.rs +++ b/homestar-runtime/tests/network/notification.rs @@ -143,7 +143,7 @@ fn test_connection_notifications_integration() -> Result<()> { let json: serde_json::Value = serde_json::from_slice(&msg.unwrap().unwrap()).unwrap(); - if json["type"].as_str().unwrap() == "network:connectionEstablished" { + if json["connection_established"].is_object() { break; } } else { @@ -159,7 +159,7 @@ fn test_connection_notifications_integration() -> Result<()> { let json: serde_json::Value = serde_json::from_slice(&msg.unwrap().unwrap()).unwrap(); - if json["type"].as_str().unwrap() == "network:connectionClosed" { + if json["connection_closed"].is_object() { break; } } else { From 7a905d67618c8afa9384d14b5047e69629bf3ddd Mon Sep 17 00:00:00 2001 From: Brian Ginsburg Date: Thu, 25 Jan 2024 13:33:11 -0800 Subject: [PATCH 13/75] feat: Add receipt and receipt notification schemas --- homestar-invocation/src/authority/issuer.rs | 30 ++++- homestar-invocation/src/ipld/schema.rs | 116 ++++++++++++------ homestar-invocation/src/receipt.rs | 68 +++++++++- homestar-invocation/src/task/result.rs | 48 ++++++++ homestar-runtime/schemas/generate.rs | 37 ++++-- .../src/event_handler/notification/receipt.rs | 80 +++++++++++- homestar-runtime/src/lib.rs | 2 +- 7 files changed, 329 insertions(+), 52 deletions(-) diff --git a/homestar-invocation/src/authority/issuer.rs b/homestar-invocation/src/authority/issuer.rs index 01f32600..2837ebcf 100644 --- a/homestar-invocation/src/authority/issuer.rs +++ b/homestar-invocation/src/authority/issuer.rs @@ -11,8 +11,13 @@ use diesel::{ sqlite::Sqlite, }; use libipld::{serde::from_ipld, Ipld}; +use schemars::{ + gen::SchemaGenerator, + schema::{InstanceType, Metadata, Schema, SchemaObject, SingleOrVec}, + JsonSchema, +}; use serde::{Deserialize, Serialize}; -use std::{fmt, str::FromStr}; +use std::{borrow::Cow, fmt, str::FromStr}; use ucan::ipld::Principle as Principal; /// [Principal] issuer of the 
[Invocation]. If omitted issuer is @@ -91,6 +96,29 @@ where } } +impl JsonSchema for Issuer { + fn schema_name() -> String { + "iss".to_owned() + } + + fn schema_id() -> Cow<'static, str> { + Cow::Borrowed("homestar-invocation::authority::issuer::Issuer") + } + + fn json_schema(_gen: &mut SchemaGenerator) -> Schema { + let schema = SchemaObject { + instance_type: Some(SingleOrVec::Single(InstanceType::String.into())), + metadata: Some(Box::new(Metadata { + title: Some("Issuer".to_string()), + description: Some("Principal that issued the receipt".to_string()), + ..Default::default() + })), + ..Default::default() + }; + schema.into() + } +} + #[cfg(test)] mod test { use super::*; diff --git a/homestar-invocation/src/ipld/schema.rs b/homestar-invocation/src/ipld/schema.rs index 4378d36e..103b4af9 100644 --- a/homestar-invocation/src/ipld/schema.rs +++ b/homestar-invocation/src/ipld/schema.rs @@ -22,7 +22,7 @@ impl JsonSchema for IpldStub { } fn schema_id() -> Cow<'static, str> { - Cow::Borrowed("homestar-invocation::ipld::schema::IpldSchema") + Cow::Borrowed("homestar-invocation::ipld::schema::IpldStub") } fn json_schema(gen: &mut SchemaGenerator) -> Schema { @@ -40,31 +40,6 @@ impl JsonSchema for IpldStub { instance_type: Some(SingleOrVec::Single(InstanceType::Number.into())), ..Default::default() }; - let bytes_schema = SchemaObject { - instance_type: Some(SingleOrVec::Single(InstanceType::Object.into())), - object: Some(Box::new(ObjectValidation { - properties: BTreeMap::from([( - '/'.to_string(), - Schema::Object(SchemaObject { - instance_type: Some(SingleOrVec::Single(InstanceType::Object.into())), - object: Some(Box::new(ObjectValidation { - properties: BTreeMap::from([( - "bytes".to_string(), - ::json_schema(gen), - )]), - ..Default::default() - })), - ..Default::default() - }), - )]), - ..Default::default() - })), - metadata: Some(Box::new(Metadata { - description: Some("Base64 encoded binary".to_string()), - ..Default::default() - })), - ..Default::default() 
- }; let array_schema = SchemaObject { instance_type: Some(SingleOrVec::Single(InstanceType::Array.into())), ..Default::default() @@ -73,29 +48,96 @@ impl JsonSchema for IpldStub { instance_type: Some(SingleOrVec::Single(InstanceType::Object.into())), ..Default::default() }; - let link_schema = SchemaObject { + + schema.subschemas().one_of = Some(vec![ + <()>::json_schema(gen), + ::json_schema(gen), + Schema::Object(number_schema), + ::json_schema(gen), + gen.subschema_for::(), + Schema::Object(array_schema), + Schema::Object(object_schema), + gen.subschema_for::(), + ]); + + schema.into() + } +} + +/// Ipld link stub for JSON Schema generation +#[derive(Debug)] +#[doc(hidden)] +pub struct IpldLinkStub(Ipld); + +impl JsonSchema for IpldLinkStub { + fn schema_name() -> String { + "ipld_link".to_owned() + } + + fn schema_id() -> Cow<'static, str> { + Cow::Borrowed("homestar-invocation::ipld::schema::IpldLinkStub") + } + + fn json_schema(gen: &mut SchemaGenerator) -> Schema { + let schema = SchemaObject { instance_type: Some(SingleOrVec::Single(InstanceType::Object.into())), object: Some(Box::new(ObjectValidation { properties: BTreeMap::from([('/'.to_string(), ::json_schema(gen))]), ..Default::default() })), metadata: Some(Box::new(Metadata { + title: Some("IPLD link".to_string()), description: Some("CID link that points to some IPLD data".to_string()), ..Default::default() })), ..Default::default() }; - schema.subschemas().one_of = Some(vec![ - <()>::json_schema(gen), - ::json_schema(gen), - Schema::Object(number_schema), - ::json_schema(gen), - Schema::Object(bytes_schema), - Schema::Object(array_schema), - Schema::Object(object_schema), - Schema::Object(link_schema), - ]); + schema.into() + } +} + +/// Ipld bytes stub for JSON Schema generation +#[derive(Debug)] +#[doc(hidden)] +pub struct IpldBytesStub(Ipld); + +impl JsonSchema for IpldBytesStub { + fn schema_name() -> String { + "ipld_bytes".to_owned() + } + + fn schema_id() -> Cow<'static, str> { + 
Cow::Borrowed("homestar-invocation::ipld::schema::IpldBytesStub") + } + + fn json_schema(gen: &mut SchemaGenerator) -> Schema { + let schema = SchemaObject { + instance_type: Some(SingleOrVec::Single(InstanceType::Object.into())), + metadata: Some(Box::new(Metadata { + title: Some("IPLD bytes".to_string()), + description: Some("Base64 encoded binary".to_string()), + ..Default::default() + })), + object: Some(Box::new(ObjectValidation { + properties: BTreeMap::from([( + '/'.to_string(), + Schema::Object(SchemaObject { + instance_type: Some(SingleOrVec::Single(InstanceType::Object.into())), + object: Some(Box::new(ObjectValidation { + properties: BTreeMap::from([( + "bytes".to_string(), + ::json_schema(gen), + )]), + ..Default::default() + })), + ..Default::default() + }), + )]), + ..Default::default() + })), + ..Default::default() + }; schema.into() } diff --git a/homestar-invocation/src/receipt.rs b/homestar-invocation/src/receipt.rs index 57d51251..f0eb3a00 100644 --- a/homestar-invocation/src/receipt.rs +++ b/homestar-invocation/src/receipt.rs @@ -6,7 +6,15 @@ use crate::{ task, Error, Pointer, Unit, }; use libipld::{self, cbor::DagCborCodec, prelude::Codec, serde::from_ipld, Ipld}; -use std::collections::BTreeMap; +use schemars::{ + gen::SchemaGenerator, + schema::{InstanceType, Metadata, ObjectValidation, Schema, SchemaObject, SingleOrVec}, + JsonSchema, +}; +use std::{ + borrow::Cow, + collections::{BTreeMap, BTreeSet}, +}; pub mod metadata; @@ -178,3 +186,61 @@ impl TryFrom> for Pointer { Ok(Pointer::new(receipt.to_cid()?)) } } + +impl JsonSchema for Receipt { + fn schema_name() -> String { + "receipt".to_owned() + } + + fn schema_id() -> Cow<'static, str> { + Cow::Borrowed("homestar-invocation::receipt::Receipt") + } + + fn json_schema(gen: &mut SchemaGenerator) -> Schema { + let meta_schema = SchemaObject { + instance_type: Some(SingleOrVec::Single(InstanceType::Object.into())), + metadata: Some(Box::new(Metadata { + title: Some("Receipt 
metadata".to_string()), + description: Some( + "Receipt metadata including the operation that produced the receipt" + .to_string(), + ), + ..Default::default() + })), + object: Some(Box::new(ObjectValidation { + properties: BTreeMap::from([("op".to_owned(), ::json_schema(gen))]), + required: BTreeSet::from(["op".to_string()]), + ..Default::default() + })), + ..Default::default() + }; + + let schema = SchemaObject { + instance_type: Some(SingleOrVec::Single(InstanceType::Object.into())), + metadata: Some(Box::new(Metadata { + title: Some("Receipt".to_string()), + description: Some("A computed receipt".to_string()), + ..Default::default() + })), + object: Some(Box::new(ObjectValidation { + properties: BTreeMap::from([ + ("ran".to_owned(), gen.subschema_for::()), + ("out".to_owned(), gen.subschema_for::>()), + ("meta".to_owned(), Schema::Object(meta_schema)), + ("iss".to_owned(), gen.subschema_for::>()), + ("prf".to_owned(), gen.subschema_for::()), + ]), + required: BTreeSet::from([ + "ran".to_string(), + "out".to_string(), + "meta".to_string(), + "prf".to_string(), + ]), + ..Default::default() + })), + ..Default::default() + }; + + schema.into() + } +} diff --git a/homestar-invocation/src/task/result.rs b/homestar-invocation/src/task/result.rs index f552d066..fa610a7b 100644 --- a/homestar-invocation/src/task/result.rs +++ b/homestar-invocation/src/task/result.rs @@ -16,7 +16,14 @@ use diesel::{ use libipld::Ipld; #[cfg(feature = "diesel")] use libipld::{cbor::DagCborCodec, prelude::Codec}; +use schemars::{ + gen::SchemaGenerator, + schema::{ArrayValidation, InstanceType, Metadata, Schema, SchemaObject, SingleOrVec}, + JsonSchema, +}; use serde::{Deserialize, Serialize}; +use serde_json::json; +use std::borrow::Cow; const OK: &str = "ok"; const ERR: &str = "error"; @@ -160,6 +167,47 @@ where } } +impl JsonSchema for Result { + fn schema_name() -> String { + "out".to_owned() + } + + fn schema_id() -> Cow<'static, str> { + 
Cow::Borrowed("homestar-invocation::task::Result") + } + + fn json_schema(gen: &mut SchemaGenerator) -> Schema { + let out_result = SchemaObject { + instance_type: Some(SingleOrVec::Single(InstanceType::Object.into())), + enum_values: Some(vec![json!(OK), json!(ERR), json!(JUST)]), + ..Default::default() + }; + + let schema = SchemaObject { + instance_type: Some(SingleOrVec::Single(InstanceType::Object.into())), + metadata: Some(Box::new(Metadata { + title: Some("Computation result".to_string()), + description: Some( + "Result tuple with ok/err/just result and associated output".to_string(), + ), + ..Default::default() + })), + array: Some(Box::new(ArrayValidation { + items: Some(SingleOrVec::Vec(vec![ + Schema::Object(out_result), + gen.subschema_for::(), + ])), + min_items: Some(2), + max_items: Some(2), + ..Default::default() + })), + ..Default::default() + }; + + schema.into() + } +} + #[cfg(test)] mod test { use super::*; diff --git a/homestar-runtime/schemas/generate.rs b/homestar-runtime/schemas/generate.rs index 64208de1..829c5a9d 100644 --- a/homestar-runtime/schemas/generate.rs +++ b/homestar-runtime/schemas/generate.rs @@ -1,7 +1,8 @@ //! Standalone binary to generate OpenRPC API docs and //! JSON Schemas for method params and notifications. 
-use homestar_runtime::{Health, NetworkNotification}; +use homestar_invocation::Receipt; +use homestar_runtime::{Health, NetworkNotification, ReceiptNotification}; use homestar_workflow::Workflow; use schemars::{schema::RootSchema, schema_for}; use std::{fs, io::Write}; @@ -30,7 +31,22 @@ fn main() { .unwrap() .write_all(&serde_json::to_vec_pretty(&workflow_schema).unwrap()); - let api_doc = generate_api_doc(health_schema, network_schema, workflow_schema); + let receipt_schema = schema_for!(Receipt<()>); + let _ = fs::File::create("schemas/docs/receipt.json") + .unwrap() + .write_all(&serde_json::to_vec_pretty(&receipt_schema).unwrap()); + + let receipt_notification_schema = schema_for!(ReceiptNotification); + let _ = fs::File::create("schemas/docs/receipt_notification.json") + .unwrap() + .write_all(&serde_json::to_vec_pretty(&receipt_notification_schema).unwrap()); + + let api_doc = generate_api_doc( + health_schema, + network_schema, + workflow_schema, + receipt_notification_schema, + ); let _ = fs::File::create("schemas/docs/api.json") .unwrap() .write_all(&serde_json::to_vec_pretty(&api_doc).unwrap()); @@ -41,6 +57,7 @@ fn generate_api_doc( health_schema: RootSchema, network_schema: RootSchema, workflow_schema: RootSchema, + receipt_notification_schema: RootSchema, ) -> OpenrpcDocument { let health: MethodObject = MethodObject { name: "health".to_string(), @@ -127,14 +144,14 @@ fn generate_api_doc( links: None, examples: None, deprecated: Some(false), - x_messages: None, // x_messages: Some(ContentDescriptorObject { - // name: "network subscription messages".to_string(), - // summary: None, - // description: None, - // required: Some(true), - // schema: JSONSchema::JsonSchemaObject(network_schema), - // deprecated: Some(false), - // }), + x_messages: Some(ContentDescriptorObject { + name: "workflow subscription messages".to_string(), + summary: Some("receipt notifications from a running workflow".to_string()), + description: None, + required: Some(true), + 
schema: JSONSchema::JsonSchemaObject(receipt_notification_schema), + deprecated: Some(false), + }), }; OpenrpcDocument { diff --git a/homestar-runtime/src/event_handler/notification/receipt.rs b/homestar-runtime/src/event_handler/notification/receipt.rs index 770d223e..4470ba8c 100644 --- a/homestar-runtime/src/event_handler/notification/receipt.rs +++ b/homestar-runtime/src/event_handler/notification/receipt.rs @@ -1,11 +1,23 @@ //! Notification receipts. -use homestar_invocation::{ipld::DagJson, Receipt}; +use homestar_invocation::{ + ipld::{schema, DagJson}, + Receipt, +}; use libipld::{ipld, Cid, Ipld}; +use schemars::{ + gen::SchemaGenerator, + schema::{InstanceType, Metadata, ObjectValidation, Schema, SchemaObject, SingleOrVec}, + JsonSchema, +}; +use std::{ + borrow::Cow, + collections::{BTreeMap, BTreeSet}, +}; /// A [Receipt] that is sent out for websocket notifications. #[derive(Debug, Clone, PartialEq)] -pub(crate) struct ReceiptNotification(Ipld); +pub struct ReceiptNotification(Ipld); impl ReceiptNotification { /// Obtain a reference to the inner Ipld value. 
@@ -45,3 +57,67 @@ impl From for ReceiptNotification { ReceiptNotification(ipld) } } + +impl JsonSchema for ReceiptNotification { + fn schema_name() -> String { + "receipt_notification".to_owned() + } + + fn schema_id() -> Cow<'static, str> { + Cow::Borrowed("homestar-runtime::event_handler::notification::ReceiptNotification") + } + + fn json_schema(gen: &mut SchemaGenerator) -> Schema { + let metadata_schema = SchemaObject { + instance_type: Some(SingleOrVec::Single(InstanceType::Object.into())), + metadata: Some(Box::new(Metadata { + title: Some("Metadata".to_string()), + description: Some("Workflow metadata to contextualize the receipt".to_string()), + ..Default::default() + })), + object: Some(Box::new(ObjectValidation { + properties: BTreeMap::from([ + ("name".to_owned(), ::json_schema(gen)), + ("replayed".to_owned(), ::json_schema(gen)), + ( + "workflow".to_owned(), + gen.subschema_for::(), + ), + ]), + required: BTreeSet::from([ + "name".to_string(), + "receipt".to_string(), + "receipt_cid".to_string(), + ]), + ..Default::default() + })), + ..Default::default() + }; + + let schema = SchemaObject { + instance_type: Some(SingleOrVec::Single(InstanceType::Object.into())), + metadata: Some(Box::new(Metadata { + title: Some("Receipt notification".to_string()), + description: Some( + "A receipt notification associated with a running workflow".to_string(), + ), + ..Default::default() + })), + object: Some(Box::new(ObjectValidation { + properties: BTreeMap::from([ + ("metadata".to_owned(), Schema::Object(metadata_schema)), + ("receipt".to_owned(), gen.subschema_for::>()), + ( + "receipt_cid".to_owned(), + gen.subschema_for::(), + ), + ]), + required: BTreeSet::from(["receipt".to_string(), "receipt_cid".to_string()]), + ..Default::default() + })), + ..Default::default() + }; + + schema.into() + } +} diff --git a/homestar-runtime/src/lib.rs b/homestar-runtime/src/lib.rs index b482e7d2..099a2a3d 100644 --- a/homestar-runtime/src/lib.rs +++ b/homestar-runtime/src/lib.rs 
@@ -72,7 +72,7 @@ pub use db::{utils::Health, Db}; pub(crate) mod libp2p; pub use logger::*; pub(crate) mod metrics; -pub use event_handler::notification::swarm::NetworkNotification; +pub use event_handler::notification::{receipt::ReceiptNotification, swarm::NetworkNotification}; #[allow(unused_imports)] pub(crate) use event_handler::EventHandler; pub use receipt::{Receipt, RECEIPT_TAG, VERSION_KEY}; From 111d7eb11ffe9f1806fed9116af9a9eaf7aa3d36 Mon Sep 17 00:00:00 2001 From: Brian Ginsburg Date: Thu, 25 Jan 2024 14:36:04 -0800 Subject: [PATCH 14/75] refactor: Add NodeInfo struct --- Cargo.lock | 2 +- homestar-runtime/src/lib.rs | 2 +- homestar-runtime/src/network/webserver/rpc.rs | 8 +++++--- homestar-runtime/src/runner.rs | 1 + homestar-runtime/src/runner/nodeinfo.rs | 18 ++++++++++++++++++ homestar-runtime/tests/network.rs | 8 +++----- 6 files changed, 29 insertions(+), 10 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 9cc7b202..b9787f89 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2687,7 +2687,7 @@ version = "0.1.0" dependencies = [ "ahash", "anyhow", - "arrayvec", + "arrayvec 0.7.4", "base64 0.13.1", "bitflags 2.4.2", "bytes", diff --git a/homestar-runtime/src/lib.rs b/homestar-runtime/src/lib.rs index 099a2a3d..5971ff7e 100644 --- a/homestar-runtime/src/lib.rs +++ b/homestar-runtime/src/lib.rs @@ -76,7 +76,7 @@ pub use event_handler::notification::{receipt::ReceiptNotification, swarm::Netwo #[allow(unused_imports)] pub(crate) use event_handler::EventHandler; pub use receipt::{Receipt, RECEIPT_TAG, VERSION_KEY}; -pub use runner::Runner; +pub use runner::{NodeInfo, Runner}; pub(crate) use scheduler::TaskScheduler; pub use settings::Settings; pub(crate) use worker::Worker; diff --git a/homestar-runtime/src/network/webserver/rpc.rs b/homestar-runtime/src/network/webserver/rpc.rs index eac6cadc..200242af 100644 --- a/homestar-runtime/src/network/webserver/rpc.rs +++ b/homestar-runtime/src/network/webserver/rpc.rs @@ -6,7 +6,10 @@ use 
super::notifier::{self, Header, Notifier, SubscriptionTyp}; use super::{listener, prom::PrometheusData, Message}; #[cfg(feature = "websocket-notify")] use crate::channel::AsyncChannel; -use crate::{db::Database, runner::WsSender}; +use crate::{ + db::Database, + runner::{NodeInfo, WsSender}, +}; #[cfg(feature = "websocket-notify")] use anyhow::anyhow; use anyhow::Result; @@ -193,8 +196,7 @@ where if let Ok(Message::AckNodeInfo((static_info, dyn_info))) = rx.recv_deadline(std::time::Instant::now() + ctx.receiver_timeout) { - Ok(serde_json::json!({ - "nodeInfo": {"static": static_info, "dynamic": dyn_info}})) + Ok(serde_json::json!(NodeInfo::new(static_info, dyn_info))) } else { error!( subject = "call.node", diff --git a/homestar-runtime/src/runner.rs b/homestar-runtime/src/runner.rs index 5e0a55e2..9d613f0c 100644 --- a/homestar-runtime/src/runner.rs +++ b/homestar-runtime/src/runner.rs @@ -47,6 +47,7 @@ pub(crate) mod file; mod nodeinfo; pub(crate) mod response; pub(crate) use error::Error; +pub use nodeinfo::NodeInfo; pub(crate) use nodeinfo::{DynamicNodeInfo, StaticNodeInfo}; /// Name of the thread used for the [Runner] / runtime. diff --git a/homestar-runtime/src/runner/nodeinfo.rs b/homestar-runtime/src/runner/nodeinfo.rs index 82fd8aff..1b44f177 100644 --- a/homestar-runtime/src/runner/nodeinfo.rs +++ b/homestar-runtime/src/runner/nodeinfo.rs @@ -4,6 +4,24 @@ use libp2p::{Multiaddr, PeerId}; use serde::{Deserialize, Serialize}; use std::collections::HashMap; +/// Node information. +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct NodeInfo { + /// Static node information available at startup. + #[serde(rename = "static")] + pub(crate) stat: StaticNodeInfo, + /// Dynamic node information available through events + /// at runtime. + pub(crate) dynamic: DynamicNodeInfo, +} + +impl NodeInfo { + /// Create an instance of [NodeInfo]. 
+ pub(crate) fn new(stat: StaticNodeInfo, dynamic: DynamicNodeInfo) -> Self { + Self { stat, dynamic } + } +} + /// Static node information available at startup. #[derive(Debug, Clone, Serialize, Deserialize)] pub(crate) struct StaticNodeInfo { diff --git a/homestar-runtime/tests/network.rs b/homestar-runtime/tests/network.rs index 05e054ca..6f037e09 100644 --- a/homestar-runtime/tests/network.rs +++ b/homestar-runtime/tests/network.rs @@ -363,7 +363,7 @@ fn test_libp2p_connect_known_peers_integration() -> Result<()> { let http_resp = reqwest::get(format!("{}/node", http_url)).await.unwrap(); assert_eq!(http_resp.status(), 200); let http_resp = http_resp.json::().await.unwrap(); - assert!(http_resp["nodeInfo"]["dynamic"]["connections"] + assert!(http_resp["dynamic"]["connections"] .as_object() .unwrap() .get(ED25519MULTIHASH) @@ -372,10 +372,8 @@ fn test_libp2p_connect_known_peers_integration() -> Result<()> { .unwrap() .parse::() .is_ok()); - let static_info = http_resp["nodeInfo"]["static"].as_object().unwrap(); - let listeners = http_resp["nodeInfo"]["dynamic"]["listeners"] - .as_array() - .unwrap(); + let static_info = http_resp["static"].as_object().unwrap(); + let listeners = http_resp["dynamic"]["listeners"].as_array().unwrap(); assert_eq!(static_info.get("peer_id").unwrap(), SECP256K1MULTIHASH); assert_eq!(listeners, &[listen_addr2.to_string()]); }); From be9ec0723efb2e6499a6d00c8faef24d45c99d6c Mon Sep 17 00:00:00 2001 From: Brian Ginsburg Date: Thu, 25 Jan 2024 15:15:47 -0800 Subject: [PATCH 15/75] feat: Add node info schema --- homestar-runtime/schemas/generate.rs | 35 +++++++++++++++++++++++-- homestar-runtime/src/runner/nodeinfo.rs | 16 ++++++++--- 2 files changed, 46 insertions(+), 5 deletions(-) diff --git a/homestar-runtime/schemas/generate.rs b/homestar-runtime/schemas/generate.rs index 829c5a9d..54886c53 100644 --- a/homestar-runtime/schemas/generate.rs +++ b/homestar-runtime/schemas/generate.rs @@ -2,7 +2,7 @@ //! 
JSON Schemas for method params and notifications. use homestar_invocation::Receipt; -use homestar_runtime::{Health, NetworkNotification, ReceiptNotification}; +use homestar_runtime::{Health, NetworkNotification, NodeInfo, ReceiptNotification}; use homestar_workflow::Workflow; use schemars::{schema::RootSchema, schema_for}; use std::{fs, io::Write}; @@ -21,6 +21,11 @@ fn main() { .unwrap() .write_all(&serde_json::to_vec_pretty(&health_schema).unwrap()); + let node_info_schema = schema_for!(NodeInfo); + let _ = fs::File::create("schemas/docs/node_info.json") + .unwrap() + .write_all(&serde_json::to_vec_pretty(&node_info_schema).unwrap()); + let network_schema = schema_for!(NetworkNotification); let _ = fs::File::create("schemas/docs/network.json") .unwrap() @@ -43,6 +48,7 @@ fn main() { let api_doc = generate_api_doc( health_schema, + node_info_schema, network_schema, workflow_schema, receipt_notification_schema, @@ -55,6 +61,7 @@ fn main() { // Spec: https://github.com/open-rpc/spec/blob/1.2.6/spec.md fn generate_api_doc( health_schema: RootSchema, + node_info_schema: RootSchema, network_schema: RootSchema, workflow_schema: RootSchema, receipt_notification_schema: RootSchema, @@ -83,6 +90,30 @@ fn generate_api_doc( x_messages: None, }; + let node_info: MethodObject = MethodObject { + name: "node".to_string(), + description: None, + summary: None, + servers: None, + tags: None, + param_structure: Some(MethodObjectParamStructure::ByName), + params: vec![], + result: ContentDescriptorOrReference::ContentDescriptorObject(ContentDescriptorObject { + name: "node_info".to_string(), + summary: None, + description: None, + required: Some(true), + schema: JSONSchema::JsonSchemaObject(node_info_schema), + deprecated: Some(false), + }), + external_docs: None, + errors: None, + links: None, + examples: None, + deprecated: Some(false), + x_messages: None, + }; + let network: MethodObject = MethodObject { name: "subscribe_network_events".to_string(), description: None, @@ -176,7 
+207,7 @@ fn generate_api_doc( url: "https://docs.everywhere.computer/homestar/what-is-homestar/".to_string(), }), servers: None, - methods: vec![health, network, workflow], + methods: vec![health, node_info, network, workflow], components: None, } } diff --git a/homestar-runtime/src/runner/nodeinfo.rs b/homestar-runtime/src/runner/nodeinfo.rs index 1b44f177..c607521a 100644 --- a/homestar-runtime/src/runner/nodeinfo.rs +++ b/homestar-runtime/src/runner/nodeinfo.rs @@ -1,11 +1,13 @@ //! Node information. use libp2p::{Multiaddr, PeerId}; +use schemars::JsonSchema; use serde::{Deserialize, Serialize}; use std::collections::HashMap; /// Node information. -#[derive(Debug, Clone, Serialize, Deserialize)] +#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)] +#[schemars(rename = "node_info")] pub struct NodeInfo { /// Static node information available at startup. #[serde(rename = "static")] @@ -23,9 +25,11 @@ impl NodeInfo { } /// Static node information available at startup. -#[derive(Debug, Clone, Serialize, Deserialize)] +#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)] +#[schemars(rename = "static")] pub(crate) struct StaticNodeInfo { /// The [PeerId] of a node. + #[schemars(with = "String", description = "The peer ID of the node")] pub(crate) peer_id: PeerId, } @@ -44,11 +48,17 @@ impl StaticNodeInfo { /// Dynamic node information available through events /// at runtime. -#[derive(Debug, Default, Clone, Serialize, Deserialize)] +#[derive(Debug, Default, Clone, Serialize, Deserialize, JsonSchema)] +#[schemars(rename = "dynamic")] pub(crate) struct DynamicNodeInfo { /// Listeners for the node. + #[schemars(with = "Vec", description = "Listen addresses for the node")] pub(crate) listeners: Vec, /// Connections for the node. 
+ #[schemars( + with = "HashMap", + description = "Peers and their addresses that are connected to the node" + )] pub(crate) connections: HashMap, } From bb5bcd3bdbcb915256a5165bb36ff99298e56d84 Mon Sep 17 00:00:00 2001 From: Brian Ginsburg Date: Thu, 25 Jan 2024 15:31:02 -0800 Subject: [PATCH 16/75] feat: Add unsubscribe API methods --- homestar-runtime/schemas/generate.rs | 57 +++++++++++++++++++++++++++- 1 file changed, 56 insertions(+), 1 deletion(-) diff --git a/homestar-runtime/schemas/generate.rs b/homestar-runtime/schemas/generate.rs index 54886c53..da1f7d7b 100644 --- a/homestar-runtime/schemas/generate.rs +++ b/homestar-runtime/schemas/generate.rs @@ -145,6 +145,30 @@ fn generate_api_doc( }), }; + let network_unsubscribe: MethodObject = MethodObject { + name: "unsubscribe_network_events".to_string(), + description: None, + summary: None, + servers: None, + tags: None, + param_structure: Some(MethodObjectParamStructure::ByName), + params: vec![], + result: ContentDescriptorOrReference::ContentDescriptorObject(ContentDescriptorObject { + name: "unsubscribe result".to_string(), + summary: None, + description: None, + required: Some(true), + schema: JSONSchema::JsonSchemaObject(schema_for!(bool)), + deprecated: Some(false), + }), + external_docs: None, + errors: None, + links: None, + examples: None, + deprecated: Some(false), + x_messages: None, + }; + let workflow: MethodObject = MethodObject { name: "subscribe_run_workflow".to_string(), description: None, @@ -185,6 +209,30 @@ fn generate_api_doc( }), }; + let workflow_unsubscribe: MethodObject = MethodObject { + name: "unsubscribe_run_workflow".to_string(), + description: None, + summary: None, + servers: None, + tags: None, + param_structure: Some(MethodObjectParamStructure::ByName), + params: vec![], + result: ContentDescriptorOrReference::ContentDescriptorObject(ContentDescriptorObject { + name: "unsubscribe result".to_string(), + summary: None, + description: None, + required: Some(true), + schema: 
JSONSchema::JsonSchemaObject(schema_for!(bool)), + deprecated: Some(false), + }), + external_docs: None, + errors: None, + links: None, + examples: None, + deprecated: Some(false), + x_messages: None, + }; + OpenrpcDocument { openrpc: Openrpc::V26, // TODO Should we upgrade to latest spec at 1.3.2? info: InfoObject { @@ -207,7 +255,14 @@ fn generate_api_doc( url: "https://docs.everywhere.computer/homestar/what-is-homestar/".to_string(), }), servers: None, - methods: vec![health, node_info, network, workflow], + methods: vec![ + health, + node_info, + network, + network_unsubscribe, + workflow, + workflow_unsubscribe, + ], components: None, } } From 8b7c68746a339e20ec09e25d07445779a45fb473 Mon Sep 17 00:00:00 2001 From: Brian Ginsburg Date: Fri, 26 Jan 2024 14:32:29 -0800 Subject: [PATCH 17/75] feat: Add metrics schema --- homestar-runtime/schemas/generate.rs | 36 ++++- homestar-runtime/src/lib.rs | 1 + homestar-runtime/src/network/webserver.rs | 1 + .../src/network/webserver/prom.rs | 141 +++++++++++++++++- 4 files changed, 174 insertions(+), 5 deletions(-) diff --git a/homestar-runtime/schemas/generate.rs b/homestar-runtime/schemas/generate.rs index da1f7d7b..a6138990 100644 --- a/homestar-runtime/schemas/generate.rs +++ b/homestar-runtime/schemas/generate.rs @@ -2,7 +2,9 @@ //! JSON Schemas for method params and notifications. 
use homestar_invocation::Receipt; -use homestar_runtime::{Health, NetworkNotification, NodeInfo, ReceiptNotification}; +use homestar_runtime::{ + Health, NetworkNotification, NodeInfo, PrometheusData, ReceiptNotification, +}; use homestar_workflow::Workflow; use schemars::{schema::RootSchema, schema_for}; use std::{fs, io::Write}; @@ -21,6 +23,11 @@ fn main() { .unwrap() .write_all(&serde_json::to_vec_pretty(&health_schema).unwrap()); + let metrics_schema = schema_for!(PrometheusData); + let _ = fs::File::create("schemas/docs/metrics.json") + .unwrap() + .write_all(&serde_json::to_vec_pretty(&metrics_schema).unwrap()); + let node_info_schema = schema_for!(NodeInfo); let _ = fs::File::create("schemas/docs/node_info.json") .unwrap() @@ -48,6 +55,7 @@ fn main() { let api_doc = generate_api_doc( health_schema, + metrics_schema, node_info_schema, network_schema, workflow_schema, @@ -61,6 +69,7 @@ fn main() { // Spec: https://github.com/open-rpc/spec/blob/1.2.6/spec.md fn generate_api_doc( health_schema: RootSchema, + metrics_schema: RootSchema, node_info_schema: RootSchema, network_schema: RootSchema, workflow_schema: RootSchema, @@ -90,6 +99,30 @@ fn generate_api_doc( x_messages: None, }; + let metrics: MethodObject = MethodObject { + name: "metrics".to_string(), + description: None, + summary: None, + servers: None, + tags: None, + param_structure: Some(MethodObjectParamStructure::ByName), + params: vec![], + result: ContentDescriptorOrReference::ContentDescriptorObject(ContentDescriptorObject { + name: "metrics".to_string(), + summary: None, + description: None, + required: Some(true), + schema: JSONSchema::JsonSchemaObject(metrics_schema), + deprecated: Some(false), + }), + external_docs: None, + errors: None, + links: None, + examples: None, + deprecated: Some(false), + x_messages: None, + }; + let node_info: MethodObject = MethodObject { name: "node".to_string(), description: None, @@ -257,6 +290,7 @@ fn generate_api_doc( servers: None, methods: vec![ health, + 
metrics, node_info, network, network_unsubscribe, diff --git a/homestar-runtime/src/lib.rs b/homestar-runtime/src/lib.rs index 5971ff7e..49158382 100644 --- a/homestar-runtime/src/lib.rs +++ b/homestar-runtime/src/lib.rs @@ -75,6 +75,7 @@ pub(crate) mod metrics; pub use event_handler::notification::{receipt::ReceiptNotification, swarm::NetworkNotification}; #[allow(unused_imports)] pub(crate) use event_handler::EventHandler; +pub use network::webserver::PrometheusData; pub use receipt::{Receipt, RECEIPT_TAG, VERSION_KEY}; pub use runner::{NodeInfo, Runner}; pub(crate) use scheduler::TaskScheduler; diff --git a/homestar-runtime/src/network/webserver.rs b/homestar-runtime/src/network/webserver.rs index da4b56e0..dd99bff4 100644 --- a/homestar-runtime/src/network/webserver.rs +++ b/homestar-runtime/src/network/webserver.rs @@ -43,6 +43,7 @@ mod rpc; #[cfg(feature = "websocket-notify")] pub(crate) use notifier::Notifier; +pub use prom::PrometheusData; #[cfg(feature = "websocket-notify")] pub(crate) use rpc::SUBSCRIBE_NETWORK_EVENTS_ENDPOINT; use rpc::{Context, JsonRpc}; diff --git a/homestar-runtime/src/network/webserver/prom.rs b/homestar-runtime/src/network/webserver/prom.rs index 622b8850..900c535f 100644 --- a/homestar-runtime/src/network/webserver/prom.rs +++ b/homestar-runtime/src/network/webserver/prom.rs @@ -5,8 +5,17 @@ use anyhow::{anyhow, bail, Result}; use dyn_clone::DynClone; use once_cell::sync::Lazy; use regex::Regex; +use schemars::{ + gen::SchemaGenerator, + schema::{InstanceType, Metadata, ObjectValidation, Schema, SchemaObject, SingleOrVec}, + JsonSchema, +}; use serde::{Deserialize, Serialize}; -use std::collections::HashMap; +use serde_json::json; +use std::{ + borrow::Cow, + collections::{BTreeMap, BTreeSet, HashMap}, +}; const HISTOGRAM_TYPE: &str = "HISTOGRAM"; const SUMMARY_TYPE: &str = "SUMMARY"; @@ -36,9 +45,11 @@ static MULTI_NEWLINE: Lazy<&Regex> = Lazy::new(|| { type Labels = HashMap; type Value = String; -#[derive(Clone, Serialize)] 
+#[derive(Clone, Serialize, JsonSchema)] /// A parsed representation of the prometheus metrics data -pub(crate) struct PrometheusData { +#[allow(missing_debug_implementations)] +#[schemars(title = "Metrics data", description = "Prometheus metrics data")] +pub struct PrometheusData { metrics: Vec, } @@ -74,6 +85,45 @@ struct Metric { value: Value, } +impl JsonSchema for Metric { + fn schema_name() -> String { + "gauge".to_owned() + } + + fn schema_id() -> Cow<'static, str> { + Cow::Borrowed("homestar-runtime::network::webserver::prom::Metric") + } + + fn json_schema(gen: &mut SchemaGenerator) -> Schema { + let type_schema = SchemaObject { + instance_type: Some(SingleOrVec::Single(InstanceType::String.into())), + const_value: Some(json!("metric")), + ..Default::default() + }; + + let schema = SchemaObject { + instance_type: Some(SingleOrVec::Single(InstanceType::Object.into())), + metadata: Some(Box::new(Metadata { + title: Some("Gauge data".to_string()), + description: Some("A gauge metric".to_string()), + ..Default::default() + })), + object: Some(Box::new(ObjectValidation { + properties: BTreeMap::from([ + ("type".to_string(), Schema::Object(type_schema)), + ("labels".to_string(), >::json_schema(gen)), + ("value".to_string(), ::json_schema(gen)), + ]), + required: BTreeSet::from(["type".to_string(), "value".to_string()]), + ..Default::default() + })), + ..Default::default() + }; + + schema.into() + } +} + #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] struct Summary { labels: Option, @@ -90,8 +140,9 @@ struct Histogram { sum: Value, } -#[derive(Debug, Clone, PartialEq, Serialize)] +#[derive(Debug, Clone, PartialEq, Serialize, JsonSchema)] #[serde(rename_all = "lowercase")] +#[schemars(title = "Metric type")] enum MetricType { Gauge, Histogram, @@ -106,6 +157,88 @@ struct MetricFamily { data: Vec>, } +impl JsonSchema for MetricFamily { + fn schema_name() -> String { + "metric".to_owned() + } + + fn schema_id() -> Cow<'static, str> { + 
Cow::Borrowed("homestar-runtime::network::webserver::prom::MetricFamily") + } + + fn json_schema(gen: &mut SchemaGenerator) -> Schema { + struct DataConditional { + if_schema: Schema, + then_schema: Schema, + else_schema: Schema, + } + + fn data_conditional(gen: &mut SchemaGenerator) -> DataConditional { + let if_schema = SchemaObject { + instance_type: None, + object: Some(Box::new(ObjectValidation { + properties: BTreeMap::from([( + "metric_type".to_owned(), + Schema::Object(SchemaObject { + instance_type: Some(SingleOrVec::Single(InstanceType::String.into())), + const_value: Some(json!("gauge")), + ..Default::default() + }), + )]), + ..Default::default() + })), + ..Default::default() + }; + + let then_schema = SchemaObject { + instance_type: None, + object: Some(Box::new(ObjectValidation { + properties: BTreeMap::from([("data".to_string(), ::json_schema(gen))]), + ..Default::default() + })), + ..Default::default() + }; + + DataConditional { + if_schema: Schema::Object(if_schema), + then_schema: Schema::Object(then_schema), + else_schema: Schema::Bool(false), + } + } + + let mut schema = SchemaObject { + instance_type: Some(SingleOrVec::Single(InstanceType::Object.into())), + metadata: Some(Box::new(Metadata { + title: Some("Metric family".to_string()), + description: Some("A prometheus gauge, summary, or histogram metric".to_string()), + ..Default::default() + })), + object: Some(Box::new(ObjectValidation { + properties: BTreeMap::from([ + ("metric_type".to_string(), ::json_schema(gen)), + ("metric_name".to_string(), ::json_schema(gen)), + ("help".to_string(), ::json_schema(gen)), + ]), + required: BTreeSet::from([ + "metric_type".to_string(), + "metric_name".to_string(), + "help".to_string(), + "data".to_string(), + ]), + ..Default::default() + })), + ..Default::default() + }; + + let data = data_conditional(gen); + schema.subschemas().if_schema = Some(Box::new(data.if_schema)); + schema.subschemas().then_schema = Some(Box::new(data.then_schema)); + 
schema.subschemas().else_schema = Some(Box::new(data.else_schema)); + + schema.into() + } +} + #[typetag::serde(tag = "type")] trait MetricLike: DynClone { fn parse_from_string(s: &str) -> Result<(Value, Option)> From 674c44b458fa2b4528fff71d636590388744ec83 Mon Sep 17 00:00:00 2001 From: Brian Ginsburg Date: Fri, 26 Jan 2024 14:34:55 -0800 Subject: [PATCH 18/75] chore: Set homestar as default binary --- homestar-runtime/Cargo.toml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/homestar-runtime/Cargo.toml b/homestar-runtime/Cargo.toml index accd615e..49353e59 100644 --- a/homestar-runtime/Cargo.toml +++ b/homestar-runtime/Cargo.toml @@ -12,6 +12,7 @@ documentation = "https://docs.rs/homestar-runtime" repository = "https://github.com/ipvm-wg/homestar/tree/main/homestar-runtime" authors = { workspace = true } autotests = false +default-run = "homestar" [lib] path = "src/lib.rs" @@ -139,7 +140,7 @@ sec1 = { version = "0.7", default-features = false, features = ["pem"] } semver = { version = "1.0", default-features = false } serde = { workspace = true } serde_ipld_dagcbor = { workspace = true } -serde_json = { workspace = true } +serde_json = { workspace = true } serde_with = { version = "3.5", default-features = false, features = [ "base64", "macros", From e134d7f965469c801e0ddd56c36ee86a39c07bcb Mon Sep 17 00:00:00 2001 From: Brian Ginsburg Date: Mon, 29 Jan 2024 08:45:38 -0800 Subject: [PATCH 19/75] test: Add network notification testing apparatus --- .../src/event_handler/notification/swarm.rs | 109 ++++++++++++------ 1 file changed, 74 insertions(+), 35 deletions(-) diff --git a/homestar-runtime/src/event_handler/notification/swarm.rs b/homestar-runtime/src/event_handler/notification/swarm.rs index ae9f8d91..d003b7b3 100644 --- a/homestar-runtime/src/event_handler/notification/swarm.rs +++ b/homestar-runtime/src/event_handler/notification/swarm.rs @@ -297,51 +297,90 @@ impl TryFrom for ConnectionClosed { #[cfg(test)] mod test { use super::*; - 
use libp2p::PeerId; - #[test] - fn notification_bytes_rountrip() { - let peer_id = PeerId::random(); - let address = Multiaddr::from_str("/ip4/127.0.0.1/tcp/7000").unwrap(); - let inner = ConnectionEstablished::new(peer_id, address.clone()); + #[derive(Clone, Debug)] + struct Fixtures { + peer_id: PeerId, + address: Multiaddr, + } - let notification = NetworkNotification::ConnnectionEstablished(inner.clone()); - let bytes = notification.to_json().unwrap(); - let parsed = NetworkNotification::from_json(bytes.as_ref()).unwrap(); + fn generate_notifications(fixtures: Fixtures) -> Vec<(i64, NetworkNotification)> { + let Fixtures { peer_id, address } = fixtures; + let connection_established = ConnectionEstablished::new(peer_id, address.clone()); + let connection_closed = ConnectionClosed::new(peer_id, address.clone()); + + vec![ + ( + connection_established.timestamp, + NetworkNotification::ConnnectionEstablished(connection_established.clone()), + ), + ( + connection_closed.timestamp, + NetworkNotification::ConnnectionClosed(connection_closed.clone()), + ), + ] + } - match parsed { - NetworkNotification::ConnnectionEstablished(n) => { - let parsed_peer_id = PeerId::from_str(&n.peer_id).unwrap(); - let parsed_address = Multiaddr::from_str(&n.address).unwrap(); + fn check_notification(timestamp: i64, notification: NetworkNotification, fixtures: Fixtures) { + let Fixtures { peer_id, address } = fixtures; - assert_eq!(parsed_peer_id, peer_id); - assert_eq!(parsed_address, address); - assert_eq!(n.timestamp, inner.timestamp); + match notification { + NetworkNotification::ConnnectionEstablished(n) => { + assert_eq!(PeerId::from_str(&n.peer_id).unwrap(), peer_id); + assert_eq!(Multiaddr::from_str(&n.address).unwrap(), address); + assert_eq!(n.timestamp, timestamp) + } + NetworkNotification::ConnnectionClosed(n) => { + assert_eq!(PeerId::from_str(&n.peer_id).unwrap(), peer_id); + assert_eq!(Multiaddr::from_str(&n.address).unwrap(), address); + assert_eq!(n.timestamp, timestamp) 
} - _ => panic!("Parsed notification did not matched expected variant"), } } #[test] - fn notification_json_string_rountrip() { - let peer_id = PeerId::random(); - let address = Multiaddr::from_str("/ip4/127.0.0.1/tcp/7000").unwrap(); - let inner = ConnectionEstablished::new(peer_id, address.clone()); - - let notification = NetworkNotification::ConnnectionEstablished(inner.clone()); - let json_string = notification.to_json_string().unwrap(); - let parsed = NetworkNotification::from_json_string(json_string).unwrap(); - - match parsed { - NetworkNotification::ConnnectionEstablished(n) => { - let parsed_peer_id = PeerId::from_str(&n.peer_id).unwrap(); - let parsed_address = Multiaddr::from_str(&n.address).unwrap(); + fn notification_bytes_rountrip() { + let fixtures = Fixtures { + peer_id: PeerId::random(), + address: Multiaddr::from_str("/ip4/127.0.0.1/tcp/7000").unwrap(), + }; + + // Generate notifications and convert them to bytes + let notifications: Vec<(i64, Vec)> = generate_notifications(fixtures.clone()) + .into_iter() + .map(|(timestamp, notification)| (timestamp, notification.to_json().unwrap())) + .collect(); + + // Convert notifications back and check them + for (timestamp, bytes) in notifications { + check_notification( + timestamp, + NetworkNotification::from_json(bytes.as_ref()).unwrap(), + fixtures.clone(), + ) + } + } - assert_eq!(parsed_peer_id, peer_id); - assert_eq!(parsed_address, address); - assert_eq!(n.timestamp, inner.timestamp); - } - _ => panic!("Parsed notification did not matched expected variant"), + #[test] + fn notification_json_string_rountrip() { + let fixtures = Fixtures { + peer_id: PeerId::random(), + address: Multiaddr::from_str("/ip4/127.0.0.1/tcp/7000").unwrap(), + }; + + // Generate notifications and convert them to JSON strings + let notifications: Vec<(i64, String)> = generate_notifications(fixtures.clone()) + .into_iter() + .map(|(timestamp, notification)| (timestamp, notification.to_json_string().unwrap())) + .collect(); + 
+ // Convert notifications back and check them + for (timestamp, json) in notifications { + check_notification( + timestamp, + NetworkNotification::from_json_string(json).unwrap(), + fixtures.clone(), + ) } } } From 9e0c53b671d4b4d69f97201e213ac728f2414d8c Mon Sep 17 00:00:00 2001 From: Brian Ginsburg Date: Mon, 29 Jan 2024 13:42:41 -0800 Subject: [PATCH 20/75] test: Fix nodeinfo test --- homestar-runtime/tests/network/notification.rs | 2 -- 1 file changed, 2 deletions(-) diff --git a/homestar-runtime/tests/network/notification.rs b/homestar-runtime/tests/network/notification.rs index a076e701..7c33a1e6 100644 --- a/homestar-runtime/tests/network/notification.rs +++ b/homestar-runtime/tests/network/notification.rs @@ -175,10 +175,8 @@ fn test_connection_notifications_integration() -> Result<()> { assert_eq!( http_resp, serde_json::json!({ - "nodeInfo": { "static": {"peer_id": ED25519MULTIHASH}, "dynamic": {"listeners": [format!("{listen_addr1}")], "connections": {}} - } }) ); }); From 66f2b7646893e12a8a3b6d5562cad90a587c97ae Mon Sep 17 00:00:00 2001 From: Brian Ginsburg Date: Mon, 29 Jan 2024 13:57:31 -0800 Subject: [PATCH 21/75] feat: Add mDNS discovered notification Update tests to use notifications instead of timeouts --- .../src/event_handler/notification.rs | 20 +- .../src/event_handler/notification/swarm.rs | 179 ++++++++- .../src/event_handler/swarm_event.rs | 8 +- homestar-runtime/tests/network/mdns.rs | 380 ++++++++++++------ 4 files changed, 435 insertions(+), 152 deletions(-) diff --git a/homestar-runtime/src/event_handler/notification.rs b/homestar-runtime/src/event_handler/notification.rs index 6b580ebf..18926fac 100644 --- a/homestar-runtime/src/event_handler/notification.rs +++ b/homestar-runtime/src/event_handler/notification.rs @@ -19,9 +19,7 @@ use tracing::{debug, warn}; pub(crate) mod receipt; pub(crate) mod swarm; pub(crate) use receipt::ReceiptNotification; -pub(crate) use swarm::{ - ConnectionClosed, ConnectionEstablished, 
NetworkNotification, SwarmNotification, -}; +pub(crate) use swarm::*; const TYPE_KEY: &str = "type"; const DATA_KEY: &str = "data"; @@ -103,14 +101,14 @@ pub(crate) fn emit_network_event( ); if let Ok(json) = notification.to_json() { - if let Err(err) = notifier.notify(Message::new(header, json)) { - debug!( - subject = "notification.err", - category = "notification", - err=?err, - "unable to send notification {:?}", - notification, - ) + if let Err(_err) = notifier.notify(Message::new(header, json)) { + // debug!( + // subject = "notification.err", + // category = "notification", + // err=?err, + // "unable to send notification {:?}", + // notification, + // ) }; } else { debug!( diff --git a/homestar-runtime/src/event_handler/notification/swarm.rs b/homestar-runtime/src/event_handler/notification/swarm.rs index d003b7b3..5feae5b7 100644 --- a/homestar-runtime/src/event_handler/notification/swarm.rs +++ b/homestar-runtime/src/event_handler/notification/swarm.rs @@ -7,9 +7,13 @@ use chrono::prelude::Utc; use homestar_invocation::ipld::DagJson; use libipld::{serde::from_ipld, Ipld}; use libp2p::{Multiaddr, PeerId}; -use schemars::JsonSchema; +use schemars::{ + gen::SchemaGenerator, + schema::{InstanceType, Metadata, ObjectValidation, Schema, SchemaObject, SingleOrVec}, + JsonSchema, +}; use serde::{Deserialize, Serialize}; -use std::{collections::BTreeMap, fmt, str::FromStr}; +use std::{borrow::Cow, collections::BTreeMap, fmt, str::FromStr}; const TIMESTAMP_KEY: &str = "timestamp"; @@ -124,6 +128,9 @@ pub enum NetworkNotification { /// Connection closed notification. #[schemars(rename = "connection_closed")] ConnnectionClosed(ConnectionClosed), + /// mDNS discovered notification. 
+ #[schemars(rename = "discovered_mdns")] + DiscoveredMdns(DiscoveredMdns), } impl DagJson for NetworkNotification {} @@ -138,6 +145,9 @@ impl From for Ipld { NetworkNotification::ConnnectionClosed(n) => { Ipld::Map(BTreeMap::from([("connection_closed".into(), n.into())])) } + NetworkNotification::DiscoveredMdns(n) => { + Ipld::Map(BTreeMap::from([("discovered_mdns".into(), n.into())])) + } } } } @@ -156,6 +166,9 @@ impl TryFrom for NetworkNotification { "connection_closed" => Ok(NetworkNotification::ConnnectionClosed( ConnectionClosed::try_from(val.to_owned())?, )), + "discovered_mdns" => Ok(NetworkNotification::DiscoveredMdns( + DiscoveredMdns::try_from(val.to_owned())?, + )), _ => Err(anyhow!("Unknown network notification tag type")), } } else { @@ -294,6 +307,116 @@ impl TryFrom for ConnectionClosed { } } +#[derive(Debug, Clone)] +pub struct DiscoveredMdns { + timestamp: i64, + peers: Vec<(String, String)>, +} + +impl DiscoveredMdns { + pub(crate) fn new(peers: Vec<(PeerId, Multiaddr)>) -> DiscoveredMdns { + DiscoveredMdns { + timestamp: Utc::now().timestamp_millis(), + peers: peers + .iter() + .map(|(peer_id, address)| (peer_id.to_string(), address.to_string())) + .collect(), + } + } +} + +impl DagJson for DiscoveredMdns {} + +impl From for Ipld { + fn from(notification: DiscoveredMdns) -> Self { + let peers: BTreeMap = notification + .peers + .into_iter() + .map(|(peer_id, address)| (peer_id, address.into())) + .collect(); + + let map: BTreeMap = BTreeMap::from([ + ("timestamp".into(), notification.timestamp.into()), + ("peers".into(), peers.into()), + ]); + + Ipld::Map(map) + } +} + +impl TryFrom for DiscoveredMdns { + type Error = anyhow::Error; + + fn try_from(ipld: Ipld) -> Result { + let peers_key: &str = "peers"; + let map = from_ipld::>(ipld)?; + + let timestamp = from_ipld( + map.get(TIMESTAMP_KEY) + .ok_or_else(|| anyhow!("missing {TIMESTAMP_KEY}"))? 
+ .to_owned(), + )?; + + let peers_map = from_ipld::>( + map.get(peers_key) + .ok_or_else(|| anyhow!("missing {peers_key}"))? + .to_owned(), + )?; + + let mut peers: Vec<(String, String)> = vec![]; + for peer in peers_map.iter() { + peers.push((peer.0.to_string(), from_ipld(peer.1.to_owned())?)) + } + + Ok(DiscoveredMdns { timestamp, peers }) + } +} + +impl JsonSchema for DiscoveredMdns { + fn schema_name() -> String { + "discovered_mdns".to_owned() + } + + fn schema_id() -> Cow<'static, str> { + Cow::Borrowed("homestar-runtime::event_handler::notification::swarm::DiscoveredMdns") + } + + fn json_schema(gen: &mut SchemaGenerator) -> Schema { + let schema = SchemaObject { + instance_type: Some(SingleOrVec::Single(InstanceType::Object.into())), + object: Some(Box::new(ObjectValidation { + properties: BTreeMap::from([ + ( + "timestamp".to_string(), + Schema::Object(SchemaObject { + instance_type: Some(SingleOrVec::Single(InstanceType::Number.into())), + ..Default::default() + }), + ), + ( + "peers".to_string(), + Schema::Object(SchemaObject { + instance_type: Some(SingleOrVec::Single(InstanceType::Object.into())), + metadata: Some(Box::new(Metadata { + description: Some("Peers and their addresses".to_string()), + ..Default::default() + })), + object: Some(Box::new(ObjectValidation { + additional_properties: Some(Box::new(::json_schema(gen))), + ..Default::default() + })), + ..Default::default() + }), + ), + ]), + ..Default::default() + })), + ..Default::default() + }; + schema.into() + } +} + #[cfg(test)] mod test { use super::*; @@ -302,12 +425,18 @@ mod test { struct Fixtures { peer_id: PeerId, address: Multiaddr, + peers: Vec<(PeerId, Multiaddr)>, } fn generate_notifications(fixtures: Fixtures) -> Vec<(i64, NetworkNotification)> { - let Fixtures { peer_id, address } = fixtures; + let Fixtures { + peer_id, + address, + peers, + } = fixtures; let connection_established = ConnectionEstablished::new(peer_id, address.clone()); let connection_closed = 
ConnectionClosed::new(peer_id, address.clone()); + let discovered_mdns = DiscoveredMdns::new(peers); vec![ ( @@ -318,22 +447,40 @@ mod test { connection_closed.timestamp, NetworkNotification::ConnnectionClosed(connection_closed.clone()), ), + ( + discovered_mdns.timestamp, + NetworkNotification::DiscoveredMdns(discovered_mdns.clone()), + ), ] } fn check_notification(timestamp: i64, notification: NetworkNotification, fixtures: Fixtures) { - let Fixtures { peer_id, address } = fixtures; + let Fixtures { + peer_id, + address, + peers, + } = fixtures; match notification { NetworkNotification::ConnnectionEstablished(n) => { + assert_eq!(n.timestamp, timestamp); assert_eq!(PeerId::from_str(&n.peer_id).unwrap(), peer_id); assert_eq!(Multiaddr::from_str(&n.address).unwrap(), address); - assert_eq!(n.timestamp, timestamp) } NetworkNotification::ConnnectionClosed(n) => { + assert_eq!(n.timestamp, timestamp); assert_eq!(PeerId::from_str(&n.peer_id).unwrap(), peer_id); assert_eq!(Multiaddr::from_str(&n.address).unwrap(), address); - assert_eq!(n.timestamp, timestamp) + } + NetworkNotification::DiscoveredMdns(n) => { + assert_eq!(n.timestamp, timestamp); + + for peer in n.peers { + assert!(peers.contains(&( + PeerId::from_str(&peer.0).unwrap(), + Multiaddr::from_str(&peer.1).unwrap() + ))) + } } } } @@ -343,6 +490,16 @@ mod test { let fixtures = Fixtures { peer_id: PeerId::random(), address: Multiaddr::from_str("/ip4/127.0.0.1/tcp/7000").unwrap(), + peers: vec![ + ( + PeerId::random(), + Multiaddr::from_str("/ip4/127.0.0.1/tcp/7000").unwrap(), + ), + ( + PeerId::random(), + Multiaddr::from_str("/ip4/127.0.0.1/tcp/7001").unwrap(), + ), + ], }; // Generate notifications and convert them to bytes @@ -366,6 +523,16 @@ mod test { let fixtures = Fixtures { peer_id: PeerId::random(), address: Multiaddr::from_str("/ip4/127.0.0.1/tcp/7000").unwrap(), + peers: vec![ + ( + PeerId::random(), + Multiaddr::from_str("/ip4/127.0.0.1/tcp/7000").unwrap(), + ), + ( + PeerId::random(), + 
Multiaddr::from_str("/ip4/127.0.0.1/tcp/7001").unwrap(), + ), + ], }; // Generate notifications and convert them to JSON strings diff --git a/homestar-runtime/src/event_handler/swarm_event.rs b/homestar-runtime/src/event_handler/swarm_event.rs index c453dcac..1b571e67 100644 --- a/homestar-runtime/src/event_handler/swarm_event.rs +++ b/homestar-runtime/src/event_handler/swarm_event.rs @@ -1022,7 +1022,7 @@ async fn handle_swarm_event( } SwarmEvent::Behaviour(ComposedEvent::Mdns(mdns::Event::Discovered(list))) => { - for (peer_id, multiaddr) in list { + for (peer_id, multiaddr) in list.clone() { debug!( subject = "libp2p.mdns.discovered", category = "handle_swarm_event", @@ -1047,6 +1047,12 @@ async fn handle_swarm_event( ) } } + + #[cfg(feature = "websocket-notify")] + notification::emit_network_event( + event_handler.ws_evt_sender(), + NetworkNotification::DiscoveredMdns(notification::DiscoveredMdns::new(list)), + ) } SwarmEvent::Behaviour(ComposedEvent::Mdns(mdns::Event::Expired(list))) => { let behaviour = event_handler.swarm.behaviour_mut(); diff --git a/homestar-runtime/tests/network/mdns.rs b/homestar-runtime/tests/network/mdns.rs index 0181dd9c..949ddecc 100644 --- a/homestar-runtime/tests/network/mdns.rs +++ b/homestar-runtime/tests/network/mdns.rs @@ -2,19 +2,27 @@ use crate::{ make_config, utils::{ check_for_line_with, kill_homestar, retrieve_output, wait_for_socket_connection, - wait_for_socket_connection_v6, ChildGuard, ProcInfo, BIN_NAME, ED25519MULTIHASH2, - ED25519MULTIHASH4, ED25519MULTIHASH5, + wait_for_socket_connection_v6, ChildGuard, ProcInfo, TimeoutFutureExt, BIN_NAME, + ED25519MULTIHASH2, ED25519MULTIHASH4, ED25519MULTIHASH5, }, }; use anyhow::Result; +use jsonrpsee::{ + core::client::{Subscription, SubscriptionClientT}, + rpc_params, + ws_client::WsClientBuilder, +}; use once_cell::sync::Lazy; use std::{ + net::Ipv4Addr, path::PathBuf, process::{Command, Stdio}, time::Duration, }; static BIN: Lazy = Lazy::new(|| 
assert_cmd::cargo::cargo_bin(BIN_NAME)); +const SUBSCRIBE_NETWORK_EVENTS_ENDPOINT: &str = "subscribe_network_events"; +const UNSUBSCRIBE_NETWORK_EVENTS_ENDPOINT: &str = "unsubscribe_network_events"; #[test] #[serial_test::file_serial] @@ -29,8 +37,9 @@ fn test_libp2p_connect_after_mdns_discovery_serial() -> Result<()> { let ws_port1 = proc_info1.ws_port; let ws_port2 = proc_info2.ws_port; - let toml1 = format!( - r#" + tokio_test::block_on(async { + let toml1 = format!( + r#" [node] [node.network.keypair_config] existing = {{ key_type = "ed25519", path = "./fixtures/__testkey_ed25519_2.pem" }} @@ -45,12 +54,12 @@ fn test_libp2p_connect_after_mdns_discovery_serial() -> Result<()> { [node.network.webserver] port = {ws_port1} "# - ); - let config1 = make_config!(toml1); + ); + let config1 = make_config!(toml1); - // Start two nodes each configured to listen at 0.0.0.0 with no known peers. - // The nodes are configured with port 0 to allow the OS to select a port. - let homestar_proc1 = Command::new(BIN.as_os_str()) + // Start two nodes each configured to listen at 0.0.0.0 with no known peers. + // The nodes are configured with port 0 to allow the OS to select a port. 
+ let homestar_proc1 = Command::new(BIN.as_os_str()) .env( "RUST_LOG", "homestar=debug,homestar_runtime=debug,libp2p=debug,libp2p_gossipsub::behaviour=debug", @@ -63,14 +72,29 @@ fn test_libp2p_connect_after_mdns_discovery_serial() -> Result<()> { .stdout(Stdio::piped()) .spawn() .unwrap(); - let proc_guard1 = ChildGuard::new(homestar_proc1); - - if wait_for_socket_connection_v6(rpc_port1, 1000).is_err() { - panic!("Homestar server/runtime failed to start in time"); - } - - let toml2 = format!( - r#" + let proc_guard1 = ChildGuard::new(homestar_proc1); + + if wait_for_socket_connection_v6(rpc_port1, 1000).is_err() { + panic!("Homestar server/runtime failed to start in time"); + } + + let ws_url1 = format!("ws://{}:{}", Ipv4Addr::LOCALHOST, ws_port1); + let client = WsClientBuilder::default() + .build(ws_url1.clone()) + .await + .unwrap(); + + let mut sub1: Subscription> = client + .subscribe( + SUBSCRIBE_NETWORK_EVENTS_ENDPOINT, + rpc_params![], + UNSUBSCRIBE_NETWORK_EVENTS_ENDPOINT, + ) + .await + .unwrap(); + + let toml2 = format!( + r#" [node] [node.network.keypair_config] existing = {{ key_type = "ed25519", path = "./fixtures/__testkey_ed25519_5.pem" }} @@ -85,10 +109,10 @@ fn test_libp2p_connect_after_mdns_discovery_serial() -> Result<()> { [node.network.webserver] port = {ws_port2} "# - ); - let config2 = make_config!(toml2); + ); + let config2 = make_config!(toml2); - let homestar_proc2 = Command::new(BIN.as_os_str()) + let homestar_proc2 = Command::new(BIN.as_os_str()) .env( "RUST_LOG", "homestar=debug,homestar_runtime=debug,libp2p=debug,libp2p_gossipsub::behaviour=debug", @@ -101,75 +125,104 @@ fn test_libp2p_connect_after_mdns_discovery_serial() -> Result<()> { .stdout(Stdio::piped()) .spawn() .unwrap(); - let proc_guard2 = ChildGuard::new(homestar_proc2); - - if wait_for_socket_connection_v6(rpc_port2, 1000).is_err() { - panic!("Homestar server/runtime failed to start in time"); - } - - // Collect logs for seven seconds then kill processes. 
- let dead_proc1 = kill_homestar(proc_guard1.take(), Some(Duration::from_secs(7))); - let dead_proc2 = kill_homestar(proc_guard2.take(), Some(Duration::from_secs(7))); - - // Retrieve logs. - let stdout1 = retrieve_output(dead_proc1); - let stdout2 = retrieve_output(dead_proc2); - - // Check that node one connected to node two. - let one_connected_to_two = check_for_line_with( - stdout1.clone(), - vec!["peer connection established", ED25519MULTIHASH5], - ); - - // Check node two was added to the Kademlia table - let two_addded_to_dht = check_for_line_with( - stdout1.clone(), - vec![ - "added identified node to kademlia routing table", - ED25519MULTIHASH5, - ], - ); - - // Check that DHT routing table was updated with node two - let two_in_dht_routing_table = check_for_line_with( - stdout1, - vec![ - "kademlia routing table updated with peer", - ED25519MULTIHASH5, - ], - ); - - assert!(one_connected_to_two); - assert!(two_addded_to_dht); - assert!(two_in_dht_routing_table); - - // Check that node two connected to node one. 
- let two_connected_to_one = check_for_line_with( - stdout2.clone(), - vec!["peer connection established", ED25519MULTIHASH2], - ); - - // Check node one was added to the Kademlia table - let one_addded_to_dht = check_for_line_with( - stdout2.clone(), - vec![ - "added identified node to kademlia routing table", - ED25519MULTIHASH2, - ], - ); - - // Check that DHT routing table was updated with node one - let one_in_dht_routing_table = check_for_line_with( - stdout2, - vec![ - "kademlia routing table updated with peer", - ED25519MULTIHASH2, - ], - ); - - assert!(two_connected_to_one); - assert!(one_addded_to_dht); - assert!(one_in_dht_routing_table); + let proc_guard2 = ChildGuard::new(homestar_proc2); + + if wait_for_socket_connection_v6(rpc_port2, 1000).is_err() { + panic!("Homestar server/runtime failed to start in time"); + } + + // Poll for mDNS discovered message and conenection established messages + let mut discovered_mdns = false; + let mut connection_established = false; + loop { + if let Ok(msg) = sub1.next().with_timeout(Duration::from_secs(30)).await { + let json: serde_json::Value = + serde_json::from_slice(&msg.unwrap().unwrap()).unwrap(); + + if json["discovered_mdns"].is_object() { + discovered_mdns = true; + } else if json["connection_established"].is_object() { + connection_established = true; + } + } else { + panic!( + r#"Expected notifications from node one did not arrive in time: + - mDNS discovered: {} + - Connection established: {} + "#, + discovered_mdns, connection_established + ); + } + + if connection_established && discovered_mdns { + break; + } + } + + // Collect logs for seven seconds then kill processes. + let dead_proc1 = kill_homestar(proc_guard1.take(), None); + let dead_proc2 = kill_homestar(proc_guard2.take(), None); + + // Retrieve logs. + let stdout1 = retrieve_output(dead_proc1); + let stdout2 = retrieve_output(dead_proc2); + + // Check that node one connected to node two. 
+ let one_connected_to_two = check_for_line_with( + stdout1.clone(), + vec!["peer connection established", ED25519MULTIHASH5], + ); + + // Check node two was added to the Kademlia table + let two_addded_to_dht = check_for_line_with( + stdout1.clone(), + vec![ + "added identified node to kademlia routing table", + ED25519MULTIHASH5, + ], + ); + + // Check that DHT routing table was updated with node two + let two_in_dht_routing_table = check_for_line_with( + stdout1, + vec![ + "kademlia routing table updated with peer", + ED25519MULTIHASH5, + ], + ); + + assert!(one_connected_to_two); + assert!(two_addded_to_dht); + assert!(two_in_dht_routing_table); + + // Check that node two connected to node one. + let two_connected_to_one = check_for_line_with( + stdout2.clone(), + vec!["peer connection established", ED25519MULTIHASH2], + ); + + // Check node one was added to the Kademlia table + let one_addded_to_dht = check_for_line_with( + stdout2.clone(), + vec![ + "added identified node to kademlia routing table", + ED25519MULTIHASH2, + ], + ); + + // Check that DHT routing table was updated with node one + let one_in_dht_routing_table = check_for_line_with( + stdout2, + vec![ + "kademlia routing table updated with peer", + ED25519MULTIHASH2, + ], + ); + + assert!(two_connected_to_one); + assert!(one_addded_to_dht); + assert!(one_in_dht_routing_table); + }); Ok(()) } @@ -190,8 +243,9 @@ fn test_libp2p_disconnect_mdns_discovery_serial() -> Result<()> { let ws_port1 = proc_info1.ws_port; let ws_port2 = proc_info2.ws_port; - let toml1 = format!( - r#" + tokio_test::block_on(async { + let toml1 = format!( + r#" [node] [node.network.keypair_config] existing = {{ key_type = "ed25519", path = "./fixtures/__testkey_ed25519_3.pem" }} @@ -206,10 +260,10 @@ fn test_libp2p_disconnect_mdns_discovery_serial() -> Result<()> { [node.network.webserver] port = {ws_port1} "# - ); - let config1 = make_config!(toml1); + ); + let config1 = make_config!(toml1); - let homestar_proc1 = 
Command::new(BIN.as_os_str()) + let homestar_proc1 = Command::new(BIN.as_os_str()) .env( "RUST_LOG", "homestar=debug,homestar_runtime=debug,libp2p=debug,libp2p_gossipsub::behaviour=debug", @@ -222,14 +276,29 @@ fn test_libp2p_disconnect_mdns_discovery_serial() -> Result<()> { .stdout(Stdio::piped()) .spawn() .unwrap(); - let proc_guard1 = ChildGuard::new(homestar_proc1); - - if wait_for_socket_connection(ws_port1, 1000).is_err() { - panic!("Homestar server/runtime failed to start in time"); - } - - let toml2 = format!( - r#" + let proc_guard1 = ChildGuard::new(homestar_proc1); + + if wait_for_socket_connection(ws_port1, 1000).is_err() { + panic!("Homestar server/runtime failed to start in time"); + } + + let ws_url1 = format!("ws://{}:{}", Ipv4Addr::LOCALHOST, ws_port1); + let client = WsClientBuilder::default() + .build(ws_url1.clone()) + .await + .unwrap(); + + let mut sub1: Subscription> = client + .subscribe( + SUBSCRIBE_NETWORK_EVENTS_ENDPOINT, + rpc_params![], + UNSUBSCRIBE_NETWORK_EVENTS_ENDPOINT, + ) + .await + .unwrap(); + + let toml2 = format!( + r#" [node] [node.network.keypair_config] existing = {{ key_type = "ed25519", path = "./fixtures/__testkey_ed25519_4.pem" }} @@ -244,10 +313,10 @@ fn test_libp2p_disconnect_mdns_discovery_serial() -> Result<()> { [node.network.webserver] port = {ws_port2} "# - ); - let config2 = make_config!(toml2); + ); + let config2 = make_config!(toml2); - let homestar_proc2 = Command::new(BIN.as_os_str()) + let homestar_proc2 = Command::new(BIN.as_os_str()) .env( "RUST_LOG", "homestar=debug,homestar_runtime=debug,libp2p=debug,libp2p_gossipsub::behaviour=debug", @@ -260,35 +329,78 @@ fn test_libp2p_disconnect_mdns_discovery_serial() -> Result<()> { .stdout(Stdio::piped()) .spawn() .unwrap(); - let proc_guard2 = ChildGuard::new(homestar_proc2); - - if wait_for_socket_connection(ws_port2, 1000).is_err() { - panic!("Homestar server/runtime failed to start in time"); - } - - // Kill node two after seven seconds. 
- let _ = kill_homestar(proc_guard2.take(), Some(Duration::from_secs(7))); - - // Collect logs for eight seconds then kill node one. - let dead_proc1 = kill_homestar(proc_guard1.take(), Some(Duration::from_secs(8))); - - // Retrieve logs. - let stdout = retrieve_output(dead_proc1); - - // Check that node two disconnected from node one. - let two_disconnected_from_one = check_for_line_with( - stdout.clone(), - vec!["peer connection closed", ED25519MULTIHASH4], - ); - - // Check that node two was removed from the Kademlia table - let two_removed_from_dht_table = check_for_line_with( - stdout.clone(), - vec!["removed peer from kademlia table", ED25519MULTIHASH4], - ); - - assert!(two_disconnected_from_one); - assert!(two_removed_from_dht_table); + let proc_guard2 = ChildGuard::new(homestar_proc2); + + if wait_for_socket_connection(ws_port2, 1000).is_err() { + panic!("Homestar server/runtime failed to start in time"); + } + + // Poll for mDNS discovered message and conenection established messages + let mut discovered_mdns = false; + let mut connection_established = false; + loop { + if let Ok(msg) = sub1.next().with_timeout(Duration::from_secs(30)).await { + let json: serde_json::Value = + serde_json::from_slice(&msg.unwrap().unwrap()).unwrap(); + + if json["discovered_mdns"].is_object() { + discovered_mdns = true; + } else if json["connection_established"].is_object() { + connection_established = true; + } + } else { + panic!( + r#"Expected notifications from node one did not arrive in time: +- mDNS discovered: {} +- Connection established: {} +"#, + discovered_mdns, connection_established + ); + } + + if connection_established && discovered_mdns { + break; + } + } + + // Kill node two + let _ = kill_homestar(proc_guard2.take(), None); + + // Poll for connection closed message + loop { + if let Ok(msg) = sub1.next().with_timeout(Duration::from_secs(30)).await { + let json: serde_json::Value = + serde_json::from_slice(&msg.unwrap().unwrap()).unwrap(); + + if 
json["connection_closed"].is_object() { + break; + } + } else { + panic!("Node two did not disconnect from node one in time"); + } + } + + // Collect logs for eight seconds then kill node one. + let dead_proc1 = kill_homestar(proc_guard1.take(), None); + + // Retrieve logs. + let stdout = retrieve_output(dead_proc1); + + // Check that node two disconnected from node one. + let two_disconnected_from_one = check_for_line_with( + stdout.clone(), + vec!["peer connection closed", ED25519MULTIHASH4], + ); + + // Check that node two was removed from the Kademlia table + let two_removed_from_dht_table = check_for_line_with( + stdout.clone(), + vec!["removed peer from kademlia table", ED25519MULTIHASH4], + ); + + assert!(two_disconnected_from_one); + assert!(two_removed_from_dht_table); + }); Ok(()) } From b6de6f8a583046068de7b9a4fc29743029018367 Mon Sep 17 00:00:00 2001 From: Brian Ginsburg Date: Tue, 30 Jan 2024 12:04:36 -0800 Subject: [PATCH 22/75] wip: Add rendezvous discovery notifications --- homestar-runtime/src/event_handler/cache.rs | 2 + .../src/event_handler/notification.rs | 18 +- .../src/event_handler/notification/swarm.rs | 456 ++++++++++++++++-- .../src/event_handler/swarm_event.rs | 74 ++- homestar-runtime/src/lib.rs | 1 + homestar-runtime/src/network/webserver/rpc.rs | 18 +- homestar-runtime/tests/network/dht.rs | 2 +- homestar-runtime/tests/network/rendezvous.rs | 180 ++++--- 8 files changed, 641 insertions(+), 110 deletions(-) diff --git a/homestar-runtime/src/event_handler/cache.rs b/homestar-runtime/src/event_handler/cache.rs index 10e5b89a..10a146d2 100644 --- a/homestar-runtime/src/event_handler/cache.rs +++ b/homestar-runtime/src/event_handler/cache.rs @@ -62,6 +62,7 @@ pub(crate) fn setup_cache( let tx = Arc::clone(&sender); if let Some(CacheData::OnExpiration(event)) = val.data.get("on_expiration") { + println!("~~~ Cache expiration {:?} ~~~", cause); if cause != Expired { return; } @@ -76,6 +77,7 @@ pub(crate) fn setup_cache( 
DispatchEvent::DiscoverPeers => { if let Some(CacheData::Peer(rendezvous_node)) = val.data.get("rendezvous_node") { + println!("~~~ Sending discover peers cache ~~~"); let _ = tx.send(Event::DiscoverPeers(rendezvous_node.to_owned())); }; } diff --git a/homestar-runtime/src/event_handler/notification.rs b/homestar-runtime/src/event_handler/notification.rs index 18926fac..04cfcff2 100644 --- a/homestar-runtime/src/event_handler/notification.rs +++ b/homestar-runtime/src/event_handler/notification.rs @@ -101,14 +101,16 @@ pub(crate) fn emit_network_event( ); if let Ok(json) = notification.to_json() { - if let Err(_err) = notifier.notify(Message::new(header, json)) { - // debug!( - // subject = "notification.err", - // category = "notification", - // err=?err, - // "unable to send notification {:?}", - // notification, - // ) + if let Err(err) = notifier.notify(Message::new(header, json)) { + // TODO Check on why this errs + + debug!( + subject = "notification.err", + category = "notification", + err=?err, + "unable to send notification {:?}", + notification, + ) }; } else { debug!( diff --git a/homestar-runtime/src/event_handler/notification/swarm.rs b/homestar-runtime/src/event_handler/notification/swarm.rs index 5feae5b7..5158d260 100644 --- a/homestar-runtime/src/event_handler/notification/swarm.rs +++ b/homestar-runtime/src/event_handler/notification/swarm.rs @@ -5,7 +5,12 @@ use anyhow::anyhow; use chrono::prelude::Utc; use homestar_invocation::ipld::DagJson; -use libipld::{serde::from_ipld, Ipld}; +use itertools::Itertools; +use jsonrpsee::core::StringError; +use libipld::{ + serde::{from_ipld, to_ipld}, + Ipld, +}; use libp2p::{Multiaddr, PeerId}; use schemars::{ gen::SchemaGenerator, @@ -131,6 +136,19 @@ pub enum NetworkNotification { /// mDNS discovered notification. #[schemars(rename = "discovered_mdns")] DiscoveredMdns(DiscoveredMdns), + /// Rendezvous client discovered notification. 
+ #[schemars(rename = "discovered_rendezvous")] + DiscoveredRendezvous(DiscoveredRendezvous), + /// Rendezvous client discovered notification. + #[schemars(rename = "registered_rendezvous")] + RegisteredRendezvous(RegisteredRendezvous), + /// Rendezvous discover served notification. + #[schemars(rename = "discover_served_rendezvous")] + DiscoverServedRendezvous(DiscoverServedRendezvous), + // peer_discovered_rendezvous + /// Rendezvous peer registered notification. + #[schemars(rename = "peer_registered_rendezvous")] + PeerRegisteredRendezvous(PeerRegisteredRendezvous), } impl DagJson for NetworkNotification {} @@ -148,6 +166,20 @@ impl From for Ipld { NetworkNotification::DiscoveredMdns(n) => { Ipld::Map(BTreeMap::from([("discovered_mdns".into(), n.into())])) } + NetworkNotification::DiscoveredRendezvous(n) => { + Ipld::Map(BTreeMap::from([("discovered_rendezvous".into(), n.into())])) + } + NetworkNotification::RegisteredRendezvous(n) => { + Ipld::Map(BTreeMap::from([("registered_rendezvous".into(), n.into())])) + } + NetworkNotification::DiscoverServedRendezvous(n) => Ipld::Map(BTreeMap::from([( + "discover_served_rendezvous".into(), + n.into(), + )])), + NetworkNotification::PeerRegisteredRendezvous(n) => Ipld::Map(BTreeMap::from([( + "peer_registered_rendezvous".into(), + n.into(), + )])), } } } @@ -169,6 +201,18 @@ impl TryFrom for NetworkNotification { "discovered_mdns" => Ok(NetworkNotification::DiscoveredMdns( DiscoveredMdns::try_from(val.to_owned())?, )), + "discovered_rendezvous" => Ok(NetworkNotification::DiscoveredRendezvous( + DiscoveredRendezvous::try_from(val.to_owned())?, + )), + "registered_rendezvous" => Ok(NetworkNotification::RegisteredRendezvous( + RegisteredRendezvous::try_from(val.to_owned())?, + )), + "discover_served_rendezvous" => Ok(NetworkNotification::DiscoverServedRendezvous( + DiscoverServedRendezvous::try_from(val.to_owned())?, + )), + "peer_registered_rendezvous" => Ok(NetworkNotification::PeerRegisteredRendezvous( + 
PeerRegisteredRendezvous::try_from(val.to_owned())?, + )), _ => Err(anyhow!("Unknown network notification tag type")), } } else { @@ -417,6 +461,293 @@ impl JsonSchema for DiscoveredMdns { } } +#[derive(Debug, Clone, JsonSchema)] +#[schemars(rename = "discovered_rendezvous")] +pub struct DiscoveredRendezvous { + timestamp: i64, + server: String, + peers: BTreeMap>, +} + +impl DiscoveredRendezvous { + pub(crate) fn new( + server: PeerId, + peers: BTreeMap>, + ) -> DiscoveredRendezvous { + println!("== Creating discovered rendezvous notification =="); + dbg!(peers.clone()); + dbg!(server); + + DiscoveredRendezvous { + timestamp: Utc::now().timestamp_millis(), + server: server.to_string(), + peers: peers + .iter() + .map(|(peer_id, addresses)| { + ( + peer_id.to_string(), + addresses + .iter() + .map(|address| address.to_string()) + .collect(), + ) + }) + .collect(), + } + } +} + +impl DagJson for DiscoveredRendezvous {} + +impl From for Ipld { + fn from(notification: DiscoveredRendezvous) -> Self { + let peers: BTreeMap = notification + .peers + .into_iter() + .map(|(peer_id, addresses)| { + ( + peer_id, + Ipld::List( + addresses + .iter() + .map(|address| Ipld::String(address.to_owned())) + .collect(), + ), + ) + }) + .collect(); + + let map: BTreeMap = BTreeMap::from([ + ("timestamp".into(), notification.timestamp.into()), + ("server".into(), notification.server.into()), + ("peers".into(), peers.into()), + ]); + + Ipld::Map(map) + } +} + +impl TryFrom for DiscoveredRendezvous { + type Error = anyhow::Error; + + fn try_from(ipld: Ipld) -> Result { + let peers_key: &str = "peers"; + let server_key: &str = "server"; + let map = from_ipld::>(ipld)?; + + let timestamp = from_ipld( + map.get(TIMESTAMP_KEY) + .ok_or_else(|| anyhow!("missing {TIMESTAMP_KEY}"))? + .to_owned(), + )?; + + let server = from_ipld( + map.get(server_key) + .ok_or_else(|| anyhow!("missing {server_key}"))? 
+ .to_owned(), + )?; + + // dbg!(map.get(peers_key)); + + let peers = from_ipld::>>( + map.get(peers_key) + .ok_or_else(|| anyhow!("missing {peers_key}"))? + .to_owned(), + )?; + + Ok(DiscoveredRendezvous { + timestamp, + server, + peers, + }) + } +} + +#[derive(JsonSchema, Debug, Clone)] +#[schemars(rename = "registered_rendezvous")] +pub struct RegisteredRendezvous { + timestamp: i64, + server: String, +} + +impl RegisteredRendezvous { + pub(crate) fn new(server: PeerId) -> RegisteredRendezvous { + RegisteredRendezvous { + timestamp: Utc::now().timestamp_millis(), + server: server.to_string(), + } + } +} + +impl DagJson for RegisteredRendezvous {} + +impl From for Ipld { + fn from(notification: RegisteredRendezvous) -> Self { + let map: BTreeMap = BTreeMap::from([ + ("timestamp".into(), notification.timestamp.into()), + ("server".into(), notification.server.into()), + ]); + + Ipld::Map(map) + } +} + +impl TryFrom for RegisteredRendezvous { + type Error = anyhow::Error; + + fn try_from(ipld: Ipld) -> Result { + let server_key: &str = "server"; + let map = from_ipld::>(ipld)?; + + let timestamp = from_ipld( + map.get(TIMESTAMP_KEY) + .ok_or_else(|| anyhow!("missing {TIMESTAMP_KEY}"))? + .to_owned(), + )?; + + let server = from_ipld( + map.get(server_key) + .ok_or_else(|| anyhow!("missing {server_key}"))? 
+ .to_owned(), + )?; + + Ok(RegisteredRendezvous { timestamp, server }) + } +} + +#[derive(JsonSchema, Debug, Clone)] +#[schemars(rename = "registered_rendezvous")] +pub struct DiscoverServedRendezvous { + timestamp: i64, + enquirer: String, +} + +impl DiscoverServedRendezvous { + pub(crate) fn new(enquirer: PeerId) -> DiscoverServedRendezvous { + DiscoverServedRendezvous { + timestamp: Utc::now().timestamp_millis(), + enquirer: enquirer.to_string(), + } + } +} + +impl DagJson for DiscoverServedRendezvous {} + +impl From for Ipld { + fn from(notification: DiscoverServedRendezvous) -> Self { + let map: BTreeMap = BTreeMap::from([ + ("timestamp".into(), notification.timestamp.into()), + ("enquirer".into(), notification.enquirer.into()), + ]); + + Ipld::Map(map) + } +} + +impl TryFrom for DiscoverServedRendezvous { + type Error = anyhow::Error; + + fn try_from(ipld: Ipld) -> Result { + let enquirer_key: &str = "enquirer"; + let map = from_ipld::>(ipld)?; + + let timestamp = from_ipld( + map.get(TIMESTAMP_KEY) + .ok_or_else(|| anyhow!("missing {TIMESTAMP_KEY}"))? + .to_owned(), + )?; + + let enquirer = from_ipld( + map.get(enquirer_key) + .ok_or_else(|| anyhow!("missing {enquirer_key}"))? 
+ .to_owned(), + )?; + + Ok(DiscoverServedRendezvous { + timestamp, + enquirer, + }) + } +} + +#[derive(JsonSchema, Debug, Clone)] +#[schemars(rename = "peer_registered_rendezvous")] +pub struct PeerRegisteredRendezvous { + timestamp: i64, + peer_id: String, + addresses: Vec, +} + +impl PeerRegisteredRendezvous { + pub(crate) fn new(peer_id: PeerId, addresses: Vec) -> PeerRegisteredRendezvous { + PeerRegisteredRendezvous { + timestamp: Utc::now().timestamp_millis(), + peer_id: peer_id.to_string(), + addresses: addresses + .iter() + .map(|address| address.to_string()) + .collect(), + } + } +} + +impl DagJson for PeerRegisteredRendezvous {} + +impl From for Ipld { + fn from(notification: PeerRegisteredRendezvous) -> Self { + let map: BTreeMap = BTreeMap::from([ + ("timestamp".into(), notification.timestamp.into()), + ("peer_id".into(), notification.peer_id.into()), + ( + "addresses".into(), + Ipld::List( + notification + .addresses + .iter() + .map(|address| Ipld::String(address.to_owned())) + .collect(), + ), + ), + ]); + + Ipld::Map(map) + } +} + +impl TryFrom for PeerRegisteredRendezvous { + type Error = anyhow::Error; + + fn try_from(ipld: Ipld) -> Result { + let peer_key: &str = "peer_id"; + let addresses_key: &str = "addresses"; + let map = from_ipld::>(ipld)?; + + let timestamp = from_ipld( + map.get(TIMESTAMP_KEY) + .ok_or_else(|| anyhow!("missing {TIMESTAMP_KEY}"))? + .to_owned(), + )?; + + let peer_id = from_ipld( + map.get(peer_key) + .ok_or_else(|| anyhow!("missing {peer_key}"))? + .to_owned(), + )?; + + let addresses = from_ipld( + map.get(addresses_key) + .ok_or_else(|| anyhow!("missing {addresses_key}"))? 
+ .to_owned(), + )?; + + Ok(PeerRegisteredRendezvous { + timestamp, + peer_id, + addresses, + }) + } +} + #[cfg(test)] mod test { use super::*; @@ -425,18 +756,60 @@ mod test { struct Fixtures { peer_id: PeerId, address: Multiaddr, + addresses: Vec, peers: Vec<(PeerId, Multiaddr)>, + peers_vec_addr: BTreeMap>, + } + + fn generate_fixtures() -> Fixtures { + Fixtures { + peer_id: PeerId::random(), + address: Multiaddr::from_str("/ip4/127.0.0.1/tcp/7000").unwrap(), + addresses: vec![ + Multiaddr::from_str("/ip4/127.0.0.1/tcp/7000").unwrap(), + Multiaddr::from_str("/ip4/127.0.0.1/tcp/7001").unwrap(), + ], + peers: vec![ + ( + PeerId::random(), + Multiaddr::from_str("/ip4/127.0.0.1/tcp/7000").unwrap(), + ), + ( + PeerId::random(), + Multiaddr::from_str("/ip4/127.0.0.1/tcp/7001").unwrap(), + ), + ], + peers_vec_addr: BTreeMap::from([ + ( + PeerId::random(), + vec![Multiaddr::from_str("/ip4/127.0.0.1/tcp/7000").unwrap()], + ), + ( + PeerId::random(), + vec![ + Multiaddr::from_str("/ip4/127.0.0.1/tcp/7001").unwrap(), + Multiaddr::from_str("/ip4/127.0.0.1/tcp/7002").unwrap(), + ], + ), + ]), + } } fn generate_notifications(fixtures: Fixtures) -> Vec<(i64, NetworkNotification)> { let Fixtures { peer_id, address, + addresses, peers, + peers_vec_addr, } = fixtures; let connection_established = ConnectionEstablished::new(peer_id, address.clone()); let connection_closed = ConnectionClosed::new(peer_id, address.clone()); let discovered_mdns = DiscoveredMdns::new(peers); + let discovered_rendezvous = DiscoveredRendezvous::new(peer_id, peers_vec_addr); + let registered_rendezvous = RegisteredRendezvous::new(peer_id); + let discover_served_rendezvous = DiscoverServedRendezvous::new(peer_id); + let peer_registered_rendezvous = PeerRegisteredRendezvous::new(peer_id, addresses); vec![ ( @@ -451,6 +824,22 @@ mod test { discovered_mdns.timestamp, NetworkNotification::DiscoveredMdns(discovered_mdns.clone()), ), + ( + discovered_rendezvous.timestamp, + 
NetworkNotification::DiscoveredRendezvous(discovered_rendezvous.clone()), + ), + ( + registered_rendezvous.timestamp, + NetworkNotification::RegisteredRendezvous(registered_rendezvous.clone()), + ), + ( + discover_served_rendezvous.timestamp, + NetworkNotification::DiscoverServedRendezvous(discover_served_rendezvous.clone()), + ), + ( + peer_registered_rendezvous.timestamp, + NetworkNotification::PeerRegisteredRendezvous(peer_registered_rendezvous.clone()), + ), ] } @@ -458,7 +847,9 @@ mod test { let Fixtures { peer_id, address, + addresses, peers, + peers_vec_addr, } = fixtures; match notification { @@ -482,25 +873,45 @@ mod test { ))) } } + NetworkNotification::DiscoveredRendezvous(n) => { + assert_eq!(n.timestamp, timestamp); + assert_eq!(PeerId::from_str(&n.server).unwrap(), peer_id); + + for peer in n.peers { + assert_eq!( + peer.1 + .iter() + .map(|address| Multiaddr::from_str(address).unwrap()) + .collect::>(), + peers_vec_addr[&PeerId::from_str(&peer.0).unwrap()] + ) + } + } + NetworkNotification::RegisteredRendezvous(n) => { + assert_eq!(n.timestamp, timestamp); + assert_eq!(PeerId::from_str(&n.server).unwrap(), peer_id); + } + NetworkNotification::DiscoverServedRendezvous(n) => { + assert_eq!(n.timestamp, timestamp); + assert_eq!(PeerId::from_str(&n.enquirer).unwrap(), peer_id); + } + NetworkNotification::PeerRegisteredRendezvous(n) => { + assert_eq!(n.timestamp, timestamp); + assert_eq!(PeerId::from_str(&n.peer_id).unwrap(), peer_id); + assert_eq!( + n.addresses + .iter() + .map(|address| Multiaddr::from_str(address).unwrap()) + .collect::>(), + addresses + ); + } } } #[test] fn notification_bytes_rountrip() { - let fixtures = Fixtures { - peer_id: PeerId::random(), - address: Multiaddr::from_str("/ip4/127.0.0.1/tcp/7000").unwrap(), - peers: vec![ - ( - PeerId::random(), - Multiaddr::from_str("/ip4/127.0.0.1/tcp/7000").unwrap(), - ), - ( - PeerId::random(), - Multiaddr::from_str("/ip4/127.0.0.1/tcp/7001").unwrap(), - ), - ], - }; + let fixtures = 
generate_fixtures(); // Generate notifications and convert them to bytes let notifications: Vec<(i64, Vec)> = generate_notifications(fixtures.clone()) @@ -520,20 +931,7 @@ mod test { #[test] fn notification_json_string_rountrip() { - let fixtures = Fixtures { - peer_id: PeerId::random(), - address: Multiaddr::from_str("/ip4/127.0.0.1/tcp/7000").unwrap(), - peers: vec![ - ( - PeerId::random(), - Multiaddr::from_str("/ip4/127.0.0.1/tcp/7000").unwrap(), - ), - ( - PeerId::random(), - Multiaddr::from_str("/ip4/127.0.0.1/tcp/7001").unwrap(), - ), - ], - }; + let fixtures = generate_fixtures(); // Generate notifications and convert them to JSON strings let notifications: Vec<(i64, String)> = generate_notifications(fixtures.clone()) diff --git a/homestar-runtime/src/event_handler/swarm_event.rs b/homestar-runtime/src/event_handler/swarm_event.rs index 1b571e67..8525cecc 100644 --- a/homestar-runtime/src/event_handler/swarm_event.rs +++ b/homestar-runtime/src/event_handler/swarm_event.rs @@ -37,11 +37,11 @@ use libp2p::{ rendezvous::{self, Namespace, Registration}, request_response, swarm::{dial_opts::DialOpts, SwarmEvent}, - PeerId, StreamProtocol, + Multiaddr, PeerId, StreamProtocol, }; #[cfg(feature = "websocket-notify")] use maplit::btreemap; -use std::collections::{HashMap, HashSet}; +use std::collections::{BTreeMap, HashMap, HashSet}; use tracing::{debug, error, info, warn}; pub(crate) mod record; @@ -306,6 +306,31 @@ async fn handle_swarm_event( } } + #[cfg(feature = "websocket-notify")] + { + println!("== Sending notification =="); + + notification::emit_network_event( + event_handler.ws_evt_sender(), + NetworkNotification::DiscoveredRendezvous( + notification::DiscoveredRendezvous::new( + rendezvous_node, + BTreeMap::from( + registrations + .iter() + .map(|registration| { + ( + registration.record.peer_id(), + registration.record.addresses().to_owned(), + ) + }) + .collect::>>(), + ), + ), + ), + ); + } + // Discover peers again at discovery interval event_handler 
.cache @@ -360,6 +385,14 @@ async fn handle_swarm_event( "registered self with rendezvous node" ); + #[cfg(feature = "websocket-notify")] + notification::emit_network_event( + event_handler.ws_evt_sender(), + NetworkNotification::RegisteredRendezvous( + notification::RegisteredRendezvous::new(rendezvous_node), + ), + ); + event_handler .cache .insert( @@ -417,12 +450,22 @@ async fn handle_swarm_event( } SwarmEvent::Behaviour(ComposedEvent::RendezvousServer(rendezvous_server_event)) => { match rendezvous_server_event { - rendezvous::server::Event::DiscoverServed { enquirer, .. } => debug!( - subject = "libp2p.rendezvous.server.discover", - category = "handle_swarm_event", - peer_id = enquirer.to_string(), - "served rendezvous discover request to peer" - ), + rendezvous::server::Event::DiscoverServed { enquirer, .. } => { + debug!( + subject = "libp2p.rendezvous.server.discover", + category = "handle_swarm_event", + peer_id = enquirer.to_string(), + "served rendezvous discover request to peer" + ); + + #[cfg(feature = "websocket-notify")] + notification::emit_network_event( + event_handler.ws_evt_sender(), + NetworkNotification::DiscoverServedRendezvous( + notification::DiscoverServedRendezvous::new(enquirer), + ), + ); + } rendezvous::server::Event::DiscoverNotServed { enquirer, error } => { warn!(subject = "libp2p.rendezvous.server.discover.err", category = "handle_swarm_event", @@ -430,13 +473,24 @@ async fn handle_swarm_event( err=?error, "did not serve rendezvous discover request") } - rendezvous::server::Event::PeerRegistered { peer, .. 
} => { + rendezvous::server::Event::PeerRegistered { peer, registration } => { debug!( subject = "libp2p.rendezvous.server.peer_registered", category = "handle_swarm_event", peer_id = peer.to_string(), "registered peer through rendezvous" - ) + ); + + #[cfg(feature = "websocket-notify")] + notification::emit_network_event( + event_handler.ws_evt_sender(), + NetworkNotification::PeerRegisteredRendezvous( + notification::PeerRegisteredRendezvous::new( + peer, + registration.record.addresses().to_owned(), + ), + ), + ); } rendezvous::server::Event::PeerNotRegistered { peer, diff --git a/homestar-runtime/src/lib.rs b/homestar-runtime/src/lib.rs index 49158382..e6656991 100644 --- a/homestar-runtime/src/lib.rs +++ b/homestar-runtime/src/lib.rs @@ -72,6 +72,7 @@ pub use db::{utils::Health, Db}; pub(crate) mod libp2p; pub use logger::*; pub(crate) mod metrics; +#[cfg(feature = "websocket-notify")] pub use event_handler::notification::{receipt::ReceiptNotification, swarm::NetworkNotification}; #[allow(unused_imports)] pub(crate) use event_handler::EventHandler; diff --git a/homestar-runtime/src/network/webserver/rpc.rs b/homestar-runtime/src/network/webserver/rpc.rs index 200242af..c3095898 100644 --- a/homestar-runtime/src/network/webserver/rpc.rs +++ b/homestar-runtime/src/network/webserver/rpc.rs @@ -217,6 +217,7 @@ where let sink = pending.accept().await?; let rx = ctx.evt_notifier.inner().subscribe(); let stream = BroadcastStream::new(rx); + println!("+++ About to handle event subscription"); Self::handle_event_subscription( sink, stream, @@ -292,11 +293,14 @@ where let rt_hdl = Handle::current(); rt_hdl.spawn(async move { loop { + println!("+*+_*+*+*+*+*+*+*+*+*+*++"); select! { _ = sink.closed() => { + println!("+++ SINK CLOSED +++"); break Ok(()); } next_msg = stream.next() => { + println!("+++ STREAM NEXT +++"); let msg = match next_msg { Some(Ok(notifier::Message { header: Header { @@ -304,16 +308,24 @@ where .. 
}, payload, - })) if evt == subscription_type => payload, - Some(Ok(_)) => continue, + })) if evt == subscription_type => { + println!("+++ EVENT WITH PAYLOAD +++"); + payload + }, + Some(Ok(_)) => { + println!("+++ EVENT WITH SOME OTHER OK +++"); + continue }, Some(Err(err)) => { + println!("+++ EVENT WITH ERROR +++"); error!(subject = "subscription.event.err", category = "jsonrpc.subscription", err=?err, "subscription stream error"); break Err(err.into()); } - None => break Ok(()), + None => { + println!("+++ EVENT WITH NONE +++"); + break Ok(()) }, }; let sub_msg = SubscriptionMessage::from_json(&msg)?; match sink.try_send(sub_msg) { diff --git a/homestar-runtime/tests/network/dht.rs b/homestar-runtime/tests/network/dht.rs index 4d6bf33c..6783955f 100644 --- a/homestar-runtime/tests/network/dht.rs +++ b/homestar-runtime/tests/network/dht.rs @@ -87,7 +87,7 @@ fn test_libp2p_dht_records_integration() -> Result<()> { .arg(config1.filename()) .arg("--db") .arg(&proc_info1.db_path) - .stdout(Stdio::piped()) + // .stdout(Stdio::piped()) .spawn() .unwrap(); let proc_guard1 = ChildGuard::new(homestar_proc1); diff --git a/homestar-runtime/tests/network/rendezvous.rs b/homestar-runtime/tests/network/rendezvous.rs index a126b36f..0801a40e 100644 --- a/homestar-runtime/tests/network/rendezvous.rs +++ b/homestar-runtime/tests/network/rendezvous.rs @@ -3,13 +3,19 @@ use crate::{ utils::{ check_for_line_with, count_lines_where, kill_homestar, listen_addr, multiaddr, retrieve_output, wait_for_socket_connection, wait_for_socket_connection_v6, ChildGuard, - ProcInfo, BIN_NAME, ED25519MULTIHASH, ED25519MULTIHASH2, ED25519MULTIHASH3, - ED25519MULTIHASH4, ED25519MULTIHASH5, SECP256K1MULTIHASH, + ProcInfo, TimeoutFutureExt, BIN_NAME, ED25519MULTIHASH, ED25519MULTIHASH2, + ED25519MULTIHASH3, ED25519MULTIHASH4, ED25519MULTIHASH5, SECP256K1MULTIHASH, }, }; use anyhow::Result; +use jsonrpsee::{ + core::client::{Subscription, SubscriptionClientT}, + rpc_params, + 
ws_client::WsClientBuilder, +}; use once_cell::sync::Lazy; use std::{ + net::Ipv4Addr, path::PathBuf, process::{Command, Stdio}, thread, @@ -17,6 +23,8 @@ use std::{ }; static BIN: Lazy = Lazy::new(|| assert_cmd::cargo::cargo_bin(BIN_NAME)); +const SUBSCRIBE_NETWORK_EVENTS_ENDPOINT: &str = "subscribe_network_events"; +const UNSUBSCRIBE_NETWORK_EVENTS_ENDPOINT: &str = "unsubscribe_network_events"; #[test] #[serial_test::parallel] @@ -126,9 +134,14 @@ fn test_libp2p_connect_rendezvous_discovery_integration() -> Result<()> { // TODO When we have WebSocket push events, listen on a registration event instead of using an arbitrary sleep thread::sleep(Duration::from_secs(2)); + // TODO Add notification listener to check for when client 1 registers with server + // and server acknowledges registration + let toml3 = format!( r#" [node] + [node.network] + poll_cache_interval = 1000 [node.network.keypair_config] existing = {{ key_type = "ed25519", path = "./fixtures/__testkey_ed25519_2.pem" }} [node.network.libp2p] @@ -157,7 +170,7 @@ fn test_libp2p_connect_rendezvous_discovery_integration() -> Result<()> { .arg(config3.filename()) .arg("--db") .arg(&proc_info3.db_path) - .stdout(Stdio::piped()) + // .stdout(Stdio::piped()) .spawn() .unwrap(); let proc_guard_client2 = ChildGuard::new(rendezvous_client2); @@ -166,60 +179,97 @@ fn test_libp2p_connect_rendezvous_discovery_integration() -> Result<()> { panic!("Homestar server/runtime failed to start in time"); } - // Collect logs for five seconds then kill proceses. - let dead_server = kill_homestar(proc_guard_server.take(), Some(Duration::from_secs(5))); - let _ = kill_homestar(proc_guard_client1.take(), Some(Duration::from_secs(5))); - let dead_client2 = kill_homestar(proc_guard_client2.take(), Some(Duration::from_secs(5))); - - // Retrieve logs. 
- let stdout_server = retrieve_output(dead_server); - let stdout_client2 = retrieve_output(dead_client2); - - // Check rendezvous server registered the client one - let registered_client_one = check_for_line_with( - stdout_server.clone(), - vec!["registered peer through rendezvous", SECP256K1MULTIHASH], - ); - - // Check rendezvous served a discover request to client two - let served_discovery_to_client_two = check_for_line_with( - stdout_server.clone(), - vec![ - "served rendezvous discover request to peer", - ED25519MULTIHASH2, - ], - ); - - assert!(registered_client_one); - assert!(served_discovery_to_client_two); - - // Check that client two connected to client one. - let two_connected_to_one = check_for_line_with( - stdout_client2.clone(), - vec!["peer connection established", SECP256K1MULTIHASH], - ); - - // Check client one was added to the Kademlia table - let one_addded_to_dht = check_for_line_with( - stdout_client2.clone(), - vec![ - "added identified node to kademlia routing table", - SECP256K1MULTIHASH, - ], - ); - - // Check that DHT routing table was updated with client one - let one_in_dht_routing_table = check_for_line_with( - stdout_client2.clone(), - vec![ - "kademlia routing table updated with peer", - SECP256K1MULTIHASH, - ], - ); - - assert!(one_addded_to_dht); - assert!(one_in_dht_routing_table); - assert!(two_connected_to_one); + tokio_test::block_on(async { + let ws_url3 = format!("ws://{}:{}", Ipv4Addr::LOCALHOST, ws_port3); + let client3 = WsClientBuilder::default() + .build(ws_url3.clone()) + .await + .unwrap(); + + let mut sub3: Subscription> = client3 + .subscribe( + SUBSCRIBE_NETWORK_EVENTS_ENDPOINT, + rpc_params![], + UNSUBSCRIBE_NETWORK_EVENTS_ENDPOINT, + ) + .await + .unwrap(); + + println!("--- Created sub3 ---"); + + // TODO Listen for client 2 discovered, server discover served, and client 1 connected to client 2 + + // Poll for discovered rendezvous message + loop { + if let Ok(msg) = 
sub3.next().with_timeout(Duration::from_secs(60)).await { + let json: serde_json::Value = + serde_json::from_slice(&msg.unwrap().unwrap()).unwrap(); + + println!("{json}"); + + if json["discovered_rendezvous"].is_object() { + break; + } + } else { + panic!("Node two did not receive rendezvous discovery from server in time"); + } + } + + // Collect logs for five seconds then kill proceses. + let dead_server = kill_homestar(proc_guard_server.take(), Some(Duration::from_secs(15))); + let _ = kill_homestar(proc_guard_client1.take(), Some(Duration::from_secs(15))); + let dead_client2 = kill_homestar(proc_guard_client2.take(), Some(Duration::from_secs(15))); + + // Retrieve logs. + let stdout_server = retrieve_output(dead_server); + let stdout_client2 = retrieve_output(dead_client2); + + // Check rendezvous server registered the client one + let registered_client_one = check_for_line_with( + stdout_server.clone(), + vec!["registered peer through rendezvous", SECP256K1MULTIHASH], + ); + + // Check rendezvous served a discover request to client two + let served_discovery_to_client_two = check_for_line_with( + stdout_server.clone(), + vec![ + "served rendezvous discover request to peer", + ED25519MULTIHASH2, + ], + ); + + assert!(registered_client_one); + assert!(served_discovery_to_client_two); + + // Check that client two connected to client one. 
+ let two_connected_to_one = check_for_line_with( + stdout_client2.clone(), + vec!["peer connection established", SECP256K1MULTIHASH], + ); + + // Check client one was added to the Kademlia table + let one_addded_to_dht = check_for_line_with( + stdout_client2.clone(), + vec![ + "added identified node to kademlia routing table", + SECP256K1MULTIHASH, + ], + ); + + // Check that DHT routing table was updated with client one + let one_in_dht_routing_table = check_for_line_with( + stdout_client2.clone(), + vec![ + "kademlia routing table updated with peer", + SECP256K1MULTIHASH, + ], + ); + + assert!(one_addded_to_dht); + assert!(one_in_dht_routing_table); + assert!(two_connected_to_one); + }); Ok(()) } @@ -332,6 +382,8 @@ fn test_libp2p_disconnect_rendezvous_discovery_integration() -> Result<()> { // TODO When we have WebSocket push events, listen on a registration event instead of using an arbitrary sleep. thread::sleep(Duration::from_secs(2)); + // TODO Wait for clint 1 to register with server, server confirm registration + let toml3 = format!( r#" [node] @@ -372,6 +424,8 @@ fn test_libp2p_disconnect_rendezvous_discovery_integration() -> Result<()> { panic!("Homestar server/runtime failed to start in time"); } + // TODO Listen for client 2 connection closed with client 1 (on client 2) + // Kill server and client one after five seconds let _ = kill_homestar(proc_guard_server.take(), Some(Duration::from_secs(5))); let _ = kill_homestar(proc_guard_client1.take(), Some(Duration::from_secs(5))); @@ -499,6 +553,9 @@ fn test_libp2p_rendezvous_renew_registration_integration() -> Result<()> { panic!("Homestar server/runtime failed to start in time"); } + // TODO Listen for client registered and server registered peer messages + // with renewal should be more than one. + // Collect logs for five seconds then kill proceses. 
let dead_server = kill_homestar(rendezvous_server, Some(Duration::from_secs(5))); let dead_client = kill_homestar(rendezvous_client1, Some(Duration::from_secs(5))); @@ -591,6 +648,8 @@ fn test_libp2p_rendezvous_rediscovery_integration() -> Result<()> { let toml2 = format!( r#" [node] + [node.network] + poll_cache_interval = 100 [node.network.keypair_config] existing = {{ key_type = "ed25519", path = "./fixtures/__testkey_ed25519_4.pem" }} [node.network.libp2p] @@ -630,9 +689,12 @@ fn test_libp2p_rendezvous_rediscovery_integration() -> Result<()> { panic!("Homestar server/runtime failed to start in time"); } + // TODO Listen for client discover and server discover served messages + // should be more than one for both (or move on at two) + // Collect logs for five seconds then kill proceses. - let dead_server = kill_homestar(proc_guard_server.take(), Some(Duration::from_secs(5))); - let dead_client = kill_homestar(proc_guard_client1.take(), Some(Duration::from_secs(5))); + let dead_server = kill_homestar(proc_guard_server.take(), Some(Duration::from_secs(15))); + let dead_client = kill_homestar(proc_guard_client1.take(), Some(Duration::from_secs(15))); // Retrieve logs. 
let stdout_server = retrieve_output(dead_server); From cebd548ab23f061f476d02d1dd113eeeb7c9ba04 Mon Sep 17 00:00:00 2001 From: Zeeshan Lakhani Date: Tue, 30 Jan 2024 16:37:37 -0500 Subject: [PATCH 23/75] spawn + cache --- homestar-runtime/src/event_handler/cache.rs | 59 +++++++++++--------- homestar-runtime/tests/network/rendezvous.rs | 2 +- 2 files changed, 35 insertions(+), 26 deletions(-) diff --git a/homestar-runtime/src/event_handler/cache.rs b/homestar-runtime/src/event_handler/cache.rs index 10a146d2..17cfbd36 100644 --- a/homestar-runtime/src/event_handler/cache.rs +++ b/homestar-runtime/src/event_handler/cache.rs @@ -3,8 +3,11 @@ use crate::{channel, event_handler::Event}; use libp2p::PeerId; use moka::{ - future::Cache, - notification::RemovalCause::{self, Expired}, + future::{Cache, FutureExt}, + notification::{ + ListenerFuture, + RemovalCause::{self, Expired}, + }, Expiry as ExpiryBase, }; use std::{ @@ -58,28 +61,34 @@ pub(crate) enum DispatchEvent { pub(crate) fn setup_cache( sender: Arc>, ) -> Cache { - let eviction_listener = move |_key: Arc, val: CacheValue, cause: RemovalCause| { - let tx = Arc::clone(&sender); + let eviction_listener = + move |_key: Arc, val: CacheValue, cause: RemovalCause| -> ListenerFuture { + let tx = Arc::clone(&sender); - if let Some(CacheData::OnExpiration(event)) = val.data.get("on_expiration") { - println!("~~~ Cache expiration {:?} ~~~", cause); - if cause != Expired { - return; - } + async move { + if let Some(CacheData::OnExpiration(event)) = val.data.get("on_expiration") { + println!("~~~ Cache expiration {:?} ~~~", cause); + if cause != Expired { + return; + } - match event { - DispatchEvent::RegisterPeer => { - if let Some(CacheData::Peer(rendezvous_node)) = val.data.get("rendezvous_node") - { - let _ = tx.send(Event::RegisterPeer(rendezvous_node.to_owned())); - }; - } - DispatchEvent::DiscoverPeers => { - if let Some(CacheData::Peer(rendezvous_node)) = val.data.get("rendezvous_node") - { - println!("~~~ Sending 
discover peers cache ~~~"); - let _ = tx.send(Event::DiscoverPeers(rendezvous_node.to_owned())); - }; + match event { + DispatchEvent::RegisterPeer => { + if let Some(CacheData::Peer(rendezvous_node)) = + val.data.get("rendezvous_node") + { + let _ = tx.send(Event::RegisterPeer(rendezvous_node.to_owned())); + }; + } + DispatchEvent::DiscoverPeers => { + if let Some(CacheData::Peer(rendezvous_node)) = + val.data.get("rendezvous_node") + { + println!("~~~ Sending discover peers cache ~~~"); + let _ = tx.send(Event::DiscoverPeers(rendezvous_node.to_owned())); + }; + } + } } DispatchEvent::DialPeer => { if let Some(CacheData::Peer(node)) = val.data.get("node") { @@ -87,11 +96,11 @@ pub(crate) fn setup_cache( }; } } - } - }; + .boxed() + }; Cache::builder() .expire_after(Expiry) - .eviction_listener(eviction_listener) + .async_eviction_listener(eviction_listener) .build() } diff --git a/homestar-runtime/tests/network/rendezvous.rs b/homestar-runtime/tests/network/rendezvous.rs index 0801a40e..3186bc86 100644 --- a/homestar-runtime/tests/network/rendezvous.rs +++ b/homestar-runtime/tests/network/rendezvous.rs @@ -179,7 +179,7 @@ fn test_libp2p_connect_rendezvous_discovery_integration() -> Result<()> { panic!("Homestar server/runtime failed to start in time"); } - tokio_test::block_on(async { + tokio_test::task::spawn(async { let ws_url3 = format!("ws://{}:{}", Ipv4Addr::LOCALHOST, ws_port3); let client3 = WsClientBuilder::default() .build(ws_url3.clone()) From 9001e97208934873aa9844af38cae63dbb98149e Mon Sep 17 00:00:00 2001 From: Brian Ginsburg Date: Wed, 31 Jan 2024 09:07:48 -0800 Subject: [PATCH 24/75] wip: More work on discovery notifications --- homestar-runtime/src/event_handler/cache.rs | 2 - .../src/event_handler/notification.rs | 3 +- .../src/event_handler/notification/swarm.rs | 8 +- .../src/event_handler/swarm_event.rs | 38 +-- homestar-runtime/src/network/webserver/rpc.rs | 18 +- homestar-runtime/tests/network/dht.rs | 2 +- 
homestar-runtime/tests/network/rendezvous.rs | 312 ++++++++++++++---- 7 files changed, 269 insertions(+), 114 deletions(-) diff --git a/homestar-runtime/src/event_handler/cache.rs b/homestar-runtime/src/event_handler/cache.rs index 17cfbd36..b1b4e81a 100644 --- a/homestar-runtime/src/event_handler/cache.rs +++ b/homestar-runtime/src/event_handler/cache.rs @@ -67,7 +67,6 @@ pub(crate) fn setup_cache( async move { if let Some(CacheData::OnExpiration(event)) = val.data.get("on_expiration") { - println!("~~~ Cache expiration {:?} ~~~", cause); if cause != Expired { return; } @@ -84,7 +83,6 @@ pub(crate) fn setup_cache( if let Some(CacheData::Peer(rendezvous_node)) = val.data.get("rendezvous_node") { - println!("~~~ Sending discover peers cache ~~~"); let _ = tx.send(Event::DiscoverPeers(rendezvous_node.to_owned())); }; } diff --git a/homestar-runtime/src/event_handler/notification.rs b/homestar-runtime/src/event_handler/notification.rs index 04cfcff2..5fa66381 100644 --- a/homestar-runtime/src/event_handler/notification.rs +++ b/homestar-runtime/src/event_handler/notification.rs @@ -102,8 +102,7 @@ pub(crate) fn emit_network_event( if let Ok(json) = notification.to_json() { if let Err(err) = notifier.notify(Message::new(header, json)) { - // TODO Check on why this errs - + // TODO Check on why this causes connection closed log errors debug!( subject = "notification.err", category = "notification", diff --git a/homestar-runtime/src/event_handler/notification/swarm.rs b/homestar-runtime/src/event_handler/notification/swarm.rs index 5158d260..f4ca078e 100644 --- a/homestar-runtime/src/event_handler/notification/swarm.rs +++ b/homestar-runtime/src/event_handler/notification/swarm.rs @@ -232,8 +232,8 @@ pub struct ConnectionEstablished { impl ConnectionEstablished { pub(crate) fn new(peer_id: PeerId, address: Multiaddr) -> ConnectionEstablished { ConnectionEstablished { - peer_id: peer_id.to_string(), timestamp: Utc::now().timestamp_millis(), + peer_id: peer_id.to_string(), 
address: address.to_string(), } } @@ -474,10 +474,6 @@ impl DiscoveredRendezvous { server: PeerId, peers: BTreeMap>, ) -> DiscoveredRendezvous { - println!("== Creating discovered rendezvous notification =="); - dbg!(peers.clone()); - dbg!(server); - DiscoveredRendezvous { timestamp: Utc::now().timestamp_millis(), server: server.to_string(), @@ -547,8 +543,6 @@ impl TryFrom for DiscoveredRendezvous { .to_owned(), )?; - // dbg!(map.get(peers_key)); - let peers = from_ipld::>>( map.get(peers_key) .ok_or_else(|| anyhow!("missing {peers_key}"))? diff --git a/homestar-runtime/src/event_handler/swarm_event.rs b/homestar-runtime/src/event_handler/swarm_event.rs index 8525cecc..e8ae5303 100644 --- a/homestar-runtime/src/event_handler/swarm_event.rs +++ b/homestar-runtime/src/event_handler/swarm_event.rs @@ -307,29 +307,25 @@ async fn handle_swarm_event( } #[cfg(feature = "websocket-notify")] - { - println!("== Sending notification =="); - - notification::emit_network_event( - event_handler.ws_evt_sender(), - NetworkNotification::DiscoveredRendezvous( - notification::DiscoveredRendezvous::new( - rendezvous_node, - BTreeMap::from( - registrations - .iter() - .map(|registration| { - ( - registration.record.peer_id(), - registration.record.addresses().to_owned(), - ) - }) - .collect::>>(), - ), + notification::emit_network_event( + event_handler.ws_evt_sender(), + NetworkNotification::DiscoveredRendezvous( + notification::DiscoveredRendezvous::new( + rendezvous_node, + BTreeMap::from( + registrations + .iter() + .map(|registration| { + ( + registration.record.peer_id(), + registration.record.addresses().to_owned(), + ) + }) + .collect::>>(), ), ), - ); - } + ), + ); // Discover peers again at discovery interval event_handler diff --git a/homestar-runtime/src/network/webserver/rpc.rs b/homestar-runtime/src/network/webserver/rpc.rs index c3095898..200242af 100644 --- a/homestar-runtime/src/network/webserver/rpc.rs +++ b/homestar-runtime/src/network/webserver/rpc.rs @@ -217,7 
+217,6 @@ where let sink = pending.accept().await?; let rx = ctx.evt_notifier.inner().subscribe(); let stream = BroadcastStream::new(rx); - println!("+++ About to handle event subscription"); Self::handle_event_subscription( sink, stream, @@ -293,14 +292,11 @@ where let rt_hdl = Handle::current(); rt_hdl.spawn(async move { loop { - println!("+*+_*+*+*+*+*+*+*+*+*+*++"); select! { _ = sink.closed() => { - println!("+++ SINK CLOSED +++"); break Ok(()); } next_msg = stream.next() => { - println!("+++ STREAM NEXT +++"); let msg = match next_msg { Some(Ok(notifier::Message { header: Header { @@ -308,24 +304,16 @@ where .. }, payload, - })) if evt == subscription_type => { - println!("+++ EVENT WITH PAYLOAD +++"); - payload - }, - Some(Ok(_)) => { - println!("+++ EVENT WITH SOME OTHER OK +++"); - continue }, + })) if evt == subscription_type => payload, + Some(Ok(_)) => continue, Some(Err(err)) => { - println!("+++ EVENT WITH ERROR +++"); error!(subject = "subscription.event.err", category = "jsonrpc.subscription", err=?err, "subscription stream error"); break Err(err.into()); } - None => { - println!("+++ EVENT WITH NONE +++"); - break Ok(()) }, + None => break Ok(()), }; let sub_msg = SubscriptionMessage::from_json(&msg)?; match sink.try_send(sub_msg) { diff --git a/homestar-runtime/tests/network/dht.rs b/homestar-runtime/tests/network/dht.rs index 6783955f..4d6bf33c 100644 --- a/homestar-runtime/tests/network/dht.rs +++ b/homestar-runtime/tests/network/dht.rs @@ -87,7 +87,7 @@ fn test_libp2p_dht_records_integration() -> Result<()> { .arg(config1.filename()) .arg("--db") .arg(&proc_info1.db_path) - // .stdout(Stdio::piped()) + .stdout(Stdio::piped()) .spawn() .unwrap(); let proc_guard1 = ChildGuard::new(homestar_proc1); diff --git a/homestar-runtime/tests/network/rendezvous.rs b/homestar-runtime/tests/network/rendezvous.rs index 3186bc86..f084dd1c 100644 --- a/homestar-runtime/tests/network/rendezvous.rs +++ b/homestar-runtime/tests/network/rendezvous.rs @@ -130,18 
+130,63 @@ fn test_libp2p_connect_rendezvous_discovery_integration() -> Result<()> { panic!("Homestar server/runtime failed to start in time"); } - // Wait for registration to complete - // TODO When we have WebSocket push events, listen on a registration event instead of using an arbitrary sleep - thread::sleep(Duration::from_secs(2)); + tokio_test::task::spawn(async { + // Subscribe to rendezvous server + let ws_url1 = format!("ws://{}:{}", Ipv4Addr::LOCALHOST, ws_port1); + let client1 = WsClientBuilder::default().build(ws_url1).await.unwrap(); + let mut sub1: Subscription> = client1 + .subscribe( + SUBSCRIBE_NETWORK_EVENTS_ENDPOINT, + rpc_params![], + UNSUBSCRIBE_NETWORK_EVENTS_ENDPOINT, + ) + .await + .unwrap(); + + // Subscribe to rendezvous client one + let ws_url2 = format!("ws://{}:{}", Ipv4Addr::LOCALHOST, ws_port2); + let client2 = WsClientBuilder::default().build(ws_url2).await.unwrap(); + let mut sub2: Subscription> = client2 + .subscribe( + SUBSCRIBE_NETWORK_EVENTS_ENDPOINT, + rpc_params![], + UNSUBSCRIBE_NETWORK_EVENTS_ENDPOINT, + ) + .await + .unwrap(); - // TODO Add notification listener to check for when client 1 registers with server - // and server acknowledges registration + // Poll for client one registered with server + loop { + if let Ok(msg) = sub2.next().with_timeout(Duration::from_secs(30)).await { + let json: serde_json::Value = + serde_json::from_slice(&msg.unwrap().unwrap()).unwrap(); - let toml3 = format!( - r#" + if json["registered_rendezvous"].is_object() { + break; + } + } else { + panic!("Rendezvous client one did not register with server in time"); + } + } + + // Poll for server registered client one + loop { + if let Ok(msg) = sub1.next().with_timeout(Duration::from_secs(30)).await { + let json: serde_json::Value = + serde_json::from_slice(&msg.unwrap().unwrap()).unwrap(); + + if json["peer_registered_rendezvous"].is_object() { + break; + } + } else { + panic!("Rendezvous server did not confirm client one registration in time"); 
+ } + } + + // Start a peer that will discover the registrant through the rendezvous server + let toml3 = format!( + r#" [node] - [node.network] - poll_cache_interval = 1000 [node.network.keypair_config] existing = {{ key_type = "ed25519", path = "./fixtures/__testkey_ed25519_2.pem" }} [node.network.libp2p] @@ -156,11 +201,10 @@ fn test_libp2p_connect_rendezvous_discovery_integration() -> Result<()> { [node.network.webserver] port = {ws_port3} "# - ); - let config3 = make_config!(toml3); + ); + let config3 = make_config!(toml3); - // Start a peer that will discover the registrant through the rendezvous server - let rendezvous_client2 = Command::new(BIN.as_os_str()) + let rendezvous_client2 = Command::new(BIN.as_os_str()) .env( "RUST_LOG", "homestar=debug,homestar_runtime=debug,libp2p=debug,libp2p_gossipsub::behaviour=debug", @@ -170,22 +214,20 @@ fn test_libp2p_connect_rendezvous_discovery_integration() -> Result<()> { .arg(config3.filename()) .arg("--db") .arg(&proc_info3.db_path) - // .stdout(Stdio::piped()) + .stdout(Stdio::piped()) .spawn() .unwrap(); - let proc_guard_client2 = ChildGuard::new(rendezvous_client2); + let proc_guard_client2 = ChildGuard::new(rendezvous_client2); - if wait_for_socket_connection(ws_port3, 1000).is_err() { - panic!("Homestar server/runtime failed to start in time"); - } + if wait_for_socket_connection(ws_port3, 1000).is_err() { + panic!("Homestar server/runtime failed to start in time"); + } - tokio_test::task::spawn(async { let ws_url3 = format!("ws://{}:{}", Ipv4Addr::LOCALHOST, ws_port3); let client3 = WsClientBuilder::default() .build(ws_url3.clone()) .await .unwrap(); - let mut sub3: Subscription> = client3 .subscribe( SUBSCRIBE_NETWORK_EVENTS_ENDPOINT, @@ -195,30 +237,48 @@ fn test_libp2p_connect_rendezvous_discovery_integration() -> Result<()> { .await .unwrap(); - println!("--- Created sub3 ---"); - - // TODO Listen for client 2 discovered, server discover served, and client 1 connected to client 2 - // Poll for discovered 
rendezvous message + let mut discovered_rendezvous = false; + let mut connection_established = false; loop { - if let Ok(msg) = sub3.next().with_timeout(Duration::from_secs(60)).await { + if let Ok(msg) = sub3.next().with_timeout(Duration::from_secs(30)).await { let json: serde_json::Value = serde_json::from_slice(&msg.unwrap().unwrap()).unwrap(); - println!("{json}"); - if json["discovered_rendezvous"].is_object() { + discovered_rendezvous = true + } else if json["connection_established"].is_object() + && json["connection_established"]["peer_id"] == SECP256K1MULTIHASH + { + connection_established = true + } + + if discovered_rendezvous && connection_established { break; } } else { - panic!("Node two did not receive rendezvous discovery from server in time"); + panic!("Client two did not receive rendezvous discovery from server in time"); } } - // Collect logs for five seconds then kill proceses. - let dead_server = kill_homestar(proc_guard_server.take(), Some(Duration::from_secs(15))); - let _ = kill_homestar(proc_guard_client1.take(), Some(Duration::from_secs(15))); - let dead_client2 = kill_homestar(proc_guard_client2.take(), Some(Duration::from_secs(15))); + // Poll for discovery served by rendezvous server + loop { + if let Ok(msg) = sub1.next().with_timeout(Duration::from_secs(30)).await { + let json: serde_json::Value = + serde_json::from_slice(&msg.unwrap().unwrap()).unwrap(); + + if json["discover_served_rendezvous"].is_object() { + break; + } + } else { + panic!("Rendezvous server did not serve discovery in time"); + } + } + + // Kill processes. + let dead_server = kill_homestar(proc_guard_server.take(), None); + let _ = kill_homestar(proc_guard_client1.take(), None); + let dead_client2 = kill_homestar(proc_guard_client2.take(), None); // Retrieve logs. 
let stdout_server = retrieve_output(dead_server); @@ -378,14 +438,67 @@ fn test_libp2p_disconnect_rendezvous_discovery_integration() -> Result<()> { panic!("Homestar server/runtime failed to start in time"); } - // Wait for registration to complete. - // TODO When we have WebSocket push events, listen on a registration event instead of using an arbitrary sleep. - thread::sleep(Duration::from_secs(2)); + tokio_test::task::spawn(async { + // Subscribe to rendezvous server + let ws_url1 = format!("ws://{}:{}", Ipv4Addr::LOCALHOST, ws_port1); + let client1 = WsClientBuilder::default().build(ws_url1).await.unwrap(); + let mut sub1: Subscription> = client1 + .subscribe( + SUBSCRIBE_NETWORK_EVENTS_ENDPOINT, + rpc_params![], + UNSUBSCRIBE_NETWORK_EVENTS_ENDPOINT, + ) + .await + .unwrap(); - // TODO Wait for clint 1 to register with server, server confirm registration + // Subscribe to rendezvous client one + let ws_url2 = format!("ws://{}:{}", Ipv4Addr::LOCALHOST, ws_port2); + let client2 = WsClientBuilder::default().build(ws_url2).await.unwrap(); + let mut sub2: Subscription> = client2 + .subscribe( + SUBSCRIBE_NETWORK_EVENTS_ENDPOINT, + rpc_params![], + UNSUBSCRIBE_NETWORK_EVENTS_ENDPOINT, + ) + .await + .unwrap(); - let toml3 = format!( - r#" + // Wait for registration to complete. + // TODO When we have WebSocket push events, listen on a registration event instead of using an arbitrary sleep. 
+ // thread::sleep(Duration::from_secs(2)); + + // TODO Wait for clint 1 to register with server, server confirm registration + + // Poll for client one registered with server + loop { + if let Ok(msg) = sub2.next().with_timeout(Duration::from_secs(30)).await { + let json: serde_json::Value = + serde_json::from_slice(&msg.unwrap().unwrap()).unwrap(); + + if json["registered_rendezvous"].is_object() { + break; + } + } else { + panic!("Rendezvous client one did not register with server in time"); + } + } + + // Poll for server registered client one + loop { + if let Ok(msg) = sub1.next().with_timeout(Duration::from_secs(30)).await { + let json: serde_json::Value = + serde_json::from_slice(&msg.unwrap().unwrap()).unwrap(); + + if json["peer_registered_rendezvous"].is_object() { + break; + } + } else { + panic!("Rendezvous server did not confirm client one registration in time"); + } + } + + let toml3 = format!( + r#" [node] [node.network.keypair_config] existing = {{ key_type = "ed25519", path = "./fixtures/__testkey_ed25519_2.pem" }} @@ -401,11 +514,11 @@ fn test_libp2p_disconnect_rendezvous_discovery_integration() -> Result<()> { [node.network.webserver] port = {ws_port3} "# - ); - let config3 = make_config!(toml3); + ); + let config3 = make_config!(toml3); - // Start a peer that will discover the registrant through the rendezvous server - let rendezvous_client2 = Command::new(BIN.as_os_str()) + // Start a peer that will discover the registrant through the rendezvous server + let rendezvous_client2 = Command::new(BIN.as_os_str()) .env( "RUST_LOG", "homestar=debug,homestar_runtime=debug,libp2p=debug,libp2p_gossipsub::behaviour=debug", @@ -418,38 +531,105 @@ fn test_libp2p_disconnect_rendezvous_discovery_integration() -> Result<()> { .stdout(Stdio::piped()) .spawn() .unwrap(); - let proc_guard_client2 = ChildGuard::new(rendezvous_client2); + let proc_guard_client2 = ChildGuard::new(rendezvous_client2); - if wait_for_socket_connection(ws_port3, 1000).is_err() { - 
panic!("Homestar server/runtime failed to start in time"); - } + if wait_for_socket_connection(ws_port3, 1000).is_err() { + panic!("Homestar server/runtime failed to start in time"); + } + + let ws_url3 = format!("ws://{}:{}", Ipv4Addr::LOCALHOST, ws_port3); + let client3 = WsClientBuilder::default() + .build(ws_url3.clone()) + .await + .unwrap(); + let mut sub3: Subscription> = client3 + .subscribe( + SUBSCRIBE_NETWORK_EVENTS_ENDPOINT, + rpc_params![], + UNSUBSCRIBE_NETWORK_EVENTS_ENDPOINT, + ) + .await + .unwrap(); - // TODO Listen for client 2 connection closed with client 1 (on client 2) + // Poll for discovered rendezvous message + let mut discovered_rendezvous = false; + let mut connection_established = false; + loop { + if let Ok(msg) = sub3.next().with_timeout(Duration::from_secs(30)).await { + let json: serde_json::Value = + serde_json::from_slice(&msg.unwrap().unwrap()).unwrap(); - // Kill server and client one after five seconds - let _ = kill_homestar(proc_guard_server.take(), Some(Duration::from_secs(5))); - let _ = kill_homestar(proc_guard_client1.take(), Some(Duration::from_secs(5))); + if json["discovered_rendezvous"].is_object() { + discovered_rendezvous = true + } else if json["connection_established"].is_object() + && json["connection_established"]["peer_id"] == SECP256K1MULTIHASH + { + connection_established = true + } - // Collect logs for seven seconds then kill process. - let dead_client2 = kill_homestar(proc_guard_client2.take(), Some(Duration::from_secs(7))); + if discovered_rendezvous && connection_established { + break; + } + } else { + panic!("Client two did not receive rendezvous discovery from server in time"); + } + } - // Retrieve logs. 
- let stdout = retrieve_output(dead_client2); + // Poll for discovery served by rendezvous server + loop { + if let Ok(msg) = sub1.next().with_timeout(Duration::from_secs(30)).await { + let json: serde_json::Value = + serde_json::from_slice(&msg.unwrap().unwrap()).unwrap(); - // Check that client two disconnected from client one. - let two_disconnected_from_one = check_for_line_with( - stdout.clone(), - vec!["peer connection closed", SECP256K1MULTIHASH], - ); + if json["discover_served_rendezvous"].is_object() { + break; + } + } else { + panic!("Rendezvous server did not serve discovery in time"); + } + } - // Check that client two was removed from the Kademlia table - let two_removed_from_dht_table = check_for_line_with( - stdout.clone(), - vec!["removed peer from kademlia table", SECP256K1MULTIHASH], - ); + // Kill server and client one. + let _ = kill_homestar(proc_guard_server.take(), None); + let _ = kill_homestar(proc_guard_client1.take(), None); - assert!(two_disconnected_from_one); - assert!(two_removed_from_dht_table); + // Poll for client two disconnected from client one. + loop { + if let Ok(msg) = sub3.next().with_timeout(Duration::from_secs(30)).await { + let json: serde_json::Value = + serde_json::from_slice(&msg.unwrap().unwrap()).unwrap(); + + if json["connection_closed"].is_object() + && json["connection_closed"]["peer_id"] == SECP256K1MULTIHASH + { + break; + } + } else { + panic!("Client two did not receive rendezvous discovery from server in time"); + } + } + + // Kill client two. + let dead_client2 = kill_homestar(proc_guard_client2.take(), None); + + // Retrieve logs. + let stdout = retrieve_output(dead_client2); + + // Check that client two disconnected from client one. 
+ let two_disconnected_from_one = check_for_line_with( + stdout.clone(), + vec!["peer connection closed", SECP256K1MULTIHASH], + ); + + // Check that client two was removed from the Kademlia table + let two_removed_from_dht_table = check_for_line_with( + stdout.clone(), + vec!["removed peer from kademlia table", SECP256K1MULTIHASH], + ); + + assert!(two_disconnected_from_one); + assert!(two_removed_from_dht_table); + }); Ok(()) } From 448f379b764bff38229254b36f85d81c81bf2ae6 Mon Sep 17 00:00:00 2001 From: Zeeshan Lakhani Date: Wed, 31 Jan 2024 15:18:45 -0500 Subject: [PATCH 25/75] async cache --- homestar-runtime/src/event_handler/cache.rs | 9 +++++++-- homestar-runtime/tests/network/rendezvous.rs | 7 +++++-- 2 files changed, 12 insertions(+), 4 deletions(-) diff --git a/homestar-runtime/src/event_handler/cache.rs b/homestar-runtime/src/event_handler/cache.rs index b1b4e81a..8100a9b5 100644 --- a/homestar-runtime/src/event_handler/cache.rs +++ b/homestar-runtime/src/event_handler/cache.rs @@ -67,6 +67,7 @@ pub(crate) fn setup_cache( async move { if let Some(CacheData::OnExpiration(event)) = val.data.get("on_expiration") { + println!("event: {:?}", event); if cause != Expired { return; } @@ -76,14 +77,18 @@ pub(crate) fn setup_cache( if let Some(CacheData::Peer(rendezvous_node)) = val.data.get("rendezvous_node") { - let _ = tx.send(Event::RegisterPeer(rendezvous_node.to_owned())); + let _ = tx + .send_async(Event::RegisterPeer(rendezvous_node.to_owned())) + .await; }; } DispatchEvent::DiscoverPeers => { if let Some(CacheData::Peer(rendezvous_node)) = val.data.get("rendezvous_node") { - let _ = tx.send(Event::DiscoverPeers(rendezvous_node.to_owned())); + let _ = tx + .send_async(Event::DiscoverPeers(rendezvous_node.to_owned())) + .await; }; } } diff --git a/homestar-runtime/tests/network/rendezvous.rs b/homestar-runtime/tests/network/rendezvous.rs index f084dd1c..37551660 100644 --- a/homestar-runtime/tests/network/rendezvous.rs +++ 
b/homestar-runtime/tests/network/rendezvous.rs @@ -992,6 +992,7 @@ fn test_libp2p_rendezvous_rediscover_on_expiration_integration() -> Result<()> { // Start a peer that will renew registrations with the rendezvous server every five seconds let rendezvous_client1 = Command::new(BIN.as_os_str()) + .env("RUST_BACKTRACE", "0") .env( "RUST_LOG", "homestar=debug,homestar_runtime=debug,libp2p=debug,libp2p_gossipsub::behaviour=debug", @@ -1058,9 +1059,9 @@ fn test_libp2p_rendezvous_rediscover_on_expiration_integration() -> Result<()> { } // Collect logs for seven seconds then kill proceses. - let dead_server = kill_homestar(proc_guard_server.take(), Some(Duration::from_secs(7))); + let dead_server = kill_homestar(proc_guard_server.take(), Some(Duration::from_secs(15))); let _ = kill_homestar(proc_guard_client1.take(), Some(Duration::from_secs(7))); - let dead_client2 = kill_homestar(proc_guard_client2.take(), Some(Duration::from_secs(7))); + let dead_client2 = kill_homestar(proc_guard_client2.take(), Some(Duration::from_secs(15))); // Retrieve logs. 
let stdout_server = retrieve_output(dead_server); @@ -1075,6 +1076,8 @@ fn test_libp2p_rendezvous_rediscover_on_expiration_integration() -> Result<()> { ], ); + println!("server_discovery_count: {}", server_discovery_count); + // Count discovery responses the client let client_discovery_count = count_lines_where( stdout_client2, From 86de5ed619bceb6478b553202ec7937e11c7e3ea Mon Sep 17 00:00:00 2001 From: Brian Ginsburg Date: Wed, 31 Jan 2024 16:21:27 -0800 Subject: [PATCH 26/75] feat: Finish discovery notifications, schemas, and test upgrades --- homestar-runtime/src/event_handler/cache.rs | 23 +- .../src/event_handler/notification/swarm.rs | 96 +---- .../src/event_handler/swarm_event.rs | 26 +- homestar-runtime/tests/network/rendezvous.rs | 388 +++++++++++++----- 4 files changed, 332 insertions(+), 201 deletions(-) diff --git a/homestar-runtime/src/event_handler/cache.rs b/homestar-runtime/src/event_handler/cache.rs index 8100a9b5..1721d511 100644 --- a/homestar-runtime/src/event_handler/cache.rs +++ b/homestar-runtime/src/event_handler/cache.rs @@ -61,17 +61,15 @@ pub(crate) enum DispatchEvent { pub(crate) fn setup_cache( sender: Arc>, ) -> Cache { - let eviction_listener = - move |_key: Arc, val: CacheValue, cause: RemovalCause| -> ListenerFuture { - let tx = Arc::clone(&sender); - - async move { - if let Some(CacheData::OnExpiration(event)) = val.data.get("on_expiration") { - println!("event: {:?}", event); - if cause != Expired { - return; - } + let eviction_listener = move |_key: Arc, + val: CacheValue, + cause: RemovalCause| + -> ListenerFuture { + let tx = Arc::clone(&sender); + async move { + if let Some(CacheData::OnExpiration(event)) = val.data.get("on_expiration") { + if cause == Expired { match event { DispatchEvent::RegisterPeer => { if let Some(CacheData::Peer(rendezvous_node)) = @@ -99,8 +97,9 @@ pub(crate) fn setup_cache( }; } } - .boxed() - }; + } + .boxed() + }; Cache::builder() .expire_after(Expiry) diff --git 
a/homestar-runtime/src/event_handler/notification/swarm.rs b/homestar-runtime/src/event_handler/notification/swarm.rs index f4ca078e..587c7120 100644 --- a/homestar-runtime/src/event_handler/notification/swarm.rs +++ b/homestar-runtime/src/event_handler/notification/swarm.rs @@ -5,20 +5,11 @@ use anyhow::anyhow; use chrono::prelude::Utc; use homestar_invocation::ipld::DagJson; -use itertools::Itertools; -use jsonrpsee::core::StringError; -use libipld::{ - serde::{from_ipld, to_ipld}, - Ipld, -}; +use libipld::{serde::from_ipld, Ipld}; use libp2p::{Multiaddr, PeerId}; -use schemars::{ - gen::SchemaGenerator, - schema::{InstanceType, Metadata, ObjectValidation, Schema, SchemaObject, SingleOrVec}, - JsonSchema, -}; +use schemars::JsonSchema; use serde::{Deserialize, Serialize}; -use std::{borrow::Cow, collections::BTreeMap, fmt, str::FromStr}; +use std::{collections::BTreeMap, fmt, str::FromStr}; const TIMESTAMP_KEY: &str = "timestamp"; @@ -145,7 +136,6 @@ pub enum NetworkNotification { /// Rendezvous discover served notification. #[schemars(rename = "discover_served_rendezvous")] DiscoverServedRendezvous(DiscoverServedRendezvous), - // peer_discovered_rendezvous /// Rendezvous peer registered notification. 
#[schemars(rename = "peer_registered_rendezvous")] PeerRegisteredRendezvous(PeerRegisteredRendezvous), @@ -351,14 +341,16 @@ impl TryFrom for ConnectionClosed { } } -#[derive(Debug, Clone)] +#[derive(Debug, Clone, JsonSchema)] +#[schemars(rename = "discovered_mdns")] pub struct DiscoveredMdns { timestamp: i64, - peers: Vec<(String, String)>, + #[schemars(description = "Peers discovered by peer ID and multiaddress")] + peers: BTreeMap, } impl DiscoveredMdns { - pub(crate) fn new(peers: Vec<(PeerId, Multiaddr)>) -> DiscoveredMdns { + pub(crate) fn new(peers: BTreeMap) -> DiscoveredMdns { DiscoveredMdns { timestamp: Utc::now().timestamp_millis(), peers: peers @@ -401,71 +393,23 @@ impl TryFrom for DiscoveredMdns { .to_owned(), )?; - let peers_map = from_ipld::>( + let peers = from_ipld::>( map.get(peers_key) .ok_or_else(|| anyhow!("missing {peers_key}"))? .to_owned(), )?; - let mut peers: Vec<(String, String)> = vec![]; - for peer in peers_map.iter() { - peers.push((peer.0.to_string(), from_ipld(peer.1.to_owned())?)) - } - Ok(DiscoveredMdns { timestamp, peers }) } } -impl JsonSchema for DiscoveredMdns { - fn schema_name() -> String { - "discovered_mdns".to_owned() - } - - fn schema_id() -> Cow<'static, str> { - Cow::Borrowed("homestar-runtime::event_handler::notification::swarm::DiscoveredMdns") - } - - fn json_schema(gen: &mut SchemaGenerator) -> Schema { - let schema = SchemaObject { - instance_type: Some(SingleOrVec::Single(InstanceType::Object.into())), - object: Some(Box::new(ObjectValidation { - properties: BTreeMap::from([ - ( - "timestamp".to_string(), - Schema::Object(SchemaObject { - instance_type: Some(SingleOrVec::Single(InstanceType::Number.into())), - ..Default::default() - }), - ), - ( - "peers".to_string(), - Schema::Object(SchemaObject { - instance_type: Some(SingleOrVec::Single(InstanceType::Object.into())), - metadata: Some(Box::new(Metadata { - description: Some("Peers and their addresses".to_string()), - ..Default::default() - })), - object: 
Some(Box::new(ObjectValidation { - additional_properties: Some(Box::new(::json_schema(gen))), - ..Default::default() - })), - ..Default::default() - }), - ), - ]), - ..Default::default() - })), - ..Default::default() - }; - schema.into() - } -} - #[derive(Debug, Clone, JsonSchema)] #[schemars(rename = "discovered_rendezvous")] pub struct DiscoveredRendezvous { timestamp: i64, + #[schemars(description = "Server that fulfilled the discovery request")] server: String, + #[schemars(description = "Peers discovered by peer ID and multiaddresses")] peers: BTreeMap>, } @@ -561,6 +505,7 @@ impl TryFrom for DiscoveredRendezvous { #[schemars(rename = "registered_rendezvous")] pub struct RegisteredRendezvous { timestamp: i64, + #[schemars(description = "Server that accepted registration")] server: String, } @@ -613,6 +558,7 @@ impl TryFrom for RegisteredRendezvous { #[schemars(rename = "registered_rendezvous")] pub struct DiscoverServedRendezvous { timestamp: i64, + #[schemars(description = "Peer that requested discovery")] enquirer: String, } @@ -668,7 +614,9 @@ impl TryFrom for DiscoverServedRendezvous { #[schemars(rename = "peer_registered_rendezvous")] pub struct PeerRegisteredRendezvous { timestamp: i64, + #[schemars(description = "Peer registered")] peer_id: String, + #[schemars(description = "Multiaddresses for peer")] addresses: Vec, } @@ -751,7 +699,7 @@ mod test { peer_id: PeerId, address: Multiaddr, addresses: Vec, - peers: Vec<(PeerId, Multiaddr)>, + peers: BTreeMap, peers_vec_addr: BTreeMap>, } @@ -763,7 +711,7 @@ mod test { Multiaddr::from_str("/ip4/127.0.0.1/tcp/7000").unwrap(), Multiaddr::from_str("/ip4/127.0.0.1/tcp/7001").unwrap(), ], - peers: vec![ + peers: BTreeMap::from([ ( PeerId::random(), Multiaddr::from_str("/ip4/127.0.0.1/tcp/7000").unwrap(), @@ -772,7 +720,7 @@ mod test { PeerId::random(), Multiaddr::from_str("/ip4/127.0.0.1/tcp/7001").unwrap(), ), - ], + ]), peers_vec_addr: BTreeMap::from([ ( PeerId::random(), @@ -861,10 +809,10 @@ mod test { 
assert_eq!(n.timestamp, timestamp); for peer in n.peers { - assert!(peers.contains(&( - PeerId::from_str(&peer.0).unwrap(), - Multiaddr::from_str(&peer.1).unwrap() - ))) + assert_eq!( + Multiaddr::from_str(&peer.1).unwrap(), + peers[&PeerId::from_str(&peer.0).unwrap()] + ) } } NetworkNotification::DiscoveredRendezvous(n) => { diff --git a/homestar-runtime/src/event_handler/swarm_event.rs b/homestar-runtime/src/event_handler/swarm_event.rs index e8ae5303..836893de 100644 --- a/homestar-runtime/src/event_handler/swarm_event.rs +++ b/homestar-runtime/src/event_handler/swarm_event.rs @@ -312,17 +312,15 @@ async fn handle_swarm_event( NetworkNotification::DiscoveredRendezvous( notification::DiscoveredRendezvous::new( rendezvous_node, - BTreeMap::from( - registrations - .iter() - .map(|registration| { - ( - registration.record.peer_id(), - registration.record.addresses().to_owned(), - ) - }) - .collect::>>(), - ), + registrations + .iter() + .map(|registration| { + ( + registration.record.peer_id(), + registration.record.addresses().to_owned(), + ) + }) + .collect::>>(), ), ), ); @@ -1101,7 +1099,11 @@ async fn handle_swarm_event( #[cfg(feature = "websocket-notify")] notification::emit_network_event( event_handler.ws_evt_sender(), - NetworkNotification::DiscoveredMdns(notification::DiscoveredMdns::new(list)), + NetworkNotification::DiscoveredMdns(notification::DiscoveredMdns::new( + list.iter() + .map(|peer| (peer.0, peer.1.to_owned())) + .collect::>(), + )), ) } SwarmEvent::Behaviour(ComposedEvent::Mdns(mdns::Event::Expired(list))) => { diff --git a/homestar-runtime/tests/network/rendezvous.rs b/homestar-runtime/tests/network/rendezvous.rs index 37551660..bfcf9580 100644 --- a/homestar-runtime/tests/network/rendezvous.rs +++ b/homestar-runtime/tests/network/rendezvous.rs @@ -18,7 +18,6 @@ use std::{ net::Ipv4Addr, path::PathBuf, process::{Command, Stdio}, - thread, time::Duration, }; @@ -463,12 +462,6 @@ fn test_libp2p_disconnect_rendezvous_discovery_integration() -> 
Result<()> { .await .unwrap(); - // Wait for registration to complete. - // TODO When we have WebSocket push events, listen on a registration event instead of using an arbitrary sleep. - // thread::sleep(Duration::from_secs(2)); - - // TODO Wait for clint 1 to register with server, server confirm registration - // Poll for client one registered with server loop { if let Ok(msg) = sub2.next().with_timeout(Duration::from_secs(30)).await { @@ -733,37 +726,102 @@ fn test_libp2p_rendezvous_renew_registration_integration() -> Result<()> { panic!("Homestar server/runtime failed to start in time"); } - // TODO Listen for client registered and server registered peer messages - // with renewal should be more than one. + tokio_test::task::spawn(async { + // Subscribe to rendezvous server + let ws_url1 = format!("ws://{}:{}", Ipv4Addr::LOCALHOST, ws_port1); + let client1 = WsClientBuilder::default().build(ws_url1).await.unwrap(); + let mut sub1: Subscription> = client1 + .subscribe( + SUBSCRIBE_NETWORK_EVENTS_ENDPOINT, + rpc_params![], + UNSUBSCRIBE_NETWORK_EVENTS_ENDPOINT, + ) + .await + .unwrap(); - // Collect logs for five seconds then kill proceses. - let dead_server = kill_homestar(rendezvous_server, Some(Duration::from_secs(5))); - let dead_client = kill_homestar(rendezvous_client1, Some(Duration::from_secs(5))); + // Subscribe to rendezvous client + let ws_url2 = format!("ws://{}:{}", Ipv4Addr::LOCALHOST, ws_port2); + let client2 = WsClientBuilder::default().build(ws_url2).await.unwrap(); + let mut sub2: Subscription> = client2 + .subscribe( + SUBSCRIBE_NETWORK_EVENTS_ENDPOINT, + rpc_params![], + UNSUBSCRIBE_NETWORK_EVENTS_ENDPOINT, + ) + .await + .unwrap(); - // Retrieve logs. - let stdout_server = retrieve_output(dead_server); - let stdout_client = retrieve_output(dead_client); + // Poll for server registered client twice. 
+ let mut peer_registered_count = 0; + loop { + if let Ok(msg) = sub1.next().with_timeout(Duration::from_secs(30)).await { + let json: serde_json::Value = + serde_json::from_slice(&msg.unwrap().unwrap()).unwrap(); - // Count registrations on the server - let server_registration_count = count_lines_where( - stdout_server, - vec![ - "registered peer through rendezvous", - "12D3KooWJWoaqZhDaoEFshF7Rh1bpY9ohihFhzcW6d69Lr2NASuq", - ], - ); + if json["peer_registered_rendezvous"].is_object() + && json["peer_registered_rendezvous"]["peer_id"] == ED25519MULTIHASH3 + { + peer_registered_count += 1; + } + } else { + panic!("Server did not register client twice in time"); + } - // Count registrations on the client - let client_registration_count = count_lines_where( - stdout_client, - vec![ - "registered self with rendezvous node", - "12D3KooWDpJ7As7BWAwRMfu1VU2WCqNjvq387JEYKDBj4kx6nXTN", - ], - ); + if peer_registered_count == 2 { + break; + } + } - assert!(server_registration_count > 1); - assert!(client_registration_count > 1); + // Poll for client registered with server twice. + let mut registered_count = 0; + loop { + if let Ok(msg) = sub2.next().with_timeout(Duration::from_secs(30)).await { + let json: serde_json::Value = + serde_json::from_slice(&msg.unwrap().unwrap()).unwrap(); + + if json["registered_rendezvous"].is_object() + && json["registered_rendezvous"]["server"] == ED25519MULTIHASH + { + registered_count += 1; + } + } else { + panic!("Client did not register with server twice in time"); + } + + if registered_count == 2 { + break; + } + } + + // Collect logs for five seconds then kill proceses. + let dead_server = kill_homestar(rendezvous_server, None); + let dead_client = kill_homestar(rendezvous_client1, None); + + // Retrieve logs. 
+ let stdout_server = retrieve_output(dead_server); + let stdout_client = retrieve_output(dead_client); + + // Count registrations on the server + let server_registration_count = count_lines_where( + stdout_server, + vec![ + "registered peer through rendezvous", + "12D3KooWJWoaqZhDaoEFshF7Rh1bpY9ohihFhzcW6d69Lr2NASuq", + ], + ); + + // Count registrations on the client + let client_registration_count = count_lines_where( + stdout_client, + vec![ + "registered self with rendezvous node", + "12D3KooWDpJ7As7BWAwRMfu1VU2WCqNjvq387JEYKDBj4kx6nXTN", + ], + ); + + assert!(server_registration_count > 1); + assert!(client_registration_count > 1); + }); Ok(()) } @@ -869,37 +927,102 @@ fn test_libp2p_rendezvous_rediscovery_integration() -> Result<()> { panic!("Homestar server/runtime failed to start in time"); } - // TODO Listen for client discover and server discover served messages - // should be more than one for both (or move on at two) + tokio_test::task::spawn(async { + // Subscribe to rendezvous server + let ws_url1 = format!("ws://{}:{}", Ipv4Addr::LOCALHOST, ws_port1); + let client1 = WsClientBuilder::default().build(ws_url1).await.unwrap(); + let mut sub1: Subscription> = client1 + .subscribe( + SUBSCRIBE_NETWORK_EVENTS_ENDPOINT, + rpc_params![], + UNSUBSCRIBE_NETWORK_EVENTS_ENDPOINT, + ) + .await + .unwrap(); + + // Subscribe to rendezvous client + let ws_url2 = format!("ws://{}:{}", Ipv4Addr::LOCALHOST, ws_port2); + let client2 = WsClientBuilder::default().build(ws_url2).await.unwrap(); + let mut sub2: Subscription> = client2 + .subscribe( + SUBSCRIBE_NETWORK_EVENTS_ENDPOINT, + rpc_params![], + UNSUBSCRIBE_NETWORK_EVENTS_ENDPOINT, + ) + .await + .unwrap(); - // Collect logs for five seconds then kill proceses. 
- let dead_server = kill_homestar(proc_guard_server.take(), Some(Duration::from_secs(15))); - let dead_client = kill_homestar(proc_guard_client1.take(), Some(Duration::from_secs(15))); + // Poll for server provided discovery twice twice + let mut discover_served_count = 0; + loop { + if let Ok(msg) = sub1.next().with_timeout(Duration::from_secs(30)).await { + let json: serde_json::Value = + serde_json::from_slice(&msg.unwrap().unwrap()).unwrap(); - // Retrieve logs. - let stdout_server = retrieve_output(dead_server); - let stdout_client = retrieve_output(dead_client); + if json["discover_served_rendezvous"].is_object() + && json["discover_served_rendezvous"]["enquirer"] == ED25519MULTIHASH4 + { + discover_served_count += 1; + } + } else { + panic!("Server did not provide discovery twice in time"); + } - // Count discover requests on the server - let server_discovery_count = count_lines_where( - stdout_server, - vec![ - "served rendezvous discover request to peer", - ED25519MULTIHASH4, - ], - ); + if discover_served_count == 2 { + break; + } + } - // Count discovery responses the client - let client_discovery_count = count_lines_where( - stdout_client, - vec![ - "received discovery from rendezvous server", - ED25519MULTIHASH, - ], - ); + // Poll for client discovered twice + let mut discovered_count = 0; + loop { + if let Ok(msg) = sub2.next().with_timeout(Duration::from_secs(30)).await { + let json: serde_json::Value = + serde_json::from_slice(&msg.unwrap().unwrap()).unwrap(); - assert!(server_discovery_count > 1); - assert!(client_discovery_count > 1); + if json["discovered_rendezvous"].is_object() + && json["discovered_rendezvous"]["server"] == ED25519MULTIHASH + { + discovered_count += 1; + } + } else { + panic!("Client did not discover twice in time"); + } + + if discovered_count == 2 { + break; + } + } + + // Collect logs for five seconds then kill proceses. 
+ let dead_server = kill_homestar(proc_guard_server.take(), None); + let dead_client = kill_homestar(proc_guard_client1.take(), None); + + // Retrieve logs. + let stdout_server = retrieve_output(dead_server); + let stdout_client = retrieve_output(dead_client); + + // Count discover requests on the server + let server_discovery_count = count_lines_where( + stdout_server, + vec![ + "served rendezvous discover request to peer", + ED25519MULTIHASH4, + ], + ); + + // Count discovery responses the client + let client_discovery_count = count_lines_where( + stdout_client, + vec![ + "received discovery from rendezvous server", + ED25519MULTIHASH, + ], + ); + + assert!(server_discovery_count > 1); + assert!(client_discovery_count > 1); + }); Ok(()) } @@ -1011,16 +1134,41 @@ fn test_libp2p_rendezvous_rediscover_on_expiration_integration() -> Result<()> { panic!("Homestar server/runtime failed to start in time"); } - // Wait for registration to complete. - // TODO When we have WebSocket push events, listen on a registration event instead of using an arbitrary sleep. - thread::sleep(Duration::from_secs(2)); + tokio_test::task::spawn(async { + // Subscribe to rendezvous client one + let ws_url2 = format!("ws://{}:{}", Ipv4Addr::LOCALHOST, ws_port2); + let client2 = WsClientBuilder::default().build(ws_url2).await.unwrap(); + let mut sub2: Subscription> = client2 + .subscribe( + SUBSCRIBE_NETWORK_EVENTS_ENDPOINT, + rpc_params![], + UNSUBSCRIBE_NETWORK_EVENTS_ENDPOINT, + ) + .await + .unwrap(); - // Start a peer that will discover with the rendezvous server when - // a discovered registration expires. Note that by default discovery only - // occurs every ten minutes, so discovery requests in this test are driven - // by expirations. 
- let toml3 = format!( - r#" + // Poll for client one registered with server the first time + loop { + if let Ok(msg) = sub2.next().with_timeout(Duration::from_secs(30)).await { + let json: serde_json::Value = + serde_json::from_slice(&msg.unwrap().unwrap()).unwrap(); + + if json["registered_rendezvous"].is_object() + && json["registered_rendezvous"]["server"] == ED25519MULTIHASH + { + break; + } + } else { + panic!("Client did not register with server twice in time"); + } + } + + // Start a peer that will discover with the rendezvous server when + // a discovered registration expires. Note that by default discovery only + // occurs every ten minutes, so discovery requests in this test are driven + // by client one expirations. + let toml3 = format!( + r#" [node] [node.network.keypair_config] existing = {{ key_type = "ed25519", path = "./fixtures/__testkey_ed25519_2.pem" }} @@ -1036,10 +1184,10 @@ fn test_libp2p_rendezvous_rediscover_on_expiration_integration() -> Result<()> { [node.network.webserver] port = {ws_port3} "# - ); - let config3 = make_config!(toml3); + ); + let config3 = make_config!(toml3); - let rendezvous_client2 = Command::new(BIN.as_os_str()) + let rendezvous_client2 = Command::new(BIN.as_os_str()) .env( "RUST_LOG", "homestar=debug,homestar_runtime=debug,libp2p=debug,libp2p_gossipsub::behaviour=debug", @@ -1052,43 +1200,77 @@ fn test_libp2p_rendezvous_rediscover_on_expiration_integration() -> Result<()> { .stdout(Stdio::piped()) .spawn() .unwrap(); - let proc_guard_client2 = ChildGuard::new(rendezvous_client2); + let proc_guard_client2 = ChildGuard::new(rendezvous_client2); - if wait_for_socket_connection(ws_port3, 1000).is_err() { - panic!("Homestar server/runtime failed to start in time"); - } + if wait_for_socket_connection(ws_port3, 1000).is_err() { + panic!("Homestar server/runtime failed to start in time"); + } - // Collect logs for seven seconds then kill proceses. 
- let dead_server = kill_homestar(proc_guard_server.take(), Some(Duration::from_secs(15))); - let _ = kill_homestar(proc_guard_client1.take(), Some(Duration::from_secs(7))); - let dead_client2 = kill_homestar(proc_guard_client2.take(), Some(Duration::from_secs(15))); - - // Retrieve logs. - let stdout_server = retrieve_output(dead_server); - let stdout_client2 = retrieve_output(dead_client2); - - // Count discover requests on the server - let server_discovery_count = count_lines_where( - stdout_server, - vec![ - "served rendezvous discover request to peer", - "12D3KooWK99VoVxNE7XzyBwXEzW7xhK7Gpv85r9F3V3fyKSUKPH5", - ], - ); + // Subscribe to rendezvous client two + let ws_url3 = format!("ws://{}:{}", Ipv4Addr::LOCALHOST, ws_port3); + let client3 = WsClientBuilder::default().build(ws_url3).await.unwrap(); + let mut sub3: Subscription> = client3 + .subscribe( + SUBSCRIBE_NETWORK_EVENTS_ENDPOINT, + rpc_params![], + UNSUBSCRIBE_NETWORK_EVENTS_ENDPOINT, + ) + .await + .unwrap(); - println!("server_discovery_count: {}", server_discovery_count); + // Poll for client two discovered twice + let mut discovered_count = 0; + loop { + if let Ok(msg) = sub3.next().with_timeout(Duration::from_secs(30)).await { + let json: serde_json::Value = + serde_json::from_slice(&msg.unwrap().unwrap()).unwrap(); - // Count discovery responses the client - let client_discovery_count = count_lines_where( - stdout_client2, - vec![ - "received discovery from rendezvous server", - "12D3KooWDpJ7As7BWAwRMfu1VU2WCqNjvq387JEYKDBj4kx6nXTN", - ], - ); + if json["discovered_rendezvous"].is_object() + && json["discovered_rendezvous"]["server"] == ED25519MULTIHASH + { + discovered_count += 1; + } + } else { + panic!("Client did not discover twice in time"); + } - assert!(server_discovery_count > 1); - assert!(client_discovery_count > 1); + if discovered_count == 2 { + break; + } + } + + // Collect logs for seven seconds then kill proceses. 
+ let dead_server = kill_homestar(proc_guard_server.take(), None); + let _ = kill_homestar(proc_guard_client1.take(), None); + let dead_client2 = kill_homestar(proc_guard_client2.take(), None); + + // Retrieve logs. + let stdout_server = retrieve_output(dead_server); + let stdout_client2 = retrieve_output(dead_client2); + + // Count discover requests on the server + let server_discovery_count = count_lines_where( + stdout_server, + vec![ + "served rendezvous discover request to peer", + "12D3KooWK99VoVxNE7XzyBwXEzW7xhK7Gpv85r9F3V3fyKSUKPH5", + ], + ); + + println!("server_discovery_count: {}", server_discovery_count); + + // Count discovery responses the client + let client_discovery_count = count_lines_where( + stdout_client2, + vec![ + "received discovery from rendezvous server", + "12D3KooWDpJ7As7BWAwRMfu1VU2WCqNjvq387JEYKDBj4kx6nXTN", + ], + ); + + assert!(server_discovery_count > 1); + assert!(client_discovery_count > 1); + }); Ok(()) } From 6edfe100d4b4541c8d6848df3c3fe610532df28d Mon Sep 17 00:00:00 2001 From: Brian Ginsburg Date: Thu, 1 Feb 2024 09:10:54 -0800 Subject: [PATCH 27/75] chore: Update redial test notifications --- homestar-runtime/src/event_handler/cache.rs | 10 +++++----- homestar-runtime/tests/network/notification.rs | 12 ++++++------ 2 files changed, 11 insertions(+), 11 deletions(-) diff --git a/homestar-runtime/src/event_handler/cache.rs b/homestar-runtime/src/event_handler/cache.rs index 1721d511..79aaa4d7 100644 --- a/homestar-runtime/src/event_handler/cache.rs +++ b/homestar-runtime/src/event_handler/cache.rs @@ -89,13 +89,13 @@ pub(crate) fn setup_cache( .await; }; } + DispatchEvent::DialPeer => { + if let Some(CacheData::Peer(node)) = val.data.get("node") { + let _ = tx.send(Event::DialPeer(node.to_owned())); + }; + } } } - DispatchEvent::DialPeer => { - if let Some(CacheData::Peer(node)) = val.data.get("node") { - let _ = tx.send(Event::DialPeer(node.to_owned())); - }; - } } } .boxed() diff --git 
a/homestar-runtime/tests/network/notification.rs b/homestar-runtime/tests/network/notification.rs index 7c33a1e6..e5d503a7 100644 --- a/homestar-runtime/tests/network/notification.rs +++ b/homestar-runtime/tests/network/notification.rs @@ -302,7 +302,7 @@ fn test_libp2p_redial_on_connection_closed_integration() -> Result<()> { let json: serde_json::Value = serde_json::from_slice(&msg.unwrap().unwrap()).unwrap(); - if json["type"].as_str().unwrap() == "network:connectionEstablished" { + if json["connection_established"].is_object() { break; } } else { @@ -318,7 +318,7 @@ fn test_libp2p_redial_on_connection_closed_integration() -> Result<()> { let json: serde_json::Value = serde_json::from_slice(&msg.unwrap().unwrap()).unwrap(); - if json["type"].as_str().unwrap() == "network:connectionClosed" { + if json["connection_closed"].is_object() { break; } } else { @@ -347,7 +347,7 @@ fn test_libp2p_redial_on_connection_closed_integration() -> Result<()> { let json: serde_json::Value = serde_json::from_slice(&msg.unwrap().unwrap()).unwrap(); - if json["type"].as_str().unwrap() == "network:connectionEstablished" { + if json["connection_established"].is_object() { break; } } else { @@ -477,7 +477,7 @@ fn test_libp2p_redial_on_connection_error_integration() -> Result<()> { let json: serde_json::Value = serde_json::from_slice(&msg.unwrap().unwrap()).unwrap(); - if json["type"].as_str().unwrap() == "network:connectionEstablished" { + if json["connection_established"].is_object() { break; } } else { @@ -493,7 +493,7 @@ fn test_libp2p_redial_on_connection_error_integration() -> Result<()> { let json: serde_json::Value = serde_json::from_slice(&msg.unwrap().unwrap()).unwrap(); - if json["type"].as_str().unwrap() == "network:connectionClosed" { + if json["connection_closed"].is_object() { break; } } else { @@ -550,7 +550,7 @@ fn test_libp2p_redial_on_connection_error_integration() -> Result<()> { let json: serde_json::Value = serde_json::from_slice(&msg.unwrap().unwrap()).unwrap(); - 
if json["type"].as_str().unwrap() == "network:connectionEstablished" { + if json["connection_established"].is_object() { break; } } else { From 0120c5515b9731157c8f1bf128a051fe1b0cc4c8 Mon Sep 17 00:00:00 2001 From: Brian Ginsburg Date: Thu, 1 Feb 2024 11:10:45 -0800 Subject: [PATCH 28/75] feat: Add listen and connection error notifications --- .../src/event_handler/notification/swarm.rs | 367 ++++++++++++++---- .../src/event_handler/swarm_event.rs | 33 +- .../tests/network/notification.rs | 4 +- 3 files changed, 311 insertions(+), 93 deletions(-) diff --git a/homestar-runtime/src/event_handler/notification/swarm.rs b/homestar-runtime/src/event_handler/notification/swarm.rs index 587c7120..49384839 100644 --- a/homestar-runtime/src/event_handler/notification/swarm.rs +++ b/homestar-runtime/src/event_handler/notification/swarm.rs @@ -6,21 +6,26 @@ use anyhow::anyhow; use chrono::prelude::Utc; use homestar_invocation::ipld::DagJson; use libipld::{serde::from_ipld, Ipld}; -use libp2p::{Multiaddr, PeerId}; +use libp2p::{ + swarm::{DialError, ListenError}, + Multiaddr, PeerId, +}; use schemars::JsonSchema; use serde::{Deserialize, Serialize}; use std::{collections::BTreeMap, fmt, str::FromStr}; +const ADDRESS_KEY: &str = "address"; +const ADDRESSES_KEY: &str = "addresses"; +const ENQUIRER_KEY: &str = "enquirer"; +const ERROR_KEY: &str = "error"; +const PEER_KEY: &str = "peer_id"; +const PEERS_KEY: &str = "peers"; +const SERVER_KEY: &str = "server"; const TIMESTAMP_KEY: &str = "timestamp"; // Swarm notification types sent to clients #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] pub(crate) enum SwarmNotification { - ConnnectionEstablished, - ConnnectionClosed, - ListeningOn, - OutgoingConnectionError, - IncomingConnectionError, PublishedReceiptPubsub, ReceivedReceiptPubsub, GotReceiptDht, @@ -35,18 +40,10 @@ pub(crate) enum SwarmNotification { ReceivedWorkflowInfo, } +// TODO Fill these in for NetworkNotification impl fmt::Display for SwarmNotification { fn 
fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match *self { - SwarmNotification::ConnnectionEstablished => write!(f, "connectionEstablished"), - SwarmNotification::ConnnectionClosed => write!(f, "connectionClosed"), - SwarmNotification::ListeningOn => write!(f, "listeningOn"), - SwarmNotification::OutgoingConnectionError => { - write!(f, "outgoingConnectionError") - } - SwarmNotification::IncomingConnectionError => { - write!(f, "incomingConnectionError") - } SwarmNotification::ReceivedReceiptPubsub => { write!(f, "receivedReceiptPubsub") } @@ -92,11 +89,6 @@ impl FromStr for SwarmNotification { fn from_str(ty: &str) -> Result { match ty { - "connectionEstablished" => Ok(Self::ConnnectionEstablished), - "connectionClosed" => Ok(Self::ConnnectionClosed), - "listeningOn" => Ok(Self::ListeningOn), - "outgoingConnectionError" => Ok(Self::OutgoingConnectionError), - "incomingConnectionError" => Ok(Self::IncomingConnectionError), "receivedReceiptPubsub" => Ok(Self::ReceivedReceiptPubsub), "publishedReceiptPubsub" => Ok(Self::PublishedReceiptPubsub), "putReciptDht" => Ok(Self::PutReceiptDht), @@ -115,15 +107,24 @@ impl FromStr for SwarmNotification { } /// Network notification type. -#[derive(Clone, JsonSchema, Debug)] +#[derive(Debug, Clone, JsonSchema)] #[schemars(rename = "network")] pub enum NetworkNotification { + /// Listening on new address notification. + #[schemars(rename = "new_listen_addr")] + NewListenAddr(NewListenAddr), /// Connection established notification. #[schemars(rename = "connection_established")] ConnnectionEstablished(ConnectionEstablished), /// Connection closed notification. #[schemars(rename = "connection_closed")] ConnnectionClosed(ConnectionClosed), + /// Outgoing conenction error notification. + #[schemars(rename = "outgoing_connection_error")] + OutgoingConnectionError(OutgoingConnectionError), + /// Incoming conenction error notification. 
+ #[schemars(rename = "incoming_connection_error")] + IncomingConnectionError(IncomingConnectionError), /// mDNS discovered notification. #[schemars(rename = "discovered_mdns")] DiscoveredMdns(DiscoveredMdns), @@ -146,6 +147,9 @@ impl DagJson for NetworkNotification {} impl From for Ipld { fn from(notification: NetworkNotification) -> Self { match notification { + NetworkNotification::NewListenAddr(n) => { + Ipld::Map(BTreeMap::from([("new_listen_addr".into(), n.into())])) + } NetworkNotification::ConnnectionEstablished(n) => Ipld::Map(BTreeMap::from([( "connection_established".into(), n.into(), @@ -153,6 +157,14 @@ impl From for Ipld { NetworkNotification::ConnnectionClosed(n) => { Ipld::Map(BTreeMap::from([("connection_closed".into(), n.into())])) } + NetworkNotification::OutgoingConnectionError(n) => Ipld::Map(BTreeMap::from([( + "outgoing_connection_error".into(), + n.into(), + )])), + NetworkNotification::IncomingConnectionError(n) => Ipld::Map(BTreeMap::from([( + "incoming_connection_error".into(), + n.into(), + )])), NetworkNotification::DiscoveredMdns(n) => { Ipld::Map(BTreeMap::from([("discovered_mdns".into(), n.into())])) } @@ -182,12 +194,21 @@ impl TryFrom for NetworkNotification { if let Some((key, val)) = map.first_key_value() { match key.as_str() { + "new_listen_addr" => Ok(NetworkNotification::NewListenAddr( + NewListenAddr::try_from(val.to_owned())?, + )), "connection_established" => Ok(NetworkNotification::ConnnectionEstablished( ConnectionEstablished::try_from(val.to_owned())?, )), "connection_closed" => Ok(NetworkNotification::ConnnectionClosed( ConnectionClosed::try_from(val.to_owned())?, )), + "outgoing_connection_error" => Ok(NetworkNotification::OutgoingConnectionError( + OutgoingConnectionError::try_from(val.to_owned())?, + )), + "incoming_connection_error" => Ok(NetworkNotification::IncomingConnectionError( + IncomingConnectionError::try_from(val.to_owned())?, + )), "discovered_mdns" => Ok(NetworkNotification::DiscoveredMdns( 
DiscoveredMdns::try_from(val.to_owned())?, )), @@ -211,7 +232,69 @@ impl TryFrom for NetworkNotification { } } -#[derive(JsonSchema, Debug, Clone)] +#[derive(Debug, Clone, JsonSchema)] +#[schemars(rename = "new_listen_addr")] +pub struct NewListenAddr { + timestamp: i64, + peer_id: String, + address: String, +} + +impl NewListenAddr { + pub(crate) fn new(peer_id: PeerId, address: Multiaddr) -> NewListenAddr { + NewListenAddr { + timestamp: Utc::now().timestamp_millis(), + peer_id: peer_id.to_string(), + address: address.to_string(), + } + } +} + +impl DagJson for NewListenAddr {} + +impl From for Ipld { + fn from(notification: NewListenAddr) -> Self { + Ipld::Map(BTreeMap::from([ + ("timestamp".into(), notification.timestamp.into()), + ("peer_id".into(), notification.peer_id.into()), + ("address".into(), notification.address.into()), + ])) + } +} + +impl TryFrom for NewListenAddr { + type Error = anyhow::Error; + + fn try_from(ipld: Ipld) -> Result { + let map = from_ipld::>(ipld)?; + + let peer_id = from_ipld( + map.get(PEER_KEY) + .ok_or_else(|| anyhow!("missing {PEER_KEY}"))? + .to_owned(), + )?; + + let address = from_ipld( + map.get(ADDRESS_KEY) + .ok_or_else(|| anyhow!("missing {ADDRESS_KEY}"))? + .to_owned(), + )?; + + let timestamp = from_ipld( + map.get(TIMESTAMP_KEY) + .ok_or_else(|| anyhow!("missing {TIMESTAMP_KEY}"))? + .to_owned(), + )?; + + Ok(NewListenAddr { + timestamp, + peer_id, + address, + }) + } +} + +#[derive(Debug, Clone, JsonSchema)] #[schemars(rename = "connection_established")] pub struct ConnectionEstablished { timestamp: i64, @@ -245,20 +328,17 @@ impl TryFrom for ConnectionEstablished { type Error = anyhow::Error; fn try_from(ipld: Ipld) -> Result { - let peer_key: &str = "peer_id"; - let address_key: &str = "address"; - let map = from_ipld::>(ipld)?; let peer_id = from_ipld( - map.get(peer_key) - .ok_or_else(|| anyhow!("missing {peer_key}"))? + map.get(PEER_KEY) + .ok_or_else(|| anyhow!("missing {PEER_KEY}"))? 
.to_owned(), )?; let address = from_ipld( - map.get(address_key) - .ok_or_else(|| anyhow!("missing {address_key}"))? + map.get(ADDRESS_KEY) + .ok_or_else(|| anyhow!("missing {ADDRESS_KEY}"))? .to_owned(), )?; @@ -276,7 +356,7 @@ impl TryFrom for ConnectionEstablished { } } -#[derive(JsonSchema, Debug, Clone)] +#[derive(Debug, Clone, JsonSchema)] #[schemars(rename = "connection_closed")] pub struct ConnectionClosed { timestamp: i64, @@ -310,20 +390,17 @@ impl TryFrom for ConnectionClosed { type Error = anyhow::Error; fn try_from(ipld: Ipld) -> Result { - let peer_key: &str = "peer_id"; - let address_key: &str = "address"; - let map = from_ipld::>(ipld)?; let peer_id = from_ipld( - map.get(peer_key) - .ok_or_else(|| anyhow!("missing {peer_key}"))? + map.get(PEER_KEY) + .ok_or_else(|| anyhow!("missing {PEER_KEY}"))? .to_owned(), )?; let address = from_ipld( - map.get(address_key) - .ok_or_else(|| anyhow!("missing {address_key}"))? + map.get(ADDRESS_KEY) + .ok_or_else(|| anyhow!("missing {ADDRESS_KEY}"))? 
.to_owned(), )?; @@ -341,6 +418,125 @@ impl TryFrom for ConnectionClosed { } } +#[derive(Debug, Clone, JsonSchema)] +#[schemars(rename = "outgoing_connection_error")] +pub struct OutgoingConnectionError { + timestamp: i64, + peer_id: Option, + error: String, +} + +impl OutgoingConnectionError { + pub(crate) fn new(peer_id: Option, error: DialError) -> OutgoingConnectionError { + OutgoingConnectionError { + timestamp: Utc::now().timestamp_millis(), + peer_id: peer_id.map(|p| p.to_string()), + error: error.to_string(), + } + } +} + +impl DagJson for OutgoingConnectionError {} + +impl From for Ipld { + fn from(notification: OutgoingConnectionError) -> Self { + Ipld::Map(BTreeMap::from([ + ("timestamp".into(), notification.timestamp.into()), + ( + "peer_id".into(), + notification + .peer_id + .map(|peer_id| peer_id.into()) + .unwrap_or(Ipld::Null), + ), + ("error".into(), notification.error.into()), + ])) + } +} + +impl TryFrom for OutgoingConnectionError { + type Error = anyhow::Error; + + fn try_from(ipld: Ipld) -> Result { + let map = from_ipld::>(ipld)?; + + let peer_id = map + .get(PEER_KEY) + .and_then(|ipld| match ipld { + Ipld::Null => None, + ipld => Some(ipld), + }) + .and_then(|ipld| from_ipld(ipld.to_owned()).ok()); + + let error = from_ipld( + map.get(ERROR_KEY) + .ok_or_else(|| anyhow!("missing {ERROR_KEY}"))? + .to_owned(), + )?; + + let timestamp = from_ipld( + map.get(TIMESTAMP_KEY) + .ok_or_else(|| anyhow!("missing {TIMESTAMP_KEY}"))? 
+ .to_owned(), + )?; + + Ok(OutgoingConnectionError { + timestamp, + peer_id, + error, + }) + } +} + +#[derive(Debug, Clone, JsonSchema)] +#[schemars(rename = "incoming_connection_error")] +pub struct IncomingConnectionError { + timestamp: i64, + error: String, +} + +impl IncomingConnectionError { + pub(crate) fn new(error: ListenError) -> IncomingConnectionError { + IncomingConnectionError { + timestamp: Utc::now().timestamp_millis(), + error: error.to_string(), + } + } +} + +impl DagJson for IncomingConnectionError {} + +impl From for Ipld { + fn from(notification: IncomingConnectionError) -> Self { + Ipld::Map(BTreeMap::from([ + ("timestamp".into(), notification.timestamp.into()), + ("error".into(), notification.error.into()), + ])) + } +} + +impl TryFrom for IncomingConnectionError { + type Error = anyhow::Error; + + fn try_from(ipld: Ipld) -> Result { + let map = from_ipld::>(ipld)?; + + let error = from_ipld( + map.get(ERROR_KEY) + .ok_or_else(|| anyhow!("missing {ERROR_KEY}"))? + .to_owned(), + )?; + + let timestamp = from_ipld( + map.get(TIMESTAMP_KEY) + .ok_or_else(|| anyhow!("missing {TIMESTAMP_KEY}"))? + .to_owned(), + )?; + + Ok(IncomingConnectionError { timestamp, error }) + } +} + #[derive(Debug, Clone, JsonSchema)] #[schemars(rename = "discovered_mdns")] pub struct DiscoveredMdns { @@ -384,7 +580,6 @@ impl TryFrom for DiscoveredMdns { type Error = anyhow::Error; fn try_from(ipld: Ipld) -> Result { - let peers_key: &str = "peers"; let map = from_ipld::>(ipld)?; let timestamp = from_ipld( @@ -394,8 +589,8 @@ impl TryFrom for DiscoveredMdns { )?; let peers = from_ipld::>( - map.get(peers_key) - .ok_or_else(|| anyhow!("missing {peers_key}"))? + map.get(PEERS_KEY) + .ok_or_else(|| anyhow!("missing {PEERS_KEY}"))? 
.to_owned(), )?; @@ -471,8 +666,6 @@ impl TryFrom for DiscoveredRendezvous { type Error = anyhow::Error; fn try_from(ipld: Ipld) -> Result { - let peers_key: &str = "peers"; - let server_key: &str = "server"; let map = from_ipld::>(ipld)?; let timestamp = from_ipld( @@ -482,14 +675,14 @@ impl TryFrom for DiscoveredRendezvous { )?; let server = from_ipld( - map.get(server_key) - .ok_or_else(|| anyhow!("missing {server_key}"))? + map.get(SERVER_KEY) + .ok_or_else(|| anyhow!("missing {SERVER_KEY}"))? .to_owned(), )?; let peers = from_ipld::>>( - map.get(peers_key) - .ok_or_else(|| anyhow!("missing {peers_key}"))? + map.get(PEERS_KEY) + .ok_or_else(|| anyhow!("missing {PEERS_KEY}"))? .to_owned(), )?; @@ -501,7 +694,7 @@ impl TryFrom for DiscoveredRendezvous { } } -#[derive(JsonSchema, Debug, Clone)] +#[derive(Debug, Clone, JsonSchema)] #[schemars(rename = "registered_rendezvous")] pub struct RegisteredRendezvous { timestamp: i64, @@ -535,7 +728,6 @@ impl TryFrom for RegisteredRendezvous { type Error = anyhow::Error; fn try_from(ipld: Ipld) -> Result { - let server_key: &str = "server"; let map = from_ipld::>(ipld)?; let timestamp = from_ipld( @@ -545,8 +737,8 @@ impl TryFrom for RegisteredRendezvous { )?; let server = from_ipld( - map.get(server_key) - .ok_or_else(|| anyhow!("missing {server_key}"))? + map.get(SERVER_KEY) + .ok_or_else(|| anyhow!("missing {SERVER_KEY}"))? 
.to_owned(), )?; @@ -554,7 +746,7 @@ impl TryFrom for RegisteredRendezvous { } } -#[derive(JsonSchema, Debug, Clone)] +#[derive(Debug, Clone, JsonSchema)] #[schemars(rename = "registered_rendezvous")] pub struct DiscoverServedRendezvous { timestamp: i64, @@ -588,7 +780,6 @@ impl TryFrom for DiscoverServedRendezvous { type Error = anyhow::Error; fn try_from(ipld: Ipld) -> Result { - let enquirer_key: &str = "enquirer"; let map = from_ipld::>(ipld)?; let timestamp = from_ipld( @@ -598,8 +789,8 @@ impl TryFrom for DiscoverServedRendezvous { )?; let enquirer = from_ipld( - map.get(enquirer_key) - .ok_or_else(|| anyhow!("missing {enquirer_key}"))? + map.get(ENQUIRER_KEY) + .ok_or_else(|| anyhow!("missing {ENQUIRER_KEY}"))? .to_owned(), )?; @@ -610,7 +801,7 @@ impl TryFrom for DiscoverServedRendezvous { } } -#[derive(JsonSchema, Debug, Clone)] +#[derive(Debug, Clone, JsonSchema)] #[schemars(rename = "peer_registered_rendezvous")] pub struct PeerRegisteredRendezvous { timestamp: i64, @@ -660,8 +851,6 @@ impl TryFrom for PeerRegisteredRendezvous { type Error = anyhow::Error; fn try_from(ipld: Ipld) -> Result { - let peer_key: &str = "peer_id"; - let addresses_key: &str = "addresses"; let map = from_ipld::>(ipld)?; let timestamp = from_ipld( @@ -671,14 +860,14 @@ impl TryFrom for PeerRegisteredRendezvous { )?; let peer_id = from_ipld( - map.get(peer_key) - .ok_or_else(|| anyhow!("missing {peer_key}"))? + map.get(PEER_KEY) + .ok_or_else(|| anyhow!("missing {PEER_KEY}"))? .to_owned(), )?; let addresses = from_ipld( - map.get(addresses_key) - .ok_or_else(|| anyhow!("missing {addresses_key}"))? + map.get(ADDRESSES_KEY) + .ok_or_else(|| anyhow!("missing {ADDRESSES_KEY}"))? 
.to_owned(), )?; @@ -696,21 +885,21 @@ mod test { #[derive(Clone, Debug)] struct Fixtures { - peer_id: PeerId, address: Multiaddr, addresses: Vec, + peer_id: PeerId, peers: BTreeMap, peers_vec_addr: BTreeMap>, } fn generate_fixtures() -> Fixtures { Fixtures { - peer_id: PeerId::random(), address: Multiaddr::from_str("/ip4/127.0.0.1/tcp/7000").unwrap(), addresses: vec![ Multiaddr::from_str("/ip4/127.0.0.1/tcp/7000").unwrap(), Multiaddr::from_str("/ip4/127.0.0.1/tcp/7001").unwrap(), ], + peer_id: PeerId::random(), peers: BTreeMap::from([ ( PeerId::random(), @@ -739,14 +928,18 @@ mod test { fn generate_notifications(fixtures: Fixtures) -> Vec<(i64, NetworkNotification)> { let Fixtures { - peer_id, address, addresses, + peer_id, peers, peers_vec_addr, } = fixtures; + let new_listen_addr = NewListenAddr::new(peer_id, address.clone()); let connection_established = ConnectionEstablished::new(peer_id, address.clone()); let connection_closed = ConnectionClosed::new(peer_id, address.clone()); + let outgoing_connection_error = + OutgoingConnectionError::new(Some(peer_id), DialError::NoAddresses); + let incoming_connection_error = IncomingConnectionError::new(ListenError::Aborted); let discovered_mdns = DiscoveredMdns::new(peers); let discovered_rendezvous = DiscoveredRendezvous::new(peer_id, peers_vec_addr); let registered_rendezvous = RegisteredRendezvous::new(peer_id); @@ -754,47 +947,64 @@ mod test { let peer_registered_rendezvous = PeerRegisteredRendezvous::new(peer_id, addresses); vec![ + ( + new_listen_addr.timestamp, + NetworkNotification::NewListenAddr(new_listen_addr), + ), ( connection_established.timestamp, - NetworkNotification::ConnnectionEstablished(connection_established.clone()), + NetworkNotification::ConnnectionEstablished(connection_established), ), ( connection_closed.timestamp, - NetworkNotification::ConnnectionClosed(connection_closed.clone()), + NetworkNotification::ConnnectionClosed(connection_closed), + ), + ( + outgoing_connection_error.timestamp, + 
NetworkNotification::OutgoingConnectionError(outgoing_connection_error), + ), + ( + incoming_connection_error.timestamp, + NetworkNotification::IncomingConnectionError(incoming_connection_error), ), ( discovered_mdns.timestamp, - NetworkNotification::DiscoveredMdns(discovered_mdns.clone()), + NetworkNotification::DiscoveredMdns(discovered_mdns), ), ( discovered_rendezvous.timestamp, - NetworkNotification::DiscoveredRendezvous(discovered_rendezvous.clone()), + NetworkNotification::DiscoveredRendezvous(discovered_rendezvous), ), ( registered_rendezvous.timestamp, - NetworkNotification::RegisteredRendezvous(registered_rendezvous.clone()), + NetworkNotification::RegisteredRendezvous(registered_rendezvous), ), ( discover_served_rendezvous.timestamp, - NetworkNotification::DiscoverServedRendezvous(discover_served_rendezvous.clone()), + NetworkNotification::DiscoverServedRendezvous(discover_served_rendezvous), ), ( peer_registered_rendezvous.timestamp, - NetworkNotification::PeerRegisteredRendezvous(peer_registered_rendezvous.clone()), + NetworkNotification::PeerRegisteredRendezvous(peer_registered_rendezvous), ), ] } fn check_notification(timestamp: i64, notification: NetworkNotification, fixtures: Fixtures) { let Fixtures { - peer_id, address, addresses, + peer_id, peers, peers_vec_addr, } = fixtures; match notification { + NetworkNotification::NewListenAddr(n) => { + assert_eq!(n.timestamp, timestamp); + assert_eq!(PeerId::from_str(&n.peer_id).unwrap(), peer_id); + assert_eq!(Multiaddr::from_str(&n.address).unwrap(), address); + } NetworkNotification::ConnnectionEstablished(n) => { assert_eq!(n.timestamp, timestamp); assert_eq!(PeerId::from_str(&n.peer_id).unwrap(), peer_id); @@ -805,6 +1015,19 @@ mod test { assert_eq!(PeerId::from_str(&n.peer_id).unwrap(), peer_id); assert_eq!(Multiaddr::from_str(&n.address).unwrap(), address); } + NetworkNotification::OutgoingConnectionError(n) => { + assert_eq!(n.timestamp, timestamp); + assert_eq!( + n.peer_id + .map_or(None, |p| 
Some(PeerId::from_str(&p).unwrap())), + Some(peer_id) + ); + assert_eq!(n.error, DialError::NoAddresses.to_string()); + } + NetworkNotification::IncomingConnectionError(n) => { + assert_eq!(n.timestamp, timestamp); + assert_eq!(n.error, ListenError::Aborted.to_string()); + } NetworkNotification::DiscoveredMdns(n) => { assert_eq!(n.timestamp, timestamp); diff --git a/homestar-runtime/src/event_handler/swarm_event.rs b/homestar-runtime/src/event_handler/swarm_event.rs index 836893de..f3b1667a 100644 --- a/homestar-runtime/src/event_handler/swarm_event.rs +++ b/homestar-runtime/src/event_handler/swarm_event.rs @@ -1141,13 +1141,11 @@ async fn handle_swarm_event( ); #[cfg(feature = "websocket-notify")] - notification::emit_event( + notification::emit_network_event( event_handler.ws_evt_sender(), - EventNotificationTyp::SwarmNotification(SwarmNotification::ListeningOn), - btreemap! { - "peerId" => Ipld::String(local_peer.to_string()), - "address" => Ipld::String(address.to_string()) - }, + NetworkNotification::NewListenAddr(notification::NewListenAddr::new( + local_peer, address, + )), ); } SwarmEvent::IncomingConnection { .. } => {} @@ -1313,14 +1311,12 @@ async fn handle_swarm_event( } #[cfg(feature = "websocket-notify")] - notification::emit_event( + notification::emit_network_event( event_handler.ws_evt_sender(), - EventNotificationTyp::SwarmNotification(SwarmNotification::OutgoingConnectionError), - btreemap! 
{ - "peerId" => peer_id.map_or(Ipld::Null, |p| Ipld::String(p.to_string())), - "error" => Ipld::String(error.to_string()) - }, - ); + NetworkNotification::OutgoingConnectionError( + notification::OutgoingConnectionError::new(peer_id, error), + ), + ) } SwarmEvent::IncomingConnectionError { connection_id, @@ -1337,13 +1333,12 @@ async fn handle_swarm_event( "incoming connection error"); #[cfg(feature = "websocket-notify")] - notification::emit_event( + notification::emit_network_event( event_handler.ws_evt_sender(), - EventNotificationTyp::SwarmNotification(SwarmNotification::IncomingConnectionError), - btreemap! { - "error" => Ipld::String(error.to_string()) - }, - ); + NetworkNotification::IncomingConnectionError( + notification::IncomingConnectionError::new(error), + ), + ) } SwarmEvent::ListenerError { listener_id, error } => { error!(subject = "libp2p.listener.err", diff --git a/homestar-runtime/tests/network/notification.rs b/homestar-runtime/tests/network/notification.rs index e5d503a7..8ef42822 100644 --- a/homestar-runtime/tests/network/notification.rs +++ b/homestar-runtime/tests/network/notification.rs @@ -507,7 +507,7 @@ fn test_libp2p_redial_on_connection_error_integration() -> Result<()> { let json: serde_json::Value = serde_json::from_slice(&msg.unwrap().unwrap()).unwrap(); - if json["type"].as_str().unwrap() == "network:outgoingConnectionError" { + if json["outgoing_connection_error"].is_object() { break; } } else { @@ -521,7 +521,7 @@ fn test_libp2p_redial_on_connection_error_integration() -> Result<()> { let json: serde_json::Value = serde_json::from_slice(&msg.unwrap().unwrap()).unwrap(); - if json["type"].as_str().unwrap() == "network:outgoingConnectionError" { + if json["outgoing_connection_error"].is_object() { break; } } else { From 2c75407559cc9e3019e09f2764813025a329e408 Mon Sep 17 00:00:00 2001 From: Brian Ginsburg Date: Thu, 1 Feb 2024 11:19:58 -0800 Subject: [PATCH 29/75] chore: Add network notification display impl --- 
.../src/event_handler/notification/swarm.rs | 25 +++++++++++++++++++ 1 file changed, 25 insertions(+) diff --git a/homestar-runtime/src/event_handler/notification/swarm.rs b/homestar-runtime/src/event_handler/notification/swarm.rs index 49384839..3bd551c7 100644 --- a/homestar-runtime/src/event_handler/notification/swarm.rs +++ b/homestar-runtime/src/event_handler/notification/swarm.rs @@ -142,6 +142,31 @@ pub enum NetworkNotification { PeerRegisteredRendezvous(PeerRegisteredRendezvous), } +impl fmt::Display for NetworkNotification { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match *self { + NetworkNotification::NewListenAddr(_) => write!(f, "new_listen_addr"), + NetworkNotification::ConnnectionEstablished(_) => write!(f, "connection_established"), + NetworkNotification::ConnnectionClosed(_) => write!(f, "connection_closed"), + NetworkNotification::OutgoingConnectionError(_) => { + write!(f, "outgoing_connection_error") + } + NetworkNotification::IncomingConnectionError(_) => { + write!(f, "incoming_connection_error") + } + NetworkNotification::DiscoveredMdns(_) => write!(f, "discovered_mdns"), + NetworkNotification::DiscoveredRendezvous(_) => write!(f, "discovered_rendezvous"), + NetworkNotification::RegisteredRendezvous(_) => write!(f, "registered_rendezvous"), + NetworkNotification::DiscoverServedRendezvous(_) => { + write!(f, "discover_served_rendezvous") + } + NetworkNotification::PeerRegisteredRendezvous(_) => { + write!(f, "peer_registered_rendezvous") + } + } + } +} + impl DagJson for NetworkNotification {} impl From for Ipld { From 110fc5a713d5d82e04d35107a5afd711191bf73a Mon Sep 17 00:00:00 2001 From: Brian Ginsburg Date: Thu, 1 Feb 2024 11:42:20 -0800 Subject: [PATCH 30/75] chore: Smarter use of key consts --- .../src/event_handler/notification/swarm.rs | 52 +++++++++---------- 1 file changed, 26 insertions(+), 26 deletions(-) diff --git a/homestar-runtime/src/event_handler/notification/swarm.rs 
b/homestar-runtime/src/event_handler/notification/swarm.rs index 3bd551c7..6373fb7a 100644 --- a/homestar-runtime/src/event_handler/notification/swarm.rs +++ b/homestar-runtime/src/event_handler/notification/swarm.rs @@ -280,9 +280,9 @@ impl DagJson for NewListenAddr {} impl From for Ipld { fn from(notification: NewListenAddr) -> Self { Ipld::Map(BTreeMap::from([ - ("timestamp".into(), notification.timestamp.into()), - ("peer_id".into(), notification.peer_id.into()), - ("address".into(), notification.address.into()), + (TIMESTAMP_KEY.into(), notification.timestamp.into()), + (PEER_KEY.into(), notification.peer_id.into()), + (ADDRESS_KEY.into(), notification.address.into()), ])) } } @@ -342,9 +342,9 @@ impl DagJson for ConnectionEstablished {} impl From for Ipld { fn from(notification: ConnectionEstablished) -> Self { Ipld::Map(BTreeMap::from([ - ("timestamp".into(), notification.timestamp.into()), - ("peer_id".into(), notification.peer_id.into()), - ("address".into(), notification.address.into()), + (TIMESTAMP_KEY.into(), notification.timestamp.into()), + (PEER_KEY.into(), notification.peer_id.into()), + (ADDRESS_KEY.into(), notification.address.into()), ])) } } @@ -404,9 +404,9 @@ impl DagJson for ConnectionClosed {} impl From for Ipld { fn from(notification: ConnectionClosed) -> Self { Ipld::Map(BTreeMap::from([ - ("timestamp".into(), notification.timestamp.into()), - ("peer_id".into(), notification.peer_id.into()), - ("address".into(), notification.address.into()), + (TIMESTAMP_KEY.into(), notification.timestamp.into()), + (PEER_KEY.into(), notification.peer_id.into()), + (ADDRESS_KEY.into(), notification.address.into()), ])) } } @@ -466,15 +466,15 @@ impl DagJson for OutgoingConnectionError {} impl From for Ipld { fn from(notification: OutgoingConnectionError) -> Self { Ipld::Map(BTreeMap::from([ - ("timestamp".into(), notification.timestamp.into()), + (TIMESTAMP_KEY.into(), notification.timestamp.into()), ( - "peer_id".into(), + PEER_KEY.into(), notification 
.peer_id .map(|peer_id| peer_id.into()) .unwrap_or(Ipld::Null), ), - ("error".into(), notification.error.into()), + (ERROR_KEY.into(), notification.error.into()), ])) } } @@ -534,8 +534,8 @@ impl DagJson for IncomingConnectionError {} impl From for Ipld { fn from(notification: IncomingConnectionError) -> Self { Ipld::Map(BTreeMap::from([ - ("timestamp".into(), notification.timestamp.into()), - ("error".into(), notification.error.into()), + (TIMESTAMP_KEY.into(), notification.timestamp.into()), + (ERROR_KEY.into(), notification.error.into()), ])) } } @@ -593,8 +593,8 @@ impl From for Ipld { .collect(); let map: BTreeMap = BTreeMap::from([ - ("timestamp".into(), notification.timestamp.into()), - ("peers".into(), peers.into()), + (TIMESTAMP_KEY.into(), notification.timestamp.into()), + (PEERS_KEY.into(), peers.into()), ]); Ipld::Map(map) @@ -678,9 +678,9 @@ impl From for Ipld { .collect(); let map: BTreeMap = BTreeMap::from([ - ("timestamp".into(), notification.timestamp.into()), - ("server".into(), notification.server.into()), - ("peers".into(), peers.into()), + (TIMESTAMP_KEY.into(), notification.timestamp.into()), + (SERVER_KEY.into(), notification.server.into()), + (PEERS_KEY.into(), peers.into()), ]); Ipld::Map(map) @@ -741,8 +741,8 @@ impl DagJson for RegisteredRendezvous {} impl From for Ipld { fn from(notification: RegisteredRendezvous) -> Self { let map: BTreeMap = BTreeMap::from([ - ("timestamp".into(), notification.timestamp.into()), - ("server".into(), notification.server.into()), + (TIMESTAMP_KEY.into(), notification.timestamp.into()), + (SERVER_KEY.into(), notification.server.into()), ]); Ipld::Map(map) @@ -793,8 +793,8 @@ impl DagJson for DiscoverServedRendezvous {} impl From for Ipld { fn from(notification: DiscoverServedRendezvous) -> Self { let map: BTreeMap = BTreeMap::from([ - ("timestamp".into(), notification.timestamp.into()), - ("enquirer".into(), notification.enquirer.into()), + (TIMESTAMP_KEY.into(), notification.timestamp.into()), + 
(ENQUIRER_KEY.into(), notification.enquirer.into()), ]); Ipld::Map(map) @@ -854,10 +854,10 @@ impl DagJson for PeerRegisteredRendezvous {} impl From for Ipld { fn from(notification: PeerRegisteredRendezvous) -> Self { let map: BTreeMap = BTreeMap::from([ - ("timestamp".into(), notification.timestamp.into()), - ("peer_id".into(), notification.peer_id.into()), + (TIMESTAMP_KEY.into(), notification.timestamp.into()), + (PEER_KEY.into(), notification.peer_id.into()), ( - "addresses".into(), + ADDRESSES_KEY.into(), Ipld::List( notification .addresses From 121ad7eca7141f8b502ea3a0859bfd3b62a3c4bd Mon Sep 17 00:00:00 2001 From: Brian Ginsburg Date: Thu, 1 Feb 2024 14:45:51 -0800 Subject: [PATCH 31/75] feat: Add gossip notifications and schemas --- homestar-runtime/src/event_handler/event.rs | 31 +- .../src/event_handler/notification/swarm.rs | 272 +++++++++++++++--- .../src/event_handler/swarm_event.rs | 17 +- homestar-runtime/tests/network/gossip.rs | 8 +- 4 files changed, 257 insertions(+), 71 deletions(-) diff --git a/homestar-runtime/src/event_handler/event.rs b/homestar-runtime/src/event_handler/event.rs index ce095e7e..01d6d9c7 100644 --- a/homestar-runtime/src/event_handler/event.rs +++ b/homestar-runtime/src/event_handler/event.rs @@ -5,7 +5,9 @@ use super::swarm_event::FoundEvent; use super::EventHandler; #[cfg(feature = "websocket-notify")] use crate::event_handler::{ - notification::{self, emit_receipt, EventNotificationTyp, SwarmNotification}, + notification::{ + self, emit_receipt, EventNotificationTyp, NetworkNotification, SwarmNotification, + }, swarm_event::{ReceiptEvent, WorkflowInfoEvent}, }; #[cfg(feature = "ipfs")] @@ -367,16 +369,12 @@ impl Captured { ); #[cfg(feature = "websocket-notify")] - notification::emit_event( + notification::emit_network_event( event_handler.ws_evt_sender(), - EventNotificationTyp::SwarmNotification( - SwarmNotification::PublishedReceiptPubsub, + NetworkNotification::PublishedReceiptPubsub( + 
notification::PublishedReceiptPubsub::new(receipt.cid(), receipt.ran()), ), - btreemap! { - "cid" => Ipld::String(receipt.cid().to_string()), - "ran" => Ipld::String(receipt.ran().to_string()) - }, - ); + ) } Err(err) => { warn!( @@ -576,16 +574,15 @@ impl Replay { ); #[cfg(feature = "websocket-notify")] - notification::emit_event( + notification::emit_network_event( event_handler.ws_evt_sender(), - EventNotificationTyp::SwarmNotification( - SwarmNotification::PublishedReceiptPubsub, + NetworkNotification::PublishedReceiptPubsub( + notification::PublishedReceiptPubsub::new( + receipt.cid(), + receipt.ran(), + ), ), - btreemap! { - "cid" => Ipld::String(receipt.cid().to_string()), - "ran" => Ipld::String(receipt.ran().to_string()) - }, - ); + ) }) .map_err(|err| { warn!( diff --git a/homestar-runtime/src/event_handler/notification/swarm.rs b/homestar-runtime/src/event_handler/notification/swarm.rs index 6373fb7a..dae81c14 100644 --- a/homestar-runtime/src/event_handler/notification/swarm.rs +++ b/homestar-runtime/src/event_handler/notification/swarm.rs @@ -5,7 +5,7 @@ use anyhow::anyhow; use chrono::prelude::Utc; use homestar_invocation::ipld::DagJson; -use libipld::{serde::from_ipld, Ipld}; +use libipld::{serde::from_ipld, Cid, Ipld}; use libp2p::{ swarm::{DialError, ListenError}, Multiaddr, PeerId, @@ -16,18 +16,19 @@ use std::{collections::BTreeMap, fmt, str::FromStr}; const ADDRESS_KEY: &str = "address"; const ADDRESSES_KEY: &str = "addresses"; +const CID_KEY: &str = "cid"; const ENQUIRER_KEY: &str = "enquirer"; const ERROR_KEY: &str = "error"; const PEER_KEY: &str = "peer_id"; const PEERS_KEY: &str = "peers"; +const PUBLISHER_KEY: &str = "publisher"; +const RAN_KEY: &str = "ran"; const SERVER_KEY: &str = "server"; const TIMESTAMP_KEY: &str = "timestamp"; // Swarm notification types sent to clients #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] pub(crate) enum SwarmNotification { - PublishedReceiptPubsub, - ReceivedReceiptPubsub, GotReceiptDht, 
PutReceiptDht, GotWorkflowInfoDht, @@ -44,12 +45,6 @@ pub(crate) enum SwarmNotification { impl fmt::Display for SwarmNotification { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match *self { - SwarmNotification::ReceivedReceiptPubsub => { - write!(f, "receivedReceiptPubsub") - } - SwarmNotification::PublishedReceiptPubsub => { - write!(f, "publishedReceiptPubsub") - } SwarmNotification::PutReceiptDht => { write!(f, "putReceiptDht") } @@ -89,8 +84,6 @@ impl FromStr for SwarmNotification { fn from_str(ty: &str) -> Result { match ty { - "receivedReceiptPubsub" => Ok(Self::ReceivedReceiptPubsub), - "publishedReceiptPubsub" => Ok(Self::PublishedReceiptPubsub), "putReciptDht" => Ok(Self::PutReceiptDht), "gotReceiptDht" => Ok(Self::GotReceiptDht), "putWorkflowInfoDht" => Ok(Self::PutWorkflowInfoDht), @@ -140,6 +133,12 @@ pub enum NetworkNotification { /// Rendezvous peer registered notification. #[schemars(rename = "peer_registered_rendezvous")] PeerRegisteredRendezvous(PeerRegisteredRendezvous), + /// Published receipt pubsub notification. + #[schemars(rename = "published_receipt_pubsub")] + PublishedReceiptPubsub(PublishedReceiptPubsub), + /// Received receipt pubsub notification. 
+ #[schemars(rename = "received_receipt_pubsub")] + ReceivedReceiptPubsub(ReceivedReceiptPubsub), } impl fmt::Display for NetworkNotification { @@ -163,6 +162,8 @@ impl fmt::Display for NetworkNotification { NetworkNotification::PeerRegisteredRendezvous(_) => { write!(f, "peer_registered_rendezvous") } + NetworkNotification::PublishedReceiptPubsub(_) => write!(f, "published_receipt_pubsub"), + NetworkNotification::ReceivedReceiptPubsub(_) => write!(f, "received_receipt_pubsub"), } } } @@ -207,6 +208,14 @@ impl From for Ipld { "peer_registered_rendezvous".into(), n.into(), )])), + NetworkNotification::PublishedReceiptPubsub(n) => Ipld::Map(BTreeMap::from([( + "published_receipt_pubsub".into(), + n.into(), + )])), + NetworkNotification::ReceivedReceiptPubsub(n) => Ipld::Map(BTreeMap::from([( + "received_receipt_pubsub".into(), + n.into(), + )])), } } } @@ -249,6 +258,12 @@ impl TryFrom for NetworkNotification { "peer_registered_rendezvous" => Ok(NetworkNotification::PeerRegisteredRendezvous( PeerRegisteredRendezvous::try_from(val.to_owned())?, )), + "published_receipt_pubsub" => Ok(NetworkNotification::PublishedReceiptPubsub( + PublishedReceiptPubsub::try_from(val.to_owned())?, + )), + "received_receipt_pubsub" => Ok(NetworkNotification::ReceivedReceiptPubsub( + ReceivedReceiptPubsub::try_from(val.to_owned())?, + )), _ => Err(anyhow!("Unknown network notification tag type")), } } else { @@ -293,6 +308,12 @@ impl TryFrom for NewListenAddr { fn try_from(ipld: Ipld) -> Result { let map = from_ipld::>(ipld)?; + let timestamp = from_ipld( + map.get(TIMESTAMP_KEY) + .ok_or_else(|| anyhow!("missing {TIMESTAMP_KEY}"))? + .to_owned(), + )?; + let peer_id = from_ipld( map.get(PEER_KEY) .ok_or_else(|| anyhow!("missing {PEER_KEY}"))? @@ -305,12 +326,6 @@ impl TryFrom for NewListenAddr { .to_owned(), )?; - let timestamp = from_ipld( - map.get(TIMESTAMP_KEY) - .ok_or_else(|| anyhow!("missing {TIMESTAMP_KEY}"))? 
- .to_owned(), - )?; - Ok(NewListenAddr { timestamp, peer_id, @@ -355,6 +370,12 @@ impl TryFrom for ConnectionEstablished { fn try_from(ipld: Ipld) -> Result { let map = from_ipld::>(ipld)?; + let timestamp = from_ipld( + map.get(TIMESTAMP_KEY) + .ok_or_else(|| anyhow!("missing {TIMESTAMP_KEY}"))? + .to_owned(), + )?; + let peer_id = from_ipld( map.get(PEER_KEY) .ok_or_else(|| anyhow!("missing {PEER_KEY}"))? @@ -367,12 +388,6 @@ impl TryFrom for ConnectionEstablished { .to_owned(), )?; - let timestamp = from_ipld( - map.get(TIMESTAMP_KEY) - .ok_or_else(|| anyhow!("missing {TIMESTAMP_KEY}"))? - .to_owned(), - )?; - Ok(ConnectionEstablished { timestamp, peer_id, @@ -417,6 +432,12 @@ impl TryFrom for ConnectionClosed { fn try_from(ipld: Ipld) -> Result { let map = from_ipld::>(ipld)?; + let timestamp = from_ipld( + map.get(TIMESTAMP_KEY) + .ok_or_else(|| anyhow!("missing {TIMESTAMP_KEY}"))? + .to_owned(), + )?; + let peer_id = from_ipld( map.get(PEER_KEY) .ok_or_else(|| anyhow!("missing {PEER_KEY}"))? @@ -429,12 +450,6 @@ impl TryFrom for ConnectionClosed { .to_owned(), )?; - let timestamp = from_ipld( - map.get(TIMESTAMP_KEY) - .ok_or_else(|| anyhow!("missing {TIMESTAMP_KEY}"))? - .to_owned(), - )?; - Ok(ConnectionClosed { timestamp, peer_id, @@ -485,6 +500,12 @@ impl TryFrom for OutgoingConnectionError { fn try_from(ipld: Ipld) -> Result { let map = from_ipld::>(ipld)?; + let timestamp = from_ipld( + map.get(TIMESTAMP_KEY) + .ok_or_else(|| anyhow!("missing {TIMESTAMP_KEY}"))? + .to_owned(), + )?; + let peer_id = map .get(PEER_KEY) .and_then(|ipld| match ipld { @@ -499,12 +520,6 @@ impl TryFrom for OutgoingConnectionError { .to_owned(), )?; - let timestamp = from_ipld( - map.get(TIMESTAMP_KEY) - .ok_or_else(|| anyhow!("missing {TIMESTAMP_KEY}"))? 
- .to_owned(), - )?; - Ok(OutgoingConnectionError { timestamp, peer_id, @@ -546,18 +561,18 @@ impl TryFrom for IncomingConnectionError { fn try_from(ipld: Ipld) -> Result { let map = from_ipld::>(ipld)?; - let error = from_ipld( - map.get(ERROR_KEY) - .ok_or_else(|| anyhow!("missing {ERROR_KEY}"))? - .to_owned(), - )?; - let timestamp = from_ipld( map.get(TIMESTAMP_KEY) .ok_or_else(|| anyhow!("missing {TIMESTAMP_KEY}"))? .to_owned(), )?; + let error = from_ipld( + map.get(ERROR_KEY) + .ok_or_else(|| anyhow!("missing {ERROR_KEY}"))? + .to_owned(), + )?; + Ok(IncomingConnectionError { timestamp, error }) } } @@ -904,17 +919,164 @@ impl TryFrom for PeerRegisteredRendezvous { } } +#[derive(Debug, Clone, JsonSchema)] +#[schemars(rename = "published_receipt_pubsub")] +pub struct PublishedReceiptPubsub { + timestamp: i64, + #[schemars(description = "Receipt CID")] + cid: String, + #[schemars(description = "Ran receipt CID")] + ran: String, +} + +impl PublishedReceiptPubsub { + pub(crate) fn new(cid: Cid, ran: String) -> PublishedReceiptPubsub { + PublishedReceiptPubsub { + timestamp: Utc::now().timestamp_millis(), + cid: cid.to_string(), + ran, + } + } +} + +impl DagJson for PublishedReceiptPubsub {} + +impl From for Ipld { + fn from(notification: PublishedReceiptPubsub) -> Self { + let map: BTreeMap = BTreeMap::from([ + (TIMESTAMP_KEY.into(), notification.timestamp.into()), + (CID_KEY.into(), notification.cid.into()), + (RAN_KEY.into(), notification.ran.into()), + ]); + + Ipld::Map(map) + } +} + +impl TryFrom for PublishedReceiptPubsub { + type Error = anyhow::Error; + + fn try_from(ipld: Ipld) -> Result { + let map = from_ipld::>(ipld)?; + + let timestamp = from_ipld( + map.get(TIMESTAMP_KEY) + .ok_or_else(|| anyhow!("missing {TIMESTAMP_KEY}"))? + .to_owned(), + )?; + + let cid = from_ipld( + map.get(CID_KEY) + .ok_or_else(|| anyhow!("missing {CID_KEY}"))? + .to_owned(), + )?; + + let ran = from_ipld( + map.get(RAN_KEY) + .ok_or_else(|| anyhow!("missing {RAN_KEY}"))? 
+ .to_owned(), + )?; + + Ok(PublishedReceiptPubsub { + timestamp, + cid, + ran, + }) + } +} + +#[derive(Debug, Clone, JsonSchema)] +#[schemars(rename = "received_receipt_pubsub")] +pub struct ReceivedReceiptPubsub { + timestamp: i64, + #[schemars(description = "Receipt publisher peer ID")] + publisher: String, + #[schemars(description = "Receipt CID")] + cid: String, + #[schemars(description = "Ran receipt CID")] + ran: String, +} + +impl ReceivedReceiptPubsub { + pub(crate) fn new(publisher: PeerId, cid: Cid, ran: String) -> ReceivedReceiptPubsub { + ReceivedReceiptPubsub { + timestamp: Utc::now().timestamp_millis(), + publisher: publisher.to_string(), + cid: cid.to_string(), + ran, + } + } +} + +impl DagJson for ReceivedReceiptPubsub {} + +impl From for Ipld { + fn from(notification: ReceivedReceiptPubsub) -> Self { + let map: BTreeMap = BTreeMap::from([ + (TIMESTAMP_KEY.into(), notification.timestamp.into()), + (PUBLISHER_KEY.into(), notification.publisher.into()), + (CID_KEY.into(), notification.cid.into()), + (RAN_KEY.into(), notification.ran.into()), + ]); + + Ipld::Map(map) + } +} + +impl TryFrom for ReceivedReceiptPubsub { + type Error = anyhow::Error; + + fn try_from(ipld: Ipld) -> Result { + let map = from_ipld::>(ipld)?; + + let timestamp = from_ipld( + map.get(TIMESTAMP_KEY) + .ok_or_else(|| anyhow!("missing {TIMESTAMP_KEY}"))? + .to_owned(), + )?; + + let publisher = from_ipld( + map.get(PUBLISHER_KEY) + .ok_or_else(|| anyhow!("missing {PUBLISHER_KEY}"))? + .to_owned(), + )?; + + let cid = from_ipld( + map.get(CID_KEY) + .ok_or_else(|| anyhow!("missing {CID_KEY}"))? + .to_owned(), + )?; + + let ran = from_ipld( + map.get(RAN_KEY) + .ok_or_else(|| anyhow!("missing {RAN_KEY}"))? 
+ .to_owned(), + )?; + + Ok(ReceivedReceiptPubsub { + timestamp, + publisher, + cid, + ran, + }) + } +} + #[cfg(test)] mod test { use super::*; + use homestar_invocation::test_utils::cid::generate_cid; + use rand::thread_rng; #[derive(Clone, Debug)] struct Fixtures { address: Multiaddr, addresses: Vec, + cid: Cid, peer_id: PeerId, peers: BTreeMap, peers_vec_addr: BTreeMap>, + ran: Cid, } fn generate_fixtures() -> Fixtures { @@ -924,6 +1086,7 @@ mod test { Multiaddr::from_str("/ip4/127.0.0.1/tcp/7000").unwrap(), Multiaddr::from_str("/ip4/127.0.0.1/tcp/7001").unwrap(), ], + cid: generate_cid(&mut thread_rng()), peer_id: PeerId::random(), peers: BTreeMap::from([ ( @@ -948,6 +1111,7 @@ mod test { ], ), ]), + ran: generate_cid(&mut thread_rng()), } } @@ -955,10 +1119,13 @@ mod test { let Fixtures { address, addresses, + cid, peer_id, peers, peers_vec_addr, + ran, } = fixtures; + let new_listen_addr = NewListenAddr::new(peer_id, address.clone()); let connection_established = ConnectionEstablished::new(peer_id, address.clone()); let connection_closed = ConnectionClosed::new(peer_id, address.clone()); @@ -970,6 +1137,8 @@ mod test { let registered_rendezvous = RegisteredRendezvous::new(peer_id); let discover_served_rendezvous = DiscoverServedRendezvous::new(peer_id); let peer_registered_rendezvous = PeerRegisteredRendezvous::new(peer_id, addresses); + let published_receipt_pubsub = PublishedReceiptPubsub::new(cid, ran.to_string()); + let received_receipt_pubsub = ReceivedReceiptPubsub::new(peer_id, cid, ran.to_string()); vec![ ( @@ -1012,6 +1181,14 @@ mod test { peer_registered_rendezvous.timestamp, NetworkNotification::PeerRegisteredRendezvous(peer_registered_rendezvous), ), + ( + published_receipt_pubsub.timestamp, + NetworkNotification::PublishedReceiptPubsub(published_receipt_pubsub), + ), + ( + received_receipt_pubsub.timestamp, + NetworkNotification::ReceivedReceiptPubsub(received_receipt_pubsub), + ), ] } @@ -1019,9 +1196,11 @@ mod test { let Fixtures { address, 
addresses, + cid, peer_id, peers, peers_vec_addr, + ran, } = fixtures; match notification { @@ -1096,6 +1275,17 @@ mod test { addresses ); } + NetworkNotification::PublishedReceiptPubsub(n) => { + assert_eq!(n.timestamp, timestamp); + assert_eq!(Cid::from_str(&n.cid).unwrap(), cid); + assert_eq!(Cid::from_str(&n.ran).unwrap(), ran); + } + NetworkNotification::ReceivedReceiptPubsub(n) => { + assert_eq!(n.timestamp, timestamp); + assert_eq!(PeerId::from_str(&n.publisher).unwrap(), peer_id); + assert_eq!(Cid::from_str(&n.cid).unwrap(), cid); + assert_eq!(Cid::from_str(&n.ran).unwrap(), ran); + } } } diff --git a/homestar-runtime/src/event_handler/swarm_event.rs b/homestar-runtime/src/event_handler/swarm_event.rs index f3b1667a..2d132b5f 100644 --- a/homestar-runtime/src/event_handler/swarm_event.rs +++ b/homestar-runtime/src/event_handler/swarm_event.rs @@ -536,17 +536,16 @@ async fn handle_swarm_event( .map(|conn| Db::store_receipt(receipt.clone(), conn)); #[cfg(feature = "websocket-notify")] - notification::emit_event( + notification::emit_network_event( event_handler.ws_evt_sender(), - EventNotificationTyp::SwarmNotification( - SwarmNotification::ReceivedReceiptPubsub, + NetworkNotification::ReceivedReceiptPubsub( + notification::ReceivedReceiptPubsub::new( + propagation_source, + receipt.cid(), + receipt.ran(), + ), ), - btreemap! 
{ - "publisher" => Ipld::String(propagation_source.to_string()), - "cid" => Ipld::String(receipt.cid().to_string()), - "ran" => Ipld::String(receipt.ran().to_string()) - }, - ); + ) } Err(err) => debug!(subject = "libp2p.gossipsub.err", category = "handle_swarm_event", diff --git a/homestar-runtime/tests/network/gossip.rs b/homestar-runtime/tests/network/gossip.rs index 1a8e42c1..eee856d8 100644 --- a/homestar-runtime/tests/network/gossip.rs +++ b/homestar-runtime/tests/network/gossip.rs @@ -187,9 +187,9 @@ fn test_libp2p_receipt_gossip_integration() -> Result<()> { let json: serde_json::Value = serde_json::from_slice(&msg.unwrap().unwrap()).unwrap(); - if json["type"].as_str().unwrap() == "network:publishedReceiptPubsub" { + if json["published_receipt_pubsub"].is_object() { published_cids.push( - Cid::from_str(json["data"]["cid"].as_str().unwrap()) + Cid::from_str(json["published_receipt_pubsub"]["cid"].as_str().unwrap()) .expect("Unable to parse published receipt CID."), ); } @@ -209,9 +209,9 @@ fn test_libp2p_receipt_gossip_integration() -> Result<()> { let json: serde_json::Value = serde_json::from_slice(&msg.unwrap().unwrap()).unwrap(); - if json["type"].as_str().unwrap() == "network:receivedReceiptPubsub" { + if json["received_receipt_pubsub"].is_object() { received_cids.push( - Cid::from_str(json["data"]["cid"].as_str().unwrap()) + Cid::from_str(json["received_receipt_pubsub"]["cid"].as_str().unwrap()) .expect("Unable to parse received receipt CID."), ); } From 823afbd08f21fe31e0fc823a3888c2ae8ea87b38 Mon Sep 17 00:00:00 2001 From: Brian Ginsburg Date: Thu, 1 Feb 2024 19:49:17 -0800 Subject: [PATCH 32/75] feat: Add receipt DHT notifications --- homestar-runtime/src/event_handler/event.rs | 38 ++-- .../src/event_handler/notification/swarm.rs | 210 ++++++++++++++++-- .../src/event_handler/swarm_event.rs | 8 - homestar-runtime/tests/network/dht.rs | 6 +- 4 files changed, 216 insertions(+), 46 deletions(-) diff --git a/homestar-runtime/src/event_handler/event.rs 
b/homestar-runtime/src/event_handler/event.rs index 01d6d9c7..756fcf52 100644 --- a/homestar-runtime/src/event_handler/event.rs +++ b/homestar-runtime/src/event_handler/event.rs @@ -176,19 +176,16 @@ impl Event { #[cfg(feature = "websocket-notify")] #[cfg_attr(docsrs, doc(cfg(feature = "websocket-notify")))] Event::StoredRecord(event) => match event { - FoundEvent::Receipt(ReceiptEvent { - peer_id, - receipt, - notification_type, - }) => notification::emit_event( - event_handler.ws_evt_sender(), - notification_type, - btreemap! { - "publisher" => peer_id.map_or(Ipld::Null, |peer_id| Ipld::String(peer_id.to_string())), - "cid" => Ipld::String(receipt.cid().to_string()), - "ran" => Ipld::String(receipt.ran().to_string()) - }, - ), + FoundEvent::Receipt(ReceiptEvent { peer_id, receipt }) => { + notification::emit_network_event( + event_handler.ws_evt_sender(), + NetworkNotification::GotReceiptDht(notification::GotReceiptDht::new( + peer_id, + receipt.cid(), + receipt.ran(), + )), + ) + } FoundEvent::Workflow(WorkflowInfoEvent { peer_id, workflow_info, @@ -431,16 +428,13 @@ impl Captured { ); #[cfg(feature = "websocket-notify")] - notification::emit_event( + notification::emit_network_event( event_handler.ws_evt_sender(), - EventNotificationTyp::SwarmNotification( - SwarmNotification::PutReceiptDht, - ), - btreemap! 
{ - "cid" => Ipld::String(receipt.cid().to_string()), - "ran" => Ipld::String(receipt.ran().to_string()) - }, - ); + NetworkNotification::PutReceiptDht(notification::PutReceiptDht::new( + receipt.cid(), + receipt.ran(), + )), + ) }, ); diff --git a/homestar-runtime/src/event_handler/notification/swarm.rs b/homestar-runtime/src/event_handler/notification/swarm.rs index dae81c14..8dc1d7c0 100644 --- a/homestar-runtime/src/event_handler/notification/swarm.rs +++ b/homestar-runtime/src/event_handler/notification/swarm.rs @@ -29,8 +29,6 @@ const TIMESTAMP_KEY: &str = "timestamp"; // Swarm notification types sent to clients #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] pub(crate) enum SwarmNotification { - GotReceiptDht, - PutReceiptDht, GotWorkflowInfoDht, PutWorkflowInfoDht, ReceiptQuorumSuccess, @@ -41,16 +39,9 @@ pub(crate) enum SwarmNotification { ReceivedWorkflowInfo, } -// TODO Fill these in for NetworkNotification impl fmt::Display for SwarmNotification { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match *self { - SwarmNotification::PutReceiptDht => { - write!(f, "putReceiptDht") - } - SwarmNotification::GotReceiptDht => { - write!(f, "gotReceiptDht") - } SwarmNotification::PutWorkflowInfoDht => { write!(f, "putWorkflowInfoDht") } @@ -84,8 +75,6 @@ impl FromStr for SwarmNotification { fn from_str(ty: &str) -> Result { match ty { - "putReciptDht" => Ok(Self::PutReceiptDht), - "gotReceiptDht" => Ok(Self::GotReceiptDht), "putWorkflowInfoDht" => Ok(Self::PutWorkflowInfoDht), "gotWorkflowInfoDht" => Ok(Self::GotWorkflowInfoDht), "receiptQuorumSuccess" => Ok(Self::ReceiptQuorumSuccess), @@ -139,6 +128,12 @@ pub enum NetworkNotification { /// Received receipt pubsub notification. #[schemars(rename = "received_receipt_pubsub")] ReceivedReceiptPubsub(ReceivedReceiptPubsub), + /// Put receipt DHT notification. + #[schemars(rename = "put_receipt_dht")] + PutReceiptDht(PutReceiptDht), + /// Got receipt DHT notification. 
+ #[schemars(rename = "got_receipt_dht")] + GotReceiptDht(GotReceiptDht), } impl fmt::Display for NetworkNotification { @@ -164,6 +159,8 @@ impl fmt::Display for NetworkNotification { } NetworkNotification::PublishedReceiptPubsub(_) => write!(f, "published_receipt_pubsub"), NetworkNotification::ReceivedReceiptPubsub(_) => write!(f, "received_receipt_pubsub"), + NetworkNotification::PutReceiptDht(_) => write!(f, "put_receipt_dht"), + NetworkNotification::GotReceiptDht(_) => write!(f, "got_receipt_dht"), } } } @@ -216,6 +213,12 @@ impl From for Ipld { "received_receipt_pubsub".into(), n.into(), )])), + NetworkNotification::PutReceiptDht(n) => { + Ipld::Map(BTreeMap::from([("put_receipt_dht".into(), n.into())])) + } + NetworkNotification::GotReceiptDht(n) => { + Ipld::Map(BTreeMap::from([("got_receipt_dht".into(), n.into())])) + } } } } @@ -264,6 +267,12 @@ impl TryFrom for NetworkNotification { "received_receipt_pubsub" => Ok(NetworkNotification::ReceivedReceiptPubsub( ReceivedReceiptPubsub::try_from(val.to_owned())?, )), + "put_receipt_dht" => Ok(NetworkNotification::PutReceiptDht( + PutReceiptDht::try_from(val.to_owned())?, + )), + "got_receipt_dht" => Ok(NetworkNotification::GotReceiptDht( + GotReceiptDht::try_from(val.to_owned())?, + )), _ => Err(anyhow!("Unknown network notification tag type")), } } else { @@ -1062,6 +1071,157 @@ impl TryFrom for ReceivedReceiptPubsub { } } +#[derive(Debug, Clone, JsonSchema)] +#[schemars(rename = "put_receipt_dht")] +pub struct PutReceiptDht { + timestamp: i64, + #[schemars(description = "Receipt CID")] + cid: String, + #[schemars(description = "Ran receipt CID")] + ran: String, +} + +impl PutReceiptDht { + pub(crate) fn new(cid: Cid, ran: String) -> PutReceiptDht { + PutReceiptDht { + timestamp: Utc::now().timestamp_millis(), + cid: cid.to_string(), + ran, + } + } +} + +impl DagJson for PutReceiptDht {} + +impl From for Ipld { + fn from(notification: PutReceiptDht) -> Self { + let map: BTreeMap = BTreeMap::from([ + 
(TIMESTAMP_KEY.into(), notification.timestamp.into()), + (CID_KEY.into(), notification.cid.into()), + (RAN_KEY.into(), notification.ran.into()), + ]); + + Ipld::Map(map) + } +} + +impl TryFrom for PutReceiptDht { + type Error = anyhow::Error; + + fn try_from(ipld: Ipld) -> Result { + let map = from_ipld::>(ipld)?; + + let timestamp = from_ipld( + map.get(TIMESTAMP_KEY) + .ok_or_else(|| anyhow!("missing {TIMESTAMP_KEY}"))? + .to_owned(), + )?; + + let cid = from_ipld( + map.get(CID_KEY) + .ok_or_else(|| anyhow!("missing {CID_KEY}"))? + .to_owned(), + )?; + + let ran = from_ipld( + map.get(RAN_KEY) + .ok_or_else(|| anyhow!("missing {RAN_KEY}"))? + .to_owned(), + )?; + + Ok(PutReceiptDht { + timestamp, + cid, + ran, + }) + } +} + +#[derive(Debug, Clone, JsonSchema)] +#[schemars(rename = "got_receipt_dht")] +pub struct GotReceiptDht { + timestamp: i64, + #[schemars(description = "Receipt publisher peer ID")] + publisher: Option, + #[schemars(description = "Receipt CID")] + cid: String, + #[schemars(description = "Ran receipt CID")] + ran: String, +} + +impl GotReceiptDht { + pub(crate) fn new(publisher: Option, cid: Cid, ran: String) -> GotReceiptDht { + GotReceiptDht { + timestamp: Utc::now().timestamp_millis(), + publisher: publisher.map(|p| p.to_string()), + cid: cid.to_string(), + ran, + } + } +} + +impl DagJson for GotReceiptDht {} + +impl From for Ipld { + fn from(notification: GotReceiptDht) -> Self { + let map: BTreeMap = BTreeMap::from([ + (TIMESTAMP_KEY.into(), notification.timestamp.into()), + ( + PUBLISHER_KEY.into(), + notification + .publisher + .map(|peer_id| peer_id.into()) + .unwrap_or(Ipld::Null), + ), + (CID_KEY.into(), notification.cid.into()), + (RAN_KEY.into(), notification.ran.into()), + ]); + + Ipld::Map(map) + } +} + +impl TryFrom for GotReceiptDht { + type Error = anyhow::Error; + + fn try_from(ipld: Ipld) -> Result { + let map = from_ipld::>(ipld)?; + + let timestamp = from_ipld( + map.get(TIMESTAMP_KEY) + .ok_or_else(|| anyhow!("missing 
{TIMESTAMP_KEY}"))? + .to_owned(), + )?; + + let publisher = map + .get(PUBLISHER_KEY) + .and_then(|ipld| match ipld { + Ipld::Null => None, + ipld => Some(ipld), + }) + .and_then(|ipld| from_ipld(ipld.to_owned()).ok()); + + let cid = from_ipld( + map.get(CID_KEY) + .ok_or_else(|| anyhow!("missing {CID_KEY}"))? + .to_owned(), + )?; + + let ran = from_ipld( + map.get(RAN_KEY) + .ok_or_else(|| anyhow!("missing {RAN_KEY}"))? + .to_owned(), + )?; + + Ok(GotReceiptDht { + timestamp, + publisher, + cid, + ran, + }) + } +} + #[cfg(test)] mod test { use super::*; @@ -1139,6 +1299,8 @@ mod test { let peer_registered_rendezvous = PeerRegisteredRendezvous::new(peer_id, addresses); let published_receipt_pubsub = PublishedReceiptPubsub::new(cid, ran.to_string()); let received_receipt_pubsub = ReceivedReceiptPubsub::new(peer_id, cid, ran.to_string()); + let put_receipt_dht = PutReceiptDht::new(cid, ran.to_string()); + let got_receipt_dht = GotReceiptDht::new(Some(peer_id), cid, ran.to_string()); vec![ ( @@ -1189,6 +1351,14 @@ mod test { received_receipt_pubsub.timestamp, NetworkNotification::ReceivedReceiptPubsub(received_receipt_pubsub), ), + ( + put_receipt_dht.timestamp, + NetworkNotification::PutReceiptDht(put_receipt_dht), + ), + ( + got_receipt_dht.timestamp, + NetworkNotification::GotReceiptDht(got_receipt_dht), + ), ] } @@ -1222,8 +1392,7 @@ mod test { NetworkNotification::OutgoingConnectionError(n) => { assert_eq!(n.timestamp, timestamp); assert_eq!( - n.peer_id - .map_or(None, |p| Some(PeerId::from_str(&p).unwrap())), + n.peer_id.and_then(|p| Some(PeerId::from_str(&p).unwrap())), Some(peer_id) ); assert_eq!(n.error, DialError::NoAddresses.to_string()); @@ -1286,6 +1455,21 @@ mod test { assert_eq!(Cid::from_str(&n.cid).unwrap(), cid); assert_eq!(Cid::from_str(&n.ran).unwrap(), ran); } + NetworkNotification::PutReceiptDht(n) => { + assert_eq!(n.timestamp, timestamp); + assert_eq!(Cid::from_str(&n.cid).unwrap(), cid); + assert_eq!(Cid::from_str(&n.ran).unwrap(), ran); + } 
+ NetworkNotification::GotReceiptDht(n) => { + assert_eq!(n.timestamp, timestamp); + assert_eq!( + n.publisher + .and_then(|p| Some(PeerId::from_str(&p).unwrap())), + Some(peer_id) + ); + assert_eq!(Cid::from_str(&n.cid).unwrap(), cid); + assert_eq!(Cid::from_str(&n.ran).unwrap(), ran); + } } } diff --git a/homestar-runtime/src/event_handler/swarm_event.rs b/homestar-runtime/src/event_handler/swarm_event.rs index 2d132b5f..57385c88 100644 --- a/homestar-runtime/src/event_handler/swarm_event.rs +++ b/homestar-runtime/src/event_handler/swarm_event.rs @@ -76,9 +76,6 @@ pub(crate) enum FoundEvent { pub(crate) struct ReceiptEvent { pub(crate) peer_id: Option, pub(crate) receipt: Receipt, - #[cfg(feature = "websocket-notify")] - #[cfg_attr(docsrs, doc(cfg(feature = "websocket-notify")))] - pub(crate) notification_type: EventNotificationTyp, } /// [FoundEvent] variant for workflow info found on the DHT. @@ -641,11 +638,6 @@ async fn handle_swarm_event( FoundEvent::Receipt(ReceiptEvent { peer_id, receipt: receipt.clone(), - #[cfg(feature = "websocket-notify")] - notification_type: - EventNotificationTyp::SwarmNotification( - SwarmNotification::GotReceiptDht, - ), }), )); diff --git a/homestar-runtime/tests/network/dht.rs b/homestar-runtime/tests/network/dht.rs index 4d6bf33c..8169312c 100644 --- a/homestar-runtime/tests/network/dht.rs +++ b/homestar-runtime/tests/network/dht.rs @@ -208,7 +208,7 @@ fn test_libp2p_dht_records_integration() -> Result<()> { let json: serde_json::Value = serde_json::from_slice(&msg.unwrap().unwrap()).unwrap(); - if json["type"].as_str().unwrap() == "network:putReceiptDht" { + if json["put_receipt_dht"].is_object() { put_receipt = true; } else if json["type"].as_str().unwrap() == "network:putWorkflowInfoDht" { put_workflow_info = true; @@ -262,8 +262,8 @@ fn test_libp2p_dht_records_integration() -> Result<()> { // let json: serde_json::Value = // serde_json::from_slice(&msg.unwrap().unwrap()).unwrap(); - // if json["type"].as_str().unwrap() == 
"network:gotReceiptDht" { - // received_receipt_cid = Cid::from_str(json["data"]["cid"].as_str().unwrap()) + // if json["got_receipt_dht"].is_object() { + // received_receipt_cid = Cid::from_str(json["got_receipt_dht"]["cid"].as_str().unwrap()) // .expect("Unable to parse received receipt CID."); // break; // } From 146eb4e47c4bbb4cbedb577b0f4dc2a289fc344a Mon Sep 17 00:00:00 2001 From: Brian Ginsburg Date: Fri, 2 Feb 2024 10:58:49 -0800 Subject: [PATCH 33/75] feat: Add workflow info DHT notifications --- homestar-runtime/src/event_handler/event.rs | 61 ++- .../src/event_handler/notification.rs | 14 - .../src/event_handler/notification/swarm.rs | 386 +++++++++++++++++- .../src/event_handler/swarm_event.rs | 12 +- homestar-runtime/tests/network/dht.rs | 18 +- 5 files changed, 417 insertions(+), 74 deletions(-) diff --git a/homestar-runtime/src/event_handler/event.rs b/homestar-runtime/src/event_handler/event.rs index 756fcf52..c2929fc1 100644 --- a/homestar-runtime/src/event_handler/event.rs +++ b/homestar-runtime/src/event_handler/event.rs @@ -5,9 +5,7 @@ use super::swarm_event::FoundEvent; use super::EventHandler; #[cfg(feature = "websocket-notify")] use crate::event_handler::{ - notification::{ - self, emit_receipt, EventNotificationTyp, NetworkNotification, SwarmNotification, - }, + notification::{self, emit_receipt, NetworkNotification}, swarm_event::{ReceiptEvent, WorkflowInfoEvent}, }; #[cfg(feature = "ipfs")] @@ -34,7 +32,6 @@ use libp2p::{ PeerId, }; #[cfg(feature = "websocket-notify")] -use maplit::btreemap; use std::{ collections::{HashMap, HashSet}, num::NonZeroUsize, @@ -189,23 +186,26 @@ impl Event { FoundEvent::Workflow(WorkflowInfoEvent { peer_id, workflow_info, - notification_type, - }) => { - if let Some(peer_label) = notification_type.workflow_info_source_label() { - notification::emit_event( - event_handler.ws_evt_sender(), - notification_type, - btreemap! 
{ - peer_label => peer_id.map_or(Ipld::Null, |peer_id| Ipld::String(peer_id.to_string())), - "cid" => Ipld::String(workflow_info.cid().to_string()), - "name" => workflow_info.name.map_or(Ipld::Null, |name| Ipld::String(name.to_string())), - "numTasks" => Ipld::Integer(workflow_info.num_tasks as i128), - "progress" => Ipld::List(workflow_info.progress.iter().map(|cid| Ipld::String(cid.to_string())).collect()), - "progressCount" => Ipld::Integer(workflow_info.progress_count as i128), - }, - ) - } - } + workflow_source, + }) => notification::emit_network_event( + event_handler.ws_evt_sender(), + match workflow_source { + notification::WorkflowInfoSource::Dht => { + NetworkNotification::GotWorkflowInfoDht( + notification::GotWorkflowInfoDht::new( + peer_id, + workflow_info.cid(), + workflow_info.name, + workflow_info.num_tasks, + workflow_info.progress, + workflow_info.progress_count, + ), + ) + } + // TODO Fill this case in! + notification::WorkflowInfoSource::RequestResponse => todo!(), + }, + ), }, Event::OutboundRequest(PeerRequest { peer, @@ -470,18 +470,17 @@ impl Captured { ); #[cfg(feature = "websocket-notify")] - notification::emit_event( + notification::emit_network_event( event_handler.ws_evt_sender(), - EventNotificationTyp::SwarmNotification( - SwarmNotification::PutWorkflowInfoDht, + NetworkNotification::PutWorkflowInfoDht( + notification::PutWorkflowInfoDht::new( + self.workflow.cid(), + self.workflow.name.to_owned(), + self.workflow.num_tasks, + self.workflow.progress.to_owned(), + self.workflow.progress_count, + ), ), - btreemap! 
{ - "cid" => Ipld::String(self.workflow.cid().to_string()), - "name" => self.workflow.name.as_ref().map_or(Ipld::Null, |name| Ipld::String(name.to_string())), - "numTasks" => Ipld::Integer(self.workflow.num_tasks as i128), - "progress" => Ipld::List(self.workflow.progress.iter().map(|cid| Ipld::String(cid.to_string())).collect()), - "progressCount" => Ipld::Integer(self.workflow.progress_count as i128), - }, ) }, ); diff --git a/homestar-runtime/src/event_handler/notification.rs b/homestar-runtime/src/event_handler/notification.rs index 5fa66381..15caec1b 100644 --- a/homestar-runtime/src/event_handler/notification.rs +++ b/homestar-runtime/src/event_handler/notification.rs @@ -193,20 +193,6 @@ pub(crate) enum EventNotificationTyp { SwarmNotification(SwarmNotification), } -impl EventNotificationTyp { - pub(crate) fn workflow_info_source_label<'a>(&self) -> Option<&'a str> { - match &self { - EventNotificationTyp::SwarmNotification(SwarmNotification::ReceivedWorkflowInfo) => { - Some("provider") - } - EventNotificationTyp::SwarmNotification(SwarmNotification::GotWorkflowInfoDht) => { - Some("publisher") - } - _ => None, - } - } -} - impl fmt::Display for EventNotificationTyp { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { diff --git a/homestar-runtime/src/event_handler/notification/swarm.rs b/homestar-runtime/src/event_handler/notification/swarm.rs index 8dc1d7c0..3e6d2177 100644 --- a/homestar-runtime/src/event_handler/notification/swarm.rs +++ b/homestar-runtime/src/event_handler/notification/swarm.rs @@ -4,6 +4,7 @@ use anyhow::anyhow; use chrono::prelude::Utc; +use faststr::FastStr; use homestar_invocation::ipld::DagJson; use libipld::{serde::from_ipld, Cid, Ipld}; use libp2p::{ @@ -19,8 +20,12 @@ const ADDRESSES_KEY: &str = "addresses"; const CID_KEY: &str = "cid"; const ENQUIRER_KEY: &str = "enquirer"; const ERROR_KEY: &str = "error"; +const NAME_KEY: &str = "name"; +const NUM_TASKS_KEY: &str = "num_tasks"; const PEER_KEY: &str = 
"peer_id"; const PEERS_KEY: &str = "peers"; +const PROGRESS_KEY: &str = "progress"; +const PROGRESS_COUNT_KEY: &str = "progress_count"; const PUBLISHER_KEY: &str = "publisher"; const RAN_KEY: &str = "ran"; const SERVER_KEY: &str = "server"; @@ -29,8 +34,6 @@ const TIMESTAMP_KEY: &str = "timestamp"; // Swarm notification types sent to clients #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] pub(crate) enum SwarmNotification { - GotWorkflowInfoDht, - PutWorkflowInfoDht, ReceiptQuorumSuccess, ReceiptQuorumFailure, WorkflowInfoQuorumSuccess, @@ -42,12 +45,6 @@ pub(crate) enum SwarmNotification { impl fmt::Display for SwarmNotification { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match *self { - SwarmNotification::PutWorkflowInfoDht => { - write!(f, "putWorkflowInfoDht") - } - SwarmNotification::GotWorkflowInfoDht => { - write!(f, "gotWorkflowInfoDht") - } SwarmNotification::ReceiptQuorumSuccess => { write!(f, "receiptQuorumSuccess") } @@ -75,8 +72,6 @@ impl FromStr for SwarmNotification { fn from_str(ty: &str) -> Result { match ty { - "putWorkflowInfoDht" => Ok(Self::PutWorkflowInfoDht), - "gotWorkflowInfoDht" => Ok(Self::GotWorkflowInfoDht), "receiptQuorumSuccess" => Ok(Self::ReceiptQuorumSuccess), "receiptQuorumFailure" => Ok(Self::ReceiptQuorumFailure), "workflowInfoQuorumSuccess" => Ok(Self::WorkflowInfoQuorumSuccess), @@ -134,6 +129,18 @@ pub enum NetworkNotification { /// Got receipt DHT notification. #[schemars(rename = "got_receipt_dht")] GotReceiptDht(GotReceiptDht), + /// Put workflow info DHT notification. + #[schemars(rename = "put_workflow_info_dht")] + PutWorkflowInfoDht(PutWorkflowInfoDht), + /// Put workflow info DHT notification. 
+ #[schemars(rename = "got_workflow_info_dht")] + GotWorkflowInfoDht(GotWorkflowInfoDht), +} + +#[derive(Debug, Clone, PartialEq)] +pub(crate) enum WorkflowInfoSource { + Dht, + RequestResponse, } impl fmt::Display for NetworkNotification { @@ -161,6 +168,8 @@ impl fmt::Display for NetworkNotification { NetworkNotification::ReceivedReceiptPubsub(_) => write!(f, "received_receipt_pubsub"), NetworkNotification::PutReceiptDht(_) => write!(f, "put_receipt_dht"), NetworkNotification::GotReceiptDht(_) => write!(f, "got_receipt_dht"), + NetworkNotification::PutWorkflowInfoDht(_) => write!(f, "put_workflow_info_dht"), + NetworkNotification::GotWorkflowInfoDht(_) => write!(f, "got_workflow_info_dht"), } } } @@ -219,6 +228,12 @@ impl From for Ipld { NetworkNotification::GotReceiptDht(n) => { Ipld::Map(BTreeMap::from([("got_receipt_dht".into(), n.into())])) } + NetworkNotification::PutWorkflowInfoDht(n) => { + Ipld::Map(BTreeMap::from([("put_workflow_info_dht".into(), n.into())])) + } + NetworkNotification::GotWorkflowInfoDht(n) => { + Ipld::Map(BTreeMap::from([("got_workflow_info_dht".into(), n.into())])) + } } } } @@ -273,6 +288,12 @@ impl TryFrom for NetworkNotification { "got_receipt_dht" => Ok(NetworkNotification::GotReceiptDht( GotReceiptDht::try_from(val.to_owned())?, )), + "put_workflow_info_dht" => Ok(NetworkNotification::PutWorkflowInfoDht( + PutWorkflowInfoDht::try_from(val.to_owned())?, + )), + "got_workflow_info_dht" => Ok(NetworkNotification::GotWorkflowInfoDht( + GotWorkflowInfoDht::try_from(val.to_owned())?, + )), _ => Err(anyhow!("Unknown network notification tag type")), } } else { @@ -1222,6 +1243,276 @@ impl TryFrom for GotReceiptDht { } } +#[derive(Debug, Clone, JsonSchema)] +#[schemars(rename = "put_workflow_info_dht")] +pub struct PutWorkflowInfoDht { + timestamp: i64, + #[schemars(description = "Workflow info CID")] + cid: String, + #[schemars(description = "Optional workflow name")] + name: Option, + #[schemars(description = "Number of tasks in 
workflow")] + num_tasks: u32, + #[schemars(description = "Completed task CIDs")] + progress: Vec, + #[schemars(description = "Number of workflow tasks completed")] + progress_count: u32, +} + +impl PutWorkflowInfoDht { + pub(crate) fn new( + cid: Cid, + name: Option, + num_tasks: u32, + progress: Vec, + progress_count: u32, + ) -> PutWorkflowInfoDht { + PutWorkflowInfoDht { + timestamp: Utc::now().timestamp_millis(), + cid: cid.to_string(), + name: name.map(|n| n.into()), + num_tasks, + progress: progress.iter().map(|cid| cid.to_string()).collect(), + progress_count, + } + } +} + +impl DagJson for PutWorkflowInfoDht {} + +impl From for Ipld { + fn from(notification: PutWorkflowInfoDht) -> Self { + let map: BTreeMap = BTreeMap::from([ + (TIMESTAMP_KEY.into(), notification.timestamp.into()), + (CID_KEY.into(), notification.cid.into()), + ( + NAME_KEY.into(), + notification + .name + .map(|peer_id| peer_id.into()) + .unwrap_or(Ipld::Null), + ), + (NUM_TASKS_KEY.into(), notification.num_tasks.into()), + ( + PROGRESS_KEY.into(), + Ipld::List( + notification + .progress + .iter() + .map(|cid| Ipld::String(cid.to_string())) + .collect(), + ), + ), + ( + PROGRESS_COUNT_KEY.into(), + notification.progress_count.into(), + ), + ]); + + Ipld::Map(map) + } +} + +impl TryFrom for PutWorkflowInfoDht { + type Error = anyhow::Error; + + fn try_from(ipld: Ipld) -> Result { + let map = from_ipld::>(ipld)?; + + let timestamp = from_ipld( + map.get(TIMESTAMP_KEY) + .ok_or_else(|| anyhow!("missing {TIMESTAMP_KEY}"))? + .to_owned(), + )?; + + let cid = from_ipld( + map.get(CID_KEY) + .ok_or_else(|| anyhow!("missing {CID_KEY}"))? + .to_owned(), + )?; + + let name = map + .get(NAME_KEY) + .and_then(|ipld| match ipld { + Ipld::Null => None, + ipld => Some(ipld), + }) + .and_then(|ipld| from_ipld(ipld.to_owned()).ok()); + + let num_tasks = from_ipld( + map.get(NUM_TASKS_KEY) + .ok_or_else(|| anyhow!("missing {NUM_TASKS_KEY}"))? 
+ .to_owned(), + )?; + + let progress = from_ipld::>( + map.get(PROGRESS_KEY) + .ok_or_else(|| anyhow!("missing {PROGRESS_KEY}"))? + .to_owned(), + )?; + + let progress_count = from_ipld( + map.get(PROGRESS_COUNT_KEY) + .ok_or_else(|| anyhow!("missing {PROGRESS_COUNT_KEY}"))? + .to_owned(), + )?; + + Ok(PutWorkflowInfoDht { + timestamp, + cid, + name, + num_tasks, + progress, + progress_count, + }) + } +} + +#[derive(Debug, Clone, JsonSchema)] +#[schemars(rename = "got_workflow_info_dht")] +pub struct GotWorkflowInfoDht { + timestamp: i64, + #[schemars(description = "Workflow info publisher peer ID")] + publisher: Option, + #[schemars(description = "Workflow info CID")] + cid: String, + #[schemars(description = "Optional workflow name")] + name: Option, + #[schemars(description = "Number of tasks in workflow")] + num_tasks: u32, + #[schemars(description = "Completed task CIDs")] + progress: Vec, + #[schemars(description = "Number of workflow tasks completed")] + progress_count: u32, +} + +impl GotWorkflowInfoDht { + pub(crate) fn new( + publisher: Option, + cid: Cid, + name: Option, + num_tasks: u32, + progress: Vec, + progress_count: u32, + ) -> GotWorkflowInfoDht { + GotWorkflowInfoDht { + timestamp: Utc::now().timestamp_millis(), + publisher: publisher.map(|p| p.to_string()), + cid: cid.to_string(), + name: name.map(|n| n.into()), + num_tasks, + progress: progress.iter().map(|cid| cid.to_string()).collect(), + progress_count, + } + } +} + +impl DagJson for GotWorkflowInfoDht {} + +impl From for Ipld { + fn from(notification: GotWorkflowInfoDht) -> Self { + let map: BTreeMap = BTreeMap::from([ + (TIMESTAMP_KEY.into(), notification.timestamp.into()), + ( + PUBLISHER_KEY.into(), + notification + .publisher + .map(|peer_id| peer_id.into()) + .unwrap_or(Ipld::Null), + ), + (CID_KEY.into(), notification.cid.into()), + ( + NAME_KEY.into(), + notification + .name + .map(|peer_id| peer_id.into()) + .unwrap_or(Ipld::Null), + ), + (NUM_TASKS_KEY.into(), 
notification.num_tasks.into()), + ( + PROGRESS_KEY.into(), + Ipld::List( + notification + .progress + .iter() + .map(|cid| Ipld::String(cid.to_string())) + .collect(), + ), + ), + ( + PROGRESS_COUNT_KEY.into(), + notification.progress_count.into(), + ), + ]); + + Ipld::Map(map) + } +} + +impl TryFrom for GotWorkflowInfoDht { + type Error = anyhow::Error; + + fn try_from(ipld: Ipld) -> Result { + let map = from_ipld::>(ipld)?; + + let timestamp = from_ipld( + map.get(TIMESTAMP_KEY) + .ok_or_else(|| anyhow!("missing {TIMESTAMP_KEY}"))? + .to_owned(), + )?; + + let publisher = map + .get(PUBLISHER_KEY) + .and_then(|ipld| match ipld { + Ipld::Null => None, + ipld => Some(ipld), + }) + .and_then(|ipld| from_ipld(ipld.to_owned()).ok()); + + let cid = from_ipld( + map.get(CID_KEY) + .ok_or_else(|| anyhow!("missing {CID_KEY}"))? + .to_owned(), + )?; + + let name = map + .get(NAME_KEY) + .and_then(|ipld| match ipld { + Ipld::Null => None, + ipld => Some(ipld), + }) + .and_then(|ipld| from_ipld(ipld.to_owned()).ok()); + + let num_tasks = from_ipld( + map.get(NUM_TASKS_KEY) + .ok_or_else(|| anyhow!("missing {NUM_TASKS_KEY}"))? + .to_owned(), + )?; + + let progress = from_ipld::>( + map.get(PROGRESS_KEY) + .ok_or_else(|| anyhow!("missing {PROGRESS_KEY}"))? + .to_owned(), + )?; + + let progress_count = from_ipld( + map.get(PROGRESS_COUNT_KEY) + .ok_or_else(|| anyhow!("missing {PROGRESS_COUNT_KEY}"))? 
+ .to_owned(), + )?; + + Ok(GotWorkflowInfoDht { + timestamp, + publisher, + cid, + name, + num_tasks, + progress, + progress_count, + }) + } +} + #[cfg(test)] mod test { use super::*; @@ -1233,9 +1524,13 @@ mod test { address: Multiaddr, addresses: Vec, cid: Cid, + name: FastStr, + num_tasks: u32, peer_id: PeerId, peers: BTreeMap, peers_vec_addr: BTreeMap>, + progress: Vec, + progress_count: u32, ran: Cid, } @@ -1247,6 +1542,8 @@ mod test { Multiaddr::from_str("/ip4/127.0.0.1/tcp/7001").unwrap(), ], cid: generate_cid(&mut thread_rng()), + name: FastStr::new("Strong Bad"), + num_tasks: 1, peer_id: PeerId::random(), peers: BTreeMap::from([ ( @@ -1271,6 +1568,8 @@ mod test { ], ), ]), + progress: vec![generate_cid(&mut thread_rng())], + progress_count: 1, ran: generate_cid(&mut thread_rng()), } } @@ -1280,9 +1579,13 @@ mod test { address, addresses, cid, + name, + num_tasks, peer_id, peers, peers_vec_addr, + progress, + progress_count, ran, } = fixtures; @@ -1301,6 +1604,21 @@ mod test { let received_receipt_pubsub = ReceivedReceiptPubsub::new(peer_id, cid, ran.to_string()); let put_receipt_dht = PutReceiptDht::new(cid, ran.to_string()); let got_receipt_dht = GotReceiptDht::new(Some(peer_id), cid, ran.to_string()); + let put_workflow_info_dht = PutWorkflowInfoDht::new( + cid, + Some(name.clone()), + num_tasks, + progress.clone(), + progress_count, + ); + let got_workflow_info_dht = GotWorkflowInfoDht::new( + Some(peer_id), + cid, + Some(name), + num_tasks, + progress, + progress_count, + ); vec![ ( @@ -1359,6 +1677,14 @@ mod test { got_receipt_dht.timestamp, NetworkNotification::GotReceiptDht(got_receipt_dht), ), + ( + put_workflow_info_dht.timestamp, + NetworkNotification::PutWorkflowInfoDht(put_workflow_info_dht), + ), + ( + got_workflow_info_dht.timestamp, + NetworkNotification::GotWorkflowInfoDht(got_workflow_info_dht), + ), ] } @@ -1367,9 +1693,13 @@ mod test { address, addresses, cid, + name, + num_tasks, peer_id, peers, peers_vec_addr, + progress, + 
progress_count, ran, } = fixtures; @@ -1392,7 +1722,7 @@ mod test { NetworkNotification::OutgoingConnectionError(n) => { assert_eq!(n.timestamp, timestamp); assert_eq!( - n.peer_id.and_then(|p| Some(PeerId::from_str(&p).unwrap())), + n.peer_id.map(|p| PeerId::from_str(&p).unwrap()), Some(peer_id) ); assert_eq!(n.error, DialError::NoAddresses.to_string()); @@ -1461,6 +1791,29 @@ mod test { assert_eq!(Cid::from_str(&n.ran).unwrap(), ran); } NetworkNotification::GotReceiptDht(n) => { + assert_eq!(n.timestamp, timestamp); + assert_eq!( + n.publisher.map(|p| PeerId::from_str(&p).unwrap()), + Some(peer_id) + ); + assert_eq!(Cid::from_str(&n.cid).unwrap(), cid); + assert_eq!(Cid::from_str(&n.ran).unwrap(), ran); + } + NetworkNotification::PutWorkflowInfoDht(n) => { + assert_eq!(n.timestamp, timestamp); + assert_eq!(Cid::from_str(&n.cid).unwrap(), cid); + assert_eq!(n.name.map(|name| FastStr::new(name)), Some(name)); + assert_eq!(n.num_tasks, num_tasks); + assert_eq!( + n.progress + .iter() + .map(|cid| Cid::from_str(&cid).unwrap()) + .collect::>(), + progress + ); + assert_eq!(n.progress_count, progress_count); + } + NetworkNotification::GotWorkflowInfoDht(n) => { assert_eq!(n.timestamp, timestamp); assert_eq!( n.publisher @@ -1468,7 +1821,16 @@ mod test { Some(peer_id) ); assert_eq!(Cid::from_str(&n.cid).unwrap(), cid); - assert_eq!(Cid::from_str(&n.ran).unwrap(), ran); + assert_eq!(n.name.map(|name| FastStr::new(name)), Some(name)); + assert_eq!(n.num_tasks, num_tasks); + assert_eq!( + n.progress + .iter() + .map(|cid| Cid::from_str(&cid).unwrap()) + .collect::>(), + progress + ); + assert_eq!(n.progress_count, progress_count); } } } diff --git a/homestar-runtime/src/event_handler/swarm_event.rs b/homestar-runtime/src/event_handler/swarm_event.rs index 57385c88..28c8cdc4 100644 --- a/homestar-runtime/src/event_handler/swarm_event.rs +++ b/homestar-runtime/src/event_handler/swarm_event.rs @@ -85,7 +85,7 @@ pub(crate) struct WorkflowInfoEvent { pub(crate) workflow_info: 
workflow::Info, #[cfg(feature = "websocket-notify")] #[cfg_attr(docsrs, doc(cfg(feature = "websocket-notify")))] - pub(crate) notification_type: EventNotificationTyp, + pub(crate) workflow_source: notification::WorkflowInfoSource, } #[async_trait] @@ -665,10 +665,7 @@ async fn handle_swarm_event( peer_id, workflow_info: workflow_info.clone(), #[cfg(feature = "websocket-notify")] - notification_type: - EventNotificationTyp::SwarmNotification( - SwarmNotification::GotWorkflowInfoDht, - ), + workflow_source: notification::WorkflowInfoSource::Dht, }), )); @@ -1008,9 +1005,8 @@ async fn handle_swarm_event( peer_id, workflow_info: workflow_info.clone(), #[cfg(feature = "websocket-notify")] - notification_type: EventNotificationTyp::SwarmNotification( - SwarmNotification::ReceivedWorkflowInfo, - ), + workflow_source: + notification::WorkflowInfoSource::RequestResponse, }), )); diff --git a/homestar-runtime/tests/network/dht.rs b/homestar-runtime/tests/network/dht.rs index 8169312c..a9ec2aaf 100644 --- a/homestar-runtime/tests/network/dht.rs +++ b/homestar-runtime/tests/network/dht.rs @@ -210,7 +210,7 @@ fn test_libp2p_dht_records_integration() -> Result<()> { if json["put_receipt_dht"].is_object() { put_receipt = true; - } else if json["type"].as_str().unwrap() == "network:putWorkflowInfoDht" { + } else if json["put_workflow_info_dht"].is_object() { put_workflow_info = true; } else if json["type"].as_str().unwrap() == "network:receiptQuorumSuccess" { receipt_quorum_success = true; @@ -291,9 +291,9 @@ fn test_libp2p_dht_records_integration() -> Result<()> { let json: serde_json::Value = serde_json::from_slice(&msg.unwrap().unwrap()).unwrap(); - if json["type"].as_str().unwrap() == "network:gotWorkflowInfoDht" { + if json["got_workflow_info_dht"].is_object() { received_workflow_info_cid = - Cid::from_str(json["data"]["cid"].as_str().unwrap()) + Cid::from_str(json["got_workflow_info_dht"]["cid"].as_str().unwrap()) .expect("Unable to parse received workflow info CID."); 
break; } @@ -876,9 +876,9 @@ fn test_libp2p_dht_workflow_info_provider_recursive_integration() -> Result<()> // 2. Wait for connection between a and b to be established // 3. Wait for connection between a and c to be established // 4. Run workflow on a - // 5. Wait for network:putWorkflowInfoDht on a + // 5. Wait for put_workflow_info_dht on a // 6. Run workflow on b - // 7. Wait for network:GotWorkflowInfoDht on b + // 7. Wait for got_workflow_info_dht on b // 8. Delete a's DB // 9. Run workflow on c // 10. Wait for network:receivedWorkflowInfo on c (from b, through a) @@ -1149,9 +1149,9 @@ fn test_libp2p_dht_workflow_info_provider_recursive_integration() -> Result<()> println!("node1: {json}"); - if json["type"].as_str().unwrap() == "network:putWorkflowInfoDht" { + if json["put_workflow_info_dht"].is_object() { assert_eq!( - json["data"]["cid"].as_str().unwrap(), + json["put_workflow_info_dht"]["cid"].as_str().unwrap(), "bafyrmihctgawsskx54qyt3clcaq2quc42pqxzhr73o6qjlc3rc4mhznotq" ); @@ -1179,9 +1179,9 @@ fn test_libp2p_dht_workflow_info_provider_recursive_integration() -> Result<()> println!("node2: {json}"); - if json["type"].as_str().unwrap() == "network:gotWorkflowInfoDht" { + if json["got_workflow_info_dht"].is_object() { assert_eq!( - json["data"]["cid"].as_str().unwrap(), + json["got_workflow_info_dht"]["cid"].as_str().unwrap(), "bafyrmihctgawsskx54qyt3clcaq2quc42pqxzhr73o6qjlc3rc4mhznotq" ); From bf79525c1333fb16ce1f378807a9a44a6261bc92 Mon Sep 17 00:00:00 2001 From: Brian Ginsburg Date: Fri, 2 Feb 2024 12:12:41 -0800 Subject: [PATCH 34/75] feat: Add receipt quorum notifications --- .../src/event_handler/notification/swarm.rs | 279 ++++++++++++++++-- .../src/event_handler/swarm_event.rs | 30 +- homestar-runtime/tests/network/dht.rs | 15 +- 3 files changed, 281 insertions(+), 43 deletions(-) diff --git a/homestar-runtime/src/event_handler/notification/swarm.rs b/homestar-runtime/src/event_handler/notification/swarm.rs index 3e6d2177..6366afec 100644 --- 
a/homestar-runtime/src/event_handler/notification/swarm.rs +++ b/homestar-runtime/src/event_handler/notification/swarm.rs @@ -18,6 +18,7 @@ use std::{collections::BTreeMap, fmt, str::FromStr}; const ADDRESS_KEY: &str = "address"; const ADDRESSES_KEY: &str = "addresses"; const CID_KEY: &str = "cid"; +const CONNECTED_PEER_COUNT_KEY: &str = "connected_peer_count"; const ENQUIRER_KEY: &str = "enquirer"; const ERROR_KEY: &str = "error"; const NAME_KEY: &str = "name"; @@ -27,15 +28,15 @@ const PEERS_KEY: &str = "peers"; const PROGRESS_KEY: &str = "progress"; const PROGRESS_COUNT_KEY: &str = "progress_count"; const PUBLISHER_KEY: &str = "publisher"; +const QUORUM_KEY: &str = "quorum"; const RAN_KEY: &str = "ran"; const SERVER_KEY: &str = "server"; +const STORED_TO_PEERS_KEY: &str = "stored_to_peers"; const TIMESTAMP_KEY: &str = "timestamp"; // Swarm notification types sent to clients #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] pub(crate) enum SwarmNotification { - ReceiptQuorumSuccess, - ReceiptQuorumFailure, WorkflowInfoQuorumSuccess, WorkflowInfoQuorumFailure, SentWorkflowInfo, @@ -45,12 +46,6 @@ pub(crate) enum SwarmNotification { impl fmt::Display for SwarmNotification { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match *self { - SwarmNotification::ReceiptQuorumSuccess => { - write!(f, "receiptQuorumSuccess") - } - SwarmNotification::ReceiptQuorumFailure => { - write!(f, "receiptQuorumFailure") - } SwarmNotification::WorkflowInfoQuorumSuccess => { write!(f, "workflowInfoQuorumSuccess") } @@ -72,8 +67,6 @@ impl FromStr for SwarmNotification { fn from_str(ty: &str) -> Result { match ty { - "receiptQuorumSuccess" => Ok(Self::ReceiptQuorumSuccess), - "receiptQuorumFailure" => Ok(Self::ReceiptQuorumFailure), "workflowInfoQuorumSuccess" => Ok(Self::WorkflowInfoQuorumSuccess), "workflowInfoQuorumFailure" => Ok(Self::WorkflowInfoQuorumFailure), "sentWorkflowInfo" => Ok(Self::SentWorkflowInfo), @@ -135,6 +128,12 @@ pub enum NetworkNotification { 
/// Put workflow info DHT notification. #[schemars(rename = "got_workflow_info_dht")] GotWorkflowInfoDht(GotWorkflowInfoDht), + /// Receipt quorum success notification. + #[schemars(rename = "receipt_quorum_success_dht")] + ReceiptQuorumSuccessDht(ReceiptQuorumSuccessDht), + /// Receipt quorum failure notification. + #[schemars(rename = "receipt_quorum_failure_dht")] + ReceiptQuorumFailureDht(ReceiptQuorumFailureDht), } #[derive(Debug, Clone, PartialEq)] @@ -170,6 +169,12 @@ impl fmt::Display for NetworkNotification { NetworkNotification::GotReceiptDht(_) => write!(f, "got_receipt_dht"), NetworkNotification::PutWorkflowInfoDht(_) => write!(f, "put_workflow_info_dht"), NetworkNotification::GotWorkflowInfoDht(_) => write!(f, "got_workflow_info_dht"), + NetworkNotification::ReceiptQuorumSuccessDht(_) => { + write!(f, "receipt_quorum_success_dht") + } + NetworkNotification::ReceiptQuorumFailureDht(_) => { + write!(f, "receipt_quorum_failure_dht") + } } } } @@ -234,6 +239,14 @@ impl From for Ipld { NetworkNotification::GotWorkflowInfoDht(n) => { Ipld::Map(BTreeMap::from([("got_workflow_info_dht".into(), n.into())])) } + NetworkNotification::ReceiptQuorumSuccessDht(n) => Ipld::Map(BTreeMap::from([( + "receipt_quorum_success_dht".into(), + n.into(), + )])), + NetworkNotification::ReceiptQuorumFailureDht(n) => Ipld::Map(BTreeMap::from([( + "receipt_quorum_failure_dht".into(), + n.into(), + )])), } } } @@ -294,6 +307,12 @@ impl TryFrom for NetworkNotification { "got_workflow_info_dht" => Ok(NetworkNotification::GotWorkflowInfoDht( GotWorkflowInfoDht::try_from(val.to_owned())?, )), + "receipt_quorum_success_dht" => Ok(NetworkNotification::ReceiptQuorumSuccessDht( + ReceiptQuorumSuccessDht::try_from(val.to_owned())?, + )), + "receipt_quorum_failure_dht" => Ok(NetworkNotification::ReceiptQuorumFailureDht( + ReceiptQuorumFailureDht::try_from(val.to_owned())?, + )), _ => Err(anyhow!("Unknown network notification tag type")), } } else { @@ -1513,6 +1532,177 @@ impl TryFrom for 
GotWorkflowInfoDht { } } +#[derive(Debug, Clone, JsonSchema)] +#[schemars(rename = "receipt_quorum_success_dht")] +pub struct ReceiptQuorumSuccessDht { + timestamp: i64, + #[schemars(description = "Receipt CID")] + cid: String, + #[schemars(description = "Number of peers participating in quorum")] + quorum: usize, +} + +impl ReceiptQuorumSuccessDht { + pub(crate) fn new(cid: FastStr, quorum: usize) -> ReceiptQuorumSuccessDht { + ReceiptQuorumSuccessDht { + timestamp: Utc::now().timestamp_millis(), + cid: cid.to_string(), + quorum, + } + } +} + +impl DagJson for ReceiptQuorumSuccessDht {} + +impl From for Ipld { + fn from(notification: ReceiptQuorumSuccessDht) -> Self { + let map: BTreeMap = BTreeMap::from([ + (TIMESTAMP_KEY.into(), notification.timestamp.into()), + (CID_KEY.into(), notification.cid.into()), + (QUORUM_KEY.into(), notification.quorum.into()), + ]); + + Ipld::Map(map) + } +} + +impl TryFrom for ReceiptQuorumSuccessDht { + type Error = anyhow::Error; + + fn try_from(ipld: Ipld) -> Result { + let map = from_ipld::>(ipld)?; + + let timestamp = from_ipld( + map.get(TIMESTAMP_KEY) + .ok_or_else(|| anyhow!("missing {TIMESTAMP_KEY}"))? + .to_owned(), + )?; + + let cid = from_ipld( + map.get(CID_KEY) + .ok_or_else(|| anyhow!("missing {CID_KEY}"))? + .to_owned(), + )?; + + let quorum = from_ipld( + map.get(QUORUM_KEY) + .ok_or_else(|| anyhow!("missing {QUORUM_KEY}"))? 
+ .to_owned(), + )?; + + Ok(ReceiptQuorumSuccessDht { + timestamp, + cid, + quorum, + }) + } +} + +#[derive(Debug, Clone, JsonSchema)] +#[schemars(rename = "receipt_quorum_failure_dht")] +pub struct ReceiptQuorumFailureDht { + timestamp: i64, + #[schemars(description = "Receipt CID")] + cid: String, + #[schemars(description = "Number of peers required for quorum")] + quorum: usize, + #[schemars(description = "Number of connected peers")] + connected_peer_count: usize, + #[schemars(description = "Peers participating in quorum")] + stored_to_peers: Vec, +} + +impl ReceiptQuorumFailureDht { + pub(crate) fn new( + cid: FastStr, + quorum: usize, + connected_peer_count: usize, + stored_to_peers: Vec, + ) -> ReceiptQuorumFailureDht { + ReceiptQuorumFailureDht { + timestamp: Utc::now().timestamp_millis(), + cid: cid.to_string(), + quorum, + connected_peer_count, + stored_to_peers: stored_to_peers.iter().map(|p| p.to_string()).collect(), + } + } +} + +impl DagJson for ReceiptQuorumFailureDht {} + +impl From for Ipld { + fn from(notification: ReceiptQuorumFailureDht) -> Self { + let map: BTreeMap = BTreeMap::from([ + (TIMESTAMP_KEY.into(), notification.timestamp.into()), + (CID_KEY.into(), notification.cid.into()), + (QUORUM_KEY.into(), notification.quorum.into()), + ( + CONNECTED_PEER_COUNT_KEY.into(), + notification.connected_peer_count.into(), + ), + ( + STORED_TO_PEERS_KEY.into(), + Ipld::List( + notification + .stored_to_peers + .iter() + .map(|p| Ipld::String(p.to_string())) + .collect(), + ), + ), + ]); + + Ipld::Map(map) + } +} + +impl TryFrom for ReceiptQuorumFailureDht { + type Error = anyhow::Error; + + fn try_from(ipld: Ipld) -> Result { + let map = from_ipld::>(ipld)?; + + let timestamp = from_ipld( + map.get(TIMESTAMP_KEY) + .ok_or_else(|| anyhow!("missing {TIMESTAMP_KEY}"))? + .to_owned(), + )?; + + let cid = from_ipld( + map.get(CID_KEY) + .ok_or_else(|| anyhow!("missing {CID_KEY}"))? 
+ .to_owned(), + )?; + + let quorum = from_ipld( + map.get(QUORUM_KEY) + .ok_or_else(|| anyhow!("missing {QUORUM_KEY}"))? + .to_owned(), + )?; + + let connected_peer_count = from_ipld( + map.get(CONNECTED_PEER_COUNT_KEY) + .ok_or_else(|| anyhow!("missing {CONNECTED_PEER_COUNT_KEY}"))? + .to_owned(), + )?; + + let stored_to_peers = from_ipld( + map.get(STORED_TO_PEERS_KEY) + .ok_or_else(|| anyhow!("missing {STORED_TO_PEERS_KEY}"))? + .to_owned(), + )?; + + Ok(ReceiptQuorumFailureDht { + timestamp, + cid, + quorum, + connected_peer_count, + stored_to_peers, + }) + } +} + #[cfg(test)] mod test { use super::*; @@ -1524,13 +1714,16 @@ mod test { address: Multiaddr, addresses: Vec, cid: Cid, + connected_peer_count: usize, name: FastStr, num_tasks: u32, peer_id: PeerId, - peers: BTreeMap, - peers_vec_addr: BTreeMap>, + peers: Vec, + peers_map: BTreeMap, + peers_map_vec_addr: BTreeMap>, progress: Vec, progress_count: u32, + quorum: usize, ran: Cid, } @@ -1542,10 +1735,12 @@ mod test { Multiaddr::from_str("/ip4/127.0.0.1/tcp/7001").unwrap(), ], cid: generate_cid(&mut thread_rng()), + connected_peer_count: 1, name: FastStr::new("Strong Bad"), num_tasks: 1, peer_id: PeerId::random(), - peers: BTreeMap::from([ + peers: vec![PeerId::random(), PeerId::random()], + peers_map: BTreeMap::from([ ( PeerId::random(), Multiaddr::from_str("/ip4/127.0.0.1/tcp/7000").unwrap(), @@ -1555,7 +1750,7 @@ mod test { Multiaddr::from_str("/ip4/127.0.0.1/tcp/7001").unwrap(), ), ]), - peers_vec_addr: BTreeMap::from([ + peers_map_vec_addr: BTreeMap::from([ ( PeerId::random(), vec![Multiaddr::from_str("/ip4/127.0.0.1/tcp/7000").unwrap()], @@ -1570,6 +1765,7 @@ mod test { ]), progress: vec![generate_cid(&mut thread_rng())], progress_count: 1, + quorum: 3, ran: generate_cid(&mut thread_rng()), } } @@ -1579,13 +1775,16 @@ mod test { address, addresses, cid, + connected_peer_count, name, num_tasks, peer_id, peers, - peers_vec_addr, + peers_map, + peers_map_vec_addr, progress, progress_count, + quorum, 
ran, } = fixtures; @@ -1595,8 +1794,8 @@ mod test { let outgoing_connection_error = OutgoingConnectionError::new(Some(peer_id), DialError::NoAddresses); let incoming_connection_error = IncomingConnectionError::new(ListenError::Aborted); - let discovered_mdns = DiscoveredMdns::new(peers); - let discovered_rendezvous = DiscoveredRendezvous::new(peer_id, peers_vec_addr); + let discovered_mdns = DiscoveredMdns::new(peers_map); + let discovered_rendezvous = DiscoveredRendezvous::new(peer_id, peers_map_vec_addr); let registered_rendezvous = RegisteredRendezvous::new(peer_id); let discover_served_rendezvous = DiscoverServedRendezvous::new(peer_id); let peer_registered_rendezvous = PeerRegisteredRendezvous::new(peer_id, addresses); @@ -1619,6 +1818,14 @@ mod test { progress, progress_count, ); + let receipt_quorum_success_dht = + ReceiptQuorumSuccessDht::new(FastStr::new(cid.to_string()), quorum); + let receipt_quorum_failure_dht = ReceiptQuorumFailureDht::new( + FastStr::new(cid.to_string()), + quorum, + connected_peer_count, + peers, + ); vec![ ( @@ -1685,6 +1892,14 @@ mod test { got_workflow_info_dht.timestamp, NetworkNotification::GotWorkflowInfoDht(got_workflow_info_dht), ), + ( + receipt_quorum_success_dht.timestamp, + NetworkNotification::ReceiptQuorumSuccessDht(receipt_quorum_success_dht), + ), + ( + receipt_quorum_failure_dht.timestamp, + NetworkNotification::ReceiptQuorumFailureDht(receipt_quorum_failure_dht), + ), ] } @@ -1693,13 +1908,16 @@ mod test { address, addresses, cid, + connected_peer_count, name, num_tasks, peer_id, peers, - peers_vec_addr, + peers_map, + peers_map_vec_addr, progress, progress_count, + quorum, ran, } = fixtures; @@ -1737,7 +1955,7 @@ mod test { for peer in n.peers { assert_eq!( Multiaddr::from_str(&peer.1).unwrap(), - peers[&PeerId::from_str(&peer.0).unwrap()] + peers_map[&PeerId::from_str(&peer.0).unwrap()] ) } } @@ -1751,7 +1969,7 @@ mod test { .iter() .map(|address| Multiaddr::from_str(address).unwrap()) .collect::>(), - 
peers_vec_addr[&PeerId::from_str(&peer.0).unwrap()] + peers_map_vec_addr[&PeerId::from_str(&peer.0).unwrap()] ) } } @@ -1816,8 +2034,7 @@ mod test { NetworkNotification::GotWorkflowInfoDht(n) => { assert_eq!(n.timestamp, timestamp); assert_eq!( - n.publisher - .and_then(|p| Some(PeerId::from_str(&p).unwrap())), + n.publisher.map(|p| PeerId::from_str(&p).unwrap()), Some(peer_id) ); assert_eq!(Cid::from_str(&n.cid).unwrap(), cid); @@ -1832,6 +2049,24 @@ mod test { ); assert_eq!(n.progress_count, progress_count); } + NetworkNotification::ReceiptQuorumSuccessDht(n) => { + assert_eq!(n.timestamp, timestamp); + assert_eq!(FastStr::new(n.cid), FastStr::new(cid.to_string())); + assert_eq!(n.quorum, quorum); + } + NetworkNotification::ReceiptQuorumFailureDht(n) => { + assert_eq!(n.timestamp, timestamp); + assert_eq!(FastStr::new(n.cid), FastStr::new(cid.to_string())); + assert_eq!(n.quorum, quorum); + assert_eq!(n.connected_peer_count, connected_peer_count); + assert_eq!( + n.stored_to_peers + .iter() + .map(|p| PeerId::from_str(p).unwrap()) + .collect::>(), + peers + ); + } } } diff --git a/homestar-runtime/src/event_handler/swarm_event.rs b/homestar-runtime/src/event_handler/swarm_event.rs index 28c8cdc4..59bcd532 100644 --- a/homestar-runtime/src/event_handler/swarm_event.rs +++ b/homestar-runtime/src/event_handler/swarm_event.rs @@ -764,15 +764,14 @@ async fn handle_swarm_event( #[cfg(feature = "websocket-notify")] match key.capsule_tag { - CapsuleTag::Receipt => notification::emit_event( + CapsuleTag::Receipt => notification::emit_network_event( event_handler.ws_evt_sender(), - EventNotificationTyp::SwarmNotification( - SwarmNotification::ReceiptQuorumSuccess, + NetworkNotification::ReceiptQuorumSuccessDht( + notification::ReceiptQuorumSuccessDht::new( + key.cid, + event_handler.receipt_quorum, + ), ), - btreemap! 
{ - "cid" => Ipld::String(key.cid.to_string()), - "quorum" => Ipld::Integer(event_handler.receipt_quorum as i128), - }, ), CapsuleTag::Workflow => notification::emit_event( event_handler.ws_evt_sender(), @@ -806,17 +805,16 @@ async fn handle_swarm_event( #[cfg(feature = "websocket-notify")] if let kad::PutRecordError::QuorumFailed { success, .. } = err { match key.capsule_tag { - CapsuleTag::Receipt => notification::emit_event( + CapsuleTag::Receipt => notification::emit_network_event( event_handler.ws_evt_sender(), - EventNotificationTyp::SwarmNotification( - SwarmNotification::ReceiptQuorumFailure, + NetworkNotification::ReceiptQuorumFailureDht( + notification::ReceiptQuorumFailureDht::new( + key.cid, + event_handler.receipt_quorum, + event_handler.connections.peers.len(), + success, + ), ), - btreemap! { - "cid" => Ipld::String(key.cid.to_string()), - "quorum" => Ipld::Integer(event_handler.receipt_quorum as i128), - "connectedPeers" => Ipld::Integer(event_handler.connections.peers.len() as i128), - "storedToPeers" => Ipld::List(success.iter().map(|cid| Ipld::String(cid.to_string())).collect()) - }, ), CapsuleTag::Workflow => notification::emit_event( event_handler.ws_evt_sender(), diff --git a/homestar-runtime/tests/network/dht.rs b/homestar-runtime/tests/network/dht.rs index a9ec2aaf..b79b897a 100644 --- a/homestar-runtime/tests/network/dht.rs +++ b/homestar-runtime/tests/network/dht.rs @@ -212,7 +212,7 @@ fn test_libp2p_dht_records_integration() -> Result<()> { put_receipt = true; } else if json["put_workflow_info_dht"].is_object() { put_workflow_info = true; - } else if json["type"].as_str().unwrap() == "network:receiptQuorumSuccess" { + } else if json["receipt_quorum_success_dht"].is_object() { receipt_quorum_success = true; } else if json["type"].as_str().unwrap() == "network:workflowInfoQuorumSuccess" { workflow_info_quorum_success = true; @@ -521,11 +521,15 @@ fn test_libp2p_dht_quorum_failure_intregration() -> Result<()> { let json: serde_json::Value = 
serde_json::from_slice(&msg.unwrap().unwrap()).unwrap(); - if json["type"].as_str().unwrap() == "network:receiptQuorumFailure" { + if json["receipt_quorum_failure_dht"].is_object() { receipt_quorum_failure = true - } else if json["type"].as_str().unwrap() == "network:workflowInfoQuorumFailure" { - workflow_info_quorum_failure = true } + + // if json["receipt_quorum_failure_dht"].is_object() { + // receipt_quorum_failure = true + // } else if json["type"].as_str().unwrap() == "network:workflowInfoQuorumFailure" { + // workflow_info_quorum_failure = true + // } } else { panic!( r#"Expected notifications from node one did not arrive in time: @@ -536,7 +540,8 @@ fn test_libp2p_dht_quorum_failure_intregration() -> Result<()> { ); } - if receipt_quorum_failure && workflow_info_quorum_failure { + // if receipt_quorum_failure && workflow_info_quorum_failure { + if receipt_quorum_failure { break; } } From 2562599eceaebc9ec50ce169af344de9de734b5a Mon Sep 17 00:00:00 2001 From: Brian Ginsburg Date: Fri, 2 Feb 2024 14:24:37 -0800 Subject: [PATCH 35/75] feat: Add workflow info quorum notifications --- .../src/event_handler/notification/swarm.rs | 245 +++++++++++++++++- .../src/event_handler/swarm_event.rs | 30 +-- homestar-runtime/tests/network/dht.rs | 15 +- 3 files changed, 256 insertions(+), 34 deletions(-) diff --git a/homestar-runtime/src/event_handler/notification/swarm.rs b/homestar-runtime/src/event_handler/notification/swarm.rs index 6366afec..58c282c8 100644 --- a/homestar-runtime/src/event_handler/notification/swarm.rs +++ b/homestar-runtime/src/event_handler/notification/swarm.rs @@ -37,8 +37,6 @@ const TIMESTAMP_KEY: &str = "timestamp"; // Swarm notification types sent to clients #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] pub(crate) enum SwarmNotification { - WorkflowInfoQuorumSuccess, - WorkflowInfoQuorumFailure, SentWorkflowInfo, ReceivedWorkflowInfo, } @@ -46,12 +44,6 @@ pub(crate) enum SwarmNotification { impl fmt::Display for SwarmNotification 
{ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match *self { - SwarmNotification::WorkflowInfoQuorumSuccess => { - write!(f, "workflowInfoQuorumSuccess") - } - SwarmNotification::WorkflowInfoQuorumFailure => { - write!(f, "workflowInfoQuorumFailure") - } SwarmNotification::SentWorkflowInfo => { write!(f, "sentWorkflowInfo") } @@ -67,8 +59,6 @@ impl FromStr for SwarmNotification { fn from_str(ty: &str) -> Result { match ty { - "workflowInfoQuorumSuccess" => Ok(Self::WorkflowInfoQuorumSuccess), - "workflowInfoQuorumFailure" => Ok(Self::WorkflowInfoQuorumFailure), "sentWorkflowInfo" => Ok(Self::SentWorkflowInfo), "receivedWorkflowInfo" => Ok(Self::ReceivedWorkflowInfo), _ => Err(anyhow!("Missing swarm notification type: {}", ty)), @@ -134,6 +124,12 @@ pub enum NetworkNotification { /// Receipt quorum failure notification. #[schemars(rename = "receipt_quorum_failure_dht")] ReceiptQuorumFailureDht(ReceiptQuorumFailureDht), + /// Wokflow info quorum success notification. + #[schemars(rename = "workflow_info_quorum_success_dht")] + WorkflowInfoQuorumSuccessDht(WorkflowInfoQuorumSuccessDht), + /// Wokflow info quorum failure notification. 
+ #[schemars(rename = "workflow_info_quorum_failure_dht")] + WorkflowInfoQuorumFailureDht(WorkflowInfoQuorumFailureDht), } #[derive(Debug, Clone, PartialEq)] @@ -175,6 +171,12 @@ impl fmt::Display for NetworkNotification { NetworkNotification::ReceiptQuorumFailureDht(_) => { write!(f, "receipt_quorum_failure_dht") } + NetworkNotification::WorkflowInfoQuorumSuccessDht(_) => { + write!(f, "workflow_info_quorum_success_dht") + } + NetworkNotification::WorkflowInfoQuorumFailureDht(_) => { + write!(f, "workflow_info_quorum_failure_dht") + } } } } @@ -247,6 +249,14 @@ impl From for Ipld { "receipt_quorum_failure_dht".into(), n.into(), )])), + NetworkNotification::WorkflowInfoQuorumSuccessDht(n) => Ipld::Map(BTreeMap::from([( + "workflow_info_quorum_success_dht".into(), + n.into(), + )])), + NetworkNotification::WorkflowInfoQuorumFailureDht(n) => Ipld::Map(BTreeMap::from([( + "workflow_info_quorum_failure_dht".into(), + n.into(), + )])), } } } @@ -313,6 +323,16 @@ impl TryFrom for NetworkNotification { "receipt_quorum_failure_dht" => Ok(NetworkNotification::ReceiptQuorumFailureDht( ReceiptQuorumFailureDht::try_from(val.to_owned())?, )), + "workflow_info_quorum_success_dht" => { + Ok(NetworkNotification::WorkflowInfoQuorumSuccessDht( + WorkflowInfoQuorumSuccessDht::try_from(val.to_owned())?, + )) + } + "workflow_info_quorum_failure_dht" => { + Ok(NetworkNotification::WorkflowInfoQuorumFailureDht( + WorkflowInfoQuorumFailureDht::try_from(val.to_owned())?, + )) + } _ => Err(anyhow!("Unknown network notification tag type")), } } else { @@ -1703,6 +1723,177 @@ impl TryFrom for ReceiptQuorumFailureDht { } } +#[derive(Debug, Clone, JsonSchema)] +#[schemars(rename = "workflow_info_quorum_success_dht")] +pub struct WorkflowInfoQuorumSuccessDht { + timestamp: i64, + #[schemars(description = "Workflow info CID")] + cid: String, + #[schemars(description = "Number of peers participating in quorum")] + quorum: usize, +} + +impl WorkflowInfoQuorumSuccessDht { + pub(crate) fn new(cid: 
FastStr, quorum: usize) -> WorkflowInfoQuorumSuccessDht { + WorkflowInfoQuorumSuccessDht { + timestamp: Utc::now().timestamp_millis(), + cid: cid.to_string(), + quorum, + } + } +} + +impl DagJson for WorkflowInfoQuorumSuccessDht {} + +impl From for Ipld { + fn from(notification: WorkflowInfoQuorumSuccessDht) -> Self { + let map: BTreeMap = BTreeMap::from([ + (TIMESTAMP_KEY.into(), notification.timestamp.into()), + (CID_KEY.into(), notification.cid.into()), + (QUORUM_KEY.into(), notification.quorum.into()), + ]); + + Ipld::Map(map) + } +} + +impl TryFrom for WorkflowInfoQuorumSuccessDht { + type Error = anyhow::Error; + + fn try_from(ipld: Ipld) -> Result { + let map = from_ipld::>(ipld)?; + + let timestamp = from_ipld( + map.get(TIMESTAMP_KEY) + .ok_or_else(|| anyhow!("missing {TIMESTAMP_KEY}"))? + .to_owned(), + )?; + + let cid = from_ipld( + map.get(CID_KEY) + .ok_or_else(|| anyhow!("missing {CID_KEY}"))? + .to_owned(), + )?; + + let quorum = from_ipld( + map.get(QUORUM_KEY) + .ok_or_else(|| anyhow!("missing {QUORUM_KEY}"))? 
+ .to_owned(), + )?; + + Ok(WorkflowInfoQuorumSuccessDht { + timestamp, + cid, + quorum, + }) + } +} + +#[derive(Debug, Clone, JsonSchema)] +#[schemars(rename = "workflow_info_quorum_failure_dht")] +pub struct WorkflowInfoQuorumFailureDht { + timestamp: i64, + #[schemars(description = "Workflow info CID")] + cid: String, + #[schemars(description = "Number of peers required for quorum")] + quorum: usize, + #[schemars(description = "Number of connected peers")] + connected_peer_count: usize, + #[schemars(description = "Peers participating in quorum")] + stored_to_peers: Vec, +} + +impl WorkflowInfoQuorumFailureDht { + pub(crate) fn new( + cid: FastStr, + quorum: usize, + connected_peer_count: usize, + stored_to_peers: Vec, + ) -> WorkflowInfoQuorumFailureDht { + WorkflowInfoQuorumFailureDht { + timestamp: Utc::now().timestamp_millis(), + cid: cid.to_string(), + quorum, + connected_peer_count, + stored_to_peers: stored_to_peers.iter().map(|p| p.to_string()).collect(), + } + } +} + +impl DagJson for WorkflowInfoQuorumFailureDht {} + +impl From for Ipld { + fn from(notification: WorkflowInfoQuorumFailureDht) -> Self { + let map: BTreeMap = BTreeMap::from([ + (TIMESTAMP_KEY.into(), notification.timestamp.into()), + (CID_KEY.into(), notification.cid.into()), + (QUORUM_KEY.into(), notification.quorum.into()), + ( + CONNECTED_PEER_COUNT_KEY.into(), + notification.connected_peer_count.into(), + ), + ( + STORED_TO_PEERS_KEY.into(), + Ipld::List( + notification + .stored_to_peers + .iter() + .map(|p| Ipld::String(p.to_string())) + .collect(), + ), + ), + ]); + + Ipld::Map(map) + } +} + +impl TryFrom for WorkflowInfoQuorumFailureDht { + type Error = anyhow::Error; + + fn try_from(ipld: Ipld) -> Result { + let map = from_ipld::>(ipld)?; + + let timestamp = from_ipld( + map.get(TIMESTAMP_KEY) + .ok_or_else(|| anyhow!("missing {TIMESTAMP_KEY}"))? + .to_owned(), + )?; + + let cid = from_ipld( + map.get(CID_KEY) + .ok_or_else(|| anyhow!("missing {CID_KEY}"))? 
+ .to_owned(), + )?; + + let quorum = from_ipld( + map.get(QUORUM_KEY) + .ok_or_else(|| anyhow!("missing {QUORUM_KEY}"))? + .to_owned(), + )?; + + let connected_peer_count = from_ipld( + map.get(CONNECTED_PEER_COUNT_KEY) + .ok_or_else(|| anyhow!("missing {CONNECTED_PEER_COUNT_KEY}"))? + .to_owned(), + )?; + + let stored_to_peers = from_ipld( + map.get(STORED_TO_PEERS_KEY) + .ok_or_else(|| anyhow!("missing {STORED_TO_PEERS_KEY}"))? + .to_owned(), + )?; + + Ok(WorkflowInfoQuorumFailureDht { + timestamp, + cid, + quorum, + connected_peer_count, + stored_to_peers, + }) + } +} + #[cfg(test)] mod test { use super::*; @@ -1821,6 +2012,14 @@ mod test { let receipt_quorum_success_dht = ReceiptQuorumSuccessDht::new(FastStr::new(cid.to_string()), quorum); let receipt_quorum_failure_dht = ReceiptQuorumFailureDht::new( + FastStr::new(cid.to_string()), + quorum, + connected_peer_count, + peers.clone(), + ); + let workflow_info_quorum_success_dht = + WorkflowInfoQuorumSuccessDht::new(FastStr::new(cid.to_string()), quorum); + let workflow_info_quorum_failure_dht = WorkflowInfoQuorumFailureDht::new( FastStr::new(cid.to_string()), quorum, connected_peer_count, @@ -1900,6 +2099,14 @@ mod test { receipt_quorum_failure_dht.timestamp, NetworkNotification::ReceiptQuorumFailureDht(receipt_quorum_failure_dht), ), + ( + workflow_info_quorum_success_dht.timestamp, + NetworkNotification::WorkflowInfoQuorumSuccessDht(workflow_info_quorum_success_dht), + ), + ( + workflow_info_quorum_failure_dht.timestamp, + NetworkNotification::WorkflowInfoQuorumFailureDht(workflow_info_quorum_failure_dht), + ), ] } @@ -2067,6 +2274,24 @@ mod test { peers ); } + NetworkNotification::WorkflowInfoQuorumSuccessDht(n) => { + assert_eq!(n.timestamp, timestamp); + assert_eq!(FastStr::new(n.cid), FastStr::new(cid.to_string())); + assert_eq!(n.quorum, quorum); + } + NetworkNotification::WorkflowInfoQuorumFailureDht(n) => { + assert_eq!(n.timestamp, timestamp); + assert_eq!(FastStr::new(n.cid), 
FastStr::new(cid.to_string())); + assert_eq!(n.quorum, quorum); + assert_eq!(n.connected_peer_count, connected_peer_count); + assert_eq!( + n.stored_to_peers + .iter() + .map(|p| PeerId::from_str(p).unwrap()) + .collect::>(), + peers + ); + } } } diff --git a/homestar-runtime/src/event_handler/swarm_event.rs b/homestar-runtime/src/event_handler/swarm_event.rs index 59bcd532..134c0fe9 100644 --- a/homestar-runtime/src/event_handler/swarm_event.rs +++ b/homestar-runtime/src/event_handler/swarm_event.rs @@ -773,15 +773,14 @@ async fn handle_swarm_event( ), ), ), - CapsuleTag::Workflow => notification::emit_event( + CapsuleTag::Workflow => notification::emit_network_event( event_handler.ws_evt_sender(), - EventNotificationTyp::SwarmNotification( - SwarmNotification::WorkflowInfoQuorumSuccess, + NetworkNotification::WorkflowInfoQuorumSuccessDht( + notification::WorkflowInfoQuorumSuccessDht::new( + key.cid, + event_handler.workflow_quorum, + ), ), - btreemap! { - "cid" => Ipld::String(key.cid.to_string()), - "quorum" => Ipld::Integer(event_handler.workflow_quorum as i128), - }, ), } } @@ -816,17 +815,16 @@ async fn handle_swarm_event( ), ), ), - CapsuleTag::Workflow => notification::emit_event( + CapsuleTag::Workflow => notification::emit_network_event( event_handler.ws_evt_sender(), - EventNotificationTyp::SwarmNotification( - SwarmNotification::WorkflowInfoQuorumFailure, + NetworkNotification::WorkflowInfoQuorumFailureDht( + notification::WorkflowInfoQuorumFailureDht::new( + key.cid, + event_handler.workflow_quorum, + event_handler.connections.peers.len(), + success, + ), ), - btreemap! 
{ - "cid" => Ipld::String(key.cid.to_string()), - "quorum" => Ipld::Integer(event_handler.workflow_quorum as i128), - "connectedPeers" => Ipld::Integer(event_handler.connections.peers.len() as i128), - "storedToPeers" => Ipld::List(success.iter().map(|cid| Ipld::String(cid.to_string())).collect()) - }, ), } } diff --git a/homestar-runtime/tests/network/dht.rs b/homestar-runtime/tests/network/dht.rs index b79b897a..0021865c 100644 --- a/homestar-runtime/tests/network/dht.rs +++ b/homestar-runtime/tests/network/dht.rs @@ -214,7 +214,7 @@ fn test_libp2p_dht_records_integration() -> Result<()> { put_workflow_info = true; } else if json["receipt_quorum_success_dht"].is_object() { receipt_quorum_success = true; - } else if json["type"].as_str().unwrap() == "network:workflowInfoQuorumSuccess" { + } else if json["workflow_info_quorum_success_dht"].is_object() { workflow_info_quorum_success = true; } } else { @@ -525,11 +525,11 @@ fn test_libp2p_dht_quorum_failure_intregration() -> Result<()> { receipt_quorum_failure = true } - // if json["receipt_quorum_failure_dht"].is_object() { - // receipt_quorum_failure = true - // } else if json["type"].as_str().unwrap() == "network:workflowInfoQuorumFailure" { - // workflow_info_quorum_failure = true - // } + if json["receipt_quorum_failure_dht"].is_object() { + receipt_quorum_failure = true + } else if json["workflow_info_quorum_failure_dht"].is_object() { + workflow_info_quorum_failure = true + } } else { panic!( r#"Expected notifications from node one did not arrive in time: @@ -540,8 +540,7 @@ fn test_libp2p_dht_quorum_failure_intregration() -> Result<()> { ); } - // if receipt_quorum_failure && workflow_info_quorum_failure { - if receipt_quorum_failure { + if receipt_quorum_failure && workflow_info_quorum_failure { break; } } From acb4b42841b2feda091c31bca154a2c4f8f09d1a Mon Sep 17 00:00:00 2001 From: Brian Ginsburg Date: Fri, 2 Feb 2024 18:08:27 -0800 Subject: [PATCH 36/75] feat: Add request response notifications --- 
homestar-runtime/src/event_handler/event.rs | 14 +- .../src/event_handler/notification/swarm.rs | 390 +++++++++++++++++- .../src/event_handler/swarm_event.rs | 21 +- homestar-runtime/tests/network/dht.rs | 11 +- 4 files changed, 400 insertions(+), 36 deletions(-) diff --git a/homestar-runtime/src/event_handler/event.rs b/homestar-runtime/src/event_handler/event.rs index c2929fc1..ae930854 100644 --- a/homestar-runtime/src/event_handler/event.rs +++ b/homestar-runtime/src/event_handler/event.rs @@ -202,8 +202,18 @@ impl Event { ), ) } - // TODO Fill this case in! - notification::WorkflowInfoSource::RequestResponse => todo!(), + notification::WorkflowInfoSource::RequestResponse => { + NetworkNotification::ReceivedWorkflowInfo( + notification::ReceivedWorkflowInfo::new( + peer_id, + workflow_info.cid(), + workflow_info.name, + workflow_info.num_tasks, + workflow_info.progress, + workflow_info.progress_count, + ), + ) + } }, ), }, diff --git a/homestar-runtime/src/event_handler/notification/swarm.rs b/homestar-runtime/src/event_handler/notification/swarm.rs index 58c282c8..735d20a2 100644 --- a/homestar-runtime/src/event_handler/notification/swarm.rs +++ b/homestar-runtime/src/event_handler/notification/swarm.rs @@ -27,7 +27,9 @@ const PEER_KEY: &str = "peer_id"; const PEERS_KEY: &str = "peers"; const PROGRESS_KEY: &str = "progress"; const PROGRESS_COUNT_KEY: &str = "progress_count"; +const PROVIDER_KEY: &str = "provider"; const PUBLISHER_KEY: &str = "publisher"; +const REQUESTOR_KEY: &str = "requestor"; const QUORUM_KEY: &str = "quorum"; const RAN_KEY: &str = "ran"; const SERVER_KEY: &str = "server"; @@ -36,21 +38,11 @@ const TIMESTAMP_KEY: &str = "timestamp"; // Swarm notification types sent to clients #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] -pub(crate) enum SwarmNotification { - SentWorkflowInfo, - ReceivedWorkflowInfo, -} +pub(crate) enum SwarmNotification {} impl fmt::Display for SwarmNotification { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> 
fmt::Result { - match *self { - SwarmNotification::SentWorkflowInfo => { - write!(f, "sentWorkflowInfo") - } - SwarmNotification::ReceivedWorkflowInfo => { - write!(f, "receivedWorkflowInfo") - } - } + match *self {} } } @@ -59,8 +51,6 @@ impl FromStr for SwarmNotification { fn from_str(ty: &str) -> Result { match ty { - "sentWorkflowInfo" => Ok(Self::SentWorkflowInfo), - "receivedWorkflowInfo" => Ok(Self::ReceivedWorkflowInfo), _ => Err(anyhow!("Missing swarm notification type: {}", ty)), } } @@ -124,12 +114,18 @@ pub enum NetworkNotification { /// Receipt quorum failure notification. #[schemars(rename = "receipt_quorum_failure_dht")] ReceiptQuorumFailureDht(ReceiptQuorumFailureDht), - /// Wokflow info quorum success notification. + /// Workflow info quorum success notification. #[schemars(rename = "workflow_info_quorum_success_dht")] WorkflowInfoQuorumSuccessDht(WorkflowInfoQuorumSuccessDht), - /// Wokflow info quorum failure notification. + /// Workflow info quorum failure notification. #[schemars(rename = "workflow_info_quorum_failure_dht")] WorkflowInfoQuorumFailureDht(WorkflowInfoQuorumFailureDht), + /// Sent workflow info notification. + #[schemars(rename = "sent_workflow_info")] + SentWorkflowInfo(SentWorkflowInfo), + /// Received workflow info notification. 
+ #[schemars(rename = "received_workflow_info")] + ReceivedWorkflowInfo(ReceivedWorkflowInfo), } #[derive(Debug, Clone, PartialEq)] @@ -177,6 +173,12 @@ impl fmt::Display for NetworkNotification { NetworkNotification::WorkflowInfoQuorumFailureDht(_) => { write!(f, "workflow_info_quorum_failure_dht") } + NetworkNotification::SentWorkflowInfo(_) => { + write!(f, "sent_workflow_info") + } + NetworkNotification::ReceivedWorkflowInfo(_) => { + write!(f, "received_workflow_info") + } } } } @@ -257,6 +259,13 @@ impl From for Ipld { "workflow_info_quorum_failure_dht".into(), n.into(), )])), + NetworkNotification::SentWorkflowInfo(n) => { + Ipld::Map(BTreeMap::from([("sent_workflow_info".into(), n.into())])) + } + NetworkNotification::ReceivedWorkflowInfo(n) => Ipld::Map(BTreeMap::from([( + "received_workflow_info".into(), + n.into(), + )])), } } } @@ -333,6 +342,12 @@ impl TryFrom for NetworkNotification { WorkflowInfoQuorumFailureDht::try_from(val.to_owned())?, )) } + "sent_workflow_info" => Ok(NetworkNotification::SentWorkflowInfo( + SentWorkflowInfo::try_from(val.to_owned())?, + )), + "received_workflow_info" => Ok(NetworkNotification::ReceivedWorkflowInfo( + ReceivedWorkflowInfo::try_from(val.to_owned())?, + )), _ => Err(anyhow!("Unknown network notification tag type")), } } else { @@ -1894,6 +1909,288 @@ impl TryFrom for WorkflowInfoQuorumFailureDht { } } +#[derive(Debug, Clone, JsonSchema)] +#[schemars(rename = "sent_workflow_info")] +pub struct SentWorkflowInfo { + timestamp: i64, + #[schemars(description = "Peer that requested workflow info")] + requestor: String, + #[schemars(description = "Workflow info CID")] + cid: String, + #[schemars(description = "Optional workflow name")] + name: Option, + #[schemars(description = "Number of tasks in workflow")] + num_tasks: u32, + #[schemars(description = "Completed task CIDs")] + progress: Vec, + #[schemars(description = "Number of workflow tasks completed")] + progress_count: u32, +} + +impl SentWorkflowInfo { + 
pub(crate) fn new( + requestor: PeerId, + cid: Cid, + name: Option, + num_tasks: u32, + progress: Vec, + progress_count: u32, + ) -> SentWorkflowInfo { + SentWorkflowInfo { + requestor: requestor.to_string(), + timestamp: Utc::now().timestamp_millis(), + cid: cid.to_string(), + name: name.map(|n| n.into()), + num_tasks, + progress: progress.iter().map(|cid| cid.to_string()).collect(), + progress_count, + } + } +} + +impl DagJson for SentWorkflowInfo {} + +impl From for Ipld { + fn from(notification: SentWorkflowInfo) -> Self { + let map: BTreeMap = BTreeMap::from([ + (TIMESTAMP_KEY.into(), notification.timestamp.into()), + (REQUESTOR_KEY.into(), notification.requestor.into()), + (CID_KEY.into(), notification.cid.into()), + ( + NAME_KEY.into(), + notification + .name + .map(|peer_id| peer_id.into()) + .unwrap_or(Ipld::Null), + ), + (NUM_TASKS_KEY.into(), notification.num_tasks.into()), + ( + PROGRESS_KEY.into(), + Ipld::List( + notification + .progress + .iter() + .map(|cid| Ipld::String(cid.to_string())) + .collect(), + ), + ), + ( + PROGRESS_COUNT_KEY.into(), + notification.progress_count.into(), + ), + ]); + + Ipld::Map(map) + } +} + +impl TryFrom for SentWorkflowInfo { + type Error = anyhow::Error; + + fn try_from(ipld: Ipld) -> Result { + let map = from_ipld::>(ipld)?; + + let timestamp = from_ipld( + map.get(TIMESTAMP_KEY) + .ok_or_else(|| anyhow!("missing {TIMESTAMP_KEY}"))? + .to_owned(), + )?; + + let requestor = from_ipld( + map.get(REQUESTOR_KEY) + .ok_or_else(|| anyhow!("missing {REQUESTOR_KEY}"))? + .to_owned(), + )?; + + let cid = from_ipld( + map.get(CID_KEY) + .ok_or_else(|| anyhow!("missing {CID_KEY}"))? + .to_owned(), + )?; + + let name = map + .get(NAME_KEY) + .and_then(|ipld| match ipld { + Ipld::Null => None, + ipld => Some(ipld), + }) + .and_then(|ipld| from_ipld(ipld.to_owned()).ok()); + + let num_tasks = from_ipld( + map.get(NUM_TASKS_KEY) + .ok_or_else(|| anyhow!("missing {NUM_TASKS_KEY}"))? 
+ .to_owned(), + )?; + + let progress = from_ipld::>( + map.get(PROGRESS_KEY) + .ok_or_else(|| anyhow!("missing {PROGRESS_KEY}"))? + .to_owned(), + )?; + + let progress_count = from_ipld( + map.get(PROGRESS_COUNT_KEY) + .ok_or_else(|| anyhow!("missing {PROGRESS_COUNT_KEY}"))? + .to_owned(), + )?; + + Ok(SentWorkflowInfo { + timestamp, + requestor, + cid, + name, + num_tasks, + progress, + progress_count, + }) + } +} + +#[derive(Debug, Clone, JsonSchema)] +#[schemars(rename = "received_workflow_info")] +pub struct ReceivedWorkflowInfo { + timestamp: i64, + #[schemars(description = "Workflow info provider peer ID")] + provider: Option, + #[schemars(description = "Workflow info CID")] + cid: String, + #[schemars(description = "Optional workflow name")] + name: Option, + #[schemars(description = "Number of tasks in workflow")] + num_tasks: u32, + #[schemars(description = "Completed task CIDs")] + progress: Vec, + #[schemars(description = "Number of workflow tasks completed")] + progress_count: u32, +} + +impl ReceivedWorkflowInfo { + pub(crate) fn new( + provider: Option, + cid: Cid, + name: Option, + num_tasks: u32, + progress: Vec, + progress_count: u32, + ) -> ReceivedWorkflowInfo { + ReceivedWorkflowInfo { + timestamp: Utc::now().timestamp_millis(), + provider: provider.map(|p| p.to_string()), + cid: cid.to_string(), + name: name.map(|n| n.into()), + num_tasks, + progress: progress.iter().map(|cid| cid.to_string()).collect(), + progress_count, + } + } +} + +impl DagJson for ReceivedWorkflowInfo {} + +impl From for Ipld { + fn from(notification: ReceivedWorkflowInfo) -> Self { + let map: BTreeMap = BTreeMap::from([ + (TIMESTAMP_KEY.into(), notification.timestamp.into()), + ( + PROVIDER_KEY.into(), + notification + .provider + .map(|peer_id| peer_id.into()) + .unwrap_or(Ipld::Null), + ), + (CID_KEY.into(), notification.cid.into()), + ( + NAME_KEY.into(), + notification + .name + .map(|peer_id| peer_id.into()) + .unwrap_or(Ipld::Null), + ), + (NUM_TASKS_KEY.into(), 
notification.num_tasks.into()), + ( + PROGRESS_KEY.into(), + Ipld::List( + notification + .progress + .iter() + .map(|cid| Ipld::String(cid.to_string())) + .collect(), + ), + ), + ( + PROGRESS_COUNT_KEY.into(), + notification.progress_count.into(), + ), + ]); + + Ipld::Map(map) + } +} + +impl TryFrom for ReceivedWorkflowInfo { + type Error = anyhow::Error; + + fn try_from(ipld: Ipld) -> Result { + let map = from_ipld::>(ipld)?; + + let timestamp = from_ipld( + map.get(TIMESTAMP_KEY) + .ok_or_else(|| anyhow!("missing {TIMESTAMP_KEY}"))? + .to_owned(), + )?; + + let provider = map + .get(PROVIDER_KEY) + .and_then(|ipld| match ipld { + Ipld::Null => None, + ipld => Some(ipld), + }) + .and_then(|ipld| from_ipld(ipld.to_owned()).ok()); + + let cid = from_ipld( + map.get(CID_KEY) + .ok_or_else(|| anyhow!("missing {CID_KEY}"))? + .to_owned(), + )?; + + let name = map + .get(NAME_KEY) + .and_then(|ipld| match ipld { + Ipld::Null => None, + ipld => Some(ipld), + }) + .and_then(|ipld| from_ipld(ipld.to_owned()).ok()); + + let num_tasks = from_ipld( + map.get(NUM_TASKS_KEY) + .ok_or_else(|| anyhow!("missing {NUM_TASKS_KEY}"))? + .to_owned(), + )?; + + let progress = from_ipld::>( + map.get(PROGRESS_KEY) + .ok_or_else(|| anyhow!("missing {PROGRESS_KEY}"))? + .to_owned(), + )?; + + let progress_count = from_ipld( + map.get(PROGRESS_COUNT_KEY) + .ok_or_else(|| anyhow!("missing {PROGRESS_COUNT_KEY}"))? 
+ .to_owned(), + )?; + + Ok(ReceivedWorkflowInfo { + timestamp, + provider, + cid, + name, + num_tasks, + progress, + progress_count, + }) + } +} + #[cfg(test)] mod test { use super::*; @@ -2004,9 +2301,9 @@ mod test { let got_workflow_info_dht = GotWorkflowInfoDht::new( Some(peer_id), cid, - Some(name), + Some(name.clone()), num_tasks, - progress, + progress.clone(), progress_count, ); let receipt_quorum_success_dht = @@ -2025,6 +2322,22 @@ mod test { connected_peer_count, peers, ); + let sent_workflow_info = SentWorkflowInfo::new( + peer_id, + cid, + Some(name.clone()), + num_tasks, + progress.clone(), + progress_count, + ); + let received_workflow_info = ReceivedWorkflowInfo::new( + Some(peer_id), + cid, + Some(name), + num_tasks, + progress, + progress_count, + ); vec![ ( @@ -2107,6 +2420,14 @@ mod test { workflow_info_quorum_failure_dht.timestamp, NetworkNotification::WorkflowInfoQuorumFailureDht(workflow_info_quorum_failure_dht), ), + ( + sent_workflow_info.timestamp, + NetworkNotification::SentWorkflowInfo(sent_workflow_info), + ), + ( + received_workflow_info.timestamp, + NetworkNotification::ReceivedWorkflowInfo(received_workflow_info), + ), ] } @@ -2292,6 +2613,39 @@ mod test { peers ); } + NetworkNotification::SentWorkflowInfo(n) => { + assert_eq!(n.timestamp, timestamp); + assert_eq!(PeerId::from_str(&n.requestor).unwrap(), peer_id); + assert_eq!(Cid::from_str(&n.cid).unwrap(), cid); + assert_eq!(n.name.map(|name| FastStr::new(name)), Some(name)); + assert_eq!(n.num_tasks, num_tasks); + assert_eq!( + n.progress + .iter() + .map(|cid| Cid::from_str(&cid).unwrap()) + .collect::>(), + progress + ); + assert_eq!(n.progress_count, progress_count); + } + NetworkNotification::ReceivedWorkflowInfo(n) => { + assert_eq!(n.timestamp, timestamp); + assert_eq!( + n.provider.map(|p| PeerId::from_str(&p).unwrap()), + Some(peer_id) + ); + assert_eq!(Cid::from_str(&n.cid).unwrap(), cid); + assert_eq!(n.name.map(|name| FastStr::new(name)), Some(name)); + 
assert_eq!(n.num_tasks, num_tasks); + assert_eq!( + n.progress + .iter() + .map(|cid| Cid::from_str(&cid).unwrap()) + .collect::>(), + progress + ); + assert_eq!(n.progress_count, progress_count); + } } } diff --git a/homestar-runtime/src/event_handler/swarm_event.rs b/homestar-runtime/src/event_handler/swarm_event.rs index 134c0fe9..75614200 100644 --- a/homestar-runtime/src/event_handler/swarm_event.rs +++ b/homestar-runtime/src/event_handler/swarm_event.rs @@ -923,19 +923,18 @@ async fn handle_swarm_event( ); #[cfg(feature = "websocket-notify")] - notification::emit_event( + notification::emit_network_event( event_handler.ws_evt_sender(), - EventNotificationTyp::SwarmNotification( - SwarmNotification::SentWorkflowInfo, + NetworkNotification::SentWorkflowInfo( + notification::SentWorkflowInfo::new( + peer, + workflow_info.cid(), + workflow_info.name, + workflow_info.num_tasks, + workflow_info.progress, + workflow_info.progress_count, + ), ), - btreemap! { - "requestor" => Ipld::String(peer.to_string()), - "cid" => Ipld::String(workflow_info.cid().to_string()), - "name" => workflow_info.name.as_ref().map_or(Ipld::Null, |name| Ipld::String(name.to_string())), - "numTasks" => Ipld::Integer(workflow_info.num_tasks as i128), - "progress" => Ipld::List(workflow_info.progress.iter().map(|cid| Ipld::String(cid.to_string())).collect()), - "progressCount" => Ipld::Integer(workflow_info.progress_count as i128), - }, ) } else { let _ = event_handler diff --git a/homestar-runtime/tests/network/dht.rs b/homestar-runtime/tests/network/dht.rs index 0021865c..0fed19cb 100644 --- a/homestar-runtime/tests/network/dht.rs +++ b/homestar-runtime/tests/network/dht.rs @@ -760,9 +760,10 @@ fn test_libp2p_dht_workflow_info_provider_integration() -> Result<()> { let json: serde_json::Value = serde_json::from_slice(&msg.unwrap().unwrap()).unwrap(); - if json["type"].as_str().unwrap() == "network:sentWorkflowInfo" { - sent_workflow_info_cid = 
Cid::from_str(json["data"]["cid"].as_str().unwrap()) - .expect("Unable to parse sent workflow info CID."); + if json["sent_workflow_info"].is_object() { + sent_workflow_info_cid = + Cid::from_str(json["sent_workflow_info"]["cid"].as_str().unwrap()) + .expect("Unable to parse sent workflow info CID."); break; } } else { @@ -782,9 +783,9 @@ fn test_libp2p_dht_workflow_info_provider_integration() -> Result<()> { let json: serde_json::Value = serde_json::from_slice(&msg.unwrap().unwrap()).unwrap(); - if json["type"].as_str().unwrap() == "network:receivedWorkflowInfo" { + if json["received_workflow_info"].is_object() { received_workflow_info_cid = - Cid::from_str(json["data"]["cid"].as_str().unwrap()) + Cid::from_str(json["received_workflow_info"]["cid"].as_str().unwrap()) .expect("Unable to parse received workflow info CID."); break; } From 9ad25169874f451badb8016c081d30cb3b4e1655 Mon Sep 17 00:00:00 2001 From: Brian Ginsburg Date: Fri, 2 Feb 2024 18:21:52 -0800 Subject: [PATCH 37/75] chore: Remove vestiges of old swarm notifications --- .../src/event_handler/notification.rs | 152 +----------------- .../notification/{swarm.rs => network.rs} | 24 +-- .../src/event_handler/swarm_event.rs | 8 +- homestar-runtime/src/lib.rs | 2 +- 4 files changed, 7 insertions(+), 179 deletions(-) rename homestar-runtime/src/event_handler/notification/{swarm.rs => network.rs} (99%) diff --git a/homestar-runtime/src/event_handler/notification.rs b/homestar-runtime/src/event_handler/notification.rs index 15caec1b..a5631d75 100644 --- a/homestar-runtime/src/event_handler/notification.rs +++ b/homestar-runtime/src/event_handler/notification.rs @@ -8,22 +8,14 @@ use crate::{ receipt::metadata::{WORKFLOW_KEY, WORKFLOW_NAME_KEY}, Receipt, }; -use anyhow::anyhow; -use chrono::prelude::Utc; use homestar_invocation::{ipld::DagJson, Receipt as InvocationReceipt}; -use libipld::{serde::from_ipld, Ipld}; -use serde::{Deserialize, Serialize}; -use std::{collections::BTreeMap, fmt, str::FromStr}; +use 
libipld::Ipld; use tracing::{debug, warn}; +pub(crate) mod network; pub(crate) mod receipt; -pub(crate) mod swarm; +pub(crate) use network::*; pub(crate) use receipt::ReceiptNotification; -pub(crate) use swarm::*; - -const TYPE_KEY: &str = "type"; -const DATA_KEY: &str = "data"; -const TIMESTAMP_KEY: &str = "timestamp"; /// Send receipt notification as bytes. pub(crate) fn emit_receipt( @@ -66,30 +58,6 @@ pub(crate) fn emit_receipt( } } -/// Send event notification as bytes. -pub(crate) fn emit_event( - notifier: Notifier, - ty: EventNotificationTyp, - data: BTreeMap<&str, Ipld>, -) { - let header = Header::new( - SubscriptionTyp::EventSub(SUBSCRIBE_NETWORK_EVENTS_ENDPOINT.to_string()), - None, - ); - let notification = EventNotification::new(ty, data); - - if let Ok(json) = notification.to_json() { - let _ = notifier.notify(Message::new(header, json)); - } else { - warn!( - subject = "notification.err", - category = "notification", - "unable to serialize event notification as bytes: {}", - notification.typ - ); - } -} - /// Send network event notification as bytes. pub(crate) fn emit_network_event( notifier: Notifier, @@ -102,7 +70,6 @@ pub(crate) fn emit_network_event( if let Ok(json) = notification.to_json() { if let Err(err) = notifier.notify(Message::new(header, json)) { - // TODO Check on why this causes connection closed log errors debug!( subject = "notification.err", category = "notification", @@ -120,116 +87,3 @@ pub(crate) fn emit_network_event( ); } } - -/// Notification sent to clients. 
-#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] -pub(crate) struct EventNotification { - typ: EventNotificationTyp, - data: Ipld, - timestamp: i64, -} - -impl EventNotification { - pub(crate) fn new(typ: EventNotificationTyp, data: BTreeMap<&str, Ipld>) -> Self { - let data = data - .iter() - .map(|(key, val)| (key.to_string(), val.to_owned())) - .collect(); - - Self { - typ, - data: Ipld::Map(data), - timestamp: Utc::now().timestamp_millis(), - } - } -} - -impl DagJson for EventNotification {} - -impl From for Ipld { - fn from(notification: EventNotification) -> Self { - Ipld::Map(BTreeMap::from([ - ("type".into(), notification.typ.into()), - ("data".into(), notification.data), - ("timestamp".into(), notification.timestamp.into()), - ])) - } -} - -impl TryFrom for EventNotification { - type Error = anyhow::Error; - - fn try_from(ipld: Ipld) -> Result { - let map = from_ipld::>(ipld)?; - - let typ: EventNotificationTyp = map - .get(TYPE_KEY) - .ok_or_else(|| anyhow!("missing {TYPE_KEY}"))? - .to_owned() - .try_into()?; - - let data = map - .get(DATA_KEY) - .ok_or_else(|| anyhow!("missing {DATA_KEY}"))? - .to_owned(); - - let timestamp = from_ipld( - map.get(TIMESTAMP_KEY) - .ok_or_else(|| anyhow!("missing {TIMESTAMP_KEY}"))? - .to_owned(), - )?; - - Ok(EventNotification { - typ, - data, - timestamp, - }) - } -} - -/// Types of notification sent to clients. 
-#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] -pub(crate) enum EventNotificationTyp { - SwarmNotification(SwarmNotification), -} - -impl fmt::Display for EventNotificationTyp { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match self { - EventNotificationTyp::SwarmNotification(subtype) => { - write!(f, "swarm notification: {}", subtype) - } - } - } -} - -impl DagJson for EventNotificationTyp {} - -impl From for Ipld { - fn from(typ: EventNotificationTyp) -> Self { - match typ { - EventNotificationTyp::SwarmNotification(subtype) => { - Ipld::String(format!("network:{}", subtype)) - } - } - } -} - -impl TryFrom for EventNotificationTyp { - type Error = anyhow::Error; - - fn try_from(ipld: Ipld) -> Result { - if let Some((ty, subtype)) = from_ipld::(ipld)?.split_once(':') { - match ty { - "network" => Ok(EventNotificationTyp::SwarmNotification( - SwarmNotification::from_str(subtype)?, - )), - _ => Err(anyhow!("Missing event notification type: {}", ty)), - } - } else { - Err(anyhow!( - "Event notification type missing colon delimiter between type and subtype." 
- )) - } - } -} diff --git a/homestar-runtime/src/event_handler/notification/swarm.rs b/homestar-runtime/src/event_handler/notification/network.rs similarity index 99% rename from homestar-runtime/src/event_handler/notification/swarm.rs rename to homestar-runtime/src/event_handler/notification/network.rs index 735d20a2..68ea8668 100644 --- a/homestar-runtime/src/event_handler/notification/swarm.rs +++ b/homestar-runtime/src/event_handler/notification/network.rs @@ -12,8 +12,7 @@ use libp2p::{ Multiaddr, PeerId, }; use schemars::JsonSchema; -use serde::{Deserialize, Serialize}; -use std::{collections::BTreeMap, fmt, str::FromStr}; +use std::{collections::BTreeMap, fmt}; const ADDRESS_KEY: &str = "address"; const ADDRESSES_KEY: &str = "addresses"; @@ -36,26 +35,6 @@ const SERVER_KEY: &str = "server"; const STORED_TO_PEERS_KEY: &str = "stored_to_peers"; const TIMESTAMP_KEY: &str = "timestamp"; -// Swarm notification types sent to clients -#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] -pub(crate) enum SwarmNotification {} - -impl fmt::Display for SwarmNotification { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match *self {} - } -} - -impl FromStr for SwarmNotification { - type Err = anyhow::Error; - - fn from_str(ty: &str) -> Result { - match ty { - _ => Err(anyhow!("Missing swarm notification type: {}", ty)), - } - } -} - /// Network notification type. 
#[derive(Debug, Clone, JsonSchema)] #[schemars(rename = "network")] @@ -2196,6 +2175,7 @@ mod test { use super::*; use homestar_invocation::test_utils::cid::generate_cid; use rand::thread_rng; + use std::str::FromStr; #[derive(Clone, Debug)] struct Fixtures { diff --git a/homestar-runtime/src/event_handler/swarm_event.rs b/homestar-runtime/src/event_handler/swarm_event.rs index 75614200..21c35fbe 100644 --- a/homestar-runtime/src/event_handler/swarm_event.rs +++ b/homestar-runtime/src/event_handler/swarm_event.rs @@ -2,9 +2,7 @@ use super::EventHandler; #[cfg(feature = "websocket-notify")] -use crate::event_handler::notification::{ - self, EventNotificationTyp, NetworkNotification, SwarmNotification, -}; +use crate::event_handler::notification::{self, NetworkNotification}; #[cfg(feature = "ipfs")] use crate::network::IpfsCli; use crate::{ @@ -27,8 +25,6 @@ use crate::{ use anyhow::{anyhow, Result}; use async_trait::async_trait; use libipld::Cid; -#[cfg(feature = "websocket-notify")] -use libipld::Ipld; use libp2p::{ gossipsub, identify, kad, kad::{AddProviderOk, BootstrapOk, GetProvidersOk, GetRecordOk, PutRecordOk, QueryResult}, @@ -39,8 +35,6 @@ use libp2p::{ swarm::{dial_opts::DialOpts, SwarmEvent}, Multiaddr, PeerId, StreamProtocol, }; -#[cfg(feature = "websocket-notify")] -use maplit::btreemap; use std::collections::{BTreeMap, HashMap, HashSet}; use tracing::{debug, error, info, warn}; diff --git a/homestar-runtime/src/lib.rs b/homestar-runtime/src/lib.rs index e6656991..fa01e995 100644 --- a/homestar-runtime/src/lib.rs +++ b/homestar-runtime/src/lib.rs @@ -73,7 +73,7 @@ pub(crate) mod libp2p; pub use logger::*; pub(crate) mod metrics; #[cfg(feature = "websocket-notify")] -pub use event_handler::notification::{receipt::ReceiptNotification, swarm::NetworkNotification}; +pub use event_handler::notification::{network::NetworkNotification, receipt::ReceiptNotification}; #[allow(unused_imports)] pub(crate) use event_handler::EventHandler; pub use 
network::webserver::PrometheusData; From 4c60317603fe2f6fee11c793b2932a8e3732a0fd Mon Sep 17 00:00:00 2001 From: Brian Ginsburg Date: Mon, 5 Feb 2024 09:30:15 -0800 Subject: [PATCH 38/75] chore: Remove unused feature flag --- homestar-runtime/src/network/webserver/rpc.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/homestar-runtime/src/network/webserver/rpc.rs b/homestar-runtime/src/network/webserver/rpc.rs index 200242af..22e635d4 100644 --- a/homestar-runtime/src/network/webserver/rpc.rs +++ b/homestar-runtime/src/network/webserver/rpc.rs @@ -185,7 +185,6 @@ where } })?; - #[cfg(not(test))] module.register_async_method(NODE_INFO_ENDPOINT, |_, ctx| async move { let (tx, rx) = crate::channel::AsyncChannel::oneshot(); ctx.runner_sender From 38fdf6a053e57610ffd9e34b7a8a07415b4c51b6 Mon Sep 17 00:00:00 2001 From: Brian Ginsburg Date: Mon, 5 Feb 2024 14:23:09 -0800 Subject: [PATCH 39/75] feat: Add OpenRPC API doc and JSON Schemas --- .pre-commit-config.yaml | 2 +- homestar-runtime/schemas/docs/api.json | 1668 +++++++++++++++++ homestar-runtime/schemas/docs/health.json | 15 + homestar-runtime/schemas/docs/metrics.json | 87 + homestar-runtime/schemas/docs/network.json | 912 +++++++++ homestar-runtime/schemas/docs/node_info.json | 67 + homestar-runtime/schemas/docs/receipt.json | 146 ++ .../schemas/docs/receipt_notification.json | 184 ++ homestar-runtime/schemas/docs/workflow.json | 266 +++ homestar-runtime/schemas/generate.rs | 3 +- 10 files changed, 3347 insertions(+), 3 deletions(-) create mode 100644 homestar-runtime/schemas/docs/api.json create mode 100644 homestar-runtime/schemas/docs/health.json create mode 100644 homestar-runtime/schemas/docs/metrics.json create mode 100644 homestar-runtime/schemas/docs/network.json create mode 100644 homestar-runtime/schemas/docs/node_info.json create mode 100644 homestar-runtime/schemas/docs/receipt.json create mode 100644 homestar-runtime/schemas/docs/receipt_notification.json create mode 100644 
homestar-runtime/schemas/docs/workflow.json diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index e7310957..b60f3e33 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -79,7 +79,7 @@ repos: - id: check-merge-conflict - id: trailing-whitespace - id: end-of-file-fixer - exclude: \.txt$ + exclude: \.(txt|json)$ - id: check-yaml - id: check-json - id: check-added-large-files diff --git a/homestar-runtime/schemas/docs/api.json b/homestar-runtime/schemas/docs/api.json new file mode 100644 index 00000000..fdffe4b7 --- /dev/null +++ b/homestar-runtime/schemas/docs/api.json @@ -0,0 +1,1668 @@ +{ + "openrpc": "1.2.6", + "info": { + "title": "homestar", + "description": "Homestar runtime implementation", + "version": "0.10.0", + "contact": { + "name": null, + "email": null, + "url": "https://github.com/ipvm-wg/homestar/tree/main/homestar-runtime" + }, + "license": { + "name": "Apache-2.0", + "url": null + } + }, + "externalDocs": { + "description": null, + "url": "https://docs.everywhere.computer/homestar/what-is-homestar/" + }, + "methods": [ + { + "name": "health", + "paramStructure": "by-name", + "params": [], + "result": { + "name": "health", + "schema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "health", + "description": "Health status of the server and database connection.", + "type": "object", + "required": [ + "healthy" + ], + "properties": { + "healthy": { + "description": "Health status.", + "type": "boolean" + } + } + }, + "required": true, + "deprecated": false + }, + "deprecated": false + }, + { + "name": "metrics", + "paramStructure": "by-name", + "params": [], + "result": { + "name": "metrics", + "schema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Metrics data", + "description": "Prometheus metrics data", + "type": "object", + "required": [ + "metrics" + ], + "properties": { + "metrics": { + "type": "array", + "items": { + "$ref": "#/definitions/metric" + } + } + }, + 
"definitions": { + "metric": { + "title": "Metric family", + "description": "A prometheus gauge, summary, or histogram metric", + "type": "object", + "if": { + "properties": { + "metric_type": { + "type": "string", + "const": "gauge" + } + } + }, + "then": { + "properties": { + "data": { + "title": "Gauge data", + "description": "A gauge metric", + "type": "object", + "required": [ + "type", + "value" + ], + "properties": { + "labels": { + "type": [ + "object", + "null" + ], + "additionalProperties": { + "type": "string" + } + }, + "type": { + "type": "string", + "const": "metric" + }, + "value": { + "type": "string" + } + } + } + } + }, + "else": false, + "required": [ + "data", + "help", + "metric_name", + "metric_type" + ], + "properties": { + "help": { + "type": "string" + }, + "metric_name": { + "type": "string" + }, + "metric_type": { + "title": "Metric type", + "type": "string", + "enum": [ + "gauge", + "histogram", + "summary" + ] + } + } + } + } + }, + "required": true, + "deprecated": false + }, + "deprecated": false + }, + { + "name": "node", + "paramStructure": "by-name", + "params": [], + "result": { + "name": "node_info", + "schema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "node_info", + "description": "Node information.", + "type": "object", + "required": [ + "dynamic", + "static" + ], + "properties": { + "dynamic": { + "description": "Dynamic node information available through events at runtime.", + "allOf": [ + { + "$ref": "#/definitions/dynamic" + } + ] + }, + "static": { + "description": "Static node information available at startup.", + "allOf": [ + { + "$ref": "#/definitions/static" + } + ] + } + }, + "definitions": { + "dynamic": { + "description": "Dynamic node information available through events at runtime.", + "type": "object", + "required": [ + "connections", + "listeners" + ], + "properties": { + "connections": { + "description": "Peers and their addresses that are connected to the node", + "type": "object", 
+ "additionalProperties": { + "type": "string" + } + }, + "listeners": { + "description": "Listen addresses for the node", + "type": "array", + "items": { + "type": "string" + } + } + } + }, + "static": { + "description": "Static node information available at startup.", + "type": "object", + "required": [ + "peer_id" + ], + "properties": { + "peer_id": { + "description": "The peer ID of the node", + "type": "string" + } + } + } + } + }, + "required": true, + "deprecated": false + }, + "deprecated": false + }, + { + "name": "subscribe_network_events", + "paramStructure": "by-name", + "params": [], + "result": { + "name": "subscription_id", + "schema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "String", + "type": "string" + }, + "required": true, + "deprecated": false + }, + "deprecated": false, + "x-messages": { + "name": "network subscription messages", + "schema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "network", + "description": "Network notification type.", + "oneOf": [ + { + "description": "Listening on new address notification.", + "type": "object", + "required": [ + "new_listen_addr" + ], + "properties": { + "new_listen_addr": { + "$ref": "#/definitions/new_listen_addr" + } + }, + "additionalProperties": false + }, + { + "description": "Connection established notification.", + "type": "object", + "required": [ + "connection_established" + ], + "properties": { + "connection_established": { + "$ref": "#/definitions/connection_established" + } + }, + "additionalProperties": false + }, + { + "description": "Connection closed notification.", + "type": "object", + "required": [ + "connection_closed" + ], + "properties": { + "connection_closed": { + "$ref": "#/definitions/connection_closed" + } + }, + "additionalProperties": false + }, + { + "description": "Outgoing conenction error notification.", + "type": "object", + "required": [ + "outgoing_connection_error" + ], + "properties": { + 
"outgoing_connection_error": { + "$ref": "#/definitions/outgoing_connection_error" + } + }, + "additionalProperties": false + }, + { + "description": "Incoming conenction error notification.", + "type": "object", + "required": [ + "incoming_connection_error" + ], + "properties": { + "incoming_connection_error": { + "$ref": "#/definitions/incoming_connection_error" + } + }, + "additionalProperties": false + }, + { + "description": "mDNS discovered notification.", + "type": "object", + "required": [ + "discovered_mdns" + ], + "properties": { + "discovered_mdns": { + "$ref": "#/definitions/discovered_mdns" + } + }, + "additionalProperties": false + }, + { + "description": "Rendezvous client discovered notification.", + "type": "object", + "required": [ + "discovered_rendezvous" + ], + "properties": { + "discovered_rendezvous": { + "$ref": "#/definitions/discovered_rendezvous" + } + }, + "additionalProperties": false + }, + { + "description": "Rendezvous client discovered notification.", + "type": "object", + "required": [ + "registered_rendezvous" + ], + "properties": { + "registered_rendezvous": { + "$ref": "#/definitions/registered_rendezvous" + } + }, + "additionalProperties": false + }, + { + "description": "Rendezvous discover served notification.", + "type": "object", + "required": [ + "discover_served_rendezvous" + ], + "properties": { + "discover_served_rendezvous": { + "$ref": "#/definitions/registered_rendezvous" + } + }, + "additionalProperties": false + }, + { + "description": "Rendezvous peer registered notification.", + "type": "object", + "required": [ + "peer_registered_rendezvous" + ], + "properties": { + "peer_registered_rendezvous": { + "$ref": "#/definitions/peer_registered_rendezvous" + } + }, + "additionalProperties": false + }, + { + "description": "Published receipt pubsub notification.", + "type": "object", + "required": [ + "published_receipt_pubsub" + ], + "properties": { + "published_receipt_pubsub": { + "$ref": 
"#/definitions/published_receipt_pubsub" + } + }, + "additionalProperties": false + }, + { + "description": "Received receipt pubsub notification.", + "type": "object", + "required": [ + "received_receipt_pubsub" + ], + "properties": { + "received_receipt_pubsub": { + "$ref": "#/definitions/received_receipt_pubsub" + } + }, + "additionalProperties": false + }, + { + "description": "Put receipt DHT notification.", + "type": "object", + "required": [ + "put_receipt_dht" + ], + "properties": { + "put_receipt_dht": { + "$ref": "#/definitions/put_receipt_dht" + } + }, + "additionalProperties": false + }, + { + "description": "Got receipt DHT notification.", + "type": "object", + "required": [ + "got_receipt_dht" + ], + "properties": { + "got_receipt_dht": { + "$ref": "#/definitions/got_receipt_dht" + } + }, + "additionalProperties": false + }, + { + "description": "Put workflow info DHT notification.", + "type": "object", + "required": [ + "put_workflow_info_dht" + ], + "properties": { + "put_workflow_info_dht": { + "$ref": "#/definitions/put_workflow_info_dht" + } + }, + "additionalProperties": false + }, + { + "description": "Put workflow info DHT notification.", + "type": "object", + "required": [ + "got_workflow_info_dht" + ], + "properties": { + "got_workflow_info_dht": { + "$ref": "#/definitions/got_workflow_info_dht" + } + }, + "additionalProperties": false + }, + { + "description": "Receipt quorum success notification.", + "type": "object", + "required": [ + "receipt_quorum_success_dht" + ], + "properties": { + "receipt_quorum_success_dht": { + "$ref": "#/definitions/receipt_quorum_success_dht" + } + }, + "additionalProperties": false + }, + { + "description": "Receipt quorum failure notification.", + "type": "object", + "required": [ + "receipt_quorum_failure_dht" + ], + "properties": { + "receipt_quorum_failure_dht": { + "$ref": "#/definitions/receipt_quorum_failure_dht" + } + }, + "additionalProperties": false + }, + { + "description": "Workflow info quorum 
success notification.", + "type": "object", + "required": [ + "workflow_info_quorum_success_dht" + ], + "properties": { + "workflow_info_quorum_success_dht": { + "$ref": "#/definitions/workflow_info_quorum_success_dht" + } + }, + "additionalProperties": false + }, + { + "description": "Workflow info quorum failure notification.", + "type": "object", + "required": [ + "workflow_info_quorum_failure_dht" + ], + "properties": { + "workflow_info_quorum_failure_dht": { + "$ref": "#/definitions/workflow_info_quorum_failure_dht" + } + }, + "additionalProperties": false + }, + { + "description": "Sent workflow info notification.", + "type": "object", + "required": [ + "sent_workflow_info" + ], + "properties": { + "sent_workflow_info": { + "$ref": "#/definitions/sent_workflow_info" + } + }, + "additionalProperties": false + }, + { + "description": "Received workflow info notification.", + "type": "object", + "required": [ + "received_workflow_info" + ], + "properties": { + "received_workflow_info": { + "$ref": "#/definitions/received_workflow_info" + } + }, + "additionalProperties": false + } + ], + "definitions": { + "connection_closed": { + "type": "object", + "required": [ + "address", + "peer_id", + "timestamp" + ], + "properties": { + "address": { + "type": "string" + }, + "peer_id": { + "type": "string" + }, + "timestamp": { + "type": "integer", + "format": "int64" + } + } + }, + "connection_established": { + "type": "object", + "required": [ + "address", + "peer_id", + "timestamp" + ], + "properties": { + "address": { + "type": "string" + }, + "peer_id": { + "type": "string" + }, + "timestamp": { + "type": "integer", + "format": "int64" + } + } + }, + "discovered_mdns": { + "type": "object", + "required": [ + "peers", + "timestamp" + ], + "properties": { + "peers": { + "description": "Peers discovered by peer ID and multiaddress", + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "timestamp": { + "type": "integer", + "format": "int64" + } + 
} + }, + "discovered_rendezvous": { + "type": "object", + "required": [ + "peers", + "server", + "timestamp" + ], + "properties": { + "peers": { + "description": "Peers discovered by peer ID and multiaddresses", + "type": "object", + "additionalProperties": { + "type": "array", + "items": { + "type": "string" + } + } + }, + "server": { + "description": "Server that fulfilled the discovery request", + "type": "string" + }, + "timestamp": { + "type": "integer", + "format": "int64" + } + } + }, + "got_receipt_dht": { + "type": "object", + "required": [ + "cid", + "ran", + "timestamp" + ], + "properties": { + "cid": { + "description": "Receipt CID", + "type": "string" + }, + "publisher": { + "description": "Receipt publisher peer ID", + "type": [ + "string", + "null" + ] + }, + "ran": { + "description": "Ran receipt CID", + "type": "string" + }, + "timestamp": { + "type": "integer", + "format": "int64" + } + } + }, + "got_workflow_info_dht": { + "type": "object", + "required": [ + "cid", + "num_tasks", + "progress", + "progress_count", + "timestamp" + ], + "properties": { + "cid": { + "description": "Workflow info CID", + "type": "string" + }, + "name": { + "description": "Optional workflow name", + "type": [ + "string", + "null" + ] + }, + "num_tasks": { + "description": "Number of tasks in workflow", + "type": "integer", + "format": "uint32", + "minimum": 0.0 + }, + "progress": { + "description": "Completed task CIDs", + "type": "array", + "items": { + "type": "string" + } + }, + "progress_count": { + "description": "Number of workflow tasks completed", + "type": "integer", + "format": "uint32", + "minimum": 0.0 + }, + "publisher": { + "description": "Workflow info publisher peer ID", + "type": [ + "string", + "null" + ] + }, + "timestamp": { + "type": "integer", + "format": "int64" + } + } + }, + "incoming_connection_error": { + "type": "object", + "required": [ + "error", + "timestamp" + ], + "properties": { + "error": { + "type": "string" + }, + "timestamp": { + 
"type": "integer", + "format": "int64" + } + } + }, + "new_listen_addr": { + "type": "object", + "required": [ + "address", + "peer_id", + "timestamp" + ], + "properties": { + "address": { + "type": "string" + }, + "peer_id": { + "type": "string" + }, + "timestamp": { + "type": "integer", + "format": "int64" + } + } + }, + "outgoing_connection_error": { + "type": "object", + "required": [ + "error", + "timestamp" + ], + "properties": { + "error": { + "type": "string" + }, + "peer_id": { + "type": [ + "string", + "null" + ] + }, + "timestamp": { + "type": "integer", + "format": "int64" + } + } + }, + "peer_registered_rendezvous": { + "type": "object", + "required": [ + "addresses", + "peer_id", + "timestamp" + ], + "properties": { + "addresses": { + "description": "Multiaddresses for peer", + "type": "array", + "items": { + "type": "string" + } + }, + "peer_id": { + "description": "Peer registered", + "type": "string" + }, + "timestamp": { + "type": "integer", + "format": "int64" + } + } + }, + "published_receipt_pubsub": { + "type": "object", + "required": [ + "cid", + "ran", + "timestamp" + ], + "properties": { + "cid": { + "description": "Receipt CID", + "type": "string" + }, + "ran": { + "description": "Ran receipt CID", + "type": "string" + }, + "timestamp": { + "type": "integer", + "format": "int64" + } + } + }, + "put_receipt_dht": { + "type": "object", + "required": [ + "cid", + "ran", + "timestamp" + ], + "properties": { + "cid": { + "description": "Receipt CID", + "type": "string" + }, + "ran": { + "description": "Ran receipt CID", + "type": "string" + }, + "timestamp": { + "type": "integer", + "format": "int64" + } + } + }, + "put_workflow_info_dht": { + "type": "object", + "required": [ + "cid", + "num_tasks", + "progress", + "progress_count", + "timestamp" + ], + "properties": { + "cid": { + "description": "Workflow info CID", + "type": "string" + }, + "name": { + "description": "Optional workflow name", + "type": [ + "string", + "null" + ] + }, + 
"num_tasks": { + "description": "Number of tasks in workflow", + "type": "integer", + "format": "uint32", + "minimum": 0.0 + }, + "progress": { + "description": "Completed task CIDs", + "type": "array", + "items": { + "type": "string" + } + }, + "progress_count": { + "description": "Number of workflow tasks completed", + "type": "integer", + "format": "uint32", + "minimum": 0.0 + }, + "timestamp": { + "type": "integer", + "format": "int64" + } + } + }, + "receipt_quorum_failure_dht": { + "type": "object", + "required": [ + "cid", + "connected_peer_count", + "quorum", + "stored_to_peers", + "timestamp" + ], + "properties": { + "cid": { + "description": "Receipt CID", + "type": "string" + }, + "connected_peer_count": { + "description": "Number of connected peers", + "type": "integer", + "format": "uint", + "minimum": 0.0 + }, + "quorum": { + "description": "Number of peers required for quorum", + "type": "integer", + "format": "uint", + "minimum": 0.0 + }, + "stored_to_peers": { + "description": "Peers participating in quorum", + "type": "array", + "items": { + "type": "string" + } + }, + "timestamp": { + "type": "integer", + "format": "int64" + } + } + }, + "receipt_quorum_success_dht": { + "type": "object", + "required": [ + "cid", + "quorum", + "timestamp" + ], + "properties": { + "cid": { + "description": "Receipt CID", + "type": "string" + }, + "quorum": { + "description": "Number of peers participating in quorum", + "type": "integer", + "format": "uint", + "minimum": 0.0 + }, + "timestamp": { + "type": "integer", + "format": "int64" + } + } + }, + "received_receipt_pubsub": { + "type": "object", + "required": [ + "cid", + "publisher", + "ran", + "timestamp" + ], + "properties": { + "cid": { + "description": "Receipt CID", + "type": "string" + }, + "publisher": { + "description": "Receipt publisher peer ID", + "type": "string" + }, + "ran": { + "description": "Ran receipt CID", + "type": "string" + }, + "timestamp": { + "type": "integer", + "format": "int64" + } 
+ } + }, + "received_workflow_info": { + "type": "object", + "required": [ + "cid", + "num_tasks", + "progress", + "progress_count", + "timestamp" + ], + "properties": { + "cid": { + "description": "Workflow info CID", + "type": "string" + }, + "name": { + "description": "Optional workflow name", + "type": [ + "string", + "null" + ] + }, + "num_tasks": { + "description": "Number of tasks in workflow", + "type": "integer", + "format": "uint32", + "minimum": 0.0 + }, + "progress": { + "description": "Completed task CIDs", + "type": "array", + "items": { + "type": "string" + } + }, + "progress_count": { + "description": "Number of workflow tasks completed", + "type": "integer", + "format": "uint32", + "minimum": 0.0 + }, + "provider": { + "description": "Workflow info provider peer ID", + "type": [ + "string", + "null" + ] + }, + "timestamp": { + "type": "integer", + "format": "int64" + } + } + }, + "registered_rendezvous": { + "type": "object", + "required": [ + "server", + "timestamp" + ], + "properties": { + "server": { + "description": "Server that accepted registration", + "type": "string" + }, + "timestamp": { + "type": "integer", + "format": "int64" + } + } + }, + "sent_workflow_info": { + "type": "object", + "required": [ + "cid", + "num_tasks", + "progress", + "progress_count", + "requestor", + "timestamp" + ], + "properties": { + "cid": { + "description": "Workflow info CID", + "type": "string" + }, + "name": { + "description": "Optional workflow name", + "type": [ + "string", + "null" + ] + }, + "num_tasks": { + "description": "Number of tasks in workflow", + "type": "integer", + "format": "uint32", + "minimum": 0.0 + }, + "progress": { + "description": "Completed task CIDs", + "type": "array", + "items": { + "type": "string" + } + }, + "progress_count": { + "description": "Number of workflow tasks completed", + "type": "integer", + "format": "uint32", + "minimum": 0.0 + }, + "requestor": { + "description": "Peer that requested workflow info", + "type": 
"string" + }, + "timestamp": { + "type": "integer", + "format": "int64" + } + } + }, + "workflow_info_quorum_failure_dht": { + "type": "object", + "required": [ + "cid", + "connected_peer_count", + "quorum", + "stored_to_peers", + "timestamp" + ], + "properties": { + "cid": { + "description": "Workflow info CID", + "type": "string" + }, + "connected_peer_count": { + "description": "Number of connected peers", + "type": "integer", + "format": "uint", + "minimum": 0.0 + }, + "quorum": { + "description": "Number of peers required for quorum", + "type": "integer", + "format": "uint", + "minimum": 0.0 + }, + "stored_to_peers": { + "description": "Peers participating in quorum", + "type": "array", + "items": { + "type": "string" + } + }, + "timestamp": { + "type": "integer", + "format": "int64" + } + } + }, + "workflow_info_quorum_success_dht": { + "type": "object", + "required": [ + "cid", + "quorum", + "timestamp" + ], + "properties": { + "cid": { + "description": "Workflow info CID", + "type": "string" + }, + "quorum": { + "description": "Number of peers participating in quorum", + "type": "integer", + "format": "uint", + "minimum": 0.0 + }, + "timestamp": { + "type": "integer", + "format": "int64" + } + } + } + } + }, + "required": true, + "deprecated": false + } + }, + { + "name": "unsubscribe_network_events", + "paramStructure": "by-name", + "params": [], + "result": { + "name": "unsubscribe result", + "schema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Boolean", + "type": "boolean" + }, + "required": true, + "deprecated": false + }, + "deprecated": false + }, + { + "name": "subscribe_run_workflow", + "paramStructure": "by-name", + "params": [ + { + "name": "workflow", + "schema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Workflow", + "description": "Workflow composed of tasks", + "type": "object", + "required": [ + "tasks" + ], + "properties": { + "tasks": { + "type": "array", + "items": { + "$ref": 
"#/definitions/task" + } + } + }, + "definitions": { + "await_result": { + "title": "Await result", + "description": "Branches of a promise that is awaited", + "oneOf": [ + { + "type": "object", + "properties": { + "await/ok": { + "$ref": "#/definitions/pointer" + } + } + }, + { + "type": "object", + "properties": { + "await/error": { + "$ref": "#/definitions/pointer" + } + } + }, + { + "type": "object", + "properties": { + "await/*": { + "$ref": "#/definitions/pointer" + } + } + } + ] + }, + "ipld": { + "title": "Ipld", + "description": "DAG-JSON encoded IPLD: https://github.com/ipld/ipld/blob/master/specs/codecs/dag-json/spec.md", + "oneOf": [ + { + "type": "null" + }, + { + "type": "boolean" + }, + { + "type": "number" + }, + { + "type": "string" + }, + { + "$ref": "#/definitions/ipld_bytes" + }, + { + "type": "array" + }, + { + "type": "object" + }, + { + "$ref": "#/definitions/ipld_link" + } + ] + }, + "ipld_bytes": { + "title": "IPLD bytes", + "description": "Base64 encoded binary", + "type": "object", + "properties": { + "/": { + "type": "object", + "properties": { + "bytes": { + "type": "string" + } + } + } + } + }, + "ipld_link": { + "title": "IPLD link", + "description": "CID link that points to some IPLD data", + "type": "object", + "properties": { + "/": { + "type": "string" + } + } + }, + "pointer": { + "description": "CID reference to an invocation, task, instruction, or receipt", + "type": "object", + "properties": { + "/": { + "type": "string" + } + } + }, + "prf": { + "description": "CIDs referencing UCAN proofs", + "type": [ + "array" + ], + "items": { + "type": "string" + } + }, + "resources": { + "description": "Resource configuration for fuel quota, memory allowance, and timeout", + "type": "object", + "properties": { + "fuel": { + "type": [ + "integer", + "null" + ], + "format": "uint64", + "minimum": 0.0 + }, + "memory": { + "description": "Memory in bytes", + "type": [ + "integer", + "null" + ], + "format": "uint64", + "minimum": 0.0 + }, + 
"time": { + "description": "Timeout in milliseconds", + "type": [ + "integer", + "null" + ], + "format": "uint64", + "minimum": 0.0 + } + } + }, + "run": { + "title": "Run instruction", + "description": "An instruction that runs a function from a resource, executor that will run the function, inputs to the executor, and optional nonce", + "type": "object", + "if": { + "properties": { + "op": { + "type": "string", + "const": "wasm/run" + } + } + }, + "then": { + "properties": { + "input": { + "type": "object", + "required": [ + "args", + "func" + ], + "properties": { + "args": { + "description": "Arguments to the function. May await a result from another task.", + "type": "array", + "items": [ + { + "$ref": "#/definitions/ipld" + }, + { + "$ref": "#/definitions/await_result" + } + ] + }, + "func": { + "description": "The function to call on the Wasm resource", + "type": "string" + } + } + } + } + }, + "else": false, + "required": [ + "input", + "nnc", + "op", + "rsc" + ], + "properties": { + "nnc": { + "description": "A 12-byte or 16-byte nonce. 
Use empty string for no nonce.", + "type": "string" + }, + "op": { + "description": "Function executor", + "type": "string", + "enum": [ + "wasm/run" + ] + }, + "rsc": { + "type": "string", + "format": "uri" + } + } + }, + "task": { + "description": "Contains a run instruction, configuration, optional reference to receipt that caused task to run, and authorization", + "type": "object", + "required": [ + "meta", + "prf", + "run" + ], + "properties": { + "cause": { + "title": "Receipt reference", + "anyOf": [ + { + "$ref": "#/definitions/pointer" + }, + { + "type": "null" + } + ] + }, + "meta": { + "title": "Task Configuration", + "allOf": [ + { + "$ref": "#/definitions/resources" + } + ] + }, + "prf": { + "title": "UCAN Authorization", + "allOf": [ + { + "$ref": "#/definitions/prf" + } + ] + }, + "run": { + "title": "Run instruction", + "allOf": [ + { + "$ref": "#/definitions/run" + } + ] + } + } + } + } + }, + "required": true, + "deprecated": false + } + ], + "result": { + "name": "subscription_id", + "schema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "String", + "type": "string" + }, + "required": true, + "deprecated": false + }, + "deprecated": false, + "x-messages": { + "name": "workflow subscription messages", + "summary": "receipt notifications from a running workflow", + "schema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Receipt notification", + "description": "A receipt notification associated with a running workflow", + "type": "object", + "required": [ + "receipt", + "receipt_cid" + ], + "properties": { + "metadata": { + "title": "Metadata", + "description": "Workflow metadata to contextualize the receipt", + "type": "object", + "required": [ + "name", + "receipt", + "receipt_cid" + ], + "properties": { + "name": { + "type": "string" + }, + "replayed": { + "type": "boolean" + }, + "workflow": { + "$ref": "#/definitions/ipld_link" + } + } + }, + "receipt": { + "$ref": "#/definitions/receipt" + }, + 
"receipt_cid": { + "$ref": "#/definitions/ipld_link" + } + }, + "definitions": { + "ipld": { + "title": "Ipld", + "description": "DAG-JSON encoded IPLD: https://github.com/ipld/ipld/blob/master/specs/codecs/dag-json/spec.md", + "oneOf": [ + { + "type": "null" + }, + { + "type": "boolean" + }, + { + "type": "number" + }, + { + "type": "string" + }, + { + "$ref": "#/definitions/ipld_bytes" + }, + { + "type": "array" + }, + { + "type": "object" + }, + { + "$ref": "#/definitions/ipld_link" + } + ] + }, + "ipld_bytes": { + "title": "IPLD bytes", + "description": "Base64 encoded binary", + "type": "object", + "properties": { + "/": { + "type": "object", + "properties": { + "bytes": { + "type": "string" + } + } + } + } + }, + "ipld_link": { + "title": "IPLD link", + "description": "CID link that points to some IPLD data", + "type": "object", + "properties": { + "/": { + "type": "string" + } + } + }, + "iss": { + "title": "Issuer", + "description": "Principal that issued the receipt", + "type": "string" + }, + "out": { + "title": "Computation result", + "description": "Result tuple with ok/err/just result and associated output", + "type": "object", + "items": [ + { + "type": "object", + "enum": [ + "ok", + "error", + "just" + ] + }, + { + "$ref": "#/definitions/ipld" + } + ], + "maxItems": 2, + "minItems": 2 + }, + "pointer": { + "description": "CID reference to an invocation, task, instruction, or receipt", + "type": "object", + "properties": { + "/": { + "type": "string" + } + } + }, + "prf": { + "description": "CIDs referencing UCAN proofs", + "type": [ + "array" + ], + "items": { + "type": "string" + } + }, + "receipt": { + "title": "Receipt", + "description": "A computed receipt", + "type": "object", + "required": [ + "meta", + "out", + "prf", + "ran" + ], + "properties": { + "iss": { + "anyOf": [ + { + "$ref": "#/definitions/iss" + }, + { + "type": "null" + } + ] + }, + "meta": { + "title": "Receipt metadata", + "description": "Receipt metadata including the 
operation that produced the receipt", + "type": "object", + "required": [ + "op" + ], + "properties": { + "op": { + "type": "string" + } + } + }, + "out": { + "$ref": "#/definitions/out" + }, + "prf": { + "$ref": "#/definitions/prf" + }, + "ran": { + "$ref": "#/definitions/pointer" + } + } + } + } + }, + "required": true, + "deprecated": false + } + }, + { + "name": "unsubscribe_run_workflow", + "paramStructure": "by-name", + "params": [], + "result": { + "name": "unsubscribe result", + "schema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Boolean", + "type": "boolean" + }, + "required": true, + "deprecated": false + }, + "deprecated": false + } + ] +} \ No newline at end of file diff --git a/homestar-runtime/schemas/docs/health.json b/homestar-runtime/schemas/docs/health.json new file mode 100644 index 00000000..c4debee7 --- /dev/null +++ b/homestar-runtime/schemas/docs/health.json @@ -0,0 +1,15 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "health", + "description": "Health status of the server and database connection.", + "type": "object", + "required": [ + "healthy" + ], + "properties": { + "healthy": { + "description": "Health status.", + "type": "boolean" + } + } +} \ No newline at end of file diff --git a/homestar-runtime/schemas/docs/metrics.json b/homestar-runtime/schemas/docs/metrics.json new file mode 100644 index 00000000..564c638b --- /dev/null +++ b/homestar-runtime/schemas/docs/metrics.json @@ -0,0 +1,87 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Metrics data", + "description": "Prometheus metrics data", + "type": "object", + "required": [ + "metrics" + ], + "properties": { + "metrics": { + "type": "array", + "items": { + "$ref": "#/definitions/metric" + } + } + }, + "definitions": { + "metric": { + "title": "Metric family", + "description": "A prometheus gauge, summary, or histogram metric", + "type": "object", + "if": { + "properties": { + "metric_type": { + "type": 
"string", + "const": "gauge" + } + } + }, + "then": { + "properties": { + "data": { + "title": "Gauge data", + "description": "A gauge metric", + "type": "object", + "required": [ + "type", + "value" + ], + "properties": { + "labels": { + "type": [ + "object", + "null" + ], + "additionalProperties": { + "type": "string" + } + }, + "type": { + "type": "string", + "const": "metric" + }, + "value": { + "type": "string" + } + } + } + } + }, + "else": false, + "required": [ + "data", + "help", + "metric_name", + "metric_type" + ], + "properties": { + "help": { + "type": "string" + }, + "metric_name": { + "type": "string" + }, + "metric_type": { + "title": "Metric type", + "type": "string", + "enum": [ + "gauge", + "histogram", + "summary" + ] + } + } + } + } +} \ No newline at end of file diff --git a/homestar-runtime/schemas/docs/network.json b/homestar-runtime/schemas/docs/network.json new file mode 100644 index 00000000..4fdc9d8f --- /dev/null +++ b/homestar-runtime/schemas/docs/network.json @@ -0,0 +1,912 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "network", + "description": "Network notification type.", + "oneOf": [ + { + "description": "Listening on new address notification.", + "type": "object", + "required": [ + "new_listen_addr" + ], + "properties": { + "new_listen_addr": { + "$ref": "#/definitions/new_listen_addr" + } + }, + "additionalProperties": false + }, + { + "description": "Connection established notification.", + "type": "object", + "required": [ + "connection_established" + ], + "properties": { + "connection_established": { + "$ref": "#/definitions/connection_established" + } + }, + "additionalProperties": false + }, + { + "description": "Connection closed notification.", + "type": "object", + "required": [ + "connection_closed" + ], + "properties": { + "connection_closed": { + "$ref": "#/definitions/connection_closed" + } + }, + "additionalProperties": false + }, + { + "description": "Outgoing connection error 
notification.", + "type": "object", + "required": [ + "outgoing_connection_error" + ], + "properties": { + "outgoing_connection_error": { + "$ref": "#/definitions/outgoing_connection_error" + } + }, + "additionalProperties": false + }, + { + "description": "Incoming connection error notification.", + "type": "object", + "required": [ + "incoming_connection_error" + ], + "properties": { + "incoming_connection_error": { + "$ref": "#/definitions/incoming_connection_error" + } + }, + "additionalProperties": false + }, + { + "description": "mDNS discovered notification.", + "type": "object", + "required": [ + "discovered_mdns" + ], + "properties": { + "discovered_mdns": { + "$ref": "#/definitions/discovered_mdns" + } + }, + "additionalProperties": false + }, + { + "description": "Rendezvous client discovered notification.", + "type": "object", + "required": [ + "discovered_rendezvous" + ], + "properties": { + "discovered_rendezvous": { + "$ref": "#/definitions/discovered_rendezvous" + } + }, + "additionalProperties": false + }, + { + "description": "Rendezvous client registered notification.", + "type": "object", + "required": [ + "registered_rendezvous" + ], + "properties": { + "registered_rendezvous": { + "$ref": "#/definitions/registered_rendezvous" + } + }, + "additionalProperties": false + }, + { + "description": "Rendezvous discover served notification.", + "type": "object", + "required": [ + "discover_served_rendezvous" + ], + "properties": { + "discover_served_rendezvous": { + "$ref": "#/definitions/registered_rendezvous" + } + }, + "additionalProperties": false + }, + { + "description": "Rendezvous peer registered notification.", + "type": "object", + "required": [ + "peer_registered_rendezvous" + ], + "properties": { + "peer_registered_rendezvous": { + "$ref": "#/definitions/peer_registered_rendezvous" + } + }, + "additionalProperties": false + }, + { + "description": "Published receipt pubsub notification.", + "type": "object", + "required": [ + 
"published_receipt_pubsub" + ], + "properties": { + "published_receipt_pubsub": { + "$ref": "#/definitions/published_receipt_pubsub" + } + }, + "additionalProperties": false + }, + { + "description": "Received receipt pubsub notification.", + "type": "object", + "required": [ + "received_receipt_pubsub" + ], + "properties": { + "received_receipt_pubsub": { + "$ref": "#/definitions/received_receipt_pubsub" + } + }, + "additionalProperties": false + }, + { + "description": "Put receipt DHT notification.", + "type": "object", + "required": [ + "put_receipt_dht" + ], + "properties": { + "put_receipt_dht": { + "$ref": "#/definitions/put_receipt_dht" + } + }, + "additionalProperties": false + }, + { + "description": "Got receipt DHT notification.", + "type": "object", + "required": [ + "got_receipt_dht" + ], + "properties": { + "got_receipt_dht": { + "$ref": "#/definitions/got_receipt_dht" + } + }, + "additionalProperties": false + }, + { + "description": "Put workflow info DHT notification.", + "type": "object", + "required": [ + "put_workflow_info_dht" + ], + "properties": { + "put_workflow_info_dht": { + "$ref": "#/definitions/put_workflow_info_dht" + } + }, + "additionalProperties": false + }, + { + "description": "Got workflow info DHT notification.", + "type": "object", + "required": [ + "got_workflow_info_dht" + ], + "properties": { + "got_workflow_info_dht": { + "$ref": "#/definitions/got_workflow_info_dht" + } + }, + "additionalProperties": false + }, + { + "description": "Receipt quorum success notification.", + "type": "object", + "required": [ + "receipt_quorum_success_dht" + ], + "properties": { + "receipt_quorum_success_dht": { + "$ref": "#/definitions/receipt_quorum_success_dht" + } + }, + "additionalProperties": false + }, + { + "description": "Receipt quorum failure notification.", + "type": "object", + "required": [ + "receipt_quorum_failure_dht" + ], + "properties": { + "receipt_quorum_failure_dht": { + "$ref": 
"#/definitions/receipt_quorum_failure_dht" + } + }, + "additionalProperties": false + }, + { + "description": "Workflow info quorum success notification.", + "type": "object", + "required": [ + "workflow_info_quorum_success_dht" + ], + "properties": { + "workflow_info_quorum_success_dht": { + "$ref": "#/definitions/workflow_info_quorum_success_dht" + } + }, + "additionalProperties": false + }, + { + "description": "Workflow info quorum failure notification.", + "type": "object", + "required": [ + "workflow_info_quorum_failure_dht" + ], + "properties": { + "workflow_info_quorum_failure_dht": { + "$ref": "#/definitions/workflow_info_quorum_failure_dht" + } + }, + "additionalProperties": false + }, + { + "description": "Sent workflow info notification.", + "type": "object", + "required": [ + "sent_workflow_info" + ], + "properties": { + "sent_workflow_info": { + "$ref": "#/definitions/sent_workflow_info" + } + }, + "additionalProperties": false + }, + { + "description": "Received workflow info notification.", + "type": "object", + "required": [ + "received_workflow_info" + ], + "properties": { + "received_workflow_info": { + "$ref": "#/definitions/received_workflow_info" + } + }, + "additionalProperties": false + } + ], + "definitions": { + "connection_closed": { + "type": "object", + "required": [ + "address", + "peer_id", + "timestamp" + ], + "properties": { + "address": { + "type": "string" + }, + "peer_id": { + "type": "string" + }, + "timestamp": { + "type": "integer", + "format": "int64" + } + } + }, + "connection_established": { + "type": "object", + "required": [ + "address", + "peer_id", + "timestamp" + ], + "properties": { + "address": { + "type": "string" + }, + "peer_id": { + "type": "string" + }, + "timestamp": { + "type": "integer", + "format": "int64" + } + } + }, + "discovered_mdns": { + "type": "object", + "required": [ + "peers", + "timestamp" + ], + "properties": { + "peers": { + "description": "Peers discovered by peer ID and multiaddress", + 
"type": "object", + "additionalProperties": { + "type": "string" + } + }, + "timestamp": { + "type": "integer", + "format": "int64" + } + } + }, + "discovered_rendezvous": { + "type": "object", + "required": [ + "peers", + "server", + "timestamp" + ], + "properties": { + "peers": { + "description": "Peers discovered by peer ID and multiaddresses", + "type": "object", + "additionalProperties": { + "type": "array", + "items": { + "type": "string" + } + } + }, + "server": { + "description": "Server that fulfilled the discovery request", + "type": "string" + }, + "timestamp": { + "type": "integer", + "format": "int64" + } + } + }, + "got_receipt_dht": { + "type": "object", + "required": [ + "cid", + "ran", + "timestamp" + ], + "properties": { + "cid": { + "description": "Receipt CID", + "type": "string" + }, + "publisher": { + "description": "Receipt publisher peer ID", + "type": [ + "string", + "null" + ] + }, + "ran": { + "description": "Ran receipt CID", + "type": "string" + }, + "timestamp": { + "type": "integer", + "format": "int64" + } + } + }, + "got_workflow_info_dht": { + "type": "object", + "required": [ + "cid", + "num_tasks", + "progress", + "progress_count", + "timestamp" + ], + "properties": { + "cid": { + "description": "Workflow info CID", + "type": "string" + }, + "name": { + "description": "Optional workflow name", + "type": [ + "string", + "null" + ] + }, + "num_tasks": { + "description": "Number of tasks in workflow", + "type": "integer", + "format": "uint32", + "minimum": 0.0 + }, + "progress": { + "description": "Completed task CIDs", + "type": "array", + "items": { + "type": "string" + } + }, + "progress_count": { + "description": "Number of workflow tasks completed", + "type": "integer", + "format": "uint32", + "minimum": 0.0 + }, + "publisher": { + "description": "Workflow info publisher peer ID", + "type": [ + "string", + "null" + ] + }, + "timestamp": { + "type": "integer", + "format": "int64" + } + } + }, + "incoming_connection_error": { + 
"type": "object", + "required": [ + "error", + "timestamp" + ], + "properties": { + "error": { + "type": "string" + }, + "timestamp": { + "type": "integer", + "format": "int64" + } + } + }, + "new_listen_addr": { + "type": "object", + "required": [ + "address", + "peer_id", + "timestamp" + ], + "properties": { + "address": { + "type": "string" + }, + "peer_id": { + "type": "string" + }, + "timestamp": { + "type": "integer", + "format": "int64" + } + } + }, + "outgoing_connection_error": { + "type": "object", + "required": [ + "error", + "timestamp" + ], + "properties": { + "error": { + "type": "string" + }, + "peer_id": { + "type": [ + "string", + "null" + ] + }, + "timestamp": { + "type": "integer", + "format": "int64" + } + } + }, + "peer_registered_rendezvous": { + "type": "object", + "required": [ + "addresses", + "peer_id", + "timestamp" + ], + "properties": { + "addresses": { + "description": "Multiaddresses for peer", + "type": "array", + "items": { + "type": "string" + } + }, + "peer_id": { + "description": "Peer registered", + "type": "string" + }, + "timestamp": { + "type": "integer", + "format": "int64" + } + } + }, + "published_receipt_pubsub": { + "type": "object", + "required": [ + "cid", + "ran", + "timestamp" + ], + "properties": { + "cid": { + "description": "Receipt CID", + "type": "string" + }, + "ran": { + "description": "Ran receipt CID", + "type": "string" + }, + "timestamp": { + "type": "integer", + "format": "int64" + } + } + }, + "put_receipt_dht": { + "type": "object", + "required": [ + "cid", + "ran", + "timestamp" + ], + "properties": { + "cid": { + "description": "Receipt CID", + "type": "string" + }, + "ran": { + "description": "Ran receipt CID", + "type": "string" + }, + "timestamp": { + "type": "integer", + "format": "int64" + } + } + }, + "put_workflow_info_dht": { + "type": "object", + "required": [ + "cid", + "num_tasks", + "progress", + "progress_count", + "timestamp" + ], + "properties": { + "cid": { + "description": "Workflow 
info CID", + "type": "string" + }, + "name": { + "description": "Optional workflow name", + "type": [ + "string", + "null" + ] + }, + "num_tasks": { + "description": "Number of tasks in workflow", + "type": "integer", + "format": "uint32", + "minimum": 0.0 + }, + "progress": { + "description": "Completed task CIDs", + "type": "array", + "items": { + "type": "string" + } + }, + "progress_count": { + "description": "Number of workflow tasks completed", + "type": "integer", + "format": "uint32", + "minimum": 0.0 + }, + "timestamp": { + "type": "integer", + "format": "int64" + } + } + }, + "receipt_quorum_failure_dht": { + "type": "object", + "required": [ + "cid", + "connected_peer_count", + "quorum", + "stored_to_peers", + "timestamp" + ], + "properties": { + "cid": { + "description": "Receipt CID", + "type": "string" + }, + "connected_peer_count": { + "description": "Number of connected peers", + "type": "integer", + "format": "uint", + "minimum": 0.0 + }, + "quorum": { + "description": "Number of peers required for quorum", + "type": "integer", + "format": "uint", + "minimum": 0.0 + }, + "stored_to_peers": { + "description": "Peers participating in quorum", + "type": "array", + "items": { + "type": "string" + } + }, + "timestamp": { + "type": "integer", + "format": "int64" + } + } + }, + "receipt_quorum_success_dht": { + "type": "object", + "required": [ + "cid", + "quorum", + "timestamp" + ], + "properties": { + "cid": { + "description": "Receipt CID", + "type": "string" + }, + "quorum": { + "description": "Number of peers participating in quorum", + "type": "integer", + "format": "uint", + "minimum": 0.0 + }, + "timestamp": { + "type": "integer", + "format": "int64" + } + } + }, + "received_receipt_pubsub": { + "type": "object", + "required": [ + "cid", + "publisher", + "ran", + "timestamp" + ], + "properties": { + "cid": { + "description": "Receipt CID", + "type": "string" + }, + "publisher": { + "description": "Receipt publisher peer ID", + "type": "string" + 
}, + "ran": { + "description": "Ran receipt CID", + "type": "string" + }, + "timestamp": { + "type": "integer", + "format": "int64" + } + } + }, + "received_workflow_info": { + "type": "object", + "required": [ + "cid", + "num_tasks", + "progress", + "progress_count", + "timestamp" + ], + "properties": { + "cid": { + "description": "Workflow info CID", + "type": "string" + }, + "name": { + "description": "Optional workflow name", + "type": [ + "string", + "null" + ] + }, + "num_tasks": { + "description": "Number of tasks in workflow", + "type": "integer", + "format": "uint32", + "minimum": 0.0 + }, + "progress": { + "description": "Completed task CIDs", + "type": "array", + "items": { + "type": "string" + } + }, + "progress_count": { + "description": "Number of workflow tasks completed", + "type": "integer", + "format": "uint32", + "minimum": 0.0 + }, + "provider": { + "description": "Workflow info provider peer ID", + "type": [ + "string", + "null" + ] + }, + "timestamp": { + "type": "integer", + "format": "int64" + } + } + }, + "registered_rendezvous": { + "type": "object", + "required": [ + "server", + "timestamp" + ], + "properties": { + "server": { + "description": "Server that accepted registration", + "type": "string" + }, + "timestamp": { + "type": "integer", + "format": "int64" + } + } + }, + "sent_workflow_info": { + "type": "object", + "required": [ + "cid", + "num_tasks", + "progress", + "progress_count", + "requestor", + "timestamp" + ], + "properties": { + "cid": { + "description": "Workflow info CID", + "type": "string" + }, + "name": { + "description": "Optional workflow name", + "type": [ + "string", + "null" + ] + }, + "num_tasks": { + "description": "Number of tasks in workflow", + "type": "integer", + "format": "uint32", + "minimum": 0.0 + }, + "progress": { + "description": "Completed task CIDs", + "type": "array", + "items": { + "type": "string" + } + }, + "progress_count": { + "description": "Number of workflow tasks completed", + "type": 
"integer", + "format": "uint32", + "minimum": 0.0 + }, + "requestor": { + "description": "Peer that requested workflow info", + "type": "string" + }, + "timestamp": { + "type": "integer", + "format": "int64" + } + } + }, + "workflow_info_quorum_failure_dht": { + "type": "object", + "required": [ + "cid", + "connected_peer_count", + "quorum", + "stored_to_peers", + "timestamp" + ], + "properties": { + "cid": { + "description": "Workflow info CID", + "type": "string" + }, + "connected_peer_count": { + "description": "Number of connected peers", + "type": "integer", + "format": "uint", + "minimum": 0.0 + }, + "quorum": { + "description": "Number of peers required for quorum", + "type": "integer", + "format": "uint", + "minimum": 0.0 + }, + "stored_to_peers": { + "description": "Peers participating in quorum", + "type": "array", + "items": { + "type": "string" + } + }, + "timestamp": { + "type": "integer", + "format": "int64" + } + } + }, + "workflow_info_quorum_success_dht": { + "type": "object", + "required": [ + "cid", + "quorum", + "timestamp" + ], + "properties": { + "cid": { + "description": "Workflow info CID", + "type": "string" + }, + "quorum": { + "description": "Number of peers participating in quorum", + "type": "integer", + "format": "uint", + "minimum": 0.0 + }, + "timestamp": { + "type": "integer", + "format": "int64" + } + } + } + } +} \ No newline at end of file diff --git a/homestar-runtime/schemas/docs/node_info.json b/homestar-runtime/schemas/docs/node_info.json new file mode 100644 index 00000000..b810c3c0 --- /dev/null +++ b/homestar-runtime/schemas/docs/node_info.json @@ -0,0 +1,67 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "node_info", + "description": "Node information.", + "type": "object", + "required": [ + "dynamic", + "static" + ], + "properties": { + "dynamic": { + "description": "Dynamic node information available through events at runtime.", + "allOf": [ + { + "$ref": "#/definitions/dynamic" + } + ] + }, + 
"static": { + "description": "Static node information available at startup.", + "allOf": [ + { + "$ref": "#/definitions/static" + } + ] + } + }, + "definitions": { + "dynamic": { + "description": "Dynamic node information available through events at runtime.", + "type": "object", + "required": [ + "connections", + "listeners" + ], + "properties": { + "connections": { + "description": "Peers and their addresses that are connected to the node", + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "listeners": { + "description": "Listen addresses for the node", + "type": "array", + "items": { + "type": "string" + } + } + } + }, + "static": { + "description": "Static node information available at startup.", + "type": "object", + "required": [ + "peer_id" + ], + "properties": { + "peer_id": { + "description": "The peer ID of the node", + "type": "string" + } + } + } + } +} \ No newline at end of file diff --git a/homestar-runtime/schemas/docs/receipt.json b/homestar-runtime/schemas/docs/receipt.json new file mode 100644 index 00000000..b0af3812 --- /dev/null +++ b/homestar-runtime/schemas/docs/receipt.json @@ -0,0 +1,146 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Receipt", + "description": "A computed receipt", + "type": "object", + "required": [ + "meta", + "out", + "prf", + "ran" + ], + "properties": { + "iss": { + "anyOf": [ + { + "$ref": "#/definitions/iss" + }, + { + "type": "null" + } + ] + }, + "meta": { + "title": "Receipt metadata", + "description": "Receipt metadata including the operation that produced the receipt", + "type": "object", + "required": [ + "op" + ], + "properties": { + "op": { + "type": "string" + } + } + }, + "out": { + "$ref": "#/definitions/out" + }, + "prf": { + "$ref": "#/definitions/prf" + }, + "ran": { + "$ref": "#/definitions/pointer" + } + }, + "definitions": { + "ipld": { + "title": "Ipld", + "description": "DAG-JSON encoded IPLD: 
https://github.com/ipld/ipld/blob/master/specs/codecs/dag-json/spec.md", + "oneOf": [ + { + "type": "null" + }, + { + "type": "boolean" + }, + { + "type": "number" + }, + { + "type": "string" + }, + { + "$ref": "#/definitions/ipld_bytes" + }, + { + "type": "array" + }, + { + "type": "object" + }, + { + "$ref": "#/definitions/ipld_link" + } + ] + }, + "ipld_bytes": { + "title": "IPLD bytes", + "description": "Base64 encoded binary", + "type": "object", + "properties": { + "/": { + "type": "object", + "properties": { + "bytes": { + "type": "string" + } + } + } + } + }, + "ipld_link": { + "title": "IPLD link", + "description": "CID link that points to some IPLD data", + "type": "object", + "properties": { + "/": { + "type": "string" + } + } + }, + "iss": { + "title": "Issuer", + "description": "Principal that issued the receipt", + "type": "string" + }, + "out": { + "title": "Computation result", + "description": "Result tuple with ok/err/just result and associated output", + "type": "object", + "items": [ + { + "type": "object", + "enum": [ + "ok", + "error", + "just" + ] + }, + { + "$ref": "#/definitions/ipld" + } + ], + "maxItems": 2, + "minItems": 2 + }, + "pointer": { + "description": "CID reference to an invocation, task, instruction, or receipt", + "type": "object", + "properties": { + "/": { + "type": "string" + } + } + }, + "prf": { + "description": "CIDs referencing UCAN proofs", + "type": [ + "array" + ], + "items": { + "type": "string" + } + } + } +} \ No newline at end of file diff --git a/homestar-runtime/schemas/docs/receipt_notification.json b/homestar-runtime/schemas/docs/receipt_notification.json new file mode 100644 index 00000000..bc5fbd9b --- /dev/null +++ b/homestar-runtime/schemas/docs/receipt_notification.json @@ -0,0 +1,184 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Receipt notification", + "description": "A receipt notification associated with a running workflow", + "type": "object", + "required": [ + "receipt", 
+ "receipt_cid" + ], + "properties": { + "metadata": { + "title": "Metadata", + "description": "Workflow metadata to contextualize the receipt", + "type": "object", + "required": [ + "name", + "receipt", + "receipt_cid" + ], + "properties": { + "name": { + "type": "string" + }, + "replayed": { + "type": "boolean" + }, + "workflow": { + "$ref": "#/definitions/ipld_link" + } + } + }, + "receipt": { + "$ref": "#/definitions/receipt" + }, + "receipt_cid": { + "$ref": "#/definitions/ipld_link" + } + }, + "definitions": { + "ipld": { + "title": "Ipld", + "description": "DAG-JSON encoded IPLD: https://github.com/ipld/ipld/blob/master/specs/codecs/dag-json/spec.md", + "oneOf": [ + { + "type": "null" + }, + { + "type": "boolean" + }, + { + "type": "number" + }, + { + "type": "string" + }, + { + "$ref": "#/definitions/ipld_bytes" + }, + { + "type": "array" + }, + { + "type": "object" + }, + { + "$ref": "#/definitions/ipld_link" + } + ] + }, + "ipld_bytes": { + "title": "IPLD bytes", + "description": "Base64 encoded binary", + "type": "object", + "properties": { + "/": { + "type": "object", + "properties": { + "bytes": { + "type": "string" + } + } + } + } + }, + "ipld_link": { + "title": "IPLD link", + "description": "CID link that points to some IPLD data", + "type": "object", + "properties": { + "/": { + "type": "string" + } + } + }, + "iss": { + "title": "Issuer", + "description": "Principal that issued the receipt", + "type": "string" + }, + "out": { + "title": "Computation result", + "description": "Result tuple with ok/err/just result and associated output", + "type": "object", + "items": [ + { + "type": "object", + "enum": [ + "ok", + "error", + "just" + ] + }, + { + "$ref": "#/definitions/ipld" + } + ], + "maxItems": 2, + "minItems": 2 + }, + "pointer": { + "description": "CID reference to an invocation, task, instruction, or receipt", + "type": "object", + "properties": { + "/": { + "type": "string" + } + } + }, + "prf": { + "description": "CIDs referencing UCAN 
proofs", + "type": [ + "array" + ], + "items": { + "type": "string" + } + }, + "receipt": { + "title": "Receipt", + "description": "A computed receipt", + "type": "object", + "required": [ + "meta", + "out", + "prf", + "ran" + ], + "properties": { + "iss": { + "anyOf": [ + { + "$ref": "#/definitions/iss" + }, + { + "type": "null" + } + ] + }, + "meta": { + "title": "Receipt metadata", + "description": "Receipt metadata including the operation that produced the receipt", + "type": "object", + "required": [ + "op" + ], + "properties": { + "op": { + "type": "string" + } + } + }, + "out": { + "$ref": "#/definitions/out" + }, + "prf": { + "$ref": "#/definitions/prf" + }, + "ran": { + "$ref": "#/definitions/pointer" + } + } + } + } +} \ No newline at end of file diff --git a/homestar-runtime/schemas/docs/workflow.json b/homestar-runtime/schemas/docs/workflow.json new file mode 100644 index 00000000..b55c23ea --- /dev/null +++ b/homestar-runtime/schemas/docs/workflow.json @@ -0,0 +1,266 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Workflow", + "description": "Workflow composed of tasks", + "type": "object", + "required": [ + "tasks" + ], + "properties": { + "tasks": { + "type": "array", + "items": { + "$ref": "#/definitions/task" + } + } + }, + "definitions": { + "await_result": { + "title": "Await result", + "description": "Branches of a promise that is awaited", + "oneOf": [ + { + "type": "object", + "properties": { + "await/ok": { + "$ref": "#/definitions/pointer" + } + } + }, + { + "type": "object", + "properties": { + "await/error": { + "$ref": "#/definitions/pointer" + } + } + }, + { + "type": "object", + "properties": { + "await/*": { + "$ref": "#/definitions/pointer" + } + } + } + ] + }, + "ipld": { + "title": "Ipld", + "description": "DAG-JSON encoded IPLD: https://github.com/ipld/ipld/blob/master/specs/codecs/dag-json/spec.md", + "oneOf": [ + { + "type": "null" + }, + { + "type": "boolean" + }, + { + "type": "number" + }, + { + 
"type": "string" + }, + { + "$ref": "#/definitions/ipld_bytes" + }, + { + "type": "array" + }, + { + "type": "object" + }, + { + "$ref": "#/definitions/ipld_link" + } + ] + }, + "ipld_bytes": { + "title": "IPLD bytes", + "description": "Base64 encoded binary", + "type": "object", + "properties": { + "/": { + "type": "object", + "properties": { + "bytes": { + "type": "string" + } + } + } + } + }, + "ipld_link": { + "title": "IPLD link", + "description": "CID link that points to some IPLD data", + "type": "object", + "properties": { + "/": { + "type": "string" + } + } + }, + "pointer": { + "description": "CID reference to an invocation, task, instruction, or receipt", + "type": "object", + "properties": { + "/": { + "type": "string" + } + } + }, + "prf": { + "description": "CIDs referencing UCAN proofs", + "type": [ + "array" + ], + "items": { + "type": "string" + } + }, + "resources": { + "description": "Resource configuration for fuel quota, memory allowance, and timeout", + "type": "object", + "properties": { + "fuel": { + "type": [ + "integer", + "null" + ], + "format": "uint64", + "minimum": 0.0 + }, + "memory": { + "description": "Memory in bytes", + "type": [ + "integer", + "null" + ], + "format": "uint64", + "minimum": 0.0 + }, + "time": { + "description": "Timeout in milliseconds", + "type": [ + "integer", + "null" + ], + "format": "uint64", + "minimum": 0.0 + } + } + }, + "run": { + "title": "Run instruction", + "description": "An instruction that runs a function from a resource, executor that will run the function, inputs to the executor, and optional nonce", + "type": "object", + "if": { + "properties": { + "op": { + "type": "string", + "const": "wasm/run" + } + } + }, + "then": { + "properties": { + "input": { + "type": "object", + "required": [ + "args", + "func" + ], + "properties": { + "args": { + "description": "Arguments to the function. 
May await a result from another task.", + "type": "array", + "items": [ + { + "$ref": "#/definitions/ipld" + }, + { + "$ref": "#/definitions/await_result" + } + ] + }, + "func": { + "description": "The function to call on the Wasm resource", + "type": "string" + } + } + } + } + }, + "else": false, + "required": [ + "input", + "nnc", + "op", + "rsc" + ], + "properties": { + "nnc": { + "description": "A 12-byte or 16-byte nonce. Use empty string for no nonce.", + "type": "string" + }, + "op": { + "description": "Function executor", + "type": "string", + "enum": [ + "wasm/run" + ] + }, + "rsc": { + "type": "string", + "format": "uri" + } + } + }, + "task": { + "description": "Contains a run instruction, configuration, optional reference to receipt that caused task to run, and authorization", + "type": "object", + "required": [ + "meta", + "prf", + "run" + ], + "properties": { + "cause": { + "title": "Receipt reference", + "anyOf": [ + { + "$ref": "#/definitions/pointer" + }, + { + "type": "null" + } + ] + }, + "meta": { + "title": "Task Configuration", + "allOf": [ + { + "$ref": "#/definitions/resources" + } + ] + }, + "prf": { + "title": "UCAN Authorization", + "allOf": [ + { + "$ref": "#/definitions/prf" + } + ] + }, + "run": { + "title": "Run instruction", + "allOf": [ + { + "$ref": "#/definitions/run" + } + ] + } + } + } + } +} \ No newline at end of file diff --git a/homestar-runtime/schemas/generate.rs b/homestar-runtime/schemas/generate.rs index a6138990..0f89ba38 100644 --- a/homestar-runtime/schemas/generate.rs +++ b/homestar-runtime/schemas/generate.rs @@ -16,7 +16,6 @@ use openrpc::document::{ MethodObjectParamStructure, Openrpc, OpenrpcDocument, }; -// Generate docs with `cargo run --bin schemas` fn main() { let health_schema = schema_for!(Health); let _ = fs::File::create("schemas/docs/health.json") @@ -267,7 +266,7 @@ fn generate_api_doc( }; OpenrpcDocument { - openrpc: Openrpc::V26, // TODO Should we upgrade to latest spec at 1.3.2? 
+ openrpc: Openrpc::V26, info: InfoObject { title: "homestar".to_string(), description: Some(env!("CARGO_PKG_DESCRIPTION").into()), From 943cbf0c4e1a617ebd91e4a6a976adfd8fac4bae Mon Sep 17 00:00:00 2001 From: Brian Ginsburg Date: Mon, 5 Feb 2024 16:15:41 -0800 Subject: [PATCH 40/75] feat: Add OpenRPC discover endpoint --- homestar-runtime/schemas/docs/api.json | 15 ++++++++ homestar-runtime/schemas/generate.rs | 36 ++++++++++++++++++- homestar-runtime/src/network/webserver.rs | 4 +++ homestar-runtime/src/network/webserver/rpc.rs | 7 ++++ 4 files changed, 61 insertions(+), 1 deletion(-) diff --git a/homestar-runtime/schemas/docs/api.json b/homestar-runtime/schemas/docs/api.json index fdffe4b7..e526422b 100644 --- a/homestar-runtime/schemas/docs/api.json +++ b/homestar-runtime/schemas/docs/api.json @@ -19,6 +19,21 @@ "url": "https://docs.everywhere.computer/homestar/what-is-homestar/" }, "methods": [ + { + "name": "rpc.discover", + "description": "OpenRPC schema as a description of this service", + "paramStructure": "either", + "params": [], + "result": { + "name": "OpenRPC Schema", + "schema": { + "$ref": "https://github.com/ipvm-wg/homestar/blob/main/homestar-runtime/schemas/docs/api.json" + }, + "required": true, + "deprecated": false + }, + "deprecated": false + }, { "name": "health", "paramStructure": "by-name", diff --git a/homestar-runtime/schemas/generate.rs b/homestar-runtime/schemas/generate.rs index 0f89ba38..4dda89f9 100644 --- a/homestar-runtime/schemas/generate.rs +++ b/homestar-runtime/schemas/generate.rs @@ -6,7 +6,10 @@ use homestar_runtime::{ Health, NetworkNotification, NodeInfo, PrometheusData, ReceiptNotification, }; use homestar_workflow::Workflow; -use schemars::{schema::RootSchema, schema_for}; +use schemars::{ + schema::{RootSchema, SchemaObject}, + schema_for, +}; use std::{fs, io::Write}; mod openrpc; @@ -74,6 +77,36 @@ fn generate_api_doc( workflow_schema: RootSchema, receipt_notification_schema: RootSchema, ) -> OpenrpcDocument { + let 
discover: MethodObject = MethodObject { + name: "rpc.discover".to_string(), + description: Some("OpenRPC schema as a description of this service".to_string()), + summary: None, + servers: None, + tags: None, + param_structure: Some(MethodObjectParamStructure::Either), + params: vec![], + result: ContentDescriptorOrReference::ContentDescriptorObject(ContentDescriptorObject { + name: "OpenRPC Schema".to_string(), + summary: None, + description: None, + required: Some(true), + schema: JSONSchema::JsonSchemaObject(RootSchema { + schema: SchemaObject { + reference: Some("https://github.com/ipvm-wg/homestar/blob/main/homestar-runtime/schemas/docs/api.json".to_string()), + ..Default::default() + }, + ..Default::default() + }), + deprecated: Some(false), + }), + external_docs: None, + errors: None, + links: None, + examples: None, + deprecated: Some(false), + x_messages: None, + }; + let health: MethodObject = MethodObject { name: "health".to_string(), description: None, @@ -288,6 +321,7 @@ fn generate_api_doc( }), servers: None, methods: vec![ + discover, health, metrics, node_info, diff --git a/homestar-runtime/src/network/webserver.rs b/homestar-runtime/src/network/webserver.rs index dd99bff4..2ec30792 100644 --- a/homestar-runtime/src/network/webserver.rs +++ b/homestar-runtime/src/network/webserver.rs @@ -251,6 +251,10 @@ impl Server { rpc::METRICS_ENDPOINT, )?) .layer(ProxyGetRequestLayer::new("/node", rpc::NODE_INFO_ENDPOINT)?) + .layer(ProxyGetRequestLayer::new( + "/rpc_discover", + rpc::DISCOVER_ENDPOINT, + )?) 
.layer(cors) .layer(SetSensitiveRequestHeadersLayer::new(once(AUTHORIZATION))) .timeout(self.webserver_timeout); diff --git a/homestar-runtime/src/network/webserver/rpc.rs b/homestar-runtime/src/network/webserver/rpc.rs index 22e635d4..912630ac 100644 --- a/homestar-runtime/src/network/webserver/rpc.rs +++ b/homestar-runtime/src/network/webserver/rpc.rs @@ -44,6 +44,11 @@ use tracing::debug; #[allow(unused_imports)] use tracing::{error, warn}; +/// OpenRPC API document +const API_SCHEMA_DOC: &str = include_str!("../../../schemas/docs/api.json"); + +/// OpenRPC API discovery endpoint. +pub(crate) const DISCOVER_ENDPOINT: &str = "rpc_discover"; /// Health endpoint. pub(crate) const HEALTH_ENDPOINT: &str = "health"; /// Metrics endpoint for prometheus / openmetrics polling. @@ -155,6 +160,8 @@ where async fn register(ctx: Context) -> Result>> { let mut module = RpcModule::new(ctx); + module.register_method(DISCOVER_ENDPOINT, |_, _| serde_json::json!(API_SCHEMA_DOC))?; + module.register_async_method(HEALTH_ENDPOINT, |_, ctx| async move { match ctx.db.conn() { Ok(mut conn) => { From b5ffd8181b09c585525be57c9e2bace86768bf76 Mon Sep 17 00:00:00 2001 From: Brian Ginsburg Date: Mon, 5 Feb 2024 20:03:57 -0800 Subject: [PATCH 41/75] test: Consolidate connection tests --- homestar-runtime/tests/network.rs | 322 +----------------- .../{notification.rs => connection.rs} | 105 +++++- 2 files changed, 93 insertions(+), 334 deletions(-) rename homestar-runtime/tests/network/{notification.rs => connection.rs} (83%) diff --git a/homestar-runtime/tests/network.rs b/homestar-runtime/tests/network.rs index 6f037e09..5be4fe63 100644 --- a/homestar-runtime/tests/network.rs +++ b/homestar-runtime/tests/network.rs @@ -1,27 +1,25 @@ use crate::{ make_config, utils::{ - check_for_line_with, kill_homestar, listen_addr, multiaddr, retrieve_output, + check_for_line_with, kill_homestar, listen_addr, retrieve_output, wait_for_socket_connection_v6, ChildGuard, ProcInfo, BIN_NAME, ED25519MULTIHASH, 
SECP256K1MULTIHASH, }, }; use anyhow::Result; -use libp2p::Multiaddr; use once_cell::sync::Lazy; use std::{ path::PathBuf, process::{Command, Stdio}, - time::Duration, }; +#[cfg(feature = "websocket-notify")] +mod connection; #[cfg(all(feature = "websocket-notify", feature = "test-utils"))] mod dht; #[cfg(feature = "websocket-notify")] mod gossip; mod mdns; -#[cfg(feature = "websocket-notify")] -mod notification; mod rendezvous; static BIN: Lazy = Lazy::new(|| assert_cmd::cargo::cargo_bin(BIN_NAME)); @@ -255,317 +253,3 @@ fn test_websocket_listens_on_address_integration() -> Result<()> { Ok(()) } - -#[test] -#[serial_test::parallel] -fn test_libp2p_connect_known_peers_integration() -> Result<()> { - let proc_info1 = ProcInfo::new().unwrap(); - let proc_info2 = ProcInfo::new().unwrap(); - - let rpc_port1 = proc_info1.rpc_port; - let rpc_port2 = proc_info2.rpc_port; - let metrics_port1 = proc_info1.metrics_port; - let metrics_port2 = proc_info2.metrics_port; - let ws_port1 = proc_info1.ws_port; - let ws_port2 = proc_info2.ws_port; - let listen_addr1 = listen_addr(proc_info1.listen_port); - let listen_addr2 = listen_addr(proc_info2.listen_port); - let node_addra = multiaddr(proc_info1.listen_port, ED25519MULTIHASH); - let node_addrb = multiaddr(proc_info2.listen_port, SECP256K1MULTIHASH); - let toml1 = format!( - r#" - [node] - [node.network.keypair_config] - existing = {{ key_type = "ed25519", path = "./fixtures/__testkey_ed25519.pem" }} - [node.network.libp2p] - listen_address = "{listen_addr1}" - node_addresses = ["{node_addrb}"] - [node.network.libp2p.mdns] - enable = false - [node.network.libp2p.rendezvous] - enable_client = false - [node.network.metrics] - port = {metrics_port1} - [node.network.rpc] - port = {rpc_port1} - [node.network.webserver] - port = {ws_port1} - "# - ); - - let config1 = make_config!(toml1); - // Start two nodes configured to listen at 127.0.0.1 each with their own port. 
- // The nodes are configured to dial each other through the node_addresses config. - let homestar_proc1 = Command::new(BIN.as_os_str()) - .env( - "RUST_LOG", - "homestar=debug,homestar_runtime=debug,libp2p=debug,libp2p_gossipsub::behaviour=debug", - ) - .arg("start") - .arg("-c") - .arg(config1.filename()) - .arg("--db") - .arg(&proc_info1.db_path) - .stdout(Stdio::piped()) - .spawn() - .unwrap(); - let proc_guard1 = ChildGuard::new(homestar_proc1); - - if wait_for_socket_connection_v6(rpc_port1, 1000).is_err() { - panic!("Homestar server/runtime failed to start in time"); - } - - let toml2 = format!( - r#" - [node] - [node.network.keypair_config] - existing = {{ key_type = "secp256k1", path = "./fixtures/__testkey_secp256k1.der" }} - [node.network.libp2p] - listen_address = "{listen_addr2}" - node_addresses = ["{node_addra}"] - [node.network.libp2p.mdns] - enable = false - [node.network.metrics] - port = {metrics_port2} - [node.network.libp2p.rendezvous] - enable_client = false - [node.network.rpc] - port = {rpc_port2} - [node.network.webserver] - port = {ws_port2} - "# - ); - - let config2 = make_config!(toml2); - - let homestar_proc2 = Command::new(BIN.as_os_str()) - .env( - "RUST_LOG", - "homestar=debug,homestar_runtime=debug,libp2p=debug,libp2p_gossipsub::behaviour=debug", - ) - .arg("start") - .arg("-c") - .arg(config2.filename()) - .arg("--db") - .arg(&proc_info2.db_path) - .stdout(Stdio::piped()) - .spawn() - .unwrap(); - let proc_guard2 = ChildGuard::new(homestar_proc2); - - if wait_for_socket_connection_v6(rpc_port2, 1000).is_err() { - panic!("Homestar server/runtime failed to start in time"); - } - - tokio_test::block_on(async { - // Check node endpoint to match - let http_url = format!("http://localhost:{}", ws_port2); - let http_resp = reqwest::get(format!("{}/node", http_url)).await.unwrap(); - assert_eq!(http_resp.status(), 200); - let http_resp = http_resp.json::().await.unwrap(); - assert!(http_resp["dynamic"]["connections"] - .as_object() - 
.unwrap() - .get(ED25519MULTIHASH) - .unwrap() - .as_str() - .unwrap() - .parse::() - .is_ok()); - let static_info = http_resp["static"].as_object().unwrap(); - let listeners = http_resp["dynamic"]["listeners"].as_array().unwrap(); - assert_eq!(static_info.get("peer_id").unwrap(), SECP256K1MULTIHASH); - assert_eq!(listeners, &[listen_addr2.to_string()]); - }); - - // Collect logs for five seconds then kill proceses. - let dead_proc1 = kill_homestar(proc_guard1.take(), Some(Duration::from_secs(5))); - let dead_proc2 = kill_homestar(proc_guard2.take(), Some(Duration::from_secs(5))); - - // Retrieve logs. - let stdout1 = retrieve_output(dead_proc1); - let stdout2 = retrieve_output(dead_proc2); - - // Check node two was added to the Kademlia table - let two_added_to_dht = check_for_line_with( - stdout1.clone(), - vec![ - "added configured node to kademlia routing table", - SECP256K1MULTIHASH, - ], - ); - - // Check that DHT routing table was updated with node two - let two_in_dht_routing_table = check_for_line_with( - stdout1.clone(), - vec![ - "kademlia routing table updated with peer", - SECP256K1MULTIHASH, - ], - ); - - // Check that node one connected to node two. - let one_connected_to_two = check_for_line_with( - stdout1, - vec!["peer connection established", SECP256K1MULTIHASH], - ); - - assert!(one_connected_to_two); - assert!(two_in_dht_routing_table); - assert!(two_added_to_dht); - - // Check node one was added to the Kademlia table - let one_addded_to_dht = check_for_line_with( - stdout2.clone(), - vec![ - "added configured node to kademlia routing table", - ED25519MULTIHASH, - ], - ); - - // Check that DHT routing table was updated with node one - let one_in_dht_routing_table = check_for_line_with( - stdout2.clone(), - vec!["kademlia routing table updated with peer", ED25519MULTIHASH], - ); - - // Check that node two connected to node one. 
- let two_connected_to_one = check_for_line_with( - stdout2, - vec!["peer connection established", ED25519MULTIHASH], - ); - - assert!(one_addded_to_dht); - assert!(one_in_dht_routing_table); - assert!(two_connected_to_one); - - Ok(()) -} - -#[test] -#[serial_test::parallel] -fn test_libp2p_disconnect_known_peers_integration() -> Result<()> { - let proc_info1 = ProcInfo::new().unwrap(); - let proc_info2 = ProcInfo::new().unwrap(); - - let rpc_port1 = proc_info1.rpc_port; - let rpc_port2 = proc_info2.rpc_port; - let metrics_port1 = proc_info1.metrics_port; - let metrics_port2 = proc_info2.metrics_port; - let ws_port1 = proc_info1.ws_port; - let ws_port2 = proc_info2.ws_port; - let listen_addr1 = listen_addr(proc_info1.listen_port); - let listen_addr2 = listen_addr(proc_info2.listen_port); - let node_addra = multiaddr(proc_info1.listen_port, ED25519MULTIHASH); - let node_addrb = multiaddr(proc_info2.listen_port, SECP256K1MULTIHASH); - let toml1 = format!( - r#" - [node] - [node.network.keypair_config] - existing = {{ key_type = "ed25519", path = "./fixtures/__testkey_ed25519.pem" }} - [node.network.libp2p] - listen_address = "{listen_addr1}" - node_addresses = ["{node_addrb}"] - [node.network.libp2p.mdns] - enable = false - [node.network.libp2p.rendezvous] - enable_client = false - [node.network.metrics] - port = {metrics_port1} - [node.network.rpc] - port = {rpc_port1} - [node.network.webserver] - port = {ws_port1} - "# - ); - - let config1 = make_config!(toml1); - // Start two nodes configured to listen at 127.0.0.1 each with their own port. - // The nodes are configured to dial each other through the node_addresses config. 
- let homestar_proc1 = Command::new(BIN.as_os_str()) - .env( - "RUST_LOG", - "homestar=debug,homestar_runtime=debug,libp2p=debug,libp2p_gossipsub::behaviour=debug", - ) - .arg("start") - .arg("-c") - .arg(config1.filename()) - .arg("--db") - .arg(&proc_info1.db_path) - .stdout(Stdio::piped()) - .spawn() - .unwrap(); - let proc_guard1 = ChildGuard::new(homestar_proc1); - - if wait_for_socket_connection_v6(rpc_port1, 1000).is_err() { - panic!("Homestar server/runtime failed to start in time"); - } - - let toml2 = format!( - r#" - [node] - [node.network.keypair_config] - existing = {{ key_type = "secp256k1", path = "./fixtures/__testkey_secp256k1.der" }} - [node.network.libp2p] - listen_address = "{listen_addr2}" - node_addresses = ["{node_addra}"] - [node.network.libp2p.mdns] - enable = false - [node.network.metrics] - port = {metrics_port2} - [node.network.libp2p.rendezvous] - enable_client = false - [node.network.rpc] - port = {rpc_port2} - [node.network.webserver] - port = {ws_port2} - "# - ); - - let config2 = make_config!(toml2); - - let homestar_proc2 = Command::new(BIN.as_os_str()) - .env( - "RUST_LOG", - "homestar=debug,homestar_runtime=debug,libp2p=debug,libp2p_gossipsub::behaviour=debug", - ) - .arg("start") - .arg("-c") - .arg(config2.filename()) - .arg("--db") - .arg(proc_info2.db_path.clone()) - .stdout(Stdio::piped()) - .spawn() - .unwrap(); - let proc_guard2 = ChildGuard::new(homestar_proc2); - - if wait_for_socket_connection_v6(rpc_port2, 1000).is_err() { - panic!("Homestar server/runtime failed to start in time"); - } - - // Kill node two after seven seconds. - let _ = kill_homestar(proc_guard2.take(), Some(Duration::from_secs(7))); - - // Collect logs for eight seconds then kill node one. - let dead_proc1 = kill_homestar(proc_guard1.take(), Some(Duration::from_secs(8))); - - // Retrieve logs. - let stdout = retrieve_output(dead_proc1); - - // Check that node two disconnected from node one. 
- let two_disconnected_from_one = check_for_line_with( - stdout.clone(), - vec!["peer connection closed", SECP256K1MULTIHASH], - ); - - // Check that node two was not removed from the Kademlia table. - let two_removed_from_dht_table = check_for_line_with( - stdout.clone(), - vec!["removed peer from kademlia table", SECP256K1MULTIHASH], - ); - - assert!(two_disconnected_from_one); - assert!(!two_removed_from_dht_table); - - Ok(()) -} diff --git a/homestar-runtime/tests/network/notification.rs b/homestar-runtime/tests/network/connection.rs similarity index 83% rename from homestar-runtime/tests/network/notification.rs rename to homestar-runtime/tests/network/connection.rs index 8ef42822..8c553edc 100644 --- a/homestar-runtime/tests/network/notification.rs +++ b/homestar-runtime/tests/network/connection.rs @@ -1,8 +1,9 @@ use crate::{ make_config, utils::{ - kill_homestar, listen_addr, multiaddr, wait_for_socket_connection, ChildGuard, ProcInfo, - TimeoutFutureExt, BIN_NAME, ED25519MULTIHASH, SECP256K1MULTIHASH, + check_for_line_with, kill_homestar, listen_addr, multiaddr, retrieve_output, + wait_for_socket_connection, ChildGuard, ProcInfo, TimeoutFutureExt, BIN_NAME, + ED25519MULTIHASH, SECP256K1MULTIHASH, }, }; use anyhow::Result; @@ -75,7 +76,7 @@ fn test_connection_notifications_integration() -> Result<()> { .stdout(Stdio::piped()) .spawn() .unwrap(); - let _proc_guard1 = ChildGuard::new(homestar_proc1); + let proc_guard1 = ChildGuard::new(homestar_proc1); if wait_for_socket_connection(ws_port1, 1000).is_err() { panic!("Homestar server/runtime failed to start in time"); @@ -151,7 +152,7 @@ fn test_connection_notifications_integration() -> Result<()> { } } - let _ = kill_homestar(proc_guard2.take(), None); + let dead_proc2 = kill_homestar(proc_guard2.take(), None); // Poll for connection closed message loop { @@ -167,18 +168,92 @@ fn test_connection_notifications_integration() -> Result<()> { } } - // Check node endpoint to match - let http_url = 
format!("http://localhost:{}", ws_port1); - let http_resp = reqwest::get(format!("{}/node", http_url)).await.unwrap(); - assert_eq!(http_resp.status(), 200); - let http_resp = http_resp.json::().await.unwrap(); - assert_eq!( - http_resp, - serde_json::json!({ - "static": {"peer_id": ED25519MULTIHASH}, - "dynamic": {"listeners": [format!("{listen_addr1}")], "connections": {}} - }) + // Kill proceses. + let dead_proc1 = kill_homestar(proc_guard1.take(), None); + + // Retrieve logs. + let stdout1 = retrieve_output(dead_proc1); + let stdout2 = retrieve_output(dead_proc2); + + // Check node one added node two to Kademlia table + let two_added_to_dht = check_for_line_with( + stdout1.clone(), + vec![ + "added configured node to kademlia routing table", + SECP256K1MULTIHASH, + ], + ); + + // Check node one DHT routing table was updated with node two + let two_in_dht_routing_table = check_for_line_with( + stdout1.clone(), + vec![ + "kademlia routing table updated with peer", + SECP256K1MULTIHASH, + ], ); + + // Check that node one connected to node two. + let one_connected_to_two = check_for_line_with( + stdout1.clone(), + vec!["peer connection established", SECP256K1MULTIHASH], + ); + + // Check that node two disconnected from node one. + let two_disconnected_from_one = check_for_line_with( + stdout1.clone(), + vec!["peer connection closed", SECP256K1MULTIHASH], + ); + + // Check that node two was not removed from the Kademlia table. 
+ let two_removed_from_dht_table = check_for_line_with( + stdout1.clone(), + vec!["removed peer from kademlia table", SECP256K1MULTIHASH], + ); + + assert!(one_connected_to_two); + assert!(two_in_dht_routing_table); + assert!(two_added_to_dht); + assert!(two_disconnected_from_one); + assert!(!two_removed_from_dht_table); + + // Check node two added node one to Kademlia table + let one_addded_to_dht = check_for_line_with( + stdout2.clone(), + vec![ + "added configured node to kademlia routing table", + ED25519MULTIHASH, + ], + ); + + // Check node two DHT routing table was updated with node one + let one_in_dht_routing_table = check_for_line_with( + stdout2.clone(), + vec!["kademlia routing table updated with peer", ED25519MULTIHASH], + ); + + // Check that node two connected to node one. + let two_connected_to_one = check_for_line_with( + stdout2, + vec!["peer connection established", ED25519MULTIHASH], + ); + + assert!(one_addded_to_dht); + assert!(one_in_dht_routing_table); + assert!(two_connected_to_one); + + // Check node endpoint to match + // let http_url = format!("http://localhost:{}", ws_port1); + // let http_resp = reqwest::get(format!("{}/node", http_url)).await.unwrap(); + // assert_eq!(http_resp.status(), 200); + // let http_resp = http_resp.json::().await.unwrap(); + // assert_eq!( + // http_resp, + // serde_json::json!({ + // "static": {"peer_id": ED25519MULTIHASH}, + // "dynamic": {"listeners": [format!("{listen_addr1}")], "connections": {}} + // }) + // ); }); Ok(()) From 191c3e342e6e5ddbb946ece7dbb7f1be0033b863 Mon Sep 17 00:00:00 2001 From: Brian Ginsburg Date: Tue, 6 Feb 2024 10:03:28 -0800 Subject: [PATCH 42/75] test: Separate out node info test --- homestar-runtime/tests/network.rs | 72 +++++++++++++++++++- homestar-runtime/tests/network/connection.rs | 13 ---- 2 files changed, 70 insertions(+), 15 deletions(-) diff --git a/homestar-runtime/tests/network.rs b/homestar-runtime/tests/network.rs index 5be4fe63..39cfa0fb 100644 --- 
a/homestar-runtime/tests/network.rs +++ b/homestar-runtime/tests/network.rs @@ -2,8 +2,8 @@ use crate::{ make_config, utils::{ check_for_line_with, kill_homestar, listen_addr, retrieve_output, - wait_for_socket_connection_v6, ChildGuard, ProcInfo, BIN_NAME, ED25519MULTIHASH, - SECP256K1MULTIHASH, + wait_for_socket_connection, wait_for_socket_connection_v6, ChildGuard, ProcInfo, BIN_NAME, + ED25519MULTIHASH, SECP256K1MULTIHASH, }, }; use anyhow::Result; @@ -253,3 +253,71 @@ fn test_websocket_listens_on_address_integration() -> Result<()> { Ok(()) } + +#[test] +#[serial_test::parallel] +fn test_node_info_endpoint_integration() -> Result<()> { + let proc_info = ProcInfo::new().unwrap(); + + let rpc_port = proc_info.rpc_port; + let metrics_port = proc_info.metrics_port; + let ws_port = proc_info.ws_port; + let listen_addr = listen_addr(proc_info.listen_port); + + let toml = format!( + r#" + [node] + [node.network.keypair_config] + existing = {{ key_type = "ed25519", path = "./fixtures/__testkey_ed25519.pem" }} + [node.network.libp2p] + listen_address = "{listen_addr}" + [node.network.libp2p.mdns] + enable = false + [node.network.libp2p.rendezvous] + enable_client = false + [node.network.metrics] + port = {metrics_port} + [node.network.rpc] + port = {rpc_port} + [node.network.webserver] + port = {ws_port} + "# + ); + let config1 = make_config!(toml); + + let homestar_proc1 = Command::new(BIN.as_os_str()) + .env( + "RUST_LOG", + "homestar=debug,homestar_runtime=debug,libp2p=debug,libp2p_gossipsub::behaviour=debug", + ) + .arg("start") + .arg("-c") + .arg(config1.filename()) + .arg("--db") + .arg(&proc_info.db_path) + .stdout(Stdio::piped()) + .spawn() + .unwrap(); + let _proc_guard1 = ChildGuard::new(homestar_proc1); + + if wait_for_socket_connection(ws_port, 1000).is_err() { + panic!("Homestar server/runtime failed to start in time"); + } + + tokio_test::block_on(async { + // Check node endpoint to match + let http_url = format!("http://localhost:{}", ws_port); + let 
http_resp = reqwest::get(format!("{}/node", http_url)).await.unwrap(); + assert_eq!(http_resp.status(), 200); + let http_resp = http_resp.json::().await.unwrap(); + assert_eq!( + http_resp, + serde_json::json!({ + "static": {"peer_id": ED25519MULTIHASH}, + "dynamic": {"listeners": [format!("{listen_addr}")], "connections": {}} + }) + ); + }); + + Ok(()) +} diff --git a/homestar-runtime/tests/network/connection.rs b/homestar-runtime/tests/network/connection.rs index 8c553edc..a41d8dbd 100644 --- a/homestar-runtime/tests/network/connection.rs +++ b/homestar-runtime/tests/network/connection.rs @@ -241,19 +241,6 @@ fn test_connection_notifications_integration() -> Result<()> { assert!(one_addded_to_dht); assert!(one_in_dht_routing_table); assert!(two_connected_to_one); - - // Check node endpoint to match - // let http_url = format!("http://localhost:{}", ws_port1); - // let http_resp = reqwest::get(format!("{}/node", http_url)).await.unwrap(); - // assert_eq!(http_resp.status(), 200); - // let http_resp = http_resp.json::().await.unwrap(); - // assert_eq!( - // http_resp, - // serde_json::json!({ - // "static": {"peer_id": ED25519MULTIHASH}, - // "dynamic": {"listeners": [format!("{listen_addr1}")], "connections": {}} - // }) - // ); }); Ok(()) From e0ec4cf68cd8b835f44a9050ed1c8b6cf09d0d01 Mon Sep 17 00:00:00 2001 From: Brian Ginsburg Date: Tue, 6 Feb 2024 10:20:38 -0800 Subject: [PATCH 43/75] test: Add OpenRPC discovery endpoint test --- homestar-runtime/tests/network.rs | 66 +++++++++++++++++++++++++++++++ 1 file changed, 66 insertions(+) diff --git a/homestar-runtime/tests/network.rs b/homestar-runtime/tests/network.rs index 39cfa0fb..b5b260eb 100644 --- a/homestar-runtime/tests/network.rs +++ b/homestar-runtime/tests/network.rs @@ -321,3 +321,69 @@ fn test_node_info_endpoint_integration() -> Result<()> { Ok(()) } + +#[test] +#[serial_test::parallel] +fn test_discovery_endpoint_integration() -> Result<()> { + let proc_info = ProcInfo::new().unwrap(); + + let 
rpc_port = proc_info.rpc_port; + let metrics_port = proc_info.metrics_port; + let ws_port = proc_info.ws_port; + let listen_addr = listen_addr(proc_info.listen_port); + + let toml = format!( + r#" + [node] + [node.network.keypair_config] + existing = {{ key_type = "ed25519", path = "./fixtures/__testkey_ed25519.pem" }} + [node.network.libp2p] + listen_address = "{listen_addr}" + [node.network.libp2p.mdns] + enable = false + [node.network.libp2p.rendezvous] + enable_client = false + [node.network.metrics] + port = {metrics_port} + [node.network.rpc] + port = {rpc_port} + [node.network.webserver] + port = {ws_port} + "# + ); + let config1 = make_config!(toml); + + let homestar_proc1 = Command::new(BIN.as_os_str()) + .env( + "RUST_LOG", + "homestar=debug,homestar_runtime=debug,libp2p=debug,libp2p_gossipsub::behaviour=debug", + ) + .arg("start") + .arg("-c") + .arg(config1.filename()) + .arg("--db") + .arg(&proc_info.db_path) + .stdout(Stdio::piped()) + .spawn() + .unwrap(); + let _proc_guard1 = ChildGuard::new(homestar_proc1); + + if wait_for_socket_connection(ws_port, 1000).is_err() { + panic!("Homestar server/runtime failed to start in time"); + } + + tokio_test::block_on(async { + // Check discovery endpoint to match + let http_url = format!("http://localhost:{}", ws_port); + let http_resp = reqwest::get(format!("{}/rpc_discover", http_url)) + .await + .unwrap(); + assert_eq!(http_resp.status(), 200); + let http_resp = http_resp.json::().await.unwrap(); + + const API_SCHEMA_DOC: &str = include_str!("../schemas/docs/api.json"); + assert_eq!(http_resp, serde_json::json!(API_SCHEMA_DOC)); + }); + + Ok(()) +} From ea1d4201552ced729b3d86bba2ed218bab1c3789 Mon Sep 17 00:00:00 2001 From: Brian Ginsburg Date: Tue, 6 Feb 2024 16:12:26 -0800 Subject: [PATCH 44/75] test: Stabilize mDNS connect test --- homestar-runtime/tests/network/mdns.rs | 51 ++++++++++++++++++++++++-- 1 file changed, 47 insertions(+), 4 deletions(-) diff --git a/homestar-runtime/tests/network/mdns.rs 
b/homestar-runtime/tests/network/mdns.rs index 949ddecc..24e15f8e 100644 --- a/homestar-runtime/tests/network/mdns.rs +++ b/homestar-runtime/tests/network/mdns.rs @@ -37,7 +37,7 @@ fn test_libp2p_connect_after_mdns_discovery_serial() -> Result<()> { let ws_port1 = proc_info1.ws_port; let ws_port2 = proc_info2.ws_port; - tokio_test::block_on(async { + tokio_test::task::spawn(async { let toml1 = format!( r#" [node] @@ -127,11 +127,26 @@ fn test_libp2p_connect_after_mdns_discovery_serial() -> Result<()> { .unwrap(); let proc_guard2 = ChildGuard::new(homestar_proc2); - if wait_for_socket_connection_v6(rpc_port2, 1000).is_err() { + if wait_for_socket_connection(ws_port2, 1000).is_err() { panic!("Homestar server/runtime failed to start in time"); } - // Poll for mDNS discovered message and conenection established messages + let ws_url2 = format!("ws://{}:{}", Ipv4Addr::LOCALHOST, ws_port2); + let client2 = WsClientBuilder::default() + .build(ws_url2.clone()) + .await + .unwrap(); + + let mut sub2: Subscription> = client2 + .subscribe( + SUBSCRIBE_NETWORK_EVENTS_ENDPOINT, + rpc_params![], + UNSUBSCRIBE_NETWORK_EVENTS_ENDPOINT, + ) + .await + .unwrap(); + + // Poll for mDNS discovered message and conenection established messages on node one let mut discovered_mdns = false; let mut connection_established = false; loop { @@ -159,7 +174,35 @@ fn test_libp2p_connect_after_mdns_discovery_serial() -> Result<()> { } } - // Collect logs for seven seconds then kill processes. 
+ // Poll for mDNS discovered message and conenection established messages on node two + let mut discovered_mdns = false; + let mut connection_established = false; + loop { + if let Ok(msg) = sub2.next().with_timeout(Duration::from_secs(30)).await { + let json: serde_json::Value = + serde_json::from_slice(&msg.unwrap().unwrap()).unwrap(); + + if json["discovered_mdns"].is_object() { + discovered_mdns = true; + } else if json["connection_established"].is_object() { + connection_established = true; + } + } else { + panic!( + r#"Expected notifications from node two did not arrive in time: + - mDNS discovered: {} + - Connection established: {} + "#, + discovered_mdns, connection_established + ); + } + + if connection_established && discovered_mdns { + break; + } + } + + // Kill processes. let dead_proc1 = kill_homestar(proc_guard1.take(), None); let dead_proc2 = kill_homestar(proc_guard2.take(), None); From b855285aad226f22f5aa2de235ead74f677a9c8b Mon Sep 17 00:00:00 2001 From: Brian Ginsburg Date: Tue, 6 Feb 2024 19:01:37 -0800 Subject: [PATCH 45/75] test: Add subscribe_network_events test helper --- homestar-runtime/tests/network/connection.rs | 67 ++------ homestar-runtime/tests/network/dht.rs | 141 +++------------- homestar-runtime/tests/network/gossip.rs | 44 +---- homestar-runtime/tests/network/mdns.rs | 62 +------ homestar-runtime/tests/network/rendezvous.rs | 167 ++++--------------- homestar-runtime/tests/utils.rs | 46 +++++ 6 files changed, 120 insertions(+), 407 deletions(-) diff --git a/homestar-runtime/tests/network/connection.rs b/homestar-runtime/tests/network/connection.rs index a41d8dbd..b31b8936 100644 --- a/homestar-runtime/tests/network/connection.rs +++ b/homestar-runtime/tests/network/connection.rs @@ -2,27 +2,19 @@ use crate::{ make_config, utils::{ check_for_line_with, kill_homestar, listen_addr, multiaddr, retrieve_output, - wait_for_socket_connection, ChildGuard, ProcInfo, TimeoutFutureExt, BIN_NAME, - ED25519MULTIHASH, SECP256K1MULTIHASH, + 
subscribe_network_events, wait_for_socket_connection, ChildGuard, ProcInfo, + TimeoutFutureExt, BIN_NAME, ED25519MULTIHASH, SECP256K1MULTIHASH, }, }; use anyhow::Result; -use jsonrpsee::{ - core::client::{Subscription, SubscriptionClientT}, - rpc_params, - ws_client::WsClientBuilder, -}; use once_cell::sync::Lazy; use std::{ - net::Ipv4Addr, path::PathBuf, process::{Command, Stdio}, time::Duration, }; static BIN: Lazy = Lazy::new(|| assert_cmd::cargo::cargo_bin(BIN_NAME)); -const SUBSCRIBE_NETWORK_EVENTS_ENDPOINT: &str = "subscribe_network_events"; -const UNSUBSCRIBE_NETWORK_EVENTS_ENDPOINT: &str = "unsubscribe_network_events"; #[test] #[serial_test::parallel] @@ -82,24 +74,9 @@ fn test_connection_notifications_integration() -> Result<()> { panic!("Homestar server/runtime failed to start in time"); } - let ws_url = format!("ws://{}:{}", Ipv4Addr::LOCALHOST, ws_port1); tokio_test::block_on(async { - tokio_tungstenite::connect_async(ws_url.clone()) - .await - .unwrap(); - - let client = WsClientBuilder::default() - .build(ws_url.clone()) - .await - .unwrap(); - let mut sub: Subscription> = client - .subscribe( - SUBSCRIBE_NETWORK_EVENTS_ENDPOINT, - rpc_params![], - UNSUBSCRIBE_NETWORK_EVENTS_ENDPOINT, - ) - .await - .unwrap(); + let mut net_events1 = subscribe_network_events(ws_port1).await; + let sub1 = net_events1.sub(); let toml2 = format!( r#" @@ -140,7 +117,7 @@ fn test_connection_notifications_integration() -> Result<()> { // Poll for connection established message loop { - if let Ok(msg) = sub.next().with_timeout(Duration::from_secs(30)).await { + if let Ok(msg) = sub1.next().with_timeout(Duration::from_secs(30)).await { let json: serde_json::Value = serde_json::from_slice(&msg.unwrap().unwrap()).unwrap(); @@ -156,7 +133,7 @@ fn test_connection_notifications_integration() -> Result<()> { // Poll for connection closed message loop { - if let Ok(msg) = sub.next().with_timeout(Duration::from_secs(30)).await { + if let Ok(msg) = 
sub1.next().with_timeout(Duration::from_secs(30)).await { let json: serde_json::Value = serde_json::from_slice(&msg.unwrap().unwrap()).unwrap(); @@ -328,20 +305,8 @@ fn test_libp2p_redial_on_connection_closed_integration() -> Result<()> { } tokio_test::block_on(async { - let ws_url = format!("ws://{}:{}", Ipv4Addr::LOCALHOST, ws_port1); - let client = WsClientBuilder::default() - .build(ws_url.clone()) - .await - .unwrap(); - - let mut sub1: Subscription> = client - .subscribe( - SUBSCRIBE_NETWORK_EVENTS_ENDPOINT, - rpc_params![], - UNSUBSCRIBE_NETWORK_EVENTS_ENDPOINT, - ) - .await - .unwrap(); + let mut net_events1 = subscribe_network_events(ws_port1).await; + let sub1 = net_events1.sub(); let homestar_proc2 = Command::new(BIN.as_os_str()) .env( @@ -503,20 +468,8 @@ fn test_libp2p_redial_on_connection_error_integration() -> Result<()> { } tokio_test::block_on(async { - let ws_url = format!("ws://{}:{}", Ipv4Addr::LOCALHOST, ws_port1); - let client = WsClientBuilder::default() - .build(ws_url.clone()) - .await - .unwrap(); - - let mut sub1: Subscription> = client - .subscribe( - SUBSCRIBE_NETWORK_EVENTS_ENDPOINT, - rpc_params![], - UNSUBSCRIBE_NETWORK_EVENTS_ENDPOINT, - ) - .await - .unwrap(); + let mut net_events1 = subscribe_network_events(ws_port1).await; + let sub1 = net_events1.sub(); let homestar_proc2 = Command::new(BIN.as_os_str()) .env( diff --git a/homestar-runtime/tests/network/dht.rs b/homestar-runtime/tests/network/dht.rs index acd69d5d..0cc1eab9 100644 --- a/homestar-runtime/tests/network/dht.rs +++ b/homestar-runtime/tests/network/dht.rs @@ -2,9 +2,9 @@ use crate::{ make_config, utils::{ check_for_line_with, kill_homestar, listen_addr, multiaddr, retrieve_output, - wait_for_socket_connection, ChildGuard, ProcInfo, TimeoutFutureExt, BIN_NAME, - ED25519MULTIHASH, ED25519MULTIHASH2, ED25519MULTIHASH3, ED25519MULTIHASH5, - SECP256K1MULTIHASH, + subscribe_network_events, wait_for_socket_connection, ChildGuard, ProcInfo, + TimeoutFutureExt, BIN_NAME, 
ED25519MULTIHASH, ED25519MULTIHASH2, ED25519MULTIHASH3, + ED25519MULTIHASH5, SECP256K1MULTIHASH, }, }; use anyhow::Result; @@ -13,11 +13,6 @@ use homestar_runtime::{ db::{self, schema, Database}, Db, Settings, }; -use jsonrpsee::{ - core::client::{Subscription, SubscriptionClientT}, - rpc_params, - ws_client::WsClientBuilder, -}; use libipld::Cid; use once_cell::sync::Lazy; use std::{ @@ -29,8 +24,6 @@ use std::{ }; static BIN: Lazy = Lazy::new(|| assert_cmd::cargo::cargo_bin(BIN_NAME)); -const SUBSCRIBE_NETWORK_EVENTS_ENDPOINT: &str = "subscribe_network_events"; -const UNSUBSCRIBE_NETWORK_EVENTS_ENDPOINT: &str = "unsubscribe_network_events"; #[test] #[serial_test::parallel] @@ -97,20 +90,8 @@ fn test_libp2p_dht_records_integration() -> Result<()> { } tokio_test::block_on(async { - let ws_url = format!("ws://{}:{}", Ipv4Addr::LOCALHOST, ws_port1); - let client = WsClientBuilder::default() - .build(ws_url.clone()) - .await - .unwrap(); - - let mut sub1: Subscription> = client - .subscribe( - SUBSCRIBE_NETWORK_EVENTS_ENDPOINT, - rpc_params![], - UNSUBSCRIBE_NETWORK_EVENTS_ENDPOINT, - ) - .await - .unwrap(); + let mut net_events1 = subscribe_network_events(ws_port1).await; + let sub1 = net_events1.sub(); let toml2 = format!( r#" @@ -160,20 +141,8 @@ fn test_libp2p_dht_records_integration() -> Result<()> { panic!("Homestar server/runtime failed to start in time"); } - let ws_url2 = format!("ws://{}:{}", Ipv4Addr::LOCALHOST, ws_port2); - let client2 = WsClientBuilder::default() - .build(ws_url2.clone()) - .await - .unwrap(); - - let mut sub2: Subscription> = client2 - .subscribe( - SUBSCRIBE_NETWORK_EVENTS_ENDPOINT, - rpc_params![], - UNSUBSCRIBE_NETWORK_EVENTS_ENDPOINT, - ) - .await - .unwrap(); + let mut net_events2 = subscribe_network_events(ws_port2).await; + let sub2 = net_events2.sub(); // Poll for connection established message loop { @@ -426,20 +395,8 @@ fn test_libp2p_dht_quorum_failure_intregration() -> Result<()> { } tokio_test::block_on(async { - let ws_url 
= format!("ws://{}:{}", Ipv4Addr::LOCALHOST, ws_port1); - let client = WsClientBuilder::default() - .build(ws_url.clone()) - .await - .unwrap(); - - let mut sub1: Subscription> = client - .subscribe( - SUBSCRIBE_NETWORK_EVENTS_ENDPOINT, - rpc_params![], - UNSUBSCRIBE_NETWORK_EVENTS_ENDPOINT, - ) - .await - .unwrap(); + let mut net_events1 = subscribe_network_events(ws_port1).await; + let sub1 = net_events1.sub(); let toml2 = format!( r#" @@ -631,20 +588,8 @@ fn test_libp2p_dht_workflow_info_provider_integration() -> Result<()> { } tokio_test::block_on(async { - let ws_url = format!("ws://{}:{}", Ipv4Addr::LOCALHOST, ws_port1); - let client = WsClientBuilder::default() - .build(ws_url.clone()) - .await - .unwrap(); - - let mut sub1: Subscription> = client - .subscribe( - SUBSCRIBE_NETWORK_EVENTS_ENDPOINT, - rpc_params![], - UNSUBSCRIBE_NETWORK_EVENTS_ENDPOINT, - ) - .await - .unwrap(); + let mut net_events1 = subscribe_network_events(ws_port1).await; + let sub1 = net_events1.sub(); let toml2 = format!( r#" @@ -694,20 +639,8 @@ fn test_libp2p_dht_workflow_info_provider_integration() -> Result<()> { panic!("Homestar server/runtime failed to start in time"); } - let ws_url2 = format!("ws://{}:{}", Ipv4Addr::LOCALHOST, ws_port2); - let client2 = WsClientBuilder::default() - .build(ws_url2.clone()) - .await - .unwrap(); - - let mut sub2: Subscription> = client2 - .subscribe( - SUBSCRIBE_NETWORK_EVENTS_ENDPOINT, - rpc_params![], - UNSUBSCRIBE_NETWORK_EVENTS_ENDPOINT, - ) - .await - .unwrap(); + let mut net_events2 = subscribe_network_events(ws_port2).await; + let sub2 = net_events2.sub(); // Poll for connection established message loop { @@ -950,20 +883,8 @@ fn test_libp2p_dht_workflow_info_provider_recursive_integration() -> Result<()> panic!("Homestar server/runtime failed to start in time"); } - let ws_url1 = format!("ws://{}:{}", Ipv4Addr::LOCALHOST, ws_port1); - let client1 = WsClientBuilder::default() - .build(ws_url1.clone()) - .await - .unwrap(); - - let mut sub1: 
Subscription> = client1 - .subscribe( - SUBSCRIBE_NETWORK_EVENTS_ENDPOINT, - rpc_params![], - UNSUBSCRIBE_NETWORK_EVENTS_ENDPOINT, - ) - .await - .unwrap(); + let mut net_events1 = subscribe_network_events(ws_port1).await; + let sub1 = net_events1.sub(); let toml2 = format!( r#" @@ -1013,20 +934,8 @@ fn test_libp2p_dht_workflow_info_provider_recursive_integration() -> Result<()> panic!("Homestar server/runtime failed to start in time"); } - let ws_url2 = format!("ws://{}:{}", Ipv4Addr::LOCALHOST, ws_port2); - let client2 = WsClientBuilder::default() - .build(ws_url2.clone()) - .await - .unwrap(); - - let mut sub2: Subscription> = client2 - .subscribe( - SUBSCRIBE_NETWORK_EVENTS_ENDPOINT, - rpc_params![], - UNSUBSCRIBE_NETWORK_EVENTS_ENDPOINT, - ) - .await - .unwrap(); + let mut net_events2 = subscribe_network_events(ws_port2).await; + let sub2 = net_events2.sub(); let toml3 = format!( r#" @@ -1076,20 +985,8 @@ fn test_libp2p_dht_workflow_info_provider_recursive_integration() -> Result<()> panic!("Homestar server/runtime failed to start in time"); } - let ws_url3 = format!("ws://{}:{}", Ipv4Addr::LOCALHOST, ws_port3); - let client3 = WsClientBuilder::default() - .build(ws_url3.clone()) - .await - .unwrap(); - - let mut sub3: Subscription> = client3 - .subscribe( - SUBSCRIBE_NETWORK_EVENTS_ENDPOINT, - rpc_params![], - UNSUBSCRIBE_NETWORK_EVENTS_ENDPOINT, - ) - .await - .unwrap(); + let mut net_events3 = subscribe_network_events(ws_port3).await; + let sub3 = net_events3.sub(); // Poll node one for connection established with node two message loop { diff --git a/homestar-runtime/tests/network/gossip.rs b/homestar-runtime/tests/network/gossip.rs index 52bdd62a..e9cbc848 100644 --- a/homestar-runtime/tests/network/gossip.rs +++ b/homestar-runtime/tests/network/gossip.rs @@ -2,22 +2,16 @@ use crate::{ make_config, utils::{ check_for_line_with, kill_homestar, listen_addr, multiaddr, retrieve_output, - wait_for_socket_connection, ChildGuard, ProcInfo, TimeoutFutureExt, 
BIN_NAME, - ED25519MULTIHASH, SECP256K1MULTIHASH, + subscribe_network_events, wait_for_socket_connection, ChildGuard, ProcInfo, + TimeoutFutureExt, BIN_NAME, ED25519MULTIHASH, SECP256K1MULTIHASH, }, }; use anyhow::Result; use homestar_runtime::{db::Database, Db, Settings}; use itertools::Itertools; -use jsonrpsee::{ - core::client::{Subscription, SubscriptionClientT}, - rpc_params, - ws_client::WsClientBuilder, -}; use libipld::Cid; use once_cell::sync::Lazy; use std::{ - net::Ipv4Addr, path::PathBuf, process::{Command, Stdio}, str::FromStr, @@ -25,8 +19,6 @@ use std::{ }; static BIN: Lazy = Lazy::new(|| assert_cmd::cargo::cargo_bin(BIN_NAME)); -const SUBSCRIBE_NETWORK_EVENTS_ENDPOINT: &str = "subscribe_network_events"; -const UNSUBSCRIBE_NETWORK_EVENTS_ENDPOINT: &str = "unsubscribe_network_events"; #[test] #[serial_test::parallel] @@ -86,20 +78,8 @@ fn test_libp2p_receipt_gossip_integration() -> Result<()> { } tokio_test::block_on(async { - let ws_url = format!("ws://{}:{}", Ipv4Addr::LOCALHOST, ws_port1); - let client = WsClientBuilder::default() - .build(ws_url.clone()) - .await - .unwrap(); - - let mut sub1: Subscription> = client - .subscribe( - SUBSCRIBE_NETWORK_EVENTS_ENDPOINT, - rpc_params![], - UNSUBSCRIBE_NETWORK_EVENTS_ENDPOINT, - ) - .await - .unwrap(); + let mut net_events1 = subscribe_network_events(ws_port1).await; + let sub1 = net_events1.sub(); let toml2 = format!( r#" @@ -156,20 +136,8 @@ fn test_libp2p_receipt_gossip_integration() -> Result<()> { } } - let ws_url2 = format!("ws://{}:{}", Ipv4Addr::LOCALHOST, ws_port2); - let client2 = WsClientBuilder::default() - .build(ws_url2.clone()) - .await - .unwrap(); - - let mut sub2: Subscription> = client2 - .subscribe( - SUBSCRIBE_NETWORK_EVENTS_ENDPOINT, - rpc_params![], - UNSUBSCRIBE_NETWORK_EVENTS_ENDPOINT, - ) - .await - .unwrap(); + let mut net_events2 = subscribe_network_events(ws_port2).await; + let sub2 = net_events2.sub(); // Run test workflow on node one let _ = Command::new(BIN.as_os_str()) 
diff --git a/homestar-runtime/tests/network/mdns.rs b/homestar-runtime/tests/network/mdns.rs index 24e15f8e..521c2d42 100644 --- a/homestar-runtime/tests/network/mdns.rs +++ b/homestar-runtime/tests/network/mdns.rs @@ -1,28 +1,20 @@ use crate::{ make_config, utils::{ - check_for_line_with, kill_homestar, retrieve_output, wait_for_socket_connection, - wait_for_socket_connection_v6, ChildGuard, ProcInfo, TimeoutFutureExt, BIN_NAME, - ED25519MULTIHASH2, ED25519MULTIHASH4, ED25519MULTIHASH5, + check_for_line_with, kill_homestar, retrieve_output, subscribe_network_events, + wait_for_socket_connection, wait_for_socket_connection_v6, ChildGuard, ProcInfo, + TimeoutFutureExt, BIN_NAME, ED25519MULTIHASH2, ED25519MULTIHASH4, ED25519MULTIHASH5, }, }; use anyhow::Result; -use jsonrpsee::{ - core::client::{Subscription, SubscriptionClientT}, - rpc_params, - ws_client::WsClientBuilder, -}; use once_cell::sync::Lazy; use std::{ - net::Ipv4Addr, path::PathBuf, process::{Command, Stdio}, time::Duration, }; static BIN: Lazy = Lazy::new(|| assert_cmd::cargo::cargo_bin(BIN_NAME)); -const SUBSCRIBE_NETWORK_EVENTS_ENDPOINT: &str = "subscribe_network_events"; -const UNSUBSCRIBE_NETWORK_EVENTS_ENDPOINT: &str = "unsubscribe_network_events"; #[test] #[serial_test::file_serial] @@ -78,20 +70,8 @@ fn test_libp2p_connect_after_mdns_discovery_serial() -> Result<()> { panic!("Homestar server/runtime failed to start in time"); } - let ws_url1 = format!("ws://{}:{}", Ipv4Addr::LOCALHOST, ws_port1); - let client = WsClientBuilder::default() - .build(ws_url1.clone()) - .await - .unwrap(); - - let mut sub1: Subscription> = client - .subscribe( - SUBSCRIBE_NETWORK_EVENTS_ENDPOINT, - rpc_params![], - UNSUBSCRIBE_NETWORK_EVENTS_ENDPOINT, - ) - .await - .unwrap(); + let mut net_events1 = subscribe_network_events(ws_port1).await; + let sub1 = net_events1.sub(); let toml2 = format!( r#" @@ -131,20 +111,8 @@ fn test_libp2p_connect_after_mdns_discovery_serial() -> Result<()> { panic!("Homestar server/runtime 
failed to start in time"); } - let ws_url2 = format!("ws://{}:{}", Ipv4Addr::LOCALHOST, ws_port2); - let client2 = WsClientBuilder::default() - .build(ws_url2.clone()) - .await - .unwrap(); - - let mut sub2: Subscription> = client2 - .subscribe( - SUBSCRIBE_NETWORK_EVENTS_ENDPOINT, - rpc_params![], - UNSUBSCRIBE_NETWORK_EVENTS_ENDPOINT, - ) - .await - .unwrap(); + let mut net_events2 = subscribe_network_events(ws_port2).await; + let sub2 = net_events2.sub(); // Poll for mDNS discovered message and conenection established messages on node one let mut discovered_mdns = false; @@ -325,20 +293,8 @@ fn test_libp2p_disconnect_mdns_discovery_serial() -> Result<()> { panic!("Homestar server/runtime failed to start in time"); } - let ws_url1 = format!("ws://{}:{}", Ipv4Addr::LOCALHOST, ws_port1); - let client = WsClientBuilder::default() - .build(ws_url1.clone()) - .await - .unwrap(); - - let mut sub1: Subscription> = client - .subscribe( - SUBSCRIBE_NETWORK_EVENTS_ENDPOINT, - rpc_params![], - UNSUBSCRIBE_NETWORK_EVENTS_ENDPOINT, - ) - .await - .unwrap(); + let mut net_events1 = subscribe_network_events(ws_port1).await; + let sub1 = net_events1.sub(); let toml2 = format!( r#" diff --git a/homestar-runtime/tests/network/rendezvous.rs b/homestar-runtime/tests/network/rendezvous.rs index bfcf9580..a164d773 100644 --- a/homestar-runtime/tests/network/rendezvous.rs +++ b/homestar-runtime/tests/network/rendezvous.rs @@ -2,28 +2,21 @@ use crate::{ make_config, utils::{ check_for_line_with, count_lines_where, kill_homestar, listen_addr, multiaddr, - retrieve_output, wait_for_socket_connection, wait_for_socket_connection_v6, ChildGuard, - ProcInfo, TimeoutFutureExt, BIN_NAME, ED25519MULTIHASH, ED25519MULTIHASH2, - ED25519MULTIHASH3, ED25519MULTIHASH4, ED25519MULTIHASH5, SECP256K1MULTIHASH, + retrieve_output, subscribe_network_events, wait_for_socket_connection, + wait_for_socket_connection_v6, ChildGuard, ProcInfo, TimeoutFutureExt, BIN_NAME, + ED25519MULTIHASH, ED25519MULTIHASH2, 
ED25519MULTIHASH3, ED25519MULTIHASH4, + ED25519MULTIHASH5, SECP256K1MULTIHASH, }, }; use anyhow::Result; -use jsonrpsee::{ - core::client::{Subscription, SubscriptionClientT}, - rpc_params, - ws_client::WsClientBuilder, -}; use once_cell::sync::Lazy; use std::{ - net::Ipv4Addr, path::PathBuf, process::{Command, Stdio}, time::Duration, }; static BIN: Lazy = Lazy::new(|| assert_cmd::cargo::cargo_bin(BIN_NAME)); -const SUBSCRIBE_NETWORK_EVENTS_ENDPOINT: &str = "subscribe_network_events"; -const UNSUBSCRIBE_NETWORK_EVENTS_ENDPOINT: &str = "unsubscribe_network_events"; #[test] #[serial_test::parallel] @@ -131,28 +124,12 @@ fn test_libp2p_connect_rendezvous_discovery_integration() -> Result<()> { tokio_test::task::spawn(async { // Subscribe to rendezvous server - let ws_url1 = format!("ws://{}:{}", Ipv4Addr::LOCALHOST, ws_port1); - let client1 = WsClientBuilder::default().build(ws_url1).await.unwrap(); - let mut sub1: Subscription> = client1 - .subscribe( - SUBSCRIBE_NETWORK_EVENTS_ENDPOINT, - rpc_params![], - UNSUBSCRIBE_NETWORK_EVENTS_ENDPOINT, - ) - .await - .unwrap(); + let mut net_events1 = subscribe_network_events(ws_port1).await; + let sub1 = net_events1.sub(); // Subscribe to rendezvous client one - let ws_url2 = format!("ws://{}:{}", Ipv4Addr::LOCALHOST, ws_port2); - let client2 = WsClientBuilder::default().build(ws_url2).await.unwrap(); - let mut sub2: Subscription> = client2 - .subscribe( - SUBSCRIBE_NETWORK_EVENTS_ENDPOINT, - rpc_params![], - UNSUBSCRIBE_NETWORK_EVENTS_ENDPOINT, - ) - .await - .unwrap(); + let mut net_events2 = subscribe_network_events(ws_port2).await; + let sub2 = net_events2.sub(); // Poll for client one registered with server loop { @@ -222,19 +199,9 @@ fn test_libp2p_connect_rendezvous_discovery_integration() -> Result<()> { panic!("Homestar server/runtime failed to start in time"); } - let ws_url3 = format!("ws://{}:{}", Ipv4Addr::LOCALHOST, ws_port3); - let client3 = WsClientBuilder::default() - .build(ws_url3.clone()) - .await - 
.unwrap(); - let mut sub3: Subscription> = client3 - .subscribe( - SUBSCRIBE_NETWORK_EVENTS_ENDPOINT, - rpc_params![], - UNSUBSCRIBE_NETWORK_EVENTS_ENDPOINT, - ) - .await - .unwrap(); + // Subscribe to rendezvous client two + let mut net_events3 = subscribe_network_events(ws_port3).await; + let sub3 = net_events3.sub(); // Poll for discovered rendezvous message let mut discovered_rendezvous = false; @@ -439,28 +406,12 @@ fn test_libp2p_disconnect_rendezvous_discovery_integration() -> Result<()> { tokio_test::task::spawn(async { // Subscribe to rendezvous server - let ws_url1 = format!("ws://{}:{}", Ipv4Addr::LOCALHOST, ws_port1); - let client1 = WsClientBuilder::default().build(ws_url1).await.unwrap(); - let mut sub1: Subscription> = client1 - .subscribe( - SUBSCRIBE_NETWORK_EVENTS_ENDPOINT, - rpc_params![], - UNSUBSCRIBE_NETWORK_EVENTS_ENDPOINT, - ) - .await - .unwrap(); + let mut net_events1 = subscribe_network_events(ws_port1).await; + let sub1 = net_events1.sub(); // Subscribe to rendezvous client one - let ws_url2 = format!("ws://{}:{}", Ipv4Addr::LOCALHOST, ws_port2); - let client2 = WsClientBuilder::default().build(ws_url2).await.unwrap(); - let mut sub2: Subscription> = client2 - .subscribe( - SUBSCRIBE_NETWORK_EVENTS_ENDPOINT, - rpc_params![], - UNSUBSCRIBE_NETWORK_EVENTS_ENDPOINT, - ) - .await - .unwrap(); + let mut net_events2 = subscribe_network_events(ws_port2).await; + let sub2 = net_events2.sub(); // Poll for client one registered with server loop { @@ -530,19 +481,9 @@ fn test_libp2p_disconnect_rendezvous_discovery_integration() -> Result<()> { panic!("Homestar server/runtime failed to start in time"); } - let ws_url3 = format!("ws://{}:{}", Ipv4Addr::LOCALHOST, ws_port3); - let client3 = WsClientBuilder::default() - .build(ws_url3.clone()) - .await - .unwrap(); - let mut sub3: Subscription> = client3 - .subscribe( - SUBSCRIBE_NETWORK_EVENTS_ENDPOINT, - rpc_params![], - UNSUBSCRIBE_NETWORK_EVENTS_ENDPOINT, - ) - .await - .unwrap(); + // Subscribe to 
rendezvous client two + let mut net_events3 = subscribe_network_events(ws_port3).await; + let sub3 = net_events3.sub(); // Poll for discovered rendezvous message let mut discovered_rendezvous = false; @@ -728,28 +669,12 @@ fn test_libp2p_rendezvous_renew_registration_integration() -> Result<()> { tokio_test::task::spawn(async { // Subscribe to rendezvous server - let ws_url1 = format!("ws://{}:{}", Ipv4Addr::LOCALHOST, ws_port1); - let client1 = WsClientBuilder::default().build(ws_url1).await.unwrap(); - let mut sub1: Subscription> = client1 - .subscribe( - SUBSCRIBE_NETWORK_EVENTS_ENDPOINT, - rpc_params![], - UNSUBSCRIBE_NETWORK_EVENTS_ENDPOINT, - ) - .await - .unwrap(); + let mut net_events1 = subscribe_network_events(ws_port1).await; + let sub1 = net_events1.sub(); // Subscribe to rendezvous client - let ws_url2 = format!("ws://{}:{}", Ipv4Addr::LOCALHOST, ws_port2); - let client2 = WsClientBuilder::default().build(ws_url2).await.unwrap(); - let mut sub2: Subscription> = client2 - .subscribe( - SUBSCRIBE_NETWORK_EVENTS_ENDPOINT, - rpc_params![], - UNSUBSCRIBE_NETWORK_EVENTS_ENDPOINT, - ) - .await - .unwrap(); + let mut net_events2 = subscribe_network_events(ws_port2).await; + let sub2 = net_events2.sub(); // Poll for server registered client twice. 
let mut peer_registered_count = 0; @@ -929,28 +854,12 @@ fn test_libp2p_rendezvous_rediscovery_integration() -> Result<()> { tokio_test::task::spawn(async { // Subscribe to rendezvous server - let ws_url1 = format!("ws://{}:{}", Ipv4Addr::LOCALHOST, ws_port1); - let client1 = WsClientBuilder::default().build(ws_url1).await.unwrap(); - let mut sub1: Subscription> = client1 - .subscribe( - SUBSCRIBE_NETWORK_EVENTS_ENDPOINT, - rpc_params![], - UNSUBSCRIBE_NETWORK_EVENTS_ENDPOINT, - ) - .await - .unwrap(); + let mut net_events1 = subscribe_network_events(ws_port1).await; + let sub1 = net_events1.sub(); // Subscribe to rendezvous client - let ws_url2 = format!("ws://{}:{}", Ipv4Addr::LOCALHOST, ws_port2); - let client2 = WsClientBuilder::default().build(ws_url2).await.unwrap(); - let mut sub2: Subscription> = client2 - .subscribe( - SUBSCRIBE_NETWORK_EVENTS_ENDPOINT, - rpc_params![], - UNSUBSCRIBE_NETWORK_EVENTS_ENDPOINT, - ) - .await - .unwrap(); + let mut net_events2 = subscribe_network_events(ws_port2).await; + let sub2 = net_events2.sub(); // Poll for server provided discovery twice twice let mut discover_served_count = 0; @@ -1136,16 +1045,8 @@ fn test_libp2p_rendezvous_rediscover_on_expiration_integration() -> Result<()> { tokio_test::task::spawn(async { // Subscribe to rendezvous client one - let ws_url2 = format!("ws://{}:{}", Ipv4Addr::LOCALHOST, ws_port2); - let client2 = WsClientBuilder::default().build(ws_url2).await.unwrap(); - let mut sub2: Subscription> = client2 - .subscribe( - SUBSCRIBE_NETWORK_EVENTS_ENDPOINT, - rpc_params![], - UNSUBSCRIBE_NETWORK_EVENTS_ENDPOINT, - ) - .await - .unwrap(); + let mut net_events2 = subscribe_network_events(ws_port2).await; + let sub2 = net_events2.sub(); // Poll for client one registered with server the first time loop { @@ -1207,16 +1108,8 @@ fn test_libp2p_rendezvous_rediscover_on_expiration_integration() -> Result<()> { } // Subscribe to rendezvous client two - let ws_url3 = format!("ws://{}:{}", Ipv4Addr::LOCALHOST, 
ws_port3); - let client3 = WsClientBuilder::default().build(ws_url3).await.unwrap(); - let mut sub3: Subscription> = client3 - .subscribe( - SUBSCRIBE_NETWORK_EVENTS_ENDPOINT, - rpc_params![], - UNSUBSCRIBE_NETWORK_EVENTS_ENDPOINT, - ) - .await - .unwrap(); + let mut net_events3 = subscribe_network_events(ws_port3).await; + let sub3 = net_events3.sub(); // Poll for client two discovered twice let mut discovered_count = 0; diff --git a/homestar-runtime/tests/utils.rs b/homestar-runtime/tests/utils.rs index a5618de9..25e88210 100644 --- a/homestar-runtime/tests/utils.rs +++ b/homestar-runtime/tests/utils.rs @@ -2,6 +2,11 @@ use anyhow::{bail, Context, Result}; #[cfg(not(windows))] use assert_cmd::prelude::*; use chrono::{DateTime, FixedOffset}; +use jsonrpsee::{ + core::client::{Client, Subscription, SubscriptionClientT}, + rpc_params, + ws_client::WsClientBuilder, +}; #[cfg(not(windows))] use nix::{ sys::signal::{self, Signal}, @@ -372,6 +377,47 @@ pub(crate) fn wait_for_socket_connection_v6(port: u16, exp_retry_base: u64) -> R result.map_or_else(|_| Err(()), |_| Ok(())) } +/// Client and subscription. +pub(crate) struct WsClientSub { + #[allow(dead_code)] + client: Client, + sub: Subscription>, +} + +impl WsClientSub { + pub(crate) fn sub(&mut self) -> &mut Subscription> { + &mut self.sub + } +} + +/// Helper function to subscribe to network events +/// Note that the client must not be dropped of the sub will return only None. 
+pub(crate) async fn subscribe_network_events(ws_port: u16) -> WsClientSub { + const SUBSCRIBE_NETWORK_EVENTS_ENDPOINT: &str = "subscribe_network_events"; + const UNSUBSCRIBE_NETWORK_EVENTS_ENDPOINT: &str = "unsubscribe_network_events"; + + let ws_url = format!("ws://{}:{}", Ipv4Addr::LOCALHOST, ws_port); + tokio_tungstenite::connect_async(ws_url.clone()) + .await + .unwrap(); + + let client = WsClientBuilder::default() + .build(ws_url.clone()) + .await + .unwrap(); + + let sub: Subscription> = client + .subscribe( + SUBSCRIBE_NETWORK_EVENTS_ENDPOINT, + rpc_params![], + UNSUBSCRIBE_NETWORK_EVENTS_ENDPOINT, + ) + .await + .unwrap(); + + WsClientSub { client, sub } +} + /// Helper extension trait which allows to limit execution time for the futures. /// It is helpful in tests to ensure that no future will ever get stuck forever. pub(crate) trait TimeoutFutureExt: Future + Sized { From 436187ab4160823e5a6da8436c11eab78d017f40 Mon Sep 17 00:00:00 2001 From: Brian Ginsburg Date: Wed, 7 Feb 2024 14:31:30 -0800 Subject: [PATCH 46/75] test: Refactor rendezvous tests --- homestar-runtime/tests/network/rendezvous.rs | 674 +++++-------------- 1 file changed, 177 insertions(+), 497 deletions(-) diff --git a/homestar-runtime/tests/network/rendezvous.rs b/homestar-runtime/tests/network/rendezvous.rs index a164d773..005ef8ee 100644 --- a/homestar-runtime/tests/network/rendezvous.rs +++ b/homestar-runtime/tests/network/rendezvous.rs @@ -20,7 +20,7 @@ static BIN: Lazy = Lazy::new(|| assert_cmd::cargo::cargo_bin(BIN_NAME)) #[test] #[serial_test::parallel] -fn test_libp2p_connect_rendezvous_discovery_integration() -> Result<()> { +fn test_libp2p_connection_rendezvous_discovery_integration() -> Result<()> { let proc_info1 = ProcInfo::new().unwrap(); let proc_info2 = ProcInfo::new().unwrap(); let proc_info3 = ProcInfo::new().unwrap(); @@ -63,6 +63,7 @@ fn test_libp2p_connect_rendezvous_discovery_integration() -> Result<()> { // Start a rendezvous server let rendezvous_server = 
Command::new(BIN.as_os_str()) + .env("RUST_BACKTRACE", "0") .env( "RUST_LOG", "homestar=debug,homestar_runtime=debug,libp2p=debug,libp2p_gossipsub::behaviour=debug", @@ -102,47 +103,30 @@ fn test_libp2p_connect_rendezvous_discovery_integration() -> Result<()> { ); let config2 = make_config!(toml2); - // Start a peer that will register with the rendezvous server - let rendezvous_client1 = Command::new(BIN.as_os_str()) - .env( - "RUST_LOG", - "homestar=debug,homestar_runtime=debug,libp2p=debug,libp2p_gossipsub::behaviour=debug", - ) - .arg("start") - .arg("-c") - .arg(config2.filename()) - .arg("--db") - .arg(&proc_info2.db_path) - .stdout(Stdio::piped()) - .spawn() - .unwrap(); - let proc_guard_client1 = ChildGuard::new(rendezvous_client1); - - if wait_for_socket_connection(ws_port2, 1000).is_err() { - panic!("Homestar server/runtime failed to start in time"); - } - - tokio_test::task::spawn(async { + tokio_test::block_on(async { // Subscribe to rendezvous server let mut net_events1 = subscribe_network_events(ws_port1).await; let sub1 = net_events1.sub(); - // Subscribe to rendezvous client one - let mut net_events2 = subscribe_network_events(ws_port2).await; - let sub2 = net_events2.sub(); - - // Poll for client one registered with server - loop { - if let Ok(msg) = sub2.next().with_timeout(Duration::from_secs(30)).await { - let json: serde_json::Value = - serde_json::from_slice(&msg.unwrap().unwrap()).unwrap(); - - if json["registered_rendezvous"].is_object() { - break; - } - } else { - panic!("Rendezvous client one did not register with server in time"); - } + // Start a peer that will register with the rendezvous server + let rendezvous_client1 = Command::new(BIN.as_os_str()) + .env("RUST_BACKTRACE", "0") + .env( + "RUST_LOG", + "homestar=debug,homestar_runtime=debug,libp2p=debug,libp2p_gossipsub::behaviour=debug", + ) + .arg("start") + .arg("-c") + .arg(config2.filename()) + .arg("--db") + .arg(&proc_info2.db_path) + .stdout(Stdio::piped()) + .spawn() + 
.unwrap(); + let proc_guard_client1 = ChildGuard::new(rendezvous_client1); + + if wait_for_socket_connection(ws_port2, 1000).is_err() { + panic!("Homestar server/runtime failed to start in time"); } // Poll for server registered client one @@ -151,7 +135,9 @@ fn test_libp2p_connect_rendezvous_discovery_integration() -> Result<()> { let json: serde_json::Value = serde_json::from_slice(&msg.unwrap().unwrap()).unwrap(); - if json["peer_registered_rendezvous"].is_object() { + if json["peer_registered_rendezvous"].is_object() + && json["peer_registered_rendezvous"]["peer_id"] == SECP256K1MULTIHASH + { break; } } else { @@ -162,37 +148,38 @@ fn test_libp2p_connect_rendezvous_discovery_integration() -> Result<()> { // Start a peer that will discover the registrant through the rendezvous server let toml3 = format!( r#" - [node] - [node.network.keypair_config] - existing = {{ key_type = "ed25519", path = "./fixtures/__testkey_ed25519_2.pem" }} - [node.network.libp2p] - listen_address = "{listen_addr3}" - node_addresses = ["{node_addra}"] - [node.network.libp2p.mdns] - enable = false - [node.network.metrics] - port = {metrics_port3} - [node.network.rpc] - port = {rpc_port3} - [node.network.webserver] - port = {ws_port3} - "# + [node] + [node.network.keypair_config] + existing = {{ key_type = "ed25519", path = "./fixtures/__testkey_ed25519_2.pem" }} + [node.network.libp2p] + listen_address = "{listen_addr3}" + node_addresses = ["{node_addra}"] + [node.network.libp2p.mdns] + enable = false + [node.network.metrics] + port = {metrics_port3} + [node.network.rpc] + port = {rpc_port3} + [node.network.webserver] + port = {ws_port3} + "# ); let config3 = make_config!(toml3); let rendezvous_client2 = Command::new(BIN.as_os_str()) - .env( - "RUST_LOG", - "homestar=debug,homestar_runtime=debug,libp2p=debug,libp2p_gossipsub::behaviour=debug", - ) - .arg("start") - .arg("-c") - .arg(config3.filename()) - .arg("--db") - .arg(&proc_info3.db_path) - .stdout(Stdio::piped()) - .spawn() - 
.unwrap(); + .env("RUST_BACKTRACE", "0") + .env( + "RUST_LOG", + "homestar=debug,homestar_runtime=debug,libp2p=debug,libp2p_gossipsub::behaviour=debug", + ) + .arg("start") + .arg("-c") + .arg(config3.filename()) + .arg("--db") + .arg(&proc_info3.db_path) + .stdout(Stdio::piped()) + .spawn() + .unwrap(); let proc_guard_client2 = ChildGuard::new(rendezvous_client2); if wait_for_socket_connection(ws_port3, 1000).is_err() { @@ -203,47 +190,43 @@ fn test_libp2p_connect_rendezvous_discovery_integration() -> Result<()> { let mut net_events3 = subscribe_network_events(ws_port3).await; let sub3 = net_events3.sub(); - // Poll for discovered rendezvous message - let mut discovered_rendezvous = false; - let mut connection_established = false; + // Poll for discovery served by rendezvous server loop { - if let Ok(msg) = sub3.next().with_timeout(Duration::from_secs(30)).await { + if let Ok(msg) = sub1.next().with_timeout(Duration::from_secs(30)).await { let json: serde_json::Value = serde_json::from_slice(&msg.unwrap().unwrap()).unwrap(); - if json["discovered_rendezvous"].is_object() { - discovered_rendezvous = true - } else if json["connection_established"].is_object() - && json["connection_established"]["peer_id"] == SECP256K1MULTIHASH + if json["discover_served_rendezvous"].is_object() + && json["discover_served_rendezvous"]["enquirer"] == ED25519MULTIHASH2 { - connection_established = true - } - - if discovered_rendezvous && connection_established { break; } } else { - panic!("Client two did not receive rendezvous discovery from server in time"); + panic!("Rendezvous server did not serve discovery to client two in time"); } } - // Poll for discovery served by rendezvous server + // Kill server and registrant. + let dead_server = kill_homestar(proc_guard_server.take(), None); + let _ = kill_homestar(proc_guard_client1.take(), None); + + // Poll for client two disconnected from client one. 
loop { - if let Ok(msg) = sub1.next().with_timeout(Duration::from_secs(30)).await { + if let Ok(msg) = sub3.next().with_timeout(Duration::from_secs(30)).await { let json: serde_json::Value = serde_json::from_slice(&msg.unwrap().unwrap()).unwrap(); - if json["discover_served_rendezvous"].is_object() { + if json["connection_closed"].is_object() + && json["connection_closed"]["peer_id"] == SECP256K1MULTIHASH + { break; } } else { - panic!("Rendezvous server did not serve discovery in time"); + panic!("Client two did not receive rendezvous discovery from server in time"); } } - // Kill processes. - let dead_server = kill_homestar(proc_guard_server.take(), None); - let _ = kill_homestar(proc_guard_client1.take(), None); + // Kill discoverer. let dead_client2 = kill_homestar(proc_guard_client2.take(), None); // Retrieve logs. @@ -295,269 +278,16 @@ fn test_libp2p_connect_rendezvous_discovery_integration() -> Result<()> { assert!(one_addded_to_dht); assert!(one_in_dht_routing_table); assert!(two_connected_to_one); - }); - - Ok(()) -} - -#[test] -#[serial_test::parallel] -fn test_libp2p_disconnect_rendezvous_discovery_integration() -> Result<()> { - let proc_info1 = ProcInfo::new().unwrap(); - let proc_info2 = ProcInfo::new().unwrap(); - let proc_info3 = ProcInfo::new().unwrap(); - - let rpc_port1 = proc_info1.rpc_port; - let rpc_port2 = proc_info2.rpc_port; - let rpc_port3 = proc_info3.rpc_port; - let metrics_port1 = proc_info1.metrics_port; - let metrics_port2 = proc_info2.metrics_port; - let metrics_port3 = proc_info3.metrics_port; - let ws_port1 = proc_info1.ws_port; - let ws_port2 = proc_info2.ws_port; - let ws_port3 = proc_info3.ws_port; - let listen_addr1 = listen_addr(proc_info1.listen_port); - let listen_addr2 = listen_addr(proc_info2.listen_port); - let listen_addr3 = listen_addr(proc_info3.listen_port); - let announce_addrb = multiaddr(proc_info2.listen_port, SECP256K1MULTIHASH); - let node_addra = multiaddr(proc_info1.listen_port, ED25519MULTIHASH); - - let 
toml1 = format!( - r#" - [node] - [node.network.keypair_config] - existing = {{ key_type = "ed25519", path = "./fixtures/__testkey_ed25519.pem" }} - [node.network.libp2p] - listen_address = "{listen_addr1}" - [node.network.libp2p.rendezvous] - enable_server = true - [node.network.libp2p.mdns] - enable = false - [node.network.metrics] - port = {metrics_port1} - [node.network.rpc] - port = {rpc_port1} - [node.network.webserver] - port = {ws_port1} - "# - ); - let config1 = make_config!(toml1); - - // Start a rendezvous server - let rendezvous_server = Command::new(BIN.as_os_str()) - .env( - "RUST_LOG", - "homestar=debug,homestar_runtime=debug,libp2p=debug,libp2p_gossipsub::behaviour=debug", - ) - .arg("start") - .arg("-c") - .arg(config1.filename()) - .arg("--db") - .arg(&proc_info1.db_path) - .stdout(Stdio::piped()) - .spawn() - .unwrap(); - let proc_guard_server = ChildGuard::new(rendezvous_server); - - if wait_for_socket_connection(ws_port1, 1000).is_err() { - panic!("Homestar server/runtime failed to start in time"); - } - - let toml2 = format!( - r#" - [node] - [node.network.keypair_config] - existing = {{ key_type = "secp256k1", path = "./fixtures/__testkey_secp256k1.der" }} - [node.network.libp2p] - listen_address = "{listen_addr2}" - announce_addresses = ["{announce_addrb}"] - node_addresses = ["{node_addra}"] - [node.network.libp2p.mdns] - enable = false - [node.network.metrics] - port = {metrics_port2} - [node.network.rpc] - port = {rpc_port2} - [node.network.webserver] - port = {ws_port2} - "# - ); - let config2 = make_config!(toml2); - - // Start a peer that will register with the rendezvous server - let rendezvous_client1 = Command::new(BIN.as_os_str()) - .env( - "RUST_LOG", - "homestar=debug,homestar_runtime=debug,libp2p=debug,libp2p_gossipsub::behaviour=debug", - ) - .arg("start") - .arg("-c") - .arg(config2.filename()) - .arg("--db") - .arg(&proc_info2.db_path) - .stdout(Stdio::piped()) - .spawn() - .unwrap(); - let proc_guard_client1 = 
ChildGuard::new(rendezvous_client1); - - if wait_for_socket_connection(ws_port2, 1000).is_err() { - panic!("Homestar server/runtime failed to start in time"); - } - - tokio_test::task::spawn(async { - // Subscribe to rendezvous server - let mut net_events1 = subscribe_network_events(ws_port1).await; - let sub1 = net_events1.sub(); - - // Subscribe to rendezvous client one - let mut net_events2 = subscribe_network_events(ws_port2).await; - let sub2 = net_events2.sub(); - - // Poll for client one registered with server - loop { - if let Ok(msg) = sub2.next().with_timeout(Duration::from_secs(30)).await { - let json: serde_json::Value = - serde_json::from_slice(&msg.unwrap().unwrap()).unwrap(); - - if json["registered_rendezvous"].is_object() { - break; - } - } else { - panic!("Rendezvous client one did not register with server in time"); - } - } - - // Poll for server registered client one - loop { - if let Ok(msg) = sub1.next().with_timeout(Duration::from_secs(30)).await { - let json: serde_json::Value = - serde_json::from_slice(&msg.unwrap().unwrap()).unwrap(); - - if json["peer_registered_rendezvous"].is_object() { - break; - } - } else { - panic!("Rendezvous server did not confirm client one registration in time"); - } - } - - let toml3 = format!( - r#" - [node] - [node.network.keypair_config] - existing = {{ key_type = "ed25519", path = "./fixtures/__testkey_ed25519_2.pem" }} - [node.network.libp2p] - listen_address = "{listen_addr3}" - node_addresses = ["{node_addra}"] - [node.network.libp2p.mdns] - enable = false - [node.network.metrics] - port = {metrics_port3} - [node.network.rpc] - port = {rpc_port3} - [node.network.webserver] - port = {ws_port3} - "# - ); - let config3 = make_config!(toml3); - - // Start a peer that will discover the registrant through the rendezvous server - let rendezvous_client2 = Command::new(BIN.as_os_str()) - .env( - "RUST_LOG", - "homestar=debug,homestar_runtime=debug,libp2p=debug,libp2p_gossipsub::behaviour=debug", - ) - 
.arg("start") - .arg("-c") - .arg(config3.filename()) - .arg("--db") - .arg(&proc_info3.db_path) - .stdout(Stdio::piped()) - .spawn() - .unwrap(); - let proc_guard_client2 = ChildGuard::new(rendezvous_client2); - - if wait_for_socket_connection(ws_port3, 1000).is_err() { - panic!("Homestar server/runtime failed to start in time"); - } - - // Subscribe to rendezvous client two - let mut net_events3 = subscribe_network_events(ws_port3).await; - let sub3 = net_events3.sub(); - - // Poll for discovered rendezvous message - let mut discovered_rendezvous = false; - let mut connection_established = false; - loop { - if let Ok(msg) = sub3.next().with_timeout(Duration::from_secs(30)).await { - let json: serde_json::Value = - serde_json::from_slice(&msg.unwrap().unwrap()).unwrap(); - - if json["discovered_rendezvous"].is_object() { - discovered_rendezvous = true - } else if json["connection_established"].is_object() - && json["connection_established"]["peer_id"] == SECP256K1MULTIHASH - { - connection_established = true - } - - if discovered_rendezvous && connection_established { - break; - } - } else { - panic!("Client two did not receive rendezvous discovery from server in time"); - } - } - - // Poll for discovery served by rendezvous server - loop { - if let Ok(msg) = sub1.next().with_timeout(Duration::from_secs(30)).await { - let json: serde_json::Value = - serde_json::from_slice(&msg.unwrap().unwrap()).unwrap(); - - if json["discover_served_rendezvous"].is_object() { - break; - } - } else { - panic!("Rendezvous server did not serve discovery in time"); - } - } - - // Kill server and client one. - let _ = kill_homestar(proc_guard_server.take(), None); - let _ = kill_homestar(proc_guard_client1.take(), None); - - // Poll for client two disconnected from client one. 
- loop { - if let Ok(msg) = sub3.next().with_timeout(Duration::from_secs(30)).await { - let json: serde_json::Value = - serde_json::from_slice(&msg.unwrap().unwrap()).unwrap(); - - if json["connection_closed"].is_object() - && json["connection_closed"]["peer_id"] == SECP256K1MULTIHASH - { - break; - } - } else { - panic!("Client two did not receive rendezvous discovery from server in time"); - } - } - - // Kill client two. - let dead_client2 = kill_homestar(proc_guard_client2.take(), None); - - // Retrieve logs. - let stdout = retrieve_output(dead_client2); // Check that client two disconnected from client one. let two_disconnected_from_one = check_for_line_with( - stdout.clone(), + stdout_client2.clone(), vec!["peer connection closed", SECP256K1MULTIHASH], ); // Check that client two was removed from the Kademlia table let two_removed_from_dht_table = check_for_line_with( - stdout.clone(), + stdout_client2.clone(), vec!["removed peer from kademlia table", SECP256K1MULTIHASH], ); @@ -608,6 +338,7 @@ fn test_libp2p_rendezvous_renew_registration_integration() -> Result<()> { // Start a rendezvous server let rendezvous_server = Command::new(BIN.as_os_str()) + .env("RUST_BACKTRACE", "0") .env( "RUST_LOG", "homestar=debug,homestar_runtime=debug,libp2p=debug,libp2p_gossipsub::behaviour=debug", @@ -648,33 +379,30 @@ fn test_libp2p_rendezvous_renew_registration_integration() -> Result<()> { ); let config2 = make_config!(toml2); - // Start a peer that will renew registrations with the rendezvous server once per second - let rendezvous_client1 = Command::new(BIN.as_os_str()) - .env( - "RUST_LOG", - "homestar=debug,homestar_runtime=debug,libp2p=debug,libp2p_gossipsub::behaviour=debug", - ) - .arg("start") - .arg("-c") - .arg(config2.filename()) - .arg("--db") - .arg(&proc_info2.db_path) - .stdout(Stdio::piped()) - .spawn() - .unwrap(); - - if wait_for_socket_connection(ws_port2, 1000).is_err() { - panic!("Homestar server/runtime failed to start in time"); - } - - 
tokio_test::task::spawn(async { + tokio_test::block_on(async { // Subscribe to rendezvous server let mut net_events1 = subscribe_network_events(ws_port1).await; let sub1 = net_events1.sub(); - // Subscribe to rendezvous client - let mut net_events2 = subscribe_network_events(ws_port2).await; - let sub2 = net_events2.sub(); + // Start a peer that will renew registrations with the rendezvous server once per second + let rendezvous_client1 = Command::new(BIN.as_os_str()) + .env("RUST_BACKTRACE", "0") + .env( + "RUST_LOG", + "homestar=debug,homestar_runtime=debug,libp2p=debug,libp2p_gossipsub::behaviour=debug", + ) + .arg("start") + .arg("-c") + .arg(config2.filename()) + .arg("--db") + .arg(&proc_info2.db_path) + .stdout(Stdio::piped()) + .spawn() + .unwrap(); + + if wait_for_socket_connection(ws_port2, 1000).is_err() { + panic!("Homestar server/runtime failed to start in time"); + } // Poll for server registered client twice. let mut peer_registered_count = 0; @@ -697,27 +425,6 @@ fn test_libp2p_rendezvous_renew_registration_integration() -> Result<()> { } } - // Poll for client registered with server twice. - let mut registered_count = 0; - loop { - if let Ok(msg) = sub2.next().with_timeout(Duration::from_secs(30)).await { - let json: serde_json::Value = - serde_json::from_slice(&msg.unwrap().unwrap()).unwrap(); - - if json["registered_rendezvous"].is_object() - && json["registered_rendezvous"]["server"] == ED25519MULTIHASH - { - registered_count += 1; - } - } else { - panic!("Client did not register with server twice in time"); - } - - if registered_count == 2 { - break; - } - } - // Collect logs for five seconds then kill proceses. 
let dead_server = kill_homestar(rendezvous_server, None); let dead_client = kill_homestar(rendezvous_client1, None); @@ -790,6 +497,7 @@ fn test_libp2p_rendezvous_rediscovery_integration() -> Result<()> { // Start a rendezvous server let rendezvous_server = Command::new(BIN.as_os_str()) + .env("RUST_BACKTRACE", "0") .env( "RUST_LOG", "homestar=debug,homestar_runtime=debug,libp2p=debug,libp2p_gossipsub::behaviour=debug", @@ -832,34 +540,31 @@ fn test_libp2p_rendezvous_rediscovery_integration() -> Result<()> { ); let config2 = make_config!(toml2); - // Start a peer that will discover with the rendezvous server once per second - let rendezvous_client1 = Command::new(BIN.as_os_str()) - .env( - "RUST_LOG", - "homestar=debug,homestar_runtime=debug,libp2p=debug,libp2p_gossipsub::behaviour=debug", - ) - .arg("start") - .arg("-c") - .arg(config2.filename()) - .arg("--db") - .arg(&proc_info2.db_path) - .stdout(Stdio::piped()) - .spawn() - .unwrap(); - let proc_guard_client1 = ChildGuard::new(rendezvous_client1); - - if wait_for_socket_connection_v6(rpc_port2, 1000).is_err() { - panic!("Homestar server/runtime failed to start in time"); - } - - tokio_test::task::spawn(async { + tokio_test::block_on(async { // Subscribe to rendezvous server let mut net_events1 = subscribe_network_events(ws_port1).await; let sub1 = net_events1.sub(); - // Subscribe to rendezvous client - let mut net_events2 = subscribe_network_events(ws_port2).await; - let sub2 = net_events2.sub(); + // Start a peer that will discover with the rendezvous server once per second + let rendezvous_client1 = Command::new(BIN.as_os_str()) + .env("RUST_BACKTRACE", "0") + .env( + "RUST_LOG", + "homestar=debug,homestar_runtime=debug,libp2p=debug,libp2p_gossipsub::behaviour=debug", + ) + .arg("start") + .arg("-c") + .arg(config2.filename()) + .arg("--db") + .arg(&proc_info2.db_path) + .stdout(Stdio::piped()) + .spawn() + .unwrap(); + let proc_guard_client1 = ChildGuard::new(rendezvous_client1); + + if 
wait_for_socket_connection_v6(rpc_port2, 1000).is_err() { + panic!("Homestar server/runtime failed to start in time"); + } // Poll for server provided discovery twice twice let mut discover_served_count = 0; @@ -882,27 +587,6 @@ fn test_libp2p_rendezvous_rediscovery_integration() -> Result<()> { } } - // Poll for client discovered twice - let mut discovered_count = 0; - loop { - if let Ok(msg) = sub2.next().with_timeout(Duration::from_secs(30)).await { - let json: serde_json::Value = - serde_json::from_slice(&msg.unwrap().unwrap()).unwrap(); - - if json["discovered_rendezvous"].is_object() - && json["discovered_rendezvous"]["server"] == ED25519MULTIHASH - { - discovered_count += 1; - } - } else { - panic!("Client did not discover twice in time"); - } - - if discovered_count == 2 { - break; - } - } - // Collect logs for five seconds then kill proceses. let dead_server = kill_homestar(proc_guard_server.take(), None); let dead_client = kill_homestar(proc_guard_client1.take(), None); @@ -981,6 +665,7 @@ fn test_libp2p_rendezvous_rediscover_on_expiration_integration() -> Result<()> { // Start a rendezvous server let rendezvous_server = Command::new(BIN.as_os_str()) + .env("RUST_BACKTRACE", "0") .env( "RUST_LOG", "homestar=debug,homestar_runtime=debug,libp2p=debug,libp2p_gossipsub::behaviour=debug", @@ -1022,45 +707,45 @@ fn test_libp2p_rendezvous_rediscover_on_expiration_integration() -> Result<()> { ); let config2 = make_config!(toml2); - // Start a peer that will renew registrations with the rendezvous server every five seconds - let rendezvous_client1 = Command::new(BIN.as_os_str()) - .env("RUST_BACKTRACE", "0") - .env( - "RUST_LOG", - "homestar=debug,homestar_runtime=debug,libp2p=debug,libp2p_gossipsub::behaviour=debug", - ) - .arg("start") - .arg("-c") - .arg(config2.filename()) - .arg("--db") - .arg(&proc_info2.db_path) - .stdout(Stdio::piped()) - .spawn() - .unwrap(); - let proc_guard_client1 = ChildGuard::new(rendezvous_client1); - - if 
wait_for_socket_connection_v6(rpc_port2, 1000).is_err() { - panic!("Homestar server/runtime failed to start in time"); - } + tokio_test::block_on(async { + // Subscribe to rendezvous server + let mut net_events1 = subscribe_network_events(ws_port1).await; + let sub1 = net_events1.sub(); - tokio_test::task::spawn(async { - // Subscribe to rendezvous client one - let mut net_events2 = subscribe_network_events(ws_port2).await; - let sub2 = net_events2.sub(); + // Start a peer that will renew registrations with the rendezvous server every five seconds + let rendezvous_client1 = Command::new(BIN.as_os_str()) + .env("RUST_BACKTRACE", "0") + .env( + "RUST_LOG", + "homestar=debug,homestar_runtime=debug,libp2p=debug,libp2p_gossipsub::behaviour=debug", + ) + .arg("start") + .arg("-c") + .arg(config2.filename()) + .arg("--db") + .arg(&proc_info2.db_path) + .stdout(Stdio::piped()) + .spawn() + .unwrap(); + let proc_guard_client1 = ChildGuard::new(rendezvous_client1); + + if wait_for_socket_connection_v6(rpc_port2, 1000).is_err() { + panic!("Homestar server/runtime failed to start in time"); + } - // Poll for client one registered with server the first time + // Poll for server registered client one the first time loop { - if let Ok(msg) = sub2.next().with_timeout(Duration::from_secs(30)).await { + if let Ok(msg) = sub1.next().with_timeout(Duration::from_secs(30)).await { let json: serde_json::Value = serde_json::from_slice(&msg.unwrap().unwrap()).unwrap(); - if json["registered_rendezvous"].is_object() - && json["registered_rendezvous"]["server"] == ED25519MULTIHASH + if json["peer_registered_rendezvous"].is_object() + && json["peer_registered_rendezvous"]["peer_id"] == ED25519MULTIHASH5 { break; } } else { - panic!("Client did not register with server twice in time"); + panic!("Server did not receive registration from client one in time"); } } @@ -1070,61 +755,58 @@ fn test_libp2p_rendezvous_rediscover_on_expiration_integration() -> Result<()> { // by client one expirations. 
let toml3 = format!( r#" - [node] - [node.network.keypair_config] - existing = {{ key_type = "ed25519", path = "./fixtures/__testkey_ed25519_2.pem" }} - [node.network.libp2p] - listen_address = "{listen_addr3}" - node_addresses = ["{node_addra}"] - [node.network.libp2p.mdns] - enable = false - [node.network.metrics] - port = {metrics_port3} - [node.network.rpc] - port = {rpc_port3} - [node.network.webserver] - port = {ws_port3} - "# + [node] + [node.network.keypair_config] + existing = {{ key_type = "ed25519", path = "./fixtures/__testkey_ed25519_2.pem" }} + [node.network.libp2p] + listen_address = "{listen_addr3}" + node_addresses = ["{node_addra}"] + [node.network.libp2p.mdns] + enable = false + [node.network.metrics] + port = {metrics_port3} + [node.network.rpc] + port = {rpc_port3} + [node.network.webserver] + port = {ws_port3} + "# ); let config3 = make_config!(toml3); let rendezvous_client2 = Command::new(BIN.as_os_str()) - .env( - "RUST_LOG", - "homestar=debug,homestar_runtime=debug,libp2p=debug,libp2p_gossipsub::behaviour=debug", - ) - .arg("start") - .arg("-c") - .arg(config3.filename()) - .arg("--db") - .arg(&proc_info3.db_path) - .stdout(Stdio::piped()) - .spawn() - .unwrap(); + .env("RUST_BACKTRACE", "0") + .env( + "RUST_LOG", + "homestar=debug,homestar_runtime=debug,libp2p=debug,libp2p_gossipsub::behaviour=debug", + ) + .arg("start") + .arg("-c") + .arg(config3.filename()) + .arg("--db") + .arg(&proc_info3.db_path) + .stdout(Stdio::piped()) + .spawn() + .unwrap(); let proc_guard_client2 = ChildGuard::new(rendezvous_client2); if wait_for_socket_connection(ws_port3, 1000).is_err() { panic!("Homestar server/runtime failed to start in time"); } - // Subscribe to rendezvous client two - let mut net_events3 = subscribe_network_events(ws_port3).await; - let sub3 = net_events3.sub(); - - // Poll for client two discovered twice + // Poll for discovery served to client two twice let mut discovered_count = 0; loop { - if let Ok(msg) = 
sub3.next().with_timeout(Duration::from_secs(30)).await { + if let Ok(msg) = sub1.next().with_timeout(Duration::from_secs(30)).await { let json: serde_json::Value = serde_json::from_slice(&msg.unwrap().unwrap()).unwrap(); - if json["discovered_rendezvous"].is_object() - && json["discovered_rendezvous"]["server"] == ED25519MULTIHASH + if json["discover_served_rendezvous"].is_object() + && json["discover_served_rendezvous"]["enquirer"] == ED25519MULTIHASH2 { discovered_count += 1; } } else { - panic!("Client did not discover twice in time"); + panic!("Server did not serve discovery to client two twice in time"); } if discovered_count == 2 { @@ -1150,8 +832,6 @@ fn test_libp2p_rendezvous_rediscover_on_expiration_integration() -> Result<()> { ], ); - println!("server_discovery_count: {}", server_discovery_count); - // Count discovery responses the client let client_discovery_count = count_lines_where( stdout_client2, From 6f8998a9b9e96ead05153e727209cca8dc862d04 Mon Sep 17 00:00:00 2001 From: Brian Ginsburg Date: Wed, 7 Feb 2024 14:48:32 -0800 Subject: [PATCH 47/75] test: Refactor mDNS tests --- homestar-runtime/tests/network/mdns.rs | 299 ++++++------------------- 1 file changed, 71 insertions(+), 228 deletions(-) diff --git a/homestar-runtime/tests/network/mdns.rs b/homestar-runtime/tests/network/mdns.rs index 521c2d42..00b312b8 100644 --- a/homestar-runtime/tests/network/mdns.rs +++ b/homestar-runtime/tests/network/mdns.rs @@ -3,7 +3,7 @@ use crate::{ utils::{ check_for_line_with, kill_homestar, retrieve_output, subscribe_network_events, wait_for_socket_connection, wait_for_socket_connection_v6, ChildGuard, ProcInfo, - TimeoutFutureExt, BIN_NAME, ED25519MULTIHASH2, ED25519MULTIHASH4, ED25519MULTIHASH5, + TimeoutFutureExt, BIN_NAME, ED25519MULTIHASH2, ED25519MULTIHASH5, }, }; use anyhow::Result; @@ -18,7 +18,7 @@ static BIN: Lazy = Lazy::new(|| assert_cmd::cargo::cargo_bin(BIN_NAME)) #[test] #[serial_test::file_serial] -fn 
test_libp2p_connect_after_mdns_discovery_serial() -> Result<()> { +fn test_libp2p_connection_after_mdns_discovery_serial() -> Result<()> { let proc_info1 = ProcInfo::new().unwrap(); let proc_info2 = ProcInfo::new().unwrap(); @@ -29,9 +29,8 @@ fn test_libp2p_connect_after_mdns_discovery_serial() -> Result<()> { let ws_port1 = proc_info1.ws_port; let ws_port2 = proc_info2.ws_port; - tokio_test::task::spawn(async { - let toml1 = format!( - r#" + let toml1 = format!( + r#" [node] [node.network.keypair_config] existing = {{ key_type = "ed25519", path = "./fixtures/__testkey_ed25519_2.pem" }} @@ -46,12 +45,13 @@ fn test_libp2p_connect_after_mdns_discovery_serial() -> Result<()> { [node.network.webserver] port = {ws_port1} "# - ); - let config1 = make_config!(toml1); + ); + let config1 = make_config!(toml1); - // Start two nodes each configured to listen at 0.0.0.0 with no known peers. - // The nodes are configured with port 0 to allow the OS to select a port. - let homestar_proc1 = Command::new(BIN.as_os_str()) + // Start a node configured to listen at 0.0.0.0 with no known peers. + // The node is configured with port 0 to allow the OS to select a port. 
+ let homestar_proc1 = Command::new(BIN.as_os_str()) + .env("RUST_BACKTRACE", "0") .env( "RUST_LOG", "homestar=debug,homestar_runtime=debug,libp2p=debug,libp2p_gossipsub::behaviour=debug", @@ -64,57 +64,59 @@ fn test_libp2p_connect_after_mdns_discovery_serial() -> Result<()> { .stdout(Stdio::piped()) .spawn() .unwrap(); - let proc_guard1 = ChildGuard::new(homestar_proc1); - - if wait_for_socket_connection_v6(rpc_port1, 1000).is_err() { - panic!("Homestar server/runtime failed to start in time"); - } + let proc_guard1 = ChildGuard::new(homestar_proc1); + + if wait_for_socket_connection_v6(rpc_port1, 1000).is_err() { + panic!("Homestar server/runtime failed to start in time"); + } + + let toml2 = format!( + r#" + [node] + [node.network.keypair_config] + existing = {{ key_type = "ed25519", path = "./fixtures/__testkey_ed25519_5.pem" }} + [node.network.libp2p] + listen_address = "/ip4/0.0.0.0/tcp/0" + [node.network.libp2p.rendezvous] + enable_client = false + [node.network.metrics] + port = {metrics_port2} + [node.network.rpc] + port = {rpc_port2} + [node.network.webserver] + port = {ws_port2} + "# + ); + let config2 = make_config!(toml2); + tokio_test::block_on(async { + // Subscribe to node one let mut net_events1 = subscribe_network_events(ws_port1).await; let sub1 = net_events1.sub(); - let toml2 = format!( - r#" - [node] - [node.network.keypair_config] - existing = {{ key_type = "ed25519", path = "./fixtures/__testkey_ed25519_5.pem" }} - [node.network.libp2p] - listen_address = "/ip4/0.0.0.0/tcp/0" - [node.network.libp2p.rendezvous] - enable_client = false - [node.network.metrics] - port = {metrics_port2} - [node.network.rpc] - port = {rpc_port2} - [node.network.webserver] - port = {ws_port2} - "# - ); - let config2 = make_config!(toml2); - + // Start a second node configured to listen at 0.0.0.0 with no known peers. + // The node is configured with port 0 to allow the OS to select a port. 
let homestar_proc2 = Command::new(BIN.as_os_str()) - .env( - "RUST_LOG", - "homestar=debug,homestar_runtime=debug,libp2p=debug,libp2p_gossipsub::behaviour=debug", - ) - .arg("start") - .arg("-c") - .arg(config2.filename()) - .arg("--db") - .arg(&proc_info2.db_path) - .stdout(Stdio::piped()) - .spawn() - .unwrap(); + .env("RUST_BACKTRACE", "0") + .env( + "RUST_LOG", + "homestar=debug,homestar_runtime=debug,libp2p=debug,libp2p_gossipsub::behaviour=debug", + ) + .arg("start") + .arg("-c") + .arg(config2.filename()) + .arg("--db") + .arg(&proc_info2.db_path) + .stdout(Stdio::piped()) + .spawn() + .unwrap(); let proc_guard2 = ChildGuard::new(homestar_proc2); if wait_for_socket_connection(ws_port2, 1000).is_err() { panic!("Homestar server/runtime failed to start in time"); } - let mut net_events2 = subscribe_network_events(ws_port2).await; - let sub2 = net_events2.sub(); - - // Poll for mDNS discovered message and conenection established messages on node one + // Poll for mDNS discovered message and connection established messages on node one. let mut discovered_mdns = false; let mut connection_established = false; loop { @@ -124,7 +126,9 @@ fn test_libp2p_connect_after_mdns_discovery_serial() -> Result<()> { if json["discovered_mdns"].is_object() { discovered_mdns = true; - } else if json["connection_established"].is_object() { + } else if json["connection_established"].is_object() + && json["connection_established"]["peer_id"] == ED25519MULTIHASH5 + { connection_established = true; } } else { @@ -142,37 +146,27 @@ fn test_libp2p_connect_after_mdns_discovery_serial() -> Result<()> { } } - // Poll for mDNS discovered message and conenection established messages on node two - let mut discovered_mdns = false; - let mut connection_established = false; + // Kill node two. + let dead_proc2 = kill_homestar(proc_guard2.take(), None); + + // Poll for client two disconnected from client one. 
loop { - if let Ok(msg) = sub2.next().with_timeout(Duration::from_secs(30)).await { + if let Ok(msg) = sub1.next().with_timeout(Duration::from_secs(30)).await { let json: serde_json::Value = serde_json::from_slice(&msg.unwrap().unwrap()).unwrap(); - if json["discovered_mdns"].is_object() { - discovered_mdns = true; - } else if json["connection_established"].is_object() { - connection_established = true; + if json["connection_closed"].is_object() + && json["connection_closed"]["peer_id"] == ED25519MULTIHASH5 + { + break; } } else { - panic!( - r#"Expected notifications from node two did not arrive in time: - - mDNS discovered: {} - - Connection established: {} - "#, - discovered_mdns, connection_established - ); - } - - if connection_established && discovered_mdns { - break; + panic!("Client two did not receive rendezvous discovery from server in time"); } } - // Kill processes. + // Kill node one. let dead_proc1 = kill_homestar(proc_guard1.take(), None); - let dead_proc2 = kill_homestar(proc_guard2.take(), None); // Retrieve logs. let stdout1 = retrieve_output(dead_proc1); @@ -195,7 +189,7 @@ fn test_libp2p_connect_after_mdns_discovery_serial() -> Result<()> { // Check that DHT routing table was updated with node two let two_in_dht_routing_table = check_for_line_with( - stdout1, + stdout1.clone(), vec![ "kademlia routing table updated with peer", ED25519MULTIHASH5, @@ -233,168 +227,17 @@ fn test_libp2p_connect_after_mdns_discovery_serial() -> Result<()> { assert!(two_connected_to_one); assert!(one_addded_to_dht); assert!(one_in_dht_routing_table); - }); - - Ok(()) -} - -#[test] -#[serial_test::file_serial] -fn test_libp2p_disconnect_mdns_discovery_serial() -> Result<()> { - // Start two nodes each configured to listen at 0.0.0.0 with no known peers. - // The nodes are configured with port 0 to allow the OS to select a port. 
- - let proc_info1 = ProcInfo::new().unwrap(); - let proc_info2 = ProcInfo::new().unwrap(); - - let rpc_port1 = proc_info1.rpc_port; - let rpc_port2 = proc_info2.rpc_port; - let metrics_port1 = proc_info1.metrics_port; - let metrics_port2 = proc_info2.metrics_port; - let ws_port1 = proc_info1.ws_port; - let ws_port2 = proc_info2.ws_port; - - tokio_test::block_on(async { - let toml1 = format!( - r#" - [node] - [node.network.keypair_config] - existing = {{ key_type = "ed25519", path = "./fixtures/__testkey_ed25519_3.pem" }} - [node.network.libp2p] - listen_address = "/ip4/0.0.0.0/tcp/0" - [node.network.libp2p.rendezvous] - enable_client = false - [node.network.metrics] - port = {metrics_port1} - [node.network.rpc] - port = {rpc_port1} - [node.network.webserver] - port = {ws_port1} - "# - ); - let config1 = make_config!(toml1); - - let homestar_proc1 = Command::new(BIN.as_os_str()) - .env( - "RUST_LOG", - "homestar=debug,homestar_runtime=debug,libp2p=debug,libp2p_gossipsub::behaviour=debug", - ) - .arg("start") - .arg("-c") - .arg(config1.filename()) - .arg("--db") - .arg(&proc_info1.db_path) - .stdout(Stdio::piped()) - .spawn() - .unwrap(); - let proc_guard1 = ChildGuard::new(homestar_proc1); - - if wait_for_socket_connection(ws_port1, 1000).is_err() { - panic!("Homestar server/runtime failed to start in time"); - } - - let mut net_events1 = subscribe_network_events(ws_port1).await; - let sub1 = net_events1.sub(); - - let toml2 = format!( - r#" - [node] - [node.network.keypair_config] - existing = {{ key_type = "ed25519", path = "./fixtures/__testkey_ed25519_4.pem" }} - [node.network.libp2p] - listen_address = "/ip4/0.0.0.0/tcp/0" - [node.network.libp2p.rendezvous] - enable_client = false - [node.network.metrics] - port = {metrics_port2} - [node.network.rpc] - port = {rpc_port2} - [node.network.webserver] - port = {ws_port2} - "# - ); - let config2 = make_config!(toml2); - - let homestar_proc2 = Command::new(BIN.as_os_str()) - .env( - "RUST_LOG", - 
"homestar=debug,homestar_runtime=debug,libp2p=debug,libp2p_gossipsub::behaviour=debug", - ) - .arg("start") - .arg("-c") - .arg(config2.filename()) - .arg("--db") - .arg(&proc_info2.db_path) - .stdout(Stdio::piped()) - .spawn() - .unwrap(); - let proc_guard2 = ChildGuard::new(homestar_proc2); - - if wait_for_socket_connection(ws_port2, 1000).is_err() { - panic!("Homestar server/runtime failed to start in time"); - } - - // Poll for mDNS discovered message and conenection established messages - let mut discovered_mdns = false; - let mut connection_established = false; - loop { - if let Ok(msg) = sub1.next().with_timeout(Duration::from_secs(30)).await { - let json: serde_json::Value = - serde_json::from_slice(&msg.unwrap().unwrap()).unwrap(); - - if json["discovered_mdns"].is_object() { - discovered_mdns = true; - } else if json["connection_established"].is_object() { - connection_established = true; - } - } else { - panic!( - r#"Expected notifications from node one did not arrive in time: -- mDNS discovered: {} -- Connection established: {} -"#, - discovered_mdns, connection_established - ); - } - - if connection_established && discovered_mdns { - break; - } - } - - // Kill node two - let _ = kill_homestar(proc_guard2.take(), None); - - // Poll for connection closed message - loop { - if let Ok(msg) = sub1.next().with_timeout(Duration::from_secs(30)).await { - let json: serde_json::Value = - serde_json::from_slice(&msg.unwrap().unwrap()).unwrap(); - - if json["connection_closed"].is_object() { - break; - } - } else { - panic!("Node two did not disconnect from node one in time"); - } - } - - // Collect logs for eight seconds then kill node one. - let dead_proc1 = kill_homestar(proc_guard1.take(), None); - - // Retrieve logs. - let stdout = retrieve_output(dead_proc1); // Check that node two disconnected from node one. 
let two_disconnected_from_one = check_for_line_with( - stdout.clone(), - vec!["peer connection closed", ED25519MULTIHASH4], + stdout1.clone(), + vec!["peer connection closed", ED25519MULTIHASH5], ); // Check that node two was removed from the Kademlia table let two_removed_from_dht_table = check_for_line_with( - stdout.clone(), - vec!["removed peer from kademlia table", ED25519MULTIHASH4], + stdout1.clone(), + vec!["removed peer from kademlia table", ED25519MULTIHASH5], ); assert!(two_disconnected_from_one); From fc70e06455b1c055018eb5ff42293c319edd1bda Mon Sep 17 00:00:00 2001 From: Brian Ginsburg Date: Wed, 7 Feb 2024 15:13:27 -0800 Subject: [PATCH 48/75] test: Less backtrace noise --- homestar-runtime/tests/cli.rs | 2 ++ homestar-runtime/tests/network.rs | 7 +++++++ homestar-runtime/tests/network/connection.rs | 8 ++++++++ homestar-runtime/tests/network/dht.rs | 11 ++++++++++- homestar-runtime/tests/network/gossip.rs | 2 ++ homestar-runtime/tests/webserver.rs | 1 + 6 files changed, 30 insertions(+), 1 deletion(-) diff --git a/homestar-runtime/tests/cli.rs b/homestar-runtime/tests/cli.rs index 8752b8a0..6a5e6bfc 100644 --- a/homestar-runtime/tests/cli.rs +++ b/homestar-runtime/tests/cli.rs @@ -125,6 +125,7 @@ fn test_server_integration() -> Result<()> { let config = make_config!(toml); Command::new(BIN.as_os_str()) + .env("RUST_BACKTRACE", "0") .arg("start") .arg("-db") .arg(&proc_info.db_path) @@ -132,6 +133,7 @@ fn test_server_integration() -> Result<()> { .failure(); let homestar_proc = Command::new(BIN.as_os_str()) + .env("RUST_BACKTRACE", "0") .arg("start") .arg("-c") .arg(config.filename()) diff --git a/homestar-runtime/tests/network.rs b/homestar-runtime/tests/network.rs index b4b3391a..9a30c086 100644 --- a/homestar-runtime/tests/network.rs +++ b/homestar-runtime/tests/network.rs @@ -55,6 +55,7 @@ fn test_libp2p_generates_peer_id_integration() -> Result<()> { let config = make_config!(toml); let homestar_proc = Command::new(BIN.as_os_str()) +
.env("RUST_BACKTRACE", "0") .arg("start") .arg("-c") .arg(config.filename()) @@ -110,6 +111,7 @@ fn test_libp2p_listens_on_address_integration() -> Result<()> { let config = make_config!(toml); let homestar_proc = Command::new(BIN.as_os_str()) + .env("RUST_BACKTRACE", "0") .arg("start") .arg("-c") .arg(config.filename()) @@ -171,6 +173,7 @@ fn test_rpc_listens_on_address_integration() -> Result<()> { let config = make_config!(toml); let homestar_proc = Command::new(BIN.as_os_str()) + .env("RUST_BACKTRACE", "0") .arg("start") .arg("-c") .arg(config.filename()) @@ -228,6 +231,7 @@ fn test_websocket_listens_on_address_integration() -> Result<()> { let config = make_config!(toml); let homestar_proc = Command::new(BIN.as_os_str()) + .env("RUST_BACKTRACE", "0") .arg("start") .arg("-c") .arg(config.filename()) @@ -286,6 +290,7 @@ fn test_node_info_endpoint_integration() -> Result<()> { let config1 = make_config!(toml); let homestar_proc1 = Command::new(BIN.as_os_str()) + .env("RUST_BACKTRACE", "0") .env( "RUST_LOG", "homestar=debug,homestar_runtime=debug,libp2p=debug,libp2p_gossipsub::behaviour=debug", @@ -354,6 +359,7 @@ fn test_discovery_endpoint_integration() -> Result<()> { let config1 = make_config!(toml); let homestar_proc1 = Command::new(BIN.as_os_str()) + .env("RUST_BACKTRACE", "0") .env( "RUST_LOG", "homestar=debug,homestar_runtime=debug,libp2p=debug,libp2p_gossipsub::behaviour=debug", @@ -425,6 +431,7 @@ fn test_libp2p_configured_with_known_dns_multiaddr() -> Result<()> { let config = make_config!(toml); let homestar_proc = Command::new(BIN.as_os_str()) + .env("RUST_BACKTRACE", "0") .arg("start") .arg("-c") .arg(config.filename()) diff --git a/homestar-runtime/tests/network/connection.rs b/homestar-runtime/tests/network/connection.rs index b31b8936..36f3e2a8 100644 --- a/homestar-runtime/tests/network/connection.rs +++ b/homestar-runtime/tests/network/connection.rs @@ -56,6 +56,7 @@ fn test_connection_notifications_integration() -> Result<()> { let config1 = 
make_config!(toml); let homestar_proc1 = Command::new(BIN.as_os_str()) + .env("RUST_BACKTRACE", "0") .env( "RUST_LOG", "homestar=debug,homestar_runtime=debug,libp2p=debug,libp2p_gossipsub::behaviour=debug", @@ -101,6 +102,7 @@ fn test_connection_notifications_integration() -> Result<()> { let config2 = make_config!(toml2); let homestar_proc2 = Command::new(BIN.as_os_str()) + .env("RUST_BACKTRACE", "0") .env( "RUST_LOG", "homestar=debug,homestar_runtime=debug,libp2p=debug,libp2p_gossipsub::behaviour=debug", @@ -286,6 +288,7 @@ fn test_libp2p_redial_on_connection_closed_integration() -> Result<()> { let config2 = make_config!(toml2); let homestar_proc1 = Command::new(BIN.as_os_str()) + .env("RUST_BACKTRACE", "0") .env( "RUST_LOG", "homestar=debug,homestar_runtime=debug,libp2p=debug,libp2p_gossipsub::behaviour=debug", @@ -309,6 +312,7 @@ fn test_libp2p_redial_on_connection_closed_integration() -> Result<()> { let sub1 = net_events1.sub(); let homestar_proc2 = Command::new(BIN.as_os_str()) + .env("RUST_BACKTRACE", "0") .env( "RUST_LOG", "homestar=debug,homestar_runtime=debug,libp2p=debug,libp2p_gossipsub::behaviour=debug", @@ -354,6 +358,7 @@ fn test_libp2p_redial_on_connection_closed_integration() -> Result<()> { } let homestar_proc2 = Command::new(BIN.as_os_str()) + .env("RUST_BACKTRACE", "0") .env( "RUST_LOG", "homestar=debug,homestar_runtime=debug,libp2p=debug,libp2p_gossipsub::behaviour=debug", @@ -449,6 +454,7 @@ fn test_libp2p_redial_on_connection_error_integration() -> Result<()> { let config2 = make_config!(toml2); let homestar_proc1 = Command::new(BIN.as_os_str()) + .env("RUST_BACKTRACE", "0") .env( "RUST_LOG", "homestar=debug,homestar_runtime=debug,libp2p=debug,libp2p_gossipsub::behaviour=debug", @@ -472,6 +478,7 @@ fn test_libp2p_redial_on_connection_error_integration() -> Result<()> { let sub1 = net_events1.sub(); let homestar_proc2 = Command::new(BIN.as_os_str()) + .env("RUST_BACKTRACE", "0") .env( "RUST_LOG", 
"homestar=debug,homestar_runtime=debug,libp2p=debug,libp2p_gossipsub::behaviour=debug", @@ -545,6 +552,7 @@ fn test_libp2p_redial_on_connection_error_integration() -> Result<()> { } let homestar_proc2 = Command::new(BIN.as_os_str()) + .env("RUST_BACKTRACE", "0") .env( "RUST_LOG", "homestar=debug,homestar_runtime=debug,libp2p=debug,libp2p_gossipsub::behaviour=debug", diff --git a/homestar-runtime/tests/network/dht.rs b/homestar-runtime/tests/network/dht.rs index a86a5bd3..df13efff 100644 --- a/homestar-runtime/tests/network/dht.rs +++ b/homestar-runtime/tests/network/dht.rs @@ -16,7 +16,6 @@ use homestar_runtime::{ use libipld::Cid; use once_cell::sync::Lazy; use std::{ - net::Ipv4Addr, path::PathBuf, process::{Command, Stdio}, str::FromStr, @@ -71,6 +70,7 @@ fn test_libp2p_dht_records_integration() -> Result<()> { let config1 = make_config!(toml1); let homestar_proc1 = Command::new(BIN.as_os_str()) + .env("RUST_BACKTRACE", "0") .env( "RUST_LOG", "homestar=debug,homestar_runtime=debug,libp2p=debug,libp2p_gossipsub::behaviour=debug", @@ -123,6 +123,7 @@ fn test_libp2p_dht_records_integration() -> Result<()> { let config2 = make_config!(toml2); let homestar_proc2 = Command::new(BIN.as_os_str()) + .env("RUST_BACKTRACE", "0") .env( "RUST_LOG", "homestar=debug,homestar_runtime=debug,libp2p=debug,libp2p_gossipsub::behaviour=debug", @@ -376,6 +377,7 @@ fn test_libp2p_dht_quorum_failure_intregration() -> Result<()> { let config1 = make_config!(toml1); let homestar_proc1 = Command::new(BIN.as_os_str()) + .env("RUST_BACKTRACE", "0") .env( "RUST_LOG", "homestar=debug,homestar_runtime=debug,libp2p=debug,libp2p_gossipsub::behaviour=debug", @@ -426,6 +428,7 @@ fn test_libp2p_dht_quorum_failure_intregration() -> Result<()> { let config2 = make_config!(toml2); let homestar_proc2 = Command::new(BIN.as_os_str()) + .env("RUST_BACKTRACE", "0") .env( "RUST_LOG", "homestar=debug,homestar_runtime=debug,libp2p=debug,libp2p_gossipsub::behaviour=debug", @@ -569,6 +572,7 @@ fn 
test_libp2p_dht_workflow_info_provider_integration() -> Result<()> { let config1 = make_config!(toml1); let homestar_proc1 = Command::new(BIN.as_os_str()) + .env("RUST_BACKTRACE", "0") .env( "RUST_LOG", "homestar=debug,homestar_runtime=debug,libp2p=debug,libp2p_gossipsub::behaviour=debug", @@ -621,6 +625,7 @@ fn test_libp2p_dht_workflow_info_provider_integration() -> Result<()> { let config2 = make_config!(toml2); let homestar_proc2 = Command::new(BIN.as_os_str()) + .env("RUST_BACKTRACE", "0") .env( "RUST_LOG", "homestar=debug,homestar_runtime=debug,libp2p=debug,libp2p_gossipsub::behaviour=debug", @@ -861,6 +866,8 @@ fn test_libp2p_dht_workflow_info_provider_recursive_integration() -> Result<()> tokio_test::block_on(async move { let homestar_proc1 = Command::new(BIN.as_os_str()) + .env("RUST_BACKTRACE", "0") + .env( "RUST_LOG", "homestar=debug,homestar_runtime=debug,libp2p=debug,libp2p_gossipsub::behaviour=debug", @@ -912,6 +919,7 @@ fn test_libp2p_dht_workflow_info_provider_recursive_integration() -> Result<()> let config2 = make_config!(toml2); let homestar_proc2 = Command::new(BIN.as_os_str()) + .env("RUST_BACKTRACE", "0") .env( "RUST_LOG", "homestar=debug,homestar_runtime=debug,libp2p=debug,libp2p_gossipsub::behaviour=debug", @@ -963,6 +971,7 @@ fn test_libp2p_dht_workflow_info_provider_recursive_integration() -> Result<()> let config3 = make_config!(toml3); let homestar_proc3 = Command::new(BIN.as_os_str()) + .env("RUST_BACKTRACE", "0") .env( "RUST_LOG", "homestar=debug,homestar_runtime=debug,libp2p=debug,libp2p_gossipsub::behaviour=debug", diff --git a/homestar-runtime/tests/network/gossip.rs b/homestar-runtime/tests/network/gossip.rs index e9cbc848..cace455d 100644 --- a/homestar-runtime/tests/network/gossip.rs +++ b/homestar-runtime/tests/network/gossip.rs @@ -59,6 +59,7 @@ fn test_libp2p_receipt_gossip_integration() -> Result<()> { ); let config1 = make_config!(toml); let homestar_proc1 = Command::new(BIN.as_os_str()) + .env("RUST_BACKTRACE", "0") .env( 
"RUST_LOG", "homestar=debug,homestar_runtime=debug,libp2p=debug,libp2p_gossipsub::behaviour=debug", @@ -104,6 +105,7 @@ fn test_libp2p_receipt_gossip_integration() -> Result<()> { let config2 = make_config!(toml2); let homestar_proc2 = Command::new(BIN.as_os_str()) + .env("RUST_BACKTRACE", "0") .env( "RUST_LOG", "homestar=debug,homestar_runtime=debug,libp2p=debug,libp2p_gossipsub::behaviour=debug", diff --git a/homestar-runtime/tests/webserver.rs b/homestar-runtime/tests/webserver.rs index d6d2ddc2..7b72b506 100644 --- a/homestar-runtime/tests/webserver.rs +++ b/homestar-runtime/tests/webserver.rs @@ -46,6 +46,7 @@ fn test_workflow_run_integration() -> Result<()> { let config = make_config!(toml); let homestar_proc = Command::new(BIN.as_os_str()) + .env("RUST_BACKTRACE", "0") .arg("start") .arg("-c") .arg(config.filename()) From fcca2f7ef7d3fdafaa8b90a07ac71e3562a4b878 Mon Sep 17 00:00:00 2001 From: Brian Ginsburg Date: Wed, 7 Feb 2024 15:24:59 -0800 Subject: [PATCH 49/75] feat: Add schemas generation workflow --- .github/workflows/schemas.yml | 51 +++++++++++++++++++ homestar-runtime/schemas/generate.rs | 73 +++++++++++++++++++--------- 2 files changed, 100 insertions(+), 24 deletions(-) create mode 100644 .github/workflows/schemas.yml diff --git a/.github/workflows/schemas.yml b/.github/workflows/schemas.yml new file mode 100644 index 00000000..487428be --- /dev/null +++ b/.github/workflows/schemas.yml @@ -0,0 +1,51 @@ +name: 📄 Schemas + +on: + push: + branches: [main] + + pull_request: + branches: ["**"] + +permissions: + contents: write + pull-requests: write + +jobs: + schemas: + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v4 + with: + fetch-depth: 0 + token: ${{ secrets.HOMESTAR_UPDATE_TOKEN }} + ref: ${{ github.event.pull_request.head.sha }} + + - name: Install Rust Toolchain + uses: dtolnay/rust-toolchain@stable + + - name: Run generate schemas + run: cargo run --bin schemas + + # - name: Check for modified files + # id: 
git-check + # run: echo modified=$(if git diff-index --quiet HEAD --; then echo "false"; else echo "true"; fi) >> $GITHUB_OUTPUT + + - name: Check for changed files + id: changed-files + uses: tj-actions/changed-files@v42 + with: + files_yaml: | + docs: + - 'homestar-runtime/schemas/docs/*.json' + + - name: Push changes + # if: steps.git-check.outputs.modified == 'true' + if: steps.changed-files-yaml.outputs.docs_any_changed == 'true' + run: | + git config user.name "${GITHUB_ACTOR}" + git config user.email "${GITHUB_ACTOR}@users.noreply.github.com" + git remote set-url origin https://x-access-token:${GITHUB_TOKEN}@github.com/${GITHUB_REPOSITORY}.git + git commit -am "chore(schemas): update OpenRPC API doc and JSON schemas" + git push --force-with-lease origin HEAD:refs/heads/${{ github.head_ref }} diff --git a/homestar-runtime/schemas/generate.rs b/homestar-runtime/schemas/generate.rs index 4dda89f9..9e893b4b 100644 --- a/homestar-runtime/schemas/generate.rs +++ b/homestar-runtime/schemas/generate.rs @@ -20,40 +20,62 @@ use openrpc::document::{ }; fn main() { + println!("{}", env!("CARGO_MANIFEST_DIR")); let health_schema = schema_for!(Health); - let _ = fs::File::create("schemas/docs/health.json") - .unwrap() - .write_all(&serde_json::to_vec_pretty(&health_schema).unwrap()); + let _ = fs::File::create(format!( + "{}/schemas/docs/health.json", + env!("CARGO_MANIFEST_DIR") + )) + .unwrap() + .write_all(&serde_json::to_vec_pretty(&health_schema).unwrap()); let metrics_schema = schema_for!(PrometheusData); - let _ = fs::File::create("schemas/docs/metrics.json") - .unwrap() - .write_all(&serde_json::to_vec_pretty(&metrics_schema).unwrap()); + let _ = fs::File::create(format!( + "{}/schemas/docs/metrics.json", + env!("CARGO_MANIFEST_DIR") + )) + .unwrap() + .write_all(&serde_json::to_vec_pretty(&metrics_schema).unwrap()); let node_info_schema = schema_for!(NodeInfo); - let _ = fs::File::create("schemas/docs/node_info.json") - .unwrap() - 
.write_all(&serde_json::to_vec_pretty(&node_info_schema).unwrap()); + let _ = fs::File::create(format!( + "{}/schemas/docs/node_info.json", + env!("CARGO_MANIFEST_DIR") + )) + .unwrap() + .write_all(&serde_json::to_vec_pretty(&node_info_schema).unwrap()); let network_schema = schema_for!(NetworkNotification); - let _ = fs::File::create("schemas/docs/network.json") - .unwrap() - .write_all(&serde_json::to_vec_pretty(&network_schema).unwrap()); + let _ = fs::File::create(format!( + "{}/schemas/docs/network.json", + env!("CARGO_MANIFEST_DIR") + )) + .unwrap() + .write_all(&serde_json::to_vec_pretty(&network_schema).unwrap()); let workflow_schema = schema_for!(Workflow<'static, ()>); - let _ = fs::File::create("schemas/docs/workflow.json") - .unwrap() - .write_all(&serde_json::to_vec_pretty(&workflow_schema).unwrap()); + let _ = fs::File::create(format!( + "{}/schemas/docs/workflow.json", + env!("CARGO_MANIFEST_DIR") + )) + .unwrap() + .write_all(&serde_json::to_vec_pretty(&workflow_schema).unwrap()); let receipt_schema = schema_for!(Receipt<()>); - let _ = fs::File::create("schemas/docs/receipt.json") - .unwrap() - .write_all(&serde_json::to_vec_pretty(&receipt_schema).unwrap()); + let _ = fs::File::create(format!( + "{}/schemas/docs/receipt.json", + env!("CARGO_MANIFEST_DIR") + )) + .unwrap() + .write_all(&serde_json::to_vec_pretty(&receipt_schema).unwrap()); let receipt_notification_schema = schema_for!(ReceiptNotification); - let _ = fs::File::create("schemas/docs/receipt_notification.json") - .unwrap() - .write_all(&serde_json::to_vec_pretty(&receipt_notification_schema).unwrap()); + let _ = fs::File::create(format!( + "{}/schemas/docs/receipt_notification.json", + env!("CARGO_MANIFEST_DIR") + )) + .unwrap() + .write_all(&serde_json::to_vec_pretty(&receipt_notification_schema).unwrap()); let api_doc = generate_api_doc( health_schema, @@ -63,9 +85,12 @@ fn main() { workflow_schema, receipt_notification_schema, ); - let _ = fs::File::create("schemas/docs/api.json") 
- .unwrap() - .write_all(&serde_json::to_vec_pretty(&api_doc).unwrap()); + let _ = fs::File::create(format!( + "{}/schemas/docs/api.json", + env!("CARGO_MANIFEST_DIR") + )) + .unwrap() + .write_all(&serde_json::to_vec_pretty(&api_doc).unwrap()); } // Spec: https://github.com/open-rpc/spec/blob/1.2.6/spec.md From 4a54efd8d4addc3089026b5d38927d177f619744 Mon Sep 17 00:00:00 2001 From: Brian Ginsburg Date: Wed, 7 Feb 2024 15:54:09 -0800 Subject: [PATCH 50/75] chore: Rename subscribe workflow param to tasks --- .github/workflows/schemas.yml | 5 ----- homestar-runtime/schemas/generate.rs | 2 +- 2 files changed, 1 insertion(+), 6 deletions(-) diff --git a/.github/workflows/schemas.yml b/.github/workflows/schemas.yml index 487428be..1b056c0e 100644 --- a/.github/workflows/schemas.yml +++ b/.github/workflows/schemas.yml @@ -28,10 +28,6 @@ jobs: - name: Run generate schemas run: cargo run --bin schemas - # - name: Check for modified files - # id: git-check - # run: echo modified=$(if git diff-index --quiet HEAD --; then echo "false"; else echo "true"; fi) >> $GITHUB_OUTPUT - - name: Check for changed files id: changed-files uses: tj-actions/changed-files@v42 @@ -41,7 +37,6 @@ jobs: - 'homestar-runtime/schemas/docs/*.json' - name: Push changes - # if: steps.git-check.outputs.modified == 'true' if: steps.changed-files-yaml.outputs.docs_any_changed == 'true' run: | git config user.name "${GITHUB_ACTOR}" diff --git a/homestar-runtime/schemas/generate.rs b/homestar-runtime/schemas/generate.rs index 9e893b4b..ae449d1c 100644 --- a/homestar-runtime/schemas/generate.rs +++ b/homestar-runtime/schemas/generate.rs @@ -268,7 +268,7 @@ fn generate_api_doc( param_structure: Some(MethodObjectParamStructure::ByName), params: vec![ContentDescriptorOrReference::ContentDescriptorObject( ContentDescriptorObject { - name: "workflow".to_string(), + name: "tasks".to_string(), summary: None, description: None, required: Some(true), From dfaf407b4b4f957df4cda61002d8e78ba518e5a9 Mon Sep 17 00:00:00 
2001 From: Brian Ginsburg Date: Wed, 7 Feb 2024 16:06:37 -0800 Subject: [PATCH 51/75] chore: Tinker with schemas action --- .github/workflows/schemas.yml | 30 ++++++++++++++++++++++-------- 1 file changed, 22 insertions(+), 8 deletions(-) diff --git a/.github/workflows/schemas.yml b/.github/workflows/schemas.yml index 1b056c0e..22a11019 100644 --- a/.github/workflows/schemas.yml +++ b/.github/workflows/schemas.yml @@ -28,16 +28,30 @@ jobs: - name: Run generate schemas run: cargo run --bin schemas - - name: Check for changed files - id: changed-files - uses: tj-actions/changed-files@v42 - with: - files_yaml: | - docs: - - 'homestar-runtime/schemas/docs/*.json' + # - name: Check for changed files + # id: changed-files + # uses: tj-actions/changed-files@v42 + # with: + # # files: homestar-runtime/schemas/docs/** + # files: | + # **.json + # since_last_remote_commit: true + + # - name: List all changed files + # env: + # ALL_CHANGED_FILES: ${{ steps.changed-files.outputs.all_changed_files }} + # run: | + # for file in ${ALL_CHANGED_FILES}; do + # echo "$file was changed" + # done + + - name: Check for modified files + id: git-check + run: echo modified=$(if git diff-index --quiet HEAD --; then echo "false"; else echo "true"; fi) >> $GITHUB_OUTPUT - name: Push changes - if: steps.changed-files-yaml.outputs.docs_any_changed == 'true' + # if: steps.changed-files.outputs.any_changed == 'true' + if: steps.git-check.outputs.modified == 'true' run: | git config user.name "${GITHUB_ACTOR}" git config user.email "${GITHUB_ACTOR}@users.noreply.github.com" From d9d2d157b6108872640661eacfa045b90496f76b Mon Sep 17 00:00:00 2001 From: bgins Date: Thu, 8 Feb 2024 01:00:39 +0000 Subject: [PATCH 52/75] chore(schemas): update OpenRPC API doc and JSON schemas --- homestar-runtime/schemas/docs/api.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/homestar-runtime/schemas/docs/api.json b/homestar-runtime/schemas/docs/api.json index e526422b..5cf53bed 100644 --- 
a/homestar-runtime/schemas/docs/api.json +++ b/homestar-runtime/schemas/docs/api.json @@ -1190,7 +1190,7 @@ "paramStructure": "by-name", "params": [ { - "name": "workflow", + "name": "tasks", "schema": { "$schema": "http://json-schema.org/draft-07/schema#", "title": "Workflow", From e2160e2b03de1b66cb2597a5faa1272910566df0 Mon Sep 17 00:00:00 2001 From: Brian Ginsburg Date: Wed, 7 Feb 2024 17:10:56 -0800 Subject: [PATCH 53/75] chore: More tinkering --- .github/workflows/schemas.yml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/.github/workflows/schemas.yml b/.github/workflows/schemas.yml index 22a11019..70dcd25c 100644 --- a/.github/workflows/schemas.yml +++ b/.github/workflows/schemas.yml @@ -49,6 +49,12 @@ jobs: id: git-check run: echo modified=$(if git diff-index --quiet HEAD --; then echo "false"; else echo "true"; fi) >> $GITHUB_OUTPUT + - name: Show check modified value + env: + CHANGED: ${{ steps.git-check.outputs.modified }} + run: | + echo "$CHANGED" + - name: Push changes # if: steps.changed-files.outputs.any_changed == 'true' if: steps.git-check.outputs.modified == 'true' From 6e6efea7dfb888e1ddad34290c60a9d8c8ae9215 Mon Sep 17 00:00:00 2001 From: Brian Ginsburg Date: Thu, 8 Feb 2024 10:51:24 -0800 Subject: [PATCH 54/75] chore: Remove unneeded feature flag --- homestar-runtime/src/event_handler/event.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/homestar-runtime/src/event_handler/event.rs b/homestar-runtime/src/event_handler/event.rs index f95567c5..37af36e8 100644 --- a/homestar-runtime/src/event_handler/event.rs +++ b/homestar-runtime/src/event_handler/event.rs @@ -31,7 +31,6 @@ use libp2p::{ rendezvous::Namespace, PeerId, }; -#[cfg(feature = "websocket-notify")] use std::{ collections::{HashMap, HashSet}, num::NonZeroUsize, From ecc4e4f27ce2bd1ff5a0137f848d631eb21be610 Mon Sep 17 00:00:00 2001 From: Brian Ginsburg Date: Thu, 8 Feb 2024 12:57:11 -0800 Subject: [PATCH 55/75] chore: Move schema generator into non-published crate --- 
.github/workflows/schemas.yml | 2 +- Cargo.lock | 13 +++ Cargo.toml | 1 + homestar-runtime/Cargo.toml | 7 -- homestar-runtime/schemas/{docs => }/api.json | 8 +- homestar-runtime/schemas/docs/.gitkeep | 0 .../schemas/{docs => }/health.json | 0 .../schemas/{docs => }/metrics.json | 0 .../schemas/{docs => }/network.json | 0 .../schemas/{docs => }/node_info.json | 0 .../schemas/{docs => }/receipt.json | 0 .../{docs => }/receipt_notification.json | 0 .../schemas/{docs => }/workflow.json | 0 homestar-runtime/src/network/webserver/rpc.rs | 2 +- homestar-runtime/tests/network.rs | 2 +- homestar-schemas/Cargo.toml | 27 ++++++ .../src/main.rs | 87 ++++++++----------- .../src}/openrpc/document.rs | 0 .../src}/openrpc/mod.rs | 0 19 files changed, 82 insertions(+), 67 deletions(-) rename homestar-runtime/schemas/{docs => }/api.json (99%) delete mode 100644 homestar-runtime/schemas/docs/.gitkeep rename homestar-runtime/schemas/{docs => }/health.json (100%) rename homestar-runtime/schemas/{docs => }/metrics.json (100%) rename homestar-runtime/schemas/{docs => }/network.json (100%) rename homestar-runtime/schemas/{docs => }/node_info.json (100%) rename homestar-runtime/schemas/{docs => }/receipt.json (100%) rename homestar-runtime/schemas/{docs => }/receipt_notification.json (100%) rename homestar-runtime/schemas/{docs => }/workflow.json (100%) create mode 100644 homestar-schemas/Cargo.toml rename homestar-runtime/schemas/generate.rs => homestar-schemas/src/main.rs (84%) rename {homestar-runtime/schemas => homestar-schemas/src}/openrpc/document.rs (100%) rename {homestar-runtime/schemas => homestar-schemas/src}/openrpc/mod.rs (100%) diff --git a/.github/workflows/schemas.yml b/.github/workflows/schemas.yml index 70dcd25c..b50383af 100644 --- a/.github/workflows/schemas.yml +++ b/.github/workflows/schemas.yml @@ -26,7 +26,7 @@ jobs: uses: dtolnay/rust-toolchain@stable - name: Run generate schemas - run: cargo run --bin schemas + run: cargo run -p homestar-schemas # - name: Check 
for changed files # id: changed-files diff --git a/Cargo.lock b/Cargo.lock index 36ea47d3..949a16bb 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2637,6 +2637,19 @@ dependencies = [ "syn 2.0.48", ] +[[package]] +name = "homestar-schemas" +version = "0.1.1" +dependencies = [ + "homestar-invocation", + "homestar-runtime", + "homestar-workflow", + "homestar-workspace-hack", + "schemars", + "serde", + "serde_json", +] + [[package]] name = "homestar-wasm" version = "0.1.1" diff --git a/Cargo.toml b/Cargo.toml index e045eaba..bbbfc17c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -4,6 +4,7 @@ members = [ "homestar-functions/*", "homestar-invocation", "homestar-runtime", + "homestar-schemas", "homestar-wasm", "homestar-workflow", "homestar-workspace-hack", diff --git a/homestar-runtime/Cargo.toml b/homestar-runtime/Cargo.toml index 43da2744..7931ad21 100644 --- a/homestar-runtime/Cargo.toml +++ b/homestar-runtime/Cargo.toml @@ -25,13 +25,6 @@ path = "src/main.rs" doc = false bench = false -[[bin]] -name = "schemas" -path = "schemas/generate.rs" -bench = false -doc = false -test = false - [[test]] name = "integration" path = "tests/main.rs" diff --git a/homestar-runtime/schemas/docs/api.json b/homestar-runtime/schemas/api.json similarity index 99% rename from homestar-runtime/schemas/docs/api.json rename to homestar-runtime/schemas/api.json index 5cf53bed..cc831a76 100644 --- a/homestar-runtime/schemas/docs/api.json +++ b/homestar-runtime/schemas/api.json @@ -2,15 +2,15 @@ "openrpc": "1.2.6", "info": { "title": "homestar", - "description": "Homestar runtime implementation", - "version": "0.10.0", + "description": "", + "version": "0.1.0", "contact": { "name": null, "email": null, - "url": "https://github.com/ipvm-wg/homestar/tree/main/homestar-runtime" + "url": "" }, "license": { - "name": "Apache-2.0", + "name": "", "url": null } }, diff --git a/homestar-runtime/schemas/docs/.gitkeep b/homestar-runtime/schemas/docs/.gitkeep deleted file mode 100644 index e69de29b..00000000 
diff --git a/homestar-runtime/schemas/docs/health.json b/homestar-runtime/schemas/health.json similarity index 100% rename from homestar-runtime/schemas/docs/health.json rename to homestar-runtime/schemas/health.json diff --git a/homestar-runtime/schemas/docs/metrics.json b/homestar-runtime/schemas/metrics.json similarity index 100% rename from homestar-runtime/schemas/docs/metrics.json rename to homestar-runtime/schemas/metrics.json diff --git a/homestar-runtime/schemas/docs/network.json b/homestar-runtime/schemas/network.json similarity index 100% rename from homestar-runtime/schemas/docs/network.json rename to homestar-runtime/schemas/network.json diff --git a/homestar-runtime/schemas/docs/node_info.json b/homestar-runtime/schemas/node_info.json similarity index 100% rename from homestar-runtime/schemas/docs/node_info.json rename to homestar-runtime/schemas/node_info.json diff --git a/homestar-runtime/schemas/docs/receipt.json b/homestar-runtime/schemas/receipt.json similarity index 100% rename from homestar-runtime/schemas/docs/receipt.json rename to homestar-runtime/schemas/receipt.json diff --git a/homestar-runtime/schemas/docs/receipt_notification.json b/homestar-runtime/schemas/receipt_notification.json similarity index 100% rename from homestar-runtime/schemas/docs/receipt_notification.json rename to homestar-runtime/schemas/receipt_notification.json diff --git a/homestar-runtime/schemas/docs/workflow.json b/homestar-runtime/schemas/workflow.json similarity index 100% rename from homestar-runtime/schemas/docs/workflow.json rename to homestar-runtime/schemas/workflow.json diff --git a/homestar-runtime/src/network/webserver/rpc.rs b/homestar-runtime/src/network/webserver/rpc.rs index 308b2b44..4cdc1512 100644 --- a/homestar-runtime/src/network/webserver/rpc.rs +++ b/homestar-runtime/src/network/webserver/rpc.rs @@ -45,7 +45,7 @@ use tracing::debug; use tracing::{error, warn}; /// OpenRPC API document -const API_SCHEMA_DOC: &str = 
include_str!("../../../schemas/docs/api.json"); +const API_SCHEMA_DOC: &str = include_str!("../../../schemas/api.json"); /// OpenRPC API discovery endpoint. pub(crate) const DISCOVER_ENDPOINT: &str = "rpc_discover"; diff --git a/homestar-runtime/tests/network.rs b/homestar-runtime/tests/network.rs index 9a30c086..cb23ef2c 100644 --- a/homestar-runtime/tests/network.rs +++ b/homestar-runtime/tests/network.rs @@ -387,7 +387,7 @@ fn test_discovery_endpoint_integration() -> Result<()> { assert_eq!(http_resp.status(), 200); let http_resp = http_resp.json::().await.unwrap(); - const API_SCHEMA_DOC: &str = include_str!("../schemas/docs/api.json"); + const API_SCHEMA_DOC: &str = include_str!("../schemas/api.json"); assert_eq!(http_resp, serde_json::json!(API_SCHEMA_DOC)); }); diff --git a/homestar-schemas/Cargo.toml b/homestar-schemas/Cargo.toml new file mode 100644 index 00000000..bfa6a3d8 --- /dev/null +++ b/homestar-schemas/Cargo.toml @@ -0,0 +1,27 @@ +[package] +name = "homestar-schemas" +publish = false +version = { workspace = true } +edition = { workspace = true } +rust-version = { workspace = true } + +[dependencies] +homestar-invocation = { version = "0.1", path = "../homestar-invocation", default-features = false } +homestar-runtime = { version = "0.1", path = "../homestar-runtime", default-features = false, features = [ + "websocket-notify", +] } +homestar-workflow = { version = "0.1", path = "../homestar-workflow", default-features = false } +homestar-workspace-hack = { workspace = true } +schemars = { workspace = true } +serde = { workspace = true } +serde_json = { workspace = true } + +[[bin]] +name = "schemas" +path = "src/main.rs" +bench = false +doc = false +test = false + +[features] +default = [] diff --git a/homestar-runtime/schemas/generate.rs b/homestar-schemas/src/main.rs similarity index 84% rename from homestar-runtime/schemas/generate.rs rename to homestar-schemas/src/main.rs index ae449d1c..b1b640ba 100644 --- 
a/homestar-runtime/schemas/generate.rs +++ b/homestar-schemas/src/main.rs @@ -1,5 +1,4 @@ -//! Standalone binary to generate OpenRPC API docs and -//! JSON Schemas for method params and notifications. +//! Binary to generate OpenRPC API docs and JSON Schemas. use homestar_invocation::Receipt; use homestar_runtime::{ @@ -11,7 +10,6 @@ use schemars::{ schema_for, }; use std::{fs, io::Write}; - mod openrpc; use openrpc::document::{ ContactObject, ContentDescriptorObject, ContentDescriptorOrReference, @@ -20,62 +18,48 @@ use openrpc::document::{ }; fn main() { - println!("{}", env!("CARGO_MANIFEST_DIR")); + fn schema_path(name: &str) -> String { + format!( + "{}/../homestar-runtime/schemas/{}", + env!("CARGO_MANIFEST_DIR"), + name + ) + } + let health_schema = schema_for!(Health); - let _ = fs::File::create(format!( - "{}/schemas/docs/health.json", - env!("CARGO_MANIFEST_DIR") - )) - .unwrap() - .write_all(&serde_json::to_vec_pretty(&health_schema).unwrap()); + let _ = fs::File::create(schema_path("health.json")) + .unwrap() + .write_all(&serde_json::to_vec_pretty(&health_schema).unwrap()); let metrics_schema = schema_for!(PrometheusData); - let _ = fs::File::create(format!( - "{}/schemas/docs/metrics.json", - env!("CARGO_MANIFEST_DIR") - )) - .unwrap() - .write_all(&serde_json::to_vec_pretty(&metrics_schema).unwrap()); + let _ = fs::File::create(schema_path("metrics.json")) + .unwrap() + .write_all(&serde_json::to_vec_pretty(&metrics_schema).unwrap()); let node_info_schema = schema_for!(NodeInfo); - let _ = fs::File::create(format!( - "{}/schemas/docs/node_info.json", - env!("CARGO_MANIFEST_DIR") - )) - .unwrap() - .write_all(&serde_json::to_vec_pretty(&node_info_schema).unwrap()); + let _ = fs::File::create(schema_path("node_info.json")) + .unwrap() + .write_all(&serde_json::to_vec_pretty(&node_info_schema).unwrap()); let network_schema = schema_for!(NetworkNotification); - let _ = fs::File::create(format!( - "{}/schemas/docs/network.json", - 
env!("CARGO_MANIFEST_DIR") - )) - .unwrap() - .write_all(&serde_json::to_vec_pretty(&network_schema).unwrap()); + let _ = fs::File::create(schema_path("network.json")) + .unwrap() + .write_all(&serde_json::to_vec_pretty(&network_schema).unwrap()); let workflow_schema = schema_for!(Workflow<'static, ()>); - let _ = fs::File::create(format!( - "{}/schemas/docs/workflow.json", - env!("CARGO_MANIFEST_DIR") - )) - .unwrap() - .write_all(&serde_json::to_vec_pretty(&workflow_schema).unwrap()); + let _ = fs::File::create(schema_path("workflow.json")) + .unwrap() + .write_all(&serde_json::to_vec_pretty(&workflow_schema).unwrap()); let receipt_schema = schema_for!(Receipt<()>); - let _ = fs::File::create(format!( - "{}/schemas/docs/receipt.json", - env!("CARGO_MANIFEST_DIR") - )) - .unwrap() - .write_all(&serde_json::to_vec_pretty(&receipt_schema).unwrap()); + let _ = fs::File::create(schema_path("receipt.json")) + .unwrap() + .write_all(&serde_json::to_vec_pretty(&receipt_schema).unwrap()); let receipt_notification_schema = schema_for!(ReceiptNotification); - let _ = fs::File::create(format!( - "{}/schemas/docs/receipt_notification.json", - env!("CARGO_MANIFEST_DIR") - )) - .unwrap() - .write_all(&serde_json::to_vec_pretty(&receipt_notification_schema).unwrap()); + let _ = fs::File::create(schema_path("receipt_notification.json")) + .unwrap() + .write_all(&serde_json::to_vec_pretty(&receipt_notification_schema).unwrap()); let api_doc = generate_api_doc( health_schema, @@ -85,12 +69,9 @@ fn main() { workflow_schema, receipt_notification_schema, ); - let _ = fs::File::create(format!( - "{}/schemas/docs/api.json", - env!("CARGO_MANIFEST_DIR") - )) - .unwrap() - .write_all(&serde_json::to_vec_pretty(&api_doc).unwrap()); + let _ = fs::File::create(schema_path("api.json")) + .unwrap() + .write_all(&serde_json::to_vec_pretty(&api_doc).unwrap()); } // Spec: https://github.com/open-rpc/spec/blob/1.2.6/spec.md @@ -329,7 +310,7 @@ fn generate_api_doc( title: "homestar".to_string(), 
description: Some(env!("CARGO_PKG_DESCRIPTION").into()), terms_of_service: None, - version: "0.10.0".to_string(), + version: "0.1.0".to_string(), contact: Some(ContactObject { name: None, url: Some(env!("CARGO_PKG_REPOSITORY").into()), diff --git a/homestar-runtime/schemas/openrpc/document.rs b/homestar-schemas/src/openrpc/document.rs similarity index 100% rename from homestar-runtime/schemas/openrpc/document.rs rename to homestar-schemas/src/openrpc/document.rs diff --git a/homestar-runtime/schemas/openrpc/mod.rs b/homestar-schemas/src/openrpc/mod.rs similarity index 100% rename from homestar-runtime/schemas/openrpc/mod.rs rename to homestar-schemas/src/openrpc/mod.rs From 873b417160bbba32ffeec626099118b7df0ac179 Mon Sep 17 00:00:00 2001 From: Brian Ginsburg Date: Thu, 8 Feb 2024 13:07:52 -0800 Subject: [PATCH 56/75] chore: Add missing websocket-notify flags --- homestar-runtime/src/event_handler/swarm_event.rs | 9 +++++++-- homestar-runtime/tests/network.rs | 2 ++ homestar-runtime/tests/utils.rs | 10 ++++++++++ 3 files changed, 19 insertions(+), 2 deletions(-) diff --git a/homestar-runtime/src/event_handler/swarm_event.rs b/homestar-runtime/src/event_handler/swarm_event.rs index e3b19b65..9e04dfa6 100644 --- a/homestar-runtime/src/event_handler/swarm_event.rs +++ b/homestar-runtime/src/event_handler/swarm_event.rs @@ -25,6 +25,8 @@ use crate::{ use anyhow::{anyhow, Result}; use async_trait::async_trait; use libipld::Cid; +#[cfg(feature = "websocket-notify")] +use libp2p::Multiaddr; use libp2p::{ gossipsub, identify, kad, kad::{AddProviderOk, BootstrapOk, GetProvidersOk, GetRecordOk, PutRecordOk, QueryResult}, @@ -33,9 +35,11 @@ use libp2p::{ rendezvous::{self, Namespace, Registration}, request_response, swarm::{dial_opts::DialOpts, SwarmEvent}, - Multiaddr, PeerId, StreamProtocol, + PeerId, StreamProtocol, }; -use std::collections::{BTreeMap, HashMap, HashSet}; +#[cfg(feature = "websocket-notify")] +use std::collections::BTreeMap; +use std::collections::{HashMap, 
HashSet}; use tracing::{debug, error, info, warn}; pub(crate) mod record; @@ -463,6 +467,7 @@ async fn handle_swarm_event( subject = "libp2p.rendezvous.server.peer_registered", category = "handle_swarm_event", peer_id = peer.to_string(), + addresses = ?registration.record.addresses(), "registered peer through rendezvous" ); diff --git a/homestar-runtime/tests/network.rs b/homestar-runtime/tests/network.rs index cb23ef2c..54aaef7c 100644 --- a/homestar-runtime/tests/network.rs +++ b/homestar-runtime/tests/network.rs @@ -19,7 +19,9 @@ mod connection; mod dht; #[cfg(feature = "websocket-notify")] mod gossip; +#[cfg(feature = "websocket-notify")] mod mdns; +#[cfg(feature = "websocket-notify")] mod rendezvous; static BIN: Lazy = Lazy::new(|| assert_cmd::cargo::cargo_bin(BIN_NAME)); diff --git a/homestar-runtime/tests/utils.rs b/homestar-runtime/tests/utils.rs index 25e88210..2030c113 100644 --- a/homestar-runtime/tests/utils.rs +++ b/homestar-runtime/tests/utils.rs @@ -2,6 +2,7 @@ use anyhow::{bail, Context, Result}; #[cfg(not(windows))] use assert_cmd::prelude::*; use chrono::{DateTime, FixedOffset}; +#[cfg(feature = "websocket-notify")] use jsonrpsee::{ core::client::{Client, Subscription, SubscriptionClientT}, rpc_params, @@ -40,12 +41,16 @@ pub(crate) const BIN_NAME: &str = "homestar"; /// Test-default ed25519 multihash. pub(crate) const ED25519MULTIHASH: &str = "12D3KooWDpJ7As7BWAwRMfu1VU2WCqNjvq387JEYKDBj4kx6nXTN"; /// Test-default ed25519 multihash 2. +#[cfg(feature = "websocket-notify")] pub(crate) const ED25519MULTIHASH2: &str = "12D3KooWK99VoVxNE7XzyBwXEzW7xhK7Gpv85r9F3V3fyKSUKPH5"; /// Test-default ed25519 multihash 3. +#[cfg(feature = "websocket-notify")] pub(crate) const ED25519MULTIHASH3: &str = "12D3KooWJWoaqZhDaoEFshF7Rh1bpY9ohihFhzcW6d69Lr2NASuq"; /// Test-default ed25519 multihash 4. 
+#[cfg(feature = "websocket-notify")] pub(crate) const ED25519MULTIHASH4: &str = "12D3KooWRndVhVZPCiQwHBBBdg769GyrPUW13zxwqQyf9r3ANaba"; /// Test-default ed25519 multihash 5. +#[cfg(feature = "websocket-notify")] pub(crate) const ED25519MULTIHASH5: &str = "12D3KooWPT98FXMfDQYavZm66EeVjTqP9Nnehn1gyaydqV8L8BQw"; /// Test-default secp256k1 multihash. pub(crate) const SECP256K1MULTIHASH: &str = "16Uiu2HAm3g9AomQNeEctL2hPwLapap7AtPSNt8ZrBny4rLx1W5Dc"; @@ -56,6 +61,7 @@ pub(crate) fn listen_addr(port: u16) -> String { } /// Return multiaddr address. +#[cfg(feature = "websocket-notify")] pub(crate) fn multiaddr(port: u16, hash: &str) -> String { format!("/ip4/127.0.0.1/tcp/{port}/p2p/{hash}") } @@ -221,6 +227,7 @@ pub(crate) fn check_for_line_with(output: String, predicates: Vec<&str>) -> bool .any(|curr| curr) } +#[cfg(feature = "websocket-notify")] pub(crate) fn count_lines_where(output: String, predicates: Vec<&str>) -> i32 { output.split('\n').fold(0, |count, line| { if line_contains(line, &predicates) { @@ -378,12 +385,14 @@ pub(crate) fn wait_for_socket_connection_v6(port: u16, exp_retry_base: u64) -> R } /// Client and subscription. +#[cfg(feature = "websocket-notify")] pub(crate) struct WsClientSub { #[allow(dead_code)] client: Client, sub: Subscription>, } +#[cfg(feature = "websocket-notify")] impl WsClientSub { pub(crate) fn sub(&mut self) -> &mut Subscription> { &mut self.sub @@ -392,6 +401,7 @@ impl WsClientSub { /// Helper function to subscribe to network events /// Note that the client must not be dropped of the sub will return only None. 
+#[cfg(feature = "websocket-notify")] pub(crate) async fn subscribe_network_events(ws_port: u16) -> WsClientSub { const SUBSCRIBE_NETWORK_EVENTS_ENDPOINT: &str = "subscribe_network_events"; const UNSUBSCRIBE_NETWORK_EVENTS_ENDPOINT: &str = "unsubscribe_network_events"; From dd0f631d2b140e048262e5124c195315bd40f82b Mon Sep 17 00:00:00 2001 From: Brian Ginsburg Date: Thu, 8 Feb 2024 13:18:47 -0800 Subject: [PATCH 57/75] chore: More schemas action tweaks --- .../workflows/{audit.yml => audit.yml.bak} | 2 +- .../workflows/{builds.yml => builds.yml.bak} | 0 .../{coverage.yml => coverage.yml.bak} | 4 +-- ...endabot_pr.yaml => dependabot_pr.yaml.bak} | 0 .../workflows/{docker.yml => docker.yml.bak} | 0 .github/workflows/{nix.yml => nix.yml.bak} | 0 .../{release.yml => release.yml.bak} | 0 .github/workflows/schemas.yml | 27 +++++++++++++++++++ .github/workflows/tests_and_checks.yml | 1 - homestar-schemas/src/main.rs | 2 +- 10 files changed, 31 insertions(+), 5 deletions(-) rename .github/workflows/{audit.yml => audit.yml.bak} (93%) rename .github/workflows/{builds.yml => builds.yml.bak} (100%) rename .github/workflows/{coverage.yml => coverage.yml.bak} (97%) rename .github/workflows/{dependabot_pr.yaml => dependabot_pr.yaml.bak} (100%) rename .github/workflows/{docker.yml => docker.yml.bak} (100%) rename .github/workflows/{nix.yml => nix.yml.bak} (100%) rename .github/workflows/{release.yml => release.yml.bak} (100%) diff --git a/.github/workflows/audit.yml b/.github/workflows/audit.yml.bak similarity index 93% rename from .github/workflows/audit.yml rename to .github/workflows/audit.yml.bak index 569d60d3..0d68ae96 100644 --- a/.github/workflows/audit.yml +++ b/.github/workflows/audit.yml.bak @@ -2,7 +2,7 @@ name: 🛡 Audit-Check on: schedule: - - cron: '0 0 * * *' + - cron: "0 0 * * *" jobs: security-audit: diff --git a/.github/workflows/builds.yml b/.github/workflows/builds.yml.bak similarity index 100% rename from .github/workflows/builds.yml rename to 
.github/workflows/builds.yml.bak diff --git a/.github/workflows/coverage.yml b/.github/workflows/coverage.yml.bak similarity index 97% rename from .github/workflows/coverage.yml rename to .github/workflows/coverage.yml.bak index ea070df6..e77a0c29 100644 --- a/.github/workflows/coverage.yml +++ b/.github/workflows/coverage.yml.bak @@ -2,10 +2,10 @@ name: ☂ Code Coverage on: push: - branches: [ main ] + branches: [main] pull_request: - branches: [ '*' ] + branches: ["*"] concurrency: group: ${{ github.workflow }}-${{ github.ref }} diff --git a/.github/workflows/dependabot_pr.yaml b/.github/workflows/dependabot_pr.yaml.bak similarity index 100% rename from .github/workflows/dependabot_pr.yaml rename to .github/workflows/dependabot_pr.yaml.bak diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml.bak similarity index 100% rename from .github/workflows/docker.yml rename to .github/workflows/docker.yml.bak diff --git a/.github/workflows/nix.yml b/.github/workflows/nix.yml.bak similarity index 100% rename from .github/workflows/nix.yml rename to .github/workflows/nix.yml.bak diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml.bak similarity index 100% rename from .github/workflows/release.yml rename to .github/workflows/release.yml.bak diff --git a/.github/workflows/schemas.yml b/.github/workflows/schemas.yml index b50383af..0a297b16 100644 --- a/.github/workflows/schemas.yml +++ b/.github/workflows/schemas.yml @@ -12,7 +12,34 @@ permissions: pull-requests: write jobs: + changes: + runs-on: ubuntu-latest + permissions: + pull-requests: read + outputs: + rust: ${{ steps.filter.outputs.rust }} + steps: + - name: Checkout Repository + uses: actions/checkout@v4 + - uses: dorny/paths-filter@v3 + id: filter + with: + filters: | + rust: + - '**/Cargo.*' + - '**/src/**' + - '**/tests/**' + - '**/build.rs' + - '**/migrations/**' + - '**/fixtures/**' + - '**/wit/**' + schemas: + needs: changes + if: ${{ needs.changes.outputs.rust == 'true' 
}} + env: + SCCACHE_GHA_ENABLED: "true" + RUSTC_WRAPPER: "sccache" runs-on: ubuntu-latest steps: - name: Checkout diff --git a/.github/workflows/tests_and_checks.yml b/.github/workflows/tests_and_checks.yml index 23ca9de8..20e3befb 100644 --- a/.github/workflows/tests_and_checks.yml +++ b/.github/workflows/tests_and_checks.yml @@ -89,7 +89,6 @@ jobs: - name: Run Linter run: cargo clippy --all -- -D warnings - continue-on-error: ${{ matrix.rust-toolchain == 'nightly' && matrix.os == 'macos-14' }} - name: Install cargo-hakari if: ${{ matrix.rust-toolchain == 'stable' }} diff --git a/homestar-schemas/src/main.rs b/homestar-schemas/src/main.rs index b1b640ba..28219977 100644 --- a/homestar-schemas/src/main.rs +++ b/homestar-schemas/src/main.rs @@ -119,7 +119,7 @@ fn generate_api_doc( summary: None, servers: None, tags: None, - param_structure: Some(MethodObjectParamStructure::ByName), + param_structure: Some(MethodObjectParamStructure::Either), params: vec![], result: ContentDescriptorOrReference::ContentDescriptorObject(ContentDescriptorObject { name: "health".to_string(), From a0fd065d5694e71ec84e106ff2bac9ccddc7a16d Mon Sep 17 00:00:00 2001 From: Brian Ginsburg Date: Thu, 8 Feb 2024 13:41:29 -0800 Subject: [PATCH 58/75] chore: More schemas action tinkering part two --- .github/workflows/schemas.yml | 9 +++++---- .../{tests_and_checks.yml => tests_and_checks.yml.bak} | 0 homestar-schemas/src/main.rs | 2 +- 3 files changed, 6 insertions(+), 5 deletions(-) rename .github/workflows/{tests_and_checks.yml => tests_and_checks.yml.bak} (100%) diff --git a/.github/workflows/schemas.yml b/.github/workflows/schemas.yml index 0a297b16..7e5dcbc9 100644 --- a/.github/workflows/schemas.yml +++ b/.github/workflows/schemas.yml @@ -2,6 +2,7 @@ name: 📄 Schemas on: push: + # branches: [main, "**"] branches: [main] pull_request: @@ -37,9 +38,9 @@ jobs: schemas: needs: changes if: ${{ needs.changes.outputs.rust == 'true' }} - env: - SCCACHE_GHA_ENABLED: "true" - RUSTC_WRAPPER: "sccache" 
+ # env: + # SCCACHE_GHA_ENABLED: "true" + # RUSTC_WRAPPER: "sccache" runs-on: ubuntu-latest steps: - name: Checkout @@ -90,4 +91,4 @@ jobs: git config user.email "${GITHUB_ACTOR}@users.noreply.github.com" git remote set-url origin https://x-access-token:${GITHUB_TOKEN}@github.com/${GITHUB_REPOSITORY}.git git commit -am "chore(schemas): update OpenRPC API doc and JSON schemas" - git push --force-with-lease origin HEAD:refs/heads/${{ github.head_ref }} + git push --force-with-lease origin HEAD:refs/heads/${{ github.ref }} diff --git a/.github/workflows/tests_and_checks.yml b/.github/workflows/tests_and_checks.yml.bak similarity index 100% rename from .github/workflows/tests_and_checks.yml rename to .github/workflows/tests_and_checks.yml.bak diff --git a/homestar-schemas/src/main.rs b/homestar-schemas/src/main.rs index 28219977..949de8bb 100644 --- a/homestar-schemas/src/main.rs +++ b/homestar-schemas/src/main.rs @@ -143,7 +143,7 @@ fn generate_api_doc( summary: None, servers: None, tags: None, - param_structure: Some(MethodObjectParamStructure::ByName), + param_structure: Some(MethodObjectParamStructure::Either), params: vec![], result: ContentDescriptorOrReference::ContentDescriptorObject(ContentDescriptorObject { name: "metrics".to_string(), From 7d855849273479fe1a1e0ee0cc6ca7e1cf563482 Mon Sep 17 00:00:00 2001 From: Brian Ginsburg Date: Thu, 8 Feb 2024 14:19:58 -0800 Subject: [PATCH 59/75] chore: More schema action tinkering part three --- .github/workflows/schemas.yml | 44 +++++++++++++++++++++-------------- Cargo.lock | 2 +- homestar-schemas/Cargo.toml | 2 +- 3 files changed, 28 insertions(+), 20 deletions(-) diff --git a/.github/workflows/schemas.yml b/.github/workflows/schemas.yml index 7e5dcbc9..1d5526a3 100644 --- a/.github/workflows/schemas.yml +++ b/.github/workflows/schemas.yml @@ -2,11 +2,10 @@ name: 📄 Schemas on: push: - # branches: [main, "**"] - branches: [main] + branches: [main, "**"] - pull_request: - branches: ["**"] + # pull_request: + # 
branches: ["**"] permissions: contents: write @@ -15,8 +14,6 @@ permissions: jobs: changes: runs-on: ubuntu-latest - permissions: - pull-requests: read outputs: rust: ${{ steps.filter.outputs.rust }} steps: @@ -27,20 +24,17 @@ jobs: with: filters: | rust: - - '**/Cargo.*' - - '**/src/**' - - '**/tests/**' - - '**/build.rs' - - '**/migrations/**' - - '**/fixtures/**' - - '**/wit/**' + - 'homestar-invocation/src/**' + - 'homestar-runtime/src/**' + - 'homestar-schemas/src/**' + - 'homestar-workflow/src/**' schemas: needs: changes if: ${{ needs.changes.outputs.rust == 'true' }} - # env: - # SCCACHE_GHA_ENABLED: "true" - # RUSTC_WRAPPER: "sccache" + env: + SCCACHE_GHA_ENABLED: "true" + RUSTC_WRAPPER: "sccache" runs-on: ubuntu-latest steps: - name: Checkout @@ -53,6 +47,16 @@ jobs: - name: Install Rust Toolchain uses: dtolnay/rust-toolchain@stable + - name: Cache Project + uses: Swatinem/rust-cache@v2 + with: + cache-on-failure: true + shared-key: test-all-stable-ubuntu-latest + save-if: ${{ github.event_name == 'push' }} + + - name: Sccache + uses: mozilla-actions/sccache-action@v0.0.3 + - name: Run generate schemas run: cargo run -p homestar-schemas @@ -75,13 +79,17 @@ jobs: - name: Check for modified files id: git-check - run: echo modified=$(if git diff-index --quiet HEAD --; then echo "false"; else echo "true"; fi) >> $GITHUB_OUTPUT + run: | + echo modified=$(if git diff-index --quiet HEAD --; then echo "false"; else echo "true"; fi) >> $GITHUB_OUTPUT + echo diff=$(git diff) >> $GITHUB_OUTPUT - name: Show check modified value env: CHANGED: ${{ steps.git-check.outputs.modified }} + DIFF: ${{ steps.git-check.outputs.diff }} run: | echo "$CHANGED" + echo "$DIFF" - name: Push changes # if: steps.changed-files.outputs.any_changed == 'true' @@ -91,4 +99,4 @@ jobs: git config user.email "${GITHUB_ACTOR}@users.noreply.github.com" git remote set-url origin https://x-access-token:${GITHUB_TOKEN}@github.com/${GITHUB_REPOSITORY}.git git commit -am "chore(schemas): update 
OpenRPC API doc and JSON schemas" - git push --force-with-lease origin HEAD:refs/heads/${{ github.ref }} + git push diff --git a/Cargo.lock b/Cargo.lock index 949a16bb..e0bd1e5a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2639,7 +2639,7 @@ dependencies = [ [[package]] name = "homestar-schemas" -version = "0.1.1" +version = "0.1.0" dependencies = [ "homestar-invocation", "homestar-runtime", diff --git a/homestar-schemas/Cargo.toml b/homestar-schemas/Cargo.toml index bfa6a3d8..1f237496 100644 --- a/homestar-schemas/Cargo.toml +++ b/homestar-schemas/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "homestar-schemas" publish = false -version = { workspace = true } +version = "0.1.0" edition = { workspace = true } rust-version = { workspace = true } From 30854dbc839b0cd00f4700445b000b342ecd64b1 Mon Sep 17 00:00:00 2001 From: Brian Ginsburg Date: Thu, 8 Feb 2024 15:09:18 -0800 Subject: [PATCH 60/75] chore: More tinkering part four --- .github/workflows/schemas.yml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/.github/workflows/schemas.yml b/.github/workflows/schemas.yml index 1d5526a3..77511b5a 100644 --- a/.github/workflows/schemas.yml +++ b/.github/workflows/schemas.yml @@ -79,8 +79,10 @@ jobs: - name: Check for modified files id: git-check + shell: bash run: | - echo modified=$(if git diff-index --quiet HEAD --; then echo "false"; else echo "true"; fi) >> $GITHUB_OUTPUT + # echo modified=$(if git diff-index --quiet HEAD --; then echo "false"; else echo "true"; fi) >> $GITHUB_OUTPUT + echo modified=$(if [[ $(git diff) ]]; then echo "true"; else echo "false"; fi) >> $GITHUB_OUTPUT echo diff=$(git diff) >> $GITHUB_OUTPUT - name: Show check modified value From 54fc09535ffde4c5cabbed6ce02c8a8683773c42 Mon Sep 17 00:00:00 2001 From: Brian Ginsburg Date: Thu, 8 Feb 2024 15:31:25 -0800 Subject: [PATCH 61/75] chore: Clean up schema action --- .github/workflows/schemas.yml | 35 ++--------------------------------- 1 file changed, 2 insertions(+), 33 
deletions(-) diff --git a/.github/workflows/schemas.yml b/.github/workflows/schemas.yml index 77511b5a..0baf9aad 100644 --- a/.github/workflows/schemas.yml +++ b/.github/workflows/schemas.yml @@ -4,9 +4,6 @@ on: push: branches: [main, "**"] - # pull_request: - # branches: ["**"] - permissions: contents: write pull-requests: write @@ -60,41 +57,13 @@ jobs: - name: Run generate schemas run: cargo run -p homestar-schemas - # - name: Check for changed files - # id: changed-files - # uses: tj-actions/changed-files@v42 - # with: - # # files: homestar-runtime/schemas/docs/** - # files: | - # **.json - # since_last_remote_commit: true - - # - name: List all changed files - # env: - # ALL_CHANGED_FILES: ${{ steps.changed-files.outputs.all_changed_files }} - # run: | - # for file in ${ALL_CHANGED_FILES}; do - # echo "$file was changed" - # done - - - name: Check for modified files + - name: Check for modified schemas id: git-check shell: bash run: | - # echo modified=$(if git diff-index --quiet HEAD --; then echo "false"; else echo "true"; fi) >> $GITHUB_OUTPUT - echo modified=$(if [[ $(git diff) ]]; then echo "true"; else echo "false"; fi) >> $GITHUB_OUTPUT - echo diff=$(git diff) >> $GITHUB_OUTPUT - - - name: Show check modified value - env: - CHANGED: ${{ steps.git-check.outputs.modified }} - DIFF: ${{ steps.git-check.outputs.diff }} - run: | - echo "$CHANGED" - echo "$DIFF" + echo modified=$(if [[ $(git diff homestar-runtime/schemas/) ]]; then echo "true"; else echo "false"; fi) >> $GITHUB_OUTPUT - name: Push changes - # if: steps.changed-files.outputs.any_changed == 'true' if: steps.git-check.outputs.modified == 'true' run: | git config user.name "${GITHUB_ACTOR}" From 63e778c5c03b39fbfaaea1b37898f764a96855e4 Mon Sep 17 00:00:00 2001 From: Brian Ginsburg Date: Thu, 8 Feb 2024 15:53:34 -0800 Subject: [PATCH 62/75] chore: Update homestar-schemas version --- .github/workflows/schemas.yml | 3 +-- Cargo.lock | 2 +- homestar-runtime/schemas/api.json | 4 ++-- 
homestar-schemas/Cargo.toml | 6 ++---- 4 files changed, 6 insertions(+), 9 deletions(-) diff --git a/.github/workflows/schemas.yml b/.github/workflows/schemas.yml index 0baf9aad..03bfc0a4 100644 --- a/.github/workflows/schemas.yml +++ b/.github/workflows/schemas.yml @@ -60,8 +60,7 @@ jobs: - name: Check for modified schemas id: git-check shell: bash - run: | - echo modified=$(if [[ $(git diff homestar-runtime/schemas/) ]]; then echo "true"; else echo "false"; fi) >> $GITHUB_OUTPUT + run: echo modified=$(if [[ $(git diff homestar-runtime/schemas/) ]]; then echo "true"; else echo "false"; fi) >> $GITHUB_OUTPUT - name: Push changes if: steps.git-check.outputs.modified == 'true' diff --git a/Cargo.lock b/Cargo.lock index e0bd1e5a..949a16bb 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2639,7 +2639,7 @@ dependencies = [ [[package]] name = "homestar-schemas" -version = "0.1.0" +version = "0.1.1" dependencies = [ "homestar-invocation", "homestar-runtime", diff --git a/homestar-runtime/schemas/api.json b/homestar-runtime/schemas/api.json index cc831a76..0ec41db3 100644 --- a/homestar-runtime/schemas/api.json +++ b/homestar-runtime/schemas/api.json @@ -36,7 +36,7 @@ }, { "name": "health", - "paramStructure": "by-name", + "paramStructure": "either", "params": [], "result": { "name": "health", @@ -62,7 +62,7 @@ }, { "name": "metrics", - "paramStructure": "by-name", + "paramStructure": "either", "params": [], "result": { "name": "metrics", diff --git a/homestar-schemas/Cargo.toml b/homestar-schemas/Cargo.toml index 1f237496..a68a04f9 100644 --- a/homestar-schemas/Cargo.toml +++ b/homestar-schemas/Cargo.toml @@ -1,15 +1,13 @@ [package] name = "homestar-schemas" publish = false -version = "0.1.0" +version = { workspace = true } edition = { workspace = true } rust-version = { workspace = true } [dependencies] homestar-invocation = { version = "0.1", path = "../homestar-invocation", default-features = false } -homestar-runtime = { version = "0.1", path = "../homestar-runtime", 
default-features = false, features = [ - "websocket-notify", -] } +homestar-runtime = { version = "0.1", path = "../homestar-runtime" } homestar-workflow = { version = "0.1", path = "../homestar-workflow", default-features = false } homestar-workspace-hack = { workspace = true } schemars = { workspace = true } From 05264632946f1371584e57351199959c61394600 Mon Sep 17 00:00:00 2001 From: Brian Ginsburg Date: Thu, 8 Feb 2024 15:56:52 -0800 Subject: [PATCH 63/75] chore: Update dependabot action modified file check --- .github/workflows/{audit.yml.bak => audit.yml} | 0 .github/workflows/{builds.yml.bak => builds.yml} | 0 .github/workflows/{coverage.yml.bak => coverage.yml} | 0 .../workflows/{dependabot_pr.yaml.bak => dependabot_pr.yaml} | 2 +- .github/workflows/{docker.yml.bak => docker.yml} | 0 .github/workflows/{nix.yml.bak => nix.yml} | 0 .github/workflows/{release.yml.bak => release.yml} | 0 .../{tests_and_checks.yml.bak => tests_and_checks.yml} | 0 8 files changed, 1 insertion(+), 1 deletion(-) rename .github/workflows/{audit.yml.bak => audit.yml} (100%) rename .github/workflows/{builds.yml.bak => builds.yml} (100%) rename .github/workflows/{coverage.yml.bak => coverage.yml} (100%) rename .github/workflows/{dependabot_pr.yaml.bak => dependabot_pr.yaml} (91%) rename .github/workflows/{docker.yml.bak => docker.yml} (100%) rename .github/workflows/{nix.yml.bak => nix.yml} (100%) rename .github/workflows/{release.yml.bak => release.yml} (100%) rename .github/workflows/{tests_and_checks.yml.bak => tests_and_checks.yml} (100%) diff --git a/.github/workflows/audit.yml.bak b/.github/workflows/audit.yml similarity index 100% rename from .github/workflows/audit.yml.bak rename to .github/workflows/audit.yml diff --git a/.github/workflows/builds.yml.bak b/.github/workflows/builds.yml similarity index 100% rename from .github/workflows/builds.yml.bak rename to .github/workflows/builds.yml diff --git a/.github/workflows/coverage.yml.bak b/.github/workflows/coverage.yml 
similarity index 100% rename from .github/workflows/coverage.yml.bak rename to .github/workflows/coverage.yml diff --git a/.github/workflows/dependabot_pr.yaml.bak b/.github/workflows/dependabot_pr.yaml similarity index 91% rename from .github/workflows/dependabot_pr.yaml.bak rename to .github/workflows/dependabot_pr.yaml index 3195b668..8ef9cfb1 100644 --- a/.github/workflows/dependabot_pr.yaml.bak +++ b/.github/workflows/dependabot_pr.yaml @@ -35,7 +35,7 @@ jobs: - name: Check for modified files id: git-check - run: echo modified=$(if git diff-index --quiet HEAD --; then echo "false"; else echo "true"; fi) >> $GITHUB_OUTPUT + run: echo modified=$(if [[ $(git diff) ]]; then echo "true"; else echo "false"; fi) >> $GITHUB_OUTPUT - name: Push changes if: steps.git-check.outputs.modified == 'true' diff --git a/.github/workflows/docker.yml.bak b/.github/workflows/docker.yml similarity index 100% rename from .github/workflows/docker.yml.bak rename to .github/workflows/docker.yml diff --git a/.github/workflows/nix.yml.bak b/.github/workflows/nix.yml similarity index 100% rename from .github/workflows/nix.yml.bak rename to .github/workflows/nix.yml diff --git a/.github/workflows/release.yml.bak b/.github/workflows/release.yml similarity index 100% rename from .github/workflows/release.yml.bak rename to .github/workflows/release.yml diff --git a/.github/workflows/tests_and_checks.yml.bak b/.github/workflows/tests_and_checks.yml similarity index 100% rename from .github/workflows/tests_and_checks.yml.bak rename to .github/workflows/tests_and_checks.yml From ac554e6e3e12473d9ba57b26e0be68fba32fa0dc Mon Sep 17 00:00:00 2001 From: Brian Ginsburg Date: Thu, 8 Feb 2024 15:58:04 -0800 Subject: [PATCH 64/75] chore: Only run schemas action on push to main --- .github/workflows/schemas.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.github/workflows/schemas.yml b/.github/workflows/schemas.yml index 03bfc0a4..7a0ade5f 100644 --- 
a/.github/workflows/schemas.yml +++ b/.github/workflows/schemas.yml @@ -2,7 +2,8 @@ name: 📄 Schemas on: push: - branches: [main, "**"] + branches: [main] + # branches: [main, "**"] permissions: contents: write From 8fe6b452c112ecf4a5bb19855cd11414f6341d94 Mon Sep 17 00:00:00 2001 From: Brian Ginsburg Date: Thu, 8 Feb 2024 16:04:17 -0800 Subject: [PATCH 65/75] chore: Align OpenRPC API version with workspace --- homestar-runtime/schemas/api.json | 2 +- homestar-schemas/src/main.rs | 4 +++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/homestar-runtime/schemas/api.json b/homestar-runtime/schemas/api.json index 0ec41db3..677eb32d 100644 --- a/homestar-runtime/schemas/api.json +++ b/homestar-runtime/schemas/api.json @@ -3,7 +3,7 @@ "info": { "title": "homestar", "description": "", - "version": "0.1.0", + "version": "0.1.1", "contact": { "name": null, "email": null, diff --git a/homestar-schemas/src/main.rs b/homestar-schemas/src/main.rs index 949de8bb..c3e3e2a4 100644 --- a/homestar-schemas/src/main.rs +++ b/homestar-schemas/src/main.rs @@ -310,7 +310,9 @@ fn generate_api_doc( title: "homestar".to_string(), description: Some(env!("CARGO_PKG_DESCRIPTION").into()), terms_of_service: None, - version: "0.1.0".to_string(), + // Version is tied to workspace, but use homestar-runtime version + // in the future. 
+ version: env!("CARGO_PKG_VERSION").into(), contact: Some(ContactObject { name: None, url: Some(env!("CARGO_PKG_REPOSITORY").into()), From 42f6379431c76389ef141f5f1c56be56f1fb6f5c Mon Sep 17 00:00:00 2001 From: Brian Ginsburg Date: Thu, 8 Feb 2024 16:08:54 -0800 Subject: [PATCH 66/75] chore: Update param structure values --- homestar-runtime/schemas/api.json | 8 ++++---- homestar-schemas/src/main.rs | 8 ++++---- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/homestar-runtime/schemas/api.json b/homestar-runtime/schemas/api.json index 677eb32d..01b88381 100644 --- a/homestar-runtime/schemas/api.json +++ b/homestar-runtime/schemas/api.json @@ -160,7 +160,7 @@ }, { "name": "node", - "paramStructure": "by-name", + "paramStructure": "either", "params": [], "result": { "name": "node_info", @@ -238,7 +238,7 @@ }, { "name": "subscribe_network_events", - "paramStructure": "by-name", + "paramStructure": "either", "params": [], "result": { "name": "subscription_id", @@ -1171,7 +1171,7 @@ }, { "name": "unsubscribe_network_events", - "paramStructure": "by-name", + "paramStructure": "either", "params": [], "result": { "name": "unsubscribe result", @@ -1665,7 +1665,7 @@ }, { "name": "unsubscribe_run_workflow", - "paramStructure": "by-name", + "paramStructure": "either", "params": [], "result": { "name": "unsubscribe result", diff --git a/homestar-schemas/src/main.rs b/homestar-schemas/src/main.rs index c3e3e2a4..fcc30ac5 100644 --- a/homestar-schemas/src/main.rs +++ b/homestar-schemas/src/main.rs @@ -167,7 +167,7 @@ fn generate_api_doc( summary: None, servers: None, tags: None, - param_structure: Some(MethodObjectParamStructure::ByName), + param_structure: Some(MethodObjectParamStructure::Either), params: vec![], result: ContentDescriptorOrReference::ContentDescriptorObject(ContentDescriptorObject { name: "node_info".to_string(), @@ -191,7 +191,7 @@ fn generate_api_doc( summary: None, servers: None, tags: None, - param_structure: 
Some(MethodObjectParamStructure::ByName), + param_structure: Some(MethodObjectParamStructure::Either), params: vec![], result: ContentDescriptorOrReference::ContentDescriptorObject(ContentDescriptorObject { name: "subscription_id".to_string(), @@ -222,7 +222,7 @@ fn generate_api_doc( summary: None, servers: None, tags: None, - param_structure: Some(MethodObjectParamStructure::ByName), + param_structure: Some(MethodObjectParamStructure::Either), params: vec![], result: ContentDescriptorOrReference::ContentDescriptorObject(ContentDescriptorObject { name: "unsubscribe result".to_string(), @@ -286,7 +286,7 @@ fn generate_api_doc( summary: None, servers: None, tags: None, - param_structure: Some(MethodObjectParamStructure::ByName), + param_structure: Some(MethodObjectParamStructure::Either), params: vec![], result: ContentDescriptorOrReference::ContentDescriptorObject(ContentDescriptorObject { name: "unsubscribe result".to_string(), From 9326cc5f9f54d41b49f43a7ca3cb68f61cc28c3b Mon Sep 17 00:00:00 2001 From: Brian Ginsburg Date: Thu, 8 Feb 2024 16:17:35 -0800 Subject: [PATCH 67/75] chore: Remove unused Ipld from schema stubs --- homestar-invocation/src/ipld/schema.rs | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/homestar-invocation/src/ipld/schema.rs b/homestar-invocation/src/ipld/schema.rs index 103b4af9..495ae66b 100644 --- a/homestar-invocation/src/ipld/schema.rs +++ b/homestar-invocation/src/ipld/schema.rs @@ -1,6 +1,5 @@ //! JSON Schema generation for DAG-JSON encoded Ipld. -use libipld::Ipld; use schemars::{ gen::SchemaGenerator, schema::{InstanceType, Metadata, ObjectValidation, Schema, SchemaObject, SingleOrVec}, @@ -11,7 +10,7 @@ use std::{borrow::Cow, collections::BTreeMap}; /// Ipld stub for JSON Schema generation #[derive(Debug)] #[doc(hidden)] -pub struct IpldStub(Ipld); +pub struct IpldStub(); // The Ipld stub exists solely to implement a JSON Schema // represenation of Ipld. 
Should libipld provide an implementation @@ -67,7 +66,7 @@ impl JsonSchema for IpldStub { /// Ipld link stub for JSON Schema generation #[derive(Debug)] #[doc(hidden)] -pub struct IpldLinkStub(Ipld); +pub struct IpldLinkStub(); impl JsonSchema for IpldLinkStub { fn schema_name() -> String { @@ -100,7 +99,7 @@ impl JsonSchema for IpldLinkStub { /// Ipld bytes stub for JSON Schema generation #[derive(Debug)] #[doc(hidden)] -pub struct IpldBytesStub(Ipld); +pub struct IpldBytesStub(); impl JsonSchema for IpldBytesStub { fn schema_name() -> String { From 1b608ba8f0c562b7c45ec4fd780c44bc0711cac9 Mon Sep 17 00:00:00 2001 From: Zeeshan Lakhani Date: Thu, 8 Feb 2024 19:40:39 -0500 Subject: [PATCH 68/75] chore: docker + feature flag apply correction --- .dockerignore | 2 ++ docker/Dockerfile | 6 +++++- homestar-runtime/src/event_handler/event.rs | 24 +++++++++++++++------ homestar-schemas/Cargo.toml | 10 ++++++++- 4 files changed, 34 insertions(+), 8 deletions(-) diff --git a/.dockerignore b/.dockerignore index c15b9e4d..515c86b0 100644 --- a/.dockerignore +++ b/.dockerignore @@ -10,7 +10,9 @@ !**/migrations !diesel.toml !**/wit +!**/schemas/api.json examples homestar-functions homestar-workspace-hack +homestar-schemas diff --git a/docker/Dockerfile b/docker/Dockerfile index f7c307d8..fcb83e4c 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -31,9 +31,11 @@ RUN cargo init --lib homestar-invocation && \ RUN echo "fn main() {}" > ./homestar-runtime/src/main.rs -RUN mkdir -p ./homestar-runtime/src/test_utils/proc_macro ./homestar-runtime/migrations ./examples ./homestar-functions +RUN mkdir -p ./homestar-runtime/src/test_utils/proc_macro ./homestar-runtime/migrations \ + ./examples ./homestar-functions ./homestar-schemas RUN bash -c 'pushd ./examples && cargo init dummy-app-examples && popd' RUN bash -c 'pushd ./homestar-functions && cargo init dummy-app-fns && popd' +RUN cargo init homestar-schemas RUN cargo init --lib homestar-workspace-hack # copy cargo.* @@ -43,6 
+45,7 @@ COPY ../homestar-workflow/Cargo.toml ./homestar-workflow/ COPY ../homestar-wasm/Cargo.toml ./homestar-wasm/ COPY ../homestar-runtime/Cargo.toml ./homestar-runtime/ COPY ../homestar-runtime/migrations ./homestar-runtime/migrations +COPY ../homestar-runtime/schemas/api.json ./homestar-runtime/schemas/api.json COPY ../homestar-runtime/src/test_utils/proc_macro ./homestar-runtime/src/test_utils/proc_macro ENTRYPOINT ["/bin/bash"] @@ -91,6 +94,7 @@ WORKDIR /home/runner COPY --chown=homestar:homestar diesel.toml ./ COPY --chown=homestar:homestar ../homestar-runtime/migrations ./migrations +COPY --chown=homestar:homestar ../homestar-runtime/schemas ./schemas COPY --chown=homestar:homestar --from=builder /usr/local/bin/homestar-runtime ./homestar COPY --chown=homestar:homestar --from=builder /usr/local/bin/diesel /usr/local/bin/diesel COPY --chown=homestar:homestar --from=builder /etc/*.db ./ diff --git a/homestar-runtime/src/event_handler/event.rs b/homestar-runtime/src/event_handler/event.rs index 37af36e8..1094c175 100644 --- a/homestar-runtime/src/event_handler/event.rs +++ b/homestar-runtime/src/event_handler/event.rs @@ -708,16 +708,28 @@ where { #[cfg(not(feature = "ipfs"))] async fn handle_event(self, event_handler: &mut EventHandler) { - if let Err(err) = self.handle_info(event_handler).await { - error!(subject = "handle.err", - category = "handle_event", - error=?err, - "error storing event") + match self { + #[cfg(feature = "websocket-notify")] + Event::ReplayReceipts(replay) => { + if let Err(err) = replay.notify(event_handler) { + error!(subject = "replay.err", + category = "handle_event", + error=?err, + "error replaying and notifying receipts") + } + } + event => { + if let Err(err) = event.handle_info(event_handler).await { + error!(subject = "event.err", + category = "handle_event", + error=?err, + "error storing event") + } + } } } #[cfg(feature = "ipfs")] - #[cfg_attr(docsrs, doc(cfg(feature = "ipfs")))] #[allow(unused_variables)] async fn 
handle_event(self, event_handler: &mut EventHandler, ipfs: IpfsCli) { match self { diff --git a/homestar-schemas/Cargo.toml b/homestar-schemas/Cargo.toml index a68a04f9..47cfccdd 100644 --- a/homestar-schemas/Cargo.toml +++ b/homestar-schemas/Cargo.toml @@ -7,7 +7,9 @@ rust-version = { workspace = true } [dependencies] homestar-invocation = { version = "0.1", path = "../homestar-invocation", default-features = false } -homestar-runtime = { version = "0.1", path = "../homestar-runtime" } +homestar-runtime = { version = "0.1", path = "../homestar-runtime", default-features = false, features = [ + "websocket-notify", +] } homestar-workflow = { version = "0.1", path = "../homestar-workflow", default-features = false } homestar-workspace-hack = { workspace = true } schemars = { workspace = true } @@ -23,3 +25,9 @@ test = false [features] default = [] + +[package.metadata.cargo-machete] +ignored = ["homestar-workspace-hack"] + +[package.metadata.cargo-udeps.ignore] +normal = ["homestar-workspace-hack"] From bacf7a273c07eea36a104a27930b6b71cf5352e8 Mon Sep 17 00:00:00 2001 From: Brian Ginsburg Date: Fri, 9 Feb 2024 09:29:48 -0800 Subject: [PATCH 69/75] chore: Make network notification imports explicit --- homestar-runtime/src/event_handler/notification.rs | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/homestar-runtime/src/event_handler/notification.rs b/homestar-runtime/src/event_handler/notification.rs index a5631d75..c03df12e 100644 --- a/homestar-runtime/src/event_handler/notification.rs +++ b/homestar-runtime/src/event_handler/notification.rs @@ -14,7 +14,15 @@ use tracing::{debug, warn}; pub(crate) mod network; pub(crate) mod receipt; -pub(crate) use network::*; +pub(crate) use network::{ + ConnectionClosed, ConnectionEstablished, DiscoverServedRendezvous, DiscoveredMdns, + DiscoveredRendezvous, GotReceiptDht, GotWorkflowInfoDht, IncomingConnectionError, + NetworkNotification, NewListenAddr, OutgoingConnectionError, 
PeerRegisteredRendezvous, + PublishedReceiptPubsub, PutReceiptDht, PutWorkflowInfoDht, ReceiptQuorumFailureDht, + ReceiptQuorumSuccessDht, ReceivedReceiptPubsub, ReceivedWorkflowInfo, RegisteredRendezvous, + SentWorkflowInfo, WorkflowInfoQuorumFailureDht, WorkflowInfoQuorumSuccessDht, + WorkflowInfoSource, +}; pub(crate) use receipt::ReceiptNotification; /// Send receipt notification as bytes. From 5de960f3c67aedcbe5fd367e5ffdf4fcee4b68e5 Mon Sep 17 00:00:00 2001 From: Brian Ginsburg Date: Fri, 9 Feb 2024 09:50:57 -0800 Subject: [PATCH 70/75] chore: Update schema generation action checout ref --- .github/workflows/schemas.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/.github/workflows/schemas.yml b/.github/workflows/schemas.yml index 7a0ade5f..266c812c 100644 --- a/.github/workflows/schemas.yml +++ b/.github/workflows/schemas.yml @@ -40,7 +40,6 @@ jobs: with: fetch-depth: 0 token: ${{ secrets.HOMESTAR_UPDATE_TOKEN }} - ref: ${{ github.event.pull_request.head.sha }} - name: Install Rust Toolchain uses: dtolnay/rust-toolchain@stable From 22026356da9ea9e095e39f87aa7e8a364f2acb13 Mon Sep 17 00:00:00 2001 From: Brian Ginsburg Date: Mon, 12 Feb 2024 11:45:13 -0800 Subject: [PATCH 71/75] chore: Break network notifications into modules --- Cargo.lock | 12 + homestar-runtime/Cargo.toml | 1 + .../src/event_handler/notification/network.rs | 2123 ++--------------- .../notification/network/connection.rs | 325 +++ .../event_handler/notification/network/dht.rs | 788 ++++++ .../notification/network/mdns.rs | 76 + .../notification/network/pubsub.rs | 160 ++ .../notification/network/rendezvous.rs | 300 +++ .../notification/network/req_resp.rs | 304 +++ 9 files changed, 2114 insertions(+), 1975 deletions(-) create mode 100644 homestar-runtime/src/event_handler/notification/network/connection.rs create mode 100644 homestar-runtime/src/event_handler/notification/network/dht.rs create mode 100644 homestar-runtime/src/event_handler/notification/network/mdns.rs create mode 100644 
homestar-runtime/src/event_handler/notification/network/pubsub.rs create mode 100644 homestar-runtime/src/event_handler/notification/network/rendezvous.rs create mode 100644 homestar-runtime/src/event_handler/notification/network/req_resp.rs diff --git a/Cargo.lock b/Cargo.lock index 949a16bb..baafb5f8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1545,6 +1545,17 @@ dependencies = [ "serde", ] +[[package]] +name = "derive-getters" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a2c35ab6e03642397cdda1dd58abbc05d418aef8e36297f336d5aba060fe8df" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", +] + [[package]] name = "diesel" version = "2.1.4" @@ -2544,6 +2555,7 @@ dependencies = [ "daemonize", "dagga", "dashmap", + "derive-getters", "diesel", "diesel_migrations", "dotenvy", diff --git a/homestar-runtime/Cargo.toml b/homestar-runtime/Cargo.toml index 7931ad21..41cba10f 100644 --- a/homestar-runtime/Cargo.toml +++ b/homestar-runtime/Cargo.toml @@ -53,6 +53,7 @@ const_format = "0.2" crossbeam = "0.8" dagga = "0.2" dashmap = "5.5" +derive-getters = "0.3.0" diesel = { version = "2.1", default-features = false, features = [ "sqlite", "r2d2", diff --git a/homestar-runtime/src/event_handler/notification/network.rs b/homestar-runtime/src/event_handler/notification/network.rs index 68ea8668..7281ed09 100644 --- a/homestar-runtime/src/event_handler/notification/network.rs +++ b/homestar-runtime/src/event_handler/notification/network.rs @@ -1,39 +1,34 @@ -// Notification types for [swarm] events. -// -// [swarm]: libp2p_swarm::Swarm +//! Notification types for [swarm] events. +//! +//! 
[swarm]: libp2p::swarm::Swarm use anyhow::anyhow; -use chrono::prelude::Utc; -use faststr::FastStr; + use homestar_invocation::ipld::DagJson; -use libipld::{serde::from_ipld, Cid, Ipld}; -use libp2p::{ - swarm::{DialError, ListenError}, - Multiaddr, PeerId, -}; +use libipld::{serde::from_ipld, Ipld}; use schemars::JsonSchema; use std::{collections::BTreeMap, fmt}; -const ADDRESS_KEY: &str = "address"; -const ADDRESSES_KEY: &str = "addresses"; -const CID_KEY: &str = "cid"; -const CONNECTED_PEER_COUNT_KEY: &str = "connected_peer_count"; -const ENQUIRER_KEY: &str = "enquirer"; -const ERROR_KEY: &str = "error"; -const NAME_KEY: &str = "name"; -const NUM_TASKS_KEY: &str = "num_tasks"; -const PEER_KEY: &str = "peer_id"; -const PEERS_KEY: &str = "peers"; -const PROGRESS_KEY: &str = "progress"; -const PROGRESS_COUNT_KEY: &str = "progress_count"; -const PROVIDER_KEY: &str = "provider"; -const PUBLISHER_KEY: &str = "publisher"; -const REQUESTOR_KEY: &str = "requestor"; -const QUORUM_KEY: &str = "quorum"; -const RAN_KEY: &str = "ran"; -const SERVER_KEY: &str = "server"; -const STORED_TO_PEERS_KEY: &str = "stored_to_peers"; -const TIMESTAMP_KEY: &str = "timestamp"; +pub(crate) mod connection; +pub(crate) mod dht; +pub(crate) mod mdns; +pub(crate) mod pubsub; +pub(crate) mod rendezvous; +pub(crate) mod req_resp; +pub(crate) use connection::{ + ConnectionClosed, ConnectionEstablished, IncomingConnectionError, NewListenAddr, + OutgoingConnectionError, +}; +pub(crate) use dht::{ + GotReceiptDht, GotWorkflowInfoDht, PutReceiptDht, PutWorkflowInfoDht, ReceiptQuorumFailureDht, + ReceiptQuorumSuccessDht, WorkflowInfoQuorumFailureDht, WorkflowInfoQuorumSuccessDht, +}; +pub(crate) use mdns::DiscoveredMdns; +pub(crate) use pubsub::{PublishedReceiptPubsub, ReceivedReceiptPubsub}; +pub(crate) use rendezvous::{ + DiscoverServedRendezvous, DiscoveredRendezvous, PeerRegisteredRendezvous, RegisteredRendezvous, +}; +pub(crate) use req_resp::{ReceivedWorkflowInfo, SentWorkflowInfo}; /// Network 
notification type. #[derive(Debug, Clone, JsonSchema)] @@ -335,1845 +330,19 @@ impl TryFrom for NetworkNotification { } } -#[derive(Debug, Clone, JsonSchema)] -#[schemars(rename = "new_listen_addr")] -pub struct NewListenAddr { - timestamp: i64, - peer_id: String, - address: String, -} - -impl NewListenAddr { - pub(crate) fn new(peer_id: PeerId, address: Multiaddr) -> NewListenAddr { - NewListenAddr { - timestamp: Utc::now().timestamp_millis(), - peer_id: peer_id.to_string(), - address: address.to_string(), - } - } -} - -impl DagJson for NewListenAddr {} - -impl From for Ipld { - fn from(notification: NewListenAddr) -> Self { - Ipld::Map(BTreeMap::from([ - (TIMESTAMP_KEY.into(), notification.timestamp.into()), - (PEER_KEY.into(), notification.peer_id.into()), - (ADDRESS_KEY.into(), notification.address.into()), - ])) - } -} - -impl TryFrom for NewListenAddr { - type Error = anyhow::Error; - - fn try_from(ipld: Ipld) -> Result { - let map = from_ipld::>(ipld)?; - - let timestamp = from_ipld( - map.get(TIMESTAMP_KEY) - .ok_or_else(|| anyhow!("missing {TIMESTAMP_KEY}"))? - .to_owned(), - )?; - - let peer_id = from_ipld( - map.get(PEER_KEY) - .ok_or_else(|| anyhow!("missing {PEER_KEY}"))? - .to_owned(), - )?; - - let address = from_ipld( - map.get(ADDRESS_KEY) - .ok_or_else(|| anyhow!("missing {ADDRESS_KEY}"))? 
- .to_owned(), - )?; - - Ok(NewListenAddr { - timestamp, - peer_id, - address, - }) - } -} - -#[derive(Debug, Clone, JsonSchema)] -#[schemars(rename = "connection_established")] -pub struct ConnectionEstablished { - timestamp: i64, - peer_id: String, - address: String, -} - -impl ConnectionEstablished { - pub(crate) fn new(peer_id: PeerId, address: Multiaddr) -> ConnectionEstablished { - ConnectionEstablished { - timestamp: Utc::now().timestamp_millis(), - peer_id: peer_id.to_string(), - address: address.to_string(), - } - } -} - -impl DagJson for ConnectionEstablished {} - -impl From for Ipld { - fn from(notification: ConnectionEstablished) -> Self { - Ipld::Map(BTreeMap::from([ - (TIMESTAMP_KEY.into(), notification.timestamp.into()), - (PEER_KEY.into(), notification.peer_id.into()), - (ADDRESS_KEY.into(), notification.address.into()), - ])) - } -} - -impl TryFrom for ConnectionEstablished { - type Error = anyhow::Error; - - fn try_from(ipld: Ipld) -> Result { - let map = from_ipld::>(ipld)?; - - let timestamp = from_ipld( - map.get(TIMESTAMP_KEY) - .ok_or_else(|| anyhow!("missing {TIMESTAMP_KEY}"))? - .to_owned(), - )?; - - let peer_id = from_ipld( - map.get(PEER_KEY) - .ok_or_else(|| anyhow!("missing {PEER_KEY}"))? - .to_owned(), - )?; - - let address = from_ipld( - map.get(ADDRESS_KEY) - .ok_or_else(|| anyhow!("missing {ADDRESS_KEY}"))? 
- .to_owned(), - )?; - - Ok(ConnectionEstablished { - timestamp, - peer_id, - address, - }) - } -} - -#[derive(Debug, Clone, JsonSchema)] -#[schemars(rename = "connection_closed")] -pub struct ConnectionClosed { - timestamp: i64, - peer_id: String, - address: String, -} - -impl ConnectionClosed { - pub(crate) fn new(peer_id: PeerId, address: Multiaddr) -> ConnectionClosed { - ConnectionClosed { - timestamp: Utc::now().timestamp_millis(), - peer_id: peer_id.to_string(), - address: address.to_string(), - } - } -} - -impl DagJson for ConnectionClosed {} - -impl From for Ipld { - fn from(notification: ConnectionClosed) -> Self { - Ipld::Map(BTreeMap::from([ - (TIMESTAMP_KEY.into(), notification.timestamp.into()), - (PEER_KEY.into(), notification.peer_id.into()), - (ADDRESS_KEY.into(), notification.address.into()), - ])) - } -} - -impl TryFrom for ConnectionClosed { - type Error = anyhow::Error; - - fn try_from(ipld: Ipld) -> Result { - let map = from_ipld::>(ipld)?; - - let timestamp = from_ipld( - map.get(TIMESTAMP_KEY) - .ok_or_else(|| anyhow!("missing {TIMESTAMP_KEY}"))? - .to_owned(), - )?; - - let peer_id = from_ipld( - map.get(PEER_KEY) - .ok_or_else(|| anyhow!("missing {PEER_KEY}"))? - .to_owned(), - )?; - - let address = from_ipld( - map.get(ADDRESS_KEY) - .ok_or_else(|| anyhow!("missing {ADDRESS_KEY}"))? 
- .to_owned(), - )?; - - Ok(ConnectionClosed { - timestamp, - peer_id, - address, - }) - } -} - -#[derive(Debug, Clone, JsonSchema)] -#[schemars(rename = "outgoing_connection_error")] -pub struct OutgoingConnectionError { - timestamp: i64, - peer_id: Option, - error: String, -} - -impl OutgoingConnectionError { - pub(crate) fn new(peer_id: Option, error: DialError) -> OutgoingConnectionError { - OutgoingConnectionError { - timestamp: Utc::now().timestamp_millis(), - peer_id: peer_id.map(|p| p.to_string()), - error: error.to_string(), - } - } -} - -impl DagJson for OutgoingConnectionError {} - -impl From for Ipld { - fn from(notification: OutgoingConnectionError) -> Self { - Ipld::Map(BTreeMap::from([ - (TIMESTAMP_KEY.into(), notification.timestamp.into()), - ( - PEER_KEY.into(), - notification - .peer_id - .map(|peer_id| peer_id.into()) - .unwrap_or(Ipld::Null), - ), - (ERROR_KEY.into(), notification.error.into()), - ])) - } -} - -impl TryFrom for OutgoingConnectionError { - type Error = anyhow::Error; - - fn try_from(ipld: Ipld) -> Result { - let map = from_ipld::>(ipld)?; - - let timestamp = from_ipld( - map.get(TIMESTAMP_KEY) - .ok_or_else(|| anyhow!("missing {TIMESTAMP_KEY}"))? - .to_owned(), - )?; - - let peer_id = map - .get(PEER_KEY) - .and_then(|ipld| match ipld { - Ipld::Null => None, - ipld => Some(ipld), - }) - .and_then(|ipld| from_ipld(ipld.to_owned()).ok()); - - let error = from_ipld( - map.get(ERROR_KEY) - .ok_or_else(|| anyhow!("missing {ERROR_KEY}"))? 
- .to_owned(), - )?; - - Ok(OutgoingConnectionError { - timestamp, - peer_id, - error, - }) - } -} - -#[derive(Debug, Clone, JsonSchema)] -#[schemars(rename = "incoming_connection_error")] -pub struct IncomingConnectionError { - timestamp: i64, - error: String, -} - -impl IncomingConnectionError { - pub(crate) fn new(error: ListenError) -> IncomingConnectionError { - IncomingConnectionError { - timestamp: Utc::now().timestamp_millis(), - error: error.to_string(), - } - } -} - -impl DagJson for IncomingConnectionError {} - -impl From for Ipld { - fn from(notification: IncomingConnectionError) -> Self { - Ipld::Map(BTreeMap::from([ - (TIMESTAMP_KEY.into(), notification.timestamp.into()), - (ERROR_KEY.into(), notification.error.into()), - ])) - } -} - -impl TryFrom for IncomingConnectionError { - type Error = anyhow::Error; - - fn try_from(ipld: Ipld) -> Result { - let map = from_ipld::>(ipld)?; - - let timestamp = from_ipld( - map.get(TIMESTAMP_KEY) - .ok_or_else(|| anyhow!("missing {TIMESTAMP_KEY}"))? - .to_owned(), - )?; - - let error = from_ipld( - map.get(ERROR_KEY) - .ok_or_else(|| anyhow!("missing {ERROR_KEY}"))? 
- .to_owned(), - )?; - - Ok(IncomingConnectionError { timestamp, error }) - } -} - -#[derive(Debug, Clone, JsonSchema)] -#[schemars(rename = "discovered_mdns")] -pub struct DiscoveredMdns { - timestamp: i64, - #[schemars(description = "Peers discovered by peer ID and multiaddress")] - peers: BTreeMap, -} - -impl DiscoveredMdns { - pub(crate) fn new(peers: BTreeMap) -> DiscoveredMdns { - DiscoveredMdns { - timestamp: Utc::now().timestamp_millis(), - peers: peers - .iter() - .map(|(peer_id, address)| (peer_id.to_string(), address.to_string())) - .collect(), - } - } -} - -impl DagJson for DiscoveredMdns {} - -impl From for Ipld { - fn from(notification: DiscoveredMdns) -> Self { - let peers: BTreeMap = notification - .peers - .into_iter() - .map(|(peer_id, address)| (peer_id, address.into())) - .collect(); - - let map: BTreeMap = BTreeMap::from([ - (TIMESTAMP_KEY.into(), notification.timestamp.into()), - (PEERS_KEY.into(), peers.into()), - ]); - - Ipld::Map(map) - } -} - -impl TryFrom for DiscoveredMdns { - type Error = anyhow::Error; - - fn try_from(ipld: Ipld) -> Result { - let map = from_ipld::>(ipld)?; - - let timestamp = from_ipld( - map.get(TIMESTAMP_KEY) - .ok_or_else(|| anyhow!("missing {TIMESTAMP_KEY}"))? - .to_owned(), - )?; - - let peers = from_ipld::>( - map.get(PEERS_KEY) - .ok_or_else(|| anyhow!("missing {PEERS_KEY}"))? 
- .to_owned(), - )?; - - Ok(DiscoveredMdns { timestamp, peers }) - } -} - -#[derive(Debug, Clone, JsonSchema)] -#[schemars(rename = "discovered_rendezvous")] -pub struct DiscoveredRendezvous { - timestamp: i64, - #[schemars(description = "Server that fulfilled the discovery request")] - server: String, - #[schemars(description = "Peers discovered by peer ID and multiaddresses")] - peers: BTreeMap>, -} - -impl DiscoveredRendezvous { - pub(crate) fn new( - server: PeerId, - peers: BTreeMap>, - ) -> DiscoveredRendezvous { - DiscoveredRendezvous { - timestamp: Utc::now().timestamp_millis(), - server: server.to_string(), - peers: peers - .iter() - .map(|(peer_id, addresses)| { - ( - peer_id.to_string(), - addresses - .iter() - .map(|address| address.to_string()) - .collect(), - ) - }) - .collect(), - } - } -} - -impl DagJson for DiscoveredRendezvous {} - -impl From for Ipld { - fn from(notification: DiscoveredRendezvous) -> Self { - let peers: BTreeMap = notification - .peers - .into_iter() - .map(|(peer_id, addresses)| { - ( - peer_id, - Ipld::List( - addresses - .iter() - .map(|address| Ipld::String(address.to_owned())) - .collect(), - ), - ) - }) - .collect(); - - let map: BTreeMap = BTreeMap::from([ - (TIMESTAMP_KEY.into(), notification.timestamp.into()), - (SERVER_KEY.into(), notification.server.into()), - (PEERS_KEY.into(), peers.into()), - ]); - - Ipld::Map(map) - } -} - -impl TryFrom for DiscoveredRendezvous { - type Error = anyhow::Error; - - fn try_from(ipld: Ipld) -> Result { - let map = from_ipld::>(ipld)?; - - let timestamp = from_ipld( - map.get(TIMESTAMP_KEY) - .ok_or_else(|| anyhow!("missing {TIMESTAMP_KEY}"))? - .to_owned(), - )?; - - let server = from_ipld( - map.get(SERVER_KEY) - .ok_or_else(|| anyhow!("missing {SERVER_KEY}"))? - .to_owned(), - )?; - - let peers = from_ipld::>>( - map.get(PEERS_KEY) - .ok_or_else(|| anyhow!("missing {PEERS_KEY}"))? 
- .to_owned(), - )?; - - Ok(DiscoveredRendezvous { - timestamp, - server, - peers, - }) - } -} - -#[derive(Debug, Clone, JsonSchema)] -#[schemars(rename = "registered_rendezvous")] -pub struct RegisteredRendezvous { - timestamp: i64, - #[schemars(description = "Server that accepted registration")] - server: String, -} - -impl RegisteredRendezvous { - pub(crate) fn new(server: PeerId) -> RegisteredRendezvous { - RegisteredRendezvous { - timestamp: Utc::now().timestamp_millis(), - server: server.to_string(), - } - } -} - -impl DagJson for RegisteredRendezvous {} - -impl From for Ipld { - fn from(notification: RegisteredRendezvous) -> Self { - let map: BTreeMap = BTreeMap::from([ - (TIMESTAMP_KEY.into(), notification.timestamp.into()), - (SERVER_KEY.into(), notification.server.into()), - ]); - - Ipld::Map(map) - } -} - -impl TryFrom for RegisteredRendezvous { - type Error = anyhow::Error; - - fn try_from(ipld: Ipld) -> Result { - let map = from_ipld::>(ipld)?; - - let timestamp = from_ipld( - map.get(TIMESTAMP_KEY) - .ok_or_else(|| anyhow!("missing {TIMESTAMP_KEY}"))? - .to_owned(), - )?; - - let server = from_ipld( - map.get(SERVER_KEY) - .ok_or_else(|| anyhow!("missing {SERVER_KEY}"))? 
- .to_owned(), - )?; - - Ok(RegisteredRendezvous { timestamp, server }) - } -} - -#[derive(Debug, Clone, JsonSchema)] -#[schemars(rename = "registered_rendezvous")] -pub struct DiscoverServedRendezvous { - timestamp: i64, - #[schemars(description = "Peer that requested discovery")] - enquirer: String, -} - -impl DiscoverServedRendezvous { - pub(crate) fn new(enquirer: PeerId) -> DiscoverServedRendezvous { - DiscoverServedRendezvous { - timestamp: Utc::now().timestamp_millis(), - enquirer: enquirer.to_string(), - } - } -} - -impl DagJson for DiscoverServedRendezvous {} - -impl From for Ipld { - fn from(notification: DiscoverServedRendezvous) -> Self { - let map: BTreeMap = BTreeMap::from([ - (TIMESTAMP_KEY.into(), notification.timestamp.into()), - (ENQUIRER_KEY.into(), notification.enquirer.into()), - ]); - - Ipld::Map(map) - } -} - -impl TryFrom for DiscoverServedRendezvous { - type Error = anyhow::Error; - - fn try_from(ipld: Ipld) -> Result { - let map = from_ipld::>(ipld)?; - - let timestamp = from_ipld( - map.get(TIMESTAMP_KEY) - .ok_or_else(|| anyhow!("missing {TIMESTAMP_KEY}"))? - .to_owned(), - )?; - - let enquirer = from_ipld( - map.get(ENQUIRER_KEY) - .ok_or_else(|| anyhow!("missing {ENQUIRER_KEY}"))? 
- .to_owned(), - )?; - - Ok(DiscoverServedRendezvous { - timestamp, - enquirer, - }) - } -} - -#[derive(Debug, Clone, JsonSchema)] -#[schemars(rename = "peer_registered_rendezvous")] -pub struct PeerRegisteredRendezvous { - timestamp: i64, - #[schemars(description = "Peer registered")] - peer_id: String, - #[schemars(description = "Multiaddresses for peer")] - addresses: Vec, -} - -impl PeerRegisteredRendezvous { - pub(crate) fn new(peer_id: PeerId, addresses: Vec) -> PeerRegisteredRendezvous { - PeerRegisteredRendezvous { - timestamp: Utc::now().timestamp_millis(), - peer_id: peer_id.to_string(), - addresses: addresses - .iter() - .map(|address| address.to_string()) - .collect(), - } - } -} - -impl DagJson for PeerRegisteredRendezvous {} - -impl From for Ipld { - fn from(notification: PeerRegisteredRendezvous) -> Self { - let map: BTreeMap = BTreeMap::from([ - (TIMESTAMP_KEY.into(), notification.timestamp.into()), - (PEER_KEY.into(), notification.peer_id.into()), - ( - ADDRESSES_KEY.into(), - Ipld::List( - notification - .addresses - .iter() - .map(|address| Ipld::String(address.to_owned())) - .collect(), - ), - ), - ]); - - Ipld::Map(map) - } -} - -impl TryFrom for PeerRegisteredRendezvous { - type Error = anyhow::Error; - - fn try_from(ipld: Ipld) -> Result { - let map = from_ipld::>(ipld)?; - - let timestamp = from_ipld( - map.get(TIMESTAMP_KEY) - .ok_or_else(|| anyhow!("missing {TIMESTAMP_KEY}"))? - .to_owned(), - )?; - - let peer_id = from_ipld( - map.get(PEER_KEY) - .ok_or_else(|| anyhow!("missing {PEER_KEY}"))? - .to_owned(), - )?; - - let addresses = from_ipld( - map.get(ADDRESSES_KEY) - .ok_or_else(|| anyhow!("missing {ADDRESSES_KEY}"))? 
- .to_owned(), - )?; +#[cfg(test)] +mod test { + use super::*; - Ok(PeerRegisteredRendezvous { - timestamp, - peer_id, - addresses, - }) - } -} + use connection::NewListenAddr; + use faststr::FastStr; + use homestar_invocation::test_utils::cid::generate_cid; + use libipld::Cid; + use libp2p::{ + swarm::{DialError, ListenError}, + Multiaddr, PeerId, + }; -#[derive(Debug, Clone, JsonSchema)] -#[schemars(rename = "published_receipt_pubsub")] -pub struct PublishedReceiptPubsub { - timestamp: i64, - #[schemars(description = "Receipt CID")] - cid: String, - #[schemars(description = "Ran receipt CID")] - ran: String, -} - -impl PublishedReceiptPubsub { - pub(crate) fn new(cid: Cid, ran: String) -> PublishedReceiptPubsub { - PublishedReceiptPubsub { - timestamp: Utc::now().timestamp_millis(), - cid: cid.to_string(), - ran, - } - } -} - -impl DagJson for PublishedReceiptPubsub {} - -impl From for Ipld { - fn from(notification: PublishedReceiptPubsub) -> Self { - let map: BTreeMap = BTreeMap::from([ - (TIMESTAMP_KEY.into(), notification.timestamp.into()), - (CID_KEY.into(), notification.cid.into()), - (RAN_KEY.into(), notification.ran.into()), - ]); - - Ipld::Map(map) - } -} - -impl TryFrom for PublishedReceiptPubsub { - type Error = anyhow::Error; - - fn try_from(ipld: Ipld) -> Result { - let map = from_ipld::>(ipld)?; - - let timestamp = from_ipld( - map.get(TIMESTAMP_KEY) - .ok_or_else(|| anyhow!("missing {TIMESTAMP_KEY}"))? - .to_owned(), - )?; - - let cid = from_ipld( - map.get(CID_KEY) - .ok_or_else(|| anyhow!("missing {CID_KEY}"))? - .to_owned(), - )?; - - let ran = from_ipld( - map.get(RAN_KEY) - .ok_or_else(|| anyhow!("missing {RAN_KEY}"))? 
- .to_owned(), - )?; - - Ok(PublishedReceiptPubsub { - timestamp, - cid, - ran, - }) - } -} - -#[derive(Debug, Clone, JsonSchema)] -#[schemars(rename = "received_receipt_pubsub")] -pub struct ReceivedReceiptPubsub { - timestamp: i64, - #[schemars(description = "Receipt publisher peer ID")] - publisher: String, - #[schemars(description = "Receipt CID")] - cid: String, - #[schemars(description = "Ran receipt CID")] - ran: String, -} - -impl ReceivedReceiptPubsub { - pub(crate) fn new(publisher: PeerId, cid: Cid, ran: String) -> ReceivedReceiptPubsub { - ReceivedReceiptPubsub { - timestamp: Utc::now().timestamp_millis(), - publisher: publisher.to_string(), - cid: cid.to_string(), - ran, - } - } -} - -impl DagJson for ReceivedReceiptPubsub {} - -impl From for Ipld { - fn from(notification: ReceivedReceiptPubsub) -> Self { - let map: BTreeMap = BTreeMap::from([ - (TIMESTAMP_KEY.into(), notification.timestamp.into()), - (PUBLISHER_KEY.into(), notification.publisher.into()), - (CID_KEY.into(), notification.cid.into()), - (RAN_KEY.into(), notification.ran.into()), - ]); - - Ipld::Map(map) - } -} - -impl TryFrom for ReceivedReceiptPubsub { - type Error = anyhow::Error; - - fn try_from(ipld: Ipld) -> Result { - let map = from_ipld::>(ipld)?; - - let timestamp = from_ipld( - map.get(TIMESTAMP_KEY) - .ok_or_else(|| anyhow!("missing {TIMESTAMP_KEY}"))? - .to_owned(), - )?; - - let publisher = from_ipld( - map.get(PUBLISHER_KEY) - .ok_or_else(|| anyhow!("missing {PUBLISHER_KEY}"))? - .to_owned(), - )?; - - let cid = from_ipld( - map.get(CID_KEY) - .ok_or_else(|| anyhow!("missing {CID_KEY}"))? - .to_owned(), - )?; - - let ran = from_ipld( - map.get(RAN_KEY) - .ok_or_else(|| anyhow!("missing {RAN_KEY}"))? 
- .to_owned(), - )?; - - Ok(ReceivedReceiptPubsub { - timestamp, - publisher, - cid, - ran, - }) - } -} - -#[derive(Debug, Clone, JsonSchema)] -#[schemars(rename = "put_receipt_dht")] -pub struct PutReceiptDht { - timestamp: i64, - #[schemars(description = "Receipt CID")] - cid: String, - #[schemars(description = "Ran receipt CID")] - ran: String, -} - -impl PutReceiptDht { - pub(crate) fn new(cid: Cid, ran: String) -> PutReceiptDht { - PutReceiptDht { - timestamp: Utc::now().timestamp_millis(), - cid: cid.to_string(), - ran, - } - } -} - -impl DagJson for PutReceiptDht {} - -impl From for Ipld { - fn from(notification: PutReceiptDht) -> Self { - let map: BTreeMap = BTreeMap::from([ - (TIMESTAMP_KEY.into(), notification.timestamp.into()), - (CID_KEY.into(), notification.cid.into()), - (RAN_KEY.into(), notification.ran.into()), - ]); - - Ipld::Map(map) - } -} - -impl TryFrom for PutReceiptDht { - type Error = anyhow::Error; - - fn try_from(ipld: Ipld) -> Result { - let map = from_ipld::>(ipld)?; - - let timestamp = from_ipld( - map.get(TIMESTAMP_KEY) - .ok_or_else(|| anyhow!("missing {TIMESTAMP_KEY}"))? - .to_owned(), - )?; - - let cid = from_ipld( - map.get(CID_KEY) - .ok_or_else(|| anyhow!("missing {CID_KEY}"))? - .to_owned(), - )?; - - let ran = from_ipld( - map.get(RAN_KEY) - .ok_or_else(|| anyhow!("missing {RAN_KEY}"))? 
- .to_owned(), - )?; - - Ok(PutReceiptDht { - timestamp, - cid, - ran, - }) - } -} - -#[derive(Debug, Clone, JsonSchema)] -#[schemars(rename = "got_receipt_dht")] -pub struct GotReceiptDht { - timestamp: i64, - #[schemars(description = "Receipt publisher peer ID")] - publisher: Option, - #[schemars(description = "Receipt CID")] - cid: String, - #[schemars(description = "Ran receipt CID")] - ran: String, -} - -impl GotReceiptDht { - pub(crate) fn new(publisher: Option, cid: Cid, ran: String) -> GotReceiptDht { - GotReceiptDht { - timestamp: Utc::now().timestamp_millis(), - publisher: publisher.map(|p| p.to_string()), - cid: cid.to_string(), - ran, - } - } -} - -impl DagJson for GotReceiptDht {} - -impl From for Ipld { - fn from(notification: GotReceiptDht) -> Self { - let map: BTreeMap = BTreeMap::from([ - (TIMESTAMP_KEY.into(), notification.timestamp.into()), - ( - PUBLISHER_KEY.into(), - notification - .publisher - .map(|peer_id| peer_id.into()) - .unwrap_or(Ipld::Null), - ), - (CID_KEY.into(), notification.cid.into()), - (RAN_KEY.into(), notification.ran.into()), - ]); - - Ipld::Map(map) - } -} - -impl TryFrom for GotReceiptDht { - type Error = anyhow::Error; - - fn try_from(ipld: Ipld) -> Result { - let map = from_ipld::>(ipld)?; - - let timestamp = from_ipld( - map.get(TIMESTAMP_KEY) - .ok_or_else(|| anyhow!("missing {TIMESTAMP_KEY}"))? - .to_owned(), - )?; - - let publisher = map - .get(PUBLISHER_KEY) - .and_then(|ipld| match ipld { - Ipld::Null => None, - ipld => Some(ipld), - }) - .and_then(|ipld| from_ipld(ipld.to_owned()).ok()); - - let cid = from_ipld( - map.get(CID_KEY) - .ok_or_else(|| anyhow!("missing {CID_KEY}"))? - .to_owned(), - )?; - - let ran = from_ipld( - map.get(RAN_KEY) - .ok_or_else(|| anyhow!("missing {RAN_KEY}"))? 
- .to_owned(), - )?; - - Ok(GotReceiptDht { - timestamp, - publisher, - cid, - ran, - }) - } -} - -#[derive(Debug, Clone, JsonSchema)] -#[schemars(rename = "put_workflow_info_dht")] -pub struct PutWorkflowInfoDht { - timestamp: i64, - #[schemars(description = "Workflow info CID")] - cid: String, - #[schemars(description = "Optional workflow name")] - name: Option, - #[schemars(description = "Number of tasks in workflow")] - num_tasks: u32, - #[schemars(description = "Completed task CIDs")] - progress: Vec, - #[schemars(description = "Number of workflow tasks completed")] - progress_count: u32, -} - -impl PutWorkflowInfoDht { - pub(crate) fn new( - cid: Cid, - name: Option, - num_tasks: u32, - progress: Vec, - progress_count: u32, - ) -> PutWorkflowInfoDht { - PutWorkflowInfoDht { - timestamp: Utc::now().timestamp_millis(), - cid: cid.to_string(), - name: name.map(|n| n.into()), - num_tasks, - progress: progress.iter().map(|cid| cid.to_string()).collect(), - progress_count, - } - } -} - -impl DagJson for PutWorkflowInfoDht {} - -impl From for Ipld { - fn from(notification: PutWorkflowInfoDht) -> Self { - let map: BTreeMap = BTreeMap::from([ - (TIMESTAMP_KEY.into(), notification.timestamp.into()), - (CID_KEY.into(), notification.cid.into()), - ( - NAME_KEY.into(), - notification - .name - .map(|peer_id| peer_id.into()) - .unwrap_or(Ipld::Null), - ), - (NUM_TASKS_KEY.into(), notification.num_tasks.into()), - ( - PROGRESS_KEY.into(), - Ipld::List( - notification - .progress - .iter() - .map(|cid| Ipld::String(cid.to_string())) - .collect(), - ), - ), - ( - PROGRESS_COUNT_KEY.into(), - notification.progress_count.into(), - ), - ]); - - Ipld::Map(map) - } -} - -impl TryFrom for PutWorkflowInfoDht { - type Error = anyhow::Error; - - fn try_from(ipld: Ipld) -> Result { - let map = from_ipld::>(ipld)?; - - let timestamp = from_ipld( - map.get(TIMESTAMP_KEY) - .ok_or_else(|| anyhow!("missing {TIMESTAMP_KEY}"))? 
- .to_owned(), - )?; - - let cid = from_ipld( - map.get(CID_KEY) - .ok_or_else(|| anyhow!("missing {CID_KEY}"))? - .to_owned(), - )?; - - let name = map - .get(NAME_KEY) - .and_then(|ipld| match ipld { - Ipld::Null => None, - ipld => Some(ipld), - }) - .and_then(|ipld| from_ipld(ipld.to_owned()).ok()); - - let num_tasks = from_ipld( - map.get(NUM_TASKS_KEY) - .ok_or_else(|| anyhow!("missing {NUM_TASKS_KEY}"))? - .to_owned(), - )?; - - let progress = from_ipld::>( - map.get(PROGRESS_KEY) - .ok_or_else(|| anyhow!("missing {PROGRESS_KEY}"))? - .to_owned(), - )?; - - let progress_count = from_ipld( - map.get(PROGRESS_COUNT_KEY) - .ok_or_else(|| anyhow!("missing {PROGRESS_COUNT_KEY}"))? - .to_owned(), - )?; - - Ok(PutWorkflowInfoDht { - timestamp, - cid, - name, - num_tasks, - progress, - progress_count, - }) - } -} - -#[derive(Debug, Clone, JsonSchema)] -#[schemars(rename = "got_workflow_info_dht")] -pub struct GotWorkflowInfoDht { - timestamp: i64, - #[schemars(description = "Workflow info publisher peer ID")] - publisher: Option, - #[schemars(description = "Workflow info CID")] - cid: String, - #[schemars(description = "Optional workflow name")] - name: Option, - #[schemars(description = "Number of tasks in workflow")] - num_tasks: u32, - #[schemars(description = "Completed task CIDs")] - progress: Vec, - #[schemars(description = "Number of workflow tasks completed")] - progress_count: u32, -} - -impl GotWorkflowInfoDht { - pub(crate) fn new( - publisher: Option, - cid: Cid, - name: Option, - num_tasks: u32, - progress: Vec, - progress_count: u32, - ) -> GotWorkflowInfoDht { - GotWorkflowInfoDht { - timestamp: Utc::now().timestamp_millis(), - publisher: publisher.map(|p| p.to_string()), - cid: cid.to_string(), - name: name.map(|n| n.into()), - num_tasks, - progress: progress.iter().map(|cid| cid.to_string()).collect(), - progress_count, - } - } -} - -impl DagJson for GotWorkflowInfoDht {} - -impl From for Ipld { - fn from(notification: GotWorkflowInfoDht) -> Self { - 
let map: BTreeMap = BTreeMap::from([ - (TIMESTAMP_KEY.into(), notification.timestamp.into()), - ( - PUBLISHER_KEY.into(), - notification - .publisher - .map(|peer_id| peer_id.into()) - .unwrap_or(Ipld::Null), - ), - (CID_KEY.into(), notification.cid.into()), - ( - NAME_KEY.into(), - notification - .name - .map(|peer_id| peer_id.into()) - .unwrap_or(Ipld::Null), - ), - (NUM_TASKS_KEY.into(), notification.num_tasks.into()), - ( - PROGRESS_KEY.into(), - Ipld::List( - notification - .progress - .iter() - .map(|cid| Ipld::String(cid.to_string())) - .collect(), - ), - ), - ( - PROGRESS_COUNT_KEY.into(), - notification.progress_count.into(), - ), - ]); - - Ipld::Map(map) - } -} - -impl TryFrom for GotWorkflowInfoDht { - type Error = anyhow::Error; - - fn try_from(ipld: Ipld) -> Result { - let map = from_ipld::>(ipld)?; - - let timestamp = from_ipld( - map.get(TIMESTAMP_KEY) - .ok_or_else(|| anyhow!("missing {TIMESTAMP_KEY}"))? - .to_owned(), - )?; - - let publisher = map - .get(PUBLISHER_KEY) - .and_then(|ipld| match ipld { - Ipld::Null => None, - ipld => Some(ipld), - }) - .and_then(|ipld| from_ipld(ipld.to_owned()).ok()); - - let cid = from_ipld( - map.get(CID_KEY) - .ok_or_else(|| anyhow!("missing {CID_KEY}"))? - .to_owned(), - )?; - - let name = map - .get(NAME_KEY) - .and_then(|ipld| match ipld { - Ipld::Null => None, - ipld => Some(ipld), - }) - .and_then(|ipld| from_ipld(ipld.to_owned()).ok()); - - let num_tasks = from_ipld( - map.get(NUM_TASKS_KEY) - .ok_or_else(|| anyhow!("missing {NUM_TASKS_KEY}"))? - .to_owned(), - )?; - - let progress = from_ipld::>( - map.get(PROGRESS_KEY) - .ok_or_else(|| anyhow!("missing {PROGRESS_KEY}"))? - .to_owned(), - )?; - - let progress_count = from_ipld( - map.get(PROGRESS_COUNT_KEY) - .ok_or_else(|| anyhow!("missing {PROGRESS_COUNT_KEY}"))? 
- .to_owned(), - )?; - - Ok(GotWorkflowInfoDht { - timestamp, - publisher, - cid, - name, - num_tasks, - progress, - progress_count, - }) - } -} - -#[derive(Debug, Clone, JsonSchema)] -#[schemars(rename = "receipt_quorum_success_dht")] -pub struct ReceiptQuorumSuccessDht { - timestamp: i64, - #[schemars(description = "Receipt CID")] - cid: String, - #[schemars(description = "Number of peers participating in quorum")] - quorum: usize, -} - -impl ReceiptQuorumSuccessDht { - pub(crate) fn new(cid: FastStr, quorum: usize) -> ReceiptQuorumSuccessDht { - ReceiptQuorumSuccessDht { - timestamp: Utc::now().timestamp_millis(), - cid: cid.to_string(), - quorum, - } - } -} - -impl DagJson for ReceiptQuorumSuccessDht {} - -impl From for Ipld { - fn from(notification: ReceiptQuorumSuccessDht) -> Self { - let map: BTreeMap = BTreeMap::from([ - (TIMESTAMP_KEY.into(), notification.timestamp.into()), - (CID_KEY.into(), notification.cid.into()), - (QUORUM_KEY.into(), notification.quorum.into()), - ]); - - Ipld::Map(map) - } -} - -impl TryFrom for ReceiptQuorumSuccessDht { - type Error = anyhow::Error; - - fn try_from(ipld: Ipld) -> Result { - let map = from_ipld::>(ipld)?; - - let timestamp = from_ipld( - map.get(TIMESTAMP_KEY) - .ok_or_else(|| anyhow!("missing {TIMESTAMP_KEY}"))? - .to_owned(), - )?; - - let cid = from_ipld( - map.get(CID_KEY) - .ok_or_else(|| anyhow!("missing {CID_KEY}"))? - .to_owned(), - )?; - - let quorum = from_ipld( - map.get(QUORUM_KEY) - .ok_or_else(|| anyhow!("missing {QUORUM_KEY}"))? 
- .to_owned(), - )?; - - Ok(ReceiptQuorumSuccessDht { - timestamp, - cid, - quorum, - }) - } -} - -#[derive(Debug, Clone, JsonSchema)] -#[schemars(rename = "receipt_quorum_failure_dht")] -pub struct ReceiptQuorumFailureDht { - timestamp: i64, - #[schemars(description = "Receipt CID")] - cid: String, - #[schemars(description = "Number of peers required for quorum")] - quorum: usize, - #[schemars(description = "Number of connected peers")] - connected_peer_count: usize, - #[schemars(description = "Peers participating in quorum")] - stored_to_peers: Vec, -} - -impl ReceiptQuorumFailureDht { - pub(crate) fn new( - cid: FastStr, - quorum: usize, - connected_peer_count: usize, - stored_to_peers: Vec, - ) -> ReceiptQuorumFailureDht { - ReceiptQuorumFailureDht { - timestamp: Utc::now().timestamp_millis(), - cid: cid.to_string(), - quorum, - connected_peer_count, - stored_to_peers: stored_to_peers.iter().map(|p| p.to_string()).collect(), - } - } -} - -impl DagJson for ReceiptQuorumFailureDht {} - -impl From for Ipld { - fn from(notification: ReceiptQuorumFailureDht) -> Self { - let map: BTreeMap = BTreeMap::from([ - (TIMESTAMP_KEY.into(), notification.timestamp.into()), - (CID_KEY.into(), notification.cid.into()), - (QUORUM_KEY.into(), notification.quorum.into()), - ( - CONNECTED_PEER_COUNT_KEY.into(), - notification.connected_peer_count.into(), - ), - ( - STORED_TO_PEERS_KEY.into(), - Ipld::List( - notification - .stored_to_peers - .iter() - .map(|p| Ipld::String(p.to_string())) - .collect(), - ), - ), - ]); - - Ipld::Map(map) - } -} - -impl TryFrom for ReceiptQuorumFailureDht { - type Error = anyhow::Error; - - fn try_from(ipld: Ipld) -> Result { - let map = from_ipld::>(ipld)?; - - let timestamp = from_ipld( - map.get(TIMESTAMP_KEY) - .ok_or_else(|| anyhow!("missing {TIMESTAMP_KEY}"))? - .to_owned(), - )?; - - let cid = from_ipld( - map.get(CID_KEY) - .ok_or_else(|| anyhow!("missing {CID_KEY}"))? 
- .to_owned(), - )?; - - let quorum = from_ipld( - map.get(QUORUM_KEY) - .ok_or_else(|| anyhow!("missing {QUORUM_KEY}"))? - .to_owned(), - )?; - - let connected_peer_count = from_ipld( - map.get(CONNECTED_PEER_COUNT_KEY) - .ok_or_else(|| anyhow!("missing {CONNECTED_PEER_COUNT_KEY}"))? - .to_owned(), - )?; - - let stored_to_peers = from_ipld( - map.get(STORED_TO_PEERS_KEY) - .ok_or_else(|| anyhow!("missing {STORED_TO_PEERS_KEY}"))? - .to_owned(), - )?; - - Ok(ReceiptQuorumFailureDht { - timestamp, - cid, - quorum, - connected_peer_count, - stored_to_peers, - }) - } -} - -#[derive(Debug, Clone, JsonSchema)] -#[schemars(rename = "workflow_info_quorum_success_dht")] -pub struct WorkflowInfoQuorumSuccessDht { - timestamp: i64, - #[schemars(description = "Workflow info CID")] - cid: String, - #[schemars(description = "Number of peers participating in quorum")] - quorum: usize, -} - -impl WorkflowInfoQuorumSuccessDht { - pub(crate) fn new(cid: FastStr, quorum: usize) -> WorkflowInfoQuorumSuccessDht { - WorkflowInfoQuorumSuccessDht { - timestamp: Utc::now().timestamp_millis(), - cid: cid.to_string(), - quorum, - } - } -} - -impl DagJson for WorkflowInfoQuorumSuccessDht {} - -impl From for Ipld { - fn from(notification: WorkflowInfoQuorumSuccessDht) -> Self { - let map: BTreeMap = BTreeMap::from([ - (TIMESTAMP_KEY.into(), notification.timestamp.into()), - (CID_KEY.into(), notification.cid.into()), - (QUORUM_KEY.into(), notification.quorum.into()), - ]); - - Ipld::Map(map) - } -} - -impl TryFrom for WorkflowInfoQuorumSuccessDht { - type Error = anyhow::Error; - - fn try_from(ipld: Ipld) -> Result { - let map = from_ipld::>(ipld)?; - - let timestamp = from_ipld( - map.get(TIMESTAMP_KEY) - .ok_or_else(|| anyhow!("missing {TIMESTAMP_KEY}"))? - .to_owned(), - )?; - - let cid = from_ipld( - map.get(CID_KEY) - .ok_or_else(|| anyhow!("missing {CID_KEY}"))? - .to_owned(), - )?; - - let quorum = from_ipld( - map.get(QUORUM_KEY) - .ok_or_else(|| anyhow!("missing {QUORUM_KEY}"))? 
- .to_owned(), - )?; - - Ok(WorkflowInfoQuorumSuccessDht { - timestamp, - cid, - quorum, - }) - } -} - -#[derive(Debug, Clone, JsonSchema)] -#[schemars(rename = "workflow_info_quorum_failure_dht")] -pub struct WorkflowInfoQuorumFailureDht { - timestamp: i64, - #[schemars(description = "Workflow info CID")] - cid: String, - #[schemars(description = "Number of peers required for quorum")] - quorum: usize, - #[schemars(description = "Number of connected peers")] - connected_peer_count: usize, - #[schemars(description = "Peers participating in quorum")] - stored_to_peers: Vec, -} - -impl WorkflowInfoQuorumFailureDht { - pub(crate) fn new( - cid: FastStr, - quorum: usize, - connected_peer_count: usize, - stored_to_peers: Vec, - ) -> WorkflowInfoQuorumFailureDht { - WorkflowInfoQuorumFailureDht { - timestamp: Utc::now().timestamp_millis(), - cid: cid.to_string(), - quorum, - connected_peer_count, - stored_to_peers: stored_to_peers.iter().map(|p| p.to_string()).collect(), - } - } -} - -impl DagJson for WorkflowInfoQuorumFailureDht {} - -impl From for Ipld { - fn from(notification: WorkflowInfoQuorumFailureDht) -> Self { - let map: BTreeMap = BTreeMap::from([ - (TIMESTAMP_KEY.into(), notification.timestamp.into()), - (CID_KEY.into(), notification.cid.into()), - (QUORUM_KEY.into(), notification.quorum.into()), - ( - CONNECTED_PEER_COUNT_KEY.into(), - notification.connected_peer_count.into(), - ), - ( - STORED_TO_PEERS_KEY.into(), - Ipld::List( - notification - .stored_to_peers - .iter() - .map(|p| Ipld::String(p.to_string())) - .collect(), - ), - ), - ]); - - Ipld::Map(map) - } -} - -impl TryFrom for WorkflowInfoQuorumFailureDht { - type Error = anyhow::Error; - - fn try_from(ipld: Ipld) -> Result { - let map = from_ipld::>(ipld)?; - - let timestamp = from_ipld( - map.get(TIMESTAMP_KEY) - .ok_or_else(|| anyhow!("missing {TIMESTAMP_KEY}"))? - .to_owned(), - )?; - - let cid = from_ipld( - map.get(CID_KEY) - .ok_or_else(|| anyhow!("missing {CID_KEY}"))? 
- .to_owned(), - )?; - - let quorum = from_ipld( - map.get(QUORUM_KEY) - .ok_or_else(|| anyhow!("missing {QUORUM_KEY}"))? - .to_owned(), - )?; - - let connected_peer_count = from_ipld( - map.get(CONNECTED_PEER_COUNT_KEY) - .ok_or_else(|| anyhow!("missing {CONNECTED_PEER_COUNT_KEY}"))? - .to_owned(), - )?; - - let stored_to_peers = from_ipld( - map.get(STORED_TO_PEERS_KEY) - .ok_or_else(|| anyhow!("missing {STORED_TO_PEERS_KEY}"))? - .to_owned(), - )?; - - Ok(WorkflowInfoQuorumFailureDht { - timestamp, - cid, - quorum, - connected_peer_count, - stored_to_peers, - }) - } -} - -#[derive(Debug, Clone, JsonSchema)] -#[schemars(rename = "sent_workflow_info")] -pub struct SentWorkflowInfo { - timestamp: i64, - #[schemars(description = "Peer that requested workflow info")] - requestor: String, - #[schemars(description = "Workflow info CID")] - cid: String, - #[schemars(description = "Optional workflow name")] - name: Option, - #[schemars(description = "Number of tasks in workflow")] - num_tasks: u32, - #[schemars(description = "Completed task CIDs")] - progress: Vec, - #[schemars(description = "Number of workflow tasks completed")] - progress_count: u32, -} - -impl SentWorkflowInfo { - pub(crate) fn new( - requestor: PeerId, - cid: Cid, - name: Option, - num_tasks: u32, - progress: Vec, - progress_count: u32, - ) -> SentWorkflowInfo { - SentWorkflowInfo { - requestor: requestor.to_string(), - timestamp: Utc::now().timestamp_millis(), - cid: cid.to_string(), - name: name.map(|n| n.into()), - num_tasks, - progress: progress.iter().map(|cid| cid.to_string()).collect(), - progress_count, - } - } -} - -impl DagJson for SentWorkflowInfo {} - -impl From for Ipld { - fn from(notification: SentWorkflowInfo) -> Self { - let map: BTreeMap = BTreeMap::from([ - (TIMESTAMP_KEY.into(), notification.timestamp.into()), - (REQUESTOR_KEY.into(), notification.requestor.into()), - (CID_KEY.into(), notification.cid.into()), - ( - NAME_KEY.into(), - notification - .name - .map(|peer_id| 
peer_id.into()) - .unwrap_or(Ipld::Null), - ), - (NUM_TASKS_KEY.into(), notification.num_tasks.into()), - ( - PROGRESS_KEY.into(), - Ipld::List( - notification - .progress - .iter() - .map(|cid| Ipld::String(cid.to_string())) - .collect(), - ), - ), - ( - PROGRESS_COUNT_KEY.into(), - notification.progress_count.into(), - ), - ]); - - Ipld::Map(map) - } -} - -impl TryFrom for SentWorkflowInfo { - type Error = anyhow::Error; - - fn try_from(ipld: Ipld) -> Result { - let map = from_ipld::>(ipld)?; - - let timestamp = from_ipld( - map.get(TIMESTAMP_KEY) - .ok_or_else(|| anyhow!("missing {TIMESTAMP_KEY}"))? - .to_owned(), - )?; - - let requestor = from_ipld( - map.get(REQUESTOR_KEY) - .ok_or_else(|| anyhow!("missing {REQUESTOR_KEY}"))? - .to_owned(), - )?; - - let cid = from_ipld( - map.get(CID_KEY) - .ok_or_else(|| anyhow!("missing {CID_KEY}"))? - .to_owned(), - )?; - - let name = map - .get(NAME_KEY) - .and_then(|ipld| match ipld { - Ipld::Null => None, - ipld => Some(ipld), - }) - .and_then(|ipld| from_ipld(ipld.to_owned()).ok()); - - let num_tasks = from_ipld( - map.get(NUM_TASKS_KEY) - .ok_or_else(|| anyhow!("missing {NUM_TASKS_KEY}"))? - .to_owned(), - )?; - - let progress = from_ipld::>( - map.get(PROGRESS_KEY) - .ok_or_else(|| anyhow!("missing {PROGRESS_KEY}"))? - .to_owned(), - )?; - - let progress_count = from_ipld( - map.get(PROGRESS_COUNT_KEY) - .ok_or_else(|| anyhow!("missing {PROGRESS_COUNT_KEY}"))? 
- .to_owned(), - )?; - - Ok(SentWorkflowInfo { - timestamp, - requestor, - cid, - name, - num_tasks, - progress, - progress_count, - }) - } -} - -#[derive(Debug, Clone, JsonSchema)] -#[schemars(rename = "received_workflow_info")] -pub struct ReceivedWorkflowInfo { - timestamp: i64, - #[schemars(description = "Workflow info provider peer ID")] - provider: Option, - #[schemars(description = "Workflow info CID")] - cid: String, - #[schemars(description = "Optional workflow name")] - name: Option, - #[schemars(description = "Number of tasks in workflow")] - num_tasks: u32, - #[schemars(description = "Completed task CIDs")] - progress: Vec, - #[schemars(description = "Number of workflow tasks completed")] - progress_count: u32, -} - -impl ReceivedWorkflowInfo { - pub(crate) fn new( - provider: Option, - cid: Cid, - name: Option, - num_tasks: u32, - progress: Vec, - progress_count: u32, - ) -> ReceivedWorkflowInfo { - ReceivedWorkflowInfo { - timestamp: Utc::now().timestamp_millis(), - provider: provider.map(|p| p.to_string()), - cid: cid.to_string(), - name: name.map(|n| n.into()), - num_tasks, - progress: progress.iter().map(|cid| cid.to_string()).collect(), - progress_count, - } - } -} - -impl DagJson for ReceivedWorkflowInfo {} - -impl From for Ipld { - fn from(notification: ReceivedWorkflowInfo) -> Self { - let map: BTreeMap = BTreeMap::from([ - (TIMESTAMP_KEY.into(), notification.timestamp.into()), - ( - PROVIDER_KEY.into(), - notification - .provider - .map(|peer_id| peer_id.into()) - .unwrap_or(Ipld::Null), - ), - (CID_KEY.into(), notification.cid.into()), - ( - NAME_KEY.into(), - notification - .name - .map(|peer_id| peer_id.into()) - .unwrap_or(Ipld::Null), - ), - (NUM_TASKS_KEY.into(), notification.num_tasks.into()), - ( - PROGRESS_KEY.into(), - Ipld::List( - notification - .progress - .iter() - .map(|cid| Ipld::String(cid.to_string())) - .collect(), - ), - ), - ( - PROGRESS_COUNT_KEY.into(), - notification.progress_count.into(), - ), - ]); - - Ipld::Map(map) 
- } -} - -impl TryFrom for ReceivedWorkflowInfo { - type Error = anyhow::Error; - - fn try_from(ipld: Ipld) -> Result { - let map = from_ipld::>(ipld)?; - - let timestamp = from_ipld( - map.get(TIMESTAMP_KEY) - .ok_or_else(|| anyhow!("missing {TIMESTAMP_KEY}"))? - .to_owned(), - )?; - - let provider = map - .get(PROVIDER_KEY) - .and_then(|ipld| match ipld { - Ipld::Null => None, - ipld => Some(ipld), - }) - .and_then(|ipld| from_ipld(ipld.to_owned()).ok()); - - let cid = from_ipld( - map.get(CID_KEY) - .ok_or_else(|| anyhow!("missing {CID_KEY}"))? - .to_owned(), - )?; - - let name = map - .get(NAME_KEY) - .and_then(|ipld| match ipld { - Ipld::Null => None, - ipld => Some(ipld), - }) - .and_then(|ipld| from_ipld(ipld.to_owned()).ok()); - - let num_tasks = from_ipld( - map.get(NUM_TASKS_KEY) - .ok_or_else(|| anyhow!("missing {NUM_TASKS_KEY}"))? - .to_owned(), - )?; - - let progress = from_ipld::>( - map.get(PROGRESS_KEY) - .ok_or_else(|| anyhow!("missing {PROGRESS_KEY}"))? - .to_owned(), - )?; - - let progress_count = from_ipld( - map.get(PROGRESS_COUNT_KEY) - .ok_or_else(|| anyhow!("missing {PROGRESS_COUNT_KEY}"))? 
- .to_owned(), - )?; - - Ok(ReceivedWorkflowInfo { - timestamp, - provider, - cid, - name, - num_tasks, - progress, - progress_count, - }) - } -} - -#[cfg(test)] -mod test { - use super::*; - use homestar_invocation::test_utils::cid::generate_cid; use rand::thread_rng; use std::str::FromStr; @@ -2321,97 +490,97 @@ mod test { vec![ ( - new_listen_addr.timestamp, + new_listen_addr.timestamp().to_owned(), NetworkNotification::NewListenAddr(new_listen_addr), ), ( - connection_established.timestamp, + connection_established.timestamp().to_owned(), NetworkNotification::ConnnectionEstablished(connection_established), ), ( - connection_closed.timestamp, + connection_closed.timestamp().to_owned(), NetworkNotification::ConnnectionClosed(connection_closed), ), ( - outgoing_connection_error.timestamp, + outgoing_connection_error.timestamp().to_owned(), NetworkNotification::OutgoingConnectionError(outgoing_connection_error), ), ( - incoming_connection_error.timestamp, + incoming_connection_error.timestamp().to_owned(), NetworkNotification::IncomingConnectionError(incoming_connection_error), ), ( - discovered_mdns.timestamp, + discovered_mdns.timestamp().to_owned(), NetworkNotification::DiscoveredMdns(discovered_mdns), ), ( - discovered_rendezvous.timestamp, + discovered_rendezvous.timestamp().to_owned(), NetworkNotification::DiscoveredRendezvous(discovered_rendezvous), ), ( - registered_rendezvous.timestamp, + registered_rendezvous.timestamp().to_owned(), NetworkNotification::RegisteredRendezvous(registered_rendezvous), ), ( - discover_served_rendezvous.timestamp, + discover_served_rendezvous.timestamp().to_owned(), NetworkNotification::DiscoverServedRendezvous(discover_served_rendezvous), ), ( - peer_registered_rendezvous.timestamp, + peer_registered_rendezvous.timestamp().to_owned(), NetworkNotification::PeerRegisteredRendezvous(peer_registered_rendezvous), ), ( - published_receipt_pubsub.timestamp, + published_receipt_pubsub.timestamp().to_owned(), 
NetworkNotification::PublishedReceiptPubsub(published_receipt_pubsub), ), ( - received_receipt_pubsub.timestamp, + received_receipt_pubsub.timestamp().to_owned(), NetworkNotification::ReceivedReceiptPubsub(received_receipt_pubsub), ), ( - put_receipt_dht.timestamp, + put_receipt_dht.timestamp().to_owned(), NetworkNotification::PutReceiptDht(put_receipt_dht), ), ( - got_receipt_dht.timestamp, + got_receipt_dht.timestamp().to_owned(), NetworkNotification::GotReceiptDht(got_receipt_dht), ), ( - put_workflow_info_dht.timestamp, + put_workflow_info_dht.timestamp().to_owned(), NetworkNotification::PutWorkflowInfoDht(put_workflow_info_dht), ), ( - got_workflow_info_dht.timestamp, + got_workflow_info_dht.timestamp().to_owned(), NetworkNotification::GotWorkflowInfoDht(got_workflow_info_dht), ), ( - receipt_quorum_success_dht.timestamp, + receipt_quorum_success_dht.timestamp().to_owned(), NetworkNotification::ReceiptQuorumSuccessDht(receipt_quorum_success_dht), ), ( - receipt_quorum_failure_dht.timestamp, + receipt_quorum_failure_dht.timestamp().to_owned(), NetworkNotification::ReceiptQuorumFailureDht(receipt_quorum_failure_dht), ), ( - workflow_info_quorum_success_dht.timestamp, + workflow_info_quorum_success_dht.timestamp().to_owned(), NetworkNotification::WorkflowInfoQuorumSuccessDht(workflow_info_quorum_success_dht), ), ( - workflow_info_quorum_failure_dht.timestamp, + workflow_info_quorum_failure_dht.timestamp().to_owned(), NetworkNotification::WorkflowInfoQuorumFailureDht(workflow_info_quorum_failure_dht), ), ( - sent_workflow_info.timestamp, + sent_workflow_info.timestamp().to_owned(), NetworkNotification::SentWorkflowInfo(sent_workflow_info), ), ( - received_workflow_info.timestamp, + received_workflow_info.timestamp().to_owned(), NetworkNotification::ReceivedWorkflowInfo(received_workflow_info), ), ] } - fn check_notification(timestamp: i64, notification: NetworkNotification, fixtures: Fixtures) { + fn check_notification(timestamp: &i64, notification: 
NetworkNotification, fixtures: Fixtures) { let Fixtures { address, addresses, @@ -2431,36 +600,36 @@ mod test { match notification { NetworkNotification::NewListenAddr(n) => { - assert_eq!(n.timestamp, timestamp); - assert_eq!(PeerId::from_str(&n.peer_id).unwrap(), peer_id); - assert_eq!(Multiaddr::from_str(&n.address).unwrap(), address); + assert_eq!(n.timestamp(), timestamp); + assert_eq!(PeerId::from_str(n.peer_id()).unwrap(), peer_id); + assert_eq!(Multiaddr::from_str(n.address()).unwrap(), address); } NetworkNotification::ConnnectionEstablished(n) => { - assert_eq!(n.timestamp, timestamp); - assert_eq!(PeerId::from_str(&n.peer_id).unwrap(), peer_id); - assert_eq!(Multiaddr::from_str(&n.address).unwrap(), address); + assert_eq!(n.timestamp(), timestamp); + assert_eq!(PeerId::from_str(n.peer_id()).unwrap(), peer_id); + assert_eq!(Multiaddr::from_str(n.address()).unwrap(), address); } NetworkNotification::ConnnectionClosed(n) => { - assert_eq!(n.timestamp, timestamp); - assert_eq!(PeerId::from_str(&n.peer_id).unwrap(), peer_id); - assert_eq!(Multiaddr::from_str(&n.address).unwrap(), address); + assert_eq!(n.timestamp(), timestamp); + assert_eq!(PeerId::from_str(n.peer_id()).unwrap(), peer_id); + assert_eq!(Multiaddr::from_str(n.address()).unwrap(), address); } NetworkNotification::OutgoingConnectionError(n) => { - assert_eq!(n.timestamp, timestamp); + assert_eq!(n.timestamp(), timestamp); assert_eq!( - n.peer_id.map(|p| PeerId::from_str(&p).unwrap()), + n.peer_id().as_ref().map(|p| PeerId::from_str(&p).unwrap()), Some(peer_id) ); - assert_eq!(n.error, DialError::NoAddresses.to_string()); + assert_eq!(n.error().to_string(), DialError::NoAddresses.to_string()); } NetworkNotification::IncomingConnectionError(n) => { - assert_eq!(n.timestamp, timestamp); - assert_eq!(n.error, ListenError::Aborted.to_string()); + assert_eq!(n.timestamp(), timestamp); + assert_eq!(n.error().to_string(), ListenError::Aborted.to_string()); } NetworkNotification::DiscoveredMdns(n) => { - 
assert_eq!(n.timestamp, timestamp); + assert_eq!(n.timestamp(), timestamp); - for peer in n.peers { + for peer in n.peers() { assert_eq!( Multiaddr::from_str(&peer.1).unwrap(), peers_map[&PeerId::from_str(&peer.0).unwrap()] @@ -2468,10 +637,10 @@ mod test { } } NetworkNotification::DiscoveredRendezvous(n) => { - assert_eq!(n.timestamp, timestamp); - assert_eq!(PeerId::from_str(&n.server).unwrap(), peer_id); + assert_eq!(n.timestamp(), timestamp); + assert_eq!(PeerId::from_str(n.server()).unwrap(), peer_id); - for peer in n.peers { + for peer in n.peers() { assert_eq!( peer.1 .iter() @@ -2482,18 +651,18 @@ mod test { } } NetworkNotification::RegisteredRendezvous(n) => { - assert_eq!(n.timestamp, timestamp); - assert_eq!(PeerId::from_str(&n.server).unwrap(), peer_id); + assert_eq!(n.timestamp(), timestamp); + assert_eq!(PeerId::from_str(n.server()).unwrap(), peer_id); } NetworkNotification::DiscoverServedRendezvous(n) => { - assert_eq!(n.timestamp, timestamp); - assert_eq!(PeerId::from_str(&n.enquirer).unwrap(), peer_id); + assert_eq!(n.timestamp(), timestamp); + assert_eq!(PeerId::from_str(n.enquirer()).unwrap(), peer_id); } NetworkNotification::PeerRegisteredRendezvous(n) => { - assert_eq!(n.timestamp, timestamp); - assert_eq!(PeerId::from_str(&n.peer_id).unwrap(), peer_id); + assert_eq!(n.timestamp(), timestamp); + assert_eq!(PeerId::from_str(n.peer_id()).unwrap(), peer_id); assert_eq!( - n.addresses + n.addresses() .iter() .map(|address| Multiaddr::from_str(address).unwrap()) .collect::>(), @@ -2501,74 +670,78 @@ mod test { ); } NetworkNotification::PublishedReceiptPubsub(n) => { - assert_eq!(n.timestamp, timestamp); - assert_eq!(Cid::from_str(&n.cid).unwrap(), cid); - assert_eq!(Cid::from_str(&n.ran).unwrap(), ran); + assert_eq!(n.timestamp(), timestamp); + assert_eq!(Cid::from_str(n.cid()).unwrap(), cid); + assert_eq!(Cid::from_str(n.ran()).unwrap(), ran); } NetworkNotification::ReceivedReceiptPubsub(n) => { - assert_eq!(n.timestamp, timestamp); - 
assert_eq!(PeerId::from_str(&n.publisher).unwrap(), peer_id); - assert_eq!(Cid::from_str(&n.cid).unwrap(), cid); - assert_eq!(Cid::from_str(&n.ran).unwrap(), ran); + assert_eq!(n.timestamp(), timestamp); + assert_eq!(PeerId::from_str(n.publisher()).unwrap(), peer_id); + assert_eq!(Cid::from_str(n.cid()).unwrap(), cid); + assert_eq!(Cid::from_str(n.ran()).unwrap(), ran); } NetworkNotification::PutReceiptDht(n) => { - assert_eq!(n.timestamp, timestamp); - assert_eq!(Cid::from_str(&n.cid).unwrap(), cid); - assert_eq!(Cid::from_str(&n.ran).unwrap(), ran); + assert_eq!(n.timestamp(), timestamp); + assert_eq!(Cid::from_str(n.cid()).unwrap(), cid); + assert_eq!(Cid::from_str(n.ran()).unwrap(), ran); } NetworkNotification::GotReceiptDht(n) => { - assert_eq!(n.timestamp, timestamp); + assert_eq!(n.timestamp(), timestamp); assert_eq!( - n.publisher.map(|p| PeerId::from_str(&p).unwrap()), + n.publisher() + .as_ref() + .map(|p| PeerId::from_str(&p).unwrap()), Some(peer_id) ); - assert_eq!(Cid::from_str(&n.cid).unwrap(), cid); - assert_eq!(Cid::from_str(&n.ran).unwrap(), ran); + assert_eq!(Cid::from_str(n.cid()).unwrap(), cid); + assert_eq!(Cid::from_str(n.ran()).unwrap(), ran); } NetworkNotification::PutWorkflowInfoDht(n) => { - assert_eq!(n.timestamp, timestamp); - assert_eq!(Cid::from_str(&n.cid).unwrap(), cid); - assert_eq!(n.name.map(|name| FastStr::new(name)), Some(name)); - assert_eq!(n.num_tasks, num_tasks); + assert_eq!(n.timestamp(), timestamp); + assert_eq!(Cid::from_str(n.cid()).unwrap(), cid); + assert_eq!(n.name().as_ref().map(|name| FastStr::new(name)), Some(name)); + assert_eq!(n.num_tasks(), &num_tasks); assert_eq!( - n.progress + n.progress() .iter() .map(|cid| Cid::from_str(&cid).unwrap()) .collect::>(), progress ); - assert_eq!(n.progress_count, progress_count); + assert_eq!(n.progress_count(), &progress_count); } NetworkNotification::GotWorkflowInfoDht(n) => { - assert_eq!(n.timestamp, timestamp); + assert_eq!(n.timestamp(), timestamp); assert_eq!( - 
n.publisher.map(|p| PeerId::from_str(&p).unwrap()), + n.publisher() + .as_ref() + .map(|p| PeerId::from_str(&p).unwrap()), Some(peer_id) ); - assert_eq!(Cid::from_str(&n.cid).unwrap(), cid); - assert_eq!(n.name.map(|name| FastStr::new(name)), Some(name)); - assert_eq!(n.num_tasks, num_tasks); + assert_eq!(Cid::from_str(&n.cid()).unwrap(), cid); + assert_eq!(n.name().as_ref().map(|name| FastStr::new(name)), Some(name)); + assert_eq!(n.num_tasks(), &num_tasks); assert_eq!( - n.progress + n.progress() .iter() .map(|cid| Cid::from_str(&cid).unwrap()) .collect::>(), progress ); - assert_eq!(n.progress_count, progress_count); + assert_eq!(n.progress_count(), &progress_count); } NetworkNotification::ReceiptQuorumSuccessDht(n) => { - assert_eq!(n.timestamp, timestamp); - assert_eq!(FastStr::new(n.cid), FastStr::new(cid.to_string())); - assert_eq!(n.quorum, quorum); + assert_eq!(n.timestamp(), timestamp); + assert_eq!(FastStr::new(n.cid()), FastStr::new(cid.to_string())); + assert_eq!(n.quorum(), &quorum); } NetworkNotification::ReceiptQuorumFailureDht(n) => { - assert_eq!(n.timestamp, timestamp); - assert_eq!(FastStr::new(n.cid), FastStr::new(cid.to_string())); - assert_eq!(n.quorum, quorum); - assert_eq!(n.connected_peer_count, connected_peer_count); + assert_eq!(n.timestamp(), timestamp); + assert_eq!(FastStr::new(n.cid()), FastStr::new(cid.to_string())); + assert_eq!(n.quorum(), &quorum); + assert_eq!(n.connected_peer_count(), &connected_peer_count); assert_eq!( - n.stored_to_peers + n.stored_to_peers() .iter() .map(|p| PeerId::from_str(p).unwrap()) .collect::>(), @@ -2576,17 +749,17 @@ mod test { ); } NetworkNotification::WorkflowInfoQuorumSuccessDht(n) => { - assert_eq!(n.timestamp, timestamp); - assert_eq!(FastStr::new(n.cid), FastStr::new(cid.to_string())); - assert_eq!(n.quorum, quorum); + assert_eq!(n.timestamp(), timestamp); + assert_eq!(FastStr::new(n.cid()), FastStr::new(cid.to_string())); + assert_eq!(n.quorum(), &quorum); } 
NetworkNotification::WorkflowInfoQuorumFailureDht(n) => { - assert_eq!(n.timestamp, timestamp); - assert_eq!(FastStr::new(n.cid), FastStr::new(cid.to_string())); - assert_eq!(n.quorum, quorum); - assert_eq!(n.connected_peer_count, connected_peer_count); + assert_eq!(n.timestamp(), timestamp); + assert_eq!(FastStr::new(n.cid()), FastStr::new(cid.to_string())); + assert_eq!(n.quorum(), &quorum); + assert_eq!(n.connected_peer_count(), &connected_peer_count); assert_eq!( - n.stored_to_peers + n.stored_to_peers() .iter() .map(|p| PeerId::from_str(p).unwrap()) .collect::>(), @@ -2594,37 +767,37 @@ mod test { ); } NetworkNotification::SentWorkflowInfo(n) => { - assert_eq!(n.timestamp, timestamp); - assert_eq!(PeerId::from_str(&n.requestor).unwrap(), peer_id); - assert_eq!(Cid::from_str(&n.cid).unwrap(), cid); - assert_eq!(n.name.map(|name| FastStr::new(name)), Some(name)); - assert_eq!(n.num_tasks, num_tasks); + assert_eq!(n.timestamp(), timestamp); + assert_eq!(PeerId::from_str(&n.requestor()).unwrap(), peer_id); + assert_eq!(Cid::from_str(n.cid()).unwrap(), cid); + assert_eq!(n.name().as_ref().map(|name| FastStr::new(name)), Some(name)); + assert_eq!(n.num_tasks(), &num_tasks); assert_eq!( - n.progress + n.progress() .iter() .map(|cid| Cid::from_str(&cid).unwrap()) .collect::>(), progress ); - assert_eq!(n.progress_count, progress_count); + assert_eq!(n.progress_count(), &progress_count); } NetworkNotification::ReceivedWorkflowInfo(n) => { - assert_eq!(n.timestamp, timestamp); + assert_eq!(n.timestamp(), timestamp); assert_eq!( - n.provider.map(|p| PeerId::from_str(&p).unwrap()), + n.provider().as_ref().map(|p| PeerId::from_str(&p).unwrap()), Some(peer_id) ); - assert_eq!(Cid::from_str(&n.cid).unwrap(), cid); - assert_eq!(n.name.map(|name| FastStr::new(name)), Some(name)); - assert_eq!(n.num_tasks, num_tasks); + assert_eq!(Cid::from_str(n.cid()).unwrap(), cid); + assert_eq!(n.name().as_ref().map(|name| FastStr::new(name)), Some(name)); + assert_eq!(n.num_tasks(), 
&num_tasks); assert_eq!( - n.progress + n.progress() .iter() .map(|cid| Cid::from_str(&cid).unwrap()) .collect::>(), progress ); - assert_eq!(n.progress_count, progress_count); + assert_eq!(n.progress_count(), &progress_count); } } } @@ -2642,7 +815,7 @@ mod test { // Convert notifications back and check them for (timestamp, bytes) in notifications { check_notification( - timestamp, + ×tamp, NetworkNotification::from_json(bytes.as_ref()).unwrap(), fixtures.clone(), ) @@ -2662,7 +835,7 @@ mod test { // Convert notifications back and check them for (timestamp, json) in notifications { check_notification( - timestamp, + ×tamp, NetworkNotification::from_json_string(json).unwrap(), fixtures.clone(), ) diff --git a/homestar-runtime/src/event_handler/notification/network/connection.rs b/homestar-runtime/src/event_handler/notification/network/connection.rs new file mode 100644 index 00000000..59ddc31a --- /dev/null +++ b/homestar-runtime/src/event_handler/notification/network/connection.rs @@ -0,0 +1,325 @@ +//! Notification types for [swarm] connection events. +//! +//! 
[swarm]: libp2p::swarm::Swarm + +use anyhow::anyhow; +use chrono::prelude::Utc; +use derive_getters::Getters; +use homestar_invocation::ipld::DagJson; +use libipld::{serde::from_ipld, Ipld}; +use libp2p::{ + swarm::{DialError, ListenError}, + Multiaddr, PeerId, +}; +use schemars::JsonSchema; +use std::collections::BTreeMap; + +const ADDRESS_KEY: &str = "address"; +const ERROR_KEY: &str = "error"; +const PEER_KEY: &str = "peer_id"; +const TIMESTAMP_KEY: &str = "timestamp"; + +#[derive(Debug, Clone, Getters, JsonSchema)] +#[schemars(rename = "new_listen_addr")] +pub struct NewListenAddr { + timestamp: i64, + peer_id: String, + address: String, +} + +impl NewListenAddr { + pub(crate) fn new(peer_id: PeerId, address: Multiaddr) -> NewListenAddr { + NewListenAddr { + timestamp: Utc::now().timestamp_millis(), + peer_id: peer_id.to_string(), + address: address.to_string(), + } + } +} + +impl DagJson for NewListenAddr {} + +impl From for Ipld { + fn from(notification: NewListenAddr) -> Self { + Ipld::Map(BTreeMap::from([ + (TIMESTAMP_KEY.into(), notification.timestamp.into()), + (PEER_KEY.into(), notification.peer_id.into()), + (ADDRESS_KEY.into(), notification.address.into()), + ])) + } +} + +impl TryFrom for NewListenAddr { + type Error = anyhow::Error; + + fn try_from(ipld: Ipld) -> Result { + let map = from_ipld::>(ipld)?; + + let timestamp = from_ipld( + map.get(TIMESTAMP_KEY) + .ok_or_else(|| anyhow!("missing {TIMESTAMP_KEY}"))? + .to_owned(), + )?; + + let peer_id = from_ipld( + map.get(PEER_KEY) + .ok_or_else(|| anyhow!("missing {PEER_KEY}"))? + .to_owned(), + )?; + + let address = from_ipld( + map.get(ADDRESS_KEY) + .ok_or_else(|| anyhow!("missing {ADDRESS_KEY}"))? 
+ .to_owned(), + )?; + + Ok(NewListenAddr { + timestamp, + peer_id, + address, + }) + } +} + +#[derive(Debug, Clone, Getters, JsonSchema)] +#[schemars(rename = "connection_established")] +pub struct ConnectionEstablished { + timestamp: i64, + peer_id: String, + address: String, +} + +impl ConnectionEstablished { + pub(crate) fn new(peer_id: PeerId, address: Multiaddr) -> ConnectionEstablished { + ConnectionEstablished { + timestamp: Utc::now().timestamp_millis(), + peer_id: peer_id.to_string(), + address: address.to_string(), + } + } +} + +impl DagJson for ConnectionEstablished {} + +impl From for Ipld { + fn from(notification: ConnectionEstablished) -> Self { + Ipld::Map(BTreeMap::from([ + (TIMESTAMP_KEY.into(), notification.timestamp.into()), + (PEER_KEY.into(), notification.peer_id.into()), + (ADDRESS_KEY.into(), notification.address.into()), + ])) + } +} + +impl TryFrom for ConnectionEstablished { + type Error = anyhow::Error; + + fn try_from(ipld: Ipld) -> Result { + let map = from_ipld::>(ipld)?; + + let timestamp = from_ipld( + map.get(TIMESTAMP_KEY) + .ok_or_else(|| anyhow!("missing {TIMESTAMP_KEY}"))? + .to_owned(), + )?; + + let peer_id = from_ipld( + map.get(PEER_KEY) + .ok_or_else(|| anyhow!("missing {PEER_KEY}"))? + .to_owned(), + )?; + + let address = from_ipld( + map.get(ADDRESS_KEY) + .ok_or_else(|| anyhow!("missing {ADDRESS_KEY}"))? 
+ .to_owned(), + )?; + + Ok(ConnectionEstablished { + timestamp, + peer_id, + address, + }) + } +} + +#[derive(Debug, Clone, Getters, JsonSchema)] +#[schemars(rename = "connection_closed")] +pub struct ConnectionClosed { + timestamp: i64, + peer_id: String, + address: String, +} + +impl ConnectionClosed { + pub(crate) fn new(peer_id: PeerId, address: Multiaddr) -> ConnectionClosed { + ConnectionClosed { + timestamp: Utc::now().timestamp_millis(), + peer_id: peer_id.to_string(), + address: address.to_string(), + } + } +} + +impl DagJson for ConnectionClosed {} + +impl From for Ipld { + fn from(notification: ConnectionClosed) -> Self { + Ipld::Map(BTreeMap::from([ + (TIMESTAMP_KEY.into(), notification.timestamp.into()), + (PEER_KEY.into(), notification.peer_id.into()), + (ADDRESS_KEY.into(), notification.address.into()), + ])) + } +} + +impl TryFrom for ConnectionClosed { + type Error = anyhow::Error; + + fn try_from(ipld: Ipld) -> Result { + let map = from_ipld::>(ipld)?; + + let timestamp = from_ipld( + map.get(TIMESTAMP_KEY) + .ok_or_else(|| anyhow!("missing {TIMESTAMP_KEY}"))? + .to_owned(), + )?; + + let peer_id = from_ipld( + map.get(PEER_KEY) + .ok_or_else(|| anyhow!("missing {PEER_KEY}"))? + .to_owned(), + )?; + + let address = from_ipld( + map.get(ADDRESS_KEY) + .ok_or_else(|| anyhow!("missing {ADDRESS_KEY}"))? 
+ .to_owned(), + )?; + + Ok(ConnectionClosed { + timestamp, + peer_id, + address, + }) + } +} + +#[derive(Debug, Clone, Getters, JsonSchema)] +#[schemars(rename = "outgoing_connection_error")] +pub struct OutgoingConnectionError { + timestamp: i64, + peer_id: Option, + error: String, +} + +impl OutgoingConnectionError { + pub(crate) fn new(peer_id: Option, error: DialError) -> OutgoingConnectionError { + OutgoingConnectionError { + timestamp: Utc::now().timestamp_millis(), + peer_id: peer_id.map(|p| p.to_string()), + error: error.to_string(), + } + } +} + +impl DagJson for OutgoingConnectionError {} + +impl From for Ipld { + fn from(notification: OutgoingConnectionError) -> Self { + Ipld::Map(BTreeMap::from([ + (TIMESTAMP_KEY.into(), notification.timestamp.into()), + ( + PEER_KEY.into(), + notification + .peer_id + .map(|peer_id| peer_id.into()) + .unwrap_or(Ipld::Null), + ), + (ERROR_KEY.into(), notification.error.into()), + ])) + } +} + +impl TryFrom for OutgoingConnectionError { + type Error = anyhow::Error; + + fn try_from(ipld: Ipld) -> Result { + let map = from_ipld::>(ipld)?; + + let timestamp = from_ipld( + map.get(TIMESTAMP_KEY) + .ok_or_else(|| anyhow!("missing {TIMESTAMP_KEY}"))? + .to_owned(), + )?; + + let peer_id = map + .get(PEER_KEY) + .and_then(|ipld| match ipld { + Ipld::Null => None, + ipld => Some(ipld), + }) + .and_then(|ipld| from_ipld(ipld.to_owned()).ok()); + + let error = from_ipld( + map.get(ERROR_KEY) + .ok_or_else(|| anyhow!("missing {ERROR_KEY}"))? 
+ .to_owned(), + )?; + + Ok(OutgoingConnectionError { + timestamp, + peer_id, + error, + }) + } +} + +#[derive(Debug, Clone, Getters, JsonSchema)] +#[schemars(rename = "incoming_connection_error")] +pub struct IncomingConnectionError { + timestamp: i64, + error: String, +} + +impl IncomingConnectionError { + pub(crate) fn new(error: ListenError) -> IncomingConnectionError { + IncomingConnectionError { + timestamp: Utc::now().timestamp_millis(), + error: error.to_string(), + } + } +} + +impl DagJson for IncomingConnectionError {} + +impl From for Ipld { + fn from(notification: IncomingConnectionError) -> Self { + Ipld::Map(BTreeMap::from([ + (TIMESTAMP_KEY.into(), notification.timestamp.into()), + (ERROR_KEY.into(), notification.error.into()), + ])) + } +} + +impl TryFrom for IncomingConnectionError { + type Error = anyhow::Error; + + fn try_from(ipld: Ipld) -> Result { + let map = from_ipld::>(ipld)?; + + let timestamp = from_ipld( + map.get(TIMESTAMP_KEY) + .ok_or_else(|| anyhow!("missing {TIMESTAMP_KEY}"))? + .to_owned(), + )?; + + let error = from_ipld( + map.get(ERROR_KEY) + .ok_or_else(|| anyhow!("missing {ERROR_KEY}"))? + .to_owned(), + )?; + + Ok(IncomingConnectionError { timestamp, error }) + } +} diff --git a/homestar-runtime/src/event_handler/notification/network/dht.rs b/homestar-runtime/src/event_handler/notification/network/dht.rs new file mode 100644 index 00000000..abac34d2 --- /dev/null +++ b/homestar-runtime/src/event_handler/notification/network/dht.rs @@ -0,0 +1,788 @@ +//! Notification types for [swarm] DHT events. +//! +//! 
[swarm]: libp2p::swarm::Swarm + +use anyhow::anyhow; +use chrono::prelude::Utc; +use derive_getters::Getters; +use faststr::FastStr; +use homestar_invocation::ipld::DagJson; +use libipld::{serde::from_ipld, Cid, Ipld}; +use libp2p::PeerId; +use schemars::JsonSchema; +use std::collections::BTreeMap; + +const CID_KEY: &str = "cid"; +const CONNECTED_PEER_COUNT_KEY: &str = "connected_peer_count"; +const NAME_KEY: &str = "name"; +const NUM_TASKS_KEY: &str = "num_tasks"; +const PROGRESS_KEY: &str = "progress"; +const PROGRESS_COUNT_KEY: &str = "progress_count"; +const PUBLISHER_KEY: &str = "publisher"; +const QUORUM_KEY: &str = "quorum"; +const RAN_KEY: &str = "ran"; +const STORED_TO_PEERS_KEY: &str = "stored_to_peers"; +const TIMESTAMP_KEY: &str = "timestamp"; + +#[derive(Debug, Clone, Getters, JsonSchema)] +#[schemars(rename = "put_receipt_dht")] +pub struct PutReceiptDht { + timestamp: i64, + #[schemars(description = "Receipt CID")] + cid: String, + #[schemars(description = "Ran receipt CID")] + ran: String, +} + +impl PutReceiptDht { + pub(crate) fn new(cid: Cid, ran: String) -> PutReceiptDht { + PutReceiptDht { + timestamp: Utc::now().timestamp_millis(), + cid: cid.to_string(), + ran, + } + } +} + +impl DagJson for PutReceiptDht {} + +impl From for Ipld { + fn from(notification: PutReceiptDht) -> Self { + let map: BTreeMap = BTreeMap::from([ + (TIMESTAMP_KEY.into(), notification.timestamp.into()), + (CID_KEY.into(), notification.cid.into()), + (RAN_KEY.into(), notification.ran.into()), + ]); + + Ipld::Map(map) + } +} + +impl TryFrom for PutReceiptDht { + type Error = anyhow::Error; + + fn try_from(ipld: Ipld) -> Result { + let map = from_ipld::>(ipld)?; + + let timestamp = from_ipld( + map.get(TIMESTAMP_KEY) + .ok_or_else(|| anyhow!("missing {TIMESTAMP_KEY}"))? + .to_owned(), + )?; + + let cid = from_ipld( + map.get(CID_KEY) + .ok_or_else(|| anyhow!("missing {CID_KEY}"))? 
+ .to_owned(), + )?; + + let ran = from_ipld( + map.get(RAN_KEY) + .ok_or_else(|| anyhow!("missing {RAN_KEY}"))? + .to_owned(), + )?; + + Ok(PutReceiptDht { + timestamp, + cid, + ran, + }) + } +} + +#[derive(Debug, Clone, Getters, JsonSchema)] +#[schemars(rename = "got_receipt_dht")] +pub struct GotReceiptDht { + timestamp: i64, + #[schemars(description = "Receipt publisher peer ID")] + publisher: Option, + #[schemars(description = "Receipt CID")] + cid: String, + #[schemars(description = "Ran receipt CID")] + ran: String, +} + +impl GotReceiptDht { + pub(crate) fn new(publisher: Option, cid: Cid, ran: String) -> GotReceiptDht { + GotReceiptDht { + timestamp: Utc::now().timestamp_millis(), + publisher: publisher.map(|p| p.to_string()), + cid: cid.to_string(), + ran, + } + } +} + +impl DagJson for GotReceiptDht {} + +impl From for Ipld { + fn from(notification: GotReceiptDht) -> Self { + let map: BTreeMap = BTreeMap::from([ + (TIMESTAMP_KEY.into(), notification.timestamp.into()), + ( + PUBLISHER_KEY.into(), + notification + .publisher + .map(|peer_id| peer_id.into()) + .unwrap_or(Ipld::Null), + ), + (CID_KEY.into(), notification.cid.into()), + (RAN_KEY.into(), notification.ran.into()), + ]); + + Ipld::Map(map) + } +} + +impl TryFrom for GotReceiptDht { + type Error = anyhow::Error; + + fn try_from(ipld: Ipld) -> Result { + let map = from_ipld::>(ipld)?; + + let timestamp = from_ipld( + map.get(TIMESTAMP_KEY) + .ok_or_else(|| anyhow!("missing {TIMESTAMP_KEY}"))? + .to_owned(), + )?; + + let publisher = map + .get(PUBLISHER_KEY) + .and_then(|ipld| match ipld { + Ipld::Null => None, + ipld => Some(ipld), + }) + .and_then(|ipld| from_ipld(ipld.to_owned()).ok()); + + let cid = from_ipld( + map.get(CID_KEY) + .ok_or_else(|| anyhow!("missing {CID_KEY}"))? + .to_owned(), + )?; + + let ran = from_ipld( + map.get(RAN_KEY) + .ok_or_else(|| anyhow!("missing {RAN_KEY}"))? 
+ .to_owned(), + )?; + + Ok(GotReceiptDht { + timestamp, + publisher, + cid, + ran, + }) + } +} + +#[derive(Debug, Clone, Getters, JsonSchema)] +#[schemars(rename = "put_workflow_info_dht")] +pub struct PutWorkflowInfoDht { + timestamp: i64, + #[schemars(description = "Workflow info CID")] + cid: String, + #[schemars(description = "Optional workflow name")] + name: Option, + #[schemars(description = "Number of tasks in workflow")] + num_tasks: u32, + #[schemars(description = "Completed task CIDs")] + progress: Vec, + #[schemars(description = "Number of workflow tasks completed")] + progress_count: u32, +} + +impl PutWorkflowInfoDht { + pub(crate) fn new( + cid: Cid, + name: Option, + num_tasks: u32, + progress: Vec, + progress_count: u32, + ) -> PutWorkflowInfoDht { + PutWorkflowInfoDht { + timestamp: Utc::now().timestamp_millis(), + cid: cid.to_string(), + name: name.map(|n| n.into()), + num_tasks, + progress: progress.iter().map(|cid| cid.to_string()).collect(), + progress_count, + } + } +} + +impl DagJson for PutWorkflowInfoDht {} + +impl From for Ipld { + fn from(notification: PutWorkflowInfoDht) -> Self { + let map: BTreeMap = BTreeMap::from([ + (TIMESTAMP_KEY.into(), notification.timestamp.into()), + (CID_KEY.into(), notification.cid.into()), + ( + NAME_KEY.into(), + notification + .name + .map(|peer_id| peer_id.into()) + .unwrap_or(Ipld::Null), + ), + (NUM_TASKS_KEY.into(), notification.num_tasks.into()), + ( + PROGRESS_KEY.into(), + Ipld::List( + notification + .progress + .iter() + .map(|cid| Ipld::String(cid.to_string())) + .collect(), + ), + ), + ( + PROGRESS_COUNT_KEY.into(), + notification.progress_count.into(), + ), + ]); + + Ipld::Map(map) + } +} + +impl TryFrom for PutWorkflowInfoDht { + type Error = anyhow::Error; + + fn try_from(ipld: Ipld) -> Result { + let map = from_ipld::>(ipld)?; + + let timestamp = from_ipld( + map.get(TIMESTAMP_KEY) + .ok_or_else(|| anyhow!("missing {TIMESTAMP_KEY}"))? 
+ .to_owned(), + )?; + + let cid = from_ipld( + map.get(CID_KEY) + .ok_or_else(|| anyhow!("missing {CID_KEY}"))? + .to_owned(), + )?; + + let name = map + .get(NAME_KEY) + .and_then(|ipld| match ipld { + Ipld::Null => None, + ipld => Some(ipld), + }) + .and_then(|ipld| from_ipld(ipld.to_owned()).ok()); + + let num_tasks = from_ipld( + map.get(NUM_TASKS_KEY) + .ok_or_else(|| anyhow!("missing {NUM_TASKS_KEY}"))? + .to_owned(), + )?; + + let progress = from_ipld::>( + map.get(PROGRESS_KEY) + .ok_or_else(|| anyhow!("missing {PROGRESS_KEY}"))? + .to_owned(), + )?; + + let progress_count = from_ipld( + map.get(PROGRESS_COUNT_KEY) + .ok_or_else(|| anyhow!("missing {PROGRESS_COUNT_KEY}"))? + .to_owned(), + )?; + + Ok(PutWorkflowInfoDht { + timestamp, + cid, + name, + num_tasks, + progress, + progress_count, + }) + } +} + +#[derive(Debug, Clone, Getters, JsonSchema)] +#[schemars(rename = "got_workflow_info_dht")] +pub struct GotWorkflowInfoDht { + timestamp: i64, + #[schemars(description = "Workflow info publisher peer ID")] + publisher: Option, + #[schemars(description = "Workflow info CID")] + cid: String, + #[schemars(description = "Optional workflow name")] + name: Option, + #[schemars(description = "Number of tasks in workflow")] + num_tasks: u32, + #[schemars(description = "Completed task CIDs")] + progress: Vec, + #[schemars(description = "Number of workflow tasks completed")] + progress_count: u32, +} + +impl GotWorkflowInfoDht { + pub(crate) fn new( + publisher: Option, + cid: Cid, + name: Option, + num_tasks: u32, + progress: Vec, + progress_count: u32, + ) -> GotWorkflowInfoDht { + GotWorkflowInfoDht { + timestamp: Utc::now().timestamp_millis(), + publisher: publisher.map(|p| p.to_string()), + cid: cid.to_string(), + name: name.map(|n| n.into()), + num_tasks, + progress: progress.iter().map(|cid| cid.to_string()).collect(), + progress_count, + } + } +} + +impl DagJson for GotWorkflowInfoDht {} + +impl From for Ipld { + fn from(notification: GotWorkflowInfoDht) -> 
Self { + let map: BTreeMap = BTreeMap::from([ + (TIMESTAMP_KEY.into(), notification.timestamp.into()), + ( + PUBLISHER_KEY.into(), + notification + .publisher + .map(|peer_id| peer_id.into()) + .unwrap_or(Ipld::Null), + ), + (CID_KEY.into(), notification.cid.into()), + ( + NAME_KEY.into(), + notification + .name + .map(|peer_id| peer_id.into()) + .unwrap_or(Ipld::Null), + ), + (NUM_TASKS_KEY.into(), notification.num_tasks.into()), + ( + PROGRESS_KEY.into(), + Ipld::List( + notification + .progress + .iter() + .map(|cid| Ipld::String(cid.to_string())) + .collect(), + ), + ), + ( + PROGRESS_COUNT_KEY.into(), + notification.progress_count.into(), + ), + ]); + + Ipld::Map(map) + } +} + +impl TryFrom for GotWorkflowInfoDht { + type Error = anyhow::Error; + + fn try_from(ipld: Ipld) -> Result { + let map = from_ipld::>(ipld)?; + + let timestamp = from_ipld( + map.get(TIMESTAMP_KEY) + .ok_or_else(|| anyhow!("missing {TIMESTAMP_KEY}"))? + .to_owned(), + )?; + + let publisher = map + .get(PUBLISHER_KEY) + .and_then(|ipld| match ipld { + Ipld::Null => None, + ipld => Some(ipld), + }) + .and_then(|ipld| from_ipld(ipld.to_owned()).ok()); + + let cid = from_ipld( + map.get(CID_KEY) + .ok_or_else(|| anyhow!("missing {CID_KEY}"))? + .to_owned(), + )?; + + let name = map + .get(NAME_KEY) + .and_then(|ipld| match ipld { + Ipld::Null => None, + ipld => Some(ipld), + }) + .and_then(|ipld| from_ipld(ipld.to_owned()).ok()); + + let num_tasks = from_ipld( + map.get(NUM_TASKS_KEY) + .ok_or_else(|| anyhow!("missing {NUM_TASKS_KEY}"))? + .to_owned(), + )?; + + let progress = from_ipld::>( + map.get(PROGRESS_KEY) + .ok_or_else(|| anyhow!("missing {PROGRESS_KEY}"))? + .to_owned(), + )?; + + let progress_count = from_ipld( + map.get(PROGRESS_COUNT_KEY) + .ok_or_else(|| anyhow!("missing {PROGRESS_COUNT_KEY}"))? 
+ .to_owned(), + )?; + + Ok(GotWorkflowInfoDht { + timestamp, + publisher, + cid, + name, + num_tasks, + progress, + progress_count, + }) + } +} + +#[derive(Debug, Clone, Getters, JsonSchema)] +#[schemars(rename = "receipt_quorum_success_dht")] +pub struct ReceiptQuorumSuccessDht { + timestamp: i64, + #[schemars(description = "Receipt CID")] + cid: String, + #[schemars(description = "Number of peers participating in quorum")] + quorum: usize, +} + +impl ReceiptQuorumSuccessDht { + pub(crate) fn new(cid: FastStr, quorum: usize) -> ReceiptQuorumSuccessDht { + ReceiptQuorumSuccessDht { + timestamp: Utc::now().timestamp_millis(), + cid: cid.to_string(), + quorum, + } + } +} + +impl DagJson for ReceiptQuorumSuccessDht {} + +impl From for Ipld { + fn from(notification: ReceiptQuorumSuccessDht) -> Self { + let map: BTreeMap = BTreeMap::from([ + (TIMESTAMP_KEY.into(), notification.timestamp.into()), + (CID_KEY.into(), notification.cid.into()), + (QUORUM_KEY.into(), notification.quorum.into()), + ]); + + Ipld::Map(map) + } +} + +impl TryFrom for ReceiptQuorumSuccessDht { + type Error = anyhow::Error; + + fn try_from(ipld: Ipld) -> Result { + let map = from_ipld::>(ipld)?; + + let timestamp = from_ipld( + map.get(TIMESTAMP_KEY) + .ok_or_else(|| anyhow!("missing {TIMESTAMP_KEY}"))? + .to_owned(), + )?; + + let cid = from_ipld( + map.get(CID_KEY) + .ok_or_else(|| anyhow!("missing {CID_KEY}"))? + .to_owned(), + )?; + + let quorum = from_ipld( + map.get(QUORUM_KEY) + .ok_or_else(|| anyhow!("missing {QUORUM_KEY}"))? 
+ .to_owned(), + )?; + + Ok(ReceiptQuorumSuccessDht { + timestamp, + cid, + quorum, + }) + } +} + +#[derive(Debug, Clone, Getters, JsonSchema)] +#[schemars(rename = "receipt_quorum_failure_dht")] +pub struct ReceiptQuorumFailureDht { + timestamp: i64, + #[schemars(description = "Receipt CID")] + cid: String, + #[schemars(description = "Number of peers required for quorum")] + quorum: usize, + #[schemars(description = "Number of connected peers")] + connected_peer_count: usize, + #[schemars(description = "Peers participating in quorum")] + stored_to_peers: Vec, +} + +impl ReceiptQuorumFailureDht { + pub(crate) fn new( + cid: FastStr, + quorum: usize, + connected_peer_count: usize, + stored_to_peers: Vec, + ) -> ReceiptQuorumFailureDht { + ReceiptQuorumFailureDht { + timestamp: Utc::now().timestamp_millis(), + cid: cid.to_string(), + quorum, + connected_peer_count, + stored_to_peers: stored_to_peers.iter().map(|p| p.to_string()).collect(), + } + } +} + +impl DagJson for ReceiptQuorumFailureDht {} + +impl From for Ipld { + fn from(notification: ReceiptQuorumFailureDht) -> Self { + let map: BTreeMap = BTreeMap::from([ + (TIMESTAMP_KEY.into(), notification.timestamp.into()), + (CID_KEY.into(), notification.cid.into()), + (QUORUM_KEY.into(), notification.quorum.into()), + ( + CONNECTED_PEER_COUNT_KEY.into(), + notification.connected_peer_count.into(), + ), + ( + STORED_TO_PEERS_KEY.into(), + Ipld::List( + notification + .stored_to_peers + .iter() + .map(|p| Ipld::String(p.to_string())) + .collect(), + ), + ), + ]); + + Ipld::Map(map) + } +} + +impl TryFrom for ReceiptQuorumFailureDht { + type Error = anyhow::Error; + + fn try_from(ipld: Ipld) -> Result { + let map = from_ipld::>(ipld)?; + + let timestamp = from_ipld( + map.get(TIMESTAMP_KEY) + .ok_or_else(|| anyhow!("missing {TIMESTAMP_KEY}"))? + .to_owned(), + )?; + + let cid = from_ipld( + map.get(CID_KEY) + .ok_or_else(|| anyhow!("missing {CID_KEY}"))? 
+ .to_owned(), + )?; + + let quorum = from_ipld( + map.get(QUORUM_KEY) + .ok_or_else(|| anyhow!("missing {QUORUM_KEY}"))? + .to_owned(), + )?; + + let connected_peer_count = from_ipld( + map.get(CONNECTED_PEER_COUNT_KEY) + .ok_or_else(|| anyhow!("missing {CONNECTED_PEER_COUNT_KEY}"))? + .to_owned(), + )?; + + let stored_to_peers = from_ipld( + map.get(STORED_TO_PEERS_KEY) + .ok_or_else(|| anyhow!("missing {STORED_TO_PEERS_KEY}"))? + .to_owned(), + )?; + + Ok(ReceiptQuorumFailureDht { + timestamp, + cid, + quorum, + connected_peer_count, + stored_to_peers, + }) + } +} + +#[derive(Debug, Clone, Getters, JsonSchema)] +#[schemars(rename = "workflow_info_quorum_success_dht")] +pub struct WorkflowInfoQuorumSuccessDht { + timestamp: i64, + #[schemars(description = "Workflow info CID")] + cid: String, + #[schemars(description = "Number of peers participating in quorum")] + quorum: usize, +} + +impl WorkflowInfoQuorumSuccessDht { + pub(crate) fn new(cid: FastStr, quorum: usize) -> WorkflowInfoQuorumSuccessDht { + WorkflowInfoQuorumSuccessDht { + timestamp: Utc::now().timestamp_millis(), + cid: cid.to_string(), + quorum, + } + } +} + +impl DagJson for WorkflowInfoQuorumSuccessDht {} + +impl From for Ipld { + fn from(notification: WorkflowInfoQuorumSuccessDht) -> Self { + let map: BTreeMap = BTreeMap::from([ + (TIMESTAMP_KEY.into(), notification.timestamp.into()), + (CID_KEY.into(), notification.cid.into()), + (QUORUM_KEY.into(), notification.quorum.into()), + ]); + + Ipld::Map(map) + } +} + +impl TryFrom for WorkflowInfoQuorumSuccessDht { + type Error = anyhow::Error; + + fn try_from(ipld: Ipld) -> Result { + let map = from_ipld::>(ipld)?; + + let timestamp = from_ipld( + map.get(TIMESTAMP_KEY) + .ok_or_else(|| anyhow!("missing {TIMESTAMP_KEY}"))? + .to_owned(), + )?; + + let cid = from_ipld( + map.get(CID_KEY) + .ok_or_else(|| anyhow!("missing {CID_KEY}"))? 
+ .to_owned(), + )?; + + let quorum = from_ipld( + map.get(QUORUM_KEY) + .ok_or_else(|| anyhow!("missing {QUORUM_KEY}"))? + .to_owned(), + )?; + + Ok(WorkflowInfoQuorumSuccessDht { + timestamp, + cid, + quorum, + }) + } +} + +#[derive(Debug, Clone, Getters, JsonSchema)] +#[schemars(rename = "workflow_info_quorum_failure_dht")] +pub struct WorkflowInfoQuorumFailureDht { + timestamp: i64, + #[schemars(description = "Workflow info CID")] + cid: String, + #[schemars(description = "Number of peers required for quorum")] + quorum: usize, + #[schemars(description = "Number of connected peers")] + connected_peer_count: usize, + #[schemars(description = "Peers participating in quorum")] + stored_to_peers: Vec, +} + +impl WorkflowInfoQuorumFailureDht { + pub(crate) fn new( + cid: FastStr, + quorum: usize, + connected_peer_count: usize, + stored_to_peers: Vec, + ) -> WorkflowInfoQuorumFailureDht { + WorkflowInfoQuorumFailureDht { + timestamp: Utc::now().timestamp_millis(), + cid: cid.to_string(), + quorum, + connected_peer_count, + stored_to_peers: stored_to_peers.iter().map(|p| p.to_string()).collect(), + } + } +} + +impl DagJson for WorkflowInfoQuorumFailureDht {} + +impl From for Ipld { + fn from(notification: WorkflowInfoQuorumFailureDht) -> Self { + let map: BTreeMap = BTreeMap::from([ + (TIMESTAMP_KEY.into(), notification.timestamp.into()), + (CID_KEY.into(), notification.cid.into()), + (QUORUM_KEY.into(), notification.quorum.into()), + ( + CONNECTED_PEER_COUNT_KEY.into(), + notification.connected_peer_count.into(), + ), + ( + STORED_TO_PEERS_KEY.into(), + Ipld::List( + notification + .stored_to_peers + .iter() + .map(|p| Ipld::String(p.to_string())) + .collect(), + ), + ), + ]); + + Ipld::Map(map) + } +} + +impl TryFrom for WorkflowInfoQuorumFailureDht { + type Error = anyhow::Error; + + fn try_from(ipld: Ipld) -> Result { + let map = from_ipld::>(ipld)?; + + let timestamp = from_ipld( + map.get(TIMESTAMP_KEY) + .ok_or_else(|| anyhow!("missing {TIMESTAMP_KEY}"))? 
+ .to_owned(), + )?; + + let cid = from_ipld( + map.get(CID_KEY) + .ok_or_else(|| anyhow!("missing {CID_KEY}"))? + .to_owned(), + )?; + + let quorum = from_ipld( + map.get(QUORUM_KEY) + .ok_or_else(|| anyhow!("missing {QUORUM_KEY}"))? + .to_owned(), + )?; + + let connected_peer_count = from_ipld( + map.get(CONNECTED_PEER_COUNT_KEY) + .ok_or_else(|| anyhow!("missing {CONNECTED_PEER_COUNT_KEY}"))? + .to_owned(), + )?; + + let stored_to_peers = from_ipld( + map.get(STORED_TO_PEERS_KEY) + .ok_or_else(|| anyhow!("missing {STORED_TO_PEERS_KEY}"))? + .to_owned(), + )?; + + Ok(WorkflowInfoQuorumFailureDht { + timestamp, + cid, + quorum, + connected_peer_count, + stored_to_peers, + }) + } +} diff --git a/homestar-runtime/src/event_handler/notification/network/mdns.rs b/homestar-runtime/src/event_handler/notification/network/mdns.rs new file mode 100644 index 00000000..36938a9f --- /dev/null +++ b/homestar-runtime/src/event_handler/notification/network/mdns.rs @@ -0,0 +1,76 @@ +//! Notification types for [swarm] mDNS events. +//! +//! 
[swarm]: libp2p::swarm::Swarm + +use anyhow::anyhow; +use chrono::prelude::Utc; +use derive_getters::Getters; +use homestar_invocation::ipld::DagJson; +use libipld::{serde::from_ipld, Ipld}; +use libp2p::{Multiaddr, PeerId}; +use schemars::JsonSchema; +use std::collections::BTreeMap; + +const PEERS_KEY: &str = "peers"; +const TIMESTAMP_KEY: &str = "timestamp"; + +#[derive(Debug, Clone, Getters, JsonSchema)] +#[schemars(rename = "discovered_mdns")] +pub struct DiscoveredMdns { + timestamp: i64, + #[schemars(description = "Peers discovered by peer ID and multiaddress")] + peers: BTreeMap, +} + +impl DiscoveredMdns { + pub(crate) fn new(peers: BTreeMap) -> DiscoveredMdns { + DiscoveredMdns { + timestamp: Utc::now().timestamp_millis(), + peers: peers + .iter() + .map(|(peer_id, address)| (peer_id.to_string(), address.to_string())) + .collect(), + } + } +} + +impl DagJson for DiscoveredMdns {} + +impl From for Ipld { + fn from(notification: DiscoveredMdns) -> Self { + let peers: BTreeMap = notification + .peers + .into_iter() + .map(|(peer_id, address)| (peer_id, address.into())) + .collect(); + + let map: BTreeMap = BTreeMap::from([ + (TIMESTAMP_KEY.into(), notification.timestamp.into()), + (PEERS_KEY.into(), peers.into()), + ]); + + Ipld::Map(map) + } +} + +impl TryFrom for DiscoveredMdns { + type Error = anyhow::Error; + + fn try_from(ipld: Ipld) -> Result { + let map = from_ipld::>(ipld)?; + + let timestamp = from_ipld( + map.get(TIMESTAMP_KEY) + .ok_or_else(|| anyhow!("missing {TIMESTAMP_KEY}"))? + .to_owned(), + )?; + + let peers = from_ipld::>( + map.get(PEERS_KEY) + .ok_or_else(|| anyhow!("missing {PEERS_KEY}"))? 
+ .to_owned(), + )?; + + Ok(DiscoveredMdns { timestamp, peers }) + } +} diff --git a/homestar-runtime/src/event_handler/notification/network/pubsub.rs b/homestar-runtime/src/event_handler/notification/network/pubsub.rs new file mode 100644 index 00000000..01d0adbc --- /dev/null +++ b/homestar-runtime/src/event_handler/notification/network/pubsub.rs @@ -0,0 +1,160 @@ +//! Notification types for [swarm] gossipsub events. +//! +//! [swarm]: libp2p::swarm::Swarm + +use anyhow::anyhow; +use chrono::prelude::Utc; +use derive_getters::Getters; +use homestar_invocation::ipld::DagJson; +use libipld::{serde::from_ipld, Cid, Ipld}; +use libp2p::PeerId; +use schemars::JsonSchema; +use std::collections::BTreeMap; + +const CID_KEY: &str = "cid"; +const PUBLISHER_KEY: &str = "publisher"; +const RAN_KEY: &str = "ran"; +const TIMESTAMP_KEY: &str = "timestamp"; + +#[derive(Debug, Clone, Getters, JsonSchema)] +#[schemars(rename = "published_receipt_pubsub")] +pub struct PublishedReceiptPubsub { + timestamp: i64, + #[schemars(description = "Receipt CID")] + cid: String, + #[schemars(description = "Ran receipt CID")] + ran: String, +} + +impl PublishedReceiptPubsub { + pub(crate) fn new(cid: Cid, ran: String) -> PublishedReceiptPubsub { + PublishedReceiptPubsub { + timestamp: Utc::now().timestamp_millis(), + cid: cid.to_string(), + ran, + } + } +} + +impl DagJson for PublishedReceiptPubsub {} + +impl From for Ipld { + fn from(notification: PublishedReceiptPubsub) -> Self { + let map: BTreeMap = BTreeMap::from([ + (TIMESTAMP_KEY.into(), notification.timestamp.into()), + (CID_KEY.into(), notification.cid.into()), + (RAN_KEY.into(), notification.ran.into()), + ]); + + Ipld::Map(map) + } +} + +impl TryFrom for PublishedReceiptPubsub { + type Error = anyhow::Error; + + fn try_from(ipld: Ipld) -> Result { + let map = from_ipld::>(ipld)?; + + let timestamp = from_ipld( + map.get(TIMESTAMP_KEY) + .ok_or_else(|| anyhow!("missing {TIMESTAMP_KEY}"))? 
+ .to_owned(), + )?; + + let cid = from_ipld( + map.get(CID_KEY) + .ok_or_else(|| anyhow!("missing {CID_KEY}"))? + .to_owned(), + )?; + + let ran = from_ipld( + map.get(RAN_KEY) + .ok_or_else(|| anyhow!("missing {RAN_KEY}"))? + .to_owned(), + )?; + + Ok(PublishedReceiptPubsub { + timestamp, + cid, + ran, + }) + } +} + +#[derive(Debug, Clone, Getters, JsonSchema)] +#[schemars(rename = "received_receipt_pubsub")] +pub struct ReceivedReceiptPubsub { + timestamp: i64, + #[schemars(description = "Receipt publisher peer ID")] + publisher: String, + #[schemars(description = "Receipt CID")] + cid: String, + #[schemars(description = "Ran receipt CID")] + ran: String, +} + +impl ReceivedReceiptPubsub { + pub(crate) fn new(publisher: PeerId, cid: Cid, ran: String) -> ReceivedReceiptPubsub { + ReceivedReceiptPubsub { + timestamp: Utc::now().timestamp_millis(), + publisher: publisher.to_string(), + cid: cid.to_string(), + ran, + } + } +} + +impl DagJson for ReceivedReceiptPubsub {} + +impl From for Ipld { + fn from(notification: ReceivedReceiptPubsub) -> Self { + let map: BTreeMap = BTreeMap::from([ + (TIMESTAMP_KEY.into(), notification.timestamp.into()), + (PUBLISHER_KEY.into(), notification.publisher.into()), + (CID_KEY.into(), notification.cid.into()), + (RAN_KEY.into(), notification.ran.into()), + ]); + + Ipld::Map(map) + } +} + +impl TryFrom for ReceivedReceiptPubsub { + type Error = anyhow::Error; + + fn try_from(ipld: Ipld) -> Result { + let map = from_ipld::>(ipld)?; + + let timestamp = from_ipld( + map.get(TIMESTAMP_KEY) + .ok_or_else(|| anyhow!("missing {TIMESTAMP_KEY}"))? + .to_owned(), + )?; + + let publisher = from_ipld( + map.get(PUBLISHER_KEY) + .ok_or_else(|| anyhow!("missing {PUBLISHER_KEY}"))? + .to_owned(), + )?; + + let cid = from_ipld( + map.get(CID_KEY) + .ok_or_else(|| anyhow!("missing {CID_KEY}"))? + .to_owned(), + )?; + + let ran = from_ipld( + map.get(RAN_KEY) + .ok_or_else(|| anyhow!("missing {RAN_KEY}"))? 
+ .to_owned(), + )?; + + Ok(ReceivedReceiptPubsub { + timestamp, + publisher, + cid, + ran, + }) + } +} diff --git a/homestar-runtime/src/event_handler/notification/network/rendezvous.rs b/homestar-runtime/src/event_handler/notification/network/rendezvous.rs new file mode 100644 index 00000000..b1803828 --- /dev/null +++ b/homestar-runtime/src/event_handler/notification/network/rendezvous.rs @@ -0,0 +1,300 @@ +//! Notification types for [swarm] rendezvous events. +//! +//! [swarm]: libp2p::swarm::Swarm + +use anyhow::anyhow; +use chrono::prelude::Utc; +use derive_getters::Getters; +use homestar_invocation::ipld::DagJson; +use libipld::{serde::from_ipld, Ipld}; +use libp2p::{Multiaddr, PeerId}; +use schemars::JsonSchema; +use std::collections::BTreeMap; + +const ADDRESSES_KEY: &str = "addresses"; +const ENQUIRER_KEY: &str = "enquirer"; +const PEER_KEY: &str = "peer_id"; +const PEERS_KEY: &str = "peers"; +const SERVER_KEY: &str = "server"; +const TIMESTAMP_KEY: &str = "timestamp"; + +#[derive(Debug, Clone, Getters, JsonSchema)] +#[schemars(rename = "discovered_rendezvous")] +pub struct DiscoveredRendezvous { + timestamp: i64, + #[schemars(description = "Server that fulfilled the discovery request")] + server: String, + #[schemars(description = "Peers discovered by peer ID and multiaddresses")] + peers: BTreeMap>, +} + +impl DiscoveredRendezvous { + pub(crate) fn new( + server: PeerId, + peers: BTreeMap>, + ) -> DiscoveredRendezvous { + DiscoveredRendezvous { + timestamp: Utc::now().timestamp_millis(), + server: server.to_string(), + peers: peers + .iter() + .map(|(peer_id, addresses)| { + ( + peer_id.to_string(), + addresses + .iter() + .map(|address| address.to_string()) + .collect(), + ) + }) + .collect(), + } + } +} + +impl DagJson for DiscoveredRendezvous {} + +impl From for Ipld { + fn from(notification: DiscoveredRendezvous) -> Self { + let peers: BTreeMap = notification + .peers + .into_iter() + .map(|(peer_id, addresses)| { + ( + peer_id, + Ipld::List( + 
addresses + .iter() + .map(|address| Ipld::String(address.to_owned())) + .collect(), + ), + ) + }) + .collect(); + + let map: BTreeMap = BTreeMap::from([ + (TIMESTAMP_KEY.into(), notification.timestamp.into()), + (SERVER_KEY.into(), notification.server.into()), + (PEERS_KEY.into(), peers.into()), + ]); + + Ipld::Map(map) + } +} + +impl TryFrom for DiscoveredRendezvous { + type Error = anyhow::Error; + + fn try_from(ipld: Ipld) -> Result { + let map = from_ipld::>(ipld)?; + + let timestamp = from_ipld( + map.get(TIMESTAMP_KEY) + .ok_or_else(|| anyhow!("missing {TIMESTAMP_KEY}"))? + .to_owned(), + )?; + + let server = from_ipld( + map.get(SERVER_KEY) + .ok_or_else(|| anyhow!("missing {SERVER_KEY}"))? + .to_owned(), + )?; + + let peers = from_ipld::>>( + map.get(PEERS_KEY) + .ok_or_else(|| anyhow!("missing {PEERS_KEY}"))? + .to_owned(), + )?; + + Ok(DiscoveredRendezvous { + timestamp, + server, + peers, + }) + } +} + +#[derive(Debug, Clone, Getters, JsonSchema)] +#[schemars(rename = "registered_rendezvous")] +pub struct RegisteredRendezvous { + timestamp: i64, + #[schemars(description = "Server that accepted registration")] + server: String, +} + +impl RegisteredRendezvous { + pub(crate) fn new(server: PeerId) -> RegisteredRendezvous { + RegisteredRendezvous { + timestamp: Utc::now().timestamp_millis(), + server: server.to_string(), + } + } +} + +impl DagJson for RegisteredRendezvous {} + +impl From for Ipld { + fn from(notification: RegisteredRendezvous) -> Self { + let map: BTreeMap = BTreeMap::from([ + (TIMESTAMP_KEY.into(), notification.timestamp.into()), + (SERVER_KEY.into(), notification.server.into()), + ]); + + Ipld::Map(map) + } +} + +impl TryFrom for RegisteredRendezvous { + type Error = anyhow::Error; + + fn try_from(ipld: Ipld) -> Result { + let map = from_ipld::>(ipld)?; + + let timestamp = from_ipld( + map.get(TIMESTAMP_KEY) + .ok_or_else(|| anyhow!("missing {TIMESTAMP_KEY}"))? 
+                .to_owned(),
+        )?;
+
+        let server = from_ipld(
+            map.get(SERVER_KEY)
+                .ok_or_else(|| anyhow!("missing {SERVER_KEY}"))?
+                .to_owned(),
+        )?;
+
+        Ok(RegisteredRendezvous { timestamp, server })
+    }
+}
+
+#[derive(Debug, Clone, Getters, JsonSchema)]
+#[schemars(rename = "discover_served_rendezvous")]
+pub struct DiscoverServedRendezvous {
+    timestamp: i64,
+    #[schemars(description = "Peer that requested discovery")]
+    enquirer: String,
+}
+
+impl DiscoverServedRendezvous {
+    pub(crate) fn new(enquirer: PeerId) -> DiscoverServedRendezvous {
+        DiscoverServedRendezvous {
+            timestamp: Utc::now().timestamp_millis(),
+            enquirer: enquirer.to_string(),
+        }
+    }
+}
+
+impl DagJson for DiscoverServedRendezvous {}
+
+impl From for Ipld {
+    fn from(notification: DiscoverServedRendezvous) -> Self {
+        let map: BTreeMap = BTreeMap::from([
+            (TIMESTAMP_KEY.into(), notification.timestamp.into()),
+            (ENQUIRER_KEY.into(), notification.enquirer.into()),
+        ]);
+
+        Ipld::Map(map)
+    }
+}
+
+impl TryFrom for DiscoverServedRendezvous {
+    type Error = anyhow::Error;
+
+    fn try_from(ipld: Ipld) -> Result {
+        let map = from_ipld::>(ipld)?;
+
+        let timestamp = from_ipld(
+            map.get(TIMESTAMP_KEY)
+                .ok_or_else(|| anyhow!("missing {TIMESTAMP_KEY}"))?
+                .to_owned(),
+        )?;
+
+        let enquirer = from_ipld(
+            map.get(ENQUIRER_KEY)
+                .ok_or_else(|| anyhow!("missing {ENQUIRER_KEY}"))?
+ .to_owned(), + )?; + + Ok(DiscoverServedRendezvous { + timestamp, + enquirer, + }) + } +} + +#[derive(Debug, Clone, Getters, JsonSchema)] +#[schemars(rename = "peer_registered_rendezvous")] +pub struct PeerRegisteredRendezvous { + timestamp: i64, + #[schemars(description = "Peer registered")] + peer_id: String, + #[schemars(description = "Multiaddresses for peer")] + addresses: Vec, +} + +impl PeerRegisteredRendezvous { + pub(crate) fn new(peer_id: PeerId, addresses: Vec) -> PeerRegisteredRendezvous { + PeerRegisteredRendezvous { + timestamp: Utc::now().timestamp_millis(), + peer_id: peer_id.to_string(), + addresses: addresses + .iter() + .map(|address| address.to_string()) + .collect(), + } + } +} + +impl DagJson for PeerRegisteredRendezvous {} + +impl From for Ipld { + fn from(notification: PeerRegisteredRendezvous) -> Self { + let map: BTreeMap = BTreeMap::from([ + (TIMESTAMP_KEY.into(), notification.timestamp.into()), + (PEER_KEY.into(), notification.peer_id.into()), + ( + ADDRESSES_KEY.into(), + Ipld::List( + notification + .addresses + .iter() + .map(|address| Ipld::String(address.to_owned())) + .collect(), + ), + ), + ]); + + Ipld::Map(map) + } +} + +impl TryFrom for PeerRegisteredRendezvous { + type Error = anyhow::Error; + + fn try_from(ipld: Ipld) -> Result { + let map = from_ipld::>(ipld)?; + + let timestamp = from_ipld( + map.get(TIMESTAMP_KEY) + .ok_or_else(|| anyhow!("missing {TIMESTAMP_KEY}"))? + .to_owned(), + )?; + + let peer_id = from_ipld( + map.get(PEER_KEY) + .ok_or_else(|| anyhow!("missing {PEER_KEY}"))? + .to_owned(), + )?; + + let addresses = from_ipld( + map.get(ADDRESSES_KEY) + .ok_or_else(|| anyhow!("missing {ADDRESSES_KEY}"))? 
+                .to_owned(),
+        )?;
+
+        Ok(PeerRegisteredRendezvous {
+            timestamp,
+            peer_id,
+            addresses,
+        })
+    }
+}
diff --git a/homestar-runtime/src/event_handler/notification/network/req_resp.rs b/homestar-runtime/src/event_handler/notification/network/req_resp.rs
new file mode 100644
index 00000000..59f8aaab
--- /dev/null
+++ b/homestar-runtime/src/event_handler/notification/network/req_resp.rs
@@ -0,0 +1,304 @@
+//! Notification types for [swarm] request_response events.
+//!
+//! [swarm]: libp2p::swarm::Swarm
+
+use anyhow::anyhow;
+use chrono::prelude::Utc;
+use derive_getters::Getters;
+use faststr::FastStr;
+use homestar_invocation::ipld::DagJson;
+use libipld::{serde::from_ipld, Cid, Ipld};
+use libp2p::PeerId;
+use schemars::JsonSchema;
+use std::collections::BTreeMap;
+
+const CID_KEY: &str = "cid";
+const NAME_KEY: &str = "name";
+const NUM_TASKS_KEY: &str = "num_tasks";
+const PROGRESS_KEY: &str = "progress";
+const PROGRESS_COUNT_KEY: &str = "progress_count";
+const PROVIDER_KEY: &str = "provider";
+const REQUESTOR_KEY: &str = "requestor";
+const TIMESTAMP_KEY: &str = "timestamp";
+
+#[derive(Debug, Clone, Getters, JsonSchema)]
+#[schemars(rename = "sent_workflow_info")]
+pub struct SentWorkflowInfo {
+    timestamp: i64,
+    #[schemars(description = "Peer that requested workflow info")]
+    requestor: String,
+    #[schemars(description = "Workflow info CID")]
+    cid: String,
+    #[schemars(description = "Optional workflow name")]
+    name: Option,
+    #[schemars(description = "Number of tasks in workflow")]
+    num_tasks: u32,
+    #[schemars(description = "Completed task CIDs")]
+    progress: Vec,
+    #[schemars(description = "Number of workflow tasks completed")]
+    progress_count: u32,
+}
+
+impl SentWorkflowInfo {
+    pub(crate) fn new(
+        requestor: PeerId,
+        cid: Cid,
+        name: Option,
+        num_tasks: u32,
+        progress: Vec,
+        progress_count: u32,
+    ) -> SentWorkflowInfo {
+        SentWorkflowInfo {
+            requestor: requestor.to_string(),
+            timestamp: Utc::now().timestamp_millis(),
+            cid: cid.to_string(),
+            name: name.map(|n| n.into()),
+            num_tasks,
+            progress: progress.iter().map(|cid| cid.to_string()).collect(),
+            progress_count,
+        }
+    }
+}
+
+impl DagJson for SentWorkflowInfo {}
+
+impl From for Ipld {
+    fn from(notification: SentWorkflowInfo) -> Self {
+        let map: BTreeMap = BTreeMap::from([
+            (TIMESTAMP_KEY.into(), notification.timestamp.into()),
+            (REQUESTOR_KEY.into(), notification.requestor.into()),
+            (CID_KEY.into(), notification.cid.into()),
+            (
+                NAME_KEY.into(),
+                notification
+                    .name
+                    .map(|peer_id| peer_id.into())
+                    .unwrap_or(Ipld::Null),
+            ),
+            (NUM_TASKS_KEY.into(), notification.num_tasks.into()),
+            (
+                PROGRESS_KEY.into(),
+                Ipld::List(
+                    notification
+                        .progress
+                        .iter()
+                        .map(|cid| Ipld::String(cid.to_string()))
+                        .collect(),
+                ),
+            ),
+            (
+                PROGRESS_COUNT_KEY.into(),
+                notification.progress_count.into(),
+            ),
+        ]);
+
+        Ipld::Map(map)
+    }
+}
+
+impl TryFrom for SentWorkflowInfo {
+    type Error = anyhow::Error;
+
+    fn try_from(ipld: Ipld) -> Result {
+        let map = from_ipld::>(ipld)?;
+
+        let timestamp = from_ipld(
+            map.get(TIMESTAMP_KEY)
+                .ok_or_else(|| anyhow!("missing {TIMESTAMP_KEY}"))?
+                .to_owned(),
+        )?;
+
+        let requestor = from_ipld(
+            map.get(REQUESTOR_KEY)
+                .ok_or_else(|| anyhow!("missing {REQUESTOR_KEY}"))?
+                .to_owned(),
+        )?;
+
+        let cid = from_ipld(
+            map.get(CID_KEY)
+                .ok_or_else(|| anyhow!("missing {CID_KEY}"))?
+                .to_owned(),
+        )?;
+
+        let name = map
+            .get(NAME_KEY)
+            .and_then(|ipld| match ipld {
+                Ipld::Null => None,
+                ipld => Some(ipld),
+            })
+            .and_then(|ipld| from_ipld(ipld.to_owned()).ok());
+
+        let num_tasks = from_ipld(
+            map.get(NUM_TASKS_KEY)
+                .ok_or_else(|| anyhow!("missing {NUM_TASKS_KEY}"))?
+                .to_owned(),
+        )?;
+
+        let progress = from_ipld::>(
+            map.get(PROGRESS_KEY)
+                .ok_or_else(|| anyhow!("missing {PROGRESS_KEY}"))?
+                .to_owned(),
+        )?;
+
+        let progress_count = from_ipld(
+            map.get(PROGRESS_COUNT_KEY)
+                .ok_or_else(|| anyhow!("missing {PROGRESS_COUNT_KEY}"))?
+ .to_owned(), + )?; + + Ok(SentWorkflowInfo { + timestamp, + requestor, + cid, + name, + num_tasks, + progress, + progress_count, + }) + } +} + +#[derive(Debug, Clone, Getters, JsonSchema)] +#[schemars(rename = "received_workflow_info")] +pub struct ReceivedWorkflowInfo { + timestamp: i64, + #[schemars(description = "Workflow info provider peer ID")] + provider: Option, + #[schemars(description = "Workflow info CID")] + cid: String, + #[schemars(description = "Optional workflow name")] + name: Option, + #[schemars(description = "Number of tasks in workflow")] + num_tasks: u32, + #[schemars(description = "Completed task CIDs")] + progress: Vec, + #[schemars(description = "Number of workflow tasks completed")] + progress_count: u32, +} + +impl ReceivedWorkflowInfo { + pub(crate) fn new( + provider: Option, + cid: Cid, + name: Option, + num_tasks: u32, + progress: Vec, + progress_count: u32, + ) -> ReceivedWorkflowInfo { + ReceivedWorkflowInfo { + timestamp: Utc::now().timestamp_millis(), + provider: provider.map(|p| p.to_string()), + cid: cid.to_string(), + name: name.map(|n| n.into()), + num_tasks, + progress: progress.iter().map(|cid| cid.to_string()).collect(), + progress_count, + } + } +} + +impl DagJson for ReceivedWorkflowInfo {} + +impl From for Ipld { + fn from(notification: ReceivedWorkflowInfo) -> Self { + let map: BTreeMap = BTreeMap::from([ + (TIMESTAMP_KEY.into(), notification.timestamp.into()), + ( + PROVIDER_KEY.into(), + notification + .provider + .map(|peer_id| peer_id.into()) + .unwrap_or(Ipld::Null), + ), + (CID_KEY.into(), notification.cid.into()), + ( + NAME_KEY.into(), + notification + .name + .map(|peer_id| peer_id.into()) + .unwrap_or(Ipld::Null), + ), + (NUM_TASKS_KEY.into(), notification.num_tasks.into()), + ( + PROGRESS_KEY.into(), + Ipld::List( + notification + .progress + .iter() + .map(|cid| Ipld::String(cid.to_string())) + .collect(), + ), + ), + ( + PROGRESS_COUNT_KEY.into(), + notification.progress_count.into(), + ), + ]); + + 
Ipld::Map(map) + } +} + +impl TryFrom for ReceivedWorkflowInfo { + type Error = anyhow::Error; + + fn try_from(ipld: Ipld) -> Result { + let map = from_ipld::>(ipld)?; + + let timestamp = from_ipld( + map.get(TIMESTAMP_KEY) + .ok_or_else(|| anyhow!("missing {TIMESTAMP_KEY}"))? + .to_owned(), + )?; + + let provider = map + .get(PROVIDER_KEY) + .and_then(|ipld| match ipld { + Ipld::Null => None, + ipld => Some(ipld), + }) + .and_then(|ipld| from_ipld(ipld.to_owned()).ok()); + + let cid = from_ipld( + map.get(CID_KEY) + .ok_or_else(|| anyhow!("missing {CID_KEY}"))? + .to_owned(), + )?; + + let name = map + .get(NAME_KEY) + .and_then(|ipld| match ipld { + Ipld::Null => None, + ipld => Some(ipld), + }) + .and_then(|ipld| from_ipld(ipld.to_owned()).ok()); + + let num_tasks = from_ipld( + map.get(NUM_TASKS_KEY) + .ok_or_else(|| anyhow!("missing {NUM_TASKS_KEY}"))? + .to_owned(), + )?; + + let progress = from_ipld::>( + map.get(PROGRESS_KEY) + .ok_or_else(|| anyhow!("missing {PROGRESS_KEY}"))? + .to_owned(), + )?; + + let progress_count = from_ipld( + map.get(PROGRESS_COUNT_KEY) + .ok_or_else(|| anyhow!("missing {PROGRESS_COUNT_KEY}"))? 
+ .to_owned(), + )?; + + Ok(ReceivedWorkflowInfo { + timestamp, + provider, + cid, + name, + num_tasks, + progress, + progress_count, + }) + } +} From 1c84b248aaa76eac7772d9a81f76ad896c67eb32 Mon Sep 17 00:00:00 2001 From: Brian Ginsburg Date: Mon, 12 Feb 2024 13:08:46 -0800 Subject: [PATCH 72/75] chore: Update schemars version --- Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Cargo.toml b/Cargo.toml index bbbfc17c..b1fd521b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -52,7 +52,7 @@ libsqlite3-sys = { version = "0.27", default-features = false, features = [ "bundled", ] } rand = { version = "0.8", default-features = false } -schemars = { version = "0.8.16", features = ["arrayvec", "url"] } +schemars = { version = "0.8", features = ["arrayvec", "url"] } serde = { version = "1.0", default-features = false, features = ["derive"] } serde_json = { version = "1.0", default-features = false, features = [ "raw_value", From a611a4864c686801c987912f1f30ef63280a0db7 Mon Sep 17 00:00:00 2001 From: Brian Ginsburg Date: Mon, 12 Feb 2024 14:01:15 -0800 Subject: [PATCH 73/75] chore: Update schema id module paths --- Cargo.lock | 1 + Cargo.toml | 1 + homestar-invocation/Cargo.toml | 3 ++- homestar-invocation/src/authority/issuer.rs | 5 +++-- homestar-invocation/src/authority/prf.rs | 5 +++-- homestar-invocation/src/ipld/schema.rs | 9 +++++---- homestar-invocation/src/pointer.rs | 7 ++++--- homestar-invocation/src/task/instruction/nonce.rs | 5 +++-- homestar-runtime/Cargo.toml | 2 +- .../src/event_handler/notification/receipt.rs | 4 +++- homestar-runtime/src/network/webserver/prom.rs | 6 ++++-- 11 files changed, 30 insertions(+), 18 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index baafb5f8..6489058b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2515,6 +2515,7 @@ dependencies = [ "anyhow", "async-recursion", "byte-unit", + "const_format", "diesel", "enum-as-inner", "enum-assoc", diff --git a/Cargo.toml b/Cargo.toml index b1fd521b..8e47f342 100644 
--- a/Cargo.toml +++ b/Cargo.toml @@ -35,6 +35,7 @@ chrono = { version = "0.4", default-features = false, features = [ "clock", "std", ] } +const_format = "0.2" enum-assoc = "1.1" enum-as-inner = "0.6" faststr = { version = "0.2", default-features = false, features = ["serde"] } diff --git a/homestar-invocation/Cargo.toml b/homestar-invocation/Cargo.toml index 04fda5de..7d5994f0 100644 --- a/homestar-invocation/Cargo.toml +++ b/homestar-invocation/Cargo.toml @@ -23,6 +23,7 @@ doctest = true anyhow = { workspace = true } async-recursion = { version = "1.0", default-features = false } byte-unit = { workspace = true } +const_format = { workspace = true } diesel = { version = "2.1", default-features = false, features = [ "sqlite", ], optional = true } @@ -38,7 +39,7 @@ libsqlite3-sys = { workspace = true, optional = true } rand = { workspace = true } schemars = { workspace = true } serde = { workspace = true } -serde_json = { workspace = true } +serde_json = { workspace = true } signature = "2.2" thiserror = { workspace = true } tracing = { workspace = true } diff --git a/homestar-invocation/src/authority/issuer.rs b/homestar-invocation/src/authority/issuer.rs index 2837ebcf..3c3cc9bb 100644 --- a/homestar-invocation/src/authority/issuer.rs +++ b/homestar-invocation/src/authority/issuer.rs @@ -1,6 +1,7 @@ //! Issuer refers to the issuer of the invocation. use crate::{Error, Unit}; +use const_format::formatcp; #[cfg(feature = "diesel")] use diesel::{ backend::Backend, @@ -17,7 +18,7 @@ use schemars::{ JsonSchema, }; use serde::{Deserialize, Serialize}; -use std::{borrow::Cow, fmt, str::FromStr}; +use std::{borrow::Cow, fmt, module_path, str::FromStr}; use ucan::ipld::Principle as Principal; /// [Principal] issuer of the [Invocation]. 
If omitted issuer is @@ -102,7 +103,7 @@ impl JsonSchema for Issuer { } fn schema_id() -> Cow<'static, str> { - Cow::Borrowed("homestar-invocation::authority::issuer::Issuer") + Cow::Borrowed(formatcp!("{}::Issuer", module_path!())) } fn json_schema(_gen: &mut SchemaGenerator) -> Schema { diff --git a/homestar-invocation/src/authority/prf.rs b/homestar-invocation/src/authority/prf.rs index 44ed7131..2a2808ee 100644 --- a/homestar-invocation/src/authority/prf.rs +++ b/homestar-invocation/src/authority/prf.rs @@ -4,6 +4,7 @@ //! [Task]: crate::Task use crate::{ipld::Link, Error, Unit}; +use const_format::formatcp; #[cfg(feature = "diesel")] use diesel::{ backend::Backend, @@ -22,7 +23,7 @@ use schemars::{ JsonSchema, }; use serde::{Deserialize, Serialize}; -use std::borrow::Cow; +use std::{borrow::Cow, module_path}; use ucan::ipld::UcanIpld; /// Proof container, with links to UCANs for a particular [Task] or @@ -101,7 +102,7 @@ impl JsonSchema for UcanPrf { } fn schema_id() -> Cow<'static, str> { - Cow::Borrowed("homestar-invocation::authority::prf::UcanPrf") + Cow::Borrowed(formatcp!("{}::UcanPrf", module_path!())) } fn json_schema(gen: &mut SchemaGenerator) -> Schema { diff --git a/homestar-invocation/src/ipld/schema.rs b/homestar-invocation/src/ipld/schema.rs index 495ae66b..eebfb288 100644 --- a/homestar-invocation/src/ipld/schema.rs +++ b/homestar-invocation/src/ipld/schema.rs @@ -1,11 +1,12 @@ //! JSON Schema generation for DAG-JSON encoded Ipld. 
+use const_format::formatcp; use schemars::{ gen::SchemaGenerator, schema::{InstanceType, Metadata, ObjectValidation, Schema, SchemaObject, SingleOrVec}, JsonSchema, }; -use std::{borrow::Cow, collections::BTreeMap}; +use std::{borrow::Cow, collections::BTreeMap, module_path}; /// Ipld stub for JSON Schema generation #[derive(Debug)] @@ -21,7 +22,7 @@ impl JsonSchema for IpldStub { } fn schema_id() -> Cow<'static, str> { - Cow::Borrowed("homestar-invocation::ipld::schema::IpldStub") + Cow::Borrowed(formatcp!("{}::IpldStub", module_path!())) } fn json_schema(gen: &mut SchemaGenerator) -> Schema { @@ -74,7 +75,7 @@ impl JsonSchema for IpldLinkStub { } fn schema_id() -> Cow<'static, str> { - Cow::Borrowed("homestar-invocation::ipld::schema::IpldLinkStub") + Cow::Borrowed(formatcp!("{}::IpldLinkStub", module_path!())) } fn json_schema(gen: &mut SchemaGenerator) -> Schema { @@ -107,7 +108,7 @@ impl JsonSchema for IpldBytesStub { } fn schema_id() -> Cow<'static, str> { - Cow::Borrowed("homestar-invocation::ipld::schema::IpldBytesStub") + Cow::Borrowed(formatcp!("{}::IpldBytesStub", module_path!())) } fn json_schema(gen: &mut SchemaGenerator) -> Schema { diff --git a/homestar-invocation/src/pointer.rs b/homestar-invocation/src/pointer.rs index 6b54ed36..ebac05ad 100644 --- a/homestar-invocation/src/pointer.rs +++ b/homestar-invocation/src/pointer.rs @@ -9,6 +9,7 @@ //! [Receipts]: super::Receipt use crate::{ensure, Error, Unit}; +use const_format::formatcp; #[cfg(feature = "diesel")] use diesel::{ backend::Backend, @@ -28,7 +29,7 @@ use schemars::{ use serde::{Deserialize, Serialize}; #[cfg(feature = "diesel")] use std::str::FromStr; -use std::{borrow::Cow, collections::btree_map::BTreeMap, fmt}; +use std::{borrow::Cow, collections::btree_map::BTreeMap, fmt, module_path}; /// `await/ok` branch for instruction result. 
pub const OK_BRANCH: &str = "await/ok"; @@ -85,7 +86,7 @@ impl JsonSchema for AwaitResult { } fn schema_id() -> Cow<'static, str> { - Cow::Borrowed("homestar-invocation::pointer::AwaitResult") + Cow::Borrowed(formatcp!("{}::AwaitResult", module_path!())) } fn json_schema(gen: &mut SchemaGenerator) -> Schema { @@ -352,7 +353,7 @@ impl JsonSchema for Pointer { } fn schema_id() -> Cow<'static, str> { - Cow::Borrowed("homestar-invocation::pointer::Pointer") + Cow::Borrowed(formatcp!("{}::Pointer", module_path!())) } fn json_schema(gen: &mut SchemaGenerator) -> Schema { diff --git a/homestar-invocation/src/task/instruction/nonce.rs b/homestar-invocation/src/task/instruction/nonce.rs index acc4efd0..a821a111 100644 --- a/homestar-invocation/src/task/instruction/nonce.rs +++ b/homestar-invocation/src/task/instruction/nonce.rs @@ -3,6 +3,7 @@ //! [Instruction]: super::Instruction use crate::{Error, Unit}; +use const_format::formatcp; use enum_as_inner::EnumAsInner; use generic_array::{ typenum::consts::{U12, U16}, @@ -15,7 +16,7 @@ use schemars::{ JsonSchema, }; use serde::{Deserialize, Serialize}; -use std::{borrow::Cow, fmt}; +use std::{borrow::Cow, fmt, module_path}; use uuid::Uuid; type Nonce96 = GenericArray; @@ -99,7 +100,7 @@ impl JsonSchema for Nonce { } fn schema_id() -> Cow<'static, str> { - Cow::Borrowed("homestar-invocation::task::instruction::Nonce") + Cow::Borrowed(formatcp!("{}::Nonce", module_path!())) } fn json_schema(_gen: &mut SchemaGenerator) -> Schema { diff --git a/homestar-runtime/Cargo.toml b/homestar-runtime/Cargo.toml index 41cba10f..5b8fd707 100644 --- a/homestar-runtime/Cargo.toml +++ b/homestar-runtime/Cargo.toml @@ -49,7 +49,7 @@ config = { version = "0.13", default-features = false, features = ["toml"] } console-subscriber = { version = "0.2", default-features = false, features = [ "parking_lot", ], optional = true } -const_format = "0.2" +const_format = { workspace = true } crossbeam = "0.8" dagga = "0.2" dashmap = "5.5" diff --git 
a/homestar-runtime/src/event_handler/notification/receipt.rs b/homestar-runtime/src/event_handler/notification/receipt.rs index 4470ba8c..44ee4b10 100644 --- a/homestar-runtime/src/event_handler/notification/receipt.rs +++ b/homestar-runtime/src/event_handler/notification/receipt.rs @@ -1,5 +1,6 @@ //! Notification receipts. +use const_format::formatcp; use homestar_invocation::{ ipld::{schema, DagJson}, Receipt, @@ -13,6 +14,7 @@ use schemars::{ use std::{ borrow::Cow, collections::{BTreeMap, BTreeSet}, + module_path, }; /// A [Receipt] that is sent out for websocket notifications. @@ -64,7 +66,7 @@ impl JsonSchema for ReceiptNotification { } fn schema_id() -> Cow<'static, str> { - Cow::Borrowed("homestar-runtime::event_handler::notification::ReceiptNotification") + Cow::Borrowed(formatcp!("{}::ReceiptNotification", module_path!())) } fn json_schema(gen: &mut SchemaGenerator) -> Schema { diff --git a/homestar-runtime/src/network/webserver/prom.rs b/homestar-runtime/src/network/webserver/prom.rs index d42359ee..b6471424 100644 --- a/homestar-runtime/src/network/webserver/prom.rs +++ b/homestar-runtime/src/network/webserver/prom.rs @@ -2,6 +2,7 @@ /// /// Influenced by https://crates.io/crates/prom2jsonrs/0.1.0. 
use anyhow::{anyhow, bail, Result}; +use const_format::formatcp; use dyn_clone::DynClone; use once_cell::sync::Lazy; use regex::Regex; @@ -15,6 +16,7 @@ use serde_json::json; use std::{ borrow::Cow, collections::{BTreeMap, BTreeSet, HashMap}, + module_path, }; #[allow(dead_code)] @@ -93,7 +95,7 @@ impl JsonSchema for Metric { } fn schema_id() -> Cow<'static, str> { - Cow::Borrowed("homestar-runtime::network::webserver::prom::Metric") + Cow::Borrowed(formatcp!("{}::Metric", module_path!())) } fn json_schema(gen: &mut SchemaGenerator) -> Schema { @@ -165,7 +167,7 @@ impl JsonSchema for MetricFamily { } fn schema_id() -> Cow<'static, str> { - Cow::Borrowed("homestar-runtime::network::webserver::prom::MetricFamily") + Cow::Borrowed(formatcp!("{}::MetricFamily", module_path!())) } fn json_schema(gen: &mut SchemaGenerator) -> Schema { From d4d196c7e227171efef12996e08c659cb5aaba9c Mon Sep 17 00:00:00 2001 From: Brian Ginsburg Date: Tue, 13 Feb 2024 08:23:39 -0800 Subject: [PATCH 74/75] chore: Use minor version derive-getters --- homestar-runtime/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/homestar-runtime/Cargo.toml b/homestar-runtime/Cargo.toml index 5b8fd707..d80dc2b6 100644 --- a/homestar-runtime/Cargo.toml +++ b/homestar-runtime/Cargo.toml @@ -53,7 +53,7 @@ const_format = { workspace = true } crossbeam = "0.8" dagga = "0.2" dashmap = "5.5" -derive-getters = "0.3.0" +derive-getters = "0.3" diesel = { version = "2.1", default-features = false, features = [ "sqlite", "r2d2", From 590135a6a5c6257908bc8e0fbdbe5df067321e41 Mon Sep 17 00:00:00 2001 From: Brian Ginsburg Date: Tue, 13 Feb 2024 09:21:20 -0800 Subject: [PATCH 75/75] chore: Document OpenRPC API and schemas --- README.md | 4 ++++ homestar-runtime/README.md | 14 ++++++++++++++ 2 files changed, 18 insertions(+) diff --git a/README.md b/README.md index 7d86a90d..cd3ed092 100644 --- a/README.md +++ b/README.md @@ -154,6 +154,10 @@ represents the `Homestar` runtime. 
 We recommend diving into each package's own
 writing and compiling [Wasm component][wasm-component] modules using
 [wit-bindgen][wit-bindgen].
 
+- [homestar-schemas](./homestar-schemas)
+
+`homestar-schemas` is a crate for generating OpenRPC docs and JSON Schemas that document the [homestar-runtime](./homestar-runtime) JSON-RPC API, workflows, and receipts.
+
 - [examples/*](./examples)
 
 `examples` contains examples and demos showcasing `Homestar` packages
diff --git a/homestar-runtime/README.md b/homestar-runtime/README.md
index 1e3d0e23..005bf049 100644
--- a/homestar-runtime/README.md
+++ b/homestar-runtime/README.md
@@ -32,3 +32,17 @@ and failure modes, etc.
 For more information, please go to our [Homestar Readme][homestar-readme].
 
 [homestar-readme]: https://github.com/ipvm-wg/homestar/blob/main/README.md
+
+## API
+
+The runtime provides a JSON-RPC API to run workflows, request node information, health, and metrics, and to subscribe to network notifications. The OpenRPC API is documented in [api.json][api] and is available on the `rpc_discover` endpoint.
+
+JSON Schemas for [workflow][workflow-schema], [receipt][receipt-schema], [network notifications][network-schema], [health checks][health-schema], [node info][node-info-schema], and [metrics][metrics-schema] are also available individually.
+
+[api]: ./schemas/api.json
+[health-schema]: ./schemas/health.json
+[metrics-schema]: ./schemas/metrics.json
+[network-schema]: ./schemas/network.json
+[node-info-schema]: ./schemas/node_info.json
+[receipt-schema]: ./schemas/receipt.json
+[workflow-schema]: ./schemas/workflow.json