From 65623582a2e7e8d3fccc4eac20a103a0ccaef95c Mon Sep 17 00:00:00 2001
From: jshlbrd
Date: Thu, 22 Aug 2024 17:15:26 -0700
Subject: [PATCH] chore: Use Tag-Based Versioning

---
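This change removes the v1 source tree and moves the v2 tree to the
repository root, so releases are cut from git tags rather than versioned
directories. As a rough sketch of what that means for consumers (the module
path github.com/brexhq/substation/v2 and the v2.0.0 tag are assumptions for
illustration, not part of this patch), a downstream go.mod would pin a
tagged release:

    // Hypothetical downstream go.mod; module path and version are assumed.
    module example.com/consumer

    go 1.22

    // Resolved from a git tag (e.g. v2.0.0) instead of a v2/ directory.
    require github.com/brexhq/substation/v2 v2.0.0
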
 .github/workflows/code.yml | 27 +-
 {v1/cmd => cmd}/README.md | 0
 {v1/cmd => cmd}/aws/lambda/README.md | 0
 {v2/cmd => cmd}/aws/lambda/autoscale/main.go | 0
 .../aws/lambda/substation/api_gateway.go | 0
 .../aws/lambda/substation/dynamodb.go | 0
 .../aws/lambda/substation/kinesis_firehose.go | 0
 .../aws/lambda/substation/kinesis_stream.go | 0
 .../aws/lambda/substation/lambda.go | 0
 {v2/cmd => cmd}/aws/lambda/substation/main.go | 0
 {v2/cmd => cmd}/aws/lambda/substation/s3.go | 0
 {v2/cmd => cmd}/aws/lambda/substation/sns.go | 0
 {v2/cmd => cmd}/aws/lambda/substation/sqs.go | 0
 {v2/cmd => cmd}/aws/lambda/validate/main.go | 0
 .../aws/lambda/validate/main_test.go | 0
 .../development/benchmark/substation/main.go | 0
 .../kinesis-tap/substation/README.md | 0
 .../kinesis-tap/substation/config.jsonnet | 0
 .../kinesis-tap/substation/main.go | 0
 {v1/condition => condition}/README.md | 0
 {v2/condition => condition}/condition.go | 0
 {v2/condition => condition}/format_json.go | 0
 .../format_json_test.go | 0
 {v2/condition => condition}/format_mime.go | 0
 .../format_mime_test.go | 0
 {v2/condition => condition}/meta.go | 0
 {v2/condition => condition}/meta_all.go | 0
 {v2/condition => condition}/meta_all_test.go | 0
 {v2/condition => condition}/meta_any.go | 0
 {v2/condition => condition}/meta_any_test.go | 0
 {v2/condition => condition}/meta_none.go | 0
 {v2/condition => condition}/meta_none_test.go | 0
 {v1/condition => condition}/network.go | 0
 .../network_ip_global_unicast.go | 0
 .../network_ip_global_unicast_test.go | 0
 .../network_ip_link_local_multicast.go | 0
 .../network_ip_link_local_multicast_test.go | 0
 .../network_ip_link_local_unicast.go | 0
 .../network_ip_link_local_unicast_test.go | 0
 .../network_ip_loopback.go | 0
 .../network_ip_loopback_test.go | 0
 .../network_ip_multicast.go | 0
 .../network_ip_multicast_test.go | 0
 .../network_ip_private.go | 0
 .../network_ip_private_test.go | 0
 .../network_ip_unicast.go | 0
 .../network_ip_unicast_test.go | 0
 .../network_ip_unspecified.go | 0
 .../network_ip_unspecified_test.go | 0
 .../network_ip_valid.go | 0
 .../network_ip_valid_test.go | 0
 {v1/condition => condition}/number.go | 0
 .../number_bitwise_and.go | 0
 .../number_bitwise_and_test.go | 0
 .../number_bitwise_not.go | 0
 .../number_bitwise_or.go | 0
 .../number_bitwise_or_test.go | 0
 .../number_bitwise_xor.go | 0
 .../number_bitwise_xor_test.go | 0
 .../number_equal_to.go | 0
 .../number_equal_to_test.go | 0
 .../number_greater_than.go | 0
 .../number_greater_than_test.go | 0
 .../number_length_equal_to.go | 0
 .../number_length_equal_to_test.go | 0
 .../number_length_greater_than.go | 0
 .../number_length_greater_than_test.go | 0
 .../number_length_less_than.go | 0
 .../number_length_less_than_test.go | 0
 .../number_less_than.go | 0
 .../number_less_than_test.go | 0
 {v2/condition => condition}/string.go | 0
 .../string_contains.go | 0
 .../string_contains_test.go | 0
 .../string_ends_with.go | 0
 .../string_ends_with_test.go | 0
 .../string_equal_to.go | 0
 .../string_equal_to_test.go | 0
 .../string_greater_than.go | 0
 .../string_greater_than_test.go | 0
 .../string_less_than.go | 0
 .../string_less_than_test.go | 0
 {v2/condition => condition}/string_match.go | 0
 .../string_match_test.go | 0
 .../string_starts_with.go | 0
 .../string_starts_with_test.go | 0
 {v2/condition => condition}/utility_random.go | 0
 {v1/config => config}/config.go | 0
 .../condition/meta/config.jsonnet | 0
 .../condition/meta/stdout.txt | 0
 .../condition/number/config.jsonnet | 0
 .../condition/number/stdout.txt | 0
 .../condition/string/config.jsonnet | 0
 .../condition/string/stdout.txt | 0
 {v2/examples => examples}/main.go | 0
 .../transform/aggregate/sample/config.jsonnet | 0
 .../transform/aggregate/sample/stdout.txt | 0
 .../aggregate/summarize/config.jsonnet | 0
 .../transform/aggregate/summarize/stdout.txt | 0
 .../transform/array/extend/config.jsonnet | 0
 .../transform/array/extend/stdout.txt | 0
 .../transform/array/flatten/config.jsonnet | 0
 .../transform/array/flatten/stdout.txt | 0
 .../array/flatten_deep/config.jsonnet | 0
 .../transform/array/flatten_deep/stdout.txt | 0
 .../transform/array/group/config.jsonnet | 0
 .../transform/array/group/stdout.txt | 0
 .../enrich/http_secret/config.jsonnet | 0
 .../enrich/kvstore_csv/config.jsonnet | 0
 .../transform/enrich/kvstore_csv/kv.csv | 0
 .../transform/enrich/kvstore_csv/stdout.txt | 0
 .../enrich/kvstore_json/config.jsonnet | 0
 .../transform/enrich/kvstore_json/stdout.txt | 0
 .../enrich/kvstore_set_add/config.jsonnet | 0
 .../enrich/kvstore_set_add/stdout.txt | 0
 .../transform/enrich/mmdb/config.jsonnet | 0
 .../transform/enrich/mmdb/stdout.txt | 0
 .../transform/enrich/urlscan/config.jsonnet | 0
 .../transform/format/zip/config.jsonnet | 0
 .../transform/format/zip/data.csv | 0
 .../transform/format/zip/stdout.txt | 0
 .../meta/crash_program/config.jsonnet | 0
 .../transform/meta/crash_program/stdout.txt | 0
 .../meta/each_in_array/config.jsonnet | 0
 .../transform/meta/each_in_array/stdout.txt | 0
 .../meta/exactly_once_consumer/config.jsonnet | 0
 .../meta/exactly_once_consumer/stdout.txt | 0
 .../meta/exactly_once_producer/config.jsonnet | 0
 .../meta/exactly_once_producer/stdout.txt | 0
 .../meta/exactly_once_system/config.jsonnet | 0
 .../meta/exactly_once_system/stdout.txt | 0
 .../meta/execution_time/config.jsonnet | 0
 .../transform/meta/execution_time/stdout.txt | 0
 .../meta/retry_with_backoff/config.jsonnet | 0
 .../meta/retry_with_backoff/stdout.txt | 0
 .../transform/number/clamp/config.jsonnet | 0
 .../transform/number/clamp/data.txt | 0
 .../transform/number/clamp/stdout.txt | 0
 .../transform/number/max/config.jsonnet | 0
 .../transform/number/max/data.txt | 0
 .../transform/number/max/stdout.txt | 0
 .../transform/number/min/config.jsonnet | 0
 .../transform/number/min/data.txt | 0
 .../transform/number/min/stdout.txt | 0
 .../send/aux_transforms/config.jsonnet | 0
 .../transform/send/aux_transforms/stdout.txt | 0
 .../send/aws_s3_glacier/config.jsonnet | 0
 .../transform/send/batch/config.jsonnet | 0
 .../transform/send/batch/stdout.txt | 0
 .../transform/send/datadog/config.jsonnet | 0
 .../transform/send/splunk/config.jsonnet | 0
 .../transform/send/sumologic/config.jsonnet | 0
 .../time/str_conversion/config.jsonnet | 0
 .../transform/time/str_conversion/stdout.txt | 0
 .../utility/generate_ctrl/config.jsonnet | 0
 .../utility/generate_ctrl/stdout.txt | 0
 .../utility/message_bytes/config.jsonnet | 0
 .../utility/message_bytes/stdout.txt | 0
 .../utility/message_count/config.jsonnet | 0
 .../utility/message_count/stdout.txt | 0
 .../utility/message_freshness/config.jsonnet | 0
 .../utility/message_freshness/stdout.txt | 0
 v2/go.mod => go.mod | 0
 v2/go.sum => go.sum | 0
 {v1/internal => internal}/README.md | 0
 .../aggregate/aggregate.go | 0
 {v1/internal => internal}/aws/README.md | 0
 .../aws/appconfig/appconfig.go | 0
 .../aws/cloudwatch/cloudwatch.go | 0
 {v2/internal => internal}/aws/config.go | 0
 {v1/internal => internal}/aws/config_v2.go | 0
 .../aws/dynamodb/dynamodb.go | 0
 .../aws/dynamodb/dynamodb_test.go | 0
 .../aws/firehose/firehose.go | 0
 .../aws/firehose/firehose_test.go | 0
 .../aws/kinesis/kinesis.go | 0
 .../aws/kinesis/kinesis_test.go | 0
 .../aws/lambda/lambda.go | 0
 .../aws/lambda/lambda_test.go | 0
 .../aws/s3manager/s3manager.go | 0
 .../aws/s3manager/s3manager_test.go | 0
 .../aws/secretsmanager/secretsmanager.go | 0
 .../aws/secretsmanager/secretsmanager_test.go | 0
 {v2/internal => internal}/aws/sns/sns.go | 0
 {v1/internal => internal}/aws/sns/sns_test.go | 0
 {v2/internal => internal}/aws/sqs/sqs.go | 0
 {v1/internal => internal}/aws/sqs/sqs_test.go | 0
 {v1/internal => internal}/base64/base64.go | 0
 .../base64/base64_test.go | 0
 {v2/internal => internal}/bufio/bufio.go | 0
 {v1/internal => internal}/bufio/bufio_test.go | 0
 .../bufio/example_test.go | 0
 {v1/internal => internal}/channel/channel.go | 0
 {v2/internal => internal}/config/config.go | 0
 {v1/internal => internal}/errors/errors.go | 0
 .../file/example_test.go | 0
 {v2/internal => internal}/file/file.go | 0
 {v1/internal => internal}/http/README.md | 0
 {v1/internal => internal}/http/http.go | 0
 {v1/internal => internal}/http/http_test.go | 0
 {v2/internal => internal}/kv/aws_dynamodb.go | 0
 {v2/internal => internal}/kv/csv_file.go | 0
 {v2/internal => internal}/kv/example_test.go | 0
 {v2/internal => internal}/kv/json_file.go | 0
 {v2/internal => internal}/kv/kv.go | 0
 {v2/internal => internal}/kv/memory.go | 0
 {v2/internal => internal}/kv/mmdb.go | 0
 {v2/internal => internal}/kv/text_file.go | 0
 {v1/internal => internal}/log/log.go | 0
 .../media/example_test.go | 0
 {v1/internal => internal}/media/media.go | 0
 {v1/internal => internal}/media/media_test.go | 0
 {v2/internal => internal}/metrics/README.md | 0
 .../aws_cloudwatch_embedded_metrics.go | 0
 {v2/internal => internal}/metrics/metrics.go | 0
 .../secrets/aws_secrets_manager.go | 0
 .../secrets/environment_variable.go | 0
 {v2/internal => internal}/secrets/secrets.go | 0
 .../secrets/secrets_test.go | 0
 {v2/message => message}/message.go | 0
 {v1/message => message}/message_test.go | 0
 v2/substation.go => substation.go | 0
 ...bstation.libsonnet => substation.libsonnet | 0
 v2/substation_test.go => substation_test.go | 0
 ...on_test.jsonnet => substation_test.jsonnet | 0
 {v1/transform => transform}/README.md | 0
 {v2/transform => transform}/aggregate.go | 0
 .../aggregate_from_array.go | 0
 .../aggregate_from_array_test.go | 0
 .../aggregate_from_string.go | 0
 .../aggregate_from_string_test.go | 0
 .../aggregate_to_array.go | 0
 .../aggregate_to_array_test.go | 0
 .../aggregate_to_string.go | 0
 .../aggregate_to_string_test.go | 0
 {v2/transform => transform}/array_join.go | 0
 .../array_join_test.go | 0
 {v2/transform => transform}/array_zip.go | 0
 {v2/transform => transform}/array_zip_test.go | 0
 {v2/transform => transform}/enrich.go | 0
 .../enrich_aws_dynamodb_query.go | 0
 .../enrich_aws_dynamodb_query_test.go | 0
 .../enrich_aws_lambda.go | 0
 .../enrich_aws_lambda_test.go | 0
 .../enrich_dns_domain_lookup.go | 0
 .../enrich_dns_ip_lookup.go | 0
 .../enrich_dns_txt_lookup.go | 0
 .../enrich_http_get.go | 0
 .../enrich_http_post.go | 0
 .../enrich_kv_store_item_get.go | 0
 .../enrich_kv_store_item_set.go | 0
 .../enrich_kv_store_set_add.go | 0
 {v2/transform => transform}/format.go | 0
 .../format_from_base64.go | 0
 .../format_from_base64_test.go | 0
 .../format_from_gzip.go | 0
 .../format_from_gzip_test.go | 0
 .../format_from_pretty_print.go | 0
 .../format_from_pretty_print_test.go | 0
 .../format_from_zip.go | 0
 .../format_from_zip_test.go | 0
 .../format_to_base64.go | 0
 .../format_to_base64_test.go | 0
 {v2/transform => transform}/format_to_gzip.go | 0
 .../format_to_gzip_test.go | 0
 {v2/transform => transform}/hash.go | 0
 {v2/transform => transform}/hash_md5.go | 0
 {v2/transform => transform}/hash_md5_test.go | 0
 {v2/transform => transform}/hash_sha256.go | 0
 .../hash_sha256_test.go | 0
 {v2/transform => transform}/meta_err.go | 0
 {v2/transform => transform}/meta_err_test.go | 0
 {v2/transform => transform}/meta_for_each.go | 0
 .../meta_for_each_test.go | 0
 .../meta_kv_store_lock.go | 0
 .../meta_metric_duration.go | 0
 {v2/transform => transform}/meta_retry.go | 0
 {v2/transform => transform}/meta_switch.go | 0
 .../meta_switch_test.go | 0
 {v2/transform => transform}/network.go | 0
 .../network_domain_registered_domain.go | 0
 .../network_domain_registered_domain_test.go | 0
 .../network_domain_subdomain.go | 0
 .../network_domain_subdomain_test.go | 0
 .../network_domain_top_level_domain.go | 0
 .../network_domain_top_level_domain_test.go | 0
 {v2/transform => transform}/number.go | 0
 .../number_math_addition.go | 0
 .../number_math_addition_test.go | 0
 .../number_math_division.go | 0
 .../number_math_division_test.go | 0
 .../number_math_multiplication.go | 0
 .../number_math_multiplication_test.go | 0
 .../number_math_subtraction.go | 0
 .../number_math_subtraction_test.go | 0
 {v2/transform => transform}/number_maximum.go | 0
 .../number_maximum_test.go | 0
 {v2/transform => transform}/number_minimum.go | 0
 .../number_minimum_test.go | 0
 {v2/transform => transform}/object_copy.go | 0
 .../object_copy_test.go | 0
 {v2/transform => transform}/object_delete.go | 0
 .../object_delete_test.go | 0
 {v2/transform => transform}/object_insert.go | 0
 .../object_insert_test.go | 0
 {v2/transform => transform}/object_jq.go | 0
 {v2/transform => transform}/object_jq_test.go | 0
 .../object_to_boolean.go | 0
 .../object_to_boolean_test.go | 0
 .../object_to_float.go | 0
 .../object_to_float_test.go | 0
 .../object_to_integer.go | 0
 .../object_to_integer_test.go | 0
 .../object_to_string.go | 0
 .../object_to_string_test.go | 0
 .../object_to_unsigned_integer.go | 0
 .../object_to_unsigned_integer_test.go | 0
 {v2/transform => transform}/send.go | 0
 .../send_aws_dynamodb_put.go | 0
 .../send_aws_eventbridge.go | 0
 .../send_aws_kinesis_data_firehose.go | 0
 .../send_aws_kinesis_data_stream.go | 0
 .../send_aws_lambda.go | 0
 {v2/transform => transform}/send_aws_s3.go | 0
 {v2/transform => transform}/send_aws_sns.go | 0
 {v2/transform => transform}/send_aws_sqs.go | 0
 {v2/transform => transform}/send_file.go | 0
 {v2/transform => transform}/send_http_post.go | 0
 {v2/transform => transform}/send_stdout.go | 0
 {v2/transform => transform}/string.go | 0
 {v2/transform => transform}/string_append.go | 0
 .../string_append_test.go | 0
 {v2/transform => transform}/string_capture.go | 0
 .../string_capture_test.go | 0
 {v2/transform => transform}/string_replace.go | 0
 .../string_replace_test.go | 0
 {v2/transform => transform}/string_split.go | 0
 .../string_split_test.go | 0
 .../string_to_lower.go | 0
 .../string_to_lower_test.go | 0
 .../string_to_snake.go | 0
 .../string_to_snake_test.go | 0
 .../string_to_upper.go | 0
 .../string_to_upper_test.go | 0
 {v2/transform => transform}/string_uuid.go | 0
 {v2/transform => transform}/time.go | 0
 .../time_from_string.go | 0
 .../time_from_string_test.go | 0
 {v2/transform => transform}/time_from_unix.go | 0
 .../time_from_unix_milli.go | 0
 .../time_from_unix_milli_test.go | 0
 .../time_from_unix_test.go | 0
 {v2/transform => transform}/time_now.go | 0
 {v2/transform => transform}/time_to_string.go | 0
 .../time_to_string_test.go | 0
 {v2/transform => transform}/time_to_unix.go | 0
 .../time_to_unix_milli.go | 0
 .../time_to_unix_milli_test.go | 0
 .../time_to_unix_test.go | 0
 {v2/transform => transform}/transform.go | 0
 .../transform_example_test.go | 0
 {v2/transform => transform}/transform_test.go | 0
 .../utility_control.go | 0
 {v2/transform => transform}/utility_delay.go | 0
 {v2/transform => transform}/utility_drop.go | 0
 {v2/transform => transform}/utility_err.go | 0
 .../utility_metric_bytes.go | 0
 .../utility_metric_count.go | 0
 .../utility_metric_freshness.go | 0
 {v2/transform => transform}/utility_secret.go | 0
 v1/cmd/aws/lambda/autoscale/main.go | 183 --
 v1/cmd/aws/lambda/substation/api_gateway.go | 89 -
 v1/cmd/aws/lambda/substation/dynamodb.go | 230 ---
 .../aws/lambda/substation/kinesis_firehose.go | 71 -
 .../aws/lambda/substation/kinesis_stream.go | 128 --
 v1/cmd/aws/lambda/substation/lambda.go | 64 -
 v1/cmd/aws/lambda/substation/main.go | 94 --
 v1/cmd/aws/lambda/substation/s3.go | 362 ----
 v1/cmd/aws/lambda/substation/sns.go | 116 --
 v1/cmd/aws/lambda/substation/sqs.go | 115 --
 v1/cmd/aws/lambda/validate/main.go | 45 -
 v1/cmd/aws/lambda/validate/main_test.go | 81 -
 .../development/benchmark/substation/main.go | 205 ---
 .../kinesis-tap/substation/config.jsonnet | 7 -
 .../kinesis-tap/substation/main.go | 264 ---
 v1/condition/condition.go | 234 ---
 v1/condition/condition_example_test.go | 58 -
 v1/condition/condition_test.go | 505 ------
 v1/condition/format_json.go | 46 -
 v1/condition/format_json_test.go | 89 -
 v1/condition/format_mime.go | 71 -
 v1/condition/format_mime_test.go | 123 --
 v1/condition/meta_condition.go | 79 -
 v1/condition/meta_condition_test.go | 109 --
 v1/condition/meta_err.go | 107 --
 v1/condition/meta_err_test.go | 139 --
 v1/condition/meta_for_each.go | 141 --
 v1/condition/meta_for_each_test.go | 181 --
 v1/condition/meta_negate.go | 77 -
 v1/condition/meta_negate_test.go | 99 --
 v1/condition/network_ip_global_unicast.go | 50 -
 .../network_ip_global_unicast_test.go | 71 -
 .../network_ip_link_local_multicast.go | 50 -
 .../network_ip_link_local_multicast_test.go | 71 -
 v1/condition/network_ip_link_local_unicast.go | 50 -
 .../network_ip_link_local_unicast_test.go | 71 -
 v1/condition/network_ip_loopback.go | 50 -
 v1/condition/network_ip_loopback_test.go | 77 -
 v1/condition/network_ip_multicast.go | 50 -
 v1/condition/network_ip_multicast_test.go | 71 -
 v1/condition/network_ip_private.go | 50 -
 v1/condition/network_ip_private_test.go | 83 -
 v1/condition/network_ip_unicast.go | 50 -
 v1/condition/network_ip_unicast_test.go | 71 -
 v1/condition/network_ip_unspecified.go | 50 -
 v1/condition/network_ip_unspecified_test.go | 71 -
 v1/condition/network_ip_valid.go | 50 -
 v1/condition/network_ip_valid_test.go | 71 -
 v1/condition/number_bitwise_and.go | 44 -
 v1/condition/number_bitwise_and_test.go | 87 -
 v1/condition/number_bitwise_not.go | 44 -
 v1/condition/number_bitwise_or.go | 44 -
 v1/condition/number_bitwise_or_test.go | 77 -
 v1/condition/number_bitwise_xor.go | 44 -
 v1/condition/number_bitwise_xor_test.go | 77 -
 v1/condition/number_equal_to.go | 59 -
 v1/condition/number_equal_to_test.go | 185 --
 v1/condition/number_greater_than.go | 62 -
 v1/condition/number_greater_than_test.go | 185 --
 v1/condition/number_length_equal_to.go | 55 -
 v1/condition/number_length_equal_to_test.go | 113 --
 v1/condition/number_length_greater_than.go | 55 -
 .../number_length_greater_than_test.go | 113 --
 v1/condition/number_length_less_than.go | 55 -
 v1/condition/number_length_less_than_test.go | 113 --
 v1/condition/number_less_than.go | 60 -
 v1/condition/number_less_than_test.go | 185 --
 v1/condition/string.go | 16 -
 v1/condition/string_contains.go | 48 -
 v1/condition/string_contains_test.go | 86 -
 v1/condition/string_ends_with.go | 48 -
 v1/condition/string_ends_with_test.go | 99 --
 v1/condition/string_equal_to.go | 56 -
 v1/condition/string_equal_to_test.go | 123 --
 v1/condition/string_greater_than.go | 56 -
 v1/condition/string_greater_than_test.go | 113 --
 v1/condition/string_less_than.go | 55 -
 v1/condition/string_less_than_test.go | 113 --
 v1/condition/string_match.go | 79 -
 v1/condition/string_match_test.go | 85 -
 v1/condition/string_starts_with.go | 48 -
 v1/condition/string_starts_with_test.go | 99 --
 v1/condition/utility_random.go | 51 -
 v1/examples/Makefile | 133 --
 v1/examples/README.md | 15 -
 .../cmd/client/file/substation/config.jsonnet | 8 -
 .../cmd/client/file/substation/data.json | 1 -
 .../client/file/substation/event.libsonnet | 16 -
 .../cmd/client/file/substation/main.go | 195 ---
 .../cmd/client/file/substation/send.libsonnet | 7 -
 .../cmd/development/benchmark/config.jsonnet | 6 -
 .../cmd/development/benchmark/data_large.json | 1 -
 .../cmd/development/benchmark/data_small.json | 1 -
 .../cmd/development/benchmark/event.libsonnet | 7 -
 .../condition/meta/if_all_else/config.jsonnet | 33 -
 .../condition/meta/if_all_else/data.json | 1 -
 .../config/condition/number/config.jsonnet | 27 -
 v1/examples/config/condition/number/data.json | 1 -
 .../config/condition/string/config.jsonnet | 26 -
 v1/examples/config/condition/string/data.json | 1 -
 v1/examples/config/config.jsonnet | 28 -
 v1/examples/config/data.json | 1 -
 .../transform/aggregate/sample/config.jsonnet | 31 -
 .../transform/aggregate/sample/data.jsonl | 13 -
 .../aggregate/summarize/config.jsonnet | 24 -
 .../transform/aggregate/summarize/data.jsonl | 19 -
 .../aggregate/summarize/stdout.jsonl | 5 -
 .../transform/array/extend/config.jsonnet | 13 -
 .../config/transform/array/extend/data.json | 1 -
 .../config/transform/array/extend/stdout.json | 1 -
 .../transform/array/flatten/config.jsonnet | 11 -
 .../config/transform/array/flatten/data.json | 1 -
 .../transform/array/flatten/stdout.json | 1 -
 .../array/flatten_deep/config.jsonnet | 12 -
 .../transform/array/flatten_deep/data.json | 1 -
 .../transform/array/flatten_deep/stdout.json | 1 -
 .../transform/array/group/config.jsonnet | 30 -
 .../config/transform/array/group/data.json | 1 -
 .../enrich/http_secret/config.jsonnet | 26 -
 .../enrich/kvstore_csv/config.jsonnet | 22 -
 .../transform/enrich/kvstore_csv/data.jsonl | 1 -
 .../enrich/kvstore_json/config.jsonnet | 17 -
 .../transform/enrich/kvstore_json/data.jsonl | 1 -
 .../transform/enrich/kvstore_json/kv.json | 1 -
 .../enrich/kvstore_set_add/config.jsonnet | 28 -
 .../enrich/kvstore_set_add/data.jsonl | 6 -
 .../transform/enrich/mmdb/config.jsonnet | 13 -
 .../config/transform/enrich/mmdb/data.jsonl | 3 -
 .../config/transform/enrich/mmdb/stdout.jsonl | 3 -
 .../transform/enrich/urlscan/config.jsonnet | 49 -
 .../transform/enrich/urlscan/data.jsonl | 1 -
 .../config/transform/format/zip/data.jsonl | 3 -
 .../meta/crash_program/config.jsonnet | 32 -
 .../transform/meta/crash_program/data.json | 1 -
 .../meta/each_in_array/config.jsonnet | 20 -
 .../transform/meta/each_in_array/data.json | 1 -
 .../transform/meta/each_in_array/stdout.json | 1 -
 .../meta/exactly_once_consumer/config.jsonnet | 33 -
 .../meta/exactly_once_consumer/data.jsonl | 8 -
 .../meta/exactly_once_producer/config.jsonnet | 23 -
 .../meta/exactly_once_producer/data.jsonl | 8 -
 .../meta/exactly_once_system/config.jsonnet | 26 -
 .../meta/exactly_once_system/data.jsonl | 8 -
 .../meta/execution_time/config.jsonnet | 44 -
 .../meta/retry_with_backoff/config.jsonnet | 26 -
 .../meta/retry_with_backoff/data.json | 1 -
 .../transform/number/clamp/config.jsonnet | 11 -
 .../transform/number/max/config.jsonnet | 11 -
 .../transform/number/min/config.jsonnet | 11 -
 .../send/aux_transforms/config.jsonnet | 39 -
 .../transform/send/aux_transforms/data.jsonl | 13 -
 .../send/aws_retryable_errors/config.jsonnet | 25 -
 .../send/aws_s3_glacier/config.jsonnet | 23 -
 .../transform/send/batch/config.jsonnet | 19 -
 .../config/transform/send/batch/data.jsonl | 13 -
 .../transform/send/datadog/config.jsonnet | 42 -
 .../config/transform/send/datadog/data.jsonl | 13 -
 .../transform/send/splunk/config.jsonnet | 31 -
 .../config/transform/send/splunk/data.jsonl | 13 -
 .../transform/send/sumologic/config.jsonnet | 23 -
 .../transform/send/sumologic/data.jsonl | 13 -
 .../time/string_conversion/config.jsonnet | 16 -
 .../time/string_conversion/data.json | 1 -
 .../utility/generate_ctrl/config.jsonnet | 13 -
 .../utility/generate_ctrl/data.jsonl | 13 -
 .../utility/message_bytes/config.jsonnet | 19 -
 .../utility/message_bytes/data.jsonl | 13 -
 .../utility/message_count/config.jsonnet | 18 -
 .../utility/message_count/data.jsonl | 13 -
 v1/examples/terraform/aws/README.md | 576 -------
 .../config/consumer/config.jsonnet | 11 -
 .../terraform/_provider.tf | 8 -
 .../terraform/_resources.tf | 102 --
 .../terraform/autoscaler.tf | 40 -
 .../terraform/consumer.tf | 31 -
 .../to_lambda/config/consumer/config.jsonnet | 15 -
 .../to_lambda/terraform/_resources.tf | 21 -
 .../to_lambda/terraform/consumer.tf | 49 -
 .../dynamodb/cdc/config/node/config.jsonnet | 8 -
 .../aws/dynamodb/cdc/terraform/_resources.tf | 40 -
 .../aws/dynamodb/cdc/terraform/node.tf | 27 -
 .../config/node/config.jsonnet | 40 -
 .../distributed_lock/terraform/_resources.tf | 40 -
 .../distributed_lock/terraform/node.tf | 26 -
 .../dynamodb/telephone/config/const.libsonnet | 8 -
 .../config/dvc_mgmt_enrichment/config.jsonnet | 10 -
 .../config/edr_enrichment/config.jsonnet | 18 -
 .../config/edr_transform/config.jsonnet | 24 -
 .../config/idp_enrichment/config.jsonnet | 31 -
 .../dynamodb/telephone/dvc_mgmt_data.jsonl | 2 -
 .../aws/dynamodb/telephone/edr_data.jsonl | 2 -
 .../aws/dynamodb/telephone/idp_data.jsonl | 2 -
 .../aws/dynamodb/telephone/post_deploy.sh | 4 -
 .../telephone/terraform/_resources.tf | 57 -
 .../telephone/terraform/autoscaler.tf | 38 -
 .../dynamodb/telephone/terraform/dvc_mgmt.tf | 45 -
 .../aws/dynamodb/telephone/terraform/edr.tf | 78 -
 .../aws/dynamodb/telephone/terraform/idp.tf | 45 -
 .../lambda_bus/config/consumer/config.jsonnet | 8 -
 .../lambda_bus/config/producer/config.jsonnet | 11 -
 .../lambda_bus/terraform/_resources.tf | 17 -
 .../lambda_bus/terraform/consumer.tf | 35 -
 .../lambda_bus/terraform/producer.tf | 28 -
 .../config/transform_node/config.jsonnet | 14 -
 .../data_transform/terraform/_resources.tf | 191 ---
 .../kinesis/autoscale/terraform/_resources.tf | 83 -
 .../config/publisher/config.jsonnet | 11 -
 .../config/subscriber/config.jsonnet | 8 -
 .../multistream/terraform/_resources.tf | 139 --
 .../multistream/terraform/autoscaler.tf | 41 -
 .../multistream/terraform/publisher.tf | 32 -
 .../multistream/terraform/subscriber.tf | 32 -
 .../aws/kinesis/nxdr/config/const.libsonnet | 15 -
 .../nxdr/config/enrichment/config.jsonnet | 34 -
 .../nxdr/config/threat_enrichment.libsonnet | 45 -
 .../nxdr/config/transform/config.jsonnet | 13 -
 .../terraform/aws/kinesis/nxdr/data.jsonl | 5 -
 .../terraform/aws/kinesis/nxdr/post_deploy.sh | 2 -
 .../aws/kinesis/nxdr/terraform/_resources.tf | 77 -
 .../aws/kinesis/nxdr/terraform/autoscaler.tf | 33 -
 .../aws/kinesis/nxdr/terraform/enrichment.tf | 29 -
 .../aws/kinesis/nxdr/terraform/transform.tf | 29 -
 .../time_travel/config/const.libsonnet | 12 -
 .../config/enrichment/config.jsonnet | 16 -
 .../config/transform/config.jsonnet | 28 -
 .../aws/kinesis/time_travel/data.jsonl | 4 -
 .../aws/kinesis/time_travel/post_deploy.sh | 2 -
 .../time_travel/terraform/_resources.tf | 79 -
 .../time_travel/terraform/autoscaler.tf | 35 -
 .../time_travel/terraform/enrichment.tf | 29 -
 .../time_travel/terraform/transform.tf | 29 -
 .../appconfig/config/node/config.jsonnet | 10 -
 .../lambda/appconfig/terraform/_resources.tf | 47 -
 .../aws/lambda/appconfig/terraform/node.tf | 31 -
 .../config/microservice/config.jsonnet | 31 -
 .../microservice/terraform/_resources.tf | 21 -
 .../microservice/terraform/microservice.tf | 30 -
 .../vpc/config/whatismyip/config.jsonnet | 11 -
 .../aws/lambda/vpc/terraform/_resources.tf | 29 -
 .../aws/lambda/vpc/terraform/whatismyip.tf | 36 -
 .../s3/data_lake/config/node/config.jsonnet | 28 -
 .../aws/s3/data_lake/terraform/_resources.tf | 38 -
 .../aws/s3/data_lake/terraform/node.tf | 35 -
 .../config/node/config.jsonnet | 13 -
 .../config/retrier/config.jsonnet | 17 -
 .../retry_on_failure/terraform/_resources.tf | 57 -
 .../terraform/node_with_retrier.tf | 86 -
 .../aws/s3/sns/config/node/config.jsonnet | 8 -
 .../aws/s3/sns/terraform/_resources.tf | 159 --
 .../terraform/aws/s3/sns/terraform/node.tf | 37 -
 .../aws/s3/xdr/config/node/config.jsonnet | 60 -
 .../aws/s3/xdr/config/node/const.libsonnet | 16 -
 v1/examples/terraform/aws/s3/xdr/data.jsonl | 4 -
 v1/examples/terraform/aws/s3/xdr/stdout.jsonl | 2 -
 .../aws/s3/xdr/terraform/_resources.tf | 50 -
 .../terraform/aws/s3/xdr/terraform/node.tf | 30 -
 .../sns/pub_sub/config/client/config.jsonnet | 11 -
 .../config/subscriber_x/config.jsonnet | 8 -
 .../config/subscriber_y/config.jsonnet | 8 -
 .../config/subscriber_z/config.jsonnet | 8 -
 .../aws/sns/pub_sub/terraform/_resources.tf | 35 -
 .../aws/sns/pub_sub/terraform/subscribers.tf | 121 --
 .../config/frontend/config.jsonnet | 25 -
 .../config/microservice/config.jsonnet | 29 -
 .../sqs/microservice/terraform/_resources.tf | 56 -
 .../sqs/microservice/terraform/frontend.tf | 26 -
 .../microservice/terraform/microservice.tf | 31 -
 v1/go.mod | 56 -
 v1/go.sum | 139 --
 v1/internal/aws/appconfig/appconfig.go | 44 -
 v1/internal/aws/cloudwatch/cloudwatch.go | 305 ----
 v1/internal/aws/config.go | 105 --
 v1/internal/aws/dynamodb/dynamodb.go | 278 ---
 v1/internal/aws/firehose/firehose.go | 97 --
 v1/internal/aws/kinesis/kinesis.go | 344 ----
 v1/internal/aws/lambda/lambda.go | 75 -
 v1/internal/aws/s3manager/s3manager.go | 129 --
 .../aws/secretsmanager/secretsmanager.go | 61 -
 v1/internal/aws/sns/sns.go | 117 --
 v1/internal/aws/sqs/sqs.go | 118 --
 v1/internal/bufio/bufio.go | 124 --
 v1/internal/bufio/example_test.go | 45 -
 v1/internal/config/config.go | 84 -
 v1/internal/file/example_test.go | 82 -
 v1/internal/file/file.go | 175 --
 v1/internal/kv/aws_dynamodb.go | 328 ----
 v1/internal/kv/csv_file.go | 224 ---
 v1/internal/kv/example_test.go | 99 --
 v1/internal/kv/json_file.go | 146 --
 v1/internal/kv/kv.go | 129 --
 v1/internal/kv/memory.go | 224 ---
 v1/internal/kv/mmdb.go | 139 --
 v1/internal/kv/text_file.go | 140 --
 v1/internal/media/example_test.go | 63 -
 v1/internal/metrics/README.md | 5 -
 .../aws_cloudwatch_embedded_metrics.go | 77 -
 v1/internal/metrics/metrics.go | 49 -
 v1/internal/secrets/aws_secrets_manager.go | 99 --
 v1/internal/secrets/environment_variable.go | 82 -
 v1/internal/secrets/secrets.go | 100 --
 v1/internal/secrets/secrets_test.go | 43 -
 v1/message/message.go | 338 ----
 v1/substation.go | 81 -
 v1/substation.libsonnet | 1483 -----------------
 v1/substation_test.go | 202 ---
 v1/substation_test.jsonnet | 74 -
 v1/transform/aggregate.go | 61 -
 v1/transform/aggregate_from_array.go | 99 --
 v1/transform/aggregate_from_array_test.go | 136 --
 v1/transform/aggregate_from_string.go | 59 -
 v1/transform/aggregate_from_string_test.go | 82 -
 v1/transform/aggregate_to_array.go | 106 --
 v1/transform/aggregate_to_array_test.go | 156 --
 v1/transform/aggregate_to_string.go | 96 --
 v1/transform/aggregate_to_string_test.go | 142 --
 v1/transform/array_join.go | 113 --
 v1/transform/array_join_test.go | 100 --
 v1/transform/array_zip.go | 107 --
 v1/transform/array_zip_test.go | 95 --
 v1/transform/enrich.go | 62 -
 v1/transform/enrich_aws_dynamodb.go | 177 --
 v1/transform/enrich_aws_dynamodb_test.go | 118 --
 v1/transform/enrich_aws_lambda.go | 117 --
 v1/transform/enrich_aws_lambda_test.go | 111 --
 v1/transform/enrich_dns_domain_lookup.go | 95 --
 v1/transform/enrich_dns_ip_lookup.go | 95 --
 v1/transform/enrich_dns_txt_lookup.go | 95 --
 v1/transform/enrich_http_get.go | 155 --
 v1/transform/enrich_http_post.go | 172 --
 v1/transform/enrich_kv_store_item_get.go | 130 --
 v1/transform/enrich_kv_store_item_set.go | 182 --
 v1/transform/enrich_kv_store_set_add.go | 180 --
 v1/transform/format.go | 68 -
 v1/transform/format_from_base64.go | 85 -
 v1/transform/format_from_base64_test.go | 98 --
 v1/transform/format_from_gzip.go | 52 -
 v1/transform/format_from_gzip_test.go | 79 -
 v1/transform/format_from_pretty_print.go | 89 -
 v1/transform/format_from_pretty_print_test.go | 105 --
 v1/transform/format_from_zip.go | 86 -
 v1/transform/format_from_zip_test.go | 86 -
 v1/transform/format_to_base64.go | 69 -
 v1/transform/format_to_base64_test.go | 98 --
 v1/transform/format_to_gzip.go | 52 -
 v1/transform/format_to_gzip_test.go | 81 -
 v1/transform/hash.go | 29 -
 v1/transform/hash_md5.go | 71 -
 v1/transform/hash_md5_test.go | 98 --
 v1/transform/hash_sha256.go | 71 -
 v1/transform/hash_sha256_test.go | 93 --
 v1/transform/meta_err.go | 134 --
 v1/transform/meta_err_test.go | 190 ---
 v1/transform/meta_for_each.go | 149 --
 v1/transform/meta_for_each_test.go | 278 ---
 v1/transform/meta_kv_store_lock.go | 238 ---
 v1/transform/meta_metric_duration.go | 125 --
 v1/transform/meta_pipeline.go | 139 --
 v1/transform/meta_pipeline_test.go | 116 --
 v1/transform/meta_retry.go | 169 --
 v1/transform/meta_switch.go | 186 ---
 v1/transform/meta_switch_test.go | 388 -----
 v1/transform/network.go | 29 -
 .../network_domain_registered_domain.go | 77 -
 .../network_domain_registered_domain_test.go | 95 --
 v1/transform/network_domain_subdomain.go | 100 --
 v1/transform/network_domain_subdomain_test.go | 95 --
 .../network_domain_top_level_domain.go | 71 -
 .../network_domain_top_level_domain_test.go | 95 --
 v1/transform/number.go | 67 -
 v1/transform/number_math_addition.go | 93 --
 v1/transform/number_math_addition_test.go | 118 --
 v1/transform/number_math_division.go | 93 --
 v1/transform/number_math_division_test.go | 118 --
 v1/transform/number_math_multiplication.go | 93 --
 .../number_math_multiplication_test.go | 120 --
 v1/transform/number_math_subtraction.go | 93 --
 v1/transform/number_math_subtraction_test.go | 120 --
 v1/transform/number_maximum.go | 76 -
 v1/transform/number_maximum_test.go | 158 --
 v1/transform/number_minimum.go | 76 -
 v1/transform/number_minimum_test.go | 158 --
 v1/transform/object_copy.go | 90 -
 v1/transform/object_copy_test.go | 185 --
 v1/transform/object_delete.go | 71 -
 v1/transform/object_delete_test.go | 127 --
 v1/transform/object_insert.go | 78 -
 v1/transform/object_insert_test.go | 159 --
 v1/transform/object_jq.go | 117 --
 v1/transform/object_jq_test.go | 156 --
 v1/transform/object_to_boolean.go | 80 -
 v1/transform/object_to_boolean_test.go | 161 --
 v1/transform/object_to_float.go | 76 -
 v1/transform/object_to_float_test.go | 87 -
 v1/transform/object_to_integer.go | 76 -
 v1/transform/object_to_integer_test.go | 101 --
 v1/transform/object_to_string.go | 76 -
 v1/transform/object_to_string_test.go | 116 --
 v1/transform/object_to_unsigned_integer.go | 76 -
 .../object_to_unsigned_integer_test.go | 101 --
 v1/transform/send.go | 41 -
 v1/transform/send_aws_dynamodb.go | 198 ---
 v1/transform/send_aws_eventbridge.go | 203 ---
 .../send_aws_kinesis_data_firehose.go | 173 --
 v1/transform/send_aws_kinesis_data_stream.go | 220 ---
 v1/transform/send_aws_lambda.go | 176 --
 v1/transform/send_aws_s3.go | 213 ---
 v1/transform/send_aws_sns.go | 173 --
 v1/transform/send_aws_sqs.go | 183 --
 v1/transform/send_file.go | 172 --
 v1/transform/send_http_post.go | 187 ---
 v1/transform/send_stdout.go | 129 --
 v1/transform/string.go | 45 -
 v1/transform/string_append.go | 102 --
 v1/transform/string_append_test.go | 100 --
 v1/transform/string_capture.go | 191 ---
 v1/transform/string_capture_test.go | 188 ---
 v1/transform/string_replace.go | 111 --
 v1/transform/string_replace_test.go | 131 --
 v1/transform/string_split.go | 112 --
 v1/transform/string_split_test.go | 100 --
 v1/transform/string_to_lower.go | 69 -
 v1/transform/string_to_lower_test.go | 95 --
 v1/transform/string_to_snake.go | 68 -
 v1/transform/string_to_snake_test.go | 95 --
 v1/transform/string_to_upper.go | 69 -
 v1/transform/string_to_upper_test.go | 95 --
 v1/transform/string_uuid.go | 67 -
 v1/transform/time.go | 108 --
 v1/transform/time_from_string.go | 75 -
 v1/transform/time_from_string_test.go | 114 --
 v1/transform/time_from_unix.go | 75 -
 v1/transform/time_from_unix_milli.go | 75 -
 v1/transform/time_from_unix_milli_test.go | 100 --
 v1/transform/time_from_unix_test.go | 100 --
 v1/transform/time_now.go | 78 -
 v1/transform/time_to_string.go | 74 -
 v1/transform/time_to_string_test.go | 114 --
 v1/transform/time_to_unix.go | 75 -
 v1/transform/time_to_unix_milli.go | 75 -
 v1/transform/time_to_unix_milli_test.go | 97 --
 v1/transform/time_to_unix_test.go | 97 --
 v1/transform/transform.go | 267 ---
 v1/transform/transform_example_test.go | 46 -
 v1/transform/transform_test.go | 139 --
 v1/transform/utility_control.go | 86 -
 v1/transform/utility_delay.go | 79 -
 v1/transform/utility_drop.go | 53 -
 v1/transform/utility_err.go | 56 -
 v1/transform/utility_metric_bytes.go | 77 -
 v1/transform/utility_metric_count.go | 77 -
 v1/transform/utility_metric_freshness.go | 129 --
 v1/transform/utility_secret.go | 77 -
 v2/cmd/README.md | 11 -
 v2/cmd/aws/lambda/README.md | 44 -
 .../kinesis-tap/substation/README.md | 55 -
 v2/condition/README.md | 3 -
 v2/condition/network.go | 13 -
 v2/condition/number.go | 62 -
 v2/config/config.go | 11 -
 v2/examples/condition/meta/data.json | 1 -
 v2/examples/condition/number/data.json | 1 -
 v2/examples/condition/string/data.json | 1 -
 .../transform/aggregate/sample/data.jsonl | 13 -
 .../transform/aggregate/summarize/data.jsonl | 19 -
 v2/examples/transform/array/extend/data.json | 1 -
 v2/examples/transform/array/flatten/data.json | 1 -
 .../transform/array/flatten_deep/data.json | 1 -
 v2/examples/transform/array/group/data.json | 1 -
 .../transform/enrich/http_secret/data.json | 1 -
 .../transform/enrich/kvstore_csv/data.jsonl | 1 -
 .../transform/enrich/kvstore_csv/kv.csv | 4 -
 .../transform/enrich/kvstore_json/data.jsonl | 1 -
 .../enrich/kvstore_set_add/data.jsonl | 6 -
 v2/examples/transform/enrich/mmdb/data.jsonl | 3 -
 .../transform/enrich/urlscan/data.json | 1 -
 .../transform/format/zip/config.jsonnet | 17 -
 v2/examples/transform/format/zip/data.csv | 3 -
 v2/examples/transform/format/zip/data.jsonl | 3 -
 .../transform/meta/crash_program/data.json | 1 -
 .../transform/meta/each_in_array/data.json | 1 -
 .../meta/exactly_once_consumer/data.jsonl | 8 -
 .../meta/exactly_once_producer/data.jsonl | 8 -
 .../meta/exactly_once_system/data.jsonl | 8 -
 .../transform/meta/execution_time/data.json | 1 -
 .../meta/retry_with_backoff/data.json | 1 -
 v2/examples/transform/number/clamp/data.txt | 3 -
 v2/examples/transform/number/clamp/stdout.txt | 3 -
 v2/examples/transform/number/max/data.txt | 4 -
 v2/examples/transform/number/max/stdout.txt | 4 -
 v2/examples/transform/number/min/data.txt | 4 -
 v2/examples/transform/number/min/stdout.txt | 4 -
 .../transform/send/aux_transforms/data.jsonl | 13 -
 v2/examples/transform/send/batch/data.jsonl | 13 -
 v2/examples/transform/send/datadog/data.jsonl | 13 -
 v2/examples/transform/send/splunk/data.jsonl | 13 -
 .../transform/send/sumologic/data.jsonl | 13 -
 .../transform/time/str_conversion/data.json | 1 -
 .../utility/generate_ctrl/data.jsonl | 13 -
 .../utility/message_bytes/data.jsonl | 13 -
 .../utility/message_count/data.jsonl | 13 -
 .../utility/message_freshness/config.jsonnet | 24 -
 .../utility/message_freshness/data.jsonl | 1 -
 v2/internal/README.md | 2 -
 v2/internal/aggregate/aggregate.go | 160 --
 v2/internal/aws/README.md | 6 -
 v2/internal/aws/config_v2.go | 83 -
 v2/internal/aws/dynamodb/dynamodb_test.go | 229 ---
 v2/internal/aws/firehose/firehose_test.go | 105 --
 v2/internal/aws/kinesis/kinesis_test.go | 130 --
 v2/internal/aws/lambda/lambda_test.go | 82 -
 v2/internal/aws/s3manager/s3manager_test.go | 118 --
 .../aws/secretsmanager/secretsmanager_test.go | 53 -
 v2/internal/aws/sns/sns_test.go | 104 --
 v2/internal/aws/sqs/sqs_test.go | 103 --
 v2/internal/base64/base64.go | 25 -
 v2/internal/base64/base64_test.go | 96 --
 v2/internal/bufio/bufio_test.go | 31 -
 v2/internal/channel/channel.go | 54 -
 v2/internal/errors/errors.go | 12 -
 v2/internal/http/README.md | 5 -
 v2/internal/http/http.go | 88 -
 v2/internal/http/http_test.go | 87 -
 v2/internal/log/log.go | 36 -
 v2/internal/media/media.go | 48 -
 v2/internal/media/media_test.go | 106 --
 v2/message/message_test.go | 343 ----
 v2/transform/README.md | 3 -
 897 files changed, 1 insertion(+), 38930 deletions(-)
 rename {v1/cmd => cmd}/README.md (100%)
 rename {v1/cmd => cmd}/aws/lambda/README.md (100%)
 rename {v2/cmd => cmd}/aws/lambda/autoscale/main.go (100%)
 rename {v2/cmd => cmd}/aws/lambda/substation/api_gateway.go (100%)
 rename {v2/cmd => cmd}/aws/lambda/substation/dynamodb.go (100%)
 rename {v2/cmd => cmd}/aws/lambda/substation/kinesis_firehose.go (100%)
 rename {v2/cmd => cmd}/aws/lambda/substation/kinesis_stream.go (100%)
 rename {v2/cmd => cmd}/aws/lambda/substation/lambda.go (100%)
 rename {v2/cmd => cmd}/aws/lambda/substation/main.go (100%)
 rename {v2/cmd => cmd}/aws/lambda/substation/s3.go (100%)
 rename {v2/cmd => cmd}/aws/lambda/substation/sns.go (100%)
 rename {v2/cmd => cmd}/aws/lambda/substation/sqs.go (100%)
 rename {v2/cmd => cmd}/aws/lambda/validate/main.go (100%)
 rename {v2/cmd => cmd}/aws/lambda/validate/main_test.go (100%)
 rename {v2/cmd => cmd}/development/benchmark/substation/main.go (100%)
 rename {v1/cmd => cmd}/development/kinesis-tap/substation/README.md (100%)
 rename {v2/cmd => cmd}/development/kinesis-tap/substation/config.jsonnet (100%)
 rename {v2/cmd => cmd}/development/kinesis-tap/substation/main.go (100%)
 rename {v1/condition => condition}/README.md (100%)
 rename {v2/condition => condition}/condition.go (100%)
 rename {v2/condition => condition}/format_json.go (100%)
 rename {v2/condition => condition}/format_json_test.go (100%)
 rename {v2/condition => condition}/format_mime.go (100%)
 rename {v2/condition => condition}/format_mime_test.go (100%)
 rename {v2/condition => condition}/meta.go (100%)
 rename {v2/condition => condition}/meta_all.go (100%)
 rename {v2/condition => condition}/meta_all_test.go (100%)
 rename {v2/condition => condition}/meta_any.go (100%)
 rename {v2/condition => condition}/meta_any_test.go (100%)
 rename {v2/condition => condition}/meta_none.go (100%)
 rename {v2/condition => condition}/meta_none_test.go (100%)
 rename {v1/condition => condition}/network.go (100%)
 rename {v2/condition => condition}/network_ip_global_unicast.go (100%)
 rename {v2/condition => condition}/network_ip_global_unicast_test.go (100%)
 rename {v2/condition => condition}/network_ip_link_local_multicast.go (100%)
 rename {v2/condition => condition}/network_ip_link_local_multicast_test.go (100%)
 rename {v2/condition => condition}/network_ip_link_local_unicast.go (100%)
 rename {v2/condition => condition}/network_ip_link_local_unicast_test.go (100%)
 rename {v2/condition => condition}/network_ip_loopback.go (100%)
 rename {v2/condition => condition}/network_ip_loopback_test.go (100%)
 rename {v2/condition => condition}/network_ip_multicast.go (100%)
 rename {v2/condition => condition}/network_ip_multicast_test.go (100%)
 rename {v2/condition => condition}/network_ip_private.go (100%)
 rename {v2/condition => condition}/network_ip_private_test.go (100%)
 rename {v2/condition => condition}/network_ip_unicast.go (100%)
 rename {v2/condition => condition}/network_ip_unicast_test.go (100%)
 rename {v2/condition => condition}/network_ip_unspecified.go (100%)
 rename {v2/condition => condition}/network_ip_unspecified_test.go (100%)
 rename {v2/condition => condition}/network_ip_valid.go (100%)
 rename {v2/condition => condition}/network_ip_valid_test.go (100%)
 rename {v1/condition => condition}/number.go (100%)
 rename {v2/condition => condition}/number_bitwise_and.go (100%)
 rename {v2/condition => condition}/number_bitwise_and_test.go (100%)
 rename {v2/condition => condition}/number_bitwise_not.go (100%)
 rename {v2/condition => condition}/number_bitwise_or.go (100%)
 rename {v2/condition => condition}/number_bitwise_or_test.go (100%)
 rename {v2/condition => condition}/number_bitwise_xor.go (100%)
 rename {v2/condition => condition}/number_bitwise_xor_test.go (100%)
 rename {v2/condition => condition}/number_equal_to.go (100%)
 rename {v2/condition => condition}/number_equal_to_test.go (100%)
 rename {v2/condition => condition}/number_greater_than.go (100%)
 rename {v2/condition => condition}/number_greater_than_test.go (100%)
 rename {v2/condition => condition}/number_length_equal_to.go (100%)
 rename {v2/condition => condition}/number_length_equal_to_test.go (100%)
 rename {v2/condition => condition}/number_length_greater_than.go (100%)
 rename {v2/condition => condition}/number_length_greater_than_test.go (100%)
 rename {v2/condition => condition}/number_length_less_than.go (100%)
 rename {v2/condition => condition}/number_length_less_than_test.go (100%)
 rename {v2/condition => condition}/number_less_than.go (100%)
 rename {v2/condition => condition}/number_less_than_test.go (100%)
 rename {v2/condition => condition}/string.go (100%)
 rename {v2/condition => condition}/string_contains.go (100%)
 rename {v2/condition => condition}/string_contains_test.go (100%)
 rename {v2/condition => condition}/string_ends_with.go (100%)
 rename {v2/condition => condition}/string_ends_with_test.go (100%)
 rename {v2/condition => condition}/string_equal_to.go (100%)
 rename {v2/condition => condition}/string_equal_to_test.go (100%)
 rename {v2/condition => condition}/string_greater_than.go (100%)
 rename {v2/condition => condition}/string_greater_than_test.go (100%)
 rename {v2/condition => condition}/string_less_than.go (100%)
 rename {v2/condition => condition}/string_less_than_test.go (100%)
 rename {v2/condition => condition}/string_match.go (100%)
 rename {v2/condition => condition}/string_match_test.go (100%)
 rename {v2/condition => condition}/string_starts_with.go (100%)
 rename {v2/condition => condition}/string_starts_with_test.go (100%)
 rename {v2/condition => condition}/utility_random.go (100%)
 rename {v1/config => config}/config.go (100%)
 rename {v2/examples => examples}/condition/meta/config.jsonnet (100%)
 rename {v2/examples => examples}/condition/meta/stdout.txt (100%)
 rename {v2/examples => examples}/condition/number/config.jsonnet (100%)
 rename {v2/examples => examples}/condition/number/stdout.txt (100%)
 rename {v2/examples => examples}/condition/string/config.jsonnet (100%)
 rename {v2/examples => examples}/condition/string/stdout.txt (100%)
 rename {v2/examples => examples}/main.go (100%)
 rename {v2/examples => examples}/transform/aggregate/sample/config.jsonnet (100%)
 rename {v2/examples => examples}/transform/aggregate/sample/stdout.txt (100%)
 rename {v2/examples => examples}/transform/aggregate/summarize/config.jsonnet (100%)
 rename {v2/examples => examples}/transform/aggregate/summarize/stdout.txt (100%)
 rename {v2/examples => examples}/transform/array/extend/config.jsonnet (100%)
 rename {v2/examples => examples}/transform/array/extend/stdout.txt (100%)
 rename {v2/examples => examples}/transform/array/flatten/config.jsonnet (100%)
 rename {v2/examples => examples}/transform/array/flatten/stdout.txt (100%)
 rename {v2/examples => examples}/transform/array/flatten_deep/config.jsonnet (100%)
 rename {v2/examples => examples}/transform/array/flatten_deep/stdout.txt (100%)
 rename {v2/examples => examples}/transform/array/group/config.jsonnet (100%)
 rename {v2/examples => examples}/transform/array/group/stdout.txt (100%)
 rename {v2/examples => examples}/transform/enrich/http_secret/config.jsonnet (100%)
 rename {v2/examples => examples}/transform/enrich/kvstore_csv/config.jsonnet (100%)
 rename {v1/examples/config => examples}/transform/enrich/kvstore_csv/kv.csv (100%)
 rename {v2/examples => examples}/transform/enrich/kvstore_csv/stdout.txt (100%)
 rename {v2/examples => examples}/transform/enrich/kvstore_json/config.jsonnet (100%)
 rename {v2/examples => examples}/transform/enrich/kvstore_json/stdout.txt (100%)
 rename {v2/examples => examples}/transform/enrich/kvstore_set_add/config.jsonnet (100%)
 rename {v2/examples => examples}/transform/enrich/kvstore_set_add/stdout.txt (100%)
 rename {v2/examples => examples}/transform/enrich/mmdb/config.jsonnet (100%)
 rename {v2/examples => examples}/transform/enrich/mmdb/stdout.txt (100%)
 rename {v2/examples => examples}/transform/enrich/urlscan/config.jsonnet (100%)
 rename {v1/examples/config => examples}/transform/format/zip/config.jsonnet (100%)
 rename {v1/examples/config => examples}/transform/format/zip/data.csv (100%)
 rename {v2/examples => examples}/transform/format/zip/stdout.txt (100%)
 rename {v2/examples => examples}/transform/meta/crash_program/config.jsonnet (100%)
 rename {v2/examples => examples}/transform/meta/crash_program/stdout.txt (100%)
 rename {v2/examples => examples}/transform/meta/each_in_array/config.jsonnet (100%)
 rename {v2/examples => examples}/transform/meta/each_in_array/stdout.txt (100%)
 rename {v2/examples => examples}/transform/meta/exactly_once_consumer/config.jsonnet (100%)
 rename {v2/examples => examples}/transform/meta/exactly_once_consumer/stdout.txt (100%)
 rename {v2/examples => examples}/transform/meta/exactly_once_producer/config.jsonnet (100%)
 rename {v2/examples => examples}/transform/meta/exactly_once_producer/stdout.txt (100%)
 rename {v2/examples => examples}/transform/meta/exactly_once_system/config.jsonnet (100%)
 rename {v2/examples => examples}/transform/meta/exactly_once_system/stdout.txt (100%)
 rename {v2/examples => examples}/transform/meta/execution_time/config.jsonnet (100%)
 rename {v2/examples => examples}/transform/meta/execution_time/stdout.txt (100%)
 rename {v2/examples => examples}/transform/meta/retry_with_backoff/config.jsonnet (100%)
 rename {v2/examples => examples}/transform/meta/retry_with_backoff/stdout.txt (100%)
 rename {v2/examples => examples}/transform/number/clamp/config.jsonnet (100%)
 rename {v1/examples/config => examples}/transform/number/clamp/data.txt (100%)
 rename {v1/examples/config => examples}/transform/number/clamp/stdout.txt (100%)
 rename {v2/examples => examples}/transform/number/max/config.jsonnet (100%)
 rename {v1/examples/config => examples}/transform/number/max/data.txt (100%)
 rename {v1/examples/config => examples}/transform/number/max/stdout.txt (100%)
 rename {v2/examples => examples}/transform/number/min/config.jsonnet (100%)
 rename {v1/examples/config => examples}/transform/number/min/data.txt (100%)
 rename {v1/examples/config => examples}/transform/number/min/stdout.txt (100%)
 rename {v2/examples => examples}/transform/send/aux_transforms/config.jsonnet (100%)
 rename {v2/examples => examples}/transform/send/aux_transforms/stdout.txt (100%)
 rename {v2/examples => examples}/transform/send/aws_s3_glacier/config.jsonnet (100%)
 rename {v2/examples => examples}/transform/send/batch/config.jsonnet (100%)
 rename {v2/examples => examples}/transform/send/batch/stdout.txt (100%)
 rename {v2/examples => examples}/transform/send/datadog/config.jsonnet (100%)
 rename {v2/examples => examples}/transform/send/splunk/config.jsonnet (100%)
 rename {v2/examples => examples}/transform/send/sumologic/config.jsonnet (100%)
 rename {v2/examples => examples}/transform/time/str_conversion/config.jsonnet (100%)
 rename {v2/examples => examples}/transform/time/str_conversion/stdout.txt (100%)
 rename {v2/examples => examples}/transform/utility/generate_ctrl/config.jsonnet (100%)
 rename {v2/examples => examples}/transform/utility/generate_ctrl/stdout.txt (100%)
 rename {v2/examples => examples}/transform/utility/message_bytes/config.jsonnet (100%)
 rename {v2/examples => examples}/transform/utility/message_bytes/stdout.txt (100%)
 rename {v2/examples => examples}/transform/utility/message_count/config.jsonnet (100%)
 rename {v2/examples => examples}/transform/utility/message_count/stdout.txt (100%)
 rename {v1/examples/config => examples}/transform/utility/message_freshness/config.jsonnet (100%)
 rename {v2/examples => examples}/transform/utility/message_freshness/stdout.txt (100%)
 rename v2/go.mod => go.mod (100%)
 rename v2/go.sum => go.sum (100%)
 rename {v1/internal => internal}/README.md (100%)
 rename {v1/internal => internal}/aggregate/aggregate.go (100%)
 rename {v1/internal => internal}/aws/README.md (100%)
 rename {v2/internal => internal}/aws/appconfig/appconfig.go (100%)
 rename {v2/internal => internal}/aws/cloudwatch/cloudwatch.go (100%)
 rename {v2/internal => internal}/aws/config.go (100%)
 rename {v1/internal => internal}/aws/config_v2.go (100%)
 rename {v2/internal => internal}/aws/dynamodb/dynamodb.go (100%)
 rename {v1/internal => internal}/aws/dynamodb/dynamodb_test.go (100%)
 rename {v2/internal => internal}/aws/firehose/firehose.go (100%)
 rename {v1/internal => internal}/aws/firehose/firehose_test.go (100%)
 rename {v2/internal => internal}/aws/kinesis/kinesis.go (100%)
 rename {v1/internal => internal}/aws/kinesis/kinesis_test.go (100%)
 rename {v2/internal => internal}/aws/lambda/lambda.go (100%)
 rename {v1/internal => internal}/aws/lambda/lambda_test.go (100%)
 rename {v2/internal => internal}/aws/s3manager/s3manager.go (100%)
 rename {v1/internal => internal}/aws/s3manager/s3manager_test.go (100%)
 rename {v2/internal => internal}/aws/secretsmanager/secretsmanager.go (100%)
 rename {v1/internal => internal}/aws/secretsmanager/secretsmanager_test.go (100%)
 rename {v2/internal => internal}/aws/sns/sns.go (100%)
 rename {v1/internal => internal}/aws/sns/sns_test.go (100%)
 rename {v2/internal => internal}/aws/sqs/sqs.go (100%)
 rename {v1/internal => internal}/aws/sqs/sqs_test.go (100%)
 rename {v1/internal => internal}/base64/base64.go (100%)
 rename {v1/internal => internal}/base64/base64_test.go (100%)
 rename {v2/internal => internal}/bufio/bufio.go (100%)
 rename {v1/internal => internal}/bufio/bufio_test.go (100%)
 rename {v2/internal => internal}/bufio/example_test.go (100%)
 rename {v1/internal => internal}/channel/channel.go (100%)
 rename {v2/internal => internal}/config/config.go (100%)
 rename {v1/internal => internal}/errors/errors.go (100%)
 rename {v2/internal => internal}/file/example_test.go (100%)
 rename {v2/internal => internal}/file/file.go (100%)
 rename {v1/internal => internal}/http/README.md (100%)
 rename {v1/internal => internal}/http/http.go (100%)
 rename {v1/internal => internal}/http/http_test.go (100%)
 rename {v2/internal => internal}/kv/aws_dynamodb.go (100%)
 rename {v2/internal => internal}/kv/csv_file.go (100%)
 rename {v2/internal => internal}/kv/example_test.go (100%)
 rename {v2/internal => internal}/kv/json_file.go (100%)
 rename {v2/internal => internal}/kv/kv.go (100%)
 rename {v2/internal => internal}/kv/memory.go (100%)
 rename {v2/internal => internal}/kv/mmdb.go (100%)
 rename {v2/internal => internal}/kv/text_file.go (100%)
 rename {v1/internal => internal}/log/log.go (100%)
 rename {v2/internal => internal}/media/example_test.go (100%)
 rename {v1/internal => internal}/media/media.go (100%)
 rename {v1/internal => internal}/media/media_test.go (100%)
 rename {v2/internal => internal}/metrics/README.md (100%)
 rename {v2/internal => internal}/metrics/aws_cloudwatch_embedded_metrics.go (100%)
 rename {v2/internal => internal}/metrics/metrics.go (100%)
 rename {v2/internal => internal}/secrets/aws_secrets_manager.go (100%)
 rename {v2/internal => internal}/secrets/environment_variable.go (100%)
 rename {v2/internal => internal}/secrets/secrets.go (100%)
 rename {v2/internal => internal}/secrets/secrets_test.go (100%)
 rename {v2/message => message}/message.go (100%)
 rename {v1/message => message}/message_test.go (100%)
 rename v2/substation.go => substation.go (100%)
 rename v2/substation.libsonnet => substation.libsonnet (100%)
 rename v2/substation_test.go => substation_test.go (100%)
 rename v2/substation_test.jsonnet => substation_test.jsonnet (100%)
 rename {v1/transform => transform}/README.md (100%)
 rename {v2/transform => transform}/aggregate.go (100%)
 rename {v2/transform => transform}/aggregate_from_array.go (100%)
 rename {v2/transform => transform}/aggregate_from_array_test.go (100%)
 rename {v2/transform => transform}/aggregate_from_string.go (100%)
 rename {v2/transform => transform}/aggregate_from_string_test.go (100%)
 rename {v2/transform => transform}/aggregate_to_array.go (100%)
 rename {v2/transform => transform}/aggregate_to_array_test.go (100%)
 rename {v2/transform => transform}/aggregate_to_string.go (100%)
 rename {v2/transform => transform}/aggregate_to_string_test.go (100%)
 rename {v2/transform => transform}/array_join.go (100%)
 rename {v2/transform => transform}/array_join_test.go (100%)
 rename {v2/transform => transform}/array_zip.go (100%)
 rename {v2/transform => transform}/array_zip_test.go (100%)
 rename {v2/transform => transform}/enrich.go (100%)
 rename {v2/transform => transform}/enrich_aws_dynamodb_query.go (100%)
 rename {v2/transform => transform}/enrich_aws_dynamodb_query_test.go (100%)
 rename {v2/transform => transform}/enrich_aws_lambda.go (100%)
 rename {v2/transform => transform}/enrich_aws_lambda_test.go (100%)
 rename {v2/transform => transform}/enrich_dns_domain_lookup.go (100%)
 rename {v2/transform => transform}/enrich_dns_ip_lookup.go (100%)
 rename {v2/transform => transform}/enrich_dns_txt_lookup.go (100%)
 rename {v2/transform => transform}/enrich_http_get.go (100%)
 rename {v2/transform => transform}/enrich_http_post.go (100%)
 rename {v2/transform => transform}/enrich_kv_store_item_get.go (100%)
 rename {v2/transform => transform}/enrich_kv_store_item_set.go (100%)
 rename {v2/transform => transform}/enrich_kv_store_set_add.go (100%)
 rename {v2/transform => transform}/format.go (100%)
 rename {v2/transform => transform}/format_from_base64.go (100%)
 rename {v2/transform => transform}/format_from_base64_test.go (100%)
 rename {v2/transform => transform}/format_from_gzip.go (100%)
 rename {v2/transform => transform}/format_from_gzip_test.go (100%)
 rename {v2/transform => transform}/format_from_pretty_print.go (100%)
 rename {v2/transform => transform}/format_from_pretty_print_test.go (100%)
 rename {v2/transform => transform}/format_from_zip.go (100%)
 rename {v2/transform => transform}/format_from_zip_test.go (100%)
 rename {v2/transform => transform}/format_to_base64.go (100%)
 rename {v2/transform => transform}/format_to_base64_test.go (100%)
 rename {v2/transform => transform}/format_to_gzip.go (100%)
 rename {v2/transform => transform}/format_to_gzip_test.go (100%)
 rename {v2/transform => transform}/hash.go (100%)
 rename {v2/transform => transform}/hash_md5.go (100%)
 rename {v2/transform => transform}/hash_md5_test.go (100%)
 rename {v2/transform => transform}/hash_sha256.go (100%)
 rename {v2/transform => transform}/hash_sha256_test.go (100%)
 rename {v2/transform => transform}/meta_err.go (100%)
 rename {v2/transform => transform}/meta_err_test.go (100%)
 rename {v2/transform => transform}/meta_for_each.go (100%)
 rename {v2/transform => transform}/meta_for_each_test.go (100%)
 rename {v2/transform => transform}/meta_kv_store_lock.go (100%)
 rename {v2/transform => transform}/meta_metric_duration.go (100%)
 rename {v2/transform => transform}/meta_retry.go (100%)
 rename {v2/transform => transform}/meta_switch.go (100%)
 rename {v2/transform => transform}/meta_switch_test.go (100%)
 rename {v2/transform => transform}/network.go (100%)
 rename {v2/transform => transform}/network_domain_registered_domain.go (100%)
 rename {v2/transform => transform}/network_domain_registered_domain_test.go (100%)
 rename {v2/transform => transform}/network_domain_subdomain.go (100%)
 rename {v2/transform => transform}/network_domain_subdomain_test.go (100%)
 rename {v2/transform => transform}/network_domain_top_level_domain.go (100%)
 rename {v2/transform => transform}/network_domain_top_level_domain_test.go (100%)
 rename {v2/transform => transform}/number.go (100%)
 rename {v2/transform => transform}/number_math_addition.go (100%)
 rename {v2/transform => transform}/number_math_addition_test.go (100%)
 rename {v2/transform => transform}/number_math_division.go (100%)
 rename {v2/transform => transform}/number_math_division_test.go (100%)
 rename {v2/transform => transform}/number_math_multiplication.go (100%)
 rename {v2/transform => transform}/number_math_multiplication_test.go (100%)
 rename {v2/transform => transform}/number_math_subtraction.go (100%)
 rename {v2/transform => transform}/number_math_subtraction_test.go (100%)
 rename {v2/transform => transform}/number_maximum.go (100%)
 rename {v2/transform => transform}/number_maximum_test.go (100%)
 rename {v2/transform => transform}/number_minimum.go (100%)
 rename {v2/transform => transform}/number_minimum_test.go (100%)
 rename {v2/transform => transform}/object_copy.go (100%)
 rename {v2/transform => transform}/object_copy_test.go (100%)
 rename {v2/transform => transform}/object_delete.go (100%)
 rename {v2/transform => transform}/object_delete_test.go (100%)
 rename {v2/transform => transform}/object_insert.go (100%)
 rename {v2/transform => transform}/object_insert_test.go (100%)
 rename {v2/transform => transform}/object_jq.go (100%)
 rename {v2/transform => transform}/object_jq_test.go (100%)
 rename {v2/transform => transform}/object_to_boolean.go (100%)
 rename {v2/transform => transform}/object_to_boolean_test.go (100%)
 rename {v2/transform => transform}/object_to_float.go (100%)
 rename {v2/transform => transform}/object_to_float_test.go (100%)
 rename {v2/transform => transform}/object_to_integer.go (100%)
 rename {v2/transform => transform}/object_to_integer_test.go (100%)
 rename {v2/transform => transform}/object_to_string.go (100%)
 rename {v2/transform => transform}/object_to_string_test.go (100%)
 rename {v2/transform => transform}/object_to_unsigned_integer.go (100%)
 rename {v2/transform => transform}/object_to_unsigned_integer_test.go (100%)
 rename {v2/transform => transform}/send.go (100%)
 rename {v2/transform => transform}/send_aws_dynamodb_put.go (100%)
 rename {v2/transform => transform}/send_aws_eventbridge.go (100%)
 rename {v2/transform => transform}/send_aws_kinesis_data_firehose.go (100%)
 rename {v2/transform => transform}/send_aws_kinesis_data_stream.go (100%)
 rename {v2/transform => transform}/send_aws_lambda.go (100%)
 rename {v2/transform => transform}/send_aws_s3.go (100%)
 rename {v2/transform => transform}/send_aws_sns.go (100%)
 rename {v2/transform => transform}/send_aws_sqs.go (100%)
 rename {v2/transform => transform}/send_file.go (100%)
(100%) rename {v2/transform => transform}/send_http_post.go (100%) rename {v2/transform => transform}/send_stdout.go (100%) rename {v2/transform => transform}/string.go (100%) rename {v2/transform => transform}/string_append.go (100%) rename {v2/transform => transform}/string_append_test.go (100%) rename {v2/transform => transform}/string_capture.go (100%) rename {v2/transform => transform}/string_capture_test.go (100%) rename {v2/transform => transform}/string_replace.go (100%) rename {v2/transform => transform}/string_replace_test.go (100%) rename {v2/transform => transform}/string_split.go (100%) rename {v2/transform => transform}/string_split_test.go (100%) rename {v2/transform => transform}/string_to_lower.go (100%) rename {v2/transform => transform}/string_to_lower_test.go (100%) rename {v2/transform => transform}/string_to_snake.go (100%) rename {v2/transform => transform}/string_to_snake_test.go (100%) rename {v2/transform => transform}/string_to_upper.go (100%) rename {v2/transform => transform}/string_to_upper_test.go (100%) rename {v2/transform => transform}/string_uuid.go (100%) rename {v2/transform => transform}/time.go (100%) rename {v2/transform => transform}/time_from_string.go (100%) rename {v2/transform => transform}/time_from_string_test.go (100%) rename {v2/transform => transform}/time_from_unix.go (100%) rename {v2/transform => transform}/time_from_unix_milli.go (100%) rename {v2/transform => transform}/time_from_unix_milli_test.go (100%) rename {v2/transform => transform}/time_from_unix_test.go (100%) rename {v2/transform => transform}/time_now.go (100%) rename {v2/transform => transform}/time_to_string.go (100%) rename {v2/transform => transform}/time_to_string_test.go (100%) rename {v2/transform => transform}/time_to_unix.go (100%) rename {v2/transform => transform}/time_to_unix_milli.go (100%) rename {v2/transform => transform}/time_to_unix_milli_test.go (100%) rename {v2/transform => transform}/time_to_unix_test.go (100%) rename {v2/transform => transform}/transform.go (100%) rename {v2/transform => transform}/transform_example_test.go (100%) rename {v2/transform => transform}/transform_test.go (100%) rename {v2/transform => transform}/utility_control.go (100%) rename {v2/transform => transform}/utility_delay.go (100%) rename {v2/transform => transform}/utility_drop.go (100%) rename {v2/transform => transform}/utility_err.go (100%) rename {v2/transform => transform}/utility_metric_bytes.go (100%) rename {v2/transform => transform}/utility_metric_count.go (100%) rename {v2/transform => transform}/utility_metric_freshness.go (100%) rename {v2/transform => transform}/utility_secret.go (100%) delete mode 100644 v1/cmd/aws/lambda/autoscale/main.go delete mode 100644 v1/cmd/aws/lambda/substation/api_gateway.go delete mode 100644 v1/cmd/aws/lambda/substation/dynamodb.go delete mode 100644 v1/cmd/aws/lambda/substation/kinesis_firehose.go delete mode 100644 v1/cmd/aws/lambda/substation/kinesis_stream.go delete mode 100644 v1/cmd/aws/lambda/substation/lambda.go delete mode 100644 v1/cmd/aws/lambda/substation/main.go delete mode 100644 v1/cmd/aws/lambda/substation/s3.go delete mode 100644 v1/cmd/aws/lambda/substation/sns.go delete mode 100644 v1/cmd/aws/lambda/substation/sqs.go delete mode 100644 v1/cmd/aws/lambda/validate/main.go delete mode 100644 v1/cmd/aws/lambda/validate/main_test.go delete mode 100644 v1/cmd/development/benchmark/substation/main.go delete mode 100644 v1/cmd/development/kinesis-tap/substation/config.jsonnet delete mode 100644 
v1/cmd/development/kinesis-tap/substation/main.go delete mode 100644 v1/condition/condition.go delete mode 100644 v1/condition/condition_example_test.go delete mode 100644 v1/condition/condition_test.go delete mode 100644 v1/condition/format_json.go delete mode 100644 v1/condition/format_json_test.go delete mode 100644 v1/condition/format_mime.go delete mode 100644 v1/condition/format_mime_test.go delete mode 100644 v1/condition/meta_condition.go delete mode 100644 v1/condition/meta_condition_test.go delete mode 100644 v1/condition/meta_err.go delete mode 100644 v1/condition/meta_err_test.go delete mode 100644 v1/condition/meta_for_each.go delete mode 100644 v1/condition/meta_for_each_test.go delete mode 100644 v1/condition/meta_negate.go delete mode 100644 v1/condition/meta_negate_test.go delete mode 100644 v1/condition/network_ip_global_unicast.go delete mode 100644 v1/condition/network_ip_global_unicast_test.go delete mode 100644 v1/condition/network_ip_link_local_multicast.go delete mode 100644 v1/condition/network_ip_link_local_multicast_test.go delete mode 100644 v1/condition/network_ip_link_local_unicast.go delete mode 100644 v1/condition/network_ip_link_local_unicast_test.go delete mode 100644 v1/condition/network_ip_loopback.go delete mode 100644 v1/condition/network_ip_loopback_test.go delete mode 100644 v1/condition/network_ip_multicast.go delete mode 100644 v1/condition/network_ip_multicast_test.go delete mode 100644 v1/condition/network_ip_private.go delete mode 100644 v1/condition/network_ip_private_test.go delete mode 100644 v1/condition/network_ip_unicast.go delete mode 100644 v1/condition/network_ip_unicast_test.go delete mode 100644 v1/condition/network_ip_unspecified.go delete mode 100644 v1/condition/network_ip_unspecified_test.go delete mode 100644 v1/condition/network_ip_valid.go delete mode 100644 v1/condition/network_ip_valid_test.go delete mode 100644 v1/condition/number_bitwise_and.go delete mode 100644 v1/condition/number_bitwise_and_test.go delete mode 100644 v1/condition/number_bitwise_not.go delete mode 100644 v1/condition/number_bitwise_or.go delete mode 100644 v1/condition/number_bitwise_or_test.go delete mode 100644 v1/condition/number_bitwise_xor.go delete mode 100644 v1/condition/number_bitwise_xor_test.go delete mode 100644 v1/condition/number_equal_to.go delete mode 100644 v1/condition/number_equal_to_test.go delete mode 100644 v1/condition/number_greater_than.go delete mode 100644 v1/condition/number_greater_than_test.go delete mode 100644 v1/condition/number_length_equal_to.go delete mode 100644 v1/condition/number_length_equal_to_test.go delete mode 100644 v1/condition/number_length_greater_than.go delete mode 100644 v1/condition/number_length_greater_than_test.go delete mode 100644 v1/condition/number_length_less_than.go delete mode 100644 v1/condition/number_length_less_than_test.go delete mode 100644 v1/condition/number_less_than.go delete mode 100644 v1/condition/number_less_than_test.go delete mode 100644 v1/condition/string.go delete mode 100644 v1/condition/string_contains.go delete mode 100644 v1/condition/string_contains_test.go delete mode 100644 v1/condition/string_ends_with.go delete mode 100644 v1/condition/string_ends_with_test.go delete mode 100644 v1/condition/string_equal_to.go delete mode 100644 v1/condition/string_equal_to_test.go delete mode 100644 v1/condition/string_greater_than.go delete mode 100644 v1/condition/string_greater_than_test.go delete mode 100644 v1/condition/string_less_than.go delete mode 100644 
v1/condition/string_less_than_test.go delete mode 100644 v1/condition/string_match.go delete mode 100644 v1/condition/string_match_test.go delete mode 100644 v1/condition/string_starts_with.go delete mode 100644 v1/condition/string_starts_with_test.go delete mode 100644 v1/condition/utility_random.go delete mode 100644 v1/examples/Makefile delete mode 100644 v1/examples/README.md delete mode 100644 v1/examples/cmd/client/file/substation/config.jsonnet delete mode 100644 v1/examples/cmd/client/file/substation/data.json delete mode 100644 v1/examples/cmd/client/file/substation/event.libsonnet delete mode 100644 v1/examples/cmd/client/file/substation/main.go delete mode 100644 v1/examples/cmd/client/file/substation/send.libsonnet delete mode 100644 v1/examples/cmd/development/benchmark/config.jsonnet delete mode 100644 v1/examples/cmd/development/benchmark/data_large.json delete mode 100644 v1/examples/cmd/development/benchmark/data_small.json delete mode 100644 v1/examples/cmd/development/benchmark/event.libsonnet delete mode 100644 v1/examples/config/condition/meta/if_all_else/config.jsonnet delete mode 100644 v1/examples/config/condition/meta/if_all_else/data.json delete mode 100644 v1/examples/config/condition/number/config.jsonnet delete mode 100644 v1/examples/config/condition/number/data.json delete mode 100644 v1/examples/config/condition/string/config.jsonnet delete mode 100644 v1/examples/config/condition/string/data.json delete mode 100644 v1/examples/config/config.jsonnet delete mode 100644 v1/examples/config/data.json delete mode 100644 v1/examples/config/transform/aggregate/sample/config.jsonnet delete mode 100644 v1/examples/config/transform/aggregate/sample/data.jsonl delete mode 100644 v1/examples/config/transform/aggregate/summarize/config.jsonnet delete mode 100644 v1/examples/config/transform/aggregate/summarize/data.jsonl delete mode 100644 v1/examples/config/transform/aggregate/summarize/stdout.jsonl delete mode 100644 v1/examples/config/transform/array/extend/config.jsonnet delete mode 100644 v1/examples/config/transform/array/extend/data.json delete mode 100644 v1/examples/config/transform/array/extend/stdout.json delete mode 100644 v1/examples/config/transform/array/flatten/config.jsonnet delete mode 100644 v1/examples/config/transform/array/flatten/data.json delete mode 100644 v1/examples/config/transform/array/flatten/stdout.json delete mode 100644 v1/examples/config/transform/array/flatten_deep/config.jsonnet delete mode 100644 v1/examples/config/transform/array/flatten_deep/data.json delete mode 100644 v1/examples/config/transform/array/flatten_deep/stdout.json delete mode 100644 v1/examples/config/transform/array/group/config.jsonnet delete mode 100644 v1/examples/config/transform/array/group/data.json delete mode 100644 v1/examples/config/transform/enrich/http_secret/config.jsonnet delete mode 100644 v1/examples/config/transform/enrich/kvstore_csv/config.jsonnet delete mode 100644 v1/examples/config/transform/enrich/kvstore_csv/data.jsonl delete mode 100644 v1/examples/config/transform/enrich/kvstore_json/config.jsonnet delete mode 100644 v1/examples/config/transform/enrich/kvstore_json/data.jsonl delete mode 100644 v1/examples/config/transform/enrich/kvstore_json/kv.json delete mode 100644 v1/examples/config/transform/enrich/kvstore_set_add/config.jsonnet delete mode 100644 v1/examples/config/transform/enrich/kvstore_set_add/data.jsonl delete mode 100644 v1/examples/config/transform/enrich/mmdb/config.jsonnet delete mode 100644 
v1/examples/config/transform/enrich/mmdb/data.jsonl delete mode 100644 v1/examples/config/transform/enrich/mmdb/stdout.jsonl delete mode 100644 v1/examples/config/transform/enrich/urlscan/config.jsonnet delete mode 100644 v1/examples/config/transform/enrich/urlscan/data.jsonl delete mode 100644 v1/examples/config/transform/format/zip/data.jsonl delete mode 100644 v1/examples/config/transform/meta/crash_program/config.jsonnet delete mode 100644 v1/examples/config/transform/meta/crash_program/data.json delete mode 100644 v1/examples/config/transform/meta/each_in_array/config.jsonnet delete mode 100644 v1/examples/config/transform/meta/each_in_array/data.json delete mode 100644 v1/examples/config/transform/meta/each_in_array/stdout.json delete mode 100644 v1/examples/config/transform/meta/exactly_once_consumer/config.jsonnet delete mode 100644 v1/examples/config/transform/meta/exactly_once_consumer/data.jsonl delete mode 100644 v1/examples/config/transform/meta/exactly_once_producer/config.jsonnet delete mode 100644 v1/examples/config/transform/meta/exactly_once_producer/data.jsonl delete mode 100644 v1/examples/config/transform/meta/exactly_once_system/config.jsonnet delete mode 100644 v1/examples/config/transform/meta/exactly_once_system/data.jsonl delete mode 100644 v1/examples/config/transform/meta/execution_time/config.jsonnet delete mode 100644 v1/examples/config/transform/meta/retry_with_backoff/config.jsonnet delete mode 100644 v1/examples/config/transform/meta/retry_with_backoff/data.json delete mode 100644 v1/examples/config/transform/number/clamp/config.jsonnet delete mode 100644 v1/examples/config/transform/number/max/config.jsonnet delete mode 100644 v1/examples/config/transform/number/min/config.jsonnet delete mode 100644 v1/examples/config/transform/send/aux_transforms/config.jsonnet delete mode 100644 v1/examples/config/transform/send/aux_transforms/data.jsonl delete mode 100644 v1/examples/config/transform/send/aws_retryable_errors/config.jsonnet delete mode 100644 v1/examples/config/transform/send/aws_s3_glacier/config.jsonnet delete mode 100644 v1/examples/config/transform/send/batch/config.jsonnet delete mode 100644 v1/examples/config/transform/send/batch/data.jsonl delete mode 100644 v1/examples/config/transform/send/datadog/config.jsonnet delete mode 100644 v1/examples/config/transform/send/datadog/data.jsonl delete mode 100644 v1/examples/config/transform/send/splunk/config.jsonnet delete mode 100644 v1/examples/config/transform/send/splunk/data.jsonl delete mode 100644 v1/examples/config/transform/send/sumologic/config.jsonnet delete mode 100644 v1/examples/config/transform/send/sumologic/data.jsonl delete mode 100644 v1/examples/config/transform/time/string_conversion/config.jsonnet delete mode 100644 v1/examples/config/transform/time/string_conversion/data.json delete mode 100644 v1/examples/config/transform/utility/generate_ctrl/config.jsonnet delete mode 100644 v1/examples/config/transform/utility/generate_ctrl/data.jsonl delete mode 100644 v1/examples/config/transform/utility/message_bytes/config.jsonnet delete mode 100644 v1/examples/config/transform/utility/message_bytes/data.jsonl delete mode 100644 v1/examples/config/transform/utility/message_count/config.jsonnet delete mode 100644 v1/examples/config/transform/utility/message_count/data.jsonl delete mode 100644 v1/examples/terraform/aws/README.md delete mode 100644 v1/examples/terraform/aws/cloudwatch_logs/cross_account_cross_region/config/consumer/config.jsonnet delete mode 100644 
v1/examples/terraform/aws/cloudwatch_logs/cross_account_cross_region/terraform/_provider.tf delete mode 100644 v1/examples/terraform/aws/cloudwatch_logs/cross_account_cross_region/terraform/_resources.tf delete mode 100644 v1/examples/terraform/aws/cloudwatch_logs/cross_account_cross_region/terraform/autoscaler.tf delete mode 100644 v1/examples/terraform/aws/cloudwatch_logs/cross_account_cross_region/terraform/consumer.tf delete mode 100644 v1/examples/terraform/aws/cloudwatch_logs/to_lambda/config/consumer/config.jsonnet delete mode 100644 v1/examples/terraform/aws/cloudwatch_logs/to_lambda/terraform/_resources.tf delete mode 100644 v1/examples/terraform/aws/cloudwatch_logs/to_lambda/terraform/consumer.tf delete mode 100644 v1/examples/terraform/aws/dynamodb/cdc/config/node/config.jsonnet delete mode 100644 v1/examples/terraform/aws/dynamodb/cdc/terraform/_resources.tf delete mode 100644 v1/examples/terraform/aws/dynamodb/cdc/terraform/node.tf delete mode 100644 v1/examples/terraform/aws/dynamodb/distributed_lock/config/node/config.jsonnet delete mode 100644 v1/examples/terraform/aws/dynamodb/distributed_lock/terraform/_resources.tf delete mode 100644 v1/examples/terraform/aws/dynamodb/distributed_lock/terraform/node.tf delete mode 100644 v1/examples/terraform/aws/dynamodb/telephone/config/const.libsonnet delete mode 100644 v1/examples/terraform/aws/dynamodb/telephone/config/dvc_mgmt_enrichment/config.jsonnet delete mode 100644 v1/examples/terraform/aws/dynamodb/telephone/config/edr_enrichment/config.jsonnet delete mode 100644 v1/examples/terraform/aws/dynamodb/telephone/config/edr_transform/config.jsonnet delete mode 100644 v1/examples/terraform/aws/dynamodb/telephone/config/idp_enrichment/config.jsonnet delete mode 100644 v1/examples/terraform/aws/dynamodb/telephone/dvc_mgmt_data.jsonl delete mode 100644 v1/examples/terraform/aws/dynamodb/telephone/edr_data.jsonl delete mode 100644 v1/examples/terraform/aws/dynamodb/telephone/idp_data.jsonl delete mode 100644 v1/examples/terraform/aws/dynamodb/telephone/post_deploy.sh delete mode 100644 v1/examples/terraform/aws/dynamodb/telephone/terraform/_resources.tf delete mode 100644 v1/examples/terraform/aws/dynamodb/telephone/terraform/autoscaler.tf delete mode 100644 v1/examples/terraform/aws/dynamodb/telephone/terraform/dvc_mgmt.tf delete mode 100644 v1/examples/terraform/aws/dynamodb/telephone/terraform/edr.tf delete mode 100644 v1/examples/terraform/aws/dynamodb/telephone/terraform/idp.tf delete mode 100644 v1/examples/terraform/aws/eventbridge/lambda_bus/config/consumer/config.jsonnet delete mode 100644 v1/examples/terraform/aws/eventbridge/lambda_bus/config/producer/config.jsonnet delete mode 100644 v1/examples/terraform/aws/eventbridge/lambda_bus/terraform/_resources.tf delete mode 100644 v1/examples/terraform/aws/eventbridge/lambda_bus/terraform/consumer.tf delete mode 100644 v1/examples/terraform/aws/eventbridge/lambda_bus/terraform/producer.tf delete mode 100644 v1/examples/terraform/aws/firehose/data_transform/config/transform_node/config.jsonnet delete mode 100644 v1/examples/terraform/aws/firehose/data_transform/terraform/_resources.tf delete mode 100644 v1/examples/terraform/aws/kinesis/autoscale/terraform/_resources.tf delete mode 100644 v1/examples/terraform/aws/kinesis/multistream/config/publisher/config.jsonnet delete mode 100644 v1/examples/terraform/aws/kinesis/multistream/config/subscriber/config.jsonnet delete mode 100644 v1/examples/terraform/aws/kinesis/multistream/terraform/_resources.tf delete mode 100644 
v1/examples/terraform/aws/kinesis/multistream/terraform/autoscaler.tf delete mode 100644 v1/examples/terraform/aws/kinesis/multistream/terraform/publisher.tf delete mode 100644 v1/examples/terraform/aws/kinesis/multistream/terraform/subscriber.tf delete mode 100644 v1/examples/terraform/aws/kinesis/nxdr/config/const.libsonnet delete mode 100644 v1/examples/terraform/aws/kinesis/nxdr/config/enrichment/config.jsonnet delete mode 100644 v1/examples/terraform/aws/kinesis/nxdr/config/threat_enrichment.libsonnet delete mode 100644 v1/examples/terraform/aws/kinesis/nxdr/config/transform/config.jsonnet delete mode 100644 v1/examples/terraform/aws/kinesis/nxdr/data.jsonl delete mode 100644 v1/examples/terraform/aws/kinesis/nxdr/post_deploy.sh delete mode 100644 v1/examples/terraform/aws/kinesis/nxdr/terraform/_resources.tf delete mode 100644 v1/examples/terraform/aws/kinesis/nxdr/terraform/autoscaler.tf delete mode 100644 v1/examples/terraform/aws/kinesis/nxdr/terraform/enrichment.tf delete mode 100644 v1/examples/terraform/aws/kinesis/nxdr/terraform/transform.tf delete mode 100644 v1/examples/terraform/aws/kinesis/time_travel/config/const.libsonnet delete mode 100644 v1/examples/terraform/aws/kinesis/time_travel/config/enrichment/config.jsonnet delete mode 100644 v1/examples/terraform/aws/kinesis/time_travel/config/transform/config.jsonnet delete mode 100644 v1/examples/terraform/aws/kinesis/time_travel/data.jsonl delete mode 100644 v1/examples/terraform/aws/kinesis/time_travel/post_deploy.sh delete mode 100644 v1/examples/terraform/aws/kinesis/time_travel/terraform/_resources.tf delete mode 100644 v1/examples/terraform/aws/kinesis/time_travel/terraform/autoscaler.tf delete mode 100644 v1/examples/terraform/aws/kinesis/time_travel/terraform/enrichment.tf delete mode 100644 v1/examples/terraform/aws/kinesis/time_travel/terraform/transform.tf delete mode 100644 v1/examples/terraform/aws/lambda/appconfig/config/node/config.jsonnet delete mode 100644 v1/examples/terraform/aws/lambda/appconfig/terraform/_resources.tf delete mode 100644 v1/examples/terraform/aws/lambda/appconfig/terraform/node.tf delete mode 100644 v1/examples/terraform/aws/lambda/microservice/config/microservice/config.jsonnet delete mode 100644 v1/examples/terraform/aws/lambda/microservice/terraform/_resources.tf delete mode 100644 v1/examples/terraform/aws/lambda/microservice/terraform/microservice.tf delete mode 100644 v1/examples/terraform/aws/lambda/vpc/config/whatismyip/config.jsonnet delete mode 100644 v1/examples/terraform/aws/lambda/vpc/terraform/_resources.tf delete mode 100644 v1/examples/terraform/aws/lambda/vpc/terraform/whatismyip.tf delete mode 100644 v1/examples/terraform/aws/s3/data_lake/config/node/config.jsonnet delete mode 100644 v1/examples/terraform/aws/s3/data_lake/terraform/_resources.tf delete mode 100644 v1/examples/terraform/aws/s3/data_lake/terraform/node.tf delete mode 100644 v1/examples/terraform/aws/s3/retry_on_failure/config/node/config.jsonnet delete mode 100644 v1/examples/terraform/aws/s3/retry_on_failure/config/retrier/config.jsonnet delete mode 100644 v1/examples/terraform/aws/s3/retry_on_failure/terraform/_resources.tf delete mode 100644 v1/examples/terraform/aws/s3/retry_on_failure/terraform/node_with_retrier.tf delete mode 100644 v1/examples/terraform/aws/s3/sns/config/node/config.jsonnet delete mode 100644 v1/examples/terraform/aws/s3/sns/terraform/_resources.tf delete mode 100644 v1/examples/terraform/aws/s3/sns/terraform/node.tf delete mode 100644 
v1/examples/terraform/aws/s3/xdr/config/node/config.jsonnet delete mode 100644 v1/examples/terraform/aws/s3/xdr/config/node/const.libsonnet delete mode 100644 v1/examples/terraform/aws/s3/xdr/data.jsonl delete mode 100644 v1/examples/terraform/aws/s3/xdr/stdout.jsonl delete mode 100644 v1/examples/terraform/aws/s3/xdr/terraform/_resources.tf delete mode 100644 v1/examples/terraform/aws/s3/xdr/terraform/node.tf delete mode 100644 v1/examples/terraform/aws/sns/pub_sub/config/client/config.jsonnet delete mode 100644 v1/examples/terraform/aws/sns/pub_sub/config/subscriber_x/config.jsonnet delete mode 100644 v1/examples/terraform/aws/sns/pub_sub/config/subscriber_y/config.jsonnet delete mode 100644 v1/examples/terraform/aws/sns/pub_sub/config/subscriber_z/config.jsonnet delete mode 100644 v1/examples/terraform/aws/sns/pub_sub/terraform/_resources.tf delete mode 100644 v1/examples/terraform/aws/sns/pub_sub/terraform/subscribers.tf delete mode 100644 v1/examples/terraform/aws/sqs/microservice/config/frontend/config.jsonnet delete mode 100644 v1/examples/terraform/aws/sqs/microservice/config/microservice/config.jsonnet delete mode 100644 v1/examples/terraform/aws/sqs/microservice/terraform/_resources.tf delete mode 100644 v1/examples/terraform/aws/sqs/microservice/terraform/frontend.tf delete mode 100644 v1/examples/terraform/aws/sqs/microservice/terraform/microservice.tf delete mode 100644 v1/go.mod delete mode 100644 v1/go.sum delete mode 100644 v1/internal/aws/appconfig/appconfig.go delete mode 100644 v1/internal/aws/cloudwatch/cloudwatch.go delete mode 100644 v1/internal/aws/config.go delete mode 100644 v1/internal/aws/dynamodb/dynamodb.go delete mode 100644 v1/internal/aws/firehose/firehose.go delete mode 100644 v1/internal/aws/kinesis/kinesis.go delete mode 100644 v1/internal/aws/lambda/lambda.go delete mode 100644 v1/internal/aws/s3manager/s3manager.go delete mode 100644 v1/internal/aws/secretsmanager/secretsmanager.go delete mode 100644 v1/internal/aws/sns/sns.go delete mode 100644 v1/internal/aws/sqs/sqs.go delete mode 100644 v1/internal/bufio/bufio.go delete mode 100644 v1/internal/bufio/example_test.go delete mode 100644 v1/internal/config/config.go delete mode 100644 v1/internal/file/example_test.go delete mode 100644 v1/internal/file/file.go delete mode 100644 v1/internal/kv/aws_dynamodb.go delete mode 100644 v1/internal/kv/csv_file.go delete mode 100644 v1/internal/kv/example_test.go delete mode 100644 v1/internal/kv/json_file.go delete mode 100644 v1/internal/kv/kv.go delete mode 100644 v1/internal/kv/memory.go delete mode 100644 v1/internal/kv/mmdb.go delete mode 100644 v1/internal/kv/text_file.go delete mode 100644 v1/internal/media/example_test.go delete mode 100644 v1/internal/metrics/README.md delete mode 100644 v1/internal/metrics/aws_cloudwatch_embedded_metrics.go delete mode 100644 v1/internal/metrics/metrics.go delete mode 100644 v1/internal/secrets/aws_secrets_manager.go delete mode 100644 v1/internal/secrets/environment_variable.go delete mode 100644 v1/internal/secrets/secrets.go delete mode 100644 v1/internal/secrets/secrets_test.go delete mode 100644 v1/message/message.go delete mode 100644 v1/substation.go delete mode 100644 v1/substation.libsonnet delete mode 100644 v1/substation_test.go delete mode 100644 v1/substation_test.jsonnet delete mode 100644 v1/transform/aggregate.go delete mode 100644 v1/transform/aggregate_from_array.go delete mode 100644 v1/transform/aggregate_from_array_test.go delete mode 100644 v1/transform/aggregate_from_string.go delete mode 100644 
v1/transform/aggregate_from_string_test.go delete mode 100644 v1/transform/aggregate_to_array.go delete mode 100644 v1/transform/aggregate_to_array_test.go delete mode 100644 v1/transform/aggregate_to_string.go delete mode 100644 v1/transform/aggregate_to_string_test.go delete mode 100644 v1/transform/array_join.go delete mode 100644 v1/transform/array_join_test.go delete mode 100644 v1/transform/array_zip.go delete mode 100644 v1/transform/array_zip_test.go delete mode 100644 v1/transform/enrich.go delete mode 100644 v1/transform/enrich_aws_dynamodb.go delete mode 100644 v1/transform/enrich_aws_dynamodb_test.go delete mode 100644 v1/transform/enrich_aws_lambda.go delete mode 100644 v1/transform/enrich_aws_lambda_test.go delete mode 100644 v1/transform/enrich_dns_domain_lookup.go delete mode 100644 v1/transform/enrich_dns_ip_lookup.go delete mode 100644 v1/transform/enrich_dns_txt_lookup.go delete mode 100644 v1/transform/enrich_http_get.go delete mode 100644 v1/transform/enrich_http_post.go delete mode 100644 v1/transform/enrich_kv_store_item_get.go delete mode 100644 v1/transform/enrich_kv_store_item_set.go delete mode 100644 v1/transform/enrich_kv_store_set_add.go delete mode 100644 v1/transform/format.go delete mode 100644 v1/transform/format_from_base64.go delete mode 100644 v1/transform/format_from_base64_test.go delete mode 100644 v1/transform/format_from_gzip.go delete mode 100644 v1/transform/format_from_gzip_test.go delete mode 100644 v1/transform/format_from_pretty_print.go delete mode 100644 v1/transform/format_from_pretty_print_test.go delete mode 100644 v1/transform/format_from_zip.go delete mode 100644 v1/transform/format_from_zip_test.go delete mode 100644 v1/transform/format_to_base64.go delete mode 100644 v1/transform/format_to_base64_test.go delete mode 100644 v1/transform/format_to_gzip.go delete mode 100644 v1/transform/format_to_gzip_test.go delete mode 100644 v1/transform/hash.go delete mode 100644 v1/transform/hash_md5.go delete mode 100644 v1/transform/hash_md5_test.go delete mode 100644 v1/transform/hash_sha256.go delete mode 100644 v1/transform/hash_sha256_test.go delete mode 100644 v1/transform/meta_err.go delete mode 100644 v1/transform/meta_err_test.go delete mode 100644 v1/transform/meta_for_each.go delete mode 100644 v1/transform/meta_for_each_test.go delete mode 100644 v1/transform/meta_kv_store_lock.go delete mode 100644 v1/transform/meta_metric_duration.go delete mode 100644 v1/transform/meta_pipeline.go delete mode 100644 v1/transform/meta_pipeline_test.go delete mode 100644 v1/transform/meta_retry.go delete mode 100644 v1/transform/meta_switch.go delete mode 100644 v1/transform/meta_switch_test.go delete mode 100644 v1/transform/network.go delete mode 100644 v1/transform/network_domain_registered_domain.go delete mode 100644 v1/transform/network_domain_registered_domain_test.go delete mode 100644 v1/transform/network_domain_subdomain.go delete mode 100644 v1/transform/network_domain_subdomain_test.go delete mode 100644 v1/transform/network_domain_top_level_domain.go delete mode 100644 v1/transform/network_domain_top_level_domain_test.go delete mode 100644 v1/transform/number.go delete mode 100644 v1/transform/number_math_addition.go delete mode 100644 v1/transform/number_math_addition_test.go delete mode 100644 v1/transform/number_math_division.go delete mode 100644 v1/transform/number_math_division_test.go delete mode 100644 v1/transform/number_math_multiplication.go delete mode 100644 v1/transform/number_math_multiplication_test.go delete mode 100644 
v1/transform/number_math_subtraction.go delete mode 100644 v1/transform/number_math_subtraction_test.go delete mode 100644 v1/transform/number_maximum.go delete mode 100644 v1/transform/number_maximum_test.go delete mode 100644 v1/transform/number_minimum.go delete mode 100644 v1/transform/number_minimum_test.go delete mode 100644 v1/transform/object_copy.go delete mode 100644 v1/transform/object_copy_test.go delete mode 100644 v1/transform/object_delete.go delete mode 100644 v1/transform/object_delete_test.go delete mode 100644 v1/transform/object_insert.go delete mode 100644 v1/transform/object_insert_test.go delete mode 100644 v1/transform/object_jq.go delete mode 100644 v1/transform/object_jq_test.go delete mode 100644 v1/transform/object_to_boolean.go delete mode 100644 v1/transform/object_to_boolean_test.go delete mode 100644 v1/transform/object_to_float.go delete mode 100644 v1/transform/object_to_float_test.go delete mode 100644 v1/transform/object_to_integer.go delete mode 100644 v1/transform/object_to_integer_test.go delete mode 100644 v1/transform/object_to_string.go delete mode 100644 v1/transform/object_to_string_test.go delete mode 100644 v1/transform/object_to_unsigned_integer.go delete mode 100644 v1/transform/object_to_unsigned_integer_test.go delete mode 100644 v1/transform/send.go delete mode 100644 v1/transform/send_aws_dynamodb.go delete mode 100644 v1/transform/send_aws_eventbridge.go delete mode 100644 v1/transform/send_aws_kinesis_data_firehose.go delete mode 100644 v1/transform/send_aws_kinesis_data_stream.go delete mode 100644 v1/transform/send_aws_lambda.go delete mode 100644 v1/transform/send_aws_s3.go delete mode 100644 v1/transform/send_aws_sns.go delete mode 100644 v1/transform/send_aws_sqs.go delete mode 100644 v1/transform/send_file.go delete mode 100644 v1/transform/send_http_post.go delete mode 100644 v1/transform/send_stdout.go delete mode 100644 v1/transform/string.go delete mode 100644 v1/transform/string_append.go delete mode 100644 v1/transform/string_append_test.go delete mode 100644 v1/transform/string_capture.go delete mode 100644 v1/transform/string_capture_test.go delete mode 100644 v1/transform/string_replace.go delete mode 100644 v1/transform/string_replace_test.go delete mode 100644 v1/transform/string_split.go delete mode 100644 v1/transform/string_split_test.go delete mode 100644 v1/transform/string_to_lower.go delete mode 100644 v1/transform/string_to_lower_test.go delete mode 100644 v1/transform/string_to_snake.go delete mode 100644 v1/transform/string_to_snake_test.go delete mode 100644 v1/transform/string_to_upper.go delete mode 100644 v1/transform/string_to_upper_test.go delete mode 100644 v1/transform/string_uuid.go delete mode 100644 v1/transform/time.go delete mode 100644 v1/transform/time_from_string.go delete mode 100644 v1/transform/time_from_string_test.go delete mode 100644 v1/transform/time_from_unix.go delete mode 100644 v1/transform/time_from_unix_milli.go delete mode 100644 v1/transform/time_from_unix_milli_test.go delete mode 100644 v1/transform/time_from_unix_test.go delete mode 100644 v1/transform/time_now.go delete mode 100644 v1/transform/time_to_string.go delete mode 100644 v1/transform/time_to_string_test.go delete mode 100644 v1/transform/time_to_unix.go delete mode 100644 v1/transform/time_to_unix_milli.go delete mode 100644 v1/transform/time_to_unix_milli_test.go delete mode 100644 v1/transform/time_to_unix_test.go delete mode 100644 v1/transform/transform.go delete mode 100644 
v1/transform/transform_example_test.go delete mode 100644 v1/transform/transform_test.go delete mode 100644 v1/transform/utility_control.go delete mode 100644 v1/transform/utility_delay.go delete mode 100644 v1/transform/utility_drop.go delete mode 100644 v1/transform/utility_err.go delete mode 100644 v1/transform/utility_metric_bytes.go delete mode 100644 v1/transform/utility_metric_count.go delete mode 100644 v1/transform/utility_metric_freshness.go delete mode 100644 v1/transform/utility_secret.go delete mode 100644 v2/cmd/README.md delete mode 100644 v2/cmd/aws/lambda/README.md delete mode 100644 v2/cmd/development/kinesis-tap/substation/README.md delete mode 100644 v2/condition/README.md delete mode 100644 v2/condition/network.go delete mode 100644 v2/condition/number.go delete mode 100644 v2/config/config.go delete mode 100644 v2/examples/condition/meta/data.json delete mode 100644 v2/examples/condition/number/data.json delete mode 100644 v2/examples/condition/string/data.json delete mode 100644 v2/examples/transform/aggregate/sample/data.jsonl delete mode 100644 v2/examples/transform/aggregate/summarize/data.jsonl delete mode 100644 v2/examples/transform/array/extend/data.json delete mode 100644 v2/examples/transform/array/flatten/data.json delete mode 100644 v2/examples/transform/array/flatten_deep/data.json delete mode 100644 v2/examples/transform/array/group/data.json delete mode 100644 v2/examples/transform/enrich/http_secret/data.json delete mode 100644 v2/examples/transform/enrich/kvstore_csv/data.jsonl delete mode 100644 v2/examples/transform/enrich/kvstore_csv/kv.csv delete mode 100644 v2/examples/transform/enrich/kvstore_json/data.jsonl delete mode 100644 v2/examples/transform/enrich/kvstore_set_add/data.jsonl delete mode 100644 v2/examples/transform/enrich/mmdb/data.jsonl delete mode 100644 v2/examples/transform/enrich/urlscan/data.json delete mode 100644 v2/examples/transform/format/zip/config.jsonnet delete mode 100644 v2/examples/transform/format/zip/data.csv delete mode 100644 v2/examples/transform/format/zip/data.jsonl delete mode 100644 v2/examples/transform/meta/crash_program/data.json delete mode 100644 v2/examples/transform/meta/each_in_array/data.json delete mode 100644 v2/examples/transform/meta/exactly_once_consumer/data.jsonl delete mode 100644 v2/examples/transform/meta/exactly_once_producer/data.jsonl delete mode 100644 v2/examples/transform/meta/exactly_once_system/data.jsonl delete mode 100644 v2/examples/transform/meta/execution_time/data.json delete mode 100644 v2/examples/transform/meta/retry_with_backoff/data.json delete mode 100644 v2/examples/transform/number/clamp/data.txt delete mode 100644 v2/examples/transform/number/clamp/stdout.txt delete mode 100644 v2/examples/transform/number/max/data.txt delete mode 100644 v2/examples/transform/number/max/stdout.txt delete mode 100644 v2/examples/transform/number/min/data.txt delete mode 100644 v2/examples/transform/number/min/stdout.txt delete mode 100644 v2/examples/transform/send/aux_transforms/data.jsonl delete mode 100644 v2/examples/transform/send/batch/data.jsonl delete mode 100644 v2/examples/transform/send/datadog/data.jsonl delete mode 100644 v2/examples/transform/send/splunk/data.jsonl delete mode 100644 v2/examples/transform/send/sumologic/data.jsonl delete mode 100644 v2/examples/transform/time/str_conversion/data.json delete mode 100644 v2/examples/transform/utility/generate_ctrl/data.jsonl delete mode 100644 v2/examples/transform/utility/message_bytes/data.jsonl delete mode 100644 
 delete mode 100644 v2/examples/transform/utility/message_count/data.jsonl
 delete mode 100644 v2/examples/transform/utility/message_freshness/config.jsonnet
 delete mode 100644 v2/examples/transform/utility/message_freshness/data.jsonl
 delete mode 100644 v2/internal/README.md
 delete mode 100644 v2/internal/aggregate/aggregate.go
 delete mode 100644 v2/internal/aws/README.md
 delete mode 100644 v2/internal/aws/config_v2.go
 delete mode 100644 v2/internal/aws/dynamodb/dynamodb_test.go
 delete mode 100644 v2/internal/aws/firehose/firehose_test.go
 delete mode 100644 v2/internal/aws/kinesis/kinesis_test.go
 delete mode 100644 v2/internal/aws/lambda/lambda_test.go
 delete mode 100644 v2/internal/aws/s3manager/s3manager_test.go
 delete mode 100644 v2/internal/aws/secretsmanager/secretsmanager_test.go
 delete mode 100644 v2/internal/aws/sns/sns_test.go
 delete mode 100644 v2/internal/aws/sqs/sqs_test.go
 delete mode 100644 v2/internal/base64/base64.go
 delete mode 100644 v2/internal/base64/base64_test.go
 delete mode 100644 v2/internal/bufio/bufio_test.go
 delete mode 100644 v2/internal/channel/channel.go
 delete mode 100644 v2/internal/errors/errors.go
 delete mode 100644 v2/internal/http/README.md
 delete mode 100644 v2/internal/http/http.go
 delete mode 100644 v2/internal/http/http_test.go
 delete mode 100644 v2/internal/log/log.go
 delete mode 100644 v2/internal/media/media.go
 delete mode 100644 v2/internal/media/media_test.go
 delete mode 100644 v2/message/message_test.go
 delete mode 100644 v2/transform/README.md

diff --git a/.github/workflows/code.yml b/.github/workflows/code.yml
index 206cfb13..8518ff36 100644
--- a/.github/workflows/code.yml
+++ b/.github/workflows/code.yml
@@ -5,7 +5,7 @@ on:
     branches: [main]
 
 jobs:
-  go-v1:
+  go:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout Repository
@@ -20,36 +20,11 @@ jobs:
 
       - name: Testing
         run: go test -timeout 30s -v ./...
-        working-directory: ./v1/
 
       - name: Linting
         uses: golangci/golangci-lint-action@v3
         with:
           version: latest
-          working-directory: ./v1/
-
-  go-v2:
-    runs-on: ubuntu-latest
-    steps:
-      - name: Checkout Repository
-        uses: actions/checkout@v2
-        with:
-          fetch-depth: 1
-
-      - name: Setup Go
-        uses: actions/setup-go@v2
-        with:
-          go-version: 1.22
-
-      - name: Testing
-        run: go test -timeout 30s -v ./...
-        working-directory: ./v2/
-
-      - name: Linting
-        uses: golangci/golangci-lint-action@v3
-        with:
-          version: latest
-          working-directory: ./v2/
 
   python:
     runs-on: ubuntu-latest
diff --git a/v1/cmd/README.md b/cmd/README.md
similarity index 100%
rename from v1/cmd/README.md
rename to cmd/README.md
diff --git a/v1/cmd/aws/lambda/README.md b/cmd/aws/lambda/README.md
similarity index 100%
rename from v1/cmd/aws/lambda/README.md
rename to cmd/aws/lambda/README.md
diff --git a/v2/cmd/aws/lambda/autoscale/main.go b/cmd/aws/lambda/autoscale/main.go
similarity index 100%
rename from v2/cmd/aws/lambda/autoscale/main.go
rename to cmd/aws/lambda/autoscale/main.go
diff --git a/v2/cmd/aws/lambda/substation/api_gateway.go b/cmd/aws/lambda/substation/api_gateway.go
similarity index 100%
rename from v2/cmd/aws/lambda/substation/api_gateway.go
rename to cmd/aws/lambda/substation/api_gateway.go
diff --git a/v2/cmd/aws/lambda/substation/dynamodb.go b/cmd/aws/lambda/substation/dynamodb.go
similarity index 100%
rename from v2/cmd/aws/lambda/substation/dynamodb.go
rename to cmd/aws/lambda/substation/dynamodb.go
diff --git a/v2/cmd/aws/lambda/substation/kinesis_firehose.go b/cmd/aws/lambda/substation/kinesis_firehose.go
similarity index 100%
rename from v2/cmd/aws/lambda/substation/kinesis_firehose.go
rename to cmd/aws/lambda/substation/kinesis_firehose.go
diff --git a/v2/cmd/aws/lambda/substation/kinesis_stream.go b/cmd/aws/lambda/substation/kinesis_stream.go
similarity index 100%
rename from v2/cmd/aws/lambda/substation/kinesis_stream.go
rename to cmd/aws/lambda/substation/kinesis_stream.go
diff --git a/v2/cmd/aws/lambda/substation/lambda.go b/cmd/aws/lambda/substation/lambda.go
similarity index 100%
rename from v2/cmd/aws/lambda/substation/lambda.go
rename to cmd/aws/lambda/substation/lambda.go
diff --git a/v2/cmd/aws/lambda/substation/main.go b/cmd/aws/lambda/substation/main.go
similarity index 100%
rename from v2/cmd/aws/lambda/substation/main.go
rename to cmd/aws/lambda/substation/main.go
diff --git a/v2/cmd/aws/lambda/substation/s3.go b/cmd/aws/lambda/substation/s3.go
similarity index 100%
rename from v2/cmd/aws/lambda/substation/s3.go
rename to cmd/aws/lambda/substation/s3.go
diff --git a/v2/cmd/aws/lambda/substation/sns.go b/cmd/aws/lambda/substation/sns.go
similarity index 100%
rename from v2/cmd/aws/lambda/substation/sns.go
rename to cmd/aws/lambda/substation/sns.go
diff --git a/v2/cmd/aws/lambda/substation/sqs.go b/cmd/aws/lambda/substation/sqs.go
similarity index 100%
rename from v2/cmd/aws/lambda/substation/sqs.go
rename to cmd/aws/lambda/substation/sqs.go
diff --git a/v2/cmd/aws/lambda/validate/main.go b/cmd/aws/lambda/validate/main.go
similarity index 100%
rename from v2/cmd/aws/lambda/validate/main.go
rename to cmd/aws/lambda/validate/main.go
diff --git a/v2/cmd/aws/lambda/validate/main_test.go b/cmd/aws/lambda/validate/main_test.go
similarity index 100%
rename from v2/cmd/aws/lambda/validate/main_test.go
rename to cmd/aws/lambda/validate/main_test.go
diff --git a/v2/cmd/development/benchmark/substation/main.go b/cmd/development/benchmark/substation/main.go
similarity index 100%
rename from v2/cmd/development/benchmark/substation/main.go
rename to cmd/development/benchmark/substation/main.go
diff --git a/v1/cmd/development/kinesis-tap/substation/README.md b/cmd/development/kinesis-tap/substation/README.md
similarity index 100%
rename from v1/cmd/development/kinesis-tap/substation/README.md
rename to cmd/development/kinesis-tap/substation/README.md
diff --git a/v2/cmd/development/kinesis-tap/substation/config.jsonnet b/cmd/development/kinesis-tap/substation/config.jsonnet
similarity index 100%
rename from v2/cmd/development/kinesis-tap/substation/config.jsonnet
rename to cmd/development/kinesis-tap/substation/config.jsonnet
diff --git a/v2/cmd/development/kinesis-tap/substation/main.go b/cmd/development/kinesis-tap/substation/main.go
similarity index 100%
rename from v2/cmd/development/kinesis-tap/substation/main.go
rename to cmd/development/kinesis-tap/substation/main.go
diff --git a/v1/condition/README.md b/condition/README.md
similarity index 100%
rename from v1/condition/README.md
rename to condition/README.md
diff --git a/v2/condition/condition.go b/condition/condition.go
similarity index 100%
rename from v2/condition/condition.go
rename to condition/condition.go
diff --git a/v2/condition/format_json.go b/condition/format_json.go
similarity index 100%
rename from v2/condition/format_json.go
rename to condition/format_json.go
diff --git a/v2/condition/format_json_test.go b/condition/format_json_test.go
similarity index 100%
rename from v2/condition/format_json_test.go
rename to condition/format_json_test.go
diff --git a/v2/condition/format_mime.go b/condition/format_mime.go
similarity index 100%
rename from v2/condition/format_mime.go
rename to condition/format_mime.go
diff --git a/v2/condition/format_mime_test.go b/condition/format_mime_test.go
similarity index 100%
rename from v2/condition/format_mime_test.go
rename to condition/format_mime_test.go
diff --git a/v2/condition/meta.go b/condition/meta.go
similarity index 100%
rename from v2/condition/meta.go
rename to condition/meta.go
diff --git a/v2/condition/meta_all.go b/condition/meta_all.go
similarity index 100%
rename from v2/condition/meta_all.go
rename to condition/meta_all.go
diff --git a/v2/condition/meta_all_test.go b/condition/meta_all_test.go
similarity index 100%
rename from v2/condition/meta_all_test.go
rename to condition/meta_all_test.go
diff --git a/v2/condition/meta_any.go b/condition/meta_any.go
similarity index 100%
rename from v2/condition/meta_any.go
rename to condition/meta_any.go
diff --git a/v2/condition/meta_any_test.go b/condition/meta_any_test.go
similarity index 100%
rename from v2/condition/meta_any_test.go
rename to condition/meta_any_test.go
diff --git a/v2/condition/meta_none.go b/condition/meta_none.go
similarity index 100%
rename from v2/condition/meta_none.go
rename to condition/meta_none.go
diff --git a/v2/condition/meta_none_test.go b/condition/meta_none_test.go
similarity index 100%
rename from v2/condition/meta_none_test.go
rename to condition/meta_none_test.go
diff --git a/v1/condition/network.go b/condition/network.go
similarity index 100%
rename from v1/condition/network.go
rename to condition/network.go
diff --git a/v2/condition/network_ip_global_unicast.go b/condition/network_ip_global_unicast.go
similarity index 100%
rename from v2/condition/network_ip_global_unicast.go
rename to condition/network_ip_global_unicast.go
diff --git a/v2/condition/network_ip_global_unicast_test.go b/condition/network_ip_global_unicast_test.go
similarity index 100%
rename from v2/condition/network_ip_global_unicast_test.go
rename to condition/network_ip_global_unicast_test.go
diff --git a/v2/condition/network_ip_link_local_multicast.go b/condition/network_ip_link_local_multicast.go
similarity index 100%
rename from v2/condition/network_ip_link_local_multicast.go
rename to condition/network_ip_link_local_multicast.go
diff --git a/v2/condition/network_ip_link_local_multicast_test.go b/condition/network_ip_link_local_multicast_test.go
similarity index 100%
rename from v2/condition/network_ip_link_local_multicast_test.go
rename to condition/network_ip_link_local_multicast_test.go
diff --git a/v2/condition/network_ip_link_local_unicast.go b/condition/network_ip_link_local_unicast.go
similarity index 100%
rename from v2/condition/network_ip_link_local_unicast.go
rename to condition/network_ip_link_local_unicast.go
diff --git a/v2/condition/network_ip_link_local_unicast_test.go b/condition/network_ip_link_local_unicast_test.go
similarity index 100%
rename from v2/condition/network_ip_link_local_unicast_test.go
rename to condition/network_ip_link_local_unicast_test.go
diff --git a/v2/condition/network_ip_loopback.go b/condition/network_ip_loopback.go
similarity index 100%
rename from v2/condition/network_ip_loopback.go
rename to condition/network_ip_loopback.go
diff --git a/v2/condition/network_ip_loopback_test.go b/condition/network_ip_loopback_test.go
similarity index 100%
rename from v2/condition/network_ip_loopback_test.go
rename to condition/network_ip_loopback_test.go
diff --git a/v2/condition/network_ip_multicast.go b/condition/network_ip_multicast.go
similarity index 100%
rename from v2/condition/network_ip_multicast.go
rename to condition/network_ip_multicast.go
diff --git a/v2/condition/network_ip_multicast_test.go b/condition/network_ip_multicast_test.go
similarity index 100%
rename from v2/condition/network_ip_multicast_test.go
rename to condition/network_ip_multicast_test.go
diff --git a/v2/condition/network_ip_private.go b/condition/network_ip_private.go
similarity index 100%
rename from v2/condition/network_ip_private.go
rename to condition/network_ip_private.go
diff --git a/v2/condition/network_ip_private_test.go b/condition/network_ip_private_test.go
similarity index 100%
rename from v2/condition/network_ip_private_test.go
rename to condition/network_ip_private_test.go
diff --git a/v2/condition/network_ip_unicast.go b/condition/network_ip_unicast.go
similarity index 100%
rename from v2/condition/network_ip_unicast.go
rename to condition/network_ip_unicast.go
diff --git a/v2/condition/network_ip_unicast_test.go b/condition/network_ip_unicast_test.go
similarity index 100%
rename from v2/condition/network_ip_unicast_test.go
rename to condition/network_ip_unicast_test.go
diff --git a/v2/condition/network_ip_unspecified.go b/condition/network_ip_unspecified.go
similarity index 100%
rename from v2/condition/network_ip_unspecified.go
rename to condition/network_ip_unspecified.go
diff --git a/v2/condition/network_ip_unspecified_test.go b/condition/network_ip_unspecified_test.go
similarity index 100%
rename from v2/condition/network_ip_unspecified_test.go
rename to condition/network_ip_unspecified_test.go
diff --git a/v2/condition/network_ip_valid.go b/condition/network_ip_valid.go
similarity index 100%
rename from v2/condition/network_ip_valid.go
rename to condition/network_ip_valid.go
diff --git a/v2/condition/network_ip_valid_test.go b/condition/network_ip_valid_test.go
similarity index 100%
rename from v2/condition/network_ip_valid_test.go
rename to condition/network_ip_valid_test.go
diff --git a/v1/condition/number.go b/condition/number.go
similarity index 100%
rename from v1/condition/number.go
rename to condition/number.go
diff --git a/v2/condition/number_bitwise_and.go b/condition/number_bitwise_and.go
similarity index 100%
rename from v2/condition/number_bitwise_and.go
rename to condition/number_bitwise_and.go
diff --git a/v2/condition/number_bitwise_and_test.go b/condition/number_bitwise_and_test.go
similarity index 100%
rename from v2/condition/number_bitwise_and_test.go
rename to condition/number_bitwise_and_test.go
diff --git a/v2/condition/number_bitwise_not.go b/condition/number_bitwise_not.go
similarity index 100%
rename from v2/condition/number_bitwise_not.go
rename to condition/number_bitwise_not.go
diff --git a/v2/condition/number_bitwise_or.go b/condition/number_bitwise_or.go
similarity index 100%
rename from v2/condition/number_bitwise_or.go
rename to condition/number_bitwise_or.go
diff --git a/v2/condition/number_bitwise_or_test.go b/condition/number_bitwise_or_test.go
similarity index 100%
rename from v2/condition/number_bitwise_or_test.go
rename to condition/number_bitwise_or_test.go
diff --git a/v2/condition/number_bitwise_xor.go b/condition/number_bitwise_xor.go
similarity index 100%
rename from v2/condition/number_bitwise_xor.go
rename to condition/number_bitwise_xor.go
diff --git a/v2/condition/number_bitwise_xor_test.go b/condition/number_bitwise_xor_test.go
similarity index 100%
rename from v2/condition/number_bitwise_xor_test.go
rename to condition/number_bitwise_xor_test.go
diff --git a/v2/condition/number_equal_to.go b/condition/number_equal_to.go
similarity index 100%
rename from v2/condition/number_equal_to.go
rename to condition/number_equal_to.go
diff --git a/v2/condition/number_equal_to_test.go b/condition/number_equal_to_test.go
similarity index 100%
rename from v2/condition/number_equal_to_test.go
rename to condition/number_equal_to_test.go
diff --git a/v2/condition/number_greater_than.go b/condition/number_greater_than.go
similarity index 100%
rename from v2/condition/number_greater_than.go
rename to condition/number_greater_than.go
diff --git a/v2/condition/number_greater_than_test.go b/condition/number_greater_than_test.go
similarity index 100%
rename from v2/condition/number_greater_than_test.go
rename to condition/number_greater_than_test.go
diff --git a/v2/condition/number_length_equal_to.go b/condition/number_length_equal_to.go
similarity index 100%
rename from v2/condition/number_length_equal_to.go
rename to condition/number_length_equal_to.go
diff --git a/v2/condition/number_length_equal_to_test.go b/condition/number_length_equal_to_test.go
similarity index 100%
rename from v2/condition/number_length_equal_to_test.go
rename to condition/number_length_equal_to_test.go
diff --git a/v2/condition/number_length_greater_than.go b/condition/number_length_greater_than.go
similarity index 100%
rename from v2/condition/number_length_greater_than.go
rename to condition/number_length_greater_than.go
diff --git a/v2/condition/number_length_greater_than_test.go b/condition/number_length_greater_than_test.go
similarity index 100%
rename from v2/condition/number_length_greater_than_test.go
rename to condition/number_length_greater_than_test.go
diff --git a/v2/condition/number_length_less_than.go b/condition/number_length_less_than.go
similarity index 100%
rename from v2/condition/number_length_less_than.go
rename to condition/number_length_less_than.go
diff --git a/v2/condition/number_length_less_than_test.go b/condition/number_length_less_than_test.go
similarity index 100%
rename from v2/condition/number_length_less_than_test.go
rename to condition/number_length_less_than_test.go
diff --git a/v2/condition/number_less_than.go b/condition/number_less_than.go
similarity index 100%
rename from v2/condition/number_less_than.go
rename to condition/number_less_than.go
diff --git a/v2/condition/number_less_than_test.go b/condition/number_less_than_test.go
similarity index 100%
rename from v2/condition/number_less_than_test.go
rename to condition/number_less_than_test.go
diff --git a/v2/condition/string.go b/condition/string.go
similarity index 100%
rename from v2/condition/string.go
rename to condition/string.go
diff --git a/v2/condition/string_contains.go b/condition/string_contains.go
similarity index 100%
rename from v2/condition/string_contains.go
rename to condition/string_contains.go
diff --git a/v2/condition/string_contains_test.go b/condition/string_contains_test.go
similarity index 100%
rename from v2/condition/string_contains_test.go
rename to condition/string_contains_test.go
diff --git a/v2/condition/string_ends_with.go b/condition/string_ends_with.go
similarity index 100%
rename from v2/condition/string_ends_with.go
rename to condition/string_ends_with.go
diff --git a/v2/condition/string_ends_with_test.go b/condition/string_ends_with_test.go
similarity index 100%
rename from v2/condition/string_ends_with_test.go
rename to condition/string_ends_with_test.go
diff --git a/v2/condition/string_equal_to.go b/condition/string_equal_to.go
similarity index 100%
rename from v2/condition/string_equal_to.go
rename to condition/string_equal_to.go
diff --git a/v2/condition/string_equal_to_test.go b/condition/string_equal_to_test.go
similarity index 100%
rename from v2/condition/string_equal_to_test.go
rename to condition/string_equal_to_test.go
diff --git a/v2/condition/string_greater_than.go b/condition/string_greater_than.go
similarity index 100%
rename from v2/condition/string_greater_than.go
rename to condition/string_greater_than.go
diff --git a/v2/condition/string_greater_than_test.go b/condition/string_greater_than_test.go
similarity index 100%
rename from v2/condition/string_greater_than_test.go
rename to condition/string_greater_than_test.go
diff --git a/v2/condition/string_less_than.go b/condition/string_less_than.go
similarity index 100%
rename from v2/condition/string_less_than.go
rename to condition/string_less_than.go
diff --git a/v2/condition/string_less_than_test.go b/condition/string_less_than_test.go
similarity index 100%
rename from v2/condition/string_less_than_test.go
rename to condition/string_less_than_test.go
diff --git a/v2/condition/string_match.go b/condition/string_match.go
similarity index 100%
rename from v2/condition/string_match.go
rename to condition/string_match.go
diff --git a/v2/condition/string_match_test.go b/condition/string_match_test.go
similarity index 100%
rename from v2/condition/string_match_test.go
rename to condition/string_match_test.go
diff --git a/v2/condition/string_starts_with.go b/condition/string_starts_with.go
similarity index 100%
rename from v2/condition/string_starts_with.go
rename to condition/string_starts_with.go
diff --git a/v2/condition/string_starts_with_test.go b/condition/string_starts_with_test.go
similarity index 100%
rename from v2/condition/string_starts_with_test.go
rename to condition/string_starts_with_test.go
diff --git a/v2/condition/utility_random.go b/condition/utility_random.go
similarity index 100%
rename from v2/condition/utility_random.go
rename to condition/utility_random.go
diff --git a/v1/config/config.go b/config/config.go
similarity index 100%
rename from v1/config/config.go
rename to config/config.go
diff --git a/v2/examples/condition/meta/config.jsonnet b/examples/condition/meta/config.jsonnet
similarity index 100%
rename from v2/examples/condition/meta/config.jsonnet
rename to examples/condition/meta/config.jsonnet
diff --git a/v2/examples/condition/meta/stdout.txt b/examples/condition/meta/stdout.txt
similarity index 100%
rename from v2/examples/condition/meta/stdout.txt
rename to examples/condition/meta/stdout.txt
diff --git a/v2/examples/condition/number/config.jsonnet b/examples/condition/number/config.jsonnet
similarity index 100%
rename from v2/examples/condition/number/config.jsonnet
rename to examples/condition/number/config.jsonnet
diff --git a/v2/examples/condition/number/stdout.txt b/examples/condition/number/stdout.txt
similarity index 100%
rename from v2/examples/condition/number/stdout.txt
rename to examples/condition/number/stdout.txt
diff --git a/v2/examples/condition/string/config.jsonnet b/examples/condition/string/config.jsonnet
similarity index 100%
rename from v2/examples/condition/string/config.jsonnet
rename to examples/condition/string/config.jsonnet
diff --git a/v2/examples/condition/string/stdout.txt b/examples/condition/string/stdout.txt
similarity index 100%
rename from v2/examples/condition/string/stdout.txt
rename to examples/condition/string/stdout.txt
diff --git a/v2/examples/main.go b/examples/main.go
similarity index 100%
rename from v2/examples/main.go
rename to examples/main.go
diff --git a/v2/examples/transform/aggregate/sample/config.jsonnet b/examples/transform/aggregate/sample/config.jsonnet
similarity index 100%
rename from v2/examples/transform/aggregate/sample/config.jsonnet
rename to examples/transform/aggregate/sample/config.jsonnet
diff --git a/v2/examples/transform/aggregate/sample/stdout.txt b/examples/transform/aggregate/sample/stdout.txt
similarity index 100%
rename from v2/examples/transform/aggregate/sample/stdout.txt
rename to examples/transform/aggregate/sample/stdout.txt
diff --git a/v2/examples/transform/aggregate/summarize/config.jsonnet b/examples/transform/aggregate/summarize/config.jsonnet
similarity index 100%
rename from v2/examples/transform/aggregate/summarize/config.jsonnet
rename to examples/transform/aggregate/summarize/config.jsonnet
diff --git a/v2/examples/transform/aggregate/summarize/stdout.txt b/examples/transform/aggregate/summarize/stdout.txt
similarity index 100%
rename from v2/examples/transform/aggregate/summarize/stdout.txt
rename to examples/transform/aggregate/summarize/stdout.txt
diff --git a/v2/examples/transform/array/extend/config.jsonnet b/examples/transform/array/extend/config.jsonnet
similarity index 100%
rename from v2/examples/transform/array/extend/config.jsonnet
rename to examples/transform/array/extend/config.jsonnet
diff --git a/v2/examples/transform/array/extend/stdout.txt b/examples/transform/array/extend/stdout.txt
similarity index 100%
rename from v2/examples/transform/array/extend/stdout.txt
rename to examples/transform/array/extend/stdout.txt
diff --git a/v2/examples/transform/array/flatten/config.jsonnet b/examples/transform/array/flatten/config.jsonnet
similarity index 100%
rename from v2/examples/transform/array/flatten/config.jsonnet
rename to examples/transform/array/flatten/config.jsonnet
diff --git a/v2/examples/transform/array/flatten/stdout.txt b/examples/transform/array/flatten/stdout.txt
similarity index 100%
rename from v2/examples/transform/array/flatten/stdout.txt
rename to examples/transform/array/flatten/stdout.txt
diff --git a/v2/examples/transform/array/flatten_deep/config.jsonnet b/examples/transform/array/flatten_deep/config.jsonnet
similarity index 100%
rename from v2/examples/transform/array/flatten_deep/config.jsonnet
rename to examples/transform/array/flatten_deep/config.jsonnet
diff --git a/v2/examples/transform/array/flatten_deep/stdout.txt b/examples/transform/array/flatten_deep/stdout.txt
similarity index 100%
rename from v2/examples/transform/array/flatten_deep/stdout.txt
rename to examples/transform/array/flatten_deep/stdout.txt
diff --git a/v2/examples/transform/array/group/config.jsonnet b/examples/transform/array/group/config.jsonnet
similarity index 100%
rename from v2/examples/transform/array/group/config.jsonnet
rename to examples/transform/array/group/config.jsonnet
diff --git a/v2/examples/transform/array/group/stdout.txt b/examples/transform/array/group/stdout.txt
similarity index 100%
rename from v2/examples/transform/array/group/stdout.txt
rename to examples/transform/array/group/stdout.txt
diff --git a/v2/examples/transform/enrich/http_secret/config.jsonnet b/examples/transform/enrich/http_secret/config.jsonnet
similarity index 100%
rename from v2/examples/transform/enrich/http_secret/config.jsonnet
rename to examples/transform/enrich/http_secret/config.jsonnet
diff --git a/v2/examples/transform/enrich/kvstore_csv/config.jsonnet b/examples/transform/enrich/kvstore_csv/config.jsonnet
similarity index 100%
rename from v2/examples/transform/enrich/kvstore_csv/config.jsonnet
rename to examples/transform/enrich/kvstore_csv/config.jsonnet
diff --git a/v1/examples/config/transform/enrich/kvstore_csv/kv.csv b/examples/transform/enrich/kvstore_csv/kv.csv
similarity index 100%
rename from v1/examples/config/transform/enrich/kvstore_csv/kv.csv
rename to examples/transform/enrich/kvstore_csv/kv.csv
diff --git a/v2/examples/transform/enrich/kvstore_csv/stdout.txt b/examples/transform/enrich/kvstore_csv/stdout.txt
similarity index 100%
rename from v2/examples/transform/enrich/kvstore_csv/stdout.txt
rename to examples/transform/enrich/kvstore_csv/stdout.txt
diff --git a/v2/examples/transform/enrich/kvstore_json/config.jsonnet b/examples/transform/enrich/kvstore_json/config.jsonnet
similarity index 100%
rename from v2/examples/transform/enrich/kvstore_json/config.jsonnet
rename to examples/transform/enrich/kvstore_json/config.jsonnet
diff --git a/v2/examples/transform/enrich/kvstore_json/stdout.txt b/examples/transform/enrich/kvstore_json/stdout.txt
similarity index 100%
rename from v2/examples/transform/enrich/kvstore_json/stdout.txt
rename to examples/transform/enrich/kvstore_json/stdout.txt
diff --git a/v2/examples/transform/enrich/kvstore_set_add/config.jsonnet b/examples/transform/enrich/kvstore_set_add/config.jsonnet
similarity index 100%
rename from v2/examples/transform/enrich/kvstore_set_add/config.jsonnet
rename to examples/transform/enrich/kvstore_set_add/config.jsonnet
diff --git a/v2/examples/transform/enrich/kvstore_set_add/stdout.txt b/examples/transform/enrich/kvstore_set_add/stdout.txt
similarity index 100%
rename from v2/examples/transform/enrich/kvstore_set_add/stdout.txt
rename to examples/transform/enrich/kvstore_set_add/stdout.txt
diff --git a/v2/examples/transform/enrich/mmdb/config.jsonnet b/examples/transform/enrich/mmdb/config.jsonnet
similarity index 100%
rename from v2/examples/transform/enrich/mmdb/config.jsonnet
rename to examples/transform/enrich/mmdb/config.jsonnet
diff --git a/v2/examples/transform/enrich/mmdb/stdout.txt b/examples/transform/enrich/mmdb/stdout.txt
similarity index 100%
rename from v2/examples/transform/enrich/mmdb/stdout.txt
rename to examples/transform/enrich/mmdb/stdout.txt
diff --git a/v2/examples/transform/enrich/urlscan/config.jsonnet b/examples/transform/enrich/urlscan/config.jsonnet
similarity index 100%
rename from v2/examples/transform/enrich/urlscan/config.jsonnet
rename to examples/transform/enrich/urlscan/config.jsonnet
diff --git a/v1/examples/config/transform/format/zip/config.jsonnet b/examples/transform/format/zip/config.jsonnet
similarity index 100%
rename from v1/examples/config/transform/format/zip/config.jsonnet
rename to examples/transform/format/zip/config.jsonnet
diff --git a/v1/examples/config/transform/format/zip/data.csv b/examples/transform/format/zip/data.csv
similarity index 100%
rename from v1/examples/config/transform/format/zip/data.csv
rename to examples/transform/format/zip/data.csv
diff --git a/v2/examples/transform/format/zip/stdout.txt b/examples/transform/format/zip/stdout.txt
similarity index 100%
rename from v2/examples/transform/format/zip/stdout.txt
rename to examples/transform/format/zip/stdout.txt
diff --git a/v2/examples/transform/meta/crash_program/config.jsonnet b/examples/transform/meta/crash_program/config.jsonnet
similarity index 100%
rename from v2/examples/transform/meta/crash_program/config.jsonnet
rename to examples/transform/meta/crash_program/config.jsonnet
diff --git a/v2/examples/transform/meta/crash_program/stdout.txt b/examples/transform/meta/crash_program/stdout.txt
similarity index 100%
rename from v2/examples/transform/meta/crash_program/stdout.txt
rename to examples/transform/meta/crash_program/stdout.txt
diff --git a/v2/examples/transform/meta/each_in_array/config.jsonnet b/examples/transform/meta/each_in_array/config.jsonnet
similarity index 100%
rename from v2/examples/transform/meta/each_in_array/config.jsonnet
rename to examples/transform/meta/each_in_array/config.jsonnet
diff --git a/v2/examples/transform/meta/each_in_array/stdout.txt b/examples/transform/meta/each_in_array/stdout.txt
similarity index 100%
rename from v2/examples/transform/meta/each_in_array/stdout.txt
rename to examples/transform/meta/each_in_array/stdout.txt
diff --git a/v2/examples/transform/meta/exactly_once_consumer/config.jsonnet b/examples/transform/meta/exactly_once_consumer/config.jsonnet
similarity index 100%
rename from v2/examples/transform/meta/exactly_once_consumer/config.jsonnet
rename to examples/transform/meta/exactly_once_consumer/config.jsonnet
diff --git a/v2/examples/transform/meta/exactly_once_consumer/stdout.txt b/examples/transform/meta/exactly_once_consumer/stdout.txt
similarity index 100%
rename from v2/examples/transform/meta/exactly_once_consumer/stdout.txt
rename to examples/transform/meta/exactly_once_consumer/stdout.txt
diff --git a/v2/examples/transform/meta/exactly_once_producer/config.jsonnet b/examples/transform/meta/exactly_once_producer/config.jsonnet
similarity index 100%
rename from v2/examples/transform/meta/exactly_once_producer/config.jsonnet
rename to examples/transform/meta/exactly_once_producer/config.jsonnet
diff --git a/v2/examples/transform/meta/exactly_once_producer/stdout.txt b/examples/transform/meta/exactly_once_producer/stdout.txt
similarity index 100%
rename from v2/examples/transform/meta/exactly_once_producer/stdout.txt
rename to examples/transform/meta/exactly_once_producer/stdout.txt
diff --git a/v2/examples/transform/meta/exactly_once_system/config.jsonnet b/examples/transform/meta/exactly_once_system/config.jsonnet
similarity index 100%
rename from v2/examples/transform/meta/exactly_once_system/config.jsonnet
rename to examples/transform/meta/exactly_once_system/config.jsonnet
diff --git a/v2/examples/transform/meta/exactly_once_system/stdout.txt b/examples/transform/meta/exactly_once_system/stdout.txt similarity index 100% rename from v2/examples/transform/meta/exactly_once_system/stdout.txt rename to examples/transform/meta/exactly_once_system/stdout.txt diff --git a/v2/examples/transform/meta/execution_time/config.jsonnet b/examples/transform/meta/execution_time/config.jsonnet similarity index 100% rename from v2/examples/transform/meta/execution_time/config.jsonnet rename to examples/transform/meta/execution_time/config.jsonnet diff --git a/v2/examples/transform/meta/execution_time/stdout.txt b/examples/transform/meta/execution_time/stdout.txt similarity index 100% rename from v2/examples/transform/meta/execution_time/stdout.txt rename to examples/transform/meta/execution_time/stdout.txt diff --git a/v2/examples/transform/meta/retry_with_backoff/config.jsonnet b/examples/transform/meta/retry_with_backoff/config.jsonnet similarity index 100% rename from v2/examples/transform/meta/retry_with_backoff/config.jsonnet rename to examples/transform/meta/retry_with_backoff/config.jsonnet diff --git a/v2/examples/transform/meta/retry_with_backoff/stdout.txt b/examples/transform/meta/retry_with_backoff/stdout.txt similarity index 100% rename from v2/examples/transform/meta/retry_with_backoff/stdout.txt rename to examples/transform/meta/retry_with_backoff/stdout.txt diff --git a/v2/examples/transform/number/clamp/config.jsonnet b/examples/transform/number/clamp/config.jsonnet similarity index 100% rename from v2/examples/transform/number/clamp/config.jsonnet rename to examples/transform/number/clamp/config.jsonnet diff --git a/v1/examples/config/transform/number/clamp/data.txt b/examples/transform/number/clamp/data.txt similarity index 100% rename from v1/examples/config/transform/number/clamp/data.txt rename to examples/transform/number/clamp/data.txt diff --git a/v1/examples/config/transform/number/clamp/stdout.txt b/examples/transform/number/clamp/stdout.txt similarity index 100% rename from v1/examples/config/transform/number/clamp/stdout.txt rename to examples/transform/number/clamp/stdout.txt diff --git a/v2/examples/transform/number/max/config.jsonnet b/examples/transform/number/max/config.jsonnet similarity index 100% rename from v2/examples/transform/number/max/config.jsonnet rename to examples/transform/number/max/config.jsonnet diff --git a/v1/examples/config/transform/number/max/data.txt b/examples/transform/number/max/data.txt similarity index 100% rename from v1/examples/config/transform/number/max/data.txt rename to examples/transform/number/max/data.txt diff --git a/v1/examples/config/transform/number/max/stdout.txt b/examples/transform/number/max/stdout.txt similarity index 100% rename from v1/examples/config/transform/number/max/stdout.txt rename to examples/transform/number/max/stdout.txt diff --git a/v2/examples/transform/number/min/config.jsonnet b/examples/transform/number/min/config.jsonnet similarity index 100% rename from v2/examples/transform/number/min/config.jsonnet rename to examples/transform/number/min/config.jsonnet diff --git a/v1/examples/config/transform/number/min/data.txt b/examples/transform/number/min/data.txt similarity index 100% rename from v1/examples/config/transform/number/min/data.txt rename to examples/transform/number/min/data.txt diff --git a/v1/examples/config/transform/number/min/stdout.txt b/examples/transform/number/min/stdout.txt similarity index 100% rename from v1/examples/config/transform/number/min/stdout.txt 
rename to examples/transform/number/min/stdout.txt diff --git a/v2/examples/transform/send/aux_transforms/config.jsonnet b/examples/transform/send/aux_transforms/config.jsonnet similarity index 100% rename from v2/examples/transform/send/aux_transforms/config.jsonnet rename to examples/transform/send/aux_transforms/config.jsonnet diff --git a/v2/examples/transform/send/aux_transforms/stdout.txt b/examples/transform/send/aux_transforms/stdout.txt similarity index 100% rename from v2/examples/transform/send/aux_transforms/stdout.txt rename to examples/transform/send/aux_transforms/stdout.txt diff --git a/v2/examples/transform/send/aws_s3_glacier/config.jsonnet b/examples/transform/send/aws_s3_glacier/config.jsonnet similarity index 100% rename from v2/examples/transform/send/aws_s3_glacier/config.jsonnet rename to examples/transform/send/aws_s3_glacier/config.jsonnet diff --git a/v2/examples/transform/send/batch/config.jsonnet b/examples/transform/send/batch/config.jsonnet similarity index 100% rename from v2/examples/transform/send/batch/config.jsonnet rename to examples/transform/send/batch/config.jsonnet diff --git a/v2/examples/transform/send/batch/stdout.txt b/examples/transform/send/batch/stdout.txt similarity index 100% rename from v2/examples/transform/send/batch/stdout.txt rename to examples/transform/send/batch/stdout.txt diff --git a/v2/examples/transform/send/datadog/config.jsonnet b/examples/transform/send/datadog/config.jsonnet similarity index 100% rename from v2/examples/transform/send/datadog/config.jsonnet rename to examples/transform/send/datadog/config.jsonnet diff --git a/v2/examples/transform/send/splunk/config.jsonnet b/examples/transform/send/splunk/config.jsonnet similarity index 100% rename from v2/examples/transform/send/splunk/config.jsonnet rename to examples/transform/send/splunk/config.jsonnet diff --git a/v2/examples/transform/send/sumologic/config.jsonnet b/examples/transform/send/sumologic/config.jsonnet similarity index 100% rename from v2/examples/transform/send/sumologic/config.jsonnet rename to examples/transform/send/sumologic/config.jsonnet diff --git a/v2/examples/transform/time/str_conversion/config.jsonnet b/examples/transform/time/str_conversion/config.jsonnet similarity index 100% rename from v2/examples/transform/time/str_conversion/config.jsonnet rename to examples/transform/time/str_conversion/config.jsonnet diff --git a/v2/examples/transform/time/str_conversion/stdout.txt b/examples/transform/time/str_conversion/stdout.txt similarity index 100% rename from v2/examples/transform/time/str_conversion/stdout.txt rename to examples/transform/time/str_conversion/stdout.txt diff --git a/v2/examples/transform/utility/generate_ctrl/config.jsonnet b/examples/transform/utility/generate_ctrl/config.jsonnet similarity index 100% rename from v2/examples/transform/utility/generate_ctrl/config.jsonnet rename to examples/transform/utility/generate_ctrl/config.jsonnet diff --git a/v2/examples/transform/utility/generate_ctrl/stdout.txt b/examples/transform/utility/generate_ctrl/stdout.txt similarity index 100% rename from v2/examples/transform/utility/generate_ctrl/stdout.txt rename to examples/transform/utility/generate_ctrl/stdout.txt diff --git a/v2/examples/transform/utility/message_bytes/config.jsonnet b/examples/transform/utility/message_bytes/config.jsonnet similarity index 100% rename from v2/examples/transform/utility/message_bytes/config.jsonnet rename to examples/transform/utility/message_bytes/config.jsonnet diff --git 
a/v2/examples/transform/utility/message_bytes/stdout.txt b/examples/transform/utility/message_bytes/stdout.txt similarity index 100% rename from v2/examples/transform/utility/message_bytes/stdout.txt rename to examples/transform/utility/message_bytes/stdout.txt diff --git a/v2/examples/transform/utility/message_count/config.jsonnet b/examples/transform/utility/message_count/config.jsonnet similarity index 100% rename from v2/examples/transform/utility/message_count/config.jsonnet rename to examples/transform/utility/message_count/config.jsonnet diff --git a/v2/examples/transform/utility/message_count/stdout.txt b/examples/transform/utility/message_count/stdout.txt similarity index 100% rename from v2/examples/transform/utility/message_count/stdout.txt rename to examples/transform/utility/message_count/stdout.txt diff --git a/v1/examples/config/transform/utility/message_freshness/config.jsonnet b/examples/transform/utility/message_freshness/config.jsonnet similarity index 100% rename from v1/examples/config/transform/utility/message_freshness/config.jsonnet rename to examples/transform/utility/message_freshness/config.jsonnet diff --git a/v2/examples/transform/utility/message_freshness/stdout.txt b/examples/transform/utility/message_freshness/stdout.txt similarity index 100% rename from v2/examples/transform/utility/message_freshness/stdout.txt rename to examples/transform/utility/message_freshness/stdout.txt diff --git a/v2/go.mod b/go.mod similarity index 100% rename from v2/go.mod rename to go.mod diff --git a/v2/go.sum b/go.sum similarity index 100% rename from v2/go.sum rename to go.sum diff --git a/v1/internal/README.md b/internal/README.md similarity index 100% rename from v1/internal/README.md rename to internal/README.md diff --git a/v1/internal/aggregate/aggregate.go b/internal/aggregate/aggregate.go similarity index 100% rename from v1/internal/aggregate/aggregate.go rename to internal/aggregate/aggregate.go diff --git a/v1/internal/aws/README.md b/internal/aws/README.md similarity index 100% rename from v1/internal/aws/README.md rename to internal/aws/README.md diff --git a/v2/internal/aws/appconfig/appconfig.go b/internal/aws/appconfig/appconfig.go similarity index 100% rename from v2/internal/aws/appconfig/appconfig.go rename to internal/aws/appconfig/appconfig.go diff --git a/v2/internal/aws/cloudwatch/cloudwatch.go b/internal/aws/cloudwatch/cloudwatch.go similarity index 100% rename from v2/internal/aws/cloudwatch/cloudwatch.go rename to internal/aws/cloudwatch/cloudwatch.go diff --git a/v2/internal/aws/config.go b/internal/aws/config.go similarity index 100% rename from v2/internal/aws/config.go rename to internal/aws/config.go diff --git a/v1/internal/aws/config_v2.go b/internal/aws/config_v2.go similarity index 100% rename from v1/internal/aws/config_v2.go rename to internal/aws/config_v2.go diff --git a/v2/internal/aws/dynamodb/dynamodb.go b/internal/aws/dynamodb/dynamodb.go similarity index 100% rename from v2/internal/aws/dynamodb/dynamodb.go rename to internal/aws/dynamodb/dynamodb.go diff --git a/v1/internal/aws/dynamodb/dynamodb_test.go b/internal/aws/dynamodb/dynamodb_test.go similarity index 100% rename from v1/internal/aws/dynamodb/dynamodb_test.go rename to internal/aws/dynamodb/dynamodb_test.go diff --git a/v2/internal/aws/firehose/firehose.go b/internal/aws/firehose/firehose.go similarity index 100% rename from v2/internal/aws/firehose/firehose.go rename to internal/aws/firehose/firehose.go diff --git a/v1/internal/aws/firehose/firehose_test.go 
b/internal/aws/firehose/firehose_test.go similarity index 100% rename from v1/internal/aws/firehose/firehose_test.go rename to internal/aws/firehose/firehose_test.go diff --git a/v2/internal/aws/kinesis/kinesis.go b/internal/aws/kinesis/kinesis.go similarity index 100% rename from v2/internal/aws/kinesis/kinesis.go rename to internal/aws/kinesis/kinesis.go diff --git a/v1/internal/aws/kinesis/kinesis_test.go b/internal/aws/kinesis/kinesis_test.go similarity index 100% rename from v1/internal/aws/kinesis/kinesis_test.go rename to internal/aws/kinesis/kinesis_test.go diff --git a/v2/internal/aws/lambda/lambda.go b/internal/aws/lambda/lambda.go similarity index 100% rename from v2/internal/aws/lambda/lambda.go rename to internal/aws/lambda/lambda.go diff --git a/v1/internal/aws/lambda/lambda_test.go b/internal/aws/lambda/lambda_test.go similarity index 100% rename from v1/internal/aws/lambda/lambda_test.go rename to internal/aws/lambda/lambda_test.go diff --git a/v2/internal/aws/s3manager/s3manager.go b/internal/aws/s3manager/s3manager.go similarity index 100% rename from v2/internal/aws/s3manager/s3manager.go rename to internal/aws/s3manager/s3manager.go diff --git a/v1/internal/aws/s3manager/s3manager_test.go b/internal/aws/s3manager/s3manager_test.go similarity index 100% rename from v1/internal/aws/s3manager/s3manager_test.go rename to internal/aws/s3manager/s3manager_test.go diff --git a/v2/internal/aws/secretsmanager/secretsmanager.go b/internal/aws/secretsmanager/secretsmanager.go similarity index 100% rename from v2/internal/aws/secretsmanager/secretsmanager.go rename to internal/aws/secretsmanager/secretsmanager.go diff --git a/v1/internal/aws/secretsmanager/secretsmanager_test.go b/internal/aws/secretsmanager/secretsmanager_test.go similarity index 100% rename from v1/internal/aws/secretsmanager/secretsmanager_test.go rename to internal/aws/secretsmanager/secretsmanager_test.go diff --git a/v2/internal/aws/sns/sns.go b/internal/aws/sns/sns.go similarity index 100% rename from v2/internal/aws/sns/sns.go rename to internal/aws/sns/sns.go diff --git a/v1/internal/aws/sns/sns_test.go b/internal/aws/sns/sns_test.go similarity index 100% rename from v1/internal/aws/sns/sns_test.go rename to internal/aws/sns/sns_test.go diff --git a/v2/internal/aws/sqs/sqs.go b/internal/aws/sqs/sqs.go similarity index 100% rename from v2/internal/aws/sqs/sqs.go rename to internal/aws/sqs/sqs.go diff --git a/v1/internal/aws/sqs/sqs_test.go b/internal/aws/sqs/sqs_test.go similarity index 100% rename from v1/internal/aws/sqs/sqs_test.go rename to internal/aws/sqs/sqs_test.go diff --git a/v1/internal/base64/base64.go b/internal/base64/base64.go similarity index 100% rename from v1/internal/base64/base64.go rename to internal/base64/base64.go diff --git a/v1/internal/base64/base64_test.go b/internal/base64/base64_test.go similarity index 100% rename from v1/internal/base64/base64_test.go rename to internal/base64/base64_test.go diff --git a/v2/internal/bufio/bufio.go b/internal/bufio/bufio.go similarity index 100% rename from v2/internal/bufio/bufio.go rename to internal/bufio/bufio.go diff --git a/v1/internal/bufio/bufio_test.go b/internal/bufio/bufio_test.go similarity index 100% rename from v1/internal/bufio/bufio_test.go rename to internal/bufio/bufio_test.go diff --git a/v2/internal/bufio/example_test.go b/internal/bufio/example_test.go similarity index 100% rename from v2/internal/bufio/example_test.go rename to internal/bufio/example_test.go diff --git a/v1/internal/channel/channel.go 
b/internal/channel/channel.go similarity index 100% rename from v1/internal/channel/channel.go rename to internal/channel/channel.go diff --git a/v2/internal/config/config.go b/internal/config/config.go similarity index 100% rename from v2/internal/config/config.go rename to internal/config/config.go diff --git a/v1/internal/errors/errors.go b/internal/errors/errors.go similarity index 100% rename from v1/internal/errors/errors.go rename to internal/errors/errors.go diff --git a/v2/internal/file/example_test.go b/internal/file/example_test.go similarity index 100% rename from v2/internal/file/example_test.go rename to internal/file/example_test.go diff --git a/v2/internal/file/file.go b/internal/file/file.go similarity index 100% rename from v2/internal/file/file.go rename to internal/file/file.go diff --git a/v1/internal/http/README.md b/internal/http/README.md similarity index 100% rename from v1/internal/http/README.md rename to internal/http/README.md diff --git a/v1/internal/http/http.go b/internal/http/http.go similarity index 100% rename from v1/internal/http/http.go rename to internal/http/http.go diff --git a/v1/internal/http/http_test.go b/internal/http/http_test.go similarity index 100% rename from v1/internal/http/http_test.go rename to internal/http/http_test.go diff --git a/v2/internal/kv/aws_dynamodb.go b/internal/kv/aws_dynamodb.go similarity index 100% rename from v2/internal/kv/aws_dynamodb.go rename to internal/kv/aws_dynamodb.go diff --git a/v2/internal/kv/csv_file.go b/internal/kv/csv_file.go similarity index 100% rename from v2/internal/kv/csv_file.go rename to internal/kv/csv_file.go diff --git a/v2/internal/kv/example_test.go b/internal/kv/example_test.go similarity index 100% rename from v2/internal/kv/example_test.go rename to internal/kv/example_test.go diff --git a/v2/internal/kv/json_file.go b/internal/kv/json_file.go similarity index 100% rename from v2/internal/kv/json_file.go rename to internal/kv/json_file.go diff --git a/v2/internal/kv/kv.go b/internal/kv/kv.go similarity index 100% rename from v2/internal/kv/kv.go rename to internal/kv/kv.go diff --git a/v2/internal/kv/memory.go b/internal/kv/memory.go similarity index 100% rename from v2/internal/kv/memory.go rename to internal/kv/memory.go diff --git a/v2/internal/kv/mmdb.go b/internal/kv/mmdb.go similarity index 100% rename from v2/internal/kv/mmdb.go rename to internal/kv/mmdb.go diff --git a/v2/internal/kv/text_file.go b/internal/kv/text_file.go similarity index 100% rename from v2/internal/kv/text_file.go rename to internal/kv/text_file.go diff --git a/v1/internal/log/log.go b/internal/log/log.go similarity index 100% rename from v1/internal/log/log.go rename to internal/log/log.go diff --git a/v2/internal/media/example_test.go b/internal/media/example_test.go similarity index 100% rename from v2/internal/media/example_test.go rename to internal/media/example_test.go diff --git a/v1/internal/media/media.go b/internal/media/media.go similarity index 100% rename from v1/internal/media/media.go rename to internal/media/media.go diff --git a/v1/internal/media/media_test.go b/internal/media/media_test.go similarity index 100% rename from v1/internal/media/media_test.go rename to internal/media/media_test.go diff --git a/v2/internal/metrics/README.md b/internal/metrics/README.md similarity index 100% rename from v2/internal/metrics/README.md rename to internal/metrics/README.md diff --git a/v2/internal/metrics/aws_cloudwatch_embedded_metrics.go b/internal/metrics/aws_cloudwatch_embedded_metrics.go 
similarity index 100% rename from v2/internal/metrics/aws_cloudwatch_embedded_metrics.go rename to internal/metrics/aws_cloudwatch_embedded_metrics.go diff --git a/v2/internal/metrics/metrics.go b/internal/metrics/metrics.go similarity index 100% rename from v2/internal/metrics/metrics.go rename to internal/metrics/metrics.go diff --git a/v2/internal/secrets/aws_secrets_manager.go b/internal/secrets/aws_secrets_manager.go similarity index 100% rename from v2/internal/secrets/aws_secrets_manager.go rename to internal/secrets/aws_secrets_manager.go diff --git a/v2/internal/secrets/environment_variable.go b/internal/secrets/environment_variable.go similarity index 100% rename from v2/internal/secrets/environment_variable.go rename to internal/secrets/environment_variable.go diff --git a/v2/internal/secrets/secrets.go b/internal/secrets/secrets.go similarity index 100% rename from v2/internal/secrets/secrets.go rename to internal/secrets/secrets.go diff --git a/v2/internal/secrets/secrets_test.go b/internal/secrets/secrets_test.go similarity index 100% rename from v2/internal/secrets/secrets_test.go rename to internal/secrets/secrets_test.go diff --git a/v2/message/message.go b/message/message.go similarity index 100% rename from v2/message/message.go rename to message/message.go diff --git a/v1/message/message_test.go b/message/message_test.go similarity index 100% rename from v1/message/message_test.go rename to message/message_test.go diff --git a/v2/substation.go b/substation.go similarity index 100% rename from v2/substation.go rename to substation.go diff --git a/v2/substation.libsonnet b/substation.libsonnet similarity index 100% rename from v2/substation.libsonnet rename to substation.libsonnet diff --git a/v2/substation_test.go b/substation_test.go similarity index 100% rename from v2/substation_test.go rename to substation_test.go diff --git a/v2/substation_test.jsonnet b/substation_test.jsonnet similarity index 100% rename from v2/substation_test.jsonnet rename to substation_test.jsonnet diff --git a/v1/transform/README.md b/transform/README.md similarity index 100% rename from v1/transform/README.md rename to transform/README.md diff --git a/v2/transform/aggregate.go b/transform/aggregate.go similarity index 100% rename from v2/transform/aggregate.go rename to transform/aggregate.go diff --git a/v2/transform/aggregate_from_array.go b/transform/aggregate_from_array.go similarity index 100% rename from v2/transform/aggregate_from_array.go rename to transform/aggregate_from_array.go diff --git a/v2/transform/aggregate_from_array_test.go b/transform/aggregate_from_array_test.go similarity index 100% rename from v2/transform/aggregate_from_array_test.go rename to transform/aggregate_from_array_test.go diff --git a/v2/transform/aggregate_from_string.go b/transform/aggregate_from_string.go similarity index 100% rename from v2/transform/aggregate_from_string.go rename to transform/aggregate_from_string.go diff --git a/v2/transform/aggregate_from_string_test.go b/transform/aggregate_from_string_test.go similarity index 100% rename from v2/transform/aggregate_from_string_test.go rename to transform/aggregate_from_string_test.go diff --git a/v2/transform/aggregate_to_array.go b/transform/aggregate_to_array.go similarity index 100% rename from v2/transform/aggregate_to_array.go rename to transform/aggregate_to_array.go diff --git a/v2/transform/aggregate_to_array_test.go b/transform/aggregate_to_array_test.go similarity index 100% rename from v2/transform/aggregate_to_array_test.go rename to 
transform/aggregate_to_array_test.go diff --git a/v2/transform/aggregate_to_string.go b/transform/aggregate_to_string.go similarity index 100% rename from v2/transform/aggregate_to_string.go rename to transform/aggregate_to_string.go diff --git a/v2/transform/aggregate_to_string_test.go b/transform/aggregate_to_string_test.go similarity index 100% rename from v2/transform/aggregate_to_string_test.go rename to transform/aggregate_to_string_test.go diff --git a/v2/transform/array_join.go b/transform/array_join.go similarity index 100% rename from v2/transform/array_join.go rename to transform/array_join.go diff --git a/v2/transform/array_join_test.go b/transform/array_join_test.go similarity index 100% rename from v2/transform/array_join_test.go rename to transform/array_join_test.go diff --git a/v2/transform/array_zip.go b/transform/array_zip.go similarity index 100% rename from v2/transform/array_zip.go rename to transform/array_zip.go diff --git a/v2/transform/array_zip_test.go b/transform/array_zip_test.go similarity index 100% rename from v2/transform/array_zip_test.go rename to transform/array_zip_test.go diff --git a/v2/transform/enrich.go b/transform/enrich.go similarity index 100% rename from v2/transform/enrich.go rename to transform/enrich.go diff --git a/v2/transform/enrich_aws_dynamodb_query.go b/transform/enrich_aws_dynamodb_query.go similarity index 100% rename from v2/transform/enrich_aws_dynamodb_query.go rename to transform/enrich_aws_dynamodb_query.go diff --git a/v2/transform/enrich_aws_dynamodb_query_test.go b/transform/enrich_aws_dynamodb_query_test.go similarity index 100% rename from v2/transform/enrich_aws_dynamodb_query_test.go rename to transform/enrich_aws_dynamodb_query_test.go diff --git a/v2/transform/enrich_aws_lambda.go b/transform/enrich_aws_lambda.go similarity index 100% rename from v2/transform/enrich_aws_lambda.go rename to transform/enrich_aws_lambda.go diff --git a/v2/transform/enrich_aws_lambda_test.go b/transform/enrich_aws_lambda_test.go similarity index 100% rename from v2/transform/enrich_aws_lambda_test.go rename to transform/enrich_aws_lambda_test.go diff --git a/v2/transform/enrich_dns_domain_lookup.go b/transform/enrich_dns_domain_lookup.go similarity index 100% rename from v2/transform/enrich_dns_domain_lookup.go rename to transform/enrich_dns_domain_lookup.go diff --git a/v2/transform/enrich_dns_ip_lookup.go b/transform/enrich_dns_ip_lookup.go similarity index 100% rename from v2/transform/enrich_dns_ip_lookup.go rename to transform/enrich_dns_ip_lookup.go diff --git a/v2/transform/enrich_dns_txt_lookup.go b/transform/enrich_dns_txt_lookup.go similarity index 100% rename from v2/transform/enrich_dns_txt_lookup.go rename to transform/enrich_dns_txt_lookup.go diff --git a/v2/transform/enrich_http_get.go b/transform/enrich_http_get.go similarity index 100% rename from v2/transform/enrich_http_get.go rename to transform/enrich_http_get.go diff --git a/v2/transform/enrich_http_post.go b/transform/enrich_http_post.go similarity index 100% rename from v2/transform/enrich_http_post.go rename to transform/enrich_http_post.go diff --git a/v2/transform/enrich_kv_store_item_get.go b/transform/enrich_kv_store_item_get.go similarity index 100% rename from v2/transform/enrich_kv_store_item_get.go rename to transform/enrich_kv_store_item_get.go diff --git a/v2/transform/enrich_kv_store_item_set.go b/transform/enrich_kv_store_item_set.go similarity index 100% rename from v2/transform/enrich_kv_store_item_set.go rename to transform/enrich_kv_store_item_set.go 
diff --git a/v2/transform/enrich_kv_store_set_add.go b/transform/enrich_kv_store_set_add.go similarity index 100% rename from v2/transform/enrich_kv_store_set_add.go rename to transform/enrich_kv_store_set_add.go diff --git a/v2/transform/format.go b/transform/format.go similarity index 100% rename from v2/transform/format.go rename to transform/format.go diff --git a/v2/transform/format_from_base64.go b/transform/format_from_base64.go similarity index 100% rename from v2/transform/format_from_base64.go rename to transform/format_from_base64.go diff --git a/v2/transform/format_from_base64_test.go b/transform/format_from_base64_test.go similarity index 100% rename from v2/transform/format_from_base64_test.go rename to transform/format_from_base64_test.go diff --git a/v2/transform/format_from_gzip.go b/transform/format_from_gzip.go similarity index 100% rename from v2/transform/format_from_gzip.go rename to transform/format_from_gzip.go diff --git a/v2/transform/format_from_gzip_test.go b/transform/format_from_gzip_test.go similarity index 100% rename from v2/transform/format_from_gzip_test.go rename to transform/format_from_gzip_test.go diff --git a/v2/transform/format_from_pretty_print.go b/transform/format_from_pretty_print.go similarity index 100% rename from v2/transform/format_from_pretty_print.go rename to transform/format_from_pretty_print.go diff --git a/v2/transform/format_from_pretty_print_test.go b/transform/format_from_pretty_print_test.go similarity index 100% rename from v2/transform/format_from_pretty_print_test.go rename to transform/format_from_pretty_print_test.go diff --git a/v2/transform/format_from_zip.go b/transform/format_from_zip.go similarity index 100% rename from v2/transform/format_from_zip.go rename to transform/format_from_zip.go diff --git a/v2/transform/format_from_zip_test.go b/transform/format_from_zip_test.go similarity index 100% rename from v2/transform/format_from_zip_test.go rename to transform/format_from_zip_test.go diff --git a/v2/transform/format_to_base64.go b/transform/format_to_base64.go similarity index 100% rename from v2/transform/format_to_base64.go rename to transform/format_to_base64.go diff --git a/v2/transform/format_to_base64_test.go b/transform/format_to_base64_test.go similarity index 100% rename from v2/transform/format_to_base64_test.go rename to transform/format_to_base64_test.go diff --git a/v2/transform/format_to_gzip.go b/transform/format_to_gzip.go similarity index 100% rename from v2/transform/format_to_gzip.go rename to transform/format_to_gzip.go diff --git a/v2/transform/format_to_gzip_test.go b/transform/format_to_gzip_test.go similarity index 100% rename from v2/transform/format_to_gzip_test.go rename to transform/format_to_gzip_test.go diff --git a/v2/transform/hash.go b/transform/hash.go similarity index 100% rename from v2/transform/hash.go rename to transform/hash.go diff --git a/v2/transform/hash_md5.go b/transform/hash_md5.go similarity index 100% rename from v2/transform/hash_md5.go rename to transform/hash_md5.go diff --git a/v2/transform/hash_md5_test.go b/transform/hash_md5_test.go similarity index 100% rename from v2/transform/hash_md5_test.go rename to transform/hash_md5_test.go diff --git a/v2/transform/hash_sha256.go b/transform/hash_sha256.go similarity index 100% rename from v2/transform/hash_sha256.go rename to transform/hash_sha256.go diff --git a/v2/transform/hash_sha256_test.go b/transform/hash_sha256_test.go similarity index 100% rename from v2/transform/hash_sha256_test.go rename to 
transform/hash_sha256_test.go diff --git a/v2/transform/meta_err.go b/transform/meta_err.go similarity index 100% rename from v2/transform/meta_err.go rename to transform/meta_err.go diff --git a/v2/transform/meta_err_test.go b/transform/meta_err_test.go similarity index 100% rename from v2/transform/meta_err_test.go rename to transform/meta_err_test.go diff --git a/v2/transform/meta_for_each.go b/transform/meta_for_each.go similarity index 100% rename from v2/transform/meta_for_each.go rename to transform/meta_for_each.go diff --git a/v2/transform/meta_for_each_test.go b/transform/meta_for_each_test.go similarity index 100% rename from v2/transform/meta_for_each_test.go rename to transform/meta_for_each_test.go diff --git a/v2/transform/meta_kv_store_lock.go b/transform/meta_kv_store_lock.go similarity index 100% rename from v2/transform/meta_kv_store_lock.go rename to transform/meta_kv_store_lock.go diff --git a/v2/transform/meta_metric_duration.go b/transform/meta_metric_duration.go similarity index 100% rename from v2/transform/meta_metric_duration.go rename to transform/meta_metric_duration.go diff --git a/v2/transform/meta_retry.go b/transform/meta_retry.go similarity index 100% rename from v2/transform/meta_retry.go rename to transform/meta_retry.go diff --git a/v2/transform/meta_switch.go b/transform/meta_switch.go similarity index 100% rename from v2/transform/meta_switch.go rename to transform/meta_switch.go diff --git a/v2/transform/meta_switch_test.go b/transform/meta_switch_test.go similarity index 100% rename from v2/transform/meta_switch_test.go rename to transform/meta_switch_test.go diff --git a/v2/transform/network.go b/transform/network.go similarity index 100% rename from v2/transform/network.go rename to transform/network.go diff --git a/v2/transform/network_domain_registered_domain.go b/transform/network_domain_registered_domain.go similarity index 100% rename from v2/transform/network_domain_registered_domain.go rename to transform/network_domain_registered_domain.go diff --git a/v2/transform/network_domain_registered_domain_test.go b/transform/network_domain_registered_domain_test.go similarity index 100% rename from v2/transform/network_domain_registered_domain_test.go rename to transform/network_domain_registered_domain_test.go diff --git a/v2/transform/network_domain_subdomain.go b/transform/network_domain_subdomain.go similarity index 100% rename from v2/transform/network_domain_subdomain.go rename to transform/network_domain_subdomain.go diff --git a/v2/transform/network_domain_subdomain_test.go b/transform/network_domain_subdomain_test.go similarity index 100% rename from v2/transform/network_domain_subdomain_test.go rename to transform/network_domain_subdomain_test.go diff --git a/v2/transform/network_domain_top_level_domain.go b/transform/network_domain_top_level_domain.go similarity index 100% rename from v2/transform/network_domain_top_level_domain.go rename to transform/network_domain_top_level_domain.go diff --git a/v2/transform/network_domain_top_level_domain_test.go b/transform/network_domain_top_level_domain_test.go similarity index 100% rename from v2/transform/network_domain_top_level_domain_test.go rename to transform/network_domain_top_level_domain_test.go diff --git a/v2/transform/number.go b/transform/number.go similarity index 100% rename from v2/transform/number.go rename to transform/number.go diff --git a/v2/transform/number_math_addition.go b/transform/number_math_addition.go similarity index 100% rename from 
v2/transform/number_math_addition.go rename to transform/number_math_addition.go diff --git a/v2/transform/number_math_addition_test.go b/transform/number_math_addition_test.go similarity index 100% rename from v2/transform/number_math_addition_test.go rename to transform/number_math_addition_test.go diff --git a/v2/transform/number_math_division.go b/transform/number_math_division.go similarity index 100% rename from v2/transform/number_math_division.go rename to transform/number_math_division.go diff --git a/v2/transform/number_math_division_test.go b/transform/number_math_division_test.go similarity index 100% rename from v2/transform/number_math_division_test.go rename to transform/number_math_division_test.go diff --git a/v2/transform/number_math_multiplication.go b/transform/number_math_multiplication.go similarity index 100% rename from v2/transform/number_math_multiplication.go rename to transform/number_math_multiplication.go diff --git a/v2/transform/number_math_multiplication_test.go b/transform/number_math_multiplication_test.go similarity index 100% rename from v2/transform/number_math_multiplication_test.go rename to transform/number_math_multiplication_test.go diff --git a/v2/transform/number_math_subtraction.go b/transform/number_math_subtraction.go similarity index 100% rename from v2/transform/number_math_subtraction.go rename to transform/number_math_subtraction.go diff --git a/v2/transform/number_math_subtraction_test.go b/transform/number_math_subtraction_test.go similarity index 100% rename from v2/transform/number_math_subtraction_test.go rename to transform/number_math_subtraction_test.go diff --git a/v2/transform/number_maximum.go b/transform/number_maximum.go similarity index 100% rename from v2/transform/number_maximum.go rename to transform/number_maximum.go diff --git a/v2/transform/number_maximum_test.go b/transform/number_maximum_test.go similarity index 100% rename from v2/transform/number_maximum_test.go rename to transform/number_maximum_test.go diff --git a/v2/transform/number_minimum.go b/transform/number_minimum.go similarity index 100% rename from v2/transform/number_minimum.go rename to transform/number_minimum.go diff --git a/v2/transform/number_minimum_test.go b/transform/number_minimum_test.go similarity index 100% rename from v2/transform/number_minimum_test.go rename to transform/number_minimum_test.go diff --git a/v2/transform/object_copy.go b/transform/object_copy.go similarity index 100% rename from v2/transform/object_copy.go rename to transform/object_copy.go diff --git a/v2/transform/object_copy_test.go b/transform/object_copy_test.go similarity index 100% rename from v2/transform/object_copy_test.go rename to transform/object_copy_test.go diff --git a/v2/transform/object_delete.go b/transform/object_delete.go similarity index 100% rename from v2/transform/object_delete.go rename to transform/object_delete.go diff --git a/v2/transform/object_delete_test.go b/transform/object_delete_test.go similarity index 100% rename from v2/transform/object_delete_test.go rename to transform/object_delete_test.go diff --git a/v2/transform/object_insert.go b/transform/object_insert.go similarity index 100% rename from v2/transform/object_insert.go rename to transform/object_insert.go diff --git a/v2/transform/object_insert_test.go b/transform/object_insert_test.go similarity index 100% rename from v2/transform/object_insert_test.go rename to transform/object_insert_test.go diff --git a/v2/transform/object_jq.go b/transform/object_jq.go similarity index 
100% rename from v2/transform/object_jq.go rename to transform/object_jq.go diff --git a/v2/transform/object_jq_test.go b/transform/object_jq_test.go similarity index 100% rename from v2/transform/object_jq_test.go rename to transform/object_jq_test.go diff --git a/v2/transform/object_to_boolean.go b/transform/object_to_boolean.go similarity index 100% rename from v2/transform/object_to_boolean.go rename to transform/object_to_boolean.go diff --git a/v2/transform/object_to_boolean_test.go b/transform/object_to_boolean_test.go similarity index 100% rename from v2/transform/object_to_boolean_test.go rename to transform/object_to_boolean_test.go diff --git a/v2/transform/object_to_float.go b/transform/object_to_float.go similarity index 100% rename from v2/transform/object_to_float.go rename to transform/object_to_float.go diff --git a/v2/transform/object_to_float_test.go b/transform/object_to_float_test.go similarity index 100% rename from v2/transform/object_to_float_test.go rename to transform/object_to_float_test.go diff --git a/v2/transform/object_to_integer.go b/transform/object_to_integer.go similarity index 100% rename from v2/transform/object_to_integer.go rename to transform/object_to_integer.go diff --git a/v2/transform/object_to_integer_test.go b/transform/object_to_integer_test.go similarity index 100% rename from v2/transform/object_to_integer_test.go rename to transform/object_to_integer_test.go diff --git a/v2/transform/object_to_string.go b/transform/object_to_string.go similarity index 100% rename from v2/transform/object_to_string.go rename to transform/object_to_string.go diff --git a/v2/transform/object_to_string_test.go b/transform/object_to_string_test.go similarity index 100% rename from v2/transform/object_to_string_test.go rename to transform/object_to_string_test.go diff --git a/v2/transform/object_to_unsigned_integer.go b/transform/object_to_unsigned_integer.go similarity index 100% rename from v2/transform/object_to_unsigned_integer.go rename to transform/object_to_unsigned_integer.go diff --git a/v2/transform/object_to_unsigned_integer_test.go b/transform/object_to_unsigned_integer_test.go similarity index 100% rename from v2/transform/object_to_unsigned_integer_test.go rename to transform/object_to_unsigned_integer_test.go diff --git a/v2/transform/send.go b/transform/send.go similarity index 100% rename from v2/transform/send.go rename to transform/send.go diff --git a/v2/transform/send_aws_dynamodb_put.go b/transform/send_aws_dynamodb_put.go similarity index 100% rename from v2/transform/send_aws_dynamodb_put.go rename to transform/send_aws_dynamodb_put.go diff --git a/v2/transform/send_aws_eventbridge.go b/transform/send_aws_eventbridge.go similarity index 100% rename from v2/transform/send_aws_eventbridge.go rename to transform/send_aws_eventbridge.go diff --git a/v2/transform/send_aws_kinesis_data_firehose.go b/transform/send_aws_kinesis_data_firehose.go similarity index 100% rename from v2/transform/send_aws_kinesis_data_firehose.go rename to transform/send_aws_kinesis_data_firehose.go diff --git a/v2/transform/send_aws_kinesis_data_stream.go b/transform/send_aws_kinesis_data_stream.go similarity index 100% rename from v2/transform/send_aws_kinesis_data_stream.go rename to transform/send_aws_kinesis_data_stream.go diff --git a/v2/transform/send_aws_lambda.go b/transform/send_aws_lambda.go similarity index 100% rename from v2/transform/send_aws_lambda.go rename to transform/send_aws_lambda.go diff --git a/v2/transform/send_aws_s3.go 
b/transform/send_aws_s3.go similarity index 100% rename from v2/transform/send_aws_s3.go rename to transform/send_aws_s3.go diff --git a/v2/transform/send_aws_sns.go b/transform/send_aws_sns.go similarity index 100% rename from v2/transform/send_aws_sns.go rename to transform/send_aws_sns.go diff --git a/v2/transform/send_aws_sqs.go b/transform/send_aws_sqs.go similarity index 100% rename from v2/transform/send_aws_sqs.go rename to transform/send_aws_sqs.go diff --git a/v2/transform/send_file.go b/transform/send_file.go similarity index 100% rename from v2/transform/send_file.go rename to transform/send_file.go diff --git a/v2/transform/send_http_post.go b/transform/send_http_post.go similarity index 100% rename from v2/transform/send_http_post.go rename to transform/send_http_post.go diff --git a/v2/transform/send_stdout.go b/transform/send_stdout.go similarity index 100% rename from v2/transform/send_stdout.go rename to transform/send_stdout.go diff --git a/v2/transform/string.go b/transform/string.go similarity index 100% rename from v2/transform/string.go rename to transform/string.go diff --git a/v2/transform/string_append.go b/transform/string_append.go similarity index 100% rename from v2/transform/string_append.go rename to transform/string_append.go diff --git a/v2/transform/string_append_test.go b/transform/string_append_test.go similarity index 100% rename from v2/transform/string_append_test.go rename to transform/string_append_test.go diff --git a/v2/transform/string_capture.go b/transform/string_capture.go similarity index 100% rename from v2/transform/string_capture.go rename to transform/string_capture.go diff --git a/v2/transform/string_capture_test.go b/transform/string_capture_test.go similarity index 100% rename from v2/transform/string_capture_test.go rename to transform/string_capture_test.go diff --git a/v2/transform/string_replace.go b/transform/string_replace.go similarity index 100% rename from v2/transform/string_replace.go rename to transform/string_replace.go diff --git a/v2/transform/string_replace_test.go b/transform/string_replace_test.go similarity index 100% rename from v2/transform/string_replace_test.go rename to transform/string_replace_test.go diff --git a/v2/transform/string_split.go b/transform/string_split.go similarity index 100% rename from v2/transform/string_split.go rename to transform/string_split.go diff --git a/v2/transform/string_split_test.go b/transform/string_split_test.go similarity index 100% rename from v2/transform/string_split_test.go rename to transform/string_split_test.go diff --git a/v2/transform/string_to_lower.go b/transform/string_to_lower.go similarity index 100% rename from v2/transform/string_to_lower.go rename to transform/string_to_lower.go diff --git a/v2/transform/string_to_lower_test.go b/transform/string_to_lower_test.go similarity index 100% rename from v2/transform/string_to_lower_test.go rename to transform/string_to_lower_test.go diff --git a/v2/transform/string_to_snake.go b/transform/string_to_snake.go similarity index 100% rename from v2/transform/string_to_snake.go rename to transform/string_to_snake.go diff --git a/v2/transform/string_to_snake_test.go b/transform/string_to_snake_test.go similarity index 100% rename from v2/transform/string_to_snake_test.go rename to transform/string_to_snake_test.go diff --git a/v2/transform/string_to_upper.go b/transform/string_to_upper.go similarity index 100% rename from v2/transform/string_to_upper.go rename to transform/string_to_upper.go diff --git 
a/v2/transform/string_to_upper_test.go b/transform/string_to_upper_test.go similarity index 100% rename from v2/transform/string_to_upper_test.go rename to transform/string_to_upper_test.go diff --git a/v2/transform/string_uuid.go b/transform/string_uuid.go similarity index 100% rename from v2/transform/string_uuid.go rename to transform/string_uuid.go diff --git a/v2/transform/time.go b/transform/time.go similarity index 100% rename from v2/transform/time.go rename to transform/time.go diff --git a/v2/transform/time_from_string.go b/transform/time_from_string.go similarity index 100% rename from v2/transform/time_from_string.go rename to transform/time_from_string.go diff --git a/v2/transform/time_from_string_test.go b/transform/time_from_string_test.go similarity index 100% rename from v2/transform/time_from_string_test.go rename to transform/time_from_string_test.go diff --git a/v2/transform/time_from_unix.go b/transform/time_from_unix.go similarity index 100% rename from v2/transform/time_from_unix.go rename to transform/time_from_unix.go diff --git a/v2/transform/time_from_unix_milli.go b/transform/time_from_unix_milli.go similarity index 100% rename from v2/transform/time_from_unix_milli.go rename to transform/time_from_unix_milli.go diff --git a/v2/transform/time_from_unix_milli_test.go b/transform/time_from_unix_milli_test.go similarity index 100% rename from v2/transform/time_from_unix_milli_test.go rename to transform/time_from_unix_milli_test.go diff --git a/v2/transform/time_from_unix_test.go b/transform/time_from_unix_test.go similarity index 100% rename from v2/transform/time_from_unix_test.go rename to transform/time_from_unix_test.go diff --git a/v2/transform/time_now.go b/transform/time_now.go similarity index 100% rename from v2/transform/time_now.go rename to transform/time_now.go diff --git a/v2/transform/time_to_string.go b/transform/time_to_string.go similarity index 100% rename from v2/transform/time_to_string.go rename to transform/time_to_string.go diff --git a/v2/transform/time_to_string_test.go b/transform/time_to_string_test.go similarity index 100% rename from v2/transform/time_to_string_test.go rename to transform/time_to_string_test.go diff --git a/v2/transform/time_to_unix.go b/transform/time_to_unix.go similarity index 100% rename from v2/transform/time_to_unix.go rename to transform/time_to_unix.go diff --git a/v2/transform/time_to_unix_milli.go b/transform/time_to_unix_milli.go similarity index 100% rename from v2/transform/time_to_unix_milli.go rename to transform/time_to_unix_milli.go diff --git a/v2/transform/time_to_unix_milli_test.go b/transform/time_to_unix_milli_test.go similarity index 100% rename from v2/transform/time_to_unix_milli_test.go rename to transform/time_to_unix_milli_test.go diff --git a/v2/transform/time_to_unix_test.go b/transform/time_to_unix_test.go similarity index 100% rename from v2/transform/time_to_unix_test.go rename to transform/time_to_unix_test.go diff --git a/v2/transform/transform.go b/transform/transform.go similarity index 100% rename from v2/transform/transform.go rename to transform/transform.go diff --git a/v2/transform/transform_example_test.go b/transform/transform_example_test.go similarity index 100% rename from v2/transform/transform_example_test.go rename to transform/transform_example_test.go diff --git a/v2/transform/transform_test.go b/transform/transform_test.go similarity index 100% rename from v2/transform/transform_test.go rename to transform/transform_test.go diff --git 
a/v2/transform/utility_control.go b/transform/utility_control.go
similarity index 100%
rename from v2/transform/utility_control.go
rename to transform/utility_control.go
diff --git a/v2/transform/utility_delay.go b/transform/utility_delay.go
similarity index 100%
rename from v2/transform/utility_delay.go
rename to transform/utility_delay.go
diff --git a/v2/transform/utility_drop.go b/transform/utility_drop.go
similarity index 100%
rename from v2/transform/utility_drop.go
rename to transform/utility_drop.go
diff --git a/v2/transform/utility_err.go b/transform/utility_err.go
similarity index 100%
rename from v2/transform/utility_err.go
rename to transform/utility_err.go
diff --git a/v2/transform/utility_metric_bytes.go b/transform/utility_metric_bytes.go
similarity index 100%
rename from v2/transform/utility_metric_bytes.go
rename to transform/utility_metric_bytes.go
diff --git a/v2/transform/utility_metric_count.go b/transform/utility_metric_count.go
similarity index 100%
rename from v2/transform/utility_metric_count.go
rename to transform/utility_metric_count.go
diff --git a/v2/transform/utility_metric_freshness.go b/transform/utility_metric_freshness.go
similarity index 100%
rename from v2/transform/utility_metric_freshness.go
rename to transform/utility_metric_freshness.go
diff --git a/v2/transform/utility_secret.go b/transform/utility_secret.go
similarity index 100%
rename from v2/transform/utility_secret.go
rename to transform/utility_secret.go
diff --git a/v1/cmd/aws/lambda/autoscale/main.go b/v1/cmd/aws/lambda/autoscale/main.go
deleted file mode 100644
index 9a83b998..00000000
--- a/v1/cmd/aws/lambda/autoscale/main.go
+++ /dev/null
@@ -1,183 +0,0 @@
-package main
-
-import (
-	"context"
-	"fmt"
-	"math"
-	"strconv"
-	"strings"
-	"time"
-
-	"github.com/aws/aws-lambda-go/events"
-	"github.com/aws/aws-lambda-go/lambda"
-	"github.com/brexhq/substation/internal/aws"
-	"github.com/brexhq/substation/internal/aws/cloudwatch"
-	"github.com/brexhq/substation/internal/aws/kinesis"
-	"github.com/brexhq/substation/internal/log"
-	"github.com/tidwall/gjson"
-)
-
-var (
-	cloudwatchAPI cloudwatch.API
-	kinesisAPI    kinesis.API
-)
-
-func init() {
-	// These must run in the same AWS account and region as the Lambda function.
-	cloudwatchAPI.Setup(aws.Config{})
-	kinesisAPI.Setup(aws.Config{})
-}
-
-func main() {
-	lambda.Start(handler)
-}
-
-func handler(ctx context.Context, snsEvent events.SNSEvent) error {
-	payload := snsEvent.Records[0].SNS
-	topicArn := payload.TopicArn
-	message := payload.Message
-
-	alarmName := gjson.Get(message, "AlarmName").String()
-	triggerMetrics := gjson.Get(message, "Trigger.Metrics")
-
-	log.WithField("alarm", alarmName).Debug("Received autoscale notification.")
-
-	var stream string
-	for _, v := range triggerMetrics.Array() {
-		id := gjson.Get(v.String(), "Id").String()
-		if id == "m1" || id == "m2" {
-			stream = gjson.Get(v.String(), "MetricStat.Metric.Dimensions.0.value").String()
-			break
-		}
-	}
-	log.WithField("alarm", alarmName).WithField("stream", stream).Debug("Parsed Kinesis stream.")
-
-	shards, err := kinesisAPI.ActiveShards(ctx, stream)
-	if err != nil {
-		return fmt.Errorf("handler: %v", err)
-	}
-	log.WithField("alarm", alarmName).WithField("stream", stream).WithField("count", shards).
-		Info("Retrieved active shard count.")
-
-	var newShards int64
-	if strings.Contains(alarmName, "upscale") {
-		newShards = upscale(float64(shards))
-	}
-	if strings.Contains(alarmName, "downscale") {
-		newShards = downscale(float64(shards))
-	}
-
-	log.WithField("alarm", alarmName).WithField("stream", stream).WithField("count", newShards).Info("Calculated new shard count.")
-
-	tags, err := kinesisAPI.GetTags(ctx, stream)
-	if err != nil {
-		return fmt.Errorf("handler: %v", err)
-	}
-
-	var minShard, maxShard int64
-	for _, tag := range tags {
-		if *tag.Key == "MinimumShards" {
-			minShard, err = strconv.ParseInt(*tag.Value, 10, 64)
-			if err != nil {
-				return fmt.Errorf("handler: %v", err)
-			}
-
-			log.WithField("stream", stream).WithField("count", minShard).Debug("Retrieved minimum shard count.")
-		}
-
-		if *tag.Key == "MaximumShards" {
-			maxShard, err = strconv.ParseInt(*tag.Value, 10, 64)
-			if err != nil {
-				return fmt.Errorf("handler: %v", err)
-			}
-
-			log.WithField("stream", stream).WithField("count", maxShard).Debug("Retrieved maximum shard count.")
-		}
-
-		// Tracking the last scaling event prevents scaling from occurring too frequently.
-		// If the current scaling event is an upscale, then the last scaling event must be at least 3 minutes ago.
-		// If the current scaling event is a downscale, then the last scaling event must be at least 30 minutes ago.
-		if *tag.Key == "LastScalingEvent" {
-			lastScalingEvent, err := time.Parse(time.RFC3339, *tag.Value)
-			if err != nil {
-				return fmt.Errorf("handler: %v", err)
-			}
-
-			if (time.Since(lastScalingEvent) < 3*time.Minute && strings.Contains(alarmName, "upscale")) ||
-				(time.Since(lastScalingEvent) < 30*time.Minute && strings.Contains(alarmName, "downscale")) {
-				log.WithField("stream", stream).WithField("time", lastScalingEvent).Info("Last scaling event is too recent.")
-
-				if err := cloudwatchAPI.UpdateKinesisAlarmState(ctx, alarmName, "Last scaling event is too recent"); err != nil {
-					return fmt.Errorf("handler: %v", err)
-				}
-
-				return nil
-			}
-		}
-	}
-
-	if minShard != 0 && newShards < minShard {
-		newShards = minShard
-	}
-
-	if maxShard != 0 && newShards > maxShard {
-		newShards = maxShard
-	}
-
-	if newShards < 1 {
-		newShards = 1
-	}
-
-	if newShards == shards {
-		log.WithField("alarm", alarmName).WithField("stream", stream).WithField("count", shards).Info("Active shard count is at minimum threshold, no change is required.")
-		return nil
-	}
-
-	if err := kinesisAPI.UpdateShards(ctx, stream, newShards); err != nil {
-		return fmt.Errorf("handler: %v", err)
-	}
-
-	if err := kinesisAPI.UpdateTag(ctx, stream, "LastScalingEvent", time.Now().Format(time.RFC3339)); err != nil {
-		return fmt.Errorf("handler: %v", err)
-	}
-
-	log.WithField("alarm", alarmName).WithField("stream", stream).WithField("count", newShards).Info("Updated shard count.")
-
-	if err := cloudwatchAPI.UpdateKinesisDownscaleAlarm(ctx, stream+"_downscale", stream, topicArn, newShards); err != nil {
-		return fmt.Errorf("handler: %v", err)
-	}
-	log.WithField("alarm", stream+"_downscale").WithField("stream", stream).WithField("count", newShards).Debug("Reset CloudWatch alarm.")
-
-	if err := cloudwatchAPI.UpdateKinesisUpscaleAlarm(ctx, stream+"_upscale", stream, topicArn, newShards); err != nil {
-		return fmt.Errorf("handler: %v", err)
-	}
-	log.WithField("alarm", stream+"_upscale").WithField("stream", stream).WithField("count", newShards).Debug("Reset CloudWatch alarm.")
-
-	return nil
-}
-
-func downscale(shards float64) int64 {
-	switch {
-	case shards < 5:
-		return int64(math.Ceil(shards / 2))
-	case shards < 13:
-		return int64(math.Ceil(shards / 1.75))
-	case shards < 33:
-		return int64(math.Ceil(shards / 1.5))
-	default:
-		return int64(math.Ceil(shards / 1.25))
-	}
-}
-
-func upscale(shards float64) int64 {
-	switch {
-	case shards < 5:
-		return int64(math.Floor(shards * 2))
-	case shards < 13:
-		return int64(math.Floor(shards * 1.75))
-	case shards < 33:
-		return int64(math.Floor(shards * 1.5))
-	default:
-		return int64(math.Floor(shards * 1.25))
-	}
-}
int64(math.Ceil(shards / 2)) - case shards < 13: - return int64(math.Ceil(shards / 1.75)) - case shards < 33: - return int64(math.Ceil(shards / 1.5)) - default: - return int64(math.Ceil(shards / 1.25)) - } -} - -func upscale(shards float64) int64 { - switch { - case shards < 5: - return int64(math.Floor(shards * 2)) - case shards < 13: - return int64(math.Floor(shards * 1.75)) - case shards < 33: - return int64(math.Floor(shards * 1.5)) - default: - return int64(math.Floor(shards * 1.25)) - } -} diff --git a/v1/cmd/aws/lambda/substation/api_gateway.go b/v1/cmd/aws/lambda/substation/api_gateway.go deleted file mode 100644 index 672c3e6a..00000000 --- a/v1/cmd/aws/lambda/substation/api_gateway.go +++ /dev/null @@ -1,89 +0,0 @@ -package main - -import ( - "context" - "encoding/json" - - "github.com/aws/aws-lambda-go/events" - "github.com/brexhq/substation" - "github.com/brexhq/substation/message" -) - -var gateway500Response = events.APIGatewayProxyResponse{StatusCode: 500} - -type gatewayMetadata struct { - Resource string `json:"resource"` - Path string `json:"path"` - Headers map[string]string `json:"headers"` -} - -func gatewayHandler(ctx context.Context, request events.APIGatewayProxyRequest) (events.APIGatewayProxyResponse, error) { - // Retrieve and load configuration. - conf, err := getConfig(ctx) - if err != nil { - return gateway500Response, err - } - - cfg := substation.Config{} - if err := json.NewDecoder(conf).Decode(&cfg); err != nil { - return gateway500Response, err - } - - sub, err := substation.New(ctx, cfg) - if err != nil { - return gateway500Response, err - } - - // Create metadata. - m := gatewayMetadata{ - Resource: request.Resource, - Path: request.Path, - Headers: request.Headers, - } - - metadata, err := json.Marshal(m) - if err != nil { - return gateway500Response, err - } - - b := []byte(request.Body) - msg := []*message.Message{ - message.New().SetData(b).SetMetadata(metadata), - message.New().AsControl(), - } - - res, err := sub.Transform(ctx, msg...) - if err != nil { - return gateway500Response, err - } - - // Convert transformed messages to a JSON array. 
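-	// Control messages are skipped and non-JSON data is rejected so that the
-	// response body is always a valid JSON array.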
- var output []json.RawMessage - for _, msg := range res { - if msg.IsControl() { - continue - } - - if !json.Valid(msg.Data()) { - return gateway500Response, errLambdaInvalidJSON - } - - var rm json.RawMessage - if err := json.Unmarshal(msg.Data(), &rm); err != nil { - return gateway500Response, err - } - - output = append(output, rm) - } - - body, err := json.Marshal(output) - if err != nil { - return gateway500Response, err - } - - return events.APIGatewayProxyResponse{ - StatusCode: 200, - Headers: map[string]string{"Content-Type": "application/json"}, - Body: string(body), - }, nil -} diff --git a/v1/cmd/aws/lambda/substation/dynamodb.go b/v1/cmd/aws/lambda/substation/dynamodb.go deleted file mode 100644 index 47763ce0..00000000 --- a/v1/cmd/aws/lambda/substation/dynamodb.go +++ /dev/null @@ -1,230 +0,0 @@ -package main - -import ( - "context" - "encoding/json" - "strings" - "time" - - "github.com/aws/aws-lambda-go/events" - "github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute" - "github.com/brexhq/substation" - "github.com/brexhq/substation/internal/aws/dynamodb" - "github.com/brexhq/substation/internal/channel" - "github.com/brexhq/substation/message" - "golang.org/x/sync/errgroup" -) - -type dynamodbMetadata struct { - ApproximateCreationDateTime time.Time `json:"approximateCreationDateTime"` - EventSourceArn string `json:"eventSourceArn"` - SequenceNumber string `json:"sequenceNumber"` - SizeBytes int64 `json:"sizeBytes"` - StreamViewType string `json:"streamViewType"` -} - -//nolint:gocognit, gocyclo, cyclop // Ignore cognitive and cyclomatic complexity. -func dynamodbHandler(ctx context.Context, event events.DynamoDBEvent) error { - // Retrieve and load configuration. - conf, err := getConfig(ctx) - if err != nil { - return err - } - - cfg := customConfig{} - if err := json.NewDecoder(conf).Decode(&cfg); err != nil { - return err - } - - sub, err := substation.New(ctx, cfg.Config) - if err != nil { - return err - } - - ch := channel.New[*message.Message]() - group, ctx := errgroup.WithContext(ctx) - - // Data transformation. Transforms are executed concurrently using a worker pool - // managed by an errgroup. Each message is processed in a separate goroutine. - group.Go(func() error { - tfGroup, tfCtx := errgroup.WithContext(ctx) - tfGroup.SetLimit(cfg.Concurrency) - - for message := range ch.Recv() { - select { - case <-ctx.Done(): - return ctx.Err() - default: - } - - msg := message - tfGroup.Go(func() error { - // Transformed messages are never returned to the caller because - // invocation is asynchronous. - if _, err := sub.Transform(tfCtx, msg); err != nil { - return err - } - - return nil - }) - } - - if err := tfGroup.Wait(); err != nil { - return err - } - - // CTRL messages flush the pipeline. This must be done - // after all messages have been processed. - ctrl := message.New().AsControl() - if _, err := sub.Transform(tfCtx, ctrl); err != nil { - return err - } - - return nil - }) - - // Data ingest. - group.Go(func() error { - defer ch.Close() - - // The DynamoDB table name is the second element of the slash-delimited Stream ARN. - // arn:aws:dynamodb:us-west-2:111122223333:table/TestTable/stream/2015-05-11T21:21:33.291 - table := strings.Split(event.Records[0].EventSourceArn, "/")[1] - - for _, record := range event.Records { - select { - case <-ctx.Done(): - return ctx.Err() - default: - } - - // Only records that contain image data (changes) are supported. 
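-			// KEYS_ONLY streams carry neither the old nor the new image of the
-			// item, so those records are skipped.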
- if record.Change.StreamViewType == "KEYS_ONLY" { - continue - } - - m := dynamodbMetadata{ - record.Change.ApproximateCreationDateTime.Time, - record.EventSourceArn, - record.Change.SequenceNumber, - record.Change.SizeBytes, - record.Change.StreamViewType, - } - metadata, err := json.Marshal(m) - if err != nil { - return err - } - - // DynamoDB record changes are converted to an object modeled similarly to - // schemas used in Debezium (https://debezium.io/): - // - // - If the View Type on the Stream is OLD_IMAGE, then the "after" field is always null. - // - If the View Type is NEW_IMAGE, then the "before" field is always null. - // - // Setting the View Type to NEW_AND_OLD_IMAGES is recommended for full visibility. - // - // For more information, see these examples from the Debezium documentation: - // - https://debezium.io/documentation/reference/1.2/connectors/mysql.html#mysql-change-event-value - // - https://debezium.io/documentation/reference/1.2/connectors/postgresql.html#postgresql-change-event-value - // - https://debezium.io/documentation/reference/1.2/connectors/sqlserver.html#sqlserver-change-event-value - // - // records are converted to this format: - // { - // "source": { - // "ts_ms": 0, - // "table": "table", - // "connector": "dynamodb" - // }, - // "ts_ms": 0, - // "op": "c", - // "before": { ... }, - // "after": { ... } - // } - msg := message.New().SetMetadata(metadata) - if err := msg.SetValue("source.ts_ms", record.Change.ApproximateCreationDateTime.Time.UnixMilli()); err != nil { - return err - } - - if err := msg.SetValue("source.table", table); err != nil { - return err - } - - if err := msg.SetValue("source.connector", "dynamodb"); err != nil { - return err - } - - if err := msg.SetValue("ts_ms", time.Now().UnixMilli()); err != nil { - return err - } - - // Maps the type of data modification to a Debezium operation string. - // Debezium operations that are relevant to DynamoDB are: - // - c: create (INSERT) - // - u: update (MODIFY) - // - d: delete (REMOVE) - switch record.EventName { - case "INSERT": - if err := msg.SetValue("op", "c"); err != nil { - return err - } - case "MODIFY": - if err := msg.SetValue("op", "u"); err != nil { - return err - } - case "REMOVE": - if err := msg.SetValue("op", "d"); err != nil { - return err - } - } - - // If either image is missing, then the value is set to null. - if record.Change.OldImage == nil { - if err := msg.SetValue("before", nil); err != nil { - return err - } - } else { - var before map[string]interface{} - if err = dynamodbattribute.UnmarshalMap( - dynamodb.ConvertEventsAttributeValueMap(record.Change.OldImage), - &before, - ); err != nil { - return err - } - - if err := msg.SetValue("before", before); err != nil { - return err - } - } - - if record.Change.NewImage == nil { - if err := msg.SetValue("after", nil); err != nil { - return err - } - } else { - var after map[string]interface{} - if err = dynamodbattribute.UnmarshalMap( - dynamodb.ConvertEventsAttributeValueMap(record.Change.NewImage), - &after, - ); err != nil { - return err - } - - if err := msg.SetValue("after", after); err != nil { - return err - } - } - - ch.Send(msg) - } - - return nil - }) - - // Wait for all goroutines to complete. This includes the goroutines that are - // executing the transform functions. 
- if err := group.Wait(); err != nil { - return err - } - - return nil -} diff --git a/v1/cmd/aws/lambda/substation/kinesis_firehose.go b/v1/cmd/aws/lambda/substation/kinesis_firehose.go deleted file mode 100644 index fab949b4..00000000 --- a/v1/cmd/aws/lambda/substation/kinesis_firehose.go +++ /dev/null @@ -1,71 +0,0 @@ -package main - -import ( - "context" - "encoding/json" - "time" - - "github.com/aws/aws-lambda-go/events" - "github.com/brexhq/substation" - "github.com/brexhq/substation/message" -) - -type firehoseMetadata struct { - ApproximateArrivalTimestamp time.Time `json:"approximateArrivalTimestamp"` - RecordId string `json:"recordId"` -} - -func firehoseHandler(ctx context.Context, event events.KinesisFirehoseEvent) (events.KinesisFirehoseResponse, error) { - var resp events.KinesisFirehoseResponse - - // Retrieve and load configuration. - conf, err := getConfig(ctx) - if err != nil { - return resp, err - } - - cfg := substation.Config{} - if err := json.NewDecoder(conf).Decode(&cfg); err != nil { - return resp, err - } - - sub, err := substation.New(ctx, cfg) - if err != nil { - return resp, err - } - - // Records are transformed individually. Firehose cannot produce multiple records - // from a single record, so the first transformed message is used and the rest - // are dropped. If no messages are produced, then the record is "dropped." - for _, record := range event.Records { - m := firehoseMetadata{ - ApproximateArrivalTimestamp: record.ApproximateArrivalTimestamp.Time, - RecordId: record.RecordID, - } - metadata, err := json.Marshal(m) - if err != nil { - return resp, err - } - - msg := message.New().SetData(record.Data).SetMetadata(metadata) - res, err := sub.Transform(ctx, msg) - if err != nil { - return resp, err - } - - if len(res) == 0 { - resp.Records = append(resp.Records, events.KinesisFirehoseResponseRecord{ - RecordID: record.RecordID, - Result: events.KinesisFirehoseTransformedStateDropped, - }) - } else { - resp.Records = append(resp.Records, events.KinesisFirehoseResponseRecord{ - RecordID: record.RecordID, - Result: events.KinesisFirehoseTransformedStateOk, - Data: res[0].Data(), - }) - } - } - - return resp, nil -} diff --git a/v1/cmd/aws/lambda/substation/kinesis_stream.go b/v1/cmd/aws/lambda/substation/kinesis_stream.go deleted file mode 100644 index db58beda..00000000 --- a/v1/cmd/aws/lambda/substation/kinesis_stream.go +++ /dev/null @@ -1,128 +0,0 @@ -package main - -import ( - "context" - "encoding/json" - "time" - - "github.com/aws/aws-lambda-go/events" - "github.com/awslabs/kinesis-aggregation/go/deaggregator" - "github.com/brexhq/substation" - "github.com/brexhq/substation/internal/aws/kinesis" - "github.com/brexhq/substation/internal/channel" - "github.com/brexhq/substation/message" - "golang.org/x/sync/errgroup" -) - -type kinesisStreamMetadata struct { - ApproximateArrivalTimestamp time.Time `json:"approximateArrivalTimestamp"` - Stream string `json:"stream"` - PartitionKey string `json:"partitionKey"` - SequenceNumber string `json:"sequenceNumber"` -} - -func kinesisStreamHandler(ctx context.Context, event events.KinesisEvent) error { - // Retrieve and load configuration. - conf, err := getConfig(ctx) - if err != nil { - return err - } - - cfg := customConfig{} - if err := json.NewDecoder(conf).Decode(&cfg); err != nil { - return err - } - - sub, err := substation.New(ctx, cfg.Config) - if err != nil { - return err - } - - ch := channel.New[*message.Message]() - group, ctx := errgroup.WithContext(ctx) - - // Data transformation. 
Transforms are executed concurrently using a worker pool - // managed by an errgroup. Each message is processed in a separate goroutine. - group.Go(func() error { - tfGroup, tfCtx := errgroup.WithContext(ctx) - tfGroup.SetLimit(cfg.Concurrency) - - for message := range ch.Recv() { - select { - case <-ctx.Done(): - return ctx.Err() - default: - } - - msg := message - tfGroup.Go(func() error { - // Transformed messages are never returned to the caller because - // invocation is asynchronous. - if _, err := sub.Transform(tfCtx, msg); err != nil { - return err - } - - return nil - }) - } - - if err := tfGroup.Wait(); err != nil { - return err - } - - // CTRL messages flush the pipeline. This must be done - // after all messages have been processed. - ctrl := message.New().AsControl() - if _, err := sub.Transform(ctx, ctrl); err != nil { - return err - } - - return nil - }) - - // Data ingest. - group.Go(func() error { - defer ch.Close() - - eventSourceArn := event.Records[len(event.Records)-1].EventSourceArn - converted := kinesis.ConvertEventsRecords(event.Records) - deaggregated, err := deaggregator.DeaggregateRecords(converted) - if err != nil { - return err - } - - for _, record := range deaggregated { - select { - case <-ctx.Done(): - return ctx.Err() - default: - } - - // Create Message metadata. - m := kinesisStreamMetadata{ - *record.ApproximateArrivalTimestamp, - eventSourceArn, - *record.PartitionKey, - *record.SequenceNumber, - } - - metadata, err := json.Marshal(m) - if err != nil { - return err - } - - msg := message.New().SetData(record.Data).SetMetadata(metadata) - ch.Send(msg) - } - - return nil - }) - - // Wait for all goroutines to complete. This includes the goroutines that are - // executing the transform functions. - if err := group.Wait(); err != nil { - return err - } - - return nil -} diff --git a/v1/cmd/aws/lambda/substation/lambda.go b/v1/cmd/aws/lambda/substation/lambda.go deleted file mode 100644 index 0cfa28d4..00000000 --- a/v1/cmd/aws/lambda/substation/lambda.go +++ /dev/null @@ -1,64 +0,0 @@ -package main - -import ( - "context" - "encoding/json" - - "github.com/brexhq/substation" - "github.com/brexhq/substation/message" -) - -func lambdaHandler(ctx context.Context, event json.RawMessage) ([]json.RawMessage, error) { - evt, err := json.Marshal(event) - if err != nil { - return nil, err - } - - // Retrieve and load configuration. - conf, err := getConfig(ctx) - if err != nil { - return nil, err - } - - cfg := substation.Config{} - if err := json.NewDecoder(conf).Decode(&cfg); err != nil { - return nil, err - } - - sub, err := substation.New(ctx, cfg) - if err != nil { - return nil, err - } - - // Data and ctrl messages are sent as a group. - msg := []*message.Message{ - message.New().SetData(evt), - message.New().AsControl(), - } - - res, err := sub.Transform(ctx, msg...) - if err != nil { - return nil, err - } - - // Convert transformed messages to a JSON array. 
- var output []json.RawMessage - for _, msg := range res { - if msg.IsControl() { - continue - } - - if !json.Valid(msg.Data()) { - return nil, errLambdaInvalidJSON - } - - var rm json.RawMessage - if err := json.Unmarshal(msg.Data(), &rm); err != nil { - return nil, err - } - - output = append(output, rm) - } - - return output, nil -} diff --git a/v1/cmd/aws/lambda/substation/main.go b/v1/cmd/aws/lambda/substation/main.go deleted file mode 100644 index 720677ef..00000000 --- a/v1/cmd/aws/lambda/substation/main.go +++ /dev/null @@ -1,94 +0,0 @@ -package main - -import ( - "bytes" - "context" - "fmt" - "io" - "os" - - "github.com/aws/aws-lambda-go/lambda" - "github.com/brexhq/substation" - "github.com/brexhq/substation/internal/file" -) - -var ( - handler string - - // errLambdaMissingHandler is returned when the Lambda is deployed without a configured handler. - errLambdaMissingHandler = fmt.Errorf("SUBSTATION_LAMBDA_HANDLER environment variable is missing") - - // errLambdaInvalidHandler is returned when the Lambda is deployed with an unsupported handler. - errLambdaInvalidHandler = fmt.Errorf("SUBSTATION_LAMBDA_HANDLER environment variable is invalid") - - // errLambdaInvalidJSON is returned when the Lambda is deployed with a transform that produces invalid JSON. - errLambdaInvalidJSON = fmt.Errorf("transformed data is invalid JSON and cannot be returned") -) - -type customConfig struct { - substation.Config - - Concurrency int `json:"concurrency"` -} - -// getConfig contextually retrieves a Substation configuration. -func getConfig(ctx context.Context) (io.Reader, error) { - buf := new(bytes.Buffer) - - cfg, found := os.LookupEnv("SUBSTATION_CONFIG") - if !found { - return nil, fmt.Errorf("no config found") - } - - path, err := file.Get(ctx, cfg) - defer os.Remove(path) - - if err != nil { - return nil, err - } - - conf, err := os.Open(path) - if err != nil { - return nil, err - } - defer conf.Close() - - if _, err := io.Copy(buf, conf); err != nil { - return nil, err - } - - return buf, nil -} - -func main() { - switch h := handler; h { - case "AWS_API_GATEWAY": - lambda.Start(gatewayHandler) - case "AWS_DYNAMODB_STREAM", "AWS_DYNAMODB": // AWS_DYNAMODB is deprecated - lambda.Start(dynamodbHandler) - case "AWS_KINESIS_DATA_FIREHOSE": - lambda.Start(firehoseHandler) - case "AWS_KINESIS_DATA_STREAM", "AWS_KINESIS": // AWS_KINESIS is deprecated - lambda.Start(kinesisStreamHandler) - case "AWS_LAMBDA": - lambda.Start(lambdaHandler) - case "AWS_S3": - lambda.Start(s3Handler) - case "AWS_S3_SNS": - lambda.Start(s3SnsHandler) - case "AWS_SNS": - lambda.Start(snsHandler) - case "AWS_SQS": - lambda.Start(sqsHandler) - default: - panic(fmt.Errorf("main handler %s: %v", h, errLambdaInvalidHandler)) - } -} - -func init() { - var ok bool - handler, ok = os.LookupEnv("SUBSTATION_LAMBDA_HANDLER") - if !ok { - panic(fmt.Errorf("init handler %s: %v", handler, errLambdaMissingHandler)) - } -} diff --git a/v1/cmd/aws/lambda/substation/s3.go b/v1/cmd/aws/lambda/substation/s3.go deleted file mode 100644 index fa2c14e3..00000000 --- a/v1/cmd/aws/lambda/substation/s3.go +++ /dev/null @@ -1,362 +0,0 @@ -package main - -import ( - "context" - "encoding/json" - "io" - "net/url" - "os" - "slices" - "time" - - "github.com/aws/aws-lambda-go/events" - "github.com/brexhq/substation" - "github.com/brexhq/substation/internal/aws" - "github.com/brexhq/substation/internal/aws/s3manager" - "github.com/brexhq/substation/internal/bufio" - "github.com/brexhq/substation/internal/channel" - 
"github.com/brexhq/substation/internal/media" - "github.com/brexhq/substation/message" - "golang.org/x/sync/errgroup" -) - -type s3Metadata struct { - EventTime time.Time `json:"eventTime"` - BucketArn string `json:"bucketArn"` - BucketName string `json:"bucketName"` - ObjectKey string `json:"objectKey"` - ObjectSize int64 `json:"objectSize"` -} - -//nolint:gocognit -func s3Handler(ctx context.Context, event events.S3Event) error { - // Retrieve and load configuration. - conf, err := getConfig(ctx) - if err != nil { - return err - } - - cfg := customConfig{} - if err := json.NewDecoder(conf).Decode(&cfg); err != nil { - return err - } - - sub, err := substation.New(ctx, cfg.Config) - if err != nil { - return err - } - - ch := channel.New[*message.Message]() - group, ctx := errgroup.WithContext(ctx) - - // Data transformation. Transforms are executed concurrently using a worker pool - // managed by an errgroup. Each message is processed in a separate goroutine. - group.Go(func() error { - tfGroup, tfCtx := errgroup.WithContext(ctx) - tfGroup.SetLimit(cfg.Concurrency) - - for message := range ch.Recv() { - select { - case <-ctx.Done(): - return ctx.Err() - default: - } - - msg := message - tfGroup.Go(func() error { - // Transformed messages are never returned to the caller because - // invocation is asynchronous. - if _, err := sub.Transform(tfCtx, msg); err != nil { - return err - } - - return nil - }) - } - - if err := tfGroup.Wait(); err != nil { - return err - } - - // CTRL messages flush the pipeline. This must be done - // after all messages have been processed. - ctrl := message.New().AsControl() - if _, err := sub.Transform(tfCtx, ctrl); err != nil { - return err - } - - return nil - }) - - // Data ingest - group.Go(func() error { - defer ch.Close() - - client := s3manager.DownloaderAPI{} - client.Setup(aws.Config{}) - - for _, record := range event.Records { - // The S3 object key is URL encoded. - // - // https://docs.aws.amazon.com/AmazonS3/latest/userguide/notification-content-structure.html - objectKey, err := url.QueryUnescape(record.S3.Object.Key) - if err != nil { - return err - } - - m := s3Metadata{ - EventTime: record.EventTime, - BucketArn: record.S3.Bucket.Arn, - BucketName: record.S3.Bucket.Name, - ObjectKey: objectKey, - ObjectSize: record.S3.Object.Size, - } - - metadata, err := json.Marshal(m) - if err != nil { - return err - } - - dst, err := os.CreateTemp("", "substation") - if err != nil { - return err - } - defer os.Remove(dst.Name()) - defer dst.Close() - - if _, err := client.Download(ctx, record.S3.Bucket.Name, objectKey, dst); err != nil { - return err - } - - // Determines if the file should be treated as text. - // Text files are decompressed by the bufio package - // (if necessary) and each line is sent as a separate - // message. All other files are sent as a single message. - mediaType, err := media.File(dst) - if err != nil { - return err - } - - if _, err := dst.Seek(0, 0); err != nil { - return err - } - - // Unsupported media types are sent as binary data. 
- if !slices.Contains(bufio.MediaTypes, mediaType) { - r, err := io.ReadAll(dst) - if err != nil { - return err - } - - msg := message.New().SetData(r).SetMetadata(metadata) - ch.Send(msg) - - return nil - } - - scanner := bufio.NewScanner() - defer scanner.Close() - - if err := scanner.ReadFile(dst); err != nil { - return err - } - - for scanner.Scan() { - select { - case <-ctx.Done(): - return ctx.Err() - default: - } - - b := []byte(scanner.Text()) - msg := message.New().SetData(b).SetMetadata(metadata) - - ch.Send(msg) - } - - if err := scanner.Err(); err != nil { - return err - } - } - - return nil - }) - - // Wait for all goroutines to complete. This includes the goroutines that are - // executing the transform functions. - if err := group.Wait(); err != nil { - return err - } - - return nil -} - -//nolint:gocognit -func s3SnsHandler(ctx context.Context, event events.SNSEvent) error { - // Retrieve and load configuration. - conf, err := getConfig(ctx) - if err != nil { - return err - } - - cfg := customConfig{} - if err := json.NewDecoder(conf).Decode(&cfg); err != nil { - return err - } - - sub, err := substation.New(ctx, cfg.Config) - if err != nil { - return err - } - - ch := channel.New[*message.Message]() - group, ctx := errgroup.WithContext(ctx) - - // Data transformation. Transforms are executed concurrently using a worker pool - // managed by an errgroup. Each Message is processed in a separate goroutine. - group.Go(func() error { - tfGroup, tfCtx := errgroup.WithContext(ctx) - tfGroup.SetLimit(cfg.Concurrency) - - for message := range ch.Recv() { - select { - case <-ctx.Done(): - return ctx.Err() - default: - } - - msg := message - tfGroup.Go(func() error { - // Transformed messages are never returned to the caller because - // invocation is asynchronous. - if _, err := sub.Transform(tfCtx, msg); err != nil { - return err - } - - return nil - }) - } - - if err := tfGroup.Wait(); err != nil { - return err - } - - // CTRL messages flush the pipeline. This must be done - // after all messages have been processed. - ctrl := message.New().AsControl() - if _, err := sub.Transform(tfCtx, ctrl); err != nil { - return err - } - - return nil - }) - - // Data ingest. - group.Go(func() error { - defer ch.Close() - - client := s3manager.DownloaderAPI{} - client.Setup(aws.Config{}) - - for _, record := range event.Records { - var s3Event events.S3Event - err := json.Unmarshal([]byte(record.SNS.Message), &s3Event) - if err != nil { - return err - } - - for _, record := range s3Event.Records { - // The S3 object key is URL encoded. - // - // https://docs.aws.amazon.com/AmazonS3/latest/userguide/notification-content-structure.html - objectKey, err := url.QueryUnescape(record.S3.Object.Key) - if err != nil { - return err - } - - m := s3Metadata{ - record.EventTime, - record.S3.Bucket.Arn, - record.S3.Bucket.Name, - objectKey, - record.S3.Object.Size, - } - metadata, err := json.Marshal(m) - if err != nil { - return err - } - - dst, err := os.CreateTemp("", "substation") - if err != nil { - return err - } - defer os.Remove(dst.Name()) - defer dst.Close() - - if _, err := client.Download(ctx, record.S3.Bucket.Name, objectKey, dst); err != nil { - return err - } - - // Determines if the file should be treated as text. - // Text files are decompressed by the bufio package - // (if necessary) and each line is sent as a separate - // message. All other files are sent as a single message. 
- mediaType, err := media.File(dst) - if err != nil { - return err - } - - if _, err := dst.Seek(0, 0); err != nil { - return err - } - - // Unsupported media types are sent as binary data. - if !slices.Contains(bufio.MediaTypes, mediaType) { - r, err := io.ReadAll(dst) - if err != nil { - return err - } - - msg := message.New().SetData(r).SetMetadata(metadata) - ch.Send(msg) - - return nil - } - - scanner := bufio.NewScanner() - defer scanner.Close() - - if err := scanner.ReadFile(dst); err != nil { - return err - } - - for scanner.Scan() { - select { - case <-ctx.Done(): - return ctx.Err() - default: - } - - b := []byte(scanner.Text()) - msg := message.New().SetData(b).SetMetadata(metadata) - - ch.Send(msg) - } - - if err := scanner.Err(); err != nil { - return err - } - } - } - - return nil - }) - - // Wait for all goroutines to complete. This includes the goroutines that are - // executing the transform functions. - if err := group.Wait(); err != nil { - return err - } - - return nil -} diff --git a/v1/cmd/aws/lambda/substation/sns.go b/v1/cmd/aws/lambda/substation/sns.go deleted file mode 100644 index 5995f24b..00000000 --- a/v1/cmd/aws/lambda/substation/sns.go +++ /dev/null @@ -1,116 +0,0 @@ -package main - -import ( - "context" - "encoding/json" - "fmt" - "time" - - "github.com/aws/aws-lambda-go/events" - "github.com/brexhq/substation" - "github.com/brexhq/substation/internal/channel" - "github.com/brexhq/substation/message" - "golang.org/x/sync/errgroup" -) - -type snsMetadata struct { - Timestamp time.Time `json:"timestamp"` - EventSubscriptionArn string `json:"eventSubscriptionArn"` - MessageID string `json:"messageId"` - Subject string `json:"subject"` -} - -func snsHandler(ctx context.Context, event events.SNSEvent) error { - // Retrieve and load configuration. - conf, err := getConfig(ctx) - if err != nil { - return fmt.Errorf("sns handler: %v", err) - } - - cfg := customConfig{} - if err := json.NewDecoder(conf).Decode(&cfg); err != nil { - return fmt.Errorf("sns handler: %v", err) - } - - sub, err := substation.New(ctx, cfg.Config) - if err != nil { - return fmt.Errorf("sns handler: %v", err) - } - - ch := channel.New[*message.Message]() - group, ctx := errgroup.WithContext(ctx) - - // Data transformation. Transforms are executed concurrently using a worker pool - // managed by an errgroup. Each message is processed in a separate goroutine. - group.Go(func() error { - tfGroup, tfCtx := errgroup.WithContext(ctx) - tfGroup.SetLimit(cfg.Concurrency) - - for message := range ch.Recv() { - select { - case <-ctx.Done(): - return ctx.Err() - default: - } - - msg := message - tfGroup.Go(func() error { - // Transformed messages are never returned to the caller because - // invocation is asynchronous. - if _, err := sub.Transform(tfCtx, msg); err != nil { - return err - } - - return nil - }) - } - - if err := tfGroup.Wait(); err != nil { - return err - } - - // CTRL messages flush the pipeline. This must be done - // after all messages have been processed. - ctrl := message.New().AsControl() - if _, err := sub.Transform(ctx, ctrl); err != nil { - return err - } - - return nil - }) - - // Data ingest. - group.Go(func() error { - defer ch.Close() - - // Create Message metadata. 
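-		// The metadata is built from the first record and shared by every
-		// message produced from this event.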
- m := snsMetadata{ - Timestamp: event.Records[0].SNS.Timestamp, - EventSubscriptionArn: event.Records[0].EventSubscriptionArn, - MessageID: event.Records[0].SNS.MessageID, - Subject: event.Records[0].SNS.Subject, - } - - metadata, err := json.Marshal(m) - if err != nil { - return fmt.Errorf("sns handler: %v", err) - } - - for _, record := range event.Records { - b := []byte(record.SNS.Message) - msg := message.New().SetData(b).SetMetadata(metadata) - - ch.Send(msg) - } - - return nil - }) - - // Wait for all goroutines to complete. This includes the goroutines that are - // executing the transform functions. - if err := group.Wait(); err != nil { - return fmt.Errorf("sns handler: %v", err) - } - - return nil -} diff --git a/v1/cmd/aws/lambda/substation/sqs.go b/v1/cmd/aws/lambda/substation/sqs.go deleted file mode 100644 index 3c557385..00000000 --- a/v1/cmd/aws/lambda/substation/sqs.go +++ /dev/null @@ -1,115 +0,0 @@ -package main - -import ( - "context" - "encoding/json" - "fmt" - - "github.com/aws/aws-lambda-go/events" - "github.com/brexhq/substation" - "github.com/brexhq/substation/internal/channel" - "github.com/brexhq/substation/message" - "golang.org/x/sync/errgroup" -) - -type sqsMetadata struct { - EventSourceArn string `json:"eventSourceArn"` - MessageID string `json:"messageId"` - BodyMd5 string `json:"bodyMd5"` - Attributes map[string]string `json:"attributes"` -} - -func sqsHandler(ctx context.Context, event events.SQSEvent) error { - // Retrieve and load configuration. - conf, err := getConfig(ctx) - if err != nil { - return fmt.Errorf("sqs handler: %v", err) - } - - cfg := customConfig{} - if err := json.NewDecoder(conf).Decode(&cfg); err != nil { - return fmt.Errorf("sqs handler: %v", err) - } - - sub, err := substation.New(ctx, cfg.Config) - if err != nil { - return fmt.Errorf("sqs handler: %v", err) - } - - ch := channel.New[*message.Message]() - group, ctx := errgroup.WithContext(ctx) - - // Data transformation. Transforms are executed concurrently using a worker pool - // managed by an errgroup. Each message is processed in a separate goroutine. - group.Go(func() error { - tfGroup, tfCtx := errgroup.WithContext(ctx) - tfGroup.SetLimit(cfg.Concurrency) - - for message := range ch.Recv() { - select { - case <-ctx.Done(): - return ctx.Err() - default: - } - - msg := message - tfGroup.Go(func() error { - // Transformed messages are never returned to the caller because - // invocation is asynchronous. - if _, err := sub.Transform(tfCtx, msg); err != nil { - return err - } - - return nil - }) - } - - if err := tfGroup.Wait(); err != nil { - return err - } - - // CTRL messages flush the pipeline. This must be done - // after all messages have been processed. - ctrl := message.New().AsControl() - if _, err := sub.Transform(ctx, ctrl); err != nil { - return err - } - - return nil - }) - - // Data ingest. - group.Go(func() error { - defer ch.Close() - - // Create Message metadata. - m := sqsMetadata{ - EventSourceArn: event.Records[0].EventSourceARN, - MessageID: event.Records[0].MessageId, - BodyMd5: event.Records[0].Md5OfBody, - Attributes: event.Records[0].Attributes, - } - - metadata, err := json.Marshal(m) - if err != nil { - return fmt.Errorf("sqs handler: %v", err) - } - - for _, record := range event.Records { - b := []byte(record.Body) - msg := message.New().SetData(b).SetMetadata(metadata) - - ch.Send(msg) - } - - return nil - }) - - // Wait for all goroutines to complete. This includes the goroutines that are - // executing the transform functions. 
- if err := group.Wait(); err != nil { - return fmt.Errorf("sqs handler: %v", err) - } - - return nil -} diff --git a/v1/cmd/aws/lambda/validate/main.go b/v1/cmd/aws/lambda/validate/main.go deleted file mode 100644 index 760aeefc..00000000 --- a/v1/cmd/aws/lambda/validate/main.go +++ /dev/null @@ -1,45 +0,0 @@ -package main - -import ( - "context" - "encoding/base64" - "encoding/json" - "fmt" - - "github.com/aws/aws-lambda-go/lambda" - - "github.com/brexhq/substation" -) - -func main() { - lambda.Start(handler) -} - -type validationEvent struct { - Content string `json:"content"` - URI string `json:"uri"` -} - -func handler(ctx context.Context, event json.RawMessage) error { - var e validationEvent - err := json.Unmarshal(event, &e) - if err != nil { - return fmt.Errorf("validation: json: %v (%q)", err, string(event)) - } - - conf, err := base64.StdEncoding.DecodeString(e.Content) - if err != nil { - return fmt.Errorf("validation: base64: %v (%q)", err, e.Content) - } - - cfg := substation.Config{} - if err := json.Unmarshal(conf, &cfg); err != nil { - return fmt.Errorf("validation: json: %v (%q)", err, string(conf)) - } - - if _, err := substation.New(ctx, cfg); err != nil { - return fmt.Errorf("validation: substation: %v", err) - } - - return nil -} diff --git a/v1/cmd/aws/lambda/validate/main_test.go b/v1/cmd/aws/lambda/validate/main_test.go deleted file mode 100644 index 85ac3590..00000000 --- a/v1/cmd/aws/lambda/validate/main_test.go +++ /dev/null @@ -1,81 +0,0 @@ -package main - -import ( - "context" - "encoding/base64" - "encoding/json" - "testing" - - "github.com/brexhq/substation/internal/errors" -) - -var testCfgs = []struct { - name string - cfg []byte - expectedErr error -}{ - { - "invalid transform", - []byte(` - { - "transforms": [ - { - "type": "fooer" - } - ] - } - `), - errors.ErrInvalidFactoryInput, - }, - { - "invalid processor settings", - []byte(` - { - "transforms": [ - { - "type": "object_insert", - } - ] - } - `), - errors.ErrInvalidOption, - }, - { - "valid config", - []byte(` - { - "transforms": [ - { - "settings": { - "object": { - "source_key": "foo", - "target_key": "baz" - } - }, - "type": "object_copy" - } - ] - } - `), - nil, - }, -} - -func TestHandler(t *testing.T) { - for _, cfg := range testCfgs { - t.Run(cfg.name, func(t *testing.T) { - e, err := json.Marshal(validationEvent{ - Content: base64.StdEncoding.EncodeToString(cfg.cfg), - URI: "arn:aws:lambda:region:account:function:SubstationAppConfigLambdaValidator", - }) - if err != nil { - t.Fatal(err) - } - - err = handler(context.Background(), e) - if err != nil && cfg.expectedErr == nil { - t.Error(err) - } - }) - } -} diff --git a/v1/cmd/development/benchmark/substation/main.go b/v1/cmd/development/benchmark/substation/main.go deleted file mode 100644 index a1e58594..00000000 --- a/v1/cmd/development/benchmark/substation/main.go +++ /dev/null @@ -1,205 +0,0 @@ -// Benchmarks the performance of Substation by sending a configurable number of events -// through the system and reporting the total time taken, the number of events sent, the -// amount of data sent, and the rate of events and data sent per second. 
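-//
-// An invocation looks like the following; the file paths are illustrative:
-//
-//	go run . -file ./events.jsonl -count 100000 -config ./config.jsonnet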
-package main - -import ( - "context" - "encoding/json" - "flag" - "fmt" - "os" - "runtime/pprof" - "time" - - "golang.org/x/sync/errgroup" - - "github.com/brexhq/substation" - "github.com/brexhq/substation/internal/bufio" - "github.com/brexhq/substation/internal/channel" - "github.com/brexhq/substation/internal/file" - "github.com/brexhq/substation/message" -) - -type options struct { - Count int - Concurrency int - ConfigFile string - DataFile string - pprofCPU bool - pprofMemory bool -} - -func main() { - var opts options - - flag.StringVar(&opts.DataFile, "file", "", "File to parse") - flag.IntVar(&opts.Count, "count", 100000, "Number of events to process (default: 100000)") - flag.IntVar(&opts.Concurrency, "concurrency", -1, "Number of concurrent data transformation functions to run (default: number of CPUs available)") - flag.StringVar(&opts.ConfigFile, "config", "", "Substation configuration file (default: empty config)") - flag.BoolVar(&opts.pprofCPU, "cpu", false, "Enable CPU profiling (default: false)") - flag.BoolVar(&opts.pprofMemory, "mem", false, "Enable memory profiling (default: false)") - flag.Parse() - - if opts.DataFile == "" { - fmt.Println("missing required flag -file") - os.Exit(1) - } - - ctx := context.Background() - - fmt.Printf("%s: Configuring Substation\n", time.Now().Format(time.RFC3339Nano)) - var conf []byte - // If no config file is provided, then an empty config is used. - if opts.ConfigFile != "" { - path, err := file.Get(ctx, opts.ConfigFile) - defer os.Remove(path) - - if err != nil { - panic(err) - } - - conf, err = os.ReadFile(path) - if err != nil { - panic(err) - } - } else { - conf = []byte(`{"transforms":[]}`) - } - - cfg := substation.Config{} - if err := json.Unmarshal(conf, &cfg); err != nil { - panic(err) - } - - sub, err := substation.New(ctx, cfg) - if err != nil { - panic(err) - } - - // Collect the sample data for the benchmark. - path, err := file.Get(ctx, opts.DataFile) - defer os.Remove(path) - - if err != nil { - panic(fmt.Errorf("file: %v", err)) - } - - f, err := os.Open(path) - if err != nil { - panic(fmt.Errorf("file: %v", err)) - } - defer f.Close() - - scanner := bufio.NewScanner() - defer scanner.Close() - - if err := scanner.ReadFile(f); err != nil { - panic(err) - } - - fmt.Printf("%s: Loading data into memory\n", time.Now().Format(time.RFC3339Nano)) - var data [][]byte - dataBytes := 0 - for scanner.Scan() { - b := []byte(scanner.Text()) - for i := 0; i < opts.Count; i++ { - data = append(data, b) - dataBytes += len(b) - } - } - - if err := scanner.Err(); err != nil { - panic(err) - } - - if opts.pprofCPU { - f, err := os.Create("./cpu.prof") - if err != nil { - panic(err) - } - defer f.Close() // error handling omitted for example - if err := pprof.StartCPUProfile(f); err != nil { - panic(err) - } - defer pprof.StopCPUProfile() - } - - fmt.Printf("%s: Starting benchmark\n", time.Now().Format(time.RFC3339Nano)) - start := time.Now() - ch := channel.New[*message.Message]() - group, ctx := errgroup.WithContext(ctx) - - group.Go(func() error { - tfGroup, tfCtx := errgroup.WithContext(ctx) - tfGroup.SetLimit(opts.Concurrency) - - for message := range ch.Recv() { - select { - case <-ctx.Done(): - return ctx.Err() - default: - } - - msg := message - tfGroup.Go(func() error { - if _, err := sub.Transform(tfCtx, msg); err != nil { - return err - } - - return nil - }) - } - - if err := tfGroup.Wait(); err != nil { - return err - } - - // ctrl messages flush the pipeline. This must be done - // after all messages have been processed. 
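-		// Transforms that buffer messages are expected to flush any held data
-		// when the control message arrives.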
- ctrl := message.New().AsControl() - if _, err := sub.Transform(ctx, ctrl); err != nil { - return err - } - - return nil - }) - - group.Go(func() error { - defer ch.Close() - - for _, b := range data { - msg := message.New().SetData(b) - ch.Send(msg) - } - - return nil - }) - - // Wait for all goroutines to complete. This includes the goroutines that are - // executing the transform functions. - if err := group.Wait(); err != nil { - panic(err) - } - - fmt.Printf("%s: Ending benchmark\n", time.Now().Format(time.RFC3339Nano)) - - // The benchmark reports the total time taken, the number of events sent, the - // amount of data sent, and the rate of events and data sent per second. - elapsed := time.Since(start) - fmt.Printf("\nBenchmark results:\n") - fmt.Printf("- %d events in %s\n", len(data), elapsed) - fmt.Printf("- %.2f events per second\n", float64(len(data))/elapsed.Seconds()) - fmt.Printf("- %d MB in %s\n", dataBytes/1000/1000, elapsed) - fmt.Printf("- %.2f MB per second\n", float64(dataBytes)/1000/1000/elapsed.Seconds()) - - if opts.pprofMemory { - heap, err := os.Create("./heap.prof") - if err != nil { - panic(err) - } - if err := pprof.WriteHeapProfile(heap); err != nil { - panic(err) - } - } -} diff --git a/v1/cmd/development/kinesis-tap/substation/config.jsonnet b/v1/cmd/development/kinesis-tap/substation/config.jsonnet deleted file mode 100644 index 368392a0..00000000 --- a/v1/cmd/development/kinesis-tap/substation/config.jsonnet +++ /dev/null @@ -1,7 +0,0 @@ -local sub = import '../../../../build/config/substation.libsonnet'; - -{ - transforms: [ - sub.tf.send.stdout(), - ], -} diff --git a/v1/cmd/development/kinesis-tap/substation/main.go b/v1/cmd/development/kinesis-tap/substation/main.go deleted file mode 100644 index 5425c403..00000000 --- a/v1/cmd/development/kinesis-tap/substation/main.go +++ /dev/null @@ -1,264 +0,0 @@ -package main - -import ( - "bytes" - "context" - "encoding/json" - "errors" - "flag" - "fmt" - "io" - "os" - "os/signal" - "runtime" - "syscall" - "time" - - "golang.org/x/sync/errgroup" - - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/awslabs/kinesis-aggregation/go/deaggregator" - "github.com/brexhq/substation" - "github.com/brexhq/substation/internal/aws" - "github.com/brexhq/substation/internal/aws/kinesis" - "github.com/brexhq/substation/internal/channel" - "github.com/brexhq/substation/internal/file" - "github.com/brexhq/substation/internal/log" - "github.com/brexhq/substation/message" -) - -type options struct { - Config string - - // StreamName is the name of the Kinesis stream to read from. - StreamName string - // StreamOffset is the read offset of the stream (earliest, latest). - StreamOffset string -} - -// getConfig contextually retrieves a Substation configuration. 
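-// The configuration location is resolved by the internal file package, which
-// is assumed to support local paths and remote URIs; the temporary copy is
-// removed after it has been read into memory.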
-func getConfig(ctx context.Context, cfg string) (io.Reader, error) { - path, err := file.Get(ctx, cfg) - defer os.Remove(path) - - if err != nil { - return nil, err - } - - conf, err := os.Open(path) - if err != nil { - return nil, err - } - defer conf.Close() - - buf := new(bytes.Buffer) - if _, err := io.Copy(buf, conf); err != nil { - return nil, err - } - - return buf, nil -} - -func main() { - var opts options - - flag.StringVar(&opts.Config, "config", "./config.json", "The Substation configuration file used to transform records") - flag.StringVar(&opts.StreamName, "stream-name", "", "The AWS Kinesis Data Stream to fetch records from") - flag.StringVar(&opts.StreamOffset, "stream-offset", "earliest", "Determines the offset of the stream (earliest, latest)") - flag.Parse() - - if err := run(context.Background(), opts); err != nil { - panic(fmt.Errorf("main: %v", err)) - } -} - -//nolint:gocognit // Ignore cognitive complexity. -func run(ctx context.Context, opts options) error { - cfg := substation.Config{} - c, err := getConfig(ctx, opts.Config) - if err != nil { - return err - } - - if err := json.NewDecoder(c).Decode(&cfg); err != nil { - return err - } - - sub, err := substation.New(ctx, cfg) - if err != nil { - return err - } - - ch := channel.New[*message.Message]() - group, ctx := errgroup.WithContext(ctx) - - // Consumer group that transforms records using Substation - // until the channel is closed by the producer group. - group.Go(func() error { - tfGroup, tfCtx := errgroup.WithContext(ctx) - tfGroup.SetLimit(runtime.NumCPU()) - - for message := range ch.Recv() { - select { - case <-ctx.Done(): - return ctx.Err() - default: - } - - msg := message - tfGroup.Go(func() error { - if _, err := sub.Transform(tfCtx, msg); err != nil { - return err - } - - return nil - }) - } - - if err := tfGroup.Wait(); err != nil { - return err - } - - log.Debug("Closed Substation pipeline.") - - // ctrl messages flush the pipeline. This must be done - // after all messages have been processed. - ctrl := message.New().AsControl() - if _, err := sub.Transform(tfCtx, ctrl); err != nil { - return err - } - - log.Debug("Flushed Substation pipeline.") - - return nil - }) - - // Producer group that fetches records from each shard in the - // Kinesis stream until the context is cancelled by an interrupt - // signal. - group.Go(func() error { - defer ch.Close() // Producer goroutines must close the channel when they are done. - - // The AWS client is configured using environment variables - // or the default credentials file. - client := kinesis.API{} - client.Setup(aws.Config{}) - - res, err := client.ListShards(ctx, opts.StreamName) - if err != nil { - return err - } - - log.WithField("stream", opts.StreamName).WithField("count", len(res.Shards)).Debug("Retrieved active shards from Kinesis stream.") - - var iType string - switch opts.StreamOffset { - case "earliest": - iType = "TRIM_HORIZON" - case "latest": - iType = "LATEST" - default: - return fmt.Errorf("invalid offset: %s", opts.StreamOffset) - } - - // Each shard is read concurrently using a worker - // pool managed by an errgroup that can be cancelled - // by an interrupt signal. - notifyCtx, cancel := signal.NotifyContext(ctx, syscall.SIGINT) - defer cancel() - - recvGroup, recvCtx := errgroup.WithContext(notifyCtx) - defer log.Debug("Closed connections to the Kinesis stream.") - - // This iterates over a snapshot of active shards in the - // stream and will not be updated if shards are split or - // merged. 
New shards can be identified in the response - // from GetRecords, but this isn't implemented. - // - // Each shard is paginated until the end of the shard is - // reached or the context is cancelled. - for _, shard := range res.Shards { - iterator, err := client.GetShardIterator(ctx, opts.StreamName, *shard.ShardId, iType) - if err != nil { - return err - } - - recvGroup.Go(func() error { - shardIterator := *iterator.ShardIterator - - for { - select { - case <-ctx.Done(): - return ctx.Err() - default: - } - - // GetRecords has a limit of 5 transactions per second - // per shard, so this loop is designed to not overload - // the API in case other consumers are reading from the - // same shard. - res, err := client.GetRecords(recvCtx, shardIterator) - if err != nil { - return err - } - - if res.NextShardIterator == nil { - log.WithField("stream", opts.StreamName).WithField("shard", shard.ShardId).Debug("Reached end of Kinesis shard.") - - break - } - shardIterator = *res.NextShardIterator - - if len(res.Records) == 0 { - time.Sleep(500 * time.Millisecond) - - continue - } - - deagg, err := deaggregator.DeaggregateRecords(res.Records) - if err != nil { - return err - } - - log.WithField("stream", opts.StreamName).WithField("shard", shard.ShardId).WithField("count", len(deagg)).Debug("Retrieved records from Kinesis shard.") - - for _, record := range deagg { - msg := message.New().SetData(record.Data) - ch.Send(msg) - } - - time.Sleep(500 * time.Millisecond) - } - - return nil - }) - } - - // Cancellation errors are expected when the errgroup - // is interrupted. All other errors are returned to - // the caller. - if err := recvGroup.Wait(); err != nil { - if awsErr, ok := err.(awserr.Error); ok { - if awsErr.Code() == "RequestCanceled" { - return nil - } - } - - if errors.Is(err, context.Canceled) { - return nil - } - - return err - } - - return nil - }) - - // Wait for the producer and consumer groups to finish, - // or an error from either group. - if err := group.Wait(); err != nil { - return err - } - - return nil -} diff --git a/v1/condition/condition.go b/v1/condition/condition.go deleted file mode 100644 index d629758a..00000000 --- a/v1/condition/condition.go +++ /dev/null @@ -1,234 +0,0 @@ -// Package condition provides functions for evaluating data. -package condition - -import ( - "context" - "fmt" - - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/internal/errors" - "github.com/brexhq/substation/message" -) - -// errOperatorMissingInspectors is returned when an Operator that requires -// inspectors is created with no inspectors. -var errOperatorMissingInspectors = fmt.Errorf("missing inspectors") - -type Config struct { - Operator string `json:"operator"` - Inspectors []config.Config `json:"inspectors"` -} - -type inspector interface { - Inspect(context.Context, *message.Message) (bool, error) -} - -// newInspector returns a configured Inspector from an Inspector configuration. -func newInspector(ctx context.Context, cfg config.Config) (inspector, error) { //nolint: cyclop, gocyclo // ignore cyclomatic complexity - switch cfg.Type { - // Format inspectors. - case "format_mime": - return newFormatMIME(ctx, cfg) - case "format_json": - return newFormatJSON(ctx, cfg) - // Meta inspectors. - case "meta_condition": - return newMetaCondition(ctx, cfg) - case "meta_for_each": - return newMetaForEach(ctx, cfg) - case "meta_negate": - return newMetaNegate(ctx, cfg) - // Network inspectors. 
- case "network_ip_global_unicast": - return newNetworkIPGlobalUnicast(ctx, cfg) - case "network_ip_link_local_multicast": - return newNetworkIPLinkLocalMulticast(ctx, cfg) - case "network_ip_link_local_unicast": - return newNetworkIPLinkLocalUnicast(ctx, cfg) - case "network_ip_loopback": - return newNetworkIPLoopback(ctx, cfg) - case "network_ip_multicast": - return newNetworkIPMulticast(ctx, cfg) - case "network_ip_private": - return newNetworkIPPrivate(ctx, cfg) - case "network_ip_unicast": - return newNetworkIPUnicast(ctx, cfg) - case "network_ip_unspecified": - return newNetworkIPUnspecified(ctx, cfg) - case "network_ip_valid": - return newNetworkIPValid(ctx, cfg) - // Number inspectors. - case "number_equal_to": - return newNumberEqualTo(ctx, cfg) - case "number_less_than": - return newNumberLessThan(ctx, cfg) - case "number_greater_than": - return newNumberGreaterThan(ctx, cfg) - case "number_bitwise_and": - return newNumberBitwiseAND(ctx, cfg) - case "number_bitwise_or": - return newNumberBitwiseOR(ctx, cfg) - case "number_bitwise_xor": - return newNumberBitwiseXOR(ctx, cfg) - case "number_bitwise_not": - return newNumberBitwiseNOT(ctx, cfg) - case "number_length_less_than": - return newNumberLengthLessThan(ctx, cfg) - case "number_length_greater_than": - return newNumberLengthGreaterThan(ctx, cfg) - case "number_length_equal_to": - return newNumberLengthEqualTo(ctx, cfg) - // String inspectors. - case "string_contains": - return newStringContains(ctx, cfg) - case "string_ends_with": - return newStringEndsWith(ctx, cfg) - case "string_equal_to": - return newStringEqualTo(ctx, cfg) - case "string_greater_than": - return newStringGreaterThan(ctx, cfg) - case "string_less_than": - return newStringLessThan(ctx, cfg) - case "string_starts_with": - return newStringStartsWith(ctx, cfg) - case "string_match": - return newStringMatch(ctx, cfg) - // Utility inspectors. - case "utility_random": - return newUtilityRandom(ctx, cfg) - default: - return nil, fmt.Errorf("condition: new_inspector: type %q settings %+v: %v", cfg.Type, cfg.Settings, errors.ErrInvalidFactoryInput) - } -} - -func newInspectors(ctx context.Context, conf ...config.Config) ([]inspector, error) { - var inspectors []inspector - for _, c := range conf { - insp, err := newInspector(ctx, c) - if err != nil { - return nil, err - } - inspectors = append(inspectors, insp) - } - return inspectors, nil -} - -type Operator interface { - Operate(context.Context, *message.Message) (bool, error) -} - -// New returns a configured Operator from an Operator configuration. -func New(ctx context.Context, cfg Config) (Operator, error) { - inspectors, err := newInspectors(ctx, cfg.Inspectors...) - if err != nil { - return nil, err - } - - switch cfg.Operator { - case "all": - return &opAll{inspectors}, nil - case "any": - return &opAny{inspectors}, nil - case "none": - return &opNone{inspectors}, nil - default: - return &opEmpty{}, nil - } -} - -type opAll struct { - Inspectors []inspector `json:"inspectors"` -} - -// Operate returns true if all inspectors return true, otherwise it returns false. 
-func (o *opAll) Operate(ctx context.Context, msg *message.Message) (bool, error) {
-	if msg.IsControl() {
-		return false, nil
-	}
-
-	if len(o.Inspectors) == 0 {
-		return false, fmt.Errorf("condition: operate: inspectors %+v: %v", o, errOperatorMissingInspectors)
-	}
-
-	for _, i := range o.Inspectors {
-		ok, err := i.Inspect(ctx, msg)
-		if err != nil {
-			return false, err
-		}
-
-		// return false if any check fails
-		if !ok {
-			return false, nil
-		}
-	}
-
-	// return true if all checks pass
-	return true, nil
-}
-
-type opAny struct {
-	Inspectors []inspector `json:"inspectors"`
-}
-
-// Operate returns true if any inspectors return true, otherwise it returns false.
-func (o *opAny) Operate(ctx context.Context, msg *message.Message) (bool, error) {
-	if msg.IsControl() {
-		return false, nil
-	}
-
-	if len(o.Inspectors) == 0 {
-		return false, fmt.Errorf("condition: operate: inspectors %+v: %v", o, errOperatorMissingInspectors)
-	}
-
-	for _, i := range o.Inspectors {
-		ok, err := i.Inspect(ctx, msg)
-		if err != nil {
-			return false, err
-		}
-
-		// return true if any check passes
-		if ok {
-			return true, nil
-		}
-	}
-
-	// return false if all checks fail
-	return false, nil
-}
-
-type opNone struct {
-	Inspectors []inspector `json:"inspectors"`
-}
-
-// Operate returns true if all inspectors return false, otherwise it returns false.
-func (o *opNone) Operate(ctx context.Context, msg *message.Message) (bool, error) {
-	if msg.IsControl() {
-		return false, nil
-	}
-
-	if len(o.Inspectors) == 0 {
-		return false, fmt.Errorf("condition: operate: inspectors %+v: %v", o, errOperatorMissingInspectors)
-	}
-
-	for _, i := range o.Inspectors {
-		ok, err := i.Inspect(ctx, msg)
-		if err != nil {
-			return false, err
-		}
-
-		// return false if any check passes
-		if ok {
-			return false, nil
-		}
-	}
-
-	// return true if all checks fail
-	return true, nil
-}
-
-type opEmpty struct{}
-
-// Operate always returns true.
-func (o *opEmpty) Operate(ctx context.Context, msg *message.Message) (bool, error) {
-	return true, nil
-}
diff --git a/v1/condition/condition_example_test.go b/v1/condition/condition_example_test.go
deleted file mode 100644
index ac89581f..00000000
--- a/v1/condition/condition_example_test.go
+++ /dev/null
@@ -1,58 +0,0 @@
-package condition_test
-
-import (
-	"context"
-	"fmt"
-
-	"github.com/brexhq/substation/condition"
-	"github.com/brexhq/substation/config"
-	"github.com/brexhq/substation/message"
-)
-
-func ExampleOperator() {
-	ctx := context.TODO()
-
-	// Multiple inspectors can be chained together with an operator.
-	// This example uses the "all" operator, which requires all inspectors to
-	// return true for the operator to return true.
-	cfg := condition.Config{
-		Operator: "all",
-		Inspectors: []config.Config{
-			{
-				Type: "number_length_less_than",
-				Settings: map[string]interface{}{
-					"value": 10,
-				},
-			},
-			{
-				Type: "string_contains",
-				Settings: map[string]interface{}{
-					"value": "f",
-				},
-			},
-		},
-	}
-
-	// Operators are retrieved from the factory and
-	// applied to a message.
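-	// An unrecognized operator name falls back to an operator that always
-	// returns true.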
- op, err := condition.New(ctx, cfg) - if err != nil { - // handle err - panic(err) - } - - msg := message.New().SetData([]byte("fizzy")) - if err != nil { - // handle err - panic(err) - } - - ok, err := op.Operate(ctx, msg) - if err != nil { - // handle err - panic(err) - } - - // Output: true - fmt.Println(ok) -} diff --git a/v1/condition/condition_test.go b/v1/condition/condition_test.go deleted file mode 100644 index cfe88b36..00000000 --- a/v1/condition/condition_test.go +++ /dev/null @@ -1,505 +0,0 @@ -package condition - -import ( - "context" - "testing" - - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" -) - -var allTests = []struct { - name string - conf []config.Config - test []byte - expected bool -}{ - { - "format_mime", - []config.Config{ - { - Type: "format_mime", - Settings: map[string]interface{}{ - "type": "application/x-gzip", - }, - }, - }, - []byte{80, 75, 3, 4}, - false, - }, - { - "string", - []config.Config{ - { - Type: "string_equal_to", - Settings: map[string]interface{}{ - "value": "foo", - }, - }, - }, - []byte("foo"), - true, - }, - { - "pattern", - []config.Config{ - { - Type: "string_match", - Settings: map[string]interface{}{ - "pattern": "^foo$", - }, - }, - }, - []byte("foo"), - true, - }, - { - "content", - []config.Config{ - { - Type: "format_mime", - Settings: map[string]interface{}{ - "type": "application/x-gzip", - }, - }, - }, - []byte{80, 75, 3, 4}, - false, - }, - { - "length", - []config.Config{ - { - Type: "number_length_equal_to", - Settings: map[string]interface{}{ - "value": 3, - }, - }, - }, - []byte("foo"), - true, - }, - { - "string length", - []config.Config{ - { - Type: "string_equal_to", - Settings: map[string]interface{}{ - "value": "foo", - }, - }, - { - Type: "number_length_equal_to", - Settings: map[string]interface{}{ - "value": 3, - }, - }, - }, - []byte("foo"), - true, - }, - // this test joins multiple ANY operators with an ALL operator, implementing the following logic: - // if ( "foo" starts with "f" OR "foo" ends with "b" ) AND ( len("foo") == 3 ) then return true - { - "condition", - []config.Config{ - { - Type: "meta_condition", - Settings: map[string]interface{}{ - "condition": map[string]interface{}{ - "operator": "any", - "inspectors": []config.Config{ - { - Type: "string_starts_with", - Settings: map[string]interface{}{ - "value": "f", - }, - }, - { - Type: "string_ends_with", - Settings: map[string]interface{}{ - "value": "b", - }, - }, - }, - }, - }, - }, - { - Type: "meta_condition", - Settings: map[string]interface{}{ - "condition": map[string]interface{}{ - "operator": "all", - "inspectors": []config.Config{ - { - Type: "number_length_equal_to", - Settings: map[string]interface{}{ - "value": 3, - }, - }, - }, - }, - }, - }, - }, - []byte("foo"), - true, - }, -} - -func TestAll(t *testing.T) { - ctx := context.TODO() - - for _, test := range allTests { - t.Run(test.name, func(t *testing.T) { - message := message.New().SetData(test.test) - cfg := Config{ - Operator: "all", - Inspectors: test.conf, - } - - op, err := New(ctx, cfg) - if err != nil { - t.Error(err) - } - - ok, err := op.Operate(ctx, message) - if err != nil { - t.Error(err) - } - - if ok != test.expected { - t.Errorf("expected %v, got %v", test.expected, ok) - } - }) - } -} - -func benchmarkAll(b *testing.B, conf []config.Config, message *message.Message) { - ctx := context.TODO() - for i := 0; i < b.N; i++ { - inspectors, _ := newInspectors(ctx, conf...) 
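-		// The error from newInspectors is ignored here; the benchmark assumes
-		// the test configurations are valid.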
- op := opAll{inspectors} - _, _ = op.Operate(ctx, message) - } -} - -func BenchmarkAll(b *testing.B) { - for _, test := range allTests { - b.Run(test.name, - func(b *testing.B) { - message := message.New().SetData(test.test) - benchmarkAll(b, test.conf, message) - }, - ) - } -} - -var anyTests = []struct { - name string - conf []config.Config - test []byte - expected bool -}{ - { - "string", - []config.Config{ - { - Type: "string_equal_to", - Settings: map[string]interface{}{ - "value": "foo", - }, - }, - { - Type: "string_equal_to", - Settings: map[string]interface{}{ - "value": "baz", - }, - }, - }, - []byte("foo"), - true, - }, - { - "length", - []config.Config{ - { - Type: "number_length_equal_to", - Settings: map[string]interface{}{ - "value": 3, - }, - }, - { - Type: "number_length_equal_to", - Settings: map[string]interface{}{ - "value": 4, - }, - }, - { - Type: "number_length_equal_to", - Settings: map[string]interface{}{ - "value": 5, - }, - }, - }, - []byte("foo"), - true, - }, - { - "string length", - []config.Config{ - { - Type: "string_equal_to", - Settings: map[string]interface{}{ - "value": "foo", - }, - }, - { - Type: "number_length_equal_to", - Settings: map[string]interface{}{ - "value": 4, - }, - }, - }, - []byte("foo"), - true, - }, - // this test joins multiple ALL operators with an ANY operator, implementing the following logic: - // if ( len("foo") == 4 AND "foo" starts with "f" ) OR ( len("foo") == 3 ) then return true - { - "condition", - []config.Config{ - { - Type: "meta_condition", - Settings: map[string]interface{}{ - "condition": map[string]interface{}{ - "operator": "all", - "inspectors": []config.Config{ - { - Type: "number_length_equal_to", - Settings: map[string]interface{}{ - "value": 4, - }, - }, - { - Type: "string_starts_with", - Settings: map[string]interface{}{ - "value": "f", - }, - }, - }, - }, - }, - }, - { - Type: "meta_condition", - Settings: map[string]interface{}{ - "condition": map[string]interface{}{ - "operator": "all", - "inspectors": []config.Config{ - { - Type: "number_length_equal_to", - Settings: map[string]interface{}{ - "value": 3, - }, - }, - }, - }, - }, - }, - }, - []byte("foo"), - true, - }, -} - -func TestAny(t *testing.T) { - ctx := context.TODO() - - for _, test := range anyTests { - message := message.New().SetData(test.test) - - cfg := Config{ - Operator: "any", - Inspectors: test.conf, - } - - op, err := New(ctx, cfg) - if err != nil { - t.Error(err) - } - - ok, err := op.Operate(ctx, message) - if err != nil { - t.Error(err) - } - - if ok != test.expected { - t.Errorf("expected %v, got %v", test.expected, ok) - } - } -} - -func benchmarkAny(b *testing.B, conf []config.Config, message *message.Message) { - ctx := context.TODO() - for i := 0; i < b.N; i++ { - inspectors, _ := newInspectors(ctx, conf...) 
- op := opAny{inspectors} - _, _ = op.Operate(ctx, message) - } -} - -func BenchmarkAny(b *testing.B) { - for _, test := range anyTests { - b.Run(test.name, - func(b *testing.B) { - message := message.New().SetData(test.test) - benchmarkAny(b, test.conf, message) - }, - ) - } -} - -var noneTests = []struct { - name string - conf []config.Config - test []byte - expected bool -}{ - { - "string", - []config.Config{ - { - Type: "string_equal_to", - Settings: map[string]interface{}{ - "value": "baz", - }, - }, - { - Type: "string_starts_with", - Settings: map[string]interface{}{ - "value": "b", - }, - }, - }, - []byte("foo"), - true, - }, - { - "string", - []config.Config{ - { - Type: "string_equal_to", - Settings: map[string]interface{}{ - "value": "foo", - }, - }, - { - Type: "string_starts_with", - Settings: map[string]interface{}{ - "value": "b", - }, - }, - }, - []byte("foo"), - false, - }, - { - "length", - []config.Config{ - { - Type: "number_length_equal_to", - Settings: map[string]interface{}{ - "type": "equals", - "value": 0, - }, - }, - { - Type: "meta_negate", - Settings: map[string]interface{}{ - "inspector": map[string]interface{}{ - "type": "string_starts_with", - "settings": map[string]interface{}{ - "value": "f", - }, - }, - }, - }, - }, - []byte("foo"), - true, - }, -} - -func TestNone(t *testing.T) { - ctx := context.TODO() - - for _, test := range noneTests { - t.Run(test.name, func(t *testing.T) { - message := message.New().SetData(test.test) - cfg := Config{ - Operator: "none", - Inspectors: test.conf, - } - - op, err := New(ctx, cfg) - if err != nil { - t.Error(err) - } - - ok, err := op.Operate(ctx, message) - if err != nil { - t.Error(err) - } - - if ok != test.expected { - t.Errorf("expected %v, got %v", test.expected, ok) - } - }) - } -} - -func benchmarkNone(b *testing.B, conf []config.Config, message *message.Message) { - ctx := context.TODO() - for i := 0; i < b.N; i++ { - inspectors, _ := newInspectors(ctx, conf...) 
- op := opNone{inspectors} - _, _ = op.Operate(ctx, message) - } -} - -func BenchmarkNone(b *testing.B) { - for _, test := range noneTests { - b.Run(test.name, - func(b *testing.B) { - message := message.New().SetData(test.test) - benchmarkNone(b, test.conf, message) - }, - ) - } -} - -func TestNewInspector(t *testing.T) { - for _, test := range allTests { - _, err := newInspector(context.TODO(), test.conf[0]) - if err != nil { - t.Error(err) - } - } -} - -func benchmarknewInspector(b *testing.B, conf config.Config) { - for i := 0; i < b.N; i++ { - _, _ = newInspector(context.TODO(), conf) - } -} - -func BenchmarkNewInspector(b *testing.B) { - for _, test := range allTests { - b.Run(test.name, - func(b *testing.B) { - benchmarknewInspector(b, test.conf[0]) - }, - ) - } -} diff --git a/v1/condition/format_json.go b/v1/condition/format_json.go deleted file mode 100644 index 7105a604..00000000 --- a/v1/condition/format_json.go +++ /dev/null @@ -1,46 +0,0 @@ -package condition - -import ( - "context" - "encoding/json" - - "github.com/brexhq/substation/config" - iconfig "github.com/brexhq/substation/internal/config" - "github.com/brexhq/substation/message" -) - -type formatJSONConfig struct{} - -func (c *formatJSONConfig) Decode(in interface{}) error { - return iconfig.Decode(in, c) -} - -func newFormatJSON(_ context.Context, cfg config.Config) (*formatJSON, error) { - conf := formatJSONConfig{} - if err := conf.Decode(cfg.Settings); err != nil { - return nil, err - } - - insp := formatJSON{ - conf: conf, - } - - return &insp, nil -} - -type formatJSON struct { - conf formatJSONConfig -} - -func (c *formatJSON) Inspect(ctx context.Context, msg *message.Message) (bool, error) { - if msg.IsControl() { - return false, nil - } - - return json.Valid(msg.Data()), nil -} - -func (c *formatJSON) String() string { - b, _ := json.Marshal(c.conf) - return string(b) -} diff --git a/v1/condition/format_json_test.go b/v1/condition/format_json_test.go deleted file mode 100644 index cd7603e6..00000000 --- a/v1/condition/format_json_test.go +++ /dev/null @@ -1,89 +0,0 @@ -package condition - -import ( - "context" - "testing" - - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" -) - -var _ inspector = &formatJSON{} - -var jsonValidTests = []struct { - name string - cfg config.Config - test []byte - expected bool -}{ - { - "pass", - config.Config{}, - []byte(`{"hello":"world"}`), - true, - }, - { - "pass", - config.Config{}, - []byte(`["a","b","c"]`), - true, - }, - { - "fail", - config.Config{}, - []byte(`{hello:"world"}`), - false, - }, - { - "fail", - config.Config{}, - []byte(`a`), - false, - }, -} - -func TestFormatJSON(t *testing.T) { - ctx := context.TODO() - - for _, test := range jsonValidTests { - t.Run(test.name, func(t *testing.T) { - message := message.New().SetData(test.test) - insp, err := newFormatJSON(ctx, test.cfg) - if err != nil { - t.Fatal(err) - } - - check, err := insp.Inspect(ctx, message) - if err != nil { - t.Error(err) - } - - if test.expected != check { - t.Errorf("expected %v, got %v, %v", test.expected, check, string(test.test)) - } - }) - } -} - -func benchmarkFormatJSONByte(b *testing.B, insp *formatJSON, message *message.Message) { - ctx := context.TODO() - for i := 0; i < b.N; i++ { - _, _ = insp.Inspect(ctx, message) - } -} - -func BenchmarkFormatJSONByte(b *testing.B) { - for _, test := range jsonValidTests { - insp, err := newFormatJSON(context.TODO(), test.cfg) - if err != nil { - b.Fatal(err) - } - - b.Run(test.name, - func(b *testing.B) { - 
message := message.New().SetData(test.test) - benchmarkFormatJSONByte(b, insp, message) - }, - ) - } -} diff --git a/v1/condition/format_mime.go b/v1/condition/format_mime.go deleted file mode 100644 index f5889a41..00000000 --- a/v1/condition/format_mime.go +++ /dev/null @@ -1,71 +0,0 @@ -package condition - -import ( - "context" - "encoding/json" - "fmt" - - "github.com/brexhq/substation/config" - iconfig "github.com/brexhq/substation/internal/config" - "github.com/brexhq/substation/internal/errors" - "github.com/brexhq/substation/internal/media" - "github.com/brexhq/substation/message" -) - -type formatMIMEConfig struct { - // Type is the media type used for comparison during inspection. Media types follow this specification: https://mimesniff.spec.whatwg.org/. - Type string `json:"type"` - - Object iconfig.Object `json:"object"` -} - -func (c *formatMIMEConfig) Decode(in interface{}) error { - return iconfig.Decode(in, c) -} - -func (c *formatMIMEConfig) Validate() error { - if c.Type == "" { - return fmt.Errorf("type: %v", errors.ErrMissingRequiredOption) - } - - return nil -} - -func newFormatMIME(_ context.Context, cfg config.Config) (*formatMIME, error) { - conf := formatMIMEConfig{} - if err := conf.Decode(cfg.Settings); err != nil { - return nil, err - } - - if err := conf.Validate(); err != nil { - return nil, err - } - - insp := formatMIME{ - conf: conf, - } - - return &insp, nil -} - -type formatMIME struct { - conf formatMIMEConfig -} - -func (c *formatMIME) Inspect(ctx context.Context, msg *message.Message) (bool, error) { - if msg.IsControl() { - return false, nil - } - - media := media.Bytes(msg.Data()) - if media == c.conf.Type { - return true, nil - } - - return false, nil -} - -func (c *formatMIME) String() string { - b, _ := json.Marshal(c.conf) - return string(b) -} diff --git a/v1/condition/format_mime_test.go b/v1/condition/format_mime_test.go deleted file mode 100644 index a33dfd7f..00000000 --- a/v1/condition/format_mime_test.go +++ /dev/null @@ -1,123 +0,0 @@ -package condition - -import ( - "context" - "testing" - - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" -) - -var _ inspector = &formatMIME{} - -var formatMIMETests = []struct { - name string - cfg config.Config - test []byte - expected bool -}{ - // Matching Gzip against valid Gzip header. - { - "pass gzip", - config.Config{ - Settings: map[string]interface{}{ - "object": map[string]interface{}{ - "source_key": "ip_address", - }, - "type": "application/x-gzip", - }, - }, - []byte{31, 139, 8, 0, 0, 0, 0, 0, 0, 255}, - true, - }, - // Matching Gzip against invalid Gzip header (bytes swapped). - { - "fail gzip", - config.Config{ - Settings: map[string]interface{}{ - "type": "application/x-gzip", - }, - }, - []byte{255, 139, 8, 0, 0, 0, 0, 0, 0, 31}, - false, - }, - // Matching Zip against valid Zip header. - { - "pass zip", - config.Config{ - Settings: map[string]interface{}{ - "type": "application/zip", - }, - }, - []byte{80, 75, 0o3, 0o4}, - true, - }, - // Matching Gzip against valid Zip header. - { - "fail zip", - config.Config{ - Settings: map[string]interface{}{ - "type": "application/zip", - }, - }, - []byte{31, 139, 8, 0, 0, 0, 0, 0, 0, 255}, - false, - }, - // Matching Zip against invalid Zip header (bytes swapped). 
- { - "fail zip", - config.Config{ - Settings: map[string]interface{}{ - "type": "application/zip", - }, - }, - []byte{0o4, 75, 0o3, 80}, - false, - }, -} - -func TestFormatMIME(t *testing.T) { - ctx := context.TODO() - - for _, test := range formatMIMETests { - t.Run(test.name, func(t *testing.T) { - message := message.New().SetData(test.test) - insp, err := newFormatMIME(ctx, test.cfg) - if err != nil { - t.Fatal(err) - } - - check, err := insp.Inspect(ctx, message) - if err != nil { - t.Error(err) - } - - if test.expected != check { - t.Errorf("expected %v, got %v", test.expected, check) - } - }) - } -} - -func benchmarkFormatMIME(b *testing.B, insp *formatMIME, message *message.Message) { - ctx := context.TODO() - for i := 0; i < b.N; i++ { - _, _ = insp.Inspect(ctx, message) - } -} - -func BenchmarkFormatMIME(b *testing.B) { - for _, test := range formatMIMETests { - insp, err := newFormatMIME(context.TODO(), test.cfg) - if err != nil { - b.Fatal(err) - } - - b.Run(test.name, - func(b *testing.B) { - message := message.New().SetData(test.test) - benchmarkFormatMIME(b, insp, message) - }, - ) - } -} diff --git a/v1/condition/meta_condition.go b/v1/condition/meta_condition.go deleted file mode 100644 index d657d2ee..00000000 --- a/v1/condition/meta_condition.go +++ /dev/null @@ -1,79 +0,0 @@ -package condition - -import ( - "context" - "encoding/json" - "fmt" - - "github.com/brexhq/substation/config" - iconfig "github.com/brexhq/substation/internal/config" - "github.com/brexhq/substation/internal/errors" - "github.com/brexhq/substation/message" -) - -type metaConditionConfig struct { - // Condition used to inspect the message. - Condition Config `json:"condition"` -} - -func (c *metaConditionConfig) Decode(in interface{}) error { - return iconfig.Decode(in, c) -} - -func (c *metaConditionConfig) Validate() error { - if c.Condition.Operator == "" { - return fmt.Errorf("type: %v", errors.ErrMissingRequiredOption) - } - - return nil -} - -func newMetaCondition(ctx context.Context, cfg config.Config) (*metaCondition, error) { - conf := metaConditionConfig{} - if err := conf.Decode(cfg.Settings); err != nil { - return nil, err - } - - if err := conf.Validate(); err != nil { - return nil, err - } - - op, err := New(ctx, conf.Condition) - if err != nil { - return nil, err - } - - meta := metaCondition{ - conf: conf, - op: op, - } - - return &meta, nil -} - -type metaCondition struct { - conf metaConditionConfig - - op Operator -} - -func (c *metaCondition) Inspect(ctx context.Context, msg *message.Message) (output bool, err error) { - if msg.IsControl() { - return false, nil - } - - // This inspector does not directly interpret data, instead the - // message is passed through and each configured inspector - // applies its own data interpretation. 
- match, err := c.op.Operate(ctx, msg) - if err != nil { - return false, err - } - - return match, nil -} - -func (c *metaCondition) String() string { - b, _ := json.Marshal(c.conf) - return string(b) -} diff --git a/v1/condition/meta_condition_test.go b/v1/condition/meta_condition_test.go deleted file mode 100644 index e9d249b1..00000000 --- a/v1/condition/meta_condition_test.go +++ /dev/null @@ -1,109 +0,0 @@ -package condition - -import ( - "context" - "testing" - - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" -) - -var _ inspector = &metaCondition{} - -var metaConditionTests = []struct { - name string - cfg config.Config - data []byte - expected bool -}{ - { - "object", - config.Config{ - Settings: map[string]interface{}{ - "condition": Config{ - Operator: "all", - Inspectors: []config.Config{ - { - Type: "string_contains", - Settings: map[string]interface{}{ - "object": map[string]interface{}{ - "source_key": "a", - }, - "string": "bcd", - }, - }, - }, - }, - }, - }, - []byte(`{"a":"bcd"}`), - true, - }, - { - "data", - config.Config{ - Settings: map[string]interface{}{ - "condition": Config{ - Operator: "all", - Inspectors: []config.Config{ - { - Type: "string_contains", - Settings: map[string]interface{}{ - "string": "bcd", - }, - }, - }, - }, - }, - }, - []byte("bcd"), - true, - }, -} - -func TestMetaCondition(t *testing.T) { - ctx := context.TODO() - - for _, test := range metaConditionTests { - t.Run(test.name, func(t *testing.T) { - message := message.New().SetData(test.data) - - insp, err := newMetaCondition(ctx, test.cfg) - if err != nil { - t.Fatal(err) - } - - check, err := insp.Inspect(ctx, message) - if err != nil { - t.Error(err) - } - - if test.expected != check { - t.Errorf("expected %v, got %v, %v", test.expected, check, string(test.data)) - } - }) - } -} - -func benchmarkMetaCondition(b *testing.B, inspector *metaCondition, message *message.Message) { - ctx := context.TODO() - for i := 0; i < b.N; i++ { - _, _ = inspector.Inspect(ctx, message) - } -} - -func BenchmarkMetaCondition(b *testing.B) { - for _, test := range metaConditionTests { - insp, err := newMetaCondition(context.TODO(), test.cfg) - if err != nil { - b.Fatal(err) - } - - b.Run(test.name, - func(b *testing.B) { - message := message.New().SetData(test.data) - benchmarkMetaCondition(b, insp, message) - }, - ) - } -} diff --git a/v1/condition/meta_err.go b/v1/condition/meta_err.go deleted file mode 100644 index 7a753603..00000000 --- a/v1/condition/meta_err.go +++ /dev/null @@ -1,107 +0,0 @@ -package condition - -import ( - "context" - "encoding/json" - "fmt" - "regexp" - - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" - - iconfig "github.com/brexhq/substation/internal/config" - "github.com/brexhq/substation/internal/errors" -) - -type metaErrConfig struct { - // Inspector used to inspect the message. If the inspector - // throws an error, this inspector will return false. - Inspector config.Config `json:"inspector"` - // ErrorMessages are regular expressions that match error messages and determine - // if the error should be caught. - // - // This is optional and defaults to an empty list (all errors are caught). 
- ErrorMessages []string `json:"error_messages"` -} - -func (c *metaErrConfig) Decode(in interface{}) error { - return iconfig.Decode(in, c) -} - -func (c *metaErrConfig) Validate() error { - if c.Inspector.Type == "" { - return fmt.Errorf("inspector: %v", errors.ErrMissingRequiredOption) - } - - return nil -} - -func newMetaErr(ctx context.Context, cfg config.Config) (*metaErr, error) { - conf := metaErrConfig{} - if err := conf.Decode(cfg.Settings); err != nil { - return nil, err - } - - if err := conf.Validate(); err != nil { - return nil, err - } - - i, err := newInspector(ctx, conf.Inspector) - if err != nil { - return nil, fmt.Errorf("condition: meta_err: %v", err) - } - - meta := metaErr{ - conf: conf, - insp: i, - } - - meta.errorMessages = make([]*regexp.Regexp, len(conf.ErrorMessages)) - for i, em := range conf.ErrorMessages { - re, err := regexp.Compile(em) - if err != nil { - return nil, fmt.Errorf("condition: meta_err: %v", err) - } - - meta.errorMessages[i] = re - } - - return &meta, nil -} - -type metaErr struct { - conf metaErrConfig - - insp inspector - errorMessages []*regexp.Regexp -} - -func (c *metaErr) Inspect(ctx context.Context, msg *message.Message) (bool, error) { - if msg.IsControl() { - return false, nil - } - - match, err := c.insp.Inspect(ctx, msg) - if err != nil { - // Catch all errors. - if len(c.errorMessages) == 0 { - return false, nil - } - - // Catch specific errors. - for _, re := range c.errorMessages { - if re.MatchString(err.Error()) { - return false, nil - } - } - - return false, fmt.Errorf("condition: meta_err: %v", err) - } - - return match, nil -} - -func (c *metaErr) String() string { - b, _ := json.Marshal(c.conf) - return string(b) -} diff --git a/v1/condition/meta_err_test.go b/v1/condition/meta_err_test.go deleted file mode 100644 index d5ccc699..00000000 --- a/v1/condition/meta_err_test.go +++ /dev/null @@ -1,139 +0,0 @@ -package condition - -import ( - "context" - "testing" - - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" -) - -var _ inspector = &metaErr{} - -var metaErrTests = []struct { - name string - cfg config.Config - data []byte - expected bool -}{ - { - "catch_all", - config.Config{ - Settings: map[string]interface{}{ - "inspector": map[string]interface{}{ - "settings": map[string]interface{}{ - "object": map[string]interface{}{ - "source_key": "a", - }, - "inspector": map[string]interface{}{ - "type": "string_starts_with", - "settings": map[string]interface{}{ - "string": "c", - }, - }, - "type": "any", - }, - "type": "meta_for_each", - }, - }, - }, - []byte(`{"a":"bcd"}`), - false, - }, - { - "catch_one", - config.Config{ - Settings: map[string]interface{}{ - "error_messages": []string{"input must be an array"}, - "inspector": map[string]interface{}{ - "settings": map[string]interface{}{ - "object": map[string]interface{}{ - "source_key": "a", - }, - "inspector": map[string]interface{}{ - "type": "string_starts_with", - "settings": map[string]interface{}{ - "string": "c", - }, - }, - "type": "any", - }, - "type": "meta_for_each", - }, - }, - }, - []byte(`{"a":"bcd"}`), - false, - }, - { - "no_error", - config.Config{ - Settings: map[string]interface{}{ - "inspector": map[string]interface{}{ - "settings": map[string]interface{}{ - "object": map[string]interface{}{ - "source_key": "a", - }, - "inspector": map[string]interface{}{ - "type": "string_starts_with", - "settings": map[string]interface{}{ - "string": "c", - }, - }, - "type": "any", - }, - "type": "meta_for_each", - }, - }, - }, - 
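// With an array input the nested meta_for_each inspection succeeds, so
// no error is raised and the match result passes through unchanged.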
[]byte(`{"a":["bcd"]}`), - true, - }, -} - -func TestMetaErr(t *testing.T) { - ctx := context.TODO() - - for _, test := range metaErrTests { - t.Run(test.name, func(t *testing.T) { - message := message.New().SetData(test.data) - - insp, err := newMetaErr(ctx, test.cfg) - if err != nil { - t.Fatal(err) - } - - check, err := insp.Inspect(ctx, message) - if err != nil { - t.Error(err) - } - - if test.expected != check { - t.Errorf("expected %v, got %v, %v", test.expected, check, string(test.data)) - } - }) - } -} - -func benchmarkMetaErr(b *testing.B, insp *metaErr, message *message.Message) { - ctx := context.TODO() - for i := 0; i < b.N; i++ { - _, _ = insp.Inspect(ctx, message) - } -} - -func BenchmarkMetaErr(b *testing.B) { - for _, test := range metaErrTests { - insp, err := newMetaErr(context.TODO(), test.cfg) - if err != nil { - b.Fatal(err) - } - - b.Run(test.name, - func(b *testing.B) { - message := message.New().SetData(test.data) - benchmarkMetaErr(b, insp, message) - }, - ) - } -} diff --git a/v1/condition/meta_for_each.go b/v1/condition/meta_for_each.go deleted file mode 100644 index 433102b0..00000000 --- a/v1/condition/meta_for_each.go +++ /dev/null @@ -1,141 +0,0 @@ -package condition - -import ( - "context" - "encoding/json" - "fmt" - - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" - "golang.org/x/exp/slices" - - iconfig "github.com/brexhq/substation/internal/config" - "github.com/brexhq/substation/internal/errors" -) - -type metaForEachConfig struct { - // Type determines the method of combining results from the inspector. - // - // Must be one of: - // - none: none of the elements match the inspector - // - any: at least one of the elements match the inspector - // - all: all of the elements match the inspector - Type string `json:"type"` - // Inspector applied to each element. - Inspector config.Config `json:"inspector"` - - Object iconfig.Object `json:"object"` -} - -func (c *metaForEachConfig) Decode(in interface{}) error { - return iconfig.Decode(in, c) -} - -func (c *metaForEachConfig) Validate() error { - if c.Type == "" { - return fmt.Errorf("type: %v", errors.ErrMissingRequiredOption) - } - - if !slices.Contains( - []string{ - "none", - "any", - "all", - }, - c.Type) { - return fmt.Errorf("type %q: %v", c.Type, errors.ErrInvalidOption) - } - - if c.Inspector.Type == "" { - return fmt.Errorf("inspector: %v", errors.ErrMissingRequiredOption) - } - - return nil -} - -func newMetaForEach(ctx context.Context, cfg config.Config) (*metaForEach, error) { - conf := metaForEachConfig{} - if err := conf.Decode(cfg.Settings); err != nil { - return nil, err - } - - if err := conf.Validate(); err != nil { - return nil, err - } - - i, err := newInspector(ctx, conf.Inspector) - if err != nil { - return nil, fmt.Errorf("condition: meta_for_each: %v", err) - } - - meta := metaForEach{ - conf: conf, - insp: i, - } - - return &meta, nil -} - -type metaForEach struct { - conf metaForEachConfig - - insp inspector -} - -func (c *metaForEach) Inspect(ctx context.Context, msg *message.Message) (bool, error) { - if msg.IsControl() { - return false, nil - } - - // This is required to support JSON arrays and objects. 
- var value message.Value - if c.conf.Object.SourceKey == "" { - value = msg.GetValue("@this") - } else { - value = msg.GetValue(c.conf.Object.SourceKey) - } - - if !value.Exists() { - return false, nil - } - - if !value.IsArray() { - return false, fmt.Errorf("condition: meta_for_each: %v", "input must be an array") - } - - var results []bool - for _, res := range value.Array() { - data := []byte(res.String()) - msg := message.New().SetData(data) - - inspected, err := c.insp.Inspect(ctx, msg) - if err != nil { - return false, fmt.Errorf("condition: meta_for_each: %v", err) - } - results = append(results, inspected) - } - - total := len(results) - matched := 0 - for _, v := range results { - if v { - matched++ - } - } - - switch c.conf.Type { - case "any": - return matched > 0, nil - case "all": - return total == matched, nil - case "none": - return matched == 0, nil - } - - return false, nil -} - -func (c *metaForEach) String() string { - b, _ := json.Marshal(c.conf) - return string(b) -} diff --git a/v1/condition/meta_for_each_test.go b/v1/condition/meta_for_each_test.go deleted file mode 100644 index 509e4d88..00000000 --- a/v1/condition/meta_for_each_test.go +++ /dev/null @@ -1,181 +0,0 @@ -package condition - -import ( - "context" - "testing" - - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" -) - -var _ inspector = &metaForEach{} - -var metaForEachTests = []struct { - name string - cfg config.Config - test []byte - expected bool - err error -}{ - { - "string starts_with all", - config.Config{ - Settings: map[string]interface{}{ - "object": map[string]interface{}{ - "source_key": "input", - }, - "type": "all", - "inspector": map[string]interface{}{ - "type": "string_starts_with", - "settings": map[string]interface{}{ - "string": "f", - }, - }, - }, - }, - []byte(`{"input":["foo","fizz","flop"]}`), - true, - nil, - }, - { - "ip private all", - config.Config{ - Settings: map[string]interface{}{ - "object": map[string]interface{}{ - "source_key": "input", - }, - "type": "all", - "inspector": map[string]interface{}{ - "type": "network_ip_private", - }, - }, - }, - []byte(`{"input":["192.168.1.2","10.0.42.1","172.16.4.2"]}`), - true, - nil, - }, - { - "string_match", - config.Config{ - Settings: map[string]interface{}{ - "object": map[string]interface{}{ - "source_key": "input", - }, - "type": "any", - "inspector": map[string]interface{}{ - "type": "string_match", - "settings": map[string]interface{}{ - "pattern": "^fizz$", - }, - }, - }, - }, - []byte(`{"input":["foo","fizz","flop"]}`), - true, - nil, - }, - { - "string greater_than", - config.Config{ - Settings: map[string]interface{}{ - "type": "any", - "inspector": map[string]interface{}{ - "type": "string_greater_than", - "settings": map[string]interface{}{ - "string": "0", - }, - }, - }, - }, - []byte(`[0,1,2]`), - true, - nil, - }, - { - "length none", - config.Config{ - Settings: map[string]interface{}{ - "object": map[string]interface{}{ - "source_key": "input", - }, - "type": "none", - "inspector": map[string]interface{}{ - "type": "number_length_greater_than", - "settings": map[string]interface{}{ - "value": 7, - }, - }, - }, - }, - []byte(`{"input":["fooo","fizz","flop"]}`), - true, - nil, - }, - { - "length all", - config.Config{ - Settings: map[string]interface{}{ - "object": map[string]interface{}{ - "source_key": "input", - }, - "type": "all", - "inspector": map[string]interface{}{ - "type": "number_length_equal_to", - "settings": map[string]interface{}{ - "value": 4, - }, - }, - }, - }, - 
[]byte(`{"input":["fooo","fizz","flop"]}`), - true, - nil, - }, -} - -func TestMetaForEach(t *testing.T) { - ctx := context.TODO() - - for _, tt := range metaForEachTests { - t.Run(tt.name, func(t *testing.T) { - message := message.New().SetData(tt.test) - - insp, err := newMetaForEach(ctx, tt.cfg) - if err != nil { - t.Fatal(err) - } - - check, err := insp.Inspect(ctx, message) - if err != nil { - t.Error(err) - } - - if tt.expected != check { - t.Errorf("expected %v, got %v, %v", tt.expected, check, string(tt.test)) - } - }) - } -} - -func benchmarkMetaForEach(b *testing.B, insp *metaForEach, message *message.Message) { - ctx := context.TODO() - for i := 0; i < b.N; i++ { - _, _ = insp.Inspect(ctx, message) - } -} - -func BenchmarkMetaForEach(b *testing.B) { - for _, test := range metaForEachTests { - insp, err := newMetaForEach(context.TODO(), test.cfg) - if err != nil { - b.Fatal(err) - } - - b.Run(test.name, - func(b *testing.B) { - message := message.New().SetData(test.test) - benchmarkMetaForEach(b, insp, message) - }, - ) - } -} diff --git a/v1/condition/meta_negate.go b/v1/condition/meta_negate.go deleted file mode 100644 index c61ec60b..00000000 --- a/v1/condition/meta_negate.go +++ /dev/null @@ -1,77 +0,0 @@ -package condition - -import ( - "context" - "encoding/json" - "fmt" - - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" - - iconfig "github.com/brexhq/substation/internal/config" - "github.com/brexhq/substation/internal/errors" -) - -type metaNegateConfig struct { - // Inspector used to inspect the message. - Inspector config.Config `json:"inspector"` -} - -func (c *metaNegateConfig) Decode(in interface{}) error { - return iconfig.Decode(in, c) -} - -func (c *metaNegateConfig) Validate() error { - if c.Inspector.Type == "" { - return fmt.Errorf("inspector: %v", errors.ErrMissingRequiredOption) - } - - return nil -} - -func newMetaNegate(ctx context.Context, cfg config.Config) (*metaNegate, error) { - conf := metaNegateConfig{} - if err := conf.Decode(cfg.Settings); err != nil { - return nil, err - } - - if err := conf.Validate(); err != nil { - return nil, err - } - - i, err := newInspector(ctx, conf.Inspector) - if err != nil { - return nil, fmt.Errorf("condition: meta_negate: %v", err) - } - - meta := metaNegate{ - conf: conf, - insp: i, - } - - return &meta, nil -} - -type metaNegate struct { - conf metaNegateConfig - - insp inspector -} - -func (c *metaNegate) Inspect(ctx context.Context, msg *message.Message) (bool, error) { - if msg.IsControl() { - return false, nil - } - - match, err := c.insp.Inspect(ctx, msg) - if err != nil { - return false, err - } - - return !match, nil -} - -func (c *metaNegate) String() string { - b, _ := json.Marshal(c.conf) - return string(b) -} diff --git a/v1/condition/meta_negate_test.go b/v1/condition/meta_negate_test.go deleted file mode 100644 index 9c8451c9..00000000 --- a/v1/condition/meta_negate_test.go +++ /dev/null @@ -1,99 +0,0 @@ -package condition - -import ( - "context" - "testing" - - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" -) - -var _ inspector = &metaNegate{} - -var metaNegateTests = []struct { - name string - cfg config.Config - data []byte - expected bool -}{ - { - "object", - config.Config{ - Settings: map[string]interface{}{ - "inspector": map[string]interface{}{ - "settings": map[string]interface{}{ - "object": map[string]interface{}{ - "source_key": "a", - }, - "value": "bcd", - }, - "type": "string_equal_to", - }, - }, - }, - []byte(`{"a":"bcd"}`), 
- false, - }, - { - "data", - config.Config{ - Settings: map[string]interface{}{ - "inspector": map[string]interface{}{ - "type": "string_equal_to", - "settings": map[string]interface{}{ - "value": "bcd", - }, - }, - }, - }, - []byte(`bcd`), - false, - }, -} - -func TestMetaNegate(t *testing.T) { - ctx := context.TODO() - - for _, test := range metaNegateTests { - t.Run(test.name, func(t *testing.T) { - message := message.New().SetData(test.data) - - insp, err := newMetaNegate(ctx, test.cfg) - if err != nil { - t.Fatal(err) - } - - check, err := insp.Inspect(ctx, message) - if err != nil { - t.Error(err) - } - - if test.expected != check { - t.Errorf("expected %v, got %v, %v", test.expected, check, string(test.data)) - } - }) - } -} - -func benchmarkMetaNegate(b *testing.B, insp *metaNegate, message *message.Message) { - ctx := context.TODO() - for i := 0; i < b.N; i++ { - _, _ = insp.Inspect(ctx, message) - } -} - -func BenchmarkMetaNegate(b *testing.B) { - for _, test := range metaNegateTests { - insp, err := newMetaNegate(context.TODO(), test.cfg) - if err != nil { - b.Fatal(err) - } - - b.Run(test.name, - func(b *testing.B) { - message := message.New().SetData(test.data) - benchmarkMetaNegate(b, insp, message) - }, - ) - } -} diff --git a/v1/condition/network_ip_global_unicast.go b/v1/condition/network_ip_global_unicast.go deleted file mode 100644 index 54a46e7a..00000000 --- a/v1/condition/network_ip_global_unicast.go +++ /dev/null @@ -1,50 +0,0 @@ -package condition - -import ( - "context" - "encoding/json" - "net" - - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" -) - -func newNetworkIPGlobalUnicast(_ context.Context, cfg config.Config) (*networkIPGlobalUnicast, error) { - conf := networkIPConfig{} - if err := conf.Decode(cfg.Settings); err != nil { - return nil, err - } - - insp := networkIPGlobalUnicast{ - conf: conf, - } - - return &insp, nil -} - -type networkIPGlobalUnicast struct { - conf networkIPConfig -} - -func (insp *networkIPGlobalUnicast) Inspect(ctx context.Context, msg *message.Message) (bool, error) { - if msg.IsControl() { - return false, nil - } - - if insp.conf.Object.SourceKey == "" { - str := string(msg.Data()) - ip := net.ParseIP(str) - - return ip.IsGlobalUnicast(), nil - } - - value := msg.GetValue(insp.conf.Object.SourceKey) - ip := net.ParseIP(value.String()) - - return ip.IsGlobalUnicast(), nil -} - -func (insp *networkIPGlobalUnicast) String() string { - b, _ := json.Marshal(insp.conf) - return string(b) -} diff --git a/v1/condition/network_ip_global_unicast_test.go b/v1/condition/network_ip_global_unicast_test.go deleted file mode 100644 index 896ac873..00000000 --- a/v1/condition/network_ip_global_unicast_test.go +++ /dev/null @@ -1,71 +0,0 @@ -package condition - -import ( - "context" - "testing" - - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" -) - -var _ inspector = &networkIPGlobalUnicast{} - -var networkIPGlobalUnicastTests = []struct { - name string - cfg config.Config - test []byte - expected bool -}{ - { - "pass", - config.Config{}, - []byte("8.8.8.8"), - true, - }, -} - -func TestNetworkIPGlobalUnicast(t *testing.T) { - ctx := context.TODO() - - for _, test := range networkIPGlobalUnicastTests { - t.Run(test.name, func(t *testing.T) { - message := message.New().SetData(test.test) - insp, err := newNetworkIPGlobalUnicast(ctx, test.cfg) - if err != nil { - t.Fatal(err) - } - - check, err := insp.Inspect(ctx, message) - if err != nil { - t.Error(err) - } - - if test.expected != 
check { - t.Errorf("expected %v, got %v, %v", test.expected, check, string(test.test)) - } - }) - } -} - -func benchmarkNetworkIPGlobalUnicastByte(b *testing.B, insp *networkIPGlobalUnicast, message *message.Message) { - ctx := context.TODO() - for i := 0; i < b.N; i++ { - _, _ = insp.Inspect(ctx, message) - } -} - -func BenchmarkNetworkIPGlobalUnicastByte(b *testing.B) { - for _, test := range networkIPGlobalUnicastTests { - insp, err := newNetworkIPGlobalUnicast(context.TODO(), test.cfg) - if err != nil { - b.Fatal(err) - } - - b.Run(test.name, - func(b *testing.B) { - message := message.New().SetData(test.test) - benchmarkNetworkIPGlobalUnicastByte(b, insp, message) - }, - ) - } -} diff --git a/v1/condition/network_ip_link_local_multicast.go b/v1/condition/network_ip_link_local_multicast.go deleted file mode 100644 index 6085cc84..00000000 --- a/v1/condition/network_ip_link_local_multicast.go +++ /dev/null @@ -1,50 +0,0 @@ -package condition - -import ( - "context" - "encoding/json" - "net" - - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" -) - -func newNetworkIPLinkLocalMulticast(_ context.Context, cfg config.Config) (*networkIPLinkLocalMulticast, error) { - conf := networkIPConfig{} - if err := conf.Decode(cfg.Settings); err != nil { - return nil, err - } - - insp := networkIPLinkLocalMulticast{ - conf: conf, - } - - return &insp, nil -} - -type networkIPLinkLocalMulticast struct { - conf networkIPConfig -} - -func (insp *networkIPLinkLocalMulticast) Inspect(ctx context.Context, msg *message.Message) (bool, error) { - if msg.IsControl() { - return false, nil - } - - if insp.conf.Object.SourceKey == "" { - str := string(msg.Data()) - ip := net.ParseIP(str) - - return ip.IsLinkLocalMulticast(), nil - } - - value := msg.GetValue(insp.conf.Object.SourceKey) - ip := net.ParseIP(value.String()) - - return ip.IsLinkLocalMulticast(), nil -} - -func (insp *networkIPLinkLocalMulticast) String() string { - b, _ := json.Marshal(insp.conf) - return string(b) -} diff --git a/v1/condition/network_ip_link_local_multicast_test.go b/v1/condition/network_ip_link_local_multicast_test.go deleted file mode 100644 index 65bff208..00000000 --- a/v1/condition/network_ip_link_local_multicast_test.go +++ /dev/null @@ -1,71 +0,0 @@ -package condition - -import ( - "context" - "testing" - - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" -) - -var _ inspector = &networkIPLinkLocalMulticast{} - -var networkIPLinkLocalMulticastTests = []struct { - name string - cfg config.Config - test []byte - expected bool -}{ - { - "pass", - config.Config{}, - []byte("224.0.0.12"), - true, - }, -} - -func TestNetworkIPLinkLocalMulticast(t *testing.T) { - ctx := context.TODO() - - for _, test := range networkIPLinkLocalMulticastTests { - t.Run(test.name, func(t *testing.T) { - message := message.New().SetData(test.test) - insp, err := newNetworkIPLinkLocalMulticast(ctx, test.cfg) - if err != nil { - t.Fatal(err) - } - - check, err := insp.Inspect(ctx, message) - if err != nil { - t.Error(err) - } - - if test.expected != check { - t.Errorf("expected %v, got %v, %v", test.expected, check, string(test.test)) - } - }) - } -} - -func benchmarkNetworkIPLinkLocalMulticastByte(b *testing.B, insp *networkIPLinkLocalMulticast, message *message.Message) { - ctx := context.TODO() - for i := 0; i < b.N; i++ { - _, _ = insp.Inspect(ctx, message) - } -} - -func BenchmarkNetworkIPLinkLocalMulticastByte(b *testing.B) { - for _, test := range networkIPLinkLocalMulticastTests { - insp, 
err := newNetworkIPLinkLocalMulticast(context.TODO(), test.cfg) - if err != nil { - b.Fatal(err) - } - - b.Run(test.name, - func(b *testing.B) { - message := message.New().SetData(test.test) - benchmarkNetworkIPLinkLocalMulticastByte(b, insp, message) - }, - ) - } -} diff --git a/v1/condition/network_ip_link_local_unicast.go b/v1/condition/network_ip_link_local_unicast.go deleted file mode 100644 index 2ead95c8..00000000 --- a/v1/condition/network_ip_link_local_unicast.go +++ /dev/null @@ -1,50 +0,0 @@ -package condition - -import ( - "context" - "encoding/json" - "net" - - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" -) - -func newNetworkIPLinkLocalUnicast(_ context.Context, cfg config.Config) (*networkIPLinkLocalUnicast, error) { - conf := networkIPConfig{} - if err := conf.Decode(cfg.Settings); err != nil { - return nil, err - } - - insp := networkIPLinkLocalUnicast{ - conf: conf, - } - - return &insp, nil -} - -type networkIPLinkLocalUnicast struct { - conf networkIPConfig -} - -func (insp *networkIPLinkLocalUnicast) Inspect(ctx context.Context, msg *message.Message) (bool, error) { - if msg.IsControl() { - return false, nil - } - - if insp.conf.Object.SourceKey == "" { - str := string(msg.Data()) - ip := net.ParseIP(str) - - return ip.IsLinkLocalUnicast(), nil - } - - value := msg.GetValue(insp.conf.Object.SourceKey) - ip := net.ParseIP(value.String()) - - return ip.IsLinkLocalUnicast(), nil -} - -func (insp *networkIPLinkLocalUnicast) String() string { - b, _ := json.Marshal(insp.conf) - return string(b) -} diff --git a/v1/condition/network_ip_link_local_unicast_test.go b/v1/condition/network_ip_link_local_unicast_test.go deleted file mode 100644 index 46ee4ede..00000000 --- a/v1/condition/network_ip_link_local_unicast_test.go +++ /dev/null @@ -1,71 +0,0 @@ -package condition - -import ( - "context" - "testing" - - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" -) - -var _ inspector = &networkIPLinkLocalUnicast{} - -var networkIPLinkLocalUnicastTests = []struct { - name string - cfg config.Config - test []byte - expected bool -}{ - { - "pass", - config.Config{}, - []byte("169.254.255.255"), - true, - }, -} - -func TestNetworkIPLinkLocalUnicast(t *testing.T) { - ctx := context.TODO() - - for _, test := range networkIPLinkLocalUnicastTests { - t.Run(test.name, func(t *testing.T) { - message := message.New().SetData(test.test) - insp, err := newNetworkIPLinkLocalUnicast(ctx, test.cfg) - if err != nil { - t.Fatal(err) - } - - check, err := insp.Inspect(ctx, message) - if err != nil { - t.Error(err) - } - - if test.expected != check { - t.Errorf("expected %v, got %v, %v", test.expected, check, string(test.test)) - } - }) - } -} - -func benchmarkNetworkIPLinkLocalUnicastByte(b *testing.B, insp *networkIPLinkLocalUnicast, message *message.Message) { - ctx := context.TODO() - for i := 0; i < b.N; i++ { - _, _ = insp.Inspect(ctx, message) - } -} - -func BenchmarkNetworkIPLinkLocalUnicastByte(b *testing.B) { - for _, test := range networkIPLinkLocalUnicastTests { - insp, err := newNetworkIPLinkLocalUnicast(context.TODO(), test.cfg) - if err != nil { - b.Fatal(err) - } - - b.Run(test.name, - func(b *testing.B) { - message := message.New().SetData(test.test) - benchmarkNetworkIPLinkLocalUnicastByte(b, insp, message) - }, - ) - } -} diff --git a/v1/condition/network_ip_loopback.go b/v1/condition/network_ip_loopback.go deleted file mode 100644 index d2a9fde0..00000000 --- a/v1/condition/network_ip_loopback.go +++ /dev/null @@ -1,50 
+0,0 @@ -package condition - -import ( - "context" - "encoding/json" - "net" - - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" -) - -func newNetworkIPLoopback(_ context.Context, cfg config.Config) (*networkIPLoopback, error) { - conf := networkIPConfig{} - if err := conf.Decode(cfg.Settings); err != nil { - return nil, err - } - - insp := networkIPLoopback{ - conf: conf, - } - - return &insp, nil -} - -type networkIPLoopback struct { - conf networkIPConfig -} - -func (insp *networkIPLoopback) Inspect(ctx context.Context, msg *message.Message) (bool, error) { - if msg.IsControl() { - return false, nil - } - - if insp.conf.Object.SourceKey == "" { - str := string(msg.Data()) - ip := net.ParseIP(str) - - return ip.IsLoopback(), nil - } - - value := msg.GetValue(insp.conf.Object.SourceKey) - ip := net.ParseIP(value.String()) - - return ip.IsLoopback(), nil -} - -func (insp *networkIPLoopback) String() string { - b, _ := json.Marshal(insp.conf) - return string(b) -} diff --git a/v1/condition/network_ip_loopback_test.go b/v1/condition/network_ip_loopback_test.go deleted file mode 100644 index 8be780d4..00000000 --- a/v1/condition/network_ip_loopback_test.go +++ /dev/null @@ -1,77 +0,0 @@ -package condition - -import ( - "context" - "testing" - - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" -) - -var _ inspector = &networkIPLoopback{} - -var networkIPLoopbackTests = []struct { - name string - cfg config.Config - test []byte - expected bool -}{ - { - "pass", - config.Config{}, - []byte("127.0.0.1"), - true, - }, - { - "fail", - config.Config{}, - []byte("8.8.8.8"), - false, - }, -} - -func TestNetworkIPLoopback(t *testing.T) { - ctx := context.TODO() - - for _, test := range networkIPLoopbackTests { - t.Run(test.name, func(t *testing.T) { - message := message.New().SetData(test.test) - insp, err := newNetworkIPLoopback(ctx, test.cfg) - if err != nil { - t.Fatal(err) - } - - check, err := insp.Inspect(ctx, message) - if err != nil { - t.Error(err) - } - - if test.expected != check { - t.Errorf("expected %v, got %v, %v", test.expected, check, string(test.test)) - } - }) - } -} - -func benchmarkNetworkIPLoopbackByte(b *testing.B, insp *networkIPLoopback, message *message.Message) { - ctx := context.TODO() - for i := 0; i < b.N; i++ { - _, _ = insp.Inspect(ctx, message) - } -} - -func BenchmarkNetworkIPLoopbackByte(b *testing.B) { - for _, test := range networkIPLoopbackTests { - insp, err := newNetworkIPLoopback(context.TODO(), test.cfg) - if err != nil { - b.Fatal(err) - } - - b.Run(test.name, - func(b *testing.B) { - message := message.New().SetData(test.test) - benchmarkNetworkIPLoopbackByte(b, insp, message) - }, - ) - } -} diff --git a/v1/condition/network_ip_multicast.go b/v1/condition/network_ip_multicast.go deleted file mode 100644 index 12fb312c..00000000 --- a/v1/condition/network_ip_multicast.go +++ /dev/null @@ -1,50 +0,0 @@ -package condition - -import ( - "context" - "encoding/json" - "net" - - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" -) - -func newNetworkIPMulticast(_ context.Context, cfg config.Config) (*networkIPMulticast, error) { - conf := networkIPConfig{} - if err := conf.Decode(cfg.Settings); err != nil { - return nil, err - } - - insp := networkIPMulticast{ - conf: conf, - } - - return &insp, nil -} - -type networkIPMulticast struct { - conf networkIPConfig -} - -func (insp *networkIPMulticast) Inspect(ctx context.Context, msg *message.Message) (bool, error) { - if 
msg.IsControl() { - return false, nil - } - - if insp.conf.Object.SourceKey == "" { - str := string(msg.Data()) - ip := net.ParseIP(str) - - return ip.IsMulticast(), nil - } - - value := msg.GetValue(insp.conf.Object.SourceKey) - ip := net.ParseIP(value.String()) - - return ip.IsMulticast(), nil -} - -func (insp *networkIPMulticast) String() string { - b, _ := json.Marshal(insp.conf) - return string(b) -} diff --git a/v1/condition/network_ip_multicast_test.go b/v1/condition/network_ip_multicast_test.go deleted file mode 100644 index a887f274..00000000 --- a/v1/condition/network_ip_multicast_test.go +++ /dev/null @@ -1,71 +0,0 @@ -package condition - -import ( - "context" - "testing" - - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" -) - -var _ inspector = &networkIPMulticast{} - -var networkIPMulticastTests = []struct { - name string - cfg config.Config - test []byte - expected bool -}{ - { - "pass", - config.Config{}, - []byte("224.0.0.12"), - true, - }, -} - -func TestNetworkIPMulticast(t *testing.T) { - ctx := context.TODO() - - for _, test := range networkIPMulticastTests { - t.Run(test.name, func(t *testing.T) { - message := message.New().SetData(test.test) - insp, err := newNetworkIPMulticast(ctx, test.cfg) - if err != nil { - t.Fatal(err) - } - - check, err := insp.Inspect(ctx, message) - if err != nil { - t.Error(err) - } - - if test.expected != check { - t.Errorf("expected %v, got %v, %v", test.expected, check, string(test.test)) - } - }) - } -} - -func benchmarkNetworkIPMulticastByte(b *testing.B, insp *networkIPMulticast, message *message.Message) { - ctx := context.TODO() - for i := 0; i < b.N; i++ { - _, _ = insp.Inspect(ctx, message) - } -} - -func BenchmarkNetworkIPMulticastByte(b *testing.B) { - for _, test := range networkIPMulticastTests { - insp, err := newNetworkIPMulticast(context.TODO(), test.cfg) - if err != nil { - b.Fatal(err) - } - - b.Run(test.name, - func(b *testing.B) { - message := message.New().SetData(test.test) - benchmarkNetworkIPMulticastByte(b, insp, message) - }, - ) - } -} diff --git a/v1/condition/network_ip_private.go b/v1/condition/network_ip_private.go deleted file mode 100644 index 774c69ad..00000000 --- a/v1/condition/network_ip_private.go +++ /dev/null @@ -1,50 +0,0 @@ -package condition - -import ( - "context" - "encoding/json" - "net" - - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" -) - -func newNetworkIPPrivate(_ context.Context, cfg config.Config) (*networkIPPrivate, error) { - conf := networkIPConfig{} - if err := conf.Decode(cfg.Settings); err != nil { - return nil, err - } - - insp := networkIPPrivate{ - conf: conf, - } - - return &insp, nil -} - -type networkIPPrivate struct { - conf networkIPConfig -} - -func (insp *networkIPPrivate) Inspect(ctx context.Context, msg *message.Message) (bool, error) { - if msg.IsControl() { - return false, nil - } - - if insp.conf.Object.SourceKey == "" { - str := string(msg.Data()) - ip := net.ParseIP(str) - - return ip.IsPrivate(), nil - } - - value := msg.GetValue(insp.conf.Object.SourceKey) - ip := net.ParseIP(value.String()) - - return ip.IsPrivate(), nil -} - -func (insp *networkIPPrivate) String() string { - b, _ := json.Marshal(insp.conf) - return string(b) -} diff --git a/v1/condition/network_ip_private_test.go b/v1/condition/network_ip_private_test.go deleted file mode 100644 index 2553618b..00000000 --- a/v1/condition/network_ip_private_test.go +++ /dev/null @@ -1,83 +0,0 @@ -package condition - -import ( - "context" - 
"testing" - - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" -) - -var _ inspector = &networkIPPrivate{} - -var networkIPPrivateTests = []struct { - name string - cfg config.Config - test []byte - expected bool -}{ - { - "pass", - config.Config{}, - []byte("8.8.8.8"), - false, - }, - { - "pass", - config.Config{ - Settings: map[string]interface{}{ - "object": map[string]interface{}{ - "source_key": "ip_address", - }, - }, - }, - []byte(`{"ip_address":"192.168.1.2"}`), - true, - }, -} - -func TestNetworkIPPrivate(t *testing.T) { - ctx := context.TODO() - - for _, test := range networkIPPrivateTests { - t.Run(test.name, func(t *testing.T) { - message := message.New().SetData(test.test) - insp, err := newNetworkIPPrivate(ctx, test.cfg) - if err != nil { - t.Fatal(err) - } - - check, err := insp.Inspect(ctx, message) - if err != nil { - t.Error(err) - } - - if test.expected != check { - t.Errorf("expected %v, got %v, %v", test.expected, check, string(test.test)) - } - }) - } -} - -func benchmarkNetworkIPPrivateByte(b *testing.B, insp *networkIPPrivate, message *message.Message) { - ctx := context.TODO() - for i := 0; i < b.N; i++ { - _, _ = insp.Inspect(ctx, message) - } -} - -func BenchmarkNetworkIPPrivateByte(b *testing.B) { - for _, test := range networkIPPrivateTests { - insp, err := newNetworkIPPrivate(context.TODO(), test.cfg) - if err != nil { - b.Fatal(err) - } - - b.Run(test.name, - func(b *testing.B) { - message := message.New().SetData(test.test) - benchmarkNetworkIPPrivateByte(b, insp, message) - }, - ) - } -} diff --git a/v1/condition/network_ip_unicast.go b/v1/condition/network_ip_unicast.go deleted file mode 100644 index facd6aa0..00000000 --- a/v1/condition/network_ip_unicast.go +++ /dev/null @@ -1,50 +0,0 @@ -package condition - -import ( - "context" - "encoding/json" - "net" - - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" -) - -func newNetworkIPUnicast(_ context.Context, cfg config.Config) (*networkIPUnicast, error) { - conf := networkIPConfig{} - if err := conf.Decode(cfg.Settings); err != nil { - return nil, err - } - - insp := networkIPUnicast{ - conf: conf, - } - - return &insp, nil -} - -type networkIPUnicast struct { - conf networkIPConfig -} - -func (insp *networkIPUnicast) Inspect(ctx context.Context, msg *message.Message) (bool, error) { - if msg.IsControl() { - return false, nil - } - - if insp.conf.Object.SourceKey == "" { - str := string(msg.Data()) - ip := net.ParseIP(str) - - return ip.IsGlobalUnicast() || ip.IsLinkLocalUnicast(), nil - } - - value := msg.GetValue(insp.conf.Object.SourceKey) - ip := net.ParseIP(value.String()) - - return ip.IsGlobalUnicast() || ip.IsLinkLocalUnicast(), nil -} - -func (insp *networkIPUnicast) String() string { - b, _ := json.Marshal(insp.conf) - return string(b) -} diff --git a/v1/condition/network_ip_unicast_test.go b/v1/condition/network_ip_unicast_test.go deleted file mode 100644 index 73fe3c9d..00000000 --- a/v1/condition/network_ip_unicast_test.go +++ /dev/null @@ -1,71 +0,0 @@ -package condition - -import ( - "context" - "testing" - - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" -) - -var _ inspector = &networkIPUnicast{} - -var networkIPUnicastTests = []struct { - name string - cfg config.Config - test []byte - expected bool -}{ - { - "pass", - config.Config{}, - []byte("223.255.255.255"), - true, - }, -} - -func TestNetworkIPUnicast(t *testing.T) { - ctx := context.TODO() - - for _, test := range networkIPUnicastTests { - 
t.Run(test.name, func(t *testing.T) { - message := message.New().SetData(test.test) - insp, err := newNetworkIPUnicast(ctx, test.cfg) - if err != nil { - t.Fatal(err) - } - - check, err := insp.Inspect(ctx, message) - if err != nil { - t.Error(err) - } - - if test.expected != check { - t.Errorf("expected %v, got %v, %v", test.expected, check, string(test.test)) - } - }) - } -} - -func benchmarkNetworkIPUnicastByte(b *testing.B, insp *networkIPUnicast, message *message.Message) { - ctx := context.TODO() - for i := 0; i < b.N; i++ { - _, _ = insp.Inspect(ctx, message) - } -} - -func BenchmarkNetworkIPUnicastByte(b *testing.B) { - for _, test := range networkIPUnicastTests { - insp, err := newNetworkIPUnicast(context.TODO(), test.cfg) - if err != nil { - b.Fatal(err) - } - - b.Run(test.name, - func(b *testing.B) { - message := message.New().SetData(test.test) - benchmarkNetworkIPUnicastByte(b, insp, message) - }, - ) - } -} diff --git a/v1/condition/network_ip_unspecified.go b/v1/condition/network_ip_unspecified.go deleted file mode 100644 index 331ea969..00000000 --- a/v1/condition/network_ip_unspecified.go +++ /dev/null @@ -1,50 +0,0 @@ -package condition - -import ( - "context" - "encoding/json" - "net" - - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" -) - -func newNetworkIPUnspecified(_ context.Context, cfg config.Config) (*networkIPUnspecified, error) { - conf := networkIPConfig{} - if err := conf.Decode(cfg.Settings); err != nil { - return nil, err - } - - insp := networkIPUnspecified{ - conf: conf, - } - - return &insp, nil -} - -type networkIPUnspecified struct { - conf networkIPConfig -} - -func (insp *networkIPUnspecified) Inspect(ctx context.Context, msg *message.Message) (bool, error) { - if msg.IsControl() { - return false, nil - } - - if insp.conf.Object.SourceKey == "" { - str := string(msg.Data()) - ip := net.ParseIP(str) - - return ip.IsUnspecified(), nil - } - - value := msg.GetValue(insp.conf.Object.SourceKey) - ip := net.ParseIP(value.String()) - - return ip.IsUnspecified(), nil -} - -func (c *networkIPUnspecified) String() string { - b, _ := json.Marshal(c.conf) - return string(b) -} diff --git a/v1/condition/network_ip_unspecified_test.go b/v1/condition/network_ip_unspecified_test.go deleted file mode 100644 index 06d0aea6..00000000 --- a/v1/condition/network_ip_unspecified_test.go +++ /dev/null @@ -1,71 +0,0 @@ -package condition - -import ( - "context" - "testing" - - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" -) - -var _ inspector = &networkIPUnspecified{} - -var networkIPUnspecifiedTests = []struct { - name string - cfg config.Config - test []byte - expected bool -}{ - { - "pass", - config.Config{}, - []byte("0.0.0.0"), - true, - }, -} - -func TestNetworkIPUnspecified(t *testing.T) { - ctx := context.TODO() - - for _, test := range networkIPUnspecifiedTests { - t.Run(test.name, func(t *testing.T) { - message := message.New().SetData(test.test) - insp, err := newNetworkIPUnspecified(ctx, test.cfg) - if err != nil { - t.Fatal(err) - } - - check, err := insp.Inspect(ctx, message) - if err != nil { - t.Error(err) - } - - if test.expected != check { - t.Errorf("expected %v, got %v, %v", test.expected, check, string(test.test)) - } - }) - } -} - -func benchmarkNetworkIPUnspecifiedByte(b *testing.B, insp *networkIPUnspecified, message *message.Message) { - ctx := context.TODO() - for i := 0; i < b.N; i++ { - _, _ = insp.Inspect(ctx, message) - } -} - -func BenchmarkNetworkIPUnspecifiedByte(b *testing.B) { 
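// Unlike the operator benchmarks in condition_test.go, the inspector
// here is constructed once per test case outside the timed loop, so
// only Inspect's cost is measured.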
- for _, test := range networkIPUnspecifiedTests { - insp, err := newNetworkIPUnspecified(context.TODO(), test.cfg) - if err != nil { - b.Fatal(err) - } - - b.Run(test.name, - func(b *testing.B) { - message := message.New().SetData(test.test) - benchmarkNetworkIPUnspecifiedByte(b, insp, message) - }, - ) - } -} diff --git a/v1/condition/network_ip_valid.go b/v1/condition/network_ip_valid.go deleted file mode 100644 index 1c231869..00000000 --- a/v1/condition/network_ip_valid.go +++ /dev/null @@ -1,50 +0,0 @@ -package condition - -import ( - "context" - "encoding/json" - "net" - - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" -) - -func newNetworkIPValid(_ context.Context, cfg config.Config) (*networkIPValid, error) { - conf := networkIPConfig{} - if err := conf.Decode(cfg.Settings); err != nil { - return nil, err - } - - insp := networkIPValid{ - conf: conf, - } - - return &insp, nil -} - -type networkIPValid struct { - conf networkIPConfig -} - -func (insp *networkIPValid) Inspect(ctx context.Context, msg *message.Message) (bool, error) { - if msg.IsControl() { - return false, nil - } - - if insp.conf.Object.SourceKey == "" { - str := string(msg.Data()) - ip := net.ParseIP(str) - - return ip != nil, nil - } - - value := msg.GetValue(insp.conf.Object.SourceKey) - ip := net.ParseIP(value.String()) - - return ip != nil, nil -} - -func (insp *networkIPValid) String() string { - b, _ := json.Marshal(insp.conf) - return string(b) -} diff --git a/v1/condition/network_ip_valid_test.go b/v1/condition/network_ip_valid_test.go deleted file mode 100644 index c063c813..00000000 --- a/v1/condition/network_ip_valid_test.go +++ /dev/null @@ -1,71 +0,0 @@ -package condition - -import ( - "context" - "testing" - - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" -) - -var _ inspector = &networkIPValid{} - -var networkIPValidTests = []struct { - name string - cfg config.Config - test []byte - expected bool -}{ - { - "pass", - config.Config{}, - []byte("127.0.0.1"), - true, - }, -} - -func TestNetworkIPValid(t *testing.T) { - ctx := context.TODO() - - for _, test := range networkIPValidTests { - t.Run(test.name, func(t *testing.T) { - message := message.New().SetData(test.test) - insp, err := newNetworkIPValid(ctx, test.cfg) - if err != nil { - t.Fatal(err) - } - - check, err := insp.Inspect(ctx, message) - if err != nil { - t.Error(err) - } - - if test.expected != check { - t.Errorf("expected %v, got %v, %v", test.expected, check, string(test.test)) - } - }) - } -} - -func benchmarkNetworkIPValidByte(b *testing.B, insp *networkIPValid, message *message.Message) { - ctx := context.TODO() - for i := 0; i < b.N; i++ { - _, _ = insp.Inspect(ctx, message) - } -} - -func BenchmarkNetworkIPValidByte(b *testing.B) { - for _, test := range networkIPValidTests { - insp, err := newNetworkIPValid(context.TODO(), test.cfg) - if err != nil { - b.Fatal(err) - } - - b.Run(test.name, - func(b *testing.B) { - message := message.New().SetData(test.test) - benchmarkNetworkIPValidByte(b, insp, message) - }, - ) - } -} diff --git a/v1/condition/number_bitwise_and.go b/v1/condition/number_bitwise_and.go deleted file mode 100644 index 643e0008..00000000 --- a/v1/condition/number_bitwise_and.go +++ /dev/null @@ -1,44 +0,0 @@ -package condition - -import ( - "context" - "strconv" - - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" -) - -func newNumberBitwiseAND(_ context.Context, cfg config.Config) (*numberBitwiseAND, error) { - conf := 
numberBitwiseConfig{} - if err := conf.Decode(cfg.Settings); err != nil { - return nil, err - } - - insp := numberBitwiseAND{ - conf: conf, - } - - return &insp, nil -} - -type numberBitwiseAND struct { - conf numberBitwiseConfig -} - -func (insp *numberBitwiseAND) Inspect(ctx context.Context, msg *message.Message) (output bool, err error) { - if msg.IsControl() { - return false, nil - } - - if insp.conf.Object.SourceKey == "" { - value, err := strconv.ParseInt(string(msg.Data()), 10, 64) - if err != nil { - return false, err - } - - return value&insp.conf.Value != 0, nil - } - - value := msg.GetValue(insp.conf.Object.SourceKey) - return value.Int()&insp.conf.Value != 0, nil -} diff --git a/v1/condition/number_bitwise_and_test.go b/v1/condition/number_bitwise_and_test.go deleted file mode 100644 index e6f2b219..00000000 --- a/v1/condition/number_bitwise_and_test.go +++ /dev/null @@ -1,87 +0,0 @@ -package condition - -import ( - "context" - "testing" - - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" -) - -var _ inspector = &numberBitwiseAND{} - -var numberBitwiseANDTests = []struct { - name string - cfg config.Config - test []byte - expected bool -}{ - { - "pass", - config.Config{ - Settings: map[string]interface{}{ - "value": 0x0001, - }, - }, - []byte(`570506001`), - true, - }, - { - "fail", - config.Config{ - Settings: map[string]interface{}{ - "value": 0x0002, - }, - }, - []byte(`570506001`), - false, - }, -} - -func TestNumberBitwiseAND(t *testing.T) { - ctx := context.TODO() - - for _, test := range numberBitwiseANDTests { - t.Run(test.name, func(t *testing.T) { - message := message.New().SetData(test.test) - insp, err := newNumberBitwiseAND(ctx, test.cfg) - if err != nil { - t.Fatal(err) - } - - check, err := insp.Inspect(ctx, message) - if err != nil { - t.Error(err) - } - - if test.expected != check { - t.Errorf("expected %v, got %v", test.expected, check) - t.Errorf("settings: %+v", test.cfg) - t.Errorf("test: %+v", string(test.test)) - } - }) - } -} - -func benchmarkNumberBitwiseAND(b *testing.B, insp *numberBitwiseAND, message *message.Message) { - ctx := context.TODO() - for i := 0; i < b.N; i++ { - _, _ = insp.Inspect(ctx, message) - } -} - -func BenchmarkNumberBitwiseAND(b *testing.B) { - for _, test := range numberBitwiseANDTests { - insp, err := newNumberBitwiseAND(context.TODO(), test.cfg) - if err != nil { - b.Fatal(err) - } - - b.Run(test.name, - func(b *testing.B) { - message := message.New().SetData(test.test) - benchmarkNumberBitwiseAND(b, insp, message) - }, - ) - } -} diff --git a/v1/condition/number_bitwise_not.go b/v1/condition/number_bitwise_not.go deleted file mode 100644 index 9c5372d9..00000000 --- a/v1/condition/number_bitwise_not.go +++ /dev/null @@ -1,44 +0,0 @@ -package condition - -import ( - "context" - "strconv" - - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" -) - -func newNumberBitwiseNOT(_ context.Context, cfg config.Config) (*numberBitwiseNOT, error) { - conf := numberBitwiseConfig{} - if err := conf.Decode(cfg.Settings); err != nil { - return nil, err - } - - insp := numberBitwiseNOT{ - conf: conf, - } - - return &insp, nil -} - -type numberBitwiseNOT struct { - conf numberBitwiseConfig -} - -func (insp *numberBitwiseNOT) Inspect(ctx context.Context, msg *message.Message) (output bool, err error) { - if msg.IsControl() { - return false, nil - } - - if insp.conf.Object.SourceKey == "" { - value, err := strconv.ParseInt(string(msg.Data()), 10, 64) - if err != nil { - return false, err 
- } - - return ^value != 0, nil - } - - value := msg.GetValue(insp.conf.Object.SourceKey) - return ^value.Int() != 0, nil -} diff --git a/v1/condition/number_bitwise_or.go b/v1/condition/number_bitwise_or.go deleted file mode 100644 index 7abce320..00000000 --- a/v1/condition/number_bitwise_or.go +++ /dev/null @@ -1,44 +0,0 @@ -package condition - -import ( - "context" - "strconv" - - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" -) - -func newNumberBitwiseOR(_ context.Context, cfg config.Config) (*numberBitwiseOR, error) { - conf := numberBitwiseConfig{} - if err := conf.Decode(cfg.Settings); err != nil { - return nil, err - } - - insp := numberBitwiseOR{ - conf: conf, - } - - return &insp, nil -} - -type numberBitwiseOR struct { - conf numberBitwiseConfig -} - -func (insp *numberBitwiseOR) Inspect(ctx context.Context, msg *message.Message) (output bool, err error) { - if msg.IsControl() { - return false, nil - } - - if insp.conf.Object.SourceKey == "" { - value, err := strconv.ParseInt(string(msg.Data()), 10, 64) - if err != nil { - return false, err - } - - return value|insp.conf.Value != 0, nil - } - - value := msg.GetValue(insp.conf.Object.SourceKey) - return value.Int()|insp.conf.Value != 0, nil -} diff --git a/v1/condition/number_bitwise_or_test.go b/v1/condition/number_bitwise_or_test.go deleted file mode 100644 index f6335edd..00000000 --- a/v1/condition/number_bitwise_or_test.go +++ /dev/null @@ -1,77 +0,0 @@ -package condition - -import ( - "context" - "testing" - - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" -) - -var _ inspector = &numberBitwiseOR{} - -var numberBitwiseORTests = []struct { - name string - cfg config.Config - test []byte - expected bool -}{ - { - "pass", - config.Config{ - Settings: map[string]interface{}{ - "value": -1, - }, - }, - []byte(`0`), - true, - }, -} - -func TestNumberBitwiseOR(t *testing.T) { - ctx := context.TODO() - - for _, test := range numberBitwiseORTests { - t.Run(test.name, func(t *testing.T) { - message := message.New().SetData(test.test) - insp, err := newNumberBitwiseOR(ctx, test.cfg) - if err != nil { - t.Fatal(err) - } - - check, err := insp.Inspect(ctx, message) - if err != nil { - t.Error(err) - } - - if test.expected != check { - t.Errorf("expected %v, got %v", test.expected, check) - t.Errorf("settings: %+v", test.cfg) - t.Errorf("test: %+v", string(test.test)) - } - }) - } -} - -func benchmarkNumberBitwiseOR(b *testing.B, insp *numberBitwiseOR, message *message.Message) { - ctx := context.TODO() - for i := 0; i < b.N; i++ { - _, _ = insp.Inspect(ctx, message) - } -} - -func BenchmarkNumberBitwiseOR(b *testing.B) { - for _, test := range numberBitwiseORTests { - insp, err := newNumberBitwiseOR(context.TODO(), test.cfg) - if err != nil { - b.Fatal(err) - } - - b.Run(test.name, - func(b *testing.B) { - message := message.New().SetData(test.test) - benchmarkNumberBitwiseOR(b, insp, message) - }, - ) - } -} diff --git a/v1/condition/number_bitwise_xor.go b/v1/condition/number_bitwise_xor.go deleted file mode 100644 index 7b4b1466..00000000 --- a/v1/condition/number_bitwise_xor.go +++ /dev/null @@ -1,44 +0,0 @@ -package condition - -import ( - "context" - "strconv" - - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" -) - -func newNumberBitwiseXOR(_ context.Context, cfg config.Config) (*numberBitwiseXOR, error) { - conf := numberBitwiseConfig{} - if err := conf.Decode(cfg.Settings); err != nil { - return nil, err - } - - insp := 
numberBitwiseXOR{ - conf: conf, - } - - return &insp, nil -} - -type numberBitwiseXOR struct { - conf numberBitwiseConfig -} - -func (insp *numberBitwiseXOR) Inspect(ctx context.Context, msg *message.Message) (output bool, err error) { - if msg.IsControl() { - return false, nil - } - - if insp.conf.Object.SourceKey == "" { - value, err := strconv.ParseInt(string(msg.Data()), 10, 64) - if err != nil { - return false, err - } - - return value^insp.conf.Value != 0, nil - } - - value := msg.GetValue(insp.conf.Object.SourceKey) - return value.Int()^insp.conf.Value != 0, nil -} diff --git a/v1/condition/number_bitwise_xor_test.go b/v1/condition/number_bitwise_xor_test.go deleted file mode 100644 index 07ad8693..00000000 --- a/v1/condition/number_bitwise_xor_test.go +++ /dev/null @@ -1,77 +0,0 @@ -package condition - -import ( - "context" - "testing" - - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" -) - -var _ inspector = &numberBitwiseXOR{} - -var numberBitwiseXORTests = []struct { - name string - cfg config.Config - test []byte - expected bool -}{ - { - "pass", - config.Config{ - Settings: map[string]interface{}{ - "value": -1, - }, - }, - []byte(`0`), - true, - }, -} - -func TestNumberBitwiseXOR(t *testing.T) { - ctx := context.TODO() - - for _, test := range numberBitwiseXORTests { - t.Run(test.name, func(t *testing.T) { - message := message.New().SetData(test.test) - insp, err := newNumberBitwiseXOR(ctx, test.cfg) - if err != nil { - t.Fatal(err) - } - - check, err := insp.Inspect(ctx, message) - if err != nil { - t.Error(err) - } - - if test.expected != check { - t.Errorf("expected %v, got %v", test.expected, check) - t.Errorf("settings: %+v", test.cfg) - t.Errorf("test: %+v", string(test.test)) - } - }) - } -} - -func benchmarkNumberBitwiseXOR(b *testing.B, insp *numberBitwiseXOR, message *message.Message) { - ctx := context.TODO() - for i := 0; i < b.N; i++ { - _, _ = insp.Inspect(ctx, message) - } -} - -func BenchmarkNumberBitwiseXOR(b *testing.B) { - for _, test := range numberBitwiseXORTests { - insp, err := newNumberBitwiseXOR(context.TODO(), test.cfg) - if err != nil { - b.Fatal(err) - } - - b.Run(test.name, - func(b *testing.B) { - message := message.New().SetData(test.test) - benchmarkNumberBitwiseXOR(b, insp, message) - }, - ) - } -} diff --git a/v1/condition/number_equal_to.go b/v1/condition/number_equal_to.go deleted file mode 100644 index 340b40e0..00000000 --- a/v1/condition/number_equal_to.go +++ /dev/null @@ -1,59 +0,0 @@ -package condition - -import ( - "context" - "encoding/json" - "strconv" - - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" -) - -func newNumberEqualTo(_ context.Context, cfg config.Config) (*numberEqualTo, error) { - conf := numberConfig{} - if err := conf.Decode(cfg.Settings); err != nil { - return nil, err - } - insp := numberEqualTo{ - conf: conf, - } - return &insp, nil -} - -type numberEqualTo struct { - conf numberConfig -} - -func (insp *numberEqualTo) Inspect(ctx context.Context, msg *message.Message) (output bool, err error) { - if msg.IsControl() { - return false, nil - } - compare := insp.conf.Value - - if insp.conf.Object.SourceKey == "" { - f, err := strconv.ParseFloat(string(msg.Data()), 64) - if err != nil { - return false, err - } - - return insp.match(f, compare), nil - } - - target := msg.GetValue(insp.conf.Object.TargetKey) - - if target.Exists() { - compare = target.Float() - } - - v := msg.GetValue(insp.conf.Object.SourceKey) - return insp.match(v.Float(), compare), nil 
-} - -func (c *numberEqualTo) match(f float64, t float64) bool { - return f == t -} - -func (c *numberEqualTo) String() string { - b, _ := json.Marshal(c.conf) - return string(b) -} diff --git a/v1/condition/number_equal_to_test.go b/v1/condition/number_equal_to_test.go deleted file mode 100644 index cafc1c92..00000000 --- a/v1/condition/number_equal_to_test.go +++ /dev/null @@ -1,185 +0,0 @@ -package condition - -import ( - "context" - "testing" - - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" -) - -var _ inspector = &numberEqualTo{} - -var numberEqualToTests = []struct { - name string - cfg config.Config - test []byte - expected bool -}{ - // Integers - { - "pass", - config.Config{ - Settings: map[string]interface{}{ - "object": map[string]interface{}{ - "source_key": "foo", - }, - "value": 14, - }, - }, - []byte(`{"foo":14}`), - true, - }, - { - "fail", - config.Config{ - Settings: map[string]interface{}{ - "value": 10, - }, - }, - []byte(`1`), - false, - }, - { - "pass", - config.Config{ - Settings: map[string]interface{}{ - "object": map[string]interface{}{ - "source_key": "foo", - }, - "value": 0, - }, - }, - []byte(`{"foo":0}`), - true, - }, - { - "fail", - config.Config{ - Settings: map[string]interface{}{ - "value": 5, - }, - }, - []byte(`15`), - false, - }, - // Floats - { - "fail", - config.Config{ - Settings: map[string]interface{}{ - "value": 1, - }, - }, - []byte(`1.5`), - false, - }, - { - "fail", - config.Config{ - Settings: map[string]interface{}{ - "value": 0.1, - }, - }, - []byte(`1.5`), - false, - }, - { - "pass", - config.Config{ - Settings: map[string]interface{}{ - "object": map[string]interface{}{ - "source_key": "foo", - }, - "value": 1.1, - }, - }, - []byte(`{"foo":1.1}`), - true, - }, - { - "pass", - config.Config{ - Settings: map[string]interface{}{ - "value": 1.4, - }, - }, - []byte(`1.4`), - true, - }, - { - "pass", - config.Config{ - Settings: map[string]interface{}{ - "object": map[string]interface{}{ - "source_key": "foo", - "target_key": "bar", - }, - }, - }, - []byte(`{"foo": 10, "bar": 10}`), - true, - }, - { - "fail", - config.Config{ - Settings: map[string]interface{}{ - "object": map[string]interface{}{ - "source_key": "foo", - "target_key": "bar", - }, - "value": 100, - }, - }, - []byte(`{"foo": 100, "bar": 200}`), - false, - }, -} - -func TestNumberEqualTo(t *testing.T) { - ctx := context.TODO() - - for _, test := range numberEqualToTests { - t.Run(test.name, func(t *testing.T) { - message := message.New().SetData(test.test) - insp, err := newNumberEqualTo(ctx, test.cfg) - if err != nil { - t.Fatal(err) - } - - check, err := insp.Inspect(ctx, message) - if err != nil { - t.Error(err) - } - - if test.expected != check { - t.Errorf("expected %v, got %v", test.expected, check) - t.Errorf("settings: %+v", test.cfg) - t.Errorf("test: %+v", string(test.test)) - } - }) - } -} - -func benchmarkNumberEqualTo(b *testing.B, insp *numberEqualTo, message *message.Message) { - ctx := context.TODO() - for i := 0; i < b.N; i++ { - _, _ = insp.Inspect(ctx, message) - } -} - -func BenchmarkNumberEqualTo(b *testing.B) { - for _, test := range numberEqualToTests { - insp, err := newNumberEqualTo(context.TODO(), test.cfg) - if err != nil { - b.Fatal(err) - } - - b.Run(test.name, - func(b *testing.B) { - message := message.New().SetData(test.test) - benchmarkNumberEqualTo(b, insp, message) - }, - ) - } -} diff --git a/v1/condition/number_greater_than.go b/v1/condition/number_greater_than.go deleted file mode 100644 index 
27041c04..00000000 --- a/v1/condition/number_greater_than.go +++ /dev/null @@ -1,62 +0,0 @@ -package condition - -import ( - "context" - "encoding/json" - "strconv" - - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" -) - -func newNumberGreaterThan(_ context.Context, cfg config.Config) (*numberGreaterThan, error) { - conf := numberConfig{} - if err := conf.Decode(cfg.Settings); err != nil { - return nil, err - } - - insp := numberGreaterThan{ - conf: conf, - } - - return &insp, nil -} - -type numberGreaterThan struct { - conf numberConfig -} - -func (insp *numberGreaterThan) Inspect(ctx context.Context, msg *message.Message) (output bool, err error) { - if msg.IsControl() { - return false, nil - } - - compare := insp.conf.Value - - if insp.conf.Object.SourceKey == "" { - f, err := strconv.ParseFloat(string(msg.Data()), 64) - if err != nil { - return false, err - } - - return insp.match(f, compare), nil - } - - target := msg.GetValue(insp.conf.Object.TargetKey) - - if target.Exists() { - compare = target.Float() - } - - v := msg.GetValue(insp.conf.Object.SourceKey) - return insp.match(v.Float(), compare), nil -} - -func (c *numberGreaterThan) match(f float64, t float64) bool { - return f > t -} - -func (c *numberGreaterThan) String() string { - b, _ := json.Marshal(c.conf) - return string(b) -} diff --git a/v1/condition/number_greater_than_test.go b/v1/condition/number_greater_than_test.go deleted file mode 100644 index f12c27e5..00000000 --- a/v1/condition/number_greater_than_test.go +++ /dev/null @@ -1,185 +0,0 @@ -package condition - -import ( - "context" - "testing" - - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" -) - -var _ inspector = &numberGreaterThan{} - -var numberGreaterThanTests = []struct { - name string - cfg config.Config - test []byte - expected bool -}{ - // Integers - { - "pass", - config.Config{ - Settings: map[string]interface{}{ - "object": map[string]interface{}{ - "source_key": "foo", - }, - "value": 1, - }, - }, - []byte(`{"foo":10}`), - true, - }, - { - "pass", - config.Config{ - Settings: map[string]interface{}{ - "value": 1, - }, - }, - []byte(`10`), - true, - }, - { - "fail", - config.Config{ - Settings: map[string]interface{}{ - "object": map[string]interface{}{ - "source_key": "foo", - }, - "value": 10, - }, - }, - []byte(`{"foo":1}`), - false, - }, - { - "fail", - config.Config{ - Settings: map[string]interface{}{ - "value": 10, - }, - }, - []byte(`1`), - false, - }, - // Floats - { - "pass", - config.Config{ - Settings: map[string]interface{}{ - "value": 1, - }, - }, - []byte(`1.5`), - true, - }, - { - "pass", - config.Config{ - Settings: map[string]interface{}{ - "value": 1.1, - }, - }, - []byte(`1.5`), - true, - }, - { - "fail", - config.Config{ - Settings: map[string]interface{}{ - "object": map[string]interface{}{ - "source_key": "foo", - }, - "value": 1.5, - }, - }, - []byte(`{"foo":1.1}`), - false, - }, - { - "fail", - config.Config{ - Settings: map[string]interface{}{ - "value": 1.5, - }, - }, - []byte(`1`), - false, - }, - { - "pass", - config.Config{ - Settings: map[string]interface{}{ - "object": map[string]interface{}{ - "source_key": "foo", - "target_key": "bar", - }, - }, - }, - []byte(`{"foo": 100, "bar": 10}`), - true, - }, - { - "fail", - config.Config{ - Settings: map[string]interface{}{ - "object": map[string]interface{}{ - "source_key": "foo", - "target_key": "bar", - }, - "value": 10, - }, - }, - []byte(`{"foo": 100, "bar": 2000}`), - false, - }, -} - -func 
TestNumberGreaterThan(t *testing.T) { - ctx := context.TODO() - - for _, test := range numberGreaterThanTests { - t.Run(test.name, func(t *testing.T) { - message := message.New().SetData(test.test) - insp, err := newNumberGreaterThan(ctx, test.cfg) - if err != nil { - t.Fatal(err) - } - - check, err := insp.Inspect(ctx, message) - if err != nil { - t.Error(err) - } - - if test.expected != check { - t.Errorf("expected %v, got %v", test.expected, check) - t.Errorf("settings: %+v", test.cfg) - t.Errorf("test: %+v", string(test.test)) - } - }) - } -} - -func benchmarkNumberGreaterThan(b *testing.B, insp *numberGreaterThan, message *message.Message) { - ctx := context.TODO() - for i := 0; i < b.N; i++ { - _, _ = insp.Inspect(ctx, message) - } -} - -func BenchmarkNumberGreaterThan(b *testing.B) { - for _, test := range numberGreaterThanTests { - insp, err := newNumberGreaterThan(context.TODO(), test.cfg) - if err != nil { - b.Fatal(err) - } - - b.Run(test.name, - func(b *testing.B) { - message := message.New().SetData(test.test) - benchmarkNumberGreaterThan(b, insp, message) - }, - ) - } -} diff --git a/v1/condition/number_length_equal_to.go b/v1/condition/number_length_equal_to.go deleted file mode 100644 index 63c9059c..00000000 --- a/v1/condition/number_length_equal_to.go +++ /dev/null @@ -1,55 +0,0 @@ -package condition - -import ( - "context" - "encoding/json" - - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" -) - -func newNumberLengthEqualTo(_ context.Context, cfg config.Config) (*numberLengthEqualTo, error) { - conf := numberLengthConfig{} - if err := conf.Decode(cfg.Settings); err != nil { - return nil, err - } - - insp := numberLengthEqualTo{ - conf: conf, - } - - return &insp, nil -} - -type numberLengthEqualTo struct { - conf numberLengthConfig -} - -func (insp *numberLengthEqualTo) Inspect(ctx context.Context, msg *message.Message) (output bool, err error) { - if msg.IsControl() { - return false, nil - } - - if insp.conf.Object.SourceKey == "" { - llm := numberLengthMeasurement(msg.Data(), insp.conf.Measurement) - return insp.match(llm), nil - } - - value := msg.GetValue(insp.conf.Object.SourceKey) - if value.IsArray() { - l := len(value.Array()) - return insp.match(l), nil - } - - llm := numberLengthMeasurement(value.Bytes(), insp.conf.Measurement) - return insp.match(llm), nil -} - -func (c *numberLengthEqualTo) match(length int) bool { - return length == c.conf.Value -} - -func (c *numberLengthEqualTo) String() string { - b, _ := json.Marshal(c.conf) - return string(b) -} diff --git a/v1/condition/number_length_equal_to_test.go b/v1/condition/number_length_equal_to_test.go deleted file mode 100644 index fc99b767..00000000 --- a/v1/condition/number_length_equal_to_test.go +++ /dev/null @@ -1,113 +0,0 @@ -package condition - -import ( - "context" - "testing" - - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" -) - -var _ inspector = &numberLengthEqualTo{} - -var numberLengthEqualToTests = []struct { - name string - cfg config.Config - test []byte - expected bool -}{ - { - "pass", - config.Config{ - Settings: map[string]interface{}{ - "object": map[string]interface{}{ - "source_key": "a", - }, - "value": 3, - }, - }, - []byte(`{"a":"bcd"}`), - true, - }, - { - "pass", - config.Config{ - Settings: map[string]interface{}{ - "value": 3, - }, - }, - []byte(`bcd`), - true, - }, - { - "fail", - config.Config{ - Settings: map[string]interface{}{ - "object": map[string]interface{}{ - "source_key": "a", - }, - "value": 4, - }, - 
}, - []byte(`{"a":"bcd"}`), - false, - }, - { - "fail", - config.Config{ - Settings: map[string]interface{}{ - "value": 4, - }, - }, - []byte(`bcd`), - false, - }, -} - -func TestNumberLengthEqualTo(t *testing.T) { - ctx := context.TODO() - - for _, test := range numberLengthEqualToTests { - t.Run(test.name, func(t *testing.T) { - message := message.New().SetData(test.test) - insp, err := newNumberLengthEqualTo(ctx, test.cfg) - if err != nil { - t.Fatal(err) - } - - check, err := insp.Inspect(ctx, message) - if err != nil { - t.Error(err) - } - - if test.expected != check { - t.Errorf("expected %v, got %v", test.expected, check) - t.Errorf("settings: %+v", test.cfg) - t.Errorf("test: %+v", string(test.test)) - } - }) - } -} - -func benchmarkNumberLengthEqualTo(b *testing.B, insp *numberLengthEqualTo, message *message.Message) { - ctx := context.TODO() - for i := 0; i < b.N; i++ { - _, _ = insp.Inspect(ctx, message) - } -} - -func BenchmarkNumberLengthEqualTo(b *testing.B) { - for _, test := range numberLengthEqualToTests { - insp, err := newNumberLengthEqualTo(context.TODO(), test.cfg) - if err != nil { - b.Fatal(err) - } - - b.Run(test.name, - func(b *testing.B) { - message := message.New().SetData(test.test) - benchmarkNumberLengthEqualTo(b, insp, message) - }, - ) - } -} diff --git a/v1/condition/number_length_greater_than.go b/v1/condition/number_length_greater_than.go deleted file mode 100644 index 89abccd5..00000000 --- a/v1/condition/number_length_greater_than.go +++ /dev/null @@ -1,55 +0,0 @@ -package condition - -import ( - "context" - "encoding/json" - - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" -) - -func newNumberLengthGreaterThan(_ context.Context, cfg config.Config) (*numberLengthGreaterThan, error) { - conf := numberLengthConfig{} - if err := conf.Decode(cfg.Settings); err != nil { - return nil, err - } - - insp := numberLengthGreaterThan{ - conf: conf, - } - - return &insp, nil -} - -type numberLengthGreaterThan struct { - conf numberLengthConfig -} - -func (insp *numberLengthGreaterThan) Inspect(ctx context.Context, msg *message.Message) (output bool, err error) { - if msg.IsControl() { - return false, nil - } - - if insp.conf.Object.SourceKey == "" { - llm := numberLengthMeasurement(msg.Data(), insp.conf.Measurement) - return insp.match(llm), nil - } - - value := msg.GetValue(insp.conf.Object.SourceKey) - if value.IsArray() { - l := len(value.Array()) - return insp.match(l), nil - } - - llm := numberLengthMeasurement(value.Bytes(), insp.conf.Measurement) - return insp.match(llm), nil -} - -func (c *numberLengthGreaterThan) match(length int) bool { - return length > c.conf.Value -} - -func (c *numberLengthGreaterThan) String() string { - b, _ := json.Marshal(c.conf) - return string(b) -} diff --git a/v1/condition/number_length_greater_than_test.go b/v1/condition/number_length_greater_than_test.go deleted file mode 100644 index 82a79575..00000000 --- a/v1/condition/number_length_greater_than_test.go +++ /dev/null @@ -1,113 +0,0 @@ -package condition - -import ( - "context" - "testing" - - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" -) - -var _ inspector = &numberLengthGreaterThan{} - -var numberLengthGreaterThanTests = []struct { - name string - cfg config.Config - test []byte - expected bool -}{ - { - "pass", - config.Config{ - Settings: map[string]interface{}{ - "object": map[string]interface{}{ - "source_key": "foo", - }, - "value": 2, - }, - }, - []byte(`{"foo":"bar"}`), - true, - }, - { - "pass", - 
config.Config{ - Settings: map[string]interface{}{ - "value": 2, - }, - }, - []byte(`bar`), - true, - }, - { - "fail", - config.Config{ - Settings: map[string]interface{}{ - "object": map[string]interface{}{ - "source_key": "foo", - }, - "value": 3, - }, - }, - []byte(`{"foo":"bar"}`), - false, - }, - { - "fail", - config.Config{ - Settings: map[string]interface{}{ - "value": 3, - }, - }, - []byte(`bar`), - false, - }, -} - -func TestNumberLengthGreaterThan(t *testing.T) { - ctx := context.TODO() - - for _, test := range numberLengthGreaterThanTests { - t.Run(test.name, func(t *testing.T) { - message := message.New().SetData(test.test) - insp, err := newNumberLengthGreaterThan(ctx, test.cfg) - if err != nil { - t.Fatal(err) - } - - check, err := insp.Inspect(ctx, message) - if err != nil { - t.Error(err) - } - - if test.expected != check { - t.Errorf("expected %v, got %v", test.expected, check) - t.Errorf("settings: %+v", test.cfg) - t.Errorf("test: %+v", string(test.test)) - } - }) - } -} - -func benchmarkNumberLengthGreaterThan(b *testing.B, insp *numberLengthGreaterThan, message *message.Message) { - ctx := context.TODO() - for i := 0; i < b.N; i++ { - _, _ = insp.Inspect(ctx, message) - } -} - -func BenchmarkNumberLengthGreaterThan(b *testing.B) { - for _, test := range numberLengthGreaterThanTests { - insp, err := newNumberLengthGreaterThan(context.TODO(), test.cfg) - if err != nil { - b.Fatal(err) - } - - b.Run(test.name, - func(b *testing.B) { - message := message.New().SetData(test.test) - benchmarkNumberLengthGreaterThan(b, insp, message) - }, - ) - } -} diff --git a/v1/condition/number_length_less_than.go b/v1/condition/number_length_less_than.go deleted file mode 100644 index 110b0581..00000000 --- a/v1/condition/number_length_less_than.go +++ /dev/null @@ -1,55 +0,0 @@ -package condition - -import ( - "context" - "encoding/json" - - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" -) - -func newNumberLengthLessThan(_ context.Context, cfg config.Config) (*numberLengthLessThan, error) { - conf := numberLengthConfig{} - if err := conf.Decode(cfg.Settings); err != nil { - return nil, err - } - - insp := numberLengthLessThan{ - conf: conf, - } - - return &insp, nil -} - -type numberLengthLessThan struct { - conf numberLengthConfig -} - -func (insp *numberLengthLessThan) Inspect(ctx context.Context, msg *message.Message) (output bool, err error) { - if msg.IsControl() { - return false, nil - } - - if insp.conf.Object.SourceKey == "" { - llm := numberLengthMeasurement(msg.Data(), insp.conf.Measurement) - return insp.match(llm), nil - } - - value := msg.GetValue(insp.conf.Object.SourceKey) - if value.IsArray() { - l := len(value.Array()) - return insp.match(l), nil - } - - llm := numberLengthMeasurement(value.Bytes(), insp.conf.Measurement) - return insp.match(llm), nil -} - -func (c *numberLengthLessThan) match(length int) bool { - return length < c.conf.Value -} - -func (c *numberLengthLessThan) String() string { - b, _ := json.Marshal(c.conf) - return string(b) -} diff --git a/v1/condition/number_length_less_than_test.go b/v1/condition/number_length_less_than_test.go deleted file mode 100644 index 06ccbec7..00000000 --- a/v1/condition/number_length_less_than_test.go +++ /dev/null @@ -1,113 +0,0 @@ -package condition - -import ( - "context" - "testing" - - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" -) - -var _ inspector = &numberLengthLessThan{} - -var numberLengthLessThanTests = []struct { - name string - cfg 
config.Config - test []byte - expected bool -}{ - { - "pass", - config.Config{ - Settings: map[string]interface{}{ - "object": map[string]interface{}{ - "source_key": "foo", - }, - "value": 4, - }, - }, - []byte(`{"foo":"bar"}`), - true, - }, - { - "pass", - config.Config{ - Settings: map[string]interface{}{ - "value": 4, - }, - }, - []byte(`bar`), - true, - }, - { - "fail", - config.Config{ - Settings: map[string]interface{}{ - "object": map[string]interface{}{ - "source_key": "foo", - }, - "value": 3, - }, - }, - []byte(`{"foo":"bar"}`), - false, - }, - { - "fail", - config.Config{ - Settings: map[string]interface{}{ - "value": 3, - }, - }, - []byte(`bar`), - false, - }, -} - -func TestNumberLengthLessThan(t *testing.T) { - ctx := context.TODO() - - for _, test := range numberLengthLessThanTests { - t.Run(test.name, func(t *testing.T) { - message := message.New().SetData(test.test) - insp, err := newNumberLengthLessThan(ctx, test.cfg) - if err != nil { - t.Fatal(err) - } - - check, err := insp.Inspect(ctx, message) - if err != nil { - t.Error(err) - } - - if test.expected != check { - t.Errorf("expected %v, got %v", test.expected, check) - t.Errorf("settings: %+v", test.cfg) - t.Errorf("test: %+v", string(test.test)) - } - }) - } -} - -func benchmarkNumberLengthLessThan(b *testing.B, insp *numberLengthLessThan, message *message.Message) { - ctx := context.TODO() - for i := 0; i < b.N; i++ { - _, _ = insp.Inspect(ctx, message) - } -} - -func BenchmarkNumberLengthLessThan(b *testing.B) { - for _, test := range numberLengthLessThanTests { - insp, err := newNumberLengthLessThan(context.TODO(), test.cfg) - if err != nil { - b.Fatal(err) - } - - b.Run(test.name, - func(b *testing.B) { - message := message.New().SetData(test.test) - benchmarkNumberLengthLessThan(b, insp, message) - }, - ) - } -} diff --git a/v1/condition/number_less_than.go b/v1/condition/number_less_than.go deleted file mode 100644 index 527739af..00000000 --- a/v1/condition/number_less_than.go +++ /dev/null @@ -1,60 +0,0 @@ -package condition - -import ( - "context" - "encoding/json" - "strconv" - - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" -) - -func newNumberLessThan(_ context.Context, cfg config.Config) (*numberLessThan, error) { - conf := numberConfig{} - if err := conf.Decode(cfg.Settings); err != nil { - return nil, err - } - insp := numberLessThan{ - conf: conf, - } - return &insp, nil -} - -type numberLessThan struct { - conf numberConfig -} - -func (insp *numberLessThan) Inspect(ctx context.Context, msg *message.Message) (output bool, err error) { - if msg.IsControl() { - return false, nil - } - - compare := insp.conf.Value - - if insp.conf.Object.SourceKey == "" { - f, err := strconv.ParseFloat(string(msg.Data()), 64) - if err != nil { - return false, err - } - - return insp.match(f, compare), nil - } - - target := msg.GetValue(insp.conf.Object.TargetKey) - - if target.Exists() { - compare = target.Float() - } - - v := msg.GetValue(insp.conf.Object.SourceKey) - return insp.match(v.Float(), compare), nil -} - -func (c *numberLessThan) match(f float64, t float64) bool { - return f < t -} - -func (c *numberLessThan) String() string { - b, _ := json.Marshal(c.conf) - return string(b) -} diff --git a/v1/condition/number_less_than_test.go b/v1/condition/number_less_than_test.go deleted file mode 100644 index 860b7a9e..00000000 --- a/v1/condition/number_less_than_test.go +++ /dev/null @@ -1,185 +0,0 @@ -package condition - -import ( - "context" - "testing" - - 
"github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" -) - -var _ inspector = &numberLessThan{} - -var numberLessThanTests = []struct { - name string - cfg config.Config - test []byte - expected bool -}{ - // Integers - { - "pass", - config.Config{ - Settings: map[string]interface{}{ - "object": map[string]interface{}{ - "source_key": "foo", - }, - "value": 14, - }, - }, - []byte(`{"foo":10}`), - true, - }, - { - "fail", - config.Config{ - Settings: map[string]interface{}{ - "value": 1, - }, - }, - []byte(`10`), - false, - }, - { - "pass", - config.Config{ - Settings: map[string]interface{}{ - "object": map[string]interface{}{ - "source_key": "foo", - }, - "value": 10, - }, - }, - []byte(`{"foo":1}`), - true, - }, - { - "fail", - config.Config{ - Settings: map[string]interface{}{ - "value": 5, - }, - }, - []byte(`15`), - false, - }, - // Floats - { - "fail", - config.Config{ - Settings: map[string]interface{}{ - "value": 1, - }, - }, - []byte(`1.5`), - false, - }, - { - "fail", - config.Config{ - Settings: map[string]interface{}{ - "value": 0.1, - }, - }, - []byte(`1.5`), - false, - }, - { - "pass", - config.Config{ - Settings: map[string]interface{}{ - "object": map[string]interface{}{ - "source_key": "foo", - }, - "value": 1.5, - }, - }, - []byte(`{"foo":1.1}`), - true, - }, - { - "pass", - config.Config{ - Settings: map[string]interface{}{ - "value": 1.5, - }, - }, - []byte(`1`), - true, - }, - { - "pass", - config.Config{ - Settings: map[string]interface{}{ - "object": map[string]interface{}{ - "source_key": "foo", - "target_key": "bar", - }, - }, - }, - []byte(`{"foo": 10, "bar": 100}`), - true, - }, - { - "fail", - config.Config{ - Settings: map[string]interface{}{ - "object": map[string]interface{}{ - "source_key": "foo", - "target_key": "bar", - }, - "value": 10, - }, - }, - []byte(`{"foo": 2, "bar": 1}`), - false, - }, -} - -func TestNumberLessThan(t *testing.T) { - ctx := context.TODO() - - for _, test := range numberLessThanTests { - t.Run(test.name, func(t *testing.T) { - message := message.New().SetData(test.test) - insp, err := newNumberLessThan(ctx, test.cfg) - if err != nil { - t.Fatal(err) - } - - check, err := insp.Inspect(ctx, message) - if err != nil { - t.Error(err) - } - - if test.expected != check { - t.Errorf("expected %v, got %v", test.expected, check) - t.Errorf("settings: %+v", test.cfg) - t.Errorf("test: %+v", string(test.test)) - } - }) - } -} - -func benchmarkNumberLessThan(b *testing.B, insp *numberLessThan, message *message.Message) { - ctx := context.TODO() - for i := 0; i < b.N; i++ { - _, _ = insp.Inspect(ctx, message) - } -} - -func BenchmarkNumberLessThan(b *testing.B) { - for _, test := range numberLessThanTests { - insp, err := newNumberLessThan(context.TODO(), test.cfg) - if err != nil { - b.Fatal(err) - } - - b.Run(test.name, - func(b *testing.B) { - message := message.New().SetData(test.test) - benchmarkNumberLessThan(b, insp, message) - }, - ) - } -} diff --git a/v1/condition/string.go b/v1/condition/string.go deleted file mode 100644 index ee516bc7..00000000 --- a/v1/condition/string.go +++ /dev/null @@ -1,16 +0,0 @@ -package condition - -import ( - iconfig "github.com/brexhq/substation/internal/config" -) - -type stringConfig struct { - // Value used for comparison during inspection. 
- Value string `json:"value"` - - Object iconfig.Object `json:"object"` -} - -func (c *stringConfig) Decode(in interface{}) error { - return iconfig.Decode(in, c) -} diff --git a/v1/condition/string_contains.go b/v1/condition/string_contains.go deleted file mode 100644 index 66d1f5fe..00000000 --- a/v1/condition/string_contains.go +++ /dev/null @@ -1,48 +0,0 @@ -package condition - -import ( - "bytes" - "context" - "encoding/json" - - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" -) - -func newStringContains(_ context.Context, cfg config.Config) (*stringContains, error) { - conf := stringConfig{} - if err := conf.Decode(cfg.Settings); err != nil { - return nil, err - } - - insp := stringContains{ - conf: conf, - b: []byte(conf.Value), - } - - return &insp, nil -} - -type stringContains struct { - conf stringConfig - - b []byte -} - -func (insp *stringContains) Inspect(ctx context.Context, msg *message.Message) (bool, error) { - if msg.IsControl() { - return false, nil - } - - if insp.conf.Object.SourceKey == "" { - return bytes.Contains(msg.Data(), insp.b), nil - } - - value := msg.GetValue(insp.conf.Object.SourceKey) - return bytes.Contains(value.Bytes(), insp.b), nil -} - -func (c *stringContains) String() string { - b, _ := json.Marshal(c.conf) - return string(b) -} diff --git a/v1/condition/string_contains_test.go b/v1/condition/string_contains_test.go deleted file mode 100644 index 0e1ad4f8..00000000 --- a/v1/condition/string_contains_test.go +++ /dev/null @@ -1,86 +0,0 @@ -package condition - -import ( - "context" - "testing" - - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" -) - -var _ inspector = &stringContains{} - -var stringContainsTests = []struct { - name string - cfg config.Config - data []byte - expected bool -}{ - { - "pass", - config.Config{ - Settings: map[string]interface{}{ - "value": "bc", - }, - }, - []byte("abcd"), - true, - }, - { - "fail", - config.Config{ - Settings: map[string]interface{}{ - "value": "BC", - }, - }, - []byte("abcd"), - false, - }, -} - -func TestStringContains(t *testing.T) { - ctx := context.TODO() - - for _, test := range stringContainsTests { - t.Run(test.name, func(t *testing.T) { - message := message.New().SetData(test.data) - - insp, err := newStringContains(ctx, test.cfg) - if err != nil { - t.Fatal(err) - } - - check, err := insp.Inspect(ctx, message) - if err != nil { - t.Error(err) - } - - if test.expected != check { - t.Errorf("expected %v, got %v", test.expected, check) - } - }) - } -} - -func benchmarkStringContains(b *testing.B, insp *stringContains, message *message.Message) { - ctx := context.TODO() - for i := 0; i < b.N; i++ { - _, _ = insp.Inspect(ctx, message) - } -} - -func BenchmarkStringContains(b *testing.B) { - for _, test := range stringContainsTests { - insp, err := newStringContains(context.TODO(), test.cfg) - if err != nil { - b.Fatal(err) - } - - b.Run(test.name, - func(b *testing.B) { - message := message.New().SetData(test.data) - benchmarkStringContains(b, insp, message) - }, - ) - } -} diff --git a/v1/condition/string_ends_with.go b/v1/condition/string_ends_with.go deleted file mode 100644 index fa80dcc7..00000000 --- a/v1/condition/string_ends_with.go +++ /dev/null @@ -1,48 +0,0 @@ -package condition - -import ( - "bytes" - "context" - "encoding/json" - - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" -) - -func newStringEndsWith(_ context.Context, cfg config.Config) (*stringEndsWith, error) { - conf := 
stringConfig{} - if err := conf.Decode(cfg.Settings); err != nil { - return nil, err - } - - insp := stringEndsWith{ - conf: conf, - b: []byte(conf.Value), - } - - return &insp, nil -} - -type stringEndsWith struct { - conf stringConfig - - b []byte -} - -func (insp *stringEndsWith) Inspect(ctx context.Context, msg *message.Message) (output bool, err error) { - if msg.IsControl() { - return false, nil - } - - if insp.conf.Object.SourceKey == "" { - return bytes.HasSuffix(msg.Data(), insp.b), nil - } - - value := msg.GetValue(insp.conf.Object.SourceKey) - return bytes.HasSuffix(value.Bytes(), insp.b), nil -} - -func (c *stringEndsWith) String() string { - b, _ := json.Marshal(c.conf) - return string(b) -} diff --git a/v1/condition/string_ends_with_test.go b/v1/condition/string_ends_with_test.go deleted file mode 100644 index 3bff1aa8..00000000 --- a/v1/condition/string_ends_with_test.go +++ /dev/null @@ -1,99 +0,0 @@ -package condition - -import ( - "context" - "testing" - - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" -) - -var _ inspector = &stringEndsWith{} - -var stringEndsWithTests = []struct { - name string - cfg config.Config - data []byte - expected bool -}{ - { - "pass", - config.Config{ - Settings: map[string]interface{}{ - "object": map[string]interface{}{ - "source_key": "a", - }, - "value": "de", - }, - }, - []byte(`{"a":"bcde"}`), - true, - }, - { - "pass", - config.Config{ - Settings: map[string]interface{}{ - "value": "de", - }, - }, - []byte("bcde"), - true, - }, - { - "fail", - config.Config{ - Settings: map[string]interface{}{ - "value": "bc", - }, - }, - []byte("bcde"), - false, - }, -} - -func TestStringEndsWith(t *testing.T) { - ctx := context.TODO() - - for _, test := range stringEndsWithTests { - t.Run(test.name, func(t *testing.T) { - message := message.New().SetData(test.data) - - insp, err := newStringEndsWith(ctx, test.cfg) - if err != nil { - t.Fatal(err) - } - - check, err := insp.Inspect(ctx, message) - if err != nil { - t.Error(err) - } - - if test.expected != check { - t.Errorf("expected %v, got %v", test.expected, check) - } - }) - } -} - -func benchmarkStringEndsWith(b *testing.B, insp *stringEndsWith, message *message.Message) { - ctx := context.TODO() - for i := 0; i < b.N; i++ { - _, _ = insp.Inspect(ctx, message) - } -} - -func BenchmarkStringEndsWith(b *testing.B) { - for _, test := range stringEndsWithTests { - insp, err := newStringEndsWith(context.TODO(), test.cfg) - if err != nil { - b.Fatal(err) - } - - b.Run(test.name, - func(b *testing.B) { - message := message.New().SetData(test.data) - benchmarkStringEndsWith(b, insp, message) - }, - ) - } -} diff --git a/v1/condition/string_equal_to.go b/v1/condition/string_equal_to.go deleted file mode 100644 index c7f9760f..00000000 --- a/v1/condition/string_equal_to.go +++ /dev/null @@ -1,56 +0,0 @@ -package condition - -import ( - "bytes" - "context" - "encoding/json" - - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" -) - -func newStringEqualTo(_ context.Context, cfg config.Config) (*stringEqualTo, error) { - conf := stringConfig{} - if err := conf.Decode(cfg.Settings); err != nil { - return nil, err - } - - insp := stringEqualTo{ - conf: conf, - b: []byte(conf.Value), - } - - return &insp, nil -} - -type stringEqualTo struct { - conf stringConfig - - b []byte -} - -func (insp *stringEqualTo) Inspect(ctx context.Context, msg *message.Message) (output bool, err error) { - if msg.IsControl() { - return false, nil - } - - compare := insp.b - 
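// If object.target_key is set and resolves on the message, its value replaces
// conf.Value as the comparison operand (see the target.Exists() branch below).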
- if insp.conf.Object.SourceKey == "" { - return bytes.Equal(msg.Data(), compare), nil - } - - target := msg.GetValue(insp.conf.Object.TargetKey) - - if target.Exists() { - compare = target.Bytes() - } - - value := msg.GetValue(insp.conf.Object.SourceKey) - return bytes.Equal(value.Bytes(), compare), nil -} - -func (c *stringEqualTo) String() string { - b, _ := json.Marshal(c.conf) - return string(b) -} diff --git a/v1/condition/string_equal_to_test.go b/v1/condition/string_equal_to_test.go deleted file mode 100644 index 5e37956b..00000000 --- a/v1/condition/string_equal_to_test.go +++ /dev/null @@ -1,123 +0,0 @@ -package condition - -import ( - "context" - "testing" - - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" -) - -var _ inspector = &stringEqualTo{} - -var stringEqualToTests = []struct { - name string - cfg config.Config - data []byte - expected bool -}{ - { - "pass", - config.Config{ - Settings: map[string]interface{}{ - "value": "abcde", - }, - }, - []byte("abcde"), - true, - }, - { - "fail", - config.Config{ - Settings: map[string]interface{}{ - "value": "abcde", - }, - }, - []byte("abcdef"), - false, - }, - { - "pass", - config.Config{ - Settings: map[string]interface{}{ - "value": `""`, - }, - }, - []byte("\"\""), - true, - }, - { - "pass", - config.Config{ - Settings: map[string]interface{}{ - "object": map[string]interface{}{ - "source_key": "foo", - "target_key": "bar", - }, - }, - }, - []byte(`{"foo":"abc", "bar":"abc"}`), - true, - }, - { - "fail", - config.Config{ - Settings: map[string]interface{}{ - "object": map[string]interface{}{ - "source_key": "foo", - "target_key": "bar", - }, - "value": "abc", - }, - }, - []byte(`{"foo":"abc", "bar":"def"}`), - false, - }, -} - -func TestStringEqualTo(t *testing.T) { - ctx := context.TODO() - - for _, test := range stringEqualToTests { - t.Run(test.name, func(t *testing.T) { - message := message.New().SetData(test.data) - - insp, err := newStringEqualTo(ctx, test.cfg) - if err != nil { - t.Fatal(err) - } - - check, err := insp.Inspect(ctx, message) - if err != nil { - t.Error(err) - } - - if test.expected != check { - t.Errorf("expected %v, got %v", test.expected, check) - } - }) - } -} - -func benchmarkStringEqualTo(b *testing.B, insp *stringEqualTo, message *message.Message) { - ctx := context.TODO() - for i := 0; i < b.N; i++ { - _, _ = insp.Inspect(ctx, message) - } -} - -func BenchmarkStringEqualTo(b *testing.B) { - for _, test := range stringEqualToTests { - insp, err := newStringEqualTo(context.TODO(), test.cfg) - if err != nil { - b.Fatal(err) - } - - b.Run(test.name, - func(b *testing.B) { - message := message.New().SetData(test.data) - benchmarkStringEqualTo(b, insp, message) - }, - ) - } -} diff --git a/v1/condition/string_greater_than.go b/v1/condition/string_greater_than.go deleted file mode 100644 index afbe9334..00000000 --- a/v1/condition/string_greater_than.go +++ /dev/null @@ -1,56 +0,0 @@ -package condition - -import ( - "bytes" - "context" - "encoding/json" - - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" -) - -func newStringGreaterThan(_ context.Context, cfg config.Config) (*stringGreaterThan, error) { - conf := stringConfig{} - if err := conf.Decode(cfg.Settings); err != nil { - return nil, err - } - - insp := stringGreaterThan{ - conf: conf, - b: []byte(conf.Value), - } - - return &insp, nil -} - -type stringGreaterThan struct { - conf stringConfig - - b []byte -} - -func (insp *stringGreaterThan) Inspect(ctx context.Context, msg 
*message.Message) (output bool, err error) { - if msg.IsControl() { - return false, nil - } - - compare := insp.b - - if insp.conf.Object.SourceKey == "" { - return bytes.Compare(msg.Data(), compare) > 0, nil - } - - target := msg.GetValue(insp.conf.Object.TargetKey) - - if target.Exists() { - compare = target.Bytes() - } - - value := msg.GetValue(insp.conf.Object.SourceKey) - return bytes.Compare(value.Bytes(), compare) > 0, nil -} - -func (c *stringGreaterThan) String() string { - b, _ := json.Marshal(c.conf) - return string(b) -} diff --git a/v1/condition/string_greater_than_test.go b/v1/condition/string_greater_than_test.go deleted file mode 100644 index 52f4117c..00000000 --- a/v1/condition/string_greater_than_test.go +++ /dev/null @@ -1,113 +0,0 @@ -package condition - -import ( - "context" - "testing" - - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" -) - -var _ inspector = &stringGreaterThan{} - -var stringGreaterThanTests = []struct { - name string - cfg config.Config - data []byte - expected bool -}{ - { - "pass", - config.Config{ - Settings: map[string]interface{}{ - "value": "a", - }, - }, - []byte("b"), - true, - }, - { - "pass", - config.Config{ - Settings: map[string]interface{}{ - "value": "2022-01-01T00:00:00Z", - }, - }, - []byte(`2023-01-01T00:00:00Z`), - true, - }, - { - "pass", - config.Config{ - Settings: map[string]interface{}{ - "object": map[string]interface{}{ - "source_key": "foo", - "target_key": "bar", - }, - }, - }, - []byte(`{"foo":"2023-01-01T00:00:00Z", "bar":"2022-01-01T00:00:00Z"}`), - true, - }, - { - "fail", - config.Config{ - Settings: map[string]interface{}{ - "object": map[string]interface{}{ - "source_key": "foo", - "target_key": "bar", - }, - "value": "greetings", - }, - }, - []byte(`{"foo":"hello", "bar":"world"}`), - false, - }, -} - -func TestStringGreaterThan(t *testing.T) { - ctx := context.TODO() - - for _, test := range stringGreaterThanTests { - t.Run(test.name, func(t *testing.T) { - message := message.New().SetData(test.data) - - insp, err := newStringGreaterThan(ctx, test.cfg) - if err != nil { - t.Fatal(err) - } - - check, err := insp.Inspect(ctx, message) - if err != nil { - t.Error(err) - } - - if test.expected != check { - t.Errorf("expected %v, got %v", test.expected, check) - } - }) - } -} - -func benchmarkStringGreaterThan(b *testing.B, insp *stringGreaterThan, message *message.Message) { - ctx := context.TODO() - for i := 0; i < b.N; i++ { - _, _ = insp.Inspect(ctx, message) - } -} - -func BenchmarkStringGreaterThan(b *testing.B) { - for _, test := range stringGreaterThanTests { - insp, err := newStringGreaterThan(context.TODO(), test.cfg) - if err != nil { - b.Fatal(err) - } - - b.Run(test.name, - func(b *testing.B) { - message := message.New().SetData(test.data) - benchmarkStringGreaterThan(b, insp, message) - }, - ) - } -} diff --git a/v1/condition/string_less_than.go b/v1/condition/string_less_than.go deleted file mode 100644 index 48d87b1e..00000000 --- a/v1/condition/string_less_than.go +++ /dev/null @@ -1,55 +0,0 @@ -package condition - -import ( - "bytes" - "context" - "encoding/json" - - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" -) - -func newStringLessThan(_ context.Context, cfg config.Config) (*stringLessThan, error) { - conf := stringConfig{} - if err := conf.Decode(cfg.Settings); err != nil { - return nil, err - } - - insp := stringLessThan{ - conf: conf, - b: []byte(conf.Value), - } - - return &insp, nil -} - -type stringLessThan struct { - conf 
stringConfig - - b []byte -} - -func (insp *stringLessThan) Inspect(ctx context.Context, msg *message.Message) (output bool, err error) { - if msg.IsControl() { - return false, nil - } - - compare := insp.b - - if insp.conf.Object.SourceKey == "" { - return bytes.Compare(msg.Data(), compare) < 0, nil - } - target := msg.GetValue(insp.conf.Object.TargetKey) - - if target.Exists() { - compare = target.Bytes() - } - - value := msg.GetValue(insp.conf.Object.SourceKey) - return bytes.Compare(value.Bytes(), compare) < 0, nil -} - -func (c *stringLessThan) String() string { - b, _ := json.Marshal(c.conf) - return string(b) -} diff --git a/v1/condition/string_less_than_test.go b/v1/condition/string_less_than_test.go deleted file mode 100644 index b77008ce..00000000 --- a/v1/condition/string_less_than_test.go +++ /dev/null @@ -1,113 +0,0 @@ -package condition - -import ( - "context" - "testing" - - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" -) - -var _ inspector = &stringLessThan{} - -var stringLessThanTests = []struct { - name string - cfg config.Config - data []byte - expected bool -}{ - { - "pass", - config.Config{ - Settings: map[string]interface{}{ - "value": "b", - }, - }, - []byte("a"), - true, - }, - { - "pass", - config.Config{ - Settings: map[string]interface{}{ - "value": "2024-01", - }, - }, - []byte(`2023-01-01T00:00:00Z`), - true, - }, - { - "pass", - config.Config{ - Settings: map[string]interface{}{ - "object": map[string]interface{}{ - "source_key": "foo", - "target_key": "bar", - }, - }, - }, - []byte(`{"foo":"2022-01-01T00:00:00Z", "bar":"2023-01-01T00:00:00Z"}`), - true, - }, - { - "fail", - config.Config{ - Settings: map[string]interface{}{ - "object": map[string]interface{}{ - "source_key": "foo", - "target_key": "bar", - }, - "value": "2025-01-01", - }, - }, - []byte(`{"foo":"2024-01-01T00:00:00Z", "bar":"2023-01-01"}`), - false, - }, -} - -func TestStringLessThan(t *testing.T) { - ctx := context.TODO() - - for _, test := range stringLessThanTests { - t.Run(test.name, func(t *testing.T) { - message := message.New().SetData(test.data) - - insp, err := newStringLessThan(ctx, test.cfg) - if err != nil { - t.Fatal(err) - } - - check, err := insp.Inspect(ctx, message) - if err != nil { - t.Error(err) - } - - if test.expected != check { - t.Errorf("expected %v, got %v", test.expected, check) - } - }) - } -} - -func benchmarkStringLessThan(b *testing.B, insp *stringLessThan, message *message.Message) { - ctx := context.TODO() - for i := 0; i < b.N; i++ { - _, _ = insp.Inspect(ctx, message) - } -} - -func BenchmarkStringLessThan(b *testing.B) { - for _, test := range stringLessThanTests { - insp, err := newStringLessThan(context.TODO(), test.cfg) - if err != nil { - b.Fatal(err) - } - - b.Run(test.name, - func(b *testing.B) { - message := message.New().SetData(test.data) - benchmarkStringLessThan(b, insp, message) - }, - ) - } -} diff --git a/v1/condition/string_match.go b/v1/condition/string_match.go deleted file mode 100644 index 57510c84..00000000 --- a/v1/condition/string_match.go +++ /dev/null @@ -1,79 +0,0 @@ -package condition - -import ( - "context" - "encoding/json" - "fmt" - "regexp" - - "github.com/brexhq/substation/config" - iconfig "github.com/brexhq/substation/internal/config" - "github.com/brexhq/substation/internal/errors" - "github.com/brexhq/substation/message" -) - -type stringMatchConfig struct { - Object iconfig.Object `json:"object"` - - // Pattern is the regular expression used during inspection. 
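// Patterns are compiled with Go's regexp package (RE2 syntax); for example,
// per stringMatchTests below, "^Test" matches "Test" but not "-Test".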
- Pattern string `json:"pattern"` -} - -func (c *stringMatchConfig) Decode(in interface{}) error { - return iconfig.Decode(in, c) -} - -func (c *stringMatchConfig) Validate() error { - if c.Pattern == "" { - return fmt.Errorf("pattern: %v", errors.ErrMissingRequiredOption) - } - - return nil -} - -func newStringMatch(_ context.Context, cfg config.Config) (*stringMatch, error) { - conf := stringMatchConfig{} - if err := conf.Decode(cfg.Settings); err != nil { - return nil, err - } - - if err := conf.Validate(); err != nil { - return nil, err - } - - re, err := regexp.Compile(conf.Pattern) - if err != nil { - return nil, fmt.Errorf("condition: insp_regexp: %v", err) - } - - insp := stringMatch{ - conf: conf, - re: re, - } - - return &insp, nil -} - -type stringMatch struct { - conf stringMatchConfig - - re *regexp.Regexp -} - -func (insp *stringMatch) Inspect(ctx context.Context, msg *message.Message) (output bool, err error) { - if msg.IsControl() { - return false, nil - } - - if insp.conf.Object.SourceKey == "" { - return insp.re.Match(msg.Data()), nil - } - - value := msg.GetValue(insp.conf.Object.SourceKey) - return insp.re.MatchString(value.String()), nil -} - -func (c *stringMatch) String() string { - b, _ := json.Marshal(c.conf) - return string(b) -} diff --git a/v1/condition/string_match_test.go b/v1/condition/string_match_test.go deleted file mode 100644 index b74615bd..00000000 --- a/v1/condition/string_match_test.go +++ /dev/null @@ -1,85 +0,0 @@ -package condition - -import ( - "context" - "testing" - - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" -) - -var _ inspector = &stringMatch{} - -var stringMatchTests = []struct { - name string - cfg config.Config - test []byte - expected bool -}{ - { - "pass", - config.Config{ - Settings: map[string]interface{}{ - "pattern": "^Test", - }, - }, - []byte("Test"), - true, - }, - { - "fail", - config.Config{ - Settings: map[string]interface{}{ - "pattern": "^Test", - }, - }, - []byte("-Test"), - false, - }, -} - -func TestStringMatch(t *testing.T) { - ctx := context.TODO() - - for _, test := range stringMatchTests { - t.Run(test.name, func(t *testing.T) { - message := message.New().SetData(test.test) - insp, err := newStringMatch(ctx, test.cfg) - if err != nil { - t.Fatal(err) - } - - check, err := insp.Inspect(ctx, message) - if err != nil { - t.Error(err) - } - - if test.expected != check { - t.Errorf("expected %v, got %v", test.expected, check) - } - }) - } -} - -func benchmarkStringMatchByte(b *testing.B, insp *stringMatch, message *message.Message) { - ctx := context.TODO() - for i := 0; i < b.N; i++ { - _, _ = insp.Inspect(ctx, message) - } -} - -func BenchmarkStringMatchByte(b *testing.B) { - for _, test := range stringMatchTests { - insp, err := newStringMatch(context.TODO(), test.cfg) - if err != nil { - b.Fatal(err) - } - - b.Run(test.name, - func(b *testing.B) { - message := message.New().SetData(test.test) - benchmarkStringMatchByte(b, insp, message) - }, - ) - } -} diff --git a/v1/condition/string_starts_with.go b/v1/condition/string_starts_with.go deleted file mode 100644 index 4b164aa0..00000000 --- a/v1/condition/string_starts_with.go +++ /dev/null @@ -1,48 +0,0 @@ -package condition - -import ( - "bytes" - "context" - "encoding/json" - - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" -) - -func newStringStartsWith(_ context.Context, cfg config.Config) (*stringStartsWith, error) { - conf := stringConfig{} - if err := conf.Decode(cfg.Settings); err != nil { - 
return nil, err - } - - insp := stringStartsWith{ - conf: conf, - b: []byte(conf.Value), - } - - return &insp, nil -} - -type stringStartsWith struct { - conf stringConfig - - b []byte -} - -func (insp *stringStartsWith) Inspect(ctx context.Context, msg *message.Message) (output bool, err error) { - if msg.IsControl() { - return false, nil - } - - if insp.conf.Object.SourceKey == "" { - return bytes.HasPrefix(msg.Data(), insp.b), nil - } - - value := msg.GetValue(insp.conf.Object.SourceKey) - return bytes.HasPrefix(value.Bytes(), insp.b), nil -} - -func (c *stringStartsWith) String() string { - b, _ := json.Marshal(c.conf) - return string(b) -} diff --git a/v1/condition/string_starts_with_test.go b/v1/condition/string_starts_with_test.go deleted file mode 100644 index e3e36e05..00000000 --- a/v1/condition/string_starts_with_test.go +++ /dev/null @@ -1,99 +0,0 @@ -package condition - -import ( - "context" - "testing" - - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" -) - -var _ inspector = &stringStartsWith{} - -var stringStartsWithTests = []struct { - name string - cfg config.Config - data []byte - expected bool -}{ - { - "pass", - config.Config{ - Settings: map[string]interface{}{ - "object": map[string]interface{}{ - "source_key": "a", - }, - "value": "bc", - }, - }, - []byte(`{"a":"bcde"}`), - true, - }, - { - "pass", - config.Config{ - Settings: map[string]interface{}{ - "value": "bc", - }, - }, - []byte("bcde"), - true, - }, - { - "fail", - config.Config{ - Settings: map[string]interface{}{ - "value": "de", - }, - }, - []byte("bcde"), - false, - }, -} - -func TestStringStartsWith(t *testing.T) { - ctx := context.TODO() - - for _, test := range stringStartsWithTests { - t.Run(test.name, func(t *testing.T) { - message := message.New().SetData(test.data) - - insp, err := newStringStartsWith(ctx, test.cfg) - if err != nil { - t.Fatal(err) - } - - check, err := insp.Inspect(ctx, message) - if err != nil { - t.Error(err) - } - - if test.expected != check { - t.Errorf("expected %v, got %v", test.expected, check) - } - }) - } -} - -func benchmarkStringStartsWith(b *testing.B, insp *stringStartsWith, message *message.Message) { - ctx := context.TODO() - for i := 0; i < b.N; i++ { - _, _ = insp.Inspect(ctx, message) - } -} - -func BenchmarkStringStartsWith(b *testing.B) { - for _, test := range stringStartsWithTests { - insp, err := newStringStartsWith(context.TODO(), test.cfg) - if err != nil { - b.Fatal(err) - } - - b.Run(test.name, - func(b *testing.B) { - message := message.New().SetData(test.data) - benchmarkStringStartsWith(b, insp, message) - }, - ) - } -} diff --git a/v1/condition/utility_random.go b/v1/condition/utility_random.go deleted file mode 100644 index 2bbd01bb..00000000 --- a/v1/condition/utility_random.go +++ /dev/null @@ -1,51 +0,0 @@ -package condition - -import ( - "context" - "encoding/json" - "math/rand" - "time" - - "github.com/brexhq/substation/config" - iconfig "github.com/brexhq/substation/internal/config" - "github.com/brexhq/substation/message" -) - -type utilityRandomConfig struct{} - -func (c *utilityRandomConfig) Decode(in interface{}) error { - return iconfig.Decode(in, c) -} - -func newUtilityRandom(_ context.Context, cfg config.Config) (*utilityRandom, error) { - conf := utilityRandomConfig{} - if err := iconfig.Decode(cfg.Settings, &conf); err != nil { - return nil, err - } - - insp := utilityRandom{ - conf: conf, - r: rand.New(rand.NewSource(time.Now().UnixNano())), - } - - return &insp, nil -} - -type utilityRandom struct { 
- conf utilityRandomConfig - - r *rand.Rand -} - -func (insp *utilityRandom) Inspect(_ context.Context, msg *message.Message) (bool, error) { - if msg.IsControl() { - return false, nil - } - - return insp.r.Intn(2) == 1, nil -} - -func (insp *utilityRandom) String() string { - b, _ := json.Marshal(insp.conf) - return string(b) -} diff --git a/v1/examples/Makefile b/v1/examples/Makefile deleted file mode 100644 index 3bba9824..00000000 --- a/v1/examples/Makefile +++ /dev/null @@ -1,133 +0,0 @@ -SUBSTATION_DIR ?= $(shell git rev-parse --show-toplevel 2> /dev/null) -SUBSTATION_VERSION ?= $(shell git describe --tags --always --abbrev=0 2> /dev/null) -AWS_ACCOUNT_ID ?= $(shell aws sts get-caller-identity --query 'Account' --output text 2> /dev/null) -AWS_REGION ?= $(shell aws configure get region 2> /dev/null) - -FONT_RED := $(shell tput setaf 1) -FONT_RESET := $(shell tput sgr0) - -check: - @printf "$(FONT_RED)>> Checking Substation variables...$(FONT_RESET)\n" - -ifeq ("${SUBSTATION_DIR}","") - @echo "SUBSTATION_DIR variable is missing!" -else - @echo "SUBSTATION_DIR: ${SUBSTATION_DIR}" -endif - -ifeq ("${SUBSTATION_VERSION}","") - @echo "SUBSTATION_VERSION variable is missing!" -else - @echo "SUBSTATION_VERSION: ${SUBSTATION_VERSION}" -endif - - @printf "$(FONT_RED)>> Checking AWS variables...$(FONT_RESET)\n" - -ifeq ("${AWS_ACCOUNT_ID}","") - @echo "AWS_ACCOUNT_ID variable is missing!" -else - @echo "AWS_ACCOUNT_ID: ${AWS_ACCOUNT_ID}" -endif - -ifeq ("${AWS_REGION}","") - @echo "AWS_REGION variable is missing!" -else - @echo "AWS_REGION: ${AWS_REGION}" -endif - -.PHONY: build -build: - @$(MAKE) build-go - @$(MAKE) build-config - @$(MAKE) build-images - -build-config: - @printf "$(FONT_RED)>> Building configuration files...$(FONT_RESET)\n" - @cd $(SUBSTATION_DIR) && bash build/scripts/config/compile.sh - -build-go: - @printf "$(FONT_RED)>> Building Go binaries...$(FONT_RESET)\n" - @for file in $(shell find $(SUBSTATION_DIR) -name main.go); do \ - cd $$(dirname $$file) && go build; \ - done - -build-images: - @printf "$(FONT_RED)>> Building AppConfig extension...$(FONT_RESET)\n" - @cd $(SUBSTATION_DIR) && AWS_ARCHITECTURE=arm64 AWS_REGION=$(AWS_REGION) bash build/scripts/aws/lambda/get_appconfig_extension.sh - - @printf "$(FONT_RED)>> Building Docker images...$(FONT_RESET)\n" - @cd $(SUBSTATION_DIR) && \ - docker buildx build --platform linux/arm64 --build-arg ARCH=arm64 -f build/container/aws/lambda/substation/Dockerfile -t substation:latest-arm64 . && \ - docker buildx build --platform linux/arm64 --build-arg ARCH=arm64 -f build/container/aws/lambda/autoscale/Dockerfile -t autoscale:latest-arm64 . && \ - docker buildx build --platform linux/arm64 --build-arg ARCH=arm64 -f build/container/aws/lambda/validate/Dockerfile -t validate:latest-arm64 . 
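-# The deploy and destroy targets below expect the EXAMPLE variable to be set, e.g. `make deploy EXAMPLE=path/to/example` (a hypothetical path; any example directory containing a terraform/ subdirectory).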
- -deploy: - @$(MAKE) deploy-aws-init - @$(MAKE) deploy-aws-images - @$(MAKE) deploy-aws-infra - @$(MAKE) deploy-aws-config - @$(MAKE) deploy-aws-post-script - -deploy-aws-init: - @printf "$(FONT_RED)>> Initializing cloud infrastructure in AWS with Terraform...$(FONT_RESET)\n" - - @cd $(EXAMPLE)/terraform && \ - terraform init && \ - terraform apply -auto-approve -compact-warnings \ - -target=module.kms \ - -target=module.ecr \ - -target=module.ecr_autoscale \ - -target=module.ecr_validate - -deploy-aws-images: - @printf "$(FONT_RED)>> Deploying images to AWS ECR with Docker...$(FONT_RESET)\n" - - @aws ecr get-login-password | docker login --username AWS --password-stdin $(AWS_ACCOUNT_ID).dkr.ecr.$(AWS_REGION).amazonaws.com - -ifneq ("$(shell aws ecr describe-repositories --repository-names substation --output text 2> /dev/null)","") - @docker tag substation:latest-arm64 $(AWS_ACCOUNT_ID).dkr.ecr.$(AWS_REGION).amazonaws.com/substation:$(SUBSTATION_VERSION) - @docker push $(AWS_ACCOUNT_ID).dkr.ecr.$(AWS_REGION).amazonaws.com/substation:$(SUBSTATION_VERSION) -endif - -ifneq ("$(shell aws ecr describe-repositories --repository-names validate --output text 2> /dev/null)","") - @docker tag validate:latest-arm64 $(AWS_ACCOUNT_ID).dkr.ecr.$(AWS_REGION).amazonaws.com/validate:$(SUBSTATION_VERSION) - @docker push $(AWS_ACCOUNT_ID).dkr.ecr.$(AWS_REGION).amazonaws.com/validate:$(SUBSTATION_VERSION) -endif - -ifneq ("$(shell aws ecr describe-repositories --repository-names autoscale --output text 2> /dev/null)","") - @docker tag autoscale:latest-arm64 $(AWS_ACCOUNT_ID).dkr.ecr.$(AWS_REGION).amazonaws.com/autoscale:$(SUBSTATION_VERSION) - @docker push $(AWS_ACCOUNT_ID).dkr.ecr.$(AWS_REGION).amazonaws.com/autoscale:$(SUBSTATION_VERSION) -endif - -deploy-aws-infra: - @printf "$(FONT_RED)>> Deploying cloud infrastructure in AWS with Terraform...$(FONT_RESET)\n" - @cd $(EXAMPLE)/terraform && terraform apply -auto-approve -compact-warnings - -deploy-aws-config: - @printf "$(FONT_RED)>> Deploying configurations to AppConfig with Python...$(FONT_RESET)\n" - @cd $(SUBSTATION_DIR) && SUBSTATION_CONFIG_DIRECTORY=examples/$(EXAMPLE) AWS_DEFAULT_REGION=$(AWS_REGION) AWS_APPCONFIG_APPLICATION_NAME=substation AWS_APPCONFIG_ENVIRONMENT=example AWS_APPCONFIG_DEPLOYMENT_STRATEGY=Instant python3 build/scripts/aws/appconfig/appconfig_upload.py - -deploy-aws-post-script: -ifneq ("$(wildcard $(EXAMPLE)/post_deploy.sh)","") - @printf "$(FONT_RED)>> Running post-deploy script...$(FONT_RESET)\n" - @bash $(EXAMPLE)/post_deploy.sh -endif - -destroy: - @printf "$(FONT_RED)>> Destroying configurations in AppConfig with Python...$(FONT_RESET)\n" - @for file in $(shell find $(EXAMPLE) -name config.jsonnet); do \ - AWS_DEFAULT_REGION=$(AWS_REGION) AWS_APPCONFIG_APPLICATION_NAME=substation AWS_APPCONFIG_PROFILE_NAME=$$(basename $$(dirname $$file)) python3 $(SUBSTATION_DIR)/build/scripts/aws/appconfig/appconfig_delete.py; \ - done - - @printf "$(FONT_RED)>> Destroying cloud infrastructure in AWS with Terraform...$(FONT_RESET)\n" - @cd $(EXAMPLE)/terraform && terraform destroy -auto-approve -compact-warnings - -quickstart: - @$(MAKE) build-go - @$(MAKE) build-config - - @printf "$(FONT_RED)>> Printing data file...$(FONT_RESET)\n" - @cat cmd/client/file/substation/data.json - - @printf "$(FONT_RED)>> Running Substation...$(FONT_RESET)\n" - @cd cmd/client/file/substation && ./substation -config config.json -file data.json diff --git a/v1/examples/README.md b/v1/examples/README.md deleted file mode 100644 index b239e340..00000000 --- 
a/v1/examples/README.md +++ /dev/null @@ -1,15 +0,0 @@ -# examples - -This directory contains examples of how to use Substation. - -## config - -Contains examples for [configuring conditions and transforms](build/config). - -## cmd - -Contains examples for [creating new applications](build/cmd). - -## terraform - -Contains examples for [deploying to AWS using Terraform](build/terraform/aws). diff --git a/v1/examples/cmd/client/file/substation/config.jsonnet b/v1/examples/cmd/client/file/substation/config.jsonnet deleted file mode 100644 index 46a0f7a1..00000000 --- a/v1/examples/cmd/client/file/substation/config.jsonnet +++ /dev/null @@ -1,8 +0,0 @@ -local event = import 'event.libsonnet'; -local send = import 'send.libsonnet'; - -{ - transforms: - event.transforms - + send.transforms, -} diff --git a/v1/examples/cmd/client/file/substation/data.json b/v1/examples/cmd/client/file/substation/data.json deleted file mode 100644 index 2393cd01..00000000 --- a/v1/examples/cmd/client/file/substation/data.json +++ /dev/null @@ -1 +0,0 @@ -{"foo":"bar"} diff --git a/v1/examples/cmd/client/file/substation/event.libsonnet b/v1/examples/cmd/client/file/substation/event.libsonnet deleted file mode 100644 index 5d11708d..00000000 --- a/v1/examples/cmd/client/file/substation/event.libsonnet +++ /dev/null @@ -1,16 +0,0 @@ -local sub = import '../../../../../build/config/substation.libsonnet'; - -local match = sub.cnd.any( - sub.cnd.string.equal_to({ object: { source_key: 'foo' }, string: 'baz' }), -); - -local copy = sub.tf.object.copy({ object: { source_key: 'foo', target_key: 'bar' } },); - -{ - transforms: [ - sub.pattern.tf.conditional( - condition=match, transform=copy, - ), - sub.tf.object.insert({ object: { target_key: 'qux' }, value: 'quux' },), - ], -} diff --git a/v1/examples/cmd/client/file/substation/main.go b/v1/examples/cmd/client/file/substation/main.go deleted file mode 100644 index 7e3777d3..00000000 --- a/v1/examples/cmd/client/file/substation/main.go +++ /dev/null @@ -1,195 +0,0 @@ -package main - -import ( - "bytes" - "context" - "encoding/json" - "flag" - "fmt" - "io" - "os" - "runtime" - "slices" - "time" - - "golang.org/x/sync/errgroup" - - "github.com/brexhq/substation" - "github.com/brexhq/substation/internal/bufio" - "github.com/brexhq/substation/internal/channel" - "github.com/brexhq/substation/internal/file" - "github.com/brexhq/substation/internal/media" - "github.com/brexhq/substation/message" -) - -type options struct { - File string - Config string -} - -// getConfig contextually retrieves a Substation configuration. 
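-// The cfg argument may be a local path; remote locations (e.g., HTTPS or AWS S3) are assumed to work as well, since retrieval is delegated to the internal/file package.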
-func getConfig(ctx context.Context, cfg string) (io.Reader, error) { - path, err := file.Get(ctx, cfg) - defer os.Remove(path) - - if err != nil { - return nil, err - } - - conf, err := os.Open(path) - if err != nil { - return nil, err - } - defer conf.Close() - - buf := new(bytes.Buffer) - if _, err := io.Copy(buf, conf); err != nil { - return nil, err - } - - return buf, nil -} - -func main() { - var opts options - - timeout := flag.Duration("timeout", 10*time.Second, "Timeout in seconds") - flag.StringVar(&opts.File, "file", "", "File to parse") - flag.StringVar(&opts.Config, "config", "", "Substation configuration file") - flag.Parse() - - ctx, cancel := context.WithTimeout(context.Background(), *timeout) - defer cancel() - - if err := run(ctx, opts); err != nil { - panic(fmt.Errorf("main: %v", err)) - } -} - -func run(ctx context.Context, opts options) error { - c, err := getConfig(ctx, opts.Config) - if err != nil { - return err - } - - cfg := substation.Config{} - if err := json.NewDecoder(c).Decode(&cfg); err != nil { - return err - } - - sub, err := substation.New(ctx, cfg) - if err != nil { - return err - } - - ch := channel.New[*message.Message]() - group, ctx := errgroup.WithContext(ctx) - - group.Go(func() error { - tfGroup, tfCtx := errgroup.WithContext(ctx) - tfGroup.SetLimit(runtime.NumCPU()) - - for message := range ch.Recv() { - select { - case <-ctx.Done(): - return ctx.Err() - default: - } - - msg := message - tfGroup.Go(func() error { - if _, err := sub.Transform(tfCtx, msg); err != nil { - return err - } - - return nil - }) - } - - if err := tfGroup.Wait(); err != nil { - return err - } - - // CTRL messages flush the pipeline. This must be done - // after all messages have been processed. - ctrl := message.New().AsControl() - if _, err := sub.Transform(tfCtx, ctrl); err != nil { - return err - } - - return nil - }) - - // Data ingest. - group.Go(func() error { - defer ch.Close() - - fi, err := file.Get(ctx, opts.File) - if err != nil { - return err - } - defer os.Remove(fi) - - f, err := os.Open(fi) - if err != nil { - return err - } - defer f.Close() - - mediaType, err := media.File(f) - if err != nil { - return err - } - - if _, err := f.Seek(0, 0); err != nil { - return err - } - - // Unsupported media types are sent as binary data. - if !slices.Contains(bufio.MediaTypes, mediaType) { - r, err := io.ReadAll(f) - if err != nil { - return err - } - - msg := message.New().SetData(r) - ch.Send(msg) - - return nil - } - - scanner := bufio.NewScanner() - defer scanner.Close() - - if err := scanner.ReadFile(f); err != nil { - return err - } - - for scanner.Scan() { - select { - case <-ctx.Done(): - return ctx.Err() - default: - } - - b := []byte(scanner.Text()) - msg := message.New().SetData(b) - - ch.Send(msg) - } - - if err := scanner.Err(); err != nil { - return err - } - - return nil - }) - - // Wait for all goroutines to complete. This includes the goroutines that are - // executing the transform functions. 
- if err := group.Wait(); err != nil { - return err - } - - return nil -} diff --git a/v1/examples/cmd/client/file/substation/send.libsonnet b/v1/examples/cmd/client/file/substation/send.libsonnet deleted file mode 100644 index c80dfa72..00000000 --- a/v1/examples/cmd/client/file/substation/send.libsonnet +++ /dev/null @@ -1,7 +0,0 @@ -local sub = import '../../../../../build/config/substation.libsonnet'; - -{ - transforms: [ - sub.tf.send.stdout(), - ], -} diff --git a/v1/examples/cmd/development/benchmark/config.jsonnet b/v1/examples/cmd/development/benchmark/config.jsonnet deleted file mode 100644 index e8880cd0..00000000 --- a/v1/examples/cmd/development/benchmark/config.jsonnet +++ /dev/null @@ -1,6 +0,0 @@ -local event = import 'event.libsonnet'; - -{ - transforms: - event.transforms, -} diff --git a/v1/examples/cmd/development/benchmark/data_large.json b/v1/examples/cmd/development/benchmark/data_large.json deleted file mode 100644 index 036769fc..00000000 --- a/v1/examples/cmd/development/benchmark/data_large.json +++ /dev/null @@ -1 +0,0 @@ -{"eventVersion":"1.05","userIdentity":{"type":"IAMUser","principalId":"AIDAJDPLRKLG7UEXAMPLE","arn":"arn:aws:iam::123456789012:user/Alice","accountId":"123456789012","accessKeyId":"AKIAIOSFODNN7EXAMPLE","userName":"Alice","sessionContext":{"attributes":{"mfaAuthenticated":"false","creationDate":"2023-09-23T12:45:30Z"}}},"eventTime":"2023-09-23T12:45:30Z","eventSource":"ec2.amazonaws.com","eventName":"RunInstances","awsRegion":"us-west-2","sourceIPAddress":"192.0.2.1","userAgent":"console.ec2.amazonaws.com","requestParameters":{"instanceType":"t2.micro","imageId":"ami-0abcdef1234567890","keyName":"myKeyPair","subnetId":"subnet-0abcdef1234567890","minCount":1,"maxCount":1,"securityGroupIds":["sg-0abcdef1234567890"],"ebsOptimized":false,"monitoring":{"enabled":false},"disableApiTermination":false,"instanceInitiatedShutdownBehavior":"stop","blockDeviceMapping":[{"deviceName":"/dev/sda1","ebs":{"volumeSize":30,"deleteOnTermination":true,"volumeType":"gp2"}}]},"responseElements":{"instancesSet":{"items":[{"instanceId":"i-0abcdef1234567890","currentState":{"code":0,"name":"pending"},"previousState":{"code":80,"name":"stopped"},"privateDnsName":"ip-192-0-2-1.us-west-2.compute.internal","publicDnsName":"","stateTransitionReason":"","amiLaunchIndex":0,"productCodes":[],"instanceType":"t2.micro","launchTime":"2023-09-23T12:45:30Z","placement":{"availabilityZone":"us-west-2a","groupName":"","tenancy":"default"},"monitoring":{"state":"disabled"},"subnetId":"subnet-0abcdef1234567890","vpcId":"vpc-0abcdef1234567890","privateIpAddress":"192.0.2.1","sourceDestCheck":true,"groupSet":[{"groupId":"sg-0abcdef1234567890","groupName":"my-sg"}],"architecture":"x86_64","rootDeviceType":"ebs","rootDeviceName":"/dev/sda1","blockDeviceMapping":[{"deviceName":"/dev/sda1","ebs":{"volumeId":"vol-0abcdef1234567890","status":"attaching","attachTime":"2023-09-23T12:45:30Z","deleteOnTermination":true}}],"virtualizationType":"hvm","clientToken":"","tags":[],"securityGroups":[{"groupId":"sg-0abcdef1234567890","groupName":"my-sg"}],"sourceDestCheck":true,"hypervisor":"xen","networkInterfaces":[{"networkInterfaceId":"eni-0abcdef1234567890","subnetId":"subnet-0abcdef1234567890","vpcId":"vpc-0abcdef1234567890","description":"Primary network 
interface","ownerId":"123456789012","status":"in-use","macAddress":"12:34:56:78:9a:bc","privateIpAddress":"192.0.2.1","privateDnsName":"ip-192-0-2-1.us-west-2.compute.internal","sourceDestCheck":true,"groups":[{"groupId":"sg-0abcdef1234567890","groupName":"my-sg"}],"attachment":{"attachmentId":"eni-attach-0abcdef1234567890","deviceIndex":0,"status":"attaching","attachTime":"2023-09-23T12:45:30Z","deleteOnTermination":true},"privateIpAddresses":[{"privateIpAddress":"192.0.2.1","privateDnsName":"ip-192-0-2-1.us-west-2.compute.internal","primary":true}]}],"ebsOptimized":false}]},"ownerId":"123456789012","reservationId":"r-0abcdef1234567890"}} diff --git a/v1/examples/cmd/development/benchmark/data_small.json b/v1/examples/cmd/development/benchmark/data_small.json deleted file mode 100644 index 71df0ed1..00000000 --- a/v1/examples/cmd/development/benchmark/data_small.json +++ /dev/null @@ -1 +0,0 @@ -{"eventVersion":"1.05","userIdentity":{"type":"IAMUser","principalId":"AIDAJDPLRKLG7UEXAMPLE","arn":"arn:aws:iam::123456789012:user/Alice","accountId":"123456789012","accessKeyId":"AKIAIOSFODNN7EXAMPLE","userName":"Alice"},"eventTime":"2023-09-23T12:45:30Z","eventSource":"ec2.amazonaws.com","eventName":"StopInstances","awsRegion":"us-west-2","sourceIPAddress":"192.0.2.1","userAgent":"console.ec2.amazonaws.com","requestParameters":{"instanceIds":["i-0abcdef1234567890"]},"responseElements":{"instancesSet":{"items":[{"instanceId":"i-0abcdef1234567890","currentState":{"code":80,"name":"stopped"},"previousState":{"code":16,"name":"running"}}]}}} diff --git a/v1/examples/cmd/development/benchmark/event.libsonnet b/v1/examples/cmd/development/benchmark/event.libsonnet deleted file mode 100644 index e5b7a210..00000000 --- a/v1/examples/cmd/development/benchmark/event.libsonnet +++ /dev/null @@ -1,7 +0,0 @@ -local sub = import '../../../../build/config/substation.libsonnet'; - -{ - transforms: [ - sub.transform.time.now({ object: { target_key: 'now' } }), - ], -} diff --git a/v1/examples/config/condition/meta/if_all_else/config.jsonnet b/v1/examples/config/condition/meta/if_all_else/config.jsonnet deleted file mode 100644 index 2d6a5654..00000000 --- a/v1/examples/config/condition/meta/if_all_else/config.jsonnet +++ /dev/null @@ -1,33 +0,0 @@ -// This example determines if all values in an array are email addresses -// that have the DNS domain "brex.com". This technique can be used to -// validate or summarize values in an array. -local sub = import '../../../../../build/config/substation.libsonnet'; - -local domain_match = sub.cnd.all( - // After running the example, try changing this to "any" or "none" and see - // what happens. - sub.cnd.meta.for_each(settings={ type: 'all', inspector: sub.cnd.str.ends_with(settings={ value: '@brex.com' }) }), -); - -{ - concurrency: 1, - transforms: [ - sub.tf.send.stdout(), - // In real-world deployments, the match decision is typically used - // to summarize an array of values. For this example, the decision - // is represented as a boolean value and printed to stdout. 
- sub.tf.meta.switch( - settings={ cases: [ - { - condition: domain_match, - transform: sub.tf.obj.insert({ object: { target_key: 'meta result' }, value: true }), - }, - { - transform: sub.tf.obj.insert({ object: { target_key: 'meta result' }, value: false }), - }, - ] } - ), - sub.tf.obj.cp({ object: { source_key: 'meta result' } }), - sub.tf.send.stdout(), - ], -} diff --git a/v1/examples/config/condition/meta/if_all_else/data.json b/v1/examples/config/condition/meta/if_all_else/data.json deleted file mode 100644 index 3fa4dcfc..00000000 --- a/v1/examples/config/condition/meta/if_all_else/data.json +++ /dev/null @@ -1 +0,0 @@ -["alice@brex.com","bob@brex.com"] diff --git a/v1/examples/config/condition/number/config.jsonnet b/v1/examples/config/condition/number/config.jsonnet deleted file mode 100644 index f59582d6..00000000 --- a/v1/examples/config/condition/number/config.jsonnet +++ /dev/null @@ -1,27 +0,0 @@ -local sub = import '../../../../build/config/substation.libsonnet'; - -{ - concurrency: 1, - transforms: [ - // This shows example usage of the 'number.equal_to' and 'number.greater_than' conditions. - sub.tf.meta.switch( - settings={ - cases: [ - { - condition: sub.cnd.num.eq({ obj: { src: 'sourcePort' }, value: 22 }), - transform: sub.tf.obj.insert({ obj: { trg: 'protocol' }, value: 'SSH' }), - }, - ], - } - ), - sub.tf.meta.switch( - settings={ cases: [ - { - condition: sub.cnd.num.gt({ obj: { src: 'bytes' }, value: 10000 }), - transform: sub.tf.obj.insert({ obj: { trg: 'severity' }, value: 'high' }), - }, - ] } - ), - sub.tf.send.stdout(), - ], -} diff --git a/v1/examples/config/condition/number/data.json b/v1/examples/config/condition/number/data.json deleted file mode 100644 index 1f4466dc..00000000 --- a/v1/examples/config/condition/number/data.json +++ /dev/null @@ -1 +0,0 @@ -{ "eventId": "123461", "timestamp": "2024-07-29T10:00:00Z", "sourceIP": "192.168.1.6", "destinationIP": "172.16.0.7", "sourcePort": "22", "destinationPort": "22", "protocol": "TCP", "action": "ACCEPT", "bytes": "20000" } diff --git a/v1/examples/config/condition/string/config.jsonnet b/v1/examples/config/condition/string/config.jsonnet deleted file mode 100644 index 7576e8da..00000000 --- a/v1/examples/config/condition/string/config.jsonnet +++ /dev/null @@ -1,26 +0,0 @@ -local sub = import '../../../../build/config/substation.libsonnet'; - -{ - concurrency: 1, - transforms: [ - // This shows example usage of the 'string.equal_to' and 'string.greater_than' conditions. - // The string greater than and less than conditions compare lexicographically with another static or target_key value.
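- // For example, 'vpc-2b3c4d5e' is greater than 'vpc-1a2b3c4d' because '2' sorts after '1'.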
- sub.tf.meta.switch( - settings={ cases: [ - { - condition: sub.cnd.str.eq({ obj: { src: 'action' }, value: 'ACCEPT' }), - transform: sub.tf.obj.insert({ obj: { trg: 'action' }, value: 'Allow' }), - }, - ] } - ), - sub.tf.meta.switch( - settings={ cases: [ - { - condition: sub.cnd.str.gt({ obj: { src: 'vpcId' }, value: 'vpc-1a2b3c4d' }), - transform: sub.tf.obj.insert({ obj: { trg: 'priority' }, value: 'high' }), - }, - ] } - ), - sub.tf.send.stdout(), - ], -} diff --git a/v1/examples/config/condition/string/data.json b/v1/examples/config/condition/string/data.json deleted file mode 100644 index 9ae38675..00000000 --- a/v1/examples/config/condition/string/data.json +++ /dev/null @@ -1 +0,0 @@ -{ "eventId": "123461", "timestamp": "2024-07-29T10:00:00Z", "sourceIP": "192.168.1.6", "destinationIP": "172.16.0.7", "sourcePort": "80", "destinationPort": "443", "protocol": "TCP", "action": "ACCEPT", "vpcId": "vpc-2b3c4d5e" } diff --git a/v1/examples/config/config.jsonnet b/v1/examples/config/config.jsonnet deleted file mode 100644 index 783f2e58..00000000 --- a/v1/examples/config/config.jsonnet +++ /dev/null @@ -1,28 +0,0 @@ -local sub = import '../../build/config/substation.libsonnet'; - -{ - // Substation application configs always contain an array named `transforms`. - transforms: [ - // Each transform function is defined in the `substation` library. - sub.transform.object.insert({ id: 'insert-z', object: { target_key: 'a' }, value: 'z' }), - // Transform functions can be conditionally applied using the - // `meta.switch` function. - sub.transform.meta.switch({ cases: [ - { - condition: sub.condition.any( - sub.condition.string.equal_to({ object: { source_key: 'a' }, value: 'z' }) - ), - transform: sub.transform.object.insert({ object: { target_key: 'c' }, value: 'd' }), - }, - ] }), - // This is identical to the previous example, but uses a pre-defined - // pattern and library abbreviations. - sub.pattern.tf.conditional( - condition=sub.cnd.str.eq({ obj: { src: 'a' }, value: 'z' }), - transform=sub.tf.obj.insert({ obj: { trg: 'c' }, value: 'd' }), - ), - // Applications usually rely on a `send` transform to send results - // to a destination. These can be defined anywhere in the config. - sub.tf.send.stdout(), - ], -} diff --git a/v1/examples/config/data.json b/v1/examples/config/data.json deleted file mode 100644 index b6e81411..00000000 --- a/v1/examples/config/data.json +++ /dev/null @@ -1 +0,0 @@ -{"a":"b"} diff --git a/v1/examples/config/transform/aggregate/sample/config.jsonnet b/v1/examples/config/transform/aggregate/sample/config.jsonnet deleted file mode 100644 index 23713e2f..00000000 --- a/v1/examples/config/transform/aggregate/sample/config.jsonnet +++ /dev/null @@ -1,31 +0,0 @@ -// This example samples data by aggregating events into an array, then -// selecting the first event in the array as a sample. The sampling rate -// is 1/N, where N is the count of events in the buffer. -local sub = import '../../../../../build/config/substation.libsonnet'; - -{ - concurrency: 1, - transforms: [ - // Events are aggregated into an array. This example has a sample - // rate of up to 1/10. By default, the sample rate will be lower if - // fewer than 10 events are processed by Substation. - sub.tf.aggregate.to.array({ object: { target_key: 'sample' }, batch: { count: 10 } }), - // A strict sample rate can be enforced by dropping any events that - // contain the `sample` key, but do not have a length of 10. 
- sub.tf.meta.switch(settings={ cases: [ - { - condition: sub.cnd.any(sub.cnd.num.len.eq({ object: { source_key: 'sample' }, value: 10 })), - transforms: [ - sub.tf.object.copy({ object: { source_key: 'sample.0' } }), - ], - }, - { - condition: sub.cnd.any(sub.cnd.num.len.gt({ object: { source_key: 'sample' }, value: 0 })), - transforms: [ - sub.tf.util.drop(), - ], - }, - ] }), - sub.tf.send.stdout(), - ], -} diff --git a/v1/examples/config/transform/aggregate/sample/data.jsonl b/v1/examples/config/transform/aggregate/sample/data.jsonl deleted file mode 100644 index d101df01..00000000 --- a/v1/examples/config/transform/aggregate/sample/data.jsonl +++ /dev/null @@ -1,13 +0,0 @@ -{"a":"b"} -{"c":"d"} -{"e":"f"} -{"g":"h"} -{"i":"j"} -{"k":"l"} -{"m":"n"} -{"o":"p"} -{"q":"r"} -{"s":"t"} -{"u":"v"} -{"w":"x"} -{"y":"z"} diff --git a/v1/examples/config/transform/aggregate/summarize/config.jsonnet b/v1/examples/config/transform/aggregate/summarize/config.jsonnet deleted file mode 100644 index 7634782b..00000000 --- a/v1/examples/config/transform/aggregate/summarize/config.jsonnet +++ /dev/null @@ -1,24 +0,0 @@ -// This example reduces data by summarizing multiple network events into a single event, -// simulating the behavior of flow records. This technique can be used to reduce -// any JSON data that contains common fields, not just network events. -local sub = import '../../../../../build/config/substation.libsonnet'; - -{ - transforms: [ - // Events are aggregated into arrays based on their client and server fields. - // The resulting array is put into a new field named "reduce". - sub.tf.object.copy({ object: { source_key: '[client,server]', target_key: 'meta buffer' } }), - sub.tf.aggregate.to.array({ object: { target_key: 'reduce', batch_key: 'meta buffer' } }), - // The "reduce" field is then reduced into a new object that contains: - // - The last event in the array. - // - The number of events in the array. - // - The sum of the "bytes" field of all events in the array. - sub.tf.object.copy({ object: { source_key: 'reduce|@reverse.0', target_key: 'meta reduce' } }), - sub.tf.object.copy({ object: { source_key: 'reduce.#', target_key: 'meta reduce.count' } }), - sub.tf.number.math.add({ object: { source_key: 'reduce.#.bytes', target_key: 'meta reduce.bytes_total' } }), - sub.tf.object.delete({ object: { source_key: 'meta reduce.bytes' } }), - // The created object overwrites the original event object and is sent to stdout. 
- sub.tf.object.copy({ object: { source_key: 'meta reduce' } }), - sub.tf.send.stdout(), - ], -} diff --git a/v1/examples/config/transform/aggregate/summarize/data.jsonl b/v1/examples/config/transform/aggregate/summarize/data.jsonl deleted file mode 100644 index 9684c261..00000000 --- a/v1/examples/config/transform/aggregate/summarize/data.jsonl +++ /dev/null @@ -1,19 +0,0 @@ -{"client":"10.1.1.2","server":"8.8.8.8","bytes":11,"timestamp":1674429049} -{"client":"10.1.1.3","server":"8.8.4.4","bytes":20,"timestamp":1674429050} -{"client":"10.1.1.2","server":"8.8.4.4","bytes":15,"timestamp":1674429051} -{"client":"10.1.1.3","server":"8.8.8.8","bytes":8,"timestamp":1674429052} -{"client":"10.1.1.2","server":"8.8.8.8","bytes":25,"timestamp":1674429053} -{"client":"10.1.1.4","server":"1.2.3.4","bytes":2400,"timestamp":1674429054} -{"client":"10.1.1.2","server":"8.8.4.4","bytes":23,"timestamp":1674429055} -{"client":"10.1.1.3","server":"8.8.8.8","bytes":12,"timestamp":1674429056} -{"client":"10.1.1.2","server":"8.8.4.4","bytes":18,"timestamp":1674429057} -{"client":"10.1.1.3","server":"8.8.8.8","bytes":6,"timestamp":1674429058} -{"client":"10.1.1.2","server":"8.8.4.4","bytes":23,"timestamp":1674429059} -{"client":"10.1.1.3","server":"8.8.8.8","bytes":12,"timestamp":1674429060} -{"client":"10.1.1.2","server":"8.8.4.4","bytes":18,"timestamp":1674429061} -{"client":"10.1.1.3","server":"8.8.8.8","bytes":6,"timestamp":1674429062} -{"client":"10.1.1.2","server":"8.8.8.8","bytes":11,"timestamp":1674429063} -{"client":"10.1.1.3","server":"8.8.4.4","bytes":20,"timestamp":1674429064} -{"client":"10.1.1.2","server":"8.8.4.4","bytes":15,"timestamp":1674429065} -{"client":"10.1.1.3","server":"8.8.8.8","bytes":8,"timestamp":1674429066} -{"client":"10.1.1.2","server":"8.8.8.8","bytes":25,"timestamp":1674429067} diff --git a/v1/examples/config/transform/aggregate/summarize/stdout.jsonl b/v1/examples/config/transform/aggregate/summarize/stdout.jsonl deleted file mode 100644 index b04a0539..00000000 --- a/v1/examples/config/transform/aggregate/summarize/stdout.jsonl +++ /dev/null @@ -1,5 +0,0 @@ -{"client":"10.1.1.4","server":"1.2.3.4","timestamp":1674429054,"count":1,"bytes_total":2400} -{"client":"10.1.1.3","server":"8.8.4.4","timestamp":1674429064,"count":2,"bytes_total":40} -{"client":"10.1.1.2","server":"8.8.8.8","timestamp":1674429067,"count":4,"bytes_total":72} -{"client":"10.1.1.2","server":"8.8.4.4","timestamp":1674429065,"count":6,"bytes_total":112} -{"client":"10.1.1.3","server":"8.8.8.8","timestamp":1674429066,"count":6,"bytes_total":52} diff --git a/v1/examples/config/transform/array/extend/config.jsonnet b/v1/examples/config/transform/array/extend/config.jsonnet deleted file mode 100644 index 48a66608..00000000 --- a/v1/examples/config/transform/array/extend/config.jsonnet +++ /dev/null @@ -1,13 +0,0 @@ -// This example extends an array by appending and flattening values. -local sub = import '../../../../../build/config/substation.libsonnet'; - -{ - concurrency: 1, - transforms: [ - // Append the value of `z` to `a` (using the `-1` array index). - sub.tf.object.copy({ object: { source_key: 'z', target_key: 'a.-1' } }), - // Flatten the array. 
- sub.tf.object.copy({ object: { source_key: 'a|@flatten', target_key: 'a' } }), - sub.tf.send.stdout(), - ], -} diff --git a/v1/examples/config/transform/array/extend/data.json b/v1/examples/config/transform/array/extend/data.json deleted file mode 100644 index 9915a5f3..00000000 --- a/v1/examples/config/transform/array/extend/data.json +++ /dev/null @@ -1 +0,0 @@ -{"a":[1,2],"z":[3,4]} diff --git a/v1/examples/config/transform/array/extend/stdout.json b/v1/examples/config/transform/array/extend/stdout.json deleted file mode 100644 index 554b3847..00000000 --- a/v1/examples/config/transform/array/extend/stdout.json +++ /dev/null @@ -1 +0,0 @@ -{"a":[1,2,3,4],"z":[3,4]} diff --git a/v1/examples/config/transform/array/flatten/config.jsonnet b/v1/examples/config/transform/array/flatten/config.jsonnet deleted file mode 100644 index d484e19d..00000000 --- a/v1/examples/config/transform/array/flatten/config.jsonnet +++ /dev/null @@ -1,11 +0,0 @@ -// This example flattens an array of arrays. -local sub = import '../../../../../build/config/substation.libsonnet'; - -{ - concurrency: 1, - transforms: [ - // Flatten by copying the value and chaining GJSON's `@flatten` operator. - sub.tf.obj.cp({ object: { source_key: 'a|@flatten', target_key: 'a' } }), - sub.tf.send.stdout(), - ], -} diff --git a/v1/examples/config/transform/array/flatten/data.json b/v1/examples/config/transform/array/flatten/data.json deleted file mode 100644 index 667836a7..00000000 --- a/v1/examples/config/transform/array/flatten/data.json +++ /dev/null @@ -1 +0,0 @@ -{"a":[1,2,[3,4]]} diff --git a/v1/examples/config/transform/array/flatten/stdout.json b/v1/examples/config/transform/array/flatten/stdout.json deleted file mode 100644 index f6595873..00000000 --- a/v1/examples/config/transform/array/flatten/stdout.json +++ /dev/null @@ -1 +0,0 @@ -{"a":[1,2,3,4]} diff --git a/v1/examples/config/transform/array/flatten_deep/config.jsonnet b/v1/examples/config/transform/array/flatten_deep/config.jsonnet deleted file mode 100644 index 22afb0e0..00000000 --- a/v1/examples/config/transform/array/flatten_deep/config.jsonnet +++ /dev/null @@ -1,12 +0,0 @@ -// This example flattens an array of arrays. -local sub = import '../../../../../build/config/substation.libsonnet'; - -{ - concurrency: 1, - transforms: [ - // Flatten by copying the value and chaining GJSON's `@flatten` operator - // with the `deep` option. - sub.tf.object.copy({ object: { source_key: 'a|@flatten:{"deep":true}', target_key: 'a' } }), - sub.tf.send.stdout(), - ], -} diff --git a/v1/examples/config/transform/array/flatten_deep/data.json b/v1/examples/config/transform/array/flatten_deep/data.json deleted file mode 100644 index 852ff7c7..00000000 --- a/v1/examples/config/transform/array/flatten_deep/data.json +++ /dev/null @@ -1 +0,0 @@ -{"a":[1,2,[3,4,[5,6]]]} diff --git a/v1/examples/config/transform/array/flatten_deep/stdout.json b/v1/examples/config/transform/array/flatten_deep/stdout.json deleted file mode 100644 index 9cbb8774..00000000 --- a/v1/examples/config/transform/array/flatten_deep/stdout.json +++ /dev/null @@ -1 +0,0 @@ -{"a":[1,2,3,4,5,6]} diff --git a/v1/examples/config/transform/array/group/config.jsonnet b/v1/examples/config/transform/array/group/config.jsonnet deleted file mode 100644 index 27ce4166..00000000 --- a/v1/examples/config/transform/array/group/config.jsonnet +++ /dev/null @@ -1,30 +0,0 @@ -// This example groups an array of arrays into an array of objects -// based on index and configured keys. 
-local sub = import '../../../../../build/config/substation.libsonnet'; - -local files_key = 'meta files'; - -{ - concurrency: 1, - transforms: [ - // This example sends data to stdout at each step to iteratively show - // how the data is transformed. - sub.tf.send.stdout(), - // Copy the object to metadata, where it is grouped. - sub.tf.obj.cp({ object: { target_key: files_key } }), - // Elements from the file_name array are transformed and derived file extensions - // are added to a new array. - sub.tf.meta.for_each({ - object: { source_key: sub.helpers.object.get_element(files_key, 'file_name'), target_key: sub.helpers.object.append(files_key, 'file_extension') }, - transforms: [ - sub.tf.string.capture(settings={ pattern: '\\.([^\\.]+)$' }), - ], - }), - // The arrays are grouped into an array of objects, then copied to the message's data field. - // For example: - // - // [{name: name1, type: type1, size: size1, extension: extension1}, {name: name2, type: type2, size: size2, extension: extension2}] - sub.tf.object.cp({ object: { source_key: files_key + '|@group' } }), - sub.tf.send.stdout(), - ], -} diff --git a/v1/examples/config/transform/array/group/data.json b/v1/examples/config/transform/array/group/data.json deleted file mode 100644 index a0e2819a..00000000 --- a/v1/examples/config/transform/array/group/data.json +++ /dev/null @@ -1 +0,0 @@ -{"file_name":["foo.txt","bar.html"],"file_type":["text/plain","text/html"],"file_size":[100,500]} diff --git a/v1/examples/config/transform/enrich/http_secret/config.jsonnet b/v1/examples/config/transform/enrich/http_secret/config.jsonnet deleted file mode 100644 index 2d28d7a5..00000000 --- a/v1/examples/config/transform/enrich/http_secret/config.jsonnet +++ /dev/null @@ -1,26 +0,0 @@ -// This example shows how to use the `utility_secret` transform to -// retrieve a secret and reference it in a subsequent transform. -local sub = import '../../../../../build/config/substation.libsonnet'; - -// The secret is retrieved from the environment variable named -// `SUBSTATION_EXAMPLE_URL` and referenced in subsequent transforms using -// the ID value `ENV_VAR`. -// -// Run this on the local system as an example: -// export SUBSTATION_EXAMPLE_URL=https://www.gutenberg.org/files/2701/old/moby10b.txt -local secret = sub.secrets.environment_variable({ id: 'ENV_VAR', name: 'SUBSTATION_EXAMPLE_URL' }); - -{ - // The `utility_secret` transform retrieves the secret from the environment - // variable and keeps it in memory. The `enrich_http_get` transform references - // the secret using the ID value `ENV_VAR`. In this example, the secret is the - // URL of a web page that is retrieved by the `enrich_http_get` transform and - // sent to stdout by the `send_stdout` transform. - transforms: [ - sub.transform.utility.secret({ secret: secret }), - sub.transform.enrich.http.get({ url: '${SECRET:ENV_VAR}' }), - // Moby Dick is a large text, so the max size of the batch - // has to be increased; otherwise the data won't fit. - sub.tf.send.stdout({ batch: { size: 10000000 } }), - ], -} diff --git a/v1/examples/config/transform/enrich/kvstore_csv/config.jsonnet b/v1/examples/config/transform/enrich/kvstore_csv/config.jsonnet deleted file mode 100644 index 25066078..00000000 --- a/v1/examples/config/transform/enrich/kvstore_csv/config.jsonnet +++ /dev/null @@ -1,22 +0,0 @@ -// This example shows how to use the `enrich_kv_store_item_get` transform -// to look up data in a KV store backed by a CSV file.
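-// An assumed kv.csv layout matching the lookup below: a `product,price,calories` header with rows like `churro,9.99,500`.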
-local sub = import '../../../../../build/config/substation.libsonnet'; - -// This CSV file must be local to the Substation app. Absolute paths are -// recommended. Files accessible over HTTPS and hosted in AWS S3 also work. -// -// The `column` parameter is required and specifies the column in the CSV file -// that will be used to look up the key in the KV store. -local kv = sub.kv_store.csv_file({ file: 'kv.csv', column: 'product' }); - -{ - transforms: [ - // The CSV file KV store returns the entire row minus the key column. - // For example, this returns {"price":"9.99","calories":"500"} for "churro". - sub.tf.enrich.kv_store.item.get({ - object: { source_key: 'product', target_key: 'info' }, - kv_store: kv, - }), - sub.tf.send.stdout(), - ], -} diff --git a/v1/examples/config/transform/enrich/kvstore_csv/data.jsonl b/v1/examples/config/transform/enrich/kvstore_csv/data.jsonl deleted file mode 100644 index 19a662e7..00000000 --- a/v1/examples/config/transform/enrich/kvstore_csv/data.jsonl +++ /dev/null @@ -1 +0,0 @@ -{"product":"churro"} diff --git a/v1/examples/config/transform/enrich/kvstore_json/config.jsonnet b/v1/examples/config/transform/enrich/kvstore_json/config.jsonnet deleted file mode 100644 index 71cbcd96..00000000 --- a/v1/examples/config/transform/enrich/kvstore_json/config.jsonnet +++ /dev/null @@ -1,17 +0,0 @@ -// This example shows how to use the `enrich_kv_store_item_get` transform -// to look up data in a KV store backed by a JSON file. -local sub = import '../../../../../build/config/substation.libsonnet'; - -// This JSON file must be local to the Substation app. Absolute paths are -// recommended. Files accessible over HTTPS and hosted in AWS S3 also work. -local kv = sub.kv_store.json_file({ file: 'kv.json' }); - -{ - transforms: [ - sub.tf.enrich.kv_store.item.get({ - object: { source_key: 'product', target_key: 'price' }, - kv_store: kv, - }), - sub.tf.send.stdout(), - ], -} diff --git a/v1/examples/config/transform/enrich/kvstore_json/data.jsonl b/v1/examples/config/transform/enrich/kvstore_json/data.jsonl deleted file mode 100644 index 19a662e7..00000000 --- a/v1/examples/config/transform/enrich/kvstore_json/data.jsonl +++ /dev/null @@ -1 +0,0 @@ -{"product":"churro"} diff --git a/v1/examples/config/transform/enrich/kvstore_json/kv.json b/v1/examples/config/transform/enrich/kvstore_json/kv.json deleted file mode 100644 index 7d22510e..00000000 --- a/v1/examples/config/transform/enrich/kvstore_json/kv.json +++ /dev/null @@ -1 +0,0 @@ -{"churro":9.99} diff --git a/v1/examples/config/transform/enrich/kvstore_set_add/config.jsonnet b/v1/examples/config/transform/enrich/kvstore_set_add/config.jsonnet deleted file mode 100644 index 603f6162..00000000 --- a/v1/examples/config/transform/enrich/kvstore_set_add/config.jsonnet +++ /dev/null @@ -1,28 +0,0 @@ -// This example shows how to use the `enrich_kv_store_set_add` transform -// to track data over time in a KV store. The sample data contains food -// orders and is indexed by each customer's email address. -local sub = import '../../../../../build/config/substation.libsonnet'; - -// The default memory store is used. -local mem = sub.kv_store.memory(); - -{ - transforms: [ - // Each order is stored in memory indexed by the customer's email - // address and printed to stdout. Only unique orders are stored. - sub.tf.enrich.kv_store.sadd({ - object: { source_key: 'customer', target_key: 'order' }, - kv_store: mem, - ttl_offset: '10s', - }), - sub.tf.send.stdout(), - - // Each message has the list added to its object.
The list grows - // as orders are added to the store above. - sub.tf.enrich.kv_store.item.get({ - object: { source_key: 'customer', target_key: 'kv_store' }, - kv_store: mem, - }), - sub.tf.send.stdout(), - ], -} diff --git a/v1/examples/config/transform/enrich/kvstore_set_add/data.jsonl b/v1/examples/config/transform/enrich/kvstore_set_add/data.jsonl deleted file mode 100644 index 5bf38854..00000000 --- a/v1/examples/config/transform/enrich/kvstore_set_add/data.jsonl +++ /dev/null @@ -1,6 +0,0 @@ -{"date": "2021-01-01","customer":"alice@brex.com","order":"pizza"} -{"date": "2021-01-01","customer":"bob@brex.com","order":"burger"} -{"date": "2021-01-03","customer":"bob@brex.com","order":"pizza"} -{"date": "2021-01-07","customer":"alice@brex.com","order":"pizza"} -{"date": "2021-01-07","customer":"bob@brex.com","order":"burger"} -{"date": "2021-01-13","customer":"alice@brex.com","order":"pizza"} diff --git a/v1/examples/config/transform/enrich/mmdb/config.jsonnet b/v1/examples/config/transform/enrich/mmdb/config.jsonnet deleted file mode 100644 index f30ac056..00000000 --- a/v1/examples/config/transform/enrich/mmdb/config.jsonnet +++ /dev/null @@ -1,13 +0,0 @@ -local sub = import '../../../../../build/config/substation.libsonnet'; - -local city = sub.kv_store.mmdb({ file: 'path/to/GeoLite2-City.mmdb' }); - -local asn = sub.kv_store.mmdb({ file: 'path/to/GeoLite2-ASN.mmdb' }); - -{ - transforms: [ - sub.tf.enrich.kv_store.iget({ object: { source_key: 'ip', target_key: 'city' }, kv_store: city }), - sub.tf.enrich.kv_store.iget({ object: { source_key: 'ip', target_key: 'asn' }, kv_store: asn }), - sub.tf.send.stdout(), - ], -} diff --git a/v1/examples/config/transform/enrich/mmdb/data.jsonl b/v1/examples/config/transform/enrich/mmdb/data.jsonl deleted file mode 100644 index 6ea857b7..00000000 --- a/v1/examples/config/transform/enrich/mmdb/data.jsonl +++ /dev/null @@ -1,3 +0,0 @@ -{"ip":"1.1.1.1"} -{"ip":"8.8.8.8"} -{"ip":"9.9.9.9"} diff --git a/v1/examples/config/transform/enrich/mmdb/stdout.jsonl b/v1/examples/config/transform/enrich/mmdb/stdout.jsonl deleted file mode 100644 index e1cd24e3..00000000 --- a/v1/examples/config/transform/enrich/mmdb/stdout.jsonl +++ /dev/null @@ -1,3 +0,0 @@ -{"ip":"8.8.8.8","city":{"continent":{"code":"NA","geoname_id":6255149,"names":{"de":"Nordamerika","en":"North America","es":"Norteamérica","fr":"Amérique du Nord","ja":"北アメリカ","pt-BR":"América do Norte","ru":"Северная Америка","zh-CN":"北美洲"}},"country":{"geoname_id":6252001,"iso_code":"US","names":{"de":"Vereinigte Staaten","en":"United States","es":"Estados Unidos","fr":"États Unis","ja":"アメリカ","pt-BR":"EUA","ru":"США","zh-CN":"美国"}},"location":{"accuracy_radius":1000,"latitude":37.751,"longitude":-97.822,"time_zone":"America/Chicago"},"registered_country":{"geoname_id":6252001,"iso_code":"US","names":{"de":"Vereinigte Staaten","en":"United States","es":"Estados Unidos","fr":"États Unis","ja":"アメリカ","pt-BR":"EUA","ru":"США","zh-CN":"美国"}}},"asn":{"autonomous_system_number":15169,"autonomous_system_organization":"GOOGLE"}} -{"ip":"9.9.9.9","city":{"city":{"geoname_id":5327684,"names":{"de":"Berkeley","en":"Berkeley","es":"Berkeley","fr":"Berkeley","ja":"バークリー","pt-BR":"Berkeley","ru":"Беркли","zh-CN":"伯克利"}},"continent":{"code":"NA","geoname_id":6255149,"names":{"de":"Nordamerika","en":"North America","es":"Norteamérica","fr":"Amérique du Nord","ja":"北アメリカ","pt-BR":"América do Norte","ru":"Северная Америка","zh-CN":"北美洲"}},"country":{"geoname_id":6252001,"iso_code":"US","names":{"de":"Vereinigte 
Staaten","en":"United States","es":"Estados Unidos","fr":"États Unis","ja":"アメリカ","pt-BR":"EUA","ru":"США","zh-CN":"美国"}},"location":{"accuracy_radius":20,"latitude":37.8767,"longitude":-122.2676,"metro_code":807,"time_zone":"America/Los_Angeles"},"postal":{"code":"94709"},"registered_country":{"geoname_id":2658434,"iso_code":"CH","names":{"de":"Schweiz","en":"Switzerland","es":"Suiza","fr":"Suisse","ja":"スイス連邦","pt-BR":"Suíça","ru":"Швейцария","zh-CN":"瑞士"}},"subdivisions":[{"geoname_id":5332921,"iso_code":"CA","names":{"de":"Kalifornien","en":"California","es":"California","fr":"Californie","ja":"カリフォルニア州","pt-BR":"Califórnia","ru":"Калифорния","zh-CN":"加州"}}]},"asn":{"autonomous_system_number":19281,"autonomous_system_organization":"QUAD9-AS-1"}} -{"ip":"1.1.1.1","city":{"registered_country":{"geoname_id":2077456,"iso_code":"AU","names":{"de":"Australien","en":"Australia","es":"Australia","fr":"Australie","ja":"オーストラリア","pt-BR":"Austrália","ru":"Австралия","zh-CN":"澳大利亚"}}},"asn":{"autonomous_system_number":13335,"autonomous_system_organization":"CLOUDFLARENET"}} diff --git a/v1/examples/config/transform/enrich/urlscan/config.jsonnet b/v1/examples/config/transform/enrich/urlscan/config.jsonnet deleted file mode 100644 index e200fbe1..00000000 --- a/v1/examples/config/transform/enrich/urlscan/config.jsonnet +++ /dev/null @@ -1,49 +0,0 @@ -// This example shows how to make scan requests and retrieve -// results using the urlscan API (https://urlscan.io/docs/api/). -local sub = import '../../../../../build/config/substation.libsonnet'; - -local headers = { 'API-Key': '${SECRET:URLSCAN}', 'Content-Type': 'application/json' }; - -{ - transforms: [ - // Retrieve the urlscan API key from the secrets store. - // (Never put a secret directly into a configuration.) - sub.transform.utility.secret({ - // The API key is stored in an environment variable named - // `URLSCAN_API_KEY`. - secret: sub.secrets.environment_variable({ id: 'URLSCAN', name: 'URLSCAN_API_KEY' }), - }), - // Sends a scan request and waits for the result. This - // follows recommended practices from the urlscan API docs, - // and will try to fetch the result up to 3 times over 15s. - // If there are no results after retrying, then the unmodified - // message is sent to stdout. - sub.tf.enrich.http.post({ - object: { body_key: '@this', target_key: 'meta response' }, - url: 'https://urlscan.io/api/v1/scan/', - headers: headers, - }), - // - sub.tf.util.delay({ duration: '5s' }), - sub.tf.meta.err({ transforms: [ // Errors are caught in case the retry limit is reached. - sub.tf.meta.retry({ - // This condition runs on the result of the transforms. If - // it returns false, then the transforms are retried until - // it returns true or the retry settings are exhausted. - condition: sub.cnd.all([ - sub.cnd.num.len.gt({ object: { source_key: 'meta result.task.time' }, value: 0 }), - ]), - transforms: [ - sub.tf.enrich.http.get({ - object: { source_key: 'meta response.uuid', target_key: 'meta result' }, - url: 'https://urlscan.io/api/v1/result/${DATA}', // DATA is the value of the source_key. - headers: headers, - }), - ], - retry: { delay: '5s', count: 3 }, // Retry up to 3 times with a 5 second delay (5s, 5s, 5s). - }), - ] }), - sub.tf.obj.cp({ object: { source_key: 'meta result' } }), - sub.tf.send.stdout({ batch: { size: 1000 * 1000 * 5 } }), // 5MB (the results can be large). 
- ], -} diff --git a/v1/examples/config/transform/enrich/urlscan/data.jsonl b/v1/examples/config/transform/enrich/urlscan/data.jsonl deleted file mode 100644 index 0315fbab..00000000 --- a/v1/examples/config/transform/enrich/urlscan/data.jsonl +++ /dev/null @@ -1 +0,0 @@ -{"url":"https://www.brex.com/"} diff --git a/v1/examples/config/transform/format/zip/data.jsonl b/v1/examples/config/transform/format/zip/data.jsonl deleted file mode 100644 index b7519d55..00000000 --- a/v1/examples/config/transform/format/zip/data.jsonl +++ /dev/null @@ -1,3 +0,0 @@ -{"foo":"bar"} -{"baz":"qux"} -{"quux":"corge"} \ No newline at end of file diff --git a/v1/examples/config/transform/meta/crash_program/config.jsonnet b/v1/examples/config/transform/meta/crash_program/config.jsonnet deleted file mode 100644 index d055ca76..00000000 --- a/v1/examples/config/transform/meta/crash_program/config.jsonnet +++ /dev/null @@ -1,32 +0,0 @@ -// This example shows how to intentionally crash a program if a transform -// does not produce an output. This technique can be used to provide strict -// guarantees about the result of data transformations. -local sub = import '../../../../../build/config/substation.libsonnet'; - -// `key` is the target of the transform that may not produce an output and is -// checked to determine if the transform was successful. -local key = 'c'; - -{ - transforms: [ - // This conditional transform simulates a transform that may not produce an output. - sub.pattern.tf.conditional( - condition=sub.cnd.any(sub.cnd.utility.random()), - transform=sub.tf.obj.insert(settings={ object: { target_key: key }, value: true }), - ), - // If there is no output from the transform, then an error is thrown to crash the program. - sub.tf.meta.switch(settings={ cases: [ - { - condition: sub.cnd.any(sub.cnd.num.len.eq(settings={ object: { source_key: key }, value: 0 })), - transforms: [ - sub.tf.util.err(settings={ message: 'transform produced no output' }), - ], - }, - { - transforms: [ - sub.tf.send.stdout(), - ], - }, - ] }), - ], -} diff --git a/v1/examples/config/transform/meta/crash_program/data.json b/v1/examples/config/transform/meta/crash_program/data.json deleted file mode 100644 index b6e81411..00000000 --- a/v1/examples/config/transform/meta/crash_program/data.json +++ /dev/null @@ -1 +0,0 @@ -{"a":"b"} diff --git a/v1/examples/config/transform/meta/each_in_array/config.jsonnet b/v1/examples/config/transform/meta/each_in_array/config.jsonnet deleted file mode 100644 index cdf7e2cb..00000000 --- a/v1/examples/config/transform/meta/each_in_array/config.jsonnet +++ /dev/null @@ -1,20 +0,0 @@ -// This example shows how to use the `meta.for_each` transform to -// modify objects in an array. In this example, keys are removed -// and added to each object in the array. -local sub = import '../../../../../build/config/substation.libsonnet'; - -{ - transforms: [ - sub.tf.meta.for_each({ - object: { source_key: 'a', target_key: 'a' }, - // Multiple transforms can be applied in series to each object - // in the array by using the `meta.pipeline` transform. Otherwise, - // use any individual transform to modify the object. 
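-// For example, {"b":1,"c":2} becomes {"c":2,"z":true} (compare data.json and stdout.json below).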
- transform: sub.tf.meta.pipeline({ transforms: [ - sub.tf.object.delete({ object: { source_key: 'b' } }), - sub.tf.object.insert({ object: { target_key: 'z' }, value: true }), - ] }), - }), - sub.tf.send.stdout(), - ], -} diff --git a/v1/examples/config/transform/meta/each_in_array/data.json b/v1/examples/config/transform/meta/each_in_array/data.json deleted file mode 100644 index b3c96393..00000000 --- a/v1/examples/config/transform/meta/each_in_array/data.json +++ /dev/null @@ -1 +0,0 @@ -{"a":[{"b":1,"c":2},{"b":3,"c":4}]} diff --git a/v1/examples/config/transform/meta/each_in_array/stdout.json b/v1/examples/config/transform/meta/each_in_array/stdout.json deleted file mode 100644 index 7598b8b0..00000000 --- a/v1/examples/config/transform/meta/each_in_array/stdout.json +++ /dev/null @@ -1 +0,0 @@ -{"a":[{"c":2,"z":true},{"c":4,"z":true}]} diff --git a/v1/examples/config/transform/meta/exactly_once_consumer/config.jsonnet b/v1/examples/config/transform/meta/exactly_once_consumer/config.jsonnet deleted file mode 100644 index 9902fc58..00000000 --- a/v1/examples/config/transform/meta/exactly_once_consumer/config.jsonnet +++ /dev/null @@ -1,33 +0,0 @@ -// This example shows how to use the `meta_kv_store_lock` transform to -// create an "exactly once" semantic for a pipeline consumer. -local sub = import '../../../../../build/config/substation.libsonnet'; - -// In production environments a distributed KV store should be used. -local kv = sub.kv_store.memory(); - -{ - transforms: [ - // If a message acquires a lock, then it is tagged for inspection. - sub.tf.meta.kv_store.lock(settings={ - kv_store: kv, - prefix: 'eo_consumer', - ttl_offset: '1m', - transforms: [ - sub.tf.obj.insert({ object: { target_key: 'meta eo_consumer' }, value: 'locked' }), - ], - }), - // Messages that are not locked are dropped from the pipeline. - sub.tf.meta.switch({ cases: [ - { - condition: sub.cnd.none([ - sub.cnd.str.eq({ object: { source_key: 'meta eo_consumer' }, value: 'locked' }), - ]), - transforms: [ - sub.tf.utility.drop(), - ], - }, - ] }), - // At this point only locked messages exist in the pipeline. - sub.tf.send.stdout(), - ], -} diff --git a/v1/examples/config/transform/meta/exactly_once_consumer/data.jsonl b/v1/examples/config/transform/meta/exactly_once_consumer/data.jsonl deleted file mode 100644 index 864e8bfd..00000000 --- a/v1/examples/config/transform/meta/exactly_once_consumer/data.jsonl +++ /dev/null @@ -1,8 +0,0 @@ -{"a":"b"} -{"a":"b"} -{"c":"d"} -{"a":"b"} -{"c":"d"} -{"c":"d"} -{"e":"f"} -{"a":"b"} diff --git a/v1/examples/config/transform/meta/exactly_once_producer/config.jsonnet b/v1/examples/config/transform/meta/exactly_once_producer/config.jsonnet deleted file mode 100644 index c7851700..00000000 --- a/v1/examples/config/transform/meta/exactly_once_producer/config.jsonnet +++ /dev/null @@ -1,23 +0,0 @@ -// This example shows how to use the `meta_kv_store_lock` transform to -// create an "exactly once" semantic for a pipeline producer. -local sub = import '../../../../../build/config/substation.libsonnet'; - -// In production environments a distributed KV store should be used. -local kv = sub.kv_store.memory(); - -{ - transforms: [ - // This only prints messages that acquire a lock. Any message - // that fails to acquire a lock will be skipped. An error in the - // sub-transform will cause all previously locked messages to be - // unlocked. 
- sub.tf.meta.err({ transform: sub.tf.meta.kv_store.lock(settings={ - kv_store: kv, - prefix: 'eo_producer', - ttl_offset: '1m', - transforms: [ - sub.tf.send.stdout(), - ], - }) }), - ], -} diff --git a/v1/examples/config/transform/meta/exactly_once_producer/data.jsonl b/v1/examples/config/transform/meta/exactly_once_producer/data.jsonl deleted file mode 100644 index 864e8bfd..00000000 --- a/v1/examples/config/transform/meta/exactly_once_producer/data.jsonl +++ /dev/null @@ -1,8 +0,0 @@ -{"a":"b"} -{"a":"b"} -{"c":"d"} -{"a":"b"} -{"c":"d"} -{"c":"d"} -{"e":"f"} -{"a":"b"} diff --git a/v1/examples/config/transform/meta/exactly_once_system/config.jsonnet b/v1/examples/config/transform/meta/exactly_once_system/config.jsonnet deleted file mode 100644 index 95b149ef..00000000 --- a/v1/examples/config/transform/meta/exactly_once_system/config.jsonnet +++ /dev/null @@ -1,26 +0,0 @@ -// This example shows how to use the `meta_kv_store_lock` transform to -// create an "exactly once" semantic for an entire pipeline system. -local sub = import '../../../../../build/config/substation.libsonnet'; - -// In production environments a distributed KV store should be used. -local kv = sub.kv_store.memory(); - -{ - transforms: [ - // All messages are locked before being sent through other transform - // functions, ensuring that the message is processed only once. - // An error in any sub-transform will cause all previously locked - // messages to be unlocked. - sub.tf.meta.err({ transforms: [ - sub.tf.meta.kv_store.lock(settings={ - kv_store: kv, - prefix: 'eo_system', - ttl_offset: '1m', - transforms: [ - sub.tf.obj.insert({ object: { target_key: 'processed' }, value: true }), - sub.tf.send.stdout(), - ], - }), - ] }), - ], -} diff --git a/v1/examples/config/transform/meta/exactly_once_system/data.jsonl b/v1/examples/config/transform/meta/exactly_once_system/data.jsonl deleted file mode 100644 index 864e8bfd..00000000 --- a/v1/examples/config/transform/meta/exactly_once_system/data.jsonl +++ /dev/null @@ -1,8 +0,0 @@ -{"a":"b"} -{"a":"b"} -{"c":"d"} -{"a":"b"} -{"c":"d"} -{"c":"d"} -{"e":"f"} -{"a":"b"} diff --git a/v1/examples/config/transform/meta/execution_time/config.jsonnet b/v1/examples/config/transform/meta/execution_time/config.jsonnet deleted file mode 100644 index 9050510c..00000000 --- a/v1/examples/config/transform/meta/execution_time/config.jsonnet +++ /dev/null @@ -1,44 +0,0 @@ -// This example shows how to use the `meta_metric_duration` transform to -// measure the execution time of other transforms. -local sub = import '../../../../../build/config/substation.libsonnet'; - -local attr = { AppName: 'example' }; -local dest = { type: 'aws_cloudwatch_embedded_metrics' }; - -{ - transforms: [ - // The `meta_metric_duration` transform measures the execution time of - // the transform that it wraps. - sub.transform.meta.metric.duration( - settings={ - metric: { name: 'ObjectCopyDuration', attributes: attr, destination: dest }, - transforms: [ - sub.transform.object.copy({ object: { source_key: 'foo', target_key: 'baz' } }), - ], - }, - ), - // This can be useful for measuring the execution time of transforms that - // may take a long time to execute. In this example, the `utility_delay` - // transform is used to simulate a long-running transform. 
- sub.transform.meta.metric.duration( - settings={ - metric: { name: 'UtilityDelayDuration', attributes: attr, destination: dest }, - transforms: [ - sub.transform.utility.delay({ duration: '100ms' }), - ], - }, - ), - // Multiple transforms can be measured at once by including them in the - // `transforms` list. - sub.transform.meta.metric.duration( - settings={ - metric: { name: 'UtilityMultiDuration', attributes: attr, destination: dest }, - transforms: [ - sub.transform.utility.delay({ duration: '100ms' }), - sub.transform.utility.delay({ duration: '100ms' }), - sub.transform.utility.delay({ duration: '100ms' }), - ], - }, - ), - ], -} diff --git a/v1/examples/config/transform/meta/retry_with_backoff/config.jsonnet b/v1/examples/config/transform/meta/retry_with_backoff/config.jsonnet deleted file mode 100644 index ac6518cc..00000000 --- a/v1/examples/config/transform/meta/retry_with_backoff/config.jsonnet +++ /dev/null @@ -1,26 +0,0 @@ -// This example shows how to implement retry with backoff behavior for any -// transform that does not produce an output. This technique may be useful -// when enriching data with external services or asynchronous data pipelines. -local sub = import '../../../../../build/config/substation.libsonnet'; - -// `key` is the target of the transform that may not produce an output and is -// checked to determine if the transform was successful. -local key = 'c'; - -local cnd = sub.cnd.all([ - sub.cnd.num.len.gt({ object: { source_key: key }, value: 0 }), - sub.cnd.utility.random(), // Simulates a transform that may fail to produce an output. -]); - -{ - transforms: [ - sub.tf.meta.retry({ - transforms: [ - sub.tf.obj.insert({ object: { target_key: key }, value: true }), - ], - condition: cnd, // If this returns false, then the transforms are retried. - retry: { delay: '1s', count: 4 }, // Retry up to 4 times with a 1 second backoff (1s, 1s, 1s, 1s). - }), - sub.tf.send.stdout(), - ], -} diff --git a/v1/examples/config/transform/meta/retry_with_backoff/data.json b/v1/examples/config/transform/meta/retry_with_backoff/data.json deleted file mode 100644 index b6e81411..00000000 --- a/v1/examples/config/transform/meta/retry_with_backoff/data.json +++ /dev/null @@ -1 +0,0 @@ -{"a":"b"} diff --git a/v1/examples/config/transform/number/clamp/config.jsonnet b/v1/examples/config/transform/number/clamp/config.jsonnet deleted file mode 100644 index 0733b599..00000000 --- a/v1/examples/config/transform/number/clamp/config.jsonnet +++ /dev/null @@ -1,11 +0,0 @@ -// This example uses the `number.clamp` pattern to return a value that is -// constrained to a range, where the range is defined by two constants. -local sub = import '../../../../../build/config/substation.libsonnet'; - -{ - concurrency: 1, - // Use `null` for object keys to operate on the entire message. - transforms: sub.pattern.tf.num.clamp(null, null, 0, 100) + [ - sub.tf.send.stdout(), - ], -} diff --git a/v1/examples/config/transform/number/max/config.jsonnet b/v1/examples/config/transform/number/max/config.jsonnet deleted file mode 100644 index 42b3d359..00000000 --- a/v1/examples/config/transform/number/max/config.jsonnet +++ /dev/null @@ -1,11 +0,0 @@ -// This example uses the `number_maximum` transform to return the larger -// of two values, where one value is a constant and the other is a message.
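As a hedged sketch of the behavior described above (the example's `data.txt` values are not shown in this diff), `num.max` with a constant of `0` acts as a floor on each message value:

```jsonnet
// A minimal sketch, assuming newline-delimited numeric inputs:
//   -1 -> max(-1, 0) -> 0
//    0 -> max(0, 0)  -> 0
//    3 -> max(3, 0)  -> 3
sub.tf.num.max({ value: 0 })
```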
-local sub = import '../../../../../build/config/substation.libsonnet'; - -{ - concurrency: 1, - transforms: [ - sub.tf.num.max({ value: 0 }), - sub.tf.send.stdout(), - ], -} diff --git a/v1/examples/config/transform/number/min/config.jsonnet b/v1/examples/config/transform/number/min/config.jsonnet deleted file mode 100644 index 672d7245..00000000 --- a/v1/examples/config/transform/number/min/config.jsonnet +++ /dev/null @@ -1,11 +0,0 @@ -// This example uses the `number_minimum` transform to return the smaller -// of two values, where one value is a constant and the other is a message. -local sub = import '../../../../../build/config/substation.libsonnet'; - -{ - concurrency: 1, - transforms: [ - sub.tf.num.min({ value: 0 }), - sub.tf.send.stdout(), - ], -} diff --git a/v1/examples/config/transform/send/aux_transforms/config.jsonnet b/v1/examples/config/transform/send/aux_transforms/config.jsonnet deleted file mode 100644 index 1e3ad904..00000000 --- a/v1/examples/config/transform/send/aux_transforms/config.jsonnet +++ /dev/null @@ -1,39 +0,0 @@ -// This example configures send transforms with additional transforms that -// are executed after the data is buffered and before it is sent. The -// transforms applied inside of the send transform do not affect the data -// sent through the main pipeline. All send transforms use this behavior. -local sub = import '../../../../../build/config/substation.libsonnet'; - -{ - concurrency: 1, - transforms: [ - // By default all data is buffered before it is sent. - sub.tf.send.stdout({ - // auxiliary_transforms is a sub-pipeline executed after the data is - // batched and before it is sent. The data is scoped to the send transform - // and results are not forwarded to the next transform in the pipeline. - // Any transform can be used here, including additional send transforms. - // - // If auxiliary_transforms is not used, then the batched data is sent individually - // without modification. - auxiliary_transforms: [ - sub.tf.object.insert({ object: { target_key: 'transformed_by' }, value: 'send_stdout' }), - ], - }), - // By default, send.file writes data to `$(pwd)/[year]/[month]/[day]/[uuid]`. - sub.tf.send.file({ - // This sub-pipeline creates a newline delimited JSON (NDJSON) file. Uncomment - // the additional transforms to compress and encode the file. - aux_tforms: [ - sub.tf.object.insert({ object: { target_key: 'transformed_by' }, value: 'send_file' }), - ] + sub.pattern.tf.fmt.jsonl + [ - // sub.tf.fmt.to.gzip(), - // sub.tf.fmt.to.base64(), - ], - }), - // This transform is included to show that the data is not modified outside of - // any individual transform's scope. Since this transform has a low buffer count, - // most data is sent to stdout before the data from any previous transform is. 
- sub.tf.send.stdout({ batch: { count: 1 } }), - ], -} diff --git a/v1/examples/config/transform/send/aux_transforms/data.jsonl b/v1/examples/config/transform/send/aux_transforms/data.jsonl deleted file mode 100644 index d101df01..00000000 --- a/v1/examples/config/transform/send/aux_transforms/data.jsonl +++ /dev/null @@ -1,13 +0,0 @@ -{"a":"b"} -{"c":"d"} -{"e":"f"} -{"g":"h"} -{"i":"j"} -{"k":"l"} -{"m":"n"} -{"o":"p"} -{"q":"r"} -{"s":"t"} -{"u":"v"} -{"w":"x"} -{"y":"z"} diff --git a/v1/examples/config/transform/send/aws_retryable_errors/config.jsonnet b/v1/examples/config/transform/send/aws_retryable_errors/config.jsonnet deleted file mode 100644 index b5513ebf..00000000 --- a/v1/examples/config/transform/send/aws_retryable_errors/config.jsonnet +++ /dev/null @@ -1,25 +0,0 @@ -// This example configures custom retryable errors for the Kinesis Data Stream -// destination transform. All AWS transforms support a custom retry strategy, -// which can be used to handle transient errors in a way that is specific to -// the AWS service being used or the specific use case. -local sub = import '../../../../../build/config/substation.libsonnet'; - -{ - concurrency: 1, - transforms: [ - sub.tf.send.aws.kinesis_data_stream( - settings={ stream_name: 'substation', retry: { - // The maximum number of times to retry a request. - // - // The default is 3. - count: 3, - // A list of regular expressions that match error messages - // and cause the request to be retried. If there is no match, then - // the default AWS retry strategy is used. - // - // The default is an empty list (i.e. no custom retryable errors). - error_messages: ['connection reset by peer'], - } }, - ), - ], -} diff --git a/v1/examples/config/transform/send/aws_s3_glacier/config.jsonnet b/v1/examples/config/transform/send/aws_s3_glacier/config.jsonnet deleted file mode 100644 index 4133b405..00000000 --- a/v1/examples/config/transform/send/aws_s3_glacier/config.jsonnet +++ /dev/null @@ -1,23 +0,0 @@ -// This example configures a storage class for the AWS S3 destination transform. -// The Glacier Instant Retrieval class is recommended for archival data that is -// compatible with Substation's serverless architecture; this class can be read -// directly by a Lambda function triggered by an SNS notification. -local sub = import '../../../../../build/config/substation.libsonnet'; - -{ - concurrency: 1, - transforms: [ - sub.tf.send.aws.s3({ - // Glacier Instant Retrieval charges a minimum of 128KB per object; the remaining - // batch values are set impossibly high to ensure all events are - // written to the same file. - batch: { size: 128 * 1000, count: 1000 * 1000, duration: '60m' }, - bucket_name: 'substation', - storage_class: 'GLACIER_IR', // Glacier Instant Retrieval. - // S3 objects are organized by time to the nearest hour and have a UUID filename. - file_path: { time_format: '2006/01/02/15', uuid: true, suffix: '.jsonl.gz' }, - // This example formats the data as JSONL and compresses it with Gzip. - aux_tforms: sub.pattern.tf.fmt.jsonl + [sub.tf.fmt.to.gzip()], - }), - ], -} diff --git a/v1/examples/config/transform/send/batch/config.jsonnet b/v1/examples/config/transform/send/batch/config.jsonnet deleted file mode 100644 index fc73e6c1..00000000 --- a/v1/examples/config/transform/send/batch/config.jsonnet +++ /dev/null @@ -1,19 +0,0 @@ -// This example configures send transforms with batch keys to organize -// data before it is sent externally.
Every send transform supports batching -// and optionally grouping JSON objects by a value derived from the object. -local sub = import '../../../../../build/config/substation.libsonnet'; - -{ - concurrency: 1, - transforms: [ - sub.tf.send.stdout({ - // Each object is organized by the value retrieved from the `group_id` key. - object: { batch_key: 'group_id' }, - }), - sub.tf.send.file({ - // This also applies to file-based send transforms, and every other send - // transform as well. - object: { batch_key: 'group_id' }, - }), - ], -} diff --git a/v1/examples/config/transform/send/batch/data.jsonl b/v1/examples/config/transform/send/batch/data.jsonl deleted file mode 100644 index 40196852..00000000 --- a/v1/examples/config/transform/send/batch/data.jsonl +++ /dev/null @@ -1,13 +0,0 @@ -{"a":"b","group_id":1} -{"c":"d","group_id":2} -{"e":"f","group_id":1} -{"g":"h","group_id":2} -{"i":"j","group_id":1} -{"k":"l","group_id":2} -{"m":"n","group_id":1} -{"o":"p","group_id":2} -{"q":"r","group_id":1} -{"s":"t","group_id":2} -{"u":"v","group_id":1} -{"w":"x","group_id":2} -{"y":"z","group_id":1} diff --git a/v1/examples/config/transform/send/datadog/config.jsonnet b/v1/examples/config/transform/send/datadog/config.jsonnet deleted file mode 100644 index 3e702982..00000000 --- a/v1/examples/config/transform/send/datadog/config.jsonnet +++ /dev/null @@ -1,42 +0,0 @@ -// This example transforms data into a Datadog HTTP intake compatible format -// and sends it to Datadog using the Logs API. -// -// More information about the Datadog Logs API can be found here: -// https://docs.datadoghq.com/api/latest/logs/#send-logs -local sub = import '../../../../../build/config/substation.libsonnet'; - -// Datadog has a strict limit of 5MB per payload. Any individual event -// larger than 1MB will be truncated on ingest. -local max_size = 1000 * 1000 * 5; - -// Datadog has a strict limit of 1000 events per payload. -local max_count = 1000; - -{ - concurrency: 1, - transforms: [ - // Connections to the Datadog Logs API are authenticated using an API key. - sub.transform.utility.secret({ secret: sub.secrets.environment_variable({ id: 'DD', name: 'DATADOG_API_KEY' }) }), - sub.tf.send.http.post({ - batch: { size: max_size, count: max_count }, - auxiliary_transforms: [ - sub.tf.agg.to.array({ object: { target_key: 'message' } }), - ], - url: 'https://http-intake.logs.datadoghq.com/api/v2/logs', - headers: [ - { - key: 'DD-API-KEY', - value: '${SECRET:DD}', - }, - { - key: 'ddsource', - value: 'my-source', - }, - { - key: 'service', - value: 'my-service', - }, - ], - }), - ], -} diff --git a/v1/examples/config/transform/send/datadog/data.jsonl b/v1/examples/config/transform/send/datadog/data.jsonl deleted file mode 100644 index d101df01..00000000 --- a/v1/examples/config/transform/send/datadog/data.jsonl +++ /dev/null @@ -1,13 +0,0 @@ -{"a":"b"} -{"c":"d"} -{"e":"f"} -{"g":"h"} -{"i":"j"} -{"k":"l"} -{"m":"n"} -{"o":"p"} -{"q":"r"} -{"s":"t"} -{"u":"v"} -{"w":"x"} -{"y":"z"} diff --git a/v1/examples/config/transform/send/splunk/config.jsonnet b/v1/examples/config/transform/send/splunk/config.jsonnet deleted file mode 100644 index df1830e3..00000000 --- a/v1/examples/config/transform/send/splunk/config.jsonnet +++ /dev/null @@ -1,31 +0,0 @@ -// This example transforms data into a Splunk HTTP Event Collector (HEC) -// compatible format and sends it to a Splunk instance. 
The Splunk HEC -// expects multiple events to be sent in a single request using this format: -// {"a":"b"}{"c":"d"}{"e":"f"} -// -// More information about the Splunk HEC can be found here: -// https://docs.splunk.com/Documentation/SplunkCloud/latest/Data/HECExamples -local sub = import '../../../../../build/config/substation.libsonnet'; - -// By default the Splunk HEC limits the size of each request to 1MB. -local max_size = 1000 * 1000; - -{ - concurrency: 1, - transforms: [ - // Connections to the Splunk HEC are authenticated using a token. - sub.transform.utility.secret({ secret: sub.secrets.environment_variable({ id: 'SPLUNK', name: 'SPLUNK_TOKEN_ID' }) }), - sub.tf.send.http.post({ - batch: { size: max_size }, - auxiliary_transforms: [ - sub.tf.agg.to.array(), - sub.tf.array.join({ separator: '' }), - ], - url: 'https://my-instance.cloud.splunk.com:8088/services/collector', - headers: [{ - key: 'Authorization', - value: 'Splunk ${SECRET:SPLUNK}', - }], - }), - ], -} diff --git a/v1/examples/config/transform/send/splunk/data.jsonl b/v1/examples/config/transform/send/splunk/data.jsonl deleted file mode 100644 index d101df01..00000000 --- a/v1/examples/config/transform/send/splunk/data.jsonl +++ /dev/null @@ -1,13 +0,0 @@ -{"a":"b"} -{"c":"d"} -{"e":"f"} -{"g":"h"} -{"i":"j"} -{"k":"l"} -{"m":"n"} -{"o":"p"} -{"q":"r"} -{"s":"t"} -{"u":"v"} -{"w":"x"} -{"y":"z"} diff --git a/v1/examples/config/transform/send/sumologic/config.jsonnet b/v1/examples/config/transform/send/sumologic/config.jsonnet deleted file mode 100644 index 44347f45..00000000 --- a/v1/examples/config/transform/send/sumologic/config.jsonnet +++ /dev/null @@ -1,23 +0,0 @@ -// This example creates a newline delimited JSON (NDJSON) document that can be -// sent to a Sumo Logic HTTPS endpoint. -// -// More information about Sumo Logic HTTP upload can be found here: -// https://help.sumologic.com/docs/send-data/hosted-collectors/http-source/logs-metrics/upload-logs/ -local sub = import '../../../../../build/config/substation.libsonnet'; - -// Sumo Logic has a strict limit of 1MB per request. -local max_size = 1000 * 1000; - -{ - concurrency: 1, - transforms: [ - sub.tf.send.http.post({ - batch: { size: max_size }, - auxiliary_transforms: sub.pattern.tf.fmt.jsonl, - // There is no authentication, so the URL should be treated like a secret. - url: 'https://endpoint6.collection.us2.sumologic.com/receiver/v1/http/xxxxxxxxxx', - // You can override the default source category associated with the URL. - // headers: [{key: 'X-Sumo-Category', value: 'testing/substation'}] - }), - ], -} diff --git a/v1/examples/config/transform/send/sumologic/data.jsonl b/v1/examples/config/transform/send/sumologic/data.jsonl deleted file mode 100644 index d101df01..00000000 --- a/v1/examples/config/transform/send/sumologic/data.jsonl +++ /dev/null @@ -1,13 +0,0 @@ -{"a":"b"} -{"c":"d"} -{"e":"f"} -{"g":"h"} -{"i":"j"} -{"k":"l"} -{"m":"n"} -{"o":"p"} -{"q":"r"} -{"s":"t"} -{"u":"v"} -{"w":"x"} -{"y":"z"} diff --git a/v1/examples/config/transform/time/string_conversion/config.jsonnet b/v1/examples/config/transform/time/string_conversion/config.jsonnet deleted file mode 100644 index 58fbbcfe..00000000 --- a/v1/examples/config/transform/time/string_conversion/config.jsonnet +++ /dev/null @@ -1,16 +0,0 @@ -// This example shows how to convert time values between string formats.
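To make the round trip below concrete, here is a hedged trace using the example input `{"time":"2024-01-01T01:02:03.123Z"}` (the intermediate epoch value is computed here, not taken from the example's output):

```jsonnet
// time.from.string, format '2006-01-02T15:04:05.000Z':
//   "2024-01-01T01:02:03.123Z" -> 1704070923123000000 (Unix epoch, nanoseconds)
// time.to.string, format '2006-01-02T15:04:05':
//   1704070923123000000 -> "2024-01-01T01:02:03" (sub-second precision is dropped)
```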
-local sub = import '../../../../../build/config/substation.libsonnet'; - -{ - concurrency: 1, - // Substation uses Go's pattern-based time format (https://gobyexample.com/time-formatting-parsing) - // to convert time values to and from strings. All time values in the system are in epoch / Unix format - // with nanosecond precision. - transforms: [ - // This converts the string value to Unix time. - sub.tf.time.from.string({ obj: { source_key: 'time', target_key: 'time' }, format: '2006-01-02T15:04:05.000Z' }), - // This converts the Unix time back to a string. - sub.tf.time.to.string({ obj: { source_key: 'time', target_key: 'time' }, format: '2006-01-02T15:04:05' }), - sub.tf.send.stdout(), - ], -} diff --git a/v1/examples/config/transform/time/string_conversion/data.json b/v1/examples/config/transform/time/string_conversion/data.json deleted file mode 100644 index 86e52d7c..00000000 --- a/v1/examples/config/transform/time/string_conversion/data.json +++ /dev/null @@ -1 +0,0 @@ -{"time":"2024-01-01T01:02:03.123Z"} diff --git a/v1/examples/config/transform/utility/generate_ctrl/config.jsonnet b/v1/examples/config/transform/utility/generate_ctrl/config.jsonnet deleted file mode 100644 index feb1b242..00000000 --- a/v1/examples/config/transform/utility/generate_ctrl/config.jsonnet +++ /dev/null @@ -1,13 +0,0 @@ -// This example shows how to use the `utility_control` transform to -// generate a control (ctrl) Message based on the number of data Messages -// received by the system. ctrl Messages override the settings of the -// `aggregate_to_array` transform (and any other transform that supports them). -local sub = import '../../../../../build/config/substation.libsonnet'; - -{ - transforms: [ - sub.tf.utility.control({ batch: { count: 2 } }), - sub.tf.aggregate.to.array({ batch: { count: 10000 } }), - sub.tf.send.stdout(), - ], -} diff --git a/v1/examples/config/transform/utility/generate_ctrl/data.jsonl b/v1/examples/config/transform/utility/generate_ctrl/data.jsonl deleted file mode 100644 index d101df01..00000000 --- a/v1/examples/config/transform/utility/generate_ctrl/data.jsonl +++ /dev/null @@ -1,13 +0,0 @@ -{"a":"b"} -{"c":"d"} -{"e":"f"} -{"g":"h"} -{"i":"j"} -{"k":"l"} -{"m":"n"} -{"o":"p"} -{"q":"r"} -{"s":"t"} -{"u":"v"} -{"w":"x"} -{"y":"z"} diff --git a/v1/examples/config/transform/utility/message_bytes/config.jsonnet b/v1/examples/config/transform/utility/message_bytes/config.jsonnet deleted file mode 100644 index a2cdff2c..00000000 --- a/v1/examples/config/transform/utility/message_bytes/config.jsonnet +++ /dev/null @@ -1,19 +0,0 @@ -// This example shows how to use the `utility_metric_bytes` transform to -// sum the amount of data received and transformed by Substation. -local sub = import '../../../../../build/config/substation.libsonnet'; - -local attr = { AppName: 'example' }; -local dest = { type: 'aws_cloudwatch_embedded_metrics' }; - -{ - transforms: [ - // If the transform is configured first, then the metric reflects - // the sum of bytes received by Substation. - sub.transform.utility.metric.bytes({ metric: { name: 'BytesReceived', attributes: attr, destination: dest } }), - // This inserts a value into the object so that the message size increases. - sub.transform.object.insert({ obj: { target_key: '_' }, value: 1 }), - // If the transform is configured last, then the metric reflects - // the sum of bytes transformed by Substation.
- sub.transform.utility.metric.bytes({ metric: { name: 'BytesTransformed', attributes: attr, destination: dest } }), - ], -} diff --git a/v1/examples/config/transform/utility/message_bytes/data.jsonl b/v1/examples/config/transform/utility/message_bytes/data.jsonl deleted file mode 100644 index d101df01..00000000 --- a/v1/examples/config/transform/utility/message_bytes/data.jsonl +++ /dev/null @@ -1,13 +0,0 @@ -{"a":"b"} -{"c":"d"} -{"e":"f"} -{"g":"h"} -{"i":"j"} -{"k":"l"} -{"m":"n"} -{"o":"p"} -{"q":"r"} -{"s":"t"} -{"u":"v"} -{"w":"x"} -{"y":"z"} diff --git a/v1/examples/config/transform/utility/message_count/config.jsonnet b/v1/examples/config/transform/utility/message_count/config.jsonnet deleted file mode 100644 index 37b2c3ef..00000000 --- a/v1/examples/config/transform/utility/message_count/config.jsonnet +++ /dev/null @@ -1,18 +0,0 @@ -// This example shows how to use the `utility_metric_count` transform to -// count the number of messages received and transformed by Substation. -local sub = import '../../../../../build/config/substation.libsonnet'; - -local attr = { AppName: 'example' }; -local dest = { type: 'aws_cloudwatch_embedded_metrics' }; - -{ - transforms: [ - // If the transform is configured first, then the count reflects - // the number of messages received by Substation. - sub.transform.utility.metric.count({ metric: { name: 'MessagesReceived', attributes: attr, destination: dest } }), - sub.transform.utility.drop(), - // If the transform is configured last, then the count reflects - // the number of messages transformed by Substation. - sub.transform.utility.metric.count({ metric: { name: 'MessagesTransformed', attributes: attr, destination: dest } }), - ], -} diff --git a/v1/examples/config/transform/utility/message_count/data.jsonl b/v1/examples/config/transform/utility/message_count/data.jsonl deleted file mode 100644 index d101df01..00000000 --- a/v1/examples/config/transform/utility/message_count/data.jsonl +++ /dev/null @@ -1,13 +0,0 @@ -{"a":"b"} -{"c":"d"} -{"e":"f"} -{"g":"h"} -{"i":"j"} -{"k":"l"} -{"m":"n"} -{"o":"p"} -{"q":"r"} -{"s":"t"} -{"u":"v"} -{"w":"x"} -{"y":"z"} diff --git a/v1/examples/terraform/aws/README.md b/v1/examples/terraform/aws/README.md deleted file mode 100644 index 0e844b44..00000000 --- a/v1/examples/terraform/aws/README.md +++ /dev/null @@ -1,576 +0,0 @@ -# AWS - -These example deployments demonstrate different use cases for Substation on AWS. - -# CloudWatch Logs - -## Cross-Account / Cross-Region - -Deploys a data pipeline that collects data from CloudWatch log groups in any account or region into a Kinesis Data Stream. - -```mermaid - -flowchart LR - %% resources - cw1([CloudWatch Log Group]) - cw2([CloudWatch Log Group]) - cw3([CloudWatch Log Group]) - kds([Kinesis Data Stream]) - - consumerHandler[[Handler]] - consumerTransforms[Transforms] - - subgraph Account B / Region us-west-2 - cw2 - end - - subgraph Account A / Region us-west-2 - cw3 - end - - subgraph Account A / Region us-east-1 - cw1 --> kds - cw3 --> kds - cw2 --> kds - kds --> consumerHandler - - subgraph Substation Consumer Node - consumerHandler --> consumerTransforms - end - end -``` - -## To Lambda - -Deploys a data pipeline that sends data from a CloudWatch log group to a Lambda function. 
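A minimal consumer configuration for this pattern might look like the sketch below; it mirrors the `cloudwatch_logs/to_lambda` consumer config removed later in this diff (the import path is abbreviated here). CloudWatch delivers logs base64 encoded and gzip compressed in the `awslogs.data` field of the event:

```jsonnet
local sub = import 'substation.libsonnet';  // Path abbreviated for illustration.

{
  concurrency: 1,
  transforms: [
    // Copy the encoded payload, then decode and decompress it.
    sub.tf.obj.cp({ object: { source_key: 'awslogs.data' } }),
    sub.tf.fmt.from.base64(),
    sub.tf.fmt.from.gzip(),
    sub.tf.send.stdout(),
  ],
}
```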
- -```mermaid - -flowchart LR - %% resources - cw([CloudWatch Log Group]) - - consumerHandler[[Handler]] - consumerTransforms[Transforms] - - cw --> consumerHandler - - subgraph Substation Consumer Node - consumerHandler --> consumerTransforms - end -``` - -# DynamoDB - -## Change Data Capture (CDC) - -Deploys a data pipeline that implements a [change data capture (CDC) pattern using DynamoDB Streams](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Streams.html). - -```mermaid - -flowchart LR - %% resources - ddb([DynamoDB Table]) - - cdcHandler[[Handler]] - cdcTransforms[Transforms] - - %% connections - ddb --> cdcHandler - subgraph Substation CDC Node - cdcHandler --> cdcTransforms - end -``` - -## Distributed Lock - -Deploys a data pipeline that implements a distributed lock pattern using DynamoDB. This pattern can be used to add "exactly-once" semantics to services that otherwise do not support it. For similar examples, see the "exactly once" configurations [here](/examples/config/transform/meta/). - -## Telephone - -Deploys a data pipeline that implements a "telephone" pattern by sharing data as context between multiple Lambda functions using a DynamoDB table. This pattern can be used to enrich events across unique data sources. - -```mermaid - -flowchart LR - %% resources - md_kinesis([Device Management - Kinesis Data Stream]) - edr_kinesis([EDR Kinesis Data Stream]) - idp_kinesis([IdP Kinesis Data Stream]) - dynamodb([DynamoDB Table]) - - edrEnrichmentHandler[[Handler]] - edrEnrichmentTransforms[Transforms] - - edrTransformHandler[[Handler]] - edrTransformTransforms[Transforms] - - idpEnrichmentHandler[[Handler]] - idpEnrichmentTransforms[Transforms] - - mdEnrichmentHandler[[Handler]] - mdEnrichmentTransforms[Transforms] - - %% connections - edr_kinesis --> edrEnrichmentHandler - subgraph Substation EDR Enrichment Node - edrEnrichmentHandler --> edrEnrichmentTransforms - end - - edr_kinesis --> edrTransformHandler - subgraph Substation EDR Transform Node - edrTransformHandler --> edrTransformTransforms - end - - idp_kinesis --> idpEnrichmentHandler - subgraph Substation IdP Enrichment Node - idpEnrichmentHandler --> idpEnrichmentTransforms - end - - md_kinesis --> mdEnrichmentHandler - subgraph Substation Dvc Mgmt Enrichment Node - mdEnrichmentHandler --> mdEnrichmentTransforms - end - - edrEnrichmentTransforms --- dynamodb - edrTransformTransforms --- dynamodb - idpEnrichmentTransforms --- dynamodb - mdEnrichmentTransforms --- dynamodb -``` - -# EventBridge - -## Lambda Bus - -Deploys a data pipeline that sends data from an EventBridge event bus to a Lambda function. - -```mermaid -flowchart LR - %% resources - ebb([EventBridge Bus]) - ebs([EventBridge Scheduler]) - - producerHandler[[Handler]] - producerTransforms[Transforms] - - consumerHandler[[Handler]] - consumerTransforms[Transforms] - - %% connections - ebs --> ebs - ebs --> producerHandler - subgraph Substation Producer Node - producerHandler --> producerTransforms - end - - producerTransforms --> ebb --> consumerHandler - - subgraph Substation Consumer Node - consumerHandler --> consumerTransforms - end -``` - -# Firehose - -## Data Transform - -Deploys a [Firehose](https://aws.amazon.com/firehose/) delivery stream with [data transformation](https://docs.aws.amazon.com/firehose/latest/dev/data-transformation.html) enabled. 
- -```mermaid - -flowchart LR - %% resources - data[/Data/] - firehose([Kinesis Data Firehose]) - s3([S3 Bucket]) - - nodeHandler[[Handler]] - nodeTransforms[Transforms] - - %% connections - data --> firehose --> nodeHandler - - subgraph Substation Node - nodeHandler --> nodeTransforms --> nodeHandler - end - - nodeHandler --> firehose - firehose --> s3 -``` - -# Kinesis - -## Autoscale - -Deploys a Kinesis Data Stream with autoscaling enabled. This can also be used without Substation to manage Kinesis Data Streams. - -```mermaid - -flowchart LR - kds[("Kinesis - Data Stream")] - sns("Autoscale SNS Topic") - cw_upscale("CloudWatch Upscale Alarm") - cw_downscale("CloudWatch Downscale Alarm") - autoscale("Autoscale Lambda") - - autoscale -- UpdateShardCount API --> kds - autoscale -- PutMetricAlarm API ---> cw_upscale - autoscale -- PutMetricAlarm API ---> cw_downscale - - cw_downscale -. notifies .- sns - cw_upscale -. notifies .- sns - - sns -- notifies ---> autoscale - cw_downscale -. monitors .- kds - cw_upscale -. monitors .- kds -``` - -## Multi-Stream - -Deploys a data pipeline that implements a multi-phase streaming data pattern using Kinesis Data Streams. - -```mermaid - -flowchart LR - %% resources - gateway([API Gateway]) - kds1([Kinesis Data Stream]) - kds2([Kinesis Data Stream]) - - publisherHandler[[Handler]] - publisherTransforms[Transforms] - - subscriberHandler[[Handler]] - subscriberTransforms[Transforms] - - %% connections - gateway --> kds1 --> publisherHandler - subgraph Substation Publisher Node - publisherHandler --> publisherTransforms - end - - publisherTransforms --> kds2 --> subscriberHandler - - subgraph Substation Subscriber Node - subscriberHandler --> subscriberTransforms - end -``` - -## nXDR - -Deploys a data pipeline that implements an nXDR pattern by applying threat / risk enrichment metadata to events and sending the enriched data to multiple destinations. This pattern is useful for: -- Generating risk-based detection rules -- Guiding analysts during incident investigations and incident response -- Aiding unstructured threat hunts -- Prioritizing logs for retention and analysis - -```mermaid - -flowchart LR - %% resources - kinesis([Kinesis Data Stream]) - dynamodb([DynamoDB Table]) - ext([External System]) - - enrichmentHandler[[Handler]] - enrichmentTransforms[Transforms] - - transformHandler[[Handler]] - transformTransforms[Transforms] - - %% connections - kinesis --> enrichmentHandler - subgraph Substation Enrichment Node - enrichmentHandler --> enrichmentTransforms - end - - enrichmentTransforms --> dynamodb - - kinesis --> transformHandler - subgraph Substation Transform Node - transformHandler --> transformTransforms - end - - transformTransforms --> ext -``` - -## Time Travel - -Deploys a data pipeline that implements a "time travel" pattern by having a subscriber node read data more slowly than an enrichment node. The nodes share data observed across different events using a DynamoDB table.
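In configuration terms, the enrichment node writes observations into a DynamoDB-backed KV store and the slower subscriber node reads them back. Below is a hedged sketch using the KV store helpers that appear in the `telephone` and `distributed_lock` configs removed later in this diff; the table name, keys, and prefix are illustrative, and the two transforms would normally live in separate node configs:

```jsonnet
local sub = import 'substation.libsonnet';  // Path abbreviated for illustration.

local kv = sub.kv_store.aws_dynamodb({
  table_name: 'substation',
  attributes: { partition_key: 'PK', ttl: 'ttl' },
});

{
  transforms: [
    // Enrichment node: index the observed value under a shared key.
    sub.tf.enrich.kv_store.iset({ obj: { src: 'host.name', trg: 'user' }, prefix: 'example', kv_store: kv }),
    // Subscriber node (reading later): fetch the shared value back into the event.
    sub.tf.enrich.kv_store.iget({ obj: { src: 'host.name', trg: 'user' }, prefix: 'example', kv_store: kv }),
  ],
}
```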
- -```mermaid - -flowchart LR - %% resources - gateway([API Gateway]) - kinesis([Kinesis Data Stream]) - dynamodb([DynamoDB Table]) - - enrichmentHandler[[Handler]] - enrichmentTransforms[Transforms] - - subscriberHandler[[Handler]] - subscriberTransforms[Transforms] - - gateway --> kinesis - %% connections - kinesis -- 5 seconds --> enrichmentHandler - subgraph Substation Enrichment Node - enrichmentHandler --> enrichmentTransforms - end - - enrichmentTransforms --> dynamodb - - kinesis -- 15 seconds --> subscriberHandler - subgraph Substation Subscriber Node - subscriberHandler --> subscriberTransforms - end - - dynamodb --- subscriberTransforms -``` - -# Lambda - -## AppConfig - -Deploys a data pipeline with an invalid config that triggers AppConfig's validator feature. When the AppConfig service receives the compiled Substation configuration and attempts to deploy, the deployment will fail and return an error. - -## Microservice - -Deploys a synchronous microservice that performs DNS resolution. The service can be invoked [synchronously](https://docs.aws.amazon.com/lambda/latest/dg/invocation-sync.html) or using a [Lambda URL](https://docs.aws.amazon.com/lambda/latest/dg/lambda-urls.html). - -```mermaid - -flowchart LR - %% resources - gateway[HTTPS Endpoint] - cli[AWS CLI] - - nodeHandler[[Handler]] - nodeTransforms[Transforms] - - %% connections - gateway <--> nodeHandler - cli <--> nodeHandler - - subgraph Substation Node - nodeHandler --> nodeTransforms --> nodeHandler - end -``` - -## VPC - -Deploys a synchronous microservice in a VPC that returns the public IP address of the Lambda function. The Lambda can be invoked [synchronously](https://docs.aws.amazon.com/lambda/latest/dg/invocation-sync.html) or using a [Lambda URL](https://docs.aws.amazon.com/lambda/latest/dg/lambda-urls.html). This example can be used to validate how Substation transforms behave inside a VPC. - -```mermaid - -flowchart LR - %% resources - gateway[HTTPS Endpoint] - cli[AWS CLI] - - nodeHandler[[Handler]] - nodeTransforms[Transforms] - - %% connections - gateway <--> nodeHandler - cli <--> nodeHandler - - subgraph VPC Network - subgraph Substation Node - nodeHandler --> nodeTransforms --> nodeHandler - end - end -``` - -# S3 - -## Data Lake - -Deploys a data pipeline that implements a [data lake pattern using S3](https://docs.aws.amazon.com/whitepapers/latest/building-data-lakes/amazon-s3-data-lake-storage-platform.html). The S3 bucket contains two copies of the data (original and transformed). - -```mermaid - -flowchart LR - bucket([S3 Bucket]) - handler[[Handler]] - gateway([API Gateway]) - - sendS3x[Send to AWS S3] - sendS3y[Send to AWS S3] - mod[...] - - %% connections - gateway --> handler - - subgraph Substation Node - handler --> sendS3x - - subgraph Transforms - sendS3x --> mod --> sendS3y - end - - end - - sendS3x --> bucket - sendS3y --> bucket -``` - -## Retry on Failure - -Deploys a data pipeline that reads data from an S3 bucket and automatically retries failed events using an SQS queue as a [failure destination](https://aws.amazon.com/blogs/compute/introducing-aws-lambda-destinations/). This example will retry forever until the error is resolved. 
- -```mermaid - -flowchart LR - %% resources - bucket([S3 Bucket]) - queue([SQS Queue]) - %% connections - bucket --> handler - N -.-> queue - queue --> R - rTransforms --> handler - - subgraph N["Substation Node"] - handler[[Handler]] --> transforms[Transforms] - end - subgraph R["Substation Retrier"] - rHandler[[Handler]] --> rTransforms[Transforms] - end -``` - -## SNS - -Deploys a data pipeline that reads data from an S3 bucket via an SNS topic. - -```mermaid - -flowchart LR - %% resources - bucket([S3 Bucket]) - sns([SNS Topic]) - - handler[[Handler]] - transforms[Transforms] - - %% connections - bucket --> sns --> handler - subgraph Substation Node - handler --> transforms - end -``` - -## XDR - -Deploys a data pipeline that implements an XDR (extended detection and response) pattern by reading files from an S3 bucket, conditionally filtering and applying threat / risk enrichment metadata to events, and then writing the enriched events to an S3 bucket. The S3 bucket contains two copies of the data (original and transformed). - -```mermaid -flowchart LR - bucket([S3 Bucket]) - handler[[Handler]] - - threat[Threat Enrichments] - sendS3[Send to AWS S3] - - %% connections - bucket --> handler - - subgraph Substation Node - handler --> threat - - subgraph Transforms - threat --> sendS3 - end - - end - - sendS3 --> bucket -``` - - -# SNS - -## Pub/Sub - -Deploys a data pipeline that implements a [publish/subscribe (pub/sub) pattern](https://aws.amazon.com/what-is/pub-sub-messaging/). The `examples/cmd/client/file` application can be used to send data to the SNS topic. - -```mermaid - -flowchart LR - %% resources - file[(File)] - sns([SNS Topic]) - - cliHandler[[Handler]] - cliTransforms[Transforms] - sub1Handler[[Handler]] - sub1Transforms[Transforms] - sub2Handler[[Handler]] - sub2Transforms[Transforms] - sub3Handler[[Handler]] - sub3Transforms[Transforms] - - %% connections - cliHandler -.- file - subgraph Substation Client - cliHandler --> cliTransforms - end - - cliTransforms --> sns - sns --> sub1Handler - sns --> sub2Handler - sns --> sub3Handler - - subgraph Substation Subscriber Node - sub3Handler --> sub3Transforms - end - - subgraph Substation Subscriber Node - sub2Handler --> sub2Transforms - end - - subgraph Substation Subscriber Node - sub1Handler --> sub1Transforms - end -``` - -# SQS - -## Microservice - -Deploys an asynchronous microservice that performs DNS resolution. The service can be invoked [synchronously](https://docs.aws.amazon.com/lambda/latest/dg/invocation-sync.html) or using a [Lambda URL](https://docs.aws.amazon.com/lambda/latest/dg/lambda-urls.html); requests to the service are assigned a UUID that can be used to retrieve the result from the DynamoDB table. 
- -```mermaid - -flowchart LR - %% resources - gateway[HTTPS Endpoint] - cli[AWS CLI] - sqs([SQS Queue]) - ddb([DynamoDB Table]) - - gatewayHandler[[Handler]] - gatewayTransforms[Transforms] - - microserviceHandler[[Handler]] - microserviceTransforms[Transforms] - - %% connections - gateway <--> gatewayHandler - cli <--> gatewayHandler - - subgraph Substation Frontend Node - gatewayHandler --> gatewayTransforms --> gatewayHandler - end - - gatewayTransforms --> sqs --> microserviceHandler - - subgraph Substation Microservice Node - microserviceHandler --> microserviceTransforms - end - - microserviceTransforms --> ddb -``` diff --git a/v1/examples/terraform/aws/cloudwatch_logs/cross_account_cross_region/config/consumer/config.jsonnet b/v1/examples/terraform/aws/cloudwatch_logs/cross_account_cross_region/config/consumer/config.jsonnet deleted file mode 100644 index f545b4d6..00000000 --- a/v1/examples/terraform/aws/cloudwatch_logs/cross_account_cross_region/config/consumer/config.jsonnet +++ /dev/null @@ -1,11 +0,0 @@ -local sub = import '../../../../../../../build/config/substation.libsonnet'; - -{ - concurrency: 1, - transforms: [ - // CloudWatch logs sent to Kinesis Data Streams are gzip compressed. - // These must be decompressed before other transforms are applied. - sub.tf.fmt.from.gzip(), - sub.tf.send.stdout(), - ], -} diff --git a/v1/examples/terraform/aws/cloudwatch_logs/cross_account_cross_region/terraform/_provider.tf b/v1/examples/terraform/aws/cloudwatch_logs/cross_account_cross_region/terraform/_provider.tf deleted file mode 100644 index e2cba33f..00000000 --- a/v1/examples/terraform/aws/cloudwatch_logs/cross_account_cross_region/terraform/_provider.tf +++ /dev/null @@ -1,8 +0,0 @@ -provider "aws" { - region = "us-east-1" -} - -provider "aws" { - alias = "usw2" - region = "us-west-2" -} diff --git a/v1/examples/terraform/aws/cloudwatch_logs/cross_account_cross_region/terraform/_resources.tf b/v1/examples/terraform/aws/cloudwatch_logs/cross_account_cross_region/terraform/_resources.tf deleted file mode 100644 index 0b0ceb2d..00000000 --- a/v1/examples/terraform/aws/cloudwatch_logs/cross_account_cross_region/terraform/_resources.tf +++ /dev/null @@ -1,102 +0,0 @@ -data "aws_caller_identity" "caller" {} - -module "appconfig" { - source = "../../../../../../build/terraform/aws/appconfig" - - config = { - name = "substation" - environments = [{ - name = "example" - }] - } -} - -# Repository for the core Substation application. -module "ecr" { - source = "../../../../../../build/terraform/aws/ecr" - - config = { - name = "substation" - force_delete = true - } -} - -# Repository for the autoscaling application. -module "ecr_autoscale" { - source = "../../../../../../build/terraform/aws/ecr" - - config = { - name = "autoscale" - force_delete = true - } -} - -# SNS topic for Kinesis Data Stream autoscaling alarms. -resource "aws_sns_topic" "autoscaling_topic" { - name = "autoscale" -} - -# Kinesis Data Stream that is used as the destination for CloudWatch Logs. -module "kds" { - source = "../../../../../../build/terraform/aws/kinesis_data_stream" - - config = { - name = "substation" - autoscaling_topic = aws_sns_topic.autoscaling_topic.arn - } - - access = [ - # Autoscales the stream. - module.lambda_autoscaling.role.name, - # Reads data from the stream. - module.lambda_consumer.role.name, - # Writes data to the stream. 
- module.cw_destination_use1.role.name, - module.cw_destination_usw2.role.name, - ] -} - -# CloudWatch Logs destination that sends logs to the Kinesis Data Stream from us-east-1. -module "cw_destination_use1" { - source = "../../../../../../build/terraform/aws/cloudwatch/destination" - - config = { - name = "substation" - destination_arn = module.kds.arn - - # By default, any CloudWatch log in the current AWS account can send logs to this destination. - # Add additional AWS account IDs to allow them to send logs to the destination. - account_ids = [] - } -} - -module "cw_subscription_use1" { - source = "../../../../../../build/terraform/aws/cloudwatch/subscription" - - config = { - name = "substation" - destination_arn = module.cw_destination_use1.arn - log_groups = [ - # This log group does not exist. Add other log groups for resources in the account and region. - # "/aws/lambda/test", - ] - } -} - -# CloudWatch Logs destination that sends logs to the Kinesis Data Stream from us-west-2. -# To add support for more regions, copy this module and change the provider. -module "cw_destination_usw2" { - source = "../../../../../../build/terraform/aws/cloudwatch/destination" - providers = { - aws = aws.usw2 - } - - config = { - name = "substation" - destination_arn = module.kds.arn - - # By default, any CloudWatch log in the current AWS account can send logs to this destination. - # Add additional AWS account IDs to allow them to send logs to the destination. - account_ids = [] - } -} diff --git a/v1/examples/terraform/aws/cloudwatch_logs/cross_account_cross_region/terraform/autoscaler.tf b/v1/examples/terraform/aws/cloudwatch_logs/cross_account_cross_region/terraform/autoscaler.tf deleted file mode 100644 index e4fa889a..00000000 --- a/v1/examples/terraform/aws/cloudwatch_logs/cross_account_cross_region/terraform/autoscaler.tf +++ /dev/null @@ -1,40 +0,0 @@ -# Used for deploying and maintaining the Kinesis Data Streams autoscaling application; does not need to be used if deployments don't include Kinesis Data Streams. 
- -module "lambda_autoscaling" { - source = "../../../../../../build/terraform/aws/lambda" - appconfig = module.appconfig - - config = { - name = "autoscale" - description = "Autoscaler for Kinesis Data Streams" - image_uri = "${module.ecr_autoscale.url}:v1.3.0" - image_arm = true - } - - depends_on = [ - module.appconfig.name, - module.ecr_autoscale.url, - ] -} - -resource "aws_sns_topic_subscription" "autoscaling_subscription" { - topic_arn = aws_sns_topic.autoscaling_topic.arn - protocol = "lambda" - endpoint = module.lambda_autoscaling.arn - - depends_on = [ - module.lambda_autoscaling.name - ] -} - -resource "aws_lambda_permission" "autoscaling_invoke" { - statement_id = "AllowExecutionFromSNS" - action = "lambda:InvokeFunction" - function_name = module.lambda_autoscaling.name - principal = "sns.amazonaws.com" - source_arn = aws_sns_topic.autoscaling_topic.arn - - depends_on = [ - module.lambda_autoscaling.name - ] -} diff --git a/v1/examples/terraform/aws/cloudwatch_logs/cross_account_cross_region/terraform/consumer.tf b/v1/examples/terraform/aws/cloudwatch_logs/cross_account_cross_region/terraform/consumer.tf deleted file mode 100644 index 7506606f..00000000 --- a/v1/examples/terraform/aws/cloudwatch_logs/cross_account_cross_region/terraform/consumer.tf +++ /dev/null @@ -1,31 +0,0 @@ -module "lambda_consumer" { - source = "../../../../../../build/terraform/aws/lambda" - appconfig = module.appconfig - - config = { - name = "consumer" - description = "Substation node that consumes from Kinesis" - image_uri = "${module.ecr.url}:v1.3.0" - image_arm = true - - env = { - "SUBSTATION_CONFIG" : "http://localhost:2772/applications/substation/environments/example/configurations/consumer" - "SUBSTATION_LAMBDA_HANDLER" : "AWS_KINESIS_DATA_STREAM" - "SUBSTATION_DEBUG" : true - } - } - - depends_on = [ - module.appconfig.name, - module.ecr.url, - ] -} - -resource "aws_lambda_event_source_mapping" "lambda_consumer" { - event_source_arn = module.kds.arn - function_name = module.lambda_consumer.arn - maximum_batching_window_in_seconds = 10 - batch_size = 100 - parallelization_factor = 1 - starting_position = "LATEST" -} diff --git a/v1/examples/terraform/aws/cloudwatch_logs/to_lambda/config/consumer/config.jsonnet b/v1/examples/terraform/aws/cloudwatch_logs/to_lambda/config/consumer/config.jsonnet deleted file mode 100644 index 99b93025..00000000 --- a/v1/examples/terraform/aws/cloudwatch_logs/to_lambda/config/consumer/config.jsonnet +++ /dev/null @@ -1,15 +0,0 @@ -local sub = import '../../../../../../../build/config/substation.libsonnet'; - -{ - concurrency: 1, - transforms: [ - // CloudWatch logs sent to Lambda are base64 encoded and gzip - // compressed within the `awslogs.data` field of the event. - // These must be decoded and decompressed before other transforms are - // applied. 
- sub.tf.obj.cp({ object: { source_key: 'awslogs.data' } }), - sub.tf.fmt.from.base64(), - sub.tf.fmt.from.gzip(), - sub.tf.send.stdout(), - ], -} diff --git a/v1/examples/terraform/aws/cloudwatch_logs/to_lambda/terraform/_resources.tf b/v1/examples/terraform/aws/cloudwatch_logs/to_lambda/terraform/_resources.tf deleted file mode 100644 index 492b504b..00000000 --- a/v1/examples/terraform/aws/cloudwatch_logs/to_lambda/terraform/_resources.tf +++ /dev/null @@ -1,21 +0,0 @@ -data "aws_caller_identity" "caller" {} - -module "appconfig" { - source = "../../../../../../build/terraform/aws/appconfig" - - config = { - name = "substation" - environments = [{ - name = "example" - }] - } -} - -module "ecr" { - source = "../../../../../../build/terraform/aws/ecr" - - config = { - name = "substation" - force_delete = true - } -} diff --git a/v1/examples/terraform/aws/cloudwatch_logs/to_lambda/terraform/consumer.tf b/v1/examples/terraform/aws/cloudwatch_logs/to_lambda/terraform/consumer.tf deleted file mode 100644 index 363340c5..00000000 --- a/v1/examples/terraform/aws/cloudwatch_logs/to_lambda/terraform/consumer.tf +++ /dev/null @@ -1,49 +0,0 @@ -data "aws_caller_identity" "current" {} -data "aws_region" "current" {} - -module "lambda_consumer" { - source = "../../../../../../build/terraform/aws/lambda" - appconfig = module.appconfig - - config = { - name = "consumer" - description = "Substation node that is invoked by CloudWatch" - image_uri = "${module.ecr.url}:v1.3.0" - image_arm = true - - env = { - "SUBSTATION_CONFIG" : "http://localhost:2772/applications/substation/environments/example/configurations/consumer" - "SUBSTATION_LAMBDA_HANDLER" : "AWS_LAMBDA" - "SUBSTATION_DEBUG" : true - } - } - - depends_on = [ - module.appconfig.name, - module.ecr.url, - ] -} - -# Allows any CloudWatch log group to send logs to the Lambda function in the current AWS account and region. -# Repeat this for each region that sends logs to the Lambda function. -resource "aws_lambda_permission" "consumer" { - statement_id = "AllowExecutionFromCloudWatch" - action = "lambda:InvokeFunction" - function_name = module.lambda_consumer.name - principal = "logs.amazonaws.com" - source_arn = "arn:aws:logs:${data.aws_region.current.name}:${data.aws_caller_identity.current.account_id}:log-group:*" -} - -# CloudWatch Logs subscription filter that sends logs to the Lambda function. -module "cw_subscription" { - source = "../../../../../../build/terraform/aws/cloudwatch/subscription" - - config = { - name = "substation" - destination_arn = module.lambda_consumer.arn - log_groups = [ - # This log group does not exist. Add other log groups for resources in the account and region. 
- # "/aws/lambda/test", - ] - } -} diff --git a/v1/examples/terraform/aws/dynamodb/cdc/config/node/config.jsonnet b/v1/examples/terraform/aws/dynamodb/cdc/config/node/config.jsonnet deleted file mode 100644 index bb84b999..00000000 --- a/v1/examples/terraform/aws/dynamodb/cdc/config/node/config.jsonnet +++ /dev/null @@ -1,8 +0,0 @@ -local sub = import '../../../../../../../build/config/substation.libsonnet'; - -{ - concurrency: 1, - transforms: [ - sub.tf.send.stdout(), - ], -} diff --git a/v1/examples/terraform/aws/dynamodb/cdc/terraform/_resources.tf b/v1/examples/terraform/aws/dynamodb/cdc/terraform/_resources.tf deleted file mode 100644 index 9fae34fa..00000000 --- a/v1/examples/terraform/aws/dynamodb/cdc/terraform/_resources.tf +++ /dev/null @@ -1,40 +0,0 @@ -data "aws_caller_identity" "caller" {} - -module "appconfig" { - source = "../../../../../../build/terraform/aws/appconfig" - - config = { - name = "substation" - environments = [{ - name = "example" - }] - } -} - -module "ecr" { - source = "../../../../../../build/terraform/aws/ecr" - - config = { - name = "substation" - force_delete = true - } -} - -module "dynamodb" { - source = "../../../../../../build/terraform/aws/dynamodb" - - config = { - name = "substation" - hash_key = "PK" - attributes = [ - { - name = "PK" - type = "S" - } - ] - } - - access = [ - module.node.role.name, - ] -} diff --git a/v1/examples/terraform/aws/dynamodb/cdc/terraform/node.tf b/v1/examples/terraform/aws/dynamodb/cdc/terraform/node.tf deleted file mode 100644 index cd6b5869..00000000 --- a/v1/examples/terraform/aws/dynamodb/cdc/terraform/node.tf +++ /dev/null @@ -1,27 +0,0 @@ -module "node" { - source = "../../../../../../build/terraform/aws/lambda" - appconfig = module.appconfig - - config = { - name = "node" - description = "Substation node that receives CDC events" - image_uri = "${module.ecr.url}:v1.3.0" - image_arm = true - env = { - "SUBSTATION_CONFIG" : "http://localhost:2772/applications/substation/environments/example/configurations/node" - "SUBSTATION_LAMBDA_HANDLER" : "AWS_DYNAMODB_STREAM" - "SUBSTATION_DEBUG" : true - } - } - - depends_on = [ - module.appconfig.name, - module.ecr.url, - ] -} - -resource "aws_lambda_event_source_mapping" "node" { - event_source_arn = module.dynamodb.stream_arn - function_name = module.node.arn - starting_position = "LATEST" -} diff --git a/v1/examples/terraform/aws/dynamodb/distributed_lock/config/node/config.jsonnet b/v1/examples/terraform/aws/dynamodb/distributed_lock/config/node/config.jsonnet deleted file mode 100644 index 7a034ea9..00000000 --- a/v1/examples/terraform/aws/dynamodb/distributed_lock/config/node/config.jsonnet +++ /dev/null @@ -1,40 +0,0 @@ -local sub = import '../../../../../../../build/config/substation.libsonnet'; - -local kv = sub.kv_store.aws_dynamodb({ - table_name: 'substation', - attributes: { partition_key: 'PK', ttl: 'ttl' }, -}); - -{ - transforms: [ - // All messages are locked before they are sent through other - // transform functions, ensuring that the message is processed - // exactly once. - // - // An error in any sub-transform will cause all previously locked - // messages to be unlocked; this only applies to messages that have - // not yet been flushed by a control message. Use the `utility_control` - // transform to manage how often messages are flushed. 
- sub.tf.meta.kv_store.lock(settings={ - kv_store: kv, - prefix: 'distributed_lock', - ttl_offset: '1m', - transform: sub.tf.meta.pipeline({ transforms: [ - // Delaying and simulating an error makes it possible to - // test message unlocking in real-time (view changes using - // the DynamoDB console). Uncomment the lines below to see - // how it works. - // - // sub.tf.utility.delay({ duration: '10s' }), - // sub.pattern.transform.conditional( - // condition=sub.cnd.utility.random(), - // transform=sub.tf.utility.err({ message: 'simulating error to trigger unlock' }), - // ), - // - // Messages are printed to the console. After this, they are locked - // and will not be printed again until the lock expires. - sub.tf.send.stdout(), - ] }), - }), - ], -} diff --git a/v1/examples/terraform/aws/dynamodb/distributed_lock/terraform/_resources.tf b/v1/examples/terraform/aws/dynamodb/distributed_lock/terraform/_resources.tf deleted file mode 100644 index 7200b0f6..00000000 --- a/v1/examples/terraform/aws/dynamodb/distributed_lock/terraform/_resources.tf +++ /dev/null @@ -1,40 +0,0 @@ -data "aws_caller_identity" "caller" {} - -module "appconfig" { - source = "../../../../../../build/terraform/aws/appconfig" - - config = { - name = "substation" - environments = [{ name = "example" }] - } -} - -module "ecr" { - source = "../../../../../../build/terraform/aws/ecr" - - config = { - name = "substation" - force_delete = true - } -} - -module "dynamodb" { - source = "../../../../../../build/terraform/aws/dynamodb" - - config = { - name = "substation" - hash_key = "PK" - ttl = "ttl" - - attributes = [ - { - name = "PK" - type = "S" - } - ] - } - - access = [ - module.node.role.name, - ] -} diff --git a/v1/examples/terraform/aws/dynamodb/distributed_lock/terraform/node.tf b/v1/examples/terraform/aws/dynamodb/distributed_lock/terraform/node.tf deleted file mode 100644 index 2f291fef..00000000 --- a/v1/examples/terraform/aws/dynamodb/distributed_lock/terraform/node.tf +++ /dev/null @@ -1,26 +0,0 @@ -module "node" { - source = "../../../../../../build/terraform/aws/lambda" - appconfig = module.appconfig - - config = { - name = "node" - description = "Substation node that transforms data exactly-once using a distributed lock" - image_uri = "${module.ecr.url}:v1.3.0" - image_arm = true - env = { - "SUBSTATION_CONFIG" : "http://localhost:2772/applications/substation/environments/example/configurations/node" - "SUBSTATION_LAMBDA_HANDLER" : "AWS_API_GATEWAY" - "SUBSTATION_DEBUG" : true - } - } - - depends_on = [ - module.appconfig.name, - module.ecr.url, - ] -} - -resource "aws_lambda_function_url" "node" { - function_name = module.node.name - authorization_type = "NONE" -} diff --git a/v1/examples/terraform/aws/dynamodb/telephone/config/const.libsonnet b/v1/examples/terraform/aws/dynamodb/telephone/config/const.libsonnet deleted file mode 100644 index 2f3db52e..00000000 --- a/v1/examples/terraform/aws/dynamodb/telephone/config/const.libsonnet +++ /dev/null @@ -1,8 +0,0 @@ -local sub = import '../../../../../../build/config/substation.libsonnet'; - -{ - kv_store: sub.kv_store.aws_dynamodb({ - table_name: 'substation', - attributes: { partition_key: 'PK', sort_key: 'SK', ttl: 'TTL', value: 'cache' }, - }), -} diff --git a/v1/examples/terraform/aws/dynamodb/telephone/config/dvc_mgmt_enrichment/config.jsonnet b/v1/examples/terraform/aws/dynamodb/telephone/config/dvc_mgmt_enrichment/config.jsonnet deleted file mode 100644 index 42b03fcf..00000000 --- 
a/v1/examples/terraform/aws/dynamodb/telephone/config/dvc_mgmt_enrichment/config.jsonnet +++ /dev/null @@ -1,10 +0,0 @@ -local sub = import '../../../../../../../build/config/substation.libsonnet'; -local const = import '../const.libsonnet'; - -{ - concurrency: 1, - transforms: [ - // Puts the user's metadata into the KV store indexed by the host name. - sub.tf.enrich.kv_store.iset({ obj: { src: 'host.name', trg: 'user' }, prefix: 'md_user', kv_store: const.kv_store }), - ], -} diff --git a/v1/examples/terraform/aws/dynamodb/telephone/config/edr_enrichment/config.jsonnet b/v1/examples/terraform/aws/dynamodb/telephone/config/edr_enrichment/config.jsonnet deleted file mode 100644 index 52633cfc..00000000 --- a/v1/examples/terraform/aws/dynamodb/telephone/config/edr_enrichment/config.jsonnet +++ /dev/null @@ -1,18 +0,0 @@ -local sub = import '../../../../../../../build/config/substation.libsonnet'; -local const = import '../const.libsonnet'; - -{ - concurrency: 1, - transforms: [ - // If the host metadata contains the host name, then it's put into the KV store - // indexed by the host ID. - sub.tf.meta.switch({ cases: [ - { - condition: sub.cnd.all([ - sub.cnd.num.len.gt({ obj: { src: 'host.name' }, value: 0 }), - ]), - transform: sub.tf.enrich.kv_store.iset({ obj: { src: 'host.id', trg: 'host' }, prefix: 'edr_host', kv_store: const.kv_store }), - }, - ] }), - ], -} diff --git a/v1/examples/terraform/aws/dynamodb/telephone/config/edr_transform/config.jsonnet b/v1/examples/terraform/aws/dynamodb/telephone/config/edr_transform/config.jsonnet deleted file mode 100644 index 6f340fe5..00000000 --- a/v1/examples/terraform/aws/dynamodb/telephone/config/edr_transform/config.jsonnet +++ /dev/null @@ -1,24 +0,0 @@ -local sub = import '../../../../../../../build/config/substation.libsonnet'; -local const = import '../const.libsonnet'; - -// cnd_copy is a helper function for copying values that are not null. -local cnd_copy(source, target) = sub.pattern.tf.conditional( - condition=sub.cnd.num.len.gt({ obj: { src: source }, value: 0 }), - transform=sub.tf.object.copy({ obj: { src: source, trg: target } }), -); - -{ - concurrency: 1, - transforms: [ - // The value from the KV store can be null, so the result is hidden in metadata and checked before - // copying it into the message data. Many of these values are supersets of each other, so values are - // overwritten if they exist. If any source key is missing, the transform is skipped. 
- sub.tf.enrich.kv_store.iget({ obj: { src: 'host.id', trg: 'meta edr_host' }, prefix: 'edr_host', kv_store: const.kv_store }), - cnd_copy(source='meta edr_host', target='host'), - sub.tf.enrich.kv_store.iget({ obj: { src: 'host.name', trg: 'meta md_user' }, prefix: 'md_user', kv_store: const.kv_store }), - cnd_copy(source='meta md_user', target='user'), - sub.tf.enrich.kv_store.iget({ obj: { src: 'user.email', trg: 'meta idp_user' }, prefix: 'idp_user', kv_store: const.kv_store }), - cnd_copy(source='meta idp_user', target='user'), - sub.tf.send.stdout(), - ], -} diff --git a/v1/examples/terraform/aws/dynamodb/telephone/config/idp_enrichment/config.jsonnet b/v1/examples/terraform/aws/dynamodb/telephone/config/idp_enrichment/config.jsonnet deleted file mode 100644 index eb05a4a8..00000000 --- a/v1/examples/terraform/aws/dynamodb/telephone/config/idp_enrichment/config.jsonnet +++ /dev/null @@ -1,31 +0,0 @@ -local sub = import '../../../../../../../build/config/substation.libsonnet'; -local const = import '../const.libsonnet'; - -{ - concurrency: 1, - transforms: [ - // The user's status is determined to be inactive if there is a successful deletion event. - // Any other successful authentication event will set the user's status to active. - // - // In production deployments, additional filtering should be used to reduce the number of - // queries made to the KV store. - sub.tf.meta.switch({ cases: [ - { - condition: sub.cnd.all([ - sub.cnd.str.eq({ object: { source_key: 'event.category' }, value: 'authentication' }), - sub.cnd.str.eq({ object: { source_key: 'event.type' }, value: 'deletion' }), - sub.cnd.str.eq({ object: { source_key: 'event.outcome' }, value: 'success' }), - ]), - transform: sub.tf.object.insert({ object: { target_key: 'user.status.-1' }, value: 'idp_inactive' }), - }, - { - condition: sub.cnd.all([ - sub.cnd.str.eq({ object: { source_key: 'event.outcome' }, value: 'success' }), - ]), - transform: sub.tf.object.insert({ object: { target_key: 'user.status.-1' }, value: 'idp_active' }), - }, - ] }), - // Puts the user's metadata into the KV store indexed by their email address. 
- sub.tf.enrich.kv_store.iset({ obj: { src: 'user.email', trg: 'user' }, prefix: 'idp_user', kv_store: const.kv_store }), - ], -} diff --git a/v1/examples/terraform/aws/dynamodb/telephone/dvc_mgmt_data.jsonl b/v1/examples/terraform/aws/dynamodb/telephone/dvc_mgmt_data.jsonl deleted file mode 100644 index aca58aea..00000000 --- a/v1/examples/terraform/aws/dynamodb/telephone/dvc_mgmt_data.jsonl +++ /dev/null @@ -1,2 +0,0 @@ -{"host":{"name":"Alice's MacBook Pro"},"user":{"email":"alice@brex.com"}} -{"host":{"name":"Bob's MacBook Pro"},"user":{"email":"bob@brex.com"}} diff --git a/v1/examples/terraform/aws/dynamodb/telephone/edr_data.jsonl b/v1/examples/terraform/aws/dynamodb/telephone/edr_data.jsonl deleted file mode 100644 index d42ddb2b..00000000 --- a/v1/examples/terraform/aws/dynamodb/telephone/edr_data.jsonl +++ /dev/null @@ -1,2 +0,0 @@ -{"host":{"id":"eb67b0b6a1d04086b75ee38d02018a10","name":"Alice's MacBook Pro"}} -{"event":{"category":"network","type":"connection"},"host":{"id":"eb67b0b6a1d04086b75ee38d02018a10"},"process":{"name":"Spotify","pid":"d3a6c0b9d3751559f206e12fb1b8f226"},"server":{"ip":"35.186.224.39","port":443}} diff --git a/v1/examples/terraform/aws/dynamodb/telephone/idp_data.jsonl b/v1/examples/terraform/aws/dynamodb/telephone/idp_data.jsonl deleted file mode 100644 index c82b3fcb..00000000 --- a/v1/examples/terraform/aws/dynamodb/telephone/idp_data.jsonl +++ /dev/null @@ -1,2 +0,0 @@ -{"event":{"category":"authentication","outcome":"success","type":"access"},"user":{"email":"alice@brex.com","roles":["Manager", "Security", "Engineering"]}} -{"event":{"category":"authentication","outcome":"success","type":"deletion"},"user":{"email":"bob@brex.com","roles":["Manager", "Security", "Engineering"]}} diff --git a/v1/examples/terraform/aws/dynamodb/telephone/post_deploy.sh b/v1/examples/terraform/aws/dynamodb/telephone/post_deploy.sh deleted file mode 100644 index 866cafdb..00000000 --- a/v1/examples/terraform/aws/dynamodb/telephone/post_deploy.sh +++ /dev/null @@ -1,4 +0,0 @@ -sleep 5 -AWS_DEFAULT_REGION=$AWS_REGION python3 ../build/scripts/aws/kinesis/put_records.py substation_edr terraform/aws/dynamodb/telephone/edr_data.jsonl --print-response -AWS_DEFAULT_REGION=$AWS_REGION python3 ../build/scripts/aws/kinesis/put_records.py substation_idp terraform/aws/dynamodb/telephone/idp_data.jsonl --print-response -AWS_DEFAULT_REGION=$AWS_REGION python3 ../build/scripts/aws/kinesis/put_records.py substation_md terraform/aws/dynamodb/telephone/dvc_mgmt_data.jsonl --print-response diff --git a/v1/examples/terraform/aws/dynamodb/telephone/terraform/_resources.tf b/v1/examples/terraform/aws/dynamodb/telephone/terraform/_resources.tf deleted file mode 100644 index 7cbbd781..00000000 --- a/v1/examples/terraform/aws/dynamodb/telephone/terraform/_resources.tf +++ /dev/null @@ -1,57 +0,0 @@ -data "aws_caller_identity" "caller" {} - -module "appconfig" { - source = "../../../../../../build/terraform/aws/appconfig" - - config = { - name = "substation" - environments = [{ name = "example" }] - } -} - -module "ecr" { - source = "../../../../../../build/terraform/aws/ecr" - - config = { - name = "substation" - force_delete = true - } -} - -module "ecr_autoscale" { - source = "../../../../../../build/terraform/aws/ecr" - - config = { - name = "autoscale" - force_delete = true - } -} - -module "dynamodb" { - source = "../../../../../../build/terraform/aws/dynamodb" - - config = { - name = "substation" - hash_key = "PK" - range_key = "SK" - ttl = "TTL" - - attributes = [ - { - name = "PK" - type = 
"S" - }, - { - name = "SK" - type = "S" - }, - ] - } - - access = [ - module.edr_enrichment.role.name, - module.edr_transform.role.name, - module.idp_enrichment.role.name, - module.dvc_mgmt_enrichment.role.name, - ] -} diff --git a/v1/examples/terraform/aws/dynamodb/telephone/terraform/autoscaler.tf b/v1/examples/terraform/aws/dynamodb/telephone/terraform/autoscaler.tf deleted file mode 100644 index b0da7368..00000000 --- a/v1/examples/terraform/aws/dynamodb/telephone/terraform/autoscaler.tf +++ /dev/null @@ -1,38 +0,0 @@ -# Used for deploying and maintaining the Kinesis Data Streams autoscaling application; does not need to be used if deployments don't include Kinesis Data Streams. -module "lambda_autoscaling" { - source = "../../../../../../build/terraform/aws/lambda" - - config = { - name = "autoscale" - description = "Autoscaler for Kinesis Data Streams" - image_uri = "${module.ecr_autoscale.url}:v1.3.0" - image_arm = true - } -} - -# SNS topic for Kinesis Data Stream autoscaling alarms. -resource "aws_sns_topic" "autoscaling_topic" { - name = "autoscale" -} - -resource "aws_sns_topic_subscription" "autoscaling_subscription" { - topic_arn = aws_sns_topic.autoscaling_topic.arn - protocol = "lambda" - endpoint = module.lambda_autoscaling.arn - - depends_on = [ - module.lambda_autoscaling.name - ] -} - -resource "aws_lambda_permission" "autoscaling_invoke" { - statement_id = "AllowExecutionFromSNS" - action = "lambda:InvokeFunction" - function_name = module.lambda_autoscaling.name - principal = "sns.amazonaws.com" - source_arn = aws_sns_topic.autoscaling_topic.arn - - depends_on = [ - module.lambda_autoscaling.name - ] -} diff --git a/v1/examples/terraform/aws/dynamodb/telephone/terraform/dvc_mgmt.tf b/v1/examples/terraform/aws/dynamodb/telephone/terraform/dvc_mgmt.tf deleted file mode 100644 index 100f0e8d..00000000 --- a/v1/examples/terraform/aws/dynamodb/telephone/terraform/dvc_mgmt.tf +++ /dev/null @@ -1,45 +0,0 @@ -# Kinesis Data Stream that stores data sent from pipeline sources. -module "dvc_mgmt_kinesis" { - source = "../../../../../../build/terraform/aws/kinesis_data_stream" - - config = { - name = "substation_md" - autoscaling_topic = aws_sns_topic.autoscaling_topic.arn - } - - access = [ - # Autoscales the stream. - module.lambda_autoscaling.role.name, - # Consumes data from the stream. - module.dvc_mgmt_enrichment.role.name, - ] -} - -module "dvc_mgmt_enrichment" { - source = "../../../../../../build/terraform/aws/lambda" - appconfig = module.appconfig - - config = { - name = "dvc_mgmt_enrichment" - description = "Substation node that enriches device management data." - image_uri = "${module.ecr.url}:v1.3.0" - image_arm = true - env = { - "SUBSTATION_CONFIG" : "http://localhost:2772/applications/substation/environments/example/configurations/dvc_mgmt_enrichment" - "SUBSTATION_LAMBDA_HANDLER" : "AWS_KINESIS_DATA_STREAM" - "SUBSTATION_DEBUG" : true - } - } -} - -resource "aws_lambda_event_source_mapping" "dvc_mgmt_enrichment" { - event_source_arn = module.dvc_mgmt_kinesis.arn - function_name = module.dvc_mgmt_enrichment.arn - maximum_batching_window_in_seconds = 5 - batch_size = 100 - parallelization_factor = 1 - # In this example, we start from the beginning of the stream, - # but in a prod environment, you may want to start from the end - # of the stream to avoid processing old data ("LATEST"). 
- starting_position = "TRIM_HORIZON" -} diff --git a/v1/examples/terraform/aws/dynamodb/telephone/terraform/edr.tf b/v1/examples/terraform/aws/dynamodb/telephone/terraform/edr.tf deleted file mode 100644 index a5350205..00000000 --- a/v1/examples/terraform/aws/dynamodb/telephone/terraform/edr.tf +++ /dev/null @@ -1,78 +0,0 @@ -# Kinesis Data Stream that stores data sent from pipeline sources. -module "edr_kinesis" { - source = "../../../../../../build/terraform/aws/kinesis_data_stream" - - config = { - name = "substation_edr" - autoscaling_topic = aws_sns_topic.autoscaling_topic.arn - } - - access = [ - # Autoscales the stream. - module.lambda_autoscaling.role.name, - # Consumes data from the stream. - module.edr_enrichment.role.name, - module.edr_transform.role.name, - ] -} - -module "edr_transform" { - source = "../../../../../../build/terraform/aws/lambda" - appconfig = module.appconfig - - config = { - name = "edr_transform" - description = "Substation node that transforms EDR data." - image_uri = "${module.ecr.url}:v1.3.0" - image_arm = true - env = { - "SUBSTATION_CONFIG" : "http://localhost:2772/applications/substation/environments/example/configurations/edr_transform" - "SUBSTATION_LAMBDA_HANDLER" : "AWS_KINESIS_DATA_STREAM" - "SUBSTATION_DEBUG" : true - } - } -} - -resource "aws_lambda_event_source_mapping" "edr_transform" { - event_source_arn = module.edr_kinesis.arn - function_name = module.edr_transform.arn - # This is set to 30 seconds (compared to the other data sources - # 5 seconds) to simulate the asynchronous arrival of data in a - # real-world scenario. - maximum_batching_window_in_seconds = 30 - batch_size = 100 - parallelization_factor = 1 - # In this example, we start from the beginning of the stream, - # but in a prod environment, you may want to start from the end - # of the stream to avoid processing old data ("LATEST"). - starting_position = "TRIM_HORIZON" -} - -module "edr_enrichment" { - source = "../../../../../../build/terraform/aws/lambda" - appconfig = module.appconfig - - config = { - name = "edr_enrichment" - description = "Substation node that enriches EDR data." - image_uri = "${module.ecr.url}:v1.3.0" - image_arm = true - env = { - "SUBSTATION_CONFIG" : "http://localhost:2772/applications/substation/environments/example/configurations/edr_enrichment" - "SUBSTATION_LAMBDA_HANDLER" : "AWS_KINESIS_DATA_STREAM" - "SUBSTATION_DEBUG" : true - } - } -} - -resource "aws_lambda_event_source_mapping" "edr_enrichment" { - event_source_arn = module.edr_kinesis.arn - function_name = module.edr_enrichment.arn - maximum_batching_window_in_seconds = 5 - batch_size = 100 - parallelization_factor = 1 - # In this example, we start from the beginning of the stream, - # but in a prod environment, you may want to start from the end - # of the stream to avoid processing old data ("LATEST"). - starting_position = "TRIM_HORIZON" -} diff --git a/v1/examples/terraform/aws/dynamodb/telephone/terraform/idp.tf b/v1/examples/terraform/aws/dynamodb/telephone/terraform/idp.tf deleted file mode 100644 index c5bff745..00000000 --- a/v1/examples/terraform/aws/dynamodb/telephone/terraform/idp.tf +++ /dev/null @@ -1,45 +0,0 @@ -# Kinesis Data Stream that stores data sent from pipeline sources. -module "idp_kinesis" { - source = "../../../../../../build/terraform/aws/kinesis_data_stream" - - config = { - name = "substation_idp" - autoscaling_topic = aws_sns_topic.autoscaling_topic.arn - } - - access = [ - # Autoscales the stream. 
- module.lambda_autoscaling.role.name, - # Consumes data from the stream. - module.idp_enrichment.role.name, - ] -} - -module "idp_enrichment" { - source = "../../../../../../build/terraform/aws/lambda" - appconfig = module.appconfig - - config = { - name = "idp_enrichment" - description = "Substation node that enriches IdP data." - image_uri = "${module.ecr.url}:v1.3.0" - image_arm = true - env = { - "SUBSTATION_CONFIG" : "http://localhost:2772/applications/substation/environments/example/configurations/idp_enrichment" - "SUBSTATION_LAMBDA_HANDLER" : "AWS_KINESIS_DATA_STREAM" - "SUBSTATION_DEBUG" : true - } - } -} - -resource "aws_lambda_event_source_mapping" "idp_enrichment" { - event_source_arn = module.idp_kinesis.arn - function_name = module.idp_enrichment.arn - maximum_batching_window_in_seconds = 5 - batch_size = 100 - parallelization_factor = 1 - # In this example, we start from the beginning of the stream, - # but in a prod environment, you may want to start from the end - # of the stream to avoid processing old data ("LATEST"). - starting_position = "TRIM_HORIZON" -} diff --git a/v1/examples/terraform/aws/eventbridge/lambda_bus/config/consumer/config.jsonnet b/v1/examples/terraform/aws/eventbridge/lambda_bus/config/consumer/config.jsonnet deleted file mode 100644 index bb84b999..00000000 --- a/v1/examples/terraform/aws/eventbridge/lambda_bus/config/consumer/config.jsonnet +++ /dev/null @@ -1,8 +0,0 @@ -local sub = import '../../../../../../../build/config/substation.libsonnet'; - -{ - concurrency: 1, - transforms: [ - sub.tf.send.stdout(), - ], -} diff --git a/v1/examples/terraform/aws/eventbridge/lambda_bus/config/producer/config.jsonnet b/v1/examples/terraform/aws/eventbridge/lambda_bus/config/producer/config.jsonnet deleted file mode 100644 index 641eff0e..00000000 --- a/v1/examples/terraform/aws/eventbridge/lambda_bus/config/producer/config.jsonnet +++ /dev/null @@ -1,11 +0,0 @@ -local sub = import '../../../../../../../build/config/substation.libsonnet'; - -{ - concurrency: 1, - transforms: [ - sub.tf.time.now({ object: { target_key: 'ts' } }), - sub.tf.obj.insert({ object: { target_key: 'message' }, value: 'Hello from the EventBridge scheduler!' }), - // This sends the event to the default bus. - sub.tf.send.aws.eventbridge(), - ], -} diff --git a/v1/examples/terraform/aws/eventbridge/lambda_bus/terraform/_resources.tf b/v1/examples/terraform/aws/eventbridge/lambda_bus/terraform/_resources.tf deleted file mode 100644 index 06e93869..00000000 --- a/v1/examples/terraform/aws/eventbridge/lambda_bus/terraform/_resources.tf +++ /dev/null @@ -1,17 +0,0 @@ -module "appconfig" { - source = "../../../../../../build/terraform/aws/appconfig" - - config = { - name = "substation" - environments = [{ name = "example" }] - } -} - -module "ecr" { - source = "../../../../../../build/terraform/aws/ecr" - - config = { - name = "substation" - force_delete = true - } -} diff --git a/v1/examples/terraform/aws/eventbridge/lambda_bus/terraform/consumer.tf b/v1/examples/terraform/aws/eventbridge/lambda_bus/terraform/consumer.tf deleted file mode 100644 index 390b0f1e..00000000 --- a/v1/examples/terraform/aws/eventbridge/lambda_bus/terraform/consumer.tf +++ /dev/null @@ -1,35 +0,0 @@ -module "eventbridge_consumer" { - source = "../../../../../../build/terraform/aws/eventbridge/lambda" - - config = { - name = "substation_consumer" - description = "Routes messages from any Substation producer to a Substation Lambda consumer." 
- function = module.lambda_consumer # This is the Lambda function that will be invoked. - event_pattern = jsonencode({ - # This matches every event sent by any Substation app. - source = [{ "wildcard" : "substation.*" }] - }) - } - - access = [ - module.lambda_producer.role.name, - ] -} - -module "lambda_consumer" { - source = "../../../../../../build/terraform/aws/lambda" - appconfig = module.appconfig - - config = { - name = "consumer" - description = "Substation node that is invoked by the EventBridge bus." - image_uri = "${module.ecr.url}:v1.5.0" - image_arm = true - - env = { - "SUBSTATION_CONFIG" : "http://localhost:2772/applications/substation/environments/example/configurations/consumer" - "SUBSTATION_LAMBDA_HANDLER" : "AWS_LAMBDA" - "SUBSTATION_DEBUG" : true - } - } -} diff --git a/v1/examples/terraform/aws/eventbridge/lambda_bus/terraform/producer.tf b/v1/examples/terraform/aws/eventbridge/lambda_bus/terraform/producer.tf deleted file mode 100644 index 7673ea4b..00000000 --- a/v1/examples/terraform/aws/eventbridge/lambda_bus/terraform/producer.tf +++ /dev/null @@ -1,28 +0,0 @@ -module "eventbridge_producer" { - source = "../../../../../../build/terraform/aws/eventbridge/lambda" - - config = { - name = "substation_producer" - description = "Sends messages to the default EventBridge bus on a schedule." - function = module.lambda_producer # This is the Lambda function that will be invoked. - schedule = "rate(1 minute)" - } -} - -module "lambda_producer" { - source = "../../../../../../build/terraform/aws/lambda" - appconfig = module.appconfig - - config = { - name = "producer" - description = "Substation node that is invoked by the EventBridge schedule." - image_uri = "${module.ecr.url}:v1.5.0" - image_arm = true - - env = { - "SUBSTATION_CONFIG" : "http://localhost:2772/applications/substation/environments/example/configurations/producer" - "SUBSTATION_LAMBDA_HANDLER" : "AWS_LAMBDA" - "SUBSTATION_DEBUG" : true - } - } -} diff --git a/v1/examples/terraform/aws/firehose/data_transform/config/transform_node/config.jsonnet b/v1/examples/terraform/aws/firehose/data_transform/config/transform_node/config.jsonnet deleted file mode 100644 index 2bac490c..00000000 --- a/v1/examples/terraform/aws/firehose/data_transform/config/transform_node/config.jsonnet +++ /dev/null @@ -1,14 +0,0 @@ -local sub = import '../../../../../../../build/config/substation.libsonnet'; - -{ - concurrency: 1, - transforms: [ - sub.tf.object.insert( - settings={ object: { target_key: 'transformed' }, value: true } - ), - // Appending a newline is required so that the S3 object is line delimited. 
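// Editor's sketch (illustrative input, not in the original config): a record
// like {"foo":"bar"} leaves this config as:
//
//   {"foo":"bar","transformed":true}\n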
- sub.tf.string.append( - settings={ suffix: '\n' } - ), - ], -} diff --git a/v1/examples/terraform/aws/firehose/data_transform/terraform/_resources.tf b/v1/examples/terraform/aws/firehose/data_transform/terraform/_resources.tf deleted file mode 100644 index f8d1f409..00000000 --- a/v1/examples/terraform/aws/firehose/data_transform/terraform/_resources.tf +++ /dev/null @@ -1,191 +0,0 @@ -locals { - name = "firehose" -} - -data "aws_caller_identity" "caller" {} - -resource "random_uuid" "id" {} - -module "kms" { - source = "../../../../../../build/terraform/aws/kms" - - config = { - name = "alias/substation" - } -} - -module "appconfig" { - source = "../../../../../../build/terraform/aws/appconfig" - - config = { - name = "substation" - environments = [{ - name = "example" - }] - } -} - -module "ecr" { - source = "../../../../../../build/terraform/aws/ecr" - kms = module.kms - - config = { - name = "substation" - force_delete = true - } -} - -################################## -# Firehose resources -################################## - -# IAM -data "aws_iam_policy_document" "firehose" { - statement { - effect = "Allow" - - principals { - type = "Service" - identifiers = ["firehose.amazonaws.com"] - } - - actions = ["sts:AssumeRole"] - } -} - -resource "aws_iam_role" "firehose" { - name = "substation-firehose-${local.name}" - assume_role_policy = data.aws_iam_policy_document.firehose.json -} - -data "aws_iam_policy_document" "firehose_s3" { - statement { - effect = "Allow" - actions = [ - "kms:Decrypt", - "kms:GenerateDataKey" - ] - - resources = [ - module.kms.arn, - ] - } - - statement { - effect = "Allow" - actions = [ - "s3:AbortMultipartUpload", - "s3:GetBucketLocation", - "s3:GetObject", - "s3:ListBucket", - "s3:ListBucketMultipartUploads", - "s3:PutObject" - ] - - resources = [ - aws_s3_bucket.firehose_s3.arn, - "${aws_s3_bucket.firehose_s3.arn}/*", - ] - } -} - -resource "aws_iam_policy" "firehose_s3" { - name = "substation-firehose-${resource.random_uuid.id.id}" - description = "Policy for the ${local.name} Kinesis Data Firehose." - policy = data.aws_iam_policy_document.firehose_s3.json -} - - -resource "aws_iam_role_policy_attachment" "firehose_s3" { - role = aws_iam_role.firehose.name - policy_arn = aws_iam_policy.firehose_s3.arn -} - -# S3 -resource "random_uuid" "firehose_s3" {} - -resource "aws_s3_bucket" "firehose_s3" { - bucket = "${random_uuid.firehose_s3.result}-substation" - force_destroy = true -} - -resource "aws_s3_bucket_ownership_controls" "firehose_s3" { - bucket = aws_s3_bucket.firehose_s3.id - rule { - object_ownership = "BucketOwnerPreferred" - } -} - -resource "aws_s3_bucket_server_side_encryption_configuration" "firehose_s3" { - bucket = aws_s3_bucket.firehose_s3.bucket - - rule { - apply_server_side_encryption_by_default { - kms_master_key_id = module.kms.arn - sse_algorithm = "aws:kms" - } - } -} - -# Kinesis Data Firehose -resource "aws_kinesis_firehose_delivery_stream" "firehose" { - name = "substation" - destination = "extended_s3" - - server_side_encryption { - enabled = true - key_type = "CUSTOMER_MANAGED_CMK" - key_arn = module.kms.arn - } - - extended_s3_configuration { - role_arn = aws_iam_role.firehose.arn - bucket_arn = aws_s3_bucket.firehose_s3.arn - kms_key_arn = module.kms.arn - buffering_interval = 60 - - processing_configuration { - enabled = "true" - - processors { - type = "Lambda" - - parameters { - parameter_name = "LambdaArn" - # LATEST is always used for container images. 
- parameter_value = "${module.transform.arn}:$LATEST" - } - } - } - } -} - -module "transform" { - source = "../../../../../../build/terraform/aws/lambda" - kms = module.kms - appconfig = module.appconfig - - config = { - name = "transform_node" - description = "Transforms Kinesis Data Firehose records." - image_uri = "${module.ecr.url}:v1.3.0" - image_arm = true - - memory = 128 - timeout = 60 - env = { - "SUBSTATION_CONFIG" : "http://localhost:2772/applications/substation/environments/example/configurations/transform_node" - "SUBSTATION_LAMBDA_HANDLER" : "AWS_KINESIS_DATA_FIREHOSE" - "SUBSTATION_DEBUG" : true - } - } - - access = [ - aws_iam_role.firehose.name, - ] - - depends_on = [ - module.appconfig.name, - module.ecr.url, - ] -} diff --git a/v1/examples/terraform/aws/kinesis/autoscale/terraform/_resources.tf b/v1/examples/terraform/aws/kinesis/autoscale/terraform/_resources.tf deleted file mode 100644 index 609ae675..00000000 --- a/v1/examples/terraform/aws/kinesis/autoscale/terraform/_resources.tf +++ /dev/null @@ -1,83 +0,0 @@ -# Repository for the Autoscale app. -module "ecr" { - source = "../../../../../../build/terraform/aws/ecr" - - config = { - name = "autoscale" - force_delete = true - } -} - -# SNS topic for Kinesis Data Stream autoscale alarms. -resource "aws_sns_topic" "autoscale" { - name = "autoscale" -} - -# Kinesis Data Stream that is managed by the Autoscale app. -module "kds" { - source = "../../../../../../build/terraform/aws/kinesis_data_stream" - - config = { - name = "substation" - autoscaling_topic = aws_sns_topic.autoscale.arn - } - - # Min and max shards can be defined as tags to override changes made - # by the Autoscale app. - tags = { - MinimumShards = 2 - MaximumShards = 4 - } - - # Add additional consumer and producer roles as needed. - access = [ - # Autoscales the stream. - module.lambda_autoscale.role.name, - ] -} - -# Lambda Autoscale application that manages Kinesis Data Streams. -module "lambda_autoscale" { - source = "../../../../../../build/terraform/aws/lambda" - - config = { - name = "autoscale" - description = "Autoscaler for Kinesis Data Streams." - image_uri = "${module.ecr.url}:v1.3.0" # This should use the project's release tags. - image_arm = true - - # Override the default Autoscale configuration using environment variables. - # These are the default settings, included for demonstration purposes. 
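# Editor's note (inferred from the variable names below, not documented
# here): the threshold appears to be the target stream utilization, and the
# datapoint counts appear to be how many consecutive CloudWatch periods must
# breach it before the app scales the stream up or down.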
- env = { - "AUTOSCALE_KINESIS_THRESHOLD" : 0.7, - "AUTOSCALE_KINESIS_UPSCALE_DATAPOINTS" : 5, - "AUTOSCALE_KINESIS_DOWNSCALE_DATAPOINTS" : 60, - } - } - - depends_on = [ - module.ecr.url, - ] -} - -resource "aws_sns_topic_subscription" "autoscale_subscription" { - topic_arn = aws_sns_topic.autoscale.arn - protocol = "lambda" - endpoint = module.lambda_autoscale.arn - - depends_on = [ - module.lambda_autoscale.name - ] -} - -resource "aws_lambda_permission" "autoscale_invoke" { - statement_id = "AllowExecutionFromSNS" - action = "lambda:InvokeFunction" - function_name = module.lambda_autoscale.name - principal = "sns.amazonaws.com" - source_arn = aws_sns_topic.autoscale.arn - - depends_on = [ - module.lambda_autoscale.name - ] -} diff --git a/v1/examples/terraform/aws/kinesis/multistream/config/publisher/config.jsonnet b/v1/examples/terraform/aws/kinesis/multistream/config/publisher/config.jsonnet deleted file mode 100644 index 9b64d783..00000000 --- a/v1/examples/terraform/aws/kinesis/multistream/config/publisher/config.jsonnet +++ /dev/null @@ -1,11 +0,0 @@ -local sub = import '../../../../../../../build/config/substation.libsonnet'; - -{ - concurrency: 1, - transforms: [ - // This forwards data to the destination stream without transformation. - sub.tf.send.aws.kinesis_data_stream( - settings={ stream_name: 'substation_dst' }, - ), - ], -} diff --git a/v1/examples/terraform/aws/kinesis/multistream/config/subscriber/config.jsonnet b/v1/examples/terraform/aws/kinesis/multistream/config/subscriber/config.jsonnet deleted file mode 100644 index bb84b999..00000000 --- a/v1/examples/terraform/aws/kinesis/multistream/config/subscriber/config.jsonnet +++ /dev/null @@ -1,8 +0,0 @@ -local sub = import '../../../../../../../build/config/substation.libsonnet'; - -{ - concurrency: 1, - transforms: [ - sub.tf.send.stdout(), - ], -} diff --git a/v1/examples/terraform/aws/kinesis/multistream/terraform/_resources.tf b/v1/examples/terraform/aws/kinesis/multistream/terraform/_resources.tf deleted file mode 100644 index 355c2c97..00000000 --- a/v1/examples/terraform/aws/kinesis/multistream/terraform/_resources.tf +++ /dev/null @@ -1,139 +0,0 @@ -data "aws_caller_identity" "caller" {} - -# KMS encryption key that is shared by all Substation infrastructure -module "kms" { - source = "../../../../../../build/terraform/aws/kms" - - config = { - name = "alias/substation" - policy = data.aws_iam_policy_document.kms.json - } -} - -# This policy is required to support encrypted SNS topics. -# More information: https://repost.aws/knowledge-center/cloudwatch-receive-sns-for-alarm-trigger -data "aws_iam_policy_document" "kms" { - # Allows CloudWatch to access encrypted SNS topic. - statement { - sid = "CloudWatch" - effect = "Allow" - actions = [ - "kms:Decrypt", - "kms:GenerateDataKey" - ] - - principals { - type = "Service" - identifiers = ["cloudwatch.amazonaws.com"] - } - - resources = ["*"] - } - - # Default key policy for KMS. - # https://docs.aws.amazon.com/kms/latest/developerguide/determining-access-key-policy.html - statement { - sid = "KMS" - effect = "Allow" - actions = [ - "kms:*", - ] - - principals { - type = "AWS" - identifiers = ["arn:aws:iam::${data.aws_caller_identity.caller.account_id}:root"] - } - - resources = ["*"] - } -} - -module "appconfig" { - source = "../../../../../../build/terraform/aws/appconfig" - - config = { - name = "substation" - environments = [{ - name = "example" - }] - } -} - -# Repository for the core Substation application. 
-module "ecr" { - source = "../../../../../../build/terraform/aws/ecr" - kms = module.kms - - config = { - name = "substation" - force_delete = true - } -} - -# Repository for the autoscaling application. -module "ecr_autoscale" { - source = "../../../../../../build/terraform/aws/ecr" - kms = module.kms - - config = { - name = "autoscale" - force_delete = true - } -} - -# SNS topic for Kinesis Data Stream autoscaling alarms. -resource "aws_sns_topic" "autoscaling_topic" { - name = "autoscale" - kms_master_key_id = module.kms.id -} - -# API Gateway that sends data to Kinesis. -module "gateway_to_kinesis" { - source = "../../../../../../build/terraform/aws/api_gateway/kinesis_data_stream" - # Always required for the Kinisis Data Stream integration. - kinesis_data_stream = module.kds_src - - config = { - name = "gateway" - } -} - -# Kinesis Data Stream that stores data sent from pipeline sources. -module "kds_src" { - source = "../../../../../../build/terraform/aws/kinesis_data_stream" - kms = module.kms - - config = { - name = "substation_src" - autoscaling_topic = aws_sns_topic.autoscaling_topic.arn - } - - access = [ - # Autoscales the stream. - module.lambda_autoscaling.role.name, - # Reads data to the stream. - module.lambda_publisher.role.name, - # Writes data to the stream. - module.gateway_to_kinesis.role.name, - ] -} - -# Kinesis Data Stream that stores data sent from the pipeline processor. -module "kds_dst" { - source = "../../../../../../build/terraform/aws/kinesis_data_stream" - kms = module.kms - - config = { - name = "substation_dst" - autoscaling_topic = aws_sns_topic.autoscaling_topic.arn - } - - access = [ - # Autoscales the stream. - module.lambda_autoscaling.role.name, - # Writes data to the stream. - module.lambda_publisher.role.name, - # Reads data from the stream. - module.lambda_subscriber.role.name, - ] -} diff --git a/v1/examples/terraform/aws/kinesis/multistream/terraform/autoscaler.tf b/v1/examples/terraform/aws/kinesis/multistream/terraform/autoscaler.tf deleted file mode 100644 index e177cb37..00000000 --- a/v1/examples/terraform/aws/kinesis/multistream/terraform/autoscaler.tf +++ /dev/null @@ -1,41 +0,0 @@ -# Used for deploying and maintaining the Kinesis Data Streams autoscaling application; does not need to be used if deployments don't include Kinesis Data Streams. 
- -module "lambda_autoscaling" { - source = "../../../../../../build/terraform/aws/lambda" - kms = module.kms - appconfig = module.appconfig - - config = { - name = "autoscale" - description = "Autoscaler for Kinesis Data Streams" - image_uri = "${module.ecr_autoscale.url}:v1.3.0" - image_arm = true - } - - depends_on = [ - module.appconfig.name, - module.ecr_autoscale.url, - ] -} - -resource "aws_sns_topic_subscription" "autoscaling_subscription" { - topic_arn = aws_sns_topic.autoscaling_topic.arn - protocol = "lambda" - endpoint = module.lambda_autoscaling.arn - - depends_on = [ - module.lambda_autoscaling.name - ] -} - -resource "aws_lambda_permission" "autoscaling_invoke" { - statement_id = "AllowExecutionFromSNS" - action = "lambda:InvokeFunction" - function_name = module.lambda_autoscaling.name - principal = "sns.amazonaws.com" - source_arn = aws_sns_topic.autoscaling_topic.arn - - depends_on = [ - module.lambda_autoscaling.name - ] -} diff --git a/v1/examples/terraform/aws/kinesis/multistream/terraform/publisher.tf b/v1/examples/terraform/aws/kinesis/multistream/terraform/publisher.tf deleted file mode 100644 index 3eb64423..00000000 --- a/v1/examples/terraform/aws/kinesis/multistream/terraform/publisher.tf +++ /dev/null @@ -1,32 +0,0 @@ -module "lambda_publisher" { - source = "../../../../../../build/terraform/aws/lambda" - kms = module.kms - appconfig = module.appconfig - - config = { - name = "publisher" - description = "Substation node that publishes to Kinesis" - image_uri = "${module.ecr.url}:v1.3.0" - image_arm = true - - env = { - "SUBSTATION_CONFIG" : "http://localhost:2772/applications/substation/environments/example/configurations/publisher" - "SUBSTATION_LAMBDA_HANDLER" : "AWS_KINESIS_DATA_STREAM" - "SUBSTATION_DEBUG" : true - } - } - - depends_on = [ - module.appconfig.name, - module.ecr.url, - ] -} - -resource "aws_lambda_event_source_mapping" "lambda_publisher" { - event_source_arn = module.kds_src.arn - function_name = module.lambda_publisher.arn - maximum_batching_window_in_seconds = 10 - batch_size = 100 - parallelization_factor = 1 - starting_position = "LATEST" -} diff --git a/v1/examples/terraform/aws/kinesis/multistream/terraform/subscriber.tf b/v1/examples/terraform/aws/kinesis/multistream/terraform/subscriber.tf deleted file mode 100644 index 1dfa75f5..00000000 --- a/v1/examples/terraform/aws/kinesis/multistream/terraform/subscriber.tf +++ /dev/null @@ -1,32 +0,0 @@ -module "lambda_subscriber" { - source = "../../../../../../build/terraform/aws/lambda" - kms = module.kms - appconfig = module.appconfig - - config = { - name = "subscriber" - description = "Substation node subscribes to Kinesis" - image_uri = "${module.ecr.url}:v1.3.0" - image_arm = true - - env = { - "SUBSTATION_CONFIG" : "http://localhost:2772/applications/substation/environments/example/configurations/subscriber" - "SUBSTATION_LAMBDA_HANDLER" : "AWS_KINESIS_DATA_STREAM" - "SUBSTATION_DEBUG" : true - } - } - - depends_on = [ - module.appconfig.name, - module.ecr.url, - ] -} - -resource "aws_lambda_event_source_mapping" "lambda_subscriber" { - event_source_arn = module.kds_dst.arn - function_name = module.lambda_subscriber.arn - maximum_batching_window_in_seconds = 10 - batch_size = 100 - parallelization_factor = 1 - starting_position = "LATEST" -} diff --git a/v1/examples/terraform/aws/kinesis/nxdr/config/const.libsonnet b/v1/examples/terraform/aws/kinesis/nxdr/config/const.libsonnet deleted file mode 100644 index 08462e40..00000000 --- 
a/v1/examples/terraform/aws/kinesis/nxdr/config/const.libsonnet +++ /dev/null @@ -1,15 +0,0 @@ -local sub = import '../../../../../../build/config/substation.libsonnet'; - -{ - threat_signals_key: 'threat.signals', - // threat_signal is a custom function that appends threat info to an - // event as enrichment metadata. - // - // If a smaller event is needed, then the enriched threat signal can - // be emitted as a separate event. This is similar to the implementation - // seen in the enrichment Lambda function. - threat_signal(settings): sub.tf.obj.insert({ - obj: { trg: sub.helpers.obj.append_array($.threat_signals_key) }, - value: { name: settings.name, description: settings.description, references: settings.references, risk_score: settings.risk_score }, - }), -} diff --git a/v1/examples/terraform/aws/kinesis/nxdr/config/enrichment/config.jsonnet b/v1/examples/terraform/aws/kinesis/nxdr/config/enrichment/config.jsonnet deleted file mode 100644 index 9cbaa5ca..00000000 --- a/v1/examples/terraform/aws/kinesis/nxdr/config/enrichment/config.jsonnet +++ /dev/null @@ -1,34 +0,0 @@ -local sub = import '../../../../../../../build/config/substation.libsonnet'; - -local const = import '../const.libsonnet'; -local threat = import '../threat_enrichment.libsonnet'; - -{ - concurrency: 2, - transforms: - threat.transforms + [ - // Discards any events that don't contain threat signals. - sub.tf.meta.switch({ cases: [ - { - condition: sub.cnd.any([ - sub.cnd.num.len.eq({ object: { source_key: const.threat_signals_key }, value: 0 }), - ]), - transform: sub.tf.util.drop(), - }, - ] }), - // Explodes the threat signals array into individual events. These become - // threat signal records in the DynamoDB table. - sub.tf.aggregate.from.array({ object: { source_key: const.threat_signals_key } }), - // The host name and current time are used as the keys for the DynamoDB table. - sub.tf.object.copy({ object: { source_key: 'host.name', target_key: 'PK' } }), - sub.tf.time.now({ object: { target_key: 'SK' } }), - sub.tf.time.to.string({ object: { source_key: 'SK', target_key: 'SK' }, format: '2006-01-02T15:04:05.000Z' }), - // Any fields not needed in the DynamoDB item are removed. - sub.tf.object.delete({ object: { source_key: 'event' } }), - sub.tf.object.delete({ object: { source_key: 'host' } }), - sub.tf.object.delete({ object: { source_key: 'process' } }), - sub.tf.object.delete({ object: { source_key: 'threat' } }), - // Writes the threat signal to the DynamoDB table. - sub.tf.send.aws.dynamodb({ table_name: 'substation_threat_signals' }), - ], -} diff --git a/v1/examples/terraform/aws/kinesis/nxdr/config/threat_enrichment.libsonnet b/v1/examples/terraform/aws/kinesis/nxdr/config/threat_enrichment.libsonnet deleted file mode 100644 index 580517ae..00000000 --- a/v1/examples/terraform/aws/kinesis/nxdr/config/threat_enrichment.libsonnet +++ /dev/null @@ -1,45 +0,0 @@ -// The nXDR pattern relies on Substation's meta_switch transform to conditionally determine -// if an event matches threat criteria. If the event matches, then a threat signal is created. -// The meta_switch transform supports any combination of if-elif-else logic. -local sub = import '../../../../../../build/config/substation.libsonnet'; -local const = import 'const.libsonnet'; - -// Composable conditions are recommended when managing multiple threat signals. 
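// Editor's sketch (hypothetical signal, not in the original file): a second
// macOS process signal could reuse the composed conditions defined below,
// e.g.:
//
//   condition: sub.cnd.all(cnd.macos.process + [
//     sub.cnd.str.eq({ obj: { src: 'process.name' }, value: 'osascript' }),
//   ]),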
-local cnd = { - process: [ - sub.cnd.str.eq({ obj: { src: 'event.category' }, value: 'process' }), - ], - - macos: { - os: sub.cnd.str.eq({ obj: { src: 'host.os.type' }, value: 'macos' }), - process: $.process + [$.macos.os], - }, -}; - -{ - transforms: [ - // Privilege Escalation - // https://attack.mitre.org/tactics/TA0004/ - // - // https://attack.mitre.org/techniques/T1548/004/ - sub.tf.meta.switch({ - local name = 'privilege_escalation_elevated_execution_with_prompt', - - cases: [ - { - transform: const.threat_signal({ - name: name, - description: 'Identifies when an authentication prompt is generated by the AuthorizationExecuteWithPrivileges API.', - references: ['https://objective-see.com/blog/blog_0x2A.html'], - // The risk score can be dynamically calculated based on additional - // fields in the event. - risk_score: 73, - }), - condition: sub.cnd.all(cnd.macos.process + [ - sub.cnd.str.eq({ obj: { src: 'process.name' }, value: 'security_authtrampoline' }), - ]), - }, - ], - }), - ], -} diff --git a/v1/examples/terraform/aws/kinesis/nxdr/config/transform/config.jsonnet b/v1/examples/terraform/aws/kinesis/nxdr/config/transform/config.jsonnet deleted file mode 100644 index 695f96a8..00000000 --- a/v1/examples/terraform/aws/kinesis/nxdr/config/transform/config.jsonnet +++ /dev/null @@ -1,13 +0,0 @@ -local sub = import '../../../../../../../build/config/substation.libsonnet'; - -local threat = import '../threat_enrichment.libsonnet'; - -{ - concurrency: 2, - transforms: - threat.transforms + [ - // At this point more transforms can be added and the events can be sent - // to an external system. - sub.tf.send.stdout(), - ], -} diff --git a/v1/examples/terraform/aws/kinesis/nxdr/data.jsonl b/v1/examples/terraform/aws/kinesis/nxdr/data.jsonl deleted file mode 100644 index 037aa3a4..00000000 --- a/v1/examples/terraform/aws/kinesis/nxdr/data.jsonl +++ /dev/null @@ -1,5 +0,0 @@ -{"event":{"category":"process","type":"start"},"host":{"name":"Alice's Macbook Pro","os":{"type":"macos"}},"process":{"command_line":"/usr/sbin/sshd -i","name":"sshd","parent":{"command_line":"/usr/libexec/sshd-keygen-wrapper","name":"sshd-keygen-wrapper","parent":{"command_line":"/usr/libexec/launchd","name":"launchd"}}}} -{"event":{"category":"process","type":"start"},"host":{"name":"Alice's Macbook Pro","os":{"type":"macos"}},"process":{"command_line":"/usr/libexec/security_authtrampoline /usr/sbin/installer auth 22 -verboseR -allowUntrusted -pkg /private/tmp/xp-6100/epsvcp.pkg -target /","name":"security_authtrampoline","parent":{"command_line":"/private/tmp/update_XP-6100 Series/EPSON.app/Contents/MacOS/EpsonInstaller","name":"EpsonInstaller","parent":{"command_line":"/usr/libexec/runningboardd","name":"runningboardd"}}}} -{"event":{"category":"process","type":"start"},"host":{"name":"Alice's Macbook Pro","os":{"type":"macos"}},"process":{"command_line":"/Applications/Google Chrome.app/Contents/MacOS/Google Chrome","name":"Google Chrome","parent":{"command_line":"/usr/bin/open -n -a /Applications/Google Chrome.app","name":"open","parent":{"command_line":"/usr/libexec/launchd","name":"launchd"}}}} -{"event":{"category":"process","type":"start"},"host":{"name":"Alice's Macbook Pro","os":{"type":"macos"}},"process":{"command_line":"/usr/sbin/cupsd","name":"cupsd","parent":{"command_line":"/usr/libexec/cups/backend/usb","name":"cups-usb-backend","parent":{"command_line":"/usr/libexec/launchd","name":"launchd"}}}} -{"event":{"category":"process","type":"start"},"host":{"name":"Alice's Macbook 
Pro","os":{"type":"macos"}},"process":{"command_line":"/usr/bin/python3 /usr/local/bin/pip install requests","name":"python3","parent":{"command_line":"/usr/local/bin/pip install requests","name":"pip","parent":{"command_line":"/usr/bin/sudo pip3","name":"sudo"}}}} diff --git a/v1/examples/terraform/aws/kinesis/nxdr/post_deploy.sh b/v1/examples/terraform/aws/kinesis/nxdr/post_deploy.sh deleted file mode 100644 index b3794540..00000000 --- a/v1/examples/terraform/aws/kinesis/nxdr/post_deploy.sh +++ /dev/null @@ -1,2 +0,0 @@ -sleep 5 -AWS_DEFAULT_REGION=$AWS_REGION python3 ../build/scripts/aws/kinesis/put_records.py substation terraform/aws/kinesis/nxdr/data.jsonl --print-response diff --git a/v1/examples/terraform/aws/kinesis/nxdr/terraform/_resources.tf b/v1/examples/terraform/aws/kinesis/nxdr/terraform/_resources.tf deleted file mode 100644 index 7867db2d..00000000 --- a/v1/examples/terraform/aws/kinesis/nxdr/terraform/_resources.tf +++ /dev/null @@ -1,77 +0,0 @@ -module "appconfig" { - source = "../../../../../../build/terraform/aws/appconfig" - - config = { - name = "substation" - environments = [{ name = "example" }] - } -} - -# Repository for the core Substation application. -module "ecr" { - source = "../../../../../../build/terraform/aws/ecr" - - config = { - name = "substation" - force_delete = true - } -} - -# Repository for the autoscaling application. -module "ecr_autoscale" { - source = "../../../../../../build/terraform/aws/ecr" - - config = { - name = "autoscale" - force_delete = true - } -} - -# SNS topic for Kinesis Data Stream autoscaling alarms. -resource "aws_sns_topic" "autoscaling_topic" { - name = "autoscale" -} - -# Kinesis Data Stream that stores data sent from pipeline sources. -module "kinesis" { - source = "../../../../../../build/terraform/aws/kinesis_data_stream" - - config = { - name = "substation" - autoscaling_topic = aws_sns_topic.autoscaling_topic.arn - } - - access = [ - # Autoscales the stream. - module.lambda_autoscale.role.name, - # Consumes data from the stream. 
- module.lambda_transform.role.name, - module.lambda_enrichment.role.name, - ] -} - -module "dynamodb" { - source = "../../../../../../build/terraform/aws/dynamodb" - - config = { - name = "substation_threat_signals" - hash_key = "PK" - range_key = "SK" - ttl = "TTL" - - attributes = [ - { - name = "PK" - type = "S" - }, - { - name = "SK" - type = "S" - }, - ] - } - - access = [ - module.lambda_enrichment.role.name, - ] -} diff --git a/v1/examples/terraform/aws/kinesis/nxdr/terraform/autoscaler.tf b/v1/examples/terraform/aws/kinesis/nxdr/terraform/autoscaler.tf deleted file mode 100644 index d936193c..00000000 --- a/v1/examples/terraform/aws/kinesis/nxdr/terraform/autoscaler.tf +++ /dev/null @@ -1,33 +0,0 @@ -module "lambda_autoscale" { - source = "../../../../../../build/terraform/aws/lambda" - appconfig = module.appconfig - - config = { - name = "autoscale" - description = "Autoscaler for Kinesis Data Streams" - image_uri = "${module.ecr_autoscale.url}:v1.3.0" - image_arm = true - } -} - -resource "aws_sns_topic_subscription" "autoscaling_subscription" { - topic_arn = aws_sns_topic.autoscaling_topic.arn - protocol = "lambda" - endpoint = module.lambda_autoscale.arn - - depends_on = [ - module.lambda_autoscale.name - ] -} - -resource "aws_lambda_permission" "autoscaling_invoke" { - statement_id = "AllowExecutionFromSNS" - action = "lambda:InvokeFunction" - function_name = module.lambda_autoscale.name - principal = "sns.amazonaws.com" - source_arn = aws_sns_topic.autoscaling_topic.arn - - depends_on = [ - module.lambda_autoscale.name - ] -} diff --git a/v1/examples/terraform/aws/kinesis/nxdr/terraform/enrichment.tf b/v1/examples/terraform/aws/kinesis/nxdr/terraform/enrichment.tf deleted file mode 100644 index 985e794c..00000000 --- a/v1/examples/terraform/aws/kinesis/nxdr/terraform/enrichment.tf +++ /dev/null @@ -1,29 +0,0 @@ -module "lambda_enrichment" { - source = "../../../../../../build/terraform/aws/lambda" - appconfig = module.appconfig - - config = { - name = "enrichment" - description = "Substation enrichment node that writes threat signals to DynamoDB." - image_uri = "${module.ecr.url}:v1.3.0" - image_arm = true - - env = { - "SUBSTATION_CONFIG" : "http://localhost:2772/applications/substation/environments/example/configurations/enrichment" - "SUBSTATION_LAMBDA_HANDLER" : "AWS_KINESIS_DATA_STREAM" - "SUBSTATION_DEBUG" : true - } - } -} - -resource "aws_lambda_event_source_mapping" "lambda_enrichment" { - event_source_arn = module.kinesis.arn - function_name = module.lambda_enrichment.arn - maximum_batching_window_in_seconds = 10 - batch_size = 100 - parallelization_factor = 1 - # In this example, we start from the beginning of the stream, - # but in a prod environment, you may want to start from the end - # of the stream to avoid processing old data ("LATEST"). - starting_position = "TRIM_HORIZON" -} diff --git a/v1/examples/terraform/aws/kinesis/nxdr/terraform/transform.tf b/v1/examples/terraform/aws/kinesis/nxdr/terraform/transform.tf deleted file mode 100644 index e7fdee3d..00000000 --- a/v1/examples/terraform/aws/kinesis/nxdr/terraform/transform.tf +++ /dev/null @@ -1,29 +0,0 @@ -module "lambda_transform" { - source = "../../../../../../build/terraform/aws/lambda" - appconfig = module.appconfig - - config = { - name = "transform" - description = "Substation transform node that enriches events with threat information." 
- image_uri = "${module.ecr.url}:v1.3.0" - image_arm = true - - env = { - "SUBSTATION_CONFIG" : "http://localhost:2772/applications/substation/environments/example/configurations/transform" - "SUBSTATION_LAMBDA_HANDLER" : "AWS_KINESIS_DATA_STREAM" - "SUBSTATION_DEBUG" : true - } - } -} - -resource "aws_lambda_event_source_mapping" "lambda_transform" { - event_source_arn = module.kinesis.arn - function_name = module.lambda_transform.arn - maximum_batching_window_in_seconds = 10 - batch_size = 100 - parallelization_factor = 1 - # In this example, we start from the beginning of the stream, - # but in a prod environment, you may want to start from the end - # of the stream to avoid processing old data ("LATEST"). - starting_position = "TRIM_HORIZON" -} diff --git a/v1/examples/terraform/aws/kinesis/time_travel/config/const.libsonnet b/v1/examples/terraform/aws/kinesis/time_travel/config/const.libsonnet deleted file mode 100644 index e3d453ad..00000000 --- a/v1/examples/terraform/aws/kinesis/time_travel/config/const.libsonnet +++ /dev/null @@ -1,12 +0,0 @@ -local sub = import '../../../../../../build/config/substation.libsonnet'; - -{ - is_process: [ - sub.cnd.str.eq({ obj: { src: 'event.category' }, value: 'process' }), - sub.cnd.str.eq({ obj: { src: 'event.type' }, value: 'start' }), - ], - kv_store: sub.kv_store.aws_dynamodb({ - table_name: 'substation', - attributes: { partition_key: 'PK', sort_key: 'SK', ttl: 'TTL', value: 'cache' }, - }), -} diff --git a/v1/examples/terraform/aws/kinesis/time_travel/config/enrichment/config.jsonnet b/v1/examples/terraform/aws/kinesis/time_travel/config/enrichment/config.jsonnet deleted file mode 100644 index ebd3ac54..00000000 --- a/v1/examples/terraform/aws/kinesis/time_travel/config/enrichment/config.jsonnet +++ /dev/null @@ -1,16 +0,0 @@ -// Puts process metadata into the KV store. -local sub = import '../../../../../../../build/config/substation.libsonnet'; -local const = import '../const.libsonnet'; - -{ - // The concurrency is set to 1 to ensure that the KV store is not updated in parallel. - concurrency: 1, - transforms: [ - // If the event is a process, then store the process metadata in the KV store - // indexed by the PID. The data is stored in the KV store for 90 days. - sub.pattern.tf.conditional( - condition=sub.cnd.all(const.is_process), - transform=sub.tf.enrich.kv_store.iset({ obj: { src: 'process.pid', trg: 'process' }, prefix: 'process', ttl_offset: std.format('%dh', 24 * 90), kv_store: const.kv_store, close_kv_store: false }), - ), - ], -} diff --git a/v1/examples/terraform/aws/kinesis/time_travel/config/transform/config.jsonnet b/v1/examples/terraform/aws/kinesis/time_travel/config/transform/config.jsonnet deleted file mode 100644 index 2ba9a63f..00000000 --- a/v1/examples/terraform/aws/kinesis/time_travel/config/transform/config.jsonnet +++ /dev/null @@ -1,28 +0,0 @@ -// All values in the KV store were put there by the `enrichment` function. -local sub = import '../../../../../../../build/config/substation.libsonnet'; -local const = import '../const.libsonnet'; - -{ - concurrency: 2, - transforms: [ - // process.* - // - // This is only applied to non-process events. 
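// Editor's sketch (equivalent formulation, not in the original config): the
// guard below could also be written with meta.switch, e.g.:
//
//   sub.tf.meta.switch({ cases: [
//     {
//       condition: sub.cnd.none(const.is_process),
//       transform: sub.tf.enrich.kv_store.iget({ obj: { src: 'process.pid', trg: 'process' }, prefix: 'process', kv_store: const.kv_store }),
//     },
//   ] }),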
- sub.pattern.tf.conditional( - condition=sub.cnd.none(const.is_process), - transform=sub.tf.enrich.kv_store.iget({ obj: { src: 'process.pid', trg: 'process' }, prefix: 'process', kv_store: const.kv_store }), - ), - // process.parent.* - sub.pattern.tf.conditional( - condition=sub.cnd.num.len.gt({ obj: { src: 'process.parent.pid' }, value: 0 }), - transform=sub.tf.enrich.kv_store.iget({ obj: { src: 'process.parent.pid', trg: 'process.parent' }, prefix: 'process', kv_store: const.kv_store }), - ), - // process.parent.parent.* - sub.pattern.tf.conditional( - condition=sub.cnd.num.len.gt({ obj: { src: 'process.parent.parent.pid' }, value: 0 }), - transform=sub.tf.enrich.kv_store.iget({ obj: { src: 'process.parent.parent.pid', trg: 'process.parent.parent' }, prefix: 'process', kv_store: const.kv_store }), - ), - // Print the results. - sub.tf.send.stdout(), - ], -} diff --git a/v1/examples/terraform/aws/kinesis/time_travel/data.jsonl b/v1/examples/terraform/aws/kinesis/time_travel/data.jsonl deleted file mode 100644 index c0c390c5..00000000 --- a/v1/examples/terraform/aws/kinesis/time_travel/data.jsonl +++ /dev/null @@ -1,4 +0,0 @@ -{"event":{"category":"network","type":"connection"},"process":{"name":"Spotify","pid":"d3a6c0b9d3751559f206e12fb1b8f226"},"server":{"ip":"35.186.224.39","port":443},"@timestamp":"2024-03-29T04:02:38.470000Z"} -{"event":{"category":"process","type":"start"},"process":{"command_line":"/sbin/launchd","name":"launchd","pid":"f23e8b548d2e5e1ef3e122a9c5e08a63","start":"2024-03-13T16:17:45.000000Z","parent":{"pid":"b745f7a7c3a98ac5f087be7420e6e3f9"}}} -{"event":{"category":"process","type":"start"},"process":{"command_line":"/usr/libexec/runningboardd","name":"runningboardd","pid":"8faae8aa27f9b4faff6fd98e60201e3d","start":"2024-03-13T16:17:49.000000Z","parent":{"pid":"f23e8b548d2e5e1ef3e122a9c5e08a63"}}} -{"event":{"category":"process","type":"start"},"process":{"command_line":"/Applications/Spotify.app/Contents/MacOS/Spotify","name":"Spotify","pid":"d3a6c0b9d3751559f206e12fb1b8f226","start":"2024-03-13T16:29:17.000000Z","parent":{"pid":"8faae8aa27f9b4faff6fd98e60201e3d"}}} diff --git a/v1/examples/terraform/aws/kinesis/time_travel/post_deploy.sh b/v1/examples/terraform/aws/kinesis/time_travel/post_deploy.sh deleted file mode 100644 index 6de657d7..00000000 --- a/v1/examples/terraform/aws/kinesis/time_travel/post_deploy.sh +++ /dev/null @@ -1,2 +0,0 @@ -sleep 5 -AWS_DEFAULT_REGION=$AWS_REGION python3 ../build/scripts/aws/kinesis/put_records.py substation terraform/aws/kinesis/time_travel/data.jsonl --print-response diff --git a/v1/examples/terraform/aws/kinesis/time_travel/terraform/_resources.tf b/v1/examples/terraform/aws/kinesis/time_travel/terraform/_resources.tf deleted file mode 100644 index ff885f0c..00000000 --- a/v1/examples/terraform/aws/kinesis/time_travel/terraform/_resources.tf +++ /dev/null @@ -1,79 +0,0 @@ -module "appconfig" { - source = "../../../../../../build/terraform/aws/appconfig" - - config = { - name = "substation" - environments = [{ name = "example" }] - } -} - -# Repository for the core Substation application. -module "ecr" { - source = "../../../../../../build/terraform/aws/ecr" - - config = { - name = "substation" - force_delete = true - } -} - -# Repository for the autoscaling application. -module "ecr_autoscale" { - source = "../../../../../../build/terraform/aws/ecr" - - config = { - name = "autoscale" - force_delete = true - } -} - -# SNS topic for Kinesis Data Stream autoscaling alarms. 
-resource "aws_sns_topic" "autoscaling_topic" { - name = "autoscale" -} - -# Kinesis Data Stream that stores data sent from pipeline sources. -module "kinesis" { - source = "../../../../../../build/terraform/aws/kinesis_data_stream" - - config = { - name = "substation" - autoscaling_topic = aws_sns_topic.autoscaling_topic.arn - shards = 1 - } - - access = [ - # Autoscales the stream. - module.lambda_autoscaling.role.name, - # Consumes data from the stream. - module.lambda_enrichment.role.name, - module.lambda_transform.role.name, - ] -} - -module "dynamodb" { - source = "../../../../../../build/terraform/aws/dynamodb" - - config = { - name = "substation" - hash_key = "PK" - range_key = "SK" - ttl = "TTL" - - attributes = [ - { - name = "PK" - type = "S" - }, - { - name = "SK" - type = "S" - }, - ] - } - - access = [ - module.lambda_enrichment.role.name, - module.lambda_transform.role.name, - ] -} diff --git a/v1/examples/terraform/aws/kinesis/time_travel/terraform/autoscaler.tf b/v1/examples/terraform/aws/kinesis/time_travel/terraform/autoscaler.tf deleted file mode 100644 index 8c4a8577..00000000 --- a/v1/examples/terraform/aws/kinesis/time_travel/terraform/autoscaler.tf +++ /dev/null @@ -1,35 +0,0 @@ -# Used for deploying and maintaining the Kinesis Data Streams autoscaling application; does not need to be used if deployments don't include Kinesis Data Streams. - -module "lambda_autoscaling" { - source = "../../../../../../build/terraform/aws/lambda" - appconfig = module.appconfig - - config = { - name = "autoscale" - description = "Autoscaler for Kinesis Data Streams" - image_uri = "${module.ecr_autoscale.url}:v1.3.0" - image_arm = true - } -} - -resource "aws_sns_topic_subscription" "autoscaling_subscription" { - topic_arn = aws_sns_topic.autoscaling_topic.arn - protocol = "lambda" - endpoint = module.lambda_autoscaling.arn - - depends_on = [ - module.lambda_autoscaling.name - ] -} - -resource "aws_lambda_permission" "autoscaling_invoke" { - statement_id = "AllowExecutionFromSNS" - action = "lambda:InvokeFunction" - function_name = module.lambda_autoscaling.name - principal = "sns.amazonaws.com" - source_arn = aws_sns_topic.autoscaling_topic.arn - - depends_on = [ - module.lambda_autoscaling.name - ] -} diff --git a/v1/examples/terraform/aws/kinesis/time_travel/terraform/enrichment.tf b/v1/examples/terraform/aws/kinesis/time_travel/terraform/enrichment.tf deleted file mode 100644 index 5ae781b3..00000000 --- a/v1/examples/terraform/aws/kinesis/time_travel/terraform/enrichment.tf +++ /dev/null @@ -1,29 +0,0 @@ -module "lambda_enrichment" { - source = "../../../../../../build/terraform/aws/lambda" - appconfig = module.appconfig - - config = { - name = "enrichment" - description = "Substation node that enriches data from Kinesis and writes it to DynamoDB" - image_uri = "${module.ecr.url}:v1.3.0" - image_arm = true - - env = { - "SUBSTATION_CONFIG" : "http://localhost:2772/applications/substation/environments/example/configurations/enrichment" - "SUBSTATION_LAMBDA_HANDLER" : "AWS_KINESIS_DATA_STREAM" - "SUBSTATION_DEBUG" : true - } - } -} - -resource "aws_lambda_event_source_mapping" "lambda_enrichment" { - event_source_arn = module.kinesis.arn - function_name = module.lambda_enrichment.arn - maximum_batching_window_in_seconds = 5 - batch_size = 100 - parallelization_factor = 1 - # In this example, we start from the beginning of the stream, - # but in a prod environment, you may want to start from the end - # of the stream to avoid processing old data ("LATEST"). 
- starting_position = "TRIM_HORIZON" -} diff --git a/v1/examples/terraform/aws/kinesis/time_travel/terraform/transform.tf b/v1/examples/terraform/aws/kinesis/time_travel/terraform/transform.tf deleted file mode 100644 index 66498b17..00000000 --- a/v1/examples/terraform/aws/kinesis/time_travel/terraform/transform.tf +++ /dev/null @@ -1,29 +0,0 @@ -module "lambda_transform" { - source = "../../../../../../build/terraform/aws/lambda" - appconfig = module.appconfig - - config = { - name = "transform" - description = "Substation node that reads from Kinesis with a delay to support enrichment" - image_uri = "${module.ecr.url}:v1.3.0" - image_arm = true - - env = { - "SUBSTATION_CONFIG" : "http://localhost:2772/applications/substation/environments/example/configurations/transform" - "SUBSTATION_LAMBDA_HANDLER" : "AWS_KINESIS_DATA_STREAM" - "SUBSTATION_DEBUG" : true - } - } -} - -resource "aws_lambda_event_source_mapping" "lambda_transform" { - event_source_arn = module.kinesis.arn - function_name = module.lambda_transform.arn - maximum_batching_window_in_seconds = 15 - batch_size = 100 - parallelization_factor = 1 - # In this example, we start from the beginning of the stream, - # but in a prod environment, you may want to start from the end - # of the stream to avoid processing old data ("LATEST"). - starting_position = "TRIM_HORIZON" -} diff --git a/v1/examples/terraform/aws/lambda/appconfig/config/node/config.jsonnet b/v1/examples/terraform/aws/lambda/appconfig/config/node/config.jsonnet deleted file mode 100644 index 8b0028e2..00000000 --- a/v1/examples/terraform/aws/lambda/appconfig/config/node/config.jsonnet +++ /dev/null @@ -1,10 +0,0 @@ -local sub = import '../../../../../../../build/config/substation.libsonnet'; - -{ - transforms: [ - // This will always fail validation because the settings are invalid. - sub.tf.object.delete( - settings={ object: { missing_key: 'abc' } } - ), - ], -} diff --git a/v1/examples/terraform/aws/lambda/appconfig/terraform/_resources.tf b/v1/examples/terraform/aws/lambda/appconfig/terraform/_resources.tf deleted file mode 100644 index ebeed22d..00000000 --- a/v1/examples/terraform/aws/lambda/appconfig/terraform/_resources.tf +++ /dev/null @@ -1,47 +0,0 @@ -module "appconfig" { - source = "../../../../../../build/terraform/aws/appconfig" - lambda = module.validate - - config = { - name = "substation" - environments = [{ - name = "example" - }] - } -} - -module "validate" { - source = "../../../../../../build/terraform/aws/lambda" - config = { - name = "validate" - description = "Substation configuration validator that is executed by AppConfig." - image_uri = "${module.ecr_validate.url}:v1.3.0" - image_arm = true - - memory = 128 - timeout = 1 - } - - depends_on = [ - module.appconfig.name, - module.ecr_validate.url, - ] -} - -module "ecr" { - source = "../../../../../../build/terraform/aws/ecr" - - config = { - name = "substation" - force_delete = true - } -} - -module "ecr_validate" { - source = "../../../../../../build/terraform/aws/ecr" - - config = { - name = "validate" - force_delete = true - } -} diff --git a/v1/examples/terraform/aws/lambda/appconfig/terraform/node.tf b/v1/examples/terraform/aws/lambda/appconfig/terraform/node.tf deleted file mode 100644 index 83ebc0c4..00000000 --- a/v1/examples/terraform/aws/lambda/appconfig/terraform/node.tf +++ /dev/null @@ -1,31 +0,0 @@ -module "node" { - source = "../../../../../../build/terraform/aws/lambda" - - # AppConfig is configured to validate configurations before deployment. 
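# Editor's note: validation is performed by the "validate" Lambda attached to
# the appconfig module in _resources.tf; this node's config always fails that
# check, so a configuration is never delivered to the function.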
- appconfig = module.appconfig - - config = { - name = "node" - description = "Substation node that never receives a configuration." - image_uri = "${module.ecr.url}:v1.3.0" - image_arm = true - - memory = 128 - timeout = 10 - env = { - "SUBSTATION_CONFIG" : "http://localhost:2772/applications/substation/environments/example/configurations/node" - "SUBSTATION_LAMBDA_HANDLER" : "AWS_LAMBDA" - "SUBSTATION_DEBUG" : true - } - } - - depends_on = [ - module.appconfig.name, - module.ecr.url, - ] -} - -resource "aws_lambda_function_url" "url" { - function_name = module.node.name - authorization_type = "NONE" -} diff --git a/v1/examples/terraform/aws/lambda/microservice/config/microservice/config.jsonnet b/v1/examples/terraform/aws/lambda/microservice/config/microservice/config.jsonnet deleted file mode 100644 index 80684767..00000000 --- a/v1/examples/terraform/aws/lambda/microservice/config/microservice/config.jsonnet +++ /dev/null @@ -1,31 +0,0 @@ -local sub = import '../../../../../../../build/config/substation.libsonnet'; - -{ - transforms: [ - // Dynamically handles input from either Lambda URL or synchronous invocation. - sub.pattern.tf.conditional( - condition=sub.cnd.all([ - sub.cnd.number.length.greater_than( - settings={ object: { source_key: 'body' }, value: 0 } - ), - ]), - transform=sub.tf.object.copy( - settings={ object: { source_key: 'body' } } - ), - ), - // Performs a reverse DNS lookup on the 'addr' field if it is a public IP address. - sub.pattern.tf.conditional( - condition=sub.cnd.none(sub.pattern.cnd.network.ip.internal(key='addr')), - transform=sub.tf.enrich.dns.ip_lookup( - settings={ object: { source_key: 'addr', target_key: 'domain' } }, - ), - ), - // The DNS response is copied so that it is the only value returned in the object. 
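// A sketch of how the two copy transforms below behave, assuming the usual
// Substation copy semantics (confirm against the transform docs): a copy
// with only a source_key replaces the message body with that field's value,
// and a copy with only a target_key wraps the raw body in a new object
// under that key. For example:
//
//   {"addr":"8.8.8.8","domain":"dns.google."}   // after the DNS lookup
//   "dns.google."                               // after copy source_key='domain'
//   {"domain":"dns.google."}                    // after copy target_key='domain'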
- sub.tf.object.copy( - settings={ object: { source_key: 'domain' } }, - ), - sub.tf.object.copy( - settings={ object: { target_key: 'domain' } }, - ), - ], -} diff --git a/v1/examples/terraform/aws/lambda/microservice/terraform/_resources.tf b/v1/examples/terraform/aws/lambda/microservice/terraform/_resources.tf deleted file mode 100644 index 492b504b..00000000 --- a/v1/examples/terraform/aws/lambda/microservice/terraform/_resources.tf +++ /dev/null @@ -1,21 +0,0 @@ -data "aws_caller_identity" "caller" {} - -module "appconfig" { - source = "../../../../../../build/terraform/aws/appconfig" - - config = { - name = "substation" - environments = [{ - name = "example" - }] - } -} - -module "ecr" { - source = "../../../../../../build/terraform/aws/ecr" - - config = { - name = "substation" - force_delete = true - } -} diff --git a/v1/examples/terraform/aws/lambda/microservice/terraform/microservice.tf b/v1/examples/terraform/aws/lambda/microservice/terraform/microservice.tf deleted file mode 100644 index 018b1b8f..00000000 --- a/v1/examples/terraform/aws/lambda/microservice/terraform/microservice.tf +++ /dev/null @@ -1,30 +0,0 @@ -module "microservice" { - source = "../../../../../../build/terraform/aws/lambda" - appconfig = module.appconfig - - config = { - name = "microservice" - description = "Substation node that acts as a synchronous microservice" - image_uri = "${module.ecr.url}:v1.3.0" - image_arm = true - - memory = 128 - timeout = 10 - env = { - "SUBSTATION_CONFIG" : "http://localhost:2772/applications/substation/environments/example/configurations/microservice" - "SUBSTATION_LAMBDA_HANDLER" : "AWS_LAMBDA" - "SUBSTATION_DEBUG" : true - } - } - - - depends_on = [ - module.appconfig.name, - module.ecr.url, - ] -} - -resource "aws_lambda_function_url" "substation_microservice" { - function_name = module.microservice.name - authorization_type = "NONE" -} diff --git a/v1/examples/terraform/aws/lambda/vpc/config/whatismyip/config.jsonnet b/v1/examples/terraform/aws/lambda/vpc/config/whatismyip/config.jsonnet deleted file mode 100644 index c70dd57b..00000000 --- a/v1/examples/terraform/aws/lambda/vpc/config/whatismyip/config.jsonnet +++ /dev/null @@ -1,11 +0,0 @@ -local sub = import '../../../../../../../build/config/substation.libsonnet'; - -{ - transforms: [ - // Get the IP address of the service and return it in response. - sub.tf.enrich.http.get(settings={ url: 'https://ipinfo.io/ip' }), - sub.tf.object.copy( - settings={ object: { target_key: 'ip' } }, - ), - ], -} diff --git a/v1/examples/terraform/aws/lambda/vpc/terraform/_resources.tf b/v1/examples/terraform/aws/lambda/vpc/terraform/_resources.tf deleted file mode 100644 index 17e80f65..00000000 --- a/v1/examples/terraform/aws/lambda/vpc/terraform/_resources.tf +++ /dev/null @@ -1,29 +0,0 @@ -data "aws_caller_identity" "caller" {} - -module "appconfig" { - source = "../../../../../../build/terraform/aws/appconfig" - - config = { - name = "substation" - environments = [{ - name = "example" - }] - } -} - -module "ecr" { - source = "../../../../../../build/terraform/aws/ecr" - - config = { - name = "substation" - force_delete = true - } -} - -# VPC shared by all Substation resources. -# -# By default, this creates a /16 VPC with private subnets -# in three availability zones in us-east-1. 
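# For scale, a /16 network is 65,536 addresses, far more than this example
# needs. The module is used with its defaults below; whether the CIDR or
# availability zones can be overridden depends on the variables exposed by
# build/terraform/aws/networking/vpc, which are not shown in this diff.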
-module "vpc_substation" { - source = "../../../../../../build/terraform/aws/networking/vpc" -} diff --git a/v1/examples/terraform/aws/lambda/vpc/terraform/whatismyip.tf b/v1/examples/terraform/aws/lambda/vpc/terraform/whatismyip.tf deleted file mode 100644 index 822501bd..00000000 --- a/v1/examples/terraform/aws/lambda/vpc/terraform/whatismyip.tf +++ /dev/null @@ -1,36 +0,0 @@ -module "whatismyip" { - source = "../../../../../../build/terraform/aws/lambda" - appconfig = module.appconfig - - config = { - name = "whatismyip" - description = "Substation node that acts as a synchronous microservice" - image_uri = "${module.ecr.url}:v1.3.0" - image_arm = true - - memory = 128 - timeout = 10 - - vpc_config = { - subnet_ids = module.vpc_substation.private_subnet_ids - security_group_ids = [module.vpc_substation.default_security_group_id] - } - - env = { - "SUBSTATION_CONFIG" : "http://localhost:2772/applications/substation/environments/example/configurations/whatismyip" - "SUBSTATION_LAMBDA_HANDLER" : "AWS_LAMBDA" - "SUBSTATION_DEBUG" : true - } - } - - - depends_on = [ - module.appconfig.name, - module.ecr.url, - ] -} - -resource "aws_lambda_function_url" "substation_microservice" { - function_name = module.whatismyip.name - authorization_type = "NONE" -} diff --git a/v1/examples/terraform/aws/s3/data_lake/config/node/config.jsonnet b/v1/examples/terraform/aws/s3/data_lake/config/node/config.jsonnet deleted file mode 100644 index 2322accb..00000000 --- a/v1/examples/terraform/aws/s3/data_lake/config/node/config.jsonnet +++ /dev/null @@ -1,28 +0,0 @@ -local sub = import '../../../../../../../build/config/substation.libsonnet'; - -// This is a placeholder that must be replaced with the bucket produced by Terraform. -local bucket = 'c034c726-70bf-c397-81bd-c9a0d9e82371-substation'; - -{ - concurrency: 1, - // All data is buffered in memory, then written in JSON Lines format to S3. - transforms: [ - sub.tf.send.aws.s3( - settings={ - bucket_name: bucket, - file_path: { prefix: 'original', time_format: '2006/01/02', uuid: true }, - auxiliary_transforms: sub.pattern.tf.fmt.jsonl, - } - ), - sub.tf.object.insert( - settings={ object: { target_key: 'transformed' }, value: true } - ), - sub.tf.send.aws.s3( - settings={ - bucket_name: bucket, - file_path: { prefix: 'transformed', time_format: '2006/01/02', uuid: true }, - auxiliary_transforms: sub.pattern.tf.fmt.jsonl, - } - ), - ], -} diff --git a/v1/examples/terraform/aws/s3/data_lake/terraform/_resources.tf b/v1/examples/terraform/aws/s3/data_lake/terraform/_resources.tf deleted file mode 100644 index af46d213..00000000 --- a/v1/examples/terraform/aws/s3/data_lake/terraform/_resources.tf +++ /dev/null @@ -1,38 +0,0 @@ -data "aws_caller_identity" "caller" {} - -module "appconfig" { - source = "../../../../../../build/terraform/aws/appconfig" - - config = { - name = "substation" - environments = [{ - name = "example" - }] - } -} - -# Repository for the core Substation application. -module "ecr" { - source = "../../../../../../build/terraform/aws/ecr" - - config = { - name = "substation" - force_delete = true - } -} - -resource "random_uuid" "s3" {} - -# Monolithic S3 bucket used to store all data. -module "s3" { - source = "../../../../../../build/terraform/aws/s3" - - config = { - # Bucket name is randomized to avoid collisions. 
- name = "${random_uuid.s3.result}-substation" - } - - access = [ - module.lambda_node.role.name, - ] -} diff --git a/v1/examples/terraform/aws/s3/data_lake/terraform/node.tf b/v1/examples/terraform/aws/s3/data_lake/terraform/node.tf deleted file mode 100644 index 6dc872f9..00000000 --- a/v1/examples/terraform/aws/s3/data_lake/terraform/node.tf +++ /dev/null @@ -1,35 +0,0 @@ -module "lambda_gateway" { - source = "../../../../../../build/terraform/aws/api_gateway/lambda" - lambda = module.lambda_node - - config = { - name = "node_gateway" - } - - depends_on = [ - module.lambda_node - ] -} - -module "lambda_node" { - source = "../../../../../../build/terraform/aws/lambda" - appconfig = module.appconfig - - config = { - name = "node" - description = "Substation node that writes data to S3" - image_uri = "${module.ecr.url}:v1.3.0" - image_arm = true - - env = { - "SUBSTATION_CONFIG" : "http://localhost:2772/applications/substation/environments/example/configurations/node" - "SUBSTATION_LAMBDA_HANDLER" : "AWS_API_GATEWAY" - "SUBSTATION_DEBUG" : true - } - } - - depends_on = [ - module.appconfig.name, - module.ecr.url, - ] -} diff --git a/v1/examples/terraform/aws/s3/retry_on_failure/config/node/config.jsonnet b/v1/examples/terraform/aws/s3/retry_on_failure/config/node/config.jsonnet deleted file mode 100644 index 4523a131..00000000 --- a/v1/examples/terraform/aws/s3/retry_on_failure/config/node/config.jsonnet +++ /dev/null @@ -1,13 +0,0 @@ -// This config generates an error to engage the retry on failure feature. -// The pipeline will retry forever until the error is resolved. Change the -// transform to `sub.tf.send.stdout()` to resolve the error and print the logs -// from S3. -local sub = import '../../../../../../../build/config/substation.libsonnet'; - -{ - concurrency: 1, - transforms: [ - sub.tf.util.err(settings={ message: 'simulating error to trigger retries' }), - // sub.tf.send.stdout(), - ], -} diff --git a/v1/examples/terraform/aws/s3/retry_on_failure/config/retrier/config.jsonnet b/v1/examples/terraform/aws/s3/retry_on_failure/config/retrier/config.jsonnet deleted file mode 100644 index 8d1c04be..00000000 --- a/v1/examples/terraform/aws/s3/retry_on_failure/config/retrier/config.jsonnet +++ /dev/null @@ -1,17 +0,0 @@ -// This config transforms the failure record sent by the node Lambda function -// so that it becomes a new request. The new request bypasses S3 and is sent -// directly to the Lambda function. -// -// Additional information is available in the payload and can be used to make -// decisions about the new request or notify external systems about the failure. -local sub = import '../../../../../../../build/config/substation.libsonnet'; - -{ - concurrency: 1, - transforms: [ - // If needed, then use other information from the failure record to - // decide what to do or notify external systems about the failure. 
- sub.tf.obj.cp(settings={ object: { source_key: 'requestPayload' } }), - sub.tf.send.aws.lambda(settings={ function_name: 'node' }), - ], -} diff --git a/v1/examples/terraform/aws/s3/retry_on_failure/terraform/_resources.tf b/v1/examples/terraform/aws/s3/retry_on_failure/terraform/_resources.tf deleted file mode 100644 index d6a5f9b7..00000000 --- a/v1/examples/terraform/aws/s3/retry_on_failure/terraform/_resources.tf +++ /dev/null @@ -1,57 +0,0 @@ -module "appconfig" { - source = "../../../../../../build/terraform/aws/appconfig" - - config = { - name = "substation" - environments = [{ name = "example" }] - } -} - -module "ecr" { - source = "../../../../../../build/terraform/aws/ecr" - - config = { - name = "substation" - force_delete = true - } -} - -resource "random_uuid" "s3" {} - -# Monolithic S3 bucket used to store all data. -module "s3" { - source = "../../../../../../build/terraform/aws/s3" - - config = { - # Bucket name is randomized to avoid collisions. - name = "${random_uuid.s3.result}-substation" - } - - access = [ - # Reads objects from the bucket. - module.lambda_node.role.name, - ] -} - -module "sqs" { - source = "../../../../../../build/terraform/aws/sqs" - - config = { - name = "substation_retries" - - # Delay for 30 seconds to allow the pipeline to recover. - delay = 30 - - # Timeout should be at least 6x the timeout of any consuming Lambda functions, plus the batch window. - # Refer to the Lambda documentation for more information: - # https://docs.aws.amazon.com/lambda/latest/dg/with-sqs.html. - timeout = 60 - } - - access = [ - # Sends messages to the queue. - module.lambda_retrier.role.name, - # Receives messages from the queue. - module.lambda_node.role.name, - ] -} diff --git a/v1/examples/terraform/aws/s3/retry_on_failure/terraform/node_with_retrier.tf b/v1/examples/terraform/aws/s3/retry_on_failure/terraform/node_with_retrier.tf deleted file mode 100644 index 73a66b61..00000000 --- a/v1/examples/terraform/aws/s3/retry_on_failure/terraform/node_with_retrier.tf +++ /dev/null @@ -1,86 +0,0 @@ -module "lambda_node" { - source = "../../../../../../build/terraform/aws/lambda" - appconfig = module.appconfig - - config = { - name = "node" - description = "Substation node that reads data from S3. The node will retry forever if it fails." - image_uri = "${module.ecr.url}:v1.3.0" - image_arm = true - - env = { - "SUBSTATION_CONFIG" : "http://localhost:2772/applications/substation/environments/example/configurations/node" - "SUBSTATION_LAMBDA_HANDLER" : "AWS_S3" - "SUBSTATION_DEBUG" : true - } - } - - # The retrier Lambda must be able to invoke this - # Lambda function to retry failed S3 events. - access = [ - module.lambda_retrier.role.name, - ] -} - -resource "aws_lambda_permission" "node" { - statement_id = "AllowExecutionFromS3Bucket" - action = "lambda:InvokeFunction" - function_name = module.lambda_node.name - principal = "s3.amazonaws.com" - source_arn = module.s3.arn -} - -resource "aws_s3_bucket_notification" "node" { - bucket = module.s3.id - - lambda_function { - lambda_function_arn = module.lambda_node.arn - events = ["s3:ObjectCreated:*"] - } - - depends_on = [ - aws_lambda_permission.node - ] -} - -# Configures the Lambda function to send failed events to the SQS queue. -resource "aws_lambda_function_event_invoke_config" "node" { - function_name = module.lambda_node.name - - # This example disables the built-in retry mechanism. 
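# For asynchronous invocations, Lambda retries a failed event up to
# maximum_retry_attempts times (0-2, default 2) before invoking the
# on_failure destination. Setting it to 0 below hands every failure to the
# SQS retry queue immediately, so the retry cadence is controlled by the
# queue's 30-second delivery delay rather than by Lambda.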
- maximum_retry_attempts = 0 - - destination_config { - on_failure { - destination = module.sqs.arn - } - } -} - -module "lambda_retrier" { - source = "../../../../../../build/terraform/aws/lambda" - appconfig = module.appconfig - - config = { - name = "retrier" - description = "Substation node that receives events from the retry queue and invokes the original Lambda function." - image_uri = "${module.ecr.url}:v1.3.0" - image_arm = true - - # Six times this timeout plus the 30-second batch window must not exceed the queue's 60-second visibility timeout (6 * 5 + 30 = 60). - timeout = 5 - - env = { - "SUBSTATION_CONFIG" : "http://localhost:2772/applications/substation/environments/example/configurations/retrier" - "SUBSTATION_LAMBDA_HANDLER" : "AWS_SQS" - "SUBSTATION_DEBUG" : true - } - } -} - -resource "aws_lambda_event_source_mapping" "retrier" { - event_source_arn = module.sqs.arn - function_name = module.lambda_retrier.arn - maximum_batching_window_in_seconds = 30 - batch_size = 10 -} diff --git a/v1/examples/terraform/aws/s3/sns/config/node/config.jsonnet b/v1/examples/terraform/aws/s3/sns/config/node/config.jsonnet deleted file mode 100644 index bb84b999..00000000 --- a/v1/examples/terraform/aws/s3/sns/config/node/config.jsonnet +++ /dev/null @@ -1,8 +0,0 @@ -local sub = import '../../../../../../../build/config/substation.libsonnet'; - -{ - concurrency: 1, - transforms: [ - sub.tf.send.stdout(), - ], -} diff --git a/v1/examples/terraform/aws/s3/sns/terraform/_resources.tf b/v1/examples/terraform/aws/s3/sns/terraform/_resources.tf deleted file mode 100644 index d1849460..00000000 --- a/v1/examples/terraform/aws/s3/sns/terraform/_resources.tf +++ /dev/null @@ -1,159 +0,0 @@ -data "aws_caller_identity" "caller" {} - -# KMS encryption key that is shared by all Substation resources. -module "kms" { - source = "../../../../../../build/terraform/aws/kms" - config = { - name = "alias/substation" - policy = data.aws_iam_policy_document.kms.json - } -} - -# This policy is required to support encrypted SNS topics. -# More information: https://repost.aws/knowledge-center/cloudwatch-receive-sns-for-alarm-trigger -data "aws_iam_policy_document" "kms" { - # Allows CloudWatch to access encrypted SNS topic. - statement { - sid = "CloudWatch" - effect = "Allow" - actions = [ - "kms:Decrypt", - "kms:GenerateDataKey" - ] - - principals { - type = "Service" - identifiers = ["cloudwatch.amazonaws.com"] - } - - resources = ["*"] - } - - # Allows S3 to access encrypted SNS topic. - statement { - sid = "S3" - effect = "Allow" - actions = [ - "kms:Decrypt", - "kms:GenerateDataKey" - ] - - principals { - type = "Service" - identifiers = ["s3.amazonaws.com"] - } - - resources = ["*"] - } - - # Default key policy for KMS. - # https://docs.aws.amazon.com/kms/latest/developerguide/determining-access-key-policy.html - statement { - sid = "KMS" - effect = "Allow" - actions = [ - "kms:*", - ] - - principals { - type = "AWS" - identifiers = ["arn:aws:iam::${data.aws_caller_identity.caller.account_id}:root"] - } - - resources = ["*"] - } -} - -module "appconfig" { - source = "../../../../../../build/terraform/aws/appconfig" - - config = { - name = "substation" - environments = [{ - name = "example" - }] - } -} - -# Repository for the core Substation application. -module "ecr" { - source = "../../../../../../build/terraform/aws/ecr" - kms = module.kms - - config = { - name = "substation" - force_delete = true - } -} - -resource "random_uuid" "s3" {} - -# S3 bucket used to store all data.
-module "s3" { - source = "../../../../../../build/terraform/aws/s3" - kms = module.kms - - config = { - # Bucket name is randomized to avoid collisions. - name = "${random_uuid.s3.result}-substation" - } - - access = [ - module.lambda_node.role.name, - ] -} - -module "sns" { - source = "../../../../../../build/terraform/aws/sns" - kms = module.kms - - config = { - name = "substation" - } -} - -# Grants the S3 bucket permission to publish to the SNS topic. -resource "aws_sns_topic_policy" "s3_access" { - arn = module.sns.arn - policy = data.aws_iam_policy_document.s3_access_policy.json -} - -data "aws_iam_policy_document" "s3_access_policy" { - statement { - actions = [ - "sns:Publish", - ] - - resources = [ - module.sns.arn, - ] - - condition { - test = "ArnEquals" - variable = "aws:SourceArn" - - values = [ - module.s3.arn, - ] - } - - principals { - type = "Service" - identifiers = ["s3.amazonaws.com"] - } - - effect = "Allow" - } -} - -resource "aws_s3_bucket_notification" "sns" { - bucket = module.s3.id - - topic { - topic_arn = module.sns.arn - - events = [ - "s3:ObjectCreated:*", - ] - } -} diff --git a/v1/examples/terraform/aws/s3/sns/terraform/node.tf b/v1/examples/terraform/aws/s3/sns/terraform/node.tf deleted file mode 100644 index 46ed06f7..00000000 --- a/v1/examples/terraform/aws/s3/sns/terraform/node.tf +++ /dev/null @@ -1,37 +0,0 @@ -module "lambda_node" { - source = "../../../../../../build/terraform/aws/lambda" - kms = module.kms - appconfig = module.appconfig - - config = { - name = "node" - description = "Substation node that reads data from S3 via SNS." - image_uri = "${module.ecr.url}:v1.3.0" - image_arm = true - - env = { - "SUBSTATION_CONFIG" : "http://localhost:2772/applications/substation/environments/example/configurations/node" - "SUBSTATION_LAMBDA_HANDLER" : "AWS_S3_SNS" - "SUBSTATION_DEBUG" : true - } - } - - depends_on = [ - module.appconfig.name, - module.ecr.url, - ] -} - -resource "aws_sns_topic_subscription" "node" { - topic_arn = module.sns.arn - protocol = "lambda" - endpoint = module.lambda_node.arn -} - -resource "aws_lambda_permission" "node" { - statement_id = "AllowExecutionFromSNS" - action = "lambda:InvokeFunction" - function_name = module.lambda_node.name - principal = "sns.amazonaws.com" - source_arn = module.sns.arn -} diff --git a/v1/examples/terraform/aws/s3/xdr/config/node/config.jsonnet b/v1/examples/terraform/aws/s3/xdr/config/node/config.jsonnet deleted file mode 100644 index 74f8597c..00000000 --- a/v1/examples/terraform/aws/s3/xdr/config/node/config.jsonnet +++ /dev/null @@ -1,60 +0,0 @@ -local sub = import '../../../../../../../build/config/substation.libsonnet'; - -// This is a placeholder that must be replaced with the bucket produced by Terraform. -local bucket = 'substation-3e820117-61f0-2fbb-05c4-1fba0db9d82c'; -local const = import 'const.libsonnet'; - -{ - concurrency: 1, - transforms: [ - // XDR threat signals rely on the meta_switch transform to conditionally determine - // if an event matches risk criteria. If the event matches, a threat signal is created. - // This transform supports any combination of if-elif-else logic. - sub.tf.meta.switch({ cases: [ - { - condition: sub.cnd.any([ - sub.cnd.str.has({ obj: { src: 'user_name' }, value: 'root' }), - ]), - transform: const.threat_signal({ name: 'root_activity', description: 'Root user activity detected.', risk_score: 74 }), - }, - ] }), - // Complex conditions are made possible by using the meta_condition inspector. 
- sub.tf.meta.switch({ cases: [ - { - // This condition requires both of these statements to be true: - // - // - The `source_ip` field is a public IP address. - // - The `user_name` field contains either `root` or `admin`. - condition: sub.cnd.all([ - sub.cnd.meta.condition({ condition: sub.cnd.none( - sub.pattern.cnd.net.ip.internal(key='source_ip') - ) }), - sub.cnd.meta.condition({ condition: sub.cnd.any([ - sub.cnd.str.has({ obj: { src: 'user_name' }, value: 'root' }), - sub.cnd.str.has({ obj: { src: 'user_name' }, value: 'admin' }), - ]) }), - ]), - transform: const.threat_signal({ name: 'public_ip_root_admin_activity', description: 'Public IP root or admin user activity detected.', risk_score: 99 }), - }, - ] }), - // If the event contains a threat signal, then it's written to the XDR path - // in the S3 bucket; otherwise the event is discarded. The `auxiliary_transforms` - // field is used to format the data as a JSON Lines file. - // - // If there are no threat signals, then the event is discarded. - sub.tf.meta.switch({ cases: [ - { - condition: sub.cnd.any([ - sub.cnd.num.len.gt({ obj: { src: const.threat_signals_key }, value: 0 }), - ]), - transform: sub.tf.send.aws.s3( - settings={ - bucket_name: bucket, - file_path: { prefix: 'xdr', time_format: '2006/01/02', uuid: true, suffix: '.jsonl' }, - auxiliary_transforms: sub.pattern.tf.fmt.jsonl, - } - ), - }, - ] }), - ], -} diff --git a/v1/examples/terraform/aws/s3/xdr/config/node/const.libsonnet b/v1/examples/terraform/aws/s3/xdr/config/node/const.libsonnet deleted file mode 100644 index 300a283a..00000000 --- a/v1/examples/terraform/aws/s3/xdr/config/node/const.libsonnet +++ /dev/null @@ -1,16 +0,0 @@ -local sub = import '../../../../../../../build/config/substation.libsonnet'; - -{ - threat_signals_key: 'threat.signals', - // threat_signal is a custom function that appends a threat signal to an - // event as enrichment metadata. - // - // An alternate approach is to compose a new threat signal event within - // the message metadata and send it as a separate event. This results in - // smaller events with less context and requires a correlation value - // (e.g., hash, ID) to link the threat signal to the original event. 
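// As used in this example, each call appends an object of the form
// { name, description, risk_score } to the 'threat.signals' array on the
// event itself; the stdout.jsonl fixture shows the resulting shape.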
- threat_signal(settings): sub.tf.obj.insert({ - obj: { trg: sub.helpers.obj.append_array($.threat_signals_key) }, - value: { name: settings.name, description: settings.description, risk_score: settings.risk_score }, - }), -} diff --git a/v1/examples/terraform/aws/s3/xdr/data.jsonl b/v1/examples/terraform/aws/s3/xdr/data.jsonl deleted file mode 100644 index 8cc71d03..00000000 --- a/v1/examples/terraform/aws/s3/xdr/data.jsonl +++ /dev/null @@ -1,4 +0,0 @@ -{"user_name":"alice","source_ip":"192.168.1.2"} -{"user_name":"admin","source_ip":"192.168.1.3"} -{"user_name":"bob","source_ip":"3.1.1.2"} -{"user_name":"root","source_ip":"3.1.1.3"} diff --git a/v1/examples/terraform/aws/s3/xdr/stdout.jsonl b/v1/examples/terraform/aws/s3/xdr/stdout.jsonl deleted file mode 100644 index 85d9f34e..00000000 --- a/v1/examples/terraform/aws/s3/xdr/stdout.jsonl +++ /dev/null @@ -1,2 +0,0 @@ -{"user_name":"root","source_ip":"192.168.1.3","threat":{"signals":[{"description":"Root user activity detected.","name":"root_activity","risk_score":74}]}} -{"user_name":"admin","source_ip":"3.1.1.3","threat":{"signals":[{"description":"Public IP root or admin user activity detected.","name":"public_ip_root_admin_activity","risk_score":99}]}} diff --git a/v1/examples/terraform/aws/s3/xdr/terraform/_resources.tf b/v1/examples/terraform/aws/s3/xdr/terraform/_resources.tf deleted file mode 100644 index a5c87d5f..00000000 --- a/v1/examples/terraform/aws/s3/xdr/terraform/_resources.tf +++ /dev/null @@ -1,50 +0,0 @@ -data "aws_caller_identity" "caller" {} - -module "appconfig" { - source = "../../../../../../build/terraform/aws/appconfig" - - config = { - name = "substation" - environments = [{ - name = "example" - }] - } -} - -# Repository for the core Substation application. -module "ecr" { - source = "../../../../../../build/terraform/aws/ecr" - - config = { - name = "substation" - force_delete = true - } -} - -resource "random_uuid" "s3" {} - -# Monolithic S3 bucket used to store all data. -module "s3" { - source = "../../../../../../build/terraform/aws/s3" - - config = { - # Bucket name is randomized to avoid collisions. - name = "substation-${random_uuid.s3.result}" - } - - access = [ - module.lambda_node.role.name, - ] -} - -resource "aws_s3_bucket_notification" "bucket_notification" { - bucket = module.s3.id - - lambda_function { - lambda_function_arn = module.lambda_node.arn - events = ["s3:ObjectCreated:*"] - filter_prefix = "data/" - } - - depends_on = [aws_lambda_permission.allow_bucket] -} diff --git a/v1/examples/terraform/aws/s3/xdr/terraform/node.tf b/v1/examples/terraform/aws/s3/xdr/terraform/node.tf deleted file mode 100644 index acef20f6..00000000 --- a/v1/examples/terraform/aws/s3/xdr/terraform/node.tf +++ /dev/null @@ -1,30 +0,0 @@ -module "lambda_node" { - source = "../../../../../../build/terraform/aws/lambda" - appconfig = module.appconfig - - config = { - name = "node" - description = "Substation node that reads and writes data to S3." 
- image_uri = "${module.ecr.url}:v1.3.0" - image_arm = true - - env = { - "SUBSTATION_CONFIG" : "http://localhost:2772/applications/substation/environments/example/configurations/node" - "SUBSTATION_LAMBDA_HANDLER" : "AWS_S3" - "SUBSTATION_DEBUG" : true - } - } - - depends_on = [ - module.appconfig.name, - module.ecr.url, - ] -} - -resource "aws_lambda_permission" "allow_bucket" { - statement_id = "AllowExecutionFromS3Bucket" - action = "lambda:InvokeFunction" - function_name = module.lambda_node.name - principal = "s3.amazonaws.com" - source_arn = module.s3.arn -} diff --git a/v1/examples/terraform/aws/sns/pub_sub/config/client/config.jsonnet b/v1/examples/terraform/aws/sns/pub_sub/config/client/config.jsonnet deleted file mode 100644 index 89507de6..00000000 --- a/v1/examples/terraform/aws/sns/pub_sub/config/client/config.jsonnet +++ /dev/null @@ -1,11 +0,0 @@ -local sub = import '../../../../../../../build/config/substation.libsonnet'; - -{ - concurrency: 1, - transforms: [ - sub.tf.send.aws.sns( - // This is a placeholder that must be replaced with the SNS ARN produced by Terraform. - settings={ arn: 'arn:aws:sns:us-east-1:123456789012:substation' }, - ), - ], -} diff --git a/v1/examples/terraform/aws/sns/pub_sub/config/subscriber_x/config.jsonnet b/v1/examples/terraform/aws/sns/pub_sub/config/subscriber_x/config.jsonnet deleted file mode 100644 index bb84b999..00000000 --- a/v1/examples/terraform/aws/sns/pub_sub/config/subscriber_x/config.jsonnet +++ /dev/null @@ -1,8 +0,0 @@ -local sub = import '../../../../../../../build/config/substation.libsonnet'; - -{ - concurrency: 1, - transforms: [ - sub.tf.send.stdout(), - ], -} diff --git a/v1/examples/terraform/aws/sns/pub_sub/config/subscriber_y/config.jsonnet b/v1/examples/terraform/aws/sns/pub_sub/config/subscriber_y/config.jsonnet deleted file mode 100644 index bb84b999..00000000 --- a/v1/examples/terraform/aws/sns/pub_sub/config/subscriber_y/config.jsonnet +++ /dev/null @@ -1,8 +0,0 @@ -local sub = import '../../../../../../../build/config/substation.libsonnet'; - -{ - concurrency: 1, - transforms: [ - sub.tf.send.stdout(), - ], -} diff --git a/v1/examples/terraform/aws/sns/pub_sub/config/subscriber_z/config.jsonnet b/v1/examples/terraform/aws/sns/pub_sub/config/subscriber_z/config.jsonnet deleted file mode 100644 index bb84b999..00000000 --- a/v1/examples/terraform/aws/sns/pub_sub/config/subscriber_z/config.jsonnet +++ /dev/null @@ -1,8 +0,0 @@ -local sub = import '../../../../../../../build/config/substation.libsonnet'; - -{ - concurrency: 1, - transforms: [ - sub.tf.send.stdout(), - ], -} diff --git a/v1/examples/terraform/aws/sns/pub_sub/terraform/_resources.tf b/v1/examples/terraform/aws/sns/pub_sub/terraform/_resources.tf deleted file mode 100644 index 5ffa1a86..00000000 --- a/v1/examples/terraform/aws/sns/pub_sub/terraform/_resources.tf +++ /dev/null @@ -1,35 +0,0 @@ -data "aws_caller_identity" "caller" {} - -module "appconfig" { - source = "../../../../../../build/terraform/aws/appconfig" - - config = { - name = "substation" - environments = [{ - name = "example" - }] - } -} - -module "ecr" { - source = "../../../../../../build/terraform/aws/ecr" - - config = { - name = "substation" - force_delete = true - } -} - -module "sns" { - source = "../../../../../../build/terraform/aws/sns" - - config = { - name = "substation" - } - - access = [ - module.subscriber_x.role.name, - module.subscriber_y.role.name, - module.subscriber_z.role.name, - ] -} diff --git a/v1/examples/terraform/aws/sns/pub_sub/terraform/subscribers.tf 
b/v1/examples/terraform/aws/sns/pub_sub/terraform/subscribers.tf deleted file mode 100644 index 33ed4d74..00000000 --- a/v1/examples/terraform/aws/sns/pub_sub/terraform/subscribers.tf +++ /dev/null @@ -1,121 +0,0 @@ -module "subscriber_x" { - source = "../../../../../../build/terraform/aws/lambda" - appconfig = module.appconfig - - config = { - name = "subscriber_x" - description = "Substation node that subscribes to SNS" - image_uri = "${module.ecr.url}:v1.3.0" - image_arm = true - env = { - "SUBSTATION_CONFIG" : "http://localhost:2772/applications/substation/environments/example/configurations/subscriber_x" - "SUBSTATION_LAMBDA_HANDLER" : "AWS_SNS" - "SUBSTATION_DEBUG" : true - } - } - - depends_on = [ - module.appconfig.name, - module.ecr.url, - ] -} - -resource "aws_sns_topic_subscription" "subscriber_x" { - topic_arn = module.sns.arn - protocol = "lambda" - endpoint = module.subscriber_x.arn -} - -resource "aws_lambda_permission" "subscriber_x" { - statement_id = "AllowExecutionFromSNS" - action = "lambda:InvokeFunction" - function_name = module.subscriber_x.name - principal = "sns.amazonaws.com" - source_arn = module.sns.arn - - depends_on = [ - module.subscriber_x.name - ] -} - -module "subscriber_y" { - source = "../../../../../../build/terraform/aws/lambda" - appconfig = module.appconfig - - config = { - name = "subscriber_y" - description = "Substation node that subscribes to SNS" - image_uri = "${module.ecr.url}:v1.3.0" - image_arm = true - env = { - "SUBSTATION_CONFIG" : "http://localhost:2772/applications/substation/environments/example/configurations/subscriber_y" - "SUBSTATION_LAMBDA_HANDLER" : "AWS_SNS" - "SUBSTATION_DEBUG" : true - } - } - - depends_on = [ - module.appconfig.name, - module.ecr.url, - ] -} - -resource "aws_sns_topic_subscription" "subscriber_y" { - topic_arn = module.sns.arn - protocol = "lambda" - endpoint = module.subscriber_y.arn -} - -resource "aws_lambda_permission" "subscriber_y" { - statement_id = "AllowExecutionFromSNS" - action = "lambda:InvokeFunction" - function_name = module.subscriber_y.name - principal = "sns.amazonaws.com" - source_arn = module.sns.arn - - depends_on = [ - module.subscriber_y.name - ] -} - - -module "subscriber_z" { - source = "../../../../../../build/terraform/aws/lambda" - appconfig = module.appconfig - - config = { - name = "subscriber_z" - description = "Substation node that subscribes to SNS" - image_uri = "${module.ecr.url}:v1.3.0" - image_arm = true - env = { - "SUBSTATION_CONFIG" : "http://localhost:2772/applications/substation/environments/example/configurations/subscriber_z" - "SUBSTATION_LAMBDA_HANDLER" : "AWS_SNS" - "SUBSTATION_DEBUG" : true - } - } - - - depends_on = [ - module.appconfig.name, - module.ecr.url, - ] -} - -resource "aws_sns_topic_subscription" "subscriber_z" { - topic_arn = module.sns.arn - protocol = "lambda" - endpoint = module.subscriber_z.arn -} - -resource "aws_lambda_permission" "subscriber_z" { - statement_id = "AllowExecutionFromSNS" - action = "lambda:InvokeFunction" - function_name = module.subscriber_z.name - principal = "sns.amazonaws.com" - source_arn = module.sns.arn - - depends_on = [ - module.subscriber_z.name - ] -} diff --git a/v1/examples/terraform/aws/sqs/microservice/config/frontend/config.jsonnet b/v1/examples/terraform/aws/sqs/microservice/config/frontend/config.jsonnet deleted file mode 100644 index 37157bb6..00000000 --- a/v1/examples/terraform/aws/sqs/microservice/config/frontend/config.jsonnet +++ /dev/null @@ -1,25 +0,0 @@ -local sub = import 
'../../../../../../../build/config/substation.libsonnet'; - -{ - transforms: [ - // Dynamically handles input from either Lambda URL or synchronous invocation. - sub.pattern.transform.conditional( - condition=sub.condition.all([ - sub.condition.number.length.greater_than( - settings={ object: { source_key: 'body' }, value: 0 } - ), - ]), - transform=sub.transform.object.copy( - settings={ object: { source_key: 'body' } } - ), - ), - // This UUID is used by the client to retrieve the processed result from DynamoDB. - sub.transform.string.uuid( - settings={ object: { target_key: 'uuid' } }, - ), - sub.transform.send.aws.sqs( - // This is a placeholder that must be replaced with the SQS ARN produced by Terraform. - settings={ arn: 'arn:aws:sqs:us-east-1:123456789012:substation' }, - ), - ], -} diff --git a/v1/examples/terraform/aws/sqs/microservice/config/microservice/config.jsonnet b/v1/examples/terraform/aws/sqs/microservice/config/microservice/config.jsonnet deleted file mode 100644 index bc3ea243..00000000 --- a/v1/examples/terraform/aws/sqs/microservice/config/microservice/config.jsonnet +++ /dev/null @@ -1,29 +0,0 @@ -local sub = import '../../../../../../../build/config/substation.libsonnet'; - -{ - concurrency: 1, - transforms: [ - // Remove any events that do not have a 'uuid' field. - sub.pattern.transform.conditional( - condition=sub.condition.all(sub.pattern.condition.number.length.eq_zero(key='uuid')), - transform=sub.transform.utility.drop(), - ), - // Performs a reverse DNS lookup on the 'addr' field if it is a public IP address. - sub.pattern.transform.conditional( - condition=sub.condition.none(sub.pattern.condition.network.ip.internal(key='addr')), - transform=sub.transform.enrich.dns.ip_lookup( - settings={ object: { source_key: 'addr', target_key: 'domain' } }, - ), - ), - // The uuid field is used as the partition key for the DynamoDB table. - sub.transform.object.copy( - settings={ object: { source_key: 'uuid', target_key: 'PK' } } - ), - sub.transform.object.delete( - settings={ object: { source_key: 'uuid' } } - ), - sub.transform.send.aws.dynamodb( - settings={ table_name: 'substation' } - ), - ], -} diff --git a/v1/examples/terraform/aws/sqs/microservice/terraform/_resources.tf b/v1/examples/terraform/aws/sqs/microservice/terraform/_resources.tf deleted file mode 100644 index 9a00233b..00000000 --- a/v1/examples/terraform/aws/sqs/microservice/terraform/_resources.tf +++ /dev/null @@ -1,56 +0,0 @@ -data "aws_caller_identity" "caller" {} - -module "appconfig" { - source = "../../../../../../build/terraform/aws/appconfig" - - config = { - name = "substation" - environments = [{ - name = "example" - }] - } -} - -module "ecr" { - source = "../../../../../../build/terraform/aws/ecr" - - config = { - name = "substation" - force_delete = true - } -} - -module "sqs" { - source = "../../../../../../build/terraform/aws/sqs" - - config = { - name = "substation" - } - - access = [ - # Reads from SQS. - module.microservice.role.name, - # Writes to SQS. - module.frontend.role.name, - ] -} - -module "dynamodb" { - source = "../../../../../../build/terraform/aws/dynamodb" - - config = { - name = "substation" - hash_key = "PK" - attributes = [ - { - name = "PK" - type = "S" - } - ] - } - - access = [ - # Writes to DynamoDB. 
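# End to end: the frontend stamps each request with a UUID and queues it,
# the microservice writes the processed result to this table with the UUID
# as its partition key (PK), and the client fetches the result by that
# UUID. For example (a sketch using the AWS CLI; the UUID is whatever the
# frontend returned):
#
#   aws dynamodb get-item --table-name substation \
#     --key '{"PK": {"S": "<uuid>"}}'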
- module.microservice.role.name, - ] -} diff --git a/v1/examples/terraform/aws/sqs/microservice/terraform/frontend.tf b/v1/examples/terraform/aws/sqs/microservice/terraform/frontend.tf deleted file mode 100644 index 924c7e6c..00000000 --- a/v1/examples/terraform/aws/sqs/microservice/terraform/frontend.tf +++ /dev/null @@ -1,26 +0,0 @@ -module "frontend" { - source = "../../../../../../build/terraform/aws/lambda" - appconfig = module.appconfig - - config = { - name = "frontend" - description = "Substation node that acts as a frontend to an asynchronous microservice" - image_uri = "${module.ecr.url}:v1.3.0" - image_arm = true - env = { - "SUBSTATION_CONFIG" : "http://localhost:2772/applications/substation/environments/example/configurations/frontend" - "SUBSTATION_LAMBDA_HANDLER" : "AWS_LAMBDA" - "SUBSTATION_DEBUG" : true - } - } - - depends_on = [ - module.appconfig.name, - module.ecr.url, - ] -} - -resource "aws_lambda_function_url" "frontend" { - function_name = module.frontend.name - authorization_type = "NONE" -} diff --git a/v1/examples/terraform/aws/sqs/microservice/terraform/microservice.tf b/v1/examples/terraform/aws/sqs/microservice/terraform/microservice.tf deleted file mode 100644 index 327575dd..00000000 --- a/v1/examples/terraform/aws/sqs/microservice/terraform/microservice.tf +++ /dev/null @@ -1,31 +0,0 @@ -module "microservice" { - source = "../../../../../../build/terraform/aws/lambda" - appconfig = module.appconfig - - config = { - name = "microservice" - description = "Substation node that acts as an asynchronous microservice" - image_uri = "${module.ecr.url}:v1.3.0" - image_arm = true - - memory = 128 - timeout = 10 - env = { - "SUBSTATION_CONFIG" : "http://localhost:2772/applications/substation/environments/example/configurations/microservice" - "SUBSTATION_LAMBDA_HANDLER" : "AWS_SQS" - "SUBSTATION_DEBUG" : true - } - } - - depends_on = [ - module.appconfig.name, - module.ecr.url, - ] -} - -resource "aws_lambda_event_source_mapping" "microservice" { - event_source_arn = module.sqs.arn - function_name = module.microservice.arn - maximum_batching_window_in_seconds = 10 - batch_size = 100 -} diff --git a/v1/go.mod b/v1/go.mod deleted file mode 100644 index 38563679..00000000 --- a/v1/go.mod +++ /dev/null @@ -1,56 +0,0 @@ -module github.com/brexhq/substation - -go 1.22 - -require ( - github.com/aws/aws-lambda-go v1.47.0 - github.com/aws/aws-sdk-go v1.54.8 - github.com/aws/aws-sdk-go-v2 v1.30.3 - github.com/aws/aws-sdk-go-v2/config v1.27.26 - github.com/aws/aws-sdk-go-v2/credentials v1.17.26 - github.com/aws/aws-sdk-go-v2/service/eventbridge v1.33.3 - github.com/aws/aws-sdk-go-v2/service/sts v1.30.3 - github.com/aws/aws-xray-sdk-go v1.8.4 - github.com/awslabs/kinesis-aggregation/go v0.0.0-20230808105340-e631fe742486 - github.com/golang/protobuf v1.5.4 - github.com/google/uuid v1.6.0 - github.com/hashicorp/go-retryablehttp v0.7.7 - github.com/iancoleman/strcase v0.3.0 - github.com/itchyny/gojq v0.12.16 - github.com/klauspost/compress v1.17.9 - github.com/oschwald/maxminddb-golang v1.13.0 - github.com/sirupsen/logrus v1.9.3 - github.com/tidwall/gjson v1.17.1 - github.com/tidwall/sjson v1.2.5 - golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8 - golang.org/x/net v0.26.0 - golang.org/x/sync v0.7.0 -) - -require ( - github.com/andybalholm/brotli v1.1.0 // indirect - github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.11 // indirect - github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.15 // indirect - github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.15 // indirect - 
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0 // indirect - github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.15 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.3 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.17 // indirect - github.com/aws/aws-sdk-go-v2/service/sso v1.22.3 // indirect - github.com/aws/aws-sdk-go-v2/service/ssooidc v1.26.4 // indirect - github.com/aws/smithy-go v1.20.3 // indirect - github.com/hashicorp/go-cleanhttp v0.5.2 // indirect - github.com/itchyny/timefmt-go v0.1.6 // indirect - github.com/jmespath/go-jmespath v0.4.0 // indirect - github.com/pkg/errors v0.9.1 // indirect - github.com/tidwall/match v1.1.1 // indirect - github.com/tidwall/pretty v1.2.1 // indirect - github.com/valyala/bytebufferpool v1.0.0 // indirect - github.com/valyala/fasthttp v1.55.0 // indirect - golang.org/x/sys v0.21.0 // indirect - golang.org/x/text v0.16.0 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240624140628-dc46fd24d27d // indirect - google.golang.org/grpc v1.64.1 // indirect - google.golang.org/protobuf v1.34.2 // indirect - gopkg.in/yaml.v2 v2.4.0 // indirect -) diff --git a/v1/go.sum b/v1/go.sum deleted file mode 100644 index 88d0cea9..00000000 --- a/v1/go.sum +++ /dev/null @@ -1,139 +0,0 @@ -github.com/DATA-DOG/go-sqlmock v1.5.1 h1:FK6RCIUSfmbnI/imIICmboyQBkOckutaa6R5YYlLZyo= -github.com/DATA-DOG/go-sqlmock v1.5.1/go.mod h1:88MAG/4G7SMwSE3CeA0ZKzrT5CiOU3OJ+JlNzwDqpNU= -github.com/andybalholm/brotli v1.1.0 h1:eLKJA0d02Lf0mVpIDgYnqXcUn0GqVmEFny3VuID1U3M= -github.com/andybalholm/brotli v1.1.0/go.mod h1:sms7XGricyQI9K10gOSf56VKKWS4oLer58Q+mhRPtnY= -github.com/aws/aws-lambda-go v1.47.0 h1:0H8s0vumYx/YKs4sE7YM0ktwL2eWse+kfopsRI1sXVI= -github.com/aws/aws-lambda-go v1.47.0/go.mod h1:dpMpZgvWx5vuQJfBt0zqBha60q7Dd7RfgJv23DymV8A= -github.com/aws/aws-sdk-go v1.34.0/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0= -github.com/aws/aws-sdk-go v1.54.8 h1:+soIjaRsuXfEJ9ts9poJD2fIIzSSRwfx+T69DrTtL2M= -github.com/aws/aws-sdk-go v1.54.8/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU= -github.com/aws/aws-sdk-go-v2 v1.30.3 h1:jUeBtG0Ih+ZIFH0F4UkmL9w3cSpaMv9tYYDbzILP8dY= -github.com/aws/aws-sdk-go-v2 v1.30.3/go.mod h1:nIQjQVp5sfpQcTc9mPSr1B0PaWK5ByX9MOoDadSN4lc= -github.com/aws/aws-sdk-go-v2/config v1.27.26 h1:T1kAefbKuNum/AbShMsZEro6eRkeOT8YILfE9wyjAYQ= -github.com/aws/aws-sdk-go-v2/config v1.27.26/go.mod h1:ivWHkAWFrw/nxty5Fku7soTIVdqZaZ7dw+tc5iGW3GA= -github.com/aws/aws-sdk-go-v2/credentials v1.17.26 h1:tsm8g/nJxi8+/7XyJJcP2dLrnK/5rkFp6+i2nhmz5fk= -github.com/aws/aws-sdk-go-v2/credentials v1.17.26/go.mod h1:3vAM49zkIa3q8WT6o9Ve5Z0vdByDMwmdScO0zvThTgI= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.11 h1:KreluoV8FZDEtI6Co2xuNk/UqI9iwMrOx/87PBNIKqw= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.11/go.mod h1:SeSUYBLsMYFoRvHE0Tjvn7kbxaUhl75CJi1sbfhMxkU= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.15 h1:SoNJ4RlFEQEbtDcCEt+QG56MY4fm4W8rYirAmq+/DdU= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.15/go.mod h1:U9ke74k1n2bf+RIgoX1SXFed1HLs51OgUSs+Ph0KJP8= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.15 h1:C6WHdGnTDIYETAm5iErQUiVNsclNx9qbJVPIt03B6bI= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.15/go.mod h1:ZQLZqhcu+JhSrA9/NXRm8SkDvsycE+JkV3WGY41e+IM= -github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0 h1:hT8rVHwugYE2lEfdFE0QWVo81lF7jMrYJVDWI+f+VxU= -github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0/go.mod h1:8tu/lYfQfFe6IGnaOdrpVgEL2IrrDOf6/m9RQum4NkY= 
-github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.15 h1:Z5r7SycxmSllHYmaAZPpmN8GviDrSGhMS6bldqtXZPw= -github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.15/go.mod h1:CetW7bDE00QoGEmPUoZuRog07SGVAUVW6LFpNP0YfIg= -github.com/aws/aws-sdk-go-v2/service/eventbridge v1.33.3 h1:pjZzcXU25gsD2WmlmlayEsyXIWMVOK3//x4BXvK9c0U= -github.com/aws/aws-sdk-go-v2/service/eventbridge v1.33.3/go.mod h1:4ew4HelByABYyBE+8iU8Rzrp5PdBic5yd9nFMhbnwE8= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.3 h1:dT3MqvGhSoaIhRseqw2I0yH81l7wiR2vjs57O51EAm8= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.3/go.mod h1:GlAeCkHwugxdHaueRr4nhPuY+WW+gR8UjlcqzPr1SPI= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.17 h1:HGErhhrxZlQ044RiM+WdoZxp0p+EGM62y3L6pwA4olE= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.17/go.mod h1:RkZEx4l0EHYDJpWppMJ3nD9wZJAa8/0lq9aVC+r2UII= -github.com/aws/aws-sdk-go-v2/service/route53 v1.6.2 h1:OsggywXCk9iFKdu2Aopg3e1oJITIuyW36hA/B0rqupE= -github.com/aws/aws-sdk-go-v2/service/route53 v1.6.2/go.mod h1:ZnAMilx42P7DgIrdjlWCkNIGSBLzeyk6T31uB8oGTwY= -github.com/aws/aws-sdk-go-v2/service/sso v1.22.3 h1:Fv1vD2L65Jnp5QRsdiM64JvUM4Xe+E0JyVsRQKv6IeA= -github.com/aws/aws-sdk-go-v2/service/sso v1.22.3/go.mod h1:ooyCOXjvJEsUw7x+ZDHeISPMhtwI3ZCB7ggFMcFfWLU= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.26.4 h1:yiwVzJW2ZxZTurVbYWA7QOrAaCYQR72t0wrSBfoesUE= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.26.4/go.mod h1:0oxfLkpz3rQ/CHlx5hB7H69YUpFiI1tql6Q6Ne+1bCw= -github.com/aws/aws-sdk-go-v2/service/sts v1.30.3 h1:ZsDKRLXGWHk8WdtyYMoGNO7bTudrvuKpDKgMVRlepGE= -github.com/aws/aws-sdk-go-v2/service/sts v1.30.3/go.mod h1:zwySh8fpFyXp9yOr/KVzxOl8SRqgf/IDw5aUt9UKFcQ= -github.com/aws/aws-xray-sdk-go v1.8.4 h1:5D631fWhs5hdBFW/8ALjWam+alm4tW42UGAuMJ1WAUI= -github.com/aws/aws-xray-sdk-go v1.8.4/go.mod h1:mbN1uxWCue9WjS2Oj2FWg7TGIsLikxMOscD0qtEjFFY= -github.com/aws/smithy-go v1.20.3 h1:ryHwveWzPV5BIof6fyDvor6V3iUL7nTfiTKXHiW05nE= -github.com/aws/smithy-go v1.20.3/go.mod h1:krry+ya/rV9RDcV/Q16kpu6ypI4K2czasz0NC3qS14E= -github.com/awslabs/kinesis-aggregation/go v0.0.0-20230808105340-e631fe742486 h1:fBy4wQzC3T5S6F1o1uTYeR8WF1MIL7GSsPYjzabOwtA= -github.com/awslabs/kinesis-aggregation/go v0.0.0-20230808105340-e631fe742486/go.mod h1:CQGhQ8Rf1WF5Ke8XuUjcd4PRb+mFTjzKR/pm3EWKaQw= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM= -github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE= -github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= -github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= -github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= -github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= -github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= -github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 
h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw= -github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y= -github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= -github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= -github.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB11/k= -github.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= -github.com/hashicorp/go-retryablehttp v0.7.7 h1:C8hUCYzor8PIfXHa4UrZkU4VvK8o9ISHxT2Q8+VepXU= -github.com/hashicorp/go-retryablehttp v0.7.7/go.mod h1:pkQpWZeYWskR+D1tR2O5OcBFOxfA7DoAO6xtkuQnHTk= -github.com/iancoleman/strcase v0.3.0 h1:nTXanmYxhfFAMjZL34Ov6gkzEsSJZ5DbhxWjvSASxEI= -github.com/iancoleman/strcase v0.3.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= -github.com/itchyny/gojq v0.12.16 h1:yLfgLxhIr/6sJNVmYfQjTIv0jGctu6/DgDoivmxTr7g= -github.com/itchyny/gojq v0.12.16/go.mod h1:6abHbdC2uB9ogMS38XsErnfqJ94UlngIJGlRAIj4jTM= -github.com/itchyny/timefmt-go v0.1.6 h1:ia3s54iciXDdzWzwaVKXZPbiXzxxnv1SPGFfM/myJ5Q= -github.com/itchyny/timefmt-go v0.1.6/go.mod h1:RRDZYC5s9ErkjQvTvvU7keJjxUYzIISJGxm9/mAERQg= -github.com/jmespath/go-jmespath v0.3.0/go.mod h1:9QtRXoHjLGCJ5IBSaohpXITPlowMeeYCZ7fLUTSywik= -github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= -github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= -github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= -github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= -github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA= -github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= -github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= -github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= -github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= -github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= -github.com/oschwald/maxminddb-golang v1.13.0 h1:R8xBorY71s84yO06NgTmQvqvTvlS/bnYZrrWX1MElnU= -github.com/oschwald/maxminddb-golang v1.13.0/go.mod h1:BU0z8BfFVhi1LQaonTwwGQlsHUEu9pWNdMfmq4ztm0o= -github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= -github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= -github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= -github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= -github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= -github.com/tidwall/gjson v1.14.2/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= -github.com/tidwall/gjson 
v1.17.1 h1:wlYEnwqAHgzmhNUFfw7Xalt2JzQvsMx2Se4PcoFCT/U= -github.com/tidwall/gjson v1.17.1/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= -github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA= -github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM= -github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= -github.com/tidwall/pretty v1.2.1 h1:qjsOFOWWQl+N3RsoF5/ssm1pHmJJwhjlSbZ51I6wMl4= -github.com/tidwall/pretty v1.2.1/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= -github.com/tidwall/sjson v1.2.5 h1:kLy8mja+1c9jlljvWTlSazM7cKDRfJuR/bOJhcY5NcY= -github.com/tidwall/sjson v1.2.5/go.mod h1:Fvgq9kS/6ociJEDnK0Fk1cpYF4FIW6ZF7LAe+6jwd28= -github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw= -github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= -github.com/valyala/fasthttp v1.55.0 h1:Zkefzgt6a7+bVKHnu/YaYSOPfNYNisSVBo/unVCf8k8= -github.com/valyala/fasthttp v1.55.0/go.mod h1:NkY9JtkrpPKmgwV3HTaS2HWaJss9RSIsRVfcxxoHiOM= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8 h1:yixxcjnhBmY0nkL253HFVIm0JsFHwrHdT3Yh6szTnfY= -golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8/go.mod h1:jj3sYF3dwk5D+ghuXyeI3r5MFf+NT2An6/9dOA95KSI= -golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ= -golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE= -golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= -golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws= -golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4= -golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240624140628-dc46fd24d27d h1:k3zyW3BYYR30e8v3x0bTDdE9vpYFjZHK+HcyqkrppWk= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240624140628-dc46fd24d27d/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY= -google.golang.org/grpc v1.64.1 h1:LKtvyfbX3UGVPFcGqJ9ItpVWW6oN/2XqTxfAnwRRXiA= -google.golang.org/grpc v1.64.1/go.mod h1:hiQF4LFZelK2WKaP6W0L92zGHtiQdZxk8CrSdvyjeP0= -google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= -google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= -gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod 
h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= -gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/v1/internal/aws/appconfig/appconfig.go b/v1/internal/aws/appconfig/appconfig.go deleted file mode 100644 index 78a3d830..00000000 --- a/v1/internal/aws/appconfig/appconfig.go +++ /dev/null @@ -1,44 +0,0 @@ -// package appconfig provides functions for interacting with AWS AppConfig. -package appconfig - -import ( - "context" - "fmt" - "io" - "os" - - "github.com/brexhq/substation/internal/http" -) - -// errMissingPrefetchEnvVar is returned when a Lambda is deployed without a configured AppConfig URL. -var errMissingPrefetchEnvVar = fmt.Errorf("missing AWS_APPCONFIG_EXTENSION_PREFETCH_LIST environment variable") - -var client http.HTTP - -// GetPrefetch queries and returns the Lambda's prefetched AppConfig configuration. -func GetPrefetch(ctx context.Context, dst io.Writer) error { - if !client.IsEnabled() { - client.Setup() - } - - env := "AWS_APPCONFIG_EXTENSION_PREFETCH_LIST" - url, found := os.LookupEnv(env) - if !found { - return fmt.Errorf("appconfig getprefetch: %v", errMissingPrefetchEnvVar) - } - - local := "http://localhost:2772" + url - - ctx = context.WithoutCancel(ctx) - resp, err := client.Get(ctx, local) - if err != nil { - return fmt.Errorf("appconfig getprefetch URL %s: %v", local, err) - } - defer resp.Body.Close() - - if _, err := io.Copy(dst, resp.Body); err != nil { - return fmt.Errorf("appconfig getprefetch: %v", err) - } - - return nil -} diff --git a/v1/internal/aws/cloudwatch/cloudwatch.go b/v1/internal/aws/cloudwatch/cloudwatch.go deleted file mode 100644 index c1e0a6d9..00000000 --- a/v1/internal/aws/cloudwatch/cloudwatch.go +++ /dev/null @@ -1,305 +0,0 @@ -package cloudwatch - -import ( - "fmt" - "os" - "strconv" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/cloudwatch" - "github.com/aws/aws-sdk-go/service/cloudwatch/cloudwatchiface" - "github.com/aws/aws-xray-sdk-go/xray" - iaws "github.com/brexhq/substation/internal/aws" -) - -const ( - // This is the period in seconds that the AWS Kinesis CloudWatch alarms - // will evaluate the metrics over. - kinesisMetricsPeriod = 60 -) - -var ( - // By default, AWS Kinesis streams must be below the lower threshold for - // 100% of the evaluation period (60 minutes) to scale down. This value can - // be overridden by the environment variable AUTOSCALE_KINESIS_DOWNSCALE_DATAPOINTS. - kinesisDownscaleDatapoints = 60 - // By default, AWS Kinesis streams must be above the upper threshold for - // 100% of the evaluation period (5 minutes) to scale up. This value can - // be overridden by the environment variable AUTOSCALE_KINESIS_UPSCALE_DATAPOINTS. - kinesisUpscaleDatapoints = 5 - // By default, AWS Kinesis streams will scale up if the incoming records and bytes - // are above 70% of the threshold. This value can be overridden by the environment - // variable AUTOSCALE_KINESIS_THRESHOLD, but it cannot be less than 40% or greater - // than 90%. 
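// A worked example of how these settings interact, derived from the code
// below: with the default kinesisThreshold of 0.7, the upscale alarm fires
// at 70% utilization and the downscale alarm at 0.7 - 0.35 = 35%. Setting
// AUTOSCALE_KINESIS_THRESHOLD=0.9 moves those to 90% and 55%; out-of-range
// values are ignored and the default is kept. Utilization is measured
// against each shard's fixed write limits of 1,000 records per second and
// 1 MiB (1048576 bytes) per second.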
- kinesisThreshold = 0.7 -) - -func init() { - if v, found := os.LookupEnv("AUTOSCALE_KINESIS_DOWNSCALE_DATAPOINTS"); found { - dps, err := strconv.Atoi(v) - if err != nil { - panic(err) - } - - kinesisDownscaleDatapoints = dps - } - - if v, found := os.LookupEnv("AUTOSCALE_KINESIS_UPSCALE_DATAPOINTS"); found { - dps, err := strconv.Atoi(v) - if err != nil { - panic(err) - } - - kinesisUpscaleDatapoints = dps - } - - if v, found := os.LookupEnv("AUTOSCALE_KINESIS_THRESHOLD"); found { - threshold, err := strconv.ParseFloat(v, 64) - if err != nil { - panic(err) - } - - if threshold >= 0.4 && threshold <= 0.9 { - kinesisThreshold = threshold - } - } -} - -// New returns a configured CloudWatch client. -func New(cfg iaws.Config) *cloudwatch.CloudWatch { - conf, sess := iaws.New(cfg) - - c := cloudwatch.New(sess, conf) - if _, ok := os.LookupEnv("AWS_XRAY_DAEMON_ADDRESS"); ok { - xray.AWS(c.Client) - } - - return c -} - -// API wraps the CloudWatch API interface. -type API struct { - Client cloudwatchiface.CloudWatchAPI -} - -// Setup creates a new CloudWatch client. -func (a *API) Setup(cfg iaws.Config) { - a.Client = New(cfg) -} - -// IsEnabled returns true if the client is enabled and ready for use. -func (a *API) IsEnabled() bool { - return a.Client != nil -} - -// UpdateKinesisDownscaleAlarm updates CloudWatch alarms that manage the scale down tracking for Kinesis streams. -func (a *API) UpdateKinesisDownscaleAlarm(ctx aws.Context, name, stream, topic string, shards int64) error { - downscaleThreshold := kinesisThreshold - 0.35 - - if _, err := a.Client.PutMetricAlarmWithContext( - ctx, - &cloudwatch.PutMetricAlarmInput{ - AlarmName: aws.String(name), - AlarmDescription: aws.String(stream), - ActionsEnabled: aws.Bool(true), - AlarmActions: []*string{aws.String(topic)}, - EvaluationPeriods: aws.Int64(int64(kinesisDownscaleDatapoints)), - DatapointsToAlarm: aws.Int64(int64(kinesisDownscaleDatapoints)), - Threshold: aws.Float64(downscaleThreshold), - ComparisonOperator: aws.String("LessThanOrEqualToThreshold"), - TreatMissingData: aws.String("ignore"), - Metrics: []*cloudwatch.MetricDataQuery{ - { - Id: aws.String("m1"), - MetricStat: &cloudwatch.MetricStat{ - Metric: &cloudwatch.Metric{ - Namespace: aws.String("AWS/Kinesis"), - MetricName: aws.String("IncomingRecords"), - Dimensions: []*cloudwatch.Dimension{ - { - Name: aws.String("StreamName"), - Value: aws.String(stream), - }, - }, - }, - Period: aws.Int64(kinesisMetricsPeriod), - Stat: aws.String("Sum"), - }, - Label: aws.String("IncomingRecords"), - ReturnData: aws.Bool(false), - }, - { - Id: aws.String("m2"), - MetricStat: &cloudwatch.MetricStat{ - Metric: &cloudwatch.Metric{ - Namespace: aws.String("AWS/Kinesis"), - MetricName: aws.String("IncomingBytes"), - Dimensions: []*cloudwatch.Dimension{ - { - Name: aws.String("StreamName"), - Value: aws.String(stream), - }, - }, - }, - Period: aws.Int64(kinesisMetricsPeriod), - Stat: aws.String("Sum"), - }, - Label: aws.String("IncomingBytes"), - ReturnData: aws.Bool(false), - }, - { - Id: aws.String("e1"), - Expression: aws.String("FILL(m1,REPEAT)"), - Label: aws.String("FillMissingDataPointsForIncomingRecords"), - ReturnData: aws.Bool(false), - }, - { - Id: aws.String("e2"), - Expression: aws.String("FILL(m2,REPEAT)"), - Label: aws.String("FillMissingDataPointsForIncomingBytes"), - ReturnData: aws.Bool(false), - }, - { - Id: aws.String("e3"), - Expression: aws.String( - fmt.Sprintf("e1/(1000*%d*%d)", shards, kinesisMetricsPeriod), - ), - Label: aws.String("IncomingRecordsPercent"), - 
ReturnData: aws.Bool(false), - }, - { - Id: aws.String("e4"), - Expression: aws.String( - fmt.Sprintf("e2/(1048576*%d*%d)", shards, kinesisMetricsPeriod), - ), - Label: aws.String("IncomingBytesPercent"), - ReturnData: aws.Bool(false), - }, - { - Id: aws.String("e5"), - Expression: aws.String("MAX([e3,e4])"), - Label: aws.String("IncomingMax"), - ReturnData: aws.Bool(true), - }, - }, - }); err != nil { - return fmt.Errorf("updatealarm alarm %s stream %s: %v", name, stream, err) - } - - if err := a.UpdateKinesisAlarmState(ctx, name, "Threshold value updated"); err != nil { - return fmt.Errorf("updatealarm alarm %s stream %s: %v", name, stream, err) - } - - return nil -} - -// UpdateKinesisUpscaleAlarm updates CloudWatch alarms that manage the scale up tracking for Kinesis streams. -func (a *API) UpdateKinesisUpscaleAlarm(ctx aws.Context, name, stream, topic string, shards int64) error { - upscaleThreshold := kinesisThreshold - - if _, err := a.Client.PutMetricAlarmWithContext( - ctx, - &cloudwatch.PutMetricAlarmInput{ - AlarmName: aws.String(name), - AlarmDescription: aws.String(stream), - ActionsEnabled: aws.Bool(true), - AlarmActions: []*string{aws.String(topic)}, - EvaluationPeriods: aws.Int64(int64(kinesisUpscaleDatapoints)), - DatapointsToAlarm: aws.Int64(int64(kinesisUpscaleDatapoints)), - Threshold: aws.Float64(upscaleThreshold), - ComparisonOperator: aws.String("GreaterThanOrEqualToThreshold"), - TreatMissingData: aws.String("ignore"), - Metrics: []*cloudwatch.MetricDataQuery{ - { - Id: aws.String("m1"), - MetricStat: &cloudwatch.MetricStat{ - Metric: &cloudwatch.Metric{ - Namespace: aws.String("AWS/Kinesis"), - MetricName: aws.String("IncomingRecords"), - Dimensions: []*cloudwatch.Dimension{ - { - Name: aws.String("StreamName"), - Value: aws.String(stream), - }, - }, - }, - Period: aws.Int64(kinesisMetricsPeriod), - Stat: aws.String("Sum"), - }, - Label: aws.String("IncomingRecords"), - ReturnData: aws.Bool(false), - }, - { - Id: aws.String("m2"), - MetricStat: &cloudwatch.MetricStat{ - Metric: &cloudwatch.Metric{ - Namespace: aws.String("AWS/Kinesis"), - MetricName: aws.String("IncomingBytes"), - Dimensions: []*cloudwatch.Dimension{ - { - Name: aws.String("StreamName"), - Value: aws.String(stream), - }, - }, - }, - Period: aws.Int64(kinesisMetricsPeriod), - Stat: aws.String("Sum"), - }, - Label: aws.String("IncomingBytes"), - ReturnData: aws.Bool(false), - }, - { - Id: aws.String("e1"), - Expression: aws.String("FILL(m1,REPEAT)"), - Label: aws.String("FillMissingDataPointsForIncomingRecords"), - ReturnData: aws.Bool(false), - }, - { - Id: aws.String("e2"), - Expression: aws.String("FILL(m2,REPEAT)"), - Label: aws.String("FillMissingDataPointsForIncomingBytes"), - ReturnData: aws.Bool(false), - }, - { - Id: aws.String("e3"), - Expression: aws.String( - fmt.Sprintf("e1/(1000*%d*%d)", shards, kinesisMetricsPeriod), - ), - Label: aws.String("IncomingRecordsPercent"), - ReturnData: aws.Bool(false), - }, - { - Id: aws.String("e4"), - Expression: aws.String( - fmt.Sprintf("e2/(1048576*%d*%d)", shards, kinesisMetricsPeriod), - ), - Label: aws.String("IncomingBytesPercent"), - ReturnData: aws.Bool(false), - }, - { - Id: aws.String("e5"), - Expression: aws.String("MAX([e3,e4])"), - Label: aws.String("IncomingMax"), - ReturnData: aws.Bool(true), - }, - }, - }); err != nil { - return fmt.Errorf("updatealarm alarm %s stream %s: %v", name, stream, err) - } - - if err := a.UpdateKinesisAlarmState(ctx, name, "Threshold value updated"); err != nil { - return fmt.Errorf("updatealarm alarm %s stream 
%s: %v", name, stream, err) - } - - return nil -} - -func (a *API) UpdateKinesisAlarmState(ctx aws.Context, name, reason string) error { - _, err := a.Client.SetAlarmStateWithContext(ctx, - &cloudwatch.SetAlarmStateInput{ - AlarmName: aws.String(name), - StateValue: aws.String("INSUFFICIENT_DATA"), - StateReason: aws.String(reason), - }) - return err -} diff --git a/v1/internal/aws/config.go b/v1/internal/aws/config.go deleted file mode 100644 index f593918e..00000000 --- a/v1/internal/aws/config.go +++ /dev/null @@ -1,105 +0,0 @@ -package aws - -import ( - "os" - "regexp" - "strconv" - - "github.com/brexhq/substation/internal/config" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/client" - "github.com/aws/aws-sdk-go/aws/credentials/stscreds" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/aws/session" -) - -type Config struct { - Region string `json:"region"` - RoleARN string `json:"role_arn"` - MaxRetries int `json:"max_retries"` - RetryableErrors []string `json:"retryable_errors"` -} - -// New returns a new AWS configuration and session. -func New(cfg Config) (*aws.Config, *session.Session) { - conf := aws.NewConfig() - - if cfg.Region != "" { - conf = conf.WithRegion(cfg.Region) - } else if v, ok := os.LookupEnv("AWS_REGION"); ok { - conf = conf.WithRegion(v) - } else if v, ok := os.LookupEnv("AWS_DEFAULT_REGION"); ok { - conf = conf.WithRegion(v) - } - - retryer := NewRetryer(config.Retry{ - Count: cfg.MaxRetries, - ErrorMessages: cfg.RetryableErrors, - }) - - // Configurations take precedence over environment variables. - if cfg.MaxRetries != 0 { - goto RETRYER - } - - if v, ok := os.LookupEnv("AWS_MAX_ATTEMPTS"); ok { - max, err := strconv.Atoi(v) - if err != nil { - panic(err) - } - - retryer.SetMaxRetries(max) - } - -RETRYER: - conf.Retryer = retryer - sess := session.Must(session.NewSession()) - if cfg.RoleARN != "" { - conf = conf.WithCredentials(stscreds.NewCredentials(sess, cfg.RoleARN)) - } - - return conf, sess -} - -// NewDefault returns a new AWS configuration and session with default values. -func NewDefault() (*aws.Config, *session.Session) { - return New(Config{}) -} - -func NewRetryer(cfg config.Retry) customRetryer { - errMsg := make([]*regexp.Regexp, len(cfg.ErrorMessages)) - for i, err := range cfg.ErrorMessages { - errMsg[i] = regexp.MustCompile(err) - } - - return customRetryer{ - DefaultRetryer: client.DefaultRetryer{ - NumMaxRetries: cfg.Count, - }, - errorMessages: errMsg, - } -} - -type customRetryer struct { - client.DefaultRetryer - - // errorMessages are regular expressions that are used to match error messages. - errorMessages []*regexp.Regexp -} - -func (r customRetryer) SetMaxRetries(max int) { - r.NumMaxRetries = max -} - -// ShouldRetry retries if any of the configured error strings are found in the request error. -func (r customRetryer) ShouldRetry(req *request.Request) bool { - for _, err := range r.errorMessages { - if err.MatchString(req.Error.Error()) { - return true - } - } - - // Fallback to the default retryer. 
- return r.DefaultRetryer.ShouldRetry(req) -} diff --git a/v1/internal/aws/dynamodb/dynamodb.go b/v1/internal/aws/dynamodb/dynamodb.go deleted file mode 100644 index 9b607628..00000000 --- a/v1/internal/aws/dynamodb/dynamodb.go +++ /dev/null @@ -1,278 +0,0 @@ -package dynamodb - -import ( - "context" - "fmt" - "os" - - "github.com/aws/aws-lambda-go/events" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/dynamodb" - "github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute" - "github.com/aws/aws-sdk-go/service/dynamodb/dynamodbiface" - "github.com/aws/aws-xray-sdk-go/xray" - iaws "github.com/brexhq/substation/internal/aws" -) - -// New returns a configured DynamoDB client. -func New(cfg iaws.Config) *dynamodb.DynamoDB { - conf, sess := iaws.New(cfg) - - c := dynamodb.New(sess, conf) - if _, ok := os.LookupEnv("AWS_XRAY_DAEMON_ADDRESS"); ok { - xray.AWS(c.Client) - } - - return c } - -// API wraps the DynamoDB API interface. -type API struct { - Client dynamodbiface.DynamoDBAPI -} - -// Setup creates a new DynamoDB client. -func (a *API) Setup(cfg iaws.Config) { - a.Client = New(cfg) -} - -// IsEnabled returns true if the client is enabled and ready for use. -func (a *API) IsEnabled() bool { - return a.Client != nil -} - -func (a *API) DeleteItem(ctx aws.Context, table string, key map[string]*dynamodb.AttributeValue) (resp *dynamodb.DeleteItemOutput, err error) { - ctx = context.WithoutCancel(ctx) - resp, err = a.Client.DeleteItemWithContext( - ctx, - &dynamodb.DeleteItemInput{ - TableName: aws.String(table), - Key: key, - }, - ) - if err != nil { - return nil, fmt.Errorf("deleteitem table %s: %v", table, err) - } - - return resp, nil -} - -// BatchPutItem is a convenience wrapper for putting multiple items into a DynamoDB table. -func (a *API) BatchPutItem(ctx aws.Context, table string, items []map[string]*dynamodb.AttributeValue) (resp *dynamodb.BatchWriteItemOutput, err error) { - var requests []*dynamodb.WriteRequest - for _, item := range items { - requests = append(requests, &dynamodb.WriteRequest{ - PutRequest: &dynamodb.PutRequest{ - Item: item, - }, - }) - } - - ctx = context.WithoutCancel(ctx) - resp, err = a.Client.BatchWriteItemWithContext( - ctx, - &dynamodb.BatchWriteItemInput{ - RequestItems: map[string][]*dynamodb.WriteRequest{ - table: requests, - }, - }, - ) - if err != nil { - if aerr, ok := err.(awserr.Error); ok { - switch aerr.Code() { - case dynamodb.ErrCodeProvisionedThroughputExceededException: - var retry []map[string]*dynamodb.AttributeValue - - for _, item := range resp.UnprocessedItems[table] { - retry = append(retry, item.PutRequest.Item) - } - - if len(retry) > 0 { - return a.BatchPutItem(ctx, table, retry) - } - - fallthrough - default: - return nil, fmt.Errorf("batch_put_item: table %s: %v", table, err) - } - } - } - - return resp, nil -} - -// UpdateItem is a convenience wrapper for updating an item in a DynamoDB table. -func (a *API) UpdateItem(ctx aws.Context, input *dynamodb.UpdateItemInput) (resp *dynamodb.UpdateItemOutput, err error) { - ctx = context.WithoutCancel(ctx) - return a.Client.UpdateItemWithContext(ctx, input) -} - -// PutItem is a convenience wrapper for putting items into a DynamoDB table.
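BatchPutItem above retries throughput-exceeded errors by re-submitting only the items that DynamoDB reports as unprocessed. A condensed sketch of that extraction step, written as if it lived alongside the package above; the nil check on PutRequest is an added safeguard, not part of the original:

// unprocessed collects the items that DynamoDB returned as unprocessed so
// the caller can recursively re-submit them.
func unprocessed(resp *dynamodb.BatchWriteItemOutput, table string) []map[string]*dynamodb.AttributeValue {
	var retry []map[string]*dynamodb.AttributeValue
	for _, w := range resp.UnprocessedItems[table] {
		if w.PutRequest != nil {
			retry = append(retry, w.PutRequest.Item)
		}
	}
	return retry
}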
-func (a *API) PutItem(ctx aws.Context, table string, item map[string]*dynamodb.AttributeValue) (resp *dynamodb.PutItemOutput, err error) { - ctx = context.WithoutCancel(ctx) - resp, err = a.Client.PutItemWithContext( - ctx, - &dynamodb.PutItemInput{ - TableName: aws.String(table), - Item: item, - }) - if err != nil { - return nil, fmt.Errorf("putitem table %s: %v", table, err) - } - - return resp, nil -} - -func (a *API) PutItemWithCondition(ctx aws.Context, table string, item map[string]*dynamodb.AttributeValue, conditionExpression string, expressionAttributeNames map[string]*string, expressionAttributeValues map[string]*dynamodb.AttributeValue) (resp *dynamodb.PutItemOutput, err error) { - input := &dynamodb.PutItemInput{ - TableName: aws.String(table), - ConditionExpression: aws.String(conditionExpression), - ExpressionAttributeNames: expressionAttributeNames, - Item: item, - ExpressionAttributeValues: expressionAttributeValues, - ReturnValues: aws.String("ALL_OLD"), - } - - resp, err = a.Client.PutItemWithContext(ctx, input) - if err != nil { - return resp, err - } - - return resp, nil -} - -/* -Query is a convenience wrapper for querying a DynamoDB table. The partition and sort keys are always referenced in the key condition expression as ":PK" and ":SK". Refer to the DynamoDB documentation for the Query operation's request syntax and key condition expression patterns: - -- https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_Query.html#API_Query_RequestSyntax - -- https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Query.html#Query.KeyConditionExpressions -*/ -func (a *API) Query(ctx aws.Context, table, partitionKey, sortKey, keyConditionExpression string, limit int64, scanIndexForward bool) (resp *dynamodb.QueryOutput, err error) { - expression := make(map[string]*dynamodb.AttributeValue) - expression[":PK"] = &dynamodb.AttributeValue{ - S: aws.String(partitionKey), - } - - if sortKey != "" { - expression[":SK"] = &dynamodb.AttributeValue{ - S: aws.String(sortKey), - } - } - - ctx = context.WithoutCancel(ctx) - resp, err = a.Client.QueryWithContext( - ctx, - &dynamodb.QueryInput{ - TableName: aws.String(table), - KeyConditionExpression: aws.String(keyConditionExpression), - ExpressionAttributeValues: expression, - Limit: aws.Int64(limit), - ScanIndexForward: aws.Bool(scanIndexForward), - }) - if err != nil { - return nil, fmt.Errorf("query: table %s key_condition_expression %s: %v", table, keyConditionExpression, err) - } - - return resp, nil -} - -// GetItem is a convenience wrapper for getting items from a DynamoDB table. -func (a *API) GetItem(ctx aws.Context, table string, attributes map[string]interface{}, consistentRead bool) (resp *dynamodb.GetItemOutput, err error) { - attr, err := dynamodbattribute.MarshalMap(attributes) - if err != nil { - return nil, fmt.Errorf("get_item: table %s: %v", table, err) - } - - ctx = context.WithoutCancel(ctx) - resp, err = a.Client.GetItemWithContext( - ctx, - &dynamodb.GetItemInput{ - TableName: aws.String(table), - Key: attr, - ConsistentRead: aws.Bool(consistentRead), - }, - ) - if err != nil { - return nil, fmt.Errorf("get_item: table %s: %v", table, err) - } - - return resp, nil -} - -// ConvertEventsAttributeValue converts events.DynamoDBAttributeValue to dynamodb.AttributeValue.
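The Query wrapper above always binds the partition and sort key values to ":PK" and ":SK", so a caller only varies the key condition expression. A hedged usage sketch, assuming the same package scope; the table name and the PK/SK attribute names are hypothetical:

// queryRecent returns up to ten items for a partition, newest first.
func queryRecent(ctx aws.Context, a *API, pk, sk string) (*dynamodb.QueryOutput, error) {
	// The pk and sk arguments bind to ":PK" and ":SK" automatically.
	return a.Query(ctx, "events-table", pk, sk, "PK = :PK AND SK >= :SK", 10, false)
}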
-func ConvertEventsAttributeValue(v events.DynamoDBAttributeValue) *dynamodb.AttributeValue { - switch v.DataType() { - case events.DataTypeBinary: - return &dynamodb.AttributeValue{ - B: v.Binary(), - } - case events.DataTypeBinarySet: - return &dynamodb.AttributeValue{ - BS: v.BinarySet(), - } - case events.DataTypeNumber: - return &dynamodb.AttributeValue{ - N: aws.String(v.Number()), - } - case events.DataTypeNumberSet: - av := &dynamodb.AttributeValue{} - - for _, n := range v.NumberSet() { - av.NS = append(av.NS, aws.String(n)) - } - - return av - case events.DataTypeString: - return &dynamodb.AttributeValue{ - S: aws.String(v.String()), - } - case events.DataTypeStringSet: - av := &dynamodb.AttributeValue{} - - for _, s := range v.StringSet() { - av.SS = append(av.SS, aws.String(s)) - } - - return av - case events.DataTypeList: - av := &dynamodb.AttributeValue{} - - for _, v := range v.List() { - av.L = append(av.L, ConvertEventsAttributeValue(v)) - } - - return av - case events.DataTypeMap: - av := &dynamodb.AttributeValue{} - av.M = make(map[string]*dynamodb.AttributeValue) - - for k, v := range v.Map() { - av.M[k] = ConvertEventsAttributeValue(v) - } - - return av - case events.DataTypeNull: - return &dynamodb.AttributeValue{ - NULL: aws.Bool(true), - } - case events.DataTypeBoolean: - return &dynamodb.AttributeValue{ - BOOL: aws.Bool(v.Boolean()), - } - default: - return nil - } -} - -// ConvertEventsAttributeValueMap converts a map of events.DynamoDBAttributeValue to a map of dynamodb.AttributeValue. -func ConvertEventsAttributeValueMap(m map[string]events.DynamoDBAttributeValue) map[string]*dynamodb.AttributeValue { - av := make(map[string]*dynamodb.AttributeValue) - - for k, v := range m { - av[k] = ConvertEventsAttributeValue(v) - } - - return av -} diff --git a/v1/internal/aws/firehose/firehose.go b/v1/internal/aws/firehose/firehose.go deleted file mode 100644 index 8d0125f0..00000000 --- a/v1/internal/aws/firehose/firehose.go +++ /dev/null @@ -1,97 +0,0 @@ -package firehose - -import ( - "context" - "fmt" - "os" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/firehose" - "github.com/aws/aws-sdk-go/service/firehose/firehoseiface" - "github.com/aws/aws-xray-sdk-go/xray" - iaws "github.com/brexhq/substation/internal/aws" -) - -// New creates a new session for Kinesis Firehose -func New(cfg iaws.Config) *firehose.Firehose { - conf, sess := iaws.New(cfg) - - c := firehose.New(sess, conf) - if _, ok := os.LookupEnv("AWS_XRAY_DAEMON_ADDRESS"); ok { - xray.AWS(c.Client) - } - - return c -} - -// API wraps a Kinesis Firehose client interface -type API struct { - Client firehoseiface.FirehoseAPI -} - -// IsEnabled checks whether a new client has been set -func (a *API) IsEnabled() bool { - return a.Client != nil -} - -// Setup creates a Kinesis Firehose client -func (a *API) Setup(cfg iaws.Config) { - a.Client = New(cfg) -} - -// PutRecord is a convenience wrapper for putting a record into a Kinesis Firehose stream. -func (a *API) PutRecord(ctx aws.Context, data []byte, stream string) (*firehose.PutRecordOutput, error) { - ctx = context.WithoutCancel(ctx) - resp, err := a.Client.PutRecordWithContext( - ctx, - &firehose.PutRecordInput{ - DeliveryStreamName: aws.String(stream), - Record: &firehose.Record{Data: data}, - }) - if err != nil { - return nil, fmt.Errorf("putrecord stream %s: %v", stream, err) - } - - return resp, nil -} - -// PutRecordBatch is a convenience wrapper for putting multiple records into a Kinesis Firehose stream. 
This function becomes recursive for any records that failed the PutRecord operation. -func (a *API) PutRecordBatch(ctx aws.Context, stream string, data [][]byte) (*firehose.PutRecordBatchOutput, error) { - var records []*firehose.Record - for _, d := range data { - records = append(records, &firehose.Record{Data: d}) - } - - ctx = context.WithoutCancel(ctx) - resp, err := a.Client.PutRecordBatchWithContext( - ctx, - &firehose.PutRecordBatchInput{ - DeliveryStreamName: aws.String(stream), - Records: records, - }, - ) - - // failed records are identified by the existence of an error code. - // if an error code exists, then data is stored in a new slice and - // recursively input into the function. - if resp.FailedPutCount != aws.Int64(0) { - var retry [][]byte - for idx, r := range resp.RequestResponses { - if r.ErrorCode == nil { - continue - } - - retry = append(retry, data[idx]) - } - - if len(retry) > 0 { - return a.PutRecordBatch(ctx, stream, retry) - } - } - - if err != nil { - return nil, fmt.Errorf("putrecordbatch stream %s: %v", stream, err) - } - - return resp, nil -} diff --git a/v1/internal/aws/kinesis/kinesis.go b/v1/internal/aws/kinesis/kinesis.go deleted file mode 100644 index a402e12a..00000000 --- a/v1/internal/aws/kinesis/kinesis.go +++ /dev/null @@ -1,344 +0,0 @@ -package kinesis - -import ( - "context" - "crypto/md5" - "fmt" - "os" - "time" - - "github.com/aws/aws-lambda-go/events" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/kinesis" - "github.com/aws/aws-sdk-go/service/kinesis/kinesisiface" - "github.com/aws/aws-xray-sdk-go/xray" - rec "github.com/awslabs/kinesis-aggregation/go/records" - iaws "github.com/brexhq/substation/internal/aws" - - //nolint: staticcheck // not ready to switch package - "github.com/golang/protobuf/proto" -) - -// Aggregate produces a KPL-compliant Kinesis record -type Aggregate struct { - Record *rec.AggregatedRecord - Count int - Size int - PartitionKey string -} - -// New creates a new Kinesis record with default values -// https://github.com/awslabs/kinesis-aggregation/blob/398fbd4b430d4bf590431b301d03cbbc94279cef/python/aws_kinesis_agg/aggregator.py#L167 -func (a *Aggregate) New() { - a.Record = &rec.AggregatedRecord{} - a.Count = 0 - a.Size = 0 - - a.PartitionKey = "" - a.Record.PartitionKeyTable = make([]string, 0) -} - -func varIntSize(i int) int { - if i == 0 { - return 1 - } - - var needed int - for i > 0 { - needed++ - i >>= 1 - } - - bytes := needed / 7 - if needed%7 > 0 { - bytes++ - } - - return bytes -} - -func (a *Aggregate) calculateRecordSize(data []byte, partitionKey string) int { - var recordSize int - // https://github.com/awslabs/kinesis-aggregation/blob/398fbd4b430d4bf590431b301d03cbbc94279cef/python/aws_kinesis_agg/aggregator.py#L344-L349 - pkSize := 1 + varIntSize(len(partitionKey)) + len(partitionKey) - recordSize += pkSize - // https://github.com/awslabs/kinesis-aggregation/blob/398fbd4b430d4bf590431b301d03cbbc94279cef/python/aws_kinesis_agg/aggregator.py#L362-L364 - pkiSize := 1 + varIntSize(a.Count) - recordSize += pkiSize - // https://github.com/awslabs/kinesis-aggregation/blob/398fbd4b430d4bf590431b301d03cbbc94279cef/python/aws_kinesis_agg/aggregator.py#L371-L374 - dataSize := 1 + varIntSize(len(data)) + len(data) - recordSize += dataSize - // https://github.com/awslabs/kinesis-aggregation/blob/398fbd4b430d4bf590431b301d03cbbc94279cef/python/aws_kinesis_agg/aggregator.py#L376-L378 - recordSize = recordSize + 1 + varIntSize(pkiSize+dataSize) - - // input record size + current aggregated 
record size + 4 byte magic header + 16 byte MD5 digest - return recordSize + a.Record.XXX_Size() + 20 -} - -// Add inserts a Kinesis record into an aggregated Kinesis record -// https://github.com/awslabs/kinesis-aggregation/blob/398fbd4b430d4bf590431b301d03cbbc94279cef/python/aws_kinesis_agg/aggregator.py#L382 -func (a *Aggregate) Add(data []byte, partitionKey string) bool { - // https://docs.aws.amazon.com/streams/latest/dev/key-concepts.html#partition-key - if len(partitionKey) > 256 { - partitionKey = partitionKey[0:256] - } - - // grab the first partition key in the set of events - if a.PartitionKey == "" { - a.PartitionKey = partitionKey - } - - // Verify the record size won't exceed the 1 MB limit of the Kinesis service. - // https://docs.aws.amazon.com/streams/latest/dev/service-sizes-and-limits.html - if a.calculateRecordSize(data, partitionKey) > 1024*1024 { - return false - } - - pki := uint64(a.Count) - r := &rec.Record{ - PartitionKeyIndex: &pki, - Data: data, - } - - // Append the data to the aggregated record. - a.Record.Records = append(a.Record.Records, r) - a.Record.PartitionKeyTable = append(a.Record.PartitionKeyTable, partitionKey) - - // Update the record count and size. This is not used in the aggregated record. - a.Count++ - a.Size += a.calculateRecordSize(data, partitionKey) - - return true -} - -// Get returns a KPL-compliant compressed Kinesis record -// https://github.com/awslabs/kinesis-aggregation/blob/398fbd4b430d4bf590431b301d03cbbc94279cef/python/aws_kinesis_agg/aggregator.py#L293 -func (a *Aggregate) Get() []byte { - data, _ := proto.Marshal(a.Record) - md5Hash := md5.Sum(data) - - record := []byte("\xf3\x89\x9a\xc2") - record = append(record, data...) - record = append(record, md5Hash[:]...) - - return record -} - -// ConvertEventsRecords converts Kinesis records between the Lambda and Go SDK packages. This is required for deaggregating Kinesis records processed by AWS Lambda. -func ConvertEventsRecords(records []events.KinesisEventRecord) []*kinesis.Record { - output := make([]*kinesis.Record, 0) - - for _, r := range records { - // ApproximateArrivalTimestamp is events.SecondsEpochTime which serializes time.Time - ts := r.Kinesis.ApproximateArrivalTimestamp.UTC() - output = append(output, &kinesis.Record{ - ApproximateArrivalTimestamp: &ts, - Data: r.Kinesis.Data, - EncryptionType: &r.Kinesis.EncryptionType, - PartitionKey: &r.Kinesis.PartitionKey, - SequenceNumber: &r.Kinesis.SequenceNumber, - }) - } - - return output -} - -// New returns a configured Kinesis client. -func New(cfg iaws.Config) *kinesis.Kinesis { - conf, sess := iaws.New(cfg) - - c := kinesis.New(sess, conf) - if _, ok := os.LookupEnv("AWS_XRAY_DAEMON_ADDRESS"); ok { - xray.AWS(c.Client) - } - - return c -} - -// API wraps the Kinesis API interface. -type API struct { - Client kinesisiface.KinesisAPI -} - -// Setup creates a new Kinesis client. -func (a *API) Setup(cfg iaws.Config) { - a.Client = New(cfg) -} - -// IsEnabled returns true if the client is enabled and ready for use. -func (a *API) IsEnabled() bool { - return a.Client != nil -} - -// ListShards wraps the ListShardsWithContext API. -func (a *API) ListShards(ctx aws.Context, stream string) (*kinesis.ListShardsOutput, error) { - return a.Client.ListShardsWithContext(ctx, &kinesis.ListShardsInput{ - StreamName: aws.String(stream), - }) -} - -// GetShardIterator wraps the GetShardIteratorWithContext API.
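Aggregate above implements KPL-style aggregation: Add packs records until the next one would push the aggregate past the 1 MB service limit, and Get emits the magic 0xF3899AC2 header, the protobuf payload, and an MD5 digest. A sketch of the fill-and-flush loop a producer might run, assuming the same package scope (the flush policy is illustrative):

// pack aggregates raw records into as few Kinesis records as possible.
func pack(data [][]byte, partitionKey string) [][]byte {
	var out [][]byte

	var agg Aggregate
	agg.New()

	for _, d := range data {
		// Add reports false when d would exceed the 1 MB limit, so flush
		// the current aggregate and start a new one.
		if ok := agg.Add(d, partitionKey); !ok {
			out = append(out, agg.Get())
			agg.New()
			_ = agg.Add(d, partitionKey)
		}
	}

	if agg.Count > 0 {
		out = append(out, agg.Get())
	}

	return out
}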
-func (a *API) GetShardIterator(ctx aws.Context, stream, shard, iteratorType string) (*kinesis.GetShardIteratorOutput, error) { - return a.Client.GetShardIteratorWithContext(ctx, &kinesis.GetShardIteratorInput{ - ShardId: aws.String(shard), - ShardIteratorType: aws.String(iteratorType), - StreamName: aws.String(stream), - }) -} - -// GetRecords wraps the GetRecordsWithContext API. -func (a *API) GetRecords(ctx aws.Context, iterator string) (*kinesis.GetRecordsOutput, error) { - return a.Client.GetRecordsWithContext(ctx, &kinesis.GetRecordsInput{ - ShardIterator: aws.String(iterator), - }) -} - -// PutRecords is a convenience wrapper for putting multiple records into a Kinesis stream. -func (a *API) PutRecords(ctx aws.Context, stream, partitionKey string, data [][]byte) (*kinesis.PutRecordsOutput, error) { - var records []*kinesis.PutRecordsRequestEntry - - ctx = context.WithoutCancel(ctx) - for _, d := range data { - records = append(records, &kinesis.PutRecordsRequestEntry{ - Data: d, - PartitionKey: aws.String(partitionKey), - }) - } - - resp, err := a.Client.PutRecordsWithContext( - ctx, - &kinesis.PutRecordsInput{ - Records: records, - StreamName: aws.String(stream), - }, - ) - - // If any record fails, then the record is recursively retried. - if resp.FailedRecordCount != nil && *resp.FailedRecordCount > 0 { - var retry [][]byte - - for idx, r := range resp.Records { - if r.ErrorCode != nil { - retry = append(retry, data[idx]) - } - } - - if len(retry) > 0 { - return a.PutRecords(ctx, stream, partitionKey, retry) - } - } - - if err != nil { - return nil, fmt.Errorf("put_records: stream %s: %v", stream, err) - } - - return resp, nil -} - -// ActiveShards returns the number of in-use shards for a Kinesis stream. -func (a *API) ActiveShards(ctx aws.Context, stream string) (int64, error) { - var shards int64 - params := &kinesis.ListShardsInput{ - StreamName: aws.String(stream), - } - -LOOP: - for { - output, err := a.Client.ListShardsWithContext(ctx, params) - if err != nil { - return 0, fmt.Errorf("listshards stream %s: %v", stream, err) - } - - for _, s := range output.Shards { - if end := s.SequenceNumberRange.EndingSequenceNumber; end == nil { - shards++ - } - } - - if output.NextToken != nil { - params = &kinesis.ListShardsInput{ - NextToken: output.NextToken, - } - } else { - break LOOP - } - } - - return shards, nil -} - -// UpdateShards uniformly updates a Kinesis stream's shard count and returns when the update is complete. -func (a *API) UpdateShards(ctx aws.Context, stream string, shards int64) error { - params := &kinesis.UpdateShardCountInput{ - StreamName: aws.String(stream), - TargetShardCount: aws.Int64(shards), - ScalingType: aws.String("UNIFORM_SCALING"), - } - if _, err := a.Client.UpdateShardCountWithContext(ctx, params); err != nil { - return fmt.Errorf("updateshards stream %s shards %d: %v", stream, shards, err) - } - - for { - resp, err := a.Client.DescribeStreamSummaryWithContext(ctx, - &kinesis.DescribeStreamSummaryInput{ - StreamName: aws.String(stream), - }) - if err != nil { - return fmt.Errorf("describestream stream %s: %v", stream, err) - } - - if status := resp.StreamDescriptionSummary.StreamStatus; status != aws.String("UPDATING") { - break - } - time.Sleep(1 * time.Second) - } - - return nil -} - -// GetTags recursively retrieves all tags for a Kinesis stream. 
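ActiveShards above pages through ListShards with NextToken and counts only open shards. The predicate is worth calling out: a shard still accepts writes while its sequence number range has no ending sequence number, and shards closed by resharding are skipped. A minimal sketch of that counting step, assuming the same package scope:

// countOpen counts shards that are still open for writes.
func countOpen(shards []*kinesis.Shard) int64 {
	var n int64
	for _, s := range shards {
		if s.SequenceNumberRange.EndingSequenceNumber == nil {
			n++
		}
	}
	return n
}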
-func (a *API) GetTags(ctx aws.Context, stream string) ([]*kinesis.Tag, error) { - var tags []*kinesis.Tag - var lastTag string - - for { - req := &kinesis.ListTagsForStreamInput{ - StreamName: aws.String(stream), - } - - if lastTag != "" { - req.ExclusiveStartTagKey = aws.String(lastTag) - } - - resp, err := a.Client.ListTagsForStreamWithContext(ctx, req) - if err != nil { - return nil, fmt.Errorf("listtags stream %s: %v", stream, err) - } - - tags = append(tags, resp.Tags...) - lastTag = *resp.Tags[len(resp.Tags)-1].Key - - // enables recursion - if !*resp.HasMoreTags { - break - } - } - - return tags, nil -} - -// UpdateTag updates a tag on a Kinesis stream. -func (a *API) UpdateTag(ctx aws.Context, stream, key, value string) error { - input := &kinesis.AddTagsToStreamInput{ - StreamName: aws.String(stream), - Tags: map[string]*string{ - key: aws.String(value), - }, - } - - if _, err := a.Client.AddTagsToStreamWithContext(ctx, input); err != nil { - return fmt.Errorf("updatetag stream %s key %s value %s: %v", stream, key, value, err) - } - - return nil -} diff --git a/v1/internal/aws/lambda/lambda.go b/v1/internal/aws/lambda/lambda.go deleted file mode 100644 index b140099d..00000000 --- a/v1/internal/aws/lambda/lambda.go +++ /dev/null @@ -1,75 +0,0 @@ -package lambda - -import ( - "context" - "fmt" - "os" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/lambda" - "github.com/aws/aws-sdk-go/service/lambda/lambdaiface" - "github.com/aws/aws-xray-sdk-go/xray" - iaws "github.com/brexhq/substation/internal/aws" -) - -// New returns a configured Lambda client. - -func New(cfg iaws.Config) *lambda.Lambda { - conf, sess := iaws.New(cfg) - - c := lambda.New(sess, conf) - if _, ok := os.LookupEnv("AWS_XRAY_DAEMON_ADDRESS"); ok { - xray.AWS(c.Client) - } - - return c -} - -// API wraps the Lambda API interface. -type API struct { - Client lambdaiface.LambdaAPI -} - -// Setup creates a new Lambda client. -func (a *API) Setup(cfg iaws.Config) { - a.Client = New(cfg) -} - -// IsEnabled returns true if the client is enabled and ready for use. -func (a *API) IsEnabled() bool { - return a.Client != nil -} - -// Invoke is a convenience wrapper for synchronously invoking a Lambda function. -func (a *API) Invoke(ctx aws.Context, function string, payload []byte) (resp *lambda.InvokeOutput, err error) { - ctx = context.WithoutCancel(ctx) - resp, err = a.Client.InvokeWithContext( - ctx, - &lambda.InvokeInput{ - FunctionName: aws.String(function), - InvocationType: aws.String("RequestResponse"), - Payload: payload, - }) - if err != nil { - return nil, fmt.Errorf("invoke function %s: %v", function, err) - } - - return resp, nil -} - -// InvokeAsync is a convenience wrapper for asynchronously invoking a Lambda function. -func (a *API) InvokeAsync(ctx aws.Context, function string, payload []byte) (resp *lambda.InvokeOutput, err error) { - ctx = context.WithoutCancel(ctx) - resp, err = a.Client.InvokeWithContext( - ctx, - &lambda.InvokeInput{ - FunctionName: aws.String(function), - InvocationType: aws.String("Event"), - Payload: payload, - }) - if err != nil { - return nil, fmt.Errorf("invoke_async function %s: %v", function, err) - } - - return resp, nil -} diff --git a/v1/internal/aws/s3manager/s3manager.go b/v1/internal/aws/s3manager/s3manager.go deleted file mode 100644 index 9f0a89ef..00000000 --- a/v1/internal/aws/s3manager/s3manager.go +++ /dev/null @@ -1,129 +0,0 @@ -// package s3manager provides methods and functions for downloading and uploading objects in AWS S3. 
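The Lambda wrappers above differ only in invocation type: "RequestResponse" blocks until the function returns a payload, while "Event" enqueues the payload and returns immediately. A sketch of choosing between them, assuming the same package scope; the function name and payload are hypothetical:

// notify fires and forgets; fetch waits for the function's response payload.
func notify(ctx aws.Context, a *API) error {
	_, err := a.InvokeAsync(ctx, "substation-handler", []byte(`{"event":"ping"}`))
	return err
}

func fetch(ctx aws.Context, a *API) ([]byte, error) {
	resp, err := a.Invoke(ctx, "substation-handler", []byte(`{"event":"ping"}`))
	if err != nil {
		return nil, err
	}
	return resp.Payload, nil
}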
-package s3manager - -import ( - "context" - "fmt" - "io" - "os" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/s3" - "github.com/aws/aws-sdk-go/service/s3/s3manager" - "github.com/aws/aws-sdk-go/service/s3/s3manager/s3manageriface" - "github.com/aws/aws-xray-sdk-go/xray" - iaws "github.com/brexhq/substation/internal/aws" - "github.com/brexhq/substation/internal/media" -) - -// NewS3 returns a configured S3 client. -func NewS3(cfg iaws.Config) *s3.S3 { - conf, sess := iaws.New(cfg) - - c := s3.New(sess, conf) - if _, ok := os.LookupEnv("AWS_XRAY_DAEMON_ADDRESS"); ok { - xray.AWS(c.Client) - } - - return c -} - -// NewS3Downloader returns a configured Downloader client. -func NewS3Downloader(cfg iaws.Config) *s3manager.Downloader { - return s3manager.NewDownloaderWithClient(NewS3(cfg)) -} - -// DownloaderAPI wraps the Downloader API interface. -type DownloaderAPI struct { - Client s3manageriface.DownloaderAPI -} - -// Setup creates a new Downloader client. -func (a *DownloaderAPI) Setup(cfg iaws.Config) { - a.Client = NewS3Downloader(cfg) -} - -// IsEnabled returns true if the client is enabled and ready for use. -func (a *DownloaderAPI) IsEnabled() bool { - return a.Client != nil -} - -// Download is a convenience wrapper for downloading an object from S3. -func (a *DownloaderAPI) Download(ctx aws.Context, bucket, key string, dst io.WriterAt) (int64, error) { - input := &s3.GetObjectInput{ - Bucket: aws.String(bucket), - Key: aws.String(key), - } - - ctx = context.WithoutCancel(ctx) - size, err := a.Client.DownloadWithContext(ctx, dst, input) - if err != nil { - return 0, fmt.Errorf("s3manager download bucket %s key %s: %v", bucket, key, err) - } - - return size, nil -} - -// NewS3Uploader returns a configured Uploader client. -func NewS3Uploader(cfg iaws.Config) *s3manager.Uploader { - return s3manager.NewUploaderWithClient(NewS3(cfg)) -} - -// UploaderAPI wraps the Uploader API interface. -type UploaderAPI struct { - Client s3manageriface.UploaderAPI -} - -// Setup creates a new Uploader client. -func (a *UploaderAPI) Setup(cfg iaws.Config) { - a.Client = NewS3Uploader(cfg) -} - -// IsEnabled returns true if the client is enabled and ready for use. -func (a *UploaderAPI) IsEnabled() bool { - return a.Client != nil -} - -// Upload is a convenience wrapper for uploading an object to S3. 
-func (a *UploaderAPI) Upload(ctx aws.Context, bucket, key, storageClass string, src io.Reader) (*s3manager.UploadOutput, error) { - // temporary file is used so that the src can have its content identified and be uploaded to S3 - dst, err := os.CreateTemp("", "substation") - if err != nil { - return nil, fmt.Errorf("s3manager upload bucket %s key %s: %v", bucket, key, err) - } - defer os.Remove(dst.Name()) - defer dst.Close() - - if _, err := io.Copy(dst, src); err != nil { - return nil, fmt.Errorf("s3manager upload bucket %s key %s: %v", bucket, key, err) - } - - mediaType, err := media.File(dst) - if err != nil { - return nil, fmt.Errorf("s3manager upload bucket %s key %s: %v", bucket, key, err) - } - - if _, err := dst.Seek(0, 0); err != nil { - return nil, fmt.Errorf("s3manager upload bucket %s key %s: %v", bucket, key, err) - } - - if storageClass == "" { - storageClass = "STANDARD" - } - - input := &s3manager.UploadInput{ - Bucket: aws.String(bucket), - Key: aws.String(key), - Body: dst, - ContentType: aws.String(mediaType), - StorageClass: aws.String(storageClass), - } - - ctx = context.WithoutCancel(ctx) - resp, err := a.Client.UploadWithContext(ctx, input) - if err != nil { - return nil, fmt.Errorf("s3manager upload bucket %s key %s: %v", bucket, key, err) - } - - return resp, nil -} diff --git a/v1/internal/aws/secretsmanager/secretsmanager.go b/v1/internal/aws/secretsmanager/secretsmanager.go deleted file mode 100644 index 751e8d19..00000000 --- a/v1/internal/aws/secretsmanager/secretsmanager.go +++ /dev/null @@ -1,61 +0,0 @@ -package secretsmanager - -import ( - "context" - "fmt" - "os" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/secretsmanager" - "github.com/aws/aws-sdk-go/service/secretsmanager/secretsmanageriface" - "github.com/aws/aws-xray-sdk-go/xray" - iaws "github.com/brexhq/substation/internal/aws" -) - -// New returns a configured Secrets Manager client. -func New(cfg iaws.Config) *secretsmanager.SecretsManager { - conf, sess := iaws.New(cfg) - - c := secretsmanager.New(sess, conf) - if _, ok := os.LookupEnv("AWS_XRAY_DAEMON_ADDRESS"); ok { - xray.AWS(c.Client) - } - - return c -} - -// API wraps the Secrets Manager API interface. -type API struct { - Client secretsmanageriface.SecretsManagerAPI -} - -// Setup creates a new Secrets Manager client. -func (a *API) Setup(cfg iaws.Config) { - a.Client = New(cfg) -} - -// IsEnabled returns true if the client is enabled and ready for use. -func (a *API) IsEnabled() bool { - return a.Client != nil -} - -// GetSecret is a convenience wrapper for getting a secret from Secrets Manager. 
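Upload above spools src into a temporary file so the content type can be sniffed before the upload begins, then rewinds with Seek(0, 0) because the sniff consumes the reader. A standalone sketch of that spool-and-rewind pattern; net/http's DetectContentType stands in for the internal media package:

package main

import (
	"fmt"
	"io"
	"net/http"
	"os"
	"strings"
)

// sniff copies src to a temp file, detects a content type from the first
// bytes, and returns the rewound file ready for upload.
func sniff(src io.Reader) (*os.File, string, error) {
	tmp, err := os.CreateTemp("", "example")
	if err != nil {
		return nil, "", err
	}

	if _, err := io.Copy(tmp, src); err != nil {
		return nil, "", err
	}

	buf := make([]byte, 512)
	if _, err := tmp.ReadAt(buf, 0); err != nil && err != io.EOF {
		return nil, "", err
	}

	if _, err := tmp.Seek(0, 0); err != nil {
		return nil, "", err
	}

	return tmp, http.DetectContentType(buf), nil
}

func main() {
	f, mediaType, err := sniff(strings.NewReader(`{"hello":"world"}`))
	if err != nil {
		panic(err)
	}
	defer os.Remove(f.Name())
	defer f.Close()

	fmt.Println(mediaType) // text/plain; charset=utf-8
}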
-func (a *API) GetSecret(ctx aws.Context, secretName string) (secret string, err error) { - input := &secretsmanager.GetSecretValueInput{ - SecretId: aws.String(secretName), - VersionStage: aws.String("AWSCURRENT"), // VersionStage defaults to AWSCURRENT if unspecified - } - - ctx = context.WithoutCancel(ctx) - result, err := a.Client.GetSecretValueWithContext(ctx, input) - if err != nil { - return secret, fmt.Errorf("getsecretvalue secret %s: %v", secretName, err) - } - - if result.SecretString != nil { - secret = *result.SecretString - return secret, err - } - - return secret, err -} diff --git a/v1/internal/aws/sns/sns.go b/v1/internal/aws/sns/sns.go deleted file mode 100644 index 8708ad73..00000000 --- a/v1/internal/aws/sns/sns.go +++ /dev/null @@ -1,117 +0,0 @@ -package sns - -import ( - "context" - "fmt" - "os" - "strconv" - "strings" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/sns" - "github.com/aws/aws-sdk-go/service/sns/snsiface" - "github.com/aws/aws-xray-sdk-go/xray" - iaws "github.com/brexhq/substation/internal/aws" - "github.com/google/uuid" -) - -// New returns a configured SNS client. -func New(cfg iaws.Config) *sns.SNS { - conf, sess := iaws.New(cfg) - - c := sns.New(sess, conf) - if _, ok := os.LookupEnv("AWS_XRAY_DAEMON_ADDRESS"); ok { - xray.AWS(c.Client) - } - - return c -} - -// API wraps an SNS client interface. -type API struct { - Client snsiface.SNSAPI -} - -// IsEnabled checks whether a new client has been set. -func (a *API) IsEnabled() bool { - return a.Client != nil -} - -// Setup creates an SNS client. -func (a *API) Setup(cfg iaws.Config) { - a.Client = New(cfg) -} - -// Publish is a convenience wrapper for publishing a message to an SNS topic. -func (a *API) Publish(ctx aws.Context, arn string, data []byte) (*sns.PublishOutput, error) { - req := &sns.PublishInput{ - Message: aws.String(string(data)), - TopicArn: aws.String(arn), - } - - if strings.HasSuffix(arn, ".fifo") { - mgid := uuid.New().String() - req.MessageGroupId = aws.String(mgid) - } - - ctx = context.WithoutCancel(ctx) - resp, err := a.Client.PublishWithContext(ctx, req) - if err != nil { - return nil, fmt.Errorf("publish: topic %s: %v", arn, err) - } - - return resp, nil -} - -// PublishBatch is a convenience wrapper for publishing a batch of messages to an SNS topic. -func (a *API) PublishBatch(ctx aws.Context, topic string, data [][]byte) (*sns.PublishBatchOutput, error) { - mgid := uuid.New().String() - - var entries []*sns.PublishBatchRequestEntry - for idx, d := range data { - entry := &sns.PublishBatchRequestEntry{ - Id: aws.String(strconv.Itoa(idx)), - Message: aws.String(string(d)), - } - - if strings.HasSuffix(topic, ".fifo") { - entry.MessageGroupId = aws.String(mgid) - } - - entries = append(entries, entry) - } - - ctx = context.WithoutCancel(ctx) - resp, err := a.Client.PublishBatchWithContext( - ctx, - &sns.PublishBatchInput{ - PublishBatchRequestEntries: entries, - TopicArn: aws.String(topic), - }, - ) - - // if a message fails, then the message ID is used to select the - // original data that was in the message. this data is put in a - // new slice and recursively input into the function. 
- if resp.Failed != nil { - var retry [][]byte - for _, f := range resp.Failed { - idx, err := strconv.Atoi(*f.Id) - if err != nil { - return nil, err - } - - retry = append(retry, data[idx]) - } - - if len(retry) > 0 { - return a.PublishBatch(ctx, topic, retry) - } - } - - if err != nil { - return nil, fmt.Errorf("publish_batch: topic %s: %v", topic, err) - } - - return resp, nil -} diff --git a/v1/internal/aws/sqs/sqs.go b/v1/internal/aws/sqs/sqs.go deleted file mode 100644 index 5a31975f..00000000 --- a/v1/internal/aws/sqs/sqs.go +++ /dev/null @@ -1,118 +0,0 @@ -package sqs - -import ( - "context" - "fmt" - "os" - "strconv" - "strings" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/sqs" - "github.com/aws/aws-sdk-go/service/sqs/sqsiface" - "github.com/aws/aws-xray-sdk-go/xray" - iaws "github.com/brexhq/substation/internal/aws" - "github.com/google/uuid" -) - -// New returns a configured SQS client. -func New(cfg iaws.Config) *sqs.SQS { - conf, sess := iaws.New(cfg) - - c := sqs.New(sess, conf) - if _, ok := os.LookupEnv("AWS_XRAY_DAEMON_ADDRESS"); ok { - xray.AWS(c.Client) - } - - return c -} - -// API wraps an SQS client interface. -type API struct { - Client sqsiface.SQSAPI -} - -// IsEnabled checks whether a new client has been set. -func (a *API) IsEnabled() bool { - return a.Client != nil -} - -// Setup creates an SQS client. -func (a *API) Setup(cfg iaws.Config) { - a.Client = New(cfg) -} - -// SendMessage is a convenience wrapper for sending a message to an SQS queue. -func (a *API) SendMessage(ctx aws.Context, queue string, data []byte) (*sqs.SendMessageOutput, error) { - mgid := uuid.New().String() - - msg := &sqs.SendMessageInput{ - MessageBody: aws.String(string(data)), - QueueUrl: aws.String(queue), - } - - if strings.HasSuffix(queue, ".fifo") { - msg.MessageGroupId = aws.String(mgid) - } - - ctx = context.WithoutCancel(ctx) - resp, err := a.Client.SendMessageWithContext(ctx, msg) - if err != nil { - return nil, fmt.Errorf("send_message: queue %s: %v", queue, err) - } - - return resp, nil -} - -// SendMessageBatch is a convenience wrapper for sending multiple messages to an SQS queue. This function becomes recursive for any messages that failed the SendMessage operation. -func (a *API) SendMessageBatch(ctx aws.Context, queue string, data [][]byte) (*sqs.SendMessageBatchOutput, error) { - mgid := uuid.New().String() - - var messages []*sqs.SendMessageBatchRequestEntry - for idx, d := range data { - entry := &sqs.SendMessageBatchRequestEntry{ - Id: aws.String(strconv.Itoa(idx)), - MessageBody: aws.String(string(d)), - } - - if strings.HasSuffix(queue, ".fifo") { - entry.MessageGroupId = aws.String(mgid) - } - - messages = append(messages, entry) - } - - ctx = context.WithoutCancel(ctx) - resp, err := a.Client.SendMessageBatchWithContext( - ctx, - &sqs.SendMessageBatchInput{ - Entries: messages, - QueueUrl: aws.String(queue), - }, - ) - - // if a message fails, then the message ID is used to select the - // original data that was in the message. this data is put in a - // new slice and recursively input into the function. 
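The SNS and SQS batch wrappers share this retry convention: each entry's Id is its index into the original data slice, so failed entries can be mapped back to their source payloads and recursively re-sent. A standalone sketch of the mapping step (plain types, no SDK):

package main

import (
	"fmt"
	"strconv"
)

// retryable maps failed entry IDs (stringified indexes) back to the
// original payloads, mirroring PublishBatch and SendMessageBatch.
func retryable(failedIDs []string, data [][]byte) ([][]byte, error) {
	var retry [][]byte
	for _, id := range failedIDs {
		idx, err := strconv.Atoi(id)
		if err != nil {
			return nil, err
		}
		retry = append(retry, data[idx])
	}
	return retry, nil
}

func main() {
	data := [][]byte{[]byte("a"), []byte("b"), []byte("c")}
	retry, _ := retryable([]string{"1"}, data)
	fmt.Printf("%s\n", retry[0]) // b
}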
- if resp.Failed != nil { - var retry [][]byte - for _, r := range resp.Failed { - idx, err := strconv.Atoi(aws.StringValue(r.Id)) - if err != nil { - return nil, fmt.Errorf("send_message_batch: queue %s: %v", queue, err) - } - - retry = append(retry, data[idx]) - } - - if len(retry) > 0 { - return a.SendMessageBatch(ctx, queue, retry) - } - } - - if err != nil { - return nil, fmt.Errorf("send_message_batch: queue %s: %v", queue, err) - } - - return resp, nil -} diff --git a/v1/internal/bufio/bufio.go b/v1/internal/bufio/bufio.go deleted file mode 100644 index 179761ba..00000000 --- a/v1/internal/bufio/bufio.go +++ /dev/null @@ -1,124 +0,0 @@ -package bufio - -import ( - "bufio" - "compress/bzip2" - "compress/gzip" - "io" - "os" - "strconv" - - "github.com/brexhq/substation/internal/media" - "github.com/klauspost/compress/snappy" - "github.com/klauspost/compress/zstd" -) - -// MediaTypes natively supported by the bufio -// scanner. -var MediaTypes = []string{ - "application/x-bzip2", - "application/x-gzip", - "application/x-zstd", - "application/x-snappy-framed", - "text/plain; charset=utf-8", -} - -// NewScanner returns a new scanner. -func NewScanner() *scanner { - return &scanner{} -} - -// scanner wraps bufio.scanner and provides methods for scanning data. The caller is responsible -// for checking errors and closing the scanner when it is no longer needed. -type scanner struct { - *bufio.Scanner - // openHandles contains all handles that must be closed after scanning is complete. - openHandles []io.ReadCloser -} - -// ReadFile inspects, decompresses, and reads an open file into the scanner. These file compression -// formats are optionally supported: -// - bzip2 (https://en.wikipedia.org/wiki/Bzip2) -// - gzip (https://en.wikipedia.org/wiki/Gzip) -// - snappy (https://en.wikipedia.org/wiki/Snappy_(compression)) -// - zstd (https://en.wikipedia.org/wiki/Zstandard) -func (s *scanner) ReadFile(file *os.File) error { - var reader io.ReadCloser - s.openHandles = append(s.openHandles, file) - - if _, err := file.Seek(0, 0); err != nil { - return err - } - - mediaType, err := media.File(file) - if err != nil { - return err - } - - if _, err := file.Seek(0, 0); err != nil { - return err - } - - switch mediaType { - case "application/x-bzip2": - reader = io.NopCloser(bzip2.NewReader(file)) - s.openHandles = append(s.openHandles, reader) - case "application/x-gzip": - gzipReader, err := gzip.NewReader(file) - if err != nil { - return err - } - - reader = gzipReader - s.openHandles = append(s.openHandles, reader) - case "application/zstd": - zstdReader, err := zstd.NewReader(file) - if err != nil { - return err - } - - reader = io.NopCloser(zstdReader) - s.openHandles = append(s.openHandles, reader) - case "application/x-snappy-framed": - snappyReader := snappy.NewReader(file) - reader = io.NopCloser(snappyReader) - default: - // file was previously added to openHandles - reader = file - } - - s.Scanner = bufio.NewScanner(reader) - s.Scanner.Split(bufio.ScanLines) - - // Each line has a default capacity of 64 KB and a variable maximum capacity - // (defaults to 128 MB). - b := make([]byte, bufio.MaxScanTokenSize) - if mem, ok := os.LookupEnv("AWS_LAMBDA_FUNCTION_MEMORY_SIZE"); !ok { - s.Scanner.Buffer(b, (1000 * 1000 * 128)) - } else { - m, _ := strconv.ParseFloat(mem, 64) - // For AWS Lambda, the max capacity is 80% of the function's memory.
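- // For example, with AWS_LAMBDA_FUNCTION_MEMORY_SIZE=1024 the cap below works out - // to 1000000*int(1024*0.8) = 819,000,000 bytes, roughly 80% of 1 GB; without the - // variable the cap stays at the 128 MB default above.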
- s.Scanner.Buffer(b, 1000000*int(m*0.8)) - } - - return nil -} - -func (s *scanner) Err() error { - if err := s.Scanner.Err(); err != nil { - return err - } - - return nil -} - -// Close closes all open handles. -func (s *scanner) Close() error { - for _, h := range s.openHandles { - if err := h.Close(); err != nil { - return err - } - } - - return nil -} diff --git a/v1/internal/bufio/example_test.go b/v1/internal/bufio/example_test.go deleted file mode 100644 index 106dac1d..00000000 --- a/v1/internal/bufio/example_test.go +++ /dev/null @@ -1,45 +0,0 @@ -package bufio_test - -import ( - "fmt" - "os" - - "github.com/brexhq/substation/internal/bufio" -) - -func ExampleNewScanner_setup() { - s := bufio.NewScanner() - defer s.Close() -} - -func ExampleNewScanner_readFile() { - // temp file is used to simulate an open file and must be removed after the test completes - file, _ := os.CreateTemp("", "substation") - defer os.Remove(file.Name()) - - _, _ = file.Write([]byte("foo\nbar\nbaz")) - - // scanner closes all open handles, including the open file - s := bufio.NewScanner() - defer s.Close() - - // scanner automatically decompresses file and chooses appropriate scan method (default is "text") - if err := s.ReadFile(file); err != nil { - // handle error - panic(err) - } - - for s.Scan() { - fmt.Println(s.Text()) - } - - if err := s.Err(); err != nil { - // handle error - panic(err) - } - - // Output: - // foo - // bar - // baz -} diff --git a/v1/internal/config/config.go b/v1/internal/config/config.go deleted file mode 100644 index 11df2e45..00000000 --- a/v1/internal/config/config.go +++ /dev/null @@ -1,84 +0,0 @@ -// package config provides configuration types and functions for Substation. -// -// Any non-backwards compatible changes to the configuration types should be -// accompanied by a version bump. Use the guidance below for choosing the -// appropriate fields for configurations: -// -// For time-based configurations: -// -// - Use `Delay` for the amount of time to wait before executing. -// -// - Use `Timeout` for the amount of time to wait before interrupting -// an execution. -// -// - Use `Duration` for the total amount of time over many executions. -package config - -import ( - "encoding/json" - - "github.com/brexhq/substation/config" -) - -type Object struct { - // SourceKey retrieves a value from a JSON object. - SourceKey string `json:"source_key"` - // TargetKey place a value into a JSON object. - TargetKey string `json:"target_key"` - // BatchKey retrieves a value from a JSON object that is used to organize - // batched data (internal/aggregate). - BatchKey string `json:"batch_key"` -} - -type AWS struct { - // Region is the AWS region that the AWS client will use. - Region string `json:"region"` - // RoleARN is the AWS IAM role that the AWS client will assume. - RoleARN string `json:"role_arn"` -} - -type Metric struct { - // Name is the name of the metric. - Name string `json:"name"` - // Attributes are key-value pairs that are associated with the metric. - Attributes map[string]string `json:"attributes"` - // Destination is the metrics destination that the metric will be sent to (internal/metrics). - Destination config.Config `json:"destination"` -} - -type Request struct { - // Timeout is the amount of time that the request will wait before timing out. - Timeout string `json:"Timeout"` -} - -type Retry struct { - // Count is the maximum number of times that the action will be retried. This - // can be combined with the Delay field to create a backoff strategy. 
- Count int `json:"count"` - // Delay is the amount of time to wait before retrying the action. This can be - // combined with the Count field to create a backoff strategy. - Delay string `json:"delay"` - // ErrorMessages are regular expressions that match error messages and determine - // if the action should be retried. - ErrorMessages []string `json:"error_messages"` -} - -type Batch struct { - // Count is the maximum number of records that can be batched. - Count int `json:"count"` - // Size is the maximum size of the batch in bytes. - Size int `json:"size"` - // Duration is the maximum amount of time that records can be batched for. - Duration string `json:"duration"` -} - -// Decode marshals and unmarshals an input interface into the output interface -// using the standard library's json package. This should be used when decoding -// JSON configurations (i.e., Config) in Substation interface factories. -func Decode(input, output interface{}) error { - b, err := json.Marshal(input) - if err != nil { - return err - } - return json.Unmarshal(b, output) -} diff --git a/v1/internal/file/example_test.go b/v1/internal/file/example_test.go deleted file mode 100644 index eea009e2..00000000 --- a/v1/internal/file/example_test.go +++ /dev/null @@ -1,82 +0,0 @@ -package file_test - -import ( - "context" - "fmt" - "io" - "os" - "strings" - - "github.com/brexhq/substation/internal/file" -) - -func ExampleGet_local() { - // temp file is used to simulate an open file and must be removed after the test completes - temp, _ := os.CreateTemp("", "substation") - defer os.Remove(temp.Name()) - defer temp.Close() - - _, _ = temp.Write([]byte("foo\nbar\nbaz")) - - // a local copy of the file is created and must be removed when it's no longer needed, regardless of errors - path, err := file.Get(context.TODO(), temp.Name()) - defer os.Remove(path) - - if err != nil { - // handle err - panic(err) - } - - f, err := os.Open(path) - if err != nil { - // handle err - panic(err) - } - - defer f.Close() - - buf, err := io.ReadAll(f) - if err != nil { - // handle err - panic(err) - } - - fmt.Println(string(buf)) - - // Output: - // foo - // bar - // baz -} - -func ExampleGet_http() { - location := "https://example.com" - - // a local copy of the HTTP body is created and must be removed when it's no longer needed, regardless of errors - path, err := file.Get(context.TODO(), location) - defer os.Remove(path) - - if err != nil { - // handle err - panic(err) - } - - f, err := os.Open(path) - if err != nil { - // handle err - panic(err) - } - - defer f.Close() - - buf := make([]byte, 16) - if _, err = f.Read(buf); err != nil { - // handle err - panic(err) - } - - prefix := strings.HasPrefix(strings.ToUpper(string(buf)), "<!DOCTYPE HTML") - fmt.Println(prefix) -} diff --git a/v1/internal/kv/json_file.go b/v1/internal/kv/json_file.go deleted file mode 100644 index 73d9ef2d..00000000 --- a/v1/internal/kv/json_file.go +++ /dev/null @@ -1,146 +0,0 @@ -package kv - -import ( - "context" - "encoding/json" - "fmt" - "os" - "strings" - "sync" - - "github.com/brexhq/substation/config" - _config "github.com/brexhq/substation/internal/config" - "github.com/brexhq/substation/internal/errors" - "github.com/brexhq/substation/internal/file" - "github.com/tidwall/gjson" -) - -// errJSONFileInvalid is returned when the file contains invalid JSON. -var errJSONFileInvalid = fmt.Errorf("invalid JSON") - -// kvJSONFile is a read-only key-value store that is derived from a file containing -// an object and stored in memory.
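The store defined below relies on gjson's JSON Lines support: prefixing a path with ".." queries the whole document as if it were an array, which is how a key like "name" becomes "..#.name" and yields one result per line. A standalone sketch of that behavior (the data and key are illustrative):

package main

import (
	"fmt"

	"github.com/tidwall/gjson"
)

func main() {
	// Two JSON Lines records; "..#.name" gathers the key across every line.
	lines := "{\"name\":\"foo\"}\n{\"name\":\"bar\"}"

	res := gjson.Get(lines, "..#.name")
	for _, v := range res.Array() {
		fmt.Println(v.String()) // foo, then bar
	}
}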
-type kvJSONFile struct { - // File contains the location of the JSON file. This can be either a path on local - // disk, an HTTP(S) URL, or an AWS S3 URL. - File string `json:"file"` - // IsLines indicates that the file is a JSON Lines file. The first non-null value - // is returned when a key is found. - IsLines bool `json:"is_lines"` - - mu *sync.Mutex - object []byte -} - -// Create a new JSON file KV store. -func newKVJSONFile(cfg config.Config) (*kvJSONFile, error) { - var store kvJSONFile - if err := _config.Decode(cfg.Settings, &store); err != nil { - return nil, err - } - store.mu = new(sync.Mutex) - - if store.File == "" { - return nil, fmt.Errorf("kv: json: options %+v: %v", &store, errors.ErrMissingRequiredOption) - } - - return &store, nil -} - -func (store *kvJSONFile) String() string { - return toString(store) -} - -// Get retrieves a value from the store. -func (store *kvJSONFile) Get(ctx context.Context, key string) (interface{}, error) { - store.mu.Lock() - defer store.mu.Unlock() - - // JSON Lines files are queried as an array and the first non-null value is returned. - // See https://github.com/tidwall/gjson#json-lines for more information. - if store.IsLines && !strings.HasPrefix(key, "..#.") { - key = "..#." + key - res := gjson.GetBytes(store.object, key) - - for _, v := range res.Array() { - if v.Exists() { - return v.Value(), nil - } - } - - return nil, nil - } - - res := gjson.GetBytes(store.object, key) - if !res.Exists() { - return nil, nil - } - - return res.Value(), nil -} - -// Set is unused because this is a read-only store. -func (store *kvJSONFile) Set(ctx context.Context, key string, val interface{}) error { - return errSetNotSupported -} - -// SetWithTTL is unused because this is a read-only store. -func (store *kvJSONFile) SetWithTTL(ctx context.Context, key string, val interface{}, ttl int64) error { - return errSetNotSupported -} - -// SetAddWithTTL is unused because this is a read-only store. -func (store *kvJSONFile) SetAddWithTTL(ctx context.Context, key string, val interface{}, ttl int64) error { - return errSetNotSupported -} - -// IsEnabled returns true if the store is ready for use. -func (store *kvJSONFile) IsEnabled() bool { - store.mu.Lock() - defer store.mu.Unlock() - - return store.object != nil -} - -// Setup creates the store by reading the JSON file into memory. -func (store *kvJSONFile) Setup(ctx context.Context) error { - store.mu.Lock() - defer store.mu.Unlock() - - // avoids unnecessary setup - if store.object != nil { - return nil - } - - path, err := file.Get(ctx, store.File) - defer os.Remove(path) - if err != nil { - return fmt.Errorf("kv: json_file: %v", err) - } - - buf, err := os.ReadFile(path) - if err != nil { - return fmt.Errorf("kv: json_file: %v", err) - } - - if !json.Valid(buf) { - return fmt.Errorf("kv: json_file: %v", errJSONFileInvalid) - } - - store.object = buf - return nil -} - -// Close closes the store.
-func (store *kvJSONFile) Close() error { - store.mu.Lock() - defer store.mu.Unlock() - - // avoids unnecessary closing - if store.object == nil { - return nil - } - - store.object = nil - return nil -} diff --git a/v1/internal/kv/kv.go b/v1/internal/kv/kv.go deleted file mode 100644 index 717bcc37..00000000 --- a/v1/internal/kv/kv.go +++ /dev/null @@ -1,129 +0,0 @@ -package kv - -import ( - "context" - "encoding/json" - "fmt" - "sync" - - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/internal/errors" -) - -var ( - mu sync.Mutex - m map[string]Storer - lock map[string]Locker - // errSetNotSupported is returned when the KV set action is not supported. - errSetNotSupported = fmt.Errorf("set not supported") - // ErrNoLock is returned when a lock cannot be acquired. - ErrNoLock = fmt.Errorf("unable to acquire lock") -) - -// Storer provides tools for getting values from and putting values into key-value stores. -type Storer interface { - Get(context.Context, string) (interface{}, error) - Set(context.Context, string, interface{}) error - SetWithTTL(context.Context, string, interface{}, int64) error - SetAddWithTTL(context.Context, string, interface{}, int64) error - Setup(context.Context) error - Close() error - IsEnabled() bool -} - -// toString is required to support the Stringer interface. -func toString(s Storer) string { - b, _ := json.Marshal(s) - return string(b) -} - -// Get returns a Storer that is cached as a package-level global variable. -// This function and each Storer are safe for concurrent access. -func Get(cfg config.Config) (Storer, error) { - mu.Lock() - defer mu.Unlock() - - // KV store configurations are mapped using the "signature" of their config. - // This makes it possible for a single run of a Substation application to rely - // on multiple KV stores. - sig := fmt.Sprint(cfg) - store, ok := m[sig] - if ok { - return store, nil - } - - storer, err := New(cfg) - if err != nil { - return nil, err - } - m[sig] = storer - - return m[sig], nil -} - -// New returns a Storer. -func New(cfg config.Config) (Storer, error) { - switch t := cfg.Type; t { - case "aws_dynamodb": - return newKVAWSDynamoDB(cfg) - case "csv_file": - return newKVCSVFile(cfg) - case "json_file": - return newKVJSONFile(cfg) - case "memory": - return newKVMemory(cfg) - case "mmdb": - return newKVMMDB(cfg) - case "text_file": - return newKVTextFile(cfg) - default: - return nil, fmt.Errorf("kv_store: %s: %v", t, errors.ErrInvalidFactoryInput) - } -} - -type Locker interface { - Lock(context.Context, string, int64) error - Unlock(context.Context, string) error - Setup(context.Context) error - IsEnabled() bool -} - -// GetLocker returns a Locker that is cached as a package-level global variable. -// This function and each Locker are safe for concurrent access. -func GetLocker(cfg config.Config) (Locker, error) { - mu.Lock() - defer mu.Unlock() - - // KV store configurations are mapped using the "signature" of their config. - // This makes it possible for a single run of a Substation application to rely - // on multiple KV stores.
- sig := fmt.Sprint(cfg) - locker, ok := lock[sig] - if ok { - return locker, nil - } - - locker, err := NewLocker(cfg) - if err != nil { - return nil, err - } - lock[sig] = locker - - return lock[sig], nil -} - -func NewLocker(cfg config.Config) (Locker, error) { - switch t := cfg.Type; t { - case "aws_dynamodb": - return newKVAWSDynamoDB(cfg) - case "memory": - return newKVMemory(cfg) - default: - return nil, fmt.Errorf("kv_store locker: %s: %v", t, errors.ErrInvalidFactoryInput) - } -} - -func init() { - m = make(map[string]Storer) - lock = make(map[string]Locker) -} diff --git a/v1/internal/kv/memory.go b/v1/internal/kv/memory.go deleted file mode 100644 index b59c524c..00000000 --- a/v1/internal/kv/memory.go +++ /dev/null @@ -1,224 +0,0 @@ -package kv - -import ( - "container/list" - "context" - "slices" - "sync" - "time" - - "github.com/brexhq/substation/config" - _config "github.com/brexhq/substation/internal/config" -) - -// kvMemory is a read-write key-value store that is stored in memory. -// -// This KV store uses least recently used (LRU) eviction and optionally supports -// per-value time-to-live (TTL). -type kvMemory struct { - // Capacity limits the maximum capacity of the store. - // - // This is optional and defaults to 1024 values. - Capacity int `json:"capacity"` - mu sync.Mutex - lockMu sync.Mutex - lru list.List - items map[string]*list.Element -} - -// Create a new memory KV store. -func newKVMemory(cfg config.Config) (*kvMemory, error) { - var store kvMemory - if err := _config.Decode(cfg.Settings, &store); err != nil { - return nil, err - } - - return &store, nil -} - -func (store *kvMemory) String() string { - return toString(store) -} - -type kvMemoryElement struct { - key string - value interface{} - ttl int64 -} - -// Get retrieves a value from the store. If the value had a time-to-live (TTL) -// configured when it was added and the TTL has passed, then nothing is returned. -func (store *kvMemory) Get(ctx context.Context, key string) (interface{}, error) { - store.mu.Lock() - defer store.mu.Unlock() - - if node, found := store.items[key]; found { - ttl := node.Value.(kvMemoryElement).ttl - - // a zero value for ttl indicates that ttl is not configured for the item - if ttl != 0 && ttl <= time.Now().Unix() { - delete(store.items, key) - store.lru.Remove(node) - - return nil, nil - } - - // resetting the position of the node prevents recently accessed items from being evicted - store.lru.MoveToFront(node) - return node.Value.(kvMemoryElement).value, nil - } - - return nil, nil -} - -// Set adds a value to the store. If the addition causes the capacity of the store to -// exceed the configured limit, then the least recently accessed value is removed from -// the store. -func (store *kvMemory) Set(ctx context.Context, key string, val interface{}) error { - return store.SetWithTTL(ctx, key, val, 0) -} - -// SetWithTTL adds a value to the store with a time-to-live (TTL). If the addition -// causes the capacity of the store to exceed the configured limit, then the least -// recently accessed value is removed from the store. 
-func (store *kvMemory) SetWithTTL(ctx context.Context, key string, val interface{}, ttl int64) error { - store.mu.Lock() - defer store.mu.Unlock() - - value := kvMemoryElement{key, val, ttl} - - if node, ok := store.items[key]; ok { - // resetting the position of the node prevents recently accessed items from being evicted - store.lru.MoveToFront(node) - node.Value = value - - return nil - } - - store.lru.PushFront(value) - store.items[key] = store.lru.Front() - - if store.lru.Len() > store.Capacity { - node := store.lru.Back() - - store.lru.Remove(node) - delete(store.items, node.Value.(kvMemoryElement).key) - } - - return nil -} - -// Lock adds an item to the store if it does not already exist. If the item already exists -// and the time-to-live (TTL) has not expired, then this returns ErrNoLock. -func (store *kvMemory) Lock(ctx context.Context, key string, ttl int64) error { - store.lockMu.Lock() - defer store.lockMu.Unlock() - - if node, ok := store.items[key]; ok { - ttl := node.Value.(kvMemoryElement).ttl - if ttl <= time.Now().Unix() { - delete(store.items, key) - store.lru.Remove(node) - } - - return ErrNoLock - } - - return store.SetWithTTL(ctx, key, nil, ttl) -} - -// SetAddWithTTL appends a value to a set (unique list) in the store. If the list does not -// exist, then it is created. -// -// If the TTL value is zero, then the item will not expire (this behavior is managed by the -// Get method). -func (store *kvMemory) SetAddWithTTL(ctx context.Context, key string, val interface{}, ttl int64) error { - store.mu.Lock() - defer store.mu.Unlock() - - // List already exists for the key. - if node, ok := store.items[key]; ok { - // Resetting the position of the node prevents recently accessed items from being evicted - store.lru.MoveToFront(node) - - // This implements the behavior of a set (unique list). - if slices.Contains(node.Value.(kvMemoryElement).value.([]interface{}), val) { - return nil - } - - node.Value = kvMemoryElement{ - key: key, - value: append(node.Value.(kvMemoryElement).value.([]interface{}), val), - ttl: ttl, // Always update the TTL value. Zero values are ignored on retrieval. - } - - return nil - } - - // No list exists for the key. - store.lru.PushFront(kvMemoryElement{key, []interface{}{val}, ttl}) - store.items[key] = store.lru.Front() - - if store.lru.Len() > store.Capacity { - node := store.lru.Back() - - store.lru.Remove(node) - delete(store.items, node.Value.(kvMemoryElement).key) - } - - return nil -} - -// Unlock removes an item from the store. -func (store *kvMemory) Unlock(ctx context.Context, key string) error { - store.lockMu.Lock() - defer store.lockMu.Unlock() - - if node, ok := store.items[key]; ok { - store.lru.Remove(node) - delete(store.items, key) - } - - return nil -} - -// IsEnabled returns true if the store is ready for use. -func (store *kvMemory) IsEnabled() bool { - store.mu.Lock() - defer store.mu.Unlock() - - return store.items != nil -} - -// Setup creates the store. -func (store *kvMemory) Setup(ctx context.Context) error { - store.mu.Lock() - defer store.mu.Unlock() - - // avoids unnecessary setup - if store.items != nil { - return nil - } - - store.items = make(map[string]*list.Element) - - if store.Capacity == 0 { - store.Capacity = 1024 - } - - return nil -} - -// Closes the store. 
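Before the Close method below, a hedged sketch of how the LRU eviction and TTL behaviors described above combine. kv is an internal package, and the capacity and TTL values are illustrative.

    package main

    import (
    	"context"
    	"fmt"
    	"time"

    	"github.com/brexhq/substation/config"
    	"github.com/brexhq/substation/internal/kv" // internal: importable only inside this module
    )

    func main() {
    	ctx := context.Background()

    	cfg := config.Config{
    		Type:     "memory",
    		Settings: map[string]interface{}{"capacity": 2},
    	}
    	store, err := kv.New(cfg)
    	if err != nil {
    		panic(err)
    	}
    	if err := store.Setup(ctx); err != nil { // allocates the map; capacity defaults to 1024 if unset
    		panic(err)
    	}

    	_ = store.Set(ctx, "a", 1)                                             // zero TTL: never expires
    	_ = store.SetWithTTL(ctx, "b", 2, time.Now().Add(time.Minute).Unix()) // TTL is absolute epoch seconds
    	_ = store.Set(ctx, "c", 3)                                             // exceeds capacity; evicts "a" (least recently used)

    	v, _ := store.Get(ctx, "a")
    	fmt.Println(v) // <nil>
    }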
-func (store *kvMemory) Close() error { - store.mu.Lock() - defer store.mu.Unlock() - - // avoids unnecessary closing - if store.items == nil { - return nil - } - - store.items = nil - return nil -} diff --git a/v1/internal/kv/mmdb.go b/v1/internal/kv/mmdb.go deleted file mode 100644 index 192f818c..00000000 --- a/v1/internal/kv/mmdb.go +++ /dev/null @@ -1,139 +0,0 @@ -//go:build !wasm - -package kv - -import ( - "context" - "fmt" - "net" - "os" - "sync" - - "github.com/brexhq/substation/config" - _config "github.com/brexhq/substation/internal/config" - "github.com/brexhq/substation/internal/errors" - "github.com/brexhq/substation/internal/file" - "github.com/oschwald/maxminddb-golang" -) - -// errMMDBKeyMustBeAddr is returned when the key used in a Get call is not a valid -// IP address. -var errMMDBKeyMustBeAddr = fmt.Errorf("key must be IP address") - -// kvMMDB is a read-only key-value store that is derived from any MaxMind database -// format (MMDB) file. -// -// MMDB is an open source database file format that maps IPv4 and IPv6 addresses to -// data records, and is most commonly utilized by MaxMind GeoIP databases. Learn more -// about the file format here: https://maxmind.github.io/MaxMind-DB/. -type kvMMDB struct { - // File contains the location of the MMDB file. This can be either a path on local - // disk, an HTTP(S) URL, or an AWS S3 URL. - File string `json:"file"` - mu sync.RWMutex - reader *maxminddb.Reader -} - -// Create a new MMDB KV store. -func newKVMMDB(cfg config.Config) (*kvMMDB, error) { - var store kvMMDB - if err := _config.Decode(cfg.Settings, &store); err != nil { - return nil, err - } - - if store.File == "" { - return nil, fmt.Errorf("kv: mmdb: options %+v: %v", &store, errors.ErrMissingRequiredOption) - } - - return &store, nil -} - -func (store *kvMMDB) String() string { - return toString(store) -} - -// Get retrieves a value from the store. -func (store *kvMMDB) Get(ctx context.Context, key string) (interface{}, error) { - store.mu.RLock() - defer store.mu.RUnlock() - - addr := net.ParseIP(key) - if addr == nil { - // Does not include the key that caused the error to avoid leaking - // private data. This should be wrapped by the caller, which can - // provide more information about what caused the error. - return nil, fmt.Errorf("kv: mmdb: %v", errMMDBKeyMustBeAddr) - } - - var r interface{} - if err := store.reader.Lookup(addr, &r); err != nil { - return nil, fmt.Errorf("kv: mmdb: %v", err) - } - - return r, nil -} - -// Set is unused because this is a read-only store. -func (store *kvMMDB) Set(ctx context.Context, key string, val interface{}) error { - return errSetNotSupported -} - -// SetWithTTL is unused because this is a read-only store. -func (store *kvMMDB) SetWithTTL(ctx context.Context, key string, val interface{}, ttl int64) error { - return errSetNotSupported -} - -// SetAddWithTTL is unused because this is a read-only store. -func (store *kvMMDB) SetAddWithTTL(ctx context.Context, key string, val interface{}, ttl int64) error { - return errSetNotSupported -} - -// IsEnabled returns true if the store is ready for use. -func (store *kvMMDB) IsEnabled() bool { - store.mu.Lock() - defer store.mu.Unlock() - - return store.reader != nil -} - -// Setup creates the store by reading the MMDB file into memory.
-func (store *kvMMDB) Setup(ctx context.Context) error { - store.mu.Lock() - defer store.mu.Unlock() - - // avoids unnecessary setup - if store.reader != nil { - return nil - } - - path, err := file.Get(ctx, store.File) - defer os.Remove(path) - if err != nil { - return fmt.Errorf("kv: mmdb: %v", err) - } - - db, err := maxminddb.Open(path) - if err != nil { - return fmt.Errorf("kv: mmdb: %v", err) - } - - store.reader = db - return nil -} - -// Closes the store. -func (store *kvMMDB) Close() error { - store.mu.Lock() - defer store.mu.Unlock() - - // avoids unnecessary closing - if store.reader == nil { - return nil - } - - if err := store.reader.Close(); err != nil { - return fmt.Errorf("kv: mmdb: %v", err) - } - - return nil -} diff --git a/v1/internal/kv/text_file.go b/v1/internal/kv/text_file.go deleted file mode 100644 index bb09fda4..00000000 --- a/v1/internal/kv/text_file.go +++ /dev/null @@ -1,140 +0,0 @@ -package kv - -import ( - "bufio" - "context" - "fmt" - "os" - "sync" - - "github.com/brexhq/substation/config" - _config "github.com/brexhq/substation/internal/config" - "github.com/brexhq/substation/internal/errors" - "github.com/brexhq/substation/internal/file" -) - -// kvTextFile is a read-only key-value store that is derived from a newline delimited -// text file and stored in memory. -// -// Rows from the text file are stored in a slice where each element becomes the key and -// the value is a boolean true. -// -// For example, if the file contains this data: -// -// foo -// bar -// baz -// -// The store becomes this: -// -// map[foo:true bar:true baz:true] -type kvTextFile struct { - // File contains the location of the text file. This can be either a path on local - // disk, an HTTP(S) URL, or an AWS S3 URL. - File string `json:"file"` - mu sync.Mutex - items []string -} - -// Create a new text file KV store. -func newKVTextFile(cfg config.Config) (*kvTextFile, error) { - var store kvTextFile - if err := _config.Decode(cfg.Settings, &store); err != nil { - return nil, err - } - - if store.File == "" { - return nil, fmt.Errorf("kv: text_file: options %+v: %v", &store, errors.ErrMissingRequiredOption) - } - - return &store, nil -} - -func (store *kvTextFile) String() string { - return toString(store) -} - -// Get retrieves a value from the store. -func (store *kvTextFile) Get(ctx context.Context, key string) (interface{}, error) { - store.mu.Lock() - defer store.mu.Unlock() - - return store.contains(key), nil -} - -// Set is unused because this is a read-only store. -func (store *kvTextFile) Set(ctx context.Context, key string, val interface{}) error { - return errSetNotSupported -} - -// SetWithTTL is unused because this is a read-only store. -func (store *kvTextFile) SetWithTTL(ctx context.Context, key string, val interface{}, ttl int64) error { - return errSetNotSupported -} - -// SetAddWithTTL is unused because this is a read-only store. -func (store *kvTextFile) SetAddWithTTL(ctx context.Context, key string, val interface{}, ttl int64) error { - return errSetNotSupported -} - -// IsEnabled returns true if the store is ready for use. -func (store *kvTextFile) IsEnabled() bool { - store.mu.Lock() - defer store.mu.Unlock() - - return store.items != nil -} - -// Setup creates the store by reading the text file into memory. 
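Before the Setup method below, a hedged sketch of the membership-style lookups this store supports (kv is an internal package; the file path is hypothetical).

    package main

    import (
    	"context"
    	"fmt"

    	"github.com/brexhq/substation/config"
    	"github.com/brexhq/substation/internal/kv" // internal: importable only inside this module
    )

    func main() {
    	ctx := context.Background()

    	// Hypothetical newline-delimited file containing foo, bar, and baz.
    	cfg := config.Config{
    		Type:     "text_file",
    		Settings: map[string]interface{}{"file": "/tmp/allowlist.txt"},
    	}
    	store, err := kv.New(cfg)
    	if err != nil {
    		panic(err)
    	}
    	if err := store.Setup(ctx); err != nil { // reads the file into memory
    		panic(err)
    	}

    	v, _ := store.Get(ctx, "foo") // true only if "foo" is a row in the file
    	fmt.Println(v)
    }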
-func (store *kvTextFile) Setup(ctx context.Context) error { - store.mu.Lock() - defer store.mu.Unlock() - - // avoids unnecessary setup - if store.items != nil { - return nil - } - - path, err := file.Get(ctx, store.File) - defer os.Remove(path) - if err != nil { - return fmt.Errorf("kv: text_file: %v", err) - } - - f, err := os.Open(path) - if err != nil { - return fmt.Errorf("kv: text_file: %v", err) - } - defer f.Close() - - scanner := bufio.NewScanner(f) - for scanner.Scan() { - store.items = append(store.items, scanner.Text()) - } - - return nil -} - -// Closes the store. -func (store *kvTextFile) Close() error { - store.mu.Lock() - defer store.mu.Unlock() - - // avoids unnecessary closing - if store.items == nil { - return nil - } - - store.items = nil - return nil -} - -func (store *kvTextFile) contains(key string) bool { - for _, item := range store.items { - if item == key { - return true - } - } - - return false -} diff --git a/v1/internal/media/example_test.go b/v1/internal/media/example_test.go deleted file mode 100644 index 7cb63878..00000000 --- a/v1/internal/media/example_test.go +++ /dev/null @@ -1,63 +0,0 @@ -package media_test - -import ( - "fmt" - "os" - - "github.com/brexhq/substation/internal/media" -) - -func ExampleBytes() { - b := []byte("\x42\x5a\x68") - mediaType := media.Bytes(b) - - fmt.Println(mediaType) - // Output: application/x-bzip2 -} - -func ExampleFile() { - // temp file is used to simulate an open file and must be removed after the test completes - file, _ := os.CreateTemp("", "substation") - defer os.Remove(file.Name()) - defer file.Close() - - _, _ = file.Write([]byte("\x42\x5a\x68")) - - // media.File moves the file offset to zero - mediaType, err := media.File(file) - if err != nil { - // handle err - panic(err) - } - - fmt.Println(mediaType) - // Output: application/x-bzip2 -} - -func Example_switch() { - bytes := [][]byte{ - // application/x-bzip2 - []byte("\x42\x5a\x68"), - // application/x-gzip - []byte("\x1f\x8b\x08"), - // text/html - []byte("\x3c\x68\x74\x6d\x6c\x3e"), - } - - for _, b := range bytes { - // use a switch statement to contextually distribute data to other functions - switch media.Bytes(b) { - case "application/x-bzip2": - continue - // bzip2(b) - case "application/x-gzip": - continue - // gzip(b) - case "text/html; charset=utf-8": - continue - // html(b) - default: - continue - } - } -} diff --git a/v1/internal/metrics/README.md b/v1/internal/metrics/README.md deleted file mode 100644 index 4053cac8..00000000 --- a/v1/internal/metrics/README.md +++ /dev/null @@ -1,5 +0,0 @@ -# metrics - -Contains interfaces and methods for generating application metrics and sending them to external services. Metrics can be generated anywhere in the application and optionally sent to a single external service. The naming convention for metrics names and attributes is PascalCase, also known as upper camel case (e.g. UpperCamelCase). - -Information for each metrics generator is available in the [GoDoc](https://pkg.go.dev/github.com/brexhq/substation/internal/metrics). 
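A hedged sketch of the generator interface documented above, using the embedded metrics type from the next file. metrics is an internal package, and the metric name and attributes are illustrative.

    package main

    import (
    	"context"

    	"github.com/brexhq/substation/config"
    	"github.com/brexhq/substation/internal/metrics" // internal: importable only inside this module
    )

    func main() {
    	ctx := context.Background()

    	gen, err := metrics.New(ctx, config.Config{
    		Type: "aws_cloudwatch_embedded_metrics",
    	})
    	if err != nil {
    		panic(err)
    	}

    	// Names and attributes follow the PascalCase convention.
    	data := metrics.Data{
    		Name:       "MessagesReceived",
    		Value:      1,
    		Attributes: map[string]string{"FunctionName": "example"},
    	}
    	if err := gen.Generate(ctx, data); err != nil {
    		panic(err)
    	}
    	// In AWS Lambda, the EMF JSON written to stdout is ingested by CloudWatch.
    }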
diff --git a/v1/internal/metrics/aws_cloudwatch_embedded_metrics.go b/v1/internal/metrics/aws_cloudwatch_embedded_metrics.go deleted file mode 100644 index f1c63b86..00000000 --- a/v1/internal/metrics/aws_cloudwatch_embedded_metrics.go +++ /dev/null @@ -1,77 +0,0 @@ -package metrics - -import ( - "context" - "fmt" - "time" - - "github.com/brexhq/substation/config" - iconfig "github.com/brexhq/substation/internal/config" - "github.com/tidwall/sjson" -) - -type awsCloudWatchEmbeddedMetricsConfig struct{} - -// awsCloudWatchEmbeddedMetrics creates a metric in the AWS Embedded Metrics -// Format and writes it to standard output. This is the preferred method for -// generating metrics from AWS Lambda functions. Read more about the Embedded -// Metrics Format specification here: -// https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch_Embedded_Metric_Format_Specification.html. -type awsCloudWatchEmbeddedMetrics struct { - conf awsCloudWatchEmbeddedMetricsConfig -} - -func newAWSCloudWatchEmbeddedMetrics(_ context.Context, cfg config.Config) (*awsCloudWatchEmbeddedMetrics, error) { - conf := awsCloudWatchEmbeddedMetricsConfig{} - if err := iconfig.Decode(cfg.Settings, &conf); err != nil { - return nil, err - } - - return &awsCloudWatchEmbeddedMetrics{ - conf: conf, - }, nil -} - -func (m *awsCloudWatchEmbeddedMetrics) Generate(ctx context.Context, data Data) (err error) { - emf := []byte{} - - emf, err = sjson.SetBytes(emf, "_aws.Timestamp", time.Now().UnixMilli()) - if err != nil { - return fmt.Errorf("metrics log_embedded_metrics: %v", err) - } - - emf, err = sjson.SetBytes(emf, "_aws.CloudWatchMetrics.0.Namespace", metricsApplication) - if err != nil { - return fmt.Errorf("metrics log_embedded_metrics: %v", err) - } - - var dimensions []string - for key, val := range data.Attributes { - dimensions = append(dimensions, key) - - emf, err = sjson.SetBytes(emf, key, val) - if err != nil { - return fmt.Errorf("metrics log_embedded_metrics: %v", err) - } - } - - emf, err = sjson.SetBytes(emf, "_aws.CloudWatchMetrics.0.Dimensions.-1", dimensions) - if err != nil { - return fmt.Errorf("metrics log_embedded_metrics: %v", err) - } - - emf, err = sjson.SetBytes(emf, "_aws.CloudWatchMetrics.0.Metrics.0.Name", data.Name) - if err != nil { - return fmt.Errorf("metrics log_embedded_metrics: %v", err) - } - - emf, err = sjson.SetBytes(emf, data.Name, data.Value) - if err != nil { - return fmt.Errorf("metrics log_embedded_metrics: %v", err) - } - - // Logging EMF to standard out in AWS Lambda automatically sends metrics to CloudWatch. - fmt.Println(string(emf)) - - return nil -} diff --git a/v1/internal/metrics/metrics.go b/v1/internal/metrics/metrics.go deleted file mode 100644 index ee0006aa..00000000 --- a/v1/internal/metrics/metrics.go +++ /dev/null @@ -1,49 +0,0 @@ -package metrics - -import ( - "context" - "fmt" - - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/internal/errors" -) - -const ( - metricsApplication = "Substation" -) - -// Data contains a metric that can be sent to external services. -type Data struct { - // Contextual information related to the metric. If the external service accepts key-value pairs (e.g., identifiers, tags), then this is passed directly to the service. - Attributes map[string]string - - // A short name that describes the metric. This is passed directly to the external service and should use the upper camel case (UpperCamelCase) naming convention. - Name string - - // The metric data point. 
This value is converted to the correct data type before being sent to the external service. - Value interface{} -} - -// AddAttributes is a convenience method for adding attributes to a metric. -func (d *Data) AddAttributes(attr map[string]string) { - if d.Attributes == nil { - d.Attributes = make(map[string]string) - } - - for key, val := range attr { - d.Attributes[key] = val - } -} - -type Generator interface { - Generate(context.Context, Data) error -} - -func New(ctx context.Context, cfg config.Config) (Generator, error) { - switch cfg.Type { - case "aws_cloudwatch_embedded_metrics": - return newAWSCloudWatchEmbeddedMetrics(ctx, cfg) - default: - return nil, fmt.Errorf("metrics: new: type %q settings %+v: %v", cfg.Type, cfg.Settings, errors.ErrInvalidFactoryInput) - } -} diff --git a/v1/internal/secrets/aws_secrets_manager.go b/v1/internal/secrets/aws_secrets_manager.go deleted file mode 100644 index 97323807..00000000 --- a/v1/internal/secrets/aws_secrets_manager.go +++ /dev/null @@ -1,99 +0,0 @@ -package secrets - -import ( - "context" - "fmt" - "time" - - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/internal/aws" - "github.com/brexhq/substation/internal/aws/secretsmanager" - iconfig "github.com/brexhq/substation/internal/config" - "github.com/brexhq/substation/internal/errors" -) - -type awsSecretsManagerConfig struct { - ID string `json:"id"` - Name string `json:"name"` - TTLOffset string `json:"ttl_offset"` - AWS iconfig.AWS `json:"aws"` - Retry iconfig.Retry `json:"retry"` -} - -func (c *awsSecretsManagerConfig) Decode(in interface{}) error { - return iconfig.Decode(in, c) -} - -func (c *awsSecretsManagerConfig) Validate() error { - if c.ID == "" { - return fmt.Errorf("id: %v", errors.ErrMissingRequiredOption) - } - - if c.Name == "" { - return fmt.Errorf("name: %v", errors.ErrMissingRequiredOption) - } - - return nil -} - -type awsSecretsManager struct { - conf awsSecretsManagerConfig - - ttl int64 - // client is safe for concurrent access. - client secretsmanager.API -} - -func newAWSSecretsManager(_ context.Context, cfg config.Config) (*awsSecretsManager, error) { - conf := awsSecretsManagerConfig{} - if err := conf.Decode(cfg.Settings); err != nil { - return nil, fmt.Errorf("secrets: aws_secrets_manager: %v", err) - } - - if err := conf.Validate(); err != nil { - return nil, fmt.Errorf("secrets: aws_secrets_manager: %v", err) - } - - ttl := conf.TTLOffset - if ttl == "" { - ttl = defaultTTL - } - - dur, err := time.ParseDuration(ttl) - if err != nil { - return nil, fmt.Errorf("secrets: environment_variable: %v", err) - } - - c := &awsSecretsManager{ - conf: conf, - ttl: time.Now().Add(dur).Unix(), - } - - c.client.Setup(aws.Config{ - Region: conf.AWS.Region, - RoleARN: conf.AWS.RoleARN, - MaxRetries: conf.Retry.Count, - RetryableErrors: conf.Retry.ErrorMessages, - }) - - return c, nil -} - -func (c *awsSecretsManager) Retrieve(ctx context.Context) error { - v, err := c.client.GetSecret(ctx, c.conf.Name) - if err != nil { - return fmt.Errorf("secrets: environment_variable: name %s: %v", c.conf.Name, err) - } - - // SetWithTTL isn't used here because the TTL is managed by - // transform/utility_secret.go. 
- if err := cache.Set(ctx, c.conf.ID, v); err != nil { - return fmt.Errorf("secrets: environment_variable: id %s: %v", c.conf.ID, err) - } - - return nil -} - -func (c *awsSecretsManager) Expired() bool { - return time.Now().Unix() >= c.ttl -} diff --git a/v1/internal/secrets/environment_variable.go b/v1/internal/secrets/environment_variable.go deleted file mode 100644 index b9ec740f..00000000 --- a/v1/internal/secrets/environment_variable.go +++ /dev/null @@ -1,82 +0,0 @@ -package secrets - -import ( - "context" - "fmt" - "os" - "time" - - "github.com/brexhq/substation/config" - iconfig "github.com/brexhq/substation/internal/config" - "github.com/brexhq/substation/internal/errors" -) - -type environmentVariableConfig struct { - ID string `json:"id"` - Name string `json:"name"` - TTLOffset string `json:"ttl_offset"` -} - -func (c *environmentVariableConfig) Decode(in interface{}) error { - return iconfig.Decode(in, c) -} - -func (c *environmentVariableConfig) Validate() error { - if c.ID == "" { - return fmt.Errorf("id: %v", errors.ErrMissingRequiredOption) - } - - if c.Name == "" { - return fmt.Errorf("name: %v", errors.ErrMissingRequiredOption) - } - - return nil -} - -type env struct { - conf environmentVariableConfig - - ttl int64 -} - -func newEnvironmentVariable(_ context.Context, cfg config.Config) (*env, error) { - conf := environmentVariableConfig{} - if err := conf.Decode(cfg.Settings); err != nil { - return nil, fmt.Errorf("secrets: environment_variable: %v", err) - } - - if err := conf.Validate(); err != nil { - return nil, fmt.Errorf("secrets: environment_variable: %v", err) - } - - ttl := conf.TTLOffset - if ttl == "" { - ttl = defaultTTL - } - - dur, err := time.ParseDuration(ttl) - if err != nil { - return nil, fmt.Errorf("secrets: environment_variable: %v", err) - } - - return &env{ - conf: conf, - ttl: time.Now().Add(dur).Unix(), - }, nil -} - -func (c *env) Retrieve(ctx context.Context) error { - if v, ok := os.LookupEnv(c.conf.Name); ok { - // SetWithTTL isn't used here because the TTL is managed by - // transform/utility_secret.go. - if err := cache.Set(ctx, c.conf.ID, v); err != nil { - return fmt.Errorf("secrets: environment_variable: id %s: %v", c.conf.ID, err) - } - } - - return nil -} - -func (c *env) Expired() bool { - return time.Now().Unix() >= c.ttl -} diff --git a/v1/internal/secrets/secrets.go b/v1/internal/secrets/secrets.go deleted file mode 100644 index 866c7c53..00000000 --- a/v1/internal/secrets/secrets.go +++ /dev/null @@ -1,100 +0,0 @@ -// Package secrets provides functions for retrieving local and remote secrets and interpolating them into configuration files. -package secrets - -import ( - "context" - "fmt" - "regexp" - "strings" - - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/internal/errors" - "github.com/brexhq/substation/internal/kv" -) - -var ( - // interpRe is used for parsing secrets during interpolation. Secrets - // must not contain any curly braces. - interpRe = regexp.MustCompile(`\${(SECRET:[^}]+)}`) - // KV store is used as a secrets cache - cache kv.Storer -) - -// defaultTTL enforces a 15 minute rotation for all secrets stored in memory. 
-const defaultTTL = "15m" - -type Retriever interface { - Retrieve(context.Context) error - Expired() bool -} - -func New(ctx context.Context, cfg config.Config) (Retriever, error) { - switch cfg.Type { - case "aws_secrets_manager": - return newAWSSecretsManager(ctx, cfg) - case "environment_variable": - return newEnvironmentVariable(ctx, cfg) - default: - return nil, fmt.Errorf("secrets: new: type %q settings %+v: %v", cfg.Type, cfg.Settings, errors.ErrInvalidFactoryInput) - } -} - -// Interpolate identifies when a string contains one or more secrets and -// interpolates each secret with the string. This function uses the same -// convention as the standard library's regexp package for capturing named -// groups (${name}). -// -// For example, if the string is "/path/to/${SECRET:FOO}" and BAR is the -// secret value stored in the internal lookup, then the interpolated string -// is "/path/to/BAR". -// -// Multiple secrets can be stored in a single string; if the string is -// "/path/to/${SECRET:FOO}/${SECRET:BAZ}", then the interpolated string -// is "/path/to/BAR/QUX". -// -// If more than one interpolation function is applied to a string (e.g., non-secrets -// capture groups), then this function must be called first. -func Interpolate(ctx context.Context, s string) (string, error) { - if !strings.Contains(s, "${SECRET") { - return s, nil - } - - matches := interpRe.FindAllStringSubmatch(s, -1) - for _, m := range matches { - if len(m) == 0 { - continue - } - - secretName := strings.ReplaceAll(m[len(m)-1], "SECRET:", "") - secret, err := cache.Get(ctx, secretName) - if err != nil { - return "", err - } - - // Replaces each substring with a secret. If the secret is - // BAR and the string was "/path/to/secret/${SECRET:FOO}", - // then the interpolated string output is "/path/to/secret/BAR". - old := fmt.Sprintf("${%s}", m[len(m)-1]) - s = strings.Replace(s, old, secret.(string), 1) - } - - return s, nil -} - -func init() { - kv, err := kv.New(config.Config{ - Type: "memory", - Settings: map[string]interface{}{ - "capacity": 1000, - }, - }) - if err != nil { - panic(err) - } - - if err := kv.Setup(context.TODO()); err != nil { - panic(err) - } - - cache = kv -} diff --git a/v1/internal/secrets/secrets_test.go b/v1/internal/secrets/secrets_test.go deleted file mode 100644 index 7222c5da..00000000 --- a/v1/internal/secrets/secrets_test.go +++ /dev/null @@ -1,43 +0,0 @@ -package secrets - -import ( - "context" - "testing" - - "github.com/brexhq/substation/config" -) - -func TestCollect(t *testing.T) { - t.Setenv("FOO", "bar") - - ctx := context.Background() - - cfg := config.Config{ - Type: "environment_variable", - Settings: map[string]interface{}{ - "id": "id", - "name": "FOO", - }, - } - - ret, err := New(ctx, cfg) - if err != nil { - // handle error - panic(err) - } - - if err := ret.Retrieve(ctx); err != nil { - // handle error - panic(err) - } - - interp, err := Interpolate(context.TODO(), "/path/to/secret/${SECRET:id}") - if err != nil { - // handle error - panic(err) - } - - if interp != "/path/to/secret/bar" { - t.Fatalf("unexpected interpolation: %s", interp) - } -} diff --git a/v1/message/message.go b/v1/message/message.go deleted file mode 100644 index 1b316691..00000000 --- a/v1/message/message.go +++ /dev/null @@ -1,338 +0,0 @@ -// Package message provides functions for managing data used by conditions and transforms. 
-package message - -import ( - "bytes" - "encoding/json" - "fmt" - "strings" - "unicode/utf8" - - "github.com/brexhq/substation/internal/base64" - "github.com/tidwall/gjson" - "github.com/tidwall/sjson" -) - -const ( - // metaKey is a prefix used to access the meta field in a Message. - metaKey = "meta " -) - -// errSetRawInvalidValue is returned when setRaw receives an invalid interface type. -var errSetRawInvalidValue = fmt.Errorf("invalid value type") - -// Message is the data structure that is handled by transforms and interpreted by -// conditions. -// -// Data in each message can be accessed and modified as JSON text or binary data: -// - JSON text is accessed using the GetValue, SetValue, and DeleteValue methods. -// - Binary data is accessed using the Data and SetData methods. -// -// Metadata is an additional data field that is meant to store information about the -// message, but can be used for any purpose. For JSON text, metadata is accessed using -// the GetValue, SetValue, and DeleteValue methods with a key prefixed with "meta" (e.g. -// "meta foo"). Binary metadata is accessed using the Metadata and SetMetadata methods. -// -// Messages can also be configured as "control messages." Control messages are used for flow -// control in Substation functions and applications, but can be used for any purpose depending -// on the needs of a transform or condition. These messages should not contain data or metadata. -type Message struct { - data []byte - meta []byte - - // ctrl is a flag that indicates if the message is a control message. - // - // Control messages trigger special behavior in transforms and conditions. - ctrl bool -} - -// String returns the message data as a string. -func (m *Message) String() string { - return string(m.data) -} - -// New returns a new Message. -func New(opts ...func(*Message)) *Message { - msg := &Message{} - for _, o := range opts { - o(msg) - } - - return msg -} - -// AsControl sets the message as a control message. -func (m *Message) AsControl() *Message { - m.data = nil - m.meta = nil - - m.ctrl = true - return m -} - -// IsControl returns true if the message is a control message. -func (m *Message) IsControl() bool { - return m.ctrl -} - -// Data returns the message data. -func (m *Message) Data() []byte { - if m.ctrl { - return nil - } - - return m.data -} - -// SetData sets the message data. -func (m *Message) SetData(data []byte) *Message { - if m.ctrl { - return m - } - - m.data = data - return m -} - -// Metadata returns the message metadata. -func (m *Message) Metadata() []byte { - if m.ctrl { - return nil - } - - return m.meta -} - -// SetMetadata sets the message metadata. -func (m *Message) SetMetadata(metadata []byte) *Message { - if m.ctrl { - return m - } - - m.meta = metadata - return m -} - -// GetValue returns a value from the message data or metadata. -// -// If the key is prefixed with "meta" (e.g. "meta foo"), then -// the value is retrieved from the metadata field, otherwise it -// is retrieved from the data field. -// -// This only works with JSON text. If the message data or metadata -// is not JSON text, then an empty value is returned. -func (m *Message) GetValue(key string) Value { - if strings.HasPrefix(key, metaKey) { - key = strings.TrimPrefix(key, metaKey) - key = strings.TrimSpace(key) - - v := gjson.GetBytes(m.meta, key) - return Value{gjson: v} - } - - key = strings.TrimSpace(key) - v := gjson.GetBytes(m.data, key) - return Value{gjson: v} -} - -// SetValue sets a value in the message data or metadata. 
-// -// If the key is prefixed with "meta" (e.g. "meta foo"), then -// the value is placed into the metadata field, otherwise it -// is placed into the data field. -// -// This only works with JSON text. If the message data or metadata -// is not JSON text, then this method does nothing. -func (m *Message) SetValue(key string, value interface{}) error { - if strings.HasPrefix(key, metaKey) { - key = strings.TrimPrefix(key, metaKey) - key = strings.TrimSpace(key) - - meta, err := setValue(m.meta, key, value) - if err != nil { - return err - } - m.meta = meta - - return nil - } - - key = strings.TrimSpace(key) - data, err := setValue(m.data, key, value) - if err != nil { - return err - } - m.data = data - - return nil -} - -// DeleteValue deletes a value in the message data or metadata. -// -// If the key is prefixed with "meta" (e.g. "meta foo"), then -// the value is removed from the metadata field, otherwise it -// is removed from the data field. -// -// This only works with JSON text. If the message data or metadata -// is not JSON text, then this method does nothing. -func (m *Message) DeleteValue(key string) error { - if strings.HasPrefix(key, metaKey) { - key = strings.TrimPrefix(key, metaKey) - key = strings.TrimSpace(key) - - meta, err := deleteValue(m.meta, key) - if err != nil { - return err - } - m.meta = meta - - return nil - } - - data, err := deleteValue(m.data, key) - if err != nil { - return err - } - m.data = data - - return nil -} - -// Value is a wrapper around gjson.Result that provides a consistent interface -// for converting values from JSON text. -type Value struct { - gjson gjson.Result -} - -// Value returns the value as an interface{}. -func (v Value) Value() any { - return v.gjson.Value() -} - -// String returns the value as a string. -func (v Value) String() string { - return v.gjson.String() -} - -// Bytes returns the value as a byte slice. -func (v Value) Bytes() []byte { - return []byte(v.gjson.String()) -} - -// Int returns the value as an int64. -func (v Value) Int() int64 { - return v.gjson.Int() -} - -// Uint returns the value as a uint64. -func (v Value) Uint() uint64 { - return v.gjson.Uint() -} - -// Float returns the value as a float64. -func (v Value) Float() float64 { - return v.gjson.Float() -} - -// Bool returns the value as a bool. -func (v Value) Bool() bool { - return v.gjson.Bool() -} - -// Array returns the value as a slice of Value. -func (v Value) Array() []Value { - var values []Value - for _, r := range v.gjson.Array() { - values = append(values, Value{gjson: r}) - } - - return values -} - -// IsArray returns true if the value is an array. -func (v Value) IsArray() bool { - return v.gjson.IsArray() -} - -// Map returns the value as a map of string to Value. -func (v Value) Map() map[string]Value { - values := make(map[string]Value) - for k, r := range v.gjson.Map() { - values[k] = Value{gjson: r} - } - - return values -} - -// Exists returns true if the value exists. -func (v Value) Exists() bool { - return v.gjson.Exists() -} - -func deleteValue(json []byte, key string) ([]byte, error) { - b, err := sjson.DeleteBytes(json, key) - if err != nil { - return nil, err - } - - return b, nil -} - -// sjson.SetBytesOptions is not used because transform benchmarks perform better with -// sjson.SetBytes (allocating a new byte slice). This may change if transforms are -// refactored. 
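Before the internal helpers below, a hedged sketch of the exported data/metadata API described above (the keys and values are illustrative).

    package main

    import (
    	"fmt"

    	"github.com/brexhq/substation/message"
    )

    func main() {
    	msg := message.New().SetData([]byte(`{"foo":"bar"}`))

    	// Keys prefixed with "meta " read and write the metadata field.
    	if err := msg.SetValue("meta source", "example"); err != nil {
    		panic(err)
    	}

    	fmt.Println(msg.GetValue("foo").String())         // bar
    	fmt.Println(msg.GetValue("meta source").String()) // example
    }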
-func setValue(json []byte, key string, value interface{}) ([]byte, error) { - if validJSON(value) { - return setRaw(json, key, value) - } - - switch v := value.(type) { - case []byte: - if utf8.Valid(v) { - return sjson.SetBytes(json, key, v) - } else { - return sjson.SetBytes(json, key, base64.Encode(v)) - } - case Value: - return sjson.SetBytes(json, key, v.Value()) - default: - return sjson.SetBytes(json, key, v) - } -} - -// sjson.SetRawBytesOptions is not used because transform benchmarks perform better with -// sjson.SetRawBytes (allocating a new byte slice). This may change if transforms are -// refactored. -func setRaw(json []byte, key string, value interface{}) ([]byte, error) { - switch v := value.(type) { - case []byte: - return sjson.SetRawBytes(json, key, v) - case string: - return sjson.SetRawBytes(json, key, []byte(v)) - case Value: - return sjson.SetRawBytes(json, key, v.Bytes()) - default: - return nil, errSetRawInvalidValue - } -} - -func validJSON(data interface{}) bool { - switch v := data.(type) { - case []byte: - if !bytes.HasPrefix(v, []byte(`{`)) && !bytes.HasPrefix(v, []byte(`[`)) { - return false - } - - return json.Valid(v) - case string: - if !strings.HasPrefix(v, `{`) && !strings.HasPrefix(v, `[`) { - return false - } - - return json.Valid([]byte(v)) - case Value: - return validJSON(v.String()) - default: - return false - } -} diff --git a/v1/substation.go b/v1/substation.go deleted file mode 100644 index c5455364..00000000 --- a/v1/substation.go +++ /dev/null @@ -1,81 +0,0 @@ -package substation - -import ( - "context" - "encoding/json" - "fmt" - - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" - "github.com/brexhq/substation/transform" -) - -var errNoTransforms = fmt.Errorf("no transforms configured") - -// Config is the core configuration for the application. Custom applications -// should embed this and add additional configuration options. -type Config struct { - // Transforms contains a list of data transformations that are executed. - Transforms []config.Config `json:"transforms"` -} - -// Substation provides access to data transformation functions. -type Substation struct { - cfg Config - - factory transform.Factory - tforms []transform.Transformer -} - -// New returns a new Substation instance. -func New(ctx context.Context, cfg Config, opts ...func(*Substation)) (*Substation, error) { - if cfg.Transforms == nil { - return nil, errNoTransforms - } - - sub := &Substation{ - cfg: cfg, - factory: transform.New, - } - - for _, o := range opts { - o(sub) - } - - // Create transforms from the configuration. - for _, c := range cfg.Transforms { - t, err := sub.factory(ctx, c) - if err != nil { - return nil, err - } - - sub.tforms = append(sub.tforms, t) - } - - return sub, nil -} - -// WithTransformFactory configures a custom transform factory. -func WithTransformFactory(fac transform.Factory) func(*Substation) { - return func(s *Substation) { - s.factory = fac - } -} - -// Transform runs the configured data transformation functions on the -// provided messages. -// -// This is safe to use concurrently. -func (s *Substation) Transform(ctx context.Context, msg ...*message.Message) ([]*message.Message, error) { - return transform.Apply(ctx, s.tforms, msg...) -} - -// String returns a JSON representation of the configuration.
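Before the String method below, a hedged end-to-end sketch of the API above. The transform type and its settings shape are assumptions for illustration; see the transform package for the supported types.

    package main

    import (
    	"context"
    	"fmt"

    	"github.com/brexhq/substation"
    	"github.com/brexhq/substation/config"
    	"github.com/brexhq/substation/message"
    )

    func main() {
    	ctx := context.Background()

    	// A single transform; "object_copy" and its settings keys are illustrative.
    	cfg := substation.Config{
    		Transforms: []config.Config{
    			{
    				Type: "object_copy",
    				Settings: map[string]interface{}{
    					"object": map[string]interface{}{
    						"source_key": "foo",
    						"target_key": "fu",
    					},
    				},
    			},
    		},
    	}

    	sub, err := substation.New(ctx, cfg)
    	if err != nil {
    		panic(err)
    	}

    	msgs, err := sub.Transform(ctx, message.New().SetData([]byte(`{"foo":"bar"}`)))
    	if err != nil {
    		panic(err)
    	}
    	for _, m := range msgs {
    		fmt.Println(m) // Message.String returns the message data
    	}
    }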
-func (s *Substation) String() string { - b, err := json.Marshal(s.cfg) - if err != nil { - return fmt.Sprintf("substation: %v", err) - } - - return string(b) -} diff --git a/v1/substation.libsonnet b/v1/substation.libsonnet deleted file mode 100644 index cf13a410..00000000 --- a/v1/substation.libsonnet +++ /dev/null @@ -1,1483 +0,0 @@ -{ - // Mirrors interfaces from the condition package. - cnd: $.condition, - condition: { - // Operators. - all(i): { operator: 'all', inspectors: $.helpers.make_array(i) }, - any(i): { operator: 'any', inspectors: $.helpers.make_array(i) }, - none(i): { operator: 'none', inspectors: $.helpers.make_array(i) }, - // Inspectors. - fmt: $.condition.format, - format: { - json(settings={}): { - type: 'format_json', - }, - mime(settings={}): { - local default = { - object: $.config.object, - type: null, - }, - - type: 'format_mime', - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), - }, - }, - num: $.condition.number, - number: { - default: { - object: $.config.object, - value: null, - }, - eq(settings={}): $.condition.number.equal_to(settings=settings), - equal_to(settings={}): { - local default = $.condition.number.default, - - type: 'number_equal_to', - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), - }, - lt(settings={}): $.condition.number.less_than(settings=settings), - less_than(settings={}): { - local default = $.condition.number.default, - - type: 'number_less_than', - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), - }, - gt(settings={}): $.condition.number.greater_than(settings=settings), - greater_than(settings={}): { - local default = $.condition.number.default, - - type: 'number_greater_than', - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), - }, - bitwise: { - and(settings={}): { - local default = { - object: $.config.object, - value: null, - }, - - type: 'number_bitwise_and', - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), - }, - not(settings={}): { - local default = { - object: $.config.object, - }, - - type: 'number_bitwise_not', - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), - }, - or(settings={}): { - local default = { - object: $.config.object, - value: null, - }, - - type: 'number_bitwise_or', - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), - }, - xor(settings={}): { - local default = { - object: $.config.object, - value: null, - }, - - type: 'number_bitwise_xor', - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), - }, - }, - len: $.condition.number.length, - length: { - default: { - object: $.config.object, - value: null, - measurement: 'byte', - }, - eq(settings={}): $.condition.number.length.equal_to(settings=settings), - equal_to(settings={}): { - local default = $.condition.number.length.default, - - type: 'number_length_equal_to', - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), - }, - gt(settings={}): $.condition.number.length.greater_than(settings=settings), - greater_than(settings={}): { - local default = $.condition.number.length.default, - - type: 'number_length_greater_than', - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), - }, - lt(settings={}): $.condition.number.length.less_than(settings=settings), - less_than(settings={}): { - local default = $.condition.number.length.default, - - type: 'number_length_less_than', - settings: std.prune(std.mergePatch(default, 
$.helpers.abbv(settings))), - }, - }, - }, - meta: { - condition(settings={}): { - local default = { condition: null }, - - type: 'meta_condition', - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), - }, - err(settings={}): { - local default = { - inspector: null, - error_messages: ['.*'], - }, - - type: 'meta_err', - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), - }, - for_each(settings={}): { - local default = { - object: $.config.object, - type: null, - inspector: null, - }, - - type: 'meta_for_each', - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), - }, - negate(settings={}): { - local default = { inspector: null }, - - type: 'meta_negate', - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), - }, - }, - net: $.condition.network, - network: { - ip: { - default: { - object: $.config.object, - }, - global_unicast(settings={}): { - local default = $.condition.network.ip.default, - - type: 'network_ip_global_unicast', - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), - }, - link_local_multicast(settings={}): { - local default = $.condition.network.ip.default, - - type: 'network_ip_link_local_multicast', - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), - }, - link_local_unicast(settings={}): { - local default = $.condition.network.ip.default, - - type: 'network_ip_link_local_unicast', - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), - }, - loopback(settings={}): { - local default = $.condition.network.ip.default, - - type: 'network_ip_loopback', - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), - }, - multicast(settings={}): { - local default = $.condition.network.ip.default, - - type: 'network_ip_multicast', - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), - }, - private(settings={}): { - local default = $.condition.network.ip.default, - - type: 'network_ip_private', - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), - }, - unicast(settings={}): { - local default = $.condition.network.ip.default, - - type: 'network_ip_unicast', - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), - }, - unspecified(settings={}): { - local default = $.condition.network.ip.default, - - type: 'network_ip_unspecified', - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), - }, - valid(settings={}): { - local default = $.condition.network.ip.default, - - type: 'network_ip_valid', - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), - }, - }, - }, - str: $.condition.string, - string: { - default: { - object: $.config.object, - value: null, - }, - has(settings={}): $.condition.string.contains(settings=settings), - contains(settings={}): { - local default = $.condition.string.default, - - type: 'string_contains', - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), - }, - eq(settings={}): $.condition.string.equal_to(settings=settings), - equal_to(settings={}): { - local default = $.condition.string.default, - - type: 'string_equal_to', - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), - }, - gt(settings={}): $.condition.string.greater_than(settings=settings), - greater_than(settings={}): { - local default = $.condition.string.default, - - type: 'string_greater_than', - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), - }, - lt(settings={}): 
$.condition.string.less_than(settings=settings), - less_than(settings={}): { - local default = $.condition.string.default, - - type: 'string_less_than', - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), - }, - prefix(settings={}): $.condition.string.starts_with(settings=settings), - starts_with(settings={}): { - local default = $.condition.string.default, - - type: 'string_starts_with', - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), - }, - suffix(settings={}): $.condition.string.ends_with(settings=settings), - ends_with(settings={}): { - local default = $.condition.string.default, - - type: 'string_ends_with', - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), - }, - match(settings={}): { - local default = { - object: $.config.object, - pattern: null, - }, - - type: 'string_match', - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), - }, - }, - util: $.condition.utility, - utility: { - random(settings={}): { - type: 'utility_random', - }, - }, - }, - // Mirrors interfaces from the transform package. - tf: $.transform, - transform: { - agg: $.transform.aggregate, - aggregate: { - from: { - arr(settings={}): $.transform.aggregate.from.array(settings=settings), - array(settings={}): { - local type = 'aggregate_from_array', - local default = { - id: $.helpers.id(type, settings), - object: $.config.object, - }, - - type: type, - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), - }, - str(settings={}): $.transform.aggregate.from.string(settings=settings), - string(settings={}): { - local type = 'aggregate_from_string', - local default = { - id: $.helpers.id(type, settings), - separator: null, - }, - - type: type, - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), - }, - }, - to: { - arr(settings={}): $.transform.aggregate.to.array(settings=settings), - array(settings={}): { - local type = 'aggregate_to_array', - local default = { - id: $.helpers.id(type, settings), - object: $.config.object, - batch: $.config.batch, - }, - - type: type, - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), - }, - str(settings={}): $.transform.aggregate.to.string(settings=settings), - string(settings={}): { - local type = 'aggregate_to_string', - local default = { - id: $.helpers.id(type, settings), - batch: $.config.batch, - separator: null, - }, - - type: type, - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), - }, - }, - }, - arr: $.transform.array, - array: { - join(settings={}): { - local type = 'array_join', - local default = { - id: $.helpers.id(type, settings), - object: $.config.object, - separator: null, - }, - - type: type, - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), - }, - zip(settings={}): { - local type = 'array_zip', - local default = { - id: $.helpers.id(type, settings), - object: $.config.object, - }, - - type: type, - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), - }, - }, - enrich: { - aws: { - dynamodb(settings={}): { - local type = 'enrich_aws_dynamodb', - local default = { - id: $.helpers.id(type, settings), - object: $.config.object, - aws: $.config.aws, - retry: $.config.retry, - table_name: null, - partition_key: null, - sort_key: null, - key_condition_expression: null, - limit: 1, - scan_index_forward: false, - }, - - type: type, - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), - }, - lambda(settings={}): { - local type = 
'enrich_aws_lambda', - local default = { - id: $.helpers.id(type, settings), - object: $.config.object, - aws: $.config.aws, - retry: $.config.retry, - function_name: null, - }, - - type: type, - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), - }, - }, - dns: { - default: { - object: $.config.object, - request: $.config.request, - }, - domain_lookup(settings={}): { - local type = 'enrich_dns_domain_lookup', - local default = $.transform.enrich.dns.default { id: $.helpers.id(type, settings) }, - - type: type, - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), - }, - ip_lookup(settings={}): { - local type = 'enrich_dns_ip_lookup', - local default = $.transform.enrich.dns.default { id: $.helpers.id(type, settings) }, - - type: type, - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), - }, - txt_lookup(settings={}): { - local type = 'enrich_dns_txt_lookup', - local default = $.transform.enrich.dns.default { id: $.helpers.id(type, settings) }, - - type: type, - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), - }, - }, - http: { - default: { - object: $.config.object, - request: $.config.request, - url: null, - headers: null, - }, - get(settings={}): { - local type = 'enrich_http_get', - local default = $.transform.enrich.http.default { id: $.helpers.id(type, settings) }, - - type: type, - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), - }, - post(settings={}): { - local type = 'enrich_http_post', - local default = $.transform.enrich.http.default { body_key: null, id: $.helpers.id(type, settings) }, - - type: type, - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), - }, - }, - kv_store: { - default: { - object: $.config.object, - prefix: null, - kv_store: null, - close_kv_store: false, - }, - // Deprecated: Use `item.get` or `iget` instead. - get(settings={}): { - local type = 'enrich_kv_store_get', - local default = $.transform.enrich.kv_store.default { id: $.helpers.id(type, settings) }, - - type: type, - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), - }, - // Deprecated: Use `item.set` or `iset` instead. - set(settings={}): { - local type = 'enrich_kv_store_set', - local default = $.transform.enrich.kv_store.default { ttl_key: null, ttl_offset: '0s', id: $.helpers.id(type, settings) }, - - type: type, - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), - }, - iget: $.transform.enrich.kv_store.item.get, - iset: $.transform.enrich.kv_store.item.set, - item: { - get: $.transform.enrich.kv_store.get, - set: $.transform.enrich.kv_store.set, - }, - // In future releases this will also be `set.add`. 
- sadd(settings={}): { - local type = 'enrich_kv_store_set_add', - local default = $.transform.enrich.kv_store.default { ttl_key: null, ttl_offset: '0s', id: $.helpers.id(type, settings) }, - - type: type, - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), - }, - }, - }, - fmt: $.transform.format, - format: { - default: { - object: $.config.object, - }, - from: { - b64(settings={}): $.transform.format.from.base64(settings=settings), - base64(settings={}): { - local type = 'format_from_base64', - local default = $.transform.format.default { id: $.helpers.id(type, settings) }, - - type: type, - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), - }, - gz(settings={}): $.transform.format.from.gzip(settings=settings), - gzip(settings={}): { - local type = 'format_from_gzip', - local default = { id: $.helpers.id(type, settings) }, - - type: type, - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), - }, - pretty_print(settings={}): { - local type = 'format_from_pretty_print', - local default = { id: $.helpers.id(type, settings) }, - - type: type, - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), - }, - zip(settings={}): { - local type = 'format_from_zip', - local default = { id: $.helpers.id(type, settings) }, - - type: type, - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), - }, - }, - to: { - b64(settings={}): $.transform.format.to.base64(settings=settings), - base64(settings={}): { - local type = 'format_to_base64', - local default = $.transform.format.default { id: $.helpers.id(type, settings) }, - - type: type, - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), - }, - gz(settings={}): $.transform.format.to.gzip(settings=settings), - gzip(settings={}): { - local type = 'format_to_gzip', - local default = { id: $.helpers.id(type, settings) }, - - type: type, - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), - }, - }, - }, - hash: { - default: { - object: $.config.object, - }, - md5(settings={}): { - local type = 'hash_md5', - local default = $.transform.hash.default { id: $.helpers.id(type, settings) }, - - type: type, - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), - }, - sha256(settings={}): { - local type = 'hash_sha256', - local default = $.transform.hash.default { id: $.helpers.id(type, settings) }, - - type: type, - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), - }, - }, - num: $.transform.number, - number: { - max(settings={}): $.transform.number.maximum(settings=settings), - maximum(settings={}): { - local type = 'number_maximum', - local default = { - id: $.helpers.id(type, settings), - object: $.config.object, - value: null, - }, - - type: type, - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), - }, - min(settings={}): $.transform.number.minimum(settings=settings), - minimum(settings={}): { - local type = 'number_minimum', - local default = { - id: $.helpers.id(type, settings), - object: $.config.object, - value: null, - }, - - type: type, - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), - }, - math: { - default: { - object: $.config.object, - }, - add(settings={}): $.transform.number.math.addition(settings=settings), - addition(settings={}): { - local type = 'number_math_addition', - local default = $.transform.number.math.default { id: $.helpers.id(type, settings) }, - - type: type, - settings: 
std.prune(std.mergePatch(default, $.helpers.abbv(settings))), - }, - sub(settings={}): $.transform.number.math.subtraction(settings=settings), - subtraction(settings={}): { - local type = 'number_math_subtraction', - local default = $.transform.number.math.default { id: $.helpers.id(type, settings) }, - - type: type, - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), - }, - mul(settings={}): $.transform.number.math.multiplication(settings=settings), - multiplication(settings={}): { - local type = 'number_math_multiplication', - local default = $.transform.number.math.default { id: $.helpers.id(type, settings) }, - - type: type, - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), - }, - div(settings={}): $.transform.number.math.division(settings=settings), - division(settings={}): { - local type = 'number_math_division', - local default = $.transform.number.math.default { id: $.helpers.id(type, settings) }, - - type: type, - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), - }, - }, - }, - meta: { - err(settings={}): { - local type = 'meta_err', - local default = { - id: $.helpers.id(type, settings), - transform: null, - transforms: null, - error_messages: null, - }, - - type: type, - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), - }, - for_each(settings={}): { - local type = 'meta_for_each', - local default = { - id: $.helpers.id(type, settings), - object: $.config.object, - transform: null, - transforms: null, - }, - - type: type, - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), - }, - kv_store: { - lock(settings={}): { - local type = 'meta_kv_store_lock', - local default = { - id: $.helpers.id(type, settings), - object: $.config.object { ttl_key: null }, - transform: null, - transforms: null, - kv_store: null, - prefix: null, - ttl_offset: '0s', - }, - - type: type, - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), - }, - }, - metric: { - duration(settings={}): { - local type = 'meta_metric_duration', - local default = { - id: $.helpers.id(type, settings), - metric: $.config.metric, - transform: null, - transforms: null, - }, - - type: type, - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), - }, - }, - pipe(settings={}): $.transform.meta.pipeline(settings=settings), - pipeline(settings={}): { - local type = 'meta_pipeline', - local default = { - id: $.helpers.id(type, settings), - object: $.config.object, - transforms: null, - }, - - type: type, - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), - }, - retry(settings={}): { - local type = 'meta_retry', - local default = { - id: $.helpers.id(type, settings), - retry: $.config.retry { error_messages: ['.*'] }, - condition: null, - transforms: null, - }, - - type: type, - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), - }, - switch(settings={}): { - local type = 'meta_switch', - local default = { - id: $.helpers.id(type, settings), - cases: null, - }, - - type: type, - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), - }, - }, - net: $.transform.network, - network: { - domain: { - default: { - object: $.config.object, - }, - registered_domain(settings={}): { - local type = 'network_domain_registered_domain', - local default = $.transform.network.domain.default { id: $.helpers.id(type, settings) }, - - type: type, - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), - }, - 
subdomain(settings={}): { - local type = 'network_domain_subdomain', - local default = $.transform.network.domain.default { id: $.helpers.id(type, settings) }, - - type: type, - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), - }, - tld(settings={}): $.transform.network.domain.top_level_domain(settings=settings), - top_level_domain(settings={}): { - local type = 'network_domain_top_level_domain', - local default = $.transform.network.domain.default { id: $.helpers.id(type, settings) }, - - type: type, - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), - }, - }, - }, - obj: $.transform.object, - object: { - default: { - object: $.config.object, - }, - cp(settings={}): $.transform.object.copy(settings=settings), - copy(settings={}): { - local type = 'object_copy', - local default = $.transform.object.default { id: $.helpers.id(type, settings) }, - - type: type, - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), - }, - del(settings={}): $.transform.object.delete(settings=settings), - delete(settings={}): { - local type = 'object_delete', - local default = $.transform.object.default { id: $.helpers.id(type, settings) }, - - type: type, - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), - }, - insert(settings={}): { - local type = 'object_insert', - local default = $.transform.object.default { id: $.helpers.id(type, settings) }, - - type: type, - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), - }, - jq(settings={}): { - local type = 'object_jq', - local default = { - id: $.helpers.id(type, settings), - filter: null, - }, - - type: type, - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), - }, - to: { - bool(settings={}): $.transform.object.to.boolean(settings=settings), - boolean(settings={}): { - local type = 'object_to_boolean', - local default = $.transform.object.default { id: $.helpers.id(type, settings) }, - - type: type, - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), - }, - float(settings={}): { - local type = 'object_to_float', - local default = $.transform.object.default { id: $.helpers.id(type, settings) }, - - type: type, - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), - }, - int(settings={}): $.transform.object.to.integer(settings=settings), - integer(settings={}): { - local type = 'object_to_integer', - local default = $.transform.object.default { id: $.helpers.id(type, settings) }, - - type: type, - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), - }, - str(settings={}): $.transform.object.to.string(settings=settings), - string(settings={}): { - local type = 'object_to_string', - local default = $.transform.object.default { id: $.helpers.id(type, settings) }, - - type: type, - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), - }, - uint(settings={}): $.transform.object.to.unsigned_integer(settings=settings), - unsigned_integer(settings={}): { - local type = 'object_to_unsigned_integer', - local default = $.transform.object.default { id: $.helpers.id(type, settings) }, - - type: type, - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), - }, - }, - }, - send: { - aws: { - dynamodb(settings={}): { - local type = 'send_aws_dynamodb', - local default = { - id: $.helpers.id(type, settings), - batch: $.config.batch, - auxiliary_transforms: null, - aws: $.config.aws, - retry: $.config.retry, - table_name: null, - }, - - local s = 
std.mergePatch(settings, { - auxiliary_transforms: if std.objectHas(settings, 'auxiliary_transforms') then settings.auxiliary_transforms else if std.objectHas(settings, 'aux_tforms') then settings.aux_tforms else null, - aux_tforms: null, - }), - - type: type, - settings: std.prune(std.mergePatch(default, $.helpers.abbv(s))), - }, - eventbridge(settings={}): { - local type = 'send_aws_eventbridge', - local default = { - id: $.helpers.id(type, settings), - batch: $.config.batch, - auxiliary_transforms: null, - aws: $.config.aws, - retry: $.config.retry, - arn: null, - description: null, - }, - local s = std.mergePatch(settings, { - auxiliary_transforms: if std.objectHas(settings, 'auxiliary_transforms') then settings.auxiliary_transforms else if std.objectHas(settings, 'aux_tforms') then settings.aux_tforms else null, - aux_tforms: null, - }), - - type: type, - settings: std.prune(std.mergePatch(default, $.helpers.abbv(s))), - }, - firehose(settings={}): $.transform.send.aws.kinesis_data_firehose(settings=settings), - kinesis_data_firehose(settings={}): { - local type = 'send_aws_kinesis_data_firehose', - local default = { - id: $.helpers.id(type, settings), - batch: $.config.batch, - auxiliary_transforms: null, - aws: $.config.aws, - retry: $.config.retry, - stream_name: null, - }, - - local s = std.mergePatch(settings, { - auxiliary_transforms: if std.objectHas(settings, 'auxiliary_transforms') then settings.auxiliary_transforms else if std.objectHas(settings, 'aux_tforms') then settings.aux_tforms else null, - aux_tforms: null, - }), - - type: type, - settings: std.prune(std.mergePatch(default, $.helpers.abbv(s))), - }, - kinesis_data_stream(settings={}): { - local type = 'send_aws_kinesis_data_stream', - local default = { - id: $.helpers.id(type, settings), - batch: $.config.batch, - auxiliary_transforms: null, - aws: $.config.aws, - retry: $.config.retry, - stream_name: null, - use_batch_key_as_partition_key: false, - enable_record_aggregation: false, - }, - - local s = std.mergePatch(settings, { - auxiliary_transforms: if std.objectHas(settings, 'auxiliary_transforms') then settings.auxiliary_transforms else if std.objectHas(settings, 'aux_tforms') then settings.aux_tforms else null, - aux_tforms: null, - }), - - type: type, - settings: std.prune(std.mergePatch(default, $.helpers.abbv(s))), - }, - lambda(settings={}): { - local type = 'send_aws_lambda', - local default = { - id: $.helpers.id(type, settings), - batch: $.config.batch, - auxiliary_transforms: null, - aws: $.config.aws, - retry: $.config.retry, - function_name: null, - }, - - local s = std.mergePatch(settings, { - auxiliary_transforms: if std.objectHas(settings, 'auxiliary_transforms') then settings.auxiliary_transforms else if std.objectHas(settings, 'aux_tforms') then settings.aux_tforms else null, - aux_tforms: null, - }), - - type: type, - settings: std.prune(std.mergePatch(default, $.helpers.abbv(s))), - }, - s3(settings={}): { - local type = 'send_aws_s3', - local default = { - id: $.helpers.id(type, settings), - batch: $.config.batch, - auxiliary_transforms: null, - aws: $.config.aws, - retry: $.config.retry, - bucket_name: null, - storage_class: 'STANDARD', - file_path: $.file_path, - }, - - local s = std.mergePatch(settings, { - auxiliary_transforms: if std.objectHas(settings, 'auxiliary_transforms') then settings.auxiliary_transforms else if std.objectHas(settings, 'aux_tforms') then settings.aux_tforms else null, - aux_tforms: null, - }), - - type: type, - settings: std.prune(std.mergePatch(default, $.helpers.abbv(s))), - }, - sns(settings={}): { - local type = 'send_aws_sns', - local default = { - id: $.helpers.id(type, settings), - batch: $.config.batch, - auxiliary_transforms: null, - aws: $.config.aws, - retry: $.config.retry, - arn: null, - }, - - local s =
std.mergePatch(settings, { - auxiliary_transforms: if std.objectHas(settings, 'auxiliary_transforms') then settings.auxiliary_transforms else if std.objectHas(settings, 'aux_tforms') then settings.aux_tforms else null, - aux_tforms: null, - }), - - type: type, - settings: std.prune(std.mergePatch(default, $.helpers.abbv(s))), - }, - sqs(settings={}): { - local type = 'send_aws_sqs', - local default = { - id: $.helpers.id(type, settings), - batch: $.config.batch, - auxiliary_transforms: null, - aws: $.config.aws, - retry: $.config.retry, - arn: null, - }, - - local s = std.mergePatch(settings, { - auxiliary_transforms: if std.objectHas(settings, 'auxiliary_transforms') then settings.auxiliary_transforms else if std.objectHas(settings, 'aux_tforms') then settings.aux_tforms else null, - aux_tforms: null, - }), - - type: type, - settings: std.prune(std.mergePatch(default, $.helpers.abbv(s))), - }, - }, - file(settings={}): { - local type = 'send_file', - local default = { - id: $.helpers.id(type, settings), - batch: $.config.batch, - auxiliary_transforms: null, - file_path: $.file_path, - }, - - local s = std.mergePatch(settings, { - auxiliary_transforms: if std.objectHas(settings, 'auxiliary_transforms') then settings.auxiliary_transforms else if std.objectHas(settings, 'aux_tforms') then settings.aux_tforms else null, - aux_tforms: null, - }), - - type: type, - settings: std.prune(std.mergePatch(default, $.helpers.abbv(s))), - }, - http: { - post(settings={}): { - local type = 'send_http_post', - local default = { - id: $.helpers.id(type, settings), - batch: $.config.batch, - auxiliary_transforms: null, - url: null, - headers: null, - }, - - local s = std.mergePatch(settings, { - auxiliary_transforms: if std.objectHas(settings, 'auxiliary_transforms') then settings.auxiliary_transforms else if std.objectHas(settings, 'aux_tforms') then settings.aux_tforms else null, - aux_tforms: null, - headers: if std.objectHas(settings, 'headers') then settings.headers else if std.objectHas(settings, 'hdr') then settings.hdr else null, - hdr: null, - }), - - type: type, - settings: std.prune(std.mergePatch(default, $.helpers.abbv(s))), - }, - }, - stdout(settings={}): { - local type = 'send_stdout', - local default = { - id: $.helpers.id(type, settings), - batch: $.config.batch, - auxiliary_transforms: null, - }, - - local s = std.mergePatch(settings, { - auxiliary_transforms: if std.objectHas(settings, 'auxiliary_transforms') then settings.auxiliary_transforms else if std.objectHas(settings, 'aux_tforms') then settings.aux_tforms else null, - aux_tforms: null, - }), - - type: type, - settings: std.prune(std.mergePatch(default, $.helpers.abbv(s))), - }, - }, - str: $.transform.string, - string: { - append(settings={}): { - local type = 'string_append', - local default = { - id: $.helpers.id(type, settings), - object: $.config.object, - suffix: null, - }, - - type: type, - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), - }, - capture(settings={}): { - local type = 'string_capture', - local default = { - id: $.helpers.id(type, settings), - object: $.config.object, - pattern: null, - count: 0, - }, - - type: type, - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), - }, - repl: $.transform.string.replace, - replace(settings={}): { - local type = 'string_replace', - local default = { - id: $.helpers.id(type, settings), - object: $.config.object, - pattern: null, - replacement: null, - }, - - local s = std.mergePatch(settings, { - pattern: settings.pattern, - 
replacement: if std.objectHas(settings, 'replacement') then settings.replacement else if std.objectHas(settings, 'repl') then settings.repl else null, - repl: null, - }), - - type: type, - settings: std.prune(std.mergePatch(default, $.helpers.abbv(s))), - }, - split(settings={}): { - local type = 'string_split', - local default = { - id: $.helpers.id(type, settings), - object: $.config.object, - separator: null, - }, - - local s = std.mergePatch(settings, { - separator: if std.objectHas(settings, 'separator') then settings.separator else if std.objectHas(settings, 'sep') then settings.sep else null, - sep: null, - }), - - type: type, - settings: std.prune(std.mergePatch(default, $.helpers.abbv(s))), - }, - to: { - default: { - object: $.config.object, - }, - lower(settings={}): { - local type = 'string_to_lower', - local default = $.transform.string.to.default { id: $.helpers.id(type, settings) }, - - type: type, - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), - }, - upper(settings={}): { - local type = 'string_to_upper', - local default = $.transform.string.to.default { id: $.helpers.id(type, settings) }, - - type: type, - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), - }, - snake(settings={}): { - local type = 'string_to_snake', - local default = $.transform.string.to.default { id: $.helpers.id(type, settings) }, - - type: type, - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), - }, - }, - uuid(settings={}): { - local type = 'string_uuid', - local default = { - id: $.helpers.id(type, settings), - object: $.config.object, - }, - - type: type, - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), - }, - }, - time: { - from: { - str(settings={}): $.transform.time.from.string(settings=settings), - string(settings={}): { - local type = 'time_from_string', - local default = { - id: $.helpers.id(type, settings), - object: $.config.object, - format: null, - location: null, - }, - - type: type, - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), - }, - unix(settings={}): { - local type = 'time_from_unix', - local default = { - id: $.helpers.id(type, settings), - object: $.config.object, - }, - - type: type, - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), - }, - unix_milli(settings={}): { - local type = 'time_from_unix_milli', - local default = { - id: $.helpers.id(type, settings), - object: $.config.object, - }, - - type: type, - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), - }, - }, - now(settings={}): { - local type = 'time_now', - local default = { - id: $.helpers.id(type, settings), - object: $.config.object, - }, - - type: type, - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), - }, - to: { - str(settings={}): $.transform.time.to.string(settings=settings), - string(settings={}): { - local type = 'time_to_string', - local default = { - id: $.helpers.id(type, settings), - object: $.config.object, - format: null, - location: null, - }, - - type: type, - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), - }, - }, - unix(settings={}): { - local type = 'time_to_unix', - local default = { - id: $.helpers.id(type, settings), - object: $.config.object, - }, - - type: type, - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), - }, - unix_milli(settings={}): { - local type = 'time_to_unix_milli', - local default = { - id: $.helpers.id(type, settings), - object: 
$.config.object, - }, - - type: type, - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), - }, - }, - util: $.transform.utility, - utility: { - control(settings={}): { - local type = 'utility_control', - local default = { - id: $.helpers.id(type, settings), - batch: $.config.batch, - }, - - type: type, - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), - }, - delay(settings={}): { - local type = 'utility_delay', - local default = { - id: $.helpers.id(type, settings), - duration: null, - }, - - type: type, - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), - }, - drop(settings={}): { - local type = 'utility_drop', - local default = { - id: $.helpers.id(type, settings), - }, - - type: type, - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), - }, - err(settings={}): { - local type = 'utility_err', - local default = { - id: $.helpers.id(type, settings), - message: null, - }, - - type: type, - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), - }, - metric: { - bytes(settings={}): { - local type = 'utility_metric_bytes', - local default = { - id: $.helpers.id(type, settings), - metric: $.config.metric, - }, - - type: type, - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), - }, - count(settings={}): { - local type = 'utility_metric_count', - local default = { - id: $.helpers.id(type, settings), - metric: $.config.metric, - }, - - type: type, - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), - }, - freshness(settings={}): { - local type = 'utility_metric_freshness', - local default = { - id: $.helpers.id(type, settings), - threshold: null, - metric: $.config.metric, - object: $.config.object, - }, - - type: type, - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), - }, - }, - secret(settings={}): { - local type = 'utility_secret', - local default = { - id: $.helpers.id(type, settings), - secret: null, - }, - - type: type, - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), - }, - }, - }, - // Mirrors interfaces from the internal/kv_store package. - kv_store: { - aws_dynamodb(settings={}): { - local default = { - aws: $.config.aws, - retry: $.config.retry, - table_name: null, - attributes: { partition_key: null, sort_key: null, value: null, ttl: null }, - consistent_read: false, - }, - - type: 'aws_dynamodb', - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), - }, - csv_file(settings={}): { - local default = { file: null, column: null, delimiter: ',', header: null }, - - type: 'csv_file', - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), - }, - json_file(settings=$.defaults.kv_store.json_file.settings): { - local default = { file: null, is_lines: false }, - - type: 'json_file', - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), - }, - memory(settings={}): { - local default = { capacity: 1024 }, - - type: 'memory', - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), - }, - mmdb(settings={}): { - local default = { file: null }, - - type: 'mmdb', - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), - }, - text_file(settings={}): { - local default = { file: null }, - - type: 'text_file', - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), - }, - }, - // Mirrors structs from the internal/config package. 
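To show how the mirrored config structs below are consumed, a hedged sketch: it assumes the `sub` import used elsewhere in this patch, and the batch override value is illustrative. Fields left unset fall back to the defaults defined next, because user settings are merged over them with std.mergePatch.

local sub = import 'substation.libsonnet';

// Override only the batch count for one send transform; `size` and
// `duration` fall back to the `$.config.batch` defaults below.
sub.transform.send.stdout({
  batch: { count: 100 },
})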
- config: { - aws: { region: null, role_arn: null }, - batch: { count: 1000, size: 1000 * 1000, duration: '1m' }, - metric: { name: null, attributes: null, destination: null }, - object: { source_key: null, target_key: null, batch_key: null }, - request: { timeout: '1s' }, - retry: { count: 3, delay: '1s', error_messages: null }, - }, - // Mirrors config from the internal/file package. - file_path: { prefix: null, time_format: '2006/01/02', uuid: true, suffix: null }, - // Mirrors interfaces from the internal/secrets package. - secrets: { - default: { id: null, ttl: null }, - aws: { - secrets_manager(settings={}): { - local default = { - id: null, - name: null, - ttl_offset: null, - aws: $.config.aws, - retry: $.config.retry, - }, - - type: 'aws_secrets_manager', - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), - }, - }, - environment_variable(settings={}): { - local default = { id: null, name: null, ttl_offset: null }, - - type: 'environment_variable', - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), - }, - }, - // Commonly used condition and transform patterns. - pattern: { - cnd: $.pattern.condition, - condition: { - obj(key): { - object: { source_key: key }, - }, - // Negates any inspector. - negate(inspector): $.condition.meta.negate(settings={ inspector: inspector }), - net: $.pattern.condition.network, - network: { - ip: { - // Checks if an IP address is internal. - // - // Use with the ANY operator to match internal IP addresses. - // Use with the NONE operator to match external IP addresses. - internal(key=null): [ - $.condition.network.ip.link_local_multicast(settings=$.pattern.condition.obj(key)), - $.condition.network.ip.link_local_unicast(settings=$.pattern.condition.obj(key)), - $.condition.network.ip.loopback(settings=$.pattern.condition.obj(key)), - $.condition.network.ip.multicast(settings=$.pattern.condition.obj(key)), - $.condition.network.ip.private(settings=$.pattern.condition.obj(key)), - $.condition.network.ip.unspecified(settings=$.pattern.condition.obj(key)), - ], - }, - }, - num: $.pattern.condition.number, - number: { - len: $.pattern.condition.number.length, - length: { - // Checks if data is equal to zero. - // - // Use with the ANY / ALL operator to match empty data. - // Use with the NONE operator to match non-empty data. - eq_zero(key=null): - $.condition.number.length.equal_to(settings=$.pattern.condition.obj(key) { value: 0 }), - // Checks if data is greater than zero. - // - // Use with the ANY / ALL operator to match non-empty data. - // Use with the NONE operator to match empty data. - gt_zero(key=null): - $.condition.number.length.greater_than(settings=$.pattern.condition.obj(key) { value: 0 }), - }, - }, - }, - tf: $.pattern.transform, - transform: { - // Conditional applies a transform when a single condition is met. If - // the condition does not contain a valid operator, then it is assumed - // to be an ANY operator. - conditional(condition, transform): { - local type = 'meta_switch', - local c = if std.objectHas(condition, 'type') then { operator: 'any', inspectors: [condition] } else condition, - - type: type, - settings: { id: $.helpers.id(type, transform), cases: [{ condition: c, transform: transform }] }, - }, - fmt: $.pattern.transform.format, - format: { - // Creates JSON Lines text from data. Only valid JSON text is included. 
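Before the `jsonl` pattern definition that follows, a brief usage sketch of the behavior described above (the `sub` import is assumed, as in the test file later in this patch):

local sub = import 'substation.libsonnet';

{
  // Messages that are not valid JSON are dropped and the remainder are
  // joined with newlines, producing JSON Lines text.
  transforms: sub.pattern.transform.format.jsonl,
}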
- jsonl: [ - $.pattern.tf.conditional( - condition=$.cnd.meta.negate({ inspector: $.cnd.fmt.json() }), - transform=$.tf.util.drop(), - ), - $.tf.agg.to.string({ separator: '\n' }), - $.tf.str.append({ suffix: '\n' }), - ], - }, - num: $.pattern.transform.number, - number: { - clamp(source_key, target_key, min, max): [ - $.tf.number.maximum({ object: { source_key: source_key, target_key: target_key }, value: min }), - $.tf.number.minimum({ object: { source_key: target_key, target_key: target_key }, value: max }), - ], - }, - }, - }, - // Utility functions that can be used in conditions and transforms. - helpers: { - // If the input is not an array, then this returns it as an array. - make_array(i): if !std.isArray(i) then [i] else i, - obj: $.helpers.object, - object: { - // If key is `foo` and arr is `bar`, then the result is `foo.bar`. - // If key is `foo` and arr is `[bar, baz]`, then the result is `foo.bar.baz`. - append(key, arr): std.join('.', $.helpers.make_array(key) + $.helpers.make_array(arr)), - // If key is `foo`, then the result is `foo.-1`. - append_array(key): key + '.-1', - // If key is `foo` and e is `0`, then the result is `foo.0`. - get_element(key, e=0): std.join('.', [key, if std.isNumber(e) then std.toString(e) else e]), - }, - abbv(settings): std.mergePatch(settings, { - object: if std.objectHas(settings, 'object') then $.helpers.abbv_obj(settings.object) else if std.objectHas(settings, 'obj') then $.helpers.abbv_obj(settings.obj) else null, - obj: null, - }), - abbv_obj(s): { - source_key: if std.objectHas(s, 'src') then s.src else if std.objectHas(s, 'source_key') then s.source_key else null, - src: null, - target_key: if std.objectHas(s, 'trg') then s.trg else if std.objectHas(s, 'target_key') then s.target_key else null, - trg: null, - batch_key: if std.objectHas(s, 'btch') then s.btch else if std.objectHas(s, 'batch_key') then s.batch_key else null, - btch: null, - }, - id(type, settings): std.join('-', [std.md5(type)[:8], std.md5(std.toString(settings))[:8]]), - }, -} diff --git a/v1/substation_test.go b/v1/substation_test.go deleted file mode 100644 index 3573d743..00000000 --- a/v1/substation_test.go +++ /dev/null @@ -1,202 +0,0 @@ -package substation_test - -import ( - "context" - "encoding/json" - "fmt" - - "github.com/brexhq/substation" - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" - "github.com/brexhq/substation/transform" -) - -func ExampleSubstation() { - // Substation applications rely on a context for cancellation and timeouts. - ctx := context.Background() - - // Define a configuration. For native Substation applications, this is managed by Jsonnet. - // - // This example copies an object's value and prints the data to stdout. - conf := []byte(` - { - "transforms":[ - {"type":"object_copy","settings":{"object":{"source_key":"a","target_key":"c"}}}, - {"type":"send_stdout"} - ] - } - `) - - cfg := substation.Config{} - if err := json.Unmarshal(conf, &cfg); err != nil { - // Handle error. - panic(err) - } - - // Create a new Substation instance. - sub, err := substation.New(ctx, cfg) - if err != nil { - // Handle error. - panic(err) - } - - // Print the Substation configuration. - fmt.Println(sub) - - // Substation instances process data defined as a Message. Messages can be processed - // individually or in groups. This example processes multiple messages as a group. - msg := []*message.Message{ - // The first message is a data message. Only data messages are transformed.
- message.New().SetData([]byte(`{"a":"b"}`)), - // The second message is a ctrl message. ctrl messages flush the pipeline. - message.New().AsControl(), - } - - // Transform the group of messages. In this example, results are not used. - if _, err := sub.Transform(ctx, msg...); err != nil { - // Handle error. - panic(err) - } - - // Output: - // {"transforms":[{"type":"object_copy","settings":{"object":{"source_key":"a","target_key":"c"}}},{"type":"send_stdout","settings":null}]} - // {"a":"b","c":"b"} -} - -// Custom applications should embed the Substation configuration and -// add additional configuration options. -type customConfig struct { - substation.Config - - Auth struct { - Username string `json:"username"` - // Please don't store passwords in configuration files, this is only an example! - Password string `json:"password"` - } `json:"auth"` -} - -// String returns an example string representation of the custom configuration. -func (c customConfig) String() string { - return fmt.Sprintf("%s:%s", c.Auth.Username, c.Auth.Password) -} - -func Example_substationCustomConfig() { - // Substation applications rely on a context for cancellation and timeouts. - ctx := context.Background() - - // Define and load the custom configuration. This config includes a username - // and password for authentication. - conf := []byte(` - { - "transforms":[ - {"type":"object_copy","settings":{"object":{"source_key":"a","target_key":"c"}}}, - {"type":"send_stdout"} - ], - "auth":{ - "username":"foo", - "password":"bar" - } - } - `) - - cfg := customConfig{} - if err := json.Unmarshal(conf, &cfg); err != nil { - // Handle error. - panic(err) - } - - // Create a new Substation instance from the embedded configuration. - sub, err := substation.New(ctx, cfg.Config) - if err != nil { - // Handle error. - panic(err) - } - - // Print the Substation configuration. - fmt.Println(sub) - - // Print the custom configuration. - fmt.Println(cfg) - - // Output: - // {"transforms":[{"type":"object_copy","settings":{"object":{"source_key":"a","target_key":"c"}}},{"type":"send_stdout","settings":null}]} - // foo:bar -} - -func Example_substationCustomTransforms() { - // Substation applications rely on a context for cancellation and timeouts. - ctx := context.Background() - - // Define and load the configuration. This config includes a transform that - // is not part of the standard Substation package. - conf := []byte(` - { - "transforms":[ - {"type":"utility_duplicate"}, - {"type":"send_stdout"} - ] - } - `) - - cfg := substation.Config{} - if err := json.Unmarshal(conf, &cfg); err != nil { - // Handle error. - panic(err) - } - - // Create a new Substation instance with a custom transform factory for loading - // the custom transform. - sub, err := substation.New(ctx, cfg, substation.WithTransformFactory(customFactory)) - if err != nil { - // Handle error. - panic(err) - } - - msg := []*message.Message{ - message.New().SetData([]byte(`{"a":"b"}`)), - message.New().AsControl(), - } - - // Transform the group of messages. In this example, results are not used. - if _, err := sub.Transform(ctx, msg...); err != nil { - // Handle error. - panic(err) - } - - // Output: - // {"a":"b"} - // {"a":"b"} -} - -// customFactory is used in the custom transform example to load the custom transform. -func customFactory(ctx context.Context, cfg config.Config) (transform.Transformer, error) { - switch cfg.Type { - // Usually a custom transform requires configuration, but this - // is a toy example. 
Customizable transforms should have a new - // function that returns a new instance of the configured transform. - case "utility_duplicate": - return &utilityDuplicate{Count: 1}, nil - } - - return transform.New(ctx, cfg) -} - -// Duplicates a message. -type utilityDuplicate struct { - // Count is the number of times to duplicate the message. - Count int `json:"count"` -} - -func (t *utilityDuplicate) Transform(ctx context.Context, msg *message.Message) ([]*message.Message, error) { - // Always return control messages. - if msg.IsControl() { - return []*message.Message{msg}, nil - } - - output := []*message.Message{msg} - for i := 0; i < t.Count; i++ { - output = append(output, msg) - } - - return output, nil -} diff --git a/v1/substation_test.jsonnet b/v1/substation_test.jsonnet deleted file mode 100644 index 8e9be890..00000000 --- a/v1/substation_test.jsonnet +++ /dev/null @@ -1,74 +0,0 @@ -local sub = import 'substation.libsonnet'; - -local src = 'a'; -local trg = 'b'; - -local transform = sub.transform.object.copy(settings={ obj: { src: src, trg: trg } }); -local inspector = sub.condition.format.json(); - -{ - condition: { - number: { - equal_to: sub.condition.number.equal_to({ obj: { src: src }, value: 1 }), - less_than: sub.condition.number.less_than({ obj: { src: src }, value: 1 }), - greater_than: sub.condition.number.greater_than({ obj: { src: src }, value: 1 }), - }, - }, - transform: { - send: { - aws: { - s3: sub.transform.send.aws.s3({ bucket: 'my-bucket' }), - }, - http: { - post: sub.transform.send.http.post({ - url: 'http://localhost:8080', - hdr: [{ key: 'Content-Type', value: 'application/json' }], - }), - }, - }, - string: { - repl: sub.transform.string.repl({ - obj: { src: src, trg: trg }, - pattern: 'a', - repl: 'b', - }), - replace: sub.transform.string.replace({ - object: { source_key: src, target_key: trg }, - pattern: 'a', - replacement: 'b', - }), - split: sub.transform.string.split({ - object: { source_key: src, target_key: trg }, - sep: '.', - }), - }, - }, - helpers: { - make_array: sub.helpers.make_array(src), - key: { - append: sub.helpers.object.append(src, trg), - append_array: sub.helpers.object.append_array(src), - get_element: sub.helpers.object.get_element(src, 1), - }, - }, - pattern: { - condition: { - obj: sub.pattern.condition.obj(src), - negate: sub.pattern.condition.negate(inspector), - network: { - ip: { - internal: sub.pattern.condition.network.ip.internal(src), - }, - }, - logic: { - len: { - eq_zero: sub.pattern.condition.number.length.eq_zero(src), - gt_zero: sub.pattern.condition.number.length.gt_zero(src), - }, - }, - }, - transform: { - conditional: sub.pattern.transform.conditional(inspector, transform), - }, - }, -} diff --git a/v1/transform/aggregate.go b/v1/transform/aggregate.go deleted file mode 100644 index f9883a28..00000000 --- a/v1/transform/aggregate.go +++ /dev/null @@ -1,61 +0,0 @@ -package transform - -import ( - "bytes" - "fmt" - "slices" - - iconfig "github.com/brexhq/substation/internal/config" - "github.com/brexhq/substation/internal/errors" -) - -type aggregateArrayConfig struct { - ID string `json:"id"` - Object iconfig.Object `json:"object"` - Batch iconfig.Batch `json:"batch"` -} - -func (c *aggregateArrayConfig) Decode(in interface{}) error { - return iconfig.Decode(in, c) -} - -func aggToArray(data [][]byte) []byte { - if len(data) == 0 { - return nil - } - - return slices.Concat([]byte("["), bytes.Join(data, []byte(",")), []byte("]")) -} - -type aggregateStrConfig struct { - // Separator is the string that is used 
to join and split data. - Separator string `json:"separator"` - - ID string `json:"id"` - Object iconfig.Object `json:"object"` - Batch iconfig.Batch `json:"batch"` -} - -func (c *aggregateStrConfig) Decode(in interface{}) error { - return iconfig.Decode(in, c) -} - -func (c *aggregateStrConfig) Validate() error { - if c.Separator == "" { - return fmt.Errorf("separator: %v", errors.ErrMissingRequiredOption) - } - - return nil -} - -func aggToStr(data [][]byte, separator []byte) []byte { - if len(data) == 0 { - return nil - } - - return bytes.Join(data, separator) -} - -func aggFromStr(data []byte, separator []byte) [][]byte { - return bytes.Split(data, separator) -} diff --git a/v1/transform/aggregate_from_array.go b/v1/transform/aggregate_from_array.go deleted file mode 100644 index 4097591d..00000000 --- a/v1/transform/aggregate_from_array.go +++ /dev/null @@ -1,99 +0,0 @@ -package transform - -import ( - "context" - "encoding/json" - "fmt" - - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" - "github.com/tidwall/gjson" -) - -func newAggregateFromArray(_ context.Context, cfg config.Config) (*aggregateFromArray, error) { - conf := aggregateArrayConfig{} - if err := conf.Decode(cfg.Settings); err != nil { - return nil, fmt.Errorf("transform aggregate_from_array: %v", err) - } - - if conf.ID == "" { - conf.ID = "aggregate_from_array" - } - - tf := aggregateFromArray{ - conf: conf, - hasObjSrc: conf.Object.SourceKey != "", - hasObjTrg: conf.Object.TargetKey != "", - } - - return &tf, nil -} - -type aggregateFromArray struct { - conf aggregateArrayConfig - hasObjSrc bool - hasObjTrg bool -} - -func (tf *aggregateFromArray) Transform(ctx context.Context, msg *message.Message) ([]*message.Message, error) { - if msg.IsControl() { - return []*message.Message{msg}, nil - } - - meta := msg.Metadata() - var output []*message.Message - - var value message.Value - if tf.hasObjSrc { - value = msg.GetValue(tf.conf.Object.SourceKey) - if err := msg.DeleteValue(tf.conf.Object.SourceKey); err != nil { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err) - } - } else { - value = bytesToValue(msg.Data()) - } - - if !value.Exists() { - return []*message.Message{msg}, nil - } - - for _, res := range value.Array() { - outMsg := message.New().SetMetadata(meta) - - if tf.hasObjSrc { - for key, val := range msg.GetValue("@this").Map() { - if err := outMsg.SetValue(key, val.Value()); err != nil { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err) - } - } - } - - if tf.hasObjTrg { - if err := outMsg.SetValue(tf.conf.Object.TargetKey, res); err != nil { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err) - } - - output = append(output, outMsg) - continue - } - - if tf.hasObjSrc { - tmp := fmt.Sprintf(`[%s,%s]`, outMsg.Data(), res.String()) - join := gjson.GetBytes([]byte(tmp), "@join") - - outMsg.SetData([]byte(join.String())) - output = append(output, outMsg) - continue - } - - outMsg.SetData(res.Bytes()) - output = append(output, outMsg) - } - - return output, nil -} - -func (tf *aggregateFromArray) String() string { - b, _ := json.Marshal(tf.conf) - return string(b) -} diff --git a/v1/transform/aggregate_from_array_test.go b/v1/transform/aggregate_from_array_test.go deleted file mode 100644 index b7b7c931..00000000 --- a/v1/transform/aggregate_from_array_test.go +++ /dev/null @@ -1,136 +0,0 @@ -package transform - -import ( - "context" - "testing" - - "golang.org/x/exp/slices" - - "github.com/brexhq/substation/config" - 
"github.com/brexhq/substation/message" -) - -var _ Transformer = &aggregateFromArray{} - -var aggregateFromArrayTests = []struct { - name string - cfg config.Config - data []string - expected []string -}{ - // data tests - { - "data", - config.Config{}, - []string{ - `[{"a":"b"},{"c":"d"},{"e":"f"}]`, - }, - []string{ - `{"a":"b"}`, - `{"c":"d"}`, - `{"e":"f"}`, - }, - }, - { - "data with set_key", - config.Config{ - Settings: map[string]interface{}{ - "object": map[string]interface{}{ - "target_key": "x", - }, - }, - }, - []string{ - `[{"a":"b"},{"c":"d"},{"e":"f"}]`, - }, - []string{ - `{"x":{"a":"b"}}`, - `{"x":{"c":"d"}}`, - `{"x":{"e":"f"}}`, - }, - }, - // object tests - { - "object", - config.Config{ - Settings: map[string]interface{}{ - "object": map[string]interface{}{ - "source_key": "x", - }, - }, - }, - []string{ - `{"x":[{"a":"b"},{"c":"d"},{"e":"f"}],"y":"z"}`, - }, - []string{ - `{"y":"z","a":"b"}`, - `{"y":"z","c":"d"}`, - `{"y":"z","e":"f"}`, - }, - }, - { - "object with set_key", - config.Config{ - Settings: map[string]interface{}{ - "object": map[string]interface{}{ - "source_key": "x", - "target_key": "x", - }, - }, - }, - []string{ - `{"x":[{"a":"b"},{"c":"d"},{"e":"f"}],"y":"z"}`, - }, - []string{ - `{"y":"z","x":{"a":"b"}}`, - `{"y":"z","x":{"c":"d"}}`, - `{"y":"z","x":{"e":"f"}}`, - }, - }, -} - -func TestAggregateFromArray(t *testing.T) { - ctx := context.TODO() - for _, test := range aggregateFromArrayTests { - t.Run(test.name, func(t *testing.T) { - var messages []*message.Message - for _, data := range test.data { - msg := message.New().SetData([]byte(data)) - messages = append(messages, msg) - } - - // aggregateFromArray relies on an interrupt message to flush the buffer, - // so it's always added and then removed from the output. - ctrl := message.New().AsControl() - messages = append(messages, ctrl) - - tf, err := newAggregateFromArray(ctx, test.cfg) - if err != nil { - t.Fatal(err) - } - - result, err := Apply(ctx, []Transformer{tf}, messages...) - if err != nil { - t.Error(err) - } - - var arr []string - for _, c := range result { - if c.IsControl() { - continue - } - - arr = append(arr, string(c.Data())) - } - - // The order of the output is not guaranteed, so we need to - // check that the expected values are present anywhere in the - // result. 
- for _, r := range arr { - if !slices.Contains(test.expected, r) { - t.Errorf("expected %s, got %s", test.expected, r) - } - } - }) - } -} diff --git a/v1/transform/aggregate_from_string.go b/v1/transform/aggregate_from_string.go deleted file mode 100644 index 6a529b00..00000000 --- a/v1/transform/aggregate_from_string.go +++ /dev/null @@ -1,59 +0,0 @@ -package transform - -import ( - "context" - "encoding/json" - "fmt" - - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" -) - -func newAggregateFromString(_ context.Context, cfg config.Config) (*aggregateFromString, error) { - conf := aggregateStrConfig{} - if err := conf.Decode(cfg.Settings); err != nil { - return nil, fmt.Errorf("transform aggregate_from_string: %v", err) - } - - if conf.ID == "" { - conf.ID = "aggregate_from_string" - } - - if err := conf.Validate(); err != nil { - return nil, fmt.Errorf("transform %s: %v", conf.ID, err) - } - - tf := aggregateFromString{ - conf: conf, - separator: []byte(conf.Separator), - } - - return &tf, nil -} - -type aggregateFromString struct { - conf aggregateStrConfig - - separator []byte -} - -func (tf *aggregateFromString) Transform(ctx context.Context, msg *message.Message) ([]*message.Message, error) { - if msg.IsControl() { - return []*message.Message{msg}, nil - } - - var output []*message.Message - deagg := aggFromStr(msg.Data(), tf.separator) - - for _, b := range deagg { - msg := message.New().SetData(b).SetMetadata(msg.Metadata()) - output = append(output, msg) - } - - return output, nil -} - -func (tf *aggregateFromString) String() string { - b, _ := json.Marshal(tf.conf) - return string(b) -} diff --git a/v1/transform/aggregate_from_string_test.go b/v1/transform/aggregate_from_string_test.go deleted file mode 100644 index 7adc6ca7..00000000 --- a/v1/transform/aggregate_from_string_test.go +++ /dev/null @@ -1,82 +0,0 @@ -package transform - -import ( - "context" - "testing" - - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" - "golang.org/x/exp/slices" -) - -var _ Transformer = &aggregateFromString{} - -var aggregateFromStringTests = []struct { - name string - cfg config.Config - data []string - expected []string -}{ - { - "data", - config.Config{ - Settings: map[string]interface{}{ - "separator": `\n`, - }, - }, - []string{ - `{"a":"b"}\n{"c":"d"}\n{"e":"f"}`, - }, - []string{ - `{"a":"b"}`, - `{"c":"d"}`, - `{"e":"f"}`, - }, - }, -} - -func TestAggregateFromString(t *testing.T) { - ctx := context.TODO() - for _, test := range aggregateFromStringTests { - t.Run(test.name, func(t *testing.T) { - var messages []*message.Message - for _, data := range test.data { - msg := message.New().SetData([]byte(data)) - messages = append(messages, msg) - } - - // aggregateFromString relies on an interrupt message to flush the buffer, - // so it's always added and then removed from the output. - ctrl := message.New().AsControl() - messages = append(messages, ctrl) - - tf, err := newAggregateFromString(ctx, test.cfg) - if err != nil { - t.Fatal(err) - } - - result, err := Apply(ctx, []Transformer{tf}, messages...) - if err != nil { - t.Error(err) - } - - var arr []string - for _, c := range result { - if c.IsControl() { - continue - } - - arr = append(arr, string(c.Data())) - } - - // The order of the output is not guaranteed, so we need to - // check that the expected values are present anywhere in the - // result. 
- for _, r := range arr { - if !slices.Contains(test.expected, r) { - t.Errorf("expected %s, got %s", test.expected, r) - } - } - }) - } -} diff --git a/v1/transform/aggregate_to_array.go b/v1/transform/aggregate_to_array.go deleted file mode 100644 index cd6e7546..00000000 --- a/v1/transform/aggregate_to_array.go +++ /dev/null @@ -1,106 +0,0 @@ -package transform - -import ( - "context" - "encoding/json" - "fmt" - "sync" - - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/internal/aggregate" - "github.com/brexhq/substation/message" -) - -func newAggregateToArray(_ context.Context, cfg config.Config) (*aggregateToArray, error) { - conf := aggregateArrayConfig{} - if err := conf.Decode(cfg.Settings); err != nil { - return nil, fmt.Errorf("transform aggregate_to_array: %v", err) - } - - if conf.ID == "" { - conf.ID = "aggregate_to_array" - } - - tf := aggregateToArray{ - conf: conf, - hasObjTrg: conf.Object.TargetKey != "", - } - - agg, err := aggregate.New(aggregate.Config{ - Count: conf.Batch.Count, - Size: conf.Batch.Size, - Duration: conf.Batch.Duration, - }) - if err != nil { - return nil, fmt.Errorf("transform %s: %v", conf.ID, err) - } - tf.agg = *agg - - return &tf, nil -} - -type aggregateToArray struct { - conf aggregateArrayConfig - hasObjTrg bool - - mu sync.Mutex - agg aggregate.Aggregate -} - -func (tf *aggregateToArray) Transform(ctx context.Context, msg *message.Message) ([]*message.Message, error) { - tf.mu.Lock() - defer tf.mu.Unlock() - - if msg.IsControl() { - var output []*message.Message - - for _, items := range tf.agg.GetAll() { - array := aggToArray(items.Get()) - - outMsg := message.New() - if tf.hasObjTrg { - if err := outMsg.SetValue(tf.conf.Object.TargetKey, array); err != nil { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err) - } - } else { - outMsg.SetData(array) - } - - output = append(output, outMsg) - } - - tf.agg.ResetAll() - - output = append(output, msg) - return output, nil - } - - key := msg.GetValue(tf.conf.Object.BatchKey).String() - if ok := tf.agg.Add(key, msg.Data()); ok { - return nil, nil - } - - array := aggToArray(tf.agg.Get(key)) - - outMsg := message.New() - if tf.hasObjTrg { - if err := outMsg.SetValue(tf.conf.Object.TargetKey, array); err != nil { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err) - } - } else { - outMsg.SetData(array) - } - - // If data cannot be added after reset, then the batch is misconfigured.
- tf.agg.Reset(key) - if ok := tf.agg.Add(key, msg.Data()); !ok { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, errSendBatchMisconfigured) - } - - return []*message.Message{outMsg}, nil -} - -func (tf *aggregateToArray) String() string { - b, _ := json.Marshal(tf.conf) - return string(b) -} diff --git a/v1/transform/aggregate_to_array_test.go b/v1/transform/aggregate_to_array_test.go deleted file mode 100644 index b9ae79f6..00000000 --- a/v1/transform/aggregate_to_array_test.go +++ /dev/null @@ -1,156 +0,0 @@ -package transform - -import ( - "context" - "testing" - - "golang.org/x/exp/slices" - - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" -) - -var _ Transformer = &aggregateToArray{} - -var aggregateToArrayTests = []struct { - name string - cfg config.Config - data []string - expected []string -}{ - // data tests - { - "data no_limit", - config.Config{}, - []string{ - `{"a":"b"}`, - `{"c":"d"}`, - `{"e":"f"}`, - }, - []string{ - `[{"a":"b"},{"c":"d"},{"e":"f"}]`, - }, - }, - { - "data with_key", - config.Config{ - Settings: map[string]interface{}{ - "object": map[string]interface{}{ - "batch_key": "c", - }, - }, - }, - []string{ - `{"a":"b"}`, - `{"c":"d"}`, - `{"e":"f"}`, - }, - []string{ - `[{"a":"b"},{"e":"f"}]`, - `[{"c":"d"}]`, - }, - }, - { - "data max_count", - config.Config{ - Settings: map[string]interface{}{ - "batch": map[string]interface{}{ - "count": 2, - }, - }, - }, - []string{ - `{"a":"b"}`, - `{"c":"d"}`, - `{"e":"f"}`, - }, - []string{ - `[{"a":"b"},{"c":"d"}]`, - `[{"e":"f"}]`, - }, - }, - { - "data max_size", - config.Config{ - Settings: map[string]interface{}{ - "batch": map[string]interface{}{ - "size": 25, - }, - }, - }, - []string{ - `{"a":"b"}`, - `{"c":"d"}`, - `{"e":"f"}`, - }, - []string{ - `[{"a":"b"},{"c":"d"}]`, - `[{"e":"f"}]`, - }, - }, - // object tests - { - "object no_limit", - config.Config{ - Settings: map[string]interface{}{ - "object": map[string]interface{}{ - "target_key": "x", - }, - }, - }, - []string{ - `{"a":"b"}`, - `{"c":"d"}`, - `{"e":"f"}`, - }, - []string{ - `{"x":[{"a":"b"},{"c":"d"},{"e":"f"}]}`, - }, - }, -} - -func TestAggregateToArray(t *testing.T) { - ctx := context.TODO() - for _, test := range aggregateToArrayTests { - t.Run(test.name, func(t *testing.T) { - var messages []*message.Message - for _, data := range test.data { - msg := message.New().SetData([]byte(data)) - messages = append(messages, msg) - } - - // aggregateToArray relies on an interrupt message to flush the buffer, - // so it's always added and then removed from the output. - ctrl := message.New().AsControl() - messages = append(messages, ctrl) - - tf, err := newAggregateToArray(ctx, test.cfg) - if err != nil { - t.Fatal(err) - } - - result, err := Apply(ctx, []Transformer{tf}, messages...) - if err != nil { - t.Error(err) - } - - var arr []string - for _, c := range result { - if c.IsControl() { - continue - } - - arr = append(arr, string(c.Data())) - } - - // The order of the output is not guaranteed, so we need to - // check that the expected values are present anywhere in the - // result. 
- for _, r := range arr { - if !slices.Contains(test.expected, r) { - t.Errorf("expected %s, got %s", test.expected, r) - } - } - }) - } -} diff --git a/v1/transform/aggregate_to_string.go b/v1/transform/aggregate_to_string.go deleted file mode 100644 index 7eb5f489..00000000 --- a/v1/transform/aggregate_to_string.go +++ /dev/null @@ -1,96 +0,0 @@ -package transform - -import ( - "context" - "encoding/json" - "fmt" - "sync" - - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/internal/aggregate" - "github.com/brexhq/substation/message" -) - -func newAggregateToString(_ context.Context, cfg config.Config) (*aggregateToString, error) { - conf := aggregateStrConfig{} - if err := conf.Decode(cfg.Settings); err != nil { - return nil, fmt.Errorf("transform aggregate_to_string: %v", err) - } - - if conf.ID == "" { - conf.ID = "aggregate_to_string" - } - - if err := conf.Validate(); err != nil { - return nil, fmt.Errorf("transform %s: %v", conf.ID, err) - } - - tf := aggregateToString{ - conf: conf, - separator: []byte(conf.Separator), - } - - agg, err := aggregate.New(aggregate.Config{ - Count: conf.Batch.Count, - Size: conf.Batch.Size, - Duration: conf.Batch.Duration, - }) - if err != nil { - return nil, fmt.Errorf("transform %s: %v", conf.ID, err) - } - tf.agg = agg - - return &tf, nil -} - -type aggregateToString struct { - conf aggregateStrConfig - - separator []byte - - mu sync.Mutex - agg *aggregate.Aggregate -} - -func (tf *aggregateToString) Transform(ctx context.Context, msg *message.Message) ([]*message.Message, error) { - tf.mu.Lock() - defer tf.mu.Unlock() - - if msg.IsControl() { - var output []*message.Message - - for _, items := range tf.agg.GetAll() { - agg := aggToStr(items.Get(), tf.separator) - outMsg := message.New().SetData(agg) - - output = append(output, outMsg) - } - - tf.agg.ResetAll() - - output = append(output, msg) - return output, nil - } - - // If this value does not exist, then all data is batched together. - key := msg.GetValue(tf.conf.Object.BatchKey).String() - if ok := tf.agg.Add(key, msg.Data()); ok { - return nil, nil - } - - agg := aggToStr(tf.agg.Get(key), tf.separator) - outMsg := message.New().SetData(agg) - - // If data cannot be added after reset, then the batch is misconfigured.
- tf.agg.Reset(key) - if ok := tf.agg.Add(key, msg.Data()); !ok { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, errSendBatchMisconfigured) - } - - return []*message.Message{outMsg}, nil -} - -func (tf *aggregateToString) String() string { - b, _ := json.Marshal(tf.conf) - return string(b) -} diff --git a/v1/transform/aggregate_to_string_test.go b/v1/transform/aggregate_to_string_test.go deleted file mode 100644 index f510ad10..00000000 --- a/v1/transform/aggregate_to_string_test.go +++ /dev/null @@ -1,142 +0,0 @@ -package transform - -import ( - "context" - "testing" - - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" - "golang.org/x/exp/slices" -) - -var _ Transformer = &aggregateToString{} - -var aggregateToStringTests = []struct { - name string - cfg config.Config - data []string - expected []string -}{ - { - "data no_limit", - config.Config{ - Settings: map[string]interface{}{ - "separator": `\n`, - }, - }, - []string{ - `{"a":"b"}`, - `{"c":"d"}`, - `{"e":"f"}`, - }, - []string{ - `{"a":"b"}\n{"c":"d"}\n{"e":"f"}`, - }, - }, - { - "data with_key", - config.Config{ - Settings: map[string]interface{}{ - "object": map[string]interface{}{ - "batch_key": "c", - }, - "separator": `\n`, - }, - }, - []string{ - `{"a":"b"}`, - `{"c":"d"}`, - `{"e":"f"}`, - }, - []string{ - `{"a":"b"}\n{"e":"f"}`, - `{"c":"d"}`, - }, - }, - { - "data max_count", - config.Config{ - Settings: map[string]interface{}{ - "batch": map[string]interface{}{ - "count": 2, - }, - "separator": `\n`, - }, - }, - []string{ - `{"a":"b"}`, - `{"c":"d"}`, - `{"e":"f"}`, - }, - []string{ - `{"a":"b"}\n{"c":"d"}`, - `{"e":"f"}`, - }, - }, - { - "data max_size", - config.Config{ - Settings: map[string]interface{}{ - "batch": map[string]interface{}{ - "size": 25, - }, - "separator": `\n`, - }, - }, - []string{ - `{"a":"b"}`, - `{"c":"d"}`, - `{"e":"f"}`, - }, - []string{ - `{"a":"b"}\n{"c":"d"}`, - `{"e":"f"}`, - }, - }, -} - -func TestAggregateToString(t *testing.T) { - ctx := context.TODO() - for _, test := range aggregateToStringTests { - t.Run(test.name, func(t *testing.T) { - var messages []*message.Message - for _, data := range test.data { - msg := message.New().SetData([]byte(data)) - messages = append(messages, msg) - } - - // aggregateToString relies on an interrupt message to flush the buffer, - // so it's always added and then removed from the output. - ctrl := message.New().AsControl() - messages = append(messages, ctrl) - - tf, err := newAggregateToString(ctx, test.cfg) - if err != nil { - t.Fatal(err) - } - - result, err := Apply(ctx, []Transformer{tf}, messages...) - if err != nil { - t.Error(err) - } - - var arr []string - for _, c := range result { - if c.IsControl() { - continue - } - - arr = append(arr, string(c.Data())) - } - - // The order of the output is not guaranteed, so we need to - // check that the expected values are present anywhere in the - // result. 
- for _, r := range arr { - if !slices.Contains(test.expected, r) { - t.Errorf("expected %s, got %s", test.expected, r) - } - } - }) - } -} diff --git a/v1/transform/array_join.go b/v1/transform/array_join.go deleted file mode 100644 index 362b9bfb..00000000 --- a/v1/transform/array_join.go +++ /dev/null @@ -1,113 +0,0 @@ -package transform - -import ( - "context" - "encoding/json" - "fmt" - "strings" - - "github.com/brexhq/substation/config" - iconfig "github.com/brexhq/substation/internal/config" - "github.com/brexhq/substation/internal/errors" - "github.com/brexhq/substation/message" -) - -type arrayJoinConfig struct { - // Separator is the string that is used to join data. - Separator string `json:"separator"` - - ID string `json:"id"` - Object iconfig.Object `json:"object"` -} - -func (c *arrayJoinConfig) Decode(in interface{}) error { - return iconfig.Decode(in, c) -} - -func (c *arrayJoinConfig) Validate() error { - if c.Object.SourceKey == "" && c.Object.TargetKey != "" { - return fmt.Errorf("object_source_key: %v", errors.ErrMissingRequiredOption) - } - - if c.Object.SourceKey != "" && c.Object.TargetKey == "" { - return fmt.Errorf("object_target_key: %v", errors.ErrMissingRequiredOption) - } - - return nil -} - -func newArrayJoin(_ context.Context, cfg config.Config) (*arrayJoin, error) { - conf := arrayJoinConfig{} - if err := conf.Decode(cfg.Settings); err != nil { - return nil, fmt.Errorf("transform array_join: %v", err) - } - - if conf.ID == "" { - conf.ID = "array_join" - } - - if err := conf.Validate(); err != nil { - return nil, fmt.Errorf("transform %s: %v", conf.ID, err) - } - - tf := arrayJoin{ - conf: conf, - hasObjectKey: conf.Object.SourceKey != "", - hasObjectSetKey: conf.Object.TargetKey != "", - separator: []byte(conf.Separator), - } - - return &tf, nil -} - -type arrayJoin struct { - conf arrayJoinConfig - hasObjectKey bool - hasObjectSetKey bool - - separator []byte -} - -func (tf *arrayJoin) Transform(ctx context.Context, msg *message.Message) ([]*message.Message, error) { - if msg.IsControl() { - return []*message.Message{msg}, nil - } - - var value message.Value - if tf.hasObjectKey { - value = msg.GetValue(tf.conf.Object.SourceKey) - } else { - value = bytesToValue(msg.Data()) - } - - if !value.Exists() { - return []*message.Message{msg}, nil - } - - if !value.IsArray() { - return []*message.Message{msg}, nil - } - - var arr []string - for _, val := range value.Array() { - arr = append(arr, val.String()) - } - - str := strings.Join(arr, tf.conf.Separator) - - if tf.hasObjectSetKey { - if err := msg.SetValue(tf.conf.Object.TargetKey, str); err != nil { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err) - } - - return []*message.Message{msg}, nil - } - - msg.SetData([]byte(str)) - return []*message.Message{msg}, nil -} - -func (tf *arrayJoin) String() string { - b, _ := json.Marshal(tf.conf) - return string(b) -} diff --git a/v1/transform/array_join_test.go b/v1/transform/array_join_test.go deleted file mode 100644 index 0e6f6f9a..00000000 --- a/v1/transform/array_join_test.go +++ /dev/null @@ -1,100 +0,0 @@ -package transform - -import ( - "context" - "reflect" - "testing" - - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" -) - -var _ Transformer = &arrayJoin{} - -var arrayJoinTests = []struct { - name string - cfg config.Config - test []byte - expected [][]byte -}{ - // data tests - { - "data", - config.Config{ - Settings: map[string]interface{}{ - "separator": ".", - }, - }, - []byte(`["b","c","d"]`), - [][]byte{ - 
[]byte(`b.c.d`), - }, - }, - // object tests - { - "object from", - config.Config{ - Settings: map[string]interface{}{ - "object": map[string]interface{}{ - "source_key": "a", - "target_key": "a", - }, - "separator": ".", - }, - }, - []byte(`{"a":["b","c","d"]}`), - [][]byte{ - []byte(`{"a":"b.c.d"}`), - }, - }, -} - -func TestArrayJoin(t *testing.T) { - ctx := context.TODO() - for _, test := range arrayJoinTests { - t.Run(test.name, func(t *testing.T) { - tf, err := newArrayJoin(ctx, test.cfg) - if err != nil { - t.Fatal(err) - } - - msg := message.New().SetData(test.test) - result, err := tf.Transform(ctx, msg) - if err != nil { - t.Error(err) - } - - var data [][]byte - for _, c := range result { - data = append(data, c.Data()) - } - - if !reflect.DeepEqual(data, test.expected) { - t.Errorf("expected %s, got %s", test.expected, data) - } - }) - } -} - -func benchmarkArrayJoin(b *testing.B, tf *arrayJoin, data []byte) { - ctx := context.TODO() - for i := 0; i < b.N; i++ { - msg := message.New().SetData(data) - _, _ = tf.Transform(ctx, msg) - } -} - -func BenchmarkArrayJoin(b *testing.B) { - for _, test := range arrayJoinTests { - p, err := newArrayJoin(context.TODO(), test.cfg) - if err != nil { - b.Fatal(err) - } - - b.Run(test.name, - func(b *testing.B) { - benchmarkArrayJoin(b, p, test.test) - }, - ) - } -} diff --git a/v1/transform/array_zip.go b/v1/transform/array_zip.go deleted file mode 100644 index 778d9e72..00000000 --- a/v1/transform/array_zip.go +++ /dev/null @@ -1,107 +0,0 @@ -package transform - -import ( - "context" - "encoding/json" - "fmt" - - "github.com/brexhq/substation/config" - iconfig "github.com/brexhq/substation/internal/config" - "github.com/brexhq/substation/internal/errors" - "github.com/brexhq/substation/message" -) - -type arrayZipConfig struct { - ID string `json:"id"` - Object iconfig.Object `json:"object"` -} - -func (c *arrayZipConfig) Decode(in interface{}) error { - return iconfig.Decode(in, c) -} - -func (c *arrayZipConfig) Validate() error { - if c.Object.SourceKey == "" && c.Object.TargetKey != "" { - return fmt.Errorf("object_source_key: %v", errors.ErrMissingRequiredOption) - } - - if c.Object.SourceKey != "" && c.Object.TargetKey == "" { - return fmt.Errorf("object_target_key: %v", errors.ErrMissingRequiredOption) - } - - return nil -} - -func newArrayZip(_ context.Context, cfg config.Config) (*arrayZip, error) { - conf := arrayZipConfig{} - if err := conf.Decode(cfg.Settings); err != nil { - return nil, fmt.Errorf("transform array_zip: %v", err) - } - - if conf.ID == "" { - conf.ID = "array_zip" - } - - if err := conf.Validate(); err != nil { - return nil, fmt.Errorf("transform %s: %v", conf.ID, err) - } - - tf := arrayZip{ - conf: conf, - hasObjSrc: conf.Object.SourceKey != "", - hasObjDst: conf.Object.TargetKey != "", - } - - return &tf, nil -} - -type arrayZip struct { - conf arrayZipConfig - hasObjSrc bool - hasObjDst bool -} - -func (tf *arrayZip) Transform(ctx context.Context, msg *message.Message) ([]*message.Message, error) { - if msg.IsControl() { - return []*message.Message{msg}, nil - } - - var value message.Value - if tf.hasObjSrc { - value = msg.GetValue(tf.conf.Object.SourceKey) - } else { - value = bytesToValue(msg.Data()) - } - - if !value.Exists() || !value.IsArray() { - return []*message.Message{msg}, nil - } - - cache := make(map[int][]interface{}) - for _, val := range value.Array() { - for i, v := range val.Array() { - cache[i] = append(cache[i], v.Value()) - } - } - - var b []interface{} - for i := 0; i < len(cache); i++ { - 
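- // cache maps each column index to its collected values; appending in index order zips [["b","c"],[1,2]] into [["b",1],["c",2]].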
b = append(b, cache[i]) - } - - if tf.hasObjDst { - if err := msg.SetValue(tf.conf.Object.TargetKey, b); err != nil { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err) - } - - return []*message.Message{msg}, nil - } - - msg.SetData(anyToBytes(b)) - return []*message.Message{msg}, nil -} - -func (tf *arrayZip) String() string { - b, _ := json.Marshal(tf.conf) - return string(b) -} diff --git a/v1/transform/array_zip_test.go b/v1/transform/array_zip_test.go deleted file mode 100644 index 0b7eae92..00000000 --- a/v1/transform/array_zip_test.go +++ /dev/null @@ -1,95 +0,0 @@ -package transform - -import ( - "context" - "reflect" - "testing" - - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" -) - -var _ Transformer = &arrayZip{} - -var arrayZipTests = []struct { - name string - cfg config.Config - test []byte - expected [][]byte -}{ - { - "data", - config.Config{ - Settings: map[string]interface{}{}, - }, - []byte(`[["b","c"],[1,2]]`), - [][]byte{ - []byte(`[["b",1],["c",2]]`), - }, - }, - { - "object", - config.Config{ - Settings: map[string]interface{}{ - "object": map[string]interface{}{ - "source_key": "a", - "target_key": "a", - }, - }, - }, - []byte(`{"a":[["b","c"],[1,2]]}`), - [][]byte{ - []byte(`{"a":[["b",1],["c",2]]}`), - }, - }, -} - -func TestArrayZip(t *testing.T) { - ctx := context.TODO() - for _, test := range arrayZipTests { - t.Run(test.name, func(t *testing.T) { - tf, err := newArrayZip(ctx, test.cfg) - if err != nil { - t.Fatal(err) - } - - msg := message.New().SetData(test.test) - result, err := tf.Transform(ctx, msg) - if err != nil { - t.Error(err) - } - - var data [][]byte - for _, c := range result { - data = append(data, c.Data()) - } - - if !reflect.DeepEqual(data, test.expected) { - t.Errorf("expected %s, got %s", test.expected, data) - } - }) - } -} - -func benchmarkArrayZip(b *testing.B, tf *arrayZip, data []byte) { - ctx := context.TODO() - for i := 0; i < b.N; i++ { - msg := message.New().SetData(data) - _, _ = tf.Transform(ctx, msg) - } -} - -func BenchmarkArrayZip(b *testing.B) { - for _, test := range arrayZipTests { - tf, err := newArrayZip(context.TODO(), test.cfg) - if err != nil { - b.Fatal(err) - } - - b.Run(test.name, - func(b *testing.B) { - benchmarkArrayZip(b, tf, test.test) - }, - ) - } -} diff --git a/v1/transform/enrich.go b/v1/transform/enrich.go deleted file mode 100644 index 64e19f33..00000000 --- a/v1/transform/enrich.go +++ /dev/null @@ -1,62 +0,0 @@ -package transform - -import ( - "bytes" - "encoding/json" - "fmt" - "io" - gohttp "net/http" - - iconfig "github.com/brexhq/substation/internal/config" - "github.com/brexhq/substation/internal/errors" -) - -// enrichHTTPInterp is used for interpolating data into URLs. 
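- // For example (hypothetical endpoint): "https://api.example.com/lookup/${DATA}" becomes "https://api.example.com/lookup/foo" when the interpolated value is "foo".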
-const enrichHTTPInterp = `${DATA}` - -type enrichDNSConfig struct { - ID string `json:"id"` - Object iconfig.Object `json:"object"` - Request iconfig.Request `json:"request"` -} - -func (c *enrichDNSConfig) Decode(in interface{}) error { - return iconfig.Decode(in, c) -} - -func (c *enrichDNSConfig) Validate() error { - if c.Object.SourceKey == "" && c.Object.TargetKey != "" { - return fmt.Errorf("object_source_key: %v", errors.ErrMissingRequiredOption) - } - - if c.Object.SourceKey != "" && c.Object.TargetKey == "" { - return fmt.Errorf("object_target_key: %v", errors.ErrMissingRequiredOption) - } - - if c.Request.Timeout == "" { - c.Request.Timeout = "1s" - } - - return nil -} - -func enrichHTTPParseResponse(resp *gohttp.Response) ([]byte, error) { - defer resp.Body.Close() - - buf, err := io.ReadAll(resp.Body) - if err != nil { - return nil, err - } - - dst := &bytes.Buffer{} - if json.Valid(buf) { - // Compact converts a multi-line object into a single-line object. - if err := json.Compact(dst, buf); err != nil { - return nil, err - } - } else { - dst = bytes.NewBuffer(buf) - } - - return dst.Bytes(), nil -} diff --git a/v1/transform/enrich_aws_dynamodb.go b/v1/transform/enrich_aws_dynamodb.go deleted file mode 100644 index 90583e4c..00000000 --- a/v1/transform/enrich_aws_dynamodb.go +++ /dev/null @@ -1,177 +0,0 @@ -package transform - -import ( - "context" - "encoding/json" - "fmt" - - "github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute" - - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/internal/aws" - "github.com/brexhq/substation/internal/aws/dynamodb" - iconfig "github.com/brexhq/substation/internal/config" - "github.com/brexhq/substation/internal/errors" - "github.com/brexhq/substation/message" -) - -type enrichAWSDynamoDBConfig struct { - // TableName is the DynamoDB table that is queried. - TableName string `json:"table_name"` - // PartitionKey is the DynamoDB partition key. - PartitionKey string `json:"partition_key"` - // SortKey is the DynamoDB sort key. - // - // This is optional and has no default. - SortKey string `json:"sort_key"` - // KeyConditionExpression is the DynamoDB key condition - // expression string (see documentation). - KeyConditionExpression string `json:"key_condition_expression"` - // Limit determines the maximum number of items to evaluate. - // - // This is optional and defaults to evaluating all items. - Limit int64 `json:"limit"` - // ScanIndexForward specifies the order of index traversal. - // - // Must be one of: - // - true (traversal is performed in ascending order) - // - false (traversal is performed in descending order) - // - // This is optional and defaults to true.
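- // (DynamoDB returns query results ordered by the sort key; when this is false the order is reversed.)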
- ScanIndexForward bool `json:"scan_index_forward"` - - ID string `json:"id"` - Object iconfig.Object `json:"object"` - AWS iconfig.AWS `json:"aws"` - Retry iconfig.Retry `json:"retry"` -} - -func (c *enrichAWSDynamoDBConfig) Decode(in interface{}) error { - return iconfig.Decode(in, c) -} - -func (c *enrichAWSDynamoDBConfig) Validate() error { - if c.Object.TargetKey == "" { - return fmt.Errorf("object_target_key: %v", errors.ErrMissingRequiredOption) - } - - if c.PartitionKey == "" { - return fmt.Errorf("partition_key: %v", errors.ErrMissingRequiredOption) - } - - if c.TableName == "" { - return fmt.Errorf("table_name: %v", errors.ErrMissingRequiredOption) - } - - if c.KeyConditionExpression == "" { - return fmt.Errorf("key_condition_expression: %v", errors.ErrMissingRequiredOption) - } - return nil -} - -func newEnrichAWSDynamoDB(_ context.Context, cfg config.Config) (*enrichAWSDynamoDB, error) { - conf := enrichAWSDynamoDBConfig{} - if err := conf.Decode(cfg.Settings); err != nil { - return nil, fmt.Errorf("transform enrich_aws_dynamodb: %v", err) - } - - if conf.ID == "" { - conf.ID = "enrich_aws_dynamodb" - } - - if err := conf.Validate(); err != nil { - return nil, fmt.Errorf("transform %s: %v", conf.ID, err) - } - - tf := enrichAWSDynamoDB{ - conf: conf, - } - - // Setup the AWS client. - tf.client.Setup(aws.Config{ - Region: conf.AWS.Region, - RoleARN: conf.AWS.RoleARN, - MaxRetries: conf.Retry.Count, - RetryableErrors: conf.Retry.ErrorMessages, - }) - - return &tf, nil -} - -type enrichAWSDynamoDB struct { - conf enrichAWSDynamoDBConfig - - // client is safe for concurrent access. - client dynamodb.API -} - -func (tf *enrichAWSDynamoDB) Transform(ctx context.Context, msg *message.Message) ([]*message.Message, error) { - if msg.IsControl() { - return []*message.Message{msg}, nil - } - - var tmp *message.Message - if tf.conf.Object.SourceKey != "" { - value := msg.GetValue(tf.conf.Object.SourceKey) - tmp = message.New().SetData(value.Bytes()) - } else { - tmp = msg - } - - if !json.Valid(tmp.Data()) { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, errMsgInvalidObject) - } - - pk := tmp.GetValue(tf.conf.PartitionKey) - if !pk.Exists() { - return []*message.Message{msg}, nil - } - - sk := tmp.GetValue(tf.conf.SortKey) - value, err := tf.dynamodb(ctx, pk.String(), sk.String()) - if err != nil { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err) - } - - // No match. 
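- // The query returned no items, so the message passes through unmodified.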
- if len(value) == 0 { - return []*message.Message{msg}, nil - } - - if err := msg.SetValue(tf.conf.Object.TargetKey, value); err != nil { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err) - } - - return []*message.Message{msg}, nil -} - -func (tf *enrichAWSDynamoDB) dynamodb(ctx context.Context, pk, sk string) ([]map[string]interface{}, error) { - resp, err := tf.client.Query( - ctx, - tf.conf.TableName, - pk, sk, - tf.conf.KeyConditionExpression, - tf.conf.Limit, - tf.conf.ScanIndexForward, - ) - if err != nil { - return nil, err - } - - var items []map[string]interface{} - for _, i := range resp.Items { - var item map[string]interface{} - err = dynamodbattribute.UnmarshalMap(i, &item) - if err != nil { - return nil, err - } - - items = append(items, item) - } - return items, nil -} - -func (tf *enrichAWSDynamoDB) String() string { - b, _ := json.Marshal(tf.conf) - return string(b) -} diff --git a/v1/transform/enrich_aws_dynamodb_test.go b/v1/transform/enrich_aws_dynamodb_test.go deleted file mode 100644 index 4e563ade..00000000 --- a/v1/transform/enrich_aws_dynamodb_test.go +++ /dev/null @@ -1,118 +0,0 @@ -package transform - -import ( - "context" - "reflect" - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/service/dynamodb" - "github.com/aws/aws-sdk-go/service/dynamodb/dynamodbiface" - "github.com/brexhq/substation/config" - ddb "github.com/brexhq/substation/internal/aws/dynamodb" - "github.com/brexhq/substation/message" -) - -var _ Transformer = &enrichAWSDynamoDB{} - -type enrichAWSDynamoDBMockedQuery struct { - dynamodbiface.DynamoDBAPI - Resp dynamodb.QueryOutput -} - -func (m enrichAWSDynamoDBMockedQuery) QueryWithContext(ctx aws.Context, input *dynamodb.QueryInput, opts ...request.Option) (*dynamodb.QueryOutput, error) { - return &m.Resp, nil -} - -var enrichAWSDynamoDBTests = []struct { - name string - cfg config.Config - test []byte - expected [][]byte - err error - api ddb.API -}{ - { - "success", - config.Config{ - Settings: map[string]interface{}{ - "object": map[string]interface{}{ - "target_key": "a", - }, - "table_name": "tab", - "partition_key": "PK", - "key_condition_expression": "kce", - }, - }, - []byte(`{"PK":"b"}`), - [][]byte{ - []byte(`{"PK":"b","a":[{"b":"c"}]}`), - }, - nil, - ddb.API{ - Client: enrichAWSDynamoDBMockedQuery{ - Resp: dynamodb.QueryOutput{ - Items: []map[string]*dynamodb.AttributeValue{ - { - "b": { - S: aws.String("c"), - }, - }, - }, - }, - }, - }, - }, -} - -func TestEnrichAWSDynamoDB(t *testing.T) { - ctx := context.TODO() - for _, test := range enrichAWSDynamoDBTests { - tf, err := newEnrichAWSDynamoDB(ctx, test.cfg) - if err != nil { - t.Fatal(err) - } - tf.client = test.api - - msg := message.New().SetData(test.test) - result, err := tf.Transform(ctx, msg) - if err != nil { - t.Error(err) - } - - var data [][]byte - for _, c := range result { - data = append(data, c.Data()) - } - - if !reflect.DeepEqual(data, test.expected) { - t.Errorf("expected %s, got %s", test.expected, data) - } - } -} - -func benchmarkEnrichAWSDynamoDB(b *testing.B, tf *enrichAWSDynamoDB, data []byte) { - ctx := context.TODO() - for i := 0; i < b.N; i++ { - msg := message.New().SetData(data) - _, _ = tf.Transform(ctx, msg) - } -} - -func BenchmarkEnrichAWSDynamoDB(b *testing.B) { - ctx := context.TODO() - for _, test := range enrichAWSDynamoDBTests { - b.Run(test.name, - func(b *testing.B) { - tf, err := newEnrichAWSDynamoDB(ctx, test.cfg) - if err != nil { - b.Fatal(err) - } - tf.client 
= test.api - - benchmarkEnrichAWSDynamoDB(b, tf, test.test) - }, - ) - } -} diff --git a/v1/transform/enrich_aws_lambda.go b/v1/transform/enrich_aws_lambda.go deleted file mode 100644 index 85415be6..00000000 --- a/v1/transform/enrich_aws_lambda.go +++ /dev/null @@ -1,117 +0,0 @@ -package transform - -import ( - "context" - "encoding/json" - "fmt" - - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/internal/aws" - "github.com/brexhq/substation/internal/aws/lambda" - iconfig "github.com/brexhq/substation/internal/config" - "github.com/brexhq/substation/internal/errors" - "github.com/brexhq/substation/message" - "github.com/tidwall/gjson" -) - -type enrichAWSLambdaConfig struct { - // FunctionName is the AWS Lambda function to synchronously invoke. - FunctionName string `json:"function_name"` - - ID string `json:"id"` - Object iconfig.Object `json:"object"` - AWS iconfig.AWS `json:"aws"` - Retry iconfig.Retry `json:"retry"` -} - -func (c *enrichAWSLambdaConfig) Decode(in interface{}) error { - return iconfig.Decode(in, c) -} - -func (c *enrichAWSLambdaConfig) Validate() error { - if c.Object.SourceKey == "" { - return fmt.Errorf("object_source_key: %v", errors.ErrMissingRequiredOption) - } - - if c.Object.TargetKey == "" { - return fmt.Errorf("object_target_key: %v", errors.ErrMissingRequiredOption) - } - - if c.FunctionName == "" { - return fmt.Errorf("function_name: %v", errors.ErrMissingRequiredOption) - } - - return nil -} - -func newEnrichAWSLambda(_ context.Context, cfg config.Config) (*enrichAWSLambda, error) { - conf := enrichAWSLambdaConfig{} - if err := conf.Decode(cfg.Settings); err != nil { - return nil, fmt.Errorf("transform enrich_aws_lambda: %v", err) - } - - if conf.ID == "" { - conf.ID = "enrich_aws_lambda" - } - - if err := conf.Validate(); err != nil { - return nil, fmt.Errorf("transform %s: %v", conf.ID, err) - } - - tf := enrichAWSLambda{ - conf: conf, - } - - // Setup the AWS client. - tf.client.Setup(aws.Config{ - Region: conf.AWS.Region, - RoleARN: conf.AWS.RoleARN, - MaxRetries: conf.Retry.Count, - RetryableErrors: conf.Retry.ErrorMessages, - }) - - return &tf, nil -} - -type enrichAWSLambda struct { - conf enrichAWSLambdaConfig - - // client is safe for concurrent access. 
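- // (The AWS SDK for Go v1 documents service clients as safe for concurrent use across goroutines.)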
- client lambda.API -} - -func (tf *enrichAWSLambda) Transform(ctx context.Context, msg *message.Message) ([]*message.Message, error) { - if msg.IsControl() { - return []*message.Message{msg}, nil - } - - value := msg.GetValue(tf.conf.Object.SourceKey) - if !value.Exists() { - return []*message.Message{msg}, nil - } - - if !json.Valid(value.Bytes()) { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, errMsgInvalidObject) - } - - resp, err := tf.client.Invoke(ctx, tf.conf.FunctionName, value.Bytes()) - if err != nil { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err) - } - - if resp.FunctionError != nil { - resErr := gjson.GetBytes(resp.Payload, "errorMessage").String() - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, resErr) - } - - if err := msg.SetValue(tf.conf.Object.TargetKey, resp.Payload); err != nil { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err) - } - - return []*message.Message{msg}, nil -} - -func (tf *enrichAWSLambda) String() string { - b, _ := json.Marshal(tf.conf) - return string(b) -} diff --git a/v1/transform/enrich_aws_lambda_test.go b/v1/transform/enrich_aws_lambda_test.go deleted file mode 100644 index a3c343f3..00000000 --- a/v1/transform/enrich_aws_lambda_test.go +++ /dev/null @@ -1,111 +0,0 @@ -package transform - -import ( - "context" - "reflect" - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/service/lambda" - "github.com/aws/aws-sdk-go/service/lambda/lambdaiface" - "github.com/brexhq/substation/config" - lamb "github.com/brexhq/substation/internal/aws/lambda" - "github.com/brexhq/substation/message" -) - -var _ Transformer = &enrichAWSLambda{} - -type enrichAWSLambdaMockedInvoke struct { - lambdaiface.LambdaAPI - Resp lambda.InvokeOutput -} - -func (m enrichAWSLambdaMockedInvoke) InvokeWithContext(ctx aws.Context, input *lambda.InvokeInput, opts ...request.Option) (*lambda.InvokeOutput, error) { - return &m.Resp, nil -} - -var enrichAWSLambdaTests = []struct { - name string - cfg config.Config - test []byte - expected [][]byte - err error - api lamb.API -}{ - { - "success", - config.Config{ - Settings: map[string]interface{}{ - "object": map[string]interface{}{ - "source_key": "a", - "target_key": "a", - }, - "function_name": "func", - }, - }, - []byte(`{"a":{"b":"c"}}`), - [][]byte{ - []byte(`{"a":{"d":"e"}}`), - }, - nil, - lamb.API{ - Client: enrichAWSLambdaMockedInvoke{ - Resp: lambda.InvokeOutput{ - Payload: []byte(`{"d":"e"}`), - }, - }, - }, - }, -} - -func TestEnrichAWSLambda(t *testing.T) { - ctx := context.TODO() - for _, test := range enrichAWSLambdaTests { - tf, err := newEnrichAWSLambda(ctx, test.cfg) - if err != nil { - t.Fatal(err) - } - tf.client = test.api - - msg := message.New().SetData(test.test) - result, err := tf.Transform(ctx, msg) - if err != nil { - t.Error(err) - } - - var data [][]byte - for _, c := range result { - data = append(data, c.Data()) - } - - if !reflect.DeepEqual(data, test.expected) { - t.Errorf("expected %s, got %s", test.expected, data) - } - } -} - -func benchmarkEnrichAWSLambda(b *testing.B, tf *enrichAWSLambda, data []byte) { - ctx := context.TODO() - for i := 0; i < b.N; i++ { - msg := message.New().SetData(data) - _, _ = tf.Transform(ctx, msg) - } -} - -func BenchmarkEnrichAWSLambda(b *testing.B) { - ctx := context.TODO() - for _, test := range enrichAWSLambdaTests { - b.Run(test.name, - func(b *testing.B) { - tf, err := newEnrichAWSLambda(ctx, test.cfg) - if err != nil { - b.Fatal(err) - } - tf.client = 
test.api - - benchmarkEnrichAWSLambda(b, tf, test.test) - }, - ) - } -} diff --git a/v1/transform/enrich_dns_domain_lookup.go b/v1/transform/enrich_dns_domain_lookup.go deleted file mode 100644 index 0f0f4a9e..00000000 --- a/v1/transform/enrich_dns_domain_lookup.go +++ /dev/null @@ -1,95 +0,0 @@ -//go:build !wasm - -package transform - -import ( - "context" - "encoding/json" - "fmt" - "net" - "time" - - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" -) - -func newEnrichDNSDomainLookup(_ context.Context, cfg config.Config) (*enrichDNSDomainLookup, error) { - conf := enrichDNSConfig{} - if err := conf.Decode(cfg.Settings); err != nil { - return nil, fmt.Errorf("transform enrich_dns_domain_lookup: %v", err) - } - - if conf.ID == "" { - conf.ID = "enrich_dns_domain_lookup" - } - - if err := conf.Validate(); err != nil { - return nil, fmt.Errorf("transform %s: %v", conf.ID, err) - } - - dur, err := time.ParseDuration(conf.Request.Timeout) - if err != nil { - return nil, fmt.Errorf("transform %s: duration: %v", conf.ID, err) - } - - tf := enrichDNSDomainLookup{ - conf: conf, - isObj: conf.Object.SourceKey != "" && conf.Object.TargetKey != "", - resolver: net.Resolver{}, - timeout: dur, - } - - return &tf, nil -} - -type enrichDNSDomainLookup struct { - conf enrichDNSConfig - isObj bool - - resolver net.Resolver - timeout time.Duration -} - -// Transform performs a DNS lookup on a message. -func (tf *enrichDNSDomainLookup) Transform(ctx context.Context, msg *message.Message) ([]*message.Message, error) { - resolverCtx, cancel := context.WithTimeout(ctx, tf.timeout) - defer cancel() // important to avoid a resource leak - - if msg.IsControl() { - return []*message.Message{msg}, nil - } - - if !tf.isObj { - str := string(msg.Data()) - names, err := tf.resolver.LookupHost(resolverCtx, str) - if err != nil { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err) - } - - // Return the first address returned by LookupHost.
- data := []byte(names[0]) - msg.SetData(data) - return []*message.Message{msg}, nil - } - - value := msg.GetValue(tf.conf.Object.SourceKey) - if !value.Exists() { - return []*message.Message{msg}, nil - } - - names, err := tf.resolver.LookupHost(resolverCtx, value.String()) - if err != nil { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err) - } - - if err := msg.SetValue(tf.conf.Object.TargetKey, names); err != nil { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err) - } - - return []*message.Message{msg}, nil -} - -func (tf *enrichDNSDomainLookup) String() string { - b, _ := json.Marshal(tf.conf) - return string(b) -} diff --git a/v1/transform/enrich_dns_ip_lookup.go b/v1/transform/enrich_dns_ip_lookup.go deleted file mode 100644 index e41dec2d..00000000 --- a/v1/transform/enrich_dns_ip_lookup.go +++ /dev/null @@ -1,95 +0,0 @@ -//go:build !wasm - -package transform - -import ( - "context" - "encoding/json" - "fmt" - "net" - "time" - - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" -) - -func newEnrichDNSIPLookup(_ context.Context, cfg config.Config) (*enrichDNSIPLookup, error) { - conf := enrichDNSConfig{} - if err := conf.Decode(cfg.Settings); err != nil { - return nil, fmt.Errorf("transform enrich_dns_ip_lookup: %v", err) - } - - if conf.ID == "" { - conf.ID = "enrich_dns_ip_lookup" - } - - if err := conf.Validate(); err != nil { - return nil, fmt.Errorf("transform %s: %v", conf.ID, err) - } - - dur, err := time.ParseDuration(conf.Request.Timeout) - if err != nil { - return nil, fmt.Errorf("transform %s: duration: %v", conf.ID, err) - } - - tf := enrichDNSIPLookup{ - conf: conf, - isObj: conf.Object.SourceKey != "" && conf.Object.TargetKey != "", - resolver: net.Resolver{}, - timeout: dur, - } - - return &tf, nil -} - -type enrichDNSIPLookup struct { - conf enrichDNSConfig - isObj bool - - resolver net.Resolver - timeout time.Duration -} - -// Transform performs a DNS lookup on a message. -func (tf *enrichDNSIPLookup) Transform(ctx context.Context, msg *message.Message) ([]*message.Message, error) { - resolverCtx, cancel := context.WithTimeout(ctx, tf.timeout) - defer cancel() // important to avoid a resource leak - - if msg.IsControl() { - return []*message.Message{msg}, nil - } - - if !tf.isObj { - str := string(msg.Data()) - addrs, err := tf.resolver.LookupAddr(resolverCtx, str) - if err != nil { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err) - } - - // Return the first name returned by the reverse (PTR) lookup.
- data := []byte(addrs[0]) - msg.SetData(data) - return []*message.Message{msg}, nil - } - - value := msg.GetValue(tf.conf.Object.SourceKey) - if !value.Exists() { - return []*message.Message{msg}, nil - } - - addrs, err := tf.resolver.LookupAddr(resolverCtx, value.String()) - if err != nil { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err) - } - - if err := msg.SetValue(tf.conf.Object.TargetKey, addrs); err != nil { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err) - } - - return []*message.Message{msg}, nil -} - -func (tf *enrichDNSIPLookup) String() string { - b, _ := json.Marshal(tf.conf) - return string(b) -} diff --git a/v1/transform/enrich_dns_txt_lookup.go b/v1/transform/enrich_dns_txt_lookup.go deleted file mode 100644 index 8720e73d..00000000 --- a/v1/transform/enrich_dns_txt_lookup.go +++ /dev/null @@ -1,95 +0,0 @@ -//go:build !wasm - -package transform - -import ( - "context" - "encoding/json" - "fmt" - "net" - "time" - - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" -) - -func newEnrichDNSTxtLookup(_ context.Context, cfg config.Config) (*enrichDNSTxtLookup, error) { - conf := enrichDNSConfig{} - if err := conf.Decode(cfg.Settings); err != nil { - return nil, fmt.Errorf("transform enrich_dns_txt_lookup: %v", err) - } - - if conf.ID == "" { - conf.ID = "enrich_dns_txt_lookup" - } - - if err := conf.Validate(); err != nil { - return nil, fmt.Errorf("transform %s: %v", conf.ID, err) - } - - dur, err := time.ParseDuration(conf.Request.Timeout) - if err != nil { - return nil, fmt.Errorf("transform %s: duration: %v", conf.ID, err) - } - - tf := enrichDNSTxtLookup{ - conf: conf, - isObj: conf.Object.SourceKey != "" && conf.Object.TargetKey != "", - resolver: net.Resolver{}, - timeout: dur, - } - - return &tf, nil -} - -type enrichDNSTxtLookup struct { - conf enrichDNSConfig - isObj bool - - resolver net.Resolver - timeout time.Duration -} - -// Transform performs a DNS lookup on a message. -func (tf *enrichDNSTxtLookup) Transform(ctx context.Context, msg *message.Message) ([]*message.Message, error) { - resolverCtx, cancel := context.WithTimeout(ctx, tf.timeout) - defer cancel() // important to avoid a resource leak - - if msg.IsControl() { - return []*message.Message{msg}, nil - } - - if !tf.isObj { - str := string(msg.Data()) - recs, err := tf.resolver.LookupTXT(resolverCtx, str) - if err != nil { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err) - } - - // Return the first record. 
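- // (LookupTXT may return multiple TXT records; only the first is kept here.)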
- data := []byte(recs[0]) - msg.SetData(data) - return []*message.Message{msg}, nil - } - - value := msg.GetValue(tf.conf.Object.SourceKey) - if !value.Exists() { - return []*message.Message{msg}, nil - } - - recs, err := tf.resolver.LookupTXT(resolverCtx, value.String()) - if err != nil { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err) - } - - if err := msg.SetValue(tf.conf.Object.TargetKey, recs); err != nil { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err) - } - - return []*message.Message{msg}, nil -} - -func (tf *enrichDNSTxtLookup) String() string { - b, _ := json.Marshal(tf.conf) - return string(b) -} diff --git a/v1/transform/enrich_http_get.go b/v1/transform/enrich_http_get.go deleted file mode 100644 index 47ecc92a..00000000 --- a/v1/transform/enrich_http_get.go +++ /dev/null @@ -1,155 +0,0 @@ -//go:build !wasm - -package transform - -import ( - "context" - "encoding/json" - "fmt" - "strings" - - "github.com/brexhq/substation/config" - iconfig "github.com/brexhq/substation/internal/config" - "github.com/brexhq/substation/internal/errors" - "github.com/brexhq/substation/internal/http" - "github.com/brexhq/substation/internal/secrets" - "github.com/brexhq/substation/message" -) - -type enrichHTTPGetConfig struct { - // URL is the HTTP(S) endpoint that data is retrieved from. - // - // If the substring ${DATA} is in the URL, then the URL is interpolated with - // data (either the value from Object.SourceKey or the raw data). URLs may be optionally - // interpolated with secrets (e.g., ${SECRET:FOO}). - URL string `json:"url"` - // Headers is a map of HTTP headers sent in the request. - // Values may be optionally interpolated with secrets (e.g., ${SECRET:FOO}). - // - // This is optional and has no default. - Headers map[string]string `json:"headers"` - - ID string `json:"id"` - Object iconfig.Object `json:"object"` -} - -func (c *enrichHTTPGetConfig) Decode(in interface{}) error { - return iconfig.Decode(in, c) -} - -func (c *enrichHTTPGetConfig) Validate() error { - if c.URL == "" { - return fmt.Errorf("url: %v", errors.ErrMissingRequiredOption) - } - - return nil -} - -func newEnrichHTTPGet(ctx context.Context, cfg config.Config) (*enrichHTTPGet, error) { - conf := enrichHTTPGetConfig{} - if err := conf.Decode(cfg.Settings); err != nil { - return nil, fmt.Errorf("transform enrich_http_get: %v", err) - } - - if conf.ID == "" { - conf.ID = "enrich_http_get" - } - - if err := conf.Validate(); err != nil { - return nil, fmt.Errorf("transform %s: %v", conf.ID, err) - } - - tf := enrichHTTPGet{ - conf: conf, - } - - tf.client.Setup() - for k, v := range conf.Headers { - // Retrieve secret and interpolate with header value. - v, err := secrets.Interpolate(ctx, v) - if err != nil { - return nil, fmt.Errorf("transform %s: %v", conf.ID, err) - } - - tf.headers = append(tf.headers, http.Header{ - Key: k, - Value: v, - }) - } - - return &tf, nil -} - -type enrichHTTPGet struct { - conf enrichHTTPGetConfig - - // client is safe for concurrent use. - client http.HTTP - headers []http.Header -} - -func (tf *enrichHTTPGet) Transform(ctx context.Context, msg *message.Message) ([]*message.Message, error) { - if msg.IsControl() { - return []*message.Message{msg}, nil - } - - // The URL can exist in three states: - // - // - No interpolation, the URL is unchanged. - // - // - Object-based interpolation, the URL is interpolated - // using the object handling pattern.
- // - // - Data-based interpolation, the URL is interpolated - // using the data handling pattern. - // - // The URL is always interpolated with the substring ${DATA}. - url := tf.conf.URL - if strings.Contains(url, enrichHTTPInterp) { - if tf.conf.Object.SourceKey != "" { - value := msg.GetValue(tf.conf.Object.SourceKey) - if !value.Exists() { - return []*message.Message{msg}, nil - } - - url = strings.ReplaceAll(url, enrichHTTPInterp, value.String()) - } else { - url = strings.ReplaceAll(url, enrichHTTPInterp, string(msg.Data())) - } - } - - // Retrieve secret and interpolate with URL - url, err := secrets.Interpolate(ctx, url) - if err != nil { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err) - } - - // resp.Body is closed by enrichHTTPParseResponse. - resp, err := tf.client.Get(ctx, url, tf.headers...) - if err != nil { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err) - } - - parsed, err := enrichHTTPParseResponse(resp) - if err != nil { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err) - } - - // If TargetKey is set, then the response body is stored in the message. - // Otherwise, the response body overwrites the message data. - if tf.conf.Object.TargetKey != "" { - if err := msg.SetValue(tf.conf.Object.TargetKey, parsed); err != nil { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err) - } - - return []*message.Message{msg}, nil - } - - msg.SetData(parsed) - return []*message.Message{msg}, nil -} - -func (tf *enrichHTTPGet) String() string { - b, _ := json.Marshal(tf.conf) - return string(b) -} diff --git a/v1/transform/enrich_http_post.go b/v1/transform/enrich_http_post.go deleted file mode 100644 index 28b312e6..00000000 --- a/v1/transform/enrich_http_post.go +++ /dev/null @@ -1,172 +0,0 @@ -//go:build !wasm - -package transform - -import ( - "context" - "encoding/json" - "fmt" - "strings" - - "github.com/brexhq/substation/config" - iconfig "github.com/brexhq/substation/internal/config" - "github.com/brexhq/substation/internal/errors" - "github.com/brexhq/substation/internal/http" - "github.com/brexhq/substation/internal/secrets" - "github.com/brexhq/substation/message" -) - -type enrichHTTPPostObjectConfig struct { - // BodyKey retrieves a value from an object that is used as the message body. - BodyKey string `json:"body_key"` - - iconfig.Object -} - -type enrichHTTPPostConfig struct { - // URL is the HTTP(S) endpoint that data is sent to. - // - // If the substring ${DATA} is in the URL, then the URL is interpolated with - // data (either the value from Object.SourceKey or the raw data). URLs may be optionally - // interpolated with secrets (e.g., ${SECRETS_ENV:FOO}). - URL string `json:"url"` - - // Headers is a map of HTTP headers sent in the request. - // Values may be optionally interpolated with secrets (e.g., ${SECRETS_ENV:FOO}). - // - // This is optional and has no default.
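- // For example (hypothetical header): {"Content-Type": "application/json"}.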
- Headers map[string]string `json:"headers"` - - ID string `json:"id"` - Object enrichHTTPPostObjectConfig `json:"object"` -} - -func (c *enrichHTTPPostConfig) Decode(in interface{}) error { - return iconfig.Decode(in, c) -} - -func (c *enrichHTTPPostConfig) Validate() error { - if c.URL == "" { - return fmt.Errorf("url: %v", errors.ErrMissingRequiredOption) - } - - if c.Object.BodyKey == "" { - return fmt.Errorf("body_key: %v", errors.ErrMissingRequiredOption) - } - - return nil -} - -func newEnrichHTTPPost(ctx context.Context, cfg config.Config) (*enrichHTTPPost, error) { - conf := enrichHTTPPostConfig{} - if err := conf.Decode(cfg.Settings); err != nil { - return nil, fmt.Errorf("transform enrich_http_post: %v", err) - } - - if conf.ID == "" { - conf.ID = "enrich_http_post" - } - - if err := conf.Validate(); err != nil { - return nil, fmt.Errorf("transform %s: %v", conf.ID, err) - } - - tf := enrichHTTPPost{ - conf: conf, - } - - tf.client.Setup() - for k, v := range conf.Headers { - // Retrieve secret and interpolate with header value. - v, err := secrets.Interpolate(ctx, v) - if err != nil { - return nil, fmt.Errorf("transform %s: %v", conf.ID, err) - } - - tf.headers = append(tf.headers, http.Header{ - Key: k, - Value: v, - }) - } - - return &tf, nil -} - -type enrichHTTPPost struct { - conf enrichHTTPPostConfig - - // client is safe for concurrent use. - client http.HTTP - headers []http.Header -} - -func (tf *enrichHTTPPost) Transform(ctx context.Context, msg *message.Message) ([]*message.Message, error) { - if msg.IsControl() { - return []*message.Message{msg}, nil - } - - // The URL can exist in three states: - // - // - No interpolation, the URL is unchanged. - // - // - Object-based interpolation, the URL is interpolated - // using the object handling pattern. - // - // - Data-based interpolation, the URL is interpolated - // using the data handling pattern. - // - // The URL is always interpolated with the substring ${DATA}. - url := tf.conf.URL - if strings.Contains(url, enrichHTTPInterp) { - if tf.conf.Object.SourceKey != "" { - value := msg.GetValue(tf.conf.Object.SourceKey) - if !value.Exists() { - return []*message.Message{msg}, nil - } - - url = strings.ReplaceAll(url, enrichHTTPInterp, value.String()) - } else { - url = strings.ReplaceAll(url, enrichHTTPInterp, string(msg.Data())) - } - } - - // Retrieve secret and interpolate with URL - url, err := secrets.Interpolate(ctx, url) - if err != nil { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err) - } - - bodyValue := msg.GetValue(tf.conf.Object.BodyKey) - if !bodyValue.Exists() { - return []*message.Message{msg}, nil - } - - // resp.Body is closed by enrichHTTPParseResponse. - resp, err := tf.client.Post(ctx, url, bodyValue.String(), tf.headers...) - // Request errors are returned to the caller. - if err != nil { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err) - } - - parsed, err := enrichHTTPParseResponse(resp) - if err != nil { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err) - } - - // If TargetKey exists, then the response body is written into the message, - // but otherwise the response is not stored and the message is returned - // as-is.
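- // For example (hypothetical values): with target_key "a" and a response body of {"d":"e"}, the message {"x":1} becomes {"x":1,"a":{"d":"e"}}; without target_key the response is discarded.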
- if tf.conf.Object.TargetKey != "" { - if err := msg.SetValue(tf.conf.Object.TargetKey, parsed); err != nil { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err) - } - } - - return []*message.Message{msg}, nil -} - -func (tf *enrichHTTPPost) String() string { - b, _ := json.Marshal(tf.conf) - return string(b) -} diff --git a/v1/transform/enrich_kv_store_item_get.go b/v1/transform/enrich_kv_store_item_get.go deleted file mode 100644 index d6b53741..00000000 --- a/v1/transform/enrich_kv_store_item_get.go +++ /dev/null @@ -1,130 +0,0 @@ -//go:build !wasm - -package transform - -import ( - "context" - "encoding/json" - "fmt" - - "github.com/brexhq/substation/config" - iconfig "github.com/brexhq/substation/internal/config" - "github.com/brexhq/substation/internal/errors" - "github.com/brexhq/substation/internal/kv" - "github.com/brexhq/substation/message" -) - -type enrichKVStoreItemGetConfig struct { - // Prefix is prepended to the key and can be used to simplify - // data management within a KV store. - // - // This is optional and defaults to an empty string. - Prefix string `json:"prefix"` - // CloseKVStore determines if the KV store is closed when a control - // message is received. - // - // This is optional and defaults to false (KV store is not closed). - CloseKVStore bool `json:"close_kv_store"` - - ID string `json:"id"` - Object iconfig.Object `json:"object"` - KVStore config.Config `json:"kv_store"` -} - -func (c *enrichKVStoreItemGetConfig) Decode(in interface{}) error { - return iconfig.Decode(in, c) -} - -func (c *enrichKVStoreItemGetConfig) Validate() error { - if c.Object.SourceKey == "" { - return fmt.Errorf("object_source_key: %v", errors.ErrMissingRequiredOption) - } - - if c.Object.TargetKey == "" { - return fmt.Errorf("object_target_key: %v", errors.ErrMissingRequiredOption) - } - - if c.KVStore.Type == "" { - return fmt.Errorf("kv_store: %v", errors.ErrMissingRequiredOption) - } - - return nil -} - -func newEnrichKVStoreItemGet(_ context.Context, cfg config.Config) (*enrichKVStoreItemGet, error) { - conf := enrichKVStoreItemGetConfig{} - if err := conf.Decode(cfg.Settings); err != nil { - return nil, fmt.Errorf("transform enrich_kv_store_get: %v", err) - } - - if conf.ID == "" { - conf.ID = "enrich_kv_store_get" - } - - if err := conf.Validate(); err != nil { - return nil, fmt.Errorf("transform %s: %v", conf.ID, err) - } - - kvStore, err := kv.Get(conf.KVStore) - if err != nil { - return nil, fmt.Errorf("transform %s: %v", conf.ID, err) - } - - tf := enrichKVStoreItemGet{ - conf: conf, - kvStore: kvStore, - } - - return &tf, nil -} - -type enrichKVStoreItemGet struct { - conf enrichKVStoreItemGetConfig - kvStore kv.Storer -} - -func (tf *enrichKVStoreItemGet) Transform(ctx context.Context, msg *message.Message) ([]*message.Message, error) { - if msg.IsControl() { - if !tf.conf.CloseKVStore { - return []*message.Message{msg}, nil - } - - if err := tf.kvStore.Close(); err != nil { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err) - } - - return []*message.Message{msg}, nil - } - - if !tf.kvStore.IsEnabled() { - if err := tf.kvStore.Setup(ctx); err != nil { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err) - } - } - - value := msg.GetValue(tf.conf.Object.SourceKey) - if !value.Exists() { - return []*message.Message{msg}, nil - } - - key := value.String() - if tf.conf.Prefix != "" { - key = fmt.Sprint(tf.conf.Prefix, ":", key) - } - - v, err := tf.kvStore.Get(ctx, key) - if err != nil { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, 
err) - } - - if err := msg.SetValue(tf.conf.Object.TargetKey, v); err != nil { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err) - } - - return []*message.Message{msg}, nil -} - -func (tf *enrichKVStoreItemGet) String() string { - b, _ := json.Marshal(tf.conf) - return string(b) -} diff --git a/v1/transform/enrich_kv_store_item_set.go b/v1/transform/enrich_kv_store_item_set.go deleted file mode 100644 index 87857c3a..00000000 --- a/v1/transform/enrich_kv_store_item_set.go +++ /dev/null @@ -1,182 +0,0 @@ -//go:build !wasm - -package transform - -import ( - "context" - "encoding/json" - "fmt" - "time" - - "github.com/brexhq/substation/config" - iconfig "github.com/brexhq/substation/internal/config" - "github.com/brexhq/substation/internal/errors" - "github.com/brexhq/substation/internal/kv" - "github.com/brexhq/substation/message" -) - -type enrichKVStoreItemSetObjectConfig struct { - // TTLKey retrieves a value from an object that is used as the time-to-live (TTL) - // of the item set into the KV store. This value must be an integer that represents - // the Unix time when the item will be evicted from the store. Any precision greater - // than seconds (e.g., milliseconds, nanoseconds) is truncated to seconds. - // - // This is optional and defaults to using no TTL when setting items into the store. - TTLKey string `json:"ttl_key"` - - iconfig.Object -} - -type enrichKVStoreItemSetConfig struct { - // Prefix is prepended to the key and can be used to simplify - // data management within a KV store. - // - // This is optional and defaults to an empty string. - Prefix string `json:"prefix"` - // TTLOffset is an offset used to determine the time-to-live (TTL) of the item set - // into the KV store. If Object.TTLKey is configured, then this value is added to the TTL - // value retrieved from the object. If Object.TTLKey is not used, then this value is added - // to the current time. - // - // For example, if Object.TTLKey is not set and the offset is "24h", then the value - // will be evicted from the store when more than 24 hours have passed. The offset must - // be a duration string accepted by time.ParseDuration. - // - // This is optional and defaults to using no TTL when setting values into the store. - TTLOffset string `json:"ttl_offset"` - // CloseKVStore determines if the KV store is closed when a control - // message is received. - // - // This is optional and defaults to false (KV store is not closed).
- CloseKVStore bool `json:"close_kv_store"` - - ID string `json:"id"` - Object enrichKVStoreItemSetObjectConfig `json:"object"` - KVStore config.Config `json:"kv_store"` -} - -func (c *enrichKVStoreItemSetConfig) Decode(in interface{}) error { - return iconfig.Decode(in, c) -} - -func (c *enrichKVStoreItemSetConfig) Validate() error { - if c.Object.SourceKey == "" { - return fmt.Errorf("object_source_key: %v", errors.ErrMissingRequiredOption) - } - - if c.Object.TargetKey == "" { - return fmt.Errorf("object_target_key: %v", errors.ErrMissingRequiredOption) - } - - if c.KVStore.Type == "" { - return fmt.Errorf("kv_store: %v", errors.ErrMissingRequiredOption) - } - - return nil -} - -func newEnrichKVStoreItemSet(_ context.Context, cfg config.Config) (*enrichKVStoreItemSet, error) { - conf := enrichKVStoreItemSetConfig{} - if err := conf.Decode(cfg.Settings); err != nil { - return nil, fmt.Errorf("transform enrich_kv_store_set: %v", err) - } - - if conf.ID == "" { - conf.ID = "enrich_kv_store_set" - } - - if err := conf.Validate(); err != nil { - return nil, fmt.Errorf("transform %s: %v", conf.ID, err) - } - - kvStore, err := kv.Get(conf.KVStore) - if err != nil { - return nil, fmt.Errorf("transform %s: %v", conf.ID, err) - } - - if conf.TTLOffset == "" { - conf.TTLOffset = "0s" - } - - dur, err := time.ParseDuration(conf.TTLOffset) - if err != nil { - return nil, fmt.Errorf("transform %s: %v", conf.ID, err) - } - - tf := enrichKVStoreItemSet{ - conf: conf, - kvStore: kvStore, - ttl: int64(dur.Seconds()), - } - - return &tf, nil -} - -type enrichKVStoreItemSet struct { - conf enrichKVStoreItemSetConfig - kvStore kv.Storer - ttl int64 -} - -func (tf *enrichKVStoreItemSet) Transform(ctx context.Context, msg *message.Message) ([]*message.Message, error) { - if msg.IsControl() { - if !tf.conf.CloseKVStore { - return []*message.Message{msg}, nil - } - - if err := tf.kvStore.Close(); err != nil { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err) - } - - return []*message.Message{msg}, nil - } - - if !tf.kvStore.IsEnabled() { - if err := tf.kvStore.Setup(ctx); err != nil { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err) - } - } - - value := msg.GetValue(tf.conf.Object.SourceKey) - if !value.Exists() { - return []*message.Message{msg}, nil - } - - key := value.String() - if tf.conf.Prefix != "" { - key = fmt.Sprint(tf.conf.Prefix, ":", key) - } - - //nolint: nestif // ignore nesting complexity - if tf.conf.Object.TTLKey != "" && tf.ttl != 0 { - value := msg.GetValue(tf.conf.Object.TTLKey) - ttl := truncateTTL(value) + tf.ttl - - if err := tf.kvStore.SetWithTTL(ctx, key, msg.GetValue(tf.conf.Object.TargetKey).String(), ttl); err != nil { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err) - } - } else if tf.conf.Object.TTLKey != "" { - value := msg.GetValue(tf.conf.Object.TTLKey) - ttl := truncateTTL(value) - - if err := tf.kvStore.SetWithTTL(ctx, key, msg.GetValue(tf.conf.Object.TargetKey).String(), ttl); err != nil { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err) - } - } else if tf.ttl != 0 { - ttl := time.Now().Add(time.Duration(tf.ttl) * time.Second).Unix() - - if err := tf.kvStore.SetWithTTL(ctx, key, msg.GetValue(tf.conf.Object.TargetKey).String(), ttl); err != nil { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err) - } - } else { - if err := tf.kvStore.Set(ctx, key, msg.GetValue(tf.conf.Object.TargetKey).String()); err != nil { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err) - } - } - - return []*message.Message{msg}, 
nil -} - -func (tf *enrichKVStoreItemSet) String() string { - b, _ := json.Marshal(tf.conf) - return string(b) -} diff --git a/v1/transform/enrich_kv_store_set_add.go b/v1/transform/enrich_kv_store_set_add.go deleted file mode 100644 index 72824b54..00000000 --- a/v1/transform/enrich_kv_store_set_add.go +++ /dev/null @@ -1,180 +0,0 @@ -package transform - -import ( - "context" - "encoding/json" - "fmt" - "time" - - "github.com/brexhq/substation/config" - iconfig "github.com/brexhq/substation/internal/config" - "github.com/brexhq/substation/internal/errors" - "github.com/brexhq/substation/internal/kv" - "github.com/brexhq/substation/message" -) - -type enrichKVStoreSetAddObjectConfig struct { - // TTLKey retrieves a value from an object that is used as the time-to-live (TTL) - // of the item set into the KV store. This value must be an integer that represents - // the Unix time when the item will be evicted from the store. Any precision greater - // than seconds (e.g., milliseconds, nanoseconds) is truncated to seconds. - // - // This is optional and defaults to using no TTL when setting items into the store. - TTLKey string `json:"ttl_key"` - - iconfig.Object -} - -type enrichKVStoreSetAddConfig struct { - // Prefix is prepended to the key and can be used to simplify - // data management within a KV store. - // - // This is optional and defaults to an empty string. - Prefix string `json:"prefix"` - // TTLOffset is an offset used to determine the time-to-live (TTL) of the item set - // into the KV store. If Object.TTLKey is configured, then this value is added to the TTL - // value retrieved from the object. If Object.TTLKey is not used, then this value is added - // to the current time. - // - // For example, if Object.TTLKey is not set and the offset is "24h", then the value - // will be evicted from the store when more than 24 hours have passed. The offset must - // be a duration string accepted by time.ParseDuration. - // - // This is optional and defaults to using no TTL when setting values into the store. - TTLOffset string `json:"ttl_offset"` - // CloseKVStore determines if the KV store is closed when a control - // message is received. - // - // This is optional and defaults to false (KV store is not closed).
- CloseKVStore bool `json:"close_kv_store"` - - ID string `json:"id"` - Object enrichKVStoreSetAddObjectConfig `json:"object"` - KVStore config.Config `json:"kv_store"` -} - -func (c *enrichKVStoreSetAddConfig) Decode(in interface{}) error { - return iconfig.Decode(in, c) -} - -func (c *enrichKVStoreSetAddConfig) Validate() error { - if c.Object.SourceKey == "" { - return fmt.Errorf("object_source_key: %v", errors.ErrMissingRequiredOption) - } - - if c.Object.TargetKey == "" { - return fmt.Errorf("object_target_key: %v", errors.ErrMissingRequiredOption) - } - - if c.KVStore.Type == "" { - return fmt.Errorf("kv_store: %v", errors.ErrMissingRequiredOption) - } - - return nil -} - -func newEnrichKVStoreSetAdd(_ context.Context, cfg config.Config) (*enrichKVStoreSetAdd, error) { - conf := enrichKVStoreSetAddConfig{} - if err := conf.Decode(cfg.Settings); err != nil { - return nil, fmt.Errorf("transform enrich_kv_store_set: %v", err) - } - - if conf.ID == "" { - conf.ID = "enrich_kv_store_set" - } - - if err := conf.Validate(); err != nil { - return nil, fmt.Errorf("transform %s: %v", conf.ID, err) - } - - kvStore, err := kv.Get(conf.KVStore) - if err != nil { - return nil, fmt.Errorf("transform %s: %v", conf.ID, err) - } - - if conf.TTLOffset == "" { - conf.TTLOffset = "0s" - } - - dur, err := time.ParseDuration(conf.TTLOffset) - if err != nil { - return nil, fmt.Errorf("transform %s: %v", conf.ID, err) - } - - tf := enrichKVStoreSetAdd{ - conf: conf, - kvStore: kvStore, - ttl: int64(dur.Seconds()), - } - - return &tf, nil -} - -type enrichKVStoreSetAdd struct { - conf enrichKVStoreSetAddConfig - kvStore kv.Storer - ttl int64 -} - -func (tf *enrichKVStoreSetAdd) Transform(ctx context.Context, msg *message.Message) ([]*message.Message, error) { - if msg.IsControl() { - if !tf.conf.CloseKVStore { - return []*message.Message{msg}, nil - } - - if err := tf.kvStore.Close(); err != nil { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err) - } - - return []*message.Message{msg}, nil - } - - if !tf.kvStore.IsEnabled() { - if err := tf.kvStore.Setup(ctx); err != nil { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err) - } - } - - value := msg.GetValue(tf.conf.Object.SourceKey) - if !value.Exists() { - return []*message.Message{msg}, nil - } - - key := value.String() - if tf.conf.Prefix != "" { - key = fmt.Sprint(tf.conf.Prefix, ":", key) - } - - //nolint: nestif // ignore nesting complexity - if tf.conf.Object.TTLKey != "" && tf.ttl != 0 { - value := msg.GetValue(tf.conf.Object.TTLKey) - ttl := truncateTTL(value) + tf.ttl - - if err := tf.kvStore.SetAddWithTTL(ctx, key, msg.GetValue(tf.conf.Object.TargetKey).Value(), ttl); err != nil { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err) - } - } else if tf.conf.Object.TTLKey != "" { - value := msg.GetValue(tf.conf.Object.TTLKey) - ttl := truncateTTL(value) - - if err := tf.kvStore.SetAddWithTTL(ctx, key, msg.GetValue(tf.conf.Object.TargetKey).Value(), ttl); err != nil { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err) - } - } else if tf.ttl != 0 { - ttl := time.Now().Add(time.Duration(tf.ttl) * time.Second).Unix() - - if err := tf.kvStore.SetAddWithTTL(ctx, key, msg.GetValue(tf.conf.Object.TargetKey).Value(), ttl); err != nil { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err) - } - } else { - if err := tf.kvStore.SetAddWithTTL(ctx, key, msg.GetValue(tf.conf.Object.TargetKey).Value(), 0); err != nil { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err) - } - } - - return 
[]*message.Message{msg}, nil -} - -func (tf *enrichKVStoreSetAdd) String() string { - b, _ := json.Marshal(tf.conf) - return string(b) -} diff --git a/v1/transform/format.go b/v1/transform/format.go deleted file mode 100644 index 01f86ee7..00000000 --- a/v1/transform/format.go +++ /dev/null @@ -1,68 +0,0 @@ -package transform - -import ( - "bytes" - "compress/gzip" - "fmt" - "io" - - iconfig "github.com/brexhq/substation/internal/config" - "github.com/brexhq/substation/internal/errors" -) - -type formatBase64Config struct { - ID string `json:"id"` - Object iconfig.Object `json:"object"` -} - -func (c *formatBase64Config) Decode(in interface{}) error { - return iconfig.Decode(in, c) -} - -func (c *formatBase64Config) Validate() error { - if c.Object.SourceKey == "" && c.Object.TargetKey != "" { - return fmt.Errorf("object_source_key: %v", errors.ErrMissingRequiredOption) - } - - if c.Object.SourceKey != "" && c.Object.TargetKey == "" { - return fmt.Errorf("object_target_key: %v", errors.ErrMissingRequiredOption) - } - - return nil -} - -type formatGzipConfig struct { - ID string `json:"id"` -} - -func (c *formatGzipConfig) Decode(in interface{}) error { - return iconfig.Decode(in, c) -} - -func fmtToGzip(data []byte) ([]byte, error) { - var buf bytes.Buffer - gz := gzip.NewWriter(&buf) - if _, err := gz.Write(data); err != nil { - return nil, err - } - if err := gz.Close(); err != nil { - return nil, err - } - - return buf.Bytes(), nil -} - -func fmtFromGzip(data []byte) ([]byte, error) { - r := bytes.NewReader(data) - gz, err := gzip.NewReader(r) - if err != nil { - return nil, err - } - - output, err := io.ReadAll(gz) - if err != nil { - return nil, err - } - - return output, nil -} diff --git a/v1/transform/format_from_base64.go b/v1/transform/format_from_base64.go deleted file mode 100644 index cf2fc67e..00000000 --- a/v1/transform/format_from_base64.go +++ /dev/null @@ -1,85 +0,0 @@ -package transform - -import ( - "context" - "encoding/json" - "fmt" - "unicode/utf8" - - "github.com/brexhq/substation/config" - ibase64 "github.com/brexhq/substation/internal/base64" - "github.com/brexhq/substation/message" -) - -// errFormatFromBase64DecodeBinary is returned when the Base64 transform is configured -// to decode output into an object, but the output contains binary data and -// cannot be written into a valid object.
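-// For example, decoding "/w==" yields the single byte 0xFF, which is not valid UTF-8 and cannot be written into an object.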
-var errFormatFromBase64DecodeBinary = fmt.Errorf("cannot write binary as object") - -func newFormatFromBase64(_ context.Context, cfg config.Config) (*formatFromBase64, error) { - conf := formatBase64Config{} - if err := conf.Decode(cfg.Settings); err != nil { - return nil, fmt.Errorf("transform format_from_base64: %v", err) - } - - if conf.ID == "" { - conf.ID = "format_from_base64" - } - - if err := conf.Validate(); err != nil { - return nil, fmt.Errorf("transform %s: %v", conf.ID, err) - } - - tf := formatFromBase64{ - conf: conf, - isObject: conf.Object.SourceKey != "" && conf.Object.TargetKey != "", - } - - return &tf, nil -} - -type formatFromBase64 struct { - conf formatBase64Config - isObject bool -} - -func (tf *formatFromBase64) Transform(ctx context.Context, msg *message.Message) ([]*message.Message, error) { - if msg.IsControl() { - return []*message.Message{msg}, nil - } - - if !tf.isObject { - decoded, err := ibase64.Decode(msg.Data()) - if err != nil { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err) - } - - msg.SetData(decoded) - return []*message.Message{msg}, nil - } - - value := msg.GetValue(tf.conf.Object.SourceKey) - if !value.Exists() { - return []*message.Message{msg}, nil - } - - b64, err := ibase64.Decode(value.Bytes()) - if err != nil { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err) - } - - if !utf8.Valid(b64) { - return nil, errFormatFromBase64DecodeBinary - } - - if err := msg.SetValue(tf.conf.Object.TargetKey, b64); err != nil { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err) - } - - return []*message.Message{msg}, nil -} - -func (tf *formatFromBase64) String() string { - b, _ := json.Marshal(tf.conf) - return string(b) -} diff --git a/v1/transform/format_from_base64_test.go b/v1/transform/format_from_base64_test.go deleted file mode 100644 index 41ebc0e7..00000000 --- a/v1/transform/format_from_base64_test.go +++ /dev/null @@ -1,98 +0,0 @@ -package transform - -import ( - "context" - "reflect" - "testing" - - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" -) - -var _ Transformer = &formatFromBase64{} - -var formatFromBase64Tests = []struct { - name string - cfg config.Config - test []byte - expected [][]byte - err error -}{ - // data tests - { - "data", - config.Config{}, - []byte(`Yg==`), - [][]byte{ - []byte(`b`), - }, - nil, - }, - // object tests - { - "object", - config.Config{ - Settings: map[string]interface{}{ - "object": map[string]interface{}{ - "source_key": "a", - "target_key": "a", - }, - }, - }, - []byte(`{"a":"Yg=="}`), - [][]byte{ - []byte(`{"a":"b"}`), - }, - nil, - }, -} - -func TestFormatFromBase64(t *testing.T) { - ctx := context.TODO() - for _, test := range formatFromBase64Tests { - t.Run(test.name, func(t *testing.T) { - tf, err := newFormatFromBase64(ctx, test.cfg) - if err != nil { - t.Fatal(err) - } - - msg := message.New().SetData(test.test) - result, err := tf.Transform(ctx, msg) - if err != nil { - t.Error(err) - } - - var data [][]byte - for _, c := range result { - data = append(data, c.Data()) - } - - if !reflect.DeepEqual(data, test.expected) { - t.Errorf("expected %s, got %s", test.expected, data) - } - }) - } -} - -func benchmarkFormatFromBase64(b *testing.B, tf *formatFromBase64, data []byte) { - ctx := context.TODO() - for i := 0; i < b.N; i++ { - msg := message.New().SetData(data) - _, _ = tf.Transform(ctx, msg) - } -} - -func BenchmarkFormatFromBase64(b *testing.B) { - for _, test := range formatFromBase64Tests { - tf, err := 
newFormatFromBase64(context.TODO(), test.cfg) - if err != nil { - b.Fatal(err) - } - - b.Run(test.name, - func(b *testing.B) { - benchmarkFormatFromBase64(b, tf, test.test) - }, - ) - } -} diff --git a/v1/transform/format_from_gzip.go b/v1/transform/format_from_gzip.go deleted file mode 100644 index 6ddbb9a6..00000000 --- a/v1/transform/format_from_gzip.go +++ /dev/null @@ -1,52 +0,0 @@ -package transform - -import ( - "context" - "encoding/json" - "fmt" - - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" -) - -func newFormatFromGzip(_ context.Context, cfg config.Config) (*formatFromGzip, error) { - conf := formatGzipConfig{} - if err := conf.Decode(cfg.Settings); err != nil { - return nil, fmt.Errorf("transform format_from_gzip: %v", err) - } - - if conf.ID == "" { - conf.ID = "format_from_gzip" - } - - tf := formatFromGzip{ - conf: conf, - isObject: false, - } - - return &tf, nil -} - -type formatFromGzip struct { - conf formatGzipConfig - isObject bool -} - -func (tf *formatFromGzip) Transform(ctx context.Context, msg *message.Message) ([]*message.Message, error) { - if msg.IsControl() { - return []*message.Message{msg}, nil - } - - gz, err := fmtFromGzip(msg.Data()) - if err != nil { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err) - } - - msg.SetData(gz) - return []*message.Message{msg}, nil -} - -func (tf *formatFromGzip) String() string { - b, _ := json.Marshal(tf.conf) - return string(b) -} diff --git a/v1/transform/format_from_gzip_test.go b/v1/transform/format_from_gzip_test.go deleted file mode 100644 index 14260dfa..00000000 --- a/v1/transform/format_from_gzip_test.go +++ /dev/null @@ -1,79 +0,0 @@ -package transform - -import ( - "context" - "reflect" - "testing" - - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" -) - -var _ Transformer = &formatFromGzip{} - -var formatFromGzipTests = []struct { - name string - cfg config.Config - test []byte - expected [][]byte -}{ - { - "data", - config.Config{}, - []byte{31, 139, 8, 0, 0, 0, 0, 0, 0, 255, 74, 203, 207, 7, 4, 0, 0, 255, 255, 33, 101, 115, 140, 3, 0, 0, 0}, - [][]byte{ - []byte(`foo`), - }, - }, -} - -func TestFormatFromGzip(t *testing.T) { - ctx := context.TODO() - for _, test := range formatFromGzipTests { - t.Run(test.name, func(t *testing.T) { - msg := message.New().SetData(test.test) - - tf, err := newFormatFromGzip(ctx, test.cfg) - if err != nil { - t.Fatal(err) - } - - result, err := tf.Transform(ctx, msg) - if err != nil { - t.Error(err) - } - - var data [][]byte - for _, c := range result { - data = append(data, c.Data()) - } - - if !reflect.DeepEqual(data, test.expected) { - t.Errorf("expected %s, got %s", test.expected, data) - } - }) - } -} - -func benchmarkFormatFromGzip(b *testing.B, tf *formatFromGzip, data []byte) { - ctx := context.TODO() - for i := 0; i < b.N; i++ { - msg := message.New().SetData(data) - _, _ = tf.Transform(ctx, msg) - } -} - -func BenchmarkFormatFromGzip(b *testing.B) { - for _, test := range formatFromGzipTests { - tf, err := newFormatFromGzip(context.TODO(), test.cfg) - if err != nil { - b.Fatal(err) - } - - b.Run(test.name, - func(b *testing.B) { - benchmarkFormatFromGzip(b, tf, test.test) - }, - ) - } -} diff --git a/v1/transform/format_from_pretty_print.go b/v1/transform/format_from_pretty_print.go deleted file mode 100644 index 72bacc1d..00000000 --- a/v1/transform/format_from_pretty_print.go +++ /dev/null @@ -1,89 +0,0 @@ -package transform - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - - 
"github.com/brexhq/substation/config" - iconfig "github.com/brexhq/substation/internal/config" - "github.com/brexhq/substation/message" -) - -const ( - formatFromPPOpenCurlyBracket = 123 // { - formatFromPPCloseCurlyBracket = 125 // } -) - -type formatFromPrettyPrintConfig struct { - ID string `json:"id"` -} - -func (c *formatFromPrettyPrintConfig) Decode(in interface{}) error { - return iconfig.Decode(in, c) -} - -func newFormatFromPrettyPrint(_ context.Context, cfg config.Config) (*formatFromPrettyPrint, error) { - conf := formatFromPrettyPrintConfig{} - if err := conf.Decode(cfg.Settings); err != nil { - return nil, fmt.Errorf("transform format_from_pretty_print: %v", err) - } - - if conf.ID == "" { - conf.ID = "format_from_pretty_print" - } - - tf := formatFromPrettyPrint{ - conf: conf, - } - - return &tf, nil -} - -type formatFromPrettyPrint struct { - conf formatFromPrettyPrintConfig - - count int - stack []byte -} - -func (tf *formatFromPrettyPrint) Transform(ctx context.Context, msg *message.Message) ([]*message.Message, error) { - if msg.IsControl() { - return []*message.Message{msg}, nil - } - - for _, data := range msg.Data() { - tf.stack = append(tf.stack, data) - - if data == formatFromPPOpenCurlyBracket { - tf.count++ - } - - if data == formatFromPPCloseCurlyBracket { - tf.count-- - } - - if tf.count == 0 { - var buf bytes.Buffer - if err := json.Compact(&buf, tf.stack); err != nil { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err) - } - - tf.stack = []byte{} - if json.Valid(buf.Bytes()) { - msg.SetData(buf.Bytes()) - return []*message.Message{msg}, nil - } - - return nil, fmt.Errorf("transform %s: invalid json", tf.conf.ID) - } - } - - return nil, nil -} - -func (tf *formatFromPrettyPrint) String() string { - b, _ := json.Marshal(tf.conf) - return string(b) -} diff --git a/v1/transform/format_from_pretty_print_test.go b/v1/transform/format_from_pretty_print_test.go deleted file mode 100644 index 83d0f02f..00000000 --- a/v1/transform/format_from_pretty_print_test.go +++ /dev/null @@ -1,105 +0,0 @@ -package transform - -import ( - "bytes" - "context" - "testing" - - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" -) - -var _ Transformer = &formatFromPrettyPrint{} - -var formatFromPrettyPrintTests = []struct { - name string - cfg config.Config - test [][]byte - expected [][]byte -}{ - { - "from", - config.Config{}, - [][]byte{ - []byte(`{ - "foo":"bar" - }`), - }, - [][]byte{ - []byte(`{"foo":"bar"}`), - }, - }, - { - "from", - config.Config{}, - [][]byte{ - []byte(`{`), - []byte(`"foo":"bar",`), - []byte(`"baz": {`), - []byte(` "qux": "corge"`), - []byte(`}`), - []byte(`}`), - }, - [][]byte{ - []byte(`{"foo":"bar","baz":{"qux":"corge"}}`), - }, - }, -} - -func TestFormatFromPrettyPrint(t *testing.T) { - ctx := context.TODO() - for _, test := range formatFromPrettyPrintTests { - t.Run(test.name, func(t *testing.T) { - tf, err := newFormatFromPrettyPrint(ctx, test.cfg) - if err != nil { - t.Fatal(err) - } - - var messages []*message.Message - for _, data := range test.test { - msg := message.New().SetData(data) - messages = append(messages, msg) - } - - result, err := Apply(ctx, []Transformer{tf}, messages...) 
- if err != nil { - t.Error(err) - } - - for i, res := range result { - expected := test.expected[i] - if !bytes.Equal(expected, res.Data()) { - t.Errorf("expected %s, got %s", expected, res.Data()) - } - } - }) - } -} - -func benchmarkFormatFromPrettyPrint(b *testing.B, tf *formatFromPrettyPrint, data [][]byte) { - ctx := context.TODO() - for i := 0; i < b.N; i++ { - var messages []*message.Message - for _, d := range data { - msg := message.New().SetData(d) - messages = append(messages, msg) - } - - _, _ = Apply(ctx, []Transformer{tf}, messages...) - } -} - -func BenchmarkFormatFromPrettyPrint(b *testing.B) { - for _, test := range formatFromPrettyPrintTests { - tf, err := newFormatFromPrettyPrint(context.TODO(), test.cfg) - if err != nil { - b.Fatal(err) - } - - b.Run(test.name, - func(b *testing.B) { - benchmarkFormatFromPrettyPrint(b, tf, test.test) - }, - ) - } -} diff --git a/v1/transform/format_from_zip.go b/v1/transform/format_from_zip.go deleted file mode 100644 index 74b01943..00000000 --- a/v1/transform/format_from_zip.go +++ /dev/null @@ -1,86 +0,0 @@ -package transform - -import ( - "archive/zip" - "bytes" - "context" - "encoding/json" - "fmt" - - "github.com/brexhq/substation/config" - iconfig "github.com/brexhq/substation/internal/config" - "github.com/brexhq/substation/message" -) - -type formatZipConfig struct { - ID string `json:"id"` -} - -func (c *formatZipConfig) Decode(in interface{}) error { - return iconfig.Decode(in, c) -} - -func newFormatFromZip(_ context.Context, cfg config.Config) (*formatFromZip, error) { - conf := formatZipConfig{} - if err := conf.Decode(cfg.Settings); err != nil { - return nil, fmt.Errorf("transform format_from_zip: %v", err) - } - - if conf.ID == "" { - conf.ID = "format_from_zip" - } - - tf := formatFromZip{ - conf: conf, - } - - return &tf, nil -} - -type formatFromZip struct { - conf formatZipConfig -} - -func (tf *formatFromZip) Transform(ctx context.Context, msg *message.Message) ([]*message.Message, error) { - if msg.IsControl() { - return []*message.Message{msg}, nil - } - - b := bytes.NewReader(msg.Data()) - r, err := zip.NewReader(b, int64(len(msg.Data()))) - if err != nil { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err) - } - - var msgs []*message.Message - for _, f := range r.File { - if f.FileInfo().IsDir() { - continue - } - - if f.FileInfo().Size() == 0 { - continue - } - - rc, err := f.Open() - if err != nil { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err) - } - defer rc.Close() - - buf := new(bytes.Buffer) - if _, err := buf.ReadFrom(rc); err != nil { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err) - } - - m := message.New().SetData(buf.Bytes()).SetMetadata(msg.Metadata()) - msgs = append(msgs, m) - } - - return msgs, nil -} - -func (tf *formatFromZip) String() string { - b, _ := json.Marshal(tf.conf) - return string(b) -} diff --git a/v1/transform/format_from_zip_test.go b/v1/transform/format_from_zip_test.go deleted file mode 100644 index 395f3eba..00000000 --- a/v1/transform/format_from_zip_test.go +++ /dev/null @@ -1,86 +0,0 @@ -package transform - -import ( - "context" - "slices" - "testing" - - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" -) - -var _ Transformer = &formatFromZip{} - -var formatFromZipTests = []struct { - name string - cfg config.Config - test []byte - expected []string -}{ - { - "data", - config.Config{}, - // This is a zip file containing two files with the contents "bar" and "qux" (no newlines). 
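// (Decoding the entry names from the bytes below: the archive holds a tmp/
// directory entry plus tmp/foo.txt and tmp/baz.txt; directory entries are
// skipped by the transform's FileInfo().IsDir() check.)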
- []byte{80, 75, 3, 4, 10, 0, 0, 0, 0, 0, 57, 63, 251, 88, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 0, 28, 0, 116, 109, 112, 47, 85, 84, 9, 0, 3, 238, 10, 165, 102, 239, 10, 165, 102, 117, 120, 11, 0, 1, 4, 246, 1, 0, 0, 4, 20, 0, 0, 0, 80, 75, 3, 4, 10, 0, 0, 0, 0, 0, 55, 63, 251, 88, 200, 175, 228, 166, 3, 0, 0, 0, 3, 0, 0, 0, 11, 0, 28, 0, 116, 109, 112, 47, 98, 97, 122, 46, 116, 120, 116, 85, 84, 9, 0, 3, 233, 10, 165, 102, 234, 10, 165, 102, 117, 120, 11, 0, 1, 4, 246, 1, 0, 0, 4, 20, 0, 0, 0, 113, 117, 120, 80, 75, 3, 4, 10, 0, 0, 0, 0, 0, 44, 63, 251, 88, 170, 140, 255, 118, 3, 0, 0, 0, 3, 0, 0, 0, 11, 0, 28, 0, 116, 109, 112, 47, 102, 111, 111, 46, 116, 120, 116, 85, 84, 9, 0, 3, 212, 10, 165, 102, 214, 10, 165, 102, 117, 120, 11, 0, 1, 4, 246, 1, 0, 0, 4, 20, 0, 0, 0, 98, 97, 114, 80, 75, 1, 2, 30, 3, 10, 0, 0, 0, 0, 0, 57, 63, 251, 88, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 0, 24, 0, 0, 0, 0, 0, 0, 0, 16, 0, 237, 65, 0, 0, 0, 0, 116, 109, 112, 47, 85, 84, 5, 0, 3, 238, 10, 165, 102, 117, 120, 11, 0, 1, 4, 246, 1, 0, 0, 4, 20, 0, 0, 0, 80, 75, 1, 2, 30, 3, 10, 0, 0, 0, 0, 0, 55, 63, 251, 88, 200, 175, 228, 166, 3, 0, 0, 0, 3, 0, 0, 0, 11, 0, 24, 0, 0, 0, 0, 0, 1, 0, 0, 0, 164, 129, 62, 0, 0, 0, 116, 109, 112, 47, 98, 97, 122, 46, 116, 120, 116, 85, 84, 5, 0, 3, 233, 10, 165, 102, 117, 120, 11, 0, 1, 4, 246, 1, 0, 0, 4, 20, 0, 0, 0, 80, 75, 1, 2, 30, 3, 10, 0, 0, 0, 0, 0, 44, 63, 251, 88, 170, 140, 255, 118, 3, 0, 0, 0, 3, 0, 0, 0, 11, 0, 24, 0, 0, 0, 0, 0, 1, 0, 0, 0, 164, 129, 134, 0, 0, 0, 116, 109, 112, 47, 102, 111, 111, 46, 116, 120, 116, 85, 84, 5, 0, 3, 212, 10, 165, 102, 117, 120, 11, 0, 1, 4, 246, 1, 0, 0, 4, 20, 0, 0, 0, 80, 75, 5, 6, 0, 0, 0, 0, 3, 0, 3, 0, 236, 0, 0, 0, 206, 0, 0, 0, 0, 0}, - []string{ - "bar", - "qux", - }, - }, -} - -func TestFormatFromZip(t *testing.T) { - ctx := context.TODO() - for _, test := range formatFromZipTests { - t.Run(test.name, func(t *testing.T) { - msg := message.New().SetData(test.test) - - tf, err := newFormatFromZip(ctx, test.cfg) - if err != nil { - t.Fatal(err) - } - - msgs, err := tf.Transform(ctx, msg) - if err != nil { - t.Error(err) - } - - // The order of the output is not guaranteed, so we need to - // check that the expected values are present anywhere in the - // result. 
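// Every extracted payload must appear somewhere in the expected set,
// regardless of the order in which the zip entries were read.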
- var results []string - for _, m := range msgs { - results = append(results, string(m.Data())) - } - - for _, r := range results { - if !slices.Contains(test.expected, r) { - t.Errorf("expected %s, got %s", test.expected, r) - } - } - }) - } -} - -func benchmarkFormatFromZip(b *testing.B, tf *formatFromZip, data []byte) { - ctx := context.TODO() - for i := 0; i < b.N; i++ { - msg := message.New().SetData(data) - _, _ = tf.Transform(ctx, msg) - } -} - -func BenchmarkFormatFromZip(b *testing.B) { - for _, test := range formatFromZipTests { - tf, err := newFormatFromZip(context.TODO(), test.cfg) - if err != nil { - b.Fatal(err) - } - - b.Run(test.name, - func(b *testing.B) { - benchmarkFormatFromZip(b, tf, test.test) - }, - ) - } -} diff --git a/v1/transform/format_to_base64.go b/v1/transform/format_to_base64.go deleted file mode 100644 index adc634d8..00000000 --- a/v1/transform/format_to_base64.go +++ /dev/null @@ -1,69 +0,0 @@ -package transform - -import ( - "context" - "encoding/json" - "fmt" - - "github.com/brexhq/substation/config" - ibase64 "github.com/brexhq/substation/internal/base64" - "github.com/brexhq/substation/message" -) - -func newFormatToBase64(_ context.Context, cfg config.Config) (*formatToBase64, error) { - conf := formatBase64Config{} - if err := conf.Decode(cfg.Settings); err != nil { - return nil, fmt.Errorf("transform format_to_base64: %v", err) - } - - if conf.ID == "" { - conf.ID = "format_to_base64" - } - - if err := conf.Validate(); err != nil { - return nil, fmt.Errorf("transform %s: %v", conf.ID, err) - } - - tf := formatToBase64{ - conf: conf, - isObject: conf.Object.SourceKey != "" && conf.Object.TargetKey != "", - } - - return &tf, nil -} - -type formatToBase64 struct { - conf formatBase64Config - isObject bool -} - -func (tf *formatToBase64) Transform(ctx context.Context, msg *message.Message) ([]*message.Message, error) { - if msg.IsControl() { - return []*message.Message{msg}, nil - } - - if !tf.isObject { - b64 := ibase64.Encode(msg.Data()) - msg.SetData(b64) - - return []*message.Message{msg}, nil - } - - value := msg.GetValue(tf.conf.Object.SourceKey) - if !value.Exists() { - return []*message.Message{msg}, nil - } - - b64 := ibase64.Encode(value.Bytes()) - - if err := msg.SetValue(tf.conf.Object.TargetKey, b64); err != nil { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err) - } - - return []*message.Message{msg}, nil -} - -func (tf *formatToBase64) String() string { - b, _ := json.Marshal(tf.conf) - return string(b) -} diff --git a/v1/transform/format_to_base64_test.go b/v1/transform/format_to_base64_test.go deleted file mode 100644 index 65f430aa..00000000 --- a/v1/transform/format_to_base64_test.go +++ /dev/null @@ -1,98 +0,0 @@ -package transform - -import ( - "context" - "reflect" - "testing" - - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" -) - -var _ Transformer = &formatToBase64{} - -var formatToBase64Tests = []struct { - name string - cfg config.Config - test []byte - expected [][]byte - err error -}{ - // data tests - { - "data", - config.Config{}, - []byte(`b`), - [][]byte{ - []byte(`Yg==`), - }, - nil, - }, - // object tests - { - "object", - config.Config{ - Settings: map[string]interface{}{ - "object": map[string]interface{}{ - "source_key": "a", - "target_key": "a", - }, - }, - }, - []byte(`{"a":"b"}`), - [][]byte{ - []byte(`{"a":"Yg=="}`), - }, - nil, - }, -} - -func TestFormatToBase64(t *testing.T) { - ctx := context.TODO() - for _, test := range formatToBase64Tests { - t.Run(test.name, 
func(t *testing.T) { - tf, err := newFormatToBase64(ctx, test.cfg) - if err != nil { - t.Fatal(err) - } - - msg := message.New().SetData(test.test) - result, err := tf.Transform(ctx, msg) - if err != nil { - t.Error(err) - } - - var data [][]byte - for _, c := range result { - data = append(data, c.Data()) - } - - if !reflect.DeepEqual(data, test.expected) { - t.Errorf("expected %s, got %s", test.expected, data) - } - }) - } -} - -func benchmarkFormatToBase64(b *testing.B, tf *formatToBase64, data []byte) { - ctx := context.TODO() - for i := 0; i < b.N; i++ { - msg := message.New().SetData(data) - _, _ = tf.Transform(ctx, msg) - } -} - -func BenchmarkFormatToBase64Encode(b *testing.B) { - for _, test := range formatToBase64Tests { - tf, err := newFormatToBase64(context.TODO(), test.cfg) - if err != nil { - b.Fatal(err) - } - - b.Run(test.name, - func(b *testing.B) { - benchmarkFormatToBase64(b, tf, test.test) - }, - ) - } -} diff --git a/v1/transform/format_to_gzip.go b/v1/transform/format_to_gzip.go deleted file mode 100644 index 543798c3..00000000 --- a/v1/transform/format_to_gzip.go +++ /dev/null @@ -1,52 +0,0 @@ -package transform - -import ( - "context" - "encoding/json" - "fmt" - - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" -) - -func newFormatToGzip(_ context.Context, cfg config.Config) (*formatToGzip, error) { - conf := formatGzipConfig{} - if err := conf.Decode(cfg.Settings); err != nil { - return nil, fmt.Errorf("transform format_to_gzip: %v", err) - } - - if conf.ID == "" { - conf.ID = "format_to_gzip" - } - - tf := formatToGzip{ - conf: conf, - isObject: false, - } - - return &tf, nil -} - -type formatToGzip struct { - conf formatGzipConfig - isObject bool -} - -func (tf *formatToGzip) Transform(ctx context.Context, msg *message.Message) ([]*message.Message, error) { - if msg.IsControl() { - return []*message.Message{msg}, nil - } - - gz, err := fmtToGzip(msg.Data()) - if err != nil { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err) - } - - msg.SetData(gz) - return []*message.Message{msg}, nil -} - -func (tf *formatToGzip) String() string { - b, _ := json.Marshal(tf.conf) - return string(b) -} diff --git a/v1/transform/format_to_gzip_test.go b/v1/transform/format_to_gzip_test.go deleted file mode 100644 index 8f590e27..00000000 --- a/v1/transform/format_to_gzip_test.go +++ /dev/null @@ -1,81 +0,0 @@ -package transform - -import ( - "context" - "reflect" - "testing" - - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" -) - -var _ Transformer = &formatToGzip{} - -var formatToGzipTests = []struct { - name string - cfg config.Config - test []byte - expected [][]byte - err error -}{ - { - "data", - config.Config{}, - []byte(`foo`), - [][]byte{ - {31, 139, 8, 0, 0, 0, 0, 0, 0, 255, 74, 203, 207, 7, 4, 0, 0, 255, 255, 33, 101, 115, 140, 3, 0, 0, 0}, - }, - nil, - }, -} - -func TestFormatToGzip(t *testing.T) { - ctx := context.TODO() - for _, test := range formatToGzipTests { - t.Run(test.name, func(t *testing.T) { - msg := message.New().SetData(test.test) - - tf, err := newFormatToGzip(ctx, test.cfg) - if err != nil { - t.Fatal(err) - } - - result, err := tf.Transform(ctx, msg) - if err != nil { - t.Error(err) - } - - var data [][]byte - for _, c := range result { - data = append(data, c.Data()) - } - - if !reflect.DeepEqual(data, test.expected) { - t.Errorf("expected %s, got %s", test.expected, data) - } - }) - } -} - -func benchmarkFormatToGzip(b *testing.B, tf *formatToGzip, data []byte) { - ctx := 
context.TODO() - for i := 0; i < b.N; i++ { - msg := message.New().SetData(data) - _, _ = tf.Transform(ctx, msg) - } -} - -func BenchmarkFormatToGzip(b *testing.B) { - for _, test := range formatToGzipTests { - tf, err := newFormatToGzip(context.TODO(), test.cfg) - if err != nil { - b.Fatal(err) - } - - b.Run(test.name, - func(b *testing.B) { - benchmarkFormatToGzip(b, tf, test.test) - }, - ) - } -} diff --git a/v1/transform/hash.go b/v1/transform/hash.go deleted file mode 100644 index 5526e291..00000000 --- a/v1/transform/hash.go +++ /dev/null @@ -1,29 +0,0 @@ -package transform - -import ( - "fmt" - - iconfig "github.com/brexhq/substation/internal/config" - "github.com/brexhq/substation/internal/errors" -) - -type hashConfig struct { - ID string `json:"id"` - Object iconfig.Object `json:"object"` -} - -func (c *hashConfig) Decode(in interface{}) error { - return iconfig.Decode(in, c) -} - -func (c *hashConfig) Validate() error { - if c.Object.SourceKey == "" && c.Object.TargetKey != "" { - return fmt.Errorf("object_source_key: %v", errors.ErrMissingRequiredOption) - } - - if c.Object.SourceKey != "" && c.Object.TargetKey == "" { - return fmt.Errorf("object_target_key: %v", errors.ErrMissingRequiredOption) - } - - return nil -} diff --git a/v1/transform/hash_md5.go b/v1/transform/hash_md5.go deleted file mode 100644 index 3083dbb3..00000000 --- a/v1/transform/hash_md5.go +++ /dev/null @@ -1,71 +0,0 @@ -package transform - -import ( - "context" - "crypto/md5" - "encoding/json" - "fmt" - - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" -) - -func newHashMD5(_ context.Context, cfg config.Config) (*hashMD5, error) { - conf := hashConfig{} - if err := conf.Decode(cfg.Settings); err != nil { - return nil, fmt.Errorf("transform hash_md5: %v", err) - } - - if conf.ID == "" { - conf.ID = "hash_md5" - } - - if err := conf.Validate(); err != nil { - return nil, fmt.Errorf("transform %s: %v", conf.ID, err) - } - - tf := hashMD5{ - conf: conf, - isObject: conf.Object.SourceKey != "" && conf.Object.TargetKey != "", - } - - return &tf, nil -} - -type hashMD5 struct { - conf hashConfig - isObject bool -} - -func (tf *hashMD5) Transform(ctx context.Context, msg *message.Message) ([]*message.Message, error) { - if msg.IsControl() { - return []*message.Message{msg}, nil - } - - if !tf.isObject { - sum := md5.Sum(msg.Data()) - str := fmt.Sprintf("%x", sum) - - msg.SetData([]byte(str)) - return []*message.Message{msg}, nil - } - - value := msg.GetValue(tf.conf.Object.SourceKey) - if !value.Exists() { - return []*message.Message{msg}, nil - } - - sum := md5.Sum(value.Bytes()) - str := fmt.Sprintf("%x", sum) - - if err := msg.SetValue(tf.conf.Object.TargetKey, str); err != nil { - return nil, err - } - - return []*message.Message{msg}, nil -} - -func (tf *hashMD5) String() string { - b, _ := json.Marshal(tf.conf) - return string(b) -} diff --git a/v1/transform/hash_md5_test.go b/v1/transform/hash_md5_test.go deleted file mode 100644 index 35118208..00000000 --- a/v1/transform/hash_md5_test.go +++ /dev/null @@ -1,98 +0,0 @@ -package transform - -import ( - "context" - "reflect" - "testing" - - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" -) - -var _ Transformer = &hashMD5{} - -var hashMD5Tests = []struct { - name string - cfg config.Config - test []byte - expected [][]byte -}{ - { - "data", - config.Config{ - Settings: map[string]interface{}{ - "algorithm": "MD5", - }, - }, - []byte(`a`), - [][]byte{ - []byte(`0cc175b9c0f1b6a831c399e269772661`), - 
}, - }, - { - "object", - config.Config{ - Settings: map[string]interface{}{ - "object": map[string]interface{}{ - "source_key": "a", - "target_key": "a", - }, - "algorithm": "MD5", - }, - }, - []byte(`{"a":"b"}`), - [][]byte{ - []byte(`{"a":"92eb5ffee6ae2fec3ad71c777531578f"}`), - }, - }, -} - -func TestHashMD5(t *testing.T) { - ctx := context.TODO() - for _, test := range hashMD5Tests { - t.Run(test.name, func(t *testing.T) { - tf, err := newHashMD5(ctx, test.cfg) - if err != nil { - t.Fatal(err) - } - - msg := message.New().SetData(test.test) - result, err := tf.Transform(ctx, msg) - if err != nil { - t.Error(err) - } - - var data [][]byte - for _, c := range result { - data = append(data, c.Data()) - } - - if !reflect.DeepEqual(data, test.expected) { - t.Errorf("expected %s, got %s", test.expected, data) - } - }) - } -} - -func benchmarkHashMD5(b *testing.B, tf *hashMD5, data []byte) { - ctx := context.TODO() - for i := 0; i < b.N; i++ { - msg := message.New().SetData(data) - _, _ = tf.Transform(ctx, msg) - } -} - -func BenchmarkHashMD5(b *testing.B) { - for _, test := range hashMD5Tests { - tf, err := newHashMD5(context.TODO(), test.cfg) - if err != nil { - b.Fatal(err) - } - - b.Run(test.name, - func(b *testing.B) { - benchmarkHashMD5(b, tf, test.test) - }, - ) - } -} diff --git a/v1/transform/hash_sha256.go b/v1/transform/hash_sha256.go deleted file mode 100644 index f70c860f..00000000 --- a/v1/transform/hash_sha256.go +++ /dev/null @@ -1,71 +0,0 @@ -package transform - -import ( - "context" - "crypto/sha256" - "encoding/json" - "fmt" - - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" -) - -func newHashSHA256(_ context.Context, cfg config.Config) (*hashSHA256, error) { - conf := hashConfig{} - if err := conf.Decode(cfg.Settings); err != nil { - return nil, fmt.Errorf("transform hash_sha256: %v", err) - } - - if conf.ID == "" { - conf.ID = "hash_sha256" - } - - if err := conf.Validate(); err != nil { - return nil, fmt.Errorf("transform %s: %v", conf.ID, err) - } - - tf := hashSHA256{ - conf: conf, - isObject: conf.Object.SourceKey != "" && conf.Object.TargetKey != "", - } - - return &tf, nil -} - -type hashSHA256 struct { - conf hashConfig - isObject bool -} - -func (tf *hashSHA256) Transform(ctx context.Context, msg *message.Message) ([]*message.Message, error) { - if msg.IsControl() { - return []*message.Message{msg}, nil - } - - if !tf.isObject { - sum := sha256.Sum256(msg.Data()) - str := fmt.Sprintf("%x", sum) - - msg.SetData([]byte(str)) - return []*message.Message{msg}, nil - } - - value := msg.GetValue(tf.conf.Object.SourceKey) - if !value.Exists() { - return []*message.Message{msg}, nil - } - - sum := sha256.Sum256(value.Bytes()) - str := fmt.Sprintf("%x", sum) - - if err := msg.SetValue(tf.conf.Object.TargetKey, str); err != nil { - return nil, err - } - - return []*message.Message{msg}, nil -} - -func (tf *hashSHA256) String() string { - b, _ := json.Marshal(tf.conf) - return string(b) -} diff --git a/v1/transform/hash_sha256_test.go b/v1/transform/hash_sha256_test.go deleted file mode 100644 index 2cc89677..00000000 --- a/v1/transform/hash_sha256_test.go +++ /dev/null @@ -1,93 +0,0 @@ -package transform - -import ( - "context" - "reflect" - "testing" - - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" -) - -var _ Transformer = &hashSHA256{} - -var hashSHA256Tests = []struct { - name string - cfg config.Config - test []byte - expected [][]byte -}{ - { - "data", - config.Config{}, - []byte(`a`), - [][]byte{ - 
[]byte(`ca978112ca1bbdcafac231b39a23dc4da786eff8147c4e72b9807785afee48bb`), - }, - }, - { - "object", - config.Config{ - Settings: map[string]interface{}{ - "object": map[string]interface{}{ - "source_key": "a", - "target_key": "a", - }, - }, - }, - []byte(`{"a":"b"}`), - [][]byte{ - []byte(`{"a":"3e23e8160039594a33894f6564e1b1348bbd7a0088d42c4acb73eeaed59c009d"}`), - }, - }, -} - -func TestHashSHA256(t *testing.T) { - ctx := context.TODO() - for _, test := range hashSHA256Tests { - t.Run(test.name, func(t *testing.T) { - tf, err := newHashSHA256(ctx, test.cfg) - if err != nil { - t.Fatal(err) - } - - msg := message.New().SetData(test.test) - result, err := tf.Transform(ctx, msg) - if err != nil { - t.Error(err) - } - - var data [][]byte - for _, c := range result { - data = append(data, c.Data()) - } - - if !reflect.DeepEqual(data, test.expected) { - t.Errorf("expected %s, got %s", test.expected, data) - } - }) - } -} - -func benchmarkHashSHA256(b *testing.B, tf *hashSHA256, data []byte) { - ctx := context.TODO() - for i := 0; i < b.N; i++ { - msg := message.New().SetData(data) - _, _ = tf.Transform(ctx, msg) - } -} - -func BenchmarkHashSHA256(b *testing.B) { - for _, test := range hashSHA256Tests { - tf, err := newHashSHA256(context.TODO(), test.cfg) - if err != nil { - b.Fatal(err) - } - - b.Run(test.name, - func(b *testing.B) { - benchmarkHashSHA256(b, tf, test.test) - }, - ) - } -} diff --git a/v1/transform/meta_err.go b/v1/transform/meta_err.go deleted file mode 100644 index 4e8e2b0b..00000000 --- a/v1/transform/meta_err.go +++ /dev/null @@ -1,134 +0,0 @@ -package transform - -import ( - "context" - "encoding/json" - "fmt" - "regexp" - - "github.com/brexhq/substation/config" - iconfig "github.com/brexhq/substation/internal/config" - "github.com/brexhq/substation/internal/errors" - "github.com/brexhq/substation/message" -) - -type metaErrConfig struct { - // Transform that is applied with error handling. - // - // Deprecated: Transform exists for backwards compatibility and will be - // removed in a future release. Use Transforms instead. - Transform config.Config `json:"transform"` - // Transforms that are applied in series with error handling. - Transforms []config.Config `json:"transforms"` - - // ErrorMessages are regular expressions that match error messages and determine - // if the error should be caught. - // - // This is optional and defaults to an empty list (all errors are caught). 
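// For example (a sketch mirroring the JSON settings in the tests below):
//
//	"error_messages": ["^test"]
//
// catches only errors whose messages start with "test"; any other error is
// wrapped with the transform ID and returned.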
- ErrorMessages []string `json:"error_messages"` - - ID string `json:"id"` -} - -func (c *metaErrConfig) Decode(in interface{}) error { - return iconfig.Decode(in, c) -} - -func (c *metaErrConfig) Validate() error { - if c.Transform.Type == "" && len(c.Transforms) == 0 { - return fmt.Errorf("transform: %v", errors.ErrMissingRequiredOption) - } - - return nil -} - -func newMetaErr(ctx context.Context, cfg config.Config) (*metaErr, error) { - conf := metaErrConfig{} - if err := conf.Decode(cfg.Settings); err != nil { - return nil, fmt.Errorf("transform meta_err: %v", err) - } - - if conf.ID == "" { - conf.ID = "meta_err" - } - - if err := conf.Validate(); err != nil { - return nil, fmt.Errorf("transform %s: %v", conf.ID, err) - } - - tf := metaErr{ - conf: conf, - } - - if conf.Transform.Type != "" { - tfer, err := New(ctx, conf.Transform) - if err != nil { - return nil, fmt.Errorf("transform %s: %v", conf.ID, err) - } - - tf.tf = tfer - } - - tf.tfs = make([]Transformer, len(conf.Transforms)) - for i, t := range conf.Transforms { - tfer, err := New(ctx, t) - if err != nil { - return nil, fmt.Errorf("transform %s: %v", conf.ID, err) - } - - tf.tfs[i] = tfer - } - - tf.errorMessages = make([]*regexp.Regexp, len(conf.ErrorMessages)) - for i, eMsg := range conf.ErrorMessages { - r, err := regexp.Compile(eMsg) - if err != nil { - return nil, fmt.Errorf("transform %s: %v", conf.ID, err) - } - - tf.errorMessages[i] = r - } - - return &tf, nil -} - -type metaErr struct { - conf metaErrConfig - - tf Transformer - tfs []Transformer - errorMessages []*regexp.Regexp -} - -func (tf *metaErr) Transform(ctx context.Context, msg *message.Message) ([]*message.Message, error) { - var msgs []*message.Message - var err error - - if len(tf.tfs) > 0 { - msgs, err = Apply(ctx, tf.tfs, msg) - } else { - msgs, err = tf.tf.Transform(ctx, msg) - } - - if err != nil { - // Deprecated: Remove this block in a future release. 
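// With no error messages configured, every error is caught and the
// original message passes through unchanged.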
- if len(tf.errorMessages) == 0 { - return []*message.Message{msg}, nil - } - - for _, e := range tf.errorMessages { - if e.MatchString(err.Error()) { - return []*message.Message{msg}, nil - } - } - - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err) - } - - return msgs, nil -} - -func (tf *metaErr) String() string { - b, _ := json.Marshal(tf.conf) - return string(b) -} diff --git a/v1/transform/meta_err_test.go b/v1/transform/meta_err_test.go deleted file mode 100644 index 945378d0..00000000 --- a/v1/transform/meta_err_test.go +++ /dev/null @@ -1,190 +0,0 @@ -package transform - -import ( - "context" - "reflect" - "testing" - - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" -) - -var _ Transformer = &metaErr{} - -var metaErrTests = []struct { - name string - cfg config.Config - test []byte - expected [][]byte -}{ - { - "utility_err", - config.Config{ - Settings: map[string]interface{}{ - "transform": config.Config{ - Settings: map[string]interface{}{ - "message": "test error", - }, - Type: "utility_err", - }, - }, - }, - []byte(`{"a":"b"}`), - [][]byte{ - []byte(`{"a":"b"}`), - }, - }, - { - "utility_err", - config.Config{ - Settings: map[string]interface{}{ - "transforms": []config.Config{ - { - Settings: map[string]interface{}{ - "message": "test error", - }, - Type: "utility_err", - }, - }, - }, - }, - []byte(`{"a":"b"}`), - [][]byte{ - []byte(`{"a":"b"}`), - }, - }, - { - "error_messages string", - config.Config{ - Settings: map[string]interface{}{ - "transform": config.Config{ - Settings: map[string]interface{}{ - "message": "test error", - }, - Type: "utility_err", - }, - "error_messages": []string{ - "test error", - }, - }, - }, - []byte(`{"a":"b"}`), - [][]byte{ - []byte(`{"a":"b"}`), - }, - }, - { - "error_messages string", - config.Config{ - Settings: map[string]interface{}{ - "transforms": []config.Config{ - { - Settings: map[string]interface{}{ - "message": "test error", - }, - Type: "utility_err", - }, - }, - "error_messages": []string{ - "test error", - }, - }, - }, - []byte(`{"a":"b"}`), - [][]byte{ - []byte(`{"a":"b"}`), - }, - }, - { - "error_messages regex", - config.Config{ - Settings: map[string]interface{}{ - "transform": config.Config{ - Settings: map[string]interface{}{ - "message": "test error", - }, - Type: "utility_err", - }, - "error_messages": []string{ - "^test", - }, - }, - }, - []byte(`{"a":"b"}`), - [][]byte{ - []byte(`{"a":"b"}`), - }, - }, - { - "error_messages regex", - config.Config{ - Settings: map[string]interface{}{ - "transforms": []config.Config{ - { - Settings: map[string]interface{}{ - "message": "test error", - }, - Type: "utility_err", - }, - }, - "error_messages": []string{ - "^test", - }, - }, - }, - []byte(`{"a":"b"}`), - [][]byte{ - []byte(`{"a":"b"}`), - }, - }, -} - -func TestMetaErr(t *testing.T) { - ctx := context.TODO() - for _, test := range metaErrTests { - t.Run(test.name, func(t *testing.T) { - tf, err := newMetaErr(ctx, test.cfg) - if err != nil { - t.Fatal(err) - } - - msg := message.New().SetData(test.test) - result, err := tf.Transform(ctx, msg) - if err != nil { - t.Fatal(err) - } - - var data [][]byte - for _, c := range result { - data = append(data, c.Data()) - } - - if !reflect.DeepEqual(data, test.expected) { - t.Errorf("expected %s, got %s", test.expected, data) - } - }) - } -} - -func benchmarkMetaErr(b *testing.B, tf *metaErr, data []byte) { - ctx := context.TODO() - for i := 0; i < b.N; i++ { - msg := message.New().SetData(data) - _, _ = tf.Transform(ctx, msg) - } -} - -func 
BenchmarkMetaErr(b *testing.B) { - for _, test := range metaErrTests { - tf, err := newMetaErr(context.TODO(), test.cfg) - if err != nil { - b.Fatal(err) - } - - b.Run(test.name, - func(b *testing.B) { - benchmarkMetaErr(b, tf, test.test) - }, - ) - } -} diff --git a/v1/transform/meta_for_each.go b/v1/transform/meta_for_each.go deleted file mode 100644 index d31c2b5e..00000000 --- a/v1/transform/meta_for_each.go +++ /dev/null @@ -1,149 +0,0 @@ -package transform - -import ( - "context" - "encoding/json" - "fmt" - - "github.com/brexhq/substation/config" - iconfig "github.com/brexhq/substation/internal/config" - "github.com/brexhq/substation/internal/errors" - "github.com/brexhq/substation/message" -) - -type metaForEachConfig struct { - // Transform that is applied to each item in the array. - // - // Deprecated: Transform exists for backwards compatibility and will be - // removed in a future release. Use Transforms instead. - Transform config.Config `json:"transform"` - // Transforms that are applied in series to the data in the array. - Transforms []config.Config - - ID string `json:"id"` - Object iconfig.Object `json:"object"` -} - -func (c *metaForEachConfig) Decode(in interface{}) error { - return iconfig.Decode(in, c) -} - -func (c *metaForEachConfig) Validate() error { - if c.Object.SourceKey == "" { - return fmt.Errorf("object_source_key: %v", errors.ErrMissingRequiredOption) - } - - if c.Object.TargetKey == "" { - return fmt.Errorf("object_target_key: %v", errors.ErrMissingRequiredOption) - } - - if c.Transform.Type == "" && len(c.Transforms) == 0 { - return fmt.Errorf("type: %v", errors.ErrMissingRequiredOption) - } - - return nil -} - -func newMetaForEach(ctx context.Context, cfg config.Config) (*metaForEach, error) { - conf := metaForEachConfig{} - if err := conf.Decode(cfg.Settings); err != nil { - return nil, fmt.Errorf("transform meta_for_each: %v", err) - } - - if conf.ID == "" { - conf.ID = "meta_for_each" - } - - if err := conf.Validate(); err != nil { - return nil, fmt.Errorf("transform %s: %v", conf.ID, err) - } - - tf := metaForEach{ - conf: conf, - } - - if conf.Transform.Type != "" { - tfer, err := New(ctx, conf.Transform) - if err != nil { - return nil, fmt.Errorf("transform %s: %v", conf.ID, err) - } - tf.tf = tfer - } - - tf.tfs = make([]Transformer, len(conf.Transforms)) - for i, t := range conf.Transforms { - tfer, err := New(ctx, t) - if err != nil { - return nil, fmt.Errorf("transform %s: %v", conf.ID, err) - } - - tf.tfs[i] = tfer - } - - return &tf, nil -} - -type metaForEach struct { - conf metaForEachConfig - - tf Transformer - tfs []Transformer -} - -func (tf *metaForEach) Transform(ctx context.Context, msg *message.Message) ([]*message.Message, error) { - var msgs []*message.Message - var err error - - if msg.IsControl() { - if len(tf.tfs) > 0 { - msgs, err = Apply(ctx, tf.tfs, msg) - } else { - msgs, err = tf.tf.Transform(ctx, msg) - } - - if err != nil { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err) - } - - return msgs, nil - } - - value := msg.GetValue(tf.conf.Object.SourceKey) - if !value.Exists() { - return []*message.Message{msg}, nil - } - - if !value.IsArray() { - return []*message.Message{msg}, nil - } - - var arr []interface{} - for _, res := range value.Array() { - tmpMsg := message.New().SetData(res.Bytes()) - if len(tf.tfs) > 0 { - msgs, err = Apply(ctx, tf.tfs, tmpMsg) - } else { - msgs, err = tf.tf.Transform(ctx, tmpMsg) - } - - if err != nil { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err) - } - - for _, m 
:= range msgs { - v := bytesToValue(m.Data()) - arr = append(arr, v.Value()) - } - } - - if err := msg.SetValue(tf.conf.Object.TargetKey, arr); err != nil { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err) - } - - return []*message.Message{msg}, nil -} - -func (tf *metaForEach) String() string { - b, _ := json.Marshal(tf.conf) - return string(b) -} diff --git a/v1/transform/meta_for_each_test.go b/v1/transform/meta_for_each_test.go deleted file mode 100644 index 7a234efb..00000000 --- a/v1/transform/meta_for_each_test.go +++ /dev/null @@ -1,278 +0,0 @@ -package transform - -import ( - "context" - "reflect" - "testing" - - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" -) - -var _ Transformer = &metaForEach{} - -var metaForEachTests = []struct { - name string - cfg config.Config - test []byte - expected [][]byte -}{ - { - "meta_pipeline", - config.Config{ - Settings: map[string]interface{}{ - "object": map[string]interface{}{ - "source_key": "a", - "target_key": "b", - }, - "transform": config.Config{ - Type: "meta_pipeline", - Settings: map[string]interface{}{ - "transforms": []config.Config{ - { - Type: "format_from_base64", - }, - { - Type: "format_from_gzip", - }, - }, - }, - }, - }, - }, - []byte(`{"a":["H4sIAMpcy2IA/wXAIQ0AAACAsLbY93csBiFlc4wDAAAA","H4sIAI/bzmIA/wXAMQ0AAADCMK1MAv6Pph2qjP92AwAAAA=="]}`), - [][]byte{ - []byte(`{"a":["H4sIAMpcy2IA/wXAIQ0AAACAsLbY93csBiFlc4wDAAAA","H4sIAI/bzmIA/wXAMQ0AAADCMK1MAv6Pph2qjP92AwAAAA=="],"b":["foo","bar"]}`), - }, - }, - { - "format_from_base64", - config.Config{ - Settings: map[string]interface{}{ - "object": map[string]interface{}{ - "source_key": "secrets", - "target_key": "decoded", - }, - "transform": config.Config{ - Type: "format_from_base64", - }, - }, - }, - []byte(`{"secrets":["ZHJpbms=","eW91cg==","b3ZhbHRpbmU="]}`), - [][]byte{ - []byte(`{"secrets":["ZHJpbms=","eW91cg==","b3ZhbHRpbmU="],"decoded":["drink","your","ovaltine"]}`), - }, - }, - { - "string_capture", - config.Config{ - Settings: map[string]interface{}{ - "object": map[string]interface{}{ - "source_key": "user_email", - "target_key": "user_name", - }, - "transform": config.Config{ - Type: "string_capture", - Settings: map[string]interface{}{ - "pattern": "^([^@]*)@.*$", - }, - }, - }, - }, - []byte(`{"user_email":["john.d@example.com","jane.d@example.com"]}`), - [][]byte{ - []byte(`{"user_email":["john.d@example.com","jane.d@example.com"],"user_name":["john.d","jane.d"]}`), - }, - }, - { - "string_to_lower", - config.Config{ - Settings: map[string]interface{}{ - "object": map[string]interface{}{ - "source_key": "upcase", - "target_key": "downcase", - }, - "transform": config.Config{ - Type: "string_to_lower", - }, - }, - }, - []byte(`{"upcase":["ABC","DEF"]}`), - [][]byte{ - []byte(`{"upcase":["ABC","DEF"],"downcase":["abc","def"]}`), - }, - }, - { - "network_domain_subdomain", - config.Config{ - Settings: map[string]interface{}{ - "object": map[string]interface{}{ - "source_key": "domain", - "target_key": "subdomain", - }, - "transform": config.Config{ - Type: "network_domain_subdomain", - }, - }, - }, - []byte(`{"domain":["www.example.com","mail.example.top"]}`), - [][]byte{ - []byte(`{"domain":["www.example.com","mail.example.top"],"subdomain":["www","mail"]}`), - }, - }, - { - "hash_sha256", - config.Config{ - Settings: map[string]interface{}{ - "object": map[string]interface{}{ - "source_key": "a", - "target_key": "b", - }, - "transform": config.Config{ - Type: "hash_sha256", - }, - }, - }, - 
[]byte(`{"a":["foo","bar","baz"]}`), - [][]byte{ - []byte(`{"a":["foo","bar","baz"],"b":["2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae","fcde2b2edba56bf408601fb721fe9b5c338d10ee429ea04fae5511b68fbf8fb9","baa5a0964d3320fbc0c6a922140453c8513ea24ab8fd0577034804a967248096"]}`), - }, - }, - { - "object_insert", - config.Config{ - Settings: map[string]interface{}{ - "object": map[string]interface{}{ - "source_key": "a", - "target_key": "b", - }, - "transform": config.Config{ - Type: "object_insert", - Settings: map[string]interface{}{ - "object": map[string]interface{}{ - "target_key": "baz", - }, - "value": "qux", - }, - }, - }, - }, - []byte(`{"a":[{"foo":"bar"},{"baz":"quux"}]}`), - [][]byte{ - []byte(`{"a":[{"foo":"bar"},{"baz":"quux"}],"b":[{"baz":"qux","foo":"bar"},{"baz":"qux"}]}`), - }, - }, - { - "string_replace", - config.Config{ - Settings: map[string]interface{}{ - "object": map[string]interface{}{ - "source_key": "a", - "target_key": "b", - }, - "transform": config.Config{ - Type: "string_replace", - Settings: map[string]interface{}{ - "pattern": "r", - "replacement": "z", - }, - }, - }, - }, - []byte(`{"a":["bar","bard"]}`), - [][]byte{ - []byte(`{"a":["bar","bard"],"b":["baz","bazd"]}`), - }, - }, - { - "time_from_string", - config.Config{ - Settings: map[string]interface{}{ - "object": map[string]interface{}{ - "source_key": "a", - "target_key": "b", - }, - "transform": config.Config{ - Type: "time_from_string", - Settings: map[string]interface{}{ - "format": "2006-01-02T15:04:05Z", - }, - }, - }, - }, - []byte(`{"a":["2021-03-06T00:02:57Z","2021-03-06T00:03:57Z","2021-03-06T00:04:57Z"]}`), - [][]byte{ - []byte(`{"a":["2021-03-06T00:02:57Z","2021-03-06T00:03:57Z","2021-03-06T00:04:57Z"],"b":["1614988977000000000","1614989037000000000","1614989097000000000"]}`), - }, - }, - { - "format_from_base64", - config.Config{ - Settings: map[string]interface{}{ - "object": map[string]interface{}{ - "source_key": "secrets", - "target_key": "decoded", - }, - "transforms": []config.Config{ - { - Type: "format_from_base64", - }, - }, - }, - }, - []byte(`{"secrets":["ZHJpbms=","eW91cg==","b3ZhbHRpbmU="]}`), - [][]byte{ - []byte(`{"secrets":["ZHJpbms=","eW91cg==","b3ZhbHRpbmU="],"decoded":["drink","your","ovaltine"]}`), - }, - }, -} - -func TestMetaForEach(t *testing.T) { - ctx := context.TODO() - for _, test := range metaForEachTests { - t.Run(test.name, func(t *testing.T) { - tf, err := newMetaForEach(ctx, test.cfg) - if err != nil { - t.Fatal(err) - } - - msg := message.New().SetData(test.test) - result, err := tf.Transform(ctx, msg) - if err != nil { - t.Fatal(err) - } - - var data [][]byte - for _, c := range result { - data = append(data, c.Data()) - } - - if !reflect.DeepEqual(data, test.expected) { - t.Errorf("expected %s, got %s", test.expected, data) - } - }) - } -} - -func benchmarkMetaForEach(b *testing.B, tf *metaForEach, data []byte) { - ctx := context.TODO() - for i := 0; i < b.N; i++ { - msg := message.New().SetData(data) - _, _ = tf.Transform(ctx, msg) - } -} - -func BenchmarkMetaForEach(b *testing.B) { - for _, test := range metaForEachTests { - tf, err := newMetaForEach(context.TODO(), test.cfg) - if err != nil { - b.Fatal(err) - } - - b.Run(test.name, - func(b *testing.B) { - benchmarkMetaForEach(b, tf, test.test) - }, - ) - } -} diff --git a/v1/transform/meta_kv_store_lock.go b/v1/transform/meta_kv_store_lock.go deleted file mode 100644 index 44f83b16..00000000 --- a/v1/transform/meta_kv_store_lock.go +++ /dev/null @@ -1,238 +0,0 @@ -package transform - 
-import ( - "context" - "crypto/sha256" - "fmt" - "sync" - "time" - - "github.com/brexhq/substation/config" - iconfig "github.com/brexhq/substation/internal/config" - "github.com/brexhq/substation/internal/errors" - "github.com/brexhq/substation/internal/kv" - "github.com/brexhq/substation/message" -) - -type metaKVStoreLockObjectConfig struct { - // TTLKey retrieves a value from an object that is used as the time-to-live (TTL) - // of the item locked in the KV store. This value must be an integer that represents - // the Unix time when the item will be evicted from the store. Any precision greater - // than seconds (e.g., milliseconds, nanoseconds) is truncated to seconds. - // - // This is optional and defaults to using no TTL when setting items into the store. - TTLKey string `json:"ttl_key"` - - iconfig.Object -} - -type metaKVStoreLockConfig struct { - // Prefix is prepended to the key and can be used to simplify - // data management within a KV store. - // - // This is optional and defaults to an empty string. - Prefix string `json:"prefix"` - // TTLOffset is an offset used to determine the time-to-live (TTL) of the item set - // into the KV store. If Object.TTLKey is configured, then this value is added to the TTL - // value retrieved from the object. If Object.TTLKey is not used, then this value is added - // to the current time. - // - // For example, if Object.TTLKey is not set and the offset is "24h", then the value - // will be evicted from the store when more than 1 day has passed. (The offset is - // parsed with time.ParseDuration, which has no day unit.) - // - // This is optional and defaults to using no TTL when setting values into the store. - TTLOffset string `json:"ttl_offset"` - - // Transform that is applied after the lock is acquired. - // - // Deprecated: Transform exists for backwards compatibility and will be - // removed in a future release. Use Transforms instead. - Transform config.Config `json:"transform"` - // Transforms that are applied in series after the lock is acquired.
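// If any transform in the series fails, every lock acquired since the last
// control message is released (see the Unlock loops in Transform below).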
- Transforms []config.Config `json:"transforms"` - - ID string `json:"id"` - Object metaKVStoreLockObjectConfig `json:"object"` - KVStore config.Config `json:"kv_store"` -} - -func (c *metaKVStoreLockConfig) Decode(in interface{}) error { - return iconfig.Decode(in, c) -} - -func (c *metaKVStoreLockConfig) Validate() error { - if c.Transform.Type == "" && len(c.Transforms) == 0 { - return fmt.Errorf("transform: %v", errors.ErrMissingRequiredOption) - } - - if c.KVStore.Type == "" { - return fmt.Errorf("kv_store: %v", errors.ErrMissingRequiredOption) - } - - return nil -} - -func newMetaKVStoreLock(ctx context.Context, cfg config.Config) (*metaKVStoreLock, error) { - conf := metaKVStoreLockConfig{} - if err := conf.Decode(cfg.Settings); err != nil { - return nil, fmt.Errorf("transform meta_kv_store_lock: %v", err) - } - - if conf.ID == "" { - conf.ID = "meta_kv_store_lock" - } - - if err := conf.Validate(); err != nil { - return nil, fmt.Errorf("transform %s: %v", conf.ID, err) - } - - tf := metaKVStoreLock{ - conf: conf, - } - - if conf.Transform.Type != "" { - tfer, err := New(ctx, conf.Transform) - if err != nil { - return nil, fmt.Errorf("transform %s: %v", conf.ID, err) - } - - tf.tf = tfer - } - - tf.tfs = make([]Transformer, len(conf.Transforms)) - for i, t := range conf.Transforms { - tfer, err := New(ctx, t) - if err != nil { - return nil, fmt.Errorf("transform %s: %v", conf.ID, err) - } - - tf.tfs[i] = tfer - } - - locker, err := kv.GetLocker(conf.KVStore) - if err != nil { - return nil, fmt.Errorf("transform %s: %v", conf.ID, err) - } - - if err := locker.Setup(ctx); err != nil { - return nil, fmt.Errorf("transform %s: %v", conf.ID, err) - } - tf.locker = locker - - if conf.TTLOffset == "" { - conf.TTLOffset = "0s" - } - - dur, err := time.ParseDuration(conf.TTLOffset) - if err != nil { - return nil, fmt.Errorf("transform %s: %v", conf.ID, err) - } - tf.ttl = int64(dur.Seconds()) - - return &tf, nil -} - -// metaKVStoreLock applies a lock in a KV store and executes a transform. If the lock is already -// held, then the message is returned as is. The lock is applied with a time-to-live (TTL) value, which is -// used to determine when the lock is automatically released. -type metaKVStoreLock struct { - tf Transformer - tfs []Transformer - - conf metaKVStoreLockConfig - locker kv.Locker - ttl int64 - - // mu is required to prevent concurrent access to the keys slice. - mu sync.Mutex - keys []string -} - -// Transforms a message based on the configuration. -func (tf *metaKVStoreLock) Transform(ctx context.Context, msg *message.Message) ([]*message.Message, error) { - tf.mu.Lock() - defer tf.mu.Unlock() - - if msg.IsControl() { - var msgs []*message.Message - var err error - - if len(tf.tfs) > 0 { - msgs, err = Apply(ctx, tf.tfs, msg) - } else { - msgs, err = tf.tf.Transform(ctx, msg) - } - - if err != nil { - for _, key := range tf.keys { - _ = tf.locker.Unlock(ctx, key) - } - - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err) - } - - tf.keys = tf.keys[:0] - return msgs, nil - } - - // By default, the lock key is the SHA256 hash of the message. - var lockKey string - v := msg.GetValue(tf.conf.Object.SourceKey) - if !v.Exists() { - sum := sha256.Sum256(msg.Data()) - lockKey = fmt.Sprintf("%x", sum) - } else { - lockKey = v.String() - } - - if tf.conf.Prefix != "" { - lockKey = fmt.Sprint(tf.conf.Prefix, ":", lockKey) - } - - // Calculate TTL based on the configuration.
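// Three cases follow: an object-provided TTL plus the offset, an
// object-provided TTL alone, or the current time plus the offset. If
// neither is configured, ttl stays zero and no expiration is set.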
- var ttl int64 - if tf.conf.Object.TTLKey != "" && tf.ttl != 0 { - v := msg.GetValue(tf.conf.Object.TTLKey) - ttl = truncateTTL(v) + tf.ttl - } else if tf.conf.Object.TTLKey != "" { - v := msg.GetValue(tf.conf.Object.TTLKey) - ttl = truncateTTL(v) - } else if tf.ttl != 0 { - ttl = time.Now().Add(time.Duration(tf.ttl) * time.Second).Unix() - } - - // Acquire the lock. If the lock is already held, then the message is returned as is. - // This prevents the transform from being applied to the message more than once. - if err := tf.locker.Lock(ctx, lockKey, ttl); err != nil { - if err == kv.ErrNoLock { - return []*message.Message{msg}, nil - } - - for _, key := range tf.keys { - _ = tf.locker.Unlock(ctx, key) - } - - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err) - } - - tf.keys = append(tf.keys, lockKey) - - var msgs []*message.Message - var err error - - if len(tf.tfs) > 0 { - msgs, err = Apply(ctx, tf.tfs, msg) - } else { - msgs, err = tf.tf.Transform(ctx, msg) - } - - if err != nil { - for _, key := range tf.keys { - _ = tf.locker.Unlock(ctx, key) - } - - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err) - } - - return msgs, nil -} diff --git a/v1/transform/meta_metric_duration.go b/v1/transform/meta_metric_duration.go deleted file mode 100644 index edd2cea2..00000000 --- a/v1/transform/meta_metric_duration.go +++ /dev/null @@ -1,125 +0,0 @@ -package transform - -import ( - "context" - "encoding/json" - "fmt" - "time" - - "github.com/brexhq/substation/config" - iconfig "github.com/brexhq/substation/internal/config" - "github.com/brexhq/substation/internal/metrics" - "github.com/brexhq/substation/message" -) - -type metaMetricDurationConfig struct { - ID string `json:"id"` - Metric iconfig.Metric `json:"metric"` - - // Transform that has its duration measured. - // - // Deprecated: Transform exists for backwards compatibility and will be - // removed in a future release. Use Transforms instead. - Transform config.Config `json:"transform"` - // Transforms that have their total duration measured. - Transforms []config.Config `json:"transforms"` -} - -func (c *metaMetricDurationConfig) Decode(in interface{}) error { - return iconfig.Decode(in, c) -} - -func newMetaMetricsDuration(ctx context.Context, cfg config.Config) (*metaMetricDuration, error) { - // conf gets validated when calling metrics.New. - conf := metaMetricDurationConfig{} - if err := conf.Decode(cfg.Settings); err != nil { - return nil, fmt.Errorf("transform meta_metric_duration: %v", err) - } - - if conf.ID == "" { - conf.ID = "meta_metric_duration" - } - - m, err := metrics.New(ctx, conf.Metric.Destination) - if err != nil { - return nil, fmt.Errorf("transform %s: %v", conf.ID, err) - } - - tf := metaMetricDuration{ - conf: conf, - metric: m, - } - - if conf.Transform.Type != "" { - tfer, err := New(ctx, conf.Transform) - if err != nil { - return nil, fmt.Errorf("transform %s: %v", conf.ID, err) - } - tf.tf = tfer - } - - tf.tfs = make([]Transformer, len(conf.Transforms)) - for i, t := range conf.Transforms { - tfer, err := New(ctx, t) - if err != nil { - return nil, fmt.Errorf("transform %s: %v", conf.ID, err) - } - tf.tfs[i] = tfer - } - - return &tf, nil -} - -type metaMetricDuration struct { - conf metaMetricDurationConfig - - tf Transformer - tfs []Transformer - - // This is measured in nanoseconds. 
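// (The nanosecond note refers to the duration field below, which
// accumulates elapsed time across Transform calls via time.Since.)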
- metric metrics.Generator - duration time.Duration -} - -func (tf *metaMetricDuration) Transform(ctx context.Context, msg *message.Message) ([]*message.Message, error) { - if msg.IsControl() { - if err := tf.metric.Generate(ctx, metrics.Data{ - Name: tf.conf.Metric.Name, - Value: tf.duration, - Attributes: tf.conf.Metric.Attributes, - }); err != nil { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err) - } - - var msgs []*message.Message - var err error - - if len(tf.tfs) > 0 { - msgs, err = Apply(ctx, tf.tfs, msg) - } else { - msgs, err = tf.tf.Transform(ctx, msg) - } - - if err != nil { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err) - } - - return msgs, nil - } - - start := time.Now() - defer func() { - tf.duration += time.Since(start) - }() - - if len(tf.tfs) > 0 { - return Apply(ctx, tf.tfs, msg) - } - - return tf.tf.Transform(ctx, msg) -} - -func (tf *metaMetricDuration) String() string { - b, _ := json.Marshal(tf.conf) - return string(b) -} diff --git a/v1/transform/meta_pipeline.go b/v1/transform/meta_pipeline.go deleted file mode 100644 index b402d407..00000000 --- a/v1/transform/meta_pipeline.go +++ /dev/null @@ -1,139 +0,0 @@ -package transform - -import ( - "context" - "encoding/json" - "fmt" - - "github.com/brexhq/substation/config" - iconfig "github.com/brexhq/substation/internal/config" - "github.com/brexhq/substation/internal/errors" - "github.com/brexhq/substation/message" -) - -// errMetaPipelineArrayInput is returned when the transform is configured to process -// an object, but the input is an array. Array values are not supported by this transform, -// instead the input should be run through the metaForEach transform (which can encapsulate -// the pipeline transform). -var errMetaPipelineArrayInput = fmt.Errorf("input is an array") - -type metaPipelineConfig struct { - // Transforms that are applied in series to the data. - Transforms []config.Config `json:"transforms"` - - ID string `json:"id"` - Object iconfig.Object `json:"object"` -} - -func (c *metaPipelineConfig) Decode(in interface{}) error { - return iconfig.Decode(in, c) -} - -func (c *metaPipelineConfig) Validate() error { - if c.Object.SourceKey == "" && c.Object.TargetKey != "" { - return fmt.Errorf("object_source_key: %v", errors.ErrMissingRequiredOption) - } - - if c.Object.SourceKey != "" && c.Object.TargetKey == "" { - return fmt.Errorf("object_target_key: %v", errors.ErrMissingRequiredOption) - } - - if len(c.Transforms) == 0 { - return fmt.Errorf("transforms: %v", errors.ErrMissingRequiredOption) - } - - return nil -} - -// Deprecated: newMetaPipeline exists for backwards compatibility and will be -// removed in a future release. Use the Transforms fields on other meta transforms -// instead. 
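// A typical pipeline chains decoders, e.g. (a sketch mirroring the tests below):
//
//	{"transforms": [{"type": "format_from_base64"}, {"type": "format_from_gzip"}]}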
-func newMetaPipeline(ctx context.Context, cfg config.Config) (*metaPipeline, error) { - conf := metaPipelineConfig{} - if err := conf.Decode(cfg.Settings); err != nil { - return nil, fmt.Errorf("transform meta_pipeline: %v", err) - } - - if conf.ID == "" { - conf.ID = "meta_pipeline" - } - - if err := conf.Validate(); err != nil { - return nil, fmt.Errorf("transform %s: %v", conf.ID, err) - } - - tf := metaPipeline{ - conf: conf, - isObject: conf.Object.SourceKey != "" && conf.Object.TargetKey != "", - } - - var tform []Transformer - for _, c := range conf.Transforms { - t, err := New(ctx, c) - if err != nil { - return nil, fmt.Errorf("transform %s: %v", conf.ID, err) - } - - tform = append(tform, t) - } - tf.tf = tform - - return &tf, nil -} - -type metaPipeline struct { - conf metaPipelineConfig - isObject bool - - tf []Transformer -} - -func (tf *metaPipeline) Transform(ctx context.Context, msg *message.Message) ([]*message.Message, error) { - if msg.IsControl() { - msgs, err := Apply(ctx, tf.tf, msg) - if err != nil { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err) - } - - return msgs, nil - } - - if !tf.isObject { - msgs, err := Apply(ctx, tf.tf, msg) - if err != nil { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err) - } - - return msgs, nil - } - - value := msg.GetValue(tf.conf.Object.SourceKey) - if !value.Exists() { - return []*message.Message{msg}, nil - } - - if value.IsArray() { - return nil, fmt.Errorf("transform %s: key %s: %v", tf.conf.ID, tf.conf.Object.SourceKey, errMetaPipelineArrayInput) - } - - res, err := Apply(ctx, tf.tf, message.New().SetData(value.Bytes())) - if err != nil { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err) - } - - var output []*message.Message - for _, msg := range res { - if err := msg.SetValue(tf.conf.Object.TargetKey, msg.Data()); err != nil { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err) - } - - output = append(output, msg) - } - - return output, nil -} - -func (tf *metaPipeline) String() string { - b, _ := json.Marshal(tf.conf) - return string(b) -} diff --git a/v1/transform/meta_pipeline_test.go b/v1/transform/meta_pipeline_test.go deleted file mode 100644 index 607ad65d..00000000 --- a/v1/transform/meta_pipeline_test.go +++ /dev/null @@ -1,116 +0,0 @@ -package transform - -import ( - "context" - "reflect" - "testing" - - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" -) - -var _ Transformer = &metaPipeline{} - -var metaPipelineTests = []struct { - name string - cfg config.Config - test []byte - expected [][]byte -}{ - { - "object", - config.Config{ - Settings: map[string]interface{}{ - "object": map[string]interface{}{ - "source_key": "a", - "target_key": "a", - }, - "transforms": []config.Config{ - { - Type: "format_from_base64", - Settings: map[string]interface{}{}, - }, - { - Type: "format_from_gzip", - Settings: map[string]interface{}{}, - }, - }, - }, - }, - []byte(`{"a":"H4sIAO291GIA/wXAIQ0AAACAsLbY93csBiFlc4wDAAAA"}`), - [][]byte{ - []byte(`{"a":"foo"}`), - }, - }, - { - "data", - config.Config{ - Settings: map[string]interface{}{ - "transforms": []config.Config{ - { - Type: "format_from_base64", - Settings: map[string]interface{}{}, - }, - { - Type: "format_from_gzip", - Settings: map[string]interface{}{}, - }, - }, - }, - }, - []byte(`H4sIAO291GIA/wXAIQ0AAACAsLbY93csBiFlc4wDAAAA`), - [][]byte{ - []byte(`foo`), - }, - }, -} - -func TestMetaPipeline(t *testing.T) { - ctx := context.TODO() - for _, test := range metaPipelineTests { - t.Run(test.name, 
func(t *testing.T) { - tf, err := newMetaPipeline(ctx, test.cfg) - if err != nil { - t.Fatal(err) - } - - msg := message.New().SetData(test.test) - result, err := tf.Transform(ctx, msg) - if err != nil { - t.Error(err) - } - - var data [][]byte - for _, c := range result { - data = append(data, c.Data()) - } - - if !reflect.DeepEqual(data, test.expected) { - t.Errorf("expected %s, got %s", test.expected, data) - } - }) - } -} - -func benchmarkMetaPipeline(b *testing.B, tf *metaPipeline, data []byte) { - ctx := context.TODO() - for i := 0; i < b.N; i++ { - msg := message.New().SetData(data) - _, _ = tf.Transform(ctx, msg) - } -} - -func BenchmarkMetaPipeline(b *testing.B) { - for _, test := range metaPipelineTests { - tf, err := newMetaPipeline(context.TODO(), test.cfg) - if err != nil { - b.Fatal(err) - } - - b.Run(test.name, - func(b *testing.B) { - benchmarkMetaPipeline(b, tf, test.test) - }, - ) - } -} diff --git a/v1/transform/meta_retry.go b/v1/transform/meta_retry.go deleted file mode 100644 index 6bd36697..00000000 --- a/v1/transform/meta_retry.go +++ /dev/null @@ -1,169 +0,0 @@ -package transform - -import ( - "context" - "encoding/json" - "fmt" - "regexp" - "time" - - "github.com/brexhq/substation/condition" - "github.com/brexhq/substation/config" - iconfig "github.com/brexhq/substation/internal/config" - "github.com/brexhq/substation/internal/errors" - "github.com/brexhq/substation/message" -) - -// errMetaRetryLimitReached is returned when the configured retry -// limit is reached. Other transforms may try to catch this error, so -// any update to the variable's value is considered a BREAKING CHANGE. -var errMetaRetryLimitReached = fmt.Errorf("retry limit reached") - -type metaRetryConfig struct { - // Transforms that are applied in series, then checked for success - // based on the condition or errors. - Transforms []config.Config `json:"transforms"` - // Condition that must be true for the transforms to be considered - // a success. 
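// A minimal settings sketch for this transform (illustrative values;
// object_copy stands in for any wrapped transform). The transforms are
// retried with a constant delay until the condition passes, the count
// is exhausted, or an error matches none of the error_messages patterns:
//
//	settings := map[string]interface{}{
//		"transforms": []config.Config{
//			{Type: "object_copy", Settings: map[string]interface{}{}},
//		},
//		"retry": map[string]interface{}{
//			"count":          3,       // 0 retries forever
//			"delay":          "500ms", // parsed with time.ParseDuration
//			"error_messages": []string{"connection reset"},
//		},
//	}
//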
- Condition condition.Config `json:"condition"` - - Retry iconfig.Retry `json:"retry"` - ID string `json:"id"` -} - -func (c *metaRetryConfig) Decode(in interface{}) error { - return iconfig.Decode(in, c) -} - -func (c *metaRetryConfig) Validate() error { - for _, t := range c.Transforms { - if t.Type == "" { - return fmt.Errorf("transform: %v", errors.ErrMissingRequiredOption) - } - } - - return nil -} - -func newMetaRetry(ctx context.Context, cfg config.Config) (*metaRetry, error) { - conf := metaRetryConfig{} - if err := conf.Decode(cfg.Settings); err != nil { - return nil, fmt.Errorf("transform meta_retry: %v", err) - } - - if conf.ID == "" { - conf.ID = "meta_retry" - } - - if err := conf.Validate(); err != nil { - return nil, fmt.Errorf("transform %s: %v", conf.ID, err) - } - - tforms := make([]Transformer, len(conf.Transforms)) - for i, t := range conf.Transforms { - tfer, err := New(ctx, t) - if err != nil { - return nil, fmt.Errorf("transform %s: %v", conf.ID, err) - } - - tforms[i] = tfer - } - - cnd, err := condition.New(ctx, conf.Condition) - if err != nil { - return nil, fmt.Errorf("transform %s: %v", conf.ID, err) - } - - del, err := time.ParseDuration(conf.Retry.Delay) - if err != nil { - return nil, fmt.Errorf("transform %s: delay: %v", conf.ID, err) - } - - errs := make([]*regexp.Regexp, len(conf.Retry.ErrorMessages)) - for i, e := range conf.Retry.ErrorMessages { - r, err := regexp.Compile(e) - if err != nil { - return nil, fmt.Errorf("transform %s: error_messages: %v", conf.ID, err) - } - - errs[i] = r - } - - tf := metaRetry{ - conf: conf, - transforms: tforms, - condition: cnd, - delay: del, - errorMessages: errs, - } - - return &tf, nil -} - -type metaRetry struct { - conf metaRetryConfig - - condition condition.Operator - transforms []Transformer - delay time.Duration - errorMessages []*regexp.Regexp -} - -func (tf *metaRetry) Transform(ctx context.Context, msg *message.Message) ([]*message.Message, error) { - var counter int - -LOOP: - for { - // If the retry count is set to 0, then this will retry forever. - if tf.conf.Retry.Count > 0 && counter > tf.conf.Retry.Count { - break - } - - // Implements constant backoff. The first iteration is skipped. - if counter > 0 { - time.Sleep(tf.delay) - } - - counter++ - - // This must operate on a copy of the message to avoid - // modifying the original message in case the transform - // fails. - cMsg := *msg - msgs, err := Apply(ctx, tf.transforms, &cMsg) - if err != nil { - for _, r := range tf.errorMessages { - if r.MatchString(err.Error()) { - continue LOOP - } - } - - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err) - } - - for _, m := range msgs { - if m.IsControl() { - continue - } - - ok, err := tf.condition.Operate(ctx, m) - if err != nil { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err) - } - - // Any condition failure immediately restarts the loop. 
- if !ok { - continue LOOP - } - } - - return msgs, nil - } - - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, errMetaRetryLimitReached) -} - -func (tf *metaRetry) String() string { - b, _ := json.Marshal(tf.conf) - return string(b) -} diff --git a/v1/transform/meta_switch.go b/v1/transform/meta_switch.go deleted file mode 100644 index 041e6437..00000000 --- a/v1/transform/meta_switch.go +++ /dev/null @@ -1,186 +0,0 @@ -package transform - -import ( - "context" - "encoding/json" - "fmt" - - "github.com/brexhq/substation/condition" - "github.com/brexhq/substation/config" - iconfig "github.com/brexhq/substation/internal/config" - "github.com/brexhq/substation/internal/errors" - "github.com/brexhq/substation/message" -) - -type metaSwitchCaseConfig struct { - // Condition that must be true for the transforms to be applied. - Condition condition.Config `json:"condition"` - - // Transform that is applied when the condition is true. - // - // Deprecated: Transform exists for backwards compatibility and will be - // removed in a future release. Use Transforms instead. - Transform config.Config `json:"transform"` - // Transforms that are applied in series when the condition is true. - Transforms []config.Config `json:"transforms"` -} - -type metaSwitchConfig struct { - // Cases are the transforms that are conditionally applied. If - // no condition is configured, then the transform is always - // applied. - Cases []metaSwitchCaseConfig `json:"cases"` - - ID string `json:"id"` -} - -func (c *metaSwitchConfig) Decode(in interface{}) error { - return iconfig.Decode(in, c) -} - -func (c *metaSwitchConfig) Validate() error { - if len(c.Cases) == 0 { - return fmt.Errorf("cases: %v", errors.ErrMissingRequiredOption) - } - - for _, c := range c.Cases { - if c.Transform.Type == "" && len(c.Transforms) == 0 { - return fmt.Errorf("transform: %v", errors.ErrMissingRequiredOption) - } - } - - return nil -} - -type metaSwitchConditional struct { - operator condition.Operator - transformer Transformer - transformers []Transformer -} - -func newMetaSwitch(ctx context.Context, cfg config.Config) (*metaSwitch, error) { - conf := metaSwitchConfig{} - if err := conf.Decode(cfg.Settings); err != nil { - return nil, fmt.Errorf("transform meta_switch: %v", err) - } - - if conf.ID == "" { - conf.ID = "meta_switch" - } - - if err := conf.Validate(); err != nil { - return nil, fmt.Errorf("transform %s: %v", conf.ID, err) - } - - conditionals := make([]metaSwitchConditional, len(conf.Cases)) - for i, s := range conf.Cases { - conditional := metaSwitchConditional{} - - op, err := condition.New(ctx, s.Condition) - if err != nil { - return nil, fmt.Errorf("transform %s: %v", conf.ID, err) - } - conditional.operator = op - - if s.Transform.Type != "" { - tf, err := New(ctx, s.Transform) - if err != nil { - return nil, fmt.Errorf("transform meta_switch: %v", err) - } - - conditional.transformer = tf - } - - for _, c := range s.Transforms { - tf, err := New(ctx, c) - if err != nil { - return nil, fmt.Errorf("transform meta_switch: %v", err) - } - - conditional.transformers = append(conditional.transformers, tf) - } - - conditionals[i] = conditional - } - - tf := metaSwitch{ - conf: conf, - conditional: conditionals, - } - - return &tf, nil -} - -type metaSwitch struct { - conf metaSwitchConfig - - conditional []metaSwitchConditional -} - -func (tf *metaSwitch) Transform(ctx context.Context, msg *message.Message) ([]*message.Message, error) { - if msg.IsControl() { - var messages []*message.Message - for _, c := range 
tf.conditional { - var msgs []*message.Message - var err error - - if len(c.transformers) > 0 { - msgs, err = Apply(ctx, c.transformers, msg) - } else { - msgs, err = c.transformer.Transform(ctx, msg) - } - - if err != nil { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err) - } - - messages = append(messages, msgs...) - } - - // This is required to deduplicate the control messages that - // were sent to the conditional transforms. - var msgs []*message.Message - for _, m := range messages { - if m.IsControl() { - continue - } - - msgs = append(msgs, m) - } - - msgs = append(msgs, msg) - return msgs, nil - } - - for _, c := range tf.conditional { - ok, err := c.operator.Operate(ctx, msg) - if err != nil { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err) - } - - if !ok { - continue - } - - var msgs []*message.Message - if len(c.transformers) > 0 { - msgs, err = Apply(ctx, c.transformers, msg) - } else { - msgs, err = c.transformer.Transform(ctx, msg) - } - - if err != nil { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err) - } - - return msgs, nil - } - - // If no conditions match, then return the original message. - return []*message.Message{msg}, nil -} - -func (tf *metaSwitch) String() string { - b, _ := json.Marshal(tf.conf) - return string(b) -} diff --git a/v1/transform/meta_switch_test.go b/v1/transform/meta_switch_test.go deleted file mode 100644 index 41c4bb8b..00000000 --- a/v1/transform/meta_switch_test.go +++ /dev/null @@ -1,388 +0,0 @@ -package transform - -import ( - "context" - "reflect" - "testing" - - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" -) - -var _ Transformer = &metaSwitch{} - -var metaSwitchTests = []struct { - name string - cfg config.Config - data []byte - expected [][]byte -}{ - // This test simulates an if block by having the condition always - // succeed. - { - "if", - config.Config{ - Settings: map[string]interface{}{ - "cases": []map[string]interface{}{ - { - "condition": map[string]interface{}{ - "operator": "any", - "inspectors": []map[string]interface{}{ - { - "type": "string_contains", - "settings": map[string]interface{}{ - "object": map[string]interface{}{ - "source_key": "a", - }, - "value": "b", - }, - }, - }, - }, - "transform": map[string]interface{}{ - "type": "object_copy", - "settings": map[string]interface{}{ - "object": map[string]interface{}{ - "source_key": "a", - "target_key": "c", - }, - }, - }, - }, - }, - }, - }, - []byte(`{"a":"b"}`), - [][]byte{ - []byte(`{"a":"b","c":"b"}`), - }, - }, - { - "if", - config.Config{ - Settings: map[string]interface{}{ - "cases": []map[string]interface{}{ - { - "condition": map[string]interface{}{ - "operator": "any", - "inspectors": []map[string]interface{}{ - { - "type": "string_contains", - "settings": map[string]interface{}{ - "object": map[string]interface{}{ - "source_key": "a", - }, - "value": "b", - }, - }, - }, - }, - "transforms": []map[string]interface{}{ - { - "type": "object_copy", - "settings": map[string]interface{}{ - "object": map[string]interface{}{ - "source_key": "a", - "target_key": "c", - }, - }, - }, - }, - }, - }, - }, - }, - []byte(`{"a":"b"}`), - [][]byte{ - []byte(`{"a":"b","c":"b"}`), - }, - }, - // This test simulates an if/else block by having the first condition - // always fail and the second condition always succeed by not having - // any conditions (the condition package will always return true if - // there are no conditions). 
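// In config form, that default-branch pattern is a switch whose last
// case omits its condition (sketch; object_copy stands in for any
// transform):
//
//	"cases": []map[string]interface{}{
//		{
//			"condition":  map[string]interface{}{ /* matches some events */ },
//			"transforms": []config.Config{{Type: "object_copy"}},
//		},
//		{
//			// No condition: always true, so this is the else branch.
//			"transforms": []config.Config{{Type: "object_copy"}},
//		},
//	}
//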
- { - "if_else", - config.Config{ - Settings: map[string]interface{}{ - "cases": []map[string]interface{}{ - { - "condition": map[string]interface{}{ - "operator": "any", - "inspectors": []map[string]interface{}{ - { - "type": "string_contains", - "settings": map[string]interface{}{ - "object": map[string]interface{}{ - "source_key": "a", - }, - "value": "c", - }, - }, - }, - }, - "transform": map[string]interface{}{ - "type": "object_copy", - "settings": map[string]interface{}{ - "object": map[string]interface{}{ - "source_key": "a", - "target_key": "c", - }, - }, - }, - }, - { - "transform": map[string]interface{}{ - "type": "object_copy", - "settings": map[string]interface{}{ - "object": map[string]interface{}{ - "source_key": "a", - "target_key": "x", - }, - }, - }, - }, - }, - }, - }, - []byte(`{"a":"b"}`), - [][]byte{ - []byte(`{"a":"b","x":"b"}`), - }, - }, - { - "if_else", - config.Config{ - Settings: map[string]interface{}{ - "cases": []map[string]interface{}{ - { - "condition": map[string]interface{}{ - "operator": "any", - "inspectors": []map[string]interface{}{ - { - "type": "string_contains", - "settings": map[string]interface{}{ - "object": map[string]interface{}{ - "source_key": "a", - }, - "value": "c", - }, - }, - }, - }, - "transforms": []map[string]interface{}{ - { - "type": "object_copy", - "settings": map[string]interface{}{ - "object": map[string]interface{}{ - "source_key": "a", - "target_key": "c", - }, - }, - }, - }, - }, - { - "transforms": []map[string]interface{}{ - { - "type": "object_copy", - "settings": map[string]interface{}{ - "object": map[string]interface{}{ - "source_key": "a", - "target_key": "x", - }, - }, - }, - }, - }, - }, - }, - }, - []byte(`{"a":"b"}`), - [][]byte{ - []byte(`{"a":"b","x":"b"}`), - }, - }, - // This test simulates an if/else if block by having all conditions - // fail. The data should be unchanged. 
- { - "if_else_if", - config.Config{ - Settings: map[string]interface{}{ - "cases": []map[string]interface{}{ - { - "condition": map[string]interface{}{ - "operator": "any", - "inspectors": []map[string]interface{}{ - { - "type": "string_contains", - "settings": map[string]interface{}{ - "object": map[string]interface{}{ - "source_key": "a", - }, - "value": "c", - }, - }, - }, - }, - "transform": map[string]interface{}{ - "type": "object_copy", - "settings": map[string]interface{}{ - "object": map[string]interface{}{ - "source_key": "a", - "target_key": "c", - }, - }, - }, - }, - { - "condition": map[string]interface{}{ - "operator": "any", - "inspectors": []map[string]interface{}{ - { - "type": "string_contains", - "settings": map[string]interface{}{ - "object": map[string]interface{}{ - "source_key": "a", - }, - "value": "d", - }, - }, - }, - }, - "transform": map[string]interface{}{ - "type": "object_copy", - "settings": map[string]interface{}{ - "object": map[string]interface{}{ - "source_key": "a", - "target_key": "d", - }, - }, - }, - }, - }, - }, - }, - []byte(`{"a":"b"}`), - [][]byte{ - []byte(`{"a":"b"}`), - }, - }, - { - "if_else_if", - config.Config{ - Settings: map[string]interface{}{ - "cases": []map[string]interface{}{ - { - "condition": map[string]interface{}{ - "operator": "any", - "inspectors": []map[string]interface{}{ - { - "type": "string_contains", - "settings": map[string]interface{}{ - "object": map[string]interface{}{ - "source_key": "a", - }, - "value": "c", - }, - }, - }, - }, - "transforms": []map[string]interface{}{ - { - "type": "object_copy", - "settings": map[string]interface{}{ - "object": map[string]interface{}{ - "source_key": "a", - "target_key": "c", - }, - }, - }, - }, - }, - { - "condition": map[string]interface{}{ - "operator": "any", - "inspectors": []map[string]interface{}{ - { - "type": "string_contains", - "settings": map[string]interface{}{ - "object": map[string]interface{}{ - "source_key": "a", - }, - "value": "d", - }, - }, - }, - }, - "transforms": []map[string]interface{}{ - { - "type": "object_copy", - "settings": map[string]interface{}{ - "object": map[string]interface{}{ - "source_key": "a", - "target_key": "d", - }, - }, - }, - }, - }, - }, - }, - }, - []byte(`{"a":"b"}`), - [][]byte{ - []byte(`{"a":"b"}`), - }, - }, -} - -func TestMetaSwitch(t *testing.T) { - ctx := context.TODO() - for _, test := range metaSwitchTests { - t.Run(test.name, func(t *testing.T) { - tf, err := newMetaSwitch(ctx, test.cfg) - if err != nil { - t.Fatal(err) - } - - msg := message.New().SetData(test.data) - result, err := tf.Transform(ctx, msg) - if err != nil { - t.Error(err) - } - - var data [][]byte - for _, c := range result { - data = append(data, c.Data()) - } - - if !reflect.DeepEqual(data, test.expected) { - t.Errorf("expected %s, got %s", test.expected, data) - } - }) - } -} - -func benchmarkMetaSwitch(b *testing.B, tf *metaSwitch, data []byte) { - ctx := context.TODO() - for i := 0; i < b.N; i++ { - msg := message.New().SetData(data) - _, _ = tf.Transform(ctx, msg) - } -} - -func BenchmarkMetaSwitch(b *testing.B) { - for _, test := range metaSwitchTests { - tf, err := newMetaSwitch(context.TODO(), test.cfg) - if err != nil { - b.Fatal(err) - } - - b.Run(test.name, - func(b *testing.B) { - benchmarkMetaSwitch(b, tf, test.data) - }, - ) - } -} diff --git a/v1/transform/network.go b/v1/transform/network.go deleted file mode 100644 index 82ab4e52..00000000 --- a/v1/transform/network.go +++ /dev/null @@ -1,29 +0,0 @@ -package transform - -import ( - "fmt" 
- - iconfig "github.com/brexhq/substation/internal/config" - "github.com/brexhq/substation/internal/errors" -) - -type networkDomainConfig struct { - ID string `json:"id"` - Object iconfig.Object `json:"object"` -} - -func (c *networkDomainConfig) Decode(in interface{}) error { - return iconfig.Decode(in, c) -} - -func (c *networkDomainConfig) Validate() error { - if c.Object.SourceKey == "" && c.Object.TargetKey != "" { - return fmt.Errorf("object_source_key: %v", errors.ErrMissingRequiredOption) - } - - if c.Object.SourceKey != "" && c.Object.TargetKey == "" { - return fmt.Errorf("object_target_key: %v", errors.ErrMissingRequiredOption) - } - - return nil -} diff --git a/v1/transform/network_domain_registered_domain.go b/v1/transform/network_domain_registered_domain.go deleted file mode 100644 index da59e94d..00000000 --- a/v1/transform/network_domain_registered_domain.go +++ /dev/null @@ -1,77 +0,0 @@ -package transform - -import ( - "context" - "encoding/json" - "fmt" - - "golang.org/x/net/publicsuffix" - - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" -) - -func newNetworkDomainRegisteredDomain(_ context.Context, cfg config.Config) (*networkDomainRegisteredDomain, error) { - conf := networkDomainConfig{} - if err := conf.Decode(cfg.Settings); err != nil { - return nil, fmt.Errorf("transform network_domain_registered_domain: %v", err) - } - - if conf.ID == "" { - conf.ID = "network_domain_registered_domain" - } - - if err := conf.Validate(); err != nil { - return nil, fmt.Errorf("transform %s: %v", conf.ID, err) - } - - tf := networkDomainRegisteredDomain{ - conf: conf, - isObj: conf.Object.SourceKey != "" && conf.Object.TargetKey != "", - } - - return &tf, nil -} - -type networkDomainRegisteredDomain struct { - conf networkDomainConfig - isObj bool -} - -func (tf *networkDomainRegisteredDomain) Transform(ctx context.Context, msg *message.Message) ([]*message.Message, error) { - if msg.IsControl() { - return []*message.Message{msg}, nil - } - - if !tf.isObj { - str := string(msg.Data()) - domain, err := publicsuffix.EffectiveTLDPlusOne(str) - if err != nil { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err) - } - - msg.SetData([]byte(domain)) - return []*message.Message{msg}, nil - } - - value := msg.GetValue(tf.conf.Object.SourceKey) - if !value.Exists() { - return []*message.Message{msg}, nil - } - - domain, err := publicsuffix.EffectiveTLDPlusOne(value.String()) - if err != nil { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err) - } - - if err := msg.SetValue(tf.conf.Object.TargetKey, domain); err != nil { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err) - } - - return []*message.Message{msg}, nil -} - -func (tf *networkDomainRegisteredDomain) String() string { - b, _ := json.Marshal(tf.conf) - return string(b) -} diff --git a/v1/transform/network_domain_registered_domain_test.go b/v1/transform/network_domain_registered_domain_test.go deleted file mode 100644 index 1d076b12..00000000 --- a/v1/transform/network_domain_registered_domain_test.go +++ /dev/null @@ -1,95 +0,0 @@ -package transform - -import ( - "context" - "reflect" - "testing" - - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" -) - -var _ Transformer = &networkDomainRegisteredDomain{} - -var networkDomainRegisteredDomainTests = []struct { - name string - cfg config.Config - test []byte - expected [][]byte -}{ - // data tests - { - "data", - config.Config{}, - []byte(`c.b.com`), - [][]byte{ - []byte(`b.com`), - }, - }, - 
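// The registered-domain lookup above uses the real
// golang.org/x/net/publicsuffix package; the same call can be run
// standalone, mirroring the data test:
//
//	package main
//
//	import (
//		"fmt"
//
//		"golang.org/x/net/publicsuffix"
//	)
//
//	func main() {
//		etld1, err := publicsuffix.EffectiveTLDPlusOne("c.b.com")
//		if err != nil {
//			panic(err)
//		}
//		fmt.Println(etld1) // Output: b.com
//	}
//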
// object tests - { - "object", - config.Config{ - Settings: map[string]interface{}{ - "object": map[string]interface{}{ - "source_key": "a", - "target_key": "a", - }, - }, - }, - []byte(`{"a":"c.b.com"}`), - [][]byte{ - []byte(`{"a":"b.com"}`), - }, - }, -} - -func TestNetworkDomainRegisteredDomain(t *testing.T) { - ctx := context.TODO() - for _, test := range networkDomainRegisteredDomainTests { - t.Run(test.name, func(t *testing.T) { - tf, err := newNetworkDomainRegisteredDomain(ctx, test.cfg) - if err != nil { - t.Fatal(err) - } - - msg := message.New().SetData(test.test) - result, err := tf.Transform(ctx, msg) - if err != nil { - t.Error(err) - } - - var data [][]byte - for _, c := range result { - data = append(data, c.Data()) - } - - if !reflect.DeepEqual(data, test.expected) { - t.Errorf("expected %s, got %s", test.expected, data) - } - }) - } -} - -func benchmarkNetworkDomainRegisteredDomain(b *testing.B, tf *networkDomainRegisteredDomain, data []byte) { - ctx := context.TODO() - for i := 0; i < b.N; i++ { - msg := message.New().SetData(data) - _, _ = tf.Transform(ctx, msg) - } -} - -func BenchmarkNetworkDomainRegisteredDomain(b *testing.B) { - for _, test := range networkDomainRegisteredDomainTests { - tf, err := newNetworkDomainRegisteredDomain(context.TODO(), test.cfg) - if err != nil { - b.Fatal(err) - } - - b.Run(test.name, - func(b *testing.B) { - benchmarkNetworkDomainRegisteredDomain(b, tf, test.test) - }, - ) - } -} diff --git a/v1/transform/network_domain_subdomain.go b/v1/transform/network_domain_subdomain.go deleted file mode 100644 index cba23bcb..00000000 --- a/v1/transform/network_domain_subdomain.go +++ /dev/null @@ -1,100 +0,0 @@ -package transform - -import ( - "context" - "encoding/json" - "fmt" - "strings" - - "golang.org/x/net/publicsuffix" - - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" -) - -// errFmtSubdomainNoSubdomain is returned when a domain without a subdomain is -// processed. 
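// For example, a sketch mirroring fmtParseSubdomain below, which strips
// the registered domain plus its leading dot from the input:
//
//	domain, _ := publicsuffix.EffectiveTLDPlusOne("c.b.com") // "b.com"
//	subdomain := strings.TrimSuffix("c.b.com", "."+domain)   // "c"
//
// An input like "b.com" has nothing to strip, which is when the error
// below is returned.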
-var errFmtSubdomainNoSubdomain = fmt.Errorf("no subdomain") - -func newNetworkDomainSubdomain(_ context.Context, cfg config.Config) (*networkDomainSubdomain, error) { - conf := networkDomainConfig{} - if err := conf.Decode(cfg.Settings); err != nil { - return nil, fmt.Errorf("transform network_domain_subdomain: %v", err) - } - - if conf.ID == "" { - conf.ID = "network_domain_subdomain" - } - - if err := conf.Validate(); err != nil { - return nil, fmt.Errorf("transform %s: %v", conf.ID, err) - } - - tf := networkDomainSubdomain{ - conf: conf, - isObj: conf.Object.SourceKey != "" && conf.Object.TargetKey != "", - } - - return &tf, nil -} - -type networkDomainSubdomain struct { - conf networkDomainConfig - isObj bool -} - -func (tf *networkDomainSubdomain) Transform(ctx context.Context, msg *message.Message) ([]*message.Message, error) { - if msg.IsControl() { - return []*message.Message{msg}, nil - } - - if !tf.isObj { - str := string(msg.Data()) - domain, err := fmtParseSubdomain(str) - if err != nil { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err) - } - - msg.SetData([]byte(domain)) - return []*message.Message{msg}, nil - } - - value := msg.GetValue(tf.conf.Object.SourceKey) - if !value.Exists() { - return []*message.Message{msg}, nil - } - - domain, err := fmtParseSubdomain(value.String()) - if err != nil { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err) - } - - if err := msg.SetValue(tf.conf.Object.TargetKey, domain); err != nil { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err) - } - - return []*message.Message{msg}, nil -} - -func (tf *networkDomainSubdomain) String() string { - b, _ := json.Marshal(tf.conf) - return string(b) -} - -func fmtParseSubdomain(s string) (string, error) { - domain, err := publicsuffix.EffectiveTLDPlusOne(s) - if err != nil { - return "", err - } - - // Subdomain is the input string minus the domain and a leading dot: - // input == "foo.bar.com" - // domain == "bar.com" - // subdomain == "foo" ("foo.bar.com" minus ".bar.com") - subdomain := strings.Replace(s, "."+domain, "", 1) - if subdomain == domain { - return "", errFmtSubdomainNoSubdomain - } - - return subdomain, nil -} diff --git a/v1/transform/network_domain_subdomain_test.go b/v1/transform/network_domain_subdomain_test.go deleted file mode 100644 index 636bb2f2..00000000 --- a/v1/transform/network_domain_subdomain_test.go +++ /dev/null @@ -1,95 +0,0 @@ -package transform - -import ( - "context" - "reflect" - "testing" - - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" -) - -var _ Transformer = &networkDomainSubdomain{} - -var networkDomainSubdomainTests = []struct { - name string - cfg config.Config - test []byte - expected [][]byte -}{ - // data tests - { - "data", - config.Config{}, - []byte(`c.b.com`), - [][]byte{ - []byte(`c`), - }, - }, - // object tests - { - "object", - config.Config{ - Settings: map[string]interface{}{ - "object": map[string]interface{}{ - "source_key": "a", - "target_key": "a", - }, - }, - }, - []byte(`{"a":"c.b.com"}`), - [][]byte{ - []byte(`{"a":"c"}`), - }, - }, -} - -func TestNetworkDomainSubdomain(t *testing.T) { - ctx := context.TODO() - for _, test := range networkDomainSubdomainTests { - t.Run(test.name, func(t *testing.T) { - tf, err := newNetworkDomainSubdomain(ctx, test.cfg) - if err != nil { - t.Fatal(err) - } - - msg := message.New().SetData(test.test) - result, err := tf.Transform(ctx, msg) - if err != nil { - t.Error(err) - } - - var data [][]byte - for _, c := range result { - data = 
append(data, c.Data()) - } - - if !reflect.DeepEqual(data, test.expected) { - t.Errorf("expected %s, got %s", test.expected, data) - } - }) - } -} - -func benchmarkNetworkDomainSubdomain(b *testing.B, tf *networkDomainSubdomain, data []byte) { - ctx := context.TODO() - for i := 0; i < b.N; i++ { - msg := message.New().SetData(data) - _, _ = tf.Transform(ctx, msg) - } -} - -func BenchmarkNetworkDomainSubdomain(b *testing.B) { - for _, test := range networkDomainSubdomainTests { - tf, err := newNetworkDomainSubdomain(context.TODO(), test.cfg) - if err != nil { - b.Fatal(err) - } - - b.Run(test.name, - func(b *testing.B) { - benchmarkNetworkDomainSubdomain(b, tf, test.test) - }, - ) - } -} diff --git a/v1/transform/network_domain_top_level_domain.go b/v1/transform/network_domain_top_level_domain.go deleted file mode 100644 index 79acca37..00000000 --- a/v1/transform/network_domain_top_level_domain.go +++ /dev/null @@ -1,71 +0,0 @@ -package transform - -import ( - "context" - "encoding/json" - "fmt" - - "golang.org/x/net/publicsuffix" - - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" -) - -func newNetworkDomainTopLevelDomain(_ context.Context, cfg config.Config) (*networkDomainTopLevelDomain, error) { - conf := networkDomainConfig{} - if err := conf.Decode(cfg.Settings); err != nil { - return nil, fmt.Errorf("transform network_domain_top_level_domain: %v", err) - } - - if conf.ID == "" { - conf.ID = "network_domain_top_level_domain" - } - - if err := conf.Validate(); err != nil { - return nil, fmt.Errorf("transform %s: %v", conf.ID, err) - } - - tf := networkDomainTopLevelDomain{ - conf: conf, - isObj: conf.Object.SourceKey != "" && conf.Object.TargetKey != "", - } - - return &tf, nil -} - -type networkDomainTopLevelDomain struct { - conf networkDomainConfig - isObj bool -} - -func (tf *networkDomainTopLevelDomain) Transform(ctx context.Context, msg *message.Message) ([]*message.Message, error) { - if msg.IsControl() { - return []*message.Message{msg}, nil - } - - if !tf.isObj { - str := string(msg.Data()) - domain, _ := publicsuffix.PublicSuffix(str) - - msg.SetData([]byte(domain)) - return []*message.Message{msg}, nil - } - - value := msg.GetValue(tf.conf.Object.SourceKey) - if !value.Exists() { - return []*message.Message{msg}, nil - } - - domain, _ := publicsuffix.PublicSuffix(value.String()) - - if err := msg.SetValue(tf.conf.Object.TargetKey, domain); err != nil { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err) - } - - return []*message.Message{msg}, nil -} - -func (tf *networkDomainTopLevelDomain) String() string { - b, _ := json.Marshal(tf.conf) - return string(b) -} diff --git a/v1/transform/network_domain_top_level_domain_test.go b/v1/transform/network_domain_top_level_domain_test.go deleted file mode 100644 index d0cc7fbe..00000000 --- a/v1/transform/network_domain_top_level_domain_test.go +++ /dev/null @@ -1,95 +0,0 @@ -package transform - -import ( - "context" - "reflect" - "testing" - - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" -) - -var _ Transformer = &networkDomainTopLevelDomain{} - -var networkDomainTopLevelDomainTests = []struct { - name string - cfg config.Config - test []byte - expected [][]byte -}{ - // data tests - { - "data", - config.Config{}, - []byte(`b.com`), - [][]byte{ - []byte(`com`), - }, - }, - // object tests - { - "object", - config.Config{ - Settings: map[string]interface{}{ - "object": map[string]interface{}{ - "source_key": "a", - "target_key": "a", - }, - }, - }, - 
[]byte(`{"a":"b.com"}`), - [][]byte{ - []byte(`{"a":"com"}`), - }, - }, -} - -func TestNetworkDomainTopLevelDomain(t *testing.T) { - ctx := context.TODO() - for _, test := range networkDomainTopLevelDomainTests { - t.Run(test.name, func(t *testing.T) { - tf, err := newNetworkDomainTopLevelDomain(ctx, test.cfg) - if err != nil { - t.Fatal(err) - } - - msg := message.New().SetData(test.test) - result, err := tf.Transform(ctx, msg) - if err != nil { - t.Error(err) - } - - var data [][]byte - for _, c := range result { - data = append(data, c.Data()) - } - - if !reflect.DeepEqual(data, test.expected) { - t.Errorf("expected %s, got %s", test.expected, data) - } - }) - } -} - -func benchmarkNetworkDomainTopLevelDomain(b *testing.B, tf *networkDomainTopLevelDomain, data []byte) { - ctx := context.TODO() - for i := 0; i < b.N; i++ { - msg := message.New().SetData(data) - _, _ = tf.Transform(ctx, msg) - } -} - -func BenchmarkNetworkDomainTopLevelDomain(b *testing.B) { - for _, test := range networkDomainTopLevelDomainTests { - tf, err := newNetworkDomainTopLevelDomain(context.TODO(), test.cfg) - if err != nil { - b.Fatal(err) - } - - b.Run(test.name, - func(b *testing.B) { - benchmarkNetworkDomainTopLevelDomain(b, tf, test.test) - }, - ) - } -} diff --git a/v1/transform/number.go b/v1/transform/number.go deleted file mode 100644 index ec02512b..00000000 --- a/v1/transform/number.go +++ /dev/null @@ -1,67 +0,0 @@ -package transform - -import ( - "fmt" - "strconv" - - iconfig "github.com/brexhq/substation/internal/config" - "github.com/brexhq/substation/internal/errors" -) - -// Use this config for any Number transform that only requires a single value. -type numberValConfig struct { - Value float64 `json:"value"` - - ID string `json:"id"` - Object iconfig.Object `json:"object"` -} - -func (c *numberValConfig) Decode(in interface{}) error { - return iconfig.Decode(in, c) -} - -// 0.0 is a valid value and should not be checked. 
-func (c *numberValConfig) Validate() error { - if c.Object.SourceKey == "" && c.Object.TargetKey != "" { - return fmt.Errorf("object_source_key: %v", errors.ErrMissingRequiredOption) - } - - if c.Object.SourceKey != "" && c.Object.TargetKey == "" { - return fmt.Errorf("object_target_key: %v", errors.ErrMissingRequiredOption) - } - - return nil -} - -type numberMathConfig struct { - ID string `json:"id"` - Object iconfig.Object `json:"object"` -} - -func (c *numberMathConfig) Decode(in interface{}) error { - return iconfig.Decode(in, c) -} - -func (c *numberMathConfig) Validate() error { - if c.Object.SourceKey == "" && c.Object.TargetKey != "" { - return fmt.Errorf("object_source_key: %v", errors.ErrMissingRequiredOption) - } - - if c.Object.SourceKey != "" && c.Object.TargetKey == "" { - return fmt.Errorf("object_target_key: %v", errors.ErrMissingRequiredOption) - } - - return nil -} - -// numberFloat64ToString addresses multiple issues with performing math -// operations on floats: -// -// - Converts the float to a string without scientific notation: 1.1e+9 -> 1100000000 -// -// - Truncates the float to remove trailing zeros: 1.100000000 -> 1.1 -// -// - Removes the decimal point if it is a whole number: 1.0 -> 1 -func numberFloat64ToString(v float64) string { - return strconv.FormatFloat(v, 'f', -1, 64) -} diff --git a/v1/transform/number_math_addition.go b/v1/transform/number_math_addition.go deleted file mode 100644 index 02f673ea..00000000 --- a/v1/transform/number_math_addition.go +++ /dev/null @@ -1,93 +0,0 @@ -package transform - -import ( - "context" - "encoding/json" - "fmt" - "strconv" - - "github.com/brexhq/substation/config" - iconfig "github.com/brexhq/substation/internal/config" - "github.com/brexhq/substation/message" -) - -func newNumberMathAddition(_ context.Context, cfg config.Config) (*numberMathAddition, error) { - conf := numberMathConfig{} - if err := iconfig.Decode(cfg.Settings, &conf); err != nil { - return nil, fmt.Errorf("transform number_math_addition: %v", err) - } - - if conf.ID == "" { - conf.ID = "number_math_addition" - } - - if err := conf.Validate(); err != nil { - return nil, fmt.Errorf("transform %s: %v", conf.ID, err) - } - - tf := numberMathAddition{ - conf: conf, - isObject: conf.Object.SourceKey != "" && conf.Object.TargetKey != "", - } - - return &tf, nil -} - -type numberMathAddition struct { - conf numberMathConfig - isObject bool -} - -func (tf *numberMathAddition) Transform(ctx context.Context, msg *message.Message) ([]*message.Message, error) { - if msg.IsControl() { - return []*message.Message{msg}, nil - } - - var value message.Value - if tf.isObject { - value = msg.GetValue(tf.conf.Object.SourceKey) - } else { - value = bytesToValue(msg.Data()) - } - - if !value.Exists() { - return []*message.Message{msg}, nil - } - - if !value.IsArray() { - return []*message.Message{msg}, nil - } - - var vFloat64 float64 - for i, val := range value.Array() { - if i == 0 { - vFloat64 = val.Float() - continue - } - - vFloat64 += val.Float() - } - - strFloat64 := numberFloat64ToString(vFloat64) - if !tf.isObject { - msg.SetData([]byte(strFloat64)) - - return []*message.Message{msg}, nil - } - - f, err := strconv.ParseFloat(strFloat64, 64) - if err != nil { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err) - } - - if err := msg.SetValue(tf.conf.Object.TargetKey, f); err != nil { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err) - } - - return []*message.Message{msg}, nil -} - -func (tf *numberMathAddition) String() string { - b, _ := 
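// numberFloat64ToString (defined in number.go above) leans on
// strconv.FormatFloat with format 'f' and precision -1, which emits the
// shortest decimal string that round-trips the float64:
//
//	strconv.FormatFloat(1.1e9, 'f', -1, 64) // "1100000000" (no scientific notation)
//	strconv.FormatFloat(1.1, 'f', -1, 64)   // "1.1"        (no trailing zeros)
//	strconv.FormatFloat(1.0, 'f', -1, 64)   // "1"          (whole numbers drop the point)
//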
json.Marshal(tf.conf) - return string(b) -} diff --git a/v1/transform/number_math_addition_test.go b/v1/transform/number_math_addition_test.go deleted file mode 100644 index c45a5ed2..00000000 --- a/v1/transform/number_math_addition_test.go +++ /dev/null @@ -1,118 +0,0 @@ -package transform - -import ( - "context" - "reflect" - "testing" - - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" -) - -var _ Transformer = &numberMathAddition{} - -var numberMathAdditionTests = []struct { - name string - cfg config.Config - test []byte - expected [][]byte -}{ - // data tests - { - "data", - config.Config{}, - []byte(`[6,2]`), - [][]byte{ - []byte(`8`), - }, - }, - { - "data", - config.Config{}, - []byte(`[0.123456789,10]`), - [][]byte{ - []byte(`10.123456789`), - }, - }, - // object tests - { - "object", - config.Config{ - Settings: map[string]interface{}{ - "object": map[string]interface{}{ - "source_key": "a", - "target_key": "a", - }, - }, - }, - []byte(`{"a":[6,2]}`), - [][]byte{ - []byte(`{"a":8}`), - }, - }, - { - "object", - config.Config{ - Settings: map[string]interface{}{ - "object": map[string]interface{}{ - "source_key": "a", - "target_key": "a", - }, - }, - }, - []byte(`{"a":[0.123456789,10]}`), - [][]byte{ - []byte(`{"a":10.123456789}`), - }, - }, -} - -func TestNumberMathAddition(t *testing.T) { - ctx := context.TODO() - for _, test := range numberMathAdditionTests { - t.Run(test.name, func(t *testing.T) { - tf, err := newNumberMathAddition(ctx, test.cfg) - if err != nil { - t.Fatal(err) - } - - msg := message.New().SetData(test.test) - result, err := tf.Transform(ctx, msg) - if err != nil { - t.Error(err) - } - - var data [][]byte - for _, c := range result { - data = append(data, c.Data()) - } - - if !reflect.DeepEqual(data, test.expected) { - t.Errorf("expected %s, got %s", test.expected, data) - } - }) - } -} - -func benchmarkNumberMathAddition(b *testing.B, tf *numberMathAddition, data []byte) { - ctx := context.TODO() - for i := 0; i < b.N; i++ { - msg := message.New().SetData(data) - _, _ = tf.Transform(ctx, msg) - } -} - -func BenchmarkNumberMathAddition(b *testing.B) { - for _, test := range numberMathAdditionTests { - tf, err := newNumberMathAddition(context.TODO(), test.cfg) - if err != nil { - b.Fatal(err) - } - - b.Run(test.name, - func(b *testing.B) { - benchmarkNumberMathAddition(b, tf, test.test) - }, - ) - } -} diff --git a/v1/transform/number_math_division.go b/v1/transform/number_math_division.go deleted file mode 100644 index 60e2735c..00000000 --- a/v1/transform/number_math_division.go +++ /dev/null @@ -1,93 +0,0 @@ -package transform - -import ( - "context" - "encoding/json" - "fmt" - "strconv" - - "github.com/brexhq/substation/config" - iconfig "github.com/brexhq/substation/internal/config" - "github.com/brexhq/substation/message" -) - -func newNumberMathDivision(_ context.Context, cfg config.Config) (*numberMathDivision, error) { - conf := numberMathConfig{} - if err := iconfig.Decode(cfg.Settings, &conf); err != nil { - return nil, fmt.Errorf("transform number_math_division: %v", err) - } - - if conf.ID == "" { - conf.ID = "number_math_division" - } - - if err := conf.Validate(); err != nil { - return nil, fmt.Errorf("transform %s: %v", conf.ID, err) - } - - tf := numberMathDivision{ - conf: conf, - isObject: conf.Object.SourceKey != "" && conf.Object.TargetKey != "", - } - - return &tf, nil -} - -type numberMathDivision struct { - conf numberMathConfig - isObject bool -} - -func (tf *numberMathDivision) Transform(ctx 
context.Context, msg *message.Message) ([]*message.Message, error) { - if msg.IsControl() { - return []*message.Message{msg}, nil - } - - var value message.Value - if tf.isObject { - value = msg.GetValue(tf.conf.Object.SourceKey) - } else { - value = bytesToValue(msg.Data()) - } - - if !value.Exists() { - return []*message.Message{msg}, nil - } - - if !value.IsArray() { - return []*message.Message{msg}, nil - } - - var vFloat64 float64 - for i, val := range value.Array() { - if i == 0 { - vFloat64 = val.Float() - continue - } - - vFloat64 /= val.Float() - } - - strFloat64 := numberFloat64ToString(vFloat64) - if !tf.isObject { - msg.SetData([]byte(strFloat64)) - - return []*message.Message{msg}, nil - } - - f, err := strconv.ParseFloat(strFloat64, 64) - if err != nil { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err) - } - - if err := msg.SetValue(tf.conf.Object.TargetKey, f); err != nil { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err) - } - - return []*message.Message{msg}, nil -} - -func (tf *numberMathDivision) String() string { - b, _ := json.Marshal(tf.conf) - return string(b) -} diff --git a/v1/transform/number_math_division_test.go b/v1/transform/number_math_division_test.go deleted file mode 100644 index b7163ad1..00000000 --- a/v1/transform/number_math_division_test.go +++ /dev/null @@ -1,118 +0,0 @@ -package transform - -import ( - "context" - "reflect" - "testing" - - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" -) - -var _ Transformer = &numberMathDivision{} - -var numberMathDivisionTests = []struct { - name string - cfg config.Config - test []byte - expected [][]byte -}{ - // data tests - { - "data", - config.Config{}, - []byte(`[6,2]`), - [][]byte{ - []byte(`3`), - }, - }, - { - "data", - config.Config{}, - []byte(`[0.123456789,10]`), - [][]byte{ - []byte(`0.0123456789`), - }, - }, - // object tests - { - "object", - config.Config{ - Settings: map[string]interface{}{ - "object": map[string]interface{}{ - "source_key": "a", - "target_key": "a", - }, - }, - }, - []byte(`{"a":[6,2]}`), - [][]byte{ - []byte(`{"a":3}`), - }, - }, - { - "object", - config.Config{ - Settings: map[string]interface{}{ - "object": map[string]interface{}{ - "source_key": "a", - "target_key": "a", - }, - }, - }, - []byte(`{"a":[0.123456789,10]}`), - [][]byte{ - []byte(`{"a":0.0123456789}`), - }, - }, -} - -func TestDiv(t *testing.T) { - ctx := context.TODO() - for _, test := range numberMathDivisionTests { - t.Run(test.name, func(t *testing.T) { - tf, err := newNumberMathDivision(ctx, test.cfg) - if err != nil { - t.Fatal(err) - } - - msg := message.New().SetData(test.test) - result, err := tf.Transform(ctx, msg) - if err != nil { - t.Error(err) - } - - var data [][]byte - for _, c := range result { - data = append(data, c.Data()) - } - - if !reflect.DeepEqual(data, test.expected) { - t.Errorf("expected %s, got %s", test.expected, data) - } - }) - } -} - -func benchmarkNumberMathDivision(b *testing.B, tf *numberMathDivision, data []byte) { - ctx := context.TODO() - for i := 0; i < b.N; i++ { - msg := message.New().SetData(data) - _, _ = tf.Transform(ctx, msg) - } -} - -func BenchmarkNumberMathDivision(b *testing.B) { - for _, test := range numberMathDivisionTests { - tf, err := newNumberMathDivision(context.TODO(), test.cfg) - if err != nil { - b.Fatal(err) - } - - b.Run(test.name, - func(b *testing.B) { - benchmarkNumberMathDivision(b, tf, test.test) - }, - ) - } -} diff --git a/v1/transform/number_math_multiplication.go 
b/v1/transform/number_math_multiplication.go deleted file mode 100644 index 56d27573..00000000 --- a/v1/transform/number_math_multiplication.go +++ /dev/null @@ -1,93 +0,0 @@ -package transform - -import ( - "context" - "encoding/json" - "fmt" - "strconv" - - "github.com/brexhq/substation/config" - iconfig "github.com/brexhq/substation/internal/config" - "github.com/brexhq/substation/message" -) - -func newNumberMathMultiplication(_ context.Context, cfg config.Config) (*numberMathMultiplication, error) { - conf := numberMathConfig{} - if err := iconfig.Decode(cfg.Settings, &conf); err != nil { - return nil, fmt.Errorf("transform number_math_multiplication: %v", err) - } - - if conf.ID == "" { - conf.ID = "number_math_multiplication" - } - - if err := conf.Validate(); err != nil { - return nil, fmt.Errorf("transform %s: %v", conf.ID, err) - } - - tf := numberMathMultiplication{ - conf: conf, - isObject: conf.Object.SourceKey != "" && conf.Object.TargetKey != "", - } - - return &tf, nil -} - -type numberMathMultiplication struct { - conf numberMathConfig - isObject bool -} - -func (tf *numberMathMultiplication) Transform(ctx context.Context, msg *message.Message) ([]*message.Message, error) { - if msg.IsControl() { - return []*message.Message{msg}, nil - } - - var value message.Value - if tf.isObject { - value = msg.GetValue(tf.conf.Object.SourceKey) - } else { - value = bytesToValue(msg.Data()) - } - - if !value.Exists() { - return []*message.Message{msg}, nil - } - - if !value.IsArray() { - return []*message.Message{msg}, nil - } - - var vFloat64 float64 - for i, val := range value.Array() { - if i == 0 { - vFloat64 = val.Float() - continue - } - - vFloat64 *= val.Float() - } - - strFloat64 := numberFloat64ToString(vFloat64) - if !tf.isObject { - msg.SetData([]byte(strFloat64)) - - return []*message.Message{msg}, nil - } - - f, err := strconv.ParseFloat(strFloat64, 64) - if err != nil { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err) - } - - if err := msg.SetValue(tf.conf.Object.TargetKey, f); err != nil { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err) - } - - return []*message.Message{msg}, nil -} - -func (tf *numberMathMultiplication) String() string { - b, _ := json.Marshal(tf.conf) - return string(b) -} diff --git a/v1/transform/number_math_multiplication_test.go b/v1/transform/number_math_multiplication_test.go deleted file mode 100644 index 9516e879..00000000 --- a/v1/transform/number_math_multiplication_test.go +++ /dev/null @@ -1,120 +0,0 @@ -package transform - -import ( - "context" - "reflect" - "testing" - - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" -) - -var _ Transformer = &numberMathMultiplication{} - -var numberMathMultiplicationTests = []struct { - name string - cfg config.Config - test []byte - expected [][]byte -}{ - // data tests - { - "data", - config.Config{}, - []byte(`[2,3]`), - [][]byte{ - []byte(`6`), - }, - }, - { - "data", - config.Config{}, - []byte(`[0.123456789,10]`), - [][]byte{ - []byte(`1.23456789`), - }, - }, - // object tests - { - "object", - config.Config{ - Settings: map[string]interface{}{ - "object": map[string]interface{}{ - "source_key": "a", - "target_key": "a", - }, - }, - }, - []byte(`{"a":[2,3]}`), - [][]byte{ - []byte(`{"a":6}`), - }, - }, - { - "object", - config.Config{ - Settings: map[string]interface{}{ - "object": map[string]interface{}{ - "source_key": "a", - "target_key": "a", - }, - }, - }, - []byte(`{"a":[0.123456789,10]}`), - [][]byte{ - []byte(`{"a":1.23456789}`), - 
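// Each number_math transform folds its input array from the left: the
// first element seeds the accumulator and the operator is applied with
// every later element, which is why [2,3] multiplies to 6 here just as
// [6,2] adds to 8, subtracts to 4, and divides to 3 in the sibling
// tests. A standalone sketch of the fold (an empty array would simply
// yield the transforms' zero-value result of 0):
//
//	func fold(vals []float64, op func(acc, v float64) float64) float64 {
//		if len(vals) == 0 {
//			return 0
//		}
//		acc := vals[0]
//		for _, v := range vals[1:] {
//			acc = op(acc, v)
//		}
//		return acc
//	}
//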
}, - }, -} - -func TestNumberMathMultiplication(t *testing.T) { - ctx := context.TODO() - for _, test := range numberMathMultiplicationTests { - t.Run(test.name, func(t *testing.T) { - tf, err := newNumberMathMultiplication(ctx, test.cfg) - if err != nil { - t.Fatal(err) - } - - msg := message.New().SetData(test.test) - result, err := tf.Transform(ctx, msg) - if err != nil { - t.Error(err) - } - - var data [][]byte - for _, c := range result { - data = append(data, c.Data()) - } - - if !reflect.DeepEqual(data, test.expected) { - t.Errorf("expected %s, got %s", test.expected, data) - } - }) - } -} - -func benchmarkNumberMathMultiplication(b *testing.B, tf *numberMathMultiplication, data []byte) { - ctx := context.TODO() - msg := message.New().SetData(data) - b.ResetTimer() - - for i := 0; i < b.N; i++ { - _, _ = tf.Transform(ctx, msg) - } -} - -func BenchmarkNumberMathMultiplication(b *testing.B) { - for _, test := range numberMathMultiplicationTests { - tf, err := newNumberMathMultiplication(context.TODO(), test.cfg) - if err != nil { - b.Fatal(err) - } - - b.Run(test.name, - func(b *testing.B) { - benchmarkNumberMathMultiplication(b, tf, test.test) - }, - ) - } -} diff --git a/v1/transform/number_math_subtraction.go b/v1/transform/number_math_subtraction.go deleted file mode 100644 index cca26e92..00000000 --- a/v1/transform/number_math_subtraction.go +++ /dev/null @@ -1,93 +0,0 @@ -package transform - -import ( - "context" - "encoding/json" - "fmt" - "strconv" - - "github.com/brexhq/substation/config" - iconfig "github.com/brexhq/substation/internal/config" - "github.com/brexhq/substation/message" -) - -func newNumberMathSubtraction(_ context.Context, cfg config.Config) (*numberMathSubtraction, error) { - conf := numberMathConfig{} - if err := iconfig.Decode(cfg.Settings, &conf); err != nil { - return nil, fmt.Errorf("transform number_math_subtraction: %v", err) - } - - if conf.ID == "" { - conf.ID = "number_math_subtraction" - } - - if err := conf.Validate(); err != nil { - return nil, fmt.Errorf("transform %s: %v", conf.ID, err) - } - - tf := numberMathSubtraction{ - conf: conf, - isObject: conf.Object.SourceKey != "" && conf.Object.TargetKey != "", - } - - return &tf, nil -} - -type numberMathSubtraction struct { - conf numberMathConfig - isObject bool -} - -func (tf *numberMathSubtraction) Transform(ctx context.Context, msg *message.Message) ([]*message.Message, error) { - if msg.IsControl() { - return []*message.Message{msg}, nil - } - - var value message.Value - if tf.isObject { - value = msg.GetValue(tf.conf.Object.SourceKey) - } else { - value = bytesToValue(msg.Data()) - } - - if !value.Exists() { - return []*message.Message{msg}, nil - } - - if !value.IsArray() { - return []*message.Message{msg}, nil - } - - var vFloat64 float64 - for i, val := range value.Array() { - if i == 0 { - vFloat64 = val.Float() - continue - } - - vFloat64 -= val.Float() - } - - strFloat64 := numberFloat64ToString(vFloat64) - if !tf.isObject { - msg.SetData([]byte(strFloat64)) - - return []*message.Message{msg}, nil - } - - f, err := strconv.ParseFloat(strFloat64, 64) - if err != nil { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err) - } - - if err := msg.SetValue(tf.conf.Object.TargetKey, f); err != nil { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err) - } - - return []*message.Message{msg}, nil -} - -func (tf *numberMathSubtraction) String() string { - b, _ := json.Marshal(tf.conf) - return string(b) -} diff --git a/v1/transform/number_math_subtraction_test.go 
b/v1/transform/number_math_subtraction_test.go deleted file mode 100644 index 83403dc9..00000000 --- a/v1/transform/number_math_subtraction_test.go +++ /dev/null @@ -1,120 +0,0 @@ -package transform - -import ( - "context" - "reflect" - "testing" - - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" -) - -var _ Transformer = &numberMathSubtraction{} - -var numberMathSubtractionTests = []struct { - name string - cfg config.Config - test []byte - expected [][]byte -}{ - // data tests - { - "data", - config.Config{}, - []byte(`[6,2]`), - [][]byte{ - []byte(`4`), - }, - }, - { - "data", - config.Config{}, - []byte(`[0.123456789,10]`), - [][]byte{ - []byte(`-9.876543211`), - }, - }, - // object tests - { - "object", - config.Config{ - Settings: map[string]interface{}{ - "object": map[string]interface{}{ - "source_key": "a", - "target_key": "a", - }, - }, - }, - []byte(`{"a":[6,2]}`), - [][]byte{ - []byte(`{"a":4}`), - }, - }, - { - "object", - config.Config{ - Settings: map[string]interface{}{ - "object": map[string]interface{}{ - "source_key": "a", - "target_key": "a", - }, - }, - }, - []byte(`{"a":[0.123456789,10]}`), - [][]byte{ - []byte(`{"a":-9.876543211}`), - }, - }, -} - -func TestNumberMathSubtraction(t *testing.T) { - ctx := context.TODO() - for _, test := range numberMathSubtractionTests { - t.Run(test.name, func(t *testing.T) { - tf, err := newNumberMathSubtraction(ctx, test.cfg) - if err != nil { - t.Fatal(err) - } - - msg := message.New().SetData(test.test) - result, err := tf.Transform(ctx, msg) - if err != nil { - t.Error(err) - } - - var data [][]byte - for _, c := range result { - data = append(data, c.Data()) - } - - if !reflect.DeepEqual(data, test.expected) { - t.Errorf("expected %s, got %s", test.expected, data) - } - }) - } -} - -func benchmarkNumberMathSubtraction(b *testing.B, tf *numberMathSubtraction, data []byte) { - ctx := context.TODO() - msg := message.New().SetData(data) - b.ResetTimer() - - for i := 0; i < b.N; i++ { - _, _ = tf.Transform(ctx, msg) - } -} - -func BenchmarkNumberMathSubtraction(b *testing.B) { - for _, test := range numberMathSubtractionTests { - tf, err := newNumberMathSubtraction(context.TODO(), test.cfg) - if err != nil { - b.Fatal(err) - } - - b.Run(test.name, - func(b *testing.B) { - benchmarkNumberMathSubtraction(b, tf, test.test) - }, - ) - } -} diff --git a/v1/transform/number_maximum.go b/v1/transform/number_maximum.go deleted file mode 100644 index 45f28ef4..00000000 --- a/v1/transform/number_maximum.go +++ /dev/null @@ -1,76 +0,0 @@ -package transform - -import ( - "context" - "encoding/json" - "fmt" - "math" - - "github.com/brexhq/substation/config" - iconfig "github.com/brexhq/substation/internal/config" - "github.com/brexhq/substation/message" -) - -func newNumberMaximum(_ context.Context, cfg config.Config) (*numberMaximum, error) { - conf := numberValConfig{} - if err := iconfig.Decode(cfg.Settings, &conf); err != nil { - return nil, fmt.Errorf("transform number_maximum: %v", err) - } - - if conf.ID == "" { - conf.ID = "number_maximum" - } - - if err := conf.Validate(); err != nil { - return nil, fmt.Errorf("transform %s: %v", conf.ID, err) - } - - tf := numberMaximum{ - conf: conf, - isObject: conf.Object.SourceKey != "" && conf.Object.TargetKey != "", - } - - return &tf, nil -} - -type numberMaximum struct { - conf numberValConfig - isObject bool -} - -func (tf *numberMaximum) Transform(ctx context.Context, msg *message.Message) ([]*message.Message, error) { - if msg.IsControl() { - return 
[]*message.Message{msg}, nil - } - - var value message.Value - if tf.isObject { - value = msg.GetValue(tf.conf.Object.SourceKey) - } else { - value = bytesToValue(msg.Data()) - } - - if !value.Exists() { - return []*message.Message{msg}, nil - } - - flo64 := math.Max(value.Float(), tf.conf.Value) - - if !tf.isObject { - s := numberFloat64ToString(flo64) - msg.SetData([]byte(s)) - - return []*message.Message{msg}, nil - } - - if err := msg.SetValue(tf.conf.Object.TargetKey, flo64); err != nil { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err) - } - - return []*message.Message{msg}, nil -} - -func (tf *numberMaximum) String() string { - b, _ := json.Marshal(tf.conf) - return string(b) -} diff --git a/v1/transform/number_maximum_test.go b/v1/transform/number_maximum_test.go deleted file mode 100644 index 1850d557..00000000 --- a/v1/transform/number_maximum_test.go +++ /dev/null @@ -1,158 +0,0 @@ -package transform - -import ( - "context" - "reflect" - "testing" - - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" -) - -var _ Transformer = &numberMaximum{} - -var numberMaximumTests = []struct { - name string - cfg config.Config - test []byte - expected [][]byte -}{ - // data tests - { - "data", - config.Config{ - Settings: map[string]interface{}{ - "value": 1, - }, - }, - []byte(`0`), - [][]byte{ - []byte(`1`), - }, - }, - { - "data", - config.Config{ - Settings: map[string]interface{}{ - "value": -1, - }, - }, - []byte(`0`), - [][]byte{ - []byte(`0`), - }, - }, - { - "data", - config.Config{ - Settings: map[string]interface{}{ - "value": -1.1, - }, - }, - []byte(`0.1`), - [][]byte{ - []byte(`0.1`), - }, - }, - // object tests - { - "object", - config.Config{ - Settings: map[string]interface{}{ - "object": map[string]interface{}{ - "source_key": "a", - "target_key": "a", - }, - "value": 1, - }, - }, - []byte(`{"a":0}`), - [][]byte{ - []byte(`{"a":1}`), - }, - }, - { - "object", - config.Config{ - Settings: map[string]interface{}{ - "object": map[string]interface{}{ - "source_key": "a", - "target_key": "a", - }, - "value": -1, - }, - }, - []byte(`{"a":0}`), - [][]byte{ - []byte(`{"a":0}`), - }, - }, - { - "object", - config.Config{ - Settings: map[string]interface{}{ - "object": map[string]interface{}{ - "source_key": "a", - "target_key": "a", - }, - "value": -1.1, - }, - }, - []byte(`{"a":0.1}`), - [][]byte{ - []byte(`{"a":0.1}`), - }, - }, -} - -func TestNumberMaximum(t *testing.T) { - ctx := context.TODO() - for _, test := range numberMaximumTests { - t.Run(test.name, func(t *testing.T) { - tf, err := newNumberMaximum(ctx, test.cfg) - if err != nil { - t.Fatal(err) - } - - msg := message.New().SetData(test.test) - result, err := tf.Transform(ctx, msg) - if err != nil { - t.Error(err) - } - - var data [][]byte - for _, c := range result { - data = append(data, c.Data()) - } - - if !reflect.DeepEqual(data, test.expected) { - t.Errorf("expected %s, got %s", test.expected, data) - } - }) - } -} - -func benchmarkNumberMaximum(b *testing.B, tf *numberMaximum, data []byte) { - ctx := context.TODO() - msg := message.New().SetData(data) - b.ResetTimer() - - for i := 0; i < b.N; i++ { - _, _ = tf.Transform(ctx, msg) - } -} - -func BenchmarkNumberMaximum(b *testing.B) { - for _, test := range numberMaximumTests { - tf, err := newNumberMaximum(context.TODO(), test.cfg) - if err != nil { - b.Fatal(err) - } - - b.Run(test.name, - func(b *testing.B) { - benchmarkNumberMaximum(b, tf, test.test) - }, - ) - } -} diff --git a/v1/transform/number_minimum.go 
b/v1/transform/number_minimum.go deleted file mode 100644 index 124f1d93..00000000 --- a/v1/transform/number_minimum.go +++ /dev/null @@ -1,76 +0,0 @@ -package transform - -import ( - "context" - "encoding/json" - "fmt" - "math" - - "github.com/brexhq/substation/config" - iconfig "github.com/brexhq/substation/internal/config" - "github.com/brexhq/substation/message" -) - -func newNumberMinimum(_ context.Context, cfg config.Config) (*numberMinimum, error) { - conf := numberValConfig{} - if err := iconfig.Decode(cfg.Settings, &conf); err != nil { - return nil, fmt.Errorf("transform number_minimum: %v", err) - } - - if conf.ID == "" { - conf.ID = "number_minimum" - } - - if err := conf.Validate(); err != nil { - return nil, fmt.Errorf("transform %s: %v", conf.ID, err) - } - - tf := numberMinimum{ - conf: conf, - isObject: conf.Object.SourceKey != "" && conf.Object.TargetKey != "", - } - - return &tf, nil -} - -type numberMinimum struct { - conf numberValConfig - isObject bool -} - -func (tf *numberMinimum) Transform(ctx context.Context, msg *message.Message) ([]*message.Message, error) { - if msg.IsControl() { - return []*message.Message{msg}, nil - } - - var value message.Value - if tf.isObject { - value = msg.GetValue(tf.conf.Object.SourceKey) - } else { - value = bytesToValue(msg.Data()) - } - - if !value.Exists() { - return []*message.Message{msg}, nil - } - - flo64 := math.Min(value.Float(), tf.conf.Value) - - if !tf.isObject { - s := numberFloat64ToString(flo64) - msg.SetData([]byte(s)) - - return []*message.Message{msg}, nil - } - - if err := msg.SetValue(tf.conf.Object.TargetKey, flo64); err != nil { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err) - } - - return []*message.Message{msg}, nil -} - -func (tf *numberMinimum) String() string { - b, _ := json.Marshal(tf.conf) - return string(b) -} diff --git a/v1/transform/number_minimum_test.go b/v1/transform/number_minimum_test.go deleted file mode 100644 index d45d649e..00000000 --- a/v1/transform/number_minimum_test.go +++ /dev/null @@ -1,158 +0,0 @@ -package transform - -import ( - "context" - "reflect" - "testing" - - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" -) - -var _ Transformer = &numberMinimum{} - -var numberMinimumTests = []struct { - name string - cfg config.Config - test []byte - expected [][]byte -}{ - // data tests - { - "data", - config.Config{ - Settings: map[string]interface{}{ - "value": 1, - }, - }, - []byte(`0`), - [][]byte{ - []byte(`0`), - }, - }, - { - "data", - config.Config{ - Settings: map[string]interface{}{ - "value": -1, - }, - }, - []byte(`0`), - [][]byte{ - []byte(`-1`), - }, - }, - { - "data", - config.Config{ - Settings: map[string]interface{}{ - "value": -1.1, - }, - }, - []byte(`0.1`), - [][]byte{ - []byte(`-1.1`), - }, - }, - // object tests - { - "object", - config.Config{ - Settings: map[string]interface{}{ - "object": map[string]interface{}{ - "source_key": "a", - "target_key": "a", - }, - "value": 1, - }, - }, - []byte(`{"a":0}`), - [][]byte{ - []byte(`{"a":0}`), - }, - }, - { - "object", - config.Config{ - Settings: map[string]interface{}{ - "object": map[string]interface{}{ - "source_key": "a", - "target_key": "a", - }, - "value": -1, - }, - }, - []byte(`{"a":0}`), - [][]byte{ - []byte(`{"a":-1}`), - }, - }, - { - "object", - config.Config{ - Settings: map[string]interface{}{ - "object": map[string]interface{}{ - "source_key": "a", - "target_key": "a", - }, - "value": -1.1, - }, - }, - []byte(`{"a":0.1}`), - [][]byte{ - []byte(`{"a":-1.1}`), - }, 
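// number_maximum and number_minimum reduce to math.Max and math.Min
// against the configured value, i.e. a one-sided clamp: number_maximum
// enforces a floor (the result never drops below the value) and
// number_minimum enforces a ceiling. Mirroring the tests:
//
//	math.Max(0, 1)      // 1    (floor of 1, as in the maximum tests)
//	math.Min(0.1, -1.1) // -1.1 (ceiling of -1.1, as in the tests above)
//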
- }, -} - -func TestNumberMinimum(t *testing.T) { - ctx := context.TODO() - for _, test := range numberMinimumTests { - t.Run(test.name, func(t *testing.T) { - tf, err := newNumberMinimum(ctx, test.cfg) - if err != nil { - t.Fatal(err) - } - - msg := message.New().SetData(test.test) - result, err := tf.Transform(ctx, msg) - if err != nil { - t.Error(err) - } - - var data [][]byte - for _, c := range result { - data = append(data, c.Data()) - } - - if !reflect.DeepEqual(data, test.expected) { - t.Errorf("expected %s, got %s", test.expected, data) - } - }) - } -} - -func benchmarkNumberMinimum(b *testing.B, tf *numberMinimum, data []byte) { - ctx := context.TODO() - msg := message.New().SetData(data) - b.ResetTimer() - - for i := 0; i < b.N; i++ { - _, _ = tf.Transform(ctx, msg) - } -} - -func BenchmarkNumberMinimum(b *testing.B) { - for _, test := range numberMinimumTests { - tf, err := newNumberMinimum(context.TODO(), test.cfg) - if err != nil { - b.Fatal(err) - } - - b.Run(test.name, - func(b *testing.B) { - benchmarkNumberMinimum(b, tf, test.test) - }, - ) - } -} diff --git a/v1/transform/object_copy.go b/v1/transform/object_copy.go deleted file mode 100644 index edfad8c4..00000000 --- a/v1/transform/object_copy.go +++ /dev/null @@ -1,90 +0,0 @@ -package transform - -import ( - "context" - "encoding/json" - "fmt" - - "github.com/brexhq/substation/config" - iconfig "github.com/brexhq/substation/internal/config" - "github.com/brexhq/substation/message" -) - -type objectCopyConfig struct { - ID string `json:"id"` - Object iconfig.Object `json:"object"` -} - -func (c *objectCopyConfig) Decode(in interface{}) error { - return iconfig.Decode(in, c) -} - -func newObjectCopy(_ context.Context, cfg config.Config) (*objectCopy, error) { - conf := objectCopyConfig{} - if err := conf.Decode(cfg.Settings); err != nil { - return nil, fmt.Errorf("transform object_copy: %v", err) - } - - if conf.ID == "" { - conf.ID = "object_copy" - } - - tf := objectCopy{ - conf: conf, - hasObjectKey: conf.Object.SourceKey != "" && conf.Object.TargetKey == "", - hasObjectSetKey: conf.Object.SourceKey == "" && conf.Object.TargetKey != "", - } - - return &tf, nil -} - -type objectCopy struct { - conf objectCopyConfig - hasObjectKey bool - hasObjectSetKey bool -} - -func (tf *objectCopy) Transform(ctx context.Context, msg *message.Message) ([]*message.Message, error) { - if msg.IsControl() { - return []*message.Message{msg}, nil - } - - if tf.hasObjectKey { - value := msg.GetValue(tf.conf.Object.SourceKey) - if !value.Exists() { - return []*message.Message{msg}, nil - } - - msg.SetData(value.Bytes()) - return []*message.Message{msg}, nil - } - - if tf.hasObjectSetKey { - if len(msg.Data()) == 0 { - return []*message.Message{msg}, nil - } - - outMsg := message.New().SetMetadata(msg.Metadata()) - if err := outMsg.SetValue(tf.conf.Object.TargetKey, msg.Data()); err != nil { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err) - } - - return []*message.Message{outMsg}, nil - } - - value := msg.GetValue(tf.conf.Object.SourceKey) - if !value.Exists() { - return []*message.Message{msg}, nil - } - - if err := msg.SetValue(tf.conf.Object.TargetKey, value); err != nil { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err) - } - - return []*message.Message{msg}, nil -} - -func (tf *objectCopy) String() string { - b, _ := json.Marshal(tf.conf) - return string(b) -} diff --git a/v1/transform/object_copy_test.go b/v1/transform/object_copy_test.go deleted file mode 100644 index 37f36e67..00000000 --- 
a/v1/transform/object_copy_test.go +++ /dev/null @@ -1,185 +0,0 @@ -package transform - -import ( - "context" - "reflect" - "testing" - - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" -) - -var _ Transformer = &objectCopy{} - -var objectCopyTests = []struct { - name string - cfg config.Config - test []byte - expected [][]byte -}{ - { - "object", - config.Config{ - Settings: map[string]interface{}{ - "object": map[string]interface{}{ - "source_key": "a", - "target_key": "c", - }, - }, - }, - []byte(`{"a":"b"}`), - [][]byte{ - []byte(`{"a":"b","c":"b"}`), - }, - }, - { - "unescape object", - config.Config{ - Settings: map[string]interface{}{ - "object": map[string]interface{}{ - "source_key": "a", - "target_key": "a", - }, - }, - }, - []byte(`{"a":"{\"b\":\"c\"}"`), - [][]byte{ - []byte(`{"a":{"b":"c"}`), - }, - }, - { - "unescape array", - config.Config{ - Settings: map[string]interface{}{ - "object": map[string]interface{}{ - "source_key": "a", - "target_key": "a", - }, - }, - }, - []byte(`{"a":"[\"b\",\"c\"]"}`), - [][]byte{ - []byte(`{"a":["b","c"]}`), - }, - }, - { - "from object", - config.Config{ - Settings: map[string]interface{}{ - "object": map[string]interface{}{ - "source_key": "a", - }, - }, - }, - []byte(`{"a":"b"}`), - [][]byte{ - []byte(`b`), - }, - }, - { - "from nested object", - config.Config{ - Settings: map[string]interface{}{ - "object": map[string]interface{}{ - "source_key": "a", - }, - }, - }, - []byte(`{"a":{"b":"c"}}`), - [][]byte{ - []byte(`{"b":"c"}`), - }, - }, - { - "to object", - config.Config{ - Settings: map[string]interface{}{ - "object": map[string]interface{}{ - "target_key": "a", - }, - }, - }, - []byte(`b`), - [][]byte{ - []byte(`{"a":"b"}`), - }, - }, - { - "to nested object", - config.Config{ - Settings: map[string]interface{}{ - "object": map[string]interface{}{ - "target_key": "a.b", - }, - }, - }, - []byte(`c`), - [][]byte{ - []byte(`{"a":{"b":"c"}}`), - }, - }, - { - "to object base64", - config.Config{ - Settings: map[string]interface{}{ - "object": map[string]interface{}{ - "target_key": "a", - }, - }, - }, - []byte{120, 156, 5, 192, 49, 13, 0, 0, 0, 194, 48, 173, 76, 2, 254, 143, 166, 29, 2, 93, 1, 54}, - [][]byte{ - []byte(`{"a":"eJwFwDENAAAAwjCtTAL+j6YdAl0BNg=="}`), - }, - }, -} - -func TestObjectCopy(t *testing.T) { - ctx := context.TODO() - for _, test := range objectCopyTests { - t.Run(test.name, func(t *testing.T) { - tf, err := newObjectCopy(ctx, test.cfg) - if err != nil { - t.Fatal(err) - } - - msg := message.New().SetData(test.test) - result, err := tf.Transform(ctx, msg) - if err != nil { - t.Error(err) - } - - var r [][]byte - for _, c := range result { - r = append(r, c.Data()) - } - - if !reflect.DeepEqual(r, test.expected) { - t.Errorf("expected %s, got %s", test.expected, r) - } - }) - } -} - -func benchmarkObjectCopy(b *testing.B, tf *objectCopy, data []byte) { - ctx := context.TODO() - for i := 0; i < b.N; i++ { - msg := message.New().SetData(data) - _, _ = tf.Transform(ctx, msg) - } -} - -func BenchmarkObjectCopy(b *testing.B) { - for _, test := range objectCopyTests { - tf, err := newObjectCopy(context.TODO(), test.cfg) - if err != nil { - b.Fatal(err) - } - - b.Run(test.name, - func(b *testing.B) { - benchmarkObjectCopy(b, tf, test.test) - }, - ) - } -} diff --git a/v1/transform/object_delete.go b/v1/transform/object_delete.go deleted file mode 100644 index 12bb042a..00000000 --- a/v1/transform/object_delete.go +++ /dev/null @@ -1,71 +0,0 @@ -package transform - -import ( - "context" - 
"encoding/json" - "fmt" - - "github.com/brexhq/substation/config" - iconfig "github.com/brexhq/substation/internal/config" - "github.com/brexhq/substation/internal/errors" - "github.com/brexhq/substation/message" -) - -type objectDeleteConfig struct { - ID string `json:"id"` - Object iconfig.Object `json:"object"` -} - -func (c *objectDeleteConfig) Decode(in interface{}) error { - return iconfig.Decode(in, c) -} - -func (c *objectDeleteConfig) Validate() error { - if c.Object.SourceKey == "" { - return fmt.Errorf("object_source_key: %v", errors.ErrMissingRequiredOption) - } - - return nil -} - -func newObjectDelete(_ context.Context, cfg config.Config) (*objectDelete, error) { - conf := objectDeleteConfig{} - if err := conf.Decode(cfg.Settings); err != nil { - return nil, fmt.Errorf("transform object_delete: %v", err) - } - - if conf.ID == "" { - conf.ID = "object_delete" - } - - if err := conf.Validate(); err != nil { - return nil, fmt.Errorf("transform %s: %v", conf.ID, err) - } - - proc := objectDelete{ - conf: conf, - } - - return &proc, nil -} - -type objectDelete struct { - conf objectDeleteConfig -} - -func (tf *objectDelete) Transform(_ context.Context, msg *message.Message) ([]*message.Message, error) { - if msg.IsControl() { - return []*message.Message{msg}, nil - } - - if err := msg.DeleteValue(tf.conf.Object.SourceKey); err != nil { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err) - } - - return []*message.Message{msg}, nil -} - -func (tf *objectDelete) String() string { - b, _ := json.Marshal(tf.conf) - return string(b) -} diff --git a/v1/transform/object_delete_test.go b/v1/transform/object_delete_test.go deleted file mode 100644 index 86e43855..00000000 --- a/v1/transform/object_delete_test.go +++ /dev/null @@ -1,127 +0,0 @@ -package transform - -import ( - "context" - "reflect" - "testing" - - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" -) - -var _ Transformer = &objectDelete{} - -var objectDeleteTests = []struct { - name string - cfg config.Config - test []byte - expected [][]byte -}{ - { - "object", - config.Config{ - Settings: map[string]interface{}{ - "object": map[string]interface{}{ - "source_key": "c", - }, - }, - }, - []byte(`{"a":"b","c":{"d":"e"}}`), - [][]byte{ - []byte(`{"a":"b"}`), - }, - }, - { - "array", - config.Config{ - Settings: map[string]interface{}{ - "object": map[string]interface{}{ - "source_key": "c", - }, - }, - }, - []byte(`{"a":"b","c":["d","e"]}`), - [][]byte{ - []byte(`{"a":"b"}`), - }, - }, - { - "string", - config.Config{ - Settings: map[string]interface{}{ - "object": map[string]interface{}{ - "source_key": "c", - }, - }, - }, - []byte(`{"a":"b","c":"d"}`), - [][]byte{ - []byte(`{"a":"b"}`), - }, - }, - - { - "int", - config.Config{ - Settings: map[string]interface{}{ - "object": map[string]interface{}{ - "source_key": "c", - }, - }, - }, - []byte(`{"a":"b","c":1}`), - [][]byte{ - []byte(`{"a":"b"}`), - }, - }, -} - -func TestObjectDelete(t *testing.T) { - ctx := context.TODO() - for _, test := range objectDeleteTests { - t.Run(test.name, func(t *testing.T) { - tf, err := newObjectDelete(ctx, test.cfg) - if err != nil { - t.Fatal(err) - } - - msg := message.New().SetData(test.test) - result, err := tf.Transform(ctx, msg) - if err != nil { - t.Error(err) - } - - var data [][]byte - for _, c := range result { - data = append(data, c.Data()) - } - - if !reflect.DeepEqual(data, test.expected) { - t.Errorf("expected %s, got %s", test.expected, data) - } - }) - } -} - -func benchmarkObjectDelete(b 
*testing.B, tf *objectDelete, data []byte) { - ctx := context.TODO() - for i := 0; i < b.N; i++ { - msg := message.New().SetData(data) - _, _ = tf.Transform(ctx, msg) - } -} - -func BenchmarkObjectDelete(b *testing.B) { - for _, test := range objectDeleteTests { - tf, err := newObjectDelete(context.TODO(), test.cfg) - if err != nil { - b.Fatal(err) - } - - b.Run(test.name, - func(b *testing.B) { - benchmarkObjectDelete(b, tf, test.test) - }, - ) - } -} diff --git a/v1/transform/object_insert.go b/v1/transform/object_insert.go deleted file mode 100644 index 21a78303..00000000 --- a/v1/transform/object_insert.go +++ /dev/null @@ -1,78 +0,0 @@ -package transform - -import ( - "context" - "encoding/json" - "fmt" - - "github.com/brexhq/substation/config" - iconfig "github.com/brexhq/substation/internal/config" - "github.com/brexhq/substation/internal/errors" - "github.com/brexhq/substation/message" -) - -type objectInsertConfig struct { - // Value inserted into the object. - Value interface{} `json:"value"` - - ID string `json:"id"` - Object iconfig.Object `json:"object"` -} - -func (c *objectInsertConfig) Decode(in interface{}) error { - return iconfig.Decode(in, c) -} - -func (c *objectInsertConfig) Validate() error { - if c.Object.TargetKey == "" { - return fmt.Errorf("object_target_key: %v", errors.ErrMissingRequiredOption) - } - - if c.Value == nil { - return fmt.Errorf("value: %v", errors.ErrMissingRequiredOption) - } - - return nil -} - -func newObjectInsert(_ context.Context, cfg config.Config) (*objectInsert, error) { - conf := objectInsertConfig{} - if err := conf.Decode(cfg.Settings); err != nil { - return nil, fmt.Errorf("transform object_insert: %v", err) - } - - if conf.ID == "" { - conf.ID = "object_insert" - } - - if err := conf.Validate(); err != nil { - return nil, fmt.Errorf("transform %s: %v", conf.ID, err) - } - - tf := objectInsert{ - conf: conf, - } - - return &tf, nil -} - -type objectInsert struct { - conf objectInsertConfig -} - -func (tf *objectInsert) Transform(ctx context.Context, msg *message.Message) ([]*message.Message, error) { - if msg.IsControl() { - return []*message.Message{msg}, nil - } - - if err := msg.SetValue(tf.conf.Object.TargetKey, tf.conf.Value); err != nil { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err) - } - - return []*message.Message{msg}, nil -} - -func (tf *objectInsert) String() string { - b, _ := json.Marshal(tf.conf) - return string(b) -} diff --git a/v1/transform/object_insert_test.go b/v1/transform/object_insert_test.go deleted file mode 100644 index 31bd1ad2..00000000 --- a/v1/transform/object_insert_test.go +++ /dev/null @@ -1,159 +0,0 @@ -package transform - -import ( - "context" - "reflect" - "testing" - - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" -) - -var _ Transformer = &objectInsert{} - -var objectInsertTests = []struct { - name string - cfg config.Config - test []byte - expected [][]byte -}{ - { - "object", - config.Config{ - Settings: map[string]interface{}{ - "object": map[string]interface{}{ - "target_key": "a", - }, "value": `{"b":"c"}`, - }, - }, - []byte(`{"a":"b"}`), - [][]byte{ - []byte(`{"a":{"b":"c"}}`), - }, - }, - { - "array", - config.Config{ - Settings: map[string]interface{}{ - "object": map[string]interface{}{ - "target_key": "a", - }, "value": []string{"b", "c"}, - }, - }, - []byte(`{"a":"b"}`), - [][]byte{ - []byte(`{"a":["b","c"]}`), - }, - }, - { - "map", - config.Config{ - Settings: map[string]interface{}{ - "object": map[string]interface{}{ - "target_key": 
"a", - }, "value": map[string]string{ - "b": "c", - }, - }, - }, - []byte(`{"a":"b"}`), - [][]byte{ - []byte(`{"a":{"b":"c"}}`), - }, - }, - { - "string", - config.Config{ - Settings: map[string]interface{}{ - "object": map[string]interface{}{ - "target_key": "a", - }, - "value": "b", - }, - }, - []byte{}, - [][]byte{ - []byte(`{"a":"b"}`), - }, - }, - { - "int", - config.Config{ - Settings: map[string]interface{}{ - "object": map[string]interface{}{ - "target_key": "a", - }, "value": 1, - }, - }, - []byte(`{"a":"b"}`), - [][]byte{ - []byte(`{"a":1}`), - }, - }, - { - "bytes", - config.Config{ - Settings: map[string]interface{}{ - "object": map[string]interface{}{ - "target_key": "a", - }, - "value": []byte{120, 156, 5, 192, 49, 13, 0, 0, 0, 194, 48, 173, 76, 2, 254, 143, 166, 29, 2, 93, 1, 54}, - }, - }, - []byte(`{"a":"b"}`), - [][]byte{ - []byte(`{"a":"eJwFwDENAAAAwjCtTAL+j6YdAl0BNg=="}`), - }, - }, -} - -func TestObjectInsert(t *testing.T) { - ctx := context.TODO() - - for _, test := range objectInsertTests { - t.Run(test.name, func(t *testing.T) { - tf, err := newObjectInsert(ctx, test.cfg) - if err != nil { - t.Fatal(err) - } - - msg := message.New().SetData(test.test) - result, err := tf.Transform(ctx, msg) - if err != nil { - t.Error(err) - } - - var r [][]byte - for _, c := range result { - r = append(r, c.Data()) - } - - if !reflect.DeepEqual(r, test.expected) { - t.Errorf("expected %s, got %s", test.expected, r) - } - }) - } -} - -func benchmarkObjectInsert(b *testing.B, tf *objectInsert, data []byte) { - ctx := context.TODO() - for i := 0; i < b.N; i++ { - msg := message.New().SetData(data) - _, _ = tf.Transform(ctx, msg) - } -} - -func BenchmarkObjectInsert(b *testing.B) { - for _, test := range objectInsertTests { - tf, err := newObjectInsert(context.TODO(), test.cfg) - if err != nil { - b.Fatal(err) - } - - b.Run(test.name, - func(b *testing.B) { - benchmarkObjectInsert(b, tf, test.test) - }, - ) - } -} diff --git a/v1/transform/object_jq.go b/v1/transform/object_jq.go deleted file mode 100644 index 70c68727..00000000 --- a/v1/transform/object_jq.go +++ /dev/null @@ -1,117 +0,0 @@ -package transform - -import ( - "context" - "encoding/json" - "fmt" - - "github.com/brexhq/substation/config" - iconfig "github.com/brexhq/substation/internal/config" - "github.com/brexhq/substation/internal/errors" - "github.com/brexhq/substation/message" - "github.com/itchyny/gojq" -) - -// errObjectJQNoOutputGenerated is returned when jq generates no output. -var errObjectJQNoOutputGenerated = fmt.Errorf("no output generated") - -type objectJQConfig struct { - // Filter is the jq filter applied to data. 
- Filter string `json:"filter"` - - ID string `json:"id"` -} - -func (c *objectJQConfig) Decode(in interface{}) error { - return iconfig.Decode(in, c) -} - -func (c *objectJQConfig) Validate() error { - if c.Filter == "" { - return fmt.Errorf("filter: %v", errors.ErrMissingRequiredOption) - } - - return nil -} - -func newObjectJQ(_ context.Context, cfg config.Config) (*objectJQ, error) { - conf := objectJQConfig{} - if err := conf.Decode(cfg.Settings); err != nil { - return nil, fmt.Errorf("transform object_jq: %v", err) - } - - if conf.ID == "" { - conf.ID = "object_jq" - } - - if err := conf.Validate(); err != nil { - return nil, fmt.Errorf("transform %s: %v", conf.ID, err) - } - - q, err := gojq.Parse(conf.Filter) - if err != nil { - return nil, err - } - - tf := objectJQ{ - conf: conf, - query: q, - } - - return &tf, nil -} - -type objectJQ struct { - conf objectJQConfig - - query *gojq.Query -} - -func (tf *objectJQ) Transform(ctx context.Context, msg *message.Message) ([]*message.Message, error) { - if msg.IsControl() { - return []*message.Message{msg}, nil - } - - var i interface{} - if err := json.Unmarshal(msg.Data(), &i); err != nil { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err) - } - - var arr []interface{} - iter := tf.query.RunWithContext(ctx, i) - - for { - v, ok := iter.Next() - if !ok { - break - } - if err, ok := v.(error); ok { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err) - } - - arr = append(arr, v) - } - - var err error - var b []byte - switch len(arr) { - case 0: - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, errObjectJQNoOutputGenerated) - case 1: - b, err = json.Marshal(arr[0]) - default: - b, err = json.Marshal(arr) - } - - if err != nil { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err) - } - - msg.SetData(b) - return []*message.Message{msg}, nil -} - -func (tf *objectJQ) String() string { - b, _ := json.Marshal(tf.conf) - return string(b) -} diff --git a/v1/transform/object_jq_test.go b/v1/transform/object_jq_test.go deleted file mode 100644 index fcdd5f81..00000000 --- a/v1/transform/object_jq_test.go +++ /dev/null @@ -1,156 +0,0 @@ -package transform - -import ( - "context" - "reflect" - "testing" - - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" -) - -var _ Transformer = &objectJQ{} - -var objectJQTests = []struct { - name string - cfg config.Config - test []byte - expected [][]byte - err error -}{ - { - "access", - config.Config{ - Settings: map[string]interface{}{ - "filter": `.a`, - }, - }, - []byte(`{"a":"b"}`), - [][]byte{ - []byte(`"b"`), - }, - nil, - }, - { - "access", - config.Config{ - Settings: map[string]interface{}{ - "filter": `.a, .c`, - }, - }, - []byte(`{"a":"b","c":"d"}`), - [][]byte{ - []byte(`["b","d"]`), - }, - nil, - }, - { - "access", - config.Config{ - Settings: map[string]interface{}{ - "filter": `.a`, - }, - }, - []byte(`{"a":{"b":"c"}}`), - [][]byte{ - []byte(`{"b":"c"}`), - }, - nil, - }, - { - "array", - config.Config{ - Settings: map[string]interface{}{ - "filter": `.a`, - }, - }, - []byte(`{"a":["b","c","d"]}`), - [][]byte{ - []byte(`["b","c","d"]`), - }, - nil, - }, - { - "slice", - config.Config{ - Settings: map[string]interface{}{ - "filter": `.a[-1:]`, - }, - }, - []byte(`{"a":["b","c","d","e","f","g"]}`), - [][]byte{ - []byte(`["g"]`), - }, - nil, - }, - { - "recursion", - config.Config{ - Settings: map[string]interface{}{ - "filter": `walk( if type == "object" then - with_entries( select( - (.value != "") and - (.value != {}) and - 
(.value != null) - ) ) - else - . end)`, - }, - }, - []byte(`{"a":{"b":{"c":""}},"d":null,"e":"f"}`), - [][]byte{ - []byte(`{"e":"f"}`), - }, - nil, - }, -} - -func TestObjectJQ(t *testing.T) { - ctx := context.TODO() - for _, test := range objectJQTests { - t.Run(test.name, func(t *testing.T) { - tf, err := newObjectJQ(ctx, test.cfg) - if err != nil { - t.Fatal(err) - } - - msg := message.New().SetData(test.test) - result, err := tf.Transform(ctx, msg) - if err != nil { - t.Error(err) - } - - var data [][]byte - for _, c := range result { - data = append(data, c.Data()) - } - - if !reflect.DeepEqual(data, test.expected) { - t.Errorf("expected %s, got %s", test.expected, data) - } - }) - } -} - -func benchmarkObjectJQ(b *testing.B, tf *objectJQ, data []byte) { - ctx := context.TODO() - for i := 0; i < b.N; i++ { - msg := message.New().SetData(data) - _, _ = tf.Transform(ctx, msg) - } -} - -func BenchmarkObjectJQ(b *testing.B) { - for _, test := range objectJQTests { - tf, err := newObjectJQ(context.TODO(), test.cfg) - if err != nil { - b.Fatal(err) - } - - b.Run(test.name, - func(b *testing.B) { - benchmarkObjectJQ(b, tf, test.test) - }, - ) - } -} diff --git a/v1/transform/object_to_boolean.go b/v1/transform/object_to_boolean.go deleted file mode 100644 index e6c84806..00000000 --- a/v1/transform/object_to_boolean.go +++ /dev/null @@ -1,80 +0,0 @@ -package transform - -import ( - "context" - "encoding/json" - "fmt" - - "github.com/brexhq/substation/config" - iconfig "github.com/brexhq/substation/internal/config" - "github.com/brexhq/substation/internal/errors" - "github.com/brexhq/substation/message" -) - -type objectToBooleanConfig struct { - ID string `json:"id"` - Object iconfig.Object `json:"object"` -} - -func (c *objectToBooleanConfig) Decode(in interface{}) error { - return iconfig.Decode(in, c) -} - -func (c *objectToBooleanConfig) Validate() error { - if c.Object.SourceKey == "" && c.Object.TargetKey != "" { - return fmt.Errorf("object_source_key: %v", errors.ErrMissingRequiredOption) - } - - if c.Object.SourceKey != "" && c.Object.TargetKey == "" { - return fmt.Errorf("object_target_key: %v", errors.ErrMissingRequiredOption) - } - - return nil -} - -func newObjectToBoolean(_ context.Context, cfg config.Config) (*objectToBoolean, error) { - conf := objectToBooleanConfig{} - if err := conf.Decode(cfg.Settings); err != nil { - return nil, fmt.Errorf("transform object_to_boolean: %v", err) - } - - if conf.ID == "" { - conf.ID = "object_to_boolean" - } - - if err := conf.Validate(); err != nil { - return nil, fmt.Errorf("transform %s: %v", conf.ID, err) - } - - tf := objectToBoolean{ - conf: conf, - } - - return &tf, nil -} - -type objectToBoolean struct { - conf objectToBooleanConfig -} - -func (tf *objectToBoolean) String() string { - b, _ := json.Marshal(tf.conf) - return string(b) -} - -func (tf *objectToBoolean) Transform(ctx context.Context, msg *message.Message) ([]*message.Message, error) { - if msg.IsControl() { - return []*message.Message{msg}, nil - } - - value := msg.GetValue(tf.conf.Object.SourceKey) - if !value.Exists() { - return []*message.Message{msg}, nil - } - - if err := msg.SetValue(tf.conf.Object.TargetKey, value.Bool()); err != nil { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err) - } - - return []*message.Message{msg}, nil -} diff --git a/v1/transform/object_to_boolean_test.go b/v1/transform/object_to_boolean_test.go deleted file mode 100644 index 8bb4ccb4..00000000 --- a/v1/transform/object_to_boolean_test.go +++ /dev/null @@ -1,161 +0,0 @@ 
-package transform - -import ( - "context" - "reflect" - "testing" - - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" -) - -var _ Transformer = &objectToBoolean{} - -var objectToBooleanTests = []struct { - name string - cfg config.Config - test []byte - expected [][]byte -}{ - { - "float to_bool", - config.Config{ - Settings: map[string]interface{}{ - "object": map[string]interface{}{ - "source_key": "a", - "target_key": "a", - }, - }, - }, - []byte(`{"a":1.0}`), - [][]byte{ - []byte(`{"a":true}`), - }, - }, - { - "float to_bool", - config.Config{ - Settings: map[string]interface{}{ - "object": map[string]interface{}{ - "source_key": "a", - "target_key": "a", - }, - }, - }, - []byte(`{"a":0.0}`), - [][]byte{ - []byte(`{"a":false}`), - }, - }, - { - "int to_bool", - config.Config{ - Settings: map[string]interface{}{ - "object": map[string]interface{}{ - "source_key": "a", - "target_key": "a", - }, - }, - }, - []byte(`{"a":1}`), - [][]byte{ - []byte(`{"a":true}`), - }, - }, - { - "int to_bool", - config.Config{ - Settings: map[string]interface{}{ - "object": map[string]interface{}{ - "source_key": "a", - "target_key": "a", - }, - }, - }, - []byte(`{"a":0}`), - [][]byte{ - []byte(`{"a":false}`), - }, - }, - { - "str to_bool", - config.Config{ - Settings: map[string]interface{}{ - "object": map[string]interface{}{ - "source_key": "a", - "target_key": "a", - }, - }, - }, - []byte(`{"a":"true"}`), - [][]byte{ - []byte(`{"a":true}`), - }, - }, - { - "str to_bool", - config.Config{ - Settings: map[string]interface{}{ - "object": map[string]interface{}{ - "source_key": "a", - "target_key": "a", - }, - }, - }, - []byte(`{"a":"false"}`), - [][]byte{ - []byte(`{"a":false}`), - }, - }, -} - -func TestObjectToBoolean(t *testing.T) { - ctx := context.TODO() - - for _, test := range objectToBooleanTests { - t.Run(test.name, func(t *testing.T) { - tf, err := newObjectToBoolean(ctx, test.cfg) - if err != nil { - t.Fatal(err) - } - - msg := message.New().SetData(test.test) - result, err := tf.Transform(ctx, msg) - if err != nil { - t.Error(err) - } - - var data [][]byte - for _, c := range result { - data = append(data, c.Data()) - } - - if !reflect.DeepEqual(data, test.expected) { - t.Errorf("expected %s, got %s", test.expected, data) - } - }) - } -} - -func benchmarkObjectToBoolean(b *testing.B, tf *objectToBoolean, data []byte) { - ctx := context.TODO() - for i := 0; i < b.N; i++ { - msg := message.New().SetData(data) - _, _ = tf.Transform(ctx, msg) - } -} - -func BenchmarkObjectToBoolean(b *testing.B) { - for _, test := range objectToBooleanTests { - tf, err := newObjectToBoolean(context.TODO(), test.cfg) - if err != nil { - b.Fatal(err) - } - - b.Run(test.name, - func(b *testing.B) { - benchmarkObjectToBoolean(b, tf, test.test) - }, - ) - } -} diff --git a/v1/transform/object_to_float.go b/v1/transform/object_to_float.go deleted file mode 100644 index 24d1ac5e..00000000 --- a/v1/transform/object_to_float.go +++ /dev/null @@ -1,76 +0,0 @@ -package transform - -import ( - "context" - "encoding/json" - "fmt" - - "github.com/brexhq/substation/config" - iconfig "github.com/brexhq/substation/internal/config" - "github.com/brexhq/substation/internal/errors" - "github.com/brexhq/substation/message" -) - -type objectToFloatConfig struct { - ID string `json:"id"` - Object iconfig.Object `json:"object"` -} - -func (c *objectToFloatConfig) Decode(in interface{}) error { - return iconfig.Decode(in, c) -} - -func (c *objectToFloatConfig) Validate() error { - if c.Object.SourceKey == "" 
&& c.Object.TargetKey != "" { - return fmt.Errorf("object_source_key: %v", errors.ErrMissingRequiredOption) - } - - if c.Object.SourceKey != "" && c.Object.TargetKey == "" { - return fmt.Errorf("object_target_key: %v", errors.ErrMissingRequiredOption) - } - - return nil -} - -func newObjectToFloat(_ context.Context, cfg config.Config) (*objectToFloat, error) { - conf := objectToFloatConfig{} - if err := conf.Decode(cfg.Settings); err != nil { - return nil, fmt.Errorf("transform object_to_float: %v", err) - } - - if conf.ID == "" { - conf.ID = "object_to_float" - } - - tf := objectToFloat{ - conf: conf, - } - - return &tf, nil -} - -type objectToFloat struct { - conf objectToFloatConfig -} - -func (tf *objectToFloat) Transform(ctx context.Context, msg *message.Message) ([]*message.Message, error) { - if msg.IsControl() { - return []*message.Message{msg}, nil - } - - value := msg.GetValue(tf.conf.Object.SourceKey) - if !value.Exists() { - return []*message.Message{msg}, nil - } - - if err := msg.SetValue(tf.conf.Object.TargetKey, value.Float()); err != nil { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err) - } - - return []*message.Message{msg}, nil -} - -func (tf *objectToFloat) String() string { - b, _ := json.Marshal(tf.conf) - return string(b) -} diff --git a/v1/transform/object_to_float_test.go b/v1/transform/object_to_float_test.go deleted file mode 100644 index 42a54f35..00000000 --- a/v1/transform/object_to_float_test.go +++ /dev/null @@ -1,87 +0,0 @@ -package transform - -import ( - "context" - "reflect" - "testing" - - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" -) - -var _ Transformer = &objectToFloat{} - -var objectToFloatTests = []struct { - name string - cfg config.Config - test []byte - expected [][]byte -}{ - { - "str to_float", - config.Config{ - Settings: map[string]interface{}{ - "object": map[string]interface{}{ - "source_key": "a", - "target_key": "a", - }, - "type": "float", - }, - }, - []byte(`{"a":"1.1"}`), - [][]byte{ - []byte(`{"a":1.1}`), - }, - }, -} - -func TestObjectToFloat(t *testing.T) { - ctx := context.TODO() - - for _, test := range objectToFloatTests { - t.Run(test.name, func(t *testing.T) { - tf, err := newObjectToFloat(ctx, test.cfg) - if err != nil { - t.Fatal(err) - } - - msg := message.New().SetData(test.test) - result, err := tf.Transform(ctx, msg) - if err != nil { - t.Error(err) - } - - var data [][]byte - for _, c := range result { - data = append(data, c.Data()) - } - - if !reflect.DeepEqual(data, test.expected) { - t.Errorf("expected %s, got %s", test.expected, data) - } - }) - } -} - -func benchmarkObjectToFloat(b *testing.B, tf *objectToFloat, data []byte) { - ctx := context.TODO() - for i := 0; i < b.N; i++ { - msg := message.New().SetData(data) - _, _ = tf.Transform(ctx, msg) - } -} - -func BenchmarkObjectToFloat(b *testing.B) { - for _, test := range objectToFloatTests { - tf, err := newObjectToFloat(context.TODO(), test.cfg) - if err != nil { - b.Fatal(err) - } - - b.Run(test.name, - func(b *testing.B) { - benchmarkObjectToFloat(b, tf, test.test) - }, - ) - } -} diff --git a/v1/transform/object_to_integer.go b/v1/transform/object_to_integer.go deleted file mode 100644 index 4cdd4fc7..00000000 --- a/v1/transform/object_to_integer.go +++ /dev/null @@ -1,76 +0,0 @@ -package transform - -import ( - "context" - "encoding/json" - "fmt" - - "github.com/brexhq/substation/config" - iconfig "github.com/brexhq/substation/internal/config" - "github.com/brexhq/substation/internal/errors" - 
"github.com/brexhq/substation/message" -) - -type objectToIntegerConfig struct { - ID string `json:"id"` - Object iconfig.Object `json:"object"` -} - -func (c *objectToIntegerConfig) Decode(in interface{}) error { - return iconfig.Decode(in, c) -} - -func (c *objectToIntegerConfig) Validate() error { - if c.Object.SourceKey == "" && c.Object.TargetKey != "" { - return fmt.Errorf("object_source_key: %v", errors.ErrMissingRequiredOption) - } - - if c.Object.SourceKey != "" && c.Object.TargetKey == "" { - return fmt.Errorf("object_target_key: %v", errors.ErrMissingRequiredOption) - } - - return nil -} - -func newObjectToInteger(_ context.Context, cfg config.Config) (*objectToInteger, error) { - conf := objectToIntegerConfig{} - if err := conf.Decode(cfg.Settings); err != nil { - return nil, fmt.Errorf("transform object_to_integer: %v", err) - } - - if conf.ID == "" { - conf.ID = "object_to_integer" - } - - tf := objectToInteger{ - conf: conf, - } - - return &tf, nil -} - -type objectToInteger struct { - conf objectToIntegerConfig -} - -func (tf *objectToInteger) Transform(ctx context.Context, msg *message.Message) ([]*message.Message, error) { - if msg.IsControl() { - return []*message.Message{msg}, nil - } - - value := msg.GetValue(tf.conf.Object.SourceKey) - if !value.Exists() { - return []*message.Message{msg}, nil - } - - if err := msg.SetValue(tf.conf.Object.TargetKey, value.Int()); err != nil { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err) - } - - return []*message.Message{msg}, nil -} - -func (tf *objectToInteger) String() string { - b, _ := json.Marshal(tf.conf) - return string(b) -} diff --git a/v1/transform/object_to_integer_test.go b/v1/transform/object_to_integer_test.go deleted file mode 100644 index 21af653a..00000000 --- a/v1/transform/object_to_integer_test.go +++ /dev/null @@ -1,101 +0,0 @@ -package transform - -import ( - "context" - "reflect" - "testing" - - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" -) - -var _ Transformer = &objectToInteger{} - -var objectToIntegerTests = []struct { - name string - cfg config.Config - test []byte - expected [][]byte -}{ - { - "float to_int", - config.Config{ - Settings: map[string]interface{}{ - "object": map[string]interface{}{ - "source_key": "a", - "target_key": "a", - }, - }, - }, - []byte(`{"a":1.1}`), - [][]byte{ - []byte(`{"a":1}`), - }, - }, - { - "str to_int", - config.Config{ - Settings: map[string]interface{}{ - "object": map[string]interface{}{ - "source_key": "a", - "target_key": "a", - }, - }, - }, - []byte(`{"a":"-1"}`), - [][]byte{ - []byte(`{"a":-1}`), - }, - }, -} - -func TestObjectToInteger(t *testing.T) { - ctx := context.TODO() - - for _, test := range objectToIntegerTests { - t.Run(test.name, func(t *testing.T) { - tf, err := newObjectToInteger(ctx, test.cfg) - if err != nil { - t.Fatal(err) - } - - msg := message.New().SetData(test.test) - result, err := tf.Transform(ctx, msg) - if err != nil { - t.Error(err) - } - - var data [][]byte - for _, c := range result { - data = append(data, c.Data()) - } - - if !reflect.DeepEqual(data, test.expected) { - t.Errorf("expected %s, got %s", test.expected, data) - } - }) - } -} - -func benchmarkObjectToInteger(b *testing.B, tf *objectToInteger, data []byte) { - ctx := context.TODO() - for i := 0; i < b.N; i++ { - msg := message.New().SetData(data) - _, _ = tf.Transform(ctx, msg) - } -} - -func BenchmarkObjectToInteger(b *testing.B) { - for _, test := range objectToIntegerTests { - tf, err := newObjectToInteger(context.TODO(), 
test.cfg) - if err != nil { - b.Fatal(err) - } - - b.Run(test.name, - func(b *testing.B) { - benchmarkObjectToInteger(b, tf, test.test) - }, - ) - } -} diff --git a/v1/transform/object_to_string.go b/v1/transform/object_to_string.go deleted file mode 100644 index 6bf886dd..00000000 --- a/v1/transform/object_to_string.go +++ /dev/null @@ -1,76 +0,0 @@ -package transform - -import ( - "context" - "encoding/json" - "fmt" - - "github.com/brexhq/substation/config" - iconfig "github.com/brexhq/substation/internal/config" - "github.com/brexhq/substation/internal/errors" - "github.com/brexhq/substation/message" -) - -type objectToStringConfig struct { - ID string `json:"id"` - Object iconfig.Object `json:"object"` -} - -func (c *objectToStringConfig) Decode(in interface{}) error { - return iconfig.Decode(in, c) -} - -func (c *objectToStringConfig) Validate() error { - if c.Object.SourceKey == "" && c.Object.TargetKey != "" { - return fmt.Errorf("object_source_key: %v", errors.ErrMissingRequiredOption) - } - - if c.Object.SourceKey != "" && c.Object.TargetKey == "" { - return fmt.Errorf("object_target_key: %v", errors.ErrMissingRequiredOption) - } - - return nil -} - -func newObjectToString(_ context.Context, cfg config.Config) (*objectToString, error) { - conf := objectToStringConfig{} - if err := conf.Decode(cfg.Settings); err != nil { - return nil, fmt.Errorf("transform object_to_string: %v", err) - } - - if conf.ID == "" { - conf.ID = "object_to_string" - } - - tf := objectToString{ - conf: conf, - } - - return &tf, nil -} - -type objectToString struct { - conf objectToStringConfig -} - -func (tf *objectToString) Transform(ctx context.Context, msg *message.Message) ([]*message.Message, error) { - if msg.IsControl() { - return []*message.Message{msg}, nil - } - - value := msg.GetValue(tf.conf.Object.SourceKey) - if !value.Exists() { - return []*message.Message{msg}, nil - } - - if err := msg.SetValue(tf.conf.Object.TargetKey, value.String()); err != nil { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err) - } - - return []*message.Message{msg}, nil -} - -func (tf *objectToString) String() string { - b, _ := json.Marshal(tf.conf) - return string(b) -} diff --git a/v1/transform/object_to_string_test.go b/v1/transform/object_to_string_test.go deleted file mode 100644 index c7b9b182..00000000 --- a/v1/transform/object_to_string_test.go +++ /dev/null @@ -1,116 +0,0 @@ -package transform - -import ( - "context" - "reflect" - "testing" - - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" -) - -var _ Transformer = &objectToString{} - -var objectToStringTests = []struct { - name string - cfg config.Config - test []byte - expected [][]byte -}{ - { - "bool to_str", - config.Config{ - Settings: map[string]interface{}{ - "object": map[string]interface{}{ - "source_key": "a", - "target_key": "a", - }, - }, - }, - []byte(`{"a":true}`), - [][]byte{ - []byte(`{"a":"true"}`), - }, - }, - { - "float to_str", - config.Config{ - Settings: map[string]interface{}{ - "object": map[string]interface{}{ - "source_key": "a", - "target_key": "a", - }, - }, - }, - []byte(`{"a":1.1}`), - [][]byte{ - []byte(`{"a":"1.1"}`), - }, - }, - { - "int to_str", - config.Config{ - Settings: map[string]interface{}{ - "object": map[string]interface{}{ - "source_key": "a", - "target_key": "a", - }, - }, - }, - []byte(`{"a":1}`), - [][]byte{ - []byte(`{"a":"1"}`), - }, - }, -} - -func TestObjectToString(t *testing.T) { - ctx := context.TODO() - - for _, test := range objectToStringTests { - 
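- // Each case runs as a subtest and compares the transformed output byte-for-byte against the expected value.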
t.Run(test.name, func(t *testing.T) { - tf, err := newObjectToString(ctx, test.cfg) - if err != nil { - t.Fatal(err) - } - - msg := message.New().SetData(test.test) - result, err := tf.Transform(ctx, msg) - if err != nil { - t.Error(err) - } - - var data [][]byte - for _, c := range result { - data = append(data, c.Data()) - } - - if !reflect.DeepEqual(data, test.expected) { - t.Errorf("expected %s, got %s", test.expected, data) - } - }) - } -} - -func benchmarkObjectToString(b *testing.B, tf *objectToString, data []byte) { - ctx := context.TODO() - for i := 0; i < b.N; i++ { - msg := message.New().SetData(data) - _, _ = tf.Transform(ctx, msg) - } -} - -func BenchmarkObjectToString(b *testing.B) { - for _, test := range objectToStringTests { - tf, err := newObjectToString(context.TODO(), test.cfg) - if err != nil { - b.Fatal(err) - } - - b.Run(test.name, - func(b *testing.B) { - benchmarkObjectToString(b, tf, test.test) - }, - ) - } -} diff --git a/v1/transform/object_to_unsigned_integer.go b/v1/transform/object_to_unsigned_integer.go deleted file mode 100644 index 90f59889..00000000 --- a/v1/transform/object_to_unsigned_integer.go +++ /dev/null @@ -1,76 +0,0 @@ -package transform - -import ( - "context" - "encoding/json" - "fmt" - - "github.com/brexhq/substation/config" - iconfig "github.com/brexhq/substation/internal/config" - "github.com/brexhq/substation/internal/errors" - "github.com/brexhq/substation/message" -) - -type objectToUnsignedIntegerConfig struct { - ID string `json:"id"` - Object iconfig.Object `json:"object"` -} - -func (c *objectToUnsignedIntegerConfig) Decode(in interface{}) error { - return iconfig.Decode(in, c) -} - -func (c *objectToUnsignedIntegerConfig) Validate() error { - if c.Object.SourceKey == "" && c.Object.TargetKey != "" { - return fmt.Errorf("object_source_key: %v", errors.ErrMissingRequiredOption) - } - - if c.Object.SourceKey != "" && c.Object.TargetKey == "" { - return fmt.Errorf("object_target_key: %v", errors.ErrMissingRequiredOption) - } - - return nil -} - -func newObjectToUnsignedInteger(_ context.Context, cfg config.Config) (*objectToUnsignedInteger, error) { - conf := objectToUnsignedIntegerConfig{} - if err := conf.Decode(cfg.Settings); err != nil { - return nil, fmt.Errorf("transform object_to_unsigned_integer: %v", err) - } - - if conf.ID == "" { - conf.ID = "object_to_unsigned_integer" - } - - tf := objectToUnsignedInteger{ - conf: conf, - } - - return &tf, nil -} - -type objectToUnsignedInteger struct { - conf objectToUnsignedIntegerConfig -} - -func (tf *objectToUnsignedInteger) Transform(ctx context.Context, msg *message.Message) ([]*message.Message, error) { - if msg.IsControl() { - return []*message.Message{msg}, nil - } - - value := msg.GetValue(tf.conf.Object.SourceKey) - if !value.Exists() { - return []*message.Message{msg}, nil - } - - if err := msg.SetValue(tf.conf.Object.TargetKey, value.Uint()); err != nil { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err) - } - - return []*message.Message{msg}, nil -} - -func (tf *objectToUnsignedInteger) String() string { - b, _ := json.Marshal(tf.conf) - return string(b) -} diff --git a/v1/transform/object_to_unsigned_integer_test.go b/v1/transform/object_to_unsigned_integer_test.go deleted file mode 100644 index e4709bc2..00000000 --- a/v1/transform/object_to_unsigned_integer_test.go +++ /dev/null @@ -1,101 +0,0 @@ -package transform - -import ( - "context" - "reflect" - "testing" - - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" -) - -var _ 
Transformer = &objectToUnsignedInteger{} - -var objectToUnsignedIntegerTests = []struct { - name string - cfg config.Config - test []byte - expected [][]byte -}{ - { - "float to_uint", - config.Config{ - Settings: map[string]interface{}{ - "object": map[string]interface{}{ - "source_key": "a", - "target_key": "a", - }, - }, - }, - []byte(`{"a":1.1}`), - [][]byte{ - []byte(`{"a":1}`), - }, - }, - { - "str to_uint", - config.Config{ - Settings: map[string]interface{}{ - "object": map[string]interface{}{ - "source_key": "a", - "target_key": "a", - }, - }, - }, - []byte(`{"a":"-1"}`), - [][]byte{ - []byte(`{"a":0}`), - }, - }, -} - -func TestObjectToUnsignedInteger(t *testing.T) { - ctx := context.TODO() - - for _, test := range objectToUnsignedIntegerTests { - t.Run(test.name, func(t *testing.T) { - tf, err := newObjectToUnsignedInteger(ctx, test.cfg) - if err != nil { - t.Fatal(err) - } - - msg := message.New().SetData(test.test) - result, err := tf.Transform(ctx, msg) - if err != nil { - t.Error(err) - } - - var data [][]byte - for _, c := range result { - data = append(data, c.Data()) - } - - if !reflect.DeepEqual(data, test.expected) { - t.Errorf("expected %s, got %s", test.expected, data) - } - }) - } -} - -func benchmarkObjectToUnsignedInteger(b *testing.B, tf *objectToUnsignedInteger, data []byte) { - ctx := context.TODO() - for i := 0; i < b.N; i++ { - msg := message.New().SetData(data) - _, _ = tf.Transform(ctx, msg) - } -} - -func BenchmarkObjectToUnsignedInteger(b *testing.B) { - for _, test := range objectToUnsignedIntegerTests { - tf, err := newObjectToUnsignedInteger(context.TODO(), test.cfg) - if err != nil { - b.Fatal(err) - } - - b.Run(test.name, - func(b *testing.B) { - benchmarkObjectToUnsignedInteger(b, tf, test.test) - }, - ) - } -} diff --git a/v1/transform/send.go b/v1/transform/send.go deleted file mode 100644 index db5b6739..00000000 --- a/v1/transform/send.go +++ /dev/null @@ -1,41 +0,0 @@ -package transform - -import ( - "context" - "fmt" - - "github.com/brexhq/substation/message" -) - -// errSendBatchMisconfigured is returned when data cannot be successfully added -// to a batch. This is usually due to a misconfiguration, such as a size, count, -// or duration limit. -var errSendBatchMisconfigured = fmt.Errorf("data could not be added to batch") - -func withTransforms(ctx context.Context, tf []Transformer, items [][]byte) ([][]byte, error) { - if tf == nil { - return items, nil - } - - var msg []*message.Message - for _, i := range items { - msg = append(msg, message.New().SetData(i)) - } - msg = append(msg, message.New().AsControl()) - - res, err := Apply(ctx, tf, msg...) 
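- // The appended control message forces any batching transforms in tf to flush before the results are collected.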
- if err != nil { - return nil, err - } - - var output [][]byte - for _, r := range res { - if r.IsControl() { - continue - } - - output = append(output, r.Data()) - } - - return output, nil -} diff --git a/v1/transform/send_aws_dynamodb.go b/v1/transform/send_aws_dynamodb.go deleted file mode 100644 index b29c1098..00000000 --- a/v1/transform/send_aws_dynamodb.go +++ /dev/null @@ -1,198 +0,0 @@ -package transform - -import ( - "context" - "encoding/json" - "fmt" - "sync" - - "github.com/aws/aws-sdk-go/service/dynamodb" - "github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute" - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/internal/aggregate" - "github.com/brexhq/substation/internal/aws" - idynamodb "github.com/brexhq/substation/internal/aws/dynamodb" - iconfig "github.com/brexhq/substation/internal/config" - "github.com/brexhq/substation/internal/errors" - "github.com/brexhq/substation/message" -) - -// Items greater than 400 KB in size cannot be put into DynamoDB. -const sendAWSDynamoDBItemSizeLimit = 1024 * 400 - -// errSendAWSDynamoDBItemSizeLimit is returned when data exceeds the -// DynamoDB item size limit. If this error occurs, then drop or reduce -// the size of the data before attempting to write it to DynamoDB. -var errSendAWSDynamoDBItemSizeLimit = fmt.Errorf("data exceeded size limit") - -// errSendAWSDynamoDBNonObject is returned when non-object data is sent to the transform. -// -// If this error occurs, then parse the data into an object (or drop invalid objects) -// before attempting to send the data. -var errSendAWSDynamoDBNonObject = fmt.Errorf("input must be object") - -type sendAWSDynamoDBConfig struct { - // TableName is the DynamoDB table that items are written to. - TableName string `json:"table_name"` - // AuxTransforms are applied to batched data before it is sent. - AuxTransforms []config.Config `json:"auxiliary_transforms"` - - ID string `json:"id"` - Object iconfig.Object `json:"object"` - Batch iconfig.Batch `json:"batch"` - AWS iconfig.AWS `json:"aws"` - Retry iconfig.Retry `json:"retry"` -} - -func (c *sendAWSDynamoDBConfig) Decode(in interface{}) error { - return iconfig.Decode(in, c) -} - -func (c *sendAWSDynamoDBConfig) Validate() error { - if c.TableName == "" { - return fmt.Errorf("table_name: %v", errors.ErrMissingRequiredOption) - } - - return nil -} - -func newSendAWSDynamoDB(_ context.Context, cfg config.Config) (*sendAWSDynamoDB, error) { - conf := sendAWSDynamoDBConfig{} - if err := conf.Decode(cfg.Settings); err != nil { - return nil, fmt.Errorf("transform send_aws_dynamodb: %v", err) - } - - if conf.ID == "" { - conf.ID = "send_aws_dynamodb" - } - - if err := conf.Validate(); err != nil { - return nil, fmt.Errorf("transform %s: %v", conf.ID, err) - } - - tf := sendAWSDynamoDB{ - conf: conf, - } - - tf.client.Setup(aws.Config{ - Region: conf.AWS.Region, - RoleARN: conf.AWS.RoleARN, - MaxRetries: conf.Retry.Count, - RetryableErrors: conf.Retry.ErrorMessages, - }) - - agg, err := aggregate.New(aggregate.Config{ - // DynamoDB limits batch operations to 25 records and 16 MiB. 
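- // Only the batch duration is user-configurable; the count and size are pinned to the service limits above.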
- Count: 25, - Size: 1000 * 1000 * 16, - Duration: conf.Batch.Duration, - }) - if err != nil { - return nil, err - } - tf.agg = agg - - if len(conf.AuxTransforms) > 0 { - tf.tforms = make([]Transformer, len(conf.AuxTransforms)) - for i, c := range conf.AuxTransforms { - t, err := New(context.Background(), c) - if err != nil { - return nil, fmt.Errorf("transform %s: %v", conf.ID, err) - } - - tf.tforms[i] = t - } - } - - return &tf, nil -} - -type sendAWSDynamoDB struct { - conf sendAWSDynamoDBConfig - - // client is safe for concurrent use. - client idynamodb.API - - mu sync.Mutex - agg *aggregate.Aggregate - tforms []Transformer -} - -func (tf *sendAWSDynamoDB) Transform(ctx context.Context, msg *message.Message) ([]*message.Message, error) { - tf.mu.Lock() - defer tf.mu.Unlock() - - if msg.IsControl() { - for key := range tf.agg.GetAll() { - if tf.agg.Count(key) == 0 { - continue - } - - if err := tf.send(ctx, key); err != nil { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err) - } - } - - tf.agg.ResetAll() - return []*message.Message{msg}, nil - } - - if !json.Valid(msg.Data()) { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, errSendAWSDynamoDBNonObject) - } - - if len(msg.Data()) > sendAWSDynamoDBItemSizeLimit { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, errSendAWSDynamoDBItemSizeLimit) - } - - // If this value does not exist, then all data is batched together. - key := msg.GetValue(tf.conf.Object.BatchKey).String() - if ok := tf.agg.Add(key, msg.Data()); ok { - return []*message.Message{msg}, nil - } - - if err := tf.send(ctx, key); err != nil { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err) - } - - // If data cannot be added after reset, then the batch is misconfigured. - tf.agg.Reset(key) - if ok := tf.agg.Add(key, msg.Data()); !ok { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, errSendBatchMisconfigured) - } - - return []*message.Message{msg}, nil -} - -func (tf *sendAWSDynamoDB) String() string { - b, _ := json.Marshal(tf.conf) - return string(b) -} - -func (tf *sendAWSDynamoDB) send(ctx context.Context, key string) error { - data, err := withTransforms(ctx, tf.tforms, tf.agg.Get(key)) - if err != nil { - return err - } - - var items []map[string]*dynamodb.AttributeValue - for _, b := range data { - m := make(map[string]any) - for k, v := range bytesToValue(b).Map() { - m[k] = v.Value() - } - - i, err := dynamodbattribute.MarshalMap(m) - if err != nil { - return err - } - - items = append(items, i) - } - - if _, err := tf.client.BatchPutItem(ctx, tf.conf.TableName, items); err != nil { - return err - } - - return nil -} diff --git a/v1/transform/send_aws_eventbridge.go b/v1/transform/send_aws_eventbridge.go deleted file mode 100644 index e2b50acb..00000000 --- a/v1/transform/send_aws_eventbridge.go +++ /dev/null @@ -1,203 +0,0 @@ -package transform - -import ( - "context" - "encoding/json" - "fmt" - "sync" - - "github.com/aws/aws-sdk-go-v2/service/eventbridge" - "github.com/aws/aws-sdk-go-v2/service/eventbridge/types" - - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/internal/aggregate" - "github.com/brexhq/substation/internal/aws" - iconfig "github.com/brexhq/substation/internal/config" - "github.com/brexhq/substation/message" -) - -// Records greater than 256 KB in size cannot be -// put into an EventBridge bus. -const sendAWSEventBridgeMessageSizeLimit = 1024 * 256 - -// errSendAWSEventBridgeMessageSizeLimit is returned when data -// exceeds the EventBridge message size limit.
If this error -// occurs, then conditions or transforms should be applied to -// either drop or reduce the size of the data. -var errSendAWSEventBridgeMessageSizeLimit = fmt.Errorf("data exceeded size limit") - -type sendAWSEventBridgeConfig struct { - // ARN is the EventBridge bus to send messages to. - ARN string `json:"arn"` - // Describes the type of the messages sent to EventBridge. - Description string `json:"description"` - // AuxTransforms are applied to batched data before it is sent. - AuxTransforms []config.Config `json:"auxiliary_transforms"` - - ID string `json:"id"` - Object iconfig.Object `json:"object"` - Batch iconfig.Batch `json:"batch"` - AWS iconfig.AWS `json:"aws"` - Retry iconfig.Retry `json:"retry"` -} - -func (c *sendAWSEventBridgeConfig) Decode(in interface{}) error { - return iconfig.Decode(in, c) -} - -func newSendAWSEventBridge(ctx context.Context, cfg config.Config) (*sendAWSEventBridge, error) { - conf := sendAWSEventBridgeConfig{} - if err := conf.Decode(cfg.Settings); err != nil { - return nil, fmt.Errorf("transform send_aws_eventbridge: %v", err) - } - - if conf.ID == "" { - conf.ID = "send_aws_eventbridge" - } - - if conf.Description == "" { - // The AWS EventBridge service relies on this value for - // event routing, so any update to the `conf.Description` - // variable is considered a BREAKING CHANGE. - conf.Description = "Substation Transform" - } - - tf := sendAWSEventBridge{ - conf: conf, - } - - // Setup the AWS client. - awsCfg, err := aws.NewV2(ctx, aws.Config{ - Region: conf.AWS.Region, - RoleARN: conf.AWS.RoleARN, - MaxRetries: conf.Retry.Count, - RetryableErrors: conf.Retry.ErrorMessages, - }) - if err != nil { - return nil, fmt.Errorf("transform %s: %v", conf.ID, err) - } - - tf.client = eventbridge.NewFromConfig(awsCfg) - - // Setup the batch. - agg, err := aggregate.New(aggregate.Config{ - Count: conf.Batch.Count, - Size: conf.Batch.Size, - Duration: conf.Batch.Duration, - }) - if err != nil { - return nil, err - } - tf.agg = agg - - if len(conf.AuxTransforms) > 0 { - tf.tforms = make([]Transformer, len(conf.AuxTransforms)) - for i, c := range conf.AuxTransforms { - t, err := New(context.Background(), c) - if err != nil { - return nil, fmt.Errorf("transform %s: %v", conf.ID, err) - } - - tf.tforms[i] = t - } - } - - return &tf, nil -} - -type sendAWSEventBridge struct { - conf sendAWSEventBridgeConfig - - // client is safe for concurrent use. - client *eventbridge.Client - - mu sync.Mutex - agg *aggregate.Aggregate - tforms []Transformer -} - -func (tf *sendAWSEventBridge) Transform(ctx context.Context, msg *message.Message) ([]*message.Message, error) { - tf.mu.Lock() - defer tf.mu.Unlock() - - if msg.IsControl() { - for key := range tf.agg.GetAll() { - if tf.agg.Count(key) == 0 { - continue - } - - if err := tf.send(ctx, key); err != nil { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err) - } - } - - tf.agg.ResetAll() - return []*message.Message{msg}, nil - } - - if len(msg.Data()) > sendAWSEventBridgeMessageSizeLimit { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, errSendAWSEventBridgeMessageSizeLimit) - } - - // If this value does not exist, then all data is batched together. - key := msg.GetValue(tf.conf.Object.BatchKey).String() - if ok := tf.agg.Add(key, msg.Data()); ok { - return []*message.Message{msg}, nil - } - - if err := tf.send(ctx, key); err != nil { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err) - } - - // If data cannot be added after reset, then the batch is misconfigured.
- tf.agg.Reset(key) - if ok := tf.agg.Add(key, msg.Data()); !ok { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, errSendBatchMisconfigured) - } - return []*message.Message{msg}, nil -} - -func (tf *sendAWSEventBridge) String() string { - b, _ := json.Marshal(tf.conf) - return string(b) -} - -func (tf *sendAWSEventBridge) send(ctx context.Context, key string) error { - data, err := withTransforms(ctx, tf.tforms, tf.agg.Get(key)) - if err != nil { - return err - } - - entries := make([]types.PutEventsRequestEntry, len(data)) - for i, d := range data { - // The AWS EventBridge service relies on this value for - // event routing, so any update to the `source` variable - // is considered a BREAKING CHANGE. - source := fmt.Sprintf("substation.%s", tf.conf.ID) - detail := string(d) - - entry := types.PutEventsRequestEntry{ - Source: &source, - Detail: &detail, - DetailType: &tf.conf.Description, - } - - // If empty, this is the default event bus. - if tf.conf.ARN != "" { - entry.EventBusName = &tf.conf.ARN - } - - entries[i] = entry - } - - ctx = context.WithoutCancel(ctx) - input := &eventbridge.PutEventsInput{ - Entries: entries, - } - - if _, err = tf.client.PutEvents(ctx, input); err != nil { - return err - } - - return nil -} diff --git a/v1/transform/send_aws_kinesis_data_firehose.go b/v1/transform/send_aws_kinesis_data_firehose.go deleted file mode 100644 index 3fc6faf1..00000000 --- a/v1/transform/send_aws_kinesis_data_firehose.go +++ /dev/null @@ -1,173 +0,0 @@ -package transform - -import ( - "context" - "encoding/json" - "fmt" - "sync" - - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/internal/aggregate" - "github.com/brexhq/substation/internal/aws" - "github.com/brexhq/substation/internal/aws/firehose" - iconfig "github.com/brexhq/substation/internal/config" - "github.com/brexhq/substation/internal/errors" - "github.com/brexhq/substation/message" -) - -// Records greater than 1000 KiB in size cannot be put into Kinesis Firehose. -const sendAWSKinesisDataFirehoseMessageSizeLimit = 1024 * 1000 - -// errSendAWSKinesisDataFirehoseRecordSizeLimit is returned when data exceeds the -// Kinesis Firehose record size limit. If this error occurs, -// then drop or reduce the size of the data before attempting to -// send it to Kinesis Firehose. -var errSendAWSKinesisDataFirehoseRecordSizeLimit = fmt.Errorf("data exceeded size limit") - -type sendAWSKinesisDataFirehoseConfig struct { - // StreamName is the Firehose Delivery Stream that records are sent to. - StreamName string `json:"stream_name"` - // AuxTransforms are applied to batched data before it is sent. 
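- // They run in withTransforms immediately before each PutRecordBatch call.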
- AuxTransforms []config.Config `json:"auxiliary_transforms"` - - ID string `json:"id"` - Object iconfig.Object `json:"object"` - Batch iconfig.Batch `json:"batch"` - AWS iconfig.AWS `json:"aws"` - Retry iconfig.Retry `json:"retry"` -} - -func (c *sendAWSKinesisDataFirehoseConfig) Decode(in interface{}) error { - return iconfig.Decode(in, c) -} - -func (c *sendAWSKinesisDataFirehoseConfig) Validate() error { - if c.StreamName == "" { - return fmt.Errorf("stream_name: %v", errors.ErrMissingRequiredOption) - } - - return nil -} - -func newSendAWSKinesisDataFirehose(_ context.Context, cfg config.Config) (*sendAWSKinesisDataFirehose, error) { - conf := sendAWSKinesisDataFirehoseConfig{} - if err := conf.Decode(cfg.Settings); err != nil { - return nil, fmt.Errorf("transform send_aws_kinesis_data_firehose: %v", err) - } - - if conf.ID == "" { - conf.ID = "send_aws_kinesis_data_firehose" - } - - if err := conf.Validate(); err != nil { - return nil, fmt.Errorf("transform %s: %v", conf.ID, err) - } - - tf := sendAWSKinesisDataFirehose{ - conf: conf, - } - - // Setup the AWS client. - tf.client.Setup(aws.Config{ - Region: conf.AWS.Region, - RoleARN: conf.AWS.RoleARN, - MaxRetries: conf.Retry.Count, - RetryableErrors: conf.Retry.ErrorMessages, - }) - - agg, err := aggregate.New(aggregate.Config{ - // Firehose limits batch operations to 500 records and 4 MiB. - Count: 500, - Size: sendAWSKinesisDataFirehoseMessageSizeLimit * 4, - Duration: conf.Batch.Duration, - }) - if err != nil { - return nil, err - } - tf.agg = agg - - if len(conf.AuxTransforms) > 0 { - tf.tforms = make([]Transformer, len(conf.AuxTransforms)) - for i, c := range conf.AuxTransforms { - t, err := New(context.Background(), c) - if err != nil { - return nil, fmt.Errorf("transform %s: %v", conf.ID, err) - } - - tf.tforms[i] = t - } - } - - return &tf, nil -} - -type sendAWSKinesisDataFirehose struct { - conf sendAWSKinesisDataFirehoseConfig - - // client is safe for concurrent use. - client firehose.API - - mu sync.Mutex - agg *aggregate.Aggregate - tforms []Transformer -} - -func (tf *sendAWSKinesisDataFirehose) Transform(ctx context.Context, msg *message.Message) ([]*message.Message, error) { - tf.mu.Lock() - defer tf.mu.Unlock() - - if msg.IsControl() { - for key := range tf.agg.GetAll() { - if tf.agg.Count(key) == 0 { - continue - } - - if err := tf.send(ctx, key); err != nil { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err) - } - } - - tf.agg.ResetAll() - return []*message.Message{msg}, nil - } - - if len(msg.Data()) > sendAWSKinesisDataFirehoseMessageSizeLimit { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, errSendAWSKinesisDataFirehoseRecordSizeLimit) - } - - // If this value does not exist, then all data is batched together. - key := msg.GetValue(tf.conf.Object.BatchKey).String() - if ok := tf.agg.Add(key, msg.Data()); ok { - return []*message.Message{msg}, nil - } - - if err := tf.send(ctx, key); err != nil { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err) - } - - // If data cannot be added after reset, then the batch is misconfgured. 
- tf.agg.Reset(key) - if ok := tf.agg.Add(key, msg.Data()); !ok { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, errSendBatchMisconfigured) - } - - return []*message.Message{msg}, nil -} - -func (tf *sendAWSKinesisDataFirehose) String() string { - b, _ := json.Marshal(tf.conf) - return string(b) -} - -func (tf *sendAWSKinesisDataFirehose) send(ctx context.Context, key string) error { - data, err := withTransforms(ctx, tf.tforms, tf.agg.Get(key)) - if err != nil { - return err - } - - if _, err := tf.client.PutRecordBatch(ctx, tf.conf.StreamName, data); err != nil { - return err - } - - return nil -} diff --git a/v1/transform/send_aws_kinesis_data_stream.go b/v1/transform/send_aws_kinesis_data_stream.go deleted file mode 100644 index 60fcf892..00000000 --- a/v1/transform/send_aws_kinesis_data_stream.go +++ /dev/null @@ -1,220 +0,0 @@ -package transform - -import ( - "context" - "encoding/json" - "fmt" - "sync" - - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/internal/aggregate" - "github.com/brexhq/substation/internal/aws" - "github.com/brexhq/substation/internal/aws/kinesis" - iconfig "github.com/brexhq/substation/internal/config" - "github.com/brexhq/substation/internal/errors" - "github.com/brexhq/substation/message" - "github.com/google/uuid" -) - -// Records greater than 1 MB in size cannot be -// put into a Kinesis Data Stream. -const sendAWSKinesisDataStreamMessageSizeLimit = 1000 * 1000 - -// errSendAWSKinesisDataStreamMessageSizeLimit is returned when data -// exceeds the Kinesis record size limit. If this error occurs, then -// conditions or transforms should be applied to either drop or reduce -// the size of the data. -var errSendAWSKinesisDataStreamMessageSizeLimit = fmt.Errorf("data exceeded size limit") - -type sendAWSKinesisDataStreamConfig struct { - // StreamName is the Kinesis Data Stream that records are sent to. - StreamName string `json:"stream_name"` - // UseBatchKeyAsPartitionKey determines if the batch key should be used as the partition key. - UseBatchKeyAsPartitionKey bool `json:"use_batch_key_as_partition_key"` - // EnableRecordAggregation determines if records should be aggregated. - EnableRecordAggregation bool `json:"enable_record_aggregation"` - // AuxTransforms are applied to batched data before it is sent. - AuxTransforms []config.Config `json:"auxiliary_transforms"` - - ID string `json:"id"` - Object iconfig.Object `json:"object"` - Batch iconfig.Batch `json:"batch"` - AWS iconfig.AWS `json:"aws"` - Retry iconfig.Retry `json:"retry"` -} - -func (c *sendAWSKinesisDataStreamConfig) Decode(in interface{}) error { - return iconfig.Decode(in, c) -} - -func (c *sendAWSKinesisDataStreamConfig) Validate() error { - if c.StreamName == "" { - return fmt.Errorf("stream_name: %v", errors.ErrMissingRequiredOption) - } - - return nil -} - -func newSendAWSKinesisDataStream(_ context.Context, cfg config.Config) (*sendAWSKinesisDataStream, error) { - conf := sendAWSKinesisDataStreamConfig{} - if err := conf.Decode(cfg.Settings); err != nil { - return nil, fmt.Errorf("transform send_aws_kinesis_data_stream: %v", err) - } - - if conf.ID == "" { - conf.ID = "send_aws_kinesis_data_stream" - } - - if err := conf.Validate(); err != nil { - return nil, fmt.Errorf("transform %s: %v", conf.ID, err) - } - - tf := sendAWSKinesisDataStream{ - conf: conf, - } - - agg, err := aggregate.New(aggregate.Config{ - // Kinesis Data Streams limits batch operations to 500 records and 5MiB. 
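Every send transform in this file set shares the same batching contract: try to add the message, flush when the batch is full, then re-add; a second failure after a reset means the batch limits are misconfigured. A toy model of that contract (the batch type below is a simplified stand-in, not the internal aggregate package's real API):

package main

import "fmt"

// batch accepts items until a count limit is reached.
type batch struct {
	limit int
	items [][]byte
}

func (b *batch) add(d []byte) bool {
	if len(b.items) >= b.limit {
		return false // full; the caller must flush first
	}
	b.items = append(b.items, d)
	return true
}

func (b *batch) flush() {
	fmt.Printf("sending %d items\n", len(b.items))
	b.items = b.items[:0]
}

func main() {
	b := &batch{limit: 2}
	for _, d := range [][]byte{[]byte("a"), []byte("b"), []byte("c")} {
		if ok := b.add(d); ok {
			continue
		}
		// Mirrors the transforms above: send the full batch, reset,
		// then re-add; a second failure would mean misconfiguration.
		b.flush()
		if ok := b.add(d); !ok {
			panic("batch misconfigured")
		}
	}
	b.flush() // the control-message flush at end of stream
}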
- Count: 500, - Size: sendAWSKinesisDataStreamMessageSizeLimit * 5, - Duration: conf.Batch.Duration, - }) - if err != nil { - return nil, fmt.Errorf("transform %s: %v", conf.ID, err) - } - tf.agg = agg - - if len(conf.AuxTransforms) > 0 { - tf.tforms = make([]Transformer, len(conf.AuxTransforms)) - for i, c := range conf.AuxTransforms { - t, err := New(context.Background(), c) - if err != nil { - return nil, fmt.Errorf("transform %s: %v", conf.ID, err) - } - - tf.tforms[i] = t - } - } - - // Setup the AWS client. - tf.client.Setup(aws.Config{ - Region: conf.AWS.Region, - RoleARN: conf.AWS.RoleARN, - MaxRetries: conf.Retry.Count, - RetryableErrors: conf.Retry.ErrorMessages, - }) - - return &tf, nil -} - -type sendAWSKinesisDataStream struct { - conf sendAWSKinesisDataStreamConfig - - // client is safe for concurrent use. - client kinesis.API - - mu sync.Mutex - agg *aggregate.Aggregate - tforms []Transformer -} - -func (tf *sendAWSKinesisDataStream) Transform(ctx context.Context, msg *message.Message) ([]*message.Message, error) { - tf.mu.Lock() - defer tf.mu.Unlock() - - if msg.IsControl() { - for key := range tf.agg.GetAll() { - if tf.agg.Count(key) == 0 { - continue - } - - if err := tf.send(ctx, key); err != nil { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err) - } - } - - tf.agg.ResetAll() - return []*message.Message{msg}, nil - } - - if len(msg.Data()) > sendAWSKinesisDataStreamMessageSizeLimit { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, errSendAWSKinesisDataStreamMessageSizeLimit) - } - - // If this value does not exist, then all data is batched together. - key := msg.GetValue(tf.conf.Object.BatchKey).String() - if ok := tf.agg.Add(key, msg.Data()); ok { - return []*message.Message{msg}, nil - } - - if err := tf.send(ctx, key); err != nil { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err) - } - - // If data cannot be added after reset, then the batch is misconfgured. - tf.agg.Reset(key) - if ok := tf.agg.Add(key, msg.Data()); !ok { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, errSendBatchMisconfigured) - } - - return []*message.Message{msg}, nil -} - -func (tf *sendAWSKinesisDataStream) String() string { - b, _ := json.Marshal(tf.conf) - return string(b) -} - -func (tf *sendAWSKinesisDataStream) send(ctx context.Context, key string) error { - data, err := withTransforms(ctx, tf.tforms, tf.agg.Get(key)) - if err != nil { - return err - } - - var partitionKey string - switch tf.conf.UseBatchKeyAsPartitionKey { - case true: - partitionKey = key - case false: - partitionKey = uuid.NewString() - } - - if tf.conf.EnableRecordAggregation { - data = tf.aggregateRecords(partitionKey, data) - } - - if len(data) == 0 { - return nil - } - - if _, err := tf.client.PutRecords(ctx, tf.conf.StreamName, partitionKey, data); err != nil { - return err - } - - return nil -} - -func (tf *sendAWSKinesisDataStream) aggregateRecords(partitionKey string, data [][]byte) [][]byte { - var records [][]byte - - // Aggregation silently drops any data that is between ~0.9999 MB and 1 MB. 
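The partition-key switch in the Kinesis send method above boils down to one choice: reuse the batch key so related records land on the same shard, or generate a random key to spread load across shards. Restated as a hedged sketch (the helper name is illustrative, not part of the transform):

package main

import (
	"fmt"

	"github.com/google/uuid"
)

// partitionKey mirrors the switch statement in send above.
func partitionKey(useBatchKey bool, batchKey string) string {
	if useBatchKey {
		return batchKey // related records stay on the same shard
	}
	return uuid.NewString() // random keys spread records across shards
}

func main() {
	fmt.Println(partitionKey(true, "tenant-a"))  // tenant-a
	fmt.Println(partitionKey(false, "tenant-a")) // a random UUID
}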
- agg := &kinesis.Aggregate{} - agg.New() - - for _, d := range data { - if ok := agg.Add(d, partitionKey); ok { - continue - } else if agg.Count > 0 { - records = append(records, agg.Get()) - } - - agg.New() - _ = agg.Add(d, partitionKey) - } - - if agg.Count > 0 { - records = append(records, agg.Get()) - } - - return records -} diff --git a/v1/transform/send_aws_lambda.go b/v1/transform/send_aws_lambda.go deleted file mode 100644 index f8d174a3..00000000 --- a/v1/transform/send_aws_lambda.go +++ /dev/null @@ -1,176 +0,0 @@ -package transform - -import ( - "context" - "encoding/json" - "fmt" - "sync" - - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/internal/aggregate" - "github.com/brexhq/substation/internal/aws" - "github.com/brexhq/substation/internal/aws/lambda" - iconfig "github.com/brexhq/substation/internal/config" - "github.com/brexhq/substation/internal/errors" - "github.com/brexhq/substation/message" -) - -// Payloads greater than 256 KB in size cannot be -// sent to an AWS Lambda function. -const sendLambdaPayloadSizeLimit = 1024 * 256 - -// errSendLambdaPayloadSizeLimit is returned when data exceeds the Lambda -// payload size limit. If this error occurs, then conditions or transforms -// should be applied to either drop or reduce the size of the data. -var errSendLambdaPayloadSizeLimit = fmt.Errorf("data exceeded size limit") - -type sendAWSLambdaConfig struct { - // FunctionName is the AWS Lambda function to asynchronously invoke. - FunctionName string `json:"function_name"` - // AuxTransforms are applied to batched data before it is sent. - AuxTransforms []config.Config `json:"auxiliary_transforms"` - - ID string `json:"id"` - Object iconfig.Object `json:"object"` - Batch iconfig.Batch `json:"batch"` - AWS iconfig.AWS `json:"aws"` - Retry iconfig.Retry `json:"retry"` -} - -func (c *sendAWSLambdaConfig) Decode(in interface{}) error { - return iconfig.Decode(in, c) -} - -func (c *sendAWSLambdaConfig) Validate() error { - if c.FunctionName == "" { - return fmt.Errorf("function_name: %v", errors.ErrMissingRequiredOption) - } - - return nil -} - -func newSendAWSLambda(_ context.Context, cfg config.Config) (*sendAWSLambda, error) { - conf := sendAWSLambdaConfig{} - if err := conf.Decode(cfg.Settings); err != nil { - return nil, fmt.Errorf("transform send_aws_lambda: %v", err) - } - - if conf.ID == "" { - conf.ID = "send_aws_lambda" - } - - if err := conf.Validate(); err != nil { - return nil, fmt.Errorf("transform %s: %v", conf.ID, err) - } - - tf := sendAWSLambda{ - conf: conf, - function: conf.FunctionName, - } - - agg, err := aggregate.New(aggregate.Config{ - Count: conf.Batch.Count, - Size: conf.Batch.Size, - Duration: conf.Batch.Duration, - }) - if err != nil { - return nil, fmt.Errorf("transform %s: %v", conf.ID, err) - } - tf.agg = agg - - if len(conf.AuxTransforms) > 0 { - tf.tforms = make([]Transformer, len(conf.AuxTransforms)) - for i, c := range conf.AuxTransforms { - t, err := New(context.Background(), c) - if err != nil { - return nil, fmt.Errorf("transform %s: %v", conf.ID, err) - } - - tf.tforms[i] = t - } - } - - // Setup the AWS client. - tf.client.Setup(aws.Config{ - Region: conf.AWS.Region, - RoleARN: conf.AWS.RoleARN, - MaxRetries: conf.Retry.Count, - RetryableErrors: conf.Retry.ErrorMessages, - }) - - return &tf, nil -} - -type sendAWSLambda struct { - conf sendAWSLambdaConfig - function string - - // client is safe for concurrent use.
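The Lambda transform above invokes the function asynchronously through an internal wrapper. With aws-sdk-go-v2 the equivalent call is Invoke with InvocationType set to Event, sketched here with a hypothetical function name; the Event invocation type is why the 256 KB async payload limit applies:

package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/lambda"
	"github.com/aws/aws-sdk-go-v2/service/lambda/types"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}

	client := lambda.NewFromConfig(cfg)
	// InvocationTypeEvent queues the payload for asynchronous execution.
	_, err = client.Invoke(ctx, &lambda.InvokeInput{
		FunctionName:   aws.String("substation-example"), // hypothetical name
		InvocationType: types.InvocationTypeEvent,
		Payload:        []byte(`{"a":"b"}`),
	})
	if err != nil {
		log.Fatal(err)
	}
}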
- client lambda.API - - mu sync.Mutex - agg *aggregate.Aggregate - tforms []Transformer -} - -func (tf *sendAWSLambda) Transform(ctx context.Context, msg *message.Message) ([]*message.Message, error) { - tf.mu.Lock() - defer tf.mu.Unlock() - - if msg.IsControl() { - for key := range tf.agg.GetAll() { - if tf.agg.Count(key) == 0 { - continue - } - - if err := tf.send(ctx, key); err != nil { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err) - } - } - - tf.agg.ResetAll() - return []*message.Message{msg}, nil - } - - if len(msg.Data()) > sendLambdaPayloadSizeLimit { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, errSendLambdaPayloadSizeLimit) - } - - // If this value does not exist, then all data is batched together. - key := msg.GetValue(tf.conf.Object.BatchKey).String() - if ok := tf.agg.Add(key, msg.Data()); ok { - return []*message.Message{msg}, nil - } - - if err := tf.send(ctx, key); err != nil { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err) - } - - // If data cannot be added after reset, then the batch is misconfgured. - tf.agg.Reset(key) - if ok := tf.agg.Add(key, msg.Data()); !ok { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, errSendBatchMisconfigured) - } - - return []*message.Message{msg}, nil -} - -func (tf *sendAWSLambda) send(ctx context.Context, key string) error { - data, err := withTransforms(ctx, tf.tforms, tf.agg.Get(key)) - if err != nil { - return err - } - - for _, b := range data { - if _, err := tf.client.InvokeAsync(ctx, tf.function, b); err != nil { - return err - } - } - - return nil -} - -func (tf *sendAWSLambda) String() string { - b, _ := json.Marshal(tf.conf) - return string(b) -} diff --git a/v1/transform/send_aws_s3.go b/v1/transform/send_aws_s3.go deleted file mode 100644 index ba92e958..00000000 --- a/v1/transform/send_aws_s3.go +++ /dev/null @@ -1,213 +0,0 @@ -package transform - -import ( - "context" - "encoding/json" - "fmt" - "os" - "slices" - "sync" - - "github.com/aws/aws-sdk-go/service/s3" - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" - - "github.com/brexhq/substation/internal/aggregate" - "github.com/brexhq/substation/internal/aws" - "github.com/brexhq/substation/internal/aws/s3manager" - iconfig "github.com/brexhq/substation/internal/config" - "github.com/brexhq/substation/internal/errors" - "github.com/brexhq/substation/internal/file" -) - -type sendAWSS3Config struct { - // BucketName is the AWS S3 bucket that data is written to. - BucketName string `json:"bucket_name"` - // StorageClass is the storage class of the object. - StorageClass string `json:"storage_class"` - // FilePath determines how the name of the uploaded object is constructed. - // See filePath.New for more information. - FilePath file.Path `json:"file_path"` - // UseBatchKeyAsPrefix determines if the batch key should be used as the prefix. - UseBatchKeyAsPrefix bool `json:"use_batch_key_as_prefix"` - // AuxTransforms are applied to batched data before it is sent. 
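The Validate method that follows checks StorageClass against s3.StorageClass_Values with the standard slices package; the same pattern works for any enum-style option list. A small sketch (the list below is a hypothetical subset; the transform uses the SDK's values as the source of truth):

package main

import (
	"fmt"
	"slices"
)

func main() {
	// Hypothetical subset; the transform uses s3.StorageClass_Values()
	// from the AWS SDK instead of a hardcoded list.
	valid := []string{"STANDARD", "STANDARD_IA", "ONEZONE_IA", "GLACIER"}

	fmt.Println(slices.Contains(valid, "STANDARD")) // true
	fmt.Println(slices.Contains(valid, "BOGUS"))    // false
}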
- AuxTransforms []config.Config `json:"auxiliary_transforms"` - - ID string `json:"id"` - Object iconfig.Object `json:"object"` - Batch iconfig.Batch `json:"batch"` - AWS iconfig.AWS `json:"aws"` - Retry iconfig.Retry `json:"retry"` -} - -func (c *sendAWSS3Config) Decode(in interface{}) error { - return iconfig.Decode(in, c) -} - -func (c *sendAWSS3Config) Validate() error { - if c.BucketName == "" { - return fmt.Errorf("bucket_name: %v", errors.ErrMissingRequiredOption) - } - - if !slices.Contains(s3.StorageClass_Values(), c.StorageClass) { - return fmt.Errorf("storage_class: %v", errors.ErrInvalidOption) - } - - return nil -} - -func newSendAWSS3(_ context.Context, cfg config.Config) (*sendAWSS3, error) { - conf := sendAWSS3Config{} - if err := conf.Decode(cfg.Settings); err != nil { - return nil, fmt.Errorf("transform send_aws_s3: %v", err) - } - - if conf.ID == "" { - conf.ID = "send_aws_s3" - } - - if conf.StorageClass == "" { - conf.StorageClass = "STANDARD" - } - - if err := conf.Validate(); err != nil { - return nil, fmt.Errorf("transform %s: %v", conf.ID, err) - } - - tf := sendAWSS3{ - conf: conf, - } - - agg, err := aggregate.New(aggregate.Config{ - Count: conf.Batch.Count, - Size: conf.Batch.Size, - Duration: conf.Batch.Duration, - }) - if err != nil { - return nil, fmt.Errorf("transform %s: %v", conf.ID, err) - } - tf.agg = agg - - if len(conf.AuxTransforms) > 0 { - tf.tforms = make([]Transformer, len(conf.AuxTransforms)) - for i, c := range conf.AuxTransforms { - t, err := New(context.Background(), c) - if err != nil { - return nil, fmt.Errorf("transform %s: %v", conf.ID, err) - } - - tf.tforms[i] = t - } - } - - // Setup the AWS client. - tf.client.Setup(aws.Config{ - Region: conf.AWS.Region, - RoleARN: conf.AWS.RoleARN, - MaxRetries: conf.Retry.Count, - RetryableErrors: conf.Retry.ErrorMessages, - }) - - return &tf, nil -} - -type sendAWSS3 struct { - conf sendAWSS3Config - - // client is safe for concurrent use. - client s3manager.UploaderAPI - - mu sync.Mutex - agg *aggregate.Aggregate - tforms []Transformer -} - -func (tf *sendAWSS3) Transform(ctx context.Context, msg *message.Message) ([]*message.Message, error) { - tf.mu.Lock() - defer tf.mu.Unlock() - - if msg.IsControl() { - for key := range tf.agg.GetAll() { - if tf.agg.Count(key) == 0 { - continue - } - - if err := tf.send(ctx, key); err != nil { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err) - } - } - - tf.agg.ResetAll() - return []*message.Message{msg}, nil - } - - // If this value does not exist, then all data is batched together. - key := msg.GetValue(tf.conf.Object.BatchKey).String() - if ok := tf.agg.Add(key, msg.Data()); ok { - return []*message.Message{msg}, nil - } - - if err := tf.send(ctx, key); err != nil { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err) - } - - // If data cannot be added after reset, then the batch is misconfgured. 
- tf.agg.Reset(key) - if ok := tf.agg.Add(key, msg.Data()); !ok { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, errSendBatchMisconfigured) - } - - return []*message.Message{msg}, nil -} - -func (tf *sendAWSS3) String() string { - b, _ := json.Marshal(tf.conf) - return string(b) -} - -func (tf *sendAWSS3) send(ctx context.Context, key string) error { - p := tf.conf.FilePath - if key != "" && tf.conf.UseBatchKeyAsPrefix { - p.Prefix = key - } - - filePath := p.New() - if filePath == "" { - return fmt.Errorf("file path is empty") - } - - temp, err := os.CreateTemp("", "substation") - if err != nil { - return err - } - defer os.Remove(temp.Name()) - defer temp.Close() - - data, err := withTransforms(ctx, tf.tforms, tf.agg.Get(key)) - if err != nil { - return err - } - - for _, d := range data { - if _, err := temp.Write(d); err != nil { - return err - } - } - - // Flush the file before uploading to S3. - if err := temp.Close(); err != nil { - return err - } - - f, err := os.Open(temp.Name()) - if err != nil { - return err - } - defer f.Close() - - if _, err := tf.client.Upload(ctx, tf.conf.BucketName, filePath, tf.conf.StorageClass, f); err != nil { - return err - } - - return nil -} diff --git a/v1/transform/send_aws_sns.go b/v1/transform/send_aws_sns.go deleted file mode 100644 index 50ce7668..00000000 --- a/v1/transform/send_aws_sns.go +++ /dev/null @@ -1,173 +0,0 @@ -package transform - -import ( - "context" - "encoding/json" - "fmt" - "sync" - - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/internal/aggregate" - "github.com/brexhq/substation/internal/aws" - "github.com/brexhq/substation/internal/aws/sns" - iconfig "github.com/brexhq/substation/internal/config" - "github.com/brexhq/substation/internal/errors" - "github.com/brexhq/substation/message" -) - -// Records greater than 256 KB in size cannot be -// put into an SNS topic. -const sendAWSSNSMessageSizeLimit = 1024 * 256 - -// errSendAWSSNSMessageSizeLimit is returned when data exceeds the SNS message -// size limit. If this error occurs, then conditions or transforms -// should be applied to either drop or reduce the size of the data. -var errSendAWSSNSMessageSizeLimit = fmt.Errorf("data exceeded size limit") - -type sendAWSSNSConfig struct { - // ARN is the AWS SNS topic ARN that messages are sent to. - ARN string `json:"arn"` - // AuxTransforms are applied to batched data before it is sent. - AuxTransforms []config.Config `json:"auxiliary_transforms"` - - ID string `json:"id"` - Object iconfig.Object `json:"object"` - Batch iconfig.Batch `json:"batch"` - AWS iconfig.AWS `json:"aws"` - Retry iconfig.Retry `json:"retry"` -} - -func (c *sendAWSSNSConfig) Decode(in interface{}) error { - return iconfig.Decode(in, c) -} - -func (c *sendAWSSNSConfig) Validate() error { - if c.ARN == "" { - return fmt.Errorf("arn: %v", errors.ErrMissingRequiredOption) - } - - return nil -} - -func newSendAWSSNS(_ context.Context, cfg config.Config) (*sendAWSSNS, error) { - conf := sendAWSSNSConfig{} - if err := conf.Decode(cfg.Settings); err != nil { - return nil, fmt.Errorf("transform send_aws_sns: %v", err) - } - - if conf.ID == "" { - conf.ID = "send_aws_sns" - } - - if err := conf.Validate(); err != nil { - return nil, fmt.Errorf("transform %s: %v", conf.ID, err) - } - - tf := sendAWSSNS{ - conf: conf, - } - - // Setup the AWS client.
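The S3 send method above stages a batch in a temporary file, closes it to flush, then reopens it for upload. That lifecycle is easy to get wrong; a minimal standard-library sketch of the same sequence (printing to stdout stands in for the upload step):

package main

import (
	"io"
	"log"
	"os"
)

func main() {
	temp, err := os.CreateTemp("", "example")
	if err != nil {
		log.Fatal(err)
	}
	defer os.Remove(temp.Name())

	if _, err := temp.Write([]byte("batched data\n")); err != nil {
		log.Fatal(err)
	}
	// Close to flush before the file is consumed elsewhere, mirroring
	// the explicit Close before Upload in the S3 transform above.
	if err := temp.Close(); err != nil {
		log.Fatal(err)
	}

	f, err := os.Open(temp.Name())
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	if _, err := io.Copy(os.Stdout, f); err != nil {
		log.Fatal(err)
	}
}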
- tf.client.Setup(aws.Config{ - Region: conf.AWS.Region, - RoleARN: conf.AWS.RoleARN, - MaxRetries: conf.Retry.Count, - RetryableErrors: conf.Retry.ErrorMessages, - }) - - agg, err := aggregate.New(aggregate.Config{ - // SNS limits batch operations to 10 messages. - Count: 10, - // SNS limits batch operations to 256 KB. - Size: sendAWSSNSMessageSizeLimit, - Duration: conf.Batch.Duration, - }) - if err != nil { - return nil, err - } - tf.agg = agg - - if len(conf.AuxTransforms) > 0 { - tf.tforms = make([]Transformer, len(conf.AuxTransforms)) - for i, c := range conf.AuxTransforms { - t, err := New(context.Background(), c) - if err != nil { - return nil, fmt.Errorf("transform %s: %v", conf.ID, err) - } - - tf.tforms[i] = t - } - } - - return &tf, nil -} - -type sendAWSSNS struct { - conf sendAWSSNSConfig - - // client is safe for concurrent use. - client sns.API - - mu sync.Mutex - agg *aggregate.Aggregate - tforms []Transformer -} - -func (tf *sendAWSSNS) Transform(ctx context.Context, msg *message.Message) ([]*message.Message, error) { - tf.mu.Lock() - defer tf.mu.Unlock() - - if msg.IsControl() { - for key := range tf.agg.GetAll() { - if tf.agg.Count(key) == 0 { - continue - } - - if err := tf.send(ctx, key); err != nil { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err) - } - } - - tf.agg.ResetAll() - return []*message.Message{msg}, nil - } - - if len(msg.Data()) > sendAWSSNSMessageSizeLimit { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, errSendAWSSNSMessageSizeLimit) - } - - // If this value does not exist, then all data is batched together. - key := msg.GetValue(tf.conf.Object.BatchKey).String() - if ok := tf.agg.Add(key, msg.Data()); ok { - return []*message.Message{msg}, nil - } - - if err := tf.send(ctx, key); err != nil { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err) - } - - // If data cannot be added after reset, then the batch is misconfigured. - tf.agg.Reset(key) - if ok := tf.agg.Add(key, msg.Data()); !ok { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, errSendBatchMisconfigured) - } - return []*message.Message{msg}, nil -} - -func (tf *sendAWSSNS) String() string { - b, _ := json.Marshal(tf.conf) - return string(b) -} - -func (tf *sendAWSSNS) send(ctx context.Context, key string) error { - data, err := withTransforms(ctx, tf.tforms, tf.agg.Get(key)) - if err != nil { - return err - } - - if _, err := tf.client.PublishBatch(ctx, tf.conf.ARN, data); err != nil { - return err - } - - return nil -} diff --git a/v1/transform/send_aws_sqs.go b/v1/transform/send_aws_sqs.go deleted file mode 100644 index bcc664c2..00000000 --- a/v1/transform/send_aws_sqs.go +++ /dev/null @@ -1,183 +0,0 @@ -package transform - -import ( - "context" - "encoding/json" - "fmt" - "strings" - "sync" - - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/internal/aggregate" - "github.com/brexhq/substation/internal/aws" - "github.com/brexhq/substation/internal/aws/sqs" - iconfig "github.com/brexhq/substation/internal/config" - "github.com/brexhq/substation/internal/errors" - "github.com/brexhq/substation/message" -) - -// Records greater than 256 KB in size cannot be -// put into an SQS queue. -const sendSQSMessageSizeLimit = 1024 * 256 - -// errSendSQSMessageSizeLimit is returned when data exceeds the SQS message -// size limit. If this error occurs, then conditions or transforms -// should be applied to either drop or reduce the size of the data.
-var errSendSQSMessageSizeLimit = fmt.Errorf("data exceeded size limit") - -type sendAWSSQSConfig struct { - // ARN is the AWS SQS queue ARN that messages are sent to. - ARN string `json:"arn"` - // AuxTransforms are applied to batched data before it is sent. - AuxTransforms []config.Config `json:"auxiliary_transforms"` - - ID string `json:"id"` - Object iconfig.Object `json:"object"` - Batch iconfig.Batch `json:"batch"` - AWS iconfig.AWS `json:"aws"` - Retry iconfig.Retry `json:"retry"` -} - -func (c *sendAWSSQSConfig) Decode(in interface{}) error { - return iconfig.Decode(in, c) -} - -func (c *sendAWSSQSConfig) Validate() error { - if c.ARN == "" { - return fmt.Errorf("arn: %v", errors.ErrMissingRequiredOption) - } - - return nil -} - -func newSendAWSSQS(_ context.Context, cfg config.Config) (*sendAWSSQS, error) { - conf := sendAWSSQSConfig{} - if err := conf.Decode(cfg.Settings); err != nil { - return nil, fmt.Errorf("transform send_aws_sqs: %v", err) - } - - if conf.ID == "" { - conf.ID = "send_aws_sqs" - } - - if err := conf.Validate(); err != nil { - return nil, fmt.Errorf("transform %s: %v", conf.ID, err) - } - - // arn:aws:sqs:region:account_id:queue_name - arn := strings.Split(conf.ARN, ":") - tf := sendAWSSQS{ - conf: conf, - queueURL: fmt.Sprintf( - "https://sqs.%s.amazonaws.com/%s/%s", - arn[3], - arn[4], - arn[5], - ), - } - - // Setup the AWS client. - tf.client.Setup(aws.Config{ - Region: conf.AWS.Region, - RoleARN: conf.AWS.RoleARN, - MaxRetries: conf.Retry.Count, - RetryableErrors: conf.Retry.ErrorMessages, - }) - - agg, err := aggregate.New(aggregate.Config{ - // SQS limits batch operations to 10 messages. - Count: 10, - // SQS limits batch operations to 256 KB. - Size: sendSQSMessageSizeLimit, - Duration: conf.Batch.Duration, - }) - if err != nil { - return nil, err - } - tf.agg = agg - - if len(conf.AuxTransforms) > 0 { - tf.tforms = make([]Transformer, len(conf.AuxTransforms)) - for i, c := range conf.AuxTransforms { - t, err := New(context.Background(), c) - if err != nil { - return nil, fmt.Errorf("transform %s: %v", conf.ID, err) - } - - tf.tforms[i] = t - } - } - - return &tf, nil -} - -type sendAWSSQS struct { - conf sendAWSSQSConfig - queueURL string - - // client is safe for concurrent use. - client sqs.API - - mu sync.Mutex - agg *aggregate.Aggregate - tforms []Transformer -} - -func (tf *sendAWSSQS) Transform(ctx context.Context, msg *message.Message) ([]*message.Message, error) { - tf.mu.Lock() - defer tf.mu.Unlock() - - if msg.IsControl() { - for key := range tf.agg.GetAll() { - if tf.agg.Count(key) == 0 { - continue - } - - if err := tf.send(ctx, key); err != nil { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err) - } - } - - tf.agg.ResetAll() - return []*message.Message{msg}, nil - } - - if len(msg.Data()) > sendSQSMessageSizeLimit { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, errSendSQSMessageSizeLimit) - } - - // If this value does not exist, then all data is batched together. - key := msg.GetValue(tf.conf.Object.BatchKey).String() - if ok := tf.agg.Add(key, msg.Data()); ok { - return []*message.Message{msg}, nil - } - - if err := tf.send(ctx, key); err != nil { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err) - } - - // If data cannot be added after reset, then the batch is misconfigured.
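Constructing the queue URL from the ARN, as the SQS constructor above does, is a pure string operation. A sketch with a hypothetical helper name; it assumes a well-formed ARN in the public AWS partition, matching the transform's own assumption:

package main

import (
	"fmt"
	"strings"
)

// queueURLFromARN mirrors the URL construction in newSendAWSSQS above.
func queueURLFromARN(arn string) string {
	// arn:aws:sqs:region:account_id:queue_name
	p := strings.Split(arn, ":")
	return fmt.Sprintf("https://sqs.%s.amazonaws.com/%s/%s", p[3], p[4], p[5])
}

func main() {
	fmt.Println(queueURLFromARN("arn:aws:sqs:us-east-1:123456789012:substation"))
	// https://sqs.us-east-1.amazonaws.com/123456789012/substation
}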
- tf.agg.Reset(key) - if ok := tf.agg.Add(key, msg.Data()); !ok { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, errSendBatchMisconfigured) - } - return []*message.Message{msg}, nil -} - -func (tf *sendAWSSQS) String() string { - b, _ := json.Marshal(tf.conf) - return string(b) -} - -func (tf *sendAWSSQS) send(ctx context.Context, key string) error { - data, err := withTransforms(ctx, tf.tforms, tf.agg.Get(key)) - if err != nil { - return err - } - - if _, err := tf.client.SendMessageBatch(ctx, tf.queueURL, data); err != nil { - return err - } - - return nil -} diff --git a/v1/transform/send_file.go b/v1/transform/send_file.go deleted file mode 100644 index b027964d..00000000 --- a/v1/transform/send_file.go +++ /dev/null @@ -1,172 +0,0 @@ -package transform - -import ( - "context" - "encoding/json" - "fmt" - "os" - "path/filepath" - "sync" - - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/internal/aggregate" - iconfig "github.com/brexhq/substation/internal/config" - "github.com/brexhq/substation/internal/file" - "github.com/brexhq/substation/message" -) - -type sendFileConfig struct { - // FilePath determines how the name of the file is constructed. - // See filePath.New for more information. - FilePath file.Path `json:"file_path"` - // UseBatchKeyAsPrefix determines if the batch key should be used as the prefix. - UseBatchKeyAsPrefix bool `json:"use_batch_key_as_prefix"` - // AuxTransforms are applied to batched data before it is sent. - AuxTransforms []config.Config `json:"auxiliary_transforms"` - - ID string `json:"id"` - Object iconfig.Object `json:"object"` - Batch iconfig.Batch `json:"batch"` -} - -func (c *sendFileConfig) Decode(in interface{}) error { - return iconfig.Decode(in, c) -} - -func (c *sendFileConfig) Validate() error { - return nil -} - -func newSendFile(_ context.Context, cfg config.Config) (*sendFile, error) { - conf := sendFileConfig{} - if err := conf.Decode(cfg.Settings); err != nil { - return nil, fmt.Errorf("transform send_file: %v", err) - } - - if conf.ID == "" { - conf.ID = "send_file" - } - - if err := conf.Validate(); err != nil { - return nil, fmt.Errorf("transform %s: %v", conf.ID, err) - } - - tf := sendFile{ - conf: conf, - } - - agg, err := aggregate.New(aggregate.Config{ - Count: conf.Batch.Count, - Size: conf.Batch.Size, - Duration: conf.Batch.Duration, - }) - if err != nil { - return nil, fmt.Errorf("transform %s: %v", conf.ID, err) - } - tf.agg = agg - - if len(conf.AuxTransforms) > 0 { - tf.tforms = make([]Transformer, len(conf.AuxTransforms)) - for i, c := range conf.AuxTransforms { - t, err := New(context.Background(), c) - if err != nil { - return nil, fmt.Errorf("transform %s: %v", conf.ID, err) - } - - tf.tforms[i] = t - } - } - - return &tf, nil -} - -type sendFile struct { - conf sendFileConfig - - mu sync.Mutex - agg *aggregate.Aggregate - tforms []Transformer -} - -func (tf *sendFile) Transform(ctx context.Context, msg *message.Message) ([]*message.Message, error) { - tf.mu.Lock() - defer tf.mu.Unlock() - - if msg.IsControl() { - for key := range tf.agg.GetAll() { - if tf.agg.Count(key) == 0 { - continue - } - - if err := tf.send(ctx, key); err != nil { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err) - } - } - - tf.agg.ResetAll() - return []*message.Message{msg}, nil - } - - // If this value does not exist, then all data is batched together. 
- key := msg.GetValue(tf.conf.Object.BatchKey).String() - if ok := tf.agg.Add(key, msg.Data()); ok { - return []*message.Message{msg}, nil - } - - if err := tf.send(ctx, key); err != nil { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err) - } - - // If data cannot be added after reset, then the batch is misconfgured. - tf.agg.Reset(key) - if ok := tf.agg.Add(key, msg.Data()); !ok { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, errSendBatchMisconfigured) - } - - return []*message.Message{msg}, nil -} - -func (tf *sendFile) String() string { - b, _ := json.Marshal(tf.conf) - return string(b) -} - -func (tf *sendFile) send(ctx context.Context, key string) error { - p := tf.conf.FilePath - if key != "" && tf.conf.UseBatchKeyAsPrefix { - p.Prefix = key - } - - path := p.New() - if path == "" { - return fmt.Errorf("file path is empty") - } - - // Ensures that the path is OS agnostic. - path = filepath.FromSlash(path) - if err := os.MkdirAll(filepath.Dir(path), 0o770); err != nil { - return err - } - - f, err := os.Create(path) - if err != nil { - return err - } - - data, err := withTransforms(ctx, tf.tforms, tf.agg.Get(key)) - if err != nil { - return err - } - - for _, d := range data { - if _, err := f.Write(d); err != nil { - return err - } - } - - if err := f.Close(); err != nil { - return err - } - - return nil -} diff --git a/v1/transform/send_http_post.go b/v1/transform/send_http_post.go deleted file mode 100644 index 3fe42d5a..00000000 --- a/v1/transform/send_http_post.go +++ /dev/null @@ -1,187 +0,0 @@ -package transform - -import ( - "context" - "encoding/json" - "fmt" - "io" - "os" - "sync" - - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/internal/aggregate" - iconfig "github.com/brexhq/substation/internal/config" - "github.com/brexhq/substation/internal/errors" - "github.com/brexhq/substation/internal/http" - "github.com/brexhq/substation/internal/secrets" - "github.com/brexhq/substation/message" -) - -type sendHTTPPostConfig struct { - // URL is the HTTP(S) endpoint that data is sent to. - URL string `json:"url"` - // Headers are an array of objects that contain HTTP headers sent in the request. - // - // This is optional and has no default. - Headers map[string]string `json:"headers"` - // AuxTransforms are applied to batched data before it is sent. 
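The file transform's send method above normalizes the slash-delimited path for the host OS and creates parent directories before writing the file. The same sequence in a standalone sketch (the path is a placeholder):

package main

import (
	"fmt"
	"log"
	"os"
	"path/filepath"
)

func main() {
	// FromSlash makes the configured path OS agnostic, mirroring the
	// file transform above; MkdirAll ensures the parents exist.
	path := filepath.FromSlash("out/example/data.json")
	if err := os.MkdirAll(filepath.Dir(path), 0o770); err != nil {
		log.Fatal(err)
	}
	if err := os.WriteFile(path, []byte("{}\n"), 0o640); err != nil {
		log.Fatal(err)
	}
	fmt.Println("wrote", path)
}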
- AuxTransforms []config.Config `json:"auxiliary_transforms"` - - ID string `json:"id"` - Object iconfig.Object `json:"object"` - Batch iconfig.Batch `json:"batch"` -} - -func (c *sendHTTPPostConfig) Decode(in interface{}) error { - return iconfig.Decode(in, c) -} - -func (c *sendHTTPPostConfig) Validate() error { - if c.URL == "" { - return fmt.Errorf("url: %v", errors.ErrMissingRequiredOption) - } - - return nil -} - -func newSendHTTPPost(_ context.Context, cfg config.Config) (*sendHTTPPost, error) { - conf := sendHTTPPostConfig{} - if err := conf.Decode(cfg.Settings); err != nil { - return nil, fmt.Errorf("transform send_http_post: %v", err) - } - - if conf.ID == "" { - conf.ID = "send_http_post" - } - - if err := conf.Validate(); err != nil { - return nil, fmt.Errorf("transform %s: %v", conf.ID, err) - } - - tf := sendHTTPPost{ - conf: conf, - } - - tf.client.Setup() - if _, ok := os.LookupEnv("AWS_XRAY_DAEMON_ADDRESS"); ok { - tf.client.EnableXRay() - } - - agg, err := aggregate.New(aggregate.Config{ - Count: conf.Batch.Count, - Size: conf.Batch.Size, - Duration: conf.Batch.Duration, - }) - if err != nil { - return nil, err - } - tf.agg = agg - - if len(conf.AuxTransforms) > 0 { - tf.tforms = make([]Transformer, len(conf.AuxTransforms)) - for i, c := range conf.AuxTransforms { - t, err := New(context.Background(), c) - if err != nil { - return nil, fmt.Errorf("transform %s: %v", conf.ID, err) - } - - tf.tforms[i] = t - } - } - - return &tf, nil -} - -type sendHTTPPost struct { - conf sendHTTPPostConfig - - // client is safe for concurrent use. - client http.HTTP - - mu sync.Mutex - agg *aggregate.Aggregate - tforms []Transformer -} - -func (tf *sendHTTPPost) Transform(ctx context.Context, msg *message.Message) ([]*message.Message, error) { - tf.mu.Lock() - defer tf.mu.Unlock() - - if msg.IsControl() { - for key := range tf.agg.GetAll() { - if tf.agg.Count(key) == 0 { - continue - } - - if err := tf.send(ctx, key); err != nil { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err) - } - } - - tf.agg.ResetAll() - return []*message.Message{msg}, nil - } - - // If this value does not exist, then all data is batched together. - key := msg.GetValue(tf.conf.Object.BatchKey).String() - if ok := tf.agg.Add(key, msg.Data()); ok { - return []*message.Message{msg}, nil - } - - if err := tf.send(ctx, key); err != nil { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err) - } - - // If data cannot be added after reset, then the batch is misconfgured. - tf.agg.Reset(key) - if ok := tf.agg.Add(key, msg.Data()); !ok { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, errSendBatchMisconfigured) - } - - return []*message.Message{msg}, nil -} - -func (tf *sendHTTPPost) String() string { - b, _ := json.Marshal(tf.conf) - return string(b) -} - -func (tf *sendHTTPPost) send(ctx context.Context, key string) error { - var headers []http.Header - for k, v := range tf.conf.Headers { - // Retrieve secret and interpolate with header value. - v, err := secrets.Interpolate(ctx, v) - if err != nil { - return err - } - - headers = append(headers, http.Header{ - Key: k, - Value: v, - }) - } - - // Retrieve secret and interpolate with URL. - url, err := secrets.Interpolate(ctx, tf.conf.URL) - if err != nil { - return err - } - - data, err := withTransforms(ctx, tf.tforms, tf.agg.Get(key)) - if err != nil { - return err - } - - for _, d := range data { - resp, err := tf.client.Post(ctx, url, d, headers...) 
- if err != nil { - return err - } - - //nolint:errcheck // Response body is discarded to avoid resource leaks. - io.Copy(io.Discard, resp.Body) - resp.Body.Close() - } - - return nil -} diff --git a/v1/transform/send_stdout.go b/v1/transform/send_stdout.go deleted file mode 100644 index 2c8f53fd..00000000 --- a/v1/transform/send_stdout.go +++ /dev/null @@ -1,129 +0,0 @@ -package transform - -import ( - "context" - "encoding/json" - "fmt" - "sync" - - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/internal/aggregate" - iconfig "github.com/brexhq/substation/internal/config" - "github.com/brexhq/substation/message" -) - -type sendStdoutConfig struct { - // AuxTransforms are applied to batched data before it is sent. - AuxTransforms []config.Config `json:"auxiliary_transforms"` - - ID string `json:"id"` - Object iconfig.Object `json:"object"` - Batch iconfig.Batch `json:"batch"` -} - -func (c *sendStdoutConfig) Decode(in interface{}) error { - return iconfig.Decode(in, c) -} - -func newSendStdout(_ context.Context, cfg config.Config) (*sendStdout, error) { - conf := sendStdoutConfig{} - if err := conf.Decode(cfg.Settings); err != nil { - return nil, fmt.Errorf("transform send_stdout: %v", err) - } - - if conf.ID == "" { - conf.ID = "send_stdout" - } - - tf := sendStdout{ - conf: conf, - } - - agg, err := aggregate.New(aggregate.Config{ - Count: conf.Batch.Count, - Size: conf.Batch.Size, - Duration: conf.Batch.Duration, - }) - if err != nil { - return nil, fmt.Errorf("transform %s: %v", conf.ID, err) - } - tf.agg = agg - - if len(conf.AuxTransforms) > 0 { - tf.tforms = make([]Transformer, len(conf.AuxTransforms)) - for i, c := range conf.AuxTransforms { - t, err := New(context.Background(), c) - if err != nil { - return nil, fmt.Errorf("transform %s: %v", conf.ID, err) - } - - tf.tforms[i] = t - } - } - - return &tf, nil -} - -type sendStdout struct { - conf sendStdoutConfig - - mu sync.Mutex - agg *aggregate.Aggregate - tforms []Transformer -} - -func (tf *sendStdout) Transform(ctx context.Context, msg *message.Message) ([]*message.Message, error) { - tf.mu.Lock() - defer tf.mu.Unlock() - - if msg.IsControl() { - for key := range tf.agg.GetAll() { - if tf.agg.Count(key) == 0 { - continue - } - - if err := tf.send(ctx, key); err != nil { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err) - } - } - - tf.agg.ResetAll() - return []*message.Message{msg}, nil - } - - // If this value does not exist, then all data is batched together. - key := msg.GetValue(tf.conf.Object.BatchKey).String() - if ok := tf.agg.Add(key, msg.Data()); ok { - return []*message.Message{msg}, nil - } - - if err := tf.send(ctx, key); err != nil { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err) - } - - // If data cannot be added after reset, then the batch is misconfgured. 
- tf.agg.Reset(key) - if ok := tf.agg.Add(key, msg.Data()); !ok { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, errSendBatchMisconfigured) - } - - return []*message.Message{msg}, nil -} - -func (tf *sendStdout) String() string { - b, _ := json.Marshal(tf.conf) - return string(b) -} - -func (tf *sendStdout) send(ctx context.Context, key string) error { - data, err := withTransforms(ctx, tf.tforms, tf.agg.Get(key)) - if err != nil { - return err - } - - for _, d := range data { - fmt.Println(string(d)) - } - - return nil -} diff --git a/v1/transform/string.go b/v1/transform/string.go deleted file mode 100644 index 171e917f..00000000 --- a/v1/transform/string.go +++ /dev/null @@ -1,45 +0,0 @@ -package transform - -import ( - "fmt" - - iconfig "github.com/brexhq/substation/internal/config" - "github.com/brexhq/substation/internal/errors" -) - -type strCaseConfig struct { - ID string `json:"id"` - Object iconfig.Object `json:"object"` -} - -func (c *strCaseConfig) Decode(in interface{}) error { - return iconfig.Decode(in, c) -} - -func (c *strCaseConfig) Validate() error { - if c.Object.SourceKey == "" && c.Object.TargetKey != "" { - return fmt.Errorf("object_source_key: %v", errors.ErrMissingRequiredOption) - } - - if c.Object.SourceKey != "" && c.Object.TargetKey == "" { - return fmt.Errorf("object_target_key: %v", errors.ErrMissingRequiredOption) - } - - return nil -} - -func strCaptureGetBytesMatch(match [][]byte) []byte { - if len(match) > 1 { - return match[len(match)-1] - } - - return nil -} - -func strCaptureGetStringMatch(match []string) string { - if len(match) > 1 { - return match[len(match)-1] - } - - return "" -} diff --git a/v1/transform/string_append.go b/v1/transform/string_append.go deleted file mode 100644 index 1af277c4..00000000 --- a/v1/transform/string_append.go +++ /dev/null @@ -1,102 +0,0 @@ -package transform - -import ( - "context" - "encoding/json" - "fmt" - - "github.com/brexhq/substation/config" - iconfig "github.com/brexhq/substation/internal/config" - "github.com/brexhq/substation/internal/errors" - "github.com/brexhq/substation/message" -) - -type stringAppendConfig struct { - // Suffix is the string appended to the end of the string. 
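The capture helpers above return the final submatch rather than the first, so patterns with multiple groups yield the last group's value. The underlying behavior with the standard regexp package:

package main

import (
	"fmt"
	"regexp"
)

func main() {
	// FindStringSubmatch returns the full match at index 0 followed by
	// each capture group; the helpers above pick the last entry.
	re := regexp.MustCompile(`^([^@]*)@.*$`)
	match := re.FindStringSubmatch("b@c")
	if len(match) > 1 {
		fmt.Println(match[len(match)-1]) // "b"
	}
}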
- Suffix string `json:"suffix"` - - ID string `json:"id"` - Object iconfig.Object `json:"object"` -} - -func (c *stringAppendConfig) Decode(in interface{}) error { - return iconfig.Decode(in, c) -} - -func (c *stringAppendConfig) Validate() error { - if c.Object.SourceKey == "" && c.Object.TargetKey != "" { - return fmt.Errorf("object_source_key: %v", errors.ErrMissingRequiredOption) - } - - if c.Object.SourceKey != "" && c.Object.TargetKey == "" { - return fmt.Errorf("object_target_key: %v", errors.ErrMissingRequiredOption) - } - - if c.Suffix == "" { - return fmt.Errorf("suffix: %v", errors.ErrMissingRequiredOption) - } - - return nil -} - -type stringAppend struct { - conf stringAppendConfig - isObject bool - - s []byte -} - -func newStringAppend(_ context.Context, cfg config.Config) (*stringAppend, error) { - conf := stringAppendConfig{} - if err := conf.Decode(cfg.Settings); err != nil { - return nil, fmt.Errorf("transform string_append: %v", err) - } - - if conf.ID == "" { - conf.ID = "string_append" - } - - if err := conf.Validate(); err != nil { - return nil, fmt.Errorf("transform %s: %v", conf.ID, err) - } - - tf := stringAppend{ - conf: conf, - isObject: conf.Object.SourceKey != "" && conf.Object.TargetKey != "", - s: []byte(conf.Suffix), - } - - return &tf, nil -} - -func (tf *stringAppend) Transform(ctx context.Context, msg *message.Message) ([]*message.Message, error) { - if msg.IsControl() { - return []*message.Message{msg}, nil - } - - if !tf.isObject { - b := msg.Data() - b = append(b, tf.s...) - - msg.SetData(b) - return []*message.Message{msg}, nil - } - - value := msg.GetValue(tf.conf.Object.SourceKey) - if !value.Exists() { - return []*message.Message{msg}, nil - } - - str := value.String() + tf.conf.Suffix - - if err := msg.SetValue(tf.conf.Object.TargetKey, str); err != nil { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err) - } - - return []*message.Message{msg}, nil -} - -func (tf *stringAppend) String() string { - b, _ := json.Marshal(tf.conf) - return string(b) -} diff --git a/v1/transform/string_append_test.go b/v1/transform/string_append_test.go deleted file mode 100644 index 2c6b124c..00000000 --- a/v1/transform/string_append_test.go +++ /dev/null @@ -1,100 +0,0 @@ -package transform - -import ( - "context" - "reflect" - "testing" - - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" -) - -var _ Transformer = &stringAppend{} - -var stringAppendTests = []struct { - name string - cfg config.Config - test []byte - expected [][]byte -}{ - // data tests - { - "data", - config.Config{ - Settings: map[string]interface{}{ - "suffix": "c", - }, - }, - []byte(`ab`), - [][]byte{ - []byte(`abc`), - }, - }, - // object tests - { - "object", - config.Config{ - Settings: map[string]interface{}{ - "object": map[string]interface{}{ - "source_key": "a", - "target_key": "a", - }, - "suffix": "c", - }, - }, - []byte(`{"a":"b"}`), - [][]byte{ - []byte(`{"a":"bc"}`), - }, - }, -} - -func TestStringAppend(t *testing.T) { - ctx := context.TODO() - for _, test := range stringAppendTests { - t.Run(test.name, func(t *testing.T) { - tf, err := newStringAppend(ctx, test.cfg) - if err != nil { - t.Fatal(err) - } - - msg := message.New().SetData(test.test) - result, err := tf.Transform(ctx, msg) - if err != nil { - t.Error(err) - } - - var data [][]byte - for _, c := range result { - data = append(data, c.Data()) - } - - if !reflect.DeepEqual(data, test.expected) { - t.Errorf("expected %s, got %s", test.expected, data) - } - }) - } -} - -func 
benchmarkStringAppend(b *testing.B, tf *stringAppend, data []byte) { - ctx := context.TODO() - for i := 0; i < b.N; i++ { - msg := message.New().SetData(data) - _, _ = tf.Transform(ctx, msg) - } -} - -func BenchmarkStringAppend(b *testing.B) { - for _, test := range stringAppendTests { - tf, err := newStringAppend(context.TODO(), test.cfg) - if err != nil { - b.Fatal(err) - } - - b.Run(test.name, - func(b *testing.B) { - benchmarkStringAppend(b, tf, test.test) - }, - ) - } -} diff --git a/v1/transform/string_capture.go b/v1/transform/string_capture.go deleted file mode 100644 index 4d5d9cfe..00000000 --- a/v1/transform/string_capture.go +++ /dev/null @@ -1,191 +0,0 @@ -package transform - -import ( - "context" - "encoding/json" - "fmt" - "regexp" - "strings" - - "github.com/brexhq/substation/config" - iconfig "github.com/brexhq/substation/internal/config" - "github.com/brexhq/substation/internal/errors" - "github.com/brexhq/substation/message" -) - -type stringCaptureConfig struct { - // Pattern is the regular expression used to capture values. - Pattern string `json:"pattern"` - re *regexp.Regexp - - // Count is the number of captures to make. - // - // This is optional and defaults to 0, which means that a single - // capture is made. If a named capture group is used, then this - // is ignored. - Count int `json:"count"` - - ID string `json:"id"` - Object iconfig.Object `json:"object"` -} - -func (c *stringCaptureConfig) Decode(in interface{}) error { - return iconfig.Decode(in, c) -} - -func (c *stringCaptureConfig) Validate() error { - if c.Object.SourceKey == "" && c.Object.TargetKey != "" { - return fmt.Errorf("object_source_key: %v", errors.ErrMissingRequiredOption) - } - - if c.Object.SourceKey != "" && c.Object.TargetKey == "" { - return fmt.Errorf("object_target_key: %v", errors.ErrMissingRequiredOption) - } - - if c.Pattern == "" { - return fmt.Errorf("pattern: %v", errors.ErrMissingRequiredOption) - } - - re, err := regexp.Compile(c.Pattern) - if err != nil { - return fmt.Errorf("pattern: %v", err) - } - - c.re = re - - return nil -} - -func newStringCapture(_ context.Context, cfg config.Config) (*stringCapture, error) { - conf := stringCaptureConfig{} - if err := conf.Decode(cfg.Settings); err != nil { - return nil, fmt.Errorf("transform string_capture: %v", err) - } - - if conf.ID == "" { - conf.ID = "string_capture" - } - - if err := conf.Validate(); err != nil { - return nil, fmt.Errorf("transform %s: %v", conf.ID, err) - } - - tf := stringCapture{ - conf: conf, - isObject: conf.Object.SourceKey != "" && conf.Object.TargetKey != "", - // Check if the regular expression contains at least one named capture group. 
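string_capture detects named groups by searching the pattern for the literal "(?P<" (the struct field initialized just below), then pairs each submatch with re.SubexpNames(). The underlying standard-library behavior, in isolation:

package main

import (
	"fmt"
	"regexp"
)

func main() {
	// SubexpNames pairs each submatch index with its (?P<name>) label;
	// index 0 is the full match and has an empty name.
	re := regexp.MustCompile(`(?P<user>[^@]+)@(?P<domain>.+)`)
	matches := re.FindStringSubmatch("b@c.co")
	for i, m := range matches {
		if i == 0 {
			continue
		}
		fmt.Printf("%s=%s\n", re.SubexpNames()[i], m)
	}
	// user=b
	// domain=c.co
}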
- containsCaptureGroup: strings.Contains(conf.Pattern, "(?P<"), - } - - return &tf, nil -} - -type stringCapture struct { - conf stringCaptureConfig - isObject bool - containsCaptureGroup bool -} - -func (tf *stringCapture) Transform(_ context.Context, msg *message.Message) ([]*message.Message, error) { - if msg.IsControl() { - return []*message.Message{msg}, nil - } - - if !tf.isObject { - switch { - case tf.containsCaptureGroup: - outMsg := message.New().SetMetadata(msg.Metadata()) - - matches := tf.conf.re.FindSubmatch(msg.Data()) - for i, m := range matches { - if i == 0 { - continue - } - - if err := outMsg.SetValue(tf.conf.re.SubexpNames()[i], m); err != nil { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err) - } - } - - return []*message.Message{outMsg}, nil - - case tf.conf.Count == 0: - matches := tf.conf.re.FindSubmatch(msg.Data()) - msg.SetData(strCaptureGetBytesMatch(matches)) - - return []*message.Message{msg}, nil - - default: - tmpMsg := message.New() - subs := tf.conf.re.FindAllSubmatch(msg.Data(), tf.conf.Count) - - for _, s := range subs { - m := strCaptureGetBytesMatch(s) - if err := tmpMsg.SetValue("key.-1", m); err != nil { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err) - } - } - - v := tmpMsg.GetValue("key") - msg.SetData(v.Bytes()) - - return []*message.Message{msg}, nil - } - } - - value := msg.GetValue(tf.conf.Object.SourceKey) - if !value.Exists() { - return []*message.Message{msg}, nil - } - - switch { - case tf.containsCaptureGroup: - matches := tf.conf.re.FindStringSubmatch(value.String()) - for i, match := range matches { - if i == 0 { - continue - } - - // If the same key is used multiple times, then this will correctly - // set multiple named groups into that key. - // - // If set_key is "a" and the first group returns {"b":"c"}, then - // the output is {"a":{"b":"c"}}. If the second group returns - // {"d":"e"} then the output is {"a":{"b":"c","d":"e"}}. - setKey := tf.conf.Object.TargetKey + "." 
+ tf.conf.re.SubexpNames()[i] - if err := msg.SetValue(setKey, match); err != nil { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err) - } - } - - return []*message.Message{msg}, nil - - case tf.conf.Count == 0: - matches := tf.conf.re.FindStringSubmatch(value.String()) - if err := msg.SetValue(tf.conf.Object.TargetKey, strCaptureGetStringMatch(matches)); err != nil { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err) - } - - return []*message.Message{msg}, nil - - default: - var matches []string - subs := tf.conf.re.FindAllStringSubmatch(value.String(), tf.conf.Count) - - for _, s := range subs { - m := strCaptureGetStringMatch(s) - matches = append(matches, m) - } - - if err := msg.SetValue(tf.conf.Object.TargetKey, matches); err != nil { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err) - } - - return []*message.Message{msg}, nil - } -} - -func (tf *stringCapture) String() string { - b, _ := json.Marshal(tf.conf) - return string(b) -} diff --git a/v1/transform/string_capture_test.go b/v1/transform/string_capture_test.go deleted file mode 100644 index 186b5051..00000000 --- a/v1/transform/string_capture_test.go +++ /dev/null @@ -1,188 +0,0 @@ -package transform - -import ( - "context" - "reflect" - "testing" - - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" -) - -var _ Transformer = &stringCapture{} - -var stringCaptureTests = []struct { - name string - cfg config.Config - test []byte - expected [][]byte -}{ - // data tests - { - "data", - config.Config{ - Settings: map[string]interface{}{ - "pattern": "^([^@]*)@.*$", - }, - }, - []byte(`b@c`), - [][]byte{ - []byte(`b`), - }, - }, - { - "data", - config.Config{ - Settings: map[string]interface{}{ - "count": 3, - "pattern": "(.{1})", - }, - }, - []byte(`bcd`), - [][]byte{ - []byte(`["b","c","d"]`), - }, - }, - { - "data", - config.Config{ - Settings: map[string]interface{}{ - "count": 1, - "pattern": "(.{1})", - }, - }, - []byte(`bcd`), - [][]byte{ - []byte(`["b"]`), - }, - }, - { - "data", - config.Config{ - Settings: map[string]interface{}{ - "pattern": "(?P<b>[a-zA-Z]+) (?P<d>[a-zA-Z]+)", - }, - }, - []byte(`c e`), - [][]byte{ - []byte(`{"b":"c","d":"e"}`), - }, - }, - // object tests - { - "object", - config.Config{ - Settings: map[string]interface{}{ - "object": map[string]interface{}{ - "source_key": "a", - "target_key": "a", - }, - "pattern": "^([^@]*)@.*$", - }, - }, - []byte(`{"a":"b@c"}`), - [][]byte{ - []byte(`{"a":"b"}`), - }, - }, - { - "object", - config.Config{ - Settings: map[string]interface{}{ - "object": map[string]interface{}{ - "source_key": "a", - "target_key": "a", - }, - "count": 3, - "pattern": "(.{1})", - }, - }, - []byte(`{"a":"bcd"}`), - [][]byte{ - []byte(`{"a":["b","c","d"]}`), - }, - }, - { - "object", - config.Config{ - Settings: map[string]interface{}{ - "object": map[string]interface{}{ - "source_key": "a", - "target_key": "a", - }, - "count": 1, - "pattern": "(.{1})", - }, - }, - []byte(`{"a":"bcd"}`), - [][]byte{ - []byte(`{"a":["b"]}`), - }, - }, - { - "object", - config.Config{ - Settings: map[string]interface{}{ - "object": map[string]interface{}{ - "source_key": "a", - "target_key": "a", - }, - "pattern": "(?P<b>[a-zA-Z]+) (?P<d>[a-zA-Z]+)", - }, - }, - []byte(`{"a":"c e"}`), - [][]byte{ - []byte(`{"a":{"b":"c","d":"e"}}`), - }, - }, -} - -func TestStringCapture(t *testing.T) { - ctx := context.TODO() - for _, test := range stringCaptureTests { - t.Run(test.name, func(t *testing.T) { - tf, err := newStringCapture(ctx, test.cfg) - if err != nil {
- t.Fatal(err) - } - - msg := message.New().SetData(test.test) - result, err := tf.Transform(ctx, msg) - if err != nil { - t.Error(err) - } - - var data [][]byte - for _, c := range result { - data = append(data, c.Data()) - } - - if !reflect.DeepEqual(data, test.expected) { - t.Errorf("expected %s, got %s", test.expected, data) - } - }) - } -} - -func benchmarkStringCapture(b *testing.B, tf *stringCapture, data []byte) { - ctx := context.TODO() - for i := 0; i < b.N; i++ { - msg := message.New().SetData(data) - _, _ = tf.Transform(ctx, msg) - } -} - -func BenchmarkStringCapture(b *testing.B) { - for _, test := range stringCaptureTests { - tf, err := newStringCapture(context.TODO(), test.cfg) - if err != nil { - b.Fatal(err) - } - - b.Run(test.name, - func(b *testing.B) { - benchmarkStringCapture(b, tf, test.test) - }, - ) - } -} diff --git a/v1/transform/string_replace.go b/v1/transform/string_replace.go deleted file mode 100644 index 6d2d3f1b..00000000 --- a/v1/transform/string_replace.go +++ /dev/null @@ -1,111 +0,0 @@ -package transform - -import ( - "context" - "encoding/json" - "fmt" - "regexp" - - "github.com/brexhq/substation/config" - iconfig "github.com/brexhq/substation/internal/config" - "github.com/brexhq/substation/internal/errors" - "github.com/brexhq/substation/message" -) - -type stringReplaceConfig struct { - // Pattern is the regular expression used to identify values to replace. - Pattern string `json:"pattern"` - re *regexp.Regexp - // Replacement is the string to replace the matched values with. - Replacement string `json:"replacement"` - - ID string `json:"id"` - Object iconfig.Object `json:"object"` -} - -func (c *stringReplaceConfig) Decode(in interface{}) error { - return iconfig.Decode(in, c) -} - -func (c *stringReplaceConfig) Validate() error { - if c.Object.SourceKey == "" && c.Object.TargetKey != "" { - return fmt.Errorf("object_source_key: %v", errors.ErrMissingRequiredOption) - } - - if c.Object.SourceKey != "" && c.Object.TargetKey == "" { - return fmt.Errorf("object_target_key: %v", errors.ErrMissingRequiredOption) - } - - if c.Pattern == "" { - return fmt.Errorf("old: %v", errors.ErrMissingRequiredOption) - } - - re, err := regexp.Compile(c.Pattern) - if err != nil { - return fmt.Errorf("pattern: %v", err) - } - - c.re = re - - return nil -} - -func newStringReplace(_ context.Context, cfg config.Config) (*stringReplace, error) { - conf := stringReplaceConfig{} - if err := conf.Decode(cfg.Settings); err != nil { - return nil, fmt.Errorf("transform string_replace: %v", err) - } - - if conf.ID == "" { - conf.ID = "string_replace" - } - - if err := conf.Validate(); err != nil { - return nil, fmt.Errorf("transform %s: %v", conf.ID, err) - } - - tf := stringReplace{ - conf: conf, - isObject: conf.Object.SourceKey != "" && conf.Object.TargetKey != "", - r: []byte(conf.Replacement), - } - - return &tf, nil -} - -type stringReplace struct { - conf stringReplaceConfig - isObject bool - - r []byte -} - -func (tf *stringReplace) Transform(ctx context.Context, msg *message.Message) ([]*message.Message, error) { - if msg.IsControl() { - return []*message.Message{msg}, nil - } - - if !tf.isObject { - b := tf.conf.re.ReplaceAll(msg.Data(), tf.r) - msg.SetData(b) - - return []*message.Message{msg}, nil - } - - value := msg.GetValue(tf.conf.Object.SourceKey) - if !value.Exists() { - return []*message.Message{msg}, nil - } - - s := tf.conf.re.ReplaceAllString(value.String(), string(tf.r)) - if err := msg.SetValue(tf.conf.Object.TargetKey, s); err != nil { - return nil, 
fmt.Errorf("transform %s: %v", tf.conf.ID, err) - } - - return []*message.Message{msg}, nil -} - -func (tf *stringReplace) String() string { - b, _ := json.Marshal(tf.conf) - return string(b) -} diff --git a/v1/transform/string_replace_test.go b/v1/transform/string_replace_test.go deleted file mode 100644 index 9cbd529d..00000000 --- a/v1/transform/string_replace_test.go +++ /dev/null @@ -1,131 +0,0 @@ -package transform - -import ( - "context" - "reflect" - "testing" - - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" -) - -var _ Transformer = &stringReplace{} - -var stringReplaceTests = []struct { - name string - cfg config.Config - test []byte - expected [][]byte -}{ - // data tests - { - "data replace", - config.Config{ - Settings: map[string]interface{}{ - "pattern": "c", - "replacement": "b", - }, - }, - []byte(`abc`), - [][]byte{ - []byte(`abb`), - }, - }, - { - "data remove", - config.Config{ - Settings: map[string]interface{}{ - "pattern": "c", - "replacement": "", - }, - }, - []byte(`abc`), - [][]byte{ - []byte(`ab`), - }, - }, - // object tests - { - "object replace", - config.Config{ - Settings: map[string]interface{}{ - "object": map[string]interface{}{ - "source_key": "a", - "target_key": "a", - }, - "pattern": "c", - "replacement": "b", - }, - }, - []byte(`{"a":"bc"}`), - [][]byte{ - []byte(`{"a":"bb"}`), - }, - }, - { - "object remove", - config.Config{ - Settings: map[string]interface{}{ - "object": map[string]interface{}{ - "source_key": "a", - "target_key": "a", - }, - "pattern": "c", - }, - }, - []byte(`{"a":"bc"}`), - [][]byte{ - []byte(`{"a":"b"}`), - }, - }, -} - -func TestStringReplace(t *testing.T) { - ctx := context.TODO() - for _, test := range stringReplaceTests { - t.Run(test.name, func(t *testing.T) { - tf, err := newStringReplace(ctx, test.cfg) - if err != nil { - t.Fatal(err) - } - - msg := message.New().SetData(test.test) - result, err := tf.Transform(ctx, msg) - if err != nil { - t.Error(err) - } - - var data [][]byte - for _, c := range result { - data = append(data, c.Data()) - } - - if !reflect.DeepEqual(data, test.expected) { - t.Errorf("expected %s, got %s", test.expected, data) - } - }) - } -} - -func benchmarkStringReplace(b *testing.B, tf *stringReplace, data []byte) { - ctx := context.TODO() - for i := 0; i < b.N; i++ { - msg := message.New().SetData(data) - _, _ = tf.Transform(ctx, msg) - } -} - -func BenchmarkStringReplace(b *testing.B) { - for _, test := range stringReplaceTests { - tf, err := newStringReplace(context.TODO(), test.cfg) - if err != nil { - b.Fatal(err) - } - - b.Run(test.name, - func(b *testing.B) { - benchmarkStringReplace(b, tf, test.test) - }, - ) - } -} diff --git a/v1/transform/string_split.go b/v1/transform/string_split.go deleted file mode 100644 index 330482c2..00000000 --- a/v1/transform/string_split.go +++ /dev/null @@ -1,112 +0,0 @@ -package transform - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "strings" - - "github.com/brexhq/substation/config" - iconfig "github.com/brexhq/substation/internal/config" - "github.com/brexhq/substation/internal/errors" - "github.com/brexhq/substation/message" -) - -type stringSplitConfig struct { - // Separator splits the string into elements of the array. 
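- // For example, a separator of "." splits "b.c.d" into ["b","c","d"].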
- Separator string `json:"separator"` - - ID string `json:"id"` - Object iconfig.Object `json:"object"` -} - -func (c *stringSplitConfig) Decode(in interface{}) error { - return iconfig.Decode(in, c) -} - -func (c *stringSplitConfig) Validate() error { - if c.Object.SourceKey == "" && c.Object.TargetKey != "" { - return fmt.Errorf("object_source_key: %v", errors.ErrMissingRequiredOption) - } - - if c.Object.SourceKey != "" && c.Object.TargetKey == "" { - return fmt.Errorf("object_target_key: %v", errors.ErrMissingRequiredOption) - } - - if c.Separator == "" { - return fmt.Errorf("separator: %v", errors.ErrMissingRequiredOption) - } - - return nil -} - -type stringSplit struct { - conf stringSplitConfig - isObject bool - - separator []byte -} - -func newStringSplit(_ context.Context, cfg config.Config) (*stringSplit, error) { - conf := stringSplitConfig{} - if err := conf.Decode(cfg.Settings); err != nil { - return nil, fmt.Errorf("transform string_split: %v", err) - } - - if conf.ID == "" { - conf.ID = "string_split" - } - - if err := conf.Validate(); err != nil { - return nil, fmt.Errorf("transform %s: %v", conf.ID, err) - } - - tf := stringSplit{ - conf: conf, - isObject: conf.Object.SourceKey != "" && conf.Object.TargetKey != "", - separator: []byte(conf.Separator), - } - - return &tf, nil -} - -func (tf *stringSplit) Transform(ctx context.Context, msg *message.Message) ([]*message.Message, error) { - if msg.IsControl() { - return []*message.Message{msg}, nil - } - - if !tf.isObject { - tmpMsg := message.New() - - b := bytes.Split(msg.Data(), tf.separator) - for _, v := range b { - if err := tmpMsg.SetValue("key.-1", v); err != nil { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err) - } - } - - value := tmpMsg.GetValue("key") - msg.SetData(value.Bytes()) - - return []*message.Message{msg}, nil - } - - value := msg.GetValue(tf.conf.Object.SourceKey) - if !value.Exists() { - return []*message.Message{msg}, nil - } - - str := strings.Split(value.String(), tf.conf.Separator) - - if err := msg.SetValue(tf.conf.Object.TargetKey, str); err != nil { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err) - } - - return []*message.Message{msg}, nil -} - -func (tf *stringSplit) String() string { - b, _ := json.Marshal(tf.conf) - return string(b) -} diff --git a/v1/transform/string_split_test.go b/v1/transform/string_split_test.go deleted file mode 100644 index 99706efe..00000000 --- a/v1/transform/string_split_test.go +++ /dev/null @@ -1,100 +0,0 @@ -package transform - -import ( - "context" - "reflect" - "testing" - - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" -) - -var _ Transformer = &stringSplit{} - -var stringSplitTests = []struct { - name string - cfg config.Config - test []byte - expected [][]byte -}{ - // data tests - { - "data", - config.Config{ - Settings: map[string]interface{}{ - "separator": ".", - }, - }, - []byte(`b.c.d`), - [][]byte{ - []byte(`["b","c","d"]`), - }, - }, - // object tests - { - "object", - config.Config{ - Settings: map[string]interface{}{ - "object": map[string]interface{}{ - "source_key": "a", - "target_key": "a", - }, - "separator": ".", - }, - }, - []byte(`{"a":"b.c.d"}`), - [][]byte{ - []byte(`{"a":["b","c","d"]}`), - }, - }, -} - -func TestStringSplit(t *testing.T) { - ctx := context.TODO() - for _, test := range stringSplitTests { - t.Run(test.name, func(t *testing.T) { - tf, err := newStringSplit(ctx, test.cfg) - if err != nil { - t.Fatal(err) - } - - msg := message.New().SetData(test.test) - result, err 
:= tf.Transform(ctx, msg) - if err != nil { - t.Error(err) - } - - var data [][]byte - for _, c := range result { - data = append(data, c.Data()) - } - - if !reflect.DeepEqual(data, test.expected) { - t.Errorf("expected %s, got %s", test.expected, data) - } - }) - } -} - -func benchmarkStringSplit(b *testing.B, tf *stringSplit, data []byte) { - ctx := context.TODO() - for i := 0; i < b.N; i++ { - msg := message.New().SetData(data) - _, _ = tf.Transform(ctx, msg) - } -} - -func BenchmarkStringSplit(b *testing.B) { - for _, test := range stringSplitTests { - tf, err := newStringSplit(context.TODO(), test.cfg) - if err != nil { - b.Fatal(err) - } - - b.Run(test.name, - func(b *testing.B) { - benchmarkStringSplit(b, tf, test.test) - }, - ) - } -} diff --git a/v1/transform/string_to_lower.go b/v1/transform/string_to_lower.go deleted file mode 100644 index 19673794..00000000 --- a/v1/transform/string_to_lower.go +++ /dev/null @@ -1,69 +0,0 @@ -package transform - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "strings" - - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" -) - -func newStringToLower(_ context.Context, cfg config.Config) (*stringToLower, error) { - conf := strCaseConfig{} - if err := conf.Decode(cfg.Settings); err != nil { - return nil, fmt.Errorf("transform string_to_lower: %v", err) - } - - if conf.ID == "" { - conf.ID = "string_to_lower" - } - - if err := conf.Validate(); err != nil { - return nil, fmt.Errorf("transform %s: %v", conf.ID, err) - } - - tf := stringToLower{ - conf: conf, - isObject: conf.Object.SourceKey != "" && conf.Object.TargetKey != "", - } - - return &tf, nil -} - -type stringToLower struct { - conf strCaseConfig - isObject bool -} - -func (tf *stringToLower) Transform(ctx context.Context, msg *message.Message) ([]*message.Message, error) { - if msg.IsControl() { - return []*message.Message{msg}, nil - } - - if !tf.isObject { - b := bytes.ToLower(msg.Data()) - msg.SetData(b) - - return []*message.Message{msg}, nil - } - - value := msg.GetValue(tf.conf.Object.SourceKey) - if !value.Exists() { - return []*message.Message{msg}, nil - } - - s := strings.ToLower(value.String()) - if err := msg.SetValue(tf.conf.Object.TargetKey, s); err != nil { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err) - } - - return []*message.Message{msg}, nil -} - -func (tf *stringToLower) String() string { - b, _ := json.Marshal(tf.conf) - return string(b) -} diff --git a/v1/transform/string_to_lower_test.go b/v1/transform/string_to_lower_test.go deleted file mode 100644 index 5f3a5fd3..00000000 --- a/v1/transform/string_to_lower_test.go +++ /dev/null @@ -1,95 +0,0 @@ -package transform - -import ( - "context" - "reflect" - "testing" - - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" -) - -var _ Transformer = &stringToLower{} - -var stringToLowerTests = []struct { - name string - cfg config.Config - test []byte - expected [][]byte -}{ - // data tests - { - "data", - config.Config{}, - []byte(`B`), - [][]byte{ - []byte(`b`), - }, - }, - // object tests - { - "object", - config.Config{ - Settings: map[string]interface{}{ - "object": map[string]interface{}{ - "source_key": "a", - "target_key": "a", - }, - }, - }, - []byte(`{"a":"B"}`), - [][]byte{ - []byte(`{"a":"b"}`), - }, - }, -} - -func TestStringToLower(t *testing.T) { - ctx := context.TODO() - for _, test := range stringToLowerTests { - t.Run(test.name, func(t *testing.T) { - tf, err := newStringToLower(ctx, test.cfg) - if err != nil { - t.Fatal(err)
- } - - msg := message.New().SetData(test.test) - result, err := tf.Transform(ctx, msg) - if err != nil { - t.Error(err) - } - - var r [][]byte - for _, c := range result { - r = append(r, c.Data()) - } - - if !reflect.DeepEqual(r, test.expected) { - t.Errorf("expected %s, got %s", test.expected, r) - } - }) - } -} - -func benchmarkStringToLower(b *testing.B, tf *stringToLower, data []byte) { - ctx := context.TODO() - for i := 0; i < b.N; i++ { - msg := message.New().SetData(data) - _, _ = tf.Transform(ctx, msg) - } -} - -func BenchmarkStringToLower(b *testing.B) { - for _, test := range stringToLowerTests { - tf, err := newStringToLower(context.TODO(), test.cfg) - if err != nil { - b.Fatal(err) - } - - b.Run(test.name, - func(b *testing.B) { - benchmarkStringToLower(b, tf, test.test) - }, - ) - } -} diff --git a/v1/transform/string_to_snake.go b/v1/transform/string_to_snake.go deleted file mode 100644 index 7683e591..00000000 --- a/v1/transform/string_to_snake.go +++ /dev/null @@ -1,68 +0,0 @@ -package transform - -import ( - "context" - "encoding/json" - "fmt" - - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" - "github.com/iancoleman/strcase" -) - -func newStringToSnake(_ context.Context, cfg config.Config) (*stringToSnake, error) { - conf := strCaseConfig{} - if err := conf.Decode(cfg.Settings); err != nil { - return nil, fmt.Errorf("transform string_to_snake: %v", err) - } - - if conf.ID == "" { - conf.ID = "string_to_snake" - } - - if err := conf.Validate(); err != nil { - return nil, fmt.Errorf("transform %s: %v", conf.ID, err) - } - - tf := stringToSnake{ - conf: conf, - isObject: conf.Object.SourceKey != "" && conf.Object.TargetKey != "", - } - - return &tf, nil -} - -type stringToSnake struct { - conf strCaseConfig - isObject bool -} - -func (tf *stringToSnake) Transform(ctx context.Context, msg *message.Message) ([]*message.Message, error) { - if msg.IsControl() { - return []*message.Message{msg}, nil - } - - if !tf.isObject { - b := []byte(strcase.ToSnake(string(msg.Data()))) - msg.SetData(b) - - return []*message.Message{msg}, nil - } - - value := msg.GetValue(tf.conf.Object.SourceKey) - if !value.Exists() { - return []*message.Message{msg}, nil - } - - s := strcase.ToSnake(value.String()) - if err := msg.SetValue(tf.conf.Object.TargetKey, s); err != nil { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err) - } - - return []*message.Message{msg}, nil -} - -func (tf *stringToSnake) String() string { - b, _ := json.Marshal(tf.conf) - return string(b) -} diff --git a/v1/transform/string_to_snake_test.go b/v1/transform/string_to_snake_test.go deleted file mode 100644 index a4954527..00000000 --- a/v1/transform/string_to_snake_test.go +++ /dev/null @@ -1,95 +0,0 @@ -package transform - -import ( - "context" - "reflect" - "testing" - - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" -) - -var _ Transformer = &stringToSnake{} - -var stringToSnakeTests = []struct { - name string - cfg config.Config - test []byte - expected [][]byte -}{ - // data tests - { - "data", - config.Config{}, - []byte(`bC`), - [][]byte{ - []byte(`b_c`), - }, - }, - // object tests - { - "object", - config.Config{ - Settings: map[string]interface{}{ - "object": map[string]interface{}{ - "source_key": "a", - "target_key": "a", - }, - }, - }, - []byte(`{"a":"bC"}`), - [][]byte{ - []byte(`{"a":"b_c"}`), - }, - }, -} - -func TestStringToSnake(t *testing.T) { - ctx := context.TODO() - for _, test := range stringToSnakeTests { -
t.Run(test.name, func(t *testing.T) { - tf, err := newStringToSnake(ctx, test.cfg) - if err != nil { - t.Fatal(err) - } - - msg := message.New().SetData(test.test) - result, err := tf.Transform(ctx, msg) - if err != nil { - t.Error(err) - } - - var r [][]byte - for _, c := range result { - r = append(r, c.Data()) - } - - if !reflect.DeepEqual(r, test.expected) { - t.Errorf("expected %s, got %s", test.expected, r) - } - }) - } -} - -func benchmarkStringToSnake(b *testing.B, tf *stringToSnake, data []byte) { - ctx := context.TODO() - for i := 0; i < b.N; i++ { - msg := message.New().SetData(data) - _, _ = tf.Transform(ctx, msg) - } -} - -func BenchmarkStringToSnake(b *testing.B) { - for _, test := range stringToSnakeTests { - tf, err := newStringToSnake(context.TODO(), test.cfg) - if err != nil { - b.Fatal(err) - } - - b.Run(test.name, - func(b *testing.B) { - benchmarkStringToSnake(b, tf, test.test) - }, - ) - } -} diff --git a/v1/transform/string_to_upper.go b/v1/transform/string_to_upper.go deleted file mode 100644 index 983cd1b7..00000000 --- a/v1/transform/string_to_upper.go +++ /dev/null @@ -1,69 +0,0 @@ -package transform - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "strings" - - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" -) - -func newStringToUpper(_ context.Context, cfg config.Config) (*stringToUpper, error) { - conf := strCaseConfig{} - if err := conf.Decode(cfg.Settings); err != nil { - return nil, fmt.Errorf("transform string_to_upper: %v", err) - } - - if conf.ID == "" { - conf.ID = "string_to_upper" - } - - if err := conf.Validate(); err != nil { - return nil, fmt.Errorf("transform %s: %v", conf.ID, err) - } - - tf := stringToUpper{ - conf: conf, - isObject: conf.Object.SourceKey != "" && conf.Object.TargetKey != "", - } - - return &tf, nil -} - -type stringToUpper struct { - conf strCaseConfig - isObject bool -} - -func (tf *stringToUpper) Transform(ctx context.Context, msg *message.Message) ([]*message.Message, error) { - if msg.IsControl() { - return []*message.Message{msg}, nil - } - - if !tf.isObject { - b := bytes.ToUpper(msg.Data()) - msg.SetData(b) - - return []*message.Message{msg}, nil - } - - value := msg.GetValue(tf.conf.Object.SourceKey) - if !value.Exists() { - return []*message.Message{msg}, nil - } - - s := strings.ToUpper(value.String()) - if err := msg.SetValue(tf.conf.Object.TargetKey, s); err != nil { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err) - } - - return []*message.Message{msg}, nil -} - -func (tf *stringToUpper) String() string { - b, _ := json.Marshal(tf.conf) - return string(b) -} diff --git a/v1/transform/string_to_upper_test.go b/v1/transform/string_to_upper_test.go deleted file mode 100644 index 029e1257..00000000 --- a/v1/transform/string_to_upper_test.go +++ /dev/null @@ -1,95 +0,0 @@ -package transform - -import ( - "context" - "reflect" - "testing" - - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" -) - -var _ Transformer = &stringToUpper{} - -var stringToUpperTests = []struct { - name string - cfg config.Config - test []byte - expected [][]byte -}{ - // data tests - { - "data", - config.Config{}, - []byte(`b`), - [][]byte{ - []byte(`B`), - }, - }, - // object tests - { - "object", - config.Config{ - Settings: map[string]interface{}{ - "object": map[string]interface{}{ - "source_key": "a", - "target_key": "a", - }, - }, - }, - []byte(`{"a":"b"}`), - [][]byte{ - []byte(`{"a":"B"}`), - }, - }, -} - -func TestStringToUpper(t *testing.T) { - ctx
:= context.TODO() - for _, test := range stringToUpperTests { - t.Run(test.name, func(t *testing.T) { - tf, err := newStringToUpper(ctx, test.cfg) - if err != nil { - t.Fatal(err) - } - - msg := message.New().SetData(test.test) - result, err := tf.Transform(ctx, msg) - if err != nil { - t.Error(err) - } - - var r [][]byte - for _, c := range result { - r = append(r, c.Data()) - } - - if !reflect.DeepEqual(r, test.expected) { - t.Errorf("expected %s, got %s", test.expected, r) - } - }) - } -} - -func benchmarkStringToUpper(b *testing.B, tf *stringToUpper, data []byte) { - ctx := context.TODO() - for i := 0; i < b.N; i++ { - msg := message.New().SetData(data) - _, _ = tf.Transform(ctx, msg) - } -} - -func BenchmarkStringToUpper(b *testing.B) { - for _, test := range stringToUpperTests { - tf, err := newStringToUpper(context.TODO(), test.cfg) - if err != nil { - b.Fatal(err) - } - - b.Run(test.name, - func(b *testing.B) { - benchmarkStringToUpper(b, tf, test.test) - }, - ) - } -} diff --git a/v1/transform/string_uuid.go b/v1/transform/string_uuid.go deleted file mode 100644 index 59380bd2..00000000 --- a/v1/transform/string_uuid.go +++ /dev/null @@ -1,67 +0,0 @@ -package transform - -import ( - "context" - "encoding/json" - "fmt" - - "github.com/brexhq/substation/config" - iconfig "github.com/brexhq/substation/internal/config" - "github.com/brexhq/substation/message" - "github.com/google/uuid" -) - -type stringUUIDConfig struct { - ID string `json:"id"` - Object iconfig.Object `json:"object"` -} - -func (c *stringUUIDConfig) Decode(in interface{}) error { - return iconfig.Decode(in, c) -} - -func newStringUUID(_ context.Context, cfg config.Config) (*stringUUID, error) { - conf := stringUUIDConfig{} - if err := conf.Decode(cfg.Settings); err != nil { - return nil, fmt.Errorf("transform string_uuid: %v", err) - } - - if conf.ID == "" { - conf.ID = "string_uuid" - } - - tf := stringUUID{ - conf: conf, - hasObjectSetKey: conf.Object.TargetKey != "", - } - - return &tf, nil -} - -type stringUUID struct { - conf stringUUIDConfig - hasObjectSetKey bool -} - -func (tf *stringUUID) Transform(_ context.Context, msg *message.Message) ([]*message.Message, error) { - if msg.IsControl() { - return []*message.Message{msg}, nil - } - - uid := uuid.NewString() - if tf.hasObjectSetKey { - if err := msg.SetValue(tf.conf.Object.TargetKey, uid); err != nil { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err) - } - - return []*message.Message{msg}, nil - } - - msg.SetData([]byte(uid)) - return []*message.Message{msg}, nil -} - -func (tf *stringUUID) String() string { - b, _ := json.Marshal(tf.conf) - return string(b) -} diff --git a/v1/transform/time.go b/v1/transform/time.go deleted file mode 100644 index a1a91c0f..00000000 --- a/v1/transform/time.go +++ /dev/null @@ -1,108 +0,0 @@ -package transform - -import ( - "fmt" - "time" - - iconfig "github.com/brexhq/substation/internal/config" - "github.com/brexhq/substation/internal/errors" -) - -const ( - timeDefaultFmt = "2006-01-02T15:04:05.000Z" -) - -type timeUnixConfig struct { - ID string `json:"id"` - Object iconfig.Object `json:"object"` -} - -func (c *timeUnixConfig) Decode(in interface{}) error { - return iconfig.Decode(in, c) -} - -func (c *timeUnixConfig) Validate() error { - if c.Object.SourceKey == "" && c.Object.TargetKey != "" { - return fmt.Errorf("object_source_key: %v", errors.ErrMissingRequiredOption) - } - - if c.Object.SourceKey != "" && c.Object.TargetKey == "" { - return fmt.Errorf("object_target_key: %v", 
errors.ErrMissingRequiredOption) - } - - return nil -} - -type timePatternConfig struct { - ID string `json:"id"` - Object iconfig.Object `json:"object"` - - Format string `json:"format"` - Location string `json:"location"` -} - -func (c *timePatternConfig) Decode(in interface{}) error { - return iconfig.Decode(in, c) -} - -func (c *timePatternConfig) Validate() error { - if c.Object.SourceKey == "" && c.Object.TargetKey != "" { - return fmt.Errorf("object_source_key: %v", errors.ErrMissingRequiredOption) - } - - if c.Object.SourceKey != "" && c.Object.TargetKey == "" { - return fmt.Errorf("object_target_key: %v", errors.ErrMissingRequiredOption) - } - - if c.Format == "" { - return fmt.Errorf("format: %v", errors.ErrMissingRequiredOption) - } - - return nil -} - -func timeUnixToBytes(t time.Time) []byte { - return []byte(fmt.Sprintf("%d", t.UnixNano())) -} - -// timeUnixToStr converts a UnixNano timestamp to a string. -func timeUnixToStr(ts int64, timeFmt string, loc string) (string, error) { - timeDate := time.Unix(0, ts) - - if loc != "" { - ll, err := time.LoadLocation(loc) - if err != nil { - return "", fmt.Errorf("location %s: %v", loc, err) - } - - timeDate = timeDate.In(ll) - } - - return timeDate.Format(timeFmt), nil -} - -func timeStrToUnix(timeStr, timeFmt string, loc string) (time.Time, error) { - var timeDate time.Time - if loc != "" { - ll, err := time.LoadLocation(loc) - if err != nil { - return timeDate, fmt.Errorf("location %s: %v", loc, err) - } - - pil, err := time.ParseInLocation(timeFmt, timeStr, ll) - if err != nil { - return timeDate, fmt.Errorf("format %s location %s: %v", timeFmt, loc, err) - } - - timeDate = pil - } else { - p, err := time.Parse(timeFmt, timeStr) - if err != nil { - return timeDate, fmt.Errorf("format %s: %v", timeFmt, err) - } - - timeDate = p - } - - return timeDate, nil -} diff --git a/v1/transform/time_from_string.go b/v1/transform/time_from_string.go deleted file mode 100644 index 4b3b14e0..00000000 --- a/v1/transform/time_from_string.go +++ /dev/null @@ -1,75 +0,0 @@ -package transform - -import ( - "context" - "encoding/json" - "fmt" - - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" -) - -func newTimeFromString(_ context.Context, cfg config.Config) (*timeFromString, error) { - conf := timePatternConfig{} - if err := conf.Decode(cfg.Settings); err != nil { - return nil, fmt.Errorf("transform time_from_string: %v", err) - } - - if conf.ID == "" { - conf.ID = "time_from_string" - } - - if err := conf.Validate(); err != nil { - return nil, fmt.Errorf("transform %s: %v", conf.ID, err) - } - - tf := timeFromString{ - conf: conf, - isObject: conf.Object.SourceKey != "" && conf.Object.TargetKey != "", - } - - return &tf, nil -} - -type timeFromString struct { - conf timePatternConfig - isObject bool -} - -func (tf *timeFromString) Transform(ctx context.Context, msg *message.Message) ([]*message.Message, error) { - if msg.IsControl() { - return []*message.Message{msg}, nil - } - - var value message.Value - if tf.isObject { - value = msg.GetValue(tf.conf.Object.SourceKey) - } else { - value = bytesToValue(msg.Data()) - } - - if !value.Exists() { - return []*message.Message{msg}, nil - } - - date, err := timeStrToUnix(value.String(), tf.conf.Format, tf.conf.Location) - if err != nil { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err) - } - - if tf.isObject { - if err := msg.SetValue(tf.conf.Object.TargetKey, date.UnixNano()); err != nil { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err) - } - } 
else { - value := timeUnixToBytes(date) - msg.SetData(value) - } - - return []*message.Message{msg}, nil -} - -func (tf *timeFromString) String() string { - b, _ := json.Marshal(tf.conf) - return string(b) -} diff --git a/v1/transform/time_from_string_test.go b/v1/transform/time_from_string_test.go deleted file mode 100644 index e8420ed2..00000000 --- a/v1/transform/time_from_string_test.go +++ /dev/null @@ -1,114 +0,0 @@ -package transform - -import ( - "context" - "reflect" - "testing" - - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" -) - -var _ Transformer = &timeFromString{} - -var timeFromStringTests = []struct { - name string - cfg config.Config - test []byte - expected [][]byte -}{ - // data tests - { - "data", - config.Config{ - Settings: map[string]interface{}{ - "format": timeDefaultFmt, - }, - }, - []byte(`2021-12-19T01:31:30.000Z`), - [][]byte{ - []byte(`1639877490000000000`), - }, - }, - { - "data with_location", - config.Config{ - Settings: map[string]interface{}{ - "format": timeDefaultFmt, - // Offset from UTC by -5 hours. - "location": "America/New_York", - }, - }, - []byte(`2021-12-19T01:31:30.000Z`), - [][]byte{ - []byte(`1639895490000000000`), - }, - }, - // object tests - { - "object", - config.Config{ - Settings: map[string]interface{}{ - "object": map[string]interface{}{ - "source_key": "a", - "target_key": "a", - }, - "format": timeDefaultFmt, - }, - }, - []byte(`{"a":"2021-12-19T01:31:30.000Z"}`), - [][]byte{ - []byte(`{"a":1639877490000000000}`), - }, - }, -} - -func TestTimeFromString(t *testing.T) { - ctx := context.TODO() - for _, test := range timeFromStringTests { - t.Run(test.name, func(t *testing.T) { - tf, err := newTimeFromString(ctx, test.cfg) - if err != nil { - t.Fatal(err) - } - - msg := message.New().SetData(test.test) - result, err := tf.Transform(ctx, msg) - if err != nil { - t.Error(err) - } - - var data [][]byte - for _, c := range result { - data = append(data, c.Data()) - } - - if !reflect.DeepEqual(data, test.expected) { - t.Errorf("expected %s, got %s", test.expected, data) - } - }) - } -} - -func benchmarkTimeFromString(b *testing.B, tf *timeFromString, data []byte) { - ctx := context.TODO() - for i := 0; i < b.N; i++ { - msg := message.New().SetData(data) - _, _ = tf.Transform(ctx, msg) - } -} - -func BenchmarkTimeFromString(b *testing.B) { - for _, test := range timeFromStringTests { - tf, err := newTimeFromString(context.TODO(), test.cfg) - if err != nil { - b.Fatal(err) - } - - b.Run(test.name, - func(b *testing.B) { - benchmarkTimeFromString(b, tf, test.test) - }, - ) - } -} diff --git a/v1/transform/time_from_unix.go b/v1/transform/time_from_unix.go deleted file mode 100644 index 60fb5654..00000000 --- a/v1/transform/time_from_unix.go +++ /dev/null @@ -1,75 +0,0 @@ -package transform - -import ( - "context" - "encoding/json" - "fmt" - "time" - - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" -) - -func newTimeFromUnix(_ context.Context, cfg config.Config) (*timeFromUnix, error) { - conf := timeUnixConfig{} - if err := conf.Decode(cfg.Settings); err != nil { - return nil, fmt.Errorf("transform time_from_unix: %v", err) - } - - if conf.ID == "" { - conf.ID = "time_from_unix" - } - - if err := conf.Validate(); err != nil { - return nil, fmt.Errorf("transform %s: %v", conf.ID, err) - } - - tf := timeFromUnix{ - conf: conf, - isObject: conf.Object.SourceKey != "" && conf.Object.TargetKey != "", - } - - return &tf, nil -} - -type timeFromUnix struct { - conf timeUnixConfig - 
isObject bool -} - -func (tf *timeFromUnix) Transform(ctx context.Context, msg *message.Message) ([]*message.Message, error) { - if msg.IsControl() { - return []*message.Message{msg}, nil - } - - var value message.Value - if tf.isObject { - value = msg.GetValue(tf.conf.Object.SourceKey) - } else { - value = bytesToValue(msg.Data()) - } - - if !value.Exists() { - return []*message.Message{msg}, nil - } - - // Convert Unix to UnixNano. - date := time.Unix(value.Int(), 0) - ns := date.UnixNano() - - if tf.isObject { - if err := msg.SetValue(tf.conf.Object.TargetKey, ns); err != nil { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err) - } - } else { - value := []byte(fmt.Sprintf("%d", ns)) - msg.SetData(value) - } - - return []*message.Message{msg}, nil -} - -func (tf *timeFromUnix) String() string { - b, _ := json.Marshal(tf.conf) - return string(b) -} diff --git a/v1/transform/time_from_unix_milli.go b/v1/transform/time_from_unix_milli.go deleted file mode 100644 index 69191c48..00000000 --- a/v1/transform/time_from_unix_milli.go +++ /dev/null @@ -1,75 +0,0 @@ -package transform - -import ( - "context" - "encoding/json" - "fmt" - "time" - - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" -) - -func newTimeFromUnixMilli(_ context.Context, cfg config.Config) (*timeFromUnixMilli, error) { - conf := timeUnixConfig{} - if err := conf.Decode(cfg.Settings); err != nil { - return nil, fmt.Errorf("transform time_from_unix_milli: %v", err) - } - - if conf.ID == "" { - conf.ID = "time_from_unix_milli" - } - - if err := conf.Validate(); err != nil { - return nil, fmt.Errorf("transform %s: %v", conf.ID, err) - } - - tf := timeFromUnixMilli{ - conf: conf, - isObject: conf.Object.SourceKey != "" && conf.Object.TargetKey != "", - } - - return &tf, nil -} - -type timeFromUnixMilli struct { - conf timeUnixConfig - isObject bool -} - -func (tf *timeFromUnixMilli) Transform(ctx context.Context, msg *message.Message) ([]*message.Message, error) { - if msg.IsControl() { - return []*message.Message{msg}, nil - } - - var value message.Value - if tf.isObject { - value = msg.GetValue(tf.conf.Object.SourceKey) - } else { - value = bytesToValue(msg.Data()) - } - - if !value.Exists() { - return []*message.Message{msg}, nil - } - - // Convert UnixMilli to UnixNano. 
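- // For example, 1639895490000 becomes 1639895490000000000.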
- date := time.UnixMilli(value.Int()) - ns := date.UnixNano() - - if tf.isObject { - if err := msg.SetValue(tf.conf.Object.TargetKey, ns); err != nil { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err) - } - } else { - value := []byte(fmt.Sprintf("%d", ns)) - msg.SetData(value) - } - - return []*message.Message{msg}, nil -} - -func (tf *timeFromUnixMilli) String() string { - b, _ := json.Marshal(tf.conf) - return string(b) -} diff --git a/v1/transform/time_from_unix_milli_test.go b/v1/transform/time_from_unix_milli_test.go deleted file mode 100644 index 6320eb1f..00000000 --- a/v1/transform/time_from_unix_milli_test.go +++ /dev/null @@ -1,100 +0,0 @@ -package transform - -import ( - "context" - "reflect" - "testing" - - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" -) - -var _ Transformer = &timeFromUnixMilli{} - -var timeUnixFromMilliTests = []struct { - name string - cfg config.Config - test []byte - expected [][]byte - err error -}{ - // data tests - { - "data", - config.Config{ - Settings: map[string]interface{}{}, - }, - []byte(`1639895490000`), - [][]byte{ - []byte(`1639895490000000000`), - }, - nil, - }, - // object tests - { - "object", - config.Config{ - Settings: map[string]interface{}{ - "object": map[string]interface{}{ - "source_key": "a", - "target_key": "a", - }, - }, - }, - []byte(`{"a":1639877490000}`), - [][]byte{ - []byte(`{"a":1639877490000000000}`), - }, - nil, - }, -} - -func TestTimeFromUnixMilli(t *testing.T) { - ctx := context.TODO() - for _, test := range timeUnixFromMilliTests { - t.Run(test.name, func(t *testing.T) { - tf, err := newTimeFromUnixMilli(ctx, test.cfg) - if err != nil { - t.Fatal(err) - } - - msg := message.New().SetData(test.test) - result, err := tf.Transform(ctx, msg) - if err != nil { - t.Error(err) - } - - var data [][]byte - for _, c := range result { - data = append(data, c.Data()) - } - - if !reflect.DeepEqual(data, test.expected) { - t.Errorf("expected %s, got %s", test.expected, data) - } - }) - } -} - -func benchmarkTimeFromUnixMilli(b *testing.B, tf *timeFromUnixMilli, data []byte) { - ctx := context.TODO() - for i := 0; i < b.N; i++ { - msg := message.New().SetData(data) - _, _ = tf.Transform(ctx, msg) - } -} - -func BenchmarkTimeFromUnixMilli(b *testing.B) { - for _, test := range timeUnixFromMilliTests { - tf, err := newTimeFromUnixMilli(context.TODO(), test.cfg) - if err != nil { - b.Fatal(err) - } - - b.Run(test.name, - func(b *testing.B) { - benchmarkTimeFromUnixMilli(b, tf, test.test) - }, - ) - } -} diff --git a/v1/transform/time_from_unix_test.go b/v1/transform/time_from_unix_test.go deleted file mode 100644 index 1e0870bd..00000000 --- a/v1/transform/time_from_unix_test.go +++ /dev/null @@ -1,100 +0,0 @@ -package transform - -import ( - "context" - "reflect" - "testing" - - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" -) - -var _ Transformer = &timeFromUnix{} - -var timeUnixFromTests = []struct { - name string - cfg config.Config - test []byte - expected [][]byte - err error -}{ - // data tests - { - "data", - config.Config{ - Settings: map[string]interface{}{}, - }, - []byte(`1639895490`), - [][]byte{ - []byte(`1639895490000000000`), - }, - nil, - }, - // object tests - { - "object", - config.Config{ - Settings: map[string]interface{}{ - "object": map[string]interface{}{ - "source_key": "a", - "target_key": "a", - }, - }, - }, - []byte(`{"a":1639877490}`), - [][]byte{ - []byte(`{"a":1639877490000000000}`), - }, - nil, - }, -} - -func TestTimeFromUnix(t
*testing.T) { - ctx := context.TODO() - for _, test := range timeUnixFromTests { - t.Run(test.name, func(t *testing.T) { - tf, err := newTimeFromUnix(ctx, test.cfg) - if err != nil { - t.Fatal(err) - } - - msg := message.New().SetData(test.test) - result, err := tf.Transform(ctx, msg) - if err != nil { - t.Error(err) - } - - var data [][]byte - for _, c := range result { - data = append(data, c.Data()) - } - - if !reflect.DeepEqual(data, test.expected) { - t.Errorf("expected %s, got %s", test.expected, data) - } - }) - } -} - -func benchmarkTimeFromUnix(b *testing.B, tf *timeFromUnix, data []byte) { - ctx := context.TODO() - for i := 0; i < b.N; i++ { - msg := message.New().SetData(data) - _, _ = tf.Transform(ctx, msg) - } -} - -func BenchmarkTimeFromUnix(b *testing.B) { - for _, test := range timeUnixFromTests { - tf, err := newTimeFromUnix(context.TODO(), test.cfg) - if err != nil { - b.Fatal(err) - } - - b.Run(test.name, - func(b *testing.B) { - benchmarkTimeFromUnix(b, tf, test.test) - }, - ) - } -} diff --git a/v1/transform/time_now.go b/v1/transform/time_now.go deleted file mode 100644 index 6eae9579..00000000 --- a/v1/transform/time_now.go +++ /dev/null @@ -1,78 +0,0 @@ -package transform - -import ( - "context" - "encoding/json" - "fmt" - "time" - - "github.com/brexhq/substation/config" - iconfig "github.com/brexhq/substation/internal/config" - "github.com/brexhq/substation/message" -) - -type timeNowConfig struct { - ID string `json:"id"` - Object iconfig.Object `json:"object"` -} - -func (c *timeNowConfig) Decode(in interface{}) error { - return iconfig.Decode(in, c) -} - -func (c *timeNowConfig) Validate() error { - return nil -} - -func newTimeNow(_ context.Context, cfg config.Config) (*timeNow, error) { - conf := timeNowConfig{} - if err := conf.Decode(cfg.Settings); err != nil { - return nil, fmt.Errorf("transform time_now: %v", err) - } - - if conf.ID == "" { - conf.ID = "time_now" - } - - if err := conf.Validate(); err != nil { - return nil, fmt.Errorf("transform %s: %v", conf.ID, err) - } - - tf := timeNow{ - conf: conf, - hasObjectSetKey: conf.Object.TargetKey != "", - } - - return &tf, nil -} - -type timeNow struct { - conf timeNowConfig - hasObjectSetKey bool -} - -func (tf *timeNow) Transform(ctx context.Context, msg *message.Message) ([]*message.Message, error) { - if msg.IsControl() { - return []*message.Message{msg}, nil - } - - date := time.Now() - - if tf.hasObjectSetKey { - if err := msg.SetValue(tf.conf.Object.TargetKey, date.UnixNano()); err != nil { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err) - } - - return []*message.Message{msg}, nil - } - - value := timeUnixToBytes(date) - msg.SetData(value) - - return []*message.Message{msg}, nil -} - -func (tf *timeNow) String() string { - b, _ := json.Marshal(tf.conf) - return string(b) -} diff --git a/v1/transform/time_to_string.go b/v1/transform/time_to_string.go deleted file mode 100644 index eb2f3d89..00000000 --- a/v1/transform/time_to_string.go +++ /dev/null @@ -1,74 +0,0 @@ -package transform - -import ( - "context" - "encoding/json" - "fmt" - - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" -) - -func newTimeToString(_ context.Context, cfg config.Config) (*timeToString, error) { - conf := timePatternConfig{} - if err := conf.Decode(cfg.Settings); err != nil { - return nil, fmt.Errorf("transform time_to_string: %v", err) - } - - if conf.ID == "" { - conf.ID = "time_to_string" - } - - if err := conf.Validate(); err != nil { - return nil, fmt.Errorf("transform %s: 
%v", conf.ID, err) - } - - tf := timeToString{ - conf: conf, - isObject: conf.Object.SourceKey != "" && conf.Object.TargetKey != "", - } - - return &tf, nil -} - -type timeToString struct { - conf timePatternConfig - isObject bool -} - -func (tf *timeToString) Transform(ctx context.Context, msg *message.Message) ([]*message.Message, error) { - if msg.IsControl() { - return []*message.Message{msg}, nil - } - - var value message.Value - if tf.isObject { - value = msg.GetValue(tf.conf.Object.SourceKey) - } else { - value = bytesToValue(msg.Data()) - } - - if !value.Exists() { - return []*message.Message{msg}, nil - } - - pattern, err := timeUnixToStr(value.Int(), tf.conf.Format, tf.conf.Location) - if err != nil { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err) - } - - if tf.isObject { - if err := msg.SetValue(tf.conf.Object.TargetKey, pattern); err != nil { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err) - } - } else { - msg.SetData([]byte(pattern)) - } - - return []*message.Message{msg}, nil -} - -func (tf *timeToString) String() string { - b, _ := json.Marshal(tf.conf) - return string(b) -} diff --git a/v1/transform/time_to_string_test.go b/v1/transform/time_to_string_test.go deleted file mode 100644 index 6588c8bc..00000000 --- a/v1/transform/time_to_string_test.go +++ /dev/null @@ -1,114 +0,0 @@ -package transform - -import ( - "context" - "reflect" - "testing" - - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" -) - -var _ Transformer = &timeToString{} - -var timeToStringTests = []struct { - name string - cfg config.Config - test []byte - expected [][]byte -}{ - // data tests - { - "data", - config.Config{ - Settings: map[string]interface{}{ - "format": timeDefaultFmt, - }, - }, - []byte(`1639877490000000000`), - [][]byte{ - []byte(`2021-12-19T01:31:30.000Z`), - }, - }, - { - "data with_location", - config.Config{ - Settings: map[string]interface{}{ - "format": timeDefaultFmt, - // Offset from UTC by -5 hours. 
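- // 1639895490000000000 ns is 2021-12-19T06:31:30Z, which formats as 01:31:30 in America/New_York.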
- "location": "America/New_York", - }, - }, - []byte(`1639895490000000000`), - [][]byte{ - []byte(`2021-12-19T01:31:30.000Z`), - }, - }, - // object tests - { - "object", - config.Config{ - Settings: map[string]interface{}{ - "object": map[string]interface{}{ - "source_key": "a", - "target_key": "a", - }, - "format": timeDefaultFmt, - }, - }, - []byte(`{"a":1639877490000000000}`), - [][]byte{ - []byte(`{"a":"2021-12-19T01:31:30.000Z"}`), - }, - }, -} - -func TestTimeToString(t *testing.T) { - ctx := context.TODO() - for _, test := range timeToStringTests { - t.Run(test.name, func(t *testing.T) { - tf, err := newTimeToString(ctx, test.cfg) - if err != nil { - t.Fatal(err) - } - - msg := message.New().SetData(test.test) - result, err := tf.Transform(ctx, msg) - if err != nil { - t.Error(err) - } - - var data [][]byte - for _, c := range result { - data = append(data, c.Data()) - } - - if !reflect.DeepEqual(data, test.expected) { - t.Errorf("expected %s, got %s", test.expected, data) - } - }) - } -} - -func benchmarkTimeToString(b *testing.B, tf *timeToString, data []byte) { - ctx := context.TODO() - for i := 0; i < b.N; i++ { - msg := message.New().SetData(data) - _, _ = tf.Transform(ctx, msg) - } -} - -func BenchmarkTimeToString(b *testing.B) { - for _, test := range timeToStringTests { - tf, err := newTimeToString(context.TODO(), test.cfg) - if err != nil { - b.Fatal(err) - } - - b.Run(test.name, - func(b *testing.B) { - benchmarkTimeToString(b, tf, test.test) - }, - ) - } -} diff --git a/v1/transform/time_to_unix.go b/v1/transform/time_to_unix.go deleted file mode 100644 index cd054be2..00000000 --- a/v1/transform/time_to_unix.go +++ /dev/null @@ -1,75 +0,0 @@ -package transform - -import ( - "context" - "encoding/json" - "fmt" - "time" - - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" -) - -func newTimeToUnix(_ context.Context, cfg config.Config) (*timeToUnix, error) { - conf := timeUnixConfig{} - if err := conf.Decode(cfg.Settings); err != nil { - return nil, fmt.Errorf("transform time_to_unix: %v", err) - } - - if conf.ID == "" { - conf.ID = "time_to_unix" - } - - if err := conf.Validate(); err != nil { - return nil, fmt.Errorf("transform %s: %v", conf.ID, err) - } - - tf := timeToUnix{ - conf: conf, - isObject: conf.Object.SourceKey != "" && conf.Object.TargetKey != "", - } - - return &tf, nil -} - -type timeToUnix struct { - conf timeUnixConfig - isObject bool -} - -func (tf *timeToUnix) Transform(ctx context.Context, msg *message.Message) ([]*message.Message, error) { - if msg.IsControl() { - return []*message.Message{msg}, nil - } - - var value message.Value - if tf.isObject { - value = msg.GetValue(tf.conf.Object.SourceKey) - } else { - value = bytesToValue(msg.Data()) - } - - if !value.Exists() { - return []*message.Message{msg}, nil - } - - // Convert UnixNano to Unix. 
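- // For example, 1639895490000000000 becomes 1639895490.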
- date := time.Unix(0, value.Int()) - unix := date.Unix() - - if tf.isObject { - if err := msg.SetValue(tf.conf.Object.TargetKey, unix); err != nil { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err) - } - } else { - value := []byte(fmt.Sprintf("%d", unix)) - msg.SetData(value) - } - - return []*message.Message{msg}, nil -} - -func (tf *timeToUnix) String() string { - b, _ := json.Marshal(tf.conf) - return string(b) -} diff --git a/v1/transform/time_to_unix_milli.go b/v1/transform/time_to_unix_milli.go deleted file mode 100644 index 573b3fa1..00000000 --- a/v1/transform/time_to_unix_milli.go +++ /dev/null @@ -1,75 +0,0 @@ -package transform - -import ( - "context" - "encoding/json" - "fmt" - "time" - - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" -) - -func newTimeToUnixMilli(_ context.Context, cfg config.Config) (*timeToUnixMilli, error) { - conf := timeUnixConfig{} - if err := conf.Decode(cfg.Settings); err != nil { - return nil, fmt.Errorf("transform time_to_unix_milli: %v", err) - } - - if conf.ID == "" { - conf.ID = "time_to_unix_milli" - } - - if err := conf.Validate(); err != nil { - return nil, fmt.Errorf("transform %s: %v", conf.ID, err) - } - - tf := timeToUnixMilli{ - conf: conf, - isObject: conf.Object.SourceKey != "" && conf.Object.TargetKey != "", - } - - return &tf, nil -} - -type timeToUnixMilli struct { - conf timeUnixConfig - isObject bool -} - -func (tf *timeToUnixMilli) Transform(ctx context.Context, msg *message.Message) ([]*message.Message, error) { - if msg.IsControl() { - return []*message.Message{msg}, nil - } - - var value message.Value - if tf.isObject { - value = msg.GetValue(tf.conf.Object.SourceKey) - } else { - value = bytesToValue(msg.Data()) - } - - if !value.Exists() { - return []*message.Message{msg}, nil - } - - // Convert UnixNano to UnixMilli. 
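- // For example, 1639895490000000000 becomes 1639895490000.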
- date := time.Unix(0, value.Int()) - ms := date.UnixMilli() - - if tf.isObject { - if err := msg.SetValue(tf.conf.Object.TargetKey, ms); err != nil { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err) - } - } else { - value := []byte(fmt.Sprintf("%d", ms)) - msg.SetData(value) - } - - return []*message.Message{msg}, nil -} - -func (tf *timeToUnixMilli) String() string { - b, _ := json.Marshal(tf.conf) - return string(b) -} diff --git a/v1/transform/time_to_unix_milli_test.go b/v1/transform/time_to_unix_milli_test.go deleted file mode 100644 index 3e3b5b46..00000000 --- a/v1/transform/time_to_unix_milli_test.go +++ /dev/null @@ -1,97 +0,0 @@ -package transform - -import ( - "context" - "reflect" - "testing" - - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" -) - -var _ Transformer = &timeToUnixMilli{} - -var timeToUnixMilliTests = []struct { - name string - cfg config.Config - test []byte - expected [][]byte -}{ - // data tests - { - "data", - config.Config{ - Settings: map[string]interface{}{}, - }, - []byte(`1639895490000000000`), - [][]byte{ - []byte(`1639895490000`), - }, - }, - // object tests - { - "object", - config.Config{ - Settings: map[string]interface{}{ - "object": map[string]interface{}{ - "source_key": "a", - "target_key": "a", - }, - }, - }, - []byte(`{"a":1639877490000000000}`), - [][]byte{ - []byte(`{"a":1639877490000}`), - }, - }, -} - -func TestTimeToUnixMilli(t *testing.T) { - ctx := context.TODO() - for _, test := range timeToUnixMilliTests { - t.Run(test.name, func(t *testing.T) { - tf, err := newTimeToUnixMilli(ctx, test.cfg) - if err != nil { - t.Fatal(err) - } - - msg := message.New().SetData(test.test) - result, err := tf.Transform(ctx, msg) - if err != nil { - t.Error(err) - } - - var data [][]byte - for _, c := range result { - data = append(data, c.Data()) - } - - if !reflect.DeepEqual(data, test.expected) { - t.Errorf("expected %s, got %s", test.expected, data) - } - }) - } -} - -func benchmarkTimeToUnixMilli(b *testing.B, tf *timeToUnixMilli, data []byte) { - ctx := context.TODO() - for i := 0; i < b.N; i++ { - msg := message.New().SetData(data) - _, _ = tf.Transform(ctx, msg) - } -} - -func BenchmarkTimeToUnixMilli(b *testing.B) { - for _, test := range timeToUnixMilliTests { - tf, err := newTimeToUnixMilli(context.TODO(), test.cfg) - if err != nil { - b.Fatal(err) - } - - b.Run(test.name, - func(b *testing.B) { - benchmarkTimeToUnixMilli(b, tf, test.test) - }, - ) - } -} diff --git a/v1/transform/time_to_unix_test.go b/v1/transform/time_to_unix_test.go deleted file mode 100644 index dd33dcc0..00000000 --- a/v1/transform/time_to_unix_test.go +++ /dev/null @@ -1,97 +0,0 @@ -package transform - -import ( - "context" - "reflect" - "testing" - - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" -) - -var _ Transformer = &timeToUnix{} - -var timeToUnixTests = []struct { - name string - cfg config.Config - test []byte - expected [][]byte -}{ - // data tests - { - "data", - config.Config{ - Settings: map[string]interface{}{}, - }, - []byte(`1639895490000000000`), - [][]byte{ - []byte(`1639895490`), - }, - }, - // object tests - { - "object", - config.Config{ - Settings: map[string]interface{}{ - "object": map[string]interface{}{ - "source_key": "a", - "target_key": "a", - }, - }, - }, - []byte(`{"a":1639877490000000000}`), - [][]byte{ - []byte(`{"a":1639877490}`), - }, - }, -} - -func TestTimeToUnix(t *testing.T) { - ctx := context.TODO() - for _, test := range timeToUnixTests { -
t.Run(test.name, func(t *testing.T) { - tf, err := newTimeToUnix(ctx, test.cfg) - if err != nil { - t.Fatal(err) - } - - msg := message.New().SetData(test.test) - result, err := tf.Transform(ctx, msg) - if err != nil { - t.Error(err) - } - - var data [][]byte - for _, c := range result { - data = append(data, c.Data()) - } - - if !reflect.DeepEqual(data, test.expected) { - t.Errorf("expected %s, got %s", test.expected, data) - } - }) - } -} - -func benchmarkTimeToUnix(b *testing.B, tf *timeToUnix, data []byte) { - ctx := context.TODO() - for i := 0; i < b.N; i++ { - msg := message.New().SetData(data) - _, _ = tf.Transform(ctx, msg) - } -} - -func BenchmarkTimeToUnix(b *testing.B) { - for _, test := range timeToUnixTests { - tf, err := newTimeToUnix(context.TODO(), test.cfg) - if err != nil { - b.Fatal(err) - } - - b.Run(test.name, - func(b *testing.B) { - benchmarkTimeToUnix(b, tf, test.test) - }, - ) - } -} diff --git a/v1/transform/transform.go b/v1/transform/transform.go deleted file mode 100644 index 650da77c..00000000 --- a/v1/transform/transform.go +++ /dev/null @@ -1,267 +0,0 @@ -// Package transform provides functions for transforming messages. -package transform - -import ( - "context" - "fmt" - "math" - - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/internal/errors" - "github.com/brexhq/substation/message" -) - -var errMsgInvalidObject = fmt.Errorf("message must be JSON object") - -// Transformer is the interface implemented by all transforms and -// provides the ability to transform a message. -type Transformer interface { - Transform(context.Context, *message.Message) ([]*message.Message, error) -} - -// Factory can be used to implement custom transform factory functions. -type Factory func(context.Context, config.Config) (Transformer, error) - -// New is a factory function for returning a configured Transformer. -func New(ctx context.Context, cfg config.Config) (Transformer, error) { //nolint: cyclop, gocyclo // ignore cyclomatic complexity - switch cfg.Type { - // Aggregation transforms. - case "aggregate_from_array": - return newAggregateFromArray(ctx, cfg) - case "aggregate_to_array": - return newAggregateToArray(ctx, cfg) - case "aggregate_from_string": - return newAggregateFromString(ctx, cfg) - case "aggregate_to_string": - return newAggregateToString(ctx, cfg) - // Array transforms. - case "array_join": - return newArrayJoin(ctx, cfg) - case "array_zip": - return newArrayZip(ctx, cfg) - // Enrichment transforms. - case "enrich_aws_dynamodb": - return newEnrichAWSDynamoDB(ctx, cfg) - case "enrich_aws_lambda": - return newEnrichAWSLambda(ctx, cfg) - case "enrich_dns_ip_lookup": - return newEnrichDNSIPLookup(ctx, cfg) - case "enrich_dns_domain_lookup": - return newEnrichDNSDomainLookup(ctx, cfg) - case "enrich_dns_text_lookup": - return newEnrichDNSTxtLookup(ctx, cfg) - case "enrich_http_get": - return newEnrichHTTPGet(ctx, cfg) - case "enrich_http_post": - return newEnrichHTTPPost(ctx, cfg) - // Deprecated: Use enrich_kv_store_item_get instead. - case "enrich_kv_store_get": - fallthrough - case "enrich_kv_store_item_get": - return newEnrichKVStoreItemGet(ctx, cfg) - // Deprecated: Use enrich_kv_store_item_set instead. - case "enrich_kv_store_set": - fallthrough - case "enrich_kv_store_item_set": - return newEnrichKVStoreItemSet(ctx, cfg) - case "enrich_kv_store_set_add": - return newEnrichKVStoreSetAdd(ctx, cfg) - // Format transforms. 
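- // These convert message data between encodings (base64, gzip, pretty-printed JSON, zip).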
- case "format_from_base64": - return newFormatFromBase64(ctx, cfg) - case "format_to_base64": - return newFormatToBase64(ctx, cfg) - case "format_from_gzip": - return newFormatFromGzip(ctx, cfg) - case "format_to_gzip": - return newFormatToGzip(ctx, cfg) - case "format_from_pretty_print": - return newFormatFromPrettyPrint(ctx, cfg) - case "format_from_zip": - return newFormatFromZip(ctx, cfg) - // Hash transforms. - case "hash_md5": - return newHashMD5(ctx, cfg) - case "hash_sha256": - return newHashSHA256(ctx, cfg) - // Meta transforms. - case "meta_err": - return newMetaErr(ctx, cfg) - case "meta_for_each": - return newMetaForEach(ctx, cfg) - case "meta_kv_store_lock": - return newMetaKVStoreLock(ctx, cfg) - case "meta_metric_duration": - return newMetaMetricsDuration(ctx, cfg) - case "meta_pipeline": - return newMetaPipeline(ctx, cfg) - case "meta_retry": - return newMetaRetry(ctx, cfg) - case "meta_switch": - return newMetaSwitch(ctx, cfg) - // Number transforms. - case "number_maximum": - return newNumberMaximum(ctx, cfg) - case "number_minimum": - return newNumberMinimum(ctx, cfg) - case "number_math_addition": - return newNumberMathAddition(ctx, cfg) - case "number_math_division": - return newNumberMathDivision(ctx, cfg) - case "number_math_multiplication": - return newNumberMathMultiplication(ctx, cfg) - case "number_math_subtraction": - return newNumberMathSubtraction(ctx, cfg) - // Network transforms. - case "network_domain_registered_domain": - return newNetworkDomainRegisteredDomain(ctx, cfg) - case "network_domain_subdomain": - return newNetworkDomainSubdomain(ctx, cfg) - case "network_domain_top_level_domain": - return newNetworkDomainTopLevelDomain(ctx, cfg) - // Object transforms. - case "object_copy": - return newObjectCopy(ctx, cfg) - case "object_delete": - return newObjectDelete(ctx, cfg) - case "object_insert": - return newObjectInsert(ctx, cfg) - case "object_jq": - return newObjectJQ(ctx, cfg) - case "object_to_boolean": - return newObjectToBoolean(ctx, cfg) - case "object_to_float": - return newObjectToFloat(ctx, cfg) - case "object_to_integer": - return newObjectToInteger(ctx, cfg) - case "object_to_string": - return newObjectToString(ctx, cfg) - case "object_to_unsigned_integer": - return newObjectToUnsignedInteger(ctx, cfg) - // Send transforms. - case "send_aws_dynamodb": - return newSendAWSDynamoDB(ctx, cfg) - case "send_aws_eventbridge": - return newSendAWSEventBridge(ctx, cfg) - case "send_aws_kinesis_data_firehose": - return newSendAWSKinesisDataFirehose(ctx, cfg) - case "send_aws_kinesis_data_stream": - return newSendAWSKinesisDataStream(ctx, cfg) - case "send_aws_lambda": - return newSendAWSLambda(ctx, cfg) - case "send_aws_s3": - return newSendAWSS3(ctx, cfg) - case "send_aws_sns": - return newSendAWSSNS(ctx, cfg) - case "send_aws_sqs": - return newSendAWSSQS(ctx, cfg) - case "send_file": - return newSendFile(ctx, cfg) - case "send_http_post": - return newSendHTTPPost(ctx, cfg) - case "send_stdout": - return newSendStdout(ctx, cfg) - // String transforms. - case "string_append": - return newStringAppend(ctx, cfg) - case "string_capture": - return newStringCapture(ctx, cfg) - case "string_to_lower": - return newStringToLower(ctx, cfg) - case "string_to_snake": - return newStringToSnake(ctx, cfg) - case "string_to_upper": - return newStringToUpper(ctx, cfg) - case "string_replace": - return newStringReplace(ctx, cfg) - case "string_split": - return newStringSplit(ctx, cfg) - case "string_uuid": - return newStringUUID(ctx, cfg) - // Time transforms. 
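- // These convert between string timestamps and Unix epoch values; times are handled internally as Unix nanoseconds.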
- case "time_from_string": - return newTimeFromString(ctx, cfg) - case "time_from_unix": - return newTimeFromUnix(ctx, cfg) - case "time_from_unix_milli": - return newTimeFromUnixMilli(ctx, cfg) - case "time_now": - return newTimeNow(ctx, cfg) - case "time_to_string": - return newTimeToString(ctx, cfg) - case "time_to_unix": - return newTimeToUnix(ctx, cfg) - case "time_to_unix_milli": - return newTimeToUnixMilli(ctx, cfg) - // Utility transforms. - case "utility_control": - return newUtilityControl(ctx, cfg) - case "utility_delay": - return newUtilityDelay(ctx, cfg) - case "utility_drop": - return newUtilityDrop(ctx, cfg) - case "utility_err": - return newUtilityErr(ctx, cfg) - case "utility_metric_bytes": - return newUtilityMetricBytes(ctx, cfg) - case "utility_metric_count": - return newUtilityMetricCount(ctx, cfg) - case "utility_metric_freshness": - return newUtilityMetricFreshness(ctx, cfg) - case "utility_secret": - return newUtilitySecret(ctx, cfg) - default: - return nil, fmt.Errorf("transform %s: %w", cfg.Type, errors.ErrInvalidFactoryInput) - } -} - -// Applies one or more transform functions to one or more messages. -func Apply(ctx context.Context, tf []Transformer, msgs ...*message.Message) ([]*message.Message, error) { - resultMsgs := make([]*message.Message, len(msgs)) - copy(resultMsgs, msgs) - - for i := 0; len(resultMsgs) > 0 && i < len(tf); i++ { - var nextResultMsgs []*message.Message - for _, m := range resultMsgs { - rMsgs, err := tf[i].Transform(ctx, m) - if err != nil { - // We immediately return if a transform hits an unrecoverable - // error on a message. - return nil, err - } - nextResultMsgs = append(nextResultMsgs, rMsgs...) - } - resultMsgs = nextResultMsgs - } - - return resultMsgs, nil -} - -func bytesToValue(b []byte) message.Value { - msg := message.New() - _ = msg.SetValue("_", b) - - return msg.GetValue("_") -} - -func anyToBytes(v any) []byte { - msg := message.New() - _ = msg.SetValue("_", v) - - return msg.GetValue("_").Bytes() -} - -// truncateTTL truncates the time-to-live (TTL) value from any precision greater -// than seconds (e.g., milliseconds, nanoseconds) to seconds. -// -// For example: -// - 1696482368492 -> 1696482368 -// - 1696482368492290 -> 1696482368 -func truncateTTL(v message.Value) int64 { - if len(v.String()) <= 10 { - return v.Int() - } - - l := len(v.String()) - 10 - return v.Int() / int64(math.Pow10(l)) -} diff --git a/v1/transform/transform_example_test.go b/v1/transform/transform_example_test.go deleted file mode 100644 index 4f5b45a6..00000000 --- a/v1/transform/transform_example_test.go +++ /dev/null @@ -1,46 +0,0 @@ -package transform_test - -import ( - "context" - "fmt" - - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" - "github.com/brexhq/substation/transform" -) - -func ExampleTransformer() { - ctx := context.TODO() - - // Copies the value of key "a" into key "b". - cfg := config.Config{ - Type: "object_copy", - Settings: map[string]interface{}{ - "object": map[string]interface{}{ - "source_key": "a", - "target_key": "b", - }, - }, - } - - tf, err := transform.New(ctx, cfg) - if err != nil { - // handle err - panic(err) - } - - // Transformer is applied to a message. 
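- // Transform may return zero or more messages; object_copy returns the single modified message.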
- msg := message.New().SetData([]byte(`{"a":1}`)) - results, err := tf.Transform(ctx, msg) - if err != nil { - // handle err - panic(err) - } - - for _, c := range results { - fmt.Println(string(c.Data())) - } - - // Output: - // {"a":1,"b":1} -} diff --git a/v1/transform/transform_test.go b/v1/transform/transform_test.go deleted file mode 100644 index 888297fb..00000000 --- a/v1/transform/transform_test.go +++ /dev/null @@ -1,139 +0,0 @@ -package transform - -import ( - "context" - "reflect" - "testing" - - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" -) - -var transformTests = []struct { - name string - conf config.Config - test []byte - expected [][]byte -}{ - { - "object_copy", - config.Config{ - Type: "object_copy", - Settings: map[string]interface{}{ - "object": map[string]interface{}{ - "target_key": "a", - }, - }, - }, - []byte(`b`), - [][]byte{ - []byte(`{"a":"b"}`), - }, - }, - { - "object_insert", - config.Config{ - Type: "object_insert", - Settings: map[string]interface{}{ - "object": map[string]interface{}{ - "target_key": "c", - }, - "value": "d", - }, - }, - []byte(`{"a":"b"}`), - [][]byte{ - []byte(`{"a":"b","c":"d"}`), - }, - }, - { - "format_from_gzip", - config.Config{ - Type: "format_from_gzip", - }, - []byte{31, 139, 8, 0, 0, 0, 0, 0, 0, 255, 170, 86, 202, 72, 205, 201, 201, 87, 178, 82, 74, 207, 207, 79, 73, 170, 76, 85, 170, 5, 4, 0, 0, 255, 255, 214, 182, 196, 150, 19, 0, 0, 0}, - [][]byte{ - []byte(`{"hello":"goodbye"}`), - }, - }, - { - "format_from_base64", - config.Config{ - Type: "format_from_base64", - }, - []byte(`eyJoZWxsbyI6IndvcmxkIn0=`), - [][]byte{ - []byte(`{"hello":"world"}`), - }, - }, - { - "time_to_string", - config.Config{ - Type: "time_to_string", - Settings: map[string]interface{}{ - "format": "2006-01-02T15:04:05.000000Z", - }, - }, - []byte(`1639877490000000000`), - [][]byte{ - []byte(`2021-12-19T01:31:30.000000Z`), - }, - }, -} - -func TestTransform(t *testing.T) { - ctx := context.TODO() - for _, test := range transformTests { - t.Run(test.name, func(t *testing.T) { - tf, err := New(ctx, test.conf) - if err != nil { - t.Error(err) - } - - msg := message.New().SetData(test.test) - result, err := tf.Transform(ctx, msg) - if err != nil { - t.Error(err) - } - - var data [][]byte - for _, c := range result { - data = append(data, c.Data()) - } - - if !reflect.DeepEqual(data, test.expected) { - t.Errorf("expected %s, got %s", test.expected, data) - } - }) - } -} - -var truncateTTLTests = []struct { - name string - test []byte - expected int64 -}{ - { - "unix millisecond", - []byte("1696482368492"), - 1696482368, - }, - { - "unix nanosecond", - []byte("1696482368492290"), - 1696482368, - }, -} - -func TestTruncateTTL(t *testing.T) { - for _, test := range truncateTTLTests { - t.Run(test.name, func(t *testing.T) { - tmp := bytesToValue(test.test) - result := truncateTTL(tmp) - - if !reflect.DeepEqual(result, test.expected) { - t.Errorf("expected %v, got %v", test.expected, result) - } - }) - } -} diff --git a/v1/transform/utility_control.go b/v1/transform/utility_control.go deleted file mode 100644 index 4621b972..00000000 --- a/v1/transform/utility_control.go +++ /dev/null @@ -1,86 +0,0 @@ -package transform - -import ( - "context" - "encoding/json" - "fmt" - "sync" - - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/internal/aggregate" - iconfig "github.com/brexhq/substation/internal/config" - "github.com/brexhq/substation/message" -) - -type utilityControlConfig struct { - ID string 
`json:"id"` - Batch iconfig.Batch `json:"batch"` -} - -func (c *utilityControlConfig) Decode(in interface{}) error { - return iconfig.Decode(in, c) -} - -func newUtilityControl(_ context.Context, cfg config.Config) (*utilityControl, error) { - conf := utilityControlConfig{} - if err := conf.Decode(cfg.Settings); err != nil { - return nil, fmt.Errorf("transform utility_control: %v", err) - } - - if conf.ID == "" { - conf.ID = "utility_control" - } - - agg, err := aggregate.New(aggregate.Config{ - Count: conf.Batch.Count, - Size: conf.Batch.Size, - Duration: conf.Batch.Duration, - }) - if err != nil { - return nil, fmt.Errorf("transform %s: %v", conf.ID, err) - } - - tf := utilityControl{ - conf: conf, - agg: *agg, - } - - return &tf, nil -} - -type utilityControl struct { - conf utilityControlConfig - - mu sync.Mutex - agg aggregate.Aggregate -} - -func (tf *utilityControl) Transform(_ context.Context, msg *message.Message) ([]*message.Message, error) { - tf.mu.Lock() - defer tf.mu.Unlock() - - if msg.IsControl() { - // If a control message is received, then the aggregation is reset - // to prevent sending duplicate control messages. - tf.agg.ResetAll() - - return []*message.Message{msg}, nil - } - - if ok := tf.agg.Add("", msg.Data()); ok { - return []*message.Message{msg}, nil - } - - tf.agg.Reset("") - if ok := tf.agg.Add("", msg.Data()); !ok { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, errSendBatchMisconfigured) - } - - ctrl := message.New().AsControl() - return []*message.Message{msg, ctrl}, nil -} - -func (tf *utilityControl) String() string { - b, _ := json.Marshal(tf.conf) - return string(b) -} diff --git a/v1/transform/utility_delay.go b/v1/transform/utility_delay.go deleted file mode 100644 index 5793600e..00000000 --- a/v1/transform/utility_delay.go +++ /dev/null @@ -1,79 +0,0 @@ -package transform - -import ( - "context" - "encoding/json" - "fmt" - "time" - - "github.com/brexhq/substation/config" - iconfig "github.com/brexhq/substation/internal/config" - "github.com/brexhq/substation/internal/errors" - "github.com/brexhq/substation/message" -) - -type utilityDelayConfig struct { - // Duration is the amount of time to delay. 
- Duration string `json:"duration"` - - ID string `json:"id"` -} - -func (c *utilityDelayConfig) Decode(in interface{}) error { - return iconfig.Decode(in, c) -} - -func (c *utilityDelayConfig) Validate() error { - if c.Duration == "" { - return fmt.Errorf("duration: %v", errors.ErrMissingRequiredOption) - } - - return nil -} - -func newUtilityDelay(_ context.Context, cfg config.Config) (*utilityDelay, error) { - conf := utilityDelayConfig{} - if err := conf.Decode(cfg.Settings); err != nil { - return nil, fmt.Errorf("transform utility_delay: %v", err) - } - - if conf.ID == "" { - conf.ID = "utility_delay" - } - - if err := conf.Validate(); err != nil { - return nil, fmt.Errorf("transform %s: %v", conf.ID, err) - } - - dur, err := time.ParseDuration(conf.Duration) - if err != nil { - return nil, fmt.Errorf("transform %s: duration: %v", conf.ID, err) - } - - tf := utilityDelay{ - conf: conf, - dur: dur, - } - - return &tf, nil -} - -type utilityDelay struct { - conf utilityDelayConfig - - dur time.Duration -} - -func (tf *utilityDelay) Transform(_ context.Context, msg *message.Message) ([]*message.Message, error) { - if msg.IsControl() { - return []*message.Message{msg}, nil - } - - time.Sleep(tf.dur) - return []*message.Message{msg}, nil -} - -func (tf *utilityDelay) String() string { - b, _ := json.Marshal(tf.conf) - return string(b) -} diff --git a/v1/transform/utility_drop.go b/v1/transform/utility_drop.go deleted file mode 100644 index 24a888c0..00000000 --- a/v1/transform/utility_drop.go +++ /dev/null @@ -1,53 +0,0 @@ -package transform - -import ( - "context" - "encoding/json" - "fmt" - - "github.com/brexhq/substation/config" - iconfig "github.com/brexhq/substation/internal/config" - "github.com/brexhq/substation/message" -) - -type utilityDropConfig struct { - ID string `json:"id"` -} - -func (c *utilityDropConfig) Decode(in interface{}) error { - return iconfig.Decode(in, c) -} - -func newUtilityDrop(_ context.Context, cfg config.Config) (*utilityDrop, error) { - conf := utilityDropConfig{} - if err := iconfig.Decode(cfg.Settings, &conf); err != nil { - return nil, fmt.Errorf("transform utility_drop: %v", err) - } - - if conf.ID == "" { - conf.ID = "utility_drop" - } - - tf := utilityDrop{ - conf: conf, - } - - return &tf, nil -} - -type utilityDrop struct { - conf utilityDropConfig -} - -func (tf *utilityDrop) Transform(ctx context.Context, msg *message.Message) ([]*message.Message, error) { - if msg.IsControl() { - return []*message.Message{msg}, nil - } - - return []*message.Message{}, nil -} - -func (tf *utilityDrop) String() string { - b, _ := json.Marshal(tf.conf) - return string(b) -} diff --git a/v1/transform/utility_err.go b/v1/transform/utility_err.go deleted file mode 100644 index f7e10c11..00000000 --- a/v1/transform/utility_err.go +++ /dev/null @@ -1,56 +0,0 @@ -package transform - -import ( - "context" - "encoding/json" - "fmt" - - "github.com/brexhq/substation/config" - iconfig "github.com/brexhq/substation/internal/config" - "github.com/brexhq/substation/message" -) - -type utilityErrConfig struct { - // Message is the error message to return. 
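- // The error is returned for every non-control message, which makes this transform useful for testing error handling.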
- Message string `json:"message"` - - ID string `json:"id"` -} - -func (c *utilityErrConfig) Decode(in interface{}) error { - return iconfig.Decode(in, c) -} - -func newUtilityErr(_ context.Context, cfg config.Config) (*utilityErr, error) { - conf := utilityErrConfig{} - if err := conf.Decode(cfg.Settings); err != nil { - return nil, fmt.Errorf("transform utility_err: %v", err) - } - - if conf.ID == "" { - conf.ID = "utility_err" - } - - tf := utilityErr{ - conf: conf, - } - - return &tf, nil -} - -type utilityErr struct { - conf utilityErrConfig -} - -func (tf *utilityErr) Transform(_ context.Context, msg *message.Message) ([]*message.Message, error) { - if msg.IsControl() { - return []*message.Message{msg}, nil - } - - return []*message.Message{msg}, fmt.Errorf("%s", tf.conf.Message) -} - -func (tf *utilityErr) String() string { - b, _ := json.Marshal(tf.conf) - return string(b) -} diff --git a/v1/transform/utility_metric_bytes.go b/v1/transform/utility_metric_bytes.go deleted file mode 100644 index 8d91224e..00000000 --- a/v1/transform/utility_metric_bytes.go +++ /dev/null @@ -1,77 +0,0 @@ -package transform - -import ( - "context" - "encoding/json" - "fmt" - "sync/atomic" - - "github.com/brexhq/substation/config" - iconfig "github.com/brexhq/substation/internal/config" - "github.com/brexhq/substation/internal/metrics" - "github.com/brexhq/substation/message" -) - -type utilityMetricBytesConfig struct { - Metric iconfig.Metric `json:"metric"` - - ID string `json:"id"` -} - -func (c *utilityMetricBytesConfig) Decode(in interface{}) error { - return iconfig.Decode(in, c) -} - -func newUtilityMetricBytes(ctx context.Context, cfg config.Config) (*utilityMetricBytes, error) { - // conf gets validated when calling metrics.New. - conf := utilityMetricBytesConfig{} - if err := conf.Decode(cfg.Settings); err != nil { - return nil, fmt.Errorf("transform utility_metric_bytes: %v", err) - } - - if conf.ID == "" { - conf.ID = "utility_metric_bytes" - } - - m, err := metrics.New(ctx, conf.Metric.Destination) - if err != nil { - return nil, fmt.Errorf("transform %s: %v", conf.ID, err) - } - - tf := utilityMetricBytes{ - conf: conf, - metric: m, - } - - return &tf, nil -} - -type utilityMetricBytes struct { - conf utilityMetricBytesConfig - - metric metrics.Generator - bytes uint32 -} - -func (tf *utilityMetricBytes) Transform(ctx context.Context, msg *message.Message) ([]*message.Message, error) { - if msg.IsControl() { - if err := tf.metric.Generate(ctx, metrics.Data{ - Name: tf.conf.Metric.Name, - Value: tf.bytes, - Attributes: tf.conf.Metric.Attributes, - }); err != nil { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err) - } - - atomic.StoreUint32(&tf.bytes, 0) - return []*message.Message{msg}, nil - } - - atomic.AddUint32(&tf.bytes, uint32(len(msg.Data()))) - return []*message.Message{msg}, nil -} - -func (tf *utilityMetricBytes) String() string { - b, _ := json.Marshal(tf.conf) - return string(b) -} diff --git a/v1/transform/utility_metric_count.go b/v1/transform/utility_metric_count.go deleted file mode 100644 index 1e283a30..00000000 --- a/v1/transform/utility_metric_count.go +++ /dev/null @@ -1,77 +0,0 @@ -package transform - -import ( - "context" - "encoding/json" - "fmt" - "sync/atomic" - - "github.com/brexhq/substation/config" - iconfig "github.com/brexhq/substation/internal/config" - "github.com/brexhq/substation/internal/metrics" - "github.com/brexhq/substation/message" -) - -type utilityMetricsCountConfig struct { - Metric iconfig.Metric `json:"metric"` - - ID string 
`json:"id"` -} - -func (c *utilityMetricsCountConfig) Decode(in interface{}) error { - return iconfig.Decode(in, c) -} - -func newUtilityMetricCount(ctx context.Context, cfg config.Config) (*utilityMetricsCount, error) { - // conf gets validated when calling metrics.New. - conf := utilityMetricsCountConfig{} - if err := conf.Decode(cfg.Settings); err != nil { - return nil, fmt.Errorf("transform utility_metric_count: %v", err) - } - - if conf.ID == "" { - conf.ID = "utility_metric_count" - } - - m, err := metrics.New(ctx, conf.Metric.Destination) - if err != nil { - return nil, fmt.Errorf("transform %s: %v", conf.ID, err) - } - - tf := utilityMetricsCount{ - conf: conf, - metric: m, - } - - return &tf, nil -} - -type utilityMetricsCount struct { - conf utilityMetricsCountConfig - - metric metrics.Generator - count uint32 -} - -func (tf *utilityMetricsCount) Transform(ctx context.Context, msg *message.Message) ([]*message.Message, error) { - if msg.IsControl() { - if err := tf.metric.Generate(ctx, metrics.Data{ - Name: tf.conf.Metric.Name, - Value: tf.count, - Attributes: tf.conf.Metric.Attributes, - }); err != nil { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err) - } - - atomic.StoreUint32(&tf.count, 0) - return []*message.Message{msg}, nil - } - - atomic.AddUint32(&tf.count, 1) - return []*message.Message{msg}, nil -} - -func (tf *utilityMetricsCount) String() string { - b, _ := json.Marshal(tf.conf) - return string(b) -} diff --git a/v1/transform/utility_metric_freshness.go b/v1/transform/utility_metric_freshness.go deleted file mode 100644 index 8ac4f088..00000000 --- a/v1/transform/utility_metric_freshness.go +++ /dev/null @@ -1,129 +0,0 @@ -package transform - -import ( - "context" - "encoding/json" - "fmt" - "sync/atomic" - "time" - - "github.com/brexhq/substation/config" - iconfig "github.com/brexhq/substation/internal/config" - "github.com/brexhq/substation/internal/errors" - "github.com/brexhq/substation/internal/metrics" - "github.com/brexhq/substation/message" -) - -type utilityMetricFreshnessConfig struct { - Threshold string `json:"threshold"` - Metric iconfig.Metric `json:"metric"` - - ID string `json:"id"` - Object iconfig.Object `json:"object"` -} - -func (c *utilityMetricFreshnessConfig) Decode(in interface{}) error { - return iconfig.Decode(in, c) -} - -func (c *utilityMetricFreshnessConfig) Validate() error { - if c.Threshold == "" { - return fmt.Errorf("threshold: %v", errors.ErrMissingRequiredOption) - } - - if c.Object.SourceKey == "" { - return fmt.Errorf("object_source_key: %v", errors.ErrMissingRequiredOption) - } - - return nil -} - -func newUtilityMetricFreshness(ctx context.Context, cfg config.Config) (*utilityMetricFreshness, error) { - conf := utilityMetricFreshnessConfig{} - if err := conf.Decode(cfg.Settings); err != nil { - return nil, fmt.Errorf("transform utility_metric_freshness: %v", err) - } - - if conf.ID == "" { - conf.ID = "utility_metric_freshness" - } - - if err := conf.Validate(); err != nil { - return nil, fmt.Errorf("transform %s: %v", conf.ID, err) - } - - m, err := metrics.New(ctx, conf.Metric.Destination) - if err != nil { - return nil, fmt.Errorf("transform %s: %v", conf.ID, err) - } - - dur, err := time.ParseDuration(conf.Threshold) - if err != nil { - return nil, fmt.Errorf("transform %s: duration: %v", conf.ID, err) - } - - tf := utilityMetricFreshness{ - conf: conf, - metric: m, - dur: dur, - } - - return &tf, nil -} - -type utilityMetricFreshness struct { - conf utilityMetricFreshnessConfig - metric metrics.Generator - dur 
time.Duration - - success uint32 - failure uint32 -} - -func (tf *utilityMetricFreshness) Transform(ctx context.Context, msg *message.Message) ([]*message.Message, error) { - // ctrl messages are handled by only one thread, so the map - // updates below are safe for concurrency. - if msg.IsControl() { - tf.conf.Metric.Attributes["FreshnessType"] = "Success" - if err := tf.metric.Generate(ctx, metrics.Data{ - Name: tf.conf.Metric.Name, - Value: tf.success, - Attributes: tf.conf.Metric.Attributes, - }); err != nil { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err) - } - - tf.conf.Metric.Attributes["FreshnessType"] = "Failure" - if err := tf.metric.Generate(ctx, metrics.Data{ - Name: tf.conf.Metric.Name, - Value: tf.failure, - Attributes: tf.conf.Metric.Attributes, - }); err != nil { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err) - } - - atomic.StoreUint32(&tf.success, 0) - atomic.StoreUint32(&tf.failure, 0) - return []*message.Message{msg}, nil - } - - // This is a time value expected to be in nanoseconds. - val := msg.GetValue(tf.conf.Object.SourceKey).Int() - if val == 0 { - return []*message.Message{msg}, nil - } - - ts := time.Unix(0, val) - if time.Since(ts) < tf.dur { - atomic.AddUint32(&tf.success, 1) - } else { - atomic.AddUint32(&tf.failure, 1) - } - - return []*message.Message{msg}, nil -} - -func (tf *utilityMetricFreshness) String() string { - b, _ := json.Marshal(tf.conf) - return string(b) -} diff --git a/v1/transform/utility_secret.go b/v1/transform/utility_secret.go deleted file mode 100644 index 60903322..00000000 --- a/v1/transform/utility_secret.go +++ /dev/null @@ -1,77 +0,0 @@ -package transform - -import ( - "context" - "encoding/json" - "fmt" - - "github.com/brexhq/substation/config" - iconfig "github.com/brexhq/substation/internal/config" - "github.com/brexhq/substation/internal/secrets" - "github.com/brexhq/substation/message" -) - -type utilitySecretConfig struct { - // Secret is the secret to retrieve. - Secret config.Config `json:"secret"` - - ID string `json:"id"` -} - -func (c *utilitySecretConfig) Decode(in interface{}) error { - return iconfig.Decode(in, c) -} - -func newUtilitySecret(ctx context.Context, cfg config.Config) (*utilitySecret, error) { - // conf gets validated when calling secrets.New. - conf := utilitySecretConfig{} - if err := conf.Decode(cfg.Settings); err != nil { - return nil, fmt.Errorf("transform utility_secret: %v", err) - } - - if conf.ID == "" { - conf.ID = "utility_secret" - } - - ret, err := secrets.New(ctx, conf.Secret) - if err != nil { - return nil, fmt.Errorf("transform %s: %v", conf.ID, err) - } - - tf := utilitySecret{ - conf: conf, - secret: ret, - } - - if err := tf.secret.Retrieve(ctx); err != nil { - return nil, fmt.Errorf("transform %s: %v", conf.ID, err) - } - - return &tf, nil -} - -type utilitySecret struct { - conf utilitySecretConfig - - // secret is safe for concurrent access. 
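- // It is retrieved once when the transform is created and refreshed in Transform whenever it expires.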
- secret secrets.Retriever -} - -func (tf *utilitySecret) Transform(ctx context.Context, msg *message.Message) ([]*message.Message, error) { - if msg.IsControl() { - return []*message.Message{msg}, nil - } - - if tf.secret.Expired() { - if err := tf.secret.Retrieve(ctx); err != nil { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err) - } - } - - return []*message.Message{msg}, nil -} - -func (tf *utilitySecret) String() string { - b, _ := json.Marshal(tf.conf) - return string(b) -} diff --git a/v2/cmd/README.md b/v2/cmd/README.md deleted file mode 100644 index 0a29d514..00000000 --- a/v2/cmd/README.md +++ /dev/null @@ -1,11 +0,0 @@ -# cmd - -This directory contains applications that run Substation. Applications are organized by their deployment target (e.g., AWS Lambda) and the source of the data they process (e.g., file, http). - -## aws/lambda/ - -Applications that run on AWS Lambda. - -## development/ - -Applications used for development. diff --git a/v2/cmd/aws/lambda/README.md b/v2/cmd/aws/lambda/README.md deleted file mode 100644 index bb6b913c..00000000 --- a/v2/cmd/aws/lambda/README.md +++ /dev/null @@ -1,44 +0,0 @@ -# lambda - -Contains Substation apps deployed as AWS Lambda functions. All Lambda functions get their configurations from [AWS AppConfig](https://docs.aws.amazon.com/appconfig/latest/userguide/what-is-appconfig.html) or AWS S3. - -## substation - -This app handles ingest, transform, and load for data from these AWS services: -* [API Gateway](https://docs.aws.amazon.com/lambda/latest/dg/services-apigateway.html) -* [DynamoDB Streams](https://docs.aws.amazon.com/lambda/latest/dg/with-ddb.html) -* [Kinesis Data Firehose](https://docs.aws.amazon.com/lambda/latest/dg/services-kinesisfirehose.html) -* [Kinesis Data Streams](https://docs.aws.amazon.com/lambda/latest/dg/with-kinesis.html) -* [Asynchronous Invocation (Lambda)](https://docs.aws.amazon.com/lambda/latest/dg/invocation-async.html) -* [Synchronous Invocation (Lambda)](https://docs.aws.amazon.com/lambda/latest/dg/invocation-sync.html) -* [S3](https://docs.aws.amazon.com/lambda/latest/dg/with-s3.html) -* [S3 via SNS](https://docs.aws.amazon.com/AmazonS3/latest/userguide/ways-to-add-notification-config-to-bucket.html) -* [SNS](https://docs.aws.amazon.com/lambda/latest/dg/with-sns.html) -* [SQS](https://docs.aws.amazon.com/lambda/latest/dg/with-sqs.html) - -## autoscale - -This app handles Kinesis Data Stream autoscaling through SNS notifications and CloudWatch alarms. Scaling is based on stream capacity as determined by the number and size of incoming records written to the stream. By default, the scaling behavior follows this pattern: - -* If stream utilization is greater than 70% of the Kinesis service limits consistently within a 5-minute period, then scale up -* If stream utilization is less than 35% of the Kinesis service limits consistently within a 60-minute period, then scale down - -The scaling behavior is customizable using environment variables: - -* `AUTOSCALE_KINESIS_THRESHOLD` - The target threshold that triggers a scaling event. The default value is 0.7 (70%), but it can be set to any value between 0.4 (40%) and 0.9 (90%). If the threshold is low, then the stream is more sensitive to scaling up and less sensitive to scaling down. If the threshold is high, then the stream is less sensitive to scaling up and more sensitive to scaling down. -* `AUTOSCALE_KINESIS_UPSCALE_DATAPOINTS` - The number of data points required to scale up.
The default value is 5, but it can be set to any value between 1 and 30. The number of data points affects the evaluation period; every 5 data points is equivalent to 5 minutes and the maximum evaluation period is 30 minutes. Use a higher value to reduce the frequency of scaling up. -* `AUTOSCALE_KINESIS_DOWNSCALE_DATAPOINTS` - The number of data points required to scale down. The default value is 60, but it can be set to any value between 1 and 360. The number of data points affects the evaluation period; every 60 data points is equivalent to 1 hour and the maximum evaluation period is 6 hours. Use a higher value to reduce the frequency of scaling down. - -Shards do not scale evenly, but the autoscaling follows [AWS best practices for resharding streams](https://docs.aws.amazon.com/kinesis/latest/APIReference/API_UpdateShardCount.html). UpdateShardCount has many limitations that the application is designed around, but there may be times when these limits cannot be avoided; if any limits are met, then users should file a service limit increase with AWS. Although rare, the most common service limits that users may experience are: - -* Scaling a stream more than 10 times per 24-hour rolling period -* Scaling a stream beyond 10,000 shards - -We recommend using one autoscaling Lambda for an entire Substation deployment, but many can be used if needed. For example, one can be assigned to data pipelines that have predictable traffic (e.g., steady stream utilization) and another can be assigned to data pipelines that have unpredictable traffic (e.g., sporadic or bursty stream utilization). - -## validate - -This app checks whether a Substation configuration is valid without processing any data. It supports input from these methods: - -* [AppConfig Validator Lambda](https://docs.aws.amazon.com/appconfig/2019-10-09/APIReference/API_Validator.html) -* [Lambda Invocation](https://docs.aws.amazon.com/lambda/latest/dg/API_Invoke.html) diff --git a/v2/cmd/development/kinesis-tap/substation/README.md b/v2/cmd/development/kinesis-tap/substation/README.md deleted file mode 100644 index bd33d8c8..00000000 --- a/v2/cmd/development/kinesis-tap/substation/README.md +++ /dev/null @@ -1,55 +0,0 @@ -# kinesis-tap/substation - -`kinesis-tap` is a tool for tapping into and transforming data from an AWS Kinesis Data Stream in real time with Substation. - -This is intended as a Substation development aid, but it has other uses as well, such as: -- Previewing live data in a stream by printing it to the console (default behavior) -- Sampling live data from a stream and saving it to a local file -- Forwarding live data between data pipeline stages for testing - -Warning: This is a development tool intended to provide temporary access to live data in a Kinesis Data Stream; if you need to process data from a Kinesis Data Stream with strict reliability guarantees, use the [AWS Lambda applications](/cmd/aws/lambda/). - -## Usage - -``` -% ./substation -h -Usage of ./substation: - -config string - The Substation configuration file used to transform records (default "./config.json") - -stream-name string - The AWS Kinesis Data Stream to fetch records from - -stream-offset string - Determines the offset of the stream (earliest, latest) (default "earliest") -``` - -Use the `SUBSTATION_DEBUG=1` environment variable to enable debug logging: -``` -% SUBSTATION_DEBUG=1 ./substation -stream-name my-stream -DEBU[0000] Retrieved active shards from Kinesis stream.
count=2 stream=my-stream -DEBU[0001] Retrieved records from Kinesis shard. count=981 shard=0x140004a6f80 stream=my-stream -DEBU[0002] Retrieved records from Kinesis shard. count=1055 shard=0x140004a6fe0 stream=my-stream -DEBU[0003] Retrieved records from Kinesis shard. count=2333 shard=0x140004a6f80 stream=my-stream -DEBU[0003] Retrieved records from Kinesis shard. count=1110 shard=0x140004a6fe0 stream=my-stream -DEBU[0004] Retrieved records from Kinesis shard. count=2109 shard=0x140004a6f80 stream=my-stream -DEBU[0004] Retrieved records from Kinesis shard. count=1094 shard=0x140004a6fe0 stream=my-stream -^CDEBU[0004] Closed connections to the Kinesis stream. -DEBU[0004] Closed Substation pipeline. -DEBU[0004] Flushed Substation pipeline. -``` - -## Build - -Download, configure, and build the `kinesis-tap` binary with these commands: - -``` -git clone https://github.com/brexhq/substation.git && \ -cd substation/cmd/development/kinesis-tap/substation && \ -jsonnet config.jsonnet > config.json && \ -go build . -``` - -## Authentication - -`kinesis-tap` uses the AWS SDK for Go to authenticate with AWS. The SDK uses the same authentication methods as the AWS CLI, so you can use the same environment variables or configuration files to authenticate. - -For more information, see the [AWS CLI documentation](https://docs.aws.amazon.com/cli/latest/userguide/cli-chap-configure.html). diff --git a/v2/condition/README.md b/v2/condition/README.md deleted file mode 100644 index b9368a99..00000000 --- a/v2/condition/README.md +++ /dev/null @@ -1,3 +0,0 @@ -# condition - -Contains interfaces and methods for evaluating data using success or failure criteria. Conditions combine inspectors (e.g. string equals "foo", string matches "^foo") and an operator (e.g., all, any) to check the state of data before applying other functions. diff --git a/v2/condition/network.go b/v2/condition/network.go deleted file mode 100644 index c1506c9a..00000000 --- a/v2/condition/network.go +++ /dev/null @@ -1,13 +0,0 @@ -package condition - -import ( - iconfig "github.com/brexhq/substation/internal/config" -) - -type networkIPConfig struct { - Object iconfig.Object `json:"object"` -} - -func (c *networkIPConfig) Decode(in interface{}) error { - return iconfig.Decode(in, c) -} diff --git a/v2/condition/number.go b/v2/condition/number.go deleted file mode 100644 index 979c171c..00000000 --- a/v2/condition/number.go +++ /dev/null @@ -1,62 +0,0 @@ -package condition - -import ( - "unicode/utf8" - - iconfig "github.com/brexhq/substation/internal/config" -) - -type numberBitwiseConfig struct { - // Value used for comparison during inspection. - Value int64 `json:"value"` - - Object iconfig.Object `json:"object"` -} - -func (c *numberBitwiseConfig) Decode(in interface{}) error { - return iconfig.Decode(in, c) -} - -type numberLengthConfig struct { - // Value used for comparison during inspection. - Value int `json:"value"` - // Measurement controls how the length is measured. The inspector automatically - // assigns measurement for objects when the key is an array. - // - // Must be one of: - // - // - byte: number of bytes - // - // - char: number of characters - // - // This is optional and defaults to byte. 
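- //
- // For example, "héllo" is 6 bytes and 5 chars.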
- Measurement string `json:"measurement"` - - Object iconfig.Object `json:"object"` -} - -func (c *numberLengthConfig) Decode(in interface{}) error { - return iconfig.Decode(in, c) -} - -func numberLengthMeasurement(b []byte, measurement string) int { - switch measurement { - case "byte": - return len(b) - case "char", "rune": // rune is an alias for char - return utf8.RuneCount(b) - default: - return len(b) - } -} - -type numberConfig struct { - // Value used for comparison during inspection. - Value float64 `json:"value"` - - Object iconfig.Object `json:"object"` -} - -func (c *numberConfig) Decode(in interface{}) error { - return iconfig.Decode(in, c) -} diff --git a/v2/config/config.go b/v2/config/config.go deleted file mode 100644 index 743162bd..00000000 --- a/v2/config/config.go +++ /dev/null @@ -1,11 +0,0 @@ -// Package config provides structures for building configurations. -package config - -// Config is a template used by Substation interface factories to produce new -// instances. Type refers to the type of instance and Settings contains options -// used in the instance. Examples of this are found in the condition and transforms -// packages. -type Config struct { - Type string `json:"type"` - Settings map[string]interface{} `json:"settings"` -} diff --git a/v2/examples/condition/meta/data.json b/v2/examples/condition/meta/data.json deleted file mode 100644 index 3fa4dcfc..00000000 --- a/v2/examples/condition/meta/data.json +++ /dev/null @@ -1 +0,0 @@ -["alice@brex.com","bob@brex.com"] diff --git a/v2/examples/condition/number/data.json b/v2/examples/condition/number/data.json deleted file mode 100644 index edfed892..00000000 --- a/v2/examples/condition/number/data.json +++ /dev/null @@ -1 +0,0 @@ -{"eventId":"123461","timestamp":"2024-07-29T10:00:00Z","sourceIP":"192.168.1.6","destinationIP":"172.16.0.7","sourcePort":"22","destinationPort":"22","protocol":"TCP","action":"ACCEPT","bytes":"20000"} diff --git a/v2/examples/condition/string/data.json b/v2/examples/condition/string/data.json deleted file mode 100644 index 3c5c76f3..00000000 --- a/v2/examples/condition/string/data.json +++ /dev/null @@ -1 +0,0 @@ -{"eventId":"123461","timestamp":"2024-07-29T10:00:00Z","sourceIP":"192.168.1.6","destinationIP":"172.16.0.7","sourcePort":"80","destinationPort":"443","protocol":"TCP","action":"ACCEPT","vpcId":"vpc-2b3c4d5e"} diff --git a/v2/examples/transform/aggregate/sample/data.jsonl b/v2/examples/transform/aggregate/sample/data.jsonl deleted file mode 100644 index d101df01..00000000 --- a/v2/examples/transform/aggregate/sample/data.jsonl +++ /dev/null @@ -1,13 +0,0 @@ -{"a":"b"} -{"c":"d"} -{"e":"f"} -{"g":"h"} -{"i":"j"} -{"k":"l"} -{"m":"n"} -{"o":"p"} -{"q":"r"} -{"s":"t"} -{"u":"v"} -{"w":"x"} -{"y":"z"} diff --git a/v2/examples/transform/aggregate/summarize/data.jsonl b/v2/examples/transform/aggregate/summarize/data.jsonl deleted file mode 100644 index 9684c261..00000000 --- a/v2/examples/transform/aggregate/summarize/data.jsonl +++ /dev/null @@ -1,19 +0,0 @@ -{"client":"10.1.1.2","server":"8.8.8.8","bytes":11,"timestamp":1674429049} -{"client":"10.1.1.3","server":"8.8.4.4","bytes":20,"timestamp":1674429050} -{"client":"10.1.1.2","server":"8.8.4.4","bytes":15,"timestamp":1674429051} -{"client":"10.1.1.3","server":"8.8.8.8","bytes":8,"timestamp":1674429052} -{"client":"10.1.1.2","server":"8.8.8.8","bytes":25,"timestamp":1674429053} -{"client":"10.1.1.4","server":"1.2.3.4","bytes":2400,"timestamp":1674429054} 
-{"client":"10.1.1.2","server":"8.8.4.4","bytes":23,"timestamp":1674429055} -{"client":"10.1.1.3","server":"8.8.8.8","bytes":12,"timestamp":1674429056} -{"client":"10.1.1.2","server":"8.8.4.4","bytes":18,"timestamp":1674429057} -{"client":"10.1.1.3","server":"8.8.8.8","bytes":6,"timestamp":1674429058} -{"client":"10.1.1.2","server":"8.8.4.4","bytes":23,"timestamp":1674429059} -{"client":"10.1.1.3","server":"8.8.8.8","bytes":12,"timestamp":1674429060} -{"client":"10.1.1.2","server":"8.8.4.4","bytes":18,"timestamp":1674429061} -{"client":"10.1.1.3","server":"8.8.8.8","bytes":6,"timestamp":1674429062} -{"client":"10.1.1.2","server":"8.8.8.8","bytes":11,"timestamp":1674429063} -{"client":"10.1.1.3","server":"8.8.4.4","bytes":20,"timestamp":1674429064} -{"client":"10.1.1.2","server":"8.8.4.4","bytes":15,"timestamp":1674429065} -{"client":"10.1.1.3","server":"8.8.8.8","bytes":8,"timestamp":1674429066} -{"client":"10.1.1.2","server":"8.8.8.8","bytes":25,"timestamp":1674429067} diff --git a/v2/examples/transform/array/extend/data.json b/v2/examples/transform/array/extend/data.json deleted file mode 100644 index 9915a5f3..00000000 --- a/v2/examples/transform/array/extend/data.json +++ /dev/null @@ -1 +0,0 @@ -{"a":[1,2],"z":[3,4]} diff --git a/v2/examples/transform/array/flatten/data.json b/v2/examples/transform/array/flatten/data.json deleted file mode 100644 index 667836a7..00000000 --- a/v2/examples/transform/array/flatten/data.json +++ /dev/null @@ -1 +0,0 @@ -{"a":[1,2,[3,4]]} diff --git a/v2/examples/transform/array/flatten_deep/data.json b/v2/examples/transform/array/flatten_deep/data.json deleted file mode 100644 index 852ff7c7..00000000 --- a/v2/examples/transform/array/flatten_deep/data.json +++ /dev/null @@ -1 +0,0 @@ -{"a":[1,2,[3,4,[5,6]]]} diff --git a/v2/examples/transform/array/group/data.json b/v2/examples/transform/array/group/data.json deleted file mode 100644 index a0e2819a..00000000 --- a/v2/examples/transform/array/group/data.json +++ /dev/null @@ -1 +0,0 @@ -{"file_name":["foo.txt","bar.html"],"file_type":["text/plain","text/html"],"file_size":[100,500]} diff --git a/v2/examples/transform/enrich/http_secret/data.json b/v2/examples/transform/enrich/http_secret/data.json deleted file mode 100644 index 0967ef42..00000000 --- a/v2/examples/transform/enrich/http_secret/data.json +++ /dev/null @@ -1 +0,0 @@ -{} diff --git a/v2/examples/transform/enrich/kvstore_csv/data.jsonl b/v2/examples/transform/enrich/kvstore_csv/data.jsonl deleted file mode 100644 index 19a662e7..00000000 --- a/v2/examples/transform/enrich/kvstore_csv/data.jsonl +++ /dev/null @@ -1 +0,0 @@ -{"product":"churro"} diff --git a/v2/examples/transform/enrich/kvstore_csv/kv.csv b/v2/examples/transform/enrich/kvstore_csv/kv.csv deleted file mode 100644 index d52fdd05..00000000 --- a/v2/examples/transform/enrich/kvstore_csv/kv.csv +++ /dev/null @@ -1,4 +0,0 @@ -product,price,calories -churro,9.99,500 -donut,1.99,300 -eclair,2.99,400 diff --git a/v2/examples/transform/enrich/kvstore_json/data.jsonl b/v2/examples/transform/enrich/kvstore_json/data.jsonl deleted file mode 100644 index 19a662e7..00000000 --- a/v2/examples/transform/enrich/kvstore_json/data.jsonl +++ /dev/null @@ -1 +0,0 @@ -{"product":"churro"} diff --git a/v2/examples/transform/enrich/kvstore_set_add/data.jsonl b/v2/examples/transform/enrich/kvstore_set_add/data.jsonl deleted file mode 100644 index 5bf38854..00000000 --- a/v2/examples/transform/enrich/kvstore_set_add/data.jsonl +++ /dev/null @@ -1,6 +0,0 @@ -{"date": 
"2021-01-01","customer":"alice@brex.com","order":"pizza"} -{"date": "2021-01-01","customer":"bob@brex.com","order":"burger"} -{"date": "2021-01-03","customer":"bob@brex.com","order":"pizza"} -{"date": "2021-01-07","customer":"alice@brex.com","order":"pizza"} -{"date": "2021-01-07","customer":"bob@brex.com","order":"burger"} -{"date": "2021-01-13","customer":"alice@brex.com","order":"pizza"} diff --git a/v2/examples/transform/enrich/mmdb/data.jsonl b/v2/examples/transform/enrich/mmdb/data.jsonl deleted file mode 100644 index 6ea857b7..00000000 --- a/v2/examples/transform/enrich/mmdb/data.jsonl +++ /dev/null @@ -1,3 +0,0 @@ -{"ip":"1.1.1.1"} -{"ip":"8.8.8.8"} -{"ip":"9.9.9.9"} diff --git a/v2/examples/transform/enrich/urlscan/data.json b/v2/examples/transform/enrich/urlscan/data.json deleted file mode 100644 index 0315fbab..00000000 --- a/v2/examples/transform/enrich/urlscan/data.json +++ /dev/null @@ -1 +0,0 @@ -{"url":"https://www.brex.com/"} diff --git a/v2/examples/transform/format/zip/config.jsonnet b/v2/examples/transform/format/zip/config.jsonnet deleted file mode 100644 index 747fada5..00000000 --- a/v2/examples/transform/format/zip/config.jsonnet +++ /dev/null @@ -1,17 +0,0 @@ -// This example shows how to unzip a file and send the contents to stdout. -// Add the two data files in this directory to a Zip file and send it to -// Substation. You can use this command to create the Zip file: -// zip data.zip data.jsonl data.csv -local sub = import '../../../../../build/config/substation.libsonnet'; - -{ - transforms: [ - // Unzip the file. The contents of each file in the Zip file are - // now messages in the pipeline (including EOL characters, if any). - sub.tf.format.from.zip(), - // Create individual messages from the contents of each file. - sub.tf.agg.from.string({ separator: '\n' }), - // Send the messages to stdout. 
- sub.tf.send.stdout(), - ], -} diff --git a/v2/examples/transform/format/zip/data.csv b/v2/examples/transform/format/zip/data.csv deleted file mode 100644 index d6e746e3..00000000 --- a/v2/examples/transform/format/zip/data.csv +++ /dev/null @@ -1,3 +0,0 @@ -foo,bar -baz,qux -quux,corge \ No newline at end of file diff --git a/v2/examples/transform/format/zip/data.jsonl b/v2/examples/transform/format/zip/data.jsonl deleted file mode 100644 index b7519d55..00000000 --- a/v2/examples/transform/format/zip/data.jsonl +++ /dev/null @@ -1,3 +0,0 @@ -{"foo":"bar"} -{"baz":"qux"} -{"quux":"corge"} \ No newline at end of file diff --git a/v2/examples/transform/meta/crash_program/data.json b/v2/examples/transform/meta/crash_program/data.json deleted file mode 100644 index b6e81411..00000000 --- a/v2/examples/transform/meta/crash_program/data.json +++ /dev/null @@ -1 +0,0 @@ -{"a":"b"} diff --git a/v2/examples/transform/meta/each_in_array/data.json b/v2/examples/transform/meta/each_in_array/data.json deleted file mode 100644 index b3c96393..00000000 --- a/v2/examples/transform/meta/each_in_array/data.json +++ /dev/null @@ -1 +0,0 @@ -{"a":[{"b":1,"c":2},{"b":3,"c":4}]} diff --git a/v2/examples/transform/meta/exactly_once_consumer/data.jsonl b/v2/examples/transform/meta/exactly_once_consumer/data.jsonl deleted file mode 100644 index 864e8bfd..00000000 --- a/v2/examples/transform/meta/exactly_once_consumer/data.jsonl +++ /dev/null @@ -1,8 +0,0 @@ -{"a":"b"} -{"a":"b"} -{"c":"d"} -{"a":"b"} -{"c":"d"} -{"c":"d"} -{"e":"f"} -{"a":"b"} diff --git a/v2/examples/transform/meta/exactly_once_producer/data.jsonl b/v2/examples/transform/meta/exactly_once_producer/data.jsonl deleted file mode 100644 index 864e8bfd..00000000 --- a/v2/examples/transform/meta/exactly_once_producer/data.jsonl +++ /dev/null @@ -1,8 +0,0 @@ -{"a":"b"} -{"a":"b"} -{"c":"d"} -{"a":"b"} -{"c":"d"} -{"c":"d"} -{"e":"f"} -{"a":"b"} diff --git a/v2/examples/transform/meta/exactly_once_system/data.jsonl b/v2/examples/transform/meta/exactly_once_system/data.jsonl deleted file mode 100644 index 864e8bfd..00000000 --- a/v2/examples/transform/meta/exactly_once_system/data.jsonl +++ /dev/null @@ -1,8 +0,0 @@ -{"a":"b"} -{"a":"b"} -{"c":"d"} -{"a":"b"} -{"c":"d"} -{"c":"d"} -{"e":"f"} -{"a":"b"} diff --git a/v2/examples/transform/meta/execution_time/data.json b/v2/examples/transform/meta/execution_time/data.json deleted file mode 100644 index b6e81411..00000000 --- a/v2/examples/transform/meta/execution_time/data.json +++ /dev/null @@ -1 +0,0 @@ -{"a":"b"} diff --git a/v2/examples/transform/meta/retry_with_backoff/data.json b/v2/examples/transform/meta/retry_with_backoff/data.json deleted file mode 100644 index b6e81411..00000000 --- a/v2/examples/transform/meta/retry_with_backoff/data.json +++ /dev/null @@ -1 +0,0 @@ -{"a":"b"} diff --git a/v2/examples/transform/number/clamp/data.txt b/v2/examples/transform/number/clamp/data.txt deleted file mode 100644 index 7779e6cb..00000000 --- a/v2/examples/transform/number/clamp/data.txt +++ /dev/null @@ -1,3 +0,0 @@ --1 -101 -50 diff --git a/v2/examples/transform/number/clamp/stdout.txt b/v2/examples/transform/number/clamp/stdout.txt deleted file mode 100644 index e7e321ee..00000000 --- a/v2/examples/transform/number/clamp/stdout.txt +++ /dev/null @@ -1,3 +0,0 @@ -100 -0 -50 diff --git a/v2/examples/transform/number/max/data.txt b/v2/examples/transform/number/max/data.txt deleted file mode 100644 index a5c891fc..00000000 --- a/v2/examples/transform/number/max/data.txt +++ /dev/null @@ -1,4 +0,0 @@ -0 --1 
--1.1 -10 diff --git a/v2/examples/transform/number/max/stdout.txt b/v2/examples/transform/number/max/stdout.txt deleted file mode 100644 index 2360ec2f..00000000 --- a/v2/examples/transform/number/max/stdout.txt +++ /dev/null @@ -1,4 +0,0 @@ -0 -0 -0 -10 diff --git a/v2/examples/transform/number/min/data.txt b/v2/examples/transform/number/min/data.txt deleted file mode 100644 index a5c891fc..00000000 --- a/v2/examples/transform/number/min/data.txt +++ /dev/null @@ -1,4 +0,0 @@ -0 --1 --1.1 -10 diff --git a/v2/examples/transform/number/min/stdout.txt b/v2/examples/transform/number/min/stdout.txt deleted file mode 100644 index f27880a1..00000000 --- a/v2/examples/transform/number/min/stdout.txt +++ /dev/null @@ -1,4 +0,0 @@ -0 --1 --1.1 -0 diff --git a/v2/examples/transform/send/aux_transforms/data.jsonl b/v2/examples/transform/send/aux_transforms/data.jsonl deleted file mode 100644 index d101df01..00000000 --- a/v2/examples/transform/send/aux_transforms/data.jsonl +++ /dev/null @@ -1,13 +0,0 @@ -{"a":"b"} -{"c":"d"} -{"e":"f"} -{"g":"h"} -{"i":"j"} -{"k":"l"} -{"m":"n"} -{"o":"p"} -{"q":"r"} -{"s":"t"} -{"u":"v"} -{"w":"x"} -{"y":"z"} diff --git a/v2/examples/transform/send/batch/data.jsonl b/v2/examples/transform/send/batch/data.jsonl deleted file mode 100644 index 40196852..00000000 --- a/v2/examples/transform/send/batch/data.jsonl +++ /dev/null @@ -1,13 +0,0 @@ -{"a":"b","group_id":1} -{"c":"d","group_id":2} -{"e":"f","group_id":1} -{"g":"h","group_id":2} -{"i":"j","group_id":1} -{"k":"l","group_id":2} -{"m":"n","group_id":1} -{"o":"p","group_id":2} -{"q":"r","group_id":1} -{"s":"t","group_id":2} -{"u":"v","group_id":1} -{"w":"x","group_id":2} -{"y":"z","group_id":1} diff --git a/v2/examples/transform/send/datadog/data.jsonl b/v2/examples/transform/send/datadog/data.jsonl deleted file mode 100644 index d101df01..00000000 --- a/v2/examples/transform/send/datadog/data.jsonl +++ /dev/null @@ -1,13 +0,0 @@ -{"a":"b"} -{"c":"d"} -{"e":"f"} -{"g":"h"} -{"i":"j"} -{"k":"l"} -{"m":"n"} -{"o":"p"} -{"q":"r"} -{"s":"t"} -{"u":"v"} -{"w":"x"} -{"y":"z"} diff --git a/v2/examples/transform/send/splunk/data.jsonl b/v2/examples/transform/send/splunk/data.jsonl deleted file mode 100644 index d101df01..00000000 --- a/v2/examples/transform/send/splunk/data.jsonl +++ /dev/null @@ -1,13 +0,0 @@ -{"a":"b"} -{"c":"d"} -{"e":"f"} -{"g":"h"} -{"i":"j"} -{"k":"l"} -{"m":"n"} -{"o":"p"} -{"q":"r"} -{"s":"t"} -{"u":"v"} -{"w":"x"} -{"y":"z"} diff --git a/v2/examples/transform/send/sumologic/data.jsonl b/v2/examples/transform/send/sumologic/data.jsonl deleted file mode 100644 index d101df01..00000000 --- a/v2/examples/transform/send/sumologic/data.jsonl +++ /dev/null @@ -1,13 +0,0 @@ -{"a":"b"} -{"c":"d"} -{"e":"f"} -{"g":"h"} -{"i":"j"} -{"k":"l"} -{"m":"n"} -{"o":"p"} -{"q":"r"} -{"s":"t"} -{"u":"v"} -{"w":"x"} -{"y":"z"} diff --git a/v2/examples/transform/time/str_conversion/data.json b/v2/examples/transform/time/str_conversion/data.json deleted file mode 100644 index 86e52d7c..00000000 --- a/v2/examples/transform/time/str_conversion/data.json +++ /dev/null @@ -1 +0,0 @@ -{"time":"2024-01-01T01:02:03.123Z"} diff --git a/v2/examples/transform/utility/generate_ctrl/data.jsonl b/v2/examples/transform/utility/generate_ctrl/data.jsonl deleted file mode 100644 index d101df01..00000000 --- a/v2/examples/transform/utility/generate_ctrl/data.jsonl +++ /dev/null @@ -1,13 +0,0 @@ -{"a":"b"} -{"c":"d"} -{"e":"f"} -{"g":"h"} -{"i":"j"} -{"k":"l"} -{"m":"n"} -{"o":"p"} -{"q":"r"} -{"s":"t"} -{"u":"v"} -{"w":"x"} -{"y":"z"} 
diff --git a/v2/examples/transform/utility/message_bytes/data.jsonl b/v2/examples/transform/utility/message_bytes/data.jsonl deleted file mode 100644 index d101df01..00000000 --- a/v2/examples/transform/utility/message_bytes/data.jsonl +++ /dev/null @@ -1,13 +0,0 @@ -{"a":"b"} -{"c":"d"} -{"e":"f"} -{"g":"h"} -{"i":"j"} -{"k":"l"} -{"m":"n"} -{"o":"p"} -{"q":"r"} -{"s":"t"} -{"u":"v"} -{"w":"x"} -{"y":"z"} diff --git a/v2/examples/transform/utility/message_count/data.jsonl b/v2/examples/transform/utility/message_count/data.jsonl deleted file mode 100644 index d101df01..00000000 --- a/v2/examples/transform/utility/message_count/data.jsonl +++ /dev/null @@ -1,13 +0,0 @@ -{"a":"b"} -{"c":"d"} -{"e":"f"} -{"g":"h"} -{"i":"j"} -{"k":"l"} -{"m":"n"} -{"o":"p"} -{"q":"r"} -{"s":"t"} -{"u":"v"} -{"w":"x"} -{"y":"z"} diff --git a/v2/examples/transform/utility/message_freshness/config.jsonnet b/v2/examples/transform/utility/message_freshness/config.jsonnet deleted file mode 100644 index 3ce8c5f5..00000000 --- a/v2/examples/transform/utility/message_freshness/config.jsonnet +++ /dev/null @@ -1,24 +0,0 @@ -// This example shows how to use the `utility_metric_freshness` transform to -// determine if data was processed by the system within a certain time frame. -// -// Freshness is calculated by comparing a time value in the message to the current -// time and determining if the difference is less than a threshold: -// - Success: current time - timestamp < threshold -// - Failure: current time - timestamp >= threshold -// -// The transform emits two metrics that describe success and failure, annotated -// in the `FreshnessType` attribute. -local sub = import '../../../../../build/config/substation.libsonnet'; - -local attr = { AppName: 'example' }; -local dest = { type: 'aws_cloudwatch_embedded_metrics' }; - -{ - transforms: [ - sub.transform.utility.metric.freshness({ - threshold: '5s', // Amount of time spent in the system before considered stale. - object: { source_key: 'timestamp' }, // Used as the reference to determine freshness. - metric: { name: 'MessageFreshness', attributes: attr, destination: dest }, - }), - ], -} diff --git a/v2/examples/transform/utility/message_freshness/data.jsonl b/v2/examples/transform/utility/message_freshness/data.jsonl deleted file mode 100644 index 7ba48645..00000000 --- a/v2/examples/transform/utility/message_freshness/data.jsonl +++ /dev/null @@ -1 +0,0 @@ -{"timestamp":1724299266000000000} diff --git a/v2/internal/README.md b/v2/internal/README.md deleted file mode 100644 index 11ea7d7c..00000000 --- a/v2/internal/README.md +++ /dev/null @@ -1,2 +0,0 @@ -# internal -All non-public source code is stored in this folder. Each package has its own README as needed. 
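The freshness calculation described in the `message_freshness` example above reduces to a single comparison against the current time. A minimal, self-contained Go sketch of that check (illustrative only, not part of this patch; the `fresh` helper is our own naming):

```go
package main

import (
	"fmt"
	"time"
)

// fresh reports whether a nanosecond-precision Unix timestamp is within
// the threshold of the current time (success) or is stale (failure).
func fresh(tsNanos int64, threshold time.Duration) bool {
	ts := time.Unix(0, tsNanos)
	return time.Since(ts) < threshold
}

func main() {
	now := time.Now().UnixNano()
	fmt.Println(fresh(now, 5*time.Second))                                // true: just produced
	fmt.Println(fresh(now-(10*time.Second).Nanoseconds(), 5*time.Second)) // false: stale
}
```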
diff --git a/v2/internal/aggregate/aggregate.go b/v2/internal/aggregate/aggregate.go deleted file mode 100644 index 1c638ac2..00000000 --- a/v2/internal/aggregate/aggregate.go +++ /dev/null @@ -1,160 +0,0 @@ -package aggregate - -import ( - "time" -) - -const ( - defaultCount = 1000 - defaultSize = 1024 * 1024 // 1MB - defaultDuration = "1m" -) - -type Config struct { - Count int `json:"count"` - Size int `json:"size"` - Duration string `json:"duration"` -} - -type aggregate struct { - maxCount int - count int - - maxSize int - size int - - maxDuration time.Duration - now time.Time - - items [][]byte -} - -func (a *aggregate) Reset() { - a.count = 0 - a.size = 0 - a.now = time.Now() - - a.items = a.items[:0] -} - -func (a *aggregate) Add(data []byte) bool { - newCount := a.count + 1 - if newCount > a.maxCount { - return false - } - - newSize := a.size + len(data) - if newSize > a.maxSize { - return false - } - - if time.Since(a.now) > a.maxDuration { - return false - } - - a.now = time.Now() - a.count = newCount - a.size = newSize - a.items = append(a.items, data) - - return true -} - -func (a *aggregate) Get() [][]byte { - return a.items -} - -func (a *aggregate) Count() int { - return a.count -} - -func (a *aggregate) Size() int { - return a.size -} - -func New(cfg Config) (*Aggregate, error) { - if cfg.Count < 1 { - cfg.Count = defaultCount - } - - if cfg.Size < 1 { - cfg.Size = defaultSize - } - - if cfg.Duration == "" { - cfg.Duration = defaultDuration - } - - dur, err := time.ParseDuration(cfg.Duration) - if err != nil { - return nil, err - } - - return &Aggregate{ - maxCount: cfg.Count, - maxSize: cfg.Size, - maxDuration: dur, - aggs: make(map[string]*aggregate), - }, nil -} - -type Aggregate struct { - maxCount int - maxSize int - maxDuration time.Duration - - aggs map[string]*aggregate -} - -func (m *Aggregate) Add(key string, data []byte) bool { - agg, ok := m.aggs[key] - if !ok { - agg = &aggregate{ - maxCount: m.maxCount, - maxSize: m.maxSize, - maxDuration: m.maxDuration, - } - - agg.Reset() - m.aggs[key] = agg - } - - return agg.Add(data) -} - -func (m *Aggregate) Count(key string) int { - agg, ok := m.aggs[key] - if !ok { - return 0 - } - - return agg.Count() -} - -func (m *Aggregate) Get(key string) [][]byte { - agg, ok := m.aggs[key] - if !ok { - return nil - } - - return agg.Get() -} - -func (m *Aggregate) GetAll() map[string]*aggregate { - return m.aggs -} - -func (m *Aggregate) Reset(key string) { - agg, ok := m.aggs[key] - if !ok { - return - } - - agg.Reset() -} - -func (m *Aggregate) ResetAll() { - for _, agg := range m.aggs { - agg.Reset() - } -} diff --git a/v2/internal/aws/README.md b/v2/internal/aws/README.md deleted file mode 100644 index c6071b51..00000000 --- a/v2/internal/aws/README.md +++ /dev/null @@ -1,6 +0,0 @@ -# aws - -Contains functions for managing AWS API calls. Substation follows these rules across every application: -* AWS clients are configured using [environment variables](https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-envvars.html) -* AWS clients use service interface APIs (e.g., s3iface, kinesisiface, etc.) 
-* AWS clients enable [X-Ray](https://aws.amazon.com/xray/) for tracing if a [daemon address](https://docs.aws.amazon.com/xray/latest/devguide/xray-sdk-go-configuration.html#xray-sdk-go-configuration-envvars) is found diff --git a/v2/internal/aws/config_v2.go b/v2/internal/aws/config_v2.go deleted file mode 100644 index 62634a97..00000000 --- a/v2/internal/aws/config_v2.go +++ /dev/null @@ -1,83 +0,0 @@ -package aws - -import ( - "context" - "os" - "regexp" - "strconv" - - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/retry" - "github.com/aws/aws-sdk-go-v2/config" - "github.com/aws/aws-sdk-go-v2/credentials/stscreds" - "github.com/aws/aws-sdk-go-v2/service/sts" - "github.com/aws/aws-xray-sdk-go/instrumentation/awsv2" -) - -// NewV2 returns an SDK v2 configuration. -func NewV2(ctx context.Context, cfg Config) (aws.Config, error) { - var region string - if cfg.Region != "" { - region = cfg.Region - } else if v, ok := os.LookupEnv("AWS_REGION"); ok { - region = v - } else if v, ok := os.LookupEnv("AWS_DEFAULT_REGION"); ok { - region = v - } - - var creds aws.CredentialsProvider // nil is a valid default. - if cfg.RoleARN != "" { - conf, err := config.LoadDefaultConfig(ctx, - config.WithRegion(region), - ) - if err != nil { - return aws.Config{}, err - } - - stsSvc := sts.NewFromConfig(conf) - creds = stscreds.NewAssumeRoleProvider(stsSvc, cfg.RoleARN) - } - - maxRetry := 3 // Matches the standard retryer. - if cfg.MaxRetries != 0 { - maxRetry = cfg.MaxRetries - } else if v, ok := os.LookupEnv("AWS_MAX_ATTEMPTS"); ok { - max, err := strconv.Atoi(v) - if err != nil { - return aws.Config{}, err - } - - maxRetry = max - } - - errMsg := make([]*regexp.Regexp, len(cfg.RetryableErrors)) - for i, err := range cfg.RetryableErrors { - errMsg[i] = regexp.MustCompile(err) - } - - conf, err := config.LoadDefaultConfig(ctx, - config.WithRegion(region), - config.WithCredentialsProvider(creds), - config.WithRetryer(func() aws.Retryer { - return retry.NewStandard(func(o *retry.StandardOptions) { - o.MaxAttempts = maxRetry - // Additional retryable errors ~must be appended~ to not overwrite the defaults. 
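- // Replacing o.Retryables outright would drop the SDK's standard retryable checks (throttling, timeouts, and the like).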
-				o.Retryables = append(o.Retryables, retry.IsErrorRetryableFunc(func(err error) aws.Ternary {
-					for _, msg := range errMsg {
-						if msg.MatchString(err.Error()) {
-							return aws.TrueTernary
-						}
-					}
-
-					return aws.FalseTernary
-				}))
-			})
-		}),
-	)
-
-	if _, ok := os.LookupEnv("AWS_XRAY_DAEMON_ADDRESS"); ok {
-		awsv2.AWSV2Instrumentor(&conf.APIOptions)
-	}
-
-	return conf, err
-}
diff --git a/v2/internal/aws/dynamodb/dynamodb_test.go b/v2/internal/aws/dynamodb/dynamodb_test.go
deleted file mode 100644
index 6003b073..00000000
--- a/v2/internal/aws/dynamodb/dynamodb_test.go
+++ /dev/null
@@ -1,229 +0,0 @@
-package dynamodb
-
-import (
-	"context"
-	"testing"
-
-	"github.com/aws/aws-sdk-go/aws"
-	"github.com/aws/aws-sdk-go/aws/request"
-	"github.com/aws/aws-sdk-go/service/dynamodb"
-	"github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute"
-	"github.com/aws/aws-sdk-go/service/dynamodb/dynamodbiface"
-)
-
-type mockedGetItem struct {
-	dynamodbiface.DynamoDBAPI
-	Resp dynamodb.GetItemOutput
-}
-
-func (m mockedGetItem) GetItemWithContext(ctx aws.Context, input *dynamodb.GetItemInput, opts ...request.Option) (*dynamodb.GetItemOutput, error) {
-	return &m.Resp, nil
-}
-
-func TestGetItem(t *testing.T) {
-	tests := []struct {
-		resp     dynamodb.GetItemOutput
-		expected string
-	}{
-		{
-			resp: dynamodb.GetItemOutput{
-				Item: map[string]*dynamodb.AttributeValue{
-					"foo": {
-						S: aws.String("bar"),
-					},
-				},
-				ConsumedCapacity: &dynamodb.ConsumedCapacity{},
-			},
-			expected: "bar",
-		},
-	}
-
-	ctx := context.TODO()
-
-	for _, test := range tests {
-		a := API{
-			mockedGetItem{Resp: test.resp},
-		}
-
-		m := make(map[string]interface{})
-		resp, err := a.GetItem(ctx, "", m, false)
-		if err != nil {
-			t.Fatalf("%v, unexpected error", err)
-		}
-
-		var item map[string]interface{}
-		err = dynamodbattribute.UnmarshalMap(resp.Item, &item)
-		if err != nil {
-			t.Fatalf("%v, unexpected error", err)
-		}
-
-		if item["foo"] != test.expected {
-			t.Errorf("expected %s, got %v", test.expected, item["foo"])
-		}
-	}
-}
-
-type mockedBatchPutItem struct {
-	dynamodbiface.DynamoDBAPI
-	Resp dynamodb.BatchWriteItemOutput
-}
-
-func (m mockedBatchPutItem) BatchWriteItemWithContext(ctx aws.Context, input *dynamodb.BatchWriteItemInput, opts ...request.Option) (*dynamodb.BatchWriteItemOutput, error) {
-	return &m.Resp, nil
-}
-
-func TestBatchPutItem(t *testing.T) {
-	tests := []struct {
-		resp     dynamodb.BatchWriteItemOutput
-		expected string
-	}{
-		{
-			resp: dynamodb.BatchWriteItemOutput{
-				ItemCollectionMetrics: map[string][]*dynamodb.ItemCollectionMetrics{
-					"table": {
-						{
-							ItemCollectionKey: map[string]*dynamodb.AttributeValue{
-								"foo": {
-									S: aws.String("bar"),
-								},
-							},
-						},
-					},
-				},
-			},
-			expected: "bar",
-		},
-	}
-
-	ctx := context.TODO()
-
-	for _, test := range tests {
-		a := API{
-			mockedBatchPutItem{Resp: test.resp},
-		}
-
-		resp, err := a.BatchPutItem(ctx, "", []map[string]*dynamodb.AttributeValue{})
-		if err != nil {
-			t.Fatalf("%v, unexpected error", err)
-		}
-
-		var item map[string]interface{}
-		err = dynamodbattribute.UnmarshalMap(resp.ItemCollectionMetrics["table"][0].ItemCollectionKey, &item)
-		if err != nil {
-			t.Fatalf("%v, unexpected error", err)
-		}
-
-		if item["foo"] != test.expected {
-			t.Errorf("expected %s, got %v", test.expected, item["foo"])
-		}
-	}
-}
-
-type mockedPutItem struct {
-	dynamodbiface.DynamoDBAPI
-	Resp dynamodb.PutItemOutput
-}
-
-func (m mockedPutItem) PutItemWithContext(ctx aws.Context, input *dynamodb.PutItemInput, opts ...request.Option) (*dynamodb.PutItemOutput, error) {
-	return &m.Resp, nil
-}
-
-func TestPutItem(t *testing.T) {
-	tests := []struct {
-		resp     dynamodb.PutItemOutput
-		expected string
-	}{
-		{
-			resp: dynamodb.PutItemOutput{
-				Attributes: map[string]*dynamodb.AttributeValue{
-					"foo": {
-						S: aws.String("bar"),
-					},
-				},
-				ConsumedCapacity:      &dynamodb.ConsumedCapacity{},
-				ItemCollectionMetrics: &dynamodb.ItemCollectionMetrics{},
-			},
-			expected: "bar",
-		},
-	}
-
-	ctx := context.TODO()
-
-	for _, test := range tests {
-		a := API{
-			mockedPutItem{Resp: test.resp},
-		}
-
-		resp, err := a.PutItem(ctx, "", map[string]*dynamodb.AttributeValue{})
-		if err != nil {
-			t.Fatalf("%v, unexpected error", err)
-		}
-
-		var item map[string]interface{}
-		err = dynamodbattribute.UnmarshalMap(resp.Attributes, &item)
-		if err != nil {
-			t.Fatalf("%v, unexpected error", err)
-		}
-
-		if item["foo"] != test.expected {
-			t.Errorf("expected %s, got %v", test.expected, item["foo"])
-		}
-	}
-}
-
-type mockedQuery struct {
-	dynamodbiface.DynamoDBAPI
-	Resp dynamodb.QueryOutput
-}
-
-func (m mockedQuery) QueryWithContext(ctx aws.Context, input *dynamodb.QueryInput, opts ...request.Option) (*dynamodb.QueryOutput, error) {
-	return &m.Resp, nil
-}
-
-func TestQuery(t *testing.T) {
-	tests := []struct {
-		resp     dynamodb.QueryOutput
-		expected string
-	}{
-		{
-			resp: dynamodb.QueryOutput{
-				Items: []map[string]*dynamodb.AttributeValue{
-					{
-						"foo": {
-							S: aws.String("bar"),
-						},
-					},
-				},
-			},
-			expected: "bar",
-		},
-	}
-
-	ctx := context.TODO()
-
-	for _, test := range tests {
-		a := API{
-			mockedQuery{Resp: test.resp},
-		}
-
-		resp, err := a.Query(ctx, "", "", "", "", 0, true)
-		if err != nil {
-			t.Fatalf("%v, unexpected error", err)
-		}
-
-		var items []map[string]interface{}
-		for _, i := range resp.Items {
-			var item map[string]interface{}
-			err = dynamodbattribute.UnmarshalMap(i, &item)
-			if err != nil {
-				t.Fatalf("%v, unexpected error", err)
-			}
-
-			items = append(items, item)
-		}
-
-		if items[0]["foo"] != test.expected {
-			t.Errorf("expected %s, got %v", test.expected, items[0]["foo"])
-		}
-	}
-}
diff --git a/v2/internal/aws/firehose/firehose_test.go b/v2/internal/aws/firehose/firehose_test.go
deleted file mode 100644
index ca40cca7..00000000
--- a/v2/internal/aws/firehose/firehose_test.go
+++ /dev/null
@@ -1,105 +0,0 @@
-package firehose
-
-import (
-	"context"
-	"testing"
-
-	"github.com/aws/aws-sdk-go/aws"
-	"github.com/aws/aws-sdk-go/aws/request"
-	"github.com/aws/aws-sdk-go/service/firehose"
-	"github.com/aws/aws-sdk-go/service/firehose/firehoseiface"
-)
-
-type mockedPutRecord struct {
-	firehoseiface.FirehoseAPI
-	Resp firehose.PutRecordOutput
-}
-
-func (m mockedPutRecord) PutRecordWithContext(ctx aws.Context, in *firehose.PutRecordInput, opts ...request.Option) (*firehose.PutRecordOutput, error) {
-	return &m.Resp, nil
-}
-
-func TestPutRecord(t *testing.T) {
-	tests := []struct {
-		resp     firehose.PutRecordOutput
-		expected string
-	}{
-		{
-			resp: firehose.PutRecordOutput{
-				Encrypted: aws.Bool(true),
-				RecordId:  aws.String("foo"),
-			},
-			expected: "foo",
-		},
-	}
-
-	ctx := context.TODO()
-
-	for _, test := range tests {
-		a := API{
-			mockedPutRecord{Resp: test.resp},
-		}
-		resp, err := a.PutRecord(ctx, []byte{}, "")
-		if err != nil {
-			t.Fatalf("%v", err)
-		}
-
-		if *resp.RecordId != test.expected {
-			t.Errorf("expected %s, got %s", test.expected, *resp.RecordId)
-		}
-	}
-}
-
-type mockedPutRecordBatch struct {
-	firehoseiface.FirehoseAPI
-	Resp firehose.PutRecordBatchOutput
-}
-
-func (m mockedPutRecordBatch) PutRecordBatchWithContext(ctx aws.Context, in *firehose.PutRecordBatchInput, opts ...request.Option) (*firehose.PutRecordBatchOutput, error) {
-	return &m.Resp, nil
-}
-
-func TestPutRecordBatch(t *testing.T) {
-	tests := []struct {
-		resp     firehose.PutRecordBatchOutput
-		expected []string
-	}{
-		{
-			resp: firehose.PutRecordBatchOutput{
-				Encrypted:      aws.Bool(true),
-				FailedPutCount: aws.Int64(0),
-				RequestResponses: []*firehose.PutRecordBatchResponseEntry{
-					{
-						RecordId: aws.String("foo"),
-					},
-					{
-						RecordId: aws.String("bar"),
-					},
-					{
-						RecordId: aws.String("baz"),
-					},
-				},
-			},
-			expected: []string{"foo", "bar", "baz"},
-		},
-	}
-
-	ctx := context.TODO()
-
-	for _, test := range tests {
-		a := API{
-			mockedPutRecordBatch{Resp: test.resp},
-		}
-
-		resp, err := a.PutRecordBatch(ctx, "", [][]byte{})
-		if err != nil {
-			t.Fatalf("%v", err)
-		}
-
-		for idx, resp := range resp.RequestResponses {
-			if *resp.RecordId != test.expected[idx] {
-				t.Errorf("expected %s, got %s", test.expected[idx], *resp.RecordId)
-			}
-		}
-	}
-}
diff --git a/v2/internal/aws/kinesis/kinesis_test.go b/v2/internal/aws/kinesis/kinesis_test.go
deleted file mode 100644
index 8119b7a0..00000000
--- a/v2/internal/aws/kinesis/kinesis_test.go
+++ /dev/null
@@ -1,130 +0,0 @@
-package kinesis
-
-import (
-	"context"
-	"testing"
-
-	"github.com/aws/aws-sdk-go/aws"
-	"github.com/aws/aws-sdk-go/aws/request"
-	"github.com/aws/aws-sdk-go/service/kinesis"
-	"github.com/aws/aws-sdk-go/service/kinesis/kinesisiface"
-)
-
-type mockedPutRecords struct {
-	kinesisiface.KinesisAPI
-	Resp kinesis.PutRecordsOutput
-}
-
-func (m mockedPutRecords) PutRecordsWithContext(ctx aws.Context, in *kinesis.PutRecordsInput, opts ...request.Option) (*kinesis.PutRecordsOutput, error) {
-	return &m.Resp, nil
-}
-
-func TestPutRecords(t *testing.T) {
-	tests := []struct {
-		resp     kinesis.PutRecordsOutput
-		expected string
-	}{
-		{
-			resp: kinesis.PutRecordsOutput{
-				EncryptionType: aws.String("NONE"),
-				Records: []*kinesis.PutRecordsResultEntry{
-					{
-						ErrorCode:      aws.String(""),
-						ErrorMessage:   aws.String(""),
-						SequenceNumber: aws.String("ABCDEF"),
-						ShardId:        aws.String("XYZ"),
-					},
-				},
-			},
-			expected: "ABCDEF",
-		},
-	}
-
-	ctx := context.TODO()
-
-	for _, test := range tests {
-		a := API{
-			mockedPutRecords{Resp: test.resp},
-		}
-
-		b := [][]byte{
-			[]byte(""),
-		}
-		resp, err := a.PutRecords(ctx, "", "", b)
-		if err != nil {
-			t.Fatalf("%v", err)
-		}
-
-		if *resp.Records[0].SequenceNumber != test.expected {
-			t.Errorf("expected %s, got %s", test.expected, *resp.Records[0].SequenceNumber)
-		}
-	}
-}
-
-type mockedGetTags struct {
-	kinesisiface.KinesisAPI
-	Resp kinesis.ListTagsForStreamOutput
-}
-
-func (m mockedGetTags) ListTagsForStreamWithContext(ctx aws.Context, in *kinesis.ListTagsForStreamInput, opts ...request.Option) (*kinesis.ListTagsForStreamOutput, error) {
-	return &m.Resp, nil
-}
-
-func TestGetTags(t *testing.T) {
-	tests := []struct {
-		resp     kinesis.ListTagsForStreamOutput
-		expected []*kinesis.Tag
-	}{
-		{
-			resp: kinesis.ListTagsForStreamOutput{
-				Tags: []*kinesis.Tag{
-					{
-						Key:   aws.String("foo"),
-						Value: aws.String("bar"),
-					},
-					{
-						Key:   aws.String("baz"),
-						Value: aws.String("qux"),
-					},
-				},
-				// can't test recursion via this style of mock
-				HasMoreTags: aws.Bool(false),
-			},
-			expected: []*kinesis.Tag{
-				{
-					Key:   aws.String("foo"),
-					Value: aws.String("bar"),
-				},
-				{
-					Key:   aws.String("baz"),
-					Value: aws.String("qux"),
-				},
-			},
-		},
-	}
-
-	ctx := context.TODO()
-
-	for _, test := range tests {
-		a := API{
-			mockedGetTags{Resp: test.resp},
-		}
-		tags, err := a.GetTags(ctx, "")
-		if err != nil {
-			t.Fatalf("%v", err)
-		}
-
-		for idx, test := range test.expected {
-			tag := tags[idx]
-			if *tag.Key != *test.Key {
-				t.Logf("expected %s, got %s", *test.Key, *tag.Key)
-				t.Fail()
-			}
-
-			if *tag.Value != *test.Value {
-				t.Logf("expected %s, got %s", *test.Value, *tag.Value)
-				t.Fail()
-			}
-		}
-	}
-}
diff --git a/v2/internal/aws/lambda/lambda_test.go b/v2/internal/aws/lambda/lambda_test.go
deleted file mode 100644
index d0ddf985..00000000
--- a/v2/internal/aws/lambda/lambda_test.go
+++ /dev/null
@@ -1,82 +0,0 @@
-package lambda
-
-import (
-	"context"
-	"testing"
-
-	"github.com/aws/aws-sdk-go/aws"
-	"github.com/aws/aws-sdk-go/aws/request"
-	"github.com/aws/aws-sdk-go/service/lambda"
-	"github.com/aws/aws-sdk-go/service/lambda/lambdaiface"
-)
-
-type mockedInvoke struct {
-	lambdaiface.LambdaAPI
-	Resp lambda.InvokeOutput
-}
-
-func (m mockedInvoke) InvokeWithContext(ctx aws.Context, input *lambda.InvokeInput, opts ...request.Option) (*lambda.InvokeOutput, error) {
-	return &m.Resp, nil
-}
-
-func TestInvoke(t *testing.T) {
-	tests := []struct {
-		resp     lambda.InvokeOutput
-		expected int64
-	}{
-		{
-			resp: lambda.InvokeOutput{
-				StatusCode: aws.Int64(200),
-			},
-			expected: 200,
-		},
-	}
-
-	ctx := context.TODO()
-
-	for _, test := range tests {
-		a := API{
-			mockedInvoke{Resp: test.resp},
-		}
-
-		resp, err := a.Invoke(ctx, "", nil)
-		if err != nil {
-			t.Fatalf("%v, unexpected error", err)
-		}
-
-		if *resp.StatusCode != test.expected {
-			t.Errorf("expected %d, got %d", test.expected, *resp.StatusCode)
-		}
-	}
-}
-
-func TestInvokeAsync(t *testing.T) {
-	tests := []struct {
-		resp     lambda.InvokeOutput
-		expected int64
-	}{
-		{
-			resp: lambda.InvokeOutput{
-				StatusCode: aws.Int64(202),
-			},
-			expected: 202,
-		},
-	}
-
-	ctx := context.TODO()
-
-	for _, test := range tests {
-		a := API{
-			mockedInvoke{Resp: test.resp},
-		}
-
-		resp, err := a.Invoke(ctx, "", nil)
-		if err != nil {
-			t.Fatalf("%v, unexpected error", err)
-		}
-
-		if *resp.StatusCode != test.expected {
-			t.Errorf("expected %d, got %d", test.expected, *resp.StatusCode)
-		}
-	}
-}
diff --git a/v2/internal/aws/s3manager/s3manager_test.go b/v2/internal/aws/s3manager/s3manager_test.go
deleted file mode 100644
index 5ae62b43..00000000
--- a/v2/internal/aws/s3manager/s3manager_test.go
+++ /dev/null
@@ -1,118 +0,0 @@
-package s3manager
-
-import (
-	"context"
-	"io"
-	"strings"
-	"testing"
-
-	"github.com/aws/aws-sdk-go/aws"
-	"github.com/aws/aws-sdk-go/service/s3"
-	"github.com/aws/aws-sdk-go/service/s3/s3manager"
-	"github.com/aws/aws-sdk-go/service/s3/s3manager/s3manageriface"
-)
-
-type mockedDownload struct {
-	s3manageriface.DownloaderAPI
-	Resp int64
-}
-
-func (m mockedDownload) DownloadWithContext(ctx aws.Context, w io.WriterAt, input *s3.GetObjectInput, options ...func(*s3manager.Downloader)) (int64, error) {
-	return m.Resp, nil
-}
-
-func TestDownload(t *testing.T) {
-	tests := []struct {
-		resp  int64
-		input struct {
-			bucket string
-			key    string
-		}
-		expected int64
-	}{
-		{
-			resp: 1,
-			input: struct {
-				bucket string
-				key    string
-			}{
-				bucket: "foo",
-				key:    "bar",
-			},
-			expected: 1,
-		},
-	}
-
-	ctx := context.TODO()
-
-	for _, test := range tests {
-		a := DownloaderAPI{
-			mockedDownload{Resp: test.resp},
-		}
-
-		var dst io.WriterAt
-		size, err := a.Download(ctx, test.input.bucket, test.input.key, dst)
-		if err != nil {
-			t.Fatalf("%v, unexpected error", err)
-		}
-
-		if size != test.expected {
-			t.Errorf("expected %d, got %d", test.expected, size)
-		}
-	}
-}
-
-type mockedUpload struct {
-	s3manageriface.UploaderAPI
-	Resp s3manager.UploadOutput
-}
-
-func (m mockedUpload) UploadWithContext(ctx aws.Context, input *s3manager.UploadInput, options ...func(*s3manager.Uploader)) (*s3manager.UploadOutput, error) {
-	return &m.Resp, nil
-}
-
-func TestUpload(t *testing.T) {
-	tests := []struct {
-		resp  s3manager.UploadOutput
-		input struct {
-			buffer []byte
-			bucket string
-			key    string
-		}
-		expected string
-	}{
-		{
-			resp: s3manager.UploadOutput{
-				Location: "foo",
-			},
-			input: struct {
-				buffer []byte
-				bucket string
-				key    string
-			}{
-				buffer: []byte("foo"),
-				bucket: "bar",
-				key:    "baz",
-			},
-			expected: "foo",
-		},
-	}
-
-	ctx := context.TODO()
-
-	for _, test := range tests {
-		a := UploaderAPI{
-			mockedUpload{Resp: test.resp},
-		}
-
-		src := strings.NewReader("foo")
-		resp, err := a.Upload(ctx, test.input.bucket, test.input.key, "", src)
-		if err != nil {
-			t.Fatalf("%v, unexpected error", err)
-		}
-
-		if resp.Location != test.expected {
-			t.Errorf("expected %s, got %s", test.expected, resp.Location)
-		}
-	}
-}
diff --git a/v2/internal/aws/secretsmanager/secretsmanager_test.go b/v2/internal/aws/secretsmanager/secretsmanager_test.go
deleted file mode 100644
index 587bbdad..00000000
--- a/v2/internal/aws/secretsmanager/secretsmanager_test.go
+++ /dev/null
@@ -1,53 +0,0 @@
-package secretsmanager
-
-import (
-	"context"
-	"testing"
-
-	"github.com/aws/aws-sdk-go/aws"
-	"github.com/aws/aws-sdk-go/aws/request"
-	"github.com/aws/aws-sdk-go/service/secretsmanager"
-	"github.com/aws/aws-sdk-go/service/secretsmanager/secretsmanageriface"
-)
-
-type mockedGetSecret struct {
-	secretsmanageriface.SecretsManagerAPI
-	Resp secretsmanager.GetSecretValueOutput
-}
-
-func (m mockedGetSecret) GetSecretValueWithContext(ctx aws.Context, input *secretsmanager.GetSecretValueInput, opts ...request.Option) (*secretsmanager.GetSecretValueOutput, error) {
-	return &m.Resp, nil
-}
-
-func TestGetSecret(t *testing.T) {
-	tests := []struct {
-		resp     secretsmanager.GetSecretValueOutput
-		input    string
-		expected string
-	}{
-		{
-			resp: secretsmanager.GetSecretValueOutput{
-				SecretString: aws.String("foo"),
-			},
-			input:    "fooer",
-			expected: "foo",
-		},
-	}
-
-	ctx := context.TODO()
-
-	for _, test := range tests {
-		a := API{
-			mockedGetSecret{Resp: test.resp},
-		}
-
-		resp, err := a.GetSecret(ctx, test.input)
-		if err != nil {
-			t.Fatalf("%v, unexpected error", err)
-		}
-
-		if resp != test.expected {
-			t.Errorf("expected %s, got %s", test.expected, resp)
-		}
-	}
-}
diff --git a/v2/internal/aws/sns/sns_test.go b/v2/internal/aws/sns/sns_test.go
deleted file mode 100644
index 3e2f69d8..00000000
--- a/v2/internal/aws/sns/sns_test.go
+++ /dev/null
@@ -1,104 +0,0 @@
-package sns
-
-import (
-	"context"
-	"testing"
-
-	"github.com/aws/aws-sdk-go/aws"
-	"github.com/aws/aws-sdk-go/aws/request"
-	"github.com/aws/aws-sdk-go/service/sns"
-	"github.com/aws/aws-sdk-go/service/sns/snsiface"
-)
-
-type mockedPublish struct {
-	snsiface.SNSAPI
-	Resp sns.PublishOutput
-}
-
-func (m mockedPublish) PublishWithContext(ctx aws.Context, in *sns.PublishInput, opts ...request.Option) (*sns.PublishOutput, error) {
-	return &m.Resp, nil
-}
-
-func TestPublish(t *testing.T) {
-	tests := []struct {
-		resp     sns.PublishOutput
-		expected string
-	}{
-		{
-			resp: sns.PublishOutput{
-				MessageId: aws.String("foo"),
-			},
-			expected: "foo",
-		},
-	}
-
-	ctx := context.TODO()
-
-	for _, test := range tests {
-		a := API{
-			mockedPublish{Resp: test.resp},
-		}
-
-		resp, err := a.Publish(ctx, "", []byte(""))
-		if err != nil {
-			t.Fatalf("%v", err)
-		}
-
-		if *resp.MessageId != test.expected {
-			t.Errorf("expected %s, got %s", test.expected, *resp.MessageId)
-		}
-	}
-}
-
-type mockedPublishBatch struct {
-	snsiface.SNSAPI
-	Resp sns.PublishBatchOutput
-}
-
-func (m mockedPublishBatch) PublishBatchWithContext(ctx aws.Context, in *sns.PublishBatchInput, opts ...request.Option) (*sns.PublishBatchOutput, error) {
-	return &m.Resp, nil
-}
-
-func TestPublishBatch(t *testing.T) {
-	tests := []struct {
-		resp     sns.PublishBatchOutput
-		expected []string
-	}{
-		{
-			resp: sns.PublishBatchOutput{
-				Successful: []*sns.PublishBatchResultEntry{
-					{
-						MessageId: aws.String("foo"),
-					},
-					{
-						MessageId: aws.String("bar"),
-					},
-					{
-						MessageId: aws.String("baz"),
-					},
-				},
-			},
-
-			expected: []string{"foo", "bar", "baz"},
-		},
-	}
-
-	ctx := context.TODO()
-
-	for _, test := range tests {
-		a := API{
-			mockedPublishBatch{Resp: test.resp},
-		}
-
-		resp, err := a.PublishBatch(ctx, "", [][]byte{})
-		if err != nil {
-			t.Fatalf("%v", err)
-		}
-
-		for idx, resp := range resp.Successful {
-			if *resp.MessageId != test.expected[idx] {
-				t.Errorf("expected %s, got %s", test.expected[idx], *resp.MessageId)
-			}
-		}
-	}
-}
diff --git a/v2/internal/aws/sqs/sqs_test.go b/v2/internal/aws/sqs/sqs_test.go
deleted file mode 100644
index 297232ae..00000000
--- a/v2/internal/aws/sqs/sqs_test.go
+++ /dev/null
@@ -1,103 +0,0 @@
-package sqs
-
-import (
-	"context"
-	"testing"
-
-	"github.com/aws/aws-sdk-go/aws"
-	"github.com/aws/aws-sdk-go/aws/request"
-	"github.com/aws/aws-sdk-go/service/sqs"
-	"github.com/aws/aws-sdk-go/service/sqs/sqsiface"
-)
-
-type mockedSendMessage struct {
-	sqsiface.SQSAPI
-	Resp sqs.SendMessageOutput
-}
-
-func (m mockedSendMessage) SendMessageWithContext(ctx aws.Context, in *sqs.SendMessageInput, opts ...request.Option) (*sqs.SendMessageOutput, error) {
-	return &m.Resp, nil
-}
-
-func TestSendMessage(t *testing.T) {
-	tests := []struct {
-		resp     sqs.SendMessageOutput
-		expected string
-	}{
-		{
-			resp: sqs.SendMessageOutput{
-				MessageId: aws.String("foo"),
-			},
-			expected: "foo",
-		},
-	}
-
-	ctx := context.TODO()
-
-	for _, test := range tests {
-		a := API{
-			mockedSendMessage{Resp: test.resp},
-		}
-
-		resp, err := a.SendMessage(ctx, "", []byte(""))
-		if err != nil {
-			t.Fatalf("%v", err)
-		}
-
-		if *resp.MessageId != test.expected {
-			t.Errorf("expected %s, got %s", test.expected, *resp.MessageId)
-		}
-	}
-}
-
-type mockedSendMessageBatch struct {
-	sqsiface.SQSAPI
-	Resp sqs.SendMessageBatchOutput
-}
-
-func (m mockedSendMessageBatch) SendMessageBatchWithContext(ctx aws.Context, in *sqs.SendMessageBatchInput, opts ...request.Option) (*sqs.SendMessageBatchOutput, error) {
-	return &m.Resp, nil
-}
-
-func TestSendMessageBatch(t *testing.T) {
-	tests := []struct {
-		resp     sqs.SendMessageBatchOutput
-		expected []string
-	}{
-		{
-			resp: sqs.SendMessageBatchOutput{
-				Successful: []*sqs.SendMessageBatchResultEntry{
-					{
-						MessageId: aws.String("foo"),
-					},
-					{
-						MessageId: aws.String("bar"),
-					},
-					{
-						MessageId: aws.String("baz"),
-					},
-				},
-			},
-			expected: []string{"foo", "bar", "baz"},
-		},
-	}
-
-	ctx := context.TODO()
-
-	for _, test := range tests {
-		a := API{
-			mockedSendMessageBatch{Resp: test.resp},
-		}
-
-		resp, err := a.SendMessageBatch(ctx, "", [][]byte{})
-		if err != nil {
-			t.Fatalf("%v", err)
-		}
-
-		for idx, resp := range resp.Successful {
-			if *resp.MessageId != test.expected[idx] {
-				t.Errorf("expected %s, got %s", test.expected[idx], *resp.MessageId)
-			}
-		}
-	}
-}
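Editor's note: the deleted tests above all rely on the same aws-sdk-go v1 technique: a struct embeds the service's *iface interface to satisfy every method, then overrides only the call under test with a canned response. A minimal, dependency-free sketch of that pattern; the Storer interface and all names here are illustrative, not from the codebase.

package main

import "fmt"

// Storer stands in for a large SDK interface such as dynamodbiface.DynamoDBAPI.
type Storer interface {
	Get(key string) (string, error)
	Put(key, value string) error
}

// mockedGet embeds Storer so that Put is satisfied without being implemented,
// and overrides Get with a fixed response. Calling an unimplemented method
// would panic, so each mock overrides exactly the call its test exercises.
type mockedGet struct {
	Storer
	Resp string
}

func (m mockedGet) Get(key string) (string, error) {
	return m.Resp, nil
}

func main() {
	var s Storer = mockedGet{Resp: "bar"}
	v, _ := s.Get("foo")
	fmt.Println(v) // bar
}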
diff --git a/v2/internal/base64/base64.go b/v2/internal/base64/base64.go
deleted file mode 100644
index 79e8538c..00000000
--- a/v2/internal/base64/base64.go
+++ /dev/null
@@ -1,25 +0,0 @@
-package base64
-
-import (
-	"encoding/base64"
-	"fmt"
-)
-
-// Decode is a convenience wrapper for base64 decoding bytes.
-func Decode(b []byte) ([]byte, error) {
-	decode := make([]byte, base64.StdEncoding.DecodedLen(len(b)))
-	n, err := base64.StdEncoding.Decode(decode, b)
-	if err != nil {
-		return nil, fmt.Errorf("decode: %v", err)
-	}
-
-	return decode[:n], nil
-}
-
-// Encode is a convenience wrapper for base64 encoding bytes.
-func Encode(b []byte) []byte {
-	encode := make([]byte, base64.StdEncoding.EncodedLen(len(b)))
-	base64.StdEncoding.Encode(encode, b)
-
-	return encode
-}
diff --git a/v2/internal/base64/base64_test.go b/v2/internal/base64/base64_test.go
deleted file mode 100644
index bbd1b193..00000000
--- a/v2/internal/base64/base64_test.go
+++ /dev/null
@@ -1,96 +0,0 @@
-package base64
-
-import (
-	"bytes"
-	"testing"
-)
-
-var decodeTests = []struct {
-	name     string
-	test     []byte
-	expected []byte
-}{
-	{
-		name:     "foo",
-		test:     []byte(`Zm9v`),
-		expected: []byte(`foo`),
-	},
-	{
-		name:     "zlib",
-		test:     []byte(`eJwFwCENAAAAgLC22Pd3LAYCggFF`),
-		expected: []byte{120, 156, 5, 192, 33, 13, 0, 0, 0, 128, 176, 182, 216, 247, 119, 44, 6, 2, 130, 1, 69},
-	},
-}
-
-func TestBase64Decode(t *testing.T) {
-	for _, test := range decodeTests {
-		result, err := Decode(test.test)
-		if err != nil {
-			t.Errorf("got error %v", err)
-			return
-		}
-
-		if c := bytes.Compare(result, test.expected); c != 0 {
-			t.Errorf("expected %s, got %s", test.expected, result)
-		}
-	}
-}
-
-func benchmarkBase64Decode(b *testing.B, test []byte) {
-	for i := 0; i < b.N; i++ {
-		_, _ = Decode(test)
-	}
-}
-
-func BenchmarkBase64Decode(b *testing.B) {
-	for _, test := range decodeTests {
-		b.Run(test.name,
-			func(b *testing.B) {
-				benchmarkBase64Decode(b, test.test)
-			},
-		)
-	}
-}
-
-var encodeTests = []struct {
-	name     string
-	test     []byte
-	expected []byte
-}{
-	{
-		name:     "foo",
-		test:     []byte(`foo`),
-		expected: []byte(`Zm9v`),
-	},
-	{
-		name:     "zlib",
-		test:     []byte{120, 156, 5, 192, 33, 13, 0, 0, 0, 128, 176, 182, 216, 247, 119, 44, 6, 2, 130, 1, 69},
-		expected: []byte(`eJwFwCENAAAAgLC22Pd3LAYCggFF`),
-	},
-}
-
-func TestBase64Encode(t *testing.T) {
-	for _, test := range encodeTests {
-		result := Encode(test.test)
-
-		if c := bytes.Compare(result, test.expected); c != 0 {
-			t.Errorf("expected %s, got %s", test.expected, result)
-		}
-	}
-}
-
-func benchmarkBase64Encode(b *testing.B, test []byte) {
-	for i := 0; i < b.N; i++ {
-		Encode(test)
-	}
-}
-
-func BenchmarkBase64Encode(b *testing.B) {
-	for _, test := range encodeTests {
-		b.Run(test.name,
			func(b *testing.B) {
-				benchmarkBase64Encode(b, test.test)
-			},
-		)
-	}
-}
diff --git a/v2/internal/bufio/bufio_test.go b/v2/internal/bufio/bufio_test.go
deleted file mode 100644
index fce8d82f..00000000
--- a/v2/internal/bufio/bufio_test.go
+++ /dev/null
@@ -1,31 +0,0 @@
-package bufio
-
-import (
-	"os"
-	"testing"
-)
-
-func benchmarkScannerReadFile(b *testing.B, s *scanner, file *os.File) {
-	for i := 0; i < b.N; i++ {
-		_ = s.ReadFile(file)
-		for s.Scan() {
-			s.Text()
-		}
-	}
-}
-
-func BenchmarkScannerReadFile(b *testing.B) {
-	file, _ := os.CreateTemp("", "substation")
-	defer os.Remove(file.Name())
-
-	_, _ = file.Write([]byte("foo\nbar\nbaz"))
-
-	s := NewScanner()
-	defer s.Close()
-
-	b.Run("scanner_read_file",
-		func(b *testing.B) {
-			benchmarkScannerReadFile(b, s, file)
-		},
-	)
-}
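Editor's note on the Decode wrapper above: base64.StdEncoding.Decode writes into a buffer sized by DecodedLen, which is an upper bound (it assumes maximal padding), so the result must be truncated to the n bytes actually written. A standard-library-only sketch of the same round trip:

package main

import (
	"bytes"
	"encoding/base64"
	"fmt"
)

func main() {
	in := []byte("foo")

	// Encode into an exactly sized buffer.
	enc := make([]byte, base64.StdEncoding.EncodedLen(len(in)))
	base64.StdEncoding.Encode(enc, in)

	// DecodedLen over-allocates, so keep only the n bytes Decode reports writing;
	// skipping the truncation leaves trailing zero bytes in the result.
	dec := make([]byte, base64.StdEncoding.DecodedLen(len(enc)))
	n, _ := base64.StdEncoding.Decode(dec, enc)

	fmt.Println(bytes.Equal(dec[:n], in)) // true
}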
diff --git a/v2/internal/channel/channel.go b/v2/internal/channel/channel.go
deleted file mode 100644
index 8eeacda7..00000000
--- a/v2/internal/channel/channel.go
+++ /dev/null
@@ -1,54 +0,0 @@
-package channel
-
-import "sync"
-
-func New[T any](options ...func(*Channel[T])) *Channel[T] {
-	ch := &Channel[T]{c: make(chan T)}
-	for _, o := range options {
-		o(ch)
-	}
-
-	return ch
-}
-
-// Channel provides methods for safely reading from and writing to channels.
-type Channel[T any] struct {
-	mu     sync.Mutex
-	c      chan T
-	closed bool
-}
-
-// WithBuffer sets the buffer size of the channel.
-func WithBuffer[T any](i int) func(*Channel[T]) {
-	return func(s *Channel[T]) {
-		s.c = make(chan T, i)
-	}
-}
-
-// Close closes the channel. If the channel is already closed, then this is a no-op.
-func (c *Channel[T]) Close() {
-	c.mu.Lock()
-	defer c.mu.Unlock()
-
-	if !c.closed {
-		close(c.c)
-		c.closed = true
-	}
-}
-
-// Send sends a value to the channel. If the channel is closed, then this is a no-op.
-func (c *Channel[T]) Send(t T) {
-	c.mu.Lock()
-	defer c.mu.Unlock()
-
-	if c.closed {
-		return
-	}
-
-	c.c <- t
-}
-
-// Recv returns a read-only channel.
-func (c *Channel[T]) Recv() <-chan T {
-	return c.c
-}
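Editor's note: a usage sketch of the generic wrapper above, written as if it lived alongside the package, showing what the doc comments promise: Close is safe to call twice, and Send after Close is a no-op rather than a panic.

package channel

import "fmt"

func ExampleChannel() {
	ch := New[int](WithBuffer[int](2))

	ch.Send(1)
	ch.Send(2)
	ch.Close()
	ch.Close() // no-op; closing a bare channel twice panics
	ch.Send(3) // no-op; sending on a closed bare channel panics

	for v := range ch.Recv() {
		fmt.Println(v)
	}
	// Output:
	// 1
	// 2
}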
diff --git a/v2/internal/errors/errors.go b/v2/internal/errors/errors.go
deleted file mode 100644
index b41e0215..00000000
--- a/v2/internal/errors/errors.go
+++ /dev/null
@@ -1,12 +0,0 @@
-package errors
-
-import "fmt"
-
-// ErrInvalidFactoryInput is returned when an unsupported input is referenced in any factory function.
-var ErrInvalidFactoryInput = fmt.Errorf("invalid factory input")
-
-// ErrMissingRequiredOption is returned when a component does not have the required options to properly run.
-var ErrMissingRequiredOption = fmt.Errorf("missing required option")
-
-// ErrInvalidOption is returned when an invalid option is received in a constructor.
-var ErrInvalidOption = fmt.Errorf("invalid option")
diff --git a/v2/internal/http/README.md b/v2/internal/http/README.md
deleted file mode 100644
index cf518876..00000000
--- a/v2/internal/http/README.md
+++ /dev/null
@@ -1,5 +0,0 @@
-## http
-
-Contains functions for managing HTTP requests. Substation follows these rules across every application:
-* HTTP clients are always retryable clients from [this package](https://github.com/hashicorp/go-retryablehttp)
-* For AWS deployments, HTTP clients enable AWS X-Ray
diff --git a/v2/internal/http/http.go b/v2/internal/http/http.go
deleted file mode 100644
index c7f0d01b..00000000
--- a/v2/internal/http/http.go
+++ /dev/null
@@ -1,88 +0,0 @@
-package http
-
-import (
-	"context"
-	"fmt"
-	"net/http"
-
-	"github.com/aws/aws-xray-sdk-go/xray"
-	"github.com/hashicorp/go-retryablehttp"
-)
-
-// errHTTPInvalidPayload is returned by Post when it receives an unexpected payload interface.
-var errHTTPInvalidPayload = fmt.Errorf("invalid payload interface")
-
-// Header contains a single HTTP header that can be passed to HTTP.Post. Multiple headers can be passed to HTTP.Post as a slice.
-type Header struct {
-	Key   string
-	Value string
-}
-
-// HTTP wraps a retryable HTTP client.
-type HTTP struct {
-	Client *retryablehttp.Client
-}
-
-// Setup creates a retryable HTTP client.
-func (h *HTTP) Setup() {
-	h.Client = retryablehttp.NewClient()
-}
-
-// EnableXRay replaces the standard retryable HTTP client with an AWS X-Ray client. This method can be used when making HTTP calls on AWS infrastructure and should be enabled by looking for the environment variable "AWS_XRAY_DAEMON_ADDRESS".
-func (h *HTTP) EnableXRay() {
-	h.Client.HTTPClient = xray.Client(h.Client.HTTPClient)
-}
-
-// IsEnabled identifies if the HTTP client is enabled and ready to use. This method can be used for lazy loading the client.
-func (h *HTTP) IsEnabled() bool {
-	return h.Client != nil
-}
-
-// Get is a context-aware convenience function for making GET requests.
-func (h *HTTP) Get(ctx context.Context, url string, headers ...Header) (*http.Response, error) {
-	req, err := retryablehttp.NewRequestWithContext(context.WithoutCancel(ctx), "GET", url, nil)
-	if err != nil {
-		return nil, fmt.Errorf("http get URL %s: %v", url, err)
-	}
-
-	for _, h := range headers {
-		req.Header.Add(h.Key, h.Value)
-	}
-
-	resp, err := h.Client.Do(req)
-	if err != nil {
-		return nil, fmt.Errorf("http get URL %s: %v", url, err)
-	}
-
-	return resp, err
-}
-
-// Post is a context-aware convenience function for making POST requests. This method optionally supports custom headers.
-func (h *HTTP) Post(ctx context.Context, url string, payload interface{}, headers ...Header) (resp *http.Response, err error) {
-	var tmp []byte
-
-	switch p := payload.(type) {
-	case []byte:
-		tmp = p
-	case string:
-		tmp = []byte(p)
-	default:
-		return nil, fmt.Errorf("http post URL %s: %v", url, errHTTPInvalidPayload)
-	}
-
-	req, err := retryablehttp.NewRequestWithContext(context.WithoutCancel(ctx), "POST", url, tmp)
-	if err != nil {
-		return nil, fmt.Errorf("http post URL %s: %v", url, err)
-	}
-
-	for _, h := range headers {
-		req.Header.Add(h.Key, h.Value)
-	}
-
-	resp, err = h.Client.Do(req)
-	if err != nil {
-		return nil, fmt.Errorf("http post URL %s: %v", url, err)
-	}
-
-	return resp, nil
-}
diff --git a/v2/internal/http/http_test.go b/v2/internal/http/http_test.go
deleted file mode 100644
index 95beff6c..00000000
--- a/v2/internal/http/http_test.go
+++ /dev/null
@@ -1,87 +0,0 @@
-package http
-
-import (
-	"bytes"
-	"context"
-	"errors"
-	"io"
-	"net/http"
-	"net/http/httptest"
-	"testing"
-
-	"github.com/hashicorp/go-retryablehttp"
-)
-
-func TestPost(t *testing.T) {
-	tests := []struct {
-		payload  interface{}
-		expected error
-	}{
-		{
-			payload:  []byte("test"),
-			expected: nil,
-		},
-		{
-			payload:  []byte("test"),
-			expected: nil,
-		},
-	}
-
-	serv := httptest.NewServer(
-		http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
-			w.WriteHeader(http.StatusOK)
-		}))
-	defer serv.Close()
-
-	ctx := context.TODO()
-
-	h := HTTP{
-		retryablehttp.NewClient(),
-	}
-
-	for _, test := range tests {
-		_, err := h.Post(ctx, serv.URL, test.payload)
-		if !errors.Is(err, test.expected) {
-			t.Errorf("expected %+v, got %+v", test.expected, err)
-		}
-	}
-}
-
-func TestGet(t *testing.T) {
-	tests := []struct {
-		expected []byte
-	}{
-		{
-			expected: []byte("foo"),
-		},
-	}
-
-	serv := httptest.NewServer(
-		http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
-			_, _ = w.Write([]byte("foo"))
-		}))
-	defer serv.Close()
-
-	ctx := context.TODO()
-
-	h := HTTP{
-		retryablehttp.NewClient(),
-	}
-
-	for _, test := range tests {
-		resp, err := h.Get(ctx, serv.URL)
-		if err != nil {
-			t.Fatalf("%v", err)
-		}
-
-		defer resp.Body.Close()
-		body, err := io.ReadAll(resp.Body)
-		if err != nil {
-			t.Fatalf("%v", err)
-		}
-
-		if c := bytes.Compare(body, test.expected); c != 0 {
-			t.Errorf("expected %+v, got %+v", test.expected, body)
-		}
-	}
-}
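Editor's note: a sketch of the lazy-loading flow the doc comments above describe, written as if it lived alongside the package: Setup runs on first use, and X-Ray is enabled only when AWS_XRAY_DAEMON_ADDRESS is present. The postJSON helper is hypothetical.

package http

import (
	"context"
	"os"
)

var client HTTP

// postJSON is a hypothetical caller that lazy-loads the shared client.
func postJSON(ctx context.Context, url string, body []byte) error {
	if !client.IsEnabled() {
		client.Setup()
		if _, ok := os.LookupEnv("AWS_XRAY_DAEMON_ADDRESS"); ok {
			client.EnableXRay()
		}
	}

	resp, err := client.Post(ctx, url, body, Header{Key: "Content-Type", Value: "application/json"})
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	return nil
}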
diff --git a/v2/internal/log/log.go b/v2/internal/log/log.go
deleted file mode 100644
index c9288b67..00000000
--- a/v2/internal/log/log.go
+++ /dev/null
@@ -1,36 +0,0 @@
-// Package log wraps logrus and provides global logging.
-// Only debug logging should be used in condition/, process/, and internal/ to reduce the likelihood of corrupting output for apps.
-// Debug and info logging can be used in cmd/.
-package log
-
-import (
-	"os"
-
-	"github.com/sirupsen/logrus"
-)
-
-var log = logrus.New()
-
-// Debug wraps logrus Debug function with stack information
-func Debug(args ...interface{}) {
-	log.Debug(args...)
-}
-
-// Info wraps logrus Info function with stack information
-func Info(args ...interface{}) {
-	log.Info(args...)
-}
-
-// WithField wraps logrus WithField function
-func WithField(k string, v interface{}) *logrus.Entry {
-	return log.WithField(k, v)
-}
-
-func init() {
-	if _, ok := os.LookupEnv("SUBSTATION_DEBUG"); ok {
-		log.SetLevel(logrus.DebugLevel)
-		return
-	}
-
-	log.SetLevel(logrus.InfoLevel)
-}
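Editor's note: a sketch of the convention the package comment above sets out: library code logs at debug level only, so output stays clean unless SUBSTATION_DEBUG is set in the environment. The package name and import path are assumptions based on this repository's layout.

package example

import "github.com/brexhq/substation/v2/internal/log"

func process(b []byte) {
	// Emitted only when SUBSTATION_DEBUG is set; in normal runs, the
	// application's output is not interleaved with log lines.
	log.WithField("bytes", len(b)).Debug("processing message")
}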
diff --git a/v2/internal/media/media.go b/v2/internal/media/media.go
deleted file mode 100644
index bbecdc89..00000000
--- a/v2/internal/media/media.go
+++ /dev/null
@@ -1,48 +0,0 @@
-// Package media provides capabilities for inspecting the content of data and identifying its media (Multipurpose Internet Mail Extensions, MIME) type.
-package media
-
-import (
-	"bytes"
-	"fmt"
-	"net/http"
-	"os"
-)
-
-// Bytes returns the media type of a byte slice.
-func Bytes(b []byte) string {
-	switch {
-	// http.DetectContentType cannot detect bzip2
-	case bytes.HasPrefix(b, []byte("\x42\x5a\x68")):
-		return "application/x-bzip2"
-	// http.DetectContentType occasionally (rarely) generates false positive matches for application/vnd.ms-fontobject when the bytes are application/x-gzip
-	case bytes.HasPrefix(b, []byte("\x1f\x8b\x08")):
-		return "application/x-gzip"
-	// http.DetectContentType cannot detect zstd
-	case bytes.HasPrefix(b, []byte("\x28\xb5\x2f\xfd")):
-		return "application/x-zstd"
-	// http.DetectContentType cannot detect snappy
-	case bytes.HasPrefix(b, []byte("\xff\x06\x00\x00\x73\x4e\x61\x50\x70\x59")):
-		return "application/x-snappy-framed"
-	default:
-		return http.DetectContentType(b)
-	}
-}
-
-// File returns the media type of an open file. The caller is responsible for resetting the position of the file.
-func File(f *os.File) (string, error) {
-	if _, err := f.Seek(0, 0); err != nil {
-		return "", fmt.Errorf("media file: %v", err)
-	}
-
-	// http.DetectContentType reads the first 512 bytes of data
-	buf := make([]byte, 512)
-	n, err := f.Read(buf)
-	if err != nil {
-		return "", fmt.Errorf("media file: %v", err)
-	}
-
-	// buf is truncated to avoid false positives due to missing bytes
-	buf = buf[:n]
-
-	return Bytes(buf), nil
-}
diff --git a/v2/internal/media/media_test.go b/v2/internal/media/media_test.go
deleted file mode 100644
index 1ea43a6b..00000000
--- a/v2/internal/media/media_test.go
+++ /dev/null
@@ -1,106 +0,0 @@
-package media
-
-import (
-	"os"
-	"testing"
-)
-
-var mediaTests = []struct {
-	name     string
-	test     []byte
-	expected string
-}{
-	{
-		"bzip2",
-		[]byte("\x42\x5a\x68"),
-		"application/x-bzip2",
-	},
-	{
-		"gzip",
-		[]byte("\x1f\x8b\x08"),
-		"application/x-gzip",
-	},
-	{
-		"zstd",
-		[]byte("\x28\xb5\x2f\xfd"),
-		"application/x-zstd",
-	},
-	{
-		"snappy",
-		[]byte("\xff\x06\x00\x00\x73\x4e\x61\x50\x70\x59"),
-		"application/x-snappy-framed",
-	},
-}
-
-func TestBytes(t *testing.T) {
-	for _, test := range mediaTests {
-		mediaType := Bytes(test.test)
-
-		if mediaType != test.expected {
-			t.Errorf("expected %s, got %s", test.expected, mediaType)
-		}
-	}
-}
-
-func benchmarkBytes(b *testing.B, test []byte) {
-	for i := 0; i < b.N; i++ {
-		_ = Bytes(test)
-	}
-}
-
-func BenchmarkBytes(b *testing.B) {
-	for _, test := range mediaTests {
-		b.Run(test.name,
-			func(b *testing.B) {
-				benchmarkBytes(b, test.test)
-			},
-		)
-	}
-}
-
-func TestFile(t *testing.T) {
-	for _, test := range mediaTests {
-		temp, err := os.CreateTemp("", "substation")
-		if err != nil {
-			t.Errorf("got error %v", err)
-		}
-		defer os.Remove(temp.Name())
-		defer temp.Close()
-
-		if _, err := temp.Write(test.test); err != nil {
-			t.Errorf("got error %v", err)
-		}
-
-		mediaType, err := File(temp)
-		if err != nil {
-			t.Errorf("got error %v", err)
-		}
-
-		if mediaType != test.expected {
-			t.Errorf("expected %s, got %s", test.expected, mediaType)
-		}
-	}
-}
-
-func benchmarkFile(b *testing.B, test *os.File) {
-	for i := 0; i < b.N; i++ {
-		_, _ = File(test)
-	}
-}
-
-func BenchmarkFile(b *testing.B) {
-	temp, _ := os.CreateTemp("", "substation")
-	defer os.Remove(temp.Name())
-	defer temp.Close()
-
-	for _, test := range mediaTests {
-		_, _ = temp.Seek(0, 0)
-		_, _ = temp.Write(test.test)
-
-		b.Run(test.name,
-			func(b *testing.B) {
-				benchmarkFile(b, temp)
-			},
-		)
-	}
-}
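Editor's note: a usage sketch written as if alongside the package above: sniff an open file to choose a decompression path, then rewind, since File leaves the offset past the bytes it read (the doc comment's warning). The sniff helper and the gzip branch are illustrative.

package media

import (
	"fmt"
	"os"
)

func sniff(path string) error {
	f, err := os.Open(path)
	if err != nil {
		return err
	}
	defer f.Close()

	mediaType, err := File(f)
	if err != nil {
		return err
	}

	// File does not reset the offset; rewind before reading the content.
	if _, err := f.Seek(0, 0); err != nil {
		return err
	}

	switch mediaType {
	case "application/x-gzip":
		fmt.Println("decompress with gzip")
	default:
		fmt.Println(mediaType)
	}

	return nil
}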
diff --git a/v2/message/message_test.go b/v2/message/message_test.go
deleted file mode 100644
index b7b356a1..00000000
--- a/v2/message/message_test.go
+++ /dev/null
@@ -1,343 +0,0 @@
-package message
-
-import (
-	"bytes"
-	"strings"
-	"testing"
-)
-
-var messageNewTests = []struct {
-	name     string
-	data     []byte
-	expected []byte
-}{
-	{
-		"empty",
-		[]byte{},
-		[]byte{},
-	},
-	{
-		"data",
-		[]byte(`{"a":"b","c":"d"}`),
-		[]byte(`{"a":"b","c":"d"}`),
-	},
-}
-
-func TestMessageNew(t *testing.T) {
-	for _, test := range messageNewTests {
-		msg := New().SetData(test.data)
-
-		if !bytes.Equal(msg.Data(), test.expected) {
-			t.Errorf("expected %s, got %s", test.expected, msg.Data())
-		}
-	}
-}
-
-func benchmarkTestMessageNew(b *testing.B, data []byte) {
-	for i := 0; i < b.N; i++ {
-		_ = New().SetData(data)
-	}
-}
-
-func BenchmarkTestMessageNew(b *testing.B) {
-	for _, test := range messageNewTests {
-		b.Run(test.name,
-			func(b *testing.B) {
-				benchmarkTestMessageNew(b, test.data)
-			},
-		)
-	}
-}
-
-var messageDeleteTests = []struct {
-	name     string
-	data     []byte
-	expected []byte
-	key      string
-}{
-	{
-		"a",
-		[]byte(`{"a":"b","c":"d"}`),
-		[]byte(`{"c":"d"}`),
-		"a",
-	},
-}
-
-func TestMessageDeleteData(t *testing.T) {
-	for _, test := range messageDeleteTests {
-		msg := New().SetData(test.data)
-
-		if err := msg.DeleteValue(test.key); err != nil {
-			t.Error(err)
-		}
-
-		if !bytes.Equal(msg.Data(), test.expected) {
-			t.Errorf("expected %s, got %s", test.expected, msg.Data())
-		}
-	}
-}
-
-func benchmarkTestMessageDeleteData(b *testing.B, key string, data []byte) {
-	msg := New().SetData(data)
-	b.ResetTimer()
-
-	for i := 0; i < b.N; i++ {
-		_ = msg.DeleteValue(key)
-	}
-}
-
-func BenchmarkTestMessageDeleteData(b *testing.B) {
-	for _, test := range messageDeleteTests {
-		b.Run(test.name,
-			func(b *testing.B) {
-				benchmarkTestMessageDeleteData(b, test.key, test.data)
-			},
-		)
-	}
-}
-
-func TestMessageDeleteMetadata(t *testing.T) {
-	for _, test := range messageDeleteTests {
-		message := New().SetMetadata(test.data)
-
-		key := strings.Join([]string{metaKey, test.key}, " ")
-		if err := message.DeleteValue(key); err != nil {
-			t.Error(err)
-		}
-
-		if !bytes.Equal(message.Metadata(), test.expected) {
-			t.Errorf("expected %s, got %s", test.expected, message.Metadata())
-		}
-	}
-}
-
-func benchmarkTestMessageDeleteMetadata(b *testing.B, key string, data []byte) {
-	msg := New().SetMetadata(data)
-	b.ResetTimer()
-
-	for i := 0; i < b.N; i++ {
-		_ = msg.DeleteValue(key)
-	}
-}
-
-func BenchmarkTestMessageDeleteMetadata(b *testing.B) {
-	for _, test := range messageDeleteTests {
-		key := strings.Join([]string{metaKey, test.key}, " ")
-
-		b.Run(test.name,
-			func(b *testing.B) {
-				benchmarkTestMessageDeleteMetadata(b, key, test.data)
-			},
-		)
-	}
-}
-
-var messageGetTests = []struct {
-	name     string
-	data     []byte
-	expected string
-	key      string
-}{
-	{
-		"a",
-		[]byte(`{"a":"b","c":"d"}`),
-		"b",
-		"a",
-	},
-	{
-		"@this",
-		[]byte(`{"a":"b","c":"d"}`),
-		`{"a":"b","c":"d"}`,
-		"@this",
-	},
-	{
-		"empty",
-		[]byte(`{"a":"b","c":"d"}`),
-		"",
-		"",
-	},
-}
-
-func TestMessageGetData(t *testing.T) {
-	for _, test := range messageGetTests {
-		msg := New().SetData(test.data)
-
-		result := msg.GetValue(test.key).String()
-		if result != test.expected {
-			t.Errorf("expected %s, got %s", test.expected, result)
-		}
-	}
-}
-
-func benchmarkTestMessageGetData(b *testing.B, key string, data []byte) {
-	msg := New().SetData(data)
-	b.ResetTimer()
-
-	for i := 0; i < b.N; i++ {
-		_ = msg.GetValue(key)
-	}
-}
-
-func BenchmarkTestMessageGetData(b *testing.B) {
-	for _, test := range messageGetTests {
-		b.Run(test.name,
-			func(b *testing.B) {
-				benchmarkTestMessageGetData(b, test.key, test.data)
-			},
-		)
-	}
-}
-
-func TestMessageGetMetadata(t *testing.T) {
-	for _, test := range messageGetTests {
-		msg := New().SetMetadata(test.data)
-
-		key := strings.Join([]string{metaKey, test.key}, " ")
-		result := msg.GetValue(key).String()
-		if result != test.expected {
-			t.Errorf("expected %s, got %s", test.expected, result)
-		}
-	}
-}
-
-func benchmarkTestMessageGetMetadata(b *testing.B, key string, data []byte) {
-	msg := New().SetMetadata(data)
-	b.ResetTimer()
-
-	for i := 0; i < b.N; i++ {
-		_ = msg.GetValue(key)
-	}
-}
-
-func BenchmarkTestMessageGetMetadata(b *testing.B) {
-	for _, test := range messageGetTests {
-		b.Run(test.name,
-			func(b *testing.B) {
-				key := strings.Join([]string{metaKey, test.key}, " ")
-				benchmarkTestMessageGetMetadata(b, key, test.data)
-			},
-		)
-	}
-}
-
-var messageSetTests = []struct {
-	name     string
-	data     []byte
-	expected []byte
-	key      string
-	value    interface{}
-}{
-	{
-		"string",
-		[]byte(`{"a":"b","c":"d"}`),
-		[]byte(`{"a":"b","c":"d","e":"f"}`),
-		"e",
-		"f",
-	},
-	{
-		"int",
-		[]byte(`{"a":"b","c":"d"}`),
-		[]byte(`{"a":"b","c":"d","e":1}`),
-		"e",
-		1,
-	},
-	{
-		"float",
-		[]byte(`{"a":"b","c":"d"}`),
-		[]byte(`{"a":"b","c":"d","e":1.1}`),
-		"e",
-		1.1,
-	},
-	{
-		"object",
-		[]byte(`{"a":"b","c":"d"}`),
-		[]byte(`{"a":"b","c":"d","e":{"f":"g"}}`),
-		"e",
-		[]byte(`{"f":"g"}`),
-	},
-	{
-		"array",
-		[]byte(`{"a":"b","c":"d"}`),
-		[]byte(`{"a":"b","c":"d","e":["f","g","h"]}`),
-		"e",
-		[]byte(`["f","g","h"]`),
-	},
-	{
-		"struct",
-		[]byte(`{"a":"b","c":"d"}`),
-		[]byte(`{"a":"b","c":"d","e":{"f":"g"}}`),
-		"e",
-		struct {
-			F string `json:"f"`
-		}{
-			F: "g",
-		},
-	},
-}
-
-func TestMessageSetData(t *testing.T) {
-	for _, test := range messageSetTests {
-		msg := New().SetData(test.data)
-		if err := msg.SetValue(test.key, test.value); err != nil {
-			t.Error(err)
-		}
-
-		if !bytes.Equal(msg.Data(), test.expected) {
-			t.Errorf("expected %s, got %s", test.expected, msg.Data())
-		}
-	}
-}
-
-func benchmarkTestMessageSetData(b *testing.B, key string, val interface{}, data []byte) {
-	msg := New().SetData(data)
-	b.ResetTimer()
-
-	for i := 0; i < b.N; i++ {
-		_ = msg.SetValue(key, val)
-	}
-}
-
-func BenchmarkTestMessageSetData(b *testing.B) {
-	for _, test := range messageSetTests {
-		b.Run(test.name,
-			func(b *testing.B) {
-				benchmarkTestMessageSetData(b, test.key, test.value, test.data)
-			},
-		)
-	}
-}
-
-func TestMessageSetMetadata(t *testing.T) {
-	for _, test := range messageSetTests {
-		msg := New().SetMetadata(test.data)
-
-		key := strings.Join([]string{metaKey, test.key}, " ")
-		if err := msg.SetValue(key, test.value); err != nil {
-			t.Error(err)
-		}
-
-		if !bytes.Equal(msg.Metadata(), test.expected) {
-			t.Errorf("expected %s, got %s", test.expected, msg.Metadata())
-		}
-	}
-}
-
-func benchmarkTestMessageSetMetadata(b *testing.B, key string, val interface{}, data []byte) {
-	msg := New().SetMetadata(data)
-	b.ResetTimer()
-
-	for i := 0; i < b.N; i++ {
-		_ = msg.SetValue(key, val)
-	}
-}
-
-func BenchmarkTestMessageSetMetadata(b *testing.B) {
-	for _, test := range messageSetTests {
-		b.Run(test.name,
-			func(b *testing.B) {
-				key := strings.Join([]string{metaKey, test.key}, " ")
-				benchmarkTestMessageSetMetadata(b, key, test.value, test.data)
-			},
-		)
-	}
-}
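Editor's note: a usage sketch of the API these tests exercise, with assumptions flagged: the import path follows this repository's module layout, and the "meta" key prefix is inferred from the tests joining metaKey and the key with a space.

package main

import (
	"fmt"

	"github.com/brexhq/substation/v2/message"
)

func main() {
	msg := message.New().SetData([]byte(`{"a":"b"}`))

	// Data and metadata values are addressed through the same methods;
	// metadata keys carry the "meta" prefix (an assumption, see above).
	_ = msg.SetValue("c", "d")
	_ = msg.SetValue("meta id", 1)

	fmt.Println(string(msg.Data()))               // {"a":"b","c":"d"}
	fmt.Println(msg.GetValue("meta id").String()) // 1
}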
diff --git a/v2/transform/README.md b/v2/transform/README.md
deleted file mode 100644
index 4042d5a0..00000000
--- a/v2/transform/README.md
+++ /dev/null
@@ -1,3 +0,0 @@
-# transform
-
-Contains interfaces and methods for transforming data.