diff --git a/.yamllint b/.yamllint index 99260037e2..966d4fa953 100644 --- a/.yamllint +++ b/.yamllint @@ -1,6 +1,8 @@ extends: default rules: + braces: + max-spaces-inside: 1 colons: max-spaces-before: 0 max-spaces-after: 1 diff --git a/evergreen.yml b/evergreen.yml index 6f18ca4faf..a3cbc7f49f 100644 --- a/evergreen.yml +++ b/evergreen.yml @@ -230,6 +230,7 @@ task_groups: - t_compile - t_python_test - t_lint_python + - t_lint_workloads - t_cmake_test - t_integration_test_single_node_replset - t_integration_test_three_node_replset @@ -339,7 +340,6 @@ functions: binary: ./src/genny/run-genny args: - -v - # TODO: this fails right now - should show up in patch-build; need to remove this task and TODO as a ticket - lint-yaml ## diff --git a/run-genny b/run-genny index 6f5bfdcde1..bb55e288b0 100755 --- a/run-genny +++ b/run-genny @@ -26,7 +26,7 @@ _print_diagnostics() { if [ -n "$_HAVE_PRINTED_DIAGNOSTICS" ]; then return fi - echo >&2 "If you're stuck, please reach out to the #workload-generation MongoDB slack channel and paste this output." + echo >&2 "If you're stuck, please reach out to the #performance-tooling-users MongoDB slack channel and paste this output." echo >&2 "" echo >&2 " git rev-parse HEAD: $(git rev-parse HEAD)" echo >&2 " uname -a: $(uname -a)" diff --git a/src/lamplib/src/genny/tasks/create-new-actor.sh b/src/lamplib/src/genny/tasks/create-new-actor.sh index aa4e9f83f2..27b7a3a87c 100755 --- a/src/lamplib/src/genny/tasks/create-new-actor.sh +++ b/src/lamplib/src/genny/tasks/create-new-actor.sh @@ -461,7 +461,10 @@ create_workload_yml() { actor_name="$1" cat << EOF > "$GENNY_REPO_ROOT/src/workloads/docs/${actor_name}.yml" SchemaVersion: 2018-07-01 -Owner: TODO put your github team name here e.g. @mongodb/stm +Owner: TODO put your github team name here e.g. @10gen/dev-prod-tips +Description: | + TODO describe your workload. For an example description, check out + src/workloads/selftests/GennyOverhead.yml. 
# TODO: delete this file or add a meaningful workload using or # demonstrating your Actor @@ -615,11 +618,9 @@ cat << EOF Build and test ${actor_name} with the following command: - # TODO: these paths aren't right any more - ./scripts/lamp - ./scripts/lamp cmake-test - ./scripts/get-mongo-source.sh - ./scripts/lamp resmoke-test --suites src/resmokeconfig/genny_standalone.yml + ./run-genny install + ./run-genny cmake-test + ./run-genny resmoke-test --suites src/resmokeconfig/genny_standalone.yml The resmoke-test will fail because there is a "hidden" bug in the generated integration-test-case that you are expected to find as a part of reading through @@ -627,7 +628,7 @@ the generated code. Run your workload as follows: - ./dist/bin/genny run \\ + ./run-genny workload -- run \\ --workload-file ./src/workloads/docs/${actor_name}.yml \\ --metrics-format csv \\ --metrics-output-file build/genny-metrics.csv \\ diff --git a/src/lamplib/src/genny/tasks/yaml_linter.py b/src/lamplib/src/genny/tasks/yaml_linter.py index f52143fc2f..2b0533df4f 100644 --- a/src/lamplib/src/genny/tasks/yaml_linter.py +++ b/src/lamplib/src/genny/tasks/yaml_linter.py @@ -1,6 +1,7 @@ import os import os.path as path import sys +import yaml import structlog import yamllint.cli @@ -9,42 +10,81 @@ def main(genny_repo_root: str): - yaml_dirs = [ + workload_dirs = [ path.join(genny_repo_root, "src", "workloads"), path.join(genny_repo_root, "src", "phases"), - path.join(genny_repo_root, "src", "resmokeconfig"), ] + resmoke_dirs = [path.join(genny_repo_root, "src", "resmokeconfig")] + evergreen_dirs = [path.join(genny_repo_root, "evergreen.yml")] - yaml_files = [path.join(os.getcwd(), "evergreen.yml")] + workload_yamls, workload_error = _traverse_yamls(workload_dirs) + resmoke_yamls, resmoke_error = _traverse_yamls(resmoke_dirs) + evergreen_yamls, evergreen_error = _traverse_yamls(evergreen_dirs) + all_yamls = workload_yamls + resmoke_yamls + evergreen_yamls - has_error = False - - for yaml_dir in 
yaml_dirs: - for dirpath, dirnames, filenames in os.walk(yaml_dir): - for filename in filenames: - if filename.endswith(".yaml"): - SLOG.error("All YAML files should have the .yml extension", found=filename) - # Don't error immediately so all violations can be printed with one run - # of this script. - has_error = True - elif filename.endswith(".yml"): - yaml_files.append(path.join(dirpath, filename)) - - if has_error: + if workload_error or resmoke_error or evergreen_error: + SLOG.error( + "Found invalidly-named yaml files. Please correct and rerun ./run-genny lint-yaml." + ) sys.exit(1) - if len(yaml_files) == 0: - SLOG.error("Did not find any YAML files to lint", in_dirs=yaml_dirs) - raise Exception("No yamls found") - - config_file_path = path.join(os.getcwd(), ".yamllint") + all_have_descriptions = True + for workload_yaml in workload_yamls: + if not check_description(workload_yaml): + all_have_descriptions = False + if not all_have_descriptions: + SLOG.error( + "The above YAML workloads lack a Description field. This field should be populated with a human-readable description " + "of the workload and its output metrics. 
After doing so, please re-run ./run-genny lint-yaml" + ) + sys.exit(1) - yamllint_argv = ["--strict", "--config-file", config_file_path] + yaml_files + config_file_path = path.join(genny_repo_root, ".yamllint") + yamllint_argv = ["--strict", "--config-file", config_file_path] + all_yamls SLOG.info( "Linting workload YAML files with yamllint", - count=len(yaml_files), + count=len(all_yamls), yamllint_argv=yamllint_argv, ) yamllint.cli.run(yamllint_argv) + + +def check_description(yaml_path): + workload = _load_yaml(yaml_path) + if "Description" not in workload: + SLOG.error(f"Genny workload {yaml_path} lacks a Description field.") + return False + return True + + +def _traverse_yamls(roots): + def check_filename(filename): + if filename.endswith(".yaml"): + SLOG.error("All YAML files should have the .yml extension", found=filename) + return True + return False + + yaml_files = [] + has_error = False + for root in roots: + if os.path.isfile(root): + return [root], check_filename(root) + for dirpath, dirnames, filenames in os.walk(root): + for filename in filenames: + # Don't error immediately so all violations can be printed with one run + # of this script. + has_error = check_filename(filename) or has_error + if filename.endswith(".yml"): + yaml_files.append(path.join(dirpath, filename)) + if len(yaml_files) == 0: + SLOG.error("Did not find any YAML files to lint", in_dirs=roots) + raise Exception("No yamls found") + return yaml_files, has_error + + +def _load_yaml(yaml_path): + with open(yaml_path) as file: + workload = yaml.safe_load(file) + return workload diff --git a/src/phases/HelloWorld/ExamplePhase2.yml b/src/phases/HelloWorld/ExamplePhase2.yml index acb06a73a8..69b9bfa03f 100644 --- a/src/phases/HelloWorld/ExamplePhase2.yml +++ b/src/phases/HelloWorld/ExamplePhase2.yml @@ -1,5 +1,6 @@ -# Example to illustrate how PhaseConfig composition works. SchemaVersion: 2018-07-01 +Description: | + Example phase to illustrate how PhaseConfig composition works. 
UseMe: Message: Hello Phase 2 diff --git a/src/phases/execution/CreateIndexPhase.yml b/src/phases/execution/CreateIndexPhase.yml index d38dfd8d03..3e51ef338d 100644 --- a/src/phases/execution/CreateIndexPhase.yml +++ b/src/phases/execution/CreateIndexPhase.yml @@ -2,6 +2,8 @@ SchemaVersion: 2018-07-01 Owner: "@mongodb/server-execution" +Description: | + TODO: TIG-3322 InsertData: Repeat: 1 diff --git a/src/phases/scale/DesignDocWorkloadPhases.yml b/src/phases/scale/DesignDocWorkloadPhases.yml index b159982df9..12f46f77e5 100644 --- a/src/phases/scale/DesignDocWorkloadPhases.yml +++ b/src/phases/scale/DesignDocWorkloadPhases.yml @@ -1,4 +1,6 @@ SchemaVersion: 2018-07-01 +Description: | + TODO: TIG-3318 Document: &Doc # Documents are approximately 1 KB in size t: {^RandomInt: {min: 0, max: 10}} diff --git a/src/phases/scale/LargeScalePhases.yml b/src/phases/scale/LargeScalePhases.yml index 40f917db38..5d70f521f1 100644 --- a/src/phases/scale/LargeScalePhases.yml +++ b/src/phases/scale/LargeScalePhases.yml @@ -1,5 +1,7 @@ SchemaVersion: 2018-07-01 Owner: Storage Engines +Description: | + TODO: TIG-3319 # This is the set of shared phases for the Large Scale Workload Automation project. # @@ -9,7 +11,7 @@ Owner: Storage Engines # This section contains definitions that are used in the following phases. 
GlobalDefaults: Random10KInt: &rand_10k_int {^RandomInt: {min: 0, max: 10000}} - Random10KInt: &rand_1k_int {^RandomInt: {min: 0, max: 1000}} + Random1KInt: &rand_1k_int {^RandomInt: {min: 0, max: 1000}} Random4ByteInt: &rand_4b_int {^RandomInt: {min: 0, max: 2147483647}} Random30String: &rand_30b_string {^RandomString: {length: 30}} Random130String: &rand_100b_string {^RandomString: {length: 100}} @@ -28,15 +30,15 @@ GlobalDefaults: h: *rand_4b_int i: *rand_4b_int RollingIndexes: &RollingIndexes - - keys: {a: 1} - - keys: {b: 1} - - keys: {c: 1} - - keys: {d: 1} - - keys: {e: 1} - - keys: {f: 1} - - keys: {i: 1, g: 1} - - keys: {g: 1, h: 1} - - keys: {h: 1, i: 1} + - keys: {a: 1} + - keys: {b: 1} + - keys: {c: 1} + - keys: {d: 1} + - keys: {e: 1} + - keys: {f: 1} + - keys: {i: 1, g: 1} + - keys: {g: 1, h: 1} + - keys: {h: 1, i: 1} # Commonly used parameters. DatabaseParam: &DatabaseParam {^Parameter: {Name: "Database", Default: ""}} @@ -153,7 +155,7 @@ HotDocumentUpdaterCmd: OperationCommand: WriteOperations: - WriteCommand: updateOne - Filter: {first : first} + Filter: {first: first} Update: {$set: {second: *rand_1k_int}} GlobalRate: *WritesParam @@ -176,7 +178,7 @@ HotCollectionUpdaterCmd: OperationCommand: WriteOperations: - WriteCommand: insertOne - Document: { a : *rand_1k_int} + Document: { a: *rand_1k_int } GlobalRate: *WritesParam HotCollectionDeleterCmd: @@ -249,8 +251,8 @@ SnapshotScannerAllCmd: Duration: *Duration Documents: 100000000000 ScanType: snapshot - #GlobalRate: 1 per 24 hours # CHANGE - #Note: using "2 per 1" hour kicks off two scans at the top of the hour! + # GlobalRate: 1 per 24 hours # CHANGE + # Note: using "2 per 1" hour kicks off two scans at the top of the hour! 
GlobalRate: 1 per 30 minutes OplogTrailerCmd: diff --git a/src/phases/scale/MixPhases.yml b/src/phases/scale/MixPhases.yml index 147fff803a..fa9e60e612 100644 --- a/src/phases/scale/MixPhases.yml +++ b/src/phases/scale/MixPhases.yml @@ -1,4 +1,7 @@ SchemaVersion: 2018-07-01 +Owner: "@mongodb/product-perf" +Description: | + TODO: TIG-3318 dbname: &dbname mix runtime: &runtime 7 minutes diff --git a/src/workloads/contrib/historystore/eMRCfBench.yml b/src/workloads/contrib/historystore/eMRCfBench.yml index 5381ca7c8a..c05ff9f482 100644 --- a/src/workloads/contrib/historystore/eMRCfBench.yml +++ b/src/workloads/contrib/historystore/eMRCfBench.yml @@ -1,10 +1,12 @@ SchemaVersion: 2018-07-01 Owner: Storage Engines +Description: | + TODO: TIG-3322 -# Test workload to evaluate Storage Engine behavior when running in a +# Test workload to evaluate Storage Engine behavior when running in a # degraded replica set with or without eMRCf enabled. A "degraded" replica # set here means one without an active majority of data bearing nodes. I.e., -# a PSA set with the Secondary offline. +# a PSA set with the Secondary offline. # Currently the workload is split across several yml files because we need to script # some things between different parts. This is the grow phase. It churns our dataset @@ -24,7 +26,7 @@ GlobalDefaults: RandomMediumString: &rand_medium_string {^RandomString: {length: {^RandomInt: {min: 160, max: 960}}}} RandomLargeString: &rand_large_string {^RandomString: {length: {^RandomInt: {min: 960, max: 4960}}}} - TestDB: &TestDB test # Name of test database + TestDB: &TestDB test # Name of test database SmallDocCount: &small_doc_count 10000000 LargeDocCount: &large_doc_count 1000000 @@ -44,12 +46,12 @@ GlobalDefaults: # a random key will target the oldest document with that key value, thus 1% of the dataset # # The secondary key (key2) is a random value selected from 1 to 1000. It can be used (awkwardly) -# to broaden the set of documents that a test targets. 
For example, issuing a pair of operations +# to broaden the set of documents that a test targets. For example, issuing a pair of operations # to random primary keys and one selecting key2 <= 500 and the other selecting key2 > 500 will # target two documents for each primary key value, thus targeting 2 documents for each key # value, or 2% of the population. -Document: &small_doc +SmallDocument: &small_doc key1: *small_doc_key key2: *rand_1k_int data1: *rand_4b_int @@ -60,7 +62,7 @@ Document: &small_doc tag: *short_string payload: *rand_short_string -Document: &large_doc +LargeDocument: &large_doc key1: *large_doc_key key2: *rand_1k_int data1: *rand_4b_int @@ -87,7 +89,7 @@ Actors: Type: CrudActor Database: *TestDB Threads: 10 - Phases: + Phases: - Repeat: 100000 Threads: 10 Collection: Collection0 @@ -112,22 +114,22 @@ Actors: Operations: - OperationName: findOne OperationCommand: - Filter: {$and: [ {key1: *small_doc_key}, {key2: {$gt: 500}} ] } + Filter: {$and: [{key1: *small_doc_key}, {key2: {$gt: 500}}]} - OperationName: updateOne OperationCommand: - Filter: {$and: [ {key1: *small_doc_key}, {key2: {$lte: 500}} ] } + Filter: {$and: [{key1: *small_doc_key}, {key2: {$lte: 500}}]} Update: {$set: {data1: *rand_4b_int}} - OperationName: findOne OperationCommand: - Filter: {$and: [ {key1: *small_doc_key}, {key2: {$lte: 500}} ] } + Filter: {$and: [{key1: *small_doc_key}, {key2: {$lte: 500}}]} - OperationName: updateOne OperationCommand: - Filter: {$and: [ {key1: *small_doc_key}, {key2: {$gt: 500}} ] } + Filter: {$and: [{key1: *small_doc_key}, {key2: {$gt: 500}}]} Update: {$set: {payload: *rand_short_string}} - {Nop: true} - {Nop: true} -# 90/10 mix of reads/updates. +# 90/10 mix of reads/updates. 
- Name: ReadMostlyPhase Type: CrudActor Database: *TestDB @@ -161,22 +163,22 @@ Actors: # a given primary key - OperationName: findOne OperationCommand: - Filter: {$and: [ {key1: *small_doc_key}, {key2: {$gt: 0}}, {key2: {$lte: 200}} ] } + Filter: {$and: [{key1: *small_doc_key}, {key2: {$gt: 0}}, {key2: {$lte: 200}}]} - OperationName: findOne OperationCommand: - Filter: {$and: [ {key1: *small_doc_key}, {key2: {$gt: 200}}, {key2: {$lte: 400}} ] } + Filter: {$and: [{key1: *small_doc_key}, {key2: {$gt: 200}}, {key2: {$lte: 400}}]} - OperationName: findOne OperationCommand: - Filter: {$and: [ {key1: *small_doc_key}, {key2: {$gt: 400}}, {key2: {$lte: 600}} ] } + Filter: {$and: [{key1: *small_doc_key}, {key2: {$gt: 400}}, {key2: {$lte: 600}}]} - OperationName: findOne OperationCommand: - Filter: {$and: [ {key1: *small_doc_key}, {key2: {$gt: 600}}, {key2: {$lte: 800}} ] } + Filter: {$and: [{key1: *small_doc_key}, {key2: {$gt: 600}}, {key2: {$lte: 800}}]} - OperationName: findOne OperationCommand: - Filter: {$and: [ {key1: *small_doc_key}, {key2: {$gt: 800}}, {key2: {$lte: 1000}} ] } + Filter: {$and: [{key1: *small_doc_key}, {key2: {$gt: 800}}, {key2: {$lte: 1000}}]} - {Nop: true} -# Delete the same number of documents that we added, returning the database to the original +# Delete the same number of documents that we added, returning the database to the original # population (i.e., # of documents) - Name: DeletePhase Type: CrudActor @@ -190,9 +192,9 @@ Actors: Threads: 10 Collection: Collection0 Operations: - - OperationName: deleteOne - OperationCommand: - Filter: {key1: *small_doc_key} + - OperationName: deleteOne + OperationCommand: + Filter: {key1: *small_doc_key} # WARNING: Future versions of Genny won't support the cvs-ftdc metrics format. 
Metrics: diff --git a/src/workloads/contrib/historystore/eMRCfGrow.yml b/src/workloads/contrib/historystore/eMRCfGrow.yml index d0f0f5b447..141a51fc39 100644 --- a/src/workloads/contrib/historystore/eMRCfGrow.yml +++ b/src/workloads/contrib/historystore/eMRCfGrow.yml @@ -1,10 +1,12 @@ SchemaVersion: 2018-07-01 Owner: Storage Engines +Description: | + TODO: TIG-3322 -# Test workload to evaluate Storage Engine behavior when running in a +# Test workload to evaluate Storage Engine behavior when running in a # degraded replica set with or without eMRCf enabled. A "degraded" replica # set here means one without an active majority of data bearing nodes. I.e., -# a PSA set with the Secondary offline. +# a PSA set with the Secondary offline. # Currently the workload is split across several yml files because we need to script # some things between different parts. This is the grow phase. It churns our dataset @@ -24,7 +26,7 @@ GlobalDefaults: RandomMediumString: &rand_medium_string {^RandomString: {length: {^RandomInt: {min: 160, max: 960}}}} RandomLargeString: &rand_large_string {^RandomString: {length: {^RandomInt: {min: 960, max: 4960}}}} - TestDB: &TestDB test # Name of test database + TestDB: &TestDB test # Name of test database SmallDocCount: &small_doc_count 10000000 LargeDocCount: &large_doc_count 1000000 @@ -44,12 +46,12 @@ GlobalDefaults: # a random key will target the oldest document with that key value, thus 1% of the dataset # # The secondary key (key2) is a random value selected from 1 to 1000. It can be used (awkwardly) -# to broaden the set of documents that a test targets. For example, issuing a pair of operations +# to broaden the set of documents that a test targets. For example, issuing a pair of operations # to random primary keys and one selecting key2 <= 500 and the other selecting key2 > 500 will # target two documents for each primary key value, thus targeting 2 documents for each key # value, or 2% of the population. 
-Document: &small_doc +SmallDocument: &small_doc key1: *small_doc_key key2: *rand_1k_int data1: *rand_4b_int @@ -60,7 +62,7 @@ Document: &small_doc tag: *short_string payload: *rand_short_string -Document: &large_doc +LargeDocument: &large_doc key1: *large_doc_key key2: *rand_1k_int data1: *rand_4b_int @@ -76,7 +78,7 @@ Document: &large_doc # # Workload is 80% updates, 10% insert, 10% delete. Most updates target the "hot" 1% of # documents with occasionally operations on 10% of the data set (documents with key2 > 900) -# Deletes remove from the hot 1%, so over time the hot set changes. It also means we +# Deletes remove from the hot 1%, so over time the hot set changes. It also means we # typically delete documents with a bunch of history. Actors: @@ -84,9 +86,8 @@ Actors: Type: CrudActor Database: *TestDB Threads: 10 - Phases: - - &ChurnPhase - Repeat: 50000 + Phases: + - Repeat: 50000 Collection: Collection0 Operations: - OperationName: updateOne @@ -119,7 +120,7 @@ Actors: Update: {$set: {payload: *rand_short_string}} - OperationName: updateMany OperationCommand: - Filter: {$and: [ {key1: *small_doc_key}, {key2: {$gt: 900}} ] } + Filter: {$and: [{key1: *small_doc_key}, {key2: {$gt: 900}}]} Update: {$set: {data2: *rand_4b_int}} - OperationName: insertOne OperationCommand: @@ -132,9 +133,8 @@ Actors: Type: CrudActor Database: *TestDB Threads: 1 - Phases: - - &ChurnPhase - Repeat: 50000 + Phases: + - Repeat: 50000 Collection: Collection0 Operations: - OperationName: updateOne @@ -167,7 +167,7 @@ Actors: Update: {$set: {payload: *rand_medium_string}} - OperationName: updateMany OperationCommand: - Filter: {$and: [ {key1: *large_doc_key}, {key2: {$gt: 900}} ] } + Filter: {$and: [{key1: *large_doc_key}, {key2: {$gt: 900}}]} Update: {$set: {data2: *rand_4b_int}} - OperationName: insertOne OperationCommand: diff --git a/src/workloads/contrib/historystore/eMRCfPopulate.yml b/src/workloads/contrib/historystore/eMRCfPopulate.yml index 5ce5f12093..18dd173f3e 100644 --- 
a/src/workloads/contrib/historystore/eMRCfPopulate.yml +++ b/src/workloads/contrib/historystore/eMRCfPopulate.yml @@ -1,15 +1,17 @@ SchemaVersion: 2018-07-01 Owner: Storage Engines +Description: | + TODO: TIG-3322 -# Test workload to evaluate Storage Engine behavior when running in a +# Test workload to evaluate Storage Engine behavior when running in a # degraded replica set with or without eMRCf enabled. A "degraded" replica # set here means one without an active majority of data bearing nodes. I.e., -# a PSA set with the Secondary offline. +# a PSA set with the Secondary offline. # Currently the workload is split across several yml files because we need to script # some things between different parts. This is the population phase. It starts with # an empty database and populates it with an initial set of documents. We create a mix -# of small (50-200 byte) and large (200-1000 byte) documents in a 10:1 ratio. +# of small (50-200 byte) and large (200-1000 byte) documents in a 10:1 ratio. # This section contains shared definitions that are used in the workload. # These defaults should match the similar declarations in the yml files for the other @@ -23,7 +25,7 @@ GlobalDefaults: RandomMediumString: &rand_medium_string {^RandomString: {length: {^RandomInt: {min: 160, max: 960}}}} RandomLargeString: &rand_large_string {^RandomString: {length: {^RandomInt: {min: 960, max: 4960}}}} - TestDB: &TestDB test # Name of test database + TestDB: &TestDB test # Name of test database SmallDocCount: &small_doc_count 10000000 LargeDocCount: &large_doc_count 1000000 @@ -43,12 +45,12 @@ GlobalDefaults: # a random key will target the oldest document with that key value, thus 1% of the dataset # # The secondary key (key2) is a random value selected from 1 to 1000. It can be used (awkwardly) -# to broaden the set of documents that a test targets. For example, issuing a pair of operations +# to broaden the set of documents that a test targets. 
For example, issuing a pair of operations # to random primary keys and one selecting key2 <= 500 and the other selecting key2 > 500 will # target two documents for each primary key value, thus targeting 2 documents for each key # value, or 2% of the population. -Document: &small_doc +SmallDocument: &small_doc key1: *small_doc_key key2: *rand_1k_int data1: *rand_4b_int @@ -59,7 +61,7 @@ Document: &small_doc tag: *short_string payload: *rand_short_string -Document: &large_doc +LargeDocument: &large_doc key1: *large_doc_key key2: *rand_1k_int data1: *rand_4b_int diff --git a/src/workloads/docs/CollectionScanner.yml b/src/workloads/docs/CollectionScanner.yml index e7f7d9921d..9624c8408c 100644 --- a/src/workloads/docs/CollectionScanner.yml +++ b/src/workloads/docs/CollectionScanner.yml @@ -1,5 +1,7 @@ SchemaVersion: 2018-07-01 Owner: Storage Engines +Description: | + TODO: TIG-3319 Actors: - Name: Loader @@ -23,7 +25,7 @@ Actors: Document: a: {^RandomString: { length: 100 }} FindOptions: - Hint: a_1 # Currently only support the index name. + Hint: a_1 # Currently only support the index name. Comment: "Phase 1 loader" - {Nop: true} - {Nop: true} @@ -63,7 +65,7 @@ Actors: GlobalRate: 10 per 30 seconds ScanType: standard # To peform an index scan use the Filter config. - Filter: {a : 1} + Filter: {a: 1} - Duration: 2 minutes # The Documents configuration specifies the max number of documents we # want to scan per scan loop. Once we hit this limit we exit the loop. @@ -77,15 +79,15 @@ Actors: GlobalRate: 10 per 10 seconds # Count the frequency of each character in array field "a". 
AggregatePipeline: - {array: [{"$addFields":{"a":{"$map":{"input":{"$range":[0,{"$strLenCP":"$a"}]},"in":{"$substrCP":["$a","$$this",1]}}}}}, - {"$unwind": "$a"}, - {"$group": {"_id": "$a", "count": {"$sum":1}}}, - {"$sort": {"_id":1}}, - ]} + {array: [{"$addFields": {"a": {"$map": {"input": {"$range": [0, {"$strLenCP": "$a"}]}, "in": {"$substrCP": ["$a", "$$this", 1]}}}}}, + {"$unwind": "$a"}, + {"$group": {"_id": "$a", "count": {"$sum":1}}}, + {"$sort": {"_id":1}}, + ]} AggregateOptions: BatchSize: 1000 Comment: Aggregation to count char frequency - AllowDiskUse: true # Naming convention follows c++ driver rather than mongo shell. + AllowDiskUse: true # Naming convention follows c++ driver rather than mongo shell. # A snapshot scanner begins a transaction prior to starting its scan # with read concern majority, which should pin the read for the diff --git a/src/workloads/docs/CrudActor.yml b/src/workloads/docs/CrudActor.yml index cc8211cc82..1c98fe2731 100644 --- a/src/workloads/docs/CrudActor.yml +++ b/src/workloads/docs/CrudActor.yml @@ -1,5 +1,8 @@ SchemaVersion: 2018-07-01 -Owner: "@mongodb/stm" +Owner: "@10gen/dev-prod-tips" +Description: | + This is a demonstration of the CrudActor. It performs writes, updates, and drops + to demonstrate the actor. Clients: Default: @@ -13,7 +16,7 @@ Actors: - Name: CrudActor Type: CrudActor Database: mydb - ClientName: SomeOtherPool # Which connection pool to use. Must be defined in Clients section. + ClientName: SomeOtherPool # Which connection pool to use. Must be defined in Clients section. Phases: - Repeat: 10 Collection: test @@ -37,7 +40,7 @@ Actors: - {a: {^RandomString: {length: {^RandomInt: {min: 3, max: 5}}}}} - {b: {^RandomInt: {min: 5, max: 15}}} ThrowOnFailure: false # Whether to throw an exception if an operation fails -# RecordFailure: true # If ThrowOnFailure is false, whether the failed operations should be recorded. 
+ # RecordFailure: true # If ThrowOnFailure is false, whether the failed operations should be recorded. - Repeat: 1 Collection: test Operation: @@ -52,23 +55,23 @@ Actors: # The collection names are generated from the current actor id mod CollectionCount. CollectionCount: 4 Operations: - - OperationName: bulkWrite - OperationCommand: - WriteOperations: - - WriteCommand: insertOne - Document: {a: 1} - - WriteCommand: updateOne - Filter: {a: 1} - Update: {$set: {a: {^RandomInt: {min: 5, max: 15}}}} - Options: - WriteConcern: - Level: majority - TimeoutMillis: 5000 - - OperationName: insertMany - OperationCommand: - Documents: - - {a: 1} - - {a: {^RandomString: {length: {^RandomInt: {min: 3, max: 5}}}}} - - {b: {^RandomInt: {min: 5, max: 15}}} - ThrowOnFailure: false # Whether to throw an exception if an operation fails -# RecordFailure: true # If ThrowOnFailure is false, whether the failed operations should be recorded. + - OperationName: bulkWrite + OperationCommand: + WriteOperations: + - WriteCommand: insertOne + Document: {a: 1} + - WriteCommand: updateOne + Filter: {a: 1} + Update: {$set: {a: {^RandomInt: {min: 5, max: 15}}}} + Options: + WriteConcern: + Level: majority + TimeoutMillis: 5000 + - OperationName: insertMany + OperationCommand: + Documents: + - {a: 1} + - {a: {^RandomString: {length: {^RandomInt: {min: 3, max: 5}}}}} + - {b: {^RandomInt: {min: 5, max: 15}}} + ThrowOnFailure: false # Whether to throw an exception if an operation fails + # RecordFailure: true # If ThrowOnFailure is false, whether the failed operations should be recorded. 
diff --git a/src/workloads/docs/CrudActorTransaction.yml b/src/workloads/docs/CrudActorTransaction.yml index 90116099e7..8e335fdd5e 100644 --- a/src/workloads/docs/CrudActorTransaction.yml +++ b/src/workloads/docs/CrudActorTransaction.yml @@ -1,5 +1,8 @@ SchemaVersion: 2018-07-01 -Owner: "@mongodb/stm" +Owner: "@10gen/dev-prod-tips" +Description: | + This workload provides an example of using the CrudActor with a transaction. The + behavior is largely the same, nesting operations inside the transaction block. Actors: - Name: BulkWriteInTransaction diff --git a/src/workloads/docs/Deleter.yml b/src/workloads/docs/Deleter.yml index e452f1dc50..53f8ecd615 100644 --- a/src/workloads/docs/Deleter.yml +++ b/src/workloads/docs/Deleter.yml @@ -1,5 +1,7 @@ SchemaVersion: 2018-07-01 Owner: Storage Engines +Description: | + TODO: TIG-3319 Actors: - Name: Loader @@ -29,7 +31,7 @@ Actors: OperationCommand: WriteOperations: - WriteCommand: insertOne - Document: { a : {^RandomInt: {min: 0, max: 1000}}} + Document: { a: {^RandomInt: {min: 0, max: 1000}}} GlobalRate: 1000 per 1 second - Name: HotDeleter diff --git a/src/workloads/docs/Generators.yml b/src/workloads/docs/Generators.yml index 2b1d7106cd..29db1011f7 100644 --- a/src/workloads/docs/Generators.yml +++ b/src/workloads/docs/Generators.yml @@ -1,5 +1,8 @@ SchemaVersion: 2018-07-01 -Owner: "@mongodb/stm" +Owner: "@10gen/dev-prod-tips" +Description: | + This workload exhibits various generators, performing insertions to show them off. + Follow the inline commentary to learn more about them. # This workload demonstrates all the current Document and Value Generators. 
It creates documents # that look like this (simple) @@ -94,7 +97,7 @@ Actors: # increment generator ^Inc with parameters start (default 1), multiplier (default 0}, and step (default 1) # only non-default parameters should be specified # if you have multiple threads, you need to specify multiplier: - # it would be calculated as start + ActorId * multiplier + i * step + # it would be calculated as start + ActorId * multiplier + i * step counter: {^Inc: {start: 1000}} # You can randomly choose objects. from is an array of values to pick from. Weigths is @@ -131,7 +134,7 @@ Actors: # Default max is 2150-01-01. But you can specify a date greater than this value. # # min and max are evaluated once and max must be greater than min (so by extension max cannot be 1970-01-01). - # The generated date uses the extended json $date notation + # The generated date uses the extended json $date notation # (see https://docs.mongodb.com/manual/reference/mongodb-extended-json/#bson.Date). # # RandomDate supports other generators as input (generators that produce string or numeric types): @@ -172,7 +175,7 @@ Actors: # # As min must be less than max, care must be taken to ensure that the min value range is always less than the max value range. # # Other wise you might get unexpected results. For example the following should fail 25% of the time - # # (whenever 2100-05-31 is selected). + # # (whenever 2100-05-31 is selected). 
# probablyNotWhatYouWant: {^RandomDate: {min: {^Choose: {from: ["2014-05-31", "2012-05-31", "2013-05-31", "2100-05-31"]}}, max: {^Now: {}} }} - WriteCommand: insertOne # Nested Generators @@ -209,8 +212,8 @@ Actors: cuid: { ^RandomInt: { min: 0, max: 100000 } }, prod: { ^RandomInt: { min: 0, max: 10000 } }, prid: { ^RandomDouble: { min: 0.0, max: 1000.0 } }, - data: { ^Join: { array: [ "aaaaaaaaaa", { ^FastRandomString: { length: { ^RandomInt: { min: 0, max: 10 } } } } ] } }}, - number: 10}} + data: { ^Join: { array: ["aaaaaaaaaa", {^FastRandomString: {length: {^RandomInt: {min: 0, max: 10}}}}]}}}, + number: 10}} # Large sequences of random strings can be expensive to generate. FixedGeneratedValue # can be used to generate an array once during initialization for re-use @@ -235,11 +238,11 @@ Actors: GlobalRate: 1 per 278 microseconds Collection: test Operations: - - OperationName: findOne - OperationCommand: - Filter: - roid: {^ObjectId: {^RandomString: {length: 24, alphabet: "0123456789ABCDEF"}}} - $where: {^FormatString: {format: "function() { sleep(%2.d); return true; }", withArgs: [{^Choose: {from: [0, 60], weights: [9, 1]}}]}} + - OperationName: findOne + OperationCommand: + Filter: + roid: {^ObjectId: {^RandomString: {length: 24, alphabet: "0123456789ABCDEF"}}} + $where: {^FormatString: {format: "function() { sleep(%2.d); return true; }", withArgs: [{^Choose: {from: [0, 60], weights: [9, 1]}}]}} - Repeat: 1 diff --git a/src/workloads/docs/HelloWorld-ActorTemplate.yml b/src/workloads/docs/HelloWorld-ActorTemplate.yml index 9be851aabd..12fd678e6a 100644 --- a/src/workloads/docs/HelloWorld-ActorTemplate.yml +++ b/src/workloads/docs/HelloWorld-ActorTemplate.yml @@ -1,5 +1,8 @@ SchemaVersion: 2018-07-01 -Owner: "@mongodb/stm" +Owner: "@10gen/dev-prod-tips" +Description: | + This workload shows off the actor template utility, which can be used to create a general + actor template which can then be instantiated with parameters substituted. 
ActorTemplates: - TemplateName: HelloTemplate @@ -8,19 +11,19 @@ ActorTemplates: Type: HelloWorld Threads: {^Parameter: {Name: "Threads", Default: 1}} Phases: - - Message: Hello Phase 0 🐳 - Duration: 50 milliseconds - - Message: Hello Phase 1 👬 - Repeat: 100 - # Actor templates work just fine with external phase configs, but it's recommended - # to avoid when possible. At that point writing a new actor may be simpler. - # You can always use `genny evaluate` on a workload yaml to see what it gets - # preprocessed into. - - LoadConfig: - Path: ../../phases/HelloWorld/ExamplePhase2.yml - Key: UseMe # Only load the YAML structure from this top-level key. - Parameters: - Repeat: 2 + - Message: Hello Phase 0 🐳 + Duration: 50 milliseconds + - Message: Hello Phase 1 👬 + Repeat: 100 + # Actor templates work just fine with external phase configs, but it's recommended + # to avoid when possible. At that point writing a new actor may be simpler. + # You can always use `genny evaluate` on a workload yaml to see what it gets + # preprocessed into. + - LoadConfig: + Path: ../../phases/HelloWorld/ExamplePhase2.yml + Key: UseMe # Only load the YAML structure from this top-level key. + Parameters: + Repeat: 2 Actors: @@ -40,13 +43,12 @@ Actors: Type: HelloWorld Threads: {^Parameter: {Name: "Threads", Default: 1}} Phases: - - Message: Hello Phase 0 🐳 - Duration: 50 milliseconds - - Message: Hello Phase 1 👬 - Repeat: 100 - - LoadConfig: - Path: ../../phases/HelloWorld/ExamplePhase2.yml - Key: UseMe # Only load the YAML structure from this top-level key. - Parameters: - Repeat: 2 - + - Message: Hello Phase 0 🐳 + Duration: 50 milliseconds + - Message: Hello Phase 1 👬 + Repeat: 100 + - LoadConfig: + Path: ../../phases/HelloWorld/ExamplePhase2.yml + Key: UseMe # Only load the YAML structure from this top-level key. 
+ Parameters: + Repeat: 2 diff --git a/src/workloads/docs/HelloWorld-LoadConfig.yml b/src/workloads/docs/HelloWorld-LoadConfig.yml index ef07a18c47..2c2e5e5923 100644 --- a/src/workloads/docs/HelloWorld-LoadConfig.yml +++ b/src/workloads/docs/HelloWorld-LoadConfig.yml @@ -1,5 +1,8 @@ SchemaVersion: 2018-07-01 Owner: "@10gen/dev-prod-tips" +Description: | + This workload demonstrates the general workload substitution utility. You can use "LoadConfig" + to load anything, even other workloads. LoadConfig: Path: "./HelloWorld-ActorTemplate.yml" diff --git a/src/workloads/docs/HelloWorld-MultiplePhases.yml b/src/workloads/docs/HelloWorld-MultiplePhases.yml index 6601a2a6f8..c53e32e292 100644 --- a/src/workloads/docs/HelloWorld-MultiplePhases.yml +++ b/src/workloads/docs/HelloWorld-MultiplePhases.yml @@ -1,5 +1,5 @@ SchemaVersion: 2018-07-01 -Owner: "@mongodb/stm" +Owner: "@10gen/dev-prod-tips" Description: | diff --git a/src/workloads/docs/HelloWorld.yml b/src/workloads/docs/HelloWorld.yml index 97131ecae5..2991ab5be2 100644 --- a/src/workloads/docs/HelloWorld.yml +++ b/src/workloads/docs/HelloWorld.yml @@ -1,5 +1,8 @@ SchemaVersion: 2018-07-01 -Owner: "@mongodb/stm" +Owner: "@10gen/dev-prod-tips" +Description: | + This is an introductory workload that shows how to write a workload in Genny. + This workload writes a few messages to the screen. 
Actors: - Name: HelloWorld @@ -24,12 +27,12 @@ Actors: Parameters: Repeat: 2 - # As an alternate phase syntax, use the following for an actor that - # runs a total of 3 phases, active in phases named 0 and 2 and Nop for the rest: - # Phases: - # OnlyActiveInPhases: - # Active: [0, 2] - # NopInPhasesUpTo: 2 - # PhaseConfig: - # Message: Alternate Phase 1 - # Repeat: 100 +# As an alternate phase syntax, use the following for an actor that +# runs a total of 3 phases, active in phases named 0 and 2 and Nop for the rest: +# Phases: +# OnlyActiveInPhases: +# Active: [0, 2] +# NopInPhasesUpTo: 2 +# PhaseConfig: +# Message: Alternate Phase 1 +# Repeat: 100 diff --git a/src/workloads/docs/HotCollectionWriter.yml b/src/workloads/docs/HotCollectionWriter.yml index 55043508b2..fa8464c8c8 100644 --- a/src/workloads/docs/HotCollectionWriter.yml +++ b/src/workloads/docs/HotCollectionWriter.yml @@ -1,5 +1,7 @@ SchemaVersion: 2018-07-01 Owner: Storage Engines +Description: | + TODO: TIG-3319 Clients: Default: @@ -35,5 +37,5 @@ Actors: OperationCommand: WriteOperations: - WriteCommand: insertOne - Document: { a : {^RandomInt: {min: 0, max: 1000}}} + Document: {a: {^RandomInt: {min: 0, max: 1000}}} GlobalRate: 1000 per 1 second diff --git a/src/workloads/docs/HotDocumentWriter.yml b/src/workloads/docs/HotDocumentWriter.yml index 816580194b..c1006962f1 100644 --- a/src/workloads/docs/HotDocumentWriter.yml +++ b/src/workloads/docs/HotDocumentWriter.yml @@ -1,5 +1,7 @@ SchemaVersion: 2018-07-01 Owner: Storage Engines +Description: | + TODO: TIG-3319 Clients: Default: @@ -35,6 +37,6 @@ Actors: OperationCommand: WriteOperations: - WriteCommand: updateOne - Filter: {first : first} + Filter: {first: first} Update: {$set: {second: {^RandomInt: {min: 0, max: 1000}}}} GlobalRate: 1000 per 1 second diff --git a/src/workloads/docs/InsertWithNop.yml b/src/workloads/docs/InsertWithNop.yml index 94d1d37782..e2d4e9f5ec 100644 --- a/src/workloads/docs/InsertWithNop.yml +++ 
b/src/workloads/docs/InsertWithNop.yml @@ -1,5 +1,7 @@ SchemaVersion: 2018-07-01 Owner: "@mongodb/product-perf" +Description: | + TODO: TIG-3318 # Demonstrate the InsertRemove actor. The InsertRemove actor is a simple actor that inserts and then # removes the same document from a collection in a loop. Each instance of the actor uses a different diff --git a/src/workloads/docs/Loader.yml b/src/workloads/docs/Loader.yml index ca62dd0ca6..f53f5964bf 100644 --- a/src/workloads/docs/Loader.yml +++ b/src/workloads/docs/Loader.yml @@ -1,5 +1,7 @@ SchemaVersion: 2018-07-01 Owner: "@mongodb/product-perf" +Description: | + TODO: TIG-3318 # More threads requires a larger connection pool. Clients: @@ -36,134 +38,134 @@ Clients: # Actors: - - Name: MultipleCollectionsPerLoaderThread - Type: Loader +- Name: MultipleCollectionsPerLoaderThread + Type: Loader + Threads: 10 + Phases: + - Repeat: 1 + # create 10 collection math.floor(Phase.CollectionCount / Phase.Threads) * Actor.Threads. + # 1. Thread 0 creates and loads Collection0 + # 2. Thread 1 creates and loads Collection1 + # Thread 1 creates collection 6 to 11 + # etc. + Database: OneLoaderThreadPerCollection + CollectionCount: 10 Threads: 10 - Phases: - - Repeat: 1 - # create 10 collection math.floor(Phase.CollectionCount / Phase.Threads) * Actor.Threads. - # 1. Thread 0 creates and loads Collection0 - # 2. Thread 1 creates and loads Collection1 - # Thread 1 creates collection 6 to 11 - # etc. - Database: OneLoaderThreadPerCollection + DocumentCount: &DocumentCount 200000 + BatchSize: &BatchSize 100000 + Document: + a: {^RandomString: { length: 100 }} + FindOptions: + Hint: a_index # Currently only support the index name. + Comment: "Phase 1 loader" + # Each thread creates the indexes for each collection it manages. + Indexes: + - keys: {a: 1} + options: {name: "a_index"} + - Repeat: 1 + # create 60 collection math.floor(Phase.CollectionCount / Phase.Threads) * Actor.Threads. 
+ # * Thread 0 creates collection 0 to 5 + # * Thread 1 creates collection 6 to 11 + # etc. + Database: SixCollectionsPerThread + CollectionCount: 6 + Threads: 1 + DocumentCount: *DocumentCount + BatchSize: *BatchSize + Document: + a: {^RandomString: { length: 100 }} + FindOptions: + Hint: a_index # Currently only support the index name. + Comment: "Phase 1 loader" + # Each thread creates the indexes for each collection it manages. + Indexes: + - keys: {a: 1} + options: {name: "a_index"} + - Repeat: 1 + # create 30 collection math.floor(Phase.CollectionCount / Phase.Threads) * Actor.Threads. + # * Thread 0 creates collection 0 to 2 + # * Thread 1 creates collection 3 to 5 + # etc. + Database: ThreeCollectionsPerThread + CollectionCount: 6 + Threads: 2 + DocumentCount: *DocumentCount + BatchSize: *BatchSize + Document: + a: {^RandomString: { length: 100 }} + - Repeat: 1 + # create 10 collection math.floor(Phase.CollectionCount / Phase.Threads) * Actor.Threads. + # * Thread 0 creates collection 0 to 2 + # * Thread 1 creates collection 3 to 5 + # etc. + Database: AlsoOneLoaderThreadPerCollection + CollectionCount: 6 + Threads: 6 + DocumentCount: *DocumentCount + BatchSize: *BatchSize + Document: + a: {^RandomString: { length: 100 }} + - Repeat: 1 + # create 20 collection math.floor(Phase.CollectionCount / Phase.Threads) * Actor.Threads. + # * Thread 0 creates collection 0 to 1 + # * Thread 1 creates collection 2 to 3 + # etc. + Database: TwoCollectionsPerThread + CollectionCount: 17 + Threads: 6 + DocumentCount: *DocumentCount + BatchSize: *BatchSize + Document: + a: {^RandomString: { length: 100 }} + - Repeat: 1 + # create 10 collection (Phase.CollectionCount / Phase.Threads) * Actor.Threads. + # * Thread 0 creates collection 0 to 2 + # * Thread 1 creates collection 3 to 5 + # etc. 
+ Database: SixCollectionsSingleThread + CollectionCount: 6 + Threads: 8 # Any value between 7 (Phase.CollectionCount + 1) and Actor.Threads (10) + DocumentCount: *DocumentCount + BatchSize: *BatchSize + Document: + a: {^RandomString: { length: 100 }} + - Repeat: 1 + # create 0 collection (Phase.CollectionCount / Phase.Threads) * Actor.Threads. + Database: ZeroCollections + CollectionCount: 1 + Threads: 20 # Any value greater than Actor.Threads (10) and Phase.CollectionCount (1) + DocumentCount: *DocumentCount + BatchSize: 100000 + Document: + a: {^RandomString: { length: 100 }} +- Name: MultipleLoadThreadsPerCollection + Type: Loader + Threads: 100 + Phases: + # Run MultiThreadedLoader every even Phase number. + OnlyActiveInPhases: + Active: [0, 2, 4] + NopInPhasesUpTo: 5 + PhaseConfig: + Repeat: 1 + # create 10 (Phase.CollectionCount) collection populated by 100 (Actor.Threads) threads. Each + # collection will be written to by 10 (Actor.Threads / Phase.CollectionCount) threads. + # A single thread for each collection will create any indexes. + Database: MultiThreaded + # Phase.Threads cannot be set if MultipleThreadsPerCollection is true. + # CollectionCount must be an even divisor of Threads. + # Create 10 collections and populate with 10 threads per collection. + MultipleThreadsPerCollection: true CollectionCount: 10 - Threads: 10 - DocumentCount: &DocumentCount 200000 - BatchSize: &BatchSize 100000 + # DocumentCount is the total document count. Each thread populate an equal fraction of the count. + DocumentCount: 200001 + BatchSize: 100 Document: a: {^RandomString: { length: 100 }} FindOptions: - Hint: a_index # Currently only support the index name. + Hint: a_index # Currently only support the index name. Comment: "Phase 1 loader" - # Each thread creates the indexes for each collection it manages. + # Only one thread will create the indexes (for each collection). 
Indexes: - - keys: {a: 1} - options: {name: "a_index"} - - Repeat: 1 - # create 60 collection math.floor(Phase.CollectionCount / Phase.Threads) * Actor.Threads. - # * Thread 0 creates collection 0 to 5 - # * Thread 1 creates collection 6 to 11 - # etc. - Database: SixCollectionsPerThread - CollectionCount: 6 - Threads: 1 - DocumentCount: *DocumentCount - BatchSize: *BatchSize - Document: - a: {^RandomString: { length: 100 }} - FindOptions: - Hint: a_index # Currently only support the index name. - Comment: "Phase 1 loader" - # Each thread creates the indexes for each collection it manages. - Indexes: - - keys: {a: 1} - options: {name: "a_index"} - - Repeat: 1 - # create 30 collection math.floor(Phase.CollectionCount / Phase.Threads) * Actor.Threads. - # * Thread 0 creates collection 0 to 2 - # * Thread 1 creates collection 3 to 5 - # etc. - Database: ThreeCollectionsPerThread - CollectionCount: 6 - Threads: 2 - DocumentCount: *DocumentCount - BatchSize: *BatchSize - Document: - a: {^RandomString: { length: 100 }} - - Repeat: 1 - # create 10 collection math.floor(Phase.CollectionCount / Phase.Threads) * Actor.Threads. - # * Thread 0 creates collection 0 to 2 - # * Thread 1 creates collection 3 to 5 - # etc. - Database: AlsoOneLoaderThreadPerCollection - CollectionCount: 6 - Threads: 6 - DocumentCount: *DocumentCount - BatchSize: *BatchSize - Document: - a: {^RandomString: { length: 100 }} - - Repeat: 1 - # create 20 collection math.floor(Phase.CollectionCount / Phase.Threads) * Actor.Threads. - # * Thread 0 creates collection 0 to 1 - # * Thread 1 creates collection 2 to 3 - # etc. - Database: TwoCollectionsPerThread - CollectionCount: 17 - Threads: 6 - DocumentCount: *DocumentCount - BatchSize: *BatchSize - Document: - a: {^RandomString: { length: 100 }} - - Repeat: 1 - # create 10 collection (Phase.CollectionCount / Phase.Threads) * Actor.Threads. - # * Thread 0 creates collection 0 to 2 - # * Thread 1 creates collection 3 to 5 - # etc. 
- Database: SixCollectionsSingleThread - CollectionCount: 6 - Threads: 8 # Any value between 7 (Phase.CollectionCount + 1) and Actor.Threads (10) - DocumentCount: *DocumentCount - BatchSize: *BatchSize - Document: - a: {^RandomString: { length: 100 }} - - Repeat: 1 - # create 0 collection (Phase.CollectionCount / Phase.Threads) * Actor.Threads. - Database: ZeroCollections - CollectionCount: 1 - Threads: 20 # Any value greater than Actor.Threads (10) and Phase.CollectionCount (1) - DocumentCount: *DocumentCount - BatchSize: 100000 - Document: - a: {^RandomString: { length: 100 }} - - Name: MultipleLoadThreadsPerCollection - Type: Loader - Threads: 100 - Phases: - # Run MultiThreadedLoader every even Phase number. - OnlyActiveInPhases: - Active: [0, 2, 4] - NopInPhasesUpTo: 5 - PhaseConfig: - Repeat: 1 - # create 10 (Phase.CollectionCount) collection populated by 100 (Actor.Threads) threads. Each - # collection will be written to by 10 (Actor.Threads / Phase.CollectionCount) threads. - # A single thread for each collection will create any indexes. - Database: MultiThreaded - # Phase.Threads cannot be set if MultipleThreadsPerCollection is true. - # CollectionCount must be an even divisor of Threads. - # Create 10 collections and populate with 10 threads per collection. - MultipleThreadsPerCollection: true - CollectionCount: 10 - # DocumentCount is the total document count. Each thread populate an equal fraction of the count. - DocumentCount: 200001 - BatchSize: 100 - Document: - a: {^RandomString: { length: 100 }} - FindOptions: - Hint: a_index # Currently only support the index name. - Comment: "Phase 1 loader" - # Only one thread will create the indexes (for each collection). 
- Indexes: - - keys: {a: 1} - options: {name: "a_index"} + - keys: {a: 1} + options: {name: "a_index"} diff --git a/src/workloads/docs/LoggingActorExample.yml b/src/workloads/docs/LoggingActorExample.yml index 51f26fbdae..3ca80c4f83 100644 --- a/src/workloads/docs/LoggingActorExample.yml +++ b/src/workloads/docs/LoggingActorExample.yml @@ -1,7 +1,6 @@ SchemaVersion: 2018-07-01 -Owner: "@mongodb/stm" - -Description: +Owner: "@10gen/dev-prod-tips" +Description: | The LoggingActor exists so DSI and Evergreen don't quit your workload for not outputting anything to stdout. (They do this in case your workload has timed-out.) If @@ -26,8 +25,8 @@ Description: Actors: - Name: LoggingActor Type: LoggingActor - Threads: 1 # must be 1 + Threads: 1 # must be 1 Phases: - Phase: 0 - LogEvery: 15 minutes # TimeSpec - Blocking: None # must be Blocking:None + LogEvery: 15 minutes # TimeSpec + Blocking: None # must be Blocking:None diff --git a/src/workloads/docs/LongLivedCreator.yml b/src/workloads/docs/LongLivedCreator.yml index ed1b85e408..d43ba8206a 100644 --- a/src/workloads/docs/LongLivedCreator.yml +++ b/src/workloads/docs/LongLivedCreator.yml @@ -1,5 +1,7 @@ SchemaVersion: 2018-07-01 Owner: Storage Engines +Description: | + TODO: TIG-3319 Clients: Default: @@ -40,4 +42,3 @@ Actors: - keys: {x7: 1} - keys: {x8: 1} - {Nop: true} - diff --git a/src/workloads/docs/LongLivedReader.yml b/src/workloads/docs/LongLivedReader.yml index a05e8a2a7e..2a8084eb32 100644 --- a/src/workloads/docs/LongLivedReader.yml +++ b/src/workloads/docs/LongLivedReader.yml @@ -1,5 +1,7 @@ SchemaVersion: 2018-07-01 Owner: Storage Engines +Description: | + TODO: TIG-3319 Clients: Default: @@ -40,7 +42,7 @@ Actors: - keys: {x7: 1} - keys: {x8: 1} - {Nop: true} - + - Name: LongLivedIndexReader Type: MultiCollectionQuery Threads: 100 diff --git a/src/workloads/docs/LongLivedWriter.yml b/src/workloads/docs/LongLivedWriter.yml index e885a752ce..9755349166 100644 --- a/src/workloads/docs/LongLivedWriter.yml +++ 
b/src/workloads/docs/LongLivedWriter.yml @@ -1,5 +1,7 @@ SchemaVersion: 2018-07-01 Owner: Storage Engines +Description: | + TODO: TIG-3319 Clients: Default: @@ -40,7 +42,7 @@ Actors: - keys: {x7: 1} - keys: {x8: 1} - {Nop: true} - + - Name: LongLivedWriter Type: MultiCollectionUpdate Threads: 100 diff --git a/src/workloads/docs/MonotonicSingleLoader.yml b/src/workloads/docs/MonotonicSingleLoader.yml index a97005bbb5..d33b8e06cd 100644 --- a/src/workloads/docs/MonotonicSingleLoader.yml +++ b/src/workloads/docs/MonotonicSingleLoader.yml @@ -1,5 +1,7 @@ SchemaVersion: 2018-07-01 Owner: "@mongodb/sharding" +Description: | + TODO: TIG-3320 Actors: - Name: LoadInitialData diff --git a/src/workloads/docs/MoveRandomChunkToRandomShard.yml b/src/workloads/docs/MoveRandomChunkToRandomShard.yml index 34b4a25e9c..bf3492a0ed 100644 --- a/src/workloads/docs/MoveRandomChunkToRandomShard.yml +++ b/src/workloads/docs/MoveRandomChunkToRandomShard.yml @@ -1,5 +1,8 @@ SchemaVersion: 2018-07-01 Owner: "@mongodb/sharding" +Description: | + TODO: TIG-3320 + Actors: - Name: CreateShardedCollection Type: AdminCommand diff --git a/src/workloads/docs/ParallelInsert.yml b/src/workloads/docs/ParallelInsert.yml index 1dae07a975..51fbb5b40c 100644 --- a/src/workloads/docs/ParallelInsert.yml +++ b/src/workloads/docs/ParallelInsert.yml @@ -1,5 +1,7 @@ SchemaVersion: 2018-07-01 Owner: "@mongodb/repl" +Description: | + TODO: TIG-3321 Clients: Default: @@ -181,13 +183,13 @@ Actors: - *DropCollection AutoRun: - - When: - mongodb_setup: - $eq: - - replica - branch_name: - $neq: - - v4.0 - ThenRun: - - mongodb_setup: replica-delay-mixed - - mongodb_setup: replica +- When: + mongodb_setup: + $eq: + - replica + branch_name: + $neq: + - v4.0 + ThenRun: + - mongodb_setup: replica-delay-mixed + - mongodb_setup: replica diff --git a/src/workloads/docs/QuiesceActor.yml b/src/workloads/docs/QuiesceActor.yml index 7c78592e27..7f54e645ae 100644 --- a/src/workloads/docs/QuiesceActor.yml +++ 
b/src/workloads/docs/QuiesceActor.yml @@ -1,7 +1,10 @@ SchemaVersion: 2018-07-01 -Owner: "@mongodb/stm" +Owner: "@10gen/dev-prod-tips" +Description: | + This workload demonstrates the quiesce actor, used to ensure stable + database state and reduce noise. -# Note: This actor is effectively in beta mode. We expect it to work, but +# Note: This actor is effectively in beta mode. We expect it to work, but # it hasn't been used extensively in production. Please let STM know of any # use so we can help monitor its effectiveness. Actors: diff --git a/src/workloads/docs/RandomSampler.yml b/src/workloads/docs/RandomSampler.yml index e04b48be89..453ce4baa3 100644 --- a/src/workloads/docs/RandomSampler.yml +++ b/src/workloads/docs/RandomSampler.yml @@ -1,5 +1,7 @@ SchemaVersion: 2018-07-01 Owner: Storage Engines +Description: | + TODO: TIG-3319 Clients: Default: diff --git a/src/workloads/docs/RollingCollections.yml b/src/workloads/docs/RollingCollections.yml index 50a8253a96..47ff805afc 100644 --- a/src/workloads/docs/RollingCollections.yml +++ b/src/workloads/docs/RollingCollections.yml @@ -1,5 +1,8 @@ SchemaVersion: 2018-07-01 Owner: Storage Engines +Description: | + TODO: TIG-3319 + Actors: - Name: Setup Type: RollingCollections @@ -12,7 +15,7 @@ Actors: Phases: - Repeat: 1 CollectionWindowSize: 10 - Document: {a: {^RandomString: {length: 30}}, z : {^RandomInt: {min: 100, max: 200}}} + Document: {a: {^RandomString: {length: 30}}, z: {^RandomInt: {min: 100, max: 200}}} DocumentCount: 1000 Indexes: - keys: {a: 1} @@ -54,7 +57,7 @@ Actors: Phases: - {Nop: true} - Duration: *Duration - Document: {a: {^RandomString: {length: 30}}, z : {^RandomInt: {min: 100, max: 200}}} + Document: {a: {^RandomString: {length: 30}}, z: {^RandomInt: {min: 100, max: 200}}} - Name: Reader Type: RollingCollections @@ -87,5 +90,5 @@ Actors: Threads: 1 Operation: OplogTailer Phases: - - {Nop : true} + - {Nop: true} - Duration: *Duration diff --git a/src/workloads/docs/RunCommand-Simple.yml 
b/src/workloads/docs/RunCommand-Simple.yml index c6dcf9cad7..3a23b99b3c 100644 --- a/src/workloads/docs/RunCommand-Simple.yml +++ b/src/workloads/docs/RunCommand-Simple.yml @@ -1,5 +1,8 @@ SchemaVersion: 2018-07-01 -Owner: "@mongodb/stm" +Owner: "@10gen/dev-prod-tips" +Description: | + This workload demonstrates the RunCommand actor, which can be used + to execute a command against the server. Actors: - Name: ServerStatusInsertFind @@ -27,6 +30,6 @@ Actors: OperationName: RunCommand AutoRun: - - When: - mongodb_setup: - $eq: standalone-dsi-integration-test +- When: + mongodb_setup: + $eq: standalone-dsi-integration-test diff --git a/src/workloads/docs/RunCommand.yml b/src/workloads/docs/RunCommand.yml index 0d2a19fbb6..adfd5a3711 100644 --- a/src/workloads/docs/RunCommand.yml +++ b/src/workloads/docs/RunCommand.yml @@ -1,5 +1,8 @@ SchemaVersion: 2018-07-01 -Owner: "@mongodb/stm" +Owner: "@10gen/dev-prod-tips" +Description: | + This workload demonstrates the RunCommand and AdminCommand actors, which can be used to + run commands against a target server. 
Actors: - Name: ServerStatusInsertFind diff --git a/src/workloads/execution/BackgroundValidateCmd.yml b/src/workloads/execution/BackgroundValidateCmd.yml index abc114183d..e3e95d4629 100644 --- a/src/workloads/execution/BackgroundValidateCmd.yml +++ b/src/workloads/execution/BackgroundValidateCmd.yml @@ -1,5 +1,7 @@ SchemaVersion: 2018-07-01 Owner: "@mongodb/product-perf" +Description: | + TODO: TIG-3318 Actors: - Name: InsertData @@ -31,7 +33,7 @@ Actors: - keys: {x: 1, y: 1} - keys: {x: 1, y: 1, b: 1} - keys: {arr: 1, z: 1} - - keys: {arr: 1, b : 1} + - keys: {arr: 1, b: 1} - keys: {a: "text"} - keys: {a: 1} options: {sparse: true} @@ -66,7 +68,7 @@ Actors: Filter: {y: {^RandomInt: {min: -1000, max: 1000}}} - *Nop - *Nop - + - Name: CrudWithValidation Type: CrudActor Database: *db @@ -117,10 +119,10 @@ Actors: background: true AutoRun: - - When: - mongodb_setup: - $eq: standalone - branch_name: - $neq: - - v4.0 - - v4.2 +- When: + mongodb_setup: + $eq: standalone + branch_name: + $neq: + - v4.0 + - v4.2 diff --git a/src/workloads/execution/CreateBigIndex.yml b/src/workloads/execution/CreateBigIndex.yml index edcd1725eb..b97e162d66 100644 --- a/src/workloads/execution/CreateBigIndex.yml +++ b/src/workloads/execution/CreateBigIndex.yml @@ -2,6 +2,8 @@ SchemaVersion: 2018-07-01 Owner: "@mongodb/server-execution" +Description: | + TODO: TIG-3322 Actors: @@ -53,7 +55,7 @@ Actors: Phases: - *Nop - *Nop - # Build an index on an integer field. + # Build an index on an integer field. - Repeat: 5 Database: *db Operations: @@ -70,7 +72,7 @@ Actors: OperationCommand: dropIndexes: Collection0 index: random_int - # Build an index on a string field. + # Build an index on a string field. 
- Repeat: 5 Database: *db Operations: @@ -89,11 +91,11 @@ Actors: index: random_string AutoRun: - - When: - mongodb_setup: - $eq: - - atlas - - replica - - replica-all-feature-flags - - single-replica - - standalone +- When: + mongodb_setup: + $eq: + - atlas + - replica + - replica-all-feature-flags + - single-replica + - standalone diff --git a/src/workloads/execution/CreateIndex.yml b/src/workloads/execution/CreateIndex.yml index df2c8e42fd..32d5ac8903 100644 --- a/src/workloads/execution/CreateIndex.yml +++ b/src/workloads/execution/CreateIndex.yml @@ -1,5 +1,7 @@ SchemaVersion: 2018-07-01 Owner: "@mongodb/server-execution" +Description: | + TODO: TIG-3322 Actors: - Name: InsertData @@ -59,13 +61,13 @@ Actors: Repeat: 5 AutoRun: - - When: - mongodb_setup: - $eq: - - atlas - - replica - - single-replica - - standalone - branch_name: - $neq: - - v4.0 +- When: + mongodb_setup: + $eq: + - atlas + - replica + - single-replica + - standalone + branch_name: + $neq: + - v4.0 diff --git a/src/workloads/execution/CreateIndexSharded.yml b/src/workloads/execution/CreateIndexSharded.yml index d005ef3095..ab76331a7b 100644 --- a/src/workloads/execution/CreateIndexSharded.yml +++ b/src/workloads/execution/CreateIndexSharded.yml @@ -1,5 +1,7 @@ SchemaVersion: 2018-07-01 Owner: "@mongodb/sharding" +Description: | + TODO: TIG-3320 Actors: - Name: EnableSharding @@ -33,7 +35,7 @@ Actors: - OperationMetricsName: ShardCollection OperationName: AdminCommand OperationCommand: - shardCollection: test.Collection0 # Collection0 is the default collection populated by the Loader. + shardCollection: test.Collection0 # Collection0 is the default collection populated by the Loader. 
key: _id: hashed - *Nop @@ -101,11 +103,11 @@ Actors: Duration: 5 minutes AutoRun: - - When: - mongodb_setup: - $eq: - - shard - - shard-lite - branch_name: - $neq: - - v4.0 +- When: + mongodb_setup: + $eq: + - shard + - shard-lite + branch_name: + $neq: + - v4.0 diff --git a/src/workloads/execution/CumulativeWindows.yml b/src/workloads/execution/CumulativeWindows.yml index 4805b1a575..151c9ea3aa 100644 --- a/src/workloads/execution/CumulativeWindows.yml +++ b/src/workloads/execution/CumulativeWindows.yml @@ -1,5 +1,7 @@ SchemaVersion: 2018-07-01 Owner: "@mongodb/query" +Description: | + TODO: TIG-3324 Actors: - Name: InsertData @@ -18,7 +20,7 @@ Actors: t: {^RandomDate: {min: "2020-01-01", max: "2021-01-01"}} x: {^RandomDouble: {distribution: normal, mean: 0, sigma: 3}} y: {^RandomDouble: {distribution: normal, mean: 1, sigma: 3}} - z: {^RandomString: {length: 1000}} # Unused field + z: {^RandomString: {length: 1000}} # Unused field - Nop: true - Nop: true @@ -48,116 +50,116 @@ Actors: - OperationMetricsName: Sum OperationName: RunCommand OperationCommand: - aggregate: Collection0 - pipeline: - [{ - $setWindowFields: { - sortBy: {t: 1}, - output: { - sum: { - $sum: "$x", - window: {documents: ["unbounded", "current"]} - } + aggregate: Collection0 + pipeline: + [{ + $setWindowFields: { + sortBy: {t: 1}, + output: { + sum: { + $sum: "$x", + window: {documents: ["unbounded", "current"]} } } - }] - cursor: {batchSize: *batchSize} + } + }] + cursor: {batchSize: *batchSize} - OperationMetricsName: Avg OperationName: RunCommand OperationCommand: - aggregate: Collection0 - pipeline: - [{ - $setWindowFields: { - sortBy: {t: 1}, - output: { - avg: { - $avg: "$x", - window: {documents: ["unbounded", "current"]} - } + aggregate: Collection0 + pipeline: + [{ + $setWindowFields: { + sortBy: {t: 1}, + output: { + avg: { + $avg: "$x", + window: {documents: ["unbounded", "current"]} } } - }] - cursor: {batchSize: *batchSize} + } + }] + cursor: {batchSize: *batchSize} - 
OperationMetricsName: StdDevPop OperationName: RunCommand OperationCommand: - aggregate: Collection0 - pipeline: - [{ - $setWindowFields: { - sortBy: {t: 1}, - output: { - stdDevPop: { - $stdDevPop: "$x", - window: {documents: ["unbounded", "current"]} - } + aggregate: Collection0 + pipeline: + [{ + $setWindowFields: { + sortBy: {t: 1}, + output: { + stdDevPop: { + $stdDevPop: "$x", + window: {documents: ["unbounded", "current"]} } } - }] - cursor: {batchSize: *batchSize} + } + }] + cursor: {batchSize: *batchSize} - OperationMetricsName: StdDevSamp OperationName: RunCommand OperationCommand: - aggregate: Collection0 - pipeline: - [{ - $setWindowFields: { - sortBy: {t: 1}, - output: { - stdDevSamp: { - $stdDevSamp: "$x", - window: {documents: ["unbounded", "current"]} - } + aggregate: Collection0 + pipeline: + [{ + $setWindowFields: { + sortBy: {t: 1}, + output: { + stdDevSamp: { + $stdDevSamp: "$x", + window: {documents: ["unbounded", "current"]} } } - }] - cursor: {batchSize: *batchSize} + } + }] + cursor: {batchSize: *batchSize} - OperationMetricsName: CovPop OperationName: RunCommand OperationCommand: - aggregate: Collection0 - pipeline: - [{ - $setWindowFields: { - sortBy: {t: 1}, - output: { - covariancePop: { - $covariancePop: ["$x", "$y"], - window: {documents: ["unbounded", "current"]} - } + aggregate: Collection0 + pipeline: + [{ + $setWindowFields: { + sortBy: {t: 1}, + output: { + covariancePop: { + $covariancePop: ["$x", "$y"], + window: {documents: ["unbounded", "current"]} } } - }] - cursor: {batchSize: *batchSize} + } + }] + cursor: {batchSize: *batchSize} - OperationMetricsName: CovSamp OperationName: RunCommand OperationCommand: - aggregate: Collection0 - pipeline: - [{ - $setWindowFields: { - sortBy: {t: 1}, - output: { - covarianceSamp: { - $covarianceSamp: ["$x", "$y"], - window: {documents: ["unbounded", "current"]} - } + aggregate: Collection0 + pipeline: + [{ + $setWindowFields: { + sortBy: {t: 1}, + output: { + covarianceSamp: { + 
$covarianceSamp: ["$x", "$y"], + window: {documents: ["unbounded", "current"]} } } - }] - cursor: {batchSize: *batchSize} + } + }] + cursor: {batchSize: *batchSize} AutoRun: - - When: - mongodb_setup: - $eq: - - standalone - - replica - - replica-all-feature-flags - - shard-lite - branch_name: - $neq: - - v4.0 - - v4.2 - - v4.4 +- When: + mongodb_setup: + $eq: + - standalone + - replica + - replica-all-feature-flags + - shard-lite + branch_name: + $neq: + - v4.0 + - v4.2 + - v4.4 diff --git a/src/workloads/execution/ExpressiveQueries.yml b/src/workloads/execution/ExpressiveQueries.yml index 79aea95f18..649147b390 100644 --- a/src/workloads/execution/ExpressiveQueries.yml +++ b/src/workloads/execution/ExpressiveQueries.yml @@ -1,5 +1,7 @@ SchemaVersion: 2018-07-01 Owner: "@mongodb/query" +Description: | + TODO: TIG-3324 Actors: @@ -21,7 +23,7 @@ Actors: - Repeat: 1 Database: &DB test Threads: 1 - CollectionCount: 1 # Collection name will be Collection0, this is not configurable. + CollectionCount: 1 # Collection name will be Collection0, this is not configurable. DocumentCount: 1e6 BatchSize: 1000 Document: @@ -167,7 +169,7 @@ Actors: projection: &projection {_id: 0, firstname: 1, lastname: 1, dob: 1} - *Nop - *Nop - - Repeat: 1000 # This query is very fast, so we need to run it more times than others. + - Repeat: 1000 # This query is very fast, so we need to run it more times than others. 
Duration: 5 minutes Database: *DB Operations: @@ -179,13 +181,13 @@ Actors: projection: *projection AutoRun: - - When: - mongodb_setup: - $eq: - - atlas - - replica - - single-replica - - standalone - branch_name: - $neq: - - v4.0 +- When: + mongodb_setup: + $eq: + - atlas + - replica + - single-replica + - standalone + branch_name: + $neq: + - v4.0 diff --git a/src/workloads/execution/ExternalSort.yml b/src/workloads/execution/ExternalSort.yml index 4c3860c705..47d458fedc 100644 --- a/src/workloads/execution/ExternalSort.yml +++ b/src/workloads/execution/ExternalSort.yml @@ -1,5 +1,7 @@ SchemaVersion: 2018-07-01 Owner: "@mongodb/query" +Description: | + TODO: TIG-3324 Actors: - Name: InsertData @@ -98,10 +100,10 @@ Actors: executionStats AutoRun: - - When: - mongodb_setup: - $eq: standalone - branch_name: - $neq: - - v4.0 - - v4.2 +- When: + mongodb_setup: + $eq: standalone + branch_name: + $neq: + - v4.0 + - v4.2 diff --git a/src/workloads/execution/LookupInUnshardedEnvironment.yml b/src/workloads/execution/LookupInUnshardedEnvironment.yml index 5d8dfd56f3..a85c212476 100644 --- a/src/workloads/execution/LookupInUnshardedEnvironment.yml +++ b/src/workloads/execution/LookupInUnshardedEnvironment.yml @@ -65,9 +65,9 @@ Actors: } }] cursor: {batchSize: *NumDocs} - + AutoRun: - - When: - mongodb_setup: - $eq: - - replica +- When: + mongodb_setup: + $eq: + - replica diff --git a/src/workloads/execution/PipelineUpdate.yml b/src/workloads/execution/PipelineUpdate.yml index 8d4634a88b..e33187440f 100644 --- a/src/workloads/execution/PipelineUpdate.yml +++ b/src/workloads/execution/PipelineUpdate.yml @@ -1,5 +1,7 @@ SchemaVersion: 2018-07-01 Owner: "@mongodb/query" +Description: | + TODO: TIG-3324 Actors: - Name: InsertData @@ -237,13 +239,13 @@ Actors: writeConcern: {w: majority} AutoRun: - - When: - mongodb_setup: - $eq: - - atlas - - replica - - replica-all-feature-flags - - standalone - branch_name: - $neq: - - v4.0 +- When: + mongodb_setup: + $eq: + - atlas + - 
replica + - replica-all-feature-flags + - standalone + branch_name: + $neq: + - v4.0 diff --git a/src/workloads/execution/SetWindowFieldsUnbounded.yml b/src/workloads/execution/SetWindowFieldsUnbounded.yml index 5d3d4b9cbe..c520740861 100644 --- a/src/workloads/execution/SetWindowFieldsUnbounded.yml +++ b/src/workloads/execution/SetWindowFieldsUnbounded.yml @@ -179,11 +179,11 @@ Actors: allowDiskUse: true AutoRun: - - When: - mongodb_setup: - $eq: standalone - branch_name: - $neq: - - v4.0 - - v4.2 - - v4.4 +- When: + mongodb_setup: + $eq: standalone + branch_name: + $neq: + - v4.0 + - v4.2 + - v4.4 diff --git a/src/workloads/execution/ShardedGraphLookup.yml b/src/workloads/execution/ShardedGraphLookup.yml index 7eeb8baed4..3469af529a 100644 --- a/src/workloads/execution/ShardedGraphLookup.yml +++ b/src/workloads/execution/ShardedGraphLookup.yml @@ -12,7 +12,7 @@ Description: | GlobalDefaults: NumDocsBase: &num_docs_base 95 NumDocsAdded: &num_docs_added 2905 - NumDocsTotal: &num_docs_total 3000 # Should be *num_docs_base + *num_docs_added + NumDocsTotal: &num_docs_total 3000 # Should be *num_docs_base + *num_docs_added Actors: - Name: CreateShardedCollections @@ -55,16 +55,16 @@ Actors: Threads: 1 Phases: - *Nop - # We want the size of 'Collection2' to be smaller than the other two collections for the queries - # where 'maxDepth' is not specified, so we first populate all three collections with the number of - # documents we want in 'Collection2' and then in the next phase add more to 'Collection0' and + # We want the size of 'Collection2' to be smaller than the other two collections for the queries + # where 'maxDepth' is not specified, so we first populate all three collections with the number of + # documents we want in 'Collection2' and then in the next phase add more to 'Collection0' and # 'Collection1'. 
- Repeat: 1 BatchSize: 1000 Threads: 1 DocumentCount: *num_docs_base Database: *Database - CollectionCount: 3 # Loader will populate 'Collection0', 'Collection1', and 'Collection2'. + CollectionCount: 3 # Loader will populate 'Collection0', 'Collection1', and 'Collection2'. Document: a: {^RandomInt: {min: 1, max: 100}} b: {^RandomInt: {min: 1, max: 100}} @@ -74,7 +74,7 @@ Actors: Threads: 1 DocumentCount: *num_docs_added Database: *Database - CollectionCount: 2 # Loader will add additional documents to 'Collection0' and 'Collection1'. + CollectionCount: 2 # Loader will add additional documents to 'Collection0' and 'Collection1'. Document: a: {^RandomInt: {min: 1, max: 100}} b: {^RandomInt: {min: 1, max: 100}} @@ -108,29 +108,29 @@ Actors: - Repeat: 10 Database: *Database Operations: - # The recursive targeted queries below work as follows: for each document X in Collection0 + # The recursive targeted queries below work as follows: for each document X in Collection0 # we find any document Y in Collection0 where X's 'c' field (startWith) is equal to Y's 'b' - # (connectToField). Then, we look at Y's 'a' (connectFromField) value and find any document Z in - # Collection0 that has the same value for its 'b' (connectToField), and so on. - # Because the collection is sharded by 'b' the $graphLookup is targeted towards + # (connectToField). Then, we look at Y's 'a' (connectFromField) value and find any document Z in + # Collection0 that has the same value for its 'b' (connectToField), and so on. + # Because the collection is sharded by 'b' the $graphLookup is targeted towards # specific shard(s). # Similarly, for the recursive untargeted queries below: for each document X in Collection0 - # we find any document Y in Collection0 where X's 'c' value (startWith) is equal to Y's 'a' + # we find any document Y in Collection0 where X's 'c' value (startWith) is equal to Y's 'a' # value (connectToField). 
Then, we look at Y's 'b' (connectFromField) and find any document Z - # in Collection0 that has the same value for its 'a' value (connectToField), and so on. + # in Collection0 that has the same value for its 'a' value (connectToField), and so on. # Because the collection is sharded by 'b' and 'a' is the connectToField, the $graphLookup # is not targeted to any specific shard. # For the queries where the local collection is unsharded, the same flow as above applies except # we do the process for each document X in Collection1. - - # It should be noted that the results of this test indicate that there is not much of a + + # It should be noted that the results of this test indicate that there is not much of a # time difference between the different depth limited queries. One reason for this could be # that in an attempt to limit the result set size by starting with a field 'c' that has values - # in a smaller range, the cache fills up with most (if not all) relevant documents in only a - # few (relatively) rounds of recursion. This would explain the lack of difference between the - # targeted and untargeted queries, as well as the unsharded and sharded queries since the + # in a smaller range, the cache fills up with most (if not all) relevant documents in only a + # few (relatively) rounds of recursion. This would explain the lack of difference between the + # targeted and untargeted queries, as well as the unsharded and sharded queries since the # unsharded case has a shared cache but the sharded case benefits from parallelism. 
# One other thing to note is that the results from the no recursion/depth limited cases cannot @@ -145,10 +145,10 @@ Actors: pipeline: [{ $graphLookup: { - from: "Collection0", - startWith: "$c", - connectFromField: "a", - connectToField: "b", + from: "Collection0", + startWith: "$c", + connectFromField: "a", + connectToField: "b", as: "matches", maxDepth: 0 } @@ -165,10 +165,10 @@ Actors: pipeline: [{ $graphLookup: { - from: "Collection0", - startWith: "$c", - connectFromField: "a", - connectToField: "b", + from: "Collection0", + startWith: "$c", + connectFromField: "a", + connectToField: "b", as: "matches", maxDepth: 2 } @@ -185,18 +185,18 @@ Actors: pipeline: [{ $graphLookup: { - from: "Collection0", - startWith: "$c", - connectFromField: "b", - connectToField: "a", - as: "matches" , + from: "Collection0", + startWith: "$c", + connectFromField: "b", + connectToField: "a", + as: "matches", maxDepth: 0 } }] # To get meaningful results, the entire result set should fit in a single batch. This should # be possible since both collections are small. 
cursor: {batchSize: *num_docs_total} - + # Untargeted $graphLookup from sharded collection to sharded collection, depth limited - OperationMetricsName: UntargetedGraphLookupShardedToShardedDepthLimited OperationName: RunCommand @@ -205,10 +205,10 @@ Actors: pipeline: [{ $graphLookup: { - from: "Collection0", - startWith: "$c", - connectFromField: "b", - connectToField: "a", + from: "Collection0", + startWith: "$c", + connectFromField: "b", + connectToField: "a", as: "matches", maxDepth: 2 } @@ -225,10 +225,10 @@ Actors: pipeline: [{ $graphLookup: { - from: "Collection0", - startWith: "$c", - connectFromField: "a", - connectToField: "b", + from: "Collection0", + startWith: "$c", + connectFromField: "a", + connectToField: "b", as: "matches", maxDepth: 0 } @@ -245,10 +245,10 @@ Actors: pipeline: [{ $graphLookup: { - from: "Collection0", - startWith: "$c", - connectFromField: "a", - connectToField: "b", + from: "Collection0", + startWith: "$c", + connectFromField: "a", + connectToField: "b", as: "matches", maxDepth: 2 } @@ -265,10 +265,10 @@ Actors: pipeline: [{ $graphLookup: { - from: "Collection0", - startWith: "$c", - connectFromField: "b", - connectToField: "a", + from: "Collection0", + startWith: "$c", + connectFromField: "b", + connectToField: "a", as: "matches", maxDepth: 0 } @@ -276,7 +276,7 @@ Actors: # To get meaningful results, the entire result set should fit in a single batch. This should # be possible since both collections are small. 
cursor: {batchSize: *num_docs_total} - + # Untargeted $graphLookup from unsharded collection to sharded collection, depth limited - OperationMetricsName: UntargetedGraphLookupUnshardedToShardedDepthLimited OperationName: RunCommand @@ -285,10 +285,10 @@ Actors: pipeline: [{ $graphLookup: { - from: "Collection0", - startWith: "$c", - connectFromField: "b", - connectToField: "a", + from: "Collection0", + startWith: "$c", + connectFromField: "b", + connectToField: "a", as: "matches", maxDepth: 2 } @@ -310,17 +310,17 @@ Actors: pipeline: [{ $graphLookup: { - from: "Collection2", - startWith: "$c", - connectFromField: "a", - connectToField: "b", + from: "Collection2", + startWith: "$c", + connectFromField: "a", + connectToField: "b", as: "matches" } }] # To get meaningful results, the entire result set should fit in a single batch. This should # be possible since both collections are small. cursor: {batchSize: *num_docs_total} - + # Untargeted $graphLookup from sharded collection to sharded collection, depth unlimited - OperationMetricsName: UntargetedGraphLookupShardedToShardedDepthUnlimited OperationName: RunCommand @@ -329,17 +329,17 @@ Actors: pipeline: [{ $graphLookup: { - from: "Collection2", - startWith: "$c", - connectFromField: "b", - connectToField: "a", - as: "matches" + from: "Collection2", + startWith: "$c", + connectFromField: "b", + connectToField: "a", + as: "matches" } }] # To get meaningful results, the entire result set should fit in a single batch. This should # be possible since both collections are small. 
cursor: {batchSize: *num_docs_total} - + # Targeted $graphLookup from unsharded collection to sharded collection, depth unlimited - OperationMetricsName: TargetedGraphLookupUnshardedToShardedDepthUnlimited OperationName: RunCommand @@ -348,17 +348,17 @@ Actors: pipeline: [{ $graphLookup: { - from: "Collection2", - startWith: "$c", - connectFromField: "a", - connectToField: "b", + from: "Collection2", + startWith: "$c", + connectFromField: "a", + connectToField: "b", as: "matches" } }] # To get meaningful results, the entire result set should fit in a single batch. This should # be possible since both collections are small. cursor: {batchSize: *num_docs_total} - + # Untargeted $graphLookup from unsharded collection to sharded collection, depth unlimited - OperationMetricsName: UntargetedGraphLookupUnshardedToShardedDepthUnlimited OperationName: RunCommand @@ -367,10 +367,10 @@ Actors: pipeline: [{ $graphLookup: { - from: "Collection2", - startWith: "$c", - connectFromField: "b", - connectToField: "a", + from: "Collection2", + startWith: "$c", + connectFromField: "b", + connectToField: "a", as: "matches" } }] @@ -379,6 +379,6 @@ Actors: cursor: {batchSize: *num_docs_total} AutoRun: - - When: - mongodb_setup: - $eq: shard-lite-all-feature-flags +- When: + mongodb_setup: + $eq: shard-lite-all-feature-flags diff --git a/src/workloads/execution/ShardedLookup.yml b/src/workloads/execution/ShardedLookup.yml index f41af7594f..52acad9cf0 100644 --- a/src/workloads/execution/ShardedLookup.yml +++ b/src/workloads/execution/ShardedLookup.yml @@ -178,6 +178,6 @@ Actors: cursor: {batchSize: *NumDocs} AutoRun: - - When: - mongodb_setup: - $eq: shard-lite-all-feature-flags +- When: + mongodb_setup: + $eq: shard-lite-all-feature-flags diff --git a/src/workloads/execution/SlidingWindows.yml b/src/workloads/execution/SlidingWindows.yml index 8fb3918e1b..e91d7a7b23 100644 --- a/src/workloads/execution/SlidingWindows.yml +++ b/src/workloads/execution/SlidingWindows.yml @@ -1,5 +1,7 @@ 
SchemaVersion: 2018-07-01 Owner: "@mongodb/query" +Description: | + TODO: TIG-3324 Actors: - Name: InsertData @@ -45,98 +47,98 @@ Actors: - OperationMetricsName: MovingAvgPositionBased OperationName: RunCommand OperationCommand: - aggregate: Collection0 - pipeline: - [{$setWindowFields: { - partitionBy: "$partitionKey", - sortBy: {time: 1}, - output: {avg: {$avg: "$temp", window: {documents: [-5, 5]}}}}}] - cursor: {batchSize: *batchSize} + aggregate: Collection0 + pipeline: + [{$setWindowFields: { + partitionBy: "$partitionKey", + sortBy: {time: 1}, + output: {avg: {$avg: "$temp", window: {documents: [-5, 5]}}}}}] + cursor: {batchSize: *batchSize} - OperationMetricsName: MovingAvgTimeBased OperationName: RunCommand OperationCommand: - aggregate: Collection0 - pipeline: - [{$setWindowFields: { - partitionBy: "$partitionKey", - sortBy: {time: 1}, - output: {avg: {$avg: "$temp", window: {range: [-1, 0], unit: "hour"}}}}}] - cursor: {batchSize: *batchSize} + aggregate: Collection0 + pipeline: + [{$setWindowFields: { + partitionBy: "$partitionKey", + sortBy: {time: 1}, + output: {avg: {$avg: "$temp", window: {range: [-1, 0], unit: "hour"}}}}}] + cursor: {batchSize: *batchSize} - OperationMetricsName: MinPositionBased OperationName: RunCommand OperationCommand: - aggregate: Collection0 - pipeline: - [{$setWindowFields: { - partitionBy: "$partitionKey", - sortBy: {time: 1}, - output: {min: {$min: "$temp", window: {documents: [-5, 5]}}}}}] - cursor: {batchSize: *batchSize} + aggregate: Collection0 + pipeline: + [{$setWindowFields: { + partitionBy: "$partitionKey", + sortBy: {time: 1}, + output: {min: {$min: "$temp", window: {documents: [-5, 5]}}}}}] + cursor: {batchSize: *batchSize} - OperationMetricsName: MinTimeBased OperationName: RunCommand OperationCommand: - aggregate: Collection0 - pipeline: - [{$setWindowFields: { - partitionBy: "$partitionKey", - sortBy: {time: 1}, - output: {min: {$min: "$temp", window: {range: [-1, 0], unit: "hour"}}}}}] - cursor: {batchSize: 
*batchSize} + aggregate: Collection0 + pipeline: + [{$setWindowFields: { + partitionBy: "$partitionKey", + sortBy: {time: 1}, + output: {min: {$min: "$temp", window: {range: [-1, 0], unit: "hour"}}}}}] + cursor: {batchSize: *batchSize} - OperationMetricsName: ExpMovingAvgSmallN OperationName: RunCommand OperationCommand: - aggregate: Collection0 - pipeline: - [{$setWindowFields: { - partitionBy: "$partitionKey", - sortBy: {time: 1}, - output: {ema: {$expMovingAvg: {input: "$temp", N: 10}}}}}] - cursor: {batchSize: *batchSize} + aggregate: Collection0 + pipeline: + [{$setWindowFields: { + partitionBy: "$partitionKey", + sortBy: {time: 1}, + output: {ema: {$expMovingAvg: {input: "$temp", N: 10}}}}}] + cursor: {batchSize: *batchSize} - OperationMetricsName: ExpMovingAvgLargeN OperationName: RunCommand OperationCommand: - aggregate: Collection0 - pipeline: - [{$setWindowFields: { - partitionBy: "$partitionKey", - sortBy: {time: 1}, - output: {ema: {$expMovingAvg: {input: "$temp", N: 500}}}}}] - cursor: {batchSize: *batchSize} + aggregate: Collection0 + pipeline: + [{$setWindowFields: { + partitionBy: "$partitionKey", + sortBy: {time: 1}, + output: {ema: {$expMovingAvg: {input: "$temp", N: 500}}}}}] + cursor: {batchSize: *batchSize} - OperationMetricsName: DerivativeSmallWindow OperationName: RunCommand OperationCommand: - aggregate: Collection0 - pipeline: - [{$setWindowFields: { - partitionBy: "$partitionKey", - sortBy: {time: 1}, - output: {rate: { - $derivative: {input: "$temp", unit: "second"}, - window: {documents: [-10, 0]}}}}}] - cursor: {batchSize: *batchSize} + aggregate: Collection0 + pipeline: + [{$setWindowFields: { + partitionBy: "$partitionKey", + sortBy: {time: 1}, + output: {rate: { + $derivative: {input: "$temp", unit: "second"}, + window: {documents: [-10, 0]}}}}}] + cursor: {batchSize: *batchSize} - OperationMetricsName: DerivativeLargeWindow OperationName: RunCommand OperationCommand: - aggregate: Collection0 - pipeline: - [{$setWindowFields: { - 
partitionBy: "$partitionKey", - sortBy: {time: 1}, - output: {rate: { - $derivative: {input: "$temp", unit: "second"}, - window: {documents: [-500, 0]}}}}}] - cursor: {batchSize: *batchSize} + aggregate: Collection0 + pipeline: + [{$setWindowFields: { + partitionBy: "$partitionKey", + sortBy: {time: 1}, + output: {rate: { + $derivative: {input: "$temp", unit: "second"}, + window: {documents: [-500, 0]}}}}}] + cursor: {batchSize: *batchSize} AutoRun: - - When: - mongodb_setup: - $eq: - - standalone - - replica - - replica-all-feature-flags - - shard-lite - branch_name: - $neq: - - v4.0 - - v4.2 - - v4.4 +- When: + mongodb_setup: + $eq: + - standalone + - replica + - replica-all-feature-flags + - shard-lite + branch_name: + $neq: + - v4.0 + - v4.2 + - v4.4 diff --git a/src/workloads/execution/UnionWith.yml b/src/workloads/execution/UnionWith.yml index c5ecb70e89..55d2edea9f 100644 --- a/src/workloads/execution/UnionWith.yml +++ b/src/workloads/execution/UnionWith.yml @@ -1,5 +1,7 @@ SchemaVersion: 2018-07-01 Owner: "@mongodb/query" +Description: | + TODO: TIG-3324 Actors: - Name: InsertData @@ -103,165 +105,165 @@ Actors: - OperationMetricsName: UnionWithTwoCollCompleteOverlap OperationName: RunCommand OperationCommand: - aggregate: Collection0 - pipeline: [{$unionWith: "Collection0_copy"}] - cursor: {batchSize: *batchSize} + aggregate: Collection0 + pipeline: [{$unionWith: "Collection0_copy"}] + cursor: {batchSize: *batchSize} - OperationMetricsName: UnionWithTwoCollHalfOverlap OperationName: RunCommand OperationCommand: - aggregate: Collection0_1 - pipeline: [{$unionWith: "Collection0_2"}] - cursor: {batchSize: *batchSize} + aggregate: Collection0_1 + pipeline: [{$unionWith: "Collection0_2"}] + cursor: {batchSize: *batchSize} - OperationMetricsName: UnionWithTwoCollNoOverlap OperationName: RunCommand OperationCommand: - aggregate: Collection0 - pipeline: [{$unionWith: "Collection1"}] - cursor: {batchSize: *batchSize} + aggregate: Collection0 + pipeline: 
[{$unionWith: "Collection1"}] + cursor: {batchSize: *batchSize} - OperationMetricsName: UnionWithTwoCollSubpipelineCompleteOverlap OperationName: RunCommand OperationCommand: - aggregate: Collection0 - pipeline: - [{$unionWith: {coll: "Collection0_copy", pipeline: [{$set: {integer: "$integer"}}]}}] - cursor: {batchSize: *batchSize} + aggregate: Collection0 + pipeline: + [{$unionWith: {coll: "Collection0_copy", pipeline: [{$set: {integer: "$integer"}}]}}] + cursor: {batchSize: *batchSize} - OperationMetricsName: UnionWithTwoCollSubpipelineHalfOverlap OperationName: RunCommand OperationCommand: - aggregate: Collection0_1 - pipeline: - [{$unionWith: {coll: "Collection0_2", pipeline: [{$set: {integer: "$integer"}}]}}] - cursor: {batchSize: *batchSize} + aggregate: Collection0_1 + pipeline: + [{$unionWith: {coll: "Collection0_2", pipeline: [{$set: {integer: "$integer"}}]}}] + cursor: {batchSize: *batchSize} - OperationMetricsName: UnionWithTwoCollSubpipelineNoOverlap OperationName: RunCommand OperationCommand: - aggregate: Collection0 - pipeline: [{$unionWith: {coll: "Collection1", pipeline: [{$set: {integer: "$integer"}}]}}] - cursor: {batchSize: *batchSize} + aggregate: Collection0 + pipeline: [{$unionWith: {coll: "Collection1", pipeline: [{$set: {integer: "$integer"}}]}}] + cursor: {batchSize: *batchSize} - OperationMetricsName: UnionWithThreeCollSequentialHighOverlap OperationName: RunCommand OperationCommand: - aggregate: Collection0 - pipeline: - [{$unionWith: {coll: "Collection0_copy", pipeline: [{$set: {integer: "$integer"}}]}}, - {$unionWith: {coll: "Collection1_copy", pipeline: [{$set: {integer: "$integer"}}]}}] - cursor: {batchSize: *batchSize} + aggregate: Collection0 + pipeline: + [{$unionWith: {coll: "Collection0_copy", pipeline: [{$set: {integer: "$integer"}}]}}, + {$unionWith: {coll: "Collection1_copy", pipeline: [{$set: {integer: "$integer"}}]}}] + cursor: {batchSize: *batchSize} - OperationMetricsName: UnionWithThreeCollSequentialPartialOverlap 
OperationName: RunCommand OperationCommand: - aggregate: Collection0_1 - pipeline: - [{$unionWith: {coll: "Collection0_2", pipeline: [{$set: {integer: "$integer"}}]}}, - {$unionWith: {coll: "Collection1_3", pipeline: [{$set: {integer: "$integer"}}]}}] - cursor: {batchSize: *batchSize} + aggregate: Collection0_1 + pipeline: + [{$unionWith: {coll: "Collection0_2", pipeline: [{$set: {integer: "$integer"}}]}}, + {$unionWith: {coll: "Collection1_3", pipeline: [{$set: {integer: "$integer"}}]}}] + cursor: {batchSize: *batchSize} - OperationMetricsName: UnionWithThreeCollSequentialNoOverlap OperationName: RunCommand OperationCommand: - aggregate: Collection0 - pipeline: - [{$unionWith: {coll: "Collection1", pipeline: [{$set: {integer: "$integer"}}]}}, - {$unionWith: {coll: "Collection2", pipeline: [{$set: {integer: "$integer"}}]}}] - cursor: {batchSize: *batchSize} + aggregate: Collection0 + pipeline: + [{$unionWith: {coll: "Collection1", pipeline: [{$set: {integer: "$integer"}}]}}, + {$unionWith: {coll: "Collection2", pipeline: [{$set: {integer: "$integer"}}]}}] + cursor: {batchSize: *batchSize} - OperationMetricsName: UnionWithThreeCollNestedHighOverlap OperationName: RunCommand OperationCommand: - aggregate: Collection0 - pipeline: - [{$unionWith: { - coll: "Collection0_copy", - pipeline: [{$set: {integer: "$integer"}}, - {$unionWith: {coll: "Collection1_copy", - pipeline: [{$set: {integer: "$integer"}}]}}]}}] - cursor: {batchSize: *batchSize} + aggregate: Collection0 + pipeline: + [{$unionWith: { + coll: "Collection0_copy", + pipeline: [{$set: {integer: "$integer"}}, + {$unionWith: {coll: "Collection1_copy", + pipeline: [{$set: {integer: "$integer"}}]}}]}}] + cursor: {batchSize: *batchSize} - OperationMetricsName: UnionWithThreeCollNestedPartialOverlap OperationName: RunCommand OperationCommand: - aggregate: Collection0_1 - pipeline: - [{$unionWith: { - coll: "Collection0_2", - pipeline: [{$set: {integer: "$integer"}}, - {$unionWith: {coll: "Collection1_3", - pipeline: 
[{$set: {integer: "$integer"}}]}}]}}] - cursor: {batchSize: *batchSize} + aggregate: Collection0_1 + pipeline: + [{$unionWith: { + coll: "Collection0_2", + pipeline: [{$set: {integer: "$integer"}}, + {$unionWith: {coll: "Collection1_3", + pipeline: [{$set: {integer: "$integer"}}]}}]}}] + cursor: {batchSize: *batchSize} - OperationMetricsName: UnionWithThreeCollNestedNoOverlap OperationName: RunCommand OperationCommand: - aggregate: Collection0 - pipeline: - [{$unionWith: { - coll: "Collection1", - pipeline: [{$set: {integer: "$integer"}}, - {$unionWith: {coll: "Collection2", - pipeline: [{$set: {integer: "$integer"}}]}}]}}] - cursor: {batchSize: *batchSize} + aggregate: Collection0 + pipeline: + [{$unionWith: { + coll: "Collection1", + pipeline: [{$set: {integer: "$integer"}}, + {$unionWith: {coll: "Collection2", + pipeline: [{$set: {integer: "$integer"}}]}}]}}] + cursor: {batchSize: *batchSize} - OperationMetricsName: UnionWithSingleFollowingStageNoOverlap OperationName: RunCommand OperationCommand: - aggregate: Collection0 - pipeline: [ - {$unionWith: "Collection1"}, - {$unionWith: "Collection2"}, - {$count: "num_documents"}] - cursor: {batchSize: *batchSize} + aggregate: Collection0 + pipeline: [ + {$unionWith: "Collection1"}, + {$unionWith: "Collection2"}, + {$count: "num_documents"}] + cursor: {batchSize: *batchSize} - OperationMetricsName: UnionWithMultipleFollowingStagesNoOverlap OperationName: RunCommand OperationCommand: - aggregate: Collection0 - pipeline: [ - {$unionWith: - {coll: "Collection1", - pipeline: [ - {$match: {"double": {$gte: 2, $lt: 20000000}}}, - {$addFields: {collection: "Collection1"}}, - {$unionWith: - {coll: "Collection2", - pipeline: [{$addFields: {collection: "Collection2"}}]}}]}}, - {$group: - {_id: {x: "$integer"}, - count: {$sum: 1}, - math: {$sum: {$divide: ["$double", "$integer"]}}}}, - {$sort: {math: -1}}, - {$addFields: {newField: "newField"}}, - {$match: {count: {$lt: 5}}}, - {$project: {_id: 0, count: 0, newField: 0}}] - 
cursor: {batchSize: *batchSize} + aggregate: Collection0 + pipeline: [ + {$unionWith: + {coll: "Collection1", + pipeline: [ + {$match: {"double": {$gte: 2, $lt: 20000000}}}, + {$addFields: {collection: "Collection1"}}, + {$unionWith: + {coll: "Collection2", + pipeline: [{$addFields: {collection: "Collection2"}}]}}]}}, + {$group: + {_id: {x: "$integer"}, + count: {$sum: 1}, + math: {$sum: {$divide: ["$double", "$integer"]}}}}, + {$sort: {math: -1}}, + {$addFields: {newField: "newField"}}, + {$match: {count: {$lt: 5}}}, + {$project: {_id: 0, count: 0, newField: 0}}] + cursor: {batchSize: *batchSize} - OperationMetricsName: UnionWithTwentyCollSequentialNoOverlap OperationName: RunCommand OperationCommand: - aggregate: Collection0 - pipeline: [ - {$unionWith: {coll: "Collection1", pipeline: [{$set: {integer: "$integer"}}]}}, - {$unionWith: {coll: "Collection2", pipeline: [{$set: {integer: "$integer"}}]}}, - {$unionWith: {coll: "Collection3", pipeline: [{$set: {integer: "$integer"}}]}}, - {$unionWith: {coll: "Collection4", pipeline: [{$set: {integer: "$integer"}}]}}, - {$unionWith: {coll: "Collection5", pipeline: [{$set: {integer: "$integer"}}]}}, - {$unionWith: {coll: "Collection6", pipeline: [{$set: {integer: "$integer"}}]}}, - {$unionWith: {coll: "Collection7", pipeline: [{$set: {integer: "$integer"}}]}}, - {$unionWith: {coll: "Collection8", pipeline: [{$set: {integer: "$integer"}}]}}, - {$unionWith: {coll: "Collection9", pipeline: [{$set: {integer: "$integer"}}]}}, - {$unionWith: {coll: "Collection10", pipeline: [{$set: {integer: "$integer"}}]}}, - {$unionWith: {coll: "Collection11", pipeline: [{$set: {integer: "$integer"}}]}}, - {$unionWith: {coll: "Collection12", pipeline: [{$set: {integer: "$integer"}}]}}, - {$unionWith: {coll: "Collection13", pipeline: [{$set: {integer: "$integer"}}]}}, - {$unionWith: {coll: "Collection14", pipeline: [{$set: {integer: "$integer"}}]}}, - {$unionWith: {coll: "Collection15", pipeline: [{$set: {integer: "$integer"}}]}}, - 
{$unionWith: {coll: "Collection16", pipeline: [{$set: {integer: "$integer"}}]}}, - {$unionWith: {coll: "Collection17", pipeline: [{$set: {integer: "$integer"}}]}}, - {$unionWith: {coll: "Collection18", pipeline: [{$set: {integer: "$integer"}}]}}, - {$unionWith: {coll: "Collection19", pipeline: [{$set: {integer: "$integer"}}]}}] - cursor: {batchSize: *batchSize} + aggregate: Collection0 + pipeline: [ + {$unionWith: {coll: "Collection1", pipeline: [{$set: {integer: "$integer"}}]}}, + {$unionWith: {coll: "Collection2", pipeline: [{$set: {integer: "$integer"}}]}}, + {$unionWith: {coll: "Collection3", pipeline: [{$set: {integer: "$integer"}}]}}, + {$unionWith: {coll: "Collection4", pipeline: [{$set: {integer: "$integer"}}]}}, + {$unionWith: {coll: "Collection5", pipeline: [{$set: {integer: "$integer"}}]}}, + {$unionWith: {coll: "Collection6", pipeline: [{$set: {integer: "$integer"}}]}}, + {$unionWith: {coll: "Collection7", pipeline: [{$set: {integer: "$integer"}}]}}, + {$unionWith: {coll: "Collection8", pipeline: [{$set: {integer: "$integer"}}]}}, + {$unionWith: {coll: "Collection9", pipeline: [{$set: {integer: "$integer"}}]}}, + {$unionWith: {coll: "Collection10", pipeline: [{$set: {integer: "$integer"}}]}}, + {$unionWith: {coll: "Collection11", pipeline: [{$set: {integer: "$integer"}}]}}, + {$unionWith: {coll: "Collection12", pipeline: [{$set: {integer: "$integer"}}]}}, + {$unionWith: {coll: "Collection13", pipeline: [{$set: {integer: "$integer"}}]}}, + {$unionWith: {coll: "Collection14", pipeline: [{$set: {integer: "$integer"}}]}}, + {$unionWith: {coll: "Collection15", pipeline: [{$set: {integer: "$integer"}}]}}, + {$unionWith: {coll: "Collection16", pipeline: [{$set: {integer: "$integer"}}]}}, + {$unionWith: {coll: "Collection17", pipeline: [{$set: {integer: "$integer"}}]}}, + {$unionWith: {coll: "Collection18", pipeline: [{$set: {integer: "$integer"}}]}}, + {$unionWith: {coll: "Collection19", pipeline: [{$set: {integer: "$integer"}}]}}] + cursor: {batchSize: 
*batchSize} AutoRun: - - When: - mongodb_setup: - $eq: - - atlas - - replica - - replica-all-feature-flags - - shard-lite - - standalone - branch_name: - $neq: - - v4.0 - - v4.2 +- When: + mongodb_setup: + $eq: + - atlas + - replica + - replica-all-feature-flags + - shard-lite + - standalone + branch_name: + $neq: + - v4.0 + - v4.2 diff --git a/src/workloads/execution/UnshardedGraphLookup.yml b/src/workloads/execution/UnshardedGraphLookup.yml index 760649298a..a067530fe8 100644 --- a/src/workloads/execution/UnshardedGraphLookup.yml +++ b/src/workloads/execution/UnshardedGraphLookup.yml @@ -46,7 +46,7 @@ Actors: Threads: 1 DocumentCount: &NumDocs 3000 Database: *Database - CollectionCount: 3 # Loader will populate 'Collection0', 'Collection1', and 'Collection2'. + CollectionCount: 3 # Loader will populate 'Collection0', 'Collection1', and 'Collection2'. Document: a: {^RandomInt: {min: 1, max: 3000}} b: {^RandomInt: {min: 1, max: 3000}} @@ -187,9 +187,8 @@ Actors: cursor: {batchSize: *NumDocs} AutoRun: - - When: - mongodb_setup: - $eq: - - shard-lite - - shard-lite-all-feature-flags - +- When: + mongodb_setup: + $eq: + - shard-lite + - shard-lite-all-feature-flags diff --git a/src/workloads/execution/UnshardedLookup.yml b/src/workloads/execution/UnshardedLookup.yml index 4525a12c2a..aaa8cfc7f3 100644 --- a/src/workloads/execution/UnshardedLookup.yml +++ b/src/workloads/execution/UnshardedLookup.yml @@ -139,8 +139,8 @@ Actors: cursor: {batchSize: *batchSize} AutoRun: - - When: - mongodb_setup: - $eq: - - shard-lite - - shard-lite-all-feature-flags +- When: + mongodb_setup: + $eq: + - shard-lite + - shard-lite-all-feature-flags diff --git a/src/workloads/execution/UnshardedLookupCachedPrefix.yml b/src/workloads/execution/UnshardedLookupCachedPrefix.yml index bf4c04c63c..c698b8d847 100644 --- a/src/workloads/execution/UnshardedLookupCachedPrefix.yml +++ b/src/workloads/execution/UnshardedLookupCachedPrefix.yml @@ -113,10 +113,10 @@ Actors: } }] cursor: {batchSize: 
*NumDocs} - + AutoRun: - - When: - mongodb_setup: - $eq: - - shard-lite - - shard-lite-all-feature-flags +- When: + mongodb_setup: + $eq: + - shard-lite + - shard-lite-all-feature-flags diff --git a/src/workloads/execution/UserAcquisition.yml b/src/workloads/execution/UserAcquisition.yml index a0adb1af07..ab43bfe9a9 100644 --- a/src/workloads/execution/UserAcquisition.yml +++ b/src/workloads/execution/UserAcquisition.yml @@ -14,172 +14,172 @@ Actors: Type: RunCommand Threads: 1 Phases: - - Phase: 0 # Create a user we can pull from the privilege database + - Phase: 0 # Create a user we can pull from the privilege database Repeat: 1 Database: admin Operations: - # Setup a tree of roles and privileges - # The specific roles and privileges are unimportant - # so long as they are complex, but non-cyclic. - # Note that the yaml parser has some issues - # with empty arrays, but createRole must - # have `roles` and `privileges` present. - # So we use builtin `backup` role and filler privs. - - # Simple leaf roles - - OperationName: RunCommand - OperationIsQuiet: true - OperationCommand: - createRole: 'userAcquisitionRoleA' - roles: ['backup'] - privileges: - - resource: { db: 'userAcquisitionDB', collection: 'A' } - actions: ['insert'] - - - OperationName: RunCommand - OperationIsQuiet: true - OperationCommand: - createRole: 'userAcquisitionRoleB' - roles: ['backup'] - privileges: - - resource: { db: 'userAcquisitionDB', collection: 'B' } - actions: ['insert'] - - - OperationName: RunCommand - OperationIsQuiet: true - OperationCommand: - createRole: 'userAcquisitionRoleC' - roles: ['backup'] - privileges: - - resource: { db: 'userAcquisitionDB', collection: 'C' } - actions: ['insert'] - - - OperationName: RunCommand - OperationIsQuiet: true - OperationCommand: - createRole: 'userAcquisitionRoleD' - roles: ['backup'] - privileges: - - resource: { db: 'userAcquisitionDB', collection: 'D' } - actions: ['insert'] - - # First degree inheritors - - OperationName: RunCommand - 
OperationIsQuiet: true - OperationCommand: - createRole: 'userAcquisitionRoleAB' - roles: - - userAcquisitionRoleA - - userAcquisitionRoleB - privileges: - - resource: { db: 'userAcquisitionDB', collection: 'AB' } - actions: ['insert'] - - - OperationName: RunCommand - OperationIsQuiet: true - OperationCommand: - createRole: 'userAcquisitionRoleBC' - roles: - - userAcquisitionRoleB - - userAcquisitionRoleC - privileges: - - resource: { db: 'userAcquisitionDB', collection: 'BC' } - actions: ['insert'] - - - OperationName: RunCommand - OperationIsQuiet: true - OperationCommand: - createRole: 'userAcquisitionRoleCD' - roles: - - userAcquisitionRoleC - - userAcquisitionRoleD - privileges: - - resource: { db: 'userAcquisitionDB', collection: 'CD' } - actions: ['insert'] - - - OperationName: RunCommand - OperationIsQuiet: true - OperationCommand: - createRole: 'userAcquisitionRoleDA' - roles: - - userAcquisitionRoleD - - userAcquisitionRoleA - privileges: - - resource: { db: 'userAcquisitionDB', collection: 'DA' } - actions: ['insert'] - - # 2nd degree inheritors - - OperationName: RunCommand - OperationIsQuiet: true - OperationCommand: - createRole: 'userAcquisitionRoleABCD' - roles: - - userAcquisitionRoleAB - - userAcquisitionRoleCD - privileges: - - resource: { db: 'userAcquisitionDB', collection: 'ABCD' } - actions: ['insert'] - - - OperationName: RunCommand - OperationIsQuiet: true - OperationCommand: - createRole: 'userAcquisitionRoleBCDA' - roles: - - userAcquisitionRoleBC - - userAcquisitionRoleDA - privileges: - - resource: { db: 'userAcquisitionDB', collection: 'BCDA' } - actions: ['insert'] - - - OperationName: RunCommand - OperationIsQuiet: true - OperationCommand: - createRole: 'userAcquisitionRoleCDAB' - roles: - - userAcquisitionRoleCD - - userAcquisitionRoleAB - privileges: - - resource: { db: 'userAcquisitionDB', collection: 'CDAB' } - actions: ['insert'] - - - OperationName: RunCommand - OperationIsQuiet: true - OperationCommand: - createRole: 
'userAcquisitionRoleDABC' - roles: - - userAcquisitionRoleDA - - userAcquisitionRoleBC - privileges: - - resource: { db: 'userAcquisitionDB', collection: 'DABC' } - actions: ['insert'] - - # Composite of 2nd degree inheritors - - OperationName: RunCommand - OperationIsQuiet: true - OperationCommand: - createRole: 'userAcquisitionRoleAll' - roles: - - userAcquisitionRoleABCD - - userAcquisitionRoleBCDA - - userAcquisitionRoleCDAB - - userAcquisitionRoleDABC - privileges: - - resource: { db: 'userAcquisitionDB', collection: 'All' } - actions: ['insert'] - - - OperationName: RunCommand - OperationIsQuiet: true - OperationCommand: - createUser: 'testUserAcquisition' - pwd: 'pwd' - roles: - - { db: 'admin', role: 'userAcquisitionRoleAll' } + # Setup a tree of roles and privileges + # The specific roles and privileges are unimportant + # so long as they are complex, but non-cyclic. + # Note that the yaml parser has some issues + # with empty arrays, but createRole must + # have `roles` and `privileges` present. + # So we use builtin `backup` role and filler privs. 
+ + # Simple leaf roles + - OperationName: RunCommand + OperationIsQuiet: true + OperationCommand: + createRole: 'userAcquisitionRoleA' + roles: ['backup'] + privileges: + - resource: { db: 'userAcquisitionDB', collection: 'A' } + actions: ['insert'] + + - OperationName: RunCommand + OperationIsQuiet: true + OperationCommand: + createRole: 'userAcquisitionRoleB' + roles: ['backup'] + privileges: + - resource: { db: 'userAcquisitionDB', collection: 'B' } + actions: ['insert'] + + - OperationName: RunCommand + OperationIsQuiet: true + OperationCommand: + createRole: 'userAcquisitionRoleC' + roles: ['backup'] + privileges: + - resource: { db: 'userAcquisitionDB', collection: 'C' } + actions: ['insert'] + + - OperationName: RunCommand + OperationIsQuiet: true + OperationCommand: + createRole: 'userAcquisitionRoleD' + roles: ['backup'] + privileges: + - resource: { db: 'userAcquisitionDB', collection: 'D' } + actions: ['insert'] + + # First degree inheritors + - OperationName: RunCommand + OperationIsQuiet: true + OperationCommand: + createRole: 'userAcquisitionRoleAB' + roles: + - userAcquisitionRoleA + - userAcquisitionRoleB + privileges: + - resource: { db: 'userAcquisitionDB', collection: 'AB' } + actions: ['insert'] + + - OperationName: RunCommand + OperationIsQuiet: true + OperationCommand: + createRole: 'userAcquisitionRoleBC' + roles: + - userAcquisitionRoleB + - userAcquisitionRoleC + privileges: + - resource: { db: 'userAcquisitionDB', collection: 'BC' } + actions: ['insert'] + + - OperationName: RunCommand + OperationIsQuiet: true + OperationCommand: + createRole: 'userAcquisitionRoleCD' + roles: + - userAcquisitionRoleC + - userAcquisitionRoleD + privileges: + - resource: { db: 'userAcquisitionDB', collection: 'CD' } + actions: ['insert'] + + - OperationName: RunCommand + OperationIsQuiet: true + OperationCommand: + createRole: 'userAcquisitionRoleDA' + roles: + - userAcquisitionRoleD + - userAcquisitionRoleA + privileges: + - resource: { db: 
'userAcquisitionDB', collection: 'DA' } + actions: ['insert'] + + # 2nd degree inheritors + - OperationName: RunCommand + OperationIsQuiet: true + OperationCommand: + createRole: 'userAcquisitionRoleABCD' + roles: + - userAcquisitionRoleAB + - userAcquisitionRoleCD + privileges: + - resource: { db: 'userAcquisitionDB', collection: 'ABCD' } + actions: ['insert'] + + - OperationName: RunCommand + OperationIsQuiet: true + OperationCommand: + createRole: 'userAcquisitionRoleBCDA' + roles: + - userAcquisitionRoleBC + - userAcquisitionRoleDA + privileges: + - resource: { db: 'userAcquisitionDB', collection: 'BCDA' } + actions: ['insert'] + + - OperationName: RunCommand + OperationIsQuiet: true + OperationCommand: + createRole: 'userAcquisitionRoleCDAB' + roles: + - userAcquisitionRoleCD + - userAcquisitionRoleAB + privileges: + - resource: { db: 'userAcquisitionDB', collection: 'CDAB' } + actions: ['insert'] + + - OperationName: RunCommand + OperationIsQuiet: true + OperationCommand: + createRole: 'userAcquisitionRoleDABC' + roles: + - userAcquisitionRoleDA + - userAcquisitionRoleBC + privileges: + - resource: { db: 'userAcquisitionDB', collection: 'DABC' } + actions: ['insert'] + + # Composite of 2nd degree inheritors + - OperationName: RunCommand + OperationIsQuiet: true + OperationCommand: + createRole: 'userAcquisitionRoleAll' + roles: + - userAcquisitionRoleABCD + - userAcquisitionRoleBCDA + - userAcquisitionRoleCDAB + - userAcquisitionRoleDABC + privileges: + - resource: { db: 'userAcquisitionDB', collection: 'All' } + actions: ['insert'] + + - OperationName: RunCommand + OperationIsQuiet: true + OperationCommand: + createUser: 'testUserAcquisition' + pwd: 'pwd' + roles: + - { db: 'admin', role: 'userAcquisitionRoleAll' } - Phase: 1 Duration: 5 minutes Database: admin Operations: - # Make sure the user is ejected from the read-through cache. + # Make sure the user is ejected from the read-through cache. 
- OperationName: RunCommand OperationIsQuiet: true OperationCommand: {invalidateUserCache: 1} diff --git a/src/workloads/execution/ValidateCmd.yml b/src/workloads/execution/ValidateCmd.yml index 7bd180dec9..562be8b499 100644 --- a/src/workloads/execution/ValidateCmd.yml +++ b/src/workloads/execution/ValidateCmd.yml @@ -2,6 +2,8 @@ SchemaVersion: 2018-07-01 Owner: "@mongodb/server-execution" +Description: | + TODO: TIG-3322 Actors: - Name: InsertData @@ -33,7 +35,7 @@ Actors: - keys: {x: 1, y: 1} - keys: {x: 1, y: 1, b: 1} - keys: {arr: 1, z: 1} - - keys: {arr: 1, b : 1} + - keys: {arr: 1, b: 1} - keys: {a: "text"} - keys: {a: 1} options: {sparse: true} @@ -56,6 +58,6 @@ Actors: validate: Collection0 AutoRun: - - When: - mongodb_setup: - $eq: standalone +- When: + mongodb_setup: + $eq: standalone diff --git a/src/workloads/issues/ConnectionsBuildup.yml b/src/workloads/issues/ConnectionsBuildup.yml index 2669abfc96..6390ad9fe4 100644 --- a/src/workloads/issues/ConnectionsBuildup.yml +++ b/src/workloads/issues/ConnectionsBuildup.yml @@ -1,6 +1,8 @@ SchemaVersion: 2018-07-01 Owner: "@mongodb/product-perf" # this workload created to reproduce SERVER-53853 +Description: | + TODO: TIG-3318 Clients: Default: @@ -34,7 +36,7 @@ Actors: - OperationMetricsName: ShardCollection OperationName: AdminCommand OperationCommand: - shardCollection: test.Collection0 # Collection0 is the default collection populated by the Loader. + shardCollection: test.Collection0 # Collection0 is the default collection populated by the Loader. 
key: _id: 1 - *Nop @@ -83,18 +85,18 @@ Actors: # to avoid connection closing - Name: LoggingActor Type: LoggingActor - Threads: 1 # must be 1 + Threads: 1 # must be 1 Phases: - - LogEvery: 10 second # TimeSpec - Blocking: None # must be Blocking:None - - LogEvery: 10 second # TimeSpec - Blocking: None # must be Blocking:None - - LogEvery: 10 second # TimeSpec - Blocking: None # must be Blocking:None - - LogEvery: 1 minute # TimeSpec - Blocking: None # must be Blocking:None + - LogEvery: 10 second # TimeSpec + Blocking: None # must be Blocking:None + - LogEvery: 10 second # TimeSpec + Blocking: None # must be Blocking:None + - LogEvery: 10 second # TimeSpec + Blocking: None # must be Blocking:None + - LogEvery: 1 minute # TimeSpec + Blocking: None # must be Blocking:None AutoRun: - - When: - mongodb_setup: - $eq: shard-single +- When: + mongodb_setup: + $eq: shard-single diff --git a/src/workloads/networking/CommitLatency.yml b/src/workloads/networking/CommitLatency.yml index 5f2e626d7d..094a9718cf 100644 --- a/src/workloads/networking/CommitLatency.yml +++ b/src/workloads/networking/CommitLatency.yml @@ -4,6 +4,8 @@ SchemaVersion: 2018-07-01 Owner: "@mongodb/product-perf" +Description: | + TODO: TIG-3318 Actors: - Name: Load phase diff --git a/src/workloads/networking/CommitLatencySingleUpdate.yml b/src/workloads/networking/CommitLatencySingleUpdate.yml index 1b5fb9b073..c0bd1b39ca 100644 --- a/src/workloads/networking/CommitLatencySingleUpdate.yml +++ b/src/workloads/networking/CommitLatencySingleUpdate.yml @@ -2,6 +2,8 @@ SchemaVersion: 2018-07-01 Owner: "@mongodb/product-perf" +Description: | + TODO: TIG-3318 Actors: - Name: SingleThreadUpdate diff --git a/src/workloads/networking/SecondaryAllowed.yml b/src/workloads/networking/SecondaryAllowed.yml index b49df14c3a..0a05eb07e6 100644 --- a/src/workloads/networking/SecondaryAllowed.yml +++ b/src/workloads/networking/SecondaryAllowed.yml @@ -1,5 +1,7 @@ SchemaVersion: 2018-07-01 Owner: 
"@mongodb/service-architecture" +Description: | + TODO: TIG-3323 Clients: Default: diff --git a/src/workloads/networking/ServiceArchitectureWorkloads.yml b/src/workloads/networking/ServiceArchitectureWorkloads.yml index 0b4405f1e0..e621a61605 100644 --- a/src/workloads/networking/ServiceArchitectureWorkloads.yml +++ b/src/workloads/networking/ServiceArchitectureWorkloads.yml @@ -1,5 +1,7 @@ SchemaVersion: 2018-07-01 Owner: "@mongodb/service-architecture" +Description: | + TODO: TIG-3323 Clients: Default: @@ -70,10 +72,10 @@ Actors: Nop: true AutoRun: - - When: - mongodb_setup: - $eq: - - atlas - - replica - - replica-noflowcontrol - - replica-all-feature-flags +- When: + mongodb_setup: + $eq: + - atlas + - replica + - replica-noflowcontrol + - replica-all-feature-flags diff --git a/src/workloads/networking/TransportLayerConnectTiming.yml b/src/workloads/networking/TransportLayerConnectTiming.yml index 6adf1f0bf4..c331b37cc0 100644 --- a/src/workloads/networking/TransportLayerConnectTiming.yml +++ b/src/workloads/networking/TransportLayerConnectTiming.yml @@ -27,6 +27,6 @@ Actors: replSetTestEgress: 1 AutoRun: - - When: - mongodb_setup: - $eq: replica-auth-cluster-delay +- When: + mongodb_setup: + $eq: replica-auth-cluster-delay diff --git a/src/workloads/scale/AuthNInsert.yml b/src/workloads/scale/AuthNInsert.yml index 8aedce5ac8..0009158ea1 100644 --- a/src/workloads/scale/AuthNInsert.yml +++ b/src/workloads/scale/AuthNInsert.yml @@ -1,5 +1,7 @@ SchemaVersion: 2018-07-01 Owner: "@mongodb/product-perf" +Description: | + TODO: TIG-3318 Clients: Default: diff --git a/src/workloads/scale/BigUpdate.yml b/src/workloads/scale/BigUpdate.yml index d20ebcd8f8..4e16e3bb0e 100644 --- a/src/workloads/scale/BigUpdate.yml +++ b/src/workloads/scale/BigUpdate.yml @@ -7,6 +7,8 @@ SchemaVersion: 2018-07-01 Owner: "@mongodb/product-perf" +Description: | + TODO: TIG-3318 Clients: Default: @@ -118,13 +120,13 @@ Actors: Limit: 20 AutoRun: - - When: - mongodb_setup: - $eq: - - atlas - - 
replica - - replica-noflowcontrol - - replica-1dayhistory-15gbwtcache - - replica-all-feature-flags - - single-replica - - standalone +- When: + mongodb_setup: + $eq: + - atlas + - replica + - replica-noflowcontrol + - replica-1dayhistory-15gbwtcache + - replica-all-feature-flags + - single-replica + - standalone diff --git a/src/workloads/scale/BigUpdate10k.yml b/src/workloads/scale/BigUpdate10k.yml index a14bfcb7b1..da995ab7d5 100644 --- a/src/workloads/scale/BigUpdate10k.yml +++ b/src/workloads/scale/BigUpdate10k.yml @@ -8,6 +8,8 @@ SchemaVersion: 2018-07-01 Owner: "@mongodb/product-perf" +Description: | + TODO: TIG-3318 Clients: Default: diff --git a/src/workloads/scale/CollScan.yml b/src/workloads/scale/CollScan.yml index 0366f3efee..abfd503937 100644 --- a/src/workloads/scale/CollScan.yml +++ b/src/workloads/scale/CollScan.yml @@ -2,6 +2,8 @@ SchemaVersion: 2018-07-01 Owner: "@mongodb/product-perf" +Description: | + TODO: TIG-3318 Actors: diff --git a/src/workloads/scale/InCacheSnapshotReads.yml b/src/workloads/scale/InCacheSnapshotReads.yml index d0498e13aa..ebcb6740e0 100644 --- a/src/workloads/scale/InCacheSnapshotReads.yml +++ b/src/workloads/scale/InCacheSnapshotReads.yml @@ -1,5 +1,7 @@ SchemaVersion: 2018-07-01 Owner: Replication +Description: | + TODO: TIG-3321 Clients: Default: @@ -76,6 +78,6 @@ Actors: ScanType: point-in-time AutoRun: - - When: - mongodb_setup: - $eq: replica-1dayhistory-15gbwtcache +- When: + mongodb_setup: + $eq: replica-1dayhistory-15gbwtcache diff --git a/src/workloads/scale/InsertBigDocs.yml b/src/workloads/scale/InsertBigDocs.yml index 305888ad98..6d33413805 100644 --- a/src/workloads/scale/InsertBigDocs.yml +++ b/src/workloads/scale/InsertBigDocs.yml @@ -2,6 +2,8 @@ SchemaVersion: 2018-07-01 Owner: "@mongodb/replication" +Description: | + TODO: TIG-3321 Actors: @@ -20,10 +22,10 @@ Actors: string0: {^FastRandomString: {length: 15000000}} AutoRun: - - When: - mongodb_setup: - $eq: - - atlas - - replica - - 
replica-all-feature-flags - - single-replica +- When: + mongodb_setup: + $eq: + - atlas + - replica + - replica-all-feature-flags + - single-replica diff --git a/src/workloads/scale/InsertRemove.yml b/src/workloads/scale/InsertRemove.yml index d3232133b8..c92853710d 100644 --- a/src/workloads/scale/InsertRemove.yml +++ b/src/workloads/scale/InsertRemove.yml @@ -1,5 +1,7 @@ SchemaVersion: 2018-07-01 Owner: "@mongodb/product-perf" +Description: | + TODO: TIG-3318 # Demonstrate the InsertRemove actor. The InsertRemove actor is a simple actor that inserts and then # removes the same document from a collection in a loop. Each instance of the actor uses a different @@ -15,13 +17,13 @@ Actors: Database: test AutoRun: - - When: - mongodb_setup: - $eq: - - atlas - - replica - - single-replica - - standalone - - shard - - replica-1dayhistory-15gbwtcache - - replica-all-feature-flags +- When: + mongodb_setup: + $eq: + - atlas + - replica + - single-replica + - standalone + - shard + - replica-1dayhistory-15gbwtcache + - replica-all-feature-flags diff --git a/src/workloads/scale/LargeIndexedIns.yml b/src/workloads/scale/LargeIndexedIns.yml index 30b330f8f9..3b621f81d6 100644 --- a/src/workloads/scale/LargeIndexedIns.yml +++ b/src/workloads/scale/LargeIndexedIns.yml @@ -28,7 +28,7 @@ Actors: DocumentCount: 1000 BatchSize: 1000 Indexes: - - keys: {hash : 1, key : 1, string1 : 1, string2 : 1} + - keys: {hash: 1, key: 1, string1: 1, string2: 1} - keys: {key: 1} Document: _id: {^Inc: {start: 0}} @@ -74,6 +74,6 @@ Actors: $inc: {a: 1} AutoRun: - - When: - mongodb_setup: - $eq: replica \ No newline at end of file +- When: + mongodb_setup: + $eq: replica diff --git a/src/workloads/scale/LargeScaleLongLived.yml b/src/workloads/scale/LargeScaleLongLived.yml index 2315373788..7cf8d993f1 100644 --- a/src/workloads/scale/LargeScaleLongLived.yml +++ b/src/workloads/scale/LargeScaleLongLived.yml @@ -1,5 +1,7 @@ SchemaVersion: 2018-07-01 Owner: Storage Engines +Description: | + TODO: TIG-3319 
Clients: Default: @@ -80,13 +82,13 @@ Actors: Update: {$inc: {x1: 1}} AutoRun: - - When: - mongodb_setup: - $eq: - - atlas - - replica - - replica-all-feature-flags - - single-replica - - single-replica-15gbwtcache - - shard - - standalone +- When: + mongodb_setup: + $eq: + - atlas + - replica + - replica-all-feature-flags + - single-replica + - single-replica-15gbwtcache + - shard + - standalone diff --git a/src/workloads/scale/LargeScaleModel.yml b/src/workloads/scale/LargeScaleModel.yml index d34d58150f..b962cf1132 100644 --- a/src/workloads/scale/LargeScaleModel.yml +++ b/src/workloads/scale/LargeScaleModel.yml @@ -1,5 +1,7 @@ SchemaVersion: 2018-07-01 Owner: Storage Engines +Description: | + TODO: TIG-3319 # This is the "model" workload for the Large Scale Workload Automation project. # At present, it is set up to run the basic workload, but will evolve. @@ -37,10 +39,10 @@ GlobalDefaults: AllDB: &AllDB hotdoc,hotcoll,rolling,longlived,scancold # According to the design, the number of writes should add up to 10K second. - LongLivedWrites: &LongLivedWrites 600 per 1 second # x5 threads - RollingWrites: &RollingWrites 1000 per 1 second # x40 threads - HotDocumentWrites: &HotDocumentWrites 1000 per 1 second # x40 threads - HotCollectionWrites: &HotCollectionWrites 1000 per 1 second # x40 threads + LongLivedWrites: &LongLivedWrites 600 per 1 second # x5 threads + RollingWrites: &RollingWrites 1000 per 1 second # x40 threads + HotDocumentWrites: &HotDocumentWrites 1000 per 1 second # x40 threads + HotCollectionWrites: &HotCollectionWrites 1000 per 1 second # x40 threads # According to the design, the number of reads should add up to 1K second. LongLivedReads: &LongLivedReads 20 per 1 second # x10 threads @@ -273,9 +275,9 @@ Actors: # can sometimes causes failures. Threads: 5 Phases: - # Each collection is ~100M. Assuming N is equal to ScannerColdDBGigabytes. 
- # Each thread has N collections, and with 10 threads, that's - # 10*N collections, or N gigabytes of space. + # Each collection is ~100M. Assuming N is equal to ScannerColdDBGigabytes. + # Each thread has N collections, and with 10 threads, that's + # 10*N collections, or N gigabytes of space. - LoadConfig: Path: ../../phases/scale/LargeScalePhases.yml Key: ScannerLoaderCmd diff --git a/src/workloads/scale/LargeScaleParallel.yml b/src/workloads/scale/LargeScaleParallel.yml index 7e7e7dfd16..93ad70959d 100644 --- a/src/workloads/scale/LargeScaleParallel.yml +++ b/src/workloads/scale/LargeScaleParallel.yml @@ -1,4 +1,6 @@ SchemaVersion: 2018-07-01 +Description: | + TODO: TIG-3318 Clients: Default: diff --git a/src/workloads/scale/LargeScaleSerial.yml b/src/workloads/scale/LargeScaleSerial.yml index 2a87134fe0..8e5ac1a7fa 100644 --- a/src/workloads/scale/LargeScaleSerial.yml +++ b/src/workloads/scale/LargeScaleSerial.yml @@ -1,4 +1,7 @@ SchemaVersion: 2018-07-01 +Owner: Storage Engines +Description: | + TODO: TIG-3319 Clients: Default: diff --git a/src/workloads/scale/MassDeleteRegression.yml b/src/workloads/scale/MassDeleteRegression.yml index 3702db5902..d8d4727419 100644 --- a/src/workloads/scale/MassDeleteRegression.yml +++ b/src/workloads/scale/MassDeleteRegression.yml @@ -54,6 +54,6 @@ Actors: Filter: {_id: {$lte: *docs}} AutoRun: - - When: - mongodb_setup: - $eq: replica +- When: + mongodb_setup: + $eq: replica diff --git a/src/workloads/scale/MixedWorkloadsGenny.yml b/src/workloads/scale/MixedWorkloadsGenny.yml index 3adb9c45b9..7da04ddab7 100644 --- a/src/workloads/scale/MixedWorkloadsGenny.yml +++ b/src/workloads/scale/MixedWorkloadsGenny.yml @@ -193,7 +193,7 @@ Actors: OnlyActiveInPhase: 2 - ActorFromTemplate: - TemplateName: RemoveTemplate + TemplateName: RemoveTemplate TemplateParameters: Name: Remove_16 Threads: 16 @@ -230,7 +230,7 @@ Actors: OnlyActiveInPhase: 2 - ActorFromTemplate: - TemplateName: InsertTemplate + TemplateName: InsertTemplate 
TemplateParameters: Name: Insert_16 Threads: 16 @@ -267,7 +267,7 @@ Actors: OnlyActiveInPhase: 2 - ActorFromTemplate: - TemplateName: FindTemplate + TemplateName: FindTemplate TemplateParameters: Name: Find_16 Threads: 16 @@ -295,14 +295,14 @@ Actors: OnlyActiveInPhase: 10 AutoRun: - - When: - mongodb_setup: - $eq: - - atlas - - replica - - single-replica - - standalone - - replica-noflowcontrol - - replica-1dayhistory-15gbwtcache - - replica-maintenance-events - - replica-all-feature-flags +- When: + mongodb_setup: + $eq: + - atlas + - replica + - single-replica + - standalone + - replica-noflowcontrol + - replica-1dayhistory-15gbwtcache + - replica-maintenance-events + - replica-all-feature-flags diff --git a/src/workloads/scale/MixedWrites.yml b/src/workloads/scale/MixedWrites.yml index f7f4ef6930..b77d667739 100644 --- a/src/workloads/scale/MixedWrites.yml +++ b/src/workloads/scale/MixedWrites.yml @@ -1,5 +1,5 @@ SchemaVersion: 2018-07-01 -Owner: "@mongodb/stm" +Owner: "@10gen/dev-prod-tips" Description: | Does w:2 writes for a Phase followed by w:3 writes for a second Phase. 
@@ -36,9 +36,9 @@ Actors: Timeout: 6000 milliseconds AutoRun: - - When: - infrastructure_provisioning: - $eq: replica - ThenRun: - - mongodb_setup: replica-delay-mixed - - mongodb_setup: replica +- When: + infrastructure_provisioning: + $eq: replica + ThenRun: + - mongodb_setup: replica-delay-mixed + - mongodb_setup: replica diff --git a/src/workloads/scale/OutOfCacheScanner.yml b/src/workloads/scale/OutOfCacheScanner.yml index f1b9b27077..4a328ffe88 100644 --- a/src/workloads/scale/OutOfCacheScanner.yml +++ b/src/workloads/scale/OutOfCacheScanner.yml @@ -1,5 +1,7 @@ SchemaVersion: 2018-07-01 Owner: Storage Engines +Description: | + TODO: TIG-3319 Clients: Default: @@ -54,6 +56,6 @@ Actors: ScanType: Standard AutoRun: - - When: - mongodb_setup: - $eq: single-replica-15gbwtcache +- When: + mongodb_setup: + $eq: single-replica-15gbwtcache diff --git a/src/workloads/scale/OutOfCacheSnapshotReads.yml b/src/workloads/scale/OutOfCacheSnapshotReads.yml index 1e348e054c..8dbf766d8f 100644 --- a/src/workloads/scale/OutOfCacheSnapshotReads.yml +++ b/src/workloads/scale/OutOfCacheSnapshotReads.yml @@ -1,5 +1,7 @@ SchemaVersion: 2018-07-01 Owner: Replication +Description: | + TODO: TIG-3321 Clients: Default: @@ -76,6 +78,6 @@ Actors: ScanType: point-in-time AutoRun: - - When: - mongodb_setup: - $eq: replica-1dayhistory-15gbwtcache +- When: + mongodb_setup: + $eq: replica-1dayhistory-15gbwtcache diff --git a/src/workloads/scale/ReplaceMillionDocsInSeparateTxns.yml b/src/workloads/scale/ReplaceMillionDocsInSeparateTxns.yml index 8245787d58..a7ee5b3f34 100644 --- a/src/workloads/scale/ReplaceMillionDocsInSeparateTxns.yml +++ b/src/workloads/scale/ReplaceMillionDocsInSeparateTxns.yml @@ -3,8 +3,10 @@ SchemaVersion: 2018-07-01 Owner: "@mongodb/sharding" +Description: | + TODO: TIG-3320 -Document: &Doc # The size of each document is about 2kb. +Document: &Doc # The size of each document is about 2kb. 
a: 1 x: {^RandomInt: {min: 0, max: 2147483647}} string0: {^FastRandomString: {length: 2000}} @@ -133,7 +135,7 @@ Actors: - *Nop - MetricsName: TotalModificationTime Repeat: 10000 - Collection: &Coll Collection0 # This is the default collection populated by the Loader. + Collection: &Coll Collection0 # This is the default collection populated by the Loader. ThrowOnFailure: false # Transactions are allowed to fail. RecordFailure: true # We still want to record the metrics even if the transaction is rolled back. Operations: diff --git a/src/workloads/scale/ScanWithLongLived.yml b/src/workloads/scale/ScanWithLongLived.yml index 5624d33678..7dc82e486c 100644 --- a/src/workloads/scale/ScanWithLongLived.yml +++ b/src/workloads/scale/ScanWithLongLived.yml @@ -7,14 +7,14 @@ Description: | GlobalDefaults: TrackProportion: &TrackProportion 0 - LongLivedDB: &LongLivedDB longlived # name of the long lived database + LongLivedDB: &LongLivedDB longlived # name of the long lived database LongLivedCollectionCount: &LongLivedCollectionCount 1000 LongLivedDocumentCount: &LongLivedDocumentCount 1000 ScannerHotDB: &ScannerHotDB rolling - LongLivedWrites: &LongLivedWrites 600 per 1 second # x5 threads - LongLivedReads: &LongLivedReads 20 per 1 second # x10 threads + LongLivedWrites: &LongLivedWrites 600 per 1 second # x5 threads + LongLivedReads: &LongLivedReads 20 per 1 second # x10 threads binomial_10k_int: &binomial_10k_int {^RandomInt: {distribution: binomial, t: 1000, p: 0.5}} @@ -98,8 +98,8 @@ Actors: Key: SnapshotScanner10GigabytesFixedRateCmd AutoRun: - - When: - mongodb_setup: - $eq: - - single-replica - - replica-noflowcontrol +- When: + mongodb_setup: + $eq: + - single-replica + - replica-noflowcontrol diff --git a/src/workloads/scale/UniqueIndexStress.yml b/src/workloads/scale/UniqueIndexStress.yml index 3ceb0d46c7..4c5ad7b5ca 100644 --- a/src/workloads/scale/UniqueIndexStress.yml +++ b/src/workloads/scale/UniqueIndexStress.yml @@ -5,6 +5,8 @@ SchemaVersion: 2018-07-01 
Owner: "@mongodb/product-perf" +Description: | + TODO: TIG-3318 string_length: &string_length 10 insert_threads: &insert_threads 1 @@ -361,4 +363,3 @@ Actors: - Phase: 0..14 Nop: true - *InsertMany - diff --git a/src/workloads/scale/UpdateMillionDocsInTxn.yml b/src/workloads/scale/UpdateMillionDocsInTxn.yml index cc06da2d23..f298a0b5c4 100644 --- a/src/workloads/scale/UpdateMillionDocsInTxn.yml +++ b/src/workloads/scale/UpdateMillionDocsInTxn.yml @@ -5,6 +5,8 @@ SchemaVersion: 2018-07-01 Owner: "@mongodb/sharding" +Description: | + TODO: TIG-3320 Actors: - Name: Loader @@ -48,7 +50,7 @@ Actors: - *Nop - MetricsName: TransactionUpdate Repeat: 5 - Collection: &Coll Collection0 # This is the default collection populated by the Loader. + Collection: &Coll Collection0 # This is the default collection populated by the Loader. ThrowOnFailure: false # Transactions are allowed to fail. RecordFailure: true # We still want to record the metrics even if the transaction is rolled back. Operations: @@ -68,7 +70,7 @@ Actors: OperationCommand: WriteOperations: - WriteCommand: updateMany - Filter: {x: { $exists: true }} # This will match all documents in the collection. + Filter: {x: { $exists: true }} # This will match all documents in the collection. Update: {$inc: {x: 1}} Options: Ordered: true diff --git a/src/workloads/selftests/GennyOverhead.yml b/src/workloads/selftests/GennyOverhead.yml index 4fa23e312b..123a719880 100644 --- a/src/workloads/selftests/GennyOverhead.yml +++ b/src/workloads/selftests/GennyOverhead.yml @@ -1,5 +1,5 @@ SchemaVersion: 2018-07-01 -Owner: "@mongodb/stm" +Owner: "@10gen/dev-prod-tips" Description: | @@ -21,6 +21,12 @@ Description: | can be added as needed, but unfortunately the "5 phases" building block can't be reused en masse because YAML doesn't support merging lists. + The primary metric recorded in this workload is the average duration + of each iteration. 
+ + This workload is intended to stress the Genny client itself, so should + be run with the smallest MongoDB setup. + Actors: - Name: GennyOverhead100T Type: NopMetrics @@ -63,6 +69,6 @@ Actors: SleepAfter: 1 millisecond AutoRun: - - When: - mongodb_setup: - $eq: standalone +- When: + mongodb_setup: + $eq: standalone diff --git a/src/workloads/sharding/ReshardCollection.yml b/src/workloads/sharding/ReshardCollection.yml index 25ec64b5de..f46fa69929 100644 --- a/src/workloads/sharding/ReshardCollection.yml +++ b/src/workloads/sharding/ReshardCollection.yml @@ -139,6 +139,6 @@ Actors: Operations: *ReadWriteOperations AutoRun: - - When: - mongodb_setup: - $eq: shard-lite-all-feature-flags +- When: + mongodb_setup: + $eq: shard-lite-all-feature-flags diff --git a/src/workloads/sharding/WouldChangeOwningShardBatchWrite.yml b/src/workloads/sharding/WouldChangeOwningShardBatchWrite.yml index 855f65ce1c..ddee8b4417 100644 --- a/src/workloads/sharding/WouldChangeOwningShardBatchWrite.yml +++ b/src/workloads/sharding/WouldChangeOwningShardBatchWrite.yml @@ -5,7 +5,7 @@ Description: | the shard key to trigger WouldChangeOwningShard errors. The workload consists of 3 phases: - 1. Shard an empty collection (using ranged sharding) into two chunks and wait for the balancer + 1. Shard an empty collection (using ranged sharding) into two chunks and wait for the balancer to distribute each chunk to its own shard. 2. Populate the sharded collection with data. 3. Update the shard key value to trigger WouldChangeOwningShard errors. @@ -19,7 +19,7 @@ GlobalDefaults: Namespace: &Namespace test.Collection0 DocumentCount: &DocumentCount 10000 # Number of documents to insert and modify. - BalancerWait: &BalancerWait "10 seconds" # Wait out the 10 second delay between balancing rounds + BalancerWait: &BalancerWait "10 seconds" # Wait out the 10 second delay between balancing rounds # with a little extra leeway for balancing to finish. 
Actors: @@ -66,7 +66,7 @@ Actors: - Name: UpdateShardKey Type: CrudActor - Threads: 1 # We want to use 1 thread to avoid updates throwing WriteConflict errors. + Threads: 1 # We want to use 1 thread to avoid updates throwing WriteConflict errors. Phases: - *Nop - *Nop diff --git a/src/workloads/transactions/LLTAnalytics.yml b/src/workloads/transactions/LLTAnalytics.yml index 9d9123eb6b..91d652a7c3 100644 --- a/src/workloads/transactions/LLTAnalytics.yml +++ b/src/workloads/transactions/LLTAnalytics.yml @@ -31,14 +31,14 @@ GlobalDefaults: price: {^RandomDouble: {min: 0.0, max: 1000.0}} data: {^Join: {array: ["aaaaaaaaaa", {^FastRandomString: {length: {^RandomInt: {min: 0, max: 10}}}}]}} orders: {^Array: {of: {ts: { ^RandomDate: { min: "2011-01-01", max: "2021-01-01" }}, - quantity: { ^RandomInt: { min: 1, max: 100 } }, - unitPrice: { ^RandomDouble: {min: 0.0, max: 1000.0 } } }, + quantity: { ^RandomInt: { min: 1, max: 100 } }, + unitPrice: { ^RandomDouble: {min: 0.0, max: 1000.0 } } }, number: { ^RandomInt: { min: 0, max: 10 }} }} LLTIndexes: &LLTIndexes - - keys: {price: 1, ts: 1, cuid: 1} # Ptc - - keys: {price: 1, cuid: 1} # Pc - - keys: {caid: 1, price: 1, cuid: 1} # Cpc + - keys: {price: 1, ts: 1, cuid: 1} # Ptc + - keys: {price: 1, cuid: 1} # Pc + - keys: {caid: 1, price: 1, cuid: 1} # Cpc # Loader Config. 
LoadThreads: &LoadThreads 4 @@ -53,7 +53,7 @@ GlobalDefaults: PtcUpdateOperation: &PtcUpdateOperation OperationName: updateOne OperationCommand: - Filter: {price: {'$gte' : {^RandomDouble: { min: 0.0, max: 500.0 }}}} + Filter: {price: {'$gte': {^RandomDouble: { min: 0.0, max: 500.0 }}}} Update: $set: ts: {^Now: {}} @@ -67,7 +67,7 @@ GlobalDefaults: PcUpdateOperation: &PcUpdateOperation OperationName: updateOne OperationCommand: - Filter: {'price': {'$gte' : {^RandomDouble: { min: 0.0, max: 500.0 }}}} + Filter: {'price': {'$gte': {^RandomDouble: { min: 0.0, max: 500.0 }}}} Update: $set: ts: {^Now: {}} @@ -81,7 +81,7 @@ GlobalDefaults: CpcUpdateOperation: &CpcUpdateOperation OperationName: updateOne OperationCommand: - Filter: {'caid': {'$gte': {^RandomInt: { min: 0, max: 1000 }}}} + Filter: {'caid': {'$gte': {^RandomInt: { min: 0, max: 1000 }}}} Update: $set: ts: {^Now: {}} @@ -98,76 +98,76 @@ GlobalDefaults: SnapshotScannerLongDuration: &SnapshotScannerLongDuration 60 minutes ActorTemplates: - - TemplateName: UpdateTemplate - Config: - Name: {^Parameter: {Name: "Name", Default: "Short.Update.Baseline"}} - Type: CrudActor - Threads: *ThreadsValue - Phases: - OnlyActiveInPhases: - Active: {^Parameter: {Name: "Active", Default: [5]}} - NopInPhasesUpTo: *MaxPhases - PhaseConfig: - GlobalRate: *GlobalRateValue - Threads: *ThreadsValue - CollectionCount: *CollectionCount - Database: *dbname - Duration: {^Parameter: {Name: "Duration", Default: *SnapshotScannerShortDuration}} - Blocking: {^Parameter: {Name: "Blocking", Default: yes}} - Operations: - - *PtcUpdateOperation - - *PcUpdateOperation - - *CpcUpdateOperation - - *PtcUpdateOperation - - *PcUpdateOperation - - *CpcUpdateOperation - - *PtcUpdateOperation - - *PcUpdateOperation - - *CpcUpdateOperation - - *PtcUpdateOperation - - *PcUpdateOperation - - *CpcUpdateOperation - - *PtcUpdateOperation - - *PcUpdateOperation - - *CpcUpdateOperation - - *PtcUpdateOperation - - *PcUpdateOperation - - *CpcUpdateOperation - - 
*PtcUpdateOperation - - *PcUpdateOperation - - *CpcUpdateOperation - - *PtcUpdateOperation - - *PcUpdateOperation - - *CpcUpdateOperation +- TemplateName: UpdateTemplate + Config: + Name: {^Parameter: {Name: "Name", Default: "Short.Update.Baseline"}} + Type: CrudActor + Threads: *ThreadsValue + Phases: + OnlyActiveInPhases: + Active: {^Parameter: {Name: "Active", Default: [5]}} + NopInPhasesUpTo: *MaxPhases + PhaseConfig: + GlobalRate: *GlobalRateValue + Threads: *ThreadsValue + CollectionCount: *CollectionCount + Database: *dbname + Duration: {^Parameter: {Name: "Duration", Default: *SnapshotScannerShortDuration}} + Blocking: {^Parameter: {Name: "Blocking", Default: true}} + Operations: + - *PtcUpdateOperation + - *PcUpdateOperation + - *CpcUpdateOperation + - *PtcUpdateOperation + - *PcUpdateOperation + - *CpcUpdateOperation + - *PtcUpdateOperation + - *PcUpdateOperation + - *CpcUpdateOperation + - *PtcUpdateOperation + - *PcUpdateOperation + - *CpcUpdateOperation + - *PtcUpdateOperation + - *PcUpdateOperation + - *CpcUpdateOperation + - *PtcUpdateOperation + - *PcUpdateOperation + - *CpcUpdateOperation + - *PtcUpdateOperation + - *PcUpdateOperation + - *CpcUpdateOperation + - *PtcUpdateOperation + - *PcUpdateOperation + - *CpcUpdateOperation - - TemplateName: ScanTemplate - Config: - Name: {^Parameter: {Name: "Name", Default: "Short.Scan.Snapshot"}} - Type: CollectionScanner - Threads: *CollectionCount - CollectionCount: *CollectionCount - Database: *dbname - GenerateCollectionNames: true - Phases: - OnlyActiveInPhases: - Active: {^Parameter: {Name: "Active", Default: [7]}}# [7] - NopInPhasesUpTo: *MaxPhases - PhaseConfig: - Duration: {^Parameter: {Name: "Duration", Default: *SnapshotScannerShortDuration}} - #ScanDuration: {^Parameter: {Name: "ScanDuration", Default: *SnapshotScannerShortDuration}} - ScanType: snapshot - #ScanContinuous: true - # GenerateCollectionNames: true - AggregatePipeline: - {array: [{"$match":{"cuid":{"$gte":500}}}, - {"$unwind": 
"$orders"}, - {"$group": {"_id": "$ts", "total": {"$sum":{ $multiply: [ "$orders.unitPrice", "$orders.quantity" ] }}}}, - {"$sort": {"_id":1}}] - } - AggregateOptions: - BatchSize: 1000 - Comment: {^Parameter: {Name: "Comment", Default: "Aggregate.Snapshot"}} - AllowDiskUse: true # Naming convention follows c++ driver rather than mongo shell. +- TemplateName: ScanTemplate + Config: + Name: {^Parameter: {Name: "Name", Default: "Short.Scan.Snapshot"}} + Type: CollectionScanner + Threads: *CollectionCount + CollectionCount: *CollectionCount + Database: *dbname + GenerateCollectionNames: true + Phases: + OnlyActiveInPhases: + Active: {^Parameter: {Name: "Active", Default: [7]}} # [7] + NopInPhasesUpTo: *MaxPhases + PhaseConfig: + Duration: {^Parameter: {Name: "Duration", Default: *SnapshotScannerShortDuration}} + # ScanDuration: {^Parameter: {Name: "ScanDuration", Default: *SnapshotScannerShortDuration}} + ScanType: snapshot + # ScanContinuous: true + # GenerateCollectionNames: true + AggregatePipeline: + {array: [{"$match":{"cuid":{"$gte":500}}}, + {"$unwind": "$orders"}, + {"$group": {"_id": "$ts", "total": {"$sum":{ $multiply: ["$orders.unitPrice", "$orders.quantity"] }}}}, + {"$sort": {"_id":1}}] + } + AggregateOptions: + BatchSize: 1000 + Comment: {^Parameter: {Name: "Comment", Default: "Aggregate.Snapshot"}} + AllowDiskUse: true # Naming convention follows c++ driver rather than mongo shell. 
Clients: Default: @@ -192,7 +192,7 @@ Actors: OperationName: RunCommand OperationCommand: setParameter: 1 - transactionLifetimeLimitSeconds: 14400 # 4 Hours + transactionLifetimeLimitSeconds: 14400 # 4 Hours - Name: InitialLoad Type: Loader @@ -280,12 +280,12 @@ Actors: Active: [7] Comment: SnapshotAggregateLong Duration: *SnapshotScannerLongDuration - #ScanDuration: *SnapshotScannerLongDuration + # ScanDuration: *SnapshotScannerLongDuration -#AutoRun: -# Requires: -# mongodb_setup: -# - atlas -# - replica -# - replica-all-feature-flags -# - single-replica +# AutoRun: +# Requires: +# mongodb_setup: +# - atlas +# - replica +# - replica-all-feature-flags +# - single-replica diff --git a/src/workloads/transactions/LLTMixed.yml b/src/workloads/transactions/LLTMixed.yml index 02655fe383..c894f54a77 100644 --- a/src/workloads/transactions/LLTMixed.yml +++ b/src/workloads/transactions/LLTMixed.yml @@ -26,9 +26,9 @@ GlobalDefaults: data: {^Join: {array: ["aaaaaaaaaa", {^FastRandomString: {length: {^RandomInt: {min: 0, max: 10}}}}]}} LLTIndexes: &LLTIndexes - - keys: {price: 1, ts: 1, cuid: 1} # Ptc - - keys: {price: 1, cuid: 1} # Pc - - keys: {caid: 1, price: 1, cuid: 1} # Cpc + - keys: {price: 1, ts: 1, cuid: 1} # Ptc + - keys: {price: 1, cuid: 1} # Pc + - keys: {caid: 1, price: 1, cuid: 1} # Cpc # Loader Config. 
LoadThreads: &LoadThreads 4 @@ -72,7 +72,7 @@ GlobalDefaults: CpcQueryOperation: &CpcQueryOperation OperationName: findOne OperationCommand: - Filter: {caid: {$gte: {^RandomInt: { min: 0, max: 1000 }}}} + Filter: {caid: {$gte: {^RandomInt: { min: 0, max: 1000 }}}} Options: Hint: caid_1_price_1_cuid_1 Comment: CpcQueryOperation @@ -109,7 +109,7 @@ GlobalDefaults: CpcUpdateOperation: &CpcUpdateOperation OperationName: updateOne OperationCommand: - Filter: {caid: {$gte: {^RandomInt: { min: 0, max: 1000 }}}} + Filter: {caid: {$gte: {^RandomInt: { min: 0, max: 1000 }}}} Update: $set: ts: {^Now: {}} @@ -141,7 +141,7 @@ GlobalDefaults: CpcRemoveOperation: &CpcRemoveOperation OperationName: deleteOne OperationCommand: - Filter: {caid: {$gte: {^RandomInt: { min: 0, max: 1000 }}}} + Filter: {caid: {$gte: {^RandomInt: { min: 0, max: 1000 }}}} OperationOptions: WriteConcern: Level: majority @@ -152,130 +152,130 @@ GlobalDefaults: SnapshotScannerLongDuration: &SnapshotScannerLongDuration 60 minutes ActorTemplates: - - TemplateName: InsertTemplate - Config: - Name: {^Parameter: {Name: "Name", Default: "Short.Insert.Baseline"}} - Type: CrudActor - Threads: *ThreadsValue - Phases: - OnlyActiveInPhases: - Active: {^Parameter: {Name: "Active", Default: [5]}} - NopInPhasesUpTo: *MaxPhases - PhaseConfig: - GlobalRate: *GlobalRateValue - Threads: *ThreadsValue - CollectionCount: *CollectionCount - Database: *dbname - Duration: {^Parameter: {Name: "Duration", Default: *SnapshotScannerShortDuration}}# *SnapshotScannerShortDuration - Blocking: {^Parameter: {Name: "Blocking", Default: yes}} - Operations: - # Create - - *InsertOperation - - *InsertOperation - - *InsertOperation - - *InsertOperation - - *InsertOperation - - *InsertOperation - - - TemplateName: QueryTemplate - Config: - Name: {^Parameter: {Name: "Name", Default: "Short.Query.Baseline"}} - Type: CrudActor - Threads: *ThreadsValue - Phases: - OnlyActiveInPhases: - Active: {^Parameter: {Name: "Active", Default: [5]}} - 
NopInPhasesUpTo: *MaxPhases - PhaseConfig: - GlobalRate: *GlobalRateValue - Threads: *ThreadsValue - CollectionCount: *CollectionCount - Database: *dbname - Duration: {^Parameter: {Name: "Duration", Default: *SnapshotScannerShortDuration}}# *SnapshotScannerShortDuration - Blocking: {^Parameter: {Name: "Blocking", Default: yes}} - Operations: - # Read - - *PtcQueryOperation - - *PcQueryOperation - - *CpcQueryOperation - - *PtcQueryOperation - - *PcQueryOperation - - *CpcQueryOperation - - - TemplateName: UpdateTemplate - Config: - Name: {^Parameter: {Name: "Name", Default: "Short.Update.Baseline"}} - Type: CrudActor - Threads: *ThreadsValue - Phases: - OnlyActiveInPhases: - Active: {^Parameter: {Name: "Active", Default: [5]}} - NopInPhasesUpTo: *MaxPhases - PhaseConfig: - GlobalRate: *GlobalRateValue - Threads: *ThreadsValue - CollectionCount: *CollectionCount - Database: *dbname - Duration: {^Parameter: {Name: "Duration", Default: *SnapshotScannerShortDuration}}# *SnapshotScannerShortDuration - Blocking: {^Parameter: {Name: "Blocking", Default: yes}} - Operations: - # Update - - *PtcUpdateOperation - - *PcUpdateOperation - - *CpcUpdateOperation - - *PtcUpdateOperation - - *PcUpdateOperation - - *CpcUpdateOperation - - - TemplateName: RemoveTemplate - Config: - Name: {^Parameter: {Name: "Name", Default: "Short.Remove.Baseline"}} - Type: CrudActor - Threads: *ThreadsValue - Phases: - OnlyActiveInPhases: - Active: {^Parameter: {Name: "Active", Default: [5]}} - NopInPhasesUpTo: *MaxPhases - PhaseConfig: - GlobalRate: *GlobalRateValue - Threads: *ThreadsValue - CollectionCount: *CollectionCount - Database: *dbname - Duration: {^Parameter: {Name: "Duration", Default: *SnapshotScannerShortDuration}} - Blocking: {^Parameter: {Name: "Blocking", Default: yes}} - Operations: - # Delete - - *PtcRemoveOperation - - *PcRemoveOperation - - *CpcRemoveOperation - - *PtcRemoveOperation - - *PcRemoveOperation - - *CpcRemoveOperation - - - - TemplateName: ScanTemplate - Config: - 
Name: {^Parameter: {Name: "Name", Default: "Short.Scan.Snapshot"}} - Type: CollectionScanner - Threads: *CollectionCount - CollectionCount: *CollectionCount - Database: *dbname - GenerateCollectionNames: true - Phases: - OnlyActiveInPhases: - Active: {^Parameter: {Name: "Active", Default: [7]}}# [7] - NopInPhasesUpTo: *MaxPhases - PhaseConfig: - Duration: {^Parameter: {Name: "Duration", Default: *SnapshotScannerShortDuration}} - ScanDuration: {^Parameter: {Name: "ScanDuration", Default: *SnapshotScannerShortDuration}} - ScanType: snapshot - ScanContinuous: true - GenerateCollectionNames: true - CollectionSortOrder: forward - FindOptions: - BatchSize: 1000 - Hint: _id_ - Comment: {^Parameter: {Name: "Comment", Default: "Scan.Snapshot"}} +- TemplateName: InsertTemplate + Config: + Name: {^Parameter: {Name: "Name", Default: "Short.Insert.Baseline"}} + Type: CrudActor + Threads: *ThreadsValue + Phases: + OnlyActiveInPhases: + Active: {^Parameter: {Name: "Active", Default: [5]}} + NopInPhasesUpTo: *MaxPhases + PhaseConfig: + GlobalRate: *GlobalRateValue + Threads: *ThreadsValue + CollectionCount: *CollectionCount + Database: *dbname + Duration: {^Parameter: {Name: "Duration", Default: *SnapshotScannerShortDuration}} # *SnapshotScannerShortDuration + Blocking: {^Parameter: {Name: "Blocking", Default: true}} + Operations: + # Create + - *InsertOperation + - *InsertOperation + - *InsertOperation + - *InsertOperation + - *InsertOperation + - *InsertOperation + +- TemplateName: QueryTemplate + Config: + Name: {^Parameter: {Name: "Name", Default: "Short.Query.Baseline"}} + Type: CrudActor + Threads: *ThreadsValue + Phases: + OnlyActiveInPhases: + Active: {^Parameter: {Name: "Active", Default: [5]}} + NopInPhasesUpTo: *MaxPhases + PhaseConfig: + GlobalRate: *GlobalRateValue + Threads: *ThreadsValue + CollectionCount: *CollectionCount + Database: *dbname + Duration: {^Parameter: {Name: "Duration", Default: *SnapshotScannerShortDuration}} # *SnapshotScannerShortDuration + 
Blocking: {^Parameter: {Name: "Blocking", Default: true}} + Operations: + # Read + - *PtcQueryOperation + - *PcQueryOperation + - *CpcQueryOperation + - *PtcQueryOperation + - *PcQueryOperation + - *CpcQueryOperation + +- TemplateName: UpdateTemplate + Config: + Name: {^Parameter: {Name: "Name", Default: "Short.Update.Baseline"}} + Type: CrudActor + Threads: *ThreadsValue + Phases: + OnlyActiveInPhases: + Active: {^Parameter: {Name: "Active", Default: [5]}} + NopInPhasesUpTo: *MaxPhases + PhaseConfig: + GlobalRate: *GlobalRateValue + Threads: *ThreadsValue + CollectionCount: *CollectionCount + Database: *dbname + Duration: {^Parameter: {Name: "Duration", Default: *SnapshotScannerShortDuration}} # *SnapshotScannerShortDuration + Blocking: {^Parameter: {Name: "Blocking", Default: true}} + Operations: + # Update + - *PtcUpdateOperation + - *PcUpdateOperation + - *CpcUpdateOperation + - *PtcUpdateOperation + - *PcUpdateOperation + - *CpcUpdateOperation + +- TemplateName: RemoveTemplate + Config: + Name: {^Parameter: {Name: "Name", Default: "Short.Remove.Baseline"}} + Type: CrudActor + Threads: *ThreadsValue + Phases: + OnlyActiveInPhases: + Active: {^Parameter: {Name: "Active", Default: [5]}} + NopInPhasesUpTo: *MaxPhases + PhaseConfig: + GlobalRate: *GlobalRateValue + Threads: *ThreadsValue + CollectionCount: *CollectionCount + Database: *dbname + Duration: {^Parameter: {Name: "Duration", Default: *SnapshotScannerShortDuration}} + Blocking: {^Parameter: {Name: "Blocking", Default: true}} + Operations: + # Delete + - *PtcRemoveOperation + - *PcRemoveOperation + - *CpcRemoveOperation + - *PtcRemoveOperation + - *PcRemoveOperation + - *CpcRemoveOperation + + +- TemplateName: ScanTemplate + Config: + Name: {^Parameter: {Name: "Name", Default: "Short.Scan.Snapshot"}} + Type: CollectionScanner + Threads: *CollectionCount + CollectionCount: *CollectionCount + Database: *dbname + GenerateCollectionNames: true + Phases: + OnlyActiveInPhases: + Active: {^Parameter: {Name: 
"Active", Default: [7]}} # [7] + NopInPhasesUpTo: *MaxPhases + PhaseConfig: + Duration: {^Parameter: {Name: "Duration", Default: *SnapshotScannerShortDuration}} + ScanDuration: {^Parameter: {Name: "ScanDuration", Default: *SnapshotScannerShortDuration}} + ScanType: snapshot + ScanContinuous: true + GenerateCollectionNames: true + CollectionSortOrder: forward + FindOptions: + BatchSize: 1000 + Hint: _id_ + Comment: {^Parameter: {Name: "Comment", Default: "Scan.Snapshot"}} Clients: Default: @@ -300,7 +300,7 @@ Actors: OperationName: RunCommand OperationCommand: setParameter: 1 - transactionLifetimeLimitSeconds: 14400 # 4 Hours + transactionLifetimeLimitSeconds: 14400 # 4 Hours - Name: InitialLoad Type: Loader @@ -562,14 +562,14 @@ Actors: ScanDuration: *SnapshotScannerLongDuration AutoRun: - - When: - mongodb_setup: - $eq: - - atlas-like-replica - - replica - - replica-all-feature-flags - - single-replica - branch_name: - $neq: - - v4.0 - - v4.2 +- When: + mongodb_setup: + $eq: + - atlas-like-replica + - replica + - replica-all-feature-flags + - single-replica + branch_name: + $neq: + - v4.0 + - v4.2