From 245e5d2580859fd4f739eeb908a686877e0ab755 Mon Sep 17 00:00:00 2001
From: yarongilor
Date: Thu, 12 Dec 2024 18:39:13 +0200
Subject: [PATCH] test(elasticity): Added a test of 90% utilization with a lot
 of small tables

Test splitting the 90% utilization among a lot of small tables.
---
 ...e-elasticity-many-small-tables.jenkinsfile | 13 ++++
 ...ongevity-elasticity-many-small-tables.yaml | 40 ++++++++++++
 .../scale/templated-elasticity-tables.yaml    | 65 +++++++++++++++++++
 3 files changed, 118 insertions(+)
 create mode 100644 jenkins-pipelines/oss/tier2/scale-elasticity-many-small-tables.jenkinsfile
 create mode 100644 test-cases/scale/longevity-elasticity-many-small-tables.yaml
 create mode 100644 test-cases/scale/templated-elasticity-tables.yaml

diff --git a/jenkins-pipelines/oss/tier2/scale-elasticity-many-small-tables.jenkinsfile b/jenkins-pipelines/oss/tier2/scale-elasticity-many-small-tables.jenkinsfile
new file mode 100644
index 00000000000..434d2d1cab4
--- /dev/null
+++ b/jenkins-pipelines/oss/tier2/scale-elasticity-many-small-tables.jenkinsfile
@@ -0,0 +1,13 @@
+#!groovy
+
+// trick from https://github.com/jenkinsci/workflow-cps-global-lib-plugin/pull/43
+def lib = library identifier: 'sct@snapshot', retriever: legacySCM(scm)
+
+longevityPipeline(
+    backend: 'aws',
+    region: 'eu-west-1',
+    test_name: 'longevity_test.LongevityTest.test_user_batch_custom_time',
+    test_config: 'test-cases/scale/longevity-elasticity-many-small-tables.yaml',
+
+    timeout: [time: 4440, unit: 'MINUTES'],
+)
diff --git a/test-cases/scale/longevity-elasticity-many-small-tables.yaml b/test-cases/scale/longevity-elasticity-many-small-tables.yaml
new file mode 100644
index 00000000000..6e853ac8273
--- /dev/null
+++ b/test-cases/scale/longevity-elasticity-many-small-tables.yaml
@@ -0,0 +1,40 @@
+# This is a test case for reaching 90% disk utilization with a lot of small tables.
+# The data is split equally among 500 tables.
+# The dataset size is aligned with 'i4i.xlarge'.
+# It uses a single c-s user-profile template for all 500 tables.
+# It runs 4 batches of 125 tables each.
+# On each batch cycle, 125 tables are created, then load is generated for all of them.
+# When all 125 stress writes/reads are done, the test continues with the next batch, until all 500 tables have been stressed (after 4 cycles).
+# Each of the 500 tables gets both write and read load.
+
+test_duration: 480
+
+cs_duration: '0m'
+cs_user_profiles:
+  - test-cases/scale/templated-elasticity-tables.yaml
+
+pre_create_schema: true
+user_profile_table_count: 501
+batch_size: 125
+
+n_loaders: 5
+n_monitor_nodes: 1
+n_db_nodes: 3
+add_node_cnt: 0
+
+jmx_heap_memory: 1024 # this is a fix/workaround for https://github.com/scylladb/scylla/issues/7609
+
+instance_type_db: 'i4i.xlarge'
+instance_type_loader: 'c6i.2xlarge'
+user_prefix: 'longevity-elasticity-many-small-tables'
+root_disk_size_runner: 120
+
+
+cluster_health_check: false
+
+nemesis_class_name: 'NoOpMonkey'
+nemesis_interval: 60
+
+# TODO: remove when https://github.com/scylladb/scylla-tools-java/issues/175 resolved
+stop_test_on_stress_failure: false
+use_hdr_cs_histogram: true
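For illustration, here is a rough sketch of the batch cycle described in the comments of the config above. It is self-contained Groovy and not part of the patch; createTable and startStress are hypothetical stand-ins for what SCT's longevity_test.LongevityTest.test_user_batch_custom_time actually does:

// Sketch only: 500 loaded tables, stressed in 4 batches of 125 tables each.
def createTable = { String name -> println "CREATE TABLE keyspace1.${name} (...)" }
def startStress = { String name, String op ->
    // each table gets one write (insert) and one read (read1) stress command
    Thread.start { println "stressing ${name} with ops(${op}=1)" }
}

int loadedTables = 500                             // tables that receive load
int batchSize    = 125                             // batch_size in the config above
int batches      = loadedTables.intdiv(batchSize)  // -> 4 batch cycles

batches.times { cycle ->
    def names = ((cycle * batchSize + 1)..((cycle + 1) * batchSize)).collect { "table${it}" }
    names.each(createTable)                        // create this batch's 125 tables
    def jobs = names.collectMany { [startStress(it, 'insert'), startStress(it, 'read1')] }
    jobs*.join()                                   // next batch starts only when all 125 are done
}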
diff --git a/test-cases/scale/templated-elasticity-tables.yaml b/test-cases/scale/templated-elasticity-tables.yaml
new file mode 100644
index 00000000000..de2d44b26cf
--- /dev/null
+++ b/test-cases/scale/templated-elasticity-tables.yaml
@@ -0,0 +1,65 @@
+### DML ###
+
+# Keyspace Name
+keyspace: keyspace1
+
+# The CQL for creating a keyspace (optional if it already exists)
+keyspace_definition: |
+  CREATE KEYSPACE keyspace1 WITH replication = {'class': 'NetworkTopologyStrategy', 'replication_factor': 3} AND durable_writes = true;
+
+# Table name
+table: ${table_name}
+
+# The CQL for creating a table you wish to stress (optional if it already exists)
+table_definition: |
+  CREATE TABLE keyspace1.${table_name} (
+    key blob,
+    "C0" blob,
+    "C1" blob,
+    "C2" blob,
+    "C3" blob,
+    "C4" blob,
+    "C5" blob,
+    "C6" blob,
+    "C7" blob,
+    PRIMARY KEY (key)
+  )
+
+### Column Distribution Specifications ###
+columnspec:
+  - name: key
+    size: fixed(10) # Primary key size (10 bytes)
+  - name: C0
+    size: fixed(128) # Column size (128 bytes)
+  - name: C1
+    size: fixed(128)
+  - name: C2
+    size: fixed(128)
+  - name: C3
+    size: fixed(128)
+  - name: C4
+    size: fixed(128)
+  - name: C5
+    size: fixed(128)
+  - name: C6
+    size: fixed(128)
+  - name: C7
+    size: fixed(128)
+
+### Batch Ratio Distribution Specifications ###
+insert:
+  partitions: fixed(1)
+  select: fixed(1)/1000
+  batchtype: UNLOGGED
+
+#
+# A list of queries you wish to run against the schema
+#
+queries:
+  read1:
+    cql: SELECT * FROM keyspace1.${table_name} WHERE key = ?
+    fields: samerow
+
+# Run stress
+# cassandra-stress user profile={} 'ops(insert=1)' cl=QUORUM n=2572262 -rate threads=1 -errors ignore
+# cassandra-stress user profile={} 'ops(read1=1)' cl=QUORUM n=1286128 -rate threads=1 -errors ignore
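Since this profile is a template, a single file serves all 500 tables: ${table_name} is substituted once per table and the rendered profile is handed to cassandra-stress (the '{}' in the trailing comments stands for that per-table profile path). Each row is small by design: a 10-byte key plus 8 columns of 128 bytes, about 1 KB of raw data per partition. A minimal Groovy sketch of the substitution, with 'table1' as an illustrative name (SCT's real rendering code may differ):

// Sketch only: render the templated profile for one table.
def template = new File('test-cases/scale/templated-elasticity-tables.yaml').text
def rendered = template.replace('${table_name}', 'table1')   // e.g. CREATE TABLE keyspace1.table1 (...)
new File('/tmp/profile-table1.yaml').text = rendered         // illustrative output path

// The rendered file is then used as the trailing comments show, e.g.:
// cassandra-stress user profile=/tmp/profile-table1.yaml 'ops(insert=1)' cl=QUORUM n=2572262 -rate threads=1 -errors ignore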