├── .idea
├── .gitignore
├── vcs.xml
├── inspectionProfiles
│ └── profiles_settings.xml
├── modules.xml
├── misc.xml
├── cql-stress-mng.iml
└── runConfigurations
│ ├── help.xml
│ ├── version.xml
│ ├── remove.xml
│ ├── extract.xml
│ ├── zeebe_extract.xml
│ ├── zeebe_graph.xml
│ ├── zeebe_generate_all.xml
│ ├── SEPARATE___generate_all.xml
│ ├── SEQUENCE___generate_all.xml
│ ├── SEQUENCE___generate_v4_.xml
│ ├── SEQUENCE___generate_v5.xml
│ └── zeebe_compare.xml
├── stress
├── version.py
├── config
│ ├── cass
│ │ ├── remove_keyspace.txt
│ │ ├── remove_table.txt
│ │ ├── template_read.txt
│ │ ├── template_profile_UCSx.txt
│ │ ├── template_write_UCSx.txt
│ │ ├── template_write_LCS.txt
│ │ ├── template_write_STCS.txt
│ │ ├── process_data_1_6B.yaml
│ │ ├── process_data_2_1KB.yaml
│ │ ├── process_data_3_10KB.yaml
│ │ ├── process_data_4_50KB.yaml
│ │ ├── process_data_5_100KB.yaml
│ │ ├── process_data_6_300KB.yaml
│ │ ├── process_data_7_1MB.yaml
│ │ └── process_data_8_4MB.yaml
│ ├── compareV4V5_sequenceTHR
│ │ ├── _cass_v4_1_local_one.env
│ │ ├── _cass_v4_2_local_quorum.env
│ │ ├── _cass_v5_1_local_one.env
│ │ └── _cass_v5_2_local_quorum.env
│ └── compareV4V5_separateTHR
│ │ ├── _cass_v4_1_local_one.env
│ │ ├── _cass_v4_2_local_quorum.env
│ │ ├── _cass_v5_1_local_one.env
│ │ └── _cass_v5_2_local_quorum.env
├── file_marker.py
├── helper
│ └── stress_run.sh
├── cql_config.py
├── cql_helper.py
├── graph_output.py
├── cql_output.py
├── stress_graph.py
├── stress-cmd
│ ├── _cass_v4.sh
│ ├── _cass_seq_v4.sh
│ ├── _cass_v5.sh
│ ├── _cass_seq_v5.sh
│ └── _cass_seq_all.sh
├── cql_access.py
├── extract_summary.py
├── stress_mng.py
└── stress_compare.py
├── docs
├── assets
│ ├── r2-local_quorum.png
│ ├── r3-local_quorum.png
│ ├── final-local_quorum.png
│ ├── PRF-v4_vs_v5_read_LOCAL_ONE_LCS-2024-12-01_12-18-17-bulk-1x1.png
│ ├── PRF-v4_vs_v5_write_LOCAL_ONE_STCS-UCS8-2024-12-01_12-18-17-bulk-1x1.png
│ └── PRF-v4_vs_v5_write_LOCAL_QUORUM_STCS-UCS8-2024-12-01_12-18-17-bulk-1x1.png
├── todo_list.md
└── samples
│ ├── userprofile.md
│ ├── write.md
│ ├── read.md
│ └── yaml
│ └── cqlstress-example.yaml
├── requirements.txt
├── .gitignore
├── README.md
└── LICENSE
/.idea/.gitignore:
--------------------------------------------------------------------------------
1 | # Default ignored files
2 | /shelf/
3 | /workspace.xml
4 |
--------------------------------------------------------------------------------
/stress/version.py:
--------------------------------------------------------------------------------
# Store the version here in a single place so it can be read programmatically
# (e.g. by packaging tooling or the CLI `version` command):

__version__ = '0.0.7'
--------------------------------------------------------------------------------
/stress/config/cass/remove_keyspace.txt:
--------------------------------------------------------------------------------
1 | python3.11 stress/stress_mng.py remove -e %ENV% -k %KEYSPACE% -d stress -s 10
--------------------------------------------------------------------------------
/docs/assets/r2-local_quorum.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/george0st/cql-stress-mng/HEAD/docs/assets/r2-local_quorum.png
--------------------------------------------------------------------------------
/docs/assets/r3-local_quorum.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/george0st/cql-stress-mng/HEAD/docs/assets/r3-local_quorum.png
--------------------------------------------------------------------------------
/docs/assets/final-local_quorum.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/george0st/cql-stress-mng/HEAD/docs/assets/final-local_quorum.png
--------------------------------------------------------------------------------
/stress/config/cass/remove_table.txt:
--------------------------------------------------------------------------------
1 | python3.11 stress/stress_mng.py remove -e %ENV% -k %KEYSPACE% -t tbltest -d stress -s 10
2 |
--------------------------------------------------------------------------------
/docs/todo_list.md:
--------------------------------------------------------------------------------
1 | # To-Do list
2 |
3 | 1. For every run, use variables that do not depend on the previous RUN
4 | - see stress_test
5 |
--------------------------------------------------------------------------------
/.idea/vcs.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
--------------------------------------------------------------------------------
/docs/assets/PRF-v4_vs_v5_read_LOCAL_ONE_LCS-2024-12-01_12-18-17-bulk-1x1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/george0st/cql-stress-mng/HEAD/docs/assets/PRF-v4_vs_v5_read_LOCAL_ONE_LCS-2024-12-01_12-18-17-bulk-1x1.png
--------------------------------------------------------------------------------
/docs/assets/PRF-v4_vs_v5_write_LOCAL_ONE_STCS-UCS8-2024-12-01_12-18-17-bulk-1x1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/george0st/cql-stress-mng/HEAD/docs/assets/PRF-v4_vs_v5_write_LOCAL_ONE_STCS-UCS8-2024-12-01_12-18-17-bulk-1x1.png
--------------------------------------------------------------------------------
/docs/assets/PRF-v4_vs_v5_write_LOCAL_QUORUM_STCS-UCS8-2024-12-01_12-18-17-bulk-1x1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/george0st/cql-stress-mng/HEAD/docs/assets/PRF-v4_vs_v5_write_LOCAL_QUORUM_STCS-UCS8-2024-12-01_12-18-17-bulk-1x1.png
--------------------------------------------------------------------------------
/.idea/inspectionProfiles/profiles_settings.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
--------------------------------------------------------------------------------
/.idea/modules.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
--------------------------------------------------------------------------------
/stress/config/cass/template_read.txt:
--------------------------------------------------------------------------------
1 | %CMD% %OPERATION% duration=%DURATION% cl=%CL% no-warmup
2 | -node %IP%
3 | -mode user=%USER% password=%PWD% prepared protocolVersion=%PROTOCOL% connectionsPerHost=%HOSTS% maxPending=%MAXPENDING%
4 | -errors skip-read-validation
5 | -rate "%THREAD_CMP%%THREAD%"
6 | -reporting output-frequency=5s > "%OUTPUT%"
--------------------------------------------------------------------------------
/.idea/misc.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
--------------------------------------------------------------------------------
/stress/config/cass/template_profile_UCSx.txt:
--------------------------------------------------------------------------------
1 | %CMD% %OPERATION% profile=%PROFILE_PATH%%PROFILE%.yaml duration=%DURATION% "ops(%OPS%=1)" cl=%CL% no-warmup
2 | -node %IP%
3 | -mode user=%USER% password=%PWD% prepared protocolVersion=%PROTOCOL% connectionsPerHost=%HOSTS% maxPending=%MAXPENDING%
4 | -rate "%THREAD_CMP%%THREAD%"
5 | -reporting output-frequency=5s > "%OUTPUT%"
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | # command line
2 | click~=8.1
3 |
4 | # cassandra - remove keyspace, table
5 | cassandra-driver==3.29.2
6 |
7 | # CSV processing
8 | polars==1.15
9 |
10 | # generate graphs
11 | qgate-graph==1.4.30
12 |
13 | # env file
14 | python-dotenv~=1.0
15 |
16 | # text as the table
17 | prettytable==3.11.0
18 |
19 | # color in terminal
20 | colorama==0.4.6
21 |
--------------------------------------------------------------------------------
/stress/config/cass/template_write_UCSx.txt:
--------------------------------------------------------------------------------
1 | %CMD% %OPERATION% duration=%DURATION% cl=%CL% no-warmup
2 | -node %IP%
3 | -mode user=%USER% password=%PWD% prepared protocolVersion=%PROTOCOL% connectionsPerHost=%HOSTS% maxPending=%MAXPENDING%
4 | -schema "replication(strategy=NetworkTopologyStrategy,factor=3)" "compaction(strategy=UnifiedCompactionStrategy,scaling_parameters=%SCALE%)"
5 | -rate "%THREAD_CMP%%THREAD%"
6 | -reporting output-frequency=5s > "%OUTPUT%"
--------------------------------------------------------------------------------
/stress/config/cass/template_write_LCS.txt:
--------------------------------------------------------------------------------
1 | %CMD% %OPERATION% duration=%DURATION% cl=%CL% no-warmup
2 | -node %IP%
3 | -mode user=%USER% password=%PWD% prepared protocolVersion=%PROTOCOL% connectionsPerHost=%HOSTS% maxPending=%MAXPENDING%
4 | -schema "replication(strategy=NetworkTopologyStrategy,factor=3)" "compaction(strategy=LeveledCompactionStrategy,sstable_size_in_mb=160,fanout_size=10)"
5 | -rate "%THREAD_CMP%%THREAD%"
6 | -reporting output-frequency=5s > "%OUTPUT%"
--------------------------------------------------------------------------------
/stress/config/cass/template_write_STCS.txt:
--------------------------------------------------------------------------------
1 | %CMD% %OPERATION% duration=%DURATION% cl=%CL% no-warmup
2 | -node %IP%
3 | -mode user=%USER% password=%PWD% prepared protocolVersion=%PROTOCOL% connectionsPerHost=%HOSTS% maxPending=%MAXPENDING%
4 | -schema "replication(strategy=NetworkTopologyStrategy,factor=3)" "compaction(strategy=SizeTieredCompactionStrategy,max_threshold=32,min_threshold=4)"
5 | -rate "%THREAD_CMP%%THREAD%"
6 | -reporting output-frequency=5s > "%OUTPUT%"
--------------------------------------------------------------------------------
/.idea/cql-stress-mng.iml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
--------------------------------------------------------------------------------
/docs/samples/userprofile.md:
--------------------------------------------------------------------------------
1 |
2 | # 1. User profile
3 |
4 | ### 1.1 User profile with Write & Read
5 | - ./apache-cassandra-5.0.2/tools/bin/cassandra-stress user profile=cqlstress-example.yaml duration=1m "ops(insert=1,simple1=1)" cl=LOCAL_ONE
6 | - -node 10.129.53.159,10.129.53.154,10.129.53.153
7 | - -mode user=perf password=perf prepared protocolVersion=4
8 | - -rate "threads>=4" "threads<=200"
9 | - -graph "file=v5_read_1_local_one_$(date +%Y%m%d_%H%M%S).html" title=v5_read_1_local_one
10 | - -reporting output-frequency=5s > "xxxv5_write_1_local_one_$(date +%Y%m%d_%H%M%S).txt"
11 |
12 |
--------------------------------------------------------------------------------
/stress/config/compareV4V5_sequenceTHR/_cass_v4_1_local_one.env:
--------------------------------------------------------------------------------
1 | DURATION = 1m
2 | CMD = "./apache-cassandra-5.0.2/tools/bin/cassandra-stress"
3 |
4 | #IP = 10.129.52.58,10.129.53.21,10.129.52.57,10.117.19.9,10.117.19.8,10.117.19.11
5 | IP = 10.129.52.58,10.129.53.21,10.129.52.57
6 | USER = perf
7 | PWD = ../secrets/perf.txt
8 | PROTOCOL = 4
9 | OUTPUT = ./stress-output/$curr_date/$curr_date v4 %OPERATION%_%CL%_%CS%_%THREAD%xTHR.txt
10 | ENV=compareV4V5_sequenceTHR/_cass_v4_1_local_one.env
11 | KEYSPACE = keyspace1
12 |
13 | # EXECUTION
14 | ##########################
15 | THREAD = 100
16 | THREAD_CMP = "threads<="
17 | CL = LOCAL_ONE
18 | MAXPENDING = 384
19 | HOSTS = 24
20 |
21 | RUN0 = "cass/remove_keyspace.txt, OPERATION=remove"
22 |
23 | RUN1 = "cass/template_write_LCS.txt, OPERATION=write, CS=LCS"
24 | RUN2 = "cass/template_read.txt, OPERATION=read, CS=LCS"
25 | RUN3 = "cass/remove_keyspace.txt, OPERATION=remove"
26 |
27 | RUN4 = "cass/template_write_STCS.txt, OPERATION=write, CS=STCS"
28 | RUN5 = "cass/template_read.txt, OPERATION=read, CS=STCS"
29 | RUN6 = "cass/remove_keyspace.txt, OPERATION=remove"
30 |
--------------------------------------------------------------------------------
/stress/config/compareV4V5_sequenceTHR/_cass_v4_2_local_quorum.env:
--------------------------------------------------------------------------------
1 | DURATION = 1m
2 | CMD = "./apache-cassandra-5.0.2/tools/bin/cassandra-stress"
3 |
4 | #IP = 10.129.52.58,10.129.53.21,10.129.52.57,10.117.19.9,10.117.19.8,10.117.19.11
5 | IP = 10.129.52.58,10.129.53.21,10.129.52.57
6 | USER = perf
7 | PWD = ../secrets/perf.txt
8 | PROTOCOL = 4
9 | OUTPUT = ./stress-output/$curr_date/$curr_date v4 %OPERATION%_%CL%_%CS%_%THREAD%xTHR.txt
10 | ENV=compareV4V5_sequenceTHR/_cass_v4_2_local_quorum.env
11 | KEYSPACE = keyspace1
12 |
13 | # EXECUTION
14 | ##########################
15 | THREAD = 100
16 | THREAD_CMP = "threads<="
17 | CL = LOCAL_QUORUM
18 | MAXPENDING = 384
19 | HOSTS = 24
20 |
21 | RUN0 = "cass/remove_keyspace.txt, OPERATION=remove"
22 |
23 | RUN1 = "cass/template_write_LCS.txt, OPERATION=write, CS=LCS"
24 | RUN2 = "cass/template_read.txt, OPERATION=read, CS=LCS"
25 | RUN3 = "cass/remove_keyspace.txt, OPERATION=remove"
26 |
27 | RUN4 = "cass/template_write_STCS.txt, OPERATION=write, CS=STCS"
28 | RUN5 = "cass/template_read.txt, OPERATION=read, CS=STCS"
29 | RUN6 = "cass/remove_keyspace.txt, OPERATION=remove"
--------------------------------------------------------------------------------
/stress/config/compareV4V5_separateTHR/_cass_v4_1_local_one.env:
--------------------------------------------------------------------------------
1 | DURATION = 1m
2 | CMD = "./apache-cassandra-5.0.2/tools/bin/cassandra-stress"
3 |
4 | #IP = 10.129.52.58,10.129.53.21,10.129.52.57,10.117.19.9,10.117.19.8,10.117.19.11
5 | IP = 10.129.52.58,10.129.53.21,10.129.52.57
6 | USER = perf
7 | PWD = ../secrets/perf.txt
8 | PROTOCOL = 4
9 | OUTPUT = ./stress-output/$curr_date/$curr_date v4 %OPERATION%_%CL%_%CS%_%THREAD%xTHR.txt
10 | ENV=compareV4V5_separateTHR/_cass_v4_1_local_one.env
11 | KEYSPACE = keyspace1
12 |
13 | # EXECUTION
14 | ##########################
15 | THREAD = 4, 8, 16, 32, 64
16 | THREAD_CMP = "threads="
17 | CL = LOCAL_ONE
18 | MAXPENDING = 384
19 | HOSTS = 24
20 |
21 | RUN0 = "cass/remove_keyspace.txt, OPERATION=remove"
22 |
23 | RUN1 = "cass/template_write_LCS.txt, OPERATION=write, CS=LCS, THREAD"
24 | RUN2 = "cass/template_read.txt, OPERATION=read, CS=LCS, THREAD"
25 | RUN3 = "cass/remove_keyspace.txt, OPERATION=remove"
26 |
27 | RUN4 = "cass/template_write_STCS.txt, OPERATION=write, CS=STCS, THREAD"
28 | RUN5 = "cass/template_read.txt, OPERATION=read, CS=STCS, THREAD"
29 | RUN6 = "cass/remove_keyspace.txt, OPERATION=remove"
30 |
--------------------------------------------------------------------------------
/stress/config/compareV4V5_separateTHR/_cass_v4_2_local_quorum.env:
--------------------------------------------------------------------------------
1 | DURATION = 1m
2 | CMD = "./apache-cassandra-5.0.2/tools/bin/cassandra-stress"
3 |
4 | #IP = 10.129.52.58,10.129.53.21,10.129.52.57,10.117.19.9,10.117.19.8,10.117.19.11
5 | IP = 10.129.52.58,10.129.53.21,10.129.52.57
6 | USER = perf
7 | PWD = ../secrets/perf.txt
8 | PROTOCOL = 4
9 | OUTPUT = ./stress-output/$curr_date/$curr_date v4 %OPERATION%_%CL%_%CS%_%THREAD%xTHR.txt
10 | ENV=compareV4V5_separateTHR/_cass_v4_2_local_quorum.env
11 | KEYSPACE = keyspace1
12 |
13 | # EXECUTION
14 | ##########################
15 | THREAD = 4, 8, 16, 32, 64
16 | THREAD_CMP = "threads="
17 | CL = LOCAL_QUORUM
18 | MAXPENDING = 384
19 | HOSTS = 24
20 |
21 | RUN0 = "cass/remove_keyspace.txt, OPERATION=remove"
22 |
23 | RUN1 = "cass/template_write_LCS.txt, OPERATION=write, CS=LCS, THREAD"
24 | RUN2 = "cass/template_read.txt, OPERATION=read, CS=LCS, THREAD"
25 | RUN3 = "cass/remove_keyspace.txt, OPERATION=remove"
26 |
27 | RUN4 = "cass/template_write_STCS.txt, OPERATION=write, CS=STCS, THREAD"
28 | RUN5 = "cass/template_read.txt, OPERATION=read, CS=STCS, THREAD"
29 | RUN6 = "cass/remove_keyspace.txt, OPERATION=remove"
--------------------------------------------------------------------------------
/.idea/runConfigurations/help.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
17 |
18 |
19 |
20 |
21 |
22 |
23 |
24 |
25 |
--------------------------------------------------------------------------------
/.idea/runConfigurations/version.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
17 |
18 |
19 |
20 |
21 |
22 |
23 |
24 |
25 |
--------------------------------------------------------------------------------
/.idea/runConfigurations/remove.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
17 |
18 |
19 |
20 |
21 |
22 |
23 |
24 |
25 |
--------------------------------------------------------------------------------
/.idea/runConfigurations/extract.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
17 |
18 |
19 |
20 |
21 |
22 |
23 |
24 |
25 |
--------------------------------------------------------------------------------
/.idea/runConfigurations/zeebe_extract.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
17 |
18 |
19 |
20 |
21 |
22 |
23 |
24 |
25 |
--------------------------------------------------------------------------------
/.idea/runConfigurations/zeebe_graph.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
17 |
18 |
19 |
20 |
21 |
22 |
23 |
24 |
25 |
--------------------------------------------------------------------------------
/.idea/runConfigurations/zeebe_generate_all.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
17 |
18 |
19 |
20 |
21 |
22 |
23 |
24 |
25 |
--------------------------------------------------------------------------------
/.idea/runConfigurations/SEPARATE___generate_all.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
17 |
18 |
19 |
20 |
21 |
22 |
23 |
24 |
25 |
--------------------------------------------------------------------------------
/.idea/runConfigurations/SEQUENCE___generate_all.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
17 |
18 |
19 |
20 |
21 |
22 |
23 |
24 |
25 |
--------------------------------------------------------------------------------
/.idea/runConfigurations/SEQUENCE___generate_v4_.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
17 |
18 |
19 |
20 |
21 |
22 |
23 |
24 |
25 |
--------------------------------------------------------------------------------
/.idea/runConfigurations/SEQUENCE___generate_v5.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
17 |
18 |
19 |
20 |
21 |
22 |
23 |
24 |
25 |
--------------------------------------------------------------------------------
/.idea/runConfigurations/zeebe_compare.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
17 |
18 |
19 |
20 |
21 |
22 |
23 |
24 |
25 |
--------------------------------------------------------------------------------
/stress/file_marker.py:
--------------------------------------------------------------------------------
1 |
class FileMarker:
    """Column/field name constants used to mark and parse performance output files.

    Three record categories share one file: header rows, detail rows and
    core-output rows; PRF_TYPE is the discriminator field that tells them apart.
    """

    # discriminator field: holds one of PRF_HDR_TYPE / PRF_DETAIL_TYPE / PRF_CORE_TYPE
    PRF_TYPE="type"

    # header
    # NOTE(review): value is "headr", not "header" — looks like a typo, but existing
    # output files may already contain this marker; confirm before changing.
    PRF_HDR_TYPE="headr"
    PRF_HDR_LABEL="label"
    PRF_HDR_BULK="bulk"
    PRF_HDR_DURATION = "duration"
    PRF_HDR_RESPONSE_UNIT = "responsetime_unit"
    PRF_HDR_PERCENTILE = "percentile"
    # NOTE(review): identifier misspells "AVAILABLE"; renaming would break external
    # callers, so it is kept as-is.
    PRF_HDR_AVIALABLE_CPU = "cpu"
    PRF_HDR_HOST="host"
    PRF_HDR_MEMORY="mem"
    PRF_HDR_MEMORY_FREE="mem_free"
    PRF_HDR_NOW="now"

    # detail (one row per executor/process measurement)
    PRF_DETAIL_TYPE="detail"
    PRF_DETAIL_PROCESSID="processid"
    PRF_DETAIL_CALLS="calls"
    PRF_DETAIL_COUNT="count"
    PRF_DETAIL_TOTAL="total"
    PRF_DETAIL_AVRG="avrg"
    PRF_DETAIL_MIN="min"
    PRF_DETAIL_MAX="max"
    PRF_DETAIL_STDEV="st-dev"
    PRF_DETAIL_ERR="err"
    PRF_DETAIL_TIME_INIT="initexec"
    PRF_DETAIL_TIME_START="startexec"
    PRF_DETAIL_TIME_END="endexec"

    # core output (aggregated summary rows)
    PRF_CORE_TYPE="core"
    PRF_CORE_PLAN_EXECUTOR_ALL="plan_executors"
    PRF_CORE_PLAN_EXECUTOR="plan_executors_detail"
    PRF_CORE_REAL_EXECUTOR="real_executors"
    PRF_CORE_GROUP="group"
    PRF_CORE_TOTAL_CALL="total_calls"
    PRF_CORE_AVRG_TIME="avrg_time"
    PRF_CORE_STD_DEVIATION = "std_deviation"
    PRF_CORE_MIN = "min"
    PRF_CORE_MAX = "max"
    PRF_CORE_TOTAL_CALL_PER_SEC = "total_call_per_sec"         # total raw performance and multiply by rows in bundle
    PRF_CORE_TOTAL_CALL_PER_SEC_RAW = "total_call_per_sec_raw" # total raw performance (calls per one second)
    PRF_CORE_TIME_END = "endexec"
--------------------------------------------------------------------------------
/stress/config/compareV4V5_sequenceTHR/_cass_v5_1_local_one.env:
--------------------------------------------------------------------------------
1 | DURATION = 1m
2 | CMD = "./apache-cassandra-5.0.2/tools/bin/cassandra-stress"
3 |
4 | #IP = 10.129.53.159,10.129.53.154,10.129.53.153,10.117.19.6,10.117.19.5,10.117.19.4
5 | IP = 10.129.53.159,10.129.53.154,10.129.53.153
6 | USER = perf
7 | PWD = ../secrets/perf.txt
8 | PROTOCOL = 4
9 | OUTPUT = ./stress-output/$curr_date/$curr_date v5 %OPERATION%_%CL%_%CS%_%THREAD%xTHR.txt
10 | ENV=compareV4V5_sequenceTHR/_cass_v5_1_local_one.env
11 | KEYSPACE = keyspace1
12 |
13 | # EXECUTION
14 | ##########################
15 | THREAD = 100
16 | THREAD_CMP = "threads<="
17 | CL = LOCAL_ONE
18 | MAXPENDING = 384
19 | HOSTS = 24
20 |
21 | RUN0 = "cass/remove_keyspace.txt, OPERATION=remove"
22 |
23 | RUN1 = "cass/template_write_LCS.txt, OPERATION=write, CS=LCS"
24 | RUN2 = "cass/template_read.txt, OPERATION=read, CS=LCS"
25 | RUN3 = "cass/remove_keyspace.txt, OPERATION=remove"
26 |
27 | RUN4 = "cass/template_write_STCS.txt, OPERATION=write, CS=STCS"
28 | RUN5 = "cass/template_read.txt, OPERATION=read, CS=STCS"
29 | RUN6 = "cass/remove_keyspace.txt, OPERATION=remove"
30 |
31 | RUN7 = "cass/template_write_UCSx.txt, OPERATION=write, CS=UCS4, SCALE=4"
32 | RUN8 = "cass/template_read.txt, OPERATION=read, CS=UCS4"
33 | RUN9 = "cass/remove_keyspace.txt, OPERATION=remove"
34 |
35 | RUN10 = "cass/template_write_UCSx.txt, OPERATION=write, CS=UCS8, SCALE=8"
36 | RUN11 = "cass/template_read.txt, OPERATION=read, CS=UCS8"
37 | RUN12 = "cass/remove_keyspace.txt, OPERATION=remove"
38 |
39 | RUN13 = "cass/template_write_UCSx.txt, OPERATION=write, CS=UCS10, SCALE=10"
40 | RUN14 = "cass/template_read.txt, OPERATION=read, CS=UCS10"
41 | RUN15 = "cass/remove_keyspace.txt, OPERATION=remove"
42 |
43 |
--------------------------------------------------------------------------------
/stress/config/compareV4V5_sequenceTHR/_cass_v5_2_local_quorum.env:
--------------------------------------------------------------------------------
1 | DURATION = 1m
2 | CMD = "./apache-cassandra-5.0.2/tools/bin/cassandra-stress"
3 |
4 | #IP = 10.129.53.159,10.129.53.154,10.129.53.153,10.117.19.6,10.117.19.5,10.117.19.4
5 | IP = 10.129.53.159,10.129.53.154,10.129.53.153
6 | USER = perf
7 | PWD = ../secrets/perf.txt
8 | PROTOCOL = 4
9 | OUTPUT = ./stress-output/$curr_date/$curr_date v5 %OPERATION%_%CL%_%CS%_%THREAD%xTHR.txt
10 | ENV=compareV4V5_sequenceTHR/_cass_v5_2_local_quorum.env
11 | KEYSPACE = keyspace1
12 |
13 | # EXECUTION
14 | ##########################
15 | THREAD = 100
16 | THREAD_CMP = "threads<="
17 | CL = LOCAL_QUORUM
18 | MAXPENDING = 384
19 | HOSTS = 24
20 |
21 | RUN0 = "cass/remove_keyspace.txt, OPERATION=remove"
22 |
23 | RUN1 = "cass/template_write_LCS.txt, OPERATION=write, CS=LCS"
24 | RUN2 = "cass/template_read.txt, OPERATION=read, CS=LCS"
25 | RUN3 = "cass/remove_keyspace.txt, OPERATION=remove"
26 |
27 | RUN4 = "cass/template_write_STCS.txt, OPERATION=write, CS=STCS"
28 | RUN5 = "cass/template_read.txt, OPERATION=read, CS=STCS"
29 | RUN6 = "cass/remove_keyspace.txt, OPERATION=remove"
30 |
31 | RUN7 = "cass/template_write_UCSx.txt, OPERATION=write, CS=UCS4, SCALE=4"
32 | RUN8 = "cass/template_read.txt, OPERATION=read, CS=UCS4"
33 | RUN9 = "cass/remove_keyspace.txt, OPERATION=remove"
34 |
35 | RUN10 = "cass/template_write_UCSx.txt, OPERATION=write, CS=UCS8, SCALE=8"
36 | RUN11 = "cass/template_read.txt, OPERATION=read, CS=UCS8"
37 | RUN12 = "cass/remove_keyspace.txt, OPERATION=remove"
38 |
39 | RUN13 = "cass/template_write_UCSx.txt, OPERATION=write, CS=UCS10, SCALE=10"
40 | RUN14 = "cass/template_read.txt, OPERATION=read, CS=UCS10"
41 | RUN15 = "cass/remove_keyspace.txt, OPERATION=remove"
42 |
43 |
--------------------------------------------------------------------------------
/stress/config/compareV4V5_separateTHR/_cass_v5_1_local_one.env:
--------------------------------------------------------------------------------
1 | DURATION = 1m
2 | CMD = "./apache-cassandra-5.0.2/tools/bin/cassandra-stress"
3 |
4 | #IP = 10.129.53.159,10.129.53.154,10.129.53.153,10.117.19.6,10.117.19.5,10.117.19.4
5 | IP = 10.129.53.159,10.129.53.154,10.129.53.153
6 | USER = perf
7 | PWD = ../secrets/perf.txt
8 | PROTOCOL = 4
9 | OUTPUT = ./stress-output/$curr_date/$curr_date v5 %OPERATION%_%CL%_%CS%_%THREAD%xTHR.txt
10 | ENV=compareV4V5_separateTHR/_cass_v5_1_local_one.env
11 | KEYSPACE = keyspace1
12 |
13 | # EXECUTION
14 | ##########################
15 | THREAD = 4, 8, 16, 32, 64
16 | THREAD_CMP = "threads="
17 | CL = LOCAL_ONE
18 | MAXPENDING = 384
19 | HOSTS = 24
20 |
21 |
22 | RUN0 = "cass/remove_keyspace.txt, OPERATION=remove"
23 |
24 | RUN1 = "cass/template_write_LCS.txt, OPERATION=write, CS=LCS, THREAD"
25 | RUN2 = "cass/template_read.txt, OPERATION=read, CS=LCS, THREAD"
26 | RUN3 = "cass/remove_keyspace.txt, OPERATION=remove"
27 |
28 | RUN4 = "cass/template_write_STCS.txt, OPERATION=write, CS=STCS, THREAD"
29 | RUN5 = "cass/template_read.txt, OPERATION=read, CS=STCS, THREAD"
30 | RUN6 = "cass/remove_keyspace.txt, OPERATION=remove"
31 |
32 | RUN7 = "cass/template_write_UCSx.txt, OPERATION=write, CS=UCS4, SCALE=4, THREAD"
33 | RUN8 = "cass/template_read.txt, OPERATION=read, CS=UCS4, THREAD"
34 | RUN9 = "cass/remove_keyspace.txt, OPERATION=remove"
35 |
36 | RUN10 = "cass/template_write_UCSx.txt, OPERATION=write, CS=UCS8, SCALE=8, THREAD"
37 | RUN11 = "cass/template_read.txt, OPERATION=read, CS=UCS8, THREAD"
38 | RUN12 = "cass/remove_keyspace.txt, OPERATION=remove"
39 |
40 | RUN13 = "cass/template_write_UCSx.txt, OPERATION=write, CS=UCS10, SCALE=10, THREAD"
41 | RUN14 = "cass/template_read.txt, OPERATION=read, CS=UCS10, THREAD"
42 | RUN15 = "cass/remove_keyspace.txt, OPERATION=remove"
43 |
44 |
--------------------------------------------------------------------------------
/stress/config/compareV4V5_separateTHR/_cass_v5_2_local_quorum.env:
--------------------------------------------------------------------------------
1 | DURATION = 1m
2 | CMD = "./apache-cassandra-5.0.2/tools/bin/cassandra-stress"
3 |
4 | #IP = 10.129.53.159,10.129.53.154,10.129.53.153,10.117.19.6,10.117.19.5,10.117.19.4
5 | IP = 10.129.53.159,10.129.53.154,10.129.53.153
6 | USER = perf
7 | PWD = ../secrets/perf.txt
8 | PROTOCOL = 4
9 | OUTPUT = ./stress-output/$curr_date/$curr_date v5 %OPERATION%_%CL%_%CS%_%THREAD%xTHR.txt
10 | ENV=compareV4V5_separateTHR/_cass_v5_2_local_quorum.env
11 | KEYSPACE = keyspace1
12 |
13 | # EXECUTION
14 | ##########################
15 | THREAD = 4, 8, 16, 32, 64
16 | THREAD_CMP = "threads="
17 | CL = LOCAL_QUORUM
18 | MAXPENDING = 384
19 | HOSTS = 24
20 |
21 | RUN0 = "cass/remove_keyspace.txt, OPERATION=remove"
22 |
23 | RUN1 = "cass/template_write_LCS.txt, OPERATION=write, CS=LCS, THREAD"
24 | RUN2 = "cass/template_read.txt, OPERATION=read, CS=LCS, THREAD"
25 | RUN3 = "cass/remove_keyspace.txt, OPERATION=remove"
26 |
27 | RUN4 = "cass/template_write_STCS.txt, OPERATION=write, CS=STCS, THREAD"
28 | RUN5 = "cass/template_read.txt, OPERATION=read, CS=STCS, THREAD"
29 | RUN6 = "cass/remove_keyspace.txt, OPERATION=remove"
30 |
31 | RUN7 = "cass/template_write_UCSx.txt, OPERATION=write, CS=UCS4, SCALE=4, THREAD"
32 | RUN8 = "cass/template_read.txt, OPERATION=read, CS=UCS4, THREAD"
33 | RUN9 = "cass/remove_keyspace.txt, OPERATION=remove"
34 |
35 | RUN10 = "cass/template_write_UCSx.txt, OPERATION=write, CS=UCS8, SCALE=8, THREAD"
36 | RUN11 = "cass/template_read.txt, OPERATION=read, CS=UCS8, THREAD"
37 | RUN12 = "cass/remove_keyspace.txt, OPERATION=remove"
38 |
39 | RUN13 = "cass/template_write_UCSx.txt, OPERATION=write, CS=UCS10, SCALE=10, THREAD"
40 | RUN14 = "cass/template_read.txt, OPERATION=read, CS=UCS10, THREAD"
41 | RUN15 = "cass/remove_keyspace.txt, OPERATION=remove"
42 |
43 |
--------------------------------------------------------------------------------
/stress/helper/stress_run.sh:
--------------------------------------------------------------------------------
# Helper snippets for launching stress runs (copy/paste reference, not an
# executable pipeline). Each `stress_mng.py run/generate` call produces a
# launcher shell script (-l) which is then started, typically via nohup so
# long runs survive a closed terminal session.

# Run from terminal
###########################

# Run without terminal (nohup keeps the run alive after logout)
###########################
python3.11 stress/stress_mng.py generate -e private/zeebe/_cass_profile.env -l ../_zeebe_all.sh -d stress
nohup ./_zeebe_all.sh


python3.11 stress/stress_mng.py generate -e private/zeebe/_cass_*.env -l ../_zeebe_all.sh -d stress
./_zeebe_all.sh

# Mains for 'compareV4V5_sequenceTHR'
python3.11 stress/stress_mng.py run -e compareV4V5_sequenceTHR/_cass_v4_*.env -l ../_cass_seq_v4.sh -d stress
python3.11 stress/stress_mng.py run -e compareV4V5_sequenceTHR/_cass_v5_*.env -l ../_cass_seq_v5.sh -d stress
python3.11 stress/stress_mng.py run -e compareV4V5_sequenceTHR/_cass_*.env -l ../_cass_seq_all.sh -d stress

nohup ./_cass_seq_v4.sh
nohup ./_cass_seq_v5.sh
nohup ./_cass_seq_all.sh

# Mains for 'compareV4V5_separateTHR'
python3.11 stress/stress_mng.py run -e compareV4V5_separateTHR/_cass_v4_*.env -l ../_cass_v4.sh -d stress
python3.11 stress/stress_mng.py run -e compareV4V5_separateTHR/_cass_v5_*.env -l ../_cass_v5.sh -d stress
python3.11 stress/stress_mng.py run -e compareV4V5_separateTHR/_cass_*.env -l ../_cass_all.sh -d stress

nohup ./_cass_v4.sh
nohup ./_cass_v5.sh
nohup ./_cass_all.sh

# Others (single-environment runs)
python3.11 stress/stress_mng.py run -e compareV4V5_separateTHR/_cass_v4_1*.env -l ../_cass_v4_1.sh -d stress
python3.11 stress/stress_mng.py run -e compareV4V5_separateTHR/_cass_v4_2*.env -l ../_cass_v4_2.sh -d stress
python3.11 stress/stress_mng.py run -e compareV4V5_separateTHR/_cass_v5_1*.env -l ../_cass_v5_1.sh -d stress
python3.11 stress/stress_mng.py run -e compareV4V5_separateTHR/_cass_v5_2*.env -l ../_cass_v5_2.sh -d stress

nohup ./_cass_v4_1.sh
nohup ./_cass_v4_2.sh
nohup ./_cass_v5_1.sh
nohup ./_cass_v5_2.sh
--------------------------------------------------------------------------------
/stress/cql_config.py:
--------------------------------------------------------------------------------
1 | from dotenv import dotenv_values
2 | from os import path
3 | import cql_helper
4 |
5 |
class ConfigSetting:
    """Default values used when an ENV file does not supply a setting.

    NOTE: all defaults are kept as strings; parsing/typing happens in the
    consuming code, not in this module.
    """

    # The key parameters
    ADAPTER = "Cassandra"
    EXECUTOR_DURATION = "5"     # NOTE(review): unit not visible here (seconds vs minutes) -- confirm against consumer
    BULK_LIST = "[[200, 10]]"   # python-literal text, parsed elsewhere
    BULK_LIST_W = "[[200, 10]]"     # presumably used when TEST_TYPE is 'W' (write) -- confirm
    BULK_LIST_R = "[[1, 10]]"       # presumably used for read tests -- confirm
    BULK_LIST_RW = "[[5, 10]]"      # presumably used for mixed read/write tests -- confirm
    EXECUTORS = "[[1, 1, '1x threads'], [2, 1, '1x threads']]"  # executor matrix; item semantics not visible here

    # The other tuning
    EXECUTOR_START_DELAY = "0"      # delay before executors start -- unit not visible here
    DETAIL_OUTPUT = "True"
    GENERATE_GRAPH = "Perf"
    CLUSTER_DIAGNOSE = "Short"
    MULTIPLE_ENV_DELAY = "0"        # delay between runs of multiple ENV files -- unit not visible here

    # Connection and schema defaults
    TABLE = "t01"
    KEYSPACE = "prftest"
    TEST_TYPE = "W"                 # presumably W = write -- confirm against consumer
    REPLICATION_CLASS = "NetworkTopologyStrategy"
    REPLICATION_FACTOR = "3"
    CONSISTENCY_LEVEL = "LOCAL_QUORUM"
    USERNAME = "cassandra"
    PASSWORD = "cassandra"          # fallback when the ENV 'PWD' value is empty (see CQLConfig in this file)
    PORT = "9042"
    IP = "localhost"
    LABEL = "local"
    NUMERIC_SCOPE = "99999"

    # Rebuild switches
    MODEL_REBUILD = "True"
    KEYSPACE_REBUILD = "True"
39 |
class CQLConfig:
    """The configuration of CQL, based on ENV files."""

    def __init__(self, perf_dir = "."):
        """Processing/Parsing of dictionary parameters from config/ENV files.

        :param perf_dir: base directory used to resolve relative paths
                         (e.g. the password file referenced by 'PWD')
        """
        self._perf_dir = perf_dir
        self._config = {}

    def get_global_params(self, env_file) -> dict:
        """Load all key/value pairs from an ENV file.

        The 'PWD' key is treated specially: when set, it is interpreted as a
        path (relative to perf_dir) to a file whose first line is the real
        password; otherwise the default password is used.

        :param env_file: path to the ENV file
        :return: dict of all ENV values, with 'PWD' replaced by the resolved password
        :raises Exception: when the ENV file does not exist
        """
        if not path.exists(env_file):
            raise Exception(f"Invalid path to ENV file '{env_file}'.")

        self._config = dotenv_values(env_file)

        # shallow copy of all ENV values
        global_param = dict(self._config)

        # FIX: use .get() -- the original indexed self._config["PWD"] directly
        # and raised KeyError when PWD was absent, making the documented
        # fallback to ConfigSetting.PASSWORD unreachable
        password_path = self._config.get("PWD")
        global_param['PWD'] = (cql_helper.read_file_line(path.join(self._perf_dir, password_path))
                               if password_path else ConfigSetting.PASSWORD)

        return global_param
65 |
66 |
67 |
68 |
--------------------------------------------------------------------------------
/stress/cql_helper.py:
--------------------------------------------------------------------------------
1 | from time import perf_counter, perf_counter_ns, sleep
2 | import re
3 | from json import loads
4 |
5 |
def str2bool(value) -> bool:
    """Conversion of text value ("True", "1", "Yes", "On") to Bool value"""
    truthy_tokens = {'true', '1', 'yes', 'on'}
    return value.lower() in truthy_tokens
9 |
def bool2str(value, value_true, value_false, value_none) -> str:
    """Conversion value as bool (or None) to text value"""
    if value is None:
        return value_none
    return value_true if value else value_false
15 |
def read_file_line(file) -> str:
    """Return the first line of the file (trailing newline included, if any)."""
    with open(file) as handle:
        first_line = handle.readline()
    return first_line
19 |
def read_file_lines(file) -> list[str]:
    """Return all lines of the file (newline characters kept)."""
    with open(file) as handle:
        return list(handle)
23 |
def read_file_all(file) -> str:
    """Read the whole file, strip surrounding whitespace from every line and
    join the lines with '\\n' (no trailing newline).

    Replaces the original quadratic string '+=' concatenation (and the manual
    removal of the final newline via content[:-1]) with a single str.join.
    """
    with open(file) as f:
        return "\n".join(line.strip() for line in f)
30 |
def to_seconds(duration: str):
    """Convert a text description of a duration to seconds.

    Expected inputs: '5 minutes', '15 hours', '30 seconds', '2 days'.
    Singular unit names ('1 minute') are accepted as well -- a
    backward-compatible generalization (the original returned -1 for them).

    :param duration: "<number> <unit>" string (case-insensitive)
    :return: duration in seconds, or -1 for an unknown unit
    :raises ValueError: when the number part is not an integer or the text
                        does not split into exactly two tokens
    """
    number, unit = duration.lower().split()
    number = int(number.strip())
    unit = unit.strip()

    # seconds per (plural) unit name
    factors = {"seconds": 1, "minutes": 60, "hours": 3600, "days": 86400}
    if not unit.endswith("s"):
        unit += "s"     # accept singular forms too
    factor = factors.get(unit)
    return number * factor if factor is not None else -1
49 |
def get_readable_duration(duration_seconds):
    """Return duration in human-readable form, e.g. '1 day 2 hour 5 min 3 sec'.

    Negative durations return 'n/a'. A zero duration returns '0 sec'
    (the original returned an empty string, which was a defect).
    """
    if duration_seconds < 0:
        return "n/a"

    # (amount, label) pairs from the largest unit down; zero amounts are skipped
    components = (
        (int(duration_seconds // 86400), "day"),
        (int(duration_seconds // 3600 % 24), "hour"),
        (int(duration_seconds // 60 % 60), "min"),
        (int(duration_seconds % 60), "sec"),
    )
    parts = [f"{amount} {label}" for amount, label in components if amount > 0]
    return ' '.join(parts) if parts else "0 sec"
70 |
def load_json(line):
    """Parse one line of JSON (best-effort, deliberately silent).

    :param line: text to parse; leading/trailing whitespace is ignored
    :return: the decoded object, or None when the line is not valid JSON
    """
    try:
        return loads(line.strip())
    except (ValueError, TypeError, AttributeError):
        # ValueError covers json.JSONDecodeError; TypeError/AttributeError
        # cover non-string input (e.g. None). Explicit 'return None' instead
        # of the original swallowed-exception fall-through.
        return None
76 |
--------------------------------------------------------------------------------
/stress/graph_output.py:
--------------------------------------------------------------------------------
1 | from json import dumps
2 | from file_marker import FileMarker
3 | from cql_helper import get_readable_duration, to_seconds
4 | from cql_output import CQLOutput
5 |
6 |
class GraphOutput:
    """Writer of performance records (JSON lines) to a CQLOutput sink."""

    def __init__(self, output:CQLOutput):
        self._output=output

    def print_header(self, start_tasks, label, duration):
        """Emit the banner line and the JSON header record."""
        stamp = start_tasks.isoformat(' ')
        self._output.print(f"############### {stamp} ###############")
        header = {
            FileMarker.PRF_TYPE: FileMarker.PRF_HDR_TYPE,
            FileMarker.PRF_HDR_LABEL: "Noname" if label is None else label,
            FileMarker.PRF_HDR_BULK: [1, 1],
            FileMarker.PRF_HDR_DURATION: duration,
            FileMarker.PRF_HDR_RESPONSE_UNIT: "msec",
            FileMarker.PRF_HDR_NOW: stamp,
        }
        self._output.print(dumps(header))

    def print_footer(self, final_state, duration_seconds):
        """Emit the closing banner with the overall state and duration."""
        state = 'OK' if final_state else 'Error'
        self._output.print(f"############### State: {state}, "
                           f"Duration: {get_readable_duration(duration_seconds)} ({duration_seconds} "
                           f"seconds) ###############")

    def print_details(self, performances):
        """Emit one JSON core record per performance item (values re-typed)."""
        for performance in performances:
            record = {
                FileMarker.PRF_TYPE: FileMarker.PRF_CORE_TYPE,
                FileMarker.PRF_CORE_REAL_EXECUTOR: int(performance[FileMarker.PRF_CORE_REAL_EXECUTOR]),
                FileMarker.PRF_CORE_GROUP: performance[FileMarker.PRF_CORE_GROUP],
                FileMarker.PRF_CORE_TOTAL_CALL_PER_SEC: float(performance[FileMarker.PRF_CORE_TOTAL_CALL_PER_SEC]),
                FileMarker.PRF_CORE_AVRG_TIME: float(performance[FileMarker.PRF_CORE_AVRG_TIME]),
            }
            self._output.print(f"  {dumps(record)}")

    def print_detail(self, performance, group=''):
        """
        Print detail from performance
        """
        record = {
            FileMarker.PRF_TYPE: FileMarker.PRF_CORE_TYPE,
            FileMarker.PRF_CORE_REAL_EXECUTOR: int(performance['executors']),
            FileMarker.PRF_CORE_GROUP: group,
            FileMarker.PRF_CORE_TOTAL_CALL_PER_SEC: float(performance['performance']),
            FileMarker.PRF_CORE_AVRG_TIME: float(performance['avrg']),
        }
        self._output.print(f"  {dumps(record)}")
--------------------------------------------------------------------------------
/stress/cql_output.py:
--------------------------------------------------------------------------------
1 | from os import path, makedirs, linesep
2 | from datetime import datetime
3 | import platform
4 |
5 |
class CQLOutput:
    """Line-oriented output sink.

    Every line passed to print() can go to up to three targets at once:
    an output file (when 'output_file' is set), the screen (stdout) and an
    in-memory text buffer (exposed via the 'text_buffer' property).
    """

    def __init__(self, perf_dir = None, output_file = None, output_screen = True, mode = "wt", text = True):
        """
        :param perf_dir: base directory for 'output_file'; required for file output
        :param output_file: file path (relative to perf_dir), or None to disable file output
        :param output_screen: when True, echo every printed line to stdout
        :param mode: mode used by open() for the output file
        :param text: when True, collect printed lines into an in-memory buffer
        """
        self._perf_dir = perf_dir
        self._output_file = output_file
        self._output_screen = output_screen
        self._mode = mode
        self._text = text
        self._text_buffer = []
        self._file = None
        # effectively always '\n': forced on Windows, and os.linesep is '\n' elsewhere
        self._newLine = "\n" if platform.system().lower() == "windows" else linesep

    def open(self):
        """Open the output file (creating its directory if needed) and reset the buffer."""
        if self._output_file is not None:
            if self._perf_dir:
                dirname = path.dirname(self._output_file)
                full_dir = path.join(self._perf_dir, dirname) if dirname else self._perf_dir
                # FIX: exist_ok=True replaces the original exists()-then-makedirs()
                # sequence, which was vulnerable to a create race
                makedirs(full_dir, mode=0o777, exist_ok=True)
            self._file = open(path.join(self._perf_dir, self._output_file), self._mode, encoding="utf-8")
        if self._text:
            self._text_buffer = []

    def close(self):
        """Close the output file (if open) and drop the in-memory buffer."""
        if self._file is not None:
            self._file.close()
            self._file = None
        if self._text:
            self._text_buffer = []

    def print(self, out: str = ""):
        """Write one line to every enabled target."""
        if self._file is not None:
            self._file.write(f"{out}{self._newLine}")

        if self._output_screen:
            print(out)

        if self._text:
            self._text_buffer.append(out)

    def print_cmd(self, cmd, global_counter, run_value_index, params:dict):
        """Emit a progress 'echo' line followed by the shell command itself.

        :param params: must contain 'OPERATION' and 'THREAD'
        """
        operation = params['OPERATION']
        thread = params['THREAD']
        if operation.lower() != 'remove':
            self.print(f"echo 'START {params['OPERATION']}, {thread}x thread: {global_counter}/{run_value_index}...'")
        else:
            # 'remove' operations do not report counters
            self.print(f"echo 'START {params['OPERATION']}: ...'")
        self.print(cmd)

    def print_header(self):
        """Emit the shell-script preamble (shebang, timestamp, output dir)."""
        self.print("#!/bin/sh")
        self.print("# GENERATED: " + datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
        self.print("curr_date=$(date +%Y-%m-%d_%H-%M-%S)")
        self.print("mkdir -p ./stress-output/$curr_date/")

    def print_footer(self):
        """No footer is needed for generated scripts (kept for interface symmetry)."""
        pass

    @property
    def text_buffer(self):
        """All printed lines joined by the newline separator."""
        return self._newLine.join(self._text_buffer)
71 |
--------------------------------------------------------------------------------
/stress/stress_graph.py:
--------------------------------------------------------------------------------
1 | from glob import glob
2 | from os import path
3 | from cql_helper import load_json
4 | from file_marker import FileMarker as const
5 | from graph_output import GraphOutput
6 | from cql_output import CQLOutput
7 | import datetime
8 |
9 |
class StressGraph:
    """Build performance graphs from previously generated JSON-line output files."""

    def __init__(self, input_path):
        """
        :param input_path: directory that holds the '<name>.txt' performance files
        """
        self._input_path = input_path

    def _parse_json(self, input_file):
        """Parse one JSON-line file.

        :return: tuple (header, cores); header is the last header record found
                 (or "" when none), cores is the list of all core records
        """
        header = ""
        cores = []
        with open(input_file, "r") as f:
            for line in f:
                if line[0] == '#':
                    # banner/comment line
                    continue
                record = load_json(line)
                if not record:
                    continue
                if record[const.PRF_TYPE] == const.PRF_HDR_TYPE:
                    header = record
                elif record[const.PRF_TYPE] == const.PRF_CORE_TYPE:
                    cores.append(record)
        return header, cores

    def join(self, file_names: list[str]):
        """Merge the core records of several files.

        The header (duration/timestamp) is taken from the LAST file processed;
        all files are expected to share the same run parameters.

        :return: (cores, duration, start_datetime), or (None, None, None)
                 when no header is available
        """
        join_cores = []
        # FIX: initialize header -- the original raised NameError when
        # file_names was an empty list
        header = None
        for file_name in file_names:
            cores, header = self._get_data(file_name)
            if cores:
                join_cores.extend(cores)

        if header and len(header) > 0:
            return join_cores, header['duration'], datetime.datetime.fromisoformat(header['now'])
        return None, None, None

    def graph(self, output_dir, new_label, join_cores, duration, now):
        """Render a performance graph from joined core records (no-op when empty)."""
        from qgate_graph.graph_performance import GraphPerformance

        if join_cores:
            # rebuild the text form of the performance output in memory ...
            output = CQLOutput(output_screen=False, text=True)
            output.open()
            graph = GraphOutput(output)
            graph.print_header(now, new_label, duration)
            graph.print_details(join_cores)
            graph.print_footer(True, duration)

            # ... and feed it to the graph generator
            generator = GraphPerformance()
            generator.generate_from_text(output.text_buffer, output_dir, suppress_error=True)
            output.close()

    def _get_data(self, file_name):
        """Resolve '<input_path>/<file_name>.txt' (glob pattern allowed; the
        last match wins) and parse it.

        :return: (cores, header), or (None, None) when no file matches
        """
        real_file = None
        for file in glob(path.join(self._input_path, file_name + ".txt")):
            real_file = file

        if real_file is None:
            return None, None

        new_header, new_cores = self._parse_json(real_file)
        return new_cores, new_header
82 |
83 |
84 |
--------------------------------------------------------------------------------
/docs/samples/write.md:
--------------------------------------------------------------------------------
1 | # 1. Version 5
2 |
3 | ### 1.1 WRITE, CL=LOCAL_ONE (with SizeTieredCompactionStrategy)
4 | - ./apache-cassandra-5.0.2/tools/bin/cassandra-stress write duration=1m cl=LOCAL_ONE
5 | - -node 10.129.53.159,10.129.53.154,10.129.53.153
6 | - -mode user=perf password=perf prepared protocolVersion=4
7 | - -schema "replication(strategy=NetworkTopologyStrategy,factor=3)" "compaction(strategy=SizeTieredCompactionStrategy,max_threshold=32,min_threshold=6)"
8 | - -rate "threads>=4" "threads<=200"
9 | - -graph "file=v5_read_1_local_one_$(date +%Y%m%d_%H%M%S).html" title=v5_read_1_local_one
10 | - -reporting output-frequency=5s > "v5_write_1_local_one_$(date +%Y%m%d_%H%M%S).txt"
11 |
12 | ### 1.2 WRITE, CL=LOCAL_QUORUM
13 | - ./apache-cassandra-5.0.2/tools/bin/cassandra-stress write duration=1m cl=LOCAL_QUORUM
14 | - -node 10.129.53.159,10.129.53.154,10.129.53.153
15 | - -mode user=perf password=perf prepared protocolVersion=4
16 | - -schema "replication(strategy=NetworkTopologyStrategy,factor=3)"
17 | - -rate "threads>=4" "threads<=200"
18 | - -reporting output-frequency=5s > "v5_write_2_local_quorum_$(date +%Y%m%d_%H%M%S).txt"
19 |
20 | ### 1.3 WRITE, CL=EACH_QUORUM
21 | - ./apache-cassandra-5.0.2/tools/bin/cassandra-stress write duration=1m cl=EACH_QUORUM
22 | - -node 10.129.53.159,10.129.53.154,10.129.53.153,10.117.19.6,10.117.19.5,10.117.19.4
23 | - -mode user=perf password=perf prepared protocolVersion=4
24 | - -schema "replication(strategy=NetworkTopologyStrategy,factor=3)"
25 | - -rate "threads>=4" "threads<=200"
26 | - -reporting output-frequency=5s > "v5_write_3_each_quorum_$(date +%Y%m%d_%H%M%S).txt"
27 |
28 |
# 2. Version 4
30 |
31 | ### 2.1 WRITE, CL=LOCAL_ONE
32 | - ./apache-cassandra-5.0.2/tools/bin/cassandra-stress write duration=1m cl=LOCAL_ONE
33 | - -node 10.129.52.58,10.129.53.21,10.129.52.57
34 | - -mode user=perf password=perf prepared protocolVersion=4
35 | - -schema "replication(strategy=NetworkTopologyStrategy,factor=3)"
36 | - -rate "threads>=4" "threads<=100"
37 | - -reporting output-frequency=5s > "v4_write_1_local_one_$(date +%Y%m%d_%H%M%S).txt"
38 |
39 | ### 2.2 WRITE, CL=LOCAL_QUORUM
40 | - ./apache-cassandra-5.0.2/tools/bin/cassandra-stress write duration=1m cl=LOCAL_QUORUM
41 | - -node 10.129.52.58,10.129.53.21,10.129.52.57
42 | - -mode user=perf password=perf prepared protocolVersion=4
43 | - -schema "replication(strategy=NetworkTopologyStrategy,factor=3)"
44 | - -rate "threads>=4" "threads<=100"
45 | - -reporting output-frequency=5s > "v4_write_2_local_quorum_$(date +%Y%m%d_%H%M%S).txt"
46 |
47 | ### 2.3 WRITE, CL=EACH_QUORUM
48 | - ./apache-cassandra-5.0.2/tools/bin/cassandra-stress write duration=1m cl=ALL
49 | - -node 10.129.52.58,10.129.53.21,10.129.52.57,10.117.19.9,10.117.19.8,10.117.19.11
50 | - -mode user=perf password=perf prepared protocolVersion=4
51 | - -schema "replication(strategy=NetworkTopologyStrategy,factor=3)"
52 | - -rate "threads>=4" "threads<=100"
- -reporting output-frequency=5s > "v4_write_3_each_quorum_$(date +%Y%m%d_%H%M%S).txt"
54 |
55 |
--------------------------------------------------------------------------------
/docs/samples/read.md:
--------------------------------------------------------------------------------
1 |
2 | # 1. Version 5
3 |
4 | ### 1.1 READ, CL=LOCAL_ONE (with SizeTieredCompactionStrategy)
5 | - ./apache-cassandra-5.0.2/tools/bin/cassandra-stress write duration=1m cl=LOCAL_ONE
6 | - -node 10.129.53.159,10.129.53.154,10.129.53.153
7 | - -mode user=perf password=perf prepared protocolVersion=4
8 | - -schema "replication(strategy=NetworkTopologyStrategy,factor=3)" "compaction(strategy=SizeTieredCompactionStrategy,max_threshold=32,min_threshold=6)"
9 | - -rate "threads>=4" "threads<=200"
10 | - -graph "file=v5_read_1_local_one_$(date +%Y%m%d_%H%M%S).html" title=v5_read_1_local_one
11 | - -reporting output-frequency=5s > "v5_write_1_local_one_$(date +%Y%m%d_%H%M%S).txt"
12 |
13 | ### 1.2 READ, CL=LOCAL_ONE
14 | - ./apache-cassandra-5.0.2/tools/bin/cassandra-stress read duration=1m cl=LOCAL_ONE
15 | - -node 10.129.53.159,10.129.53.154,10.129.53.153
16 | - -mode user=perf password=perf prepared protocolVersion=4
17 | - -rate "threads>=4" "threads<=200"
18 | - -graph "file=v5_read_1_local_one_$(date +%Y%m%d_%H%M%S).html" title=v5_read_1_local_one
19 | - -reporting output-frequency=5s > "v5_read_1_local_one_$(date +%Y%m%d_%H%M%S).txt"
20 |
21 | ### 1.3 READ, CL=LOCAL_QUORUM
22 | - ./apache-cassandra-5.0.2/tools/bin/cassandra-stress read duration=1m cl=LOCAL_QUORUM
23 | - -node 10.129.53.159,10.129.53.154,10.129.53.153
24 | - -mode user=perf password=perf prepared protocolVersion=4
25 | - -rate "threads>=4" "threads<=200"
26 | - -reporting output-frequency=5s > "v5_read_2_local_quorum_$(date +%Y%m%d_%H%M%S).txt"
27 |
28 | ### 1.4 READ, CL=EACH_QUORUM
29 | - ./apache-cassandra-5.0.2/tools/bin/cassandra-stress read duration=1m cl=EACH_QUORUM
30 | - -node 10.129.53.159,10.129.53.154,10.129.53.153,10.117.19.6,10.117.19.5,10.117.19.4
31 | - -mode user=perf password=perf prepared protocolVersion=4
32 | - -rate "threads>=4" "threads<=200"
33 | - -reporting output-frequency=5s > "v5_read_3_each_quorum_$(date +%Y%m%d_%H%M%S).txt"
34 |
35 |
# 2. Version 4
37 |
38 | ### 2.1 READ, CL=LOCAL_ONE
39 | - ./apache-cassandra-5.0.2/tools/bin/cassandra-stress read duration=1m cl=LOCAL_ONE
40 | - -node 10.129.52.58,10.129.53.21,10.129.52.57
41 | - -mode user=perf password=perf prepared protocolVersion=4
42 | - -rate "threads>=4" "threads<=100"
43 | - -reporting output-frequency=5s > "v4_read_1_local_one_$(date +%Y%m%d_%H%M%S).txt"
44 |
45 | ### 2.2 READ, CL=LOCAL_QUORUM
46 | - ./apache-cassandra-5.0.2/tools/bin/cassandra-stress read duration=1m cl=LOCAL_QUORUM
47 | - -node 10.129.52.58,10.129.53.21,10.129.52.57
48 | - -mode user=perf password=perf prepared protocolVersion=4
49 | - -rate "threads>=4" "threads<=100"
50 | - -reporting output-frequency=5s > "v4_read_2_local_quorum_$(date +%Y%m%d_%H%M%S).txt"
51 |
52 | ### 2.3 READ, CL=EACH_QUORUM
53 | **NOTE: Error in Cassandra-stress v 5.0.2**
54 |
55 | - ./apache-cassandra-5.0.2/tools/bin/cassandra-stress read duration=1m cl=ALL
56 | - -node 10.129.52.58,10.129.53.21,10.129.52.57,10.117.19.9,10.117.19.8,10.117.19.11
57 | - -mode user=perf password=perf prepared protocolVersion=4
58 | - -rate "threads>=4" "threads<=100"
59 | - -reporting output-frequency=5s > "v4_read_3_each_quorum_$(date +%Y%m%d_%H%M%S).txt"
60 |
61 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # C extensions
7 | *.so
8 |
9 | # Distribution / packaging
10 | .Python
11 | build/
12 | develop-eggs/
13 | dist/
14 | downloads/
15 | eggs/
16 | .eggs/
17 | lib/
18 | lib64/
19 | parts/
20 | sdist/
21 | var/
22 | wheels/
23 | share/python-wheels/
24 | *.egg-info/
25 | .installed.cfg
26 | *.egg
27 | MANIFEST
28 |
29 | # PyInstaller
30 | # Usually these files are written by a python script from a template
31 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
32 | *.manifest
33 | *.spec
34 |
35 | # Installer logs
36 | pip-log.txt
37 | pip-delete-this-directory.txt
38 |
39 | # Unit test / coverage reports
40 | htmlcov/
41 | .tox/
42 | .nox/
43 | .coverage
44 | .coverage.*
45 | .cache
46 | nosetests.xml
47 | coverage.xml
48 | *.cover
49 | *.py,cover
50 | .hypothesis/
51 | .pytest_cache/
52 | cover/
53 |
54 | # Translations
55 | *.mo
56 | *.pot
57 |
58 | # Django stuff:
59 | *.log
60 | local_settings.py
61 | db.sqlite3
62 | db.sqlite3-journal
63 |
64 | # Flask stuff:
65 | instance/
66 | .webassets-cache
67 |
68 | # Scrapy stuff:
69 | .scrapy
70 |
71 | # Sphinx documentation
72 | docs/_build/
73 |
74 | # PyBuilder
75 | .pybuilder/
76 | target/
77 |
78 | # Jupyter Notebook
79 | .ipynb_checkpoints
80 |
81 | # IPython
82 | profile_default/
83 | ipython_config.py
84 |
85 | # pyenv
86 | # For a library or package, you might want to ignore these files since the code is
87 | # intended to run in multiple environments; otherwise, check them in:
88 | # .python-version
89 |
90 | # pipenv
91 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
92 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
93 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
94 | # install all needed dependencies.
95 | #Pipfile.lock
96 |
97 | # poetry
98 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
99 | # This is especially recommended for binary packages to ensure reproducibility, and is more
100 | # commonly ignored for libraries.
101 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
102 | #poetry.lock
103 |
104 | # pdm
105 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
106 | #pdm.lock
107 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
108 | # in version control.
109 | # https://pdm.fming.dev/latest/usage/project/#working-with-version-control
110 | .pdm.toml
111 | .pdm-python
112 | .pdm-build/
113 |
114 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
115 | __pypackages__/
116 |
117 | # Celery stuff
118 | celerybeat-schedule
119 | celerybeat.pid
120 |
121 | # SageMath parsed files
122 | *.sage.py
123 |
124 | # Environments
125 | .env
126 | .venv
127 | env/
128 | venv/
129 | ENV/
130 | env.bak/
131 | venv.bak/
132 |
133 | # Spyder project settings
134 | .spyderproject
135 | .spyproject
136 |
137 | # Rope project settings
138 | .ropeproject
139 |
140 | # mkdocs documentation
141 | /site
142 |
143 | # mypy
144 | .mypy_cache/
145 | .dmypy.json
146 | dmypy.json
147 |
148 | # Pyre type checker
149 | .pyre/
150 |
151 | # pytype static type analyzer
152 | .pytype/
153 |
154 | # Cython debug symbols
155 | cython_debug/
156 |
157 | # PyCharm
158 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
159 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
160 | # and can be added to the global gitignore or merged into this file. For a more nuclear
161 | # option (not recommended) you can uncomment the following to ignore the entire idea folder.
162 | #.idea/
163 | /secrets/
164 | /secrets
165 | /stress/config/private/
166 | /stress/stress-cmd/private/
167 |
--------------------------------------------------------------------------------
/stress/config/cass/process_data_1_6B.yaml:
--------------------------------------------------------------------------------
1 | # insert data
2 | # cassandra-stress user profile=/home/stress/cqlstress-example.yaml ops(insert=1)
3 |
4 | # read, using query simple1:
5 | # cassandra-stress profile=/home/stress/cqlstress-example.yaml ops(simple1=1)
6 |
7 | # mixed workload (20/80)
8 | # cassandra-stress user profile=/home/stress/cqlstress-example.yaml ops(insert=2,simple1=8)
9 |
10 |
11 | #
12 | # Keyspace info
13 | #
14 | keyspace: prftest1
15 |
16 | #
17 | # The CQL for creating a keyspace (optional if it already exists)
18 | #
19 | keyspace_definition: |
20 | CREATE KEYSPACE prftest1 WITH replication = {'class': 'NetworkTopologyStrategy', 'replication_factor': 3};
21 |
22 | #
23 | # Table info
24 | #
25 | table: process_data
26 |
27 | #
28 | # The CQL for creating a table you wish to stress (optional if it already exists)
29 | #
30 | table_definition: |
31 | CREATE TABLE prftest1.process_data (
32 | id bigint,
33 | entity text,
34 | orderId timeuuid,
35 | data blob,
36 | PRIMARY KEY (id, entity, orderId)
37 | ) WITH CLUSTERING ORDER BY (entity ASC, orderId ASC)
38 | AND compaction = { 'class': 'UnifiedCompactionStrategy', 'scaling_parameters':'L10,T10' }
39 | AND compression = { 'class': 'LZ4Compressor' }
40 |
41 | # CREATE TABLE prftest1.process_data (
42 | # id uuid,
43 | # entity text,
44 | # orderId timeuuid,
45 | # data blob,
46 | # PRIMARY KEY (id, entity, orderId)
47 | # ) WITH CLUSTERING ORDER BY (entity ASC, orderId ASC)
48 | # AND compaction = { 'class': 'SizeTieredCompactionStrategy' }
49 | # AND compression = { 'class': 'LZ4Compressor' }
50 |
51 |
52 | #
53 | # Optional meta information on the generated columns in the above table
54 | # The min and max only apply to text and blob types
55 | # The distribution field represents the total unique population
56 | # distribution of that column across rows. Supported types are
57 | #
58 | # EXP(min..max) An exponential distribution over the range [min..max]
59 | # EXTREME(min..max,shape) An extreme value (Weibull) distribution over the range [min..max]
60 | # GAUSSIAN(min..max,stdvrng) A gaussian/normal distribution, where mean=(min+max)/2, and stdev is (mean-min)/stdvrng
61 | # GAUSSIAN(min..max,mean,stdev) A gaussian/normal distribution, with explicitly defined mean and stdev
62 | # UNIFORM(min..max) A uniform distribution over the range [min, max]
63 | # FIXED(val) A fixed distribution, always returning the same value
64 | # SEQ(min..max) A fixed sequence, returning values in the range min to max sequentially (starting based on seed), wrapping if necessary.
65 | # Aliases: extr, gauss, normal, norm, weibull
66 | #
67 | # If preceded by ~, the distribution is inverted
68 | #
69 | # Defaults for all columns are size: uniform(4..8), population: uniform(1..100B), cluster: fixed(1)
70 | #
71 | columnspec:
72 | - name: id
73 | population: seq(1..10000)
74 | - name: entity
75 | size: uniform(1..10)
76 | population: seq(1..10)
77 | - name: orderId
78 | population: uniform(1000..2000)
79 | - name: data
80 | size: fixed(6)
81 | population: seq(1..100000)
82 |
83 | insert:
84 | partitions: fixed(1)
85 | batchtype: UNLOGGED
86 | select: fixed(1)/1
87 |
88 | #
89 | # A list of queries you wish to run against the schema
90 | #
91 | queries:
92 | simple1:
93 | cql: select * from prftest1.process_data where id = ? and entity = ? LIMIT 1
94 | fields: samerow # samerow or multirow (select arguments from the same row, or randomly from all rows in the partition)
95 |
96 | #
97 | # A list of bulk read queries that analytics tools may perform against the schema
98 | # Each query will sweep an entire token range, page by page.
99 | #
100 | token_range_queries:
101 | all_columns_tr_query:
102 | columns: '*'
103 | page_size: 5000
104 |
105 | value_tr_query:
106 | columns: value
107 |
--------------------------------------------------------------------------------
/stress/config/cass/process_data_2_1KB.yaml:
--------------------------------------------------------------------------------
1 | # insert data
2 | # cassandra-stress user profile=/home/stress/cqlstress-example.yaml ops(insert=1)
3 |
4 | # read, using query simple1:
5 | # cassandra-stress profile=/home/stress/cqlstress-example.yaml ops(simple1=1)
6 |
7 | # mixed workload (20/80)
8 | # cassandra-stress user profile=/home/stress/cqlstress-example.yaml ops(insert=2,simple1=8)
9 |
10 |
11 | #
12 | # Keyspace info
13 | #
14 | keyspace: prftest1
15 |
16 | #
17 | # The CQL for creating a keyspace (optional if it already exists)
18 | #
19 | keyspace_definition: |
20 | CREATE KEYSPACE prftest1 WITH replication = {'class': 'NetworkTopologyStrategy', 'replication_factor': 3};
21 |
22 | #
23 | # Table info
24 | #
25 | table: process_data
26 |
27 | #
28 | # The CQL for creating a table you wish to stress (optional if it already exists)
29 | #
30 | table_definition: |
31 | CREATE TABLE prftest1.process_data (
32 | id bigint,
33 | entity text,
34 | orderId timeuuid,
35 | data blob,
36 | PRIMARY KEY (id, entity, orderId)
37 | ) WITH CLUSTERING ORDER BY (entity ASC, orderId ASC)
38 | AND compaction = { 'class': 'UnifiedCompactionStrategy', 'scaling_parameters':'L10,T10' }
39 | AND compression = { 'class': 'LZ4Compressor' }
40 |
41 | # CREATE TABLE prftest1.process_data (
42 | # id uuid,
43 | # entity text,
44 | # orderId timeuuid,
45 | # data blob,
46 | # PRIMARY KEY (id, entity, orderId)
47 | # ) WITH CLUSTERING ORDER BY (entity ASC, orderId ASC)
48 | # AND compaction = { 'class': 'SizeTieredCompactionStrategy' }
49 | # AND compression = { 'class': 'LZ4Compressor' }
50 |
51 |
52 | #
53 | # Optional meta information on the generated columns in the above table
54 | # The min and max only apply to text and blob types
55 | # The distribution field represents the total unique population
56 | # distribution of that column across rows. Supported types are
57 | #
58 | # EXP(min..max) An exponential distribution over the range [min..max]
59 | # EXTREME(min..max,shape) An extreme value (Weibull) distribution over the range [min..max]
60 | # GAUSSIAN(min..max,stdvrng) A gaussian/normal distribution, where mean=(min+max)/2, and stdev is (mean-min)/stdvrng
61 | # GAUSSIAN(min..max,mean,stdev) A gaussian/normal distribution, with explicitly defined mean and stdev
62 | # UNIFORM(min..max) A uniform distribution over the range [min, max]
63 | # FIXED(val) A fixed distribution, always returning the same value
64 | # SEQ(min..max) A fixed sequence, returning values in the range min to max sequentially (starting based on seed), wrapping if necessary.
65 | # Aliases: extr, gauss, normal, norm, weibull
66 | #
67 | # If preceded by ~, the distribution is inverted
68 | #
69 | # Defaults for all columns are size: uniform(4..8), population: uniform(1..100B), cluster: fixed(1)
70 | #
71 | columnspec:
72 | - name: id
73 | population: seq(1..10000)
74 | - name: entity
75 | size: uniform(1..10)
76 | population: seq(1..10)
77 | - name: orderId
78 | population: uniform(1000..2000)
79 | - name: data
80 | size: fixed(1024)
81 | population: seq(1..100000)
82 |
83 | insert:
84 | partitions: fixed(1)
85 | batchtype: UNLOGGED
86 | select: fixed(1)/1
87 |
88 | #
89 | # A list of queries you wish to run against the schema
90 | #
91 | queries:
92 | simple1:
93 | cql: select * from prftest1.process_data where id = ? and entity = ? LIMIT 1
94 | fields: samerow # samerow or multirow (select arguments from the same row, or randomly from all rows in the partition)
95 |
96 | #
97 | # A list of bulk read queries that analytics tools may perform against the schema
98 | # Each query will sweep an entire token range, page by page.
99 | #
100 | token_range_queries:
101 | all_columns_tr_query:
102 | columns: '*'
103 | page_size: 5000
104 |
105 | value_tr_query:
106 | columns: value
107 |
--------------------------------------------------------------------------------
/stress/config/cass/process_data_3_10KB.yaml:
--------------------------------------------------------------------------------
1 | # insert data
2 | # cassandra-stress user profile=/home/stress/cqlstress-example.yaml ops(insert=1)
3 |
4 | # read, using query simple1:
5 | # cassandra-stress profile=/home/stress/cqlstress-example.yaml ops(simple1=1)
6 |
7 | # mixed workload (20/80)
8 | # cassandra-stress user profile=/home/stress/cqlstress-example.yaml ops(insert=2,simple1=8)
9 |
10 |
11 | #
12 | # Keyspace info
13 | #
14 | keyspace: prftest1
15 |
16 | #
17 | # The CQL for creating a keyspace (optional if it already exists)
18 | #
19 | keyspace_definition: |
20 | CREATE KEYSPACE prftest1 WITH replication = {'class': 'NetworkTopologyStrategy', 'replication_factor': 3};
21 |
22 | #
23 | # Table info
24 | #
25 | table: process_data
26 |
27 | #
28 | # The CQL for creating a table you wish to stress (optional if it already exists)
29 | #
30 | table_definition: |
31 | CREATE TABLE prftest1.process_data (
32 | id bigint,
33 | entity text,
34 | orderId timeuuid,
35 | data blob,
36 | PRIMARY KEY (id, entity, orderId)
37 | ) WITH CLUSTERING ORDER BY (entity ASC, orderId ASC)
38 | AND compaction = { 'class': 'UnifiedCompactionStrategy', 'scaling_parameters':'L10,T10' }
39 | AND compression = { 'class': 'LZ4Compressor' }
40 |
41 | # CREATE TABLE prftest1.process_data (
42 | # id uuid,
43 | # entity text,
44 | # orderId timeuuid,
45 | # data blob,
46 | # PRIMARY KEY (id, entity, orderId)
47 | # ) WITH CLUSTERING ORDER BY (entity ASC, orderId ASC)
48 | # AND compaction = { 'class': 'SizeTieredCompactionStrategy' }
49 | # AND compression = { 'class': 'LZ4Compressor' }
50 |
51 |
52 | #
53 | # Optional meta information on the generated columns in the above table
54 | # The min and max only apply to text and blob types
55 | # The distribution field represents the total unique population
56 | # distribution of that column across rows. Supported types are
57 | #
58 | # EXP(min..max) An exponential distribution over the range [min..max]
59 | # EXTREME(min..max,shape) An extreme value (Weibull) distribution over the range [min..max]
60 | # GAUSSIAN(min..max,stdvrng) A gaussian/normal distribution, where mean=(min+max)/2, and stdev is (mean-min)/stdvrng
61 | # GAUSSIAN(min..max,mean,stdev) A gaussian/normal distribution, with explicitly defined mean and stdev
62 | # UNIFORM(min..max) A uniform distribution over the range [min, max]
63 | # FIXED(val) A fixed distribution, always returning the same value
64 | # SEQ(min..max) A fixed sequence, returning values in the range min to max sequentially (starting based on seed), wrapping if necessary.
65 | # Aliases: extr, gauss, normal, norm, weibull
66 | #
67 | # If preceded by ~, the distribution is inverted
68 | #
69 | # Defaults for all columns are size: uniform(4..8), population: uniform(1..100B), cluster: fixed(1)
70 | #
71 | columnspec:
72 | - name: id
73 | population: seq(1..10000)
74 | - name: entity
75 | size: uniform(1..10)
76 | population: seq(1..10)
77 | - name: orderId
78 | population: uniform(1000..2000)
79 | - name: data
80 | size: fixed(10240)
81 | population: seq(1..100000)
82 |
83 | insert:
84 | partitions: fixed(1)
85 | batchtype: UNLOGGED
86 | select: fixed(1)/1
87 |
88 | #
89 | # A list of queries you wish to run against the schema
90 | #
91 | queries:
92 | simple1:
93 | cql: select * from prftest1.process_data where id = ? and entity = ? LIMIT 1
94 | fields: samerow # samerow or multirow (select arguments from the same row, or randomly from all rows in the partition)
95 |
96 | #
97 | # A list of bulk read queries that analytics tools may perform against the schema
98 | # Each query will sweep an entire token range, page by page.
99 | #
100 | token_range_queries:
101 | all_columns_tr_query:
102 | columns: '*'
103 | page_size: 5000
104 |
105 | value_tr_query:
106 | columns: data
107 |
--------------------------------------------------------------------------------
/stress/config/cass/process_data_4_50KB.yaml:
--------------------------------------------------------------------------------
1 | # insert data
2 | # cassandra-stress user profile=/home/stress/cqlstress-example.yaml ops(insert=1)
3 |
4 | # read, using query simple1:
5 | # cassandra-stress profile=/home/stress/cqlstress-example.yaml ops(simple1=1)
6 |
7 | # mixed workload (20/80)
8 | # cassandra-stress user profile=/home/stress/cqlstress-example.yaml ops(insert=2,simple1=8)
9 |
10 |
11 | #
12 | # Keyspace info
13 | #
14 | keyspace: prftest1
15 |
16 | #
17 | # The CQL for creating a keyspace (optional if it already exists)
18 | #
19 | keyspace_definition: |
20 | CREATE KEYSPACE prftest1 WITH replication = {'class': 'NetworkTopologyStrategy', 'replication_factor': 3};
21 |
22 | #
23 | # Table info
24 | #
25 | table: process_data
26 |
27 | #
28 | # The CQL for creating a table you wish to stress (optional if it already exists)
29 | #
30 | table_definition: |
31 | CREATE TABLE prftest1.process_data (
32 | id bigint,
33 | entity text,
34 | orderId timeuuid,
35 | data blob,
36 | PRIMARY KEY (id, entity, orderId)
37 | ) WITH CLUSTERING ORDER BY (entity ASC, orderId ASC)
38 | AND compaction = { 'class': 'UnifiedCompactionStrategy', 'scaling_parameters':'L10,T10' }
39 | AND compression = { 'class': 'LZ4Compressor' }
40 |
41 | # CREATE TABLE prftest1.process_data (
42 | # id uuid,
43 | # entity text,
44 | # orderId timeuuid,
45 | # data blob,
46 | # PRIMARY KEY (id, entity, orderId)
47 | # ) WITH CLUSTERING ORDER BY (entity ASC, orderId ASC)
48 | # AND compaction = { 'class': 'SizeTieredCompactionStrategy' }
49 | # AND compression = { 'class': 'LZ4Compressor' }
50 |
51 |
52 | #
53 | # Optional meta information on the generated columns in the above table
54 | # The min and max only apply to text and blob types
55 | # The distribution field represents the total unique population
56 | # distribution of that column across rows. Supported types are
57 | #
58 | # EXP(min..max) An exponential distribution over the range [min..max]
59 | # EXTREME(min..max,shape) An extreme value (Weibull) distribution over the range [min..max]
60 | # GAUSSIAN(min..max,stdvrng) A gaussian/normal distribution, where mean=(min+max)/2, and stdev is (mean-min)/stdvrng
61 | # GAUSSIAN(min..max,mean,stdev) A gaussian/normal distribution, with explicitly defined mean and stdev
62 | # UNIFORM(min..max) A uniform distribution over the range [min, max]
63 | # FIXED(val) A fixed distribution, always returning the same value
64 | # SEQ(min..max) A fixed sequence, returning values in the range min to max sequentially (starting based on seed), wrapping if necessary.
65 | # Aliases: extr, gauss, normal, norm, weibull
66 | #
67 | # If preceded by ~, the distribution is inverted
68 | #
69 | # Defaults for all columns are size: uniform(4..8), population: uniform(1..100B), cluster: fixed(1)
70 | #
71 | columnspec:
72 | - name: id
73 | population: seq(1..10000)
74 | - name: entity
75 | size: uniform(1..10)
76 | population: seq(1..10)
77 | - name: orderId
78 | population: uniform(1000..2000)
79 | - name: data
80 | size: fixed(51200)
81 | population: seq(1..100000)
82 |
83 | insert:
84 | partitions: fixed(1)
85 | batchtype: UNLOGGED
86 | select: fixed(1)/1
87 |
88 | #
89 | # A list of queries you wish to run against the schema
90 | #
91 | queries:
92 | simple1:
93 | cql: select * from prftest1.process_data where id = ? and entity = ? LIMIT 1
94 | fields: samerow # samerow or multirow (select arguments from the same row, or randomly from all rows in the partition)
95 |
96 | #
97 | # A list of bulk read queries that analytics tools may perform against the schema
98 | # Each query will sweep an entire token range, page by page.
99 | #
100 | token_range_queries:
101 | all_columns_tr_query:
102 | columns: '*'
103 | page_size: 5000
104 |
105 | value_tr_query:
106 | columns: data
107 |
--------------------------------------------------------------------------------
/stress/config/cass/process_data_5_100KB.yaml:
--------------------------------------------------------------------------------
1 | # insert data
2 | # cassandra-stress user profile=/home/stress/cqlstress-example.yaml ops(insert=1)
3 |
4 | # read, using query simple1:
5 | # cassandra-stress profile=/home/stress/cqlstress-example.yaml ops(simple1=1)
6 |
7 | # mixed workload (20/80)
8 | # cassandra-stress user profile=/home/stress/cqlstress-example.yaml ops(insert=2,simple1=8)
9 |
10 |
11 | #
12 | # Keyspace info
13 | #
14 | keyspace: prftest1
15 |
16 | #
17 | # The CQL for creating a keyspace (optional if it already exists)
18 | #
19 | keyspace_definition: |
20 | CREATE KEYSPACE prftest1 WITH replication = {'class': 'NetworkTopologyStrategy', 'replication_factor': 3};
21 |
22 | #
23 | # Table info
24 | #
25 | table: process_data
26 |
27 | #
28 | # The CQL for creating a table you wish to stress (optional if it already exists)
29 | #
30 | table_definition: |
31 | CREATE TABLE prftest1.process_data (
32 | id bigint,
33 | entity text,
34 | orderId timeuuid,
35 | data blob,
36 | PRIMARY KEY (id, entity, orderId)
37 | ) WITH CLUSTERING ORDER BY (entity ASC, orderId ASC)
38 | AND compaction = { 'class': 'UnifiedCompactionStrategy', 'scaling_parameters':'L10,T10' }
39 | AND compression = { 'class': 'LZ4Compressor' }
40 |
41 | # CREATE TABLE prftest1.process_data (
42 | # id uuid,
43 | # entity text,
44 | # orderId timeuuid,
45 | # data blob,
46 | # PRIMARY KEY (id, entity, orderId)
47 | # ) WITH CLUSTERING ORDER BY (entity ASC, orderId ASC)
48 | # AND compaction = { 'class': 'SizeTieredCompactionStrategy' }
49 | # AND compression = { 'class': 'LZ4Compressor' }
50 |
51 |
52 | #
53 | # Optional meta information on the generated columns in the above table
54 | # The min and max only apply to text and blob types
55 | # The distribution field represents the total unique population
56 | # distribution of that column across rows. Supported types are
57 | #
58 | # EXP(min..max) An exponential distribution over the range [min..max]
59 | # EXTREME(min..max,shape) An extreme value (Weibull) distribution over the range [min..max]
60 | # GAUSSIAN(min..max,stdvrng) A gaussian/normal distribution, where mean=(min+max)/2, and stdev is (mean-min)/stdvrng
61 | # GAUSSIAN(min..max,mean,stdev) A gaussian/normal distribution, with explicitly defined mean and stdev
62 | # UNIFORM(min..max) A uniform distribution over the range [min, max]
63 | # FIXED(val) A fixed distribution, always returning the same value
64 | # SEQ(min..max) A fixed sequence, returning values in the range min to max sequentially (starting based on seed), wrapping if necessary.
65 | # Aliases: extr, gauss, normal, norm, weibull
66 | #
67 | # If preceded by ~, the distribution is inverted
68 | #
69 | # Defaults for all columns are size: uniform(4..8), population: uniform(1..100B), cluster: fixed(1)
70 | #
71 | columnspec:
72 | - name: id
73 | population: seq(1..10000)
74 | - name: entity
75 | size: uniform(1..10)
76 | population: seq(1..10)
77 | - name: orderId
78 | population: uniform(1000..2000)
79 | - name: data
80 | size: fixed(102400)
81 | population: seq(1..100000)
82 |
83 | insert:
84 | partitions: fixed(1)
85 | batchtype: UNLOGGED
86 | select: fixed(1)/1
87 |
88 | #
89 | # A list of queries you wish to run against the schema
90 | #
91 | queries:
92 | simple1:
93 | cql: select * from prftest1.process_data where id = ? and entity = ? LIMIT 1
94 | fields: samerow # samerow or multirow (select arguments from the same row, or randomly from all rows in the partition)
95 |
96 | #
97 | # A list of bulk read queries that analytics tools may perform against the schema
98 | # Each query will sweep an entire token range, page by page.
99 | #
100 | token_range_queries:
101 | all_columns_tr_query:
102 | columns: '*'
103 | page_size: 5000
104 |
105 | value_tr_query:
106 | columns: data
107 |
--------------------------------------------------------------------------------
/stress/config/cass/process_data_6_300KB.yaml:
--------------------------------------------------------------------------------
1 | # insert data
2 | # cassandra-stress user profile=/home/stress/cqlstress-example.yaml ops(insert=1)
3 |
4 | # read, using query simple1:
5 | # cassandra-stress profile=/home/stress/cqlstress-example.yaml ops(simple1=1)
6 |
7 | # mixed workload (20/80)
8 | # cassandra-stress user profile=/home/stress/cqlstress-example.yaml ops(insert=2,simple1=8)
9 |
10 |
11 | #
12 | # Keyspace info
13 | #
14 | keyspace: prftest1
15 |
16 | #
17 | # The CQL for creating a keyspace (optional if it already exists)
18 | #
19 | keyspace_definition: |
20 | CREATE KEYSPACE prftest1 WITH replication = {'class': 'NetworkTopologyStrategy', 'replication_factor': 3};
21 |
22 | #
23 | # Table info
24 | #
25 | table: process_data
26 |
27 | #
28 | # The CQL for creating a table you wish to stress (optional if it already exists)
29 | #
30 | table_definition: |
31 | CREATE TABLE prftest1.process_data (
32 | id bigint,
33 | entity text,
34 | orderId timeuuid,
35 | data blob,
36 | PRIMARY KEY (id, entity, orderId)
37 | ) WITH CLUSTERING ORDER BY (entity ASC, orderId ASC)
38 | AND compaction = { 'class': 'UnifiedCompactionStrategy', 'scaling_parameters':'L10,T10' }
39 | AND compression = { 'class': 'LZ4Compressor' }
40 |
41 | # CREATE TABLE prftest1.process_data (
42 | # id uuid,
43 | # entity text,
44 | # orderId timeuuid,
45 | # data blob,
46 | # PRIMARY KEY (id, entity, orderId)
47 | # ) WITH CLUSTERING ORDER BY (entity ASC, orderId ASC)
48 | # AND compaction = { 'class': 'SizeTieredCompactionStrategy' }
49 | # AND compression = { 'class': 'LZ4Compressor' }
50 |
51 |
52 | #
53 | # Optional meta information on the generated columns in the above table
54 | # The min and max only apply to text and blob types
55 | # The distribution field represents the total unique population
56 | # distribution of that column across rows. Supported types are
57 | #
58 | # EXP(min..max) An exponential distribution over the range [min..max]
59 | # EXTREME(min..max,shape) An extreme value (Weibull) distribution over the range [min..max]
60 | # GAUSSIAN(min..max,stdvrng) A gaussian/normal distribution, where mean=(min+max)/2, and stdev is (mean-min)/stdvrng
61 | # GAUSSIAN(min..max,mean,stdev) A gaussian/normal distribution, with explicitly defined mean and stdev
62 | # UNIFORM(min..max) A uniform distribution over the range [min, max]
63 | # FIXED(val) A fixed distribution, always returning the same value
64 | # SEQ(min..max) A fixed sequence, returning values in the range min to max sequentially (starting based on seed), wrapping if necessary.
65 | # Aliases: extr, gauss, normal, norm, weibull
66 | #
67 | # If preceded by ~, the distribution is inverted
68 | #
69 | # Defaults for all columns are size: uniform(4..8), population: uniform(1..100B), cluster: fixed(1)
70 | #
71 | columnspec:
72 | - name: id
73 | population: seq(1..10000)
74 | - name: entity
75 | size: uniform(1..10)
76 | population: seq(1..10)
77 | - name: orderId
78 | population: uniform(1000..2000)
79 | - name: data
80 | size: fixed(307200)
81 | population: seq(1..100000)
82 |
83 | insert:
84 | partitions: fixed(1)
85 | batchtype: UNLOGGED
86 | select: fixed(1)/1
87 |
88 | #
89 | # A list of queries you wish to run against the schema
90 | #
91 | queries:
92 | simple1:
93 | cql: select * from prftest1.process_data where id = ? and entity = ? LIMIT 1
94 | fields: samerow # samerow or multirow (select arguments from the same row, or randomly from all rows in the partition)
95 |
96 | #
97 | # A list of bulk read queries that analytics tools may perform against the schema
98 | # Each query will sweep an entire token range, page by page.
99 | #
100 | token_range_queries:
101 | all_columns_tr_query:
102 | columns: '*'
103 | page_size: 5000
104 |
105 | value_tr_query:
106 | columns: data
107 |
--------------------------------------------------------------------------------
/stress/config/cass/process_data_7_1MB.yaml:
--------------------------------------------------------------------------------
1 | # insert data
2 | # cassandra-stress user profile=/home/stress/cqlstress-example.yaml ops(insert=1)
3 |
4 | # read, using query simple1:
5 | # cassandra-stress profile=/home/stress/cqlstress-example.yaml ops(simple1=1)
6 |
7 | # mixed workload (20/80)
8 | # cassandra-stress user profile=/home/stress/cqlstress-example.yaml ops(insert=2,simple1=8)
9 |
10 |
11 | #
12 | # Keyspace info
13 | #
14 | keyspace: prftest1
15 |
16 | #
17 | # The CQL for creating a keyspace (optional if it already exists)
18 | #
19 | keyspace_definition: |
20 | CREATE KEYSPACE prftest1 WITH replication = {'class': 'NetworkTopologyStrategy', 'replication_factor': 3};
21 |
22 | #
23 | # Table info
24 | #
25 | table: process_data
26 |
27 | #
28 | # The CQL for creating a table you wish to stress (optional if it already exists)
29 | #
30 | table_definition: |
31 | CREATE TABLE prftest1.process_data (
32 | id bigint,
33 | entity text,
34 | orderId timeuuid,
35 | data blob,
36 | PRIMARY KEY (id, entity, orderId)
37 | ) WITH CLUSTERING ORDER BY (entity ASC, orderId ASC)
38 | AND compaction = { 'class': 'UnifiedCompactionStrategy', 'scaling_parameters':'L10,T10' }
39 | AND compression = { 'class': 'LZ4Compressor' }
40 |
41 | # CREATE TABLE prftest1.process_data (
42 | # id uuid,
43 | # entity text,
44 | # orderId timeuuid,
45 | # data blob,
46 | # PRIMARY KEY (id, entity, orderId)
47 | # ) WITH CLUSTERING ORDER BY (entity ASC, orderId ASC)
48 | # AND compaction = { 'class': 'SizeTieredCompactionStrategy' }
49 | # AND compression = { 'class': 'LZ4Compressor' }
50 |
51 |
52 | #
53 | # Optional meta information on the generated columns in the above table
54 | # The min and max only apply to text and blob types
55 | # The distribution field represents the total unique population
56 | # distribution of that column across rows. Supported types are
57 | #
58 | # EXP(min..max) An exponential distribution over the range [min..max]
59 | # EXTREME(min..max,shape) An extreme value (Weibull) distribution over the range [min..max]
60 | # GAUSSIAN(min..max,stdvrng) A gaussian/normal distribution, where mean=(min+max)/2, and stdev is (mean-min)/stdvrng
61 | # GAUSSIAN(min..max,mean,stdev) A gaussian/normal distribution, with explicitly defined mean and stdev
62 | # UNIFORM(min..max) A uniform distribution over the range [min, max]
63 | # FIXED(val) A fixed distribution, always returning the same value
64 | # SEQ(min..max) A fixed sequence, returning values in the range min to max sequentially (starting based on seed), wrapping if necessary.
65 | # Aliases: extr, gauss, normal, norm, weibull
66 | #
67 | # If preceded by ~, the distribution is inverted
68 | #
69 | # Defaults for all columns are size: uniform(4..8), population: uniform(1..100B), cluster: fixed(1)
70 | #
71 | columnspec:
72 | - name: id
73 | population: seq(1..10000)
74 | - name: entity
75 | size: uniform(1..10)
76 | population: seq(1..10)
77 | - name: orderId
78 | population: uniform(1000..2000)
79 | - name: data
80 | size: fixed(1048576)
81 | population: seq(1..100000)
82 |
83 | insert:
84 | partitions: fixed(1)
85 | batchtype: UNLOGGED
86 | select: fixed(1)/1
87 |
88 | #
89 | # A list of queries you wish to run against the schema
90 | #
91 | queries:
92 | simple1:
93 | cql: select * from prftest1.process_data where id = ? and entity = ? LIMIT 1
94 | fields: samerow # samerow or multirow (select arguments from the same row, or randomly from all rows in the partition)
95 |
96 | #
97 | # A list of bulk read queries that analytics tools may perform against the schema
98 | # Each query will sweep an entire token range, page by page.
99 | #
100 | token_range_queries:
101 | all_columns_tr_query:
102 | columns: '*'
103 | page_size: 5000
104 |
105 | value_tr_query:
106 | columns: data
107 |
--------------------------------------------------------------------------------
/stress/config/cass/process_data_8_4MB.yaml:
--------------------------------------------------------------------------------
1 | # insert data
2 | # cassandra-stress user profile=/home/stress/cqlstress-example.yaml ops(insert=1)
3 |
4 | # read, using query simple1:
5 | # cassandra-stress profile=/home/stress/cqlstress-example.yaml ops(simple1=1)
6 |
7 | # mixed workload (20/80)
8 | # cassandra-stress user profile=/home/stress/cqlstress-example.yaml ops(insert=2,simple1=8)
9 |
10 |
11 | #
12 | # Keyspace info
13 | #
14 | keyspace: prftest1
15 |
16 | #
17 | # The CQL for creating a keyspace (optional if it already exists)
18 | #
19 | keyspace_definition: |
20 | CREATE KEYSPACE prftest1 WITH replication = {'class': 'NetworkTopologyStrategy', 'replication_factor': 3};
21 |
22 | #
23 | # Table info
24 | #
25 | table: process_data
26 |
27 | #
28 | # The CQL for creating a table you wish to stress (optional if it already exists)
29 | #
30 | table_definition: |
31 | CREATE TABLE prftest1.process_data (
32 | id bigint,
33 | entity text,
34 | orderId timeuuid,
35 | data blob,
36 | PRIMARY KEY (id, entity, orderId)
37 | ) WITH CLUSTERING ORDER BY (entity ASC, orderId ASC)
38 | AND compaction = { 'class': 'UnifiedCompactionStrategy', 'scaling_parameters':'L10,T10' }
39 | AND compression = { 'class': 'LZ4Compressor' }
40 |
41 | # CREATE TABLE prftest1.process_data (
42 | # id uuid,
43 | # entity text,
44 | # orderId timeuuid,
45 | # data blob,
46 | # PRIMARY KEY (id, entity, orderId)
47 | # ) WITH CLUSTERING ORDER BY (entity ASC, orderId ASC)
48 | # AND compaction = { 'class': 'SizeTieredCompactionStrategy' }
49 | # AND compression = { 'class': 'LZ4Compressor' }
50 |
51 |
52 | #
53 | # Optional meta information on the generated columns in the above table
54 | # The min and max only apply to text and blob types
55 | # The distribution field represents the total unique population
56 | # distribution of that column across rows. Supported types are
57 | #
58 | # EXP(min..max) An exponential distribution over the range [min..max]
59 | # EXTREME(min..max,shape) An extreme value (Weibull) distribution over the range [min..max]
60 | # GAUSSIAN(min..max,stdvrng) A gaussian/normal distribution, where mean=(min+max)/2, and stdev is (mean-min)/stdvrng
61 | # GAUSSIAN(min..max,mean,stdev) A gaussian/normal distribution, with explicitly defined mean and stdev
62 | # UNIFORM(min..max) A uniform distribution over the range [min, max]
63 | # FIXED(val) A fixed distribution, always returning the same value
64 | # SEQ(min..max) A fixed sequence, returning values in the range min to max sequentially (starting based on seed), wrapping if necessary.
65 | # Aliases: extr, gauss, normal, norm, weibull
66 | #
67 | # If preceded by ~, the distribution is inverted
68 | #
69 | # Defaults for all columns are size: uniform(4..8), population: uniform(1..100B), cluster: fixed(1)
70 | #
71 | columnspec:
72 | - name: id
73 | population: seq(1..10000)
74 | - name: entity
75 | size: uniform(1..10)
76 | population: seq(1..10)
77 | - name: orderId
78 | population: uniform(1000..2000)
79 | - name: data
80 | size: fixed(4194304)
81 | population: seq(1..100000)
82 |
83 | insert:
84 | partitions: fixed(1)
85 | batchtype: UNLOGGED
86 | select: fixed(1)/1
87 |
88 | #
89 | # A list of queries you wish to run against the schema
90 | #
91 | queries:
92 | simple1:
93 | cql: select * from prftest1.process_data where id = ? and entity = ? LIMIT 1
94 | fields: samerow # samerow or multirow (select arguments from the same row, or randomly from all rows in the partition)
95 |
96 | #
97 | # A list of bulk read queries that analytics tools may perform against the schema
98 | # Each query will sweep an entire token range, page by page.
99 | #
100 | token_range_queries:
101 | all_columns_tr_query:
102 | columns: '*'
103 | page_size: 5000
104 |
105 | value_tr_query:
106 | columns: data
107 |
--------------------------------------------------------------------------------
/docs/samples/yaml/cqlstress-example.yaml:
--------------------------------------------------------------------------------
1 | # insert data
2 | # cassandra-stress user profile=/home/stress/cqlstress-example.yaml ops(insert=1)
3 |
4 | # read, using query simple1:
5 | # cassandra-stress profile=/home/stress/cqlstress-example.yaml ops(simple1=1)
6 |
7 | # mixed workload (20/80)
8 | # cassandra-stress user profile=/home/stress/cqlstress-example.yaml ops(insert=2,simple1=8)
9 |
10 |
11 | #
12 | # Keyspace info
13 | #
14 | keyspace: stresscql
15 |
16 | #
17 | # The CQL for creating a keyspace (optional if it already exists)
18 | #
19 | keyspace_definition: |
20 | CREATE KEYSPACE stresscql WITH replication = {'class': 'NetworkTopologyStrategy', 'replication_factor': 3};
21 |
22 | #
23 | # Table info
24 | #
25 | table: typestest
26 |
27 | #
28 | # The CQL for creating a table you wish to stress (optional if it already exists)
29 | #
30 | table_definition: |
31 | CREATE TABLE typestest (
32 | name text,
33 | choice boolean,
34 | date timestamp,
35 | address inet,
36 | dbl double,
37 | lval bigint,
38 | ival int,
39 | uid timeuuid,
40 | value blob,
41 | PRIMARY KEY((name,choice), date, address, dbl, lval, ival, uid)
42 | )
43 | WITH compaction = { 'class':'SizeTieredCompactionStrategy' }
44 | # AND compression = { 'class' : 'LZ4Compressor' }
45 | # AND comment='A table of many types to test wide rows'
46 |
47 | #
48 | # Optional meta information on the generated columns in the above table
49 | # The min and max only apply to text and blob types
50 | # The distribution field represents the total unique population
51 | # distribution of that column across rows. Supported types are
52 | #
53 | # EXP(min..max) An exponential distribution over the range [min..max]
54 | # EXTREME(min..max,shape) An extreme value (Weibull) distribution over the range [min..max]
55 | # GAUSSIAN(min..max,stdvrng) A gaussian/normal distribution, where mean=(min+max)/2, and stdev is (mean-min)/stdvrng
56 | # GAUSSIAN(min..max,mean,stdev) A gaussian/normal distribution, with explicitly defined mean and stdev
57 | # UNIFORM(min..max) A uniform distribution over the range [min, max]
58 | # FIXED(val) A fixed distribution, always returning the same value
59 | # SEQ(min..max) A fixed sequence, returning values in the range min to max sequentially (starting based on seed), wrapping if necessary.
60 | # Aliases: extr, gauss, normal, norm, weibull
61 | #
62 | # If preceded by ~, the distribution is inverted
63 | #
64 | # Defaults for all columns are size: uniform(4..8), population: uniform(1..100B), cluster: fixed(1)
65 | #
66 | columnspec:
67 | - name: name
68 | size: uniform(1..10)
69 | population: uniform(1..10) # the range of unique values to select for the field (default is 100Billion)
70 | - name: date
71 | cluster: uniform(20..40)
72 | - name: lval
73 | population: gaussian(1..1000)
74 | cluster: uniform(1..4)
75 |
76 | insert:
77 | partitions: uniform(1..50) # number of unique partitions to update in a single operation
78 | # if batchcount > 1, multiple batches will be used but all partitions will
79 | # occur in all batches (unless they finish early); only the row counts will vary
80 | batchtype: LOGGED # type of batch to use
81 | select: uniform(1..10)/10 # uniform chance any single generated CQL row will be visited in a partition;
82 | # generated for each partition independently, each time we visit it
83 |
84 | #
85 | # A list of queries you wish to run against the schema
86 | #
87 | queries:
88 | simple1:
89 | cql: select * from typestest where name = ? and choice = ? LIMIT 100
90 | fields: samerow # samerow or multirow (select arguments from the same row, or randomly from all rows in the partition)
91 | range1:
92 | cql: select * from typestest where name = ? and choice = ? and date >= ? LIMIT 100
93 | fields: multirow # samerow or multirow (select arguments from the same row, or randomly from all rows in the partition)
94 |
95 |
96 | #
97 | # A list of bulk read queries that analytics tools may perform against the schema
98 | # Each query will sweep an entire token range, page by page.
99 | #
100 | token_range_queries:
101 | all_columns_tr_query:
102 | columns: '*'
103 | page_size: 5000
104 |
105 | value_tr_query:
106 | columns: value
107 |
--------------------------------------------------------------------------------
/stress/stress-cmd/_cass_v4.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | # GENERATED: 2024-11-30 15:24:40
3 | curr_date=$(date +%Y-%m-%d_%H-%M-%S)
4 | mkdir -p ./stress-output/$curr_date/
5 | echo 'Based on: .\config\compareV4V5_sequenceTHR\_cass_v4_1_local_one.env'
6 | echo 'START remove: 1/1...'
7 | python3.11 stress/stress_mng.py remove -e compareV4V5_sequenceTHR/_cass_v4_1_local_one.env -k keyspace1 -d stress -s 8
8 |
9 | echo 'START write, 128x thread: 2/1...'
10 | ./apache-cassandra-5.0.2/tools/bin/cassandra-stress write duration=1m cl=LOCAL_ONE no-warmup -node 10.129.52.58,10.129.53.21,10.129.52.57 -mode user=perf password=perf prepared protocolVersion=4 connectionsPerHost=24 maxPending=384 -schema "replication(strategy=NetworkTopologyStrategy,factor=3)" "compaction(strategy=LeveledCompactionStrategy,sstable_size_in_mb=160,fanout_size=10)" -rate "threads<=128" -reporting output-frequency=5s > "./stress-output/$curr_date/$curr_date v4 write_LOCAL_ONE_LCS_128xTHR.txt"
11 |
12 | echo 'START read, 128x thread: 3/1...'
13 | ./apache-cassandra-5.0.2/tools/bin/cassandra-stress read duration=1m cl=LOCAL_ONE no-warmup -node 10.129.52.58,10.129.53.21,10.129.52.57 -mode user=perf password=perf prepared protocolVersion=4 connectionsPerHost=24 maxPending=384 -rate "threads<=128" -reporting output-frequency=5s > "./stress-output/$curr_date/$curr_date v4 read_LOCAL_ONE_LCS_128xTHR.txt"
14 |
15 | echo 'START remove: 4/1...'
16 | python3.11 stress/stress_mng.py remove -e compareV4V5_sequenceTHR/_cass_v4_1_local_one.env -k keyspace1 -d stress -s 8
17 |
18 | echo 'START write, 128x thread: 5/1...'
19 | ./apache-cassandra-5.0.2/tools/bin/cassandra-stress write duration=1m cl=LOCAL_ONE no-warmup -node 10.129.52.58,10.129.53.21,10.129.52.57 -mode user=perf password=perf prepared protocolVersion=4 connectionsPerHost=24 maxPending=384 -schema "replication(strategy=NetworkTopologyStrategy,factor=3)" "compaction(strategy=SizeTieredCompactionStrategy,max_threshold=32,min_threshold=4)" -rate "threads<=128" -reporting output-frequency=5s > "./stress-output/$curr_date/$curr_date v4 write_LOCAL_ONE_STCS_128xTHR.txt"
20 |
21 | echo 'START read, 128x thread: 6/1...'
22 | ./apache-cassandra-5.0.2/tools/bin/cassandra-stress read duration=1m cl=LOCAL_ONE no-warmup -node 10.129.52.58,10.129.53.21,10.129.52.57 -mode user=perf password=perf prepared protocolVersion=4 connectionsPerHost=24 maxPending=384 -rate "threads<=128" -reporting output-frequency=5s > "./stress-output/$curr_date/$curr_date v4 read_LOCAL_ONE_STCS_128xTHR.txt"
23 |
24 | echo 'START remove: 7/1...'
25 | python3.11 stress/stress_mng.py remove -e compareV4V5_sequenceTHR/_cass_v4_1_local_one.env -k keyspace1 -d stress -s 8
26 |
27 | echo 'Based on: .\config\compareV4V5_sequenceTHR\_cass_v4_2_local_quorum.env'
28 | echo 'START remove: 8/1...'
29 | python3.11 stress/stress_mng.py remove -e compareV4V5_sequenceTHR/_cass_v4_2_local_quorum.env -k keyspace1 -d stress -s 8
30 |
31 | echo 'START write, 128x thread: 9/1...'
32 | ./apache-cassandra-5.0.2/tools/bin/cassandra-stress write duration=1m cl=LOCAL_QUORUM no-warmup -node 10.129.52.58,10.129.53.21,10.129.52.57 -mode user=perf password=perf prepared protocolVersion=4 connectionsPerHost=24 maxPending=384 -schema "replication(strategy=NetworkTopologyStrategy,factor=3)" "compaction(strategy=LeveledCompactionStrategy,sstable_size_in_mb=160,fanout_size=10)" -rate "threads<=128" -reporting output-frequency=5s > "./stress-output/$curr_date/$curr_date v4 write_LOCAL_QUORUM_LCS_128xTHR.txt"
33 |
34 | echo 'START read, 128x thread: 10/1...'
35 | ./apache-cassandra-5.0.2/tools/bin/cassandra-stress read duration=1m cl=LOCAL_QUORUM no-warmup -node 10.129.52.58,10.129.53.21,10.129.52.57 -mode user=perf password=perf prepared protocolVersion=4 connectionsPerHost=24 maxPending=384 -rate "threads<=128" -reporting output-frequency=5s > "./stress-output/$curr_date/$curr_date v4 read_LOCAL_QUORUM_LCS_128xTHR.txt"
36 |
37 | echo 'START remove: 11/1...'
38 | python3.11 stress/stress_mng.py remove -e compareV4V5_sequenceTHR/_cass_v4_2_local_quorum.env -k keyspace1 -d stress -s 8
39 |
40 | echo 'START write, 128x thread: 12/1...'
41 | ./apache-cassandra-5.0.2/tools/bin/cassandra-stress write duration=1m cl=LOCAL_QUORUM no-warmup -node 10.129.52.58,10.129.53.21,10.129.52.57 -mode user=perf password=perf prepared protocolVersion=4 connectionsPerHost=24 maxPending=384 -schema "replication(strategy=NetworkTopologyStrategy,factor=3)" "compaction(strategy=SizeTieredCompactionStrategy,max_threshold=32,min_threshold=4)" -rate "threads<=128" -reporting output-frequency=5s > "./stress-output/$curr_date/$curr_date v4 write_LOCAL_QUORUM_STCS_128xTHR.txt"
42 |
43 | echo 'START read, 128x thread: 13/1...'
44 | ./apache-cassandra-5.0.2/tools/bin/cassandra-stress read duration=1m cl=LOCAL_QUORUM no-warmup -node 10.129.52.58,10.129.53.21,10.129.52.57 -mode user=perf password=perf prepared protocolVersion=4 connectionsPerHost=24 maxPending=384 -rate "threads<=128" -reporting output-frequency=5s > "./stress-output/$curr_date/$curr_date v4 read_LOCAL_QUORUM_STCS_128xTHR.txt"
45 |
46 | echo 'START remove: 14/1...'
47 | python3.11 stress/stress_mng.py remove -e compareV4V5_sequenceTHR/_cass_v4_2_local_quorum.env -k keyspace1 -d stress -s 8
48 |
49 |
--------------------------------------------------------------------------------
/stress/stress-cmd/_cass_seq_v4.sh:
--------------------------------------------------------------------------------
#!/bin/sh
# GENERATED: 2024-11-30 15:36:18
# Sequence-THR performance run for Cassandra v4: for each consistency level
# (LOCAL_ONE, LOCAL_QUORUM) and each compaction strategy (LCS, STCS) run a
# 1-minute write test followed by a 1-minute read test; the keyspace is
# removed between test pairs so every test starts from a clean state.
curr_date=$(date +%Y-%m-%d_%H-%M-%S)
# all test outputs are collected under a timestamped directory
mkdir -p ./stress-output/$curr_date/
echo 'Based on: .\config\compareV4V5_sequenceTHR\_cass_v4_1_local_one.env'
# --- LOCAL_ONE ---
echo 'START remove: 1/1...'
python3.11 stress/stress_mng.py remove -e compareV4V5_sequenceTHR/_cass_v4_1_local_one.env -k keyspace1 -d stress -s 8

# write + read with LeveledCompactionStrategy (LCS)
echo 'START write, 128x thread: 2/1...'
./apache-cassandra-5.0.2/tools/bin/cassandra-stress write duration=1m cl=LOCAL_ONE no-warmup -node 10.129.52.58,10.129.53.21,10.129.52.57 -mode user=perf password=perf prepared protocolVersion=4 connectionsPerHost=24 maxPending=384 -schema "replication(strategy=NetworkTopologyStrategy,factor=3)" "compaction(strategy=LeveledCompactionStrategy,sstable_size_in_mb=160,fanout_size=10)" -rate "threads<=128" -reporting output-frequency=5s > "./stress-output/$curr_date/$curr_date v4 write_LOCAL_ONE_LCS_128xTHR.txt"

echo 'START read, 128x thread: 3/1...'
./apache-cassandra-5.0.2/tools/bin/cassandra-stress read duration=1m cl=LOCAL_ONE no-warmup -node 10.129.52.58,10.129.53.21,10.129.52.57 -mode user=perf password=perf prepared protocolVersion=4 connectionsPerHost=24 maxPending=384 -rate "threads<=128" -reporting output-frequency=5s > "./stress-output/$curr_date/$curr_date v4 read_LOCAL_ONE_LCS_128xTHR.txt"

echo 'START remove: 4/1...'
python3.11 stress/stress_mng.py remove -e compareV4V5_sequenceTHR/_cass_v4_1_local_one.env -k keyspace1 -d stress -s 8

# write + read with SizeTieredCompactionStrategy (STCS)
echo 'START write, 128x thread: 5/1...'
./apache-cassandra-5.0.2/tools/bin/cassandra-stress write duration=1m cl=LOCAL_ONE no-warmup -node 10.129.52.58,10.129.53.21,10.129.52.57 -mode user=perf password=perf prepared protocolVersion=4 connectionsPerHost=24 maxPending=384 -schema "replication(strategy=NetworkTopologyStrategy,factor=3)" "compaction(strategy=SizeTieredCompactionStrategy,max_threshold=32,min_threshold=4)" -rate "threads<=128" -reporting output-frequency=5s > "./stress-output/$curr_date/$curr_date v4 write_LOCAL_ONE_STCS_128xTHR.txt"

echo 'START read, 128x thread: 6/1...'
./apache-cassandra-5.0.2/tools/bin/cassandra-stress read duration=1m cl=LOCAL_ONE no-warmup -node 10.129.52.58,10.129.53.21,10.129.52.57 -mode user=perf password=perf prepared protocolVersion=4 connectionsPerHost=24 maxPending=384 -rate "threads<=128" -reporting output-frequency=5s > "./stress-output/$curr_date/$curr_date v4 read_LOCAL_ONE_STCS_128xTHR.txt"

echo 'START remove: 7/1...'
python3.11 stress/stress_mng.py remove -e compareV4V5_sequenceTHR/_cass_v4_1_local_one.env -k keyspace1 -d stress -s 8

# --- LOCAL_QUORUM ---
echo 'Based on: .\config\compareV4V5_sequenceTHR\_cass_v4_2_local_quorum.env'
echo 'START remove: 8/1...'
python3.11 stress/stress_mng.py remove -e compareV4V5_sequenceTHR/_cass_v4_2_local_quorum.env -k keyspace1 -d stress -s 8

# write + read with LeveledCompactionStrategy (LCS)
echo 'START write, 128x thread: 9/1...'
./apache-cassandra-5.0.2/tools/bin/cassandra-stress write duration=1m cl=LOCAL_QUORUM no-warmup -node 10.129.52.58,10.129.53.21,10.129.52.57 -mode user=perf password=perf prepared protocolVersion=4 connectionsPerHost=24 maxPending=384 -schema "replication(strategy=NetworkTopologyStrategy,factor=3)" "compaction(strategy=LeveledCompactionStrategy,sstable_size_in_mb=160,fanout_size=10)" -rate "threads<=128" -reporting output-frequency=5s > "./stress-output/$curr_date/$curr_date v4 write_LOCAL_QUORUM_LCS_128xTHR.txt"

echo 'START read, 128x thread: 10/1...'
./apache-cassandra-5.0.2/tools/bin/cassandra-stress read duration=1m cl=LOCAL_QUORUM no-warmup -node 10.129.52.58,10.129.53.21,10.129.52.57 -mode user=perf password=perf prepared protocolVersion=4 connectionsPerHost=24 maxPending=384 -rate "threads<=128" -reporting output-frequency=5s > "./stress-output/$curr_date/$curr_date v4 read_LOCAL_QUORUM_LCS_128xTHR.txt"

echo 'START remove: 11/1...'
python3.11 stress/stress_mng.py remove -e compareV4V5_sequenceTHR/_cass_v4_2_local_quorum.env -k keyspace1 -d stress -s 8

# write + read with SizeTieredCompactionStrategy (STCS)
echo 'START write, 128x thread: 12/1...'
./apache-cassandra-5.0.2/tools/bin/cassandra-stress write duration=1m cl=LOCAL_QUORUM no-warmup -node 10.129.52.58,10.129.53.21,10.129.52.57 -mode user=perf password=perf prepared protocolVersion=4 connectionsPerHost=24 maxPending=384 -schema "replication(strategy=NetworkTopologyStrategy,factor=3)" "compaction(strategy=SizeTieredCompactionStrategy,max_threshold=32,min_threshold=4)" -rate "threads<=128" -reporting output-frequency=5s > "./stress-output/$curr_date/$curr_date v4 write_LOCAL_QUORUM_STCS_128xTHR.txt"

echo 'START read, 128x thread: 13/1...'
./apache-cassandra-5.0.2/tools/bin/cassandra-stress read duration=1m cl=LOCAL_QUORUM no-warmup -node 10.129.52.58,10.129.53.21,10.129.52.57 -mode user=perf password=perf prepared protocolVersion=4 connectionsPerHost=24 maxPending=384 -rate "threads<=128" -reporting output-frequency=5s > "./stress-output/$curr_date/$curr_date v4 read_LOCAL_QUORUM_STCS_128xTHR.txt"

echo 'START remove: 14/1...'
python3.11 stress/stress_mng.py remove -e compareV4V5_sequenceTHR/_cass_v4_2_local_quorum.env -k keyspace1 -d stress -s 8


--------------------------------------------------------------------------------
/stress/cql_access.py:
--------------------------------------------------------------------------------
1 | from cassandra.auth import PlainTextAuthProvider
2 | from cassandra.cluster import Cluster, Session
3 | from cassandra import ProtocolVersion
4 | from cassandra.policies import DCAwareRoundRobinPolicy, RoundRobinPolicy
5 | from cassandra.policies import RetryPolicy
6 | from colorama import Fore, Style
7 | from time import perf_counter
8 |
9 |
class Setting:
    """Setting for CQLAccess"""

    # default timeout in seconds for connections and regular statements
    TIMEOUT = 40
    # extended timeout in seconds for DDL/model operations (DROP/CREATE)
    TIMEOUT_CREATE_MODEL = 900
15 |
class CQLAccess:
    """The access via CQL (cluster/session handling and model maintenance)."""

    def __init__(self, run_setup):
        """
        :param run_setup: dict-like run configuration (IP, PORT, USER, PWD,
            LOCAL_DC, SECURE_CONNECT_BUNDLE, keyspace/table settings, ...)
        """
        self._run_setup = run_setup
        self._cluster = None

    @property
    def cluster(self):
        # underlying Cluster instance (None until open() is called)
        return self._cluster

    def open(self):
        """Create cluster for connection"""
        auth_provider = None

        # authentication provider (only when USER is configured)
        if self._run_setup.get('USER', None):
            auth_provider = PlainTextAuthProvider(username = self._run_setup["USER"],
                                                  password = self._run_setup["PWD"])

        # load balancing policy (DC-aware when LOCAL_DC is configured)
        if self._run_setup.get("LOCAL_DC", None):
            load_balancing_policy = DCAwareRoundRobinPolicy(local_dc = self._run_setup["LOCAL_DC"])
        else:
            load_balancing_policy = RoundRobinPolicy()

        if self._run_setup.get("SECURE_CONNECT_BUNDLE", None):
            # connection with 'secure_connect_bundle' to the cloud
            cloud_config = {
                "secure_connect_bundle": self._run_setup["SECURE_CONNECT_BUNDLE"],
                'use_default_tempdir': True
            }
            self._cluster = Cluster(cloud = cloud_config,
                                    auth_provider = auth_provider,
                                    load_balancing_policy = load_balancing_policy,
                                    default_retry_policy = RetryPolicy(),
                                    control_connection_timeout = Setting.TIMEOUT,
                                    connect_timeout = Setting.TIMEOUT,
                                    protocol_version = ProtocolVersion.V4)
        else:
            # connection with 'ip' and 'port' ('IP' may hold a comma-separated list)
            self._cluster = Cluster(contact_points = [itm.strip() for itm in self._run_setup['IP'].split(",")],
                                    port = self._run_setup.get('PORT', 9042),
                                    auth_provider = auth_provider,
                                    load_balancing_policy = load_balancing_policy,
                                    default_retry_policy = RetryPolicy(),
                                    control_connection_timeout = Setting.TIMEOUT,
                                    connect_timeout = Setting.TIMEOUT,
                                    protocol_version = ProtocolVersion.V4)
            # issue with 'CRC mismatch on header ...', not use protocol V5
            # protocol_version = ProtocolVersion.V5)

    def create_session(self, timeout = Setting.TIMEOUT) -> Session:
        """Create new session with the given default statement timeout (seconds)."""
        session = self._cluster.connect()
        session.default_timeout = timeout
        return session

    def _execute_statement(self, cql: str):
        """Execute a single CQL statement in a short-lived session.

        Errors are printed instead of raised so cleanup stays best-effort;
        the session is always shut down.
        """
        session = None

        try:
            session = self.create_session(Setting.TIMEOUT_CREATE_MODEL)
            session.execute(cql)
        except Exception as ex:
            print(Fore.LIGHTRED_EX + f"  {type(ex).__name__}: {str(ex)}" + Style.RESET_ALL)
        finally:
            if session:
                session.shutdown()

    def remove_keyspace(self, keyspace: str):
        """Drop the whole keyspace (if it exists)."""
        self._execute_statement(f"DROP KEYSPACE IF EXISTS {keyspace};")

    def remove_table(self, keyspace: str, table: str):
        """Drop the table in the given keyspace (if it exists)."""
        self._execute_statement(f"DROP TABLE IF EXISTS {keyspace}.{table};")

    def create_model(self):
        """Create new NoSQL model (create keyspace and table)"""
        # skip entirely unless a rebuild was requested in the run setup
        if not self._run_setup["model_rebuild"]:
            return

        session = None
        print(f"  Create new model [{self._run_setup['keyspace']}].[{self._run_setup['table']}]...")
        create_start = perf_counter()

        try:
            session = self.create_session(Setting.TIMEOUT_CREATE_MODEL)
            if self._run_setup["keyspace_rebuild"]:
                if self._run_setup['keyspace_replication_factor']:
                    # Drop key space
                    session.execute(f"DROP KEYSPACE IF EXISTS {self._run_setup['keyspace']};")

                    # Create key space
                    session.execute(f"CREATE KEYSPACE IF NOT EXISTS {self._run_setup['keyspace']} " +
                                    "WITH replication = {" +
                                    f"'class':'{self._run_setup['keyspace_replication_class']}', " +
                                    f"'replication_factor' : {self._run_setup['keyspace_replication_factor']}" +
                                    "};")

            # use LTW atomic command with IF
            session.execute(f"DROP TABLE IF EXISTS {self._run_setup['keyspace']}.{self._run_setup['table']};")

            # prepare insert statement for batch
            columns = ""
            for i in range(0, self._run_setup.bulk_col):
                columns += f"fn{i} int,"

            # complex primary key (partition key 'fn0' and cluster key 'fn1')
            create_tbl = (f"CREATE TABLE IF NOT EXISTS {self._run_setup['keyspace']}.{self._run_setup['table']} "
                          f"({columns[:-1]}, PRIMARY KEY (fn0, fn1))")

            # add compaction setting
            if self._run_setup['compaction']:
                compaction_params = f", {self._run_setup['compaction_params']}" if self._run_setup['compaction_params'] else ""
                compaction = " WITH compaction = {" \
                             f"'class': '{self._run_setup['compaction']}'{compaction_params}" \
                             "};"
                create_tbl += compaction

            # create table
            session.execute(create_tbl)
            print(f"  Model created (duration {round(perf_counter()-create_start,1)} seconds)!")
        except Exception as ex:
            print(Fore.LIGHTRED_EX + f"  {type(ex).__name__}: {str(ex)}" + Style.RESET_ALL)
        finally:
            if session:
                session.shutdown()

    def close(self):
        """Close cluster connection and all sessions"""
        if self._cluster:
            self._cluster.shutdown()
            self._cluster = None
155 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # cql-stress-mng
An easy way to manage CQL stress tests with the official 'cassandra-stress' tool. You can use
the solution to test not only Cassandra, but also other solutions that support CQL, such as
Scylla, AstraDB, etc.
5 |
6 | Key benefits:
7 | - **generate** scripts for performance tests (with 'cassandra-stress') based on templates
8 | - **extract** test summaries (from 'cassandra-stress' results)
9 | - **compare** test results (as text tables and/or graphs)
10 | - test cleaning/maintenance (ability to remove keyspace/table via CQL)
11 |
12 | Pre-requisites:
13 | - installed Python >= 3.11
14 | - installed Java (java version based on cassandra-stress specification)
15 | - installed 'cassandra-stress' tool (part of Apache Cassandra distribution)
16 | - access to CQL solution (e.g. Cassandra, Scylla, Astra, etc.)
17 | - open port 9042
18 | - IP addresses for connection
19 | - login information e.g. username/password
20 |
21 | Motivation for this tool/repo:
22 | - use the standard and verified official tool for testing CQL solutions
23 | - focus on your own tests (not on creating a tool for performance tests)
- don't reinvent the wheel (unless necessary; time is money)
25 |
26 | ## 1. Command line usage
27 |
28 | You can see standard description:
29 |
30 | ```sh
31 | python3.11 stress/stress_mng.py --help
32 | ```
33 | ```txt
34 | Usage: stress_mng.py [OPTIONS] COMMAND [ARGS]...
35 |
36 | Options:
37 | --help Show this message and exit.
38 |
39 | Commands:
40 | compare Compare data from TXT(JSON) to the sub-dir 'graph'
41 | extract Extract data from 'cassandra-stress' output to the sub-dir...
42 | generate Generate performance tests as *.sh for 'cassandra-stress'
43 | graph Create graphs from TXT(JSON) to the sub-dir 'graph'
44 | remove Remove keyspace or table from CQL solution
45 | version Print current version of key components
46 | ```
47 |
48 | ### 1.1 Generate
49 |
50 | You can generate shell scripts for 'cassandra-stress', see:
51 |
52 | #### 1.1.1 Generate shell script based on 'compareV4V5_separateTHR\_cass_*.env'
53 |
54 | ```sh
55 | python3.11 stress/stress_mng.py generate -e compareV4V5_separateTHR\_cass_*.env -l stress-cmd/_cass_all.sh
56 | ```
The output is ~140 calls with READ/WRITE tests and explicit
logic for thread growth (4, 8, 16, 32, ...)
59 |
60 | ```sh
61 | #!/bin/sh
62 | # GENERATED: 2024-12-05 16:23:20
63 | curr_date=$(date +%Y-%m-%d_%H-%M-%S)
64 | mkdir -p ./stress-output/$curr_date/
65 | echo 'Based on: .\config\compareV4V5_separateTHR\_cass_v4_1_local_one.env'
66 | echo 'START remove: ...'
67 | python3.11 stress/stress_mng.py remove -e compareV4V5_separateTHR/_cass_v4_1_local_one.env -k keyspace1 -d stress -s 8
68 |
69 | echo 'START write, 4x thread: 1/1...'
70 | ./apache-cassandra-5.0.2/tools/bin/cassandra-stress write duration=1m cl=LOCAL_ONE no-warmup -node 10.129.52.58,10.129.53.21,10.129.52.57 -mode user=perf password=perf prepared protocolVersion=4 connectionsPerHost=24 maxPending=384 -schema "replication(strategy=NetworkTopologyStrategy,factor=3)" "compaction(strategy=LeveledCompactionStrategy,sstable_size_in_mb=160,fanout_size=10)" -rate "threads=4" -reporting output-frequency=5s > "./stress-output/$curr_date/$curr_date v4 write_LOCAL_ONE_LCS_4xTHR.txt"
71 | echo 'START write, 8x thread: 2/2...'
72 | ./apache-cassandra-5.0.2/tools/bin/cassandra-stress write duration=1m cl=LOCAL_ONE no-warmup -node 10.129.52.58,10.129.53.21,10.129.52.57 -mode user=perf password=perf prepared protocolVersion=4 connectionsPerHost=24 maxPending=384 -schema "replication(strategy=NetworkTopologyStrategy,factor=3)" "compaction(strategy=LeveledCompactionStrategy,sstable_size_in_mb=160,fanout_size=10)" -rate "threads=8" -reporting output-frequency=5s > "./stress-output/$curr_date/$curr_date v4 write_LOCAL_ONE_LCS_8xTHR.txt"
73 | echo 'START write, 16x thread: 3/3...'
74 | # ...
75 | ```
76 | #### 1.1.2 Generate shell scripts based on 'compareV4V5_sequenceTHR\_cass_*.env'
77 |
78 | ```sh
79 | python3.11 stress/stress_mng.py generate -e compareV4V5_sequenceTHR\_cass_*.env -l stress-cmd/_cass_seq_all.sh
80 | ```
The output is ~28 calls with READ/WRITE tests with gradual
thread growth (up to <=100 threads) based on the default
'cassandra-stress' logic
84 |
85 | ```sh
86 | #!/bin/sh
87 | # GENERATED: 2024-12-05 16:24:10
88 | curr_date=$(date +%Y-%m-%d_%H-%M-%S)
89 | mkdir -p ./stress-output/$curr_date/
90 | echo 'Based on: .\config\compareV4V5_sequenceTHR\_cass_v4_1_local_one.env'
91 | echo 'START remove: ...'
92 | python3.11 stress/stress_mng.py remove -e compareV4V5_sequenceTHR/_cass_v4_1_local_one.env -k keyspace1 -d stress -s 8
93 |
94 | echo 'START write, 100x thread: 1/1...'
95 | ./apache-cassandra-5.0.2/tools/bin/cassandra-stress write duration=1m cl=LOCAL_ONE no-warmup -node 10.129.52.58,10.129.53.21,10.129.52.57 -mode user=perf password=perf prepared protocolVersion=4 connectionsPerHost=24 maxPending=384 -schema "replication(strategy=NetworkTopologyStrategy,factor=3)" "compaction(strategy=LeveledCompactionStrategy,sstable_size_in_mb=160,fanout_size=10)" -rate "threads<=100" -reporting output-frequency=5s > "./stress-output/$curr_date/$curr_date v4 write_LOCAL_ONE_LCS_100xTHR.txt"
96 |
97 | echo 'START read, 100x thread: 2/1...'
98 | ./apache-cassandra-5.0.2/tools/bin/cassandra-stress read duration=1m cl=LOCAL_ONE no-warmup -node 10.129.52.58,10.129.53.21,10.129.52.57 -mode user=perf password=perf prepared protocolVersion=4 connectionsPerHost=24 maxPending=384 -rate "threads<=100" -reporting output-frequency=5s > "./stress-output/$curr_date/$curr_date v4 read_LOCAL_ONE_LCS_100xTHR.txt"
99 | # ...
100 | ```
101 | ### 1.2 Extract
102 |
The 'Extract' command extracts data from the 'cassandra-stress' output in CSV and TXT(JSON) formats.
104 |
105 | ```sh
106 | python3.11 stress/stress_mng.py extract -d "C:/Python/.NEW Compare V4 vs V5/FULLFinal/"
107 | ```
108 |
109 | ### 1.3 Compare
110 |
111 | The 'Compare' data based on TXT(JSON) files to text (in console) or graphs (as PNG files).
112 | Compare different versions (e.g. compare output from Cassandra 'v4' vs 'v5'):
113 |
114 | ```sh
115 | python3.11 stress/stress_mng.py compare -d "C:/Python/.Cassandra v4 vs v5/" -t 0 -o v4 -n v5 -r read -w write
116 | ```
117 | or compare the same versions after the scale-out (e.g. compare Cassandra v5 outputs from '2024-12-11_09-15-55' vs '2025-01-28_13-14-49'):
118 |
119 | ```sh
120 | python3.11 stress/stress_mng.py compare -d "C:/Python/.Scaleout Cassandra v5/" -t 1 -o 2024-12-11_09-15-55 -n 2025-01-28_13-14-49 -r simple1 -w insert
121 | ```
122 |
123 | ### 1.4 Graph
124 |
The 'Graph' command creates graphs from TXT(JSON) files in the sub-dir 'graph'.
126 |
127 | ```sh
128 | python3.11 stress/stress_mng.py graph -d "C:/Python/.4NEW Compare V5 NONPROD/"
129 | ```
130 |
131 | or graphs plus graph summaries
132 |
133 | ```sh
134 | python3.11 stress/stress_mng.py graph -d "C:/Python/.4NEW Compare V5 NONPROD/" -g aa
135 | ```
136 |
137 | ## 2. Sample of outputs
138 | #### 2.1 Compare as graph
139 |
140 | It is useful for visual check, the inputs are TXT(JSON) files from extract command.
141 |
142 | 
143 |
144 | 
145 |
146 | 
147 |
148 | #### 2.2 Compare as text
149 |
150 | It is useful for table/excel compare (TAB as separator), the inputs are CSV files from extract command.
151 |
152 | ```txt
153 | ==== LOCAL_ONE===
154 | Test case 4 8 16 24 36 54 81 4 8 16 24 36 54 81
155 | v5 write_LOCAL_ONE_STCS 5938 11451 21774 29310 35638 39116 42557 0,7 0,7 0,7 0,8 1,0 1,4 1,9
156 | v4 write_LOCAL_ONE_STCS 5874 11053 19690 26178 30213 28000 23091 0,7 0,7 0,8 0,9 1,2 1,9 3,4
157 | ...
158 | ==== LOCAL_QUORUM===
159 | Test case 4 8 16 24 36 54 81 4 8 16 24 36 54 81
160 | v5 write_LOCAL_QUORUM_STCS 3907 7058 12638 18065 23462 29958 32159 1,0 1,1 1,2 1,3 1,5 1,8 2,5
161 | v4 write_LOCAL_QUORUM_STCS 3525 6394 10804 14313 18465 22709 25715 1,1 1,2 1,5 1,7 1,9 2,4 3,1
162 | ...
163 | ```
164 |
165 | The usage in the excel see a few final outputs:
166 |
167 | 
168 |
169 | 
170 |
171 | 
172 |
--------------------------------------------------------------------------------
/stress/extract_summary.py:
--------------------------------------------------------------------------------
1 | import cql_helper as helper
2 | from cql_output import CQLOutput
3 | from glob import glob
4 | import datetime, time
5 | from os import path, linesep
6 | import re
7 | from json import dumps
8 | from cql_helper import get_readable_duration, to_seconds
9 | from graph_output import GraphOutput
10 |
class ExtractSummary:
    """ Generate summary outputs from particular tests (a lot of detail outputs).

    Parsed results are accumulated in ``self._performance`` as a mapping of
    test key (file name without the '_<N>xTHR.txt' suffix) to a list of result
    dicts, ordered by the amount of executors. The outputs can be saved in
    json, txt, csv formats.
    """

    def __init__(self, input_dir, output_dir, file_extension = "*.txt"):
        self._input_dir = input_dir            # directory with 'cassandra-stress' outputs
        self._output_dir = output_dir          # directory for generated summaries
        self._file_extension = file_extension  # glob mask of the files to parse
        self._performance = {}                 # test key -> list of result dicts
        self._output_sample = 2                # progress lines printed before switching to '...'

    def _parse_results(self, file_name, content):
        """Parse one output file that contains a multi-row summary table
        (one 'threadCount, total, ...' row per thread count)."""
        key = self._get_pattern_item(r"(.*)_[^_]*xTHR.txt$", file_name)

        items = self._get_pattern_items(
            r"(.*)threadCount, total,([^,]*),([^,]*),([^,]*),([^,]*),([^,]*),([^,]*),([^,]*),([^,]*),([^,]*),([^,]*)",
            content)

        if items:
            # basic info is identical for every row in the file, extract it once
            operation_type = self._get_pattern_item(r"Type:(.*)$", content)
            duration = self._get_pattern_item(r"Duration:(.*)$", content)
            consistency_level = self._get_pattern_item(r"Consistency Level:(.*)$", content)

            for itm in items:
                output = {
                    # basic info
                    "operation_type": operation_type,
                    "duration": duration,
                    "consistency_level": consistency_level,
                    "executors": int(itm[0].strip()),

                    # performance details
                    "performance": itm[2].strip(),
                    "avrg": itm[5].strip(),
                    "median": itm[6].strip(),
                    "latency_95th": itm[7].strip(),
                    "latency_99th": itm[8].strip(),
                    "latency_999th": itm[9].strip(),
                    "latency_max": itm[10].strip(),
                }
                self._performance.setdefault(key, []).append(output)

    def _parse_result(self, file_name, content):
        """Parse one output file with a single final summary section."""
        output = {}

        key = self._get_pattern_item(r"(.*)_[^_]*xTHR.txt$", file_name)

        # basic info
        output["operation_type"] = self._get_pattern_item(r"Type:(.*)$", content)
        output["duration"] = self._get_pattern_item(r"Duration:(.*)$", content)
        output["consistency_level"] = self._get_pattern_item(r"Consistency Level:(.*)$", content)
        executors = self._get_pattern_item(r"Thread Count:(.*)$", content)
        output["executors"] = int(executors) if executors else 0

        # optional (can be zero for read operation)
        output["compaction"] = self._get_pattern_item(r"_([^_]*)_[^_]*xTHR.txt$", file_name)

        # performance details (thousands separators removed)
        output["performance"] = self._get_pattern_item(r"Op rate.+:(.+) op/s \[.+\]", content).replace(",", "")
        output["avrg"] = self._get_pattern_item(r"Latency mean.+:(.+)\[.+\]", content).replace(",", "")
        output["median"] = self._get_pattern_item(r"Latency median.+:(.+)\[.+\]", content).replace(",", "")
        output["latency_95th"] = self._get_pattern_item(r"Latency 95th percentile.+:(.+)\[.+\]", content).replace(",", "")
        output["latency_99th"] = self._get_pattern_item(r"Latency 99th percentile.+:(.+)\[.+\]", content).replace(",", "")
        output["latency_999th"] = self._get_pattern_item(r"Latency 99.9th percentile.+:(.+)\[.+\]", content).replace(",", "")
        output["latency_max"] = self._get_pattern_item(r"Latency max.+:(.+)\[.+\]", content).replace(",", "")

        self._performance.setdefault(key, []).append(output)

    def _multi_result(self, content) -> bool:
        """Return True when the content contains the multi-row summary table."""
        summary_items = self._get_pattern_items(
            r"(.*)threadCount, total,([^,]*),([^,]*),([^,]*),([^,]*),([^,]*),([^,]*),([^,]*),([^,]*),([^,]*),([^,]*)",
            content)
        return bool(summary_items)

    def _get_pattern_item(self, pattern, content):
        """Return the first match (stripped string) or the whole match list
        when the pattern has several groups; None when nothing matches."""
        match = re.findall(pattern, content, re.MULTILINE)
        if match:
            return match[0].strip() if type(match[0]) is str else match
        else:
            return None

    def _get_pattern_items(self, pattern, content):
        """Return all matches or None when nothing matches."""
        match = re.findall(pattern, content, re.MULTILINE)
        return match if match else None

    def _sort_executors(self, e):
        # sort key: amount of executors (threads)
        return e['executors']

    def parse(self):
        """Parse input"""
        self._performance = {}
        count = 0

        # iteration across all files
        file_filter = path.join(self._input_dir, self._file_extension)
        for file_name in glob(file_filter):
            if count < self._output_sample:
                print(f"Parsing: '{path.basename(file_name)}'")
            elif count == self._output_sample:
                print("Parsing ...")

            content = helper.read_file_all(file_name)
            if self._multi_result(content):
                self._parse_results(path.basename(file_name), content)
            else:
                self._parse_result(path.basename(file_name), content)
            count += 1

        # order by amount of executors
        for key in self._performance.keys():
            self._performance[key].sort(key=self._sort_executors)

        print(f"=== Parsed '{count}' files ===")


    def save_csv(self):
        """Save summary output to CSV file (one file per test key)"""
        count = 0
        for key in self._performance.keys():
            output = None
            file_name = key + ".csv"
            try:
                output = CQLOutput(self._output_dir, file_name, False)
                output.open()
                output.print("Executors,Group,Performance,Avrg,Latency 95th,Latency 99th,Latency 999th,Max")

                for itm in self._performance[key]:
                    output.print(f"{itm['executors']},"
                                 f"'norm',"
                                 f"{itm['performance']},"
                                 f"{itm['avrg']},"
                                 f"{itm['latency_95th']},"
                                 f"{itm['latency_99th']},"
                                 f"{itm['latency_999th']},"
                                 f"{itm['latency_max']}")
            finally:
                if output:
                    output.close()
            if count < self._output_sample:
                print(f"Saved CSV: '{file_name}'")
            elif count == self._output_sample:
                print("Saved CSV ...")
            count += 1

        print(f"=== Saved '{count}' CSV files ===")

    def _to_datetime(self, label) -> datetime.datetime:
        """Convert a label such as '2024-12-01_12-18-17 ...' to a datetime."""
        keys = label.split()
        parts = keys[0].split('_')
        return datetime.datetime.fromisoformat(f"{parts[0]} {parts[1].replace('-', ':')}")

    def save_json(self):
        """Save summary output to TXT (JSON) file (one file per test key)"""

        count = 0
        for key in self._performance.keys():
            output = None
            file_name = key + ".txt"
            try:
                output = CQLOutput(self._output_dir, file_name, False)
                graph = GraphOutput(output)
                output.open()

                if len(self._performance[key]) > 0:
                    duration = to_seconds(self._performance[key][0]['duration'])
                    keys = key.split()
                    group = keys[1]
                    date = self._to_datetime(keys[0])

                    graph.print_header(date, f"{keys[1]} {keys[2]}", duration)
                    for itm in self._performance[key]:
                        graph.print_detail(itm, group)
                    graph.print_footer(True, duration)
            finally:
                if output:
                    output.close()

            if count < self._output_sample:
                print(f"Saved TXT(JSON): '{file_name}'")
            elif count == self._output_sample:
                print("Saved TXT(JSON) ...")
            count += 1
        print(f"=== Saved '{count}' TXT(JSON) files ===")
206 |
--------------------------------------------------------------------------------
/stress/stress-cmd/_cass_v5.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | # GENERATED: 2024-11-30 14:29:21
3 | curr_date=$(date +%Y-%m-%d_%H-%M-%S)
4 | mkdir -p ./stress-output/$curr_date/
5 | echo 'Based on: .\config\compareV4V5_sequenceTHR\_cass_v5_1_local_one.env'
6 | echo 'START remove: 1/1...'
7 | python3.11 stress/stress_mng.py remove -e compareV4V5_sequenceTHR/_cass_v5_1_local_one.env -k keyspace1 -d stress -s 8
8 |
9 | echo 'START write, 128x thread: 2/1...'
10 | ./apache-cassandra-5.0.2/tools/bin/cassandra-stress write duration=1m cl=LOCAL_ONE no-warmup -node 10.129.53.159,10.129.53.154,10.129.53.153 -mode user=perf password=perf prepared protocolVersion=4 connectionsPerHost=24 maxPending=384 -schema "replication(strategy=NetworkTopologyStrategy,factor=3)" "compaction(strategy=LeveledCompactionStrategy,sstable_size_in_mb=160,fanout_size=10)" -rate "threads<=128" -reporting output-frequency=5s > "./stress-output/$curr_date/$curr_date v5 write_LOCAL_ONE_LCS_128xTHR.txt"
11 |
12 | echo 'START read, 128x thread: 3/1...'
13 | ./apache-cassandra-5.0.2/tools/bin/cassandra-stress read duration=1m cl=LOCAL_ONE no-warmup -node 10.129.53.159,10.129.53.154,10.129.53.153 -mode user=perf password=perf prepared protocolVersion=4 connectionsPerHost=24 maxPending=384 -rate "threads<=128" -reporting output-frequency=5s > "./stress-output/$curr_date/$curr_date v5 read_LOCAL_ONE_LCS_128xTHR.txt"
14 |
15 | echo 'START remove: 4/1...'
16 | python3.11 stress/stress_mng.py remove -e compareV4V5_sequenceTHR/_cass_v5_1_local_one.env -k keyspace1 -d stress -s 8
17 |
18 | echo 'START write, 128x thread: 5/1...'
19 | ./apache-cassandra-5.0.2/tools/bin/cassandra-stress write duration=1m cl=LOCAL_ONE no-warmup -node 10.129.53.159,10.129.53.154,10.129.53.153 -mode user=perf password=perf prepared protocolVersion=4 connectionsPerHost=24 maxPending=384 -schema "replication(strategy=NetworkTopologyStrategy,factor=3)" "compaction(strategy=SizeTieredCompactionStrategy,max_threshold=32,min_threshold=4)" -rate "threads<=128" -reporting output-frequency=5s > "./stress-output/$curr_date/$curr_date v5 write_LOCAL_ONE_STCS_128xTHR.txt"
20 |
21 | echo 'START read, 128x thread: 6/1...'
22 | ./apache-cassandra-5.0.2/tools/bin/cassandra-stress read duration=1m cl=LOCAL_ONE no-warmup -node 10.129.53.159,10.129.53.154,10.129.53.153 -mode user=perf password=perf prepared protocolVersion=4 connectionsPerHost=24 maxPending=384 -rate "threads<=128" -reporting output-frequency=5s > "./stress-output/$curr_date/$curr_date v5 read_LOCAL_ONE_STCS_128xTHR.txt"
23 |
24 | echo 'START remove: 7/1...'
25 | python3.11 stress/stress_mng.py remove -e compareV4V5_sequenceTHR/_cass_v5_1_local_one.env -k keyspace1 -d stress -s 8
26 |
27 | echo 'START write, 128x thread: 8/1...'
28 | ./apache-cassandra-5.0.2/tools/bin/cassandra-stress write duration=1m cl=LOCAL_ONE no-warmup -node 10.129.53.159,10.129.53.154,10.129.53.153 -mode user=perf password=perf prepared protocolVersion=4 connectionsPerHost=24 maxPending=384 -schema "replication(strategy=NetworkTopologyStrategy,factor=3)" "compaction(strategy=UnifiedCompactionStrategy,scaling_parameters=4)" -rate "threads<=128" -reporting output-frequency=5s > "./stress-output/$curr_date/$curr_date v5 write_LOCAL_ONE_UCS4_128xTHR.txt"
29 |
30 | echo 'START read, 128x thread: 9/1...'
31 | ./apache-cassandra-5.0.2/tools/bin/cassandra-stress read duration=1m cl=LOCAL_ONE no-warmup -node 10.129.53.159,10.129.53.154,10.129.53.153 -mode user=perf password=perf prepared protocolVersion=4 connectionsPerHost=24 maxPending=384 -rate "threads<=128" -reporting output-frequency=5s > "./stress-output/$curr_date/$curr_date v5 read_LOCAL_ONE_UCS4_128xTHR.txt"
32 |
33 | echo 'START remove: 10/1...'
34 | python3.11 stress/stress_mng.py remove -e compareV4V5_sequenceTHR/_cass_v5_1_local_one.env -k keyspace1 -d stress -s 8
35 |
36 | echo 'START write, 128x thread: 11/1...'
37 | ./apache-cassandra-5.0.2/tools/bin/cassandra-stress write duration=1m cl=LOCAL_ONE no-warmup -node 10.129.53.159,10.129.53.154,10.129.53.153 -mode user=perf password=perf prepared protocolVersion=4 connectionsPerHost=24 maxPending=384 -schema "replication(strategy=NetworkTopologyStrategy,factor=3)" "compaction(strategy=UnifiedCompactionStrategy,scaling_parameters=8)" -rate "threads<=128" -reporting output-frequency=5s > "./stress-output/$curr_date/$curr_date v5 write_LOCAL_ONE_UCS8_128xTHR.txt"
38 |
39 | echo 'START read, 128x thread: 12/1...'
40 | ./apache-cassandra-5.0.2/tools/bin/cassandra-stress read duration=1m cl=LOCAL_ONE no-warmup -node 10.129.53.159,10.129.53.154,10.129.53.153 -mode user=perf password=perf prepared protocolVersion=4 connectionsPerHost=24 maxPending=384 -rate "threads<=128" -reporting output-frequency=5s > "./stress-output/$curr_date/$curr_date v5 read_LOCAL_ONE_UCS8_128xTHR.txt"
41 |
42 | echo 'START remove: 13/1...'
43 | python3.11 stress/stress_mng.py remove -e compareV4V5_sequenceTHR/_cass_v5_1_local_one.env -k keyspace1 -d stress -s 8
44 |
45 | echo 'START write, 128x thread: 14/1...'
46 | ./apache-cassandra-5.0.2/tools/bin/cassandra-stress write duration=1m cl=LOCAL_ONE no-warmup -node 10.129.53.159,10.129.53.154,10.129.53.153 -mode user=perf password=perf prepared protocolVersion=4 connectionsPerHost=24 maxPending=384 -schema "replication(strategy=NetworkTopologyStrategy,factor=3)" "compaction(strategy=UnifiedCompactionStrategy,scaling_parameters=10)" -rate "threads<=128" -reporting output-frequency=5s > "./stress-output/$curr_date/$curr_date v5 write_LOCAL_ONE_UCS10_128xTHR.txt"
47 |
48 | echo 'START read, 128x thread: 15/1...'
49 | ./apache-cassandra-5.0.2/tools/bin/cassandra-stress read duration=1m cl=LOCAL_ONE no-warmup -node 10.129.53.159,10.129.53.154,10.129.53.153 -mode user=perf password=perf prepared protocolVersion=4 connectionsPerHost=24 maxPending=384 -rate "threads<=128" -reporting output-frequency=5s > "./stress-output/$curr_date/$curr_date v5 read_LOCAL_ONE_UCS10_128xTHR.txt"
50 |
51 | echo 'START remove: 16/1...'
52 | python3.11 stress/stress_mng.py remove -e compareV4V5_sequenceTHR/_cass_v5_1_local_one.env -k keyspace1 -d stress -s 8
53 |
54 | echo 'Based on: .\config\compareV4V5_sequenceTHR\_cass_v5_2_local_quorum.env'
55 | echo 'START remove: 17/1...'
56 | python3.11 stress/stress_mng.py remove -e compareV4V5_sequenceTHR/_cass_v5_2_local_quorum.env -k keyspace1 -d stress -s 8
57 |
58 | echo 'START write, 128x thread: 18/1...'
59 | ./apache-cassandra-5.0.2/tools/bin/cassandra-stress write duration=1m cl=LOCAL_QUORUM no-warmup -node 10.129.53.159,10.129.53.154,10.129.53.153 -mode user=perf password=perf prepared protocolVersion=4 connectionsPerHost=24 maxPending=384 -schema "replication(strategy=NetworkTopologyStrategy,factor=3)" "compaction(strategy=LeveledCompactionStrategy,sstable_size_in_mb=160,fanout_size=10)" -rate "threads<=128" -reporting output-frequency=5s > "./stress-output/$curr_date/$curr_date v5 write_LOCAL_QUORUM_LCS_128xTHR.txt"
60 |
61 | echo 'START read, 128x thread: 19/1...'
62 | ./apache-cassandra-5.0.2/tools/bin/cassandra-stress read duration=1m cl=LOCAL_QUORUM no-warmup -node 10.129.53.159,10.129.53.154,10.129.53.153 -mode user=perf password=perf prepared protocolVersion=4 connectionsPerHost=24 maxPending=384 -rate "threads<=128" -reporting output-frequency=5s > "./stress-output/$curr_date/$curr_date v5 read_LOCAL_QUORUM_LCS_128xTHR.txt"
63 |
64 | echo 'START remove: 20/1...'
65 | python3.11 stress/stress_mng.py remove -e compareV4V5_sequenceTHR/_cass_v5_2_local_quorum.env -k keyspace1 -d stress -s 8
66 |
67 | echo 'START write, 128x thread: 21/1...'
68 | ./apache-cassandra-5.0.2/tools/bin/cassandra-stress write duration=1m cl=LOCAL_QUORUM no-warmup -node 10.129.53.159,10.129.53.154,10.129.53.153 -mode user=perf password=perf prepared protocolVersion=4 connectionsPerHost=24 maxPending=384 -schema "replication(strategy=NetworkTopologyStrategy,factor=3)" "compaction(strategy=SizeTieredCompactionStrategy,max_threshold=32,min_threshold=4)" -rate "threads<=128" -reporting output-frequency=5s > "./stress-output/$curr_date/$curr_date v5 write_LOCAL_QUORUM_STCS_128xTHR.txt"
69 |
70 | echo 'START read, 128x thread: 22/1...'
71 | ./apache-cassandra-5.0.2/tools/bin/cassandra-stress read duration=1m cl=LOCAL_QUORUM no-warmup -node 10.129.53.159,10.129.53.154,10.129.53.153 -mode user=perf password=perf prepared protocolVersion=4 connectionsPerHost=24 maxPending=384 -rate "threads<=128" -reporting output-frequency=5s > "./stress-output/$curr_date/$curr_date v5 read_LOCAL_QUORUM_STCS_128xTHR.txt"
72 |
73 | echo 'START remove: 23/1...'
74 | python3.11 stress/stress_mng.py remove -e compareV4V5_sequenceTHR/_cass_v5_2_local_quorum.env -k keyspace1 -d stress -s 8
75 |
76 | echo 'START write, 128x thread: 24/1...'
77 | ./apache-cassandra-5.0.2/tools/bin/cassandra-stress write duration=1m cl=LOCAL_QUORUM no-warmup -node 10.129.53.159,10.129.53.154,10.129.53.153 -mode user=perf password=perf prepared protocolVersion=4 connectionsPerHost=24 maxPending=384 -schema "replication(strategy=NetworkTopologyStrategy,factor=3)" "compaction(strategy=UnifiedCompactionStrategy,scaling_parameters=4)" -rate "threads<=128" -reporting output-frequency=5s > "./stress-output/$curr_date/$curr_date v5 write_LOCAL_QUORUM_UCS4_128xTHR.txt"
78 |
79 | echo 'START read, 128x thread: 25/1...'
80 | ./apache-cassandra-5.0.2/tools/bin/cassandra-stress read duration=1m cl=LOCAL_QUORUM no-warmup -node 10.129.53.159,10.129.53.154,10.129.53.153 -mode user=perf password=perf prepared protocolVersion=4 connectionsPerHost=24 maxPending=384 -rate "threads<=128" -reporting output-frequency=5s > "./stress-output/$curr_date/$curr_date v5 read_LOCAL_QUORUM_UCS4_128xTHR.txt"
81 |
82 | echo 'START remove: 26/1...'
83 | python3.11 stress/stress_mng.py remove -e compareV4V5_sequenceTHR/_cass_v5_2_local_quorum.env -k keyspace1 -d stress -s 8
84 |
85 | echo 'START write, 128x thread: 27/1...'
86 | ./apache-cassandra-5.0.2/tools/bin/cassandra-stress write duration=1m cl=LOCAL_QUORUM no-warmup -node 10.129.53.159,10.129.53.154,10.129.53.153 -mode user=perf password=perf prepared protocolVersion=4 connectionsPerHost=24 maxPending=384 -schema "replication(strategy=NetworkTopologyStrategy,factor=3)" "compaction(strategy=UnifiedCompactionStrategy,scaling_parameters=8)" -rate "threads<=128" -reporting output-frequency=5s > "./stress-output/$curr_date/$curr_date v5 write_LOCAL_QUORUM_UCS8_128xTHR.txt"
87 |
88 | echo 'START read, 128x thread: 28/1...'
89 | ./apache-cassandra-5.0.2/tools/bin/cassandra-stress read duration=1m cl=LOCAL_QUORUM no-warmup -node 10.129.53.159,10.129.53.154,10.129.53.153 -mode user=perf password=perf prepared protocolVersion=4 connectionsPerHost=24 maxPending=384 -rate "threads<=128" -reporting output-frequency=5s > "./stress-output/$curr_date/$curr_date v5 read_LOCAL_QUORUM_UCS8_128xTHR.txt"
90 |
91 | echo 'START remove: 29/1...'
92 | python3.11 stress/stress_mng.py remove -e compareV4V5_sequenceTHR/_cass_v5_2_local_quorum.env -k keyspace1 -d stress -s 8
93 |
94 | echo 'START write, 128x thread: 30/1...'
95 | ./apache-cassandra-5.0.2/tools/bin/cassandra-stress write duration=1m cl=LOCAL_QUORUM no-warmup -node 10.129.53.159,10.129.53.154,10.129.53.153 -mode user=perf password=perf prepared protocolVersion=4 connectionsPerHost=24 maxPending=384 -schema "replication(strategy=NetworkTopologyStrategy,factor=3)" "compaction(strategy=UnifiedCompactionStrategy,scaling_parameters=10)" -rate "threads<=128" -reporting output-frequency=5s > "./stress-output/$curr_date/$curr_date v5 write_LOCAL_QUORUM_UCS10_128xTHR.txt"
96 |
97 | echo 'START read, 128x thread: 31/1...'
98 | ./apache-cassandra-5.0.2/tools/bin/cassandra-stress read duration=1m cl=LOCAL_QUORUM no-warmup -node 10.129.53.159,10.129.53.154,10.129.53.153 -mode user=perf password=perf prepared protocolVersion=4 connectionsPerHost=24 maxPending=384 -rate "threads<=128" -reporting output-frequency=5s > "./stress-output/$curr_date/$curr_date v5 read_LOCAL_QUORUM_UCS10_128xTHR.txt"
99 |
100 | echo 'START remove: 32/1...'
101 | python3.11 stress/stress_mng.py remove -e compareV4V5_sequenceTHR/_cass_v5_2_local_quorum.env -k keyspace1 -d stress -s 8
102 |
103 |
--------------------------------------------------------------------------------
/stress/stress-cmd/_cass_seq_v5.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | # GENERATED: 2024-11-30 15:36:25
3 | curr_date=$(date +%Y-%m-%d_%H-%M-%S)
4 | mkdir -p ./stress-output/$curr_date/
5 | echo 'Based on: .\config\compareV4V5_sequenceTHR\_cass_v5_1_local_one.env'
6 | echo 'START remove: 1/1...'
7 | python3.11 stress/stress_mng.py remove -e compareV4V5_sequenceTHR/_cass_v5_1_local_one.env -k keyspace1 -d stress -s 8
8 |
9 | echo 'START write, 128x thread: 2/1...'
10 | ./apache-cassandra-5.0.2/tools/bin/cassandra-stress write duration=1m cl=LOCAL_ONE no-warmup -node 10.129.53.159,10.129.53.154,10.129.53.153 -mode user=perf password=perf prepared protocolVersion=4 connectionsPerHost=24 maxPending=384 -schema "replication(strategy=NetworkTopologyStrategy,factor=3)" "compaction(strategy=LeveledCompactionStrategy,sstable_size_in_mb=160,fanout_size=10)" -rate "threads<=128" -reporting output-frequency=5s > "./stress-output/$curr_date/$curr_date v5 write_LOCAL_ONE_LCS_128xTHR.txt"
11 |
12 | echo 'START read, 128x thread: 3/1...'
13 | ./apache-cassandra-5.0.2/tools/bin/cassandra-stress read duration=1m cl=LOCAL_ONE no-warmup -node 10.129.53.159,10.129.53.154,10.129.53.153 -mode user=perf password=perf prepared protocolVersion=4 connectionsPerHost=24 maxPending=384 -rate "threads<=128" -reporting output-frequency=5s > "./stress-output/$curr_date/$curr_date v5 read_LOCAL_ONE_LCS_128xTHR.txt"
14 |
15 | echo 'START remove: 4/1...'
16 | python3.11 stress/stress_mng.py remove -e compareV4V5_sequenceTHR/_cass_v5_1_local_one.env -k keyspace1 -d stress -s 8
17 |
18 | echo 'START write, 128x thread: 5/1...'
19 | ./apache-cassandra-5.0.2/tools/bin/cassandra-stress write duration=1m cl=LOCAL_ONE no-warmup -node 10.129.53.159,10.129.53.154,10.129.53.153 -mode user=perf password=perf prepared protocolVersion=4 connectionsPerHost=24 maxPending=384 -schema "replication(strategy=NetworkTopologyStrategy,factor=3)" "compaction(strategy=SizeTieredCompactionStrategy,max_threshold=32,min_threshold=4)" -rate "threads<=128" -reporting output-frequency=5s > "./stress-output/$curr_date/$curr_date v5 write_LOCAL_ONE_STCS_128xTHR.txt"
20 |
21 | echo 'START read, 128x thread: 6/1...'
22 | ./apache-cassandra-5.0.2/tools/bin/cassandra-stress read duration=1m cl=LOCAL_ONE no-warmup -node 10.129.53.159,10.129.53.154,10.129.53.153 -mode user=perf password=perf prepared protocolVersion=4 connectionsPerHost=24 maxPending=384 -rate "threads<=128" -reporting output-frequency=5s > "./stress-output/$curr_date/$curr_date v5 read_LOCAL_ONE_STCS_128xTHR.txt"
23 |
24 | echo 'START remove: 7/1...'
25 | python3.11 stress/stress_mng.py remove -e compareV4V5_sequenceTHR/_cass_v5_1_local_one.env -k keyspace1 -d stress -s 8
26 |
27 | echo 'START write, 128x thread: 8/1...'
28 | ./apache-cassandra-5.0.2/tools/bin/cassandra-stress write duration=1m cl=LOCAL_ONE no-warmup -node 10.129.53.159,10.129.53.154,10.129.53.153 -mode user=perf password=perf prepared protocolVersion=4 connectionsPerHost=24 maxPending=384 -schema "replication(strategy=NetworkTopologyStrategy,factor=3)" "compaction(strategy=UnifiedCompactionStrategy,scaling_parameters=4)" -rate "threads<=128" -reporting output-frequency=5s > "./stress-output/$curr_date/$curr_date v5 write_LOCAL_ONE_UCS4_128xTHR.txt"
29 |
30 | echo 'START read, 128x thread: 9/1...'
31 | ./apache-cassandra-5.0.2/tools/bin/cassandra-stress read duration=1m cl=LOCAL_ONE no-warmup -node 10.129.53.159,10.129.53.154,10.129.53.153 -mode user=perf password=perf prepared protocolVersion=4 connectionsPerHost=24 maxPending=384 -rate "threads<=128" -reporting output-frequency=5s > "./stress-output/$curr_date/$curr_date v5 read_LOCAL_ONE_UCS4_128xTHR.txt"
32 |
33 | echo 'START remove: 10/1...'
34 | python3.11 stress/stress_mng.py remove -e compareV4V5_sequenceTHR/_cass_v5_1_local_one.env -k keyspace1 -d stress -s 8
35 |
36 | echo 'START write, 128x thread: 11/1...'
37 | ./apache-cassandra-5.0.2/tools/bin/cassandra-stress write duration=1m cl=LOCAL_ONE no-warmup -node 10.129.53.159,10.129.53.154,10.129.53.153 -mode user=perf password=perf prepared protocolVersion=4 connectionsPerHost=24 maxPending=384 -schema "replication(strategy=NetworkTopologyStrategy,factor=3)" "compaction(strategy=UnifiedCompactionStrategy,scaling_parameters=8)" -rate "threads<=128" -reporting output-frequency=5s > "./stress-output/$curr_date/$curr_date v5 write_LOCAL_ONE_UCS8_128xTHR.txt"
38 |
39 | echo 'START read, 128x thread: 12/1...'
40 | ./apache-cassandra-5.0.2/tools/bin/cassandra-stress read duration=1m cl=LOCAL_ONE no-warmup -node 10.129.53.159,10.129.53.154,10.129.53.153 -mode user=perf password=perf prepared protocolVersion=4 connectionsPerHost=24 maxPending=384 -rate "threads<=128" -reporting output-frequency=5s > "./stress-output/$curr_date/$curr_date v5 read_LOCAL_ONE_UCS8_128xTHR.txt"
41 |
42 | echo 'START remove: 13/1...'
43 | python3.11 stress/stress_mng.py remove -e compareV4V5_sequenceTHR/_cass_v5_1_local_one.env -k keyspace1 -d stress -s 8
44 |
45 | echo 'START write, 128x thread: 14/1...'
46 | ./apache-cassandra-5.0.2/tools/bin/cassandra-stress write duration=1m cl=LOCAL_ONE no-warmup -node 10.129.53.159,10.129.53.154,10.129.53.153 -mode user=perf password=perf prepared protocolVersion=4 connectionsPerHost=24 maxPending=384 -schema "replication(strategy=NetworkTopologyStrategy,factor=3)" "compaction(strategy=UnifiedCompactionStrategy,scaling_parameters=10)" -rate "threads<=128" -reporting output-frequency=5s > "./stress-output/$curr_date/$curr_date v5 write_LOCAL_ONE_UCS10_128xTHR.txt"
47 |
48 | echo 'START read, 128x thread: 15/1...'
49 | ./apache-cassandra-5.0.2/tools/bin/cassandra-stress read duration=1m cl=LOCAL_ONE no-warmup -node 10.129.53.159,10.129.53.154,10.129.53.153 -mode user=perf password=perf prepared protocolVersion=4 connectionsPerHost=24 maxPending=384 -rate "threads<=128" -reporting output-frequency=5s > "./stress-output/$curr_date/$curr_date v5 read_LOCAL_ONE_UCS10_128xTHR.txt"
50 |
51 | echo 'START remove: 16/1...'
52 | python3.11 stress/stress_mng.py remove -e compareV4V5_sequenceTHR/_cass_v5_1_local_one.env -k keyspace1 -d stress -s 8
53 |
54 | echo 'Based on: .\config\compareV4V5_sequenceTHR\_cass_v5_2_local_quorum.env'
55 | echo 'START remove: 17/1...'
56 | python3.11 stress/stress_mng.py remove -e compareV4V5_sequenceTHR/_cass_v5_2_local_quorum.env -k keyspace1 -d stress -s 8
57 |
58 | echo 'START write, 128x thread: 18/1...'
59 | ./apache-cassandra-5.0.2/tools/bin/cassandra-stress write duration=1m cl=LOCAL_QUORUM no-warmup -node 10.129.53.159,10.129.53.154,10.129.53.153 -mode user=perf password=perf prepared protocolVersion=4 connectionsPerHost=24 maxPending=384 -schema "replication(strategy=NetworkTopologyStrategy,factor=3)" "compaction(strategy=LeveledCompactionStrategy,sstable_size_in_mb=160,fanout_size=10)" -rate "threads<=128" -reporting output-frequency=5s > "./stress-output/$curr_date/$curr_date v5 write_LOCAL_QUORUM_LCS_128xTHR.txt"
60 |
61 | echo 'START read, 128x thread: 19/1...'
62 | ./apache-cassandra-5.0.2/tools/bin/cassandra-stress read duration=1m cl=LOCAL_QUORUM no-warmup -node 10.129.53.159,10.129.53.154,10.129.53.153 -mode user=perf password=perf prepared protocolVersion=4 connectionsPerHost=24 maxPending=384 -rate "threads<=128" -reporting output-frequency=5s > "./stress-output/$curr_date/$curr_date v5 read_LOCAL_QUORUM_LCS_128xTHR.txt"
63 |
64 | echo 'START remove: 20/1...'
65 | python3.11 stress/stress_mng.py remove -e compareV4V5_sequenceTHR/_cass_v5_2_local_quorum.env -k keyspace1 -d stress -s 8
66 |
67 | echo 'START write, 128x thread: 21/1...'
68 | ./apache-cassandra-5.0.2/tools/bin/cassandra-stress write duration=1m cl=LOCAL_QUORUM no-warmup -node 10.129.53.159,10.129.53.154,10.129.53.153 -mode user=perf password=perf prepared protocolVersion=4 connectionsPerHost=24 maxPending=384 -schema "replication(strategy=NetworkTopologyStrategy,factor=3)" "compaction(strategy=SizeTieredCompactionStrategy,max_threshold=32,min_threshold=4)" -rate "threads<=128" -reporting output-frequency=5s > "./stress-output/$curr_date/$curr_date v5 write_LOCAL_QUORUM_STCS_128xTHR.txt"
69 |
70 | echo 'START read, 128x thread: 22/1...'
71 | ./apache-cassandra-5.0.2/tools/bin/cassandra-stress read duration=1m cl=LOCAL_QUORUM no-warmup -node 10.129.53.159,10.129.53.154,10.129.53.153 -mode user=perf password=perf prepared protocolVersion=4 connectionsPerHost=24 maxPending=384 -rate "threads<=128" -reporting output-frequency=5s > "./stress-output/$curr_date/$curr_date v5 read_LOCAL_QUORUM_STCS_128xTHR.txt"
72 |
73 | echo 'START remove: 23/1...'
74 | python3.11 stress/stress_mng.py remove -e compareV4V5_sequenceTHR/_cass_v5_2_local_quorum.env -k keyspace1 -d stress -s 8
75 |
76 | echo 'START write, 128x thread: 24/1...'
77 | ./apache-cassandra-5.0.2/tools/bin/cassandra-stress write duration=1m cl=LOCAL_QUORUM no-warmup -node 10.129.53.159,10.129.53.154,10.129.53.153 -mode user=perf password=perf prepared protocolVersion=4 connectionsPerHost=24 maxPending=384 -schema "replication(strategy=NetworkTopologyStrategy,factor=3)" "compaction(strategy=UnifiedCompactionStrategy,scaling_parameters=4)" -rate "threads<=128" -reporting output-frequency=5s > "./stress-output/$curr_date/$curr_date v5 write_LOCAL_QUORUM_UCS4_128xTHR.txt"
78 |
79 | echo 'START read, 128x thread: 25/1...'
80 | ./apache-cassandra-5.0.2/tools/bin/cassandra-stress read duration=1m cl=LOCAL_QUORUM no-warmup -node 10.129.53.159,10.129.53.154,10.129.53.153 -mode user=perf password=perf prepared protocolVersion=4 connectionsPerHost=24 maxPending=384 -rate "threads<=128" -reporting output-frequency=5s > "./stress-output/$curr_date/$curr_date v5 read_LOCAL_QUORUM_UCS4_128xTHR.txt"
81 |
82 | echo 'START remove: 26/1...'
83 | python3.11 stress/stress_mng.py remove -e compareV4V5_sequenceTHR/_cass_v5_2_local_quorum.env -k keyspace1 -d stress -s 8
84 |
85 | echo 'START write, 128x thread: 27/1...'
86 | ./apache-cassandra-5.0.2/tools/bin/cassandra-stress write duration=1m cl=LOCAL_QUORUM no-warmup -node 10.129.53.159,10.129.53.154,10.129.53.153 -mode user=perf password=perf prepared protocolVersion=4 connectionsPerHost=24 maxPending=384 -schema "replication(strategy=NetworkTopologyStrategy,factor=3)" "compaction(strategy=UnifiedCompactionStrategy,scaling_parameters=8)" -rate "threads<=128" -reporting output-frequency=5s > "./stress-output/$curr_date/$curr_date v5 write_LOCAL_QUORUM_UCS8_128xTHR.txt"
87 |
88 | echo 'START read, 128x thread: 28/1...'
89 | ./apache-cassandra-5.0.2/tools/bin/cassandra-stress read duration=1m cl=LOCAL_QUORUM no-warmup -node 10.129.53.159,10.129.53.154,10.129.53.153 -mode user=perf password=perf prepared protocolVersion=4 connectionsPerHost=24 maxPending=384 -rate "threads<=128" -reporting output-frequency=5s > "./stress-output/$curr_date/$curr_date v5 read_LOCAL_QUORUM_UCS8_128xTHR.txt"
90 |
91 | echo 'START remove: 29/1...'
92 | python3.11 stress/stress_mng.py remove -e compareV4V5_sequenceTHR/_cass_v5_2_local_quorum.env -k keyspace1 -d stress -s 8
93 |
94 | echo 'START write, 128x thread: 30/1...'
95 | ./apache-cassandra-5.0.2/tools/bin/cassandra-stress write duration=1m cl=LOCAL_QUORUM no-warmup -node 10.129.53.159,10.129.53.154,10.129.53.153 -mode user=perf password=perf prepared protocolVersion=4 connectionsPerHost=24 maxPending=384 -schema "replication(strategy=NetworkTopologyStrategy,factor=3)" "compaction(strategy=UnifiedCompactionStrategy,scaling_parameters=10)" -rate "threads<=128" -reporting output-frequency=5s > "./stress-output/$curr_date/$curr_date v5 write_LOCAL_QUORUM_UCS10_128xTHR.txt"
96 |
97 | echo 'START read, 128x thread: 31/1...'
98 | ./apache-cassandra-5.0.2/tools/bin/cassandra-stress read duration=1m cl=LOCAL_QUORUM no-warmup -node 10.129.53.159,10.129.53.154,10.129.53.153 -mode user=perf password=perf prepared protocolVersion=4 connectionsPerHost=24 maxPending=384 -rate "threads<=128" -reporting output-frequency=5s > "./stress-output/$curr_date/$curr_date v5 read_LOCAL_QUORUM_UCS10_128xTHR.txt"
99 |
100 | echo 'START remove: 32/1...'
101 | python3.11 stress/stress_mng.py remove -e compareV4V5_sequenceTHR/_cass_v5_2_local_quorum.env -k keyspace1 -d stress -s 8
102 |
103 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 |
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 |
7 | 1. Definitions.
8 |
9 | "License" shall mean the terms and conditions for use, reproduction,
10 | and distribution as defined by Sections 1 through 9 of this document.
11 |
12 | "Licensor" shall mean the copyright owner or entity authorized by
13 | the copyright owner that is granting the License.
14 |
15 | "Legal Entity" shall mean the union of the acting entity and all
16 | other entities that control, are controlled by, or are under common
17 | control with that entity. For the purposes of this definition,
18 | "control" means (i) the power, direct or indirect, to cause the
19 | direction or management of such entity, whether by contract or
20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 | outstanding shares, or (iii) beneficial ownership of such entity.
22 |
23 | "You" (or "Your") shall mean an individual or Legal Entity
24 | exercising permissions granted by this License.
25 |
26 | "Source" form shall mean the preferred form for making modifications,
27 | including but not limited to software source code, documentation
28 | source, and configuration files.
29 |
30 | "Object" form shall mean any form resulting from mechanical
31 | transformation or translation of a Source form, including but
32 | not limited to compiled object code, generated documentation,
33 | and conversions to other media types.
34 |
35 | "Work" shall mean the work of authorship, whether in Source or
36 | Object form, made available under the License, as indicated by a
37 | copyright notice that is included in or attached to the work
38 | (an example is provided in the Appendix below).
39 |
40 | "Derivative Works" shall mean any work, whether in Source or Object
41 | form, that is based on (or derived from) the Work and for which the
42 | editorial revisions, annotations, elaborations, or other modifications
43 | represent, as a whole, an original work of authorship. For the purposes
44 | of this License, Derivative Works shall not include works that remain
45 | separable from, or merely link (or bind by name) to the interfaces of,
46 | the Work and Derivative Works thereof.
47 |
48 | "Contribution" shall mean any work of authorship, including
49 | the original version of the Work and any modifications or additions
50 | to that Work or Derivative Works thereof, that is intentionally
51 | submitted to Licensor for inclusion in the Work by the copyright owner
52 | or by an individual or Legal Entity authorized to submit on behalf of
53 | the copyright owner. For the purposes of this definition, "submitted"
54 | means any form of electronic, verbal, or written communication sent
55 | to the Licensor or its representatives, including but not limited to
56 | communication on electronic mailing lists, source code control systems,
57 | and issue tracking systems that are managed by, or on behalf of, the
58 | Licensor for the purpose of discussing and improving the Work, but
59 | excluding communication that is conspicuously marked or otherwise
60 | designated in writing by the copyright owner as "Not a Contribution."
61 |
62 | "Contributor" shall mean Licensor and any individual or Legal Entity
63 | on behalf of whom a Contribution has been received by Licensor and
64 | subsequently incorporated within the Work.
65 |
66 | 2. Grant of Copyright License. Subject to the terms and conditions of
67 | this License, each Contributor hereby grants to You a perpetual,
68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69 | copyright license to reproduce, prepare Derivative Works of,
70 | publicly display, publicly perform, sublicense, and distribute the
71 | Work and such Derivative Works in Source or Object form.
72 |
73 | 3. Grant of Patent License. Subject to the terms and conditions of
74 | this License, each Contributor hereby grants to You a perpetual,
75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76 | (except as stated in this section) patent license to make, have made,
77 | use, offer to sell, sell, import, and otherwise transfer the Work,
78 | where such license applies only to those patent claims licensable
79 | by such Contributor that are necessarily infringed by their
80 | Contribution(s) alone or by combination of their Contribution(s)
81 | with the Work to which such Contribution(s) was submitted. If You
82 | institute patent litigation against any entity (including a
83 | cross-claim or counterclaim in a lawsuit) alleging that the Work
84 | or a Contribution incorporated within the Work constitutes direct
85 | or contributory patent infringement, then any patent licenses
86 | granted to You under this License for that Work shall terminate
87 | as of the date such litigation is filed.
88 |
89 | 4. Redistribution. You may reproduce and distribute copies of the
90 | Work or Derivative Works thereof in any medium, with or without
91 | modifications, and in Source or Object form, provided that You
92 | meet the following conditions:
93 |
94 | (a) You must give any other recipients of the Work or
95 | Derivative Works a copy of this License; and
96 |
97 | (b) You must cause any modified files to carry prominent notices
98 | stating that You changed the files; and
99 |
100 | (c) You must retain, in the Source form of any Derivative Works
101 | that You distribute, all copyright, patent, trademark, and
102 | attribution notices from the Source form of the Work,
103 | excluding those notices that do not pertain to any part of
104 | the Derivative Works; and
105 |
106 | (d) If the Work includes a "NOTICE" text file as part of its
107 | distribution, then any Derivative Works that You distribute must
108 | include a readable copy of the attribution notices contained
109 | within such NOTICE file, excluding those notices that do not
110 | pertain to any part of the Derivative Works, in at least one
111 | of the following places: within a NOTICE text file distributed
112 | as part of the Derivative Works; within the Source form or
113 | documentation, if provided along with the Derivative Works; or,
114 | within a display generated by the Derivative Works, if and
115 | wherever such third-party notices normally appear. The contents
116 | of the NOTICE file are for informational purposes only and
117 | do not modify the License. You may add Your own attribution
118 | notices within Derivative Works that You distribute, alongside
119 | or as an addendum to the NOTICE text from the Work, provided
120 | that such additional attribution notices cannot be construed
121 | as modifying the License.
122 |
123 | You may add Your own copyright statement to Your modifications and
124 | may provide additional or different license terms and conditions
125 | for use, reproduction, or distribution of Your modifications, or
126 | for any such Derivative Works as a whole, provided Your use,
127 | reproduction, and distribution of the Work otherwise complies with
128 | the conditions stated in this License.
129 |
130 | 5. Submission of Contributions. Unless You explicitly state otherwise,
131 | any Contribution intentionally submitted for inclusion in the Work
132 | by You to the Licensor shall be under the terms and conditions of
133 | this License, without any additional terms or conditions.
134 | Notwithstanding the above, nothing herein shall supersede or modify
135 | the terms of any separate license agreement you may have executed
136 | with Licensor regarding such Contributions.
137 |
138 | 6. Trademarks. This License does not grant permission to use the trade
139 | names, trademarks, service marks, or product names of the Licensor,
140 | except as required for reasonable and customary use in describing the
141 | origin of the Work and reproducing the content of the NOTICE file.
142 |
143 | 7. Disclaimer of Warranty. Unless required by applicable law or
144 | agreed to in writing, Licensor provides the Work (and each
145 | Contributor provides its Contributions) on an "AS IS" BASIS,
146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147 | implied, including, without limitation, any warranties or conditions
148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149 | PARTICULAR PURPOSE. You are solely responsible for determining the
150 | appropriateness of using or redistributing the Work and assume any
151 | risks associated with Your exercise of permissions under this License.
152 |
153 | 8. Limitation of Liability. In no event and under no legal theory,
154 | whether in tort (including negligence), contract, or otherwise,
155 | unless required by applicable law (such as deliberate and grossly
156 | negligent acts) or agreed to in writing, shall any Contributor be
157 | liable to You for damages, including any direct, indirect, special,
158 | incidental, or consequential damages of any character arising as a
159 | result of this License or out of the use or inability to use the
160 | Work (including but not limited to damages for loss of goodwill,
161 | work stoppage, computer failure or malfunction, or any and all
162 | other commercial damages or losses), even if such Contributor
163 | has been advised of the possibility of such damages.
164 |
165 | 9. Accepting Warranty or Additional Liability. While redistributing
166 | the Work or Derivative Works thereof, You may choose to offer,
167 | and charge a fee for, acceptance of support, warranty, indemnity,
168 | or other liability obligations and/or rights consistent with this
169 | License. However, in accepting such obligations, You may act only
170 | on Your own behalf and on Your sole responsibility, not on behalf
171 | of any other Contributor, and only if You agree to indemnify,
172 | defend, and hold each Contributor harmless for any liability
173 | incurred by, or claims asserted against, such Contributor by reason
174 | of your accepting any such warranty or additional liability.
175 |
176 | END OF TERMS AND CONDITIONS
177 |
178 | APPENDIX: How to apply the Apache License to your work.
179 |
180 | To apply the Apache License to your work, attach the following
181 | boilerplate notice, with the fields enclosed by brackets "[]"
182 | replaced with your own identifying information. (Don't include
183 | the brackets!) The text should be enclosed in the appropriate
184 | comment syntax for the file format. We also recommend that a
185 | file or class name and description of purpose be included on the
186 | same "printed page" as the copyright notice for easier
187 | identification within third-party archives.
188 |
189 | Copyright [yyyy] [name of copyright owner]
190 |
191 | Licensed under the Apache License, Version 2.0 (the "License");
192 | you may not use this file except in compliance with the License.
193 | You may obtain a copy of the License at
194 |
195 | http://www.apache.org/licenses/LICENSE-2.0
196 |
197 | Unless required by applicable law or agreed to in writing, software
198 | distributed under the License is distributed on an "AS IS" BASIS,
199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 | See the License for the specific language governing permissions and
201 | limitations under the License.
202 |
--------------------------------------------------------------------------------
/stress/stress-cmd/_cass_seq_all.sh:
--------------------------------------------------------------------------------
#!/bin/sh
# GENERATED: 2024-12-05 16:24:10
# Generated benchmark driver (created by stress_mng.py 'generate'; do not edit by hand).
# Runs cassandra-stress write/read sequences against a v4 and a v5 cluster for
# CL=LOCAL_ONE and CL=LOCAL_QUORUM, cycling through compaction strategies
# (LCS, STCS and - on the v5 cluster only - UCS with scaling parameters 4/8/10).
# The keyspace is dropped via stress_mng.py between runs for a clean start and
# every cassandra-stress output is captured under ./stress-output/<timestamp>/.
curr_date=$(date +%Y-%m-%d_%H-%M-%S)
mkdir -p ./stress-output/$curr_date/
# ===== v4 cluster, CL=LOCAL_ONE (LCS, STCS) =====
echo 'Based on: .\config\compareV4V5_sequenceTHR\_cass_v4_1_local_one.env'
echo 'START remove: ...'
python3.11 stress/stress_mng.py remove -e compareV4V5_sequenceTHR/_cass_v4_1_local_one.env -k keyspace1 -d stress -s 8

echo 'START write, 100x thread: 1/1...'
./apache-cassandra-5.0.2/tools/bin/cassandra-stress write duration=1m cl=LOCAL_ONE no-warmup -node 10.129.52.58,10.129.53.21,10.129.52.57 -mode user=perf password=perf prepared protocolVersion=4 connectionsPerHost=24 maxPending=384 -schema "replication(strategy=NetworkTopologyStrategy,factor=3)" "compaction(strategy=LeveledCompactionStrategy,sstable_size_in_mb=160,fanout_size=10)" -rate "threads<=100" -reporting output-frequency=5s > "./stress-output/$curr_date/$curr_date v4 write_LOCAL_ONE_LCS_100xTHR.txt"

echo 'START read, 100x thread: 2/1...'
./apache-cassandra-5.0.2/tools/bin/cassandra-stress read duration=1m cl=LOCAL_ONE no-warmup -node 10.129.52.58,10.129.53.21,10.129.52.57 -mode user=perf password=perf prepared protocolVersion=4 connectionsPerHost=24 maxPending=384 -rate "threads<=100" -reporting output-frequency=5s > "./stress-output/$curr_date/$curr_date v4 read_LOCAL_ONE_LCS_100xTHR.txt"

echo 'START remove: ...'
python3.11 stress/stress_mng.py remove -e compareV4V5_sequenceTHR/_cass_v4_1_local_one.env -k keyspace1 -d stress -s 8

echo 'START write, 100x thread: 3/1...'
./apache-cassandra-5.0.2/tools/bin/cassandra-stress write duration=1m cl=LOCAL_ONE no-warmup -node 10.129.52.58,10.129.53.21,10.129.52.57 -mode user=perf password=perf prepared protocolVersion=4 connectionsPerHost=24 maxPending=384 -schema "replication(strategy=NetworkTopologyStrategy,factor=3)" "compaction(strategy=SizeTieredCompactionStrategy,max_threshold=32,min_threshold=4)" -rate "threads<=100" -reporting output-frequency=5s > "./stress-output/$curr_date/$curr_date v4 write_LOCAL_ONE_STCS_100xTHR.txt"

echo 'START read, 100x thread: 4/1...'
./apache-cassandra-5.0.2/tools/bin/cassandra-stress read duration=1m cl=LOCAL_ONE no-warmup -node 10.129.52.58,10.129.53.21,10.129.52.57 -mode user=perf password=perf prepared protocolVersion=4 connectionsPerHost=24 maxPending=384 -rate "threads<=100" -reporting output-frequency=5s > "./stress-output/$curr_date/$curr_date v4 read_LOCAL_ONE_STCS_100xTHR.txt"

echo 'START remove: ...'
python3.11 stress/stress_mng.py remove -e compareV4V5_sequenceTHR/_cass_v4_1_local_one.env -k keyspace1 -d stress -s 8

# ===== v4 cluster, CL=LOCAL_QUORUM (LCS, STCS) =====
echo 'Based on: .\config\compareV4V5_sequenceTHR\_cass_v4_2_local_quorum.env'
echo 'START remove: ...'
python3.11 stress/stress_mng.py remove -e compareV4V5_sequenceTHR/_cass_v4_2_local_quorum.env -k keyspace1 -d stress -s 8

echo 'START write, 100x thread: 5/1...'
./apache-cassandra-5.0.2/tools/bin/cassandra-stress write duration=1m cl=LOCAL_QUORUM no-warmup -node 10.129.52.58,10.129.53.21,10.129.52.57 -mode user=perf password=perf prepared protocolVersion=4 connectionsPerHost=24 maxPending=384 -schema "replication(strategy=NetworkTopologyStrategy,factor=3)" "compaction(strategy=LeveledCompactionStrategy,sstable_size_in_mb=160,fanout_size=10)" -rate "threads<=100" -reporting output-frequency=5s > "./stress-output/$curr_date/$curr_date v4 write_LOCAL_QUORUM_LCS_100xTHR.txt"

echo 'START read, 100x thread: 6/1...'
./apache-cassandra-5.0.2/tools/bin/cassandra-stress read duration=1m cl=LOCAL_QUORUM no-warmup -node 10.129.52.58,10.129.53.21,10.129.52.57 -mode user=perf password=perf prepared protocolVersion=4 connectionsPerHost=24 maxPending=384 -rate "threads<=100" -reporting output-frequency=5s > "./stress-output/$curr_date/$curr_date v4 read_LOCAL_QUORUM_LCS_100xTHR.txt"

echo 'START remove: ...'
python3.11 stress/stress_mng.py remove -e compareV4V5_sequenceTHR/_cass_v4_2_local_quorum.env -k keyspace1 -d stress -s 8

echo 'START write, 100x thread: 7/1...'
./apache-cassandra-5.0.2/tools/bin/cassandra-stress write duration=1m cl=LOCAL_QUORUM no-warmup -node 10.129.52.58,10.129.53.21,10.129.52.57 -mode user=perf password=perf prepared protocolVersion=4 connectionsPerHost=24 maxPending=384 -schema "replication(strategy=NetworkTopologyStrategy,factor=3)" "compaction(strategy=SizeTieredCompactionStrategy,max_threshold=32,min_threshold=4)" -rate "threads<=100" -reporting output-frequency=5s > "./stress-output/$curr_date/$curr_date v4 write_LOCAL_QUORUM_STCS_100xTHR.txt"

echo 'START read, 100x thread: 8/1...'
./apache-cassandra-5.0.2/tools/bin/cassandra-stress read duration=1m cl=LOCAL_QUORUM no-warmup -node 10.129.52.58,10.129.53.21,10.129.52.57 -mode user=perf password=perf prepared protocolVersion=4 connectionsPerHost=24 maxPending=384 -rate "threads<=100" -reporting output-frequency=5s > "./stress-output/$curr_date/$curr_date v4 read_LOCAL_QUORUM_STCS_100xTHR.txt"

echo 'START remove: ...'
python3.11 stress/stress_mng.py remove -e compareV4V5_sequenceTHR/_cass_v4_2_local_quorum.env -k keyspace1 -d stress -s 8

# ===== v5 cluster, CL=LOCAL_ONE (LCS, STCS, UCS 4/8/10) =====
echo 'Based on: .\config\compareV4V5_sequenceTHR\_cass_v5_1_local_one.env'
echo 'START remove: ...'
python3.11 stress/stress_mng.py remove -e compareV4V5_sequenceTHR/_cass_v5_1_local_one.env -k keyspace1 -d stress -s 8

echo 'START write, 100x thread: 9/1...'
./apache-cassandra-5.0.2/tools/bin/cassandra-stress write duration=1m cl=LOCAL_ONE no-warmup -node 10.129.53.159,10.129.53.154,10.129.53.153 -mode user=perf password=perf prepared protocolVersion=4 connectionsPerHost=24 maxPending=384 -schema "replication(strategy=NetworkTopologyStrategy,factor=3)" "compaction(strategy=LeveledCompactionStrategy,sstable_size_in_mb=160,fanout_size=10)" -rate "threads<=100" -reporting output-frequency=5s > "./stress-output/$curr_date/$curr_date v5 write_LOCAL_ONE_LCS_100xTHR.txt"

echo 'START read, 100x thread: 10/1...'
./apache-cassandra-5.0.2/tools/bin/cassandra-stress read duration=1m cl=LOCAL_ONE no-warmup -node 10.129.53.159,10.129.53.154,10.129.53.153 -mode user=perf password=perf prepared protocolVersion=4 connectionsPerHost=24 maxPending=384 -rate "threads<=100" -reporting output-frequency=5s > "./stress-output/$curr_date/$curr_date v5 read_LOCAL_ONE_LCS_100xTHR.txt"

echo 'START remove: ...'
python3.11 stress/stress_mng.py remove -e compareV4V5_sequenceTHR/_cass_v5_1_local_one.env -k keyspace1 -d stress -s 8

echo 'START write, 100x thread: 11/1...'
./apache-cassandra-5.0.2/tools/bin/cassandra-stress write duration=1m cl=LOCAL_ONE no-warmup -node 10.129.53.159,10.129.53.154,10.129.53.153 -mode user=perf password=perf prepared protocolVersion=4 connectionsPerHost=24 maxPending=384 -schema "replication(strategy=NetworkTopologyStrategy,factor=3)" "compaction(strategy=SizeTieredCompactionStrategy,max_threshold=32,min_threshold=4)" -rate "threads<=100" -reporting output-frequency=5s > "./stress-output/$curr_date/$curr_date v5 write_LOCAL_ONE_STCS_100xTHR.txt"

echo 'START read, 100x thread: 12/1...'
./apache-cassandra-5.0.2/tools/bin/cassandra-stress read duration=1m cl=LOCAL_ONE no-warmup -node 10.129.53.159,10.129.53.154,10.129.53.153 -mode user=perf password=perf prepared protocolVersion=4 connectionsPerHost=24 maxPending=384 -rate "threads<=100" -reporting output-frequency=5s > "./stress-output/$curr_date/$curr_date v5 read_LOCAL_ONE_STCS_100xTHR.txt"

echo 'START remove: ...'
python3.11 stress/stress_mng.py remove -e compareV4V5_sequenceTHR/_cass_v5_1_local_one.env -k keyspace1 -d stress -s 8

echo 'START write, 100x thread: 13/1...'
./apache-cassandra-5.0.2/tools/bin/cassandra-stress write duration=1m cl=LOCAL_ONE no-warmup -node 10.129.53.159,10.129.53.154,10.129.53.153 -mode user=perf password=perf prepared protocolVersion=4 connectionsPerHost=24 maxPending=384 -schema "replication(strategy=NetworkTopologyStrategy,factor=3)" "compaction(strategy=UnifiedCompactionStrategy,scaling_parameters=4)" -rate "threads<=100" -reporting output-frequency=5s > "./stress-output/$curr_date/$curr_date v5 write_LOCAL_ONE_UCS4_100xTHR.txt"

echo 'START read, 100x thread: 14/1...'
./apache-cassandra-5.0.2/tools/bin/cassandra-stress read duration=1m cl=LOCAL_ONE no-warmup -node 10.129.53.159,10.129.53.154,10.129.53.153 -mode user=perf password=perf prepared protocolVersion=4 connectionsPerHost=24 maxPending=384 -rate "threads<=100" -reporting output-frequency=5s > "./stress-output/$curr_date/$curr_date v5 read_LOCAL_ONE_UCS4_100xTHR.txt"

echo 'START remove: ...'
python3.11 stress/stress_mng.py remove -e compareV4V5_sequenceTHR/_cass_v5_1_local_one.env -k keyspace1 -d stress -s 8

echo 'START write, 100x thread: 15/1...'
./apache-cassandra-5.0.2/tools/bin/cassandra-stress write duration=1m cl=LOCAL_ONE no-warmup -node 10.129.53.159,10.129.53.154,10.129.53.153 -mode user=perf password=perf prepared protocolVersion=4 connectionsPerHost=24 maxPending=384 -schema "replication(strategy=NetworkTopologyStrategy,factor=3)" "compaction(strategy=UnifiedCompactionStrategy,scaling_parameters=8)" -rate "threads<=100" -reporting output-frequency=5s > "./stress-output/$curr_date/$curr_date v5 write_LOCAL_ONE_UCS8_100xTHR.txt"

echo 'START read, 100x thread: 16/1...'
./apache-cassandra-5.0.2/tools/bin/cassandra-stress read duration=1m cl=LOCAL_ONE no-warmup -node 10.129.53.159,10.129.53.154,10.129.53.153 -mode user=perf password=perf prepared protocolVersion=4 connectionsPerHost=24 maxPending=384 -rate "threads<=100" -reporting output-frequency=5s > "./stress-output/$curr_date/$curr_date v5 read_LOCAL_ONE_UCS8_100xTHR.txt"

echo 'START remove: ...'
python3.11 stress/stress_mng.py remove -e compareV4V5_sequenceTHR/_cass_v5_1_local_one.env -k keyspace1 -d stress -s 8

echo 'START write, 100x thread: 17/1...'
./apache-cassandra-5.0.2/tools/bin/cassandra-stress write duration=1m cl=LOCAL_ONE no-warmup -node 10.129.53.159,10.129.53.154,10.129.53.153 -mode user=perf password=perf prepared protocolVersion=4 connectionsPerHost=24 maxPending=384 -schema "replication(strategy=NetworkTopologyStrategy,factor=3)" "compaction(strategy=UnifiedCompactionStrategy,scaling_parameters=10)" -rate "threads<=100" -reporting output-frequency=5s > "./stress-output/$curr_date/$curr_date v5 write_LOCAL_ONE_UCS10_100xTHR.txt"

echo 'START read, 100x thread: 18/1...'
./apache-cassandra-5.0.2/tools/bin/cassandra-stress read duration=1m cl=LOCAL_ONE no-warmup -node 10.129.53.159,10.129.53.154,10.129.53.153 -mode user=perf password=perf prepared protocolVersion=4 connectionsPerHost=24 maxPending=384 -rate "threads<=100" -reporting output-frequency=5s > "./stress-output/$curr_date/$curr_date v5 read_LOCAL_ONE_UCS10_100xTHR.txt"

echo 'START remove: ...'
python3.11 stress/stress_mng.py remove -e compareV4V5_sequenceTHR/_cass_v5_1_local_one.env -k keyspace1 -d stress -s 8

# ===== v5 cluster, CL=LOCAL_QUORUM (LCS, STCS, UCS 4/8/10) =====
echo 'Based on: .\config\compareV4V5_sequenceTHR\_cass_v5_2_local_quorum.env'
echo 'START remove: ...'
python3.11 stress/stress_mng.py remove -e compareV4V5_sequenceTHR/_cass_v5_2_local_quorum.env -k keyspace1 -d stress -s 8

echo 'START write, 100x thread: 19/1...'
./apache-cassandra-5.0.2/tools/bin/cassandra-stress write duration=1m cl=LOCAL_QUORUM no-warmup -node 10.129.53.159,10.129.53.154,10.129.53.153 -mode user=perf password=perf prepared protocolVersion=4 connectionsPerHost=24 maxPending=384 -schema "replication(strategy=NetworkTopologyStrategy,factor=3)" "compaction(strategy=LeveledCompactionStrategy,sstable_size_in_mb=160,fanout_size=10)" -rate "threads<=100" -reporting output-frequency=5s > "./stress-output/$curr_date/$curr_date v5 write_LOCAL_QUORUM_LCS_100xTHR.txt"

echo 'START read, 100x thread: 20/1...'
./apache-cassandra-5.0.2/tools/bin/cassandra-stress read duration=1m cl=LOCAL_QUORUM no-warmup -node 10.129.53.159,10.129.53.154,10.129.53.153 -mode user=perf password=perf prepared protocolVersion=4 connectionsPerHost=24 maxPending=384 -rate "threads<=100" -reporting output-frequency=5s > "./stress-output/$curr_date/$curr_date v5 read_LOCAL_QUORUM_LCS_100xTHR.txt"

echo 'START remove: ...'
python3.11 stress/stress_mng.py remove -e compareV4V5_sequenceTHR/_cass_v5_2_local_quorum.env -k keyspace1 -d stress -s 8

echo 'START write, 100x thread: 21/1...'
./apache-cassandra-5.0.2/tools/bin/cassandra-stress write duration=1m cl=LOCAL_QUORUM no-warmup -node 10.129.53.159,10.129.53.154,10.129.53.153 -mode user=perf password=perf prepared protocolVersion=4 connectionsPerHost=24 maxPending=384 -schema "replication(strategy=NetworkTopologyStrategy,factor=3)" "compaction(strategy=SizeTieredCompactionStrategy,max_threshold=32,min_threshold=4)" -rate "threads<=100" -reporting output-frequency=5s > "./stress-output/$curr_date/$curr_date v5 write_LOCAL_QUORUM_STCS_100xTHR.txt"

echo 'START read, 100x thread: 22/1...'
./apache-cassandra-5.0.2/tools/bin/cassandra-stress read duration=1m cl=LOCAL_QUORUM no-warmup -node 10.129.53.159,10.129.53.154,10.129.53.153 -mode user=perf password=perf prepared protocolVersion=4 connectionsPerHost=24 maxPending=384 -rate "threads<=100" -reporting output-frequency=5s > "./stress-output/$curr_date/$curr_date v5 read_LOCAL_QUORUM_STCS_100xTHR.txt"

echo 'START remove: ...'
python3.11 stress/stress_mng.py remove -e compareV4V5_sequenceTHR/_cass_v5_2_local_quorum.env -k keyspace1 -d stress -s 8

echo 'START write, 100x thread: 23/1...'
./apache-cassandra-5.0.2/tools/bin/cassandra-stress write duration=1m cl=LOCAL_QUORUM no-warmup -node 10.129.53.159,10.129.53.154,10.129.53.153 -mode user=perf password=perf prepared protocolVersion=4 connectionsPerHost=24 maxPending=384 -schema "replication(strategy=NetworkTopologyStrategy,factor=3)" "compaction(strategy=UnifiedCompactionStrategy,scaling_parameters=4)" -rate "threads<=100" -reporting output-frequency=5s > "./stress-output/$curr_date/$curr_date v5 write_LOCAL_QUORUM_UCS4_100xTHR.txt"

echo 'START read, 100x thread: 24/1...'
./apache-cassandra-5.0.2/tools/bin/cassandra-stress read duration=1m cl=LOCAL_QUORUM no-warmup -node 10.129.53.159,10.129.53.154,10.129.53.153 -mode user=perf password=perf prepared protocolVersion=4 connectionsPerHost=24 maxPending=384 -rate "threads<=100" -reporting output-frequency=5s > "./stress-output/$curr_date/$curr_date v5 read_LOCAL_QUORUM_UCS4_100xTHR.txt"

echo 'START remove: ...'
python3.11 stress/stress_mng.py remove -e compareV4V5_sequenceTHR/_cass_v5_2_local_quorum.env -k keyspace1 -d stress -s 8

echo 'START write, 100x thread: 25/1...'
./apache-cassandra-5.0.2/tools/bin/cassandra-stress write duration=1m cl=LOCAL_QUORUM no-warmup -node 10.129.53.159,10.129.53.154,10.129.53.153 -mode user=perf password=perf prepared protocolVersion=4 connectionsPerHost=24 maxPending=384 -schema "replication(strategy=NetworkTopologyStrategy,factor=3)" "compaction(strategy=UnifiedCompactionStrategy,scaling_parameters=8)" -rate "threads<=100" -reporting output-frequency=5s > "./stress-output/$curr_date/$curr_date v5 write_LOCAL_QUORUM_UCS8_100xTHR.txt"

echo 'START read, 100x thread: 26/1...'
./apache-cassandra-5.0.2/tools/bin/cassandra-stress read duration=1m cl=LOCAL_QUORUM no-warmup -node 10.129.53.159,10.129.53.154,10.129.53.153 -mode user=perf password=perf prepared protocolVersion=4 connectionsPerHost=24 maxPending=384 -rate "threads<=100" -reporting output-frequency=5s > "./stress-output/$curr_date/$curr_date v5 read_LOCAL_QUORUM_UCS8_100xTHR.txt"

echo 'START remove: ...'
python3.11 stress/stress_mng.py remove -e compareV4V5_sequenceTHR/_cass_v5_2_local_quorum.env -k keyspace1 -d stress -s 8

echo 'START write, 100x thread: 27/1...'
./apache-cassandra-5.0.2/tools/bin/cassandra-stress write duration=1m cl=LOCAL_QUORUM no-warmup -node 10.129.53.159,10.129.53.154,10.129.53.153 -mode user=perf password=perf prepared protocolVersion=4 connectionsPerHost=24 maxPending=384 -schema "replication(strategy=NetworkTopologyStrategy,factor=3)" "compaction(strategy=UnifiedCompactionStrategy,scaling_parameters=10)" -rate "threads<=100" -reporting output-frequency=5s > "./stress-output/$curr_date/$curr_date v5 write_LOCAL_QUORUM_UCS10_100xTHR.txt"

echo 'START read, 100x thread: 28/1...'
./apache-cassandra-5.0.2/tools/bin/cassandra-stress read duration=1m cl=LOCAL_QUORUM no-warmup -node 10.129.53.159,10.129.53.154,10.129.53.153 -mode user=perf password=perf prepared protocolVersion=4 connectionsPerHost=24 maxPending=384 -rate "threads<=100" -reporting output-frequency=5s > "./stress-output/$curr_date/$curr_date v5 read_LOCAL_QUORUM_UCS10_100xTHR.txt"

echo 'START remove: ...'
python3.11 stress/stress_mng.py remove -e compareV4V5_sequenceTHR/_cass_v5_2_local_quorum.env -k keyspace1 -d stress -s 8

--------------------------------------------------------------------------------
/stress/stress_mng.py:
--------------------------------------------------------------------------------
1 | from cql_access import CQLAccess
2 | from cql_config import CQLConfig
3 | from cql_output import CQLOutput
4 | from extract_summary import ExtractSummary
5 | from stress_compare import StressCompare
6 | from stress_graph import StressGraph
7 | from colorama import Fore, Style
8 | import cql_helper as helper
9 | from glob import glob
10 | import datetime, time
11 | from os import path
12 | import click
13 | import re
14 |
15 |
def get_template(template_path, perf_dir = "."):
    """Load a command template from the 'config' subdirectory.

    The template file is read line by line and collapsed into one
    space-separated, single-line string (each line is stripped first).

    :param template_path: template file path relative to '<perf_dir>/config';
        the '.txt' suffix is appended when missing
    :param perf_dir: base directory containing the 'config' subdirectory
    :return: template as a single-line string
    """
    if not template_path.lower().endswith(".txt"):
        template_path += ".txt"

    template_list = helper.read_file_lines(path.join(perf_dir, "config", template_path))
    # join with single spaces (replaces the previous manual concatenation
    # plus trailing-character trim; identical output, idiomatic form)
    return " ".join(itm.strip() for itm in template_list)
27 |
def get_variables(template, pattern=r"%([^%]+)%"):
    """Return the names of all '%NAME%' placeholders found in *template*."""
    matcher = re.compile(pattern)
    return matcher.findall(template)
30 |
def get_arguments(params: dict, arguments):
    """Parse RUN arguments into plain variables and iterator combinations.

    Every argument except the first (the template name) is either:
      * 'KEY=VALUE' -> stored into *params* as a plain variable, or
      * 'KEY'       -> an iterator; its comma-separated values are read from
                       params[KEY] and expanded into all value combinations.

    :param params: dict of known variables; updated in place with KEY=VALUE pairs
    :param arguments: list of RUN arguments (first item is the template name)
    :return: tuple (variables, iterator_list) where *variables* maps each
        iterator key to its value list and *iterator_list* holds one dict per
        combination of iterator values
    """
    variables = {}
    iterators = []
    iterator_list = []

    # define variables and iterators
    for index in range(1, len(arguments)):
        argument = arguments[index]
        values = argument.split("=")

        if len(values) == 2:    # create KEY=VALUE
            params[values[0].strip()] = values[1].strip()
        else:
            key = values[0].strip()
            if params.get(key, None):
                itms = [itm.strip() for itm in params[key].split(",")]
            else:
                # BUGFIX: was 'items = None' (typo), leaving 'itms' unbound or
                # stale from a previous iteration; skip unknown iterator keys
                print(f"Warning: missing values for iterator {key}")
                continue
            iterators.append((key, itms))
            variables[key] = itms

    # total number of combinations across all iterators
    count = 1
    for iterator in iterators:
        count = count * len(iterator[1])
    if len(iterators) > 0:
        iterator_list = [-1] * count

    # create combinations; the fill order alternates per iterator so that all
    # value pairs are covered
    switch = True
    for iterator in iterators:
        size = count / len(iterator[1])
        key = iterator[0]
        values = iterator[1]

        if switch:
            # sequence 2,2,2 ... 4,4,4
            iter_index = 0
            for iter_value in values:
                for i in range(int(size)):
                    if iterator_list[iter_index + i] == -1:
                        iterator_list[iter_index + i] = {key: iter_value}
                    else:
                        iterator_list[iter_index + i][key] = iter_value
                iter_index += int(size)
            switch = False
        else:
            # sequence 2,4,8 ... 2,4,8
            for i in range(int(size)):
                iter_index = i * len(iterator[1])
                for iter_value in values:
                    if iterator_list[iter_index] == -1:
                        iterator_list[iter_index] = {key: iter_value}
                    else:
                        iterator_list[iter_index][key] = iter_value
                    iter_index += 1
            switch = True
    return variables, iterator_list
97 |
def create_variables(params: dict, run_variable: dict):
    """Build the final variable mapping for one generated command.

    Values from *run_variable* (the current iterator combination) override the
    matching entries of *params*; afterwards every '%NAME%' reference inside a
    value is substituted (single pass) by the value of the referenced variable.
    """
    merged = {}
    for key, value in params.items():
        if run_variable and run_variable.get(key, None):
            merged[key] = run_variable[key]
        else:
            merged[key] = value

    # single-pass substitution of %NAME% references inside each value
    for key, value in merged.items():
        for name in re.findall(r"%([^%]+)%", value):
            if merged.get(name, None):
                value = value.replace(f"%{name}%", merged[name])
            else:
                # TODO: propagate warning
                print(f"Warning: missing variable {name}")
        merged[key] = value

    return merged
122 |
def remove_keyspace(keyspace: str, params, simulation: bool = False):
    """Drop the whole keyspace via CQL (no-op when *simulation* is set)."""
    if simulation:
        return
    cql = CQLAccess(params)
    try:
        cql.open()
        cql.remove_keyspace(keyspace)
    finally:
        cql.close()
134 |
def remove_table(keyspace: str, table: str, params, simulation: bool = False):
    """Drop a single table in *keyspace* via CQL (no-op when *simulation* is set)."""
    if simulation:
        return
    cql = CQLAccess(params)
    try:
        cql.open()
        cql.remove_table(keyspace, table)
    finally:
        cql.close()
146 |
def stress_test(output: CQLOutput, params: dict, perf_dir = ".", counter=0):
    """Generate commands for all RUN0..RUN99 definitions in *params*.

    For each RUN<i> key the referenced template is loaded, its '%NAME%'
    placeholders are resolved (once per iterator combination, if the RUN has
    iterators) and the resulting command is written to *output*. 'remove'
    operations are emitted but do not advance the test counter.

    :param output: output writer for the generated commands
    :param params: global parameters including the RUN<i> definitions
    :param perf_dir: base directory with the 'config' subdirectory
    :param counter: global test counter carried across ENV files
    :return: updated global counter
    """
    for i in range(100):
        # removed unused local 'ext_cmd' (assigned but never read)
        key = f"RUN{i}"
        if params.get(key, None) is None:
            break

        arguments = params[key].split(",")

        # get list of variables for RUN (parse other arguments)
        run_variables, run_variable_values = get_arguments(params, arguments)

        # get template from RUN (parse first argument)
        template = get_template(arguments[0].strip(), perf_dir)

        # get variables from template
        variables = get_variables(template)

        if len(run_variable_values) == 0:
            # no iterators: one command built from the global parameters
            cmd_variable = create_variables(params, None)
            cmd = template

            for variable in variables:
                if cmd_variable.get(variable, None):
                    cmd = cmd.replace(f"%{variable}%", cmd_variable[variable])

            if cmd_variable["OPERATION"].lower() != "remove":
                counter += 1
            # BUGFIX: print unconditionally, consistent with the iterator
            # branch below (remove commands were silently dropped before)
            output.print_cmd(cmd, counter, 1, cmd_variable)
        else:
            # one command per iterator combination
            run_value_index = 0
            for combination in range(len(run_variable_values)):

                cmd_variable = create_variables(params, run_variable_values[combination])
                cmd = template

                for variable in variables:
                    if cmd_variable.get(variable, None):
                        cmd = cmd.replace(f"%{variable}%", cmd_variable[variable])

                if cmd_variable["OPERATION"].lower() != "remove":
                    counter += 1
                    run_value_index += 1

                output.print_cmd(cmd, counter, run_value_index, cmd_variable)
        output.print()
    return counter
198 |
199 |
def main_execute(env="_cass*.env", perf_dir = ".", log=""):
    """Generate the stress-test shell script for all matching ENV files.

    :param env: glob pattern of ENV files inside '<perf_dir>/config'
    :param perf_dir: base directory with the 'config' subdirectory
    :param log: name of the generated output script
    """
    unique_date = datetime.datetime.now().strftime("%Y-%m-%d %H_%M_%S")
    global_counter = 0

    output = None
    try:
        output = CQLOutput(perf_dir, log)
        output.open()
        output.print_header()

        env_filter = path.join(perf_dir, "config", env)  # renamed; do not shadow builtin 'filter'
        for file in glob(env_filter):

            output.print(f"echo 'Based on: {file}'")
            global_params = CQLConfig(perf_dir).get_global_params(file)
            global_params["DATE"] = unique_date
            global_counter = stress_test(output, global_params, perf_dir, global_counter)

    except Exception as ex:
        msg = f"SYSTEM ERROR in 'run_executor': {type(ex).__name__} - '{str(ex) if ex is not None else '!! Noname exception !!'}'"
        # BUGFIX: 'output' may still be None when CQLOutput creation/open failed;
        # fall back to stdout instead of raising AttributeError
        if output:
            output.print(msg)
        else:
            print(msg)

    finally:
        if output:
            output.close()
224 |
@click.group()
def remove_group():
    # Click group holding the 'remove' command; merged into 'cli' via CommandCollection below.
    pass
228 |
@remove_group.command()
@click.option("-e", "--env", help="name of ENV file in config subdirectory (default '_cass.env')", default="_cass.env")
@click.option("-d", "--perf_dir", help="directory with stress tool (default '.')", default=".")
@click.option("-k", "--keyspace", help="name of keyspace for remove", default="")
@click.option("-t", "--table", help="name of table in specific keyspace for remove (if table is empty, the whole keyspace will be removed)", default="")
@click.option("-s", "--sleep", help="sleep time in seconds after remove (default 5)", default="5")
def remove(env, perf_dir, keyspace, table, sleep):
    """Remove keyspace or table from CQL solution"""
    # only the first ENV file matching the pattern is processed
    matches = glob(path.join(perf_dir, "config", env))
    if not matches:
        return
    file = matches[0]
    params = CQLConfig(perf_dir).get_global_params(file)

    if table:
        remove_table(keyspace, table, params, False)
        print(f"Removed table: '{keyspace}.{table}' (ENV: '{file}')")
    else:
        remove_keyspace(keyspace, params, False)
        print(f"Removed keyspace: '{keyspace}' (ENV: '{file}')")
    print(f"Sleep {sleep} seconds ...")
    time.sleep(int(sleep))
249 |
@click.group()
def version_group():
    # Click group holding the 'version' command; merged into 'cli' via CommandCollection below.
    pass
253 |
@version_group.command()
def version():
    """Print current version of key components"""
    # local imports: reporting-only dependencies are loaded just for this command
    from cassandra import __version__ as cassandra_version
    from polars import __version__ as polars_version
    from qgate_graph import __version__ as qgate_graph_version
    from prettytable import PrettyTable
    import version
    import sys

    # bordered console table, width-limited to 75 characters
    table = PrettyTable()
    table.border = True
    table.header = True
    table.padding_width = 1
    table.max_table_width = 75

    table.field_names = ["Component", "Version"]
    table.align = "l"

    # this tool's own version is highlighted in red
    table.add_row([Fore.LIGHTRED_EX + "stress"+ Style.RESET_ALL, Fore.LIGHTRED_EX + version.__version__+Style.RESET_ALL])
    table.add_row(["polars", polars_version])
    table.add_row(["qgate_graph", qgate_graph_version])
    table.add_row(["cassandra-driver", cassandra_version])
    table.add_row(["python", sys.version])
    print(table)
279 |
@click.group()
def extract_group():
    # Click group holding the 'extract' command; merged into 'cli' via CommandCollection below.
    pass
283 |
@extract_group.command()
@click.option("-d", "--dir", help="directory with particular items (default './stress_output/')", default="./stress_output/")
@click.option("-c", "--csv", help="generate output in CSV form (default 'True')", default="True")
@click.option("-t", "--txt", help="generate output in TXT form (default 'True')", default="True")
def extract(dir, csv, txt):
    """Extract data from 'cassandra-stress' output to the sub-dir 'extract' in CSV and TXT(JSON)"""
    # NOTE(review): default is './stress_output/' while the generated scripts write
    # to './stress-output/' (hyphen) - confirm which spelling is intended
    extract_dir = path.join(dir, "extract")
    summary = ExtractSummary(dir, extract_dir)
    summary.parse()
    if helper.str2bool(csv):
        summary.save_csv()
    if helper.str2bool(txt):
        summary.save_json()
296 |
@click.group()
def compare_group():
    # Click group holding the 'compare' command; merged into 'cli' via CommandCollection below.
    pass
300 |
301 | @compare_group.command()
302 | @click.option("-d", "--dir", help="directory with particular items (default './stress_output/')", default="./stress_output/")
303 | @click.option("-c", "--console", help="compare output to the console (default 'True')", default="True")
304 | @click.option("-g", "--graph", help="compare output to the sub-directory 'graph' form (default 'graph')", default="graph")
305 | @click.option("-t", "--type", help="compare type '0' - the different versions, '1' - the same versions (default '0')", default="0")
306 | @click.option("-o", "--oldprefix", help="prefix for old files (default 'v4')", default="v4")
307 | @click.option("-n", "--newprefix", help="prefix for new files (default 'v5')", default="v5")
308 | @click.option("-r", "--read", help="identification of read operations (default 'read')", default="read")
309 | @click.option("-w", "--write", help="identification of write operations (default 'write')", default="write")
310 | def compare(dir, console, graph, type, oldprefix, newprefix, read, write):
311 | """Compare data from TXT(JSON) to the sub-dir 'graph'"""
312 | comp = StressCompare(path.join(dir, "extract"), old_prefix=oldprefix, new_prefix=newprefix)
313 | int_type = int(type)
314 |
315 | compact_level = "LOCAL_ONE"
316 | print(f"==== {compact_level}===")
317 | match int_type:
318 | case 0:
319 | comp.add_cmp_different_versions(compact_level, read, write)
320 | case default:
321 | comp.add_cmp_same_versions(compact_level, read, write)
322 | if helper.str2bool(console):
323 | comp.text()
324 | if len(graph) > 0:
325 | comp.graph(path.join(dir, graph))
326 |
327 | compact_level = "LOCAL_QUORUM"
328 | print(f"==== {compact_level}===")
329 | match int_type:
330 | case 0:
331 | comp.add_cmp_different_versions(compact_level, read, write)
332 | case default:
333 | comp.add_cmp_same_versions(compact_level, read, write)
334 | if helper.str2bool(console):
335 | comp.text()
336 | if len(graph) > 0:
337 | comp.graph(path.join(dir, graph))
338 |
339 |
@click.group()
def graph_group():
    # Click group holding the 'graph' command; merged into 'cli' via CommandCollection below.
    pass
343 |
@graph_group.command()
@click.option("-d", "--dir", help="directory with particular items (default './stress_output/')", default="./stress_output/")
@click.option("-i", "--input", help="input sub-directory under dir (default 'extract')", default="extract")
@click.option("-o", "--output", help="output sub-directory under dir (default 'graph')", default="graph")
@click.option("-g", "--groups", help="output sub-directory under dir (default '')", default="")
def graph(dir, input, output, groups):
    """Create graphs from TXT(JSON) to the sub-dir 'graph'"""
    from qgate_graph.graph_performance import GraphPerformance

    # create graph based on text output
    generator = GraphPerformance()
    generator.generate_from_dir(path.join(dir, input), path.join(dir, output))

    if len(groups) > 0:
        mix = StressGraph(path.join(dir, input))
        output_dir = path.join(dir, output)

        # summary graphs join overlapping groups of data sizes (1-3, 3-5, 5-7,
        # 7-8) for 'insert' and 'simple1' (select) operations; this data-driven
        # loop replaces eight copy-pasted join/graph call pairs with identical
        # call order and output names (summary_<label>_<index>)
        group_sets = (("1", "2", "3"), ("3", "4", "5"), ("5", "6", "7"), ("7", "8"))
        for operation, label in (("insert", "insert"), ("simple1", "select")):
            for index, numbers in enumerate(group_sets, start=1):
                patterns = [f"* {n}_*_{operation} user_LOCAL_ONE*" for n in numbers]
                join_cores, duration, now = mix.join(patterns)
                mix.graph(output_dir, f"summary_{label}_{index}", join_cores, duration, now)
399 |
@click.group()
def generate_group():
    # Click group holding the 'generate' command; merged into 'cli' via CommandCollection below.
    pass
403 |
@generate_group.command()
@click.option("-e", "--env", help="name of ENV file (default '_cass.env')", default="_cass.env")
@click.option("-d", "--perf_dir", help="directory with perf_cql (default '.')", default=".")
@click.option("-l", "--log", help="output (default 'stress-run.sh')", default="stress-run.sh")
def generate(env, perf_dir, log):
    """Generate performance tests as *.sh for 'cassandra-stress'"""
    # thin CLI wrapper: main_execute iterates all ENV files matching the pattern
    main_execute(env, perf_dir, log)
411 |
# merge all command groups into a single CLI entry point
cli = click.CommandCollection(sources=[generate_group, remove_group, extract_group, compare_group, graph_group, version_group])

if __name__ == '__main__':
    cli()
416 |
417 |
--------------------------------------------------------------------------------
/stress/stress_compare.py:
--------------------------------------------------------------------------------
1 | from polars import read_csv
2 | from graph_output import GraphOutput
3 | from glob import glob
4 | from os import path
5 | from cql_helper import load_json
6 | from file_marker import FileMarker as const
7 | from cql_output import CQLOutput
8 | import datetime
9 |
10 |
class StressCompare:
    """ Generate compare stress tests (typically between e.g. cassandra v4/old and v5/new).
    The outputs are in json, txt format
    """

    # column subsets selectable for the comparison detail level
    COLUMNS_LOW = ["Performance", "Avrg"]
    COLUMNS_MEDIUM = ["Performance", "Avrg", "Latency 95th", "Latency 99th"]
    COLUMNS_HIGH = ["Performance", "Avrg", "Latency 95th", "Latency 99th", "Latency 999th", "Max"]

    # payload sizes used by add_cmp_same_versions (write first, then read)
    DATA_SIZES = ["1_6B", "2_1KB", "3_10KB", "4_50KB", "5_100KB", "6_300KB", "7_1MB", "8_4MB"]

    # UCS compaction variants used by add_cmp_different_versions;
    # tuple of (suffix, optional) — the UCS2 compare is optional
    UCS_VARIANTS = [("UCS2", True), ("UCS4", False), ("UCS8", False), ("UCS10", False)]

    def __init__(self, input_path, columns: list[str] = COLUMNS_LOW, old_prefix="v4", new_prefix="v5"):
        """
        Compare requested outputs.

        :param input_path: Input path for extracted files
        :param columns: list of columns for compare
        :param old_prefix: old prefix for files e.g. "v4"
        :param new_prefix: new prefix for files e.g. "v5"
        """
        self._input_path = input_path
        self._items = []
        self._columns = columns
        self._old_prefix = old_prefix
        self._new_prefix = new_prefix

    def add_file_set(self, join_label, old_label, old_file, new_label, new_file, optional=False):
        """Register one comparison set (labels + glob patterns + optional flag).

        :param join_label: title used for the joined/compared output
        :param old_label: label of the old-side dataset
        :param old_file: glob pattern (without extension) for the old-side file
        :param new_label: label of the new-side dataset
        :param new_file: glob pattern (without extension) for the new-side file
        :param optional: when True, a missing file pair is not reported
        """
        self._items.append((old_label, old_file, new_label, new_file, optional, join_label))

    def _find_file(self, pattern):
        """Return the last file matching 'pattern' under the input path, or None.

        (Keeps the original behavior: if several files match, the last glob
        result wins.)
        """
        found = None
        for file in glob(path.join(self._input_path, pattern)):
            found = file
        return found

    def _format_row(self, frame, label):
        """Serialize the selected columns of one CSV frame as a tab-separated
        line starting with 'label'; also collect the 'Executors' column.

        :param frame: loaded CSV data (object with .columns and .rows())
        :param label: row label placed in the first cell
        :return: (row text ending with a newline, tab-separated executors text)
        """
        row_text = f"{label}\t"
        executors = ""
        for index, name in enumerate(frame.columns):
            if name == "Executors":
                for row in frame.rows():
                    executors += f"{row[index]}\t"
            if name in self._columns:
                for row in frame.rows():
                    row_text += f"{row[index]}\t"
        # drop the trailing tab from both accumulators
        return row_text[:-1] + "\n", executors[:-1]

    def _run_item(self, old_label, old_file_name, new_label, new_file_name):
        """Build the text comparison for one old/new CSV pair.

        :return: (two-row tab-separated text, executors text of the new side),
                 or (None, None) when either file is missing.
        """
        new_file = self._find_file(new_file_name + ".csv")
        old_file = self._find_file(old_file_name + ".csv")
        if new_file is None or old_file is None:
            return None, None

        new_text, new_executors = self._format_row(read_csv(new_file), new_label)
        old_text, old_executors = self._format_row(read_csv(old_file), old_label)

        # both datasets must have been measured with the same executor counts
        if old_executors != new_executors:
            print("!!! DIFFERENT EXECUTORS !!!!")
        # normalize decimal separator and strip ' ms' units (spreadsheet-friendly)
        return (new_text + old_text).replace(".", ",").replace(" ms", ""), new_executors

    def text(self):
        """
        Text compare, focus on CSV files as input source.
        Prints a header (executor counts repeated once per compared column)
        followed by all comparison rows.
        """
        final_output = ""
        final_executors = ""

        for old_label, old_file, new_label, new_file, optional, _join in self._items:
            output, executors = self._run_item(old_label, old_file, new_label, new_file)

            if output:
                if not final_executors:
                    # first successful item defines the reference executor set
                    final_executors = executors
                final_output += output + "\r\n"
                if executors != final_executors:
                    print("!!! DIFFERENT EXECUTORS !!!")
            elif not optional:
                print(f"!!! Missing '{old_label}' & '{new_label}' !!!")

        # BUGFIX: build the header from final_executors (first successful item),
        # not from the loop-local variable, which is None when the LAST item
        # set is missing and would print 'None' into the header.
        header = "Test case\t"
        for _ in range(len(self._columns)):
            header += f"{final_executors}\t"
        header = header[:-1]

        print(header)
        print(final_output)

    def add_cmp_same_versions(self, consistency_level="LOCAL_ONE", read="simple1", write="insert"):
        """Register old-vs-new comparisons for every payload size in DATA_SIZES.

        Keeps the original ordering: all WRITE sets first, then all READ sets.

        :param consistency_level: e.g. "LOCAL_ONE" or "LOCAL_QUORUM"
        :param read: read operation name used in file patterns
        :param write: write operation name used in file patterns
        """
        self._items = []
        for operation in (write, read):
            for size in self.DATA_SIZES:
                self.add_file_set(
                    f"{self._old_prefix} vs {self._new_prefix} {size} {operation}_{consistency_level}",
                    f"{self._old_prefix} {operation} {size}_{consistency_level}",
                    f"*{self._old_prefix} {size}_{operation} *_{consistency_level}",
                    f"{self._new_prefix} {operation} {size}_{consistency_level}",
                    f"*{self._new_prefix} {size}_{operation} *_{consistency_level}")

    def add_cmp_different_versions(self, consistency_level="LOCAL_ONE", read="read", write="write"):
        """Register comparisons across compaction strategies:
        old baseline (STCS for write, LCS for read) vs the same baseline and
        vs the UCS variants on the new side (UCS2 compare is optional).

        :param consistency_level: e.g. "LOCAL_ONE" or "LOCAL_QUORUM"
        :param read: read operation name used in file patterns
        :param write: write operation name used in file patterns
        """
        self._items = []
        # STCS - heavy write, LCS - read heavy
        # L - read heavy, T - write heavy
        for operation, baseline in ((write, "STCS"), (read, "LCS")):
            old_label = f"{self._old_prefix} {operation}_{consistency_level}_{baseline}"
            old_pattern = f"*{self._old_prefix} {operation}_{consistency_level}_{baseline}"

            # baseline vs baseline
            self.add_file_set(
                f"{self._old_prefix} vs {self._new_prefix} {operation}_{consistency_level}_{baseline}",
                old_label, old_pattern,
                f"{self._new_prefix} {operation}_{consistency_level}_{baseline}",
                f"*{self._new_prefix} {operation}_{consistency_level}_{baseline}")

            # baseline vs UCS scalability levels
            for ucs, optional in self.UCS_VARIANTS:
                self.add_file_set(
                    f"{self._old_prefix} vs {self._new_prefix} {operation}_{consistency_level}_{baseline}-{ucs}",
                    old_label, old_pattern,
                    f"{self._new_prefix} {operation}_{consistency_level}_{ucs}",
                    f"*{self._new_prefix} {operation}_{consistency_level}_{ucs}",
                    optional)

    def run_default(self, consistency_level="LOCAL_ONE"):
        """Register the different-versions item sets and run the comparison."""
        self.add_cmp_different_versions(consistency_level)
        # NOTE(review): this class defines no run() method in this file, so the
        # call below raises AttributeError; presumably text() or graph() was
        # intended — TODO confirm against callers before changing.
        self.run()

    def _graph_item(self, old_label, old_file_name, new_label, new_file_name):
        """Load one old/new TXT(JSON) pair and merge their core records,
        tagging each record with its side label as 'group'.

        :return: (merged core records, duration, timestamp) — duration and
                 timestamp come from the NEW side header — or (None, None, None)
                 when either file is missing.
        """
        new_file = self._find_file(new_file_name + ".txt")
        old_file = self._find_file(old_file_name + ".txt")
        if new_file is None or old_file is None:
            return None, None, None

        new_header, new_cores = self._parse_json(new_file)
        old_header, old_cores = self._parse_json(old_file)

        compare_cores = []
        for core in new_cores:
            core['group'] = new_label
            compare_cores.append(core)
        for core in old_cores:
            core['group'] = old_label
            compare_cores.append(core)

        return compare_cores, new_header['duration'], datetime.datetime.fromisoformat(new_header['now'])

    def _parse_json(self, input_file):
        """Parse one extract file: one JSON object per line, '#' lines skipped.

        :param input_file: path to the TXT(JSON) extract file
        :return: (header dict — or "" when no header line was found, core dicts)
        """
        header = ""
        cores = []
        with open(input_file, "r") as f:
            for line in f:
                if line[0] == '#':
                    continue
                input_dict = load_json(line)
                if not input_dict:
                    continue
                if input_dict[const.PRF_TYPE] == const.PRF_HDR_TYPE:
                    # header items
                    header = input_dict
                elif input_dict[const.PRF_TYPE] == const.PRF_CORE_TYPE:
                    cores.append(input_dict)
        return header, cores

    def graph(self, output_dir="output"):
        """Generate graph based on TXT(JSON) files from extract"""
        from qgate_graph.graph_performance import GraphPerformance

        for old_label, old_file, new_label, new_file, optional, join_title in self._items:
            compare_cores, duration, now = self._graph_item(old_label, old_file, new_label, new_file)

            if compare_cores:
                # build text output first, then render the graph from it
                output = CQLOutput(output_screen=False, text=True)
                output.open()
                graph = GraphOutput(output)
                graph.print_header(now, join_title, duration)
                graph.print_details(compare_cores)
                graph.print_footer(True, duration)

                generator = GraphPerformance()
                generator.generate_from_text(output.text_buffer, output_dir, suppress_error=True)
                output.close()
            elif not optional:
                print(f"!!! Missing '{old_label}' & '{new_label}' !!!")
350 |
351 |
--------------------------------------------------------------------------------