├── .asf.yaml ├── .gitignore ├── .isort.cfg ├── CONTRIBUTING.md ├── LICENSE ├── README.md ├── auditlog_test.py ├── auth_join_ring_false_test.py ├── auth_test.py ├── batch_test.py ├── bootstrap_test.py ├── byteman ├── 4.0 │ ├── decommission_failure_inject.btm │ ├── election_counter_leader_favor_node2.btm │ ├── inject_failure_streaming_to_node2.btm │ ├── repair_validation_sleep.btm │ ├── skip_view_build_finalization.btm │ ├── skip_view_build_task_finalization.btm │ ├── stream_failure.btm │ └── view_builder_task_sleep.btm ├── bootstrap_5s_sleep.btm ├── fail_after_batchlog_write.btm ├── fail_after_view_write.btm ├── fail_before_view_write.btm ├── failing_repair.btm ├── gossip_alive_callback_sleep.btm ├── index_build_failure.btm ├── merge_schema_failure_3x.btm ├── merge_schema_failure_4x.btm ├── merge_schema_failure_5_1.btm ├── post4.0 │ └── request_verb_timing.btm ├── post5.1 │ └── delay_streaming_for_move.btm ├── pre4.0 │ ├── decommission_failure_inject.btm │ ├── election_counter_leader_favor_node2.btm │ ├── inject_failure_streaming_to_node2.btm │ ├── repair_validation_sleep.btm │ ├── skip_finish_view_build_status.btm │ ├── skip_view_build_update_distributed.btm │ └── stream_failure.btm ├── read_repair │ ├── sorted_live_endpoints.btm │ ├── sorted_live_endpoints_5_1.btm │ ├── stop_data_reads.btm │ ├── stop_digest_reads.btm │ ├── stop_rr_writes.btm │ ├── stop_rr_writes_5_1.btm │ └── stop_writes.btm ├── repair_anticompaction_sleep.btm ├── request_verb_timing.btm ├── slow_writes.btm ├── sstable_open_delay.btm ├── stop_reads.btm ├── stop_rr_writes.btm ├── stop_writes.btm ├── stream_5s_sleep.btm ├── stream_sleep.btm ├── throw_on_digest.btm └── truncate_fail.btm ├── cassandra-thrift ├── __init__.py └── v11 │ ├── Cassandra-remote │ ├── Cassandra.py │ ├── __init__.py │ ├── constants.py │ └── ttypes.py ├── cdc_test.py ├── cfid_test.py ├── client_network_stop_start_test.py ├── client_request_metrics_local_remote_test.py ├── client_request_metrics_test.py ├── commitlog_test.py ├── compaction_test.py ├── compression_test.py ├── concurrent_schema_changes_test.py ├── conf ├── README ├── cassandra-1.2_test-select.cfg ├── cassandra-2.0_test-select.cfg ├── cassandra-2.1_fast-select.cfg ├── cassandra-2.1_test-select.cfg ├── cassandra-2.2_fast-select.cfg ├── cassandra-2.2_test-select.cfg ├── cassandra-3.0_test-select.cfg ├── trunk_coverage_test-select.cfg └── trunk_test-select.cfg ├── configuration_test.py ├── conftest.py ├── consistency_test.py ├── consistent_bootstrap_test.py ├── counter_test.py ├── cql_prepared_test.py ├── cql_test.py ├── cql_tracing_test.py ├── cqlsh_tests ├── __init__.py ├── blogposts.yaml ├── cqlsh_test_types.py ├── cqlsh_tools.py ├── glass.cql ├── test_cqlsh.py ├── test_cqlsh_copy.py └── util.py ├── delete_insert_test.py ├── deletion_test.py ├── disk_balance_test.py ├── dtest.py ├── dtest_config.py ├── dtest_setup.py ├── dtest_setup_overrides.py ├── env.txt ├── findlibjemalloc.sh ├── fqltool_test.py ├── global_row_key_cache_test.py ├── gossip_test.py ├── hintedhandoff_test.py ├── internode_ssl_test.py ├── jmx_auth_test.py ├── jmx_test.py ├── json_test.py ├── json_tools_test.py ├── largecolumn_test.py ├── legacy_sstables_test.py ├── lib ├── cassandra-attack.jar ├── cassandra-attack.jar.txt ├── jolokia-jvm-1.7.1-agent.jar └── jolokia-jvm-1.7.1-agent.jar.txt ├── license.txt ├── linter_check.sh ├── materialized_views_test.py ├── meta_tests ├── __init__.py ├── assertion_test.py ├── cassandra-dir-3.2 │ └── 0.version.txt ├── cassandra-dir-4.0-beta │ ├── 0.version.txt │ ├── bin │ │ └── 
.do-not-delete │ └── conf │ │ └── cassandra.yaml ├── conftest_test.py ├── dtest_config_test.py └── utils_test │ ├── __init__.py │ ├── funcutils_test.py │ └── metadata_wrapper_test.py ├── metadata_test.py ├── mixed_version_test.py ├── multidc_putget_test.py ├── native_transport_ssl_test.py ├── nodetool_test.py ├── offline_tools_test.py ├── paging_test.py ├── paxos_test.py ├── pending_range_test.py ├── plugins ├── __init__.py └── assert_tools.py ├── prepared_statements_test.py ├── pushed_notifications_test.py ├── putget_test.py ├── pytest.ini ├── range_ghost_test.py ├── read_failures_test.py ├── read_repair_test.py ├── rebuild_test.py ├── refresh_test.py ├── repair_tests ├── __init__.py ├── deprecated_repair_test.py ├── incremental_repair_test.py ├── preview_repair_test.py └── repair_test.py ├── replace_address_test.py ├── replica_side_filtering_test.py ├── replication_test.py ├── requirements.txt ├── run_dtests.py ├── schema_metadata_test.py ├── schema_test.py ├── scrub_test.py ├── secondary_indexes_test.py ├── seed_test.py ├── snapshot_test.py ├── snitch_test.py ├── sslnodetonode_test.py ├── sstable_generation_loading_test.py ├── sstables └── ttl_test │ ├── 2.1 │ ├── ks-ttl_table-ka-1-CompressionInfo.db │ ├── ks-ttl_table-ka-1-Data.db │ ├── ks-ttl_table-ka-1-Digest.sha1 │ ├── ks-ttl_table-ka-1-Filter.db │ ├── ks-ttl_table-ka-1-Index.db │ ├── ks-ttl_table-ka-1-Statistics.db │ ├── ks-ttl_table-ka-1-Summary.db │ ├── ks-ttl_table-ka-1-TOC.txt │ ├── ks-ttl_table-ka-2-CompressionInfo.db │ ├── ks-ttl_table-ka-2-Data.db │ ├── ks-ttl_table-ka-2-Digest.sha1 │ ├── ks-ttl_table-ka-2-Filter.db │ ├── ks-ttl_table-ka-2-Index.db │ ├── ks-ttl_table-ka-2-Statistics.db │ ├── ks-ttl_table-ka-2-Summary.db │ └── ks-ttl_table-ka-2-TOC.txt │ ├── 3.0 │ ├── mc-1-big-CompressionInfo.db │ ├── mc-1-big-Data.db │ ├── mc-1-big-Digest.crc32 │ ├── mc-1-big-Filter.db │ ├── mc-1-big-Index.db │ ├── mc-1-big-Statistics.db │ ├── mc-1-big-Summary.db │ ├── mc-1-big-TOC.txt │ ├── mc-2-big-CompressionInfo.db │ ├── mc-2-big-Data.db │ ├── mc-2-big-Digest.crc32 │ ├── mc-2-big-Filter.db │ ├── mc-2-big-Index.db │ ├── mc-2-big-Statistics.db │ ├── mc-2-big-Summary.db │ └── mc-2-big-TOC.txt │ └── 3.11 │ ├── mc-1-big-CompressionInfo.db │ ├── mc-1-big-Data.db │ ├── mc-1-big-Digest.crc32 │ ├── mc-1-big-Filter.db │ ├── mc-1-big-Index.db │ ├── mc-1-big-Statistics.db │ ├── mc-1-big-Summary.db │ ├── mc-1-big-TOC.txt │ ├── mc-2-big-CompressionInfo.db │ ├── mc-2-big-Data.db │ ├── mc-2-big-Digest.crc32 │ ├── mc-2-big-Filter.db │ ├── mc-2-big-Index.db │ ├── mc-2-big-Statistics.db │ ├── mc-2-big-Summary.db │ └── mc-2-big-TOC.txt ├── sstablesplit_test.py ├── sstableutil_test.py ├── streaming_test.py ├── stress_profiles └── repair_wide_rows.yaml ├── stress_tool_test.py ├── super_column_cache_test.py ├── super_counter_test.py ├── system_keyspaces_test.py ├── thrift_bindings ├── __init__.py └── thrift010 │ ├── Cassandra-remote │ ├── Cassandra.py │ ├── __init__.py │ ├── constants.py │ └── ttypes.py ├── thrift_hsha_test.py ├── thrift_test.py ├── token_generator_test.py ├── tools ├── __init__.py ├── assertions.py ├── context.py ├── data.py ├── datahelp.py ├── env.py ├── files.py ├── flaky.py ├── funcutils.py ├── git.py ├── hacks.py ├── intervention.py ├── jmxutils.py ├── metadata_wrapper.py ├── misc.py ├── paging.py └── sslkeygen.py ├── topology_test.py ├── transient_replication_ring_test.py ├── transient_replication_test.py ├── ttl_test.py ├── udtencoding_test.py ├── upgrade_crc_check_chance_test.py ├── upgrade_internal_auth_test.py ├── upgrade_tests ├── 
README.md ├── __init__.py ├── bootstrap_upgrade_test.py ├── compatibility_flag_test.py ├── conftest.py ├── cql_tests.py ├── drop_compact_storage_upgrade_test.py ├── paging_test.py ├── regression_test.py ├── repair_test.py ├── storage_engine_upgrade_test.py ├── supercolumn-data │ └── cassandra-2.0 │ │ └── supcols │ │ └── cols │ │ ├── supcols-cols-jb-2-CompressionInfo.db │ │ ├── supcols-cols-jb-2-Data.db │ │ ├── supcols-cols-jb-2-Filter.db │ │ ├── supcols-cols-jb-2-Index.db │ │ ├── supcols-cols-jb-2-Statistics.db │ │ └── supcols-cols-jb-2-TOC.txt ├── thrift_upgrade_test.py ├── upgrade_base.py ├── upgrade_compact_storage.py ├── upgrade_manifest.py ├── upgrade_schema_agreement_test.py ├── upgrade_supercolumns_test.py ├── upgrade_through_versions_test.py └── upgrade_udtfix_test.py ├── user_functions_test.py ├── user_types_test.py ├── wide_rows_test.py └── write_failures_test.py /.asf.yaml: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one 2 | # or more contributor license agreements. See the NOTICE file 3 | # distributed with this work for additional information 4 | # regarding copyright ownership. The ASF licenses this file 5 | # to you under the Apache License, Version 2.0 (the 6 | # "License"); you may not use this file except in compliance 7 | # with the License. You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, 12 | # software distributed under the License is distributed on an 13 | # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 14 | # KIND, either express or implied. See the License for the 15 | # specific language governing permissions and limitations 16 | # under the License. 17 | 18 | notifications: 19 | commits: commits@cassandra.apache.org 20 | issues: commits@cassandra.apache.org 21 | pullrequests: pr@cassandra.apache.org 22 | jira_options: link worklog 23 | 24 | github: 25 | description: "Distributed tests for Apache Cassandra®" 26 | homepage: https://cassandra.apache.org/ 27 | enabled_merge_buttons: 28 | squash: false 29 | merge: false 30 | rebase: true 31 | features: 32 | wiki: false 33 | issues: false 34 | projects: false 35 | autolink_jira: 36 | - CASSANDRA 37 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # PyCharm 2 | .idea/ 3 | *.iws 4 | *.ipr 5 | *.iml 6 | 7 | *.pyc 8 | logs 9 | last_test_dir 10 | upgrade 11 | html/ 12 | doxygen/doxypy-0.4.2/ 13 | .pytest_cache/ 14 | 15 | bin/ 16 | lib/ 17 | pyvenv.cfg 18 | src/ 19 | .vscode 20 | pytest.ini 21 | venv 22 | .venv 23 | -------------------------------------------------------------------------------- /.isort.cfg: -------------------------------------------------------------------------------- 1 | [settings] 2 | known_third_party=cassandra,ccm,ccmlib,parse 3 | -------------------------------------------------------------------------------- /byteman/4.0/decommission_failure_inject.btm: -------------------------------------------------------------------------------- 1 | # 2 | # Inject decommission failure to fail streaming from 127.0.0.1 3 | # 4 | # Before start streaming files in `StreamSession#onInitializationComplete()` method, 5 | # interrupt streaming by throwing RuntimeException. 
6 | # 7 | RULE inject decommission failure 8 | CLASS org.apache.cassandra.streaming.StreamSession 9 | METHOD prepareSynAck 10 | AT INVOKE startStreamingFiles 11 | BIND peer = $0.peer 12 | # set flag to only run this rule once. 13 | IF peer.equals(org.apache.cassandra.locator.InetAddressAndPort.getByName("127.0.0.1")) AND NOT flagged("done") 14 | DO 15 | flag("done"); 16 | throw new java.lang.RuntimeException("Triggering network failure") 17 | ENDRULE 18 | -------------------------------------------------------------------------------- /byteman/4.0/election_counter_leader_favor_node2.btm: -------------------------------------------------------------------------------- 1 | # 2 | # Cheat during the leader election for a counter mutation and favour node 2, 127.0.0.2 3 | # 4 | # Note that this happens only if the node is known to be available. 5 | # 6 | RULE election counter leader cheat 7 | CLASS org.apache.cassandra.service.StorageProxy 8 | METHOD findSuitableEndpoint 9 | AT EXIT 10 | BIND isthere:boolean = $localEndpoints.contains(org.apache.cassandra.locator.InetAddressAndPort.getByName("127.0.0.2")); 11 | if isthere 12 | DO 13 | return org.apache.cassandra.locator.InetAddressAndPort.getByName("127.0.0.2"); 14 | ENDRULE 15 | -------------------------------------------------------------------------------- /byteman/4.0/inject_failure_streaming_to_node2.btm: -------------------------------------------------------------------------------- 1 | # 2 | # Inject streaming failure 3 | # 4 | # Before start streaming files in `StreamSession#prepare()` method, 5 | # interrupt streaming by throwing RuntimeException. 6 | # 7 | RULE inject stream failure 8 | CLASS org.apache.cassandra.streaming.StreamSession 9 | METHOD startStreamingFiles 10 | AT ENTRY 11 | BIND peer = $0.peer 12 | # set flag to only run this rule once. 13 | IF peer.equals(org.apache.cassandra.locator.InetAddressAndPort.getByName("127.0.0.2")) AND NOT flagged("done") 14 | DO 15 | flag("done"); 16 | throw new java.lang.RuntimeException("Triggering network failure") 17 | ENDRULE 18 | -------------------------------------------------------------------------------- /byteman/4.0/repair_validation_sleep.btm: -------------------------------------------------------------------------------- 1 | # 2 | # Sleep 60s during validation compaction 3 | # 4 | RULE sleep 60s on validation 5 | CLASS org.apache.cassandra.repair.ValidationManager 6 | METHOD doValidation 7 | AT ENTRY 8 | # set flag to only run this rule once. 9 | IF NOT flagged("done") 10 | DO 11 | flag("done"); 12 | Thread.sleep(60000) 13 | ENDRULE 14 | -------------------------------------------------------------------------------- /byteman/4.0/skip_view_build_finalization.btm: -------------------------------------------------------------------------------- 1 | # 2 | # Skip the finalization of a view build. 3 | # 4 | RULE skip the finalization of a view build 5 | CLASS org.apache.cassandra.db.view.ViewBuilder 6 | METHOD finish 7 | AT ENTRY 8 | IF TRUE 9 | DO 10 | return 11 | ENDRULE 12 | -------------------------------------------------------------------------------- /byteman/4.0/skip_view_build_task_finalization.btm: -------------------------------------------------------------------------------- 1 | # 2 | # Skip the finalization of a view build task. 
3 | # 4 | RULE skip the finalization of a view build task 5 | CLASS org.apache.cassandra.db.view.ViewBuilderTask 6 | METHOD finish 7 | AT ENTRY 8 | IF TRUE 9 | DO 10 | return 11 | ENDRULE 12 | -------------------------------------------------------------------------------- /byteman/4.0/stream_failure.btm: -------------------------------------------------------------------------------- 1 | # 2 | # Inject streaming failure 3 | # 4 | # Before start streaming files in `StreamSession#prepareAck()` method, 5 | # interrupt streaming by throwing RuntimeException. 6 | # 7 | RULE inject stream failure 8 | CLASS org.apache.cassandra.streaming.StreamSession 9 | METHOD prepareAck 10 | AT INVOKE startStreamingFiles 11 | BIND peer = $0.peer 12 | # set flag to only run this rule once. 13 | IF NOT flagged("done") 14 | DO 15 | flag("done"); 16 | throw new java.lang.RuntimeException("Triggering network failure") 17 | ENDRULE 18 | -------------------------------------------------------------------------------- /byteman/4.0/view_builder_task_sleep.btm: -------------------------------------------------------------------------------- 1 | # 2 | # Slow down how fast a view builder task processes keys. 3 | # 4 | RULE slow down view builder task 5 | CLASS org.apache.cassandra.db.view.ViewBuilderTask 6 | METHOD buildKey 7 | AT ENTRY 8 | IF TRUE 9 | DO 10 | Thread.sleep(50); 11 | ENDRULE 12 | -------------------------------------------------------------------------------- /byteman/bootstrap_5s_sleep.btm: -------------------------------------------------------------------------------- 1 | # 2 | # Sleep 5s when finishing bootstrap 3 | # 4 | RULE Sleep 5s when finishing bootstrap 5 | CLASS org.apache.cassandra.service.StorageService 6 | METHOD bootstrapFinished 7 | AT ENTRY 8 | # set flag to only run this rule once. 9 | IF NOT flagged("done") 10 | DO 11 | flag("done"); 12 | Thread.sleep(5000) 13 | ENDRULE -------------------------------------------------------------------------------- /byteman/fail_after_batchlog_write.btm: -------------------------------------------------------------------------------- 1 | # 2 | # Inject node failure immediately after batchlog write. 
3 | # Method signature required in 3.x to avoid pausing before legacy mutations sent 4 | # 5 | RULE skip writing batched mutations 6 | CLASS org.apache.cassandra.service.StorageProxy 7 | METHOD syncWriteBatchedMutations 8 | AT ENTRY 9 | IF TRUE 10 | DO return 11 | ENDRULE 12 | 13 | RULE skip removing from batchlog 14 | CLASS org.apache.cassandra.service.StorageProxy 15 | METHOD asyncRemoveFromBatchlog 16 | AT ENTRY 17 | IF TRUE 18 | DO return 19 | ENDRULE 20 | -------------------------------------------------------------------------------- /byteman/fail_after_view_write.btm: -------------------------------------------------------------------------------- 1 | RULE Die before applying base mutation 2 | CLASS org.apache.cassandra.db.view.TableViews 3 | METHOD pushViewReplicaUpdates 4 | AT EXIT 5 | IF callerEquals("applyInternal") 6 | DO 7 | throw new RuntimeException("Dummy failure"); 8 | ENDRULE 9 | -------------------------------------------------------------------------------- /byteman/fail_before_view_write.btm: -------------------------------------------------------------------------------- 1 | RULE Die before applying base mutation 2 | CLASS org.apache.cassandra.db.view.TableViews 3 | METHOD pushViewReplicaUpdates 4 | AT ENTRY 5 | IF callerEquals("applyInternal") 6 | DO 7 | throw new RuntimeException("Dummy failure"); 8 | ENDRULE 9 | -------------------------------------------------------------------------------- /byteman/failing_repair.btm: -------------------------------------------------------------------------------- 1 | RULE fail repairs 2 | CLASS org.apache.cassandra.repair.RepairMessageVerbHandler 3 | METHOD doVerb 4 | AT ENTRY 5 | IF true 6 | DO throw new RuntimeException("Repair failed"); 7 | ENDRULE 8 | -------------------------------------------------------------------------------- /byteman/gossip_alive_callback_sleep.btm: -------------------------------------------------------------------------------- 1 | # 2 | # Slow down how fast a node builds a view of the cluster by postponing when gossip settles. 3 | # 4 | # Note that this happens only if the node is known to be available. 5 | # 6 | RULE slow down failure detector 7 | CLASS org.apache.cassandra.gms.Gossiper 8 | METHOD realMarkAlive 9 | AT ENTRY 10 | IF TRUE 11 | DO 12 | Thread.sleep(2000); 13 | ENDRULE -------------------------------------------------------------------------------- /byteman/index_build_failure.btm: -------------------------------------------------------------------------------- 1 | # 2 | # Fail during index build 3 | # 4 | RULE fail during index building 5 | CLASS org.apache.cassandra.index.SecondaryIndexManager 6 | METHOD calculateIndexingPageSize 7 | AT ENTRY 8 | # set flag to only run this rule once. 9 | IF NOT flagged("done") 10 | DO 11 | flag("done"); 12 | throw new java.lang.RuntimeException("index_build_failure.btm expected exception") 13 | ENDRULE 14 | -------------------------------------------------------------------------------- /byteman/merge_schema_failure_3x.btm: -------------------------------------------------------------------------------- 1 | # 2 | # Inject node failure on merge schema exit. 3 | # 4 | RULE inject node failure on merge schema exit 5 | CLASS org.apache.cassandra.schema.SchemaKeyspace 6 | METHOD mergeSchema 7 | AT EXIT 8 | # no flag needed: the node exits the first time this rule fires. 
9 | IF TRUE 10 | DO 11 | System.exit(0) 12 | ENDRULE 13 | -------------------------------------------------------------------------------- /byteman/merge_schema_failure_4x.btm: -------------------------------------------------------------------------------- 1 | # 2 | # Inject node failure on merge schema exit. 3 | # 4 | RULE inject node failure on merge schema exit 5 | CLASS org.apache.cassandra.schema.Schema 6 | METHOD merge 7 | AT EXIT 8 | # no flag needed: the node halts the first time this rule fires. 9 | IF TRUE 10 | DO 11 | Runtime.getRuntime().halt(0); 12 | ENDRULE 13 | -------------------------------------------------------------------------------- /byteman/merge_schema_failure_5_1.btm: -------------------------------------------------------------------------------- 1 | # 2 | # Inject node failure on merge schema exit. 3 | # 4 | RULE inject node failure on merge schema exit 5 | CLASS org.apache.cassandra.tcm.listeners.SchemaListener 6 | METHOD notifyPostCommit 7 | AT EXIT 8 | # no flag needed: the node halts the first time this rule fires. 9 | IF TRUE 10 | DO 11 | Runtime.getRuntime().halt(0); 12 | ENDRULE 13 | -------------------------------------------------------------------------------- /byteman/post4.0/request_verb_timing.btm: -------------------------------------------------------------------------------- 1 | RULE timing of request messages broken down by verb 2 | CLASS org.apache.cassandra.net.MessagingService 3 | METHOD doSend 4 | AT ENTRY 5 | BIND prefix:String = "org.jboss.byteman."; # byteman in strict mode requires the o.j.b prefix 6 | toHost:String = $to.getAddress().toString(); 7 | verb:String = $message.header.verb.toString(); 8 | prop:String = prefix + "|request_verb_timing|" + toHost + "|" + verb; 9 | IF true 10 | DO 11 | System.setProperty(prop, String.valueOf(System.currentTimeMillis())); 12 | ENDRULE 13 | -------------------------------------------------------------------------------- /byteman/post5.1/delay_streaming_for_move.btm: -------------------------------------------------------------------------------- 1 | # 2 | # Make progress barrier block for 60s 3 | # 4 | RULE delay streaming for move 5 | CLASS org.apache.cassandra.tcm.sequences.Move 6 | METHOD movementMap 7 | AT ENTRY 8 | IF TRUE 9 | DO 10 | Thread.sleep(60000); 11 | ENDRULE 12 | -------------------------------------------------------------------------------- /byteman/pre4.0/decommission_failure_inject.btm: -------------------------------------------------------------------------------- 1 | # 2 | # Inject decommission failure to fail streaming from 127.0.0.1 3 | # 4 | # Before start streaming files in `StreamSession#onInitializationComplete()` method, 5 | # interrupt streaming by throwing RuntimeException. 6 | # 7 | RULE inject decommission failure 8 | CLASS org.apache.cassandra.streaming.StreamSession 9 | METHOD onInitializationComplete 10 | AT INVOKE startStreamingFiles 11 | BIND peer = $0.peer 12 | # set flag to only run this rule once. 13 | IF peer.equals(InetAddress.getByName("127.0.0.1")) AND NOT flagged("done") 14 | DO 15 | flag("done"); 16 | throw new java.lang.RuntimeException("Triggering network failure") 17 | ENDRULE -------------------------------------------------------------------------------- /byteman/pre4.0/election_counter_leader_favor_node2.btm: -------------------------------------------------------------------------------- 1 | # 2 | # Cheat during the leader election for a counter mutation and favour node 2, 127.0.0.2 3 | # 4 | # Note that this happens only if the node is known to be available. 
5 | # 6 | RULE election counter leader cheat 7 | CLASS org.apache.cassandra.service.StorageProxy 8 | METHOD findSuitableEndpoint 9 | AT EXIT 10 | BIND isthere:boolean = $localEndpoints.contains(java.net.InetAddress.getByName("127.0.0.2")); 11 | if isthere 12 | DO 13 | return java.net.InetAddress.getByName("127.0.0.2"); 14 | ENDRULE 15 | -------------------------------------------------------------------------------- /byteman/pre4.0/inject_failure_streaming_to_node2.btm: -------------------------------------------------------------------------------- 1 | # 2 | # Inject streaming failure 3 | # 4 | # Before start streaming files in `StreamSession#prepare()` method, 5 | # interrupt streaming by throwing RuntimeException. 6 | # 7 | RULE inject stream failure 8 | CLASS org.apache.cassandra.streaming.StreamSession 9 | METHOD prepare 10 | AT INVOKE startStreamingFiles 11 | BIND peer = $0.peer 12 | # set flag to only run this rule once. 13 | IF peer.equals(InetAddress.getByName("127.0.0.2")) AND NOT flagged("done") 14 | DO 15 | flag("done"); 16 | throw new java.lang.RuntimeException("Triggering network failure") 17 | ENDRULE 18 | -------------------------------------------------------------------------------- /byteman/pre4.0/repair_validation_sleep.btm: -------------------------------------------------------------------------------- 1 | # 2 | # Sleep 60s during validation compaction 3 | # 4 | RULE sleep 60s on validation 5 | CLASS org.apache.cassandra.db.compaction.CompactionManager 6 | METHOD doValidationCompaction 7 | AT ENTRY 8 | # set flag to only run this rule once. 9 | IF NOT flagged("done") 10 | DO 11 | flag("done"); 12 | Thread.sleep(60000) 13 | ENDRULE 14 | -------------------------------------------------------------------------------- /byteman/pre4.0/skip_finish_view_build_status.btm: -------------------------------------------------------------------------------- 1 | # 2 | # Skip the finalization of a view build. 3 | # 4 | RULE skip the finalization of a view build 5 | CLASS org.apache.cassandra.db.SystemKeyspace 6 | METHOD finishViewBuildStatus 7 | AT ENTRY 8 | IF TRUE 9 | DO 10 | return 11 | ENDRULE 12 | -------------------------------------------------------------------------------- /byteman/pre4.0/skip_view_build_update_distributed.btm: -------------------------------------------------------------------------------- 1 | # 2 | # Skip the finalization of a view build task. 3 | # 4 | RULE skip the finalization of a view build task 5 | CLASS org.apache.cassandra.db.view.ViewBuilder 6 | METHOD updateDistributed 7 | AT ENTRY 8 | IF TRUE 9 | DO 10 | return 11 | ENDRULE 12 | -------------------------------------------------------------------------------- /byteman/pre4.0/stream_failure.btm: -------------------------------------------------------------------------------- 1 | # 2 | # Inject streaming failure 3 | # 4 | # Before start streaming files in `StreamSession#prepare()` method, 5 | # interrupt streaming by throwing RuntimeException. 6 | # 7 | RULE inject stream failure 8 | CLASS org.apache.cassandra.streaming.StreamSession 9 | METHOD prepare 10 | AT INVOKE maybeCompleted 11 | BIND peer = $0.peer 12 | # set flag to only run this rule once. 
13 | IF NOT flagged("done") 14 | DO 15 | flag("done"); 16 | throw new java.lang.RuntimeException("Triggering network failure") 17 | ENDRULE 18 | -------------------------------------------------------------------------------- /byteman/read_repair/sorted_live_endpoints.btm: -------------------------------------------------------------------------------- 1 | # Force nodes 1 and 2 to be used for quorum reads by forcing the order of Replica objects 2 | RULE sorted live endpoints 3 | CLASS org.apache.cassandra.locator.SimpleSnitch 4 | METHOD sortedByProximity 5 | AT ENTRY 6 | IF true 7 | DO 8 | return $unsortedAddress.sorted(java.util.Comparator.naturalOrder()); 9 | ENDRULE -------------------------------------------------------------------------------- /byteman/read_repair/sorted_live_endpoints_5_1.btm: -------------------------------------------------------------------------------- 1 | # Force nodes 1 and 2 to be used for quorum reads by forcing the order of Replica objects 2 | 3 | # For the case when snitch is still enabled in cassandra.yaml 4 | RULE sorted live endpoints for SimpleSnitch 5 | CLASS org.apache.cassandra.locator.SimpleSnitch 6 | METHOD sortedByProximity 7 | AT ENTRY 8 | IF true 9 | DO 10 | return $unsortedAddress.sorted(java.util.Comparator.naturalOrder()); 11 | ENDRULE 12 | 13 | # For the case when node_proximity is configured in cassandra_latest.yaml and SimpleSnitch is not used 14 | RULE sorted live endpoints for BaseProximity 15 | CLASS org.apache.cassandra.locator.BaseProximity 16 | METHOD sortedByProximity 17 | AT ENTRY 18 | IF true 19 | DO 20 | return $unsortedAddress.sorted(java.util.Comparator.naturalOrder()); 21 | ENDRULE -------------------------------------------------------------------------------- /byteman/read_repair/stop_data_reads.btm: -------------------------------------------------------------------------------- 1 | # block data (but not digest) reads 2 | RULE disable data reads 3 | CLASS org.apache.cassandra.db.ReadCommandVerbHandler 4 | METHOD doVerb 5 | # wait until command is declared locally. because generics 6 | AFTER WRITE $command 7 | # bail out if it's a data request 8 | IF NOT $command.isDigestQuery() 9 | DO return; 10 | ENDRULE 11 | -------------------------------------------------------------------------------- /byteman/read_repair/stop_digest_reads.btm: -------------------------------------------------------------------------------- 1 | # block digest (but not data) reads 2 | RULE disable digest reads 3 | CLASS org.apache.cassandra.db.ReadCommandVerbHandler 4 | METHOD doVerb 5 | # wait until command is declared locally. 
because generics 6 | AFTER WRITE $command 7 | # bail out if it's a digest request 8 | IF $command.isDigestQuery() 9 | DO return; 10 | ENDRULE 11 | -------------------------------------------------------------------------------- /byteman/read_repair/stop_rr_writes.btm: -------------------------------------------------------------------------------- 1 | # block remote read repair mutation messages 2 | RULE disable read repair mutations 3 | CLASS org.apache.cassandra.db.ReadRepairVerbHandler 4 | METHOD doVerb 5 | AT ENTRY 6 | IF true 7 | DO return; 8 | ENDRULE 9 | -------------------------------------------------------------------------------- /byteman/read_repair/stop_rr_writes_5_1.btm: -------------------------------------------------------------------------------- 1 | # block remote read repair mutation messages 2 | RULE disable read repair mutations 3 | CLASS org.apache.cassandra.db.AbstractMutationVerbHandler 4 | METHOD doVerb 5 | AT ENTRY 6 | IF true 7 | DO return; 8 | ENDRULE 9 | -------------------------------------------------------------------------------- /byteman/read_repair/stop_writes.btm: -------------------------------------------------------------------------------- 1 | # block remote read repair writes 2 | RULE disable mutations 3 | CLASS org.apache.cassandra.db.MutationVerbHandler 4 | METHOD doVerb 5 | AT ENTRY 6 | IF true 7 | DO return; 8 | ENDRULE -------------------------------------------------------------------------------- /byteman/repair_anticompaction_sleep.btm: -------------------------------------------------------------------------------- 1 | # 2 | # Sleep 60s during anticompaction 3 | # 4 | RULE sleep 60s on anticompaction 5 | CLASS org.apache.cassandra.db.compaction.CompactionManager 6 | METHOD performAnticompaction 7 | AT ENTRY 8 | # set flag to only run this rule once. 
9 | IF NOT flagged("done") 10 | DO 11 | flag("done"); 12 | Thread.sleep(60000) 13 | ENDRULE 14 | -------------------------------------------------------------------------------- /byteman/request_verb_timing.btm: -------------------------------------------------------------------------------- 1 | RULE timing of request messages broken down by verb 2 | CLASS org.apache.cassandra.net.MessagingService 3 | METHOD doSend 4 | AT ENTRY 5 | BIND prefix:String = "org.jboss.byteman."; # byteman in strict mode requires the o.j.b prefix 6 | toHost:String = $to.address.toString(); 7 | verb:String = $message.header.verb.toString(); 8 | prop:String = prefix + "|request_verb_timing|" + toHost + "|" + verb; 9 | IF true 10 | DO 11 | System.setProperty(prop, String.valueOf(System.currentTimeMillis())); 12 | ENDRULE -------------------------------------------------------------------------------- /byteman/slow_writes.btm: -------------------------------------------------------------------------------- 1 | RULE slow mutations 2 | CLASS org.apache.cassandra.db.MutationVerbHandler 3 | METHOD doVerb 4 | AT ENTRY 5 | IF true 6 | DO Thread.sleep(60000); 7 | ENDRULE -------------------------------------------------------------------------------- /byteman/sstable_open_delay.btm: -------------------------------------------------------------------------------- 1 | # 2 | # Make sstable opening on startup slower 3 | # 4 | RULE slow startup sstable opening 5 | CLASS org.apache.cassandra.io.sstable.format.big.BigFormat$ReaderFactory 6 | METHOD open 7 | AT ENTRY 8 | IF TRUE 9 | DO 10 | Thread.sleep(10000); 11 | ENDRULE 12 | -------------------------------------------------------------------------------- /byteman/stop_reads.btm: -------------------------------------------------------------------------------- 1 | # block read command verb 2 | RULE disable reads 3 | CLASS org.apache.cassandra.db.ReadCommandVerbHandler 4 | METHOD doVerb 5 | AT ENTRY 6 | IF true 7 | DO return; 8 | ENDRULE -------------------------------------------------------------------------------- /byteman/stop_rr_writes.btm: -------------------------------------------------------------------------------- 1 | # block read repair verb 2 | RULE disable read repair writes 3 | CLASS org.apache.cassandra.db.ReadRepairVerbHandler 4 | METHOD doVerb 5 | AT ENTRY 6 | IF true 7 | DO return; 8 | ENDRULE -------------------------------------------------------------------------------- /byteman/stop_writes.btm: -------------------------------------------------------------------------------- 1 | # block mutation verb 2 | RULE disable mutations 3 | CLASS org.apache.cassandra.db.MutationVerbHandler 4 | METHOD doVerb 5 | AT ENTRY 6 | IF true 7 | DO return; 8 | ENDRULE 9 | -------------------------------------------------------------------------------- /byteman/stream_5s_sleep.btm: -------------------------------------------------------------------------------- 1 | # 2 | # Sleep 5s during streaming session 3 | # 4 | RULE sleep 5s on stream session 5 | CLASS org.apache.cassandra.streaming.StreamSession 6 | METHOD messageReceived 7 | AT ENTRY 8 | # set flag to only run this rule once. 
9 | IF NOT flagged("done") 10 | DO 11 | flag("done"); 12 | Thread.sleep(5000) 13 | ENDRULE 14 | -------------------------------------------------------------------------------- /byteman/stream_sleep.btm: -------------------------------------------------------------------------------- 1 | # 2 | # Sleep 60s during streaming session 3 | # 4 | RULE sleep 60s on stream session 5 | CLASS org.apache.cassandra.streaming.StreamSession 6 | METHOD messageReceived 7 | AT ENTRY 8 | # set flag to only run this rule once. 9 | IF NOT flagged("done") 10 | DO 11 | flag("done"); 12 | Thread.sleep(60000) 13 | ENDRULE 14 | -------------------------------------------------------------------------------- /byteman/throw_on_digest.btm: -------------------------------------------------------------------------------- 1 | RULE block digest 2 | CLASS org.apache.cassandra.db.ReadResponse 3 | METHOD createDigestResponse 4 | AT ENTRY 5 | IF true 6 | DO throw new RuntimeException("Digest response throws"); 7 | ENDRULE -------------------------------------------------------------------------------- /byteman/truncate_fail.btm: -------------------------------------------------------------------------------- 1 | RULE Throw during truncate operation 2 | CLASS org.apache.cassandra.db.ColumnFamilyStore 3 | METHOD truncateBlocking() 4 | AT ENTRY 5 | IF TRUE 6 | DO 7 | throw new RuntimeException("Dummy failure"); 8 | ENDRULE -------------------------------------------------------------------------------- /cassandra-thrift/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/apache/cassandra-dtest/28533ee239c23abdb1061dfeff793cc48bb3d8d7/cassandra-thrift/__init__.py -------------------------------------------------------------------------------- /cassandra-thrift/v11/__init__.py: -------------------------------------------------------------------------------- 1 | __all__ = ['ttypes', 'constants', 'Cassandra'] 2 | -------------------------------------------------------------------------------- /cassandra-thrift/v11/constants.py: -------------------------------------------------------------------------------- 1 | # 2 | # Autogenerated by Thrift Compiler (0.8.0) 3 | # 4 | # DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING 5 | # 6 | # options string: py 7 | # 8 | 9 | from thrift.Thrift import TType, TMessageType, TException 10 | from .ttypes import * 11 | 12 | VERSION = "19.30.0" 13 | -------------------------------------------------------------------------------- /cfid_test.py: -------------------------------------------------------------------------------- 1 | import os 2 | import logging 3 | import pytest 4 | 5 | from dtest import Tester, create_ks, create_cf 6 | 7 | logger = logging.getLogger(__name__) 8 | 9 | 10 | class TestCFID(Tester): 11 | 12 | def test_cfid(self): 13 | """ Test through adding/dropping cf's that the path to sstables for each cf are unique 14 | and formatted correctly 15 | """ 16 | cluster = self.cluster 17 | 18 | cluster.populate(1).start() 19 | [node1] = cluster.nodelist() 20 | 21 | session = self.patient_cql_connection(node1) 22 | create_ks(session, 'ks', 1) 23 | 24 | for x in range(0, 5): 25 | create_cf(session, 'cf', gc_grace=0, key_type='int', columns={'c1': 'int'}) 26 | session.execute('insert into cf (key, c1) values (1,1)') 27 | session.execute('insert into cf (key, c1) values (2,1)') 28 | node1.flush() 29 | session.execute('drop table ks.cf;') 30 | 31 | # get a list of cf directories 32 | try: 33 | cfs = 
os.listdir(node1.get_path() + "/data0/ks") 34 | except OSError: 35 | pytest.fail("Path to sstables not valid.") 36 | 37 | # check that there are 5 unique directories 38 | assert len(cfs) == 5 39 | 40 | # check that these are in fact column family directories 41 | for dire in cfs: 42 | assert dire[0:2] == 'cf' 43 | -------------------------------------------------------------------------------- /client_network_stop_start_test.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import os 3 | import os.path 4 | import pytest 5 | import shutil 6 | import string 7 | import time 8 | 9 | from ccmlib.node import TimeoutError 10 | from distutils.version import LooseVersion 11 | from dtest import Tester 12 | from tools import sslkeygen 13 | 14 | since = pytest.mark.since 15 | logger = logging.getLogger(__name__) 16 | 17 | # see https://issues.apache.org/jira/browse/CASSANDRA-16127 18 | class TestClientNetworkStopStart(Tester): 19 | 20 | def _normalize(self, a): 21 | return a.translate(str.maketrans(dict.fromkeys(string.whitespace))) 22 | 23 | def _in(self, a, b): 24 | return self._normalize(a) in self._normalize(b) 25 | 26 | def _assert_client_active_msg(self, name, enabled, out): 27 | expected = "{} active: {}".format(name, str(enabled).lower()) 28 | activated = "activated" if enabled else "deactivated" 29 | assert self._in(expected, out), "{} is expected to be {} ({}) but was not found in output: {}".format(name, activated, str(enabled).lower(), out) 30 | 31 | def _assert_watch_log_for(self, node_or_cluster, to_watch, assert_msg=None): 32 | if assert_msg is None: 33 | assert_msg = "Unable to locate '{}'".format(to_watch) 34 | nodelist_fn = getattr(node_or_cluster, "nodelist", None) 35 | logger.debug("watching for '{}'".format(to_watch)) 36 | start = time.perf_counter() 37 | if callable(nodelist_fn): 38 | for node in nodelist_fn(): 39 | assert node.watch_log_for_no_errors(to_watch), assert_msg 40 | else: 41 | assert node_or_cluster.watch_log_for_no_errors(to_watch), assert_msg 42 | logger.debug("Completed watching for '{}'; took {}s".format(to_watch, time.perf_counter() - start)) 43 | 44 | def _assert_binary_actually_found(self, node_or_cluster): 45 | # ccm will silently move on if the logs don't have CQL in time, which then leads to 46 | # flaky tests; to avoid that, force the wait to be correct and assert the log was seen. 
47 | logger.debug("Verifying that the CQL log was seen and that ccm didn't return early...") 48 | self._assert_watch_log_for(node_or_cluster, "Starting listening for CQL clients on", "Binary didn't start...") 49 | 50 | def _assert_client_enable(self, node, native_enabled=True, thrift_enabled=False): 51 | out = node.nodetool("info") 52 | self._assert_client_active_msg("Native Transport", native_enabled, out.stdout) 53 | if node.get_cassandra_version() >= LooseVersion('4.0'): 54 | assert "Thrift" not in out.stdout, "Thrift found in output: {}".format(out.stdout) 55 | else: 56 | self._assert_client_active_msg("Thrift", thrift_enabled, out.stdout) 57 | 58 | def _assert_startup(self, node_or_cluster): 59 | """Checks to see if the startup message was found""" 60 | self._assert_watch_log_for(node_or_cluster, "Startup complete", "Unable to find startup message, either the process crashed or is missing CASSANDRA-16127") 61 | 62 | @since(['2.2', '3.0.23', '3.11.9']) 63 | def test_defaults(self): 64 | """Tests default configurations have the correct client network setup""" 65 | cluster = self.cluster 66 | logger.debug("Starting cluster..") 67 | cluster.set_environment_variable('CASSANDRA_TOKEN_PREGENERATION_DISABLED', 'True') 68 | cluster.populate(1).start(wait_for_binary_proto=True) 69 | self._assert_binary_actually_found(cluster) 70 | self._assert_startup(cluster) 71 | node = cluster.nodelist()[0] 72 | self._assert_client_enable(node) 73 | 74 | @since(['2.2', '3.0.23', '3.11.9'], max_version='3.11.x') 75 | def test_hsha_defaults(self): 76 | """Enables hsha""" 77 | cluster = self.cluster 78 | logger.debug("Starting cluster..") 79 | cluster.set_configuration_options(values={ 80 | 'rpc_server_type': 'hsha', 81 | # seems 1 causes a dead lock... heh... 82 | 'rpc_min_threads': 16, 83 | 'rpc_max_threads': 2048 84 | }) 85 | cluster.populate(1).start(wait_for_binary_proto=True) 86 | self._assert_binary_actually_found(cluster) 87 | self._assert_startup(cluster) 88 | node = cluster.nodelist()[0] 89 | self._assert_client_enable(node) 90 | 91 | @since(['2.2', '3.0.23', '3.11.9'], max_version='3.11.x') 92 | def test_hsha_with_ssl(self): 93 | """Enables hsha with ssl""" 94 | cluster = self.cluster 95 | logger.debug("Starting cluster..") 96 | cred = sslkeygen.generate_credentials("127.0.0.1") 97 | cluster.set_configuration_options(values={ 98 | 'rpc_server_type': 'hsha', 99 | # seems 1 causes a dead lock... heh... 100 | 'rpc_min_threads': 16, 101 | 'rpc_max_threads': 2048, 102 | 'client_encryption_options': { 103 | 'enabled': True, 104 | 'optional': False, 105 | 'keystore': cred.cakeystore, 106 | 'keystore_password': 'cassandra' 107 | } 108 | }) 109 | cluster.populate(1).start(wait_for_binary_proto=True) 110 | self._assert_binary_actually_found(cluster) 111 | self._assert_startup(cluster) 112 | node = cluster.nodelist()[0] 113 | self._assert_client_enable(node) 114 | -------------------------------------------------------------------------------- /conf/README: -------------------------------------------------------------------------------- 1 | This is a set of cassandra-dtest exclude configuration files for use with nose-test-select 2 | - https://github.com/EnigmaCurry/nose-test-select 3 | 4 | (This is also a TODO list of dtests that need TLC.. 
patches welcome :) ) 5 | 6 | The files are for each C* branch and are run with: 7 | 8 | nosetests --test-select-config=conf/${C_BRANCH}_test-select.cfg 9 | 10 | -------------------------------------------------------------------------------- /conf/cassandra-1.2_test-select.cfg: -------------------------------------------------------------------------------- 1 | [exclude] 2 | -------------------------------------------------------------------------------- /conf/cassandra-2.0_test-select.cfg: -------------------------------------------------------------------------------- 1 | [exclude] 2 | -------------------------------------------------------------------------------- /conf/cassandra-2.1_fast-select.cfg: -------------------------------------------------------------------------------- 1 | [exclude] 2 | # Subsequent repair is a long test, ~40-50 minutes 3 | incremental_repair_test.py:TestIncRepair.multiple_subsequent_repair_test 4 | tools.py:TestIncRepair.multiple_subsequent_repair_test 5 | -------------------------------------------------------------------------------- /conf/cassandra-2.1_test-select.cfg: -------------------------------------------------------------------------------- 1 | [exclude] 2 | consistency_test.py:TestAvailability.test_network_topology_strategy 3 | consistency_test.py:TestAvailability.test_network_topology_strategy_each_quorum 4 | tools.py:TestAvailability.test_network_topology_strategy_each_quorum 5 | consistency_test.py:TestAccuracy.test_network_topology_strategy_users 6 | consistency_test.py:TestAccuracy.test_network_topology_strategy_each_quorum_users 7 | tools.py:TestAccuracy.test_network_topology_strategy_each_quorum_users 8 | consistency_test.py:TestAccuracy.test_network_topology_strategy_each_quorum_counters 9 | tools.py:TestAccuracy.test_network_topology_strategy_each_quorum_counters 10 | consistency_test.py:TestAccuracy.test_simple_strategy_each_quorum_users 11 | tools.py:TestAccuracy.test_simple_strategy_each_quorum_users 12 | consistency_test.py:TestAccuracy.test_simple_strategy_users 13 | consistency_test.py:TestAccuracy.test_network_topology_strategy_counters 14 | replication_test.py:ReplicationTest.network_topology_test 15 | replication_test.py:SnitchConfigurationUpdateTest.test_rf_collapse_gossiping_property_file_snitch_multi_dc 16 | replication_test.py:SnitchConfigurationUpdateTest.test_rf_expand_gossiping_property_file_snitch_multi_dc 17 | upgrade_tests/upgrade_through_versions_test.py 18 | materialized_views_test.py:TestMaterializedViews.complex_repair_test 19 | tools.py:TestMaterializedViews.complex_repair_test 20 | -------------------------------------------------------------------------------- /conf/cassandra-2.2_fast-select.cfg: -------------------------------------------------------------------------------- 1 | [exclude] 2 | # Subsequent repair is a long test, ~40-50 minutes 3 | incremental_repair_test.py:TestIncRepair.multiple_subsequent_repair_test 4 | tools.py:TestIncRepair.multiple_subsequent_repair_test 5 | -------------------------------------------------------------------------------- /conf/cassandra-2.2_test-select.cfg: -------------------------------------------------------------------------------- 1 | [exclude] 2 | consistency_test.py:TestAvailability.test_network_topology_strategy 3 | consistency_test.py:TestAvailability.test_network_topology_strategy_each_quorum 4 | tools.py:TestAvailability.test_network_topology_strategy_each_quorum 5 | consistency_test.py:TestAccuracy.test_network_topology_strategy_users 6 | 
consistency_test.py:TestAccuracy.test_network_topology_strategy_each_quorum_users 7 | tools.py:TestAccuracy.test_network_topology_strategy_each_quorum_users 8 | consistency_test.py:TestAccuracy.test_network_topology_strategy_each_quorum_counters 9 | tools.py:TestAccuracy.test_network_topology_strategy_each_quorum_counters 10 | consistency_test.py:TestAccuracy.test_simple_strategy_each_quorum_users 11 | tools.py:TestAccuracy.test_simple_strategy_each_quorum_users 12 | consistency_test.py:TestAccuracy.test_simple_strategy_users 13 | consistency_test.py:TestAccuracy.test_network_topology_strategy_counters 14 | replication_test.py:ReplicationTest.network_topology_test 15 | replication_test.py:SnitchConfigurationUpdateTest.test_rf_collapse_gossiping_property_file_snitch_multi_dc 16 | replication_test.py:SnitchConfigurationUpdateTest.test_rf_expand_gossiping_property_file_snitch_multi_dc 17 | upgrade_tests/upgrade_through_versions_test.py 18 | materialized_views_test.py:TestMaterializedViews.complex_repair_test 19 | tools.py:TestMaterializedViews.complex_repair_test 20 | -------------------------------------------------------------------------------- /conf/cassandra-3.0_test-select.cfg: -------------------------------------------------------------------------------- 1 | [exclude] 2 | consistency_test.py:TestAvailability.test_network_topology_strategy 3 | consistency_test.py:TestAvailability.test_network_topology_strategy_each_quorum 4 | tools.py:TestAvailability.test_network_topology_strategy_each_quorum 5 | consistency_test.py:TestAccuracy.test_network_topology_strategy_users 6 | consistency_test.py:TestAccuracy.test_network_topology_strategy_each_quorum_users 7 | tools.py:TestAccuracy.test_network_topology_strategy_each_quorum_users 8 | consistency_test.py:TestAccuracy.test_network_topology_strategy_each_quorum_counters 9 | tools.py:TestAccuracy.test_network_topology_strategy_each_quorum_counters 10 | consistency_test.py:TestAccuracy.test_simple_strategy_each_quorum_users 11 | tools.py:TestAccuracy.test_simple_strategy_each_quorum_users 12 | consistency_test.py:TestAccuracy.test_simple_strategy_users 13 | consistency_test.py:TestAccuracy.test_network_topology_strategy_counters 14 | replication_test.py:ReplicationTest.network_topology_test 15 | replication_test.py:SnitchConfigurationUpdateTest.test_rf_collapse_gossiping_property_file_snitch_multi_dc 16 | replication_test.py:SnitchConfigurationUpdateTest.test_rf_expand_gossiping_property_file_snitch_multi_dc 17 | upgrade_tests/upgrade_through_versions_test.py 18 | materialized_views_test.py:TestMaterializedViews.complex_repair_test 19 | tools.py:TestMaterializedViews.complex_repair_test 20 | -------------------------------------------------------------------------------- /conf/trunk_coverage_test-select.cfg: -------------------------------------------------------------------------------- 1 | [exclude] 2 | # CSTAR-218 - server hang on these tests: 3 | offline_tools_test.py:TestOfflineTools.sstableofflinerelevel_test 4 | materialized_views_test.py:TestMaterializedViewsConsistency.single_partition_consistent_reads_after_write_test 5 | -------------------------------------------------------------------------------- /conf/trunk_test-select.cfg: -------------------------------------------------------------------------------- 1 | [exclude] 2 | consistency_test.py:TestAvailability.test_network_topology_strategy 3 | consistency_test.py:TestAvailability.test_network_topology_strategy_each_quorum 4 | 
tools.py:TestAvailability.test_network_topology_strategy_each_quorum 5 | consistency_test.py:TestAccuracy.test_network_topology_strategy_users 6 | consistency_test.py:TestAccuracy.test_network_topology_strategy_each_quorum_users 7 | tools.py:TestAccuracy.test_network_topology_strategy_each_quorum_users 8 | consistency_test.py:TestAccuracy.test_network_topology_strategy_each_quorum_counters 9 | tools.py:TestAccuracy.test_network_topology_strategy_each_quorum_counters 10 | consistency_test.py:TestAccuracy.test_simple_strategy_each_quorum_users 11 | tools.py:TestAccuracy.test_simple_strategy_each_quorum_users 12 | consistency_test.py:TestAccuracy.test_simple_strategy_users 13 | consistency_test.py:TestAccuracy.test_network_topology_strategy_counters 14 | replication_test.py:ReplicationTest.network_topology_test 15 | replication_test.py:SnitchConfigurationUpdateTest.test_rf_collapse_gossiping_property_file_snitch_multi_dc 16 | replication_test.py:SnitchConfigurationUpdateTest.test_rf_expand_gossiping_property_file_snitch_multi_dc 17 | upgrade_tests/upgrade_through_versions_test.py 18 | materialized_views_test.py:TestMaterializedViews.complex_repair_test 19 | tools.py:TestMaterializedViews.complex_repair_test 20 | replace_address_test.py:TestReplaceAddress.replace_active_node_test 21 | replace_address_test.py:TestReplaceAddress.replace_first_boot_test 22 | replace_address_test.py:TestReplaceAddress.replace_nonexistent_node_test 23 | replace_address_test.py:TestReplaceAddress.replace_shutdown_node_test 24 | replace_address_test.py:TestReplaceAddress.replace_stopped_node_test 25 | replace_address_test.py:TestReplaceAddress.replace_with_reset_resume_state_test 26 | tools.py:TestReplaceAddress.replace_with_reset_resume_state_test 27 | replace_address_test.py:TestReplaceAddress.resumable_replace_test 28 | tools.py:TestReplaceAddress.resumable_replace_test 29 | -------------------------------------------------------------------------------- /consistent_bootstrap_test.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | import logging 3 | 4 | from cassandra import ConsistencyLevel 5 | 6 | from dtest import Tester, create_ks 7 | from tools.data import create_c1c2_table, insert_c1c2, query_c1c2 8 | from tools.misc import new_node 9 | 10 | logger = logging.getLogger(__name__) 11 | 12 | 13 | class TestBootstrapConsistency(Tester): 14 | 15 | @pytest.mark.no_vnodes 16 | def test_consistent_reads_after_move(self): 17 | logger.debug("Creating a ring") 18 | cluster = self.cluster 19 | cluster.set_configuration_options(values={'hinted_handoff_enabled': False, 20 | 'write_request_timeout_in_ms': 60000, 21 | 'read_request_timeout_in_ms': 60000, 22 | 'dynamic_snitch_badness_threshold': 0.0}) 23 | cluster.set_batch_commitlog(enabled=True, use_batch_window = cluster.version() < '5.0') 24 | 25 | cluster.populate(3, tokens=[0, 2**48, 2**62]).start() 26 | node1, node2, node3 = cluster.nodelist() 27 | 28 | logger.debug("Set to talk to node 2") 29 | n2session = self.patient_cql_connection(node2) 30 | create_ks(n2session, 'ks', 2) 31 | create_c1c2_table(self, n2session) 32 | 33 | logger.debug("Generating some data for all nodes") 34 | insert_c1c2(n2session, keys=list(range(10, 20)), consistency=ConsistencyLevel.ALL) 35 | 36 | node1.flush() 37 | logger.debug("Taking down node1") 38 | node1.stop(wait_other_notice=True) 39 | 40 | logger.debug("Writing data to node2") 41 | insert_c1c2(n2session, keys=list(range(30, 1000)), consistency=ConsistencyLevel.ONE) 42 | 
node2.flush() 43 | 44 | logger.debug("Restart node1") 45 | node1.start() 46 | 47 | logger.debug("Move token on node3") 48 | node3.move(2) 49 | 50 | logger.debug("Checking that no data was lost") 51 | for n in range(10, 20): 52 | query_c1c2(n2session, n, ConsistencyLevel.ALL, max_attempts=3) 53 | 54 | for n in range(30, 1000): 55 | query_c1c2(n2session, n, ConsistencyLevel.ALL, max_attempts=3) 56 | 57 | def test_consistent_reads_after_bootstrap(self): 58 | logger.debug("Creating a ring") 59 | cluster = self.cluster 60 | cluster.set_configuration_options(values={'hinted_handoff_enabled': False, 61 | 'write_request_timeout_in_ms': 60000, 62 | 'read_request_timeout_in_ms': 60000, 63 | 'dynamic_snitch_badness_threshold': 0.0}) 64 | cluster.set_batch_commitlog(enabled=True, use_batch_window = cluster.version() < '5.0') 65 | 66 | cluster.populate(2) 67 | node1, node2 = cluster.nodelist() 68 | cluster.start() 69 | 70 | logger.debug("Set to talk to node 2") 71 | n2session = self.patient_cql_connection(node2) 72 | create_ks(n2session, 'ks', 2) 73 | create_c1c2_table(self, n2session) 74 | 75 | logger.debug("Generating some data for all nodes") 76 | insert_c1c2(n2session, keys=list(range(10, 20)), consistency=ConsistencyLevel.ALL) 77 | 78 | node1.flush() 79 | logger.debug("Taking down node1") 80 | node1.stop(wait_other_notice=True) 81 | 82 | logger.debug("Writing data to only node2") 83 | insert_c1c2(n2session, keys=list(range(30, 1000)), consistency=ConsistencyLevel.ONE) 84 | node2.flush() 85 | 86 | logger.debug("Restart node1") 87 | node1.start() 88 | 89 | logger.debug("Bootstrapping node3") 90 | node3 = new_node(cluster, data_center="dc1") 91 | node3.start(wait_for_binary_proto=True) 92 | 93 | n3session = self.patient_cql_connection(node3) 94 | n3session.execute("USE ks") 95 | logger.debug("Checking that no data was lost") 96 | for n in range(10, 20): 97 | query_c1c2(n3session, n, ConsistencyLevel.ALL) 98 | 99 | for n in range(30, 1000): 100 | query_c1c2(n3session, n, ConsistencyLevel.ALL) 101 | -------------------------------------------------------------------------------- /cql_prepared_test.py: -------------------------------------------------------------------------------- 1 | import time 2 | import pytest 3 | import logging 4 | 5 | from dtest import Tester, create_ks 6 | 7 | since = pytest.mark.since 8 | logger = logging.getLogger(__name__) 9 | 10 | 11 | @since("1.2") 12 | class TestCQL(Tester): 13 | 14 | def prepare(self): 15 | cluster = self.cluster 16 | 17 | cluster.populate(1).start() 18 | node1 = cluster.nodelist()[0] 19 | time.sleep(0.2) 20 | 21 | session = self.patient_cql_connection(node1) 22 | create_ks(session, 'ks', 1) 23 | return session 24 | 25 | def test_batch_preparation(self): 26 | """ Test preparation of batch statement (#4202) """ 27 | session = self.prepare() 28 | 29 | session.execute(""" 30 | CREATE TABLE cf ( 31 | k varchar PRIMARY KEY, 32 | c int, 33 | ) 34 | """) 35 | 36 | query = "BEGIN BATCH INSERT INTO cf (k, c) VALUES (?, ?); APPLY BATCH" 37 | pq = session.prepare(query) 38 | 39 | session.execute(pq, ['foo', 4]) 40 | -------------------------------------------------------------------------------- /cqlsh_tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/apache/cassandra-dtest/28533ee239c23abdb1061dfeff793cc48bb3d8d7/cqlsh_tests/__init__.py -------------------------------------------------------------------------------- /cqlsh_tests/blogposts.yaml: 
-------------------------------------------------------------------------------- 1 | ### DML ### 2 | 3 | keyspace: stresscql 4 | 5 | keyspace_definition: | 6 | CREATE KEYSPACE stresscql WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 3}; 7 | 8 | table: blogposts 9 | 10 | table_definition: | 11 | CREATE TABLE blogposts ( 12 | domain text, 13 | published_date timeuuid, 14 | url text, 15 | author text, 16 | title text, 17 | body text, 18 | PRIMARY KEY(domain, published_date) 19 | ) WITH CLUSTERING ORDER BY (published_date DESC) 20 | AND compaction = { 'class':'LeveledCompactionStrategy' } 21 | AND comment='A table to hold blog posts' 22 | 23 | ### Column Distribution Specifications ### 24 | 25 | columnspec: 26 | - name: domain 27 | size: gaussian(5..100) 28 | population: uniform(1..10M) 29 | 30 | - name: published_date 31 | cluster: fixed(5000) 32 | 33 | - name: url 34 | size: uniform(10..50) 35 | 36 | - name: title 37 | size: gaussian(5..10) 38 | 39 | - name: author 40 | size: uniform(5..10) 41 | 42 | - name: body 43 | size: gaussian(10..100) 44 | 45 | ### Batch Ratio Distribution Specifications ### 46 | 47 | insert: 48 | partitions: fixed(1) # insert 1 partition per batch 49 | 50 | select: fixed(1)/500 # insert 10 rows per partition 51 | 52 | batchtype: UNLOGGED 53 | 54 | 55 | queries: 56 | singlepost: 57 | cql: select * from blogposts where domain = ? LIMIT 1 58 | fields: samerow 59 | timeline: 60 | cql: select url, title, published_date from blogposts where domain = ? LIMIT 10 61 | fields: samerow 62 | -------------------------------------------------------------------------------- /cqlsh_tests/cqlsh_test_types.py: -------------------------------------------------------------------------------- 1 | import datetime 2 | import logging 3 | import re 4 | 5 | from collections import namedtuple 6 | 7 | from cassandra.util import SortedSet 8 | 9 | logger = logging.getLogger(__name__) 10 | 11 | 12 | def maybe_quote(s): 13 | """ 14 | Return a quoted string representation for strings, unicode and date time parameters, 15 | otherwise return a string representation of the parameter. 16 | """ 17 | return "'{}'".format(s) if isinstance(s, (str, Datetime)) else str(s) 18 | 19 | 20 | class Address(namedtuple('Address', ('name', 'number', 'street', 'phones'))): 21 | __slots__ = () 22 | 23 | def __repr__(self): 24 | phones_str = "{{{}}}".format(', '.join(maybe_quote(p) for p in sorted(self.phones))) 25 | return "{{name: {}, number: {}, street: '{}', phones: {}}}".format(self.name, 26 | self.number, 27 | self.street, 28 | phones_str) 29 | 30 | def __str__(self): 31 | phones_str = "{{{}}}".format(', '.join(maybe_quote(p) for p in sorted(self.phones))) 32 | return "{{name: {}, number: {}, street: '{}', phones: {}}}".format(self.name, 33 | self.number, 34 | self.street, 35 | phones_str) 36 | 37 | 38 | def drop_microseconds(val): 39 | """ 40 | For COPY TO, we need to round microsecond to milliseconds because server side 41 | TimestampSerializer.dateStringPatterns only parses milliseconds. If we keep microseconds, 42 | users may try to import with COPY FROM a file generated with COPY TO and have problems if 43 | prepared statements are disabled, see CASSANDRA-11631. 
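Looking back at the blogposts.yaml profile above: profiles of this shape are consumed by cassandra-stress in user mode, and a dtest would typically drive that through ccm's node.stress helper (the same helper used by largecolumn_test.py further down this listing). A hedged sketch; the operation names insert/singlepost/timeline come from the profile's own insert and queries sections, while the counts, op ratios and thread count here are illustrative:

    def run_blogposts_profile(node, profile_path):
        # Mix partition inserts with the two named queries from the profile.
        node.stress(['user', 'profile=' + profile_path, 'no-warmup', 'n=10000',
                     'ops(insert=10,singlepost=1,timeline=1)',
                     '-rate', 'threads=4'])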
44 | """ 45 | def drop_micros(m): 46 | return m.group(0)[:12] + '+' 47 | 48 | # Matches H:MM:SS.000000+ and drops the last 3 digits before the + 49 | ret = re.sub('\\d{2}\\:\\d{2}\\:\\d{2}\\.(\\d{6})\\+', drop_micros, val) 50 | logger.debug("Rounded microseconds: {} -> {}".format(val, ret)) 51 | return ret 52 | 53 | 54 | class Datetime(datetime.datetime): 55 | """ 56 | Extend standard datetime.datetime class with cql formatting. 57 | This could be cleaner if this class was declared inside TestCqlshCopy, but then pickle 58 | wouldn't have access to the class. 59 | """ 60 | def __new__(cls, year, month, day, hour=0, minute=0, second=0, microsecond=0, tzinfo=None, 61 | time_format='%Y-%m-%d %H:%M:%S%z', round_timestamp=True): 62 | self = datetime.datetime.__new__(cls, year, month, day, hour, minute, second, microsecond, tzinfo) 63 | self.default_time_format = time_format 64 | self.round_timestamp = round_timestamp 65 | return self 66 | 67 | def __repr__(self): 68 | return self._format_for_csv() 69 | 70 | def __str__(self): 71 | return self._format_for_csv() 72 | 73 | def _format_for_csv(self): 74 | ret = self.strftime(self.default_time_format) 75 | return drop_microseconds(ret) if self.round_timestamp else ret 76 | 77 | class ImmutableDict(frozenset): 78 | """ 79 | Immutable dictionary implementation to represent map types. 80 | We need to pass BoundStatement.bind() a dict() because it calls iteritems(), 81 | except we can't create a dict with another dict as the key, hence we use a class 82 | that adds iteritems to a frozen set of tuples (which is how dict are normally made 83 | immutable in python). 84 | Must be declared in the top level of the module to be available for pickling. 85 | """ 86 | iteritems = frozenset.__iter__ 87 | 88 | def items(self): 89 | for k, v in self.iteritems(): 90 | yield k, v 91 | 92 | def __repr__(self): 93 | return '{{{}}}'.format(', '.join(['{0}: {1}'.format(maybe_quote(k), maybe_quote(v)) for k, v in sorted(self.items())])) 94 | 95 | 96 | class ImmutableSet(SortedSet): 97 | 98 | def __repr__(self): 99 | return '{{{}}}'.format(', '.join([maybe_quote(t) for t in sorted(self._items)])) 100 | 101 | def __str__(self): 102 | return '{{{}}}'.format(', '.join([maybe_quote(t) for t in sorted(self._items)])) 103 | 104 | def __hash__(self): 105 | return hash(tuple([e for e in self])) 106 | 107 | 108 | class Name(namedtuple('Name', ('firstname', 'lastname'))): 109 | __slots__ = () 110 | 111 | def __repr__(self): 112 | return "{{firstname: '{}', lastname: '{}'}}".format(self.firstname, self.lastname) 113 | 114 | def __str__(self): 115 | return "{{firstname: '{}', lastname: '{}'}}".format(self.firstname, self.lastname) 116 | 117 | 118 | class UTC(datetime.tzinfo): 119 | """ 120 | A utility class to specify a UTC timezone. 
121 | """ 122 | 123 | def utcoffset(self, dt): 124 | return datetime.timedelta(0) 125 | 126 | def tzname(self, dt): 127 | return "UTC" 128 | 129 | def dst(self, dt): 130 | return datetime.timedelta(0) 131 | -------------------------------------------------------------------------------- /cqlsh_tests/cqlsh_tools.py: -------------------------------------------------------------------------------- 1 | from __future__ import unicode_literals 2 | 3 | import csv 4 | import random 5 | from typing import List 6 | 7 | import cassandra 8 | 9 | from cassandra.cluster import ResultSet 10 | 11 | 12 | class DummyColorMap(object): 13 | 14 | def __getitem__(self, *args): 15 | return '' 16 | 17 | 18 | def csv_rows(filename, delimiter=None): 19 | """ 20 | Given a filename, opens a csv file and yields it line by line. 21 | """ 22 | reader_opts = {} 23 | if delimiter is not None: 24 | reader_opts['delimiter'] = delimiter 25 | with open(filename, 'r') as csvfile: 26 | for row in csv.reader(csvfile, **reader_opts): 27 | yield row 28 | 29 | 30 | def assert_csvs_items_equal(filename1, filename2): 31 | with open(filename1, 'r') as x, open(filename2, 'r') as y: 32 | list_x = list(x.readlines()) 33 | list_y = list(y.readlines()) 34 | list_x.sort() 35 | list_y.sort() 36 | assert list_x == list_y 37 | 38 | 39 | def random_list(gen=None, n=None): 40 | if gen is None: 41 | def gen(): 42 | return random.randint(-1000, 1000) 43 | if n is None: 44 | def length(): 45 | return random.randint(1, 5) 46 | else: 47 | def length(): 48 | return n 49 | 50 | return [gen() for _ in range(length())] 51 | 52 | 53 | def write_rows_to_csv(filename, data): 54 | with open(filename, 'w') as csvfile: 55 | writer = csv.writer(csvfile) 56 | for row in data: 57 | writer.writerow(row) 58 | csvfile.close 59 | 60 | 61 | def deserialize_date_fallback_int(byts, protocol_version): 62 | timestamp_ms = cassandra.marshal.int64_unpack(byts) 63 | try: 64 | return cassandra.util.datetime_from_timestamp(timestamp_ms / 1000.0) 65 | except OverflowError: 66 | return timestamp_ms 67 | 68 | 69 | def monkeypatch_driver(): 70 | """ 71 | Monkeypatches the `cassandra` driver module in the same way 72 | that clqsh does. Returns a dictionary containing the original values of 73 | the monkeypatched names. 74 | """ 75 | cache = {'BytesType_deserialize': cassandra.cqltypes.BytesType.deserialize, 76 | 'DateType_deserialize': cassandra.cqltypes.DateType.deserialize, 77 | 'support_empty_values': cassandra.cqltypes.CassandraType.support_empty_values} 78 | 79 | cassandra.cqltypes.BytesType.deserialize = staticmethod(lambda byts, protocol_version: bytearray(byts)) 80 | cassandra.cqltypes.DateType.deserialize = staticmethod(deserialize_date_fallback_int) 81 | cassandra.cqltypes.CassandraType.support_empty_values = True 82 | 83 | if hasattr(cassandra, 'deserializers'): 84 | cache['DesDateType'] = cassandra.deserializers.DesDateType 85 | del cassandra.deserializers.DesDateType 86 | 87 | return cache 88 | 89 | 90 | def unmonkeypatch_driver(cache): 91 | """ 92 | Given a dictionary that was used to cache parts of `cassandra` for 93 | monkeypatching, restore those values to the `cassandra` module. 
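An aside on the CSV helpers earlier in cqlsh_tools.py: write_rows_to_csv, csv_rows and assert_csvs_items_equal compose into a simple round-trip check. A minimal self-contained sketch (the temp-file handling is illustrative):

    import tempfile

    def csv_roundtrip_demo():
        rows = [['1', 'a'], ['2', 'b']]
        path_a = tempfile.mktemp(suffix='.csv')
        path_b = tempfile.mktemp(suffix='.csv')
        write_rows_to_csv(path_a, rows)
        write_rows_to_csv(path_b, list(reversed(rows)))
        # csv_rows yields each line of the file as a list of strings
        assert list(csv_rows(path_a)) == rows
        # same items in a different order: assert_csvs_items_equal sorts before comparing
        assert_csvs_items_equal(path_a, path_b)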
94 | """ 95 | cassandra.cqltypes.BytesType.deserialize = staticmethod(cache['BytesType_deserialize']) 96 | cassandra.cqltypes.DateType.deserialize = staticmethod(cache['DateType_deserialize']) 97 | cassandra.cqltypes.CassandraType.support_empty_values = cache['support_empty_values'] 98 | 99 | if hasattr(cassandra, 'deserializers'): 100 | cassandra.deserializers.DesDateType = cache['DesDateType'] 101 | 102 | 103 | def assert_resultset_contains(got: ResultSet, expected: List[tuple]) -> None: 104 | """ 105 | So this is slow. I would hope a ResultSet has the capability of pulling data by PK or clustering, 106 | however I'm not finding it atm. As such, this method isn't intended for use with large datasets. 107 | :param got: ResultSet, expect schema of [a, b] 108 | :param expected: list of tuples with 2 members corresponding with a/b schema of ResultSet 109 | """ 110 | # Adding a touch of sanity check so people don't mis-use this. n^2 is bad. 111 | assert len(expected) <= 1000, 'This is a slow comparison method. Don\'t use for > 1000 tuples.' 112 | 113 | # First quick check: if we have a different count, we can just die. 114 | assert len(got.current_rows) == len(expected) 115 | 116 | for t in expected: 117 | assert len(t) == 2, 'Got unexpected tuple len. Expected 2, got tuple: {}'.format(t) 118 | found = False 119 | for row in got.current_rows: 120 | if found: 121 | break 122 | if row.a == t[0] and row.b == t[1]: 123 | found = True 124 | assert found, 'Failed to find expected row: {}'.format(t) 125 | -------------------------------------------------------------------------------- /cqlsh_tests/util.py: -------------------------------------------------------------------------------- 1 | from collections import namedtuple 2 | 3 | import pytest 4 | from ccmlib.node import ToolError 5 | 6 | 7 | def run_cqlsh_safe(node, cmds, cqlsh_options=None, expect_error=True): 8 | """ 9 | cqlsh behavior has changed to set an error code on exit. This wrapper 10 | makes it easier to run cqlsh commands while expecting exceptions. 
11 | """ 12 | try: 13 | ret = node.run_cqlsh(cmds=cmds, cqlsh_options=cqlsh_options) 14 | if expect_error: 15 | pytest.fail("Expected ToolError but didn't get one") 16 | return ret 17 | except ToolError as e: 18 | ret = namedtuple('Subprocess_Return', 'stdout stderr rc') 19 | return ret(stdout=e.stdout, stderr=e.stderr, rc=e.exit_status) 20 | -------------------------------------------------------------------------------- /delete_insert_test.py: -------------------------------------------------------------------------------- 1 | import random 2 | import threading 3 | import uuid 4 | import logging 5 | 6 | from cassandra import ConsistencyLevel 7 | from cassandra.query import SimpleStatement 8 | 9 | from dtest import Tester, create_ks 10 | 11 | logger = logging.getLogger(__name__) 12 | 13 | 14 | class TestDeleteInsert(Tester): 15 | """ 16 | Examines scenarios around deleting data and adding data back with the same key 17 | """ 18 | # Generate 1000 rows in memory so we can re-use the same ones over again: 19 | rows = [(str(uuid.uuid1()), x, random.choice(['group1', 'group2', 'group3', 'group4'])) for x in range(1000)] 20 | 21 | def create_ddl(self, session, rf={'dc1': 2, 'dc2': 2}): 22 | create_ks(session, 'delete_insert_search_test', rf) 23 | session.execute('CREATE TABLE test (id uuid PRIMARY KEY, val1 text, group text)') 24 | session.execute('CREATE INDEX group_idx ON test (group)') 25 | 26 | def delete_group_rows(self, session, group): 27 | """Delete rows from a given group and return them""" 28 | rows = [r for r in self.rows if r[2] == group] 29 | ids = [r[0] for r in rows] 30 | session.execute('DELETE FROM test WHERE id in (%s)' % ', '.join(ids)) 31 | return list(rows) 32 | 33 | def insert_all_rows(self, session): 34 | self.insert_some_rows(session, self.rows) 35 | 36 | def insert_some_rows(self, session, rows): 37 | for row in rows: 38 | session.execute("INSERT INTO test (id, val1, group) VALUES (%s, '%s', '%s')" % row) 39 | 40 | def test_delete_insert_search(self): 41 | cluster = self.cluster 42 | cluster.populate([2, 2]).start() 43 | node1 = cluster.nodelist()[0] 44 | 45 | session = self.patient_cql_connection(node1) 46 | session.consistency_level = 'LOCAL_QUORUM' 47 | 48 | self.create_ddl(session) 49 | # Create 1000 rows: 50 | self.insert_all_rows(session) 51 | # Delete all of group2: 52 | deleted = self.delete_group_rows(session, 'group2') 53 | # Put that group back: 54 | self.insert_some_rows(session, rows=deleted) 55 | 56 | # Verify that all of group2 is back, 20 times, in parallel 57 | # querying across all nodes: 58 | 59 | class ThreadedQuery(threading.Thread): 60 | 61 | def __init__(self, connection): 62 | threading.Thread.__init__(self) 63 | self.connection = connection 64 | 65 | def run(self): 66 | session = self.connection 67 | query = SimpleStatement("SELECT * FROM delete_insert_search_test.test WHERE group = 'group2'", 68 | consistency_level=ConsistencyLevel.LOCAL_QUORUM) 69 | rows = session.execute(query) 70 | assert len(list(rows)) == len(deleted), "Expecting the length of {} to be equal to the " \ 71 | "length of {}.".format(list(rows), deleted) 72 | 73 | threads = [] 74 | for x in range(20): 75 | conn = self.cql_connection(random.choice(cluster.nodelist())) 76 | threads.append(ThreadedQuery(conn)) 77 | for t in threads: 78 | t.start() 79 | for t in threads: 80 | t.join() 81 | -------------------------------------------------------------------------------- /deletion_test.py: -------------------------------------------------------------------------------- 1 | import 
time 2 | import logging 3 | 4 | from dtest import Tester, create_ks, create_cf 5 | from tools.data import rows_to_list 6 | from tools.jmxutils import (JolokiaAgent, make_mbean) 7 | 8 | logger = logging.getLogger(__name__) 9 | 10 | 11 | class TestDeletion(Tester): 12 | 13 | def test_gc(self): 14 | """ 15 | Test that tombstone purging doesn't bring back deleted data by writing 16 | 2 rows to a table with gc_grace=0, deleting one of those rows, then 17 | asserting that it isn't present in the results of SELECT *, before and 18 | after a flush and compaction. 19 | """ 20 | cluster = self.cluster 21 | 22 | cluster.populate(1).start() 23 | [node1] = cluster.nodelist() 24 | 25 | time.sleep(.5) 26 | session = self.patient_cql_connection(node1) 27 | create_ks(session, 'ks', 1) 28 | create_cf(session, 'cf', gc_grace=0, key_type='int', columns={'c1': 'int'}) 29 | 30 | session.execute('insert into cf (key, c1) values (1,1)') 31 | session.execute('insert into cf (key, c1) values (2,1)') 32 | node1.flush() 33 | 34 | assert rows_to_list(session.execute('select * from cf;')) == [[1, 1], [2, 1]] 35 | 36 | session.execute('delete from cf where key=1') 37 | 38 | assert rows_to_list(session.execute('select * from cf;')) == [[2, 1]] 39 | 40 | node1.flush() 41 | time.sleep(.5) 42 | node1.compact() 43 | time.sleep(.5) 44 | 45 | assert rows_to_list(session.execute('select * from cf;')) == [[2, 1]] 46 | 47 | def test_tombstone_size(self): 48 | self.cluster.populate(1) 49 | self.cluster.start() 50 | [node1] = self.cluster.nodelist() 51 | session = self.patient_cql_connection(node1) 52 | create_ks(session, 'ks', 1) 53 | session.execute('CREATE TABLE test (i int PRIMARY KEY)') 54 | 55 | stmt = session.prepare('DELETE FROM test where i = ?') 56 | for i in range(100): 57 | session.execute(stmt, [i]) 58 | 59 | assert memtable_count(node1, 'ks', 'test') == 100 60 | assert memtable_size(node1, 'ks', 'test') > 0 61 | 62 | 63 | def memtable_size(node, keyspace, table): 64 | return table_metric(node, keyspace, table, 'MemtableOnHeapSize') 65 | 66 | 67 | def memtable_count(node, keyspace, table): 68 | return table_metric(node, keyspace, table, 'MemtableColumnsCount') 69 | 70 | 71 | def table_metric(node, keyspace, table, name): 72 | version = node.get_cassandra_version() 73 | typeName = "ColumnFamily" if version < '3.0' else 'Table' 74 | with JolokiaAgent(node) as jmx: 75 | mbean = make_mbean('metrics', type=typeName, 76 | name=name, keyspace=keyspace, scope=table) 77 | value = jmx.read_attribute(mbean, 'Value') 78 | 79 | return value 80 | -------------------------------------------------------------------------------- /dtest_setup_overrides.py: -------------------------------------------------------------------------------- 1 | class DTestSetupOverrides: 2 | def __init__(self): 3 | self.cluster_options = [] -------------------------------------------------------------------------------- /env.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/apache/cassandra-dtest/28533ee239c23abdb1061dfeff793cc48bb3d8d7/env.txt -------------------------------------------------------------------------------- /findlibjemalloc.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | # taken from bin/cassandra 4 | 5 | find_library() 6 | { 7 | pattern=$1 8 | path=$(echo ${2} | tr ":" " ") 9 | find $path -regex "$pattern" -print 2>/dev/null | head -n 1 10 | } 11 | 12 | case "`uname -s`" in 13 | Linux) 14 | which ldconfig > 
/dev/null 2>&1 15 | if [ $? = 0 ] ; then 16 | # e.g. for CentOS 17 | dirs="/lib64 /lib /usr/lib64 /usr/lib `ldconfig -v 2>/dev/null | grep -v '^\s' | sed 's/^\([^:]*\):.*$/\1/'`" 18 | else 19 | # e.g. for Debian, OpenSUSE 20 | dirs="/lib64 /lib /usr/lib64 /usr/lib `cat /etc/ld.so.conf /etc/ld.so.conf.d/*.conf | grep '^/'`" 21 | fi 22 | dirs=`echo $dirs | tr " " ":"` 23 | CASSANDRA_LIBJEMALLOC=$(find_library '.*/libjemalloc\.so\(\.1\)*' $dirs) 24 | ;; 25 | Darwin) 26 | CASSANDRA_LIBJEMALLOC=$(find_library '.*/libjemalloc\.dylib' $DYLD_LIBRARY_PATH:${DYLD_FALLBACK_LIBRARY_PATH-$HOME/lib:/usr/local/lib:/lib:/usr/lib}) 27 | ;; 28 | esac 29 | 30 | if [ ! -z $CASSANDRA_LIBJEMALLOC ] ; then 31 | echo $CASSANDRA_LIBJEMALLOC 32 | exit 0 33 | else 34 | exit 1 35 | fi 36 | -------------------------------------------------------------------------------- /internode_ssl_test.py: -------------------------------------------------------------------------------- 1 | import logging 2 | 3 | from dtest import Tester, create_ks, create_cf 4 | from tools.data import putget 5 | from tools.misc import generate_ssl_stores 6 | 7 | logger = logging.getLogger(__name__) 8 | 9 | 10 | class TestInternodeSSL(Tester): 11 | 12 | def test_putget_with_internode_ssl(self): 13 | """ 14 | Simple putget test with internode ssl enabled 15 | with default 'all' internode compression 16 | @jira_ticket CASSANDRA-9884 17 | """ 18 | self.__putget_with_internode_ssl_test('all') 19 | 20 | def test_putget_with_internode_ssl_without_compression(self): 21 | """ 22 | Simple putget test with internode ssl enabled 23 | without internode compression 24 | @jira_ticket CASSANDRA-9884 25 | """ 26 | self.__putget_with_internode_ssl_test('none') 27 | 28 | def __putget_with_internode_ssl_test(self, internode_compression): 29 | cluster = self.cluster 30 | 31 | logger.debug("***using internode ssl***") 32 | generate_ssl_stores(self.fixture_dtest_setup.test_path) 33 | cluster.set_configuration_options({'internode_compression': internode_compression}) 34 | cluster.enable_internode_ssl(self.fixture_dtest_setup.test_path) 35 | 36 | cluster.populate(3).start() 37 | 38 | session = self.patient_cql_connection(cluster.nodelist()[0]) 39 | create_ks(session, 'ks', 3) 40 | create_cf(session, 'cf', compression=None) 41 | putget(cluster, session) 42 | -------------------------------------------------------------------------------- /jmx_auth_test.py: -------------------------------------------------------------------------------- 1 | import random 2 | import string 3 | import pytest 4 | import logging 5 | from distutils.version import LooseVersion 6 | 7 | from ccmlib.node import ToolError 8 | from dtest import Tester 9 | from tools.jmxutils import apply_jmx_authentication 10 | 11 | since = pytest.mark.since 12 | logger = logging.getLogger(__name__) 13 | 14 | 15 | @since('3.6') 16 | class TestJMXAuth(Tester): 17 | """ 18 | Uses nodetool as a means of exercising the JMX interface as JolokiaAgent 19 | exposes its own connector which bypasses the in-built security features 20 | """ 21 | 22 | def test_basic_auth(self): 23 | """ 24 | Some basic smoke testing of JMX authentication and authorization. 
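For the internode SSL test above: ccm's enable_internode_ssl hides the yaml plumbing, but its effect is to point server_encryption_options at the stores produced by generate_ssl_stores. A hedged sketch of a roughly equivalent manual configuration; the store paths, file names and passwords here are assumptions for illustration, not values taken from this suite:

    # hypothetical manual equivalent of cluster.enable_internode_ssl(...)
    cluster.set_configuration_options(values={
        'server_encryption_options': {
            'internode_encryption': 'all',            # encrypt all node-to-node traffic
            'keystore': '/path/to/keystore.jks',      # assumed path
            'keystore_password': 'cassandra',         # assumed password
            'truststore': '/path/to/truststore.jks',  # assumed path
            'truststore_password': 'cassandra',
        }
    })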
25 | @jira_ticket CASSANDRA-10091 26 | """ 27 | self.prepare() 28 | [node] = self.cluster.nodelist() 29 | node.nodetool('-u cassandra -pw cassandra status') 30 | 31 | session = self.patient_cql_connection(node, user='cassandra', password='cassandra') 32 | # the jmx_user role has no login privilege but give it a password anyway 33 | # to demonstrate that LOGIN is required for JMX authentication 34 | session.execute("CREATE ROLE jmx_user WITH LOGIN=false AND PASSWORD='321cba'") 35 | session.execute("GRANT SELECT ON MBEAN 'org.apache.cassandra.net:type=FailureDetector' TO jmx_user") 36 | session.execute("GRANT DESCRIBE ON ALL MBEANS TO jmx_user") 37 | session.execute("CREATE ROLE test WITH LOGIN=true and PASSWORD='abc123'") 38 | 39 | with pytest.raises(ToolError, match=self.authentication_fail_message(node, 'baduser')): 40 | node.nodetool('-u baduser -pw abc123 gossipinfo') 41 | 42 | with pytest.raises(ToolError, match=self.authentication_fail_message(node, 'test')): 43 | node.nodetool('-u test -pw badpassword gossipinfo') 44 | 45 | with pytest.raises(ToolError, match="Required key 'username' is missing"): 46 | node.nodetool('gossipinfo') 47 | 48 | # role must have LOGIN attribute 49 | with pytest.raises(ToolError, match='jmx_user is not permitted to log in'): 50 | node.nodetool('-u jmx_user -pw 321cba gossipinfo') 51 | 52 | # test doesn't yet have any privileges on the necessary JMX resources 53 | with pytest.raises(ToolError, match='Access Denied'): 54 | node.nodetool('-u test -pw abc123 gossipinfo') 55 | 56 | session.execute("GRANT jmx_user TO test") 57 | node.nodetool('-u test -pw abc123 gossipinfo') 58 | 59 | # superuser status applies to JMX authz too 60 | node.nodetool('-u cassandra -pw cassandra gossipinfo') 61 | 62 | @since('4.1') 63 | def test_revoked_jmx_access(self): 64 | """ 65 | if a user's access to a JMX MBean is revoked while they're connected, 66 | all of their requests should fail once the cache is cleared. 
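Some context for the TestJMXAuth class docstring above: nodetool is used because the Jolokia connector bypasses JMX security, so it cannot prove that authentication or authorization works. For contrast, the unauthenticated read idiom used elsewhere in this listing (see table_metric in deletion_test.py) looks roughly like this; the MBean name comes from the GRANT statements in test_basic_auth, while the attribute name is an assumption:

    from tools.jmxutils import JolokiaAgent

    def read_failure_detector_unauthenticated(node):
        # Goes through Jolokia's own connector, so JMX auth never comes into play.
        with JolokiaAgent(node) as jmx:
            return jmx.read_attribute('org.apache.cassandra.net:type=FailureDetector',
                                      'DownEndpointCount')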
67 | @jira_ticket CASSANDRA-16404 68 | """ 69 | self.prepare(permissions_validity=60000) 70 | [node] = self.cluster.nodelist() 71 | 72 | def test_revoked_access(cache_name): 73 | logger.debug('Testing with cache name: %s' % cache_name) 74 | username = self.username() 75 | session = self.patient_cql_connection(node, user='cassandra', password='cassandra') 76 | session.execute("CREATE ROLE %s WITH LOGIN=true AND PASSWORD='abc123'" % username) 77 | session.execute("GRANT SELECT ON MBEAN 'org.apache.cassandra.net:type=FailureDetector' TO %s" % username) 78 | session.execute("GRANT DESCRIBE ON ALL MBEANS TO %s" % username) 79 | 80 | # works fine 81 | node.nodetool('-u %s -pw abc123 gossipinfo' % username) 82 | 83 | session.execute("REVOKE SELECT ON MBEAN 'org.apache.cassandra.net:type=FailureDetector' FROM %s" % username) 84 | # works fine because the JMX permission is cached 85 | node.nodetool('-u %s -pw abc123 gossipinfo' % username) 86 | 87 | node.nodetool('-u cassandra -pw cassandra invalidatejmxpermissionscache') 88 | # the user has no permissions to the JMX resource anymore 89 | with pytest.raises(ToolError, match='Access Denied'): 90 | node.nodetool('-u %s -pw abc123 gossipinfo' % username) 91 | 92 | test_revoked_access("JmxPermissionsCache") 93 | 94 | # deprecated cache name, scheduled for removal in 5.0 95 | if self.dtest_config.cassandra_version_from_build < '5.0': 96 | test_revoked_access("JMXPermissionsCache") 97 | 98 | def prepare(self, nodes=1, permissions_validity=0): 99 | config = {'authenticator': 'org.apache.cassandra.auth.PasswordAuthenticator', 100 | 'authorizer': 'org.apache.cassandra.auth.CassandraAuthorizer', 101 | 'permissions_validity_in_ms': permissions_validity} 102 | self.cluster.set_configuration_options(values=config) 103 | self.cluster.populate(nodes) 104 | [node] = self.cluster.nodelist() 105 | apply_jmx_authentication(node) 106 | node.start() 107 | node.watch_log_for('Created default superuser') 108 | 109 | def authentication_fail_message(self, node, username): 110 | return "Provided username {user} and/or password are incorrect".format(user=username) \ 111 | if node.cluster.version() >= LooseVersion('3.10') else "Username and/or password are incorrect" 112 | 113 | def username(self): 114 | return ''.join(random.choice(string.ascii_lowercase) for _ in range(8)) -------------------------------------------------------------------------------- /json_tools_test.py: -------------------------------------------------------------------------------- 1 | import os 2 | import tempfile 3 | import pytest 4 | import logging 5 | 6 | from dtest import Tester, create_ks 7 | from tools.data import rows_to_list 8 | from tools.assertions import assert_lists_equal_ignoring_order 9 | 10 | since = pytest.mark.since 11 | logger = logging.getLogger(__name__) 12 | 13 | 14 | @since('0', max_version='2.2.X') 15 | class TestJson(Tester): 16 | 17 | def test_json_tools(self): 18 | 19 | logger.debug("Starting cluster...") 20 | cluster = self.cluster 21 | cluster.set_batch_commitlog(enabled=True, use_batch_window = cluster.version() < '5.0') 22 | cluster.populate(1).start() 23 | 24 | logger.debug("Version: " + cluster.version().vstring) 25 | 26 | logger.debug("Getting CQLSH...") 27 | [node1] = cluster.nodelist() 28 | session = self.patient_cql_connection(node1) 29 | 30 | logger.debug("Inserting data...") 31 | create_ks(session, 'Test', 1) 32 | 33 | session.execute(""" 34 | CREATE TABLE users ( 35 | user_name varchar PRIMARY KEY, 36 | password varchar, 37 | gender varchar, 38 | state varchar, 39 
| birth_year bigint 40 | ); 41 | """) 42 | 43 | session.execute("INSERT INTO Test. users (user_name, password, gender, state, birth_year) " 44 | "VALUES ('frodo', 'pass@', 'male', 'CA', 1985);") 45 | session.execute("INSERT INTO Test. users (user_name, password, gender, state, birth_year) " 46 | "VALUES ('sam', '@pass', 'male', 'NY', 1980);") 47 | 48 | res = session.execute("SELECT * FROM Test. users") 49 | 50 | assert_lists_equal_ignoring_order(rows_to_list(res), [['frodo', 1985, 'male', 'pass@', 'CA'], 51 | ['sam', 1980, 'male', '@pass', 'NY']]) 52 | 53 | logger.debug("Flushing and stopping cluster...") 54 | node1.flush() 55 | cluster.stop() 56 | 57 | logger.debug("Exporting to JSON file...") 58 | json_path = tempfile.mktemp(suffix='.schema.json') 59 | with open(json_path, 'w') as f: 60 | node1.run_sstable2json(f) 61 | 62 | with open(json_path, 'r') as fin: 63 | data = fin.read().splitlines(True) 64 | if data[0][0] == 'W': 65 | with open(json_path, 'w') as fout: 66 | fout.writelines(data[1:]) 67 | 68 | logger.debug("Deleting cluster and creating new...") 69 | cluster.clear() 70 | cluster.start() 71 | 72 | logger.debug("Inserting data...") 73 | session = self.patient_cql_connection(node1) 74 | create_ks(session, 'Test', 1) 75 | 76 | session.execute(""" 77 | CREATE TABLE users ( 78 | user_name varchar PRIMARY KEY, 79 | password varchar, 80 | gender varchar, 81 | state varchar, 82 | birth_year bigint 83 | ); 84 | """) 85 | 86 | session.execute("INSERT INTO Test. users (user_name, password, gender, state, birth_year) " 87 | "VALUES ('gandalf', 'p@$$', 'male', 'WA', 1955);") 88 | node1.flush() 89 | cluster.stop() 90 | 91 | logger.debug("Importing JSON file...") 92 | with open(json_path) as f: 93 | node1.run_json2sstable(f, "test", "users") 94 | os.remove(json_path) 95 | 96 | logger.debug("Verifying import...") 97 | cluster.start() 98 | [node1] = cluster.nodelist() 99 | session = self.patient_cql_connection(node1) 100 | 101 | res = session.execute("SELECT * FROM Test. users") 102 | 103 | logger.debug("data: " + str(res)) 104 | 105 | assert_lists_equal_ignoring_order(rows_to_list(res), [['frodo', 1985, 'male', 'pass@', 'CA'], 106 | ['sam', 1980, 'male', '@pass', 'NY'], 107 | ['gandalf', 1955, 'male', 'p@$$', 'WA']]) 108 | -------------------------------------------------------------------------------- /largecolumn_test.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | import re 3 | import logging 4 | 5 | from dtest import Tester 6 | 7 | since = pytest.mark.since 8 | logger = logging.getLogger(__name__) 9 | 10 | 11 | @since('2.2') 12 | class TestLargeColumn(Tester): 13 | """ 14 | Check that inserting and reading large columns to the database doesn't cause off heap memory usage 15 | that is proportional to the size of the memory read/written. 
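A note on json_tools_test.py above: tools.assertions.assert_lists_equal_ignoring_order is not included in this listing; read literally, it must compare two row lists while ignoring row order. A hedged sketch of that contract (the real helper may differ in details):

    def assert_lists_equal_ignoring_order_sketch(got, expected):
        # rows are lists (unhashable), so sort on a stable key rather than using sets
        assert sorted(got, key=repr) == sorted(expected, key=repr), \
            'Lists differ ignoring order: {} vs {}'.format(got, expected)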
16 | """ 17 | 18 | def stress_with_col_size(self, cluster, node, size): 19 | size = str(size) 20 | node.stress(['write', 'n=5', "no-warmup", "cl=ALL", "-pop", "seq=1...5", "-schema", "replication(factor=2)", "-col", "n=fixed(1)", "size=fixed(" + size + ")", "-rate", "threads=1"]) 21 | node.stress(['read', 'n=5', "no-warmup", "cl=ALL", "-pop", "seq=1...5", "-schema", "replication(factor=2)", "-col", "n=fixed(1)", "size=fixed(" + size + ")", "-rate", "threads=1"]) 22 | 23 | def directbytes(self, node): 24 | def is_number(s): 25 | try: 26 | float(s) 27 | return True 28 | except ValueError: 29 | return False 30 | 31 | output, err, _ = node.nodetool("gcstats") 32 | logger.debug(output) 33 | output = output.split("\n") 34 | assert re.search('Interval', output[0].strip()) 35 | fields = output[1].split() 36 | assert len(fields) >= 6, "Expected output from nodetool gcstats has at least six fields. However >= fields is: {}".format(fields) 37 | for field in fields: 38 | assert is_number(field.strip()) or field == 'NaN', "Expected numeric from fields from nodetool gcstats. However, field.strip() is: {}".format(field.strip()) 39 | return fields[6] 40 | 41 | def test_cleanup(self): 42 | """ 43 | @jira_ticket CASSANDRA-8670 44 | """ 45 | cluster = self.cluster 46 | # Commit log segment size needs to increase for the database to be willing to accept columns that large 47 | # internode compression is disabled because the regression being tested occurs in NIO buffer pooling without compression 48 | configuration = {'commitlog_segment_size_in_mb': 128, 'internode_compression': 'none'} 49 | if cluster.version() >= '4.0': 50 | configuration['internode_max_message_size_in_bytes'] = 128 * 1024 * 1024 51 | if cluster.version() >= '4.1': 52 | configuration['native_transport_max_request_data_in_flight'] = '64MiB' 53 | configuration['native_transport_max_request_data_in_flight_per_ip'] = '64MiB' 54 | cluster.set_configuration_options(configuration) 55 | 56 | # Have Netty allocate memory on heap so it is clear if memory used for large columns is related to intracluster messaging 57 | # NOTE: we still have direct memory used by Cassandra for networking cache and other places 58 | cluster.populate(2).start(jvm_args=[" -Dcassandra.netty_use_heap_allocator=true "]) 59 | node1, node2 = cluster.nodelist() 60 | 61 | session = self.patient_cql_connection(node1) 62 | logger.info("Before stress, direct memory: {0}".format(self.directbytes(node1))) 63 | logger.debug("Running stress") 64 | # Run the full stack to see how much memory is utilized for "small" columns 65 | self.stress_with_col_size(cluster, node1, 1) 66 | beforeLargeStress = self.directbytes(node1) 67 | logger.info("Ran small column stress once, direct memory: {0}".format(beforeLargeStress)) 68 | 69 | # Now run the full stack to warm up internal caches/pools 70 | LARGE_COLUMN_SIZE = 1024 * 1024 * 63 71 | self.stress_with_col_size(cluster, node1, LARGE_COLUMN_SIZE) 72 | after1stLargeStress = self.directbytes(node1) 73 | logger.info("After 1st large column stress, direct memory: {0}".format(after1stLargeStress)) 74 | 75 | # Now run the full stack to see how much memory is allocated for the second "large" columns request 76 | self.stress_with_col_size(cluster, node1, LARGE_COLUMN_SIZE) 77 | after2ndLargeStress = self.directbytes(node1) 78 | logger.info("After 2nd large column stress, direct memory: {0}".format(after2ndLargeStress)) 79 | 80 | # We may allocate direct memory proportional to size of a request 81 | # but we want to ensure that when we do subsequent calls 
the used direct memory is not growing 82 | diff = int(after2ndLargeStress) - int(after1stLargeStress) 83 | logger.info("Direct memory delta: {0}".format(diff)) 84 | assert diff < LARGE_COLUMN_SIZE 85 | -------------------------------------------------------------------------------- /legacy_sstables_test.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from cassandra import ConsistencyLevel 4 | from dtest import Tester 5 | from tools.assertions import assert_all 6 | 7 | since = pytest.mark.since 8 | 9 | class TestLegacySSTables(Tester): 10 | 11 | @since('3.0', max_version='4') 12 | def test_14766(self): 13 | """ 14 | @jira_ticket CASSANDRA-14766 15 | 16 | A reproduction / regression test to illustrate CASSANDRA-14766: when 17 | reading a legacy 2.1 sstable with SSTableReversedIterator, it's possible 18 | to skip and not return the last Unfiltered in the last indexed block. 19 | 20 | It would lead to a missing row, if that Unfiltered was a row, or potentially 21 | resurrected data, if it's a tombstone. 22 | """ 23 | cluster = self.cluster 24 | 25 | # set column_index_size_in_kb to 1 for a small reproduction sequence 26 | cluster.set_configuration_options(values={'column_index_size_in_kb': 1}) 27 | 28 | # start with 2.1.20 to generate a legacy sstable 29 | cluster.set_install_dir(version='2.1.20') 30 | 31 | cluster.populate(1).start() 32 | self.install_nodetool_legacy_parsing() 33 | node1 = cluster.nodelist()[0] 34 | session = self.patient_cql_connection(node1) 35 | 36 | query = "CREATE KEYSPACE test WITH replication = {'class': 'NetworkTopologyStrategy', 'datacenter1': 1};" 37 | session.execute(query) 38 | 39 | query = 'CREATE TABLE test.test (pk int, ck int, value text, PRIMARY KEY (pk, ck));' 40 | session.execute(query) 41 | 42 | # insert 4 rows to fill 2 index blocks and flush the 2.1 sstable 43 | stmt = session.prepare('INSERT INTO test.test (pk, ck, value) VALUES (0, ?, ?);'); 44 | for i in range(0, 4): 45 | session.execute(stmt, [i, '0' * 512]) 46 | cluster.flush() 47 | 48 | # stop, upgrade to current version (3.0 or 3.11), start up 49 | node1.stop(wait_other_notice=True) 50 | self.set_node_to_current_version(node1) 51 | node1.start() 52 | session = self.patient_cql_connection(node1) 53 | 54 | # make sure all 4 rows are there when reading backwards 55 | # prior to the fix, this would return 3 rows (ck = 2, 1, 0), skipping ck = 3 56 | assert_all(session, 57 | "SELECT ck FROM test.test WHERE pk = 0 ORDER BY ck DESC;", 58 | [[3], [2], [1], [0]], 59 | cl=ConsistencyLevel.ONE) 60 | -------------------------------------------------------------------------------- /lib/cassandra-attack.jar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/apache/cassandra-dtest/28533ee239c23abdb1061dfeff793cc48bb3d8d7/lib/cassandra-attack.jar -------------------------------------------------------------------------------- /lib/cassandra-attack.jar.txt: -------------------------------------------------------------------------------- 1 | This is the test reproduction jar from https://issues.apache.org/jira/browse/CASSANDRA-6285 2 | Source code is available as an attachment on that JIRA - https://issues.apache.org/jira/secure/attachment/12633659/cassandra-attack-src.zip 3 | -------------------------------------------------------------------------------- /lib/jolokia-jvm-1.7.1-agent.jar: -------------------------------------------------------------------------------- 
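A quick sanity check on the legacy_sstables_test.py repro above: with column_index_size_in_kb set to 1, each row carrying a 512-byte value means roughly two rows per index block, so the four inserted rows span the two blocks the bug requires. Back-of-the-envelope arithmetic, ignoring per-row serialization overhead:

    COLUMN_INDEX_SIZE = 1 * 1024   # column_index_size_in_kb: 1
    VALUE_SIZE = 512               # '0' * 512 per row in the test
    ROWS = 4

    rows_per_block = COLUMN_INDEX_SIZE // VALUE_SIZE   # == 2
    index_blocks = ROWS // rows_per_block              # == 2
    assert index_blocks == 2   # matches the test comment: 4 rows fill 2 index blocks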
https://raw.githubusercontent.com/apache/cassandra-dtest/28533ee239c23abdb1061dfeff793cc48bb3d8d7/lib/jolokia-jvm-1.7.1-agent.jar -------------------------------------------------------------------------------- /lib/jolokia-jvm-1.7.1-agent.jar.txt: -------------------------------------------------------------------------------- 1 | https://jolokia.org/license.html 2 | 3 | Copyright 2009-2010 Roland Huss 4 | 5 | Licensed under the Apache License, Version 2.0 (the "License"); 6 | you may not use this file except in compliance with the License. 7 | You may obtain a copy of the License at 8 | 9 | http://www.apache.org/licenses/LICENSE-2.0 10 | 11 | Unless required by applicable law or agreed to in writing, software 12 | distributed under the License is distributed on an "AS IS" BASIS, 13 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | See the License for the specific language governing permissions and 15 | limitations under the License. 16 | -------------------------------------------------------------------------------- /linter_check.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # linter_check ensures your changes will pass on travis. 4 | # Requires pycodestyle and flake8: pip install pycodestyle flake8 5 | 6 | flake8 --ignore=E501,F811,F812,F822,F823,F831,F841,N8,C9 --exclude=thrift_bindings,cassandra-thrift . 7 | flake8_result=$? 8 | 9 | # lint all files for everything but line length errors 10 | git diff apache/trunk...HEAD -U0 | pycodestyle --ignore=E501 --diff 11 | pep8_style_check=$? 12 | 13 | # lint all files except json_test.py for line length errors 14 | git diff apache/trunk...HEAD -U0 | pycodestyle --diff --exclude='json_test.py' --exclude='meta_tests/assertion_test.py' --max-line-length=200 15 | pep8_line_length=$? 16 | 17 | echo -e "\nflake8 exited with ${flake8_result}." 18 | echo "pep8 line length check exited with ${pep8_line_length} and style check exited with ${pep8_style_check}." 19 | 20 | if [ $flake8_result -ne 0 -o $pep8_line_length -ne 0 -o $pep8_style_check -ne 0 ]; 21 | then 22 | echo "Your changes contain linter errors." 23 | echo "You can fix these manually or with autopep8, which can be installed with pip." 
24 | exit 1 25 | fi 26 | 27 | echo "Done" 28 | exit 0 29 | -------------------------------------------------------------------------------- /meta_tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/apache/cassandra-dtest/28533ee239c23abdb1061dfeff793cc48bb3d8d7/meta_tests/__init__.py -------------------------------------------------------------------------------- /meta_tests/cassandra-dir-3.2/0.version.txt: -------------------------------------------------------------------------------- 1 | 3.2 -------------------------------------------------------------------------------- /meta_tests/cassandra-dir-4.0-beta/0.version.txt: -------------------------------------------------------------------------------- 1 | 4.0-beta -------------------------------------------------------------------------------- /meta_tests/cassandra-dir-4.0-beta/bin/.do-not-delete: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/apache/cassandra-dtest/28533ee239c23abdb1061dfeff793cc48bb3d8d7/meta_tests/cassandra-dir-4.0-beta/bin/.do-not-delete -------------------------------------------------------------------------------- /meta_tests/cassandra-dir-4.0-beta/conf/cassandra.yaml: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/apache/cassandra-dtest/28533ee239c23abdb1061dfeff793cc48bb3d8d7/meta_tests/cassandra-dir-4.0-beta/conf/cassandra.yaml -------------------------------------------------------------------------------- /meta_tests/dtest_config_test.py: -------------------------------------------------------------------------------- 1 | import os 2 | from re import search 3 | from unittest import TestCase 4 | 5 | from dtest_config import DTestConfig 6 | from mock import Mock, patch 7 | from pytest import UsageError, raises 8 | import ccmlib.repository 9 | import ccmlib.common 10 | 11 | 12 | def _mock_responses(responses, default_response=None): 13 | return lambda input: responses[input] if input in responses else \ 14 | "%s/meta_tests/cassandra-dir-4.0-beta" % os.getcwd() if input == "--cassandra-dir" else default_response 15 | 16 | 17 | def _check_with_params(params): 18 | config = Mock() 19 | config.getoption.side_effect = _mock_responses(params) 20 | config.getini.side_effect = _mock_responses({}) 21 | 22 | dTestConfig = DTestConfig() 23 | dTestConfig.setup(config) 24 | return dTestConfig 25 | 26 | 27 | def _check_with_params_expect(params, pattern): 28 | with raises(UsageError, match=pattern): 29 | _check_with_params(params) 30 | 31 | 32 | class DTestConfigTest(TestCase): 33 | 34 | def test_invalid_cass_dir_no_version(self): 35 | _check_with_params_expect({ 36 | '--cassandra-dir': 'blah' 37 | }, "The Cassandra directory blah does not seem to be valid") 38 | 39 | def test_cass_dir_and_version(self): 40 | _check_with_params_expect({ 41 | '--cassandra-version': '3.11' 42 | }, "Cassandra build directory is already defined") 43 | 44 | def test_no_cass_dir(self): 45 | with patch.object(ccmlib.repository, "setup") as mocked_setup: 46 | mocked_setup.side_effect = _mock_responses({'3.2': ("%s/meta_tests/cassandra-dir-3.2" % os.getcwd(), '3.2.0')}) 47 | c = _check_with_params({ 48 | '--cassandra-dir': None, 49 | '--cassandra-version': '3.2' 50 | }) 51 | assert c.cassandra_version == '3.2' 52 | assert search("^3.2", str(c.cassandra_version_from_build)) 53 | 54 | def test_valid_cass_dir_no_version(self): 55 | c = 
_check_with_params({ 56 | }) 57 | assert c.cassandra_version is None 58 | assert c.cassandra_version_from_build == '4.0-beta' 59 | 60 | def test_no_cass_dir_no_version(self): 61 | _check_with_params_expect({ 62 | '--cassandra-dir': None 63 | }, "You must provide either --cassandra-dir or --cassandra-version") 64 | 65 | def test_illegal_args_combinations_for_resource_intensive_tests(self): 66 | _check_with_params_expect({ 67 | '--only-resource-intensive-tests': True, 68 | '--skip-resource-intensive-tests': True 69 | }, 'does not make any sense') 70 | 71 | _check_with_params_expect({ 72 | '--force-resource-intensive-tests': True, 73 | '--skip-resource-intensive-tests': True 74 | }, 'does not make any sense') 75 | 76 | _check_with_params_expect({ 77 | '--only-resource-intensive-tests': True, 78 | '--force-resource-intensive-tests': True, 79 | '--skip-resource-intensive-tests': True 80 | }, 'does not make any sense') 81 | 82 | def test_legal_args_combinations_for_resource_intensive_tests(self): 83 | c = _check_with_params({ 84 | '--only-resource-intensive-tests': True 85 | }) 86 | assert c.only_resource_intensive_tests 87 | assert not c.skip_resource_intensive_tests 88 | assert not c.force_execution_of_resource_intensive_tests 89 | 90 | c = _check_with_params({ 91 | '--only-resource-intensive-tests': True, 92 | '--force-resource-intensive-tests': True 93 | }) 94 | assert c.only_resource_intensive_tests 95 | assert not c.skip_resource_intensive_tests 96 | assert c.force_execution_of_resource_intensive_tests 97 | 98 | c = _check_with_params({ 99 | '--skip-resource-intensive-tests': True 100 | }) 101 | assert not c.only_resource_intensive_tests 102 | assert c.skip_resource_intensive_tests 103 | assert not c.force_execution_of_resource_intensive_tests 104 | 105 | c = _check_with_params({ 106 | }) 107 | assert not c.only_resource_intensive_tests 108 | assert not c.skip_resource_intensive_tests 109 | assert not c.force_execution_of_resource_intensive_tests 110 | 111 | def off_heap_memtables_not_supported(self): 112 | _check_with_params_expect({ 113 | '--cassandra-dir': "%s/meta_tests/cassandra-dir-3.2" % os.getcwd(), 114 | '--use-off-heap-memtables': True 115 | }, "The selected Cassandra version 3.2 doesn't support the provided option") 116 | 117 | def off_heap_memtables_supported(self): 118 | c = _check_with_params({ 119 | '--use-off-heap-memtables': True 120 | }) 121 | assert c.use_off_heap_memtables 122 | -------------------------------------------------------------------------------- /meta_tests/utils_test/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/apache/cassandra-dtest/28533ee239c23abdb1061dfeff793cc48bb3d8d7/meta_tests/utils_test/__init__.py -------------------------------------------------------------------------------- /metadata_test.py: -------------------------------------------------------------------------------- 1 | import threading 2 | import time 3 | import logging 4 | import pytest 5 | 6 | from dtest import Tester 7 | 8 | logger = logging.getLogger(__name__) 9 | 10 | 11 | class TestMetadata(Tester): 12 | 13 | def force_compact(self): 14 | cluster = self.cluster 15 | (node1, node2) = cluster.nodelist() 16 | node1.nodetool("compact keyspace1 standard1") 17 | 18 | def force_repair(self): 19 | cluster = self.cluster 20 | (node1, node2) = cluster.nodelist() 21 | node1.nodetool('repair keyspace1 standard1') 22 | 23 | def do_read(self): 24 | cluster = self.cluster 25 | (node1, node2) = cluster.nodelist() 
26 | 27 | node1.stress(['read', 'no-warmup', 'n=30000', '-schema', 'replication(factor=2)', 'compression=LZ4Compressor', 28 | '-rate', 'threads=1']) 29 | 30 | @pytest.mark.skip(reason='hangs CI') 31 | def test_metadata_reset_while_compact(self): 32 | """ 33 | Resets the schema while a compact, read and repair happens. 34 | All kinds of glorious things can fail. 35 | """ 36 | # while the schema is being reset, there will inevitably be some 37 | # queries that will error with this message 38 | self.fixture_dtest_setup.ignore_log_patterns = ['.*Unknown keyspace/cf pair.*'] 39 | 40 | cluster = self.cluster 41 | cluster.populate(2).start() 42 | (node1, node2) = cluster.nodelist() 43 | 44 | node1.nodetool("disableautocompaction") 45 | node1.nodetool("setcompactionthroughput 1") 46 | 47 | for i in range(3): 48 | node1.stress(['write', 'no-warmup', 'n=30000', '-schema', 'replication(factor=2)', 49 | 'compression=LZ4Compressor', '-rate', 'threads=5', '-pop', 'seq=1..30000']) 50 | node1.flush() 51 | 52 | thread = threading.Thread(target=self.force_compact) 53 | thread.start() 54 | time.sleep(1) 55 | 56 | thread2 = threading.Thread(target=self.force_repair) 57 | thread2.start() 58 | time.sleep(5) 59 | 60 | thread3 = threading.Thread(target=self.do_read) 61 | thread3.start() 62 | time.sleep(5) 63 | 64 | node1.nodetool("resetlocalschema") 65 | 66 | thread.join() 67 | thread2.join() 68 | thread3.join() 69 | -------------------------------------------------------------------------------- /mixed_version_test.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | import logging 3 | 4 | from cassandra import ConsistencyLevel, OperationTimedOut, ReadTimeout 5 | from cassandra.query import SimpleStatement 6 | 7 | from dtest import Tester 8 | 9 | since = pytest.mark.since 10 | logger = logging.getLogger(__name__) 11 | 12 | 13 | class TestSchemaChanges(Tester): 14 | 15 | @since('2.0') 16 | def test_friendly_unrecognized_table_handling(self): 17 | """ 18 | After upgrading one of two nodes, create a new table (which will 19 | not be propagated to the old node) and check that queries against 20 | that table result in user-friendly warning logs. 
21 | """ 22 | cluster = self.cluster 23 | cluster.populate(2) 24 | cluster.start() 25 | 26 | node1, node2 = cluster.nodelist() 27 | original_version = node1.get_cassandra_version() 28 | upgraded_version = None 29 | if original_version.vstring.startswith('2.0'): 30 | upgraded_version = 'github:apache/cassandra-2.1' 31 | elif original_version.vstring.startswith('2.1'): 32 | upgraded_version = 'github:apache/cassandra-2.2' 33 | else: 34 | pytest.skip("This test is only designed to work with 2.0 and 2.1 right now") 35 | 36 | # start out with a major behind the previous version 37 | 38 | # upgrade node1 39 | node1.stop() 40 | node1.set_install_dir(version=upgraded_version) 41 | logger.debug("Set new cassandra dir for %s: %s" % (node1.name, node1.get_install_dir())) 42 | 43 | node1.set_log_level("INFO") 44 | node1.start() 45 | 46 | session = self.patient_exclusive_cql_connection(node1) 47 | session.cluster.max_schema_agreement_wait = -1 # don't wait for schema agreement 48 | 49 | logger.debug("Creating keyspace and table") 50 | session.execute("CREATE KEYSPACE test_upgrades WITH replication={'class': 'SimpleStrategy', " 51 | "'replication_factor': '2'}") 52 | session.execute("CREATE TABLE test_upgrades.foo (a int primary key, b int)") 53 | 54 | pattern = r".*Got .* command for nonexistent table test_upgrades.foo.*" 55 | 56 | try: 57 | session.execute(SimpleStatement("SELECT * FROM test_upgrades.foo", consistency_level=ConsistencyLevel.ALL)) 58 | pytest.fail("expected failure") 59 | except (ReadTimeout, OperationTimedOut): 60 | logger.debug("Checking node2 for warning in log") 61 | node2.watch_log_for(pattern, timeout=10) 62 | 63 | # non-paged range slice 64 | try: 65 | session.execute(SimpleStatement("SELECT * FROM test_upgrades.foo", consistency_level=ConsistencyLevel.ALL, 66 | fetch_size=None)) 67 | pytest.fail("expected failure") 68 | except (ReadTimeout, OperationTimedOut): 69 | logger.debug("Checking node2 for warning in log") 70 | pattern = r".*Got .* command for nonexistent table test_upgrades.foo.*" 71 | node2.watch_log_for(pattern, timeout=10) 72 | 73 | # single-partition slice 74 | try: 75 | for i in range(20): 76 | session.execute(SimpleStatement("SELECT * FROM test_upgrades.foo WHERE a = %d" % (i,), 77 | consistency_level=ConsistencyLevel.ALL, fetch_size=None)) 78 | pytest.fail("expected failure") 79 | except (ReadTimeout, OperationTimedOut): 80 | logger.debug("Checking node2 for warning in log") 81 | pattern = r".*Got .* command for nonexistent table test_upgrades.foo.*" 82 | node2.watch_log_for(pattern, timeout=10) 83 | -------------------------------------------------------------------------------- /multidc_putget_test.py: -------------------------------------------------------------------------------- 1 | import logging 2 | 3 | from dtest import Tester, create_ks, create_cf 4 | from tools.data import putget 5 | 6 | logger = logging.getLogger(__name__) 7 | 8 | 9 | class TestMultiDCPutGet(Tester): 10 | 11 | def test_putget_2dc_rf1(self): 12 | """ Simple put-get test for 2 DC with one node each (RF=1) [catches #3539] """ 13 | cluster = self.cluster 14 | cluster.populate([1, 1]).start() 15 | 16 | session = self.patient_cql_connection(cluster.nodelist()[0]) 17 | create_ks(session, 'ks', {'dc1': 1, 'dc2': 1}) 18 | create_cf(session, 'cf') 19 | 20 | putget(cluster, session) 21 | 22 | def test_putget_2dc_rf2(self): 23 | """ Simple put-get test for 2 DC with 2 node each (RF=2) -- tests cross-DC efficient writes """ 24 | cluster = self.cluster 25 | cluster.populate([2, 2]).start() 26 | 27 
| session = self.patient_cql_connection(cluster.nodelist()[0]) 28 | create_ks(session, 'ks', {'dc1': 2, 'dc2': 2}) 29 | create_cf(session, 'cf') 30 | 31 | putget(cluster, session) 32 | -------------------------------------------------------------------------------- /pending_range_test.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import time 3 | 4 | import pytest 5 | import re 6 | import threading 7 | 8 | from cassandra.query import SimpleStatement 9 | 10 | from dtest import Tester, create_ks, mk_bman_path 11 | 12 | from distutils.version import LooseVersion 13 | 14 | logger = logging.getLogger(__name__) 15 | 16 | 17 | @pytest.mark.no_vnodes 18 | class TestPendingRangeMovements(Tester): 19 | 20 | @pytest.mark.resource_intensive 21 | def test_pending_range(self): 22 | """ 23 | @jira_ticket CASSANDRA-10887 24 | """ 25 | cluster = self.cluster 26 | # If we are on 2.1, we need to set the log level to debug or higher, as debug.log does not exist. 27 | if cluster.version() < '2.2': 28 | cluster.set_log_level('DEBUG') 29 | 30 | # Create 5 node cluster 31 | ring_delay_ms = 3_600_000 # 1 hour 32 | install_byteman = cluster.version() >= '5.1' 33 | cluster.populate(5, install_byteman=install_byteman).start(jvm_args=['-Dcassandra.ring_delay_ms={}'.format(ring_delay_ms)]) 34 | node1, node2 = cluster.nodelist()[0:2] 35 | 36 | # Set up RF=3 keyspace 37 | session = self.patient_cql_connection(node1) 38 | create_ks(session, 'ks', 3) 39 | 40 | session.execute("CREATE TABLE users (login text PRIMARY KEY, email text, name text, login_count int)") 41 | 42 | # We use the partition key 'jdoe3' because it belongs to node1. 43 | # The key MUST belong to node1 to repro the bug. 44 | session.execute("INSERT INTO users (login, email, name, login_count) VALUES ('jdoe3', 'jdoe@abc.com', 'Jane Doe', 1) IF NOT EXISTS;") 45 | 46 | lwt_query = SimpleStatement("UPDATE users SET email = 'janedoe@abc.com' WHERE login = 'jdoe3' IF email = 'jdoe@abc.com'") 47 | 48 | # Show we can execute LWT no problem 49 | for i in range(1000): 50 | session.execute(lwt_query) 51 | 52 | token = '-634023222112864484' 53 | 54 | # delay progress of the move operation to give a chance to kill the moving node 55 | if self.cluster.version() >= LooseVersion('5.1'): 56 | node1.byteman_submit([mk_bman_path('post5.1/delay_streaming_for_move.btm')]) 57 | 58 | mark = node1.mark_log() 59 | # Move a node without waiting for the response of nodetool, so we don't have to wait for ring_delay 60 | threading.Thread(target=(lambda: node1.nodetool('move {}'.format(token)))).start() 61 | # Watch the log so we know when the node is moving 62 | node1.watch_log_for('Moving .* to \[?{}\]?'.format(token), timeout=10, from_mark=mark) 63 | if self.cluster.version() < LooseVersion('5.1'): 64 | node1.watch_log_for('Sleeping {} ms before start streaming/fetching ranges'.format(ring_delay_ms), 65 | timeout=10, from_mark=mark) 66 | 67 | # Watch the logs so we know when all the nodes see the status update to MOVING 68 | for node in cluster.nodelist(): 69 | if cluster.version() >= '2.2': 70 | if cluster.version() >= '4.0': 71 | node.watch_log_for('127.0.0.1:7000 state MOVING', timeout=10, filename='debug.log') 72 | else: 73 | node.watch_log_for('127.0.0.1 state moving', timeout=10, filename='debug.log') 74 | else: 75 | # 2.1 doesn't have debug.log, so we are logging at trace, and look 76 | # in the system.log file 77 | node.watch_log_for('127.0.0.1 state moving', timeout=10, filename='system.log') 78 | 79 | # Once the 
node is MOVING, kill it immediately, let the other nodes notice 80 | node1.stop(gently=False, wait_other_notice=True) 81 | 82 | # Verify other nodes believe that the killed node is Down/Moving 83 | out, _, _ = node2.nodetool('ring') 84 | logger.debug("Nodetool Ring output: {}".format(out)) 85 | assert re.search('127\.0\.0\.1.*?Down.*?Moving', out) is not None 86 | 87 | # Check we can still execute LWT 88 | for i in range(1000): 89 | session.execute(lwt_query) 90 | -------------------------------------------------------------------------------- /plugins/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/apache/cassandra-dtest/28533ee239c23abdb1061dfeff793cc48bb3d8d7/plugins/__init__.py -------------------------------------------------------------------------------- /plugins/assert_tools.py: -------------------------------------------------------------------------------- 1 | """ 2 | Copyright 2016 Oliver Schoenborn. BSD 3-Clause license (see __license__ at bottom of this file for details). 3 | 4 | This module is part of the nose2pytest distribution. 5 | 6 | This module's assert_ functions provide drop-in replacements for nose.tools.assert_ functions (many of which are 7 | pep-8-ized extractions from Python's unittest.case.TestCase methods). As such, it can be imported in a test 8 | suite run by py.test, to replace the nose imports with functions that rely on py.test's assertion 9 | introspection for error reporting. When combined with running nose2pytest.py on your test suite, this 10 | module may be sufficient to decrease your test suite's third-party dependencies by 1. 11 | """ 12 | 13 | import unittest 14 | 15 | 16 | __all__ = [ 17 | 'assert_almost_equal', 18 | 'assert_not_almost_equal', 19 | 'assert_dict_contains_subset', 20 | 21 | 'assert_raises_regex', 22 | 'assert_raises_regexp', 23 | 'assert_regexp_matches', 24 | 'assert_warns_regex', 25 | ] 26 | 27 | 28 | def assert_almost_equal(a, b, places=7, msg=None): 29 | """ 30 | Fail if the two objects are unequal as determined by their 31 | difference rounded to the given number of decimal places 32 | and comparing to zero. 33 | 34 | Note that decimal places (from zero) are usually not the same 35 | as significant digits (measured from the most signficant digit). 36 | 37 | See the builtin round() function for places parameter. 38 | """ 39 | if msg is None: 40 | assert round(abs(b - a), places) == 0 41 | else: 42 | assert round(abs(b - a), places) == 0, msg 43 | 44 | 45 | def assert_not_almost_equal(a, b, places=7, msg=None): 46 | """ 47 | Fail if the two objects are equal as determined by their 48 | difference rounded to the given number of decimal places 49 | and comparing to zero. 50 | 51 | Note that decimal places (from zero) are usually not the same 52 | as significant digits (measured from the most signficant digit). 53 | 54 | See the builtin round() function for places parameter. 55 | """ 56 | if msg is None: 57 | assert round(abs(b - a), places) != 0 58 | else: 59 | assert round(abs(b - a), places) != 0, msg 60 | 61 | 62 | def assert_dict_contains_subset(subset, dictionary, msg=None): 63 | """ 64 | Checks whether dictionary is a superset of subset. If not, the assertion message will have useful details, 65 | unless msg is given, then msg is output. 
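The places parameter of assert_almost_equal above counts decimal places, not significant digits, which is worth a worked case:

    # a difference of 1e-05 vanishes when rounded to 4 decimal places...
    assert_almost_equal(0.12345, 0.12346, places=4)      # passes: round(1e-05, 4) == 0.0
    # ...but survives rounding to 5 places, so the inverse assertion holds
    assert_not_almost_equal(0.12345, 0.12346, places=5)  # passes: round(1e-05, 5) != 0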
66 | """ 67 | dictionary = dictionary 68 | missing_keys = sorted(list(set(subset.keys()) - set(dictionary.keys()))) 69 | mismatch_vals = {k: (subset[k], dictionary[k]) for k in subset if k in dictionary and subset[k] != dictionary[k]} 70 | if msg is None: 71 | assert missing_keys == [], 'Missing keys = {}'.format(missing_keys) 72 | assert mismatch_vals == {}, 'Mismatched values (s, d) = {}'.format(mismatch_vals) 73 | else: 74 | assert missing_keys == [], msg 75 | assert mismatch_vals == {}, msg 76 | 77 | 78 | # make other unittest.TestCase methods available as-is as functions; trick taken from Nose 79 | 80 | class _Dummy(unittest.TestCase): 81 | def do_nothing(self): 82 | pass 83 | 84 | _t = _Dummy('do_nothing') 85 | 86 | assert_raises_regex=_t.assertRaisesRegex, 87 | assert_raises_regexp=_t.assertRaisesRegexp, 88 | assert_regexp_matches=_t.assertRegexpMatches, 89 | assert_warns_regex=_t.assertWarnsRegex, 90 | 91 | del _Dummy 92 | del _t 93 | 94 | 95 | # py.test integration: add all assert_ function to the pytest package namespace 96 | 97 | # Use similar trick as Nose to bring in bound methods from unittest.TestCase as free functions: 98 | 99 | def pytest_namespace() -> {str: callable}: 100 | namespace = {} 101 | for name, obj in globals().items(): 102 | if name.startswith('assert_'): 103 | namespace[name] = obj 104 | 105 | return namespace 106 | 107 | 108 | # licensing 109 | 110 | __license__ = """ 111 | Copyright (c) 2016, Oliver Schoenborn 112 | All rights reserved. 113 | 114 | Redistribution and use in source and binary forms, with or without 115 | modification, are permitted provided that the following conditions are met: 116 | 117 | * Redistributions of source code must retain the above copyright notice, this 118 | list of conditions and the following disclaimer. 119 | 120 | * Redistributions in binary form must reproduce the above copyright notice, 121 | this list of conditions and the following disclaimer in the documentation 122 | and/or other materials provided with the distribution. 123 | 124 | * Neither the name of nose2pytest nor the names of its 125 | contributors may be used to endorse or promote products derived from 126 | this software without specific prior written permission. 127 | 128 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 129 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 130 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 131 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 132 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 133 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 134 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 135 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 136 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 137 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
138 | """ 139 | -------------------------------------------------------------------------------- /prepared_statements_test.py: -------------------------------------------------------------------------------- 1 | import logging 2 | 3 | from cassandra import InvalidRequest 4 | 5 | from dtest import Tester 6 | 7 | logger = logging.getLogger(__name__) 8 | 9 | KEYSPACE = "foo" 10 | 11 | 12 | class TestPreparedStatements(Tester): 13 | """ 14 | Tests for pushed native protocol notification from Cassandra. 15 | """ 16 | 17 | def test_dropped_index(self): 18 | """ 19 | Prepared statements using dropped indexes should be handled correctly 20 | """ 21 | self.cluster.populate(1).start() 22 | node = list(self.cluster.nodes.values())[0] 23 | 24 | session = self.patient_cql_connection(node) 25 | session.execute(""" 26 | CREATE KEYSPACE IF NOT EXISTS %s 27 | WITH replication = { 'class': 'SimpleStrategy', 'replication_factor': '1' } 28 | """ % KEYSPACE) 29 | 30 | session.set_keyspace(KEYSPACE) 31 | session.execute("CREATE TABLE IF NOT EXISTS mytable (a int PRIMARY KEY, b int)") 32 | session.execute("CREATE INDEX IF NOT EXISTS bindex ON mytable(b)") 33 | 34 | insert_statement = session.prepare("INSERT INTO mytable (a, b) VALUES (?, ?)") 35 | for i in range(10): 36 | session.execute(insert_statement, (i, 0)) 37 | 38 | query_statement = session.prepare("SELECT * FROM mytable WHERE b=?") 39 | print("Number of matching rows:", len(list(session.execute(query_statement, (0,))))) 40 | 41 | session.execute("DROP INDEX bindex") 42 | 43 | try: 44 | print("Executing prepared statement with dropped index...") 45 | session.execute(query_statement, (0,)) 46 | except InvalidRequest as ir: 47 | print(ir) 48 | except Exception: 49 | raise 50 | -------------------------------------------------------------------------------- /pytest.ini: -------------------------------------------------------------------------------- 1 | [pytest] 2 | addopts = --show-capture=stdout --timeout=900 3 | python_files = test_*.py *_test.py *_tests.py 4 | junit_suite_name = Cassandra dtests 5 | log_print = True 6 | log_cli = True 7 | log_cli_level = DEBUG 8 | log_cli_format = %(asctime)s,%(msecs)03d %(name)s %(levelname)s %(message)s 9 | log_cli_date_format = %Y-%m-%d %H:%M:%S 10 | log_file_level = DEBUG 11 | log_file_format = %(asctime)s,%(msecs)03d %(name)s %(levelname)s %(message)s 12 | log_file_date_format = %Y-%m-%d %H:%M:%S 13 | markers = 14 | since 15 | vnodes 16 | no_vnodes 17 | resource_intensive 18 | offheap_memtables 19 | no_offheap_memtables 20 | ported_to_in_jvm 21 | env 22 | skip_version 23 | upgrade_test 24 | depends_cqlshlib 25 | depends_driver 26 | -------------------------------------------------------------------------------- /range_ghost_test.py: -------------------------------------------------------------------------------- 1 | import time 2 | import logging 3 | 4 | from tools.assertions import assert_length_equal 5 | from dtest import Tester, create_ks, create_cf 6 | 7 | logger = logging.getLogger(__name__) 8 | 9 | 10 | class TestRangeGhosts(Tester): 11 | 12 | def test_ghosts(self): 13 | """ Check range ghost are correctly removed by the system """ 14 | cluster = self.cluster 15 | cluster.populate(1).start() 16 | [node1] = cluster.nodelist() 17 | 18 | time.sleep(.5) 19 | session = self.cql_connection(node1) 20 | create_ks(session, 'ks', 1) 21 | create_cf(session, 'cf', gc_grace=0, columns={'c': 'text'}) 22 | 23 | rows = 1000 24 | 25 | for i in range(0, rows): 26 | session.execute("UPDATE cf SET c = 'value' WHERE key = 'k%i'" 
% i) 27 | 28 | res = list(session.execute("SELECT * FROM cf LIMIT 10000")) 29 | assert_length_equal(res, rows) 30 | 31 | node1.flush() 32 | 33 | for i in range(0, rows // 2): 34 | session.execute("DELETE FROM cf WHERE key = 'k%i'" % i) 35 | 36 | res = list(session.execute("SELECT * FROM cf LIMIT 10000")) 37 | # no ghosts in 1.2+ 38 | assert_length_equal(res, rows // 2) 39 | 40 | node1.flush() 41 | time.sleep(1) # make sure tombstones are collected 42 | node1.compact() 43 | 44 | res = list(session.execute("SELECT * FROM cf LIMIT 10000")) 45 | assert_length_equal(res, rows // 2) 46 | -------------------------------------------------------------------------------- /read_failures_test.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import pytest 3 | 4 | from cassandra import ConsistencyLevel, ReadFailure, ReadTimeout 5 | from cassandra.policies import FallthroughRetryPolicy 6 | from cassandra.query import SimpleStatement 7 | 8 | from dtest import Tester 9 | 10 | since = pytest.mark.since 11 | logger = logging.getLogger(__name__) 12 | 13 | KEYSPACE = "readfailures" 14 | 15 | 16 | class TestReadFailures(Tester): 17 | """ 18 | Tests for read failures in the replicas, introduced as a part of 19 | @jira_ticket CASSANDRA-12311. 20 | """ 21 | @pytest.fixture(autouse=True) 22 | def fixture_add_additional_log_patterns(self, fixture_dtest_setup): 23 | fixture_dtest_setup.ignore_log_patterns = ( 24 | "Scanned over [1-9][0-9]* tombstones", # This is expected when testing read failures due to tombstones 25 | ) 26 | return fixture_dtest_setup 27 | 28 | @pytest.fixture(scope='function', autouse=True) 29 | def fixture_dtest_setup_params(self): 30 | self.tombstone_failure_threshold = 500 31 | self.replication_factor = 3 32 | self.consistency_level = ConsistencyLevel.ALL 33 | self.expected_expt = ReadFailure 34 | 35 | def _prepare_cluster(self): 36 | self.cluster.set_configuration_options( 37 | values={'tombstone_failure_threshold': self.tombstone_failure_threshold} 38 | ) 39 | self.cluster.populate(3) 40 | self.cluster.start() 41 | self.nodes = list(self.cluster.nodes.values()) 42 | 43 | session = self.patient_exclusive_cql_connection(self.nodes[0], protocol_version=self.protocol_version) 44 | 45 | session.execute(""" 46 | CREATE KEYSPACE IF NOT EXISTS %s 47 | WITH replication = { 'class': 'SimpleStrategy', 'replication_factor': '%s' } 48 | """ % (KEYSPACE, self.replication_factor)) 49 | session.set_keyspace(KEYSPACE) 50 | session.execute("CREATE TABLE IF NOT EXISTS tombstonefailure (id int, c int, value text, primary key(id, c))") 51 | 52 | return session 53 | 54 | def _insert_tombstones(self, session, number_of_tombstones): 55 | for num_id in range(number_of_tombstones): 56 | session.execute(SimpleStatement("DELETE value FROM tombstonefailure WHERE id = 0 and c = {}".format(num_id), 57 | consistency_level=self.consistency_level, retry_policy=FallthroughRetryPolicy())) 58 | 59 | def _perform_cql_statement(self, session, text_statement): 60 | statement = SimpleStatement(text_statement, 61 | consistency_level=self.consistency_level, 62 | retry_policy=FallthroughRetryPolicy()) 63 | 64 | if self.expected_expt is None: 65 | session.execute(statement) 66 | else: 67 | with pytest.raises(self.expected_expt) as cm: 68 | # On 2.1, we won't return the ReadTimeout from coordinator until actual timeout, 69 | # so we need to up the default timeout of the driver session 70 | session.execute(statement, timeout=15) 71 | return cm._excinfo[1] 72 | 73 | def 
_assert_error_code_map_exists_with_code(self, exception, expected_code): 74 | """ 75 | Asserts that the given exception contains an error code map 76 | where at least one node responded with some expected code. 77 | This is meant for testing failure exceptions on protocol v5. 78 | """ 79 | assert exception is not None 80 | assert exception.error_code_map is not None 81 | expected_code_found = False 82 | for error_code in list(exception.error_code_map.values()): 83 | if error_code == expected_code: 84 | expected_code_found = True 85 | break 86 | assert expected_code_found, "The error code map did not contain " + str(expected_code) 87 | 88 | @since('2.1') 89 | def test_tombstone_failure_v3(self): 90 | """ 91 | A failed read due to tombstones at v3 should result in a ReadTimeout 92 | """ 93 | self.protocol_version = 3 94 | self.expected_expt = ReadTimeout 95 | session = self._prepare_cluster() 96 | self._insert_tombstones(session, 600) 97 | self._perform_cql_statement(session, "SELECT value FROM tombstonefailure") 98 | 99 | @since('2.2') 100 | def test_tombstone_failure_v4(self): 101 | """ 102 | A failed read due to tombstones at v4 should result in a ReadFailure 103 | """ 104 | self.protocol_version = 4 105 | session = self._prepare_cluster() 106 | self._insert_tombstones(session, 600) 107 | self._perform_cql_statement(session, "SELECT value FROM tombstonefailure") 108 | 109 | @since('4.0') 110 | def test_tombstone_failure_v5(self): 111 | """ 112 | A failed read due to tombstones at v5 should result in a ReadFailure with 113 | an error code map containing error code 0x0001 (indicating that the replica(s) 114 | read too many tombstones) 115 | """ 116 | self.protocol_version = 5 117 | session = self._prepare_cluster() 118 | self._insert_tombstones(session, 600) 119 | exc = self._perform_cql_statement(session, "SELECT value FROM tombstonefailure") 120 | self._assert_error_code_map_exists_with_code(exc, 0x0001) 121 | -------------------------------------------------------------------------------- /refresh_test.py: -------------------------------------------------------------------------------- 1 | import time 2 | 3 | from dtest import Tester 4 | from ccmlib.node import ToolError 5 | import pytest 6 | 7 | since = pytest.mark.since 8 | 9 | @since('3.0') 10 | class TestRefresh(Tester): 11 | def test_refresh_deadlock_startup(self): 12 | """ Test refresh deadlock during startup (CASSANDRA-14310) """ 13 | self.cluster.populate(1) 14 | node = self.cluster.nodelist()[0] 15 | node.byteman_port = '8100' 16 | node.import_config_files() 17 | self.cluster.start() 18 | session = self.patient_cql_connection(node) 19 | session.execute("CREATE KEYSPACE ks WITH replication = {'class':'SimpleStrategy', 'replication_factor':1}") 20 | session.execute("CREATE TABLE ks.a (id int primary key, d text)") 21 | session.execute("CREATE TABLE ks.b (id int primary key, d text)") 22 | node.nodetool("disableautocompaction") # make sure we have more than 1 sstable 23 | for x in range(0, 10): 24 | session.execute("INSERT INTO ks.a (id, d) VALUES (%d, '%d %d')"%(x, x, x)) 25 | session.execute("INSERT INTO ks.b (id, d) VALUES (%d, '%d %d')"%(x, x, x)) 26 | node.flush() 27 | node.stop() 28 | node.update_startup_byteman_script('byteman/sstable_open_delay.btm') 29 | node.start() 30 | node.watch_log_for("opening keyspace ks", filename="debug.log") 31 | time.sleep(5) 32 | for x in range(0, 20): 33 | try: 34 | node.nodetool("refresh ks a") 35 | node.nodetool("refresh ks b") 36 | except ToolError: 37 | pass # this is OK post-14310 - we 
just don't want to hang forever 38 | time.sleep(1) 39 | -------------------------------------------------------------------------------- /repair_tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/apache/cassandra-dtest/28533ee239c23abdb1061dfeff793cc48bb3d8d7/repair_tests/__init__.py -------------------------------------------------------------------------------- /repair_tests/preview_repair_test.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | import time 3 | 4 | from cassandra import ConsistencyLevel 5 | from cassandra.query import SimpleStatement 6 | 7 | from dtest import Tester, create_ks 8 | from repair_tests.incremental_repair_test import assert_parent_repair_session_count 9 | from tools.data import create_c1c2_table 10 | from tools.jmxutils import make_mbean, JolokiaAgent 11 | 12 | since = pytest.mark.since 13 | 14 | 15 | @since('4.0') 16 | class TestPreviewRepair(Tester): 17 | 18 | @since('4.0') 19 | def test_parent_repair_session_cleanup(self): 20 | """ 21 | Calls incremental repair preview on 3 node cluster and verifies if all ParentRepairSession objects are cleaned 22 | @jira_ticket CASSANDRA-16446 23 | """ 24 | self.cluster.populate(3).start() 25 | session = self.patient_cql_connection(self.cluster.nodelist()[0]) 26 | create_ks(session, 'ks', 2) 27 | create_c1c2_table(self, session) 28 | 29 | for node in self.cluster.nodelist(): 30 | node.repair(options=['ks', '--preview']) 31 | 32 | assert_parent_repair_session_count(self.cluster.nodelist(), 0) 33 | 34 | @pytest.mark.no_vnodes 35 | def test_preview(self): 36 | """ Test that preview correctly detects out of sync data """ 37 | cluster = self.cluster 38 | cluster.set_configuration_options(values={'hinted_handoff_enabled': False, 'commitlog_sync_period_in_ms': 500}) 39 | cluster.populate(3).start() 40 | node1, node2, node3 = cluster.nodelist() 41 | 42 | session = self.patient_exclusive_cql_connection(node3) 43 | session.execute("CREATE KEYSPACE ks WITH REPLICATION={'class':'SimpleStrategy', 'replication_factor': 3}") 44 | session.execute("CREATE TABLE ks.tbl (k INT PRIMARY KEY, v INT)") 45 | 46 | # everything should be in sync 47 | result = node1.repair(options=['ks', '--preview']) 48 | assert "Previewed data was in sync" in result.stdout 49 | assert_no_repair_history(session) 50 | assert preview_failure_count(node1) == 0 51 | 52 | # make data inconsistent between nodes 53 | stmt = SimpleStatement("INSERT INTO ks.tbl (k,v) VALUES (%s, %s)") 54 | stmt.consistency_level = ConsistencyLevel.ALL 55 | for i in range(10): 56 | session.execute(stmt, (i, i)) 57 | node3.flush() 58 | time.sleep(1) 59 | node3.stop(gently=False) 60 | stmt.consistency_level = ConsistencyLevel.QUORUM 61 | 62 | session = self.exclusive_cql_connection(node1) 63 | for i in range(10): 64 | session.execute(stmt, (i + 10, i + 10)) 65 | node1.flush() 66 | time.sleep(1) 67 | node1.stop(gently=False) 68 | node3.start(wait_for_binary_proto=True) 69 | session = self.exclusive_cql_connection(node2) 70 | for i in range(10): 71 | session.execute(stmt, (i + 20, i + 20)) 72 | node1.start(wait_for_binary_proto=True) 73 | 74 | # data should not be in sync for full and unrepaired previews 75 | result = node1.repair(options=['ks', '--preview']) 76 | assert "Total estimated streaming" in result.stdout 77 | assert "Previewed data was in sync" not in result.stdout 78 | assert preview_failure_count(node1) == 1 79 | 80 | result = 
node1.repair(options=['ks', '--preview', '--full']) 81 | assert "Total estimated streaming" in result.stdout 82 | assert "Previewed data was in sync" not in result.stdout 83 | assert preview_failure_count(node1) == 2 84 | 85 | # repaired data should be in sync anyway 86 | result = node1.repair(options=['ks', '--validate']) 87 | assert "Repaired data is in sync" in result.stdout 88 | 89 | assert_no_repair_history(session) 90 | 91 | # repair the data... 92 | node1.repair(options=['ks']) 93 | for node in cluster.nodelist(): 94 | node.nodetool('compact ks tbl') 95 | 96 | # ...and everything should be in sync 97 | result = node1.repair(options=['ks', '--preview']) 98 | assert "Previewed data was in sync" in result.stdout 99 | # data is repaired, previewFailure metric should remain same 100 | assert preview_failure_count(node1) == 2 101 | 102 | result = node1.repair(options=['ks', '--preview', '--full']) 103 | assert "Previewed data was in sync" in result.stdout 104 | assert preview_failure_count(node1) == 2 105 | 106 | result = node1.repair(options=['ks', '--validate']) 107 | assert "Repaired data is in sync" in result.stdout 108 | 109 | assert preview_failure_count(node2) == 0 110 | assert preview_failure_count(node3) == 0 111 | 112 | 113 | def assert_no_repair_history(session): 114 | rows = session.execute("select * from system_distributed.repair_history") 115 | assert rows.current_rows == [] 116 | rows = session.execute("select * from system_distributed.parent_repair_history") 117 | assert rows.current_rows == [] 118 | 119 | 120 | def preview_failure_count(node): 121 | mbean = make_mbean('metrics', type='Repair', name='PreviewFailures') 122 | with JolokiaAgent(node) as jmx: 123 | return jmx.read_attribute(mbean, 'Count') 124 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | -e git+https://github.com/datastax/python-driver.git@cassandra-test#egg=cassandra-driver 2 | # For ccm changes, please, create a PR to the master branch of the ccm repository. 3 | # New commits to master are not automatically used by the python dtests. And changes 4 | # made to ccm and intended/needed to be used by the dtests can be put in use by re-tagging cassandra-test to master's HEAD. 
5 | # The re-tagging approach is 6 | # git tag -a -f cassandra-test 7 | # git push origin :refs/tags/cassandra-test 8 | # git push -f origin refs/tags/cassandra-test 9 | # 10 | # In case you want to test a patch with your own CCM branch, further to changing below CCM repo and branch name, you need to add -e flag at the beginning 11 | # Example: -e git+https://github.com/userb/ccm.git@cassandra-17182#egg=ccm 12 | git+https://github.com/apache/cassandra-ccm.git@cassandra-test#egg=ccm 13 | click==8.0.4 14 | decorator==5.1.1 15 | docopt==0.6.2 16 | enum34==1.1.10 17 | exceptiongroup==0.0.0a0 18 | flaky==3.8.1 19 | geomet==0.2.1.post1 20 | iniconfig==1.1.1 21 | lxml==5.1.0 22 | mock==5.1.0 23 | netifaces==0.11.0 24 | packaging==21.3 25 | parse==1.20.1 26 | pluggy==1.0.0 27 | psutil==5.9.8 28 | py==1.11.0 29 | pycodestyle==2.10.0 30 | pyparsing==3.1.2 31 | pytest==7.0.1 32 | pytest-repeat==0.9.1 33 | pytest-timeout==2.1.0 34 | PyYAML==6.0.1 35 | six==1.16.0 36 | soupsieve==2.3.2.post1 37 | thrift==0.16.0 38 | tomli==1.2.3 39 | -------------------------------------------------------------------------------- /seed_test.py: -------------------------------------------------------------------------------- 1 | from ccmlib import node 2 | from dtest import Tester 3 | from time import sleep 4 | 5 | import pytest 6 | 7 | since = pytest.mark.since 8 | 9 | 10 | class TestGossiper(Tester): 11 | """ 12 | Test gossip states 13 | """ 14 | 15 | @since('3.11.2', max_version='5.0.x') 16 | def test_startup_no_live_seeds(self): 17 | """ 18 | Test that a node won't start with no live seeds. 19 | @jira_ticket CASSANDRA-13851 20 | """ 21 | 22 | self.fixture_dtest_setup.allow_log_errors = True 23 | self.cluster.populate(1) 24 | node1 = self.cluster.nodelist()[0] 25 | self.cluster.set_configuration_options({ 26 | 'seed_provider': [{'class_name': 'org.apache.cassandra.locator.SimpleSeedProvider', 27 | 'parameters': [{'seeds': '127.0.0.2'}] # dummy node doesn't exist 28 | }] 29 | }) 30 | 31 | try: 32 | STARTUP_TIMEOUT = 15 # seconds 33 | RING_DELAY = 10000 # ms 34 | # set startup timeout > ring delay so that startup failure happens before the call to start returns 35 | node1.start(wait_for_binary_proto=STARTUP_TIMEOUT, jvm_args=['-Dcassandra.ring_delay_ms={}'.format(RING_DELAY)]) 36 | except node.TimeoutError: 37 | self.assert_log_had_msg(node1, "Unable to gossip with any peers") 38 | except Exception as e: 39 | raise e 40 | else: 41 | pytest.fail("Expecting startup to raise a TimeoutError, but nothing was raised.") 42 | 43 | @since('3.11.2', max_version='5.0.x') 44 | def test_startup_non_seed_with_peers(self): 45 | """ 46 | Test that a node can start if peers are alive, or if a node has been bootstrapped 47 | but there are no live seeds or peers 48 | @jira_ticket CASSANDRA-13851 49 | """ 50 | 51 | self.fixture_dtest_setup.allow_log_errors = True 52 | 53 | self.cluster.populate(3) 54 | 55 | node1, node2, node3 = self.cluster.nodelist() 56 | 57 | self.cluster.start() 58 | node3.stop(wait=True) 59 | node1.stop(wait=True) 60 | self.cluster.set_configuration_options({ 61 | 'seed_provider': [{'class_name': 'org.apache.cassandra.locator.SimpleSeedProvider', 62 | 'parameters': [{'seeds': '127.0.0.1'}] 63 | }] 64 | }) 65 | 66 | # test non seed node can start when peer is started but seed isn't 67 | node3.start(wait_other_notice=False, wait_for_binary_proto=120) 68 | self.assert_log_had_msg(node3, "Received an ack from {}, who isn't a seed. Ensure your seed list includes a live node. 
Exiting shadow round".format(node2.address_for_current_version_slashy()), timeout=60) 69 | node2.stop(wait=False) 70 | node3.stop(wait=True) 71 | 72 | # test seed node starts when no other nodes started 73 | node1.start(wait_other_notice=False, wait_for_binary_proto=120) 74 | self.assert_log_had_msg(node1, 'Unable to gossip with any peers but continuing anyway since node is in its own seed list', timeout=60) 75 | 76 | @since('3.11.2', max_version='5.0.x') 77 | def test_startup_after_ring_delay(self): 78 | """ 79 | Tests that if we start a node with no live seeds, then start a seed after RING_DELAY 80 | we will still join the ring. More broadly tests that starting a seed while a node is in 81 | shadow round will still allow that node to join the ring. 82 | @jira_ticket CASSANDRA-13851 83 | """ 84 | RING_DELAY = 15000 # ms 85 | self.fixture_dtest_setup.allow_log_errors = True 86 | self.cluster.populate(2) 87 | node1, node2 = self.cluster.nodelist() 88 | 89 | node2.start(wait_other_notice=False, jvm_args=['-Dcassandra.ring_delay_ms={}'.format(RING_DELAY)], verbose=True) 90 | node2.watch_log_for('Starting shadow gossip round to check for endpoint collision', filename='debug.log') 91 | sleep(RING_DELAY / 1000) 92 | # Start seed, ensure node2 joins before it exits shadow round. 93 | node1.start(wait_for_binary_proto=120) 94 | self.assert_log_had_msg(node2, 'Starting listening for CQL clients', timeout=60) 95 | -------------------------------------------------------------------------------- /sstables/ttl_test/2.1/ks-ttl_table-ka-1-CompressionInfo.db: -------------------------------------------------------------------------------- 1 | LZ4CompressorP -------------------------------------------------------------------------------- /sstables/ttl_test/2.1/ks-ttl_table-ka-1-Data.db: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/apache/cassandra-dtest/28533ee239c23abdb1061dfeff793cc48bb3d8d7/sstables/ttl_test/2.1/ks-ttl_table-ka-1-Data.db -------------------------------------------------------------------------------- /sstables/ttl_test/2.1/ks-ttl_table-ka-1-Digest.sha1: -------------------------------------------------------------------------------- 1 | 1496387912 -------------------------------------------------------------------------------- /sstables/ttl_test/2.1/ks-ttl_table-ka-1-Filter.db: -------------------------------------------------------------------------------- 1 | "  -------------------------------------------------------------------------------- /sstables/ttl_test/2.1/ks-ttl_table-ka-1-Index.db: -------------------------------------------------------------------------------- 1 |  -------------------------------------------------------------------------------- /sstables/ttl_test/2.1/ks-ttl_table-ka-1-Statistics.db: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/apache/cassandra-dtest/28533ee239c23abdb1061dfeff793cc48bb3d8d7/sstables/ttl_test/2.1/ks-ttl_table-ka-1-Statistics.db -------------------------------------------------------------------------------- /sstables/ttl_test/2.1/ks-ttl_table-ka-1-Summary.db: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/apache/cassandra-dtest/28533ee239c23abdb1061dfeff793cc48bb3d8d7/sstables/ttl_test/2.1/ks-ttl_table-ka-1-Summary.db -------------------------------------------------------------------------------- 
/sstables/ttl_test/2.1/ks-ttl_table-ka-1-TOC.txt: -------------------------------------------------------------------------------- 1 | Digest.sha1 2 | Filter.db 3 | CompressionInfo.db 4 | Index.db 5 | Summary.db 6 | Statistics.db 7 | Data.db 8 | TOC.txt 9 | -------------------------------------------------------------------------------- /sstables/ttl_test/2.1/ks-ttl_table-ka-2-CompressionInfo.db: -------------------------------------------------------------------------------- 1 | LZ4CompressorD -------------------------------------------------------------------------------- /sstables/ttl_test/2.1/ks-ttl_table-ka-2-Data.db: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/apache/cassandra-dtest/28533ee239c23abdb1061dfeff793cc48bb3d8d7/sstables/ttl_test/2.1/ks-ttl_table-ka-2-Data.db -------------------------------------------------------------------------------- /sstables/ttl_test/2.1/ks-ttl_table-ka-2-Digest.sha1: -------------------------------------------------------------------------------- 1 | 163451363 -------------------------------------------------------------------------------- /sstables/ttl_test/2.1/ks-ttl_table-ka-2-Filter.db: -------------------------------------------------------------------------------- 1 |   -------------------------------------------------------------------------------- /sstables/ttl_test/2.1/ks-ttl_table-ka-2-Index.db: -------------------------------------------------------------------------------- 1 |  -------------------------------------------------------------------------------- /sstables/ttl_test/2.1/ks-ttl_table-ka-2-Statistics.db: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/apache/cassandra-dtest/28533ee239c23abdb1061dfeff793cc48bb3d8d7/sstables/ttl_test/2.1/ks-ttl_table-ka-2-Statistics.db -------------------------------------------------------------------------------- /sstables/ttl_test/2.1/ks-ttl_table-ka-2-Summary.db: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/apache/cassandra-dtest/28533ee239c23abdb1061dfeff793cc48bb3d8d7/sstables/ttl_test/2.1/ks-ttl_table-ka-2-Summary.db -------------------------------------------------------------------------------- /sstables/ttl_test/2.1/ks-ttl_table-ka-2-TOC.txt: -------------------------------------------------------------------------------- 1 | Data.db 2 | Index.db 3 | Summary.db 4 | CompressionInfo.db 5 | Filter.db 6 | TOC.txt 7 | Digest.sha1 8 | Statistics.db 9 | -------------------------------------------------------------------------------- /sstables/ttl_test/3.0/mc-1-big-CompressionInfo.db: -------------------------------------------------------------------------------- 1 | LZ4Compressor -------------------------------------------------------------------------------- /sstables/ttl_test/3.0/mc-1-big-Data.db: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/apache/cassandra-dtest/28533ee239c23abdb1061dfeff793cc48bb3d8d7/sstables/ttl_test/3.0/mc-1-big-Data.db -------------------------------------------------------------------------------- /sstables/ttl_test/3.0/mc-1-big-Digest.crc32: -------------------------------------------------------------------------------- 1 | 3686576784 -------------------------------------------------------------------------------- /sstables/ttl_test/3.0/mc-1-big-Filter.db: 
-------------------------------------------------------------------------------- 1 |  @@ -------------------------------------------------------------------------------- /sstables/ttl_test/3.0/mc-1-big-Index.db: -------------------------------------------------------------------------------- 1 |  -------------------------------------------------------------------------------- /sstables/ttl_test/3.0/mc-1-big-Statistics.db: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/apache/cassandra-dtest/28533ee239c23abdb1061dfeff793cc48bb3d8d7/sstables/ttl_test/3.0/mc-1-big-Statistics.db -------------------------------------------------------------------------------- /sstables/ttl_test/3.0/mc-1-big-Summary.db: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/apache/cassandra-dtest/28533ee239c23abdb1061dfeff793cc48bb3d8d7/sstables/ttl_test/3.0/mc-1-big-Summary.db -------------------------------------------------------------------------------- /sstables/ttl_test/3.0/mc-1-big-TOC.txt: -------------------------------------------------------------------------------- 1 | CompressionInfo.db 2 | Index.db 3 | Statistics.db 4 | TOC.txt 5 | Filter.db 6 | Data.db 7 | Summary.db 8 | Digest.crc32 9 | -------------------------------------------------------------------------------- /sstables/ttl_test/3.0/mc-2-big-CompressionInfo.db: -------------------------------------------------------------------------------- 1 | LZ4Compressor#* -------------------------------------------------------------------------------- /sstables/ttl_test/3.0/mc-2-big-Data.db: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/apache/cassandra-dtest/28533ee239c23abdb1061dfeff793cc48bb3d8d7/sstables/ttl_test/3.0/mc-2-big-Data.db -------------------------------------------------------------------------------- /sstables/ttl_test/3.0/mc-2-big-Digest.crc32: -------------------------------------------------------------------------------- 1 | 2832791077 -------------------------------------------------------------------------------- /sstables/ttl_test/3.0/mc-2-big-Filter.db: -------------------------------------------------------------------------------- 1 |  @@ -------------------------------------------------------------------------------- /sstables/ttl_test/3.0/mc-2-big-Index.db: -------------------------------------------------------------------------------- 1 |  -------------------------------------------------------------------------------- /sstables/ttl_test/3.0/mc-2-big-Statistics.db: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/apache/cassandra-dtest/28533ee239c23abdb1061dfeff793cc48bb3d8d7/sstables/ttl_test/3.0/mc-2-big-Statistics.db -------------------------------------------------------------------------------- /sstables/ttl_test/3.0/mc-2-big-Summary.db: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/apache/cassandra-dtest/28533ee239c23abdb1061dfeff793cc48bb3d8d7/sstables/ttl_test/3.0/mc-2-big-Summary.db -------------------------------------------------------------------------------- /sstables/ttl_test/3.0/mc-2-big-TOC.txt: -------------------------------------------------------------------------------- 1 | Summary.db 2 | TOC.txt 3 | CompressionInfo.db 4 | Data.db 5 | Index.db 6 | Statistics.db 7 | Filter.db 
8 | Digest.crc32 9 | -------------------------------------------------------------------------------- /sstables/ttl_test/3.11/mc-1-big-CompressionInfo.db: -------------------------------------------------------------------------------- 1 | LZ4Compressor -------------------------------------------------------------------------------- /sstables/ttl_test/3.11/mc-1-big-Data.db: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/apache/cassandra-dtest/28533ee239c23abdb1061dfeff793cc48bb3d8d7/sstables/ttl_test/3.11/mc-1-big-Data.db -------------------------------------------------------------------------------- /sstables/ttl_test/3.11/mc-1-big-Digest.crc32: -------------------------------------------------------------------------------- 1 | 3686576784 -------------------------------------------------------------------------------- /sstables/ttl_test/3.11/mc-1-big-Filter.db: -------------------------------------------------------------------------------- 1 |  @@ -------------------------------------------------------------------------------- /sstables/ttl_test/3.11/mc-1-big-Index.db: -------------------------------------------------------------------------------- 1 |  -------------------------------------------------------------------------------- /sstables/ttl_test/3.11/mc-1-big-Statistics.db: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/apache/cassandra-dtest/28533ee239c23abdb1061dfeff793cc48bb3d8d7/sstables/ttl_test/3.11/mc-1-big-Statistics.db -------------------------------------------------------------------------------- /sstables/ttl_test/3.11/mc-1-big-Summary.db: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/apache/cassandra-dtest/28533ee239c23abdb1061dfeff793cc48bb3d8d7/sstables/ttl_test/3.11/mc-1-big-Summary.db -------------------------------------------------------------------------------- /sstables/ttl_test/3.11/mc-1-big-TOC.txt: -------------------------------------------------------------------------------- 1 | Index.db 2 | Filter.db 3 | CompressionInfo.db 4 | Statistics.db 5 | TOC.txt 6 | Data.db 7 | Summary.db 8 | Digest.crc32 9 | -------------------------------------------------------------------------------- /sstables/ttl_test/3.11/mc-2-big-CompressionInfo.db: -------------------------------------------------------------------------------- 1 | LZ4Compressor#* -------------------------------------------------------------------------------- /sstables/ttl_test/3.11/mc-2-big-Data.db: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/apache/cassandra-dtest/28533ee239c23abdb1061dfeff793cc48bb3d8d7/sstables/ttl_test/3.11/mc-2-big-Data.db -------------------------------------------------------------------------------- /sstables/ttl_test/3.11/mc-2-big-Digest.crc32: -------------------------------------------------------------------------------- 1 | 2832791077 -------------------------------------------------------------------------------- /sstables/ttl_test/3.11/mc-2-big-Filter.db: -------------------------------------------------------------------------------- 1 |  @@ -------------------------------------------------------------------------------- /sstables/ttl_test/3.11/mc-2-big-Index.db: -------------------------------------------------------------------------------- 1 |  
-------------------------------------------------------------------------------- /sstables/ttl_test/3.11/mc-2-big-Statistics.db: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/apache/cassandra-dtest/28533ee239c23abdb1061dfeff793cc48bb3d8d7/sstables/ttl_test/3.11/mc-2-big-Statistics.db -------------------------------------------------------------------------------- /sstables/ttl_test/3.11/mc-2-big-Summary.db: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/apache/cassandra-dtest/28533ee239c23abdb1061dfeff793cc48bb3d8d7/sstables/ttl_test/3.11/mc-2-big-Summary.db -------------------------------------------------------------------------------- /sstables/ttl_test/3.11/mc-2-big-TOC.txt: -------------------------------------------------------------------------------- 1 | Filter.db 2 | TOC.txt 3 | Digest.crc32 4 | Statistics.db 5 | Summary.db 6 | Data.db 7 | Index.db 8 | CompressionInfo.db 9 | -------------------------------------------------------------------------------- /sstablesplit_test.py: -------------------------------------------------------------------------------- 1 | import time 2 | import logging 3 | 4 | from math import floor 5 | from os.path import getsize 6 | 7 | from dtest import Tester 8 | 9 | logger = logging.getLogger(__name__) 10 | 11 | 12 | class TestSSTableSplit(Tester): 13 | 14 | def test_split(self): 15 | """ 16 | Check that after running compaction, sstablesplit can successfully split 17 | the resultant sstable. Check that the split is reversible and that data is readable 18 | after carrying out these operations. 19 | """ 20 | cluster = self.cluster 21 | cluster.populate(1).start() 22 | node = cluster.nodelist()[0] 23 | version = cluster.version() 24 | 25 | logger.debug("Run stress to insert data") 26 | 27 | node.stress(['write', 'n=1000', 'no-warmup', '-rate', 'threads=50', 28 | '-col', 'n=FIXED(10)', 'SIZE=FIXED(1024)']) 29 | 30 | self._do_compaction(node) 31 | self._do_split(node, version) 32 | self._do_compaction(node) 33 | self._do_split(node, version) 34 | 35 | logger.debug("Run stress to ensure data is readable") 36 | node.stress(['read', 'n=1000', '-rate', 'threads=25', 37 | '-col', 'n=FIXED(10)', 'SIZE=FIXED(1024)']) 38 | 39 | def _do_compaction(self, node): 40 | logger.debug("Compact sstables.") 41 | node.flush() 42 | node.compact() 43 | keyspace = 'keyspace1' 44 | sstables = node.get_sstables(keyspace, '') 45 | logger.debug("Number of sstables after compaction: %s" % len(sstables)) 46 | 47 | def _do_split(self, node, version): 48 | logger.debug("Run sstablesplit") 49 | time.sleep(5.0) 50 | node.stop() 51 | 52 | # the tool's default split size is 50MB; use 10MB here 53 | splitmaxsize = 10 54 | expected_sstable_size = (10 * 1024 * 1024) 55 | keyspace = 'keyspace1' 56 | 57 | # get the initial sstables and their total size 58 | origsstables = node.get_sstables(keyspace, '') 59 | origsstable_size = sum([getsize(sstable) for sstable in origsstables]) 60 | logger.debug("Original sstable and sizes before split: {}".format([(name, getsize(name)) for name in origsstables])) 61 | 62 | # calculate the expected number of sstables post-split 63 | expected_num_sstables = floor(origsstable_size / expected_sstable_size) 64 | 65 | # split the sstables 66 | result = node.run_sstablesplit(keyspace=keyspace, size=splitmaxsize, 67 | no_snapshot=True, debug=True) 68 | 69 | for (out, error, rc) in result: 70 | logger.debug("stdout: {}".format(out)) 71 | logger.debug("stderr: 
{}".format(error)) 72 | logger.debug("rc: {}".format(rc)) 73 | 74 | # get the sstables post-split and their total size 75 | sstables = node.get_sstables(keyspace, '') 76 | logger.debug("Number of sstables after split: %s. expected %s" % (len(sstables), expected_num_sstables)) 77 | assert expected_num_sstables <= len(sstables) + 1 78 | assert 1 <= len(sstables) 79 | 80 | # make sure none of the tables are bigger than the max expected size 81 | sstable_sizes = [getsize(sstable) for sstable in sstables] 82 | # add a bit extra for overhead 83 | assert max(sstable_sizes) <= expected_sstable_size + 512 84 | # make sure node can start with changed sstables 85 | node.start(wait_for_binary_proto=True) 86 | 87 | def test_single_file_split(self): 88 | """ 89 | Covers CASSANDRA-8623 90 | 91 | Check that sstablesplit doesn't crash when splitting a single sstable at the time. 92 | """ 93 | cluster = self.cluster 94 | cluster.populate(1).start() 95 | node = cluster.nodelist()[0] 96 | 97 | logger.debug("Run stress to insert data") 98 | node.stress(['write', 'n=300', 'no-warmup', '-rate', 'threads=50', 99 | '-col', 'n=FIXED(10)', 'SIZE=FIXED(1024)']) 100 | 101 | self._do_compaction(node) 102 | node.stop() 103 | result = node.run_sstablesplit(keyspace='keyspace1', size=1, no_snapshot=True) 104 | 105 | for (stdout, stderr, rc) in result: 106 | logger.debug(stderr) 107 | failure = stderr.find("java.lang.AssertionError: Data component is missing") 108 | assert failure, -1 == "Error during sstablesplit" 109 | 110 | sstables = node.get_sstables('keyspace1', '') 111 | assert len(sstables), 1 >= sstables 112 | -------------------------------------------------------------------------------- /streaming_test.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import operator 3 | 4 | import pytest 5 | from cassandra import ConsistencyLevel 6 | from pytest import mark 7 | 8 | from dtest import Tester, create_ks, create_cf 9 | from tools.data import insert_c1c2 10 | from tools.misc import generate_ssl_stores 11 | from itertools import product 12 | 13 | since = pytest.mark.since 14 | logger = logging.getLogger(__name__) 15 | 16 | opmap = { 17 | operator.eq: "==", 18 | operator.gt: ">", 19 | operator.lt: "<", 20 | operator.ne: "!=", 21 | operator.ge: ">=", 22 | operator.le: "<=" 23 | } 24 | 25 | 26 | class TestStreaming(Tester): 27 | 28 | @pytest.fixture(autouse=True) 29 | def fixture_add_additional_log_patterns(self, fixture_dtest_setup): 30 | fixture_dtest_setup.ignore_log_patterns = ( 31 | # This one occurs when trying to send the migration to a 32 | # node that hasn't started yet, and when it does, it gets 33 | # replayed and everything is fine. 
34 | r'Can\'t send migration request: node.*is down', 35 | # ignore streaming error during bootstrap 36 | r'Exception encountered during startup', 37 | r'Streaming error occurred' 38 | ) 39 | 40 | def setup_internode_ssl(self, cluster): 41 | logger.debug("***using internode ssl***") 42 | generate_ssl_stores(self.fixture_dtest_setup.test_path) 43 | cluster.enable_internode_ssl(self.fixture_dtest_setup.test_path) 44 | 45 | def _test_streaming(self, op_zerocopy, op_partial, num_partial, num_zerocopy, 46 | compaction_strategy='LeveledCompactionStrategy', num_keys=1000, rf=3, num_nodes=3, ssl=False): 47 | keys = num_keys 48 | cluster = self.cluster 49 | 50 | if ssl: 51 | self.setup_internode_ssl(cluster) 52 | 53 | tokens = cluster.balanced_tokens(num_nodes) 54 | cluster.set_configuration_options(values={'endpoint_snitch': 'org.apache.cassandra.locator.PropertyFileSnitch'}) 55 | cluster.set_configuration_options(values={'num_tokens': 1}) 56 | 57 | cluster.populate(num_nodes) 58 | nodes = cluster.nodelist() 59 | 60 | for i in range(0, len(nodes)): 61 | nodes[i].set_configuration_options(values={'initial_token': tokens[i]}) 62 | 63 | cluster.start() 64 | 65 | session = self.patient_cql_connection(nodes[0]) 66 | 67 | create_ks(session, name='ks2', rf=rf) 68 | 69 | create_cf(session, 'cf', columns={'c1': 'text', 'c2': 'text'}, 70 | compaction_strategy=compaction_strategy) 71 | insert_c1c2(session, n=keys, consistency=ConsistencyLevel.ALL) 72 | 73 | session_n2 = self.patient_exclusive_cql_connection(nodes[1]) 74 | session_n2.execute("TRUNCATE system.available_ranges;") 75 | 76 | mark = nodes[1].mark_log() 77 | nodes[1].nodetool('rebuild -ks ks2') 78 | 79 | nodes[1].watch_log_for('Completed submission of build tasks', filename='debug.log', timeout=120) 80 | zerocopy_streamed_sstable = len( 81 | nodes[1].grep_log('.*CassandraEntireSSTableStreamReader.*?Finished receiving Data.*', filename='debug.log', 82 | from_mark=mark)) 83 | partial_streamed_sstable = len( 84 | nodes[1].grep_log('.*CassandraStreamReader.*?Finished receiving file.*', filename='debug.log', 85 | from_mark=mark)) 86 | 87 | assert op_zerocopy(zerocopy_streamed_sstable, num_zerocopy), "%s %s %s" % (num_zerocopy, opmap.get(op_zerocopy), 88 | zerocopy_streamed_sstable) 89 | assert op_partial(partial_streamed_sstable, num_partial), "%s %s %s" % (num_partial, opmap.get(op_partial), 90 | partial_streamed_sstable) 91 | 92 | @since('4.0') 93 | @pytest.mark.parametrize('ssl,compaction_strategy', product(['SSL', 'NoSSL'], ['LeveledCompactionStrategy', 'SizeTieredCompactionStrategy'])) 94 | def test_zerocopy_streaming(self, ssl, compaction_strategy): 95 | self._test_streaming(op_zerocopy=operator.gt, op_partial=operator.gt, num_zerocopy=1, num_partial=1, rf=2, 96 | num_nodes=3, ssl=(ssl == 'SSL'), compaction_strategy=compaction_strategy) 97 | 98 | @since('4.0') 99 | def test_zerocopy_streaming_no_partial(self): 100 | self._test_streaming(op_zerocopy=operator.gt, op_partial=operator.eq, num_zerocopy=1, num_partial=0, 101 | num_nodes=2, rf=2) 102 | 103 | @since('4.0') 104 | def test_zerocopy_streaming_no_replication(self): 105 | self._test_streaming(op_zerocopy=operator.eq, op_partial=operator.eq, num_zerocopy=0, num_partial=0, rf=1, 106 | num_nodes=3) 107 | -------------------------------------------------------------------------------- /stress_profiles/repair_wide_rows.yaml: -------------------------------------------------------------------------------- 1 | keyspace: stresscql 2 | keyspace_definition: | 3 | CREATE KEYSPACE stresscql WITH replication = {'class': 
'SimpleStrategy', 'replication_factor': 2}; 4 | 5 | table: typestest 6 | table_definition: | 7 | CREATE TABLE typestest ( 8 | key text, 9 | col1 text, 10 | val blob, 11 | PRIMARY KEY(key, col1) 12 | ) 13 | WITH compaction = { 'class':'LeveledCompactionStrategy' } 14 | AND compression = {'chunk_length_in_kb': '1', 'class': 'org.apache.cassandra.io.compress.LZ4Compressor'}; 15 | 16 | # 17 | # Optional meta information on the generated columns in the above table 18 | # The min and max only apply to text and blob types 19 | # The distribution field represents the total unique population 20 | # distribution of that column across rows. Supported types are 21 | # 22 | # EXP(min..max) An exponential distribution over the range [min..max] 23 | # EXTREME(min..max,shape) An extreme value (Weibull) distribution over the range [min..max] 24 | # GAUSSIAN(min..max,stdvrng) A gaussian/normal distribution, where mean=(min+max)/2, and stdev is (mean-min)/stdvrng 25 | # GAUSSIAN(min..max,mean,stdev) A gaussian/normal distribution, with explicitly defined mean and stdev 26 | # UNIFORM(min..max) A uniform distribution over the range [min, max] 27 | # FIXED(val) A fixed distribution, always returning the same value 28 | # SEQ(min..max) A fixed sequence, returning values in the range min to max sequentially (starting based on seed), wrapping if necessary. 29 | # Aliases: extr, gauss, normal, norm, weibull 30 | # 31 | # If preceded by ~, the distribution is inverted 32 | # 33 | # Defaults for all columns are size: uniform(4..8), population: uniform(1..100B), cluster: fixed(1) 34 | # 35 | columnspec: 36 | - name: key 37 | size: fixed(10) 38 | population: fixed(1) # the range of unique values to select for the field (default is 100Billion) 39 | - name: col1 40 | cluster: fixed(1M) 41 | - name: val 42 | size: fixed(1K) 43 | 44 | insert: 45 | partitions: fixed(1) # number of unique partitions to update in a single operation 46 | # if batchcount > 1, multiple batches will be used but all partitions will 47 | # occur in all batches (unless they finish early); only the row counts will vary 48 | batchtype: LOGGED # type of batch to use 49 | select: fixed(10)/10 # uniform chance any single generated CQL row will be visited in a partition; 50 | # generated for each partition independently, each time we visit it 51 | queries: 52 | simple1: 53 | cql: select * from typestest where key = ? and col1 = ? LIMIT 100 54 | fields: samerow # samerow or multirow (select arguments from the same row, or randomly from all rows in the partition) 55 | -------------------------------------------------------------------------------- /stress_tool_test.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | import logging 3 | 4 | from dtest import Tester 5 | from tools.data import rows_to_list 6 | 7 | since = pytest.mark.since 8 | logger = logging.getLogger(__name__) 9 | 10 | 11 | @since('3.0') 12 | class TestStressSparsenessRatio(Tester): 13 | """ 14 | @jira_ticket CASSANDRA-9522 15 | 16 | Tests for the `row-population-ratio` parameter to `cassandra-stress`. 17 | """ 18 | 19 | def test_uniform_ratio(self): 20 | """ 21 | Tests that the ratio-specifying string 'uniform(5..15)/50' (on average 10 of 50 columns populated) results in 22 | ~80% of the values written being null. 23 | """ 24 | self.distribution_template(ratio_spec='uniform(5..15)/50', 25 | expected_ratio=.8, 26 | delta=.1) 27 | 28 | def test_fixed_ratio(self): 29 | """ 30 | Tests that the string 'fixed(1)/3' results in ~1/3 of the values 31 | written being non-null. 
32 | """ 33 | self.distribution_template(ratio_spec='fixed(1)/3', 34 | expected_ratio=1 - 1 / 3, 35 | delta=.01) 36 | 37 | def distribution_template(self, ratio_spec, expected_ratio, delta): 38 | """ 39 | @param ratio_spec the string passed to `row-population-ratio` in the call to `cassandra-stress` 40 | @param expected_ratio the expected ratio of null/non-null values in the values written 41 | @param delta the acceptable delta between the expected and actual ratios 42 | 43 | A parameterized test for the `row-population-ratio` parameter to 44 | `cassandra-stress`. 45 | """ 46 | self.cluster.populate(1).start() 47 | node = self.cluster.nodelist()[0] 48 | node.stress(['write', 'n=1000', 'no-warmup', '-rate', 'threads=50', '-col', 'n=FIXED(50)', 49 | '-insert', 'row-population-ratio={ratio_spec}'.format(ratio_spec=ratio_spec)]) 50 | session = self.patient_cql_connection(node) 51 | written = rows_to_list(session.execute('SELECT * FROM keyspace1.standard1;')) 52 | 53 | num_nones = sum(row.count(None) for row in written) 54 | num_results = sum(len(row) for row in written) 55 | 56 | assert abs(float(num_nones) / num_results - expected_ratio) <= delta 57 | 58 | 59 | @since('3.0') 60 | class TestStressWrite(Tester): 61 | 62 | @pytest.mark.timeout(3 * 60) 63 | def test_quick_write(self): 64 | """ 65 | @jira_ticket CASSANDRA-14890 66 | A simple write stress test should be done very quickly 67 | """ 68 | self.cluster.populate(1).start() 69 | node = self.cluster.nodelist()[0] 70 | node.stress(['write', 'err<0.9', 'n>1', '-rate', 'threads=1']) 71 | out, err, _ = node.run_cqlsh('describe table keyspace1.standard1') 72 | assert 'standard1' in out 73 | -------------------------------------------------------------------------------- /super_column_cache_test.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | import logging 3 | 4 | from dtest_setup_overrides import DTestSetupOverrides 5 | 6 | from dtest import Tester 7 | from thrift_bindings.thrift010.ttypes import \ 8 | ConsistencyLevel as ThriftConsistencyLevel 9 | from thrift_bindings.thrift010.ttypes import (CfDef, Column, ColumnOrSuperColumn, 10 | ColumnParent, KsDef, Mutation, 11 | SlicePredicate, SliceRange, 12 | SuperColumn) 13 | from thrift_test import get_thrift_client 14 | from tools.misc import ImmutableMapping 15 | 16 | since = pytest.mark.since 17 | logger = logging.getLogger(__name__) 18 | 19 | 20 | @since('2.0', max_version='4') 21 | class TestSCCache(Tester): 22 | 23 | @pytest.fixture(scope='function', autouse=True) 24 | def fixture_dtest_setup_overrides(self, dtest_config): 25 | dtest_setup_overrides = DTestSetupOverrides() 26 | dtest_setup_overrides.cluster_options = ImmutableMapping({'start_rpc': 'true'}) 27 | return dtest_setup_overrides 28 | 29 | def test_sc_with_row_cache(self): 30 | """ Test for bug reported in #4190 """ 31 | cluster = self.cluster 32 | 33 | cluster.populate(1).start() 34 | node1 = cluster.nodelist()[0] 35 | self.patient_cql_connection(node1) 36 | 37 | node = self.cluster.nodelist()[0] 38 | host, port = node.network_interfaces['thrift'] 39 | client = get_thrift_client(host, port) 40 | client.transport.open() 41 | 42 | ksdef = KsDef() 43 | ksdef.name = 'ks' 44 | ksdef.strategy_class = 'SimpleStrategy' 45 | ksdef.strategy_options = {'replication_factor': '1'} 46 | ksdef.cf_defs = [] 47 | 48 | client.system_add_keyspace(ksdef) 49 | client.set_keyspace('ks') 50 | 51 | # create a super column family with UTF8 for all types 52 | cfdef = CfDef() 53 | cfdef.keyspace = 
'ks' 54 | cfdef.name = 'Users' 55 | cfdef.column_type = 'Super' 56 | cfdef.comparator_type = 'UTF8Type' 57 | cfdef.subcomparator_type = 'UTF8Type' 58 | cfdef.key_validation_class = 'UTF8Type' 59 | cfdef.default_validation_class = 'UTF8Type' 60 | cfdef.caching = 'rows_only' 61 | 62 | client.system_add_column_family(cfdef) 63 | 64 | column = Column(name='name'.encode(), value='Mina'.encode(), timestamp=100) 65 | client.batch_mutate( 66 | {'mina'.encode(): {'Users': [Mutation(ColumnOrSuperColumn(super_column=SuperColumn('attrs'.encode(), [column])))]}}, 67 | ThriftConsistencyLevel.ONE) 68 | 69 | column_parent = ColumnParent(column_family='Users') 70 | predicate = SlicePredicate(slice_range=SliceRange("".encode(), "".encode(), False, 100)) 71 | super_columns = client.get_slice('mina'.encode(), column_parent, predicate, ThriftConsistencyLevel.ONE) 72 | assert 1 == len(super_columns) 73 | super_column = super_columns[0].super_column 74 | assert 'attrs'.encode() == super_column.name 75 | assert 1 == len(super_column.columns) 76 | assert 'name'.encode() == super_column.columns[0].name 77 | assert 'Mina'.encode() == super_column.columns[0].value 78 | 79 | # add a 'country' subcolumn 80 | column = Column(name='country'.encode(), value='Canada'.encode(), timestamp=100) 81 | client.batch_mutate( 82 | {'mina'.encode(): {'Users': [Mutation(ColumnOrSuperColumn(super_column=SuperColumn('attrs'.encode(), [column])))]}}, 83 | ThriftConsistencyLevel.ONE) 84 | 85 | super_columns = client.get_slice('mina'.encode(), column_parent, predicate, ThriftConsistencyLevel.ONE) 86 | assert 1 == len(super_columns) 87 | super_column = super_columns[0].super_column 88 | assert 'attrs'.encode() == super_column.name 89 | assert 2 == len(super_column.columns) 90 | 91 | assert 'country'.encode() == super_column.columns[0].name 92 | assert 'Canada'.encode() == super_column.columns[0].value 93 | 94 | assert 'name'.encode() == super_column.columns[1].name 95 | assert 'Mina'.encode() == super_column.columns[1].value 96 | 97 | # add a 'region' subcolumn 98 | column = Column(name='region'.encode(), value='Quebec'.encode(), timestamp=100) 99 | client.batch_mutate( 100 | {'mina'.encode(): {'Users': [Mutation(ColumnOrSuperColumn(super_column=SuperColumn('attrs'.encode(), [column])))]}}, 101 | ThriftConsistencyLevel.ONE) 102 | 103 | super_columns = client.get_slice('mina'.encode(), column_parent, predicate, ThriftConsistencyLevel.ONE) 104 | assert 1 == len(super_columns) 105 | super_column = super_columns[0].super_column 106 | assert 'attrs'.encode() == super_column.name 107 | assert 3 == len(super_column.columns) 108 | 109 | assert 'country'.encode() == super_column.columns[0].name 110 | assert 'Canada'.encode() == super_column.columns[0].value 111 | 112 | assert 'name'.encode() == super_column.columns[1].name 113 | assert 'Mina'.encode() == super_column.columns[1].value 114 | 115 | assert 'region'.encode() == super_column.columns[2].name 116 | assert 'Quebec'.encode() == super_column.columns[2].value 117 | -------------------------------------------------------------------------------- /super_counter_test.py: -------------------------------------------------------------------------------- 1 | import time 2 | import pytest 3 | import logging 4 | 5 | from dtest_setup_overrides import DTestSetupOverrides 6 | from dtest import Tester, create_ks 7 | from thrift_test import get_thrift_client 8 | from tools.misc import ImmutableMapping 9 | 10 | from thrift_bindings.thrift010.Cassandra import (CfDef, ColumnParent, ColumnPath, 11 | 
ConsistencyLevel, CounterColumn) 12 | 13 | since = pytest.mark.since 14 | logger = logging.getLogger(__name__) 15 | 16 | 17 | @since('2.0', max_version='4') 18 | class TestSuperCounterClusterRestart(Tester): 19 | """ 20 | This test is part of this issue: 21 | https://issues.apache.org/jira/browse/CASSANDRA-3821 22 | """ 23 | @pytest.fixture(scope='function', autouse=True) 24 | def fixture_dtest_setup_overrides(self, dtest_config): 25 | dtest_setup_overrides = DTestSetupOverrides() 26 | dtest_setup_overrides.cluster_options = ImmutableMapping({'start_rpc': 'true'}) 27 | return dtest_setup_overrides 28 | 29 | def test_functional(self): 30 | NUM_SUBCOLS = 100 31 | NUM_ADDS = 100 32 | 33 | cluster = self.cluster 34 | cluster.populate(3).start() 35 | node1 = cluster.nodelist()[0] 36 | 37 | time.sleep(.5) 38 | session = self.patient_cql_connection(node1) 39 | create_ks(session, 'ks', 3) 40 | time.sleep(1)  # wait for propagation 41 | 42 | # create the columnfamily using thrift 43 | host, port = node1.network_interfaces['thrift'] 44 | thrift_conn = get_thrift_client(host, port) 45 | thrift_conn.transport.open() 46 | thrift_conn.set_keyspace('ks') 47 | cf_def = CfDef(keyspace='ks', name='cf', column_type='Super', 48 | default_validation_class='CounterColumnType') 49 | thrift_conn.system_add_column_family(cf_def) 50 | 51 | # let the sediment settle to the bottom before drinking... 52 | time.sleep(2) 53 | 54 | for subcol in range(NUM_SUBCOLS): 55 | for add in range(NUM_ADDS): 56 | column_parent = ColumnParent(column_family='cf', 57 | super_column=('subcol_%d' % subcol).encode()) 58 | counter_column = CounterColumn('col_0'.encode(), 1) 59 | thrift_conn.add('row_0'.encode(), column_parent, counter_column, 60 | ConsistencyLevel.QUORUM) 61 | time.sleep(1) 62 | cluster.flush() 63 | 64 | logger.debug("Stopping cluster") 65 | cluster.stop() 66 | time.sleep(5) 67 | logger.debug("Starting cluster") 68 | cluster.start() 69 | time.sleep(5) 70 | 71 | thrift_conn = get_thrift_client(host, port) 72 | thrift_conn.transport.open() 73 | thrift_conn.set_keyspace('ks') 74 | 75 | from_db = [] 76 | 77 | for i in range(NUM_SUBCOLS): 78 | column_path = ColumnPath(column_family='cf', column='col_0'.encode(), 79 | super_column=(('subcol_%d' % i).encode())) 80 | column_or_super_column = thrift_conn.get('row_0'.encode(), column_path, 81 | ConsistencyLevel.QUORUM) 82 | val = column_or_super_column.counter_column.value 83 | logger.debug(str(val)) 84 | from_db.append(val) 85 | logger.debug("") 86 | 87 | expected = [NUM_ADDS for i in range(NUM_SUBCOLS)] 88 | 89 | if from_db != expected: 90 | raise Exception("Expected a bunch of the same values out of the db. 
Got this: " + str(from_db)) 91 | -------------------------------------------------------------------------------- /system_keyspaces_test.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | import logging 3 | 4 | from cassandra import Unauthorized 5 | from dtest import Tester 6 | from tools.assertions import assert_all, assert_exception, assert_none 7 | 8 | since = pytest.mark.since 9 | logger = logging.getLogger(__name__) 10 | 11 | 12 | 13 | class TestSystemKeyspaces(Tester): 14 | 15 | @since('3.0') 16 | def test_local_system_keyspaces(self): 17 | cluster = self.cluster 18 | cluster.populate(1).start() 19 | 20 | node = cluster.nodelist()[0] 21 | session = self.patient_cql_connection(node) 22 | 23 | # ALTER KEYSPACE should fail for system and system_schema 24 | stmt = """ 25 | ALTER KEYSPACE system 26 | WITH replication = {'class': 'NetworkTopologyStrategy', 'datacenter1' : '1'};""" 27 | assert_exception(session, stmt, expected=Unauthorized) 28 | 29 | stmt = """ 30 | ALTER KEYSPACE system_schema 31 | WITH replication = {'class': 'NetworkTopologyStrategy', 'datacenter1' : '1'};""" 32 | assert_exception(session, stmt, expected=Unauthorized) 33 | 34 | # DROP KEYSPACE should fail for system and system_schema 35 | assert_exception(session, 'DROP KEYSPACE system;', expected=Unauthorized) 36 | assert_exception(session, 'DROP KEYSPACE system_schema;', expected=Unauthorized) 37 | 38 | # CREATE TABLE should fail in system and system_schema 39 | assert_exception(session, 40 | 'CREATE TABLE system.new_table (id int PRIMARY KEY);', 41 | expected=Unauthorized) 42 | 43 | assert_exception(session, 44 | 'CREATE TABLE system_schema.new_table (id int PRIMARY KEY);', 45 | expected=Unauthorized) 46 | 47 | # ALTER TABLE should fail in system and system_schema 48 | assert_exception(session, 49 | "ALTER TABLE system.local WITH comment = '';", 50 | expected=Unauthorized) 51 | 52 | assert_exception(session, 53 | "ALTER TABLE system_schema.tables WITH comment = '';", 54 | expected=Unauthorized) 55 | 56 | # DROP TABLE should fail in system and system_schema 57 | assert_exception(session, 'DROP TABLE system.local;', expected=Unauthorized) 58 | assert_exception(session, 'DROP TABLE system_schema.tables;', expected=Unauthorized) 59 | 60 | @since('3.0') 61 | def test_replicated_system_keyspaces(self): 62 | cluster = self.cluster 63 | cluster.populate(1).start() 64 | 65 | node = cluster.nodelist()[0] 66 | session = self.patient_cql_connection(node) 67 | 68 | # ALTER KEYSPACE should work for system_auth, system_distributed, and system_traces 69 | stmt = """ 70 | ALTER KEYSPACE system_auth 71 | WITH replication = {'class': 'NetworkTopologyStrategy', 'datacenter1' : '1'};""" 72 | assert_none(session, stmt) 73 | 74 | stmt = """ 75 | ALTER KEYSPACE system_distributed 76 | WITH replication = {'class': 'NetworkTopologyStrategy', 'datacenter1' : '1'};""" 77 | assert_none(session, stmt) 78 | 79 | stmt = """ 80 | ALTER KEYSPACE system_traces 81 | WITH replication = {'class': 'NetworkTopologyStrategy', 'datacenter1' : '1'};""" 82 | assert_none(session, stmt) 83 | 84 | stmt = """ 85 | SELECT replication 86 | FROM system_schema.keyspaces 87 | WHERE keyspace_name IN ('system_auth', 'system_distributed', 'system_traces');""" 88 | replication = {'class': 'org.apache.cassandra.locator.NetworkTopologyStrategy', 'datacenter1': '1'} 89 | assert_all(session, stmt, [[replication], [replication], [replication]]) 90 | 91 | # DROP KEYSPACE should fail for system_auth, system_distributed, and 
system_traces 92 | assert_exception(session, 'DROP KEYSPACE system_auth;', expected=Unauthorized) 93 | assert_exception(session, 'DROP KEYSPACE system_distributed;', expected=Unauthorized) 94 | assert_exception(session, 'DROP KEYSPACE system_traces;', expected=Unauthorized) 95 | 96 | # CREATE TABLE should fail in system_auth, system_distributed, and system_traces 97 | assert_exception(session, 98 | 'CREATE TABLE system_auth.new_table (id int PRIMARY KEY);', 99 | expected=Unauthorized) 100 | 101 | assert_exception(session, 102 | 'CREATE TABLE system_distributed.new_table (id int PRIMARY KEY);', 103 | expected=Unauthorized) 104 | 105 | assert_exception(session, 106 | 'CREATE TABLE system_traces.new_table (id int PRIMARY KEY);', 107 | expected=Unauthorized) 108 | 109 | # ALTER TABLE should fail in system_auth, system_distributed, and system_traces 110 | assert_exception(session, 111 | "ALTER TABLE system_auth.roles WITH comment = '';", 112 | expected=Unauthorized) 113 | 114 | assert_exception(session, 115 | "ALTER TABLE system_distributed.repair_history WITH comment = '';", 116 | expected=Unauthorized) 117 | 118 | assert_exception(session, 119 | "ALTER TABLE system_traces.sessions WITH comment = '';", 120 | expected=Unauthorized) 121 | 122 | # DROP TABLE should fail in system_auth, system_distributed, and system_traces 123 | assert_exception(session, 'DROP TABLE system_auth.roles;', expected=Unauthorized) 124 | assert_exception(session, 'DROP TABLE system_distributed.repair_history;', expected=Unauthorized) 125 | assert_exception(session, 'DROP TABLE system_traces.sessions;', expected=Unauthorized) 126 | -------------------------------------------------------------------------------- /thrift_bindings/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/apache/cassandra-dtest/28533ee239c23abdb1061dfeff793cc48bb3d8d7/thrift_bindings/__init__.py -------------------------------------------------------------------------------- /thrift_bindings/thrift010/__init__.py: -------------------------------------------------------------------------------- 1 | __all__ = ['ttypes', 'constants', 'Cassandra'] 2 | -------------------------------------------------------------------------------- /thrift_bindings/thrift010/constants.py: -------------------------------------------------------------------------------- 1 | # 2 | # Autogenerated by Thrift Compiler (0.10.0) 3 | # 4 | # DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING 5 | # 6 | # options string: py 7 | # 8 | 9 | from thrift.Thrift import TType, TMessageType, TFrozenDict, TException, TApplicationException 10 | from thrift.protocol.TProtocol import TProtocolException 11 | import sys 12 | from .ttypes import * 13 | VERSION = "20.1.0" 14 | -------------------------------------------------------------------------------- /thrift_hsha_test.py: -------------------------------------------------------------------------------- 1 | import glob 2 | import os 3 | import shlex 4 | import subprocess 5 | import time 6 | import unittest 7 | import pytest 8 | import logging 9 | 10 | from dtest import DEFAULT_DIR, Tester, create_ks 11 | from thrift_test import get_thrift_client 12 | from tools.jmxutils import JolokiaAgent, make_mbean 13 | 14 | since = pytest.mark.since 15 | logger = logging.getLogger(__name__) 16 | 17 | JNA_PATH = '/usr/share/java/jna.jar' 18 | ATTACK_JAR = 'lib/cassandra-attack.jar' 19 | 20 | # Use jna.jar in {CASSANDRA_DIR,DEFAULT_DIR}/lib/, since >=2.1 needs 
correct version 21 | try: 22 | if glob.glob('%s/lib/jna-*.jar' % os.environ['CASSANDRA_DIR']): 23 | logger.debug('Using jna.jar in CASSANDRA_DIR/lib..') 24 | JNA_IN_LIB = glob.glob('%s/lib/jna-*.jar' % os.environ['CASSANDRA_DIR']) 25 | JNA_PATH = JNA_IN_LIB[0] 26 | except KeyError: 27 | if glob.glob('%s/lib/jna-*.jar' % DEFAULT_DIR): 28 | print('Using jna.jar in DEFAULT_DIR/lib/..') 29 | JNA_IN_LIB = glob.glob('%s/lib/jna-*.jar' % DEFAULT_DIR) 30 | JNA_PATH = JNA_IN_LIB[0] 31 | 32 | 33 | @since('2.0', max_version='4') 34 | class TestThriftHSHA(Tester): 35 | 36 | def test_closing_connections(self): 37 | """ 38 | @jira_ticket CASSANDRA-6546 39 | 40 | Test CASSANDRA-6546 - do connections get closed when disabling / re-enabling the thrift service? 41 | """ 42 | cluster = self.cluster 43 | cluster.set_configuration_options(values={ 44 | 'start_rpc': 'true', 45 | 'rpc_server_type': 'hsha', 46 | 'rpc_max_threads': 20 47 | }) 48 | 49 | cluster.populate(1) 50 | (node1,) = cluster.nodelist() 51 | cluster.start() 52 | 53 | session = self.patient_cql_connection(node1) 54 | create_ks(session, 'test', 1) 55 | session.execute("CREATE TABLE \"CF\" (key text PRIMARY KEY, val text) WITH COMPACT STORAGE;") 56 | 57 | def make_connection(): 58 | host, port = node1.network_interfaces['thrift'] 59 | client = get_thrift_client(host, port) 60 | client.transport.open() 61 | return client 62 | 63 | pools = [] 64 | connected_thrift_clients = make_mbean('metrics', type='Client', name='connectedThriftClients') 65 | for i in range(10): 66 | logger.debug("Creating connection pools..") 67 | for x in range(3): 68 | pools.append(make_connection()) 69 | logger.debug("Disabling/Enabling thrift iteration #{i}".format(i=i)) 70 | node1.nodetool('disablethrift') 71 | node1.nodetool('enablethrift') 72 | logger.debug("Closing connections from the client side..") 73 | for client in pools: 74 | client.transport.close() 75 | 76 | with JolokiaAgent(node1) as jmx: 77 | num_clients = jmx.read_attribute(connected_thrift_clients, "Value") 78 | assert int(num_clients) == 0, "There are still open Thrift connections after stopping service " + str(num_clients) 79 | 80 | @unittest.skipIf(not os.path.exists(ATTACK_JAR), "No attack jar found") 81 | @unittest.skipIf(not os.path.exists(JNA_PATH), "No JNA jar found") 82 | def test_6285(self): 83 | """ 84 | @jira_ticket CASSANDRA-6285 85 | 86 | Test CASSANDRA-6285 with Viktor Kuzmin's attack jar. 87 | 88 | This jar file is not a part of this repository; you can 89 | compile it yourself from sources found on CASSANDRA-6285. This 90 | test will be skipped if the jar file is not found. 
91 | """ 92 | cluster = self.cluster 93 | cluster.set_configuration_options(values={ 94 | 'start_rpc': 'true', 95 | 'rpc_server_type': 'hsha', 96 | 'rpc_max_threads': 20 97 | }) 98 | 99 | # Enable JNA: 100 | with open(os.path.join(self.test_path, 'test', 'cassandra.in.sh'), 'w') as f: 101 | f.write('CLASSPATH={jna_path}:$CLASSPATH\n'.format( 102 | jna_path=JNA_PATH)) 103 | 104 | cluster.populate(2) 105 | nodes = (node1, node2) = cluster.nodelist() 106 | [n.start(use_jna=True) for n in nodes] 107 | logger.debug("Cluster started.") 108 | 109 | session = self.patient_cql_connection(node1) 110 | create_ks(session, 'tmp', 2) 111 | 112 | session.execute("""CREATE TABLE "CF" ( 113 | key blob, 114 | column1 timeuuid, 115 | value blob, 116 | PRIMARY KEY (key, column1) 117 | ) WITH COMPACT STORAGE; 118 | """) 119 | 120 | logger.debug("running attack jar...") 121 | p = subprocess.Popen(shlex.split("java -jar {attack_jar}".format(attack_jar=ATTACK_JAR))) 122 | p.communicate() 123 | 124 | logger.debug("Stopping cluster..") 125 | cluster.stop() 126 | logger.debug("Starting cluster..") 127 | cluster.start(no_wait=True) 128 | logger.debug("Waiting 10 seconds before we're done..") 129 | time.sleep(10) 130 | -------------------------------------------------------------------------------- /tools/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/apache/cassandra-dtest/28533ee239c23abdb1061dfeff793cc48bb3d8d7/tools/__init__.py -------------------------------------------------------------------------------- /tools/context.py: -------------------------------------------------------------------------------- 1 | """ 2 | Home for functionality that provides context managers, and anything related to 3 | making those context managers function. 4 | """ 5 | import logging 6 | from contextlib import contextmanager 7 | 8 | from tools.env import ALLOW_NOISY_LOGGING 9 | 10 | 11 | @contextmanager 12 | def log_filter(log_id, expected_strings=None): 13 | """ 14 | Context manager which allows silencing logs until exit. 15 | Log records matching expected_strings will be filtered out of logging. 16 | If expected_strings is not provided, everything is filtered for that log. 17 | """ 18 | logger = logging.getLogger(log_id) 19 | log_filter = _make_filter_class(expected_strings) 20 | logger.addFilter(log_filter) 21 | yield 22 | if log_filter.records_silenced > 0: 23 | print("Logs were filtered to remove messages deemed unimportant, total count: %d" % log_filter.records_silenced) 24 | logger.removeFilter(log_filter) 25 | 26 | 27 | def _make_filter_class(expected_strings): 28 | """ 29 | Builds an anon-ish filtering class and returns it. 30 | 31 | Returns a logfilter if filtering should take place, otherwise a nooplogfilter. 32 | 33 | We're just using a class here as a one-off object with a filter method, for 34 | use as a filter object on the desired log. 
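The class built here is what the log_filter() context manager above installs.
A minimal usage sketch (the log id and the expected string are illustrative,
not taken from a real test run):

    with log_filter('cassandra.cluster', expected_strings=['reconnect']):
        ...  # records whose message or logger name contains 'reconnect' are dropped here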
35 | """ 36 | class nooplogfilter(object): 37 | records_silenced = 0 38 | 39 | @classmethod 40 | def filter(cls, record): 41 | return True 42 | 43 | class logfilter(object): 44 | records_silenced = 0 45 | 46 | @classmethod 47 | def filter(cls, record): 48 | if expected_strings is None: 49 | cls.records_silenced += 1 50 | return False 51 | 52 | for s in expected_strings: 53 | if s in record.msg or s in record.name: 54 | cls.records_silenced += 1 55 | return False 56 | 57 | return True 58 | 59 | if ALLOW_NOISY_LOGGING: 60 | return nooplogfilter 61 | else: 62 | return logfilter 63 | -------------------------------------------------------------------------------- /tools/env.py: -------------------------------------------------------------------------------- 1 | """ 2 | Module for defining the test environment. 3 | 4 | This is located in tools/ so that both the main dtest.py module and the various 5 | test modules can import from here without getting into circular import issues. 6 | """ 7 | import os 8 | 9 | ALLOW_NOISY_LOGGING = os.environ.get('ALLOW_NOISY_LOGGING', '').lower() in ('yes', 'true') 10 | -------------------------------------------------------------------------------- /tools/files.py: -------------------------------------------------------------------------------- 1 | import fileinput 2 | import os 3 | import re 4 | import sys 5 | import tempfile 6 | import logging 7 | import shutil 8 | 9 | logger = logging.getLogger(__name__) 10 | 11 | 12 | def replace_in_file(filepath, search_replacements): 13 | """ 14 | In-place file search and replace. 15 | 16 | filepath - The path of the file to edit 17 | search_replacements - a list of tuples (regex, replacement) that 18 | represent however many search and replace operations you wish to 19 | perform. 20 | 21 | Note: This does not work with multi-line regexes. 22 | """ 23 | for line in fileinput.input(filepath, inplace=True): 24 | for regex, replacement in search_replacements: 25 | line = re.sub(regex, replacement, line) 26 | sys.stdout.write(line) 27 | 28 | 29 | def safe_mkdtemp(): 30 | tmpdir = tempfile.mkdtemp() 31 | # \ on Windows is interpreted as an escape character and doesn't do anyone any favors 32 | return tmpdir.replace('\\', '/') 33 | 34 | 35 | def size_of_files_in_dir(dir_name, verbose=True): 36 | """ 37 | Return the size of all files found in a non-recursive ls of the argument. 38 | Based on http://stackoverflow.com/a/1392549 39 | """ 40 | files = [os.path.join(dir_name, f) for f in os.listdir(dir_name)] 41 | if verbose: 42 | logger.debug('getting sizes of these files: {}'.format(files)) 43 | return sum(os.path.getsize(f) for f in files) 44 | 45 | def copytree(src, dst, symlinks=False, ignore=None): 46 | for item in os.listdir(src): 47 | s = os.path.join(src, item) 48 | d = os.path.join(dst, item) 49 | if os.path.isdir(s): 50 | shutil.copytree(s, d, symlinks, ignore) 51 | else: 52 | shutil.copy2(s, d) 53 | -------------------------------------------------------------------------------- /tools/flaky.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import time 3 | 4 | logger = logging.getLogger(__name__) 5 | 6 | class RerunTestException(Exception): 7 | """ 8 | This exception can be raised to signal a likely harmless test problem. If fixing a test is reasonable, that should be preferred. 9 | 10 | Ideally this is used in conjunction with the 'flaky' decorator, allowing the test to be automatically re-run and passed. 
11 | When raising this exception in methods decorated with @flaky(rerun_filter=requires_rerun), do so carefully. 12 | Avoid overly broad try/except blocks; otherwise real (intermittent) bugs could be masked. 13 | 14 | example usage: 15 | 16 | @flaky(rerun_filter=requires_rerun)  # see requires_rerun method below in this module 17 | def some_flaky_test(): 18 | # some predictable code 19 | # more predictable code 20 | 21 | try: 22 | # some code that occasionally fails for routine/predictable reasons (e.g. timeout) 23 | except SomeNarrowException: 24 | raise RerunTestException 25 | 26 | When the test raises RerunTestException, the flaky plugin will re-run the test and it will pass if the next attempt(s) succeed. 27 | """ 28 | 29 | 30 | def requires_rerun(err, *args): 31 | """ 32 | For use in conjunction with the flaky decorator and its rerun_filter argument. See RerunTestException above. 33 | 34 | Returns True if the given flaky failure data (err) is of type RerunTestException, otherwise False. 35 | """ 36 | # err[0] contains the type of the error that occurred 37 | return err[0] == RerunTestException 38 | 39 | def retry(fn, max_attempts=10, allowed_error=None, sleep_seconds=1): 40 | if max_attempts <= 0: 41 | raise ValueError("max_attempts must be a positive value, but given {}".format(str(max_attempts))) 42 | last_error = None 43 | for _ in range(0, max_attempts): 44 | try: 45 | return fn() 46 | except Exception as e: 47 | last_error = e 48 | if allowed_error and not allowed_error(e): 49 | break 50 | logger.info("Retrying as error '{}' was seen; sleeping for {} seconds".format(str(e), str(sleep_seconds))) 51 | time.sleep(sleep_seconds) 52 | raise last_error 53 | 54 | 55 | -------------------------------------------------------------------------------- /tools/funcutils.py: -------------------------------------------------------------------------------- 1 | import time 2 | 3 | 4 | class get_rate_limited_function(object): 5 | """ 6 | Close over a function and a time limit in seconds. The resulting object can 7 | be called like the function, but will not delegate to the function if that 8 | function was called through the object in the time limit. 9 | 10 | Clients can ignore the time limit by calling the function directly as the 11 | func attribute of the object. 12 | """ 13 | def __init__(self, func, limit): 14 | self.func, self.limit = func, limit 15 | self.last_called = False 16 | 17 | def __call__(self, *args, **kwargs): 18 | elapsed = time.time() - self.last_called 19 | if elapsed >= self.limit: 20 | self.last_called = time.time() 21 | return self.func(*args, **kwargs) 22 | 23 | def __repr__(self): 24 | return '{cls_name}(func={func}, limit={limit}, last_called={last_called})'.format( 25 | cls_name=self.__class__.__name__, 26 | func=self.func, 27 | limit=self.limit, 28 | last_called=self.last_called, 29 | ) 30 | 31 | 32 | def merge_dicts(*dict_args): 33 | """ 34 | Given any number of dicts, shallow copy and merge into a new dict, 35 | precedence goes to key value pairs in latter dicts. 36 | """ 37 | result = {} 38 | for dictionary in dict_args: 39 | result.update(dictionary) 40 | return result 41 | -------------------------------------------------------------------------------- /tools/git.py: -------------------------------------------------------------------------------- 1 | import subprocess 2 | import logging 3 | 4 | logger = logging.getLogger(__name__) 5 | 6 | 7 | def cassandra_git_branch(cassandra_dir): 8 | '''Get the name of the git branch at CASSANDRA_DIR. 
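A sketch of the expected behavior (the path and branch name here are
hypothetical):

    >>> cassandra_git_branch('/path/to/cassandra')
    'trunk'

Returns None if git cannot be shelled out to; raises RuntimeError if git
itself reports an error.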
9 | ''' 10 | try: 11 | p = subprocess.Popen(['git', 'branch'], cwd=cassandra_dir, 12 | stdout=subprocess.PIPE, stderr=subprocess.PIPE) 13 | except OSError as e: # e.g. if git isn't available, just give up and return None 14 | logger.debug('shelling out to git failed: {}'.format(e)) 15 | return 16 | 17 | out, err = p.communicate() 18 | # fail if git failed 19 | if p.returncode != 0: 20 | raise RuntimeError('Git printed error: {err}'.format(err=err.decode("utf-8"))) 21 | [current_branch_line] = [line for line in out.decode("utf-8").splitlines() if line.startswith('*')] 22 | return current_branch_line[1:].strip() 23 | -------------------------------------------------------------------------------- /tools/hacks.py: -------------------------------------------------------------------------------- 1 | """ 2 | This one's called hacks because it provides shared utilities to hack around 3 | weirdnesses in Cassandra. 4 | """ 5 | import os 6 | import time 7 | import logging 8 | 9 | from cassandra.concurrent import execute_concurrent 10 | 11 | from tools.funcutils import get_rate_limited_function 12 | 13 | logger = logging.getLogger(__name__) 14 | 15 | 16 | def _files_in(directory): 17 | return { 18 | os.path.join(directory, name) for name in os.listdir(directory) 19 | } 20 | 21 | 22 | def advance_to_next_cl_segment(session, commitlog_dir, 23 | keyspace_name='ks', table_name='junk_table', 24 | timeout=180): 25 | """ 26 | This is a hack to work around problems like CASSANDRA-11811. 27 | 28 | The problem happens in commitlog-replaying tests, like the snapshot and CDC 29 | tests. If we replay the first commitlog that's created, we wind up 30 | replaying some mutations that initialize system tables, so this function 31 | advances the node to the next CL by filling up the first one. 32 | """ 33 | session.execute( 34 | 'CREATE TABLE {ks}.{tab} (' 35 | 'a uuid PRIMARY KEY, b uuid, c uuid, d uuid, ' 36 | 'e uuid, f uuid, g uuid, h uuid' 37 | ')'.format(ks=keyspace_name, tab=table_name) 38 | ) 39 | prepared_insert = session.prepare( 40 | 'INSERT INTO {ks}.{tab} ' 41 | '(a, b, c, d, e, f, g, h) ' 42 | 'VALUES (' 43 | 'uuid(), uuid(), uuid(), uuid(), ' 44 | 'uuid(), uuid(), uuid(), uuid()' 45 | ')'.format(ks=keyspace_name, tab=table_name) 46 | ) 47 | 48 | # record segments that we want to advance past 49 | initial_cl_files = _files_in(commitlog_dir) 50 | 51 | start = time.time() 52 | stop_time = start + timeout 53 | rate_limited_debug_logger = get_rate_limited_function(logger.debug, 5) 54 | logger.debug('attempting to write until we start writing to new CL segments: {}'.format(initial_cl_files)) 55 | 56 | while _files_in(commitlog_dir) <= initial_cl_files: 57 | elapsed = time.time() - start 58 | rate_limited_debug_logger(' commitlog-advancing load step has lasted {s:.2f}s'.format(s=elapsed)) 59 | assert ( 60 | time.time() <= stop_time), ("It's been over {s}s and we haven't written a new " + 61 | "commitlog segment. 
Something is wrong.").format(s=timeout) 62 | execute_concurrent( 63 | session, 64 | ((prepared_insert, ()) for _ in range(1000)), 65 | concurrency=500, 66 | raise_on_first_error=True, 67 | ) 68 | 69 | logger.debug('present commitlog segments: {}'.format(_files_in(commitlog_dir))) 70 | -------------------------------------------------------------------------------- /tools/intervention.py: -------------------------------------------------------------------------------- 1 | import random 2 | import time 3 | import logging 4 | 5 | from threading import Thread 6 | 7 | logger = logging.getLogger(__name__) 8 | 9 | 10 | class InterruptBootstrap(Thread): 11 | 12 | def __init__(self, node): 13 | Thread.__init__(self) 14 | self.node = node 15 | 16 | def run(self): 17 | self.node.watch_log_for("Prepare completed") 18 | self.node.stop(gently=False) 19 | 20 | 21 | class InterruptCompaction(Thread): 22 | """ 23 | Interrupt compaction by killing a node as soon as 24 | the "Compacting" string is found in the log file 25 | for the table specified. This requires debug-level 26 | logging in 2.1+ and expects debug information to be 27 | available in a file called "debug.log" unless a 28 | different name is passed in as a parameter. 29 | """ 30 | 31 | def __init__(self, node, tablename, filename='debug.log', delay=0): 32 | Thread.__init__(self) 33 | self.node = node 34 | self.tablename = tablename 35 | self.filename = filename 36 | self.delay = delay 37 | self.mark = node.mark_log(filename=self.filename) 38 | 39 | def run(self): 40 | self.node.watch_log_for("Compacting(.*)%s" % (self.tablename,), from_mark=self.mark, filename=self.filename) 41 | if self.delay > 0: 42 | random_delay = random.uniform(0, self.delay) 43 | logger.debug("Sleeping for {} seconds".format(random_delay)) 44 | time.sleep(random_delay) 45 | logger.debug("Killing node {}".format(self.node.address())) 46 | self.node.stop(gently=False) 47 | 48 | 49 | class KillOnBootstrap(Thread): 50 | 51 | def __init__(self, node): 52 | Thread.__init__(self) 53 | self.node = node 54 | 55 | def run(self): 56 | self.node.watch_log_for("JOINING: Starting to bootstrap") 57 | self.node.stop(gently=False) 58 | 59 | class KillOnReadyToBootstrap(Thread): 60 | 61 | def __init__(self, node): 62 | Thread.__init__(self) 63 | self.node = node 64 | 65 | def run(self): 66 | self.node.watch_log_for("JOINING: calculation complete, ready to bootstrap") 67 | self.node.stop(gently=False) 68 | -------------------------------------------------------------------------------- /tools/metadata_wrapper.py: -------------------------------------------------------------------------------- 1 | from abc import ABCMeta, abstractproperty 2 | from six import with_metaclass 3 | 4 | class UpdatingMetadataWrapperBase(with_metaclass(ABCMeta, object)): 5 | @abstractproperty 6 | def _wrapped(self): 7 | pass 8 | 9 | def __getattr__(self, name): 10 | return getattr(self._wrapped, name) 11 | 12 | def __getitem__(self, idx): 13 | return self._wrapped[idx] 14 | 15 | 16 | class UpdatingTableMetadataWrapper(UpdatingMetadataWrapperBase): 17 | """ 18 | A class that provides an interface to a table's metadata that is refreshed 19 | on access. 
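A minimal usage sketch (the cluster object, keyspace, and table names are
hypothetical):

    table_meta = UpdatingTableMetadataWrapper(session.cluster, 'ks', 'users')
    table_meta.columns  # refresh_table_metadata is called before delegating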
20 | """ 21 | def __init__(self, cluster, ks_name, table_name, max_schema_agreement_wait=None): 22 | self._cluster = cluster 23 | self._ks_name = ks_name 24 | self._table_name = table_name 25 | self.max_schema_agreement_wait = max_schema_agreement_wait 26 | 27 | @property 28 | def _wrapped(self): 29 | self._cluster.refresh_table_metadata( 30 | self._ks_name, 31 | self._table_name, 32 | max_schema_agreement_wait=self.max_schema_agreement_wait 33 | ) 34 | return self._cluster.metadata.keyspaces[self._ks_name].tables[self._table_name] 35 | 36 | def __repr__(self): 37 | return '{cls_name}(cluster={cluster}, ks_name={ks_name}, table_name={table_name}, max_schema_agreement_wait={max_wait})'.format( 38 | cls_name=self.__class__.__name__, 39 | cluster=repr(self._cluster), 40 | ks_name=self._ks_name, 41 | table_name=self._table_name, 42 | max_wait=self.max_schema_agreement_wait) 43 | 44 | 45 | class UpdatingKeyspaceMetadataWrapper(UpdatingMetadataWrapperBase): 46 | """ 47 | A class that provides an interface to a keyspace's metadata that is 48 | refreshed on access. 49 | """ 50 | def __init__(self, cluster, ks_name, max_schema_agreement_wait=None): 51 | self._cluster = cluster 52 | self._ks_name = ks_name 53 | self.max_schema_agreement_wait = max_schema_agreement_wait 54 | 55 | @property 56 | def _wrapped(self): 57 | self._cluster.refresh_keyspace_metadata( 58 | self._ks_name, 59 | max_schema_agreement_wait=self.max_schema_agreement_wait 60 | ) 61 | return self._cluster.metadata.keyspaces[self._ks_name] 62 | 63 | def __repr__(self): 64 | return '{cls_name}(cluster={cluster}, ks_name={ks_name}, max_schema_agreement_wait={max_wait})'.format( 65 | cls_name=self.__class__.__name__, 66 | cluster=repr(self._cluster), 67 | ks_name=self._ks_name, 68 | max_wait=self.max_schema_agreement_wait) 69 | 70 | 71 | class UpdatingClusterMetadataWrapper(UpdatingMetadataWrapperBase): 72 | """ 73 | A class that provides an interface to a cluster's metadata that is 74 | refreshed on access. 75 | """ 76 | def __init__(self, cluster, max_schema_agreement_wait=None): 77 | """ 78 | @param cluster The cassandra.cluster.Cluster object to wrap. 
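@param max_schema_agreement_wait Optional number of seconds passed through
    to the driver's refresh_schema_metadata() on every access; None falls
    back to the driver's default wait.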
79 | """ 80 | self._cluster = cluster 81 | self.max_schema_agreement_wait = max_schema_agreement_wait 82 | 83 | @property 84 | def _wrapped(self): 85 | self._cluster.refresh_schema_metadata(max_schema_agreement_wait=self.max_schema_agreement_wait) 86 | return self._cluster.metadata 87 | 88 | def __repr__(self): 89 | return '{cls_name}(cluster={cluster}, max_schema_agreement_wait={max_wait})'.format( 90 | cls_name=self.__class__.__name__, 91 | cluster=repr(self._cluster), 92 | max_wait=self.max_schema_agreement_wait) 93 | -------------------------------------------------------------------------------- /tools/sslkeygen.py: -------------------------------------------------------------------------------- 1 | import os 2 | import os.path 3 | import tempfile 4 | import subprocess 5 | 6 | 7 | def generate_credentials(ip, cakeystore=None, cacert=None): 8 | 9 | tmpdir = tempfile.mkdtemp() 10 | 11 | if not cakeystore: 12 | cakeystore = generate_cakeypair(tmpdir, 'ca') 13 | if not cacert: 14 | cacert = generate_cert(tmpdir, "ca", cakeystore) 15 | 16 | # create keystore with new private key 17 | name = "ip" + ip 18 | jkeystore = generate_ipkeypair(tmpdir, name, ip) 19 | 20 | # create signed cert 21 | csr = generate_sign_request(tmpdir, name, jkeystore, ['-ext', 'san=ip:' + ip]) 22 | cert = sign_request(tmpdir, "ca", cakeystore, csr, ['-ext', 'san=ip:' + ip]) 23 | 24 | # import cert chain into keystore 25 | import_cert(tmpdir, "ca", cacert, jkeystore) 26 | import_cert(tmpdir, name, cert, jkeystore) 27 | 28 | return SecurityCredentials(jkeystore, cert, cakeystore, cacert) 29 | 30 | 31 | def generate_cakeypair(dir, name): 32 | return generate_keypair(dir, name, name, ['-ext', 'bc:c']) 33 | 34 | 35 | def generate_ipkeypair(dir, name, ip): 36 | return generate_keypair(dir, name, ip, ['-ext', 'san=ip:' + ip]) 37 | 38 | 39 | def generate_dnskeypair(dir, name, hostname): 40 | return generate_keypair(dir, name, hostname, ['-ext', 'san=dns:' + hostname]) 41 | 42 | 43 | def generate_keypair(dir, name, cn, opts): 44 | kspath = os.path.join(dir, name + '.keystore') 45 | return _exec_keytool(dir, kspath, ['-alias', name, '-genkeypair', '-keyalg', 'RSA', '-dname', 46 | "cn={}, ou=cassandra, o=apache.org, c=US".format(cn), '-keypass', 'cassandra'] + opts) 47 | 48 | 49 | def generate_cert(dir, name, keystore, opts=[]): 50 | fn = os.path.join(dir, name + '.pem') 51 | _exec_keytool(dir, keystore, ['-alias', name, '-exportcert', '-rfc', '-file', fn] + opts) 52 | return fn 53 | 54 | 55 | def generate_sign_request(dir, name, keystore, opts=[]): 56 | fn = os.path.join(dir, name + '.csr') 57 | _exec_keytool(dir, keystore, ['-alias', name, '-keypass', 'cassandra', '-certreq', '-file', fn] + opts) 58 | return fn 59 | 60 | 61 | def sign_request(dir, name, keystore, csr, opts=[]): 62 | fnout = os.path.splitext(csr)[0] + '.pem' 63 | _exec_keytool(dir, keystore, ['-alias', name, '-keypass', 'cassandra', '-gencert', 64 | '-rfc', '-infile', csr, '-outfile', fnout] + opts) 65 | return fnout 66 | 67 | 68 | def import_cert(dir, name, cert, keystore, opts=[]): 69 | _exec_keytool(dir, keystore, ['-alias', name, '-keypass', 'cassandra', '-importcert', '-noprompt', '-file', cert] + opts) 70 | return cert 71 | 72 | 73 | def _exec_keytool(dir, keystore, opts): 74 | args = ['keytool', '-keystore', keystore, '-storepass', 'cassandra', '-deststoretype', 'pkcs12'] + opts 75 | subprocess.check_call(args) 76 | return keystore 77 | 78 | 79 | class SecurityCredentials(): 80 | 81 | def __init__(self, keystore, cert, cakeystore, cacert): 82 | 
self.keystore = keystore 83 | self.cert = cert 84 | self.cakeystore = cakeystore 85 | self.cacert = cacert 86 | self.basedir = os.path.dirname(self.keystore) 87 | 88 | def __str__(self): 89 | return "keystore: {}, cert: {}, cakeystore: {}, cacert: {}".format( 90 | self.keystore, self.cert, self.cakeystore, self.cacert) 91 | -------------------------------------------------------------------------------- /udtencoding_test.py: -------------------------------------------------------------------------------- 1 | import time 2 | import logging 3 | 4 | from tools.assertions import assert_invalid 5 | from dtest import Tester, create_ks 6 | 7 | logger = logging.getLogger(__name__) 8 | 9 | 10 | class TestUDTEncoding(Tester): 11 | 12 | def test_udt(self): 13 | """ Test (somewhat indirectly) that user queries involving UDTs are properly encoded (due to the driver not recognizing UDT syntax) """ 14 | cluster = self.cluster 15 | 16 | cluster.populate(3).start() 17 | node1, node2, node3 = cluster.nodelist() 18 | 19 | time.sleep(.5) 20 | session = self.patient_cql_connection(node1) 21 | create_ks(session, 'ks', 3) 22 | 23 | # create udt and insert correctly (should be successful) 24 | session.execute('CREATE TYPE address (city text, zip int);') 25 | session.execute('CREATE TABLE user_profiles (login text PRIMARY KEY, addresses map<text, frozen<address>>);') 26 | session.execute("INSERT INTO user_profiles(login, addresses) VALUES ('tsmith', { 'home': {city: 'San Fransisco',zip: 94110 }});") 27 | 28 | # note: here 'addresses' looks like a plain map -> which is what the driver thinks it is; the udt is encoded server side, and we test whether the encoder recognizes the errors if addresses is changed slightly 29 | 30 | # try adding a field - see if it will be encoded to a udt (should return error) 31 | assert_invalid(session, 32 | "INSERT INTO user_profiles(login, addresses) VALUES ('jsmith', { 'home': {street: 'El Camino Real', city: 'San Fransisco', zip: 94110 }});", 33 | "Unknown field 'street' in value of user defined type address") 34 | 35 | # try modifying a field name - see if it will be encoded to a udt (should return error) 36 | assert_invalid(session, 37 | "INSERT INTO user_profiles(login, addresses) VALUES ('fsmith', { 'home': {cityname: 'San Fransisco', zip: 94110 }});", 38 | "Unknown field 'cityname' in value of user defined type address") 39 | 40 | # try modifying a type within the collection - see if it will be encoded to a udt (should return error) 41 | assert_invalid(session, "INSERT INTO user_profiles(login, addresses) VALUES ('fsmith', { 'home': {city: 'San Fransisco', zip: '94110' }});", 42 | "Invalid map literal for addresses") 43 | -------------------------------------------------------------------------------- /upgrade_tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/apache/cassandra-dtest/28533ee239c23abdb1061dfeff793cc48bb3d8d7/upgrade_tests/__init__.py -------------------------------------------------------------------------------- /upgrade_tests/bootstrap_upgrade_test.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from bootstrap_test import BootstrapTester 4 | 5 | since = pytest.mark.since 6 | 7 | 8 | @pytest.mark.upgrade_test 9 | class TestBootstrapUpgrade(BootstrapTester): 10 | 11 | """ 12 | @jira_ticket CASSANDRA-11841 13 | Test that bootstrap works with a mixed version cluster 14 | In particular, we want to test that keep-alive is not sent 15 | to a node with version < 3.10 16 | """ 
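    # The bootstrap scenario itself is implemented by the parent class
    # (BootstrapTester in bootstrap_test.py); this subclass only pins the
    # version pairing exercised in the test below.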
17 | @pytest.mark.no_vnodes 18 | @since('3.10', max_version='3.99') 19 | def test_simple_bootstrap_mixed_versions(self): 20 | # Compatibility flag ensures that bootstrapping gets schema information during 21 | # upgrades from 3.0.14+ to anything upwards for 3.0.x or 3.x clusters. 22 | # @jira_ticket CASSANDRA-13004 for detailed context on `force_3_0_protocol_version` flag 23 | self._test_bootstrap_with_compatibility_flag_on(bootstrap_from_version="3.5") 24 | -------------------------------------------------------------------------------- /upgrade_tests/compatibility_flag_test.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | import logging 3 | 4 | from cassandra import ConsistencyLevel 5 | from cassandra.query import SimpleStatement 6 | from dtest import Tester 7 | from tools.assertions import assert_all 8 | 9 | since = pytest.mark.since 10 | logger = logging.getLogger(__name__) 11 | 12 | 13 | @pytest.mark.upgrade_test 14 | class TestCompatibilityFlag(Tester): 15 | """ 16 | Test the 3.0 protocol compatibility flag 17 | 18 | @jira_ticket CASSANDRA-13004 19 | """ 20 | 21 | def _compatibility_flag_off_with_30_node_test(self, from_version): 22 | """ 23 | Test compatibility with the 3.0 protocol version: if the flag is unset, schema agreement cannot be reached 24 | """ 25 | 26 | cluster = self.cluster 27 | cluster.populate(2) 28 | node1, node2 = cluster.nodelist() 29 | cluster.set_install_dir(version=from_version) 30 | self.fixture_dtest_setup.reinitialize_cluster_for_different_version() 31 | cluster.start() 32 | 33 | node1.drain() 34 | node1.watch_log_for("DRAINED") 35 | node1.stop(wait_other_notice=False) 36 | logger.debug("Upgrading to current version") 37 | self.set_node_to_current_version(node1) 38 | node1.start(wait_for_binary_proto=True) 39 | 40 | node1.watch_log_for("Not pulling schema because versions match or shouldPullSchemaFrom returned false", filename='debug.log') 41 | node2.watch_log_for("Not pulling schema because versions match or shouldPullSchemaFrom returned false", filename='debug.log') 42 | 43 | def _compatibility_flag_on_with_30_test(self, from_version): 44 | """ 45 | Test compatibility with the 3.0 protocol version: if the flag is set, schema agreement can be reached 46 | """ 47 | 48 | cluster = self.cluster 49 | cluster.populate(2) 50 | node1, node2 = cluster.nodelist() 51 | cluster.set_install_dir(version=from_version) 52 | self.fixture_dtest_setup.reinitialize_cluster_for_different_version() 53 | cluster.start() 54 | 55 | node1.drain() 56 | node1.watch_log_for("DRAINED") 57 | node1.stop(wait_other_notice=False) 58 | logger.debug("Upgrading to current version") 59 | self.set_node_to_current_version(node1) 60 | node1.start(jvm_args=["-Dcassandra.force_3_0_protocol_version=true"], wait_for_binary_proto=True) 61 | 62 | session = self.patient_cql_connection(node1) 63 | self._run_test(session) 64 | 65 | def _compatibility_flag_on_3014_test(self): 66 | """ 67 | Test compatibility between post-13004 nodes, one of which is in compatibility mode 68 | """ 69 | cluster = self.cluster 70 | cluster.populate(2) 71 | node1, node2 = cluster.nodelist() 72 | 73 | node1.start(wait_for_binary_proto=True) 74 | node2.start(jvm_args=["-Dcassandra.force_3_0_protocol_version=true"], wait_for_binary_proto=True) 75 | 76 | session = self.patient_cql_connection(node1) 77 | self._run_test(session) 78 | 79 | def _compatibility_flag_off_3014_test(self): 80 | """ 81 | Test compatibility between post-13004 nodes 82 | """ 83 | cluster = self.cluster 84 | 
cluster.populate(2) 85 | node1, node2 = cluster.nodelist() 86 | 87 | node1.start(wait_for_binary_proto=True) 88 | node2.start(wait_for_binary_proto=True) 89 | 90 | session = self.patient_cql_connection(node1) 91 | self._run_test(session) 92 | 93 | def _run_test(self, session): 94 | # Create a small keyspace replicated across both nodes and verify reads/writes at CL.ALL 95 | 96 | session.execute("CREATE KEYSPACE test WITH replication = {'class': 'SimpleStrategy', 'replication_factor': '2'} ;") 97 | session.execute("CREATE TABLE test.test (a text PRIMARY KEY, b text, c text);") 98 | session.cluster.control_connection.wait_for_schema_agreement() 99 | 100 | for i in range(1, 6): 101 | session.execute(SimpleStatement("INSERT INTO test.test (a, b, c) VALUES ('{}', '{}', '{}');".format(i, i + 1, i + 2), 102 | consistency_level=ConsistencyLevel.ALL)) 103 | 104 | assert_all(session, 105 | "SELECT * FROM test.test", 106 | [[str(i), str(i + 1), str(i + 2)] for i in range(1, 6)], ignore_order=True, 107 | cl=ConsistencyLevel.ALL) 108 | 109 | assert_all(session, 110 | "SELECT a,c FROM test.test", 111 | [[str(i), str(i + 2)] for i in range(1, 6)], ignore_order=True, 112 | cl=ConsistencyLevel.ALL) 113 | 114 | 115 | @since('3.0.14', max_version='3.0.x') 116 | class CompatibilityFlag30XTest(TestCompatibilityFlag): 117 | 118 | def test_compatibility_flag_off_with_30_node(self): 119 | self._compatibility_flag_off_with_30_node_test('3.0.12') 120 | 121 | def test_compatibility_flag_on_with_3_0(self): 122 | self._compatibility_flag_on_with_30_test('3.0.12') 123 | 124 | def test_compatibility_flag_on_3014(self): 125 | self._compatibility_flag_on_3014_test() 126 | 127 | def test_compatibility_flag_off_3014(self): 128 | self._compatibility_flag_off_3014_test() 129 | 130 | 131 | @since('3.11', max_version='4') 132 | class CompatibilityFlag3XTest(TestCompatibilityFlag): 133 | 134 | def test_compatibility_flag_off_with_30_node(self): 135 | self._compatibility_flag_off_with_30_node_test('3.10') 136 | 137 | def test_compatibility_flag_on_with_3_0(self): 138 | self._compatibility_flag_on_with_30_test('3.10') 139 | 140 | def test_compatibility_flag_on_3014(self): 141 | self._compatibility_flag_on_3014_test() 142 | 143 | def test_compatibility_flag_off_3014(self): 144 | self._compatibility_flag_off_3014_test() 145 | -------------------------------------------------------------------------------- /upgrade_tests/conftest.py: -------------------------------------------------------------------------------- 1 | from .upgrade_manifest import set_config 2 | 3 | def pytest_configure(config): 4 | set_config(config) 5 | -------------------------------------------------------------------------------- /upgrade_tests/repair_test.py: -------------------------------------------------------------------------------- 1 | import time 2 | import pytest 3 | import logging 4 | 5 | from repair_tests.repair_test import BaseRepairTest 6 | 7 | since = pytest.mark.since 8 | logger = logging.getLogger(__name__) 9 | 10 | LEGACY_SSTABLES_JVM_ARGS = ["-Dcassandra.streamdes.initial_mem_buffer_size=1", 11 | "-Dcassandra.streamdes.max_mem_buffer_size=5", 12 | "-Dcassandra.streamdes.max_spill_file_size=16"] 13 | 14 | 15 | # We don't support directly upgrading from 2.2 to 4.0, so this is disabled on 4.0. 16 | # TODO: we should probably not hardcode versions? 
17 | @pytest.mark.upgrade_test 18 | @since('3.0', max_version='3.99') 19 | class TestUpgradeRepair(BaseRepairTest): 20 | 21 | @since('3.0', max_version='3.99') 22 | def test_repair_after_upgrade(self): 23 | """ 24 | @jira_ticket CASSANDRA-10990 25 | """ 26 | default_install_dir = self.cluster.get_install_dir() 27 | cluster = self.cluster 28 | logger.debug("Setting version to 2.2.5") 29 | cluster.set_install_dir(version="2.2.5") 30 | self.install_nodetool_legacy_parsing() 31 | self._populate_cluster() 32 | 33 | self._do_upgrade(default_install_dir) 34 | self._repair_and_verify(True) 35 | 36 | def _do_upgrade(self, default_install_dir): 37 | cluster = self.cluster 38 | 39 | for node in cluster.nodelist(): 40 | logger.debug("Upgrading %s to current version" % node.name) 41 | if node.is_running(): 42 | node.flush() 43 | time.sleep(1) 44 | node.stop(wait_other_notice=True) 45 | node.set_install_dir(install_dir=default_install_dir) 46 | node.start(wait_for_binary_proto=True) 47 | cursor = self.patient_cql_connection(node) 48 | cluster.set_install_dir(default_install_dir) 49 | -------------------------------------------------------------------------------- /upgrade_tests/supercolumn-data/cassandra-2.0/supcols/cols/supcols-cols-jb-2-CompressionInfo.db: -------------------------------------------------------------------------------- 1 | SnappyCompressorw -------------------------------------------------------------------------------- /upgrade_tests/supercolumn-data/cassandra-2.0/supcols/cols/supcols-cols-jb-2-Data.db: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/apache/cassandra-dtest/28533ee239c23abdb1061dfeff793cc48bb3d8d7/upgrade_tests/supercolumn-data/cassandra-2.0/supcols/cols/supcols-cols-jb-2-Data.db -------------------------------------------------------------------------------- /upgrade_tests/supercolumn-data/cassandra-2.0/supcols/cols/supcols-cols-jb-2-Filter.db: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/apache/cassandra-dtest/28533ee239c23abdb1061dfeff793cc48bb3d8d7/upgrade_tests/supercolumn-data/cassandra-2.0/supcols/cols/supcols-cols-jb-2-Filter.db -------------------------------------------------------------------------------- /upgrade_tests/supercolumn-data/cassandra-2.0/supcols/cols/supcols-cols-jb-2-Index.db: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/apache/cassandra-dtest/28533ee239c23abdb1061dfeff793cc48bb3d8d7/upgrade_tests/supercolumn-data/cassandra-2.0/supcols/cols/supcols-cols-jb-2-Index.db -------------------------------------------------------------------------------- /upgrade_tests/supercolumn-data/cassandra-2.0/supcols/cols/supcols-cols-jb-2-Statistics.db: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/apache/cassandra-dtest/28533ee239c23abdb1061dfeff793cc48bb3d8d7/upgrade_tests/supercolumn-data/cassandra-2.0/supcols/cols/supcols-cols-jb-2-Statistics.db -------------------------------------------------------------------------------- /upgrade_tests/supercolumn-data/cassandra-2.0/supcols/cols/supcols-cols-jb-2-TOC.txt: -------------------------------------------------------------------------------- 1 | Statistics.db 2 | Summary.db 3 | Filter.db 4 | TOC.txt 5 | Index.db 6 | CompressionInfo.db 7 | Data.db 8 | -------------------------------------------------------------------------------- 
/wide_rows_test.py: -------------------------------------------------------------------------------- 1 | import datetime 2 | import random 3 | import logging 4 | 5 | from dtest import Tester, create_ks 6 | from tools.assertions import assert_length_equal 7 | 8 | status_messages = ( 9 | "I''m going to the Cassandra Summit in June!", 10 | "C* is awesome!", 11 | "All your sstables are belong to us.", 12 | "Just turned on another 50 C* nodes at <company>, scales beautifully.", 13 | "Oh, look! Cats, on reddit!", 14 | "Netflix recommendations are really good, wonder why?", 15 | "Spotify playlists are always giving me good tunes, wonder why?" 16 | ) 17 | 18 | clients = ( 19 | "Android", 20 | "iThing", 21 | "Chromium", 22 | "Mozilla", 23 | "Emacs" 24 | ) 25 | 26 | logger = logging.getLogger(__name__) 27 | 28 | 29 | class TestWideRows(Tester): 30 | def test_wide_rows(self): 31 | self.write_wide_rows() 32 | 33 | def write_wide_rows(self): 34 | cluster = self.cluster 35 | cluster.populate(1).start() 36 | node1 = cluster.nodelist()[0] 37 | 38 | session = self.patient_cql_connection(node1) 39 | start_time = datetime.datetime.now() 40 | create_ks(session, 'wide_rows', 1) 41 | # Simple timeline: user -> {date: value, ...} 42 | logger.debug('Create Table....') 43 | session.execute('CREATE TABLE user_events (userid text, event timestamp, value text, PRIMARY KEY (userid, event));') 44 | date = datetime.datetime.now() 45 | # Create a large timeline for each of a group of users: 46 | for user in ('ryan', 'cathy', 'mallen', 'joaquin', 'erin', 'ham'): 47 | logger.debug("Writing values for: %s" % user) 48 | for day in range(5000): 49 | date_str = (date + datetime.timedelta(day)).strftime("%Y-%m-%d") 50 | client = random.choice(clients) 51 | msg = random.choice(status_messages) 52 | query = "UPDATE user_events SET value = '{msg:%s, client:%s}' WHERE userid='%s' and event='%s';" % (msg, client, user, date_str) 53 | # logger.debug(query) 54 | session.execute(query) 55 | 56 | # logger.debug('Duration of test: %s' % (datetime.datetime.now() - start_time)) 57 | 58 | # Pick out an update for a specific date: 59 | query = "SELECT value FROM user_events WHERE userid='ryan' and event='%s'" % \ 60 | (date + datetime.timedelta(10)).strftime("%Y-%m-%d") 61 | rows = session.execute(query) 62 | for value in rows: 63 | logger.debug(value) 64 | assert len(value[0]) > 0 65 | 66 | def test_column_index_stress(self): 67 | """Write a large number of columns to a single row and set 68 | 'column_index_size_in_kb' to a sufficiently low value to force 69 | the creation of a column index. The test will then randomly 70 | read columns from that row and ensure that all data is 71 | returned. See CASSANDRA-5225. 
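With 'column_index_size_in_kb' lowered to 1, an index entry is added
roughly every 1KB of serialized row data instead of the 64KB default,
so the 100,000 small columns written to 'row0' below force a heavily
indexed row.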
72 | """ 73 | cluster = self.cluster 74 | cluster.set_configuration_options(values={'column_index_size_in_kb': 1})  # reduce this value (before start) to force column index creation 75 | cluster.populate(1).start() 76 | (node1,) = cluster.nodelist() 77 | session = self.patient_cql_connection(node1) 78 | create_ks(session, 'wide_rows', 1) 79 | 80 | create_table_query = 'CREATE TABLE test_table (row varchar, name varchar, value int, PRIMARY KEY (row, name));' 81 | session.execute(create_table_query) 82 | 83 | # Now insert 100,000 columns to row 'row0' 84 | insert_column_query = "UPDATE test_table SET value = {value} WHERE row = '{row}' AND name = '{name}';" 85 | for i in range(100000): 86 | row = 'row0' 87 | name = 'val' + str(i) 88 | session.execute(insert_column_query.format(value=i, row=row, name=name)) 89 | 90 | # now randomly fetch columns: 1 to 3 at a time 91 | for i in range(10000): 92 | select_column_query = "SELECT value FROM test_table WHERE row='row0' AND name in ('{name1}', '{name2}', '{name3}');" 93 | values2fetch = [str(random.randint(0, 99999)) for i in range(3)] 94 | # values2fetch is a list of random values. Because they are random, they will not necessarily be unique. 95 | # To simplify the template logic in the select_column_query I will not expect the query to 96 | # necessarily return 3 values. Hence I am computing the number of unique values in values2fetch 97 | # and using that in the assert at the end. 98 | expected_rows = len(set(values2fetch)) 99 | rows = list(session.execute(select_column_query.format(name1="val" + values2fetch[0], 100 | name2="val" + values2fetch[1], 101 | name3="val" + values2fetch[2]))) 102 | assert_length_equal(rows, expected_rows) 103 | --------------------------------------------------------------------------------