├── checkstyle.properties
├── src
│   ├── test
│   │   ├── resources
│   │   │   ├── cassandra-topology.properties
│   │   │   ├── logback.xml
│   │   │   ├── books.cql
│   │   │   ├── items.cql
│   │   │   └── another-cassandra.yaml
│   │   └── java
│   │       └── io
│   │           └── smartcat
│   │               └── migration
│   │                   ├── ColumnNameMatcher.java
│   │                   ├── BaseTest.java
│   │                   ├── migrations
│   │                   │   ├── schema
│   │                   │   │   ├── AddBookISBNFieldMigration.java
│   │                   │   │   ├── AddBookGenreFieldMigration.java
│   │                   │   │   └── CreateItemByNumberAndExternalIdMigration.java
│   │                   │   └── data
│   │                   │       ├── InsertInitialItemsMigration.java
│   │                   │       ├── InsertBooksMigration.java
│   │                   │       ├── PopulateItemByNumberAndExternalIdMigration.java
│   │                   │       └── AddGenreMigration.java
│   │                   ├── CassandraMetadataAnalyzer.java
│   │                   ├── MigrationTest.java
│   │                   ├── CassandraVersionerTest.java
│   │                   ├── MigrationEngineItemsTest.java
│   │                   ├── MigrationEngineBooksTest.java
│   │                   ├── MigrationResourcesTest.java
│   │                   └── MigratorTest.java
│   └── main
│       └── java
│           └── io
│               └── smartcat
│                   └── migration
│                       ├── MigrationType.java
│                       ├── DataMigration.java
│                       ├── SchemaMigration.java
│                       ├── Executor.java
│                       ├── exceptions
│                       │   ├── MigrationException.java
│                       │   └── SchemaAgreementException.java
│                       ├── MigrationResources.java
│                       ├── CassandraVersioner.java
│                       ├── MigrationEngine.java
│                       └── Migration.java
├── .gitignore
├── .travis.yml
├── checkstyle-suppressions.xml
├── checkstyle.xml
├── README.md
├── LICENSE
├── pom.xml
└── smartcat-formatter.xml
/checkstyle.properties:
--------------------------------------------------------------------------------
1 | suppression_file=checkstyle-suppressions.xml
--------------------------------------------------------------------------------
/src/test/resources/cassandra-topology.properties:
--------------------------------------------------------------------------------
1 | 127.0.0.1=DC1:RAC1
2 |
--------------------------------------------------------------------------------
/src/test/resources/logback.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | ## Common
2 | *~
3 |
4 | ## Csharp
5 | **/bin
6 | **/obj
7 | *.suo
8 | **/packages
9 | **/TestResults
10 |
11 | ## Java
12 | **/target
13 | *.settings
14 | *.classpath
15 | *.project
16 | target/
17 | *.java-version
18 |
19 | ## Scala
20 | **/.idea
21 |
22 | ## Intellij
23 | *.iml
24 |
--------------------------------------------------------------------------------
/src/main/java/io/smartcat/migration/MigrationType.java:
--------------------------------------------------------------------------------
1 | package io.smartcat.migration;
2 |
3 | /**
4 | * Enumeration for migration types (SCHEMA or DATA).
5 | */
6 | public enum MigrationType {
7 | /**
8 | * Schema migration type.
9 | */
10 | SCHEMA,
11 | /**
12 | * Data migration type.
13 | */
14 | DATA;
15 | }
16 |
--------------------------------------------------------------------------------
/src/main/java/io/smartcat/migration/DataMigration.java:
--------------------------------------------------------------------------------
1 | package io.smartcat.migration;
2 |
3 | /**
4 | * Data migration for migrations manipulating data.
5 | */
6 | public abstract class DataMigration extends Migration {
7 |
8 | /**
9 | * Creates new data migration.
10 | * @param version Version of this data migration
11 | */
12 | public DataMigration(int version) {
13 | super(MigrationType.DATA, version);
14 | }
15 | }
16 |
--------------------------------------------------------------------------------
/src/main/java/io/smartcat/migration/SchemaMigration.java:
--------------------------------------------------------------------------------
1 | package io.smartcat.migration;
2 |
3 | /**
4 | * Schema migration for migrations manipulating schema.
5 | */
6 | public abstract class SchemaMigration extends Migration {
7 |
8 | /**
9 | * Create new schema migration with provided version.
10 | * @param version Version of this schema migration
11 | */
12 | public SchemaMigration(int version) {
13 | super(MigrationType.SCHEMA, version);
14 | }
15 | }
16 |
--------------------------------------------------------------------------------
/src/test/java/io/smartcat/migration/ColumnNameMatcher.java:
--------------------------------------------------------------------------------
1 | package io.smartcat.migration;
2 |
3 | import com.datastax.driver.core.ColumnMetadata;
4 | import com.google.common.base.Predicate;
5 |
6 | public class ColumnNameMatcher implements Predicate<ColumnMetadata> {
7 | private String columnName;
8 |
9 | public ColumnNameMatcher(String columnName) {
10 | this.columnName = columnName;
11 | }
12 |
13 | public boolean apply(ColumnMetadata column) {
14 | return column.getName().equals(columnName);
15 | }
16 | }
17 |
--------------------------------------------------------------------------------
/src/test/resources/books.cql:
--------------------------------------------------------------------------------
1 | /*
2 | Create keyspace with NetworkTopologyStrategy. This is more similar to keyspaces created on production servers.
3 | Also, using a lightweight transaction (IF NOT EXISTS) does not seem to work together with SimpleStrategy and
4 | consistency level EACH_QUORUM.
5 | */
6 | CREATE KEYSPACE migration_test_books WITH replication = { 'class': 'NetworkTopologyStrategy', 'DC1': '1' };
7 | USE migration_test_books;
8 |
9 | /* Books */
10 | CREATE TABLE books (
11 | name text,
12 | author text,
13 | PRIMARY KEY (author, name)
14 | ) WITH COMMENT='Books by author';
15 |
--------------------------------------------------------------------------------
/src/test/resources/items.cql:
--------------------------------------------------------------------------------
1 | /*
2 | Create keyspace with NetworkTopologyStrategy. This is more similar to keyspaces created on production servers.
3 | Also, using a lightweight transaction (IF NOT EXISTS) does not seem to work together with SimpleStrategy and
4 | consistency level EACH_QUORUM.
5 | */
6 | CREATE KEYSPACE migration_test_items WITH replication = { 'class': 'NetworkTopologyStrategy', 'DC1': '1' };
7 | USE migration_test_items;
8 |
9 | /* Items by id */
10 | CREATE TABLE items_by_id (
11 | id uuid,
12 | number text,
13 | external_id uuid,
14 | PRIMARY KEY (id)
15 | ) WITH COMMENT='Items by item ID';
16 |
--------------------------------------------------------------------------------
/src/main/java/io/smartcat/migration/Executor.java:
--------------------------------------------------------------------------------
1 | package io.smartcat.migration;
2 |
3 | import com.datastax.driver.core.Session;
4 |
5 | /**
6 | * Executor is a class which executes all the migrations for a given session.
7 | */
8 | public class Executor {
9 |
10 | private Executor() {
11 | }
12 |
13 | /**
14 | * Execute all migrations in migration resource collection.
15 | *
16 | * @param session DataStax driver session object
17 | * @param resources Migration resources collection
18 | * @return Success of the migration
19 | */
20 | public static boolean migrate(final Session session, final MigrationResources resources) {
21 | return MigrationEngine.withSession(session).migrate(resources);
22 | }
23 |
24 | }
25 |
--------------------------------------------------------------------------------
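As a usage sketch (not part of the repository), this shows invoking the `Executor` facade above; the contact point, keyspace name, and class name are illustrative:

```java
import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.Session;

import io.smartcat.migration.Executor;
import io.smartcat.migration.MigrationResources;

// Sketch: running a migration set through the Executor facade.
public class MigrationRunner {

    public static void main(String[] args) {
        final Cluster cluster = Cluster.builder().addContactPoint("localhost").build();
        final Session session = cluster.connect("my_keyspace"); // keyspace name is illustrative

        final MigrationResources resources = new MigrationResources();
        // resources.addMigration(...) calls for concrete migrations go here.

        final boolean success = Executor.migrate(session, resources); // delegates to MigrationEngine
        System.out.println("Migration success: " + success);

        cluster.close();
    }
}
```
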
/.travis.yml:
--------------------------------------------------------------------------------
1 | language: java
2 | sudo: true
3 |
4 | jdk:
5 | - oraclejdk8
6 | install: mvn install -Dgpg.skip=true
7 | notifications:
8 | slack:
9 | secure: Uqth2gatGYTEOTdqAkZJD4ErqZmLXp+dMrFV8QVvm04F5iG6y1mDLJF5ZnvqQWMX0kr6xi77V/ZWE/HUoHFVl+IzzN1VJlNNcpEG74O6FWlhzCWw1ivPbOpUYRouQit2QZPrqq4MGNy5omr22df91ESwe9Ow6es45lhIQ1NNYvtgBccDj3MS2tD1PW4OTnjNiRMjBIEkcoIq0XNHxyE5g19LYo5XY+E0cYFL1LHG5L9sAcyX5GKxaQC7jyCRcEBVPq/Tv/YiHv0uv4cQBE96qQdHfJHhzUCbKDX6dQ+sn9QY+Pw+ztM2Pt51GfnnnBCHoWbHgfXa1CGfs+OwrO+MrVIa9ZUyQCYuI4qe0vtSsVAeA6w8PfMA9YVe0GSRV6KbZHY1cCfjAG4x3w5yETRak381Cnf4DieehuFmz5UMWAZi5BcBHIQu0gNEyqPpNrkewozebOcLNfkzOJBhi3MdtS92gs2cxGDH2j7ivT77cTDpmgYwiM3yxvmYPZXCYpSFOeC6Z/AC56tR8eUA/kAQTRMG6F3YoDoTQ8K25QBmHWNoC9lt30IIDyI5bHXkiPBwAlRxW+Rk+KWM5wWIz6c/UfMcixzJn7prAlZwcnerGao6TerNpaOc6zcEUO+t5U8HUk5vL4GzAOICBjLzgEuVNEisVOUAi1eBo8heJTyDonw=
10 |
--------------------------------------------------------------------------------
/src/main/java/io/smartcat/migration/exceptions/MigrationException.java:
--------------------------------------------------------------------------------
1 | package io.smartcat.migration.exceptions;
2 |
3 | /**
4 | * Domain exception for errors that occur while migrating.
5 | */
6 | public class MigrationException extends Exception {
7 |
8 | private static final long serialVersionUID = 939170349798471411L;
9 |
10 | /**
11 | * Create migration exception with provided message.
12 | * @param message Message for this exception
13 | */
14 | public MigrationException(final String message) {
15 | super(message);
16 | }
17 |
18 | /**
19 | * Create migration exception with provided message and original cause.
20 | * @param message Message for this exception
21 | * @param throwable Throwable wrapping original cause.
22 | */
23 | public MigrationException(final String message, final Throwable throwable) {
24 | super(message, throwable);
25 | }
26 | }
27 |
--------------------------------------------------------------------------------
/src/main/java/io/smartcat/migration/exceptions/SchemaAgreementException.java:
--------------------------------------------------------------------------------
1 | package io.smartcat.migration.exceptions;
2 |
3 | /**
4 | * Schema agreement exception which wraps errors that occur while the schema is being propagated to all nodes.
5 | */
6 | public class SchemaAgreementException extends MigrationException {
7 |
8 | private static final long serialVersionUID = 4672095868449483293L;
9 |
10 | /**
11 | * Create schema agreement exception with provided message.
12 | * @param message Message for this exception
13 | */
14 | public SchemaAgreementException(final String message) {
15 | super(message);
16 | }
17 |
18 | /**
19 | * Create schema agreement exception with provided message and original cause.
20 | * @param message Message for this exception
21 | * @param throwable Throwable wrapping original cause
22 | */
23 | public SchemaAgreementException(final String message, final Throwable throwable) {
24 | super(message, throwable);
25 | }
26 | }
27 |
--------------------------------------------------------------------------------
/src/test/java/io/smartcat/migration/BaseTest.java:
--------------------------------------------------------------------------------
1 | package io.smartcat.migration;
2 |
3 | import com.datastax.driver.core.*;
4 |
5 | import java.util.ArrayList;
6 | import java.util.List;
7 |
8 | public class BaseTest {
9 |
10 | public void truncateTables(final String keyspace, final Session session) {
11 | for (final String table : tables(keyspace, session)) {
12 | session.execute(String.format("TRUNCATE %s.%s;", keyspace, table));
13 | }
14 | }
15 |
16 | private List<String> tables(final String keyspace, final Session session) {
17 | final List<String> tables = new ArrayList<>();
18 | final Cluster cluster = session.getCluster();
19 | final Metadata meta = cluster.getMetadata();
20 | final KeyspaceMetadata keyspaceMeta = meta.getKeyspace(keyspace);
21 | for (final TableMetadata tableMeta : keyspaceMeta.getTables()) {
22 | tables.add(tableMeta.getName());
23 | }
24 |
25 | return tables;
26 | }
27 |
28 | }
29 |
--------------------------------------------------------------------------------
/src/test/java/io/smartcat/migration/migrations/schema/AddBookISBNFieldMigration.java:
--------------------------------------------------------------------------------
1 | package io.smartcat.migration.migrations.schema;
2 |
3 | import com.datastax.driver.core.SimpleStatement;
4 |
5 | import io.smartcat.migration.SchemaMigration;
6 | import io.smartcat.migration.exceptions.MigrationException;
7 |
8 | /**
9 | * Example of a schema migration which adds a new column to an existing table.
10 | */
11 | public class AddBookISBNFieldMigration extends SchemaMigration {
12 |
13 | public AddBookISBNFieldMigration(final int version) {
14 | super(version);
15 | }
16 |
17 | @Override
18 | public String getDescription() {
19 | return "Alters books tables by adding ISBN column";
20 | }
21 |
22 | @Override
23 | public void execute() throws MigrationException {
24 | try {
25 | final String alterBooksAddISBNCQL = "ALTER TABLE books ADD isbn text;";
26 |
27 | executeWithSchemaAgreement(new SimpleStatement(alterBooksAddISBNCQL));
28 |
29 | } catch (final Exception e) {
30 | throw new MigrationException("Failed to execute AddBookISBNField migration", e);
31 | }
32 | }
33 |
34 | }
35 |
--------------------------------------------------------------------------------
/checkstyle-suppressions.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
17 |
18 |
19 |
20 |
21 |
22 |
23 |
24 |
--------------------------------------------------------------------------------
/src/test/java/io/smartcat/migration/migrations/schema/AddBookGenreFieldMigration.java:
--------------------------------------------------------------------------------
1 | package io.smartcat.migration.migrations.schema;
2 |
3 | import com.datastax.driver.core.SimpleStatement;
4 |
5 | import io.smartcat.migration.exceptions.MigrationException;
6 | import io.smartcat.migration.SchemaMigration;
7 |
8 | /**
9 | * Example of a schema migration which adds a new column to an existing table.
10 | */
11 | public class AddBookGenreFieldMigration extends SchemaMigration {
12 |
13 | public AddBookGenreFieldMigration(final int version) {
14 | super(version);
15 | }
16 |
17 | @Override
18 | public String getDescription() {
19 | return "Alters books tables by adding genre column";
20 | }
21 |
22 | @Override
23 | public void execute() throws MigrationException {
24 | try {
25 | final String alterBooksAddGenreCQL = "ALTER TABLE books ADD genre text;";
26 |
27 | executeWithSchemaAgreement(new SimpleStatement(alterBooksAddGenreCQL));
28 |
29 | } catch (final Exception e) {
30 | throw new MigrationException("Failed to execute AddBookGenreField migration", e);
31 | }
32 | }
33 |
34 | }
35 |
--------------------------------------------------------------------------------
/src/test/java/io/smartcat/migration/migrations/data/InsertInitialItemsMigration.java:
--------------------------------------------------------------------------------
1 | package io.smartcat.migration.migrations.data;
2 |
3 | import com.datastax.driver.core.PreparedStatement;
4 | import io.smartcat.migration.DataMigration;
5 | import io.smartcat.migration.exceptions.MigrationException;
6 |
7 | import java.util.UUID;
8 |
9 | public class InsertInitialItemsMigration extends DataMigration {
10 |
11 | private final int count;
12 |
13 | public InsertInitialItemsMigration(final int count, final int version) {
14 | super(version);
15 | this.count = count;
16 | }
17 |
18 | @Override
19 | public String getDescription() {
20 | return "Populate items by id table with initial data";
21 | }
22 |
23 | @Override
24 | public void execute() throws MigrationException {
25 | try {
26 | final PreparedStatement preparedStatement =
27 | session.prepare("INSERT INTO items_by_id (id, number, external_id) VALUES (?, ?, ?);");
28 |
29 | for (int i = 0; i < count; i++) {
30 | session.execute(preparedStatement.bind(UUID.randomUUID(), Integer.toString(i), UUID.randomUUID()));
31 | }
32 | } catch (final Exception e) {
33 | throw new MigrationException("Failed to execute InsertInitialItemsMigration migration", e);
34 | }
35 |
36 | }
37 | }
38 |
--------------------------------------------------------------------------------
/src/test/java/io/smartcat/migration/migrations/schema/CreateItemByNumberAndExternalIdMigration.java:
--------------------------------------------------------------------------------
1 | package io.smartcat.migration.migrations.schema;
2 |
3 | import com.datastax.driver.core.SimpleStatement;
4 | import io.smartcat.migration.SchemaMigration;
5 | import io.smartcat.migration.exceptions.MigrationException;
6 |
7 | public class CreateItemByNumberAndExternalIdMigration extends SchemaMigration {
8 |
9 | public CreateItemByNumberAndExternalIdMigration(final int version) {
10 | super(version);
11 | }
12 |
13 | @Override
14 | public String getDescription() {
15 | return "Creates item table with number and external id as composite key";
16 | }
17 |
18 | @Override
19 | public void execute() throws MigrationException {
20 | try {
21 | final String statement =
22 | "CREATE TABLE IF NOT EXISTS items_by_number_external_id (" +
23 | "id uuid," +
24 | "number text," +
25 | "external_id uuid," +
26 | "PRIMARY KEY ((number, external_id))" +
27 | ") WITH COMMENT='Items by item number and external id';";
28 | executeWithSchemaAgreement(new SimpleStatement(statement));
29 |
30 | } catch (final Exception e) {
31 | throw new MigrationException("Failed to execute CreateItemsByNumberAndExternalIdMigration migration", e);
32 | }
33 | }
34 |
35 | }
36 |
--------------------------------------------------------------------------------
/src/test/java/io/smartcat/migration/CassandraMetadataAnalyzer.java:
--------------------------------------------------------------------------------
1 | package io.smartcat.migration;
2 |
3 | import static com.google.common.base.Preconditions.checkNotNull;
4 | import static com.google.common.collect.Iterables.tryFind;
5 |
6 | import com.datastax.driver.core.ColumnMetadata;
7 | import com.datastax.driver.core.KeyspaceMetadata;
8 | import com.datastax.driver.core.Metadata;
9 | import com.datastax.driver.core.Session;
10 | import com.datastax.driver.core.TableMetadata;
11 | import com.google.common.base.Optional;
12 |
13 | public class CassandraMetadataAnalyzer {
14 |
15 | private Session session;
16 |
17 | public CassandraMetadataAnalyzer(Session session) {
18 | checkNotNull(session, "Session cannot be null");
19 | checkNotNull(session.getLoggedKeyspace(), "Session must be logged into a keyspace");
20 | this.session = session;
21 | }
22 |
23 | public boolean columnExistInTable(String columnName, String tableName) {
24 | TableMetadata table = getTableMetadata(this.session, tableName);
25 | Optional<ColumnMetadata> column = tryFind(table.getColumns(), new ColumnNameMatcher(columnName));
26 | return column.isPresent();
27 | }
28 |
29 | private static TableMetadata getTableMetadata(Session session, String tableName) {
30 | Metadata metadata = session.getCluster().getMetadata();
31 | KeyspaceMetadata keyspaceMetadata = metadata.getKeyspace(session.getLoggedKeyspace());
32 | return keyspaceMetadata.getTable(tableName);
33 | }
34 | }
35 |
--------------------------------------------------------------------------------
/src/test/java/io/smartcat/migration/migrations/data/InsertBooksMigration.java:
--------------------------------------------------------------------------------
1 | package io.smartcat.migration.migrations.data;
2 |
3 | import com.datastax.driver.core.PreparedStatement;
4 |
5 | import io.smartcat.migration.DataMigration;
6 | import io.smartcat.migration.exceptions.MigrationException;
7 |
8 | /**
9 | * Example of a data migration used for populating a table. This shows the use case where you need to populate
10 | * a table fast. It can be used for test data; logic can be added to generate near-production-like data in huge amounts.
11 | */
12 | public class InsertBooksMigration extends DataMigration {
13 |
14 | public InsertBooksMigration(final int version) {
15 | super(version);
16 | }
17 |
18 | @Override
19 | public String getDescription() {
20 | return "Insert few books so we can show schema and data migration";
21 | }
22 |
23 | @Override
24 | public void execute() throws MigrationException {
25 | try {
26 | final PreparedStatement preparedStatement =
27 | session.prepare("INSERT INTO books (name, author) VALUES (?, ? );");
28 |
29 | session.execute(preparedStatement.bind("Journey to the Center of the Earth", "Jules Verne"));
30 | session.execute(preparedStatement.bind("Fifty Shades of Grey", "E. L. James"));
31 | session.execute(preparedStatement.bind("Clean Code", "Robert C. Martin"));
32 | } catch (final Exception e) {
33 | throw new MigrationException("Failed to execute InsertBooksMigration migration", e);
34 | }
35 | }
36 |
37 | }
38 |
--------------------------------------------------------------------------------
/src/main/java/io/smartcat/migration/MigrationResources.java:
--------------------------------------------------------------------------------
1 | package io.smartcat.migration;
2 |
3 | import java.util.LinkedHashSet;
4 | import java.util.Set;
5 |
6 | /**
7 | * Class which holds set of migrations.
8 | */
9 | public class MigrationResources {
10 |
11 | private final Set<Migration> migrations = new LinkedHashSet<>();
12 |
13 | /**
14 | * Add Migration object to migration collection.
15 | * @param migration Migration object
16 | */
17 | public void addMigration(final Migration migration) {
18 | this.migrations.add(migration);
19 | }
20 |
21 | /**
22 | * Add a collection of Migration objects to the migration collection (a set is used internally, so duplicates are
23 | * ignored and insertion order is preserved, meaning that a migration already in the collection keeps its
24 | * position).
25 | * @param migrations Migration object collection
26 | */
27 | public void addMigrations(final Set<Migration> migrations) {
28 | this.migrations.addAll(migrations);
29 | }
30 |
31 | /**
32 | * Get all Migration objects in insertion order.
33 | * @return Migration objects ordered by insertion
34 | */
35 | public Set<Migration> getMigrations() {
36 | return this.migrations;
37 | }
38 |
39 | /**
40 | * Get the migration at a particular position (insertion order).
41 | * @param position Position of the migration in the collection
42 | * @return Migration at the provided position
43 | */
44 | public Migration getMigration(final int position) {
45 | return (Migration) this.migrations.toArray()[position];
46 | }
47 | }
48 |
--------------------------------------------------------------------------------
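A small sketch of the set semantics described above: duplicates (equal type and version, per `Migration.equals`) are ignored and insertion order is preserved. `SomeMigration` stands in for any concrete `Migration` subclass (the name is illustrative):

```java
// SomeMigration is a hypothetical Migration subclass used for illustration.
final MigrationResources resources = new MigrationResources();
resources.addMigration(new SomeMigration(MigrationType.SCHEMA, 1));
resources.addMigration(new SomeMigration(MigrationType.SCHEMA, 2));
resources.addMigration(new SomeMigration(MigrationType.SCHEMA, 1)); // equal to the first one: ignored

resources.getMigrations().size();       // 2
resources.getMigration(1).getVersion(); // 2 (insertion order preserved)
```
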
/src/test/java/io/smartcat/migration/migrations/data/PopulateItemByNumberAndExternalIdMigration.java:
--------------------------------------------------------------------------------
1 | package io.smartcat.migration.migrations.data;
2 |
3 | import java.util.List;
4 |
5 | import com.datastax.driver.core.PreparedStatement;
6 | import com.datastax.driver.core.Row;
7 | import com.datastax.driver.core.querybuilder.QueryBuilder;
8 |
9 | import io.smartcat.migration.DataMigration;
10 | import io.smartcat.migration.exceptions.MigrationException;
11 |
12 | public class PopulateItemByNumberAndExternalIdMigration extends DataMigration {
13 |
14 | public PopulateItemByNumberAndExternalIdMigration(final int version) {
15 | super(version);
16 | }
17 |
18 | @Override
19 | public String getDescription() {
20 | return "Create entry for each item in items by id table into new items by number and external id table";
21 | }
22 |
23 | @Override
24 | public void execute() throws MigrationException {
25 | try {
26 | final PreparedStatement preparedStatement =
27 | session.prepare(
28 | "INSERT INTO items_by_number_external_id (id, number, external_id) VALUES (?, ?, ?);");
29 |
30 | final List<Row> rows = session.execute(QueryBuilder.select().from("items_by_id").setFetchSize(1000)).all();
31 | for (Row row : rows) {
32 | session.execute(
33 | preparedStatement.bind(row.getUUID("id"), row.getString("number"), row.getUUID("external_id")));
34 | }
35 | } catch (final Exception e) {
36 | throw new MigrationException("Failed to execute PopulateItemByNumberAndExternalId migration", e);
37 | }
38 | }
39 | }
40 |
--------------------------------------------------------------------------------
/src/test/java/io/smartcat/migration/MigrationTest.java:
--------------------------------------------------------------------------------
1 | package io.smartcat.migration;
2 |
3 | import static org.junit.Assert.assertFalse;
4 | import static org.junit.Assert.assertTrue;
5 |
6 | import org.junit.Test;
7 |
8 | public class MigrationTest {
9 |
10 | @Test
11 | public void test_equal_migrations() {
12 | final Migration migration1 = new MigrationTestImplementation(MigrationType.SCHEMA, 1);
13 | final Migration migration2 = new MigrationTestImplementation(MigrationType.SCHEMA, 1);
14 |
15 | assertTrue(migration1.equals(migration2));
16 | }
17 |
18 | @Test
19 | public void test_different_type_non_equal_migrations() {
20 | final Migration migration1 = new MigrationTestImplementation(MigrationType.DATA, 1);
21 | final Migration migration2 = new MigrationTestImplementation(MigrationType.SCHEMA, 1);
22 |
23 | assertFalse(migration1.equals(migration2));
24 | }
25 |
26 | @Test
27 | public void test_different_version_non_equal_migrations() {
28 | final Migration migration1 = new MigrationTestImplementation(MigrationType.SCHEMA, 1);
29 | final Migration migration2 = new MigrationTestImplementation(MigrationType.SCHEMA, 2);
30 |
31 | assertFalse(migration1.equals(migration2));
32 | }
33 |
34 | public class MigrationTestImplementation extends Migration {
35 |
36 | protected MigrationTestImplementation(final MigrationType type, final int version) {
37 | super(type, version);
38 | }
39 |
40 | @Override
41 | public String getDescription() {
42 | return "Test description";
43 | }
44 |
45 | @Override
46 | public void execute() {
47 |
48 | }
49 |
50 | }
51 |
52 | }
53 |
--------------------------------------------------------------------------------
/src/test/java/io/smartcat/migration/migrations/data/AddGenreMigration.java:
--------------------------------------------------------------------------------
1 | package io.smartcat.migration.migrations.data;
2 |
3 | import com.datastax.driver.core.BoundStatement;
4 | import com.datastax.driver.core.PreparedStatement;
5 | import com.datastax.driver.core.ResultSet;
6 | import com.datastax.driver.core.Row;
7 | import com.datastax.driver.core.Statement;
8 | import com.datastax.driver.core.querybuilder.QueryBuilder;
9 |
10 | import io.smartcat.migration.DataMigration;
11 | import io.smartcat.migration.exceptions.MigrationException;
12 |
13 | /**
14 | * Example of a data migration which goes through all entries in the DB and adds a genre to each. A real-life
15 | * example covering this case is adding a new column to the DB and needing to populate it with data for already existing entries.
16 | */
17 | public class AddGenreMigration extends DataMigration {
18 | public AddGenreMigration(final int version) {
19 | super(version);
20 | }
21 |
22 | @Override
23 | public String getDescription() {
24 | return "Insert genre for each book in database";
25 | }
26 |
27 | @Override
28 | public void execute() throws MigrationException {
29 | try {
30 | addGenreToBooks();
31 | } catch (final Exception e) {
32 | throw new MigrationException("Failed to execute InsertBooksMigration migration", e);
33 | }
34 | }
35 |
36 | private void addGenreToBooks() {
37 | final Statement select = QueryBuilder.select().all().from("books");
38 | final ResultSet results = this.session.execute(select);
39 |
40 | final PreparedStatement updateBookGenreStatement =
41 | session.prepare("UPDATE books SET genre = ? WHERE name = ? AND author = ?;");
42 |
43 | for (final Row row : results) {
44 | final String name = row.getString("name");
45 | final String author = row.getString("author");
46 |
47 | BoundStatement update;
48 |
49 | if (name.equals("Journey to the Center of the Earth")) {
50 | update = updateBookGenreStatement.bind("fantasy", name, author);
51 | } else if (name.equals("Fifty Shades of Grey")) {
52 | update = updateBookGenreStatement.bind("erotica", name, author);
53 | } else {
54 | update = updateBookGenreStatement.bind("programming", name, author);
55 | }
56 |
57 | session.execute(update);
58 | }
59 | }
60 | }
61 |
--------------------------------------------------------------------------------
/src/test/java/io/smartcat/migration/CassandraVersionerTest.java:
--------------------------------------------------------------------------------
1 | package io.smartcat.migration;
2 |
3 | import static io.smartcat.migration.MigrationType.SCHEMA;
4 | import static org.hamcrest.CoreMatchers.is;
5 | import static org.junit.Assert.assertThat;
6 | import static org.mockito.Mockito.mock;
7 | import static org.mockito.Mockito.when;
8 |
9 | import org.junit.Before;
10 | import org.junit.Test;
11 | import org.mockito.Mockito;
12 | import org.mockito.stubbing.OngoingStubbing;
13 |
14 | import com.datastax.driver.core.ResultSet;
15 | import com.datastax.driver.core.Row;
16 | import com.datastax.driver.core.Session;
17 | import com.datastax.driver.core.Statement;
18 |
19 | import io.smartcat.migration.migrations.schema.AddBookGenreFieldMigration;
20 |
21 | public class CassandraVersionerTest {
22 | private CassandraVersioner versioner;
23 | private Session session;
24 | private ResultSet versionResultSet;
25 |
26 | @Before
27 | public void setUp() throws Exception {
28 | session = mock(Session.class);
29 | versioner = new CassandraVersioner(session);
30 | versionResultSet = mock(ResultSet.class);
31 | }
32 |
33 | @Test
34 | public void whenSchemaVersionTableIsEmptyThenCurrentVersionShouldBe0() throws Exception {
35 | expectRetrieveEmptyCurrentVersion();
36 |
37 | int currentVersion = versioner.getCurrentVersion(SCHEMA);
38 |
39 | assertThat(currentVersion, is(0));
40 | }
41 |
42 | @Test
43 | public void whenSchemaVersionTableIsNotEmptyThenCurrentVersionShouldBeRetrievedFromTheTable() throws Exception {
44 | int expectedVersion = 1;
45 |
46 | expectRetrieveCurrentVersion(expectedVersion);
47 |
48 | int currentVersion = versioner.getCurrentVersion(SCHEMA);
49 |
50 | assertThat(currentVersion, is(expectedVersion));
51 | }
52 |
53 | @Test
54 | public void updateVersionSuccess() throws Exception {
55 | versioner.updateVersion(new AddBookGenreFieldMigration(1));
56 | }
57 |
58 | private void expectRetrieveEmptyCurrentVersion() {
59 | expectRetrieveVersionResultSetWithRow(null);
60 | }
61 |
62 | private void expectRetrieveCurrentVersion(int expectedVersion) {
63 | Row row = expectRowWithVersion(expectedVersion);
64 | expectRetrieveVersionResultSetWithRow(row);
65 | }
66 |
67 | private void expectRetrieveVersionResultSetWithRow(Row row) {
68 | whenSessionExecuteQuery().thenReturn(versionResultSet);
69 | whenRetrieveRowFromVersionResultSet().thenReturn(row);
70 | }
71 |
72 | private Row expectRowWithVersion(int version) {
73 | Row row = mock(Row.class);
74 | when(row.getInt("version")).thenReturn(version);
75 | return row;
76 | }
77 |
78 | private OngoingStubbing<ResultSet> whenSessionExecuteQuery() {
79 | return when(session.execute(Mockito.any(Statement.class)));
80 | }
81 |
82 | private OngoingStubbing<Row> whenRetrieveRowFromVersionResultSet() {
83 | return when(versionResultSet.one());
84 | }
85 | }
86 |
--------------------------------------------------------------------------------
/src/test/java/io/smartcat/migration/MigrationEngineItemsTest.java:
--------------------------------------------------------------------------------
1 | package io.smartcat.migration;
2 |
3 | import static junit.framework.Assert.assertEquals;
4 |
5 | import com.datastax.driver.core.*;
6 | import com.datastax.driver.core.querybuilder.QueryBuilder;
7 | import io.smartcat.migration.migrations.data.InsertInitialItemsMigration;
8 | import io.smartcat.migration.migrations.data.PopulateItemByNumberAndExternalIdMigration;
9 | import io.smartcat.migration.migrations.schema.CreateItemByNumberAndExternalIdMigration;
10 | import org.cassandraunit.CQLDataLoader;
11 | import org.cassandraunit.dataset.cql.ClassPathCQLDataSet;
12 | import org.cassandraunit.utils.EmbeddedCassandraServerHelper;
13 | import org.junit.After;
14 | import org.junit.AfterClass;
15 | import org.junit.BeforeClass;
16 | import org.junit.Test;
17 | import org.slf4j.Logger;
18 | import org.slf4j.LoggerFactory;
19 |
20 | import java.util.List;
21 |
22 | public class MigrationEngineItemsTest extends BaseTest {
23 |
24 | private static final Logger LOGGER = LoggerFactory.getLogger(MigrationEngineItemsTest.class);
25 |
26 | private static final String CONTACT_POINT = "localhost";
27 | private static final int PORT = 9142;
28 | private static final String KEYSPACE = "migration_test_items";
29 | private static final String CQL = "items.cql";
30 |
31 | private static Session session;
32 | private static Cluster cluster;
33 |
34 | @BeforeClass
35 | public static void init() throws Exception {
36 | LOGGER.info("Starting embedded cassandra server");
37 | EmbeddedCassandraServerHelper.startEmbeddedCassandra("another-cassandra.yaml");
38 |
39 | LOGGER.info("Connect to embedded db");
40 | cluster = Cluster.builder().addContactPoints(CONTACT_POINT).withPort(PORT).build();
41 | session = cluster.connect();
42 |
43 | LOGGER.info("Initialize keyspace");
44 | final CQLDataLoader cqlDataLoader = new CQLDataLoader(session);
45 | cqlDataLoader.load(new ClassPathCQLDataSet(CQL, false, true, KEYSPACE));
46 | }
47 |
48 | @After
49 | public void cleanUp() {
50 | truncateTables(KEYSPACE, session);
51 | }
52 |
53 | @AfterClass
54 | public static void tearDown() {
55 | if (cluster != null) {
56 | cluster.close();
57 | cluster = null;
58 | }
59 | }
60 |
61 | @Test
62 | public void initial_insert_test() {
63 | final int count = 100;
64 |
65 | final MigrationResources resources = new MigrationResources();
66 | resources.addMigration(new InsertInitialItemsMigration(count, 1));
67 | final boolean result = MigrationEngine.withSession(session).migrate(resources);
68 |
69 | assertEquals(true, result);
70 |
71 | final List<Row> rows = session.execute(QueryBuilder.select().from("items_by_id")).all();
72 | assertEquals(count, rows.size());
73 | }
74 |
75 | @Test
76 | public void test_migrations() {
77 | final int count = 100;
78 |
79 | final MigrationResources resources = new MigrationResources();
80 | resources.addMigration(new InsertInitialItemsMigration(count, 1));
81 | resources.addMigration(new CreateItemByNumberAndExternalIdMigration(1));
82 | resources.addMigration(new PopulateItemByNumberAndExternalIdMigration(2));
83 | final boolean result = MigrationEngine.withSession(session).migrate(resources);
84 |
85 | assertEquals(true, result);
86 |
87 | final List<Row> rows = session.execute(QueryBuilder.select().from("items_by_number_external_id")).all();
88 | assertEquals(count, rows.size());
89 | }
90 |
91 | }
92 |
--------------------------------------------------------------------------------
/src/main/java/io/smartcat/migration/CassandraVersioner.java:
--------------------------------------------------------------------------------
1 | package io.smartcat.migration;
2 |
3 | import org.slf4j.Logger;
4 | import org.slf4j.LoggerFactory;
5 |
6 | import com.datastax.driver.core.ConsistencyLevel;
7 | import com.datastax.driver.core.ResultSet;
8 | import com.datastax.driver.core.Row;
9 | import com.datastax.driver.core.Session;
10 | import com.datastax.driver.core.Statement;
11 | import com.datastax.driver.core.querybuilder.QueryBuilder;
12 |
13 | /**
14 | * Class responsible for version management.
15 | */
16 | public class CassandraVersioner {
17 |
18 | private static final Logger LOGGER = LoggerFactory.getLogger(CassandraVersioner.class);
19 |
20 | private static final String SCHEMA_VERSION_CF = "schema_version";
21 | private static final String TYPE = "type";
22 | private static final String VERSION = "version";
23 | private static final String TIMESTAMP = "ts";
24 | private static final String DESCRIPTION = "description";
25 |
26 | private static final String CREATE_SCHEMA_VERSION_CQL = String.format("CREATE TABLE IF NOT EXISTS %s (",
27 | SCHEMA_VERSION_CF)
28 | + String.format("%s text,", TYPE)
29 | + String.format("%s int,", VERSION)
30 | + String.format("%s bigint,", TIMESTAMP)
31 | + String.format("%s text,", DESCRIPTION)
32 | + String.format("PRIMARY KEY (%s, %s)", TYPE, VERSION)
33 | + String.format(") WITH CLUSTERING ORDER BY (%s DESC)", VERSION) + " AND COMMENT='Schema version';";
34 |
35 | private final Session session;
36 |
37 | /**
38 | * Create Cassandra versioner for active session.
39 | * @param session Active Cassandra session
40 | */
41 | public CassandraVersioner(final Session session) {
42 | this.session = session;
43 |
44 | createSchemaVersion();
45 | }
46 |
47 | private void createSchemaVersion() {
48 | LOGGER.debug("Try to create schema version column family");
49 | this.session.execute(CREATE_SCHEMA_VERSION_CQL);
50 | }
51 |
52 | /**
53 | * Get the current database version for the given migration type with ALL consistency. Select one row since
54 | * migration history is saved in descending order by version. If there are no rows in the schema_version table,
55 | * return 0 as the default database version. The database version is changed by executing migrations.
56 | *
57 | * @param type Migration type
58 | * @return Database version for given type
59 | */
60 | public int getCurrentVersion(final MigrationType type) {
61 | final Statement select = QueryBuilder.select().all().from(SCHEMA_VERSION_CF)
62 | .where(QueryBuilder.eq(TYPE, type.name())).limit(1).setConsistencyLevel(ConsistencyLevel.ALL);
63 | final ResultSet result = session.execute(select);
64 |
65 | final Row row = result.one();
66 | return row == null ? 0 : row.getInt(VERSION);
67 | }
68 |
69 | /**
70 | * Update current database version to the migration version. This is executed after migration success.
71 | *
72 | * @param migration Migration that updated the database version
73 | * @return Success of version update
74 | */
75 | public boolean updateVersion(final Migration migration) {
76 | final Statement insert = QueryBuilder.insertInto(SCHEMA_VERSION_CF).value(TYPE, migration.getType().name())
77 | .value(VERSION, migration.getVersion()).value(TIMESTAMP, System.currentTimeMillis())
78 | .value(DESCRIPTION, migration.getDescription()).setConsistencyLevel(ConsistencyLevel.ALL);
79 |
80 | try {
81 | session.execute(insert);
82 | return true;
83 | } catch (final Exception e) {
84 | LOGGER.error("Failed to execute update version statement", e);
85 | return false;
86 | }
87 | }
88 | }
89 |
--------------------------------------------------------------------------------
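For readability, this is the CQL that the `CREATE_SCHEMA_VERSION_CQL` constant above concatenates to (reconstructed directly from the `String.format` calls):

```cql
CREATE TABLE IF NOT EXISTS schema_version (
    type text,
    version int,
    ts bigint,
    description text,
    PRIMARY KEY (type, version)
) WITH CLUSTERING ORDER BY (version DESC) AND COMMENT='Schema version';
```
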
/src/test/java/io/smartcat/migration/MigrationEngineBooksTest.java:
--------------------------------------------------------------------------------
1 | package io.smartcat.migration;
2 |
3 | import static junit.framework.Assert.assertEquals;
4 |
5 | import org.cassandraunit.CQLDataLoader;
6 | import org.cassandraunit.dataset.cql.ClassPathCQLDataSet;
7 | import org.cassandraunit.utils.EmbeddedCassandraServerHelper;
8 | import org.junit.AfterClass;
9 | import org.junit.BeforeClass;
10 | import org.junit.Test;
11 | import org.slf4j.Logger;
12 | import org.slf4j.LoggerFactory;
13 |
14 | import com.datastax.driver.core.Cluster;
15 | import com.datastax.driver.core.ResultSet;
16 | import com.datastax.driver.core.Row;
17 | import com.datastax.driver.core.Session;
18 | import com.datastax.driver.core.Statement;
19 | import com.datastax.driver.core.querybuilder.QueryBuilder;
20 |
21 | import io.smartcat.migration.migrations.data.AddGenreMigration;
22 | import io.smartcat.migration.migrations.data.InsertBooksMigration;
23 | import io.smartcat.migration.migrations.schema.AddBookGenreFieldMigration;
24 |
25 | public class MigrationEngineBooksTest extends BaseTest {
26 |
27 | private static final Logger LOGGER = LoggerFactory.getLogger(MigrationEngineBooksTest.class);
28 |
29 | private static final String CONTACT_POINT = "localhost";
30 | private static final int PORT = 9142;
31 | private static final String KEYSPACE = "migration_test_books";
32 | private static final String CQL = "books.cql";
33 |
34 | private static Session session;
35 | private static Cluster cluster;
36 |
37 | @BeforeClass
38 | public static void init() throws Exception {
39 | LOGGER.info("Starting embedded cassandra server");
40 | EmbeddedCassandraServerHelper.startEmbeddedCassandra("another-cassandra.yaml");
41 |
42 | LOGGER.info("Connect to embedded db");
43 | cluster = Cluster.builder().addContactPoints(CONTACT_POINT).withPort(PORT).build();
44 | session = cluster.connect();
45 |
46 | LOGGER.info("Initialize keyspace");
47 | final CQLDataLoader cqlDataLoader = new CQLDataLoader(session);
48 | cqlDataLoader.load(new ClassPathCQLDataSet(CQL, false, true, KEYSPACE));
49 | }
50 |
51 | @AfterClass
52 | public static void tearDown() {
53 | if (cluster != null) {
54 | cluster.close();
55 | cluster = null;
56 | }
57 | }
58 |
59 | @Test
60 | public void test_schema_migration() {
61 | final MigrationResources resources = new MigrationResources();
62 | resources.addMigration(new AddBookGenreFieldMigration(1));
63 | final boolean result = MigrationEngine.withSession(session).migrate(resources);
64 |
65 | assertEquals(true, result);
66 | }
67 |
68 | @Test
69 | public void test_data_migration() {
70 | final MigrationResources initialResources = new MigrationResources();
71 | initialResources.addMigration(new InsertBooksMigration(1));
72 | MigrationEngine.withSession(session).migrate(initialResources);
73 |
74 | final MigrationResources resources = new MigrationResources();
75 | resources.addMigration(new AddBookGenreFieldMigration(1));
76 | resources.addMigration(new AddGenreMigration(2));
77 | MigrationEngine.withSession(session).migrate(resources);
78 |
79 | final Statement select = QueryBuilder.select().all().from("books");
80 | final ResultSet results = session.execute(select);
81 |
82 | for (final Row row : results) {
83 | final String genre = row.getString("genre");
84 | final String name = row.getString("name");
85 |
86 | if (name.equals("Journey to the Center of the Earth")) {
87 | assertEquals("fantasy", genre);
88 | } else if (name.equals("Fifty Shades of Grey")) {
89 | assertEquals("erotica", genre);
90 | } else {
91 | assertEquals("programming", genre);
92 | }
93 | }
94 | }
95 |
96 | }
97 |
--------------------------------------------------------------------------------
/checkstyle.xml:
--------------------------------------------------------------------------------
1 |
2 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
17 |
18 |
19 |
20 |
21 |
22 |
23 |
24 |
25 |
26 |
27 |
28 |
29 |
30 |
31 |
32 |
33 |
34 |
35 |
36 |
37 |
38 |
39 |
40 |
41 |
42 |
43 |
44 |
45 |
46 |
47 |
48 |
49 |
50 |
51 |
52 |
53 |
54 |
55 |
56 |
57 |
58 |
59 |
60 |
61 |
62 |
63 |
64 |
65 |
66 |
67 |
68 |
70 |
72 |
73 |
74 |
75 |
76 |
77 |
78 |
79 |
80 |
81 |
82 |
83 |
84 |
85 |
86 |
87 |
88 |
89 |
90 |
91 |
92 |
93 |
94 |
95 |
96 |
97 |
--------------------------------------------------------------------------------
/src/main/java/io/smartcat/migration/MigrationEngine.java:
--------------------------------------------------------------------------------
1 | package io.smartcat.migration;
2 |
3 | import io.smartcat.migration.exceptions.MigrationException;
4 | import org.slf4j.Logger;
5 | import org.slf4j.LoggerFactory;
6 |
7 | import com.datastax.driver.core.Session;
8 |
9 | /**
10 | * Migration engine wraps Migrator and provides a DSL-like API.
11 | *
12 | */
13 | public class MigrationEngine {
14 |
15 | private static final Logger LOGGER = LoggerFactory.getLogger(MigrationEngine.class);
16 |
17 | private MigrationEngine() {
18 | }
19 |
20 | /**
21 | * Create migrator out of session fully prepared for doing migration of resources.
22 | * @param session Datastax driver session object
23 | * @return migrator instance with versioner and session which can migrate resources
24 | */
25 | public static Migrator withSession(final Session session) {
26 | return new Migrator(session);
27 | }
28 |
29 | /**
30 | * Migrator handles migrations and errors.
31 | */
32 | public static class Migrator {
33 | private final Session session;
34 | private final CassandraVersioner versioner;
35 |
36 | /**
37 | * Create new Migrator with active Cassandra session.
38 | * @param session Active Cassandra session
39 | */
40 | public Migrator(final Session session) {
41 | this.session = session;
42 | this.versioner = new CassandraVersioner(session);
43 | }
44 |
45 | /**
46 | * Executes all migrations from the migration resources that have a higher version than the current db version.
47 | * If a migration fails, the method exits.
48 | *
49 | * @param resources Collection of migrations to be executed
50 | * @return Success of migration
51 | */
52 | public boolean migrate(final MigrationResources resources) {
53 | LOGGER.debug("Start migration");
54 |
55 | for (final Migration migration : resources.getMigrations()) {
56 | final MigrationType type = migration.getType();
57 | final int migrationVersion = migration.getVersion();
58 | final int version = versioner.getCurrentVersion(type);
59 |
60 | LOGGER.info("Db is version {} for type {}.", version, type.name());
61 | LOGGER.info("Compare {} migration version {} with description {}", type.name(), migrationVersion,
62 | migration.getDescription());
63 |
64 | if (migrationVersion <= version) {
65 | LOGGER.warn("Skipping migration [{}] with version {} since db is on higher version {}.",
66 | migration.getDescription(), migrationVersion, version);
67 | continue;
68 | }
69 |
70 | migration.setSession(session);
71 |
72 | final long start = System.currentTimeMillis();
73 | LOGGER.info("Start executing migration to version {}.", migrationVersion);
74 |
75 | try {
76 | migration.execute();
77 | } catch (final MigrationException e) {
78 | LOGGER.error("Failed to execute migration version {}, exception {}!", migrationVersion,
79 | e.getMessage());
80 | LOGGER.debug("Exception stack trace", e);
81 | return false;
82 | }
83 |
84 | final long end = System.currentTimeMillis();
85 | final long seconds = (end - start) / 1000;
86 | LOGGER.info("Migration [{}] to version {} finished in {} seconds.", migration.getDescription(),
87 | migrationVersion, seconds);
88 |
89 | if (!versioner.updateVersion(migration)) {
90 | LOGGER.error("Db schema update failed for migration version {}!", migrationVersion);
91 | return false;
92 | }
93 | }
94 |
95 | return true;
96 | }
97 | }
98 |
99 | }
100 |
--------------------------------------------------------------------------------
/src/test/java/io/smartcat/migration/MigrationResourcesTest.java:
--------------------------------------------------------------------------------
1 | package io.smartcat.migration;
2 |
3 | import static org.junit.Assert.assertEquals;
4 |
5 | import org.junit.Test;
6 |
7 | public class MigrationResourcesTest {
8 |
9 | @Test
10 | public void initialization_test() {
11 | final MigrationResources resources = new MigrationResources();
12 | resources.addMigration(new MigrationTestImplementation(MigrationType.SCHEMA, 1));
13 | resources.addMigration(new MigrationTestImplementation(MigrationType.SCHEMA, 2));
14 | resources.addMigration(new MigrationTestImplementation(MigrationType.SCHEMA, 3));
15 | resources.addMigration(new MigrationTestImplementation(MigrationType.SCHEMA, 4));
16 | resources.addMigration(new MigrationTestImplementation(MigrationType.SCHEMA, 5));
17 |
18 | assertEquals(5, resources.getMigrations().size());
19 | assertEquals(3, resources.getMigration(2).getVersion());
20 | }
21 |
22 | @Test
23 | public void mixed_types_test() {
24 | final MigrationResources resources = new MigrationResources();
25 | resources.addMigration(new MigrationTestImplementation(MigrationType.SCHEMA, 1));
26 | resources.addMigration(new MigrationTestImplementation(MigrationType.SCHEMA, 2));
27 | resources.addMigration(new MigrationTestImplementation(MigrationType.SCHEMA, 3));
28 | resources.addMigration(new MigrationTestImplementation(MigrationType.DATA, 1));
29 | resources.addMigration(new MigrationTestImplementation(MigrationType.DATA, 2));
30 | resources.addMigration(new MigrationTestImplementation(MigrationType.DATA, 3));
31 |
32 | resources.getMigrations();
33 | assertEquals(6, resources.getMigrations().size());
34 | assertEquals(3, resources.getMigration(2).getVersion());
35 | assertEquals(MigrationType.SCHEMA, resources.getMigration(2).getType());
36 | assertEquals(3, resources.getMigration(5).getVersion());
37 | assertEquals(MigrationType.DATA, resources.getMigration(5).getType());
38 | }
39 |
40 | @Test
41 | public void test_inserting_same_migration_ignored() {
42 | final MigrationResources resources = new MigrationResources();
43 | resources.addMigration(new MigrationTestImplementation(MigrationType.SCHEMA, 1));
44 | resources.addMigration(new MigrationTestImplementation(MigrationType.SCHEMA, 2));
45 | resources.addMigration(new MigrationTestImplementation(MigrationType.SCHEMA, 3));
46 | resources.addMigration(new MigrationTestImplementation(MigrationType.SCHEMA, 5));
47 | resources.addMigration(new MigrationTestImplementation(MigrationType.SCHEMA, 3));
48 |
49 | assertEquals(4, resources.getMigrations().size());
50 | assertEquals(1, resources.getMigration(0).getVersion());
51 | assertEquals(2, resources.getMigration(1).getVersion());
52 | assertEquals(3, resources.getMigration(2).getVersion());
53 | assertEquals(5, resources.getMigration(3).getVersion());
54 | }
55 |
56 | @Test
57 | public void mixed_inserts_order() {
58 | final MigrationResources resources = new MigrationResources();
59 | resources.addMigration(new MigrationTestImplementation(MigrationType.SCHEMA, 1));
60 | resources.addMigration(new MigrationTestImplementation(MigrationType.SCHEMA, 3));
61 | resources.addMigration(new MigrationTestImplementation(MigrationType.SCHEMA, 5));
62 |
63 | assertEquals(3, resources.getMigrations().size());
64 |
65 | resources.addMigration(new MigrationTestImplementation(MigrationType.SCHEMA, 2));
66 | resources.addMigration(new MigrationTestImplementation(MigrationType.SCHEMA, 4));
67 |
68 | assertEquals(5, resources.getMigrations().size());
69 | assertEquals(1, resources.getMigration(0).getVersion());
70 | assertEquals(3, resources.getMigration(1).getVersion());
71 | assertEquals(5, resources.getMigration(2).getVersion());
72 | assertEquals(2, resources.getMigration(3).getVersion());
73 | assertEquals(4, resources.getMigration(4).getVersion());
74 | }
75 |
76 | public class MigrationTestImplementation extends Migration {
77 |
78 | protected MigrationTestImplementation(final MigrationType type, final int version) {
79 | super(type, version);
80 | }
81 |
82 | @Override
83 | public String getDescription() {
84 | return "Test description";
85 | }
86 |
87 | @Override
88 | public void execute() {
89 |
90 | }
91 |
92 | }
93 |
94 | }
95 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # cassandra-migration-tool-java
2 | [](https://travis-ci.org/smartcat-labs/cassandra-migration-tool-java)
3 |
4 | Cassandra migration tool for Java is a lightweight tool used to execute schema and data migrations on a Cassandra database. Schema versioning state is saved in the `schema_version` table, which stores the name and description of each migration along with its type and timestamp.
5 |
6 | There are two types of migrations:
7 |
8 | 1. SCHEMA migrations, which alter the database schema (add or remove a column, change the type of a column, add a table, etc.)
9 | 2. DATA migrations, which alter data (update it, read from one table into another, calculate new fields based on existing data, etc.).
10 |
11 | # Why we did it
12 |
13 | The idea behind this project was born while working on a project with live data. Since development required changes to the data model, we had to figure out a way to update the database schema while keeping the data. We ended up with a lightweight tool that versions the database and enables us to change the schema and transform data when and how we want (as part of a build-and-deploy cycle or of the application itself). Since these changes are part of the codebase, we can also test them before executing them on a live cluster.
14 |
15 |
16 | # Versioning
17 |
18 | We are going to follow the official DataStax Java driver versions and build our tool for each major, minor and patch version of the driver. We chose this path because of the simplicity of developing, testing and deploying for different versions of the driver.
19 | This means that if you are using driver version 2.1.9 you can add this dependency to your project:
20 | ```xml
21 | <dependency>
22 |   <groupId>io.smartcat</groupId>
23 |   <artifactId>cassandra-migration-tool</artifactId>
24 |   <version>2.1.9.0</version>
25 | </dependency>
26 | ```
27 | or whatever the latest build version of the migration tool is. Check the version at the [Maven repository](http://mvnrepository.com/artifact/io.smartcat/cassandra-migration-tool).
28 |
29 | # Examples
30 | We have two test cases which explain common problems that the migration tool can solve. This is only a subset of use cases, but we think these are the most frequent ones:
31 |
32 | **First use case** touches the problem of adding a new field and populating historic data with a value. Cassandra does not have a DDL `default` attribute, so you must populate the data at the application level. [MigrationEngineBooksTest](src/test/java/io/smartcat/migration/MigrationEngineBooksTest.java) serves as an example of this use case.
33 |
34 | The initial table is simple (it can be found in the [books.cql file](src/test/resources/books.cql)) and we use migration classes to do the following (see the sketch after the list):
35 |
36 | 1. Populate data initially with first `data` migration [InsertBooksMigration](src/test/java/io/smartcat/migration/migrations/data/InsertBooksMigration.java)
37 | 2. Add `genre` column with `schema` migration [AddBookGenreFieldMigration](src/test/java/io/smartcat/migration/migrations/schema/AddBookGenreFieldMigration.java)
38 | 3. Populate `genre` column with second `data` migration [AddGenreMigration](src/test/java/io/smartcat/migration/migrations/data/AddGenreMigration.java)
39 |
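A minimal sketch of wiring these three migrations together, condensed from `MigrationEngineBooksTest` (`session` is an active driver session):

```java
final MigrationResources resources = new MigrationResources();
resources.addMigration(new InsertBooksMigration(1));       // data migration: seed the books table
resources.addMigration(new AddBookGenreFieldMigration(1)); // schema migration: add the genre column
resources.addMigration(new AddGenreMigration(2));          // data migration: backfill genre values
final boolean success = MigrationEngine.withSession(session).migrate(resources);
```
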
40 | **Second use case** touches the problem of query-based modeling. Cassandra achieves good performance because you model your data the way you will query it. Often, after the initial modeling, you get a request to read the data based on different criteria. In Cassandra you do this with another table which is optimized for the new requirements. You need to populate this new table with existing data, and you can solve this with the migration tool. [MigrationEngineItemsTest](src/test/java/io/smartcat/migration/MigrationEngineItemsTest.java) serves as an example of this use case.
41 |
42 | The initial table is simple (it can be found in the [items.cql file](src/test/resources/items.cql)) and we use migration classes to do the following (see the sketch after the list):
43 |
44 | 1. Populate data initially with first `data` migration [InsertInitialItemsMigration](src/test/java/io/smartcat/migration/migrations/data/InsertInitialItemsMigration.java)
45 | 2. Add `items_by_number_external_id` table with `schema` migration [CreateItemByNumberAndExternalIdMigration](src/test/java/io/smartcat/migration/migrations/schema/CreateItemByNumberAndExternalIdMigration.java)
46 | 3. Populate `items_by_number_external_id` table with second `data` migration [PopulateItemByNumberAndExternalIdMigration](src/test/java/io/smartcat/migration/migrations/data/PopulateItemByNumberAndExternalIdMigration.java)
47 |
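The same wiring pattern, condensed from `MigrationEngineItemsTest`:

```java
final MigrationResources resources = new MigrationResources();
resources.addMigration(new InsertInitialItemsMigration(100, 1));            // data: seed items_by_id
resources.addMigration(new CreateItemByNumberAndExternalIdMigration(1));    // schema: create the new table
resources.addMigration(new PopulateItemByNumberAndExternalIdMigration(2));  // data: copy existing rows over
MigrationEngine.withSession(session).migrate(resources);
```
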
48 | # Schema agreement
49 | When executing schema migrations it is necessary to wait for the cluster to propagate the schema to all nodes. Schema agreement is implemented based on this [fix](https://datastax-oss.atlassian.net/browse/JAVA-669) and is exposed through the [Migration](src/main/java/io/smartcat/migration/Migration.java) abstract class.
50 | To execute a statement with schema agreement, use the `executeWithSchemaAgreement` method.
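
As a sketch, a custom schema migration wraps its statement in that method; the class, table, and column names below are purely illustrative:

```java
import com.datastax.driver.core.SimpleStatement;

import io.smartcat.migration.SchemaMigration;
import io.smartcat.migration.exceptions.MigrationException;

public class AddUserEmailFieldMigration extends SchemaMigration {

    public AddUserEmailFieldMigration(final int version) {
        super(version);
    }

    @Override
    public String getDescription() {
        return "Alters users table by adding email column";
    }

    @Override
    public void execute() throws MigrationException {
        // Executes the statement and verifies the schema change propagated to all
        // nodes; throws SchemaAgreementException otherwise.
        executeWithSchemaAgreement(new SimpleStatement("ALTER TABLE users ADD email text;"));
    }
}
```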
51 |
--------------------------------------------------------------------------------
/src/main/java/io/smartcat/migration/Migration.java:
--------------------------------------------------------------------------------
1 | package io.smartcat.migration;
2 |
3 | import com.datastax.driver.core.ResultSet;
4 | import com.datastax.driver.core.Session;
5 | import com.datastax.driver.core.Statement;
6 |
7 | import io.smartcat.migration.exceptions.MigrationException;
8 | import io.smartcat.migration.exceptions.SchemaAgreementException;
9 |
10 | /**
11 | * Abstract migration class that implements session DI and exposes required methods for execution.
12 | */
13 | public abstract class Migration {
14 |
15 | private int version = -1;
16 | private MigrationType type = MigrationType.SCHEMA;
17 |
18 | /**
19 | * Active Cassandra session.
20 | */
21 | protected Session session;
22 |
23 | /**
24 | * Create new migration with provided type and version.
25 | * @param type Migration type (SCHEMA or DATA)
26 | * @param version Migration version
27 | */
28 | public Migration(final MigrationType type, final int version) {
29 | this.type = type;
30 | this.version = version;
31 | }
32 |
33 | /**
34 | * Enables session injection into migration class.
35 | * @param session Session object
36 | */
37 | public void setSession(final Session session) {
38 | this.session = session;
39 | }
40 |
41 | /**
42 | * Returns migration type (schema or data).
43 | * @return Migration type
44 | */
45 | public MigrationType getType() {
46 | return this.type;
47 | }
48 |
49 | /**
50 | * Returns resulting database schema version of this migration.
51 | * @return Resulting db schema version
52 | */
53 | public int getVersion() {
54 | return this.version;
55 | }
56 |
57 | /**
58 | * Returns migration description (for history purposes).
59 | * @return migration description.
60 | */
61 | public abstract String getDescription();
62 |
63 | /**
64 | * Executes migration implementation.
65 | * @throws MigrationException exception
66 | */
67 | public abstract void execute() throws MigrationException;
68 |
69 | /**
70 |      * Executes the provided statement and checks whether the schema migration has been propagated
71 | * to all nodes in the cluster. Use this method when executing schema migrations.
72 | * @param statement Statement to be executed
73 | * @throws SchemaAgreementException exception
74 | */
75 | protected void executeWithSchemaAgreement(Statement statement)
76 | throws SchemaAgreementException {
77 | ResultSet result = this.session.execute(statement);
78 | if (checkSchemaAgreement(result)) {
79 | return;
80 | }
81 | if (checkClusterSchemaAgreement()) {
82 | return;
83 | }
84 |
85 | throw new SchemaAgreementException(
86 | "Failed to propagate schema update to all nodes (schema agreement error)");
87 | }
88 |
89 | /**
90 | * Whether the cluster had reached schema agreement after the execution of this query.
91 | *
92 | * After a successful schema-altering query (ex: creating a table), the driver will check if the cluster's nodes
93 | * agree on the new schema version. If not, it will keep retrying for a given delay (configurable via
94 | * {@link com.datastax.driver.core.Cluster.Builder#withMaxSchemaAgreementWaitSeconds(int)}).
95 | *
96 | * If this method returns {@code false}, clients can call
97 | * {@link com.datastax.driver.core.Metadata#checkSchemaAgreement()} later to perform the check manually.
98 | *
99 |      * Note that the schema agreement check is only performed for schema-altering queries. For other query types, this
100 | * method will always return {@code true}.
101 | *
102 | * @param resultSet Statement execution ResultSet
103 | * @return whether the cluster reached schema agreement, or {@code true} for a non schema-altering statement.
104 | */
105 | protected boolean checkSchemaAgreement(ResultSet resultSet) {
106 | return resultSet.getExecutionInfo().isSchemaInAgreement();
107 | }
108 |
109 | /**
110 | * Checks whether hosts that are currently up agree on the schema definition.
111 | *
112 | * This method performs a one-time check only, without any form of retry; therefore
113 | * {@link com.datastax.driver.core.Cluster.Builder#withMaxSchemaAgreementWaitSeconds(int)}
114 | * does not apply in this case.
115 | *
116 | * @return {@code true} if all hosts agree on the schema; {@code false} if
117 | * they don't agree, or if the check could not be performed
118 | * (for example, if the control connection is down).
119 | */
120 | protected boolean checkClusterSchemaAgreement() {
121 | return this.session.getCluster().getMetadata().checkSchemaAgreement();
122 | }
123 |
124 | @Override
125 | public int hashCode() {
126 | final int prime = 31;
127 | int result = 1;
128 | result = prime * result + ((type == null) ? 0 : type.hashCode());
129 | result = prime * result + version;
130 | return result;
131 | }
132 |
133 | @Override
134 | public boolean equals(Object obj) {
135 | if (this == obj)
136 | return true;
137 | if (obj == null)
138 | return false;
139 | if (getClass() != obj.getClass())
140 | return false;
141 | Migration other = (Migration) obj;
142 | if (type != other.type)
143 | return false;
144 | if (version != other.version)
145 | return false;
146 | return true;
147 | }
148 |
149 | }
150 |
--------------------------------------------------------------------------------
/src/test/java/io/smartcat/migration/MigratorTest.java:
--------------------------------------------------------------------------------
1 | package io.smartcat.migration;
2 |
3 | import static org.hamcrest.CoreMatchers.is;
4 | import static org.hamcrest.CoreMatchers.not;
5 | import static org.junit.Assert.assertFalse;
6 | import static org.junit.Assert.assertThat;
7 | import static org.junit.Assert.assertTrue;
8 |
9 | import org.cassandraunit.CQLDataLoader;
10 | import org.cassandraunit.dataset.cql.ClassPathCQLDataSet;
11 | import org.cassandraunit.utils.EmbeddedCassandraServerHelper;
12 | import org.junit.AfterClass;
13 | import org.junit.Before;
14 | import org.junit.BeforeClass;
15 | import org.junit.Test;
16 | import org.slf4j.Logger;
17 | import org.slf4j.LoggerFactory;
18 |
19 | import com.datastax.driver.core.Cluster;
20 | import com.datastax.driver.core.Session;
21 |
22 | import io.smartcat.migration.MigrationEngine.Migrator;
23 | import io.smartcat.migration.migrations.schema.AddBookGenreFieldMigration;
24 | import io.smartcat.migration.migrations.schema.AddBookISBNFieldMigration;
25 |
26 | public class MigratorTest extends BaseTest {
27 | private static final Logger LOGGER = LoggerFactory.getLogger(MigratorTest.class);
28 |
29 | private static final String CONTACT_POINT = "localhost";
30 | private static final int PORT = 9142;
31 | private static final String KEYSPACE = "migration_test_books";
32 | private static final String CQL = "books.cql";
33 |
34 | private static Session session;
35 | private static Cluster cluster;
36 |
37 | private CassandraVersioner versioner;
38 | private Migrator migrator;
39 |
40 | private CassandraMetadataAnalyzer metadataAnalyzer;
41 |
42 | @BeforeClass
43 | public static void init() throws Exception {
44 | LOGGER.info("Starting embedded cassandra server");
45 | EmbeddedCassandraServerHelper.startEmbeddedCassandra("another-cassandra.yaml");
46 |
47 | LOGGER.info("Connect to embedded db");
48 | cluster = Cluster.builder().addContactPoints(CONTACT_POINT).withPort(PORT).build();
49 | session = cluster.connect();
50 | }
51 |
52 | @Before
53 | public void setUp() throws Exception {
54 | LOGGER.info("Initialize keyspace");
55 | final CQLDataLoader cqlDataLoader = new CQLDataLoader(session);
56 | cqlDataLoader.load(new ClassPathCQLDataSet(CQL, false, true, KEYSPACE));
57 |
58 | versioner = new CassandraVersioner(session);
59 | migrator = MigrationEngine.withSession(session);
60 | metadataAnalyzer = new CassandraMetadataAnalyzer(session);
61 | }
62 |
63 | @AfterClass
64 | public static void tearDown() {
65 | if (cluster != null) {
66 | cluster.close();
67 | cluster = null;
68 | }
69 | }
70 |
71 | @Test
72 | public void executeOneMigration() throws Exception {
73 | assertTableDoesntContainsColumns("books", "genre");
74 |
75 | final MigrationResources resources = new MigrationResources();
76 | resources.addMigration(new AddBookGenreFieldMigration(1));
77 |
78 | migrator.migrate(resources);
79 |
80 | assertTableContainsColumns("books", "genre");
81 | }
82 |
83 | @Test
84 | public void executeTwoMigrations() throws Exception {
85 | assertTableDoesntContainsColumns("books", "genre", "isbn");
86 |
87 | final MigrationResources resources = new MigrationResources();
88 | resources.addMigration(new AddBookGenreFieldMigration(1));
89 | resources.addMigration(new AddBookISBNFieldMigration(2));
90 |
91 | migrator.migrate(resources);
92 |
93 | assertTableContainsColumns("books", "genre", "isbn");
94 | }
95 |
96 | @Test
97 | public void updateVersionAfterMigration() throws Exception {
98 | int versionBeforeMigration = getCurrentVersion();
99 |
100 | Migration migration = new AddBookGenreFieldMigration(1);
101 | final MigrationResources resources = new MigrationResources();
102 | resources.addMigration(migration);
103 |
104 | migrator.migrate(resources);
105 |
106 | // verify
107 | assertThat(getCurrentVersion(), not(is(versionBeforeMigration)));
108 | assertThat(getCurrentVersion(), is(migration.getVersion()));
109 | }
110 |
111 | @Test
112 | public void skipMigrationWithVersionOlderThanCurrentSchemaVersion() throws Exception {
113 | Migration migrationWithNewerVersion = new AddBookGenreFieldMigration(2);
114 | Migration migrationWithOlderVersion = new AddBookISBNFieldMigration(1);
115 |
116 | final MigrationResources resources = new MigrationResources();
117 | resources.addMigration(migrationWithNewerVersion);
118 | resources.addMigration(migrationWithOlderVersion);
119 |
120 | migrator.migrate(resources);
121 |
122 | // verify
123 | assertThat(getCurrentVersion(), is(migrationWithNewerVersion.getVersion()));
124 | assertTableDoesntContainsColumns("books", "isbn");
125 | }
126 |
127 | @Test
128 |     public void skipMigrationWithSameVersionAsCurrentSchemaVersion() throws Exception {
129 | int versionBeforeMigration = getCurrentVersion();
130 |
131 | final MigrationResources resources = new MigrationResources();
132 | resources.addMigration(new AddBookGenreFieldMigration(versionBeforeMigration));
133 |
134 | migrator.migrate(resources);
135 |
136 | // verify
137 | assertThat(getCurrentVersion(), is(versionBeforeMigration));
138 | }
139 |
140 | private int getCurrentVersion() {
141 | return versioner.getCurrentVersion(MigrationType.SCHEMA);
142 | }
143 |
144 |     private void assertTableDoesNotContainColumns(String table, String... columns) {
145 | for (String column : columns) {
146 | assertFalse(metadataAnalyzer.columnExistInTable(column, table));
147 | }
148 | }
149 |
150 | private void assertTableContainsColumns(String table, String... columns) {
151 | for (String column : columns) {
152 | assertTrue(metadataAnalyzer.columnExistInTable(column, table));
153 | }
154 | }
155 |
156 | }
157 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 |
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 |
7 | 1. Definitions.
8 |
9 | "License" shall mean the terms and conditions for use, reproduction,
10 | and distribution as defined by Sections 1 through 9 of this document.
11 |
12 | "Licensor" shall mean the copyright owner or entity authorized by
13 | the copyright owner that is granting the License.
14 |
15 | "Legal Entity" shall mean the union of the acting entity and all
16 | other entities that control, are controlled by, or are under common
17 | control with that entity. For the purposes of this definition,
18 | "control" means (i) the power, direct or indirect, to cause the
19 | direction or management of such entity, whether by contract or
20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 | outstanding shares, or (iii) beneficial ownership of such entity.
22 |
23 | "You" (or "Your") shall mean an individual or Legal Entity
24 | exercising permissions granted by this License.
25 |
26 | "Source" form shall mean the preferred form for making modifications,
27 | including but not limited to software source code, documentation
28 | source, and configuration files.
29 |
30 | "Object" form shall mean any form resulting from mechanical
31 | transformation or translation of a Source form, including but
32 | not limited to compiled object code, generated documentation,
33 | and conversions to other media types.
34 |
35 | "Work" shall mean the work of authorship, whether in Source or
36 | Object form, made available under the License, as indicated by a
37 | copyright notice that is included in or attached to the work
38 | (an example is provided in the Appendix below).
39 |
40 | "Derivative Works" shall mean any work, whether in Source or Object
41 | form, that is based on (or derived from) the Work and for which the
42 | editorial revisions, annotations, elaborations, or other modifications
43 | represent, as a whole, an original work of authorship. For the purposes
44 | of this License, Derivative Works shall not include works that remain
45 | separable from, or merely link (or bind by name) to the interfaces of,
46 | the Work and Derivative Works thereof.
47 |
48 | "Contribution" shall mean any work of authorship, including
49 | the original version of the Work and any modifications or additions
50 | to that Work or Derivative Works thereof, that is intentionally
51 | submitted to Licensor for inclusion in the Work by the copyright owner
52 | or by an individual or Legal Entity authorized to submit on behalf of
53 | the copyright owner. For the purposes of this definition, "submitted"
54 | means any form of electronic, verbal, or written communication sent
55 | to the Licensor or its representatives, including but not limited to
56 | communication on electronic mailing lists, source code control systems,
57 | and issue tracking systems that are managed by, or on behalf of, the
58 | Licensor for the purpose of discussing and improving the Work, but
59 | excluding communication that is conspicuously marked or otherwise
60 | designated in writing by the copyright owner as "Not a Contribution."
61 |
62 | "Contributor" shall mean Licensor and any individual or Legal Entity
63 | on behalf of whom a Contribution has been received by Licensor and
64 | subsequently incorporated within the Work.
65 |
66 | 2. Grant of Copyright License. Subject to the terms and conditions of
67 | this License, each Contributor hereby grants to You a perpetual,
68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69 | copyright license to reproduce, prepare Derivative Works of,
70 | publicly display, publicly perform, sublicense, and distribute the
71 | Work and such Derivative Works in Source or Object form.
72 |
73 | 3. Grant of Patent License. Subject to the terms and conditions of
74 | this License, each Contributor hereby grants to You a perpetual,
75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76 | (except as stated in this section) patent license to make, have made,
77 | use, offer to sell, sell, import, and otherwise transfer the Work,
78 | where such license applies only to those patent claims licensable
79 | by such Contributor that are necessarily infringed by their
80 | Contribution(s) alone or by combination of their Contribution(s)
81 | with the Work to which such Contribution(s) was submitted. If You
82 | institute patent litigation against any entity (including a
83 | cross-claim or counterclaim in a lawsuit) alleging that the Work
84 | or a Contribution incorporated within the Work constitutes direct
85 | or contributory patent infringement, then any patent licenses
86 | granted to You under this License for that Work shall terminate
87 | as of the date such litigation is filed.
88 |
89 | 4. Redistribution. You may reproduce and distribute copies of the
90 | Work or Derivative Works thereof in any medium, with or without
91 | modifications, and in Source or Object form, provided that You
92 | meet the following conditions:
93 |
94 | (a) You must give any other recipients of the Work or
95 | Derivative Works a copy of this License; and
96 |
97 | (b) You must cause any modified files to carry prominent notices
98 | stating that You changed the files; and
99 |
100 | (c) You must retain, in the Source form of any Derivative Works
101 | that You distribute, all copyright, patent, trademark, and
102 | attribution notices from the Source form of the Work,
103 | excluding those notices that do not pertain to any part of
104 | the Derivative Works; and
105 |
106 | (d) If the Work includes a "NOTICE" text file as part of its
107 | distribution, then any Derivative Works that You distribute must
108 | include a readable copy of the attribution notices contained
109 | within such NOTICE file, excluding those notices that do not
110 | pertain to any part of the Derivative Works, in at least one
111 | of the following places: within a NOTICE text file distributed
112 | as part of the Derivative Works; within the Source form or
113 | documentation, if provided along with the Derivative Works; or,
114 | within a display generated by the Derivative Works, if and
115 | wherever such third-party notices normally appear. The contents
116 | of the NOTICE file are for informational purposes only and
117 | do not modify the License. You may add Your own attribution
118 | notices within Derivative Works that You distribute, alongside
119 | or as an addendum to the NOTICE text from the Work, provided
120 | that such additional attribution notices cannot be construed
121 | as modifying the License.
122 |
123 | You may add Your own copyright statement to Your modifications and
124 | may provide additional or different license terms and conditions
125 | for use, reproduction, or distribution of Your modifications, or
126 | for any such Derivative Works as a whole, provided Your use,
127 | reproduction, and distribution of the Work otherwise complies with
128 | the conditions stated in this License.
129 |
130 | 5. Submission of Contributions. Unless You explicitly state otherwise,
131 | any Contribution intentionally submitted for inclusion in the Work
132 | by You to the Licensor shall be under the terms and conditions of
133 | this License, without any additional terms or conditions.
134 | Notwithstanding the above, nothing herein shall supersede or modify
135 | the terms of any separate license agreement you may have executed
136 | with Licensor regarding such Contributions.
137 |
138 | 6. Trademarks. This License does not grant permission to use the trade
139 | names, trademarks, service marks, or product names of the Licensor,
140 | except as required for reasonable and customary use in describing the
141 | origin of the Work and reproducing the content of the NOTICE file.
142 |
143 | 7. Disclaimer of Warranty. Unless required by applicable law or
144 | agreed to in writing, Licensor provides the Work (and each
145 | Contributor provides its Contributions) on an "AS IS" BASIS,
146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147 | implied, including, without limitation, any warranties or conditions
148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149 | PARTICULAR PURPOSE. You are solely responsible for determining the
150 | appropriateness of using or redistributing the Work and assume any
151 | risks associated with Your exercise of permissions under this License.
152 |
153 | 8. Limitation of Liability. In no event and under no legal theory,
154 | whether in tort (including negligence), contract, or otherwise,
155 | unless required by applicable law (such as deliberate and grossly
156 | negligent acts) or agreed to in writing, shall any Contributor be
157 | liable to You for damages, including any direct, indirect, special,
158 | incidental, or consequential damages of any character arising as a
159 | result of this License or out of the use or inability to use the
160 | Work (including but not limited to damages for loss of goodwill,
161 | work stoppage, computer failure or malfunction, or any and all
162 | other commercial damages or losses), even if such Contributor
163 | has been advised of the possibility of such damages.
164 |
165 | 9. Accepting Warranty or Additional Liability. While redistributing
166 | the Work or Derivative Works thereof, You may choose to offer,
167 | and charge a fee for, acceptance of support, warranty, indemnity,
168 | or other liability obligations and/or rights consistent with this
169 | License. However, in accepting such obligations, You may act only
170 | on Your own behalf and on Your sole responsibility, not on behalf
171 | of any other Contributor, and only if You agree to indemnify,
172 | defend, and hold each Contributor harmless for any liability
173 | incurred by, or claims asserted against, such Contributor by reason
174 | of your accepting any such warranty or additional liability.
175 |
176 | END OF TERMS AND CONDITIONS
177 |
178 | APPENDIX: How to apply the Apache License to your work.
179 |
180 | To apply the Apache License to your work, attach the following
181 | boilerplate notice, with the fields enclosed by brackets "{}"
182 | replaced with your own identifying information. (Don't include
183 | the brackets!) The text should be enclosed in the appropriate
184 | comment syntax for the file format. We also recommend that a
185 | file or class name and description of purpose be included on the
186 | same "printed page" as the copyright notice for easier
187 | identification within third-party archives.
188 |
189 | Copyright {yyyy} {name of copyright owner}
190 |
191 | Licensed under the Apache License, Version 2.0 (the "License");
192 | you may not use this file except in compliance with the License.
193 | You may obtain a copy of the License at
194 |
195 | http://www.apache.org/licenses/LICENSE-2.0
196 |
197 | Unless required by applicable law or agreed to in writing, software
198 | distributed under the License is distributed on an "AS IS" BASIS,
199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 | See the License for the specific language governing permissions and
201 | limitations under the License.
202 |
203 |
--------------------------------------------------------------------------------
/pom.xml:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="UTF-8"?>
2 | <project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
3 |     xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
4 |     <modelVersion>4.0.0</modelVersion>
5 |     <groupId>io.smartcat</groupId>
6 |     <artifactId>cassandra-migration-tool</artifactId>
7 |     <version>3.1.0.1-SNAPSHOT</version>
8 |     <packaging>jar</packaging>
9 | 
10 |     <name>cassandra-migration-tool</name>
11 |     <description>Cassandra schema migration tool</description>
12 |     <url>https://github.com/smartcat-labs/cassandra-migration-tool-java</url>
13 | 
14 |     <licenses>
15 |         <license>
16 |             <name>Apache License, Version 2.0</name>
17 |             <url>http://www.apache.org/licenses/LICENSE-2.0.txt</url>
18 |             <distribution>repo</distribution>
19 |         </license>
20 |     </licenses>
21 | 
22 |     <issueManagement>
23 |         <system>github.com</system>
24 |         <url>https://github.com/smartcat-labs/cassandra-migration-tool-java/issues</url>
25 |     </issueManagement>
26 | 
27 |     <scm>
28 |         <connection>scm:git:git@github.com:smartcat-labs/cassandra-migration-tool-java.git</connection>
29 |         <developerConnection>scm:git:git@github.com:smartcat-labs/cassandra-migration-tool-java.git</developerConnection>
30 |         <url>https://github.com/smartcat-labs/cassandra-migration-tool-java</url>
31 |         <tag>HEAD</tag>
32 |     </scm>
33 | 
34 |     <developers>
35 |         <developer>
36 |             <name>Nenad Bozic</name>
37 |             <email>nenad.bozic@smartcat.io</email>
38 |             <organization>SmartCat</organization>
39 |             <organizationUrl>http://www.smartcat.io</organizationUrl>
40 |         </developer>
41 |         <developer>
42 |             <name>Matija Gobec</name>
43 |             <email>matija.gobec@smartcat.io</email>
44 |             <organization>SmartCat</organization>
45 |             <organizationUrl>http://www.smartcat.io</organizationUrl>
46 |         </developer>
47 |         <developer>
48 |             <name>Nikola Ivancevic</name>
49 |             <email>nikola.ivancevic@smartcat.io</email>
50 |             <organization>SmartCat</organization>
51 |             <organizationUrl>http://www.smartcat.io</organizationUrl>
52 |         </developer>
53 |     </developers>
54 | 
55 |     <properties>
56 |         <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
57 |         <source.level>1.8</source.level>
58 |         <code.level>1.8</code.level>
59 |         <version.cassandra-driver>3.1.0</version.cassandra-driver>
60 |         <version.cassandra-unit>3.0.0.1</version.cassandra-unit>
61 |         <version.guava>18.0</version.guava>
62 |         <version.slf4j>1.7.7</version.slf4j>
63 |         <version.junit>4.12</version.junit>
64 |         <version.mockito>1.10.19</version.mockito>
65 |         <version.plugin.jar>2.6</version.plugin.jar>
66 |         <version.plugin.resources>2.7</version.plugin.resources>
67 |         <version.plugin.install>2.5.2</version.plugin.install>
68 |         <version.plugin.surefire>2.19.1</version.plugin.surefire>
69 |         <version.plugin.compiler>3.5.1</version.plugin.compiler>
70 |         <version.plugin.deploy>2.8.2</version.plugin.deploy>
71 |         <version.plugin.source>3.0.0</version.plugin.source>
72 |         <version.plugin.javadoc>2.10.3</version.plugin.javadoc>
73 |         <version.plugin.gpg>1.6</version.plugin.gpg>
74 |         <version.plugin.checkstyle>2.17</version.plugin.checkstyle>
75 |     </properties>
76 | 
77 |     <dependencies>
78 |         <dependency>
79 |             <groupId>com.datastax.cassandra</groupId>
80 |             <artifactId>cassandra-driver-core</artifactId>
81 |             <version>${version.cassandra-driver}</version>
82 |         </dependency>
83 |         <dependency>
84 |             <groupId>org.slf4j</groupId>
85 |             <artifactId>slf4j-api</artifactId>
86 |             <version>${version.slf4j}</version>
87 |         </dependency>
88 | 
89 |         <dependency>
90 |             <groupId>com.google.guava</groupId>
91 |             <artifactId>guava</artifactId>
92 |             <version>${version.guava}</version>
93 |             <scope>test</scope>
94 |         </dependency>
95 |         <dependency>
96 |             <groupId>junit</groupId>
97 |             <artifactId>junit</artifactId>
98 |             <version>${version.junit}</version>
99 |             <scope>test</scope>
100 |         </dependency>
101 |         <dependency>
102 |             <groupId>org.mockito</groupId>
103 |             <artifactId>mockito-all</artifactId>
104 |             <version>${version.mockito}</version>
105 |             <scope>test</scope>
106 |         </dependency>
107 |         <dependency>
108 |             <groupId>org.cassandraunit</groupId>
109 |             <artifactId>cassandra-unit</artifactId>
110 |             <version>${version.cassandra-unit}</version>
111 |             <scope>test</scope>
112 |             <exclusions>
113 |                 <exclusion>
114 |                     <groupId>org.slf4j</groupId>
115 |                     <artifactId>slf4j-log4j12</artifactId>
116 |                 </exclusion>
117 |             </exclusions>
118 |         </dependency>
119 |         <dependency>
120 |             <groupId>org.fusesource</groupId>
121 |             <artifactId>sigar</artifactId>
122 |             <version>1.6.4</version>
123 |             <scope>test</scope>
124 |         </dependency>
125 | 
126 |     </dependencies>
127 | 
128 |     <build>
129 |         <plugins>
130 |             <plugin>
131 |                 <groupId>org.apache.maven.plugins</groupId>
132 |                 <artifactId>maven-jar-plugin</artifactId>
133 |                 <version>${version.plugin.jar}</version>
134 |             </plugin>
135 |             <plugin>
136 |                 <groupId>org.apache.maven.plugins</groupId>
137 |                 <artifactId>maven-resources-plugin</artifactId>
138 |                 <version>${version.plugin.resources}</version>
139 |             </plugin>
140 |             <plugin>
141 |                 <groupId>org.apache.maven.plugins</groupId>
142 |                 <artifactId>maven-install-plugin</artifactId>
143 |                 <version>${version.plugin.install}</version>
144 |             </plugin>
145 |             <plugin>
146 |                 <groupId>org.apache.maven.plugins</groupId>
147 |                 <artifactId>maven-surefire-plugin</artifactId>
148 |                 <version>${version.plugin.surefire}</version>
149 |             </plugin>
150 |             <plugin>
151 |                 <groupId>org.apache.maven.plugins</groupId>
152 |                 <artifactId>maven-deploy-plugin</artifactId>
153 |                 <version>${version.plugin.deploy}</version>
154 |                 <configuration>
155 |                     <updateReleaseInfo>true</updateReleaseInfo>
156 |                 </configuration>
157 |             </plugin>
158 |             <plugin>
159 |                 <groupId>org.apache.maven.plugins</groupId>
160 |                 <artifactId>maven-compiler-plugin</artifactId>
161 |                 <version>${version.plugin.compiler}</version>
162 |                 <inherited>true</inherited>
163 |                 <configuration>
164 |                     <source>${source.level}</source>
165 |                     <target>${code.level}</target>
166 |                     <encoding>${project.build.sourceEncoding}</encoding>
167 |                 </configuration>
168 |             </plugin>
169 |         </plugins>
170 |     </build>
171 | 
172 |     <distributionManagement>
173 |         <repository>
174 | 
175 |             <id>bintray</id>
176 |             <url>https://api.bintray.com/maven/smartcat-labs/maven/cassandra-migration-tool-java/;publish=1</url>
177 |         </repository>
178 |     </distributionManagement>
179 | 
180 |     <profiles>
181 |         <profile>
182 |             <id>extras</id>
183 |             <activation>
184 |                 <activeByDefault>false</activeByDefault>
185 |             </activation>
186 |             <build>
187 |                 <plugins>
188 |                     <plugin>
189 |                         <groupId>org.apache.maven.plugins</groupId>
190 |                         <artifactId>maven-source-plugin</artifactId>
191 |                         <version>${version.plugin.source}</version>
192 |                         <executions>
193 |                             <execution>
194 |                                 <goals>
195 |                                     <goal>jar</goal>
196 |                                 </goals>
197 |                             </execution>
198 |                         </executions>
199 |                     </plugin>
200 |                     <plugin>
201 |                         <groupId>org.apache.maven.plugins</groupId>
202 |                         <artifactId>maven-javadoc-plugin</artifactId>
203 |                         <version>${version.plugin.javadoc}</version>
204 |                         <executions>
205 |                             <execution>
206 |                                 <id>attach-javadoc</id>
207 |                                 <goals>
208 |                                     <goal>jar</goal>
209 |                                 </goals>
210 |                             </execution>
211 |                         </executions>
212 |                     </plugin>
213 |                 </plugins>
214 |             </build>
215 |         </profile>
216 |         <profile>
217 |             <id>gpg</id>
218 |             <activation>
219 |                 <activeByDefault>false</activeByDefault>
220 |             </activation>
221 | 
222 |             <build>
223 |                 <plugins>
224 |                     <plugin>
225 |                         <groupId>org.apache.maven.plugins</groupId>
226 |                         <artifactId>maven-gpg-plugin</artifactId>
227 |                         <version>${version.plugin.gpg}</version>
228 |                         <executions>
229 |                             <execution>
230 |                                 <phase>verify</phase>
231 |                                 <goals>
232 |                                     <goal>sign</goal>
233 |                                 </goals>
234 |                             </execution>
235 |                         </executions>
236 |                     </plugin>
237 |                 </plugins>
238 |             </build>
239 |         </profile>
240 |         <profile>
241 |             <id>checks</id>
242 |             <activation>
243 |                 <activeByDefault>true</activeByDefault>
244 |             </activation>
245 | 
246 |             <build>
247 |                 <plugins>
248 |                     <plugin>
249 |                         <groupId>org.apache.maven.plugins</groupId>
250 |                         <artifactId>maven-checkstyle-plugin</artifactId>
251 |                         <version>${version.plugin.checkstyle}</version>
252 |                         <executions>
253 |                             <execution>
254 |                                 <phase>validate</phase>
255 |                                 <goals>
256 |                                     <goal>check</goal>
257 |                                 </goals>
258 |                                 <configuration>
259 |                                     <configLocation>checkstyle.xml</configLocation>
260 |                                     <consoleOutput>true</consoleOutput>
261 |                                     <failsOnError>true</failsOnError>
262 |                                     <includeTestSourceDirectory>true</includeTestSourceDirectory>
263 |                                     <propertiesLocation>checkstyle.properties</propertiesLocation>
264 |                                     <excludes>**/generated/**/*</excludes>
265 |                                 </configuration>
266 |                             </execution>
267 |                         </executions>
268 |                     </plugin>
269 |                 </plugins>
270 |             </build>
271 |         </profile>
272 |     </profiles>
273 | 
274 | </project>
--------------------------------------------------------------------------------
/src/test/resources/another-cassandra.yaml:
--------------------------------------------------------------------------------
1 | # Cassandra storage config YAML
2 |
3 | # NOTE:
4 | # See http://wiki.apache.org/cassandra/StorageConfiguration for
5 | # full explanations of configuration directives
6 | # /NOTE
7 |
8 | # The name of the cluster. This is mainly used to prevent machines in
9 | # one logical cluster from joining another.
10 | cluster_name: 'Test Cluster'
11 |
12 | # You should always specify InitialToken when setting up a production
13 | # cluster for the first time, and often when adding capacity later.
14 | # The principle is that each node should be given an equal slice of
15 | # the token ring; see http://wiki.apache.org/cassandra/Operations
16 | # for more details.
17 | #
18 | # If blank, Cassandra will request a token bisecting the range of
19 | # the heaviest-loaded existing node. If there is no load information
20 | # available, such as is the case with a new cluster, it will pick
21 | # a random token, which will lead to hot spots.
22 | initial_token:
23 |
24 | # See http://wiki.apache.org/cassandra/HintedHandoff
25 | hinted_handoff_enabled: true
26 | # this defines the maximum amount of time a dead host will have hints
27 | # generated. After it has been dead this long, hints will be dropped.
28 | max_hint_window_in_ms: 3600000 # one hour
29 |
30 | hints_directory: target/embeddedCassandra/hints
31 | # DEPRECATED : Sleep this long after delivering each hint
32 | #hinted_handoff_throttle_delay_in_ms: 1
33 |
34 | # authentication backend, implementing IAuthenticator; used to identify users
35 | authenticator: org.apache.cassandra.auth.AllowAllAuthenticator
36 |
37 | # Authorization backend, implementing IAuthorizer; used to limit access/provide permissions
38 | # Out of the box, Cassandra provides org.apache.cassandra.auth.{AllowAllAuthorizer,
39 | # CassandraAuthorizer}.
40 | #
41 | # - AllowAllAuthorizer allows any action to any user - set it to disable authorization.
42 | # - CassandraAuthorizer stores permissions in system_auth.permissions table. Please
43 | # increase system_auth keyspace replication factor if you use this authorizer.
44 | authorizer: org.apache.cassandra.auth.AllowAllAuthorizer
45 |
46 | # The partitioner is responsible for distributing rows (by key) across
47 | # nodes in the cluster. Any IPartitioner may be used, including your
48 | # own as long as it is on the classpath. Out of the box, Cassandra
49 | # provides org.apache.cassandra.dht.RandomPartitioner
50 | # org.apache.cassandra.dht.ByteOrderedPartitioner,
51 | # org.apache.cassandra.dht.OrderPreservingPartitioner (deprecated),
52 | # and org.apache.cassandra.dht.CollatingOrderPreservingPartitioner
53 | # (deprecated).
54 | #
55 | # - RandomPartitioner distributes rows across the cluster evenly by md5.
56 | # When in doubt, this is the best option.
57 | # - ByteOrderedPartitioner orders rows lexically by key bytes. BOP allows
58 | # scanning rows in key order, but the ordering can generate hot spots
59 | # for sequential insertion workloads.
60 | # - OrderPreservingPartitioner is an obsolete form of BOP, that stores
61 | # - keys in a less-efficient format and only works with keys that are
62 | # UTF8-encoded Strings.
63 | # - CollatingOPP collates according to EN,US rules rather than lexical byte
64 | # ordering. Use this as an example if you need custom collation.
65 | #
66 | # See http://wiki.apache.org/cassandra/Operations for more on
67 | # partitioners and token selection.
68 | partitioner: org.apache.cassandra.dht.RandomPartitioner
69 |
70 | # directories where Cassandra should store data on disk.
71 | data_file_directories:
72 | - target/embeddedCassandra/data
73 |
74 | # commit log
75 | commitlog_directory: target/embeddedCassandra/commitlog
76 |
77 | # Maximum size of the key cache in memory.
78 | #
79 | # Each key cache hit saves 1 seek and each row cache hit saves 2 seeks at the
80 | # minimum, sometimes more. The key cache is fairly tiny for the amount of
81 | # time it saves, so it's worthwhile to use it at large numbers.
82 | # The row cache saves even more time, but must store the whole values of
83 | # its rows, so it is extremely space-intensive. It's best to only use the
84 | # row cache if you have hot rows or static rows.
85 | #
86 | # NOTE: if you reduce the size, you may not get your hottest keys loaded on startup.
87 | #
88 | # Default value is empty to make it "auto" (min(5% of Heap (in MB), 100MB)). Set to 0 to disable key cache.
89 | key_cache_size_in_mb:
90 |
91 | # Duration in seconds after which Cassandra should
92 | # save the key cache. Caches are saved to saved_caches_directory as
93 | # specified in this configuration file.
94 | #
95 | # Saved caches greatly improve cold-start speeds, and is relatively cheap in
96 | # terms of I/O for the key cache. Row cache saving is much more expensive and
97 | # has limited use.
98 | #
99 | # Default is 14400 or 4 hours.
100 | key_cache_save_period: 14400
101 |
102 | # Number of keys from the key cache to save
103 | # Disabled by default, meaning all keys are going to be saved
104 | # key_cache_keys_to_save: 100
105 |
106 | # Maximum size of the row cache in memory.
107 | # NOTE: if you reduce the size, you may not get your hottest keys loaded on startup.
108 | #
109 | # Default value is 0, to disable row caching.
110 | row_cache_size_in_mb: 0
111 |
112 | # Duration in seconds after which Cassandra should
113 | # save the row cache. Caches are saved to saved_caches_directory as specified
114 | # in this configuration file.
115 | #
116 | # Saved caches greatly improve cold-start speeds, and is relatively cheap in
117 | # terms of I/O for the key cache. Row cache saving is much more expensive and
118 | # has limited use.
119 | #
120 | # Default is 0 to disable saving the row cache.
121 | row_cache_save_period: 0
122 |
123 | # Number of keys from the row cache to save
124 | # Disabled by default, meaning all keys are going to be saved
125 | # row_cache_keys_to_save: 100
126 |
127 | # saved caches
128 | saved_caches_directory: target/embeddedCassandra/saved_caches
129 |
130 | # commitlog_sync may be either "periodic" or "batch."
131 | # When in batch mode, Cassandra won't ack writes until the commit log
132 | # has been fsynced to disk. It will wait up to
133 | # commitlog_sync_batch_window_in_ms milliseconds for other writes, before
134 | # performing the sync.
135 | #
136 | # commitlog_sync: batch
137 | # commitlog_sync_batch_window_in_ms: 50
138 | #
139 | # the other option is "periodic" where writes may be acked immediately
140 | # and the CommitLog is simply synced every commitlog_sync_period_in_ms
141 | # milliseconds.
142 | commitlog_sync: periodic
143 | commitlog_sync_period_in_ms: 10000
144 |
145 | # any class that implements the SeedProvider interface and has a
146 | # constructor that takes a Map of parameters will do.
147 | seed_provider:
148 | # Addresses of hosts that are deemed contact points.
149 | # Cassandra nodes use this list of hosts to find each other and learn
150 | # the topology of the ring. You must change this if you are running
151 | # multiple nodes!
152 | - class_name: org.apache.cassandra.locator.SimpleSeedProvider
153 | parameters:
154 | # seeds is actually a comma-delimited list of addresses.
155 |           # Ex: "<ip1>,<ip2>,<ip3>"
156 | - seeds: "127.0.0.1"
157 |
158 | # For workloads with more data than can fit in memory, Cassandra's
159 | # bottleneck will be reads that need to fetch data from
160 | # disk. "concurrent_reads" should be set to (16 * number_of_drives) in
161 | # order to allow the operations to enqueue low enough in the stack
162 | # that the OS and drives can reorder them.
163 | #
164 | # On the other hand, since writes are almost never IO bound, the ideal
165 | # number of "concurrent_writes" is dependent on the number of cores in
166 | # your system; (8 * number_of_cores) is a good rule of thumb.
167 | concurrent_reads: 32
168 | concurrent_writes: 32
169 |
170 | # Total memory to use for memtables. Cassandra will flush the largest
171 | # memtable when this much memory is used.
172 | # If omitted, Cassandra will set it to 1/3 of the heap.
173 | # memtable_total_space_in_mb: 2048
174 |
175 | # Total space to use for commitlogs.
176 | # If space gets above this value (it will round up to the next nearest
177 | # segment multiple), Cassandra will flush every dirty CF in the oldest
178 | # segment and remove it.
179 | # commitlog_total_space_in_mb: 4096
180 |
181 | # This sets the amount of memtable flush writer threads. These will
182 | # be blocked by disk io, and each one will hold a memtable in memory
183 | # while blocked. If you have a large heap and many data directories,
184 | # you can increase this value for better flush performance.
185 | # By default this will be set to the amount of data directories defined.
186 | #memtable_flush_writers: 1
187 |
188 | # the number of full memtables to allow pending flush, that is,
189 | # waiting for a writer thread. At a minimum, this should be set to
190 | # the maximum number of secondary indexes created on a single CF.
191 | # memtable_flush_queue_size: 4
192 |
193 | # Whether to, when doing sequential writing, fsync() at intervals in
194 | # order to force the operating system to flush the dirty
195 | # buffers. Enable this to avoid sudden dirty buffer flushing from
196 | # impacting read latencies. Almost always a good idea on SSD:s; not
197 | # necessarily on platters.
198 | trickle_fsync: false
199 | trickle_fsync_interval_in_kb: 10240
200 |
201 | # TCP port, for commands and data
202 | storage_port: 7010
203 |
204 | # SSL port, for encrypted communication. Unused unless enabled in
205 | # encryption_options
206 | ssl_storage_port: 7011
207 |
208 | # Address to bind to and tell other Cassandra nodes to connect to. You
209 | # _must_ change this if you want multiple nodes to be able to
210 | # communicate!
211 | #
212 | # Leaving it blank leaves it up to InetAddress.getLocalHost(). This
213 | # will always do the Right Thing *if* the node is properly configured
214 | # (hostname, name resolution, etc), and the Right Thing is to use the
215 | # address associated with the hostname (it might not be).
216 | #
217 | # Setting this to 0.0.0.0 is always wrong.
218 | listen_address: 127.0.0.1
219 |
220 | # Address to broadcast to other Cassandra nodes
221 | # Leaving this blank will set it to the same value as listen_address
222 | # broadcast_address: 1.2.3.4
223 |
224 | # The address to bind the Thrift RPC service to -- clients connect
225 | # here. Unlike ListenAddress above, you *can* specify 0.0.0.0 here if
226 | # you want Thrift to listen on all interfaces.
227 | #
228 | # Leaving this blank has the same effect it does for ListenAddress,
229 | # (i.e. it will be based on the configured hostname of the node).
230 | rpc_address: localhost
231 | # port for Thrift to listen for clients on
232 | rpc_port: 9171
233 |
234 | start_native_transport: true
235 | # port for the CQL native transport to listen for clients on
236 | native_transport_port: 9142
237 |
238 | # enable or disable keepalive on rpc connections
239 | rpc_keepalive: true
240 |
241 | # Cassandra provides three options for the RPC Server:
242 | #
243 | # sync -> One connection per thread in the rpc pool (see below).
244 | # For a very large number of clients, memory will be your limiting
245 | # factor; on a 64 bit JVM, 128KB is the minimum stack size per thread.
246 | # Connection pooling is very, very strongly recommended.
247 | #
248 | # async -> Nonblocking server implementation with one thread to serve
249 | # rpc connections. This is not recommended for high throughput use
250 | # cases. Async has been tested to be about 50% slower than sync
251 | # or hsha and is deprecated: it will be removed in the next major release.
252 | #
253 | # hsha -> Stands for "half synchronous, half asynchronous." The rpc thread pool
254 | # (see below) is used to manage requests, but the threads are multiplexed
255 | # across the different clients.
256 | #
257 | # The default is sync because on Windows hsha is about 30% slower. On Linux,
258 | # sync/hsha performance is about the same, with hsha of course using less memory.
259 | rpc_server_type: sync
260 |
261 | # Uncomment rpc_min|max|thread to set request pool size.
262 | # You would primarily set max for the sync server to safeguard against
263 | # misbehaved clients; if you do hit the max, Cassandra will block until one
264 | # disconnects before accepting more. The defaults for sync are min of 16 and max
265 | # unlimited.
266 | #
267 | # For the Hsha server, the min and max both default to quadruple the number of
268 | # CPU cores.
269 | #
270 | # This configuration is ignored by the async server.
271 | #
272 | # rpc_min_threads: 16
273 | # rpc_max_threads: 2048
274 |
275 | # uncomment to set socket buffer sizes on rpc connections
276 | # rpc_send_buff_size_in_bytes:
277 | # rpc_recv_buff_size_in_bytes:
278 |
279 | # Frame size for thrift (maximum field length).
280 | # 0 disables TFramedTransport in favor of TSocket. This option
281 | # is deprecated; we strongly recommend using Framed mode.
282 | thrift_framed_transport_size_in_mb: 15
283 |
284 | # The max length of a thrift message, including all fields and
285 | # internal thrift overhead.
286 | thrift_max_message_length_in_mb: 16
287 |
288 | # Set to true to have Cassandra create a hard link to each sstable
289 | # flushed or streamed locally in a backups/ subdirectory of the
290 | # Keyspace data. Removing these links is the operator's
291 | # responsibility.
292 | incremental_backups: false
293 |
294 | # Whether or not to take a snapshot before each compaction. Be
295 | # careful using this option, since Cassandra won't clean up the
296 | # snapshots for you. Mostly useful if you're paranoid when there
297 | # is a data format change.
298 | snapshot_before_compaction: false
299 |
300 | # Whether or not a snapshot is taken of the data before keyspace truncation
301 | # or dropping of column families. The STRONGLY advised default of true
302 | # should be used to provide data safety. If you set this flag to false, you will
303 | # lose data on truncation or drop.
304 | auto_snapshot: false
305 |
306 | # Add column indexes to a row after its contents reach this size.
307 | # Increase if your column values are large, or if you have a very large
308 | # number of columns. The competing causes are, Cassandra has to
309 | # deserialize this much of the row to read a single column, so you want
310 | # it to be small - at least if you do many partial-row reads - but all
311 | # the index data is read for each access, so you don't want to generate
312 | # that wastefully either.
313 | column_index_size_in_kb: 64
314 |
315 | # Size limit for rows being compacted in memory. Larger rows will spill
316 | # over to disk and use a slower two-pass compaction process. A message
317 | # will be logged specifying the row key.
318 | # in_memory_compaction_limit_in_mb: 64
319 |
320 | # Number of simultaneous compactions to allow, NOT including
321 | # validation "compactions" for anti-entropy repair. Simultaneous
322 | # compactions can help preserve read performance in a mixed read/write
323 | # workload, by mitigating the tendency of small sstables to accumulate
324 | # during a single long running compactions. The default is usually
325 | # fine and if you experience problems with compaction running too
326 | # slowly or too fast, you should look at
327 | # compaction_throughput_mb_per_sec first.
328 | #
329 | # This setting has no effect on LeveledCompactionStrategy.
330 | #
331 | # concurrent_compactors defaults to the number of cores.
332 | # Uncomment to make compaction mono-threaded, the pre-0.8 default.
333 | #concurrent_compactors: 1
334 |
335 | # Multi-threaded compaction. When enabled, each compaction will use
336 | # up to one thread per core, plus one thread per sstable being merged.
337 | # This is usually only useful for SSD-based hardware: otherwise,
338 | # your concern is usually to get compaction to do LESS i/o (see:
339 | # compaction_throughput_mb_per_sec), not more.
340 | # multithreaded_compaction: false
341 |
342 | # Throttles compaction to the given total throughput across the entire
343 | # system. The faster you insert data, the faster you need to compact in
344 | # order to keep the sstable count down, but in general, setting this to
345 | # 16 to 32 times the rate you are inserting data is more than sufficient.
346 | # Setting this to 0 disables throttling. Note that this accounts for all types
347 | # of compaction, including validation compaction.
348 | compaction_throughput_mb_per_sec: 16
349 |
350 | # Track cached row keys during compaction, and re-cache their new
351 | # positions in the compacted sstable. Disable if you use really large
352 | # key caches.
353 | # compaction_preheat_key_cache: true
354 |
355 | # Throttles all outbound streaming file transfers on this node to the
356 | # given total throughput in Mbps. This is necessary because Cassandra does
357 | # mostly sequential IO when streaming data during bootstrap or repair, which
358 | # can lead to saturating the network connection and degrading rpc performance.
359 | # When unset, the default is 400 Mbps or 50 MB/s.
360 | # stream_throughput_outbound_megabits_per_sec: 400
361 |
362 | # DEPRECATED : Time to wait for a reply from other nodes before failing the command
363 | #rpc_timeout_in_ms: 10000
364 |
365 | # Enable socket timeout for streaming operation.
366 | # When a timeout occurs during streaming, streaming is retried from the start
367 | # of the current file. This *can* involve re-streaming a significant amount of
368 | # data, so you should avoid setting the value too low.
369 | # Default value is 0, which means streams never time out.
370 | # streaming_socket_timeout_in_ms: 0
371 |
372 | # phi value that must be reached for a host to be marked down.
373 | # most users should never need to adjust this.
374 | # phi_convict_threshold: 8
375 |
376 | # endpoint_snitch -- Set this to a class that implements
377 | # IEndpointSnitch. The snitch has two functions:
378 | # - it teaches Cassandra enough about your network topology to route
379 | # requests efficiently
380 | # - it allows Cassandra to spread replicas around your cluster to avoid
381 | # correlated failures. It does this by grouping machines into
382 | # "datacenters" and "racks." Cassandra will do its best not to have
383 | # more than one replica on the same "rack" (which may not actually
384 | # be a physical location)
385 | #
386 | # IF YOU CHANGE THE SNITCH AFTER DATA IS INSERTED INTO THE CLUSTER,
387 | # YOU MUST RUN A FULL REPAIR, SINCE THE SNITCH AFFECTS WHERE REPLICAS
388 | # ARE PLACED.
389 | #
390 | # Out of the box, Cassandra provides
391 | # - SimpleSnitch:
392 | # Treats Strategy order as proximity. This improves cache locality
393 | # when disabling read repair, which can further improve throughput.
394 | # Only appropriate for single-datacenter deployments.
395 | # - PropertyFileSnitch:
396 | # Proximity is determined by rack and data center, which are
397 | # explicitly configured in cassandra-topology.properties.
398 | # - RackInferringSnitch:
399 | # Proximity is determined by rack and data center, which are
400 | # assumed to correspond to the 3rd and 2nd octet of each node's
401 | # IP address, respectively. Unless this happens to match your
402 | # deployment conventions (as it did Facebook's), this is best used
403 | # as an example of writing a custom Snitch class.
404 | # - Ec2Snitch:
405 | # Appropriate for EC2 deployments in a single Region. Loads Region
406 | # and Availability Zone information from the EC2 API. The Region is
407 | # treated as the Datacenter, and the Availability Zone as the rack.
408 | # Only private IPs are used, so this will not work across multiple
409 | # Regions.
410 | # - Ec2MultiRegionSnitch:
411 | # Uses public IPs as broadcast_address to allow cross-region
412 | # connectivity. (Thus, you should set seed addresses to the public
413 | # IP as well.) You will need to open the storage_port or
414 | # ssl_storage_port on the public IP firewall. (For intra-Region
415 | # traffic, Cassandra will switch to the private IP after
416 | # establishing a connection.)
417 | #
418 | # You can use a custom Snitch by setting this to the full class name
419 | # of the snitch, which will be assumed to be on your classpath.
420 | endpoint_snitch: PropertyFileSnitch
421 |
422 | # controls how often to perform the more expensive part of host score
423 | # calculation
424 | dynamic_snitch_update_interval_in_ms: 100
425 | # controls how often to reset all host scores, allowing a bad host to
426 | # possibly recover
427 | dynamic_snitch_reset_interval_in_ms: 600000
428 | # if set greater than zero and read_repair_chance is < 1.0, this will allow
429 | # 'pinning' of replicas to hosts in order to increase cache capacity.
430 | # The badness threshold will control how much worse the pinned host has to be
431 | # before the dynamic snitch will prefer other replicas over it. This is
432 | # expressed as a double which represents a percentage. Thus, a value of
433 | # 0.2 means Cassandra would continue to prefer the static snitch values
434 | # until the pinned host was 20% worse than the fastest.
435 | dynamic_snitch_badness_threshold: 0.1
436 |
437 | # request_scheduler -- Set this to a class that implements
438 | # RequestScheduler, which will schedule incoming client requests
439 | # according to the specific policy. This is useful for multi-tenancy
440 | # with a single Cassandra cluster.
441 | # NOTE: This is specifically for requests from the client and does
442 | # not affect inter node communication.
443 | # org.apache.cassandra.scheduler.NoScheduler - No scheduling takes place
444 | # org.apache.cassandra.scheduler.RoundRobinScheduler - Round robin of
445 | # client requests to a node with a separate queue for each
446 | # request_scheduler_id. The scheduler is further customized by
447 | # request_scheduler_options as described below.
448 | request_scheduler: org.apache.cassandra.scheduler.NoScheduler
449 |
450 | # Scheduler Options vary based on the type of scheduler
451 | # NoScheduler - Has no options
452 | # RoundRobin
453 | # - throttle_limit -- The throttle_limit is the number of in-flight
454 | # requests per client. Requests beyond
455 | # that limit are queued up until
456 | # running requests can complete.
457 | # The value of 80 here is twice the number of
458 | # concurrent_reads + concurrent_writes.
459 | # - default_weight -- default_weight is optional and allows for
460 | # overriding the default which is 1.
461 | # - weights -- Weights are optional and will default to 1 or the
462 | # overridden default_weight. The weight translates into how
463 | # many requests are handled during each turn of the
464 | # RoundRobin, based on the scheduler id.
465 | #
466 | # request_scheduler_options:
467 | # throttle_limit: 80
468 | # default_weight: 5
469 | # weights:
470 | # Keyspace1: 1
471 | # Keyspace2: 5
472 |
473 | # request_scheduler_id -- An identifier based on which to perform
474 | # the request scheduling. Currently the only valid option is keyspace.
475 | # request_scheduler_id: keyspace
476 |
477 | # index_interval controls the sampling of entries from the primary
478 | # row index in terms of space versus time. The larger the interval,
479 | # the smaller and less effective the sampling will be. In technical
480 | # terms, the interval corresponds to the number of index entries that
481 | # are skipped between taking each sample. All the sampled entries
482 | # must fit in memory. Generally, a value between 128 and 512 here
483 | # coupled with a large key cache size on CFs results in the best
484 | # trade-offs. This value is not often changed; however, if you have many
485 | # very small rows (many to an OS page), then increasing this will
486 | # often lower memory usage without an impact on performance.
487 | index_interval: 128
488 |
489 | # Enable or disable inter-node encryption
490 | # Default settings are TLS v1, RSA 1024-bit keys (it is imperative that
491 | # users generate their own keys) TLS_RSA_WITH_AES_128_CBC_SHA as the cipher
492 | # suite for authentication, key exchange and encryption of the actual data transfers.
493 | # NOTE: No custom encryption options are enabled at the moment
494 | # The available internode options are : all, none, dc, rack
495 | #
496 | # If set to dc cassandra will encrypt the traffic between the DCs
497 | # If set to rack cassandra will encrypt the traffic between the racks
498 | #
499 | # The passwords used in these options must match the passwords used when generating
500 | # the keystore and truststore. For instructions on generating these files, see:
501 | # http://download.oracle.com/javase/6/docs/technotes/guides/security/jsse/JSSERefGuide.html#CreateKeystore
502 | #
503 | server_encryption_options:
504 | internode_encryption: none
505 | keystore: conf/.keystore
506 | keystore_password: cassandra
507 | truststore: conf/.truststore
508 | truststore_password: cassandra
509 | # More advanced defaults below:
510 | # protocol: TLS
511 | # algorithm: SunX509
512 | # store_type: JKS
513 | # cipher_suites: [TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA]
514 |
515 | # Tombstone warning threshold
516 | tombstone_warn_threshold: 2000
517 |
--------------------------------------------------------------------------------
/smartcat-formatter.xml:
--------------------------------------------------------------------------------
1 | <!-- SmartCat Eclipse formatter profile; the individual <setting> elements were stripped in this dump and are not recoverable. -->
--------------------------------------------------------------------------------