├── gradle └── wrapper │ ├── gradle-wrapper.jar │ └── gradle-wrapper.properties ├── AUTHORS.md ├── src ├── main │ └── scala │ │ └── allods │ │ └── db │ │ └── migration │ │ ├── ResourceReading.scala │ │ ├── MigrationBuilder.scala │ │ ├── TreeSchemaT.scala │ │ ├── PostgreSqlScriptSplitter.scala │ │ ├── sessionApi.scala │ │ ├── DbMigrationManagerImpl.scala │ │ ├── PersistentColumnReadingT.scala │ │ └── Migrations.scala └── test │ ├── scala │ └── allods │ │ └── db │ │ └── migration │ │ ├── PostgreSqlScriptSplitterTest.scala │ │ ├── TestDbServer.scala │ │ ├── DbServerPointer.scala │ │ └── MigrationTest.scala │ └── resources │ └── allods │ └── db │ └── migration │ └── test.sql ├── README.md ├── README.ru.md ├── LICENSE.md ├── gradlew.bat └── gradlew /gradle/wrapper/gradle-wrapper.jar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mailru/slick-migration/master/gradle/wrapper/gradle-wrapper.jar -------------------------------------------------------------------------------- /gradle/wrapper/gradle-wrapper.properties: -------------------------------------------------------------------------------- 1 | #Thu Aug 13 13:00:43 MSK 2015 2 | distributionBase=GRADLE_USER_HOME 3 | distributionPath=wrapper/dists 4 | zipStoreBase=GRADLE_USER_HOME 5 | zipStorePath=wrapper/dists 6 | distributionUrl=https\://services.gradle.org/distributions/gradle-2.10-all.zip 7 | -------------------------------------------------------------------------------- /AUTHORS.md: -------------------------------------------------------------------------------- 1 | Slick-migration is a collective effort, and incorporates 2 | many contributions from the community. 3 | 4 | Below follows a list of people, who contributed their code. 5 | 6 | Arseniy Zhizhelev, Sergey Kazantsev, Yulia Loykova 7 | 8 | NOTE: If you can commit a change to this list, please do not hesitate 9 | to add your name to it. 
-------------------------------------------------------------------------------- /src/main/scala/allods/db/migration/ResourceReading.scala: -------------------------------------------------------------------------------- 1 | package allods.db.migration 2 | 3 | import java.net.URL 4 | 5 | /** A conveniency trait for reading resources. 6 | * @author a.zhizhelev 7 | */ 8 | trait ResourceReading { 9 | /** Reads resource for the given class. */ 10 | def readResource(resourcePath: String, enc: String = "utf8") = 11 | ResourceReading.readResource(getClass, resourcePath, enc) 12 | } 13 | 14 | object ResourceReading { 15 | /** Reads resource for the given class. */ 16 | def readResource(clazz: Class[_], resourcePath: String, enc: String = "utf8") = { 17 | val resource: URL = clazz.getResource(resourcePath) 18 | if (resource == null) 19 | throw new IllegalArgumentException(s"Resource not found: $resourcePath in class ${clazz.getName}") 20 | io.Source.fromURL(resource, enc).mkString 21 | } 22 | 23 | } -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | Slick-migration 2 | =============== 3 | 4 | Slick-migration is an open source JVM library that allows developers to manage database schema migrations. 5 | DB schema migrations are vital for any project that requires a database. 6 | 7 | This library has distinctive features: 8 | 9 | * declare migrations in the source files that can be organized with Scala's cake-pattern; 10 | * each migration has a unique URN-identifier; 11 | * migrations can declare explicit dependencies and will be processed in appropriate order (using topological sort); 12 | * support SQL-scripts with PostgreSQL syntax; 13 | 14 | Having compilable migrations is very useful when developing features in independent branches. The compiler will ensure that a branch is merged properly to 15 | the trunk migration graph. 
16 | 17 | License 18 | ------- 19 | 20 | The license for the library is MIT. 21 | 22 | Links 23 | ----- 24 | * source code: https://github.com/mailru/slick-migration 25 | -------------------------------------------------------------------------------- /README.ru.md: -------------------------------------------------------------------------------- 1 | Библиотека Slick-migration 2 | ========================== 3 | 4 | Библиотека с открытым исходным кодом Slick-migration предназначена для декларативного описания миграций схемы БД. В большинстве 5 | развивающихся проектов, использующих БД, необходимо обеспечить управление скриптами изменений структуры БД, т.к. структура БД 6 | меняется вместе с самим проектом. 7 | 8 | Библиотека slick-migration обладает следующими отличительными возможностями: 9 | 10 | * объявление миграций в Scala-trait'ах позволяет организовать совокупность миграций с помощью cake-паттерна; 11 | * каждая миграция имеет уникальный URN-идентификатор; 12 | * миграции могут объявлять зависимости явным образом и библиотека гарантирует их применение в определённом порядке (согласно топологической сортировке); 13 | * поддерживаются SQL-скрипты, использующие синтаксис PostgreSQL; 14 | 15 | Компилируемые миграции особенно полезны при разработке функциональности приложения в ветках. Компилятор выдаст ошибку, если какая-то необходимая 16 | миграция не была замержена. 17 | 18 | Лицензия 19 | -------- 20 | 21 | Лицензия MIT. 
22 | 23 | Links 24 | ----- 25 | * исходный код: https://github.com/mailru/slick-migration 26 | -------------------------------------------------------------------------------- /src/test/scala/allods/db/migration/PostgreSqlScriptSplitterTest.scala: -------------------------------------------------------------------------------- 1 | package allods.db.migration 2 | 3 | import org.junit.runner.RunWith 4 | import org.scalatest.FunSuite 5 | import org.scalatest.junit.JUnitRunner 6 | 7 | 8 | /** 9 | * @author a.zhizhelev 10 | */ 11 | @RunWith(classOf[JUnitRunner]) 12 | class PostgreSqlScriptSplitterTest extends FunSuite { 13 | test("Parse simple script") { 14 | val sql = "CREATE TABLE" 15 | val statements = PostgreSqlScriptSplitter(sql) 16 | // pattern matching to check that we have parsed properly. 17 | val Seq(PostgreSqlScriptSplitter.Statement(`sql`, _)) = statements 18 | } 19 | 20 | test("Parse script with quotes") { 21 | val sql = "CREATE TABLE 'name''ав\"'" 22 | val statements = PostgreSqlScriptSplitter(sql) 23 | // pattern matching to check that we have parsed properly. 24 | val Seq(PostgreSqlScriptSplitter.Statement(`sql`, _)) = statements 25 | } 26 | 27 | test("Parse resource script") { 28 | val testSql = classOf[PostgreSqlScriptSplitterTest].getResource("test.sql") 29 | val statements = PostgreSqlScriptSplitter(testSql) 30 | assert(statements.size === 8) 31 | } 32 | 33 | // test("Parse simple script"){ 34 | // val result = PostgreSqlScriptSplitter("$123$CREATE TABLE;$123$") 35 | // assert(result === "$123$CREATE TABLE;$123$") 36 | // } 37 | } 38 | -------------------------------------------------------------------------------- /LICENSE.md: -------------------------------------------------------------------------------- 1 | Copyright (C) 2015 Slick-migration AUTHORS: please see AUTHORS file. 2 | 3 | Redistribution and use in source and binary forms, with or 4 | without modification, are permitted provided that the following 5 | conditions are met: 6 | 7 | 1. 
Redistributions of source code must retain the above 8 | copyright notice, this list of conditions and the 9 | following disclaimer. 10 | 11 | 2. Redistributions in binary form must reproduce the above 12 | copyright notice, this list of conditions and the following 13 | disclaimer in the documentation and/or other materials 14 | provided with the distribution. 15 | 16 | THIS SOFTWARE IS PROVIDED BY AUTHORS ``AS IS'' AND 17 | ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 18 | TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 19 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL 20 | AUTHORS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, 21 | INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 22 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 23 | SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR 24 | BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF 25 | LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 26 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF 27 | THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 28 | SUCH DAMAGE. -------------------------------------------------------------------------------- /src/test/scala/allods/db/migration/TestDbServer.scala: -------------------------------------------------------------------------------- 1 | package allods.db.migration 2 | 3 | /** 4 | * Test db server description. 
5 | * 6 | * DB-server address can be adjusted by environment properties: 7 | * 8 | * -Dtest.database.url=jdbc:postgresql://dev-db.a1.pvt:5432/ -Dtest.database.login=allods_online_t 9 | * 10 | * @author a.zhizhelev 11 | */ 12 | object TestDbServer { 13 | val urlProperty = "test.database.url" 14 | val loginProperty = "test.database.login" 15 | val passwordProperty = "test.database.password" 16 | 17 | val defaultDbHostSettings = Map( 18 | urlProperty -> "jdbc:postgresql://localhost:5432/", 19 | loginProperty -> "allods_online_t", 20 | passwordProperty -> "" 21 | ) 22 | 23 | // val testDbServer = new DbServerPointer("jdbc:postgresql://dev-db.a1.pvt:5432/", "allods_online_t", "") 24 | // val (devDbServer, user, password) = ("jdbc:postgresql://dev-db.a1.pvt:5432/", "allods_online_t", "") 25 | // val devDbServer = "jdbc:postgresql://localhost:5432/"; val user = "postgres"; val password = "postgres" 26 | // val testDbName = "test" 27 | 28 | lazy val envDbHostSettings = defaultDbHostSettings.map { case (prop, default) => 29 | val propValue = System.getProperty(prop, default) 30 | val propValueOrDefault = if (propValue.isEmpty) default else propValue 31 | (prop, propValueOrDefault) 32 | } //sys.env.getOrElse(prop, default))} 33 | 34 | lazy val testDbServer = new DbServerPointer()(envDbHostSettings(urlProperty), envDbHostSettings(loginProperty), envDbHostSettings(passwordProperty)) 35 | 36 | } 37 | -------------------------------------------------------------------------------- /src/test/scala/allods/db/migration/DbServerPointer.scala: -------------------------------------------------------------------------------- 1 | package allods.db.migration 2 | 3 | import java.util.concurrent.TimeUnit 4 | 5 | import slick.driver.JdbcDriver 6 | import slick.jdbc.JdbcBackend._ 7 | import slick.jdbc.StaticQuery 8 | 9 | /** 10 | * Points to a Postgres server. 11 | * 12 | * @param dbServer сервер БД вместе с jdbc-префиксом. 
Вида: "jdbc:postgresql://127.0.0.1:5432/" 13 | * Пока поддерживается только Postgres 14 | * @param user имя пользователя, под которым надо подключаться к БД 15 | * @param password пароль пользователя 16 | * 17 | * @author a.zhizhelev 18 | */ 19 | class DbServerPointer(val driver: JdbcDriver = slick.driver.PostgresDriver)(val dbServer: String, val user: String, val password: String) { 20 | 21 | require(!dbServer.isEmpty) 22 | 23 | val postgresDriver = classOf[org.postgresql.Driver].getName 24 | 25 | def connect(db: String): Database = 26 | Database.forURL(dbServer + db, user, password, driver = postgresDriver) 27 | 28 | def withTemporaryDb[T](nameHint: String)(body: (Session) => T): T = 29 | temporaryDb(nameHint)(_.withSession(body)) 30 | 31 | def temporaryDb[T](nameHint: String)(body: (Database) => T): T = { 32 | val connect1: Database = connect("postgres") 33 | val name = nameHint + "_testdb_" + System.currentTimeMillis 34 | connect1.withSession( 35 | createSession => StaticQuery.updateNA("create database \"" + name + "\" with template template1;").execute(createSession) 36 | ) 37 | 38 | try { 39 | body(connect(name)) 40 | } finally { 41 | connect1.withSession( 42 | dropSession => StaticQuery.updateNA("drop database \"" + name + "\";").execute(dropSession) 43 | ) 44 | } 45 | } 46 | 47 | def measureMs(body: => Any): Long = { 48 | val startNs = System.nanoTime() 49 | body 50 | val deltaNs = System.nanoTime() - startNs 51 | TimeUnit.NANOSECONDS.toMillis(deltaNs) 52 | } 53 | 54 | override 55 | def toString = getClass.getSimpleName + "(" + 56 | "dbServer=" + dbServer + 57 | ", login=" + user + 58 | ", password=" + password.map(c => '*') + 59 | ")" 60 | } 61 | -------------------------------------------------------------------------------- /gradlew.bat: -------------------------------------------------------------------------------- 1 | @if "%DEBUG%" == "" @echo off 2 | @rem ########################################################################## 3 | @rem 4 | @rem 
Gradle startup script for Windows 5 | @rem 6 | @rem ########################################################################## 7 | 8 | @rem Set local scope for the variables with windows NT shell 9 | if "%OS%"=="Windows_NT" setlocal 10 | 11 | @rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. 12 | set DEFAULT_JVM_OPTS= 13 | 14 | set DIRNAME=%~dp0 15 | if "%DIRNAME%" == "" set DIRNAME=. 16 | set APP_BASE_NAME=%~n0 17 | set APP_HOME=%DIRNAME% 18 | 19 | @rem Find java.exe 20 | if defined JAVA_HOME goto findJavaFromJavaHome 21 | 22 | set JAVA_EXE=java.exe 23 | %JAVA_EXE% -version >NUL 2>&1 24 | if "%ERRORLEVEL%" == "0" goto init 25 | 26 | echo. 27 | echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. 28 | echo. 29 | echo Please set the JAVA_HOME variable in your environment to match the 30 | echo location of your Java installation. 31 | 32 | goto fail 33 | 34 | :findJavaFromJavaHome 35 | set JAVA_HOME=%JAVA_HOME:"=% 36 | set JAVA_EXE=%JAVA_HOME%/bin/java.exe 37 | 38 | if exist "%JAVA_EXE%" goto init 39 | 40 | echo. 41 | echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME% 42 | echo. 43 | echo Please set the JAVA_HOME variable in your environment to match the 44 | echo location of your Java installation. 45 | 46 | goto fail 47 | 48 | :init 49 | @rem Get command-line arguments, handling Windowz variants 50 | 51 | if not "%OS%" == "Windows_NT" goto win9xME_args 52 | if "%@eval[2+2]" == "4" goto 4NT_args 53 | 54 | :win9xME_args 55 | @rem Slurp the command line arguments. 
56 | set CMD_LINE_ARGS= 57 | set _SKIP=2 58 | 59 | :win9xME_args_slurp 60 | if "x%~1" == "x" goto execute 61 | 62 | set CMD_LINE_ARGS=%* 63 | goto execute 64 | 65 | :4NT_args 66 | @rem Get arguments from the 4NT Shell from JP Software 67 | set CMD_LINE_ARGS=%$ 68 | 69 | :execute 70 | @rem Setup the command line 71 | 72 | set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar 73 | 74 | @rem Execute Gradle 75 | "%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %CMD_LINE_ARGS% 76 | 77 | :end 78 | @rem End local scope for the variables with windows NT shell 79 | if "%ERRORLEVEL%"=="0" goto mainEnd 80 | 81 | :fail 82 | rem Set variable GRADLE_EXIT_CONSOLE if you need the _script_ return code instead of 83 | rem the _cmd.exe /c_ return code! 84 | if not "" == "%GRADLE_EXIT_CONSOLE%" exit 1 85 | exit /b 1 86 | 87 | :mainEnd 88 | if "%OS%"=="Windows_NT" endlocal 89 | 90 | :omega 91 | -------------------------------------------------------------------------------- /src/main/scala/allods/db/migration/MigrationBuilder.scala: -------------------------------------------------------------------------------- 1 | package allods.db.migration 2 | 3 | import javax.sql.DataSource 4 | 5 | import allods.db.migration.Migrations._ 6 | 7 | case class VersionTagImpl(subSchemaId: String, version: String, comment: String = "", override val 8 | dependencies: Seq[DependencyNodeTag] = Seq()) extends VersionTag { 9 | def comment(comment: String) = 10 | copy(comment = comment) 11 | 12 | def dependsOn(deps: DependencyNodeTag*) = 13 | copy(dependencies = dependencies ++ deps) 14 | } 15 | 16 | /** Обновление БД. 17 | * Имеет идентификатор и скрипт, который выполняет обновление. 18 | * 19 | * Также содержит DSL, который позволяет последовательно сконструировать описание обновления. 
20 | */ 21 | case class MigrationDefinitionImpl 22 | (migrationUrn: MigrationUrn,//TODO: validate urn 23 | comment: String = "", script: Script = EmptyScript, 24 | override val 25 | dependencies: Seq[DependencyNodeTag] = Seq()) extends MigrationDefinition { 26 | 27 | def comment(comment: String): MigrationDefinitionImpl = 28 | copy(comment = comment) 29 | 30 | def script(script: Script) = 31 | copy(script = script) 32 | 33 | def sql(sql: String) = 34 | copy(script = SqlScript(sql)) 35 | 36 | def sqlResource(clazz: Class[_], resourcePath: String, enc: String = "utf8") = 37 | sql(ResourceReading.readResource(clazz, resourcePath, enc)) 38 | 39 | def dependsOn(deps: DependencyNodeTag*) = 40 | copy(dependencies = dependencies ++ deps) 41 | 42 | def andThen(migration: MigrationDefinitionImpl) = 43 | migration.dependsOn(this) 44 | } 45 | 46 | /** Предок для пользовательских описаний обновлений. 47 | * В этом классе описан dsl, позволяющий пользователю описывать структуру обновлений 48 | * с помощью скриптов. 
49 | * @author a.zhizhelev 50 | */ 51 | abstract class MigrationBuilder(val subschema: SubSchemaId) extends ResourceReading { 52 | 53 | def migrate(task: String, scriptId: String = "0") = MigrationDefinitionImpl(migrationUrn(subschema, task, scriptId)) 54 | 55 | def version(v: String) = VersionTagImpl(subschema, v) 56 | 57 | def createMigrationManager(migrations: DependencyNodeTag*): DbMigrationManager = 58 | new DbMigrationManagerImplSessionApi(subschema, createMigrationPack(migrations: _*)) 59 | 60 | def createMigrationManager(migrationPack: MigrationPack): DbMigrationManager = 61 | new DbMigrationManagerImplSessionApi(subschema, migrationPack) 62 | 63 | import slick.driver.PostgresDriver.api._ 64 | 65 | def dataSourceToSlickDatabase(dataSource: DataSource): Database = 66 | Database.forDataSource(dataSource) 67 | 68 | def readResourceUtf8(resourcePath: String) = 69 | readResource(resourcePath, "utf8") 70 | 71 | } -------------------------------------------------------------------------------- /src/main/scala/allods/db/migration/TreeSchemaT.scala: -------------------------------------------------------------------------------- 1 | package allods.db.migration 2 | 3 | /** Представление структуры БД в виде дерева, удобного для сравнения. 4 | * Структура отражает только существенную часть схемы БД, отличия в которой должны быть ликвидированы миграциями. 5 | * 6 | * Включает схемы, таблицы, колонки таблиц, функции, тела функций. 7 | */ 8 | trait TreeSchemaT { 9 | type NodeKind = String 10 | 11 | // sealed trait NodeKind 12 | // case object RootNodeKind extends NodeKind 13 | // case object SchemeNodeKind extends NodeKind 14 | // case object TableNodeKind extends NodeKind 15 | // case object ColumnNodeKind extends NodeKind 16 | // case object FunctionNodeKind extends NodeKind 17 | 18 | /** Основа дерева - именованный узел. */ 19 | sealed trait Node 20 | 21 | /** Родительский узел, имеющий потомков. 22 | * Потомки доступны по именам. 
23 | * Структура имён включает типы узлов. В частности, schema, table, column, function, trigger, etc. */ 24 | case class ParentNode(children: Map[String, Node]) extends Node 25 | 26 | /** Листовой узел, имеющий конкретное наполнение. */ 27 | case class ContentNode(content: Any) extends Node 28 | 29 | /** Просматривает два дерева на предмет различий. 30 | * На одном уровне сравниваются узлы, имеющие одинаковые наименования и контент. 31 | * @param left условное имя левого дерева. Либо Full schema, либо Migrations schema 32 | * @param right условное имя правого дерева. Либо Full schema, либо Migrations schema 33 | */ 34 | def difference(left: String, right: String, n1: Node, n2: Node): Stream[String] = { 35 | def difference0(path: String, n1: Node, n2: Node): Stream[String] = (n1, n2) match { 36 | case (ContentNode(content1), ContentNode(content2)) => 37 | if (content1 == content2) 38 | Stream.empty 39 | else 40 | Stream(s"@$path: content (!=):\n$left:\n\t$content1\n$right:\n\t$content2") 41 | case (ParentNode(children1), ParentNode(children2)) => 42 | val s1 = children1.keySet 43 | val s2 = children2.keySet 44 | val s12 = s1 -- s2 45 | val s21 = s2 -- s1 46 | val s = s1.intersect(s2) 47 | (if (s12.isEmpty) 48 | Stream.empty[String] 49 | else 50 | Stream(s"@$path: $left -- $right = ${s12.toSeq.sorted.mkString(",")}") 51 | ).append( 52 | if (s21.isEmpty) 53 | Stream.empty[String] 54 | else 55 | Stream(s"@$path: $right -- $left = ${s21.toSeq.sorted.mkString(",")}") 56 | ).append( 57 | if (s.isEmpty) 58 | Stream.empty[String] 59 | else 60 | s.toSeq.sorted.toStream.flatMap(key => difference0(path + " " + key, children1(key), children2(key))) 61 | ) 62 | case _ => 63 | Stream(s"@$path: Cannot find difference of @$path/${n1.getClass}, @$path/${n2.getClass}") 64 | } 65 | difference0("", n1, n2) 66 | } 67 | } 68 | -------------------------------------------------------------------------------- /src/main/scala/allods/db/migration/PostgreSqlScriptSplitter.scala: 
-------------------------------------------------------------------------------- 1 | package allods.db.migration 2 | 3 | import java.io.{InputStreamReader, Reader} 4 | import scala.util.parsing.combinator.RegexParsers 5 | import scala.language.postfixOps 6 | import scala.util.parsing.input.Position 7 | import java.net.URL 8 | 9 | /** 10 | * Splits an sql script into independent statements. 11 | * 12 | * The statement is considered finished when a semicolon is encountered. 13 | * 14 | * There are a few escapes for semicolons: 15 | * 16 | * 1. "..." 17 | * 2. '...' 18 | * 3. $something$ ... $something$, where $something$ in the beginning and at the end are the same. 19 | * 4. -- ... \n 20 | * 21 | * // it would be possible to import JdbcTestUtils.executeSqlScript or iBatis, however, the proposed splitter 22 | * has an advantage - it supports ordinary postrgreSql-scripts without additional artificial delimiters. 23 | * @author a.zhizhelev 24 | */ 25 | object PostgreSqlScriptSplitter { 26 | 27 | /** Statement with position info. */ 28 | case class Statement(sql: String, pos: Position) 29 | 30 | private 31 | object P extends RegexParsers { 32 | 33 | override val skipWhitespace = false 34 | 35 | /** one line comment */ 36 | def comment = "--.*$".r 37 | 38 | /** Any text before quotes. */ 39 | def ORDINARY_CHARS = """[^$"';-]+""".r 40 | 41 | def quotedString = '\"' ~> "[^\"]".r <~ '\"' ^^ ("\"" + _ + '\"') 42 | 43 | def squotedString = '\'' ~> "[^\']".r <~ '\'' ^^ ("\'" + _ + '\'') 44 | 45 | def dollarStringStartTag = ("$" ~> "[^$]*".r <~ "$") map ("$" + _ + "$") 46 | 47 | def dollarString: Parser[String] = dollarStringStartTag.flatMap { 48 | tag => 49 | (rep("[^$]+".r | not(tag) ~> "$") <~ tag). 
50 | map(tag + _.mkString("") + tag) 51 | } 52 | 53 | def statementPiece: Parser[String] = 54 | ORDINARY_CHARS | 55 | dollarString | 56 | quotedString | squotedString | comment | 57 | "\"" | "\'" | "$" | "-" 58 | 59 | def statementString = 60 | rep(statementPiece) map (_.mkString("")) 61 | 62 | /** We want to keep the original position of the statement in the script file. */ 63 | def statement: Parser[Statement] = new Parser[Statement] { 64 | def apply(input: Input) = { 65 | val startPos = input.pos 66 | statementString(input).map(Statement(_, startPos)) 67 | } 68 | } 69 | 70 | def statements = repsep(statement, ';') // <~ (not(statement) ~> anyChar *) 71 | } 72 | 73 | def apply(script: String): Seq[Statement] = 74 | P.parseAll(P.statements, script).get 75 | 76 | def apply(reader: Reader): Seq[Statement] = 77 | P.parseAll(P.statements, reader).get 78 | 79 | def apply(url: URL): Seq[Statement] = { 80 | if (url == null) 81 | throw new NullPointerException("Url is null") 82 | val r = new InputStreamReader(url.openStream(), "utf8") 83 | try { 84 | apply(r) 85 | } finally { 86 | r.close() 87 | } 88 | } 89 | } 90 | -------------------------------------------------------------------------------- /src/test/resources/allods/db/migration/test.sql: -------------------------------------------------------------------------------- 1 | -- versions of avatar's levels 2 | CREATE TABLE "gm.avatar.level.version" ( 3 | "VersionId" SERIAL NOT NULL PRIMARY KEY, 4 | "VersionStart" TIMESTAMP NOT NULL, 5 | "VersionEnd" TIMESTAMP DEFAULT TIMESTAMP '9999-01-01 00:00:00.0' NOT NULL, 6 | "EntityId" BIGINT NOT NULL, 7 | "Level" INTEGER NOT NULL); 8 | 9 | COMMENT ON TABLE "gm.avatar.level.version" IS 'Версии свойства avatar.level'; 10 | 11 | CREATE INDEX "versionStartIndex" ON "gm.avatar.level.version" ("VersionStart"); 12 | CREATE INDEX "versionEndIndex" ON "gm.avatar.level.version" ("VersionEnd"); 13 | CREATE INDEX "entityIdIndex" ON "gm.avatar.level.version" ("EntityId"); 14 | 15 | CREATE OR 
REPLACE FUNCTION VersionsBeforeInsert() RETURNS TRIGGER AS $beforeInsert$ 16 | DECLARE versionEnd TIMESTAMP; 17 | DECLARE newVersionStartLiteral TEXT; 18 | DECLARE tableNameLiteral TEXT; 19 | 20 | BEGIN 21 | tableNameLiteral := quote_ident( TG_TABLE_NAME); 22 | 23 | IF (TG_OP = 'DELETE') THEN 24 | RAISE EXCEPTION 'Version DELETE has not been implemented yet.'; 25 | ELSIF (TG_OP = 'UPDATE') THEN 26 | RAISE EXCEPTION 'Version UPDATE has not been implemented yet.'; 27 | ELSIF (TG_OP = 'INSERT') THEN 28 | newVersionStartLiteral := quote_literal(NEW."VersionStart"); 29 | 30 | -- look up for old version end. If exists then we are modifying history and should retain subsequent versions. 31 | EXECUTE 32 | 'SELECT "VersionEnd" FROM ' || tableNameLiteral || 33 | ' WHERE "VersionStart" <= $1 34 | AND "VersionEnd" > $1 35 | AND "EntityId" = $2' 36 | INTO versionEnd 37 | USING NEW."VersionStart", NEW."EntityId"; 38 | 39 | IF versionEnd IS NOT NULL THEN 40 | NEW."VersionEnd" := versionEnd; 41 | -- updating previous version end moment so that it is adjacent with the new version. 42 | EXECUTE 43 | 'UPDATE ' || tableNameLiteral || 44 | ' SET "VersionEnd" = '|| newVersionStartLiteral || 45 | ' WHERE "VersionStart" <= '|| newVersionStartLiteral || 46 | ' AND "VersionEnd" > '|| newVersionStartLiteral || 47 | ' AND "EntityId" = '|| quote_literal(NEW."EntityId") || 48 | ';' 49 | ; 50 | 51 | ELSE -- END IF;IF versionEnd IS NULL THEN -- нет старой версии, которую надо модифицировать. 52 | -- the current version is absent. However, we may insert before the first version. Let's check this case. 
53 | EXECUTE 54 | 'SELECT "VersionStart" FROM ' || tableNameLiteral || 55 | ' WHERE "VersionStart" > $1 56 | AND "EntityId" = $2 57 | ORDER BY "VersionStart" 58 | LIMIT 1' 59 | INTO versionEnd 60 | USING NEW."VersionStart", NEW."EntityId"; -- вызов запроса с 2-мя аргументами 61 | 62 | IF versionEnd IS NOT NULL THEN 63 | NEW."VersionEnd" := versionEnd; 64 | ELSE -- versionEnd IS NULL 65 | NEW."VersionEnd" := '9999-01-01 00:00:00'::TIMESTAMP; -- if default value works then this line can be removed. 66 | END IF; 67 | END IF; 68 | RETURN NEW; 69 | END IF; 70 | RETURN NULL; 71 | END; 72 | $beforeInsert$ LANGUAGE plpgsql; 73 | 74 | CREATE TRIGGER beforeInsertTrigger 75 | BEFORE INSERT ON "gm.avatar.level.version" 76 | FOR EACH ROW 77 | EXECUTE PROCEDURE VersionsBeforeInsert(); 78 | 79 | 80 | -------------------------------------------------------------------------------- /src/main/scala/allods/db/migration/sessionApi.scala: -------------------------------------------------------------------------------- 1 | package allods.db.migration 2 | 3 | import java.sql.Timestamp 4 | 5 | import allods.db.migration.Migrations._ 6 | import slick.driver.PostgresDriver 7 | import slick.driver.PostgresDriver.api._ 8 | 9 | import scala.concurrent.ExecutionContext.Implicits.global 10 | import scala.util.Try 11 | 12 | /** 13 | * Legacy session API that will be removed in 3.1.0 14 | * @author a.zhizhelev 15 | */ 16 | @deprecated("use DbMigrationManager", "09.11.2015") 17 | trait DbMigrationManagerSessionApi { 18 | 19 | /** Загружает из БД все скрипты, которые были выполнены на базе. */ 20 | def load(implicit session: Session): SeqAppliedMigration 21 | 22 | /** Возвращает те апдейты, которые надо выполнить, чтобы привести БД к актуальному состоянию. */ 23 | def findRequiredMigrations(implicit session: Session): SeqMigration 24 | 25 | /** Проверяет наличие метаданных по миграциям и создаёт таблицы в случае их отсутствия. 
*/ 26 | def ensureMigrationSupport(implicit session: Session):Unit 27 | 28 | /** Обновляет БД до текущей версии, применяя необходимые обновления. */ 29 | def upgrade(implicit session: Session) 30 | 31 | /** Требуется ли миграция. */ 32 | def isMigrationRequired(implicit session: Session): Boolean = { 33 | val migrations: SeqMigration = findRequiredMigrations.collect { case m: MigrationDefinition => m} 34 | migrations.nonEmpty 35 | } 36 | 37 | def findUnknownMigrations(implicit session: Session): SeqAppliedMigration 38 | 39 | def migrationsDiffPrettyPrint(implicit session: Session) = { 40 | "\n" + 41 | findRequiredMigrations.map(m => "+ " + m.tagUrn + " " + m.comment).mkString("\n") + 42 | findUnknownMigrations.map(m => "- " + m.tagUrn + " " + m.comment).mkString("\n") 43 | } 44 | 45 | def migrationsDiff(implicit session: Session):String = { 46 | (findRequiredMigrations.map("+" + _.tagUrn) ++ 47 | findUnknownMigrations.map("-" + _.tagUrn)).mkString(",") 48 | } 49 | 50 | def versionOpt(implicit session: Session): Option[DependencyTagUrn] 51 | 52 | } 53 | 54 | /** An adapter for implicit session:Session. */ 55 | @deprecated("use DbMigrationManagerImpl", "09.11.2015") 56 | class DbMigrationManagerImplSessionApi(managementSchemaName: String, 57 | migrationPack: MigrationPack) extends DbMigrationManagerImpl(managementSchemaName, migrationPack) with DbMigrationManagerSessionApi { 58 | private def dbRunAwait[T](action: DBIO[T])(implicit session: Session): T = 59 | scala.concurrent.Await.result(session.database.run(action), scala.concurrent.duration.Duration.Inf) 60 | 61 | /** Обновляет БД до текущей версии, применяя необходимые обновления. 62 | * Если версия отсутствует, то в таблице dbVersion изменения не производятся. 
63 | * @param session подключение к БД 64 | */ 65 | override def upgrade(implicit session: Session): Unit = //:Seq[AppliedMigration] = 66 | dbRunAwait(upgradeDbio) 67 | 68 | /** Проверяет наличие метаданных по миграциям и создаёт таблицы в случае их отсутствия. */ 69 | def ensureMigrationSupport(implicit session: Session): Unit = 70 | Migrations.ensureMigrationSupport 71 | 72 | def versionOpt(implicit session: Session): Option[String] = 73 | dbRunAwait(versionOptDbio) 74 | 75 | /** Возвращает те апдейты, которые надо выполнить, чтобы привести БД к актуальному состоянию. */ 76 | override def findRequiredMigrations(implicit session: PostgresDriver.api.Session): SeqMigration = 77 | dbRunAwait(findRequiredMigrationsDbio) 78 | 79 | /** Возвращает уже применённые апдейты, которые неизвестны текущей версии программы. */ 80 | override def findUnknownMigrations(implicit session: Session): SeqAppliedMigration = { 81 | val migrationIds = migrationsOrdered.map(_.tagUrn).toSet 82 | load.filterNot(am => migrationIds.contains(am.migrationUrn)) 83 | } 84 | 85 | /** Загружает из БД все скрипты, которые были выполнены на базе. */ 86 | override def load(implicit session: PostgresDriver.api.Session): Seq[AppliedMigration] = 87 | dbRunAwait(loadDbio) 88 | 89 | /** Выполняет указанное обновление на базе. 90 | * 91 | * Следует вызывать только в том случае, если точно установлено, 92 | * что это обновление надо выполнить. 
93 | */ 94 | def applyMigration(migration: DependencyNodeTag)(implicit session: Session): AppliedMigration = 95 | dbRunAwait(applyMigrationDbio(migration)).head 96 | 97 | } 98 | -------------------------------------------------------------------------------- /src/main/scala/allods/db/migration/DbMigrationManagerImpl.scala: -------------------------------------------------------------------------------- 1 | package allods.db.migration 2 | 3 | import java.sql.Timestamp 4 | 5 | import allods.db.migration.Migrations._ 6 | import slick.driver.PostgresDriver 7 | import slick.driver.PostgresDriver.api._ 8 | 9 | import scala.concurrent.ExecutionContext.Implicits.global 10 | import scala.util.Try 11 | 12 | /** 13 | * Migration manager API implementation. 14 | * 15 | * @param managementSchemaName the db-schema name for system tables (applied_migrations, db_version) 16 | * @param migrationPack the collection of migrations declared in the application. 17 | * @author a.zhizhelev 18 | */ 19 | class DbMigrationManagerImpl 20 | (val managementSchemaName: String, 21 | val migrationPack: MigrationPack) extends Migrations.DbMigrationManager { 22 | val migrationsOrdered = { 23 | migrationPack.dependencySeq sortWith { 24 | case (left, right) => right.dependencySet contains left 25 | } 26 | } 27 | 28 | def ensureMigrationSupportDbio: DBIO[Unit] = 29 | Migrations.ensureMigrationSupportDbio 30 | 31 | def upgradeDbio: DBIO[Seq[AppliedMigration]] = 32 | findRequiredMigrationsDbio.flatMap { found => 33 | found.map(applyMigrationDbio). 
34 | foldLeft(DBIO.successful(Seq()): DBIO[Seq[AppliedMigration]]) { 35 | case (prev, next) => 36 | prev.flatMap { case seq => next.map(a => seq ++ a) } 37 | } 38 | }.withPinnedSession 39 | 40 | 41 | def findRequiredMigrationsDbio: DBIO[SeqMigration] = { 42 | appliedMigrations.result.map { loaded => 43 | val appliedSet = loaded.map(_.migrationUrn).toSet 44 | migrationPack.dependencySeq.filterNot(u => appliedSet.contains(u.tagUrn)) 45 | } 46 | } 47 | 48 | def findUnknownMigrationsDbio: DBIO[SeqAppliedMigration] = { 49 | val migrationIds = migrationsOrdered.map(_.tagUrn).toSet 50 | loadDbio.map(_.filterNot(am => migrationIds.contains(am.migrationUrn))) 51 | } 52 | 53 | def loadDbio: DBIO[SeqAppliedMigration] = 54 | appliedMigrations.result 55 | 56 | def now = System.currentTimeMillis() 57 | 58 | def applyMigrationDbio(migration: DependencyNodeTag): DBIO[Seq[AppliedMigration]] = migration match { 59 | case m: MigrationDefinition => 60 | import m._ 61 | val dbioAction = script match { 62 | case SqlScript(sqlScript) => 63 | val statements = PostgreSqlScriptSplitter(sqlScript) 64 | statements.foldLeft(DBIO.successful((now, now)): DBIO[(Long, Long)]) { 65 | case (prev, statement) => 66 | prev.flatMap { case (start, _) => 67 | statement match { 68 | case PostgreSqlScriptSplitter.Statement(sql, pos) => 69 | SimpleDBIO[(Long, Long)] { ctx => 70 | // val start = now 71 | val t = Try { 72 | ctx.session.prepareStatement(sql).execute() 73 | } 74 | if (t.isFailure) 75 | throw new IllegalStateException(s"[Migration] Migration $migrationUrn failed. Couldn't execute statement in script at position $pos. 
SQL:\n$sql", t.failed.get) 76 | (start, now) 77 | }.asInstanceOf[DBIO[(Long, Long)]] 78 | } 79 | } 80 | } 81 | case j: CustomMigrationScript => 82 | SimpleDBIO[(Long, Long)] { ctx => val start = now; j.execute(ctx.session); (start, now) } 83 | case EmptyScript => 84 | SimpleDBIO[(Long, Long)] { ctx => (now, now) } 85 | } 86 | // id = -1: slick for inserts ignores ids for AutoInc fields. 87 | dbioAction.flatMap { case (start, end) => 88 | appliedMigrationInsert(AppliedMigration(migrationUrn, new Timestamp(start), new Timestamp(end), content, comment)) 89 | }.transactionally 90 | case version: VersionTag => 91 | dbVersion.filter(_.schema === version.subSchemaId).delete.andThen( 92 | dbVersion.forceInsert(version.subSchemaId, version.version)).andThen( 93 | appliedMigrationInsert(AppliedMigration(version.tagUrn, new Timestamp(now), new Timestamp(now), version.content, version.comment)) 94 | //DBIO.successful(Seq()) //AppliedMigration(version.tagUrn, new Timestamp(0), new Timestamp(0), version.content, version.comment))) 95 | ).transactionally 96 | 97 | } 98 | 99 | def appliedMigrationInsert(am: AppliedMigration): DBIO[Seq[AppliedMigration]] = 100 | appliedMigrations.forceInsert(am).andThen( 101 | appliedMigrations.filter(_.migrationUrn === am.tagUrn).result 102 | ).transactionally 103 | 104 | def versionOptDbio: DBIO[Option[DependencyTagUrn]] = 105 | dbVersion.filter(_.schema === managementSchemaName).map(_.version).result.headOption 106 | 107 | } 108 | -------------------------------------------------------------------------------- /src/test/scala/allods/db/migration/MigrationTest.scala: -------------------------------------------------------------------------------- 1 | package allods.db.migration 2 | 3 | import org.junit.runner.RunWith 4 | import org.scalatest.FunSuite 5 | import org.scalatest.junit.JUnitRunner 6 | import slick.driver.PostgresDriver.api._ 7 | import allods.db.migration.Migrations._ 8 | 9 | /** 10 | * @author a.zhizhelev 11 | */ 12 | 
@RunWith(classOf[JUnitRunner])
class MigrationTest extends FunSuite {

  /** Test helper: runs a DBIO action synchronously on the session's database. */
  private def dbExecute[T](action: DBIO[T])(implicit session: Session): T =
    scala.concurrent.Await.result(session.database.run(action), scala.concurrent.duration.Duration.Inf)

  /** Base class for the parts of the DB schema.
    *
    * Schema parts that relate to a single object are described in traits.
    * The traits may be spread over several files.
    *
    * */
  abstract class MySchemaBuilder extends MigrationBuilder("test")

  trait MySchema1 extends MySchemaBuilder {
    // creates table1(id); the .comment(...) strings are stored in the DB and stay as-is
    val table1Creation = migrate("ALLODS-432535")
      .comment("создание таблицы table1 (id)")
      .sql("CREATE TABLE \"table1\" (id bigint)")
    // adds the name column to table1
    val table1WithName = migrate("ALLODS-432536")
      .dependsOn(table1Creation)
      .comment("добавление в таблицу table1 колонки name")
      .sql("ALTER TABLE \"table1\" ADD COLUMN name text")
    val version1 = version("version1").dependsOn(table1WithName)
  }

  trait MySchema2 extends MySchemaBuilder with MySchema1 {
    // self:SubschemaDefinition =>
    val table2Creation = migrate("ALLODS-432537")
      .comment("создание таблицы table2 (id)")
      .sql("CREATE TABLE table2 (id bigint)")
    // second migration under the same task id "ALLODS-432535" (suffix "1")
    val table2WithName = migrate("ALLODS-432535", "1")
      .dependsOn(table1WithName, table2Creation)
      .comment("добавление в таблицу table2 колонки name, а в таблицу table1 - secondName")
      .sql(
        """ALTER TABLE table2 ADD COLUMN name text;
          |ALTER TABLE "table1" ADD COLUMN secondName text""".stripMargin)
    val version2 = version("version2").dependsOn(version1, table2WithName)
  }

  val Version1 = new MySchema1 {}
  val Version2 = new MySchema1 with MySchema2 {}

  trait MyIllegalSchema3 extends MySchemaBuilder with MySchema2 {
    // creates table tab1 twice so that the second statement fails (exercises rollback)
    val table3CreationTwice = migrate("ALLODS-264641").
      comment("Создание таблицы t дважды, чтобы при второй операции произошла ошибка.").
      sql(
        """CREATE TABLE tab1 (id bigint);
          |CREATE TABLE tab1 (id bigint);
        """.stripMargin)
    val version3 = version("version3").dependsOn(version2, table3CreationTwice)
  }

  object Version3Illegal extends MyIllegalSchema3

  // Upgrades an empty DB to version1, then incrementally to version2,
  // checking versionOpt / isMigrationRequired at each step.
  test("Migrations construction") {
    TestDbServer.testDbServer.withTemporaryDb("MigrationsTestDb") {
      implicit session =>
        val deltaMs = TestDbServer.testDbServer.measureMs {
          info("1")
          dbExecute(Migrations.migrationsDbDdl.create)
          info("2")
          val mp1 = createMigrationPack(Version1.version1)
          assert(mp1.versionOpt.map(_.version) === Some("version1"))
          val migrationManager = Version1.createMigrationManager(mp1).asInstanceOf[DbMigrationManagerSessionApi]
          migrationManager.upgrade
          assert(migrationManager.versionOpt === mp1.versionOpt.map(_.version))

          info("DB version 1")

          val mm2 = Version2.createMigrationManager(Version2.version2).asInstanceOf[DbMigrationManagerSessionApi]
          assert(mm2.versionOpt === Some("version1"))
          assert(mm2.isMigrationRequired, "Version2.isMigrationRequired")
          mm2.upgrade
          assert(mm2.versionOpt === Some("version2"))
          info("DB version 2")
          assert(!mm2.isMigrationRequired, "!Version2.isMigrationRequired")
        }
        info(s"Time=$deltaMs, ms")
    }
  }

  // A failing migration (duplicate CREATE TABLE) must raise IllegalStateException
  // and roll the transaction back.
  test("Migration rollback") {
    TestDbServer.testDbServer.withTemporaryDb("MigrationsTestDbRollBack") {
      implicit session =>
        val deltaMs = TestDbServer.testDbServer.measureMs {
          info("1")
          dbExecute(Migrations.migrationsDbDdl.create)
          info("2")
          val mp2 = createMigrationPack(Version3Illegal.version2)
          val mp3 = createMigrationPack(Version3Illegal.version3)
          // assert(mp1.versionOpt.map(_.version) === Some("version1"))
          val migrationManager = Version3Illegal.createMigrationManager(mp3).asInstanceOf[DbMigrationManagerSessionApi]
          val exception = intercept[IllegalStateException](migrationManager.upgrade)
assert(exception.getMessage.contains("tab1")) 103 | // assert(migrationManager.versionOpt === mp2.versionOpt.map(_.version)) 104 | } 105 | info(s"Time=$deltaMs, ms") 106 | } 107 | } 108 | } 109 | -------------------------------------------------------------------------------- /gradlew: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | ############################################################################## 4 | ## 5 | ## Gradle start up script for UN*X 6 | ## 7 | ############################################################################## 8 | 9 | # Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. 10 | DEFAULT_JVM_OPTS="" 11 | 12 | APP_NAME="Gradle" 13 | APP_BASE_NAME=`basename "$0"` 14 | 15 | # Use the maximum available, or set MAX_FD != -1 to use that value. 16 | MAX_FD="maximum" 17 | 18 | warn ( ) { 19 | echo "$*" 20 | } 21 | 22 | die ( ) { 23 | echo 24 | echo "$*" 25 | echo 26 | exit 1 27 | } 28 | 29 | # OS specific support (must be 'true' or 'false'). 30 | cygwin=false 31 | msys=false 32 | darwin=false 33 | case "`uname`" in 34 | CYGWIN* ) 35 | cygwin=true 36 | ;; 37 | Darwin* ) 38 | darwin=true 39 | ;; 40 | MINGW* ) 41 | msys=true 42 | ;; 43 | esac 44 | 45 | # For Cygwin, ensure paths are in UNIX format before anything is touched. 46 | if $cygwin ; then 47 | [ -n "$JAVA_HOME" ] && JAVA_HOME=`cygpath --unix "$JAVA_HOME"` 48 | fi 49 | 50 | # Attempt to set APP_HOME 51 | # Resolve links: $0 may be a link 52 | PRG="$0" 53 | # Need this for relative symlinks. 
54 | while [ -h "$PRG" ] ; do 55 | ls=`ls -ld "$PRG"` 56 | link=`expr "$ls" : '.*-> \(.*\)$'` 57 | if expr "$link" : '/.*' > /dev/null; then 58 | PRG="$link" 59 | else 60 | PRG=`dirname "$PRG"`"/$link" 61 | fi 62 | done 63 | SAVED="`pwd`" 64 | cd "`dirname \"$PRG\"`/" >&- 65 | APP_HOME="`pwd -P`" 66 | cd "$SAVED" >&- 67 | 68 | CLASSPATH=$APP_HOME/gradle/wrapper/gradle-wrapper.jar 69 | 70 | # Determine the Java command to use to start the JVM. 71 | if [ -n "$JAVA_HOME" ] ; then 72 | if [ -x "$JAVA_HOME/jre/sh/java" ] ; then 73 | # IBM's JDK on AIX uses strange locations for the executables 74 | JAVACMD="$JAVA_HOME/jre/sh/java" 75 | else 76 | JAVACMD="$JAVA_HOME/bin/java" 77 | fi 78 | if [ ! -x "$JAVACMD" ] ; then 79 | die "ERROR: JAVA_HOME is set to an invalid directory: $JAVA_HOME 80 | 81 | Please set the JAVA_HOME variable in your environment to match the 82 | location of your Java installation." 83 | fi 84 | else 85 | JAVACMD="java" 86 | which java >/dev/null 2>&1 || die "ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. 87 | 88 | Please set the JAVA_HOME variable in your environment to match the 89 | location of your Java installation." 90 | fi 91 | 92 | # Increase the maximum file descriptors if we can. 93 | if [ "$cygwin" = "false" -a "$darwin" = "false" ] ; then 94 | MAX_FD_LIMIT=`ulimit -H -n` 95 | if [ $? -eq 0 ] ; then 96 | if [ "$MAX_FD" = "maximum" -o "$MAX_FD" = "max" ] ; then 97 | MAX_FD="$MAX_FD_LIMIT" 98 | fi 99 | ulimit -n $MAX_FD 100 | if [ $? 
-ne 0 ] ; then 101 | warn "Could not set maximum file descriptor limit: $MAX_FD" 102 | fi 103 | else 104 | warn "Could not query maximum file descriptor limit: $MAX_FD_LIMIT" 105 | fi 106 | fi 107 | 108 | # For Darwin, add options to specify how the application appears in the dock 109 | if $darwin; then 110 | GRADLE_OPTS="$GRADLE_OPTS \"-Xdock:name=$APP_NAME\" \"-Xdock:icon=$APP_HOME/media/gradle.icns\"" 111 | fi 112 | 113 | # For Cygwin, switch paths to Windows format before running java 114 | if $cygwin ; then 115 | APP_HOME=`cygpath --path --mixed "$APP_HOME"` 116 | CLASSPATH=`cygpath --path --mixed "$CLASSPATH"` 117 | 118 | # We build the pattern for arguments to be converted via cygpath 119 | ROOTDIRSRAW=`find -L / -maxdepth 1 -mindepth 1 -type d 2>/dev/null` 120 | SEP="" 121 | for dir in $ROOTDIRSRAW ; do 122 | ROOTDIRS="$ROOTDIRS$SEP$dir" 123 | SEP="|" 124 | done 125 | OURCYGPATTERN="(^($ROOTDIRS))" 126 | # Add a user-defined pattern to the cygpath arguments 127 | if [ "$GRADLE_CYGPATTERN" != "" ] ; then 128 | OURCYGPATTERN="$OURCYGPATTERN|($GRADLE_CYGPATTERN)" 129 | fi 130 | # Now convert the arguments - kludge to limit ourselves to /bin/sh 131 | i=0 132 | for arg in "$@" ; do 133 | CHECK=`echo "$arg"|egrep -c "$OURCYGPATTERN" -` 134 | CHECK2=`echo "$arg"|egrep -c "^-"` ### Determine if an option 135 | 136 | if [ $CHECK -ne 0 ] && [ $CHECK2 -eq 0 ] ; then ### Added a condition 137 | eval `echo args$i`=`cygpath --path --ignore --mixed "$arg"` 138 | else 139 | eval `echo args$i`="\"$arg\"" 140 | fi 141 | i=$((i+1)) 142 | done 143 | case $i in 144 | (0) set -- ;; 145 | (1) set -- "$args0" ;; 146 | (2) set -- "$args0" "$args1" ;; 147 | (3) set -- "$args0" "$args1" "$args2" ;; 148 | (4) set -- "$args0" "$args1" "$args2" "$args3" ;; 149 | (5) set -- "$args0" "$args1" "$args2" "$args3" "$args4" ;; 150 | (6) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" ;; 151 | (7) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" ;; 152 | (8) 
set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" ;; 153 | (9) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" "$args8" ;; 154 | esac 155 | fi 156 | 157 | # Split up the JVM_OPTS And GRADLE_OPTS values into an array, following the shell quoting and substitution rules 158 | function splitJvmOpts() { 159 | JVM_OPTS=("$@") 160 | } 161 | eval splitJvmOpts $DEFAULT_JVM_OPTS $JAVA_OPTS $GRADLE_OPTS 162 | JVM_OPTS[${#JVM_OPTS[*]}]="-Dorg.gradle.appname=$APP_BASE_NAME" 163 | 164 | exec "$JAVACMD" "${JVM_OPTS[@]}" -classpath "$CLASSPATH" org.gradle.wrapper.GradleWrapperMain "$@" 165 | -------------------------------------------------------------------------------- /src/main/scala/allods/db/migration/PersistentColumnReadingT.scala: -------------------------------------------------------------------------------- 1 | package allods.db.migration 2 | 3 | import slick.dbio.DBIO 4 | import slick.jdbc.ActionBasedSQLInterpolation 5 | import slick.jdbc.meta._ 6 | 7 | import scala.concurrent.ExecutionContext.Implicits.global 8 | import scala.language.implicitConversions 9 | import scala.concurrent._ 10 | 11 | /** 12 | * A mechanism that reads signuficant parts of schema and some means to compare two schemas. 13 | * The scheme is represented with a tree with labelled branches. 14 | */ 15 | trait PersistentColumnReadingT extends TreeSchemaT { 16 | 17 | implicit def ActionBasedSQLInterpolationImport(s: StringContext): ActionBasedSQLInterpolation = new ActionBasedSQLInterpolation(s) 18 | 19 | /** Load function definitions from schemaName. */ 20 | def loadFunctionsDbio(schemaName: String)(implicit executionContext: ExecutionContext): DBIO[Vector[(String, String)]] = 21 | sql"""SELECT proc.proname, proc.proargnames, proc.prosrc 22 | FROM pg_proc AS proc 23 | JOIN pg_namespace AS nsp ON nsp.oid = proc.pronamespace 24 | WHERE nsp.nspname LIKE $schemaName""".as[(String, String, String)]. 
25 | map(_.map { case (name, args, body) => name + "(" + args + ")" -> lowerCaseAndReplaceBlank(body) }) 26 | 27 | def loadViewsDbio(schemaName: String)(implicit executionContext: ExecutionContext): DBIO[Vector[(String, String)]] = { 28 | sql"""SELECT viewname, 29 | pg_get_viewdef(schemaname||'.'||viewname, TRUE) AS definition 30 | FROM pg_views 31 | WHERE schemaname LIKE $schemaName""".as[(String, String)]. 32 | map(_.map { case (name, body) => name -> body }) 33 | 34 | } 35 | 36 | /** 37 | * Normalize function body. 38 | * As an alternative one may use: 39 | * {{{ 40 | * def normalizeFunctionBody(body: String) = body.replaceAll("\\s{1,}", " ").trim.toLowerCase 41 | * }}} 42 | * @param source function definition 43 | * @return normalized function definition. 44 | */ 45 | def lowerCaseAndReplaceBlank(source: String) = source.replaceAll("\\s", "").toLowerCase 46 | 47 | /** A column representation that can be directly compared.*/ 48 | case class PersistentColumn(name: String, columnDef: Option[String], sqlType: Int, typeName: String, 49 | size: Option[Int]) 50 | 51 | val toPersistentColumn = { (c: MColumn) => 52 | import c._ 53 | PersistentColumn(name, columnDef, sqlType, typeName, size) 54 | } 55 | 56 | /** Load trigger list for the tableName. */ 57 | def triggersDbio(tableName: MQName): DBIO[(String, Node)] = 58 | sql""" 59 | SELECT tgname 60 | FROM pg_trigger 61 | JOIN pg_class ON tgrelid = pg_class.oid 62 | JOIN pg_namespace ON pg_namespace.oid = pg_class.relnamespace 63 | WHERE pg_namespace.nspname LIKE ${tableName.schema.getOrElse("public")} 64 | AND pg_class.relname LIKE ${tableName.name} 65 | AND NOT tgisinternal 66 | """.as[String].map { triggers => 67 | "trigger" -> ParentNode(triggers.map { case name => quote(name) -> ContentNode("") }.toMap) 68 | } 69 | 70 | /** Table name format.*/ 71 | def MQNameToString(name:MQName) = 72 | name.schema.map(_ + ".").getOrElse("") + name.name 73 | 74 | /** Load inherited tables. 
*/ 75 | def inheritsDbio(table: MTable): DBIO[Seq[(String, Node)]] = 76 | if (table.tableType == "TABLE") 77 | sql""" 78 | SELECT inhparent::REGCLASS::TEXT 79 | FROM pg_catalog.pg_inherits 80 | WHERE inhrelid = ${MQNameToString(table.name)}::REGCLASS::OID 81 | """.as[String].map { inherits => 82 | Seq("inherit" -> ParentNode(inherits.map { case name => quote(name) -> ContentNode("") }.toMap)) 83 | } 84 | else 85 | DBIO.successful(Seq()) 86 | 87 | /** Create quoted name.*/ 88 | def quote(name: String) = "\"" + name + "\"" 89 | 90 | /** Load function definitions. */ 91 | def functionsDbio(schema: String): DBIO[(String, Node)] = 92 | loadFunctionsDbio(schema).map(functions => "function" -> ParentNode(functions.map { case (name, body) => 93 | quote(name) -> ContentNode(body) 94 | }.toMap)) 95 | 96 | def columnsDbio(table: MTable): DBIO[(String, Node)] = 97 | table.getColumns.map(columns => "column" -> ParentNode(columns.map(column => 98 | quote(column.name) -> ContentNode(toPersistentColumn(column).toString) 99 | ).toMap)) 100 | 101 | def tablesDbio(schema: String): DBIO[(String, Node)] = 102 | MTable.getTables(None, Some(schema), None, None). 103 | flatMap { mtables => 104 | DBIO.sequence(mtables.map(mtable => 105 | DBIO.sequence(Seq( 106 | columnsDbio(mtable), 107 | triggersDbio(mtable.name) 108 | )).flatMap(s1 => 109 | inheritsDbio(mtable). 110 | map(s2 => s1 ++ s2) 111 | ).map(_.toMap). 112 | map { m => 113 | mtable.tableType + " " + quote(mtable.name.name) -> ParentNode(m) 114 | } 115 | )).map(ns => "table" -> ParentNode(ns.toMap)) 116 | } 117 | 118 | def viewsDbio(schema: String): DBIO[(String, Node)] = 119 | loadViewsDbio(schema).map(views => "view" -> ParentNode(views.map { 120 | case (name, body) => 121 | quote(name) -> ContentNode(body) 122 | }.toMap)) 123 | 124 | /** Load the essential DB schema. 
*/ 125 | def readEssentialSchemaDbio(dbExcludedSchemas: Set[String])(implicit executionContext: ExecutionContext): DBIO[Node] = { 126 | MSchema.getSchemas(None, None).flatMap { 127 | schemas => 128 | DBIO.sequence(schemas.filter(s => !dbExcludedSchemas.contains(s.schema)).map { 129 | schema => 130 | DBIO.sequence(Seq( 131 | functionsDbio(schema.schema), 132 | tablesDbio(schema.schema), 133 | viewsDbio(schema.schema) 134 | )).map(_.toMap).map { 135 | m => schema.schema -> ParentNode(m) 136 | } 137 | }) 138 | }.map { 139 | p => 140 | ParentNode(Map("schema" -> ParentNode(p.toMap))) 141 | }.withPinnedSession 142 | } 143 | 144 | 145 | } 146 | -------------------------------------------------------------------------------- /src/main/scala/allods/db/migration/Migrations.scala: -------------------------------------------------------------------------------- 1 | package allods.db.migration 2 | 3 | import java.sql.Timestamp 4 | import javax.sql.DataSource 5 | 6 | import slick.driver.PostgresDriver 7 | import slick.driver.PostgresDriver.DDL 8 | import slick.driver.PostgresDriver.api._ 9 | import slick.jdbc.meta.MTable 10 | 11 | import scala.annotation.tailrec 12 | import scala.collection.mutable 13 | import scala.collection.mutable.ListBuffer 14 | import scala.concurrent.ExecutionContext//.Implicits.global 15 | 16 | /** 17 | * Support for migration of database. objects. 18 | * 19 | * Схема БД может быть разбита на отдельные независимые подсхемы, располагающиеся в одной БД (при условии, 20 | * что подсхемы не пересекаются по отдельным объектам БД). Например, схема шарда, схема логсервера и т.п. 21 | * Внутри подсхемы объекты обычно зависят друг от друга. А объекты из разных подсхем друг от друга не зависят. 22 | * Подсхемы имеют независимые истории изменений. 23 | * 24 | * Каждое изменение представлено либо Sql-скриптом, либо Java-скриптом. 25 | * Каждое изменение имеет идентификатор (имя таска + порядковый номер). 
26 | * Изменения имеют комментарий (могут не иметь, в принципе). 27 | * 28 | * Определённое состояние БД после внесения ряда изменений может иметь собственный номер, 29 | * именуемый версией. Версия представлена строкой составленной таким образом, чтобы 30 | * давать правильное лексикографическое упорядочение версий при сортировке строк 31 | * по возрастанию. 32 | * 33 | * Если на базе применены миграции, не соответствующие ни одной версии, то версия будет пустой. 34 | * 35 | * При мерже модификаций в ветку необходимо использовать согласованную схему именования версий. 36 | * 1. В каждой ветке, включая транк, версия нумеруется от базовой точке ветвления. К версии 37 | * точки ветвления прибавляется имя ветки, а дальше порядковый номер версии. Например: 38 | * 39 | * Версия в транке была 5.0.01.20 (здесь 20 - число обновлений с последнего вывода ветки). 40 | * Хотим сделать новую ветку 5.0.02. 41 | * В транке создаётся новая версия 5.0.02.0, совпадающая с 5.0.01.20. 42 | * В ветке создаётся новая версия 5.0.02.branch.0, тоже совпадающая с 5.0.01.20. 43 | * Дальше в транке изменения приводят к увеличению номера на 1 5.0.02.1, 5.0.02.2, 5.0.02.3 ... 44 | * В ветке изменения, которые сливаются, также приводят к увеличению номера на 1. 45 | * 46 | * @author a.zhizhelev 47 | */ 48 | object Migrations extends MigrationsT with ResourceReading 49 | trait MigrationsT { 50 | private 51 | object sameThreadExecutionContext extends ExecutionContext { 52 | override def execute(runnable: Runnable): Unit = runnable.run() 53 | override def reportFailure(t: Throwable): Unit = throw t 54 | } 55 | 56 | /** Идентификатор подсхемы. */ 57 | type SubSchemaId = String 58 | /** Идентификатор объекта в БД */ 59 | type DatabaseObjectId = String 60 | 61 | /** Идентификатор точки на графе зависимостей. Представляет собой URN (universal resource name). 62 | * Обычно содержит идентификатор версии - 5.0.1.trunk.1 63 | */ 64 | type DependencyTagUrn = String 65 | /** Идентификатор обновления. 
Представляет собой URN (universal resource name). 66 | * Одновременно служит идентификатором точки на графе зависимостей, которая образуется сразу после 67 | * выполнения скрипта. 68 | */ 69 | type MigrationUrn = DependencyTagUrn 70 | 71 | /** Скрипт идентифицируется уникальным URL, составленным из схемы, номера таска и идентификатора скрипта. 72 | * urn:subschema/taskNumber/scriptNumber */ 73 | def migrationUrn(subschema: SubSchemaId, task: String, migrationNumber: String): MigrationUrn = 74 | s"urn:$subschema/$task/$migrationNumber" 75 | 76 | def versionTagUrn(subschema: SubSchemaId, tag: String): DependencyTagUrn = 77 | s"urn:$subschema/v/$tag" 78 | 79 | sealed trait Script { 80 | /** Information to store in the applied migrations table. */ 81 | def text: String 82 | } 83 | 84 | /** 85 | * Представление скрипта обновления указанного объекта БД. 86 | * objectId идентификатор обновляемого объекта 87 | * change тип изменения, описываемый этим скриптом 88 | * id:String, objectId:DatabaseObjectId, change:Change, 89 | * @param sql скрипт 90 | */ 91 | case class SqlScript(sql: String) extends Script { 92 | def text = sql 93 | } 94 | 95 | case object EmptyScript extends Script { 96 | def text = "" 97 | } 98 | 99 | // /** 100 | // * Идентификатор объекта в БД. 101 | // * 102 | // * withData=true: Объекты, содержащие данные. А именно, таблицы. 103 | // * Управление такими объектами требует строго последовательного выполнения всех обновлений. 104 | // * withData=false: Объекты, не содержащие данных, Например, функции. 105 | // * Такие объекты заменяются на последнюю имеющуюся версию. 106 | // */ 107 | // case class DatabaseObjectId(schema:String, name:String, objectType:String, withData:Boolean) 108 | // 109 | // class Schema(schema:String) { 110 | // /** Идентификатор таблицы БД. */ 111 | // def tableId(name: String) = DatabaseObjectId(schema, name, "TABLE", withData = true) 112 | // } 113 | 114 | /** Интерфейс для Java-скриптов. 
115 | * Выполняет обновление с помощью кода. 116 | */ 117 | trait CustomMigrationScript extends Script { 118 | def execute(implicit session: Session) 119 | 120 | def text = this.getClass.getName 121 | } 122 | 123 | /** Тег, отмечающий некоторую точку на графе зависимостей. 124 | * Существует две разновидности тега - пустой тег, отмечающий просто 125 | * узел в графе зависимостей, и тег, содержащий скрипт. В последнем случае идентификатор 126 | * скрипта также служит идентификатором узла графа, расположенного 127 | * сразу после скрипта. 128 | * */ 129 | trait DependencyNodeTag { 130 | /** идентификатор определённой точки на графе зависимостей, представляющей собой совокупность скриптов обновления. 131 | */ 132 | def tagUrn: DependencyTagUrn 133 | 134 | def comment: String 135 | 136 | def dependencies: Seq[DependencyNodeTag] = Seq() 137 | 138 | lazy val dependencySet = dependencies.toSet 139 | 140 | def content: String 141 | } 142 | 143 | /** идентификатор определённой версии БД, представляющей собой совокупность скриптов обновления. 144 | * Можно использовать номер версии программы или версию транка. 145 | * Желательно, чтобы tag был коротким, чтобы можно было использовать в имени шаблона БД. 146 | * 147 | * Версия может не иметь названия. В db.properties записывается версия в том случае, если 148 | * состав обновлений в точности соответствует некоторой объявленной версии БД. 149 | * 150 | * Объявляется версия как базовая плюс несколько обновлений. 151 | * val version419 = version(version418, migrationAllods544613) 152 | */ 153 | 154 | trait VersionTag extends DependencyNodeTag { 155 | def subSchemaId: SubSchemaId 156 | 157 | def version: String 158 | 159 | def tagUrn = versionTagUrn(subSchemaId, version) 160 | 161 | def content = "" 162 | } 163 | 164 | /** Миграция БД. */ 165 | trait MigrationDefinition extends DependencyNodeTag { 166 | /** идентификатор скрипта. Можно использовать номер таска и номер скрипта в пределах таска. 
*/ 167 | def migrationUrn: MigrationUrn 168 | 169 | override def tagUrn = migrationUrn 170 | 171 | def script: Script 172 | 173 | def content = script.text 174 | } 175 | 176 | /** 177 | * Менеджер обновлений БД хранит в БД полный список обновлений 178 | **/ 179 | trait DbMigrationManager { 180 | /** Все обновления БД, собранные в линейную цепочку. 181 | * При необходимости следует предварительно вызвать метод Migrations.migrationsToSeq, 182 | * который построит такую цепочку по графу зависимостей. */ 183 | val migrationPack: MigrationPack 184 | /** Имя схемы, которая используется для хранения данных обновлений. 185 | * В этой схеме автоматически будут созданы стандартные таблицы, позволяющие */ 186 | val managementSchemaName: String 187 | 188 | /** Отрисовывает список миграций, которые будут выполнены на пустой БД. */ 189 | def migrationsFullCreatePrettyPrint = 190 | migrationPack.dependencySeq.map(m => 191 | "-- " + m.tagUrn + "\n" + 192 | "-- " + m.comment + "\n" + 193 | "-- DEPENDS ON: " + m.dependencies.map(_.tagUrn).mkString(", ") + "\n" + 194 | m.content +(if(m.content.endsWith(";")||m.content.endsWith(";\n")) "\n" else ";\n")). 
195 | mkString("\n") 196 | 197 | def loadDbio: DBIO[Seq[AppliedMigration]] 198 | 199 | def findRequiredMigrationsDbio: DBIO[SeqMigration] 200 | 201 | def upgradeDbio: DBIO[Seq[AppliedMigration]] 202 | 203 | def isMigrationRequiredDbio(implicit ec: ExecutionContext): DBIO[Boolean] = 204 | findRequiredMigrationsDbio.map(_.nonEmpty) 205 | 206 | def findUnknownMigrationsDbio: DBIO[SeqAppliedMigration] 207 | 208 | def migrationsDiffPrettyPrintDbio(implicit ec: ExecutionContext): DBIO[String] = 209 | for {req <- findRequiredMigrationsDbio 210 | unk <- findUnknownMigrationsDbio 211 | } yield { 212 | "\n" + 213 | req.map(m => "+ " + m.tagUrn + " " + m.comment).mkString("\n") + 214 | unk.map(m => "- " + m.tagUrn + " " + m.comment).mkString("\n") 215 | } 216 | 217 | def migrationsDiffDbio(implicit ec: ExecutionContext): DBIO[String] = 218 | for {req <- findRequiredMigrationsDbio 219 | unk <- findUnknownMigrationsDbio 220 | } yield 221 | (req.map("+" + _.tagUrn) ++ 222 | unk.map("-" + _.tagUrn)).mkString(",") 223 | 224 | def ensureMigrationSupportDbio:DBIO[Unit] 225 | def versionOptDbio: DBIO[Option[DependencyTagUrn]] 226 | 227 | } 228 | 229 | /** JavaAPI 230 | * Преобразует Java DataSource в Slick Database. */ 231 | def dataSourceToDatabase(dataSource: DataSource): Database = Database.forDataSource(dataSource) 232 | 233 | /** Применённое изменение в БД. */ 234 | case class AppliedMigration(migrationUrn: String, startTime: Timestamp, endTime: Timestamp, script: String, comment: String) { 235 | def tagUrn = migrationUrn 236 | } 237 | 238 | val AppliedMigrationsTableName = "applied_migrations" 239 | val MigrationsSchema = Some("migrations") 240 | 241 | /** Представление обновлений в БД. 242 | * Дополнительно содержит время запуска обновления, 243 | * время окончания обновления, журнал ошибок обновления. 
244 | */ 245 | class AppliedMigrations(tag: Tag) extends Table[AppliedMigration](tag, MigrationsSchema, AppliedMigrationsTableName) { 246 | // def migrationId = column[Long]("migrationId", O.PrimaryKey, O.AutoInc) 247 | 248 | def migrationUrn = column[String]("migration_urn", O.PrimaryKey, O.SqlType("text")) 249 | 250 | def migrationStart = column[Timestamp]("migration_start") 251 | 252 | def migrationEnd = column[Timestamp]("migration_end") 253 | 254 | // для Java-скрипта делается toString 255 | def script = column[String]("script", O.SqlType("text")) 256 | 257 | def comment = column[String]("comment", O.SqlType("text")) 258 | 259 | def * = (migrationUrn, migrationStart, migrationEnd, script, comment) <>(AppliedMigration.tupled, AppliedMigration.unapply) 260 | 261 | } 262 | 263 | val appliedMigrations = TableQuery[AppliedMigrations] 264 | 265 | /** Версия БД хранится в отдельной таблице. В случае, если набор миграций не соответствует 266 | * никакой версии, таблица будет пустой. */ 267 | class DbVersion(tag: Tag) extends Table[(String, String)](tag, MigrationsSchema, "db_version") { 268 | def schema = column[String]("schema", O.PrimaryKey, O.SqlType("text")) 269 | 270 | def version = column[String]("version", O.SqlType("text")) 271 | 272 | def * = (schema, version) 273 | } 274 | 275 | val dbVersion = TableQuery[DbVersion] 276 | 277 | val schemaDdl: DDL = DDL( 278 | List(s"CREATE SCHEMA IF NOT EXISTS ${MigrationsSchema.get}"), List(), 279 | List(s"DROP SCHEMA IF EXISTS ${MigrationsSchema.get}"), List()) 280 | 281 | val migrationsDbDdl = 282 | schemaDdl ++ 283 | appliedMigrations.schema ++ 284 | dbVersion.schema 285 | 286 | // TODO: журнал выполнения хранится в отдельной таблице. use JdbcAppender 287 | 288 | /** 289 | * Реализация топологической сортировки. 290 | * При обнаружении циклических зависимостей генерируется исключение. 
291 | * 292 | * @see https://gist.github.com/ThiporKong/4399695 293 | * @see http://en.wikipedia.org/wiki/Topological_sorting 294 | * @param elements коллекция всех элементов, которые требуется упорядочить. 295 | * @tparam T тип элементов. Для него должна быть реализация PartialOrdering'а 296 | * @return отсортированные по зависимостям элементы. 297 | */ 298 | def topologicalSort[T](elements: TraversableOnce[T], directDependencies: T => Set[T]): Seq[T] = { 299 | @tailrec 300 | def tsort(elementsWithPredecessors: Seq[(T, Set[T])], alreadySorted: ListBuffer[T]): Seq[T] = { 301 | val (noPredecessors, hasPredecessors) = 302 | elementsWithPredecessors.partition(_._2.isEmpty) 303 | 304 | if (noPredecessors.isEmpty) { 305 | if (hasPredecessors.isEmpty) 306 | alreadySorted.toSeq 307 | else 308 | throw new IllegalStateException( 309 | "There seems to be a cycle in the dependency subgraph: " + elementsWithPredecessors.map(_._1).mkString(",")) 310 | } else { 311 | val found = noPredecessors.map(_._1) 312 | val restWithoutFound = hasPredecessors.map(p => (p._1, p._2 -- found)) //оставшиеся миграции с удалёнными узлами. 313 | tsort(restWithoutFound, alreadySorted ++ found) 314 | } 315 | } 316 | 317 | val elementsWithPredecessors = elements.map(e => (e, directDependencies(e))).toSeq 318 | val keys = elements.toSet 319 | require(elementsWithPredecessors.forall(_._2.forall(keys.contains)), 320 | "It is required that all dependent elements appear in elements argument. The following element is absent:"+ 321 | elementsWithPredecessors.find(p => !p._2.forall(keys.contains)) 322 | ) 323 | tsort(elementsWithPredecessors, ListBuffer()) 324 | } 325 | 326 | /** Собирает все зависимости. 327 | * Сохраняет порядок их объявления. 328 | * @param targets несколько зависимостей верхнего уровня 329 | * @return все миграции, собранные транзитивно. 
330 | */ 331 | def collectAll(targets: List[DependencyNodeTag]): Seq[DependencyNodeTag] = { 332 | def collectAll0(targets: List[DependencyNodeTag], urns: Set[DependencyTagUrn], result: mutable.ListBuffer[DependencyNodeTag]): Seq[DependencyNodeTag] = targets match { 333 | case Nil => 334 | result.toSeq 335 | case head :: tail => 336 | if (urns.contains(head.tagUrn)) // игнорируем собранные ранее объекты 337 | collectAll0(tail, urns, result) 338 | else { 339 | result += head 340 | collectAll0(head.dependencies.toList ::: tail, urns + head.tagUrn, result) 341 | } 342 | } 343 | collectAll0(targets, Set(), mutable.ListBuffer()) 344 | } 345 | 346 | /** Вычисляет цепочку миграций, которые зависят от указанных, включая указанные. Порядок 347 | * миграций строго определён, исходя из обхода графа. 348 | * 349 | * Сначала рекурсивным обходом мы собираем все Migration'ы. При этом зависимости могут быть 350 | * отсортированы некорректно. Затем мы просто сортируем зависимости по транзитивному отношению hasDependencyOn 351 | * 352 | * При обнаружении циклических зависимостей генерируется исключение. 353 | * */ 354 | def collectAndSortDependencies(targets: List[DependencyNodeTag]): Seq[DependencyNodeTag] = { 355 | val collected = collectAll(targets) 356 | 357 | val urns = collected.map(_.tagUrn) 358 | val duplicatedUrns = urns.groupBy(identity).map(p => (p._1, p._2.size)).filter(_._2>1) 359 | require(duplicatedUrns.isEmpty, "There are duplicates in the list of migrations: "+duplicatedUrns.mkString(",")) 360 | 361 | val map = collected.map(n => n.tagUrn -> n).toMap 362 | val graph = map.map{case (name, node) => (name, node.dependencies.map(_.tagUrn).toSet)} 363 | 364 | val sortedUrns = topologicalSort[DependencyTagUrn](urns, graph) 365 | sortedUrns.map(map) 366 | } 367 | 368 | /** Последовательность скриптов обновления. Все скрипты собраны в линейную цепочку. 369 | * Прогон всей цепочки должен быть валидной операцией. 
*/ 370 | type SeqMigration = Seq[DependencyNodeTag] 371 | 372 | case class MigrationPack(dependencySeq: Seq[DependencyNodeTag]) 373 | { 374 | def versionOpt = dependencySeq.reverse.collect { case v: VersionTag => v }.headOption 375 | def migrations: Seq[MigrationDefinition] = dependencySeq.collect { case m: MigrationDefinition => m} 376 | } 377 | 378 | /** Формирует MigrationPack для линейной цепочки зависимостей. 379 | * Если на самом верху находится версия, то MigrationPack будет иметь версию. 380 | * В противном случае, версия будет отсутствовать. */ 381 | def dependencySeqToMigrationPack(dependencySeq: Seq[DependencyNodeTag]) = 382 | MigrationPack(dependencySeq) 383 | 384 | def createMigrationPack(targets: DependencyNodeTag*): MigrationPack = { 385 | val sorted = collectAndSortDependencies(targets.toList) 386 | 387 | dependencySeqToMigrationPack(sorted) 388 | } 389 | 390 | 391 | /** 392 | * Совокупность применённых обновлений 393 | */ 394 | type SeqAppliedMigration = Seq[AppliedMigration] 395 | 396 | @deprecated("use isMigrationsMetaExistsDbio", "23.06.2015") 397 | def isMigrationsMetaExists(implicit session: Session) = 398 | dbRunAwait(isMigrationsMetaExistsDbio(sameThreadExecutionContext)) 399 | 400 | private def dbRunAwait[T](action: DBIO[T])(implicit session: PostgresDriver.api.Session): T = 401 | scala.concurrent.Await.result(session.database.run(action), scala.concurrent.duration.Duration.Inf) 402 | 403 | /** Проверяет наличие метаданных по миграциям и создаёт таблицы в случае их отсутствия. 
*/ 404 | @deprecated("use ensureMigrationSupportDbio", "23.06.2015") 405 | def ensureMigrationSupport(implicit session: Session): Unit = 406 | dbRunAwait(ensureMigrationSupportDbio(sameThreadExecutionContext)) 407 | 408 | def isMigrationsMetaExistsDbio(implicit ec: ExecutionContext): DBIO[Boolean] = MTable.getTables( 409 | cat = None, 410 | schemaPattern = MigrationsSchema, 411 | namePattern = Some(AppliedMigrationsTableName), 412 | types = None 413 | ).map(_.nonEmpty) 414 | 415 | def ensureMigrationSupportDbio(implicit ec: ExecutionContext): DBIO[Unit] = 416 | isMigrationsMetaExistsDbio.flatMap { 417 | t => if(t) DBIO.successful(()) else migrationsDbDdl.create 418 | }.transactionally 419 | 420 | // https://www.safaribooksonline.com/library/view/regular-expressions-cookbook/9781449327453/ch08s06.html 421 | val urnRegex = "^urn:[a-z0-9][a-z0-9-]{0,31}:[a-z0-9()+,\\-.:=@;$_!*'%/?#]+$".r 422 | def isUrnValid(urn:String):Boolean = 423 | urnRegex.findFirstIn(urn).nonEmpty 424 | 425 | } 426 | 427 | --------------------------------------------------------------------------------