├── .github ├── FUNDING.yml ├── dependabot.yml └── workflows │ ├── automerge.yml │ ├── build.yml │ ├── codeql-analysis.yml │ ├── docker-image.yml │ └── docker-publish.yml ├── .gitignore ├── APIv1.md ├── CONTRIBUTING.md ├── Dockerfile ├── LICENSE ├── PERFORMANCE.md ├── README.md ├── common ├── common.go └── common_test.go ├── config ├── config.go └── config_test.go ├── coordinator ├── coordinator.go ├── coordinator_mocks.go └── coordinator_test.go ├── coverage.sh ├── data ├── graphql.go ├── graphql_mocks.go └── graphql_test.go ├── db ├── db.go ├── db_dialect.go ├── db_dialect_test.go ├── db_error_handling_test.go ├── db_integration_test.go ├── db_mssql.go ├── db_mssql_test.go ├── db_mysql.go ├── db_mysql_test.go ├── db_postgresql.go ├── db_postgresql_test.go └── db_test.go ├── docker-entrypoint.sh ├── go.mod ├── go.sum ├── loader ├── azureblob_loader.go ├── azureblob_loader_test.go ├── disk_loader.go ├── disk_loader_test.go ├── loader.go ├── loader_test.go ├── s3_loader.go └── s3_loader_test.go ├── metrics ├── metrics.go └── metrics_test.go ├── migrator.go ├── notifications ├── notifications.go └── notifications_test.go ├── server ├── server.go ├── server_mocks.go └── server_test.go ├── staticcheck.sh ├── test ├── create-test-tenants-mssql.sql ├── create-test-tenants.sql ├── docker-compose-it.yaml ├── docker-compose.yaml ├── empty.yaml ├── http-integration-tests.sh ├── migrations │ ├── config-scripts │ │ └── 200012181227.sql │ ├── config │ │ ├── 201602160001.sql │ │ └── 201602160002.sql │ ├── ref │ │ ├── 201602160003.sql │ │ └── 201602160004.sql │ ├── tenants-scripts │ │ ├── 200001181228.sql │ │ ├── a.sql │ │ └── b.sql │ └── tenants │ │ ├── 201602160002.sql │ │ ├── 201602160003.sql │ │ ├── 201602160004.sql │ │ └── 201602160005.sql ├── migrator-dev │ └── Dockerfile ├── migrator-docker.yaml ├── migrator-mssql.yaml ├── migrator-mysql.yaml ├── migrator-overrides.yaml ├── migrator-postgresql.yaml ├── migrator-test-envs.yaml ├── migrator-test.yaml └── performance │ ├── 
create-test-tenants.sh │ ├── flyway.conf │ ├── flywaydb-test.sh │ ├── generate-test-migrations.sh │ ├── liquibase-changelog.xml │ ├── liquibase-test.sh │ ├── liquibase.properties │ ├── migrator-performance.yaml │ └── test.sh ├── tutorials ├── aws-ecs │ ├── Dockerfile │ ├── README.md │ └── migrator.yaml ├── aws-eks │ ├── README.md │ ├── kustomization.yaml │ ├── migrator-deployment.yaml │ ├── migrator-ingress.yaml │ └── migrator-service.yaml ├── azure-aks │ ├── Dockerfile │ ├── README.md │ ├── kustomization.yaml │ ├── migrator-deployment.yaml │ ├── migrator-ingress.yaml │ ├── migrator-service.yaml │ └── migrator.yaml ├── oauth2-proxy-oidc-haproxy │ ├── README.md │ ├── docker-compose.yaml │ ├── haproxy │ │ ├── haproxy.cfg │ │ └── keycloak.pem │ ├── keycloak │ │ ├── master-realm.json │ │ └── master-users-0.json │ ├── migrator-oidc.png │ ├── migrator.yaml │ └── oauth2-proxy.cfg └── oauth2-proxy │ ├── README.md │ ├── docker-compose.yaml │ ├── keycloak │ ├── master-realm.json │ └── master-users-0.json │ ├── migrator.yaml │ └── oauth2-proxy.cfg └── types └── types.go /.github/FUNDING.yml: -------------------------------------------------------------------------------- 1 | # These are supported funding model platforms 2 | 3 | github: [lukaszbudnik] 4 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | updates: 3 | # Maintain dependencies for GitHub Actions 4 | - package-ecosystem: "github-actions" 5 | directory: "/" 6 | schedule: 7 | interval: "weekly" 8 | time: "07:00" 9 | day: "sunday" 10 | # Maintain dependencies for go lang 11 | - package-ecosystem: "gomod" 12 | directory: "/" 13 | schedule: 14 | interval: "weekly" 15 | time: "02:00" 16 | day: "sunday" 17 | # Maintain dependencies for docker 18 | - package-ecosystem: "docker" 19 | directory: "/" 20 | schedule: 21 | interval: "weekly" 22 | time: "05:00" 23 | day: 
"sunday" 24 | # Maintain dependencies for dev docker 25 | - package-ecosystem: "docker" 26 | directory: "/test/migrator-dev" 27 | schedule: 28 | interval: "weekly" 29 | time: "06:00" 30 | day: "sunday" -------------------------------------------------------------------------------- /.github/workflows/automerge.yml: -------------------------------------------------------------------------------- 1 | name: Automerge 2 | on: 3 | pull_request: 4 | types: 5 | - labeled 6 | - unlabeled 7 | - synchronize 8 | - opened 9 | - edited 10 | - ready_for_review 11 | - reopened 12 | - unlocked 13 | pull_request_review: 14 | types: 15 | - submitted 16 | check_suite: 17 | types: 18 | - completed 19 | status: {} 20 | jobs: 21 | automerge: 22 | runs-on: ubuntu-latest 23 | steps: 24 | - name: Automerge 25 | uses: "pascalgn/automerge-action@v0.16.4" 26 | env: 27 | GITHUB_TOKEN: "${{ secrets.GITHUB_TOKEN }}" 28 | MERGE_LABELS: "dependencies" 29 | MERGE_FILTER_AUTHOR: "dependabot[bot]" 30 | MERGE_FORKS: "false" 31 | -------------------------------------------------------------------------------- /.github/workflows/build.yml: -------------------------------------------------------------------------------- 1 | name: Build and test 2 | 3 | on: [push, pull_request] 4 | 5 | jobs: 6 | build: 7 | runs-on: ubuntu-latest 8 | if: ${{ github.actor != 'dependabot[bot]' }} 9 | strategy: 10 | matrix: 11 | go: [ '1.17', '1.16', '1.15' ] 12 | name: Go ${{ matrix.go }} 13 | 14 | steps: 15 | - uses: actions/checkout@v4 16 | 17 | - name: Setup Go 18 | uses: actions/setup-go@v5 19 | with: 20 | go-version: ${{ matrix.go }} 21 | 22 | - name: Run staticcheck (only latest go version) 23 | if: matrix.go == '1.17' 24 | run: ./staticcheck.sh 25 | 26 | - name: Login to Docker Hub 27 | uses: docker/login-action@v3 28 | with: 29 | username: ${{ secrets.DOCKER_HUB_USERNAME }} 30 | password: ${{ secrets.DOCKER_HUB_TOKEN }} 31 | 32 | - name: Start docker-compose services 33 | run: docker-compose -f 
test/docker-compose-it.yaml up -d 34 | 35 | - name: Build and test migrator 36 | env: 37 | AZURE_STORAGE_ACCESS_KEY: ${{ secrets.AZURE_STORAGE_ACCESS_KEY }} 38 | AZURE_STORAGE_ACCOUNT: ${{ secrets.AZURE_STORAGE_ACCOUNT }} 39 | run: ./coverage.sh 40 | 41 | - name: Run HTTP integration tests 42 | run: ./test/http-integration-tests.sh 43 | 44 | - uses: codecov/codecov-action@v5.4.3 45 | -------------------------------------------------------------------------------- /.github/workflows/codeql-analysis.yml: -------------------------------------------------------------------------------- 1 | # For most projects, this workflow file will not need changing; you simply need 2 | # to commit it to your repository. 3 | # 4 | # You may wish to alter this file to override the set of languages analyzed, 5 | # or to provide custom queries or build logic. 6 | name: "CodeQL" 7 | 8 | on: 9 | push: 10 | branches: [main] 11 | pull_request: 12 | # The branches below must be a subset of the branches above 13 | branches: [main] 14 | schedule: 15 | - cron: '0 7 * * 6' 16 | 17 | jobs: 18 | analyze: 19 | name: Analyze 20 | runs-on: ubuntu-latest 21 | 22 | strategy: 23 | fail-fast: false 24 | matrix: 25 | # Override automatic language detection by changing the below list 26 | # Supported options are ['csharp', 'cpp', 'go', 'java', 'javascript', 'python'] 27 | language: ['go'] 28 | # Learn more... 29 | # https://docs.github.com/en/github/finding-security-vulnerabilities-and-errors-in-your-code/configuring-code-scanning#overriding-automatic-language-detection 30 | 31 | steps: 32 | - name: Checkout repository 33 | uses: actions/checkout@v4 34 | with: 35 | # We must fetch at least the immediate parents so that if this is 36 | # a pull request then we can checkout the head. 37 | fetch-depth: 2 38 | 39 | # If this run was triggered by a pull request event, then checkout 40 | # the head of the pull request instead of the merge commit. 
41 | - run: git checkout HEAD^2 42 | if: ${{ github.event_name == 'pull_request' }} 43 | 44 | # Initializes the CodeQL tools for scanning. 45 | - name: Initialize CodeQL 46 | uses: github/codeql-action/init@v3 47 | with: 48 | languages: ${{ matrix.language }} 49 | # If you wish to specify custom queries, you can do so here or in a config file. 50 | # By default, queries listed here will override any specified in a config file. 51 | # Prefix the list here with "+" to use these queries and those in the config file. 52 | # queries: ./path/to/local/query, your-org/your-repo/queries@main 53 | 54 | # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). 55 | # If this step fails, then you should remove it and run the build manually (see below) 56 | - name: Autobuild 57 | uses: github/codeql-action/autobuild@v3 58 | 59 | # ℹ️ Command-line programs to run using the OS shell. 60 | # 📚 https://git.io/JvXDl 61 | 62 | # ✏️ If the Autobuild fails above, remove it and uncomment the following three lines 63 | # and modify them (or add more) to build your code if your project 64 | # uses a compiled language 65 | 66 | #- run: | 67 | # make bootstrap 68 | # make release 69 | 70 | - name: Perform CodeQL Analysis 71 | uses: github/codeql-action/analyze@v3 72 | -------------------------------------------------------------------------------- /.github/workflows/docker-image.yml: -------------------------------------------------------------------------------- 1 | name: Docker Image CI 2 | 3 | on: [push, pull_request] 4 | 5 | jobs: 6 | 7 | build: 8 | 9 | runs-on: ubuntu-latest 10 | 11 | steps: 12 | - uses: actions/checkout@v4 13 | 14 | - name: Build the dev Docker image 15 | run: docker build . --file test/migrator-dev/Dockerfile --tag migrator-dev:$(date +%s) 16 | 17 | - name: Build the production Docker image 18 | run: docker build . 
--build-arg GIT_REF=${{ github.ref }} --build-arg GIT_SHA=${{ github.sha }} --tag migrator-prod:$(date +%s) 19 | -------------------------------------------------------------------------------- /.github/workflows/docker-publish.yml: -------------------------------------------------------------------------------- 1 | name: Publish Docker image 2 | 3 | on: 4 | push: 5 | branches: [main] 6 | release: 7 | types: [published] 8 | 9 | jobs: 10 | push_to_registries: 11 | name: Push Docker image to Docker Hub and ghcr.io registries 12 | runs-on: ubuntu-latest 13 | permissions: 14 | packages: write 15 | contents: read 16 | steps: 17 | - name: Check out the repo 18 | uses: actions/checkout@v4 19 | 20 | - name: Log in to Docker Hub 21 | uses: docker/login-action@v3 22 | with: 23 | username: ${{ secrets.DOCKER_HUB_USERNAME }} 24 | password: ${{ secrets.DOCKER_HUB_TOKEN }} 25 | 26 | - name: Log in to the Container registry 27 | uses: docker/login-action@v3 28 | with: 29 | registry: ghcr.io 30 | username: ${{ github.actor }} 31 | password: ${{ github.token }} 32 | 33 | - name: Extract metadata (tags, labels) for Docker 34 | id: meta 35 | uses: docker/metadata-action@v5 36 | with: 37 | images: | 38 | lukasz/migrator 39 | ghcr.io/${{ github.repository }} 40 | tags: | 41 | type=edge 42 | type=semver,pattern={{version}} 43 | type=semver,pattern={{major}}.{{minor}} 44 | type=semver,pattern={{major}} 45 | 46 | - name: Build and push Docker images 47 | uses: docker/build-push-action@v6 48 | with: 49 | context: . 
50 | push: true 51 | tags: ${{ steps.meta.outputs.tags }} 52 | labels: ${{ steps.meta.outputs.labels }} 53 | build-args: | 54 | GIT_REF=${{ github.ref }} 55 | GIT_SHA=${{ github.sha }} 56 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .DS_Store 2 | migrator 3 | test/migrator.yaml 4 | coverage-*.txt 5 | coverage.txt 6 | debug.test 7 | .vscode 8 | unit-tests.xml 9 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing, code style, running unit & integration tests 2 | 3 | Contributions are most welcomed. 4 | 5 | If you would like to help me and implement a new feature, enhance existing one, or spotted and fixed bug please send me a pull request. 6 | 7 | Code should be formatted, staticchecked, and tested using the following commands: 8 | 9 | ``` 10 | ./staticcheck.sh 11 | docker-compose -f test/docker-compose.yaml up 12 | ./coverage.sh 13 | ./test/http-integration-tests.sh 14 | ``` 15 | 16 | The `db/db_integration_test.go` uses go subtests and runs all tests agains 3 database containers (MySQL, PostgreSQL, and MSSQL). These databases are automatically provisioned by the docker-compose tool. 17 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM golang:1.19.3-alpine3.15 as builder 2 | 3 | LABEL org.opencontainers.image.authors="Łukasz Budnik " 4 | 5 | ARG GIT_REF 6 | ARG GIT_SHA 7 | 8 | # build migrator 9 | RUN mkdir -p /go/migrator 10 | COPY . 
/go/migrator 11 | 12 | RUN cd /go/migrator && \ 13 | go build -ldflags "-X main.GitSha=$GIT_SHA -X main.GitRef=$GIT_REF" 14 | 15 | FROM alpine:3.22.0 16 | 17 | LABEL org.opencontainers.image.authors="Łukasz Budnik " 18 | 19 | COPY --from=builder /go/migrator/migrator /bin 20 | 21 | VOLUME ["/data"] 22 | 23 | # copy and register entrypoint script 24 | COPY docker-entrypoint.sh / 25 | ENTRYPOINT ["/docker-entrypoint.sh"] 26 | 27 | EXPOSE 8080 28 | -------------------------------------------------------------------------------- /PERFORMANCE.md: -------------------------------------------------------------------------------- 1 | # migrator performance 2 | 3 | Back in 2018 I wrote a small benchmark to compare 3 DB migrations frameworks. In 2021 I decided to refresh the results. Also, instead of the deprecated proprietary Ruby framework, I included a new contester (liquibase). 4 | 5 | The frameworks and their versions used in 2021 performance benchmark were: 6 | 7 | - migrator - version used `v2021.1.0` 8 | - flyway - feature rich Java-based DB migration framework: https://flywaydb.org - version used `Flyway Teams Edition 7.14.0 by Redgate` 9 | - liquibase - feature rich Java-based DB migration framework: https://liquibase.org - version used `Liquibase Community 4.4.3 by Datical` 10 | 11 | # 2021 results 12 | 13 | Compared to 2018 the tests are single tenant with 10k migrations. This is because somewhere between 2018 and 2021 [multiple schemas](https://flywaydb.org/documentation/learnmore/faq.html#multiple-schemas) stopped working in flyway. Also, liquibase doesn't support natively multiple schemas. 14 | 15 | You can play around with performance benchmarks yourself. See `test/performance/test.sh` for migrator, `test/performance/flywaydb-test.sh` for flyway, and `test/performance/liquibase-test.sh` for liquibase. 16 | 17 | _Note: These are simple tests/helpers scripts and not full blown DB migration benchmark tools. I didn't spend too much time making them look slick. 
They do their job quite well though._ 18 | 19 | Results are an average of a few runs. All benchmarks were run on my MacBook. 20 | 21 | Applying first 10k migrations on an empty database: 22 | 23 | | rank | framework | number of migrations (before - after) | time (s) | memory (MB) | 24 | | ---- | --------- | ------------------------------------- | -------- | ----------- | 25 | | 1. | migrator | 0 - 10000 | 31 | 47 | 26 | | 2. | liquibase | 0 - 10000 | 298 | 385 | 27 | | 3. | flyway | 0 - 10000 | 540 | 265 | 28 | 29 | And then, on top of existing 10k migrations, applying 10k new ones: 30 | 31 | | rank | framework | number of migrations (before - after) | time (s) | memory (MB) | 32 | | ---- | --------- | ------------------------------------- | -------- | ----------- | 33 | | 1. | migrator | 10000 - 20000 | 43 | 79 | 34 | | 2. | flyway | 10000 - 20000 | 816 | 324 | 35 | | 3. | liquibase | 10000 - 20000 | 1533 | 660 | 36 | 37 | migrator is orders of magnitude better than flyway and liquibase. 38 | 39 | flyway was slower in the first test, however behaves better than liquibase in the second test. 40 | 41 | Because multiple schemas stopped working in flyway and liquibase doesn't support them natively there wasn't too much sense in doing more comparison benchmarks. 42 | 43 | Instead, I ran additional multi-tenant/multiple-schema migrator benchmarks. 44 | 45 | # migrator's performance showcase 46 | 47 | You can use `test/performance/test.sh` to run any simulation you want. You can also use it to simulate adding new migrations (so called append mode) - scroll to the bottom of that script to see a comment showing you how to do this. 48 | 49 | I prepared 2 multi-tenant simulations: 50 | 51 | 1. 1000 tenants, 20 SQL files in each version, 1000 \* 20 = 20k migrations to apply in each version 52 | 2. 
500 tenants, 100 SQL files in each version, 500 \* 100 = 50k migrations to apply in each version 53 | 54 | ## 1000 tenants 55 | 56 | Execution time is growing slightly with every new version. The memory consumption grows proportionally to how many migrations are in the database. This is because migrator fetches all migrations from database to compute which migrations were already applied and which are to be applied. 57 | 58 | | version | number of migrations (before - after) | time (s) | memory (MB) | 59 | | ------- | ------------------------------------- | -------- | ----------- | 60 | | 1 | 0 - 21001 | 57 | 66 | 61 | | 2 | 21001 - 41001 | 58 | 86 | 62 | | 3 | 41001 - 61001 | 56 | 101 | 63 | | 4 | 61001 - 81001 | 62 | 165 | 64 | | 5 | 81001 - 101001 | 62 | 175 | 65 | | 6 | 101001 - 121001 | 59 | 242 | 66 | | 7 | 121001 - 141001 | 71 | 280 | 67 | | 8 | 141001 - 161001 | 68 | 300 | 68 | | 9 | 161001 - 181001 | 70 | 324 | 69 | | 10 | 181001 - 201001 | 69 | 380 | 70 | 71 | ## 500 tenants 72 | 73 | Similarly to 1000 tenants, in 500 tenants simulation execution time is growing slightly with every new version. The memory consumption grows proportionally to how many migrations are in the database. 74 | 75 | | version | number of migrations (before - after) | time (s) | memory (MB) | 76 | | ------- | ------------------------------------- | -------- | ----------- | 77 | | 1 | 0 - 50501 | 167 | 126 | 78 | | 2 | 50501 - 100501 | 170 | 140 | 79 | | 3 | 100501 - 150501 | 167 | 218 | 80 | | 4 | 150501 - 200501 | 181 | 292 | 81 | | 5 | 200501 - 250501 | 178 | 396 | 82 | 83 | ## Summary 84 | 85 | Based on both simulations we can see that migrator under any load is stable and behaves very predictable: 86 | 87 | - applied ~300 migrations a second (creating tables, inserting multiple rows, database running on the same machine = my MacBook) 88 | - consumed ~2.5MB memory for every 1k migrations 89 | 90 | # 2018 91 | 92 | Keeping 2018 results for historical purposes. 
93 | 94 | _Note: For all 3 DB frameworks I used multiple schemas benchmark. Unfortunately, back in 2018, I didn't commit flyway tests and it's impossible now to recreate how multiple schemas were actually set up._ 95 | 96 | Execution times were the following: 97 | 98 | | # tenants | # existing migrations | # migrations to apply | migrator | ruby | flyway | 99 | | --------- | --------------------- | --------------------- | -------- | ---- | ------ | 100 | | 10 | 0 | 10001 | 154s | 670s | 2360s | 101 | | 10 | 10001 | 20 | 2s | 455s | 340s | 102 | 103 | migrator was the undisputed winner. 104 | 105 | The Ruby framework had the undesired functionality of making a DB call each time to check if a given migration was already applied. migrator fetched all applied migrations at once and compared them in memory. That was the primary reason why migrator was so much better in the second test. 106 | 107 | flyway results were... very surprising. I was so shocked that I had to re-run flyway as well as all other tests. Yes, flyway was 15 times slower than migrator in the first test. In the second test flyway was faster than Ruby. Still a couple orders of magnitude slower than migrator. 108 | 109 | The other thing to consider is the fact that migrator is written in Go which is known to be much faster than Ruby and Java. 
110 | -------------------------------------------------------------------------------- /common/common.go: -------------------------------------------------------------------------------- 1 | package common 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "log" 7 | "path/filepath" 8 | "runtime" 9 | ) 10 | 11 | const ( 12 | panicLevel = "PANIC" 13 | errorLevel = "ERROR" 14 | infoLevel = "INFO" 15 | debugLevel = "DEBUG" 16 | ) 17 | 18 | // RequestIDKey is used together with context for setting/getting X-Request-ID 19 | type RequestIDKey struct{} 20 | 21 | // LogLevel 22 | type LogLevelKey struct{} 23 | 24 | // LogError logs error message 25 | func LogError(ctx context.Context, format string, a ...interface{}) string { 26 | return logLevel(ctx, errorLevel, format, a...) 27 | } 28 | 29 | // LogInfo logs info message 30 | func LogInfo(ctx context.Context, format string, a ...interface{}) string { 31 | return logLevel(ctx, infoLevel, format, a...) 32 | } 33 | 34 | // LogDebug logs debug message 35 | func LogDebug(ctx context.Context, format string, a ...interface{}) string { 36 | return logLevel(ctx, debugLevel, format, a...) 37 | } 38 | 39 | // LogPanic logs error message 40 | func LogPanic(ctx context.Context, format string, a ...interface{}) string { 41 | return logLevel(ctx, panicLevel, format, a...) 42 | } 43 | 44 | // Log logs message with a given level with no request context 45 | func Log(level string, format string, a ...interface{}) string { 46 | _, file, line, _ := runtime.Caller(2) 47 | 48 | message := fmt.Sprintf(format, a...) 
49 | 50 | log.SetFlags(log.Ldate | log.Ltime | log.Lmicroseconds | log.LUTC) 51 | log.Printf("[%v:%v] %v %v", file, line, level, message) 52 | return message 53 | } 54 | 55 | func logLevel(ctx context.Context, level string, format string, a ...interface{}) string { 56 | 57 | logLevel := fmt.Sprintf("%v", ctx.Value(LogLevelKey{})) 58 | 59 | if shouldLogMessage(logLevel, level) { 60 | requestID := ctx.Value(RequestIDKey{}) 61 | message := fmt.Sprintf(format, a...) 62 | _, file, line, _ := runtime.Caller(2) 63 | filename := filepath.Base(file) 64 | 65 | log.SetFlags(log.Ldate | log.Ltime | log.Lmicroseconds | log.LUTC) 66 | log.Printf("[%v:%v] %v requestId=%v %v", filename, line, level, requestID, message) 67 | 68 | return message 69 | } 70 | 71 | return "" 72 | } 73 | 74 | // FindNthIndex finds index of nth occurance of a character c in string str 75 | func FindNthIndex(str string, c byte, n int) int { 76 | occur := 0 77 | for i := 0; i < len(str); i++ { 78 | if str[i] == c { 79 | occur++ 80 | } 81 | if occur == n { 82 | return i 83 | } 84 | } 85 | return -1 86 | } 87 | 88 | func shouldLogMessage(configLogLevel, targetLevel string) bool { 89 | // if configLogLevel and targetLevel match then log 90 | if configLogLevel == targetLevel { 91 | return true 92 | } 93 | // if configLogLevel is debug then all messages are logged no need to check targetLevel 94 | if configLogLevel == debugLevel { 95 | return true 96 | } 97 | // if configLogLevel not set then INFO is assumed 98 | // if INFO then all levels should log except of debug 99 | if (len(configLogLevel) == 0 || configLogLevel == infoLevel) && targetLevel != debugLevel { 100 | return true 101 | } 102 | 103 | // if logLevel is ERROR then only ERROR and PANIC are logged 104 | // ERROR is covered in the beginning of method so need to check only Panic level 105 | if configLogLevel == errorLevel && targetLevel == panicLevel { 106 | return true 107 | } 108 | 109 | return false 110 | } 111 | 
-------------------------------------------------------------------------------- /common/common_test.go: -------------------------------------------------------------------------------- 1 | package common 2 | 3 | import ( 4 | "context" 5 | "testing" 6 | 7 | "github.com/stretchr/testify/assert" 8 | ) 9 | 10 | func newTestContext() context.Context { 11 | ctx := context.TODO() 12 | ctx = context.WithValue(ctx, RequestIDKey{}, "123") 13 | // log level empty = default log level = INFO 14 | ctx = context.WithValue(ctx, LogLevelKey{}, "") 15 | return ctx 16 | } 17 | 18 | func newTestContextWithDebugLogLevel() context.Context { 19 | ctx := newTestContext() 20 | ctx = context.WithValue(ctx, LogLevelKey{}, debugLevel) 21 | return ctx 22 | } 23 | 24 | func TestLogDebugSkip(t *testing.T) { 25 | // DEBUG message will be skipped, as the default log level is INFO 26 | message := LogDebug(newTestContext(), "success") 27 | assert.Empty(t, message) 28 | } 29 | 30 | func TestLogDebug(t *testing.T) { 31 | // DEBUG message will be returned, as the log level is set to DEBUG 32 | message := LogDebug(newTestContextWithDebugLogLevel(), "success") 33 | assert.Equal(t, "success", message) 34 | } 35 | 36 | func TestLogInfo(t *testing.T) { 37 | message := LogInfo(newTestContext(), "success") 38 | assert.Equal(t, "success", message) 39 | } 40 | 41 | func TestLogError(t *testing.T) { 42 | message := LogError(newTestContext(), "param=%v", 123) 43 | assert.Equal(t, "param=123", message) 44 | } 45 | 46 | func TestLogPanic(t *testing.T) { 47 | message := LogPanic(newTestContext(), "param=%v", 123456) 48 | assert.Equal(t, "param=123456", message) 49 | } 50 | 51 | func TestLog(t *testing.T) { 52 | message := Log("INFO", "param=%v", 456) 53 | assert.Equal(t, "param=456", message) 54 | } 55 | 56 | func TestFindNthIndex(t *testing.T) { 57 | indx := FindNthIndex("https://lukaszbudniktest.blob.core.windows.net/mycontainer/prod/artefacts", '/', 4) 58 | assert.Equal(t, 58, indx) 59 | } 60 | 61 | func 
TestFindNthIndexNotFound(t *testing.T) { 62 | indx := FindNthIndex("https://lukaszbudniktest.blob.core.windows.net/mycontainer", '/', 4) 63 | assert.Equal(t, -1, indx) 64 | } 65 | 66 | func TestShouldLogMessage(t *testing.T) { 67 | // default logLevel is info, should log all except of debug 68 | assert.False(t, shouldLogMessage("", debugLevel)) 69 | assert.True(t, shouldLogMessage("", infoLevel)) 70 | assert.True(t, shouldLogMessage("", errorLevel)) 71 | assert.True(t, shouldLogMessage("", panicLevel)) 72 | 73 | // debug logLevel logs all 74 | assert.True(t, shouldLogMessage(debugLevel, debugLevel)) 75 | assert.True(t, shouldLogMessage(debugLevel, infoLevel)) 76 | assert.True(t, shouldLogMessage(debugLevel, errorLevel)) 77 | assert.True(t, shouldLogMessage(debugLevel, panicLevel)) 78 | 79 | // info logLevel logs all except of debug 80 | assert.False(t, shouldLogMessage(infoLevel, debugLevel)) 81 | assert.True(t, shouldLogMessage(infoLevel, infoLevel)) 82 | assert.True(t, shouldLogMessage(infoLevel, errorLevel)) 83 | assert.True(t, shouldLogMessage(infoLevel, panicLevel)) 84 | 85 | // error logLevel logs only error or panic 86 | assert.False(t, shouldLogMessage(errorLevel, debugLevel)) 87 | assert.False(t, shouldLogMessage(errorLevel, infoLevel)) 88 | assert.True(t, shouldLogMessage(errorLevel, errorLevel)) 89 | assert.True(t, shouldLogMessage(errorLevel, panicLevel)) 90 | 91 | // panic logLevel logs only panic 92 | assert.False(t, shouldLogMessage(panicLevel, debugLevel)) 93 | assert.False(t, shouldLogMessage(panicLevel, infoLevel)) 94 | assert.False(t, shouldLogMessage(panicLevel, errorLevel)) 95 | assert.True(t, shouldLogMessage(panicLevel, panicLevel)) 96 | } 97 | -------------------------------------------------------------------------------- /config/config.go: -------------------------------------------------------------------------------- 1 | package config 2 | 3 | import ( 4 | "io/ioutil" 5 | "os" 6 | "reflect" 7 | "strings" 8 | 9 | 
"gopkg.in/go-playground/validator.v9" 10 | "gopkg.in/yaml.v2" 11 | ) 12 | 13 | // Config represents Migrator's yaml configuration file 14 | type Config struct { 15 | BaseLocation string `yaml:"baseLocation" validate:"required"` 16 | Driver string `yaml:"driver" validate:"required"` 17 | DataSource string `yaml:"dataSource" validate:"required"` 18 | TenantSelectSQL string `yaml:"tenantSelectSQL,omitempty"` 19 | TenantInsertSQL string `yaml:"tenantInsertSQL,omitempty"` 20 | SchemaPlaceHolder string `yaml:"schemaPlaceHolder,omitempty"` 21 | SingleMigrations []string `yaml:"singleMigrations" validate:"min=1"` 22 | TenantMigrations []string `yaml:"tenantMigrations,omitempty"` 23 | SingleScripts []string `yaml:"singleScripts,omitempty"` 24 | TenantScripts []string `yaml:"tenantScripts,omitempty"` 25 | Port string `yaml:"port,omitempty"` 26 | PathPrefix string `yaml:"pathPrefix,omitempty"` 27 | WebHookURL string `yaml:"webHookURL,omitempty"` 28 | WebHookHeaders []string `yaml:"webHookHeaders,omitempty"` 29 | WebHookTemplate string `yaml:"webHookTemplate,omitempty"` 30 | LogLevel string `yaml:"logLevel,omitempty" validate:"logLevel"` 31 | } 32 | 33 | func (config Config) String() string { 34 | c, _ := yaml.Marshal(config) 35 | return strings.TrimSpace(string(c)) 36 | } 37 | 38 | // FromFile reads config from file which name is passed as an argument 39 | func FromFile(configFileName string) (*Config, error) { 40 | contents, err := ioutil.ReadFile(configFileName) 41 | 42 | if err != nil { 43 | return nil, err 44 | } 45 | 46 | return FromBytes(contents) 47 | } 48 | 49 | // FromBytes reads config from raw bytes passed as an argument 50 | func FromBytes(contents []byte) (*Config, error) { 51 | var config Config 52 | 53 | if err := yaml.Unmarshal(contents, &config); err != nil { 54 | return nil, err 55 | } 56 | 57 | validate := validator.New() 58 | validate.RegisterValidation("logLevel", validateLogLevel) 59 | if err := validate.Struct(config); err != nil { 60 | return nil, err 
61 | } 62 | 63 | substituteEnvVariables(&config) 64 | 65 | return &config, nil 66 | } 67 | 68 | func substituteEnvVariables(config *Config) { 69 | val := reflect.ValueOf(config).Elem() 70 | for i := 0; i < val.NumField(); i++ { 71 | valueField := val.Field(i) 72 | typeField := val.Type().Field(i) 73 | if val.CanAddr() && val.CanSet() { 74 | switch typeField.Type.Kind() { 75 | case reflect.String: 76 | s := valueField.Interface().(string) 77 | s = substituteEnvVariable(s) 78 | valueField.SetString(s) 79 | case reflect.Slice: 80 | ss := valueField.Interface().([]string) 81 | for i := range ss { 82 | ss[i] = substituteEnvVariable(ss[i]) 83 | } 84 | valueField.Set(reflect.ValueOf(ss)) 85 | } 86 | } 87 | } 88 | } 89 | 90 | func substituteEnvVariable(s string) string { 91 | start := strings.Index(s, "${") 92 | end := strings.Index(s, "}") 93 | if start > -1 && end > start && len(s) > end { 94 | after := s[0:start] + os.Getenv(s[start+2:end]) + s[end+1:] 95 | return substituteEnvVariable(after) 96 | } 97 | return s 98 | } 99 | 100 | func validateLogLevel(fl validator.FieldLevel) bool { 101 | value := fl.Field().String() 102 | return value == "" || value == "DEBUG" || value == "INFO" || value == "ERROR" || value == "PANIC" 103 | } 104 | -------------------------------------------------------------------------------- /config/config_test.go: -------------------------------------------------------------------------------- 1 | package config 2 | 3 | import ( 4 | "fmt" 5 | "os" 6 | "testing" 7 | 8 | "github.com/stretchr/testify/assert" 9 | "gopkg.in/go-playground/validator.v9" 10 | "gopkg.in/yaml.v2" 11 | ) 12 | 13 | func TestFromFile(t *testing.T) { 14 | config, err := FromFile("../test/migrator-test.yaml") 15 | assert.Nil(t, err) 16 | assert.Equal(t, "test/migrations", config.BaseLocation) 17 | assert.Equal(t, "select name from migrator.migrator_tenants", config.TenantSelectSQL) 18 | assert.Equal(t, "postgres", config.Driver) 19 | assert.Equal(t, "user=postgres 
dbname=migrator_test host=192.168.99.100 port=55432 sslmode=disable", config.DataSource) 20 | assert.Equal(t, []string{"tenants"}, config.TenantMigrations) 21 | assert.Equal(t, []string{"public", "ref", "config"}, config.SingleMigrations) 22 | assert.Equal(t, "8811", config.Port) 23 | assert.Equal(t, "{schema}", config.SchemaPlaceHolder) 24 | assert.Equal(t, "https://slack.com/api/api.test", config.WebHookURL) 25 | assert.Equal(t, []string{"Authorization: Basic QWxhZGRpbjpPcGVuU2VzYW1l", "Content-Type: application/json", "X-CustomHeader: value1,value2"}, config.WebHookHeaders) 26 | } 27 | 28 | func TestWithEnvFromFile(t *testing.T) { 29 | os.Setenv("COMMIT_SHA", "62fd74506651982fe317721d7e07145f8c2fa166") 30 | config, err := FromFile("../test/migrator-test-envs.yaml") 31 | assert.Nil(t, err) 32 | assert.Equal(t, "s3://bucket-name/application-x/"+os.Getenv("TERM")+"/"+os.Getenv("COMMIT_SHA"), config.BaseLocation) 33 | assert.Equal(t, os.Getenv("PATH"), config.TenantSelectSQL) 34 | assert.Equal(t, os.Getenv("GOPATH"), config.TenantInsertSQL) 35 | assert.Equal(t, os.Getenv("PWD"), config.Driver) 36 | assert.Equal(t, fmt.Sprintf("lets_assume_password=%v&and_something_else=%v¶m=value", os.Getenv("HOME"), os.Getenv("USER")), config.DataSource) 37 | assert.Equal(t, os.Getenv("_"), config.Port) 38 | assert.Equal(t, os.Getenv("USER"), config.SchemaPlaceHolder) 39 | assert.Equal(t, []string{"tenants"}, config.TenantMigrations) 40 | assert.Equal(t, []string{"public", "ref", "config"}, config.SingleMigrations) 41 | assert.Equal(t, os.Getenv("SHLVL"), config.WebHookURL) 42 | assert.Equal(t, fmt.Sprintf("X-Security-Token: %v", os.Getenv("USER")), config.WebHookHeaders[0]) 43 | } 44 | 45 | func TestConfigString(t *testing.T) { 46 | config := &Config{ 47 | BaseLocation: "/opt/app/migrations", 48 | Driver: "postgres", 49 | DataSource: "user=p dbname=db host=localhost", 50 | TenantSelectSQL: "select abc", 51 | TenantInsertSQL: "insert into table", 52 | SchemaPlaceHolder: ":tenant", 
53 | SingleMigrations: []string{"ref"}, 54 | TenantMigrations: []string{"tenants"}, 55 | SingleScripts: []string{"procedures"}, 56 | TenantScripts: []string{}, 57 | Port: "8181", 58 | PathPrefix: "", 59 | WebHookURL: "https://hooks.slack.com/services/TTT/BBB/XXX", 60 | WebHookHeaders: []string{}, 61 | WebHookTemplate: `{"text": "Results are: ${summary}"}`, 62 | } 63 | // check if go naming convention applies 64 | expected := `baseLocation: /opt/app/migrations 65 | driver: postgres 66 | dataSource: user=p dbname=db host=localhost 67 | tenantSelectSQL: select abc 68 | tenantInsertSQL: insert into table 69 | schemaPlaceHolder: :tenant 70 | singleMigrations: 71 | - ref 72 | tenantMigrations: 73 | - tenants 74 | singleScripts: 75 | - procedures 76 | port: "8181" 77 | webHookURL: https://hooks.slack.com/services/TTT/BBB/XXX 78 | webHookTemplate: '{"text": "Results are: ${summary}"}'` 79 | actual := fmt.Sprintf("%v", config) 80 | assert.Equal(t, expected, actual) 81 | } 82 | 83 | func TestConfigReadFromEmptyFileError(t *testing.T) { 84 | config, err := FromFile("../test/empty.yaml") 85 | assert.Nil(t, config) 86 | assert.IsType(t, (validator.ValidationErrors)(nil), err, "Should error because of validation errors") 87 | } 88 | 89 | func TestConfigReadFromNonExistingFileError(t *testing.T) { 90 | config, err := FromFile("abcxyz.yaml") 91 | assert.Nil(t, config) 92 | assert.IsType(t, (*os.PathError)(nil), err, "Should error because non-existing file") 93 | } 94 | 95 | func TestConfigFromWrongSyntaxFile(t *testing.T) { 96 | config, err := FromFile("../Dockerfile") 97 | assert.Nil(t, config) 98 | assert.IsType(t, (*yaml.TypeError)(nil), err, "Should error because of wrong yaml syntax") 99 | } 100 | 101 | func TestCustomValidatorLogLevelError(t *testing.T) { 102 | config := `baseLocation: /opt/app/migrations 103 | driver: postgres 104 | dataSource: user=p dbname=db host=localhost 105 | tenantSelectSQL: select abc 106 | tenantInsertSQL: insert into table 107 | schemaPlaceHolder: 
:tenant 108 | singleMigrations: 109 | - ref 110 | tenantMigrations: 111 | - tenants 112 | singleScripts: 113 | - procedures 114 | port: "8181" 115 | webHookURL: https://hooks.slack.com/services/TTT/BBB/XXX 116 | webHookTemplate: '{"text": "Results are: ${summary}"}' 117 | logLevel: ABC` 118 | 119 | _, err := FromBytes([]byte(config)) 120 | assert.NotNil(t, err) 121 | assert.Contains(t, err.Error(), `Error:Field validation for 'LogLevel' failed on the 'logLevel' tag`) 122 | } 123 | -------------------------------------------------------------------------------- /coverage.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | which gotestsum &> /dev/null 4 | if [[ $? -ne 0 ]]; then 5 | go get gotest.tools/gotestsum 6 | fi 7 | 8 | # when called with no arguments calls tests for all packages 9 | if [[ -z "$1" ]]; then 10 | packages='./...' 11 | else 12 | packages="./$1" 13 | fi 14 | 15 | fail=0 16 | 17 | gotestsum --junitfile unit-tests.xml -- -covermode=atomic -coverprofile=coverage-all.txt $packages 18 | 19 | if [[ $? 
-ne 0 ]]; then 20 | fail=1 21 | fi 22 | 23 | cat coverage-all.txt | sed '/_mocks.go/d' > coverage.txt 24 | 25 | rm coverage-all.txt 26 | 27 | exit $fail 28 | -------------------------------------------------------------------------------- /data/graphql.go: -------------------------------------------------------------------------------- 1 | package data 2 | 3 | import ( 4 | "github.com/lukaszbudnik/migrator/coordinator" 5 | "github.com/lukaszbudnik/migrator/types" 6 | ) 7 | 8 | // SchemaDefinition contains GraphQL migrator schema 9 | const SchemaDefinition = ` 10 | schema { 11 | query: Query 12 | mutation: Mutation 13 | } 14 | enum MigrationType { 15 | SingleMigration 16 | TenantMigration 17 | SingleScript 18 | TenantScript 19 | } 20 | enum Action { 21 | // Apply is the default action, migrator reads all source migrations and applies them 22 | Apply 23 | // Sync is an action where migrator reads all source migrations and marks them as applied in DB 24 | // typical use cases are: 25 | // importing source migrations from a legacy tool or synchronising tenant migrations when tenant was created using external tool 26 | Sync 27 | } 28 | scalar Time 29 | interface Migration { 30 | name: String! 31 | migrationType: MigrationType! 32 | sourceDir: String! 33 | file: String! 34 | contents: String! 35 | checkSum: String! 36 | } 37 | type SourceMigration implements Migration { 38 | name: String! 39 | migrationType: MigrationType! 40 | sourceDir: String! 41 | file: String! 42 | contents: String! 43 | checkSum: String! 44 | } 45 | type DBMigration implements Migration { 46 | id: Int! 47 | name: String! 48 | migrationType: MigrationType! 49 | sourceDir: String! 50 | file: String! 51 | contents: String! 52 | checkSum: String! 53 | schema: String! 54 | created: Time! 55 | } 56 | type Tenant { 57 | name: String! 58 | } 59 | type Version { 60 | id: Int! 61 | name: String! 62 | created: Time! 63 | dbMigrations: [DBMigration!]! 
64 | } 65 | input SourceMigrationFilters { 66 | name: String 67 | sourceDir: String 68 | file: String 69 | migrationType: MigrationType 70 | } 71 | input VersionInput { 72 | versionName: String! 73 | action: Action = Apply 74 | dryRun: Boolean = false 75 | } 76 | input TenantInput { 77 | tenantName: String! 78 | versionName: String! 79 | action: Action = Apply 80 | dryRun: Boolean = false 81 | } 82 | type Summary { 83 | // date time operation started 84 | startedAt: Time! 85 | // how long the operation took in seconds 86 | duration: Float! 87 | // number of tenants in the system 88 | tenants: Int! 89 | // number of loaded and applied single schema migrations 90 | singleMigrations: Int! 91 | // number of loaded multi-tenant schema migrations 92 | tenantMigrations: Int! 93 | // number of applied multi-tenant schema migrations (equals to tenants * tenantMigrations) 94 | tenantMigrationsTotal: Int! 95 | // sum of singleMigrations and tenantMigrationsTotal 96 | migrationsGrandTotal: Int! 97 | // number of loaded and applied single schema scripts 98 | singleScripts: Int! 99 | // number of loaded multi-tenant schema scripts 100 | tenantScripts: Int! 101 | // number of applied multi-tenant schema migrations (equals to tenants * tenantScripts) 102 | tenantScriptsTotal: Int! 103 | // sum of singleScripts and tenantScriptsTotal 104 | scriptsGrandTotal: Int! 105 | } 106 | type CreateResults { 107 | summary: Summary! 108 | version: Version 109 | } 110 | type Query { 111 | // returns array of SourceMigration objects 112 | // all parameters are optional and can be used to filter source migrations 113 | // note that if the input query includes "contents" field this operation can produce large amounts of data 114 | // if you want to return "contents" field it may be better to get individual source migrations using sourceMigration(file: String!) 115 | sourceMigrations(filters: SourceMigrationFilters): [SourceMigration!]! 
116 | // returns a single SourceMigration 117 | // this operation can be used to fetch a complete SourceMigration including "contents" field 118 | // file is the unique identifier for a source migration file which you can get from sourceMigrations() 119 | sourceMigration(file: String!): SourceMigration 120 | // returns array of Version objects 121 | // file is optional and can be used to return versions in which given source migration file was applied 122 | // note that if input query includes DBMigration array and "contents" field this operation can produce large amounts of data 123 | // if you want to return "contents" field it may be better to get individual versions using either 124 | // version(id: Int!) or even get individual DB migration using dbMigration(id: Int!) 125 | versions(file: String): [Version!]! 126 | // returns a single Version 127 | // id is the unique identifier of a version which you can get from versions() 128 | // note that if input query includes "contents" field this operation can produce large amounts of data 129 | // if you want to return "contents" field it may be better to get individual DB migration using dbMigration(id: Int!) 130 | version(id: Int!): Version 131 | // returns a single DBMigration 132 | // this operation can be used to fetch a complete DBMigration including "contents" field 133 | // id is the unique identifier of a DB migration which you can get from versions(file: String) or version(id: Int!) 134 | dbMigration(id: Int!): DBMigration 135 | // returns array of Tenant objects 136 | tenants(): [Tenant!]! 137 | } 138 | type Mutation { 139 | // creates new DB version by applying all eligible DB migrations & scripts 140 | createVersion(input: VersionInput!): CreateResults! 141 | // creates new tenant by applying only tenant-specific DB migrations & scripts, also creates new DB version 142 | createTenant(input: TenantInput!): CreateResults! 
143 | } 144 | ` 145 | 146 | // RootResolver is resolver for all the migrator data 147 | type RootResolver struct { 148 | Coordinator coordinator.Coordinator 149 | } 150 | 151 | // Tenants resolves all tenants 152 | func (r *RootResolver) Tenants() ([]types.Tenant, error) { 153 | tenants := r.Coordinator.GetTenants() 154 | return tenants, nil 155 | } 156 | 157 | // Versions resoves all versions, optionally can return versions with specific source migration (file is the identifier for source migrations) 158 | func (r *RootResolver) Versions(args struct { 159 | File *string 160 | }) ([]types.Version, error) { 161 | if args.File != nil { 162 | return r.Coordinator.GetVersionsByFile(*args.File), nil 163 | } 164 | return r.Coordinator.GetVersions(), nil 165 | } 166 | 167 | // Version resolves version by ID 168 | func (r *RootResolver) Version(args struct { 169 | ID int32 170 | }) (*types.Version, error) { 171 | return r.Coordinator.GetVersionByID(args.ID) 172 | } 173 | 174 | // SourceMigrations resolves source migrations using optional filters 175 | func (r *RootResolver) SourceMigrations(args struct { 176 | Filters *coordinator.SourceMigrationFilters 177 | }) ([]types.Migration, error) { 178 | sourceMigrations := r.Coordinator.GetSourceMigrations(args.Filters) 179 | return sourceMigrations, nil 180 | } 181 | 182 | // SourceMigration resolves source migration by its file name 183 | func (r *RootResolver) SourceMigration(args struct { 184 | File string 185 | }) (*types.Migration, error) { 186 | return r.Coordinator.GetSourceMigrationByFile(args.File) 187 | } 188 | 189 | // DBMigration resolves DB migration by ID 190 | func (r *RootResolver) DBMigration(args struct { 191 | ID int32 192 | }) (*types.DBMigration, error) { 193 | return r.Coordinator.GetDBMigrationByID(args.ID) 194 | } 195 | 196 | // CreateVersion creates new DB version 197 | func (r *RootResolver) CreateVersion(args struct { 198 | Input types.VersionInput 199 | }) (*types.CreateResults, error) { 200 | results 
:= r.Coordinator.CreateVersion(args.Input.VersionName, args.Input.Action, args.Input.DryRun) 201 | return results, nil 202 | } 203 | 204 | // CreateTenant creates new tenant 205 | func (r *RootResolver) CreateTenant(args struct { 206 | Input types.TenantInput 207 | }) (*types.CreateResults, error) { 208 | results := r.Coordinator.CreateTenant(args.Input.VersionName, args.Input.Action, args.Input.DryRun, args.Input.TenantName) 209 | return results, nil 210 | } 211 | -------------------------------------------------------------------------------- /data/graphql_mocks.go: -------------------------------------------------------------------------------- 1 | package data 2 | 3 | import ( 4 | "strings" 5 | "time" 6 | 7 | "github.com/graph-gophers/graphql-go" 8 | "github.com/lukaszbudnik/migrator/coordinator" 9 | "github.com/lukaszbudnik/migrator/types" 10 | ) 11 | 12 | type mockedCoordinator struct { 13 | } 14 | 15 | func (m *mockedCoordinator) safeString(value *string) string { 16 | if value == nil { 17 | return "" 18 | } 19 | return *value 20 | } 21 | 22 | func (m *mockedCoordinator) CreateTenant(string, types.Action, bool, string) *types.CreateResults { 23 | version, _ := m.GetVersionByID(0) 24 | return &types.CreateResults{Summary: &types.Summary{}, Version: version} 25 | } 26 | 27 | func (m *mockedCoordinator) CreateVersion(string, types.Action, bool) *types.CreateResults { 28 | // re-use mocked version from GetVersionByID... 
29 | version, _ := m.GetVersionByID(0) 30 | return &types.CreateResults{Summary: &types.Summary{}, Version: version} 31 | } 32 | 33 | func (m *mockedCoordinator) GetSourceMigrations(filters *coordinator.SourceMigrationFilters) []types.Migration { 34 | 35 | if filters == nil { 36 | m1 := types.Migration{Name: "201602220000.sql", SourceDir: "source", File: "source/201602220000.sql", MigrationType: types.MigrationTypeSingleMigration, Contents: "select abc"} 37 | m2 := types.Migration{Name: "201602220001.sql", SourceDir: "source", File: "source/201602220001.sql", MigrationType: types.MigrationTypeSingleMigration, Contents: "select def"} 38 | m3 := types.Migration{Name: "201602220001.sql", SourceDir: "config", File: "config/201602220001.sql", MigrationType: types.MigrationTypeSingleMigration, Contents: "select def"} 39 | m4 := types.Migration{Name: "201602220002.sql", SourceDir: "source", File: "source/201602220002.sql", MigrationType: types.MigrationTypeSingleMigration, Contents: "select def"} 40 | m5 := types.Migration{Name: "201602220003.sql", SourceDir: "tenant", File: "tenant/201602220003.sql", MigrationType: types.MigrationTypeTenantMigration, Contents: "select def"} 41 | return []types.Migration{m1, m2, m3, m4, m5} 42 | } 43 | 44 | m1 := types.Migration{Name: m.safeString(filters.Name), SourceDir: m.safeString(filters.SourceDir), File: m.safeString(filters.File), MigrationType: types.MigrationTypeSingleMigration, Contents: "select abc"} 45 | return []types.Migration{m1} 46 | } 47 | 48 | func (m *mockedCoordinator) GetSourceMigrationByFile(file string) (*types.Migration, error) { 49 | i := strings.Index(file, "/") 50 | sourceDir := file[:i] 51 | name := file[i+1:] 52 | m1 := types.Migration{Name: name, SourceDir: sourceDir, File: file, MigrationType: types.MigrationTypeSingleMigration, Contents: "select abc"} 53 | return &m1, nil 54 | } 55 | 56 | func (m *mockedCoordinator) Dispose() { 57 | } 58 | 59 | func (m *mockedCoordinator) GetTenants() []types.Tenant { 60 | 
a := types.Tenant{Name: "a"} 61 | b := types.Tenant{Name: "b"} 62 | c := types.Tenant{Name: "c"} 63 | return []types.Tenant{a, b, c} 64 | } 65 | 66 | func (m *mockedCoordinator) GetVersions() []types.Version { 67 | a := types.Version{ID: 12, Name: "a", Created: graphql.Time{Time: time.Now().AddDate(0, 0, -2)}} 68 | b := types.Version{ID: 121, Name: "bb", Created: graphql.Time{Time: time.Now().AddDate(0, 0, -1)}} 69 | c := types.Version{ID: 122, Name: "ccc", Created: graphql.Time{Time: time.Now()}} 70 | return []types.Version{a, b, c} 71 | } 72 | 73 | func (m *mockedCoordinator) GetVersionsByFile(file string) []types.Version { 74 | a := types.Version{ID: 12, Name: "a", Created: graphql.Time{Time: time.Now().AddDate(0, 0, -2)}} 75 | return []types.Version{a} 76 | } 77 | 78 | func (m *mockedCoordinator) GetVersionByID(ID int32) (*types.Version, error) { 79 | m1 := types.Migration{Name: "201602220000.sql", SourceDir: "source", File: "source/201602220000.sql", MigrationType: types.MigrationTypeSingleMigration, Contents: "select abc"} 80 | d1 := time.Date(2016, 02, 22, 16, 41, 1, 123, time.UTC) 81 | db1 := types.DBMigration{Migration: m1, Schema: "source", Created: graphql.Time{Time: d1}} 82 | 83 | m2 := types.Migration{Name: "202002180000.sql", SourceDir: "config", File: "config/202002180000.sql", MigrationType: types.MigrationTypeSingleMigration, Contents: "select abc"} 84 | d2 := time.Date(2020, 02, 18, 16, 41, 1, 123, time.UTC) 85 | db2 := types.DBMigration{Migration: m2, Schema: "source", Created: graphql.Time{Time: d2}} 86 | 87 | m3 := types.Migration{Name: "202002180000.sql", SourceDir: "tenants", File: "tenants/202002180000.sql", MigrationType: types.MigrationTypeTenantMigration, Contents: "select abc"} 88 | d3 := time.Date(2020, 02, 18, 16, 41, 1, 123, time.UTC) 89 | db3 := types.DBMigration{Migration: m3, Schema: "abc", Created: graphql.Time{Time: d3}} 90 | db4 := types.DBMigration{Migration: m3, Schema: "def", Created: graphql.Time{Time: d3}} 91 | db5 := 
types.DBMigration{Migration: m3, Schema: "xyz", Created: graphql.Time{Time: d3}} 92 | 93 | a := types.Version{ID: ID, Name: "a", Created: graphql.Time{Time: time.Now().AddDate(0, 0, -2)}, DBMigrations: []types.DBMigration{db1, db2, db3, db4, db5}} 94 | 95 | return &a, nil 96 | } 97 | 98 | // not used in GraphQL 99 | func (m *mockedCoordinator) GetAppliedMigrations() []types.DBMigration { 100 | return []types.DBMigration{} 101 | } 102 | 103 | func (m *mockedCoordinator) GetDBMigrationByID(ID int32) (*types.DBMigration, error) { 104 | migration := types.Migration{Name: "201602220000.sql", SourceDir: "source", File: "source/201602220000.sql", MigrationType: types.MigrationTypeSingleMigration, Contents: "select abc"} 105 | d := time.Date(2016, 02, 22, 16, 41, 1, 123, time.UTC) 106 | db := types.DBMigration{Migration: migration, ID: ID, Schema: "source", Created: graphql.Time{Time: d}} 107 | return &db, nil 108 | } 109 | 110 | func (m *mockedCoordinator) VerifySourceMigrationsCheckSums() (bool, []types.Migration) { 111 | return true, nil 112 | } 113 | 114 | func (m *mockedCoordinator) HealthCheck() types.HealthResponse { 115 | return types.HealthResponse{Status: types.HealthStatusUp, Checks: []types.HealthChecks{}} 116 | } 117 | -------------------------------------------------------------------------------- /db/db_dialect.go: -------------------------------------------------------------------------------- 1 | package db 2 | 3 | import ( 4 | "fmt" 5 | "regexp" 6 | 7 | "github.com/lukaszbudnik/migrator/config" 8 | ) 9 | 10 | var isValidIdentifier = regexp.MustCompile(`^[A-Za-z0-9_-]+$`).MatchString 11 | 12 | // dialect returns SQL statements for given DB 13 | type dialect interface { 14 | GetTenantInsertSQL() string 15 | GetTenantSelectSQL() string 16 | GetMigrationInsertSQL() string 17 | GetMigrationSelectSQL() string 18 | GetMigrationByIDSQL() string 19 | GetCreateTenantsTableSQL() string 20 | GetCreateMigrationsTableSQL() string 21 | GetCreateSchemaSQL(string) string 
22 | GetCreateVersionsTableSQL() []string 23 | GetVersionInsertSQL() string 24 | GetVersionsSelectSQL() string 25 | GetVersionsByFileSQL() string 26 | GetVersionByIDSQL() string 27 | LastInsertIDSupported() bool 28 | } 29 | 30 | // baseDialect struct is used to provide default dialect interface implementation 31 | type baseDialect struct { 32 | } 33 | 34 | const ( 35 | selectVersionsSQL = "select mv.id as vid, mv.name as vname, mv.created as vcreated, mm.id as mid, mm.name, mm.source_dir, mm.filename, mm.type, mm.db_schema, mm.created, mm.contents, mm.checksum from %v.%v mv left join %v.%v mm on mv.id = mm.version_id order by vid desc, mid asc" 36 | selectMigrationsSQL = "select name, source_dir as sd, filename, type, db_schema, created, contents, checksum from %v.%v order by name, source_dir" 37 | selectTenantsSQL = "select name from %v.%v" 38 | createMigrationsTableSQL = ` 39 | create table if not exists %v.%v ( 40 | id serial primary key, 41 | name varchar(200) not null, 42 | source_dir varchar(200) not null, 43 | filename varchar(200) not null, 44 | type int not null, 45 | db_schema varchar(200) not null, 46 | created timestamp default now(), 47 | contents text, 48 | checksum varchar(64) 49 | ) 50 | ` 51 | createTenantsTableSQL = ` 52 | create table if not exists %v.%v ( 53 | id serial primary key, 54 | name varchar(200) not null, 55 | created timestamp default now() 56 | ) 57 | ` 58 | createSchemaSQL = "create schema if not exists %v" 59 | ) 60 | 61 | // GetCreateTenantsTableSQL returns migrator's default create tenants table SQL statement. 62 | // This SQL is used by both MySQL and PostgreSQL. 63 | func (bd *baseDialect) GetCreateTenantsTableSQL() string { 64 | return fmt.Sprintf(createTenantsTableSQL, migratorSchema, migratorTenantsTable) 65 | } 66 | 67 | // GetCreateMigrationsTableSQL returns migrator's create migrations table SQL statement. 68 | // This SQL is used by both MySQL and PostgreSQL. 
69 | func (bd *baseDialect) GetCreateMigrationsTableSQL() string { 70 | return fmt.Sprintf(createMigrationsTableSQL, migratorSchema, migratorMigrationsTable) 71 | } 72 | 73 | // GetTenantSelectSQL returns migrator's default tenant select SQL statement. 74 | // This SQL is used by all MySQL, PostgreSQL, and MS SQL. 75 | func (bd *baseDialect) GetTenantSelectSQL() string { 76 | return fmt.Sprintf(selectTenantsSQL, migratorSchema, migratorTenantsTable) 77 | } 78 | 79 | // GetMigrationSelectSQL returns migrator's migrations select SQL statement. 80 | // This SQL is used by all MySQL, PostgreSQL, MS SQL. 81 | func (bd *baseDialect) GetMigrationSelectSQL() string { 82 | return fmt.Sprintf(selectMigrationsSQL, migratorSchema, migratorMigrationsTable) 83 | } 84 | 85 | // GetCreateSchemaSQL returns create schema SQL statement. 86 | // This SQL is used by both MySQL and PostgreSQL. 87 | func (bd *baseDialect) GetCreateSchemaSQL(schema string) string { 88 | if !isValidIdentifier(schema) { 89 | panic(fmt.Sprintf("Schema name contains invalid characters: %v", schema)) 90 | } 91 | return fmt.Sprintf(createSchemaSQL, schema) 92 | } 93 | 94 | // GetVersionsSelectSQL returns select SQL statement that returns all versions 95 | // This SQL is used by both MySQL and PostgreSQL. 
96 | func (bd *baseDialect) GetVersionsSelectSQL() string { 97 | return fmt.Sprintf(selectVersionsSQL, migratorSchema, migratorVersionsTable, migratorSchema, migratorMigrationsTable) 98 | } 99 | 100 | // newDialect constructs dialect instance based on the passed Config 101 | func newDialect(config *config.Config) dialect { 102 | 103 | var dialect dialect 104 | 105 | switch config.Driver { 106 | case "mysql": 107 | dialect = &mySQLDialect{} 108 | case "sqlserver": 109 | dialect = &msSQLDialect{} 110 | case "postgres": 111 | dialect = &postgreSQLDialect{} 112 | default: 113 | panic(fmt.Sprintf("Failed to create Connector unknown driver: %v", config.Driver)) 114 | } 115 | 116 | return dialect 117 | } 118 | -------------------------------------------------------------------------------- /db/db_dialect_test.go: -------------------------------------------------------------------------------- 1 | package db 2 | 3 | import ( 4 | "fmt" 5 | "testing" 6 | 7 | "github.com/lukaszbudnik/migrator/config" 8 | "github.com/stretchr/testify/assert" 9 | ) 10 | 11 | func TestBaseDialectGetCreateTenantsTableSQL(t *testing.T) { 12 | config, err := config.FromFile("../test/migrator-postgresql.yaml") 13 | assert.Nil(t, err) 14 | 15 | dialect := newDialect(config) 16 | 17 | createTenantsTableSQL := dialect.GetCreateTenantsTableSQL() 18 | 19 | expected := ` 20 | create table if not exists migrator.migrator_tenants ( 21 | id serial primary key, 22 | name varchar(200) not null, 23 | created timestamp default now() 24 | ) 25 | ` 26 | 27 | assert.Equal(t, expected, createTenantsTableSQL) 28 | } 29 | 30 | func TestBaseDialectGetCreateMigrationsTableSQL(t *testing.T) { 31 | config, err := config.FromFile("../test/migrator-postgresql.yaml") 32 | assert.Nil(t, err) 33 | 34 | dialect := newDialect(config) 35 | 36 | createMigrationsTableSQL := dialect.GetCreateMigrationsTableSQL() 37 | 38 | expected := ` 39 | create table if not exists migrator.migrator_migrations ( 40 | id serial primary key, 41 | 
name varchar(200) not null, 42 | source_dir varchar(200) not null, 43 | filename varchar(200) not null, 44 | type int not null, 45 | db_schema varchar(200) not null, 46 | created timestamp default now(), 47 | contents text, 48 | checksum varchar(64) 49 | ) 50 | ` 51 | 52 | assert.Equal(t, expected, createMigrationsTableSQL) 53 | } 54 | 55 | func TestBaseDialectGetCreateSchemaSQL(t *testing.T) { 56 | config, err := config.FromFile("../test/migrator-postgresql.yaml") 57 | assert.Nil(t, err) 58 | 59 | dialect := newDialect(config) 60 | 61 | createSchemaSQL := dialect.GetCreateSchemaSQL("abc") 62 | 63 | expected := "create schema if not exists abc" 64 | 65 | assert.Equal(t, expected, createSchemaSQL) 66 | } 67 | 68 | func TestBaseDialectGetCreateSchemaSQLError(t *testing.T) { 69 | config, err := config.FromFile("../test/migrator-postgresql.yaml") 70 | assert.Nil(t, err) 71 | 72 | dialect := newDialect(config) 73 | 74 | sqlInjection := "abc; drop schema migrator;" 75 | expectedValue := fmt.Sprintf("Schema name contains invalid characters: %v", sqlInjection) 76 | assert.PanicsWithValue(t, expectedValue, func() { dialect.GetCreateSchemaSQL(sqlInjection) }) 77 | } 78 | 79 | func TestBaseDialectGetVersionsSelectSQL(t *testing.T) { 80 | config, err := config.FromFile("../test/migrator-postgresql.yaml") 81 | assert.Nil(t, err) 82 | 83 | dialect := newDialect(config) 84 | 85 | versionsSelectSQL := dialect.GetVersionsSelectSQL() 86 | 87 | expected := "select mv.id as vid, mv.name as vname, mv.created as vcreated, mm.id as mid, mm.name, mm.source_dir, mm.filename, mm.type, mm.db_schema, mm.created, mm.contents, mm.checksum from migrator.migrator_versions mv left join migrator.migrator_migrations mm on mv.id = mm.version_id order by vid desc, mid asc" 88 | 89 | assert.Equal(t, expected, versionsSelectSQL) 90 | } 91 | -------------------------------------------------------------------------------- /db/db_mssql.go: 
-------------------------------------------------------------------------------- 1 | package db 2 | 3 | import ( 4 | "fmt" 5 | // blank import for MSSQL driver 6 | _ "github.com/denisenkom/go-mssqldb" 7 | ) 8 | 9 | type msSQLDialect struct { 10 | baseDialect 11 | } 12 | 13 | const ( 14 | insertMigrationMSSQLDialectSQL = "insert into %v.%v (name, source_dir, filename, type, db_schema, contents, checksum, version_id) values (@p1, @p2, @p3, @p4, @p5, @p6, @p7, @p8)" 15 | insertTenantMSSQLDialectSQL = "insert into %v.%v (name) values (@p1)" 16 | insertVersionMSSQLSQLDialectSQL = "insert into %v.%v (name) output inserted.id values (@p1)" 17 | selectVersionsByFileMSSQLDialectSQL = "select mv.id as vid, mv.name as vname, mv.created as vcreated, mm.id as mid, mm.name, mm.source_dir, mm.filename, mm.type, mm.db_schema, mm.created, mm.contents, mm.checksum from %v.%v mv left join %v.%v mm on mv.id = mm.version_id where mv.id in (select version_id from %v.%v where filename = @p1) order by vid desc, mid asc" 18 | selectVersionByIDMSSQLDialectSQL = "select mv.id as vid, mv.name as vname, mv.created as vcreated, mm.id as mid, mm.name, mm.source_dir, mm.filename, mm.type, mm.db_schema, mm.created, mm.contents, mm.checksum from %v.%v mv left join %v.%v mm on mv.id = mm.version_id where mv.id = @p1 order by mid asc" 19 | selectMigrationByIDMSSQLDialectSQL = "select id, name, source_dir, filename, type, db_schema, created, contents, checksum from %v.%v where id = @p1" 20 | createTenantsTableMSSQLDialectSQL = ` 21 | IF NOT EXISTS (select * from information_schema.tables where table_schema = '%v' and table_name = '%v') 22 | BEGIN 23 | create table [%v].%v ( 24 | id int identity (1,1) primary key, 25 | name varchar(200) not null, 26 | created datetime default CURRENT_TIMESTAMP 27 | ); 28 | END 29 | ` 30 | createMigrationsTableMSSQLDialectSQL = ` 31 | IF NOT EXISTS (select * from information_schema.tables where table_schema = '%v' and table_name = '%v') 32 | BEGIN 33 | create table 
[%v].%v ( 34 | id int identity (1,1) primary key, 35 | name varchar(200) not null, 36 | source_dir varchar(200) not null, 37 | filename varchar(200) not null, 38 | type int not null, 39 | db_schema varchar(200) not null, 40 | created datetime default CURRENT_TIMESTAMP, 41 | contents text, 42 | checksum varchar(64) 43 | ); 44 | END 45 | ` 46 | createSchemaMSSQLDialectSQL = ` 47 | IF NOT EXISTS (select * from information_schema.schemata where schema_name = '%v') 48 | BEGIN 49 | EXEC sp_executesql N'create schema %v'; 50 | END 51 | ` 52 | versionsTableSetupMSSQLDialectSQL = ` 53 | if not exists (select * from information_schema.tables where table_schema = '%v' and table_name = '%v') 54 | begin 55 | declare @cn nvarchar(200); 56 | create table [%v].%v ( 57 | id int identity (1,1) primary key, 58 | name varchar(200) not null, 59 | created datetime default CURRENT_TIMESTAMP 60 | ); 61 | -- workaround for MSSQL not finding a newly created column 62 | -- when creating initial version default value is set to 1 63 | alter table [%v].%v add version_id int not null default 1; 64 | if exists (select * from [%v].%v) 65 | begin 66 | insert into [%v].%v (name) values ('Initial version'); 67 | end 68 | -- change version_id to not null 69 | alter table [%v].%v 70 | alter column version_id int not null; 71 | alter table [%v].%v 72 | add constraint migrator_versions_version_id_fk foreign key (version_id) references [%v].%v (id) on delete cascade; 73 | create index migrator_migrations_version_id_idx on [%v].%v (version_id); 74 | -- remove workaround default value 75 | select @cn = name from sys.default_constraints where parent_object_id = object_id('[%v].%v') and name like '%%ver%%'; 76 | EXEC ('alter table [%v].%v drop constraint ' + @cn); 77 | end 78 | ` 79 | ) 80 | 81 | // LastInsertIDSupported instructs migrator if Result.LastInsertId() is supported by the DB driver 82 | func (md *msSQLDialect) LastInsertIDSupported() bool { 83 | return false 84 | } 85 | 86 | // 
GetMigrationInsertSQL returns MS SQL-specific migration insert SQL statement 87 | func (md *msSQLDialect) GetMigrationInsertSQL() string { 88 | return fmt.Sprintf(insertMigrationMSSQLDialectSQL, migratorSchema, migratorMigrationsTable) 89 | } 90 | 91 | // GetTenantInsertSQL returns MS SQL-specific migrator's default tenant insert SQL statement 92 | func (md *msSQLDialect) GetTenantInsertSQL() string { 93 | return fmt.Sprintf(insertTenantMSSQLDialectSQL, migratorSchema, migratorTenantsTable) 94 | } 95 | 96 | // GetCreateTenantsTableSQL returns migrator's default create tenants table SQL statement. 97 | // This SQL is used by MS SQL. 98 | func (md *msSQLDialect) GetCreateTenantsTableSQL() string { 99 | return fmt.Sprintf(createTenantsTableMSSQLDialectSQL, migratorSchema, migratorTenantsTable, migratorSchema, migratorTenantsTable) 100 | } 101 | 102 | // GetCreateMigrationsTableSQL returns migrator's create migrations table SQL statement. 103 | // This SQL is used by MS SQL. 104 | func (md *msSQLDialect) GetCreateMigrationsTableSQL() string { 105 | return fmt.Sprintf(createMigrationsTableMSSQLDialectSQL, migratorSchema, migratorMigrationsTable, migratorSchema, migratorMigrationsTable) 106 | } 107 | 108 | // GetCreateSchemaSQL returns create schema SQL statement. 109 | // This SQL is used by MS SQL. 
110 | func (md *msSQLDialect) GetCreateSchemaSQL(schema string) string { 111 | if !isValidIdentifier(schema) { 112 | panic(fmt.Sprintf("Schema name contains invalid characters: %v", schema)) 113 | } 114 | return fmt.Sprintf(createSchemaMSSQLDialectSQL, schema, schema) 115 | } 116 | 117 | func (md *msSQLDialect) GetVersionInsertSQL() string { 118 | return fmt.Sprintf(insertVersionMSSQLSQLDialectSQL, migratorSchema, migratorVersionsTable) 119 | } 120 | 121 | func (md *msSQLDialect) GetCreateVersionsTableSQL() []string { 122 | return []string{fmt.Sprintf(versionsTableSetupMSSQLDialectSQL, migratorSchema, migratorVersionsTable, migratorSchema, migratorVersionsTable, migratorSchema, migratorMigrationsTable, migratorSchema, migratorMigrationsTable, migratorSchema, migratorVersionsTable, migratorSchema, migratorMigrationsTable, migratorSchema, migratorMigrationsTable, migratorSchema, migratorVersionsTable, migratorSchema, migratorMigrationsTable, migratorSchema, migratorMigrationsTable, migratorSchema, migratorMigrationsTable)} 123 | } 124 | 125 | func (md *msSQLDialect) GetVersionsByFileSQL() string { 126 | return fmt.Sprintf(selectVersionsByFileMSSQLDialectSQL, migratorSchema, migratorVersionsTable, migratorSchema, migratorMigrationsTable, migratorSchema, migratorMigrationsTable) 127 | } 128 | 129 | func (md *msSQLDialect) GetVersionByIDSQL() string { 130 | return fmt.Sprintf(selectVersionByIDMSSQLDialectSQL, migratorSchema, migratorVersionsTable, migratorSchema, migratorMigrationsTable) 131 | } 132 | 133 | func (md *msSQLDialect) GetMigrationByIDSQL() string { 134 | return fmt.Sprintf(selectMigrationByIDMSSQLDialectSQL, migratorSchema, migratorMigrationsTable) 135 | } 136 | -------------------------------------------------------------------------------- /db/db_mysql.go: -------------------------------------------------------------------------------- 1 | package db 2 | 3 | import ( 4 | "fmt" 5 | // blank import for MySQL driver 6 | _ "github.com/go-sql-driver/mysql" 7 | 
) 8 | 9 | type mySQLDialect struct { 10 | baseDialect 11 | } 12 | 13 | const ( 14 | insertMigrationMySQLDialectSQL = "insert into %v.%v (name, source_dir, filename, type, db_schema, contents, checksum, version_id) values (?, ?, ?, ?, ?, ?, ?, ?)" 15 | insertTenantMySQLDialectSQL = "insert into %v.%v (name) values (?)" 16 | insertVersionMySQLDialectSQL = "insert into %v.%v (name) values (?)" 17 | selectVersionsByFileMySQLDialectSQL = "select mv.id as vid, mv.name as vname, mv.created as vcreated, mm.id as mid, mm.name, mm.source_dir, mm.filename, mm.type, mm.db_schema, mm.created, mm.contents, mm.checksum from %v.%v mv left join %v.%v mm on mv.id = mm.version_id where mv.id in (select version_id from %v.%v where filename = ?) order by vid desc, mid asc" 18 | selectVersionByIDMySQLDialectSQL = "select mv.id as vid, mv.name as vname, mv.created as vcreated, mm.id as mid, mm.name, mm.source_dir, mm.filename, mm.type, mm.db_schema, mm.created, mm.contents, mm.checksum from %v.%v mv left join %v.%v mm on mv.id = mm.version_id where mv.id = ? order by mid asc" 19 | selectMigrationByIDMySQLDialectSQL = "select id, name, source_dir, filename, type, db_schema, created, contents, checksum from %v.%v where id = ?" 
20 | versionsTableSetupMySQLDropDialectSQL = `drop procedure if exists migrator_create_versions` 21 | versionsTableSetupMySQLCallDialectSQL = `call migrator_create_versions()` 22 | versionsTableSetupMySQLProcedureDialectSQL = ` 23 | create procedure migrator_create_versions() 24 | begin 25 | if not exists (select * from information_schema.tables where table_schema = '%v' and table_name = '%v') then 26 | create table %v.%v ( 27 | id serial primary key, 28 | name varchar(200) not null, 29 | created timestamp default now() 30 | ); 31 | alter table %v.%v add column version_id bigint unsigned; 32 | create index migrator_versions_version_id_idx on %v.%v (version_id); 33 | if exists (select * from %v.%v) then 34 | insert into %v.%v (name) values ('Initial version'); 35 | -- initial version_id sequence is always 1 36 | update %v.%v set version_id = 1; 37 | end if; 38 | alter table %v.%v 39 | modify version_id bigint unsigned not null; 40 | alter table %v.%v 41 | add constraint migrator_versions_version_id_fk foreign key (version_id) references %v.%v (id) on delete cascade; 42 | end if; 43 | end; 44 | ` 45 | ) 46 | 47 | // LastInsertIDSupported instructs migrator if Result.LastInsertId() is supported by the DB driver 48 | func (md *mySQLDialect) LastInsertIDSupported() bool { 49 | return true 50 | } 51 | 52 | // GetMigrationInsertSQL returns MySQL-specific migration insert SQL statement 53 | func (md *mySQLDialect) GetMigrationInsertSQL() string { 54 | return fmt.Sprintf(insertMigrationMySQLDialectSQL, migratorSchema, migratorMigrationsTable) 55 | } 56 | 57 | // GetTenantInsertSQL returns MySQL-specific migrator's default tenant insert SQL statement 58 | func (md *mySQLDialect) GetTenantInsertSQL() string { 59 | return fmt.Sprintf(insertTenantMySQLDialectSQL, migratorSchema, migratorTenantsTable) 60 | } 61 | 62 | func (md *mySQLDialect) GetVersionInsertSQL() string { 63 | return fmt.Sprintf(insertVersionMySQLDialectSQL, migratorSchema, migratorVersionsTable) 64 | } 65 | 66 
| // GetCreateVersionsTableSQL returns MySQL-specific SQLs which does: 67 | // 1. drop procedure if exists 68 | // 2. create procedure 69 | // 3. calls procedure 70 | // far from ideal MySQL in contrast to MS SQL and PostgreSQL does not support the execution of anonymous blocks of code 71 | func (md *mySQLDialect) GetCreateVersionsTableSQL() []string { 72 | return []string{ 73 | versionsTableSetupMySQLDropDialectSQL, 74 | fmt.Sprintf(versionsTableSetupMySQLProcedureDialectSQL, migratorSchema, migratorVersionsTable, migratorSchema, migratorVersionsTable, migratorSchema, migratorMigrationsTable, migratorSchema, migratorMigrationsTable, migratorSchema, migratorMigrationsTable, migratorSchema, migratorVersionsTable, migratorSchema, migratorMigrationsTable, migratorSchema, migratorMigrationsTable, migratorSchema, migratorMigrationsTable, migratorSchema, migratorVersionsTable), 75 | versionsTableSetupMySQLCallDialectSQL, 76 | } 77 | } 78 | 79 | func (md *mySQLDialect) GetVersionsByFileSQL() string { 80 | return fmt.Sprintf(selectVersionsByFileMySQLDialectSQL, migratorSchema, migratorVersionsTable, migratorSchema, migratorMigrationsTable, migratorSchema, migratorMigrationsTable) 81 | } 82 | 83 | func (md *mySQLDialect) GetVersionByIDSQL() string { 84 | return fmt.Sprintf(selectVersionByIDMySQLDialectSQL, migratorSchema, migratorVersionsTable, migratorSchema, migratorMigrationsTable) 85 | } 86 | 87 | func (md *mySQLDialect) GetMigrationByIDSQL() string { 88 | return fmt.Sprintf(selectMigrationByIDMySQLDialectSQL, migratorSchema, migratorMigrationsTable) 89 | } 90 | -------------------------------------------------------------------------------- /db/db_mysql_test.go: -------------------------------------------------------------------------------- 1 | package db 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/lukaszbudnik/migrator/config" 7 | "github.com/stretchr/testify/assert" 8 | ) 9 | 10 | func TestDBCreateDialectMysqlDriver(t *testing.T) { 11 | config := 
&config.Config{} 12 | config.Driver = "mysql" 13 | dialect := newDialect(config) 14 | assert.IsType(t, &mySQLDialect{}, dialect) 15 | } 16 | 17 | func TestMySQLLastInsertIdSupported(t *testing.T) { 18 | config, err := config.FromFile("../test/migrator-mysql.yaml") 19 | assert.Nil(t, err) 20 | 21 | config.Driver = "mysql" 22 | dialect := newDialect(config) 23 | lastInsertIDSupported := dialect.LastInsertIDSupported() 24 | 25 | assert.True(t, lastInsertIDSupported) 26 | } 27 | 28 | func TestMySQLGetMigrationInsertSQL(t *testing.T) { 29 | config, err := config.FromFile("../test/migrator-mysql.yaml") 30 | assert.Nil(t, err) 31 | 32 | config.Driver = "mysql" 33 | 34 | dialect := newDialect(config) 35 | 36 | insertMigrationSQL := dialect.GetMigrationInsertSQL() 37 | 38 | assert.Equal(t, "insert into migrator.migrator_migrations (name, source_dir, filename, type, db_schema, contents, checksum, version_id) values (?, ?, ?, ?, ?, ?, ?, ?)", insertMigrationSQL) 39 | } 40 | 41 | func TestMySQLGetTenantInsertSQLDefault(t *testing.T) { 42 | config, err := config.FromFile("../test/migrator-mysql.yaml") 43 | assert.Nil(t, err) 44 | 45 | config.Driver = "mysql" 46 | dialect := newDialect(config) 47 | connector := baseConnector{newTestContext(), config, dialect, nil, false} 48 | defer connector.Dispose() 49 | 50 | tenantInsertSQL := connector.getTenantInsertSQL() 51 | 52 | assert.Equal(t, "insert into migrator.migrator_tenants (name) values (?)", tenantInsertSQL) 53 | } 54 | 55 | func TestMySQLGetVersionInsertSQL(t *testing.T) { 56 | config, err := config.FromFile("../test/migrator-mysql.yaml") 57 | assert.Nil(t, err) 58 | 59 | config.Driver = "mysql" 60 | dialect := newDialect(config) 61 | 62 | versionInsertSQL := dialect.GetVersionInsertSQL() 63 | 64 | assert.Equal(t, "insert into migrator.migrator_versions (name) values (?)", versionInsertSQL) 65 | } 66 | 67 | func TestMySQLGetCreateVersionsTableSQL(t *testing.T) { 68 | config, err := 
config.FromFile("../test/migrator-mysql.yaml") 69 | assert.Nil(t, err) 70 | 71 | config.Driver = "mysql" 72 | dialect := newDialect(config) 73 | 74 | actual := dialect.GetCreateVersionsTableSQL() 75 | expectedDrop := `drop procedure if exists migrator_create_versions` 76 | expectedCall := `call migrator_create_versions()` 77 | expectedProcedure := 78 | ` 79 | create procedure migrator_create_versions() 80 | begin 81 | if not exists (select * from information_schema.tables where table_schema = 'migrator' and table_name = 'migrator_versions') then 82 | create table migrator.migrator_versions ( 83 | id serial primary key, 84 | name varchar(200) not null, 85 | created timestamp default now() 86 | ); 87 | alter table migrator.migrator_migrations add column version_id bigint unsigned; 88 | create index migrator_versions_version_id_idx on migrator.migrator_migrations (version_id); 89 | if exists (select * from migrator.migrator_migrations) then 90 | insert into migrator.migrator_versions (name) values ('Initial version'); 91 | -- initial version_id sequence is always 1 92 | update migrator.migrator_migrations set version_id = 1; 93 | end if; 94 | alter table migrator.migrator_migrations 95 | modify version_id bigint unsigned not null; 96 | alter table migrator.migrator_migrations 97 | add constraint migrator_versions_version_id_fk foreign key (version_id) references migrator.migrator_versions (id) on delete cascade; 98 | end if; 99 | end; 100 | ` 101 | 102 | assert.Equal(t, expectedDrop, actual[0]) 103 | assert.Equal(t, expectedProcedure, actual[1]) 104 | assert.Equal(t, expectedCall, actual[2]) 105 | } 106 | 107 | func TestMySQLGetVersionsByFileSQL(t *testing.T) { 108 | config, err := config.FromFile("../test/migrator-mysql.yaml") 109 | assert.Nil(t, err) 110 | 111 | config.Driver = "mysql" 112 | dialect := newDialect(config) 113 | 114 | versionsByFile := dialect.GetVersionsByFileSQL() 115 | 116 | assert.Equal(t, "select mv.id as vid, mv.name as vname, mv.created as 
vcreated, mm.id as mid, mm.name, mm.source_dir, mm.filename, mm.type, mm.db_schema, mm.created, mm.contents, mm.checksum from migrator.migrator_versions mv left join migrator.migrator_migrations mm on mv.id = mm.version_id where mv.id in (select version_id from migrator.migrator_migrations where filename = ?) order by vid desc, mid asc", versionsByFile) 117 | } 118 | 119 | func TestMySQLGetVersionByIDSQL(t *testing.T) { 120 | config, err := config.FromFile("../test/migrator-mysql.yaml") 121 | assert.Nil(t, err) 122 | 123 | config.Driver = "mysql" 124 | dialect := newDialect(config) 125 | 126 | versionsByID := dialect.GetVersionByIDSQL() 127 | 128 | assert.Equal(t, "select mv.id as vid, mv.name as vname, mv.created as vcreated, mm.id as mid, mm.name, mm.source_dir, mm.filename, mm.type, mm.db_schema, mm.created, mm.contents, mm.checksum from migrator.migrator_versions mv left join migrator.migrator_migrations mm on mv.id = mm.version_id where mv.id = ? order by mid asc", versionsByID) 129 | } 130 | 131 | func TestMySQLGetMigrationByIDSQL(t *testing.T) { 132 | config, err := config.FromFile("../test/migrator-mysql.yaml") 133 | assert.Nil(t, err) 134 | 135 | config.Driver = "mysql" 136 | dialect := newDialect(config) 137 | 138 | migrationByID := dialect.GetMigrationByIDSQL() 139 | 140 | assert.Equal(t, "select id, name, source_dir, filename, type, db_schema, created, contents, checksum from migrator.migrator_migrations where id = ?", migrationByID) 141 | } 142 | -------------------------------------------------------------------------------- /db/db_postgresql.go: -------------------------------------------------------------------------------- 1 | package db 2 | 3 | import ( 4 | "fmt" 5 | // blank import for PostgreSQL driver 6 | _ "github.com/lib/pq" 7 | ) 8 | 9 | type postgreSQLDialect struct { 10 | baseDialect 11 | } 12 | 13 | const ( 14 | insertMigrationPostgreSQLDialectSQL = "insert into %v.%v (name, source_dir, filename, type, db_schema, contents, checksum, 
version_id) values ($1, $2, $3, $4, $5, $6, $7, $8)" 15 | insertTenantPostgreSQLDialectSQL = "insert into %v.%v (name) values ($1)" 16 | insertVersionPostgreSQLDialectSQL = "insert into %v.%v (name) values ($1) returning id" 17 | selectVersionsByFilePostgreSQLDialectSQL = "select mv.id as vid, mv.name as vname, mv.created as vcreated, mm.id as mid, mm.name, mm.source_dir, mm.filename, mm.type, mm.db_schema, mm.created, mm.contents, mm.checksum from %v.%v mv left join %v.%v mm on mv.id = mm.version_id where mv.id in (select version_id from %v.%v where filename = $1) order by vid desc, mid asc" 18 | selectVersionByIDPostgreSQLDialectSQL = "select mv.id as vid, mv.name as vname, mv.created as vcreated, mm.id as mid, mm.name, mm.source_dir, mm.filename, mm.type, mm.db_schema, mm.created, mm.contents, mm.checksum from %v.%v mv left join %v.%v mm on mv.id = mm.version_id where mv.id = $1 order by mid asc" 19 | selectMigrationByIDPostgreSQLDialectSQL = "select id, name, source_dir, filename, type, db_schema, created, contents, checksum from %v.%v where id = $1" 20 | versionsTableSetupPostgreSQLDialectSQL = ` 21 | do $$ 22 | begin 23 | if not exists (select * from information_schema.tables where table_schema = '%v' and table_name = '%v') then 24 | create table %v.%v ( 25 | id serial primary key, 26 | name varchar(200) not null, 27 | created timestamp with time zone default now() 28 | ); 29 | alter table %v.%v add column version_id integer; 30 | create index migrator_versions_version_id_idx on %v.%v (version_id); 31 | if exists (select * from %v.%v) then 32 | insert into %v.%v (name) values ('Initial version'); 33 | -- initial version_id sequence is always 1 34 | update %v.%v set version_id = 1; 35 | end if; 36 | alter table %v.%v 37 | alter column version_id set not null, 38 | add constraint migrator_versions_version_id_fk foreign key (version_id) references %v.%v (id) on delete cascade; 39 | end if; 40 | end $$; 41 | ` 42 | ) 43 | 44 | // LastInsertIDSupported instructs 
migrator if Result.LastInsertId() is supported by the DB driver 45 | func (pd *postgreSQLDialect) LastInsertIDSupported() bool { 46 | return false 47 | } 48 | 49 | // GetMigrationInsertSQL returns PostgreSQL-specific migration insert SQL statement 50 | func (pd *postgreSQLDialect) GetMigrationInsertSQL() string { 51 | return fmt.Sprintf(insertMigrationPostgreSQLDialectSQL, migratorSchema, migratorMigrationsTable) 52 | } 53 | 54 | // GetTenantInsertSQL returns PostgreSQL-specific migrator's default tenant insert SQL statement 55 | func (pd *postgreSQLDialect) GetTenantInsertSQL() string { 56 | return fmt.Sprintf(insertTenantPostgreSQLDialectSQL, migratorSchema, migratorTenantsTable) 57 | } 58 | 59 | func (pd *postgreSQLDialect) GetVersionInsertSQL() string { 60 | return fmt.Sprintf(insertVersionPostgreSQLDialectSQL, migratorSchema, migratorVersionsTable) 61 | } 62 | 63 | // GetCreateVersionsTableSQL returns PostgreSQL-specific SQL which does: 64 | // 1. create versions table 65 | // 2. alter statement used to add version column to migration 66 | // 3. create initial version if migrations exists (backwards compatibility) 67 | // 4. 
create not null consttraint on version column 68 | func (pd *postgreSQLDialect) GetCreateVersionsTableSQL() []string { 69 | return []string{fmt.Sprintf(versionsTableSetupPostgreSQLDialectSQL, migratorSchema, migratorVersionsTable, migratorSchema, migratorVersionsTable, migratorSchema, migratorMigrationsTable, migratorSchema, migratorMigrationsTable, migratorSchema, migratorMigrationsTable, migratorSchema, migratorVersionsTable, migratorSchema, migratorMigrationsTable, migratorSchema, migratorMigrationsTable, migratorSchema, migratorVersionsTable)} 70 | } 71 | 72 | func (pd *postgreSQLDialect) GetVersionsByFileSQL() string { 73 | return fmt.Sprintf(selectVersionsByFilePostgreSQLDialectSQL, migratorSchema, migratorVersionsTable, migratorSchema, migratorMigrationsTable, migratorSchema, migratorMigrationsTable) 74 | } 75 | 76 | func (pd *postgreSQLDialect) GetVersionByIDSQL() string { 77 | return fmt.Sprintf(selectVersionByIDPostgreSQLDialectSQL, migratorSchema, migratorVersionsTable, migratorSchema, migratorMigrationsTable) 78 | } 79 | 80 | func (pd *postgreSQLDialect) GetMigrationByIDSQL() string { 81 | return fmt.Sprintf(selectMigrationByIDPostgreSQLDialectSQL, migratorSchema, migratorMigrationsTable) 82 | } 83 | -------------------------------------------------------------------------------- /db/db_postgresql_test.go: -------------------------------------------------------------------------------- 1 | package db 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/lukaszbudnik/migrator/config" 7 | "github.com/stretchr/testify/assert" 8 | ) 9 | 10 | func TestDBCreateDialectPostgreSQLDriver(t *testing.T) { 11 | config := &config.Config{} 12 | config.Driver = "postgres" 13 | dialect := newDialect(config) 14 | assert.IsType(t, &postgreSQLDialect{}, dialect) 15 | } 16 | 17 | func TestPostgreSQLLastInsertIdSupported(t *testing.T) { 18 | config, err := config.FromFile("../test/migrator-postgresql.yaml") 19 | assert.Nil(t, err) 20 | 21 | config.Driver = "postgres" 22 | 
dialect := newDialect(config) 23 | lastInsertIDSupported := dialect.LastInsertIDSupported() 24 | 25 | assert.False(t, lastInsertIDSupported) 26 | } 27 | 28 | func TestPostgreSQLGetMigrationInsertSQL(t *testing.T) { 29 | config, err := config.FromFile("../test/migrator-postgresql.yaml") 30 | assert.Nil(t, err) 31 | 32 | config.Driver = "postgres" 33 | 34 | dialect := newDialect(config) 35 | 36 | insertMigrationSQL := dialect.GetMigrationInsertSQL() 37 | 38 | assert.Equal(t, "insert into migrator.migrator_migrations (name, source_dir, filename, type, db_schema, contents, checksum, version_id) values ($1, $2, $3, $4, $5, $6, $7, $8)", insertMigrationSQL) 39 | } 40 | 41 | func TestPostgreSQLGetTenantInsertSQLDefault(t *testing.T) { 42 | config, err := config.FromFile("../test/migrator-postgresql.yaml") 43 | assert.Nil(t, err) 44 | 45 | config.Driver = "postgres" 46 | dialect := newDialect(config) 47 | connector := baseConnector{newTestContext(), config, dialect, nil, false} 48 | defer connector.Dispose() 49 | 50 | tenantInsertSQL := connector.getTenantInsertSQL() 51 | 52 | assert.Equal(t, "insert into migrator.migrator_tenants (name) values ($1)", tenantInsertSQL) 53 | } 54 | 55 | func TestPostgreSQLGetVersionInsertSQL(t *testing.T) { 56 | config, err := config.FromFile("../test/migrator-postgresql.yaml") 57 | assert.Nil(t, err) 58 | 59 | config.Driver = "postgres" 60 | dialect := newDialect(config) 61 | 62 | versionInsertSQL := dialect.GetVersionInsertSQL() 63 | 64 | assert.Equal(t, "insert into migrator.migrator_versions (name) values ($1) returning id", versionInsertSQL) 65 | } 66 | 67 | func TestPostgreSQLGetCreateVersionsTableSQL(t *testing.T) { 68 | config, err := config.FromFile("../test/migrator-postgresql.yaml") 69 | assert.Nil(t, err) 70 | 71 | config.Driver = "postgres" 72 | dialect := newDialect(config) 73 | 74 | actual := dialect.GetCreateVersionsTableSQL() 75 | 76 | expected := 77 | ` 78 | do $$ 79 | begin 80 | if not exists (select * from 
information_schema.tables where table_schema = 'migrator' and table_name = 'migrator_versions') then 81 | create table migrator.migrator_versions ( 82 | id serial primary key, 83 | name varchar(200) not null, 84 | created timestamp with time zone default now() 85 | ); 86 | alter table migrator.migrator_migrations add column version_id integer; 87 | create index migrator_versions_version_id_idx on migrator.migrator_migrations (version_id); 88 | if exists (select * from migrator.migrator_migrations) then 89 | insert into migrator.migrator_versions (name) values ('Initial version'); 90 | -- initial version_id sequence is always 1 91 | update migrator.migrator_migrations set version_id = 1; 92 | end if; 93 | alter table migrator.migrator_migrations 94 | alter column version_id set not null, 95 | add constraint migrator_versions_version_id_fk foreign key (version_id) references migrator.migrator_versions (id) on delete cascade; 96 | end if; 97 | end $$; 98 | ` 99 | 100 | assert.Equal(t, expected, actual[0]) 101 | } 102 | 103 | func TestPostgreSQLGetVersionsByFileSQL(t *testing.T) { 104 | config, err := config.FromFile("../test/migrator-postgresql.yaml") 105 | assert.Nil(t, err) 106 | 107 | config.Driver = "postgres" 108 | dialect := newDialect(config) 109 | 110 | versionsByFile := dialect.GetVersionsByFileSQL() 111 | 112 | assert.Equal(t, "select mv.id as vid, mv.name as vname, mv.created as vcreated, mm.id as mid, mm.name, mm.source_dir, mm.filename, mm.type, mm.db_schema, mm.created, mm.contents, mm.checksum from migrator.migrator_versions mv left join migrator.migrator_migrations mm on mv.id = mm.version_id where mv.id in (select version_id from migrator.migrator_migrations where filename = $1) order by vid desc, mid asc", versionsByFile) 113 | } 114 | 115 | func TestPostgreSQLGetVersionByIDSQL(t *testing.T) { 116 | config, err := config.FromFile("../test/migrator-postgresql.yaml") 117 | assert.Nil(t, err) 118 | 119 | config.Driver = "postgres" 120 | dialect := 
newDialect(config) 121 | 122 | versionsByID := dialect.GetVersionByIDSQL() 123 | 124 | assert.Equal(t, "select mv.id as vid, mv.name as vname, mv.created as vcreated, mm.id as mid, mm.name, mm.source_dir, mm.filename, mm.type, mm.db_schema, mm.created, mm.contents, mm.checksum from migrator.migrator_versions mv left join migrator.migrator_migrations mm on mv.id = mm.version_id where mv.id = $1 order by mid asc", versionsByID) 125 | } 126 | 127 | func TestPostgreSQLGetMigrationByIDSQL(t *testing.T) { 128 | config, err := config.FromFile("../test/migrator-postgresql.yaml") 129 | assert.Nil(t, err) 130 | 131 | config.Driver = "postgres" 132 | dialect := newDialect(config) 133 | 134 | migrationByID := dialect.GetMigrationByIDSQL() 135 | 136 | assert.Equal(t, "select id, name, source_dir, filename, type, db_schema, created, contents, checksum from migrator.migrator_migrations where id = $1", migrationByID) 137 | } 138 | -------------------------------------------------------------------------------- /docker-entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | DEFAULT_YAML_LOCATION="/data/migrator.yaml" 4 | 5 | # if migrator config file is not provided explicitly fallback to default location 6 | if [ -z "$MIGRATOR_YAML" ]; then 7 | MIGRATOR_YAML=$DEFAULT_YAML_LOCATION 8 | fi 9 | 10 | migrator -configFile "$MIGRATOR_YAML" 11 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/lukaszbudnik/migrator 2 | 3 | go 1.15 4 | 5 | require ( 6 | github.com/Azure/azure-storage-blob-go v0.15.0 7 | github.com/Azure/go-autorest/autorest/azure/auth v0.5.13 8 | github.com/DATA-DOG/go-sqlmock v1.5.2 9 | github.com/Depado/ginprom v1.8.1 10 | github.com/aws/aws-sdk-go v1.55.7 11 | github.com/denisenkom/go-mssqldb v0.12.3 12 | github.com/gin-gonic/gin v1.10.1 13 | 
github.com/go-sql-driver/mysql v1.8.1 14 | github.com/graph-gophers/graphql-go v1.6.0 15 | github.com/lib/pq v1.10.9 16 | github.com/stretchr/testify v1.10.0 17 | github.com/thedevsaddam/gojsonq/v2 v2.5.2 18 | gopkg.in/go-playground/assert.v1 v1.2.1 // indirect 19 | gopkg.in/go-playground/validator.v9 v9.31.0 20 | gopkg.in/yaml.v2 v2.4.0 21 | ) 22 | -------------------------------------------------------------------------------- /loader/azureblob_loader.go: -------------------------------------------------------------------------------- 1 | package loader 2 | 3 | import ( 4 | "bytes" 5 | "crypto/sha256" 6 | "encoding/hex" 7 | "fmt" 8 | "net/url" 9 | "os" 10 | "strings" 11 | 12 | "github.com/Azure/azure-storage-blob-go/azblob" 13 | "github.com/Azure/go-autorest/autorest/azure/auth" 14 | 15 | "github.com/lukaszbudnik/migrator/common" 16 | "github.com/lukaszbudnik/migrator/types" 17 | ) 18 | 19 | // azureBlobLoader is struct used for implementing Loader interface for loading migrations from Azure Blob 20 | type azureBlobLoader struct { 21 | baseLoader 22 | } 23 | 24 | // GetSourceMigrations returns all migrations from Azure Blob location 25 | func (abl *azureBlobLoader) GetSourceMigrations() []types.Migration { 26 | 27 | credential, err := abl.getAzureStorageCredentials() 28 | if err != nil { 29 | panic(err.Error()) 30 | } 31 | 32 | p := azblob.NewPipeline(credential, azblob.PipelineOptions{}) 33 | 34 | // migrator expects that container as a part of the service url 35 | // the URL can contain optional prefixes like prod/artefacts 36 | // for example: 37 | // https://lukaszbudniktest.blob.core.windows.net/mycontainer/ 38 | // https://lukaszbudniktest.blob.core.windows.net/mycontainer/prod/artefacts/ 39 | 40 | // check if optional prefixes are provided 41 | baseLocation := strings.TrimRight(abl.config.BaseLocation, "/") 42 | indx := common.FindNthIndex(baseLocation, '/', 4) 43 | 44 | optionalPrefixes := "" 45 | if indx > -1 { 46 | optionalPrefixes = 
baseLocation[indx+1:] 47 | baseLocation = baseLocation[:indx] 48 | } 49 | 50 | u, err := url.Parse(baseLocation) 51 | if err != nil { 52 | panic(err.Error()) 53 | } 54 | 55 | containerURL := azblob.NewContainerURL(*u, p) 56 | 57 | return abl.doGetSourceMigrations(containerURL, optionalPrefixes) 58 | } 59 | 60 | func (abl *azureBlobLoader) doGetSourceMigrations(containerURL azblob.ContainerURL, optionalPrefixes string) []types.Migration { 61 | migrations := []types.Migration{} 62 | 63 | singleMigrationsObjects := abl.getObjectList(containerURL, optionalPrefixes, abl.config.SingleMigrations) 64 | tenantMigrationsObjects := abl.getObjectList(containerURL, optionalPrefixes, abl.config.TenantMigrations) 65 | singleScriptsObjects := abl.getObjectList(containerURL, optionalPrefixes, abl.config.SingleScripts) 66 | tenantScriptsObjects := abl.getObjectList(containerURL, optionalPrefixes, abl.config.TenantScripts) 67 | 68 | migrationsMap := make(map[string][]types.Migration) 69 | abl.getObjects(containerURL, migrationsMap, singleMigrationsObjects, types.MigrationTypeSingleMigration) 70 | abl.getObjects(containerURL, migrationsMap, tenantMigrationsObjects, types.MigrationTypeTenantMigration) 71 | abl.sortMigrations(migrationsMap, &migrations) 72 | 73 | migrationsMap = make(map[string][]types.Migration) 74 | abl.getObjects(containerURL, migrationsMap, singleScriptsObjects, types.MigrationTypeSingleScript) 75 | abl.sortMigrations(migrationsMap, &migrations) 76 | 77 | migrationsMap = make(map[string][]types.Migration) 78 | abl.getObjects(containerURL, migrationsMap, tenantScriptsObjects, types.MigrationTypeTenantScript) 79 | abl.sortMigrations(migrationsMap, &migrations) 80 | 81 | return migrations 82 | } 83 | 84 | func (abl *azureBlobLoader) getObjectList(containerURL azblob.ContainerURL, optionalPrefixes string, prefixes []string) []string { 85 | objects := []string{} 86 | 87 | for _, prefix := range prefixes { 88 | 89 | for marker := (azblob.Marker{}); marker.NotDone(); { 90 
| 91 | var fullPrefix string 92 | if optionalPrefixes != "" { 93 | fullPrefix = optionalPrefixes + "/" + prefix + "/" 94 | } else { 95 | fullPrefix = prefix + "/" 96 | } 97 | 98 | listBlob, err := containerURL.ListBlobsFlatSegment(abl.ctx, marker, azblob.ListBlobsSegmentOptions{Prefix: fullPrefix}) 99 | if err != nil { 100 | panic(err.Error()) 101 | } 102 | marker = listBlob.NextMarker 103 | 104 | for _, blobInfo := range listBlob.Segment.BlobItems { 105 | objects = append(objects, blobInfo.Name) 106 | } 107 | } 108 | 109 | } 110 | 111 | return objects 112 | } 113 | 114 | func (abl *azureBlobLoader) getObjects(containerURL azblob.ContainerURL, migrationsMap map[string][]types.Migration, objects []string, migrationType types.MigrationType) { 115 | for _, o := range objects { 116 | blobURL := containerURL.NewBlobURL(o) 117 | 118 | get, err := blobURL.Download(abl.ctx, 0, 0, azblob.BlobAccessConditions{}, false, azblob.ClientProvidedKeyOptions{}) 119 | if err != nil { 120 | panic(err.Error()) 121 | } 122 | 123 | downloadedData := &bytes.Buffer{} 124 | reader := get.Body(azblob.RetryReaderOptions{}) 125 | downloadedData.ReadFrom(reader) 126 | reader.Close() 127 | 128 | contents := downloadedData.String() 129 | 130 | hasher := sha256.New() 131 | hasher.Write([]byte(contents)) 132 | file := fmt.Sprintf("%s/%s", abl.config.BaseLocation, o) 133 | from := strings.LastIndex(file, "/") 134 | sourceDir := file[0:from] 135 | name := file[from+1:] 136 | m := types.Migration{Name: name, SourceDir: sourceDir, File: file, MigrationType: migrationType, Contents: string(contents), CheckSum: hex.EncodeToString(hasher.Sum(nil))} 137 | 138 | e, ok := migrationsMap[m.Name] 139 | if ok { 140 | e = append(e, m) 141 | } else { 142 | e = []types.Migration{m} 143 | } 144 | migrationsMap[m.Name] = e 145 | 146 | } 147 | } 148 | 149 | func (abl *azureBlobLoader) getAzureStorageCredentials() (azblob.Credential, error) { 150 | // try shared key credentials first 151 | accountName, accountKey := 
os.Getenv("AZURE_STORAGE_ACCOUNT"), os.Getenv("AZURE_STORAGE_ACCESS_KEY") 152 | 153 | if len(accountName) > 0 && len(accountKey) > 0 { 154 | return azblob.NewSharedKeyCredential(accountName, accountKey) 155 | } 156 | 157 | // then try MSI and token credentials 158 | msiConfig := auth.NewMSIConfig() 159 | msiConfig.Resource = "https://storage.azure.com" 160 | 161 | azureServicePrincipalToken, err := msiConfig.ServicePrincipalToken() 162 | if err != nil { 163 | return nil, err 164 | } 165 | 166 | token := azureServicePrincipalToken.Token() 167 | 168 | credential := azblob.NewTokenCredential(token.AccessToken, nil) 169 | return credential, nil 170 | } 171 | 172 | func (abl *azureBlobLoader) HealthCheck() error { 173 | credential, err := abl.getAzureStorageCredentials() 174 | if err != nil { 175 | return err 176 | } 177 | 178 | p := azblob.NewPipeline(credential, azblob.PipelineOptions{}) 179 | 180 | // check if optional prefixes are provided 181 | baseLocation := strings.TrimRight(abl.config.BaseLocation, "/") 182 | indx := common.FindNthIndex(baseLocation, '/', 4) 183 | 184 | prefix := "" 185 | if indx > -1 { 186 | prefix = baseLocation[indx+1:] 187 | baseLocation = baseLocation[:indx] 188 | } 189 | 190 | u, err := url.Parse(baseLocation) 191 | if err != nil { 192 | return err 193 | } 194 | 195 | containerURL := azblob.NewContainerURL(*u, p) 196 | 197 | _, err = containerURL.ListBlobsFlatSegment(abl.ctx, azblob.Marker{}, azblob.ListBlobsSegmentOptions{Prefix: prefix, MaxResults: 1}) 198 | 199 | return err 200 | } 201 | -------------------------------------------------------------------------------- /loader/azureblob_loader_test.go: -------------------------------------------------------------------------------- 1 | package loader 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "os" 7 | "testing" 8 | 9 | "github.com/lukaszbudnik/migrator/config" 10 | "github.com/stretchr/testify/assert" 11 | ) 12 | 13 | func TestAzureGetSourceMigrations(t *testing.T) { 14 | 15 | 
accountName, accountKey := os.Getenv("AZURE_STORAGE_ACCOUNT"), os.Getenv("AZURE_STORAGE_ACCESS_KEY") 16 | 17 | if len(accountName) == 0 || len(accountKey) == 0 { 18 | t.Skip("skipping test AZURE_STORAGE_ACCOUNT or AZURE_STORAGE_ACCESS_KEY not set") 19 | } 20 | 21 | // migrator implements env variable substitution and normally we would use: 22 | // "https://${AZURE_STORAGE_ACCOUNT}.blob.core.windows.net/mycontainer" 23 | // however below we are creating the Config struct directly 24 | // and that's why we need to build correct URL ourselves 25 | baseLocation := fmt.Sprintf("https://%v.blob.core.windows.net/mycontainer", accountName) 26 | 27 | config := &config.Config{ 28 | BaseLocation: baseLocation, 29 | SingleMigrations: []string{"migrations/config", "migrations/ref"}, 30 | TenantMigrations: []string{"migrations/tenants"}, 31 | SingleScripts: []string{"migrations/config-scripts"}, 32 | TenantScripts: []string{"migrations/tenants-scripts"}, 33 | } 34 | 35 | loader := &azureBlobLoader{baseLoader{context.TODO(), config}} 36 | migrations := loader.GetSourceMigrations() 37 | 38 | assert.Len(t, migrations, 12) 39 | 40 | assert.Contains(t, migrations[0].File, "migrations/config/201602160001.sql") 41 | assert.Contains(t, migrations[1].File, "migrations/config/201602160002.sql") 42 | assert.Contains(t, migrations[2].File, "migrations/tenants/201602160002.sql") 43 | assert.Contains(t, migrations[3].File, "migrations/ref/201602160003.sql") 44 | assert.Contains(t, migrations[4].File, "migrations/tenants/201602160003.sql") 45 | assert.Contains(t, migrations[5].File, "migrations/ref/201602160004.sql") 46 | assert.Contains(t, migrations[6].File, "migrations/tenants/201602160004.sql") 47 | assert.Contains(t, migrations[7].File, "migrations/tenants/201602160005.sql") 48 | assert.Contains(t, migrations[8].File, "migrations/config-scripts/200012181227.sql") 49 | assert.Contains(t, migrations[9].File, "migrations/tenants-scripts/200001181228.sql") 50 | assert.Contains(t, 
migrations[10].File, "migrations/tenants-scripts/a.sql") 51 | assert.Contains(t, migrations[11].File, "migrations/tenants-scripts/b.sql") 52 | 53 | } 54 | 55 | func TestAzureGetSourceMigrationsWithOptionalPrefix(t *testing.T) { 56 | 57 | accountName, accountKey := os.Getenv("AZURE_STORAGE_ACCOUNT"), os.Getenv("AZURE_STORAGE_ACCESS_KEY") 58 | 59 | if len(accountName) == 0 || len(accountKey) == 0 { 60 | t.Skip("skipping test AZURE_STORAGE_ACCOUNT or AZURE_STORAGE_ACCESS_KEY not set") 61 | } 62 | 63 | baseLocation := fmt.Sprintf("https://%v.blob.core.windows.net/myothercontainer/prod/artefacts/", accountName) 64 | 65 | config := &config.Config{ 66 | BaseLocation: baseLocation, 67 | SingleMigrations: []string{"migrations/config", "migrations/ref"}, 68 | TenantMigrations: []string{"migrations/tenants"}, 69 | SingleScripts: []string{"migrations/config-scripts"}, 70 | TenantScripts: []string{"migrations/tenants-scripts"}, 71 | } 72 | 73 | loader := &azureBlobLoader{baseLoader{context.TODO(), config}} 74 | migrations := loader.GetSourceMigrations() 75 | 76 | assert.Len(t, migrations, 12) 77 | 78 | assert.Contains(t, migrations[0].File, "prod/artefacts/migrations/config/201602160001.sql") 79 | assert.Contains(t, migrations[1].File, "prod/artefacts/migrations/config/201602160002.sql") 80 | assert.Contains(t, migrations[2].File, "prod/artefacts/migrations/tenants/201602160002.sql") 81 | assert.Contains(t, migrations[3].File, "prod/artefacts/migrations/ref/201602160003.sql") 82 | assert.Contains(t, migrations[4].File, "prod/artefacts/migrations/tenants/201602160003.sql") 83 | assert.Contains(t, migrations[5].File, "prod/artefacts/migrations/ref/201602160004.sql") 84 | assert.Contains(t, migrations[6].File, "prod/artefacts/migrations/tenants/201602160004.sql") 85 | assert.Contains(t, migrations[7].File, "prod/artefacts/migrations/tenants/201602160005.sql") 86 | assert.Contains(t, migrations[8].File, "prod/artefacts/migrations/config-scripts/200012181227.sql") 87 | 
assert.Contains(t, migrations[9].File, "prod/artefacts/migrations/tenants-scripts/200001181228.sql") 88 | assert.Contains(t, migrations[10].File, "prod/artefacts/migrations/tenants-scripts/a.sql") 89 | assert.Contains(t, migrations[11].File, "prod/artefacts/migrations/tenants-scripts/b.sql") 90 | 91 | } 92 | 93 | func TestAzureHealthCheck(t *testing.T) { 94 | accountName, accountKey := os.Getenv("AZURE_STORAGE_ACCOUNT"), os.Getenv("AZURE_STORAGE_ACCESS_KEY") 95 | 96 | if len(accountName) == 0 || len(accountKey) == 0 { 97 | t.Skip("skipping test AZURE_STORAGE_ACCOUNT or AZURE_STORAGE_ACCESS_KEY not set") 98 | } 99 | 100 | baseLocation := fmt.Sprintf("https://%v.blob.core.windows.net/myothercontainer/prod/artefacts/", accountName) 101 | 102 | config := &config.Config{ 103 | BaseLocation: baseLocation, 104 | SingleMigrations: []string{"migrations/config", "migrations/ref"}, 105 | TenantMigrations: []string{"migrations/tenants"}, 106 | SingleScripts: []string{"migrations/config-scripts"}, 107 | TenantScripts: []string{"migrations/tenants-scripts"}, 108 | } 109 | 110 | loader := &azureBlobLoader{baseLoader{context.TODO(), config}} 111 | err := loader.HealthCheck() 112 | assert.Nil(t, err) 113 | } 114 | 115 | func TestAzureMsiCredentials(t *testing.T) { 116 | // in CI/CD env the MSI credentials are not available 117 | // this code just assures that if no shared key envs are present it will fallback to MSI 118 | // unsetting one of the shared key envs will cause fallback to MSI 119 | os.Unsetenv("AZURE_STORAGE_ACCESS_KEY") 120 | 121 | config := &config.Config{ 122 | BaseLocation: "https://justtesting.blob.core.windows.net/myothercontainer/prod/artefacts/", 123 | SingleMigrations: []string{"migrations/config", "migrations/ref"}, 124 | TenantMigrations: []string{"migrations/tenants"}, 125 | SingleScripts: []string{"migrations/config-scripts"}, 126 | TenantScripts: []string{"migrations/tenants-scripts"}, 127 | } 128 | 129 | loader := 
&azureBlobLoader{baseLoader{context.TODO(), config}} 130 | loader.getAzureStorageCredentials() 131 | } 132 | -------------------------------------------------------------------------------- /loader/disk_loader.go: -------------------------------------------------------------------------------- 1 | package loader 2 | 3 | import ( 4 | "crypto/sha256" 5 | "encoding/hex" 6 | "fmt" 7 | "io/ioutil" 8 | "path/filepath" 9 | "strings" 10 | 11 | "github.com/lukaszbudnik/migrator/types" 12 | ) 13 | 14 | // diskLoader is struct used for implementing Loader interface for loading migrations from disk 15 | type diskLoader struct { 16 | baseLoader 17 | } 18 | 19 | // GetSourceMigrations returns all migrations from disk 20 | func (dl *diskLoader) GetSourceMigrations() []types.Migration { 21 | migrations := []types.Migration{} 22 | 23 | absBaseDir, err := filepath.Abs(dl.config.BaseLocation) 24 | if err != nil { 25 | panic(fmt.Sprintf("Could not convert baseLocation to absolute path: %v", err.Error())) 26 | } 27 | 28 | singleMigrationsDirs := dl.getDirs(absBaseDir, dl.config.SingleMigrations) 29 | tenantMigrationsDirs := dl.getDirs(absBaseDir, dl.config.TenantMigrations) 30 | singleScriptsDirs := dl.getDirs(absBaseDir, dl.config.SingleScripts) 31 | tenantScriptsDirs := dl.getDirs(absBaseDir, dl.config.TenantScripts) 32 | 33 | migrationsMap := make(map[string][]types.Migration) 34 | dl.readFromDirs(migrationsMap, singleMigrationsDirs, types.MigrationTypeSingleMigration) 35 | dl.readFromDirs(migrationsMap, tenantMigrationsDirs, types.MigrationTypeTenantMigration) 36 | dl.sortMigrations(migrationsMap, &migrations) 37 | 38 | migrationsMap = make(map[string][]types.Migration) 39 | dl.readFromDirs(migrationsMap, singleScriptsDirs, types.MigrationTypeSingleScript) 40 | dl.sortMigrations(migrationsMap, &migrations) 41 | 42 | migrationsMap = make(map[string][]types.Migration) 43 | dl.readFromDirs(migrationsMap, tenantScriptsDirs, types.MigrationTypeTenantScript) 44 | 
dl.sortMigrations(migrationsMap, &migrations) 45 | 46 | return migrations 47 | } 48 | 49 | func (dl *diskLoader) HealthCheck() error { 50 | absBaseDir, err := filepath.Abs(dl.config.BaseLocation) 51 | if err != nil { 52 | return err 53 | } 54 | _, err = ioutil.ReadDir(absBaseDir) 55 | return err 56 | } 57 | 58 | func (dl *diskLoader) getDirs(baseDir string, migrationsDirs []string) []string { 59 | var filteredDirs []string 60 | for _, migrationsDir := range migrationsDirs { 61 | filteredDirs = append(filteredDirs, filepath.Join(baseDir, migrationsDir)) 62 | } 63 | return filteredDirs 64 | } 65 | 66 | func (dl *diskLoader) readFromDirs(migrations map[string][]types.Migration, sourceDirs []string, migrationType types.MigrationType) { 67 | for _, sourceDir := range sourceDirs { 68 | files, err := ioutil.ReadDir(sourceDir) 69 | if err != nil { 70 | panic(fmt.Sprintf("Could not read source dir %v: %v", sourceDir, err.Error())) 71 | } 72 | for _, file := range files { 73 | if !file.IsDir() { 74 | fullPath := filepath.Join(sourceDir, file.Name()) 75 | contents, err := ioutil.ReadFile(fullPath) 76 | if err != nil { 77 | panic(fmt.Sprintf("Could not read file %v: %v", fullPath, err.Error())) 78 | } 79 | hasher := sha256.New() 80 | hasher.Write([]byte(contents)) 81 | name := strings.Replace(file.Name(), dl.config.BaseLocation, "", 1) 82 | m := types.Migration{Name: name, SourceDir: sourceDir, File: filepath.Join(sourceDir, file.Name()), MigrationType: migrationType, Contents: string(contents), CheckSum: hex.EncodeToString(hasher.Sum(nil))} 83 | 84 | e, ok := migrations[m.Name] 85 | if ok { 86 | e = append(e, m) 87 | } else { 88 | e = []types.Migration{m} 89 | } 90 | migrations[m.Name] = e 91 | } 92 | } 93 | } 94 | } 95 | -------------------------------------------------------------------------------- /loader/disk_loader_test.go: -------------------------------------------------------------------------------- 1 | package loader 2 | 3 | import ( 4 | "context" 5 | "testing" 6 | 
7 | "github.com/lukaszbudnik/migrator/config" 8 | "github.com/stretchr/testify/assert" 9 | ) 10 | 11 | func TestDiskReadDiskMigrationsNonExistingBaseLocationError(t *testing.T) { 12 | var config config.Config 13 | config.BaseLocation = "xyzabc" 14 | config.SingleMigrations = []string{"migrations/config"} 15 | 16 | loader := New(context.TODO(), &config) 17 | 18 | didPanic := false 19 | var message interface{} 20 | func() { 21 | 22 | defer func() { 23 | if message = recover(); message != nil { 24 | didPanic = true 25 | } 26 | }() 27 | 28 | loader.GetSourceMigrations() 29 | 30 | }() 31 | assert.True(t, didPanic) 32 | assert.Contains(t, message, "xyzabc/migrations/config: no such file or directory") 33 | } 34 | 35 | func TestDiskReadDiskMigrationsNonExistingMigrationsDirError(t *testing.T) { 36 | var config config.Config 37 | config.BaseLocation = "../test" 38 | config.SingleMigrations = []string{"migrations/abcdef"} 39 | 40 | loader := New(context.TODO(), &config) 41 | 42 | didPanic := false 43 | var message interface{} 44 | func() { 45 | 46 | defer func() { 47 | if message = recover(); message != nil { 48 | didPanic = true 49 | } 50 | }() 51 | 52 | loader.GetSourceMigrations() 53 | 54 | }() 55 | assert.True(t, didPanic) 56 | assert.Contains(t, message, "test/migrations/abcdef: no such file or directory") 57 | } 58 | 59 | func TestDiskGetDiskMigrations(t *testing.T) { 60 | var config config.Config 61 | config.BaseLocation = "../test" 62 | config.SingleMigrations = []string{"migrations/config", "migrations/ref"} 63 | config.TenantMigrations = []string{"migrations/tenants"} 64 | config.SingleScripts = []string{"migrations/config-scripts"} 65 | config.TenantScripts = []string{"migrations/tenants-scripts"} 66 | 67 | loader := New(context.TODO(), &config) 68 | migrations := loader.GetSourceMigrations() 69 | 70 | assert.Len(t, migrations, 12) 71 | 72 | assert.Contains(t, migrations[0].File, "test/migrations/config/201602160001.sql") 73 | assert.Contains(t, 
migrations[1].File, "test/migrations/config/201602160002.sql") 74 | assert.Contains(t, migrations[2].File, "test/migrations/tenants/201602160002.sql") 75 | assert.Contains(t, migrations[3].File, "test/migrations/ref/201602160003.sql") 76 | assert.Contains(t, migrations[4].File, "test/migrations/tenants/201602160003.sql") 77 | assert.Contains(t, migrations[5].File, "test/migrations/ref/201602160004.sql") 78 | assert.Contains(t, migrations[6].File, "test/migrations/tenants/201602160004.sql") 79 | assert.Contains(t, migrations[7].File, "test/migrations/tenants/201602160005.sql") 80 | // SingleScripts are second to last 81 | assert.Contains(t, migrations[8].File, "test/migrations/config-scripts/200012181227.sql") 82 | // TenantScripts are last 83 | assert.Contains(t, migrations[9].File, "test/migrations/tenants-scripts/200001181228.sql") 84 | assert.Contains(t, migrations[10].File, "test/migrations/tenants-scripts/a.sql") 85 | assert.Contains(t, migrations[11].File, "test/migrations/tenants-scripts/b.sql") 86 | } 87 | 88 | func TestDiskHealthCheck(t *testing.T) { 89 | config := &config.Config{ 90 | BaseLocation: "/path/to/baseDir", 91 | } 92 | loader := New(context.TODO(), config) 93 | err := loader.HealthCheck() 94 | assert.NotNil(t, err) 95 | } 96 | -------------------------------------------------------------------------------- /loader/loader.go: -------------------------------------------------------------------------------- 1 | package loader 2 | 3 | import ( 4 | "context" 5 | "regexp" 6 | "sort" 7 | "strings" 8 | 9 | "github.com/lukaszbudnik/migrator/config" 10 | "github.com/lukaszbudnik/migrator/types" 11 | ) 12 | 13 | // Loader interface abstracts all loading operations performed by migrator 14 | type Loader interface { 15 | GetSourceMigrations() []types.Migration 16 | HealthCheck() error 17 | } 18 | 19 | // Factory is a factory method for creating Loader instance 20 | type Factory func(context.Context, *config.Config) Loader 21 | 22 | // New returns new 
instance of Loader 23 | func New(ctx context.Context, config *config.Config) Loader { 24 | if strings.HasPrefix(config.BaseLocation, "s3://") { 25 | return &s3Loader{baseLoader{ctx, config}} 26 | } 27 | if matched, _ := regexp.Match(`^https://.*\.blob\.core\.windows\.net/.*`, []byte(config.BaseLocation)); matched { 28 | return &azureBlobLoader{baseLoader{ctx, config}} 29 | } 30 | return &diskLoader{baseLoader{ctx, config}} 31 | } 32 | 33 | // baseLoader is the base struct for implementing Loader interface 34 | type baseLoader struct { 35 | ctx context.Context 36 | config *config.Config 37 | } 38 | 39 | func (bl *baseLoader) sortMigrations(migrationsMap map[string][]types.Migration, migrations *[]types.Migration) { 40 | keys := make([]string, 0, len(migrationsMap)) 41 | for key := range migrationsMap { 42 | keys = append(keys, key) 43 | } 44 | sort.Strings(keys) 45 | 46 | for _, key := range keys { 47 | ms := migrationsMap[key] 48 | *migrations = append(*migrations, ms...) 49 | } 50 | } 51 | -------------------------------------------------------------------------------- /loader/loader_test.go: -------------------------------------------------------------------------------- 1 | package loader 2 | 3 | import ( 4 | "context" 5 | "testing" 6 | 7 | "github.com/lukaszbudnik/migrator/config" 8 | "github.com/stretchr/testify/assert" 9 | ) 10 | 11 | func TestNewDiskLoader(t *testing.T) { 12 | config := &config.Config{ 13 | BaseLocation: "/path/to/baseDir", 14 | } 15 | loader := New(context.TODO(), config) 16 | assert.IsType(t, &diskLoader{}, loader) 17 | } 18 | 19 | func TestNewAzureBlobLoader(t *testing.T) { 20 | config := &config.Config{ 21 | BaseLocation: "https://lukaszbudniktest.blob.core.windows.net/mycontainer", 22 | } 23 | loader := New(context.TODO(), config) 24 | assert.IsType(t, &azureBlobLoader{}, loader) 25 | } 26 | 27 | func TestNewS3Loader(t *testing.T) { 28 | config := &config.Config{ 29 | BaseLocation: "s3://lukaszbudniktest-bucket", 30 | } 31 | loader := 
New(context.TODO(), config) 32 | assert.IsType(t, &s3Loader{}, loader) 33 | } 34 | -------------------------------------------------------------------------------- /loader/s3_loader.go: -------------------------------------------------------------------------------- 1 | package loader 2 | 3 | import ( 4 | "bytes" 5 | "crypto/sha256" 6 | "encoding/hex" 7 | "fmt" 8 | "strings" 9 | 10 | "github.com/aws/aws-sdk-go/aws" 11 | "github.com/aws/aws-sdk-go/aws/session" 12 | "github.com/aws/aws-sdk-go/service/s3" 13 | "github.com/aws/aws-sdk-go/service/s3/s3iface" 14 | "github.com/lukaszbudnik/migrator/types" 15 | ) 16 | 17 | // s3Loader is struct used for implementing Loader interface for loading migrations from AWS S3 18 | type s3Loader struct { 19 | baseLoader 20 | } 21 | 22 | func (s3l *s3Loader) newClient() *s3.S3 { 23 | sess, err := session.NewSession() 24 | if err != nil { 25 | panic(err.Error()) 26 | } 27 | return s3.New(sess) 28 | } 29 | 30 | // GetSourceMigrations returns all migrations from AWS S3 location 31 | func (s3l *s3Loader) GetSourceMigrations() []types.Migration { 32 | client := s3l.newClient() 33 | return s3l.doGetSourceMigrations(client) 34 | } 35 | 36 | func (s3l *s3Loader) HealthCheck() error { 37 | client := s3l.newClient() 38 | return s3l.doHealthCheck(client) 39 | } 40 | 41 | func (s3l *s3Loader) doHealthCheck(client s3iface.S3API) error { 42 | bucketWithPrefixes := strings.Split(strings.Replace(strings.TrimRight(s3l.config.BaseLocation, "/"), "s3://", "", 1), "/") 43 | 44 | bucket := bucketWithPrefixes[0] 45 | prefix := "/" 46 | if len(bucketWithPrefixes) > 1 { 47 | prefix = strings.Join(bucketWithPrefixes[1:], "/") 48 | } 49 | 50 | input := &s3.ListObjectsV2Input{ 51 | Bucket: aws.String(bucket), 52 | Prefix: aws.String(prefix), 53 | MaxKeys: aws.Int64(1), 54 | } 55 | 56 | _, err := client.ListObjectsV2(input) 57 | 58 | return err 59 | } 60 | 61 | func (s3l *s3Loader) doGetSourceMigrations(client s3iface.S3API) []types.Migration { 62 | migrations 
:= []types.Migration{} 63 | 64 | bucketWithPrefixes := strings.Split(strings.Replace(strings.TrimRight(s3l.config.BaseLocation, "/"), "s3://", "", 1), "/") 65 | 66 | bucket := bucketWithPrefixes[0] 67 | optionalPrefixes := "" 68 | if len(bucketWithPrefixes) > 1 { 69 | optionalPrefixes = strings.Join(bucketWithPrefixes[1:], "/") 70 | } 71 | 72 | singleMigrationsObjects := s3l.getObjectList(client, bucket, optionalPrefixes, s3l.config.SingleMigrations) 73 | tenantMigrationsObjects := s3l.getObjectList(client, bucket, optionalPrefixes, s3l.config.TenantMigrations) 74 | singleScriptsObjects := s3l.getObjectList(client, bucket, optionalPrefixes, s3l.config.SingleScripts) 75 | tenantScriptsObjects := s3l.getObjectList(client, bucket, optionalPrefixes, s3l.config.TenantScripts) 76 | 77 | migrationsMap := make(map[string][]types.Migration) 78 | s3l.getObjects(client, bucket, migrationsMap, singleMigrationsObjects, types.MigrationTypeSingleMigration) 79 | s3l.getObjects(client, bucket, migrationsMap, tenantMigrationsObjects, types.MigrationTypeTenantMigration) 80 | s3l.sortMigrations(migrationsMap, &migrations) 81 | 82 | migrationsMap = make(map[string][]types.Migration) 83 | s3l.getObjects(client, bucket, migrationsMap, singleScriptsObjects, types.MigrationTypeSingleScript) 84 | s3l.sortMigrations(migrationsMap, &migrations) 85 | 86 | migrationsMap = make(map[string][]types.Migration) 87 | s3l.getObjects(client, bucket, migrationsMap, tenantScriptsObjects, types.MigrationTypeTenantScript) 88 | s3l.sortMigrations(migrationsMap, &migrations) 89 | 90 | return migrations 91 | } 92 | 93 | func (s3l *s3Loader) getObjectList(client s3iface.S3API, bucket, optionalPrefixes string, prefixes []string) []*string { 94 | objects := []*string{} 95 | 96 | for _, prefix := range prefixes { 97 | 98 | var fullPrefix string 99 | if optionalPrefixes != "" { 100 | fullPrefix = optionalPrefixes + "/" + prefix 101 | } else { 102 | fullPrefix = prefix 103 | } 104 | 105 | input := 
&s3.ListObjectsV2Input{ 106 | Bucket: aws.String(bucket), 107 | Prefix: aws.String(fullPrefix), 108 | MaxKeys: aws.Int64(1000), 109 | } 110 | 111 | err := client.ListObjectsV2Pages(input, 112 | func(page *s3.ListObjectsV2Output, lastPage bool) bool { 113 | 114 | for _, o := range page.Contents { 115 | objects = append(objects, o.Key) 116 | } 117 | 118 | return !lastPage 119 | }) 120 | 121 | if err != nil { 122 | panic(err.Error()) 123 | } 124 | } 125 | 126 | return objects 127 | } 128 | 129 | func (s3l *s3Loader) getObjects(client s3iface.S3API, bucket string, migrationsMap map[string][]types.Migration, objects []*string, migrationType types.MigrationType) { 130 | objectInput := &s3.GetObjectInput{Bucket: aws.String(bucket)} 131 | for _, o := range objects { 132 | objectInput.Key = o 133 | objectOutput, err := client.GetObject(objectInput) 134 | if err != nil { 135 | panic(err.Error()) 136 | } 137 | buf := new(bytes.Buffer) 138 | buf.ReadFrom(objectOutput.Body) 139 | contents := buf.String() 140 | 141 | hasher := sha256.New() 142 | hasher.Write([]byte(contents)) 143 | file := fmt.Sprintf("%s/%s", s3l.config.BaseLocation, *o) 144 | from := strings.LastIndex(file, "/") 145 | sourceDir := file[0:from] 146 | name := file[from+1:] 147 | m := types.Migration{Name: name, SourceDir: sourceDir, File: file, MigrationType: migrationType, Contents: string(contents), CheckSum: hex.EncodeToString(hasher.Sum(nil))} 148 | 149 | e, ok := migrationsMap[m.Name] 150 | if ok { 151 | e = append(e, m) 152 | } else { 153 | e = []types.Migration{m} 154 | } 155 | migrationsMap[m.Name] = e 156 | 157 | } 158 | } 159 | -------------------------------------------------------------------------------- /loader/s3_loader_test.go: -------------------------------------------------------------------------------- 1 | package loader 2 | 3 | import ( 4 | "bytes" 5 | "context" 6 | "fmt" 7 | "io/ioutil" 8 | "testing" 9 | 10 | "github.com/aws/aws-sdk-go/aws" 11 | "github.com/aws/aws-sdk-go/service/s3" 12 | 
"github.com/aws/aws-sdk-go/service/s3/s3iface" 13 | "github.com/lukaszbudnik/migrator/config" 14 | "github.com/stretchr/testify/assert" 15 | ) 16 | 17 | type mockS3Client struct { 18 | s3iface.S3API 19 | } 20 | 21 | func (m *mockS3Client) ListObjectsV2(input *s3.ListObjectsV2Input) (*s3.ListObjectsV2Output, error) { 22 | return &s3.ListObjectsV2Output{}, nil 23 | } 24 | 25 | func (m *mockS3Client) ListObjectsV2Pages(input *s3.ListObjectsV2Input, callback func(*s3.ListObjectsV2Output, bool) bool) error { 26 | 27 | var contents []*s3.Object 28 | 29 | switch *input.Prefix { 30 | case "migrations/config": 31 | file1 := &s3.Object{Key: aws.String(fmt.Sprintf("%v/%v", *input.Prefix, "201602160001.sql"))} 32 | file2 := &s3.Object{Key: aws.String(fmt.Sprintf("%v/%v", *input.Prefix, "201602160002.sql"))} 33 | contents = []*s3.Object{file1, file2} 34 | case "migrations/ref": 35 | file1 := &s3.Object{Key: aws.String(fmt.Sprintf("%v/%v", *input.Prefix, "202001100003.sql"))} 36 | file2 := &s3.Object{Key: aws.String(fmt.Sprintf("%v/%v", *input.Prefix, "202001100005.sql"))} 37 | contents = []*s3.Object{file1, file2} 38 | case "migrations/tenants": 39 | file1 := &s3.Object{Key: aws.String(fmt.Sprintf("%v/%v", *input.Prefix, "201602160002.sql"))} 40 | file2 := &s3.Object{Key: aws.String(fmt.Sprintf("%v/%v", *input.Prefix, "202001100004.sql"))} 41 | file3 := &s3.Object{Key: aws.String(fmt.Sprintf("%v/%v", *input.Prefix, "202001100007.sql"))} 42 | contents = []*s3.Object{file1, file2, file3} 43 | case "migrations/config-scripts": 44 | file1 := &s3.Object{Key: aws.String(fmt.Sprintf("%v/%v", *input.Prefix, "recreate-triggers.sql"))} 45 | file2 := &s3.Object{Key: aws.String(fmt.Sprintf("%v/%v", *input.Prefix, "cleanup.sql"))} 46 | contents = []*s3.Object{file1, file2} 47 | case "migrations/tenants-scripts": 48 | file1 := &s3.Object{Key: aws.String(fmt.Sprintf("%v/%v", *input.Prefix, "recreate-triggers.sql"))} 49 | file2 := &s3.Object{Key: aws.String(fmt.Sprintf("%v/%v", *input.Prefix, 
"cleanup.sql"))} 50 | file3 := &s3.Object{Key: aws.String(fmt.Sprintf("%v/%v", *input.Prefix, "run-reports.sql"))} 51 | contents = []*s3.Object{file1, file2, file3} 52 | case "application-x/prod/migrations/config": 53 | file1 := &s3.Object{Key: aws.String(fmt.Sprintf("%v/%v", *input.Prefix, "201602160001.sql"))} 54 | file2 := &s3.Object{Key: aws.String(fmt.Sprintf("%v/%v", *input.Prefix, "201602160002.sql"))} 55 | contents = []*s3.Object{file1, file2} 56 | case "application-x/prod/migrations/ref": 57 | file1 := &s3.Object{Key: aws.String(fmt.Sprintf("%v/%v", *input.Prefix, "202001100003.sql"))} 58 | file2 := &s3.Object{Key: aws.String(fmt.Sprintf("%v/%v", *input.Prefix, "202001100005.sql"))} 59 | contents = []*s3.Object{file1, file2} 60 | case "application-x/prod/migrations/tenants": 61 | file1 := &s3.Object{Key: aws.String(fmt.Sprintf("%v/%v", *input.Prefix, "201602160002.sql"))} 62 | file2 := &s3.Object{Key: aws.String(fmt.Sprintf("%v/%v", *input.Prefix, "202001100004.sql"))} 63 | file3 := &s3.Object{Key: aws.String(fmt.Sprintf("%v/%v", *input.Prefix, "202001100007.sql"))} 64 | contents = []*s3.Object{file1, file2, file3} 65 | case "application-x/prod/migrations/config-scripts": 66 | file1 := &s3.Object{Key: aws.String(fmt.Sprintf("%v/%v", *input.Prefix, "recreate-triggers.sql"))} 67 | file2 := &s3.Object{Key: aws.String(fmt.Sprintf("%v/%v", *input.Prefix, "cleanup.sql"))} 68 | contents = []*s3.Object{file1, file2} 69 | case "application-x/prod/migrations/tenants-scripts": 70 | file1 := &s3.Object{Key: aws.String(fmt.Sprintf("%v/%v", *input.Prefix, "recreate-triggers.sql"))} 71 | file2 := &s3.Object{Key: aws.String(fmt.Sprintf("%v/%v", *input.Prefix, "cleanup.sql"))} 72 | file3 := &s3.Object{Key: aws.String(fmt.Sprintf("%v/%v", *input.Prefix, "run-reports.sql"))} 73 | contents = []*s3.Object{file1, file2, file3} 74 | } 75 | 76 | callback(&s3.ListObjectsV2Output{ 77 | Contents: contents, 78 | KeyCount: aws.Int64(int64(len(contents))), 79 | }, true) 80 | return nil 
81 | } 82 | 83 | func (m *mockS3Client) GetObject(input *s3.GetObjectInput) (output *s3.GetObjectOutput, err error) { 84 | return &s3.GetObjectOutput{Body: ioutil.NopCloser(bytes.NewReader([]byte(*input.Key)))}, nil 85 | } 86 | 87 | func TestS3GetSourceMigrations(t *testing.T) { 88 | mock := &mockS3Client{} 89 | 90 | config := &config.Config{ 91 | BaseLocation: "s3://your-bucket-migrator", 92 | SingleMigrations: []string{"migrations/config", "migrations/ref"}, 93 | TenantMigrations: []string{"migrations/tenants"}, 94 | SingleScripts: []string{"migrations/config-scripts"}, 95 | TenantScripts: []string{"migrations/tenants-scripts"}, 96 | } 97 | 98 | loader := &s3Loader{baseLoader{context.TODO(), config}} 99 | migrations := loader.doGetSourceMigrations(mock) 100 | 101 | assert.Len(t, migrations, 12) 102 | 103 | assert.Contains(t, migrations[0].File, "migrations/config/201602160001.sql") 104 | assert.Contains(t, migrations[1].File, "migrations/config/201602160002.sql") 105 | assert.Contains(t, migrations[2].File, "migrations/tenants/201602160002.sql") 106 | assert.Contains(t, migrations[3].File, "migrations/ref/202001100003.sql") 107 | assert.Contains(t, migrations[4].File, "migrations/tenants/202001100004.sql") 108 | assert.Contains(t, migrations[5].File, "migrations/ref/202001100005.sql") 109 | assert.Contains(t, migrations[6].File, "migrations/tenants/202001100007.sql") 110 | assert.Contains(t, migrations[7].File, "migrations/config-scripts/cleanup.sql") 111 | assert.Contains(t, migrations[8].File, "migrations/config-scripts/recreate-triggers.sql") 112 | assert.Contains(t, migrations[9].File, "migrations/tenants-scripts/cleanup.sql") 113 | assert.Contains(t, migrations[10].File, "migrations/tenants-scripts/recreate-triggers.sql") 114 | assert.Contains(t, migrations[11].File, "migrations/tenants-scripts/run-reports.sql") 115 | 116 | } 117 | 118 | func TestS3GetSourceMigrationsBucketWithPrefix(t *testing.T) { 119 | mock := &mockS3Client{} 120 | 121 | config := 
&config.Config{ 122 | BaseLocation: "s3://your-bucket-migrator/application-x/prod/", 123 | SingleMigrations: []string{"migrations/config", "migrations/ref"}, 124 | TenantMigrations: []string{"migrations/tenants"}, 125 | SingleScripts: []string{"migrations/config-scripts"}, 126 | TenantScripts: []string{"migrations/tenants-scripts"}, 127 | } 128 | 129 | loader := &s3Loader{baseLoader{context.TODO(), config}} 130 | migrations := loader.doGetSourceMigrations(mock) 131 | 132 | assert.Len(t, migrations, 12) 133 | 134 | assert.Contains(t, migrations[0].File, "application-x/prod/migrations/config/201602160001.sql") 135 | assert.Contains(t, migrations[1].File, "application-x/prod/migrations/config/201602160002.sql") 136 | assert.Contains(t, migrations[2].File, "application-x/prod/migrations/tenants/201602160002.sql") 137 | assert.Contains(t, migrations[3].File, "application-x/prod/migrations/ref/202001100003.sql") 138 | assert.Contains(t, migrations[4].File, "application-x/prod/migrations/tenants/202001100004.sql") 139 | assert.Contains(t, migrations[5].File, "application-x/prod/migrations/ref/202001100005.sql") 140 | assert.Contains(t, migrations[6].File, "application-x/prod/migrations/tenants/202001100007.sql") 141 | assert.Contains(t, migrations[7].File, "application-x/prod/migrations/config-scripts/cleanup.sql") 142 | assert.Contains(t, migrations[8].File, "application-x/prod/migrations/config-scripts/recreate-triggers.sql") 143 | assert.Contains(t, migrations[9].File, "application-x/prod/migrations/tenants-scripts/cleanup.sql") 144 | assert.Contains(t, migrations[10].File, "application-x/prod/migrations/tenants-scripts/recreate-triggers.sql") 145 | assert.Contains(t, migrations[11].File, "application-x/prod/migrations/tenants-scripts/run-reports.sql") 146 | } 147 | 148 | func TestS3HealthCheck(t *testing.T) { 149 | mock := &mockS3Client{} 150 | 151 | config := &config.Config{ 152 | BaseLocation: "s3://your-bucket-migrator", 153 | SingleMigrations: 
[]string{"migrations/config", "migrations/ref"}, 154 | TenantMigrations: []string{"migrations/tenants"}, 155 | SingleScripts: []string{"migrations/config-scripts"}, 156 | TenantScripts: []string{"migrations/tenants-scripts"}, 157 | } 158 | 159 | loader := &s3Loader{baseLoader{context.TODO(), config}} 160 | err := loader.doHealthCheck(mock) 161 | 162 | assert.Nil(t, err) 163 | } 164 | -------------------------------------------------------------------------------- /metrics/metrics.go: -------------------------------------------------------------------------------- 1 | package metrics 2 | 3 | import ( 4 | "github.com/Depado/ginprom" 5 | ) 6 | 7 | // Metrics interface abstracts all metrics operations performed by migrator 8 | type Metrics interface { 9 | SetGaugeValue(name string, labelValues []string, value float64) error 10 | AddGaugeValue(name string, labelValues []string, value float64) error 11 | IncrementGaugeValue(name string, labelValues []string) error 12 | } 13 | 14 | // New returns new instance of Metrics, currently Prometheus is available 15 | func New(prometheus *ginprom.Prometheus) Metrics { 16 | return &prometheusMetrics{prometheus} 17 | } 18 | 19 | // prometheusMetrics is struct for implementing Prometheus metrics 20 | type prometheusMetrics struct { 21 | prometheus *ginprom.Prometheus 22 | } 23 | 24 | // SetGaugeValue sets guage to a value 25 | func (m *prometheusMetrics) SetGaugeValue(name string, labelValues []string, value float64) error { 26 | return m.prometheus.SetGaugeValue(name, labelValues, value) 27 | } 28 | 29 | // AddGaugeValue adds value to guage 30 | func (m *prometheusMetrics) AddGaugeValue(name string, labelValues []string, value float64) error { 31 | return m.prometheus.AddGaugeValue(name, labelValues, value) 32 | } 33 | 34 | // IncrementGaugeValue increments guage 35 | func (m *prometheusMetrics) IncrementGaugeValue(name string, labelValues []string) error { 36 | return m.prometheus.IncrementGaugeValue(name, labelValues) 37 | } 38 | 
-------------------------------------------------------------------------------- /metrics/metrics_test.go: -------------------------------------------------------------------------------- 1 | package metrics 2 | 3 | import ( 4 | "net/http" 5 | "net/http/httptest" 6 | "testing" 7 | 8 | "github.com/Depado/ginprom" 9 | "github.com/gin-gonic/gin" 10 | "github.com/stretchr/testify/assert" 11 | ) 12 | 13 | func newMetrics(r *gin.Engine) Metrics { 14 | p := ginprom.New( 15 | ginprom.Engine(r), 16 | ginprom.Namespace("migrator"), 17 | ginprom.Subsystem("gin"), 18 | ginprom.Path("/metrics"), 19 | ) 20 | 21 | p.AddCustomGauge("gauge", "Test guage", []string{"type"}) 22 | 23 | r.Use(p.Instrument()) 24 | 25 | metrics := New(p) 26 | 27 | return metrics 28 | } 29 | 30 | func TestMetrics(t *testing.T) { 31 | r := gin.New() 32 | 33 | metrics := newMetrics(r) 34 | metrics.SetGaugeValue("gauge", []string{"first"}, 1) 35 | metrics.SetGaugeValue("gauge", []string{"second"}, 1) 36 | metrics.AddGaugeValue("gauge", []string{"first"}, 1) 37 | metrics.IncrementGaugeValue("gauge", []string{"second"}) 38 | 39 | w := httptest.NewRecorder() 40 | req, _ := http.NewRequest("GET", "/metrics", nil) 41 | r.ServeHTTP(w, req) 42 | 43 | assert.Equal(t, http.StatusOK, w.Code) 44 | assert.Equal(t, "text/plain; version=0.0.4; charset=utf-8", w.Result().Header.Get("Content-Type")) 45 | assert.Contains(t, w.Body.String(), `migrator_gin_gauge{type="first"} 2`) 46 | assert.Contains(t, w.Body.String(), `migrator_gin_gauge{type="second"} 2`) 47 | } 48 | -------------------------------------------------------------------------------- /migrator.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "bytes" 5 | "context" 6 | "flag" 7 | "os" 8 | 9 | "github.com/gin-gonic/gin" 10 | "github.com/lukaszbudnik/migrator/common" 11 | "github.com/lukaszbudnik/migrator/config" 12 | "github.com/lukaszbudnik/migrator/coordinator" 13 | 
"github.com/lukaszbudnik/migrator/db" 14 | "github.com/lukaszbudnik/migrator/loader" 15 | "github.com/lukaszbudnik/migrator/metrics" 16 | "github.com/lukaszbudnik/migrator/notifications" 17 | "github.com/lukaszbudnik/migrator/server" 18 | "github.com/lukaszbudnik/migrator/types" 19 | ) 20 | 21 | const ( 22 | // DefaultConfigFile defines default file name of migrator configuration file 23 | DefaultConfigFile = "migrator.yaml" 24 | ) 25 | 26 | // GitRef stores git branch/tag, value injected during production build 27 | var GitRef string 28 | 29 | // GitSha stores git commit sha, value injected during production build 30 | var GitSha string 31 | 32 | func main() { 33 | versionInfo := &types.VersionInfo{Release: GitRef, Sha: GitSha, APIVersions: []types.APIVersion{types.APIV2}} 34 | 35 | common.Log("INFO", "migrator %+v", versionInfo) 36 | 37 | flag := flag.NewFlagSet(os.Args[0], flag.ContinueOnError) 38 | buf := new(bytes.Buffer) 39 | flag.SetOutput(buf) 40 | 41 | var configFile string 42 | flag.StringVar(&configFile, "configFile", DefaultConfigFile, "path to migrator configuration yaml file") 43 | 44 | if err := flag.Parse(os.Args[1:]); err != nil { 45 | common.Log("ERROR", buf.String()) 46 | os.Exit(1) 47 | } 48 | 49 | cfg, err := config.FromFile(configFile) 50 | if err != nil { 51 | common.Log("ERROR", "Error reading config file: %v", err) 52 | os.Exit(1) 53 | } 54 | 55 | var createCoordinator = func(ctx context.Context, config *config.Config, metrics metrics.Metrics) coordinator.Coordinator { 56 | coordinator := coordinator.New(ctx, config, metrics, db.New, loader.New, notifications.New) 57 | return coordinator 58 | } 59 | 60 | gin.SetMode(gin.ReleaseMode) 61 | g := server.CreateRouterAndPrometheus(versionInfo, cfg, createCoordinator) 62 | if err := g.Run(":" + server.GetPort(cfg)); err != nil { 63 | common.Log("ERROR", "Error starting migrator: %v", err) 64 | } 65 | 66 | } 67 | -------------------------------------------------------------------------------- 
/notifications/notifications.go: -------------------------------------------------------------------------------- 1 | package notifications 2 | 3 | import ( 4 | "bytes" 5 | "context" 6 | "encoding/json" 7 | "fmt" 8 | "io/ioutil" 9 | "net/http" 10 | "regexp" 11 | "strings" 12 | 13 | gojsonq "github.com/thedevsaddam/gojsonq/v2" 14 | 15 | "github.com/lukaszbudnik/migrator/config" 16 | "github.com/lukaszbudnik/migrator/types" 17 | ) 18 | 19 | const ( 20 | defaultContentType = "application/json" 21 | ) 22 | 23 | // Notifier interface abstracts all notifications performed by migrator 24 | type Notifier interface { 25 | Notify(*types.Summary) (string, error) 26 | } 27 | 28 | // baseNotifier type is a base struct embedded by all implementations of Notifier interface 29 | type baseNotifier struct { 30 | config *config.Config 31 | } 32 | 33 | func (bn *baseNotifier) Notify(summary *types.Summary) (string, error) { 34 | summaryJSON, err := json.MarshalIndent(summary, "", " ") 35 | if err != nil { 36 | return "", err 37 | } 38 | 39 | payload := string(summaryJSON) 40 | 41 | if template := bn.config.WebHookTemplate; len(template) > 0 { 42 | // ${summary} will be replaced with the JSON object 43 | template = strings.Replace(template, "${summary}", strings.ReplaceAll(payload, "\"", "\\\""), -1) 44 | 45 | // migrator also supports parsing individual fields using ${summary.field} syntax 46 | if strings.Contains(template, "${summary.") { 47 | r, _ := regexp.Compile(`\${summary.([a-zA-Z]+)}`) 48 | matches := r.FindAllStringSubmatch(template, -1) 49 | for _, m := range matches { 50 | value := gojsonq.New().FromString(payload).Find(m[1]) 51 | valueString := fmt.Sprintf("%v", value) 52 | template = strings.Replace(template, m[0], valueString, -1) 53 | } 54 | } 55 | payload = template 56 | } 57 | 58 | reader := bytes.NewReader([]byte(payload)) 59 | url := bn.config.WebHookURL 60 | 61 | req, err := http.NewRequest(http.MethodPost, url, reader) 62 | if err != nil { 63 | return "", err 64 | 
} 65 | for _, header := range bn.config.WebHookHeaders { 66 | pair := strings.SplitN(header, ":", 2) 67 | req.Header.Set(pair[0], pair[1]) 68 | } 69 | 70 | // set default content type 71 | if req.Header.Get("Content-Type") == "" { 72 | req.Header.Set("Content-Type", defaultContentType) 73 | } 74 | 75 | client := &http.Client{} 76 | resp, err := client.Do(req) 77 | if err != nil { 78 | return "", err 79 | } 80 | defer resp.Body.Close() 81 | 82 | b, err := ioutil.ReadAll(resp.Body) 83 | if err != nil { 84 | return "", err 85 | } 86 | 87 | return string(b), nil 88 | } 89 | 90 | type noopNotifier struct { 91 | baseNotifier 92 | } 93 | 94 | func (sn *noopNotifier) Notify(summary *types.Summary) (string, error) { 95 | return "noop", nil 96 | } 97 | 98 | // Factory is a factory method for creating Loader instance 99 | type Factory func(context.Context, *config.Config) Notifier 100 | 101 | // New creates Notifier object based on config passed 102 | func New(ctx context.Context, config *config.Config) Notifier { 103 | // webhook URL is required 104 | if len(config.WebHookURL) > 0 { 105 | return &baseNotifier{config} 106 | } 107 | // otherwise return noop 108 | return &noopNotifier{baseNotifier{config}} 109 | } 110 | -------------------------------------------------------------------------------- /notifications/notifications_test.go: -------------------------------------------------------------------------------- 1 | package notifications 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "io/ioutil" 7 | "net/http" 8 | "net/http/httptest" 9 | "testing" 10 | "time" 11 | 12 | "github.com/graph-gophers/graphql-go" 13 | 14 | "github.com/lukaszbudnik/migrator/config" 15 | "github.com/lukaszbudnik/migrator/types" 16 | "github.com/stretchr/testify/assert" 17 | ) 18 | 19 | func TestNoopNotifier(t *testing.T) { 20 | config := config.Config{} 21 | notifier := New(context.TODO(), &config) 22 | result, err := notifier.Notify(&types.Summary{}) 23 | 24 | assert.Equal(t, "noop", result) 25 | 
assert.Nil(t, err) 26 | } 27 | 28 | func TestWebHookNotifier(t *testing.T) { 29 | 30 | var contentType string 31 | var requestBody string 32 | 33 | server := httptest.NewServer(func() http.HandlerFunc { 34 | return func(w http.ResponseWriter, r *http.Request) { 35 | w.Header().Set("Content-Type", "application/json") 36 | request, _ := ioutil.ReadAll(r.Body) 37 | requestBody = string(request) 38 | contentType = r.Header.Get("Content-Type") 39 | w.Write([]byte("ok")) 40 | } 41 | }()) 42 | 43 | config := config.Config{} 44 | config.WebHookURL = server.URL 45 | config.WebHookTemplate = `{"text": "New version created: ${summary.versionId} started at: ${summary.startedAt} and took ${summary.duration}. Migrations/scripts total: ${summary.migrationsGrandTotal}/${summary.scriptsGrandTotal}. Full results are: ${summary}"}` 46 | 47 | notifier := New(context.TODO(), &config) 48 | 49 | summary := &types.Summary{ 50 | VersionID: 213, 51 | StartedAt: graphql.Time{Time: time.Now()}, 52 | Tenants: 123, 53 | Duration: 3213, 54 | MigrationsGrandTotal: 1024, 55 | ScriptsGrandTotal: 74, 56 | } 57 | result, err := notifier.Notify(summary) 58 | 59 | assert.Nil(t, err) 60 | assert.Equal(t, "ok", result) 61 | assert.Equal(t, "application/json", string(contentType)) 62 | // make sure placeholders were replaced 63 | assert.NotContains(t, requestBody, "${summary") 64 | // explicit placeholders ${summary.property} 65 | assert.Contains(t, requestBody, fmt.Sprint(summary.VersionID)) 66 | // graphql.Time fields needs to be marshalled first 67 | startedAt, _ := summary.StartedAt.MarshalText() 68 | assert.Contains(t, requestBody, string(startedAt)) 69 | assert.Contains(t, requestBody, fmt.Sprint(summary.Duration)) 70 | assert.Contains(t, requestBody, fmt.Sprint(summary.MigrationsGrandTotal)) 71 | assert.Contains(t, requestBody, fmt.Sprint(summary.ScriptsGrandTotal)) 72 | // mapped as ${summary} 73 | assert.Contains(t, requestBody, fmt.Sprint(summary.Tenants)) 74 | } 75 | 76 | func 
TestWebHookNotifierCustomHeaders(t *testing.T) { 77 | 78 | var xCustomHeader string 79 | var authorizationHeader string 80 | var contentTypeHeader string 81 | 82 | server := httptest.NewServer(func() http.HandlerFunc { 83 | return func(w http.ResponseWriter, r *http.Request) { 84 | w.Header().Set("Content-Type", "application/json") 85 | xCustomHeader = r.Header.Get("X-CustomHeader") 86 | authorizationHeader = r.Header.Get("Authorization") 87 | contentTypeHeader = r.Header.Get("Content-Type") 88 | w.Write([]byte(`{"result": "ok"}`)) 89 | } 90 | }()) 91 | 92 | config := config.Config{} 93 | config.WebHookURL = server.URL 94 | config.WebHookHeaders = []string{"Authorization: Basic QWxhZGRpbjpPcGVuU2VzYW1l", "Content-Type: application/x-yaml", "X-CustomHeader: value1,value2"} 95 | 96 | notifier := New(context.TODO(), &config) 97 | 98 | result, err := notifier.Notify(&types.Summary{}) 99 | 100 | assert.Nil(t, err) 101 | assert.Equal(t, `{"result": "ok"}`, result) 102 | assert.Equal(t, `Basic QWxhZGRpbjpPcGVuU2VzYW1l`, string(authorizationHeader)) 103 | assert.Equal(t, `application/x-yaml`, string(contentTypeHeader)) 104 | assert.Equal(t, `value1,value2`, string(xCustomHeader)) 105 | } 106 | 107 | func TestWebHookURLError(t *testing.T) { 108 | config := config.Config{} 109 | config.WebHookURL = "://xczxcvv/path" 110 | notifier := New(context.TODO(), &config) 111 | _, err := notifier.Notify(&types.Summary{}) 112 | 113 | assert.NotNil(t, err) 114 | assert.Contains(t, err.Error(), "missing protocol scheme") 115 | } 116 | 117 | func TestWebHookClientError(t *testing.T) { 118 | config := config.Config{} 119 | // passes URL parsing but HTTP client returns error 120 | config.WebHookURL = "non-existing-server" 121 | notifier := New(context.TODO(), &config) 122 | _, err := notifier.Notify(&types.Summary{}) 123 | 124 | assert.NotNil(t, err) 125 | assert.Contains(t, err.Error(), "unsupported protocol scheme") 126 | } 127 | 
-------------------------------------------------------------------------------- /server/server.go: -------------------------------------------------------------------------------- 1 | package server 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "net/http" 7 | "runtime/debug" 8 | "strings" 9 | "time" 10 | 11 | "github.com/Depado/ginprom" 12 | "github.com/gin-gonic/gin" 13 | "github.com/graph-gophers/graphql-go" 14 | 15 | "github.com/lukaszbudnik/migrator/common" 16 | "github.com/lukaszbudnik/migrator/config" 17 | "github.com/lukaszbudnik/migrator/coordinator" 18 | "github.com/lukaszbudnik/migrator/data" 19 | "github.com/lukaszbudnik/migrator/metrics" 20 | "github.com/lukaszbudnik/migrator/types" 21 | ) 22 | 23 | const ( 24 | defaultPort string = "8080" 25 | requestIDHeader string = "X-Request-ID" 26 | ) 27 | 28 | type errorResponse struct { 29 | Errors []errorMessage `json:"errors"` 30 | } 31 | 32 | type errorMessage struct { 33 | Message string `json:"message"` 34 | } 35 | 36 | // GetPort gets the port from config or defaultPort 37 | func GetPort(config *config.Config) string { 38 | if strings.TrimSpace(config.Port) == "" { 39 | return defaultPort 40 | } 41 | return config.Port 42 | } 43 | 44 | func requestIDHandler() gin.HandlerFunc { 45 | return func(c *gin.Context) { 46 | requestID := c.Request.Header.Get(requestIDHeader) 47 | if requestID == "" { 48 | requestID = fmt.Sprintf("%d", time.Now().UnixNano()) 49 | } 50 | ctx := context.WithValue(c.Request.Context(), common.RequestIDKey{}, requestID) 51 | c.Request = c.Request.WithContext(ctx) 52 | c.Next() 53 | } 54 | } 55 | 56 | func logLevelHandler(config *config.Config) gin.HandlerFunc { 57 | return func(c *gin.Context) { 58 | ctx := context.WithValue(c.Request.Context(), common.LogLevelKey{}, config.LogLevel) 59 | c.Request = c.Request.WithContext(ctx) 60 | c.Next() 61 | } 62 | } 63 | 64 | func recovery() gin.HandlerFunc { 65 | return func(c *gin.Context) { 66 | defer func() { 67 | if err := recover(); err != nil 
{ 68 | common.LogPanic(c.Request.Context(), "Panic recovered: %v", err) 69 | if gin.IsDebugging() { 70 | debug.PrintStack() 71 | } 72 | errorMsg := errorMessage{err.(string)} 73 | c.AbortWithStatusJSON(http.StatusInternalServerError, errorResponse{Errors: []errorMessage{errorMsg}}) 74 | } 75 | }() 76 | c.Next() 77 | } 78 | } 79 | 80 | func requestLoggerHandler() gin.HandlerFunc { 81 | return func(c *gin.Context) { 82 | common.LogInfo(c.Request.Context(), "clientIP=%v method=%v request=%v", c.ClientIP(), c.Request.Method, c.Request.URL.RequestURI()) 83 | c.Next() 84 | } 85 | } 86 | 87 | func makeHandler(config *config.Config, metrics metrics.Metrics, newCoordinator coordinator.Factory, handler func(*gin.Context, *config.Config, metrics.Metrics, coordinator.Factory)) gin.HandlerFunc { 88 | return func(c *gin.Context) { 89 | handler(c, config, metrics, newCoordinator) 90 | } 91 | } 92 | 93 | func configHandler(c *gin.Context, config *config.Config, metrics metrics.Metrics, newCoordinator coordinator.Factory) { 94 | c.YAML(200, config) 95 | } 96 | 97 | func schemaHandler(c *gin.Context, config *config.Config, metrics metrics.Metrics, newCoordinator coordinator.Factory) { 98 | c.String(http.StatusOK, strings.TrimSpace(data.SchemaDefinition)) 99 | } 100 | 101 | func healthHandler(c *gin.Context, config *config.Config, metrics metrics.Metrics, newCoordinator coordinator.Factory) { 102 | coordinator := newCoordinator(c.Request.Context(), config, metrics) 103 | healthStatus := coordinator.HealthCheck() 104 | 105 | status := http.StatusOK 106 | if healthStatus.Status == types.HealthStatusDown { 107 | status = http.StatusServiceUnavailable 108 | } 109 | 110 | c.JSON(status, healthStatus) 111 | } 112 | 113 | // GraphQL endpoint 114 | func serviceHandler(c *gin.Context, config *config.Config, metrics metrics.Metrics, newCoordinator coordinator.Factory) { 115 | var params struct { 116 | Query string `json:"query"` 117 | OperationName string `json:"operationName"` 118 | Variables 
map[string]interface{} `json:"variables"` 119 | } 120 | if err := c.ShouldBindJSON(¶ms); err != nil { 121 | common.LogError(c.Request.Context(), "Bad request: %v", err.Error()) 122 | errorMsg := errorMessage{"Invalid request, please see documentation for valid JSON payload"} 123 | c.AbortWithStatusJSON(http.StatusBadRequest, errorResponse{Errors: []errorMessage{errorMsg}}) 124 | return 125 | } 126 | 127 | coordinator := newCoordinator(c.Request.Context(), config, metrics) 128 | defer coordinator.Dispose() 129 | opts := []graphql.SchemaOpt{graphql.UseFieldResolvers()} 130 | schema := graphql.MustParseSchema(data.SchemaDefinition, &data.RootResolver{Coordinator: coordinator}, opts...) 131 | 132 | response := schema.Exec(c.Request.Context(), params.Query, params.OperationName, params.Variables) 133 | if response.Errors == nil { 134 | c.JSON(http.StatusOK, response) 135 | } else { 136 | c.JSON(http.StatusInternalServerError, response) 137 | } 138 | 139 | } 140 | 141 | func CreateRouterAndPrometheus(versionInfo *types.VersionInfo, config *config.Config, newCoordinator coordinator.Factory) *gin.Engine { 142 | r := gin.New() 143 | 144 | p := ginprom.New( 145 | ginprom.Engine(r), 146 | ginprom.Namespace("migrator"), 147 | ginprom.Subsystem("gin"), 148 | ginprom.Path("/metrics"), 149 | ) 150 | p.AddCustomGauge("info", "Information about migrator app", []string{"version"}) 151 | p.AddCustomGauge("versions_created", "Number of versions created by migrator", []string{}) 152 | p.AddCustomGauge("tenants_created", "Number of migrations applied by migrator", []string{}) 153 | p.AddCustomGauge("migrations_applied", "Number of migrations applied by migrator", []string{"type"}) 154 | 155 | p.SetGaugeValue("info", []string{versionInfo.Release + " @ " + versionInfo.Sha}, 1) 156 | 157 | r.Use(p.Instrument()) 158 | 159 | metrics := metrics.New(p) 160 | 161 | return SetupRouter(r, versionInfo, config, metrics, newCoordinator) 162 | } 163 | 164 | // SetupRouter setups router 165 | func 
SetupRouter(r *gin.Engine, versionInfo *types.VersionInfo, config *config.Config, metrics metrics.Metrics, newCoordinator coordinator.Factory) *gin.Engine { 166 | r.HandleMethodNotAllowed = true 167 | r.Use(logLevelHandler(config), recovery(), requestIDHandler(), requestLoggerHandler()) 168 | 169 | if strings.TrimSpace(config.PathPrefix) == "" { 170 | config.PathPrefix = "/" 171 | } 172 | 173 | r.GET(config.PathPrefix+"/", func(c *gin.Context) { 174 | c.JSON(http.StatusOK, versionInfo) 175 | }) 176 | 177 | r.GET(config.PathPrefix+"/health", makeHandler(config, metrics, newCoordinator, healthHandler)) 178 | 179 | v2 := r.Group(config.PathPrefix + "/v2") 180 | v2.GET("/config", makeHandler(config, metrics, newCoordinator, configHandler)) 181 | v2.GET("/schema", makeHandler(config, metrics, newCoordinator, schemaHandler)) 182 | v2.POST("/service", makeHandler(config, metrics, newCoordinator, serviceHandler)) 183 | 184 | return r 185 | } 186 | -------------------------------------------------------------------------------- /server/server_mocks.go: -------------------------------------------------------------------------------- 1 | package server 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "strings" 7 | "time" 8 | 9 | "github.com/graph-gophers/graphql-go" 10 | 11 | "github.com/lukaszbudnik/migrator/config" 12 | "github.com/lukaszbudnik/migrator/coordinator" 13 | "github.com/lukaszbudnik/migrator/metrics" 14 | "github.com/lukaszbudnik/migrator/types" 15 | ) 16 | 17 | type mockedCoordinator struct { 18 | errorThreshold int 19 | counter int 20 | } 21 | 22 | func newMockedCoordinator(ctx context.Context, config *config.Config, metrics metrics.Metrics) coordinator.Coordinator { 23 | return newMockedErrorCoordinator(-1)(ctx, config, metrics) 24 | } 25 | 26 | func newMockedErrorCoordinator(errorThreshold int) func(context.Context, *config.Config, metrics.Metrics) coordinator.Coordinator { 27 | return func(ctx context.Context, config *config.Config, metrics metrics.Metrics) 
coordinator.Coordinator { 28 | return &mockedCoordinator{errorThreshold: errorThreshold} 29 | } 30 | } 31 | 32 | func (m *mockedCoordinator) Dispose() { 33 | } 34 | 35 | func (m *mockedCoordinator) CreateTenant(string, types.Action, bool, string) *types.CreateResults { 36 | return &types.CreateResults{Summary: &types.Summary{}, Version: &types.Version{}} 37 | } 38 | 39 | func (m *mockedCoordinator) CreateVersion(string, types.Action, bool) *types.CreateResults { 40 | return &types.CreateResults{Summary: &types.Summary{}, Version: &types.Version{}} 41 | } 42 | 43 | func (m *mockedCoordinator) GetSourceMigrations(_ *coordinator.SourceMigrationFilters) []types.Migration { 44 | if m.errorThreshold == m.counter { 45 | panic(fmt.Sprintf("Mocked Coordinator: threshold %v reached", m.errorThreshold)) 46 | } 47 | if m.errorThreshold != -1 { 48 | m.counter++ 49 | } 50 | m1 := types.Migration{Name: "201602220000.sql", SourceDir: "source", File: "source/201602220000.sql", MigrationType: types.MigrationTypeSingleMigration, Contents: "select abc"} 51 | m2 := types.Migration{Name: "201602220001.sql", SourceDir: "source", File: "source/201602220001.sql", MigrationType: types.MigrationTypeTenantMigration, Contents: "select def"} 52 | return []types.Migration{m1, m2} 53 | } 54 | 55 | func (m *mockedCoordinator) GetSourceMigrationByFile(file string) (*types.Migration, error) { 56 | if m.errorThreshold == m.counter { 57 | panic(fmt.Sprintf("Mocked Coordinator: threshold %v reached", m.errorThreshold)) 58 | } 59 | if m.errorThreshold != -1 { 60 | m.counter++ 61 | } 62 | i := strings.Index(file, "/") 63 | sourceDir := file[:i] 64 | name := file[i+1:] 65 | m1 := types.Migration{Name: name, SourceDir: sourceDir, File: file, MigrationType: types.MigrationTypeSingleMigration, Contents: "select abc"} 66 | return &m1, nil 67 | } 68 | 69 | func (m *mockedCoordinator) GetAppliedMigrations() []types.DBMigration { 70 | m1 := types.Migration{Name: "201602220000.sql", SourceDir: "source", File: 
"source/201602220000.sql", MigrationType: types.MigrationTypeSingleMigration, Contents: "select abc", CheckSum: "sha256"} 71 | d1 := time.Date(2016, 02, 22, 16, 41, 1, 123, time.UTC) 72 | ms := []types.DBMigration{{Migration: m1, Schema: "source", Created: graphql.Time{Time: d1}}} 73 | return ms 74 | } 75 | 76 | // part of interface but not used in server tests - tested in data package 77 | func (m *mockedCoordinator) GetDBMigrationByID(ID int32) (*types.DBMigration, error) { 78 | return nil, nil 79 | } 80 | 81 | func (m *mockedCoordinator) GetTenants() []types.Tenant { 82 | a := types.Tenant{Name: "a"} 83 | b := types.Tenant{Name: "b"} 84 | c := types.Tenant{Name: "c"} 85 | return []types.Tenant{a, b, c} 86 | } 87 | 88 | // part of interface but not used in server tests - tested in data package 89 | func (m *mockedCoordinator) GetVersions() []types.Version { 90 | return []types.Version{} 91 | } 92 | 93 | // part of interface but not used in server tests - tested in data package 94 | func (m *mockedCoordinator) GetVersionsByFile(file string) []types.Version { 95 | return []types.Version{} 96 | } 97 | 98 | // part of interface but not used in server tests - tested in data package 99 | func (m *mockedCoordinator) GetVersionByID(ID int32) (*types.Version, error) { 100 | return nil, nil 101 | } 102 | 103 | func (m *mockedCoordinator) VerifySourceMigrationsCheckSums() (bool, []types.Migration) { 104 | if m.errorThreshold == m.counter { 105 | m1 := types.Migration{Name: "201602220000.sql", SourceDir: "source", File: "source/201602220000.sql", MigrationType: types.MigrationTypeSingleMigration, Contents: "select abc", CheckSum: "123"} 106 | return false, []types.Migration{m1} 107 | } 108 | m.counter++ 109 | return true, nil 110 | } 111 | 112 | func (m *mockedCoordinator) HealthCheck() types.HealthResponse { 113 | if m.errorThreshold == m.counter { 114 | panic(fmt.Sprintf("Mocked Coordinator: threshold %v reached", m.errorThreshold)) 115 | } 116 | if m.errorThreshold != -1 
{ 117 | m.counter++ 118 | } 119 | return types.HealthResponse{Status: types.HealthStatusUp, Checks: []types.HealthChecks{}} 120 | } 121 | 122 | type mockedCoordinatorHealthCheckError struct { 123 | mockedCoordinator 124 | } 125 | 126 | func (m *mockedCoordinatorHealthCheckError) HealthCheck() types.HealthResponse { 127 | return types.HealthResponse{Status: types.HealthStatusDown, Checks: []types.HealthChecks{}} 128 | } 129 | 130 | func newMockedCoordinatorHealthCheckError(ctx context.Context, config *config.Config, metrics metrics.Metrics) coordinator.Coordinator { 131 | return &mockedCoordinatorHealthCheckError{} 132 | } 133 | 134 | func newNoopMetrics() metrics.Metrics { 135 | return &noopMetrics{} 136 | } 137 | 138 | type noopMetrics struct { 139 | } 140 | 141 | func (m *noopMetrics) SetGaugeValue(name string, labelValues []string, value float64) error { 142 | return nil 143 | } 144 | 145 | func (m *noopMetrics) AddGaugeValue(name string, labelValues []string, value float64) error { 146 | return nil 147 | } 148 | 149 | func (m *noopMetrics) IncrementGaugeValue(name string, labelValues []string) error { 150 | return nil 151 | } 152 | -------------------------------------------------------------------------------- /staticcheck.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | which staticcheck &> /dev/null 4 | if [[ $? -ne 0 ]]; then 5 | go install honnef.co/go/tools/cmd/staticcheck@latest 6 | fi 7 | 8 | staticcheck ./... 
9 | -------------------------------------------------------------------------------- /test/create-test-tenants-mssql.sql: -------------------------------------------------------------------------------- 1 | create schema [abc] 2 | GO 3 | create schema [def] 4 | GO 5 | create schema [xyz] 6 | GO 7 | create schema [migrator] 8 | GO 9 | 10 | IF NOT EXISTS (select * from information_schema.tables where table_schema = 'migrator' and table_name = 'migrator_tenants') 11 | BEGIN 12 | create table [migrator].migrator_tenants ( 13 | id int identity (1,1) primary key, 14 | name varchar(200) not null, 15 | created datetime default CURRENT_TIMESTAMP 16 | ); 17 | 18 | insert into [migrator].migrator_tenants (name) values ('abc'); 19 | insert into [migrator].migrator_tenants (name) values ('def'); 20 | insert into [migrator].migrator_tenants (name) values ('xyz'); 21 | 22 | END 23 | 24 | GO 25 | -------------------------------------------------------------------------------- /test/create-test-tenants.sql: -------------------------------------------------------------------------------- 1 | create schema migrator; 2 | 3 | create table migrator.migrator_tenants ( 4 | id serial primary key, 5 | name varchar(200) not null, 6 | created timestamp default now() 7 | ); 8 | 9 | insert into migrator.migrator_tenants (name) values ('abc'); 10 | insert into migrator.migrator_tenants (name) values ('def'); 11 | insert into migrator.migrator_tenants (name) values ('xyz'); 12 | 13 | create schema abc; 14 | create schema def; 15 | create schema xyz; 16 | -------------------------------------------------------------------------------- /test/docker-compose-it.yaml: -------------------------------------------------------------------------------- 1 | version: "3.6" 2 | services: 3 | postgres: 4 | image: postgres 5 | ports: 6 | - "5432:5432" 7 | environment: 8 | - POSTGRES_PASSWORD=supersecret 9 | - POSTGRES_DB=migrator 10 | volumes: 11 | - 
./create-test-tenants.sql:/docker-entrypoint-initdb.d/create-test-tenants.sql 12 | mysql: 13 | image: mysql 14 | ports: 15 | - "3306:3306" 16 | environment: 17 | - MYSQL_ROOT_PASSWORD=supersecret 18 | volumes: 19 | - ./create-test-tenants.sql:/docker-entrypoint-initdb.d/create-test-tenants.sql 20 | mssql: 21 | image: mcr.microsoft.com/mssql/server:2017-latest 22 | ports: 23 | - "1433:1433" 24 | environment: 25 | - SA_PASSWORD=Super5ecret 26 | - ACCEPT_EULA=Y 27 | volumes: 28 | - ./create-test-tenants-mssql.sql:/docker-entrypoint-initdb.d/create-test-tenants-mssql.sql 29 | command: 30 | - /bin/bash 31 | - -c 32 | - | 33 | /opt/mssql/bin/sqlservr & 34 | PID=$$! 35 | is_up=-1 36 | while [ $$is_up -ne 0 ] && [ $$is_up -ne 16 ] ; do 37 | /opt/mssql-tools/bin/sqlcmd -l 30 -S localhost -h-1 -V1 -U sa -P $$SA_PASSWORD -Q "CREATE DATABASE migrator" 38 | is_up=$$? 39 | sleep 5 40 | done 41 | if [ $$is_up -eq 0 ]; then 42 | for script in /docker-entrypoint-initdb.d/*.sql 43 | do /opt/mssql-tools/bin/sqlcmd -U sa -P $$SA_PASSWORD -d migrator -l 30 -e -i $$script 44 | done 45 | fi 46 | wait $$PID 47 | migrator-dev: 48 | image: migrator-dev 49 | build: 50 | context: .. 
51 | dockerfile: test/migrator-dev/Dockerfile 52 | depends_on: 53 | - mysql 54 | - postgres 55 | - mssql 56 | ports: 57 | - "8282:8080" 58 | environment: 59 | - MIGRATOR_YAML=/data/migrator-docker.yaml 60 | volumes: 61 | - .:/data 62 | links: 63 | - mysql 64 | - postgres 65 | - mssql 66 | -------------------------------------------------------------------------------- /test/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | version: "3.6" 2 | services: 3 | postgres: 4 | image: postgres 5 | ports: 6 | - "5432:5432" 7 | environment: 8 | - POSTGRES_PASSWORD=supersecret 9 | - POSTGRES_DB=migrator 10 | volumes: 11 | - ./create-test-tenants.sql:/docker-entrypoint-initdb.d/create-test-tenants.sql 12 | # pgadmin: 13 | # image: thajeztah/pgadmin4 14 | # depends_on: 15 | # - postgres 16 | # ports: 17 | # - "5050:5050" 18 | # links: 19 | # - postgres 20 | mysql: 21 | image: mysql 22 | ports: 23 | - "3306:3306" 24 | environment: 25 | - MYSQL_ROOT_PASSWORD=supersecret 26 | volumes: 27 | - ./create-test-tenants.sql:/docker-entrypoint-initdb.d/create-test-tenants.sql 28 | # phpmyadmin: 29 | # image: phpmyadmin/phpmyadmin 30 | # depends_on: 31 | # - mysql 32 | # ports: 33 | # - "8888:80" 34 | # environment: 35 | # - PMA_HOSTS=mysql 36 | # - PMA_USER=root 37 | # - PMA_PASSWORD=supersecret 38 | # links: 39 | # - mysql 40 | mssql: 41 | image: mcr.microsoft.com/mssql/server:2017-latest 42 | ports: 43 | - "1433:1433" 44 | environment: 45 | - SA_PASSWORD=Super5ecret 46 | - ACCEPT_EULA=Y 47 | volumes: 48 | - ./create-test-tenants-mssql.sql:/docker-entrypoint-initdb.d/create-test-tenants-mssql.sql 49 | command: 50 | - /bin/bash 51 | - -c 52 | - | 53 | /opt/mssql/bin/sqlservr & 54 | PID=$$! 55 | is_up=-1 56 | while [ $$is_up -ne 0 ] && [ $$is_up -ne 16 ] ; do 57 | /opt/mssql-tools/bin/sqlcmd -l 30 -S localhost -h-1 -V1 -U sa -P $$SA_PASSWORD -Q "CREATE DATABASE migrator" 58 | is_up=$$? 
59 | sleep 5 60 | done 61 | if [ $$is_up -eq 0 ]; then 62 | for script in /docker-entrypoint-initdb.d/*.sql 63 | do /opt/mssql-tools/bin/sqlcmd -U sa -P $$SA_PASSWORD -d migrator -l 30 -e -i $$script 64 | done 65 | fi 66 | wait $$PID 67 | migrator: 68 | image: lukasz/migrator:latest 69 | depends_on: 70 | - mysql 71 | - postgres 72 | - mssql 73 | ports: 74 | - "8181:8080" 75 | environment: 76 | - MIGRATOR_YAML=/data/migrator-docker.yaml 77 | volumes: 78 | - .:/data 79 | links: 80 | - mysql 81 | - postgres 82 | - mssql 83 | migrator-dev: 84 | image: migrator-dev 85 | build: 86 | context: .. 87 | dockerfile: test/migrator-dev/Dockerfile 88 | depends_on: 89 | - mysql 90 | - postgres 91 | - mssql 92 | ports: 93 | - "8282:8080" 94 | environment: 95 | - MIGRATOR_YAML=/data/migrator-docker.yaml 96 | volumes: 97 | - .:/data 98 | links: 99 | - mysql 100 | - postgres 101 | - mssql 102 | -------------------------------------------------------------------------------- /test/empty.yaml: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /test/http-integration-tests.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # stop on error 4 | set -e 5 | 6 | function cleanup { 7 | rm versions.txt || true 8 | rm create_version.txt || true 9 | rm tenants.txt || true 10 | rm create_tenant.txt || true 11 | rm metrics.txt || true 12 | } 13 | 14 | # use migrator service built from local branch 15 | MIGRATOR_PORT=8282 16 | 17 | # used in create* methods 18 | COMMIT_SHA=$(git rev-list -1 HEAD) 19 | 20 | echo "------------------------------------------------------------------------------" 21 | echo "0. migrator info" 22 | 23 | curl -s http://localhost:$MIGRATOR_PORT | jq '.' 24 | 25 | # 1. Fetch migrator versions 26 | 27 | echo "------------------------------------------------------------------------------" 28 | echo "1. 
# 1. Fetch migrator versions
# NOTE: heredoc redirections below are reconstructed as `cat << EOF > file`
# (the extraction collapsed them to `cat < file` while keeping the EOF
# terminators, which would not execute).

echo "------------------------------------------------------------------------------"
echo "1. About to fetch migrator versions..."
cat << EOF > versions.txt
{
  "query": "
  query Versions {
    versions {
      id,
      name,
      created,
    }
  }",
  "operationName": "Versions"
}
EOF
versions=$(curl -s -d @versions.txt http://localhost:$MIGRATOR_PORT/v2/service | jq -r ".data.versions")

versions_count_before=$(echo $versions | jq length)
echo "Number of versions in migrator: $versions_count_before"

# 2. Fetch tenants

echo "------------------------------------------------------------------------------"
echo "2. About to fetch tenants..."
cat << EOF > tenants.txt
{
  "query": "
  query Tenants {
    tenants {
      name
    }
  }",
  "operationName": "Tenants"
}
EOF
tenants=$(curl -s -d @tenants.txt http://localhost:$MIGRATOR_PORT/v2/service | jq -r ".data.tenants")

tenants_count_before=$(echo $tenants | jq length)
echo "Number of tenants in migrator: $tenants_count_before"

# 3. Create new migrator version

echo "------------------------------------------------------------------------------"
echo "3. About to create new migrator version..."
VERSION_NAME="create-version-$COMMIT_SHA-$RANDOM"
cat << EOF > create_version.txt
{
  "query": "
  mutation CreateVersion(\$input: VersionInput!) {
    createVersion(input: \$input) {
      version {
        id,
        name,
      }
      summary {
        startedAt
        duration
        tenants
        migrationsGrandTotal
        scriptsGrandTotal
      }
    }
  }",
  "operationName": "CreateVersion",
  "variables": {
    "input": {
      "versionName": "$VERSION_NAME"
    }
  }
}
EOF
version_create=$(curl -s -d @create_version.txt http://localhost:$MIGRATOR_PORT/v2/service | jq -r '.data.createVersion')
version_create_name=$(echo $version_create | jq -r '.version.name')

if [ "$version_create_name" != "$VERSION_NAME" ]; then
  >&2 echo "Version created with name '$version_create_name' but expected to be '$VERSION_NAME'"
  cleanup
  exit 1
fi
echo "New version successfully created"
echo $version_create | jq '.'

# 4. Fetch migrator versions - will now contain version created above

echo "------------------------------------------------------------------------------"
echo "4. About to fetch migrator versions..."
versions=$(curl -s -d @versions.txt http://localhost:$MIGRATOR_PORT/v2/service | jq -r ".data.versions")
versions_count=$(echo $versions | jq length)

if [ "$versions_count" -le "$versions_count_before" ]; then
  >&2 echo "Versions count '$versions_count' should be greater than '$versions_count_before'"
  cleanup
  exit 1
fi

echo "Number of versions in migrator: $versions_count"

# 5. Create new tenant

echo "------------------------------------------------------------------------------"
echo "5. About to create new tenant..."
VERSION_NAME="create-tenant-$COMMIT_SHA-$RANDOM"
TENANT_NAME="newcustomer$RANDOM"
cat << EOF > create_tenant.txt
{
  "query": "
  mutation CreateTenant(\$input: TenantInput!) {
    createTenant(input: \$input) {
      version {
        id,
        name,
      }
      summary {
        startedAt
        duration
        tenants
        migrationsGrandTotal
        scriptsGrandTotal
      }
    }
  }",
  "operationName": "CreateTenant",
  "variables": {
    "input": {
      "versionName": "$VERSION_NAME",
      "tenantName": "$TENANT_NAME"
    }
  }
}
EOF
tenant_create=$(curl -s -d @create_tenant.txt http://localhost:$MIGRATOR_PORT/v2/service | jq -r '.data.createTenant')
tenant_create_version_name=$(echo $tenant_create | jq -r '.version.name')

if [ "$tenant_create_version_name" != "$VERSION_NAME" ]; then
  >&2 echo "Version created with name '$tenant_create_version_name' but expected to be '$VERSION_NAME'"
  cleanup
  exit 1
fi
echo "New tenant successfully created"
echo $tenant_create | jq '.'

# 6. Fetch tenants - will now contain tenant create above

echo "------------------------------------------------------------------------------"
echo "6. About to fetch tenants..."
tenants=$(curl -s -d @tenants.txt http://localhost:$MIGRATOR_PORT/v2/service | jq -r ".data.tenants")
tenants_count=$(echo $tenants | jq length)

if [ "$tenants_count" -le "$tenants_count_before" ]; then
  >&2 echo "Tenant count '$tenants_count' should be greater than '$tenants_count_before'"
  cleanup
  exit 1
fi

echo "Number of tenants: $tenants_count"

echo "------------------------------------------------------------------------------"
Checking if Prometheus metrics are exposed" 186 | 187 | curl -s http://localhost:$MIGRATOR_PORT/metrics > metrics.txt 188 | 189 | grep '^migrator_gin_tenants_created' metrics.txt 190 | grep '^migrator_gin_versions_created' metrics.txt 191 | grep '^migrator_gin_migrations_applied{type="single_migrations"}' metrics.txt 192 | grep '^migrator_gin_migrations_applied{type="single_scripts"}' metrics.txt 193 | grep '^migrator_gin_migrations_applied{type="tenant_migrations_total"}' metrics.txt 194 | grep '^migrator_gin_migrations_applied{type="tenant_scripts_total"}' metrics.txt 195 | 196 | echo "------------------------------------------------------------------------------" 197 | 198 | echo "8. Calling health checks" 199 | 200 | curl -s http://localhost:$MIGRATOR_PORT/health | jq '.' 201 | 202 | echo "------------------------------------------------------------------------------" 203 | 204 | echo "All good!" 205 | 206 | cleanup 207 | -------------------------------------------------------------------------------- /test/migrations/config-scripts/200012181227.sql: -------------------------------------------------------------------------------- 1 | select 1; 2 | -------------------------------------------------------------------------------- /test/migrations/config/201602160001.sql: -------------------------------------------------------------------------------- 1 | create schema config; 2 | -------------------------------------------------------------------------------- /test/migrations/config/201602160002.sql: -------------------------------------------------------------------------------- 1 | create table {schema}.config ( 2 | id integer, 3 | k varchar(100), 4 | v varchar(100), 5 | primary key (id) 6 | ); 7 | -------------------------------------------------------------------------------- /test/migrations/ref/201602160003.sql: -------------------------------------------------------------------------------- 1 | create schema {schema}; 2 | 
-------------------------------------------------------------------------------- /test/migrations/ref/201602160004.sql: -------------------------------------------------------------------------------- 1 | create table {schema}.roles (id integer primary key, name varchar(100)); 2 | -------------------------------------------------------------------------------- /test/migrations/tenants-scripts/200001181228.sql: -------------------------------------------------------------------------------- 1 | select 1; 2 | -------------------------------------------------------------------------------- /test/migrations/tenants-scripts/a.sql: -------------------------------------------------------------------------------- 1 | select 2; 2 | -------------------------------------------------------------------------------- /test/migrations/tenants-scripts/b.sql: -------------------------------------------------------------------------------- 1 | select 3; 2 | -------------------------------------------------------------------------------- /test/migrations/tenants/201602160002.sql: -------------------------------------------------------------------------------- 1 | create table {schema}.module (id integer, id_config integer, foreign key (id_config) references config.config(id)); 2 | -------------------------------------------------------------------------------- /test/migrations/tenants/201602160003.sql: -------------------------------------------------------------------------------- 1 | create table {schema}.users (id integer, username varchar(100)); 2 | -------------------------------------------------------------------------------- /test/migrations/tenants/201602160004.sql: -------------------------------------------------------------------------------- 1 | alter table {schema}.users add id_role integer; 2 | -------------------------------------------------------------------------------- /test/migrations/tenants/201602160005.sql: 
-------------------------------------------------------------------------------- 1 | alter table {schema}.users add foreign key (id_role) references ref.roles(id); 2 | -------------------------------------------------------------------------------- /test/migrator-dev/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM golang:1.19.3-alpine3.15 as builder 2 | 3 | LABEL org.opencontainers.image.authors="Łukasz Budnik " 4 | 5 | # git is required 6 | RUN apk add git 7 | 8 | RUN mkdir -p /go/migrator 9 | COPY . /go/migrator 10 | 11 | RUN cd /go/migrator && go get -t -d ./... 12 | 13 | RUN cd /go/migrator && \ 14 | GIT_REF=$(git branch --show-current) && \ 15 | GIT_SHA=$(git rev-parse HEAD) && \ 16 | go build -o /bin/migrator -ldflags "-X main.GitSha=$GIT_SHA -X main.GitRef=$GIT_REF" 17 | 18 | VOLUME ["/data"] 19 | 20 | # copy and register entrypoint script 21 | COPY docker-entrypoint.sh / 22 | ENTRYPOINT ["/docker-entrypoint.sh"] 23 | 24 | EXPOSE 8080 25 | -------------------------------------------------------------------------------- /test/migrator-docker.yaml: -------------------------------------------------------------------------------- 1 | baseLocation: /data/migrations 2 | driver: mysql 3 | dataSource: "root:supersecret@tcp(mysql:3306)/migrator?parseTime=true&timeout=1s" 4 | singleMigrations: 5 | - ref 6 | - config 7 | singleScripts: 8 | - config-scripts 9 | tenantMigrations: 10 | - tenants 11 | -------------------------------------------------------------------------------- /test/migrator-mssql.yaml: -------------------------------------------------------------------------------- 1 | baseLocation: test/migrations 2 | driver: sqlserver 3 | dataSource: "sqlserver://SA:Super5ecret@127.0.0.1/?database=migrator&connection+timeout=1&dial+timeout=1" 4 | singleMigrations: 5 | - ref 6 | - config 7 | tenantMigrations: 8 | - tenants 9 | -------------------------------------------------------------------------------- 
/test/migrator-mysql.yaml: -------------------------------------------------------------------------------- 1 | baseLocation: test/migrations 2 | driver: mysql 3 | dataSource: "root:supersecret@tcp(127.0.0.1:3306)/migrator?parseTime=true&timeout=1s" 4 | singleMigrations: 5 | - ref 6 | - config 7 | tenantMigrations: 8 | - tenants 9 | -------------------------------------------------------------------------------- /test/migrator-overrides.yaml: -------------------------------------------------------------------------------- 1 | baseLocation: test/migrations 2 | driver: postgres 3 | dataSource: "user=postgres dbname=A host=B port=C sslmode=disable" 4 | tenantSelectSQL: select somename from someschema.sometable 5 | tenantInsertSQL: insert into someschema.sometable (somename) values ($1) 6 | schemaPlaceHolder: "[schema]" 7 | singleMigrations: 8 | - public 9 | - ref 10 | - config 11 | tenantMigrations: 12 | - tenants 13 | port: 8811 14 | -------------------------------------------------------------------------------- /test/migrator-postgresql.yaml: -------------------------------------------------------------------------------- 1 | baseLocation: test/migrations 2 | driver: postgres 3 | dataSource: "user=postgres password=supersecret dbname=migrator host=127.0.0.1 port=5432 sslmode=disable connect_timeout=1" 4 | singleMigrations: 5 | - ref 6 | - config 7 | tenantMigrations: 8 | - tenants 9 | -------------------------------------------------------------------------------- /test/migrator-test-envs.yaml: -------------------------------------------------------------------------------- 1 | # migrator configuration 2 | baseLocation: s3://bucket-name/application-x/${TERM}/${COMMIT_SHA} 3 | driver: ${PWD} 4 | dataSource: "lets_assume_password=${HOME}&and_something_else=${USER}¶m=value" 5 | # override only if you have own specific way of determining tenants 6 | tenantSelectSQL: ${PATH} 7 | tenantInsertSQL: ${GOPATH} 8 | schemaPlaceHolder: ${USER} 9 | port: ${_} 10 | 
singleMigrations: 11 | - public 12 | - ref 13 | - config 14 | tenantMigrations: 15 | - tenants 16 | webHookURL: ${SHLVL} 17 | webHookHeaders: 18 | - "X-Security-Token: ${USER}" 19 | -------------------------------------------------------------------------------- /test/migrator-test.yaml: -------------------------------------------------------------------------------- 1 | # migrator configuration 2 | baseLocation: test/migrations 3 | driver: postgres 4 | dataSource: "user=postgres dbname=migrator_test host=192.168.99.100 port=55432 sslmode=disable" 5 | # override only if you have own specific way of determining tenants 6 | tenantSelectSQL: "select name from migrator.migrator_tenants" 7 | schemaPlaceHolder: "{schema}" 8 | port: 8811 9 | singleMigrations: 10 | - public 11 | - ref 12 | - config 13 | tenantMigrations: 14 | - tenants 15 | webHookURL: https://slack.com/api/api.test 16 | webHookHeaders: 17 | - "Authorization: Basic QWxhZGRpbjpPcGVuU2VzYW1l" 18 | - "Content-Type: application/json" 19 | - "X-CustomHeader: value1,value2" 20 | -------------------------------------------------------------------------------- /test/performance/create-test-tenants.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # not the most fancy script in the world 4 | 5 | if [[ $# -eq 0 ]]; then 6 | echo "This script expects a number of test tenants to create as its argument" 7 | exit 8 | fi 9 | 10 | i=1 11 | let end=$1+1 12 | 13 | while [[ $i -lt $end ]]; do 14 | if [[ $i%10 -eq 0 ]]; then 15 | echo "creating new tenant $i" 16 | fi 17 | name="tenant_${RANDOM}_${RANDOM}" 18 | psql -U postgres -h 127.0.0.1 -p 5432 -d migrator -tAq -c "create schema $name; insert into migrator.migrator_tenants (name) values ('$name');" 19 | let i+=1 20 | done 21 | -------------------------------------------------------------------------------- /test/performance/flyway.conf: 
-------------------------------------------------------------------------------- 1 | flyway.driver=org.postgresql.Driver 2 | flyway.url=jdbc:postgresql://127.0.0.1:5432/migrator?ssl=false 3 | flyway.user=postgres 4 | flyway.password=supersecret 5 | flyway.locations=test/performance/migrations/tenants 6 | flyway.schemas=set dynamically by test script 7 | -------------------------------------------------------------------------------- /test/performance/flywaydb-test.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | NO_OF_MIGRATIONS=1000 4 | NO_OF_TENANTS=10 5 | 6 | export PGPASSWORD=supersecret 7 | # remove all existing tenants 8 | psql -U postgres -h 127.0.0.1 -p 5432 -d migrator -tAq -c "delete from migrator.migrator_tenants" 9 | # create test tenants (connects to psql and creates them) 10 | ./test/performance/create-test-tenants.sh $NO_OF_TENANTS 11 | 12 | ./test/performance/generate-test-migrations.sh -f -n $NO_OF_MIGRATIONS 13 | 14 | # flyway doesn't support both single and multi-tenant migrations, delete public ones 15 | rm -rf ./test/performance/migrations/public 16 | 17 | # remove existing flyway.schemas config property 18 | gsed -i '/flyway.schemas/d' ./test/performance/flyway.conf 19 | 20 | output=$(psql -U postgres -h 127.0.0.1 -p 5432 -d migrator -tAq -c "select string_agg(name, ',') from migrator.migrator_tenants") 21 | 22 | echo "flyway.schemas=$output" >> ./test/performance/flyway.conf 23 | 24 | start=`date +%s` 25 | flyway -configFiles=./test/performance/flyway.conf baseline migrate > /dev/null 26 | end=`date +%s` 27 | 28 | echo "Test took $((end-start)) seconds" 29 | 30 | rm -rf test/performance/migrations/ 31 | 32 | # append test 33 | # 1. comment out above rm command 34 | # 2. RUN TEST 35 | # 3. generate new migrations: 36 | # ./test/performance/generate-test-migrations.sh -a -f -n $NO_OF_MIGRATIONS 37 | # 4. 
execute flyway migrate command, measure start and end times: 38 | # start=`date +%s` && flyway -configFiles=./test/performance/flyway.conf migrate > /dev/null && end=`date +%s` 39 | -------------------------------------------------------------------------------- /test/performance/generate-test-migrations.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | cd "$(dirname "$0")" 4 | 5 | remove_tenant_placeholder=0 6 | append=0 7 | 8 | while [[ "$#" > 0 ]]; do case $1 in 9 | -n|--number-of-migrations) no_of_migrations="$2"; shift;; 10 | -a|--append) append=1;; 11 | -f|--flyway) remove_tenant_placeholder=1;; 12 | *) echo "Unknown parameter passed: $1"; exit 1;; 13 | esac; shift; done 14 | 15 | if [[ -z "$no_of_migrations" ]] || [[ -z "${no_of_migrations##*[!0-9]*}" ]]; then 16 | no_of_migrations=100 17 | fi 18 | 19 | tenantplaceholder=":tenant" 20 | tenantprefixplaceholder="$tenantplaceholder." 21 | if (( remove_tenant_placeholder == 1 )); then 22 | tenantplaceholder="" 23 | tenantprefixplaceholder="" 24 | fi 25 | 26 | function generate_first_table { 27 | year=$(date +'%Y') 28 | 29 | if (( remove_tenant_placeholder == 0 )); then 30 | cat > "migrations/tenants/V${year}.1__0000000000.sql" <> "migrations/tenants/V${year}.1__0000000000.sql" < $file < $file < $file 78 | # else 79 | # echo "alter table ${tenantprefixplaceholder}table_${counter} drop column d, drop column e, drop column f;" >> $file 80 | # fi 81 | # no_of_inserts=1000 82 | no_of_inserts=10 83 | while [[ $no_of_inserts -gt 0 ]]; do 84 | echo "insert into ${tenantprefixplaceholder}table_${counter} (a, b, c) values ($RANDOM, $RANDOM, '$RANDOM');" >> $file 85 | let no_of_inserts-=1 86 | done 87 | echo "insert into ${tenantprefixplaceholder}table_for_inserts values ($i);" >> $file 88 | } 89 | 90 | if [[ $append -eq 0 ]]; then 91 | rm -rf migrations 92 | mkdir -p migrations/tenants 93 | mkdir -p migrations/public 94 | 95 | generate_first_table 96 | 
generate_public_table 97 | 98 | i=0 99 | counter=0 100 | else 101 | i=$(ls -t migrations/tenants | head -1 | cut -d '_' -f 2 | cut -d '.' -f 1) 102 | counter=$((i/10+1)) 103 | i=$((i+1)) 104 | fi 105 | 106 | end=$((i+no_of_migrations)) 107 | 108 | echo "About to generate $no_of_migrations migrations" 109 | echo "is append? $append" 110 | echo "is without tenant prefix? $remove_tenant_placeholder" 111 | echo "counter = $counter" 112 | echo "i = $i" 113 | 114 | while [[ $i -lt $end ]]; do 115 | if [[ $i%10 -eq 0 ]]; then 116 | let counter+=1 117 | echo "generate_table $i $counter" 118 | generate_table $i $counter 119 | else 120 | #echo "generate_alter_drop_inserts $i $counter" 121 | generate_alter_drop_inserts $i $counter 122 | fi 123 | let i+=1 124 | done 125 | -------------------------------------------------------------------------------- /test/performance/liquibase-changelog.xml: -------------------------------------------------------------------------------- 1 | 2 | 8 | 9 | -------------------------------------------------------------------------------- /test/performance/liquibase-test.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # liquibase doesn't natively support multi-tenancy thus doing a single migration to public schema 4 | NO_OF_MIGRATIONS=10000 5 | 6 | ./test/performance/generate-test-migrations.sh -f -n $NO_OF_MIGRATIONS 7 | 8 | # download PostgreSQL JDBC Driver - update location in liquibase.properties! 9 | mvn -DgroupId=org.postgresql -DartifactId=postgresql -Dversion=42.2.23 dependency:get 10 | 11 | start=`date +%s` 12 | liquibase --defaultsFile ./test/performance/liquibase.properties update 13 | end=`date +%s` 14 | 15 | echo "Test took $((end-start)) seconds" 16 | 17 | rm -rf test/performance/migrations/ 18 | 19 | # append test 20 | # 1. comment out above rm command 21 | # 2. RUN TEST 22 | # 3. 
generate new migrations: 23 | # ./test/performance/generate-test-migrations.sh -a -f -n $NO_OF_MIGRATIONS 24 | # 4. execute liquibase migrate update, measure start and end times: 25 | # start=`date +%s` && liquibase --defaultsFile ./test/performance/liquibase.properties update && end=`date +%s` 26 | -------------------------------------------------------------------------------- /test/performance/liquibase.properties: -------------------------------------------------------------------------------- 1 | changeLogFile=./test/performance/liquibase-changelog.xml 2 | url=jdbc:postgresql://127.0.0.1:5432/migrator?ssl=false 3 | username=postgres 4 | password=supersecret 5 | driver=org.postgresql.Driver 6 | classpath=/Users/lukasz/.m2/repository/org/postgresql/postgresql/42.2.23/postgresql-42.2.23.jar 7 | logLevel=warn 8 | liquibase.hub.mode=off 9 | -------------------------------------------------------------------------------- /test/performance/migrator-performance.yaml: -------------------------------------------------------------------------------- 1 | baseLocation: test/performance/migrations 2 | driver: postgres 3 | dataSource: "user=postgres password=supersecret dbname=migrator host=127.0.0.1 port=5432 sslmode=disable connect_timeout=1" 4 | singleMigrations: 5 | - public 6 | tenantMigrations: 7 | - tenants 8 | schemaPlaceHolder: ":tenant" 9 | port: 8888 10 | -------------------------------------------------------------------------------- /test/performance/test.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # assumes containers are already started 4 | # it uses PostgreSQL and requires psql tool to be installed 5 | 6 | NO_OF_MIGRATIONS=100 7 | NO_OF_TENANTS=500 8 | 9 | # test from scratch 10 | EXISTING_TABLES=0 11 | EXISTING_INSERTS=0 12 | # in append test 13 | # EXISTING_TABLES=10 14 | # EXISTING_INSERTS=100 15 | 16 | go build 17 | 18 | export PGPASSWORD=supersecret 19 | # remove all existing versions and 
migrations 20 | psql -U postgres -h 127.0.0.1 -p 5432 -d migrator -tAq -c "delete from migrator.migrator_versions" 21 | # remove all existing tenants 22 | psql -U postgres -h 127.0.0.1 -p 5432 -d migrator -tAq -c "delete from migrator.migrator_tenants" 23 | # create test tenants (connects to psql and creates them) 24 | ./test/performance/create-test-tenants.sh $NO_OF_TENANTS 25 | 26 | # generate test migrtations 27 | ./test/performance/generate-test-migrations.sh -n $NO_OF_MIGRATIONS 28 | # generate test migrtations in append test 29 | # ./test/performance/generate-test-migrations.sh -n $NO_OF_MIGRATIONS -a 30 | 31 | ./migrator -configFile=test/performance/migrator-performance.yaml &> /dev/null & 32 | sleep 5 33 | 34 | COMMIT_SHA="performance-tests" 35 | # new lines are used for readability but have to be removed from the actual request 36 | cat < create_version.txt 37 | { 38 | "query": " 39 | mutation CreateVersion(\$input: VersionInput!) { 40 | createVersion(input: \$input) { 41 | version { 42 | id, 43 | name, 44 | } 45 | summary { 46 | startedAt 47 | tenants 48 | migrationsGrandTotal 49 | scriptsGrandTotal 50 | } 51 | } 52 | }", 53 | "operationName": "CreateVersion", 54 | "variables": { 55 | "input": { 56 | "versionName": "$COMMIT_SHA" 57 | } 58 | } 59 | } 60 | EOF 61 | start=`date +%s` 62 | curl -d @create_version.txt http://localhost:8888/v2/service | jq -r '.data.createVersion.summary' 63 | end=`date +%s` 64 | 65 | echo "Done, checking if all migrations were applied correctly..." 
66 | 67 | output=$(psql -U postgres -h 127.0.0.1 -p 5432 -d migrator -tAq -c "select name from migrator.migrator_tenants") 68 | 69 | IFS=$'\n'; tenants=($output); unset IFS; 70 | 71 | for tenant in "${tenants[@]}"; do 72 | count=$(psql -U postgres -h 127.0.0.1 -p 5432 -d migrator -tAq -c "select count(distinct col) from $tenant.table_for_inserts") 73 | tables_count=$(psql -U postgres -h 127.0.0.1 -p 5432 -d migrator -tAq -c "select count(*) from information_schema.tables where table_schema = '$tenant' and table_name like 'table_%'") 74 | 75 | if [[ $count -ne $NO_OF_MIGRATIONS+$EXISTING_INSERTS+1 ]]; then 76 | echo "[migrations inserts] error for $tenant, got $count, expected: $((NO_OF_MIGRATIONS+$EXISTING_INSERTS+1))" 77 | fi 78 | if [[ $tables_count -ne $NO_OF_MIGRATIONS/10+$EXISTING_TABLES+1 ]]; then 79 | echo "[migrations tables] error for $tenant, got $tables_count, expected: $((NO_OF_MIGRATIONS/10+$EXISTING_TABLES+1))" 80 | fi 81 | 82 | done 83 | 84 | echo "Test took $((end-start)) seconds" 85 | 86 | # remove generated migrations and test request and kill migrator 87 | rm -rf test/performance/migrations 88 | rm create_version.txt 89 | sleep 5 90 | killall migrator 91 | 92 | 93 | # prepare for append test 94 | # 1. comment out lines: 95 | # 87 96 | # 97 | # 2. RUN TEST 98 | # 99 | # 3. comment out lines: 100 | # 10, 11 101 | # 20, 22, 24 102 | # 27 103 | # 104 | # 4. uncomment lines: 105 | # 13, 14 106 | # 29 107 | # 108 | # 5. 
RUN TEST 109 | -------------------------------------------------------------------------------- /tutorials/aws-ecs/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM lukasz/migrator 2 | 3 | MAINTAINER Łukasz Budnik lukasz.budnik@gmail.com 4 | 5 | COPY migrator.yaml /data/ 6 | -------------------------------------------------------------------------------- /tutorials/aws-ecs/migrator.yaml: -------------------------------------------------------------------------------- 1 | baseLocation: s3://your-bucket-migrator 2 | driver: postgres 3 | dataSource: "user=${DATABASE_USERNAME} password=${DATABASE_PASSWORD} dbname=${DATABASE_NAME} host=${DATABASE_HOST}" 4 | singleMigrations: 5 | - migrations/ref 6 | - migrations/config 7 | tenantMigrations: 8 | - migrations/tenants 9 | port: 8080 10 | pathPrefix: /migrator 11 | -------------------------------------------------------------------------------- /tutorials/aws-eks/kustomization.yaml: -------------------------------------------------------------------------------- 1 | secretGenerator: 2 | - name: database-credentials 3 | literals: 4 | - password=abc 5 | - username=def 6 | - host=ghi 7 | - database=jkl 8 | -------------------------------------------------------------------------------- /tutorials/aws-eks/migrator-deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: migrator 5 | labels: 6 | app: migrator 7 | spec: 8 | selector: 9 | matchLabels: 10 | app: migrator 11 | tier: frontend 12 | strategy: 13 | type: Recreate 14 | template: 15 | metadata: 16 | labels: 17 | app: migrator 18 | tier: frontend 19 | spec: 20 | serviceAccountName: migrator-serviceaccount 21 | containers: 22 | - image: XXX.dkr.ecr.XXX.amazonaws.com/migrator:v2020.1.0 23 | name: migrator 24 | env: 25 | - name: AWS_REGION 26 | value: XXX 27 | - name: DATABASE_USERNAME 28 | valueFrom: 29 | 
secretKeyRef: 30 | name: database-credentials-bd8htk2bk7 31 | key: username 32 | - name: DATABASE_PASSWORD 33 | valueFrom: 34 | secretKeyRef: 35 | name: database-credentials-bd8htk2bk7 36 | key: password 37 | - name: DATABASE_NAME 38 | valueFrom: 39 | secretKeyRef: 40 | name: database-credentials-bd8htk2bk7 41 | key: database 42 | - name: DATABASE_HOST 43 | valueFrom: 44 | secretKeyRef: 45 | name: database-credentials-bd8htk2bk7 46 | key: host 47 | ports: 48 | - containerPort: 8080 49 | name: migrator 50 | -------------------------------------------------------------------------------- /tutorials/aws-eks/migrator-ingress.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | kind: Ingress 3 | metadata: 4 | name: "migrator-ingress" 5 | annotations: 6 | kubernetes.io/ingress.class: alb 7 | alb.ingress.kubernetes.io/scheme: internet-facing 8 | alb.ingress.kubernetes.io/target-type: ip 9 | alb.ingress.kubernetes.io/certificate-arn: arn:aws:acm:XXX:XXX:certificate/XXX 10 | alb.ingress.kubernetes.io/listen-ports: '[{"HTTPS":443}]' 11 | alb.ingress.kubernetes.io/inbound-cidrs: '0.0.0.0/0' 12 | alb.ingress.kubernetes.io/healthcheck-path: '/migrator/' 13 | labels: 14 | app: migrator-ingress 15 | spec: 16 | rules: 17 | - http: 18 | paths: 19 | - path: /migrator* 20 | backend: 21 | serviceName: "migrator-service" 22 | servicePort: 80 23 | -------------------------------------------------------------------------------- /tutorials/aws-eks/migrator-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: "migrator-service" 5 | spec: 6 | ports: 7 | - port: 80 8 | targetPort: 8080 9 | protocol: TCP 10 | type: ClusterIP 11 | selector: 12 | app: "migrator" 13 | -------------------------------------------------------------------------------- /tutorials/azure-aks/Dockerfile: 
-------------------------------------------------------------------------------- 1 | FROM lukasz/migrator 2 | 3 | MAINTAINER Łukasz Budnik lukasz.budnik@gmail.com 4 | 5 | COPY migrator.yaml /data/ 6 | -------------------------------------------------------------------------------- /tutorials/azure-aks/README.md: -------------------------------------------------------------------------------- 1 | # migrator on Azure AKS 2 | 3 | The goal of this tutorial is to publish migrator image to Azure ACR private container repository, deploy migrator to Azure AKS, load migrations from Azure Blob Container and apply them to Azure Database for PostgreSQL. 4 | 5 | In order to simplify the customisation of the whole deployment I will be using the following env variables. Update them before you start this tutorial. 6 | 7 | ``` 8 | # resource group name (resource group must exist) 9 | RG_NAME=lukaszbudnik 10 | # Azure ACR repo name to create 11 | ACR_NAME=migrator 12 | # Azure ACK cluster name 13 | ACK_CLUSTER_NAME=awesome-product 14 | ``` 15 | 16 | ## Recommendations for production deployment 17 | 18 | For keeping secrets in production workloads I recommend using [Azure Key Vault Provider for Secrets Store CSI Driver](https://github.com/Azure/secrets-store-csi-driver-provider-azure). It allows you to get secret contents stored in an Azure Key Vault instance and use the Secrets Store CSI driver interface to mount them into Kubernetes pods. It can also sync secrets from Azure Key Vault to Kubernetes secrets. Further, Azure Key Vault Provider can use managed identity so there is no need to juggle any credentials. 19 | 20 | As an ingress controller I recommend using Azure Application Gateway. For more information refer to: https://docs.microsoft.com/en-us/azure/developer/terraform/create-k8s-cluster-with-aks-applicationgateway-ingress and https://azure.github.io/application-gateway-kubernetes-ingress/. 
21 | 22 | Another nice addition is to use [Service Catalog](https://svc-cat.io) and [Open Service Broker for Azure](https://osba.sh) to provision Azure Database and Azure Storage on your behalf. To use it check my gists: [Service Catalog with Open Service Broker for Azure to provision Azure Database](https://gist.github.com/lukaszbudnik/b2734c250e71b0c7f18dd93fb882cc42) and [Service Catalog with Open Service Broker for Azure to provision Azure Storage Account](https://gist.github.com/lukaszbudnik/c03549cfa9728d9e4957e6bc54ef3c6e). 23 | 24 | In this tutorial I decided to keep things simple. Azure Key Vault Provider for Secrets Store CSI Driver and Azure Application Gateway are a little bit more complex to setup and are outside of the scope of this tutorial. Also, Azure Database and Azure Storage are created manually. 25 | 26 | ## Azure Blob Container - upload test migrations 27 | 28 | Create new Azure Storage Account and a new container. Then: 29 | 30 | * update `baseLocation` property in `migrator.yaml`. 31 | * update `storage-account` credentials in `kustomization.yaml` 32 | 33 | You can use test migrations to play around with migrator. They are located in `test/migrations` directory. 34 | 35 | ## ACR - build and publish migrator image 36 | 37 | Create container repository: 38 | 39 | ``` 40 | az acr create --resource-group $RG_NAME --name $ACR_NAME --sku Basic 41 | ``` 42 | 43 | Build image and push it to ACR: 44 | 45 | ``` 46 | az acr build --registry $ACR_NAME --image migrator:v4.2-azure . 
47 | ``` 48 | 49 | From the output you can see: 50 | 51 | ```yaml 52 | - image: 53 | registry: migrator.azurecr.io 54 | repository: migrator 55 | tag: v4.2-azure 56 | digest: sha256:e72108eec96204f01d4eaa87d83ea302bbb651194a13636597c32f7434de5933 57 | runtime-dependency: 58 | registry: registry.hub.docker.com 59 | repository: lukasz/migrator 60 | tag: dev-v4.2 61 | digest: sha256:b414ea94960048904a137ec1655b7e6471e57a483ccd095fca744c7a449a118e 62 | ``` 63 | 64 | which means that the ACR image is: `migrator.azurecr.io/migrator:v4.2-azure`. 65 | 66 | Edit `migrator-deployment.yaml` and update the image name to the one built above (line 21). 67 | 68 | ## Create and setup the AKS 69 | 70 | Create AKS cluster and attach our ACR repository to it (ACR attach is required otherwise AKS won't be able to pull our migrator image): 71 | 72 | ``` 73 | az aks create --name $ACK_CLUSTER_NAME \ 74 | --resource-group $RG_NAME \ 75 | --load-balancer-sku basic \ 76 | --vm-set-type AvailabilitySet \ 77 | --node-count 1 \ 78 | --enable-addons monitoring \ 79 | --attach-acr $ACR_NAME \ 80 | --no-ssh-key 81 | ``` 82 | 83 | Wait a moment for Azure to create the cluster. Fetch credentials so that kubectl can successfully connect to AKS. 84 | 85 | ``` 86 | az aks get-credentials --resource-group $RG_NAME --name $ACK_CLUSTER_NAME 87 | ``` 88 | 89 | Make sure kubectl points to our cluster: 90 | 91 | ``` 92 | kubectl config current-context 93 | ``` 94 | 95 | ## NGINX ingress controller 96 | 97 | Let's create a minimalistic (one replica) `nginx-ingress` controller, disable port 80 as we want to listen only on port 443. NGINX ingress will deploy self-signed cert (read documentation about how to set it up with your own existing certs or let's encrypt). Also, I'm allowing two specific IP address ranges for my app. And yes, comma needs to be escaped. 
For testing you may replace it with 0.0.0.0/0 or remove at all: 98 | 99 | ``` 100 | helm repo add stable https://kubernetes-charts.storage.googleapis.com/ 101 | 102 | helm install nginx-ingress stable/nginx-ingress \ 103 | --set controller.replicaCount=1 \ 104 | --set controller.service.enableHttp=false \ 105 | --set controller.service.loadBalancerSourceRanges={1.2.3.4/32\,5.6.7.8/32} \ 106 | --set controller.nodeSelector."beta\.kubernetes\.io/os"=linux \ 107 | --set defaultBackend.nodeSelector."beta\.kubernetes\.io/os"=linux 108 | ``` 109 | 110 | ## Azure Database for PostgreSQL 111 | 112 | The example uses PostgreSQL. Go ahead and create a new database using Azure Database for PostgreSQL. 113 | 114 | By default newly provisioned DB blocks all traffic. Once the DB is up and running, open it in Azure portal and navigate to "Settings" -> "Connection security". Toggle on the "Allow access to Azure services" option and click "Save". 115 | 116 | Open `kustomization.yaml` and update `database-credentials` secret. 117 | 118 | ## Kubernetes Secrets 119 | 120 | Now that we have storage account and database credentials in `kustomization` it's time to create secrets: 121 | 122 | ``` 123 | kubectl apply -k . 124 | ``` 125 | 126 | The generated secret names have a suffix appended by hashing the contents. This ensures that a new Secret is generated each time the contents is modified. 
Open `migrator-deployment.yaml` and update references to: 127 | 128 | * storage-account secret on lines: 27, 32 129 | * database-credentials secret on lines: 37, 42, 47, 52 130 | 131 | ## Deploy migrator 132 | 133 | Review the config files and if all good deploy migrator: 134 | 135 | ``` 136 | kubectl apply -f migrator-deployment.yaml 137 | kubectl apply -f migrator-service.yaml 138 | kubectl apply -f migrator-ingress.yaml 139 | ``` 140 | 141 | Wait a few moments and check the external IP of the NGINX ingress controller: 142 | 143 | ``` 144 | kubectl get service nginx-ingress-controller 145 | ``` 146 | 147 | ## Accessing migrator 148 | 149 | The migrator is up and running. You can now access it by external IP address: 150 | 151 | ``` 152 | curl -v -k https://65.52.0.0/migrator/ 153 | curl -v -k https://65.52.0.0/migrator/v1/config 154 | ``` 155 | 156 | Check if migrator can load migrations from Azure Blob Storage and connect to Azure Database for PostgreSQL: 157 | 158 | ``` 159 | curl -v -k https://65.52.0.0/migrator/v1/migrations/source 160 | ``` 161 | 162 | When you're ready apply migrations: 163 | 164 | ``` 165 | curl -v -k -X POST -H "Content-Type: application/json" -d '{"mode": "apply", "response": "list"}' https://65.52.0.0/migrator/v1/migrations 166 | ``` 167 | 168 | Enjoy migrator! 169 | 170 | ## Cleanup 171 | 172 | ``` 173 | kubectl delete -k . 
174 | kubectl delete -f migrator-ingress.yaml 175 | kubectl delete -f migrator-service.yaml 176 | kubectl delete -f migrator-deployment.yaml 177 | helm del nginx-ingress 178 | ``` 179 | -------------------------------------------------------------------------------- /tutorials/azure-aks/kustomization.yaml: -------------------------------------------------------------------------------- 1 | secretGenerator: 2 | - name: database-credentials 3 | literals: 4 | - password=how-are-you!1 5 | - username=lukasz@mytestserver.postgres.database.azure.com 6 | - host=mytestserver.postgres.database.azure.com 7 | - database=postgres 8 | - name: storage-account 9 | literals: 10 | - name=storageaccountname 11 | - accessKey=storageaccesskey 12 | -------------------------------------------------------------------------------- /tutorials/azure-aks/migrator-deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: migrator 5 | labels: 6 | app: migrator 7 | spec: 8 | selector: 9 | matchLabels: 10 | app: migrator 11 | tier: frontend 12 | strategy: 13 | type: Recreate 14 | template: 15 | metadata: 16 | labels: 17 | app: migrator 18 | tier: frontend 19 | spec: 20 | containers: 21 | - image: migrator.azurecr.io/migrator:v4.2-azure 22 | name: migrator 23 | env: 24 | - name: AZURE_STORAGE_ACCOUNT 25 | valueFrom: 26 | secretKeyRef: 27 | name: storage-account-1ghhhkcfmf 28 | key: name 29 | - name: AZURE_STORAGE_ACCESS_KEY 30 | valueFrom: 31 | secretKeyRef: 32 | name: storage-account-1ghhhkcfmf 33 | key: accessKey 34 | - name: DATABASE_USERNAME 35 | valueFrom: 36 | secretKeyRef: 37 | name: database-credentials-8mc6566290 38 | key: username 39 | - name: DATABASE_PASSWORD 40 | valueFrom: 41 | secretKeyRef: 42 | name: database-credentials-8mc6566290 43 | key: password 44 | - name: DATABASE_NAME 45 | valueFrom: 46 | secretKeyRef: 47 | name: database-credentials-8mc6566290 48 | key: database 49 
| - name: DATABASE_HOST 50 | valueFrom: 51 | secretKeyRef: 52 | name: database-credentials-8mc6566290 53 | key: host 54 | ports: 55 | - containerPort: 8080 56 | name: migrator 57 | -------------------------------------------------------------------------------- /tutorials/azure-aks/migrator-ingress.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | kind: Ingress 3 | metadata: 4 | name: migrator-ingress 5 | annotations: 6 | kubernetes.io/ingress.class: nginx 7 | spec: 8 | rules: 9 | - http: 10 | paths: 11 | - backend: 12 | serviceName: migrator-service 13 | servicePort: 80 14 | path: / 15 | -------------------------------------------------------------------------------- /tutorials/azure-aks/migrator-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: "migrator-service" 5 | spec: 6 | ports: 7 | - port: 80 8 | targetPort: 8080 9 | protocol: TCP 10 | type: ClusterIP 11 | selector: 12 | app: "migrator" 13 | -------------------------------------------------------------------------------- /tutorials/azure-aks/migrator.yaml: -------------------------------------------------------------------------------- 1 | baseLocation: https://storageaccountname.blob.core.windows.net/mycontainer 2 | driver: postgres 3 | dataSource: "user=${DATABASE_USERNAME} password=${DATABASE_PASSWORD} dbname=${DATABASE_NAME} host=${DATABASE_HOST}" 4 | singleMigrations: 5 | - migrations/ref 6 | - migrations/config 7 | tenantMigrations: 8 | - migrations/tenants 9 | port: 8080 10 | pathPrefix: /migrator 11 | -------------------------------------------------------------------------------- /tutorials/oauth2-proxy-oidc-haproxy/README.md: -------------------------------------------------------------------------------- 1 | # Securing migrator with OIDC 2 | 3 | In this tutorial I will show you how to use OIDC on top of OAuth2 to 
implement authentication and authorization. 4 | 5 | If you are interested in simple OAuth2 authorization see [Securing migrator with OAuth2](../tutorials/oauth2-proxy). 6 | 7 | ## OIDC 8 | 9 | OpenID Connect (OIDC) is an authentication layer on top of OAuth2 authorization framework. 10 | 11 | I will use oauth2-proxy project. It supports multiple OAuth2 providers. To name a few: Google, Facebook, GitHub, LinkedIn, Azure, Keycloak, login.gov, or any OpenID Connect compatible provider. 12 | 13 | As an OIDC provider I will re-use oauth2-proxy local-environment which creates and setups a ready-to-use Keycloak server. I extended the Keycloak server with additional configuration (client mappers to include roles in responses), created two migrator roles and two additional test accounts. 14 | 15 | I also put haproxy between oauth2-proxy and migrator. haproxy will validate the JWT access token and implement access control based on user's roles to allow or deny access to underlying migrator resources. I re-used a great lua script written by haproxytech folks which I modified to work with Keycloak realm roles. 16 | 17 | To learn more about oauth2-proxy visit https://github.com/oauth2-proxy/oauth2-proxy. 18 | 19 | To learn more about Keycloak visit https://www.keycloak.org. 20 | 21 | To learn more about haproxy jwtverify lua script visit https://github.com/haproxytech/haproxy-lua-jwt. 
22 | 23 | ## Docker setup 24 | 25 | The provided `docker-compose.yaml` provision the following services: 26 | 27 | * keycloak - the Identity and Access Management service, available at: http://keycloak.localtest.me:9080 28 | * oauth2-proxy - proxy that protects migrator and connects to keycloak for OAuth2/OIDC authentication, available at: http://gateway.localtest.me:4180 29 | * haproxy - proxy that contains JWT access token validation and user access control logic, available at: http://haproxy.localtest.me:8080 30 | * migrator - deployed internally and accessible only from haproxy and only by authorized users 31 | 32 | > Note: above setup doesn't have a database as this is to only illustrate how to setup OIDC 33 | 34 | To build the test environment execute: 35 | 36 | ``` 37 | docker-compose up -d 38 | ``` 39 | 40 | ## Testing OIDC 41 | 42 | I created 2 test users in Keycloak: 43 | 44 | * `madmin@example.com` - migrator admin, has the following roles: `migrator_admin` and `migrator_user` 45 | * `muser@example.com` - migrator user, has one migrator role: `migrator_user` 46 | 47 | In haproxy.cfg I implemented the following sample rules: 48 | 49 | * all requests starting `/v1` will return 403 Forbidden 50 | * to access `/v2/service` user must have `migrator_user` role 51 | * to access `/v2/config` user must have `migrator_admin` role 52 | 53 | ### Test scenarios 54 | 55 | There are two ways to access migrator: 56 | 57 | 1. getting JWT access token via oauth2-proxy - shown in orange 58 | 1. getting JWT access token directly from Keycloak - shown in blue 59 | 60 | ![migrator OIDC setup](migrator-oidc.png?raw=true) 61 | 62 | Let's test them. 63 | 64 | ### oauth2-proxy - muser@example.com 65 | 66 | 1. Access http://gateway.localtest.me:4180/ 67 | 1. Authenticate using username: `muser@example.com` and password: `password`. 68 | 1. After a successful login you will see `/` response 69 | 1. 
Open http://gateway.localtest.me:4180/v2/config and you will see 403 Forbidden - this user doesn't have `migrator_admin` role 70 | 1. Logout from Keycloak http://keycloak.localtest.me:9080/auth/realms/master/protocol/openid-connect/logout 71 | 1. Invalidate session on auth2-proxy (oauth2-proxy cookie expires in 15 minutes) http://gateway.localtest.me:4180/oauth2/sign_out 72 | 73 | ### oauth2-proxy - madmin@example.com 74 | 75 | 1. Access http://gateway.localtest.me:4180/ 76 | 1. Authenticate using username: `madmin@example.com` and password: `password`. 77 | 1. After a successful login you will see successful `/` response 78 | 1. Open http://gateway.localtest.me:4180/v2/config and now you will see migrator config 79 | 1. Logout from Keycloak http://keycloak.localtest.me:9080/auth/realms/master/protocol/openid-connect/logout 80 | 1. Invalidate session on auth2-proxy (oauth2-proxy cookie expires in 15 minutes) http://gateway.localtest.me:4180/oauth2/sign_out 81 | 82 | ### Keycloak REST API 83 | 84 | 1. Get JWT access token for the `madmin@example.com` user: 85 | 86 | ``` 87 | access_token=$(curl -s http://keycloak.localtest.me:9080/auth/realms/master/protocol/openid-connect/token \ 88 | -H 'Content-Type: application/x-www-form-urlencoded' \ 89 | -d 'username=madmin@example.com' \ 90 | -d 'password=password' \ 91 | -d 'grant_type=password' \ 92 | -d 'client_id=oauth2-proxy' \ 93 | -d 'client_secret=72341b6d-7065-4518-a0e4-50ee15025608' | jq -r '.access_token') 94 | ``` 95 | 96 | 2. Execute migrator action and pass the JWT access token in HTTP Authorization header: 97 | 98 | ``` 99 | curl http://haproxy.localtest.me:8080/v2/config \ 100 | -H "Authorization: Bearer $access_token" 101 | ``` 102 | 103 | ## Miscellaneous 104 | 105 | You can copy JWT access token (haproxy log or Keycloak REST API) and decode it on https://jwt.io. 106 | 107 | You can verify the signature of the JWT token by providing the public key (`keycloak.pem` available in haproxy folder). 
108 | 109 | Public key can be also fetched from: 110 | 111 | ``` 112 | curl http://keycloak.localtest.me:9080/auth/realms/master/ 113 | ``` 114 | 115 | > The response is a JSON and the public key is returned as a string. To be a valid PEM format you need to add `-----BEGIN PUBLIC KEY-----` header, `-----END PUBLIC KEY-----` footer, and break that string into lines of 64 characters. Compare `keycloak.pem` with the above response. 116 | -------------------------------------------------------------------------------- /tutorials/oauth2-proxy-oidc-haproxy/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | version: "3.0" 2 | services: 3 | oauth2-proxy: 4 | image: quay.io/oauth2-proxy/oauth2-proxy:v7.1.3 5 | command: --config /oauth2-proxy.cfg 6 | hostname: gateway 7 | volumes: 8 | - "./oauth2-proxy.cfg:/oauth2-proxy.cfg" 9 | # oauth2-proxy dies when not able to connect to keycloak 10 | restart: unless-stopped 11 | networks: 12 | keycloak: {} 13 | haproxy: {} 14 | oauth2-proxy: {} 15 | depends_on: 16 | - haproxy 17 | - keycloak 18 | ports: 19 | - 4180:4180 20 | 21 | haproxy: 22 | image: lukasz/haproxy-auth-gateway:2.0.0 23 | hostname: haproxy 24 | volumes: 25 | - "./haproxy/keycloak.pem:/etc/haproxy/pem/keycloak.pem" 26 | - "./haproxy/haproxy.cfg:/usr/local/etc/haproxy/haproxy.cfg" 27 | environment: 28 | - OAUTH_PUBKEY_PATH=/etc/haproxy/pem/keycloak.pem 29 | - OAUTH_ISSUER=http://keycloak.localtest.me:9080/auth/realms/master 30 | networks: 31 | migrator: {} 32 | haproxy: {} 33 | depends_on: 34 | - migrator 35 | ports: 36 | - 8080:8080 37 | 38 | migrator: 39 | image: lukasz/migrator:2021 40 | hostname: migrator 41 | volumes: 42 | - "./migrator.yaml:/data/migrator.yaml" 43 | networks: 44 | migrator: {} 45 | 46 | keycloak: 47 | image: quay.io/keycloak/keycloak:15.0.2 48 | hostname: keycloak 49 | command: 50 | [ 51 | "-b", 52 | "0.0.0.0", 53 | "-Djboss.socket.binding.port-offset=1000", 54 | 
"-Dkeycloak.migration.action=import", 55 | "-Dkeycloak.migration.provider=dir", 56 | "-Dkeycloak.migration.dir=/realm-config", 57 | "-Dkeycloak.migration.strategy=IGNORE_EXISTING", 58 | ] 59 | volumes: 60 | - ./keycloak:/realm-config 61 | networks: 62 | keycloak: 63 | aliases: 64 | - keycloak.localtest.me 65 | ports: 66 | - 9080:9080 67 | 68 | networks: 69 | migrator: {} 70 | keycloak: {} 71 | oauth2-proxy: {} 72 | haproxy: {} 73 | -------------------------------------------------------------------------------- /tutorials/oauth2-proxy-oidc-haproxy/haproxy/haproxy.cfg: -------------------------------------------------------------------------------- 1 | global 2 | log stdout local0 debug 3 | daemon 4 | lua-load /usr/local/share/lua/5.3/jwtverify.lua 5 | 6 | # do not harcode env variables in haproxy.cfg - they should be set externally by docker/kubernetes 7 | # below example is just a summary of available configuration options 8 | 9 | # Set env variables used by Lua file 10 | # setenv OAUTH_PUBKEY_PATH /etc/haproxy/pem/keycloak.pem 11 | 12 | # OPTIONAL: OAuth issuer 13 | # setenv OAUTH_ISSUER http://keycloak.localtest.me:9080/auth/realms/master 14 | 15 | # OPTIONAL: OAuth audience 16 | # not set because we use 2 different audiences 17 | # when using oauth2-proxy only the audience would be: 18 | # setenv OAUTH_AUDIENCE oauth2-proxy 19 | # when using Keycloak REST API only the audience would be: 20 | # setenv OAUTH_AUDIENCE account 21 | 22 | defaults 23 | timeout connect 5s 24 | timeout client 5s 25 | timeout server 5s 26 | mode http 27 | log global 28 | 29 | frontend api_gateway 30 | bind :8080 31 | 32 | # API v1 33 | # Deny all requests 34 | http-request deny if { path_beg /v1 } 35 | 36 | # Deny if no Authorization header sent 37 | http-request deny unless { req.hdr(authorization) -m found } 38 | 39 | # Invoke the jwtverify Lua file 40 | http-request lua.jwtverify 41 | 42 | # Deny unless jwtverify set 'authorized' to true 43 | http-request deny unless { 
var(txn.authorized) -m bool } 44 | 45 | # API v2 46 | # /v2/config available to only migrator_admin role 47 | http-request deny if { path_beg /v2/config } ! { var(txn.roles) -m sub migrator_admin } 48 | # /v2/service available to migrator_user role 49 | http-request deny if { path_beg /v2/service } ! { var(txn.roles) -m sub migrator_user } 50 | 51 | use_backend be_migrator 52 | 53 | backend be_migrator 54 | balance roundrobin 55 | server s1 migrator:8080 56 | -------------------------------------------------------------------------------- /tutorials/oauth2-proxy-oidc-haproxy/haproxy/keycloak.pem: -------------------------------------------------------------------------------- 1 | -----BEGIN PUBLIC KEY----- 2 | MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAjdo2HZ5ruNnIbkSeAfFY 3 | pbPvJw3vtz/VuKJerC4mUXYd7qRMhs3VLJZ3mFyeCuO8W81vkGrFiC9KQnX2lHj2 4 | dtA/RWEJw5bpz+JdOFr5pvXg0lQ0sa+hro9afWDygTU4FmLsEi5z98847TbH178R 5 | T6n7+JVqZ9jYU9rSpwVTC8E/4yxSuStmhGCcAkZ6dGhHNBdvGUgwxKYj7dYLRJiI 6 | +nilIdKuxPzxI/YZxZnXBHDdbNXJgDymTQPut99OnBxeZbH38CJ1MNo3VdV1fzOM 7 | GUHe+vn/EOD5E+pXC8PwvJnWU+XHUTFVZeyIXehh3pYLUsq/6bZ1MYsEaFIhznOk 8 | wwIDAQAB 9 | -----END PUBLIC KEY----- -------------------------------------------------------------------------------- /tutorials/oauth2-proxy-oidc-haproxy/keycloak/master-users-0.json: -------------------------------------------------------------------------------- 1 | { 2 | "realm" : "master", 3 | "users" : [ { 4 | "id" : "3356c0a0-d4d5-4436-9c5a-2299c71c08ec", 5 | "createdTimestamp" : 1591297959169, 6 | "username" : "admin@example.com", 7 | "enabled" : true, 8 | "totp" : false, 9 | "emailVerified" : true, 10 | "email" : "admin@example.com", 11 | "credentials" : [ { 12 | "id" : "a1a06ecd-fdc0-4e67-92cd-2da22d724e32", 13 | "type" : "password", 14 | "createdDate" : 1591297959315, 15 | "secretData" : "{\"value\":\"6rt5zuqHVHopvd0FTFE0CYadXTtzY0mDY2BrqnNQGS51/7DfMJeGgj0roNnGMGvDv30imErNmiSOYl+cL9jiIA==\",\"salt\":\"LI0kqr09JB7J9wvr2Hxzzg==\"}", 16 | 
"credentialData" : "{\"hashIterations\":27500,\"algorithm\":\"pbkdf2-sha256\"}" 17 | } ], 18 | "disableableCredentialTypes" : [ ], 19 | "requiredActions" : [ ], 20 | "realmRoles" : [ "offline_access", "admin", "migrator_admin", "uma_authorization", "migrator_user" ], 21 | "clientRoles" : { 22 | "account" : [ "view-profile", "manage-account" ] 23 | }, 24 | "notBefore" : 0, 25 | "groups" : [ ] 26 | }, { 27 | "id" : "cfe980a3-bab6-4e1e-bf3f-e69fb82e20ed", 28 | "createdTimestamp" : 1603893240321, 29 | "username" : "madmin@example.com", 30 | "enabled" : true, 31 | "totp" : false, 32 | "emailVerified" : true, 33 | "email" : "madmin@example.com", 34 | "credentials" : [ { 35 | "id" : "0a01d92c-e5d1-46eb-b573-e5f3dbefa826", 36 | "type" : "password", 37 | "createdDate" : 1603893517321, 38 | "secretData" : "{\"value\":\"dB39suZtlpvVMdx5dxYQfHM4+6eab38l6te/Gg3Hs1ufk+KIu9uaozmzydbquEbnTel7R4rvwo8InOVySEez4w==\",\"salt\":\"CSKyC0uG9skSkYbIjcMgdQ==\"}", 39 | "credentialData" : "{\"hashIterations\":27500,\"algorithm\":\"pbkdf2-sha256\"}" 40 | } ], 41 | "disableableCredentialTypes" : [ ], 42 | "requiredActions" : [ ], 43 | "realmRoles" : [ "offline_access", "migrator_admin", "uma_authorization", "migrator_user" ], 44 | "clientRoles" : { 45 | "account" : [ "view-profile", "manage-account" ] 46 | }, 47 | "notBefore" : 0, 48 | "groups" : [ ] 49 | }, { 50 | "id" : "f79a6508-1318-43f3-b8bd-1d10d1043d32", 51 | "createdTimestamp" : 1603893269815, 52 | "username" : "muser@example.com", 53 | "enabled" : true, 54 | "totp" : false, 55 | "emailVerified" : true, 56 | "email" : "muser@example.com", 57 | "credentials" : [ { 58 | "id" : "409854c3-4255-4ef7-b9a4-c2844bd97376", 59 | "type" : "password", 60 | "createdDate" : 1603893540333, 61 | "secretData" : "{\"value\":\"Exuex4gHetwasRLZno0abUsPvNJgJP6hpU5Ern1DNsiH7JF1QMJkbmk0aU3ne2mh0fYYAJ8BOBgnNlCBFMMVgw==\",\"salt\":\"SfUjhW51/D5BQER+zTmP4w==\"}", 62 | "credentialData" : "{\"hashIterations\":27500,\"algorithm\":\"pbkdf2-sha256\"}" 63 | } ], 64 | 
"disableableCredentialTypes" : [ ], 65 | "requiredActions" : [ ], 66 | "realmRoles" : [ "offline_access", "uma_authorization", "migrator_user" ], 67 | "clientRoles" : { 68 | "account" : [ "view-profile", "manage-account" ] 69 | }, 70 | "notBefore" : 0, 71 | "groups" : [ ] 72 | } ] 73 | } 74 | -------------------------------------------------------------------------------- /tutorials/oauth2-proxy-oidc-haproxy/migrator-oidc.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lukaszbudnik/migrator/66c11837d9ef9492aa222505f8cfd00780cf2629/tutorials/oauth2-proxy-oidc-haproxy/migrator-oidc.png -------------------------------------------------------------------------------- /tutorials/oauth2-proxy-oidc-haproxy/migrator.yaml: -------------------------------------------------------------------------------- 1 | baseLocation: /data/migrations 2 | driver: mysql 3 | dataSource: "root:supersecret@tcp(mysql:3306)/migrator?parseTime=true&timeout=1s" 4 | singleMigrations: 5 | - ref 6 | - config 7 | tenantMigrations: 8 | - tenants 9 | -------------------------------------------------------------------------------- /tutorials/oauth2-proxy-oidc-haproxy/oauth2-proxy.cfg: -------------------------------------------------------------------------------- 1 | http_address="0.0.0.0:4180" 2 | cookie_secret="OQINaROshtE9TcZkNAm-5Zs2Pv3xaWytBmc5W7sPX7w=" 3 | email_domains=["*"] 4 | cookie_secure="false" 5 | upstreams="http://haproxy:8080" 6 | cookie_domains=[".localtest.me"] # Required so cookie can be read on all subdomains. 7 | whitelist_domains=[".localtest.me"] # Required to allow redirection back to original requested target. 
8 | 9 | # pass JWT token to upstream 10 | pass_authorization_header=true 11 | # JWT access token expires after 15m however the oauth2-proxy cookie by default expires after 168h (7 days) 12 | # this would result in oauth2-proxy thinking that session is still valid while JWT access token would long be expired 13 | # oauth2-proxy would keep sending JWT access tokens and haproxy would reject it and show 403 Forbidden error 14 | # we need to sync JWT access token expiry and oauth2-proxy cookie expiry providing a much better user experience 15 | cookie_expire="15m" 16 | 17 | # keycloak provider 18 | client_secret="72341b6d-7065-4518-a0e4-50ee15025608" 19 | client_id="oauth2-proxy" 20 | redirect_url="http://gateway.localtest.me:4180/oauth2/callback" 21 | 22 | # in this case oauth2-proxy is going to visit 23 | # http://keycloak.localtest.me:9080/auth/realms/master/.well-known/openid-configuration for configuration 24 | oidc_issuer_url="http://keycloak.localtest.me:9080/auth/realms/master" 25 | provider="oidc" 26 | provider_display_name="Keycloak" 27 | -------------------------------------------------------------------------------- /tutorials/oauth2-proxy/README.md: -------------------------------------------------------------------------------- 1 | # Securing migrator with OAuth2 2 | 3 | In this tutorial I will show you how to setup OAuth2 authorization in front of migrator. 4 | 5 | ## oauth2-proxy 6 | 7 | I will use oauth2-proxy project. It supports multiple OAuth2 providers. To name a few: Google, Facebook, GitHub, LinkedIn, Azure, Keycloak, login.gov, or any OpenID Connect compatible provider. For the sake of simplicity I will re-use oauth2-proxy local-environment which creates and setups a ready-to-use Keycloak server. 8 | 9 | To learn more about oauth2-proxy visit https://github.com/oauth2-proxy/oauth2-proxy. 10 | 11 | To learn more about Keycloak visit https://www.keycloak.org. 
12 | 13 | ## Docker setup 14 | 15 | I re-used `docker-compose.yaml` from oauth2-proxy local-environment and updated it to provision the following services: 16 | 17 | * keycloak - the Identity and Access Management service, available at: http://keycloak.localtest.me:9080 18 | * oauth2-proxy - proxy that protects migrator and connects to keycloak for OAuth2 authorization, available at: http://gateway.localtest.me:4180 19 | * migrator - deployed internally and accessible only from oauth2-proxy and only by authorized users 20 | 21 | > Note: above setup doesn't have a database as this is to only illustrate how to setup oauth2-proxy. 22 | 23 | To build the test environment execute: 24 | 25 | ``` 26 | docker-compose up -d 27 | ``` 28 | 29 | Access http://gateway.localtest.me:4180/ to initiate a login cycle and authenticate with user `admin@example.com` and password `password`. After a successful login you will see migrator. 30 | 31 | Access http://keycloak.localtest.me:9080 to play around with Keycloak. 32 | 33 | ## GitHub setup 34 | 35 | OAuth2 can be easily setup in GitHub. Open `oauth2-proxy.cfg` and comment lines 9 and below. Follow https://docs.github.com/en/free-pro-team@latest/developers/apps/creating-an-oauth-app documentation to create OAuth2 application and then setup the following 3 parameters in the config file: 36 | 37 | ``` 38 | provider="github" 39 | client_id="XXX" 40 | client_secret="XXX" 41 | ``` 42 | 43 | In case of GitHub you can use additional out-of-the-box features to limit users who can access migrator. For example you can limit access to particular users, team, repository, or organisation. For a full list of GitHub provider features check out oauth2-proxy documentation: https://oauth2-proxy.github.io/oauth2-proxy/auth-configuration#github-auth-provider. 
44 | -------------------------------------------------------------------------------- /tutorials/oauth2-proxy/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | version: '3.0' 2 | services: 3 | 4 | oauth2-proxy: 5 | image: quay.io/oauth2-proxy/oauth2-proxy:v6.1.1 6 | command: --config /oauth2-proxy.cfg 7 | hostname: gateway 8 | volumes: 9 | - "./oauth2-proxy.cfg:/oauth2-proxy.cfg" 10 | # oauth2-proxy dies when not able to connect to keycloak 11 | restart: unless-stopped 12 | networks: 13 | keycloak: {} 14 | migrator: {} 15 | oauth2-proxy: {} 16 | depends_on: 17 | - migrator 18 | - keycloak 19 | ports: 20 | - 4180:4180 21 | 22 | migrator: 23 | container_name: migrator 24 | image: lukasz/migrator:latest 25 | hostname: migrator 26 | volumes: 27 | - "./migrator.yaml:/data/migrator.yaml" 28 | networks: 29 | migrator: {} 30 | 31 | keycloak: 32 | image: jboss/keycloak:11.0.2 33 | hostname: keycloak 34 | command: 35 | [ 36 | '-b', 37 | '0.0.0.0', 38 | '-Djboss.socket.binding.port-offset=1000', 39 | '-Dkeycloak.migration.action=import', 40 | '-Dkeycloak.migration.provider=dir', 41 | '-Dkeycloak.migration.dir=/realm-config', 42 | '-Dkeycloak.migration.strategy=IGNORE_EXISTING', 43 | ] 44 | volumes: 45 | - ./keycloak:/realm-config 46 | networks: 47 | keycloak: 48 | aliases: 49 | - keycloak.localtest.me 50 | ports: 51 | - 9080:9080 52 | 53 | networks: 54 | migrator: {} 55 | keycloak: {} 56 | oauth2-proxy: {} 57 | -------------------------------------------------------------------------------- /tutorials/oauth2-proxy/keycloak/master-users-0.json: -------------------------------------------------------------------------------- 1 | { 2 | "realm" : "master", 3 | "users" : [ { 4 | "id" : "3356c0a0-d4d5-4436-9c5a-2299c71c08ec", 5 | "createdTimestamp" : 1591297959169, 6 | "username" : "admin@example.com", 7 | "email" : "admin@example.com", 8 | "enabled" : true, 9 | "totp" : false, 10 | "emailVerified" : true, 11 | "credentials" : 
[ { 12 | "id" : "a1a06ecd-fdc0-4e67-92cd-2da22d724e32", 13 | "type" : "password", 14 | "createdDate" : 1591297959315, 15 | "secretData" : "{\"value\":\"6rt5zuqHVHopvd0FTFE0CYadXTtzY0mDY2BrqnNQGS51/7DfMJeGgj0roNnGMGvDv30imErNmiSOYl+cL9jiIA==\",\"salt\":\"LI0kqr09JB7J9wvr2Hxzzg==\"}", 16 | "credentialData" : "{\"hashIterations\":27500,\"algorithm\":\"pbkdf2-sha256\"}" 17 | } ], 18 | "disableableCredentialTypes" : [ ], 19 | "requiredActions" : [ ], 20 | "realmRoles" : [ "offline_access", "admin", "uma_authorization" ], 21 | "clientRoles" : { 22 | "account" : [ "view-profile", "manage-account" ] 23 | }, 24 | "notBefore" : 0, 25 | "groups" : [ ] 26 | } ] 27 | } 28 | -------------------------------------------------------------------------------- /tutorials/oauth2-proxy/migrator.yaml: -------------------------------------------------------------------------------- 1 | baseLocation: /data/migrations 2 | driver: mysql 3 | dataSource: "root:supersecret@tcp(mysql:3306)/migrator?parseTime=true&timeout=1s" 4 | singleMigrations: 5 | - ref 6 | - config 7 | tenantMigrations: 8 | - tenants 9 | -------------------------------------------------------------------------------- /tutorials/oauth2-proxy/oauth2-proxy.cfg: -------------------------------------------------------------------------------- 1 | http_address="0.0.0.0:4180" 2 | cookie_secret="OQINaROshtE9TcZkNAm-5Zs2Pv3xaWytBmc5W7sPX7w=" 3 | email_domains=["*"] 4 | cookie_secure="false" 5 | upstreams="http://migrator:8080" 6 | cookie_domains=[".localtest.me"] # Required so cookie can be read on all subdomains. 7 | whitelist_domains=[".localtest.me"] # Required to allow redirection back to original requested target. 
8 | 9 | # keycloak provider 10 | client_secret="72341b6d-7065-4518-a0e4-50ee15025608" 11 | client_id="oauth2-proxy" 12 | redirect_url="http://gateway.localtest.me:4180/oauth2/callback" 13 | 14 | # in this case oauth2-proxy is going to visit 15 | # http://keycloak.localtest.me:9080/auth/realms/master/.well-known/openid-configuration for configuration 16 | oidc_issuer_url="http://keycloak.localtest.me:9080/auth/realms/master" 17 | provider="oidc" 18 | provider_display_name="Keycloak" 19 | -------------------------------------------------------------------------------- /types/types.go: -------------------------------------------------------------------------------- 1 | package types 2 | 3 | import ( 4 | "fmt" 5 | 6 | "github.com/graph-gophers/graphql-go" 7 | ) 8 | 9 | // MigrationType stores information about type of migration 10 | type MigrationType uint32 11 | 12 | const ( 13 | // MigrationTypeSingleMigration is used to mark single migration 14 | MigrationTypeSingleMigration MigrationType = 1 15 | // MigrationTypeTenantMigration is used to mark tenant migrations 16 | MigrationTypeTenantMigration MigrationType = 2 17 | // MigrationTypeSingleScript is used to mark single SQL script which is executed always 18 | MigrationTypeSingleScript MigrationType = 3 19 | // MigrationTypeTenantScript is used to mark tenant SQL scripts which is executed always 20 | MigrationTypeTenantScript MigrationType = 4 21 | ) 22 | 23 | // ImplementsGraphQLType maps MigrationType Go type 24 | // to the graphql scalar type in the schema 25 | func (MigrationType) ImplementsGraphQLType(name string) bool { 26 | return name == "MigrationType" 27 | } 28 | 29 | // String converts MigrationType Go type to string literal 30 | func (t MigrationType) String() string { 31 | switch t { 32 | case MigrationTypeSingleMigration: 33 | return "SingleMigration" 34 | case MigrationTypeTenantMigration: 35 | return "TenantMigration" 36 | case MigrationTypeSingleScript: 37 | return "SingleScript" 38 | case 
MigrationTypeTenantScript: 39 | return "TenantScript" 40 | default: 41 | panic(fmt.Sprintf("Unknown MigrationType value: %v", uint32(t))) 42 | } 43 | } 44 | 45 | // UnmarshalGraphQL converts string literal to MigrationType Go type 46 | func (t *MigrationType) UnmarshalGraphQL(input interface{}) error { 47 | if str, ok := input.(string); ok { 48 | switch str { 49 | case "SingleMigration": 50 | *t = MigrationTypeSingleMigration 51 | case "TenantMigration": 52 | *t = MigrationTypeTenantMigration 53 | case "SingleScript": 54 | *t = MigrationTypeSingleScript 55 | case "TenantScript": 56 | *t = MigrationTypeTenantScript 57 | default: 58 | panic(fmt.Sprintf("Unknown MigrationType literal: %v", str)) 59 | } 60 | return nil 61 | } 62 | return fmt.Errorf("wrong type for MigrationType: %T", input) 63 | } 64 | 65 | // Tenant contains basic information about tenant 66 | type Tenant struct { 67 | Name string `json:"name"` 68 | } 69 | 70 | // Version contains information about migrator versions 71 | type Version struct { 72 | ID int32 `json:"id"` 73 | Name string `json:"name"` 74 | Created graphql.Time `json:"created"` 75 | DBMigrations []DBMigration `json:"dbMigrations"` 76 | } 77 | 78 | // Migration contains basic information about migration 79 | type Migration struct { 80 | Name string `json:"name"` 81 | SourceDir string `json:"sourceDir"` 82 | File string `json:"file"` 83 | MigrationType MigrationType `json:"migrationType"` 84 | Contents string `json:"contents,omitempty"` 85 | CheckSum string `json:"checkSum"` 86 | } 87 | 88 | // DBMigration embeds Migration and adds DB-specific fields 89 | type DBMigration struct { 90 | Migration 91 | ID int32 `json:"id"` 92 | Schema string `json:"schema"` 93 | Created graphql.Time `json:"created"` 94 | } 95 | 96 | // Summary contains summary information about executed migrations 97 | type Summary struct { 98 | VersionID int32 `json:"versionId"` 99 | StartedAt graphql.Time `json:"startedAt"` 100 | Duration float64 `json:"duration"` 101 | 
Tenants int32 `json:"tenants"` 102 | SingleMigrations int32 `json:"singleMigrations"` 103 | TenantMigrations int32 `json:"tenantMigrations"` 104 | TenantMigrationsTotal int32 `json:"tenantMigrationsTotal"` // tenant migrations for all tenants 105 | MigrationsGrandTotal int32 `json:"migrationsGrandTotal"` // total number of all migrations applied 106 | SingleScripts int32 `json:"singleScripts"` 107 | TenantScripts int32 `json:"tenantScripts"` 108 | TenantScriptsTotal int32 `json:"tenantScriptsTotal"` // tenant scripts for all tenants 109 | ScriptsGrandTotal int32 `json:"scriptsGrandTotal"` // total number of all scripts applied 110 | } 111 | 112 | // CreateResults contains results of CreateVersion or CreateTenant 113 | type CreateResults struct { 114 | Summary *Summary 115 | Version *Version 116 | } 117 | 118 | // Action stores information about migrator action 119 | type Action int 120 | 121 | const ( 122 | // ActionApply (the default action) tells migrator to apply all source migrations 123 | ActionApply Action = iota 124 | // ActionSync tells migrator to synchronise source migrations and not apply them 125 | ActionSync 126 | ) 127 | 128 | // ImplementsGraphQLType maps Action Go type 129 | // to the graphql scalar type in the schema 130 | func (Action) ImplementsGraphQLType(name string) bool { 131 | return name == "Action" 132 | } 133 | 134 | // String converts MigrationType Go type to string literal 135 | func (a Action) String() string { 136 | switch a { 137 | case ActionSync: 138 | return "Sync" 139 | case ActionApply: 140 | return "Apply" 141 | default: 142 | panic(fmt.Sprintf("Unknown Action value: %v", uint32(a))) 143 | } 144 | } 145 | 146 | // UnmarshalGraphQL converts string literal to MigrationType Go type 147 | func (a *Action) UnmarshalGraphQL(input interface{}) error { 148 | if str, ok := input.(string); ok { 149 | switch str { 150 | case "Sync": 151 | *a = ActionSync 152 | case "Apply": 153 | *a = ActionApply 154 | default: 155 | return 
fmt.Errorf("unknown Action literal: %v", str) 156 | } 157 | return nil 158 | } 159 | return fmt.Errorf("wrong type for Action: %T", input) 160 | } 161 | 162 | // VersionInput is used by GraphQL to create new version in DB 163 | type VersionInput struct { 164 | VersionName string 165 | Action Action 166 | DryRun bool 167 | } 168 | 169 | // TenantInput is used by GraphQL to create a new tenant in DB 170 | type TenantInput struct { 171 | VersionName string 172 | Action Action 173 | DryRun bool 174 | TenantName string 175 | } 176 | 177 | // APIVersion represents migrator API versions 178 | type APIVersion string 179 | 180 | const ( 181 | // APIV1 - REST API - removed in v2021.0.0 182 | APIV1 APIVersion = "v1" 183 | // APIV2 - GraphQL API - current 184 | APIV2 APIVersion = "v2" 185 | ) 186 | 187 | // VersionInfo contains build information and supported API versions 188 | type VersionInfo struct { 189 | Release string `json:"release"` 190 | Sha string `json:"sha"` 191 | APIVersions []APIVersion `json:"apiVersions"` 192 | } 193 | 194 | // Eclipse MicroProfile Health spec 195 | // https://download.eclipse.org/microprofile/microprofile-health-3.0-RC4/microprofile-health-spec.html 196 | 197 | type HealthStatus string 198 | 199 | const ( 200 | HealthStatusUp HealthStatus = "UP" 201 | HealthStatusDown HealthStatus = "DOWN" 202 | ) 203 | 204 | type HealthResponse struct { 205 | Status HealthStatus `json:"status"` 206 | Checks []HealthChecks `json:"checks"` 207 | } 208 | 209 | type HealthChecks struct { 210 | Name string `json:"name"` 211 | Status HealthStatus `json:"status"` 212 | Data *HealthData `json:"data,omitempty"` // optional thus pointer 213 | } 214 | 215 | type HealthData struct { 216 | Details string `json:"details"` 217 | } 218 | --------------------------------------------------------------------------------