├── .dockerignore ├── renovate.json ├── settings.gradle ├── gradle └── wrapper │ ├── gradle-wrapper.jar │ └── gradle-wrapper.properties ├── src ├── main │ ├── resources │ │ └── META-INF │ │ │ └── gradle-plugins │ │ │ └── docker-compose.properties │ └── groovy │ │ └── com │ │ └── avast │ │ └── gradle │ │ └── dockercompose │ │ ├── DockerComposePlugin.groovy │ │ ├── ServiceHost.groovy │ │ ├── tasks │ │ ├── ComposeDown.groovy │ │ ├── ComposeBuild.groovy │ │ ├── ComposePush.groovy │ │ ├── ComposeLogs.groovy │ │ ├── ComposePull.groovy │ │ ├── ComposeDownForced.groovy │ │ └── ComposeUp.groovy │ │ ├── ServiceInfo.groovy │ │ ├── ContainerInfo.groovy │ │ ├── ComposeExtension.groovy │ │ ├── ComposeConfigParser.groovy │ │ ├── ServiceInfoCache.groovy │ │ ├── util │ │ └── VersionNumber.java │ │ ├── NoOpLogger.groovy │ │ ├── TasksConfigurator.groovy │ │ ├── DockerExecutor.groovy │ │ ├── ComposeExecutor.groovy │ │ └── ComposeSettings.groovy └── test │ └── groovy │ └── com │ └── avast │ └── gradle │ └── dockercompose │ ├── util │ └── VersionNumberTest.groovy │ ├── ServiceInfoCacheTest.groovy │ ├── Fixture.groovy │ ├── DockerExecutorTest.groovy │ ├── ComposeExecutorTest.groovy │ ├── CaptureOutputTest.groovy │ ├── CustomComposeFilesTest.groovy │ ├── ComposeConfigParserTest.groovy │ └── DockerComposePluginTest.groovy ├── init.gradle ├── .github └── workflows │ ├── build.yml │ └── release.yml ├── LICENSE ├── .gitignore ├── gradlew.bat ├── gradlew └── README.md /.dockerignore: -------------------------------------------------------------------------------- 1 | */* 2 | -------------------------------------------------------------------------------- /renovate.json: -------------------------------------------------------------------------------- 1 | { 2 | "extends": [ 3 | "config:base" 4 | ] 5 | } 6 | -------------------------------------------------------------------------------- /settings.gradle: -------------------------------------------------------------------------------- 1 | 
rootProject.name = 'gradle-docker-compose-plugin' 2 | 3 | -------------------------------------------------------------------------------- /gradle/wrapper/gradle-wrapper.jar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/avast/gradle-docker-compose-plugin/HEAD/gradle/wrapper/gradle-wrapper.jar -------------------------------------------------------------------------------- /src/main/resources/META-INF/gradle-plugins/docker-compose.properties: -------------------------------------------------------------------------------- 1 | implementation-class=com.avast.gradle.dockercompose.DockerComposePlugin 2 | -------------------------------------------------------------------------------- /gradle/wrapper/gradle-wrapper.properties: -------------------------------------------------------------------------------- 1 | distributionBase=GRADLE_USER_HOME 2 | distributionPath=wrapper/dists 3 | distributionUrl=https\://services.gradle.org/distributions/gradle-9.2.1-bin.zip 4 | networkTimeout=10000 5 | validateDistributionUrl=true 6 | zipStoreBase=GRADLE_USER_HOME 7 | zipStorePath=wrapper/dists 8 | -------------------------------------------------------------------------------- /init.gradle: -------------------------------------------------------------------------------- 1 | // File to be copied to USER_HOME/.gradle/ directory. 
2 | allprojects { 3 | afterEvaluate { 4 | def dcExt = getExtensions().findByName('dockerCompose') 5 | if (dcExt && dcExt.hasProperty('stopContainers')) { 6 | dcExt.stopContainers = false 7 | logger.info("'stopContainers' property of 'dockerCompose' extension set to 'false' from 'init.gradle' script") 8 | } 9 | } 10 | } 11 | -------------------------------------------------------------------------------- /src/main/groovy/com/avast/gradle/dockercompose/DockerComposePlugin.groovy: -------------------------------------------------------------------------------- 1 | package com.avast.gradle.dockercompose 2 | 3 | import org.gradle.api.Plugin 4 | import org.gradle.api.Project 5 | 6 | class DockerComposePlugin implements Plugin { 7 | @Override 8 | void apply(Project project) { 9 | // project parameter is no required for later Gradle version but we want to support also older Gradle versions 10 | project.extensions.create('dockerCompose', ComposeExtension, project) 11 | } 12 | } 13 | -------------------------------------------------------------------------------- /.github/workflows/build.yml: -------------------------------------------------------------------------------- 1 | name: Build 2 | 3 | on: [push, pull_request] 4 | 5 | jobs: 6 | build: 7 | runs-on: ubuntu-24.04 8 | steps: 9 | - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6 10 | - name: Set up JDK 11 | uses: actions/setup-java@v5 12 | with: 13 | java-version: 17 14 | distribution: 'temurin' 15 | - name: Grant execute permission for gradlew 16 | run: chmod +x gradlew 17 | - name: Check with Gradle 18 | run: ./gradlew check --info 19 | -------------------------------------------------------------------------------- /src/main/groovy/com/avast/gradle/dockercompose/ServiceHost.groovy: -------------------------------------------------------------------------------- 1 | package com.avast.gradle.dockercompose 2 | 3 | import groovy.transform.Immutable 4 | 5 | @Immutable 6 | class ServiceHost { 7 | 
String host 8 | ServiceHostType type 9 | 10 | 11 | @Override 12 | public String toString() { 13 | return "ServiceHost{" + 14 | "host='" + host + '\'' + 15 | ", type=" + type + 16 | '}'; 17 | } 18 | } 19 | 20 | enum ServiceHostType { 21 | NetworkGateway, 22 | RemoteDockerHost, 23 | LocalHost, 24 | Host, 25 | DirectContainerAccess 26 | } 27 | -------------------------------------------------------------------------------- /src/main/groovy/com/avast/gradle/dockercompose/tasks/ComposeDown.groovy: -------------------------------------------------------------------------------- 1 | package com.avast.gradle.dockercompose.tasks 2 | 3 | import groovy.transform.CompileStatic 4 | import org.gradle.api.provider.Property 5 | import org.gradle.api.tasks.Internal 6 | import org.gradle.api.tasks.TaskAction 7 | 8 | @CompileStatic 9 | abstract class ComposeDown extends ComposeDownForced { 10 | @Internal 11 | abstract Property getStopContainers() 12 | 13 | ComposeDown() { 14 | group = 'docker' 15 | description = 'Stops and removes containers of docker-compose project (only if stopContainers is set to true)' 16 | } 17 | 18 | @TaskAction 19 | void down() { 20 | if (stopContainers.get()) { 21 | super.down() 22 | } else { 23 | logger.lifecycle('You\'re trying to stop the containers, but stopContainers is set to false. 
Please use composeDownForced task instead.') 24 | } 25 | } 26 | } 27 | -------------------------------------------------------------------------------- /src/test/groovy/com/avast/gradle/dockercompose/util/VersionNumberTest.groovy: -------------------------------------------------------------------------------- 1 | package com.avast.gradle.dockercompose.util 2 | 3 | import spock.lang.Specification 4 | import spock.lang.Unroll 5 | 6 | import static com.avast.gradle.dockercompose.util.VersionNumber.parse 7 | 8 | class VersionNumberTest extends Specification { 9 | 10 | @Unroll 11 | def "can compare version #a and #b"() { 12 | expect: 13 | parse(a) <=> parse(b) == expected 14 | where: 15 | a | b | expected 16 | "0.0.1" | "0.0.1" | 0 17 | "0.0.1" | "0.0.2" | -1 18 | "0.0.2" | "0.0.1" | 1 19 | "0.1.0" | "0.1.0" | 0 20 | "0.1.0" | "0.1.0" | 0 21 | "1.1.1" | "1.1.1" | 0 22 | "1.1.0" | "1.2.0" | -1 23 | "1.28.0" | "1.16.0" | 1 24 | "2.20.2-desktop.1" | "2.20.2" | 0 25 | "2.20.2+azure-1" | "2.20.2" | 0 26 | } 27 | 28 | def "handles non parseable versions as UNKNOWN"() { 29 | expect: 30 | parse("SomeInvalid") == VersionNumber.UNKNOWN 31 | } 32 | } 33 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2016 Avast 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 
14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /src/test/groovy/com/avast/gradle/dockercompose/ServiceInfoCacheTest.groovy: -------------------------------------------------------------------------------- 1 | package com.avast.gradle.dockercompose 2 | 3 | 4 | import spock.lang.Specification 5 | 6 | class ServiceInfoCacheTest extends Specification { 7 | 8 | def "gets what was set"() { 9 | def f = Fixture.withNginx() 10 | def target = ServiceInfoCache.getInstance(f.project, f.project.tasks.composeDown.nestedName.get()).get() 11 | when: 12 | f.project.tasks.composeBuild.build() 13 | f.project.tasks.composeUp.up() 14 | def original = f.project.tasks.composeUp.servicesInfos 15 | target.set(original, 'state') 16 | def fromCache = target.get({'state'}) 17 | String networkName = fromCache.find().value.firstContainer.inspection.NetworkSettings.Networks.find().key 18 | Integer firstPort = fromCache.find().value.firstContainer.tcpPorts.find().key 19 | then: 20 | noExceptionThrown() 21 | original.toString() == fromCache.toString() 22 | networkName 23 | firstPort == 80 24 | cleanup: 25 | f.project.tasks.composeDown.down() 26 | f.close() 27 | } 28 | 29 | } 30 | -------------------------------------------------------------------------------- /src/main/groovy/com/avast/gradle/dockercompose/ServiceInfo.groovy: -------------------------------------------------------------------------------- 1 | package com.avast.gradle.dockercompose 2 | 3 | import 
com.fasterxml.jackson.annotation.JsonIgnore 4 | import groovy.transform.Immutable 5 | 6 | @Immutable(knownImmutableClasses = [ContainerInfo], copyWith = true) 7 | class ServiceInfo { 8 | String name 9 | /* Key is instance name, for example service_1 */ 10 | Map containerInfos = [:] 11 | 12 | @JsonIgnore String getHost() { firstContainer?.serviceHost.host } 13 | @JsonIgnore Map getPorts() { tcpPorts } 14 | @JsonIgnore Map getTcpPorts() { firstContainer?.tcpPorts ?: [:] } 15 | @JsonIgnore Map getUdpPorts() { firstContainer?.udpPorts ?: [:] } 16 | @JsonIgnore Integer getPort() { firstContainer?.port } 17 | @JsonIgnore Integer getTcpPort() { firstContainer?.tcpPort } 18 | @JsonIgnore Integer getUdpPort() { firstContainer?.udpPort } 19 | 20 | @JsonIgnore ContainerInfo getFirstContainer() { 21 | containerInfos.values()?.find() 22 | } 23 | 24 | def propertyMissing(String name) { 25 | return containerInfos[name] 26 | } 27 | } 28 | -------------------------------------------------------------------------------- /src/main/groovy/com/avast/gradle/dockercompose/tasks/ComposeBuild.groovy: -------------------------------------------------------------------------------- 1 | package com.avast.gradle.dockercompose.tasks 2 | 3 | import com.avast.gradle.dockercompose.ComposeExecutor 4 | import groovy.transform.CompileStatic 5 | import org.gradle.api.DefaultTask 6 | import org.gradle.api.provider.ListProperty 7 | import org.gradle.api.provider.Property 8 | import org.gradle.api.provider.Provider 9 | import org.gradle.api.tasks.Internal 10 | import org.gradle.api.tasks.TaskAction 11 | 12 | @CompileStatic 13 | abstract class ComposeBuild extends DefaultTask { 14 | 15 | @Internal 16 | abstract ListProperty getBuildAdditionalArgs() 17 | 18 | @Internal 19 | abstract ListProperty getStartedServices() 20 | 21 | @Internal 22 | abstract Property getComposeExecutor() 23 | 24 | ComposeBuild() { 25 | group = 'docker' 26 | description = 'Builds images for services of docker-compose project' 27 | } 
28 | 29 | @TaskAction 30 | void build() { 31 | String[] args = ['build'] 32 | args += (List) buildAdditionalArgs.get() 33 | args += (List) startedServices.get() 34 | composeExecutor.get().execute(args) 35 | } 36 | } 37 | -------------------------------------------------------------------------------- /src/main/groovy/com/avast/gradle/dockercompose/tasks/ComposePush.groovy: -------------------------------------------------------------------------------- 1 | package com.avast.gradle.dockercompose.tasks 2 | 3 | import com.avast.gradle.dockercompose.ComposeExecutor 4 | import groovy.transform.CompileStatic 5 | import org.gradle.api.DefaultTask 6 | import org.gradle.api.provider.ListProperty 7 | import org.gradle.api.provider.Property 8 | import org.gradle.api.provider.Provider 9 | import org.gradle.api.tasks.Internal 10 | import org.gradle.api.tasks.TaskAction 11 | 12 | @CompileStatic 13 | abstract class ComposePush extends DefaultTask { 14 | 15 | @Internal 16 | abstract Property getIgnorePushFailure() 17 | 18 | @Internal 19 | abstract ListProperty getPushServices() 20 | 21 | @Internal 22 | abstract Property getComposeExecutor() 23 | 24 | ComposePush() { 25 | group = 'docker' 26 | description = 'Pushes images for services of docker-compose project' 27 | } 28 | 29 | @TaskAction 30 | void push() { 31 | String[] args = ['push'] 32 | if (ignorePushFailure.get()) { 33 | args += '--ignore-push-failures' 34 | } 35 | args += (List) pushServices.get() 36 | composeExecutor.get().execute(args) 37 | } 38 | } -------------------------------------------------------------------------------- /.github/workflows/release.yml: -------------------------------------------------------------------------------- 1 | name: Release 2 | on: 3 | release: 4 | types: [published] 5 | jobs: 6 | build: 7 | runs-on: ubuntu-24.04 8 | steps: 9 | - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6 10 | - name: Set up JDK 11 | uses: actions/setup-java@v5 12 | with: 13 | java-version: 17 14 
| distribution: 'temurin' 15 | - name: Grant execute permission for gradlew 16 | run: chmod +x gradlew 17 | - name: Check with Gradle 18 | run: ./gradlew check --info -Pversion=${{ github.event.release.tag_name }} 19 | - name: Publish with Gradle to Maven Central 20 | run: ./gradlew publish jreleaserDeploy publishPlugins --info -Pversion=${{ github.event.release.tag_name }} 21 | env: 22 | GRADLE_PUBLISH_KEY: ${{ secrets.GRADLE_PORTAL_KEY }} 23 | GRADLE_PUBLISH_SECRET: ${{ secrets.GRADLE_PORTAL_SECRET }} 24 | SIGNING_KEY: ${{ secrets.SIGNING_KEY }} 25 | SIGNING_PUBLIC_KEY: ${{ secrets.SIGNING_PUBLIC_KEY }} 26 | JRELEASER_GPG_PASSPHRASE: ${{ secrets.SIGNING_PASSWORD }} 27 | JRELEASER_MAVENCENTRAL_USERNAME: ${{ secrets.JRELEASER_MAVENCENTRAL_USERNAME }} 28 | JRELEASER_MAVENCENTRAL_PASSWORD: ${{ secrets.JRELEASER_MAVENCENTRAL_PASSWORD }} 29 | -------------------------------------------------------------------------------- /src/main/groovy/com/avast/gradle/dockercompose/tasks/ComposeLogs.groovy: -------------------------------------------------------------------------------- 1 | package com.avast.gradle.dockercompose.tasks 2 | 3 | import com.avast.gradle.dockercompose.ComposeExecutor 4 | import groovy.transform.CompileStatic 5 | import org.gradle.api.DefaultTask 6 | import org.gradle.api.file.DirectoryProperty 7 | import org.gradle.api.provider.Property 8 | import org.gradle.api.provider.Provider 9 | import org.gradle.api.tasks.Internal 10 | import org.gradle.api.tasks.TaskAction 11 | 12 | @CompileStatic 13 | abstract class ComposeLogs extends DefaultTask { 14 | 15 | @Internal 16 | abstract DirectoryProperty getContainerLogToDir() 17 | 18 | @Internal 19 | abstract Property getComposeExecutor() 20 | 21 | ComposeLogs() { 22 | group = 'docker' 23 | description = 'Stores log output from services in containers of docker-compose project' 24 | } 25 | 26 | @TaskAction 27 | void logs() { 28 | composeExecutor.get().serviceNames.each { service -> 29 | println "Extracting container 
log from service '${service}'" 30 | File logFile = containerLogToDir.get().asFile 31 | logFile.mkdirs() 32 | def logStream = new FileOutputStream("${logFile.absolutePath}/${service}.log") 33 | String[] args = ['logs', '-t', service] 34 | composeExecutor.get().executeWithCustomOutputWithExitValue(logStream, args) 35 | } 36 | } 37 | } 38 | -------------------------------------------------------------------------------- /src/main/groovy/com/avast/gradle/dockercompose/tasks/ComposePull.groovy: -------------------------------------------------------------------------------- 1 | package com.avast.gradle.dockercompose.tasks 2 | 3 | import com.avast.gradle.dockercompose.ComposeExecutor 4 | import groovy.transform.CompileStatic 5 | import org.gradle.api.DefaultTask 6 | import org.gradle.api.provider.ListProperty 7 | import org.gradle.api.provider.Property 8 | import org.gradle.api.provider.Provider 9 | import org.gradle.api.tasks.Internal 10 | import org.gradle.api.tasks.TaskAction 11 | 12 | @CompileStatic 13 | abstract class ComposePull extends DefaultTask { 14 | 15 | @Internal 16 | abstract Property getIgnorePullFailure() 17 | 18 | @Internal 19 | abstract ListProperty getPullAdditionalArgs() 20 | 21 | @Internal 22 | abstract ListProperty getStartedServices() 23 | 24 | @Internal 25 | abstract Property getComposeExecutor() 26 | 27 | ComposePull() { 28 | group = 'docker' 29 | description = 'Builds and pulls images of docker-compose project' 30 | } 31 | 32 | @TaskAction 33 | void pull() { 34 | String[] args = ['pull'] 35 | if (ignorePullFailure.get()) { 36 | args += '--ignore-pull-failures' 37 | } 38 | args += (List) pullAdditionalArgs.get() 39 | args += (List) startedServices.get() 40 | composeExecutor.get().execute(args) 41 | } 42 | } 43 | -------------------------------------------------------------------------------- /src/main/groovy/com/avast/gradle/dockercompose/ContainerInfo.groovy: -------------------------------------------------------------------------------- 1 | 
package com.avast.gradle.dockercompose 2 | 3 | import com.fasterxml.jackson.annotation.JsonIgnore 4 | import groovy.transform.Immutable 5 | 6 | @Immutable 7 | class ContainerInfo { 8 | /* For example serviceName_1 */ 9 | String instanceName 10 | ServiceHost serviceHost 11 | /* Mapping from exposed to forwarded port. */ 12 | Map tcpPorts 13 | Map udpPorts 14 | /* Docker inspection */ 15 | Map inspection 16 | @JsonIgnore String getContainerId() { inspection.Id } 17 | @JsonIgnore String getContainerHostname() { inspection.Config.Hostname } 18 | 19 | @JsonIgnore String getHost() { serviceHost.host } 20 | @JsonIgnore Map getPorts() { tcpPorts } 21 | @JsonIgnore Integer getPort() { ports.values().find() } 22 | @JsonIgnore Integer getTcpPort() { tcpPorts.values().find() } 23 | @JsonIgnore Integer getUdpPort() { udpPorts.values().find() } 24 | 25 | 26 | @Override 27 | public String toString() { 28 | return "ContainerInfo{" + 29 | "instanceName='" + instanceName + '\'' + 30 | ", serviceHost=" + serviceHost + 31 | ", tcpPorts=" + tcpPorts + 32 | ", udpPorts=" + udpPorts + 33 | ", inspection=" + inspection + 34 | '}'; 35 | } 36 | } 37 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .gradle 2 | build/ 3 | 4 | # Ignore Gradle GUI config 5 | gradle-app.setting 6 | 7 | # Avoid ignoring Gradle wrapper jar file (.jar files are usually ignored) 8 | !gradle-wrapper.jar 9 | 10 | 11 | ### Intellij ### 12 | # Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio 13 | 14 | *.iml 15 | 16 | ## Directory-based project format: 17 | .idea/ 18 | # if you remove the above rule, at least ignore the following: 19 | 20 | # User-specific stuff: 21 | # .idea/workspace.xml 22 | # .idea/tasks.xml 23 | # .idea/dictionaries 24 | 25 | # Sensitive or high-churn files: 26 | # .idea/dataSources.ids 27 | # .idea/dataSources.xml 28 | # 
.idea/sqlDataSources.xml 29 | # .idea/dynamic.xml 30 | # .idea/uiDesigner.xml 31 | 32 | # Gradle: 33 | # .idea/gradle.xml 34 | # .idea/libraries 35 | 36 | # Mongo Explorer plugin: 37 | # .idea/mongoSettings.xml 38 | 39 | ## File-based project format: 40 | *.ipr 41 | *.iws 42 | 43 | ## Plugin-specific files: 44 | 45 | # IntelliJ 46 | /out/ 47 | 48 | # mpeltonen/sbt-idea plugin 49 | .idea_modules/ 50 | 51 | # JIRA plugin 52 | atlassian-ide-plugin.xml 53 | 54 | # Crashlytics plugin (for Android Studio and IntelliJ) 55 | com_crashlytics_export_strings.xml 56 | crashlytics.properties 57 | crashlytics-build.properties 58 | 59 | # Eclipse 60 | .settings/ 61 | .classpath 62 | .project 63 | /bin/ 64 | 65 | # NetBeans 66 | /.nb-gradle/ 67 | 68 | # Vagrant 69 | .vagrant/ 70 | -------------------------------------------------------------------------------- /src/test/groovy/com/avast/gradle/dockercompose/Fixture.groovy: -------------------------------------------------------------------------------- 1 | package com.avast.gradle.dockercompose 2 | 3 | import org.gradle.api.Project 4 | import org.gradle.testfixtures.ProjectBuilder 5 | 6 | class Fixture implements AutoCloseable { 7 | private final File projectDir 8 | final Project project 9 | final ComposeExtension extension 10 | 11 | static withNginx() { 12 | new Fixture(''' 13 | services: 14 | web: 15 | image: nginx:stable 16 | command: bash -c " echo 'starting nginx' && sleep 5 && nginx -g 'daemon off;'" 17 | ports: 18 | - 80 19 | ''') 20 | } 21 | 22 | static withHelloWorld() { 23 | new Fixture(''' 24 | services: 25 | hello: 26 | image: hello-world 27 | ''') 28 | } 29 | 30 | static plain() { 31 | new Fixture() 32 | } 33 | 34 | static custom(String composeFileContent) { 35 | new Fixture(composeFileContent) 36 | } 37 | 38 | private Fixture(String composeFileContent = null) { 39 | if (composeFileContent) { 40 | projectDir = File.createTempDir("gradle", "projectDir") 41 | new File(projectDir, 'docker-compose.yml') << 
composeFileContent 42 | project = ProjectBuilder.builder().withProjectDir(projectDir).build() 43 | } else { 44 | project = ProjectBuilder.builder().build() 45 | } 46 | project.plugins.apply 'docker-compose' 47 | extension = (ComposeExtension)project.extensions.findByName('dockerCompose') 48 | } 49 | 50 | @Override 51 | void close() throws Exception { 52 | if (projectDir) { 53 | try { 54 | projectDir.delete() 55 | } catch (ignored) { 56 | projectDir.deleteOnExit() 57 | } 58 | } 59 | } 60 | } 61 | -------------------------------------------------------------------------------- /src/test/groovy/com/avast/gradle/dockercompose/DockerExecutorTest.groovy: -------------------------------------------------------------------------------- 1 | package com.avast.gradle.dockercompose 2 | 3 | import spock.lang.Specification 4 | 5 | class DockerExecutorTest extends Specification { 6 | 7 | def "reads Docker platform"() { 8 | def f = Fixture.plain() 9 | when: 10 | String dockerPlatform = f.extension.dockerExecutor.getDockerPlatform() 11 | then: 12 | noExceptionThrown() 13 | !dockerPlatform.empty 14 | } 15 | 16 | def "reads network gateway"() { 17 | def f = Fixture.withNginx() 18 | when: 19 | f.project.tasks.composeBuild.build() 20 | f.project.tasks.composeUp.up() 21 | ServiceInfo serviceInfo = f.project.tasks.composeUp.servicesInfos.find().value 22 | String networkName = serviceInfo.firstContainer.inspection.NetworkSettings.Networks.find().key 23 | String networkGateway = f.extension.dockerExecutor.getNetworkGateway(networkName) 24 | then: 25 | noExceptionThrown() 26 | !networkGateway.empty 27 | cleanup: 28 | f.project.tasks.composeDown.down() 29 | f.close() 30 | } 31 | 32 | def "reads container logs"() { 33 | def f = Fixture.withNginx() 34 | f.project.tasks.composeBuild.build() 35 | f.project.tasks.composeUp.up() 36 | String containerId = f.extension.servicesInfos.web.firstContainer.containerId 37 | when: 38 | String output = f.extension.dockerExecutor.getContainerLogs(containerId) 
39 | then: 40 | output.contains('nginx') 41 | cleanup: 42 | f.project.tasks.composeDown.down() 43 | f.close() 44 | } 45 | 46 | def "expose service info from nested task"() { 47 | def f = Fixture.withNginx() 48 | f.project.plugins.apply 'java' 49 | f.project.dockerCompose { 50 | nested { } 51 | } 52 | when: 53 | f.project.tasks.nestedComposeUp.up() 54 | f.extension.nested.exposeAsSystemProperties(f.project.tasks.test) 55 | then: 56 | f.project.tasks.test.properties.systemProperties.containsKey('web.host') 57 | f.project.tasks.test.properties.systemProperties.containsKey('web.tcp.80') 58 | cleanup: 59 | f.project.tasks.nestedComposeDown.down() 60 | f.close() 61 | } 62 | } 63 | -------------------------------------------------------------------------------- /src/main/groovy/com/avast/gradle/dockercompose/ComposeExtension.groovy: -------------------------------------------------------------------------------- 1 | package com.avast.gradle.dockercompose 2 | 3 | import org.gradle.api.Project 4 | 5 | import javax.inject.Inject 6 | 7 | abstract class ComposeExtension extends ComposeSettings { 8 | @Inject 9 | ComposeExtension(Project project) { 10 | super(project, '', '') 11 | } 12 | 13 | private HashMap settings = [:] 14 | 15 | ComposeSettings getOrCreateNested(String name) { 16 | settings.computeIfAbsent(name, { cloneAsNested(name) }) 17 | } 18 | 19 | ComposeSettings createNested(String name) { 20 | getOrCreateNested(name) 21 | } 22 | 23 | ComposeSettings nested(String name) { 24 | getOrCreateNested(name) 25 | } 26 | 27 | def propertyMissing(String name) { 28 | def s = settings.get(name) 29 | if (s) { 30 | return s 31 | } 32 | throw new MissingPropertyException(name, getClass()) 33 | } 34 | 35 | def methodMissing(String name, def args) { 36 | if (name == "ext") throw new MissingMethodException(name, getClass(), args) 37 | // If the method name is 'isRequiredByXXX' then the name of nested configuration will be XXX 38 | // and we will call isRequiredBy(XXX) for the newly 
created nested configuration. 39 | // The method must have one parameter that is path to the Docker Compose file. 40 | if (name.startsWith('isRequiredBy') && args.length == 1 && args[0]) { 41 | def taskName = name.substring('isRequiredBy'.length()) 42 | if (taskName.empty) throw new RuntimeException('You called isRequiredBy method with an argument that is not of type Task') 43 | taskName = taskName[0].toLowerCase() + taskName.substring(1) 44 | ComposeSettings s = getOrCreateNested(taskName) 45 | s.useComposeFiles = [args[0].toString()] 46 | tasksConfigurator.setupMissingRequiredBy(taskName, s) 47 | s 48 | } else if (args.length == 1 && args[0] instanceof Closure) { 49 | ComposeSettings s = getOrCreateNested(name) 50 | Closure closure = (Closure)args[0].clone() 51 | closure.setResolveStrategy(Closure.DELEGATE_FIRST) 52 | closure.setDelegate(s) 53 | if (closure.getMaximumNumberOfParameters() == 0) { 54 | closure.call() 55 | } else { 56 | closure.call(s) 57 | } 58 | s 59 | } else { 60 | getOrCreateNested(name) 61 | } 62 | } 63 | } 64 | -------------------------------------------------------------------------------- /src/test/groovy/com/avast/gradle/dockercompose/ComposeExecutorTest.groovy: -------------------------------------------------------------------------------- 1 | package com.avast.gradle.dockercompose 2 | 3 | import spock.lang.Shared 4 | import spock.lang.Specification 5 | import spock.lang.Unroll 6 | 7 | class ComposeExecutorTest extends Specification { 8 | @Shared 9 | def composeV2_webMasterWithDeps = 10 | ''' 11 | services: 12 | web0: 13 | image: nginx:stable 14 | ports: 15 | - 80 16 | web1: 17 | image: nginx:stable 18 | ports: 19 | - 80 20 | depends_on: 21 | - web0 22 | webMaster: 23 | image: nginx:stable 24 | ports: 25 | - 80 26 | depends_on: 27 | - web1 28 | ''' 29 | 30 | @Unroll 31 | def "getServiceNames calculates service names correctly when includeDependencies is #includeDependencies" () { 32 | def f = Fixture.custom(composeV2_webMasterWithDeps) 33 
| f.project.plugins.apply 'java' 34 | f.project.dockerCompose.includeDependencies = includeDependencies 35 | f.project.dockerCompose.startedServices = ['webMaster'] 36 | f.project.plugins.apply 'docker-compose' 37 | 38 | when: 39 | def configuredServices = ComposeExecutor.getInstance(f.project, f.project.dockerCompose).get().getServiceNames() 40 | 41 | then: 42 | configuredServices.containsAll(expectedServices) 43 | 44 | cleanup: 45 | f.close() 46 | 47 | where: 48 | // test it for both compose file version 1 and 2 49 | includeDependencies | expectedServices 50 | true | ["webMaster", "web0", "web1"] 51 | false | ["webMaster"] 52 | } 53 | 54 | @Unroll 55 | def "getDockerComposeBaseCommand returns correct values when useDockerComposeV2 is #useDockerComposeV2" () { 56 | def f = Fixture.withHelloWorld() 57 | f.project.plugins.apply 'java' 58 | 59 | if(useDockerComposeV2 != null) { 60 | f.project.dockerCompose.useDockerComposeV2 = useDockerComposeV2 61 | } 62 | 63 | f.project.plugins.apply 'docker-compose' 64 | 65 | when: 66 | def actual = ComposeExecutor.getInstance(f.project, f.project.dockerCompose).get().getDockerComposeBaseCommand() 67 | 68 | then: 69 | expectedDockerComposeBinaryArgs.size() == actual.size() 70 | actual.containsAll(expectedDockerComposeBinaryArgs) 71 | 72 | cleanup: 73 | f.close() 74 | 75 | where: 76 | useDockerComposeV2 | expectedDockerComposeBinaryArgs 77 | true | ["docker", "compose"] 78 | false | ["docker-compose"] 79 | null | ["docker", "compose"] 80 | } 81 | } 82 | -------------------------------------------------------------------------------- /src/main/groovy/com/avast/gradle/dockercompose/ComposeConfigParser.groovy: -------------------------------------------------------------------------------- 1 | package com.avast.gradle.dockercompose 2 | 3 | import org.gradle.internal.impldep.com.google.common.annotations.VisibleForTesting 4 | import org.yaml.snakeyaml.Yaml 5 | 6 | /** 7 | * Reads information from the output of docker-compose config 8 | 
*/ 9 | class ComposeConfigParser 10 | { 11 | /** 12 | * Given the result of docker-compose config, parses through the output and builds a dependency graph between a service and the service's dependencies. The full graph is traversed, such that child dependencies are calculated 13 | * @param composeConfigOutput the output of docker-compose config 14 | * @return a map of a service's dependencies keyed by the service. 15 | */ 16 | static Map> findServiceDependencies (String composeConfigOutput) 17 | { 18 | Map parsed = new Yaml().load(composeConfigOutput) 19 | // if there is 'version' on top-level then information about services is in 'services' sub-tree 20 | Map services = (parsed.services ? parsed.services : parsed) 21 | Map> declaredServiceDependencies = services.collectEntries { [(it.key): getDirectServiceDependencies(it.value)] } 22 | services.keySet().collectEntries { [(it): calculateDependenciesFromGraph(it, declaredServiceDependencies)] } 23 | } 24 | 25 | protected static Set getDirectServiceDependencies(Map service) { 26 | List dependencies = [] 27 | if (service.depends_on) 28 | { 29 | def dependsOn = service.depends_on 30 | // just a list of services without properties 31 | if(dependsOn instanceof List) 32 | { 33 | dependencies.addAll(dependsOn) 34 | } 35 | // services that have properties 36 | if(dependsOn instanceof Map) 37 | { 38 | dependencies.addAll(dependsOn.keySet()) 39 | } 40 | } 41 | // in version one, links established service names 42 | if (service.links) 43 | { 44 | dependencies.addAll(service.links) 45 | } 46 | dependencies.toSet() 47 | } 48 | 49 | /** 50 | * Given a map of a service's declared dependencies, calculates the full dependency set for a given service. 
51 | * @param declaredDependencies a map of service's dependencies 52 | * @return a set of the service's full dependencies 53 | */ 54 | @VisibleForTesting 55 | protected static Set calculateDependenciesFromGraph(String serviceName, Map> declaredDependencies) { 56 | def toVisit = [] 57 | toVisit.add(serviceName) 58 | Set serviceDependencies = [] 59 | while(!toVisit.isEmpty()) { 60 | String visitedService = toVisit.removeAt(0) 61 | def dependents = declaredDependencies.get(visitedService) 62 | if(dependents) { 63 | toVisit.addAll(dependents) 64 | serviceDependencies.addAll(dependents) 65 | } 66 | } 67 | serviceDependencies 68 | } 69 | } 70 | -------------------------------------------------------------------------------- /gradlew.bat: -------------------------------------------------------------------------------- 1 | @rem 2 | @rem Copyright 2015 the original author or authors. 3 | @rem 4 | @rem Licensed under the Apache License, Version 2.0 (the "License"); 5 | @rem you may not use this file except in compliance with the License. 6 | @rem You may obtain a copy of the License at 7 | @rem 8 | @rem https://www.apache.org/licenses/LICENSE-2.0 9 | @rem 10 | @rem Unless required by applicable law or agreed to in writing, software 11 | @rem distributed under the License is distributed on an "AS IS" BASIS, 12 | @rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | @rem See the License for the specific language governing permissions and 14 | @rem limitations under the License. 
15 | @rem 16 | @rem SPDX-License-Identifier: Apache-2.0 17 | @rem 18 | 19 | @if "%DEBUG%"=="" @echo off 20 | @rem ########################################################################## 21 | @rem 22 | @rem Gradle startup script for Windows 23 | @rem 24 | @rem ########################################################################## 25 | 26 | @rem Set local scope for the variables with windows NT shell 27 | if "%OS%"=="Windows_NT" setlocal 28 | 29 | set DIRNAME=%~dp0 30 | if "%DIRNAME%"=="" set DIRNAME=. 31 | @rem This is normally unused 32 | set APP_BASE_NAME=%~n0 33 | set APP_HOME=%DIRNAME% 34 | 35 | @rem Resolve any "." and ".." in APP_HOME to make it shorter. 36 | for %%i in ("%APP_HOME%") do set APP_HOME=%%~fi 37 | 38 | @rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. 39 | set DEFAULT_JVM_OPTS="-Xmx64m" "-Xms64m" 40 | 41 | @rem Find java.exe 42 | if defined JAVA_HOME goto findJavaFromJavaHome 43 | 44 | set JAVA_EXE=java.exe 45 | %JAVA_EXE% -version >NUL 2>&1 46 | if %ERRORLEVEL% equ 0 goto execute 47 | 48 | echo. 1>&2 49 | echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. 1>&2 50 | echo. 1>&2 51 | echo Please set the JAVA_HOME variable in your environment to match the 1>&2 52 | echo location of your Java installation. 1>&2 53 | 54 | goto fail 55 | 56 | :findJavaFromJavaHome 57 | set JAVA_HOME=%JAVA_HOME:"=% 58 | set JAVA_EXE=%JAVA_HOME%/bin/java.exe 59 | 60 | if exist "%JAVA_EXE%" goto execute 61 | 62 | echo. 1>&2 63 | echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME% 1>&2 64 | echo. 1>&2 65 | echo Please set the JAVA_HOME variable in your environment to match the 1>&2 66 | echo location of your Java installation. 
1>&2 67 | 68 | goto fail 69 | 70 | :execute 71 | @rem Setup the command line 72 | 73 | 74 | 75 | @rem Execute Gradle 76 | "%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -jar "%APP_HOME%\gradle\wrapper\gradle-wrapper.jar" %* 77 | 78 | :end 79 | @rem End local scope for the variables with windows NT shell 80 | if %ERRORLEVEL% equ 0 goto mainEnd 81 | 82 | :fail 83 | rem Set variable GRADLE_EXIT_CONSOLE if you need the _script_ return code instead of 84 | rem the _cmd.exe /c_ return code! 85 | set EXIT_CODE=%ERRORLEVEL% 86 | if %EXIT_CODE% equ 0 set EXIT_CODE=1 87 | if not ""=="%GRADLE_EXIT_CONSOLE%" exit %EXIT_CODE% 88 | exit /b %EXIT_CODE% 89 | 90 | :mainEnd 91 | if "%OS%"=="Windows_NT" endlocal 92 | 93 | :omega 94 | -------------------------------------------------------------------------------- /src/main/groovy/com/avast/gradle/dockercompose/ServiceInfoCache.groovy: -------------------------------------------------------------------------------- 1 | package com.avast.gradle.dockercompose 2 | 3 | 4 | import groovy.json.JsonBuilder 5 | import groovy.json.JsonSlurper 6 | import org.gradle.api.Project 7 | import org.gradle.api.file.RegularFileProperty 8 | import org.gradle.api.logging.Logger 9 | import org.gradle.api.logging.Logging 10 | import org.gradle.api.provider.Provider 11 | import org.gradle.api.services.BuildService 12 | import org.gradle.api.services.BuildServiceParameters 13 | 14 | import java.nio.file.Files 15 | import java.util.function.Supplier 16 | 17 | abstract class ServiceInfoCache implements BuildService { 18 | static interface Parameters extends BuildServiceParameters { 19 | abstract RegularFileProperty getServicesInfosFile() 20 | abstract RegularFileProperty getStateFile() 21 | } 22 | 23 | static Provider getInstance(Project project, String nestedName) { 24 | String serviceId = "${ServiceInfoCache.class.canonicalName} $project.path $nestedName" 25 | return 
project.gradle.sharedServices.registerIfAbsent(serviceId, ServiceInfoCache) { 26 | def buildDirectory = project.layout.buildDirectory 27 | it.parameters.servicesInfosFile = buildDirectory.file("dockerCompose/${nestedName}servicesInfos.json") 28 | it.parameters.stateFile = buildDirectory.file("dockerCompose/${nestedName}state.txt") 29 | } 30 | } 31 | 32 | private static final Logger logger = Logging.getLogger(ServiceInfoCache.class) 33 | 34 | private File getServicesInfosFile() { 35 | return parameters.servicesInfosFile.asFile.get() 36 | } 37 | 38 | private File getStateFile() { 39 | parameters.stateFile.asFile.get() 40 | } 41 | 42 | Map get(Supplier stateSupplier) { 43 | if (servicesInfosFile.exists() && stateFile.exists()) { 44 | Map deserialized = new JsonSlurper().parse(servicesInfosFile) 45 | String cachedState = stateFile.text 46 | String currentState = stateSupplier.get() 47 | if (cachedState == currentState) { 48 | return deserialized.collectEntries { k, v -> [k, deserializeServiceInfo(v)] } 49 | } else { 50 | logger.lifecycle("Current and cached states are different, cannot use the cached service infos.") 51 | logger.info("Cached state:\n$cachedState\nCurrent state:\n$currentState") 52 | } 53 | } 54 | return null 55 | } 56 | 57 | void set(Map servicesInfos, String state) { 58 | Files.createDirectories(servicesInfosFile.parentFile.toPath()) 59 | servicesInfosFile.createNewFile() 60 | servicesInfosFile.text = new JsonBuilder(servicesInfos).toPrettyString() 61 | stateFile.createNewFile() 62 | stateFile.text = state 63 | } 64 | 65 | void clear() { 66 | servicesInfosFile.delete() 67 | stateFile.delete() 68 | } 69 | 70 | ServiceInfo deserializeServiceInfo(Map m) { 71 | Map ci = m.containerInfos 72 | new ServiceInfo(m.name, ci.collectEntries { k, v -> [(k): deserializeContainerInfo(v)] }) 73 | } 74 | 75 | ContainerInfo deserializeContainerInfo(Map m) { 76 | Map tcpPorts = m.tcpPorts.collectEntries { k, v -> [(Integer.parseInt(k)): v] } 77 | Map udpPorts = 
m.udpPorts.collectEntries { k, v -> [(Integer.parseInt(k)): v] } 78 | new ContainerInfo(instanceName: m.instanceName, serviceHost: new ServiceHost(m.serviceHost as HashMap), tcpPorts: tcpPorts, udpPorts: udpPorts, inspection: m.inspection) 79 | } 80 | 81 | boolean startupFailed = false 82 | } 83 | -------------------------------------------------------------------------------- /src/main/groovy/com/avast/gradle/dockercompose/util/VersionNumber.java: -------------------------------------------------------------------------------- 1 | package com.avast.gradle.dockercompose.util; 2 | 3 | import javax.annotation.Nullable; 4 | import java.util.Objects; 5 | 6 | /** 7 | * This class is a simplified version of the deprecated org.gradle.util.VersionNumber class 8 | * See https://github.com/gradle/gradle/blob/7d8cacafe70e5c4cc06173550cb13511cfbf3749/subprojects/core/src/main/java/org/gradle/util/VersionNumber.java 9 | * causing compatibility issues with Gradle 8.1 onwards. 10 | */ 11 | public class VersionNumber implements Comparable { 12 | public static final VersionNumber UNKNOWN = new VersionNumber(0, 0, 0); 13 | 14 | private final int major; 15 | private final int minor; 16 | private final int micro; 17 | 18 | private VersionNumber(int major, int minor, int micro) { 19 | this.major = major; 20 | this.minor = minor; 21 | this.micro = micro; 22 | } 23 | 24 | @Override 25 | public int compareTo(VersionNumber other) { 26 | if (major != other.major) { 27 | return major - other.major; 28 | } 29 | if (minor != other.minor) { 30 | return minor - other.minor; 31 | } 32 | if (micro != other.micro) { 33 | return micro - other.micro; 34 | } 35 | return 0; 36 | } 37 | 38 | @Override 39 | public boolean equals(@Nullable Object other) { 40 | return other instanceof VersionNumber && compareTo((VersionNumber) other) == 0; 41 | } 42 | 43 | @Override 44 | public int hashCode() { 45 | return Objects.hash(major, minor, micro); 46 | } 47 | 48 | @Override 49 | public String toString() { 50 | 
return String.format("%d.%d.%d", major, minor, micro); 51 | } 52 | 53 | public static VersionNumber parse(String versionString) { 54 | if (versionString == null || versionString.length() == 0) { 55 | return UNKNOWN; 56 | } 57 | Scanner scanner = new Scanner(versionString); 58 | 59 | int major = 0; 60 | int minor = 0; 61 | int micro = 0; 62 | 63 | if (!scanner.hasDigit()) { 64 | return UNKNOWN; 65 | } 66 | major = scanner.scanDigit(); 67 | if (scanner.isSeparatorAndDigit()) { 68 | scanner.skipSeparator(); 69 | minor = scanner.scanDigit(); 70 | if (scanner.isSeparatorAndDigit()) { 71 | scanner.skipSeparator(); 72 | micro = scanner.scanDigit(); 73 | } 74 | } 75 | 76 | if (scanner.isEnd() || scanner.hasSpecifierSeparator()) { 77 | return new VersionNumber(major, minor, micro); 78 | } 79 | 80 | return UNKNOWN; 81 | } 82 | 83 | private static class Scanner { 84 | int pos; 85 | final String str; 86 | 87 | private Scanner(String string) { 88 | this.str = string; 89 | } 90 | 91 | boolean hasDigit() { 92 | return pos < str.length() && Character.isDigit(str.charAt(pos)); 93 | } 94 | 95 | boolean hasSpecifierSeparator() { 96 | return pos < str.length() && (str.charAt(pos) == '-' || str.charAt(pos) == '+'); 97 | } 98 | 99 | boolean isSeparatorAndDigit() { 100 | return pos < str.length() - 1 && isSeparator() && Character.isDigit(str.charAt(pos + 1)); 101 | } 102 | 103 | private boolean isSeparator() { 104 | return str.charAt(pos) == '.'; 105 | } 106 | 107 | int scanDigit() { 108 | int start = pos; 109 | while (hasDigit()) { 110 | pos++; 111 | } 112 | return Integer.parseInt(str.substring(start, pos)); 113 | } 114 | 115 | public boolean isEnd() { 116 | return pos == str.length(); 117 | } 118 | 119 | public void skipSeparator() { 120 | pos++; 121 | } 122 | } 123 | } 124 | -------------------------------------------------------------------------------- /src/main/groovy/com/avast/gradle/dockercompose/tasks/ComposeDownForced.groovy: 
package com.avast.gradle.dockercompose.tasks

import com.avast.gradle.dockercompose.ComposeExecutor
import com.avast.gradle.dockercompose.RemoveImages
import com.avast.gradle.dockercompose.ServiceInfoCache
import com.avast.gradle.dockercompose.util.VersionNumber
import org.gradle.api.DefaultTask
import org.gradle.api.file.RegularFileProperty
import org.gradle.api.provider.ListProperty
import org.gradle.api.provider.Property
import org.gradle.api.tasks.Internal
import org.gradle.api.tasks.TaskAction

import java.time.Duration

/**
 * Task that stops and removes the containers of the docker-compose project.
 * Depending on configuration it either removes only the services started by this plugin
 * (`docker-compose rm`) or tears down the whole project (`docker-compose down`),
 * optionally removing volumes and images as well.
 */
abstract class ComposeDownForced extends DefaultTask {

    // Timeout passed to `docker-compose stop --timeout`.
    @Internal
    abstract Property<Duration> getDockerComposeStopTimeout()

    // When false, containers are only stopped, never removed.
    @Internal
    abstract Property<Boolean> getRemoveContainers()

    // Services explicitly started by the plugin; when non-empty, only these are removed.
    @Internal
    abstract ListProperty<String> getStartedServices()

    @Internal
    abstract Property<Boolean> getRemoveVolumes()

    @Internal
    abstract Property<RemoveImages> getRemoveImages()

    @Internal
    abstract ListProperty<String> getDownAdditionalArgs()

    // Optional file that captures the output of the down command (appended).
    @Internal
    abstract RegularFileProperty getComposeLogToFile()

    @Internal
    abstract Property<String> getNestedName()

    @Internal
    abstract Property<ComposeExecutor> getComposeExecutor()

    @Internal
    abstract Property<ServiceInfoCache> getServiceInfoCache()

    ComposeDownForced() {
        group = 'docker'
        description = 'Stops and removes containers of docker-compose project'
    }

    @TaskAction
    void down() {
        def servicesToStop = composeExecutor.get().serviceNames
        // The cached service infos are stale once containers go away.
        serviceInfoCache.get().clear()
        composeExecutor.get().execute(*['stop', '--timeout', dockerComposeStopTimeout.get().getSeconds().toString(), *servicesToStop])
        if (removeContainers.get()) {
            if (composeExecutor.get().version >= VersionNumber.parse('1.6.0')) {
                String[] args = []
                if (!startedServices.get().empty) {
                    // Only remove the services this plugin started; '-v' also drops their anonymous volumes.
                    args += ['rm', '-f']
                    if (removeVolumes.get()) {
                        args += ['-v']
                    }
                    args += servicesToStop
                } else {
                    // No explicit service list: tear down the whole compose project.
                    args += ['down']
                    switch (removeImages.get()) {
                        case RemoveImages.All:
                        case RemoveImages.Local:
                            args += ['--rmi', "${removeImages.get()}".toLowerCase()]
                            break
                        default:
                            break
                    }
                    if (removeVolumes.get()) {
                        args += ['--volumes']
                    }
                    if (composeExecutor.get().shouldRemoveOrphans()) {
                        args += '--remove-orphans'
                    }
                    args += downAdditionalArgs.get()
                }
                def composeLog = null
                if (composeLogToFile.isPresent()) {
                    File logFile = composeLogToFile.get().asFile
                    logger.debug "Logging docker-compose down to: $logFile"
                    logFile.parentFile.mkdirs()
                    composeLog = new FileOutputStream(logFile, true)
                }
                try {
                    composeExecutor.get().executeWithCustomOutputWithExitValue(composeLog, args)
                } finally {
                    // Fix: the log stream was previously never closed, leaking a file handle per run.
                    composeLog?.close()
                }
            } else {
                // docker-compose < 1.6.0 has no 'down'; fall back to plain 'rm -f'.
                if (!startedServices.get().empty) {
                    composeExecutor.get().execute(*['rm', '-f', *servicesToStop])
                } else {
                    composeExecutor.get().execute('rm', '-f')
                }
            }
        }
    }
}
off;'" 17 | ports: 18 | - 80 19 | ''' 20 | 21 | def "captures container output to stdout"() { 22 | def f = Fixture.custom(composeFileContent) 23 | def stdout = new StringBuffer() 24 | new Slf4jLoggingConfigurer(new OutputEventListener() { 25 | @Override 26 | void onOutput(OutputEvent outputEvent) { 27 | if (outputEvent instanceof LogEvent) { 28 | stdout.append(((LogEvent) outputEvent).message + '\n') 29 | } 30 | } 31 | }).configure(LogLevel.LIFECYCLE) 32 | 33 | when: 34 | f.extension.captureContainersOutput = true 35 | f.project.tasks.composeBuild.build() 36 | f.project.tasks.composeUp.up() 37 | then: 38 | noExceptionThrown() 39 | stdout.toString().contains("web_1 | here is some output\nweb_1 | and some more") || 40 | (stdout.toString().contains("web-1 | here is some output") && 41 | stdout.toString().contains("web-1 | and some more")) 42 | cleanup: 43 | f.project.tasks.composeDown.down() 44 | f.close() 45 | } 46 | 47 | def "captures container output to file"() { 48 | def f = Fixture.custom(composeFileContent) 49 | def logFile = new File(f.project.projectDir, "web.log") 50 | when: 51 | f.extension.captureContainersOutputToFile = logFile 52 | f.project.tasks.composeBuild.build() 53 | f.project.tasks.composeUp.up() 54 | then: 55 | noExceptionThrown() 56 | logFile.text.contains("web_1 | here is some output\nweb_1 | and some more") || 57 | (logFile.text.contains("web-1 | here is some output") && 58 | logFile.text.contains("web-1 | and some more")) 59 | cleanup: 60 | f.project.tasks.composeDown.down() 61 | f.close() 62 | } 63 | 64 | def "captures container output to file path"() { 65 | def f = Fixture.custom(composeFileContent) 66 | def logFile = new File(f.project.projectDir, "web.log") 67 | when: 68 | f.extension.captureContainersOutputToFile = f.project.file("${logFile.absolutePath}") 69 | f.project.tasks.composeBuild.build() 70 | f.project.tasks.composeUp.up() 71 | then: 72 | noExceptionThrown() 73 | logFile.text.contains("web_1 | here is some output\nweb_1 | and 
some more") || 74 | (logFile.text.contains("web-1 | here is some output") && 75 | logFile.text.contains("web-1 | and some more")) 76 | cleanup: 77 | f.project.tasks.composeDown.down() 78 | f.close() 79 | } 80 | 81 | def "captures service output to separate file"() { 82 | def f = Fixture.custom(composeFileContent) 83 | def logDir = new File(f.project.projectDir, "logDir") 84 | when: 85 | f.extension.captureContainersOutputToFiles = logDir 86 | f.project.tasks.composeBuild.build() 87 | f.project.tasks.composeUp.up() 88 | then: 89 | noExceptionThrown() 90 | def logFile = logDir.toPath().resolve('web.log').toFile() 91 | logFile.text.contains("web_1 | here is some output\nweb_1 | and some more") || 92 | (logFile.text.contains("web-1 | here is some output") && 93 | logFile.text.contains("web-1 | and some more")) 94 | cleanup: 95 | f.project.tasks.composeDown.down() 96 | f.close() 97 | } 98 | 99 | def "captures up and down commands to single file"() { 100 | def f = Fixture.custom(composeFileContent) 101 | def logFile = new File(f.project.projectDir, "compose.log") 102 | when: 103 | f.extension.composeLogToFile = f.project.file("${logFile.absolutePath}") 104 | f.project.tasks.composeBuild.build() 105 | f.project.tasks.composeUp.up() 106 | then: 107 | f.project.tasks.composeDown.down() 108 | noExceptionThrown() 109 | logFile.text.contains("Creating") 110 | logFile.text.contains("Removing") 111 | cleanup: 112 | f.project.tasks.composeDown.down() 113 | f.close() 114 | } 115 | } 116 | -------------------------------------------------------------------------------- /src/test/groovy/com/avast/gradle/dockercompose/CustomComposeFilesTest.groovy: -------------------------------------------------------------------------------- 1 | package com.avast.gradle.dockercompose 2 | 3 | import org.gradle.testfixtures.ProjectBuilder 4 | import spock.lang.Specification 5 | 6 | class CustomComposeFilesTest extends Specification { 7 | def "can specify compose files to use"() { 8 | def projectDir 
= File.createTempDir("gradle", "projectDir") 9 | new File(projectDir, 'original.yml') << ''' 10 | services: 11 | web: 12 | image: nginx:stable 13 | ports: 14 | - 80 15 | ''' 16 | new File(projectDir, 'override.yml') << ''' 17 | services: 18 | web: 19 | ports: 20 | - 8080 21 | ''' 22 | def project = ProjectBuilder.builder().withProjectDir(projectDir).build() 23 | project.plugins.apply 'docker-compose' 24 | def extension = (ComposeExtension) project.extensions.findByName('dockerCompose') 25 | def integrationTestTask = project.tasks.create('integrationTest').doLast { 26 | ContainerInfo webInfo = project.dockerCompose.servicesInfos.web.firstContainer 27 | assert webInfo.ports.containsKey(8080) 28 | assert webInfo.ports.containsKey(80) 29 | } 30 | when: 31 | extension.waitForTcpPorts = false // port 8080 is a fake 32 | extension.useComposeFiles = ['original.yml', 'override.yml'] 33 | project.tasks.composeUp.up() 34 | integrationTestTask.actions.forEach { it.execute(integrationTestTask) } 35 | then: 36 | noExceptionThrown() 37 | cleanup: 38 | project.tasks.composeDown.down() 39 | try { 40 | projectDir.delete() 41 | } catch (ignored) { 42 | projectDir.deleteOnExit() 43 | } 44 | 45 | } 46 | 47 | def "docker-compose.override.yml file honoured when no files specified"() { 48 | def projectDir = File.createTempDir("gradle", "projectDir") 49 | new File(projectDir, 'docker-compose.yml') << ''' 50 | services: 51 | web: 52 | image: nginx:stable 53 | ''' 54 | new File(projectDir, 'docker-compose.override.yml') << ''' 55 | services: 56 | web: 57 | ports: 58 | - 80 59 | devweb: 60 | image: nginx:stable 61 | ports: 62 | - 80 63 | ''' 64 | def project = ProjectBuilder.builder().withProjectDir(projectDir).build() 65 | project.plugins.apply 'docker-compose' 66 | def integrationTestTask = project.tasks.create('integrationTest').doLast { 67 | assert project.dockerCompose.servicesInfos.web.firstContainer.ports.containsKey(80) 68 | assert 
project.dockerCompose.servicesInfos.devweb.firstContainer.ports.containsKey(80) 69 | } 70 | when: 71 | project.tasks.composeUp.up() 72 | integrationTestTask.actions.forEach { it.execute(integrationTestTask) } 73 | then: 74 | noExceptionThrown() 75 | cleanup: 76 | project.tasks.composeDown.down() 77 | try { 78 | projectDir.delete() 79 | } catch (ignored) { 80 | projectDir.deleteOnExit() 81 | } 82 | } 83 | 84 | def "docker-compose.override.yml file ignored when files are specified"() { 85 | def projectDir = File.createTempDir("gradle", "projectDir") 86 | new File(projectDir, 'docker-compose.yml') << ''' 87 | services: 88 | web: 89 | image: nginx:stable 90 | ''' 91 | new File(projectDir, 'docker-compose.override.yml') << ''' 92 | services: 93 | web: 94 | ports: 95 | - 80 96 | devweb: 97 | image: nginx:stable 98 | ports: 99 | - 80 100 | ''' 101 | new File(projectDir, 'docker-compose.prod.yml') << ''' 102 | services: 103 | web: 104 | ports: 105 | - 8080 106 | ''' 107 | def project = ProjectBuilder.builder().withProjectDir(projectDir).build() 108 | project.plugins.apply 'docker-compose' 109 | def extension = (ComposeExtension) project.extensions.findByName('dockerCompose') 110 | def integrationTestTask = project.tasks.create('integrationTest').doLast { 111 | ContainerInfo webInfo = project.dockerCompose.servicesInfos.web.firstContainer 112 | assert webInfo.ports.containsKey(8080) 113 | assert !webInfo.ports.containsKey(80) 114 | assert !project.dockerCompose.servicesInfos.devweb 115 | } 116 | when: 117 | extension.waitForTcpPorts = false // port 8080 is a fake 118 | extension.useComposeFiles = ['docker-compose.yml', 'docker-compose.prod.yml'] 119 | project.tasks.composeUp.up() 120 | integrationTestTask.actions.forEach { it.execute(integrationTestTask) } 121 | then: 122 | noExceptionThrown() 123 | cleanup: 124 | project.tasks.composeDown.down() 125 | try { 126 | projectDir.delete() 127 | } catch (ignored) { 128 | projectDir.deleteOnExit() 129 | } 130 | } 131 | } 132 | 
-------------------------------------------------------------------------------- /src/test/groovy/com/avast/gradle/dockercompose/ComposeConfigParserTest.groovy: -------------------------------------------------------------------------------- 1 | package com.avast.gradle.dockercompose 2 | 3 | import spock.lang.Specification 4 | import spock.lang.Unroll 5 | 6 | class ComposeConfigParserTest extends Specification 7 | { 8 | def "findServiceDependencies with a service two direct dependencies in version 3" () 9 | { 10 | given: "compose config output for a service" 11 | def configOutput = """ 12 | services: 13 | master: 14 | depends_on: 15 | slave0: 16 | condition: service_healthy 17 | slave1: 18 | condition: service_healthy 19 | slave0: 20 | expose: 21 | - '22' 22 | slave1: 23 | expose: 24 | - '23' 25 | """ 26 | 27 | when: "findServiceDependencies is called" 28 | def dependenciesMap = ComposeConfigParser.findServiceDependencies(configOutput) 29 | 30 | then: "master has two dependencies" 31 | dependenciesMap["master"] == ["slave0", "slave1"] as Set 32 | 33 | and: "slave0 has no dependencies" 34 | dependenciesMap["slave0"].isEmpty() 35 | 36 | and: "slave1 has no dependencies" 37 | dependenciesMap["slave1"].isEmpty() 38 | } 39 | 40 | def "findServiceDependencies with a service two direct dependencies in version 1" () 41 | { 42 | 43 | given: "compose config output for a service" 44 | def configOutput = """ 45 | master: 46 | links: 47 | - slave0 48 | - slave1 49 | slave0: 50 | expose: 51 | - '22' 52 | slave1: 53 | expose: 54 | - '23' 55 | """ 56 | 57 | when: "findServiceDependencies is called" 58 | def dependenciesMap = ComposeConfigParser.findServiceDependencies(configOutput) 59 | 60 | then: "master has two dependencies" 61 | dependenciesMap["master"] == ["slave0", "slave1"] as Set 62 | 63 | and: "slave0 has no dependencies" 64 | dependenciesMap["slave0"].isEmpty() 65 | 66 | and: "slave1 has no dependencies" 67 | dependenciesMap["slave1"].isEmpty() 68 | } 69 | 70 | def 
"findServiceDependencies with a service 4 indirect dependencies in version 3" () 71 | { 72 | given: "compose config output for a service" 73 | def configOutput = """ 74 | services: 75 | db: 76 | expose: 77 | - 1414 78 | splunkForward: 79 | expose: 80 | - 8444 81 | dataService: 82 | depends_on: 83 | - db 84 | expose: 85 | - '8080' 86 | audit: 87 | depends_on: 88 | splunkForward: 89 | condition: service_healthy 90 | ui: 91 | depends_on: 92 | dataService: 93 | condition: service_healthy 94 | audit: 95 | condition: service_healthy 96 | expose: 97 | - '23' 98 | """ 99 | 100 | when: "findServiceDependencies is called" 101 | def dependenciesMap = ComposeConfigParser.findServiceDependencies(configOutput) 102 | 103 | then: "ui has 4 dependencies (audit, splunkForward, dataService, db)" 104 | dependenciesMap["ui"] == ["audit", "splunkForward", "dataService", "db"] as Set 105 | 106 | and: "deals with list dependencies" 107 | dependenciesMap["dataService"] == ["db"] as Set 108 | } 109 | 110 | def "findServiceDependencies with a service 4 indirect dependencies in version 1" () 111 | { 112 | given: "compose config output for a service" 113 | def configOutput = """ 114 | db: 115 | expose: 116 | - 1414 117 | splunkForward: 118 | expose: 119 | - 8444 120 | dataService: 121 | links: 122 | - db 123 | expose: 124 | - '8080' 125 | audit: 126 | links: 127 | - splunkForward 128 | ui: 129 | links: 130 | - dataService 131 | - audit 132 | expose: 133 | - '23' 134 | """ 135 | 136 | when: "findServiceDependencies is called" 137 | def dependenciesMap = ComposeConfigParser.findServiceDependencies(configOutput) 138 | 139 | then: "ui has 4 dependencies (audit, splunkForward, dataService, db)" 140 | dependenciesMap["ui"] == ["audit", "splunkForward", "dataService", "db"] as Set 141 | } 142 | 143 | @Unroll 144 | def "calculateDependenciesFromGraph computes dependencies for #service" () 145 | { 146 | given: "a dependency graph" 147 | /** 148 | * services: 149 | * a: 150 | * b: 151 | * depends_on: 
152 | * - a 153 | * c: 154 | * depends_on: 155 | * - b 156 | * d: 157 | * e: 158 | * depends_on: 159 | * - c 160 | * - d 161 | */ 162 | def dependencyGraph = [ 163 | "a":[], 164 | "b": ["a"], 165 | "c": ["b"], 166 | "d": [], 167 | "e": ["c", "d"] 168 | ] 169 | 170 | when: "calculateDependenciesFromGraph is called for #service" 171 | def dependencies = ComposeConfigParser.calculateDependenciesFromGraph(service, dependencyGraph) 172 | 173 | then: "the service's dependency set is calculated correctly" 174 | dependencies == expectedSet 175 | 176 | where: 177 | service | expectedSet 178 | "a" | [] as Set 179 | "b" | ["a"] as Set 180 | "c" | ["a", "b"] as Set 181 | "d" | [] as Set 182 | "e" | ["a", "b", "c", "d"] as Set 183 | } 184 | } 185 | -------------------------------------------------------------------------------- /src/main/groovy/com/avast/gradle/dockercompose/NoOpLogger.groovy: -------------------------------------------------------------------------------- 1 | package com.avast.gradle.dockercompose 2 | 3 | import groovy.transform.CompileStatic 4 | import org.gradle.api.logging.LogLevel 5 | import org.gradle.api.logging.Logger 6 | import org.slf4j.Marker 7 | 8 | @CompileStatic 9 | class NoOpLogger implements Logger { 10 | 11 | static Logger INSTANCE = new NoOpLogger() 12 | 13 | @Override 14 | boolean isLifecycleEnabled() { 15 | return false 16 | } 17 | 18 | @Override 19 | String getName() { 20 | return null 21 | } 22 | 23 | @Override 24 | boolean isTraceEnabled() { 25 | return false 26 | } 27 | 28 | @Override 29 | void trace(String s) { 30 | 31 | } 32 | 33 | @Override 34 | void trace(String s, Object o) { 35 | 36 | } 37 | 38 | @Override 39 | void trace(String s, Object o, Object o1) { 40 | 41 | } 42 | 43 | @Override 44 | void trace(String s, Object... 
objects) { 45 | 46 | } 47 | 48 | @Override 49 | void trace(String s, Throwable throwable) { 50 | 51 | } 52 | 53 | @Override 54 | boolean isTraceEnabled(Marker marker) { 55 | return false 56 | } 57 | 58 | @Override 59 | void trace(Marker marker, String s) { 60 | 61 | } 62 | 63 | @Override 64 | void trace(Marker marker, String s, Object o) { 65 | 66 | } 67 | 68 | @Override 69 | void trace(Marker marker, String s, Object o, Object o1) { 70 | 71 | } 72 | 73 | @Override 74 | void trace(Marker marker, String s, Object... objects) { 75 | 76 | } 77 | 78 | @Override 79 | void trace(Marker marker, String s, Throwable throwable) { 80 | 81 | } 82 | 83 | @Override 84 | boolean isDebugEnabled() { 85 | return false 86 | } 87 | 88 | @Override 89 | void debug(String s) { 90 | 91 | } 92 | 93 | @Override 94 | void debug(String s, Object o) { 95 | 96 | } 97 | 98 | @Override 99 | void debug(String s, Object o, Object o1) { 100 | 101 | } 102 | 103 | @Override 104 | void debug(String s, Object... objects) { 105 | 106 | } 107 | 108 | @Override 109 | void debug(String s, Throwable throwable) { 110 | 111 | } 112 | 113 | @Override 114 | boolean isDebugEnabled(Marker marker) { 115 | return false 116 | } 117 | 118 | @Override 119 | void debug(Marker marker, String s) { 120 | 121 | } 122 | 123 | @Override 124 | void debug(Marker marker, String s, Object o) { 125 | 126 | } 127 | 128 | @Override 129 | void debug(Marker marker, String s, Object o, Object o1) { 130 | 131 | } 132 | 133 | @Override 134 | void debug(Marker marker, String s, Object... 
objects) { 135 | 136 | } 137 | 138 | @Override 139 | void debug(Marker marker, String s, Throwable throwable) { 140 | 141 | } 142 | 143 | @Override 144 | boolean isInfoEnabled() { 145 | return false 146 | } 147 | 148 | @Override 149 | void info(String s) { 150 | 151 | } 152 | 153 | @Override 154 | void info(String s, Object o) { 155 | 156 | } 157 | 158 | @Override 159 | void info(String s, Object o, Object o1) { 160 | 161 | } 162 | 163 | @Override 164 | void lifecycle(String s) { 165 | 166 | } 167 | 168 | @Override 169 | void lifecycle(String s, Object... objects) { 170 | 171 | } 172 | 173 | @Override 174 | void lifecycle(String s, Throwable throwable) { 175 | 176 | } 177 | 178 | @Override 179 | boolean isQuietEnabled() { 180 | return false 181 | } 182 | 183 | @Override 184 | void quiet(String s) { 185 | 186 | } 187 | 188 | @Override 189 | void quiet(String s, Object... objects) { 190 | 191 | } 192 | 193 | @Override 194 | void info(String s, Object... objects) { 195 | 196 | } 197 | 198 | @Override 199 | void info(String s, Throwable throwable) { 200 | 201 | } 202 | 203 | @Override 204 | boolean isInfoEnabled(Marker marker) { 205 | return false 206 | } 207 | 208 | @Override 209 | void info(Marker marker, String s) { 210 | 211 | } 212 | 213 | @Override 214 | void info(Marker marker, String s, Object o) { 215 | 216 | } 217 | 218 | @Override 219 | void info(Marker marker, String s, Object o, Object o1) { 220 | 221 | } 222 | 223 | @Override 224 | void info(Marker marker, String s, Object... objects) { 225 | 226 | } 227 | 228 | @Override 229 | void info(Marker marker, String s, Throwable throwable) { 230 | 231 | } 232 | 233 | @Override 234 | boolean isWarnEnabled() { 235 | return false 236 | } 237 | 238 | @Override 239 | void warn(String s) { 240 | 241 | } 242 | 243 | @Override 244 | void warn(String s, Object o) { 245 | 246 | } 247 | 248 | @Override 249 | void warn(String s, Object... 
objects) { 250 | 251 | } 252 | 253 | @Override 254 | void warn(String s, Object o, Object o1) { 255 | 256 | } 257 | 258 | @Override 259 | void warn(String s, Throwable throwable) { 260 | 261 | } 262 | 263 | @Override 264 | boolean isWarnEnabled(Marker marker) { 265 | return false 266 | } 267 | 268 | @Override 269 | void warn(Marker marker, String s) { 270 | 271 | } 272 | 273 | @Override 274 | void warn(Marker marker, String s, Object o) { 275 | 276 | } 277 | 278 | @Override 279 | void warn(Marker marker, String s, Object o, Object o1) { 280 | 281 | } 282 | 283 | @Override 284 | void warn(Marker marker, String s, Object... objects) { 285 | 286 | } 287 | 288 | @Override 289 | void warn(Marker marker, String s, Throwable throwable) { 290 | 291 | } 292 | 293 | @Override 294 | boolean isErrorEnabled() { 295 | return false 296 | } 297 | 298 | @Override 299 | void error(String s) { 300 | 301 | } 302 | 303 | @Override 304 | void error(String s, Object o) { 305 | 306 | } 307 | 308 | @Override 309 | void error(String s, Object o, Object o1) { 310 | 311 | } 312 | 313 | @Override 314 | void error(String s, Object... objects) { 315 | 316 | } 317 | 318 | @Override 319 | void error(String s, Throwable throwable) { 320 | 321 | } 322 | 323 | @Override 324 | boolean isErrorEnabled(Marker marker) { 325 | return false 326 | } 327 | 328 | @Override 329 | void error(Marker marker, String s) { 330 | 331 | } 332 | 333 | @Override 334 | void error(Marker marker, String s, Object o) { 335 | 336 | } 337 | 338 | @Override 339 | void error(Marker marker, String s, Object o, Object o1) { 340 | 341 | } 342 | 343 | @Override 344 | void error(Marker marker, String s, Object... 
objects) { 345 | 346 | } 347 | 348 | @Override 349 | void error(Marker marker, String s, Throwable throwable) { 350 | 351 | } 352 | 353 | @Override 354 | void quiet(String s, Throwable throwable) { 355 | 356 | } 357 | 358 | @Override 359 | boolean isEnabled(LogLevel logLevel) { 360 | return false 361 | } 362 | 363 | @Override 364 | void log(LogLevel logLevel, String s) { 365 | 366 | } 367 | 368 | @Override 369 | void log(LogLevel logLevel, String s, Object... objects) { 370 | 371 | } 372 | 373 | @Override 374 | void log(LogLevel logLevel, String s, Throwable throwable) { 375 | 376 | } 377 | } 378 | -------------------------------------------------------------------------------- /gradlew: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | # 4 | # Copyright © 2015 the original authors. 5 | # 6 | # Licensed under the Apache License, Version 2.0 (the "License"); 7 | # you may not use this file except in compliance with the License. 8 | # You may obtain a copy of the License at 9 | # 10 | # https://www.apache.org/licenses/LICENSE-2.0 11 | # 12 | # Unless required by applicable law or agreed to in writing, software 13 | # distributed under the License is distributed on an "AS IS" BASIS, 14 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | # See the License for the specific language governing permissions and 16 | # limitations under the License. 17 | # 18 | # SPDX-License-Identifier: Apache-2.0 19 | # 20 | 21 | ############################################################################## 22 | # 23 | # Gradle start up script for POSIX generated by Gradle. 24 | # 25 | # Important for running: 26 | # 27 | # (1) You need a POSIX-compliant shell to run this script. 
If your /bin/sh is 28 | # noncompliant, but you have some other compliant shell such as ksh or 29 | # bash, then to run this script, type that shell name before the whole 30 | # command line, like: 31 | # 32 | # ksh Gradle 33 | # 34 | # Busybox and similar reduced shells will NOT work, because this script 35 | # requires all of these POSIX shell features: 36 | # * functions; 37 | # * expansions «$var», «${var}», «${var:-default}», «${var+SET}», 38 | # «${var#prefix}», «${var%suffix}», and «$( cmd )»; 39 | # * compound commands having a testable exit status, especially «case»; 40 | # * various built-in commands including «command», «set», and «ulimit». 41 | # 42 | # Important for patching: 43 | # 44 | # (2) This script targets any POSIX shell, so it avoids extensions provided 45 | # by Bash, Ksh, etc; in particular arrays are avoided. 46 | # 47 | # The "traditional" practice of packing multiple parameters into a 48 | # space-separated string is a well documented source of bugs and security 49 | # problems, so this is (mostly) avoided, by progressively accumulating 50 | # options in "$@", and eventually passing that to Java. 51 | # 52 | # Where the inherited environment variables (DEFAULT_JVM_OPTS, JAVA_OPTS, 53 | # and GRADLE_OPTS) rely on word-splitting, this is performed explicitly; 54 | # see the in-line comments for details. 55 | # 56 | # There are tweaks for specific operating systems such as AIX, CygWin, 57 | # Darwin, MinGW, and NonStop. 58 | # 59 | # (3) This script is generated from the Groovy template 60 | # https://github.com/gradle/gradle/blob/HEAD/platforms/jvm/plugins-application/src/main/resources/org/gradle/api/internal/plugins/unixStartScript.txt 61 | # within the Gradle project. 62 | # 63 | # You can find Gradle at https://github.com/gradle/gradle/. 
64 | # 65 | ############################################################################## 66 | 67 | # Attempt to set APP_HOME 68 | 69 | # Resolve links: $0 may be a link 70 | app_path=$0 71 | 72 | # Need this for daisy-chained symlinks. 73 | while 74 | APP_HOME=${app_path%"${app_path##*/}"} # leaves a trailing /; empty if no leading path 75 | [ -h "$app_path" ] 76 | do 77 | ls=$( ls -ld "$app_path" ) 78 | link=${ls#*' -> '} 79 | case $link in #( 80 | /*) app_path=$link ;; #( 81 | *) app_path=$APP_HOME$link ;; 82 | esac 83 | done 84 | 85 | # This is normally unused 86 | # shellcheck disable=SC2034 87 | APP_BASE_NAME=${0##*/} 88 | # Discard cd standard output in case $CDPATH is set (https://github.com/gradle/gradle/issues/25036) 89 | APP_HOME=$( cd -P "${APP_HOME:-./}" > /dev/null && printf '%s\n' "$PWD" ) || exit 90 | 91 | # Use the maximum available, or set MAX_FD != -1 to use that value. 92 | MAX_FD=maximum 93 | 94 | warn () { 95 | echo "$*" 96 | } >&2 97 | 98 | die () { 99 | echo 100 | echo "$*" 101 | echo 102 | exit 1 103 | } >&2 104 | 105 | # OS specific support (must be 'true' or 'false'). 106 | cygwin=false 107 | msys=false 108 | darwin=false 109 | nonstop=false 110 | case "$( uname )" in #( 111 | CYGWIN* ) cygwin=true ;; #( 112 | Darwin* ) darwin=true ;; #( 113 | MSYS* | MINGW* ) msys=true ;; #( 114 | NONSTOP* ) nonstop=true ;; 115 | esac 116 | 117 | 118 | 119 | # Determine the Java command to use to start the JVM. 120 | if [ -n "$JAVA_HOME" ] ; then 121 | if [ -x "$JAVA_HOME/jre/sh/java" ] ; then 122 | # IBM's JDK on AIX uses strange locations for the executables 123 | JAVACMD=$JAVA_HOME/jre/sh/java 124 | else 125 | JAVACMD=$JAVA_HOME/bin/java 126 | fi 127 | if [ ! -x "$JAVACMD" ] ; then 128 | die "ERROR: JAVA_HOME is set to an invalid directory: $JAVA_HOME 129 | 130 | Please set the JAVA_HOME variable in your environment to match the 131 | location of your Java installation." 132 | fi 133 | else 134 | JAVACMD=java 135 | if ! 
command -v java >/dev/null 2>&1 136 | then 137 | die "ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. 138 | 139 | Please set the JAVA_HOME variable in your environment to match the 140 | location of your Java installation." 141 | fi 142 | fi 143 | 144 | # Increase the maximum file descriptors if we can. 145 | if ! "$cygwin" && ! "$darwin" && ! "$nonstop" ; then 146 | case $MAX_FD in #( 147 | max*) 148 | # In POSIX sh, ulimit -H is undefined. That's why the result is checked to see if it worked. 149 | # shellcheck disable=SC2039,SC3045 150 | MAX_FD=$( ulimit -H -n ) || 151 | warn "Could not query maximum file descriptor limit" 152 | esac 153 | case $MAX_FD in #( 154 | '' | soft) :;; #( 155 | *) 156 | # In POSIX sh, ulimit -n is undefined. That's why the result is checked to see if it worked. 157 | # shellcheck disable=SC2039,SC3045 158 | ulimit -n "$MAX_FD" || 159 | warn "Could not set maximum file descriptor limit to $MAX_FD" 160 | esac 161 | fi 162 | 163 | # Collect all arguments for the java command, stacking in reverse order: 164 | # * args from the command line 165 | # * the main class name 166 | # * -classpath 167 | # * -D...appname settings 168 | # * --module-path (only if needed) 169 | # * DEFAULT_JVM_OPTS, JAVA_OPTS, and GRADLE_OPTS environment variables. 
170 | 171 | # For Cygwin or MSYS, switch paths to Windows format before running java 172 | if "$cygwin" || "$msys" ; then 173 | APP_HOME=$( cygpath --path --mixed "$APP_HOME" ) 174 | 175 | JAVACMD=$( cygpath --unix "$JAVACMD" ) 176 | 177 | # Now convert the arguments - kludge to limit ourselves to /bin/sh 178 | for arg do 179 | if 180 | case $arg in #( 181 | -*) false ;; # don't mess with options #( 182 | /?*) t=${arg#/} t=/${t%%/*} # looks like a POSIX filepath 183 | [ -e "$t" ] ;; #( 184 | *) false ;; 185 | esac 186 | then 187 | arg=$( cygpath --path --ignore --mixed "$arg" ) 188 | fi 189 | # Roll the args list around exactly as many times as the number of 190 | # args, so each arg winds up back in the position where it started, but 191 | # possibly modified. 192 | # 193 | # NB: a `for` loop captures its iteration list before it begins, so 194 | # changing the positional parameters here affects neither the number of 195 | # iterations, nor the values presented in `arg`. 196 | shift # remove old arg 197 | set -- "$@" "$arg" # push replacement arg 198 | done 199 | fi 200 | 201 | 202 | # Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. 203 | DEFAULT_JVM_OPTS='"-Xmx64m" "-Xms64m"' 204 | 205 | # Collect all arguments for the java command: 206 | # * DEFAULT_JVM_OPTS, JAVA_OPTS, and optsEnvironmentVar are not allowed to contain shell fragments, 207 | # and any embedded shellness will be escaped. 208 | # * For example: A user cannot expect ${Hostname} to be expanded, as it is an environment variable and will be 209 | # treated as '${Hostname}' itself on the command line. 210 | 211 | set -- \ 212 | "-Dorg.gradle.appname=$APP_BASE_NAME" \ 213 | -jar "$APP_HOME/gradle/wrapper/gradle-wrapper.jar" \ 214 | "$@" 215 | 216 | # Stop when "xargs" is not available. 217 | if ! command -v xargs >/dev/null 2>&1 218 | then 219 | die "xargs is not available" 220 | fi 221 | 222 | # Use "xargs" to parse quoted args. 
package com.avast.gradle.dockercompose

import com.avast.gradle.dockercompose.tasks.*
import com.fasterxml.jackson.core.type.TypeReference
import com.fasterxml.jackson.databind.ObjectMapper
import groovy.transform.CompileStatic
import groovy.transform.PackageScope
import org.gradle.api.Project
import org.gradle.api.Task
import org.gradle.api.provider.Provider
import org.gradle.api.tasks.TaskProvider
import org.gradle.process.JavaForkOptions
import org.gradle.process.ProcessForkOptions

/**
 * Registers and wires all docker-compose tasks (up/down/build/pull/logs/push) for one
 * {@link ComposeSettings} instance.
 *
 * <p>When a non-empty {@code name} is given (a nested settings block), the task names are
 * prefixed with it, e.g. {@code myNameComposeUp} instead of {@code composeUp}.</p>
 *
 * <p>All tasks are registered lazily via {@code project.tasks.register}; the shared
 * {@link ComposeExecutor} and {@link ServiceInfoCache} build services are passed around as
 * {@link Provider}s so they are only realized when a task actually runs.</p>
 */
@CompileStatic
class TasksConfigurator {
    final ComposeSettings composeSettings
    final Project project
    // Lazily-registered task handles; exposed so ComposeSettings/plugin code can add dependencies.
    final TaskProvider upTask
    final TaskProvider downTask
    final TaskProvider downForcedTask
    final TaskProvider downForcedOnFailureTask
    final TaskProvider buildTask
    final TaskProvider pullTask
    final TaskProvider logsTask
    final TaskProvider pushTask

    /**
     * Registers all tasks for the given settings.
     *
     * @param composeSettings the settings object whose properties are copied onto each task
     * @param project the Gradle project the tasks are registered in
     * @param name optional nested-settings name used as a task-name prefix ('' = top-level names)
     */
    TasksConfigurator(ComposeSettings composeSettings, Project project, String name = '') {
        this.composeSettings = composeSettings
        this.project = project
        // Shared build services, keyed per project+settings; see ComposeExecutor.getInstance.
        Provider composeExecutor = ComposeExecutor.getInstance(project, composeSettings)
        Provider serviceInfoCache = ServiceInfoCache.getInstance(project, composeSettings.nestedName)
        this.downTask = project.tasks.register(name ? "${name}ComposeDown".toString() : 'composeDown', ComposeDown) {task ->
            configureDownForcedTask(task, composeExecutor, serviceInfoCache)
            // Unlike the forced variant, plain composeDown honours the stopContainers setting.
            task.stopContainers.set(composeSettings.stopContainers)
        }
        this.downForcedTask = project.tasks.register(name ? "${name}ComposeDownForced".toString() : 'composeDownForced', ComposeDownForced) {task ->
            configureDownForcedTask(task, composeExecutor, serviceInfoCache)
        }
        // Cleanup task that runs only when composeUp failed; wired below via finalizedBy.
        def downForcedOnFailureTask = project.tasks.register(name ? "${name}ComposeDownForcedOnFailure".toString() : 'composeDownForcedOnFailure', ComposeDownForced) {task ->
            configureDownForcedTask(task, composeExecutor, serviceInfoCache)
            task.onlyIf { task.serviceInfoCache.get().startupFailed }
        }
        this.downForcedOnFailureTask = downForcedOnFailureTask
        this.upTask = project.tasks.register(name ? "${name}ComposeUp".toString() : 'composeUp', ComposeUp) {task ->
            // Copy every relevant ComposeSettings property onto the task so the task is
            // self-contained (required for the configuration cache).
            task.stopContainers.set(composeSettings.stopContainers)
            task.forceRecreate.set(composeSettings.forceRecreate)
            task.noRecreate.set(composeSettings.noRecreate)
            task.scale.set(composeSettings.scale)
            task.upAdditionalArgs.set(composeSettings.upAdditionalArgs)
            task.startedServices.set(composeSettings.startedServices)
            task.composeLogToFile.set(composeSettings.composeLogToFile)
            task.waitForTcpPorts.set(composeSettings.waitForTcpPorts)
            task.retainContainersOnStartupFailure.set(composeSettings.retainContainersOnStartupFailure)
            task.captureContainersOutput.set(composeSettings.captureContainersOutput)
            task.captureContainersOutputToFile.set(composeSettings.captureContainersOutputToFile)
            task.captureContainersOutputToFiles.set(composeSettings.captureContainersOutputToFiles)
            task.waitAfterHealthyStateProbeFailure.set(composeSettings.waitAfterHealthyStateProbeFailure)
            task.checkContainersRunning.set(composeSettings.checkContainersRunning)
            task.waitForHealthyStateTimeout.set(composeSettings.waitForHealthyStateTimeout)
            task.tcpPortsToIgnoreWhenWaiting.set(composeSettings.tcpPortsToIgnoreWhenWaiting)
            task.waitForTcpPortsDisconnectionProbeTimeout.set(composeSettings.waitForTcpPortsDisconnectionProbeTimeout)
            task.waitForTcpPortsTimeout.set(composeSettings.waitForTcpPortsTimeout)
            task.waitAfterTcpProbeFailure.set(composeSettings.waitAfterTcpProbeFailure)
            task.serviceInfoCache.set(serviceInfoCache)
            task.composeExecutor.set(composeExecutor)
            // composeBuild only becomes a dependency when buildBeforeUp is true (lazy Provider).
            task.dependsOn(composeSettings.buildBeforeUp.map { buildBeforeUp ->
                buildBeforeUp ? [buildTask] : []
            })
            task.dockerExecutor = composeSettings.dockerExecutor
            // Guarantee cleanup when startup fails (see onlyIf above).
            task.finalizedBy(downForcedOnFailureTask)
            task.usesService(composeExecutor)
            task.usesService(serviceInfoCache)
        }
        this.buildTask = project.tasks.register(name ? "${name}ComposeBuild".toString() : 'composeBuild', ComposeBuild) {task ->
            task.buildAdditionalArgs.set(composeSettings.buildAdditionalArgs)
            task.startedServices.set(composeSettings.startedServices)
            task.composeExecutor.set(composeExecutor)
            task.usesService(composeExecutor)
        }
        this.pullTask = project.tasks.register(name ? "${name}ComposePull".toString() : 'composePull', ComposePull) {task ->
            task.ignorePullFailure.set(composeSettings.ignorePullFailure)
            task.pullAdditionalArgs.set(composeSettings.pullAdditionalArgs)
            task.startedServices.set(composeSettings.startedServices)
            task.composeExecutor.set(composeExecutor)
            // Mirrors the buildBeforeUp wiring on composeUp, but driven by buildBeforePull.
            task.dependsOn(composeSettings.buildBeforePull.map { buildBeforePull ->
                buildBeforePull ? [buildTask] : []
            })
            task.usesService(composeExecutor)
        }
        this.logsTask = project.tasks.register(name ? "${name}ComposeLogs".toString() : 'composeLogs', ComposeLogs) {task ->
            task.containerLogToDir.set(composeSettings.containerLogToDir)
            task.composeExecutor.set(composeExecutor)
            task.usesService(composeExecutor)
        }
        this.pushTask = project.tasks.register(name ? "${name}ComposePush".toString() : 'composePush', ComposePush) {task ->
            task.ignorePushFailure.set(composeSettings.ignorePushFailure)
            task.pushServices.set(composeSettings.pushServices)
            task.composeExecutor.set(composeExecutor)
            task.usesService(composeExecutor)
        }
    }

    /**
     * Applies the configuration shared by all three down-style tasks
     * (composeDown, composeDownForced, composeDownForcedOnFailure).
     */
    private void configureDownForcedTask(
        ComposeDownForced task,
        Provider composeExecutor,
        Provider serviceInfoCache
    ) {
        task.dockerComposeStopTimeout.set(composeSettings.dockerComposeStopTimeout)
        task.removeContainers.set(composeSettings.removeContainers)
        task.startedServices.set(composeSettings.startedServices)
        task.removeVolumes.set(composeSettings.removeVolumes)
        task.removeImages.set(composeSettings.removeImages)
        task.downAdditionalArgs.set(composeSettings.downAdditionalArgs)
        task.composeLogToFile.set(composeSettings.composeLogToFile)
        task.nestedName.set(composeSettings.nestedName)
        task.composeExecutor.set(composeExecutor)
        task.serviceInfoCache.set(serviceInfoCache)
        task.usesService(composeExecutor)
        task.usesService(serviceInfoCache)
    }

    /**
     * Wires a user task into the compose lifecycle: it depends on composeUp, is finalized
     * by composeDown, and — for fork-options tasks — gets service info exposed as
     * environment variables / system properties just before execution.
     *
     * @param task the task that requires the compose environment
     * @param fromConfigure true when called during eager task configuration (the TaskProvider
     *        is realized immediately); false to keep the configuration lazy
     */
    @PackageScope
    void isRequiredByCore(Task task, boolean fromConfigure) {
        task.dependsOn upTask
        task.finalizedBy downTask
        if (fromConfigure) {
            upTask.get().shouldRunAfter getTaskDependencies(task)
        } else {
            upTask.configure { it.shouldRunAfter getTaskDependencies(task) }
        }

        // composeSettings.tasksConfigurator is null when the doFirst actions run with the configuration cache enabled.
        def composeSettings = this.composeSettings
        // Service infos are re-read from the file composeUp wrote, so the doFirst actions
        // below stay configuration-cache compatible.
        def servicesInfos = upTask.flatMap { it.servicesInfosFile }.map {
            new ObjectMapper().readValue(it.asFile, new TypeReference<Map<String, ServiceInfo>>() {})
        }
        if (task instanceof ProcessForkOptions) task.doFirst {
            composeSettings.exposeAsEnvironmentInternal(task as ProcessForkOptions, servicesInfos.get())
        }
        if (task instanceof JavaForkOptions) task.doFirst {
            composeSettings.exposeAsSystemPropertiesInternal(task as JavaForkOptions, servicesInfos.get())
        }
    }

    /**
     * Returns the dependencies of {@code task} that composeUp should run after,
     * excluding tasks coming from included (composite) builds.
     */
    private Object getTaskDependencies(Task task) {
        def includedBuilds = task.project.gradle.includedBuilds
        if (includedBuilds.isEmpty()) {
            return task.taskDependencies
        } else {
            // Ignore any task dependencies from a composite/included build to avoid the
            // "Cannot use shouldRunAfter to reference tasks from another build" error introduced in Gradle 8
            def includedBuildProjectNames = includedBuilds.collect { it.name }.toSet()
            return task.taskDependencies.getDependencies(null).findAll { dependency ->
                // use rootProject.name in case the task is from a multi-module composite build
                !includedBuildProjectNames.contains(dependency.project.rootProject.name)
            }
        }
    }

    /**
     * Finds every already-registered task whose name matches {@code taskName}
     * (case-insensitively) and marks it as requiring the given compose settings.
     */
    @PackageScope
    void setupMissingRequiredBy(String taskName, ComposeSettings settings) {
        project.tasks
                .findAll { Task task -> task.name.equalsIgnoreCase(taskName) }
                .forEach { Task task -> settings.isRequiredBy(task) }
    }

    /** Creates a nested ComposeSettings instance via Gradle's ObjectFactory (enables injection). */
    @PackageScope
    ComposeSettings newComposeSettings(String name, String nestedName) {
        return project.objects.newInstance(ComposeSettings, project, name, nestedName)
    }
}
package com.avast.gradle.dockercompose

import org.gradle.api.logging.Logger
import org.gradle.api.logging.Logging
import org.gradle.process.ExecOperations
import org.gradle.process.ExecSpec
import org.yaml.snakeyaml.Yaml

import javax.inject.Inject

/**
 * Thin wrapper around the plain {@code docker} CLI (not {@code docker compose}).
 *
 * <p>Used to inspect containers and networks and to decide which hostname and port
 * mapping a test should use to reach a compose service. All heuristics are based on
 * the output of {@code docker inspect} / {@code docker network inspect} plus the
 * {@code SERVICES_HOST}/{@code DOCKER_HOST} environment variables.</p>
 */
class DockerExecutor {
    private final ComposeSettings settings
    private final ExecOperations exec

    private static final Logger logger = Logging.getLogger(DockerExecutor.class);

    @Inject
    DockerExecutor(ComposeSettings settings, ExecOperations exec) {
        this.settings = settings
        this.exec = exec
    }

    /**
     * Runs {@code docker <args>} with the configured environment and returns trimmed stdout.
     * @throws RuntimeException when the process exits with a non-zero code
     */
    String execute(String... args) {
        def exec = this.exec
        def settings = this.settings
        new ByteArrayOutputStream().withStream { os ->
            def er = exec.exec { ExecSpec e ->
                // Inherit the current environment and overlay the user-configured variables.
                e.environment = System.getenv() + settings.environment.get()
                def finalArgs = [settings.dockerExecutable.get()]
                finalArgs.addAll(args)
                e.commandLine finalArgs
                e.standardOutput = os
                // Exit code is checked manually so stdout can be included in the error message.
                e.ignoreExitValue = true
            }
            def stdout = os.toString().trim()
            if (er.exitValue != 0) {
                throw new RuntimeException("Exit-code ${er.exitValue} when calling ${settings.dockerExecutable.get()}, stdout: $stdout")
            }
            stdout
        }
    }

    /** Returns the lines of {@code docker info} output. */
    List getDockerInfo() {
        def asString = execute('info')
        logger.debug("Docker info: $asString")
        asString.readLines()
    }

    /**
     * Returns the daemon's OS type from {@code docker info} ('OSType:' line),
     * falling back to the local JVM's os.name when the line is absent.
     */
    String getDockerPlatform() {
        String osType = getDockerInfo().collect { it.trim() }.find { it.startsWith('OSType:') }
        osType == null || osType.empty ? System.getProperty("os.name") : osType.substring('OSType:'.length()).trim()
    }

    /** Platform of a single container, from its inspection or the daemon as a fallback. */
    String getContainerPlatform(Map inspection) {
        def platform = inspection.Platform as String
        platform ?: getDockerPlatform()
    }

    /** Inspection map of a single container (first entry of getInspections). */
    Map getInspection(String containerId) {
        getInspections(containerId).values().find()
    }

    /**
     * Runs {@code docker inspect} for all given container ids.
     *
     * @return map of container id to its inspection map
     * @throws RuntimeException when any requested id is missing from the output
     */
    Map<String, Map<String, Object>> getInspections(String... containersIds) {
        def asString = execute(*['inspect', *containersIds])
        logger.debug("Inspections for containers ${containersIds.join(', ')}: $asString")
        // docker inspect emits JSON, which is a subset of YAML, so SnakeYAML can parse it.
        Map[] inspections = new Yaml().load(asString)
        def r = inspections.collectEntries { [it.Id, it] }
        def notFoundInspections = containersIds.findAll { !r.containsKey(it) }
        if (notFoundInspections) {
            throw new RuntimeException('docker inspect didn\'t return inspection for these containers: ' + notFoundInspections.join(', '))
        }
        r
    }

    /** Inspection map of a Docker network ({@code docker network inspect} returns an array; first item). */
    Map getNetworkInspection(String networkName) {
        def asString = execute('network', 'inspect', networkName)
        logger.debug("Inspection for network $networkName: $asString")
        (new Yaml().load(asString))[0] as Map
    }

    /** Gateway IP of the network's first IPAM config entry, or null when unavailable. */
    String getNetworkGateway(String networkName) {
        def networkInspection = getNetworkInspection(networkName)
        if (networkInspection) {
            Map ipam = networkInspection.IPAM
            if (ipam) {
                Map[] ipamConfig = ipam.Config
                if (ipamConfig && ipamConfig.size() > 0) {
                    return ipamConfig[0].Gateway
                }
            }
        }
        null
    }

    /** Network driver name (e.g. bridge, nat), or empty string when the network cannot be inspected. */
    String getNetworkDriver(String networkName) {
        def networkInspection = getNetworkInspection(networkName)
        networkInspection ? networkInspection.Driver as String : ""
    }

    /** Captured (non-following) logs of a container. */
    String getContainerLogs(String containerId) {
        execute('logs', '--follow=false', containerId)
    }

    /**
     * Determines the hostname clients should use to reach a service's container.
     *
     * Resolution order: SERVICES_HOST env var, DOCKER_HOST env var, WSL (localhost),
     * Windows containers on a NAT network (direct container IP), Mac/Windows (localhost),
     * 'container:' network mode (recurse into the linked container), HOST network
     * (localhost), network gateway, legacy NetworkSettings.Gateway, then localhost.
     */
    ServiceHost getContainerHost(Map inspection, String serviceName, Logger logger = this.logger) {
        String servicesHost = settings.environment.get()['SERVICES_HOST'] ?: System.getenv('SERVICES_HOST')
        if (servicesHost) {
            // Fixed: removed stray trailing apostrophe from the log message.
            logger.lifecycle("SERVICES_HOST environment variable detected - will be used as hostname of service $serviceName ($servicesHost)")
            return new ServiceHost(host: servicesHost, type: ServiceHostType.RemoteDockerHost)
        }
        String dockerHost = settings.environment.get()['DOCKER_HOST'] ?: System.getenv('DOCKER_HOST')
        if (dockerHost) {
            def host = dockerHost.toURI().host ?: 'localhost'
            // Fixed: removed stray trailing apostrophe from the log message.
            logger.lifecycle("DOCKER_HOST environment variable detected - will be used as hostname of service $serviceName ($host)")
            return new ServiceHost(host: host, type: ServiceHostType.RemoteDockerHost)
        }
        if (isWSL()) {
            return new ServiceHost(host: 'localhost', type: ServiceHostType.LocalHost)
        }
        Map networkSettings = inspection.NetworkSettings
        Map networks = networkSettings.Networks
        Map.Entry firstNetworkPair = networks.find()
        if (isWindows() && getContainerPlatform(inspection).toLowerCase().contains('win') && firstNetworkPair && "nat".equalsIgnoreCase(getNetworkDriver(firstNetworkPair.key))) {
            logger.lifecycle("Will use direct access to the container of $serviceName")
            return new ServiceHost(host: firstNetworkPair.value.IPAddress, type: ServiceHostType.DirectContainerAccess)
        }
        if (isMac() || isWindows()) {
            logger.lifecycle("Will use localhost as host of $serviceName")
            return new ServiceHost(host: 'localhost', type: ServiceHostType.LocalHost)
        }
        String networkMode = (String)inspection.HostConfig.NetworkMode ?: ''
        if (networkMode.startsWith('container:')) {
            // The container shares the network stack of another container; resolve that one.
            // NOTE(review): the linked container id is passed as the serviceName for logging.
            String linkedContainerId = networkMode.substring('container:'.length())
            logger.lifecycle("Reading container host of $serviceName from linked container $linkedContainerId")
            return getContainerHost(getInspection(linkedContainerId), linkedContainerId, logger)
        }
        String gateway
        if (networks && networks.every { it.key.toLowerCase().equals("host") }) {
            gateway = 'localhost'
            logger.lifecycle("Will use $gateway as host of $serviceName because it is using HOST network")
            return new ServiceHost(host: gateway, type: ServiceHostType.Host)
        } else if (networks && networks.size() > 0) {
            gateway = firstNetworkPair.value.Gateway
            if (!gateway) {
                logger.lifecycle("Gateway cannot be read from container inspection - trying to read from network inspection (network '${firstNetworkPair.key}')")
                gateway = getNetworkGateway(firstNetworkPair.key)
            }
            logger.lifecycle("Will use $gateway (network ${firstNetworkPair.key}) as host of $serviceName")
            return new ServiceHost(host: gateway, type: ServiceHostType.NetworkGateway)
        }
        if (networkSettings.Gateway) { // networks not specified (older Docker versions)
            gateway = networkSettings.Gateway
            logger.lifecycle("Will use $gateway as host of $serviceName")
            return new ServiceHost(host: gateway, type: ServiceHostType.NetworkGateway)
        }
        logger.warn("Will use 'localhost' as host of $serviceName (as a fallback)")
        return new ServiceHost(host: 'localhost', type: ServiceHostType.LocalHost)
    }

    /** TCP port mapping (exposed port -> reachable port) for a service. */
    Map getTcpPortsMapping(String serviceName, Map inspection, ServiceHost host) {
        getPortsMapping("TCP", serviceName, inspection, host)
    }

    /** UDP port mapping (exposed port -> reachable port) for a service. */
    Map getUdpPortsMapping(String serviceName, Map inspection, ServiceHost host) {
        getPortsMapping("UDP", serviceName, inspection, host)
    }

    /**
     * Builds the exposed-port -> reachable-port map for one protocol from
     * {@code NetworkSettings.Ports}. For host-forwarded setups the forwarded host port
     * is used; for HOST-network and direct container access the exposed port itself.
     */
    Map getPortsMapping(String protocol, String serviceName, Map inspection, ServiceHost host) {
        Map ports = [:]
        inspection.NetworkSettings.Ports.each { String exposedPortWithProtocol, forwardedPortsInfos ->
            // Keys look like "5432/tcp"; split off the protocol suffix.
            def (String exposedPortAsString, String pr) = exposedPortWithProtocol.split('/')
            if (!protocol.equalsIgnoreCase(pr)) {
                return // from closure
            }
            int exposedPort = exposedPortAsString as int
            if (!forwardedPortsInfos || forwardedPortsInfos.isEmpty()) {
                logger.debug("No forwarded $protocol port for service '$serviceName:$exposedPort'")
            } else {
                switch (host.type) {
                    case ServiceHostType.LocalHost:
                    case ServiceHostType.NetworkGateway:
                    case ServiceHostType.RemoteDockerHost:
                        if (forwardedPortsInfos.size() > 1) {
                            logger.warn("More forwarded $protocol ports for service '$serviceName:$exposedPort $forwardedPortsInfos'. Will use the first one.")
                        }
                        def forwardedPortInfo = forwardedPortsInfos.first()
                        int forwardedPort = forwardedPortInfo.HostPort as int
                        logger.info("Exposed $protocol port on service '$serviceName:$exposedPort' will be available as $forwardedPort")
                        ports.put(exposedPort, forwardedPort)
                        break
                    case ServiceHostType.Host:
                        logger.info("Exposed $protocol port on service '$serviceName:$exposedPort' will be available as $exposedPort because it uses HOST network")
                        ports.put(exposedPort, exposedPort)
                        break
                    case ServiceHostType.DirectContainerAccess:
                        logger.info("Exposed $protocol port on service '$serviceName:$exposedPort' will be available as $exposedPort because it uses direct access to the container")
                        ports.put(exposedPort, exposedPort)
                        break
                    default:
                        throw new IllegalArgumentException("Unknown ServiceHostType '${host.type}' for service '$serviceName'")
                }
            }
        }
        ports
    }

    private static boolean isMac() {
        System.getProperty("os.name").toLowerCase().startsWith("mac")
    }

    private static boolean isWindows() {
        System.getProperty("os.name").toLowerCase().startsWith("win")
    }

    private static boolean isWSL() {
        // Windows Subsystem for Linux reports 'wsl'/'WSL2' in the kernel version string.
        System.getProperty("os.version").toLowerCase().contains('wsl')
    }
}
getUseDockerComposeV2() 37 | abstract Property getProjectName() 38 | abstract ListProperty getComposeAdditionalArgs() 39 | abstract Property getRemoveOrphans() 40 | abstract MapProperty getScale() 41 | } 42 | 43 | static Provider getInstance(Project project, ComposeSettings settings) { 44 | String serviceId = "${ComposeExecutor.class.canonicalName} $project.path ${settings.hashCode()}" 45 | return project.gradle.sharedServices.registerIfAbsent(serviceId, ComposeExecutor) { 46 | it.parameters.projectDirectory.set(project.layout.projectDirectory) 47 | it.parameters.startedServices.set(settings.startedServices) 48 | it.parameters.useComposeFiles.set(settings.useComposeFiles) 49 | it.parameters.includeDependencies.set(settings.includeDependencies) 50 | it.parameters.dockerComposeWorkingDirectory.set(settings.dockerComposeWorkingDirectory) 51 | it.parameters.environment.set(settings.environment) 52 | it.parameters.executable.set(settings.executable) 53 | it.parameters.dockerExecutable.set(settings.dockerExecutable) 54 | it.parameters.useDockerComposeV2.set(settings.useDockerComposeV2) 55 | it.parameters.projectName.set(settings.projectName) 56 | it.parameters.composeAdditionalArgs.set(settings.composeAdditionalArgs) 57 | it.parameters.removeOrphans.set(settings.removeOrphans) 58 | it.parameters.scale.set(settings.scale) 59 | } 60 | } 61 | 62 | @Inject 63 | abstract ExecOperations getExec() 64 | 65 | @Inject 66 | abstract FileOperations getFileOps() 67 | 68 | private static final Logger logger = Logging.getLogger(ComposeExecutor.class); 69 | 70 | void executeWithCustomOutputWithExitValue(OutputStream os, String... args) { 71 | executeWithCustomOutput(os, false, true, true, args) 72 | } 73 | 74 | void executeWithCustomOutputNoExitValue(OutputStream os, String... args) { 75 | executeWithCustomOutput(os, true, true, true, args) 76 | } 77 | 78 | void executeWithCustomOutput(OutputStream os, Boolean ignoreExitValue, Boolean noAnsi, Boolean captureStderr, String... 
args) { 79 | def er = exec.exec { ExecSpec e -> 80 | if (parameters.dockerComposeWorkingDirectory.isPresent()) { 81 | e.setWorkingDir(parameters.dockerComposeWorkingDirectory.get().asFile) 82 | } else { 83 | e.setWorkingDir(parameters.projectDirectory) 84 | } 85 | e.environment = System.getenv() + parameters.environment.get() 86 | 87 | def finalArgs = [] 88 | finalArgs.addAll(getDockerComposeBaseCommand()) 89 | finalArgs.addAll(parameters.useComposeFiles.get().collectMany { ['-f', it].asCollection() }) 90 | finalArgs.addAll(parameters.composeAdditionalArgs.get()) 91 | if (noAnsi) { 92 | if (version >= VersionNumber.parse('1.28.0')) { 93 | finalArgs.addAll(['--ansi', 'never']) 94 | } else if (version >= VersionNumber.parse('1.16.0')) { 95 | finalArgs.add('--no-ansi') 96 | } 97 | } 98 | String pn = parameters.projectName.getOrNull() 99 | if (pn) { 100 | finalArgs.addAll(['-p', pn]) 101 | } 102 | finalArgs.addAll(args) 103 | e.commandLine finalArgs 104 | if (os != null) { 105 | e.standardOutput = os 106 | if (captureStderr) { 107 | e.errorOutput = os 108 | } 109 | } 110 | e.ignoreExitValue = true 111 | } 112 | if (!ignoreExitValue && er.exitValue != 0) { 113 | def stdout = os != null ? os.toString().trim() : "N/A" 114 | throw new RuntimeException("Exit-code ${er.exitValue} when calling ${parameters.executable.get()}, stdout: $stdout") 115 | } 116 | } 117 | 118 | String execute(String... args) { 119 | new ByteArrayOutputStream().withStream { os -> 120 | executeWithCustomOutput(os, false, true, false, args) 121 | os.toString().trim() 122 | } 123 | } 124 | 125 | String executeWithAnsi(String... 
args) { 126 | new ByteArrayOutputStream().withStream { os -> 127 | executeWithCustomOutput(os, false, false, false, args) 128 | os.toString().trim() 129 | } 130 | } 131 | 132 | private VersionNumber cachedVersion 133 | 134 | VersionNumber getVersion() { 135 | if (cachedVersion) return cachedVersion 136 | String rawVersion = executeWithAnsi('version', '--short') 137 | return cachedVersion = VersionNumber.parse(rawVersion.startsWith('v') ? rawVersion.substring(1) : rawVersion) 138 | } 139 | 140 | Map> getContainerIds(List serviceNames) { 141 | // `docker compose ps -q serviceName` returns an exit code of 1 when the service 142 | // doesn't exist. To guard against this, check the service list first. 143 | def services = execute('ps', '--services').readLines() 144 | def result = [:] 145 | for (String serviceName: serviceNames) { 146 | if (services.contains(serviceName)) { 147 | def containerIds = execute('ps', '-q', serviceName).readLines() 148 | result[serviceName] = containerIds 149 | } else { 150 | result[serviceName] = [] 151 | } 152 | } 153 | return result 154 | } 155 | 156 | private Set> threadsToInterruptOnClose = ConcurrentHashMap.newKeySet() 157 | 158 | void captureContainersOutput(Closure logMethod, String... 
services) { 159 | // execute daemon thread that executes `docker-compose logs -f --no-color` 160 | // the -f arguments means `follow` and so this command ends when docker-compose finishes 161 | def t = Executors.defaultThreadFactory().newThread(new Runnable() { 162 | @Override 163 | void run() { 164 | def os = new OutputStream() { 165 | ArrayList buffer = new ArrayList() 166 | 167 | @Override 168 | void write(int b) throws IOException { 169 | // store bytes into buffer until end-of-line character is detected 170 | if (b == 10 || b == 13) { 171 | if (buffer.size() > 0) { 172 | // convert the byte buffer to characters and print these characters 173 | def toPrint = buffer.collect { it as byte }.toArray() as byte[] 174 | logMethod(new String(toPrint)) 175 | buffer.clear() 176 | } 177 | } else { 178 | buffer.add(b as Byte) 179 | } 180 | } 181 | } 182 | try { 183 | executeWithCustomOutput(os, true, true, true, 'logs', '-f', '--no-color', *services) 184 | } catch (InterruptedException e) { 185 | logger.trace("Thread capturing container output has been interrupted, this is not an error", e) 186 | } catch (UncheckedException ue) { 187 | if (ue.cause instanceof InterruptedException) { 188 | // Gradle < 5.0 incorrectly wrapped InterruptedException to UncheckedException 189 | logger.trace("Thread capturing container output has been interrupted, this is not an error", ue) 190 | } else { 191 | throw ue 192 | } 193 | } finally { 194 | os.close() 195 | } 196 | } 197 | }) 198 | t.daemon = true 199 | t.start() 200 | threadsToInterruptOnClose.add(new WeakReference(t)) 201 | } 202 | 203 | @Override 204 | void close() throws Exception { 205 | threadsToInterruptOnClose.forEach {threadRef -> 206 | def thread = threadRef.get() 207 | if (thread != null) { 208 | thread.interrupt() 209 | } 210 | } 211 | } 212 | 213 | Iterable getServiceNames() { 214 | if (!parameters.startedServices.get().empty) { 215 | if(parameters.includeDependencies.get()) 216 | { 217 | def dependentServices = 
getDependentServices(parameters.startedServices.get()).toList() 218 | [*parameters.startedServices.get(), *dependentServices].unique() 219 | } 220 | else 221 | { 222 | parameters.startedServices.get() 223 | } 224 | } else if (version >= VersionNumber.parse('1.6.0')) { 225 | execute('config', '--services').readLines() 226 | } else { 227 | def composeFiles = parameters.useComposeFiles.get().empty ? getStandardComposeFiles() : getCustomComposeFiles() 228 | composeFiles.collectMany { composeFile -> 229 | def compose = (Map) (new Yaml().load(fileOps.file(composeFile).text)) 230 | // if there is 'version' on top-level then information about services is in 'services' sub-tree 231 | compose.containsKey('version') ? ((Map) compose.get('services')).keySet() : compose.keySet() 232 | }.unique() 233 | } 234 | } 235 | 236 | /** 237 | * Calculates dependent services for the given set of services. The full dependency graph will be calculated, such that transitive dependencies will be returned. 238 | * @param serviceNames the name of services to calculate dependencies for 239 | * @return the set of services that are dependencies of the given services 240 | */ 241 | Iterable getDependentServices(Iterable serviceNames) { 242 | def configOutput = execute('config') 243 | def dependencyGraph = ComposeConfigParser.findServiceDependencies(configOutput) 244 | serviceNames.collectMany { dependencyGraph.getOrDefault(it, [].toSet()) } 245 | } 246 | 247 | Iterable getStandardComposeFiles() { 248 | File searchDirectory = fileOps.file(parameters.dockerComposeWorkingDirectory) ?: parameters.projectDirectory.getAsFile() 249 | def res = [] 250 | def f = findInParentDirectories('docker-compose.yml', searchDirectory) 251 | if (f != null) res.add(f) 252 | f = findInParentDirectories('docker-compose.override.yml', searchDirectory) 253 | if (f != null) res.add(f) 254 | res 255 | } 256 | 257 | Iterable getCustomComposeFiles() { 258 | parameters.useComposeFiles.get().collect { 259 | def f = 
fileOps.file(it) 260 | if (!f.exists()) { 261 | throw new IllegalArgumentException("Custom Docker Compose file not found: $f") 262 | } 263 | f 264 | } 265 | } 266 | 267 | File findInParentDirectories(String filename, File directory) { 268 | if ((directory) == null) return null 269 | def f = new File(directory, filename) 270 | f.exists() ? f : findInParentDirectories(filename, directory.parentFile) 271 | } 272 | 273 | boolean shouldRemoveOrphans() { 274 | version >= VersionNumber.parse('1.7.0') && parameters.removeOrphans.get() 275 | } 276 | 277 | boolean isScaleSupported() { 278 | def v = version 279 | if (v < VersionNumber.parse('1.13.0') && parameters.scale) { 280 | throw new UnsupportedOperationException("Docker Compose version $v doesn't support --scale option") 281 | } 282 | !parameters.scale.get().isEmpty() 283 | } 284 | 285 | // Determines whether to use docker-compose (V1) or docker compose (V2) 286 | List getDockerComposeBaseCommand() { 287 | parameters.useDockerComposeV2.get() 288 | ? 
[parameters.dockerExecutable.get(), "compose"] 289 | : Arrays.asList(parameters.executable.get().split("\\s+")) // split on spaces 290 | } 291 | } 292 | -------------------------------------------------------------------------------- /src/main/groovy/com/avast/gradle/dockercompose/ComposeSettings.groovy: -------------------------------------------------------------------------------- 1 | package com.avast.gradle.dockercompose 2 | 3 | import com.fasterxml.jackson.core.type.TypeReference 4 | import com.fasterxml.jackson.databind.ObjectMapper 5 | import groovy.transform.CompileStatic 6 | import groovy.transform.PackageScope 7 | import org.gradle.api.Project 8 | import org.gradle.api.Task 9 | import org.gradle.api.file.DirectoryProperty 10 | import org.gradle.api.file.RegularFile 11 | import org.gradle.api.file.RegularFileProperty 12 | import org.gradle.api.provider.ListProperty 13 | import org.gradle.api.provider.MapProperty 14 | import org.gradle.api.provider.Property 15 | import org.gradle.api.provider.Provider 16 | import org.gradle.api.tasks.TaskProvider 17 | import org.gradle.process.JavaForkOptions 18 | import org.gradle.process.ProcessForkOptions 19 | 20 | import javax.inject.Inject 21 | import java.nio.charset.StandardCharsets 22 | import java.security.MessageDigest 23 | import java.time.Duration 24 | 25 | @CompileStatic 26 | abstract class ComposeSettings { 27 | transient final TasksConfigurator tasksConfigurator 28 | final DockerExecutor dockerExecutor 29 | 30 | abstract ListProperty getUseComposeFiles() 31 | abstract ListProperty getStartedServices() 32 | abstract Property getIncludeDependencies() 33 | abstract MapProperty getScale() 34 | 35 | abstract ListProperty getBuildAdditionalArgs() 36 | abstract ListProperty getPullAdditionalArgs() 37 | abstract ListProperty getUpAdditionalArgs() 38 | abstract ListProperty getDownAdditionalArgs() 39 | abstract ListProperty getComposeAdditionalArgs() 40 | 41 | abstract Property getBuildBeforeUp() 42 | abstract 
Property getBuildBeforePull() 43 | 44 | abstract Property getRemoveOrphans() 45 | abstract Property getForceRecreate() 46 | abstract Property getNoRecreate() 47 | 48 | abstract Property getStopContainers() 49 | abstract Property getRemoveContainers() 50 | abstract Property getRetainContainersOnStartupFailure() 51 | abstract Property getRemoveImages() 52 | abstract Property getRemoveVolumes() 53 | 54 | abstract Property getIgnorePullFailure() 55 | abstract Property getIgnorePushFailure() 56 | abstract ListProperty getPushServices() 57 | 58 | abstract Property getWaitForTcpPorts() 59 | abstract ListProperty getTcpPortsToIgnoreWhenWaiting() 60 | abstract Property getWaitAfterTcpProbeFailure() 61 | abstract Property getWaitForTcpPortsTimeout() 62 | abstract Property getWaitForTcpPortsDisconnectionProbeTimeout() 63 | abstract Property getWaitAfterHealthyStateProbeFailure() 64 | abstract Property getWaitForHealthyStateTimeout() 65 | abstract Property getCheckContainersRunning() 66 | 67 | abstract Property getCaptureContainersOutput() 68 | abstract RegularFileProperty getCaptureContainersOutputToFile() 69 | abstract DirectoryProperty getCaptureContainersOutputToFiles() 70 | abstract RegularFileProperty getComposeLogToFile() 71 | abstract DirectoryProperty getContainerLogToDir() 72 | 73 | protected String customProjectName 74 | protected Boolean customProjectNameSet 75 | protected String safeProjectNamePrefix 76 | void setProjectName(String customProjectName) { 77 | this.customProjectName = customProjectName 78 | this.customProjectNameSet = true 79 | } 80 | private Provider projectNameProvider 81 | Provider getProjectName() { 82 | this.projectNameProvider 83 | } 84 | String projectNamePrefix 85 | String nestedName 86 | 87 | abstract Property getExecutable() 88 | abstract Property getUseDockerComposeV2() 89 | abstract Property getDockerExecutable() 90 | abstract MapProperty getEnvironment() 91 | 92 | abstract DirectoryProperty getDockerComposeWorkingDirectory() 93 | 
abstract Property getDockerComposeStopTimeout() 94 | 95 | private final Provider servicesInfosFile 96 | 97 | @Inject 98 | ComposeSettings(Project project, String name = '', String parentName = '') { 99 | this.nestedName = parentName + name 100 | this.safeProjectNamePrefix = generateSafeProjectNamePrefix(project) 101 | this.projectNameProvider = project.provider({ 102 | if (customProjectNameSet) { 103 | return customProjectName 104 | } 105 | else if (projectNamePrefix) { 106 | return nestedName ? "${projectNamePrefix}_${nestedName}" : projectNamePrefix 107 | } 108 | else { 109 | return nestedName ? "${safeProjectNamePrefix}_${nestedName}" : safeProjectNamePrefix 110 | } 111 | }).map{ String projectName -> 112 | // docker-compose project names must be lowercase 113 | projectName.toLowerCase() 114 | } 115 | 116 | useComposeFiles.empty() 117 | startedServices.empty() 118 | includeDependencies.set(false) 119 | scale.empty() 120 | 121 | buildAdditionalArgs.empty() 122 | pullAdditionalArgs.empty() 123 | upAdditionalArgs.empty() 124 | downAdditionalArgs.empty() 125 | composeAdditionalArgs.empty() 126 | 127 | buildBeforeUp.set(true) 128 | buildBeforePull.set(true) 129 | 130 | removeOrphans.set(false) 131 | forceRecreate.set(false) 132 | noRecreate.set(false) 133 | 134 | stopContainers.set(true) 135 | removeContainers.set(true) 136 | retainContainersOnStartupFailure.set(false) 137 | removeImages.set(RemoveImages.None) 138 | removeVolumes.set(true) 139 | 140 | ignorePullFailure.set(false) 141 | ignorePushFailure.set(false) 142 | pushServices.empty() 143 | 144 | waitForTcpPorts.set(true) 145 | tcpPortsToIgnoreWhenWaiting.empty() 146 | waitAfterTcpProbeFailure.set(Duration.ofSeconds(1)) 147 | waitForTcpPortsTimeout.set(Duration.ofMinutes(15)) 148 | waitForTcpPortsDisconnectionProbeTimeout.set(Duration.ofMillis(1000)) 149 | waitAfterHealthyStateProbeFailure.set(Duration.ofSeconds(5)) 150 | waitForHealthyStateTimeout.set(Duration.ofMinutes(15)) 151 | 
checkContainersRunning.set(true) 152 | 153 | captureContainersOutput.set(false) 154 | 155 | executable.set('docker-compose') 156 | useDockerComposeV2.set(true) 157 | dockerExecutable.set('docker') 158 | 159 | dockerComposeStopTimeout.set(Duration.ofSeconds(10)) 160 | 161 | this.containerLogToDir.set(project.buildDir.toPath().resolve('containers-logs').toFile()) 162 | 163 | this.dockerExecutor = project.objects.newInstance(DockerExecutor, this) 164 | this.tasksConfigurator = new TasksConfigurator(this, project, name) 165 | servicesInfosFile = tasksConfigurator.upTask.flatMap { it.servicesInfosFile } 166 | } 167 | 168 | private static String generateSafeProjectNamePrefix(Project project) { 169 | def fullPathMd5 = MessageDigest.getInstance("MD5").digest(project.projectDir.absolutePath.toString().getBytes(StandardCharsets.UTF_8)).encodeHex().toString() 170 | "${fullPathMd5}_${project.name.replace('.', '_')}" 171 | } 172 | 173 | protected ComposeSettings cloneAsNested(String name) { 174 | def r = tasksConfigurator.newComposeSettings(name, this.nestedName) 175 | 176 | r.includeDependencies.set(includeDependencies.get()) 177 | 178 | r.buildAdditionalArgs.set(new ArrayList(this.buildAdditionalArgs.get())) 179 | r.pullAdditionalArgs.set(new ArrayList(this.pullAdditionalArgs.get())) 180 | r.upAdditionalArgs.set(new ArrayList(this.upAdditionalArgs.get())) 181 | r.downAdditionalArgs.set(new ArrayList(this.downAdditionalArgs.get())) 182 | r.composeAdditionalArgs.set(new ArrayList(this.composeAdditionalArgs.get())) 183 | 184 | r.buildBeforeUp.set(this.buildBeforeUp.get()) 185 | r.buildBeforePull.set(this.buildBeforePull.get()) 186 | 187 | r.removeOrphans.set(this.removeOrphans.get()) 188 | r.forceRecreate.set(this.forceRecreate.get()) 189 | r.noRecreate.set(this.noRecreate.get()) 190 | 191 | r.stopContainers.set(stopContainers.get()) 192 | r.removeContainers.set(removeContainers.get()) 193 | r.retainContainersOnStartupFailure.set(retainContainersOnStartupFailure.get()) 194 | 
r.removeImages.set(removeImages.get()) 195 | r.removeVolumes.set(removeVolumes.get()) 196 | 197 | r.ignorePullFailure.set(ignorePullFailure.get()) 198 | r.ignorePushFailure.set(ignorePushFailure.get()) 199 | 200 | r.waitForTcpPorts.set(this.waitForTcpPorts.get()) 201 | r.tcpPortsToIgnoreWhenWaiting.set(new ArrayList(this.tcpPortsToIgnoreWhenWaiting.get())) 202 | r.waitAfterTcpProbeFailure.set(waitAfterTcpProbeFailure.get()) 203 | r.waitForTcpPortsTimeout.set(waitForTcpPortsTimeout.get()) 204 | r.waitForTcpPortsDisconnectionProbeTimeout.set(waitForTcpPortsDisconnectionProbeTimeout.get()) 205 | r.waitAfterHealthyStateProbeFailure.set(waitAfterHealthyStateProbeFailure.get()) 206 | r.waitForHealthyStateTimeout.set(waitForHealthyStateTimeout.get()) 207 | r.checkContainersRunning.set(checkContainersRunning.get()) 208 | 209 | r.captureContainersOutput.set(captureContainersOutput.get()) 210 | 211 | r.projectNamePrefix = this.projectNamePrefix 212 | 213 | r.executable.set(this.executable.get()) 214 | r.useDockerComposeV2.set(this.useDockerComposeV2.get()) 215 | r.dockerExecutable.set(this.dockerExecutable.get()) 216 | r.environment.set(new HashMap(this.environment.get())) 217 | 218 | r.dockerComposeWorkingDirectory.set(this.dockerComposeWorkingDirectory.getOrNull()) 219 | r.dockerComposeStopTimeout.set(this.dockerComposeStopTimeout.get()) 220 | r 221 | } 222 | 223 | void isRequiredBy(Task task) { 224 | tasksConfigurator.isRequiredByCore(task, false) 225 | } 226 | 227 | void isRequiredBy(TaskProvider taskProvider) { 228 | taskProvider.configure { tasksConfigurator.isRequiredByCore(it, true) } 229 | } 230 | 231 | Map getServicesInfos() { 232 | // Preserve the legacy behavior of returning an empty map if this is called before composeUp succeeds. 233 | // composeUp.servicesInfosFile.map { ... }.get() will fail if called before composeUp completes. 234 | // composeUp.servicesInfosFile.get() will work if called before composeUp completes. 
235 | def f = servicesInfosFile.get().asFile 236 | f.exists() ? new ObjectMapper().readValue(f, new TypeReference>() {}) : [:] 237 | } 238 | 239 | void exposeAsEnvironment(ProcessForkOptions task) { 240 | exposeAsEnvironmentInternal(task, servicesInfos) 241 | } 242 | 243 | void exposeAsEnvironmentFile(File file) { 244 | exposeAsEnvironmentFileInternal(file, servicesInfos) 245 | } 246 | 247 | @PackageScope 248 | void exposeAsEnvironmentInternal(ProcessForkOptions task, Map servicesInfos) { 249 | servicesInfos.values().each { serviceInfo -> 250 | serviceInfo.containerInfos.each { instanceName, si -> 251 | if (instanceName.endsWith('_1') || instanceName.endsWith('-1')) { 252 | task.environment << createEnvironmentVariables(serviceInfo.name.toUpperCase(), si) 253 | } 254 | task.environment << createEnvironmentVariables(instanceName.toUpperCase(), si) 255 | } 256 | } 257 | } 258 | 259 | @PackageScope 260 | void exposeAsEnvironmentFileInternal(File envFile, Map servicesInfos) { 261 | String envString = "" 262 | 263 | servicesInfos.values().each { serviceInfo -> 264 | serviceInfo.containerInfos.each { instanceName, si -> 265 | if (instanceName.endsWith('_1') || instanceName.endsWith('-1')) { 266 | createEnvironmentVariables(serviceInfo.name.toUpperCase(), si).each { 267 | envString = envString + it.key + "=" + it.value + "\n" 268 | } 269 | } 270 | createEnvironmentVariables(instanceName.toUpperCase(), si).each { 271 | envString = envString + it.key + "=" + it.value + "\n" 272 | } 273 | } 274 | } 275 | 276 | envFile.write envString 277 | } 278 | 279 | void exposeAsSystemProperties(JavaForkOptions task) { 280 | exposeAsSystemPropertiesInternal(task, servicesInfos) 281 | } 282 | 283 | @PackageScope 284 | void exposeAsSystemPropertiesInternal(JavaForkOptions task, Map servicesInfos) { 285 | servicesInfos.values().each { serviceInfo -> 286 | serviceInfo.containerInfos.each { instanceName, si -> 287 | if(instanceName.endsWith('_1') || instanceName.endsWith('-1')) { 288 | 
task.systemProperties << createSystemProperties(serviceInfo.name, si) 289 | } 290 | task.systemProperties << createSystemProperties(instanceName, si) 291 | } 292 | } 293 | } 294 | 295 | protected Map createEnvironmentVariables(String variableName, ContainerInfo ci) { 296 | def serviceName = replaceV2Separator(variableName) 297 | Map environmentVariables = [:] 298 | environmentVariables.put("${serviceName}_HOST".toString(), ci.host) 299 | environmentVariables.put("${serviceName}_CONTAINER_HOSTNAME".toString(), ci.containerHostname) 300 | ci.tcpPorts.each { environmentVariables.put("${serviceName}_TCP_${it.key}".toString(), it.value) } 301 | ci.udpPorts.each { environmentVariables.put("${serviceName}_UDP_${it.key}".toString(), it.value) } 302 | environmentVariables 303 | } 304 | 305 | protected Map createSystemProperties(String variableName, ContainerInfo ci) { 306 | def serviceName = replaceV2Separator(variableName) 307 | Map systemProperties = [:] 308 | systemProperties.put("${serviceName}.host".toString(), ci.host) 309 | systemProperties.put("${serviceName}.containerHostname".toString(), ci.containerHostname) 310 | ci.tcpPorts.each { systemProperties.put("${serviceName}.tcp.${it.key}".toString(), it.value) } 311 | ci.udpPorts.each { systemProperties.put("${serviceName}.udp.${it.key}".toString(), it.value) } 312 | systemProperties 313 | } 314 | 315 | static String replaceV2Separator(String serviceName) { 316 | serviceName.replaceAll('-(\\d+)$', '_$1') 317 | } 318 | } 319 | 320 | enum RemoveImages { 321 | None, 322 | Local, // images that don't have a custom name set by the `image` field 323 | All 324 | } 325 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # gradle-docker-compose-plugin 
[![Build](https://github.com/avast/gradle-docker-compose-plugin/actions/workflows/build.yml/badge.svg)](https://github.com/avast/gradle-docker-compose-plugin/actions/workflows/build.yml) [![Version](https://badgen.net/maven/v/maven-central/com.avast.gradle/gradle-docker-compose-plugin/)](https://repo1.maven.org/maven2/com/avast/gradle/gradle-docker-compose-plugin/) 2 | 3 | Simplifies usage of [Docker Compose](https://docs.docker.com/compose/) for local development and integration testing in [Gradle](https://gradle.org/) environment. 4 | 5 | `composeUp` task starts the application and waits till all containers become [healthy](https://docs.docker.com/engine/reference/builder/#healthcheck) and all exposed TCP ports are open (so till the application is ready). It reads assigned host and ports of particular containers and stores them into `dockerCompose.servicesInfos` property. 6 | 7 | `composeDown` task stops the application and removes the containers, only if 'stopContainers' is set to 'true' (default value). 8 | 9 | `composeDownForced` task stops the application and removes the containers. 10 | 11 | `composePull` task pulls and optionally builds the images required by the application. This is useful, for example, with a CI platform that caches docker images to decrease build times. 12 | 13 | `composeBuild` task builds the services of the application. 14 | 15 | `composePush` task pushes images for services to their respective `registry/repository`. 16 | 17 | `composeLogs` task stores logs from all containers to files in `containerLogToDir` directory. 
18 | 19 | ## Quick start 20 | The plugin is published to [Gradle Plugin Portal](https://plugins.gradle.org/plugin/com.avast.gradle.docker-compose), so the import is easy as 21 | 22 | ```gradle 23 | plugins { 24 | id "com.avast.gradle.docker-compose" version "$versionHere" 25 | } 26 | ``` 27 | 28 | Since the version `0.14.2`, the plugin is also published to Maven Central, so if you prefer this way: 29 | 30 | ```gradle 31 | buildscript { 32 | repositories { 33 | mavenCentral() 34 | } 35 | dependencies { 36 | classpath "com.avast.gradle:gradle-docker-compose-plugin:$versionHere" 37 | } 38 | } 39 | 40 | apply plugin: 'docker-compose' 41 | ``` 42 | 43 | > Versions prior to `0.14.2` were published to JCenter, and it is decommissioned now, so these old versions are not available. 44 | 45 | After importing the plugin, the basic usage is typically just: 46 | ```gradle 47 | dockerCompose.isRequiredBy(test) 48 | ``` 49 | 50 | It ensures: 51 | * `docker-compose up` is executed in the project directory, so it uses the `docker-compose.yml` file. 52 | * If the provided task (`test` in the example above) executes a new process then environment variables and Java system properties are provided. 53 | * The name of environment variable is `${serviceName}_HOST` and `${serviceName}_TCP_${exposedPort}` (e.g. `WEB_HOST` and `WEB_TCP_80`). 54 | * The name of Java system property is `${serviceName}.host` and `${serviceName}.tcp.${exposedPort}` (e.g. `web.host` and `web.tcp.80`). 55 | * If the service is scaled then the `serviceName` has `_1`, `_2`... suffix (e.g. `WEB_1_HOST` and `WEB_1_TCP_80`, `web_1.host` and `web_1.tcp.80`). 56 | * Please note that in Docker Compose v2, the suffix contains `-` instead of `_` 57 | 58 | ## Why to use Docker Compose? 59 | 1. I want to be able to run my application on my computer, and it must work for my colleagues as well. Just execute `docker compose up` and I'm done - e.g. the database is running. 60 | 2. 
I want to be able to test my application on my computer - I don't wanna wait till my application is deployed into dev/testing environment and acceptance/end2end tests get executed. I want to execute these tests on my computer - it means execute `docker compose up` before these tests. 61 | 62 | ## Why this plugin? 63 | You could easily ensure that `docker compose up` is called before your tests but there are a few gotchas that this plugin solves: 64 | 65 | 1. If you execute `docker compose up -d` (_detached_) then this command returns immediately and your application is probably not able to serve requests at this time. This plugin waits till all containers become [healthy](https://docs.docker.com/engine/reference/builder/#healthcheck) and all exported TCP ports of all services are open. 66 | - If waiting for healthy state or open TCP ports times out (default is 15 minutes) then it prints log of related service. 67 | 2. It's recommended not to assign fixed values of exposed ports in `docker-compose.yml` (i.e. `8888:80`) because it can cause ports collision on integration servers. If you don't assign a fixed value for exposed port (use just `80`) then the port is exposed as a random free port. This plugin reads assigned ports (and even IP addresses of containers) and stores them into `dockerCompose.servicesInfos` map. 68 | 3. There are minor differences when using Linux containers on Linux, Windows and Mac, and when using Windows Containers. This plugin handles these differences for you so you have the same experience in all environments. 69 | 70 | # Usage 71 | The plugin must be applied on project that contains `docker-compose.yml` file. It supposes that [Docker Engine](https://docs.docker.com/engine/) and [Docker Compose](https://docs.docker.com/compose/) are installed and available in `PATH`. 72 | 73 | > Starting from plugin version _0.17.13_, Gradle 9.0 is required. Otherwise, you can experience issues related to missing `org/apache/groovy/runtime/ObjectUtil.`. 
74 | 75 | > Starting from plugin version _0.17.6_, Gradle 6.1 is required, because _Task.usesService()_ is used. 76 | 77 | > Starting from plugin version _0.17.0_, _useDockerComposeV2_ property defaults to _true_, so the new `docker compose` (instead of the deprecated `docker-compose`) is used. 78 | 79 | > Starting from plugin version _0.10.0_, Gradle 4.9 or newer is required (because it uses [Task Configuration Avoidance API](https://docs.gradle.org/current/userguide/task_configuration_avoidance.html)). 80 | 81 | ```gradle 82 | buildscript { 83 | repositories { 84 | mavenCentral() 85 | } 86 | dependencies { 87 | classpath "com.avast.gradle:gradle-docker-compose-plugin:$versionHere" 88 | } 89 | } 90 | 91 | apply plugin: 'docker-compose' 92 | 93 | dockerCompose.isRequiredBy(test) // hooks 'dependsOn composeUp' and 'finalizedBy composeDown', and exposes environment variables and system properties (if possible) 94 | 95 | dockerCompose { 96 | useComposeFiles = ['docker-compose.yml', 'docker-compose.prod.yml'] // like 'docker-compose -f '; default is empty 97 | startedServices = ['web'] // list of services to execute when calling 'docker-compose up' or 'docker-compose pull' (when not specified, all services are executed) 98 | scale = [${serviceName1}: 5, ${serviceName2}: 2] // Pass docker compose --scale option like 'docker-compose up --scale serviceName1=5 --scale serviceName2=2' 99 | forceRecreate = false // pass '--force-recreate' and '--renew-anon-volumes' when calling 'docker-compose up' when set to 'true' 100 | noRecreate = false // pass '--no-recreate' when calling 'docker-compose up' when set to 'true' 101 | buildBeforeUp = true // performs 'docker-compose build' before calling the 'up' command; default is true 102 | buildBeforePull = true // performs 'docker-compose build' before calling the 'pull' command; default is true 103 | ignorePullFailure = false // when set to true, pass '--ignore-pull-failure' to 'docker-compose pull' 104 | ignorePushFailure = false // 
when set to true, pass '--ignore-push-failure' to 'docker-compose push' 105 | pushServices = [] // which services should be pushed, if not defined then upon `composePush` task all defined services in compose file will be pushed (default behaviour) 106 | buildAdditionalArgs = ['--force-rm'] 107 | pullAdditionalArgs = ['--ignore-pull-failures'] 108 | upAdditionalArgs = ['--no-deps'] 109 | downAdditionalArgs = ['--some-switch'] 110 | composeAdditionalArgs = ['--context', 'remote', '--verbose', "--log-level", "DEBUG"] // for adding more [options] in docker-compose [-f ...] [options] [COMMAND] [ARGS...] 111 | 112 | waitForTcpPorts = true // turns on/off the waiting for exposed TCP ports opening; default is true 113 | waitForTcpPortsTimeout = java.time.Duration.ofMinutes(15) // how long to wait until all exposed TCP become open; default is 15 minutes 114 | waitAfterTcpProbeFailure = java.time.Duration.ofSeconds(1) // how long to sleep before next attempt to check if a TCP is open; default is 1 second 115 | tcpPortsToIgnoreWhenWaiting = [1234] // list of TCP ports what will be ignored when waiting for exposed TCP ports opening; default: empty list 116 | waitForHealthyStateTimeout = java.time.Duration.ofMinutes(15) // how long to wait until a container becomes healthy; default is 15 minutes 117 | waitAfterHealthyStateProbeFailure = java.time.Duration.ofSeconds(5) // how long to sleep before next attempt to check healthy status; default is 5 seconds 118 | checkContainersRunning = true // turns on/off checking if container is running or restarting (during waiting for open TCP port and healthy state); default is true 119 | 120 | captureContainersOutput = false // if true, prints output of all containers to Gradle output - very useful for debugging; default is false 121 | captureContainersOutputToFile = project.file('/path/to/logFile') // sends output of all containers to a log file 122 | captureContainersOutputToFiles = project.file('/path/to/directory') // sends output of 
all services to a dedicated log file in the directory specified, e.g. 'web.log' for service named 'web' 123 | composeLogToFile = project.file('build/my-logs.txt') // redirect output of composeUp and composeDown tasks to this file; default is null (output is not redirected) 124 | containerLogToDir = project.file('build/logs') // directory where composeLogs task stores output of the containers; default: build/containers-logs 125 | includeDependencies = false // calculates services dependencies of startedServices and includes those when gathering logs or removing containers; default is false 126 | 127 | stopContainers = true // doesn't call `docker-compose down` if set to false - see below the paragraph about reconnecting; default is true 128 | removeContainers = true // default is true 129 | retainContainersOnStartupFailure = false // if set to true, skips running ComposeDownForced task when ComposeUp fails - useful for troubleshooting; default is false 130 | removeImages = com.avast.gradle.dockercompose.RemoveImages.None // Other accepted values are All and Local 131 | removeVolumes = true // default is true 132 | removeOrphans = false // removes containers for services not defined in the Compose file; default is false 133 | 134 | projectName = 'my-project' // allow to set custom docker-compose project name (defaults to a stable name derived from absolute path of the project and nested settings name), set to null to Docker Compose default (directory name) 135 | projectNamePrefix = 'my_prefix_' // allow to set custom prefix of docker-compose project name, the final project name has nested configuration name appended 136 | executable = '/path/to/docker-compose' // allow to set the base Docker Compose command (useful if not present in PATH). Defaults to `docker-compose`. Ignored if useDockerComposeV2 is set to true. 137 | useDockerComposeV2 = true // Use Docker Compose V2 instead of Docker Compose V1, default is true. 
If set to true, `dockerExecutable compose` is used for execution, so executable property is ignored. 138 | dockerExecutable = '/path/to/docker' // allow to set the path of the docker executable (useful if not present in PATH) 139 | dockerComposeWorkingDirectory = project.file('/path/where/docker-compose/is/invoked/from') 140 | dockerComposeStopTimeout = java.time.Duration.ofSeconds(20) // time before docker-compose sends SIGTERM to the running containers after the composeDown task has been started 141 | environment.put 'BACKEND_ADDRESS', '192.168.1.100' // environment variables to be used when calling 'docker-compose', e.g. for substitution in compose file 142 | } 143 | 144 | test.doFirst { 145 | // exposes "${serviceName}_HOST" and "${serviceName}_TCP_${exposedPort}" environment variables 146 | // for example exposes "WEB_HOST" and "WEB_TCP_80" environment variables for service named `web` with exposed port `80` 147 | // if service is scaled using scale option, environment variables will be exposed for each service instance like "WEB_1_HOST", "WEB_1_TCP_80", "WEB_2_HOST", "WEB_2_TCP_80" and so on 148 | dockerCompose.exposeAsEnvironment(test) 149 | // exposes "${serviceName}.host" and "${serviceName}.tcp.${exposedPort}" system properties 150 | // for example exposes "web.host" and "web.tcp.80" system properties for service named `web` with exposed port `80` 151 | // if service is scaled using scale option, environment variables will be exposed for each service instance like "web_1.host", "web_1.tcp.80", "web_2.host", "web_2.tcp.80" and so on 152 | dockerCompose.exposeAsSystemProperties(test) 153 | // get information about container of service `web` (declared in docker-compose.yml) 154 | def webInfo = dockerCompose.servicesInfos.web.firstContainer 155 | // in case scale option is used, dockerCompose.servicesInfos.containerInfos will contain information about all running containers of service. 
Particular container can be retrieved either by iterating the values of containerInfos map (key is service instance name, for example 'web_1') 156 | def webInfo = dockerCompose.servicesInfos.web.'web_1' 157 | // pass host and exposed TCP port 80 as custom-named Java System properties 158 | systemProperty 'myweb.host', webInfo.host 159 | systemProperty 'myweb.port', webInfo.ports[80] 160 | // it's possible to read information about exposed UDP ports using webInfo.udpPorts[1234] 161 | } 162 | ``` 163 | 164 | ## Nested configurations 165 | It is possible to create a new set of `ComposeUp`/`ComposeBuild`/`ComposePull`/`ComposeDown`/`ComposeDownForced`/`ComposePush` tasks using following syntax: 166 |
167 | Groovy 168 | 169 | ```groovy 170 | dockerCompose { 171 | // settings as usual 172 | myNested { 173 | useComposeFiles = ['docker-compose-for-integration-tests.yml'] 174 | isRequiredBy(project.tasks.myTask) 175 | } 176 | } 177 | ``` 178 | 179 |
180 | 181 | * It creates `myNestedComposeUp`, `myNestedComposeBuild`, `myNestedComposePull`, `myNestedComposeDown`, `myNestedComposeDownForced` and `myNestedComposePush` tasks. 182 | * It's possible to use all the settings as in the main `dockerCompose` block. 183 | * Configuration of the nested settings defaults to the main `dockerCompose` settings (declared before the nested settings), except following properties: `projectName`, `startedServices`, `useComposeFiles`, `scale`, `captureContainersOutputToFile`, `captureContainersOutputToFiles`, `composeLogToFile`, `containerLogToDir`, `pushServices` 184 | 185 | When exposing service info from `myNestedComposeUp` task into your task you should use following syntax: 186 | ```groovy 187 | test.doFirst { 188 | dockerCompose.myNested.exposeAsEnvironment(test) 189 | } 190 | ``` 191 | 192 |
193 | Kotlin 194 | 195 | ```kotlin 196 | test.doFirst { 197 | dockerCompose.nested("myNested").exposeAsEnvironment(project.tasks.named("test").get()) 198 | } 199 | ``` 200 | 201 |
202 | 203 | It's also possible to use this simplified syntax: 204 | ```gradle 205 | dockerCompose { 206 | isRequiredByMyTask 'docker-compose-for-integration-tests.yml' 207 | } 208 | ``` 209 | 210 | ## Reconnecting 211 | If you specify `stopContainers` to be `false` then the plugin automatically tries to reconnect to the containers from the previous run 212 | instead of calling `docker-compose up` again. Thanks to this, the startup can be very fast. 213 | 214 | It's very handy in scenarios when you iterate quickly and e.g. don't want to wait for Postgres to start again and again. 215 | 216 | Because you don't want to check-in this change to your VCS, you can take advantage of [this init.gradle](/init.gradle) [initialization script](https://docs.gradle.org/5.2/userguide/init_scripts.html) (in short, copy [this file](/init.gradle) to your `USER_HOME/.gradle/` directory). 217 | 218 | ## Usage from Kotlin DSL 219 | This plugin can be used also from Kotlin DSL, see the example: 220 | ```kotlin 221 | import com.avast.gradle.dockercompose.ComposeExtension 222 | apply(plugin = "docker-compose") 223 | configure { 224 | includeDependencies.set(true) 225 | createNested("local").apply { 226 | setProjectName("foo") 227 | environment.putAll(mapOf("TAGS" to "feature-test,local")) 228 | startedServices.set(listOf("foo-api", "foo-integration")) 229 | upAdditionalArgs.set(listOf("--no-deps")) 230 | } 231 | } 232 | ``` 233 | 234 | # Tips 235 | * You can call `dockerCompose.isRequiredBy(anyTask)` for any task, for example for your custom `integrationTest` task. 236 | * If some Dockerfile needs an artifact generated by Gradle then you can declare this dependency in a standard way, like `composeUp.dependsOn project(':my-app').distTar` 237 | * All properties in `dockerCompose` have meaningful default values so you don't have to touch it. 
If you are interested then you can look at [ComposeSettings.groovy](/src/main/groovy/com/avast/gradle/dockercompose/ComposeSettings.groovy) for reference. 238 | * `dockerCompose.servicesInfos` contains information about running containers so you must access this property after `composeUp` task is finished. So `doFirst` of your test task is perfect place where to access it. 239 | * Plugin honours a `docker-compose.override.yml` file, but only when no files are specified with `useComposeFiles` (conform command-line behavior). 240 | * Check [ContainerInfo.groovy](/src/main/groovy/com/avast/gradle/dockercompose/ContainerInfo.groovy) to see what you can know about running containers. 241 | * You can determine the Docker host in your Gradle build (i.e. `docker-machine start`) and set the `DOCKER_HOST` environment variable for compose to use: `dockerCompose { environment.put 'DOCKER_HOST', '192.168.64.9' }` 242 | * If the services executed by `docker-compose` are running on a specific host (different than Docker, like in CircleCI 2.0), then `SERVICES_HOST` environment variable can be used. This value will be used as the hostname where the services are expected to be listening. 243 | * If you need to troubleshoot a failing ComposeUp task, set `retainContainersOnStartupFailure` to prevent containers from being forcibly deleted. Does not override `removeContainers`, so if you run `ComposeDown`, it will not be affected.
244 | 245 | -------------------------------------------------------------------------------- /src/main/groovy/com/avast/gradle/dockercompose/tasks/ComposeUp.groovy: -------------------------------------------------------------------------------- 1 | package com.avast.gradle.dockercompose.tasks 2 | 3 | import com.avast.gradle.dockercompose.ComposeExecutor 4 | import com.avast.gradle.dockercompose.ContainerInfo 5 | import com.avast.gradle.dockercompose.DockerExecutor 6 | import com.avast.gradle.dockercompose.ServiceHost 7 | import com.avast.gradle.dockercompose.ServiceInfo 8 | import com.avast.gradle.dockercompose.ServiceInfoCache 9 | import com.fasterxml.jackson.databind.ObjectMapper 10 | import groovy.json.JsonSlurper 11 | import org.gradle.api.DefaultTask 12 | import org.gradle.api.file.DirectoryProperty 13 | import org.gradle.api.file.RegularFileProperty 14 | import org.gradle.api.provider.ListProperty 15 | import org.gradle.api.provider.MapProperty 16 | import org.gradle.api.provider.Property 17 | import org.gradle.api.tasks.Internal 18 | import org.gradle.api.tasks.OutputFile 19 | import org.gradle.api.tasks.TaskAction 20 | 21 | import java.nio.file.Paths 22 | import java.time.Duration 23 | import java.time.Instant 24 | 25 | abstract class ComposeUp extends DefaultTask { 26 | 27 | @OutputFile 28 | abstract RegularFileProperty getServicesInfosFile() 29 | 30 | @Internal 31 | Boolean wasReconnected = false // for tests 32 | 33 | @Internal 34 | DockerExecutor dockerExecutor 35 | 36 | @Internal 37 | abstract Property getStopContainers() 38 | 39 | @Internal 40 | abstract Property getForceRecreate() 41 | 42 | @Internal 43 | abstract Property getNoRecreate() 44 | 45 | @Internal 46 | abstract MapProperty getScale() 47 | 48 | @Internal 49 | abstract ListProperty getUpAdditionalArgs() 50 | 51 | @Internal 52 | abstract ListProperty getStartedServices() 53 | 54 | @Internal 55 | abstract RegularFileProperty getComposeLogToFile() 56 | 57 | @Internal 58 | abstract Property 
getWaitForTcpPorts() 59 | 60 | @Internal 61 | abstract Property getRetainContainersOnStartupFailure() 62 | 63 | @Internal 64 | abstract Property getCaptureContainersOutput() 65 | 66 | @Internal 67 | abstract RegularFileProperty getCaptureContainersOutputToFile() 68 | 69 | @Internal 70 | abstract DirectoryProperty getCaptureContainersOutputToFiles() 71 | 72 | @Internal 73 | abstract Property getWaitAfterHealthyStateProbeFailure() 74 | 75 | @Internal 76 | abstract Property getCheckContainersRunning() 77 | 78 | @Internal 79 | abstract Property getWaitForHealthyStateTimeout() 80 | 81 | @Internal 82 | abstract ListProperty getTcpPortsToIgnoreWhenWaiting() 83 | 84 | @Internal 85 | abstract Property getWaitForTcpPortsDisconnectionProbeTimeout() 86 | 87 | @Internal 88 | abstract Property getWaitForTcpPortsTimeout() 89 | 90 | @Internal 91 | abstract Property getWaitAfterTcpProbeFailure() 92 | 93 | @Internal 94 | abstract Property getServiceInfoCache() 95 | 96 | @Internal 97 | abstract Property getComposeExecutor() 98 | 99 | private Map servicesInfos = [:] 100 | 101 | @Internal 102 | Map getServicesInfos() { 103 | servicesInfos 104 | } 105 | 106 | ComposeUp() { 107 | group = 'docker' 108 | description = 'Builds and starts containers of docker-compose project' 109 | setServicesInfosFile() 110 | outputs.upToDateWhen { false } 111 | } 112 | 113 | @TaskAction 114 | void up() { 115 | if (!stopContainers.get()) { 116 | def cachedServicesInfos = serviceInfoCache.get().get({ getStateForCache() }) 117 | if (cachedServicesInfos) { 118 | servicesInfos = cachedServicesInfos 119 | logger.lifecycle('Cached services infos loaded while \'stopContainers\' is set to \'false\'.') 120 | wasReconnected = true 121 | startCapturing() 122 | printExposedPorts() 123 | return 124 | } 125 | } 126 | serviceInfoCache.get().clear() 127 | wasReconnected = false 128 | String[] args = ['up', '-d'] 129 | if (composeExecutor.get().shouldRemoveOrphans()) { 130 | args += '--remove-orphans' 131 | } 132 | if 
(forceRecreate.get()) { 133 | args += '--force-recreate' 134 | args += '--renew-anon-volumes' 135 | } else if (noRecreate.get()) { 136 | args += '--no-recreate' 137 | } 138 | if (composeExecutor.get().isScaleSupported()) { 139 | args += scale.get().collect { service, value -> 140 | ['--scale', "$service=$value".toString()] 141 | }.flatten() 142 | } 143 | args += upAdditionalArgs.get() 144 | args += startedServices.get() 145 | try { 146 | def composeLog = null 147 | if (composeLogToFile.isPresent()) { 148 | File logFile = composeLogToFile.get().asFile 149 | logger.debug "Logging docker-compose up to: $logFile" 150 | logFile.parentFile.mkdirs() 151 | composeLog = new FileOutputStream(logFile) 152 | } 153 | composeExecutor.get().executeWithCustomOutputWithExitValue(composeLog, args) 154 | def servicesToLoad = composeExecutor.get().getServiceNames() 155 | servicesInfos = loadServicesInfo(servicesToLoad).collectEntries { [(it.name): (it)] } 156 | startCapturing() 157 | waitForHealthyContainers(servicesInfos.values()) 158 | if (waitForTcpPorts.get()) { 159 | servicesInfos = waitForOpenTcpPorts(servicesInfos.values()).collectEntries { [(it.name): (it)] } 160 | } 161 | printExposedPorts() 162 | if (!stopContainers.get()) { 163 | serviceInfoCache.get().set(servicesInfos, getStateForCache()) 164 | } else { 165 | serviceInfoCache.get().clear() 166 | } 167 | writeServicesInfosFile() 168 | } 169 | catch (Exception e) { 170 | logger.debug("Failed to start-up Docker containers", e) 171 | if (!retainContainersOnStartupFailure.get()) { 172 | serviceInfoCache.get().startupFailed = true 173 | } 174 | throw e 175 | } 176 | } 177 | 178 | protected void printExposedPorts() { 179 | if (!servicesInfos.values().any { si -> si.tcpPorts.any() }) { 180 | return 181 | } 182 | int nameMaxLength = Math.max('Name'.length(), servicesInfos.values().collect { it.containerInfos.values().collect { it.instanceName.length() } }.flatten().max()) 183 | int containerPortMaxLenght = 'Container 
Port'.length() 184 | int mappingMaxLength = Math.max('Mapping'.length(), servicesInfos.values().collect { it.containerInfos.values().collect { ci -> ci.tcpPorts.collect { p -> "${ci.host}:${p.value}".length() } } }.flatten().max()) 185 | logger.lifecycle('+-' + '-'.multiply(nameMaxLength) + '-+-' + '-'.multiply(containerPortMaxLenght) + '-+-' + '-'.multiply(mappingMaxLength) + '-+') 186 | logger.lifecycle('| Name' + ' '.multiply(nameMaxLength - 'Name'.length()) + ' | Container Port' + ' '.multiply(containerPortMaxLenght - 'Container Port'.length()) + ' | Mapping' + ' '.multiply(mappingMaxLength - 'Mapping'.length()) + ' |') 187 | logger.lifecycle('+-' + '-'.multiply(nameMaxLength) + '-+-' + '-'.multiply(containerPortMaxLenght) + '-+-' + '-'.multiply(mappingMaxLength) + '-+') 188 | servicesInfos.values().forEach { si -> 189 | if (si.containerInfos.values().any { it.tcpPorts.any() }) { 190 | si.containerInfos.values().forEach { ci -> 191 | ci.tcpPorts.entrySet().forEach { p -> 192 | String mapping = "${ci.host}:${p.value}".toString() 193 | logger.lifecycle('| ' + ci.instanceName + ' '.multiply(nameMaxLength - ci.instanceName.length()) + ' | ' + p.key + ' '.multiply(containerPortMaxLenght - p.key.toString().length()) + ' | ' + mapping + ' '.multiply(mappingMaxLength - mapping.length()) + ' |') 194 | } 195 | } 196 | logger.lifecycle('+-' + '-'.multiply(nameMaxLength) + '-+-' + '-'.multiply(containerPortMaxLenght) + '-+-' + '-'.multiply(mappingMaxLength) + '-+') 197 | } 198 | } 199 | } 200 | 201 | protected void startCapturing() { 202 | if (captureContainersOutput.get()) { 203 | composeExecutor.get().captureContainersOutput(logger.&lifecycle) 204 | } 205 | if (captureContainersOutputToFile.isPresent()) { 206 | def logFile = captureContainersOutputToFile.get().asFile 207 | logFile.parentFile.mkdirs() 208 | composeExecutor.get().captureContainersOutput({ logFile.append(it + '\n') }) 209 | } 210 | if (captureContainersOutputToFiles.isPresent()) { 211 | def logDir = 
captureContainersOutputToFiles.get().asFile 212 | logDir.mkdirs() 213 | logDir.listFiles().each { it.delete() } 214 | servicesInfos.keySet().each { 215 | def logFile = logDir.toPath().resolve("${it}.log").toFile() 216 | composeExecutor.get().captureContainersOutput({ logFile.append(it + '\n') }, it) 217 | } 218 | } 219 | } 220 | 221 | private static final VOLATILE_STATE_KEYS = ['RunningFor', 'Publishers'] 222 | private static final UNSTABLE_ARRAY_STATE_KEYS = ['Mounts', 'Ports', 'Networks', 'Labels'] 223 | 224 | @Internal 225 | protected def getStateForCache() { 226 | String processesAsString = composeExecutor.get().execute('ps', '--format', 'json') 227 | String processesState = processesAsString 228 | try { 229 | // Since Docker Compose 2.21.0, the output is not one JSON array but newline-separated JSONs. 230 | Map[] processes 231 | if (processesAsString.startsWith('[')) { 232 | processes = new JsonSlurper().parseText(processesAsString) 233 | } else { 234 | processes = processesAsString.split('\\R').findAll { it.trim() }.collect { new JsonSlurper().parseText(it) } 235 | } 236 | List transformed = processes.collect { 237 | // Status field contains something like "Up 8 seconds", so we have to strip the duration. 
238 | if (it.containsKey('Status') && it.Status.startsWith('Up ')) it.Status = 'Up' 239 | VOLATILE_STATE_KEYS.each { key -> it.remove(key) } 240 | UNSTABLE_ARRAY_STATE_KEYS.each { key -> it[key] = parseAndSortStateArray(it[key]) } 241 | it 242 | } 243 | processesState = transformed.join('\t') 244 | } catch (Exception e) { 245 | logger.warn("Cannot process JSON returned from 'docker compose ps --format json'", e) 246 | } 247 | processesState + composeExecutor.get().execute('config') + startedServices.get().join(',') 248 | } 249 | 250 | protected Object parseAndSortStateArray(Object list) { 251 | if (list instanceof List) { 252 | return list.sort { (first, second) -> first.toString() <=> second.toString() } 253 | } else if (list instanceof String && list.contains(",")) { 254 | // Actually not a list, but a comma separated string 255 | return list.split(',').collect { it.trim() }.sort().toArray() 256 | } else { 257 | return list 258 | } 259 | } 260 | 261 | protected Iterable loadServicesInfo(Iterable servicesNames) { 262 | // this code is little bit complicated - the aim is to execute `docker inspect` just once (for all the containers) 263 | Map> serviceToContainersIds = composeExecutor.get().getContainerIds(servicesNames) 264 | Map> inspections = dockerExecutor.getInspections(*serviceToContainersIds.values().flatten().unique()) 265 | serviceToContainersIds.collect { pair -> new ServiceInfo(name: pair.key, containerInfos: pair.value.collect { createContainerInfo(inspections.get(it), pair.key) }.collectEntries { [(it.instanceName): it] } ) } 266 | } 267 | 268 | protected ContainerInfo createContainerInfo(Map inspection, String serviceName) { 269 | String containerId = inspection.Id 270 | logger.info("Container ID of service $serviceName is $containerId") 271 | ServiceHost host = dockerExecutor.getContainerHost(inspection, serviceName, logger) 272 | logger.info("Will use $host as host of service $serviceName") 273 | def tcpPorts = 
dockerExecutor.getTcpPortsMapping(serviceName, inspection, host) 274 | def udpPorts = dockerExecutor.getUdpPortsMapping(serviceName, inspection, host) 275 | // docker-compose v1 uses an underscore as a separator. v2 uses a hyphen. 276 | String instanceName = inspection.Name.find(/${serviceName}_\d+$/) ?: 277 | inspection.Name.find(/${serviceName}-\d+$/) ?: 278 | inspection.Name - '/' 279 | new ContainerInfo( 280 | instanceName: instanceName, 281 | serviceHost: host, 282 | tcpPorts: tcpPorts, 283 | udpPorts: udpPorts, 284 | inspection: inspection) 285 | } 286 | 287 | void waitForHealthyContainers(Iterable servicesInfos) { 288 | def start = Instant.now() 289 | servicesInfos.forEach { serviceInfo -> 290 | serviceInfo.containerInfos.each { instanceName, containerInfo -> 291 | def firstIteration = true 292 | while (true) { 293 | def inspection = firstIteration ? containerInfo.inspection : dockerExecutor.getInspection(containerInfo.containerId) 294 | Map inspectionState = inspection.State 295 | String healthStatus 296 | if (inspectionState.containsKey('Health')) { 297 | healthStatus = inspectionState.Health.Status 298 | if (!"starting".equalsIgnoreCase(healthStatus) && !"unhealthy".equalsIgnoreCase(healthStatus)) { 299 | logger.lifecycle("${instanceName} health state reported as '$healthStatus' - continuing...") 300 | break 301 | } 302 | logger.lifecycle("Waiting for ${instanceName} to become healthy (it's $healthStatus)") 303 | if (!firstIteration) sleep(waitAfterHealthyStateProbeFailure.get().toMillis()) 304 | } else { 305 | logger.debug("Service ${instanceName} or this version of Docker doesn't support healthchecks") 306 | break 307 | } 308 | if (checkContainersRunning.get() && !"running".equalsIgnoreCase(inspectionState.Status) && !"restarting".equalsIgnoreCase(inspectionState.Status)) { 309 | throw new RuntimeException("Container ${containerInfo.containerId} of ${instanceName} is not running nor restarting. 
Logs:${System.lineSeparator()}${dockerExecutor.getContainerLogs(containerInfo.containerId)}") 310 | } 311 | if (start.plus(waitForHealthyStateTimeout.get()) < Instant.now()) { 312 | throw new RuntimeException("Container ${containerInfo.containerId} of ${instanceName} is still reported as '${healthStatus}'. Logs:${System.lineSeparator()}${dockerExecutor.getContainerLogs(containerInfo.containerId)}") 313 | } 314 | firstIteration = false 315 | } 316 | } 317 | } 318 | } 319 | 320 | Iterable waitForOpenTcpPorts(Iterable servicesInfos) { 321 | def start = Instant.now() 322 | Map newContainerInfos = [:] 323 | servicesInfos.forEach { serviceInfo -> 324 | serviceInfo.containerInfos.each { instanceName, containerInfo -> 325 | containerInfo.tcpPorts 326 | .findAll { ep, fp -> !tcpPortsToIgnoreWhenWaiting.get().any { it == ep } } 327 | .forEach { exposedPort, forwardedPort -> 328 | logger.lifecycle("Probing TCP socket on ${containerInfo.host}:${forwardedPort} of '${instanceName}'") 329 | Integer portToCheck = forwardedPort 330 | while (true) { 331 | try { 332 | def s = new Socket(containerInfo.host, portToCheck) 333 | s.setSoTimeout(waitForTcpPortsDisconnectionProbeTimeout.get().toMillis() as int) 334 | try { 335 | // in case of Windows and Mac, we must ensure that the socket is not disconnected immediately 336 | // if the socket is closed then it returns -1 337 | // if the socket is not closed then returns a data or timeouts 338 | boolean disconnected = false 339 | try { 340 | disconnected = s.inputStream.read() == -1 341 | } catch (Exception e) { 342 | logger.debug("An exception when reading from socket", e) // expected exception 343 | } 344 | if (disconnected) { 345 | throw new RuntimeException("TCP connection on ${containerInfo.host}:${portToCheck} of '${instanceName}' was disconnected right after connected") 346 | } 347 | } 348 | finally { 349 | s.close() 350 | } 351 | logger.lifecycle("TCP socket on ${containerInfo.host}:${portToCheck} of '${instanceName}' is ready") 352 
| break 353 | } 354 | catch (Exception e) { 355 | if (start.plus(waitForTcpPortsTimeout.get()) < Instant.now()) { 356 | throw new RuntimeException("TCP socket on ${containerInfo.host}:${portToCheck} of '${instanceName}' is still failing. Logs:${System.lineSeparator()}${dockerExecutor.getContainerLogs(containerInfo.containerId)}") 357 | } 358 | logger.lifecycle("Waiting for TCP socket on ${containerInfo.host}:${portToCheck} of '${instanceName}' (${e.message})") 359 | sleep(waitAfterTcpProbeFailure.get().toMillis()) 360 | def inspection = dockerExecutor.getInspection(containerInfo.containerId) 361 | if (checkContainersRunning.get() && !"running".equalsIgnoreCase(inspection.State.Status) && !"restarting".equalsIgnoreCase(inspection.State.Status)) { 362 | throw new RuntimeException("Container ${containerInfo.containerId} of ${instanceName} is not running nor restarting. Logs:${System.lineSeparator()}${dockerExecutor.getContainerLogs(containerInfo.containerId)}") 363 | } 364 | ContainerInfo newContainerInfo = createContainerInfo(inspection, serviceInfo.name) 365 | Integer newForwardedPort = newContainerInfo.tcpPorts.get(exposedPort) 366 | if (newForwardedPort != portToCheck) { 367 | logger.lifecycle("Going to replace container information of '${instanceName}' because port $exposedPort was exposed as $forwardedPort but is $newForwardedPort now") 368 | newContainerInfos.put(instanceName, newContainerInfo) 369 | portToCheck = newForwardedPort 370 | } 371 | } 372 | } 373 | } 374 | } 375 | } 376 | servicesInfos.collect { it -> it.copyWith(containerInfos: it.containerInfos.values().collect { newContainerInfos.getOrDefault(it.instanceName, it) }.collectEntries { [(it.instanceName): it] }) } 377 | } 378 | 379 | void setServicesInfosFile() { 380 | def normalName = 'composeUp' 381 | def suffix = (name == normalName) ? 
'' : "-${name.take(name.size() - normalName.size())}" 382 | def path = Paths.get('tmp', 'com.avast.gradle.docker-compose', "services-infos${suffix}.json") 383 | servicesInfosFile.set(project.layout.buildDirectory.file(path.toString())) 384 | } 385 | 386 | void writeServicesInfosFile() { 387 | def f = servicesInfosFile.get().asFile 388 | f.parentFile.mkdirs() 389 | new ObjectMapper().writeValue(f, servicesInfos) 390 | } 391 | } 392 | -------------------------------------------------------------------------------- /src/test/groovy/com/avast/gradle/dockercompose/DockerComposePluginTest.groovy: -------------------------------------------------------------------------------- 1 | package com.avast.gradle.dockercompose 2 | 3 | import com.avast.gradle.dockercompose.tasks.ComposeBuild 4 | import com.avast.gradle.dockercompose.tasks.ComposeDown 5 | import com.avast.gradle.dockercompose.tasks.ComposeDownForced 6 | import com.avast.gradle.dockercompose.tasks.ComposeLogs 7 | import com.avast.gradle.dockercompose.tasks.ComposePull 8 | import com.avast.gradle.dockercompose.tasks.ComposePush 9 | import com.avast.gradle.dockercompose.tasks.ComposeUp 10 | import com.fasterxml.jackson.core.type.TypeReference 11 | import com.fasterxml.jackson.databind.ObjectMapper 12 | import org.gradle.api.Task 13 | import org.gradle.api.tasks.TaskProvider 14 | import org.gradle.api.tasks.testing.Test 15 | import org.gradle.testfixtures.ProjectBuilder 16 | import spock.lang.IgnoreIf 17 | import spock.lang.Specification 18 | 19 | import java.nio.file.Paths 20 | 21 | import static com.avast.gradle.dockercompose.util.VersionNumber.parse 22 | 23 | class DockerComposePluginTest extends Specification { 24 | def "add tasks and extension to the project"() { 25 | def project = ProjectBuilder.builder().build() 26 | when: 27 | project.plugins.apply 'docker-compose' 28 | then: 29 | project.tasks.composeUp instanceof ComposeUp 30 | project.tasks.composeDown instanceof ComposeDown 31 | 
project.tasks.composeDownForced instanceof ComposeDownForced 32 | project.tasks.composePull instanceof ComposePull 33 | project.tasks.composePush instanceof ComposePush 34 | project.tasks.composeBuild instanceof ComposeBuild 35 | project.tasks.composeLogs instanceof ComposeLogs 36 | project.extensions.findByName('dockerCompose') instanceof ComposeExtension 37 | } 38 | 39 | def "propagate custom project name to ComposeExecutor"() { 40 | def project = ProjectBuilder.builder().build() 41 | when: 42 | project.plugins.apply 'docker-compose' 43 | project.dockerCompose { 44 | projectName = 'custom-project-name' 45 | } 46 | then: 47 | ComposeExecutor.getInstance(project, project.dockerCompose).get().parameters.projectName.get() == 'custom-project-name' 48 | } 49 | 50 | def "allows to define extra properties"() { 51 | def project = ProjectBuilder.builder().build() 52 | when: 53 | project.plugins.apply 'docker-compose' 54 | project.dockerCompose { 55 | ext.foo = "bar" 56 | ext { 57 | bar = "foo" 58 | } 59 | environment.put "FOO_SETTING", project.rootProject.name 60 | } 61 | then: 62 | project.dockerCompose.foo == "bar" 63 | project.dockerCompose.bar == "foo" 64 | project.dockerCompose.ext 65 | } 66 | 67 | def "add tasks of nested settings"() { 68 | def project = ProjectBuilder.builder().build() 69 | when: 70 | project.plugins.apply 'docker-compose' 71 | project.dockerCompose { 72 | nested { 73 | useComposeFiles = ['test.yml'] 74 | } 75 | } 76 | then: 77 | project.tasks.nestedComposeUp instanceof ComposeUp 78 | project.tasks.nestedComposeDown instanceof ComposeDown 79 | project.tasks.nestedComposeDownForced instanceof ComposeDownForced 80 | project.tasks.nestedComposePull instanceof ComposePull 81 | project.tasks.composePush instanceof ComposePush 82 | project.tasks.nestedComposeBuild instanceof ComposeBuild 83 | project.tasks.nestedComposeLogs instanceof ComposeLogs 84 | ComposeUp up = project.tasks.nestedComposeUp 85 | 
    // Tail of a feature method whose header is above this chunk:
    // asserts the nested setting's ComposeUp task was wired with the expected compose file.
        up.composeExecutor.get().parameters.useComposeFiles.get() == ['test.yml']
    }

    def "project name should always be lowercase"() {
        def project = ProjectBuilder.builder().build()
        when:
        project.plugins.apply 'docker-compose'
        project.dockerCompose {
            nestedUppercaseTask {
                useComposeFiles = ['docker-compose.yml']
            }
            nestedUppercaseProjectName {
                projectNamePrefix = "UPPERCASE"
            }
            nestedUppercaseProjectNamePrefix {
                projectNamePrefix = "UPPERCASE"
            }
        }

        then:

        // NOTE(review): only two of the three nested configs declared above are
        // asserted here — 'nestedUppercaseProjectNamePrefix' is never checked.
        // Presumably an oversight; confirm and add it to this list if so.
        ['nestedUppercaseTask', 'nestedUppercaseProjectName'].forEach{ String config ->
            def projectName = project.dockerCompose."$config".projectName.get()
            assert projectName.toLowerCase() == projectName
        }


    }

    def "is possible to access servicesInfos of nested setting"() {
        def project = ProjectBuilder.builder().build()
        when:
        project.plugins.apply 'docker-compose'
        project.dockerCompose {
            nested {
                useComposeFiles = ['test.yml']
            }
        }
        then:
        project.dockerCompose.nested.servicesInfos instanceof Map
    }

    def "is possible to override nested settings"() {
        def project = ProjectBuilder.builder().build()
        when:
        project.plugins.apply 'docker-compose'
        project.dockerCompose {
            removeVolumes = true
            nested {
                useComposeFiles = ['test.yml']
                removeVolumes = false
                ignorePullFailure = true
                ignorePushFailure = true
            }
        }
        then:
        // Nested settings shadow the root values; the root keeps its own
        // explicit value (removeVolumes) or the defaults (ignore*Failure).
        project.dockerCompose.nested.removeVolumes.get() == false
        project.dockerCompose.removeVolumes.get() == true
        project.dockerCompose.ignorePullFailure.get() == false
        project.dockerCompose.ignorePushFailure.get() == false
        project.dockerCompose.nested.ignorePullFailure.get() == true
        project.dockerCompose.nested.ignorePushFailure.get() == true
    }

    def "isRequiredBy() adds dependencies"() {
        def project = ProjectBuilder.builder().build()
        project.plugins.apply 'docker-compose'
        Task task = project.tasks.create('integrationTest')
        when:
        project.dockerCompose.isRequiredBy(task)
        then:
        // composeUp must run before the task; composeDown is a finalizer so
        // containers are torn down even if the task fails.
        task.dependsOn.find { it instanceof TaskProvider && ((TaskProvider)it).get() == project.tasks.composeUp }
        task.getFinalizedBy().getDependencies(task).any { it == project.tasks.composeDown }
    }

    def "isRequiredBy() adds dependencies when using TaskProvider"() {
        def project = ProjectBuilder.builder().build()
        project.plugins.apply 'docker-compose'
        TaskProvider taskProvider = project.tasks.register('integrationTest')
        when:
        project.dockerCompose.isRequiredBy(taskProvider)
        Task task = taskProvider.get()
        then:
        task.dependsOn.find { it instanceof TaskProvider && ((TaskProvider)it).get() == project.tasks.composeUp }
        task.getFinalizedBy().getDependencies(task).any { it == project.tasks.composeDown }
    }

    def "isRequiredBy() adds dependencies when using TaskProvider and with dependent classes task"() {
        def project = ProjectBuilder.builder().build()
        project.plugins.apply 'docker-compose'
        TaskProvider classesTaskProvider = project.tasks.register('classes')
        TaskProvider taskProvider = project.tasks.register('integrationTest')
        taskProvider.configure { it.dependsOn classesTaskProvider }
        when:
        project.dockerCompose.isRequiredBy(taskProvider)
        Task task = taskProvider.get()
        then:
        task.dependsOn.find { it instanceof TaskProvider && ((TaskProvider)it).get() == project.tasks.composeUp }
        task.getFinalizedBy().getDependencies(task).any { it == project.tasks.composeDown }
    }

    def "isRequiredBy() adds dependencies for nested settings"() {
        def project = ProjectBuilder.builder().build()
        project.plugins.apply 'docker-compose'
        Task task = project.tasks.create('integrationTest')
        when:
        project.dockerCompose {
            nested {
                useComposeFiles = ['test.yml']
                isRequiredBy(task)
            }
        }
        then:
        // Nested settings produce tasks prefixed with the nested config name.
        task.dependsOn.find { it instanceof TaskProvider && ((TaskProvider)it).get() == project.tasks.nestedComposeUp }
        task.getFinalizedBy().getDependencies(task).any { it == project.tasks.nestedComposeDown }
    }

    def "add tasks of nested settings and isRequiredBy() adds dependencies for nested settings when using simplified syntax"() {
        def project = ProjectBuilder.builder().build()
        project.plugins.apply 'docker-compose'
        Task task = project.tasks.create('integrationTest')
        when:
        // Simplified DSL: 'isRequiredBy<TaskName> <composeFile>' creates the
        // nested config named after the task and wires the dependency in one go.
        project.dockerCompose {
            isRequiredByIntegrationTest 'test.yml'
        }
        then:
        project.tasks.integrationTestComposeUp instanceof ComposeUp
        project.tasks.integrationTestComposeDown instanceof ComposeDown
        project.tasks.integrationTestComposeDownForced instanceof ComposeDownForced
        project.tasks.integrationTestComposePull instanceof ComposePull
        project.tasks.integrationTestComposePush instanceof ComposePush
        project.tasks.integrationTestComposeBuild instanceof ComposeBuild
        project.tasks.integrationTestComposeLogs instanceof ComposeLogs
        ComposeUp up = project.tasks.integrationTestComposeUp
        up.composeExecutor.get().parameters.useComposeFiles.get() == ['test.yml']
        task.dependsOn.find { it instanceof TaskProvider && ((TaskProvider)it).get() == project.tasks.integrationTestComposeUp }
        task.getFinalizedBy().getDependencies(task).any { it == project.tasks.integrationTestComposeDown }
    }

    def "isRequiredBy ensures right order of tasks"() {
        def project = ProjectBuilder.builder().build()
        project.plugins.apply 'docker-compose'
        project.plugins.apply 'java'
        when:
        project.dockerCompose.isRequiredBy(project.tasks.test)
        then:
        // Containers should start only after compilation, so a compile failure
        // doesn't needlessly spin up the compose stack.
        project.tasks.composeUp.shouldRunAfter.getDependencies(null).any { it == project.tasks.testClasses }
        noExceptionThrown()
    }

    def "allows to read servicesInfos from another task"() {
        def f = Fixture.withNginx()
        def integrationTestTask = f.project.tasks.create('integrationTest').doLast {
            ContainerInfo webInfo = f.project.dockerCompose.servicesInfos.web.firstContainer
            assert "http://${webInfo.host}:${webInfo.tcpPorts[80]}".toURL().text.contains('nginx')
            assert webInfo.ports == webInfo.tcpPorts
            assert !webInfo.containerHostname.isEmpty()
            assert webInfo.inspection.size() > 0
        }
        when:
        f.project.tasks.composeBuild.build()
        f.project.tasks.composeUp.up()
        // Execute the task's actions manually; ProjectBuilder projects have no
        // task-execution graph to run them through.
        integrationTestTask.actions.forEach { it.execute(integrationTestTask) }
        then:
        noExceptionThrown()
        cleanup:
        f.project.tasks.composeDown.down()
        f.close()
    }

    def "reconnect to previously executed up task"() {
        def f = Fixture.withNginx()
        when:
        f.project.dockerCompose.stopContainers = false
        def t = System.nanoTime()
        f.project.tasks.composeBuild.build()
        f.project.tasks.composeUp.up()
        def firstDuration = System.nanoTime() - t
        t = System.nanoTime()
        f.project.tasks.composeBuild.build()
        f.project.tasks.composeUp.up()
        def secondDuration = System.nanoTime() - t
        then:
        noExceptionThrown()
        // NOTE(review): timing-based assertion — the second 'up' reconnects to
        // running containers and is expected to be faster. Could be flaky on a
        // loaded CI machine; the wasReconnected flag below is the real check.
        secondDuration < firstDuration
        f.project.tasks.composeUp.wasReconnected == true
        cleanup:
        f.project.tasks.composeDownForced.down()
        f.close()
    }

    def "does not reconnect to previously executed up task if the container is killed"() {
        def f = Fixture.withNginx()
        when:
        f.project.dockerCompose.stopContainers = false
        f.project.tasks.composeBuild.build()
        f.project.tasks.composeUp.up()
        // Kill the container out-of-band so the cached state is stale.
        f.project.dockerCompose.dockerExecutor.execute('kill', f.project.dockerCompose.servicesInfos.values().find().firstContainer.containerId)
        f.project.tasks.composeBuild.build()
        f.project.tasks.composeUp.up()
        then:
        noExceptionThrown()
        f.project.tasks.composeUp.wasReconnected == false
        cleanup:
        f.project.tasks.composeDownForced.down()
        f.close()
    }

    def "allows pull"() {
        def f = Fixture.withNginx()
        when:
        f.project.dockerCompose.startedServices = ['web']
        f.project.tasks.composePull.pull()
        then:
        noExceptionThrown()
        cleanup:
        f.close()
    }

    def "exposes environment variables and system properties"() {
        def f = Fixture.custom(composeFileContent)
        f.project.plugins.apply 'java'
        f.project.tasks.composeBuild.build()
        f.project.tasks.composeUp.up()
        Test test = f.project.tasks.test as Test
        File file = new File("test.env")
        when:
        f.project.dockerCompose.exposeAsEnvironment(test)
        f.project.dockerCompose.exposeAsEnvironmentFile(file)
        f.project.dockerCompose.exposeAsSystemProperties(test)
        then:
        test.environment.containsKey('WEB_HOST')
        test.environment.containsKey('WEB_CONTAINER_HOSTNAME')
        test.environment.containsKey('WEB_TCP_80')
        test.environment.containsKey('WEB_UDP_81')
        test.systemProperties.containsKey('web.host')
        test.systemProperties.containsKey('web.containerHostname')
        test.systemProperties.containsKey('web.tcp.80')
        test.systemProperties.containsKey('web.udp.81')
        file.text.contains('WEB_HOST')
        file.text.contains('WEB_CONTAINER_HOSTNAME')
        file.text.contains('WEB_TCP_80')
        file.text.contains('WEB_UDP_81')

        cleanup:
        f.project.tasks.composeDown.down()
        file.delete()
        f.close()
        where:
        composeFileContent << ['''
            services:
                web:
                    image: nginx:stable
                    ports:
                      - 80
                      - 81/udp
        ''']
    }

    def "exposes environment variables and system properties for services having dash in service name"() {
        def f = Fixture.custom(composeFileContent)
        f.project.plugins.apply 'java'
        f.project.tasks.composeBuild.build()
        f.project.tasks.composeUp.up()
        Test test = f.project.tasks.test as Test
        File file = new File("test.env")
        when:
        f.project.dockerCompose.exposeAsEnvironment(test)
        f.project.dockerCompose.exposeAsEnvironmentFile(file)
        f.project.dockerCompose.exposeAsSystemProperties(test)
        then:
        // Dashes in service names are kept verbatim in the generated names.
        test.environment.containsKey('WEB-SERVICE_HOST')
        test.environment.containsKey('WEB-SERVICE_CONTAINER_HOSTNAME')
        test.environment.containsKey('WEB-SERVICE_TCP_80')
        test.environment.containsKey('WEB-SERVICE_UDP_81')
        test.systemProperties.containsKey('web-service.host')
        test.systemProperties.containsKey('web-service.containerHostname')
        test.systemProperties.containsKey('web-service.tcp.80')
        test.systemProperties.containsKey('web-service.udp.81')
        file.text.contains('WEB-SERVICE_HOST')
        file.text.contains('WEB-SERVICE_CONTAINER_HOSTNAME')
        file.text.contains('WEB-SERVICE_TCP_80')
        file.text.contains('WEB-SERVICE_UDP_81')
        cleanup:
        f.project.tasks.composeDown.down()
        file.delete()
        f.close()
        where:
        composeFileContent << ['''
            services:
                web-service:
                    image: nginx:stable
                    ports:
                      - 80
                      - 81/udp
        ''']
    }

    // OS detection helpers for @IgnoreIf: host networking is Linux-only.
    private static boolean isRunningOnWindows() { System.properties['os.name'].toString().toLowerCase().startsWith('windows') }
    private static boolean isRunningOnMac() { System.properties['os.name'].toString().toLowerCase().startsWith('macos') || System.properties['os.name'].toString().toLowerCase().startsWith('mac os') }

    @IgnoreIf({ DockerComposePluginTest.isRunningOnWindows() || DockerComposePluginTest.isRunningOnMac() })
    def "expose localhost as a host for container with HOST networking"() {
        def f = Fixture.custom('''
            services:
                web:
                    image: nginx:stable
                    network_mode: host
        ''')
        f.project.plugins.apply 'java'
        f.extension.projectName = 'test'
        f.project.tasks.composeBuild.build()
        f.project.tasks.composeUp.up()
        Test test = f.project.tasks.test as Test
        File file = new File("test.env")
        when:
        f.project.dockerCompose.exposeAsEnvironment(test)
        f.project.dockerCompose.exposeAsEnvironmentFile(file)
        f.project.dockerCompose.exposeAsSystemProperties(test)
        then:
        test.environment.get('WEB_HOST') == 'localhost'
        test.systemProperties.get('web.host') == 'localhost'
        cleanup:
        f.project.tasks.composeDown.down()
        file.delete()
        f.close()
    }

    def "docker-compose substitutes environment variables"() {
        def f = Fixture.custom('''
            services:
                web:
                    image: nginx:stable
                    ports:
                      - $MY_WEB_PORT
        ''')
        def integrationTestTask = f.project.tasks.create('integrationTest').doLast {
            ContainerInfo webInfo = f.project.dockerCompose.servicesInfos.web.firstContainer
            assert webInfo.ports.containsKey(80)
        }
        when:
        f.extension.useComposeFiles = ['docker-compose.yml']
        f.extension.environment.put 'MY_WEB_PORT', 80
        f.extension.waitForTcpPorts = false // checked in assert
        f.project.tasks.composeBuild.build()
        f.project.tasks.composeUp.up()
        integrationTestTask.actions.forEach { it.execute(integrationTestTask) }
        then:
        noExceptionThrown()
        cleanup:
        f.project.tasks.composeDown.down()
        f.close()
    }

    // 'scale' support requires docker-compose >= 1.13.0; below that the plugin
    // is expected to fail fast with UnsupportedOperationException.
    @IgnoreIf({ System.getenv('DOCKER_COMPOSE_VERSION') == null || parse(System.getenv('DOCKER_COMPOSE_VERSION')) >= parse('1.13.0') })
    def "exception is thrown for scale option if unsupported docker-compose is used"() {
        def f = Fixture.withNginx()
        f.extension.scale = ['web': 2]
        when:
        f.project.tasks.composeBuild.build()
        f.project.tasks.composeUp.up()
        then:
        thrown(UnsupportedOperationException)
        cleanup:
        f.project.tasks.composeDown.down()
        f.close()
    }

    @IgnoreIf({ System.getenv('DOCKER_COMPOSE_VERSION') != null && parse(System.getenv('DOCKER_COMPOSE_VERSION')) < parse('1.13.0') })
    def "docker-compose scale option launches multiple instances of service"() {
        def f = Fixture.withNginx()
        f.extension.scale = ['web': 2]
        def integrationTestTask = f.project.tasks.create('integrationTest').doLast {
            def webInfos = project.dockerCompose.servicesInfos.web.containerInfos
            assert webInfos.size() == 2
            // Container-name separator differs between compose v1 ('_') and v2 ('-').
            assert webInfos.containsKey('web_1') || webInfos.containsKey('web-1')
            assert webInfos.containsKey('web_2') || webInfos.containsKey('web-2')
        }
        when:
        f.project.tasks.composeBuild.build()
        f.project.tasks.composeUp.up()
        integrationTestTask.actions.forEach { it.execute(integrationTestTask) }
        then:
        noExceptionThrown()
        cleanup:
        f.project.tasks.composeDown.down()
        f.close()
    }

    @IgnoreIf({ System.getenv('DOCKER_COMPOSE_VERSION') != null && parse(System.getenv('DOCKER_COMPOSE_VERSION')) < parse('1.13.0') })
    def "environment variables and system properties exposed for all scaled containers"() {
        def f = Fixture.withNginx()
        f.project.plugins.apply 'java'
        f.extension.scale = ['web': 2]
        f.project.tasks.composeBuild.build()
        f.project.tasks.composeUp.up()
        Test test = f.project.tasks.test as Test
        File file = new File("test.env")
        when:
        f.project.dockerCompose.exposeAsEnvironment(test)
        f.project.dockerCompose.exposeAsEnvironmentFile(file)
        f.project.dockerCompose.exposeAsSystemProperties(test)
        then:
        // Scaled services get per-instance names suffixed with the instance number.
        [1, 2].each { containerInstance ->
            assert test.environment.containsKey("WEB_${containerInstance}_HOST".toString())
            assert test.environment.containsKey("WEB_${containerInstance}_CONTAINER_HOSTNAME".toString())
            assert test.environment.containsKey("WEB_${containerInstance}_TCP_80".toString())
            assert test.systemProperties.containsKey("web_${containerInstance}.host".toString())
            assert test.systemProperties.containsKey("web_${containerInstance}.containerHostname".toString())
            assert test.systemProperties.containsKey("web_${containerInstance}.tcp.80".toString())
            assert file.text.contains("WEB_${containerInstance}_HOST".toString())
            assert file.text.contains("WEB_${containerInstance}_CONTAINER_HOSTNAME".toString())
            assert file.text.contains("WEB_${containerInstance}_TCP_80".toString())
        }
        cleanup:
        f.project.tasks.composeDown.down()
        file.delete()
        f.close()
    }

    @IgnoreIf({ System.getenv('DOCKER_COMPOSE_VERSION') != null && parse(System.getenv('DOCKER_COMPOSE_VERSION')) < parse('1.13.0') })
    def "docker-compose scale to 0 does not cause exceptions because of missing first container"() {
        def f = Fixture.custom('''
            services:
                web:
                    image: nginx:stable
                    ports:
                      - 80
                z:
                    image: nginx:stable
                    ports: []
        ''')
        f.extension.scale = ['web': 0]
        def integrationTestTask = f.project.tasks.create('integrationTest').doLast {
            def webInfos = project.dockerCompose.servicesInfos.web.containerInfos
            assert webInfos.size() == 0
        }
        when:
        f.project.tasks.composeBuild.build()
        f.project.tasks.composeUp.up()
        integrationTestTask.actions.forEach { it.execute(integrationTestTask) }
        then:
        noExceptionThrown()
        cleanup:
        f.project.tasks.composeDown.down()
        f.close()
    }

    def "exposes environment variables and system properties for container with custom name"() {
        def f = Fixture.custom(composeFileContent)
        f.project.plugins.apply 'java'
        f.project.tasks.composeBuild.build()
        f.project.tasks.composeUp.up()
        Test test = f.project.tasks.test as Test
        File file = new File("test.env")
        when:
        f.project.dockerCompose.exposeAsEnvironment(test)
        f.project.dockerCompose.exposeAsEnvironmentFile(file)
        f.project.dockerCompose.exposeAsSystemProperties(test)
        then:
        // container_name overrides the service name in the exposed variables.
        test.environment.containsKey('CUSTOM_CONTAINER_NAME_HOST')
        test.environment.containsKey('CUSTOM_CONTAINER_NAME_CONTAINER_HOSTNAME')
        test.environment.containsKey('CUSTOM_CONTAINER_NAME_TCP_80')
        test.systemProperties.containsKey('custom_container_name.host')
        test.systemProperties.containsKey('custom_container_name.containerHostname')
        test.systemProperties.containsKey('custom_container_name.tcp.80')
        file.text.contains('CUSTOM_CONTAINER_NAME_HOST')
        file.text.contains('CUSTOM_CONTAINER_NAME_CONTAINER_HOSTNAME')
        file.text.contains('CUSTOM_CONTAINER_NAME_TCP_80')
        cleanup:
        f.project.tasks.composeDown.down()
        file.delete()
        f.close()
        where:
        // test it for both compose file version 1 and 2
        composeFileContent << ['''
            services:
                web:
                    container_name: custom_container_name
                    image: nginx:stable
                    ports:
                      - 80
        ''']
    }

    def "includeDependencies calculates dependencies correctly"() {
        def f = Fixture.custom(composeFileContent)
        f.project.plugins.apply 'java'
        f.project.dockerCompose.includeDependencies = true
        f.project.dockerCompose.startedServices = ['webMaster']
        f.project.plugins.apply 'docker-compose'
        f.project.tasks.composeBuild.build()
        f.project.tasks.composeUp.up()
        Test test = f.project.tasks.test as Test
        when:
        f.project.tasks.composeDown.down()
        then:
        // After composeDown, the transitively linked services must be gone too.
        def runningServices = ComposeExecutor.getInstance(f.project, f.project.dockerCompose).get().execute('ps')
        !runningServices.contains("webMaster")
        !runningServices.contains("web0")
        !runningServices.contains("web1")

        cleanup:
        f.close()
        where:
        composeFileContent << ['''
            services:
                web0:
                    image: nginx:stable
                    ports:
                      - 80
                web1:
                    image: nginx:stable
                    ports:
                      - 80
                    links:
                      - web0
                webMaster:
                    image: nginx:stable
                    ports:
                      - 80
                    links:
                      - web1
        ''']
    }

    def "works as expected for container with network from another container"() {
        def f = Fixture.custom(composeFileContent)
        f.project.plugins.apply 'java'
        f.project.plugins.apply 'docker-compose'
        when:
        f.project.tasks.composeBuild.build()
        f.project.tasks.composeUp.up()
        then:
        // nginx shares gw's network namespace (network_mode: service:gw),
        // so both must report the same host and nginx answers on gw's port.
        f.project.dockerCompose.servicesInfos.nginx.host == f.project.dockerCompose.servicesInfos.gw.host
        ServiceInfo gwServiceInfo = f.project.dockerCompose.servicesInfos.gw
        "http://${gwServiceInfo.host}:${gwServiceInfo.tcpPorts[80]}".toURL().text.contains('nginx')
        cleanup:
        f.project.tasks.composeDown.down()
        f.close()
        where:
        composeFileContent << ['''
            services:
                gw:
                    image: alpine:3.9.6
                    entrypoint: /bin/sleep
                    command: 1h
                    ports:
                      - 80
                nginx:
                    image: nginx:stable
                    network_mode: service:gw
        ''']
    }

    def "deserialize servicesInfosFile and compare to original"() {
        def f = Fixture.withNginx()
        when:
        f.project.tasks.composeBuild.build()
        f.project.tasks.composeUp.up()
        def mapper = new ObjectMapper()
        def file = f.project.tasks.composeUp.servicesInfosFile.get().asFile
        // NOTE(review): the generic parameter below was destroyed in the source
        // extraction ('TypeReference>'); restored as Map<String, ServiceInfo>
        // to match the comparison against composeUp.servicesInfos — verify
        // against the repository original.
        def deserializedServicesInfos = mapper.readValue(file, new TypeReference<Map<String, ServiceInfo>>() {})
        then:
        noExceptionThrown()
        deserializedServicesInfos == f.project.tasks.composeUp.servicesInfos
        cleanup:
        f.project.tasks.composeDown.down()
        f.close()
    }

    def "verify servicesInfosFile path"() {
        def project = ProjectBuilder.builder().build()
        when:
        project.plugins.apply 'docker-compose'
        project.dockerCompose {}
        File file = project.tasks.composeUp.servicesInfosFile.get().asFile
        then:
        file.toPath().endsWith(Paths.get('build', 'tmp', 'com.avast.gradle.docker-compose', 'services-infos.json'))
    }

    def "verify servicesInfosFile path for nested configuration"() {
        def project = ProjectBuilder.builder().build()
        when:
        project.plugins.apply 'docker-compose'
        project.dockerCompose {
            nested {}
        }
        def file = project.tasks.nestedComposeUp.servicesInfosFile.get().asFile
        then:
        // Nested configs get their own suffixed services-infos file.
        file.toPath().endsWith(Paths.get('build', 'tmp', 'com.avast.gradle.docker-compose', 'services-infos-nested.json'))
    }
}