├── .gitignore ├── Dockerfile ├── LICENSE ├── README.md ├── build.sbt ├── docker ├── agent-bond-opts └── jmx_exporter_config.yml ├── helm ├── flink-operator │ ├── .gitignore │ ├── .helmignore │ ├── Chart.yaml │ ├── README.md │ ├── templates │ │ ├── NOTES.txt │ │ └── operator.yaml │ └── values.yaml └── pvc-flink │ ├── .gitignore │ ├── .helmignore │ ├── Chart.yaml │ ├── README.md │ ├── templates │ ├── NOTES.txt │ └── pvc.yaml │ └── values.yaml ├── manifest ├── LightbendOperatorSource.yaml ├── install.sh ├── olm │ └── crd │ │ ├── flinkcluster.crd.yaml │ │ ├── flinkoperator.0.0.2.clusterserviceversion.yaml │ │ └── lightbend-flink.package.yaml ├── operator.yaml └── programmatic │ ├── flink.catalogsource.yaml │ ├── flink.configmap.yaml │ ├── flink.operatorgroup.yaml │ └── flink.subscription.yaml ├── operator └── src │ ├── main │ └── scala │ │ └── com │ │ └── lightbend │ │ └── operator │ │ ├── Constants.scala │ │ ├── FlinkClusterOperator.scala │ │ ├── GetClusterStatus.scala │ │ ├── KubernetesFlinkClusterDeployer.scala │ │ ├── MetricsHelper.scala │ │ ├── RunningClusters.scala │ │ └── helpers │ │ └── DataHelper.scala │ └── test │ └── scala │ └── com │ └── lightbend │ └── operator │ └── YamlProcessingTest.scala ├── project ├── Dependencies.scala ├── ModelGeneratorPlugin.scala ├── Versions.scala ├── build.properties └── plugins.sbt ├── schema └── flinkCluster.json └── yaml ├── cluster.yaml ├── cluster_complete.yaml └── logging-configmap.yaml /.gitignore: -------------------------------------------------------------------------------- 1 | ###################### 2 | # Eclipse 3 | ###################### 4 | *.pydevproject 5 | .project 6 | .metadata 7 | tmp/ 8 | tmp/**/* 9 | *.tmp 10 | *.bak 11 | *.swp 12 | *~.nib 13 | local.properties 14 | .classpath 15 | .settings/ 16 | .loadpath 17 | .factorypath 18 | /src/main/resources/rebel.xml 19 | 20 | # External tool builders 21 | .externalToolBuilders/** 22 | 23 | # Locally stored "Eclipse launch configurations" 24 | *.launch 25 | 26 | # CDT-specific 27 | .cproject 28 | 29 | # PDT-specific 30 | .buildpath 31 | 32 | ###################### 33 | # Intellij 34 | ###################### 35 | .idea/ 36 | *.iml 37 | *.iws 38 | *.ipr 39 | *.ids 40 | *.orig 41 | classes/ 42 | 43 | ###################### 44 | # Visual Studio Code 45 | ###################### 46 | .vscode/ 47 | 48 | ###################### 49 | # Maven 50 | ###################### 51 | /log/ 52 | /target/ 53 | pom.xml.versionsBackup 54 | 55 | ###################### 56 | # Gradle 57 | ###################### 58 | .gradle/ 59 | /build/ 60 | 61 | ###################### 62 | # Package Files 63 | ###################### 64 | *.jar 65 | *.war 66 | *.ear 67 | *.db 68 | 69 | ###################### 70 | # Windows 71 | ###################### 72 | # Windows image file caches 73 | Thumbs.db 74 | 75 | # Folder config file 76 | Desktop.ini 77 | 78 | ###################### 79 | # Mac OSX 80 | ###################### 81 | .DS_Store 82 | .svn 83 | 84 | # Thumbnails 85 | ._* 86 | 87 | # Files that might appear on external disk 88 | .Spotlight-V100 89 | .Trashes 90 | 91 | ###################### 92 | # Others 93 | ###################### 94 | *.class 95 | *.*~ 96 | *~ 97 | .merge_file* 98 | 99 | ###################### 100 | # Gradle Wrapper 101 | ###################### 102 | !gradle/wrapper/gradle-wrapper.jar 103 | 104 | ###################### 105 | # Maven Wrapper 106 | ###################### 107 | !.mvn/wrapper/maven-wrapper.jar 108 | 109 | ###################### 110 | # ESLint 111 | ###################### 112 | .eslintcache 113 | 
114 | 115 | ########################## 116 | # Custom/Project Specific 117 | ########################## 118 | 119 | # generated manifests 120 | /k8s-spark-operator.yaml 121 | /openshift-spark-operator.yaml 122 | /manifest/operator-devel.yaml 123 | /manifest/operator-test.yaml 124 | 125 | # oc cluster up 126 | openshift.local.clusterup 127 | 128 | 129 | abstract-operator 130 | 131 | # spark shell 132 | metastore_db/ 133 | derby.log 134 | 135 | 136 | ########################## 137 | # sbt 138 | ########################## 139 | target/ 140 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | # 2 | # Scala and java are added to centos. Modeled after fabric8/java-centos-openjdk8-jdk:1.5.1 and added scala 3 | # 4 | 5 | FROM centos:7.6.1810 6 | 7 | USER root 8 | 9 | RUN mkdir -p /deployments 10 | 11 | # JAVA_APP_DIR is used by run-java.sh for finding the binaries 12 | ENV JAVA_APP_DIR=/deployments \ 13 | JAVA_MAJOR_VERSION=8 14 | 15 | # Scala 16 | ENV SCALA_VERSION 2.12.8 17 | 18 | # /dev/urandom is used as random source, which is prefectly safe 19 | # according to http://www.2uo.de/myths-about-urandom/ 20 | RUN yum install -y \ 21 | java-1.8.0-openjdk-1.8.0.191.b12-1.el7_6 \ 22 | java-1.8.0-openjdk-devel-1.8.0.191.b12-1.el7_6 \ 23 | && echo "securerandom.source=file:/dev/urandom" >> /usr/lib/jvm/java/jre/lib/security/java.security \ 24 | && yum clean all 25 | 26 | ENV JAVA_HOME /etc/alternatives/jre 27 | 28 | # Agent bond including Jolokia and jmx_exporter 29 | ADD docker/agent-bond-opts /opt/run-java-options 30 | RUN mkdir -p /opt/agent-bond \ 31 | && curl http://central.maven.org/maven2/io/fabric8/agent-bond-agent/1.2.0/agent-bond-agent-1.2.0.jar \ 32 | -o /opt/agent-bond/agent-bond.jar \ 33 | && chmod 444 /opt/agent-bond/agent-bond.jar \ 34 | && chmod 755 /opt/run-java-options 35 | ADD docker/jmx_exporter_config.yml /opt/agent-bond/ 36 | EXPOSE 8778 9779 37 | 38 | ## 39 | # Install Scala 40 | ## Piping curl directly in tar 41 | 42 | RUN mkdir -p /root && \ 43 | curl -fsL https://downloads.typesafe.com/scala/$SCALA_VERSION/scala-$SCALA_VERSION.tgz | tar xfz - -C /root/ && \ 44 | echo >> /root/.bashrc && \ 45 | echo "export PATH=~/scala-$SCALA_VERSION/bin:$PATH" >> /root/.bashrc 46 | 47 | ENV JAVA_OPTS="-XX:+UnlockExperimentalVMOptions -XX:+UseCGroupMemoryLimitForHeap -XX:MaxRAMFraction=2 -XshowSettings:vm" 48 | 49 | # Run under user "jboss" and prepare for be running 50 | # under OpenShift, too 51 | RUN groupadd -r jboss -g 1000 \ 52 | && useradd -u 1000 -r -g jboss -m -d /opt/jboss -s /sbin/nologin jboss \ 53 | && chmod 755 /opt/jboss \ 54 | && chown -R jboss /deployments \ 55 | && usermod -g root -G `id -g jboss` jboss \ 56 | && chmod -R "g+rwX" /deployments \ 57 | && chown -R jboss:root /deployments 58 | 59 | # Create directory for operator jar 60 | RUN mkdir -p /operator && \ 61 | chmod -R 777 /operator && \ 62 | chown -R jboss:root /operator 63 | 64 | USER jboss 65 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 
11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. 
Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 
134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 
193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Flink-operator 2 | 3 | 4 | `CRD`-based approach for managing Flink clusters in Kubernetes and OpenShift. 5 | 6 | This operator uses the [abstract-operator](https://github.com/jvm-operators/abstract-operator) library. 7 | 8 | ## Building and Packaging 9 | 10 | The operator is implemented in the operator module. The model contains both 11 | the [json definition of the CRD](schema/flinkCluster.json) and the actual 12 | implementation code. 13 | 14 | Building and creating the Docker image can be done by running: 15 | ```` 16 | sbt docker 17 | ```` 18 | This _docker build_ requires a base image that can be built using the following [Dockerfile](./Dockerfile) 19 | 20 | ## Installation 21 | 22 | To install the operator, use [Helm](helm/flink-operator) 23 | 24 | The following configuration options are available for the operator: 25 | * Operator image information, including repository - operator docker name (default - lightbend/fdp-flink-operator); tag - operator docker tag (default - 0.0.1); and pullPolicy - operator docker pull policy (default - always) 26 | * Namespaces to watch - three options are supported: an empty list - the namespace where the operator is installed; an explicit list of namespaces; and “*” - all namespaces (default - “*”) 27 | * ReconciliationInterval - how often (in seconds) the full reconciliation should be run (default is 180) 28 | * Metrics - a boolean defining whether operator metrics are exposed to Prometheus (default - true) 29 | * MetricsPort - port used by the metrics HTTP server (default - 8080) 30 | * InternalJvmMetrics - a boolean defining whether the operator's internal JVM metrics are available through Prometheus (default - true) 31 | * Operator's resource requirements, including the memory requirement for the operator (default - 512Mi) and the cpu requirement for the operator (default - 1000m) 32 | * Checkpointing configuration, including PVC name and mount directory (default none) 33 | * Savepointing configuration, including PVC name and mount directory (default none) 34 | 35 | ## Cluster's specification 36 | 37 | A cluster can be configured using the following components: 38 | * customImage defines two parameters: 39 | * imagename - name of the image to use for the cluster (the same image is used for both the job manager and the task manager) - default is `lightbend/flink:1.8.0_scala_2.11_debian` 40 | * pullpolicy - image pull policy - default is `IfNotPresent` 41 | * flinkConfiguration defines cluster-specific configuration 42 | * num_taskmanagers - number of task managers (integer) - default is `2` 43 | * taskmanagers_slots - number of slots per task manager (integer) - default is `2` 44 | * parallelism - default parallelism for a Flink application (integer) - default is `1` 45 | * metrics - defines whether to expose the cluster's metrics via Prometheus - default `true` 46 | * logging - name of the configmap with the overrides for logging (see the [sample](/yaml/logging-configmap.yaml) for all the files and their
data). If not specified, the default Flink configuration is used 47 | * checkpointing - name of the PVC used for checkpointing. If it is specified, Flink HA is used; if not specified, external checkpointing is not supported and no HA is used 48 | * savepointing - name of the PVC used for savepointing. If it is not specified, savepointing is not supported. 49 | * master defines the specification for the jobmanager 50 | * cpu - amount of cpus per instance (string), default `"2"` 51 | * memory - amount of memory per instance (string), default `"1024"` 52 | * inputs - array of inputs used for the job manager. If not specified, a session cluster is started. To start a job cluster, inputs should contain 53 | ```` 54 | - jobcluster 55 | - name of the main job class 56 | - parameters 57 | ```` 58 | Note that a parameter's name and value should be specified on separate lines 59 | * worker defines the specification for the taskmanager 60 | * cpu - amount of cpus per instance (string), default `"4"` 61 | * memory - amount of memory per instance (string), default `"2048"` 62 | * labels - list of additional labels (key/values), see example [here](yaml/cluster_complete.yaml) 63 | * env - list of additional environment variables (key/values), see example [here](yaml/cluster_complete.yaml) 64 | * mounts - list of additional mounts (`PVC`, `ConfigMap`, `Secret`). Every mount is defined by the following parameters, all of which should be present: 65 | * resourcetype - type of the mounted resource. Supported values are `PVC`, `ConfigMap`, `Secret` (not case sensitive). Any other resource type will be ignored 66 | * resourcename - name of the resource (the resource should exist) 67 | * mountdirectory - directory at which the resource is mounted. If this directory is `/opt/flink/conf`, the resource will be ignored to avoid overriding Flink's native configuration. Additionally, `PVC` resources are mounted as `read/write`, while `ConfigMap` and `Secret` resources are mounted as `readOnly` 68 | * envname - name of the environment variable used to expose mountdirectory 69 | 70 | The following environment variables are generated: 71 | * `LOGCONFIGDIR` for the logging definition files 72 | * `CHECKPOINTDIR` for the checkpointing directory 73 | * `SAVEPOINTDIR` for the savepointing directory 74 | 75 | 76 | ## Basic commands 77 | 78 | To create a cluster, execute the following command: 79 | ``` 80 | cat <` 114 | * `SAVEPOINT_OPTIONS` - Savepoint options to start the cluster with (default: none), for example `--fromSavepoint --allowNonRestoredState` 115 | 116 | For more information on parallelism and savepoint options, see the [documentation](https://ci.apache.org/projects/flink/flink-docs-stable/ops/cli.html#usage) 117 | 118 | --- 119 | **Note** 120 | 121 | This operator assumes that custom images are built using [this project](https://github.com/lightbend/fdp-flink-build).
122 | If you build your images differently, the commands for running applications will change. 123 | 124 | --- 125 | 126 | ## Seeing what is running 127 | 128 | To see running clusters, execute: 129 | 130 | ```` 131 | oc get FlinkCluster 132 | NAME AGE 133 | my-cluster 13m 134 | ```` 135 | 136 | To get information about a specific cluster, run: 137 | 138 | ```` 139 | oc describe FlinkCluster my-cluster 140 | Name: my-cluster 141 | Namespace: flink 142 | Labels: 143 | Annotations: 144 | API Version: lightbend.com/v1 145 | Kind: FlinkCluster 146 | Metadata: 147 | Creation Timestamp: 2019-06-16T15:21:27Z 148 | Generation: 1 149 | Resource Version: 11087658 150 | Self Link: /apis/lightbend.com/v1/namespaces/flink/flinkclusters/my-cluster 151 | UID: 68f50b35-904a-11e9-9719-065625d6fbaa 152 | Spec: 153 | Flink Configuration: 154 | Checkpointing: flink-operator-checkpointing 155 | Logging: flink-logging 156 | Num _ Taskmanagers: 1 157 | Parallelism: 2 158 | Savepointing: flink-operator-savepointing 159 | Taskmanagers _ Slots: 2 160 | Master: 161 | Cpu: 1 162 | Mounts: 163 | Envname: my-secret 164 | Mountdirectory: /etc/tls-sidecar/cluster-ca-certs/ 165 | Resourcename: strimzi-clients-ca-cert 166 | Resourcetype: secret 167 | Worker: 168 | Cpu: 1 169 | Events: 170 | ```` 171 | You can also get information about all running clusters by running the following: 172 | ```` 173 | oc describe FlinkCluster 174 | Name: my-cluster 175 | Namespace: flink 176 | Labels: 177 | Annotations: 178 | API Version: lightbend.com/v1 179 | Kind: FlinkCluster 180 | Metadata: 181 | Creation Timestamp: 2019-06-16T15:21:27Z 182 | Generation: 1 183 | Resource Version: 11087658 184 | Self Link: /apis/lightbend.com/v1/namespaces/flink/flinkclusters/my-cluster 185 | UID: 68f50b35-904a-11e9-9719-065625d6fbaa 186 | Spec: 187 | Flink Configuration: 188 | Checkpointing: flink-operator-checkpointing 189 | Logging: flink-logging 190 | Num _ Taskmanagers: 1 191 | Parallelism: 2 192 | Savepointing: flink-operator-savepointing 193 | Taskmanagers _ Slots: 2 194 | Master: 195 | Cpu: 1 196 | Mounts: 197 | Envname: my-secret 198 | Mountdirectory: /etc/tls-sidecar/cluster-ca-certs/ 199 | Resourcename: strimzi-clients-ca-cert 200 | Resourcetype: secret 201 | Worker: 202 | Cpu: 1 203 | Events: 204 | ```` 205 | 206 | To modify the cluster, run the following: 207 | ```` 208 | cat < apiVersion: lightbend.com/v1 210 | > kind: FlinkCluster 211 | > metadata: 212 | > name: my-cluster 213 | > spec: 214 | > flinkConfiguration: 215 | > num_taskmanagers: 3 216 | > taskmanagers_slots: 2 217 | > EOF 218 | ```` 219 | Keep in mind that the replace command is not cumulative. You need to specify all of the parameters, even if they existed in the original cluster. 220 | 221 | To delete the cluster, run the following: 222 | ```` 223 | oc delete FlinkCluster my-cluster 224 | ```` 225 | 226 | --- 227 | **Note** 228 | 229 | The above CRD commands are not global; they only show the resources in the namespace that you are in. 230 | 231 | --- 232 | 233 | ## Metrics 234 | 235 | Prometheus support is enabled via the Helm chart. 236 | To see all available metrics, go to the Prometheus console/graph and enter the following query: 237 | ```` 238 | {app_kubernetes_io_name="flink-operator"} 239 | ```` 240 | This will return the list of all metrics produced by the operator. 241 | You should also be able to see the operator and the created clusters in the Lightbend Console. 242 | 243 | ## License 244 | 245 | Copyright (C) 2019 Lightbend Inc. (https://www.lightbend.com).
246 | 247 | Licensed under the Apache License, Version 2.0 (the "License"); you may not use this project except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0. 248 | 249 | Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 250 | -------------------------------------------------------------------------------- /build.sbt: -------------------------------------------------------------------------------- 1 | import Dependencies._ 2 | 3 | // global settings for this build 4 | 5 | name in ThisBuild := "fdp-flink-operator" 6 | version in ThisBuild := "0.0.2" 7 | organization in ThisBuild := "lightbend" 8 | scalaVersion in ThisBuild := Versions.scalaVersion 9 | 10 | 11 | 12 | // settings for a native-packager based docker project based on sbt-docker plugin 13 | def sbtdockerAppBase(id: String)(base: String = id): Project = Project(id, base = file(base)) 14 | .enablePlugins(sbtdocker.DockerPlugin, JavaAppPackaging) 15 | .settings( 16 | dockerfile in docker := { 17 | val appDir = stage.value 18 | val targetDir = "/operator" 19 | 20 | new Dockerfile { 21 | from("lightbend/java-scala-operator-centos:1.0.0") 22 | copy(appDir, targetDir, chown = "jboss:root") 23 | run("chmod", "-R", "777", "/operator") 24 | entryPoint(s"$targetDir/bin/${executableScriptName.value}") 25 | } 26 | }, 27 | 28 | // Set name for the image 29 | imageNames in docker := Seq( 30 | ImageName(namespace = Some(organization.value), 31 | repository = name.value.toLowerCase, 32 | tag = Some(version.value)) 33 | ), 34 | 35 | buildOptions in docker := BuildOptions(cache = false) 36 | ) 37 | 38 | lazy val operator = sbtdockerAppBase("fdp-flink-operator")("./operator") 39 | .enablePlugins(ModelGeneratorPlugin) 40 | .settings( 41 | libraryDependencies ++= Seq(abstractOperator, scalaHTTP, gson, junit), 42 | modelSchemaLocation := "./schema/flinkCluster.json", 43 | (compile in Compile) := ((compile in Compile) dependsOn generateModel).value, 44 | mainClass in Compile := Some("io.radanalytics.operator.Entrypoint") 45 | ) 46 | 47 | lazy val flinkoperator = (project in file(".")) 48 | .aggregate(operator) 49 | -------------------------------------------------------------------------------- /docker/agent-bond-opts: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | # Parse options 4 | while [ $# -gt 0 ] 5 | do 6 | key="$1" 7 | case ${key} in 8 | # Escape for scripts which eval the output of this script 9 | -e|--escape) 10 | escape_sep=1 11 | ;; 12 | esac 13 | shift 14 | done 15 | 16 | # Check whether a given config is contained in AB_JOLOKIA_OPTS 17 | is_in_jolokia_opts() { 18 | local prop=$1 19 | if [ -n "${AB_JOLOKIA_OPTS:-}" ] && [ "${AB_JOLOKIA_OPTS}" != "${AB_JOLOKIA_OPTS/${prop}/}" ]; then 20 | echo "yes" 21 | else 22 | echo "no" 23 | fi 24 | } 25 | 26 | dir=${AB_DIR:-/opt/agent-bond} 27 | sep="=" 28 | 29 | # Options separators defined to avoid clash with fish-pepper templating 30 | ab_open_del="{""{" 31 | ab_close_del="}""}" 32 | if [ -n "${escape_sep:-}" ]; then 33 | ab_open_del='\{\{' 34 | ab_close_del='\}\}' 35 | fi 36 | 37 | if [ -z "${AB_OFF:-}" ]; then 38 | opts="-javaagent:$dir/agent-bond.jar" 39 | config="${AB_CONFIG:-$dir/agent-bond.properties}" 40 | if [ -f 
"$config" ]; then 41 | # Configuration takes precedence 42 | opts="${opts}${sep}config=${config}" 43 | sep="," 44 | fi 45 | if [ -z "${AB_ENABLED:-}" ] || [ "${AB_ENABLED}" != "${AB_ENABLED/jolokia/}" ]; then 46 | # Direct options only if no configuration is found 47 | jsep="" 48 | jolokia_opts="" 49 | if [ -n "${AB_JOLOKIA_CONFIG:-}" ] && [ -f "${AB_JOLOKIA_CONFIG}" ]; then 50 | jolokia_opts="${jolokia_opts}${jsep}config=${AB_JOLOKIA_CONFIG}" 51 | jsep="," 52 | grep -q -e '^host' ${AB_JOLOKIA_CONFIG} && host_in_config=1 53 | fi 54 | if [ -z "${AB_JOLOKIA_HOST:-}" ] && [ -z "${host_in_config:-}" ]; then 55 | AB_JOLOKIA_HOST='0.0.0.0' 56 | fi 57 | if [ -n "${AB_JOLOKIA_HOST:-}" ]; then 58 | jolokia_opts="${jolokia_opts}${jsep}host=${AB_JOLOKIA_HOST}" 59 | jsep="," 60 | fi 61 | if [ -n "${AB_JOLOKIA_PORT:-}" ]; then 62 | jolokia_opts="${jolokia_opts}${jsep}port=${AB_JOLOKIA_PORT}" 63 | jsep="," 64 | fi 65 | if [ -n "${AB_JOLOKIA_USER:-}" ]; then 66 | jolokia_opts="${jolokia_opts}${jsep}user=${AB_JOLOKIA_USER}" 67 | jsep="," 68 | fi 69 | if [ -n "${AB_JOLOKIA_PASSWORD:-}" ]; then 70 | jolokia_opts="${jolokia_opts}${jsep}password=${AB_JOLOKIA_PASSWORD}" 71 | jsep="," 72 | fi 73 | if [ -n "${AB_JOLOKIA_HTTPS:-}" ]; then 74 | jolokia_opts="${jolokia_opts}${jsep}protocol=https" 75 | https_used=1 76 | jsep="," 77 | fi 78 | # Integration with OpenShift client cert auth 79 | if [ -n "${AB_JOLOKIA_AUTH_OPENSHIFT:-}" ]; then 80 | auth_opts="useSslClientAuthentication=true,extraClientCheck=true" 81 | if [ -z "${https_used+x}" ]; then 82 | auth_opts="${auth_opts},protocol=https" 83 | fi 84 | if [ $(is_in_jolokia_opts "caCert") != "yes" ]; then 85 | auth_opts="${auth_opts},caCert=/var/run/secrets/kubernetes.io/serviceaccount/ca.crt" 86 | fi 87 | if [ $(is_in_jolokia_opts "clientPrincipal") != "yes" ]; then 88 | if [ "${AB_JOLOKIA_AUTH_OPENSHIFT}" != "${AB_JOLOKIA_AUTH_OPENSHIFT/=/}" ]; then 89 | # Supposed to contain a principal name to check 90 | auth_opts="${auth_opts},clientPrincipal=$(echo ${AB_JOLOKIA_AUTH_OPENSHIFT} | sed -e 's/ /\\\\ /g')" 91 | else 92 | auth_opts="${auth_opts},clientPrincipal=cn=system:master-proxy" 93 | fi 94 | fi 95 | jolokia_opts="${jolokia_opts}${jsep}${auth_opts}" 96 | jsep="," 97 | fi 98 | # Add extra opts to the end 99 | if [ -n "${AB_JOLOKIA_OPTS:-}" ]; then 100 | jolokia_opts="${jolokia_opts}${jsep}${AB_JOLOKIA_OPTS}" 101 | jsep="," 102 | fi 103 | 104 | opts="${opts}${sep}jolokia${ab_open_del}${jolokia_opts}${ab_close_del}" 105 | sep="," 106 | fi 107 | if [ -z "${AB_ENABLED:-}" ] || [ "${AB_ENABLED}" != "${AB_ENABLED/jmx_exporter/}" ]; then 108 | je_opts="" 109 | jsep="" 110 | if [ -n "${AB_JMX_EXPORTER_OPTS:-}" ]; then 111 | opts="${opts}${sep}jmx_exporter${ab_open_del}${AB_JMX_EXPORTER_OPTS}${ab_close_del}" 112 | sep="," 113 | else 114 | port=${AB_JMX_EXPORTER_PORT:-9779} 115 | config=${AB_JMX_EXPORTER_CONFIG:-/opt/agent-bond/jmx_exporter_config.yml} 116 | opts="${opts}${sep}jmx_exporter${ab_open_del}${port}:${config}${ab_close_del}" 117 | sep="," 118 | fi 119 | fi 120 | if [ "${sep:-}" != '=' ] ; then 121 | echo ${opts} 122 | fi 123 | fi -------------------------------------------------------------------------------- /docker/jmx_exporter_config.yml: -------------------------------------------------------------------------------- 1 | --- 2 | lowercaseOutputName: true 3 | lowercaseOutputLabelNames: true 4 | whitelistObjectNames: 5 | - 'org.apache.camel:*' 6 | rules: 7 | - pattern: '^org.apache.camel<>((?:Min|Mean|Max|Last|Delta)(?:ProcessingTime)):' 8 | name: 
camel_routes_$3 9 | labels: 10 | name: $2 11 | context: $1 12 | - pattern: '^org.apache.camel<>(TotalProcessingTime):' 13 | type: COUNTER 14 | name: camel_routes_$3 15 | labels: 16 | name: $2 17 | context: $1 18 | - pattern: '^org.apache.camel<>(ExchangesInflight|LastProcessingTime):' 19 | name: camel_routes_$3 20 | labels: 21 | name: $2 22 | context: $1 23 | - pattern: '^org.apache.camel<>((?:Exchanges(?:Completed|Failed|Total))|FailuresHandled):' 24 | type: COUNTER 25 | name: camel_routes_$3 26 | labels: 27 | name: $2 28 | context: $1 -------------------------------------------------------------------------------- /helm/flink-operator/.gitignore: -------------------------------------------------------------------------------- 1 | template.yaml 2 | -------------------------------------------------------------------------------- /helm/flink-operator/.helmignore: -------------------------------------------------------------------------------- 1 | .DS_Store 2 | # Common VCS dirs 3 | .git/ 4 | .gitignore 5 | .bzr/ 6 | .bzrignore 7 | .hg/ 8 | .hgignore 9 | .svn/ 10 | # Common backup files 11 | *.swp 12 | *.bak 13 | *.tmp 14 | *~ 15 | # Various IDEs 16 | .project 17 | .idea/ 18 | *.tmproj 19 | 20 | OWNERS 21 | -------------------------------------------------------------------------------- /helm/flink-operator/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | name: flink-cluster-operator 3 | description: Operator for managing the Flink clusters and apps in Kubernetes and OpenShift 4 | version: 0.0.2 5 | appVersion: 0.0.2 6 | icon: http://www3.imperial.ac.uk/newseventsimages?p_image_type=MEDIUM&p_image_id=27451 7 | home: https://github.com/lightbend/fdp-flink-operator 8 | sources: 9 | - https://github.com/lightbend/fdp-flink-operator 10 | maintainers: 11 | - name: FDP team 12 | keywords: 13 | - Apache Flink 14 | - Operator 15 | -------------------------------------------------------------------------------- /helm/flink-operator/README.md: -------------------------------------------------------------------------------- 1 | # flink-operator 2 | CRD-based approach for managing Flink clusters and apps in Kubernetes and OpenShift. 3 | 4 | # Installation 5 | ``` 6 | helm install flink-operator 7 | ``` 8 | 9 | 10 | The operator needs to create Service Account, Role and Role Binding. 
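The chart values (operator image, namespaces to watch, reconciliation interval, metrics and resource settings) are described in the top-level README and in this chart's `values.yaml`, and can be overridden at install time. A minimal sketch, assuming hypothetical value keys such as `image.tag` and `reconciliationInterval` (check `values.yaml` for the actual names):

```
# Install the chart with a few overrides.
# The keys below are illustrative only; see values.yaml for the real ones.
helm install flink-operator \
  --set image.tag=0.0.2 \
  --set namespace="*" \
  --set reconciliationInterval=300
```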
11 | 12 | # Usage 13 | Create Apache Flink Cluster: 14 | 15 | ``` 16 | cat < { 9 | # > "user": { 10 | # > "username": "'"blublinsky"'", 11 | # > "password": "'"Boris1954"'" 12 | # > } 13 | # > }' | jq -r '.token') 14 | AUTH_TOKEN="basic Ymx1YmxpbnNreTpCb3JpczE5NTQ=" 15 | 16 | EXAMPLE_NAMESPACE=blublinsky 17 | EXAMPLE_REPOSITORY=lightbend-flink 18 | EXAMPLE_RELEASE=0.0.3 19 | operator-courier push $BUNDLE_DIR $EXAMPLE_NAMESPACE $EXAMPLE_REPOSITORY \ 20 | $EXAMPLE_RELEASE "$AUTH_TOKEN" -------------------------------------------------------------------------------- /manifest/olm/crd/flinkcluster.crd.yaml: -------------------------------------------------------------------------------- 1 | kind: CustomResourceDefinition 2 | apiVersion: apiextensions.k8s.io/v1beta1 3 | metadata: 4 | name: flinkclusters.lightbend.com 5 | spec: 6 | group: lightbend.com 7 | names: 8 | kind: FlinkCluster 9 | listKind: FlinkClusterList 10 | plural: flinkclusters 11 | singular: flinkcluster 12 | scope: Namespaced 13 | version: v1 14 | -------------------------------------------------------------------------------- /manifest/olm/crd/flinkoperator.0.0.2.clusterserviceversion.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: operators.coreos.com/v1alpha1 3 | kind: ClusterServiceVersion 4 | metadata: 5 | annotations: 6 | alm-examples: |- 7 | [ 8 | { 9 | "apiVersion": "lightbend.com/v1", 10 | "kind": "FlinkCluster", 11 | "metadata": { 12 | "name": "my-cluster" 13 | }, 14 | "spec": { 15 | "flinkConfiguration": { 16 | "num_taskmanagers": "2", 17 | "taskmanagers_slots": "2" 18 | } 19 | } 20 | } 21 | ] 22 | categories: "apache flink, apache, flink" 23 | certified: "False" 24 | containerImage: "lightbend/fdp-flink-operator:0.0.2" 25 | capabilities: Basic Install 26 | description: "An operator for managing the Apache Flink clusters including starting applications." 27 | support: lightbend 28 | name: flinkoperator.v0.0.2 29 | namespace: openshift-operators 30 | spec: 31 | customresourcedefinitions: 32 | owned: 33 | - description: Flink Cluster 34 | displayName: "Flink Cluster" 35 | kind: FlinkCluster 36 | name: flinkclusters.lightbend.com 37 | version: v1 38 | description: | 39 | **Apache Flink** Stateful Computations over Data Streams. 
40 | 41 | Usage: 42 | ``` 43 | # create cluster 44 | cat < 37 | val imagename = image.getImagename match{ 38 | case value if value != null => value 39 | case _ => DEFAULT_FLINK_IMAGE 40 | } 41 | val pullpolicy = image.getPullpolicy match{ 42 | case value if value != null => value 43 | case _ => DEFAULT_PULL_POLICY 44 | } 45 | (imagename, pullpolicy) 46 | case _ => (DEFAULT_FLINK_IMAGE, DEFAULT_PULL_POLICY) 47 | } 48 | 49 | // Master params 50 | val masterParams = cluster.getMaster match { 51 | case master if(master != null) => 52 | val memory = master.getMemory match { 53 | case value if value != null => value 54 | case _ => DEFAULT_JOBMANAGER_MEMORY 55 | } 56 | val cpu = master.getCpu match { 57 | case value if value != null => value 58 | case _ => DEFAULT_JOBMANAGER_CPU 59 | } 60 | val inputs = master.getInputs match { 61 | case value if (value != null) && (value.size() > 0) => value.asScala 62 | case _ => Seq(OPERATOR_TYPE_MASTER_LABEL) 63 | } 64 | (memory, cpu, inputs) 65 | case _ => (DEFAULT_JOBMANAGER_MEMORY, DEFAULT_JOBMANAGER_CPU, Seq(OPERATOR_TYPE_MASTER_LABEL)) 66 | } 67 | 68 | // worker params 69 | val workerParams = cluster.getWorker match { 70 | case worker if(worker != null) => 71 | val memory = worker.getMemory match { 72 | case value if value != null => value 73 | case _ => DEFAULT_TASKMANAGER_MEMORY 74 | } 75 | val cpu = worker.getCpu match { 76 | case value if value != null => value 77 | case _ => DEFAULT_TASKMANAGER_CPU 78 | } 79 | (memory, cpu) 80 | case _ => (DEFAULT_TASKMANAGER_MEMORY, DEFAULT_TASKMANAGER_CPU) 81 | } 82 | 83 | val mounts = new ListBuffer[Mount] 84 | 85 | // Flink params 86 | val flinkP = cluster.getFlinkConfiguration match { 87 | case conf if (conf != null) => 88 | val metric_query_port = DEFAULT_METRIC_QUERY_PORT 89 | val taskmanagers = conf.getNumTaskmanagers match{ 90 | case value if value != null => value.intValue() 91 | case _ => DEFAULT_TASKMANAGER_INSTANCES 92 | } 93 | val taskmanagerslots = conf.getTaskmanagerSlot match{ 94 | case value if value != null => value.intValue() 95 | case _ => DEFAULT_TASKMANAGER_SLOTS 96 | } 97 | val parallelism = conf.getParallelism match{ 98 | case value if value != null => value.intValue() 99 | case _ => DEFAULT_PARALLELISM 100 | } 101 | val metrics = conf.getMetrics match{ 102 | case value if value != null => value.booleanValue() 103 | case _ => DEFAULT_METRICS 104 | } 105 | conf.getLogging match{ 106 | case value if value != null => 107 | mounts += new Mount().withResourcetype("CONFIGMAP").withResourcename(value) 108 | .withMountdirectory(LOGGING_CONFIG_DIRECTORY).withEnvname(LOGGING_ENVIRONMENT) 109 | case _ => 110 | } 111 | conf.getCheckpointing match{ 112 | case value if value != null => 113 | mounts += new Mount().withResourcetype("PVC") 114 | .withResourcename(value).withMountdirectory(CHECKPOINTING_DIRECTORY).withEnvname(CHECKPOINTING_ENVIRONMENT) 115 | case _ => 116 | } 117 | conf.getSavepointing match{ 118 | case value if value != null => 119 | mounts += new Mount().withResourcetype("PVC") 120 | .withResourcename(value).withMountdirectory(SAVEPOINTING_DIRECTORY).withEnvname(SAVEPOINTING_ENVIRONMENT) 121 | case _ => 122 | } 123 | (metric_query_port, taskmanagers, taskmanagerslots, parallelism, metrics) 124 | 125 | case _ => (DEFAULT_METRIC_QUERY_PORT, DEFAULT_TASKMANAGER_INSTANCES, DEFAULT_TASKMANAGER_SLOTS, DEFAULT_PARALLELISM, DEFAULT_METRICS) 126 | } 127 | 128 | // Additional mounte 129 | cluster.getMounts match { 130 | case value if (value != null) && (value.size() > 0) => 131 | 
value.asScala.foreach { mount => 132 | if(mount.getMountdirectory != FLINK_CONFIG_DIR && ((mount.getResourcetype.equalsIgnoreCase("PVC")) || 133 | (mount.getResourcetype.equalsIgnoreCase("SECRET")) || 134 | (mount.getResourcetype.equalsIgnoreCase("CONFIGMAP")))){ 135 | mounts += new Mount().withResourcetype(mount.getResourcetype).withResourcename(mount.getResourcename) 136 | .withMountdirectory(mount.getMountdirectory).withEnvname(mount.getEnvname) 137 | 138 | } 139 | } 140 | case _ => 141 | } 142 | 143 | 144 | FlinkParams(flinkP._5, flinkP._1, masterParams._1, workerParams._1, masterParams._2, workerParams._2, masterParams._3, 145 | flinkP._2, flinkP._3, imageRef._1, imageRef._2, flinkP._4, mounts) 146 | } 147 | } 148 | 149 | case class FlinkParams(metrics : Boolean, metric_query_port : String, master_memory : String, worker_memory : String, 150 | master_cpu : String, worker_cpu : String, master_args : Seq[String], 151 | worker_instances : Int, worker_slots : Int, imageRef : String, pullPolicy : String, 152 | parallelism : Int, mounts: Seq[Mount]) 153 | -------------------------------------------------------------------------------- /operator/src/main/scala/com/lightbend/operator/FlinkClusterOperator.scala: -------------------------------------------------------------------------------- 1 | package com.lightbend.operator 2 | 3 | 4 | import com.lightbend.operator.types.FlinkCluster 5 | import io.radanalytics.operator.common.{AbstractOperator, EntityInfo, Operator} 6 | import Constants._ 7 | import com.lightbend.operator.GetClusterStatus.getStatus 8 | import io.radanalytics.operator.resource.LabelsHelper._ 9 | import org.slf4j.LoggerFactory 10 | 11 | import scala.collection.mutable.{Map => MMap} 12 | import scala.collection.JavaConverters._ 13 | 14 | 15 | @Operator(forKind = classOf[FlinkCluster], prefix = "lightbend.com", crd=true) 16 | class FlinkClusterOperator extends AbstractOperator[FlinkCluster] { 17 | 18 | private val log = LoggerFactory.getLogger(classOf[AbstractOperator[_ <: EntityInfo]].getName) 19 | 20 | // Those can not created here because namespace is initiated later 21 | private val clusters = MMap[String, RunningClusters]() // In order to support multiple namespaces (all namespace) we need map here 22 | private var deployer : Option[KubernetesFlinkClusterDeployer] = Option.empty 23 | 24 | // Init - initialize logger 25 | override protected def onInit(): Unit = { 26 | } 27 | 28 | // Add event, just deploy a new cluster 29 | override def onAdd(cluster: FlinkCluster): Unit = { 30 | onAddInternal(cluster, namespace, DeploymentOptions()) 31 | } 32 | 33 | private def onAddInternal(cluster: FlinkCluster, ns: String, option: DeploymentOptions) : Unit = { 34 | log.info(s"Flink operator processing add event for a cluster ${cluster.getName} in namespace $ns") 35 | val list = getDeployer().getResourceList(cluster, ns, option) 36 | client.resourceList(list).inNamespace(ns).createOrReplace 37 | getClusters(ns).put(cluster) 38 | } 39 | 40 | // Delete event, just delete cluster 41 | override def onDelete(cluster: FlinkCluster): Unit = { 42 | onDeleteInternal(cluster, namespace) 43 | } 44 | 45 | private def onDeleteInternal(cluster: FlinkCluster, ns : String): Unit = { 46 | log.info(s"Flink operator processing delete event for a cluster ${cluster.getName} in namespace $ns") 47 | val name = cluster.getName 48 | client.services.inNamespace(ns).withLabels(getDeployer().getDefaultLabels(name).asJava).delete 49 | 
client.replicationControllers.inNamespace(ns).withLabels(getDeployer().getDefaultLabels(name).asJava).delete 50 | client.pods.inNamespace(ns).withLabels(getDeployer().getDefaultLabels(name).asJava).delete 51 | getClusters(ns).delete(name) 52 | } 53 | 54 | // Modify event 55 | override protected def onModify(newCluster: FlinkCluster): Unit = { 56 | log.info(s"Flink operator processing modify event for a cluster ${newCluster.getName} in namespace $namespace") 57 | val name = newCluster.getName 58 | // Get existing cluster 59 | val existingCluster = getClusters(namespace).getCluster(name) 60 | existingCluster match { 61 | case v if (v == null) => 62 | log.error(s"something went wrong, unable to modify existing cluster $name. Perhaps it wasn't deployed properly. Redeploying") 63 | onAddInternal(newCluster, namespace, DeploymentOptions()) 64 | case _ => 65 | isOnlyScale(existingCluster, newCluster) match { 66 | case true => // This is just rescale 67 | log.info(s"Flink operator processing modify event for a cluster ${newCluster.getName}. Rescaling only") 68 | rescaleTakmanagersCluster(newCluster, namespace) 69 | case _ => // Recreate cluster with new parameters 70 | log.info(s"Recreating cluster $name") 71 | val list = getDeployer().getResourceList(newCluster, namespace, DeploymentOptions()) 72 | client.resourceList(list).inNamespace(namespace).createOrReplace 73 | getClusters(namespace).update(newCluster) 74 | } 75 | } 76 | } 77 | 78 | override protected def fullReconciliation() : Unit = { 79 | // 1. get all defined crd and call it desiredSet 80 | // 2. get all deployed clusters and call it actualSet (and update the this.clusters) 81 | // 3. desiredSet - actualSet = toBeCreated 82 | // 4. actualSet - desiredSet = toBeDeleted 83 | // 5. repair / scale 84 | 85 | log.info(s"Running full reconciliation for namespace $namespace and kind $entityName.") 86 | 87 | var change = false 88 | // Get desired clusters 89 | val desired = super.getDesiredSet.asScala.map(cluster => (FullName(cluster.getName, cluster.getNamespace) -> cluster)).toMap 90 | // Get actual workers 91 | val actual = getDeployed 92 | 93 | // Calculate to be created and deleted 94 | val toBeCreated = desired.keys.toList.filterNot(actual.keys.toSet) 95 | val toBeDeleted = actual.keys.toList.filterNot(desired.keys.toSet) 96 | 97 | // Process creation 98 | toBeCreated.isEmpty match { 99 | case true => 100 | case _ => // We need to create missing ones 101 | log.info(s"Reconciliation - toBeCreated: $toBeCreated") 102 | change = true 103 | toBeCreated.foreach(cluster => { 104 | log.info(s"Reconciliation creating cluster $cluster") 105 | onAddInternal(desired.get(cluster).get, cluster.namespace, DeploymentOptions()) 106 | }) 107 | } 108 | 109 | // Process deletion 110 | toBeDeleted.isEmpty match { 111 | case true => 112 | case _ => // We need to delete extraneous 113 | log.info(s"Reconciliation toBeDeleted: $toBeDeleted") 114 | change = true 115 | toBeDeleted.foreach(cluster => { 116 | val c = new FlinkCluster 117 | c.setName(cluster.name) 118 | log.info(s"Reconciliation deleting cluster $cluster") 119 | onDeleteInternal(c, cluster.namespace) 120 | }) 121 | } 122 | 123 | // repair/ rescale 124 | desired.foreach(cluster => { 125 | val state = actual.get(cluster._1).getOrElse(Deployed("", -1, -1, "")) 126 | var deployment = DeploymentOptions(false, false, false) 127 | state.master match { 128 | case actualMasters if (actualMasters > 0) => // may be rescale 129 | actualMasters == 1 match { 130 | case true => // Do nothing 131 | case _ => // 
Rescale 132 | change = true 133 | rescaleJobManager(cluster._2, cluster._1.namespace) 134 | } 135 | case _ => // Recreate 136 | deployment = DeploymentOptions(true, deployment.worker, deployment.service) 137 | } 138 | state.worker match { 139 | case actualWorkers if (actualWorkers > 0) => // may be rescale 140 | val desiredWorkers = getFlinkParameters(cluster._2).worker_instances 141 | desiredWorkers == actualWorkers match { 142 | case true => // Do nothing 143 | case _ => // Rescale 144 | change = true 145 | rescaleTakmanagersCluster(cluster._2, cluster._1.namespace) 146 | } 147 | case _ => // Recreate 148 | deployment = DeploymentOptions(deployment.master, true, deployment.service) 149 | } 150 | 151 | state.service match { 152 | case "" => deployment = DeploymentOptions(deployment.master, deployment.worker, true) 153 | case _ => // Service exists - get status for this deployment 154 | val status = getStatus(state.service) 155 | log.info(s"Status for cluster ${state.name} - ${status.status}, service ${state.service}") 156 | status.jobs.foreach(job => log.info(s"Job ${job.id} - ${job.status}")) 157 | } 158 | 159 | deployment.todo() match { 160 | case true => // Need to repair 161 | onAddInternal(cluster._2, cluster._2.getNamespace, deployment) 162 | change = true 163 | case _ => 164 | } 165 | }) 166 | 167 | // first reconciliation after (re)start -> update the clusters instance 168 | if (!fullReconciliationRun) { 169 | val clusterList = "*" == namespace match { 170 | case true => clusters.values.toList 171 | case _ => List(getClusters(namespace)) 172 | } 173 | clusterList.foreach(c => c.resetMetrics()) 174 | } 175 | 176 | // Log result 177 | if (!change) 178 | log.info("No change was detected during the reconciliation") 179 | MetricsHelper.reconciliationsTotal.labels(namespace).inc() 180 | } 181 | 182 | // Get actually deployed clusters 183 | private def getDeployed: Map[FullName, Deployed] = { 184 | // Controllers for ns 185 | val controllers = ("*" == namespace) match { 186 | case true => client.replicationControllers.inAnyNamespace 187 | case _ => client.replicationControllers.inNamespace(namespace) 188 | } 189 | // services in ns 190 | val services = ("*" == namespace) match { 191 | case true => client.services.inAnyNamespace 192 | case _ => client.services.inNamespace(namespace) 193 | } 194 | // Create specific labels 195 | val mlabels = Map("server" -> "flink", "component" -> OPERATOR_TYPE_MASTER_LABEL, prefix + OPERATOR_KIND_LABEL -> entityName) 196 | val wlabels = Map("server" -> "flink", "component" -> OPERATOR_TYPE_WORKER_LABEL, prefix + OPERATOR_KIND_LABEL -> entityName) 197 | val slabels = Map("server" -> "flink", prefix + OPERATOR_KIND_LABEL -> entityName) 198 | // Get masters per name 199 | val masters = controllers.withLabels(mlabels.asJava).list.getItems.asScala.map(rc => 200 | FullName(rc.getMetadata.getLabels.get(prefix + entityName), rc.getMetadata.getNamespace) -> rc.getSpec.getReplicas.intValue()).toMap 201 | // Get workers per name 202 | val workers = controllers.withLabels(wlabels.asJava).list.getItems.asScala.map(rc => 203 | FullName(rc.getMetadata.getLabels.get(prefix + entityName), rc.getMetadata.getNamespace) -> rc.getSpec.getReplicas.intValue()).toMap 204 | // Get services per name 205 | val mservices = services.withLabels(slabels.asJava).list.getItems.asScala.map(s => 206 | FullName(s.getMetadata.getLabels.get(prefix + entityName), s.getMetadata.getNamespace) -> s"${s.getMetadata.getName}.${s.getMetadata.getNamespace}.svc.cluster.local").toMap 207 | // Combine 
to cluster information 208 | masters.keys.toSeq.union(workers.keys.toSeq).union(mservices.keys.toSeq) 209 | .map(key => (key -> Deployed(s"${key.namespace}:${key.name}", 210 | workers.get(key) match {case Some(w) => w; case _ => -1}, 211 | masters.get(key) match {case Some(m) => m; case _ => -1 }, 212 | mservices.get(key) match {case Some(s) => s; case _ => ""} 213 | ))).toMap 214 | } 215 | 216 | // Rescale taskmanagers cluster 217 | private def rescaleTakmanagersCluster(newCluster: FlinkCluster, ns : String) : Unit = { 218 | val newWorkers = getFlinkParameters(newCluster).worker_instances 219 | log.info(s"Cluster ${newCluster.getName} scaling to $newWorkers taskmanagers") 220 | client.replicationControllers.inNamespace(ns).withName(s"${newCluster.getName}-taskmanager").scale(newWorkers) 221 | getClusters(ns).update(newCluster) 222 | } 223 | 224 | // Rescale Job manager 225 | private def rescaleJobManager(newCluster: FlinkCluster, ns : String) : Unit = { 226 | log.info(s"Cluster ${newCluster.getName} scaling JobManageners") 227 | client.replicationControllers.inNamespace(ns).withName(s"${newCluster.getName}-jobmanager").scale(1) 228 | } 229 | 230 | /** 231 | * This method verifies if any two instances of FlinkCluster are the same ones up to the number of 232 | * workers. This way we can call the scale instead of recreating the whole cluster. 233 | * 234 | * @param oldC the first instance of FlinkCluster we are comparing 235 | * @param newC the second instance of FlinkCluster we are comparing 236 | * @return true if both instances represent the same flink cluster but differs only in number of workers (it is safe 237 | * to call scale method) 238 | */ 239 | private def isOnlyScale(oldC: FlinkCluster, newC: FlinkCluster) : Boolean = { 240 | 241 | // Get parameters 242 | val oldP = getFlinkParameters(oldC) 243 | val newP = getFlinkParameters(newC) 244 | newC.getFlinkConfiguration.setNumTaskmanagers(oldP.worker_instances) 245 | oldC == newC 246 | } 247 | 248 | private def getClusters(ns : String): RunningClusters = clusters.get(ns) match { 249 | case Some(c) => c // already exists 250 | case _ => 251 | val c = new RunningClusters(ns) 252 | clusters += (ns -> c) 253 | c 254 | } 255 | 256 | private def getDeployer(): KubernetesFlinkClusterDeployer = deployer match { 257 | case Some(d) => d // Already exists 258 | case _ => // Create a new one 259 | val d = new KubernetesFlinkClusterDeployer(client, entityName, prefix) 260 | deployer = Option(d) 261 | d 262 | } 263 | } 264 | 265 | case class FullName(name: String, namespace: String){ 266 | def equal(other: AnyRef): Boolean = { 267 | other match { 268 | case fn : FullName => (name == fn.name) && (namespace == fn.namespace) 269 | case _ => false 270 | } 271 | } 272 | } 273 | 274 | case class DeploymentOptions(master: Boolean = true, worker : Boolean = true, service: Boolean = true){ 275 | def todo() : Boolean = master || worker || service 276 | } 277 | 278 | case class Deployed(name : String, worker : Int, master: Int, service: String) -------------------------------------------------------------------------------- /operator/src/main/scala/com/lightbend/operator/GetClusterStatus.scala: -------------------------------------------------------------------------------- 1 | package com.lightbend.operator 2 | 3 | import com.google.gson.Gson 4 | import scalaj.http.{Http, HttpResponse} 5 | 6 | object GetClusterStatus { 7 | 8 | val gson = new Gson() 9 | 10 | def getStatus(service: String, port : Int = 8081): FlinkStatus = { 11 | val response: 
HttpResponse[String] = Http(s"http://$service:$port/jobs").asString 12 | response.isSuccess match { 13 | case true => // We got success reply 14 | FlinkStatus("RUNNING", gson.fromJson(response.body, classOf[FlinkJobs]).jobs) 15 | case _ => // Job Query failed 16 | FlinkStatus("FAILED", Seq.empty) 17 | } 18 | } 19 | 20 | def main(args: Array[String]): Unit = { 21 | val status = getStatus("fdp-taxiride-jobmanager-flink.fiorano.lightbend.com") 22 | println(s"Cluster state is - ${status.status}") 23 | status.jobs.foreach(job => println(s"job : id - ${job.id}; status - ${job.status}")) 24 | } 25 | } 26 | 27 | case class FlinkJob(id : String, status : String) 28 | 29 | case class FlinkJobs(jobs : Array[FlinkJob]) 30 | 31 | case class FlinkStatus(status : String, jobs : Seq[FlinkJob]) -------------------------------------------------------------------------------- /operator/src/main/scala/com/lightbend/operator/KubernetesFlinkClusterDeployer.scala: -------------------------------------------------------------------------------- 1 | package com.lightbend.operator 2 | 3 | import scala.collection.mutable.ListBuffer 4 | import io.fabric8.kubernetes.api.model._ 5 | import io.fabric8.kubernetes.client.KubernetesClient 6 | import com.lightbend.operator.types.FlinkCluster 7 | import io.radanalytics.operator.resource.LabelsHelper._ 8 | 9 | import scala.collection.JavaConverters._ 10 | import Constants._ 11 | import org.slf4j.LoggerFactory 12 | 13 | class KubernetesFlinkClusterDeployer(client: KubernetesClient, entityName: String, prefix: String) { 14 | 15 | private val log = LoggerFactory.getLogger(classOf[KubernetesFlinkClusterDeployer].getName) 16 | // log.info(s"Creating KubernetesFlinkClusterDeployer for the entity name $entityName, prefix $prefix") 17 | 18 | def getResourceList(cluster: FlinkCluster, namespace: String, options: DeploymentOptions): KubernetesResourceList[_ <: HasMetadata] = client.synchronized { 19 | 20 | log.info(s"Creating resource list for cluster ${cluster.getName} in namespace $namespace") 21 | val params = getFlinkParameters(cluster) 22 | var resourceList = List[HasMetadata]() 23 | if(options.master) resourceList = getRCforMaster(cluster, params, namespace) :: resourceList 24 | if(options.worker) resourceList = getRCforWorker(cluster, params, namespace) :: resourceList 25 | if(options.service) resourceList = getService(cluster, namespace) :: resourceList 26 | new KubernetesListBuilder().withItems(resourceList.asJava).build 27 | } 28 | 29 | private def getService(cluster: FlinkCluster, namespace: String): Service = { 30 | 31 | val labels = getLabels(cluster, null) 32 | val ports = List( 33 | new ServicePortBuilder().withPort(6123).withName("rpc").build, 34 | new ServicePortBuilder().withPort(6124).withName("blob").build, 35 | new ServicePortBuilder().withPort(8081).withName("ui").build) 36 | 37 | new ServiceBuilder() 38 | .withNewMetadata 39 | .withName(s"${cluster.getName}-$OPERATOR_TYPE_MASTER_LABEL") 40 | .withNamespace(namespace) 41 | .withLabels(labels.asJava) 42 | .endMetadata. 
43 | withNewSpec 44 | .withSelector(Map("app" -> cluster.getName, "component" -> OPERATOR_TYPE_MASTER_LABEL).asJava) 45 | .withPorts(ports.asJava) 46 | .endSpec() 47 | .build 48 | } 49 | 50 | private def getRCforMaster(cluster: FlinkCluster, params: FlinkParams, namespace: String): ReplicationController = { 51 | 52 | // Names 53 | val name = cluster.getName 54 | val podName = s"$name-$OPERATOR_TYPE_MASTER_LABEL" 55 | 56 | // Ports 57 | val ports = List( 58 | portBuild(6123, "rpc"), 59 | portBuild(6124, "blob"), 60 | portBuild(8081, "ui")) 61 | 62 | // Environment variables 63 | val envVars = buildEnv(cluster, params, true) 64 | 65 | // Arguments 66 | val args = params.master_args.toList 67 | 68 | // Liveness probe 69 | val masterLiveness = new ProbeBuilder() 70 | .withHttpGet( 71 | new HTTPGetActionBuilder().withPath("/overview") 72 | .withPort(new IntOrStringBuilder().withIntVal(8081).build()).build()) 73 | .withInitialDelaySeconds(30) 74 | .withPeriodSeconds(10) 75 | .build() 76 | 77 | // Limits 78 | val limits = Map(("cpu" -> new QuantityBuilder().withAmount(s"${params.master_cpu}000m").build()), 79 | ("memory" -> new QuantityBuilder().withAmount(s"${params.master_memory}Mi").build())) 80 | 81 | // Container 82 | val containerBuilder = cBuilder(params, envVars, ports , args, limits).withLivenessProbe(masterLiveness) 83 | 84 | // Mounts 85 | val volumes = volumesBuilder(containerBuilder, params) 86 | 87 | // Metrics 88 | var annotations = Map[String, String]() 89 | if (params.metrics) { 90 | annotations = annotations + (("prometheus.io/scrape" -> "true"), ("prometheus.io/port" -> "9249")) 91 | } 92 | 93 | // Labels 94 | var labels = getLabels(cluster, OPERATOR_TYPE_MASTER_LABEL) 95 | 96 | // Replication controller 97 | controllerBuilder(podName, namespace, 1, annotations, labels, containerBuilder, volumes) 98 | } 99 | 100 | private def getRCforWorker(cluster: FlinkCluster, params: FlinkParams, namespace: String): ReplicationController = { 101 | 102 | // Flink parameters 103 | val name = cluster.getName 104 | val podName = s"$name-$OPERATOR_TYPE_WORKER_LABEL" 105 | 106 | // Ports 107 | val ports = List( 108 | portBuild(6121, "data"), 109 | portBuild(6122, "rpc"), 110 | portBuild(6125, "query"), 111 | portBuild(params.metric_query_port.toInt, "metric")) 112 | 113 | // Environment variables 114 | val envVars = buildEnv(cluster, params, false) 115 | 116 | // Arguments 117 | val args = List(OPERATOR_TYPE_WORKER_LABEL) 118 | 119 | // Limits 120 | val limits = Map(("cpu" -> new QuantityBuilder().withAmount(s"${params.worker_cpu}000m").build()), 121 | ("memory" -> new QuantityBuilder().withAmount(s"${params.worker_memory}Mi").build())) 122 | 123 | 124 | // Container 125 | val containerBuilder = cBuilder(params, envVars, ports , args, limits) 126 | 127 | // Mounts 128 | val volumes = volumesBuilder(containerBuilder, params) 129 | 130 | // Metrics 131 | var annotations = Map[String, String]() 132 | if (params.metrics) { 133 | annotations = annotations + (("prometheus.io/scrape" -> "true"), ("prometheus.io/port" -> "9249")) 134 | } 135 | 136 | // Labels 137 | val labels = getLabels(cluster, OPERATOR_TYPE_WORKER_LABEL) 138 | 139 | // Replication controller 140 | controllerBuilder(podName, namespace, params.worker_instances, annotations, labels, containerBuilder, volumes) 141 | } 142 | 143 | private def envBuild(key: String, value: String): EnvVar = new EnvVarBuilder().withName(key).withValue(value).build 144 | 145 | private def portBuild(port: Int, name: String): ContainerPort = new 
ContainerPortBuilder().withContainerPort(port).withName(name).build() 146 | 147 | private def getLabels(cluster: FlinkCluster, component : String) : Map[String, String] = { 148 | 149 | var labels = Map(("server" -> "flink"), ("app" -> cluster.getName)) ++ getDefaultLabels(cluster.getName) 150 | if (cluster.getLabels != null) { 151 | cluster.getLabels.asScala.foreach(label => 152 | labels = labels + (label._1 -> label._2) 153 | ) 154 | } 155 | if(component != null) 156 | labels = labels + ("component" -> component) 157 | labels 158 | } 159 | 160 | private def buildEnv(cluster: FlinkCluster, params: FlinkParams, jobmanager : Boolean) : Seq[EnvVar] = { 161 | var envVars = new ListBuffer[EnvVar]() 162 | envVars += envBuild("CONTAINER_METRIC_PORT", params.metric_query_port) 163 | jobmanager match { 164 | case true => 165 | envVars += envBuild("JOBMANAGER_MEMORY", s"${params.master_memory}m") 166 | envVars += envBuild("JOB_MANAGER_RPC_ADDRESS", s"${cluster.getName}-$OPERATOR_TYPE_MASTER_LABEL") 167 | case _ => 168 | envVars += envBuild("TASKMANAGER_MEMORY", s"${params.worker_memory}m") 169 | envVars += envBuild("TASKMANAGER_SLOTS", params.worker_slots.toString) 170 | envVars += envBuild("JOB_MANAGER_RPC_ADDRESS", s"${cluster.getName}-$OPERATOR_TYPE_MASTER_LABEL") 171 | envVars += new EnvVarBuilder().withName("K8S_POD_IP").withValueFrom( 172 | new EnvVarSourceBuilder().withFieldRef( 173 | new ObjectFieldSelectorBuilder().withFieldPath("status.podIP").build()).build()).build 174 | 175 | } 176 | params.mounts.foreach (mount => envVars += envBuild(mount.getEnvname, mount.getMountdirectory)) 177 | params.parallelism match { 178 | case p if(p != 1) => envVars += envBuild(Constants.PARALLELISM_ENV_VAR, params.parallelism.toString) 179 | case _ => 180 | } 181 | if (cluster.getEnv != null) 182 | cluster.getEnv.asScala.foreach(env => envVars += envBuild(env.getName, env.getValue)) 183 | 184 | envVars 185 | } 186 | 187 | private def cBuilder(params: FlinkParams, envVars : Seq[EnvVar], ports : List[ContainerPort], args: List[String], limits: Map[String, Quantity]) : ContainerBuilder = { 188 | new ContainerBuilder() 189 | .withImage(params.imageRef) 190 | .withImagePullPolicy(params.pullPolicy) 191 | .withName(OPERATOR_TYPE_MASTER_LABEL) 192 | .withTerminationMessagePolicy("File") 193 | .withEnv(envVars.asJava) 194 | .withPorts(ports.asJava) 195 | .withArgs(args.asJava) 196 | .withResources(new ResourceRequirementsBuilder().withLimits(limits.asJava).build()) 197 | } 198 | 199 | private def volumesBuilder(containerBuilder: ContainerBuilder, params: FlinkParams) : Seq[Volume] = { 200 | // Mounts 201 | val volumes = new ListBuffer[Volume] 202 | params.mounts foreach {mount => 203 | val readonly = mount.getResourcetype match { 204 | case v if v.equalsIgnoreCase ("PVC") => 205 | volumes += new VolumeBuilder ().withName (mount.getEnvname.toLowerCase).withPersistentVolumeClaim ( 206 | new PersistentVolumeClaimVolumeSource (mount.getResourcename, false) ).build () 207 | false 208 | case v if v.equalsIgnoreCase ("CONFIGMAP") => 209 | volumes += new VolumeBuilder ().withName (mount.getEnvname.toLowerCase).withConfigMap ( 210 | new ConfigMapVolumeSourceBuilder ().withName (mount.getResourcename).build () ).build () 211 | true 212 | case _ => 213 | volumes += new VolumeBuilder ().withName (mount.getEnvname.toLowerCase).withSecret ( 214 | new SecretVolumeSourceBuilder ().withSecretName (mount.getResourcename).build () ).build () 215 | true 216 | } 217 | containerBuilder.addToVolumeMounts (new VolumeMountBuilder () 218 | 
.withName (mount.getEnvname.toLowerCase).withMountPath (mount.getMountdirectory).withReadOnly (readonly) 219 | .build () ) 220 | } 221 | volumes 222 | } 223 | 224 | private def controllerBuilder(podname: String, namespace : String, replicas : Int, annotations : Map[String, String], labels : Map[String, String], containerBuilder: ContainerBuilder, volumes : Seq[Volume]) : ReplicationController = { 225 | 226 | new ReplicationControllerBuilder() 227 | .withNewMetadata 228 | .withName(podname) 229 | .withNamespace(namespace) 230 | .endMetadata 231 | .withNewSpec 232 | .withReplicas(replicas) 233 | .withNewTemplate 234 | .withNewMetadata 235 | .withAnnotations(annotations.asJava) 236 | .withLabels(labels.asJava) 237 | .endMetadata 238 | .withNewSpec.withContainers(containerBuilder.build).withVolumes(volumes.toList.asJava).endSpec() 239 | .endTemplate 240 | .endSpec.build 241 | } 242 | 243 | def getDefaultLabels(name: String): Map[String, String] = { 244 | Map((s"$prefix$OPERATOR_KIND_LABEL" -> entityName),(s"$prefix$entityName"-> name)) 245 | } 246 | } -------------------------------------------------------------------------------- /operator/src/main/scala/com/lightbend/operator/MetricsHelper.scala: -------------------------------------------------------------------------------- 1 | package com.lightbend.operator 2 | 3 | import io.prometheus.client.{Counter, Gauge} 4 | 5 | object MetricsHelper { 6 | private val PREFIX = "operator_" 7 | 8 | val reconciliationsTotal = Counter.build.name(PREFIX + "full_reconciliations_total") 9 | .help("How many times the full reconciliation has been run.") 10 | .labelNames("ns") 11 | .register 12 | 13 | val runningClusters = Gauge.build.name(PREFIX + "running_clusters") 14 | .help("Flink clusters that are currently running.") 15 | .labelNames("ns").register 16 | 17 | val workers = Gauge.build.name(PREFIX + "running_workers") 18 | .help("Number of workers per cluster name.") 19 | .labelNames("cluster", "ns").register 20 | 21 | val startedTotal = Gauge.build.name(PREFIX + "started_clusters_total") 22 | .help("Flink clusters has been started by operator.") 23 | .labelNames("ns").register 24 | } 25 | -------------------------------------------------------------------------------- /operator/src/main/scala/com/lightbend/operator/RunningClusters.scala: -------------------------------------------------------------------------------- 1 | package com.lightbend.operator 2 | 3 | import com.lightbend.operator.types.FlinkCluster 4 | 5 | import scala.collection.mutable.Map 6 | import MetricsHelper._ 7 | import Constants._ 8 | import org.slf4j.LoggerFactory 9 | 10 | class RunningClusters (namespace: String){ 11 | 12 | private val log = LoggerFactory.getLogger(classOf[RunningClusters].getName) 13 | 14 | log.info(s"Creating clusters map for the namespace $namespace") 15 | val clusters = Map[String, FlinkCluster]() 16 | runningClusters.labels(namespace).set(0) 17 | 18 | def put(c: FlinkCluster): Unit = { 19 | log.info(s"Adding new cluster ${c.getName} in namespace $namespace") 20 | clusters.get(c.getName) match { 21 | case Some(value) => // Already exists, skip 22 | case _ => 23 | runningClusters.labels(namespace).inc() 24 | startedTotal.labels(namespace).inc() 25 | workers.labels(c.getName, namespace).set(getFlinkParameters(c).worker_instances) 26 | clusters += (c.getName -> c) 27 | } 28 | } 29 | 30 | def update(c: FlinkCluster): Unit = { 31 | log.info(s"Updating cluster ${c.getName} in namespace $namespace") 32 | clusters += (c.getName -> c) 33 | workers.labels(c.getName, 
namespace).set(getFlinkParameters(c).worker_instances) 34 | } 35 | 36 | def delete(name: String): Unit = { 37 | log.info(s"Deleting cluster $name in namespace $namespace") 38 | clusters.contains(name) match { 39 | case true => 40 | runningClusters.labels(namespace).dec() 41 | workers.labels(name, namespace).set(0) 42 | clusters -=name 43 | case _ => 44 | } 45 | } 46 | 47 | def getCluster(name: String): FlinkCluster = clusters.getOrElse(name, null) 48 | 49 | def resetMetrics(): Unit = { 50 | startedTotal.labels(namespace).set(0) 51 | clusters.keys.foreach(name => workers.labels(name, namespace).set(0)) 52 | startedTotal.labels(namespace).set(0) 53 | } 54 | } 55 | -------------------------------------------------------------------------------- /operator/src/main/scala/com/lightbend/operator/helpers/DataHelper.scala: -------------------------------------------------------------------------------- 1 | package com.lightbend.operator.helpers 2 | 3 | import java.util 4 | 5 | import com.lightbend.operator.types.{FlinkCluster, FlinkConfiguration, Image, Mount, NameValue, RCSpec} 6 | import io.fabric8.kubernetes.api.model.apiextensions.CustomResourceDefinition 7 | 8 | import scala.collection.JavaConverters._ 9 | import java.util.{ArrayList, LinkedHashMap} 10 | 11 | import scala.collection.mutable.ListBuffer 12 | 13 | object DataHelper { 14 | 15 | def fromCRD(crd : CustomResourceDefinition) : FlinkCluster = { 16 | val cluster = new FlinkCluster() 17 | cluster.setName(crd.getMetadata.getName) 18 | cluster.setNamespace(crd.getMetadata.getNamespace) 19 | if(crd.getSpec != null) { 20 | val additionalproperties = crd.getSpec.getAdditionalProperties.asScala 21 | additionalproperties.get("customImage") match { 22 | case Some(value) => 23 | val props = value.asInstanceOf[LinkedHashMap[String, String]] 24 | val image = new Image() 25 | props.get("imagename") match { 26 | case value if (value != null) => image.setImagename(value) 27 | case _ => 28 | } 29 | props.get("pullpolicy") match { 30 | case value if (value != null) => image.setPullpolicy(value) 31 | case _ => 32 | } 33 | cluster.setCustomImage(image) 34 | case _ => 35 | } 36 | additionalproperties.get("env") match { 37 | case Some(value) => 38 | val env = value.asInstanceOf[ArrayList[LinkedHashMap[String, AnyRef]]].asScala.map(e => { 39 | val en = e.asInstanceOf[LinkedHashMap[String, String]] 40 | new NameValue().withName(en.get("name")).withValue(en.get("value")) 41 | }) 42 | cluster.setEnv(env.asJava) 43 | case _ => 44 | } 45 | additionalproperties.get("labels") match { 46 | case Some(value) => 47 | val labels = value.asInstanceOf[LinkedHashMap[String, String]].asScala.map(lab => { 48 | (lab._1 -> lab._2) 49 | }).toMap 50 | cluster.setLabels(labels.asJava) 51 | case _ => 52 | } 53 | additionalproperties.get("flinkConfiguration") match { 54 | case Some(value) => 55 | val props = value.asInstanceOf[LinkedHashMap[String, AnyRef]] 56 | val flinkProps = new FlinkConfiguration() 57 | props.get("num_taskmanagers") match { 58 | case value if (value != null) => flinkProps.setNumTaskmanagers(value.asInstanceOf[Int]) 59 | case _ => 60 | } 61 | props.get("taskmanagers_slots") match { 62 | case value if (value != null) => flinkProps.setTaskmanagerSlot(value.asInstanceOf[Int]) 63 | case _ => 64 | } 65 | props.get("parallelism") match { 66 | case value if (value != null) => flinkProps.setParallelism(value.asInstanceOf[Int]) 67 | case _ => 68 | } 69 | props.get("metrics") match { 70 | case value if (value != null) => flinkProps.setMetrics(value.asInstanceOf[Boolean]) 
71 | case _ => 72 | } 73 | props.get("logging") match { 74 | case value if (value != null) => flinkProps.setLogging(value.asInstanceOf[String]) 75 | case _ => 76 | } 77 | props.get("checkpointing") match { 78 | case value if (value != null) => flinkProps.setCheckpointing(value.asInstanceOf[String]) 79 | case _ => 80 | } 81 | props.get("savepointing") match { 82 | case value if (value != null) => flinkProps.setSavepointing(value.asInstanceOf[String]) 83 | case _ => 84 | } 85 | cluster.setFlinkConfiguration(flinkProps) 86 | case _ => 87 | } 88 | 89 | additionalproperties.get("master") match { 90 | case Some(value) => 91 | val minfo = value.asInstanceOf[LinkedHashMap[String, AnyRef]] 92 | val master = new RCSpec() 93 | minfo.get("cpu") match { 94 | case cpu if (cpu != null) => 95 | master.setCpu(cpu.asInstanceOf[String]) 96 | case _ => 97 | } 98 | minfo.get("memory") match { 99 | case memory if (memory != null) => 100 | master.setMemory(memory.asInstanceOf[String]) 101 | case _ => 102 | } 103 | minfo.get("inputs") match { 104 | case inputs if (inputs != null) => 105 | master.setInputs(inputs.asInstanceOf[ArrayList[String]]) 106 | case _ => 107 | } 108 | cluster.setMaster(master) 109 | 110 | case _ => 111 | } 112 | 113 | additionalproperties.get("worker") match { 114 | case Some(value) => 115 | val winfo = value.asInstanceOf[LinkedHashMap[String, AnyRef]] 116 | val worker = new RCSpec() 117 | winfo.get("cpu") match { 118 | case cpu if (cpu != null) => 119 | worker.setCpu(cpu.asInstanceOf[String]) 120 | case _ => 121 | } 122 | winfo.get("memory") match { 123 | case memory if (memory != null) => 124 | worker.setMemory(memory.asInstanceOf[String]) 125 | case _ => 126 | } 127 | cluster.setWorker(worker) 128 | case _ => 129 | } 130 | 131 | additionalproperties.get("mounts") match { 132 | case Some(value) => 133 | val minfos = value.asInstanceOf[util.ArrayList[LinkedHashMap[String, String]]] 134 | val mounts = new ListBuffer[Mount] 135 | minfos.forEach{minfo => 136 | val resourceType = minfo.get("resourcetype") match { 137 | case rtype if (rtype != null) => rtype 138 | case _ => "" 139 | } 140 | val resourceName = minfo.get("resourcename") match { 141 | case rname if (rname != null) => rname 142 | case _ => "" 143 | } 144 | val mountDirectory = minfo.get("mountdirectory") match { 145 | case directory if (directory != null) => directory 146 | case _ => "" 147 | } 148 | val envname = minfo.get("envname") match { 149 | case name if (name != null) => name 150 | case _ => "" 151 | } 152 | mounts += new Mount().withResourcetype(resourceType).withResourcename(resourceName) 153 | .withMountdirectory(mountDirectory).withEnvname(envname) 154 | } 155 | cluster.setMounts(mounts.asJava) 156 | case _ => 157 | } 158 | 159 | } 160 | 161 | // Return result 162 | cluster 163 | } 164 | } 165 | -------------------------------------------------------------------------------- /operator/src/test/scala/com/lightbend/operator/YamlProcessingTest.scala: -------------------------------------------------------------------------------- 1 | package com.lightbend.operator 2 | 3 | import com.lightbend.operator.helpers.DataHelper 4 | import io.fabric8.kubernetes.client.DefaultKubernetesClient 5 | import junit.framework.TestCase 6 | import org.junit.Test 7 | import com.lightbend.operator.types.FlinkCluster 8 | import io.radanalytics.operator.common.JSONSchemaReader 9 | 10 | import scala.collection.JavaConverters._ 11 | 12 | class YamlProcessingTest extends TestCase{ 13 | 14 | private val file1 = "./../yaml/cluster_complete.yaml" 15 | 
private val client = new DefaultKubernetesClient 16 | 17 | @Test 18 | def testParseYaml(): Unit = { 19 | 20 | val schema = JSONSchemaReader.readSchema(classOf[FlinkCluster]) 21 | println(schema) 22 | 23 | val crd = client.customResourceDefinitions().load(file1).get() 24 | val cluster = DataHelper.fromCRD(crd) 25 | println(cluster) 26 | 27 | val deployer = new KubernetesFlinkClusterDeployer(client, "FlinkCluster", "lightbend.com") 28 | val resources = deployer.getResourceList(cluster, cluster.getNamespace, DeploymentOptions()) 29 | resources.getItems.asScala.foreach(resource => println(resource)) 30 | } 31 | } 32 | -------------------------------------------------------------------------------- /project/Dependencies.scala: -------------------------------------------------------------------------------- 1 | import Versions._ 2 | import sbt._ 3 | 4 | object Dependencies { 5 | 6 | val abstractOperator = "io.radanalytics" % "abstract-operator" % abstractOperatorVersion 7 | val scalaHTTP = "org.scalaj" %% "scalaj-http" % scalaHTTPVersion 8 | val gson = "com.google.code.gson" % "gson" % gsonVersion 9 | val junit = "junit" % "junit" % junitVersion % Test 10 | } 11 | -------------------------------------------------------------------------------- /project/ModelGeneratorPlugin.scala: -------------------------------------------------------------------------------- 1 | import sbt._ 2 | import sbt.Keys._ 3 | import org.jsonschema2pojo._ 4 | import org.jsonschema2pojo.rules.RuleFactory 5 | import java.io.File 6 | 7 | import com.sun.codemodel.JCodeModel 8 | 9 | object ModelGeneratorPlugin extends AutoPlugin { 10 | 11 | object autoImport { 12 | lazy val generateModel = taskKey[Unit]("Generates the Model from a JSON Schema") 13 | lazy val modelSchemaLocation = settingKey[String]("The source for the schema definition") 14 | } 15 | 16 | import autoImport._ 17 | 18 | val GeneratedSrcLocation = "target/generated-sources/jsonschema2pojo/" 19 | 20 | override def projectSettings = Seq( 21 | generateModel := Def.taskDyn { 22 | Def.task { 23 | val schemaLocation = modelSchemaLocation.value 24 | val baseDir = baseDirectory.value 25 | generate(schemaLocation, baseDir) 26 | } 27 | }.value, 28 | unmanagedSourceDirectories in Compile += baseDirectory.value / GeneratedSrcLocation 29 | ) 30 | 31 | def generate(schemaResource: String, baseDir: File)= { 32 | 33 | val codeModel = new JCodeModel() 34 | val source = new File(schemaResource).toURI.toURL 35 | 36 | val outputPojoDirectory=new File(baseDir, GeneratedSrcLocation) 37 | if (!outputPojoDirectory.exists()) { 38 | outputPojoDirectory.mkdirs() 39 | } 40 | 41 | val config = new DefaultGenerationConfig() { 42 | override val isGenerateBuilders: Boolean = true // set config option by overriding method 43 | } 44 | 45 | val mapper = new SchemaMapper(new RuleFactory(config, new Jackson2Annotator(config), new SchemaStore()), new SchemaGenerator()) 46 | mapper.generate(codeModel, "FlinkCluster", "com.lightbend.operator.types", source) 47 | codeModel.build(outputPojoDirectory) 48 | } 49 | } 50 | 51 | 52 | -------------------------------------------------------------------------------- /project/Versions.scala: -------------------------------------------------------------------------------- 1 | object Versions { 2 | 3 | val scalaVersion = "2.12.8" 4 | val abstractOperatorVersion = "0.6.6" 5 | val jsonGeneratorVersion = "1.0.1" 6 | val junitVersion = "4.12" 7 | val scalaHTTPVersion = "2.4.1" 8 | val gsonVersion = "2.8.5" 9 | } 10 | 
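The build plumbing above (Dependencies.scala, ModelGeneratorPlugin.scala, Versions.scala) is tied together in build.sbt, which is not reproduced in this listing. Purely as a hedged sketch, the operator module could wire the plugin roughly as follows; the project layout, the schema path resolution, and the compile ordering shown here are assumptions rather than the actual build definition:

```scala
// Hypothetical build.sbt excerpt -- illustrative only, not the repository's real build file.
import Dependencies._

lazy val operator = (project in file("operator"))
  .enablePlugins(ModelGeneratorPlugin)           // plugin defined in project/ModelGeneratorPlugin.scala
  .settings(
    scalaVersion := Versions.scalaVersion,
    // point the generator at the JSON schema shipped with the repo
    // (path assumed to resolve from the build root)
    modelSchemaLocation := "schema/flinkCluster.json",
    // regenerate the FlinkCluster POJOs before compiling the Scala sources that use them
    compile in Compile := (compile in Compile).dependsOn(generateModel).value,
    libraryDependencies ++= Seq(abstractOperator, scalaHTTP, gson, junit)
  )
```

If wired this way, `sbt compile` would first run `generateModel`, emitting the `com.lightbend.operator.types` classes under target/generated-sources/jsonschema2pojo, a directory the plugin already adds to the unmanaged source directories.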
-------------------------------------------------------------------------------- /project/build.properties: -------------------------------------------------------------------------------- 1 | sbt.version = 1.2.8 -------------------------------------------------------------------------------- /project/plugins.sbt: -------------------------------------------------------------------------------- 1 | 2 | resolvers += "Bintray Repository" at "https://dl.bintray.com/shmishleniy/" 3 | 4 | libraryDependencies += "org.jsonschema2pojo" % "jsonschema2pojo-core" % "1.0.1" 5 | 6 | addSbtPlugin("com.eed3si9n" % "sbt-assembly" % "0.14.5") 7 | addSbtPlugin("com.typesafe.sbt" % "sbt-native-packager" % "1.3.4") 8 | addSbtPlugin("se.marcuslonnberg" % "sbt-docker" % "1.5.0") 9 | addSbtPlugin("net.virtual-void" % "sbt-dependency-graph" % "0.9.2") -------------------------------------------------------------------------------- /schema/flinkCluster.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "http://json-schema.org/draft-07/schema#", 3 | "description": "A Flink cluster configuration", 4 | "type": "object", 5 | "extends": { 6 | "type": "object", 7 | "existingJavaType": "io.radanalytics.operator.common.EntityInfo" 8 | }, 9 | "properties": { 10 | "master": { 11 | "$ref": "#/definitions/RCSpec" 12 | }, 13 | "worker": { 14 | "$ref": "#/definitions/RCSpec" 15 | }, 16 | "customImage": { 17 | "$ref": "#/definitions/Image" 18 | }, 19 | "env": { 20 | "$ref": "#/definitions/NameValue" 21 | }, 22 | "flinkConfiguration": { 23 | "$ref": "#/definitions/FlinkConfiguration" 24 | }, 25 | "labels" : { 26 | "existingJavaType" : "java.util.Map", 27 | "type" : "object" 28 | }, 29 | "mounts": { 30 | "type": "array", 31 | "items": { 32 | "$ref": "#/definitions/Mount" 33 | } 34 | } 35 | }, 36 | "definitions": { 37 | "RCSpec": { 38 | "type": "object", 39 | "properties": { 40 | "memory": { 41 | "type": "string" 42 | }, 43 | "cpu": { 44 | "type": "string" 45 | }, 46 | "inputs": { 47 | "type": "array", 48 | "items": { 49 | "type": "string" 50 | } 51 | } 52 | } 53 | }, 54 | "FlinkConfiguration": { 55 | "type": "object", 56 | "properties": { 57 | "num_taskmanagers": { 58 | "type": "integer" 59 | }, 60 | "taskmanager_slot": { 61 | "type": "integer" 62 | }, 63 | "parallelism": { 64 | "type": "integer" 65 | }, 66 | "metrics": { 67 | "type" : "boolean", 68 | "default": "true" 69 | }, 70 | "logging": { 71 | "type": "string" 72 | }, 73 | "checkpointing": { 74 | "type": "string" 75 | }, 76 | "savepointing": { 77 | "type": "string" 78 | } 79 | } 80 | }, 81 | "Mount": { 82 | "type": "object", 83 | "properties": { 84 | "resourcetype": { 85 | "type": "string" 86 | }, 87 | "resourcename": { 88 | "type": "string" 89 | }, 90 | "mountdirectory": { 91 | "type": "string" 92 | }, 93 | "envname": { 94 | "type": "string" 95 | } 96 | }, 97 | "required": ["resourcetype", "resourcename", "mountdirectory", "envname"] 98 | }, 99 | "Image": { 100 | "type": "object", 101 | "properties": { 102 | "imagename": { 103 | "type": "string" 104 | }, 105 | "pullpolicy": { 106 | "type": "string" 107 | } 108 | }, 109 | "required": ["imagename"] 110 | }, 111 | "NameValue": { 112 | "type": "array", 113 | "items": { 114 | "type": "object", 115 | "properties": { 116 | "name": { "type": "string" }, 117 | "value": { "type": "string" } 118 | }, 119 | "required": ["name", "value"] 120 | } 121 | } 122 | } 123 | } 124 | -------------------------------------------------------------------------------- /yaml/cluster.yaml: 
-------------------------------------------------------------------------------- 1 | apiVersion: lightbend.com/v1 2 | kind: FlinkCluster 3 | metadata: 4 | namespace: nm 5 | name: my-flink-cluster 6 | -------------------------------------------------------------------------------- /yaml/cluster_complete.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: lightbend.com/v1 2 | kind: FlinkCluster 3 | metadata: 4 | namespace: nm 5 | name: my-flink-cluster 6 | spec: 7 | customImage : 8 | imagename: myimage 9 | pullpolicy: "Always" 10 | labels: 11 | common-label-for-all-the-resources-operator-deploys/deployed-by: john 12 | common-label-to-be-replaced-on-some-resources: global-value 13 | env: 14 | - name: ENV1 15 | value: env1 16 | flinkConfiguration: 17 | num_taskmanagers: 2 18 | taskmanagers_slots: 2 19 | parallelism: 2 20 | logging : "flink-logging" 21 | checkpointing: "checkpvcname" 22 | savepointing: "savepvcname" 23 | worker: 24 | cpu: "2" 25 | master: 26 | cpu: "2" 27 | inputs: 28 | - input1 29 | - input2 30 | mounts: 31 | - resourcetype: "secret" 32 | resourcename: "myresource" 33 | mountdirectory: "directory" 34 | envname : "name" 35 | - resourcetype: "pvc" 36 | resourcename: "myresource1" 37 | mountdirectory: "directory1" 38 | envname : "name1" 39 | - resourcetype: "configmap" 40 | resourcename: "myresource2" 41 | mountdirectory: "directory2" 42 | envname : "name2" -------------------------------------------------------------------------------- /yaml/logging-configmap.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: flink-logging 5 | data: 6 | log4j-cli.properties: > 7 | log4j.rootLogger=INFO, file 8 | 9 | 10 | # Log all infos in the given file 11 | 12 | log4j.appender.file=org.apache.log4j.FileAppender 13 | 14 | log4j.appender.file.file=${log.file} 15 | 16 | log4j.appender.file.append=false 17 | 18 | log4j.appender.file.layout=org.apache.log4j.PatternLayout 19 | 20 | log4j.appender.file.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss,SSS} 21 | %-5p %-60c %x - %m%n 22 | 23 | # Log output from org.apache.flink.yarn to the console. This is used by the 24 | 25 | # CliFrontend class when using a per-job YARN cluster. 26 | 27 | log4j.logger.org.apache.flink.yarn=INFO, console 28 | 29 | log4j.logger.org.apache.flink.yarn.cli.FlinkYarnSessionCli=INFO, console 30 | 31 | log4j.logger.org.apache.hadoop=INFO, console 32 | 33 | 34 | log4j.appender.console=org.apache.log4j.ConsoleAppender 35 | 36 | log4j.appender.console.layout=org.apache.log4j.PatternLayout 37 | 38 | log4j.appender.console.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss,SSS} 39 | %-5p %-60c %x - %m%n 40 | 41 | 42 | # suppress the warning that hadoop native libraries are not loaded 43 | (irrelevant for the client) 44 | 45 | log4j.logger.org.apache.hadoop.util.NativeCodeLoader=OFF 46 | 47 | 48 | # suppress the irrelevant (wrong) warnings from the netty channel handler 49 | 50 | log4j.logger.org.apache.flink.shaded.akka.org.jboss.netty.channel.DefaultChannelPipeline=ERROR, 51 | file 52 | log4j-console.properties: > 53 | # This affects logging for both user code and Flink 54 | 55 | log4j.rootLogger=INFO, console 56 | 57 | 58 | # Uncomment this if you want to _only_ change Flink's logging 59 | 60 | #log4j.logger.org.apache.flink=INFO 61 | 62 | 63 | # The following lines keep the log level of common libraries/connectors on 64 | 65 | # log level INFO. The root logger does not override this. 
You have to 66 | manually 67 | 68 | # change the log levels here. 69 | 70 | log4j.logger.akka=INFO 71 | 72 | log4j.logger.org.apache.kafka=INFO 73 | 74 | log4j.logger.org.apache.hadoop=INFO 75 | 76 | log4j.logger.org.apache.zookeeper=INFO 77 | 78 | 79 | # Log all infos to the console 80 | 81 | log4j.appender.console=org.apache.log4j.ConsoleAppender 82 | 83 | log4j.appender.console.layout=org.apache.log4j.PatternLayout 84 | 85 | log4j.appender.console.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss,SSS} 86 | %-5p %-60c %x - %m%n 87 | 88 | 89 | # Suppress the irrelevant (wrong) warnings from the Netty channel handler 90 | 91 | log4j.logger.org.apache.flink.shaded.akka.org.jboss.netty.channel.DefaultChannelPipeline=ERROR, 92 | console 93 | log4j.properties: > 94 | # This affects logging for both user code and Flink 95 | 96 | log4j.rootLogger=INFO, file 97 | 98 | 99 | # Uncomment this if you want to _only_ change Flink's logging 100 | 101 | #log4j.logger.org.apache.flink=INFO 102 | 103 | 104 | # The following lines keep the log level of common libraries/connectors on 105 | 106 | # log level INFO. The root logger does not override this. You have to 107 | manually 108 | 109 | # change the log levels here. 110 | 111 | log4j.logger.akka=INFO 112 | 113 | log4j.logger.org.apache.kafka=INFO 114 | 115 | log4j.logger.org.apache.hadoop=INFO 116 | 117 | log4j.logger.org.apache.zookeeper=INFO 118 | 119 | 120 | # Log all infos in the given file 121 | 122 | log4j.appender.file=org.apache.log4j.FileAppender 123 | 124 | log4j.appender.file.file=${log.file} 125 | 126 | log4j.appender.file.append=false 127 | 128 | log4j.appender.file.layout=org.apache.log4j.PatternLayout 129 | 130 | log4j.appender.file.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss,SSS} 131 | %-5p %-60c %x - %m%n 132 | 133 | 134 | # Suppress the irrelevant (wrong) warnings from the Netty channel handler 135 | 136 | log4j.logger.org.apache.flink.shaded.akka.org.jboss.netty.channel.DefaultChannelPipeline=ERROR, 137 | file 138 | logback-console.xml: | 139 | 140 | 141 | 142 | %d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{60} %X{sourceThread} - %msg%n 143 | 144 | 145 | 146 | 147 | 148 | 149 | 150 | 151 | 152 | 153 | 154 | 155 | 156 | 159 | 160 | 161 | 162 | 163 | 164 | 165 | 166 | 167 | 168 | 169 | 170 | 171 | 172 | 173 | 174 | 175 | 176 | 177 | logback.xml: | 178 | 179 | 180 | ${log.file} 181 | false 182 | 183 | %d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{60} %X{sourceThread} - %msg%n 184 | 185 | 186 | 187 | 188 | 189 | 190 | 191 | 192 | 193 | 194 | 195 | 196 | 197 | 200 | 201 | 202 | 203 | 204 | 205 | 206 | 207 | 208 | 209 | 210 | 211 | 212 | 213 | 214 | 215 | 216 | 217 | 218 | --------------------------------------------------------------------------------
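The sample manifests above (cluster_complete.yaml and the flink-logging ConfigMap) can also be built programmatically against the generated model, which is essentially what YamlProcessingTest does. Below is a minimal sketch along those lines: the mount directory, env-var name, and cluster values are illustrative, it assumes getFlinkParameters falls back to sensible defaults for fields left unset (as the minimal cluster.yaml suggests it does), and, like the test, it constructs a DefaultKubernetesClient only to satisfy the deployer's constructor.

```scala
// Illustrative sketch only: object names and paths are hypothetical, not taken from the repository.
import com.lightbend.operator.{DeploymentOptions, KubernetesFlinkClusterDeployer}
import com.lightbend.operator.types.{FlinkCluster, FlinkConfiguration, Mount}
import io.fabric8.kubernetes.client.DefaultKubernetesClient
import scala.collection.JavaConverters._

object RenderClusterExample extends App {
  val cluster = new FlinkCluster()
  cluster.setName("my-flink-cluster")
  cluster.setNamespace("nm")

  val conf = new FlinkConfiguration()
  conf.setNumTaskmanagers(2)
  conf.setTaskmanagerSlot(2)
  cluster.setFlinkConfiguration(conf)

  // A "configmap" mount becomes a read-only volume plus an env var pointing at the mount
  // directory, mirroring the mounts section of cluster_complete.yaml.
  val loggingMount = new Mount()
    .withResourcetype("configmap")
    .withResourcename("flink-logging")
    .withMountdirectory("/opt/flink/log-conf")   // hypothetical path
    .withEnvname("LOGGING_CONF_DIR")             // hypothetical env var name
  cluster.setMounts(List(loggingMount).asJava)

  // getResourceList only assembles Kubernetes objects; nothing is created in a cluster here.
  val deployer = new KubernetesFlinkClusterDeployer(new DefaultKubernetesClient, "FlinkCluster", "lightbend.com")
  deployer.getResourceList(cluster, cluster.getNamespace, DeploymentOptions())
    .getItems.asScala.foreach(r => println(s"${r.getKind}: ${r.getMetadata.getName}"))
}
```

Printed output would list the jobmanager and taskmanager ReplicationControllers plus the jobmanager Service that the deployer assembles for the cluster.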