├── .dockerignore ├── .gitignore ├── .gitmodules ├── .travis.yml ├── Cargo.lock ├── Cargo.toml ├── Dockerfile ├── Dockerfile.apns2 ├── Dockerfile.fcm ├── Dockerfile.http ├── Dockerfile.web_push ├── Jenkinsfile ├── LICENSE ├── README.md ├── build.rs ├── config ├── apns2.toml ├── fcm.toml ├── http_requester.toml └── web_push.toml ├── deploy ├── apns2 │ ├── .helmignore │ ├── Chart.yaml │ ├── templates │ │ ├── NOTES.txt │ │ ├── _helpers.tpl │ │ ├── apns2.toml │ │ ├── configmap.yaml │ │ ├── deployment.yaml │ │ ├── ingress.yaml │ │ └── service.yaml │ └── values.yaml ├── fcm │ ├── .helmignore │ ├── Chart.yaml │ ├── templates │ │ ├── NOTES.txt │ │ ├── _helpers.tpl │ │ ├── configmap.yaml │ │ ├── deployment.yaml │ │ ├── fcm.toml │ │ ├── ingress.yaml │ │ └── service.yaml │ └── values.yaml ├── http │ ├── .helmignore │ ├── Chart.yaml │ ├── templates │ │ ├── NOTES.txt │ │ ├── _helpers.tpl │ │ ├── configmap.yaml │ │ ├── deployment.yaml │ │ ├── http.toml │ │ ├── ingress.yaml │ │ └── service.yaml │ └── values.yaml ├── staging.yaml └── web-push │ ├── .helmignore │ ├── Chart.yaml │ ├── templates │ ├── NOTES.txt │ ├── _helpers.tpl │ ├── configmap.yaml │ ├── deployment.yaml │ ├── ingress.yaml │ ├── service.yaml │ └── web_push.toml │ └── values.yaml ├── docker-compose.yml ├── examples └── send_http_request.rs └── src ├── apns2 ├── consumer.rs ├── main.rs ├── notifier.rs └── producer.rs ├── common ├── config.rs ├── events │ └── mod.rs ├── kafka │ ├── mod.rs │ ├── request_consumer.rs │ └── response_producer.rs ├── lib.rs ├── logger.rs ├── metrics.rs └── system.rs ├── fcm ├── consumer.rs ├── main.rs ├── notifier.rs └── producer.rs ├── http_requester ├── consumer.rs ├── main.rs ├── producer.rs └── requester.rs └── web_push ├── consumer.rs ├── main.rs ├── notifier.rs └── producer.rs /.dockerignore: -------------------------------------------------------------------------------- 1 | target 2 | -------------------------------------------------------------------------------- /.gitignore: 
-------------------------------------------------------------------------------- 1 | src/common/events/* 2 | !src/common/events/mod.rs 3 | target 4 | -------------------------------------------------------------------------------- /.gitmodules: -------------------------------------------------------------------------------- 1 | [submodule "third_party/events"] 2 | path = third_party/events 3 | url = https://github.com/xray-tech/xorc-events.git 4 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: rust 2 | before_install: 3 | - sudo add-apt-repository -y ppa:maarten-fonville/protobuf 4 | - sudo apt-get -y update 5 | - sudo apt-get -y install libssl-dev protobuf-compiler libffi-dev build-essential python wget 6 | env: 7 | - RUST_BACKTRACE=1 8 | 9 | matrix: 10 | include: 11 | - rust: beta 12 | - rust: stable 13 | - rust: nightly 14 | 15 | cache: 16 | apt: true 17 | directories: 18 | - target/debug/deps 19 | - target/debug/build 20 | 21 | script: 22 | - cargo test 23 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "xorc-notifications" 3 | version = "0.1.0" 4 | authors = ["Julius de Bruijn "] 5 | license = "Apache-2.0" 6 | readme = "README.md" 7 | description = "A consumer to send push notifications from Kafka" 8 | keywords = ["apns", "fcm", "web-push", "consumer", "kafka"] 9 | repository = "https://github.com/xray-tech/xorc-notifications" 10 | homepage = "https://github.com/xray-tech/xorc-notifications" 11 | 12 | [dependencies] 13 | a2 = "0.3" 14 | fcm = "0.6" 15 | web-push = "0.4" 16 | protobuf = { version = "2.0", features = ["with-bytes"] } 17 | bytes = "0.4" 18 | chan-signal = "0.3" 19 | chan = "0.1" 20 | argparse = "0.2" 21 | slog = { version = "2.3", features = 
["erased-serde", "nested-values"] } 22 | slog-json = "2.2" 23 | slog-async = "2.3" 24 | slog-term = "2.4" 25 | slog-scope = "4.0" 26 | toml = "0.4" 27 | serde = "1.0" 28 | serde_derive = "1.0" 29 | time = "0.1" 30 | hyper = "0.12" 31 | hyper-tls = "0.3" 32 | http = "0.1" 33 | prometheus = "0.4" 34 | lazy_static = "1.0" 35 | heck = "0.3" 36 | futures = "0.1" 37 | serde_json = "1.0" 38 | tokio = "0.1" 39 | tokio-signal = "0.1" 40 | tokio-timer = "0.1" 41 | rdkafka = "0.17" 42 | chrono = "0.4" 43 | base64 = "0.6" 44 | erased-serde = "0.3" 45 | regex = "1" 46 | 47 | [build-dependencies] 48 | protoc-rust = "2.0" 49 | 50 | [dev-dependencies] 51 | clap = "2.32" 52 | 53 | [lib] 54 | name = "common" 55 | path = "src/common/lib.rs" 56 | 57 | [[bin]] 58 | name = "apns2" 59 | path = "src/apns2/main.rs" 60 | 61 | [[bin]] 62 | name = "fcm" 63 | path = "src/fcm/main.rs" 64 | 65 | [[bin]] 66 | name = "web_push" 67 | path = "src/web_push/main.rs" 68 | 69 | [[bin]] 70 | name = "http_requester" 71 | path = "src/http_requester/main.rs" 72 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM rust:latest 2 | MAINTAINER Julius de Bruijn 3 | 4 | WORKDIR /usr/src 5 | ENV USER root 6 | ENV RUST_BACKTRACE 1 7 | 8 | RUN apt-get -y update 9 | RUN apt-get -y install libssl-dev protobuf-compiler libffi-dev build-essential python 10 | 11 | ENV PROTOC /usr/bin/protoc 12 | ENV PROTOC_INCLUDE /usr/include 13 | 14 | RUN mkdir -p /usr/src/xorc-notifications 15 | RUN mkdir -p /etc/xorc-notifications 16 | COPY Cargo.toml Cargo.lock build.rs /usr/src/xorc-notifications/ 17 | COPY src /usr/src/xorc-notifications/src 18 | COPY third_party /usr/src/xorc-notifications/third_party 19 | COPY third_party/events /usr/src/xorc-notifications/third_party/events 20 | COPY config /usr/src/xorc-notifications/config 21 | 22 | ENV PATH="/root/.cargo/bin:${PATH}" 23 | 24 | WORKDIR 
/usr/src/xorc-notifications 25 | RUN cargo build --release 26 | 27 | CMD "echo 'Xorc notifications base image'" 28 | -------------------------------------------------------------------------------- /Dockerfile.apns2: -------------------------------------------------------------------------------- 1 | ARG git_commit 2 | FROM eu.gcr.io/xray2poc/xorc-notifications:$git_commit 3 | MAINTAINER Julius de Bruijn 4 | 5 | RUN cp target/release/apns2 /bin 6 | RUN chmod a+x /bin/apns2 7 | 8 | ENV CONFIG "/etc/xorc-notifications/apns2.toml" 9 | 10 | WORKDIR / 11 | 12 | CMD "/bin/apns2" 13 | -------------------------------------------------------------------------------- /Dockerfile.fcm: -------------------------------------------------------------------------------- 1 | ARG git_commit 2 | FROM eu.gcr.io/xray2poc/xorc-notifications:$git_commit 3 | MAINTAINER Julius de Bruijn 4 | 5 | RUN cp target/release/fcm /bin 6 | RUN chmod a+x /bin/fcm 7 | 8 | ENV CONFIG "/etc/xorc-notifications/fcm.toml" 9 | 10 | WORKDIR / 11 | 12 | CMD "/bin/fcm" 13 | -------------------------------------------------------------------------------- /Dockerfile.http: -------------------------------------------------------------------------------- 1 | ARG git_commit 2 | FROM eu.gcr.io/xray2poc/xorc-notifications:$git_commit 3 | MAINTAINER Julius de Bruijn 4 | 5 | RUN cp target/release/http_requester /bin 6 | RUN chmod a+x /bin/http_requester 7 | 8 | ENV CONFIG "/etc/xorc-notifications/http_requester.toml" 9 | 10 | WORKDIR / 11 | 12 | CMD "/bin/http_requester" 13 | -------------------------------------------------------------------------------- /Dockerfile.web_push: -------------------------------------------------------------------------------- 1 | ARG git_commit 2 | FROM eu.gcr.io/xray2poc/xorc-notifications:$git_commit 3 | MAINTAINER Julius de Bruijn 4 | 5 | RUN cp target/release/web_push /bin 6 | RUN chmod a+x /bin/web_push 7 | 8 | ENV CONFIG "/etc/xorc-notifications/web_push.toml" 9 | 10 | WORKDIR / 11 | 
12 | CMD "/bin/web_push" 13 | -------------------------------------------------------------------------------- /Jenkinsfile: -------------------------------------------------------------------------------- 1 | def podConfig = """ 2 | apiVersion: v1 3 | kind: Pod 4 | metadata: 5 | name: dind 6 | spec: 7 | serviceAccountName: jenkins 8 | containers: 9 | - name: docker 10 | image: docker/compose:1.21.2 11 | command: ['cat'] 12 | tty: true 13 | env: 14 | - name: DOCKER_HOST 15 | value: tcp://localhost:2375 16 | volumeMounts: 17 | - name: cargo 18 | mountPath: /root/.cargo 19 | - name: cargo 20 | mountPath: /usr/src/xorc-notifications/target 21 | - name: service-account 22 | mountPath: /etc/service-account 23 | - name: dind-daemon 24 | image: docker:18.03.1-dind 25 | securityContext: 26 | privileged: true 27 | volumeMounts: 28 | - name: docker-graph-storage 29 | mountPath: /var/lib/docker 30 | resources: 31 | requests: 32 | cpu: 1 33 | - name: helm 34 | image: lachlanevenson/k8s-helm:v2.10.0 35 | command: ['cat'] 36 | tty: true 37 | volumes: 38 | - name: docker-graph-storage 39 | persistentVolumeClaim: 40 | claimName: ci-graph-storage 41 | - name: cargo 42 | persistentVolumeClaim: 43 | claimName: ci-cargo-storage 44 | - name: service-account 45 | secret: 46 | secretName: service-account 47 | """ 48 | 49 | def label = "xray-notifications-${UUID.randomUUID().toString()}" 50 | 51 | podTemplate(name: 'docker', label: label, yaml: podConfig) { 52 | node(label) { 53 | def scmVars = checkout([ 54 | $class: 'GitSCM', 55 | branches: scm.branches, 56 | doGenerateSubmoduleConfigurations: false, 57 | extensions: [[ 58 | $class: 'SubmoduleOption', 59 | disableSubmodules: false, 60 | parentCredentials: true, 61 | recursiveSubmodules: true, 62 | reference: '', 63 | trackingSubmodules: false 64 | ]], 65 | submoduleCfg: [], 66 | userRemoteConfigs: scm.userRemoteConfigs 67 | ]) 68 | 69 | def gitCommit = scmVars.GIT_COMMIT 70 | 71 | container('docker') { 72 | stage('Build base image') { 
73 | sh("docker login -u _json_key --password-stdin https://eu.gcr.io < /etc/service-account/xray2poc.json") 74 | def imageName = "eu.gcr.io/xray2poc/xorc-notifications" 75 | def image = "${imageName}:${gitCommit}" 76 | 77 | sh("docker build -t ${image} .") 78 | sh("docker push ${image}") 79 | } 80 | 81 | stage('Build consumer images') { 82 | parallel( 83 | apns2: { 84 | sh("docker login -u _json_key --password-stdin https://eu.gcr.io < /etc/service-account/xray2poc.json") 85 | def image = "eu.gcr.io/xray2poc/apns2:${gitCommit}" 86 | sh("docker build -t ${image} -f Dockerfile.apns2 --build-arg git_commit=$gitCommit .") 87 | sh("docker push ${image}") 88 | }, 89 | fcm: { 90 | sh("docker login -u _json_key --password-stdin https://eu.gcr.io < /etc/service-account/xray2poc.json") 91 | def image = "eu.gcr.io/xray2poc/fcm:${gitCommit}" 92 | sh("docker build -t ${image} -f Dockerfile.fcm --build-arg git_commit=$gitCommit .") 93 | sh("docker push ${image}") 94 | }, 95 | web_push: { 96 | sh("docker login -u _json_key --password-stdin https://eu.gcr.io < /etc/service-account/xray2poc.json") 97 | def image = "eu.gcr.io/xray2poc/web_push:${gitCommit}" 98 | sh("docker build -t ${image} -f Dockerfile.web_push --build-arg git_commit=$gitCommit .") 99 | sh("docker push ${image}") 100 | }, 101 | http: { 102 | sh("docker login -u _json_key --password-stdin https://eu.gcr.io < /etc/service-account/xray2poc.json") 103 | def image = "eu.gcr.io/xray2poc/http:${gitCommit}" 104 | sh("docker build -t ${image} -f Dockerfile.http --build-arg git_commit=$gitCommit .") 105 | sh("docker push ${image}") 106 | } 107 | ) 108 | } 109 | 110 | if (env.BRANCH_NAME == "master") { 111 | stage("Deploy"){ 112 | container('helm') { 113 | parallel( 114 | http: { 115 | sh("helm upgrade -i --wait --set image.tag=${gitCommit} staging-http ./deploy/http -f deploy/staging.yaml") 116 | }, 117 | apns: { 118 | sh("helm upgrade -i --wait --set image.tag=${gitCommit} staging-apns2 ./deploy/apns2 -f 
deploy/staging.yaml") 119 | }, 120 | fcm: { 121 | sh("helm upgrade -i --wait --set image.tag=${gitCommit} staging-fcm ./deploy/fcm -f deploy/staging.yaml") 122 | }, 123 | web_push: { 124 | sh("helm upgrade -i --wait --set image.tag=${gitCommit} staging-web-push ./deploy/web-push -f deploy/staging.yaml") 125 | } 126 | ) 127 | } 128 | } 129 | } 130 | } 131 | } 132 | } 133 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 
29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 
61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 
122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. 
In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. 
We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # XORC Notifications 2 | 3 | [![Travis Build Status](https://api.travis-ci.org/xray-tech/xorc-notifications.svg?branch=master)](https://travis-ci.org/xray-tech/xorc-notifications) 4 | [![crates.io](https://img.shields.io/crates/v/xorc-notifications.svg?style=flat-square)](https://crates.io/crates/xorc-notifications) 5 | [![Apache licensed](https://img.shields.io/badge/license-apache-blue.svg)](./LICENSE) 6 | 7 | A collection of services consuming [PushNotification 8 | events](https://github.com/xray-tech/xorc-events/blob/master/notification/push_notification.proto) 9 | to send push notifications to apns2/fcm/web push and [Application 10 | events](https://github.com/xray-tech/xorc-events/blob/master/application.proto) to receive configuration. 11 | 12 | The systems are by default multi-tenant for sending push notifications to multiple different applications. 
13 | 14 | - [apns2](src/apns2) for Apple notifications 15 | - [fcm](src/fcm) for Google notifications 16 | - [web_push](src/web_push) for Web push notifications 17 | - [http_requester](src/http_requester) for generic HTTP requests 18 | - [common](src/common) a library used by all four consumers 19 | 20 | ## Installation 21 | 22 | The consumers can be installed through Cargo: 23 | 24 | ```bash 25 | > cargo install xorc-notifications 26 | ``` 27 | 28 | It installs four different binaries: `apns2`, `fcm`, `web_push` and 29 | `http_requester`, all named after the systems they present. 30 | 31 | ### Environment variables 32 | 33 | variable | description | example 34 | -------------|-------------------------------------|---------------------------------- 35 | `CONFIG` | The configuration file location | `/etc/xorc-notifications/config.toml` 36 | `LOG_FORMAT` | Log output format | `text` or `json`, default: `text` 37 | `RUST_ENV` | The program environment | `test`, `development`, `staging` or `production`, default: `development` 38 | 39 | ### Required options 40 | 41 | section | key | description | example 42 | ----------|-----------------|--------------------------------------------|---------------------------------- 43 | `[kafka]` | `input_topic` | Notification input topic | `"production.notifications.apns"` 44 | `[kafka]` | `config_topic` | Application configuration topic | `"production.applications"` 45 | `[kafka]` | `output_topic` | Notification response topic | `"production.oam"` 46 | `[kafka]` | `group_id` | Consumer group ID | `"production.consumers.apns"` 47 | `[kafka]` | `brokers` | Comma-separated list of Kafka brokers | `"kafka1:9092,kafka2:9092"` 48 | `[kafka]` | `consumer_type` | Decides the input protobuf deserialization | `push_notification` for `PushNotification`, `http_request` for `HttpRequest` 49 | 50 | ## Dependencies 51 | 52 | The systems are written with Rust and it should always be possible to compile 53 | with the latest stable version.
The de-facto way of getting the latest Rust is 54 | with [rustup](https://rustup.rs/): 55 | 56 | ```bash 57 | > curl https://sh.rustup.rs -sSf | sh 58 | > rustup update 59 | > rustup default stable 60 | ``` 61 | 62 | To check that everything works: 63 | 64 | ```bash 65 | > rustc --version 66 | rustc 1.30.0 (da5f414c2 2018-10-24) 67 | > cargo --version 68 | cargo 1.30.0 (36d96825d 2018-10-24) 69 | ``` 70 | 71 | Some of the crates used in the project have dependencies to certain system 72 | libraries and tools, for Ubuntu 18.04 you get them with: 73 | 74 | ```bash 75 | > sudo apt install build-essential libssl-dev automake ca-certificates libffi-dev protobuf-compiler 76 | ``` 77 | 78 | ## Development setup 79 | 80 | The project uses [Protocol 81 | Buffers](https://developers.google.com/protocol-buffers/) for event schemas. 82 | `cargo build` should generate the corresponding Rust structs to be used in the 83 | code. By default the protobuf classes are included as a submodule, which must be 84 | imported to the project tree: 85 | 86 | ```bash 87 | > git submodule update --init 88 | ``` 89 | 90 | Configuration examples for all the consumers are in [config](config/). Create a 91 | copy from an example config removing the ending, and modify it to suit your test 92 | setup. 93 | 94 | Running apns2: 95 | 96 | ```bash 97 | > env CONFIG=./config/apns2.toml cargo run --bin apns2 98 | ``` 99 | 100 | Running fcm: 101 | 102 | ```bash 103 | > env CONFIG=./config/fcm.toml cargo run --bin fcm 104 | ``` 105 | 106 | Running web_push: 107 | 108 | ```bash 109 | > env CONFIG=./config/web_push.toml cargo run --bin web_push 110 | ``` 111 | 112 | Running http_requester: 113 | 114 | ```bash 115 | > env CONFIG=./config/http_requester.toml cargo run --bin http_requester 116 | ``` 117 | 118 | ## Example scripts for testing purposes 119 | 120 | The [examples](examples/) directory contains helper scripts for testing the 121 | consumers. 
122 | 123 | To build them: 124 | 125 | ```bash 126 | cargo build --release --examples 127 | ``` 128 | 129 | The executables are in `target/release` directory. 130 | 131 | ## Configuration 132 | The system's configuration is handled through a 133 | [toml](https://github.com/toml-lang/toml) file and an environment variable. 134 | 135 | ### Code Architecture 136 | 137 | - All four systems use an asynchronous Kafka consumer consuming the `input_topic`, 138 | requesting the external service with a client, parsing the response and 139 | responding back to the caller. 140 | - System should implement the `EventHandler` 141 | ([request_consumer.rs](src/common/kafka/request_consumer.rs)) and respond with 142 | `ResponseProducer` 143 | ([response_producer.rs](src/common/kafka/response_producer.rs)). 144 | - Consumer should keep track of connections for different applications using 145 | the configuration values from the `config_topic`. 146 | - In general, none of the main code should ever block. 147 | - All consumers talk HTTP and when requested, return Prometheus statistics 148 | -------------------------------------------------------------------------------- /build.rs: -------------------------------------------------------------------------------- 1 | extern crate protoc_rust; 2 | 3 | fn main() { 4 | protoc_rust::run(protoc_rust::Args { 5 | out_dir: "src/common/events", 6 | input: &[ 7 | "third_party/events/common/rpc.proto", 8 | "third_party/events/common/rpc_decoder.proto", 9 | "third_party/events/application.proto", 10 | "third_party/events/http/http_request.proto", 11 | "third_party/events/http/http_response.proto", 12 | "third_party/events/notification/push_notification.proto", 13 | "third_party/events/notification/apple_notification.proto", 14 | "third_party/events/notification/google_notification.proto", 15 | "third_party/events/notification/webpush_notification.proto", 16 | "third_party/events/notification/push_result.proto", 17 | ], 18 | includes:
&["third_party/events/"], 19 | customize: protoc_rust::Customize { 20 | ..Default::default() 21 | }, 22 | }).expect("protoc"); 23 | } 24 | -------------------------------------------------------------------------------- /config/apns2.toml: -------------------------------------------------------------------------------- 1 | [kafka] 2 | input_topic = "rpc.push-notification" 3 | config_topic = "config.push-notification" 4 | output_topic = "rpc.responses" 5 | group_id = "test.consumers.apns" 6 | brokers = "kafka:9092" 7 | -------------------------------------------------------------------------------- /config/fcm.toml: -------------------------------------------------------------------------------- 1 | [kafka] 2 | input_topic = "rpc.push-notification" 3 | config_topic = "config.push-notification" 4 | output_topic = "rpc.responses" 5 | group_id = "test.consumers.fcm" 6 | brokers = "kafka:9092" 7 | -------------------------------------------------------------------------------- /config/http_requester.toml: -------------------------------------------------------------------------------- 1 | [kafka] 2 | input_topic = "rpc.http" 3 | config_topic = "config.push-notification" 4 | output_topic = "rpc.responses" 5 | group_id = "http-requester-test" 6 | brokers = "kafka:9092" 7 | -------------------------------------------------------------------------------- /config/web_push.toml: -------------------------------------------------------------------------------- 1 | [kafka] 2 | input_topic = "rpc.push-notification" 3 | config_topic = "config.push-notification" 4 | output_topic = "rpc.responses" 5 | group_id = "test.consumers.webpush" 6 | brokers = "kafka:9092" 7 | -------------------------------------------------------------------------------- /deploy/apns2/.helmignore: -------------------------------------------------------------------------------- 1 | # Patterns to ignore when building packages. 
2 | # This supports shell glob matching, relative path matching, and 3 | # negation (prefixed with !). Only one pattern per line. 4 | .DS_Store 5 | # Common VCS dirs 6 | .git/ 7 | .gitignore 8 | .bzr/ 9 | .bzrignore 10 | .hg/ 11 | .hgignore 12 | .svn/ 13 | # Common backup files 14 | *.swp 15 | *.bak 16 | *.tmp 17 | *~ 18 | # Various IDEs 19 | .project 20 | .idea/ 21 | *.tmproj 22 | -------------------------------------------------------------------------------- /deploy/apns2/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | appVersion: "1.0" 3 | description: A Helm chart for Kubernetes 4 | name: apns2 5 | version: 0.1.0 6 | -------------------------------------------------------------------------------- /deploy/apns2/templates/NOTES.txt: -------------------------------------------------------------------------------- 1 | 1. Get the application URL by running these commands: 2 | {{- if .Values.ingress.enabled }} 3 | {{- range .Values.ingress.hosts }} 4 | http{{ if $.Values.ingress.tls }}s{{ end }}://{{ . }}{{ $.Values.ingress.path }} 5 | {{- end }} 6 | {{- else if contains "NodePort" .Values.service.type }} 7 | export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "apns2.fullname" . }}) 8 | export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") 9 | echo http://$NODE_IP:$NODE_PORT 10 | {{- else if contains "LoadBalancer" .Values.service.type }} 11 | NOTE: It may take a few minutes for the LoadBalancer IP to be available. 12 | You can watch the status of by running 'kubectl get svc -w {{ template "apns2.fullname" . }}' 13 | export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "apns2.fullname" . 
}} -o jsonpath='{.status.loadBalancer.ingress[0].ip}') 14 | echo http://$SERVICE_IP:{{ .Values.service.port }} 15 | {{- else if contains "ClusterIP" .Values.service.type }} 16 | export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ template "apns2.name" . }},release={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") 17 | echo "Visit http://127.0.0.1:8080 to use your application" 18 | kubectl port-forward $POD_NAME 8080:80 19 | {{- end }} 20 | -------------------------------------------------------------------------------- /deploy/apns2/templates/_helpers.tpl: -------------------------------------------------------------------------------- 1 | {{/* vim: set filetype=mustache: */}} 2 | {{/* 3 | Expand the name of the chart. 4 | */}} 5 | {{- define "apns2.name" -}} 6 | {{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} 7 | {{- end -}} 8 | 9 | {{/* 10 | Create a default fully qualified app name. 11 | We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 12 | If release name contains chart name it will be used as a full name. 13 | */}} 14 | {{- define "apns2.fullname" -}} 15 | {{- if .Values.fullnameOverride -}} 16 | {{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} 17 | {{- else -}} 18 | {{- $name := default .Chart.Name .Values.nameOverride -}} 19 | {{- if contains $name .Release.Name -}} 20 | {{- .Release.Name | trunc 63 | trimSuffix "-" -}} 21 | {{- else -}} 22 | {{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} 23 | {{- end -}} 24 | {{- end -}} 25 | {{- end -}} 26 | 27 | {{/* 28 | Create chart name and version as used by the chart label. 
29 | */}} 30 | {{- define "apns2.chart" -}} 31 | {{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} 32 | {{- end -}} 33 | -------------------------------------------------------------------------------- /deploy/apns2/templates/apns2.toml: -------------------------------------------------------------------------------- 1 | {{- define "apns2.config" -}} 2 | [kafka] 3 | input_topic = "rpc.push-notification" 4 | config_topic = "config.push-notification" 5 | output_topic = "rpc.responses" 6 | group_id = "{{.Values.kafka.group_prefix}}-apns2" 7 | brokers = "{{.Values.kafka.endpoint}}" 8 | {{- end -}} 9 | -------------------------------------------------------------------------------- /deploy/apns2/templates/configmap.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: {{ template "apns2.fullname" . }} 5 | data: 6 | apns2.toml: {{ include "apns2.config" . | quote }} 7 | -------------------------------------------------------------------------------- /deploy/apns2/templates/deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1beta2 2 | kind: Deployment 3 | metadata: 4 | name: {{ template "apns2.fullname" . }} 5 | labels: 6 | app: {{ template "apns2.name" . }} 7 | chart: {{ template "apns2.chart" . }} 8 | release: {{ .Release.Name }} 9 | heritage: {{ .Release.Service }} 10 | spec: 11 | replicas: {{ .Values.replicaCount }} 12 | selector: 13 | matchLabels: 14 | app: {{ template "apns2.name" . }} 15 | release: {{ .Release.Name }} 16 | template: 17 | metadata: 18 | labels: 19 | app: {{ template "apns2.name" . 
}} 20 | release: {{ .Release.Name }} 21 | annotations: 22 | prometheus.io/scrape: "true" 23 | prometheus.io/port: "80" 24 | spec: 25 | containers: 26 | - name: {{ .Chart.Name }} 27 | image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" 28 | imagePullPolicy: {{ .Values.image.pullPolicy }} 29 | ports: 30 | - name: http 31 | containerPort: 80 32 | protocol: TCP 33 | livenessProbe: 34 | httpGet: 35 | path: /metrics 36 | port: http 37 | env: 38 | - name: PORT 39 | value: "80" 40 | - name: RUST_BACKTRACE 41 | value: "1" 42 | - name: CONFIG 43 | value: "/etc/xorc-notifications/apns2.toml" 44 | - name: LOG_FORMAT 45 | value: "json" 46 | - name: RUST_ENV 47 | value: "{{ .Values.system.environment }}" 48 | volumeMounts: 49 | - name: config 50 | mountPath: /etc/xorc-notifications/apns2.toml 51 | subPath: apns2.toml 52 | readinessProbe: 53 | httpGet: 54 | path: /metrics 55 | port: http 56 | resources: 57 | {{ toYaml .Values.resources | indent 12 }} 58 | {{- with .Values.nodeSelector }} 59 | nodeSelector: 60 | {{ toYaml . | indent 8 }} 61 | {{- end }} 62 | {{- with .Values.affinity }} 63 | affinity: 64 | {{ toYaml . | indent 8 }} 65 | {{- end }} 66 | {{- with .Values.tolerations }} 67 | tolerations: 68 | {{ toYaml . | indent 8 }} 69 | {{- end }} 70 | volumes: 71 | - name: config 72 | configMap: 73 | name: {{ template "apns2.fullname" . }} 74 | -------------------------------------------------------------------------------- /deploy/apns2/templates/ingress.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.ingress.enabled -}} 2 | {{- $fullName := include "apns2.fullname" . -}} 3 | {{- $ingressPath := .Values.ingress.path -}} 4 | apiVersion: extensions/v1beta1 5 | kind: Ingress 6 | metadata: 7 | name: {{ $fullName }} 8 | labels: 9 | app: {{ template "apns2.name" . }} 10 | chart: {{ template "apns2.chart" . 
}} 11 | release: {{ .Release.Name }} 12 | heritage: {{ .Release.Service }} 13 | {{- with .Values.ingress.annotations }} 14 | annotations: 15 | {{ toYaml . | indent 4 }} 16 | {{- end }} 17 | spec: 18 | {{- if .Values.ingress.tls }} 19 | tls: 20 | {{- range .Values.ingress.tls }} 21 | - hosts: 22 | {{- range .hosts }} 23 | - {{ . }} 24 | {{- end }} 25 | secretName: {{ .secretName }} 26 | {{- end }} 27 | {{- end }} 28 | rules: 29 | {{- range .Values.ingress.hosts }} 30 | - host: {{ . }} 31 | http: 32 | paths: 33 | - path: {{ $ingressPath }} 34 | backend: 35 | serviceName: {{ $fullName }} 36 | servicePort: http 37 | {{- end }} 38 | {{- end }} 39 | -------------------------------------------------------------------------------- /deploy/apns2/templates/service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: {{ template "apns2.fullname" . }} 5 | labels: 6 | app: {{ template "apns2.name" . }} 7 | chart: {{ template "apns2.chart" . }} 8 | release: {{ .Release.Name }} 9 | heritage: {{ .Release.Service }} 10 | spec: 11 | type: {{ .Values.service.type }} 12 | ports: 13 | - port: 80 14 | targetPort: http 15 | protocol: TCP 16 | name: http 17 | selector: 18 | app: {{ template "apns2.name" . }} 19 | release: {{ .Release.Name }} 20 | -------------------------------------------------------------------------------- /deploy/apns2/values.yaml: -------------------------------------------------------------------------------- 1 | # Default values for apns2. 2 | # This is a YAML-formatted file. 3 | # Declare variables to be passed into your templates. 
4 | 5 | replicaCount: 2 6 | 7 | image: 8 | repository: eu.gcr.io/xray2poc/apns2 9 | tag: stable 10 | pullPolicy: IfNotPresent 11 | 12 | service: 13 | type: ClusterIP 14 | port: 80 15 | 16 | ingress: 17 | enabled: false 18 | annotations: {} 19 | # kubernetes.io/ingress.class: nginx 20 | # kubernetes.io/tls-acme: "true" 21 | path: / 22 | hosts: 23 | - chart-example.local 24 | tls: [] 25 | # - secretName: chart-example-tls 26 | # hosts: 27 | # - chart-example.local 28 | 29 | resources: 30 | # We usually recommend not to specify default resources and to leave this as a conscious 31 | # choice for the user. This also increases chances charts run on environments with little 32 | # resources, such as Minikube. If you do want to specify resources, uncomment the following 33 | # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 34 | # limits: 35 | # cpu: 100m 36 | # memory: 128Mi 37 | requests: 38 | cpu: 100m 39 | memory: 64Mi 40 | 41 | nodeSelector: {} 42 | 43 | tolerations: [] 44 | 45 | affinity: {} 46 | -------------------------------------------------------------------------------- /deploy/fcm/.helmignore: -------------------------------------------------------------------------------- 1 | # Patterns to ignore when building packages. 2 | # This supports shell glob matching, relative path matching, and 3 | # negation (prefixed with !). Only one pattern per line. 
4 | .DS_Store 5 | # Common VCS dirs 6 | .git/ 7 | .gitignore 8 | .bzr/ 9 | .bzrignore 10 | .hg/ 11 | .hgignore 12 | .svn/ 13 | # Common backup files 14 | *.swp 15 | *.bak 16 | *.tmp 17 | *~ 18 | # Various IDEs 19 | .project 20 | .idea/ 21 | *.tmproj 22 | -------------------------------------------------------------------------------- /deploy/fcm/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | appVersion: "1.0" 3 | description: A Helm chart for Kubernetes 4 | name: fcm 5 | version: 0.1.0 6 | -------------------------------------------------------------------------------- /deploy/fcm/templates/NOTES.txt: -------------------------------------------------------------------------------- 1 | 1. Get the application URL by running these commands: 2 | {{- if .Values.ingress.enabled }} 3 | {{- range .Values.ingress.hosts }} 4 | http{{ if $.Values.ingress.tls }}s{{ end }}://{{ . }}{{ $.Values.ingress.path }} 5 | {{- end }} 6 | {{- else if contains "NodePort" .Values.service.type }} 7 | export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "fcm.fullname" . }}) 8 | export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") 9 | echo http://$NODE_IP:$NODE_PORT 10 | {{- else if contains "LoadBalancer" .Values.service.type }} 11 | NOTE: It may take a few minutes for the LoadBalancer IP to be available. 12 | You can watch the status of by running 'kubectl get svc -w {{ template "fcm.fullname" . }}' 13 | export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "fcm.fullname" . 
}} -o jsonpath='{.status.loadBalancer.ingress[0].ip}') 14 | echo http://$SERVICE_IP:{{ .Values.service.port }} 15 | {{- else if contains "ClusterIP" .Values.service.type }} 16 | export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ template "fcm.name" . }},release={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") 17 | echo "Visit http://127.0.0.1:8080 to use your application" 18 | kubectl port-forward $POD_NAME 8080:80 19 | {{- end }} 20 | -------------------------------------------------------------------------------- /deploy/fcm/templates/_helpers.tpl: -------------------------------------------------------------------------------- 1 | {{/* vim: set filetype=mustache: */}} 2 | {{/* 3 | Expand the name of the chart. 4 | */}} 5 | {{- define "fcm.name" -}} 6 | {{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} 7 | {{- end -}} 8 | 9 | {{/* 10 | Create a default fully qualified app name. 11 | We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 12 | If release name contains chart name it will be used as a full name. 13 | */}} 14 | {{- define "fcm.fullname" -}} 15 | {{- if .Values.fullnameOverride -}} 16 | {{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} 17 | {{- else -}} 18 | {{- $name := default .Chart.Name .Values.nameOverride -}} 19 | {{- if contains $name .Release.Name -}} 20 | {{- .Release.Name | trunc 63 | trimSuffix "-" -}} 21 | {{- else -}} 22 | {{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} 23 | {{- end -}} 24 | {{- end -}} 25 | {{- end -}} 26 | 27 | {{/* 28 | Create chart name and version as used by the chart label. 
29 | */}} 30 | {{- define "fcm.chart" -}} 31 | {{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} 32 | {{- end -}} 33 | -------------------------------------------------------------------------------- /deploy/fcm/templates/configmap.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: {{ template "fcm.fullname" . }} 5 | data: 6 | fcm.toml: {{ include "fcm.config" . | quote }} 7 | -------------------------------------------------------------------------------- /deploy/fcm/templates/deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1beta2 2 | kind: Deployment 3 | metadata: 4 | name: {{ template "fcm.fullname" . }} 5 | labels: 6 | app: {{ template "fcm.name" . }} 7 | chart: {{ template "fcm.chart" . }} 8 | release: {{ .Release.Name }} 9 | heritage: {{ .Release.Service }} 10 | spec: 11 | replicas: {{ .Values.replicaCount }} 12 | selector: 13 | matchLabels: 14 | app: {{ template "fcm.name" . }} 15 | release: {{ .Release.Name }} 16 | template: 17 | metadata: 18 | labels: 19 | app: {{ template "fcm.name" . 
}} 20 | release: {{ .Release.Name }} 21 | annotations: 22 | prometheus.io/scrape: "true" 23 | prometheus.io/port: "80" 24 | spec: 25 | containers: 26 | - name: {{ .Chart.Name }} 27 | image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" 28 | imagePullPolicy: {{ .Values.image.pullPolicy }} 29 | ports: 30 | - name: http 31 | containerPort: 80 32 | protocol: TCP 33 | livenessProbe: 34 | httpGet: 35 | path: /metrics 36 | port: http 37 | env: 38 | - name: PORT 39 | value: "80" 40 | - name: RUST_BACKTRACE 41 | value: "1" 42 | - name: CONFIG 43 | value: "/etc/xorc-notifications/fcm.toml" 44 | - name: LOG_FORMAT 45 | value: "json" 46 | - name: RUST_ENV 47 | value: "{{ .Values.system.environment }}" 48 | volumeMounts: 49 | - name: config 50 | mountPath: /etc/xorc-notifications/fcm.toml 51 | subPath: fcm.toml 52 | readinessProbe: 53 | httpGet: 54 | path: /metrics 55 | port: http 56 | resources: 57 | {{ toYaml .Values.resources | indent 12 }} 58 | {{- with .Values.nodeSelector }} 59 | nodeSelector: 60 | {{ toYaml . | indent 8 }} 61 | {{- end }} 62 | {{- with .Values.affinity }} 63 | affinity: 64 | {{ toYaml . | indent 8 }} 65 | {{- end }} 66 | {{- with .Values.tolerations }} 67 | tolerations: 68 | {{ toYaml . | indent 8 }} 69 | {{- end }} 70 | volumes: 71 | - name: config 72 | configMap: 73 | name: {{ template "fcm.fullname" . 
}} 74 | -------------------------------------------------------------------------------- /deploy/fcm/templates/fcm.toml: -------------------------------------------------------------------------------- 1 | {{- define "fcm.config" -}} 2 | [kafka] 3 | input_topic = "rpc.push-notification" 4 | config_topic = "config.push-notification" 5 | output_topic = "rpc.responses" 6 | group_id = "{{.Values.kafka.group_prefix}}-fcm" 7 | brokers = "{{.Values.kafka.endpoint}}" 8 | {{- end -}} 9 | -------------------------------------------------------------------------------- /deploy/fcm/templates/ingress.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.ingress.enabled -}} 2 | {{- $fullName := include "fcm.fullname" . -}} 3 | {{- $ingressPath := .Values.ingress.path -}} 4 | apiVersion: extensions/v1beta1 5 | kind: Ingress 6 | metadata: 7 | name: {{ $fullName }} 8 | labels: 9 | app: {{ template "fcm.name" . }} 10 | chart: {{ template "fcm.chart" . }} 11 | release: {{ .Release.Name }} 12 | heritage: {{ .Release.Service }} 13 | {{- with .Values.ingress.annotations }} 14 | annotations: 15 | {{ toYaml . | indent 4 }} 16 | {{- end }} 17 | spec: 18 | {{- if .Values.ingress.tls }} 19 | tls: 20 | {{- range .Values.ingress.tls }} 21 | - hosts: 22 | {{- range .hosts }} 23 | - {{ . }} 24 | {{- end }} 25 | secretName: {{ .secretName }} 26 | {{- end }} 27 | {{- end }} 28 | rules: 29 | {{- range .Values.ingress.hosts }} 30 | - host: {{ . }} 31 | http: 32 | paths: 33 | - path: {{ $ingressPath }} 34 | backend: 35 | serviceName: {{ $fullName }} 36 | servicePort: http 37 | {{- end }} 38 | {{- end }} 39 | -------------------------------------------------------------------------------- /deploy/fcm/templates/service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: {{ template "fcm.fullname" . }} 5 | labels: 6 | app: {{ template "fcm.name" . 
}} 7 | chart: {{ template "fcm.chart" . }} 8 | release: {{ .Release.Name }} 9 | heritage: {{ .Release.Service }} 10 | spec: 11 | type: {{ .Values.service.type }} 12 | ports: 13 | - port: {{ .Values.service.port }} 14 | targetPort: http 15 | protocol: TCP 16 | name: http 17 | selector: 18 | app: {{ template "fcm.name" . }} 19 | release: {{ .Release.Name }} 20 | -------------------------------------------------------------------------------- /deploy/fcm/values.yaml: -------------------------------------------------------------------------------- 1 | # Default values for fcm. 2 | # This is a YAML-formatted file. 3 | # Declare variables to be passed into your templates. 4 | 5 | replicaCount: 2 6 | 7 | image: 8 | repository: eu.gcr.io/xray2poc/fcm 9 | tag: stable 10 | pullPolicy: IfNotPresent 11 | 12 | service: 13 | type: ClusterIP 14 | port: 80 15 | 16 | ingress: 17 | enabled: false 18 | annotations: {} 19 | # kubernetes.io/ingress.class: nginx 20 | # kubernetes.io/tls-acme: "true" 21 | path: / 22 | hosts: 23 | - chart-example.local 24 | tls: [] 25 | # - secretName: chart-example-tls 26 | # hosts: 27 | # - chart-example.local 28 | 29 | resources: 30 | # We usually recommend not to specify default resources and to leave this as a conscious 31 | # choice for the user. This also increases chances charts run on environments with little 32 | # resources, such as Minikube. If you do want to specify resources, uncomment the following 33 | # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 34 | # limits: 35 | # cpu: 100m 36 | # memory: 128Mi 37 | requests: 38 | cpu: 100m 39 | memory: 64Mi 40 | 41 | nodeSelector: {} 42 | 43 | tolerations: [] 44 | 45 | affinity: {} 46 | -------------------------------------------------------------------------------- /deploy/http/.helmignore: -------------------------------------------------------------------------------- 1 | # Patterns to ignore when building packages. 
2 | # This supports shell glob matching, relative path matching, and 3 | # negation (prefixed with !). Only one pattern per line. 4 | .DS_Store 5 | # Common VCS dirs 6 | .git/ 7 | .gitignore 8 | .bzr/ 9 | .bzrignore 10 | .hg/ 11 | .hgignore 12 | .svn/ 13 | # Common backup files 14 | *.swp 15 | *.bak 16 | *.tmp 17 | *~ 18 | # Various IDEs 19 | .project 20 | .idea/ 21 | *.tmproj 22 | -------------------------------------------------------------------------------- /deploy/http/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | appVersion: "1.0" 3 | description: RPC HTTP Requests 4 | name: http 5 | version: 0.1.0 6 | -------------------------------------------------------------------------------- /deploy/http/templates/NOTES.txt: -------------------------------------------------------------------------------- 1 | 1. Get the application URL by running these commands: 2 | {{- if .Values.ingress.enabled }} 3 | {{- range .Values.ingress.hosts }} 4 | http{{ if $.Values.ingress.tls }}s{{ end }}://{{ . }}{{ $.Values.ingress.path }} 5 | {{- end }} 6 | {{- else if contains "NodePort" .Values.service.type }} 7 | export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "http.fullname" . }}) 8 | export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") 9 | echo http://$NODE_IP:$NODE_PORT 10 | {{- else if contains "LoadBalancer" .Values.service.type }} 11 | NOTE: It may take a few minutes for the LoadBalancer IP to be available. 12 | You can watch the status of by running 'kubectl get svc -w {{ template "http.fullname" . }}' 13 | export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "http.fullname" . 
}} -o jsonpath='{.status.loadBalancer.ingress[0].ip}') 14 | echo http://$SERVICE_IP:{{ .Values.service.port }} 15 | {{- else if contains "ClusterIP" .Values.service.type }} 16 | export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ template "http.name" . }},release={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") 17 | echo "Visit http://127.0.0.1:8080 to use your application" 18 | kubectl port-forward $POD_NAME 8080:80 19 | {{- end }} 20 | -------------------------------------------------------------------------------- /deploy/http/templates/_helpers.tpl: -------------------------------------------------------------------------------- 1 | {{/* vim: set filetype=mustache: */}} 2 | {{/* 3 | Expand the name of the chart. 4 | */}} 5 | {{- define "http.name" -}} 6 | {{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} 7 | {{- end -}} 8 | 9 | {{/* 10 | Create a default fully qualified app name. 11 | We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 12 | If release name contains chart name it will be used as a full name. 13 | */}} 14 | {{- define "http.fullname" -}} 15 | {{- if .Values.fullnameOverride -}} 16 | {{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} 17 | {{- else -}} 18 | {{- $name := default .Chart.Name .Values.nameOverride -}} 19 | {{- if contains $name .Release.Name -}} 20 | {{- .Release.Name | trunc 63 | trimSuffix "-" -}} 21 | {{- else -}} 22 | {{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} 23 | {{- end -}} 24 | {{- end -}} 25 | {{- end -}} 26 | 27 | {{/* 28 | Create chart name and version as used by the chart label. 
29 | */}} 30 | {{- define "http.chart" -}} 31 | {{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} 32 | {{- end -}} 33 | -------------------------------------------------------------------------------- /deploy/http/templates/configmap.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: {{ template "http.fullname" . }} 5 | data: 6 | http.toml: {{ include "http.config" . | quote }} 7 | -------------------------------------------------------------------------------- /deploy/http/templates/deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1beta2 2 | kind: Deployment 3 | metadata: 4 | name: {{ template "http.fullname" . }} 5 | labels: 6 | app: {{ template "http.name" . }} 7 | chart: {{ template "http.chart" . }} 8 | release: {{ .Release.Name }} 9 | heritage: {{ .Release.Service }} 10 | spec: 11 | replicas: {{ .Values.replicaCount }} 12 | selector: 13 | matchLabels: 14 | app: {{ template "http.name" . }} 15 | release: {{ .Release.Name }} 16 | template: 17 | metadata: 18 | labels: 19 | app: {{ template "http.name" . 
}} 20 | release: {{ .Release.Name }} 21 | annotations: 22 | prometheus.io/scrape: "true" 23 | prometheus.io/port: "80" 24 | spec: 25 | containers: 26 | - name: {{ .Chart.Name }} 27 | image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" 28 | imagePullPolicy: {{ .Values.image.pullPolicy }} 29 | ports: 30 | - name: http 31 | containerPort: 80 32 | protocol: TCP 33 | livenessProbe: 34 | httpGet: 35 | path: /metrics 36 | port: http 37 | env: 38 | - name: PORT 39 | value: "80" 40 | - name: RUST_BACKTRACE 41 | value: "1" 42 | - name: CONFIG 43 | value: "/etc/xorc-notifications/http.toml" 44 | - name: LOG_FORMAT 45 | value: "json" 46 | - name: RUST_ENV 47 | value: "{{ .Values.system.environment }}" 48 | volumeMounts: 49 | - name: config 50 | mountPath: /etc/xorc-notifications/http.toml 51 | subPath: http.toml 52 | readinessProbe: 53 | httpGet: 54 | path: /metrics 55 | port: http 56 | resources: 57 | {{ toYaml .Values.resources | indent 12 }} 58 | {{- with .Values.nodeSelector }} 59 | nodeSelector: 60 | {{ toYaml . | indent 8 }} 61 | {{- end }} 62 | {{- with .Values.affinity }} 63 | affinity: 64 | {{ toYaml . | indent 8 }} 65 | {{- end }} 66 | {{- with .Values.tolerations }} 67 | tolerations: 68 | {{ toYaml . | indent 8 }} 69 | {{- end }} 70 | volumes: 71 | - name: config 72 | configMap: 73 | name: {{ template "http.fullname" . 
}} 74 | -------------------------------------------------------------------------------- /deploy/http/templates/http.toml: -------------------------------------------------------------------------------- 1 | {{- define "http.config" -}} 2 | [kafka] 3 | input_topic = "rpc.http" 4 | config_topic = "config.push-notification" 5 | output_topic = "rpc.responses" 6 | group_id = "{{.Values.kafka.group_prefix}}-http-requester" 7 | brokers = "{{.Values.kafka.endpoint}}" 8 | {{- end -}} 9 | -------------------------------------------------------------------------------- /deploy/http/templates/ingress.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.ingress.enabled -}} 2 | {{- $fullName := include "http.fullname" . -}} 3 | {{- $ingressPath := .Values.ingress.path -}} 4 | apiVersion: extensions/v1beta1 5 | kind: Ingress 6 | metadata: 7 | name: {{ $fullName }} 8 | labels: 9 | app: {{ template "http.name" . }} 10 | chart: {{ template "http.chart" . }} 11 | release: {{ .Release.Name }} 12 | heritage: {{ .Release.Service }} 13 | {{- with .Values.ingress.annotations }} 14 | annotations: 15 | {{ toYaml . | indent 4 }} 16 | {{- end }} 17 | spec: 18 | {{- if .Values.ingress.tls }} 19 | tls: 20 | {{- range .Values.ingress.tls }} 21 | - hosts: 22 | {{- range .hosts }} 23 | - {{ . }} 24 | {{- end }} 25 | secretName: {{ .secretName }} 26 | {{- end }} 27 | {{- end }} 28 | rules: 29 | {{- range .Values.ingress.hosts }} 30 | - host: {{ . }} 31 | http: 32 | paths: 33 | - path: {{ $ingressPath }} 34 | backend: 35 | serviceName: {{ $fullName }} 36 | servicePort: http 37 | {{- end }} 38 | {{- end }} 39 | -------------------------------------------------------------------------------- /deploy/http/templates/service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: {{ template "http.fullname" . }} 5 | labels: 6 | app: {{ template "http.name" . 
}} 7 | chart: {{ template "http.chart" . }} 8 | release: {{ .Release.Name }} 9 | heritage: {{ .Release.Service }} 10 | spec: 11 | type: {{ .Values.service.type }} 12 | ports: 13 | - port: 80 14 | targetPort: 80 15 | protocol: TCP 16 | name: http 17 | selector: 18 | app: {{ template "http.name" . }} 19 | release: {{ .Release.Name }} 20 | -------------------------------------------------------------------------------- /deploy/http/values.yaml: -------------------------------------------------------------------------------- 1 | # Default values for http. 2 | # This is a YAML-formatted file. 3 | # Declare variables to be passed into your templates. 4 | 5 | replicaCount: 2 6 | 7 | image: 8 | repository: eu.gcr.io/xray2poc/http 9 | tag: latest 10 | pullPolicy: IfNotPresent 11 | 12 | service: 13 | type: ClusterIP 14 | port: 80 15 | 16 | ingress: 17 | enabled: false 18 | annotations: {} 19 | # kubernetes.io/ingress.class: nginx 20 | # kubernetes.io/tls-acme: "true" 21 | path: / 22 | hosts: 23 | - chart-example.local 24 | tls: [] 25 | # - secretName: chart-example-tls 26 | # hosts: 27 | # - chart-example.local 28 | 29 | resources: 30 | # We usually recommend not to specify default resources and to leave this as a conscious 31 | # choice for the user. This also increases chances charts run on environments with little 32 | # resources, such as Minikube. If you do want to specify resources, uncomment the following 33 | # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
34 | # limits: 35 | # cpu: 100m 36 | # memory: 128Mi 37 | requests: 38 | cpu: 100m 39 | memory: 64Mi 40 | 41 | nodeSelector: {} 42 | 43 | tolerations: [] 44 | 45 | affinity: {} 46 | -------------------------------------------------------------------------------- /deploy/staging.yaml: -------------------------------------------------------------------------------- 1 | replicaCount: 2 2 | 3 | kafka: 4 | endpoint: staging-kafka:9092 5 | group_prefix: staging 6 | 7 | system: 8 | environment: staging 9 | -------------------------------------------------------------------------------- /deploy/web-push/.helmignore: -------------------------------------------------------------------------------- 1 | # Patterns to ignore when building packages. 2 | # This supports shell glob matching, relative path matching, and 3 | # negation (prefixed with !). Only one pattern per line. 4 | .DS_Store 5 | # Common VCS dirs 6 | .git/ 7 | .gitignore 8 | .bzr/ 9 | .bzrignore 10 | .hg/ 11 | .hgignore 12 | .svn/ 13 | # Common backup files 14 | *.swp 15 | *.bak 16 | *.tmp 17 | *~ 18 | # Various IDEs 19 | .project 20 | .idea/ 21 | *.tmproj 22 | -------------------------------------------------------------------------------- /deploy/web-push/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | appVersion: "1.0" 3 | description: A Helm chart for Kubernetes 4 | name: web-push 5 | version: 0.1.0 6 | -------------------------------------------------------------------------------- /deploy/web-push/templates/NOTES.txt: -------------------------------------------------------------------------------- 1 | 1. Get the application URL by running these commands: 2 | {{- if .Values.ingress.enabled }} 3 | {{- range .Values.ingress.hosts }} 4 | http{{ if $.Values.ingress.tls }}s{{ end }}://{{ . 
}}{{ $.Values.ingress.path }} 5 | {{- end }} 6 | {{- else if contains "NodePort" .Values.service.type }} 7 | export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "web-push.fullname" . }}) 8 | export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") 9 | echo http://$NODE_IP:$NODE_PORT 10 | {{- else if contains "LoadBalancer" .Values.service.type }} 11 | NOTE: It may take a few minutes for the LoadBalancer IP to be available. 12 | You can watch the status of by running 'kubectl get svc -w {{ include "web-push.fullname" . }}' 13 | export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "web-push.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}') 14 | echo http://$SERVICE_IP:{{ .Values.service.port }} 15 | {{- else if contains "ClusterIP" .Values.service.type }} 16 | export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ include "web-push.name" . }},release={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") 17 | echo "Visit http://127.0.0.1:8080 to use your application" 18 | kubectl port-forward $POD_NAME 8080:80 19 | {{- end }} 20 | -------------------------------------------------------------------------------- /deploy/web-push/templates/_helpers.tpl: -------------------------------------------------------------------------------- 1 | {{/* vim: set filetype=mustache: */}} 2 | {{/* 3 | Expand the name of the chart. 4 | */}} 5 | {{- define "web-push.name" -}} 6 | {{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} 7 | {{- end -}} 8 | 9 | {{/* 10 | Create a default fully qualified app name. 11 | We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 12 | If release name contains chart name it will be used as a full name. 
13 | */}} 14 | {{- define "web-push.fullname" -}} 15 | {{- if .Values.fullnameOverride -}} 16 | {{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} 17 | {{- else -}} 18 | {{- $name := default .Chart.Name .Values.nameOverride -}} 19 | {{- if contains $name .Release.Name -}} 20 | {{- .Release.Name | trunc 63 | trimSuffix "-" -}} 21 | {{- else -}} 22 | {{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} 23 | {{- end -}} 24 | {{- end -}} 25 | {{- end -}} 26 | 27 | {{/* 28 | Create chart name and version as used by the chart label. 29 | */}} 30 | {{- define "web-push.chart" -}} 31 | {{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} 32 | {{- end -}} 33 | -------------------------------------------------------------------------------- /deploy/web-push/templates/configmap.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: {{ template "web-push.fullname" . }} 5 | data: 6 | web_push.toml: {{ include "web-push.config" . | quote }} 7 | -------------------------------------------------------------------------------- /deploy/web-push/templates/deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1beta2 2 | kind: Deployment 3 | metadata: 4 | name: {{ include "web-push.fullname" . }} 5 | labels: 6 | app: {{ include "web-push.name" . }} 7 | chart: {{ include "web-push.chart" . }} 8 | release: {{ .Release.Name }} 9 | heritage: {{ .Release.Service }} 10 | spec: 11 | replicas: {{ .Values.replicaCount }} 12 | selector: 13 | matchLabels: 14 | app: {{ include "web-push.name" . }} 15 | release: {{ .Release.Name }} 16 | template: 17 | metadata: 18 | labels: 19 | app: {{ include "web-push.name" . 
}} 20 | release: {{ .Release.Name }} 21 | spec: 22 | containers: 23 | - name: {{ .Chart.Name }} 24 | image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" 25 | imagePullPolicy: {{ .Values.image.pullPolicy }} 26 | ports: 27 | - name: http 28 | containerPort: 80 29 | protocol: TCP 30 | livenessProbe: 31 | httpGet: 32 | path: /metrics 33 | port: http 34 | env: 35 | - name: PORT 36 | value: "80" 37 | - name: RUST_BACKTRACE 38 | value: "1" 39 | - name: CONFIG 40 | value: "/etc/xorc-notifications/web_push.toml" 41 | - name: LOG_FORMAT 42 | value: "json" 43 | - name: RUST_ENV 44 | value: "{{ .Values.system.environment }}" 45 | volumeMounts: 46 | - name: config 47 | mountPath: /etc/xorc-notifications/web_push.toml 48 | subPath: web_push.toml 49 | readinessProbe: 50 | httpGet: 51 | path: / 52 | port: http 53 | resources: 54 | {{ toYaml .Values.resources | indent 12 }} 55 | {{- with .Values.nodeSelector }} 56 | nodeSelector: 57 | {{ toYaml . | indent 8 }} 58 | {{- end }} 59 | {{- with .Values.affinity }} 60 | affinity: 61 | {{ toYaml . | indent 8 }} 62 | {{- end }} 63 | {{- with .Values.tolerations }} 64 | tolerations: 65 | {{ toYaml . | indent 8 }} 66 | {{- end }} 67 | volumes: 68 | - name: config 69 | configMap: 70 | name: {{ template "web-push.fullname" . }} 71 | -------------------------------------------------------------------------------- /deploy/web-push/templates/ingress.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.ingress.enabled -}} 2 | {{- $fullName := include "web-push.fullname" . -}} 3 | {{- $ingressPath := .Values.ingress.path -}} 4 | apiVersion: extensions/v1beta1 5 | kind: Ingress 6 | metadata: 7 | name: {{ $fullName }} 8 | labels: 9 | app: {{ include "web-push.name" . }} 10 | chart: {{ include "web-push.chart" . }} 11 | release: {{ .Release.Name }} 12 | heritage: {{ .Release.Service }} 13 | {{- with .Values.ingress.annotations }} 14 | annotations: 15 | {{ toYaml . 
| indent 4 }} 16 | {{- end }} 17 | spec: 18 | {{- if .Values.ingress.tls }} 19 | tls: 20 | {{- range .Values.ingress.tls }} 21 | - hosts: 22 | {{- range .hosts }} 23 | - {{ . | quote }} 24 | {{- end }} 25 | secretName: {{ .secretName }} 26 | {{- end }} 27 | {{- end }} 28 | rules: 29 | {{- range .Values.ingress.hosts }} 30 | - host: {{ . | quote }} 31 | http: 32 | paths: 33 | - path: {{ $ingressPath }} 34 | backend: 35 | serviceName: {{ $fullName }} 36 | servicePort: http 37 | {{- end }} 38 | {{- end }} 39 | -------------------------------------------------------------------------------- /deploy/web-push/templates/service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: {{ include "web-push.fullname" . }} 5 | labels: 6 | app: {{ include "web-push.name" . }} 7 | chart: {{ include "web-push.chart" . }} 8 | release: {{ .Release.Name }} 9 | heritage: {{ .Release.Service }} 10 | spec: 11 | type: {{ .Values.service.type }} 12 | ports: 13 | - port: {{ .Values.service.port }} 14 | targetPort: http 15 | protocol: TCP 16 | name: http 17 | selector: 18 | app: {{ include "web-push.name" . }} 19 | release: {{ .Release.Name }} 20 | -------------------------------------------------------------------------------- /deploy/web-push/templates/web_push.toml: -------------------------------------------------------------------------------- 1 | {{- define "web-push.config" -}} 2 | [kafka] 3 | input_topic = "rpc.push-notification" 4 | config_topic = "config.push-notification" 5 | output_topic = "rpc.responses" 6 | group_id = "{{.Values.kafka.group_prefix}}-web_push" 7 | brokers = "{{.Values.kafka.endpoint}}" 8 | {{- end -}} 9 | -------------------------------------------------------------------------------- /deploy/web-push/values.yaml: -------------------------------------------------------------------------------- 1 | # Default values for web-push. 2 | # This is a YAML-formatted file. 
3 | # Declare variables to be passed into your templates. 4 | 5 | replicaCount: 2 6 | 7 | image: 8 | repository: eu.gcr.io/xray2poc/web_push 9 | tag: stable 10 | pullPolicy: IfNotPresent 11 | 12 | service: 13 | type: ClusterIP 14 | port: 80 15 | 16 | ingress: 17 | enabled: false 18 | annotations: {} 19 | # kubernetes.io/ingress.class: nginx 20 | # kubernetes.io/tls-acme: "true" 21 | path: / 22 | hosts: 23 | - chart-example.local 24 | tls: [] 25 | # - secretName: chart-example-tls 26 | # hosts: 27 | # - chart-example.local 28 | 29 | resources: 30 | # We usually recommend not to specify default resources and to leave this as a conscious 31 | # choice for the user. This also increases chances charts run on environments with little 32 | # resources, such as Minikube. If you do want to specify resources, uncomment the following 33 | # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 34 | # limits: 35 | # cpu: 100m 36 | # memory: 128Mi 37 | requests: 38 | cpu: 100m 39 | memory: 64Mi 40 | 41 | nodeSelector: {} 42 | 43 | tolerations: [] 44 | 45 | affinity: {} 46 | -------------------------------------------------------------------------------- /docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '3' 2 | services: 3 | zookeeper: 4 | logging: 5 | driver: "none" 6 | image: wurstmeister/zookeeper 7 | ports: 8 | - "2181:2181" 9 | kafka: 10 | image: wurstmeister/kafka 11 | depends_on: 12 | - zookeeper 13 | ports: 14 | - "9092:9092" 15 | environment: 16 | KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181 17 | KAFKA_ADVERTISED_HOST_NAME: "localhost" 18 | KAFKA_BROKER_ID: 666 19 | KAFKA_CREATE_TOPICS: "rpc:100:1,config:12:1:compact" 20 | -------------------------------------------------------------------------------- /examples/send_http_request.rs: -------------------------------------------------------------------------------- 1 | extern crate common; 2 | extern crate clap; 3 | extern crate 
rdkafka; 4 | extern crate chrono; 5 | extern crate protobuf; 6 | extern crate futures; 7 | 8 | use rdkafka::{ 9 | config::ClientConfig, 10 | producer::future_producer::{ 11 | FutureProducer, 12 | FutureRecord, 13 | }, 14 | }; 15 | 16 | use common::events::{ 17 | http_request::{HttpRequest, HttpRequest_HttpVerb::*}, 18 | rpc::Request, 19 | }; 20 | 21 | use protobuf::Message; 22 | use futures::future::Future; 23 | use clap::{Arg, App}; 24 | use std::collections::HashMap; 25 | 26 | fn is_int(v: String) -> Result<(), String> { 27 | match v.parse::() { 28 | Ok(_) => Ok(()), 29 | Err(e) => Err(format!("The timeout value is not valid: {:?}", e)) 30 | } 31 | } 32 | 33 | fn header_value(v: String) -> Result<(), String> { 34 | let splitted: Vec<&str> = v.split(": ").collect(); 35 | 36 | if splitted.len() == 2 { 37 | Ok(()) 38 | } else { 39 | Err(format!("Invalid header value: {}", v)) 40 | } 41 | } 42 | 43 | fn main() { 44 | let matches = App::new("HTTP Request Sender") 45 | .version("4.20") 46 | .author("Censhare Techlab") 47 | .about("Sends HTTP requests through Kafka and http_requester") 48 | .arg(Arg::with_name("kafka_server") 49 | .short("s") 50 | .long("kafka_server") 51 | .value_name("SERVER:PORT") 52 | .help("Kafka server to connect to") 53 | .default_value("localhost:9092") 54 | .takes_value(true)) 55 | .arg(Arg::with_name("kafka_topic") 56 | .short("t") 57 | .long("kafka_topic") 58 | .value_name("TOPIC") 59 | .help("Kafka topic to write to") 60 | .default_value("rpc") 61 | .takes_value(true)) 62 | .arg(Arg::with_name("request") 63 | .short("X") 64 | .long("request") 65 | .value_name("VERB") 66 | .help("HTTP verb to use with the request") 67 | .default_value("GET") 68 | .takes_value(true)) 69 | .arg(Arg::with_name("body") 70 | .short("d") 71 | .long("data") 72 | .value_name("DATA") 73 | .help("HTTP POST data")) 74 | .arg(Arg::with_name("timeout") 75 | .long("timeout") 76 | .value_name("MILLIS") 77 | .help("Maximum time allowed to wait for response") 78 | 
.takes_value(true) 79 | .default_value("2000") 80 | .validator(is_int)) 81 | .arg(Arg::with_name("headers") 82 | .long("header") 83 | .short("H") 84 | .value_name("HeaderName: HeaderValue") 85 | .takes_value(true) 86 | .multiple(true) 87 | .validator(header_value)) 88 | .arg(Arg::with_name("URI") 89 | .help("The uri to connect") 90 | .required(true) 91 | .index(1)) 92 | .get_matches(); 93 | 94 | let kafka_server = matches 95 | .value_of("kafka_server") 96 | .unwrap_or("localhost:9092"); 97 | 98 | let kafka_topic = matches 99 | .value_of("kafka_topic") 100 | .unwrap_or("rpc"); 101 | 102 | println!("TOPIC: {}", kafka_topic); 103 | 104 | let producer: FutureProducer = ClientConfig::new() 105 | .set("bootstrap.servers", kafka_server) 106 | .set("produce.offset.report", "true") 107 | .create() 108 | .expect("Producer creation error"); 109 | 110 | let mut header = Request::new(); 111 | header.set_field_type("http.HttpRequest".to_string()); 112 | 113 | let mut request = HttpRequest::new(); 114 | request.set_header(header); 115 | 116 | match matches.value_of("request").unwrap_or("GET") { 117 | "GET" => request.set_request_type(GET), 118 | "POST" => request.set_request_type(POST), 119 | "PUT" => request.set_request_type(PUT), 120 | "DELETE" => request.set_request_type(DELETE), 121 | "PATCH" => request.set_request_type(PATCH), 122 | "OPTIONS" => request.set_request_type(OPTIONS), 123 | v => panic!("Unsupported verb: {}", v), 124 | } 125 | 126 | request.set_uri(matches.value_of("URI").unwrap().to_string()); 127 | 128 | if let Some(body) = matches.value_of("body") { 129 | request.set_body(body.to_string()); 130 | } 131 | 132 | if let Some(timeout) = matches.value_of("timeout").and_then(|t| t.parse::().ok()) { 133 | request.set_timeout(timeout); 134 | } 135 | 136 | if let Some(headers) = matches.values_of("headers") { 137 | request.set_headers(headers.fold(HashMap::new(), |mut acc, header| { 138 | let splitted: Vec<&str> = header 139 | .split(": ") 140 | .collect(); 141 | 142 | 
acc.insert(splitted[0].to_string(), splitted[1].to_string()); 143 | acc 144 | })) 145 | }; 146 | 147 | let payload = request.write_to_bytes().unwrap(); 148 | 149 | let record = FutureRecord { 150 | topic: &kafka_topic, 151 | partition: None, 152 | payload: Some(&payload), 153 | key: None, 154 | timestamp: None, 155 | headers: None, 156 | }; 157 | 158 | producer.send::, Vec>(record, -1).wait().unwrap().unwrap(); 159 | } 160 | -------------------------------------------------------------------------------- /src/apns2/consumer.rs: -------------------------------------------------------------------------------- 1 | use futures::{Future, future::ok}; 2 | 3 | use std::{ 4 | collections::HashMap, 5 | sync::RwLock, 6 | }; 7 | 8 | use common::{ 9 | events::{ 10 | application::{ 11 | Application, 12 | ConnectionEndpoint::{ 13 | Production, 14 | Sandbox 15 | }, 16 | IosCertificate, 17 | IosToken 18 | }, 19 | push_notification::PushNotification, 20 | http_request::HttpRequest, 21 | }, 22 | kafka::EventHandler, 23 | metrics::* 24 | }; 25 | 26 | use a2::{client::Endpoint, error::Error}; 27 | 28 | use notifier::Notifier; 29 | use producer::ApnsProducer; 30 | 31 | pub struct ApnsHandler { 32 | producer: ApnsProducer, 33 | notifiers: RwLock>, 34 | } 35 | 36 | impl ApnsHandler { 37 | pub fn new() -> ApnsHandler { 38 | let notifiers = RwLock::new(HashMap::new()); 39 | let producer = ApnsProducer::new(); 40 | 41 | ApnsHandler { 42 | producer, 43 | notifiers, 44 | } 45 | } 46 | 47 | fn add_certificate_notifier( 48 | &self, 49 | certificate: &IosCertificate, 50 | endpoint: Endpoint, 51 | application_id: &str, 52 | apns_topic: &str 53 | ) -> Result<(), Error> { 54 | let mut pkcs12 = certificate.get_pkcs12(); 55 | 56 | let notifier = Notifier::certificate( 57 | &mut pkcs12, 58 | certificate.get_password(), 59 | endpoint, 60 | apns_topic, 61 | )?; 62 | 63 | let mut notifiers = self.notifiers.write().unwrap(); 64 | notifiers.insert(application_id.to_string(), notifier); 65 | 66 | Ok(()) 67 
| } 68 | 69 | fn add_token_notifier( 70 | &self, 71 | token: &IosToken, 72 | endpoint: Endpoint, 73 | application_id: &str, 74 | apns_topic: &str, 75 | ) -> Result<(), Error> { 76 | let mut pkcs8 = token.get_pkcs8(); 77 | 78 | let notifier = Notifier::token( 79 | &mut pkcs8, 80 | token.get_key_id(), 81 | token.get_team_id(), 82 | endpoint, 83 | apns_topic, 84 | )?; 85 | 86 | let mut notifiers = self.notifiers.write().unwrap(); 87 | notifiers.insert(application_id.to_string(), notifier); 88 | 89 | Ok(()) 90 | } 91 | 92 | fn delete_notifier(&self, id: &str) { 93 | if self.notifiers.write().unwrap().remove(id).is_some() { 94 | self.set_app_counter(); 95 | warn!("Application removed"; "universe" => id); 96 | } 97 | } 98 | 99 | fn set_app_counter(&self) { 100 | NUMBER_OF_APPLICATIONS.set(self.notifiers.read().unwrap().len() as f64); 101 | } 102 | } 103 | 104 | impl EventHandler for ApnsHandler { 105 | fn accepts(&self, event: &PushNotification) -> bool { 106 | event.has_apple() 107 | } 108 | 109 | fn handle_notification( 110 | &self, 111 | key: Option>, 112 | event: PushNotification, 113 | ) -> Box + 'static + Send> { 114 | let producer = self.producer.clone(); 115 | let timer = RESPONSE_TIMES_HISTOGRAM.start_timer(); 116 | 117 | CALLBACKS_INFLIGHT.inc(); 118 | 119 | if let Some(notifier) = self.notifiers.read().unwrap().get(event.get_universe()) { 120 | let notification_send = notifier 121 | .notify(&event) 122 | .then(move |result| { 123 | timer.observe_duration(); 124 | CALLBACKS_INFLIGHT.dec(); 125 | 126 | match result { 127 | Ok(_) => producer.handle_ok(key, event), 128 | Err(Error::ResponseError(e)) => producer.handle_err(key, event, e), 129 | Err(e) => producer.handle_fatal(key, event, e), 130 | } 131 | }) 132 | .then(|_| ok(())); 133 | 134 | Box::new(notification_send) 135 | } else { 136 | let connection_error = producer 137 | .handle_fatal(key, event, Error::ConnectionError) 138 | .then(|_| ok(())); 139 | 140 | Box::new(connection_error) 141 | } 142 | } 143 | 
144 | fn handle_http( 145 | &self, 146 | _: Option>, 147 | _: HttpRequest, 148 | ) -> Box + 'static + Send> { 149 | warn!("We don't handle http request events here"); 150 | Box::new(ok(())) 151 | } 152 | 153 | fn handle_config(&self, id: &str, application: Option) { 154 | match application { 155 | None => { 156 | self.delete_notifier(id); 157 | } 158 | Some(application) => { 159 | let application_id = application.get_id(); 160 | 161 | if !application.has_ios_config() { 162 | debug!("No ios config"; &application); 163 | self.delete_notifier(application_id); 164 | return; 165 | } 166 | 167 | let ios_config = application.get_ios_config(); 168 | 169 | if !ios_config.get_enabled() { 170 | debug!("Not enabled"; &application); 171 | self.delete_notifier(application_id); 172 | return; 173 | } 174 | 175 | if !ios_config.has_token() && !ios_config.has_certificate() { 176 | debug!("No connection details"; &application); 177 | self.delete_notifier(application_id); 178 | return; 179 | } 180 | 181 | let result = if ios_config.has_token() { 182 | let token_config = ios_config.get_token(); 183 | 184 | let endpoint = match token_config.get_endpoint() { 185 | Production => Endpoint::Production, 186 | Sandbox => Endpoint::Sandbox, 187 | }; 188 | 189 | info!( 190 | "Updating application configuration"; 191 | &application, 192 | "connection_type" => "token", 193 | "team_id" => token_config.get_team_id(), 194 | "key_id" => token_config.get_key_id(), 195 | "apns_topic" => token_config.get_apns_topic(), 196 | "endpoint" => format!("{:?}", endpoint) 197 | ); 198 | 199 | self.add_token_notifier( 200 | token_config, 201 | endpoint, 202 | application_id, 203 | token_config.get_apns_topic(), 204 | ) 205 | } else { 206 | let cert_config = ios_config.get_certificate(); 207 | 208 | let endpoint = match cert_config.get_endpoint() { 209 | Production => Endpoint::Production, 210 | Sandbox => Endpoint::Sandbox, 211 | }; 212 | 213 | info!( 214 | "Updating application configuration"; 215 | 
&application, 216 | "connection_type" => "certificate", 217 | "apns_topic" => cert_config.get_apns_topic(), 218 | "endpoint" => format!("{:?}", endpoint) 219 | ); 220 | 221 | self.add_certificate_notifier( 222 | cert_config, 223 | endpoint, 224 | application_id, 225 | cert_config.get_apns_topic(), 226 | ) 227 | }; 228 | 229 | self.set_app_counter(); 230 | 231 | if let Err(error) = result { 232 | error!( 233 | "Error connecting to APNs"; 234 | &application, 235 | "error" => format!("{:?}", error) 236 | ) 237 | }; 238 | } 239 | } 240 | } 241 | } 242 | -------------------------------------------------------------------------------- /src/apns2/main.rs: -------------------------------------------------------------------------------- 1 | #[macro_use] extern crate lazy_static; 2 | #[macro_use] extern crate slog; 3 | #[macro_use] extern crate slog_scope; 4 | 5 | extern crate a2; 6 | extern crate common; 7 | extern crate futures; 8 | extern crate heck; 9 | extern crate serde_json; 10 | extern crate tokio_timer; 11 | 12 | mod consumer; 13 | mod notifier; 14 | mod producer; 15 | 16 | use consumer::ApnsHandler; 17 | use std::env; 18 | 19 | use common::{config::Config, system::System}; 20 | 21 | lazy_static! 
{ 22 | pub static ref CONFIG: Config = match env::var("CONFIG") { 23 | Ok(config_file_location) => Config::parse(&config_file_location), 24 | _ => Config::parse("./config/apns2.toml"), 25 | }; 26 | } 27 | 28 | fn main() { 29 | System::start( 30 | "apns2", 31 | ApnsHandler::new(), 32 | &CONFIG, 33 | ); 34 | } 35 | -------------------------------------------------------------------------------- /src/apns2/notifier.rs: -------------------------------------------------------------------------------- 1 | use common::events::push_notification::PushNotification; 2 | use common::metrics::*; 3 | use serde_json::error::Error as JsonError; 4 | use serde_json::{self, Value}; 5 | use std::io::Read; 6 | use std::time::Duration; 7 | use tokio_timer::Timeout; 8 | 9 | use a2::{client::{Client, Endpoint, FutureResponse}, error::Error, 10 | request::{notification::*, payload::Payload}}; 11 | 12 | enum NotifierType { 13 | Token, 14 | Certificate, 15 | } 16 | 17 | pub struct Notifier { 18 | client: Client, 19 | topic: String, 20 | notifier_type: NotifierType, 21 | } 22 | 23 | impl Drop for Notifier { 24 | fn drop(&mut self) { 25 | match self.notifier_type { 26 | NotifierType::Token => { 27 | TOKEN_CONSUMERS.dec(); 28 | } 29 | NotifierType::Certificate => { 30 | CERTIFICATE_CONSUMERS.dec(); 31 | } 32 | } 33 | } 34 | } 35 | 36 | impl Notifier { 37 | pub fn certificate( 38 | pkcs12: &mut R, 39 | password: &str, 40 | endpoint: Endpoint, 41 | topic: &str, 42 | ) -> Result 43 | where 44 | R: Read, 45 | { 46 | let client = Client::certificate(pkcs12, password, endpoint)?; 47 | let notifier_type = NotifierType::Certificate; 48 | CERTIFICATE_CONSUMERS.inc(); 49 | 50 | Ok(Notifier { 51 | client, 52 | topic: String::from(topic), 53 | notifier_type, 54 | }) 55 | } 56 | 57 | pub fn token( 58 | pkcs8: &mut R, 59 | key_id: &str, 60 | team_id: &str, 61 | endpoint: Endpoint, 62 | topic: &str, 63 | ) -> Result 64 | where 65 | R: Read, 66 | { 67 | let client = Client::token(pkcs8, key_id, team_id,
endpoint)?; 68 | let notifier_type = NotifierType::Token; 69 | TOKEN_CONSUMERS.inc(); 70 | 71 | Ok(Notifier { 72 | client, 73 | topic: String::from(topic), 74 | notifier_type, 75 | }) 76 | } 77 | 78 | pub fn notify(&self, event: &PushNotification) -> Timeout { 79 | self.client.send_with_timeout(self.gen_payload(event), Duration::from_secs(3)) 80 | } 81 | 82 | fn gen_payload<'a>(&'a self, event: &'a PushNotification) -> Payload<'a> { 83 | let notification_data = event.get_apple(); 84 | let headers = notification_data.get_headers(); 85 | 86 | let mut options = NotificationOptions { 87 | ..Default::default() 88 | }; 89 | 90 | if headers.has_apns_priority() { 91 | match headers.get_apns_priority() { 92 | 10 => options.apns_priority = Priority::High, 93 | _ => options.apns_priority = Priority::Normal, 94 | } 95 | } 96 | if event.get_header().has_correlation_id() { 97 | options.apns_id = Some(event.get_header().get_correlation_id()); 98 | } 99 | if headers.has_apns_expiration() { 100 | options.apns_expiration = Some(headers.get_apns_expiration() as u64); 101 | } 102 | if headers.has_apns_topic() { 103 | options.apns_topic = Some(headers.get_apns_topic()); 104 | } else { 105 | options.apns_topic = Some(&self.topic); 106 | } 107 | 108 | let mut payload = if notification_data.has_localized() { 109 | let alert_data = notification_data.get_localized(); 110 | let mut builder = 111 | LocalizedNotificationBuilder::new(alert_data.get_title(), alert_data.get_body()); 112 | 113 | if alert_data.has_title_loc_key() { 114 | builder.set_title_loc_key(alert_data.get_title_loc_key()); 115 | } 116 | if !alert_data.get_title_loc_args().is_empty() { 117 | builder.set_title_loc_args(&alert_data.get_title_loc_args()); 118 | } 119 | if alert_data.has_action_loc_key() { 120 | builder.set_action_loc_key(alert_data.get_action_loc_key()); 121 | } 122 | if alert_data.has_launch_image() { 123 | builder.set_launch_image(alert_data.get_launch_image()); 124 | } 125 | if alert_data.has_loc_key() { 126 | 
builder.set_loc_key(alert_data.get_loc_key()); 127 | } 128 | if !alert_data.get_loc_args().is_empty() { 129 | builder.set_loc_args(&alert_data.get_loc_args()); 130 | } 131 | if notification_data.has_badge() { 132 | builder.set_badge(notification_data.get_badge()); 133 | } 134 | if notification_data.has_sound() { 135 | builder.set_sound(notification_data.get_sound()); 136 | } 137 | if notification_data.has_category() { 138 | builder.set_category(notification_data.get_category()); 139 | } 140 | if alert_data.has_mutable_content() && alert_data.get_mutable_content() { 141 | builder.set_mutable_content(); 142 | } 143 | 144 | builder.build(event.get_device_token(), options) 145 | } else if notification_data.has_silent() { 146 | SilentNotificationBuilder::new().build(event.get_device_token(), options) 147 | } else { 148 | let mut builder = PlainNotificationBuilder::new(notification_data.get_plain()); 149 | 150 | if notification_data.has_badge() { 151 | builder.set_badge(notification_data.get_badge()); 152 | } 153 | if notification_data.has_sound() { 154 | builder.set_sound(notification_data.get_sound()); 155 | } 156 | if notification_data.has_category() { 157 | builder.set_category(notification_data.get_category()); 158 | } 159 | 160 | builder.build(event.get_device_token(), options) 161 | }; 162 | 163 | if notification_data.has_custom_data() { 164 | let custom_data = notification_data.get_custom_data(); 165 | 166 | let v: Result = serde_json::from_str(custom_data.get_body()); 167 | match v { 168 | Ok(json) => { 169 | if let Err(e) = payload.add_custom_data(custom_data.get_key(), &json) { 170 | error!("Couldn't serialize custom data {:?}", e); 171 | }; 172 | } 173 | Err(e) => { 174 | error!("Non-json custom data: {:?}", e); 175 | } 176 | } 177 | } 178 | 179 | payload 180 | } 181 | } 182 | -------------------------------------------------------------------------------- /src/apns2/producer.rs: -------------------------------------------------------------------------------- 
1 | use a2::{ 2 | Error, 3 | Response, 4 | ErrorReason::*, 5 | }; 6 | 7 | use common::{ 8 | events::{ 9 | push_result::{ 10 | PushResult, 11 | PushResult_ResponseAction as ResponseAction 12 | }, 13 | push_notification::PushNotification, 14 | }, 15 | kafka::{ 16 | DeliveryFuture, 17 | ResponseProducer 18 | }, 19 | metrics::* 20 | }; 21 | 22 | use heck::SnakeCase; 23 | use CONFIG; 24 | 25 | pub struct ApnsProducer { 26 | producer: ResponseProducer, 27 | } 28 | 29 | impl ApnsProducer { 30 | pub fn new() -> ApnsProducer { 31 | ApnsProducer { 32 | producer: ResponseProducer::new(&CONFIG.kafka), 33 | } 34 | } 35 | 36 | pub fn handle_ok( 37 | &self, 38 | key: Option>, 39 | event: PushNotification 40 | ) -> DeliveryFuture 41 | { 42 | CALLBACKS_COUNTER.with_label_values(&["success"]).inc(); 43 | 44 | info!( 45 | "Successfully sent a push notification"; 46 | &event, 47 | "successful" => true 48 | ); 49 | 50 | let result: PushResult = (event, ResponseAction::None).into(); 51 | self.producer.publish(key, &result) 52 | } 53 | 54 | pub fn handle_err( 55 | &self, 56 | key: Option>, 57 | event: PushNotification, 58 | response: Response 59 | ) -> DeliveryFuture 60 | { 61 | let reason = response.error.as_ref() 62 | .map(|ref error| { 63 | format!("{:?}", error.reason) 64 | }); 65 | 66 | error!( 67 | "Error sending a push notification"; 68 | &event, 69 | "successful" => false, 70 | "reason" => reason 71 | ); 72 | 73 | let response_action = 74 | if let Some(ref reason) = response.error.map(|e| e.reason) { 75 | let error_label = format!("{:?}", reason).to_snake_case(); 76 | CALLBACKS_COUNTER.with_label_values(&[&error_label]).inc(); 77 | 78 | match reason { 79 | Unregistered | DeviceTokenNotForTopic | BadDeviceToken => 80 | ResponseAction::UnsubscribeEntity, 81 | InternalServerError | Shutdown | ServiceUnavailable | ExpiredProviderToken | Forbidden => 82 | ResponseAction::Retry, 83 | _ => 84 | ResponseAction::None, 85 | } 86 | } else { 87 | 
CALLBACKS_COUNTER.with_label_values(&["Unknown"]).inc(); 88 | ResponseAction::None 89 | }; 90 | 91 | let result: PushResult = (event, response_action).into(); 92 | self.producer.publish(key, &result) 93 | } 94 | 95 | pub fn handle_fatal( 96 | &self, 97 | key: Option>, 98 | event: PushNotification, 99 | error: Error 100 | ) -> DeliveryFuture 101 | { 102 | let status_label = format!("{:?}", error).to_snake_case(); 103 | let result: PushResult = (event, ResponseAction::Retry).into(); 104 | 105 | CALLBACKS_COUNTER.with_label_values(&[&status_label]).inc(); 106 | self.producer.publish(key, &result) 107 | } 108 | } 109 | 110 | impl Clone for ApnsProducer { 111 | fn clone(&self) -> Self { 112 | ApnsProducer { 113 | producer: self.producer.clone(), 114 | } 115 | } 116 | } 117 | -------------------------------------------------------------------------------- /src/common/config.rs: -------------------------------------------------------------------------------- 1 | use kafka; 2 | use toml; 3 | 4 | use std::{fs::File, io::prelude::*}; 5 | 6 | #[derive(Deserialize, Debug)] 7 | pub struct Config { 8 | pub kafka: kafka::Config, 9 | } 10 | 11 | impl Config { 12 | /// Load TOML-formatted configuration from `path`. 
13 | pub fn parse(path: &str) -> Config { 14 | let mut config_toml = String::new(); 15 | 16 | let mut file = match File::open(path) { 17 | Ok(file) => file, 18 | Err(err) => { 19 | panic!("Error while reading config file: [{}]", err); 20 | } 21 | }; 22 | 23 | file.read_to_string(&mut config_toml) 24 | .unwrap_or_else(|err| panic!("Error while reading config: [{}]", err)); 25 | 26 | toml::from_str(&config_toml).unwrap() 27 | } 28 | } 29 | -------------------------------------------------------------------------------- /src/common/events/mod.rs: -------------------------------------------------------------------------------- 1 | use self::{ 2 | http_request::HttpRequest_HttpVerb::{self, *}, 3 | push_result::{ 4 | PushResult, 5 | PushResult_ResponseAction as ResponseAction 6 | }, 7 | push_notification::PushNotification, 8 | }; 9 | 10 | pub mod apple_notification; 11 | pub mod application; 12 | pub mod google_notification; 13 | pub mod rpc; 14 | pub mod rpc_decoder; 15 | pub mod push_notification; 16 | pub mod push_result; 17 | pub mod webpush_notification; 18 | pub mod http_request; 19 | pub mod http_response; 20 | 21 | impl Into for (PushNotification, ResponseAction) { 22 | fn into(self) -> PushResult { 23 | let (mut event, response_action) = self; 24 | 25 | let mut header = rpc::Response::new(); 26 | header.set_field_type("notification.PushResult".to_string()); 27 | header.set_request(event.take_header()); 28 | 29 | let mut result = PushResult::new(); 30 | result.set_header(header); 31 | result.set_response_action(response_action); 32 | 33 | result 34 | } 35 | } 36 | 37 | impl AsRef for HttpRequest_HttpVerb { 38 | fn as_ref(&self) -> &str { 39 | match self { 40 | GET => "GET", 41 | POST => "POST", 42 | PUT => "PUT", 43 | DELETE => "DELETE", 44 | PATCH => "PATCH", 45 | OPTIONS => "OPTIONS", 46 | } 47 | } 48 | } 49 | -------------------------------------------------------------------------------- /src/common/kafka/mod.rs: 
-------------------------------------------------------------------------------- 1 | mod request_consumer; 2 | mod response_producer; 3 | 4 | pub use self::request_consumer::{EventHandler, RequestConsumer}; 5 | pub use self::response_producer::ResponseProducer; 6 | pub use rdkafka::producer::DeliveryFuture; 7 | 8 | #[derive(Deserialize, Debug)] 9 | pub struct Config { 10 | /// Kafka topic for incoming `PushNotification` events, triggering a push 11 | /// notification to be sent. 12 | pub input_topic: String, 13 | /// Kafka topic for incoming `Application` events, holding the tenant 14 | /// configuration. 15 | pub config_topic: String, 16 | /// Kafka topic for push notification responses. 17 | pub output_topic: String, 18 | /// Kafka consumer group ID. 19 | pub group_id: String, 20 | /// A comma-separated list of Kafka brokers to connect. 21 | pub brokers: String, 22 | } 23 | -------------------------------------------------------------------------------- /src/common/kafka/request_consumer.rs: -------------------------------------------------------------------------------- 1 | use rdkafka::{ 2 | Message, 3 | message::BorrowedMessage, 4 | config::ClientConfig, 5 | consumer::{CommitMode, Consumer, stream_consumer::StreamConsumer}, 6 | topic_partition_list::{Offset, TopicPartitionList}, 7 | }; 8 | use kafka::Config; 9 | use events::{ 10 | application::Application, 11 | push_notification::PushNotification, 12 | http_request::HttpRequest, 13 | rpc_decoder::RequestWrapper, 14 | }; 15 | use futures::{Future, Stream, sync::oneshot}; 16 | use protobuf::parse_from_bytes; 17 | use tokio::{self, runtime::current_thread::Runtime}; 18 | use regex::Regex; 19 | 20 | lazy_static! { 21 | static ref APP_KEY_RE: Regex = 22 | Regex::new( 23 | r"application|([A-Z0-9]{8}-[A-Z0-9]{4}-[A-Z0-9]{4}-[A-Z0-9]{4}-[A-Z0-9]{12})" 24 | ).unwrap(); 25 | } 26 | 27 | pub trait EventHandler { 28 | /// True if the consumer should accept the incoming event. 
29 | fn accepts(&self, event: &PushNotification) -> bool; 30 | 31 | /// Try to send a push notification. If key parameter is set, the response 32 | /// will be sent with the same routing key. 33 | fn handle_notification( 34 | &self, 35 | key: Option>, 36 | event: PushNotification, 37 | ) -> Box + 'static + Send>; 38 | 39 | /// Try to send a http request. If key parameter is set, the response 40 | /// will be sent with the same routing key. 41 | fn handle_http( 42 | &self, 43 | key: Option>, 44 | event: HttpRequest, 45 | ) -> Box + 'static + Send>; 46 | 47 | /// Handle tenant configuration for connection setup. 48 | fn handle_config( 49 | &self, 50 | id: &str, 51 | config: Option 52 | ); 53 | } 54 | 55 | pub struct RequestConsumer { 56 | config_topic: String, 57 | input_topic: String, 58 | group_id: String, 59 | brokers: String, 60 | handler: H, 61 | } 62 | 63 | impl RequestConsumer { 64 | /// A kafka consumer to consume push notification events. `EventHandler` 65 | /// should contain the business logic. 66 | pub fn new(handler: H, config: &Config) -> RequestConsumer { 67 | RequestConsumer { 68 | config_topic: config.config_topic.clone(), 69 | input_topic: config.input_topic.clone(), 70 | group_id: config.group_id.clone(), 71 | brokers: config.brokers.clone(), 72 | handler, 73 | } 74 | } 75 | 76 | /// Consuming the configuration topic for tenant connection setup. A message through `control` stops the consumer. 
77 | pub fn handle_configs(&self, control: oneshot::Receiver<()>) -> Result<(), ()> { 78 | let consumer: StreamConsumer = ClientConfig::new() 79 | .set("group.id", &self.group_id) 80 | .set("bootstrap.servers", &self.brokers) 81 | .set("enable.auto.commit", "false") 82 | .set("auto.offset.reset", "earliest") 83 | .set("enable.partition.eof", "false") 84 | .create() 85 | .expect("Consumer creation failed"); 86 | 87 | let mut partitions = TopicPartitionList::new(); 88 | 89 | for partition in 0..12 { 90 | partitions.add_partition_offset( 91 | &self.config_topic, 92 | partition, 93 | Offset::Beginning 94 | ); 95 | } 96 | 97 | consumer.assign(&partitions).expect("Can't subscribe to specified topics"); 98 | 99 | info!("Starting config processing"); 100 | 101 | self.handler(consumer, control, &|msg: BorrowedMessage| { 102 | let convert_key = msg.key().and_then(|key| { 103 | String::from_utf8(key.to_vec()).ok() 104 | }); 105 | 106 | match convert_key { 107 | Some(ref key) if APP_KEY_RE.is_match(key) => { 108 | let type_parsing = msg.payload().and_then(|payload| { 109 | parse_from_bytes::(&payload).ok() 110 | }); 111 | 112 | let application_id: &str = key 113 | .split('|') 114 | .collect::>()[1]; 115 | 116 | match type_parsing { 117 | Some(ref decoder) => { 118 | match decoder.get_header().get_field_type() { 119 | "application.Application" => { 120 | debug!( 121 | "Got application configuration"; 122 | "universe" => application_id, 123 | "key" => key 124 | ); 125 | 126 | self.handle_config(application_id, Some(&msg)) 127 | } 128 | t => 129 | debug!("Invalid type: {}", t), 130 | } 131 | } 132 | None => { 133 | debug!( 134 | "Got null configuration"; 135 | "universe" => application_id, 136 | "key" => key 137 | ); 138 | 139 | self.handle_config(application_id, None); 140 | } 141 | } 142 | } 143 | _ => debug!("Not an application configuration here") 144 | } 145 | 146 | Ok(()) 147 | }) 148 | } 149 | 150 | /// Consume until event is sent through `control`. 
151 | pub fn handle_requests(&self, control: oneshot::Receiver<()>) -> Result<(), ()> { 152 | let consumer: StreamConsumer = ClientConfig::new() 153 | .set("group.id", &self.group_id) 154 | .set("bootstrap.servers", &self.brokers) 155 | .set("enable.auto.commit", "true") 156 | .set("auto.offset.reset", "latest") 157 | .set("enable.partition.eof", "false") 158 | .create() 159 | .expect("Consumer creation failed"); 160 | 161 | consumer.subscribe(&[&self.input_topic]).expect("Can't subscribe to specified topics"); 162 | 163 | info!("Starting events processing"); 164 | 165 | self.handler(consumer, control, &|msg: BorrowedMessage| { 166 | debug!( 167 | "Got message"; 168 | "topic" => msg.topic(), 169 | "key" => msg.key().and_then(|key| String::from_utf8(key.to_vec()).ok()) 170 | ); 171 | 172 | let type_parsing = msg.payload() 173 | .and_then(|payload| parse_from_bytes::(&payload).ok()); 174 | 175 | match type_parsing { 176 | Some(ref decoder) => { 177 | match decoder.get_header().get_field_type() { 178 | "notification.PushNotification" => 179 | self.handle_push(&msg), 180 | "http.HttpRequest" => 181 | self.handle_http(&msg), 182 | t => 183 | debug!("Invalid type: {}", t), 184 | 185 | } 186 | } 187 | None => { 188 | error!("Invalid RPC request"); 189 | } 190 | } 191 | 192 | Ok(()) 193 | }) 194 | } 195 | 196 | fn handler( 197 | &self, 198 | consumer: StreamConsumer, 199 | control: oneshot::Receiver<()>, 200 | process_event: &Fn(BorrowedMessage) -> Result<(), ()> 201 | ) -> Result<(), ()> { 202 | let mut core = Runtime::new().unwrap(); 203 | 204 | let processed_stream = consumer 205 | .start() 206 | .filter_map(|result| match result { 207 | Ok(msg) => Some(msg), 208 | Err(e) => { 209 | warn!("Error while receiving from Kafka: {:?}", e); 210 | None 211 | } 212 | }) 213 | .for_each(|msg| process_event(msg)) 214 | .select2(control) 215 | .then(|_| consumer.commit_consumer_state(CommitMode::Sync)); 216 | 217 | core.block_on(processed_stream).unwrap(); 218 | 219 | Ok(()) 220 | 
} 221 | 222 | fn handle_push(&self, msg: &BorrowedMessage) { 223 | let event_parsing = msg.payload() 224 | .and_then(|payload| parse_from_bytes::(payload).ok()); 225 | 226 | match event_parsing { 227 | Some(event) => { 228 | if self.handler.accepts(&event) { 229 | let notification_handling = self.handler.handle_notification( 230 | msg.key().map(|key| key.to_vec()), 231 | event 232 | ); 233 | 234 | tokio::spawn(notification_handling); 235 | } else { 236 | debug!("Push notification skipped"); 237 | } 238 | } 239 | None => { 240 | error!("Error parsing a PushNotification event"); 241 | } 242 | } 243 | } 244 | 245 | fn handle_http(&self, msg: &BorrowedMessage) { 246 | let event_parsing = msg.payload() 247 | .and_then(|payload| parse_from_bytes::(payload).ok()); 248 | 249 | match event_parsing { 250 | Some(event) => { 251 | let http_handling = self.handler.handle_http( 252 | msg.key().map(|key| key.to_vec()), 253 | event 254 | ); 255 | 256 | tokio::spawn(http_handling); 257 | } 258 | None => { 259 | debug!("Not a HttpRequest event this one here"); 260 | } 261 | } 262 | } 263 | 264 | fn handle_config(&self, msg_id: &str, msg: Option<&BorrowedMessage>) { 265 | let event = msg 266 | .and_then(|msg| msg.payload()) 267 | .and_then(|payload| parse_from_bytes::(&payload).ok()); 268 | 269 | self.handler.handle_config(msg_id, event); 270 | } 271 | } 272 | -------------------------------------------------------------------------------- /src/common/kafka/response_producer.rs: -------------------------------------------------------------------------------- 1 | use rdkafka::{ 2 | config::ClientConfig, 3 | producer::future_producer::{ 4 | DeliveryFuture, 5 | FutureProducer, 6 | FutureRecord, 7 | }, 8 | }; 9 | 10 | use kafka::Config; 11 | use protobuf::Message; 12 | use std::sync::Arc; 13 | 14 | struct Kafka { 15 | output_topic: String, 16 | producer: FutureProducer, 17 | } 18 | 19 | pub struct ResponseProducer { 20 | kafka: Arc, 21 | } 22 | 23 | impl ResponseProducer { 24 | /// 
Producer to send responses to notification events. 25 | pub fn new(config: &Config) -> ResponseProducer { 26 | let producer = ClientConfig::new() 27 | .set("bootstrap.servers", &config.brokers) 28 | .set("produce.offset.report", "true") 29 | .create() 30 | .expect("Producer creation error"); 31 | 32 | let kafka = Arc::new(Kafka { 33 | output_topic: config.output_topic.clone(), 34 | producer, 35 | }); 36 | 37 | ResponseProducer { kafka } 38 | } 39 | 40 | /// Send the push response. If key is set, sets the routing key in the Kafka 41 | /// message. 42 | pub fn publish( 43 | &self, 44 | key: Option>, 45 | event: &Message, 46 | ) -> DeliveryFuture { 47 | let payload = event.write_to_bytes().unwrap(); 48 | 49 | let record = FutureRecord { 50 | topic: &self.kafka.output_topic, 51 | partition: None, 52 | payload: Some(&payload), 53 | key: key.as_ref(), 54 | timestamp: None, 55 | headers: None, 56 | }; 57 | 58 | self.kafka.producer.send::, Vec>(record, -1) 59 | } 60 | } 61 | 62 | impl Clone for ResponseProducer { 63 | fn clone(&self) -> Self { 64 | ResponseProducer { 65 | kafka: self.kafka.clone(), 66 | } 67 | } 68 | } 69 | -------------------------------------------------------------------------------- /src/common/lib.rs: -------------------------------------------------------------------------------- 1 | #[macro_use] extern crate chan; 2 | #[macro_use] extern crate lazy_static; 3 | #[macro_use] extern crate prometheus; 4 | #[macro_use] extern crate serde_derive; 5 | #[macro_use] extern crate slog; 6 | #[macro_use] extern crate slog_scope; 7 | 8 | extern crate a2; 9 | extern crate argparse; 10 | extern crate chan_signal; 11 | extern crate chrono; 12 | extern crate erased_serde; 13 | extern crate futures; 14 | extern crate http; 15 | extern crate hyper; 16 | extern crate protobuf; 17 | extern crate rdkafka; 18 | extern crate serde; 19 | extern crate tokio; 20 | extern crate toml; 21 | extern crate web_push; 22 | extern crate slog_json; 23 | extern crate slog_async; 24 | 
extern crate slog_term; 25 | extern crate regex; 26 | 27 | pub mod config; 28 | pub mod events; 29 | pub mod kafka; 30 | pub mod logger; 31 | pub mod metrics; 32 | pub mod system; 33 | -------------------------------------------------------------------------------- /src/common/logger.rs: -------------------------------------------------------------------------------- 1 | use std::{env, io}; 2 | use slog::{self, Drain, Record, Serializer, KV, Key}; 3 | use slog_term::{TermDecorator, CompactFormat}; 4 | use slog_async::Async; 5 | use slog_json::Json; 6 | 7 | use events::{ 8 | push_notification::PushNotification, 9 | http_response::HttpResponse, 10 | http_request::HttpRequest, 11 | application::Application 12 | }; 13 | 14 | #[derive(Debug)] 15 | pub enum LogAction { 16 | ConsumerCreate, 17 | ConsumerDelete, 18 | NotificationResult, 19 | } 20 | 21 | pub struct Logger; 22 | 23 | impl Logger { 24 | /// Builds a new logger. Depending on `LOG_FORMAT` environment variable, 25 | /// either produces colorful text or JSON. 
26 | pub fn build(application_name: &'static str) -> slog::Logger { 27 | let drain = match env::var("LOG_FORMAT") { 28 | Ok(ref val) if val == "json" => { 29 | let drain = Json::new(io::stdout()).add_default_keys().build().fuse(); 30 | Async::new(drain).build().fuse() 31 | } 32 | _ => { 33 | let decorator = TermDecorator::new().stdout().build(); 34 | let drain = CompactFormat::new(decorator).build().fuse(); 35 | Async::new(drain).build().fuse() 36 | } 37 | }; 38 | 39 | let environment = env::var("RUST_ENV") 40 | .unwrap_or_else(|_| String::from("development")); 41 | 42 | slog::Logger::root( 43 | drain, 44 | o!("application_name" => application_name, "environment" => environment) 45 | ) 46 | } 47 | } 48 | 49 | impl KV for PushNotification { 50 | fn serialize(&self, _record: &Record, serializer: &mut Serializer) -> slog::Result { 51 | serializer.emit_str("device_token", self.get_device_token())?; 52 | serializer.emit_str("universe", self.get_universe())?; 53 | serializer.emit_str("correlation_id", self.get_header().get_correlation_id())?; 54 | 55 | Ok(()) 56 | } 57 | } 58 | 59 | impl slog::Value for HttpResponse { 60 | fn serialize(&self, _record: &Record, _key: Key, serializer: &mut Serializer) -> slog::Result { 61 | if self.has_payload() { 62 | serializer.emit_str( 63 | "status_code", 64 | &self.get_payload().get_status_code().to_string(), 65 | ) 66 | } else { 67 | serializer.emit_str( 68 | "status_code", 69 | &format!("{:?}", self.get_connection_error()), 70 | ) 71 | } 72 | } 73 | } 74 | 75 | impl slog::Value for HttpRequest { 76 | fn serialize(&self, _record: &Record, _key: Key, serializer: &mut Serializer) -> slog::Result { 77 | serializer.emit_str("correlation_id", self.get_header().get_correlation_id())?; 78 | serializer.emit_str("request_type", self.get_request_type().as_ref())?; 79 | serializer.emit_str("request_body", self.get_body())?; 80 | serializer.emit_str("timeout", &self.get_timeout().to_string())?; 81 | 82 | let mut curl = format!("curl -X {} ", 
self.get_request_type().as_ref()); 83 | 84 | if self.has_body() { 85 | curl.push_str("--data \""); 86 | curl.push_str(self.get_body().replace("\"", "\\\"").as_ref()); 87 | curl.push_str("\" "); 88 | } 89 | 90 | for (key, value) in self.get_headers().iter() { 91 | curl.push_str("-H \""); 92 | curl.push_str(key); 93 | curl.push_str(": "); 94 | curl.push_str(value); 95 | curl.push_str("\" "); 96 | } 97 | 98 | curl.push_str(self.get_uri()); 99 | 100 | if !self.get_params().is_empty() { 101 | curl.push_str("?"); 102 | 103 | for (key, value) in self.get_params().iter() { 104 | curl.push_str(key); 105 | curl.push_str("="); 106 | curl.push_str(value) 107 | } 108 | } 109 | 110 | serializer.emit_str("curl", curl.as_ref())?; 111 | 112 | Ok(()) 113 | } 114 | } 115 | 116 | impl KV for Application { 117 | fn serialize(&self, _record: &Record, serializer: &mut Serializer) -> slog::Result { 118 | serializer.emit_str("app_id", self.get_id())?; 119 | 120 | if self.has_organization() { 121 | serializer.emit_str("organization", self.get_organization())?; 122 | } 123 | 124 | Ok(()) 125 | } 126 | } 127 | -------------------------------------------------------------------------------- /src/common/metrics.rs: -------------------------------------------------------------------------------- 1 | use http::header; 2 | use prometheus::{self, CounterVec, Encoder, Gauge, Histogram, TextEncoder}; 3 | use std::env; 4 | 5 | use hyper::{rt, Body, Request, Response, Server, service::service_fn_ok}; 6 | 7 | use futures::{Future, sync::oneshot::Receiver}; 8 | 9 | lazy_static! 
{ 10 | pub static ref CALLBACKS_COUNTER: CounterVec = register_counter_vec!( 11 | "push_notifications_total", 12 | "Total number of push notifications responded.", 13 | &["status"] 14 | ).unwrap(); 15 | pub static ref CALLBACKS_INFLIGHT: Gauge = register_gauge!( 16 | "push_notifications_in_flight", 17 | "Number of push notifications in flight" 18 | ).unwrap(); 19 | pub static ref RESPONSE_TIMES_HISTOGRAM: Histogram = register_histogram!( 20 | "http_request_latency_seconds", 21 | "The HTTP request latencies in seconds" 22 | ).unwrap(); 23 | pub static ref TOKEN_CONSUMERS: Gauge = register_gauge!( 24 | "apns_token_consumers", 25 | "Number of token-based consumers to Apple push notification service" 26 | ).unwrap(); 27 | pub static ref CERTIFICATE_CONSUMERS: Gauge = register_gauge!( 28 | "apns_certificate_consumers", 29 | "Number of certificate-based consumers to Apple push notification service" 30 | ).unwrap(); 31 | pub static ref NUMBER_OF_APPLICATIONS: Gauge = register_gauge!( 32 | "push_notications_number_of_applications", 33 | "Number of applications sending push notifications" 34 | ).unwrap(); 35 | } 36 | 37 | #[derive(Clone, Copy)] 38 | pub struct StatisticsServer; 39 | 40 | impl StatisticsServer { 41 | fn prometheus(_: Request) -> Response { 42 | let encoder = TextEncoder::new(); 43 | let metric_families = prometheus::gather(); 44 | let mut buffer = vec![]; 45 | let mut builder = Response::builder(); 46 | 47 | encoder.encode(&metric_families, &mut buffer).unwrap(); 48 | 49 | builder.header(header::CONTENT_TYPE, encoder.format_type()); 50 | 51 | builder.body(buffer.into()).unwrap() 52 | } 53 | 54 | pub fn handle(rx: Receiver<()>) { 55 | let port = match env::var("PORT") { 56 | Ok(val) => val, 57 | Err(_) => String::from("8081"), 58 | }; 59 | 60 | let addr = format!("0.0.0.0:{}", port).parse().unwrap(); 61 | 62 | let server = Server::bind(&addr) 63 | .serve(|| service_fn_ok(Self::prometheus)) 64 | .map_err(|e| eprintln!("server error: {}", e)); 65 | 66 | 
rt::run(server.select2(rx).then(move |_| Ok(()))); 67 | } 68 | } 69 | -------------------------------------------------------------------------------- /src/common/system.rs: -------------------------------------------------------------------------------- 1 | use chan_signal::{notify, Signal}; 2 | use config::Config; 3 | use kafka::EventHandler; 4 | use kafka::RequestConsumer; 5 | use metrics::StatisticsServer; 6 | use std::{thread, thread::JoinHandle, sync::Arc}; 7 | use futures::sync::oneshot; 8 | use logger::Logger; 9 | use slog_scope; 10 | 11 | pub struct System; 12 | 13 | impl System { 14 | /// Starts the push system for given handler and configuration. 15 | pub fn start(name: &'static str, handler: H, config: &Config) 16 | where 17 | H: EventHandler + Send + Sync + 'static, 18 | { 19 | let exit_signal = notify(&[Signal::INT, Signal::TERM]); 20 | let (server_tx, server_rx) = oneshot::channel(); 21 | let (request_tx, request_rx) = oneshot::channel(); 22 | let (config_tx, config_rx) = oneshot::channel(); 23 | 24 | let logger = Logger::build(name); 25 | let _log_guard = slog_scope::set_global_logger(logger); 26 | 27 | slog_scope::scope(&slog_scope::logger().new(slog_o!()), || { 28 | info!("Bringing up the system"); 29 | 30 | let mut threads: Vec> = Vec::new(); 31 | let consumer = Arc::new(RequestConsumer::new(handler, &config.kafka)); 32 | 33 | threads.push({ 34 | let consumer = consumer.clone(); 35 | thread::spawn(move || { 36 | info!("Starting the request consumer"); 37 | 38 | if let Err(error) = consumer.handle_requests(request_rx) { 39 | error!("Error in request consumer"; "error" => format!("{:?}", error)); 40 | } 41 | 42 | info!("Exiting request consumer"); 43 | }) 44 | }); 45 | 46 | threads.push({ 47 | let consumer = consumer.clone(); 48 | thread::spawn(move || { 49 | info!("Starting the config consumer"); 50 | 51 | if let Err(error) = consumer.handle_configs(config_rx) { 52 | error!("Error in config consumer"; "error" => format!("{:?}", error)); 53 | } 54 
| 55 | info!("Exiting config consumer"); 56 | }) 57 | }); 58 | 59 | threads.push({ 60 | thread::spawn(move || { 61 | info!("Starting statistics server"); 62 | StatisticsServer::handle(server_rx); 63 | info!("Exiting statistics server"); 64 | }) 65 | }); 66 | 67 | chan_select! { 68 | exit_signal.recv() -> signal => { 69 | info!("Received signal"; "signal" => format!("{:?}", signal)); 70 | 71 | server_tx.send(()).unwrap(); 72 | request_tx.send(()).unwrap(); 73 | config_tx.send(()).unwrap(); 74 | 75 | for thread in threads { 76 | thread.thread().unpark(); 77 | thread.join().unwrap(); 78 | } 79 | }, 80 | } 81 | }) 82 | } 83 | } 84 | -------------------------------------------------------------------------------- /src/fcm/consumer.rs: -------------------------------------------------------------------------------- 1 | use std::collections::HashMap; 2 | 3 | use common::{ 4 | events::{ 5 | application::Application, 6 | push_notification::PushNotification, 7 | http_request::HttpRequest, 8 | }, 9 | kafka::EventHandler, 10 | metrics::* 11 | }; 12 | 13 | use futures::{Future, future::ok}; 14 | 15 | use std::sync::RwLock; 16 | use notifier::Notifier; 17 | use producer::FcmProducer; 18 | 19 | pub struct FcmHandler { 20 | producer: FcmProducer, 21 | api_keys: RwLock>, 22 | notifier: Notifier, 23 | } 24 | 25 | impl FcmHandler { 26 | pub fn new() -> FcmHandler { 27 | let api_keys = RwLock::new(HashMap::new()); 28 | let producer = FcmProducer::new(); 29 | let notifier = Notifier::new(); 30 | 31 | FcmHandler { 32 | producer, 33 | api_keys, 34 | notifier, 35 | } 36 | } 37 | 38 | fn delete_key(&self, id: &str) { 39 | if self.api_keys.write().unwrap().remove(id).is_some() { 40 | self.set_app_counter(); 41 | info!("Application removed"; "universe" => id); 42 | }; 43 | } 44 | 45 | fn set_app_counter(&self) { 46 | NUMBER_OF_APPLICATIONS.set(self.api_keys.read().unwrap().len() as f64); 47 | } 48 | } 49 | 50 | impl EventHandler for FcmHandler { 51 | fn accepts(&self, event: 
&PushNotification) -> bool { 52 | event.has_google() 53 | } 54 | 55 | fn handle_notification( 56 | &self, 57 | key: Option>, 58 | event: PushNotification, 59 | ) -> Box + 'static + Send> { 60 | let timer = RESPONSE_TIMES_HISTOGRAM.start_timer(); 61 | CALLBACKS_INFLIGHT.inc(); 62 | 63 | if let Some(api_key) = self.api_keys.read().unwrap().get(event.get_universe()) { 64 | let producer = self.producer.clone(); 65 | 66 | Box::new( 67 | self.notifier 68 | .notify(&event, api_key) 69 | .then(move |result| { 70 | timer.observe_duration(); 71 | CALLBACKS_INFLIGHT.dec(); 72 | 73 | match result { 74 | Ok(response) => producer.handle_response(key, event, response), 75 | Err(error) => producer.handle_error(key, event, error), 76 | } 77 | }) 78 | .then(|_| ok(())), 79 | ) 80 | } else { 81 | Box::new(self.producer.handle_no_cert(key, event).then(|_| ok(()))) 82 | } 83 | } 84 | 85 | fn handle_http( 86 | &self, 87 | _: Option>, 88 | _: HttpRequest 89 | ) -> Box + 'static + Send> { 90 | warn!("We don't handle http request events here"); 91 | Box::new(ok(())) 92 | } 93 | 94 | fn handle_config(&self, id: &str, application: Option) { 95 | match application { 96 | None => { 97 | self.delete_key(id); 98 | } 99 | Some(application) => { 100 | let application_id = application.get_id(); 101 | 102 | if !application.has_android_config() { 103 | self.delete_key(application_id); 104 | return; 105 | } 106 | 107 | let android_config = application.get_android_config(); 108 | 109 | if !android_config.get_enabled() { 110 | self.delete_key(application_id); 111 | return; 112 | } 113 | 114 | if !android_config.has_fcm_api_key() { 115 | self.delete_key(application_id); 116 | return; 117 | } 118 | 119 | let api_key = android_config.get_fcm_api_key(); 120 | 121 | info!( 122 | "Updating application configuration"; 123 | &application, 124 | "fcm_api_key" => api_key 125 | ); 126 | 127 | self.api_keys.write().unwrap().insert( 128 | String::from(application_id), 129 | String::from(api_key), 130 | ); 131 | 132 
| self.set_app_counter(); 133 | } 134 | } 135 | } 136 | } 137 | -------------------------------------------------------------------------------- /src/fcm/main.rs: -------------------------------------------------------------------------------- 1 | #[macro_use] extern crate lazy_static; 2 | #[macro_use] extern crate slog; 3 | #[macro_use] extern crate slog_scope; 4 | 5 | extern crate common; 6 | extern crate fcm; 7 | extern crate futures; 8 | 9 | mod consumer; 10 | mod notifier; 11 | mod producer; 12 | 13 | use common::{config::Config, system::System}; 14 | 15 | use consumer::FcmHandler; 16 | use std::env; 17 | 18 | lazy_static! { 19 | pub static ref CONFIG: Config = match env::var("CONFIG") { 20 | Ok(config_file_location) => Config::parse(&config_file_location), 21 | _ => Config::parse("./config/fcm.toml"), 22 | }; 23 | } 24 | 25 | fn main() { 26 | System::start( 27 | "fcm", 28 | FcmHandler::new(), 29 | &CONFIG, 30 | ); 31 | } 32 | -------------------------------------------------------------------------------- /src/fcm/notifier.rs: -------------------------------------------------------------------------------- 1 | use common::events::{google_notification::GoogleNotification_Priority, 2 | push_notification::PushNotification}; 3 | use fcm::*; 4 | 5 | pub struct Notifier { 6 | client: Client, 7 | } 8 | 9 | impl Notifier { 10 | pub fn new() -> Notifier { 11 | Notifier { 12 | client: Client::new().unwrap(), 13 | } 14 | } 15 | 16 | pub fn notify(&self, event: &PushNotification, api_key: &str) -> FutureResponse { 17 | self.client.send(Self::gen_payload(event, api_key)) 18 | } 19 | 20 | fn gen_payload<'a>(pn: &'a PushNotification, api_key: &'a str) -> Message<'a> { 21 | let notification = pn.get_google(); 22 | let mut message = MessageBuilder::new(api_key, pn.get_device_token()); 23 | 24 | if notification.has_localized() { 25 | let localized = notification.get_localized(); 26 | let mut builder = NotificationBuilder::new(); 27 | 28 | if localized.has_title() { 29 | 
builder.title(localized.get_title()); 30 | } 31 | if localized.has_tag() { 32 | builder.tag(localized.get_tag()); 33 | } 34 | if localized.has_body() { 35 | builder.body(localized.get_body()); 36 | } 37 | if localized.has_icon() { 38 | builder.icon(localized.get_icon()); 39 | } 40 | if localized.has_sound() { 41 | builder.sound(localized.get_sound()); 42 | } 43 | if localized.has_badge() { 44 | builder.badge(localized.get_badge()); 45 | } 46 | if localized.has_color() { 47 | builder.color(localized.get_color()); 48 | } 49 | if localized.has_click_action() { 50 | builder.click_action(localized.get_click_action()); 51 | } 52 | if localized.has_body_loc_key() { 53 | builder.body_loc_key(localized.get_body_loc_key()); 54 | } 55 | if localized.has_title_loc_key() { 56 | builder.title_loc_key(localized.get_title_loc_key()); 57 | } 58 | 59 | if !localized.get_title_loc_args().is_empty() { 60 | builder.title_loc_args(localized.get_title_loc_args()); 61 | } 62 | 63 | if !localized.get_body_loc_args().is_empty() { 64 | builder.body_loc_args(localized.get_body_loc_args()); 65 | } 66 | 67 | if let Err(e) = message.data(localized.get_data()) { 68 | error!("Couldn't encode custom data to the message: {:?}", e); 69 | } 70 | 71 | message.notification(builder.finalize()); 72 | } else { 73 | if let Err(e) = message.data(notification.get_message().get_data()) { 74 | error!("Couldn't encode custom data to the message: {:?}", e); 75 | } 76 | } 77 | 78 | if !notification.get_registration_ids().is_empty() { 79 | message.registration_ids(notification.get_registration_ids()); 80 | } 81 | 82 | if notification.has_collapse_key() { 83 | message.collapse_key(notification.get_collapse_key()); 84 | } 85 | 86 | match notification.get_priority() { 87 | GoogleNotification_Priority::Normal => { 88 | message.priority(Priority::Normal); 89 | } 90 | GoogleNotification_Priority::High => { 91 | message.priority(Priority::High); 92 | } 93 | } 94 | 95 | if notification.has_content_available() { 96 | 
message.content_available(notification.get_content_available()); 97 | } 98 | 99 | if notification.has_delay_while_idle() { 100 | message.delay_while_idle(notification.get_delay_while_idle()); 101 | } 102 | 103 | if notification.has_time_to_live() { 104 | message.time_to_live(notification.get_time_to_live()); 105 | } 106 | 107 | if notification.has_restricted_package_name() { 108 | message.restricted_package_name(notification.get_restricted_package_name()); 109 | } 110 | 111 | if notification.has_dry_run() { 112 | message.dry_run(notification.get_dry_run()); 113 | } 114 | 115 | message.finalize() 116 | } 117 | } 118 | -------------------------------------------------------------------------------- /src/fcm/producer.rs: -------------------------------------------------------------------------------- 1 | use common::{ 2 | events::{ 3 | push_result::{ 4 | PushResult, 5 | PushResult_ResponseAction as ResponseAction 6 | }, 7 | push_notification::PushNotification, 8 | }, 9 | kafka::{DeliveryFuture, ResponseProducer}, 10 | metrics::CALLBACKS_COUNTER 11 | }; 12 | 13 | use fcm::response::{FcmError, FcmResponse, ErrorReason::*}; 14 | use CONFIG; 15 | 16 | pub struct FcmProducer { 17 | producer: ResponseProducer, 18 | } 19 | 20 | impl FcmProducer { 21 | pub fn new() -> FcmProducer { 22 | FcmProducer { 23 | producer: ResponseProducer::new(&CONFIG.kafka), 24 | } 25 | } 26 | 27 | pub fn handle_no_cert( 28 | &self, 29 | key: Option>, 30 | event: PushNotification 31 | ) -> DeliveryFuture 32 | { 33 | error!( 34 | "No FCM key set for application"; 35 | &event, 36 | "successful" => false, 37 | ); 38 | 39 | CALLBACKS_COUNTER.with_label_values(&["certificate_missing"]).inc(); 40 | 41 | let result: PushResult = (event, ResponseAction::Retry).into(); 42 | self.producer.publish(key, &result) 43 | } 44 | 45 | pub fn handle_error( 46 | &self, 47 | key: Option>, 48 | event: PushNotification, 49 | error: FcmError 50 | ) -> DeliveryFuture 51 | { 52 | error!( 53 | "Error sending a push 
notification"; 54 | &event, 55 | "successful" => false, 56 | "reason" => format!("{:?}", error) 57 | ); 58 | 59 | let response_action = 60 | match error { 61 | FcmError::ServerError(_) => { 62 | CALLBACKS_COUNTER.with_label_values(&["server_error"]).inc(); 63 | ResponseAction::Retry 64 | } 65 | FcmError::Unauthorized => { 66 | CALLBACKS_COUNTER.with_label_values(&["unauthorized"]).inc(); 67 | ResponseAction::None 68 | } 69 | FcmError::InvalidMessage(_) => { 70 | CALLBACKS_COUNTER.with_label_values(&["invalid_message"]).inc(); 71 | ResponseAction::None 72 | } 73 | }; 74 | 75 | let result: PushResult = (event, response_action).into(); 76 | self.producer.publish(key, &result) 77 | } 78 | 79 | pub fn handle_response( 80 | &self, 81 | key: Option>, 82 | event: PushNotification, 83 | response: FcmResponse, 84 | ) -> DeliveryFuture { 85 | let error = response 86 | .results 87 | .as_ref() 88 | .and_then(|ref results| results.first()) 89 | .and_then(|ref result| result.error); 90 | 91 | let response_action = 92 | if let Some(ref error) = error { 93 | let status_str = format!("{:?}", error); 94 | CALLBACKS_COUNTER.with_label_values(&[&status_str]).inc(); 95 | 96 | error!( 97 | "Error sending a push notification"; 98 | &event, 99 | "successful" => false, 100 | "reason" => status_str 101 | ); 102 | 103 | match error { 104 | NotRegistered => ResponseAction::UnsubscribeEntity, 105 | _ => ResponseAction::None, 106 | } 107 | } else { 108 | ResponseAction::None 109 | }; 110 | 111 | let result: PushResult = (event, response_action).into(); 112 | self.producer.publish(key, &result) 113 | } 114 | } 115 | 116 | impl Clone for FcmProducer { 117 | fn clone(&self) -> Self { 118 | FcmProducer { 119 | producer: self.producer.clone(), 120 | } 121 | } 122 | } 123 | -------------------------------------------------------------------------------- /src/http_requester/consumer.rs: -------------------------------------------------------------------------------- 1 | use common::{ 2 | events::{ 3 | 
application::Application, 4 | push_notification::PushNotification, 5 | http_request::HttpRequest, 6 | }, 7 | kafka::EventHandler, 8 | metrics::* 9 | }; 10 | 11 | use futures::{Future, future::ok}; 12 | use requester::Requester; 13 | use producer::HttpResponseProducer; 14 | 15 | pub struct HttpRequestHandler { 16 | producer: HttpResponseProducer, 17 | requester: Requester, 18 | } 19 | 20 | impl HttpRequestHandler { 21 | pub fn new() -> HttpRequestHandler { 22 | let producer = HttpResponseProducer::new(); 23 | let requester = Requester::new(); 24 | 25 | HttpRequestHandler { 26 | producer, 27 | requester, 28 | } 29 | } 30 | } 31 | 32 | impl EventHandler for HttpRequestHandler { 33 | fn accepts(&self, _: &PushNotification) -> bool { false } 34 | 35 | fn handle_notification( 36 | &self, 37 | _: Option>, 38 | _: PushNotification, 39 | ) -> Box + 'static + Send> { 40 | warn!("We don't handle push notification events here"); 41 | Box::new(ok(())) 42 | } 43 | 44 | fn handle_http( 45 | &self, 46 | key: Option>, 47 | event: HttpRequest, 48 | ) -> Box + 'static + Send> { 49 | let producer = self.producer.clone(); 50 | 51 | let timer = RESPONSE_TIMES_HISTOGRAM.start_timer(); 52 | CALLBACKS_INFLIGHT.inc(); 53 | 54 | let request_send = self.requester.request(&event) 55 | .then(move |response| { 56 | timer.observe_duration(); 57 | CALLBACKS_INFLIGHT.dec(); 58 | producer.respond(key, event, response) 59 | }) 60 | .then(|_| ok(())); 61 | 62 | Box::new(request_send) 63 | } 64 | 65 | fn handle_config(&self, _: &str, _: Option) { 66 | debug!("Skipping configuration"); 67 | } 68 | } 69 | -------------------------------------------------------------------------------- /src/http_requester/main.rs: -------------------------------------------------------------------------------- 1 | #[macro_use] extern crate lazy_static; 2 | #[macro_use] extern crate slog; 3 | #[macro_use] extern crate slog_scope; 4 | 5 | extern crate tokio_timer; 6 | extern crate protobuf; 7 | extern crate common; 8 | 
extern crate fcm; 9 | extern crate futures; 10 | extern crate hyper; 11 | extern crate hyper_tls; 12 | extern crate http; 13 | extern crate bytes; 14 | extern crate chrono; 15 | 16 | mod consumer; 17 | mod requester; 18 | mod producer; 19 | 20 | use common::{config::Config, system::System}; 21 | 22 | use consumer::HttpRequestHandler; 23 | use std::env; 24 | 25 | lazy_static! { 26 | pub static ref CONFIG: Config = match env::var("CONFIG") { 27 | Ok(config_file_location) => Config::parse(&config_file_location), 28 | _ => Config::parse("./config/http_requester.toml"), 29 | }; 30 | } 31 | 32 | fn main() { 33 | System::start("http_requester", HttpRequestHandler::new(), &CONFIG); 34 | } 35 | -------------------------------------------------------------------------------- /src/http_requester/producer.rs: -------------------------------------------------------------------------------- 1 | use common::{ 2 | events::{ 3 | http_request::HttpRequest, 4 | http_response::*, 5 | rpc::{Response}, 6 | }, 7 | kafka::{ 8 | DeliveryFuture, 9 | ResponseProducer 10 | }, 11 | metrics::* 12 | }; 13 | use std::{collections::HashMap, str}; 14 | use requester::{HttpResult, RequestError}; 15 | 16 | use CONFIG; 17 | 18 | pub struct HttpResponseProducer { 19 | producer: ResponseProducer, 20 | } 21 | 22 | impl HttpResponseProducer { 23 | pub fn new() -> HttpResponseProducer { 24 | HttpResponseProducer { 25 | producer: ResponseProducer::new(&CONFIG.kafka) 26 | } 27 | } 28 | 29 | pub fn respond( 30 | &self, 31 | key: Option>, 32 | mut event: HttpRequest, 33 | result: Result 34 | ) -> DeliveryFuture 35 | { 36 | let mut header = Response::new(); 37 | header.set_field_type("http.HttpResponse".to_string()); 38 | header.set_request(event.take_header()); 39 | 40 | let mut response = HttpResponse::new(); 41 | response.set_header(header); 42 | 43 | match result { 44 | Ok(http_result) => { 45 | CALLBACKS_COUNTER.with_label_values(&[http_result.code.as_str()]).inc(); 46 | 47 | let mut payload = 
HttpResponse_Payload::new(); 48 | 49 | let body_vec = http_result.body.to_vec(); 50 | let body = str::from_utf8(&body_vec).unwrap_or(""); 51 | 52 | payload.set_response_body(body.to_string()); 53 | payload.set_status_code(http_result.code.as_u16() as i32); 54 | 55 | let headers = http_result 56 | .headers 57 | .into_iter() 58 | .fold(HashMap::new(), |mut acc, (key, value)| { 59 | if let Some(key) = key { 60 | acc.insert( 61 | String::from(key.as_str()), 62 | String::from(value.to_str().unwrap_or("")), 63 | ); 64 | }; 65 | 66 | acc 67 | }); 68 | 69 | payload.set_headers(headers); 70 | response.set_payload(payload); 71 | 72 | info!( 73 | "Successful HTTP request"; 74 | "request" => &event, 75 | "response" => &response 76 | ); 77 | } 78 | Err(RequestError::Timeout) => { 79 | CALLBACKS_COUNTER.with_label_values(&["timeout"]).inc(); 80 | response.set_connection_error(HttpResponse_SocketError::Timeout); 81 | 82 | error!( 83 | "HTTP request timeout"; 84 | "request" => &event, 85 | "response" => &response 86 | ); 87 | } 88 | Err(RequestError::Connection) => { 89 | CALLBACKS_COUNTER.with_label_values(&["connection"]).inc(); 90 | response.set_connection_error(HttpResponse_SocketError::ConnectionError); 91 | 92 | error!( 93 | "HTTP request connection error"; 94 | "request" => &event, 95 | "response" => &response 96 | ); 97 | } 98 | } 99 | 100 | self.producer.publish(key, &response) 101 | } 102 | } 103 | 104 | impl Clone for HttpResponseProducer { 105 | fn clone(&self) -> Self { 106 | Self { 107 | producer: self.producer.clone(), 108 | } 109 | } 110 | } 111 | -------------------------------------------------------------------------------- /src/http_requester/requester.rs: -------------------------------------------------------------------------------- 1 | use hyper::{ 2 | Request, 3 | StatusCode, 4 | Body, 5 | client::{Client, HttpConnector}, 6 | }; 7 | use futures::{ 8 | Future, 9 | stream::Stream, 10 | future::err, 11 | }; 12 | use std::{ 13 | collections::HashMap, 14 | 
time::Duration, 15 | }; 16 | use common::events::http_request::HttpRequest; 17 | use http::HeaderMap; 18 | use hyper_tls::HttpsConnector; 19 | use bytes::Bytes; 20 | use tokio_timer::Timer; 21 | 22 | pub struct Requester { 23 | client: Client>, 24 | timer: Timer, 25 | } 26 | 27 | #[derive(Debug)] 28 | pub enum RequestError { 29 | Timeout, 30 | Connection, 31 | } 32 | 33 | pub struct HttpResult { 34 | pub code: StatusCode, 35 | pub body: Bytes, 36 | pub headers: HeaderMap 37 | } 38 | 39 | impl Requester { 40 | pub fn new() -> Requester { 41 | let mut client = Client::builder(); 42 | client.keep_alive(true); 43 | 44 | Requester { 45 | client: client.build(HttpsConnector::new(4).unwrap()), 46 | timer: Timer::default(), 47 | } 48 | } 49 | 50 | pub fn request( 51 | &self, 52 | event: &HttpRequest, 53 | ) -> impl Future + 'static + Send 54 | { 55 | let mut builder = Request::builder(); 56 | builder.method(event.get_request_type().as_ref()); 57 | 58 | for (k, v) in event.get_headers().iter() { 59 | builder.header(k.as_bytes(), v.as_bytes()); 60 | } 61 | 62 | if event.get_params().is_empty() { 63 | builder.uri(event.get_uri()); 64 | } else { 65 | builder.uri(Self::uri_with_params( 66 | event.get_uri(), 67 | event.get_params(), 68 | )); 69 | }; 70 | 71 | let request: Request = 72 | builder.body(Body::from(event.get_body().as_bytes().to_vec())).unwrap(); 73 | 74 | let timeout = self.timer.sleep(Duration::from_millis(event.get_timeout())) 75 | .then(|_| err(RequestError::Timeout)); 76 | 77 | self.client 78 | .request(request) 79 | .map_err(|_| { RequestError::Connection }) 80 | .select(timeout) 81 | .map_err(|_| { RequestError::Timeout }) 82 | .and_then(move |(response, _)| { 83 | let (parts, body) = response.into_parts(); 84 | 85 | body 86 | .concat2() 87 | .map_err(|_| { RequestError::Connection }) 88 | .map(move |chunk| { 89 | HttpResult { 90 | code: parts.status, 91 | body: chunk.into_bytes(), 92 | headers: parts.headers 93 | } 94 | }) 95 | }) 96 | } 97 | 98 | fn 
uri_with_params(uri: &str, params: &HashMap) -> String { 99 | params.iter().fold(format!("{}?", uri), |mut acc, (key, value)| { 100 | acc.push_str(key.as_ref()); 101 | acc.push_str("="); 102 | acc.push_str(value.as_ref()); 103 | acc 104 | }) 105 | } 106 | } 107 | -------------------------------------------------------------------------------- /src/web_push/consumer.rs: -------------------------------------------------------------------------------- 1 | use std::collections::HashMap; 2 | 3 | use common::{ 4 | events::{ 5 | application::Application, 6 | push_notification::PushNotification, 7 | http_request::HttpRequest, 8 | }, 9 | kafka::EventHandler, 10 | metrics::* 11 | }; 12 | 13 | use futures::{Future, future::ok}; 14 | use std::sync::RwLock; 15 | use notifier::Notifier; 16 | use producer::WebPushProducer; 17 | 18 | struct ApiKey { 19 | fcm_api_key: Option, 20 | } 21 | 22 | pub struct WebPushHandler { 23 | producer: WebPushProducer, 24 | fcm_api_keys: RwLock>, 25 | notifier: Notifier, 26 | } 27 | 28 | impl WebPushHandler { 29 | pub fn new() -> WebPushHandler { 30 | let fcm_api_keys = RwLock::new(HashMap::new()); 31 | let producer = WebPushProducer::new(); 32 | let notifier = Notifier::new(); 33 | 34 | WebPushHandler { 35 | producer, 36 | fcm_api_keys, 37 | notifier, 38 | } 39 | } 40 | 41 | fn delete_key(&self, id: &str) { 42 | if self.fcm_api_keys.write().unwrap().remove(id).is_some() { 43 | self.set_app_counter(); 44 | info!("Application removed"; "universe" => id); 45 | }; 46 | } 47 | 48 | fn set_app_counter(&self) { 49 | NUMBER_OF_APPLICATIONS.set(self.fcm_api_keys.read().unwrap().len() as f64); 50 | } 51 | } 52 | 53 | impl EventHandler for WebPushHandler { 54 | fn accepts(&self, event: &PushNotification) -> bool { 55 | event.has_web() 56 | } 57 | 58 | fn handle_notification( 59 | &self, 60 | key: Option>, 61 | event: PushNotification, 62 | ) -> Box + 'static + Send> { 63 | let producer = self.producer.clone(); 64 | 65 | match 
self.fcm_api_keys.read().unwrap().get(event.get_universe()) { 66 | Some(entity) => { 67 | let timer = RESPONSE_TIMES_HISTOGRAM.start_timer(); 68 | CALLBACKS_INFLIGHT.inc(); 69 | 70 | let notification_send = self.notifier 71 | .notify(&event, entity.fcm_api_key.as_ref()) 72 | .then(move |result| { 73 | timer.observe_duration(); 74 | CALLBACKS_INFLIGHT.dec(); 75 | 76 | match result { 77 | Ok(()) => producer.handle_ok(key, event), 78 | Err(error) => producer.handle_error(key, event, &error), 79 | } 80 | }) 81 | .then(|_| ok(())); 82 | 83 | Box::new(notification_send) 84 | } 85 | None => Box::new(self.producer.handle_no_cert(key, event).then(|_| ok(()))), 86 | } 87 | } 88 | 89 | fn handle_http( 90 | &self, 91 | _: Option>, 92 | _: HttpRequest 93 | ) -> Box + 'static + Send> { 94 | warn!("We don't handle http request events here"); 95 | Box::new(ok(())) 96 | } 97 | 98 | fn handle_config(&self, id: &str, application: Option) { 99 | match application { 100 | None => { 101 | self.delete_key(id); 102 | } 103 | Some(application) => { 104 | let application_id = application.get_id(); 105 | 106 | if !application.has_web_config() { 107 | self.delete_key(application_id); 108 | return; 109 | } 110 | 111 | let web_app = application.get_web_config(); 112 | 113 | if !web_app.get_enabled() { 114 | self.delete_key(application_id); 115 | return; 116 | } 117 | 118 | if web_app.has_fcm_api_key() { 119 | let api_key = web_app 120 | .get_fcm_api_key() 121 | .to_string(); 122 | 123 | info!( 124 | "Updating application configuration"; 125 | &application, 126 | "fcm_api_key" => &api_key 127 | ); 128 | 129 | self.fcm_api_keys.write().unwrap().insert( 130 | String::from(application_id), 131 | ApiKey { fcm_api_key: Some(api_key), }, 132 | ); 133 | } else { 134 | info!( 135 | "Updating application configuration"; 136 | &application, 137 | ); 138 | 139 | self.fcm_api_keys 140 | .write() 141 | .unwrap() 142 | .insert(String::from(application_id), ApiKey { fcm_api_key: None }); 143 | } 144 | 145 | 
self.set_app_counter(); 146 | } 147 | } 148 | } 149 | } 150 | -------------------------------------------------------------------------------- /src/web_push/main.rs: -------------------------------------------------------------------------------- 1 | #[macro_use] extern crate lazy_static; 2 | #[macro_use] extern crate slog; 3 | #[macro_use] extern crate slog_scope; 4 | 5 | extern crate common; 6 | extern crate futures; 7 | extern crate hyper; 8 | extern crate tokio_signal; 9 | extern crate web_push; 10 | 11 | mod consumer; 12 | mod notifier; 13 | mod producer; 14 | 15 | use common::{config::Config, system::System}; 16 | 17 | use consumer::WebPushHandler; 18 | use std::env; 19 | 20 | lazy_static! { 21 | pub static ref CONFIG: Config = match env::var("CONFIG") { 22 | Ok(config_file_location) => Config::parse(&config_file_location), 23 | _ => Config::parse("./config/web_push.toml"), 24 | }; 25 | } 26 | 27 | fn main() { 28 | System::start( 29 | "web_push", 30 | WebPushHandler::new(), 31 | &CONFIG, 32 | ); 33 | } 34 | -------------------------------------------------------------------------------- /src/web_push/notifier.rs: -------------------------------------------------------------------------------- 1 | use std::time::Duration; 2 | use web_push::*; 3 | 4 | use futures::{Future, future::{err, Either}}; 5 | 6 | use common::events::push_notification::PushNotification; 7 | 8 | pub struct Notifier { 9 | client: WebPushClient, 10 | } 11 | 12 | impl Notifier { 13 | pub fn new() -> Notifier { 14 | Notifier { 15 | client: WebPushClient::new().unwrap(), 16 | } 17 | } 18 | 19 | pub fn notify( 20 | &self, 21 | event: &PushNotification, 22 | fcm_api_key: Option<&String>, 23 | ) -> impl Future { 24 | match Self::build_message(&event, fcm_api_key) { 25 | Ok(message) => Either::A( 26 | self.client 27 | .send_with_timeout(message, Duration::from_secs(2)), 28 | ), 29 | Err(e) => Either::B(err(e)), 30 | } 31 | } 32 | 33 | fn build_message( 34 | pn: &PushNotification, 35 | fcm_api_key: 
Option<&String>, 36 | ) -> Result { 37 | let web = pn.get_web(); 38 | 39 | let subscription_info = 40 | SubscriptionInfo::new(pn.get_device_token(), web.get_auth(), web.get_p256dh()); 41 | 42 | let mut message = WebPushMessageBuilder::new(&subscription_info)?; 43 | 44 | if web.has_payload() { 45 | message.set_payload(ContentEncoding::AesGcm, web.get_payload().as_bytes()); 46 | } 47 | 48 | if web.has_ttl() { 49 | message.set_ttl(web.get_ttl() as u32); 50 | } 51 | 52 | if let Some(key) = fcm_api_key { 53 | message.set_gcm_key(key); 54 | } 55 | 56 | message.build() 57 | } 58 | } 59 | -------------------------------------------------------------------------------- /src/web_push/producer.rs: -------------------------------------------------------------------------------- 1 | use common::{ 2 | events::{ 3 | push_notification::PushNotification, 4 | push_result::{ 5 | PushResult, 6 | PushResult_ResponseAction as ResponseAction 7 | }, 8 | }, 9 | kafka::{DeliveryFuture, ResponseProducer}, 10 | metrics::CALLBACKS_COUNTER 11 | }; 12 | 13 | use CONFIG; 14 | 15 | use web_push::{*, WebPushError::*}; 16 | 17 | pub struct WebPushProducer { 18 | producer: ResponseProducer, 19 | } 20 | 21 | impl WebPushProducer { 22 | pub fn new() -> WebPushProducer { 23 | WebPushProducer { 24 | producer: ResponseProducer::new(&CONFIG.kafka), 25 | } 26 | } 27 | 28 | pub fn handle_ok( 29 | &self, 30 | key: Option>, 31 | event: PushNotification 32 | ) -> DeliveryFuture 33 | { 34 | info!( 35 | "Successfully sent a push notification"; 36 | &event, 37 | "successful" => true 38 | ); 39 | 40 | CALLBACKS_COUNTER.with_label_values(&["success"]).inc(); 41 | 42 | let result: PushResult = (event, ResponseAction::None).into(); 43 | self.producer.publish(key, &result) 44 | } 45 | 46 | pub fn handle_no_cert( 47 | &self, 48 | key: Option>, 49 | event: PushNotification 50 | ) -> DeliveryFuture 51 | { 52 | error!( 53 | "Application is not configured to send web push messages"; 54 | &event, 55 | "successful" => false 
56 | ); 57 | 58 | CALLBACKS_COUNTER.with_label_values(&["certificate_missing"]).inc(); 59 | 60 | let result: PushResult = (event, ResponseAction::Retry).into(); 61 | self.producer.publish(key, &result) 62 | } 63 | 64 | pub fn handle_error( 65 | &self, 66 | key: Option>, 67 | event: PushNotification, 68 | error: &WebPushError 69 | ) -> DeliveryFuture 70 | { 71 | error!( 72 | "Error sending a push notification"; 73 | &event, 74 | "successful" => false, 75 | "reason" => format!("{:?}", error) 76 | ); 77 | 78 | let response_action = 79 | match error { 80 | ServerError(_) | TimeoutError => ResponseAction::Retry, 81 | EndpointNotFound | EndpointNotValid => ResponseAction::UnsubscribeEntity, 82 | _ => ResponseAction::None, 83 | }; 84 | 85 | CALLBACKS_COUNTER.with_label_values(&[error.short_description()]).inc(); 86 | 87 | let result: PushResult = (event, response_action).into(); 88 | self.producer.publish(key, &result) 89 | } 90 | } 91 | 92 | impl Clone for WebPushProducer { 93 | fn clone(&self) -> Self { 94 | Self { 95 | producer: self.producer.clone(), 96 | } 97 | } 98 | } 99 | --------------------------------------------------------------------------------