├── dcompose-stack ├── .gitignore ├── logging │ ├── .gitignore │ ├── graylog.env.template │ ├── docker-compose.yml │ └── README.md ├── firebase-app-server │ ├── .gitignore │ ├── etc │ │ ├── sqltool.rc │ │ ├── env.template │ │ └── server.properties │ ├── bin │ │ ├── start-xmpp │ │ ├── get-subject-data.sh │ │ ├── install-systemd │ │ └── log-parser.py │ ├── lib │ │ └── systemd │ │ │ └── radar-xmpp-server.service.template │ ├── docker-compose.yml │ └── README.md ├── radar-cp-s3-stack │ ├── lib │ │ ├── .gitignore │ │ ├── systemd │ │ │ ├── radar-check-health.timer.template │ │ │ ├── radar-check-health.service.template │ │ │ ├── radar-renew-certificate.service.template │ │ │ ├── radar-renew-certificate.timer.template │ │ │ └── radar-docker.service.template │ │ ├── self-sign-certificate.sh │ │ ├── install-dashboard-pipeline.sh │ │ ├── install-systemd-wrappers.sh │ │ └── check-health.sh │ ├── etc │ │ ├── schema │ │ │ ├── commons │ │ │ │ └── .gitkeep │ │ │ └── specifications │ │ │ │ └── .gitkeep │ │ ├── webserver │ │ │ ├── ip-access-control.conf.template │ │ │ ├── dashboard-pipeline.conf.template │ │ │ ├── optional-services.conf.template │ │ │ ├── cors.conf │ │ │ └── nginx.nossl.conf.template │ │ ├── smtp.env.template │ │ ├── rest-source-authorizer │ │ │ └── rest_source_clients_configs.yml.template │ │ ├── fitbit │ │ │ └── docker │ │ │ │ ├── source-fitbit.properties.template │ │ │ │ └── users │ │ │ │ └── fitbit-user.yml.template │ │ ├── .gitignore │ │ ├── mongodb-connector │ │ │ └── sink-mongo.properties.template │ │ ├── redcap-integration │ │ │ └── radar.yml.template │ │ ├── s3-connector │ │ │ └── sink-s3.properties.template │ │ ├── rest-api │ │ │ └── radar.yml.template │ │ ├── managementportal │ │ │ └── config │ │ │ │ └── oauth_client_details.csv.template │ │ ├── env.template │ │ ├── gateway │ │ │ └── gateway.yml │ │ └── radar-backend │ │ │ └── radar.yml.template │ ├── ci │ │ ├── ci-smtp.template │ │ ├── setup-env.sh │ │ └── ci-env.template │ ├── images │ │ └── postgres │ │ │ ├── Dockerfile │ │ │ ├── on-db-ready │ │ │ └── multi-db-init.sh │ ├── bin │ │ ├── radar-cert-renew │ │ ├── docker-prune │ │ ├── radar-kafka-consumer │ │ ├── postgres-upgrade │ │ ├── radar-log │ │ ├── keystore-init │ │ └── radar-docker │ ├── radarbase-kafka-streams.yml │ ├── optional-services.yml │ └── dashboard-pipeline.yml └── radar-cp-hadoop-stack │ ├── lib │ ├── .gitignore │ ├── systemd │ │ ├── radar-check-health.timer.template │ │ ├── radar-check-health.service.template │ │ ├── radar-renew-certificate.service.template │ │ ├── radar-renew-certificate.timer.template │ │ └── radar-docker.service.template │ ├── self-sign-certificate.sh │ ├── install-systemd-wrappers.sh │ └── check-health.sh │ ├── etc │ ├── schema │ │ ├── commons │ │ │ └── .gitkeep │ │ └── specifications │ │ │ └── .gitkeep │ ├── webserver │ │ ├── ip-access-control.conf.template │ │ ├── optional-services.conf.template │ │ ├── cors.conf │ │ └── nginx.nossl.conf.template │ ├── smtp.env.template │ ├── gateway │ │ └── gateway.yml │ ├── rest-source-authorizer │ │ └── rest_source_clients_configs.yml.template │ ├── hdfs-connector │ │ ├── sink-hdfs-high.properties │ │ ├── sink-hdfs-med.properties │ │ ├── sink-hdfs-low.properties │ │ └── sink-hdfs.properties.template │ ├── fitbit │ │ └── docker │ │ │ ├── source-fitbit.properties.template │ │ │ └── users │ │ │ └── fitbit-user.yml.template │ ├── jdbc-connector │ │ └── sink-timescale.properties.template │ ├── .gitignore │ ├── mongodb-connector │ │ └── sink-mongo.properties.template │ ├── redcap-integration │ │ └── 
radar.yml.template │ ├── rest-api │ │ └── radar.yml.template │ ├── managementportal │ │ └── config │ │ │ └── oauth_client_details.csv.template │ ├── env.template │ ├── hdfs-restructure │ │ └── restructure.yml.template │ └── radar-backend │ │ └── radar.yml.template │ ├── .gitignore │ ├── ci │ ├── ci-smtp.template │ ├── setup-env.sh │ └── ci-env.template │ ├── hash-backup │ ├── systemd │ │ ├── radar-hashbackup.service.template │ │ └── radar-hashbackup.timer.template │ ├── dest.conf │ ├── run-backup.sh │ ├── backup.conf │ ├── initialize-hb.sh │ └── README.md │ ├── images │ ├── postgres │ │ ├── Dockerfile │ │ ├── on-db-ready │ │ └── multi-db-init.sh │ └── hdfs │ │ ├── Dockerfile │ │ ├── entrypoint.sh │ │ └── hdfs-site.xml.mustache │ ├── bin │ ├── radar-cert-renew │ ├── docker-prune │ ├── hdfs-extract │ ├── hdfs-upgrade │ ├── radar-kafka-consumer │ ├── postgres-upgrade │ ├── radar-log │ ├── radar-docker │ └── keystore-init │ └── postgres-backup │ ├── README.md │ └── scripts │ └── pg_backup.config ├── scripts ├── hdfs-data-retention │ ├── topics_to_remove.txt │ ├── hdfs_get_relevant_files.pig │ └── hdfs_data_retention.sh ├── stage-runner │ ├── start.sh │ ├── checkhealth.sh │ ├── stop.sh │ └── configure.sh ├── check_radar_network.sh ├── util.sh └── README.md ├── images ├── kafka-manager │ ├── entrypoint.sh │ ├── img │ │ └── add_cluster.png │ ├── Dockerfile │ ├── conf │ │ └── application.conf │ └── README.md ├── radar-hotstorage-mongodb │ ├── Dockerfile │ ├── init.sh │ ├── README.md │ └── db_init.sh └── hsqldb │ ├── README.md │ └── Dockerfile ├── wip ├── radar-cp-swarm-stack │ └── README.md └── radar-cp-sasl-stack │ └── secrets │ ├── consumer_jaas.conf │ ├── producer_jaas.conf │ ├── host.producer.ssl.config │ ├── host.consumer.ssl.config │ ├── host.producer.ssl.sasl.config │ ├── host.consumer.ssl.sasl.config │ ├── zookeeper_1_jaas.conf │ ├── zookeeper_2_jaas.conf │ ├── zookeeper_3_jaas.conf │ ├── krb.conf │ ├── broker1_jaas.conf │ ├── broker2_jaas.conf │ ├── broker3_jaas.conf │ └── create-certs.sh ├── appspec.yml └── .github └── workflows └── main.yml /dcompose-stack/.gitignore: -------------------------------------------------------------------------------- 1 | .DS_Store 2 | -------------------------------------------------------------------------------- /dcompose-stack/logging/.gitignore: -------------------------------------------------------------------------------- 1 | *.env 2 | -------------------------------------------------------------------------------- /dcompose-stack/firebase-app-server/.gitignore: -------------------------------------------------------------------------------- 1 | .env 2 | -------------------------------------------------------------------------------- /dcompose-stack/radar-cp-s3-stack/lib/.gitignore: -------------------------------------------------------------------------------- 1 | *.jar 2 | -------------------------------------------------------------------------------- /dcompose-stack/radar-cp-hadoop-stack/lib/.gitignore: -------------------------------------------------------------------------------- 1 | *.jar 2 | -------------------------------------------------------------------------------- /dcompose-stack/radar-cp-s3-stack/etc/schema/commons/.gitkeep: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /dcompose-stack/radar-cp-hadoop-stack/etc/schema/commons/.gitkeep: 
-------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /dcompose-stack/radar-cp-s3-stack/etc/schema/specifications/.gitkeep: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /dcompose-stack/radar-cp-hadoop-stack/etc/schema/specifications/.gitkeep: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /dcompose-stack/radar-cp-hadoop-stack/.gitignore: -------------------------------------------------------------------------------- 1 | /.env 2 | /etc/smtp.env 3 | /radar.yml 4 | /output/ 5 | -------------------------------------------------------------------------------- /scripts/hdfs-data-retention/topics_to_remove.txt: -------------------------------------------------------------------------------- 1 | android_phone_acceleration 2 | android_phone_gyroscope 3 | -------------------------------------------------------------------------------- /dcompose-stack/firebase-app-server/etc/sqltool.rc: -------------------------------------------------------------------------------- 1 | urlid db 2 | url jdbc:hsqldb:hsql://hsqldb/status 3 | username SA 4 | password 5 | -------------------------------------------------------------------------------- /images/kafka-manager/entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | rm -f RUNNING_PID 4 | exec ./bin/kafka-manager -Dconfig.file=conf/application.conf 5 | -------------------------------------------------------------------------------- /images/kafka-manager/img/add_cluster.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/RADAR-base/RADAR-Docker/HEAD/images/kafka-manager/img/add_cluster.png -------------------------------------------------------------------------------- /dcompose-stack/radar-cp-s3-stack/ci/ci-smtp.template: -------------------------------------------------------------------------------- 1 | SMARTHOST_ADDRESS=mail.example.com 2 | SMARTHOST_PORT=587 3 | SMARTHOST_USER=user@example.com 4 | SMARTHOST_PASSWORD=XXXXXXXX 5 | -------------------------------------------------------------------------------- /dcompose-stack/radar-cp-hadoop-stack/ci/ci-smtp.template: -------------------------------------------------------------------------------- 1 | SMARTHOST_ADDRESS=mail.example.com 2 | SMARTHOST_PORT=587 3 | SMARTHOST_USER=user@example.com 4 | SMARTHOST_PASSWORD=XXXXXXXX 5 | -------------------------------------------------------------------------------- /scripts/stage-runner/start.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -eu 4 | 5 | pushd . 
6 | cd /home/ec2-user/RADAR-Docker/dcompose-stack/radar-cp-hadoop-stack 7 | ./bin/radar-docker install 8 | popd -------------------------------------------------------------------------------- /dcompose-stack/radar-cp-s3-stack/etc/webserver/ip-access-control.conf.template: -------------------------------------------------------------------------------- 1 | allow all; 2 | # restrict access to only certain IPs 3 | # allow 1.2.3.0/14; 4 | # allow 10.20.30.40; 5 | # deny all; 6 | -------------------------------------------------------------------------------- /dcompose-stack/radar-cp-hadoop-stack/etc/webserver/ip-access-control.conf.template: -------------------------------------------------------------------------------- 1 | allow all; 2 | # restrict access to only certain IPs 3 | # allow 1.2.3.0/14; 4 | # allow 10.20.30.40; 5 | # deny all; 6 | -------------------------------------------------------------------------------- /wip/radar-cp-swarm-stack/README.md: -------------------------------------------------------------------------------- 1 | # RADAR-base with multi-node cluster using Docker Swarm 2 | 3 | # Run the full setup with 4 | ```shell 5 | docker deploy --compose-file docker-compose.yml radar-cns-stack 6 | ``` 7 | 8 | -------------------------------------------------------------------------------- /dcompose-stack/firebase-app-server/etc/env.template: -------------------------------------------------------------------------------- 1 | FCM_XMPP_APP_SERVER_DB_PATH=/usr/local/var/lib/radar/xmpp/hsql 2 | FCM_XMPP_APP_SERVER_LOGS_PATH=/usr/local/var/lib/radar/xmpp/hsql/logs/ 3 | FCM_SENDER_KEY= 4 | FCM_SERVER_KEY= 5 | -------------------------------------------------------------------------------- /dcompose-stack/firebase-app-server/etc/server.properties: -------------------------------------------------------------------------------- 1 | server.database.0=file:/var/opt/hsqldb/data/notification 2 | server.dbname.0=notification 3 | 4 | server.database.1=file:/var/opt/hsqldb/data/status 5 | server.dbname.1=status 6 | -------------------------------------------------------------------------------- /images/radar-hotstorage-mongodb/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM mongo:3.2.10 2 | 3 | COPY ["./init.sh", "./db_init.sh", "./"] 4 | 5 | RUN chmod +x init.sh && chmod +x db_init.sh 6 | 7 | EXPOSE 27017 8 | EXPOSE 28017 9 | 10 | CMD ["./init.sh"] -------------------------------------------------------------------------------- /dcompose-stack/radar-cp-hadoop-stack/hash-backup/systemd/radar-hashbackup.service.template: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=RADAR-Docker hashbackup service 3 | 4 | [Service] 5 | 6 | WorkingDirectory= 7 | 8 | ExecStart= 9 | 10 | NotifyAccess=all 11 | -------------------------------------------------------------------------------- /dcompose-stack/radar-cp-s3-stack/images/postgres/Dockerfile: -------------------------------------------------------------------------------- 1 | ARG POSTGRES_VERSION=10.6-alpine 2 | FROM postgres:${POSTGRES_VERSION} 3 | 4 | COPY ./multi-db-init.sh /docker-entrypoint-initdb.d/multi-db-init.sh 5 | COPY ./on-db-ready /usr/bin/on-db-ready 6 | -------------------------------------------------------------------------------- /dcompose-stack/radar-cp-hadoop-stack/images/postgres/Dockerfile: -------------------------------------------------------------------------------- 1 | ARG POSTGRES_VERSION=10.6-alpine 2 | FROM 
postgres:${POSTGRES_VERSION} 3 | 4 | COPY ./multi-db-init.sh /docker-entrypoint-initdb.d/multi-db-init.sh 5 | COPY ./on-db-ready /usr/bin/on-db-ready 6 | -------------------------------------------------------------------------------- /images/radar-hotstorage-mongodb/init.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -m 4 | 5 | cmd="mongod" 6 | 7 | if [ "$AUTH" == "yes" ]; then 8 | cmd="$cmd --auth" 9 | fi 10 | 11 | echo "=> Starting MongoDB" 12 | 13 | $cmd & 14 | 15 | /db_init.sh 16 | 17 | fg -------------------------------------------------------------------------------- /dcompose-stack/radar-cp-hadoop-stack/etc/smtp.env.template: -------------------------------------------------------------------------------- 1 | SMARTHOST_ADDRESS=mail.example.com 2 | SMARTHOST_PORT=587 3 | SMARTHOST_USER=user@example.com 4 | SMARTHOST_PASSWORD=XXXXXXXX 5 | SMARTHOST_ALIASES=*.amazonaws.com 6 | RELAY_NETWORKS=:172.0.0.0/8:192.168.0.0/16 -------------------------------------------------------------------------------- /dcompose-stack/radar-cp-s3-stack/etc/smtp.env.template: -------------------------------------------------------------------------------- 1 | SMARTHOST_ADDRESS=mail.example.com 2 | SMARTHOST_PORT=587 3 | SMARTHOST_USER=user@example.com 4 | SMARTHOST_PASSWORD=XXXXXXXX 5 | SMARTHOST_ALIASES=*.amazonaws.com 6 | RELAY_NETWORKS=:172.0.0.0/8:192.168.0.0/16 -------------------------------------------------------------------------------- /dcompose-stack/radar-cp-hadoop-stack/etc/gateway/gateway.yml: -------------------------------------------------------------------------------- 1 | baseUri: http://0.0.0.0/radar-gateway/ 2 | managementPortalUrl: http://managementportal-app:8080/managementportal 3 | restProxyUrl: http://rest-proxy-1:8082 4 | schemaRegistryUrl: http://schema-registry-1:8081 5 | -------------------------------------------------------------------------------- /wip/radar-cp-sasl-stack/secrets/consumer_jaas.conf: -------------------------------------------------------------------------------- 1 | KafkaClient { 2 | com.sun.security.auth.module.Krb5LoginModule required 3 | useKeyTab=true 4 | storeKey=true 5 | keyTab="/etc/kafka/secrets/saslconsumer.keytab" 6 | principal="saslconsumer/quickstart.confluent.io@TEST.CONFLUENT.IO"; 7 | }; 8 | -------------------------------------------------------------------------------- /wip/radar-cp-sasl-stack/secrets/producer_jaas.conf: -------------------------------------------------------------------------------- 1 | KafkaClient { 2 | com.sun.security.auth.module.Krb5LoginModule required 3 | useKeyTab=true 4 | storeKey=true 5 | keyTab="/etc/kafka/secrets/saslproducer.keytab" 6 | principal="saslproducer/quickstart.confluent.io@TEST.CONFLUENT.IO"; 7 | }; 8 | -------------------------------------------------------------------------------- /dcompose-stack/radar-cp-hadoop-stack/hash-backup/systemd/radar-hashbackup.timer.template: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=RADAR-Docker hash backup timer. 
Default set to daily 3 | [Timer] 4 | # Daily at 3am 5 | OnCalendar=*-*-* 03:00:00 6 | Persistent=true 7 | Unit=radar-hashbackup.service 8 | [Install] 9 | WantedBy=multi-user.target 10 | -------------------------------------------------------------------------------- /dcompose-stack/radar-cp-hadoop-stack/lib/systemd/radar-check-health.timer.template: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=RADAR-Docker health checking every 5 minutes 3 | BindsTo=radar-docker.service 4 | 5 | [Timer] 6 | OnBootSec=10min 7 | OnUnitActiveSec=5min 8 | Unit=radar-check-health.service 9 | 10 | [Install] 11 | WantedBy=default.target -------------------------------------------------------------------------------- /dcompose-stack/radar-cp-s3-stack/lib/systemd/radar-check-health.timer.template: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=RADAR-Docker health checking every 5 minutes 3 | BindsTo=radar-docker.service 4 | 5 | [Timer] 6 | OnBootSec=10min 7 | OnUnitActiveSec=5min 8 | Unit=radar-check-health.service 9 | 10 | [Install] 11 | WantedBy=default.target -------------------------------------------------------------------------------- /images/hsqldb/README.md: -------------------------------------------------------------------------------- 1 | # HSQLDB image 2 | 3 | Image for HSQLDB. This exposes port 9001 for database connections and uses `file:/var/opt/hsqldb/data` for database files. If this directory is mounted, change ownership of the local mount path before running the tool: 4 | 5 | ```sh 6 | chown -R 9999:999 7 | ``` 8 | -------------------------------------------------------------------------------- /dcompose-stack/radar-cp-s3-stack/etc/webserver/dashboard-pipeline.conf.template: -------------------------------------------------------------------------------- 1 | location /dashboard/ { 2 | proxy_pass http://dashboard:80/; 3 | proxy_set_header Host $host; 4 | } 5 | location /api/ { 6 | include cors.conf; 7 | proxy_pass http://rest-api:8080/api/; 8 | proxy_set_header Host $host; 9 | } -------------------------------------------------------------------------------- /dcompose-stack/radar-cp-hadoop-stack/images/hdfs/Dockerfile: -------------------------------------------------------------------------------- 1 | ARG BASE_VERSION=3.0.3-alpine 2 | FROM smizy/hadoop-base:${BASE_VERSION} 3 | 4 | ENV HADOOP_DFS_NAME_DIR file:///hadoop/dfs/name 5 | ENV HADOOP_DFS_DATA_DIR file:///hadoop/dfs/data 6 | 7 | COPY ./hdfs-site.xml.mustache ${HADOOP_CONF_DIR}/ 8 | COPY ./entrypoint.sh /usr/local/bin/ 9 | -------------------------------------------------------------------------------- /wip/radar-cp-sasl-stack/secrets/host.producer.ssl.config: -------------------------------------------------------------------------------- 1 | ssl.truststore.location=/etc/kafka/secrets/kafka.producer.truststore.jks 2 | ssl.truststore.password=confluent 3 | 4 | ssl.keystore.location=/etc/kafka/secrets/kafka.producer.keystore.jks 5 | ssl.keystore.password=confluent 6 | ssl.key.password=confluent 7 | 8 | security.protocol=SSL 9 | -------------------------------------------------------------------------------- /dcompose-stack/radar-cp-s3-stack/lib/systemd/radar-check-health.service.template: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=RADAR-Docker service 3 | After=radar-docker.service 4 | 5 | [Service] 6 | 7 | WorkingDirectory= 8 | 9 | ExecStart= 10 | 11 | 
NotifyAccess=all 12 | 13 | StandardOutput=syslog 14 | StandardError=syslog 15 | SyslogIdentifier=radar-check-health 16 | -------------------------------------------------------------------------------- /dcompose-stack/radar-cp-hadoop-stack/lib/systemd/radar-check-health.service.template: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=RADAR-Docker service 3 | After=radar-docker.service 4 | 5 | [Service] 6 | 7 | WorkingDirectory= 8 | 9 | ExecStart= 10 | 11 | NotifyAccess=all 12 | 13 | StandardOutput=syslog 14 | StandardError=syslog 15 | SyslogIdentifier=radar-check-health 16 | -------------------------------------------------------------------------------- /dcompose-stack/radar-cp-s3-stack/lib/systemd/radar-renew-certificate.service.template: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=RADAR-Docker service 3 | After=radar-docker.service 4 | 5 | [Service] 6 | 7 | WorkingDirectory= 8 | 9 | ExecStart= 10 | 11 | NotifyAccess=all 12 | 13 | StandardOutput=syslog 14 | StandardError=syslog 15 | SyslogIdentifier=radar-renew-certificate 16 | -------------------------------------------------------------------------------- /dcompose-stack/radar-cp-s3-stack/lib/systemd/radar-renew-certificate.timer.template: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=RADAR-Docker renew ssl certificate daily 3 | BindsTo=radar-docker.service 4 | 5 | [Timer] 6 | OnCalendar=daily 7 | RandomizedDelaySec=12h 8 | Persistent=true 9 | Unit=radar-renew-certificate.service 10 | 11 | [Install] 12 | WantedBy=default.target 13 | -------------------------------------------------------------------------------- /dcompose-stack/radar-cp-hadoop-stack/lib/systemd/radar-renew-certificate.service.template: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=RADAR-Docker service 3 | After=radar-docker.service 4 | 5 | [Service] 6 | 7 | WorkingDirectory= 8 | 9 | ExecStart= 10 | 11 | NotifyAccess=all 12 | 13 | StandardOutput=syslog 14 | StandardError=syslog 15 | SyslogIdentifier=radar-renew-certificate 16 | -------------------------------------------------------------------------------- /dcompose-stack/radar-cp-hadoop-stack/lib/systemd/radar-renew-certificate.timer.template: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=RADAR-Docker renew ssl certificate daily 3 | BindsTo=radar-docker.service 4 | 5 | [Timer] 6 | OnCalendar=daily 7 | RandomizedDelaySec=12h 8 | Persistent=true 9 | Unit=radar-renew-certificate.service 10 | 11 | [Install] 12 | WantedBy=default.target 13 | -------------------------------------------------------------------------------- /wip/radar-cp-sasl-stack/secrets/host.consumer.ssl.config: -------------------------------------------------------------------------------- 1 | group.id=ssl-host 2 | ssl.truststore.location=/etc/kafka/secrets/kafka.consumer.truststore.jks 3 | ssl.truststore.password=confluent 4 | 5 | ssl.keystore.location=/etc/kafka/secrets/kafka.consumer.keystore.jks 6 | ssl.keystore.password=confluent 7 | ssl.key.password=confluent 8 | 9 | security.protocol=SSL 10 | -------------------------------------------------------------------------------- /dcompose-stack/radar-cp-s3-stack/etc/rest-source-authorizer/rest_source_clients_configs.yml.template: 
-------------------------------------------------------------------------------- 1 | rest_source_clients: 2 | - source_type: FitBit 3 | authorization_endpoint: https://www.fitbit.com/oauth2/authorize 4 | token_endpoint: https://api.fitbit.com/oauth2/token 5 | client_id: FITBITIT 6 | client_secret: FITBITSECRET 7 | scope: activity heartrate sleep profile -------------------------------------------------------------------------------- /dcompose-stack/radar-cp-hadoop-stack/etc/rest-source-authorizer/rest_source_clients_configs.yml.template: -------------------------------------------------------------------------------- 1 | rest_source_clients: 2 | - source_type: FitBit 3 | authorization_endpoint: https://www.fitbit.com/oauth2/authorize 4 | token_endpoint: https://api.fitbit.com/oauth2/token 5 | client_id: FITBITIT 6 | client_secret: FITBITSECRET 7 | scope: activity heartrate sleep profile -------------------------------------------------------------------------------- /wip/radar-cp-sasl-stack/secrets/host.producer.ssl.sasl.config: -------------------------------------------------------------------------------- 1 | ssl.truststore.location=/etc/kafka/secrets/kafka.producer.truststore.jks 2 | ssl.truststore.password=confluent 3 | 4 | ssl.keystore.location=/etc/kafka/secrets/kafka.producer.keystore.jks 5 | ssl.keystore.password=confluent 6 | ssl.key.password=confluent 7 | 8 | security.protocol=SASL_SSL 9 | sasl.mechanism=GSSAPI 10 | sasl.kerberos.service.name=kafka 11 | -------------------------------------------------------------------------------- /dcompose-stack/radar-cp-hadoop-stack/etc/hdfs-connector/sink-hdfs-high.properties: -------------------------------------------------------------------------------- 1 | name=radar-hdfs-sink-empatica-high-120000 2 | connector.class=io.confluent.connect.hdfs.HdfsSinkConnector 3 | tasks.max=4 4 | topics=android_empatica_e4_blood_volume_pulse,android_empatica_e4_acceleration 5 | flush.size=120000 6 | hdfs.url=hdfs://hdfs-namenode-1:8020 7 | format.class=org.radarcns.sink.hdfs.AvroFormatRadar 8 | topics.dir=/topicAndroidNew 9 | -------------------------------------------------------------------------------- /dcompose-stack/radar-cp-hadoop-stack/etc/hdfs-connector/sink-hdfs-med.properties: -------------------------------------------------------------------------------- 1 | name=radar-hdfs-sink-android-med-12000 2 | connector.class=io.confluent.connect.hdfs.HdfsSinkConnector 3 | tasks.max=4 4 | topics=android_empatica_e4_electrodermal_activity,android_empatica_e4_temperature 5 | flush.size=12000 6 | hdfs.url=hdfs://hdfs-namenode-1:8020 7 | format.class=org.radarcns.sink.hdfs.AvroFormatRadar 8 | topics.dir=/topicAndroidNew 9 | -------------------------------------------------------------------------------- /dcompose-stack/logging/graylog.env.template: -------------------------------------------------------------------------------- 1 | # Set a secret pepper that the passwords will be hashed with 2 | # Minimum length is 16 characters 3 | GRAYLOG_PASSWORD_SECRET= 4 | 5 | # Set a password for the admin user. 
Obtain the SHA2 of the 6 | # password by running echo -n "mypassword" | shasum -a 256 7 | GRAYLOG_ROOT_PASSWORD_SHA2= 8 | 9 | # Web address for Graylog to run on 10 | GRAYLOG_WEB_ENDPOINT_URI=http://127.0.0.1:9000/api 11 | -------------------------------------------------------------------------------- /wip/radar-cp-sasl-stack/secrets/host.consumer.ssl.sasl.config: -------------------------------------------------------------------------------- 1 | group.id=ssl-sasl-host 2 | ssl.truststore.location=/etc/kafka/secrets/kafka.consumer.truststore.jks 3 | ssl.truststore.password=confluent 4 | 5 | ssl.keystore.location=/etc/kafka/secrets/kafka.consumer.keystore.jks 6 | ssl.keystore.password=confluent 7 | ssl.key.password=confluent 8 | 9 | security.protocol=SASL_SSL 10 | sasl.mechanism=GSSAPI 11 | sasl.kerberos.service.name=kafka 12 | -------------------------------------------------------------------------------- /dcompose-stack/radar-cp-hadoop-stack/etc/hdfs-connector/sink-hdfs-low.properties: -------------------------------------------------------------------------------- 1 | name=radar-hdfs-sink-empatica-low-3000 2 | connector.class=io.confluent.connect.hdfs.HdfsSinkConnector 3 | tasks.max=4 4 | topics=android_empatica_e4_battery_level,android_empatica_e4_inter_beat_interval,android_empatica_e4_sensor_status 5 | flush.size=3000 6 | hdfs.url=hdfs://hdfs-namenode-1:8020 7 | format.class=org.radarcns.sink.hdfs.AvroFormatRadar 8 | topics.dir=/topicAndroidNew 9 | -------------------------------------------------------------------------------- /dcompose-stack/radar-cp-s3-stack/bin/radar-cert-renew: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | cd "$( dirname "${BASH_SOURCE[0]}" )/.." 4 | 5 | . ./lib/util.sh 6 | . ./.env 7 | 8 | if [ "${ENABLE_HTTPS:-yes}" != yes ]; then 9 | echo "HTTPS is disabled. Not renewing certificate." 10 | exit 0 11 | fi 12 | 13 | if [ -z ${SERVER_NAME} ]; then 14 | echo "Set SERVER_NAME variable in .env" 15 | exit 1 16 | fi 17 | 18 | request_certificate "${SERVER_NAME}" "${SELF_SIGNED_CERT:-yes}" force 19 | -------------------------------------------------------------------------------- /dcompose-stack/radar-cp-s3-stack/images/postgres/on-db-ready: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | set -u 4 | 5 | if [ -n "$POSTGRES_MULTIPLE_DATABASES" ]; then 6 | echo "Waiting for postgres database..." 7 | HOSTNAME=$(hostname) 8 | for count in {1..120}; do 9 | if pg_isready -U "$POSTGRES_USER" -q -h $HOSTNAME; then 10 | echo "Database ready." 11 | exec "$@" 12 | fi 13 | sleep 1 14 | done 15 | 16 | echo "Postgres database timeout" 17 | exit 1 18 | fi -------------------------------------------------------------------------------- /dcompose-stack/radar-cp-hadoop-stack/bin/radar-cert-renew: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | cd "$( dirname "${BASH_SOURCE[0]}" )/.." 4 | 5 | . ./lib/util.sh 6 | . ./.env 7 | 8 | if [ "${ENABLE_HTTPS:-yes}" != yes ]; then 9 | echo "HTTPS is disabled. Not renewing certificate." 
10 | exit 0 11 | fi 12 | 13 | if [ -z ${SERVER_NAME} ]; then 14 | echo "Set SERVER_NAME variable in .env" 15 | exit 1 16 | fi 17 | 18 | request_certificate "${SERVER_NAME}" "${SELF_SIGNED_CERT:-yes}" force 19 | -------------------------------------------------------------------------------- /dcompose-stack/radar-cp-hadoop-stack/images/postgres/on-db-ready: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | set -u 4 | 5 | if [ -n "$POSTGRES_MULTIPLE_DATABASES" ]; then 6 | echo "Waiting for postgres database..." 7 | HOSTNAME=$(hostname) 8 | for count in {1..120}; do 9 | if pg_isready -U "$POSTGRES_USER" -q -h $HOSTNAME; then 10 | echo "Database ready." 11 | exec "$@" 12 | fi 13 | sleep 1 14 | done 15 | 16 | echo "Postgres database timeout" 17 | exit 1 18 | fi -------------------------------------------------------------------------------- /dcompose-stack/radar-cp-hadoop-stack/etc/hdfs-connector/sink-hdfs.properties.template: -------------------------------------------------------------------------------- 1 | name=radar-hdfs-sink-android-15000 2 | connector.class=io.confluent.connect.hdfs.HdfsSinkConnector 3 | tasks.max=4 4 | topics=android_empatica_e4_electrodermal_activity,android_empatica_e4_blood_volume_pulse,android_empatica_e4_temperature 5 | flush.size=80000 6 | rotate.interval.ms=900000 7 | hdfs.url=hdfs://hdfs-namenode-1:8020 8 | format.class=org.radarcns.sink.hdfs.AvroFormatRadar 9 | topics.dir=topicAndroidNew 10 | avro.codec=snappy 11 | -------------------------------------------------------------------------------- /dcompose-stack/firebase-app-server/bin/start-xmpp: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | cd "$( dirname "${BASH_SOURCE[0]}" )/.." 4 | 5 | . ../radar-cp-hadoop-stack/lib/util.sh 6 | 7 | copy_template_if_absent .env etc/env.template 8 | 9 | . 
.env 10 | 11 | ensure_env_password FCM_SENDER_KEY "Firebase Cloud Messaging Sender ID" 12 | ensure_env_password FCM_SERVER_KEY "Firebase Cloud Messaging Server Key" 13 | 14 | mkdir -p "$FCM_XMPP_APP_SERVER_DB_PATH" 15 | chown -R 9999:999 "$FCM_XMPP_APP_SERVER_DB_PATH" 16 | 17 | exec docker-compose up "$@" 18 | -------------------------------------------------------------------------------- /wip/radar-cp-sasl-stack/secrets/zookeeper_1_jaas.conf: -------------------------------------------------------------------------------- 1 | Server { 2 | com.sun.security.auth.module.Krb5LoginModule required 3 | useKeyTab=true 4 | storeKey=true 5 | keyTab="/etc/kafka/secrets/zookeeper1.keytab" 6 | principal="zookeeper/quickstart.confluent.io@TEST.CONFLUENT.IO"; 7 | }; 8 | Client { 9 | com.sun.security.auth.module.Krb5LoginModule required 10 | useKeyTab=true 11 | storeKey=true 12 | keyTab="/etc/kafka/secrets/zkclient1.keytab" 13 | principal="zkclient/quickstart.confluent.io@TEST.CONFLUENT.IO"; 14 | }; 15 | -------------------------------------------------------------------------------- /wip/radar-cp-sasl-stack/secrets/zookeeper_2_jaas.conf: -------------------------------------------------------------------------------- 1 | Server { 2 | com.sun.security.auth.module.Krb5LoginModule required 3 | useKeyTab=true 4 | storeKey=true 5 | keyTab="/etc/kafka/secrets/zookeeper2.keytab" 6 | principal="zookeeper/quickstart.confluent.io@TEST.CONFLUENT.IO"; 7 | }; 8 | Client { 9 | com.sun.security.auth.module.Krb5LoginModule required 10 | useKeyTab=true 11 | storeKey=true 12 | keyTab="/etc/kafka/secrets/zkclient2.keytab" 13 | principal="zkclient/quickstart.confluent.io@TEST.CONFLUENT.IO"; 14 | }; 15 | -------------------------------------------------------------------------------- /wip/radar-cp-sasl-stack/secrets/zookeeper_3_jaas.conf: -------------------------------------------------------------------------------- 1 | Server { 2 | com.sun.security.auth.module.Krb5LoginModule required 3 | useKeyTab=true 4 | storeKey=true 5 | keyTab="/etc/kafka/secrets/zookeeper3.keytab" 6 | principal="zookeeper/quickstart.confluent.io@TEST.CONFLUENT.IO"; 7 | }; 8 | Client { 9 | com.sun.security.auth.module.Krb5LoginModule required 10 | useKeyTab=true 11 | storeKey=true 12 | keyTab="/etc/kafka/secrets/zkclient3.keytab" 13 | principal="zkclient/quickstart.confluent.io@TEST.CONFLUENT.IO"; 14 | }; 15 | -------------------------------------------------------------------------------- /dcompose-stack/radar-cp-hadoop-stack/hash-backup/dest.conf: -------------------------------------------------------------------------------- 1 | # Remote destinations information according to hash backup documentation 2 | 3 | destname myftp 4 | type ftp 5 | host ftp.myserver.com 6 | port 21 7 | userid myuser 8 | password mypass 9 | dir 10 | 11 | 12 | destname myS3 13 | type s3 14 | location US 15 | accesskey myaccesskey 16 | secretkey mysecretkey 17 | bucket myaccesskey-hashbackup 18 | class ia 19 | dir 20 | -------------------------------------------------------------------------------- /dcompose-stack/firebase-app-server/lib/systemd/radar-xmpp-server.service.template: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=RADAR FCM XMPP service 3 | 4 | [Service] 5 | TimeoutStartSec=0 6 | TimeoutStopSec=90 7 | Restart=always 8 | RestartSec=10 9 | 10 | StandardOutput=syslog 11 | StandardError=syslog 12 | SyslogIdentifier=radar-xmpp-server 13 | 14 | WorkingDirectory= 15 | 16 | ExecStart=./bin/start-xmpp 17 |
18 | ExecReload=/usr/local/bin/docker-compose restart 19 | 20 | ExecStop=/usr/local/bin/docker-compose down 21 | 22 | NotifyAccess=all 23 | 24 | [Install] 25 | WantedBy=default.target 26 | -------------------------------------------------------------------------------- /dcompose-stack/radar-cp-s3-stack/lib/systemd/radar-docker.service.template: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=RADAR-Docker service 3 | 4 | [Service] 5 | TimeoutStartSec=0 6 | TimeoutStopSec=90 7 | Restart=always 8 | RestartSec=10 9 | 10 | StandardOutput=syslog 11 | StandardError=syslog 12 | SyslogIdentifier=radar-docker 13 | 14 | WorkingDirectory= 15 | 16 | ExecStart=./lib/systemd/start-radar-stack.sh 17 | 18 | ExecReload=/usr/local/bin/docker-compose restart 19 | 20 | ExecStop=/usr/local/bin/docker-compose down 21 | 22 | NotifyAccess=all 23 | 24 | [Install] 25 | WantedBy=default.target 26 | -------------------------------------------------------------------------------- /dcompose-stack/radar-cp-hadoop-stack/lib/systemd/radar-docker.service.template: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=RADAR-Docker service 3 | 4 | [Service] 5 | TimeoutStartSec=0 6 | TimeoutStopSec=90 7 | Restart=always 8 | RestartSec=10 9 | 10 | StandardOutput=syslog 11 | StandardError=syslog 12 | SyslogIdentifier=radar-docker 13 | 14 | WorkingDirectory= 15 | 16 | ExecStart=./lib/systemd/start-radar-stack.sh 17 | 18 | ExecReload=/usr/local/bin/docker-compose restart 19 | 20 | ExecStop=/usr/local/bin/docker-compose down -v 21 | 22 | NotifyAccess=all 23 | 24 | [Install] 25 | WantedBy=default.target 26 | -------------------------------------------------------------------------------- /dcompose-stack/radar-cp-s3-stack/etc/fitbit/docker/source-fitbit.properties.template: -------------------------------------------------------------------------------- 1 | name=radar-fitbit-source 2 | connector.class=org.radarbase.connect.rest.fitbit.FitbitSourceConnector 3 | tasks.max=4 4 | rest.source.base.url=https://api.fitbit.com 5 | rest.source.poll.interval.ms=5000 6 | rest.source.request.generator.class=org.radarbase.connect.rest.fitbit.request.FitbitRequestGenerator 7 | fitbit.api.client= 8 | fitbit.api.secret= 9 | fitbit.user.repository.class=org.radarbase.connect.rest.fitbit.user.ServiceUserRepository 10 | fitbit.user.repository.url=http://radar-rest-sources-backend:8080/ -------------------------------------------------------------------------------- /dcompose-stack/radar-cp-hadoop-stack/etc/fitbit/docker/source-fitbit.properties.template: -------------------------------------------------------------------------------- 1 | name=radar-fitbit-source 2 | connector.class=org.radarbase.connect.rest.fitbit.FitbitSourceConnector 3 | tasks.max=4 4 | rest.source.base.url=https://api.fitbit.com 5 | rest.source.poll.interval.ms=5000 6 | rest.source.request.generator.class=org.radarbase.connect.rest.fitbit.request.FitbitRequestGenerator 7 | fitbit.api.client= 8 | fitbit.api.secret= 9 | fitbit.user.repository.class=org.radarbase.connect.rest.fitbit.user.ServiceUserRepository 10 | fitbit.user.repository.url=http://radar-rest-sources-backend:8080/ -------------------------------------------------------------------------------- /dcompose-stack/radar-cp-s3-stack/etc/webserver/optional-services.conf.template: -------------------------------------------------------------------------------- 1 | # uncomment and add according to which optional 
services need to use the webserver. 2 | location /redcapint/ { 3 | proxy_pass http://radar-integration:8080/redcap/; 4 | proxy_set_header Host $host; 5 | } 6 | 7 | location /rest-sources/authorizer/ { 8 | proxy_pass http://radar-rest-sources-authorizer:80/; 9 | proxy_set_header Host $host; 10 | } 11 | 12 | location /rest-sources/backend/ { 13 | proxy_pass http://radar-rest-sources-backend:8080/; 14 | proxy_set_header Host $host; 15 | } -------------------------------------------------------------------------------- /appspec.yml: -------------------------------------------------------------------------------- 1 | version: 0.0 2 | os: linux 3 | files: 4 | - source: / 5 | destination: /home/ec2-user/RADAR-Docker 6 | hooks: 7 | BeforeInstall: 8 | - location: scripts/stage-runner/stop.sh 9 | timeout: 300 10 | runas: root 11 | AfterInstall: 12 | - location: scripts/stage-runner/configure.sh 13 | timeout: 120 14 | runas: root 15 | ApplicationStart: 16 | - location: scripts/stage-runner/start.sh 17 | timeout: 900 18 | runas: root 19 | ValidateService: 20 | - location: scripts/stage-runner/checkhealth.sh 21 | timeout: 300 22 | runas: root -------------------------------------------------------------------------------- /scripts/stage-runner/checkhealth.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # This is a preliminary check on health 3 | 4 | set -eu 5 | 6 | DB_CONTAINER=radar-cp-hadoop-stack_radarbase-postgresql_1 7 | 8 | pushd . 9 | cd /home/ec2-user/RADAR-Docker/scripts/stage-runner 10 | ./wait-for-it.sh -t 150 localhost:80 --strict -- echo "Postgres database is ready!" 11 | popd 12 | 13 | # Restore the postgres database 14 | rm -rf /tmp/postgres_dump 15 | aws s3 cp s3://radar-codedeploy/radar_backend_postgres_dump /tmp/postgres_dump 16 | docker cp /tmp/postgres_dump $DB_CONTAINER:/tmp/postgres_dump 17 | docker exec $DB_CONTAINER psql -U postgres -f /tmp/postgres_dump postgres 18 | -------------------------------------------------------------------------------- /wip/radar-cp-sasl-stack/secrets/krb.conf: -------------------------------------------------------------------------------- 1 | [logging] 2 | default = FILE:/var/log/kerberos/krb5libs.log 3 | kdc = FILE:/var/log/kerberos/krb5kdc.log 4 | admin_server = FILE:/var/log/kerberos/kadmind.log 5 | 6 | [libdefaults] 7 | default_realm = TEST.CONFLUENT.IO 8 | dns_lookup_realm = false 9 | dns_lookup_kdc = false 10 | ticket_lifetime = 24h 11 | renew_lifetime = 7d 12 | forwardable = true 13 | 14 | [realms] 15 | TEST.CONFLUENT.IO = { 16 | kdc = quickstart.confluent.io 17 | admin_server = quickstart.confluent.io 18 | } 19 | 20 | [domain_realm] 21 | .TEST.CONFLUENT.IO = TEST.CONFLUENT.IO 22 | TEST.CONFLUENT.IO = TEST.CONFLUENT.IO 23 | -------------------------------------------------------------------------------- /dcompose-stack/radar-cp-s3-stack/lib/self-sign-certificate.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | set -e 4 | 5 | if [ $# -ne 1 ]; then 6 | echo "Need SSL path parameter" 7 | exit 1 8 | fi 9 | 10 | SSL_PATH="$1" 11 | 12 | if [ ! -e "${SSL_PATH}" ]; then 13 | mkdir -p "${SSL_PATH}" 14 | fi 15 | if [ ! -e "/var/lib/openssl/.well-known" ]; then 16 | mkdir -p /var/lib/openssl/.well-known 17 | fi 18 | apk update 19 | apk add openssl 20 | 21 | cd "${SSL_PATH}" 22 | find .
-type f -delete 23 | openssl req -x509 -newkey rsa:4096 -subj '/C=XX/ST=XXXX/L=XXXX/O=XXXX/CN=localhost' -keyout privkey.pem -out cert.pem -days 3650 -nodes -sha256 24 | cp cert.pem chain.pem 25 | cp cert.pem fullchain.pem 26 | -------------------------------------------------------------------------------- /dcompose-stack/radar-cp-hadoop-stack/lib/self-sign-certificate.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | set -e 4 | 5 | if [ $# -ne 1 ]; then 6 | echo "Need SSL path parameter" 7 | exit 1 8 | fi 9 | 10 | SSL_PATH="$1" 11 | 12 | if [ ! -e "${SSL_PATH}" ]; then 13 | mkdir -p "${SSL_PATH}" 14 | fi 15 | if [ ! -e "/var/lib/openssl/.well-known" ]; then 16 | mkdir -p /var/lib/openssl/.well-known 17 | fi 18 | apk update 19 | apk add openssl 20 | 21 | cd "${SSL_PATH}" 22 | find . -type f -delete 23 | openssl req -x509 -newkey rsa:4096 -subj '/C=XX/ST=XXXX/L=XXXX/O=XXXX/CN=localhost' -keyout privkey.pem -out cert.pem -days 3650 -nodes -sha256 24 | cp cert.pem chain.pem 25 | cp cert.pem fullchain.pem 26 | -------------------------------------------------------------------------------- /scripts/stage-runner/stop.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -eu 4 | 5 | if [ -d "/home/ec2-user/RADAR-Docker" ]; then 6 | pushd . 7 | cd /home/ec2-user/RADAR-Docker/dcompose-stack/radar-cp-hadoop-stack 8 | ./bin/radar-docker down 9 | popd 10 | rm -rf /home/ec2-user/RADAR-Docker 11 | fi 12 | 13 | # Remove nginx if installed to free port 80 14 | systemctl stop nginx 15 | systemctl disable nginx.service 16 | systemctl daemon-reload 17 | apt-get -y remove nginx nginx-common 18 | 19 | # Configure container logs 20 | cat <<EOF > /etc/docker/daemon.json 21 | { 22 | "log-opts": { 23 | "max-size": "30m", 24 | "max-file": "7" 25 | } 26 | } 27 | EOF 28 | systemctl restart docker 29 | -------------------------------------------------------------------------------- /dcompose-stack/radar-cp-hadoop-stack/etc/webserver/optional-services.conf.template: -------------------------------------------------------------------------------- 1 | # uncomment and add according to which optional services need to use the webserver. 2 | #location /redcapint/ { 3 | # proxy_pass http://radar-integration:8080/redcap/; 4 | # proxy_set_header Host $host; 5 | #} 6 | 7 | #location /rest-sources/authorizer/ { 8 | # proxy_pass http://radar-rest-sources-authorizer:80/; 9 | # proxy_set_header Host $host; 10 | #} 11 | 12 | #location /rest-sources/backend/ { 13 | # proxy_pass http://radar-rest-sources-backend:8080/; 14 | # proxy_set_header Host $host; 15 | #} 16 | 17 | #location /grafana/ { 18 | # proxy_pass http://grafana:3000/; 19 | # proxy_set_header Host $host; 20 | #} -------------------------------------------------------------------------------- /scripts/check_radar_network.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # network name 4 | network=eduroam 5 | # network interface 6 | nic=wlp5s1 7 | # lock file 8 | lockfile=/home/radar/RADAR-Network/LOCK_RETRY 9 | # log file 10 | logfile=/home/radar/RADAR-Network/radar-network.log 11 | # url to check against 12 | url=https://www.empatica.com 13 | 14 | . ./util.sh 15 | 16 | # check connection and force reconnection if needed 17 | if [ ! -f $lockfile ]; then 18 | touch $lockfile 19 | if !
isConnected; then 20 | connect 21 | fi 22 | rm $lockfile 23 | else 24 | log_info "Another instance is already running ... " 25 | fi 26 | log_info "### DONE ###" 27 | 28 | # check if log size exceeds the limit. If so, it rotates the log file 29 | rolloverLog 30 | -------------------------------------------------------------------------------- /dcompose-stack/firebase-app-server/bin/get-subject-data.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | raw_subject=${1} 4 | #echo $HOME 5 | if [[ $raw_subject = 'help' ]];then 6 | sudo docker-compose -f ../xmpp-server/docker-compose.yml exec hsqldb sh -c "java -jar /opt/hsqldb/lib/sqltool.jar --help" 7 | else 8 | match="-" 9 | repl="\\002d" 10 | subject=${raw_subject//$match/$repl} 11 | # echo $subject 12 | java -jar ${2} --rcFile=../etc/sqltool.rc --debug --sql=" 13 | select status_info.subject_id, notification_info.title, notification_info.ttl_seconds, notification_info.message, notification_info.execution_time from notification_info inner join status_info on notification_info.notification_task_uuid = status_info.notification_task_uuid where status_info.subject_id=U&'${subject}';" db 14 | fi 15 | -------------------------------------------------------------------------------- /wip/radar-cp-sasl-stack/secrets/broker1_jaas.conf: -------------------------------------------------------------------------------- 1 | KafkaServer { 2 | com.sun.security.auth.module.Krb5LoginModule required 3 | useKeyTab=true 4 | storeKey=true 5 | keyTab="/etc/kafka/secrets/broker1.keytab" 6 | principal="kafka/quickstart.confluent.io@TEST.CONFLUENT.IO"; 7 | }; 8 | KafkaClient { 9 | com.sun.security.auth.module.Krb5LoginModule required 10 | useKeyTab=true 11 | storeKey=true 12 | keyTab="/etc/kafka/secrets/broker1.keytab" 13 | principal="kafka/quickstart.confluent.io@TEST.CONFLUENT.IO"; 14 | }; 15 | 16 | Client { 17 | com.sun.security.auth.module.Krb5LoginModule required 18 | useKeyTab=true 19 | storeKey=true 20 | keyTab="/etc/kafka/secrets/zkclient1.keytab" 21 | principal="zkclient/quickstart.confluent.io@TEST.CONFLUENT.IO"; 22 | }; 23 | -------------------------------------------------------------------------------- /wip/radar-cp-sasl-stack/secrets/broker2_jaas.conf: -------------------------------------------------------------------------------- 1 | KafkaServer { 2 | com.sun.security.auth.module.Krb5LoginModule required 3 | useKeyTab=true 4 | storeKey=true 5 | keyTab="/etc/kafka/secrets/broker2.keytab" 6 | principal="kafka/quickstart.confluent.io@TEST.CONFLUENT.IO"; 7 | }; 8 | KafkaClient { 9 | com.sun.security.auth.module.Krb5LoginModule required 10 | useKeyTab=true 11 | storeKey=true 12 | keyTab="/etc/kafka/secrets/broker2.keytab" 13 | principal="kafka/quickstart.confluent.io@TEST.CONFLUENT.IO"; 14 | }; 15 | 16 | Client { 17 | com.sun.security.auth.module.Krb5LoginModule required 18 | useKeyTab=true 19 | storeKey=true 20 | keyTab="/etc/kafka/secrets/zkclient2.keytab" 21 | principal="zkclient/quickstart.confluent.io@TEST.CONFLUENT.IO"; 22 | }; 23 | -------------------------------------------------------------------------------- /wip/radar-cp-sasl-stack/secrets/broker3_jaas.conf: -------------------------------------------------------------------------------- 1 | KafkaServer { 2 | com.sun.security.auth.module.Krb5LoginModule required 3 | useKeyTab=true 4 | storeKey=true 5 | keyTab="/etc/kafka/secrets/broker3.keytab" 6 | principal="kafka/quickstart.confluent.io@TEST.CONFLUENT.IO"; 7 | }; 8 | KafkaClient { 9 | 
com.sun.security.auth.module.Krb5LoginModule required 10 | useKeyTab=true 11 | storeKey=true 12 | keyTab="/etc/kafka/secrets/broker3.keytab" 13 | principal="kafka/quickstart.confluent.io@TEST.CONFLUENT.IO"; 14 | }; 15 | 16 | Client { 17 | com.sun.security.auth.module.Krb5LoginModule required 18 | useKeyTab=true 19 | storeKey=true 20 | keyTab="/etc/kafka/secrets/zkclient3.keytab" 21 | principal="zkclient/quickstart.confluent.io@TEST.CONFLUENT.IO"; 22 | }; 23 | -------------------------------------------------------------------------------- /dcompose-stack/radar-cp-hadoop-stack/etc/jdbc-connector/sink-timescale.properties.template: -------------------------------------------------------------------------------- 1 | name=radar-timescale-sink 2 | connector.class=io.confluent.connect.jdbc.JdbcSinkConnector 3 | tasks.max=2 4 | transforms=mergeKey,timestamp 5 | transforms.mergeKey.type=org.radarbase.kafka.connect.transforms.MergeKey 6 | transforms.timestamp.type=org.radarbase.kafka.connect.transforms.TimestampConverter 7 | transforms.timestamp.fields=time,timeReceived,timeCompleted,timestamp 8 | topics=android_phone_relative_location,android_phone_battery_level 9 | connection.url= 10 | connection.user=grafana 11 | connection.password= 12 | dialect.name=TimescaleDBDatabaseDialect 13 | insert.mode=upsert 14 | pk.mode=record_value 15 | pk.fields=time, userId, projectId 16 | auto.create=true 17 | connection.attempts=3 18 | connection.backoff.ms=9000 -------------------------------------------------------------------------------- /dcompose-stack/radar-cp-s3-stack/etc/.gitignore: -------------------------------------------------------------------------------- 1 | /mongodb-connector/sink-mongo.properties 2 | /hdfs-connector/sink-hdfs.properties 3 | /webserver/nginx.conf 4 | /webserver/kafka-manager.htpasswd 5 | /webserver/ip-access-control.conf 6 | /webserver/optional-services.conf 7 | /webserver/dashboard-pipeline.conf 8 | /radar-backend/radar.yml 9 | /smtp.env 10 | /rest-api/device-catalog.yml 11 | /rest-api/radar.yml 12 | /rest-api/mp_info.yml 13 | /redcap-integration/radar.yml 14 | /managementportal/config/keystore.jks 15 | /managementportal/config/oauth_client_details.csv 16 | /schema/commons/ 17 | /schema/specifications/ 18 | radar-is.yml 19 | /fitbit/docker/users/*.yml 20 | /fitbit/docker/source-fitbit.properties 21 | /rest-source-authorizer/rest_source_clients_configs.yml 22 | keystore.p12 23 | /hdfs-restructure/restructure.yml 24 | -------------------------------------------------------------------------------- /dcompose-stack/radar-cp-hadoop-stack/etc/.gitignore: -------------------------------------------------------------------------------- 1 | /mongodb-connector/sink-mongo.properties 2 | /hdfs-connector/sink-hdfs.properties 3 | /webserver/nginx.conf 4 | /webserver/kafka-manager.htpasswd 5 | /webserver/ip-access-control.conf 6 | /webserver/optional-services.conf 7 | /radar-backend/radar.yml 8 | /smtp.env 9 | /rest-api/device-catalog.yml 10 | /rest-api/radar.yml 11 | /rest-api/mp_info.yml 12 | /redcap-integration/radar.yml 13 | /managementportal/config/keystore.jks 14 | /managementportal/config/oauth_client_details.csv 15 | /schema/commons/ 16 | /schema/specifications/ 17 | radar-is.yml 18 | /fitbit/docker/users/*.yml 19 | /fitbit/docker/source-fitbit.properties 20 | /rest-source-authorizer/rest_source_clients_configs.yml 21 | keystore.p12 22 | /hdfs-restructure/restructure.yml 23 | /jdbc-connector/sink-timescale.properties 24 | 
-------------------------------------------------------------------------------- /dcompose-stack/radar-cp-s3-stack/etc/mongodb-connector/sink-mongo.properties.template: -------------------------------------------------------------------------------- 1 | # Kafka consumer configuration 2 | name=radar-connector-mongodb-sink 3 | 4 | # Kafka connector configuration 5 | connector.class=org.radarcns.connect.mongodb.MongoDbSinkConnector 6 | tasks.max=1 7 | 8 | # Topics that will be consumed 9 | topics= 10 | 11 | # MongoDB server 12 | mongo.host=hotstorage 13 | mongo.port=27017 14 | 15 | # MongoDB configuration 16 | mongo.username= 17 | mongo.password= 18 | mongo.database= 19 | 20 | # Collection name for putting data into the MongoDB database. The {$topic} token will be replaced 21 | # by the Kafka topic name. 22 | #mongo.collection.format={$topic} 23 | 24 | # Buffer Capacity for mongodb writer. Default value is 20000 25 | # buffer.capacity=20000 26 | 27 | # Factory class to do the actual record conversion 28 | record.converter.class=org.radarcns.connect.mongodb.serialization.RecordConverterFactory 29 | -------------------------------------------------------------------------------- /dcompose-stack/radar-cp-hadoop-stack/etc/mongodb-connector/sink-mongo.properties.template: -------------------------------------------------------------------------------- 1 | # Kafka consumer configuration 2 | name=radar-connector-mongodb-sink 3 | 4 | # Kafka connector configuration 5 | connector.class=org.radarcns.connect.mongodb.MongoDbSinkConnector 6 | tasks.max=1 7 | 8 | # Topics that will be consumed 9 | topics= 10 | 11 | # MongoDB server 12 | mongo.host=hotstorage 13 | mongo.port=27017 14 | 15 | # MongoDB configuration 16 | mongo.username= 17 | mongo.password= 18 | mongo.database= 19 | 20 | # Collection name for putting data into the MongoDB database. The {$topic} token will be replaced 21 | # by the Kafka topic name. 22 | #mongo.collection.format={$topic} 23 | 24 | # Buffer Capacity for mongodb writer. Default value is 20000 25 | # buffer.capacity=20000 26 | 27 | # Factory class to do the actual record conversion 28 | record.converter.class=org.radarcns.connect.mongodb.serialization.RecordConverterFactory 29 | -------------------------------------------------------------------------------- /dcompose-stack/radar-cp-s3-stack/bin/docker-prune: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | cd "$(dirname "${BASH_SOURCE[0]}")/.." 4 | 5 | . lib/util.sh 6 | . ./.env 7 | 8 | check_command_exists docker 9 | 10 | echo "This will delete all the data associated with this RADAR-base server in Kafka, Minio, MongoDB and Postgres. This will also delete all data in docker containers. Do you wish to continue? yes or no."
11 | select yn in "Yes" "No"; do 12 | case $yn in 13 | Yes ) sudo-linux docker system prune --filter "label!=certs" "$@"; 14 | sudo-linux docker volume prune --filter "label!=certs" "$@"; 15 | sudo-linux rm -rf "$MINIO1_DATA1"; 16 | sudo-linux rm -rf "$MINIO2_DATA1"; 17 | sudo-linux rm -rf "$MINIO3_DATA1"; 18 | sudo-linux rm -rf "$MINIO4_DATA1"; 19 | sudo-linux rm -rf "$MONGODB_DIR"; 20 | sudo-linux rm -rf "$MP_POSTGRES_DIR"; 21 | break;; 22 | No ) exit;; 23 | esac 24 | done 25 | -------------------------------------------------------------------------------- /dcompose-stack/radar-cp-s3-stack/etc/redcap-integration/radar.yml.template: -------------------------------------------------------------------------------- 1 | version: 0.1-alpha 2 | released: 2017-08-29 3 | 4 | # ManagementPortal configuration 5 | oauth_client_id: #OAuth2 clientId used by the webApp for making requests 6 | oauth_client_secret: #OAuth2 client secret 7 | management_portal_url: #URL pointing to the Management Portal 8 | token_endpoint: #URL managing tokens 9 | project_endpoint: #URL managing project functions 10 | subject_endpoint: #URL managing subject functions 11 | 12 | # Set of supported projects 13 | projects: 14 | - redcap_info: 15 | url: #URL pointing to the REDCap instance 16 | project_id: #REDCap project identifier 17 | enrolment_event: #Unique identifier for the enrolment event 18 | integration_form: #Name of integration REDCap form 19 | token: #REDCap API Token used to identify the REDCap user against the REDCap instance 20 | - mp_info: 21 | # Management Portal project identifier 22 | project_name: # 23 | -------------------------------------------------------------------------------- /dcompose-stack/radar-cp-hadoop-stack/etc/redcap-integration/radar.yml.template: -------------------------------------------------------------------------------- 1 | version: 0.1-alpha 2 | released: 2017-08-29 3 | 4 | # ManagementPortal configuration 5 | oauth_client_id: #OAuth2 clientId used by the webApp for making requests 6 | oauth_client_secret: #OAuth2 client secret 7 | management_portal_url: #URL pointing to the Management Portal 8 | token_endpoint: #URL managing tokens 9 | project_endpoint: #URL managing project functions 10 | subject_endpoint: #URL managing subject functions 11 | 12 | # Set of supported projects 13 | projects: 14 | - redcap_info: 15 | url: #URL pointing to the REDCap instance 16 | project_id: #REDCap project identifier 17 | enrolment_event: #Unique identifier for the enrolment event 18 | integration_form: #Name of integration REDCap form 19 | token: #REDCap API Token used to identify the REDCap user against the REDCap instance 20 | - mp_info: 21 | # Management Portal project identifier 22 | project_name: # 23 | -------------------------------------------------------------------------------- /dcompose-stack/radar-cp-hadoop-stack/bin/docker-prune: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | cd "$(dirname "${BASH_SOURCE[0]}")/.." 4 | 5 | . lib/util.sh 6 | . ./.env 7 | 8 | check_command_exists docker 9 | 10 | echo "This will delete all the data associated with RADAR in HDFS, MongoDB and Postgres. This will also delete all data in docker containers. Do you wish to continue? yes or no."
11 | select yn in "Yes" "No"; do 12 | case $yn in 13 | Yes ) sudo-linux docker system prune --filter "label!=certs" "$@"; 14 | sudo-linux docker volume prune --filter "label!=certs" "$@"; 15 | sudo-linux rm -rf "$HDFS_DATA_DIR_1"; 16 | sudo-linux rm -rf "$HDFS_DATA_DIR_2"; 17 | sudo-linux rm -rf "$HDFS_DATA_DIR_3"; 18 | sudo-linux rm -rf "$HDFS_NAME_DIR_1"; 19 | sudo-linux rm -rf "$HDFS_NAME_DIR_2"; 20 | sudo-linux rm -rf "$MONGODB_DIR"; 21 | sudo-linux rm -rf "$MP_POSTGRES_DIR"; 22 | break;; 23 | No ) exit;; 24 | esac 25 | done 26 | -------------------------------------------------------------------------------- /images/kafka-manager/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM hseeberger/scala-sbt:8u171-2.12.6-1.2.0 as builder 2 | ENV SBT_VERSION=0.13.9 3 | 4 | RUN mkdir /code 5 | 6 | WORKDIR /code 7 | 8 | RUN sbt -sbt-version ${SBT_VERSION} 9 | 10 | ENV KM_VERSION=1.3.3.18 11 | 12 | RUN wget https://github.com/yahoo/kafka-manager/archive/${KM_VERSION}.tar.gz && \ 13 | tar xzf ${KM_VERSION}.tar.gz && \ 14 | cd kafka-manager-${KM_VERSION} && \ 15 | sbt clean dist && \ 16 | unzip -d / ./target/universal/kafka-manager-${KM_VERSION}.zip && \ 17 | mv /kafka-manager-${KM_VERSION} /kafka-manager 18 | 19 | FROM openjdk:8-alpine 20 | MAINTAINER Yatharth Ranjan 21 | 22 | ENV ZK_HOSTS=zookeeper-1:2181 23 | RUN apk add --no-cache bash 24 | 25 | COPY --from=builder /kafka-manager /kafka-manager 26 | COPY ./conf/application.conf /kafka-manager/conf/application.conf 27 | COPY ./entrypoint.sh /kafka-manager/ 28 | WORKDIR /kafka-manager 29 | 30 | EXPOSE 9000 31 | ENTRYPOINT ["./entrypoint.sh"] 32 | -------------------------------------------------------------------------------- /dcompose-stack/logging/docker-compose.yml: -------------------------------------------------------------------------------- 1 | --- 2 | version: '3' 3 | 4 | networks: 5 | graylog: 6 | driver: bridge 7 | 8 | volumes: 9 | mongo: {} 10 | elasticsearch: {} 11 | graylog: {} 12 | 13 | services: 14 | 15 | mongo: 16 | image: mongo:3.4.3 17 | networks: 18 | - graylog 19 | volumes: 20 | - mongo:/data/db 21 | 22 | elasticsearch: 23 | image: elasticsearch:2.4.4-alpine 24 | command: elasticsearch -Des.cluster.name="graylog" 25 | networks: 26 | - graylog 27 | volumes: 28 | - elasticsearch:/usr/share/elasticsearch/data 29 | 30 | graylog: 31 | image: graylog2/server:2.2.3-1 32 | networks: 33 | - graylog 34 | depends_on: 35 | - mongo 36 | - elasticsearch 37 | links: 38 | - mongo 39 | - elasticsearch 40 | env_file: 41 | - ./graylog.env 42 | ports: 43 | - "9000:9000" 44 | - "12201:12201/udp" 45 | volumes: 46 | - graylog:/usr/share/graylog/data/journal 47 | 48 | -------------------------------------------------------------------------------- /dcompose-stack/radar-cp-s3-stack/etc/s3-connector/sink-s3.properties.template: -------------------------------------------------------------------------------- 1 | name=radar-s3-sink-connector 2 | connector.class=io.confluent.connect.s3.S3SinkConnector 3 | tasks.max=4 4 | topics=android_empatica_e4_electrodermal_activity,android_empatica_e4_blood_volume_pulse,android_empatica_e4_temperature 5 | 6 | flush.size=10000 7 | 8 | s3.bucket.name= 9 | aws.access.key.id=minio 10 | aws.secret.access.key=minio123 11 | 12 | s3.part.size=26214400 13 | s3.part.retries=3 14 | 15 | store.url=http://minio1:9000/ 16 | storage.class=io.confluent.connect.s3.storage.S3Storage 17 | format.class=org.radarbase.connect.s3.RadarBaseAvroFormat 18 | 19 |
rotate.schedule.interval.ms=900000 20 | timezone=UTC 21 | avro.codec=deflate 22 | errors.tolerance=all 23 | errors.log.enable=true 24 | errors.deadletterqueue.topic.name=dead_letter_queue_s3 25 | errors.deadletterqueue.topic.replication.factor=3 26 | errors.deadletterqueue.context.headers.enable=true 27 | errors.retry.delay.max.ms=60000 28 | errors.retry.timeout=300000 -------------------------------------------------------------------------------- /dcompose-stack/radar-cp-hadoop-stack/images/postgres/multi-db-init.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | set -u 4 | 5 | function create_user_and_database() { 6 | local database=$1 7 | local database_exist=$(psql -U "$POSTGRES_USER" -tAc "SELECT 1 FROM pg_database WHERE datname='$database';") 8 | if [[ "$database_exist" == 1 ]]; then 9 | echo "Database $database already exists" 10 | else 11 | echo "Database $database does not exist" 12 | echo " Creating database '$database' for user '$POSTGRES_USER'" 13 | psql -U "$POSTGRES_USER" -v ON_ERROR_STOP=1 <<-EOSQL 14 | CREATE DATABASE "$database"; 15 | GRANT ALL PRIVILEGES ON DATABASE "$database" TO "$POSTGRES_USER"; 16 | EOSQL 17 | fi 18 | } 19 | 20 | if [ -n "$POSTGRES_MULTIPLE_DATABASES" ]; then 21 | echo "Multiple database creation requested: $POSTGRES_MULTIPLE_DATABASES" 22 | #waiting for postgres 23 | for db in $(echo $POSTGRES_MULTIPLE_DATABASES | tr ',' ' '); do 24 | create_user_and_database "$db" 25 | done 26 | echo "Databases created" 27 | fi 28 | -------------------------------------------------------------------------------- /dcompose-stack/radar-cp-s3-stack/images/postgres/multi-db-init.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | set -u 4 | 5 | function create_user_and_database() { 6 | local database=$1 7 | local database_exist=$(psql -U "$POSTGRES_USER" -tAc "SELECT 1 FROM pg_database WHERE datname='$database';") 8 | if [[ "$database_exist" == 1 ]]; then 9 | echo "Database $database already exists" 10 | else 11 | echo "Database $database does not exist" 12 | echo " Creating database '$database' for user '$POSTGRES_USER'" 13 | psql -U "$POSTGRES_USER" -v ON_ERROR_STOP=1 <<-EOSQL 14 | CREATE DATABASE "$database"; 15 | GRANT ALL PRIVILEGES ON DATABASE "$database" TO "$POSTGRES_USER"; 16 | EOSQL 17 | fi 18 | } 19 | 20 | if [ -n "$POSTGRES_MULTIPLE_DATABASES" ]; then 21 | echo "Multiple database creation requested: $POSTGRES_MULTIPLE_DATABASES" 22 | #waiting for postgres 23 | for db in $(echo $POSTGRES_MULTIPLE_DATABASES | tr ',' ' '); do 24 | create_user_and_database "$db" 25 | done 26 | echo "Databases created" 27 | fi 28 | -------------------------------------------------------------------------------- /images/radar-hotstorage-mongodb/README.md: -------------------------------------------------------------------------------- 1 | # Dockerised RADAR-HotStorage 2 | 3 | Upon the first start, this dockerised version of MongoDB 3.2.10 creates a db named `RADAR_DB` owned by user `RADAR_USER` with password `RADAR_PWD`.
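Once the container is running, the created account can be checked from a mongo shell; a minimal sketch, assuming the default values shown further below (the `RADAR_DB`, `RADAR_USER` and `RADAR_PWD` placeholders stand for your actual values, and the user is created in the `admin` database):
```
$ mongo RADAR_DB --host localhost --port 27017 -u RADAR_USER -p RADAR_PWD --authenticationDatabase admin --eval "db.stats()"
```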
4 | 5 | Create the docker image: 6 | ``` 7 | $ docker build -t radarcns/radar-mongo ./ 8 | ``` 9 | 10 | Or pull from dockerhub: 11 | ``` 12 | $ docker pull radarcns/radar-mongo:latest 13 | ``` 14 | 15 | Run the docker image locally (note that the `-e` environment flags must come before the image name): 16 | ``` 17 | $ docker run -d -p 27017:27017 -p 28017:28017 -e RADAR_USER="restapi" -e RADAR_PWD="radar" -e RADAR_DB="hotstorage" --name radar-hotstorage radarcns/radar-mongo:latest 18 | ``` 19 | 20 | To test MongoDB, access the [Mongo Dashboard](http://localhost:28017) 21 | 22 | ## Runtime environment variables 23 | 24 | Environment variables used by the RestApi: 25 | 26 | ```bash 27 | # authentication flag for MongoDB 28 | AUTH yes 29 | 30 | # mongoDb user and password 31 | RADAR_USER restapi 32 | RADAR_PWD radar 33 | 34 | # mongoDb database 35 | RADAR_DB hotstorage 36 | ``` -------------------------------------------------------------------------------- /dcompose-stack/radar-cp-hadoop-stack/hash-backup/run-backup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | cd "$( dirname "${BASH_SOURCE[0]}" )" 3 | . "./backup.conf" 4 | 5 | # lock file 6 | lockfile=.LOCKFILE 7 | 8 | if [ ! -f $lockfile ]; then 9 | echo "Creating lock ..." 10 | touch $lockfile 11 | IFS=',' read -r -a inputs <<< "$INPUTS" 12 | 13 | for element in "${inputs[@]}" 14 | do 15 | if [[ ! -d $element ]] 16 | then 17 | echo "The input path ${element} is not a directory." 18 | exit 1 19 | fi 20 | 21 | echo "Running backup for input: ${element}" 22 | backupSubpath=$(basename "${element}") 23 | finalPath="${OUTPUT}/${backupSubpath}" 24 | hb log backup -c ${finalPath} ${element} ${DEDUPLICATE_MEMORY} -X 25 | hb log retain -c ${finalPath} ${RETAIN} ${DELETED_RETAIN} -v 26 | hb log selftest -c ${finalPath} -v4 --inc 1d/120d --sample 4 27 | done 28 | echo "Removing lock ..." 29 | rm $lockfile 30 | else 31 | echo "Another instance is already running ... " 32 | fi 33 | echo "### DONE ###" 34 | -------------------------------------------------------------------------------- /dcompose-stack/firebase-app-server/bin/install-systemd: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | cd "$( dirname "${BASH_SOURCE[0]}" )/.." 4 | echo $(pwd) 5 | 6 | . ../radar-cp-hadoop-stack/lib/util.sh 7 | . 
.env 8 | 9 | if [ "$(id -un)" == "root" ] || id -Gn | grep -qe '\<docker\>'; then 10 | BASE=/etc/systemd/system 11 | SYSTEMCTL_OPTS=() 12 | else 13 | BASE=$HOME/.config/systemd/user 14 | mkdir -p $BASE 15 | SYSTEMCTL_OPTS=(--user) 16 | export XDG_RUNTIME_DIR=${XDG_RUNTIME_DIR:-/run/user/$UID} 17 | fi 18 | 19 | echo "==> Copying templates" 20 | copy_template $BASE/radar-xmpp-server.service lib/systemd/radar-xmpp-server.service.template 21 | 22 | echo "==> Inlining variables" 23 | inline_variable 'WorkingDirectory=' "$PWD" $BASE/radar-xmpp-server.service 24 | inline_variable 'ExecStart=' "$PWD/bin/start-xmpp" $BASE/radar-xmpp-server.service 25 | 26 | echo "==> Reloading systemd" 27 | systemctl "${SYSTEMCTL_OPTS[@]}" daemon-reload 28 | systemctl "${SYSTEMCTL_OPTS[@]}" enable radar-xmpp-server 29 | systemctl "${SYSTEMCTL_OPTS[@]}" start radar-xmpp-server 30 | 31 | -------------------------------------------------------------------------------- /dcompose-stack/radar-cp-s3-stack/ci/setup-env.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | echo "Starting to configure mock configurations for test" 3 | 4 | # create folder for docker volumes 5 | sudo mkdir -p /home/ci/data 6 | 7 | # setup mock configs 8 | cp ./ci-env.template ../.env 9 | cp ./ci-smtp.template ../etc/smtp.env 10 | cp ../etc/radar-backend/radar.yml.template ../etc/radar-backend/radar.yml 11 | cp ../etc/webserver/nginx.conf.template ../etc/webserver/nginx.conf 12 | cp ../etc/s3-connector/sink-s3.properties.template ../etc/s3-connector/sink-s3.properties 13 | cp ../etc/mongodb-connector/sink-mongo.properties.template ../etc/mongodb-connector/sink-mongo.properties 14 | cp ../etc/managementportal/config/oauth_client_details.csv.template ../etc/managementportal/config/oauth_client_details.csv 15 | cp ../etc/redcap-integration/radar.yml.template ../etc/redcap-integration/radar.yml 16 | cp ../etc/output-restructure/restructure.yml.template ../etc/output-restructure/restructure.yml 17 | 18 | ../bin/keystore-init 19 | 20 | echo "Setting up mock configurations finished..."
21 | -------------------------------------------------------------------------------- /dcompose-stack/firebase-app-server/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '2.1' 2 | 3 | networks: 4 | db: 5 | driver: bridge 6 | internal: true 7 | 8 | services: 9 | hsqldb: 10 | build: ../../images/hsqldb/ 11 | image: radarbase/hsqldb:2.5.0 12 | restart: always 13 | networks: 14 | - db 15 | - default 16 | ports: 17 | - 9001:9001 18 | volumes: 19 | - ${FCM_XMPP_APP_SERVER_DB_PATH}:/var/opt/hsqldb/data 20 | - ./etc/server.properties:/etc/opt/hsqldb/conf/server.properties 21 | 22 | xmppserver: 23 | image: radarbase/fcmxmppserverv2:0.1.5 24 | restart: always 25 | networks: 26 | - default 27 | - db 28 | depends_on: 29 | - hsqldb 30 | volumes: 31 | - ${FCM_XMPP_APP_SERVER_LOGS_PATH}:/usr/local/radar/xmpp-server/logs/ 32 | environment: 33 | RADAR_XMPP_FCM_SENDER_KEY: ${FCM_SENDER_KEY} 34 | RADAR_XMPP_FCM_SERVER_KEY: ${FCM_SERVER_KEY} 35 | RADAR_XMPP_SCHEDULER_TYPE: "server" 36 | RADAR_XMPP_DB_PATH: "//hsqldb:9001/notification" 37 | RADAR_XMPP_CACHE_EXPIRY: 300 38 | JAVA_OPTS: "-Xms256m -Xmx1G" 39 | -------------------------------------------------------------------------------- /dcompose-stack/radar-cp-hadoop-stack/ci/setup-env.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | echo "Starting to configure mock configurations for test" 3 | 4 | # create folder for docker volumes 5 | sudo mkdir -p /home/ci/data 6 | 7 | # setup mock configs 8 | cp ./ci-env.template ../.env 9 | cp ./ci-smtp.template ../etc/smtp.env 10 | cp ../etc/radar-backend/radar.yml.template ../etc/radar-backend/radar.yml 11 | cp ../etc/webserver/nginx.conf.template ../etc/webserver/nginx.conf 12 | cp ../etc/hdfs-connector/sink-hdfs.properties.template ../etc/hdfs-connector/sink-hdfs.properties 13 | cp ../etc/mongodb-connector/sink-mongo.properties.template ../etc/mongodb-connector/sink-mongo.properties 14 | cp ../etc/managementportal/config/oauth_client_details.csv.template ../etc/managementportal/config/oauth_client_details.csv 15 | cp ../etc/redcap-integration/radar.yml.template ../etc/redcap-integration/radar.yml 16 | cp ../etc/hdfs-restructure/restructure.yml.template ../etc/hdfs-restructure/restructure.yml 17 | cp ../etc/jdbc-connector/sink-timescale.properties.template ../etc/jdbc-connector/sink-timescale.properties 18 | 19 | ../bin/keystore-init 20 | 21 | echo "Setting up mock configurations finished..." 22 | -------------------------------------------------------------------------------- /dcompose-stack/radar-cp-s3-stack/etc/fitbit/docker/users/fitbit-user.yml.template: -------------------------------------------------------------------------------- 1 | --- 2 | # Unique user key 3 | id: test 4 | # Project ID to be used in org.radarcns.kafka.ObservationKey record keys 5 | projectId: radar-test 6 | # User ID to be used in org.radarcns.kafka.ObservationKey record keys 7 | userId: test 8 | # Source ID to be used in org.radarcns.kafka.ObservationKey record keys 9 | sourceId: charge-2 10 | # Date from when to collect data. 11 | startDate: 2018-08-06T00:00:00Z 12 | # Date until when to collect data. 13 | endDate: 2019-01-01T00:00:00Z 14 | # Fitbit user ID as returned by the Fitbit authentication procedure 15 | externalUserId: ? 16 | oauth2: 17 | # Fitbit OAuth 2.0 access token as returned by the Fitbit authentication procedure 18 | accessToken: ? 
19 | # Fitbit OAuth 2.0 refresh token as returned by the Fitbit authentication procedure 20 | refreshToken: ? 21 | # Optional expiry time of the access token. If absent, it will be estimated to one hour 22 | # when the source connector starts. When an authentication error occurs, a new access token will 23 | # be fetched regardless of the value in this field. 24 | #expiresAt: 2018-08-06T00:00:00Z 25 | -------------------------------------------------------------------------------- /dcompose-stack/radar-cp-hadoop-stack/bin/hdfs-extract: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")/.."; pwd) 4 | 5 | if [[ $# -lt 1 || $1 = "-h" || $1 = "--help" ]]; then 6 | printf "Usage:\n$0 <hdfs-path> [<destination-directory>]\nThe destination directory defaults to ./output\n" 7 | exit 1 8 | fi 9 | 10 | . "$DIR/lib/util.sh" 11 | . "$DIR/.env" 12 | 13 | # HDFS filename to get 14 | HDFS_FILE=$1 15 | # Absolute directory to write output to 16 | OUTPUT_DIR=${2:-$DIR/output} 17 | OUTPUT_DIR="$(cd "$(dirname "$OUTPUT_DIR")"; pwd)/$(basename "$OUTPUT_DIR")" 18 | # Internal docker directory to write output to 19 | HDFS_OUTPUT_DIR=/home/output 20 | # HDFS command to run 21 | HDFS_COMMAND="hdfs dfs -get $HDFS_FILE $HDFS_OUTPUT_DIR" 22 | DOCKER_OPTS=( 23 | -i --rm 24 | --network hadoop 25 | -v "$OUTPUT_DIR:$HDFS_OUTPUT_DIR" 26 | -e HADOOP_NAMENODE1_HOSTNAME=hdfs-namenode-1) 27 | 28 | mkdir -p "$OUTPUT_DIR" 29 | 30 | echo "==> Extracting HDFS path $HDFS_FILE to $OUTPUT_DIR" 31 | echo docker run "${DOCKER_OPTS[@]}" radarbase/hdfs:${HDFS_BASE_VERSION} $HDFS_COMMAND 32 | sudo-linux docker run "${DOCKER_OPTS[@]}" radarbase/hdfs:${HDFS_BASE_VERSION} $HDFS_COMMAND 33 | -------------------------------------------------------------------------------- /dcompose-stack/radar-cp-hadoop-stack/etc/fitbit/docker/users/fitbit-user.yml.template: -------------------------------------------------------------------------------- 1 | --- 2 | # Unique user key 3 | id: test 4 | # Project ID to be used in org.radarcns.kafka.ObservationKey record keys 5 | projectId: radar-test 6 | # User ID to be used in org.radarcns.kafka.ObservationKey record keys 7 | userId: test 8 | # Source ID to be used in org.radarcns.kafka.ObservationKey record keys 9 | sourceId: charge-2 10 | # Date from when to collect data. 11 | startDate: 2018-08-06T00:00:00Z 12 | # Date until when to collect data. 13 | endDate: 2019-01-01T00:00:00Z 14 | # Fitbit user ID as returned by the Fitbit authentication procedure 15 | externalUserId: ? 16 | oauth2: 17 | # Fitbit OAuth 2.0 access token as returned by the Fitbit authentication procedure 18 | accessToken: ? 19 | # Fitbit OAuth 2.0 refresh token as returned by the Fitbit authentication procedure 20 | refreshToken: ? 21 | # Optional expiry time of the access token. If absent, it will be estimated to one hour 22 | # when the source connector starts. When an authentication error occurs, a new access token will 23 | # be fetched regardless of the value in this field. 24 | #expiresAt: 2018-08-06T00:00:00Z 25 | -------------------------------------------------------------------------------- /dcompose-stack/radar-cp-hadoop-stack/bin/hdfs-upgrade: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if [ $# = 0 ]; then 4 | echo "Usage: $0 VERSION" 5 | exit 1 6 | fi 7 | 8 | VERSION=$1 9 | cd "$(dirname "${BASH_SOURCE[0]}")/.." 10 | 11 | stack=bin/radar-docker 12 | . ./.env 13 | . 
lib/util.sh 14 | 15 | echo "Are you sure you want to upgrade HDFS from version ${HDFS_BASE_VERSION} to ${VERSION}?" 16 | 17 | select yn in "Yes" "No"; do 18 | case $yn in 19 | Yes ) break;; 20 | No ) exit;; 21 | esac 22 | done 23 | 24 | # ensure that the image exists 25 | docker pull smizy/hadoop-base:$VERSION 26 | 27 | echo "==> Stopping HDFS name nodes" 28 | $stack quit hdfs-namenode-1 radar-hdfs-connector 29 | 30 | echo "==> Updating HDFS version to ${VERSION}" 31 | ensure_variable "HDFS_BASE_VERSION=" "${VERSION}" .env 32 | 33 | $stack build 34 | 35 | echo "==> Upgrading name node 1" 36 | $stack run --rm --name hdfs-namenode-1 hdfs-namenode-1 namenode-1 -upgradeOnly 37 | $stack up -d hdfs-namenode-1 38 | sleep 30 39 | 40 | echo "==> Finalizing upgrade" 41 | $stack exec hdfs-namenode-1 hdfs dfsadmin -finalizeUpgrade 42 | 43 | echo "==> Bringing up HDFS cluster" 44 | $stack hdfs 45 | $stack up -d radar-hdfs-connector 46 | -------------------------------------------------------------------------------- /images/hsqldb/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM openjdk:12-oracle 2 | 3 | MAINTAINER yatharth.ranjan@kcl.ac.uk 4 | 5 | # Reuse directory layout between images 6 | RUN mkdir -p /opt/hsqldb/lib /etc/opt/hsqldb/conf /var/opt/hsqldb/data && \ 7 | groupadd --system -g 999 hsqldb && \ 8 | useradd --system -g hsqldb -u 9999 hsqldb && \ 9 | chown hsqldb:hsqldb -R /var/opt/hsqldb 10 | 11 | ENV MVN_CENTRAL_URL https://repo1.maven.org/maven2 12 | ENV HSQLDB_MVN_GRP org/hsqldb 13 | ENV HSQLDB_VERSION 2.5.0 14 | ENV LOG4J_VERSION 1.2.17 15 | 16 | ENV SERVER_PROPERTY_PATH /etc/opt/hsqldb/conf/server.properties 17 | ENV SQL_TOOL_RC_PATH /etc/opt/hsqldb/conf/sqltool.rc 18 | 19 | RUN curl -#o /opt/hsqldb/lib/hsqldb.jar \ 20 | "${MVN_CENTRAL_URL}/${HSQLDB_MVN_GRP}/hsqldb/${HSQLDB_VERSION}/hsqldb-${HSQLDB_VERSION}.jar" && \ 21 | curl -#o /opt/hsqldb/lib/sqltool.jar \ 22 | "${MVN_CENTRAL_URL}/${HSQLDB_MVN_GRP}/sqltool/${HSQLDB_VERSION}/sqltool-${HSQLDB_VERSION}.jar" && \ 23 | curl -#o /opt/hsqldb/lib/log4j.jar \ 24 | "${MVN_CENTRAL_URL}/log4j/log4j/${LOG4J_VERSION}/log4j-${LOG4J_VERSION}.jar" 25 | 26 | EXPOSE 9001 27 | USER hsqldb 28 | WORKDIR /var/opt/hsqldb/data 29 | 30 | CMD java -cp /opt/hsqldb/lib/*:/etc/opt/hsqldb/conf org.hsqldb.server.Server --props ${SERVER_PROPERTY_PATH} 31 | -------------------------------------------------------------------------------- /dcompose-stack/logging/README.md: -------------------------------------------------------------------------------- 1 | # Docker logging with Graylog2 2 | 3 | This directory sets up a graylog2 instance that docker can stream data to. 4 | 5 | ## Usage 6 | 7 | Set up this container by copying `graylog.env.template` to `graylog.env` and editing it. See the instructions inside `graylog.env.template` on how to set each variable. 8 | 9 | Start the logging container with 10 | ```shell 11 | sudo docker-compose up -d 12 | ``` 13 | On macOS, omit `sudo` in the command above. 14 | 15 | Then go to the [Graylog dashboard](http://localhost:9000). Log in with your chosen password, and navigate to `System -> Inputs`. Choose `GELF UDP` as a source and click `Launch new input`. Set the option to allow Global logs, and name the input `RADAR-Docker`. Now your Graylog instance is ready to collect data from docker on the host it is running on, using the GELF driver with URL `udp://localhost:12201` (replace `localhost` with the hostname where Graylog is running, if needed).
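To verify the input before wiring up whole stacks, you can emit a single test log line through the GELF driver; a minimal sketch (the busybox image and message are just examples):
```shell
sudo docker run --rm \
  --log-driver gelf \
  --log-opt gelf-address=udp://localhost:12201 \
  busybox echo "hello graylog"
```
The message should then show up under the `RADAR-Docker` input in the Graylog search view.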
16 | 17 | Now, other docker containers can be configured to use the `gelf` log driver. In a docker-compose file, add the following lines to a service to let it use Graylog: 18 | ```yaml 19 | logging: 20 | driver: gelf 21 | options: 22 | gelf-address: udp://localhost:12201 23 | ``` 24 | Now all docker logs of that service will be forwarded to Graylog. 25 | -------------------------------------------------------------------------------- /dcompose-stack/radar-cp-s3-stack/etc/rest-api/radar.yml.template: -------------------------------------------------------------------------------- 1 | 2 | # MongoDB hosts: a map of hostname to port 3 | mongodb_hosts: 4 | hotstorage: 27017 5 | 6 | # MongoDB user 7 | mongodb_credentials: 8 | username: 9 | password: 10 | database_name: 11 | 12 | management_portal_config: 13 | oauth_client_id: radar_restapi #OAuth2 clientId used by the webApp for making requests 14 | oauth_client_secret: secret #OAuth2 client secret 15 | oauth_client_scopes: SUBJECT.READ PROJECT.READ SOURCE.READ SOURCETYPE.READ SOURCEDATA.READ 16 | management_portal_url: http://managementportal-app:8080/managementportal/ #URL pointing to the Management Portal 17 | token_endpoint: oauth/token #URL managing tokens 18 | project_endpoint: api/projects/ #URL managing project functions 19 | subject_endpoint: api/subjects/ 20 | source_type_endpoint: api/source-types/ 21 | source_data_endpoint: api/source-data/ 22 | source_endpoint: api/sources/ 23 | 24 | # Timeout duration for each source-type, used to decide whether a source is connected or not. 25 | # A source-type should be defined by following the convention of producer_model as mentioned in 26 | # the specification in radar-schemas. 27 | # The timeout should be specified in the ISO-8601 duration format {@code PnDTnHnMn.nS}. 28 | source-type-connection-timeout: 29 | android_phone: PT2H 30 | empatica_e4: PT1H 31 | -------------------------------------------------------------------------------- /dcompose-stack/radar-cp-hadoop-stack/etc/rest-api/radar.yml.template: -------------------------------------------------------------------------------- 1 | 2 | # MongoDB hosts: a map of hostname to port 3 | mongodb_hosts: 4 | hotstorage: 27017 5 | 6 | # MongoDB user 7 | mongodb_credentials: 8 | username: 9 | password: 10 | database_name: 11 | 12 | management_portal_config: 13 | oauth_client_id: radar_restapi #OAuth2 clientId used by the webApp for making requests 14 | oauth_client_secret: secret #OAuth2 client secret 15 | oauth_client_scopes: SUBJECT.READ PROJECT.READ SOURCE.READ SOURCETYPE.READ SOURCEDATA.READ 16 | management_portal_url: http://managementportal-app:8080/managementportal/ #URL pointing to the Management Portal 17 | token_endpoint: oauth/token #URL managing tokens 18 | project_endpoint: api/projects/ #URL managing project functions 19 | subject_endpoint: api/subjects/ 20 | source_type_endpoint: api/source-types/ 21 | source_data_endpoint: api/source-data/ 22 | source_endpoint: api/sources/ 23 | 24 | # Timeout duration for each source-type, used to decide whether a source is connected or not. 25 | # A source-type should be defined by following the convention of producer_model as mentioned in 26 | # the specification in radar-schemas. 27 | # The timeout should be specified in the ISO-8601 duration format {@code PnDTnHnMn.nS}.
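# For example (illustrative values only): PT30M means 30 minutes, PT2H means 2 hours,
# and P1DT12H means one and a half days.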
28 | source-type-connection-timeout: 29 | android_phone: PT2H 30 | empatica_e4: PT1H 31 | -------------------------------------------------------------------------------- /dcompose-stack/radar-cp-hadoop-stack/postgres-backup/README.md: -------------------------------------------------------------------------------- 1 | # POSTGRES Backup Scripts 2 | 3 | **Note: these scripts have been deprecated. Please use the unified backup solution provided in the `hash-backup` folder. This folder will be removed in the future.** 4 | 5 | The `scripts` directory contains a script for running rotated backups of a running postgres instance. In this case these scripts are mounted to the postgres container (using bind mounts) and then run to create backups. These scripts are taken from the [postgres wiki](https://wiki.postgresql.org/wiki/Automated_Backup_on_Linux). 6 | 7 | The backups can be configured to run daily, weekly and monthly by editing the `pg_backup.config` configuration file. Please note that if you change `BACKUP_DIR` in the config file, make sure to change the bind mount of the postgres container in the docker-compose.yml file as well. 8 | Further configuration information is contained in the `pg_backup.config` file. 9 | 10 | This script needs to be run periodically, at an interval of your choosing, through a cron job or a systemd timer. 11 | The backups are then stored in the `postgres-backup/backups` directory. 12 | 13 | An example cron job that runs this script on the postgres container daily at midnight: 14 | ``` 15 | 00 00 * * * docker exec radarcphadoopstack_managementportal-postgresql_1 ./backup-scripts/pg_backup_rotated.sh >> ~/pg_backup.log 2>&1 16 | ``` 17 | 18 | This also logs the output to a file. 19 | -------------------------------------------------------------------------------- /dcompose-stack/radar-cp-s3-stack/etc/managementportal/config/oauth_client_details.csv.template: -------------------------------------------------------------------------------- 1 | client_id;resource_ids;client_secret;scope;authorized_grant_types;redirect_uri;authorities;access_token_validity;refresh_token_validity;additional_information;autoapprove 2 | pRMT;res_ManagementPortal,res_gateway;;MEASUREMENT.CREATE,SUBJECT.UPDATE,SUBJECT.READ,PROJECT.READ,SOURCETYPE.READ,SOURCE.READ,SOURCETYPE.READ,SOURCEDATA.READ,USER.READ,ROLE.READ;refresh_token,authorization_code;;;43200;7948800;{"dynamic_registration": true}; 3 | aRMT;res_ManagementPortal,res_gateway;;MEASUREMENT.CREATE,SUBJECT.UPDATE,SUBJECT.READ,PROJECT.READ,SOURCETYPE.READ,SOURCE.READ,SOURCETYPE.READ,SOURCEDATA.READ,USER.READ,ROLE.READ;refresh_token,authorization_code;;;43200;7948800;{"dynamic_registration": true}; 4 | THINC-IT;res_ManagementPortal,res_gateway;;MEASUREMENT.CREATE,SUBJECT.UPDATE,SUBJECT.READ,PROJECT.READ,SOURCETYPE.READ,SOURCE.READ,SOURCETYPE.READ,SOURCEDATA.READ,USER.READ,ROLE.READ;refresh_token,authorization_code;;;43200;7948800;{"dynamic_registration": true}; 5 | radar_restapi;res_ManagementPortal;secret;SUBJECT.READ,PROJECT.READ,SOURCE.READ,SOURCETYPE.READ,SOURCEDATA.READ;client_credentials;;;43200;259200;{}; 6 | radar_redcap_integrator;res_ManagementPortal;secret;PROJECT.READ,SUBJECT.CREATE,SUBJECT.READ,SUBJECT.UPDATE;client_credentials;;;43200;259200;{}; 7 | radar_dashboard;res_ManagementPortal,res_RestApi;;SUBJECT.READ,PROJECT.READ,SOURCE.READ,SOURCETYPE.READ,MEASUREMENT.READ;refresh_token,authorization_code;;;43200;259200;{}; 8 | 
-------------------------------------------------------------------------------- /dcompose-stack/radar-cp-hadoop-stack/etc/managementportal/config/oauth_client_details.csv.template: -------------------------------------------------------------------------------- 1 | client_id;resource_ids;client_secret;scope;authorized_grant_types;redirect_uri;authorities;access_token_validity;refresh_token_validity;additional_information;autoapprove 2 | pRMT;res_ManagementPortal,res_gateway;;MEASUREMENT.CREATE,SUBJECT.UPDATE,SUBJECT.READ,PROJECT.READ,SOURCETYPE.READ,SOURCE.READ,SOURCETYPE.READ,SOURCEDATA.READ,USER.READ,ROLE.READ;refresh_token,authorization_code;;;43200;7948800;{"dynamic_registration": true}; 3 | aRMT;res_ManagementPortal,res_gateway;;MEASUREMENT.CREATE,SUBJECT.UPDATE,SUBJECT.READ,PROJECT.READ,SOURCETYPE.READ,SOURCE.READ,SOURCETYPE.READ,SOURCEDATA.READ,USER.READ,ROLE.READ;refresh_token,authorization_code;;;43200;7948800;{"dynamic_registration": true}; 4 | THINC-IT;res_ManagementPortal,res_gateway;;MEASUREMENT.CREATE,SUBJECT.UPDATE,SUBJECT.READ,PROJECT.READ,SOURCETYPE.READ,SOURCE.READ,SOURCETYPE.READ,SOURCEDATA.READ,USER.READ,ROLE.READ;refresh_token,authorization_code;;;43200;7948800;{"dynamic_registration": true}; 5 | radar_restapi;res_ManagementPortal;secret;SUBJECT.READ,PROJECT.READ,SOURCE.READ,SOURCETYPE.READ,SOURCEDATA.READ;client_credentials;;;43200;259200;{}; 6 | radar_redcap_integrator;res_ManagementPortal;secret;PROJECT.READ,SUBJECT.CREATE,SUBJECT.READ,SUBJECT.UPDATE;client_credentials;;;43200;259200;{}; 7 | radar_dashboard;res_ManagementPortal,res_RestApi;;SUBJECT.READ,PROJECT.READ,SOURCE.READ,SOURCETYPE.READ,MEASUREMENT.READ;refresh_token,authorization_code;;;43200;259200;{}; 8 | -------------------------------------------------------------------------------- /images/kafka-manager/conf/application.conf: -------------------------------------------------------------------------------- 1 | # Copyright 2015 Yahoo Inc. Licensed under the Apache License, Version 2.0 2 | # See accompanying LICENSE file. 3 | 4 | # This is the main configuration file for the application. 5 | # ~~~~~ 6 | 7 | # Secret key 8 | # ~~~~~ 9 | # The secret key is used to secure cryptographic functions. 10 | # If you deploy your application to several instances be sure to use the same key! 11 | play.crypto.secret="^ -------------------------------------------------------------------------------- /dcompose-stack/radar-cp-s3-stack/bin/radar-kafka-consumer: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | cd "$( dirname "${BASH_SOURCE[0]}" )/.." 4 | 5 | docker-compose up -d kafka-1 schema-registry-1 6 | 7 | # kafka-avro-console-consumer inside dockerized radar platform 8 | if [[ $# -lt 1 || $1 = "-h" || $1 = "--help" || $1 == "--"* ]]; then 9 | printf "Usage: $0 <topic> [passthrough options]\n" 10 | printf "\nkafka-avro-console-consumer passthrough options:\n" 11 | docker-compose exec schema-registry-1 kafka-avro-console-consumer 12 | exit 1 13 | fi 14 | 15 | #Save command line arguments so functions can access them 16 | #To access command line arguments use syntax ${args[0]} etc 17 | args=("$@") 18 | 19 | # get list of available topics 20 | topics=($(docker-compose exec kafka-1 kafka-topics --zookeeper zookeeper-1:2181 --list)) 21 | #printf "%s\n" "${topics[@]}" 22 | 23 | # check if it is a valid topic 24 | array_contains () { 25 | local array="$1[@]" 26 | local seeking=$2 27 | local in=1 28 | for element in "${!array}"; do 29 | element_s=$(echo $element | tr -d '\r') 30 | seeking_s=$(echo $seeking | tr -d '\r') 31 | if [[ $element_s == $seeking_s ]]; then 32 | in=0 33 | break 34 | fi 35 | done 36 | return $in 37 | } 38 | 39 | if ! array_contains topics ${args[0]}; then 40 | echo -e "Topic ${args[0]} not available.
Topics on server are:\n" 41 | printf "%s\n" "${topics[@]}" 42 | exit 1 43 | fi 44 | 45 | # consumer command to run 46 | KACC_CMD="kafka-avro-console-consumer --bootstrap-server kafka-1:9092,kafka-2:9092,kafka-3:9092 --property schema.registry.url=http://schema-registry-1:8081 --property print.key=true --topic ${args[0]} ${args[@]:1}" 47 | DOCKER_CMD="docker-compose exec schema-registry-1" 48 | 49 | # run consumer 50 | echo $DOCKER_CMD $KACC_CMD 51 | exec $DOCKER_CMD $KACC_CMD 52 | -------------------------------------------------------------------------------- /dcompose-stack/radar-cp-hadoop-stack/bin/radar-kafka-consumer: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | cd "$( dirname "${BASH_SOURCE[0]}" )/.." 4 | 5 | docker-compose up -d kafka-1 schema-registry-1 6 | 7 | # kafka-avro-console-consumer inside dockerized radar platform 8 | if [[ $# -lt 1 || $1 = "-h" || $1 = "--help" || $1 == "--"* ]]; then 9 | printf "Usage: $0 <topic> [passthrough options]\n" 10 | printf "\nkafka-avro-console-consumer passthrough options:\n" 11 | docker-compose exec schema-registry-1 kafka-avro-console-consumer 12 | exit 1 13 | fi 14 | 15 | #Save command line arguments so functions can access them 16 | #To access command line arguments use syntax ${args[0]} etc 17 | args=("$@") 18 | 19 | # get list of available topics 20 | topics=($(docker-compose exec kafka-1 kafka-topics --zookeeper zookeeper-1:2181 --list)) 21 | #printf "%s\n" "${topics[@]}" 22 | 23 | # check if it is a valid topic 24 | array_contains () { 25 | local array="$1[@]" 26 | local seeking=$2 27 | local in=1 28 | for element in "${!array}"; do 29 | element_s=$(echo $element | tr -d '\r') 30 | seeking_s=$(echo $seeking | tr -d '\r') 31 | if [[ $element_s == $seeking_s ]]; then 32 | in=0 33 | break 34 | fi 35 | done 36 | return $in 37 | } 38 | 39 | if ! array_contains topics ${args[0]}; then 40 | echo -e "Topic ${args[0]} not available. Topics on server are:\n" 41 | printf "%s\n" "${topics[@]}" 42 | exit 1 43 | fi 44 | 45 | # consumer command to run 46 | KACC_CMD="kafka-avro-console-consumer --bootstrap-server kafka-1:9092,kafka-2:9092,kafka-3:9092 --property schema.registry.url=http://schema-registry-1:8081 --property print.key=true --topic ${args[0]} ${args[@]:1}" 47 | DOCKER_CMD="docker-compose exec schema-registry-1" 48 | 49 | # run consumer 50 | echo $DOCKER_CMD $KACC_CMD 51 | exec $DOCKER_CMD $KACC_CMD 52 | -------------------------------------------------------------------------------- /dcompose-stack/radar-cp-hadoop-stack/postgres-backup/scripts/pg_backup.config: -------------------------------------------------------------------------------- 1 | ############################## 2 | ## POSTGRESQL BACKUP CONFIG ## 3 | ############################## 4 | # Source - https://wiki.postgresql.org/wiki/Automated_Backup_on_Linux 5 | 6 | # Optional system user to run backups as. If the user the script is running as doesn't match this 7 | # the script terminates. Leave blank to skip check. 8 | BACKUP_USER= 9 | 10 | # Optional hostname to adhere to pg_hba policies. Will default to "localhost" if none specified. 11 | HOSTNAME= 12 | 13 | # Optional username to connect to database as. Will default to "postgres" if none specified. 14 | USERNAME= 15 | 16 | # This dir will be created if it doesn't exist. This must be writable by the user the script is
17 | # running as. 18 | BACKUP_DIR=/backups/database/postgresql/ 19 | 20 | # List of strings to match against in database name, separated by space or comma, for which we only 21 | # wish to keep a backup of the schema, not the data. Any database names which contain any of these 22 | # values will be considered candidates. (e.g. "system_log" will match "dev_system_log_2010-01") 23 | SCHEMA_ONLY_LIST="" 24 | 25 | # Will produce a custom-format backup if set to "yes" 26 | ENABLE_CUSTOM_BACKUPS=yes 27 | 28 | # Will produce a gzipped plain-format backup if set to "yes" 29 | ENABLE_PLAIN_BACKUPS=yes 30 | 31 | # Will produce a gzipped sql file containing the cluster globals, like users and passwords, if set to "yes" 32 | ENABLE_GLOBALS_BACKUPS=yes 33 | 34 | 35 | #### SETTINGS FOR ROTATED BACKUPS #### 36 | 37 | # Which day to take the weekly backup from (1-7 = Monday-Sunday) 38 | DAY_OF_WEEK_TO_KEEP=5 39 | 40 | # Number of days to keep daily backups 41 | DAYS_TO_KEEP=7 42 | 43 | # How many weeks to keep weekly backups 44 | WEEKS_TO_KEEP=5 45 | 46 | ###################################### -------------------------------------------------------------------------------- /dcompose-stack/radar-cp-s3-stack/bin/postgres-upgrade: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if [ $# = 0 ]; then 4 | echo "Usage: $0 VERSION" 5 | exit 1 6 | fi 7 | 8 | NEW_VERSION=$1 9 | cd "$(dirname "${BASH_SOURCE[0]}")/.." 10 | 11 | . ./.env 12 | . lib/util.sh 13 | 14 | echo "Are you sure you want to upgrade POSTGRESQL from version ${POSTGRES_VERSION} to ${NEW_VERSION}?" 15 | 16 | select yn in "Yes" "No"; do 17 | case $yn in 18 | Yes ) break;; 19 | No ) exit;; 20 | esac 21 | done 22 | 23 | # Ensure that the base image exists 24 | docker pull postgres:${NEW_VERSION} 25 | 26 | POSTGRES_NEW_DIR="${MP_POSTGRES_DIR}/data-${NEW_VERSION}" 27 | 28 | if [ -e "${POSTGRES_NEW_DIR}" ]; then 29 | echo "Please remove old temporary directory $POSTGRES_NEW_DIR before proceeding" 30 | exit 1 31 | fi 32 | 33 | echo "==> Starting new postgres database with temporary directory ${POSTGRES_NEW_DIR}" 34 | POSTGRES_NEW=$(docker run -d -v "${POSTGRES_NEW_DIR}/:/var/lib/postgresql/data" --env-file ./.env postgres:${NEW_VERSION}) 35 | sleep 5 36 | 37 | echo "==> Migrating ManagementPortal database to ${NEW_VERSION}" 38 | docker-compose exec radarbase-postgresql pg_dumpall -U "${POSTGRES_USER}" \ 39 | | docker exec -i ${POSTGRES_NEW} psql -U "${POSTGRES_USER}" 40 | 41 | docker rm -vf "${POSTGRES_NEW}" 42 | 43 | echo "==> Stopping postgres..." 44 | docker-compose stop radarbase-postgresql 45 | docker-compose rm -vf radarbase-postgresql 46 | 47 | echo "==> Moving old data to backup ${MP_POSTGRES_DIR}/data-backup-$(date +%FT%TZ)/" 48 | mv "${MP_POSTGRES_DIR}/data/" "${MP_POSTGRES_DIR}/data-backup-$(date +%FT%TZ)/" 49 | mv "${POSTGRES_NEW_DIR}" "${MP_POSTGRES_DIR}/data/" 50 | 51 | # change postgres version 52 | inline_variable " image: postgres:" "${NEW_VERSION}" docker-compose.yml 53 | ensure_variable "POSTGRES_VERSION=" "${NEW_VERSION}" .env 54 | 55 | echo "==> Starting postgres..." 56 | 57 | docker-compose up -d radarbase-postgresql 58 | -------------------------------------------------------------------------------- /dcompose-stack/radar-cp-hadoop-stack/bin/postgres-upgrade: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if [ $# = 0 ]; then 4 | echo "Usage: $0 VERSION" 5 | exit 1 6 | fi 7 | 8 | NEW_VERSION=$1 9 | cd "$(dirname "${BASH_SOURCE[0]}")/.."
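# All further relative paths (.env, lib/util.sh, docker-compose.yml) are resolved
# from the stack root, so the script can be invoked from any directory.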
10 | 11 | . ./.env 12 | . lib/util.sh 13 | 14 | echo "Are you sure you want to upgrade POSTGRESQL from version ${POSTGRES_VERSION} to ${NEW_VERSION}?" 15 | 16 | select yn in "Yes" "No"; do 17 | case $yn in 18 | Yes ) break;; 19 | No ) exit;; 20 | esac 21 | done 22 | 23 | # Ensure that the base image exists 24 | docker pull postgres:${NEW_VERSION} 25 | 26 | POSTGRES_NEW_DIR="${MP_POSTGRES_DIR}/data-${NEW_VERSION}" 27 | 28 | if [ -e "${POSTGRES_NEW_DIR}" ]; then 29 | echo "Please remove old temporary directory $POSTGRES_NEW_DIR before proceeding" 30 | exit 1 31 | fi 32 | 33 | echo "==> Starting new postgres database with temporary directory ${POSTGRES_NEW_DIR}" 34 | POSTGRES_NEW=$(docker run -d -v "${POSTGRES_NEW_DIR}/:/var/lib/postgresql/data" --env-file ./.env postgres:${NEW_VERSION}) 35 | sleep 5 36 | 37 | echo "==> Migrating ManagementPortal database to ${NEW_VERSION}" 38 | docker-compose exec radarbase-postgresql pg_dumpall -U "${POSTGRES_USER}" \ 39 | | docker exec -i ${POSTGRES_NEW} psql -U "${POSTGRES_USER}" 40 | 41 | docker rm -vf "${POSTGRES_NEW}" 42 | 43 | echo "==> Stopping postgres..." 44 | docker-compose stop radarbase-postgresql 45 | docker-compose rm -vf radarbase-postgresql 46 | 47 | echo "==> Moving old data to backup ${MP_POSTGRES_DIR}/data-backup-$(date +%FT%TZ)/" 48 | mv "${MP_POSTGRES_DIR}/data/" "${MP_POSTGRES_DIR}/data-backup-$(date +%FT%TZ)/" 49 | mv "${POSTGRES_NEW_DIR}" "${MP_POSTGRES_DIR}/data/" 50 | 51 | # change postgres version 52 | inline_variable " image: postgres:" "${NEW_VERSION}" docker-compose.yml 53 | ensure_variable "POSTGRES_VERSION=" "${NEW_VERSION}" .env 54 | 55 | echo "==> Starting postgres..." 56 | 57 | docker-compose up -d radarbase-postgresql 58 | -------------------------------------------------------------------------------- /dcompose-stack/radar-cp-hadoop-stack/ci/ci-env.template: -------------------------------------------------------------------------------- 1 | SERVER_NAME=localhost 2 | MANAGEMENTPORTAL_KEY_DNAME=CN=localhost,OU=MyName,O=MyOrg,L=MyCity,S=MyState,C=MyCountryCode 3 | MANAGEMENTPORTAL_FRONTEND_CLIENT_SECRET=test 4 | SELF_SIGNED_CERT=yes 5 | MAINTAINER_EMAIL=me@example.com 6 | RADAR_RAW_TOPIC_LIST= 7 | RADAR_AGG_TOPIC_LIST= 8 | HOTSTORAGE_USERNAME=mongodb-user 9 | HOTSTORAGE_PASSWORD=mongo-pwd 10 | HOTSTORAGE_NAME=mongodb-database 11 | MONGODB_DIR=/home/ci/data/mongodb 12 | MP_POSTGRES_DIR=/home/ci/data/postgres 13 | POSTGRES_USER=postgresdbuser 14 | POSTGRES_PASSWORD=postgresdbpwd 15 | FROM_EMAIL=from-email 16 | MANAGEMENTPORTAL_CATALOGUE_SERVER_ENABLE_AUTO_IMPORT=false 17 | HEALTHCHECK_HIPCHAT_NOTIFY=no 18 | HEALTHCHECK_HIPCHAT_ROOM_ID="" 19 | HEALTHCHECK_HIPCHAT_TOKEN="" 20 | HDFS_BASE_VERSION=3.0.3-alpine 21 | HDFS_DATA_DIR_1=/home/ci/data/hdfs-data-1 22 | HDFS_DATA_DIR_2=/home/ci/data/hdfs-data-2 23 | HDFS_DATA_DIR_3=/home/ci/data/hdfs-data-3 24 | HDFS_NAME_DIR_1=/home/ci/data/hdfs-name-1 25 | HDFS_NAME_DIR_2=/home/ci/data/hdfs-name-2 26 | POSTGRES_VERSION=10.6-alpine 27 | KAFKA_MANAGER_USERNAME=kafkamanager-user 28 | KAFKA_MANAGER_PASSWORD=kafkamanager-pwd 29 | PORTAINER_PASSWORD_HASH=$2y$05$POrvWxSUsnkeGNZ7LzHUceL6rQM3U79lYofaM2NtyQJ8iWJ4ve542 30 | MANAGEMENTPORTAL_OAUTH_CHECKING_KEY_ALIASES_0=radarbase-managementportal-ec 31 | MANAGEMENTPORTAL_OAUTH_CHECKING_KEY_ALIASES_1=selfsigned 32 | RESTRUCTURE_OUTPUT_DIR=./output 33 | MANAGEMENTPORTAL_COMMON_ADMIN_PASSWORD=notadmin 34 | MANAGEMENTPORTAL_COMMON_PRIVACY_POLICY_URL=http://info.thehyve.nl/radar-cns-privacy-policy 35 | ENABLE_OPTIONAL_SERVICES=true 36 | 
FITBIT_API_CLIENT_ID=fitbit-client 37 | FITBIT_API_CLIENT_SECRET=fitbit-secret 38 | RADAR_SCHEMAS_VERSION=0.7.3 39 | TOPIC_INIT_TRIES=60 40 | TIMESCALEDB_PASSWORD=password 41 | TIMESCALEDB_DB=radar-data 42 | TIMESCALEDB_DIR=/usr/local/var/lib/docker/timescaledb 43 | GRAFANA_PASSWORD=password 44 | DASHBOARD_TOPIC_LIST=android_phone_battery_level -------------------------------------------------------------------------------- /dcompose-stack/radar-cp-s3-stack/lib/install-dashboard-pipeline.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | cd "$(dirname "${BASH_SOURCE[0]}")/.." 4 | 5 | . lib/util.sh 6 | 7 | echo "Setting up dashboard pipeline" 8 | check_command_exists docker 9 | check_command_exists docker-compose 10 | 11 | copy_template_if_absent etc/mongodb-connector/sink-mongo.properties 12 | copy_template_if_absent etc/rest-api/radar.yml 13 | 14 | check_parent_exists MONGODB_DIR ${MONGODB_DIR} 15 | 16 | ensure_env_default HOTSTORAGE_USERNAME hotstorage 17 | ensure_env_password HOTSTORAGE_PASSWORD "Hot storage (MongoDB) password not set in .env." 18 | ensure_env_default HOTSTORAGE_NAME hotstorage 19 | 20 | echo "==> Configuring MongoDB Connector" 21 | # Update sink-mongo.properties 22 | ensure_variable 'mongo.username=' $HOTSTORAGE_USERNAME etc/mongodb-connector/sink-mongo.properties 23 | ensure_variable 'mongo.password=' $HOTSTORAGE_PASSWORD etc/mongodb-connector/sink-mongo.properties 24 | ensure_variable 'mongo.database=' $HOTSTORAGE_NAME etc/mongodb-connector/sink-mongo.properties 25 | 26 | if [ -z "${COMBINED_AGG_TOPIC_LIST}" ]; then 27 | COMBINED_AGG_TOPIC_LIST=$(sudo-linux docker run "${KAFKA_INIT_OPTS[@]}" list_aggregated.sh 2>/dev/null | tail -n 1) 28 | if [ -n "${RADAR_AGG_TOPIC_LIST}" ]; then 29 | COMBINED_AGG_TOPIC_LIST="${RADAR_AGG_TOPIC_LIST},${COMBINED_AGG_TOPIC_LIST}" 30 | fi 31 | fi 32 | ensure_variable 'topics=' "${COMBINED_AGG_TOPIC_LIST}" etc/mongodb-connector/sink-mongo.properties 33 | 34 | echo "==> Configuring REST-API" 35 | 36 | # Set MongoDB credentials 37 | inline_variable 'username:[[:space:]]' "$HOTSTORAGE_USERNAME" etc/rest-api/radar.yml 38 | inline_variable 'password:[[:space:]]' "$HOTSTORAGE_PASSWORD" etc/rest-api/radar.yml 39 | inline_variable 'database_name:[[:space:]]' "$HOTSTORAGE_NAME" etc/rest-api/radar.yml 40 | 41 | echo "==> Including dashboard-pipeline.conf in nginx" 42 | sed_i '/\#\sinclude\sdashboard\-pipeline\.conf\;*/s/#//g' etc/webserver/nginx.conf 43 | -------------------------------------------------------------------------------- /scripts/util.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | PATH=/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/bin 4 | 5 | # maximum file size in bytes to rotate log 6 | minimumsize=10000000 7 | 8 | # current time 9 | timestamp=$(date '+%Y-%m-%d %H:%M:%S'); 10 | 11 | # Write message in the log file 12 | log_info() { 13 | echo "$timestamp - $@" >> $logfile 2>&1 14 | } 15 | 16 | # Remove old lock 17 | checkLock() { 18 | uptime=$( $logfile 36 | fi 37 | } 38 | 39 | # Check connection 40 | isConnected() { 41 | case "$(curl -s --max-time 10 --retry 5 -I $url | sed 's/^[^ ]* *\([0-9]\).*/\1/; 1q')" in 42 | [23]) log_info "HTTP connectivity is up" && return 0;; 43 | 5) log_info "The web proxy won't let us through" && return 1;; 44 | *) log_info "The network is down or very slow" && return 1;; 45 | esac 46 | } 47 | 48 | # Force connection 49 | connect() { 50 | log_info "Forcing reconnection" 51 | sudo nmcli conn
down $network >> $logfile 2>&1 52 | log_info "Turning wifi NIC off" 53 | sleep 30 54 | sudo nmcli conn up $network >> $logfile 2>&1 55 | log_info "Turning wifi NIC on" 56 | log_info "Double checking ..." 57 | if ! isConnected; then 58 | log_info "Forcing reconnection with a sleep time of 60 sec ..." 59 | sudo nmcli conn down $network >> $logfile 2>&1 60 | log_info "Turning wifi NIC off" 61 | sleep 60 62 | sudo nmcli conn up $network >> $logfile 2>&1 63 | log_info "Turning wifi NIC on" 64 | fi 65 | log_info "Completed" 66 | } 67 | 68 | # Entry point 69 | touch $logfile 70 | log_info "### $timestamp ###" 71 | log_info "Checking lock ..." 72 | checkLock -------------------------------------------------------------------------------- /dcompose-stack/radar-cp-hadoop-stack/etc/env.template: -------------------------------------------------------------------------------- 1 | SERVER_NAME=localhost 2 | MANAGEMENTPORTAL_KEY_DNAME=CN=localhost,OU=MyName,O=MyOrg,L=MyCity,S=MyState,C=MyCountryCode 3 | MANAGEMENTPORTAL_FRONTEND_CLIENT_SECRET= 4 | ENABLE_HTTPS=yes 5 | SELF_SIGNED_CERT=yes 6 | MAINTAINER_EMAIL=me@example.com 7 | RADAR_RAW_TOPIC_LIST=android_phone_usage_event_aggregated,android_phone_usage_event_output 8 | RADAR_AGG_TOPIC_LIST= 9 | HOTSTORAGE_USERNAME=mongodb-user 10 | HOTSTORAGE_PASSWORD= 11 | HOTSTORAGE_NAME=mongodb-database 12 | MONGODB_DIR=/usr/local/var/lib/docker/mongodb 13 | MP_POSTGRES_DIR=/usr/local/var/lib/docker/postgres 14 | POSTGRES_USER=postgresdb-user 15 | POSTGRES_PASSWORD= 16 | FROM_EMAIL=from-email 17 | MANAGEMENTPORTAL_CATALOGUE_SERVER_ENABLE_AUTO_IMPORT=false 18 | HEALTHCHECK_SLACK_NOTIFY=no 19 | HEALTHCHECK_SLACK_WEBHOOK_URL= 20 | HEALTHCHECK_SLACK_CHANNEL=#radar-ops 21 | HDFS_BASE_VERSION=3.0.3-alpine 22 | HDFS_DATA_DIR_1=/usr/local/var/lib/docker/hdfs-data-1 23 | HDFS_DATA_DIR_2=/usr/local/var/lib/docker/hdfs-data-2 24 | HDFS_DATA_DIR_3=/usr/local/var/lib/docker/hdfs-data-3 25 | HDFS_NAME_DIR_1=/usr/local/var/lib/docker/hdfs-name-1 26 | HDFS_NAME_DIR_2=/usr/local/var/lib/docker/hdfs-name-2 27 | POSTGRES_VERSION=10.6-alpine 28 | KAFKA_MANAGER_USERNAME=kafkamanager-user 29 | KAFKA_MANAGER_PASSWORD= 30 | PORTAINER_PASSWORD_HASH= 31 | MANAGEMENTPORTAL_OAUTH_CHECKING_KEY_ALIASES_0=radarbase-managementportal-ec 32 | MANAGEMENTPORTAL_OAUTH_CHECKING_KEY_ALIASES_1=selfsigned 33 | RESTRUCTURE_OUTPUT_DIR=./output 34 | MANAGEMENTPORTAL_COMMON_ADMIN_PASSWORD= 35 | MANAGEMENTPORTAL_COMMON_PRIVACY_POLICY_URL=http://info.thehyve.nl/radar-cns-privacy-policy 36 | ENABLE_OPTIONAL_SERVICES=false 37 | FITBIT_API_CLIENT_ID=fitbit-client 38 | FITBIT_API_CLIENT_SECRET=fitbit-secret 39 | NGINX_PROXIES= 40 | RADAR_SCHEMAS_VERSION=0.7.3 41 | TOPIC_INIT_TRIES=60 42 | TIMESCALEDB_PASSWORD=password 43 | TIMESCALEDB_DB=radar-data 44 | TIMESCALEDB_DIR=/usr/local/var/lib/docker/timescaledb 45 | GRAFANA_PASSWORD=password 46 | DASHBOARD_TOPIC_LIST=android_phone_battery_level -------------------------------------------------------------------------------- /dcompose-stack/radar-cp-s3-stack/radarbase-kafka-streams.yml: -------------------------------------------------------------------------------- 1 | --- 2 | version: '3.5' 3 | 4 | services: 5 | 6 | #---------------------------------------------------------------------------# 7 | # RADAR backend streams # 8 | #---------------------------------------------------------------------------# 9 | radar-backend-stream: 10 | image: radarbase/radar-backend:0.4.0 11 | command: 12 | - stream 13 | networks: 14 | - zookeeper 15 | - kafka 16 | # for getting the play
store category 17 | - default 18 | depends_on: 19 | - zookeeper-1 20 | - kafka-1 21 | - kafka-2 22 | - kafka-3 23 | - schema-registry-1 24 | - kafka-init 25 | volumes: 26 | - ./etc/radar-backend/radar.yml:/etc/radar.yml 27 | restart: always 28 | environment: 29 | KAFKA_REST_PROXY: http://rest-proxy-1:8082 30 | KAFKA_SCHEMA_REGISTRY: http://schema-registry-1:8081 31 | KAFKA_BROKERS: 3 32 | 33 | #---------------------------------------------------------------------------# 34 | # RADAR backend monitor # 35 | #---------------------------------------------------------------------------# 36 | radar-backend-monitor: 37 | image: radarbase/radar-backend:0.4.0 38 | command: monitor 39 | networks: 40 | - zookeeper 41 | - kafka 42 | - mail 43 | depends_on: 44 | - zookeeper-1 45 | - kafka-1 46 | - kafka-2 47 | - kafka-3 48 | - schema-registry-1 49 | - kafka-init 50 | - smtp 51 | volumes: 52 | - ./etc/radar-backend/radar.yml:/etc/radar.yml 53 | - radar-backend-monitor-disconnect-data:/var/lib/radar/data 54 | restart: always 55 | environment: 56 | KAFKA_REST_PROXY: http://rest-proxy-1:8082 57 | KAFKA_SCHEMA_REGISTRY: http://schema-registry-1:8081 58 | KAFKA_BROKERS: 3 59 | # For backwards compatibility 60 | TOPIC_LIST: "application_record_counts" -------------------------------------------------------------------------------- /dcompose-stack/radar-cp-hadoop-stack/lib/install-systemd-wrappers.sh: -------------------------------------------------------------------------------- 1 | cd "$(dirname "${BASH_SOURCE[0]}")/.." 2 | 3 | echo $(pwd) 4 | . lib/util.sh 5 | . .env 6 | 7 | if [ "$(id -un)" == "root" ] || id -Gn | grep -qe '\<docker\>'; then 8 | BASE=/etc/systemd/system 9 | SYSTEMCTL_OPTS=() 10 | else 11 | BASE=$HOME/.config/systemd/user 12 | mkdir -p $BASE 13 | SYSTEMCTL_OPTS=(--user) 14 | export XDG_RUNTIME_DIR=${XDG_RUNTIME_DIR:-/run/user/$UID} 15 | fi 16 | 17 | echo "==> Copying templates" 18 | copy_template $BASE/radar-docker.service lib/systemd/radar-docker.service.template 19 | copy_template $BASE/radar-check-health.service lib/systemd/radar-check-health.service.template 20 | copy_template $BASE/radar-check-health.timer lib/systemd/radar-check-health.timer.template 21 | copy_template $BASE/radar-renew-certificate.service lib/systemd/radar-renew-certificate.service.template 22 | copy_template $BASE/radar-renew-certificate.timer lib/systemd/radar-renew-certificate.timer.template 23 | 24 | echo "==> Inlining variables" 25 | inline_variable 'WorkingDirectory=' "$PWD" $BASE/radar-docker.service 26 | inline_variable 'ExecStart=' "$PWD/bin/radar-docker foreground" $BASE/radar-docker.service 27 | 28 | inline_variable 'WorkingDirectory=' "$PWD" $BASE/radar-check-health.service 29 | inline_variable 'ExecStart=' "$PWD/bin/radar-docker health" $BASE/radar-check-health.service 30 | 31 | inline_variable 'WorkingDirectory=' "$PWD" $BASE/radar-renew-certificate.service 32 | inline_variable 'ExecStart=' "$PWD/bin/radar-docker cert-renew" $BASE/radar-renew-certificate.service 33 | 34 | echo "==> Reloading systemd" 35 | systemctl "${SYSTEMCTL_OPTS[@]}" daemon-reload 36 | systemctl "${SYSTEMCTL_OPTS[@]}" enable radar-docker 37 | systemctl "${SYSTEMCTL_OPTS[@]}" enable radar-check-health.timer 38 | systemctl "${SYSTEMCTL_OPTS[@]}" enable radar-renew-certificate.timer 39 | systemctl "${SYSTEMCTL_OPTS[@]}" start radar-docker 40 | systemctl "${SYSTEMCTL_OPTS[@]}" start radar-check-health.timer 41 | systemctl "${SYSTEMCTL_OPTS[@]}" start radar-renew-certificate.timer 42 | 
-------------------------------------------------------------------------------- /dcompose-stack/radar-cp-s3-stack/lib/install-systemd-wrappers.sh: -------------------------------------------------------------------------------- 1 | cd "$(dirname "${BASH_SOURCE[0]}")/.." 2 | 3 | echo $(pwd) 4 | . lib/util.sh 5 | . .env 6 | 7 | if [ "$(id -un)" == "root" ] || id -Gn | grep -qe '\<docker\>'; then 8 | BASE=/etc/systemd/system 9 | SYSTEMCTL_OPTS=() 10 | else 11 | BASE=$HOME/.config/systemd/user 12 | mkdir -p $BASE 13 | SYSTEMCTL_OPTS=(--user) 14 | export XDG_RUNTIME_DIR=${XDG_RUNTIME_DIR:-/run/user/$UID} 15 | fi 16 | 17 | echo "==> Copying templates" 18 | copy_template $BASE/radar-docker.service lib/systemd/radar-docker.service.template 19 | copy_template $BASE/radar-check-health.service lib/systemd/radar-check-health.service.template 20 | copy_template $BASE/radar-check-health.timer lib/systemd/radar-check-health.timer.template 21 | copy_template $BASE/radar-renew-certificate.service lib/systemd/radar-renew-certificate.service.template 22 | copy_template $BASE/radar-renew-certificate.timer lib/systemd/radar-renew-certificate.timer.template 23 | 24 | echo "==> Inlining variables" 25 | inline_variable 'WorkingDirectory=' "$PWD" $BASE/radar-docker.service 26 | inline_variable 'ExecStart=' "$PWD/bin/radar-docker foreground" $BASE/radar-docker.service 27 | 28 | inline_variable 'WorkingDirectory=' "$PWD" $BASE/radar-check-health.service 29 | inline_variable 'ExecStart=' "$PWD/bin/radar-docker health" $BASE/radar-check-health.service 30 | 31 | inline_variable 'WorkingDirectory=' "$PWD" $BASE/radar-renew-certificate.service 32 | inline_variable 'ExecStart=' "$PWD/bin/radar-docker cert-renew" $BASE/radar-renew-certificate.service 33 | 34 | echo "==> Reloading systemd" 35 | systemctl "${SYSTEMCTL_OPTS[@]}" daemon-reload 36 | systemctl "${SYSTEMCTL_OPTS[@]}" enable radar-docker 37 | systemctl "${SYSTEMCTL_OPTS[@]}" enable radar-check-health.timer 38 | systemctl "${SYSTEMCTL_OPTS[@]}" enable radar-renew-certificate.timer 39 | systemctl "${SYSTEMCTL_OPTS[@]}" start radar-docker 40 | systemctl "${SYSTEMCTL_OPTS[@]}" start radar-check-health.timer 41 | systemctl "${SYSTEMCTL_OPTS[@]}" start radar-renew-certificate.timer 42 | -------------------------------------------------------------------------------- /dcompose-stack/radar-cp-hadoop-stack/hash-backup/backup.conf: -------------------------------------------------------------------------------- 1 | 2 | . ../.env 3 | 4 | # Hash Backup version to use. A given version stops being compatible after 1 year, so please make sure to update it. 5 | HB_VERSION=2115 6 | 7 | # Comma-separated list of absolute paths to backup. Default is set to the postgres db path from the .env file 8 | INPUTS=${MP_POSTGRES_DIR} 9 | 10 | # Absolute path in which to create backups of the inputs. It is recommended to use a separate disk in production 11 | OUTPUT=/hashbackup 12 | 13 | # Deduplicate memory to use while backing up. A higher value means better deduplication. Default is 1gb 14 | DEDUPLICATE_MEMORY=-D1g 15 | 16 | # Retention options. Default is the last 30 days of backups + one every month for the last 12 months 17 | RETAIN=-s30d12m 18 | 19 | # Retention options for the files that are deleted. Default is the files for the last 3 months. 20 | # Any files that are deleted will be removed from backups after 3 months. 21 | DELETED_RETAIN=-x3m 22 | 23 | # Common key to use for encrypting all the backups. Please make sure to keep this safe and backed up.
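# For example, a random key value could be generated with: openssl rand -hex 32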
24 | KEY= 25 | 26 | # Common passphrase to use for all the backups. Please make sure to keep this safe and backed up. 27 | PASSPHRASE= 28 | 29 | # Local backup size. It is recommended to set cache-size-limit as high as reasonable. Leave at default if not using a remote backup. 30 | # The default is -1, meaning to keep a copy of all backup data in the local backup directory. 31 | LOCAL_SIZE= 32 | 33 | # Arc size limit. At least 2 x arc-size-limit bytes of free disk space will be required in the local backup directory. 34 | # The default is 100; a practical limit is around 4GB (for huge files), because many storage services do not allow file sizes over 5GB. 35 | ARC_SIZE= 36 | 37 | # Set to true if you want to set up a systemd unit and timer to run the backup scripts. The default time is set to run daily at 3am but you 38 | # can configure it in ./systemd/radar-hashbackup.timer 39 | SET_UP_TIMER=false 40 | 41 | # The remote path in which to create backups 42 | ROOT_REMOTE_PATH=/hash-backups 43 | -------------------------------------------------------------------------------- /dcompose-stack/radar-cp-s3-stack/ci/ci-env.template: -------------------------------------------------------------------------------- 1 | SERVER_NAME=localhost 2 | MANAGEMENTPORTAL_KEY_DNAME=CN=localhost,OU=MyName,O=MyOrg,L=MyCity,S=MyState,C=MyCountryCode 3 | MANAGEMENTPORTAL_FRONTEND_CLIENT_SECRET=test 4 | ENABLE_HTTPS=yes 5 | SELF_SIGNED_CERT=yes 6 | MAINTAINER_EMAIL=me@example.com 7 | RADAR_RAW_TOPIC_LIST=android_phone_usage_event_aggregated,android_phone_usage_event_output 8 | RADAR_AGG_TOPIC_LIST= 9 | HOTSTORAGE_USERNAME=mongodb-user 10 | HOTSTORAGE_PASSWORD=mongo-db 11 | HOTSTORAGE_NAME=mongodb-database 12 | MONGODB_DIR=/home/ci/data/mongodb 13 | MP_POSTGRES_DIR=/home/ci/data/postgres 14 | POSTGRES_USER=postgresdbuser 15 | POSTGRES_PASSWORD=postgresdbpwd 16 | FROM_EMAIL=from-email 17 | MANAGEMENTPORTAL_CATALOGUE_SERVER_ENABLE_AUTO_IMPORT=false 18 | HEALTHCHECK_SLACK_NOTIFY=no 19 | HEALTHCHECK_SLACK_WEBHOOK_URL= 20 | HEALTHCHECK_SLACK_CHANNEL=#radar-ops 21 | POSTGRES_VERSION=10.6-alpine 22 | KAFKA_MANAGER_USERNAME=kafkamanager-user 23 | KAFKA_MANAGER_PASSWORD=kafkamanagerpwd 24 | PORTAINER_PASSWORD_HASH=$2y$05$POrvWxSUsnkeGNZ7LzHUceL6rQM3U79lYofaM2NtyQJ8iWJ4ve542 25 | MANAGEMENTPORTAL_OAUTH_CHECKING_KEY_ALIASES_0=radarbase-managementportal-ec 26 | MANAGEMENTPORTAL_OAUTH_CHECKING_KEY_ALIASES_1=selfsigned 27 | RESTRUCTURE_OUTPUT_DIR=./output 28 | MANAGEMENTPORTAL_COMMON_ADMIN_PASSWORD=notadmin 29 | MANAGEMENTPORTAL_COMMON_PRIVACY_POLICY_URL=http://info.thehyve.nl/radar-cns-privacy-policy 30 | ENABLE_OPTIONAL_SERVICES=false 31 | FITBIT_API_CLIENT_ID=fitbit-client 32 | FITBIT_API_CLIENT_SECRET=fitbit-secret 33 | NGINX_PROXIES= 34 | RADAR_SCHEMAS_VERSION=0.7.3 35 | TOPIC_INIT_TRIES=60 36 | MINIO_ACCESS_KEY=radarbase-minio 37 | MINIO_SECRET_KEY=minio123 38 | MINIO_ENDPOINT=http://minio1:9000 39 | MINIO_INTERMEDIATE_BUCKET_NAME=radarbase-cold-storage 40 | MINIO_OUTPUT_BUCKET_NAME=radarbase-output 41 | MINIO1_DATA1=/home/ci/data/data11 42 | MINIO2_DATA1=/home/ci/data/data21 43 | MINIO3_DATA1=/home/ci/data/data31 44 | MINIO4_DATA1=/home/ci/data/data41 45 | UPLOAD_POSTGRES_DIR=/home/ci/data/upload 46 | ENABLE_DASHBOARD_PIPELINE=false 47 | ENABLE_KAFKA_STREAMS=false 48 | -------------------------------------------------------------------------------- /dcompose-stack/radar-cp-s3-stack/etc/env.template: -------------------------------------------------------------------------------- 1 | SERVER_NAME=localhost 2 | 
MANAGEMENTPORTAL_KEY_DNAME=CN=localhost,OU=MyName,O=MyOrg,L=MyCity,S=MyState,C=MyCountryCode 3 | MANAGEMENTPORTAL_FRONTEND_CLIENT_SECRET= 4 | ENABLE_HTTPS=yes 5 | SELF_SIGNED_CERT=yes 6 | MAINTAINER_EMAIL=me@example.com 7 | RADAR_RAW_TOPIC_LIST=android_phone_usage_event_aggregated,android_phone_usage_event_output 8 | RADAR_AGG_TOPIC_LIST= 9 | HOTSTORAGE_USERNAME=mongodb-user 10 | HOTSTORAGE_PASSWORD= 11 | HOTSTORAGE_NAME=mongodb-database 12 | MONGODB_DIR=/usr/local/var/lib/docker/mongodb 13 | MP_POSTGRES_DIR=/usr/local/var/lib/docker/postgres 14 | POSTGRES_USER=postgresdb-user 15 | POSTGRES_PASSWORD= 16 | FROM_EMAIL=from-email 17 | MANAGEMENTPORTAL_CATALOGUE_SERVER_ENABLE_AUTO_IMPORT=false 18 | HEALTHCHECK_SLACK_NOTIFY=no 19 | HEALTHCHECK_SLACK_WEBHOOK_URL= 20 | HEALTHCHECK_SLACK_CHANNEL=#radar-ops 21 | POSTGRES_VERSION=10.6-alpine 22 | KAFKA_MANAGER_USERNAME=kafkamanager-user 23 | KAFKA_MANAGER_PASSWORD= 24 | PORTAINER_PASSWORD_HASH= 25 | MANAGEMENTPORTAL_OAUTH_CHECKING_KEY_ALIASES_0=radarbase-managementportal-ec 26 | MANAGEMENTPORTAL_OAUTH_CHECKING_KEY_ALIASES_1=selfsigned 27 | RESTRUCTURE_OUTPUT_DIR=./output 28 | MANAGEMENTPORTAL_COMMON_ADMIN_PASSWORD= 29 | MANAGEMENTPORTAL_COMMON_PRIVACY_POLICY_URL=http://info.thehyve.nl/radar-cns-privacy-policy 30 | ENABLE_OPTIONAL_SERVICES=false 31 | FITBIT_API_CLIENT_ID=fitbit-client 32 | FITBIT_API_CLIENT_SECRET=fitbit-secret 33 | NGINX_PROXIES= 34 | RADAR_SCHEMAS_VERSION=0.7.3 35 | TOPIC_INIT_TRIES=60 36 | MINIO_ACCESS_KEY=radarbase-minio 37 | MINIO_SECRET_KEY= 38 | MINIO_ENDPOINT=http://minio1:9000 39 | MINIO_INTERMEDIATE_BUCKET_NAME=radarbase-intermediate-storage 40 | MINIO_OUTPUT_BUCKET_NAME=radarbase-output-storage 41 | MINIO1_DATA1=/usr/local/var/lib/docker/minio/data11 42 | MINIO2_DATA1=/usr/local/var/lib/docker/minio/data21 43 | MINIO3_DATA1=/usr/local/var/lib/docker/minio/data31 44 | MINIO4_DATA1=/usr/local/var/lib/docker/minio/data41 45 | UPLOAD_POSTGRES_DIR=/usr/local/var/lib/docker/upload 46 | ENABLE_DASHBOARD_PIPELINE=false 47 | ENABLE_KAFKA_STREAMS=false -------------------------------------------------------------------------------- /images/radar-hotstorage-mongodb/db_init.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | RET=1 4 | while [[ RET -ne 0 ]]; do 5 | echo "=> Waiting for confirmation of MongoDB service startup" 6 | sleep 5 7 | mongo admin --eval "help" >/dev/null 2>&1 8 | RET=$? 
9 | done
10 | 
11 | if [ -f /data/db/.radar_hotstorage_set ]; then
12 |   echo "**********************************************"
13 |   echo "**  RADAR-base Hotstorage is up and running **"
14 |   echo "**********************************************"
15 |   exit 0
16 | fi
17 | 
18 | if [ -z "$RADAR_USER" ]; then
19 |   echo "RADAR_USER is not defined"
20 |   exit 2
21 | fi
22 | 
23 | if [ -z "$RADAR_PWD" ]; then
24 |   echo "RADAR_PWD is not defined"
25 |   exit 2
26 | fi
27 | 
28 | if [ -z "$RADAR_DB" ]; then
29 |   echo "RADAR_DB is not defined"
30 |   exit 2
31 | fi
32 | 
33 | echo "=> MongoDB is ready"
34 | 
35 | echo "=> Creating DB and User for RADAR-base Hot Storage"
36 | 
37 | mongo admin --eval 'db.createUser( { user: "'${RADAR_USER}'", pwd: "'${RADAR_PWD}'", roles: [ { role: "root", db: "admin" } ] } )'
38 | mongo admin -u $RADAR_USER -p $RADAR_PWD << EOF
39 | use $RADAR_DB
40 | db.createUser( { user: "$RADAR_USER", pwd: "$RADAR_PWD", roles: [ { role: "readWrite", db: "$RADAR_DB" } ] } )
41 | EOF
42 | 
43 | # mark the hot storage as initialized so this setup is skipped on restart
44 | touch /data/db/.radar_hotstorage_set
45 | 
46 | echo "=> RADAR-base Hot Storage is set up"
-------------------------------------------------------------------------------- /dcompose-stack/radar-cp-s3-stack/bin/radar-log: --------------------------------------------------------------------------------
1 | #!/bin/bash
2 | 
3 | trap catch_errors ERR;
4 | 
5 | function catch_errors() {
6 |   exit_code=$?
7 |   echo "### FAILURE ###";
8 |   exit $exit_code;
9 | }
10 | 
11 | if [ $# != 1 ]; then
12 |   echo "Set logging output directory"
13 |   echo "Usage: $0 DIRECTORY"
14 |   echo " Env variables:"
15 |   echo "   LOG_SIZE  maximum number of bytes for a log (default 1000000000, i.e. one GB)"
16 |   exit 1
17 | fi
18 | 
19 | LOG_DIR="$1"
20 | 
21 | mkdir -p "$LOG_DIR"
22 | chown root:syslog "$LOG_DIR"
23 | chmod g+w "$LOG_DIR"
24 | 
25 | echo "Writing log directives to /etc/rsyslog.d/00-radar.conf"
26 | 
27 | cat <<EOF > /etc/rsyslog.d/00-radar.conf
28 | if ( \$programname == "radar-docker" ) then {
29 |   action(type="omfile" file="$LOG_DIR/radar-docker.log")
30 |   stop
31 | }
32 | if ( \$programname == "radar-output" ) then {
33 |   action(type="omfile" file="$LOG_DIR/radar-output.log")
34 |   stop
35 | }
36 | if ( \$programname == "radar-renew-certificate" ) then {
37 |   action(type="omfile" file="$LOG_DIR/radar-renew-certificate.log")
38 |   stop
39 | }
40 | if ( \$programname == "radar-check-health" ) then {
41 |   action(type="omfile" file="$LOG_DIR/radar-check-health.log")
42 |   stop
43 | }
44 | if ( \$programname startswith "docker" ) then {
45 |   action(type="omfile" file="$LOG_DIR/docker.log")
46 |   stop
47 | }
48 | EOF
49 | 
50 | LOG_SIZE=${LOG_SIZE:-1000000000}
51 | 
52 | echo "Writing log rotation directives to /etc/logrotate.d/radar (with maximum size $LOG_SIZE)"
53 | 
54 | cat <<EOF > /etc/logrotate.d/radar
55 | $LOG_DIR/radar-*.log {
56 |   daily
57 |   maxsize $LOG_SIZE
58 |   rotate 10
59 |   delaycompress
60 |   compress
61 |   notifempty
62 |   missingok
63 |   postrotate
64 |     invoke-rc.d rsyslog rotate > /dev/null
65 |   endscript
66 | }
67 | EOF
68 | 
69 | echo "Running log rotation every hour"
70 | 
71 | if [ ! -e /etc/cron.hourly/logrotate ]; then
72 |   ln -s /etc/cron.daily/logrotate /etc/cron.hourly/logrotate
73 |   systemctl restart cron
74 | fi
75 | 
76 | echo "Restarting rsyslog"
77 | 
78 | systemctl restart rsyslog
79 | 
80 | echo "Done."
81 | 
-------------------------------------------------------------------------------- /dcompose-stack/radar-cp-hadoop-stack/bin/radar-log: --------------------------------------------------------------------------------
1 | #!/bin/bash
2 | 
3 | trap catch_errors ERR;
4 | 
5 | function catch_errors() {
6 |   exit_code=$?
7 |   echo "### FAILURE ###";
8 |   exit $exit_code;
9 | }
10 | 
11 | if [ $# != 1 ]; then
12 |   echo "Set logging output directory"
13 |   echo "Usage: $0 DIRECTORY"
14 |   echo " Env variables:"
15 |   echo "   LOG_SIZE  maximum number of bytes for a log (default 1000000000, i.e. one GB)"
16 |   exit 1
17 | fi
18 | 
19 | LOG_DIR="$1"
20 | 
21 | mkdir -p "$LOG_DIR"
22 | chown root:syslog "$LOG_DIR"
23 | chmod g+w "$LOG_DIR"
24 | 
25 | echo "Writing log directives to /etc/rsyslog.d/00-radar.conf"
26 | 
27 | cat <<EOF > /etc/rsyslog.d/00-radar.conf
28 | if ( \$programname == "radar-docker" ) then {
29 |   action(type="omfile" file="$LOG_DIR/radar-docker.log")
30 |   stop
31 | }
32 | if ( \$programname == "radar-output" ) then {
33 |   action(type="omfile" file="$LOG_DIR/radar-output.log")
34 |   stop
35 | }
36 | if ( \$programname == "radar-renew-certificate" ) then {
37 |   action(type="omfile" file="$LOG_DIR/radar-renew-certificate.log")
38 |   stop
39 | }
40 | if ( \$programname == "radar-check-health" ) then {
41 |   action(type="omfile" file="$LOG_DIR/radar-check-health.log")
42 |   stop
43 | }
44 | if ( \$programname startswith "docker" ) then {
45 |   action(type="omfile" file="$LOG_DIR/docker.log")
46 |   stop
47 | }
48 | EOF
49 | 
50 | LOG_SIZE=${LOG_SIZE:-1000000000}
51 | 
52 | echo "Writing log rotation directives to /etc/logrotate.d/radar (with maximum size $LOG_SIZE)"
53 | 
54 | cat <<EOF > /etc/logrotate.d/radar
55 | $LOG_DIR/radar-*.log {
56 |   daily
57 |   maxsize $LOG_SIZE
58 |   rotate 10
59 |   delaycompress
60 |   compress
61 |   notifempty
62 |   missingok
63 |   postrotate
64 |     invoke-rc.d rsyslog rotate > /dev/null
65 |   endscript
66 | }
67 | EOF
68 | 
69 | echo "Running log rotation every hour"
70 | 
71 | if [ ! -e /etc/cron.hourly/logrotate ]; then
72 |   ln -s /etc/cron.daily/logrotate /etc/cron.hourly/logrotate
73 |   systemctl restart cron
74 | fi
75 | 
76 | echo "Restarting rsyslog"
77 | 
78 | systemctl restart rsyslog
79 | 
80 | echo "Done."
81 | -------------------------------------------------------------------------------- /scripts/hdfs-data-retention/hdfs_get_relevant_files.pig: -------------------------------------------------------------------------------- 1 | -- Load all of the fields from the file 2 | DATA = LOAD '$inputFile' USING PigStorage(',') AS (path:chararray, 3 | replication:int, 4 | modTime:chararray, 5 | accessTime:chararray, 6 | blockSize:long, 7 | numBlocks:int, 8 | fileSize:long, 9 | NamespaceQuota:int, 10 | DiskspaceQuota:int, 11 | perms:chararray, 12 | username:chararray, 13 | groupname:chararray); 14 | 15 | 16 | -- Grab just the path, size and modDate(in milliseconds) 17 | RELEVANT_FIELDS = FOREACH DATA GENERATE path, fileSize, ToMilliSeconds(ToDate(modTime, 'yyyy-MM-dd HH:mm', '+00:00')) as modTime:long; 18 | RELEVANT_FILES = FILTER RELEVANT_FIELDS BY ((modTime < ToMilliSeconds(ToDate('$time', 'yyyy-MM-dd HH:mm', '+00:00'))) AND (path matches '^((?!tmp).)*.avro')); 19 | -- DUMP RELEVANT_FILES; 20 | -- Load topics from the provided file 21 | TOPICS = LOAD '$topics' USING PigStorage() AS (topic:chararray); 22 | 23 | PATH_SIZE = FOREACH RELEVANT_FILES GENERATE path, fileSize; 24 | PATH_SIZE_TOPIC = CROSS PATH_SIZE, TOPICS; 25 | -- DUMP PATH_SIZE_TOPIC; 26 | PATH_MATCHES_TOPIC = FILTER PATH_SIZE_TOPIC BY (path matches SPRINTF('.*%s.*', topic)); 27 | 28 | -- Calculate total file size 29 | SUM_FILE_SIZES = FOREACH (GROUP PATH_MATCHES_TOPIC ALL) GENERATE CONCAT('SUM OF FILES SIZES TO BE DELETED IN MB = ', (chararray)(SUM(PATH_MATCHES_TOPIC.fileSize) / 1024 / 1024)); 30 | DUMP SUM_FILE_SIZES; 31 | FINAL_PATHS = FOREACH PATH_MATCHES_TOPIC GENERATE path; 32 | -- Save results 33 | -- DUMP FINAL_PATH; 34 | STORE FINAL_PATHS INTO '$outputFile'; 35 | -------------------------------------------------------------------------------- /wip/radar-cp-sasl-stack/secrets/create-certs.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -o nounset \ 4 | -o errexit \ 5 | -o verbose \ 6 | -o xtrace 7 | 8 | # Generate CA key 9 | openssl req -new -x509 -keyout snakeoil-ca-1.key -out snakeoil-ca-1.crt -days 365 -subj '/CN=ca1.test.confluent.io/OU=TEST/O=CONFLUENT/L=PaloAlto/S=Ca/C=US' -passin pass:confluent -passout pass:confluent 10 | # openssl req -new -x509 -keyout snakeoil-ca-2.key -out snakeoil-ca-2.crt -days 365 -subj '/CN=ca2.test.confluent.io/OU=TEST/O=CONFLUENT/L=PaloAlto/S=Ca/C=US' -passin pass:confluent -passout pass:confluent 11 | 12 | # Kafkacat 13 | openssl genrsa -des3 -passout "pass:confluent" -out kafkacat.client.key 1024 14 | openssl req -passin "pass:confluent" -passout "pass:confluent" -key kafkacat.client.key -new -out kafkacat.client.req -subj '/CN=kafkacat.test.confluent.io/OU=TEST/O=CONFLUENT/L=PaloAlto/S=Ca/C=US' 15 | openssl x509 -req -CA snakeoil-ca-1.crt -CAkey snakeoil-ca-1.key -in kafkacat.client.req -out kafkacat-ca1-signed.pem -days 9999 -CAcreateserial -passin "pass:confluent" 16 | 17 | 18 | 19 | for i in broker1 broker2 broker3 producer consumer 20 | do 21 | echo $i 22 | # Create keystores 23 | keytool -genkey -noprompt \ 24 | -alias $i \ 25 | -dname "CN=$i.test.confluent.io, OU=TEST, O=CONFLUENT, L=PaloAlto, S=Ca, C=US" \ 26 | -keystore kafka.$i.keystore.jks \ 27 | -keyalg RSA \ 28 | -storepass confluent \ 29 | -keypass confluent 30 | 31 | # Create CSR, sign the key and import back into keystore 32 | keytool -keystore kafka.$i.keystore.jks -alias $i -certreq -file $i.csr -storepass confluent -keypass confluent 33 | 34 | openssl x509 -req 
-CA snakeoil-ca-1.crt -CAkey snakeoil-ca-1.key -in $i.csr -out $i-ca1-signed.crt -days 9999 -CAcreateserial -passin pass:confluent 35 | 36 | keytool -keystore kafka.$i.keystore.jks -alias CARoot -import -file snakeoil-ca-1.crt -storepass confluent -keypass confluent 37 | 38 | keytool -keystore kafka.$i.keystore.jks -alias $i -import -file $i-ca1-signed.crt -storepass confluent -keypass confluent 39 | 40 | # Create truststore and import the CA cert. 41 | keytool -keystore kafka.$i.truststore.jks -alias CARoot -import -file snakeoil-ca-1.crt -storepass confluent -keypass confluent 42 | 43 | echo "confluent" > ${i}_sslkey_creds 44 | echo "confluent" > ${i}_keystore_creds 45 | echo "confluent" > ${i}_truststore_creds 46 | done 47 | -------------------------------------------------------------------------------- /dcompose-stack/radar-cp-s3-stack/etc/webserver/cors.conf: -------------------------------------------------------------------------------- 1 | # From: https://gist.github.com/Stanback/7145487 2 | # 3 | # CORS header support 4 | # 5 | # One way to use this is by placing it into a file called "cors_support" 6 | # under your Nginx configuration directory and placing the following 7 | # statement inside your **location** block(s): 8 | # 9 | # include cors_support; 10 | # 11 | # As of Nginx 1.7.5, add_header supports an "always" parameter which 12 | # allows CORS to work if the backend returns 4xx or 5xx status code. 13 | # 14 | # For more information on CORS, please see: http://enable-cors.org/ 15 | # Forked from this Gist: https://gist.github.com/michiel/1064640 16 | # 17 | 18 | # do not send duplicate origin headers if the underlying 19 | # service is CORS-compliant 20 | proxy_hide_header 'Access-Control-Allow-Origin'; 21 | 22 | set $cors_method ''; 23 | 24 | if ($request_method = 'GET') { 25 | set $cors_method 'noopt'; 26 | } 27 | if ($request_method = 'POST') { 28 | set $cors_method 'noopt'; 29 | } 30 | if ($request_method = 'HEAD') { 31 | set $cors_method 'noopt'; 32 | } 33 | if ($request_method = 'PUT') { 34 | set $cors_method 'noopt'; 35 | } 36 | if ($request_method = 'DELETE') { 37 | set $cors_method 'noopt'; 38 | } 39 | if ($request_method = 'OPTIONS') { 40 | # Tell client that this pre-flight info is valid for 20 days 41 | add_header 'Access-Control-Allow-Origin' "$http_origin" always; 42 | add_header 'Access-Control-Allow-Credentials' 'true' always; 43 | add_header 'Access-Control-Allow-Methods' 'GET, POST, PUT, DELETE, OPTIONS' always; 44 | add_header 'Access-Control-Allow-Headers' 'Accept,Authorization,Cache-Control,Content-Type,DNT,If-Modified-Since,Keep-Alive,Origin,User-Agent,X-Requested-With' always; 45 | add_header 'Access-Control-Max-Age' 1728000; 46 | add_header 'Content-Type' 'text/plain charset=UTF-8'; 47 | add_header 'Content-Length' 0; 48 | return 204; 49 | } 50 | if ($cors_method = 'noopt') { 51 | add_header 'Access-Control-Allow-Origin' "$http_origin" always; 52 | add_header 'Access-Control-Allow-Credentials' 'true' always; 53 | add_header 'Access-Control-Allow-Methods' 'GET, POST, PUT, DELETE, OPTIONS' always; 54 | add_header 'Access-Control-Allow-Headers' 'Accept,Authorization,Cache-Control,Content-Type,DNT,If-Modified-Since,Keep-Alive,Origin,User-Agent,X-Requested-With' always; 55 | } 56 | -------------------------------------------------------------------------------- /dcompose-stack/radar-cp-hadoop-stack/etc/webserver/cors.conf: -------------------------------------------------------------------------------- 1 | # From: 
https://gist.github.com/Stanback/7145487 2 | # 3 | # CORS header support 4 | # 5 | # One way to use this is by placing it into a file called "cors_support" 6 | # under your Nginx configuration directory and placing the following 7 | # statement inside your **location** block(s): 8 | # 9 | # include cors_support; 10 | # 11 | # As of Nginx 1.7.5, add_header supports an "always" parameter which 12 | # allows CORS to work if the backend returns 4xx or 5xx status code. 13 | # 14 | # For more information on CORS, please see: http://enable-cors.org/ 15 | # Forked from this Gist: https://gist.github.com/michiel/1064640 16 | # 17 | 18 | # do not send duplicate origin headers if the underlying 19 | # service is CORS-compliant 20 | proxy_hide_header 'Access-Control-Allow-Origin'; 21 | 22 | set $cors_method ''; 23 | 24 | if ($request_method = 'GET') { 25 | set $cors_method 'noopt'; 26 | } 27 | if ($request_method = 'POST') { 28 | set $cors_method 'noopt'; 29 | } 30 | if ($request_method = 'HEAD') { 31 | set $cors_method 'noopt'; 32 | } 33 | if ($request_method = 'PUT') { 34 | set $cors_method 'noopt'; 35 | } 36 | if ($request_method = 'DELETE') { 37 | set $cors_method 'noopt'; 38 | } 39 | if ($request_method = 'OPTIONS') { 40 | # Tell client that this pre-flight info is valid for 20 days 41 | add_header 'Access-Control-Allow-Origin' "$http_origin" always; 42 | add_header 'Access-Control-Allow-Credentials' 'true' always; 43 | add_header 'Access-Control-Allow-Methods' 'GET, POST, PUT, DELETE, OPTIONS' always; 44 | add_header 'Access-Control-Allow-Headers' 'Accept,Authorization,Cache-Control,Content-Type,DNT,If-Modified-Since,Keep-Alive,Origin,User-Agent,X-Requested-With' always; 45 | add_header 'Access-Control-Max-Age' 1728000; 46 | add_header 'Content-Type' 'text/plain charset=UTF-8'; 47 | add_header 'Content-Length' 0; 48 | return 204; 49 | } 50 | if ($cors_method = 'noopt') { 51 | add_header 'Access-Control-Allow-Origin' "$http_origin" always; 52 | add_header 'Access-Control-Allow-Credentials' 'true' always; 53 | add_header 'Access-Control-Allow-Methods' 'GET, POST, PUT, DELETE, OPTIONS' always; 54 | add_header 'Access-Control-Allow-Headers' 'Accept,Authorization,Cache-Control,Content-Type,DNT,If-Modified-Since,Keep-Alive,Origin,User-Agent,X-Requested-With' always; 55 | } 56 | -------------------------------------------------------------------------------- /dcompose-stack/radar-cp-s3-stack/etc/gateway/gateway.yml: -------------------------------------------------------------------------------- 1 | # Resource config class 2 | #resourceConfig: org.radarbase.gateway.inject.ManagementPortalEnhancerFactory 3 | 4 | server: 5 | # URI to serve data to 6 | baseUri: http://0.0.0.0:8090/radar-gateway/ 7 | # Maximum number of simultaneous requests to Kafka. 8 | #maxRequests: 200 9 | # Maximum request content length, also when decompressed. 10 | # This protects against memory overflows. 11 | #maxRequestSize: 25165824 12 | # Whether JMX should be enabled. Disable if not needed, for higher performance. 13 | #isJmxEnabled: true 14 | 15 | kafka: 16 | # Number of Kafka brokers to keep in a pool for reuse in multiple requests. 17 | # poolSize: 20 18 | # Kafka producer settings. Read from https://kafka.apache.org/documentation/#producerconfigs. 19 | producer: 20 | bootstrap.servers: kafka-1:9092,kafka-2:9092,kafka-3:9092 21 | # Kafka Admin Client settings. Read from https://kafka.apache.org/documentation/#adminclientconfigs. 
22 |   #admin:
23 |     # bootstrap server property is copied from the producer settings if none is provided.
24 |     #bootstrap.servers: kafka-1:9092
25 |   # Kafka serialization settings, used in KafkaAvroSerializer. Read from [io.confluent.kafka.serializers.AbstractKafkaSchemaSerDeConfig].
26 |   serialization:
27 |     schema.registry.url: http://schema-registry-1:8081
28 | 
29 | # Authorization settings
30 | auth:
31 |   # ManagementPortal URL. If available, this is used to read the public key from
32 |   # ManagementPortal directly. This is the recommended method of getting the public key.
33 |   managementPortalUrl: http://managementportal-app:8080/managementportal
34 |   # Whether to check that the user that submits data has the reported source ID registered
35 |   # in the ManagementPortal.
36 |   #checkSourceId: true
37 |   # OAuth 2.0 resource name.
38 |   #resourceName: res_gateway
39 |   # OAuth 2.0 token issuer. If null, this is not checked.
40 |   #issuer: null
41 |   # Key store for checking the digital signature of OAuth 2.0 JWTs.
42 |   #keyStore:
43 |     # Path to the p12 key store.
44 |     #path: null
45 |     # Alias in the key store to use
46 |     #alias: null
47 |     # Password of the key store
48 |     #password: null
49 |   # Plain-text PEM public keys
50 |   #publicKeys:
51 |     # ECDSA public keys
52 |     #ecdsa: []
53 |     # RSA public keys
54 |     #rsa: []
-------------------------------------------------------------------------------- /dcompose-stack/radar-cp-hadoop-stack/hash-backup/initialize-hb.sh: --------------------------------------------------------------------------------
1 | #!/bin/bash
2 | cd "$( dirname "${BASH_SOURCE[0]}" )"
3 | . "./backup.conf"
4 | . "../lib/util.sh" > /dev/null
5 | 
6 | IFS=',' read -r -a inputs <<< "$INPUTS"
7 | 
8 | # install hash backup if it does not exist
9 | if hash hb 2>/dev/null
10 | then
11 |   echo "Hash Backup detected. Proceeding..."
12 | else
13 |   echo "Installing Hash Backup..."
14 |   mkdir tmp
15 |   wget -q -P tmp http://www.hashbackup.com/download/hb-"${HB_VERSION}"-linux-64bit.tar.gz
16 |   tar -xzf tmp/hb-*.tar.gz -C tmp
17 |   sudo-linux cp tmp/hb-"${HB_VERSION}"/hb /usr/local/bin
18 |   rm -r tmp
19 |   echo "Hash Backup installed."
20 | fi
21 | 
22 | # initialize a backup directory for each input path and set up remote destinations
23 | for element in "${inputs[@]}"
24 | do
25 |   if [[ ! -d $element ]]
26 |   then
27 |     echo "The input path ${element} is not a directory."
28 |     exit 1
29 |   fi
30 | 
31 |   echo "Initializing backup for input: ${element}"
32 |   backupSubpath=$(basename "${element}")
33 |   finalPath="${OUTPUT}/${backupSubpath}"
34 | 
35 |   # Only init if the directory does not exist
36 |   if [[ ! -d $finalPath ]]
37 |   then
38 |     export HBPASS=${PASSPHRASE}
39 |     hb init -c "${finalPath}" -k "${KEY}" -p "env"
40 |     if [ -n "${LOCAL_SIZE}" ]
41 |     then
42 |       hb config -c "${finalPath}" cache-size-limit "${LOCAL_SIZE}"
43 |     fi
44 |     if [ -n "${ARC_SIZE}" ]
45 |     then
46 |       hb config -c "${finalPath}" arc-size-limit "${ARC_SIZE}"
47 |     fi
48 |     cp dest.conf "${finalPath}"/dest.conf
49 |     # Set up remote directory according to input path and remote root dir
50 |     sed -i -e "s~dir.*~dir ${ROOT_REMOTE_PATH}/${backupSubpath}/~g" "${finalPath}"/dest.conf
51 |   else
52 |     echo "Output directory ${finalPath} already exists; skipping initialization."
53 |   fi
54 | done
55 | 
56 | if [[ "${SET_UP_TIMER}" = "true" ]]  # string comparison; -eq would compare arithmetically and always match
57 | then
58 |   check_command_exists systemctl
59 |   copy_template_if_absent /etc/systemd/system/radar-hashbackup.service systemd/radar-hashbackup.service.template
60 |   copy_template_if_absent /etc/systemd/system/radar-hashbackup.timer systemd/radar-hashbackup.timer.template
61 | 
62 |   DIR="$( pwd )"
63 |   sudo chmod +x $DIR/run-backup.sh
64 |   inline_variable 'WorkingDirectory=' "$DIR" /etc/systemd/system/radar-hashbackup.service
65 |   inline_variable 'ExecStart=' "$DIR/run-backup.sh" /etc/systemd/system/radar-hashbackup.service
66 | 
67 |   sudo systemctl daemon-reload
68 |   sudo systemctl enable radar-hashbackup.timer
69 |   sudo systemctl start radar-hashbackup.timer
70 | fi
71 | 
-------------------------------------------------------------------------------- /images/kafka-manager/README.md: --------------------------------------------------------------------------------
1 | # Kafka manager
2 | 
3 | The [kafka-manager](https://github.com/yahoo/kafka-manager) is an interactive web-based tool for managing Apache Kafka. Kafka manager has been integrated in the stack. However, the following instructions are included so that it is easy for someone in the future to update the stack or to use kafka-manager in other projects.
4 | Instructions for deploying kafka-manager in a Docker container, proxied through nginx:
5 | 
6 | 
7 | 1. Clone the GitHub repo - `$ git clone https://github.com/yahoo/kafka-manager.git`
8 | 2. Change the working directory - `$ cd kafka-manager`
9 | 3. Create a zip distribution using scala - `$ ./sbt clean dist`
10 | 4. Note the path of the zip file created.
11 | 5. Unzip the zip file to the stack location at `RADAR-Docker/dcompose-stack/radar-cp-hadoop-stack/`
12 | 6. Change directory to the unzipped folder (in my case `$ cd kafka-manager-1.3.3.14/`)
13 | 7. Create a file named Dockerfile for specifying the docker build - `$ sudo vim Dockerfile`
14 | 8. Add the following content to the Dockerfile -
15 | ```dockerfile
16 | FROM hseeberger/scala-sbt
17 | 
18 | RUN mkdir /kafka-manager-1.3.3.14
19 | ADD . /kafka-manager-1.3.3.14
20 | ENV ZK_HOSTS=zookeeper-1:2181
21 | 
22 | WORKDIR /kafka-manager-1.3.3.14
23 | 
24 | EXPOSE 9000
25 | ENTRYPOINT ["./bin/kafka-manager","-Dconfig.file=conf/application.conf"]
26 | ```
27 | 9. Note: Change the version of `kafka-manager-{Version}` in the Dockerfile above according to the version you cloned, as specified by the unzipped folder.
28 | 10. Change the `play.http.context` parameter in the conf/application.conf file to point to the location path you are going to specify in the nginx.conf file later. In my case it was - `play.http.context = "/kafkamanager/"`
29 | 11. Now edit the `etc/nginx.conf.template` file to include the path to kafka-manager so that it is accessible from the browser. Add the following inside the server block of the nginx.conf file -
30 | ```nginx
31 | location /kafkamanager/ {
32 |   proxy_pass http://kafka-manager:9000;
33 |   proxy_set_header Host $host;
34 | }
35 | ```
36 | 12. Now start the stack with `dcompose-stack/radar-cp-hadoop-stack/install-radar-stack.sh`. This will build a docker image for kafka-manager and start it in a container. You can access it with a browser at `https://host/kafkamanager/`. Open the link and add all the information. In this case the zookeeper host is at `zookeeper-1:2181`.
This will look something like the image - 37 | 38 | ![Add a Cluster](/images/kafka-manager/img/add_cluster.png) 39 | 40 | Note- You can also take the easy route and just pull the docker image from docker hub located at `radarcns/kafka-manager`. But remember that the context path is `/kafka-manager` so you will need to specify this in your `nginx.conf` file 41 | -------------------------------------------------------------------------------- /dcompose-stack/radar-cp-hadoop-stack/bin/radar-docker: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | cd "$(dirname "${BASH_SOURCE[0]}")/.." 3 | 4 | . .env 5 | 6 | if [[ "${ENABLE_OPTIONAL_SERVICES}" = "true" ]]; then 7 | stack="${DOCKER_COMPOSE:-docker-compose} -f docker-compose.yml -f optional-services.yml" 8 | else 9 | stack=${DOCKER_COMPOSE:-docker-compose} 10 | fi 11 | 12 | function print_usage() { 13 | echo 'Control the RADAR-base platform directly' 14 | echo "Usage: $0 [COMMAND|-h|--help] ..." 15 | echo 16 | echo 'Commands:' 17 | echo ' cert-renew Request or renew SSL certificate' 18 | echo ' foreground [SERVICE...] Install the RADAR-base platform and start it in the foreground.' 19 | echo ' if services are provided in the arguments, only those services' 20 | echo ' are actually started.' 21 | echo ' install [SERVICE...] Install the RADAR-base platform, starting it in the background.' 22 | echo ' if services are provided in the arguments, only those services' 23 | echo ' are actually started.' 24 | echo ' install-systemd Install systemd wrappers to manage the RADAR platform' 25 | echo ' hdfs Start the HDFS cluster stack.' 26 | echo ' health Check the health of the cluster and restart failing services.' 27 | echo ' quit SERVICE... Stop and remove given services.' 28 | echo ' rebuild SERVICE... Rebuild and restart given services.' 29 | echo 30 | echo 'Options:' 31 | echo ' -h, --help Print this usage information.' 32 | echo 33 | echo 'docker-compose commands and options are also accepted:' 34 | echo '======================================================' 35 | echo 36 | } 37 | 38 | if [ $# = 0 ]; then 39 | print_usage 40 | exec $stack 41 | fi 42 | 43 | CMD=$1 44 | shift 45 | 46 | # advanced pattern matching 47 | shopt -s extglob 48 | 49 | case "${CMD}" in 50 | cert-renew) 51 | . ./lib/util.sh 52 | request_certificate "${SERVER_NAME}" "${SELF_SIGNED_CERT:-yes}" force 53 | ;; 54 | install) 55 | . lib/perform-install.sh 56 | ;; 57 | install-systemd) 58 | . lib/install-systemd-wrappers.sh 59 | ;; 60 | rebuild) 61 | exec $stack up -d --force-recreate --build --no-deps -V "$@" 62 | ;; 63 | quit) 64 | $stack stop "$@" && \ 65 | $stack rm -vf "$@" && \ 66 | exit 0 || exit 1 67 | ;; 68 | health) 69 | . lib/check-health.sh 70 | ;; 71 | hdfs) 72 | exec $stack up -d --remove-orphans hdfs-datanode-1 hdfs-datanode-2 hdfs-datanode-3 73 | ;; 74 | foreground) 75 | . lib/perform-install.sh 76 | exec $stack up "$@" 77 | ;; 78 | @(--help|-h)) 79 | print_usage 80 | exec $stack "$CMD" "$@" 81 | ;; 82 | *) 83 | exec $stack "$CMD" "$@" 84 | ;; 85 | esac 86 | -------------------------------------------------------------------------------- /dcompose-stack/radar-cp-hadoop-stack/etc/hdfs-restructure/restructure.yml.template: -------------------------------------------------------------------------------- 1 | service: 2 | # Whether to run the application as a polling service. 3 | enable: true 4 | # Polling interval in seconds. 
5 | interval: 300 6 | 7 | # Compression characteristics 8 | compression: 9 | # Compression type: none, zip or gzip 10 | type: gzip 11 | # Compression Factory class 12 | # factory: org.radarbase.hdfs.data.CompressionFactory 13 | # Additional compression properties 14 | # properties: {} 15 | 16 | # File format 17 | format: 18 | # Format type: CSV or JSON 19 | type: csv 20 | # Whether to deduplicate the files in each topic by default 21 | deduplication: 22 | enable: true 23 | # Deduplicate considering only distinct fields sourceId and time. 24 | # This may incur data loss if multiple measurements are recorded 25 | # at exactly the same time. By default, all values are considered 26 | # when looking at distinct lines. 27 | # distinctFields: [key.sourceId, value.time] 28 | # Format factory class 29 | # factory: org.radarbase.hdfs.data.FormatFactory 30 | # Additional format properties 31 | # properties: {} 32 | 33 | # Worker settings 34 | worker: 35 | # Maximum number of files and converters to keep open while processing 36 | cacheSize: 300 37 | # Number of threads to do processing with 38 | numThreads: 2 39 | # Maximum number of files to process in any given topic. 40 | # maxFilesPerTopic: null 41 | 42 | storage: 43 | factory: org.radarbase.hdfs.storage.LocalStorageDriver 44 | properties: 45 | localUid: 0 46 | localGid: 0 47 | 48 | # Path settings 49 | paths: 50 | # Input directories in HDFS 51 | inputs: 52 | - /topicAndroidNew 53 | # Root temporary directory for local file processing. 54 | temp: /output/+tmp 55 | # Output directory 56 | output: /output 57 | # Output path construction factory 58 | #factory: org.radarbase.hdfs.MonthlyObservationKeyPathFactory 59 | # Additional properties 60 | # properties: {} 61 | 62 | # Individual topic configuration 63 | topics: {} 64 | # # topic name 65 | # connect_fitbit_source: 66 | # # deduplicate this topic, regardless of the format settings 67 | # deduplication: 68 | # # deduplicate this topic only using given fields. 69 | # distinctFields: [value.time] 70 | # connect_fitbit_bad: 71 | # # Do not process this topic 72 | # exclude: true 73 | # biovotion_acceleration: 74 | # # Disable deduplication 75 | # deduplication: 76 | # enable: false 77 | 78 | # HDFS settings 79 | hdfs: 80 | # HDFS name node in case of a single name node, or HDFS cluster ID in case of high availability. 81 | name: hdfs-namenode-1 82 | # High availability settings: 83 | # nameNodes: 84 | # - name: hdfs1 85 | # hostname: hdfs-namenode-1 86 | # - name: hdfs2 87 | # hostname: hdfs-namenode-2 88 | # Where files will be locked. This value should be the same for all restructure processes. 89 | lockPath: /logs/org.radarbase.hdfs/lock 90 | # Additional raw HDFS configuration properties 91 | # properties: {} 92 | 93 | -------------------------------------------------------------------------------- /dcompose-stack/radar-cp-s3-stack/lib/check-health.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Check whether services are healthy. If not, restart them and notify the maintainer. 3 | 4 | cd "$( dirname "${BASH_SOURCE[0]}" )/.." 5 | 6 | stack=bin/radar-docker 7 | . lib/util.sh 8 | . ./.env 9 | 10 | function slack_notify() { 11 | # Send notification via Slack, if configured. 12 | if [ "$HEALTHCHECK_SLACK_NOTIFY" == "yes" ] ; then 13 | if [ -z "$HEALTHCHECK_SLACK_WEBHOOK_URL" ] ; then 14 | echo "Error: Slack notifications are enabled, but \$HEALTHCHECK_SLACK_WEBHOOK_URL is undefined. Unable to send Slack notification." 
15 |       exit 1
16 |     fi
17 | 
18 |     channel=${HEALTHCHECK_SLACK_CHANNEL:-#radar-ops}
19 |     color=$1
20 |     body=$2
21 |     curl -X POST --data-urlencode "payload={\"channel\": \"$channel\", \"username\": \"radar-healthcheck\", \"icon_emoji\": \":hospital:\", \"attachments\": [{\"color\": \"$color\", \"fallback\": \"$body\", \"fields\": [{\"title\": \"Health update\", \"value\": \"$body\"}]}]}" \
22 |       $HEALTHCHECK_SLACK_WEBHOOK_URL
23 |   fi
24 | }
25 | 
26 | unhealthy=()
27 | 
28 | # get all human-readable service names
29 | # see last line of loop
30 | while read service; do
31 |   # check if a container was started for the service
32 |   container=$(sudo-linux $stack ps -q $service)
33 |   if [ -z "${container}" ]; then
34 |     # no container means no running service
35 |     continue
36 |   fi
37 |   health=$(sudo-linux docker inspect --format '{{.State.Health.Status}}' $container 2>/dev/null || echo "null")
38 |   if [ "$health" = "unhealthy" ]; then
39 |     echo "Service $service is unhealthy. Restarting."
40 |     unhealthy+=("${service}")
41 |     sudo-linux $stack restart ${service}
42 |   fi
43 | done <<< "$(sudo-linux $stack config --services)"
44 | 
45 | display_host="${SERVER_NAME} ($(hostname -f), $(curl -s http://ipecho.net/plain))"
46 | 
47 | if [ "${#unhealthy[@]}" -eq 0 ]; then
48 |   if [ -f .unhealthy ]; then
49 |     rm -f .unhealthy
50 |     slack_notify good "All services on ${display_host} are healthy again"
51 |   fi
52 |   echo "All services are healthy"
53 | else
54 |   echo "${#unhealthy[@]} services were unhealthy and have been restarted."
55 | 
56 |   # Send notification to MAINTAINER
57 |   # start up the mail container if not already started
58 |   sudo-linux $stack up -d smtp
59 |   # ensure that all topics are available
60 |   sudo-linux $stack run --rm kafka-init
61 |   # save the container, so that we can use exec to send an email later
62 |   container=$(sudo-linux $stack ps -q smtp)
63 |   SAVEIFS=$IFS
64 |   IFS=,
65 |   display_services="[${unhealthy[*]}]"
66 |   IFS=$SAVEIFS
67 |   body="Services on ${display_host} are unhealthy. Services $display_services have been restarted. Please log in for further information."
68 |   echo "Sent notification to $MAINTAINER_EMAIL"
69 |   echo "$body" | sudo-linux docker exec -i ${container} mail -aFrom:$FROM_EMAIL "-s[RADAR] Services on ${SERVER_NAME} unhealthy" $MAINTAINER_EMAIL
70 | 
71 |   echo "${unhealthy[@]}" > .unhealthy
72 | 
73 |   slack_notify danger "$body"
74 | 
75 |   exit 1
76 | fi
77 | 
-------------------------------------------------------------------------------- /dcompose-stack/radar-cp-hadoop-stack/lib/check-health.sh: --------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # Check whether services are healthy. If not, restart them and notify the maintainer.
3 | 
4 | cd "$( dirname "${BASH_SOURCE[0]}" )/.."
5 | 
6 | stack=bin/radar-docker
7 | . lib/util.sh
8 | . ./.env
9 | 
10 | function slack_notify() {
11 |   # Send notification via Slack, if configured.
12 |   if [ "$HEALTHCHECK_SLACK_NOTIFY" == "yes" ] ; then
13 |     if [ -z "$HEALTHCHECK_SLACK_WEBHOOK_URL" ] ; then
14 |       echo "Error: Slack notifications are enabled, but \$HEALTHCHECK_SLACK_WEBHOOK_URL is undefined. Unable to send Slack notification."
15 |       exit 1
16 |     fi
17 | 
18 |     channel=${HEALTHCHECK_SLACK_CHANNEL:-#radar-ops}
19 |     color=$1
20 |     body=$2
21 |     curl -X POST --data-urlencode "payload={\"channel\": \"$channel\", \"username\": \"radar-healthcheck\", \"icon_emoji\": \":hospital:\", \"attachments\": [{\"color\": \"$color\", \"fallback\": \"$body\", \"fields\": [{\"title\": \"Health update\", \"value\": \"$body\"}]}]}" \
22 |       $HEALTHCHECK_SLACK_WEBHOOK_URL
23 |   fi
24 | }
25 | 
26 | unhealthy=()
27 | 
28 | # get all human-readable service names
29 | # see last line of loop
30 | while read service; do
31 |   # check if a container was started for the service
32 |   container=$(sudo-linux $stack ps -q $service)
33 |   if [ -z "${container}" ]; then
34 |     # no container means no running service
35 |     continue
36 |   fi
37 |   health=$(sudo-linux docker inspect --format '{{.State.Health.Status}}' $container 2>/dev/null || echo "null")
38 |   if [ "$health" = "unhealthy" ]; then
39 |     echo "Service $service is unhealthy. Restarting."
40 |     unhealthy+=("${service}")
41 |     sudo-linux $stack restart ${service}
42 |   fi
43 | done <<< "$(sudo-linux $stack config --services)"
44 | 
45 | display_host="${SERVER_NAME} ($(hostname -f), $(curl -s http://ipecho.net/plain))"
46 | 
47 | if [ "${#unhealthy[@]}" -eq 0 ]; then
48 |   if [ -f .unhealthy ]; then
49 |     rm -f .unhealthy
50 |     slack_notify good "All services on ${display_host} are healthy again"
51 |   fi
52 |   echo "All services are healthy"
53 | else
54 |   echo "${#unhealthy[@]} services were unhealthy and have been restarted."
55 | 
56 |   # Send notification to MAINTAINER
57 |   # start up the mail container if not already started
58 |   sudo-linux $stack up -d smtp
59 |   # ensure that all topics are available
60 |   sudo-linux $stack run --rm kafka-init
61 |   # save the container, so that we can use exec to send an email later
62 |   container=$(sudo-linux $stack ps -q smtp)
63 |   SAVEIFS=$IFS
64 |   IFS=,
65 |   display_services="[${unhealthy[*]}]"
66 |   IFS=$SAVEIFS
67 |   body="Services on ${display_host} are unhealthy. Services $display_services have been restarted. Please log in for further information."
68 |   echo "Sent notification to $MAINTAINER_EMAIL"
69 |   echo "$body" | sudo-linux docker exec -i ${container} mail -aFrom:$FROM_EMAIL "-s[RADAR] Services on ${SERVER_NAME} unhealthy" $MAINTAINER_EMAIL
70 | 
71 |   echo "${unhealthy[@]}" > .unhealthy
72 | 
73 |   slack_notify danger "$body"
74 | 
75 |   exit 1
76 | fi
77 | 
-------------------------------------------------------------------------------- /dcompose-stack/radar-cp-s3-stack/bin/keystore-init: --------------------------------------------------------------------------------
1 | #!/bin/bash
2 | 
3 | cd "$( dirname "${BASH_SOURCE[0]}" )/.."
4 | 
5 | . ./.env
6 | . lib/util.sh
7 | 
8 | function createKeyStore() {
9 |   file=$1
10 | 
11 |   if ! keytool -list $KEYTOOL_OPTS -alias radarbase-managementportal-ec >/dev/null 2>/dev/null; then
12 |     KEYTOOL_CREATE_OPTS="-genkeypair -alias radarbase-managementportal-ec -keyalg EC -keysize 256 -sigalg SHA256withECDSA -storetype PKCS12 $KEYSTORE_CREATE_OPTS"
13 |     if [ -n "${MANAGEMENTPORTAL_KEY_DNAME}" ]; then
14 |       KEYTOOL_CREATE_OPTS="$KEYTOOL_CREATE_OPTS -dname ${MANAGEMENTPORTAL_KEY_DNAME}"
15 |     fi
16 |     echo "--> Generating keystore to hold EC keypair for JWT signing"
17 |     keytool $KEYTOOL_CREATE_OPTS $KEYTOOL_OPTS
18 |   else
19 |     echo "--> ECDSA keypair for signing JWTs already exists. Not creating a new one."
20 |   fi
21 | 
22 | if !
keytool -list $KEYTOOL_OPTS -alias selfsigned >/dev/null 2>/dev/null; then 23 | KEYTOOL_CREATE_OPTS="-genkeypair -alias selfsigned -keyalg RSA -keysize 4096 -storetype PKCS12 $KEYSTORE_CREATE_OPTS" 24 | if [ -n "${MANAGEMENTPORTAL_KEY_DNAME}" ]; then 25 | KEYTOOL_CREATE_OPTS="$KEYTOOL_CREATE_OPTS -dname ${MANAGEMENTPORTAL_KEY_DNAME}" 26 | fi 27 | echo "--> Generating keystore to hold RSA keypair for JWT signing" 28 | keytool $KEYTOOL_CREATE_OPTS $KEYTOOL_OPTS 29 | else 30 | echo "--> RSA keypair for signing JWTs already exists. Not creating a new one." 31 | fi 32 | 33 | chmod 400 "$file" 34 | } 35 | 36 | function convertJksToPkcs12() { 37 | src=$1 38 | dest=$2 39 | 40 | if [ ! -e $dest ] && [ -e $src ]; then 41 | echo "--> Importing PKCS12 key store from existing JKS key store" 42 | keytool -importkeystore -srckeystore $src -destkeystore $dest -srcstoretype JKS -deststoretype PKCS12 -deststorepass radarbase -srcstorepass radarbase 43 | chmod 400 $dest 44 | fi 45 | } 46 | 47 | function writeKeys() { 48 | FILE=$1 49 | RES=$2 50 | 51 | echo "--> Updating public keys of signatures" 52 | echo "resourceName: $RES" > "$FILE" 53 | echo "publicKeys:" >> "$FILE" 54 | 55 | ALIASES=($(keytool -list $KEYTOOL_OPTS | grep PrivateKeyEntry | sed -e 's/^\([^,]*\),.*$/\1/')) 56 | for (( i=0; i < ${#ALIASES[@]}; i++)); do 57 | ALIAS=${ALIASES[$i]} 58 | ensure_variable "MANAGEMENTPORTAL_OAUTH_CHECKING_KEY_ALIASES_$i=" $ALIAS .env 59 | echo " - |-" >> "$FILE" 60 | if keytool -export $KEYTOOL_OPTS -alias $ALIAS | openssl x509 -inform der -text | grep -q ecdsa-with-SHA256; then 61 | REPLACE_PUBKEY="EC PUBLIC KEY" 62 | else 63 | REPLACE_PUBKEY="PUBLIC KEY" 64 | fi 65 | 66 | cert="$(keytool -export $KEYTOOL_OPTS -alias $ALIAS | openssl x509 -inform der -pubkey -noout)" 67 | while IFS='' read -r line && [ -n "$line" ]; do 68 | line=$(sed "s/PUBLIC KEY/$REPLACE_PUBKEY/" <<< $line) 69 | echo " $line" >> "$FILE" 70 | done <<< "$cert" 71 | done 72 | } 73 | 74 | keystorefile=etc/managementportal/config/keystore.p12 75 | oldkeystorefile=etc/managementportal/config/keystore.jks 76 | 77 | convertJksToPkcs12 $oldkeystorefile $keystorefile 78 | 79 | export KEYTOOL_OPTS="-keystore ${keystorefile} -storepass radarbase -keypass radarbase $KEYSTORE_INIT_OPTS" 80 | 81 | createKeyStore "$keystorefile" -------------------------------------------------------------------------------- /dcompose-stack/radar-cp-s3-stack/bin/radar-docker: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | cd "$(dirname "${BASH_SOURCE[0]}")/.." 3 | 4 | . .env 5 | 6 | stack_array=() 7 | stack_array+=("-f docker-compose.yml ") 8 | 9 | if [[ "${ENABLE_OPTIONAL_SERVICES}" = "true" ]]; then 10 | stack_array+=("-f optional-services.yml ") 11 | fi 12 | 13 | if [[ "${ENABLE_DASHBOARD_PIPELINE}" = "true" ]]; then 14 | stack_array+=("-f dashboard-pipeline.yml ") 15 | fi 16 | 17 | if [[ "${ENABLE_KAFKA_STREAMS}" = "true" ]]; then 18 | stack_array+=("-f radarbase-kafka-streams.yml ") 19 | fi 20 | 21 | function print_usage() { 22 | echo 'Control the RADAR-base platform directly' 23 | echo "Usage: $0 [COMMAND|-h|--help] ..." 24 | echo 25 | echo 'Commands:' 26 | echo ' cert-renew Request or renew SSL certificate' 27 | echo ' foreground [SERVICE...] Install the RADAR-base platform and start it in the foreground.' 28 | echo ' if services are provided in the arguments, only those services' 29 | echo ' are actually started.' 30 | echo ' install [SERVICE...] Install the RADAR-base platform, starting it in the background.' 
31 | echo ' if services are provided in the arguments, only those services' 32 | echo ' are actually started.' 33 | echo ' install-systemd Install systemd wrappers to manage the RADAR platform' 34 | echo ' minio Start the minio cluster.' 35 | echo ' health Check the health of the cluster and restart failing services.' 36 | echo ' quit SERVICE... Stop and remove given services.' 37 | echo ' rebuild SERVICE... Rebuild and restart given services.' 38 | echo 39 | echo 'Options:' 40 | echo ' -h, --help Print this usage information.' 41 | echo 42 | echo 'docker-compose commands and options are also accepted:' 43 | echo '======================================================' 44 | echo 45 | } 46 | 47 | if [ $# = 0 ]; then 48 | print_usage 49 | stack="${DOCKER_COMPOSE:-docker-compose} ${stack_array[@]}" 50 | exec $stack 51 | fi 52 | 53 | 54 | CMD=$1 55 | stack="${DOCKER_COMPOSE:-docker-compose} ${stack_array[@]}" 56 | shift 57 | 58 | # advanced pattern matching 59 | shopt -s extglob 60 | 61 | case "${CMD}" in 62 | cert-renew) 63 | . ./lib/util.sh 64 | request_certificate "${SERVER_NAME}" "${SELF_SIGNED_CERT:-yes}" force 65 | ;; 66 | install) 67 | . lib/perform-install.sh 68 | ;; 69 | install-systemd) 70 | . lib/install-systemd-wrappers.sh 71 | ;; 72 | rebuild) 73 | exec $stack up -d --force-recreate --build --no-deps -V "$@" 74 | ;; 75 | quit) 76 | $stack stop "$@" && \ 77 | $stack rm -vf "$@" && \ 78 | exit 0 || exit 1 79 | ;; 80 | health) 81 | . lib/check-health.sh 82 | ;; 83 | minio) 84 | exec $stack up -d --remove-orphans minio1 minio2 minio3 minio4 85 | ;; 86 | foreground) 87 | . lib/perform-install.sh 88 | exec $stack up "$@" 89 | ;; 90 | @(--help|-h)) 91 | print_usage 92 | exec $stack "$CMD" "$@" 93 | ;; 94 | *) 95 | exec $stack "$CMD" "$@" 96 | ;; 97 | esac 98 | -------------------------------------------------------------------------------- /dcompose-stack/radar-cp-hadoop-stack/bin/keystore-init: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | cd "$( dirname "${BASH_SOURCE[0]}" )/.." 4 | 5 | . ./.env 6 | . lib/util.sh 7 | 8 | function createKeyStore() { 9 | file=$1 10 | 11 | if ! keytool -list $KEYTOOL_OPTS -alias radarbase-managementportal-ec >/dev/null 2>/dev/null; then 12 | KEYTOOL_CREATE_OPTS="-genkeypair -alias radarbase-managementportal-ec -keyalg EC -keysize 256 -sigalg SHA256withECDSA -storetype PKCS12 $KEYSTORE_CREATE_OPTS" 13 | if [ -n "${MANAGEMENTPORTAL_KEY_DNAME}" ]; then 14 | KEYTOOL_CREATE_OPTS="$KEYTOOL_CREATE_OPTS -dname ${MANAGEMENTPORTAL_KEY_DNAME}" 15 | fi 16 | echo "--> Generating keystore to hold EC keypair for JWT signing" 17 | keytool $KEYTOOL_CREATE_OPTS $KEYTOOL_OPTS 18 | else 19 | echo "--> ECDSA keypair for signing JWTs already exists. Not creating a new one." 20 | fi 21 | 22 | if ! keytool -list $KEYTOOL_OPTS -alias selfsigned >/dev/null 2>/dev/null; then 23 | KEYTOOL_CREATE_OPTS="-genkeypair -alias selfsigned -keyalg RSA -keysize 4096 -storetype PKCS12 $KEYSTORE_CREATE_OPTS" 24 | if [ -n "${MANAGEMENTPORTAL_KEY_DNAME}" ]; then 25 | KEYTOOL_CREATE_OPTS="$KEYTOOL_CREATE_OPTS -dname ${MANAGEMENTPORTAL_KEY_DNAME}" 26 | fi 27 | echo "--> Generating keystore to hold RSA keypair for JWT signing" 28 | keytool $KEYTOOL_CREATE_OPTS $KEYTOOL_OPTS 29 | else 30 | echo "--> RSA keypair for signing JWTs already exists. Not creating a new one." 31 | fi 32 | 33 | chmod 400 "$file" 34 | } 35 | 36 | function convertJksToPkcs12() { 37 | src=$1 38 | dest=$2 39 | 40 | if [ ! 
-e $dest ] && [ -e $src ]; then
41 |     echo "--> Importing PKCS12 key store from existing JKS key store"
42 |     keytool -importkeystore -srckeystore $src -destkeystore $dest -srcstoretype JKS -deststoretype PKCS12 -deststorepass radarbase -srcstorepass radarbase
43 |     chmod 400 $dest
44 |   fi
45 | }
46 | 
47 | function writeKeys() {
48 |   FILE=$1
49 |   RES=$2
50 | 
51 |   echo "--> Updating public keys of signatures"
52 |   echo "resourceName: $RES" > "$FILE"
53 |   echo "publicKeys:" >> "$FILE"
54 | 
55 |   ALIASES=($(keytool -list $KEYTOOL_OPTS | grep PrivateKeyEntry | sed -e 's/^\([^,]*\),.*$/\1/'))
56 |   for (( i=0; i < ${#ALIASES[@]}; i++)); do
57 |     ALIAS=${ALIASES[$i]}
58 |     ensure_variable "MANAGEMENTPORTAL_OAUTH_CHECKING_KEY_ALIASES_$i=" $ALIAS .env
59 |     echo "  - |-" >> "$FILE"
60 |     if keytool -export $KEYTOOL_OPTS -alias $ALIAS | openssl x509 -inform der -text | grep -q ecdsa-with-SHA256; then
61 |       REPLACE_PUBKEY="EC PUBLIC KEY"
62 |     else
63 |       REPLACE_PUBKEY="PUBLIC KEY"
64 |     fi
65 | 
66 |     cert="$(keytool -export $KEYTOOL_OPTS -alias $ALIAS | openssl x509 -inform der -pubkey -noout)"
67 |     while IFS='' read -r line && [ -n "$line" ]; do
68 |       line=$(sed "s/PUBLIC KEY/$REPLACE_PUBKEY/" <<< $line)
69 |       echo "    $line" >> "$FILE"
70 |     done <<< "$cert"
71 |   done
72 | }
73 | 
74 | keystorefile=etc/managementportal/config/keystore.p12
75 | oldkeystorefile=etc/managementportal/config/keystore.jks
76 | 
77 | convertJksToPkcs12 $oldkeystorefile $keystorefile
78 | 
79 | export KEYTOOL_OPTS="-keystore ${keystorefile} -storepass radarbase -keypass radarbase $KEYSTORE_INIT_OPTS"
80 | 
81 | createKeyStore "$keystorefile"
82 | 
83 | writeKeys etc/managementportal/config/radar-is.yml res_ManagementPortal
84 | 
-------------------------------------------------------------------------------- /scripts/README.md: --------------------------------------------------------------------------------
1 | ## Scripts
2 | 
3 | This folder contains useful scripts to manage the server where the RADAR-base Platform is running.
4 | 
5 | ### `check_radar_network.sh`
6 | **Checks whether the machine is connected to the internet. The script can be parametrised with the following:**
7 | 
8 | - `nic` is the internet gateway
9 | - `lockfile` is a lock file used to check whether a previous instance is still running
10 | - `logfile` is the log file where the script logs each operation
11 | - `url` is the web site used to check the connectivity
12 | 
13 | To add a script to `CRON` as `root`, run on the command-line `sudo crontab -e -u root` and add your task at the end of the file. The syntax is
14 | ```shell
15 | * * * * * command to be executed
16 | - - - - -
17 | | | | | |
18 | | | | | +----- day of week (0 - 6) (Sunday=0)
19 | | | | +------- month (1 - 12)
20 | | | +--------- day of month (1 - 31)
21 | | +----------- hour (0 - 23)
22 | +------------- min (0 - 59)
23 | ```
24 | 
25 | For example, `*/2 * * * * /absolute/path/to/script-name.sh` will execute `script-name.sh` every `2` minutes.
26 | 
27 | Before deploying the task, make sure that all paths used by the script are absolute. Replace the relative path to `util.sh` with the absolute one.
28 | 
29 | 
30 | 
31 | ### `hdfs-data-retention/hdfs_data_retention.sh`
32 | **A script for deleting records from HDFS based on topic name and date. All records for the listed topics that are older than the specified date are deleted from HDFS.**
33 | - `hdfs-data-retention/topics_to_remove.txt` - The default file used by the above script to delete files from these topics. Please specify each topic on a new line; see the example below.
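For instance, a minimal topics file might look as follows (these topic names are only illustrative, taken from the CI environment template, not a recommended default):

```
android_phone_usage_event_aggregated
android_phone_usage_event_output
```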
34 | 
35 | Usage:
36 | To fetch the FS image file, process it with Apache Pig, and list the total size of all the relevant files, run -
37 | ```shell
38 | cd hdfs-data-retention
39 | sudo bash hdfs_data_retention.sh --date "2018-03-15 12:00"
40 | ```
41 | This will output the total size of the matched paths, like -
42 | ```
43 | (SUM OF FILES SIZES TO BE DELETED IN MB = 46555)
44 | ```
45 | and also store the final paths matching the topic and date conditions in `./tmp/final_paths/part_r_00000`
46 | 
47 | To also delete the files, and for other options, see below -
48 | ```
49 | Usage: ./hdfs_data_retention.sh --date <date> [Options...]
50 | Options: ** means required
51 | 
52 | -d|--delete: enable delete for the data. If not specified, the size of selected files is displayed.
53 | -st|--skip-trash: Enables the skipTrash option for the delete. To be used with the -d|--delete option.
54 | * -u|--url: The HDFS namenode URL to connect to. Default is hdfs://hdfs-namenode:8020
55 | * -tf|--topics-file: The path of the file containing the newline-separated list of topics to remove the files from. Default is ./topics_to_remove.txt
56 | ** -dt|--date: All the files modified before this date-time will be selected. Format is (yyyy-MM-dd HH:mm)
57 | ```
58 | Recommended use of the script for large filesystems is via a cron job or a screen session, as it may take some time to delete all the files.
59 | 
60 | Info:
61 | By default the script is set up to run against docker containers in the RADAR-base stack.
62 | The script will use the hdfs.image and hdfs.txt files from the `./tmp` folder if delete is specified and the files are not older than a day.
63 | 
64 | If you get a JAVA_HOME not set error, please uncomment and set JAVA_HOME in the script.
65 | 
-------------------------------------------------------------------------------- /dcompose-stack/firebase-app-server/README.md: --------------------------------------------------------------------------------
1 | # Firebase Notification server
2 | 
3 | This directory provides services and accompanying files required to run the FCM notifications server. Currently, the [XMPP App server](https://github.com/RADAR-base/fcmxmppserverv2) is provided, but this may later change to the [RADAR-AppServer](https://github.com/RADAR-base/RADAR-Appserver).
4 | 
5 | ## Configuration
6 | 
7 | Copy `etc/env.template` to `.env` and set the values in the new file as desired.
8 | 
9 | ```sh
10 | FCM_XMPP_APP_SERVER_DB_PATH= # The path on the host where to store database files
11 | FCM_XMPP_APP_SERVER_LOGS_PATH= # The path on the host where to store the log files generated by the server
12 | FCM_SENDER_KEY= # The Firebase Cloud Messaging Sender ID
13 | FCM_SERVER_KEY= # The Firebase Cloud Messaging Server Key
14 | ```
15 | 
16 | For more information, take a look at the instructions in the [README](https://github.com/RADAR-base/fcmxmppserverv2).
17 | 
18 | If the server is not running in a secure environment, it may be essential to explicitly add a username and password to the database.
19 | This can be done by changing the `server.database.n` properties in the [server.properties](/etc/server.properties) file, appending your username and password as stated [here](http://hsqldb.org/doc/guide/guide.html#N15798). Then add the same to the environment of the `xmppserver` service in the [docker-compose.yml](/docker-compose.yml) file with the keys `RADAR_XMPP_DB_USER` and `RADAR_XMPP_DB_PASS`, as sketched below.
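A minimal sketch of that part of the `xmppserver` service definition might look like this (the user and password values here are placeholders, assumed to match what you added to `server.properties`):

```yaml
xmppserver:
  environment:
    - RADAR_XMPP_DB_USER=myuser      # placeholder: use your own DB username
    - RADAR_XMPP_DB_PASS=mypassword  # placeholder: use your own DB password
```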
20 | 
21 | ## Usage
22 | 
23 | Since the App Server does not depend on any other services, it can be run separately by running -
24 | 
25 | ```sh
26 | bin/start-xmpp -d
27 | ```
28 | 
29 | It can be further controlled with `docker-compose` and `docker` commands.
30 | 
31 | ## Extras
32 | 
33 | There are some extra scripts provided in the `bin/` folder for convenience when administering the server.
34 | 
35 | - `get-subject-data.sh` - this can be used for getting the notifications and other data for a particular subject using their subject Id. Note that this will require the [sqltool.jar](http://hsqldb.org/doc/2.0/util-guide/sqltool-chapt.html) from hsqldb. You will also need to update any settings in [sqltool.rc](/etc/sqltool.rc) according to your DB setup. Once configured, the script can be used as follows -
36 | ```sh
37 | ./get-subject-data.sh <subject-id>
38 | ```
39 | 
40 | - `log-parser.py` - This is used for parsing information out of the log files generated by the server. It is supposed to be run as a cron job, creating CSV files for execution, delivery and error messages for each notification request over a long period of time. This can be run in cron per day as follows -
41 | ```sh
42 | 0 16 * * * python3 /home/ubuntu/xmpp-server-extras/logs-parser/log-parser.py /usr/local/var/lib/radar/xmpp/hsql/logs/ /home/ubuntu/xmpp-server-extras/logs-parser/files >> /home/ubuntu/log-parser-run.log 2>&1
43 | ```
44 | This will output the CSV files in the directory `/home/ubuntu/xmpp-server-extras/logs-parser/files`, which contains three files for each day (one for executions, one for delivery and one for errors).
45 | 
46 | - `install-systemd` - Runs the XMPP server in a systemd environment as part of system startup.
47 | 
48 | ## Other information
49 | 
50 | If you are in a development environment, it may be worth trying to use the new and improved [RADAR-AppServer](https://github.com/RADAR-base/RADAR-Appserver/tree/dev). It exposes REST endpoints along with supporting the legacy XMPP, and has secure integration with the Management Portal.
51 | 
-------------------------------------------------------------------------------- /dcompose-stack/firebase-app-server/bin/log-parser.py: --------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | 
3 | import sys, csv, json
4 | import os
5 | import os.path
6 | 
7 | def parse_logs(input_directory, output_directory):
8 |     subject_token = {}
9 | 
10 |     for log_file in os.listdir(input_directory):
11 |         # Finding out the executed ones
12 |         if not log_file.endswith('.log'):
13 |             continue
14 | 
15 |         print("Parsing {}...".format(log_file))
16 | 
17 |         output_file_name = log_file.split('.')[0]
18 |         log_path = os.path.join(input_directory, log_file)
19 |         exec_path = os.path.join(output_directory, "{}_executions.csv".format(output_file_name))
20 |         delivery_path = os.path.join(output_directory, "{}_delivery.csv".format(output_file_name))
21 |         error_path = os.path.join(output_directory, "{}_error.csv".format(output_file_name))
22 | 
23 |         with open(log_path) as f,\
24 |             open(exec_path, 'w') as exec_output,\
25 |             open(delivery_path, 'w') as delivery_output,\
26 |             open(error_path, 'w') as error_output:
27 |             exec_writer = csv.writer(exec_output, delimiter=',',
28 |                 quotechar='|', quoting=csv.QUOTE_MINIMAL)
29 |             delivery_writer = csv.writer(delivery_output, delimiter=',',
30 |                 quotechar='|', quoting=csv.QUOTE_MINIMAL)
31 |             error_writer = csv.writer(error_output, delimiter=',',
32 |                 quotechar='|', quoting=csv.QUOTE_MINIMAL)
33 | 
34 |             exec_writer.writerow(['date', 'time', 'subject_id', 'fcm_token', 'scheduled_time', 'executed?'])
35 |             delivery_writer.writerow(['date', 'time', 'subject_id', 'fcm_token', 'message_sent_timestamp', 'message_status'])
36 |             for line in f:
37 |                 data = line.split()
38 |                 if len(data) < 2:
39 |                     continue
40 |                 date = data[0]
41 |                 time = data[1]
42 |                 if "Executing Execution" in line:
43 |                     #print(line)
44 |                     #print(str(data))
45 |                     ids = data[7].split('=')[1].split('+')
46 |                     fcm_token = ids[0]
47 |                     subject_id = ids[1]
48 |                     scheduled_time = data[8].split('=')[1].split(',')[0]
49 |                     subject_token[fcm_token] = subject_id
50 |                     exec_writer.writerow([date, time, subject_id, fcm_token, scheduled_time, 'true'])
51 |                     #print(str(ids))
52 |                     #break
53 |                 elif "message_type\":\"receipt" in line:
54 |                     #print(line)
55 |                     parsed_data = data[9].split('>')[1].split('<')[0]
56 |                     json_data = json.loads(parsed_data)
57 |                     message_status = json_data['data']['message_status']
58 |                     message_sent_timestamp = json_data['data']['message_sent_timestamp']
59 |                     fcm_token = json_data['data']['device_registration_id']
60 |                     try:
61 |                         subject_id = subject_token[fcm_token]
62 |                     except KeyError:
63 |                         subject_id = 'unknown'
64 |                         pass
65 |                     #print(message_status, message_sent_timestamp, fcm_token, subject_id)
66 |                     delivery_writer.writerow([date, time, subject_id, fcm_token, message_sent_timestamp, message_status])
67 |                 elif 'message_type":"nack' in line:
68 |                     error_writer.writerow([line])
69 |                     print(line)
70 | 
71 | 
72 | if __name__ == '__main__':
73 |     if len(sys.argv) != 3:
74 |         print("Usage: {} <input_directory> <output_directory>".format(sys.argv[0]))
75 |         sys.exit(1)
76 | 
77 |     input_directory = sys.argv[1]
78 |     output_directory = sys.argv[2]
79 |     parse_logs(input_directory, output_directory)
80 | 
-------------------------------------------------------------------------------- /dcompose-stack/radar-cp-hadoop-stack/images/hdfs/entrypoint.sh: --------------------------------------------------------------------------------
1 | #!/bin/bash
2 | 
3 | set -eo pipefail
4 | 
5 | wait_until() {
6 |   local hostname=${1?}
7 |   local
port=${2?} 8 | local retry=${3:-100} 9 | local sleep_secs=${4:-2} 10 | 11 | local address_up=0 12 | 13 | while [ ${retry} -gt 0 ] ; do 14 | echo "Waiting until ${hostname}:${port} is up ... with retry count: ${retry}" 15 | if nc -z ${hostname} ${port}; then 16 | address_up=1 17 | break 18 | fi 19 | retry=$((retry-1)) 20 | sleep ${sleep_secs} 21 | done 22 | 23 | if [ $address_up -eq 0 ]; then 24 | echo "GIVE UP waiting until ${hostname}:${port} is up! " 25 | exit 1 26 | fi 27 | } 28 | 29 | format_hdfs() { 30 | NAME_DIR=$1 31 | shift 32 | IFS=',' read -r -a namedirs <<< $(echo "$NAME_DIR" | sed -e 's#file://##g') 33 | 34 | for namedir in "${namedirs[@]}"; do 35 | mkdir -p "$namedir" 36 | if [ ! -d "$namedir" ]; then 37 | echo "Namenode name directory not found: $namedir" 38 | exit 2 39 | fi 40 | 41 | if [ ! -e "$namedir/current/VERSION" ]; then 42 | echo "Formatting namenode name directory: $namedir is not yet formatted" 43 | hdfs namenode $@ 44 | return 0 45 | fi 46 | done 47 | return 1 48 | } 49 | 50 | # apply template 51 | for template in $(ls ${HADOOP_CONF_DIR}/*.mustache) 52 | do 53 | conf_file=${template%.mustache} 54 | cat ${conf_file}.mustache | mustache.sh > ${conf_file} 55 | done 56 | 57 | USAGE=$'Usage: $0 [CMD] ...\n\tCMD: journalnode|namenode-1|namenode-2|datanode|resourcemanager-1|nodemanager|historyserver-1' 58 | 59 | if [ "$#" == "0" ]; then 60 | echo "$USAGE" 61 | exit 1 62 | fi 63 | 64 | CMD=$1 65 | shift 66 | 67 | case $CMD in 68 | "journalnode") 69 | exec hdfs journalnode "$@" 70 | ;; 71 | "namenode-1") 72 | if format_hdfs "$HADOOP_DFS_NAME_DIR" -format -force && [ "${HADOOP_NAMENODE_HA}" != "" ]; then 73 | hdfs zkfc -formatZK -force 74 | fi 75 | # wait_until ${HADOOP_QJOURNAL_ADDRESS%%:*} 8485 76 | if [ "${HADOOP_NAMENODE_HA}" != "" ]; then 77 | hdfs zkfc & 78 | fi 79 | exec hdfs namenode "$@" 80 | ;; 81 | "namenode-2") 82 | wait_until ${HADOOP_NAMENODE1_HOSTNAME} 8020 83 | if format_hdfs "$HADOOP_DFS_NAME_DIR" -bootstrapStandby && [ "${HADOOP_NAMENODE_HA}" != "" ]; then 84 | hdfs zkfc -formatZK -force 85 | fi 86 | 87 | hdfs zkfc & 88 | exec hdfs namenode "$@" 89 | ;; 90 | "datanode") 91 | wait_until ${HADOOP_NAMENODE1_HOSTNAME} 8020 92 | exec hdfs datanode "$@" 93 | ;; 94 | "resourcemanager-1") 95 | exec su-exec yarn yarn resourcemanager "$@" 96 | ;; 97 | "nodemanager") 98 | wait_until ${YARN_RESOURCEMANAGER_HOSTNAME} 8031 99 | exec su-exec yarn yarn nodemanager "$@" 100 | ;; 101 | "historyserver-1") 102 | wait_until ${HADOOP_NAMENODE1_HOSTNAME} 8020 103 | 104 | set +e -x 105 | 106 | hdfs dfs -ls /tmp > /dev/null 2>&1 107 | if [ $? -ne 0 ]; then 108 | hdfs dfs -mkdir -p /tmp 109 | hdfs dfs -chmod 1777 /tmp 110 | fi 111 | 112 | hdfs dfs -ls /user > /dev/null 2>&1 113 | if [ $? -ne 0 ]; then 114 | hdfs dfs -mkdir -p /user/hdfs 115 | hdfs dfs -chmod 755 /user 116 | fi 117 | 118 | hdfs dfs -ls ${YARN_REMOTE_APP_LOG_DIR} > /dev/null 2>&1 119 | if [ $? -ne 0 ]; then 120 | su-exec yarn hdfs dfs -mkdir -p ${YARN_REMOTE_APP_LOG_DIR} 121 | su-exec yarn hdfs dfs -chmod -R 1777 ${YARN_REMOTE_APP_LOG_DIR} 122 | su-exec yarn hdfs dfs -chown -R yarn:hadoop ${YARN_REMOTE_APP_LOG_DIR} 123 | fi 124 | 125 | hdfs dfs -ls ${YARN_APP_MAPRED_STAGING_DIR} > /dev/null 2>&1 126 | if [ $? 
-ne 0 ]; then
127 |     su-exec mapred hdfs dfs -mkdir -p ${YARN_APP_MAPRED_STAGING_DIR}
128 |     su-exec mapred hdfs dfs -chmod -R 1777 ${YARN_APP_MAPRED_STAGING_DIR}
129 |     su-exec mapred hdfs dfs -chown -R mapred:hadoop ${YARN_APP_MAPRED_STAGING_DIR}
130 |   fi
131 | 
132 |   set -e +x
133 | 
134 |   exec su-exec mapred mapred historyserver "$@"
135 |   ;;
136 | *)
137 |   exec "$CMD" "$@"
138 |   ;;
139 | esac
140 | 
-------------------------------------------------------------------------------- /dcompose-stack/radar-cp-hadoop-stack/images/hdfs/hdfs-site.xml.mustache: --------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="UTF-8"?>
2 | <?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
3 | 
16 | 
17 | 
18 | <configuration>
19 | 
20 |   <property>
21 |     <name>dfs.namenode.name.dir</name>
22 |     <value>{{HADOOP_DFS_NAME_DIR}}</value>
23 |   </property>
24 | 
25 |   <property>
26 |     <name>dfs.datanode.data.dir</name>
27 |     <value>{{HADOOP_DFS_DATA_DIR}}</value>
28 |   </property>
29 | 
30 |   <property>
31 |     <name>dfs.permissions.superusergroup</name>
32 |     <value>hadoop</value>
33 |   </property>
34 | 
35 |   <property>
36 |     <name>dfs.nameservices</name>
37 |     <value>{{HADOOP_CLUSTER_NAME}}</value>
38 |   </property>
39 | 
40 |   <property>
41 |     <name>dfs.replication</name>
42 |     <value>{{HADOOP_DFS_REPLICATION}}</value>
43 |   </property>
44 | 
45 | {{#HADOOP_NAMENODE_HA}}
46 |   <property>
47 |     <name>dfs.ha.namenodes.{{HADOOP_CLUSTER_NAME}}</name>
48 |     <value>{{HADOOP_NAMENODE_HA}}</value>
49 |   </property>
50 | 
51 |   <property>
52 |     <name>dfs.namenode.rpc-address.{{HADOOP_CLUSTER_NAME}}.nn1</name>
53 |     <value>{{HADOOP_NAMENODE1_HOSTNAME}}:8020</value>
54 |   </property>
55 | 
56 |   <property>
57 |     <name>dfs.namenode.http-address.{{HADOOP_CLUSTER_NAME}}.nn1</name>
58 |     <value>{{HADOOP_NAMENODE1_HOSTNAME}}:9870</value>
59 |   </property>
60 | 
61 |   <property>
62 |     <name>dfs.namenode.rpc-address.{{HADOOP_CLUSTER_NAME}}.nn2</name>
63 |     <value>{{HADOOP_NAMENODE2_HOSTNAME}}:8020</value>
64 |   </property>
65 | 
66 |   <property>
67 |     <name>dfs.namenode.http-address.{{HADOOP_CLUSTER_NAME}}.nn2</name>
68 |     <value>{{HADOOP_NAMENODE2_HOSTNAME}}:9870</value>
69 |   </property>
70 | 
71 |   <property>
72 |     <name>dfs.namenode.shared.edits.dir</name>
73 |     <value>qjournal://{{HADOOP_QJOURNAL_ADDRESS}}/{{HADOOP_CLUSTER_NAME}}</value>
74 |   </property>
75 | 
76 |   <property>
77 |     <name>dfs.client.failover.proxy.provider.{{HADOOP_CLUSTER_NAME}}</name>
78 |     <value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
79 |   </property>
80 | 
81 |   <property>
82 |     <name>dfs.ha.fencing.methods</name>
83 |     <value>shell(/usr/local/bin/fence.sh)</value>
84 |   </property>
85 | 
86 |   <property>
87 |     <name>dfs.journalnode.edits.dir</name>
88 |     <value>{{HADOOP_TMP_DIR}}/dfs/journal/data</value>
89 |   </property>
90 | 
91 |   <property>
92 |     <name>dfs.ha.automatic-failover.enabled</name>
93 |     <value>true</value>
94 |   </property>
95 | {{/HADOOP_NAMENODE_HA}}
96 | 
97 | {{^HADOOP_NAMENODE_HA}}
98 |   <property>
99 |     <name>dfs.namenode.rpc-address.{{HADOOP_CLUSTER_NAME}}</name>
100 |     <value>{{HADOOP_NAMENODE1_HOSTNAME}}:8020</value>
101 |   </property>
102 | 
103 |   <property>
104 |     <name>dfs.namenode.http-address.{{HADOOP_CLUSTER_NAME}}</name>
105 |     <value>{{HADOOP_NAMENODE1_HOSTNAME}}:9870</value>
106 |   </property>
107 | {{/HADOOP_NAMENODE_HA}}
108 | 
109 |   <property>
110 |     <name>dfs.client.block.write.replace-datanode-on-failure.policy</name>
111 |     <value>NEVER</value>
112 |   </property>
113 | 
114 |   <property>
115 |     <name>dfs.client.block.write.replace-datanode-on-failure.enable</name>
116 |     <value>false</value>
117 |   </property>
118 | 
119 |   <property>
120 |     <name>dfs.namenode.datanode.registration.ip-hostname-check</name>
121 |     <value>false</value>
122 |   </property>
123 | 
124 | </configuration>
-------------------------------------------------------------------------------- /dcompose-stack/radar-cp-hadoop-stack/hash-backup/README.md: --------------------------------------------------------------------------------
1 | # HASH BACKUPS
2 | 
3 | This directory contains a unified solution to create backups for different paths (or directories) in the system. For a quick start, the postgres path (configured in the .env file) is already included by default.
4 | 
5 | 
6 | - First configure all the parameters in the `backup.conf` file. Please note that the key and passphrase should be provided, and should be kept safe and backed up elsewhere.
7 | - The passphrase will be taken from the environment variable HBPASS whenever access to the backup is requested.
A passphrase secures the backup data: for users in hosted or managed environments such as a VPS; when the backup directory is on USB thumb drives; or when the backup directory is on mounted storage like Google Drive, Amazon Cloud Drive, Dropbox, etc. Additional details are available here - [HashBackup Security](http://www.hashbackup.com/technical/security)
8 | - Then configure the remote destinations (if any) to send the backups to in the `dest.conf` file. Please look at the hashbackup documentation for more info on this. For a start, FTP and Amazon S3 examples are included. Note that you can leave `dir` at any value, since it will eventually be replaced by the script based on the `ROOT_REMOTE_PATH` and `INPUTS` specified in the `backup.conf` file.
9 | - Then run the initialization script -
10 | ```shell
11 | sudo bash initialize-hb.sh
12 | ```
13 | - This should initialize the hashbackup output directories with the specified key and passphrase and apply any configurations.
14 | - If the `SET_UP_TIMER` parameter in `backup.conf` is set to `true`, then the above command automatically configures a `systemd timer` to run the backups (the `./run-backup.sh` script) daily at 3am. This can be changed in `/etc/systemd/system/radar-hashbackup.timer`.
15 | - The systemd timer is recommended, but you may alternatively run this via a CRON job; just add the following to the crontab -
16 | ```
17 | 00 03 * * * root sudo bash /<path-to-repo>/dcompose-stack/radar-cp-hadoop-stack/hash-backup/run-backup.sh
18 | ```
19 | 
20 | If the `systemd` timer is set to run the backups, then the backup should be controlled via `systemctl`.
21 | ```shell
22 | # query the latest status and logs of the backup service
23 | sudo systemctl status radar-hashbackup
24 | 
25 | # Stop backup timer
26 | sudo systemctl stop radar-hashbackup.timer
27 | 
28 | # Restart backup timer (timer units do not support reload)
29 | sudo systemctl restart radar-hashbackup.timer
30 | 
31 | # Start backup timer
32 | sudo systemctl start radar-hashbackup.timer
33 | 
34 | # Full radar-hashbackup system logs
35 | sudo journalctl -u radar-hashbackup
36 | ```
37 | The CRON job should preferably not be used if `systemd` is used. To remove the `systemctl` integration, run
38 | ```
39 | sudo systemctl disable radar-hashbackup
40 | ```
41 | 
42 | 
43 | **Notes**:
44 | If you want to run the backups once or manually, instead of using `systemd` or `CRON`, you can just run the backup script directly -
45 | ```shell
46 | sudo bash run-backup.sh
47 | ```
48 | 
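Since access to the backup reads the passphrase from the `HBPASS` environment variable described above, a one-off manual invocation typically looks like the following minimal sketch (the passphrase value is a placeholder, and `sudo -E` is only one way to preserve the variable across `sudo`, depending on your sudoers policy) -
```shell
# Make the backup passphrase available to hashbackup
export HBPASS='<your-passphrase>'

# -E preserves the environment (including HBPASS) across sudo
sudo -E bash run-backup.sh
```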
49 | Also remember to upgrade HashBackup frequently (roughly every 3 months), since its documentation states that - `The compatibility goal is that backups created less than a year ago should be accessible with the latest version.`
50 | 
51 | Currently, the hashbackups are configured to use input paths directly, but for systems like databases you should prefer to first create a dump of the database at a file path and then use that path in the hashbackup configuration.
52 | This can easily be done using a cron job, for example -
53 | the following creates a dump of the postgres DB running inside a docker container into a directory on the host named `/localpostgresdump` every night at midnight -
54 | 
55 | ```
56 | 00 00 * * * docker exec <postgres-container> pg_dumpall > /localpostgresdump/backup
57 | ```
58 | 
59 | You can then add the path `/localpostgresdump` to `INPUTS` in the `backup.conf` file, which will create a backup of the SQL dumps.
60 | 
61 | 
62 | ## Important INFO
63 | Quoting the HashBackup docs from the download page -
64 | ```
65 | Beta versions of HashBackup expire quarterly on the 15th of January, April, July, and October. Use hb upgrade to get the latest version and extend the expiration date.
66 | IMPORTANT: You can always access your backup data after the expiration date: everything continues to work except the backup command.
67 | ```
68 | 
69 | This means you will need to upgrade HashBackup regularly. You can easily set up a CRON job to accomplish this. The following example shows how to upgrade every week at 1 AM on a Sunday.
70 | ```
71 | 0 1 * * 0 root hb upgrade
72 | ```
73 | 
-------------------------------------------------------------------------------- /dcompose-stack/radar-cp-s3-stack/optional-services.yml: --------------------------------------------------------------------------------
1 | ---
2 | version: '3.5'
3 | 
4 | 
5 | volumes:
6 |   fitbit-logs: {}
7 | 
8 | services:
9 | #---------------------------------------------------------------------------#
10 | # RADAR REDCap Integration                                                   #
11 | #---------------------------------------------------------------------------#
12 |   radar-integration:
13 |     image: radarbase/radar-redcapintegration:0.1.0
14 |     networks:
15 |       - api
16 |       - default
17 |     depends_on:
18 |       - managementportal-app
19 |     restart: always
20 |     volumes:
21 |       - "./etc/redcap-integration:/usr/local/etc/radar-redcap-int"
22 |     healthcheck:
23 |       test: ["CMD-SHELL", "wget --post-data {} http://localhost:8080/redcap/trigger 2>&1 | grep -q 500 || exit 1"]
24 |       interval: 1m
25 |       timeout: 5s
26 |       retries: 3
27 | 
28 | #---------------------------------------------------------------------------#
29 | # RADAR Fitbit connector                                                     #
30 | #---------------------------------------------------------------------------#
31 |   radar-fitbit-connector:
32 |     image: radarbase/kafka-connect-rest-fitbit-source:0.3.2
33 |     restart: on-failure
34 |     volumes:
35 |       - ./etc/fitbit/docker/source-fitbit.properties:/etc/kafka-connect/source-fitbit.properties
36 |       - ./etc/fitbit/docker/users:/var/lib/kafka-connect-fitbit-source/users
37 |       - fitbit-logs:/var/lib/kafka-connect-fitbit-source/logs
38 |     networks:
39 |       - zookeeper
40 |       - kafka
41 |       - default
42 |     depends_on:
43 |       - zookeeper-1
44 |       - zookeeper-2
45 |       - zookeeper-3
46 |       - kafka-1
47 |       - kafka-2
48 |       - kafka-3
49 |       - schema-registry-1
50 |     environment:
51 |       CONNECT_BOOTSTRAP_SERVERS: PLAINTEXT://kafka-1:9092,PLAINTEXT://kafka-2:9092,PLAINTEXT://kafka-3:9092
52 |       CONNECT_REST_PORT: 8083
53 |       CONNECT_GROUP_ID: "default"
54 |       CONNECT_CONFIG_STORAGE_TOPIC: "default.config"
55 |       CONNECT_OFFSET_STORAGE_TOPIC: "default.offsets"
56 |       CONNECT_STATUS_STORAGE_TOPIC: "default.status"
57 |       CONNECT_KEY_CONVERTER: "io.confluent.connect.avro.AvroConverter"
58 |       CONNECT_VALUE_CONVERTER: "io.confluent.connect.avro.AvroConverter"
59 |       CONNECT_KEY_CONVERTER_SCHEMA_REGISTRY_URL: "http://schema-registry-1:8081"
60 |       CONNECT_VALUE_CONVERTER_SCHEMA_REGISTRY_URL: "http://schema-registry-1:8081"
61 |       CONNECT_INTERNAL_KEY_CONVERTER: "org.apache.kafka.connect.json.JsonConverter"
62 |       CONNECT_INTERNAL_VALUE_CONVERTER: "org.apache.kafka.connect.json.JsonConverter"
63 |       CONNECT_OFFSET_STORAGE_FILE_FILENAME: "/var/lib/kafka-connect-fitbit-source/logs/connect.offsets"
64 |       CONNECT_REST_ADVERTISED_HOST_NAME: "radar-fitbit-connector"
65 |       CONNECT_ZOOKEEPER_CONNECT: zookeeper-1:2181,zookeeper-2:2181,zookeeper-3:2181
66 |       CONNECTOR_PROPERTY_FILE_PREFIX: "source-fitbit"
67 |       KAFKA_HEAP_OPTS: "-Xms256m -Xmx768m"
68 |       KAFKA_BROKERS: 3
69 |       CONNECT_LOG4J_LOGGERS: "org.reflections=ERROR"
70 |     healthcheck:
71 |       test: ["CMD-SHELL", "curl -sf localhost:8083/connectors/radar-fitbit-source/status | grep -o
'\"state\":\"[^\"]*\"' | tr '\\n' ',' | grep -vq FAILED || exit 1"] 72 | interval: 1m 73 | timeout: 5s 74 | retries: 3 75 | 76 | radar-rest-sources-backend: 77 | image: radarbase/radar-rest-source-auth-backend:2.0 78 | depends_on: 79 | - radarbase-postgresql 80 | networks: 81 | - default 82 | - api 83 | - management 84 | environment: 85 | - SPRING_DATASOURCE_URL=jdbc:postgresql://radarbase-postgresql:5432/restsourceauthorizer 86 | - SPRING_DATASOURCE_USERNAME=${POSTGRES_USER} 87 | - SPRING_DATASOURCE_PASSWORD=${POSTGRES_PASSWORD} 88 | - REST_SOURCE_AUTHORIZER_SOURCE_CLIENTS_FILE_PATH=app-includes/rest_source_clients_configs.yml 89 | - APP_SLEEP=10 # gives time for the database to boot before the application 90 | volumes: 91 | - ./etc/rest-source-authorizer/:/app-includes/ 92 | healthcheck: 93 | test: ["CMD", "wget", "--spider", "http://localhost:8080/users"] 94 | interval: 1m30s 95 | timeout: 5s 96 | retries: 3 97 | 98 | radar-rest-sources-authorizer: 99 | image: radarbase/radar-rest-source-authorizer:2.0 100 | networks: 101 | - api 102 | depends_on: 103 | - radar-rest-sources-backend 104 | - radarbase-postgresql 105 | healthcheck: 106 | test: ["CMD", "wget", "--spider", "http://localhost:80"] 107 | interval: 1m30s 108 | timeout: 5s 109 | retries: 3 110 | -------------------------------------------------------------------------------- /dcompose-stack/radar-cp-s3-stack/etc/radar-backend/radar.yml.template: -------------------------------------------------------------------------------- 1 | version: 1.0 2 | released: 2018-09-13 3 | 4 | #============================== Zookeeper ==============================# 5 | #List of Zookeeper instances 6 | zookeeper: 7 | - host: zookeeper-1 8 | port: 2181 9 | 10 | #================================ Kafka ================================# 11 | #List of Kafka brokers 12 | broker: 13 | - host: kafka-1 14 | port: 9092 15 | 16 | #=========================== Schema Registry ===========================# 17 | #List of Schema Registry instances 18 | schema_registry: 19 | - host: schema-registry-1 20 | port: 8081 21 | protocol: http 22 | #=========================== Kafka REST Proxy ===========================# 23 | #List of kafka Rest proxy instances 24 | rest_proxy: 25 | host: rest-proxy-1 26 | port: 8082 27 | protocol: http 28 | 29 | #Kafka internal parameters 30 | #============================ Kafka Streams ============================# 31 | #The number of threads that a stream must be run according is priority 32 | stream: 33 | threads_per_priority: 34 | low: 1 35 | normal: 2 36 | high: 4 37 | 38 | properties: 39 | max.request.size: 3500042 #Set message.max.bytes for kafka brokers higher than or equal to this value 40 | retries: 15 41 | session.timeout.ms: 500000 42 | commit.interval.ms: 300000 43 | request.timeout.ms: 10860000 44 | state.cleanup.delay.ms: 10860000 45 | 46 | streams: 47 | - class: org.radarcns.stream.empatica.E4AccelerationStream 48 | - class: org.radarcns.stream.empatica.E4BatteryLevelStream 49 | - class: org.radarcns.stream.empatica.E4BloodVolumePulseStream 50 | - class: org.radarcns.stream.empatica.E4ElectroDermalActivityStream 51 | - class: org.radarcns.stream.empatica.E4HeartRateStream 52 | - class: org.radarcns.stream.empatica.E4InterBeatIntervalStream 53 | - class: org.radarcns.stream.empatica.E4TemperatureStream 54 | - class: org.radarcns.stream.phone.PhoneAccelerationStream 55 | - class: org.radarcns.stream.phone.PhoneBatteryStream 56 | - class: org.radarcns.stream.phone.PhoneUsageStream 57 | - class: 
org.radarcns.stream.phone.PhoneUsageAggregationStream 58 | 59 | 60 | #====================== Source statistics monitor ======================# 61 | source_statistics: 62 | - name: Empatica E4 63 | topics: 64 | - android_empatica_e4_blood_volume_pulse_1min 65 | output_topic: source_statistics_empatica_e4 66 | - name: Biovotion VSM1 67 | topics: 68 | - android_biovotion_vsm1_acceleration_1min 69 | output_topic: source_statistics_biovotion_vsm1 70 | - name: RADAR pRMT 71 | topics: 72 | - android_phone_acceleration_1min 73 | - android_phone_bluetooth_devices 74 | - android_phone_sms 75 | - android_phone_call 76 | - android_phone_contacts 77 | - android_phone_usage_event 78 | - android_phone_relative_location 79 | output_topic: source_statistics_android_phone 80 | 81 | #======================== Battery level monitor ========================# 82 | battery_monitor: 83 | level: LOW 84 | notify: # Each project can have a number of email addresses 85 | - project_id: s1 86 | email_address: 87 | - notifier1@email 88 | - project_id: s2 89 | email_address: 90 | - notifier2@email 91 | email_host: smtp 92 | email_port: 25 93 | email_user: user@example.com 94 | topics: 95 | - android_empatica_e4_battery_level 96 | 97 | #======================= Disconnection monitor==========================# 98 | disconnect_monitor: 99 | notify: # Each project can have a number of email addresses 100 | - project_id: s1 101 | email_address: 102 | - notifier1@email 103 | - project_id: s2 104 | email_address: 105 | - notifier2@email 106 | email_host: smtp 107 | email_port: 25 108 | email_user: user@example.com 109 | topics: 110 | - android_empatica_e4_temperature 111 | timeout: 1800 # seconds after which a stream is set disconnected 112 | alert_repetitions: 2 # number of additional emails to send after the first 113 | 114 | #====================== Source statistics monitor ======================# 115 | statistics_monitors: 116 | - name: Empatica E4 117 | topics: 118 | - android_empatica_e4_blood_volume_pulse_1min 119 | output_topic: source_statistics_empatica_e4 120 | - name: Biovotion VSM1 121 | topics: 122 | - android_biovotion_vsm1_acceleration_1min 123 | output_topic: source_statistics_biovotion_vsm1 124 | - name: RADAR pRMT 125 | topics: 126 | - android_phone_acceleration_1min 127 | - android_phone_bluetooth_devices 128 | - android_phone_sms 129 | - android_phone_call 130 | - android_phone_contacts 131 | - android_phone_usage_event 132 | - android_phone_relative_location 133 | output_topic: source_statistics_android_phone 134 | 135 | persistence_path: /var/lib/radar/data 136 | -------------------------------------------------------------------------------- /dcompose-stack/radar-cp-hadoop-stack/etc/radar-backend/radar.yml.template: -------------------------------------------------------------------------------- 1 | version: 1.0 2 | released: 2018-09-13 3 | 4 | #============================== Zookeeper ==============================# 5 | #List of Zookeeper instances 6 | zookeeper: 7 | - host: zookeeper-1 8 | port: 2181 9 | 10 | #================================ Kafka ================================# 11 | #List of Kafka brokers 12 | broker: 13 | - host: kafka-1 14 | port: 9092 15 | 16 | #=========================== Schema Registry ===========================# 17 | #List of Schema Registry instances 18 | schema_registry: 19 | - host: schema-registry-1 20 | port: 8081 21 | protocol: http 22 | #=========================== Kafka REST Proxy ===========================# 23 | #List of kafka Rest proxy instances 24 | rest_proxy: 
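# (note: a single REST proxy instance is configured here as a mapping, rather than a list like the zookeeper and broker entries above)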
25 | host: rest-proxy-1 26 | port: 8082 27 | protocol: http 28 | 29 | #Kafka internal parameters 30 | #============================ Kafka Streams ============================# 31 | #The number of threads that each stream must be run with, according to its priority 32 | stream: 33 | threads_per_priority: 34 | low: 1 35 | normal: 2 36 | high: 4 37 | 38 | properties: 39 | max.request.size: 3500042 #Set message.max.bytes for kafka brokers higher than or equal to this value 40 | retries: 15 41 | session.timeout.ms: 500000 42 | commit.interval.ms: 300000 43 | request.timeout.ms: 10860000 44 | state.cleanup.delay.ms: 10860000 45 | 46 | streams: 47 | - class: org.radarcns.stream.empatica.E4AccelerationStream 48 | - class: org.radarcns.stream.empatica.E4BatteryLevelStream 49 | - class: org.radarcns.stream.empatica.E4BloodVolumePulseStream 50 | - class: org.radarcns.stream.empatica.E4ElectroDermalActivityStream 51 | - class: org.radarcns.stream.empatica.E4HeartRateStream 52 | - class: org.radarcns.stream.empatica.E4InterBeatIntervalStream 53 | - class: org.radarcns.stream.empatica.E4TemperatureStream 54 | - class: org.radarcns.stream.phone.PhoneAccelerationStream 55 | - class: org.radarcns.stream.phone.PhoneBatteryStream 56 | - class: org.radarcns.stream.phone.PhoneUsageStream 57 | - class: org.radarcns.stream.phone.PhoneUsageAggregationStream 58 | 59 | 60 | #====================== Source statistics monitor ======================# 61 | source_statistics: 62 | - name: Empatica E4 63 | topics: 64 | - android_empatica_e4_blood_volume_pulse_1min 65 | output_topic: source_statistics_empatica_e4 66 | - name: Biovotion VSM1 67 | topics: 68 | - android_biovotion_vsm1_acceleration_1min 69 | output_topic: source_statistics_biovotion_vsm1 70 | - name: RADAR pRMT 71 | topics: 72 | - android_phone_acceleration_1min 73 | - android_phone_bluetooth_devices 74 | - android_phone_sms 75 | - android_phone_call 76 | - android_phone_contacts 77 | - android_phone_usage_event 78 | - android_phone_relative_location 79 | output_topic: source_statistics_android_phone 80 | 81 | #======================== Battery level monitor ========================# 82 | battery_monitor: 83 | level: LOW 84 | notify: # Each project can have a number of email addresses 85 | - project_id: s1 86 | email_address: 87 | - notifier1@email 88 | - project_id: s2 89 | email_address: 90 | - notifier2@email 91 | email_host: smtp 92 | email_port: 25 93 | email_user: user@example.com 94 | topics: 95 | - android_empatica_e4_battery_level 96 | 97 | #======================= Disconnection monitor==========================# 98 | disconnect_monitor: 99 | notify: # Each project can have a number of email addresses 100 | - project_id: s1 101 | email_address: 102 | - notifier1@email 103 | - project_id: s2 104 | email_address: 105 | - notifier2@email 106 | email_host: smtp 107 | email_port: 25 108 | email_user: user@example.com 109 | topics: 110 | - android_empatica_e4_temperature 111 | timeout: 1800 # seconds after which a stream is set disconnected 112 | alert_repetitions: 2 # number of additional emails to send after the first 113 | 114 | #====================== Source statistics monitor ======================# 115 | statistics_monitors: 116 | - name: Empatica E4 117 | topics: 118 | - android_empatica_e4_blood_volume_pulse_1min 119 | output_topic: source_statistics_empatica_e4 120 | - name: Biovotion VSM1 121 | topics: 122 | - android_biovotion_vsm1_acceleration_1min 123 | output_topic: source_statistics_biovotion_vsm1 124 | - name: RADAR pRMT 125 | topics: 126 | -
android_phone_acceleration_1min 127 | - android_phone_bluetooth_devices 128 | - android_phone_sms 129 | - android_phone_call 130 | - android_phone_contacts 131 | - android_phone_usage_event 132 | - android_phone_relative_location 133 | output_topic: source_statistics_android_phone 134 | 135 | persistence_path: /var/lib/radar/data 136 | -------------------------------------------------------------------------------- /scripts/stage-runner/configure.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -eu 4 | 5 | pushd . 6 | cd /home/ec2-user/RADAR-Docker/dcompose-stack/radar-cp-hadoop-stack 7 | 8 | ENV_PATH=./.env 9 | readonly ENV_PATH 10 | rm -rf "$ENV_PATH" 11 | touch "$ENV_PATH" 12 | 13 | # Configure OAuth client credentials? 14 | cp ./etc/managementportal/config/oauth_client_details.csv.template ./etc/managementportal/config/oauth_client_details.csv 15 | 16 | cp ./etc/radar-backend/radar.yml.template ./etc/radar-backend/radar.yml 17 | cp ./etc/redcap-integration/radar.yml.template ./etc/redcap-integration/radar.yml 18 | cp ./etc/fitbit/docker/users/fitbit-user.yml.template ./etc/fitbit/docker/users/fitbit-user.yml 19 | cp ./etc/webserver/ip-access-control.conf.template ./etc/webserver/ip-access-control.conf 20 | cp ./etc/webserver/nginx.conf.template ./etc/webserver/nginx.conf 21 | cp ./etc/webserver/optional-services.conf.template ./etc/webserver/optional-services.conf 22 | cp ./etc/smtp.env.template ./etc/smtp.env 23 | cp ./etc/hdfs-restructure/restructure.yml.template ./etc/hdfs-restructure/restructure.yml 24 | 25 | function _get_param () { 26 | local param_value=$(aws ssm get-parameters --region eu-west-1 --names $1 --query Parameters[0].Value) 27 | param_value=$(echo "$param_value" | sed -e 's/^"//' -e 's/"$//') 28 | echo $param_value 29 | } 30 | 31 | function _get_decrypted_param () { 32 | local param_value=$(aws ssm get-parameters --region eu-west-1 --names $1 --with-decryption --query Parameters[0].Value) 33 | param_value=$(echo "$param_value" | sed -e 's/^"//' -e 's/"$//') 34 | echo $param_value 35 | } 36 | 37 | function _get_secure_file() { 38 | local file_content=$(_get_decrypted_param "$1") 39 | printf "%b\n" "$file_content" > $2 40 | } 41 | 42 | IFS="=" 43 | while read -r key val 44 | do 45 | if [[ "$key" == "SERVER_NAME" ]]; then 46 | echo "$key=radar-backend.co.uk" >> ./.env 47 | elif [[ "$key" == "MANAGEMENTPORTAL_KEY_DNAME" ]]; then 48 | echo "$key=CN=radar-backend.co.uk,OU=MyName,O=MyOrg,L=MyCity,S=MyState,C=MyCountryCode" >> ./.env 49 | elif [[ "$key" == "MANAGEMENTPORTAL_FRONTEND_CLIENT_SECRET" ]]; then 50 | echo "$key=travel.COUNTRY.flowers" >> ./.env 51 | elif [[ "$key" == "SELF_SIGNED_CERT" ]]; then 52 | echo "$key=no" >> ./.env 53 | elif [[ "$key" == "MANAGEMENTPORTAL_CATALOGUE_SERVER_ENABLE_AUTO_IMPORT" ]]; then 54 | echo "$key=true" >> ./.env 55 | elif [[ "$key" == "ENABLE_OPTIONAL_SERVICES" ]]; then 56 | echo "$key=true" >> ./.env 57 | elif [[ "$key" == "HOTSTORAGE_USERNAME" ]]; then 58 | value=$(_get_decrypted_param "RadarBackendHotstorageUsername") 59 | echo "$key=$value" >> ./.env 60 | elif [[ "$key" == "HOTSTORAGE_PASSWORD" ]]; then 61 | value=$(_get_decrypted_param "RadarBackendHotstoragePassword") 62 | echo "$key=$value" >> ./.env 63 | elif [[ "$key" == "HOTSTORAGE_NAME" ]]; then 64 | value=$(_get_decrypted_param "RadarBackendHotstorageName") 65 | echo "$key=$value" >> ./.env 66 | elif [[ "$key" == "POSTGRES_USER" ]]; then 67 | value=$(_get_decrypted_param "RadarBackendPostgresUser") 68 | echo 
"$key=$value" >> ./.env 69 | elif [[ "$key" == "POSTGRES_PASSWORD" ]]; then 70 | value=$(_get_decrypted_param "RadarBackendPostgresPassword") 71 | echo "$key=$value" >> ./.env 72 | elif [[ "$key" == "KAFKA_MANAGER_PASSWORD" ]]; then 73 | value=$(_get_decrypted_param "RadarBackendKafkaManagerPassword") 74 | echo "$key=$value" >> ./.env 75 | elif [[ "$key" == "PORTAINER_PASSWORD_HASH" ]]; then 76 | value=$(_get_decrypted_param "RadarBackendPortainerPasswordHash") 77 | echo "$key=$value" >> ./.env 78 | elif [[ "$key" == "MANAGEMENTPORTAL_COMMON_ADMIN_PASSWORD" ]]; then 79 | value=$(_get_decrypted_param "RadarBackendManagementportalCommonAdminPassword") 80 | echo "$key=$value" >> ./.env 81 | elif [[ "$key" == "TIMESCALEDB_PASSWORD" ]]; then 82 | value=$(_get_decrypted_param "RadarBackendTimescaledbPassword") 83 | echo "$key=$value" >> ./.env 84 | elif [[ "$key" == "GRAFANA_PASSWORD" ]]; then 85 | value=$(_get_decrypted_param "RadarBackendGrafanaPassword") 86 | echo "$key=$value" >> ./.env 87 | elif [[ "$key" == "MANAGEMENTPORTAL_COMMON_ADMIN_PASSWORD" ]]; then 88 | value=$(_get_decrypted_param "RadarBackendManagementportalCommonAdminPassword") 89 | echo "$key=$value" >> ./.env 90 | else 91 | echo "$key=$val" >> ./.env 92 | fi 93 | done < <(grep . ./etc/env.template) 94 | 95 | # Overwrite SMTP environment variables 96 | _get_secure_file "RadarBackendSmtpEnv" ./etc/smtp.env 97 | 98 | # Overwrite configuration on output restructure 99 | _get_secure_file "RadarBackendOutputRestructureConfig" ./etc/hdfs-restructure/restructure.yml 100 | mkdir -p ./etc/hdfs-restructure/output/+tmp 101 | chmod -R +w ./etc/hdfs-restructure/output 102 | 103 | sed -i -e '2,$s/^#//' ./etc/webserver/optional-services.conf 104 | 105 | popd -------------------------------------------------------------------------------- /dcompose-stack/radar-cp-s3-stack/dashboard-pipeline.yml: -------------------------------------------------------------------------------- 1 | --- 2 | version: '3.5' 3 | 4 | networks: 5 | hotstorage: 6 | driver: bridge 7 | internal: true 8 | api: 9 | driver: bridge 10 | internal: true 11 | 12 | 13 | services: 14 | 15 | #---------------------------------------------------------------------------# 16 | # RADAR Hot Storage # 17 | #---------------------------------------------------------------------------# 18 | hotstorage: 19 | image: radarbase/radar-hotstorage:0.1 20 | networks: 21 | - hotstorage 22 | volumes: 23 | - "${MONGODB_DIR}/db:/data/db" 24 | - "${MONGODB_DIR}/configdb:/data/configdb" 25 | restart: always 26 | environment: 27 | RADAR_USER: ${HOTSTORAGE_USERNAME} 28 | RADAR_PWD: ${HOTSTORAGE_PASSWORD} 29 | RADAR_DB: ${HOTSTORAGE_NAME} 30 | healthcheck: 31 | test: ["CMD", "mongo", "-u", "${HOTSTORAGE_USERNAME}", "-p", "${HOTSTORAGE_PASSWORD}", "${HOTSTORAGE_NAME}", "--eval", "db"] 32 | interval: 1m 33 | timeout: 5s 34 | retries: 3 35 | 36 | #---------------------------------------------------------------------------# 37 | # RADAR REST API # 38 | #---------------------------------------------------------------------------# 39 | rest-api: 40 | image: radarbase/radar-restapi:0.3 41 | networks: 42 | - hotstorage 43 | - api 44 | - management 45 | depends_on: 46 | - hotstorage 47 | - managementportal-app 48 | restart: always 49 | volumes: 50 | - "./etc/rest-api:/usr/local/conf/radar/rest-api" 51 | environment: 52 | RADAR_IS_CONFIG_LOCATION: usr/local/conf/radar/rest-api/radar-is.yml 53 | healthcheck: 54 | test: ["CMD", "wget", "--spider", "http://localhost:8080/api/openapi.json"] 55 | interval: 1m 56 | 
timeout: 5s 57 | retries: 3 58 | 59 | #---------------------------------------------------------------------------# 60 | # RADAR Dashboard # 61 | #---------------------------------------------------------------------------# 62 | dashboard: 63 | image: radarcns/radar-dashboard:2.1.0 64 | networks: 65 | - api 66 | depends_on: 67 | - rest-api 68 | restart: always 69 | environment: 70 | API_URI: https://${SERVER_NAME}/api 71 | BASE_HREF: /dashboard/ 72 | healthcheck: 73 | test: ["CMD", "wget", "-s", "http://localhost:80/"] 74 | interval: 1m 75 | timeout: 5s 76 | retries: 3 77 | 78 | #---------------------------------------------------------------------------# 79 | # RADAR mongo connector # 80 | #---------------------------------------------------------------------------# 81 | radar-mongodb-connector: 82 | image: radarbase/kafka-connect-mongodb-sink:0.2.2 83 | restart: on-failure 84 | volumes: 85 | - ./etc/mongodb-connector/sink-mongo.properties:/etc/kafka-connect/sink.properties 86 | networks: 87 | - zookeeper 88 | - kafka 89 | - hotstorage 90 | depends_on: 91 | - zookeeper-1 92 | - kafka-1 93 | - kafka-2 94 | - kafka-3 95 | - schema-registry-1 96 | - rest-proxy-1 97 | - kafka-init 98 | - hotstorage 99 | environment: 100 | CONNECT_BOOTSTRAP_SERVERS: PLAINTEXT://kafka-1:9092,PLAINTEXT://kafka-2:9092,PLAINTEXT://kafka-3:9092 101 | CONNECT_REST_PORT: 8083 102 | CONNECT_GROUP_ID: "default" 103 | CONNECT_CONFIG_STORAGE_TOPIC: "default.config" 104 | CONNECT_OFFSET_STORAGE_TOPIC: "default.offsets" 105 | CONNECT_STATUS_STORAGE_TOPIC: "default.status" 106 | CONNECT_KEY_CONVERTER: "io.confluent.connect.avro.AvroConverter" 107 | CONNECT_VALUE_CONVERTER: "io.confluent.connect.avro.AvroConverter" 108 | CONNECT_KEY_CONVERTER_SCHEMA_REGISTRY_URL: "http://schema-registry-1:8081" 109 | CONNECT_VALUE_CONVERTER_SCHEMA_REGISTRY_URL: "http://schema-registry-1:8081" 110 | CONNECT_INTERNAL_KEY_CONVERTER: "org.apache.kafka.connect.json.JsonConverter" 111 | CONNECT_INTERNAL_VALUE_CONVERTER: "org.apache.kafka.connect.json.JsonConverter" 112 | CONNECT_OFFSET_STORAGE_FILE_FILENAME: "/tmp/connect2.offset" 113 | CONNECT_REST_ADVERTISED_HOST_NAME: "radar-mongodb-connector" 114 | CONNECT_ZOOKEEPER_CONNECT: zookeeper-1:2181,zookeeper-2:2181,zookeeper-3:2181 115 | CONNECT_CONSUMER_MAX_POLL_RECORDS: 500 116 | CONNECT_CONSUMER_MAX_POLL_INTERVAL_MS: 300000 117 | CONNECT_CONSUMER_SESSION_TIMEOUT_MS: 10000 118 | CONNECT_CONSUMER_HEARTBEAT_INTERVAL_MS: 3000 119 | CONNECT_PLUGIN_PATH: /usr/share/java/kafka-connect/plugins 120 | KAFKA_BROKERS: 3 121 | CONNECT_LOG4J_ROOT_LOGLEVEL: WARN 122 | CONNECT_LOG4J_LOGGERS: "org.reflections=ERROR" 123 | healthcheck: 124 | test: ["CMD-SHELL", "curl -sf localhost:8083/connectors/radar-connector-mongodb-sink/status | grep -o '\"state\":\"[^\"]*\"' | tr '\\n' ',' | grep -vq FAILED || exit 1"] 125 | interval: 1m 126 | timeout: 5s 127 | retries: 3 -------------------------------------------------------------------------------- /scripts/hdfs-data-retention/hdfs_data_retention.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | PIG_VERSION="0.16.0" 4 | 5 | OUTPUT_DIR="./tmp" 6 | OUTPUT_DIR="$(cd "$(dirname "$OUTPUT_DIR")"; pwd)/$(basename "$OUTPUT_DIR")" 7 | 8 | TOPICS_FILE="./topics_to_remove.txt" 9 | HDFS_NAME_NODE='hdfs://hdfs-namenode:8020' 10 | DELETE="false" 11 | SKIP_TRASH="" 12 | 13 | # HDFS command to get FS image file from hdfs name node 14 | HDFS_COMMAND_IMAGE=(hdfs dfsadmin -fetchImage /fsimage_tmp/hdfs.image) 15 | # Can also 
use (curl --silent "http://hdfs-namenode:50070/imagetransfer?getimage=1&txid=latest" -o /fsimage_tmp/hdfs.image)
16 | 
17 | # HDFS command to create text file from FSImage file
18 | HDFS_COMMAND_TEXT=(hadoop oiv -i /fsimage_tmp/hdfs.image -o /fsimage_tmp/hdfs.txt -p Delimited -delimiter ,)
19 | DOCKER_COMMAND=(docker run -i --rm --network hadoop -v "${OUTPUT_DIR}:/fsimage_tmp" -e "CORE_CONF_fs_defaultFS=${HDFS_NAME_NODE}" uhopper/hadoop:2.7.2)
20 | 
21 | 
22 | 
23 | if [[ ! -d 'tmp' ]]; then
24 |   mkdir tmp
25 | fi
26 | 
27 | while [[ $# -gt 0 ]]
28 | do
29 |   key="$1"
30 | 
31 |   case $key in
32 |     -d|--delete)
33 |       DELETE="true"
34 |       shift # past argument
35 |       ;;
36 |     -st|--skip-trash)
37 |       SKIP_TRASH="-skipTrash"
38 |       shift # past argument
39 |       ;;
40 |     -u|--url)
41 |       HDFS_NAME_NODE="$2"
42 |       shift # past argument
43 |       shift # past value
44 |       ;;
45 |     -tf|--topics-file)
46 |       TOPICS_FILE="$2"
47 |       shift # past argument
48 |       shift # past value
49 |       ;;
50 |     -dt|--date)
51 |       if [[ "$2" =~ ^[0-9]{4}-(0[1-9]|1[0-2])-(0[1-9]|[1-2][0-9]|3[0-1])[[:space:]]([0-1][0-9]|2[0-3]):([0-5][0-9])$ ]]; then
52 |         # All records for appropriate topics before this date will be removed from HDFS.
53 |         date_time_to_remove_before="$2"
54 |       else
55 |         echo "Invalid date. Please use -h or --help for more information."
56 |         exit 1
57 |       fi
58 |       shift # past argument
59 |       shift # past value
60 |       ;;
61 |     -h|--help)
62 |       echo "Usage: ./hdfs_data_retention.sh -u <url> -tf <topics-file> -dt <date-time> -d"
63 |       echo "Options: * means required"
64 |       echo " -d|--delete: enables deletion of the data. If not specified, the size of the selected files is displayed."
65 |       echo " -st|--skip-trash: Enables the skipTrash option for the HDFS remove. To be used with the -d|--delete option."
66 |       echo " -h|--help: Displays this help."
67 |       echo "* -u|--url: The HDFS namenode URL to connect to. Default is hdfs://hdfs-namenode:8020"
68 |       echo "* -tf|--topics-file: The path of the file containing the newline-separated list of topics to remove the files from. Default is ./topics_to_remove.txt"
69 |       echo "* -dt|--date: All the files modified before this date time will be selected. Format is (yyyy-MM-dd HH:mm)"
70 |       exit 0
71 |       ;;
72 |     *) # unknown option
73 |       echo "Unknown Option $1. Try again. Use -h or --help for more info."
74 |       exit 1
75 |       ;;
76 |   esac
77 | done
78 | 
79 | if [[ -z "$date_time_to_remove_before" ]]; then
80 |   echo "Please specify a date and time. See -h or --help for more information."
81 |   exit 1
82 | fi
83 | 
84 | if [[ -f "./tmp/hdfs.image" ]]; then
85 |   if [[ $(find ./tmp/hdfs.image -mtime +1 -print) ]]; then
86 |     echo "./tmp/hdfs.image is older than a day. Downloading a new FS image file. "
87 |     ${DOCKER_COMMAND[@]} ${HDFS_COMMAND_IMAGE[@]}
88 |     ${DOCKER_COMMAND[@]} ${HDFS_COMMAND_TEXT[@]}
89 |   fi
90 | else
91 |   echo "Downloading a new FS image file and converting to txt. "
92 |   ${DOCKER_COMMAND[@]} ${HDFS_COMMAND_IMAGE[@]}
93 |   ${DOCKER_COMMAND[@]} ${HDFS_COMMAND_TEXT[@]}
94 | fi
95 | 
96 | # Set this if you get a 'JAVA_HOME not set' error, or set it in ~/.profile
97 | #export JAVA_HOME="/usr/lib/jvm/java-8-openjdk-amd64"
98 | 
99 | # Download and configure apache pig
100 | export PIG_HOME="$(pwd)"/pig-"${PIG_VERSION}"
101 | export PATH=$PATH:"$(pwd)"/pig-"${PIG_VERSION}"/bin
102 | 
103 | if ! hash "pig" >/dev/null 2>&1; then
104 |   wget http://www-us.apache.org/dist/pig/pig-"${PIG_VERSION}"/pig-"${PIG_VERSION}".tar.gz
105 |   tar -xzf pig-"${PIG_VERSION}".tar.gz
106 |   export PATH=$PATH:"$(pwd)"/pig-"${PIG_VERSION}"/bin
107 | fi
108 | 
109 | # Write all the relevant file paths to file using apache pig
110 | pig -x local -param inputFile=./tmp/hdfs.txt -param outputFile=./tmp/final_paths -param topics=${TOPICS_FILE} -param time="${date_time_to_remove_before}" ./hdfs_get_relevant_files.pig
111 | 
112 | FINAL_PATH='./tmp/final_paths/part-r-00000'
113 | #NUMOFLINES=$(wc -l < "${FINAL_PATH}")
114 | # If delete is passed as an argument, only then delete the files from the HDFS.
115 | if [[ "${DELETE}" = "true" ]]; then
116 |   docker run -i -d --name "hdfs-delete" --network hadoop -v "${OUTPUT_DIR}:/fsimage_tmp" -e "CORE_CONF_fs_defaultFS=${HDFS_NAME_NODE}" uhopper/hadoop:2.7.2 /bin/bash
117 |   # Wait for the container to start up
118 |   sleep 30
119 |   if [[ -f "${FINAL_PATH}" ]]; then
120 |     echo "READING AND REMOVING RELEVANT PATHS"
121 |     docker exec hdfs-delete bash -c "apt-get -y install pv && pv -pte /fsimage_tmp/final_paths/part-r-00000 | xargs -n 100 hdfs dfs -rm ${SKIP_TRASH}"
122 |   fi
123 |   # Delete the image after delete operation is complete
124 |   rm -r ./tmp/hdfs.*
125 |   docker rm -f hdfs-delete
126 | fi
127 | 
128 | rm -r ./tmp/final_paths/
129 | 
-------------------------------------------------------------------------------- /dcompose-stack/radar-cp-hadoop-stack/etc/webserver/nginx.nossl.conf.template: --------------------------------------------------------------------------------
1 | worker_rlimit_nofile 8192;
2 | 
3 | events {
4 |   worker_connections 4096; ## Default: 1024
5 | }
6 | 
7 | http {
8 |   real_ip_header X-Forwarded-For;
9 |   # Updated from NGINX_PROXIES .env variable by `bin/radar-docker install`.
10 |   # Do not change the next line! It is autogenerated.
11 | # NGINX_PROXIES 12 | real_ip_recursive on; 13 | 14 | default_type application/octet-stream; 15 | log_format main '$remote_addr - $remote_user [$time_local] $status ' 16 | '"$request" $body_bytes_sent "$http_referer" ' 17 | '"$http_user_agent" "$http_x_forwarded_for"'; 18 | tcp_nodelay on; 19 | 20 | # hide nginx version 21 | server_tokens off; 22 | 23 | # add nosniff header (https://www.owasp.org/index.php/List_of_useful_HTTP_headers) 24 | add_header X-Content-Type-Options nosniff; 25 | 26 | # For logins, make 2 requests per second at most 27 | limit_req_zone $binary_remote_addr zone=login_limit:10m rate=2r/s; 28 | 29 | server { 30 | listen 80 default_server; 31 | listen [::]:80 default_server; 32 | server_name localhost; 33 | 34 | access_log /var/log/nginx/access.log; 35 | error_log /var/log/nginx/error.log; 36 | 37 | location /kafka/ { 38 | include cors.conf; 39 | proxy_pass http://gateway/radar-gateway/; 40 | proxy_set_header Host $host; 41 | proxy_http_version 1.1; 42 | proxy_set_header Connection ""; 43 | } 44 | location ^~ /kafka/consumers { 45 | deny all; 46 | } 47 | location ^~ /kafka/brokers { 48 | deny all; 49 | } 50 | location ~* /kafka/topics/.+/partitions { 51 | deny all; 52 | } 53 | location /schema/ { 54 | if ($request_method = 'OPTIONS') { 55 | # Tell client that this pre-flight info is valid for 20 days 56 | add_header 'Access-Control-Allow-Origin' "$http_origin" always; 57 | add_header 'Access-Control-Allow-Credentials' 'true' always; 58 | add_header 'Access-Control-Allow-Methods' 'GET, OPTIONS' always; 59 | add_header 'Access-Control-Allow-Headers' 'Accept,Authorization,Cache-Control,Content-Type,DNT,If-Modified-Since,Keep-Alive,Origin,User-Agent,X-Requested-With' always; 60 | add_header 'Access-Control-Max-Age' 1728000; 61 | add_header 'Content-Type' 'text/plain charset=UTF-8'; 62 | add_header 'Content-Length' 0; 63 | add_header 'Allow' 'GET,OPTIONS'; 64 | return 204; 65 | } 66 | 67 | if ($request_method = 'GET') { 68 | add_header 'Access-Control-Allow-Origin' "$http_origin" always; 69 | add_header 'Access-Control-Allow-Credentials' 'true' always; 70 | add_header 'Access-Control-Allow-Methods' 'GET, OPTIONS' always; 71 | add_header 'Access-Control-Allow-Headers' 'Accept,Authorization,Cache-Control,Content-Type,DNT,If-Modified-Since,Keep-Alive,Origin,User-Agent,X-Requested-With' always; 72 | } 73 | 74 | limit_except GET OPTIONS { 75 | deny all; 76 | } 77 | proxy_pass http://schema-registry-1:8081/; 78 | proxy_set_header Host $host; 79 | proxy_http_version 1.1; 80 | proxy_set_header Connection ""; 81 | } 82 | location = /schema/application.wadl { 83 | deny all; 84 | } 85 | location /dashboard/ { 86 | proxy_pass http://dashboard:80/; 87 | proxy_set_header Host $host; 88 | } 89 | location /portainer/ { 90 | include ip-access-control.conf; 91 | 92 | proxy_pass http://portainer:9000/; 93 | proxy_http_version 1.1; 94 | proxy_set_header Connection ""; 95 | } 96 | location /portainer/api/websocket/ { 97 | include ip-access-control.conf; 98 | proxy_pass http://portainer:9000/api/websocket/; 99 | proxy_set_header Upgrade $http_upgrade; 100 | proxy_set_header Connection "upgrade"; 101 | proxy_http_version 1.1; 102 | } 103 | location /api/ { 104 | include cors.conf; 105 | proxy_pass http://rest-api:8080/api/; 106 | proxy_set_header Host $host; 107 | } 108 | location /managementportal/ { 109 | include cors.conf; 110 | proxy_pass http://managementportal-app:8080/managementportal/; 111 | proxy_set_header Host $host; 112 | } 113 | location /managementportal/oauth/ { 114 | # Allow 20 
fast-following requests, like when authorizing a user. 115 | limit_req zone=login_limit burst=20; 116 | include cors.conf; 117 | proxy_pass http://managementportal-app:8080/managementportal/oauth/; 118 | proxy_set_header Host $host; 119 | } 120 | location /managementportal/api/meta-token/ { 121 | limit_req zone=login_limit; 122 | include cors.conf; 123 | proxy_pass http://managementportal-app:8080/managementportal/api/meta-token/; 124 | proxy_set_header Host $host; 125 | } 126 | location /kafkamanager/{ 127 | include ip-access-control.conf; 128 | auth_basic "Kafka manager"; 129 | auth_basic_user_file kafka-manager.htpasswd; 130 | 131 | proxy_pass http://kafka-manager:9000; 132 | proxy_set_header Host $host; 133 | } 134 | include optional-services.conf; 135 | } 136 | } 137 | -------------------------------------------------------------------------------- /.github/workflows/main.yml: -------------------------------------------------------------------------------- 1 | # Continuous integration and conditional deployment to stage 2 | name: Main 3 | 4 | on: 5 | push: 6 | branches: [ master, dev ] 7 | pull_request: 8 | branches: [ master, dev ] 9 | 10 | jobs: 11 | 12 | ci-with-email-and-hdfs-support: 13 | # The type of runner that the job will run on 14 | runs-on: ubuntu-latest 15 | concurrency: ci-with-email-and-hdfs-support 16 | 17 | # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it 18 | steps: 19 | - uses: actions/checkout@v2 20 | - uses: actions/setup-java@v2 21 | with: 22 | distribution: "adopt" 23 | java-version: "8" 24 | 25 | - name: Login to Docker Hub 26 | uses: docker/login-action@v1 27 | with: 28 | username: ${{ secrets.DOCKERHUB_USERNAME }} 29 | password: ${{ secrets.DOCKERHUB_TOKEN }} 30 | 31 | - name: Before install 32 | run: | 33 | docker --version 34 | mkdir -p "$HOME/bin"; 35 | export PATH="$HOME/bin:$PATH"; 36 | sudo docker-compose --version 37 | 38 | - name: With email and HDFS support 39 | run: | 40 | cd dcompose-stack/radar-cp-hadoop-stack/ci 41 | ./setup-env.sh 42 | cd ../ 43 | bin/radar-docker down 44 | bin/radar-docker install 45 | sleep 120 46 | bin/radar-docker ps 47 | [ -z "$(bin/radar-docker ps | tail -n +3 | grep " Exit " | grep -v "kafka-init_1")" ] 48 | bin/radar-docker down && sleep 90 49 | 50 | ci-with-s3-connector: 51 | # The type of runner that the job will run on 52 | runs-on: ubuntu-latest 53 | concurrency: radar-docker-ci-with-s3-connector 54 | 55 | # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it 56 | steps: 57 | - uses: actions/checkout@v2 58 | - uses: actions/setup-java@v2 59 | with: 60 | distribution: "adopt" 61 | java-version: "8" 62 | 63 | - name: Login to Docker Hub 64 | uses: docker/login-action@v1 65 | with: 66 | username: ${{ secrets.DOCKERHUB_USERNAME }} 67 | password: ${{ secrets.DOCKERHUB_TOKEN }} 68 | 69 | - name: Before install 70 | run: | 71 | docker --version 72 | mkdir -p "$HOME/bin"; 73 | export PATH="$HOME/bin:$PATH"; 74 | sudo docker-compose --version 75 | 76 | - name: With S3 Connector 77 | run: | 78 | cd dcompose-stack/radar-cp-s3-stack/ci 79 | ./setup-env.sh 80 | cd ../ 81 | bin/radar-docker down 82 | bin/radar-docker install 83 | sleep 120 84 | bin/radar-docker ps 85 | # [ -z "$(bin/radar-docker ps | tail -n +3 | grep " Exit " | grep -v "kafka-init_1")" ] # https://github.com/RADAR-base/RADAR-Docker/issues/262 86 | bin/radar-docker down && sleep 90 87 | 88 | ci-firebase-messaging-stack: 89 | # The type of runner that the job will run on 90 | runs-on: ubuntu-latest 91 | 
concurrency: radar-docker-ci-firebase-messaging-stack 92 | 93 | # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it 94 | steps: 95 | - uses: actions/checkout@v2 96 | - uses: actions/setup-java@v2 97 | with: 98 | distribution: "adopt" 99 | java-version: "8" 100 | 101 | - name: Login to Docker Hub 102 | uses: docker/login-action@v1 103 | with: 104 | username: ${{ secrets.DOCKERHUB_USERNAME }} 105 | password: ${{ secrets.DOCKERHUB_TOKEN }} 106 | 107 | - name: Before install 108 | run: | 109 | docker --version 110 | mkdir -p "$HOME/bin"; 111 | export PATH="$HOME/bin:$PATH"; 112 | sudo docker-compose --version 113 | 114 | - name: Firebase messaging stack 115 | run: | 116 | cd dcompose-stack/firebase-app-server 117 | cp etc/env.template .env 118 | sudo docker-compose down 119 | sudo docker-compose up -d --build && sleep 15 && [ -z "$(sudo docker-compose ps | tail -n +3 | grep " Exit ")" ] 120 | sudo docker-compose down 121 | 122 | deployment-to-stage: 123 | # Deploy to stage on merging into dev 124 | if: github.event_name == 'push' && github.ref == 'refs/heads/dev' 125 | 126 | # The type of runner that the job will run on 127 | runs-on: ubuntu-latest 128 | needs: [ci-with-email-and-hdfs-support, ci-with-s3-connector, ci-firebase-messaging-stack] 129 | concurrency: radar-docker-deployment-to-stage 130 | 131 | steps: 132 | - name: Configure AWS credentials 133 | uses: aws-actions/configure-aws-credentials@v1 134 | with: 135 | aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }} 136 | aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }} 137 | aws-region: eu-west-1 138 | 139 | - name: Create deployment 140 | id: deploy 141 | run: | 142 | aws deploy create-deployment \ 143 | --application-name radar-backend \ 144 | --deployment-group-name RadarBackendDeploymentGroupName \ 145 | --deployment-config-name CodeDeployDefault.OneAtATime \ 146 | --github-location repository=${{ github.repository }},commitId=${{ github.sha }} 147 | -------------------------------------------------------------------------------- /dcompose-stack/radar-cp-s3-stack/etc/webserver/nginx.nossl.conf.template: -------------------------------------------------------------------------------- 1 | worker_rlimit_nofile 8192; 2 | 3 | events { 4 | worker_connections 4096; ## Default: 1024 5 | } 6 | 7 | http { 8 | real_ip_header X-Forwarded-For; 9 | # Updated from NGINX_PROXIES .env variable by `bin/radar-docker install`. 10 | # Do not change the next line! It is autogenerated. 
11 | # NGINX_PROXIES 12 | real_ip_recursive on; 13 | 14 | default_type application/octet-stream; 15 | log_format main '$remote_addr - $remote_user [$time_local] $status ' 16 | '"$request" $body_bytes_sent "$http_referer" ' 17 | '"$http_user_agent" "$http_x_forwarded_for"'; 18 | tcp_nodelay on; 19 | 20 | # hide nginx version 21 | server_tokens off; 22 | 23 | # add nosniff header (https://www.owasp.org/index.php/List_of_useful_HTTP_headers) 24 | add_header X-Content-Type-Options nosniff; 25 | 26 | # For logins, make 2 requests per second at most 27 | limit_req_zone $binary_remote_addr zone=login_limit:10m rate=2r/s; 28 | 29 | upstream minio_servers { 30 | server minio1:9000; 31 | server minio2:9000; 32 | server minio3:9000; 33 | server minio4:9000; 34 | } 35 | 36 | server { 37 | listen 80 default_server; 38 | listen [::]:80 default_server; 39 | server_name localhost; 40 | 41 | access_log /var/log/nginx/access.log; 42 | error_log /var/log/nginx/error.log; 43 | 44 | location /kafka/ { 45 | include cors.conf; 46 | proxy_pass http://gateway/radar-gateway/; 47 | proxy_set_header Host $host; 48 | proxy_http_version 1.1; 49 | proxy_set_header Connection ""; 50 | add_header 'Allow' 'GET,OPTIONS'; 51 | } 52 | location ^~ /kafka/consumers { 53 | deny all; 54 | } 55 | location ^~ /kafka/brokers { 56 | deny all; 57 | } 58 | location ~* /kafka/topics/.+/partitions { 59 | deny all; 60 | } 61 | location /schema/ { 62 | if ($request_method = 'OPTIONS') { 63 | # Tell client that this pre-flight info is valid for 20 days 64 | add_header 'Access-Control-Allow-Origin' "$http_origin" always; 65 | add_header 'Access-Control-Allow-Credentials' 'true' always; 66 | add_header 'Access-Control-Allow-Methods' 'GET, OPTIONS' always; 67 | add_header 'Access-Control-Allow-Headers' 'Accept,Authorization,Cache-Control,Content-Type,DNT,If-Modified-Since,Keep-Alive,Origin,User-Agent,X-Requested-With' always; 68 | add_header 'Access-Control-Max-Age' 1728000; 69 | add_header 'Content-Type' 'text/plain charset=UTF-8'; 70 | add_header 'Content-Length' 0; 71 | return 204; 72 | } 73 | 74 | if ($request_method = 'GET') { 75 | add_header 'Access-Control-Allow-Origin' "$http_origin" always; 76 | add_header 'Access-Control-Allow-Credentials' 'true' always; 77 | add_header 'Access-Control-Allow-Methods' 'GET, OPTIONS' always; 78 | add_header 'Access-Control-Allow-Headers' 'Accept,Authorization,Cache-Control,Content-Type,DNT,If-Modified-Since,Keep-Alive,Origin,User-Agent,X-Requested-With' always; 79 | } 80 | 81 | limit_except GET OPTIONS { 82 | deny all; 83 | } 84 | proxy_pass http://schema-registry-1:8081/; 85 | proxy_set_header Host $host; 86 | proxy_http_version 1.1; 87 | proxy_set_header Connection ""; 88 | } 89 | location = /schema/application.wadl { 90 | deny all; 91 | } 92 | location /portainer/ { 93 | include ip-access-control.conf; 94 | 95 | proxy_pass http://portainer:9000/; 96 | proxy_http_version 1.1; 97 | proxy_set_header Connection ""; 98 | } 99 | location /portainer/api/websocket/ { 100 | include ip-access-control.conf; 101 | proxy_pass http://portainer:9000/api/websocket/; 102 | proxy_set_header Upgrade $http_upgrade; 103 | proxy_set_header Connection "upgrade"; 104 | proxy_http_version 1.1; 105 | } 106 | location /managementportal/ { 107 | include cors.conf; 108 | proxy_pass http://managementportal-app:8080/managementportal/; 109 | proxy_set_header Host $host; 110 | } 111 | location /managementportal/oauth/ { 112 | # Allow 20 fast-following requests, like when authorizing a user. 
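# (the login_limit zone above allows 2r/s; burst=20 queues up to 20 extra requests instead of rejecting them immediately)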
113 | limit_req zone=login_limit burst=20; 114 | include cors.conf; 115 | proxy_pass http://managementportal-app:8080/managementportal/oauth/; 116 | proxy_set_header Host $host; 117 | } 118 | location /managementportal/api/meta-token/ { 119 | limit_req zone=login_limit; 120 | include cors.conf; 121 | proxy_pass http://managementportal-app:8080/managementportal/api/meta-token/; 122 | proxy_set_header Host $host; 123 | } 124 | location /kafkamanager/{ 125 | include ip-access-control.conf; 126 | auth_basic "Kafka manager"; 127 | auth_basic_user_file kafka-manager.htpasswd; 128 | 129 | proxy_pass http://kafka-manager:9000; 130 | proxy_set_header Host $host; 131 | } 132 | location /minio/ { 133 | proxy_set_header Host $host; 134 | proxy_pass http://minio_servers; 135 | } 136 | # include optional-services.conf; 137 | # include dashboard-pipeline.conf; 138 | } 139 | } 140 | --------------------------------------------------------------------------------