├── .github ├── ISSUE_TEMPLATE │ ├── bug_report.md │ └── feature_request.md └── workflows │ ├── ci.yml │ ├── dockerhubpublish.yml │ ├── publish.yml │ └── scan.yml ├── .gitignore ├── .grype ├── config.yml └── grype.friendly.tmpl ├── CHANGELOG.md ├── DEVELOP.md ├── LICENSE ├── README.md ├── docs ├── additional-packages.md ├── extend.md ├── neptune-manual.md ├── neptune.md ├── setup-manual.md ├── setup.md ├── tigergraph.md └── views.md └── src ├── bootstraps ├── core │ ├── graphistry.sh │ ├── graphistry.yml │ ├── minimal.sh │ └── minimal.yml ├── neptune │ ├── graphistry.sh │ ├── graphistry.yml │ ├── minimal.sh │ └── minimal.yml └── scripts │ ├── cloudformation-bootstrap.sh │ ├── docker-aws.sh │ ├── docker-container-build.sh │ ├── graphistry-ami-list.sh │ ├── graphistry-service-account.sh │ ├── graphistry-wait-healthy.sh │ ├── hello-end.sh │ ├── hello-start.sh │ ├── instance-id.sh │ ├── prepopulate-notebooks.sh │ └── swap-caddy.sh ├── caddy ├── Caddyfile ├── docker-compose.gak.graphistry.yml └── full.Caddyfile ├── docker ├── .env ├── Dockerfile ├── dc ├── dc.cpu ├── docker-compose.yml ├── entrypoint.sh ├── hooks │ ├── README.md │ └── build └── override │ ├── cpu.override.yml │ └── docker-compose.override.yml ├── envs ├── docker.env ├── general.env ├── graphistry.env ├── neptune.env ├── splunk.env ├── streamlit.env └── tigergraph.env ├── python ├── .flake8 ├── TigerGraph_helper │ └── tg_helper.py ├── __init__.py ├── bin │ └── lint.sh ├── components │ ├── AppPicker.py │ ├── Graphistry.py │ ├── Splunk.py │ ├── URLParam.py │ └── __init__.py ├── conda-app.sh ├── css │ ├── __init__.py │ └── css.py ├── entrypoint.py ├── neptune_helper │ ├── __init__.py │ ├── df_helper.py │ └── gremlin_helper.py ├── requirements-app.txt ├── requirements-system.txt ├── test │ ├── README.md │ └── test_stub.py ├── tox.ini ├── util │ ├── __init__.py │ └── log.py └── views │ ├── demo_01_fancy │ └── __init__.py │ ├── demo_02_disabled │ └── __init__.py │ ├── demo_03_minimal │ └── __init__.py │ ├── demo_04_simple │ └── __init__.py │ ├── demo_avr │ ├── __init__.py │ ├── app.css │ └── marlowe.py │ ├── demo_bio_01_funcoup │ └── __init__.py │ ├── demo_fraud_fincen │ └── __init__.py │ ├── demo_login │ ├── __init__.py │ └── marlowe.py │ ├── demo_neptune_01_minimal_gremlin │ └── __init__.py │ ├── demo_neptune_02_gremlin │ └── __init__.py │ ├── demo_neptune_03_c360 │ └── __init__.py │ ├── demo_rapids_01_simple │ └── __init__.py │ ├── demo_tigergraph_circle │ └── __init__.py │ └── demo_tigergraph_fraud │ └── __init__.py └── streamlit ├── config.toml └── credentials.toml /.github/ISSUE_TEMPLATE/bug_report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug report 3 | about: Create a report to help us improve 4 | title: "[BUG] " 5 | labels: bug 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Describe the bug** 11 | A clear and concise description of the bug 12 | 13 | **Expected behavior** 14 | What you wanted to happen 15 | 16 | **Observed behavior** 17 | What actually happened 18 | 19 | **To Reproduce** 20 | Steps to reproduce the behavior, ideally entirely copy-pastable for running on an out-of-the-box install 21 | 22 | 1. Go to '...' 23 | 2. Click on '....' 24 | 3. Scroll down to '....' 25 | 4. See error 26 | 27 | 28 | **Screenshots** 29 | If applicable, add screenshots to help explain your problem. 
30 | 31 | **Environment** 32 | - OS [e.g., Ubuntu 18.04 LTS] 33 | - StreamLit Version [See src/python/requirements-system.txt, or date] 34 | - How you installed: [manual git, full aws quicklaunch, neptune minimal, ...], 35 | 36 | **Additional context** 37 | Add any other context about the problem here. 38 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Feature request 3 | about: Suggest an idea for this project 4 | title: "[FEA] " 5 | labels: enhancement 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Is your feature request related to a problem? Please describe.** 11 | A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] 12 | 13 | **Describe the solution you'd like** 14 | A clear and concise description of what you want to happen. 15 | 16 | **Describe alternatives you've considered** 17 | A clear and concise description of any alternative solutions or features you've considered. 18 | 19 | **Additional context** 20 | Add any other context or screenshots about the feature request here. 21 | -------------------------------------------------------------------------------- /.github/workflows/ci.yml: -------------------------------------------------------------------------------- 1 | name: CI 2 | #description: 3 | # On every PR push, 4 | # Skipped if PR has label 'skip-ci' 5 | 6 | on: 7 | 8 | #Regular dev 9 | push: 10 | pull_request: 11 | 12 | 13 | #Enable UI-driven branch testing 14 | workflow_dispatch: 15 | 16 | #Test main bidaily @ 1a 17 | schedule: 18 | - cron: '0 1 1-31/2 * *' 19 | 20 | 21 | jobs: 22 | 23 | test-core-build: 24 | 25 | runs-on: ubuntu-latest 26 | 27 | strategy: 28 | matrix: 29 | python-version: [ 3.7, 3.8, 3.9 ] 30 | 31 | steps: 32 | 33 | - name: Checkout repo 34 | uses: actions/checkout@v3 35 | 36 | - name: Set up Python ${{ matrix.python-version }} 37 | uses: actions/setup-python@v4 38 | with: 39 | python-version: ${{ matrix.python-version }} 40 | 41 | - name: Install deps 42 | run: pip install flake8 pytest tox 43 | 44 | - name: Lint with flake8 45 | run: | 46 | cd src/python && ./bin/lint.sh 47 | 48 | - name: Core tests 49 | run: | 50 | cd src/python && python -m pytest test 51 | 52 | test-docker: 53 | 54 | runs-on: ubuntu-latest 55 | 56 | env: 57 | COMPOSE_DOCKER_CLI_BUILD: 1 58 | DOCKER_BUILDKIT: 1 59 | 60 | strategy: 61 | matrix: 62 | flavor: [ 'dc', 'dc.cpu' ] 63 | 64 | steps: 65 | 66 | - name: Checkout repo 67 | uses: actions/checkout@v3 68 | 69 | - name: Free Disk Space 70 | env: 71 | COMPOSE_DOCKER_CLI_BUILD: 1 72 | DOCKER_BUILDKIT: 1 73 | run: | 74 | df -h 75 | sudo docker system df 76 | echo "Prune docker" 77 | sudo docker system prune -f -a --volumes 78 | sudo docker builder prune -a 79 | df -h 80 | sudo docker system df 81 | echo "swap info (expected: 4GB at /mnt/swapfile)" 82 | grep Swap /proc/meminfo 83 | #echo "Identiy biggest dpkg packages" 84 | #sudo dpkg-query --show --showformat='${Installed-Size}\t${Package}\n' | sort -rh | head -50 | awk '{print $1/1024, $2}' || echo "fail dpkg-query" 85 | #echo "Identify biggest apt packages" 86 | #sudo aptitude search "~i" --display-format "%p %I" --sort installsize | tail -50 || echo "fail apt search" 87 | echo "Remove apt packages" 88 | apt-get purge --auto-remove -y azure-cli google-cloud-sdk hhvm google-chrome-stable firefox powershell mono-devel || echo ok1 89 | apt-get purge --auto-remove aria2 ansible 
shellcheck rpm xorriso zsync \ 90 | 'clang-.*' lldb-6.0 lld-6.0 lldb-8 lld-8 \ 91 | lldb-9 lld-9 \ 92 | esl-erlang g++-8 g++-9 gfortran-8 gfortran-9 \ 93 | cabal-install-2.0 cabal-install-2.2 \ 94 | cabal-install-2.4 cabal-install-3.0 cabal-install-3.2 'gcc-.*' heroku imagemagick \ 95 | libmagickcore-dev libmagickwand-dev libmagic-dev ant ant-optional kubectl \ 96 | mercurial apt-transport-https mono-complete mono-devel 'mysql-.*' libmysqlclient-dev \ 97 | mssql-tools unixodbc-dev yarn bazel chrpath libssl-dev libxft-dev \ 98 | libfreetype6 libfreetype6-dev libfontconfig1 libfontconfig1-dev \ 99 | php-zmq snmp pollinate libpq-dev postgresql-client ruby-full \ 100 | sphinxsearch subversion mongodb-org -yq >/dev/null 2>&1 \ 101 | || echo "failed main apt-get remove2" 102 | df -h 103 | echo "Removing large packages" 104 | apt-get purge --autoremove -y libgl1-mesa-dri || echo "fail remove libmesa" 105 | apt-get purge --autoremove -y 'openjdk-.*' || echo "openjdk-11-jre-headless" 106 | apt-get purge --autoremove -y 'mysql-server-core.*' || echo "fail remove mysql-server" 107 | apt-get purge --autoremove -y r-base-core || echo "fail remove r-base-core" 108 | apt-get purge --auto-remove -y '^ghc.*' || echo failghc 109 | apt-get purge --auto-remove -y '^dotnet-.*' || echo faildotnet 110 | apt-get purge --auto-remove -y '^llvm-.*' || echo failllvm 111 | apt-get purge --auto-remove -y 'php.*' || echo failphp 112 | apt-get purge --auto-remove -y 'adoptopenjdk-.*' || echo 'fail jdk' 113 | apt-get purge --auto-remove -y 'hhvm' || echo 'fail hhvm' 114 | apt-get purge --auto-remove -y 'google-chrome-stable' || echo 'fail chrome' 115 | apt-get purge --auto-remove -y 'firefox' || echo 'fail ffox' 116 | apt-get purge --auto-remove -y podman 'mongo.*' || echo failmongo 117 | ( apt-get purge --auto-remove -y 'rust' || apt-get purge --auto-remove -y 'rust.*' ) || echo "couldn't remove rust" 118 | sudo rm -rf /usr/share/az_* || echo "fail az cleanup" 119 | sudo rm -rf /usr/local/julia || echo "fail julia cleanup" 120 | echo "--- /opt ---" 121 | sudo ls /opt 122 | sudo rm -rf /opt/az || echo "fail az" 123 | sudo rm -rf /opt/hostedtoolcache/go || echo "fail go cleanup" 124 | sudo rm -rf /opt/hostedtoolcache/Ruby/2.5.8 || echo "fail ruby cleanup" 125 | sudo rm -rf /opt/hostedtoolcache/Ruby/2.6.6 || echo "fail ruby cleanup" 126 | sudo rm -rf /opt/hostedtoolcache/Ruby/2.7.1 || echo "fail ruby cleanup" 127 | sudo rm -rf /opt/hostedtoolcache/PyPy || echo "fail pypy cleanup" 128 | sudo rm -rf /opt/ghc || echo "fail ghc cleanup" 129 | sudo rm -rf /opt/hostedtoolcache && sudo mkdir -p /opt/hostedtoolcache 130 | sudo rm -rf /opt/microsoft || echo "fail microsoft" 131 | sudo rm -rf /opt/pipx || echo "skip pipx" 132 | echo "--- /usr/include ---" 133 | sudo ls /usr/include 134 | sudo rm -rf /usr/include/boost || echo "fail boost cleanup" 135 | sudo rm -rf /usr/include/php || echo "fail php cleanup" 136 | echo "--- /usr/lib ---" 137 | sudo ls /usr/lib 138 | sudo rm -rf /usr/lib/google-cloud-sdk || echo "fail gcloud cleanup" 139 | sudo rm -rf /usr/lib/jvm || echo "fail jvm cleanup" 140 | sudo rm -rf /usr/lib/mono || echo "fail mono cleanup" 141 | sudo rm -rf /usr/lib/llvm-* || echo "fail llvm cleanup" 142 | echo "--- /usr/local ---" 143 | sudo ls /usr/local 144 | sudo rm -rf /usr/local/aws-cli || echo "fail aws-cli cleanup" 145 | sudo rm -rf /usr/local/aws-sam-cli || echo "fail aws-sam-cli cleanup" 146 | sudo rm -rf /usr/local/bin/bicep || echo "fail bicep cleanup" 147 | sudo rm -rf /usr/local/bin/cmake-gui || echo "fail 
cmake-gui cleanup" 148 | sudo rm -rf /usr/local/bin/helm || echo "fail helm cleanup" 149 | sudo rm -rf /usr/local/bin/kubectl || echo "fail kubectl cleanup" 150 | sudo rm -rf /usr/local/bin/minikube || echo "fail minikube cleanup" 151 | sudo rm -rf /usr/local/bin/node || echo "fail node cleanup" 152 | sudo rm -rf /usr/local/bin/oc || echo "fail oc cleanup" 153 | sudo rm -rf /usr/local/bin/packer || echo "fail packer cleanup" 154 | sudo rm -rf /usr/local/bin/pulumi || echo "fail pulumi cleanup" 155 | sudo rm -rf /usr/local/bin/pulumi-* || echo "fail pulumi-* cleanup" 156 | sudo rm -rf /usr/local/bin/stack || echo "fail stack cleanup" 157 | sudo rm -rf /usr/local/bin/terraform || echo "fail terraform cleanup" 158 | sudo rm -rf /usr/local/graalvm || echo "fail graal cleanup" 159 | sudo rm -rf /usr/local/julia* || echo "fail julia cleanup" 160 | sudo rm -rf /usr/local/lib/android || echo "fail android cleanup" 161 | sudo rm -rf /usr/local/lib/heroku || echo "fail heroku cleanup" 162 | sudo rm -rf /usr/local/lib/node_modules || echo "fail node_modules cleanup" 163 | sudo rm -rf /usr/local/n || echo "fail n cleanup" 164 | sudo rm -rf /usr/local/sqlpackage || echo "fail sqlpackage cleanup" 165 | echo "--- /usr/share ---" 166 | sudo ls /usr/share 167 | sudo rm -rf /usr/share/dotnet || echo "fail dotnet cleanup" 168 | sudo rm -rf /usr/share/miniconda || echo 'skip miniconda' 169 | sudo rm -rf /usr/share/gradle || echo "skip gradle" 170 | sudo rm -rf /usr/share/gradle* || echo "fail gradle cleanup" 171 | sudo rm -rf /usr/share/kotlinc || echo 'skip kotlinc' 172 | sudo rm -rf /usr/share/rust || echo 'skip rust' 173 | sudo rm -rf /usr/share/sbt || echo 'skip sbt' 174 | sudo rm -rf /usr/share/swift || echo "fail swift cleanup" 175 | sudo rm -rf /usr/share/vcpkg || echo "fail vcpkg cleanup" 176 | echo "--- /home ---" 177 | sudo ls /home 178 | sudo rm -rf /home/linuxbrew || echo "fail linuxbrew cleanup" 179 | df -h 180 | ( sudo apt-get install -y wajig && wajig large ) || echo "Failed installing wajig" 181 | sudo apt-get autoremove -y >/dev/null 2>&1 182 | sudo apt-get clean 183 | sudo apt-get autoremove -y >/dev/null 2>&1 184 | sudo apt-get autoclean -y >/dev/null 2>&1 185 | df -h 186 | echo "------------ remaining /usr/local/lib (1) ------------" 187 | sudo du -sh /usr/local/lib/* | sort -h | tail -n 20 || echo ok 188 | echo "------------ remaining /usr/share (1) ------------" 189 | sudo du -sh /usr/share/* | sort -h | tail -n 10 || echo ok 190 | echo "------------ remaining /usr/local (1) ------------" 191 | sudo du -sh /usr/local/* | sort -h | tail -n 10 || echo ok 192 | echo "------------ remaining /usr/local/bin (1) ------------" 193 | sudo du -sh /usr/local/bin/* | sort -h | tail -n 10 || echo ok 194 | echo "------------ remaining /opt (1) ------------" 195 | sudo du -sh /opt/* | sort -h | tail -n 10 || echo ok 196 | echo "https://github.com/actions/virtual-environments/issues/709" 197 | sudo rm -rf "$AGENT_TOOLSDIRECTORY" 198 | echo "------------ remaining /usr/share ------------" 199 | du -sh /usr/share/* | sort -h || echo ok 200 | echo "------------ remaining /usr/local ------------" 201 | du -sh /usr/local/* | sort -h || echo ok 202 | echo "------------ remaining /usr/local/bin --------" 203 | du -sh /usr/local/bin/* | sort -h || echo ok 204 | echo "------------ remaining /opt ------------" 205 | sudo du -sh /opt/* | sort -h || echo ok 206 | echo "------------ remaining /opt/hostedtoolcache/* ------------" 207 | sudo du -sh /opt/hostedtoolcache/* | sort -h || echo ok hosted 208 | df -h 209 | 
sudo docker info 210 | sudo docker system df 211 | sudo ls -alh /var/lib/docker || echo 'ok docker' 212 | sudo ls -alh /var/lib/docker/buildkit || echo 'ok docker buildkit' 213 | df -h 214 | 215 | - name: Build docker 216 | env: 217 | COMPOSE_DOCKER_CLI_BUILD: 1 218 | DOCKER_BUILDKIT: 1 219 | run: | 220 | cd src/docker && ./${{ matrix.flavor }} build 221 | -------------------------------------------------------------------------------- /.github/workflows/dockerhubpublish.yml: -------------------------------------------------------------------------------- 1 | name: build&publishtoDockerHub 2 | 3 | on: 4 | push: 5 | tags: 6 | - '[0-9]+.[0-9]+.[0-9]+(.*)' 7 | 8 | workflow_dispatch: 9 | inputs: 10 | version: 11 | description: 'Version to bump to' 12 | required: false 13 | 14 | workflow_call: 15 | 16 | jobs: 17 | docker-build-publish: 18 | runs-on: ubuntu-latest-4-cores 19 | strategy: 20 | matrix: 21 | CUDA_SHORT_VERSION: ['11.8'] 22 | fail-fast: true 23 | 24 | 25 | steps: 26 | 27 | - name: checkout repo 28 | uses: actions/checkout@v3 29 | with: 30 | fetch-depth: 0 31 | 32 | - name: get most recent tag 33 | run: | 34 | echo "RELEASE_VERSION=$(git describe --tags --abbrev=0)" >> $GITHUB_ENV 35 | echo "CUDA_SHORT_VERSION=${{ matrix.CUDA_SHORT_VERSION }}" >> $GITHUB_ENV 36 | 37 | - name: check env 38 | run: | 39 | echo $RELEASE_VERSION 40 | echo ${{ env.RELEASE_VERSION }} 41 | echo $CUDA_SHORT_VERSION 42 | echo ${{ env.CUDA_SHORT_VERSION }} 43 | 44 | - name: Login to DockerHub 45 | uses: docker/login-action@v2 46 | with: 47 | username: ${{ secrets.DOCKERHUB_USERNAME }} 48 | password: ${{ secrets.DOCKERHUB_TOKEN }} 49 | 50 | - name: Build Graph-App-Kit 51 | env: 52 | DOCKER_BUILDKIT: 1 53 | COMPOSE_DOCKER_CLI_BUILD: 1 54 | 55 | run: | 56 | cd src/docker \ 57 | && GRAPHISTRY_FORGE_BASE_VERSION=v${{ env.RELEASE_VERSION }}-${{ env.CUDA_SHORT_VERSION }} docker compose -f docker-compose.yml build 58 | 59 | - name: tag the image 60 | run: | 61 | docker tag graphistry/graph-app-kit-st:latest-${{ env.CUDA_SHORT_VERSION }} graphistry/graph-app-kit-st:v${{ env.RELEASE_VERSION }}-${{ env.CUDA_SHORT_VERSION }} 62 | 63 | - name: Publish Graph-App-Kit to DockerHub 64 | run: | 65 | docker push graphistry/graph-app-kit-st:v${{ env.RELEASE_VERSION }}-${{ env.CUDA_SHORT_VERSION }} && docker push graphistry/graph-app-kit-st:latest-${{ env.CUDA_SHORT_VERSION }} 66 | -------------------------------------------------------------------------------- /.github/workflows/publish.yml: -------------------------------------------------------------------------------- 1 | name: Publish 2 | 3 | on: 4 | 5 | #Merge 6 | push: 7 | branches: 8 | - master 9 | 10 | #Label 11 | pull_request: 12 | types: [labeled] 13 | 14 | 15 | #Enable UI-driven branch testing 16 | workflow_dispatch: 17 | 18 | #Test main bidaily @ 1a 19 | schedule: 20 | - cron: '0 1 1-31/2 * *' 21 | 22 | jobs: 23 | 24 | stub_mt: 25 | name: Always-succeed step to prevent appearing as a failure 26 | runs-on: ubuntu-latest 27 | timeout-minutes: 10 28 | steps: 29 | - name: Do nothing 30 | run: echo "Do nothing" 31 | 32 | publish: 33 | name: Upload to Amazon S3 34 | if: github.event.action != 'labeled' || github.event.label.name == 'publish' 35 | runs-on: ubuntu-latest 36 | 37 | steps: 38 | - name: Checkout 39 | uses: actions/checkout@v3 40 | 41 | - name: Configure AWS credentials 42 | uses: aws-actions/configure-aws-credentials@v1 43 | with: 44 | aws-access-key-id: ${{ secrets.S3_AWS_ACCESS_KEY_ID }} 45 | aws-secret-access-key: ${{ secrets.S3_AWS_SECRET_ACCESS_KEY }} 46 | 
aws-region: ${{ secrets.S3_AWS_REGION_PUBLIC }} 47 | 48 | - name: Copy CloudFormation templates 49 | run: | 50 | aws s3 cp src/bootstraps/core/graphistry.yml "s3://${{ secrets.AWS_S3_BUCKET_PUBLIC}}/templates/latest/core/graphistry.yml" 51 | aws s3 cp src/bootstraps/core/minimal.yml "s3://${{ secrets.AWS_S3_BUCKET_PUBLIC}}/templates/latest/core/minimal.yml" 52 | aws s3 cp src/bootstraps/neptune/graphistry.yml "s3://${{ secrets.AWS_S3_BUCKET_PUBLIC}}/templates/latest/neptune/graphistry.yml" 53 | aws s3 cp src/bootstraps/neptune/minimal.yml "s3://${{ secrets.AWS_S3_BUCKET_PUBLIC}}/templates/latest/neptune/minimal.yml" 54 | 55 | -------------------------------------------------------------------------------- /.github/workflows/scan.yml: -------------------------------------------------------------------------------- 1 | name: Scan 2 | #description: 3 | # Daily 4a CVE report 4 | 5 | on: 6 | workflow_dispatch: 7 | schedule: 8 | - cron: '0 4 * * *' 9 | 10 | jobs: 11 | 12 | cancel_outstanding: 13 | name: Detect and cancel outstanding runs of this workflow 14 | if: ${{ !contains(github.event.pull_request.labels.*.name, 'skip-security-scan') }} 15 | runs-on: ubuntu-latest 16 | timeout-minutes: 10 17 | steps: 18 | - name: Cancel Previous Runs 19 | if: ${{ !contains(github.event.pull_request.labels.*.name, 'skip-cancel') }} 20 | uses: styfle/cancel-workflow-action@0.4.0 21 | with: 22 | access_token: ${{ github.token }} 23 | 24 | scan_base: 25 | name: Scan base Graphistry container 26 | if: ${{ !contains(github.event.pull_request.labels.*.name, 'skip-security-scan') }} 27 | runs-on: ubuntu-latest-4-cores 28 | timeout-minutes: 30 29 | env: 30 | COMPOSE_DOCKER_CLI_BUILD: 1 31 | DOCKER_BUILDKIT: 1 32 | strategy: 33 | matrix: 34 | CUDA_SHORT_VERSION: ['11.8'] 35 | steps: 36 | 37 | - name: checkout 38 | uses: actions/checkout@v3 39 | 40 | - name: version envvars 41 | run: | 42 | echo "VERSION=latest" >> $GITHUB_ENV 43 | echo "CUDA_SHORT_VERSION=${{ matrix.CUDA_SHORT_VERSION }}" >> $GITHUB_ENV 44 | ( rm -f CUDA_SHORT_VERSION || echo ok ) && ( echo ${{ matrix.CUDA_SHORT_VERSION }} > CUDA_SHORT_VERSION ) 45 | 46 | - name: install grype 47 | run: | 48 | sudo docker images 49 | curl -sSfL https://raw.githubusercontent.com/anchore/grype/main/install.sh | sh -s -- -b /usr/local/bin 50 | 51 | - name: scan 52 | env: 53 | DOCKER_IMAGE: graphistry/graphistry-forge-base 54 | DOCKER_IMAGE_SHORT: graphistry-forge-base 55 | VERSION: latest 56 | run: | 57 | grype ${DOCKER_IMAGE}:${VERSION}-${CUDA_SHORT_VERSION} \ 58 | --only-fixed \ 59 | -o sarif \ 60 | > ${DOCKER_IMAGE_SHORT}-${CUDA_SHORT_VERSION}.json 61 | echo "========== SARIF REPORT ==========" 62 | cat ${DOCKER_IMAGE_SHORT}-${CUDA_SHORT_VERSION}.json 63 | 64 | - name: upload SARIF report 65 | env: 66 | DOCKER_IMAGE: graphistry/graphistry-forge-base 67 | DOCKER_IMAGE_SHORT: graphistry-forge-base 68 | VERSION: latest 69 | uses: github/codeql-action/upload-sarif@v1 70 | with: 71 | sarif_file: ${DOCKER_IMAGE_SHORT}-${CUDA_SHORT_VERSION}.json 72 | # category differentiates multiple results for one commit 73 | category: ${DOCKER_IMAGE_SHORT}-${VERSION}-${CUDA_SHORT_VERSION} 74 | 75 | - name: fail on fixable severe vulnerabilities 76 | env: 77 | DOCKER_IMAGE: graphistry/graphistry-forge-base 78 | DOCKER_IMAGE_SHORT: graphistry-forge-base 79 | VERSION: latest 80 | run: | 81 | grype ${DOCKER_IMAGE}:${VERSION}-${CUDA_SHORT_VERSION} \ 82 | --only-fixed \ 83 | --fail-on high \ 84 | -o template -t .grype/grype.friendly.tmpl 85 | 86 | 87 | scan_gak: 88 | name: Scan graph-app-kit 
container 89 | if: ${{ !contains(github.event.pull_request.labels.*.name, 'skip-security-scan') }} 90 | runs-on: ubuntu-latest-4-cores 91 | timeout-minutes: 30 92 | env: 93 | COMPOSE_DOCKER_CLI_BUILD: 1 94 | DOCKER_BUILDKIT: 1 95 | strategy: 96 | matrix: 97 | CUDA_SHORT_VERSION: ['11.8'] 98 | steps: 99 | 100 | - name: checkout 101 | uses: actions/checkout@v3 102 | 103 | - name: version envvars 104 | run: | 105 | echo "VERSION=latest" >> $GITHUB_ENV 106 | echo "CUDA_SHORT_VERSION=${{ matrix.CUDA_SHORT_VERSION }}" >> $GITHUB_ENV 107 | ( rm -f CUDA_SHORT_VERSION || echo ok ) && ( echo ${{ matrix.CUDA_SHORT_VERSION }} > CUDA_SHORT_VERSION ) 108 | 109 | - name: install grype 110 | run: | 111 | sudo docker images 112 | curl -sSfL https://raw.githubusercontent.com/anchore/grype/main/install.sh | sh -s -- -b /usr/local/bin 113 | 114 | - name: scan 115 | env: 116 | DOCKER_IMAGE: graphistry/graphistry-graph-app-kit-st 117 | DOCKER_IMAGE_SHORT: graphistry-graph-app-kit-st 118 | VERSION: latest 119 | run: | 120 | grype ${DOCKER_IMAGE}:${VERSION}-${CUDA_SHORT_VERSION} \ 121 | --only-fixed \ 122 | -o sarif \ 123 | > ${DOCKER_IMAGE_SHORT}-${CUDA_SHORT_VERSION}.json 124 | echo "========== SARIF REPORT ==========" 125 | cat ${DOCKER_IMAGE_SHORT}-${CUDA_SHORT_VERSION}.json 126 | 127 | - name: upload SARIF report 128 | env: 129 | DOCKER_IMAGE: graphistry/graphistry-graph-app-kit-st 130 | DOCKER_IMAGE_SHORT: graphistry-graph-app-kit-st 131 | VERSION: latest 132 | uses: github/codeql-action/upload-sarif@v1 133 | with: 134 | sarif_file: ${DOCKER_IMAGE_SHORT}-${CUDA_SHORT_VERSION}.json 135 | # category differentiates multiple results for one commit 136 | category: ${DOCKER_IMAGE_SHORT}-${VERSION}-${CUDA_SHORT_VERSION} 137 | 138 | - name: fail on fixable severe vulnerabilities 139 | env: 140 | DOCKER_IMAGE: graphistry/graphistry-graph-app-kit-st 141 | DOCKER_IMAGE_SHORT: graphistry-graph-app-kit-st 142 | VERSION: latest 143 | run: | 144 | grype ${DOCKER_IMAGE}:${VERSION}-${CUDA_SHORT_VERSION} \ 145 | --only-fixed \ 146 | --fail-on high \ 147 | -o template -t .grype/grype.friendly.tmpl 148 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | .DS_Store 7 | 8 | # C extensions 9 | *.so 10 | 11 | # Distribution / packaging 12 | .Python 13 | build/ 14 | develop-eggs/ 15 | dist/ 16 | downloads/ 17 | eggs/ 18 | .eggs/ 19 | lib/ 20 | lib64/ 21 | parts/ 22 | sdist/ 23 | var/ 24 | wheels/ 25 | pip-wheel-metadata/ 26 | share/python-wheels/ 27 | *.egg-info/ 28 | .installed.cfg 29 | *.egg 30 | MANIFEST 31 | 32 | # PyInstaller 33 | # Usually these files are written by a python script from a template 34 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
35 | *.manifest
36 | *.spec
37 | 
38 | # Installer logs
39 | pip-log.txt
40 | pip-delete-this-directory.txt
41 | 
42 | # Unit test / coverage reports
43 | htmlcov/
44 | .tox/
45 | .nox/
46 | .coverage
47 | .coverage.*
48 | .cache
49 | nosetests.xml
50 | coverage.xml
51 | *.cover
52 | *.py,cover
53 | .hypothesis/
54 | .pytest_cache/
55 | 
56 | # Translations
57 | *.mo
58 | *.pot
59 | 
60 | # Django stuff:
61 | *.log
62 | local_settings.py
63 | db.sqlite3
64 | db.sqlite3-journal
65 | 
66 | # Flask stuff:
67 | instance/
68 | .webassets-cache
69 | 
70 | # Scrapy stuff:
71 | .scrapy
72 | 
73 | # Sphinx documentation
74 | docs/_build/
75 | 
76 | # PyBuilder
77 | target/
78 | 
79 | # Jupyter Notebook
80 | .ipynb_checkpoints
81 | 
82 | # IPython
83 | profile_default/
84 | ipython_config.py
85 | 
86 | # pyenv
87 | .python-version
88 | 
89 | # pipenv
90 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
91 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
92 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
93 | # install all needed dependencies.
94 | #Pipfile.lock
95 | 
96 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow
97 | __pypackages__/
98 | 
99 | # Celery stuff
100 | celerybeat-schedule
101 | celerybeat.pid
102 | 
103 | # SageMath parsed files
104 | *.sage.py
105 | 
106 | # Environments
107 | .env
108 | .venv
109 | env/
110 | venv/
111 | ENV/
112 | env.bak/
113 | venv.bak/
114 | src/data
115 | 
116 | # Spyder project settings
117 | .spyderproject
118 | .spyproject
119 | 
120 | # Rope project settings
121 | .ropeproject
122 | 
123 | # mkdocs documentation
124 | /site
125 | 
126 | # mypy
127 | .mypy_cache/
128 | .dmypy.json
129 | dmypy.json
130 | 
131 | # Pyre type checker
132 | .pyre/
133 | 
--------------------------------------------------------------------------------
/.grype/config.yml:
--------------------------------------------------------------------------------
1 | #####
2 | #
3 | #  DEFAULTS: https://github.com/anchore/grype
4 | #
5 | #####
6 | 
7 | #####
8 | #
9 | #  OVERRIDES
10 | #
11 | #####
12 | 
13 | # a list of globs to exclude from scanning, for example:
14 | # exclude:
15 | #   - '/etc/**'
16 | #   - './out/**/*.json'
17 | # same as --exclude ; GRYPE_EXCLUDE env var
18 | #exclude: []
19 | #exclude:
20 | #  - ''
21 | 
22 | 
23 | search:
24 | 
25 |   indexed-archives: false
26 | 
--------------------------------------------------------------------------------
/.grype/grype.friendly.tmpl:
--------------------------------------------------------------------------------
1 | "Package","Version Installed","Vulnerability ID","Severity","Data Source","Namespace","Fix State","Locations"
2 | {{- range .Matches}}
3 | "{{.Artifact.Name}}","{{.Artifact.Version}}","{{.Vulnerability.ID}}","{{.Vulnerability.Severity}}","{{.Vulnerability.DataSource}}","{{.Vulnerability.Namespace}}","{{.Vulnerability.Fix.State}}","{{.Artifact.Locations}}"
4 | {{- end}}
5 | 
--------------------------------------------------------------------------------
/CHANGELOG.md:
--------------------------------------------------------------------------------
1 | # Changelog
2 | 
3 | All notable changes to the graph-app-kit are documented in this file.
4 | 
5 | The changelog format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/). This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html), and all PyGraphistry-specific breaking changes are explicitly noted here.
6 | 
7 | Related changelogs:
8 | 
9 | Core:
10 | * [Streamlit](https://docs.streamlit.io/en/stable/changelog.html)
11 | * [PyGraphistry changelog](https://github.com/graphistry/pygraphistry/blob/master/CHANGELOG.md)
12 | * [Graphistry core changelog](https://graphistry.zendesk.com/hc/en-us/articles/360033184174-Enterprise-Release-List-Downloads)
13 | * [RAPIDS changelog](https://github.com/rapidsai/cudf/releases)
14 | 
15 | Extensions:
16 | * [Neo4j](https://neo4j.com/release-notes/)
17 | * [TigerGraph](https://docs.tigergraph.com/faqs/change-log-1)
18 | * [AWS Neptune](https://docs.aws.amazon.com/neptune/latest/userguide/doc-history.html)
19 | 
20 | ## [Development]
21 | 
22 | See [projects page](https://github.com/graphistry/graph-app-kit/projects) and [open pull requests](https://github.com/graphistry/graph-app-kit/pulls)
23 | 
24 | ### Changed
25 | 
26 | * Update dependency PyGraphistry 0.33.8
27 | 
28 | ### Infra
29 | 
30 | * Enable native execution with stronger default env var handling
31 | 
32 | ## [2023.02.10]
33 | 
34 | ### Changed
35 | 
36 | * Update dependency PyGraphistry 0.33.0
37 | 
38 | ## [2.40.28 - 2023.08.16]
39 | 
40 | ### Changed
41 | 
42 | - Upgraded Streamlit from v1.21.0 to the latest release, v1.25.0
43 | - Improved logging behavior and fixed print statements not being written; we recommend using the Python logger instead of print statements for greater functionality and control ([issue #104](https://github.com/graphistry/graph-app-kit/issues/104))
44 | - Fixed an unhandled exception in demo_avr when Splunk credentials are not set ([issue #105](https://github.com/graphistry/graph-app-kit/issues/105))
45 | - Fixed the incorrect ST_PUBLIC_PORT printed in the logs by entrypoint.sh ([issue #106](https://github.com/graphistry/graph-app-kit/issues/106))
46 | 
47 | ### Added
48 | 
49 | In a previous release, two new demo views were added: demo_avr and demo_login, both cybersecurity-related
50 | 
51 | ### Breaking
52 | 
53 | - Check whether the upgrade from Streamlit 1.21 to 1.25 might have affected your code
54 | 
55 | 
56 | ## [2023.02.10]
57 | 
58 | ### Changed
59 | 
60 | * Infra: CUDA base now 11.5
61 | * Infra: Optimized runner
62 | * Infra: Remove now-unnecessary free space step
63 | 
64 | ### Added
65 | 
66 | * Infra: env var USE_DOCKER=true
67 | * Infra: env var PYTHONPATH=/apps/views
68 | * Infra: env var FAVICON_URL
69 | * Infra: externally overridable src/streamlit/{config.toml,credentials.toml}
70 | * Infra: Update GPU base to 2.39.27
71 | * Infra: Update actions/checkout GHA from v2 (deprecated) to v3
72 | 
73 | ### Breaking
74 | 
75 | * Infra: Removed autoheal service and labels (likely OK for most users)
76 | 
77 | ### Fixed
78 | 
79 | * toml: Fixed startup for users of new startup system (master)
80 | 
81 | 
82 | ## [2.39.25 - 2022.08.14]
83 | 
84 | ### Changed
85 | 
86 | * Update dependency PyGraphistry 0.27.1
87 | * Update dependency Streamlit to 1.12.0
88 | * Update RAPIDS-ecosystem packages (2022.04)
89 | 
90 | ## [2.39.13 - 2022.05.07]
91 | 
92 | ### Added
93 | 
94 | * opt-in to minimal CPU-only base image
95 | * alias in `src/docker`: `./dc`, `./dc.cpu`
96 | * test python 3.7, 3.8, 3.9
97 | * test cpu and gpu docker building
98 | 
99 | ### Changed
100 | 
101 | * build now uses buildkit
102 | * minimal
103 | 
104 | ### Docs
105 | 
106 | * cpu mode
107 | * aliases of `./dc` and `./dc.cpu`
108 | 
109 | ### Breaking
110 | 
111 | * Temporarily switch dockerhub build to CPU minimal while too-large base image kinks get worked out
112 | 
113 | ## [2022.03.13]
114 | 
115 | ### Changed
116 | 
117 | * GraphistrySt: Switched from unsafe mode markdown to component
118 | * PyGraphistry: Updated to 0.21.2
119 | 
120 | ### Breaking
121 | 
122 | * UI: Switch default to wide mode
123 | 
124 | 
125 | ## [2022.03.02]
126 | 
127 | ### Changed
128 | 
129 | * Infra: Upgrade to Graphistry 2.38.10 base
130 | * Infra: Unpinned plotly and updated protobuf, streamlit
131 | 
132 | ### Added
133 | 
134 | * Infra: New `docker-compose.override.yml` symlink mode (WIP)
135 | 
136 | ### Fixed
137 | 
138 | * Infra: Replaced failing GHA collaborators-only with repo setting
139 | 
140 | ## [2021.03.11]
141 | 
142 | ### Changed
143 | 
144 | * CI: Graphistry AMI list generator takes VERSION parameter
145 | * Infra: Upgrade to Graphistry 2.36.6
146 | 
147 | ### Docs
148 | 
149 | * CI: Graphistry AMI list generator usage
150 | * README: Removed dangling link
151 | * README: Quicklaunch links and admin commands
152 | 
153 | ### Fixed
154 | 
155 | * Private Streamlit dashboards instance is now bound to the Jupyter notebook private dashboards folder; it was incorrectly using the public folder: https://github.com/graphistry/graph-app-kit/pull/51/commits/3872de053b7d2888ce271acf395f112491742606
156 | 
157 | ## [2021.03.06]
158 | 
159 | ### Added
160 | 
161 | * CI: Publish CloudFormation templates to S3 on merge (https://github.com/graphistry/graph-app-kit/pull/48)
162 | 
163 | ## [2021.02.24]
164 | 
165 | ### Added
166 | 
167 | * TigerGraph support (https://github.com/graphistry/graph-app-kit/pull/36)
168 | 
169 | ## [2021.02.23]
170 | 
171 | ### Added
172 | 
173 | * Changelog
174 | * AMI enumeration script
175 | * Tests: flake8, docker build
176 | * CI: GHA
177 | * CI: Badges
178 | 
179 | ### Changed
180 | 
181 | * Versions: Streamlit 0.70 -> 0.77, PyGraphistry 0.14 -> 0.17.2, Graphistry -> 2.35.9 (including AMIs)
182 | * Dev docs: Tagging, building
183 | * Graphistry 2.35+ Support: Swaps in old < 2.34-style Caddy 1.x container as Graphistry 2.35's Caddy 2.x is currently tricky for auth reuse
184 | * Plotter auto-memoizes with `.plot(as_files=True, ...)`
185 | 
186 | ### Fixed
187 | 
188 | * Flake8 warnings
189 | 
190 | ### Breaking
191 | 
192 | * Default base container now CUDA 11.0
193 | 
194 | ---
195 | 
--------------------------------------------------------------------------------
/DEVELOP.md:
--------------------------------------------------------------------------------
1 | # Developer Guide
2 | 
3 | These instructions are for contributing to this repository. See the main README.md for usage.
4 | 
5 | ## Manual
6 | 
7 | ```bash
8 | cd src/docker
9 | docker-compose build
10 | docker-compose up
11 | ```
12 | 
13 | ## Test
14 | 
15 | ### CI
16 | 
17 | CI triggers on pushes and pull requests
18 | 
19 | ### Local
20 | 
21 | To test locally:
22 | 
23 | ```bash
24 | cd src/python
25 | ./bin/lint.sh
26 | python3 -m pytest test
27 | ```
28 | 
29 | This is expected to change as full docker-based testing lands.
30 | 
31 | ### AWS
32 | 
33 | * Modify the checkout step in `src/bootstraps/core/graphistry.yml` to use your branch: `git clone -b mybranch`
34 | * Push your branch
35 | * In CloudFormation, upload your modified `graphistry.yml`
36 | 
37 | ## Aligned base versions
38 | 
39 | For faster AWS launches and Graphistry Enterprise, we:
40 | 
41 | - Keep the Docker base in sync with the AWS version & enterprise version:
42 |   * docker-compose.yml: `GRAPHISTRY_FORGE_BASE_VERSION`
43 |   * Dockerfile: `GRAPHISTRY_FORGE_BASE_VERSION`
44 |   * Set both to the appropriate Graphistry (or sufficient Python) base, e.g., `v2.39.12-11.4`
45 | 
46 | - Update the AWS version (bootstraps/*/graphistry.yml) by pointing to that version's region AMIs via bootstraps/scripts/graphistry-ami-list.sh
47 |   * Setup: `apt-get install awscli jq` and `aws configure`
48 |   * Run `src/bootstraps/scripts $ VERSION=2.36.6-11.0 ./graphistry-ami-list.sh`
49 |   * Paste into `src/bootstraps/core,neptune/graphistry.yml`
50 |   * Update `src/docker/docker-compose.yml::GRAPHISTRY_FORGE_BASE_VERSION`
51 | 
52 | ## DockerHub Automated Builds
53 | 
54 | Publishing is managed via DockerHub automated builds of tagged releases
55 | 
56 | Ahead of time:
57 | 
58 | * Ensure you've set `GRAPHISTRY_FORGE_BASE_VERSION` in the `Dockerfile` (not just the `docker-compose.yml`)
59 | * Merge into master
60 | 
61 | Publish:
62 | 
63 | 1. `git tag 2.39.12`
64 |    * Use a tag that corresponds to the Graphistry version, or some suffix (`2.39.12.1`)
65 |    * Note the lack of `v`
66 | 2. `git push --tags`
67 | 
68 | DockerHub automatic builds will:
69 | * publish as tag `v2.39.12-11.4`: note the addition of `v` and `-11.4`
70 | * publish as tag `latest`
71 | 
72 | See [current tags](https://hub.docker.com/r/graphistry/graph-app-kit-st/tags) and available [base tags](https://hub.docker.com/r/graphistry/graphistry-forge-base/tags)
73 | 
74 | ## AWS Publish action
75 | 
76 | * Docker rebuilds on merge to main
77 | * Push a main tag ('v1.2.3') for building named versions
78 | * CloudFormation templates are uploaded to S3 upon a PR being labeled "publish", merge-to-main, or explicit GHA call
79 | 
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | BSD 3-Clause License
2 | 
3 | Copyright (c) 2020, Graphistry
4 | All rights reserved.
5 | 
6 | Redistribution and use in source and binary forms, with or without
7 | modification, are permitted provided that the following conditions are met:
8 | 
9 | 1. Redistributions of source code must retain the above copyright notice, this
10 |    list of conditions and the following disclaimer.
11 | 
12 | 2. Redistributions in binary form must reproduce the above copyright notice,
13 |    this list of conditions and the following disclaimer in the documentation
14 |    and/or other materials provided with the distribution.
15 | 
16 | 3. Neither the name of the copyright holder nor the names of its
17 |    contributors may be used to endorse or promote products derived from
18 |    this software without specific prior written permission.
19 | 
20 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
23 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
24 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
26 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
27 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
28 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
29 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 | 
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | [ ![Version](https://img.shields.io/docker/v/graphistry/graph-app-kit-st?logo=docker) ](https://hub.docker.com/r/graphistry/graph-app-kit-st/tags)
2 | [ ![CI](https://github.com/graphistry/graph-app-kit/actions/workflows/ci.yml/badge.svg) ](https://github.com/graphistry/graph-app-kit/actions/workflows/ci.yml)
3 | [ ![Publish](https://github.com/graphistry/graph-app-kit/actions/workflows/publish.yml/badge.svg) ](https://github.com/graphistry/graph-app-kit/actions/workflows/publish.yml)
4 | [ ![Docker Cloud Build Status](https://img.shields.io/docker/cloud/build/graphistry/graph-app-kit-st?logo=docker) ](https://hub.docker.com/repository/docker/graphistry/graph-app-kit-st/builds)
5 | ✔️ Linux
6 | ✔️ OS X
7 | ❌ Windows ([#39](https://github.com/graphistry/graph-app-kit/issues/39))
8 | 
9 | 
10 | [![Uptime Robot status](https://img.shields.io/uptimerobot/status/m787548531-e9c7b7508fc76fea927e2313?label=hub.graphistry.com)](https://status.graphistry.com/) [](https://join.slack.com/t/graphistry-community/shared_invite/zt-53ik36w2-fpP0Ibjbk7IJuVFIRSnr6g)
11 | [![Twitter Follow](https://img.shields.io/twitter/follow/graphistry)](https://twitter.com/graphistry)
12 | 
13 | 
14 | # Welcome to graph-app-kit
15 | 
16 | Turn your graph data into a secure and interactive visual graph app in 15 minutes!
17 | 
18 | 
19 | ![Screenshot](https://user-images.githubusercontent.com/4249447/92298596-8e518600-eeff-11ea-8276-069281a4af93.png)
20 | 
21 | ## Why
22 | 
23 | This open source effort puts together patterns the Graphistry team has reused across many graph projects as teams go from code-heavy Jupyter notebook experiments to deploying streamlined analyst tools. Whether you are building your first graph app, trying an idea, or checking a reference, this project aims to simplify that process. It covers pieces like easy code editing and deployment, a project structure ready for teams, built-in authentication, no need for custom JS/CSS at the start, batteries-included data + library dependencies, and fast loading & visualization of large graphs.
24 | 
25 | ## What
26 | 
27 | * **Minimal core**: The barebones dashboard server. It provides a Streamlit docker-compose container with PyData ecosystem libraries and examples of visualizing data from various systems. Install it, plug in credentials to web services like cloud databases and a free [Graphistry Hub](https://hub.graphistry.com) visualization account, and launch. It does not include the GPU ETL and GPU AI libraries.
28 | 
29 | * **Full core**: Initially for AWS, the full core bundle adds to the docker-compose system: accounts, Jupyter notebooks for authoring, Streamlit dashboards served in both public + private zones, and Graphistry/RAPIDS running locally on the same server. Launch with one click via the CloudFormation template.
30 | 
31 | * **Full core + DB**: DB-specific variants are the same as minimal/full, and add simpler DB-specific quick launching/connecting.
32 | 
33 | ## Get started
34 | 
35 | ### Quick (Local code) - full GPU core + third-party connectors
36 | 
37 | **Note**: The base image includes Nvidia RAPIDS and AI dependencies, so it is quite large; see the minimal CPU core below for a lightweight alternative
38 | 
39 | **Note**: Use `sudo` for docker-compose commands if your configuration requires it and you are getting permission errors
40 | 
41 | ```bash
42 | # Minimal core
43 | git clone https://github.com/graphistry/graph-app-kit.git
44 | cd graph-app-kit/src/docker
45 | 
46 | # Enable docker buildkit
47 | # ... or run docker-compose via provided alias script `./dc`
48 | export DOCKER_BUILDKIT=1
49 | export COMPOSE_DOCKER_CLI_BUILD=1
50 | 
51 | # Build
52 | docker-compose build
53 | 
54 | # Optional: Edit src/docker/.env (API accounts), docker-compose.yml: Auth, ports, ...
55 | 
56 | # Launch
57 | docker-compose up -d
58 | docker-compose logs -f -t --tail=100
59 | ```
60 | 
61 | => `http://localhost:8501/`
62 | 
63 | To [add views](docs/views.md) and relaunch:
64 | 
65 | ```bash
66 | # Add dashboards @ src/python/views/<view_name>/__init__.py
67 | 
68 | docker-compose up -d --force-recreate
69 | ```
70 | 
71 | ### Quick (Local code) - minimal CPU core + third-party connectors
72 | 
73 | Same commands as above, but use `./dc.cpu`, which aliases `docker-compose -f docker-compose.yml -f override/cpu.override.yml`:
74 | 
75 | ```bash
76 | git clone https://github.com/graphistry/graph-app-kit.git
77 | cd graph-app-kit/src/docker
78 | ./dc.cpu build
79 | ...
80 | ./dc.cpu up
81 | ```
82 | 
83 | ### Quick Launchers - minimal/full core
84 | 
85 | 1. Quick launch options:
86 | 
87 | **Full**: [![Launch Stack](https://s3.amazonaws.com/cloudformation-examples/cloudformation-launch-stack.png)](https://console.aws.amazon.com/cloudformation/home?region=region#/stacks/new?stackName=graph_app_kit_full&templateURL=https://graph-app-kit-repo-public.s3.us-east-2.amazonaws.com/templates/latest/core/graphistry.yml)
88 | 
89 |   * Public + protected Streamlit dashboards, Jupyter notebooks + editing, Graphistry, RAPIDS
90 |   * Login to the web UI as `admin` / `i-instanceid` -> file uploader, notebooks, ...
91 |   * Dashboards: `/public/dash` and `/private/dash`
92 |   * [More info](docs/setup.md)
93 | 
94 | Admin:
95 | 
96 | ```bash
97 | # launch logs
98 | tail -f /var/log/cloud-init-output.log -n 1000
99 | 
100 | # app logs
101 | sudo docker ps
102 | sudo docker logs -f -t --tail=1 MY_CONTAINER
103 | 
104 | # restart a graphistry container
105 | cd graphistry && sudo docker-compose restart MY_CONTAINER
106 | 
107 | # restart caddy (Caddy 1 override)
108 | cd graphistry && sudo docker-compose -f docker-compose.gak.graphistry.yml up -d caddy
109 | 
110 | # run streamlit
111 | cd graph-app-kit/public/graph-app-kit && docker-compose -p pub run -d --name streamlit-pub streamlit
112 | cd graph-app-kit/private/graph-app-kit && docker-compose -p priv run -d --name streamlit-priv streamlit
113 | ```
114 | 
115 | **Minimal**: Open Streamlit, then ssh in to connect/add a [free Graphistry Hub username/pass](https://www.graphistry.com/get-started):
116 | 
117 | **Database-specific**: [Amazon Neptune](docs/neptune.md), [TigerGraph](docs/tigergraph.md)
118 | 
119 | 2. [Add views](docs/views.md)
120 | 
121 | 3. [Main configurations and extensions](docs/extend.md): Database connectors, authentication, notebook-based editing, and more
122 | 
123 | ### Native (Experimental)
124 | 
125 | Install dependencies, pick the location of your views folder, and run:
126 | 
127 | ```bash
128 | cd src/python
129 | pip3 install -r requirements-system.txt
130 | pip3 install -r requirements-app.txt
131 | VIEW_PATH="`pwd`/views" streamlit run entrypoint.py
132 | ```
133 | 
134 | ## The pieces
135 | 
136 | ### Core
137 | 
138 | * Prebuilt Python project structure ready for prototyping
139 | * [Streamlit](https://www.streamlit.io/) quick self-serve dashboarding
140 | * [Graphistry](https://www.graphistry.com/get-started) point-and-click GPU-accelerated visual graph analytics
141 | * Data frames: Data wrangling via [Pandas](https://pandas.pydata.org/) and [Apache Arrow](https://arrow.apache.org/), including handling formats such as CSV, XLS, JSON, Parquet, and more
142 | 
143 | * Standard Docker and docker-compose cross-platform deployment
144 | 
145 | ### GPU acceleration (optional) - Full
146 | 
147 | For non-minimal installs, if GPUs are present, `graph-app-kit` leverages GPU cloud acceleration:
148 | 
149 | * GPU Analytics: [RAPIDS](https://www.rapids.ai) and CUDA already set up for use when run with an Nvidia docker runtime - cudf GPU dataframes, [BlazingSQL](https://www.blazingsql.com) GPU SQL, cuGraph GPU graph algorithms, cuML libraries, and more
150 | 
151 | * GPU Visualization: Connect to an external Graphistry server or, faster, run on the same GPU server
152 | 
153 | ### Prebuilt integrations & recipes
154 | 
155 | `graph-app-kit` works well with the Python data ecosystem (pandas, cudf, PySpark, SQL, ...) and we're growing the set of builtins and recipes:
156 | 
157 | * Graph databases
158 | 
159 |   * [AWS Neptune](https://aws.amazon.com/neptune/): [quick launch](docs/neptune.md) and [manual launch](docs/neptune-manual.md)
160 |   * [TinkerPop Gremlin](https://tinkerpop.apache.org/): [query demos](https://github.com/graphistry/graph-app-kit/tree/master/src/python/views/demo_neptune_01_minimal_gremlin)
161 |   * [TigerGraph](https://www.tigergraph.com): [setup](docs/tigergraph.md)
162 | 
163 |   * Collaborations welcome!
164 | 
165 | * [Jupyter notebooks](https://jupyter.org/): Use the quick launchers or the [integrations guide](docs/extend.md) for web-based live editing of dashboards by sharing volume mounts between Jupyter and Streamlit
166 | 
167 | * [Caddy](https://caddyserver.com/): Reverse proxy for custom URLs, [automatic LetsEncrypt TLS certificates](http://letsencrypt.org/), multiple sites on the same domain, and pluggable authentication (see the [integrations guide](docs/extend.md))
168 | 
169 | * You can also install [custom Python packages](docs/additional-packages.md).
170 | 
171 | ## Contribute
172 | 
173 | We welcome all sorts of help!
174 | 
175 | * Deployment: Docker, cloud runners, ...
176 | * Dependencies: Common graph packages
177 | * Connectors: Examples for common databases and how to get a lot of data out
178 | * Demos!
179 | 
180 | See [DEVELOP.md](DEVELOP.md) for more contributor information
181 | 
--------------------------------------------------------------------------------
/docs/additional-packages.md:
--------------------------------------------------------------------------------
1 | # Adding Custom Python Packages to graph-app-kit
2 | 
3 | ## Install Custom Python Packages Locally
4 | 
5 | Install the desired custom Python packages on your localhost. For example, if you want to include a different version of `PyGraphistry` and an additional dependency like `faker`, run the following commands in your terminal:
6 | 
7 | ```bash
8 | pip install graphistry[all]
9 | pip install faker
10 | ```
11 | 
12 | Graph-app-kit dependency errors on `pip install` are normal and can be safely ignored.
13 | 
14 | If the environment is airgapped, copy the packages into the path `./data/py_envs/gak` from the root directory of the Graphistry install. Ensure that you use the same system architecture and Python version.
--------------------------------------------------------------------------------
/docs/extend.md:
--------------------------------------------------------------------------------
1 | # Configure graph-app-kit and add integrations
2 | 
3 | Most settings can be configured by creating a custom Docker environment file `src/docker/.env` (see `src/envs/*.env` for options). You can also edit `docker-compose.yml` and `/etc/docker/daemon.json`, but we recommend sticking to `.env`.
4 | 
5 | Integration settings that deal with external systems such as TLS, accounts, and notebooks require those systems to already be running. If you are not integrating with existing ones, see the initial [setup section](setup.md) for how to quicklaunch a Graphistry server.
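To make the configuration flow concrete, here is a minimal sketch of how values in `src/docker/.env` typically reach app code: docker-compose injects them into the container environment, and Python reads them via `os.environ`. This is an illustration only, not the kit's actual wiring (see `src/python/components/` for the real components); the helper name is hypothetical, while the env var names match the settings documented below and in `src/envs/*.env`:

```python
import os

import graphistry  # PyGraphistry

# Hypothetical helper (illustration only): docker-compose loads src/docker/.env
# into the container environment, and app code reads the values at startup.
def register_graphistry_from_env() -> None:
    graphistry.register(
        api=3,
        protocol=os.environ.get("GRAPHISTRY_PROTOCOL", "https"),
        server=os.environ.get("GRAPHISTRY_SERVER", "hub.graphistry.com"),
        username=os.environ["GRAPHISTRY_USERNAME"],
        password=os.environ["GRAPHISTRY_PASSWORD"],
    )
```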
6 | 
7 | ## Monitoring
8 | 
9 | By default, `graph-app-kit` logging uses the Docker json file driver:
10 | 
11 | * Inspect recent activities: `cd src/docker` and then `sudo docker-compose logs -f -t --tail=100`
12 | 
13 | * Set up [alternative logging drivers](https://docs.docker.com/config/containers/logging/configure/)
14 | 
15 | ## Core
16 | 
17 | * Streamlit: URL base path: `BASE_PATH=dashboard/` and `BASE_URL=http://localhost/dashboard/`
18 | * Graphistry: None - set `GRAPHISTRY_USERNAME=usr` + `GRAPHISTRY_PASSWORD=pwd` (see `src/envs/graphistry.env` for more, like `GRAPHISTRY_SERVER` if using a private Graphistry server)
19 | * Log level: `LOG_LEVEL=ERROR` (for Python's `logging`)
20 | 
21 | ## Databases
22 | 
23 | * [Amazon Neptune guide](docs/neptune.md) for TinkerPop/Gremlin integration
24 | 
25 | ## TLS with Caddy
26 | 
27 | * Auth: See the [Caddy sample](src/caddy/Caddyfile) reverse proxy example for an authentication check against an account system, including the one shipping with your Graphistry server (requires `sudo docker-compose restart caddy` on your Graphistry server after editing `/var/graphistry/data/config/Caddyfile`)
28 | 
29 | ## Public+Private views
30 | * To simultaneously run one public and one private instance, create two `graph-app-kit` clones `public_dash` and `private_dash`, and in their respective `src/docker/.env` files, set:
31 |   * `COMPOSE_PROJECT_NAME=streamlit-pub` and `COMPOSE_PROJECT_NAME=streamlit-priv`
32 |   * Override the default `ST_PUBLIC_PORT=8501` with two distinct values
33 | * See the [Caddy sample](src/caddy/Caddyfile) for configuring URI routes, including covering the private instance with your Graphistry account system (JWT auth URL)
--------------------------------------------------------------------------------
/docs/neptune-manual.md:
--------------------------------------------------------------------------------
1 | # Amazon Neptune and graph-app-kit manual setup
2 | 
3 | For quick launch and an introduction, see the [main graph-app-kit Neptune docs](neptune.md).
4 | 
5 | By using `graph-app-kit` with Amazon Neptune, you can visually explore graph database data and share point-and-click dashboard tools. This guide walks through manually launching Neptune and `graph-app-kit`, and connecting them. Alternatively, the [CloudFormation templates](neptune.md) enable quick launching preconfigured versions of both.
6 | 
7 | ## 1. Manually set up Amazon Neptune
8 | 
9 | Ensure your Amazon Neptune database instance can be connected to by your `graph-app-kit` instance:
10 | 
11 | - You must have or create an Amazon Neptune cluster. See the official [Getting Started with Neptune docs](https://docs.aws.amazon.com/neptune/latest/userguide/get-started.html).
12 | 
13 | - Amazon Neptune clusters are hosted in a private VPC, so the server hosting `graph-app-kit` must be [granted access to the VPC](https://docs.aws.amazon.com/neptune/latest/userguide/security-vpc.html). We *strongly* recommend hosting the `graph-app-kit` instance in a public subnet within the Neptune database's VPC.
14 | 
15 | - If using IAM authorization on your Amazon Neptune cluster, the sample code provided will need to be updated to use SigV4 signing of requests. We recommend using [this tool](https://github.com/awslabs/amazon-neptune-tools/tree/master/neptune-python-utils) to simplify the process.
16 | 
17 | ## 2. Manually launch and configure graph-app-kit for Amazon Neptune
18 | 
19 | 
20 | Create an AWS EC2 `graph-app-kit` instance using the usual [graph-app-kit first launch step](setup-manual.md), with the following launch settings:
21 | 
22 | 1. Set `Network` to the `VPC` ID value ("`vpc-...`") from `1. Setup Amazon Neptune` (unless performing an alternative like manual VPC peering)
23 | 2. Set `Subnet` to the `PublicSubnet1` subnet ID value ("`subnet-...`") from `1. Setup Amazon Neptune`
24 |    * `Auto-assign Public IP` should default to `Use subnet setting (Enable)`
25 | 
26 | Continue through the [graph-app-kit steps to download and build](setup-manual.md).
27 | 
28 | 
29 | SSH into your `graph-app-kit` instance and set the following required environment variables in your [src/docker/.env](src/docker/.env) file:
30 | 
31 | ```bash
32 | NEPTUNE_READER_PROTOCOL=wss
33 | NEPTUNE_READER_HOST=
34 | NEPTUNE_READER_PORT=8182
35 | ```
36 | 
37 | For additional Neptune template options, see [src/envs/neptune.env](../src/envs/neptune.env).
38 | 
39 | Reset and restart your `graph-app-kit` container:
40 | 
41 | ```bash
42 | cd src/docker
43 | sudo docker-compose down -v
44 | sudo docker-compose up -d
45 | ```
46 | 
47 | Watch logs with `sudo docker-compose logs -f -t --tail=1`
48 | 
49 | Access your Streamlit instance at http://the.public.ip.address:8501
50 | 
51 | ## 3. Graph!
52 | 
53 | * Go to your Streamlit homepage using the link from the launch section you followed
54 | * Select `GREMLIN: SIMPLE SAMPLE` from the dropdown to load a random sample of nodes from whatever Neptune database is connected
55 | * Continue to the instructions for [creating custom views](views.md) and [adding common extensions](extend.md) like TLS, public/private dashboards, and more
56 | 
57 | 
58 | ## Advanced Neptune Configuration
59 | 
60 | ### Run in an AWS VPC outside of the Neptune VPC
61 | 
62 | Follow the AWS and Neptune VPC Peering instructions for mapping subnets across the VPCs.
63 | 
64 | ### Run outside of AWS and local development
65 | 
66 | You can run Streamlit from outside of AWS. The challenge is that Amazon Neptune runs in a private VPC, meaning it exposes no internet-accessible API endpoints.
67 | 
68 | Options to connect to Amazon Neptune from a local computer include:
69 | 
70 | * Set up an SSH tunnel for your connections
71 | 
72 | * Configure a load balancer to expose the Neptune endpoint
73 | 
74 | * Choose another way to configure secure access to a private VPC in AWS.
75 | Check the [Getting Started with Neptune](https://docs.aws.amazon.com/neptune/latest/userguide/get-started.html) page for the current recommended best practices.
76 | 
--------------------------------------------------------------------------------
/docs/neptune.md:
--------------------------------------------------------------------------------
1 | # Quick launch Amazon Neptune and graph-app-kit
2 | 
3 | [Amazon Neptune](https://aws.amazon.com/neptune/) is a:
4 | 
5 | > ... "fast, reliable, fully managed graph database service that makes it easy to build and run applications that work with highly connected datasets"
6 | 
7 | Amazon Neptune supports both property graphs with [Apache TinkerPop/Gremlin](https://tinkerpop.apache.org/) queries and RDF graphs with SPARQL queries. By using `graph-app-kit` with Amazon Neptune, you can visually explore graph database data and share point-and-click dashboard tools.
8 | 
9 | This guide walks through quick launch scripts for Neptune and Neptune-aware `graph-app-kit`. Alternatively, you may follow our [manual Neptune setup guide](neptune-manual.md).
10 | 
11 | ## 1. Setup Amazon Neptune with identity graph demo data
12 | 
13 | Launch using a button at the bottom of the [identity graph sample CloudFormation templates tutorial](https://aws.amazon.com/blogs/database/building-a-customer-identity-graph-with-amazon-neptune/):
14 | 
15 | 1. Click the `Launch Stack` button for your region:
16 |    * If launching the Full `graph-app-kit` template or using a GPU instance, use an AWS region with 4+ vCPU quota of `g4dn`/`p3`/`p4` ([or request it](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-resource-limits.html))
17 | 2. Check the acknowledgement boxes in the `Capabilities` section
18 | 3. Click `Create Stack` (5-20min)
19 | 
20 | **Important Output**: Open the root `Identity-Graph-Sample` item's `Output` tab to see the values you will fill in to configure the next steps:
21 | 
22 | * `VPC`: ID `vpc-abc`
23 | * `PublicSubnet1`: ID `subnet-abc`
24 | * `DBClusterReadEndpoint`: URL `abc.cluster-ro-xyz.zzz.neptune.amazonaws.com`
25 | 
26 | ----
27 | 
28 | **Manage:**
29 | 
30 | * **Neptune**: AWS console -> `Services` -> `Neptune` -> `Databases`
31 | 
32 | * **Stack** (inspect/delete): `AWS Console` -> `Services` -> `CloudFormation` -> `Stacks`
33 | 
34 | * **Resize ($)**:
35 |   * Above **Neptune** console -> `Databases` -> **Modify** the **Writer**
36 |   * Change *DB instance class* to *db.r4.large* -> `Continue` -> check **Apply immediately** -> `Modify DB Instance`
37 | 
38 | ## 2. Launch graph-app-kit configured for Amazon Neptune
39 | 
40 | #### Configuration 1: Full (Recommended)
41 | 
42 | [![Launch Stack](https://s3.amazonaws.com/cloudformation-examples/cloudformation-launch-stack.png)](https://console.aws.amazon.com/cloudformation/home?region=region#/stacks/new?stackName=graph_app_kit_full&templateURL=https://graph-app-kit-repo-public.s3.us-east-2.amazonaws.com/templates/latest/neptune/graphistry.yml) *Full stack*
43 | 
44 | Launches:
45 | 
46 | * GPU EC2 instance in your Neptune VPC ([request 4+ vCPU of g4, p3, or p4 capacity](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-resource-limits.html) if none)
47 | * Start making views for Neptune data immediately
48 | * Web-based live editing
49 | * Graphistry, public + private Streamlit dashboards, Jupyter notebooks, RAPIDS.ai Python GPU ecosystem
50 | 
51 | 
52 | If AWS warns `Please select another region`, use the `Select a Region` dropdown in the top right menu.
53 | 
54 | #### Configuration 2: Minimal
55 | 
56 | 1. [Get a free or self-managed Graphistry server account](https://www.graphistry.com/get-started) with username+password
57 | 
58 | 2. [![Launch Stack](https://s3.amazonaws.com/cloudformation-examples/cloudformation-launch-stack.png)](https://console.aws.amazon.com/cloudformation/home?region=region#/stacks/new?stackName=graph_app_kit_minimal&templateURL=https://graph-app-kit-repo-public.s3.us-east-2.amazonaws.com/templates/latest/neptune/minimal.yml) *Minimal stack*
59 | 
60 | Launches:
61 | 
62 | * CPU EC2 instance in your Neptune VPC
63 | * Create Neptune views from your terminal
64 | * Public Streamlit dashboards linked against a remote Graphistry account
65 | * Not included: Local Graphistry server, private dashboards, Jupyter notebooks, RAPIDS.ai GPU ecosystem
66 | 
67 | If AWS reports `Please select another region`, use the `Select a Region` dropdown in the top right menu.
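For orientation before launching, the sketch below shows roughly the kind of query the bundled `GREMLIN: SIMPLE SAMPLE` view issues. It is illustrative only: the endpoint is a placeholder for your `DBClusterReadEndpoint` from step 1, and the kit normally wires the connection for you via the `NEPTUNE_READER_*` settings described in the [manual setup guide](neptune-manual.md). It assumes the `gremlinpython` package and network access into the Neptune VPC:

```python
from gremlin_python.driver.driver_remote_connection import DriverRemoteConnection
from gremlin_python.process.anonymous_traversal import traversal

# Placeholder: the wss:// form of the DBClusterReadEndpoint output from step 1
endpoint = "wss://abc.cluster-ro-xyz.zzz.neptune.amazonaws.com:8182/gremlin"

conn = DriverRemoteConnection(endpoint, "g")
g = traversal().withRemote(conn)

# Pull a small sample of vertices with their ids, labels, and properties
sample = g.V().limit(100).valueMap(True).toList()
print(f"fetched {len(sample)} vertices")

conn.close()
```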
68 | 69 | ---- 70 | 71 | #### Launch configuration: `Details` parameters 72 | 73 | * Set stack name to anything, such as `graph-app-kit-a` 74 | * Set `VPC` to the `VPC` ID value ("`vpc-...`") from `1. Setup Amazon Neptune` 75 | * Set `Subnet` to the `PublicSubnet1` subnet ID value ("`subnet-...`") from `1. Setup Amazon Neptune` 76 | * Set `GraphAppKitKeyPair` to a key pair for which you have the private key, for SSH'ing in 77 | * If using the minimal template, fill in details for connecting to a remote Graphistry server 78 | 79 | #### *Optional:* Monitor instance launch for progress and errors 80 | 81 | * `Resources` tab -> EC2 instance link -> click instance for details (public IP, ...) 82 | 83 | * Log in and watch: 84 | 85 | ```bash 86 | ssh -i /my/private.key ubuntu@the.instance.public.ip 87 | ### ssh -i /my/private.key ec2-user@the.instance.public.ip for Minimal launcher 88 | 89 | tail -f /var/log/cloud-init-output.log -n 1000 90 | ``` 91 | 92 | ## 3. Graph! 93 | 94 | * Go to your public Streamlit dashboard: http://[the.public.ip.address]/public/dash 95 | * Select `GREMLIN: SIMPLE SAMPLE` from the dropdown to load a random sample of nodes from whatever Neptune database is connected 96 | 97 | ### Login 98 | 99 | * Log into the web portal at **http://[the.public.ip.address]** using credentials **`admin`** / ***`i-theInstanceID`*** 100 | 101 | * The minimal launcher has no web-based account portal 102 | 103 | * *Optional*: SSH using the instructions from step 2 104 | 105 | ### Launched URLs for full stack 106 | 107 | * **Graphistry: GPU-accelerated visual analytics + account login** 108 | * **http://[the.public.ip.address]** 109 | * Login as `admin` / `your-aws-instance-id` 110 | * Installed at `/home/ubuntu/graphistry` 111 | * You can change your admin password using the web UI 112 | * **Streamlit: Public dashboards** 113 | * **http://[the.public.ip.address]/public/dash** 114 | * Installed at `/home/ubuntu/graph-app-kit/public/graph-app-kit` 115 | * Run as `src/docker $ docker-compose -p pub run -d --name streamlit-pub streamlit` 116 | * **Streamlit: Private dashboards** 117 | * **http://[the.public.ip.address]/private/dash** 118 | * Installed at `/home/ubuntu/graph-app-kit/private/graph-app-kit` 119 | * Run as `src/docker $ docker-compose -p priv run -d --name streamlit-priv streamlit` 120 | * **Jupyter: Data science notebooks + Streamlit dashboard live-editing** 121 | * **http://[the.public.ip.address]/notebook** 122 | * Live-edit the `graph-app-kit` view folders `notebook/graph-app-kit/[public,private]/views` 123 | 124 | ### Launched URLs for minimal stack 125 | 126 | * **Streamlit: Public dashboards** 127 | * **http://[the.public.ip.address]/public/dash** 128 | * Installed at `/home/ubuntu/graph-app-kit/public/graph-app-kit` 129 | * Run as `src/docker $ docker-compose up -d streamlit` 130 | 131 | ## 4. Next steps 132 | 133 | Continue to the instructions for [creating custom views](views.md) and [adding common extensions](extend.md) like TLS, public/private dashboards, and more. 134 | 135 | For more advanced Neptune configuration options, see the [manual Amazon Neptune setup guide](neptune-manual.md). 136 | -------------------------------------------------------------------------------- /docs/setup-manual.md: -------------------------------------------------------------------------------- 1 | # Setup graph-app-kit 2 | 3 | For quick launchers, see the [AWS quick launch setup guide](setup.md). 4 | 5 | ## 1.
Launch and set up a server for Docker + extensions 6 | 7 | **Manual: Launch a Linux server, then install and configure dependencies** 8 | 9 | * Open ports: 22, 80, 443, 8501 for all users (`0.0.0.0/0`) or for specific admin and user IPs 10 | 11 | * Ubuntu 18.04 LTS is the most common choice for containerized GPU computing 12 | 13 | * Install docker-ce and docker-compose 14 | 15 | * Optional: 16 | * GPU: If you have a [RAPIDS.ai](https://www.rapids.ai)-compatible GPU (see below), install the Nvidia docker runtime and set it as the Docker daemon's default runtime 17 | 18 | * Extensions: Install Jupyter, a reverse proxy (ex: Caddy), and an authentication system 19 | 20 | **Note: GPU Instances**: Cloud providers generally require you to request GPU capacity quota for your account, which may take 1 day. [RAPIDS.ai-compatible GPU instance types](https://github.com/graphistry/graphistry-cli/blob/master/hardware-software.md#cloud) include: 21 | 22 | * AWS: g4, p3, p4 23 | * Azure: NC6s_v2+, ND+, NCasT4 24 | 25 | ## 2. Download graph-app-kit 26 | 27 | ```bash 28 | git clone https://github.com/graphistry/graph-app-kit.git 29 | ``` 30 | 31 | ## 3. Build 32 | 33 | ```bash 34 | cd graph-app-kit/src/docker 35 | sudo docker-compose build 36 | ``` 37 | 38 | ## 4. Set your Graphistry visualization credentials 39 | 40 | Get a public or private Graphistry account: 41 | 42 | * Graphistry Hub (public, free): [Create a free Graphistry Hub account](https://hub.graphistry.com/) using the username/password option, which you will use for API access. Visualizations will default to pointing to the public Graphistry Hub GPU servers. 43 | 44 | * Alternatively, [launch a private Graphistry server](https://www.graphistry.com/get-started), log in, and use the username/password/URL for your configuration. 45 | 46 | Edit `src/docker/.env` with: 47 | 48 | ```bash 49 | GRAPHISTRY_USERNAME=your_username 50 | GRAPHISTRY_PASSWORD=your_password 51 | ### OPTIONAL: Add if a private/local Graphistry server 52 | #GRAPHISTRY_PROTOCOL=http or https 53 | #GRAPHISTRY_SERVER=your.private-server.net 54 | ``` 55 | 56 | ## 5. Start & stop 57 | 58 | `cd src/docker` and then: 59 | 60 | * Start: `sudo docker-compose up -d` 61 | * Use: Go to `http://localhost:8501/dashboard` (or your server's public IP) 62 | * Stop: `sudo docker-compose down -v` 63 | 64 | ## Graph! 65 | 66 | You are now ready to [add custom views](views.md) and [add integrations](extend.md). -------------------------------------------------------------------------------- /docs/setup.md: -------------------------------------------------------------------------------- 1 | # Setup graph-app-kit 2 | 3 | The following provides quick launchers for AWS-based deployments. See [manual setup](setup-manual.md) for alternative instructions, and check for database-specific integrations such as for [Amazon Neptune](neptune.md). 4 | 5 | ## 1.
Launch graph-app-kit 6 | 7 | **Option 1 - Full (Recommended):** 8 | 9 | [![Launch Stack](https://s3.amazonaws.com/cloudformation-examples/cloudformation-launch-stack.png)](https://console.aws.amazon.com/cloudformation/home?region=region#/stacks/new?stackName=graph_app_kit_full&templateURL=https://graph-app-kit-repo-public.s3.us-east-2.amazonaws.com/templates/latest/core/graphistry.yml) 10 | 11 | * If AWS reports `Please select another region`, use the `Select a Region` dropdown in the top right menu 12 | 13 | Launches: 14 | 15 | * GPU instance 16 | * Web-based live editing 17 | * Core toolkit: Graphistry, public + private Streamlit dashboards, Jupyter notebooks, RAPIDS.ai Python GPU ecosystem 18 | 19 | Tenants launching GPUs for the first time may need to [request 4+ vCPU of g4, p3, or p4 capacity](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-resource-limits.html) 20 | 21 | **Option 2 - Minimal:** 22 | 23 | Steps: 24 | 25 | * Get a free or self-managed [Graphistry server account](https://www.graphistry.com/get-started) with username+password 26 | * [![Launch Stack](https://s3.amazonaws.com/cloudformation-examples/cloudformation-launch-stack.png)](https://console.aws.amazon.com/cloudformation/home?region=region#/stacks/new?stackName=graph_app_kit_minimal&templateURL=https://graph-app-kit-repo-public.s3.us-east-2.amazonaws.com/templates/latest/core/minimal.yml) 27 | * If AWS reports `Please select another region`, use the `Select a Region` dropdown in the top right menu. 28 | 29 | 30 | Launches: 31 | 32 | * AWS CPU-mode instance + hub.graphistry.com account 33 | * Edit dashboard views from your terminal 34 | * Included: Public Streamlit dashboards linked against a remote Graphistry account 35 | * Not included: Local Graphistry (GPU), private dashboards, Jupyter, RAPIDS.ai (GPU) 36 | 37 | 38 | ---- 39 | 40 | 1. **Launch configuration: `Details` parameters** 41 | 42 | * Set stack name to anything, such as `graph-app-kit-a` 43 | * Set `VPC` to one that is web-accessible 44 | * Set `Subnet` to a web-accessible subnet in the VPC ("`subnet-...`") 45 | * Set `GraphAppKitKeyPair` to any key pair for which you have the SSH private key 46 | 47 | If using the minimal template, fill in details for your Graphistry account 48 | 49 | 2. ***(Optional):*** **Monitor instance launch for progress and errors** 50 | 51 | * Click the `Resources` tab and follow the link to the EC2 instance AWS console page once it is generated 52 | 53 | * Click on the instance to find its public IP address 54 | 55 | * Log in and watch: 56 | 57 | ```bash 58 | ssh -i /my/private.key ubuntu@the.instance.public.ip 59 | ### ssh -i /my/private.key ec2-user@the.instance.public.ip for Minimal launcher 60 | 61 | tail -f /var/log/cloud-init-output.log -n 1000 62 | ``` 63 | 64 | ## 3. Graph!
65 | 66 | Go to your public Streamlit dashboard and start exploring: http://[the.public.ip.address]/public/dash 67 | 68 | ### Login 69 | 70 | * Upon launch completion, you will have a full suite of graph tools located at **http://[the.public.ip.address]** 71 | 72 | * Web login using credentials **`admin`** / ***`i-theInstanceID`*** 73 | 74 | * SSH using the instructions from step 2 75 | 76 | * ***Note***: The minimal launcher has no web admin portal, just SSH and Streamlit 77 | 78 | ### URLs for full stack 79 | 80 | * **Graphistry: GPU-accelerated visual analytics + account login** 81 | * **http://[the.public.ip.address]** 82 | * Login as `admin` / `your-aws-instance-id` 83 | * Installed at `/home/ubuntu/graphistry` 84 | * You can change your admin password using the web UI 85 | * **Streamlit: Public dashboards** 86 | * **http://[the.public.ip.address]/public/dash** 87 | * Installed at `/home/ubuntu/graph-app-kit/public/graph-app-kit` 88 | * Run as `src/docker $ docker-compose -p pub run -d --name streamlit-pub streamlit` 89 | * **Streamlit: Private dashboards** 90 | * **http://[the.public.ip.address]/private/dash** 91 | * Installed at `/home/ubuntu/graph-app-kit/private/graph-app-kit` 92 | * Run as `src/docker $ docker-compose -p priv run -d --name streamlit-priv streamlit` 93 | * **Jupyter: Data science notebooks + Streamlit dashboard live-editing** 94 | * **http://[the.public.ip.address]/notebook** 95 | * Live-edit the `graph-app-kit` view folders `notebook/graph-app-kit/[public,private]/views` 96 | 97 | ### URLs for minimal stack 98 | 99 | * **Streamlit: Public dashboards** 100 | * **http://[the.public.ip.address]/public/dash** 101 | * Installed at `/home/ubuntu/graph-app-kit/public/graph-app-kit` 102 | * Run as `src/docker $ docker-compose up -d streamlit` 103 | 104 | ## 4. Optional - administer 105 | 106 | Advanced users can SSH into the server to manipulate individual services: 107 | 108 | ### System visibility 109 | 110 | ```bash 111 | # launch logs 112 | tail -f /var/log/cloud-init-output.log -n 1000 113 | 114 | # app logs 115 | sudo docker ps 116 | sudo docker logs -f -t --tail=1 MY_CONTAINER 117 | 118 | # stats 119 | sudo htop 120 | sudo iftop 121 | top 122 | ``` 123 | 124 | ### Graphistry 125 | 126 | For more advanced Graphistry administration, see the [Graphistry admin docs repo](https://github.com/graphistry/graphistry-cli) 127 | 128 | ```bash 129 | # restart a graphistry container 130 | cd graphistry && sudo docker-compose restart MY_CONTAINER # do *not* run its Caddy (v2) 131 | 132 | # restart caddy (Caddy 1 override over Graphistry's Caddy 2) 133 | cd graphistry && sudo docker-compose -f docker-compose.gak.graphistry.yml up -d caddy 134 | ``` 135 | 136 | ### Streamlit 137 | 138 | Use `docker-compose` project names (`-p the_name`) to distinguish your public vs private dashboards: 139 | 140 | ```bash 141 | cd graph-app-kit/public/graph-app-kit && docker-compose -p pub run -d --name streamlit-pub streamlit 142 | cd graph-app-kit/private/graph-app-kit && docker-compose -p priv run -d --name streamlit-priv streamlit 143 | ``` 144 | 145 | ## 5.
Next steps 146 | 147 | Continue to the instructions for [creating custom views](views.md) and [adding common extensions](extend.md) like TLS, public/private dashboards, and more. 148 | -------------------------------------------------------------------------------- /docs/tigergraph.md: -------------------------------------------------------------------------------- 1 | # Quick launch TigerGraph and graph-app-kit 2 | This guide walks through setting up a TigerGraph database and configuring it with `graph-app-kit`. 3 | 4 | ## Setup 5 | 6 | ### 1. Setup TigerGraph 7 | 8 | Create a free TigerGraph Cloud account, launch the prebuilt solution, load the sample data, and install the sample queries. 9 | 10 | 1. Create a [TG Cloud](https://tgcloud.io/) account 11 | 2. Click **Create Solution** and choose the **Fraud and Money Laundering Detection** starter kit. 12 | 3. Click through the options and launch, which takes ~5 minutes 13 | 4. Open [GraphStudio](https://www.tigergraph.com/graphstudio/): `My Solutions` -> `Applications` -> `Graph Studio`, using credentials `tigergraph` / `password_you_set_during_launch` 14 | 5. In the top-left dropdown, flip from `Global View` to `Anti Fraud` 15 | 6. Click the left menu's **Load Data** option and hit the *play button* (`Start/Resume loading`) to load the sample data into the graph 16 | 7. Click the left menu's **Write Queries** option and hit the *up button* (`Install all queries`) to compile the sample queries 17 | 8. Generate a secret token: Top-right `Admin` button -> `User Management` -> new alias `mysecret` -> hit `[ + ]` and copy the generated secret 18 | 19 | See also the demo video from an older version of TG Cloud: 20 | 21 | [ ](https://www.youtube.com/watch?v=JARd9ULRP_I) 22 | 23 | 24 | ### 2. Quick-launch graph-app-kit 25 | 26 | One-click launch a [full graph-app-kit install](setup.md) or manually set up a [local minimal version](setup-manual.md) 27 | 28 | * [AWS quick launch](setup.md): Instance with preloaded Docker setup of Jupyter notebooks, public+private StreamLit dashboards, Graphistry/RAPIDS GPU visual analysis 29 | 30 | * [Local version](setup-manual.md): Local Docker container with StreamLit and libraries, guides for adding API keys and common configurations 31 | 32 | ### 3. OPTIONAL: Store TigerGraph credentials in graph-app-kit 33 | 34 | 1. Go to your install's command line. If a quick-launched cloud instance, do so by SSH'ing and going to either your public server (`graph-app-kit/public/`) or private server (`graph-app-kit/private/`) 35 | 36 | 2. Store your TigerGraph credentials in `src/envs/tigergraph.env`: 37 | 38 | ```bash 39 | TIGERGRAPH_HOST=https://myinstance.i.tgcloud.io 40 | TIGERGRAPH_USERNAME=tigergraph 41 | TIGERGRAPH_PASSWORD=mypassword 42 | TIGERGRAPH_GRAPHNAME=AntiFraud 43 | TIGERGRAPH_SECRET=mysecret 44 | ``` 45 | 46 | 3. Restart your Streamlit container with the new creds: `cd src/docker && sudo docker-compose up -d --force-recreate` 47 | 48 | ## Explore 49 | 50 | 1. Go to the Streamlit website based on how you launched graph-app-kit: `/public/dash` / `/private/dash` (quick launched) or `localhost:8501/` (manual) 51 | 52 | The generic `INTRO: SIMPLE PIPELINE` dashboard should have loaded 53 | 54 | 2.
Switch to `TIGERGRAPH: FRAUD FILTER CIRCLE` 55 | 56 | If you did not store TigerGraph credentials, enter them in the left sidebar menu 57 | 58 | -------------------------------------------------------------------------------- /docs/views.md: -------------------------------------------------------------------------------- 1 | # Develop graph-app-kit dashboard views 2 | 3 | ## Setup 4 | 5 | ## Recommended: Web authoring 6 | 7 | Though not required, we recommend setting up Jupyter-based shared web authoring. You can proceed without it, such as by live editing from a command-line tool like `vim`. However, if you are interested in a friendlier environment, see the [configuration and integrations section](extend.md). 8 | 9 | ## Live edit 10 | 11 | * Modify Python files in `src/python/views/[your dashboard]/__init__.py`, and in-tool, hit the `rerun` button that appears 12 | * Add new views by adding `views/[your dashboard]/__init__.py` with methods `def info(): return {'name': 'x'}` and `def run(): None` 13 | * Add new dependencies: modify `src/python/requirements-app.txt`, then rerun `docker-compose build` and restart 14 | 15 | ## Toggle views 16 | 17 | Configure which dashboards `AppPicker` includes: 18 | 19 | * Disable individual dashboards: Have a dashboard's `info()` return `{'enabled': False}` 20 | * Create tags and toggle them: 21 | * Tag a dashboard view as part of `src/python/views/[your_app]/__init__.py`: 22 | * `info()`: `{'tags': ['new_app', ...]}` 23 | * Opt in and out of tags: in `src/python/entrypoint.py`: 24 | * `AppPicker(include=['testing', 'new_app'], exclude=['demo'])` 25 | 26 | ## Toggle view CSS defaults 27 | Use the `css` module in your `views`: 28 | 29 | ```python 30 | from css import all_css 31 | def run(): 32 | all_css() 33 | all_css(is_max_main_width=False, is_hide_dev_menu=False) 34 | ``` 35 | 36 | ## Configure site theme 37 | Tweak `src/python/entrypoint.py`: 38 | 39 | ```python 40 | page_title_str = "Graph dashboard" 41 | st.beta_set_page_config( 42 | layout="centered", # Can be "centered" or "wide". In the future also "dashboard", etc. 43 | initial_sidebar_state="auto", # Can be "auto", "expanded", "collapsed" 44 | page_title=page_title_str, # String or None. Strings get appended with "• Streamlit". 45 | page_icon='none.png', # String, anything supported by st.image, or None. 46 | ) 47 | ``` 48 | 49 | ## Extend! 50 | 51 | You are now ready to [add integrations](extend.md) like database connections, authentication, and live editing. -------------------------------------------------------------------------------- /src/bootstraps/core/graphistry.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | ### Full graphistry/st setup: 5 | ### caddy, jupyter, pub/priv st, ... 6 | ### Assumes Graphistry GPU AMI: docker, ...
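### Flow (a summary of the steps below): install CloudFormation helpers, build
### containers, prepopulate public/private notebook views, wait for Graphistry
### to report healthy, swap in Caddy 1, create a Graphistry service account,
### write the .env files, then launch streamlit-pub/streamlit-priv on :8501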
7 | 8 | cd ../scripts 9 | 10 | SCRIPT="Full graph-app-kit for Graphistry" 11 | ./hello-start.sh "$SCRIPT" 12 | 13 | export GRAPHISTRY_HOME=${GRAPHISTRY_HOME:-/home/ubuntu/graphistry} 14 | export NOTEBOOKS_HOME=${NOTEBOOKS_HOME:-${GRAPHISTRY_HOME}/data/notebooks} 15 | export GAK_PUBLIC=/home/ubuntu/graph-app-kit/public/graph-app-kit 16 | export GAK_PRIVATE=/home/ubuntu/graph-app-kit/private/graph-app-kit 17 | 18 | echo 19 | echo "----- SETTINGS ------" 20 | echo " * GRAPHISTRY_HOME: $GRAPHISTRY_HOME" 21 | echo " * NOTEBOOKS_HOME: $NOTEBOOKS_HOME" 22 | echo "---------------------" 23 | source instance-id.sh 24 | echo " * INSTANCE_ID: $INSTANCE_ID" 25 | echo 26 | 27 | ./cloudformation-bootstrap.sh 28 | ./docker-container-build.sh 29 | ./prepopulate-notebooks.sh graph-app-kit/public views ubuntu 30 | ./prepopulate-notebooks.sh graph-app-kit/private views ubuntu 31 | ./graphistry-wait-healthy.sh 32 | ./swap-caddy.sh 33 | source ./graphistry-service-account.sh 34 | echo "Got SERVICE_USER ${SERVICE_USER}, SERVICE_PASS" 35 | 36 | echo '===== Configuring graph-app-kit with Graphistry service account =====' 37 | ( \ 38 | cd ../../docker \ 39 | && echo "GRAPHISTRY_USERNAME=${SERVICE_USER}" \ 40 | && echo "GRAPHISTRY_PASSWORD=${SERVICE_PASS}" \ 41 | && echo "GRAPHISTRY_PROTOCOL=http" \ 42 | && echo "GRAPHISTRY_SERVER=`curl http://169.254.169.254/latest/meta-data/public-ipv4`" \ 43 | ) | sudo tee ../../docker/.env 44 | 45 | echo '----- Reuse public graph-app-kit .env as private .env' 46 | sudo cp "${GAK_PUBLIC}/src/docker/.env" "${GAK_PRIVATE}/src/docker/.env" 47 | 48 | echo '----- Finish pub vs. priv .env specialization' 49 | ( \ 50 | echo "BASE_PATH=public/dash/" \ 51 | && echo "GRAPH_VIEWS=${GRAPHISTRY_HOME}/data/notebooks/graph-app-kit/public/views" \ 52 | ) | sudo tee -a "${GAK_PUBLIC}/src/docker/.env" 53 | ( \ 54 | echo "BASE_PATH=private/dash/" \ 55 | && echo "GRAPH_VIEWS=${GRAPHISTRY_HOME}/data/notebooks/graph-app-kit/private/views" \ 56 | ) | sudo tee -a "${GAK_PRIVATE}/src/docker/.env" 57 | 58 | echo '----- Launching graph-app-kit as streamlit-pub/priv:8501' 59 | ( \ 60 | cd "${GAK_PUBLIC}/src/docker" \ 61 | && sudo docker-compose -p pub run -d --name streamlit-pub streamlit \ 62 | ) 63 | ( \ 64 | cd "${GAK_PRIVATE}/src/docker" \ 65 | && sudo docker-compose -p priv run -d --name streamlit-priv streamlit \ 66 | ) 67 | 68 | ./hello-end.sh "$SCRIPT" -------------------------------------------------------------------------------- /src/bootstraps/core/graphistry.yml: -------------------------------------------------------------------------------- 1 | AWSTemplateFormatVersion: "2010-09-09" 2 | 3 | Metadata: 4 | AWS::CloudFormation::Interface: 5 | ParameterGroups: 6 | - 7 | Label: 8 | default: "Network Configuration" 9 | Parameters: 10 | - GraphAppKitVPC 11 | - GraphAppKitSubnet 12 | - 13 | Label: 14 | default: "Server Configuration" 15 | Parameters: 16 | - GraphAppKitKeyPair 17 | 18 | Parameters: 19 | GraphAppKitKeyPair: 20 | Type: AWS::EC2::KeyPair::KeyName 21 | Description: "Enter an EC2 Key Pair for this instance" 22 | GraphAppKitVPC: 23 | Type: AWS::EC2::VPC::Id 24 | Description: "Enter a web-accessible VPC. Ex: vpc-abc" 25 | GraphAppKitSubnet: 26 | Type: AWS::EC2::Subnet::Id 27 | Description: "Enter a public subnet within the previously selected VPC. Ex: subnet-123" 28 | InstanceType: 29 | Type: String 30 | Default: 'g4dn.xlarge' 31 | Description: "Enter a RAPIDS.ai-compatible GPU instance type.
Ex: g4dn.xlarge" 32 | 33 | #2.36.6-11.0 34 | #Generated with: src/bootstraps/scripts/graphistry-ami-list.sh 35 | Mappings: 36 | RegionMap: 37 | eu-north-1: 38 | "HVM64": "ami-0b2592478df250046" 39 | ap-south-1: 40 | "HVM64": "ami-0bfb82d22c366410f" 41 | eu-west-3: 42 | "HVM64": "ami-097d478a5770e380e" 43 | eu-west-2: 44 | "HVM64": "ami-043e20df39a9045af" 45 | eu-west-1: 46 | "HVM64": "ami-093602dc6405d5520" 47 | ap-northeast-2: 48 | "HVM64": "ami-0cc634c9ca40a31ed" 49 | ap-northeast-1: 50 | "HVM64": "ami-09c66f9f92a9764f0" 51 | sa-east-1: 52 | "HVM64": "ami-03eb2854f7ed11992" 53 | ca-central-1: 54 | "HVM64": "ami-0a09738b9642a7c6a" 55 | ap-southeast-1: 56 | "HVM64": "ami-0815657491a4418de" 57 | ap-southeast-2: 58 | "HVM64": "ami-03c592c67b2291c2c" 59 | eu-central-1: 60 | "HVM64": "ami-0a353808c57350e8a" 61 | us-east-1: 62 | "HVM64": "ami-087af94bb9a4c396e" 63 | us-east-2: 64 | "HVM64": "ami-053efa142e054478a" 65 | us-west-1: 66 | "HVM64": "ami-012c1c8f9dbb1a2c6" 67 | us-west-2: 68 | "HVM64": "ami-0036548682eb8b53d" 69 | 70 | Resources: 71 | GraphAppKitSecurityGroup: 72 | Type: AWS::EC2::SecurityGroup 73 | Properties: 74 | GroupDescription: Graphistry Graph App Kit Access 8501 22 75 | Tags: 76 | - Key: "name" 77 | Value: "graph-app-kit-quicklaunch-a" 78 | - Key: "kind" 79 | Value: "graph-app-kit" 80 | VpcId: 81 | Ref: GraphAppKitVPC 82 | SecurityGroupIngress: 83 | - IpProtocol: tcp 84 | FromPort: 8501 85 | ToPort: 8501 86 | CidrIp: 0.0.0.0/0 87 | - IpProtocol: tcp 88 | FromPort: 22 89 | ToPort: 22 90 | CidrIp: 0.0.0.0/0 91 | - IpProtocol: tcp 92 | FromPort: 80 93 | ToPort: 80 94 | CidrIp: 0.0.0.0/0 95 | - IpProtocol: tcp 96 | FromPort: 443 97 | ToPort: 443 98 | CidrIp: 0.0.0.0/0 99 | GraphAppKitEC2: 100 | Type: AWS::EC2::Instance 101 | Properties: 102 | Tags: 103 | - Key: "kind" 104 | Value: "graph-app-kit-full" 105 | - Key: Name 106 | Value: Graphistry-Graph-App-Kit 107 | ImageId: !FindInMap [RegionMap, !Ref "AWS::Region", HVM64] 108 | InstanceType: !Ref InstanceType 109 | SubnetId: 110 | Ref: GraphAppKitSubnet 111 | SecurityGroupIds: 112 | - 113 | Ref: GraphAppKitSecurityGroup 114 | KeyName: 115 | Ref: GraphAppKitKeyPair 116 | BlockDeviceMappings: 117 | - DeviceName: /dev/xvda 118 | Ebs: 119 | VolumeType: gp2 120 | VolumeSize: '60' 121 | DeleteOnTermination: 'true' 122 | Encrypted: 'false' 123 | UserData: 124 | Fn::Base64: 125 | Fn::Join: 126 | - '' 127 | - - "#!/bin/bash\n" 128 | - "set -ex\n" 129 | - "sudo usermod -a -G docker ubuntu\n" 130 | - "echo '===== System check'\n" 131 | - "nvidia-smi\n" 132 | - "echo '===== Downloading graph-app-kit'\n" 133 | - "cd /home/ubuntu\n" 134 | - "mkdir -p graph-app-kit/public\n" 135 | - "cd graph-app-kit/public\n" 136 | - "git clone https://github.com/graphistry/graph-app-kit.git\n" 137 | - "cp -r /home/ubuntu/graph-app-kit/public /home/ubuntu/graph-app-kit/private\n" 138 | - "echo '===== Running graph-app-kit bootstraps'\n" 139 | - "cd /home/ubuntu/graph-app-kit/public/graph-app-kit/src/bootstraps/core\n" 140 | - "./graphistry.sh\n" 141 | - "/opt/aws/bin/cfn-signal -e $? 
--stack " 142 | - Ref: AWS::StackName 143 | - " --resource GraphAppKitEC2 --region " 144 | - Ref: AWS::Region 145 | - "\n" 146 | CreationPolicy: 147 | ResourceSignal: 148 | Count: 1 149 | Timeout: "PT20M" 150 | 151 | Outputs: 152 | PublicIp: 153 | Description: GraphAppKitEC2 Public IP 154 | Value: !GetAtt GraphAppKitEC2.PublicIp 155 | Export: 156 | Name: !Sub "${AWS::StackName}-PublicIp" -------------------------------------------------------------------------------- /src/bootstraps/core/minimal.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | ### Minimal st setup 5 | ### Assume empty AMI, Graphistry Hub 6 | 7 | cd ../scripts 8 | 9 | SCRIPT="Minimal graph-app-kit for Graphistry" 10 | ./hello-start.sh "$SCRIPT" 11 | 12 | export GRAPHISTRY_USERNAME=$1 13 | export GRAPHISTRY_PASSWORD=$2 14 | export GRAPHISTRY_HOST=$3 15 | export GRAPHISTRY_PROTOCOL=$4 16 | export GAK_PUBLIC=/home/ec2-user/graph-app-kit/public/graph-app-kit 17 | 18 | echo 19 | echo "----- SETTINGS ------" 20 | echo " * GRAPHISTRY_USERNAME (\$1): $GRAPHISTRY_USERNAME" 21 | echo " * GRAPHISTRY_PASSWORD (\$2): ***" 22 | echo " * GRAPHISTRY_HOST (\$3): $GRAPHISTRY_HOST" 23 | echo " * GRAPHISTRY_PROTOCOL (\$4): $GRAPHISTRY_PROTOCOL" 24 | echo " * GAK_PUBLIC: $GAK_PUBLIC" 25 | echo "---------------------" 26 | source instance-id.sh 27 | echo " * INSTANCE_ID: $INSTANCE_ID" 28 | echo 29 | 30 | ./docker-aws.sh 31 | DC_ALIAS='./dc.cpu' ./docker-container-build.sh 32 | 33 | echo '===== Configuring graph-app-kit with Graphistry account =====' 34 | ( \ 35 | cd ../../docker \ 36 | && echo "ST_PUBLIC_PORT=80" \ 37 | && echo "BASE_PATH=public/dash/" \ 38 | && echo "GRAPHISTRY_USERNAME=${GRAPHISTRY_USERNAME}" \ 39 | && echo "GRAPHISTRY_PASSWORD=${GRAPHISTRY_PASSWORD}" \ 40 | && echo "GRAPHISTRY_PROTOCOL=${GRAPHISTRY_PROTOCOL}" \ 41 | && echo "GRAPHISTRY_SERVER=${GRAPHISTRY_HOST}" \ 42 | ) | sudo tee ../../docker/.env 43 | 44 | echo '----- Launching graph-app-kit as streamlit-pub:8501' 45 | ( \ 46 | cd "${GAK_PUBLIC}/src/docker" \ 47 | && (sudo ./dc.cpu up -d streamlit) \ 48 | ) 49 | 50 | ./hello-end.sh "$SCRIPT" -------------------------------------------------------------------------------- /src/bootstraps/core/minimal.yml: -------------------------------------------------------------------------------- 1 | AWSTemplateFormatVersion: "2010-09-09" 2 | 3 | Metadata: 4 | AWS::CloudFormation::Interface: 5 | ParameterGroups: 6 | - 7 | Label: 8 | default: "Network Configuration" 9 | Parameters: 10 | - GraphAppKitVPC 11 | - GraphAppKitSubnet 12 | - 13 | Label: 14 | default: "Server Configuration" 15 | Parameters: 16 | - GraphAppKitKeyPair 17 | 18 | Parameters: 19 | GraphAppKitKeyPair: 20 | Type: AWS::EC2::KeyPair::KeyName 21 | Description: "Enter an EC2 Key Pair for this instance" 22 | GraphAppKitVPC: 23 | Type: AWS::EC2::VPC::Id 24 | Description: "Enter a web-accessible VPC. Ex: vpc-abc" 25 | GraphAppKitSubnet: 26 | Type: AWS::EC2::Subnet::Id 27 | Description: "Enter a public subnet within the previously selected VPC. Ex: subnet-123" 28 | InstanceType: 29 | Type: String 30 | Default: 't3.medium' 31 | Description: "Enter preferred CPU type. Ex: t3.medium" 32 | LatestAmiId: 33 | Type: 'AWS::SSM::Parameter::Value' 34 | Default: '/aws/service/ami-amazon-linux-latest/amzn2-ami-hvm-x86_64-gp2' 35 | Description: 'Base AMI' 36 | GraphistryUserName: 37 | Type: String 38 | Description: 'Graphistry account username. Get free at https://hub.graphistry.com .' 
39 | GraphistryPassword: 40 | NoEcho: true 41 | Type: String 42 | Description: 'Graphistry account password. Get free at https://hub.graphistry.com .' 43 | GraphistryHost: 44 | Type: String 45 | Default: "hub.graphistry.com" 46 | Description: 'Graphistry server domain. Ex: hub.graphistry.com' 47 | GraphistryProtocol: 48 | Type: String 49 | Default: "https" 50 | Description: 'Graphistry server protocol (requires certificate installed for TLS). Ex: http' 51 | 52 | 53 | Resources: 54 | GraphAppKitSecurityGroup: 55 | Type: AWS::EC2::SecurityGroup 56 | Properties: 57 | GroupDescription: Graphistry Graph App Kit Access 8501 22 58 | Tags: 59 | - Key: "kind" 60 | Value: "graph-app-kit-minimal" 61 | - Key: Name 62 | Value: Graph-App-Kit-Minimal 63 | VpcId: 64 | Ref: GraphAppKitVPC 65 | SecurityGroupIngress: 66 | - IpProtocol: tcp 67 | FromPort: 8501 68 | ToPort: 8501 69 | CidrIp: 0.0.0.0/0 70 | - IpProtocol: tcp 71 | FromPort: 22 72 | ToPort: 22 73 | CidrIp: 0.0.0.0/0 74 | - IpProtocol: tcp 75 | FromPort: 80 76 | ToPort: 80 77 | CidrIp: 0.0.0.0/0 78 | - IpProtocol: tcp 79 | FromPort: 443 80 | ToPort: 443 81 | CidrIp: 0.0.0.0/0 82 | GraphAppKitEC2: 83 | Type: AWS::EC2::Instance 84 | Properties: 85 | Tags: 86 | - Key: "name" 87 | Value: "graph-app-kit-quicklaunch-a" 88 | - Key: "kind" 89 | Value: "graph-app-kit" 90 | ImageId: !Ref LatestAmiId 91 | InstanceType: !Ref InstanceType 92 | SubnetId: 93 | Ref: GraphAppKitSubnet 94 | SecurityGroupIds: 95 | - 96 | Ref: GraphAppKitSecurityGroup 97 | KeyName: 98 | Ref: GraphAppKitKeyPair 99 | BlockDeviceMappings: 100 | - DeviceName: /dev/xvda 101 | Ebs: 102 | VolumeType: gp2 103 | VolumeSize: '60' 104 | DeleteOnTermination: 'true' 105 | Encrypted: 'false' 106 | UserData: 107 | Fn::Base64: 108 | Fn::Join: 109 | - '' 110 | - - "#!/bin/bash\n" 111 | - "set -ex\n" 112 | - "sudo yum install -y git\n" 113 | - "cd /home/ec2-user\n" 114 | - "echo '===== Downloading graph-app-kit'\n" 115 | - "mkdir -p graph-app-kit/public\n" 116 | - "cd graph-app-kit/public\n" 117 | - "git clone https://github.com/graphistry/graph-app-kit.git\n" 118 | - "echo '===== Running graph-app-kit bootstraps'\n" 119 | - "cd /home/ec2-user/graph-app-kit/public/graph-app-kit/src/bootstraps/core\n" 120 | - "./minimal.sh \"" 121 | - Ref: GraphistryUserName 122 | - "\" \"" 123 | - Ref: GraphistryPassword 124 | - "\" \"" 125 | - Ref: GraphistryHost 126 | - "\" \"" 127 | - Ref: GraphistryProtocol 128 | - "\"\n" 129 | - "/opt/aws/bin/cfn-signal -e $? --stack " 130 | - Ref: AWS::StackName 131 | - " --resource GraphAppKitEC2 --region " 132 | - Ref: AWS::Region 133 | - "\n" 134 | CreationPolicy: 135 | ResourceSignal: 136 | Count: 1 137 | Timeout: "PT20M" 138 | 139 | Outputs: 140 | PublicIp: 141 | Description: GraphAppKitEC2 Public IP 142 | Value: !GetAtt GraphAppKitEC2.PublicIp 143 | Export: 144 | Name: !Sub "${AWS::StackName}-PublicIp" -------------------------------------------------------------------------------- /src/bootstraps/neptune/graphistry.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | ### Full neptune/graphistry/st setup: 5 | ### caddy, jupyter, pub/priv st, ... 6 | ### Assumes Graphistry GPU AMI: docker, ... 
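### Args: $1 = Neptune cluster reader endpoint host (exported as NEPTUNE_READER_HOST below)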
7 | 8 | cd ../scripts 9 | 10 | SCRIPT="Full graph-app-kit for Neptune/Graphistry" 11 | ./hello-start.sh "$SCRIPT" 12 | 13 | export GRAPHISTRY_HOME=${GRAPHISTRY_HOME:-/home/ubuntu/graphistry} 14 | export NOTEBOOKS_HOME=${NOTEBOOKS_HOME:-${GRAPHISTRY_HOME}/data/notebooks} 15 | export NEPTUNE_READER_HOST=$1 16 | export GAK_PUBLIC=/home/ubuntu/graph-app-kit/public/graph-app-kit 17 | export GAK_PRIVATE=/home/ubuntu/graph-app-kit/private/graph-app-kit 18 | 19 | echo 20 | echo "----- SETTINGS ------" 21 | echo " * GRAPHISTRY_HOME: $GRAPHISTRY_HOME" 22 | echo " * NOTEBOOKS_HOME: $NOTEBOOKS_HOME" 23 | echo " * NEPTUNE_READER_HOST (\$1): $NEPTUNE_READER_HOST" 24 | echo "---------------------" 25 | source instance-id.sh 26 | echo " * INSTANCE_ID: $INSTANCE_ID" 27 | echo 28 | 29 | ./cloudformation-bootstrap.sh 30 | ./docker-container-build.sh 31 | ./prepopulate-notebooks.sh graph-app-kit/public views ubuntu 32 | ./prepopulate-notebooks.sh graph-app-kit/private views ubuntu 33 | ./graphistry-wait-healthy.sh 34 | ./swap-caddy.sh 35 | source ./graphistry-service-account.sh 36 | echo "Got SERVICE_USER ${SERVICE_USER}, SERVICE_PASS" 37 | 38 | echo '===== Configuring graph-app-kit with Graphistry service account and Neptune =====' 39 | ( \ 40 | cd ../../docker \ 41 | && echo "GRAPH_VIEWS=${GRAPHISTRY_HOME}/data/notebooks/graph-app-kit/public/views" \ 42 | && echo "GRAPHISTRY_USERNAME=${SERVICE_USER}" \ 43 | && echo "GRAPHISTRY_PASSWORD=${SERVICE_PASS}" \ 44 | && echo "GRAPHISTRY_PROTOCOL=http" \ 45 | && echo "GRAPHISTRY_SERVER=`curl http://169.254.169.254/latest/meta-data/public-ipv4`" \ 46 | && echo "NEPTUNE_READER_PROTOCOL=wss" \ 47 | && echo "NEPTUNE_READER_PORT=8182" \ 48 | && echo "NEPTUNE_READER_HOST=$NEPTUNE_READER_HOST" \ 49 | ) | sudo tee ../../docker/.env 50 | 51 | echo '----- Reuse public graph-app-kit .env as private .env' 52 | sudo cp "${GAK_PUBLIC}/src/docker/.env" "${GAK_PRIVATE}/src/docker/.env" 53 | 54 | echo '----- Finish pub vs. priv .env specialization' 55 | echo "BASE_PATH=public/dash/" | sudo tee -a "${GAK_PUBLIC}/src/docker/.env" 56 | echo "BASE_PATH=private/dash/" | sudo tee -a "${GAK_PRIVATE}/src/docker/.env" 57 | 58 | echo '----- Launching graph-app-kit as streamlit-pub/priv:8501' 59 | ( \ 60 | cd "${GAK_PUBLIC}/src/docker" \ 61 | && sudo docker-compose -p pub run -d --name streamlit-pub streamlit \ 62 | ) 63 | ( \ 64 | cd "${GAK_PRIVATE}/src/docker" \ 65 | && sudo docker-compose -p priv run -d --name streamlit-priv streamlit \ 66 | ) 67 | 68 | ./hello-end.sh "$SCRIPT" -------------------------------------------------------------------------------- /src/bootstraps/neptune/graphistry.yml: -------------------------------------------------------------------------------- 1 | AWSTemplateFormatVersion: "2010-09-09" 2 | 3 | Metadata: 4 | AWS::CloudFormation::Interface: 5 | ParameterGroups: 6 | - 7 | Label: 8 | default: "Network Configuration" 9 | Parameters: 10 | - GraphAppKitVPC 11 | - GraphAppKitSubnet 12 | - 13 | Label: 14 | default: "Server Configuration" 15 | Parameters: 16 | - GraphAppKitKeyPair 17 | 18 | Parameters: 19 | GraphAppKitKeyPair: 20 | Type: AWS::EC2::KeyPair::KeyName 21 | Description: "Enter an EC2 Key Pair for this instance" 22 | GraphAppKitVPC: 23 | Type: AWS::EC2::VPC::Id 24 | Description: "Enter the VPC where Neptune is presently hosted. Ex: vpc-abc" 25 | GraphAppKitSubnet: 26 | Type: AWS::EC2::Subnet::Id 27 | Description: "Enter a public subnet within the previously selected VPC that also has access to Neptune. 
Ex: subnet-123" 28 | InstanceType: 29 | Type: String 30 | Default: 'g4dn.xlarge' 31 | Description: "Enter a RAPIDS.ai-compatible GPU instance type. Ex: g4dn.xlarge" 32 | NeptuneReaderHost: 33 | Type: String 34 | Description: "Enter the Neptune Cluster Read Endpoint URL. Ex: abc.def.ghi.neptune.amazonaws.com" 35 | 36 | #2.36.6-11.0 37 | #Generated with: src/bootstraps/scripts/graphistry-ami-list.sh 38 | Mappings: 39 | RegionMap: 40 | eu-north-1: 41 | "HVM64": "ami-0b2592478df250046" 42 | ap-south-1: 43 | "HVM64": "ami-0bfb82d22c366410f" 44 | eu-west-3: 45 | "HVM64": "ami-097d478a5770e380e" 46 | eu-west-2: 47 | "HVM64": "ami-043e20df39a9045af" 48 | eu-west-1: 49 | "HVM64": "ami-093602dc6405d5520" 50 | ap-northeast-2: 51 | "HVM64": "ami-0cc634c9ca40a31ed" 52 | ap-northeast-1: 53 | "HVM64": "ami-09c66f9f92a9764f0" 54 | sa-east-1: 55 | "HVM64": "ami-03eb2854f7ed11992" 56 | ca-central-1: 57 | "HVM64": "ami-0a09738b9642a7c6a" 58 | ap-southeast-1: 59 | "HVM64": "ami-0815657491a4418de" 60 | ap-southeast-2: 61 | "HVM64": "ami-03c592c67b2291c2c" 62 | eu-central-1: 63 | "HVM64": "ami-0a353808c57350e8a" 64 | us-east-1: 65 | "HVM64": "ami-087af94bb9a4c396e" 66 | us-east-2: 67 | "HVM64": "ami-053efa142e054478a" 68 | us-west-1: 69 | "HVM64": "ami-012c1c8f9dbb1a2c6" 70 | us-west-2: 71 | "HVM64": "ami-0036548682eb8b53d" 72 | Resources: 73 | GraphAppKitSecurityGroup: 74 | Type: AWS::EC2::SecurityGroup 75 | Properties: 76 | GroupDescription: Graphistry Graph App Kit Access 8501 22 77 | Tags: 78 | - Key: "name" 79 | Value: "graph-app-kit-quicklaunch-a" 80 | - Key: "kind" 81 | Value: "graph-app-kit" 82 | VpcId: 83 | Ref: GraphAppKitVPC 84 | SecurityGroupIngress: 85 | - IpProtocol: tcp 86 | FromPort: 8501 87 | ToPort: 8501 88 | CidrIp: 0.0.0.0/0 89 | - IpProtocol: tcp 90 | FromPort: 22 91 | ToPort: 22 92 | CidrIp: 0.0.0.0/0 93 | - IpProtocol: tcp 94 | FromPort: 80 95 | ToPort: 80 96 | CidrIp: 0.0.0.0/0 97 | - IpProtocol: tcp 98 | FromPort: 443 99 | ToPort: 443 100 | CidrIp: 0.0.0.0/0 101 | GraphAppKitEC2: 102 | Type: AWS::EC2::Instance 103 | Properties: 104 | Tags: 105 | - Key: "kind" 106 | Value: "graph-app-kit-full" 107 | - Key: Name 108 | Value: Graphistry-Graph-App-Kit 109 | ImageId: !FindInMap [RegionMap, !Ref "AWS::Region", HVM64] 110 | InstanceType: !Ref InstanceType 111 | SubnetId: 112 | Ref: GraphAppKitSubnet 113 | SecurityGroupIds: 114 | - 115 | Ref: GraphAppKitSecurityGroup 116 | KeyName: 117 | Ref: GraphAppKitKeyPair 118 | BlockDeviceMappings: 119 | - DeviceName: /dev/xvda 120 | Ebs: 121 | VolumeType: gp2 122 | VolumeSize: '60' 123 | DeleteOnTermination: 'true' 124 | Encrypted: 'false' 125 | UserData: 126 | Fn::Base64: 127 | Fn::Join: 128 | - '' 129 | - - "#!/bin/bash\n" 130 | - "set -ex\n" 131 | - "sudo usermod -a -G docker ubuntu\n" 132 | - "echo '===== System check'\n" 133 | - "nvidia-smi\n" 134 | - "echo '===== Downloading graph-app-kit'\n" 135 | - "cd /home/ubuntu\n" 136 | - "mkdir -p graph-app-kit/public\n" 137 | - "cd graph-app-kit/public\n" 138 | - "git clone https://github.com/graphistry/graph-app-kit.git\n" 139 | - "cp -r /home/ubuntu/graph-app-kit/public /home/ubuntu/graph-app-kit/private\n" 140 | - "echo '===== Running graph-app-kit bootstraps'\n" 141 | - "cd /home/ubuntu/graph-app-kit/public/graph-app-kit/src/bootstraps/neptune\n" 142 | - "./graphistry.sh \"" 143 | - Ref: NeptuneReaderHost 144 | - "\"\n" 145 | - "/opt/aws/bin/cfn-signal -e $? 
--stack " 146 | - Ref: AWS::StackName 147 | - " --resource GraphAppKitEC2 --region " 148 | - Ref: AWS::Region 149 | - "\n" 150 | CreationPolicy: 151 | ResourceSignal: 152 | Count: 1 153 | Timeout: "PT20M" 154 | 155 | Outputs: 156 | PublicIp: 157 | Description: GraphAppKitEC2 Public IP 158 | Value: !GetAtt GraphAppKitEC2.PublicIp 159 | Export: 160 | Name: !Sub "${AWS::StackName}-PublicIp" -------------------------------------------------------------------------------- /src/bootstraps/neptune/minimal.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | ### Minimal st setup 5 | ### Assume empty AMI, Graphistry Hub 6 | 7 | cd ../scripts 8 | 9 | SCRIPT="Minimal graph-app-kit for Neptune/Graphistry" 10 | ./hello-start.sh "$SCRIPT" 11 | 12 | export NEPTUNE_READER_HOST=$1 13 | export GRAPHISTRY_USERNAME=$2 14 | export GRAPHISTRY_PASSWORD=$3 15 | export GRAPHISTRY_HOST=$4 16 | export GRAPHISTRY_PROTOCOL=$5 17 | export GAK_PUBLIC=/home/ec2-user/graph-app-kit/public/graph-app-kit 18 | 19 | echo 20 | echo "----- SETTINGS ------" 21 | echo " * NEPTUNE_READER_HOST (\$1): $NEPTUNE_READER_HOST" 22 | echo " * GRAPHISTRY_USERNAME (\$2): $GRAPHISTRY_USERNAME" 23 | echo " * GRAPHISTRY_PASSWORD (\$3): ***" 24 | echo " * GRAPHISTRY_HOST (\$4): $GRAPHISTRY_HOST" 25 | echo " * GRAPHISTRY_PROTOCOL (\$5): $GRAPHISTRY_PROTOCOL" 26 | echo " * GAK_PUBLIC: $GAK_PUBLIC" 27 | echo "---------------------" 28 | source instance-id.sh 29 | echo " * INSTANCE_ID: $INSTANCE_ID" 30 | echo 31 | 32 | ./docker-aws.sh 33 | ./docker-container-build.sh 34 | 35 | echo '===== Configuring graph-app-kit with Graphistry account and Neptune =====' 36 | ( \ 37 | cd ../../docker \ 38 | && echo "ST_PUBLIC_PORT=80" \ 39 | && echo "BASE_PATH=public/dash/" \ 40 | && echo "GRAPHISTRY_USERNAME=${GRAPHISTRY_USERNAME}" \ 41 | && echo "GRAPHISTRY_PASSWORD=${GRAPHISTRY_PASSWORD}" \ 42 | && echo "GRAPHISTRY_PROTOCOL=${GRAPHISTRY_PROTOCOL}" \ 43 | && echo "GRAPHISTRY_SERVER=${GRAPHISTRY_HOST}" \ 44 | && echo "NEPTUNE_READER_PROTOCOL=wss" \ 45 | && echo "NEPTUNE_READER_PORT=8182" \ 46 | && echo "NEPTUNE_READER_HOST=$NEPTUNE_READER_HOST" \ 47 | ) | sudo tee ../../docker/.env 48 | 49 | echo '----- Launching graph-app-kit as streamlit-pub:8501' 50 | ( \ 51 | cd "${GAK_PUBLIC}/src/docker" \ 52 | && sudo /usr/local/bin/docker-compose up -d streamlit \ 53 | ) 54 | 55 | ./hello-end.sh "$SCRIPT" -------------------------------------------------------------------------------- /src/bootstraps/neptune/minimal.yml: -------------------------------------------------------------------------------- 1 | AWSTemplateFormatVersion: "2010-09-09" 2 | 3 | Metadata: 4 | AWS::CloudFormation::Interface: 5 | ParameterGroups: 6 | - 7 | Label: 8 | default: "Network Configuration" 9 | Parameters: 10 | - GraphAppKitVPC 11 | - GraphAppKitSubnet 12 | - 13 | Label: 14 | default: "Server Configuration" 15 | Parameters: 16 | - GraphAppKitKeyPair 17 | 18 | Parameters: 19 | GraphAppKitKeyPair: 20 | Type: AWS::EC2::KeyPair::KeyName 21 | Description: "Enter an EC2 Key Pair for this instance" 22 | GraphAppKitVPC: 23 | Type: AWS::EC2::VPC::Id 24 | Description: "Enter the VPC where Neptune is presently hosted. Ex: vpc-abc" 25 | GraphAppKitSubnet: 26 | Type: AWS::EC2::Subnet::Id 27 | Description: "Enter a public subnet within the previously selected VPC that also has access to Neptune. Ex: subnet-123" 28 | InstanceType: 29 | Type: String 30 | Default: 't3.medium' 31 | Description: "Enter preferred CPU type. 
Ex: t3.medium" 32 | NeptuneReaderHost: 33 | Type: String 34 | Description: "Enter the Neptune Cluster Read Endpoint URL. Ex: abc.def.ghi.neptune.amazonaws.com" 35 | LatestAmiId: 36 | Type: 'AWS::SSM::Parameter::Value<AWS::EC2::Image::Id>' 37 | Default: '/aws/service/ami-amazon-linux-latest/amzn2-ami-hvm-x86_64-gp2' 38 | Description: 'Base AMI' 39 | GraphistryUserName: 40 | Type: String 41 | Description: 'Graphistry account username. Get free at https://hub.graphistry.com .' 42 | GraphistryPassword: 43 | NoEcho: true 44 | Type: String 45 | Description: 'Graphistry account password. Get free at https://hub.graphistry.com .' 46 | GraphistryHost: 47 | Type: String 48 | Default: "hub.graphistry.com" 49 | Description: 'Graphistry server domain. Ex: hub.graphistry.com' 50 | GraphistryProtocol: 51 | Type: String 52 | Default: "https" 53 | Description: 'Graphistry server protocol (requires certificate installed for TLS). Ex: http' 54 | 55 | 56 | Resources: 57 | GraphAppKitSecurityGroup: 58 | Type: AWS::EC2::SecurityGroup 59 | Properties: 60 | GroupDescription: Graphistry Graph App Kit Access 8501 22 61 | Tags: 62 | - Key: "kind" 63 | Value: "graph-app-kit-minimal" 64 | - Key: Name 65 | Value: Graph-App-Kit-Minimal 66 | VpcId: 67 | Ref: GraphAppKitVPC 68 | SecurityGroupIngress: 69 | - IpProtocol: tcp 70 | FromPort: 8501 71 | ToPort: 8501 72 | CidrIp: 0.0.0.0/0 73 | - IpProtocol: tcp 74 | FromPort: 22 75 | ToPort: 22 76 | CidrIp: 0.0.0.0/0 77 | - IpProtocol: tcp 78 | FromPort: 80 79 | ToPort: 80 80 | CidrIp: 0.0.0.0/0 81 | - IpProtocol: tcp 82 | FromPort: 443 83 | ToPort: 443 84 | CidrIp: 0.0.0.0/0 85 | GraphAppKitEC2: 86 | Type: AWS::EC2::Instance 87 | Properties: 88 | Tags: 89 | - Key: "name" 90 | Value: "graph-app-kit-quicklaunch-a" 91 | - Key: "kind" 92 | Value: "graph-app-kit" 93 | ImageId: !Ref LatestAmiId 94 | InstanceType: !Ref InstanceType 95 | SubnetId: 96 | Ref: GraphAppKitSubnet 97 | SecurityGroupIds: 98 | - 99 | Ref: GraphAppKitSecurityGroup 100 | KeyName: 101 | Ref: GraphAppKitKeyPair 102 | BlockDeviceMappings: 103 | - DeviceName: /dev/xvda 104 | Ebs: 105 | VolumeType: gp2 106 | VolumeSize: '60' 107 | DeleteOnTermination: 'true' 108 | Encrypted: 'false' 109 | UserData: 110 | Fn::Base64: 111 | Fn::Join: 112 | - '' 113 | - - "#!/bin/bash\n" 114 | - "set -ex\n" 115 | - "sudo yum install -y git\n" 116 | - "cd /home/ec2-user\n" 117 | - "echo '===== Downloading graph-app-kit'\n" 118 | - "mkdir -p graph-app-kit/public\n" 119 | - "cd graph-app-kit/public\n" 120 | - "git clone https://github.com/graphistry/graph-app-kit.git\n" 121 | - "echo '===== Running graph-app-kit bootstraps'\n" 122 | - "cd /home/ec2-user/graph-app-kit/public/graph-app-kit/src/bootstraps/neptune\n" 123 | - "./minimal.sh \"" 124 | - Ref: NeptuneReaderHost 125 | - "\" \"" 126 | - Ref: GraphistryUserName 127 | - "\" \"" 128 | - Ref: GraphistryPassword 129 | - "\" \"" 130 | - Ref: GraphistryHost 131 | - "\" \"" 132 | - Ref: GraphistryProtocol 133 | - "\"\n" 134 | - "/opt/aws/bin/cfn-signal -e $?
--stack " 135 | - Ref: AWS::StackName 136 | - " --resource GraphAppKitEC2 --region " 137 | - Ref: AWS::Region 138 | - "\n" 139 | CreationPolicy: 140 | ResourceSignal: 141 | Count: 1 142 | Timeout: "PT20M" 143 | 144 | Outputs: 145 | PublicIp: 146 | Description: GraphAppKitEC2 Public IP 147 | Value: !GetAtt GraphAppKitEC2.PublicIp 148 | Export: 149 | Name: !Sub "${AWS::StackName}-PublicIp" -------------------------------------------------------------------------------- /src/bootstraps/scripts/cloudformation-bootstrap.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -ex 3 | 4 | SCRIPT="Installing CloudFormation Bootstrap" 5 | ./hello-start.sh "$SCRIPT" 6 | 7 | sudo apt-get update -y 8 | sudo apt-get install -y python-pip python-setuptools 9 | sudo mkdir -p /opt/aws/bin 10 | sudo python /usr/lib/python2.7/dist-packages/easy_install.py --script-dir /opt/aws/bin https://s3.amazonaws.com/cloudformation-examples/aws-cfn-bootstrap-latest.tar.gz 11 | 12 | ./hello-end.sh "$SCRIPT" -------------------------------------------------------------------------------- /src/bootstraps/scripts/docker-aws.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -ex 3 | 4 | SCRIPT="Install docker for AWS" 5 | ./hello-start.sh "$SCRIPT" 6 | 7 | sudo amazon-linux-extras install -y docker 8 | sudo service docker start 9 | sudo usermod -a -G docker ec2-user 10 | sudo chkconfig docker on 11 | 12 | sudo curl -L https://github.com/docker/compose/releases/latest/download/docker-compose-$(uname -s)-$(uname -m) -o /usr/local/bin/docker-compose 13 | sudo chmod +x /usr/local/bin/docker-compose 14 | docker-compose version 15 | 16 | ./hello-end.sh "$SCRIPT" -------------------------------------------------------------------------------- /src/bootstraps/scripts/docker-container-build.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -ex 3 | 4 | SCRIPT="Build graph-app-kit docker" 5 | DC_ALIAS=${DC_ALIAS:-docker-compose} 6 | ./hello-start.sh "$SCRIPT" 7 | 8 | ( cd ../../docker && $DC_ALIAS build ) 9 | 10 | ./hello-end.sh "$SCRIPT" -------------------------------------------------------------------------------- /src/bootstraps/scripts/graphistry-ami-list.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | # 5 | # List Graphistry AMIs for yml 6 | # 7 | # Run: $ VERSION=2.36.6-11.0 ./graphistry-ami-list.sh 8 | # 9 | # Output: Non-zero return code on failure, and on success: 10 | # 11 | #eu-north-1: 12 | # "HVM64": "ami-007f7e0cbb31804ca" 13 | #ap-south-1: 14 | # "HVM64": "ami-0771ea144a8bcc05b" 15 | #eu-west-3: 16 | # "HVM64": "ami-0bf29fec2ad97b43b" 17 | # ... 
18 | # 19 | # ------------ 20 | 21 | AWS=* 22 | BASE=graphistry-standalone-2???-??-??T??-??-??Z-v 23 | VERSION=${VERSION:-2.35.9-11.0} 24 | SUFFIX=* 25 | GREP_INCLUDE="aws-marketplace/graphistry-standalone" # There were some surprise namespaces 26 | 27 | ## {"ImageId": ..., "ImageLocation": ..., "region": ...}* 28 | IMAGES=$( 29 | for region in `aws ec2 describe-regions --output text | cut -f4` 30 | do 31 | aws ec2 describe-images \ 32 | --region $region \ 33 | --owners self 679593333241 \ 34 | --filters "Name=name,Values=${AWS}${BASE}${VERSION}${SUFFIX}" \ 35 | | jq -c "(.Images[] | {ImageId: .ImageId, ImageLocation: .ImageLocation, "region": \"$region\"})" \ 36 | | grep "${GREP_INCLUDE}" 37 | done 38 | ) 39 | 40 | #echo "IMAGES: $IMAGES" 41 | #echo "IMAGES: $( echo "$IMAGES" | jq -r . )" 42 | 43 | #From ^^^, we want: 44 | #Mappings: 45 | # RegionMap: 46 | # us-east-1: 47 | # "HVM64": "ami-0758d945357560324" 48 | for row in `echo "$IMAGES"`; do 49 | REGION=$(echo $row | jq -r .region) 50 | IMAGE=$(echo $row | jq -r .ImageId) 51 | LOC=$(echo $row | jq -r .ImageLocation) 52 | echo "${REGION}:" 53 | echo " \"HVM64\": \"$IMAGE\"" 54 | #echo " ($LOC)" 55 | done 56 | -------------------------------------------------------------------------------- /src/bootstraps/scripts/graphistry-service-account.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | SCRIPT="Create Graphistry service account" 5 | ./hello-start.sh "$SCRIPT" 6 | 7 | 8 | ### Graphistry should already add admin / on boot 9 | ( \ 10 | cd "${GRAPHISTRY_HOME}" \ 11 | && ( \ 12 | until ( curl -fsS http://localhost/streamgl-gpu/secondary/gpu/health > /dev/null ); \ 13 | do ( docker-compose ps && sleep 1 ); \ 14 | done \ 15 | ) 16 | ) 17 | 18 | ## Service account 19 | export SERVICE_USER=graphappkit 20 | export SERVICE_PASS="${INSTANCE_ID}_${RANDOM}" 21 | ADD_USER_SCRIPT="from nexus.users.models import User; user=User.objects.create_superuser(username='${SERVICE_USER}', email='root@amazon.com', password='${SERVICE_PASS}', name='${SERVICE_USER}', is_active=True); print('made service account ${SERVICE_USER}')" 22 | VERIFY_USER_SCRIPT="from allauth.account.models import EmailAddress; e = EmailAddress.objects.create(user=user, email='root@amazon.com', primary=True, verified=True); e.save(); print('verified user')" 23 | POST_SCRIPT="CELERY_BROKER_URL=zz python manage.py shell && echo done || { echo fail && exit 1; }" 24 | 25 | ( \ 26 | cd "${GRAPHISTRY_HOME}" \ 27 | && docker-compose exec -T nexus \ 28 | bash -c \ 29 | "source activate rapids && echo \"${ADD_USER_SCRIPT}; ${VERIFY_USER_SCRIPT}\" | ${POST_SCRIPT}" \ 30 | ) 31 | 32 | ./hello-end.sh "$SCRIPT" -------------------------------------------------------------------------------- /src/bootstraps/scripts/graphistry-wait-healthy.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -ex 3 | 4 | SCRIPT="Wait Graphistry docker containers healthy" 5 | ./hello-start.sh "$SCRIPT" 6 | 7 | 8 | ( \ 9 | cd "${GRAPHISTRY_HOME}" \ 10 | && for i in `sudo docker-compose ps --services`; do ( \ 11 | ( \ 12 | until ( \ 13 | [[ '"healthy"' == $(sudo docker inspect "graphistry_${i}_1" --format "{{json .State.Health.Status}}") ]] \ 14 | ); do ( \ 15 | echo "waiting on $i (5s)" \ 16 | && sudo docker-compose ps \ 17 | && sleep 5 \ 18 | ); done \ 19 | ) && echo "healthy $i" \ 20 | ); done \ 21 | ) 22 | 23 | echo "--- Graphistry status after healthy waiting ---" 24 | ( cd "${GRAPHISTRY_HOME}" && sudo
docker-compose ps ) 25 | 26 | ./hello-end.sh "$SCRIPT" -------------------------------------------------------------------------------- /src/bootstraps/scripts/hello-end.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -ex 3 | 4 | echo 5 | echo 6 | echo "########################################" 7 | echo "##" 8 | echo "## Finished: $1" 9 | echo "##" 10 | echo "########################################" 11 | echo 12 | echo -------------------------------------------------------------------------------- /src/bootstraps/scripts/hello-start.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -ex 3 | 4 | echo 5 | echo 6 | echo "########################################" 7 | echo "##" 8 | echo "## Start: $1" 9 | echo "##" 10 | echo "########################################" 11 | echo 12 | echo -------------------------------------------------------------------------------- /src/bootstraps/scripts/instance-id.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -ex 3 | 4 | SCRIPT="Get AWS Instance ID" 5 | ./hello-start.sh "$SCRIPT" 6 | 7 | export INSTANCE_ID=$( curl -s http://169.254.169.254/latest/meta-data/instance-id ) 8 | echo "Exported INSTANCE_ID: $INSTANCE_ID" 9 | 10 | ./hello-end.sh "$SCRIPT" -------------------------------------------------------------------------------- /src/bootstraps/scripts/prepopulate-notebooks.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -ex 3 | 4 | VIEWS_PATH="$1" 5 | VIEW_FOLDER_NAME="$2" 6 | NB_OWNER="$3" 7 | 8 | SCRIPT="Prepopulating notebooks (${NOTEBOOKS_HOME}/${VIEWS_PATH}/${VIEW_FOLDER_NAME})" 9 | ./hello-start.sh "$SCRIPT" 10 | 11 | echo "---- SETTINGS ----" 12 | echo "* NOTEBOOKS_HOME: $NOTEBOOKS_HOME" 13 | echo "* VIEWS_PATH: $VIEWS_PATH" 14 | echo "* VIEW_FOLDER_NAME: $VIEW_FOLDER_NAME" 15 | echo "* NB_OWNER: $NB_OWNER" 16 | 17 | mkdir -p "${NOTEBOOKS_HOME}/${VIEWS_PATH}" 18 | cp -r ../../python/views "${NOTEBOOKS_HOME}/${VIEWS_PATH}/${VIEW_FOLDER_NAME}" 19 | 20 | sudo chown -R "${NB_OWNER}" "${NOTEBOOKS_HOME}/${VIEWS_PATH}" 21 | 22 | ./hello-end.sh "$SCRIPT" -------------------------------------------------------------------------------- /src/bootstraps/scripts/swap-caddy.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -ex 3 | 4 | ## Graphistry now uses Caddy2, and until Caddy 2 supports simple auth, g-a-k still sticks with Caddy1 5 | ## ... 
So we stop Graphistry's Caddy 2 and put in a docker-compose of its old 6 | 7 | CADDY_FILENAME=${CADDY_FILENAME:-full.Caddyfile} 8 | CADDY_COMPOSE_FILENAME=${CADDY_COMPOSE_FILENAME:-docker-compose.gak.graphistry.yml} 9 | 10 | SCRIPT="Swap Caddyfile" 11 | ./hello-start.sh "$SCRIPT" 12 | 13 | echo "* Using CADDY_FILENAME: $CADDY_FILENAME" 14 | 15 | sudo cp "../../caddy/${CADDY_COMPOSE_FILENAME}" "${GRAPHISTRY_HOME}/${CADDY_COMPOSE_FILENAME}" 16 | sudo cp "../../caddy/${CADDY_FILENAME}" "${GRAPHISTRY_HOME}/data/config/Caddyfile" 17 | ( \ 18 | cd "${GRAPHISTRY_HOME}" \ 19 | && sudo docker-compose stop caddy \ 20 | && sudo docker-compose -f "${CADDY_COMPOSE_FILENAME}" up -d caddy \ 21 | && sudo docker-compose ps \ 22 | && sudo docker-compose -f "${CADDY_COMPOSE_FILENAME}" ps \ 23 | ) 24 | 25 | ./hello-end.sh "$SCRIPT" -------------------------------------------------------------------------------- /src/caddy/Caddyfile: -------------------------------------------------------------------------------- 1 | ########################################################################################## 2 | ### # 3 | ### CUSTOM CADDYFILE # 4 | ### # 5 | ### Caddy proxy configuration # 6 | ### # 7 | ### -- Examples: See Caddyfile.default and official Caddy 1 docs # 8 | ### # 9 | ### -- Relaunch: # 10 | ### # 11 | ### docker-compose stop caddy && docker-compose up -d # 12 | ### # 13 | ########################################################################################## 14 | 15 | ### Automatic TLS certificates with LetsEncrypt 16 | :80, a.bc.com { 17 | tls { 18 | max_certs 100 19 | } 20 | 21 | #your regular app, e.g., Graphistry server 22 | #proxy / nginx:80 { 23 | # except /dashboard 24 | # websocket 25 | # transparent 26 | #} 27 | 28 | #streamlit 29 | #check `docker ps` as `streamlit:8501` may instead be something like `streamlit-pub_streamlit_1:8502` 30 | proxy /dashboard streamlit:8501 { 31 | websocket 32 | transparent 33 | } 34 | 35 | #streamlit static assets (check browser console if page loads but not assets like images) 36 | #proxy /assets streamlit:8501 { 37 | # websocket 38 | # transparent 39 | #} 40 | 41 | #optionally auth 42 | #reauth { 43 | # path /dashboard 44 | # except /static 45 | # upstream url=http://nginx/django-json-auth,timeout=20s,follow=false,cookies=true 46 | # failure redirect target=/accounts/login/?next={uri} 47 | #} 48 | 49 | } 50 | 51 | 52 | ### HTTP 53 | ### Use when IP-only / LetsEncrypt-prohibited domain (Ex: AWS URLs) 54 | #:80 { 55 | # 56 | #} 57 | 58 | ### Manual TLS 59 | #https://your.site.ngo:443 { 60 | # tls /root/.caddy/my.crt /root/.caddy/my.key 61 | #} 62 | -------------------------------------------------------------------------------- /src/caddy/docker-compose.gak.graphistry.yml: -------------------------------------------------------------------------------- 1 | #Reuse graphistry's docker-compose setup.. 
except downgrade to caddy1 for auth support 2 | #Start _after_ graphistry 3 | 4 | version: '3.5' 5 | 6 | networks: 7 | grph_net_external: 8 | external: 9 | name: grph_net 10 | 11 | x-production-options: 12 | &production_opts 13 | restart: unless-stopped 14 | expose: 15 | - "8080" 16 | environment: 17 | PORT: 8080 18 | NODE_ENV: production 19 | USE_LOCAL_USER: "false" 20 | ACME_AGREE: "true" 21 | networks: 22 | - grph_net_external 23 | 24 | services: 25 | caddy: 26 | << : *production_opts 27 | image: graphistry/caddy:v2.30.28 28 | expose: 29 | - "80" 30 | - "443" 31 | ports: 32 | - 80:80 33 | - 443:443 34 | environment: 35 | - ENABLE_TELEMETRY=false 36 | - ACME_AGREE=true 37 | volumes: 38 | - ./data/config/Caddyfile:/etc/Caddyfile 39 | - ./data/config/caddy:/root/.caddy 40 | healthcheck: 41 | interval: 300s 42 | start_period: 60s 43 | retries: 3 44 | timeout: 30s 45 | #TODO define local path without muddying Caddyfile 46 | test: sh -c 'curl -f http://nginx/healthz' -------------------------------------------------------------------------------- /src/caddy/full.Caddyfile: -------------------------------------------------------------------------------- 1 | 2 | :80 { 3 | 4 | proxy /public/dash http://streamlit-pub:8501/ { 5 | websocket 6 | transparent 7 | } 8 | 9 | proxy /private/dash http://streamlit-priv:8501/ { 10 | websocket 11 | transparent 12 | } 13 | 14 | proxy / nginx:80 { 15 | websocket 16 | transparent 17 | } 18 | 19 | reauth { 20 | path /private/dash 21 | upstream url=http://nginx/django-json-auth,timeout=20s,follow=false,cookies=true 22 | failure redirect target=/accounts/login/?next={uri} 23 | } 24 | 25 | } 26 | 27 | -------------------------------------------------------------------------------- /src/docker/.env: -------------------------------------------------------------------------------- 1 | #BASE_PATH=dashboard/ 2 | #BASE_URL=http://localhost:8501/dashboard 3 | 4 | #GRAPHISTRY_USERNAME=abc 5 | #GRAPHISTRY_PASSWORD=xyz 6 | 7 | #LOG_LEVEL=DEBUG -------------------------------------------------------------------------------- /src/docker/Dockerfile: -------------------------------------------------------------------------------- 1 | # Default to big image, but allow thin cpu override 2 | ARG DOCKER_TAG=latest 3 | ARG GRAPHISTRY_FORGE_BASE_VERSION=v2.41.0-11.8 4 | ARG PYTHON_VERSION=3.10 5 | ARG BASE_IMAGE=graphistry/graphistry-forge-base:${GRAPHISTRY_FORGE_BASE_VERSION} 6 | FROM python:$PYTHON_VERSION as cpu_base 7 | 8 | ARG DOCKER_TAG=latest 9 | ARG GRAPHISTRY_FORGE_BASE_VERSION=v2.41.0-11.8 10 | ARG BASE_IMAGE=graphistry/graphistry-forge-base:${GRAPHISTRY_FORGE_BASE_VERSION} 11 | FROM $BASE_IMAGE 12 | 13 | EXPOSE 8501 14 | 15 | # making directory of app 16 | WORKDIR /app 17 | 18 | COPY python/conda-app.sh ./ 19 | RUN { source activate rapids || echo ok ; } && ./conda-app.sh 20 | 21 | COPY python/requirements-system.txt ./ 22 | RUN --mount=type=cache,target=/root/.cache \ 23 | { source activate rapids || echo ok ; } && pip install -r requirements-system.txt 24 | 25 | COPY python/requirements-app.txt ./ 26 | RUN --mount=type=cache,target=/root/.cache \ 27 | { source activate rapids || echo ok ; } && pip install -r requirements-app.txt 28 | 29 | ENV LC_ALL=C.UTF-8 30 | ENV LANG=C.UTF-8 31 | 32 | #Note no trailing slash 33 | ENV BASE_URL=http://localhost:8501/dashboard 34 | ENV BASE_PATH=dashboard/ 35 | ENV LOG_LEVEL=ERROR 36 | ENV VIEW_PATH=/apps/views 37 | COPY python/ /apps/ 38 | 39 | COPY docker/entrypoint.sh /entrypoint.sh 40 | 41 | #Default in case not dynamically mounted 
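#(entrypoint.sh copies /root/gak/*.toml over these at runtime when mounted)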
42 | COPY streamlit/credentials.toml /root/.streamlit/credentials.toml 43 | COPY streamlit/config.toml /root/.streamlit/config.toml 44 | 45 | ENTRYPOINT ["/entrypoint.sh"] 46 | CMD ["/apps/entrypoint.py"] 47 | 48 | #Assume volume mount src/python as /apps/ (hot module reloading) 49 | -------------------------------------------------------------------------------- /src/docker/dc: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # check if using older version of docker compose 4 | docker-compose version &>/dev/null 5 | if [ $? != 0 ]; then 6 | dc_cmd='docker compose' 7 | else 8 | dc_cmd='docker-compose' 9 | fi 10 | 11 | set -ex 12 | 13 | DOCKER_BUILDKIT=1 \ 14 | COMPOSE_DOCKER_CLI_BUILD=1 \ 15 | ${dc_cmd} $@ 16 | 17 | -------------------------------------------------------------------------------- /src/docker/dc.cpu: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # check if using older version of docker compose 4 | docker-compose version &>/dev/null 5 | if [ $? != 0 ]; then 6 | dc_cmd='docker compose' 7 | else 8 | dc_cmd='docker-compose' 9 | fi 10 | 11 | set -ex 12 | 13 | DOCKER_BUILDKIT=1 \ 14 | COMPOSE_DOCKER_CLI_BUILD=1 \ 15 | ${dc_cmd} -f docker-compose.yml -f override/cpu.override.yml $@ 16 | 17 | -------------------------------------------------------------------------------- /src/docker/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: "3.5" 2 | 3 | ############################################################ 4 | ## 5 | ## NETWORK 6 | ## 7 | ############################################################ 8 | 9 | #----------------------------------------------------------- 10 | # Ex: Create new network 11 | #----------------------------------------------------------- 12 | networks: 13 | grph_net: 14 | name: grph_net 15 | 16 | #----------------------------------------------------------- 17 | # Ex: Reuse network from another docker-compose 18 | #----------------------------------------------------------- 19 | #networks: 20 | # grph_net: 21 | # external: 22 | # name: grph_net 23 | 24 | 25 | ############################################################ 26 | ## 27 | ## CONFIG 28 | ## 29 | ############################################################ 30 | 31 | 32 | x-production-options: 33 | &production_opts 34 | restart: unless-stopped 35 | networks: 36 | - grph_net 37 | #Warning: Loads into container env, not used by compose envvar interpolation 38 | env_file: 39 | - ../envs/docker.env 40 | - ../envs/general.env 41 | - ../envs/graphistry.env 42 | - ../envs/neptune.env 43 | - ../envs/streamlit.env 44 | - ../envs/tigergraph.env 45 | - ../envs/splunk.env 46 | - .env 47 | #Warning: Loads into container env, not used by compose envvar interpolation 48 | environment: 49 | - BASE_PATH=${BASE_PATH:-dashboard/} 50 | - BASE_URL=${BASE_URL:-http://localhost:8501/dashboard} 51 | - GRAPH_VIEWS=${GRAPH_VIEWS:-../python/views} 52 | - USE_DOCKER=True 53 | - FAVICON_URL=${FAVICON_URL:-https://hub.graphistry.com/pivot/favicon/favicon.ico} 54 | x-build-kwargs: 55 | &build_kwargs 56 | args: 57 | - DOCKER_TAG=${DOCKER_TAG:-latest} 58 | - BUILDKIT_INLINE_CACHE=1 59 | - GRAPHISTRY_FORGE_BASE_VERSION=${GRAPHISTRY_FORGE_BASE_VERSION:-v2.41.0-11.8} 60 | 61 | ############################################################ 62 | ## 63 | ## SERVICES 64 | ## 65 | ############################################################ 66 | 67 | services:
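#Added sketch: the public/private pair that caddy/full.Caddyfile proxies (streamlit-pub, streamlit-priv) can be launched as two instances of this same file with distinct project names and ports, e.g.:
# COMPOSE_PROJECT_NAME=streamlit-pub ST_PUBLIC_PORT=8501 ./dc up -d
# COMPOSE_PROJECT_NAME=streamlit-priv ST_PUBLIC_PORT=8502 ./dc up -d
#(see envs/docker.env for COMPOSE_PROJECT_NAME and envs/streamlit.env for ST_PUBLIC_PORT)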
68 | streamlit: 69 | <<: *production_opts 70 | environment: 71 | PYTHONPATH: "/opt/py_env" 72 | PIP_TARGET: "/opt/py_env" 73 | image: graphistry/graph-app-kit-st:${DOCKER_TAG:-latest}-${CUDA_SHORT_VERSION:-11.8} 74 | command: --server.baseUrlPath="$BASE_PATH" /apps/entrypoint.py 75 | build: 76 | <<: *build_kwargs 77 | context: .. 78 | dockerfile: ./docker/Dockerfile 79 | cache_from: 80 | - graphistry/graph-app-kit-st:${DOCKER_TAG:-latest}-${CUDA_SHORT_VERSION:-11.8} 81 | ports: 82 | - "${ST_PUBLIC_PORT:-8501}:8501" 83 | volumes: 84 | - ../python:/apps 85 | - ${GRAPH_VIEWS:-../python/views}:/apps/views 86 | - ${NEPTUNE_KEY_PATH:-/tmp/mt.pem}:/secrets/neptune-reader.pem 87 | - ../streamlit/config.toml:/root/gak/config.toml 88 | - ../streamlit/credentials.toml:/root/gak/credentials.toml 89 | - ../data/py_envs/gak:/opt/py_env 90 | healthcheck: 91 | test: 92 | [ 93 | "CMD", 94 | "curl", 95 | "-Lf", 96 | "http://localhost:8501/${BASE_PATH}healthz" 97 | ] 98 | interval: 30s 99 | timeout: 30s 100 | retries: 10 101 | start_period: 10s 102 | -------------------------------------------------------------------------------- /src/docker/entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | #ex: --server.baseUrlPath="st_dashboard/" /apps/entrypoint.py 4 | 5 | echo "" 6 | echo "" 7 | echo " _ _ _ _ " 8 | echo " __ _ _ __ __ _ _ __ | |__ __ _ _ __ _ __ | | _(_) |_ " 9 | echo " / _\ | '__/ _\ | '_ \| '_ \ _____ / _\ | '_ \| '_ \ _____| |/ / | __|" 10 | echo "| (_| | | | (_| | |_) | | | |_____| (_| | |_) | |_) |_____| <| | |_ " 11 | echo " \__, |_| \__,_| .__/|_| |_| \__,_| .__/| .__/ |_|\_\_|\__|" 12 | echo " |___/ |_| |_| |_| " 13 | echo "" 14 | echo "" 15 | echo "Browser path (BASE_URL): $BASE_URL" 16 | echo "StreamLit internal base path (BASE_PATH): $BASE_PATH" 17 | echo "StreamLit host port (ST_PUBLIC_PORT): $ST_PUBLIC_PORT" 18 | echo "StreamLit views mount host path (GRAPH_VIEWS): $GRAPH_VIEWS" 19 | echo "Log level (LOG_LEVEL): $LOG_LEVEL" 20 | echo "Graphistry user (GRAPHISTRY_USERNAME): $GRAPHISTRY_USERNAME" 21 | echo "" 22 | 23 | mkdir -p /root/.streamlit 24 | 25 | if [[ -f "/root/gak/credentials.toml" ]]; then 26 | echo "Found custom credentials.toml, overriding default" 27 | cp /root/gak/credentials.toml /root/.streamlit/credentials.toml 28 | fi 29 | 30 | 31 | if [[ -f "/root/gak/config.toml" ]]; then 32 | echo "Found custom config.toml, overriding default" 33 | cp /root/gak/config.toml /root/.streamlit/config.toml 34 | fi 35 | 36 | # if ST_LOG_LEVEL is defined, set it for the base logger of the streamlit app by passing 37 | # it in here. To set the log level only for views and not the root logger of the app, 38 | # use LOG_LEVEL 39 | 40 | if [ -v ST_LOG_LEVEL ]; then 41 | PASS_LOG_LEVEL="--logger.level=${ST_LOG_LEVEL}" 42 | fi 43 | 44 | { source activate rapids || echo ok ; } \ 45 | && echo "pwd: `pwd`" && find . 
&& streamlit run "$@" ${PASS_LOG_LEVEL:+"$PASS_LOG_LEVEL"} 46 | -------------------------------------------------------------------------------- /src/docker/hooks/README.md: -------------------------------------------------------------------------------- 1 | # DockerHub Build Hooks 2 | 3 | See [official docs](https://docs.docker.com/docker-hub/webhooks/) 4 | -------------------------------------------------------------------------------- /src/docker/hooks/build: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | echo "##########################################" 4 | echo "##" 5 | echo "## CUSTOM BUILD (hooks/build)" 6 | echo "##" 7 | echo "##########################################" 8 | echo "pwd: `pwd`" 9 | echo "DOCKER_TAG: ${DOCKER_TAG}" # => src/docker 10 | echo "IMAGE_NAME: ${IMAGE_NAME}" # => graphistry/graph-app-kit-st 11 | 12 | docker-compose -f docker-compose.yml -f override/cpu.override.yml build -------------------------------------------------------------------------------- /src/docker/override/cpu.override.yml: -------------------------------------------------------------------------------- 1 | version: "3.5" 2 | 3 | services: 4 | streamlit: 5 | image: graphistry/graph-app-kit-st:${DOCKER_TAG:-latest} 6 | build: 7 | args: 8 | BASE_IMAGE: cpu_base 9 | 10 | -------------------------------------------------------------------------------- /src/docker/override/docker-compose.override.yml: -------------------------------------------------------------------------------- 1 | version: "3.5" 2 | 3 | ######################################################################################################################## 4 | # 5 | # Experimental: Add streamlit service + mount views/ into jupyter 6 | # 7 | # - Symlink as `${GRAPHISTRY_HOME}/data/config/graph-app-kit` 8 | # - Symlink `${GRAPHISTRY_HOME}/docker-compose.override.yml` as src/docker/override/docker-compose.override.yml 9 | # - Start graphistry 10 | # 11 | ######################################################################################################################## 12 | # 13 | # Ex: Assuming `graphistry` and `graph-app-kit` are siblings: 14 | # 15 | # graphistry$ ln -s ../graph-app-kit/src/docker/override/docker-compose.override.yml docker-compose.override.yml 16 | # graphistry$ ln -s "`pwd`/../graph-app-kit/" data/config/graph-app-kit 17 | # graphistry$ docker-compose up -d 18 | # 19 | ######################################################################################################################## 20 | # 21 | services: 22 | 23 | # Add graph-app-kit to notebook mounts 24 | notebook: 25 | volumes: 26 | #graphistry 27 | - ./data/notebooks:/home/graphistry/notebooks 28 | - ./data/dask-shared:/dask-shared 29 | - ./data/dask-shared:/home/graphistry/dask-shared 30 | #graph-app-kit 31 | - ../graph-app-kit/src/python/views:/home/graphistry/graph-app-kit-src-views 32 | 33 | streamlit: 34 | restart: unless-stopped 35 | networks: 36 | - grph_net 37 | #Warning: Loads into container env, not used by compose envvar interpolation 38 | environment: 39 | - BASE_PATH=${BASE_PATH:-dashboard/} 40 | - BASE_URL=${BASE_URL:-http://localhost:8501/dashboard} 41 | - GRAPH_VIEWS=${GRAPH_VIEWS:-../python/views} 42 | #Warning: Loads into container env, not used by compose envvar interpolation 43 | env_file: 44 | - ./data/config/graph-app-kit/src/envs/docker.env 45 | - ./data/config/graph-app-kit/src/envs/general.env 46 | - ./data/config/graph-app-kit/src/envs/graphistry.env 47 | - 
./data/config/graph-app-kit/src/envs/neptune.env 48 | - ./data/config/graph-app-kit/src/envs/streamlit.env 49 | - ./data/config/graph-app-kit/src/envs/tigergraph.env 50 | - ./data/config/graph-app-kit/src/docker/.env 51 | image: graphistry/graph-app-kit-st:${DOCKER_TAG:-latest} 52 | command: --server.baseUrlPath="$BASE_PATH" /apps/entrypoint.py 53 | build: 54 | args: 55 | - DOCKER_TAG=${DOCKER_TAG:-latest} 56 | - BUILDKIT_INLINE_CACHE=1 57 | - GRAPHISTRY_FORGE_BASE_VERSION=${GRAPHISTRY_FORGE_BASE_VERSION:-v2.41.0-11.8} 58 | context: .. 59 | dockerfile: ./docker/Dockerfile 60 | cache_from: 61 | - graphistry/graph-app-kit-st:${DOCKER_TAG:-latest} 62 | ports: 63 | - "${ST_PUBLIC_PORT:-8501}:8501" 64 | volumes: 65 | - ../graph-app-kit/src/python:/apps 66 | - ${GRAPH_VIEWS:-../graph-app-kit/src/python/views}:/apps/views 67 | - ${NEPTUNE_KEY_PATH:-/tmp/mt.pem}:/secrets/neptune-reader.pem 68 | healthcheck: 69 | test: ["CMD", "curl", "-Lf", "http://localhost:8501/${BASE_PATH}healthz"] 70 | interval: 30s 71 | timeout: 30s 72 | retries: 10 73 | start_period: 10s 74 | -------------------------------------------------------------------------------- /src/envs/docker.env: -------------------------------------------------------------------------------- 1 | ############################################################ 2 | ## 3 | ## DOCKER 4 | ## 5 | ############################################################ 6 | # Note: Settings in docker/.env override settings from here, 7 | # per order of loading in docker-compose.yml "env_file" section 8 | 9 | ##---------------------------------------------------------- 10 | 11 | #COMPOSE_PROJECT_NAME=streamlit-priv -------------------------------------------------------------------------------- /src/envs/general.env: -------------------------------------------------------------------------------- 1 | ############################################################ 2 | ## 3 | ## GENERAL 4 | ## 5 | ############################################################ 6 | # Note: Settings in docker/.env override settings from here, 7 | # per order of loading in docker-compose.yml "env_file" section 8 | 9 | ##---------------------------------------------------------- 10 | 11 | #DEBUG, INFO, WARNING, ERROR, CRITICAL 12 | LOG_LEVEL=ERROR 13 | #DOCKER_TAG=latest 14 | 15 | #https://hub.docker.com/r/graphistry/graphistry-forge-base/tags 16 | #VERSION_BASE=v2.32.4 -------------------------------------------------------------------------------- /src/envs/graphistry.env: -------------------------------------------------------------------------------- 1 | ############################################################ 2 | ## 3 | ## GRAPHISTRY 4 | ## 5 | ############################################################ 6 | # Note: Settings in docker/.env override settings from here, 7 | # per order of loading in docker-compose.yml "env_file" section 8 | 9 | ### Creds used with Graphistry again 10 | #GRAPHISTRY_USERNAME=user 11 | #GRAPHISTRY_PASSWORD=pass 12 | 13 | 14 | ##---------------------------------------------------------- 15 | 16 | 17 | ### Remote graphistry instance 18 | #GRAPHISTRY_PROTOCOL=https 19 | #GRAPHISTRY_SERVER=hub.graphistry.com 20 | 21 | 22 | ### Ex: Python uploads to local Graphistry server on same Docker network, if running 23 | ### graph-app-kit in standalone mode, use fqdn or IP address of graphistry server: 24 | #GRAPHISTRY_PROTOCOL=http 25 | #GRAPHISTRY_SERVER=nginx 26 | 27 | 28 | ### Ex: Browser viewing from local Graphistry instance (Docker exposed on :80) 29 | 
#GRAPHISTRY_CLIENT_PROTOCOL_HOSTNAME=http://localhost 30 | 31 | 32 | ### Ex: Graphistry Hub (Default -- no need to set) 33 | #GRAPHISTRY_PROTOCOL=https 34 | #GRAPHISTRY_SERVER=hub.graphistry.com 35 | #GRAPHISTRY_CLIENT_PROTOCOL_HOSTNAME=https://hub.graphistry.com -------------------------------------------------------------------------------- /src/envs/neptune.env: -------------------------------------------------------------------------------- 1 | ############################################################ 2 | ## 3 | ## NEPTUNE 4 | ## 5 | ############################################################ 6 | # Note: Settings in docker/.env override settings from here, 7 | # per order of loading in docker-compose.yml "env_file" section 8 | 9 | 10 | ##---------------------------------------------------------- 11 | 12 | 13 | ### See https://docs.aws.amazon.com/neptune/latest/userguide/feature-overview-endpoints.html 14 | #NEPTUNE_READER_PROTOCOL=wss 15 | #NEPTUNE_READER_HOST=your-neptune-DBClusterReadEndpoint.com 16 | #NEPTUNE_READER_PORT=8182 17 | 18 | ### Optional: Tunnel through an EC2 node in same VPC as Neptune, such as for local dev or remote service 19 | # Private key: see docker-compose.yml for volume mount of /secrets/neptune-reader.pem 20 | #NEPTUNE_KEY_PATH=/tmp/mt.pem 21 | #NEPTUNE_TUNNEL_HOST=222.222.222.222 22 | #NEPTUNE_TUNNEL_USER=root -------------------------------------------------------------------------------- /src/envs/splunk.env: -------------------------------------------------------------------------------- 1 | ############################################################ 2 | ## 3 | ## SPLUNK 4 | ## 5 | ############################################################ 6 | # Note: Settings in docker/.env override settings from here, 7 | # per order of loading in docker-compose.yml "env_file" section 8 | 9 | 10 | ### Splunk credentials. See splunk.py and https://github.com/splunk/splunk-sdk-python for more details 11 | #SPLUNK_USERNAME="" 12 | #SPLUNK_PASSWORD="" 13 | #SPLUNK_HOST="" 14 | -------------------------------------------------------------------------------- /src/envs/streamlit.env: -------------------------------------------------------------------------------- 1 | ############################################################ 2 | ## 3 | ## STREAMLIT 4 | ## 5 | ############################################################ 6 | # Note: Settings in docker/.env override settings from here, 7 | # per order of loading in docker-compose.yml "env_file" section 8 | 9 | BASE_PATH=dashboard/ 10 | BASE_URL=http://localhost:8501/dashboard 11 | 12 | ##---------------------------------------------------------- 13 | 14 | #Override when running concurrent streamlit instances 15 | ST_PUBLIC_PORT=${ST_PUBLIC_PORT:-8501} -------------------------------------------------------------------------------- /src/envs/tigergraph.env: -------------------------------------------------------------------------------- 1 | ############################################################ 2 | ## 3 | ## TigerGraph 4 | ## 5 | ############################################################ 6 | # Note: Settings in docker/.env override settings from here, 7 | # per order of loading in docker-compose.yml "env_file" section 8 | 9 | 10 | ##---------------------------------------------------------- 11 | 12 | # Optional TigerGraph connection file 13 | # Uncomment and fill in each parameter 14 | # Generate a token via TigerGraph Studio's Admin -> Users management panel. 
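# Added note: when TIGERGRAPH_SECRET is set, TigerGraph_helper/tg_helper.py passes it to conn.getToken();
# otherwise it falls back to conn.getToken(conn.createSecret()), which that helper's FIXME observes can time out.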
15 | 16 | #TIGERGRAPH_HOST=https://myapp123.i.tgcloud.io 17 | #TIGERGRAPH_USERNAME=tigergraph 18 | #TIGERGRAPH_PASSWORD=mypwd 19 | #TIGERGRAPH_GRAPHNAME=AntiFraud 20 | #TIGERGRAPH_SECRET=mykey -------------------------------------------------------------------------------- /src/python/.flake8: -------------------------------------------------------------------------------- 1 | [flake8] 2 | ignore = E121,E122,E126,E127,E128,E129,E131,E201,E202,E401,E501,F401,F403,F541,W503 3 | max-complexity = 10 4 | max-line-length = 127 5 | -------------------------------------------------------------------------------- /src/python/TigerGraph_helper/tg_helper.py: -------------------------------------------------------------------------------- 1 | import logging, os, pyTigerGraph as tg 2 | from typing import Optional 3 | 4 | from util import getChild 5 | 6 | logger = getChild(__name__) 7 | 8 | TIGERGRAPH_CONNECTION_VERSION = '3.1.0' 9 | 10 | def connect_to_tigergraph() -> Optional[tg.TigerGraphConnection]: 11 | if ('TIGERGRAPH_HOST' in os.environ and 'TIGERGRAPH_USERNAME' in os.environ 12 | and 'TIGERGRAPH_PASSWORD' in os.environ and 'TIGERGRAPH_GRAPHNAME' in os.environ): 13 | 14 | creds = { 15 | 'host': os.environ["TIGERGRAPH_HOST"], 16 | 'username': os.environ["TIGERGRAPH_USERNAME"], 17 | 'password': os.environ["TIGERGRAPH_PASSWORD"], 18 | 'graphname': os.environ["TIGERGRAPH_GRAPHNAME"] 19 | } 20 | logger.info('Connecting to TigerGraph using environment variables: %s', 21 | {**creds, 22 | 'password': ''.join(['*' for x in creds['password']]) if creds['password'] is not None else ''}) 23 | 24 | conn = tg.TigerGraphConnection(**creds, version=TIGERGRAPH_CONNECTION_VERSION) 25 | 26 | if ('TIGERGRAPH_SECRET' in os.environ and os.environ["TIGERGRAPH_SECRET"] is not None): 27 | logger.info('... Connected to TG, getting token via provided secret...') 28 | secret = os.environ["TIGERGRAPH_SECRET"] 29 | conn.getToken(secret) 30 | else: 31 | # FIXME: This times out in practice, maybe TG 3.0 -> 3.1 version issues? 32 | logger.info('... Connected to TG, creating secret and getting token...') 33 | conn.getToken(conn.createSecret()) 34 | 35 | logger.info('Successfully finished connecting to TG!') 36 | return conn 37 | 38 | logger.debug("Missing TigerGraph environment variables; skipping connection") 39 | return None 40 | -------------------------------------------------------------------------------- /src/python/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/graphistry/graph-app-kit/2e05765413393613240121173dce635f0ad0ce18/src/python/__init__.py -------------------------------------------------------------------------------- /src/python/bin/lint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -ex 3 | 4 | # Run from python root 5 | # Note: --exit-zero below keeps findings advisory (exit code stays 0) 6 | # Uses tox.ini's flake8 config 7 | 8 | flake8 --version 9 | 10 | # Quick syntax errors 11 | flake8 \ 12 | . \ 13 | --exit-zero \ 14 | --count \ 15 | --select=E9,F63,F7,F82 \ 16 | --show-source \ 17 | --statistics 18 | 19 | # Deeper check 20 | flake8 \ 21 | . 
\ 22 | --exit-zero \ 23 | --count \ 24 | --statistics -------------------------------------------------------------------------------- /src/python/components/AppPicker.py: -------------------------------------------------------------------------------- 1 | import importlib 2 | import logging, os 3 | import streamlit as st 4 | from util import getChild 5 | 6 | logger = getChild(__name__) 7 | 8 | # loads all views/*/__init__.py and tracks active as URL param "?view_index=" 9 | # includes modules with methods run() 10 | # and excludes if ('enabled' in info() and info()['enabled'] == False) 11 | class AppPicker: 12 | VIEW_APP_ID_VAR = "view_index" 13 | 14 | # include: if non-empty, include if any tags match 15 | # exclude: exclude if any tag matches 16 | def __init__(self, include=[], exclude=[]): 17 | self.include = include 18 | self.exclude = exclude 19 | pass 20 | 21 | def check_included(self, mod_info): 22 | if ("enabled" in mod_info) and not mod_info["enabled"]: 23 | return False 24 | if len(self.include) > 0: 25 | hit = False 26 | for tag in self.include: 27 | if tag in mod_info["tags"]: 28 | hit = True 29 | break 30 | if not hit: 31 | return False 32 | for tag in self.exclude: 33 | if tag in mod_info["tags"]: 34 | return False 35 | return True 36 | 37 | # () -> {'id' -> { 'name': str, 'id': str, 'module': Module } } 38 | def list_modules(self): 39 | 40 | # tcook - debug logger issues and prints to stdout 41 | # print("func: print(flush=True) in list_modules()", flush=True) 42 | # print("func: print(NO flush) in list_modules()") 43 | # logger.info("logger.INFO() msg in list_modules()") 44 | # logger.error("logger.ERROR() msg in list_modules()") 45 | # logger.debug("logger.DEBUG() msg in list_modules()") 46 | # print(f"func: print() call LOG_LEVEL={os.getenv('LOG_LEVEL')}", flush=True) 47 | 48 | # tcook: does not affect newlines issues in logger 49 | # for handler in logger.handlers: 50 | # handler.flush() 51 | 52 | modules_by_id = {} 53 | view_path = os.environ.get("VIEW_PATH", "/apps/views") 54 | for view_folder in sorted([view.split("/")[-1] for (view, _, _) in os.walk(view_path) if view != view_path and not view.endswith("__pycache__")]): 55 | try: 56 | mod = importlib.import_module(f"views.{view_folder}") 57 | if hasattr(mod, "run"): 58 | nfo = mod.info() if hasattr(mod, "info") else {"name": view_folder} 59 | mod_id = nfo["id"] if "id" in nfo else nfo["name"] 60 | nfo_resolved = { 61 | "name": view_folder, 62 | "tags": [], 63 | **nfo, 64 | "id": mod_id, 65 | "module": mod, 66 | } 67 | if self.check_included(nfo_resolved): 68 | modules_by_id[mod_id] = nfo_resolved 69 | except: # noqa: E722 70 | 71 | # tcook - debug logger issues and prints to stdout 72 | # print("exception caught in list_modules()", flush=True) 73 | # print("func: list_modules() 222222", flush=True) 74 | # logger.info("logger.INFO msg in list_modules() 222222") 75 | # logger.error("logger.ERROR msg in list_modules() 222222") 76 | # logger.debug("logger.DEBUG msg in list_modules() 222222") 77 | # print(f"check: LOG_LEVEL={os.getenv('LOG_LEVEL')}", flush=True) 78 | 79 | logger.error( 80 | "Module loader ignoring file views/%s due to import failure; safe to ignore for .swp etc files", 81 | view_folder, 82 | exc_info=True, 83 | ) 84 | 85 | # tcook: does not affect newlines issues in logger 86 | # for handler in logger.handlers: 87 | # handler.flush() 88 | 89 | sorted_mods = sorted(modules_by_id.values(), key=lambda nfo: nfo["id"]) 90 | for i in range(len(sorted_mods)): 91 | sorted_mods[i]["index"] = i
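# Added note: the id-sorted "index" assigned above is what get_and_set_active_app() below
# uses to pick the selectbox default when a ?view_index= URL param is present.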
92 | return modules_by_id 93 | 94 | # () -> ? str 95 | def get_maybe_active_view_id(self, query_params): 96 | maybe_default_view_id = query_params[self.VIEW_APP_ID_VAR][0] if self.VIEW_APP_ID_VAR in query_params else None 97 | return maybe_default_view_id 98 | 99 | # () -> ? { 'name': str, 'id': str, 'module': Module } 100 | def get_and_set_active_app(self): 101 | query_params = st.experimental_get_query_params() 102 | maybe_default_view_id = self.get_maybe_active_view_id(query_params) 103 | logger.debug("url view id: %s", maybe_default_view_id) 104 | 105 | modules_by_id = self.list_modules() 106 | # logger.debug("loaded mods: %s", modules_by_id) 107 | 108 | view = None 109 | if len(modules_by_id.keys()) == 0: 110 | pass 111 | else: 112 | if len(modules_by_id.keys()) == 1: 113 | view_id = list(modules_by_id.values())[0]["id"] 114 | view = modules_by_id[view_id] 115 | else: 116 | sorted_mods = sorted(modules_by_id.values(), key=lambda nfo: nfo["index"]) 117 | view_id = st.sidebar.selectbox( 118 | "", 119 | [nfo["id"] for nfo in sorted_mods], 120 | index=0 if maybe_default_view_id is None else modules_by_id[maybe_default_view_id]["index"], 121 | format_func=(lambda id: modules_by_id[id]["name"].upper()), 122 | ) 123 | view = modules_by_id[view_id] 124 | query_params[self.VIEW_APP_ID_VAR] = view_id 125 | st.experimental_set_query_params(**query_params) 126 | 127 | return view 128 | 129 | def load_active_app(self): 130 | mods = self.list_modules() 131 | view = self.get_and_set_active_app() 132 | 133 | if len(mods.keys()) == 0: 134 | st.sidebar.header("No modules found") 135 | st.sidebar.write("Create/mount a views folder") 136 | st.sidebar.write("Ex: File with signature src/views/myapp/__init__.py::run()") 137 | elif len(mods.keys()) == 1: 138 | pass 139 | else: 140 | st.sidebar.title(view["name"]) 141 | 142 | if not (view is None): 143 | logger.info("running mod: %s / %s", view, view["module"]) 144 | view["module"].run() 145 | 146 | return view 147 | -------------------------------------------------------------------------------- /src/python/components/Graphistry.py: -------------------------------------------------------------------------------- 1 | import os 2 | import logging 3 | import graphistry 4 | import streamlit.components.v1 as components 5 | from graphistry import PyGraphistry 6 | import streamlit as st 7 | 8 | from util import getChild 9 | 10 | logger = getChild(__name__) 11 | 12 | logger.debug("Using graphistry version: %s", graphistry.__version__) 13 | 14 | class GraphistrySt: 15 | def __init__(self, overrides={}): 16 | self.cfg = { 17 | "api": 3, 18 | **({"username": os.environ["GRAPHISTRY_USERNAME"]} if "GRAPHISTRY_USERNAME" in os.environ else {}), 19 | **({"password": os.environ["GRAPHISTRY_PASSWORD"]} if "GRAPHISTRY_PASSWORD" in os.environ else {}), 20 | **({"token": os.environ["GRAPHISTRY_TOKEN"]} if "GRAPHISTRY_TOKEN" in os.environ else {}), 21 | **({"protocol": os.environ["GRAPHISTRY_PROTOCOL"]} if "GRAPHISTRY_PROTOCOL" in os.environ else {}), 22 | **({"server": os.environ["GRAPHISTRY_SERVER"]} if "GRAPHISTRY_SERVER" in os.environ else {}), 23 | **( 24 | {"client_protocol_hostname": os.environ["GRAPHISTRY_CLIENT_PROTOCOL_HOSTNAME"]} 25 | if "GRAPHISTRY_CLIENT_PROTOCOL_HOSTNAME" in os.environ 26 | else {} 27 | ), 28 | **overrides, 29 | } 30 | if not (("username" in self.cfg) and ("password" in self.cfg)) and not ("token" in self.cfg): 31 | logger.info("No graphistry creds set, skipping") 32 | return 33 | if not ("store_token_creds_in_memory" in self.cfg): 34 | 
self.cfg["store_token_creds_in_memory"] = True 35 | graphistry.register(**self.cfg) 36 | 37 | def render_url(self, url): 38 | if self.test_login(): 39 | logger.debug("rendering main area, with url: %s", url) 40 | # iframe = '' 41 | # st.markdown(iframe, unsafe_allow_html=True) 42 | components.iframe(src=url, height=800, scrolling=True) 43 | 44 | def plot(self, g): 45 | if PyGraphistry._is_authenticated: 46 | url = g.plot(as_files=True, render=False) # TODO: Remove as_files=True when becomes default 47 | self.render_url(url) 48 | else: 49 | st.markdown( 50 | """ 51 | Graphistry not authenticated. Did you set credentials in docker/.env based on envs/graphistry.env ? 52 | """ 53 | ) 54 | 55 | def test_login(self, verbose=True): 56 | try: 57 | graphistry.register(**self.cfg) 58 | if PyGraphistry._config["api_token"] is None: 59 | raise Exception("Graphistry username and/or password not found.") 60 | return True 61 | except: # noqa: E722 62 | if verbose: 63 | st.write( 64 | Exception( 65 | """Not logged in for Graphistry plots: 66 | Get free GPU account at graphistry.com/get-started and 67 | plug in Graphistry credentials: GRAPHISTRY_USERNAME, GRAPHISTRY_PASSWORD, GRAPHISTRY_SERVER 68 | into src/docker/.env if running graph-app-kit in standalone mode. If using Graphistry 69 | Enterprise Server which has graph-app-kit integrated, add the credentials to 70 | ${GRAPHISTRY_HOME}/.env or ${GRAPHISTRY_HOME}/data/custom.env or 71 | envs/graphistry.env""" 72 | ) 73 | ) 74 | return False 75 | 76 | 77 | GraphistrySt() 78 | -------------------------------------------------------------------------------- /src/python/components/URLParam.py: -------------------------------------------------------------------------------- 1 | import json 2 | import urllib 3 | import streamlit as st 4 | import logging 5 | 6 | from util import getChild 7 | 8 | logger = getChild(__name__) 9 | 10 | class URLParam: 11 | def __init__(self, prefix="d_"): 12 | self.prefix = prefix 13 | 14 | # str * 'a -> 'a 15 | def get_field(self, field: str, default=None): 16 | field = self.prefix + field 17 | query_params = st.experimental_get_query_params() 18 | maybe_v = json.loads(urllib.parse.unquote(query_params[field][0])) if field in query_params else None 19 | out = default if maybe_v is None else maybe_v 20 | logger.debug("resolved default for %s as %s :: %s", field, out, type(out)) 21 | return out 22 | 23 | # str * 'a -> () 24 | def set_field(self, field: str, val): 25 | field = self.prefix + field 26 | query_params = st.experimental_get_query_params() 27 | logger.debug("params at set: %s", query_params.items()) 28 | logger.debug("rewriting field %s val %s as %s", field, val, urllib.parse.quote(json.dumps(val), safe="")) 29 | 30 | query_params = st.experimental_set_query_params( 31 | **{**{k: v[0] for k, v in query_params.items()}, **{field: urllib.parse.quote(json.dumps(val), safe="")}} 32 | ) 33 | -------------------------------------------------------------------------------- /src/python/components/__init__.py: -------------------------------------------------------------------------------- 1 | from .AppPicker import AppPicker 2 | from .Graphistry import GraphistrySt 3 | from .URLParam import URLParam 4 | -------------------------------------------------------------------------------- /src/python/conda-app.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -ex 3 | 4 | ### Add custom conda installs here 5 | ### Note the install order: 6 | ### - docker base conda 7 | ### - docker base pip 8 | ### - app conda <-- here 9 | ### - base pip 10 | ### - app pip 11 | ### Future versions aim to do all conda installs at the beginning
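### Added note: this script runs inside the image's RAPIDS conda env when present --
### docker/Dockerfile invokes it as `{ source activate rapids || echo ok ; } && ./conda-app.sh`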
12 | 13 | 14 | echo "==========================" 15 | echo "=" 16 | echo "= Conda app dependencies" 17 | echo "=" 18 | echo "==========================" 19 | echo "" 20 | 21 | ### Add commands here 22 | 23 | #conda install -c conda-forge hdbscan=0.8.26 24 | 25 | echo "**** Successful install of conda app deps ***" -------------------------------------------------------------------------------- /src/python/css/__init__.py: -------------------------------------------------------------------------------- 1 | from .css import ( 2 | hide_dev_menu, 3 | max_main_width, 4 | 5 | all_css 6 | ) 7 | -------------------------------------------------------------------------------- /src/python/css/css.py: -------------------------------------------------------------------------------- 1 | import streamlit as st 2 | 3 | 4 | # Full width for main area 5 | # https://discuss.streamlit.io/t/custom-render-widths/81/6 6 | def max_main_width(): 7 | max_width_str = f"max-width: 2000px;" 8 | st.markdown( 9 | f""" 10 | 20 | """, 21 | unsafe_allow_html=True) 22 | 23 | 24 | # Hide dev menu 25 | def hide_dev_menu(): 26 | hide_streamlit_style = """ 27 | 31 | """ 32 | st.markdown(hide_streamlit_style, unsafe_allow_html=True) 33 | 34 | 35 | def all_css(is_max_main_width=True, is_hide_dev_menu=True): 36 | if is_max_main_width: 37 | max_main_width() 38 | if is_hide_dev_menu: 39 | hide_dev_menu() 40 | -------------------------------------------------------------------------------- /src/python/entrypoint.py: -------------------------------------------------------------------------------- 1 | import streamlit as st 2 | import os 3 | from components import AppPicker 4 | 5 | page_title_str = "Graph dashboard" 6 | st.set_page_config( 7 | layout="wide", # Can be "centered" or "wide". In the future also "dashboard", etc. 8 | initial_sidebar_state="auto", # Can be "auto", "expanded", "collapsed" 9 | page_title=page_title_str, # String or None. Strings get appended with "• Streamlit". 10 | page_icon=os.environ.get('FAVICON_URL', 'https://hub.graphistry.com/pivot/favicon/favicon.ico'), # String, anything supported by st.image, or None. 11 | ) 12 | 13 | # loads all views/*/__init__.py and tracks active as URL param "?view_index=" 14 | # includes modules with methods run() 15 | # and excludes if ('enabled' in info() and info()['enabled'] == False) 16 | # ... and further include/exclude via info()['tags']
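# Added sketch: tag filters match each view's info()['tags'], e.g. to show only the intro demos:
# AppPicker(include=['demo_intro'], exclude=[]).load_active_app()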
17 | AppPicker().load_active_app() 18 | 19 | # AppPicker(include=[], exclude=['demo']).load_active_app() -------------------------------------------------------------------------------- /src/python/neptune_helper/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/graphistry/graph-app-kit/2e05765413393613240121173dce635f0ad0ce18/src/python/neptune_helper/__init__.py -------------------------------------------------------------------------------- /src/python/neptune_helper/df_helper.py: -------------------------------------------------------------------------------- 1 | def vertex_to_dict(vertex): 2 | d = {} 3 | for k in vertex.keys(): 4 | if isinstance(vertex[k], list): 5 | d[str(k)] = vertex[k][0] 6 | else: 7 | d[str(k)] = vertex[k] 8 | d['id'] = d.pop('T.id') 9 | d['label'] = d.pop('T.label') 10 | return d 11 | 12 | 13 | def edge_to_dict(edge, start_id, end_id): 14 | d = {} 15 | for k in edge.keys(): 16 | if isinstance(edge[k], list): 17 | d[str(k)] = edge[k][0] 18 | else: 19 | d[str(k)] = edge[k] 20 | d['id'] = d.pop('T.id') 21 | d['label'] = d.pop('T.label') 22 | d['source'] = start_id 23 | d['target'] = end_id 24 | return d 25 | 26 | 27 | def flatten_df(df): 28 | 29 | def obj_as_primitive(v): 30 | if (v is None) or type(v) == str: 31 | return v 32 | if type(v) == list: 33 | return ','.join([str(x) for x in v]) 34 | return str(v) 35 | 36 | df2 = df.copy(deep=False) 37 | for c in df.columns: 38 | if df2[c].dtype.name == 'object': 39 | df2[c] = df2[c].apply(obj_as_primitive) 40 | 41 | return df2 42 | -------------------------------------------------------------------------------- /src/python/neptune_helper/gremlin_helper.py: -------------------------------------------------------------------------------- 1 | import os 2 | import logging 3 | from gremlin_python import statics 4 | from gremlin_python.structure.graph import Graph 5 | from gremlin_python.process.graph_traversal import __ 6 | from gremlin_python.process.anonymous_traversal import traversal 7 | from gremlin_python.process.strategies import * 8 | from gremlin_python.process.traversal import * 9 | from gremlin_python.structure.graph import Path, Vertex, Edge 10 | from gremlin_python.driver.driver_remote_connection import DriverRemoteConnection 11 | from util import getChild 12 | 13 | logger = getChild(__name__) 14 | 15 | def connect_to_neptune(): 16 | """Creates a connection to Neptune and returns the traversal source""" 17 | if ('NEPTUNE_READER_HOST' in os.environ and 'NEPTUNE_READER_PORT' in os.environ 18 | and 'NEPTUNE_READER_PROTOCOL' in os.environ): 19 | server = os.environ["NEPTUNE_READER_HOST"] 20 | port = os.environ["NEPTUNE_READER_PORT"] 21 | protocol = os.environ["NEPTUNE_READER_PROTOCOL"] 22 | endpoint = f'{protocol}://{server}:{port}/gremlin' 23 | logger.info(endpoint) 24 | connection = DriverRemoteConnection(endpoint, 'g') 25 | gts = traversal().withRemote(connection) 26 | return (gts, connection) 27 | else: 28 | logger.error("Missing Neptune reader configuration: set NEPTUNE_READER_HOST, NEPTUNE_READER_PORT, and NEPTUNE_READER_PROTOCOL. 
") 29 | return None 30 | -------------------------------------------------------------------------------- /src/python/requirements-app.txt: -------------------------------------------------------------------------------- 1 | ### 2 | ### Use this file for your custom view requirements here vs in the requirements-system.txt 3 | ### 4 | -------------------------------------------------------------------------------- /src/python/requirements-system.txt: -------------------------------------------------------------------------------- 1 | graphistry==0.33.8 2 | streamlit==1.25.0 3 | 4 | ################ 5 | # 6 | # Per-DB 7 | # 8 | ################ 9 | 10 | ### Neptune 11 | # bumping version to get around tornado dependency issue w/ streamlit 12 | # gremlinpython==3.4.10 13 | gremlinpython==3.7.0 14 | 15 | # tcook: not used? 16 | # sshtunnel==0.1.5 17 | 18 | ### TigerGraph 19 | pyTigerGraph # Unpinned as TG Cloud is rapidly moving 20 | plotly # for demos 21 | 22 | ### Splunk 23 | splunk-sdk==1.7.4 24 | 25 | -------------------------------------------------------------------------------- /src/python/test/README.md: -------------------------------------------------------------------------------- 1 | # Placeholder 2 | -------------------------------------------------------------------------------- /src/python/test/test_stub.py: -------------------------------------------------------------------------------- 1 | def test_stub(): 2 | assert True is True 3 | -------------------------------------------------------------------------------- /src/python/tox.ini: -------------------------------------------------------------------------------- 1 | # tox (https://tox.readthedocs.io/) is a tool for running tests 2 | # in multiple virtualenvs. This configuration file will run the 3 | # test suite on all supported python versions. To use it, "pip install tox" 4 | # and then run "tox" from this directory. 
5 | 6 | [tox] 7 | envlist = py37 8 | 9 | [testenv] 10 | deps = 11 | flake8 12 | pytest 13 | commands = 14 | python -m pytest test 15 | 16 | [flake8] 17 | ignore = E121,E122,E126,E127,E128,E129,E131,E201,E202,E401,E501,F401,F403,F541,W503 18 | max-complexity = 10 19 | max-line-length = 127 20 | -------------------------------------------------------------------------------- /src/python/util/__init__.py: -------------------------------------------------------------------------------- 1 | from .log import getChild 2 | -------------------------------------------------------------------------------- /src/python/util/log.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import os 3 | 4 | # logging.basicConfig(format="%(levelname)s %(asctime)s %(name)s:%(message)s\n") 5 | logging.basicConfig(format="%(levelname)s %(asctime)s %(name)s:%(message)s\n") 6 | 7 | def getChild(*args, **kwargs): 8 | 9 | logger = logging.getLogger('gak') 10 | 11 | log_level_str = os.environ.get('LOG_LEVEL', 'ERROR').upper() 12 | log_level = getattr(logging, log_level_str) 13 | logger.debug(f"util.log log_level == {log_level_str} ({log_level})") 14 | 15 | out=logger.getChild(*args, **kwargs) 16 | out.setLevel(log_level) 17 | 18 | out.debug(f"calling logging.setLevel() to log_level == {log_level}") 19 | 20 | return out 21 | -------------------------------------------------------------------------------- /src/python/views/demo_01_fancy/__init__.py: -------------------------------------------------------------------------------- 1 | import graphistry, os, pandas as pd, streamlit as st 2 | from components import GraphistrySt, URLParam 3 | from graphistry import PyGraphistry 4 | from css import all_css 5 | from time import sleep 6 | import logging 7 | 8 | ############################################ 9 | # 10 | # DASHBOARD SETTINGS 11 | # 12 | ############################################ 13 | # Controls how entrypoint.py picks it up 14 | 15 | 16 | app_id = 'app_01' 17 | logger = logging.getLogger(app_id) 18 | urlParams = URLParam(app_id) 19 | 20 | 21 | def info(): 22 | return { 23 | 'id': app_id, 24 | 'name': 'INTRO: fancy graph', 25 | 'enabled': True, 26 | 'tags': ['demo', 'demo_intro'] 27 | } 28 | 29 | 30 | def run(): 31 | run_all() 32 | 33 | 34 | ############################################ 35 | # 36 | # CUSTOM CSS 37 | # 38 | ############################################ 39 | # Have fun! 
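# Added sketch: view-specific CSS can be layered on top of all_css() the same way
# demo_04_simple does below, e.g.:
# st.markdown("""<style>.block-container { padding-top: 1rem; }</style>""", unsafe_allow_html=True)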
40 | 41 | def custom_css(): 42 | 43 | all_css() # our favorites 44 | 45 | 46 | ############################################ 47 | # 48 | # SIDEBAR RENDER AREA 49 | # 50 | ############################################ 51 | # Given URL params, render left sidebar form and return combined filter settings 52 | 53 | # #https://docs.streamlit.io/en/stable/api.html#display-interactive-widgets 54 | def sidebar_area(): 55 | st.sidebar.title('Pick graph') 56 | 57 | n_init = urlParams.get_field('N', 100) 58 | n = st.sidebar.number_input('Number of nodes', min_value=10, max_value=100000, value=n_init, step=20) 59 | urlParams.set_field('N', n) 60 | 61 | base_url = os.environ.get('BASE_URL', 'http://localhost:8501') 62 | 63 | edges_df = pd.concat([ 64 | pd.DataFrame({ 65 | 's': [x for x in range(n)], 66 | 'd': [(x + 1) % n for x in range(n)], 67 | 'link': [ 68 | '' + str(x % n) + " nodes" 69 | for x in range(n) 70 | ] 71 | }), 72 | pd.DataFrame({ 73 | 's': [x for x in range(n)], 74 | 'd': [(x + 6) % n for x in range(n)], 75 | 'link': [ 76 | '' + str(x % n) + " nodes" 77 | for x in range(n) 78 | ] 79 | }) 80 | ], sort=False, ignore_index=True) 81 | 82 | st.sidebar.title("Filter") 83 | option_to_label = { 84 | 'all': 'All', 85 | 'odd': 'Odds', 86 | 'even': 'Evens' 87 | } 88 | 89 | filter_by_node_type_init = urlParams.get_field('filter_by_type', default='all') 90 | filter_by_node_type = \ 91 | st.sidebar.selectbox( 92 | 'Filter nodes by:', 93 | ('all', 'odd', 'even'), 94 | index=('all', 'odd', 'even').index(filter_by_node_type_init), 95 | format_func=(lambda option: option_to_label[option])) 96 | urlParams.set_field('filter_by_type', filter_by_node_type) 97 | 98 | filter_by_node_range_init = ( 99 | urlParams.get_field('filter_by_node_range_min', default=0), 100 | urlParams.get_field('filter_by_node_range_max', default=n)) 101 | logger.info('filter_by_node_range_init: %s :: %s', filter_by_node_range_init, type(filter_by_node_range_init)) 102 | filter_by_node_range = st.sidebar.slider( 103 | 'Filter for nodes in range:', 104 | min_value=0, max_value=n, value=filter_by_node_range_init, step=1) 105 | urlParams.set_field('filter_by_node_range_min', filter_by_node_range[0]) 106 | urlParams.set_field('filter_by_node_range_max', filter_by_node_range[1]) 107 | 108 | return { 109 | 'n': n, 110 | 'edges_df': edges_df, 111 | 'node_type': filter_by_node_type, 112 | 'node_range': filter_by_node_range 113 | } 114 | 115 | 116 | ############################################ 117 | # 118 | # FILTER PIPELINE 119 | # 120 | ############################################ 121 | # Given filter settings, generate/cache/return dataframes & viz 122 | 123 | #@st.cache(suppress_st_warning=True, allow_output_mutation=True) 124 | @st.cache_data 125 | def run_filters(node_type, node_range, edges_df, n): 126 | 127 | filtered_edges_df = edges_df 128 | if node_type == 'all': 129 | pass 130 | elif node_type == 'odd': 131 | filtered_edges_df = filtered_edges_df[ filtered_edges_df['s'] % 2 == 1 ] 132 | filtered_edges_df = filtered_edges_df[ filtered_edges_df['d'] % 2 == 1 ] 133 | elif node_type == 'even': 134 | filtered_edges_df = filtered_edges_df[ filtered_edges_df['s'] % 2 == 0 ] 135 | filtered_edges_df = filtered_edges_df[ filtered_edges_df['d'] % 2 == 0 ] 136 | else: 137 | raise Exception('Unknown filter1 option result: %s' % node_type) 138 | 139 | if node_range[0] > 0: 140 | filtered_edges_df = filtered_edges_df[ filtered_edges_df['s'] >= node_range[0] ] 141 | filtered_edges_df = filtered_edges_df[ filtered_edges_df['d'] >= node_range[0] ] 142
| if node_range[1] <= n: 143 | filtered_edges_df = filtered_edges_df[ filtered_edges_df['s'] <= node_range[1] ] 144 | filtered_edges_df = filtered_edges_df[ filtered_edges_df['d'] <= node_range[1] ] 145 | 146 | # include viz generation as part of cache 147 | url = plot_url(filtered_edges_df, n) 148 | 149 | return { 150 | 'edges_df': filtered_edges_df, 151 | 'url': url 152 | } 153 | 154 | 155 | ############################################ 156 | # 157 | # VIZ 158 | # 159 | ############################################ 160 | 161 | 162 | def plot_url(edges_df, n): 163 | 164 | nodes_df = pd.DataFrame({ 165 | 'n': pd.concat([edges_df['s'], edges_df['d']]).unique() 166 | }) 167 | 168 | nodes_df['nc'] = nodes_df['n'].apply(lambda v: 0x01000000 * round(255 * v / n)) 169 | 170 | logger.info('Starting graphistry plot') 171 | if not GraphistrySt().test_login(): 172 | return '' 173 | 174 | url = graphistry\ 175 | .bind(source="s", destination="d")\ 176 | .edges(edges_df)\ 177 | .nodes(nodes_df)\ 178 | .bind(node='n', point_color='nc')\ 179 | .settings(url_params={ 180 | 'pointSize': 0.3, 181 | 'splashAfter': 'false', 182 | 'bg': '%23' + 'f0f2f6' 183 | })\ 184 | .plot(render=False) 185 | 186 | logger.info('Generated viz, got back URL: %s', url) 187 | 188 | return url 189 | 190 | 191 | ############################################ 192 | # 193 | # MAIN RENDER AREA 194 | # 195 | ############################################ 196 | # Given configured filters and computed results (cached), render 197 | 198 | def main_area(edges_df, url): 199 | 200 | logger.debug('rendering main area, with url: %s', url) 201 | gst = GraphistrySt() 202 | if PyGraphistry._is_authenticated: 203 | gst.render_url(url) 204 | else: 205 | st.title("Welcome to graph-app-kit!") 206 | st.write(""" 207 | This particular demo requires configuring your graph-app-kit with service credentials for 208 | accessing your Graphistry server 209 | 210 | If this is the first time you are seeing graph-app-kit, it is Graphistry's open-source extension 211 | of the https://streamlit.io/ low-code Python dashboarding tool. It adds: 212 | * Optional Docker, Docker Compose, and AWS CloudFormation self-hosted quick launchers 213 | * Multiple dashboard support 214 | * Optional GPU & AI dependencies (Nvidia driver, RAPIDS, PyTorch) aligned with Graphistry releases 215 | * Graph computing dependencies (Gremlin, TigerGraph, ...) 216 | * A Graphistry plotting component 217 | 218 | Starting with Graphistry 2.39, graph-app-kit comes prebundled: 219 | * Public and staff-only Private dashboards 220 | * Control access via User -> Admin port -> DJANGO-WAFFLE -> Flags 221 | * ... then edit to desired visibility for flag_show_public_dashboard, flag_show_private_dashboard 222 | * ... 
and optionally prevent running of the services via your docker-compose.override.yml 223 | """) 224 | 225 | 226 | ############################################ 227 | # 228 | # Putting it all together 229 | # 230 | ############################################ 231 | 232 | def run_all(): 233 | 234 | custom_css() 235 | 236 | try: 237 | 238 | # Render sidebar and get current settings 239 | sidebar_filters = sidebar_area() 240 | 241 | # logger.debug('sidebar_filters: %s', sidebar_filters) 242 | 243 | # Compute filter pipeline (with auto-caching based on filter setting inputs) 244 | # Selective mark these as URL params as well 245 | filter_pipeline_result = run_filters(**sidebar_filters) 246 | 247 | # Render main viz area based on computed filter pipeline results and sidebar settings 248 | main_area(**filter_pipeline_result) 249 | 250 | except Exception as exn: 251 | st.write('Error loading dashboard') 252 | st.write(exn) 253 | -------------------------------------------------------------------------------- /src/python/views/demo_02_disabled/__init__.py: -------------------------------------------------------------------------------- 1 | import streamlit as st 2 | 3 | 4 | def info(): 5 | return { 6 | 'id': 'app_02', 7 | 'name': 'INTRO: disabled', 8 | 'enabled': False, 9 | 'tags': ['demo', 'demo_intro'] 10 | } 11 | 12 | 13 | def run(): 14 | st.title('app2') 15 | st.markdown('hello! (disabled: not visible in menu)') 16 | -------------------------------------------------------------------------------- /src/python/views/demo_03_minimal/__init__.py: -------------------------------------------------------------------------------- 1 | import streamlit as st 2 | 3 | # For copy/paste, 04_simple is probably better 4 | 5 | 6 | def info(): 7 | return { 8 | 'id': 'app_03', 9 | 'name': 'INTRO: minimal', 10 | 'tags': ['demo', 'demo_intro'] 11 | } 12 | 13 | 14 | def run(): 15 | st.title('app3') 16 | st.markdown('hello! (minimal)') 17 | -------------------------------------------------------------------------------- /src/python/views/demo_04_simple/__init__.py: -------------------------------------------------------------------------------- 1 | import graphistry, pandas as pd, streamlit as st 2 | from components import GraphistrySt, URLParam 3 | from css import all_css 4 | import logging, os 5 | 6 | ############################################ 7 | # 8 | # DASHBOARD SETTINGS 9 | # 10 | ############################################ 11 | # Controls how entrypoint.py picks it up 12 | 13 | 14 | app_id = 'app_04' 15 | logger = logging.getLogger(app_id) 16 | urlParams = URLParam(app_id) 17 | 18 | 19 | def info(): 20 | return { 21 | 'id': app_id, 22 | 'name': 'INTRO: simple pipeline', 23 | 'tags': ['demo', 'demo_intro'] 24 | } 25 | 26 | 27 | def run(): 28 | run_all() 29 | 30 | 31 | ############################################ 32 | # 33 | # PIPELINE PIECES 34 | # 35 | ############################################ 36 | 37 | 38 | # Have fun! 
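# Added note: the custom_css / sidebar_area / run_filters / main_area split below mirrors
# demo_01_fancy; @st.cache_data memoizes run_filters on its inputs, so the Graphistry plot
# URL is regenerated only when a sidebar value changes.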
39 | def custom_css(): 40 | all_css() 41 | st.markdown( 42 | """""", unsafe_allow_html=True) 45 | 46 | 47 | # Given URL params, render left sidebar form and return combined filter settings 48 | # https://docs.streamlit.io/en/stable/api.html#display-interactive-widgets 49 | def sidebar_area(): 50 | 51 | # regular param (not in url) 52 | e = st.sidebar.number_input('Number of edges', min_value=10, max_value=100000, value=100, step=20) 53 | 54 | # deep-linkable param (in url) 55 | n_init = urlParams.get_field('N', 100) 56 | n = st.sidebar.number_input('Number of nodes', min_value=10, max_value=100000, value=n_init, step=20) 57 | urlParams.set_field('N', n) 58 | 59 | return {'num_nodes': n, 'num_edges': e} 60 | 61 | 62 | # Given filter settings, generate/cache/return dataframes & viz 63 | #@st.cache(suppress_st_warning=True, allow_output_mutation=True) 64 | @st.cache_data 65 | def run_filters(num_nodes, num_edges): 66 | nodes_df = pd.DataFrame({ 'n': [x for x in range(0, num_nodes)] }) 67 | edges_df = pd.DataFrame({ 68 | 's': [x % num_nodes for x in range(0, num_edges)], 69 | 'd': [(x + 1) % num_nodes for x in range(0, num_edges)], 70 | }) 71 | graph_url = \ 72 | graphistry.nodes(nodes_df).edges(edges_df) \ 73 | .bind(source='s', destination='d', node='n')\ 74 | .plot(render=False) 75 | return { 'nodes_df': nodes_df, 'edges_df': edges_df, 'graph_url': graph_url } 76 | 77 | 78 | def main_area(num_nodes, num_edges, nodes_df, edges_df, graph_url): 79 | logger.debug('rendering main area, with url: %s', graph_url) 80 | GraphistrySt().render_url(graph_url) 81 | 82 | 83 | ############################################ 84 | # 85 | # PIPELINE FLOW 86 | # 87 | ############################################ 88 | 89 | 90 | def run_all(): 91 | 92 | custom_css() 93 | 94 | try: 95 | 96 | # Render sidebar and get current settings 97 | sidebar_filters = sidebar_area() 98 | 99 | # Compute filter pipeline (with auto-caching based on filter setting inputs) 100 | # Selective mark these as URL params as well 101 | filter_pipeline_result = run_filters(**sidebar_filters) 102 | 103 | # Render main viz area based on computed filter pipeline results and sidebar settings 104 | main_area(**sidebar_filters, **filter_pipeline_result) 105 | 106 | except Exception as exn: 107 | st.write('Error loading dashboard') 108 | st.write(exn) 109 | -------------------------------------------------------------------------------- /src/python/views/demo_avr/app.css: -------------------------------------------------------------------------------- 1 | .block-container { 2 | padding-top: 0rem; 3 | padding-bottom: 0rem; 4 | padding-left: 1rem; 5 | padding-right: 1rem; 6 | } 7 | 8 | .main { 9 | align-items: left; 10 | } 11 | 12 | h2 { 13 | padding-top: 0rem; 14 | padding-bottom: 0.5rem; 15 | } 16 | 17 | [data-testid="stSidebar"] { 18 | width: 300px !important; 19 | } 20 | 21 | .e1fqkh3o4 { 22 | padding-top: 3.2rem; 23 | padding-bottom: 0rem; 24 | padding-left: 0rem; 25 | padding-right: 0rem; 26 | } 27 | 28 | /* Hide the Built with streamlit header/footer */ 29 | header { 30 | display: none !important; 31 | } 32 | 33 | footer { 34 | display: none !important; 35 | } 36 | 37 | hr { 38 | margin-block-start: 0.1rem; 39 | margin-block-end: 0.1rem; 40 | } -------------------------------------------------------------------------------- /src/python/views/demo_bio_01_funcoup/__init__.py: -------------------------------------------------------------------------------- 1 | import graphistry, os, pandas as pd, streamlit as st 2 | from components import 
GraphistrySt, URLParam 3 | from graphistry import PyGraphistry 4 | from css import all_css 5 | from time import sleep 6 | import logging 7 | 8 | ############################################ 9 | # 10 | # DASHBOARD SETTINGS 11 | # 12 | ############################################ 13 | # Controls how entrypoint.py picks it up 14 | 15 | 16 | app_id = 'app_bio_01' 17 | logger = logging.getLogger(app_id) 18 | urlParams = URLParam(app_id) 19 | 20 | 21 | def info(): 22 | return { 23 | 'id': app_id, 24 | 'name': 'Bio: FUNCOUP', 25 | 'enabled': True, 26 | 'tags': ['bio', 'large', 'funcoup','demo'], 27 | } 28 | 29 | 30 | def run(): 31 | run_all() 32 | 33 | 34 | ############################################ 35 | # 36 | # CUSTOM CSS 37 | # 38 | ############################################ 39 | # Have fun! 40 | 41 | def custom_css(): 42 | 43 | all_css() # our favorites 44 | 45 | 46 | ############################################ 47 | # 48 | # SIDEBAR RENDER AREA 49 | # 50 | ############################################ 51 | # Given URL params, render left sidebar form and return combined filter settings 52 | 53 | # #https://docs.streamlit.io/en/stable/api.html#display-interactive-widgets 54 | def sidebar_area(): 55 | st.sidebar.title("Select a Species") 56 | species_to_label = { 57 | 'A.thaliana': 'A.thaliana', 58 | 'B.subtilis': 'B.subtilis', 59 | 'B.taurus': 'B.taurus', 60 | 'C.elegans': 'C.elegans', 61 | 'C.familiaris': 'C.familiaris', 62 | 'C.intestinalis': 'C.intestinalis', 63 | 'D.discoideum': 'D.discoideum', 64 | 'D.melanogaster': 'D.melanogaster', 65 | 'D.rerio': 'D.rerio', 66 | 'E.coli': 'E.coli', 67 | 'G.gallus': 'G.gallus', 68 | 'H.sapiens': 'H.sapiens', 69 | 'M.jannaschii': 'M.jannaschii', 70 | 'M.musculus': 'M.musculus', 71 | 'O.sativa': 'O.sativa', 72 | 'P.falciparum': 'P.falciparum', 73 | 'R.norvegicus': 'R.norvegicus', 74 | 'S.cerevisiae': 'S.cerevisiae', 75 | 'S.pombe': 'S.pombe', 76 | 'S.scrofa': 'S.scrofa', 77 | 'S.solfataricus': 'S.solfataricus', 78 | } 79 | 80 | base_url = os.environ['BASE_URL'] 81 | 82 | filter_by_org_type_init = urlParams.get_field('filter_by_org', default='B.subtilis') 83 | filter_by_org_type = \ 84 | st.sidebar.selectbox( 85 | 'Choose organism:', 86 | ('A.thaliana', 'B.subtilis', 'B.taurus','C.elegans','C.familiaris','C.intestinalis','D.discoideum','D.melanogaster','D.rerio','E.coli','G.gallus','H.sapiens','M.jannaschii','M.musculus','O.sativa','P.falciparum','R.norvegicus','S.cerevisiae','S.pombe','S.scrofa','S.solfataricus'), 87 | index=('A.thaliana', 'B.subtilis', 'B.taurus','C.elegans','C.familiaris','C.intestinalis','D.discoideum','D.melanogaster','D.rerio','E.coli','G.gallus','H.sapiens','M.jannaschii','M.musculus','O.sativa','P.falciparum','R.norvegicus','S.cerevisiae','S.pombe','S.scrofa','S.solfataricus').index(filter_by_org_type_init), 88 | format_func=(lambda option: species_to_label[option])) 89 | urlParams.set_field('filter_by_org', filter_by_org_type) 90 | 91 | st.sidebar.title("Select a Network Type") 92 | umap_to_label = { 93 | True: 'UMAP', 94 | False: 'FunCoup', 95 | } 96 | 97 | filter_by_umap_type_init = urlParams.get_field('filter_by_umap', default=False) 98 | filter_by_umap_type = \ 99 | st.sidebar.selectbox( 100 | 'Display functional coupling network (select link evidence below) or UMAP against all 40 evidence types:', 101 | (True,False), 102 | index=(True,False).index(filter_by_umap_type_init), 103 | format_func=(lambda option: umap_to_label[option])) 104 | urlParams.set_field('filter_by_umap', filter_by_umap_type) 105 | 106 | if 
filter_by_umap_type is 'UMAP': 107 | filter_by_net_type = 'full' 108 | else: 109 | filter_by_net_type = 'compact' 110 | 111 | st.sidebar.title("Select an Evidence Type") 112 | edge_to_label = {'PFC':'PFC', 'FBS_max':'FBS_max'} 113 | 114 | filter_by_node_type_init = urlParams.get_field('filter_by_node', default='PFC') 115 | filter_by_node_type = \ 116 | st.sidebar.selectbox( 117 | 'for FunCoup Network display', 118 | ('PFC', 'FBS_max'), 119 | index=('PFC', 'FBS_max').index(filter_by_node_type_init), 120 | format_func=(lambda option: edge_to_label[option])) 121 | urlParams.set_field('filter_by_node', filter_by_node_type) 122 | 123 | 124 | edges_df = pd.read_csv('https://funcoup.org/downloads/download.action?type=network&instanceID=24480085&fileName=FC5.0_'+filter_by_org_type+'_'+filter_by_net_type+'.gz', sep='\t') 125 | 126 | return { 127 | 'edges_df': edges_df, 128 | 'node_type': filter_by_node_type, 129 | 'umap_type': filter_by_umap_type, 130 | } 131 | 132 | 133 | ############################################ 134 | # 135 | # FILTER PIPELINE 136 | # 137 | ############################################ 138 | # Given filter settings, generate/cache/return dataframes & viz 139 | 140 | #@st.cache(suppress_st_warning=True, allow_output_mutation=True) 141 | @st.cache_data 142 | def run_filters(edges_df, node_type, umap_type=False): 143 | 144 | filtered_edges_df = edges_df 145 | # filtered_edges_df = filtered_edges_df.replace({'ENSG00000':''},regex=True) 146 | filtered_edges_df.columns=filtered_edges_df.columns.str.split(':').str[1] 147 | 148 | # include viz generation as part of cache 149 | url = plot_url(filtered_edges_df,node_type,umap_type) 150 | 151 | return { 152 | 'edges_df': filtered_edges_df, 153 | 'url': url, 154 | } 155 | 156 | 157 | ############################################ 158 | # 159 | # VIZ 160 | # 161 | ############################################ 162 | 163 | 164 | def plot_url(edges_df,node_type, umap_type=False): 165 | 166 | edges_df.replace({'ENSG00000':''},regex=True,inplace=True) ## remove ENSG00000 from gene names for better compression 167 | 168 | nodes_df = pd.DataFrame({ 169 | 'n': pd.concat([edges_df['Gene1'], edges_df['Gene2']]).unique() 170 | }) 171 | n = len(nodes_df) 172 | 173 | nodes_df['ind'] = nodes_df.index 174 | nodes_df['nc'] = nodes_df['ind'].apply(lambda v: 0x01000000 * round(255 * v / n,2)) 175 | 176 | logger.info('Starting graphistry plot') 177 | if not GraphistrySt().test_login(): 178 | return '' 179 | 180 | if umap_type == False: 181 | url = graphistry\ 182 | .edges(edges_df)\ 183 | .bind(source="Gene1", destination="Gene2", edge_weight=node_type)\ 184 | .nodes(nodes_df)\ 185 | .bind(node='n', point_color='nc')\ 186 | .settings(url_params={ 187 | 'pointSize': 0.3, 188 | 'splashAfter': 'false', 189 | 'bg': '%23' + 'f0f2f6' 190 | })\ 191 | .plot(render=False)#, as_files=True, suffix='.html', output=None, open=False) 192 | elif umap_type == True: 193 | 194 | AA = graphistry\ 195 | .nodes(edges_df)\ 196 | .bind(source="Gene1", destination="Gene2")\ 197 | .settings(url_params={ 198 | 'pointSize': 0.3, 199 | 'splashAfter': 'false', 200 | 'bg': '%23' + 'f0f2f6' 201 | })\ 202 | .umap(feature_engine='dirty_cat',engine='umap_learn',memoize=True) 203 | emb2=AA._node_embedding 204 | url=graphistry.nodes(emb2.reset_index(),'index').edges(AA._edges,'_src_implicit','_dst_implicit').bind(point_x="x",point_y="y").settings(url_params={"play":0}).addStyle(bg={'color': '#eee'}).plot(render=False) 205 | 206 | logger.info('Generated viz, got back urL: %s', url) 207 | 208 | 
return url 209 | 210 | 211 | ############################################ 212 | # 213 | # MAIN RENDER AREA 214 | # 215 | ############################################ 216 | # Given configured filters and computed results (cached), render 217 | 218 | def main_area(edges_df, url): 219 | 220 | logger.debug('rendering main area, with url: %s', url) 221 | gst = GraphistrySt() 222 | if PyGraphistry._is_authenticated: 223 | gst.render_url(url) 224 | else: 225 | st.title("Welcome to graph-app-kit!") 226 | st.write(""" 227 | This particular demo requires configuring your graph-app-kit with service credentials for 228 | accessing your Graphistry server. 229 | 230 | If this is the first time you are seeing graph-app-kit: it is Graphistry's open-source extension 231 | of the https://streamlit.io/ low-code Python dashboarding tool. It adds: 232 | * Optional Docker, Docker Compose, and AWS CloudFormation self-hosted quick launchers 233 | * Multiple dashboard support 234 | * Optional GPU & AI dependencies (Nvidia driver, RAPIDS, PyTorch) aligned with Graphistry releases 235 | * Graph computing dependencies (Gremlin, TigerGraph, ...) 236 | * A Graphistry plotting component 237 | 238 | Starting with Graphistry 2.39, graph-app-kit comes prebundled: 239 | * Public and staff-only private dashboards 240 | * Control access via User -> Admin port -> DJANGO-WAFFLE -> Flags 241 | * ... then edit to desired visibility for flag_show_public_dashboard, flag_show_private_dashboard 242 | * ... and optionally prevent running of the services via your docker-compose.override.yml 243 | """) 244 | 245 | 246 | ############################################ 247 | # 248 | # Putting it all together 249 | # 250 | ############################################ 251 | 252 | def run_all(): 253 | 254 | custom_css() 255 | 256 | try: 257 | 258 | # Render sidebar and get current settings 259 | sidebar_filters = sidebar_area() 260 | 261 | # logger.debug('sidebar_filters: %s', sidebar_filters) 262 | 263 | # Compute filter pipeline (with auto-caching based on filter setting inputs) 264 | # Selectively mark these as URL params as well 265 | filter_pipeline_result = run_filters(**sidebar_filters) 266 | 267 | # Render main viz area based on computed filter pipeline results and sidebar settings 268 | main_area(**filter_pipeline_result) 269 | 270 | except Exception as exn: 271 | st.write('Error loading dashboard') 272 | st.write(exn) 273 | -------------------------------------------------------------------------------- /src/python/views/demo_login/__init__.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import os 3 | import sys 4 | from datetime import datetime, time, timedelta 5 | from typing import Dict, List, Union 6 | 7 | import dateutil.parser as dp 8 | import pandas as pd 9 | import streamlit.components.v1 as components 10 | from components import URLParam 11 | from components.Splunk import SplunkConnection 12 | from css import all_css 13 | from graphistry import Plottable 14 | from requests.exceptions import HTTPError 15 | from views.demo_login.marlowe import ( 16 | AUTH_SAFE_FIELDS, 17 | AuthDataResource, 18 | AuthMarlowe, 19 | AuthMissingData, 20 | ) 21 | 22 | import streamlit as st 23 | 24 | ############################################ 25 | # 26 | # DASHBOARD SETTINGS 27 | # 28 | ############################################ 29 | # Controls how entrypoint.py picks it up 30 | 31 | 32 | app_id = "demo_login" 33 | logger = logging.getLogger(app_id) 34 | urlParams = URLParam(app_id) 35 |
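# Aside (illustrative sketch only; the real helper is src/python/components/URLParam.py,
# whose internals are not shown in this listing): the urlParams.get_field/set_field
# calls used across these views amount to app-namespaced URL query parameters, roughly:
#
#   raw = st.query_params.get(f'{app_id}:{field}')          # read a deep-linkable value
#   val = default if raw is None else json.loads(raw)
#   st.query_params[f'{app_id}:{field}'] = json.dumps(val)  # write it back to the URL
#
# so widget state survives reloads and dashboards can be shared as URLs. (Older
# Streamlit releases expose the same idea via st.experimental_get/set_query_params.)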
36 | # Splunk configuration 37 | INDEX = "auth_txt_50k" 38 | DEFAULT_PIVOT_URL_INVESTIGATION_ID = "123" 39 | 40 | 41 | def info(): 42 | return { 43 | "id": app_id, 44 | "name": "Cyber: Login Analyzer", 45 | "tags": ["cyber", "cybersecurity", "security"], 46 | "enabled": True, 47 | } 48 | 49 | 50 | def run(): 51 | run_all() 52 | 53 | 54 | ############################################ 55 | # 56 | # PIPELINE PIECES 57 | # 58 | ############################################ 59 | 60 | 61 | # Have fun! 62 | def custom_css(): 63 | all_css() 64 | st.markdown( 65 | """""", 107 | unsafe_allow_html=True, 108 | ) 109 | 110 | 111 | # Given URL params, render left sidebar form and return combined filter settings 112 | # https://docs.streamlit.io/en/stable/api.html#display-interactive-widgets 113 | def sidebar_area(): 114 | with st.sidebar: 115 | # Write a description in the sidebar 116 | st.sidebar.markdown( 117 | '
Nodes: Logins, colored by attack category
', 118 | unsafe_allow_html=True, 119 | ) 120 | st.sidebar.markdown( 121 | '
Edges: Link logins by similarity
', 122 | unsafe_allow_html=True, 123 | ) 124 | 125 | st.sidebar.divider() 126 | 127 | now = datetime.now() 128 | today = now.date() 129 | current_hour = now.time() 130 | month_ago = today - timedelta(days=60) 131 | 132 | start_date = st.sidebar.date_input(label="Start Date", value=month_ago) 133 | start_time = st.sidebar.time_input(label="Start Time", value=time(0, 00)) 134 | end_date = st.sidebar.date_input(label="End Date", value=now) 135 | end_time = st.sidebar.time_input(label="End Time", value=current_hour) 136 | 137 | logger.debug(f"start_date={start_date} start_time={start_time} | end_date={end_date} end_time={end_time}\n") 138 | 139 | start_datetime = dp.parse(f"{start_date} {start_time}") 140 | end_datetime = dp.parse(f"{end_date} {end_time}") 141 | 142 | # st.sidebar.divider() 143 | 144 | # urlParams.get_field("dbscan", 0) 145 | # dbscan: int = st.sidebar.number_input(label="Cluster ID", value=0, step=1) 146 | # urlParams.set_field("dbscan", dbscan) 147 | 148 | return { 149 | "start_datetime": start_datetime, 150 | "end_datetime": end_datetime, 151 | # "dbscan": dbscan, 152 | } 153 | 154 | 155 | # Cache the Splunk client as a resource so it is re-used 156 | @st.cache_resource 157 | def cache_splunk_client(username: str, password: str, host: str) -> SplunkConnection: 158 | splunk_client = SplunkConnection(username, password, host) 159 | assert splunk_client.connect() 160 | return splunk_client 161 | 162 | 163 | # Given filter settings, generate/cache/return dataframes & viz 164 | def run_filters(start_datetime, end_datetime): # , dbscan): 165 | with st.spinner("Generating graph..."): 166 | splunk_client = cache_splunk_client( 167 | os.environ["SPLUNK_USERNAME"], 168 | os.environ["SPLUNK_PASSWORD"], 169 | os.environ["SPLUNK_HOST"], 170 | ) 171 | 172 | query_dict: Dict[str, Union[str, float, List[str]]] = { 173 | "datetime": [ 174 | (">=", start_datetime.isoformat()), 175 | ("<=", end_datetime.isoformat()), 176 | ], 177 | } 178 | # if dbscan > 0: 179 | # query_dict["dbscan"] = dbscan 180 | 181 | splunk_query = SplunkConnection.build_query( 182 | index=INDEX, 183 | query_dict=query_dict, 184 | fields=list(AUTH_SAFE_FIELDS.keys()), 185 | sort=[], 186 | debug=True, 187 | ) 188 | logger.debug(f"Splunk query: {splunk_query}\n") 189 | results = splunk_client.one_shot_splunk(splunk_query) 190 | 191 | # Clean the Splunk results and send them to Graphistry to GPU render and return a url 192 | try: 193 | data_resource = AuthDataResource(edf=results, feature_columns=list(AUTH_SAFE_FIELDS.keys())) 194 | 195 | # 196 | # Bring in standard graphistry environment variables: Set env/*.env files, in .env --> docker-compose.yml --> os.getenv(key) --> AVRMarlowe.register() 197 | # 198 | 199 | logger.info("Configuring environment variables...\n") 200 | investigation_id: str = os.getenv("PIVOT_URL_INVESTIGATION_ID", DEFAULT_PIVOT_URL_INVESTIGATION_ID) 201 | logger.debug(f"investigation_id={investigation_id}\n") 202 | 203 | data_resource.add_pivot_url_column( 204 | investigation_id=investigation_id, 205 | ) 206 | 207 | # Generate the graph 208 | marlowe: AuthMarlowe = AuthMarlowe(data_resource=data_resource) 209 | g: Plottable = marlowe.umap() # next line describe_clusters uses dbscan clusters from umap 210 | cluster_df: pd.DataFrame = marlowe.describe_clusters() 211 | try: 212 | graph_url: str = g.plot(render=False) 213 | except HTTPError as e: 214 | logging.exception(e) 215 | 216 | return { 217 | "graph_url": graph_url, 218 | "cluster_df": cluster_df, 219 | } 220 | except AuthMissingData: 221 | 
st.error("Your query returned no records.", icon="🚨") 222 | 223 | 224 | def main_area( 225 | start_datetime, 226 | end_datetime, 227 | # dbscan, 228 | graph_url=None, 229 | cluster_df=None, 230 | ): 231 | logger.debug("Rendering main area, with url: %s\n", graph_url) 232 | components.iframe(src=graph_url, height=650, scrolling=True) 233 | st.dataframe(cluster_df, use_container_width=True, height=176) 234 | 235 | 236 | ############################################ 237 | # 238 | # PIPELINE FLOW 239 | # 240 | ############################################ 241 | 242 | 243 | def run_all(): 244 | custom_css() 245 | 246 | try: 247 | # Render sidebar and get current settings 248 | sidebar_filters = sidebar_area() 249 | 250 | # Compute filter pipeline (with auto-caching based on filter setting inputs) 251 | # Selective mark these as URL params as well 252 | filter_pipeline_result = run_filters(**sidebar_filters) 253 | 254 | # Render main viz area based on computed filter pipeline results and sidebar settings 255 | main_area( 256 | **sidebar_filters, 257 | # Fill in empties or main_area will choke 258 | **filter_pipeline_result or {"graph_url": None, "cluster_df": None}, 259 | ) 260 | 261 | except Exception as exn: 262 | st.write("Error loading dashboard") 263 | st.write(exn) 264 | -------------------------------------------------------------------------------- /src/python/views/demo_neptune_01_minimal_gremlin/__init__.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import graphistry 3 | import os 4 | import pandas as pd 5 | import streamlit as st 6 | from components import GraphistrySt, URLParam 7 | from css import all_css 8 | from neptune_helper import gremlin_helper, df_helper 9 | 10 | from gremlin_python import statics 11 | from gremlin_python.process.graph_traversal import __ 12 | from gremlin_python.process.traversal import WithOptions, T 13 | import logging 14 | 15 | ############################################ 16 | # 17 | # DASHBOARD SETTINGS 18 | # 19 | ############################################ 20 | # Controls how entrypoint.py picks it up 21 | 22 | 23 | app_id = 'app_neptune_01' 24 | logger = logging.getLogger(app_id) 25 | urlParams = URLParam(app_id) 26 | node_id_col = 'id' 27 | src_id_col = 'source' 28 | dst_id_col = 'target' 29 | node_label_col = 'label' 30 | edge_label_col = 'label' 31 | 32 | 33 | def info(): 34 | return { 35 | 'id': app_id, 36 | 'name': 'GREMLIN: Simple Sample', 37 | 'tags': ['demo', 'neptune_demo'] 38 | } 39 | 40 | 41 | def run(): 42 | run_all() 43 | 44 | 45 | ############################################ 46 | # 47 | # PIPELINE PIECES 48 | # 49 | ############################################ 50 | # Have fun! 
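# Aside (hypothetical sketch; gremlin_helper.connect_to_neptune() lives in
# src/python/neptune_helper/gremlin_helper.py and its internals are not shown in this
# listing): run_filters() below assumes it returns a (traversal_source, connection)
# pair, along the lines of the standard gremlin_python pattern:
#
#   from gremlin_python.driver.driver_remote_connection import DriverRemoteConnection
#   from gremlin_python.process.anonymous_traversal import traversal
#
#   conn = DriverRemoteConnection(f'wss://{neptune_host}:8182/gremlin', 'g')  # neptune_host: assumed config
#   g = traversal().withRemote(conn)
#
# with the caller responsible for conn.close() once querying is done.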
51 | def custom_css(): 52 | all_css() 53 | st.markdown( 54 | """""", unsafe_allow_html=True) 57 | 58 | 59 | # Given URL params, render left sidebar form and return combined filter settings 60 | # https://docs.streamlit.io/en/stable/api.html#display-interactive-widgets 61 | def sidebar_area(): 62 | 63 | num_edges_init = urlParams.get_field('num_edges', 100) 64 | num_edges = st.sidebar.slider( 65 | 'Number of edges', min_value=1, max_value=10000, value=num_edges_init, step=20) 66 | urlParams.set_field('num_edges', num_edges) 67 | 68 | return {'num_edges': num_edges} 69 | 70 | 71 | def plot_url(nodes_df, edges_df): 72 | nodes_df = df_helper.flatten_df(nodes_df) 73 | edges_df = df_helper.flatten_df(edges_df) 74 | 75 | logger.info('Starting graphistry plot') 76 | g = graphistry\ 77 | .edges(edges_df)\ 78 | .bind(source=src_id_col, destination=dst_id_col)\ 79 | .nodes(nodes_df)\ 80 | .bind(node=node_id_col) 81 | 82 | if not (node_label_col is None): 83 | g = g.bind(point_title=node_label_col) 84 | 85 | if not (edge_label_col is None): 86 | g = g.bind(edge_title=edge_label_col) 87 | 88 | url = g\ 89 | .settings(url_params={ 90 | 'bg': '%23' + 'f0f2f6' 91 | })\ 92 | .plot(render=False) 93 | 94 | logger.info('Generated viz, got back urL: %s', url) 95 | 96 | return url 97 | 98 | 99 | def path_to_df(p): 100 | nodes = {} 101 | edges = {} 102 | 103 | for triple in p: 104 | 105 | src_id = triple[0][T.id] 106 | nodes[src_id] = df_helper.vertex_to_dict(triple[0]) 107 | 108 | dst_id = triple[2][T.id] 109 | nodes[dst_id] = df_helper.vertex_to_dict(triple[2]) 110 | 111 | edges[triple[1][T.id]] = df_helper.edge_to_dict( 112 | triple[1], src_id, dst_id) 113 | 114 | return pd.DataFrame(nodes.values()), pd.DataFrame(edges.values()) 115 | 116 | 117 | # Given filter settings, generate/cache/return dataframes & viz 118 | #@st.cache(suppress_st_warning=True, allow_output_mutation=True) 119 | @st.cache_data 120 | def run_filters(num_edges): 121 | g, conn = gremlin_helper.connect_to_neptune() 122 | 123 | logger.info('Querying neptune') 124 | res = g.V().inE().limit(num_edges).outV().path().by( 125 | __.valueMap().with_(WithOptions.tokens)).toList() 126 | 127 | nodes_df, edges_df = path_to_df(res) 128 | url = plot_url(nodes_df, edges_df) 129 | 130 | logger.info("Finished compute phase") 131 | 132 | try: 133 | conn.close() 134 | 135 | except RuntimeError as e: 136 | if str(e) == "There is no current event loop in thread 'ScriptRunner.scriptThread'.": 137 | loop = asyncio.new_event_loop() 138 | asyncio.set_event_loop(loop) 139 | conn.close() 140 | else: 141 | raise e 142 | 143 | except Exception as e: 144 | logger.error('oops in gremlin', exc_info=True) 145 | raise e 146 | 147 | return {'nodes_df': nodes_df, 'edges_df': edges_df, 'url': url, 'res': res} 148 | 149 | 150 | def main_area(url): 151 | 152 | logger.debug('rendering main area, with url: %s', url) 153 | GraphistrySt().render_url(url) 154 | 155 | 156 | ############################################ 157 | # 158 | # PIPELINE FLOW 159 | # 160 | ############################################ 161 | 162 | 163 | def run_all(): 164 | try: 165 | custom_css() 166 | 167 | # Render sidebar and get current settings 168 | sidebar_filters = sidebar_area() 169 | 170 | # Compute filter pipeline (with auto-caching based on filter setting inputs) 171 | # Selective mark these as URL params as well 172 | filter_pipeline_result = run_filters(**sidebar_filters) 173 | 174 | # Render main viz area based on computed filter pipeline results and sidebar settings 175 | 
main_area(filter_pipeline_result['url']) 176 | 177 | except Exception as exn: 178 | st.write('Error loading dashboard') 179 | st.write(exn) 180 | -------------------------------------------------------------------------------- /src/python/views/demo_neptune_02_gremlin/__init__.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import graphistry 3 | import os 4 | import pandas as pd 5 | import streamlit as st 6 | from components import GraphistrySt, URLParam 7 | from neptune_helper import gremlin_helper, df_helper 8 | from css import all_css 9 | import time 10 | import altair as alt 11 | 12 | from gremlin_python import statics 13 | from gremlin_python.process.graph_traversal import __ 14 | from gremlin_python.process.traversal import WithOptions, T 15 | import logging 16 | 17 | ############################################ 18 | # 19 | # DASHBOARD SETTINGS 20 | # 21 | ############################################ 22 | # Controls how entrypoint.py picks it up 23 | 24 | 25 | app_id = 'app_neptune_02' 26 | logger = logging.getLogger(app_id) 27 | urlParams = URLParam(app_id) 28 | node_id_col = 'id' 29 | src_id_col = 'source' 30 | dst_id_col = 'target' 31 | node_label_col = 'label' 32 | edge_label_col = 'label' 33 | 34 | # Setup a structure to hold metrics 35 | metrics = {'neptune_time': 0, 'graphistry_time': 0, 36 | 'node_cnt': 0, 'edge_cnt': 0, 'prop_cnt': 0} 37 | 38 | 39 | # Define the name of the view 40 | def info(): 41 | return { 42 | 'id': app_id, 43 | 'name': 'GREMLIN: Faceted Filter', 44 | 'tags': ['demo', 'neptune_demo'] 45 | } 46 | 47 | 48 | def run(): 49 | run_all() 50 | 51 | 52 | ############################################ 53 | # 54 | # PIPELINE PIECES 55 | # 56 | ############################################ 57 | 58 | 59 | # Have fun! 
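# Note on the conn.close() error handling used in these Neptune views (see
# run_filters above and below): gremlin_python closes its websocket transport via
# asyncio, but Streamlit runs scripts on a worker thread ('ScriptRunner.scriptThread')
# that has no default event loop, so close() can raise "There is no current event
# loop...". The workaround is to attach a fresh loop to the thread and retry:
#
#   loop = asyncio.new_event_loop()
#   asyncio.set_event_loop(loop)
#   conn.close()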
60 | def custom_css(): 61 | all_css() 62 | st.markdown( 63 | """""", unsafe_allow_html=True) 66 | 67 | 68 | # Given URL params, render left sidebar form and return combined filter settings 69 | # https://docs.streamlit.io/en/stable/api.html#display-interactive-widgets 70 | def sidebar_area(): 71 | 72 | num_edges_init = urlParams.get_field('num_matches', 10000) 73 | state = st.sidebar.selectbox( 74 | 'Find users from this state?', 75 | [ 76 | 'All States', 77 | 'Alabama', 78 | 'Alaska', 79 | 'Arizona', 80 | 'Arkansas', 81 | 'California', 82 | 'Colorado', 83 | 'Connecticut', 84 | 'Delaware', 85 | 'Florida', 86 | 'Georgia', 87 | 'Hawaii', 88 | 'Idaho', 89 | 'Illinois', 90 | 'Indiana', 91 | 'Iowa', 92 | 'Kansas', 93 | 'Kentucky', 94 | 'Louisiana', 95 | 'Maine', 96 | 'Maryland', 97 | 'Massachusetts', 98 | 'Michigan', 99 | 'Minnesota', 100 | 'Mississippi', 101 | 'Missouri', 102 | 'Montana', 103 | 'Nebraska', 104 | 'Nevada', 105 | 'New Hampshire', 106 | 'New Jersey', 107 | 'New Mexico', 108 | 'New York', 109 | 'North Carolina', 110 | 'North Dakota', 111 | 'Ohio', 112 | 'Oklahoma', 113 | 'Oregon', 114 | 'Pennsylvania', 115 | 'Rhode Island', 116 | 'South Carolina', 117 | 'South Dakota', 118 | 'Tennessee', 119 | 'Texas', 120 | 'Utah', 121 | 'Vermont', 122 | 'Virginia', 123 | 'Washington', 124 | 'West Virginia', 125 | 'Wisconsin', 126 | 'Wyoming' 127 | ]) 128 | 129 | city = st.sidebar.text_input( 130 | 'Find users from this city?', 131 | "") 132 | 133 | num_edges = st.sidebar.slider( 134 | 'Number of edges', min_value=1, max_value=10000, value=num_edges_init, step=20) 135 | urlParams.set_field('num_edges', num_edges) 136 | urlParams.set_field('state', state) 137 | 138 | return {'num_edges': num_edges, 'state': state, 'city': city} 139 | 140 | 141 | def plot_url(nodes_df, edges_df): 142 | global metrics 143 | nodes_df = df_helper.flatten_df(nodes_df) 144 | edges_df = df_helper.flatten_df(edges_df) 145 | 146 | logger.info('Starting graphistry plot') 147 | tic = time.perf_counter() 148 | g = graphistry\ 149 | .edges(edges_df)\ 150 | .bind(source=src_id_col, destination=dst_id_col)\ 151 | .nodes(nodes_df)\ 152 | .bind(node=node_id_col) 153 | 154 | if not (node_label_col is None): 155 | g = g.bind(point_title=node_label_col) 156 | 157 | if not (edge_label_col is None): 158 | g = g.bind(edge_title=edge_label_col) 159 | 160 | url = g\ 161 | .settings(url_params={ 162 | 'bg': '%23' + 'f0f2f6' 163 | })\ 164 | .plot(render=False) 165 | toc = time.perf_counter() 166 | metrics['graphistry_time'] = toc - tic 167 | logger.info(f'Graphisty Time: {metrics["graphistry_time"]}') 168 | logger.info('Generated viz, got back urL: %s', url) 169 | 170 | return url 171 | 172 | 173 | def path_to_df(p): 174 | nodes = {} 175 | edges = {} 176 | 177 | for triple in p: 178 | 179 | src_id = triple[0][T.id] 180 | nodes[src_id] = df_helper.vertex_to_dict(triple[0]) 181 | 182 | dst_id = triple[2][T.id] 183 | nodes[dst_id] = df_helper.vertex_to_dict(triple[2]) 184 | 185 | edges[triple[1][T.id]] = df_helper.edge_to_dict( 186 | triple[1], src_id, dst_id) 187 | 188 | return pd.DataFrame(nodes.values()), pd.DataFrame(edges.values()) 189 | 190 | # Given filter settings, generate/cache/return dataframes & viz 191 | #@st.cache(suppress_st_warning=True, allow_output_mutation=True) 192 | @st.cache_data 193 | def run_filters(num_edges, state, city): 194 | global metrics 195 | g, conn = gremlin_helper.connect_to_neptune() 196 | 197 | logger.info('Querying neptune') 198 | tic = time.perf_counter() 199 | t = g.V().inE() 200 | # Conditionally add 
the state filtering in here 201 | if not state == "All States": 202 | t = t.has('visited', 'state', state) 203 | # Conditionally add the city filtering in here 204 | if not city == "": 205 | t = t.has('visited', 'city', city) 206 | res = t.limit(num_edges).outV().path().by( 207 | __.valueMap().with_(WithOptions.tokens)).toList() 208 | toc = time.perf_counter() 209 | logger.info(f'Query Execution: {toc-tic:0.02f} seconds') 210 | logger.debug('Query Result Count: %s', len(res)) 211 | metrics['neptune_time'] = toc - tic 212 | 213 | nodes_df, edges_df = path_to_df(res) 214 | 215 | # Calculate the metrics 216 | metrics['node_cnt'] = nodes_df.size 217 | metrics['edge_cnt'] = edges_df.size 218 | metrics['prop_cnt'] = (nodes_df.size * nodes_df.columns.size) + \ 219 | (edges_df.size * edges_df.columns.size) 220 | 221 | if nodes_df.size > 0: 222 | url = plot_url(nodes_df, edges_df) 223 | else: 224 | url = "" 225 | 226 | logger.info("Finished compute phase") 227 | 228 | try: 229 | conn.close() 230 | 231 | except RuntimeError as e: 232 | if str(e) == "There is no current event loop in thread 'ScriptRunner.scriptThread'.": 233 | loop = asyncio.new_event_loop() 234 | asyncio.set_event_loop(loop) 235 | conn.close() 236 | else: 237 | raise e 238 | 239 | except Exception as e: 240 | logger.error('oops in gremlin', exc_info=True) 241 | raise e 242 | 243 | return {'nodes_df': nodes_df, 'edges_df': edges_df, 'url': url, 'res': res} 244 | 245 | 246 | def main_area(url, nodes, edges, state): 247 | 248 | logger.debug('rendering main area, with url: %s', url) 249 | GraphistrySt().render_url(url) 250 | 251 | # Get the count by state of visits shown 252 | bar_chart_data = edges[edges['label'] == 'visited'] 253 | group_label = 'state' 254 | if not state == 'All States': # If a state is chosen group by city 255 | group_label = 'city' 256 | bar_chart_data['count'] = bar_chart_data.groupby( 257 | group_label)[group_label].transform('count') 258 | bar_chart_data = bar_chart_data[[group_label, 'count']].drop_duplicates().reset_index()[ 259 | [group_label, 'count']] 260 | # Sort the values by group_label 261 | bar_chart_data.sort_values(by=[group_label], inplace=True) 262 | chart = alt.Chart(bar_chart_data).mark_bar().encode( 263 | x=group_label, 264 | y='count') 265 | st.altair_chart(chart, use_container_width=True) 266 | # Show a datatable with the values transposed 267 | st.dataframe(bar_chart_data.set_index(group_label).T) 268 | 269 | st.markdown(f''' 270 | Neptune Load Time (s): {float(metrics['neptune_time']):0.2f} | 271 | Graphistry Load Time (s): {float(metrics['graphistry_time']):0.2f} | 272 | Node Count: {metrics['node_cnt']} | 273 | Edge Count: {metrics['edge_cnt']} | 274 | Property Count: {metrics['prop_cnt']} 275 | ''', unsafe_allow_html=True) 276 | 277 | 278 | ############################################ 279 | # 280 | # PIPELINE FLOW 281 | # 282 | ############################################ 283 | 284 | 285 | def run_all(): 286 | 287 | custom_css() 288 | 289 | try: 290 | 291 | # Render sidebar and get current settings 292 | sidebar_filters = sidebar_area() 293 | 294 | # Compute filter pipeline (with auto-caching based on filter setting inputs) 295 | # Selective mark these as URL params as well 296 | filter_pipeline_result = run_filters(**sidebar_filters) 297 | 298 | # Render main viz area based on computed filter pipeline results and sidebar settings if data is returned 299 | if filter_pipeline_result['nodes_df'].size > 0: 300 | main_area(filter_pipeline_result['url'], 301 | 
filter_pipeline_result['nodes_df'], 302 | filter_pipeline_result['edges_df'], 303 | sidebar_filters['state']) 304 | else: # render a message 305 | st.write("No data matching the specfiied criteria is found") 306 | 307 | except Exception as exn: 308 | st.write('Error loading dashboard') 309 | st.write(exn) 310 | -------------------------------------------------------------------------------- /src/python/views/demo_neptune_03_c360/__init__.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import graphistry 3 | import os 4 | import pandas as pd 5 | import streamlit as st 6 | from components import GraphistrySt, URLParam 7 | from neptune_helper import gremlin_helper, df_helper 8 | from css import all_css 9 | import time 10 | import altair as alt 11 | 12 | from gremlin_python import statics 13 | from gremlin_python.process.graph_traversal import __ 14 | from gremlin_python.process.traversal import WithOptions, T, TextP 15 | import logging 16 | 17 | ############################################ 18 | # 19 | # DASHBOARD SETTINGS 20 | # 21 | ############################################ 22 | # Controls how entrypoint.py picks it up 23 | 24 | 25 | app_id = 'app_neptune_03' 26 | logger = logging.getLogger(app_id) 27 | urlParams = URLParam(app_id) 28 | node_id_col = 'id' 29 | src_id_col = 'source' 30 | dst_id_col = 'target' 31 | node_label_col = 'label' 32 | edge_label_col = 'label' 33 | 34 | # Setup a structure to hold metrics 35 | metrics = {'neptune_time': 0, 'graphistry_time': 0, 36 | 'node_cnt': 0, 'edge_cnt': 0, 'prop_cnt': 0} 37 | 38 | 39 | # Define the name of the view 40 | def info(): 41 | return { 42 | 'id': app_id, 43 | 'name': 'GREMLIN: Customer 360', 44 | 'tags': ['demo', 'neptune_demo'] 45 | } 46 | 47 | 48 | def run(): 49 | run_all() 50 | 51 | 52 | ############################################ 53 | # 54 | # PIPELINE PIECES 55 | # 56 | ############################################ 57 | 58 | 59 | # Have fun! 
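# Aside (illustrative; mirrors main_area in demo_neptune_02 above): the per-state /
# per-city visit counts could equivalently be computed with a groupby before charting:
#
#   counts = edges[edges['label'] == 'visited'] \
#       .groupby(group_label).size().reset_index(name='count')
#   chart = alt.Chart(counts).mark_bar().encode(x=group_label, y='count')
#   st.altair_chart(chart, use_container_width=True)
#
# which also sidesteps the pandas SettingWithCopyWarning that assigning a 'count'
# column into a filtered slice can trigger.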
60 | def custom_css(): 61 | all_css() 62 | st.markdown( 63 | """""", unsafe_allow_html=True) 66 | 67 | 68 | # Given URL params, render left sidebar form and return combined filter settings 69 | # https://docs.streamlit.io/en/stable/api.html#display-interactive-widgets 70 | def sidebar_area(): 71 | 72 | num_edges_init = urlParams.get_field('num_edges', 10000) 73 | 74 | transient_id = st.sidebar.text_input( 75 | 'Show me items starting with this transient id?', 76 | "") 77 | 78 | num_matches = st.sidebar.slider( 79 | 'Number of matches', min_value=1, max_value=100, value=50, step=5) 80 | 81 | num_edges = st.sidebar.slider( 82 | 'Number of edges', min_value=1, max_value=10000, value=num_edges_init, step=20) 83 | urlParams.set_field('num_edges', num_edges) 84 | 85 | return {'num_edges': num_edges, 'num_matches': num_matches, 'transient_id': transient_id} 86 | 87 | 88 | def plot_url(nodes_df, edges_df): 89 | global metrics 90 | nodes_df = df_helper.flatten_df(nodes_df) 91 | edges_df = df_helper.flatten_df(edges_df) 92 | 93 | logger.info('Starting graphistry plot') 94 | tic = time.perf_counter() 95 | g = graphistry\ 96 | .edges(edges_df)\ 97 | .bind(source=src_id_col, destination=dst_id_col)\ 98 | .nodes(nodes_df)\ 99 | .bind(node=node_id_col) 100 | 101 | if not (node_label_col is None): 102 | g = g.bind(point_title=node_label_col) 103 | 104 | if not (edge_label_col is None): 105 | g = g.bind(edge_title=edge_label_col) 106 | 107 | url = g\ 108 | .settings(url_params={ 109 | 'bg': '%23' + 'f0f2f6' 110 | })\ 111 | .plot(render=False) 112 | toc = time.perf_counter() 113 | metrics['graphistry_time'] = toc - tic 114 | logger.info(f'Graphisty Time: {metrics["graphistry_time"]}') 115 | logger.info('Generated viz, got back urL: %s', url) 116 | 117 | return url 118 | 119 | 120 | def path_to_df(p): 121 | nodes = {} 122 | edges = {} 123 | 124 | for triple in p: 125 | 126 | src_id = triple[0][T.id] 127 | nodes[src_id] = df_helper.vertex_to_dict(triple[0]) 128 | 129 | dst_id = triple[2][T.id] 130 | nodes[dst_id] = df_helper.vertex_to_dict(triple[2]) 131 | 132 | edges[triple[1][T.id]] = df_helper.edge_to_dict( 133 | triple[1], src_id, dst_id) 134 | 135 | return pd.DataFrame(nodes.values()), pd.DataFrame(edges.values()) 136 | 137 | 138 | # Given filter settings, generate/cache/return dataframes & viz 139 | #@st.cache(suppress_st_warning=True, allow_output_mutation=True) 140 | @st.cache_data 141 | def run_filters(num_edges, num_matches, transient_id): 142 | global metrics 143 | g, conn = gremlin_helper.connect_to_neptune() 144 | 145 | logger.info('Querying neptune') 146 | tic = time.perf_counter() 147 | t = g.V().hasLabel('transientId') 148 | if not transient_id == "": 149 | # If using Neptune full text search this will perform much faster than the built in Gremlin text search 150 | t = t.has('uid', TextP.containing(transient_id)) 151 | res = t.limit(num_matches).bothE().otherV().limit(num_edges).path().by( 152 | __.valueMap().with_(WithOptions.tokens)).toList() 153 | 154 | toc = time.perf_counter() 155 | logger.info(f'Query Execution: {toc-tic:0.02f} seconds') 156 | logger.debug('Query Result Count: %s', len(res)) 157 | metrics['neptune_time'] = toc - tic 158 | 159 | nodes_df, edges_df = path_to_df(res) 160 | 161 | # Calculate the metrics 162 | metrics['node_cnt'] = nodes_df.size 163 | metrics['edge_cnt'] = edges_df.size 164 | metrics['prop_cnt'] = (nodes_df.size * nodes_df.columns.size) + \ 165 | (edges_df.size * edges_df.columns.size) 166 | 167 | if nodes_df.size > 0: 168 | url = plot_url(nodes_df, edges_df) 
169 | else: 170 | url = "" 171 | 172 | logger.info("Finished compute phase") 173 | 174 | try: 175 | conn.close() 176 | 177 | except RuntimeError as e: 178 | if str(e) == "There is no current event loop in thread 'ScriptRunner.scriptThread'.": 179 | loop = asyncio.new_event_loop() 180 | asyncio.set_event_loop(loop) 181 | conn.close() 182 | else: 183 | raise e 184 | 185 | except Exception as e: 186 | logger.error('oops in gremlin', exc_info=True) 187 | raise e 188 | 189 | return {'nodes_df': nodes_df, 'edges_df': edges_df, 'url': url, 'res': res} 190 | 191 | 192 | def main_area(url, nodes, edges): 193 | 194 | logger.debug('rendering main area, with url: %s', url) 195 | GraphistrySt().render_url(url) 196 | 197 | 198 | ############################################ 199 | # 200 | # PIPELINE FLOW 201 | # 202 | ############################################ 203 | 204 | 205 | def run_all(): 206 | 207 | custom_css() 208 | 209 | try: 210 | 211 | # Render sidebar and get current settings 212 | sidebar_filters = sidebar_area() 213 | 214 | # Compute filter pipeline (with auto-caching based on filter setting inputs) 215 | # Selective mark these as URL params as well 216 | filter_pipeline_result = run_filters(**sidebar_filters) 217 | 218 | # Render main viz area based on computed filter pipeline results and sidebar settings if data is returned 219 | if filter_pipeline_result['nodes_df'].size > 0: 220 | main_area(filter_pipeline_result['url'], 221 | filter_pipeline_result['nodes_df'], 222 | filter_pipeline_result['edges_df']) 223 | else: # render a message 224 | st.write("No data matching the specfiied criteria is found") 225 | 226 | except Exception as exn: 227 | st.write('Error loading dashboard') 228 | st.write(exn) 229 | -------------------------------------------------------------------------------- /src/python/views/demo_rapids_01_simple/__init__.py: -------------------------------------------------------------------------------- 1 | import graphistry, pandas as pd, streamlit as st 2 | from components import GraphistrySt, URLParam 3 | from css import all_css 4 | import logging 5 | 6 | ############################################ 7 | # 8 | # DASHBOARD SETTINGS 9 | # 10 | ############################################ 11 | # Controls how entrypoint.py picks it up 12 | 13 | 14 | app_id = 'app_rapids_01' 15 | logger = logging.getLogger(app_id) 16 | urlParams = URLParam(app_id) 17 | 18 | 19 | def info(): 20 | return { 21 | 'id': app_id, 22 | 'name': 'RAPIDS: RAPIDS (cudf)', 23 | 'tags': ['demo', 'demo_rapids'] 24 | } 25 | 26 | 27 | def run(): 28 | run_all() 29 | 30 | 31 | ############################################ 32 | # 33 | # PIPELINE PIECES 34 | # 35 | ############################################ 36 | 37 | 38 | # Have fun! 
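# Aside (hypothetical sketch, following the full-text-search comment in
# demo_neptune_03 above): with Neptune's OpenSearch-backed full-text search enabled,
# the TextP.containing() vertex scan could be pushed down to the search index, e.g.:
#
#   t = (g.withSideEffect('Neptune#fts.endpoint', fts_endpoint)  # fts_endpoint: assumed deployment config
#         .V().hasLabel('transientId')
#         .has('uid', 'Neptune#fts ' + transient_id + '*'))
#
# which AWS documents as substantially faster than in-database string predicates
# on large graphs.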
39 | def custom_css(): 40 | all_css() 41 | st.markdown( 42 | """""", unsafe_allow_html=True) 45 | 46 | 47 | # Given URL params, render left sidebar form and return combined filter settings 48 | # https://docs.streamlit.io/en/stable/api.html#display-interactive-widgets 49 | def sidebar_area(): 50 | 51 | # regular param (not in url) 52 | e = st.sidebar.number_input('Number of edges', min_value=10, max_value=100000, value=100, step=20) 53 | 54 | # deep-linkable param (in url) 55 | n_init = urlParams.get_field('N', 100) 56 | n = st.sidebar.number_input('Number of nodes', min_value=10, max_value=100000, value=n_init, step=20) 57 | urlParams.set_field('N', n) 58 | 59 | return {'num_nodes': n, 'num_edges': e} 60 | 61 | 62 | # Given filter settings, generate/cache/return dataframes & viz 63 | #@st.cache(suppress_st_warning=True, allow_output_mutation=True) 64 | @st.cache_data 65 | def run_filters(num_nodes, num_edges): 66 | 67 | try: 68 | import cudf 69 | except Exception as e: 70 | st.exception(RuntimeError('Failed importing cudf')) 71 | raise e 72 | 73 | nodes_df = cudf.DataFrame({ 'n': [x for x in range(0, num_nodes)] }) 74 | edges_df = cudf.DataFrame({ 75 | 's': [x % num_nodes for x in range(0, num_edges)], 76 | 'd': [(x + 1) % num_nodes for x in range(0, num_edges)], 77 | }) 78 | graph_url = graphistry.nodes(nodes_df).edges(edges_df) \ 79 | .bind(source='s', destination='d', node='n')\ 80 | .plot(render=False) 81 | return { 'nodes_df': nodes_df.to_pandas(), 'edges_df': edges_df.to_pandas(), 'graph_url': graph_url } 82 | 83 | 84 | def main_area(num_nodes, num_edges, nodes_df, edges_df, graph_url): 85 | logger.debug('rendering main area, with url: %s', graph_url) 86 | GraphistrySt().render_url(graph_url) 87 | st.header('Edges (RAPIDS GPU cudf DataFrame)') 88 | st.write(edges_df) 89 | 90 | 91 | ############################################ 92 | # 93 | # PIPELINE FLOW 94 | # 95 | ############################################ 96 | 97 | 98 | def run_all(): 99 | 100 | custom_css() 101 | 102 | try: 103 | 104 | # Render sidebar and get current settings 105 | sidebar_filters = sidebar_area() 106 | 107 | # Compute filter pipeline (with auto-caching based on filter setting inputs) 108 | # Selective mark these as URL params as well 109 | filter_pipeline_result = run_filters(**sidebar_filters) 110 | 111 | # Render main viz area based on computed filter pipeline results and sidebar settings 112 | main_area(**sidebar_filters, **filter_pipeline_result) 113 | 114 | except Exception as exn: 115 | st.write('Error loading dashboard') 116 | st.write(exn) 117 | -------------------------------------------------------------------------------- /src/streamlit/config.toml: -------------------------------------------------------------------------------- 1 | 2 | [browser] 3 | gatherUsageStats = false 4 | 5 | [theme] 6 | #base="dark" 7 | #primaryColor="#00ff98" 8 | #secondaryBackgroundColor="#182333" 9 | #textColor="#dae5fd" 10 | 11 | 12 | ### must be last: entrypoint appends baseUrlPath = \"$BASE_PATH\"\n\ 13 | [server] 14 | enableXsrfProtection = false 15 | enableCORS = false 16 | baseUrlPath = "$BASE_PATH" -------------------------------------------------------------------------------- /src/streamlit/credentials.toml: -------------------------------------------------------------------------------- 1 | [general] 2 | email = "" --------------------------------------------------------------------------------
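A closing note on how these pieces fit together: each views/*/__init__.py above follows the same contract of a module-level info() returning metadata (id, name, tags, optional enabled) plus a run() entrypoint, which src/python/entrypoint.py (not shown in this listing) presumably uses to discover and register dashboards. A minimal sketch of such a discovery loop, assuming only the contract visible in these files:

    # Hypothetical loader sketch; the real logic lives in src/python/entrypoint.py (not shown).
    import importlib
    import pkgutil

    import views  # the src/python/views package


    def discover_apps():
        apps = {}
        for mod_info in pkgutil.iter_modules(views.__path__):
            mod = importlib.import_module(f'views.{mod_info.name}')
            if not (hasattr(mod, 'info') and hasattr(mod, 'run')):
                continue  # not a dashboard module
            meta = mod.info()
            if meta.get('enabled', True):  # views may opt out via 'enabled': False
                apps[meta['id']] = {**meta, 'run': mod.run}
        return apps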