├── .gitignore
├── README.md
├── appwrite
│   ├── README.md
│   ├── code
│   │   ├── .env.example
│   │   └── docker-compose.yml
│   ├── update.js
│   └── update.sh
├── dify
│   ├── README.md
│   ├── code
│   │   ├── .env.example
│   │   ├── README.md
│   │   ├── certbot
│   │   │   ├── README.md
│   │   │   ├── docker-entrypoint.sh
│   │   │   └── update-cert.template.txt
│   │   ├── couchbase-server
│   │   │   ├── Dockerfile
│   │   │   └── init-cbserver.sh
│   │   ├── docker-compose-template.yaml
│   │   ├── docker-compose.middleware.yaml
│   │   ├── docker-compose.png
│   │   ├── docker-compose.yaml
│   │   ├── elasticsearch
│   │   │   └── docker-entrypoint.sh
│   │   ├── generate_docker_compose
│   │   ├── middleware.env.example
│   │   ├── nginx
│   │   │   ├── conf.d
│   │   │   │   └── default.conf.template
│   │   │   ├── docker-entrypoint.sh
│   │   │   ├── https.conf.template
│   │   │   ├── nginx.conf.template
│   │   │   ├── proxy.conf.template
│   │   │   └── ssl
│   │   │       └── .gitkeep
│   │   ├── pgvector
│   │   │   └── docker-entrypoint.sh
│   │   ├── ssrf_proxy
│   │   │   ├── docker-entrypoint.sh
│   │   │   └── squid.conf.template
│   │   ├── startupscripts
│   │   │   ├── init.sh
│   │   │   └── init_user.script
│   │   ├── tidb
│   │   │   ├── config
│   │   │   │   ├── pd.toml
│   │   │   │   ├── tiflash-learner.toml
│   │   │   │   └── tiflash.toml
│   │   │   └── docker-compose.yaml
│   │   └── volumes
│   │       ├── myscale
│   │       │   └── config
│   │       │       └── users.d
│   │       │           └── custom_users_config.xml
│   │       ├── oceanbase
│   │       │   └── init.d
│   │       │       └── vec_memory.sql
│   │       ├── opensearch
│   │       │   └── opensearch_dashboards.yml
│   │       └── sandbox
│   │           ├── conf
│   │           │   ├── config.yaml
│   │           │   └── config.yaml.example
│   │           └── dependencies
│   │               └── python-requirements.txt
│   ├── update.js
│   └── update.sh
├── package-lock.json
├── package.json
├── plane
│   ├── README.md
│   ├── code
│   │   ├── .env.example
│   │   ├── README.md
│   │   ├── build.yml
│   │   ├── docker-compose.yml
│   │   ├── images
│   │   │   ├── download.png
│   │   │   ├── migrate-error.png
│   │   │   ├── restart.png
│   │   │   ├── started.png
│   │   │   ├── stopped.png
│   │   │   └── upgrade.png
│   │   ├── install.sh
│   │   ├── migration-0.13-0.14.sh
│   │   ├── restore.sh
│   │   └── swarm.sh
│   ├── update.js
│   └── update.sh
├── supabase
│   ├── README.md
│   ├── code
│   │   ├── .env.example
│   │   ├── .gitignore
│   │   ├── README.md
│   │   ├── dev
│   │   │   ├── data.sql
│   │   │   └── docker-compose.dev.yml
│   │   ├── docker-compose.s3.yml
│   │   ├── docker-compose.yml
│   │   ├── reset.sh
│   │   └── volumes
│   │       ├── api
│   │       │   └── kong.yml
│   │       ├── db
│   │       │   ├── _supabase.sql
│   │       │   ├── init
│   │       │   │   └── data.sql
│   │       │   ├── jwt.sql
│   │       │   ├── logs.sql
│   │       │   ├── pooler.sql
│   │       │   ├── realtime.sql
│   │       │   ├── roles.sql
│   │       │   └── webhooks.sql
│   │       ├── functions
│   │       │   ├── hello
│   │       │   │   └── index.ts
│   │       │   └── main
│   │       │       └── index.ts
│   │       ├── logs
│   │       │   └── vector.yml
│   │       └── pooler
│   │           └── pooler.exs
│   ├── update.js
│   └── update.sh
├── twenty
│   ├── README.md
│   ├── code
│   │   ├── .env.example
│   │   ├── Makefile
│   │   ├── docker-compose.yml
│   │   ├── k8s
│   │   │   ├── README.md
│   │   │   ├── manifests
│   │   │   │   ├── deployment-db.yaml
│   │   │   │   ├── deployment-redis.yaml
│   │   │   │   ├── deployment-server.yaml
│   │   │   │   ├── deployment-worker.yaml
│   │   │   │   ├── ingress.yaml
│   │   │   │   ├── pv-db.yaml
│   │   │   │   ├── pv-docker-data.yaml
│   │   │   │   ├── pv-server.yaml
│   │   │   │   ├── pvc-db.yaml
│   │   │   │   ├── pvc-docker-data.yaml
│   │   │   │   ├── pvc-server.yaml
│   │   │   │   ├── service-db.yaml
│   │   │   │   ├── service-redis.yaml
│   │   │   │   └── service-server.yaml
│   │   │   └── terraform
│   │   │       ├── .terraform-docs.yml
│   │   │       ├── README.md
│   │   │       ├── deployment-db.tf
│   │   │       ├── deployment-redis.tf
│   │   │       ├── deployment-server.tf
│   │   │       ├── deployment-worker.tf
│   │   │       ├── ingress.tf
│   │   │       ├── main.tf
│   │   │       ├── namespace.tf
│   │   │       ├── pv-db.tf
│   │   │       ├── pv-docker-data.tf
│   │   │       ├── pv-server.tf
│   │   │       ├── pvc-db.tf
│   │   │       ├── pvc-docker-data.tf
│   │   │       ├── pvc-server.tf
│   │   │       ├── secret.tf
│   │   │       ├── service-db.tf
│   │   │       ├── service-redis.tf
│   │   │       ├── service-server.tf
│   │   │       └── variables.tf
│   │   ├── podman
│   │   │   ├── README.md
│   │   │   ├── install-systemd-user-service
│   │   │   ├── manual-steps-to-deploy-twenty-on-podman
│   │   │   ├── podman-compose.yml
│   │   │   └── twentycrm.service
│   │   ├── scripts
│   │   │   ├── 1-click.sh
│   │   │   └── install.sh
│   │   ├── twenty-postgres-spilo
│   │   │   └── Dockerfile
│   │   ├── twenty-website
│   │   │   └── Dockerfile
│   │   └── twenty
│   │       ├── Dockerfile
│   │       └── entrypoint.sh
│   ├── update.js
│   └── update.sh
├── update.js
└── utils.js
/.gitignore:
--------------------------------------------------------------------------------
1 | repo
2 | node_modules
3 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Easypanel Compose
2 |
--------------------------------------------------------------------------------
/appwrite/README.md:
--------------------------------------------------------------------------------
1 | # Appwrite
2 |
3 | - copied from https://github.com/appwrite/appwrite
4 | - https://appwrite.io/install/compose
5 | - https://appwrite.io/install/env
6 | - removed `ports`
7 | - removed `container_name`
8 |
--------------------------------------------------------------------------------
/appwrite/code/.env.example:
--------------------------------------------------------------------------------
1 | _APP_ENV=production
2 | _APP_LOCALE=en
3 | _APP_OPTIONS_ABUSE=enabled
4 | _APP_OPTIONS_FORCE_HTTPS=disabled
5 | _APP_OPTIONS_FUNCTIONS_FORCE_HTTPS=disabled
6 | _APP_OPTIONS_ROUTER_PROTECTION=disabled
7 | _APP_OPENSSL_KEY_V1=your-secret-key
8 | _APP_DOMAIN=localhost
9 | _APP_CUSTOM_DOMAIN_DENY_LIST=example.com,test.com,app.example.com
10 | _APP_DOMAIN_FUNCTIONS=functions.localhost
11 | _APP_DOMAIN_TARGET=localhost
12 | _APP_CONSOLE_WHITELIST_ROOT=enabled
13 | _APP_CONSOLE_WHITELIST_EMAILS=
14 | _APP_CONSOLE_WHITELIST_IPS=
15 | _APP_CONSOLE_HOSTNAMES=
16 | _APP_SYSTEM_EMAIL_NAME=Appwrite
17 | _APP_SYSTEM_EMAIL_ADDRESS=noreply@appwrite.io
18 | _APP_SYSTEM_TEAM_EMAIL=team@appwrite.io
19 | _APP_SYSTEM_RESPONSE_FORMAT=
20 | _APP_SYSTEM_SECURITY_EMAIL_ADDRESS=certs@appwrite.io
21 | _APP_EMAIL_SECURITY=
22 | _APP_EMAIL_CERTIFICATES=
23 | _APP_USAGE_STATS=enabled
24 | _APP_LOGGING_PROVIDER=
25 | _APP_LOGGING_CONFIG=
26 | _APP_USAGE_AGGREGATION_INTERVAL=30
27 | _APP_USAGE_TIMESERIES_INTERVAL=30
28 | _APP_USAGE_DATABASE_INTERVAL=900
29 | _APP_WORKER_PER_CORE=6
30 | _APP_CONSOLE_SESSION_ALERTS=disabled
31 | _APP_COMPRESSION_ENABLED=enabled
32 | _APP_COMPRESSION_MIN_SIZE_BYTES=1024
33 | _APP_REDIS_HOST=redis
34 | _APP_REDIS_PORT=6379
35 | _APP_REDIS_USER=
36 | _APP_REDIS_PASS=
37 | _APP_DB_HOST=mariadb
38 | _APP_DB_PORT=3306
39 | _APP_DB_SCHEMA=appwrite
40 | _APP_DB_USER=user
41 | _APP_DB_PASS=password
42 | _APP_DB_ROOT_PASS=rootsecretpassword
43 | _APP_INFLUXDB_HOST=influxdb
44 | _APP_INFLUXDB_PORT=8086
45 | _APP_STATSD_HOST=telegraf
46 | _APP_STATSD_PORT=8125
47 | _APP_SMTP_HOST=
48 | _APP_SMTP_PORT=
49 | _APP_SMTP_SECURE=
50 | _APP_SMTP_USERNAME=
51 | _APP_SMTP_PASSWORD=
52 | _APP_SMS_PROVIDER=
53 | _APP_SMS_FROM=
54 | _APP_STORAGE_LIMIT=30000000
55 | _APP_STORAGE_PREVIEW_LIMIT=20000000
56 | _APP_STORAGE_ANTIVIRUS=disabled
57 | _APP_STORAGE_ANTIVIRUS_HOST=clamav
58 | _APP_STORAGE_ANTIVIRUS_PORT=3310
59 | _APP_STORAGE_DEVICE=local
60 | _APP_STORAGE_S3_ACCESS_KEY=
61 | _APP_STORAGE_S3_SECRET=
62 | _APP_STORAGE_S3_REGION=us-east-1
63 | _APP_STORAGE_S3_BUCKET=
64 | _APP_STORAGE_S3_ENDPOINT=
65 | _APP_STORAGE_DO_SPACES_ACCESS_KEY=
66 | _APP_STORAGE_DO_SPACES_SECRET=
67 | _APP_STORAGE_DO_SPACES_REGION=us-east-1
68 | _APP_STORAGE_DO_SPACES_BUCKET=
69 | _APP_STORAGE_BACKBLAZE_ACCESS_KEY=
70 | _APP_STORAGE_BACKBLAZE_SECRET=
71 | _APP_STORAGE_BACKBLAZE_REGION=us-west-004
72 | _APP_STORAGE_BACKBLAZE_BUCKET=
73 | _APP_STORAGE_LINODE_ACCESS_KEY=
74 | _APP_STORAGE_LINODE_SECRET=
75 | _APP_STORAGE_LINODE_REGION=eu-central-1
76 | _APP_STORAGE_LINODE_BUCKET=
77 | _APP_STORAGE_WASABI_ACCESS_KEY=
78 | _APP_STORAGE_WASABI_SECRET=
79 | _APP_STORAGE_WASABI_REGION=eu-central-1
80 | _APP_STORAGE_WASABI_BUCKET=
81 | _APP_FUNCTIONS_SIZE_LIMIT=30000000
82 | _APP_FUNCTIONS_BUILD_SIZE_LIMIT=2000000000
83 | _APP_FUNCTIONS_TIMEOUT=900
84 | _APP_FUNCTIONS_BUILD_TIMEOUT=900
85 | _APP_FUNCTIONS_CONTAINERS=10
86 | _APP_FUNCTIONS_CPUS=0
87 | _APP_FUNCTIONS_MEMORY=0
88 | _APP_FUNCTIONS_MEMORY_SWAP=0
89 | _APP_FUNCTIONS_RUNTIMES=node-16.0,php-8.0,python-3.9,ruby-3.0
90 | _APP_EXECUTOR_SECRET=your-secret-key
91 | _APP_EXECUTOR_HOST=http://exc1/v1
92 | _APP_EXECUTOR_RUNTIME_NETWORK=appwrite_runtimes
93 | _APP_FUNCTIONS_ENVS=node-16.0,php-7.4,python-3.9,ruby-3.0
94 | _APP_FUNCTIONS_INACTIVE_THRESHOLD=60
95 | DOCKERHUB_PULL_USERNAME=
96 | DOCKERHUB_PULL_PASSWORD=
97 | DOCKERHUB_PULL_EMAIL=
98 | OPEN_RUNTIMES_NETWORK=appwrite_runtimes
99 | _APP_FUNCTIONS_RUNTIMES_NETWORK=runtimes
100 | _APP_DOCKER_HUB_USERNAME=
101 | _APP_DOCKER_HUB_PASSWORD=
102 | _APP_FUNCTIONS_MAINTENANCE_INTERVAL=3600
103 | _APP_VCS_GITHUB_APP_NAME=
104 | _APP_VCS_GITHUB_PRIVATE_KEY=
105 | _APP_VCS_GITHUB_APP_ID=
106 | _APP_VCS_GITHUB_CLIENT_ID=
107 | _APP_VCS_GITHUB_CLIENT_SECRET=
108 | _APP_VCS_GITHUB_WEBHOOK_SECRET=
109 | _APP_MAINTENANCE_INTERVAL=86400
110 | _APP_MAINTENANCE_DELAY=0
111 | _APP_MAINTENANCE_RETENTION_CACHE=2592000
112 | _APP_MAINTENANCE_RETENTION_EXECUTION=1209600
113 | _APP_MAINTENANCE_RETENTION_AUDIT=1209600
114 | _APP_MAINTENANCE_RETENTION_AUDIT_CONSOLE=15778800
115 | _APP_MAINTENANCE_RETENTION_ABUSE=86400
116 | _APP_MAINTENANCE_RETENTION_USAGE_HOURLY=8640000
117 | _APP_MAINTENANCE_RETENTION_SCHEDULES=86400
118 | _APP_GRAPHQL_MAX_BATCH_SIZE=10
119 | _APP_GRAPHQL_MAX_COMPLEXITY=250
120 | _APP_GRAPHQL_MAX_DEPTH=3
121 | _APP_MIGRATIONS_FIREBASE_CLIENT_ID=
122 | _APP_MIGRATIONS_FIREBASE_CLIENT_SECRET=
123 | _APP_ASSISTANT_OPENAI_API_KEY=
124 |
--------------------------------------------------------------------------------
/appwrite/update.js:
--------------------------------------------------------------------------------
1 | import utils from "../utils.js";
2 |
3 | await utils.downloadFile(
4 | "https://appwrite.io/install/compose",
5 | "./code/docker-compose.yml"
6 | );
7 | await utils.downloadFile(
8 | "https://appwrite.io/install/env",
9 | "./code/.env.example"
10 | );
11 |
12 | await utils.removeContainerNames("./code/docker-compose.yml");
13 | await utils.removePorts("./code/docker-compose.yml");
14 |
--------------------------------------------------------------------------------
/appwrite/update.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | if [ ! -d "./repo" ]; then
4 | git clone --depth 1 --branch main --single-branch https://github.com/appwrite/appwrite.git repo
5 | else
6 | cd repo
7 | git pull
8 | cd ..
9 | fi
10 |
11 | curl -s https://appwrite.io/install/compose > ./code/docker-compose.yml
12 | curl -s https://appwrite.io/install/env > ./code/.env.example
13 |
14 |
15 |
--------------------------------------------------------------------------------
/dify/README.md:
--------------------------------------------------------------------------------
1 | # Dify
2 |
3 | - copied from https://github.com/langgenius/dify
4 | - removed `container_name`
5 | - removed `ports`
6 |
--------------------------------------------------------------------------------
/dify/code/README.md:
--------------------------------------------------------------------------------
1 | ## README for Docker Deployment
2 |
3 | Welcome to the new `docker` directory for deploying Dify using Docker Compose. This README outlines the updates, deployment instructions, and migration details for existing users.
4 |
5 | ### What's Updated
6 |
7 | - **Certbot Container**: `docker-compose.yaml` now contains `certbot` for managing SSL certificates. This container automatically renews certificates and ensures secure HTTPS connections.
8 | For more information, refer to `docker/certbot/README.md`.
9 |
10 | - **Persistent Environment Variables**: Environment variables are now managed through a `.env` file, ensuring that your configurations persist across deployments.
11 |
12 | > What is `.env`?
13 | > The `.env` file is a crucial component in Docker and Docker Compose environments, serving as a centralized configuration file where you can define environment variables that are accessible to the containers at runtime. This file simplifies the management of environment settings across different stages of development, testing, and production, providing consistency and ease of configuration to deployments.
14 |
15 | - **Unified Vector Database Services**: All vector database services are now managed from a single Docker Compose file `docker-compose.yaml`. You can switch between different vector databases by setting the `VECTOR_STORE` environment variable in your `.env` file.
16 | - **Mandatory .env File**: A `.env` file is now required to run `docker compose up`. This file is crucial for configuring your deployment and for any custom settings to persist through upgrades.
17 |
18 | ### How to Deploy Dify with `docker-compose.yaml`
19 |
20 | 1. **Prerequisites**: Ensure Docker and Docker Compose are installed on your system.
21 | 2. **Environment Setup**:
22 | - Navigate to the `docker` directory.
23 | - Copy the `.env.example` file to a new file named `.env` by running `cp .env.example .env`.
24 | - Customize the `.env` file as needed. Refer to the `.env.example` file for detailed configuration options.
25 | 3. **Running the Services**:
26 | - Execute `docker compose up` from the `docker` directory to start the services (a consolidated example follows this list).
27 | - To specify a vector database, set the `VECTOR_STORE` variable in your `.env` file to your desired vector database service, such as `milvus`, `weaviate`, or `opensearch`.
28 | 4. **SSL Certificate Setup**:
29 | - Refer to `docker/certbot/README.md` to set up SSL certificates using Certbot.
30 | 5. **OpenTelemetry Collector Setup**:
31 | - Change `ENABLE_OTEL` to `true` in `.env`.
32 | - Configure `OTLP_BASE_ENDPOINT` properly.
33 |
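The steps above, consolidated into a minimal shell sketch (run from the directory that contains `docker-compose.yaml`; `weaviate` is only an example value for `VECTOR_STORE`):

```shell
cd docker                      # directory containing docker-compose.yaml
cp .env.example .env           # create your local configuration
# edit .env as needed, e.g. set VECTOR_STORE=weaviate (or milvus, opensearch, ...)
docker compose up -d           # start the services; add --profile certbot for SSL setup
```
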
34 | ### How to Deploy Middleware for Developing Dify
35 |
36 | 1. **Middleware Setup**:
37 | - Use the `docker-compose.middleware.yaml` for setting up essential middleware services like databases and caches.
38 | - Navigate to the `docker` directory.
39 | - Ensure the `middleware.env` file is created by running `cp middleware.env.example middleware.env` (refer to the `middleware.env.example` file).
40 | 2. **Running Middleware Services**:
41 | - Navigate to the `docker` directory.
42 | - Execute `docker compose -f docker-compose.middleware.yaml --profile weaviate -p dify up -d` to start the middleware services (change the profile to another vector database if you are not using Weaviate; a consolidated example follows this list).
43 |
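A minimal sketch of the middleware steps above (the `weaviate` profile matches the command shown; substitute your vector database's profile if you use a different one):

```shell
cd docker                                        # directory containing docker-compose.middleware.yaml
cp middleware.env.example middleware.env         # create the middleware configuration
docker compose -f docker-compose.middleware.yaml --profile weaviate -p dify up -d
```
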
44 | ### Migration for Existing Users
45 |
46 | For users migrating from the `docker-legacy` setup:
47 |
48 | 1. **Review Changes**: Familiarize yourself with the new `.env` configuration and Docker Compose setup.
49 | 2. **Transfer Customizations**:
50 | - If you have customized configurations such as `docker-compose.yaml`, `ssrf_proxy/squid.conf`, or `nginx/conf.d/default.conf`, you will need to reflect these changes in the `.env` file you create.
51 | 3. **Data Migration**:
52 | - Ensure that data from services like databases and caches is backed up and migrated appropriately to the new structure if necessary.
53 |
54 | ### Overview of `.env`
55 |
56 | #### Key Modules and Customization
57 |
58 | - **Vector Database Services**: Depending on the type of vector database used (`VECTOR_STORE`), users can set specific endpoints, ports, and authentication details.
59 | - **Storage Services**: Depending on the storage type (`STORAGE_TYPE`), users can configure specific settings for S3, Azure Blob, Google Storage, etc.
60 | - **API and Web Services**: Users can define URLs and other settings that affect how the API and web frontend operate.
61 |
62 | #### Other notable variables
63 |
64 | The `.env.example` file provided in the Docker setup is extensive and covers a wide range of configuration options. It is structured into several sections, each pertaining to different aspects of the application and its services. Here are some of the key sections and variables (an illustrative fragment follows the list):
65 |
66 | 1. **Common Variables**:
67 | - `CONSOLE_API_URL`, `SERVICE_API_URL`: URLs for different API services.
68 | - `APP_WEB_URL`: Frontend application URL.
69 | - `FILES_URL`: Base URL for file downloads and previews.
70 |
71 | 2. **Server Configuration**:
72 | - `LOG_LEVEL`, `DEBUG`, `FLASK_DEBUG`: Logging and debug settings.
73 | - `SECRET_KEY`: A key for encrypting session cookies and other sensitive data.
74 |
75 | 3. **Database Configuration**:
76 | - `DB_USERNAME`, `DB_PASSWORD`, `DB_HOST`, `DB_PORT`, `DB_DATABASE`: PostgreSQL database credentials and connection details.
77 |
78 | 4. **Redis Configuration**:
79 | - `REDIS_HOST`, `REDIS_PORT`, `REDIS_PASSWORD`: Redis server connection settings.
80 |
81 | 5. **Celery Configuration**:
82 | - `CELERY_BROKER_URL`: Configuration for Celery message broker.
83 |
84 | 6. **Storage Configuration**:
85 | - `STORAGE_TYPE`, `S3_BUCKET_NAME`, `AZURE_BLOB_ACCOUNT_NAME`: Settings for file storage options like local, S3, Azure Blob, etc.
86 |
87 | 7. **Vector Database Configuration**:
88 | - `VECTOR_STORE`: Type of vector database (e.g., `weaviate`, `milvus`).
89 | - Specific settings for each vector store like `WEAVIATE_ENDPOINT`, `MILVUS_URI`.
90 |
91 | 8. **CORS Configuration**:
92 | - `WEB_API_CORS_ALLOW_ORIGINS`, `CONSOLE_CORS_ALLOW_ORIGINS`: Settings for cross-origin resource sharing.
93 |
94 | 9. **OpenTelemetry Configuration**:
95 | - `ENABLE_OTEL`: Enables the OpenTelemetry collector in the API service.
96 | - `OTLP_BASE_ENDPOINT`: Endpoint for your OTLP exporter.
97 |
98 | 10. **Other Service-Specific Environment Variables**:
99 | - Each service, such as `nginx`, `redis`, `db`, and the vector databases, has specific environment variables that are referenced directly in `docker-compose.yaml`.
100 |
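For illustration only, a hedged `.env` fragment touching the sections above (values are examples, not the authoritative defaults; always start from `.env.example`):

```properties
# Common URLs (often left empty when everything is served from a single domain)
CONSOLE_API_URL=
APP_WEB_URL=
# Server
SECRET_KEY=generate-with-openssl-rand-base64-42
# Database and Redis
DB_USERNAME=postgres
DB_PASSWORD=difyai123456
REDIS_HOST=redis
REDIS_PASSWORD=difyai123456
# Storage and vector store
STORAGE_TYPE=local
VECTOR_STORE=weaviate
WEAVIATE_ENDPOINT=http://weaviate:8080
```
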
101 | ### Additional Information
102 |
103 | - **Continuous Improvement Phase**: We are actively seeking feedback from the community to refine and enhance the deployment process. As more users adopt this new method, we will continue to make improvements based on your experiences and suggestions.
104 | - **Support**: For detailed configuration options and environment variable settings, refer to the `.env.example` file and the Docker Compose configuration files in the `docker` directory.
105 |
106 | This README aims to guide you through the deployment process using the new Docker Compose setup. For any issues or further assistance, please refer to the official documentation or contact support.
107 |
--------------------------------------------------------------------------------
/dify/code/certbot/README.md:
--------------------------------------------------------------------------------
1 | # Launching new servers with SSL certificates
2 |
3 | ## Short description
4 |
5 | Docker Compose certbot configuration with backward compatibility (it still works without the certbot container).
6 | Use `docker compose --profile certbot up` to use this feature.
7 |
8 | ## The simplest way for launching new servers with SSL certificates
9 |
10 | 1. Get Let's Encrypt certificates
11 | set `.env` values
12 | ```properties
13 | NGINX_SSL_CERT_FILENAME=fullchain.pem
14 | NGINX_SSL_CERT_KEY_FILENAME=privkey.pem
15 | NGINX_ENABLE_CERTBOT_CHALLENGE=true
16 | CERTBOT_DOMAIN=your_domain.com
17 | CERTBOT_EMAIL=example@your_domain.com
18 | ```
19 | execute command:
20 | ```shell
21 | docker network prune
22 | docker compose --profile certbot up --force-recreate -d
23 | ```
24 | then, after the containers have launched:
25 | ```shell
26 | docker compose exec -it certbot /bin/sh /update-cert.sh
27 | ```
28 | 2. Edit the `.env` file and run `docker compose --profile certbot up` again.
29 | additionally set this `.env` value:
30 | ```properties
31 | NGINX_HTTPS_ENABLED=true
32 | ```
33 | execute command:
34 | ```shell
35 | docker compose --profile certbot up -d --no-deps --force-recreate nginx
36 | ```
37 | Then you can access your server with HTTPS.
38 | [https://your_domain.com](https://your_domain.com)
39 |
40 | ## SSL certificates renewal
41 |
42 | For SSL certificates renewal, execute commands below:
43 |
44 | ```shell
45 | docker compose exec -it certbot /bin/sh /update-cert.sh
46 | docker compose exec nginx nginx -s reload
47 | ```
48 |
49 | ## Options for certbot
50 |
51 | The `CERTBOT_OPTIONS` key might be helpful for testing, e.g.,
52 |
53 | ```properties
54 | CERTBOT_OPTIONS=--dry-run
55 | ```
56 |
57 | To apply changes to `CERTBOT_OPTIONS`, regenerate the certbot container before updating the certificates.
58 |
59 | ```shell
60 | docker compose --profile certbot up -d --no-deps --force-recreate certbot
61 | docker compose exec -it certbot /bin/sh /update-cert.sh
62 | ```
63 |
64 | Then, reload the nginx container if necessary.
65 |
66 | ```shell
67 | docker compose exec nginx nginx -s reload
68 | ```
69 |
70 | ## For legacy servers
71 |
72 | To use the certificate files in the `nginx/ssl` directory as before, simply launch the containers WITHOUT the `--profile certbot` option.
73 |
74 | ```shell
75 | docker compose up -d
76 | ```
77 |
--------------------------------------------------------------------------------
/dify/code/certbot/docker-entrypoint.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | set -e
3 |
4 | printf '%s\n' "Docker entrypoint script is running"
5 |
6 | printf '%s\n' "\nChecking specific environment variables:"
7 | printf '%s\n' "CERTBOT_EMAIL: ${CERTBOT_EMAIL:-Not set}"
8 | printf '%s\n' "CERTBOT_DOMAIN: ${CERTBOT_DOMAIN:-Not set}"
9 | printf '%s\n' "CERTBOT_OPTIONS: ${CERTBOT_OPTIONS:-Not set}"
10 |
11 | printf '%s\n' "\nChecking mounted directories:"
12 | for dir in "/etc/letsencrypt" "/var/www/html" "/var/log/letsencrypt"; do
13 | if [ -d "$dir" ]; then
14 | printf '%s\n' "$dir exists. Contents:"
15 | ls -la "$dir"
16 | else
17 | printf '%s\n' "$dir does not exist."
18 | fi
19 | done
20 |
21 | printf '%s\n' "\nGenerating update-cert.sh from template"
22 | sed -e "s|\${CERTBOT_EMAIL}|$CERTBOT_EMAIL|g" \
23 | -e "s|\${CERTBOT_DOMAIN}|$CERTBOT_DOMAIN|g" \
24 | -e "s|\${CERTBOT_OPTIONS}|$CERTBOT_OPTIONS|g" \
25 | /update-cert.template.txt > /update-cert.sh
26 |
27 | chmod +x /update-cert.sh
28 |
29 | printf '%s\n' "\nExecuting command:" "$@"
30 | exec "$@"
31 |
--------------------------------------------------------------------------------
/dify/code/certbot/update-cert.template.txt:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | set -e
3 |
4 | DOMAIN="${CERTBOT_DOMAIN}"
5 | EMAIL="${CERTBOT_EMAIL}"
6 | OPTIONS="${CERTBOT_OPTIONS}"
7 | CERT_NAME="${DOMAIN}" # use the domain name as the certificate name
8 |
9 | # Check if the certificate already exists
10 | if [ -f "/etc/letsencrypt/renewal/${CERT_NAME}.conf" ]; then
11 | echo "Certificate exists. Attempting to renew..."
12 | certbot renew --noninteractive --cert-name ${CERT_NAME} --webroot --webroot-path=/var/www/html --email ${EMAIL} --agree-tos --no-eff-email ${OPTIONS}
13 | else
14 | echo "Certificate does not exist. Obtaining a new certificate..."
15 | certbot certonly --noninteractive --webroot --webroot-path=/var/www/html --email ${EMAIL} --agree-tos --no-eff-email -d ${DOMAIN} ${OPTIONS}
16 | fi
17 | echo "Certificate operation successful"
18 | # Note: Nginx reload should be handled outside this container
19 | echo "Please ensure to reload Nginx to apply any certificate changes."
20 |
--------------------------------------------------------------------------------
/dify/code/couchbase-server/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM couchbase/server:latest AS stage_base
2 | # FROM couchbase:latest AS stage_base
3 | COPY init-cbserver.sh /opt/couchbase/init/
4 | RUN chmod +x /opt/couchbase/init/init-cbserver.sh
5 |
--------------------------------------------------------------------------------
/dify/code/couchbase-server/init-cbserver.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # used to start couchbase server - can't get around this as docker compose only allows you to start one command - so we have to start couchbase like the standard couchbase Dockerfile would
3 | # https://github.com/couchbase/docker/blob/master/enterprise/couchbase-server/7.2.0/Dockerfile#L88
4 |
5 | /entrypoint.sh couchbase-server &
6 |
7 | # track if setup is complete so we don't try to setup again
8 | FILE=/opt/couchbase/init/setupComplete.txt
9 |
10 | if ! [ -f "$FILE" ]; then
11 | # used to automatically create the cluster based on environment variables
12 | # https://docs.couchbase.com/server/current/cli/cbcli/couchbase-cli-cluster-init.html
13 |
14 | echo $COUCHBASE_ADMINISTRATOR_USERNAME ":" $COUCHBASE_ADMINISTRATOR_PASSWORD
15 |
16 | sleep 20s
17 | /opt/couchbase/bin/couchbase-cli cluster-init -c 127.0.0.1 \
18 | --cluster-username $COUCHBASE_ADMINISTRATOR_USERNAME \
19 | --cluster-password $COUCHBASE_ADMINISTRATOR_PASSWORD \
20 | --services data,index,query,fts \
21 | --cluster-ramsize $COUCHBASE_RAM_SIZE \
22 | --cluster-index-ramsize $COUCHBASE_INDEX_RAM_SIZE \
23 | --cluster-eventing-ramsize $COUCHBASE_EVENTING_RAM_SIZE \
24 | --cluster-fts-ramsize $COUCHBASE_FTS_RAM_SIZE \
25 | --index-storage-setting default
26 |
27 | sleep 2s
28 |
29 | # used to auto create the bucket based on environment variables
30 | # https://docs.couchbase.com/server/current/cli/cbcli/couchbase-cli-bucket-create.html
31 |
32 | /opt/couchbase/bin/couchbase-cli bucket-create -c localhost:8091 \
33 | --username $COUCHBASE_ADMINISTRATOR_USERNAME \
34 | --password $COUCHBASE_ADMINISTRATOR_PASSWORD \
35 | --bucket $COUCHBASE_BUCKET \
36 | --bucket-ramsize $COUCHBASE_BUCKET_RAMSIZE \
37 | --bucket-type couchbase
38 |
39 | # create file so we know that the cluster is setup and don't run the setup again
40 | touch $FILE
41 | fi
42 | # docker compose will stop the container from running unless we do this
43 | # known issue and workaround
44 | tail -f /dev/null
45 |
--------------------------------------------------------------------------------
/dify/code/docker-compose.middleware.yaml:
--------------------------------------------------------------------------------
1 | services:
2 | # The postgres database.
3 | db:
4 | image: postgres:15-alpine
5 | restart: always
6 | env_file:
7 | - ./middleware.env
8 | environment:
9 | POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-difyai123456}
10 | POSTGRES_DB: ${POSTGRES_DB:-dify}
11 | PGDATA: ${PGDATA:-/var/lib/postgresql/data/pgdata}
12 | command: >
13 | postgres -c 'max_connections=${POSTGRES_MAX_CONNECTIONS:-100}'
14 | -c 'shared_buffers=${POSTGRES_SHARED_BUFFERS:-128MB}'
15 | -c 'work_mem=${POSTGRES_WORK_MEM:-4MB}'
16 | -c 'maintenance_work_mem=${POSTGRES_MAINTENANCE_WORK_MEM:-64MB}'
17 | -c 'effective_cache_size=${POSTGRES_EFFECTIVE_CACHE_SIZE:-4096MB}'
18 | volumes:
19 | - ${PGDATA_HOST_VOLUME:-./volumes/db/data}:/var/lib/postgresql/data
20 | ports:
21 | - "${EXPOSE_POSTGRES_PORT:-5432}:5432"
22 | healthcheck:
23 | test: [ "CMD", "pg_isready" ]
24 | interval: 1s
25 | timeout: 3s
26 | retries: 30
27 |
28 | # The redis cache.
29 | redis:
30 | image: redis:6-alpine
31 | restart: always
32 | env_file:
33 | - ./middleware.env
34 | environment:
35 | REDISCLI_AUTH: ${REDIS_PASSWORD:-difyai123456}
36 | volumes:
37 | # Mount the redis data directory to the container.
38 | - ${REDIS_HOST_VOLUME:-./volumes/redis/data}:/data
39 | # Set the redis password when startup redis server.
40 | command: redis-server --requirepass ${REDIS_PASSWORD:-difyai123456}
41 | ports:
42 | - "${EXPOSE_REDIS_PORT:-6379}:6379"
43 | healthcheck:
44 | test: [ "CMD", "redis-cli", "ping" ]
45 |
46 | # The DifySandbox
47 | sandbox:
48 | image: langgenius/dify-sandbox:0.2.12
49 | restart: always
50 | env_file:
51 | - ./middleware.env
52 | environment:
53 | # The DifySandbox configurations
54 | # Make sure you are changing this key for your deployment with a strong key.
55 | # You can generate a strong key using `openssl rand -base64 42`.
56 | API_KEY: ${SANDBOX_API_KEY:-dify-sandbox}
57 | GIN_MODE: ${SANDBOX_GIN_MODE:-release}
58 | WORKER_TIMEOUT: ${SANDBOX_WORKER_TIMEOUT:-15}
59 | ENABLE_NETWORK: ${SANDBOX_ENABLE_NETWORK:-true}
60 | HTTP_PROXY: ${SANDBOX_HTTP_PROXY:-http://ssrf_proxy:3128}
61 | HTTPS_PROXY: ${SANDBOX_HTTPS_PROXY:-http://ssrf_proxy:3128}
62 | SANDBOX_PORT: ${SANDBOX_PORT:-8194}
63 | PIP_MIRROR_URL: ${PIP_MIRROR_URL:-}
64 | volumes:
65 | - ./volumes/sandbox/dependencies:/dependencies
66 | - ./volumes/sandbox/conf:/conf
67 | healthcheck:
68 | test: [ "CMD", "curl", "-f", "http://localhost:8194/health" ]
69 | networks:
70 | - ssrf_proxy_network
71 |
72 | # plugin daemon
73 | plugin_daemon:
74 | image: langgenius/dify-plugin-daemon:0.0.10-local
75 | restart: always
76 | env_file:
77 | - ./middleware.env
78 | environment:
79 | # Use the shared environment variables.
80 | DB_HOST: ${DB_HOST:-db}
81 | DB_PORT: ${DB_PORT:-5432}
82 | DB_USERNAME: ${DB_USER:-postgres}
83 | DB_PASSWORD: ${DB_PASSWORD:-difyai123456}
84 | DB_DATABASE: ${DB_PLUGIN_DATABASE:-dify_plugin}
85 | REDIS_HOST: ${REDIS_HOST:-redis}
86 | REDIS_PORT: ${REDIS_PORT:-6379}
87 | REDIS_PASSWORD: ${REDIS_PASSWORD:-difyai123456}
88 | SERVER_PORT: ${PLUGIN_DAEMON_PORT:-5002}
89 | SERVER_KEY: ${PLUGIN_DAEMON_KEY:-lYkiYYT6owG+71oLerGzA7GXCgOT++6ovaezWAjpCjf+Sjc3ZtU+qUEi}
90 | MAX_PLUGIN_PACKAGE_SIZE: ${PLUGIN_MAX_PACKAGE_SIZE:-52428800}
91 | PPROF_ENABLED: ${PLUGIN_PPROF_ENABLED:-false}
92 | DIFY_INNER_API_URL: ${PLUGIN_DIFY_INNER_API_URL:-http://host.docker.internal:5001}
93 | DIFY_INNER_API_KEY: ${PLUGIN_DIFY_INNER_API_KEY:-QaHbTe77CtuXmsfyhR7+vRjI/+XbV1AaFy691iy+kGDv2Jvy0/eAh8Y1}
94 | PLUGIN_REMOTE_INSTALLING_HOST: ${PLUGIN_DEBUGGING_HOST:-0.0.0.0}
95 | PLUGIN_REMOTE_INSTALLING_PORT: ${PLUGIN_DEBUGGING_PORT:-5003}
96 | PLUGIN_WORKING_PATH: ${PLUGIN_WORKING_PATH:-/app/storage/cwd}
97 | FORCE_VERIFYING_SIGNATURE: ${FORCE_VERIFYING_SIGNATURE:-true}
98 | PYTHON_ENV_INIT_TIMEOUT: ${PLUGIN_PYTHON_ENV_INIT_TIMEOUT:-120}
99 | PLUGIN_MAX_EXECUTION_TIMEOUT: ${PLUGIN_MAX_EXECUTION_TIMEOUT:-600}
100 | PIP_MIRROR_URL: ${PIP_MIRROR_URL:-}
101 | PLUGIN_STORAGE_TYPE: ${PLUGIN_STORAGE_TYPE:-local}
102 | PLUGIN_STORAGE_LOCAL_ROOT: ${PLUGIN_STORAGE_LOCAL_ROOT:-/app/storage}
103 | PLUGIN_INSTALLED_PATH: ${PLUGIN_INSTALLED_PATH:-plugin}
104 | PLUGIN_PACKAGE_CACHE_PATH: ${PLUGIN_PACKAGE_CACHE_PATH:-plugin_packages}
105 | PLUGIN_MEDIA_CACHE_PATH: ${PLUGIN_MEDIA_CACHE_PATH:-assets}
106 | PLUGIN_STORAGE_OSS_BUCKET: ${PLUGIN_STORAGE_OSS_BUCKET:-}
107 | S3_USE_AWS_MANAGED_IAM: ${PLUGIN_S3_USE_AWS_MANAGED_IAM:-false}
108 | S3_ENDPOINT: ${PLUGIN_S3_ENDPOINT:-}
109 | S3_USE_PATH_STYLE: ${PLUGIN_S3_USE_PATH_STYLE:-false}
110 | AWS_ACCESS_KEY: ${PLUGIN_AWS_ACCESS_KEY:-}
111 | AWS_SECRET_KEY: ${PLUGIN_AWS_SECRET_KEY:-}
112 | AWS_REGION: ${PLUGIN_AWS_REGION:-}
113 | AZURE_BLOB_STORAGE_CONNECTION_STRING: ${PLUGIN_AZURE_BLOB_STORAGE_CONNECTION_STRING:-}
114 | AZURE_BLOB_STORAGE_CONTAINER_NAME: ${PLUGIN_AZURE_BLOB_STORAGE_CONTAINER_NAME:-}
115 | TENCENT_COS_SECRET_KEY: ${PLUGIN_TENCENT_COS_SECRET_KEY:-}
116 | TENCENT_COS_SECRET_ID: ${PLUGIN_TENCENT_COS_SECRET_ID:-}
117 | TENCENT_COS_REGION: ${PLUGIN_TENCENT_COS_REGION:-}
118 | ALIYUN_OSS_REGION: ${PLUGIN_ALIYUN_OSS_REGION:-}
119 | ALIYUN_OSS_ENDPOINT: ${PLUGIN_ALIYUN_OSS_ENDPOINT:-}
120 | ALIYUN_OSS_ACCESS_KEY_ID: ${PLUGIN_ALIYUN_OSS_ACCESS_KEY_ID:-}
121 | ALIYUN_OSS_ACCESS_KEY_SECRET: ${PLUGIN_ALIYUN_OSS_ACCESS_KEY_SECRET:-}
122 | ALIYUN_OSS_AUTH_VERSION: ${PLUGIN_ALIYUN_OSS_AUTH_VERSION:-v4}
123 | ALIYUN_OSS_PATH: ${PLUGIN_ALIYUN_OSS_PATH:-}
124 | ports:
125 | - "${EXPOSE_PLUGIN_DAEMON_PORT:-5002}:${PLUGIN_DAEMON_PORT:-5002}"
126 | - "${EXPOSE_PLUGIN_DEBUGGING_PORT:-5003}:${PLUGIN_DEBUGGING_PORT:-5003}"
127 | volumes:
128 | - ./volumes/plugin_daemon:/app/storage
129 |
130 | # ssrf_proxy server
131 | # for more information, please refer to
132 | # https://docs.dify.ai/learn-more/faq/install-faq#18-why-is-ssrf-proxy-needed%3F
133 | ssrf_proxy:
134 | image: ubuntu/squid:latest
135 | restart: always
136 | volumes:
137 | - ./ssrf_proxy/squid.conf.template:/etc/squid/squid.conf.template
138 | - ./ssrf_proxy/docker-entrypoint.sh:/docker-entrypoint-mount.sh
139 | entrypoint: [ "sh", "-c", "cp /docker-entrypoint-mount.sh /docker-entrypoint.sh && sed -i 's/\r$$//' /docker-entrypoint.sh && chmod +x /docker-entrypoint.sh && /docker-entrypoint.sh" ]
140 | env_file:
141 | - ./middleware.env
142 | environment:
143 | # please modify the squid env vars to fit your network environment.
144 | HTTP_PORT: ${SSRF_HTTP_PORT:-3128}
145 | COREDUMP_DIR: ${SSRF_COREDUMP_DIR:-/var/spool/squid}
146 | REVERSE_PROXY_PORT: ${SSRF_REVERSE_PROXY_PORT:-8194}
147 | SANDBOX_HOST: ${SSRF_SANDBOX_HOST:-sandbox}
148 | SANDBOX_PORT: ${SANDBOX_PORT:-8194}
149 | ports:
150 | - "${EXPOSE_SSRF_PROXY_PORT:-3128}:${SSRF_HTTP_PORT:-3128}"
151 | - "${EXPOSE_SANDBOX_PORT:-8194}:${SANDBOX_PORT:-8194}"
152 | networks:
153 | - ssrf_proxy_network
154 | - default
155 |
156 | # The Weaviate vector store.
157 | weaviate:
158 | image: semitechnologies/weaviate:1.19.0
159 | profiles:
160 | - ""
161 | - weaviate
162 | restart: always
163 | volumes:
164 | # Mount the Weaviate data directory to the container.
165 | - ${WEAVIATE_HOST_VOLUME:-./volumes/weaviate}:/var/lib/weaviate
166 | env_file:
167 | - ./middleware.env
168 | environment:
169 | # The Weaviate configurations
170 | # You can refer to the [Weaviate](https://weaviate.io/developers/weaviate/config-refs/env-vars) documentation for more information.
171 | PERSISTENCE_DATA_PATH: ${WEAVIATE_PERSISTENCE_DATA_PATH:-/var/lib/weaviate}
172 | QUERY_DEFAULTS_LIMIT: ${WEAVIATE_QUERY_DEFAULTS_LIMIT:-25}
173 | AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED: ${WEAVIATE_AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED:-false}
174 | DEFAULT_VECTORIZER_MODULE: ${WEAVIATE_DEFAULT_VECTORIZER_MODULE:-none}
175 | CLUSTER_HOSTNAME: ${WEAVIATE_CLUSTER_HOSTNAME:-node1}
176 | AUTHENTICATION_APIKEY_ENABLED: ${WEAVIATE_AUTHENTICATION_APIKEY_ENABLED:-true}
177 | AUTHENTICATION_APIKEY_ALLOWED_KEYS: ${WEAVIATE_AUTHENTICATION_APIKEY_ALLOWED_KEYS:-WVF5YThaHlkYwhGUSmCRgsX3tD5ngdN8pkih}
178 | AUTHENTICATION_APIKEY_USERS: ${WEAVIATE_AUTHENTICATION_APIKEY_USERS:-hello@dify.ai}
179 | AUTHORIZATION_ADMINLIST_ENABLED: ${WEAVIATE_AUTHORIZATION_ADMINLIST_ENABLED:-true}
180 | AUTHORIZATION_ADMINLIST_USERS: ${WEAVIATE_AUTHORIZATION_ADMINLIST_USERS:-hello@dify.ai}
181 | ports:
182 | - "${EXPOSE_WEAVIATE_PORT:-8080}:8080"
183 |
184 | networks:
185 | # create a network between sandbox, api and ssrf_proxy, and can not access outside.
186 | ssrf_proxy_network:
187 | driver: bridge
188 | internal: true
189 |
--------------------------------------------------------------------------------
/dify/code/docker-compose.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/easypanel-io/compose/405fd495347ebecae506e97d33616bec47f23089/dify/code/docker-compose.png
--------------------------------------------------------------------------------
/dify/code/elasticsearch/docker-entrypoint.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -e
4 |
5 | if [ "${VECTOR_STORE}" = "elasticsearch-ja" ]; then
6 | # Check if the ICU tokenizer plugin is installed
7 | if ! /usr/share/elasticsearch/bin/elasticsearch-plugin list | grep -q analysis-icu; then
8 | printf '%s\n' "Installing the ICU tokenizer plugin"
9 | if ! /usr/share/elasticsearch/bin/elasticsearch-plugin install analysis-icu; then
10 | printf '%s\n' "Failed to install the ICU tokenizer plugin"
11 | exit 1
12 | fi
13 | fi
14 | # Check if the Japanese language analyzer plugin is installed
15 | if ! /usr/share/elasticsearch/bin/elasticsearch-plugin list | grep -q analysis-kuromoji; then
16 | printf '%s\n' "Installing the Japanese language analyzer plugin"
17 | if ! /usr/share/elasticsearch/bin/elasticsearch-plugin install analysis-kuromoji; then
18 | printf '%s\n' "Failed to install the Japanese language analyzer plugin"
19 | exit 1
20 | fi
21 | fi
22 | fi
23 |
24 | # Run the original entrypoint script
25 | exec /bin/tini -- /usr/local/bin/docker-entrypoint.sh
26 |
--------------------------------------------------------------------------------
/dify/code/generate_docker_compose:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | import os
3 | import re
4 | import sys
5 |
6 |
7 | def parse_env_example(file_path):
8 | """
9 | Parses the .env.example file and returns a dictionary with variable names as keys and default values as values.
10 | """
11 | env_vars = {}
12 | with open(file_path, "r") as f:
13 | for line_number, line in enumerate(f, 1):
14 | line = line.strip()
15 | # Ignore empty lines and comments
16 | if not line or line.startswith("#"):
17 | continue
18 | # Use regex to parse KEY=VALUE
19 | match = re.match(r"^([^=]+)=(.*)$", line)
20 | if match:
21 | key = match.group(1).strip()
22 | value = match.group(2).strip()
23 | # Remove possible quotes around the value
24 | if (value.startswith('"') and value.endswith('"')) or (
25 | value.startswith("'") and value.endswith("'")
26 | ):
27 | value = value[1:-1]
28 | env_vars[key] = value
29 | else:
30 | print(f"Warning: Unable to parse line {line_number}: {line}")
31 | return env_vars
32 |
33 |
34 | def generate_shared_env_block(env_vars, anchor_name="shared-api-worker-env"):
35 | """
36 | Generates a shared environment variables block as a YAML string.
37 | """
38 | lines = [f"x-shared-env: &{anchor_name}"]
39 | for key, default in env_vars.items():
40 | if key == "COMPOSE_PROFILES":
41 | continue
42 | # If default value is empty, use ${KEY:-}
43 | if default == "":
44 | lines.append(f" {key}: ${{{key}:-}}")
45 | else:
46 | # If default value contains special characters, wrap it in quotes
47 | if re.search(r"[:\s]", default):
48 | default = f"{default}"
49 | lines.append(f" {key}: ${{{key}:-{default}}}")
50 | return "\n".join(lines)
51 |
52 |
53 | def insert_shared_env(template_path, output_path, shared_env_block, header_comments):
54 | """
55 | Inserts the shared environment variables block and header comments into the template file,
56 | removing any existing x-shared-env anchors, and generates the final docker-compose.yaml file.
57 | """
58 | with open(template_path, "r") as f:
59 | template_content = f.read()
60 |
61 | # Remove existing x-shared-env: &shared-api-worker-env lines
62 | template_content = re.sub(
63 | r"^x-shared-env: &shared-api-worker-env\s*\n?",
64 | "",
65 | template_content,
66 | flags=re.MULTILINE,
67 | )
68 |
69 | # Prepare the final content with header comments and shared env block
70 | final_content = f"{header_comments}\n{shared_env_block}\n\n{template_content}"
71 |
72 | with open(output_path, "w") as f:
73 | f.write(final_content)
74 | print(f"Generated {output_path}")
75 |
76 |
77 | def main():
78 | env_example_path = ".env.example"
79 | template_path = "docker-compose-template.yaml"
80 | output_path = "docker-compose.yaml"
81 | anchor_name = "shared-api-worker-env" # Can be modified as needed
82 |
83 | # Define header comments to be added at the top of docker-compose.yaml
84 | header_comments = (
85 | "# ==================================================================\n"
86 | "# WARNING: This file is auto-generated by generate_docker_compose\n"
87 | "# Do not modify this file directly. Instead, update the .env.example\n"
88 | "# or docker-compose-template.yaml and regenerate this file.\n"
89 | "# ==================================================================\n"
90 | )
91 |
92 | # Check if required files exist
93 | for path in [env_example_path, template_path]:
94 | if not os.path.isfile(path):
95 | print(f"Error: File {path} does not exist.")
96 | sys.exit(1)
97 |
98 | # Parse .env.example file
99 | env_vars = parse_env_example(env_example_path)
100 |
101 | if not env_vars:
102 | print("Warning: No environment variables found in .env.example.")
103 |
104 | # Generate shared environment variables block
105 | shared_env_block = generate_shared_env_block(env_vars, anchor_name)
106 |
107 | # Insert shared environment variables block and header comments into the template
108 | insert_shared_env(template_path, output_path, shared_env_block, header_comments)
109 |
110 |
111 | if __name__ == "__main__":
112 | main()
113 |
--------------------------------------------------------------------------------
/dify/code/middleware.env.example:
--------------------------------------------------------------------------------
1 | # ------------------------------
2 | # Environment Variables for db Service
3 | # ------------------------------
4 | PGUSER=postgres
5 | # The password for the default postgres user.
6 | POSTGRES_PASSWORD=difyai123456
7 | # The name of the default postgres database.
8 | POSTGRES_DB=dify
9 | # postgres data directory
10 | PGDATA=/var/lib/postgresql/data/pgdata
11 | PGDATA_HOST_VOLUME=./volumes/db/data
12 |
13 | # Maximum number of connections to the database
14 | # Default is 100
15 | #
16 | # Reference: https://www.postgresql.org/docs/current/runtime-config-connection.html#GUC-MAX-CONNECTIONS
17 | POSTGRES_MAX_CONNECTIONS=100
18 |
19 | # Sets the amount of shared memory used for postgres's shared buffers.
20 | # Default is 128MB
21 | # Recommended value: 25% of available memory
22 | # Reference: https://www.postgresql.org/docs/current/runtime-config-resource.html#GUC-SHARED-BUFFERS
23 | POSTGRES_SHARED_BUFFERS=128MB
24 |
25 | # Sets the amount of memory used by each database worker for working space.
26 | # Default is 4MB
27 | #
28 | # Reference: https://www.postgresql.org/docs/current/runtime-config-resource.html#GUC-WORK-MEM
29 | POSTGRES_WORK_MEM=4MB
30 |
31 | # Sets the amount of memory reserved for maintenance activities.
32 | # Default is 64MB
33 | #
34 | # Reference: https://www.postgresql.org/docs/current/runtime-config-resource.html#GUC-MAINTENANCE-WORK-MEM
35 | POSTGRES_MAINTENANCE_WORK_MEM=64MB
36 |
37 | # Sets the planner's assumption about the effective cache size.
38 | # Default is 4096MB
39 | #
40 | # Reference: https://www.postgresql.org/docs/current/runtime-config-query.html#GUC-EFFECTIVE-CACHE-SIZE
41 | POSTGRES_EFFECTIVE_CACHE_SIZE=4096MB
42 |
43 | # -----------------------------
44 | # Environment Variables for redis Service
45 | # -----------------------------
46 | REDIS_HOST_VOLUME=./volumes/redis/data
47 | REDIS_PASSWORD=difyai123456
48 |
49 | # ------------------------------
50 | # Environment Variables for sandbox Service
51 | # ------------------------------
52 | SANDBOX_API_KEY=dify-sandbox
53 | SANDBOX_GIN_MODE=release
54 | SANDBOX_WORKER_TIMEOUT=15
55 | SANDBOX_ENABLE_NETWORK=true
56 | SANDBOX_HTTP_PROXY=http://ssrf_proxy:3128
57 | SANDBOX_HTTPS_PROXY=http://ssrf_proxy:3128
58 | SANDBOX_PORT=8194
59 |
60 | # ------------------------------
61 | # Environment Variables for ssrf_proxy Service
62 | # ------------------------------
63 | SSRF_HTTP_PORT=3128
64 | SSRF_COREDUMP_DIR=/var/spool/squid
65 | SSRF_REVERSE_PROXY_PORT=8194
66 | SSRF_SANDBOX_HOST=sandbox
67 |
68 | # ------------------------------
69 | # Environment Variables for weaviate Service
70 | # ------------------------------
71 | WEAVIATE_QUERY_DEFAULTS_LIMIT=25
72 | WEAVIATE_AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED=true
73 | WEAVIATE_DEFAULT_VECTORIZER_MODULE=none
74 | WEAVIATE_CLUSTER_HOSTNAME=node1
75 | WEAVIATE_AUTHENTICATION_APIKEY_ENABLED=true
76 | WEAVIATE_AUTHENTICATION_APIKEY_ALLOWED_KEYS=WVF5YThaHlkYwhGUSmCRgsX3tD5ngdN8pkih
77 | WEAVIATE_AUTHENTICATION_APIKEY_USERS=hello@dify.ai
78 | WEAVIATE_AUTHORIZATION_ADMINLIST_ENABLED=true
79 | WEAVIATE_AUTHORIZATION_ADMINLIST_USERS=hello@dify.ai
80 | WEAVIATE_HOST_VOLUME=./volumes/weaviate
81 |
82 | # ------------------------------
83 | # Docker Compose Service Expose Host Port Configurations
84 | # ------------------------------
85 | EXPOSE_POSTGRES_PORT=5432
86 | EXPOSE_REDIS_PORT=6379
87 | EXPOSE_SANDBOX_PORT=8194
88 | EXPOSE_SSRF_PROXY_PORT=3128
89 | EXPOSE_WEAVIATE_PORT=8080
90 |
91 | # ------------------------------
92 | # Plugin Daemon Configuration
93 | # ------------------------------
94 |
95 | DB_PLUGIN_DATABASE=dify_plugin
96 | EXPOSE_PLUGIN_DAEMON_PORT=5002
97 | PLUGIN_DAEMON_PORT=5002
98 | PLUGIN_DAEMON_KEY=lYkiYYT6owG+71oLerGzA7GXCgOT++6ovaezWAjpCjf+Sjc3ZtU+qUEi
99 | PLUGIN_DAEMON_URL=http://host.docker.internal:5002
100 | PLUGIN_MAX_PACKAGE_SIZE=52428800
101 | PLUGIN_PPROF_ENABLED=false
102 | PLUGIN_WORKING_PATH=/app/storage/cwd
103 |
104 | ENDPOINT_URL_TEMPLATE=http://localhost:5002/e/{hook_id}
105 |
106 | PLUGIN_DEBUGGING_PORT=5003
107 | PLUGIN_DEBUGGING_HOST=0.0.0.0
108 | EXPOSE_PLUGIN_DEBUGGING_HOST=localhost
109 | EXPOSE_PLUGIN_DEBUGGING_PORT=5003
110 |
111 | PLUGIN_DIFY_INNER_API_KEY=QaHbTe77CtuXmsfyhR7+vRjI/+XbV1AaFy691iy+kGDv2Jvy0/eAh8Y1
112 | PLUGIN_DIFY_INNER_API_URL=http://api:5001
113 |
114 | MARKETPLACE_ENABLED=true
115 | MARKETPLACE_API_URL=https://marketplace.dify.ai
116 |
117 | FORCE_VERIFYING_SIGNATURE=true
118 |
119 | PLUGIN_PYTHON_ENV_INIT_TIMEOUT=120
120 | PLUGIN_MAX_EXECUTION_TIMEOUT=600
121 | # PIP_MIRROR_URL=https://pypi.tuna.tsinghua.edu.cn/simple
122 | PIP_MIRROR_URL=
123 |
124 | # https://github.com/langgenius/dify-plugin-daemon/blob/main/.env.example
125 | # Plugin storage type, local aws_s3 tencent_cos azure_blob
126 | PLUGIN_STORAGE_TYPE=local
127 | PLUGIN_STORAGE_LOCAL_ROOT=/app/storage
128 | PLUGIN_WORKING_PATH=/app/storage/cwd
129 | PLUGIN_INSTALLED_PATH=plugin
130 | PLUGIN_PACKAGE_CACHE_PATH=plugin_packages
131 | PLUGIN_MEDIA_CACHE_PATH=assets
132 | # Plugin oss bucket
133 | PLUGIN_STORAGE_OSS_BUCKET=
134 | # Plugin oss s3 credentials
135 | PLUGIN_S3_USE_AWS_MANAGED_IAM=false
136 | PLUGIN_S3_ENDPOINT=
137 | PLUGIN_S3_USE_PATH_STYLE=false
138 | PLUGIN_AWS_ACCESS_KEY=
139 | PLUGIN_AWS_SECRET_KEY=
140 | PLUGIN_AWS_REGION=
141 | # Plugin oss azure blob
142 | PLUGIN_AZURE_BLOB_STORAGE_CONTAINER_NAME=
143 | PLUGIN_AZURE_BLOB_STORAGE_CONNECTION_STRING=
144 | # Plugin oss tencent cos
145 | PLUGIN_TENCENT_COS_SECRET_KEY=
146 | PLUGIN_TENCENT_COS_SECRET_ID=
147 | PLUGIN_TENCENT_COS_REGION=
148 | # Plugin oss aliyun oss
149 | PLUGIN_ALIYUN_OSS_REGION=
150 | PLUGIN_ALIYUN_OSS_ENDPOINT=
151 | PLUGIN_ALIYUN_OSS_ACCESS_KEY_ID=
152 | PLUGIN_ALIYUN_OSS_ACCESS_KEY_SECRET=
153 | PLUGIN_ALIYUN_OSS_AUTH_VERSION=v4
154 | PLUGIN_ALIYUN_OSS_PATH=
155 |
--------------------------------------------------------------------------------
/dify/code/nginx/conf.d/default.conf.template:
--------------------------------------------------------------------------------
1 | # Please do not directly edit this file. Instead, modify the .env variables related to NGINX configuration.
2 |
3 | server {
4 | listen ${NGINX_PORT};
5 | server_name ${NGINX_SERVER_NAME};
6 |
7 | location /console/api {
8 | proxy_pass http://api:5001;
9 | include proxy.conf;
10 | }
11 |
12 | location /api {
13 | proxy_pass http://api:5001;
14 | include proxy.conf;
15 | }
16 |
17 | location /v1 {
18 | proxy_pass http://api:5001;
19 | include proxy.conf;
20 | }
21 |
22 | location /files {
23 | proxy_pass http://api:5001;
24 | include proxy.conf;
25 | }
26 |
27 | location /explore {
28 | proxy_pass http://web:3000;
29 | include proxy.conf;
30 | }
31 |
32 | location /e/ {
33 | proxy_pass http://plugin_daemon:5002;
34 | proxy_set_header Dify-Hook-Url $scheme://$host$request_uri;
35 | include proxy.conf;
36 | }
37 |
38 | location / {
39 | proxy_pass http://web:3000;
40 | include proxy.conf;
41 | }
42 |
43 | # placeholder for acme challenge location
44 | ${ACME_CHALLENGE_LOCATION}
45 |
46 | # placeholder for https config defined in https.conf.template
47 | ${HTTPS_CONFIG}
48 | }
49 |
--------------------------------------------------------------------------------
/dify/code/nginx/docker-entrypoint.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | HTTPS_CONFIG=''
4 |
5 | if [ "${NGINX_HTTPS_ENABLED}" = "true" ]; then
6 | # Check if the certificate and key files for the specified domain exist
7 | if [ -n "${CERTBOT_DOMAIN}" ] && \
8 | [ -f "/etc/letsencrypt/live/${CERTBOT_DOMAIN}/${NGINX_SSL_CERT_FILENAME}" ] && \
9 | [ -f "/etc/letsencrypt/live/${CERTBOT_DOMAIN}/${NGINX_SSL_CERT_KEY_FILENAME}" ]; then
10 | SSL_CERTIFICATE_PATH="/etc/letsencrypt/live/${CERTBOT_DOMAIN}/${NGINX_SSL_CERT_FILENAME}"
11 | SSL_CERTIFICATE_KEY_PATH="/etc/letsencrypt/live/${CERTBOT_DOMAIN}/${NGINX_SSL_CERT_KEY_FILENAME}"
12 | else
13 | SSL_CERTIFICATE_PATH="/etc/ssl/${NGINX_SSL_CERT_FILENAME}"
14 | SSL_CERTIFICATE_KEY_PATH="/etc/ssl/${NGINX_SSL_CERT_KEY_FILENAME}"
15 | fi
16 | export SSL_CERTIFICATE_PATH
17 | export SSL_CERTIFICATE_KEY_PATH
18 |
19 | # set the HTTPS_CONFIG environment variable to the content of the https.conf.template
20 | HTTPS_CONFIG=$(envsubst < /etc/nginx/https.conf.template)
21 | export HTTPS_CONFIG
22 | # Substitute the HTTPS_CONFIG in the default.conf.template with content from https.conf.template
23 | envsubst '${HTTPS_CONFIG}' < /etc/nginx/conf.d/default.conf.template > /etc/nginx/conf.d/default.conf
24 | fi
25 | export HTTPS_CONFIG
26 |
27 | if [ "${NGINX_ENABLE_CERTBOT_CHALLENGE}" = "true" ]; then
28 | ACME_CHALLENGE_LOCATION='location /.well-known/acme-challenge/ { root /var/www/html; }'
29 | else
30 | ACME_CHALLENGE_LOCATION=''
31 | fi
32 | export ACME_CHALLENGE_LOCATION
33 |
34 | env_vars=$(printenv | cut -d= -f1 | sed 's/^/$/g' | paste -sd, -)
35 |
36 | envsubst "$env_vars" < /etc/nginx/nginx.conf.template > /etc/nginx/nginx.conf
37 | envsubst "$env_vars" < /etc/nginx/proxy.conf.template > /etc/nginx/proxy.conf
38 |
39 | envsubst "$env_vars" < /etc/nginx/conf.d/default.conf.template > /etc/nginx/conf.d/default.conf
40 |
41 | # Start Nginx using the default entrypoint
42 | exec nginx -g 'daemon off;'
43 |
--------------------------------------------------------------------------------
/dify/code/nginx/https.conf.template:
--------------------------------------------------------------------------------
1 | # Please do not directly edit this file. Instead, modify the .env variables related to NGINX configuration.
2 |
3 | listen ${NGINX_SSL_PORT} ssl;
4 | ssl_certificate ${SSL_CERTIFICATE_PATH};
5 | ssl_certificate_key ${SSL_CERTIFICATE_KEY_PATH};
6 | ssl_protocols ${NGINX_SSL_PROTOCOLS};
7 | ssl_prefer_server_ciphers on;
8 | ssl_session_cache shared:SSL:10m;
9 | ssl_session_timeout 10m;
10 |
--------------------------------------------------------------------------------
/dify/code/nginx/nginx.conf.template:
--------------------------------------------------------------------------------
1 | # Please do not directly edit this file. Instead, modify the .env variables related to NGINX configuration.
2 |
3 | user nginx;
4 | worker_processes ${NGINX_WORKER_PROCESSES};
5 |
6 | error_log /var/log/nginx/error.log notice;
7 | pid /var/run/nginx.pid;
8 |
9 |
10 | events {
11 | worker_connections 1024;
12 | }
13 |
14 |
15 | http {
16 | include /etc/nginx/mime.types;
17 | default_type application/octet-stream;
18 |
19 | log_format main '$remote_addr - $remote_user [$time_local] "$request" '
20 | '$status $body_bytes_sent "$http_referer" '
21 | '"$http_user_agent" "$http_x_forwarded_for"';
22 |
23 | access_log /var/log/nginx/access.log main;
24 |
25 | sendfile on;
26 | #tcp_nopush on;
27 |
28 | keepalive_timeout ${NGINX_KEEPALIVE_TIMEOUT};
29 |
30 | #gzip on;
31 | client_max_body_size ${NGINX_CLIENT_MAX_BODY_SIZE};
32 |
33 | include /etc/nginx/conf.d/*.conf;
34 | }
35 |
--------------------------------------------------------------------------------
/dify/code/nginx/proxy.conf.template:
--------------------------------------------------------------------------------
1 | # Please do not directly edit this file. Instead, modify the .env variables related to NGINX configuration.
2 |
3 | proxy_set_header Host $host;
4 | proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
5 | proxy_set_header X-Forwarded-Proto $scheme;
6 | proxy_set_header X-Forwarded-Port $server_port;
7 | proxy_http_version 1.1;
8 | proxy_set_header Connection "";
9 | proxy_buffering off;
10 | proxy_read_timeout ${NGINX_PROXY_READ_TIMEOUT};
11 | proxy_send_timeout ${NGINX_PROXY_SEND_TIMEOUT};
12 |
--------------------------------------------------------------------------------
/dify/code/nginx/ssl/.gitkeep:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/easypanel-io/compose/405fd495347ebecae506e97d33616bec47f23089/dify/code/nginx/ssl/.gitkeep
--------------------------------------------------------------------------------
/dify/code/pgvector/docker-entrypoint.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | PG_MAJOR=16
4 |
5 | if [ "${PG_BIGM}" = "true" ]; then
6 | # install pg_bigm
7 | apt-get update
8 | apt-get install -y curl make gcc postgresql-server-dev-${PG_MAJOR}
9 |
10 | curl -LO https://github.com/pgbigm/pg_bigm/archive/refs/tags/v${PG_BIGM_VERSION}.tar.gz
11 | tar xf v${PG_BIGM_VERSION}.tar.gz
12 | cd pg_bigm-${PG_BIGM_VERSION} || exit 1
13 | make USE_PGXS=1 PG_CONFIG=/usr/bin/pg_config
14 | make USE_PGXS=1 PG_CONFIG=/usr/bin/pg_config install
15 |
16 | cd - || exit 1
17 | rm -rf v${PG_BIGM_VERSION}.tar.gz pg_bigm-${PG_BIGM_VERSION}
18 |
19 | # enable pg_bigm
20 | sed -i -e 's/^#\s*shared_preload_libraries.*/shared_preload_libraries = '\''pg_bigm'\''/' /var/lib/postgresql/data/pgdata/postgresql.conf
21 | fi
22 |
23 | # Run the original entrypoint script
24 | exec /usr/local/bin/docker-entrypoint.sh postgres
25 |
--------------------------------------------------------------------------------
/dify/code/ssrf_proxy/docker-entrypoint.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # Modified based on Squid OCI image entrypoint
4 |
5 | # This entrypoint aims to forward the squid logs to stdout to assist users of
6 | # common container related tooling (e.g., kubernetes, docker-compose, etc) to
7 | # access the service logs.
8 |
9 | # Moreover, it invokes the squid binary, leaving all the desired parameters to
10 | # be provided by the "command" passed to the spawned container. If no command
11 | # is provided by the user, the default behavior (as per the CMD statement in
12 | # the Dockerfile) will be to use Ubuntu's default configuration [1] and run
13 | # squid with the "-NYC" options to mimic the behavior of the Ubuntu provided
14 | # systemd unit.
15 |
16 | # [1] The default configuration is changed in the Dockerfile to allow local
17 | # network connections. See the Dockerfile for further information.
18 |
19 | echo "[ENTRYPOINT] re-create snakeoil self-signed certificate removed in the build process"
20 | if [ ! -f /etc/ssl/private/ssl-cert-snakeoil.key ]; then
21 | /usr/sbin/make-ssl-cert generate-default-snakeoil --force-overwrite > /dev/null 2>&1
22 | fi
23 |
24 | tail -F /var/log/squid/access.log 2>/dev/null &
25 | tail -F /var/log/squid/error.log 2>/dev/null &
26 | tail -F /var/log/squid/store.log 2>/dev/null &
27 | tail -F /var/log/squid/cache.log 2>/dev/null &
28 |
29 | # Replace environment variables in the template and output to the squid.conf
30 | echo "[ENTRYPOINT] replacing environment variables in the template"
31 | awk '{
32 | while(match($0, /\${[A-Za-z_][A-Za-z_0-9]*}/)) {
33 | var = substr($0, RSTART+2, RLENGTH-3)
34 | val = ENVIRON[var]
35 | $0 = substr($0, 1, RSTART-1) val substr($0, RSTART+RLENGTH)
36 | }
37 | print
38 | }' /etc/squid/squid.conf.template > /etc/squid/squid.conf
39 |
40 | /usr/sbin/squid -Nz
41 | echo "[ENTRYPOINT] starting squid"
42 | /usr/sbin/squid -f /etc/squid/squid.conf -NYC 1
43 |
--------------------------------------------------------------------------------
/dify/code/ssrf_proxy/squid.conf.template:
--------------------------------------------------------------------------------
1 | acl localnet src 0.0.0.1-0.255.255.255 # RFC 1122 "this" network (LAN)
2 | acl localnet src 10.0.0.0/8 # RFC 1918 local private network (LAN)
3 | acl localnet src 100.64.0.0/10 # RFC 6598 shared address space (CGN)
4 | acl localnet src 169.254.0.0/16 # RFC 3927 link-local (directly plugged) machines
5 | acl localnet src 172.16.0.0/12 # RFC 1918 local private network (LAN)
6 | acl localnet src 192.168.0.0/16 # RFC 1918 local private network (LAN)
7 | acl localnet src fc00::/7 # RFC 4193 local private network range
8 | acl localnet src fe80::/10 # RFC 4291 link-local (directly plugged) machines
9 | acl SSL_ports port 443
10 | # acl SSL_ports port 1025-65535 # Enable the configuration to resolve this issue: https://github.com/langgenius/dify/issues/12792
11 | acl Safe_ports port 80 # http
12 | acl Safe_ports port 21 # ftp
13 | acl Safe_ports port 443 # https
14 | acl Safe_ports port 70 # gopher
15 | acl Safe_ports port 210 # wais
16 | acl Safe_ports port 1025-65535 # unregistered ports
17 | acl Safe_ports port 280 # http-mgmt
18 | acl Safe_ports port 488 # gss-http
19 | acl Safe_ports port 591 # filemaker
20 | acl Safe_ports port 777 # multiling http
21 | acl CONNECT method CONNECT
22 | acl allowed_domains dstdomain .marketplace.dify.ai
23 | http_access allow allowed_domains
24 | http_access deny !Safe_ports
25 | http_access deny CONNECT !SSL_ports
26 | http_access allow localhost manager
27 | http_access deny manager
28 | http_access allow localhost
29 | include /etc/squid/conf.d/*.conf
30 | http_access deny all
31 |
32 | ################################## Proxy Server ################################
33 | http_port ${HTTP_PORT}
34 | coredump_dir ${COREDUMP_DIR}
35 | refresh_pattern ^ftp: 1440 20% 10080
36 | refresh_pattern ^gopher: 1440 0% 1440
37 | refresh_pattern -i (/cgi-bin/|\?) 0 0% 0
38 | refresh_pattern \/(Packages|Sources)(|\.bz2|\.gz|\.xz)$ 0 0% 0 refresh-ims
39 | refresh_pattern \/Release(|\.gpg)$ 0 0% 0 refresh-ims
40 | refresh_pattern \/InRelease$ 0 0% 0 refresh-ims
41 | refresh_pattern \/(Translation-.*)(|\.bz2|\.gz|\.xz)$ 0 0% 0 refresh-ims
42 | refresh_pattern . 0 20% 4320
43 |
44 |
45 | # cache_dir ufs /var/spool/squid 100 16 256
46 | # upstream proxy, set to your own upstream proxy IP to avoid SSRF attacks
47 | # cache_peer 172.1.1.1 parent 3128 0 no-query no-digest no-netdb-exchange default
48 |
49 | ################################## Reverse Proxy To Sandbox ################################
50 | http_port ${REVERSE_PROXY_PORT} accel vhost
51 | cache_peer ${SANDBOX_HOST} parent ${SANDBOX_PORT} 0 no-query originserver
52 | acl src_all src all
53 | http_access allow src_all
54 |
55 | # Unless this buffer size is increased, uploading more than two files at once will fail.
56 | client_request_buffer_max_size 100 MB
57 |
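58 | # The ${HTTP_PORT}, ${COREDUMP_DIR}, ${REVERSE_PROXY_PORT}, ${SANDBOX_HOST} and
59 | # ${SANDBOX_PORT} placeholders above are substituted from the container environment
60 | # by docker-entrypoint.sh; e.g. with HTTP_PORT=3128, "http_port ${HTTP_PORT}" is
61 | # rendered as "http_port 3128" in the generated /etc/squid/squid.conf.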
--------------------------------------------------------------------------------
/dify/code/startupscripts/init.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | DB_INITIALIZED="/opt/oracle/oradata/dbinit"
4 | #[ -f ${DB_INITIALIZED} ] && exit
5 | #touch ${DB_INITIALIZED}
6 | if [ -f ${DB_INITIALIZED} ]; then
7 | echo 'Init marker file exists. Database has already been initialized.'
8 | exit
9 | else
10 | echo 'Init marker file not found. Performing first-time database initialization.'
11 | "$ORACLE_HOME"/bin/sqlplus -s "/ as sysdba" @"/opt/oracle/scripts/startup/init_user.script";
12 | touch ${DB_INITIALIZED}
13 | fi
14 |
--------------------------------------------------------------------------------
/dify/code/startupscripts/init_user.script:
--------------------------------------------------------------------------------
1 | show pdbs;
2 | ALTER SYSTEM SET PROCESSES=500 SCOPE=SPFILE;
3 | alter session set container= freepdb1;
4 | create user dify identified by dify DEFAULT TABLESPACE users quota unlimited on users;
5 | grant DB_DEVELOPER_ROLE to dify;
6 |
7 | BEGIN
8 | CTX_DDL.CREATE_PREFERENCE('dify.world_lexer','WORLD_LEXER');
9 | END;
10 | /
11 |
--------------------------------------------------------------------------------
/dify/code/tidb/config/pd.toml:
--------------------------------------------------------------------------------
1 | # PD Configuration File reference:
2 | # https://docs.pingcap.com/tidb/stable/pd-configuration-file#pd-configuration-file
3 | [replication]
4 | max-replicas = 1
5 |
--------------------------------------------------------------------------------
/dify/code/tidb/config/tiflash-learner.toml:
--------------------------------------------------------------------------------
1 | # TiFlash tiflash-learner.toml Configuration File reference:
2 | # https://docs.pingcap.com/tidb/stable/tiflash-configuration#configure-the-tiflash-learnertoml-file
3 |
4 | log-file = "/logs/tiflash_tikv.log"
5 |
6 | [server]
7 | engine-addr = "tiflash:4030"
8 | addr = "0.0.0.0:20280"
9 | advertise-addr = "tiflash:20280"
10 | status-addr = "tiflash:20292"
11 |
12 | [storage]
13 | data-dir = "/data/flash"
14 |
--------------------------------------------------------------------------------
/dify/code/tidb/config/tiflash.toml:
--------------------------------------------------------------------------------
1 | # TiFlash tiflash.toml Configuration File reference:
2 | # https://docs.pingcap.com/tidb/stable/tiflash-configuration#configure-the-tiflashtoml-file
3 |
4 | listen_host = "0.0.0.0"
5 | path = "/data"
6 |
7 | [flash]
8 | tidb_status_addr = "tidb:10080"
9 | service_addr = "tiflash:4030"
10 |
11 | [flash.proxy]
12 | config = "/tiflash-learner.toml"
13 |
14 | [logger]
15 | errorlog = "/logs/tiflash_error.log"
16 | log = "/logs/tiflash.log"
17 |
18 | [raft]
19 | pd_addr = "pd0:2379"
20 |
--------------------------------------------------------------------------------
/dify/code/tidb/docker-compose.yaml:
--------------------------------------------------------------------------------
1 | services:
2 | pd0:
3 | image: pingcap/pd:v8.5.1
4 | # ports:
5 | # - "2379"
6 | volumes:
7 | - ./config/pd.toml:/pd.toml:ro
8 | - ./volumes/data:/data
9 | - ./volumes/logs:/logs
10 | command:
11 | - --name=pd0
12 | - --client-urls=http://0.0.0.0:2379
13 | - --peer-urls=http://0.0.0.0:2380
14 | - --advertise-client-urls=http://pd0:2379
15 | - --advertise-peer-urls=http://pd0:2380
16 | - --initial-cluster=pd0=http://pd0:2380
17 | - --data-dir=/data/pd
18 | - --config=/pd.toml
19 | - --log-file=/logs/pd.log
20 | restart: on-failure
21 | tikv:
22 | image: pingcap/tikv:v8.5.1
23 | volumes:
24 | - ./volumes/data:/data
25 | - ./volumes/logs:/logs
26 | command:
27 | - --addr=0.0.0.0:20160
28 | - --advertise-addr=tikv:20160
29 | - --status-addr=tikv:20180
30 | - --data-dir=/data/tikv
31 | - --pd=pd0:2379
32 | - --log-file=/logs/tikv.log
33 | depends_on:
34 | - "pd0"
35 | restart: on-failure
36 | tidb:
37 | image: pingcap/tidb:v8.5.1
38 | # ports:
39 | # - "4000:4000"
40 | volumes:
41 | - ./volumes/logs:/logs
42 | command:
43 | - --advertise-address=tidb
44 | - --store=tikv
45 | - --path=pd0:2379
46 | - --log-file=/logs/tidb.log
47 | depends_on:
48 | - "tikv"
49 | restart: on-failure
50 | tiflash:
51 | image: pingcap/tiflash:v8.5.1
52 | volumes:
53 | - ./config/tiflash.toml:/tiflash.toml:ro
54 | - ./config/tiflash-learner.toml:/tiflash-learner.toml:ro
55 | - ./volumes/data:/data
56 | - ./volumes/logs:/logs
57 | command:
58 | - --config=/tiflash.toml
59 | depends_on:
60 | - "tikv"
61 | - "tidb"
62 | restart: on-failure
63 |
--------------------------------------------------------------------------------
/dify/code/volumes/myscale/config/users.d/custom_users_config.xml:
--------------------------------------------------------------------------------
1 | <?xml version="1.0"?>
2 | <clickhouse>
3 |     <users>
4 |         <default>
5 |             <networks>
6 |                 <ip>::1</ip>
7 |                 <ip>127.0.0.1</ip>
8 |                 <ip>10.0.0.0/8</ip>
9 |                 <ip>172.16.0.0/12</ip>
10 |                 <ip>192.168.0.0/16</ip>
11 |             </networks>
12 |             <profile>default</profile>
13 |             <quota>default</quota>
14 |             <access_management>1</access_management>
15 |         </default>
16 |     </users>
17 | </clickhouse>
18 |
--------------------------------------------------------------------------------
/dify/code/volumes/oceanbase/init.d/vec_memory.sql:
--------------------------------------------------------------------------------
1 | ALTER SYSTEM SET ob_vector_memory_limit_percentage = 30;
2 |
--------------------------------------------------------------------------------
/dify/code/volumes/opensearch/opensearch_dashboards.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # Copyright OpenSearch Contributors
3 | # SPDX-License-Identifier: Apache-2.0
4 |
5 | # Description:
6 | # Default configuration for OpenSearch Dashboards
7 |
8 | # OpenSearch Dashboards is served by a back end server. This setting specifies the port to use.
9 | # server.port: 5601
10 |
11 | # Specifies the address to which the OpenSearch Dashboards server will bind. IP addresses and host names are both valid values.
12 | # The default is 'localhost', which usually means remote machines will not be able to connect.
13 | # To allow connections from remote users, set this parameter to a non-loopback address.
14 | # server.host: "localhost"
15 |
16 | # Enables you to specify a path to mount OpenSearch Dashboards at if you are running behind a proxy.
17 | # Use the `server.rewriteBasePath` setting to tell OpenSearch Dashboards if it should remove the basePath
18 | # from requests it receives, and to prevent a deprecation warning at startup.
19 | # This setting cannot end in a slash.
20 | # server.basePath: ""
21 |
22 | # Specifies whether OpenSearch Dashboards should rewrite requests that are prefixed with
23 | # `server.basePath` or require that they are rewritten by your reverse proxy.
24 | # server.rewriteBasePath: false
25 |
26 | # The maximum payload size in bytes for incoming server requests.
27 | # server.maxPayloadBytes: 1048576
28 |
29 | # The OpenSearch Dashboards server's name. This is used for display purposes.
30 | # server.name: "your-hostname"
31 |
32 | # The URLs of the OpenSearch instances to use for all your queries.
33 | # opensearch.hosts: ["http://localhost:9200"]
34 |
35 | # OpenSearch Dashboards uses an index in OpenSearch to store saved searches, visualizations and
36 | # dashboards. OpenSearch Dashboards creates a new index if the index doesn't already exist.
37 | # opensearchDashboards.index: ".opensearch_dashboards"
38 |
39 | # The default application to load.
40 | # opensearchDashboards.defaultAppId: "home"
41 |
42 | # Setting for an optimized healthcheck that only uses the local OpenSearch node to do Dashboards healthcheck.
43 | # This setting should be used for large clusters or for clusters with ingest heavy nodes.
44 | # It allows Dashboards to only healthcheck using the local OpenSearch node rather than fan out requests across all nodes.
45 | #
46 | # It requires the user to create an OpenSearch node attribute with the same name as the value used in the setting
47 | # This node attribute should assign all nodes of the same cluster an integer value that increments with each new cluster that is spun up
48 | # e.g. in opensearch.yml file you would set the value to a setting using node.attr.cluster_id:
49 | # Should only be enabled if there is a corresponding node attribute created in your OpenSearch config that matches the value here
50 | # opensearch.optimizedHealthcheckId: "cluster_id"
51 |
52 | # If your OpenSearch is protected with basic authentication, these settings provide
53 | # the username and password that the OpenSearch Dashboards server uses to perform maintenance on the OpenSearch Dashboards
54 | # index at startup. Your OpenSearch Dashboards users still need to authenticate with OpenSearch, which
55 | # is proxied through the OpenSearch Dashboards server.
56 | # opensearch.username: "opensearch_dashboards_system"
57 | # opensearch.password: "pass"
58 |
59 | # Enables SSL and paths to the PEM-format SSL certificate and SSL key files, respectively.
60 | # These settings enable SSL for outgoing requests from the OpenSearch Dashboards server to the browser.
61 | # server.ssl.enabled: false
62 | # server.ssl.certificate: /path/to/your/server.crt
63 | # server.ssl.key: /path/to/your/server.key
64 |
65 | # Optional settings that provide the paths to the PEM-format SSL certificate and key files.
66 | # These files are used to verify the identity of OpenSearch Dashboards to OpenSearch and are required when
67 | # xpack.security.http.ssl.client_authentication in OpenSearch is set to required.
68 | # opensearch.ssl.certificate: /path/to/your/client.crt
69 | # opensearch.ssl.key: /path/to/your/client.key
70 |
71 | # Optional setting that enables you to specify a path to the PEM file for the certificate
72 | # authority for your OpenSearch instance.
73 | # opensearch.ssl.certificateAuthorities: [ "/path/to/your/CA.pem" ]
74 |
75 | # To disregard the validity of SSL certificates, change this setting's value to 'none'.
76 | # opensearch.ssl.verificationMode: full
77 |
78 | # Time in milliseconds to wait for OpenSearch to respond to pings. Defaults to the value of
79 | # the opensearch.requestTimeout setting.
80 | # opensearch.pingTimeout: 1500
81 |
82 | # Time in milliseconds to wait for responses from the back end or OpenSearch. This value
83 | # must be a positive integer.
84 | # opensearch.requestTimeout: 30000
85 |
86 | # List of OpenSearch Dashboards client-side headers to send to OpenSearch. To send *no* client-side
87 | # headers, set this value to [] (an empty list).
88 | # opensearch.requestHeadersWhitelist: [ authorization ]
89 |
90 | # Header names and values that are sent to OpenSearch. Any custom headers cannot be overwritten
91 | # by client-side headers, regardless of the opensearch.requestHeadersWhitelist configuration.
92 | # opensearch.customHeaders: {}
93 |
94 | # Time in milliseconds for OpenSearch to wait for responses from shards. Set to 0 to disable.
95 | # opensearch.shardTimeout: 30000
96 |
97 | # Logs queries sent to OpenSearch. Requires logging.verbose set to true.
98 | # opensearch.logQueries: false
99 |
100 | # Specifies the path where OpenSearch Dashboards creates the process ID file.
101 | # pid.file: /var/run/opensearchDashboards.pid
102 |
103 | # Enables you to specify a file where OpenSearch Dashboards stores log output.
104 | # logging.dest: stdout
105 |
106 | # Set the value of this setting to true to suppress all logging output.
107 | # logging.silent: false
108 |
109 | # Set the value of this setting to true to suppress all logging output other than error messages.
110 | # logging.quiet: false
111 |
112 | # Set the value of this setting to true to log all events, including system usage information
113 | # and all requests.
114 | # logging.verbose: false
115 |
116 | # Set the interval in milliseconds to sample system and process performance
117 | # metrics. Minimum is 100ms. Defaults to 5000.
118 | # ops.interval: 5000
119 |
120 | # Specifies locale to be used for all localizable strings, dates and number formats.
121 | # Supported languages are the following: English - en (default), Chinese - zh-CN.
122 | # i18n.locale: "en"
123 |
124 | # Set the allowlist to check input graphite Url. Allowlist is the default check list.
125 | # vis_type_timeline.graphiteAllowedUrls: ['https://www.hostedgraphite.com/UID/ACCESS_KEY/graphite']
126 |
127 | # Set the blocklist to check input graphite Url. Blocklist is an IP list.
128 | # Below is an example for reference
129 | # vis_type_timeline.graphiteBlockedIPs: [
130 | # //Loopback
131 | # '127.0.0.0/8',
132 | # '::1/128',
133 | # //Link-local Address for IPv6
134 | # 'fe80::/10',
135 | # //Private IP address for IPv4
136 | # '10.0.0.0/8',
137 | # '172.16.0.0/12',
138 | # '192.168.0.0/16',
139 | # //Unique local address (ULA)
140 | # 'fc00::/7',
141 | # //Reserved IP address
142 | # '0.0.0.0/8',
143 | # '100.64.0.0/10',
144 | # '192.0.0.0/24',
145 | # '192.0.2.0/24',
146 | # '198.18.0.0/15',
147 | # '192.88.99.0/24',
148 | # '198.51.100.0/24',
149 | # '203.0.113.0/24',
150 | # '224.0.0.0/4',
151 | # '240.0.0.0/4',
152 | # '255.255.255.255/32',
153 | # '::/128',
154 | # '2001:db8::/32',
155 | # 'ff00::/8',
156 | # ]
157 | # vis_type_timeline.graphiteBlockedIPs: []
158 |
159 | # opensearchDashboards.branding:
160 | # logo:
161 | # defaultUrl: ""
162 | # darkModeUrl: ""
163 | # mark:
164 | # defaultUrl: ""
165 | # darkModeUrl: ""
166 | # loadingLogo:
167 | # defaultUrl: ""
168 | # darkModeUrl: ""
169 | # faviconUrl: ""
170 | # applicationTitle: ""
171 |
172 | # Set the value of this setting to true to capture region blocked warnings and errors
173 | # for your map rendering services.
174 | # map.showRegionBlockedWarning: false
175 |
176 | # Set the value of this setting to false to suppress search usage telemetry
177 | # for reducing the load of OpenSearch cluster.
178 | # data.search.usageTelemetry.enabled: false
179 |
180 | # 2.4 renames 'wizard.enabled: false' to 'vis_builder.enabled: false'
181 | # Set the value of this setting to false to disable VisBuilder
182 | # functionality in Visualization.
183 | # vis_builder.enabled: false
184 |
185 | # 2.4 New Experimental Feature
186 | # Set the value of this setting to true to enable the experimental multiple data source
187 | # support feature. Use with caution.
188 | # data_source.enabled: false
189 | # Set the value of these settings to customize the crypto materials used to encrypt saved credentials
190 | # in data sources.
191 | # data_source.encryption.wrappingKeyName: 'changeme'
192 | # data_source.encryption.wrappingKeyNamespace: 'changeme'
193 | # data_source.encryption.wrappingKey: [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
194 |
195 | # 2.6 New ML Commons Dashboards Feature
196 | # Set the value of this setting to true to enable the ml commons dashboards
197 | # ml_commons_dashboards.enabled: false
198 |
199 | # 2.12 New experimental Assistant Dashboards Feature
200 | # Set the value of this setting to true to enable the assistant dashboards
201 | # assistant.chat.enabled: false
202 |
203 | # 2.13 New Query Assistant Feature
204 | # Set the value of this setting to false to disable the query assistant
205 | # observability.query_assist.enabled: false
206 |
207 | # 2.14 Enable Ui Metric Collectors in Usage Collector
208 | # Set the value of this setting to true to enable UI Metric collections
209 | # usageCollection.uiMetric.enabled: false
210 |
211 | opensearch.hosts: [https://localhost:9200]
212 | opensearch.ssl.verificationMode: none
213 | opensearch.username: admin
214 | opensearch.password: 'Qazwsxedc!@#123'
215 | opensearch.requestHeadersWhitelist: [authorization, securitytenant]
216 |
217 | opensearch_security.multitenancy.enabled: true
218 | opensearch_security.multitenancy.tenants.preferred: [Private, Global]
219 | opensearch_security.readonly_mode.roles: [kibana_read_only]
220 | # Use this setting if you are running opensearch-dashboards without https
221 | opensearch_security.cookie.secure: false
222 | server.host: '0.0.0.0'
223 |
--------------------------------------------------------------------------------
/dify/code/volumes/sandbox/conf/config.yaml:
--------------------------------------------------------------------------------
1 | app:
2 | port: 8194
3 | debug: True
4 | key: dify-sandbox
5 | max_workers: 4
6 | max_requests: 50
7 | worker_timeout: 5
8 | python_path: /usr/local/bin/python3
9 | enable_network: True # please make sure there is no network risk in your environment
10 | allowed_syscalls: # please leave this empty unless you are familiar with how seccomp works
11 | proxy:
12 | socks5: ''
13 | http: ''
14 | https: ''
15 |
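16 | # If you do need to restrict syscalls, allowed_syscalls takes a list of syscall
17 | # numbers; config.yaml.example alongside this file shows the expected format.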
--------------------------------------------------------------------------------
/dify/code/volumes/sandbox/conf/config.yaml.example:
--------------------------------------------------------------------------------
1 | app:
2 | port: 8194
3 | debug: True
4 | key: dify-sandbox
5 | max_workers: 4
6 | max_requests: 50
7 | worker_timeout: 5
8 | python_path: /usr/local/bin/python3
9 | python_lib_path:
10 | - /usr/local/lib/python3.10
11 | - /usr/lib/python3.10
12 | - /usr/lib/python3
13 | - /usr/lib/x86_64-linux-gnu
14 | - /etc/ssl/certs/ca-certificates.crt
15 | - /etc/nsswitch.conf
16 | - /etc/hosts
17 | - /etc/resolv.conf
18 | - /run/systemd/resolve/stub-resolv.conf
19 | - /run/resolvconf/resolv.conf
20 | - /etc/localtime
21 | - /usr/share/zoneinfo
22 | - /etc/timezone
23 | # add more paths if needed
24 | python_pip_mirror_url: https://pypi.tuna.tsinghua.edu.cn/simple
25 | nodejs_path: /usr/local/bin/node
26 | enable_network: True
27 | allowed_syscalls:
28 | - 1
29 | - 2
30 | - 3
31 | # add all the syscalls which you require
32 | proxy:
33 | socks5: ''
34 | http: ''
35 | https: ''
36 |
--------------------------------------------------------------------------------
/dify/code/volumes/sandbox/dependencies/python-requirements.txt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/easypanel-io/compose/405fd495347ebecae506e97d33616bec47f23089/dify/code/volumes/sandbox/dependencies/python-requirements.txt
--------------------------------------------------------------------------------
/dify/update.js:
--------------------------------------------------------------------------------
1 | import utils from "../utils.js";
2 |
3 | await utils.cloneOrPullRepo({ repo: "https://github.com/langgenius/dify.git" });
4 | await utils.copyDir("./repo/docker", "./code");
5 | await utils.removeContainerNames("./code/docker-compose.yaml");
6 | await utils.removePorts("./code/docker-compose.yaml");
7 |
--------------------------------------------------------------------------------
/dify/update.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | if [ ! -d "./repo" ]; then
4 | git clone --depth 1 --branch main --single-branch https://github.com/langgenius/dify.git repo
5 | else
6 | cd repo
7 | git pull
8 | cd ..
9 | fi
10 |
11 | cp -r ./repo/docker/. ./code
12 |
13 |
14 |
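15 | # Note: the companion update.js additionally strips container_name and ports
16 | # entries from code/docker-compose.yaml (see update.js in this directory).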
--------------------------------------------------------------------------------
/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "type": "module",
3 | "scripts": {
4 | "update": "node update.js"
5 | },
6 | "dependencies": {
7 | "execa": "^9.5.1",
8 | "glob": "^11.0.0",
9 | "yaml": "^2.6.1"
10 | }
11 | }
12 |
--------------------------------------------------------------------------------
/plane/README.md:
--------------------------------------------------------------------------------
1 | # Plane
2 |
3 | - copied from https://github.com/makeplane/plane
4 | - removed `ports`
5 |
--------------------------------------------------------------------------------
/plane/code/.env.example:
--------------------------------------------------------------------------------
1 | APP_DOMAIN=localhost
2 | APP_RELEASE=stable
3 |
4 | WEB_REPLICAS=1
5 | SPACE_REPLICAS=1
6 | ADMIN_REPLICAS=1
7 | API_REPLICAS=1
8 | WORKER_REPLICAS=1
9 | BEAT_WORKER_REPLICAS=1
10 | LIVE_REPLICAS=1
11 |
12 | NGINX_PORT=80
13 | WEB_URL=http://${APP_DOMAIN}
14 | DEBUG=0
15 | CORS_ALLOWED_ORIGINS=http://${APP_DOMAIN}
16 | API_BASE_URL=http://api:8000
17 |
18 | #DB SETTINGS
19 | PGHOST=plane-db
20 | PGDATABASE=plane
21 | POSTGRES_USER=plane
22 | POSTGRES_PASSWORD=plane
23 | POSTGRES_DB=plane
24 | POSTGRES_PORT=5432
25 | PGDATA=/var/lib/postgresql/data
26 | DATABASE_URL=
27 |
28 | # REDIS SETTINGS
29 | REDIS_HOST=plane-redis
30 | REDIS_PORT=6379
31 | REDIS_URL=
32 |
33 | # RabbitMQ Settings
34 | RABBITMQ_HOST=plane-mq
35 | RABBITMQ_PORT=5672
36 | RABBITMQ_USER=plane
37 | RABBITMQ_PASSWORD=plane
38 | RABBITMQ_VHOST=plane
39 | AMQP_URL=
40 |
41 | # Secret Key
42 | SECRET_KEY=60gp0byfz2dvffa45cxl20p1scy9xbpf6d8c5y0geejgkyp1b5
43 |
44 | # DATA STORE SETTINGS
45 | USE_MINIO=1
46 | AWS_REGION=
47 | AWS_ACCESS_KEY_ID=access-key
48 | AWS_SECRET_ACCESS_KEY=secret-key
49 | AWS_S3_ENDPOINT_URL=http://plane-minio:9000
50 | AWS_S3_BUCKET_NAME=uploads
51 | FILE_SIZE_LIMIT=5242880
52 |
53 | # Gunicorn Workers
54 | GUNICORN_WORKERS=1
55 |
56 | # UNCOMMENT `DOCKER_PLATFORM` IF YOU ARE ON `ARM64` AND DOCKER IMAGE IS NOT AVAILABLE FOR RESPECTIVE `APP_RELEASE`
57 | # DOCKER_PLATFORM=linux/amd64
58 |
59 | # Force HTTPS for handling SSL Termination
60 | MINIO_ENDPOINT_SSL=0
61 |
62 | # API key rate limit
63 | API_KEY_RATE_LIMIT=60/minute
64 |
--------------------------------------------------------------------------------
/plane/code/build.yml:
--------------------------------------------------------------------------------
1 | services:
2 | web:
3 | image: ${DOCKERHUB_USER:-local}/plane-frontend:${APP_RELEASE:-latest}
4 | build:
5 | context: .
6 | dockerfile: ./web/Dockerfile.web
7 |
8 | space:
9 | image: ${DOCKERHUB_USER:-local}/plane-space:${APP_RELEASE:-latest}
10 | build:
11 | context: ./
12 | dockerfile: ./space/Dockerfile.space
13 |
14 | admin:
15 | image: ${DOCKERHUB_USER:-local}/plane-admin:${APP_RELEASE:-latest}
16 | build:
17 | context: ./
18 | dockerfile: ./admin/Dockerfile.admin
19 |
20 | api:
21 | image: ${DOCKERHUB_USER:-local}/plane-backend:${APP_RELEASE:-latest}
22 | build:
23 | context: ./apiserver
24 | dockerfile: ./Dockerfile.api
25 |
26 | proxy:
27 | image: ${DOCKERHUB_USER:-local}/plane-proxy:${APP_RELEASE:-latest}
28 | build:
29 | context: ./nginx
30 | dockerfile: ./Dockerfile
31 |
--------------------------------------------------------------------------------
/plane/code/docker-compose.yml:
--------------------------------------------------------------------------------
1 | x-db-env: &db-env
2 | PGHOST: ${PGHOST:-plane-db}
3 | PGDATABASE: ${PGDATABASE:-plane}
4 | POSTGRES_USER: ${POSTGRES_USER:-plane}
5 | POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-plane}
6 | POSTGRES_DB: ${POSTGRES_DB:-plane}
7 | POSTGRES_PORT: ${POSTGRES_PORT:-5432}
8 | PGDATA: ${PGDATA:-/var/lib/postgresql/data}
9 |
10 | x-redis-env: &redis-env
11 | REDIS_HOST: ${REDIS_HOST:-plane-redis}
12 | REDIS_PORT: ${REDIS_PORT:-6379}
13 | REDIS_URL: ${REDIS_URL:-redis://plane-redis:6379/}
14 |
15 | x-minio-env: &minio-env
16 | MINIO_ROOT_USER: ${AWS_ACCESS_KEY_ID:-access-key}
17 | MINIO_ROOT_PASSWORD: ${AWS_SECRET_ACCESS_KEY:-secret-key}
18 |
19 | x-aws-s3-env: &aws-s3-env
20 | AWS_REGION: ${AWS_REGION:-}
21 | AWS_ACCESS_KEY_ID: ${AWS_ACCESS_KEY_ID:-access-key}
22 | AWS_SECRET_ACCESS_KEY: ${AWS_SECRET_ACCESS_KEY:-secret-key}
23 | AWS_S3_ENDPOINT_URL: ${AWS_S3_ENDPOINT_URL:-http://plane-minio:9000}
24 | AWS_S3_BUCKET_NAME: ${AWS_S3_BUCKET_NAME:-uploads}
25 |
26 | x-proxy-env: &proxy-env
27 | NGINX_PORT: ${NGINX_PORT:-80}
28 | BUCKET_NAME: ${AWS_S3_BUCKET_NAME:-uploads}
29 | FILE_SIZE_LIMIT: ${FILE_SIZE_LIMIT:-5242880}
30 |
31 | x-mq-env:
32 | # RabbitMQ Settings
33 | &mq-env
34 | RABBITMQ_HOST: ${RABBITMQ_HOST:-plane-mq}
35 | RABBITMQ_PORT: ${RABBITMQ_PORT:-5672}
36 | RABBITMQ_DEFAULT_USER: ${RABBITMQ_USER:-plane}
37 | RABBITMQ_DEFAULT_PASS: ${RABBITMQ_PASSWORD:-plane}
38 | RABBITMQ_DEFAULT_VHOST: ${RABBITMQ_VHOST:-plane}
39 | RABBITMQ_VHOST: ${RABBITMQ_VHOST:-plane}
40 |
41 | x-live-env: &live-env
42 | API_BASE_URL: ${API_BASE_URL:-http://api:8000}
43 |
44 | x-app-env: &app-env
45 | WEB_URL: ${WEB_URL:-http://localhost}
46 | DEBUG: ${DEBUG:-0}
47 | CORS_ALLOWED_ORIGINS: ${CORS_ALLOWED_ORIGINS}
48 | GUNICORN_WORKERS: 1
49 | USE_MINIO: ${USE_MINIO:-1}
50 | DATABASE_URL: ${DATABASE_URL:-postgresql://plane:plane@plane-db/plane}
51 | SECRET_KEY: ${SECRET_KEY:-60gp0byfz2dvffa45cxl20p1scy9xbpf6d8c5y0geejgkyp1b5}
52 | AMQP_URL: ${AMQP_URL:-amqp://plane:plane@plane-mq:5672/plane}
53 | API_KEY_RATE_LIMIT: ${API_KEY_RATE_LIMIT:-60/minute}
54 | MINIO_ENDPOINT_SSL: ${MINIO_ENDPOINT_SSL:-0}
55 |
56 | services:
57 | web:
58 | image: artifacts.plane.so/makeplane/plane-frontend:${APP_RELEASE:-stable}
59 | command: node web/server.js web
60 | deploy:
61 | replicas: ${WEB_REPLICAS:-1}
62 | restart_policy:
63 | condition: on-failure
64 | depends_on:
65 | - api
66 | - worker
67 |
68 | space:
69 | image: artifacts.plane.so/makeplane/plane-space:${APP_RELEASE:-stable}
70 | command: node space/server.js space
71 | deploy:
72 | replicas: ${SPACE_REPLICAS:-1}
73 | restart_policy:
74 | condition: on-failure
75 | depends_on:
76 | - api
77 | - worker
78 | - web
79 |
80 | admin:
81 | image: artifacts.plane.so/makeplane/plane-admin:${APP_RELEASE:-stable}
82 | command: node admin/server.js admin
83 | deploy:
84 | replicas: ${ADMIN_REPLICAS:-1}
85 | restart_policy:
86 | condition: on-failure
87 | depends_on:
88 | - api
89 | - web
90 |
91 | live:
92 | image: artifacts.plane.so/makeplane/plane-live:${APP_RELEASE:-stable}
93 | command: node live/dist/server.js live
94 | environment:
95 | <<: [ *live-env ]
96 | deploy:
97 | replicas: ${LIVE_REPLICAS:-1}
98 | restart_policy:
99 | condition: on-failure
100 | depends_on:
101 | - api
102 | - web
103 |
104 | api:
105 | image: artifacts.plane.so/makeplane/plane-backend:${APP_RELEASE:-stable}
106 | command: ./bin/docker-entrypoint-api.sh
107 | deploy:
108 | replicas: ${API_REPLICAS:-1}
109 | restart_policy:
110 | condition: on-failure
111 | volumes:
112 | - logs_api:/code/plane/logs
113 | environment:
114 | <<: [ *app-env, *db-env, *redis-env, *minio-env, *aws-s3-env, *proxy-env ]
115 | depends_on:
116 | - plane-db
117 | - plane-redis
118 | - plane-mq
119 |
120 | worker:
121 | image: artifacts.plane.so/makeplane/plane-backend:${APP_RELEASE:-stable}
122 | command: ./bin/docker-entrypoint-worker.sh
123 | deploy:
124 | replicas: ${WORKER_REPLICAS:-1}
125 | restart_policy:
126 | condition: on-failure
127 | volumes:
128 | - logs_worker:/code/plane/logs
129 | environment:
130 | <<: [ *app-env, *db-env, *redis-env, *minio-env, *aws-s3-env, *proxy-env ]
131 | depends_on:
132 | - api
133 | - plane-db
134 | - plane-redis
135 | - plane-mq
136 |
137 | beat-worker:
138 | image: artifacts.plane.so/makeplane/plane-backend:${APP_RELEASE:-stable}
139 | command: ./bin/docker-entrypoint-beat.sh
140 | deploy:
141 | replicas: ${BEAT_WORKER_REPLICAS:-1}
142 | restart_policy:
143 | condition: on-failure
144 | volumes:
145 | - logs_beat-worker:/code/plane/logs
146 | environment:
147 | <<: [ *app-env, *db-env, *redis-env, *minio-env, *aws-s3-env, *proxy-env ]
148 | depends_on:
149 | - api
150 | - plane-db
151 | - plane-redis
152 | - plane-mq
153 |
154 | migrator:
155 | image: artifacts.plane.so/makeplane/plane-backend:${APP_RELEASE:-stable}
156 | command: ./bin/docker-entrypoint-migrator.sh
157 | deploy:
158 | replicas: 1
159 | restart_policy:
160 | condition: on-failure
161 | volumes:
162 | - logs_migrator:/code/plane/logs
163 | environment:
164 | <<: [ *app-env, *db-env, *redis-env, *minio-env, *aws-s3-env, *proxy-env ]
165 | depends_on:
166 | - plane-db
167 | - plane-redis
168 |
169 | # Comment this if you already have a database running
170 | plane-db:
171 | image: postgres:15.7-alpine
172 | command: postgres -c 'max_connections=1000'
173 | deploy:
174 | replicas: 1
175 | restart_policy:
176 | condition: on-failure
177 | environment:
178 | <<: *db-env
179 | volumes:
180 | - pgdata:/var/lib/postgresql/data
181 |
182 | plane-redis:
183 | image: valkey/valkey:7.2.5-alpine
184 | deploy:
185 | replicas: 1
186 | restart_policy:
187 | condition: on-failure
188 | volumes:
189 | - redisdata:/data
190 |
191 | plane-mq:
192 | image: rabbitmq:3.13.6-management-alpine
193 | deploy:
194 | replicas: 1
195 | restart_policy:
196 | condition: on-failure
197 | environment:
198 | <<: *mq-env
199 | volumes:
200 | - rabbitmq_data:/var/lib/rabbitmq
201 |
202 | # Comment this out if you are using any external S3-compatible storage
203 | plane-minio:
204 | image: minio/minio:latest
205 | command: server /export --console-address ":9090"
206 | deploy:
207 | replicas: 1
208 | restart_policy:
209 | condition: on-failure
210 | environment:
211 | <<: *minio-env
212 | volumes:
213 | - uploads:/export
214 |
215 | # Comment this if you already have a reverse proxy running
216 | proxy:
217 | image: artifacts.plane.so/makeplane/plane-proxy:${APP_RELEASE:-stable}
218 | environment:
219 | <<: *proxy-env
220 | deploy:
221 | replicas: 1
222 | restart_policy:
223 | condition: on-failure
224 | depends_on:
225 | - web
226 | - api
227 | - space
228 |
229 | volumes:
230 | pgdata:
231 | redisdata:
232 | uploads:
233 | logs_api:
234 | logs_worker:
235 | logs_beat-worker:
236 | logs_migrator:
237 | rabbitmq_data:
238 |
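239 | # The x-*-env blocks at the top of this file are YAML anchors; each service merges
240 | # them into its environment via the "<<:" merge key, so a shared setting only needs
241 | # to be defined once.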
--------------------------------------------------------------------------------
/plane/code/images/download.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/easypanel-io/compose/405fd495347ebecae506e97d33616bec47f23089/plane/code/images/download.png
--------------------------------------------------------------------------------
/plane/code/images/migrate-error.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/easypanel-io/compose/405fd495347ebecae506e97d33616bec47f23089/plane/code/images/migrate-error.png
--------------------------------------------------------------------------------
/plane/code/images/restart.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/easypanel-io/compose/405fd495347ebecae506e97d33616bec47f23089/plane/code/images/restart.png
--------------------------------------------------------------------------------
/plane/code/images/started.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/easypanel-io/compose/405fd495347ebecae506e97d33616bec47f23089/plane/code/images/started.png
--------------------------------------------------------------------------------
/plane/code/images/stopped.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/easypanel-io/compose/405fd495347ebecae506e97d33616bec47f23089/plane/code/images/stopped.png
--------------------------------------------------------------------------------
/plane/code/images/upgrade.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/easypanel-io/compose/405fd495347ebecae506e97d33616bec47f23089/plane/code/images/upgrade.png
--------------------------------------------------------------------------------
/plane/code/migration-0.13-0.14.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | echo '
4 | ******************************************************************
5 |
6 | This script is solely for migration purposes.
7 | It performs a one-time migration of volume data from v0.13.2 => v0.14.x
8 |
9 | Assumptions:
10 | 1. Postgres data volume name ends with _pgdata
11 | 2. Minio data volume name ends with _uploads
12 | 3. Redis data volume name ends with _redisdata
13 |
14 | Any changes to this script can break the migration.
15 |
16 | Before you proceed, make sure you run the commands below
17 | to list the relevant docker volumes
18 |
19 | docker volume ls -q | grep -i "_pgdata"
20 | docker volume ls -q | grep -i "_uploads"
21 | docker volume ls -q | grep -i "_redisdata"
22 |
23 | *******************************************************
24 | '
25 |
26 | DOWNLOAD_FOL=./download
27 | rm -rf ${DOWNLOAD_FOL}
28 | mkdir -p ${DOWNLOAD_FOL}
29 |
30 | function volumeExists {
31 | if [ "$(docker volume ls -f name=$1 | awk '{print $NF}' | grep -E '^'$1'$')" ]; then
32 | return 0
33 | else
34 | return 1
35 | fi
36 | }
37 |
38 | function readPrefixes(){
39 | echo ''
40 | echo 'From the list of REDIS volumes below, identify the prefix of the source and destination volumes, leaving out "_redisdata" '
41 | echo '---------------------'
42 | docker volume ls -q | grep -i "_redisdata"
43 | echo ''
44 |
45 | read -p "Provide the Source Volume Prefix : " SRC_VOL_PREFIX
46 | until [ "$SRC_VOL_PREFIX" ]; do
47 | read -p "Provide the Source Volume Prefix : " SRC_VOL_PREFIX
48 | done
49 |
50 | read -p "Provide the Destination Volume Prefix : " DEST_VOL_PREFIX
51 | until [ "$DEST_VOL_PREFIX" ]; do
52 | read -p "Provide the Source Volume Prefix : " DEST_VOL_PREFIX
53 | done
54 |
55 | echo ''
56 | echo 'Prefix Provided '
57 | echo " Source : ${SRC_VOL_PREFIX}"
58 | echo " Destination : ${DEST_VOL_PREFIX}"
59 | echo '---------------------------------------'
60 | }
61 |
62 | function migrate(){
63 | GOOD_TO_GO=0
64 | SRC_VOLUME=${SRC_VOL_PREFIX}_${VOL_NAME_SUFFIX}
65 | DEST_VOLUME=${DEST_VOL_PREFIX}_${VOL_NAME_SUFFIX}
66 |
67 | if volumeExists $SRC_VOLUME; then
68 | if volumeExists $DEST_VOLUME; then
69 | GOOD_TO_GO=1
70 | else
71 | echo "Destination Volume '$DEST_VOLUME' does not exist"
72 | echo ''
73 | fi
74 | else
75 | echo "Source Volume '$SRC_VOLUME' does not exist"
76 | echo ''
77 | fi
78 |
79 | if [ "${GOOD_TO_GO}" = "1" ]; then
80 |
81 | echo "MIGRATING ${VOL_NAME_SUFFIX} FROM ${SRC_VOLUME} => ${DEST_VOLUME}"
82 |
83 | TEMP_CONTAINER=$(docker run -d -v $SRC_VOLUME:$CONTAINER_VOL_FOLDER busybox true)
84 | docker cp -q $TEMP_CONTAINER:$CONTAINER_VOL_FOLDER ${DOWNLOAD_FOL}/${VOL_NAME_SUFFIX}
85 | docker rm $TEMP_CONTAINER &> /dev/null
86 |
87 | TEMP_CONTAINER=$(docker run -d -v $DEST_VOLUME:$CONTAINER_VOL_FOLDER busybox true)
88 | if [ "$VOL_NAME_SUFFIX" = "pgdata" ]; then
89 | docker cp -q ${DOWNLOAD_FOL}/${VOL_NAME_SUFFIX} $TEMP_CONTAINER:$CONTAINER_VOL_FOLDER/_temp
90 | docker run --rm -v $DEST_VOLUME:$CONTAINER_VOL_FOLDER \
91 | -e DATA_FOLDER="${CONTAINER_VOL_FOLDER}" \
92 | busybox /bin/sh -c 'cp -Rf $DATA_FOLDER/_temp/* $DATA_FOLDER '
93 | else
94 | docker cp -q ${DOWNLOAD_FOL}/${VOL_NAME_SUFFIX} $TEMP_CONTAINER:$CONTAINER_VOL_FOLDER
95 | fi
96 | docker rm $TEMP_CONTAINER &> /dev/null
97 |
98 | echo ''
99 | fi
100 | }
101 |
102 | readPrefixes
103 |
104 | # MIGRATE DB
105 | CONTAINER_VOL_FOLDER=/var/lib/postgresql/data
106 | VOL_NAME_SUFFIX=pgdata
107 | migrate
108 |
109 | # MIGRATE REDIS
110 | CONTAINER_VOL_FOLDER=/data
111 | VOL_NAME_SUFFIX=redisdata
112 | migrate
113 |
114 | # MIGRATE MINIO
115 | CONTAINER_VOL_FOLDER=/export
116 | VOL_NAME_SUFFIX=uploads
117 | migrate
118 |
119 |
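120 | # Hypothetical example: if the old stack created volumes such as "planeold_pgdata"
121 | # and the new stack created "plane-app_pgdata", enter "planeold" as the source
122 | # prefix and "plane-app" as the destination prefix when prompted.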
--------------------------------------------------------------------------------
/plane/code/restore.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | function print_header() {
4 | clear
5 |
6 | cat <<"EOF"
7 | --------------------------------------------
8 | ____ _ /////////
9 | | _ \| | __ _ _ __ ___ /////////
10 | | |_) | |/ _` | '_ \ / _ \ ///// /////
11 | | __/| | (_| | | | | __/ ///// /////
12 | |_| |_|\__,_|_| |_|\___| ////
13 | ////
14 | --------------------------------------------
15 | Project management tool from the future
16 | --------------------------------------------
17 | EOF
18 | }
19 |
20 | function restoreSingleVolume() {
21 | selectedVolume=$1
22 | backupFolder=$2
23 | restoreFile=$3
24 |
25 | docker volume rm "$selectedVolume" > /dev/null 2>&1
26 |
27 | if [ $? -ne 0 ]; then
28 | echo "Error: Failed to remove volume $selectedVolume"
29 | echo ""
30 | return 1
31 | fi
32 |
33 | docker volume create "$selectedVolume" > /dev/null 2>&1
34 | if [ $? -ne 0 ]; then
35 | echo "Error: Failed to create volume $selectedVolume"
36 | echo ""
37 | return 1
38 | fi
39 |
40 | docker run --rm \
41 | -e TAR_NAME="$restoreFile" \
42 | -v "$selectedVolume":"/vol" \
43 | -v "$backupFolder":/backup \
44 | busybox sh -c 'mkdir -p /restore && tar -xzf "/backup/${TAR_NAME}.tar.gz" -C /restore && mv /restore/${TAR_NAME}/* /vol'
45 |
46 | if [ $? -ne 0 ]; then
47 | echo "Error: Failed to restore volume ${selectedVolume} from ${restoreFile}.tar.gz"
48 | echo ""
49 | return 1
50 | fi
51 | echo ".....Successfully restored volume $selectedVolume from ${restoreFile}.tar.gz"
52 | echo ""
53 | }
54 |
55 | function restoreData() {
56 | print_header
57 | local BACKUP_FOLDER=${1:-$PWD}
58 |
59 | local dockerServiceStatus
60 | dockerServiceStatus=$($COMPOSE_CMD ls --filter name=plane-app --format=json | jq -r .[0].Status)
61 | local dockerServicePrefix
62 | dockerServicePrefix="running"
63 |
64 | if [[ $dockerServiceStatus == $dockerServicePrefix* ]]; then
65 | echo "Plane App is running. Please STOP the Plane App before restoring data."
66 | exit 1
67 | fi
68 |
69 | local volume_suffix
70 | volume_suffix="_pgdata|_redisdata|_uploads|_rabbitmq_data"
71 | local volumes
72 | volumes=$(docker volume ls -f "name=plane-app" --format "{{.Name}}" | grep -E "$volume_suffix")
73 | # Check if there are any matching volumes
74 | if [ -z "$volumes" ]; then
75 | echo ".....No volumes found starting with 'plane-app'"
76 | exit 1
77 | fi
78 |
79 |
80 | for BACKUP_FILE in $BACKUP_FOLDER/*.tar.gz; do
81 | if [ -e "$BACKUP_FILE" ]; then
82 |
83 | local restoreFileName
84 | restoreFileName=$(basename "$BACKUP_FILE")
85 | restoreFileName="${restoreFileName%.tar.gz}"
86 |
87 | local restoreVolName
88 | restoreVolName="plane-app_${restoreFileName}"
89 | echo "Found $BACKUP_FILE"
90 |
91 | local docVol
92 | docVol=$(docker volume ls -f "name=$restoreVolName" --format "{{.Name}}" | grep -E "$volume_suffix")
93 |
94 | if [ -z "$docVol" ]; then
95 | echo "Skipping: No volume found with name $restoreVolName"
96 | else
97 | echo ".....Restoring $docVol"
98 | restoreSingleVolume "$docVol" "$BACKUP_FOLDER" "$restoreFileName"
99 | fi
100 | else
101 | echo "No .tar.gz files found in the current directory."
102 | echo ""
103 | echo "Please provide the path to the backup file."
104 | echo ""
105 | echo "Usage: ./restore.sh /path/to/backup"
106 | exit 1
107 | fi
108 | done
109 |
110 | echo ""
111 | echo "Restore completed successfully."
112 | echo ""
113 | }
114 |
115 | # if docker-compose is installed
116 | if command -v docker-compose &> /dev/null
117 | then
118 | COMPOSE_CMD="docker-compose"
119 | else
120 | COMPOSE_CMD="docker compose"
121 | fi
122 |
123 | restoreData "$@"
--------------------------------------------------------------------------------
/plane/update.js:
--------------------------------------------------------------------------------
1 | import utils from "../utils.js";
2 |
3 | await utils.cloneOrPullRepo({
4 | repo: "https://github.com/makeplane/plane.git",
5 | path: "./repo",
6 | branch: "preview",
7 | });
8 |
9 | await utils.copyDir("./repo/deploy/selfhost", "./code");
10 | await utils.renameFile("./code/variables.env", "./code/.env.example");
11 |
12 | await utils.removeContainerNames("./code/docker-compose.yml");
13 | await utils.removePorts("./code/docker-compose.yml");
14 |
--------------------------------------------------------------------------------
/plane/update.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | if [ ! -d "./repo" ]; then
4 | git clone --depth 1 --branch preview --single-branch https://github.com/makeplane/plane.git repo
5 | else
6 | cd repo
7 | git pull
8 | cd ..
9 | fi
10 |
11 | cp -r ./repo/deploy/selfhost/. ./code
12 | mv ./code/variables.env ./code/.env.example
13 |
14 |
15 |
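16 | # Note: the companion update.js additionally strips container_name and ports
17 | # entries from code/docker-compose.yml (see update.js in this directory).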
--------------------------------------------------------------------------------
/supabase/README.md:
--------------------------------------------------------------------------------
1 | # Supabase
2 |
3 | - copied from https://github.com/supabase/supabase/tree/master/docker
4 | - removed `container_name`
5 | - removed `ports`
6 |
--------------------------------------------------------------------------------
/supabase/code/.env.example:
--------------------------------------------------------------------------------
1 | ############
2 | # Secrets
3 | # YOU MUST CHANGE THESE BEFORE GOING INTO PRODUCTION
4 | ############
5 |
6 | POSTGRES_PASSWORD=your-super-secret-and-long-postgres-password
7 | JWT_SECRET=your-super-secret-jwt-token-with-at-least-32-characters-long
8 | ANON_KEY=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyAgCiAgICAicm9sZSI6ICJhbm9uIiwKICAgICJpc3MiOiAic3VwYWJhc2UtZGVtbyIsCiAgICAiaWF0IjogMTY0MTc2OTIwMCwKICAgICJleHAiOiAxNzk5NTM1NjAwCn0.dc_X5iR_VP_qT0zsiyj_I_OZ2T9FtRU2BBNWN8Bu4GE
9 | SERVICE_ROLE_KEY=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyAgCiAgICAicm9sZSI6ICJzZXJ2aWNlX3JvbGUiLAogICAgImlzcyI6ICJzdXBhYmFzZS1kZW1vIiwKICAgICJpYXQiOiAxNjQxNzY5MjAwLAogICAgImV4cCI6IDE3OTk1MzU2MDAKfQ.DaYlNEoUrrEn2Ig7tqibS-PHK5vgusbcbo7X36XVt4Q
10 | DASHBOARD_USERNAME=supabase
11 | DASHBOARD_PASSWORD=this_password_is_insecure_and_should_be_updated
12 | SECRET_KEY_BASE=UpNVntn3cDxHJpq99YMc1T1AQgQpc8kfYTuRgBiYa15BLrx8etQoXz3gZv1/u2oq
13 | VAULT_ENC_KEY=your-encryption-key-32-chars-min
14 |
15 |
16 | ############
17 | # Database - You can change these to any PostgreSQL database that has logical replication enabled.
18 | ############
19 |
20 | POSTGRES_HOST=db
21 | POSTGRES_DB=postgres
22 | POSTGRES_PORT=5432
23 | # default user is postgres
24 |
25 |
26 | ############
27 | # Supavisor -- Database pooler
28 | ############
29 | POOLER_PROXY_PORT_TRANSACTION=6543
30 | POOLER_DEFAULT_POOL_SIZE=20
31 | POOLER_MAX_CLIENT_CONN=100
32 | POOLER_TENANT_ID=your-tenant-id
33 |
34 |
35 | ############
36 | # API Proxy - Configuration for the Kong Reverse proxy.
37 | ############
38 |
39 | KONG_HTTP_PORT=8000
40 | KONG_HTTPS_PORT=8443
41 |
42 |
43 | ############
44 | # API - Configuration for PostgREST.
45 | ############
46 |
47 | PGRST_DB_SCHEMAS=public,storage,graphql_public
48 |
49 |
50 | ############
51 | # Auth - Configuration for the GoTrue authentication server.
52 | ############
53 |
54 | ## General
55 | SITE_URL=http://localhost:3000
56 | ADDITIONAL_REDIRECT_URLS=
57 | JWT_EXPIRY=3600
58 | DISABLE_SIGNUP=false
59 | API_EXTERNAL_URL=http://localhost:8000
60 |
61 | ## Mailer Config
62 | MAILER_URLPATHS_CONFIRMATION="/auth/v1/verify"
63 | MAILER_URLPATHS_INVITE="/auth/v1/verify"
64 | MAILER_URLPATHS_RECOVERY="/auth/v1/verify"
65 | MAILER_URLPATHS_EMAIL_CHANGE="/auth/v1/verify"
66 |
67 | ## Email auth
68 | ENABLE_EMAIL_SIGNUP=true
69 | ENABLE_EMAIL_AUTOCONFIRM=false
70 | SMTP_ADMIN_EMAIL=admin@example.com
71 | SMTP_HOST=supabase-mail
72 | SMTP_PORT=2500
73 | SMTP_USER=fake_mail_user
74 | SMTP_PASS=fake_mail_password
75 | SMTP_SENDER_NAME=fake_sender
76 | ENABLE_ANONYMOUS_USERS=false
77 |
78 | ## Phone auth
79 | ENABLE_PHONE_SIGNUP=true
80 | ENABLE_PHONE_AUTOCONFIRM=true
81 |
82 |
83 | ############
84 | # Studio - Configuration for the Dashboard
85 | ############
86 |
87 | STUDIO_DEFAULT_ORGANIZATION=Default Organization
88 | STUDIO_DEFAULT_PROJECT=Default Project
89 |
90 | STUDIO_PORT=3000
91 | # replace if you intend to use Studio outside of localhost
92 | SUPABASE_PUBLIC_URL=http://localhost:8000
93 |
94 | # Enable webp support
95 | IMGPROXY_ENABLE_WEBP_DETECTION=true
96 |
97 | # Add your OpenAI API key to enable SQL Editor Assistant
98 | OPENAI_API_KEY=
99 |
100 |
101 | ############
102 | # Functions - Configuration for Functions
103 | ############
104 | # NOTE: VERIFY_JWT applies to all functions. Per-function VERIFY_JWT is not supported yet.
105 | FUNCTIONS_VERIFY_JWT=false
106 |
107 |
108 | ############
109 | # Logs - Configuration for Logflare
110 | # Please refer to https://supabase.com/docs/reference/self-hosting-analytics/introduction
111 | ############
112 |
113 | LOGFLARE_LOGGER_BACKEND_API_KEY=your-super-secret-and-long-logflare-key
114 |
115 | # Change vector.toml sinks to reflect this change
116 | LOGFLARE_API_KEY=your-super-secret-and-long-logflare-key
117 |
118 | # Docker socket location - this value will differ depending on your OS
119 | DOCKER_SOCKET_LOCATION=/var/run/docker.sock
120 |
121 | # Google Cloud Project details
122 | GOOGLE_PROJECT_ID=GOOGLE_PROJECT_ID
123 | GOOGLE_PROJECT_NUMBER=GOOGLE_PROJECT_NUMBER
124 |
--------------------------------------------------------------------------------
/supabase/code/.gitignore:
--------------------------------------------------------------------------------
1 | volumes/db/data
2 | volumes/storage
3 | .env
4 | test.http
5 | docker-compose.override.yml
6 |
--------------------------------------------------------------------------------
/supabase/code/README.md:
--------------------------------------------------------------------------------
1 | # Supabase Docker
2 |
3 | This is a minimal Docker Compose setup for self-hosting Supabase. Follow the steps [here](https://supabase.com/docs/guides/hosting/docker) to get started.
4 |
--------------------------------------------------------------------------------
/supabase/code/dev/data.sql:
--------------------------------------------------------------------------------
1 | create table profiles (
2 | id uuid references auth.users not null,
3 | updated_at timestamp with time zone,
4 | username text unique,
5 | avatar_url text,
6 | website text,
7 |
8 | primary key (id),
9 | unique(username),
10 | constraint username_length check (char_length(username) >= 3)
11 | );
12 |
13 | alter table profiles enable row level security;
14 |
15 | create policy "Public profiles are viewable by the owner."
16 | on profiles for select
17 | using ( auth.uid() = id );
18 |
19 | create policy "Users can insert their own profile."
20 | on profiles for insert
21 | with check ( auth.uid() = id );
22 |
23 | create policy "Users can update own profile."
24 | on profiles for update
25 | using ( auth.uid() = id );
26 |
27 | -- Set up Realtime
28 | begin;
29 | drop publication if exists supabase_realtime;
30 | create publication supabase_realtime;
31 | commit;
32 | alter publication supabase_realtime add table profiles;
33 |
34 | -- Set up Storage
35 | insert into storage.buckets (id, name)
36 | values ('avatars', 'avatars');
37 |
38 | create policy "Avatar images are publicly accessible."
39 | on storage.objects for select
40 | using ( bucket_id = 'avatars' );
41 |
42 | create policy "Anyone can upload an avatar."
43 | on storage.objects for insert
44 | with check ( bucket_id = 'avatars' );
45 |
46 | create policy "Anyone can update an avatar."
47 | on storage.objects for update
48 | with check ( bucket_id = 'avatars' );
49 |
--------------------------------------------------------------------------------
/supabase/code/dev/docker-compose.dev.yml:
--------------------------------------------------------------------------------
1 | version: "3.8"
2 |
3 | services:
4 | studio:
5 | build:
6 | context: ..
7 | dockerfile: studio/Dockerfile
8 | target: dev
9 | ports:
10 | - 8082:8082
11 | mail:
12 | container_name: supabase-mail
13 | image: inbucket/inbucket:3.0.3
14 | ports:
15 | - '2500:2500' # SMTP
16 | - '9000:9000' # web interface
17 | - '1100:1100' # POP3
18 | auth:
19 | environment:
20 | - GOTRUE_SMTP_USER=
21 | - GOTRUE_SMTP_PASS=
22 | meta:
23 | ports:
24 | - 5555:8080
25 | db:
26 | restart: 'no'
27 | volumes:
28 | # Always use a fresh database when developing
29 | - /var/lib/postgresql/data
30 | # Seed data should be inserted last (alphabetical order)
31 | - ./dev/data.sql:/docker-entrypoint-initdb.d/seed.sql
32 | storage:
33 | volumes:
34 | - /var/lib/storage
35 |
--------------------------------------------------------------------------------
/supabase/code/docker-compose.s3.yml:
--------------------------------------------------------------------------------
1 | services:
2 |
3 | minio:
4 | image: minio/minio
5 | ports:
6 | - '9000:9000'
7 | - '9001:9001'
8 | environment:
9 | MINIO_ROOT_USER: supa-storage
10 | MINIO_ROOT_PASSWORD: secret1234
11 | command: server --console-address ":9001" /data
12 | healthcheck:
13 | test: [ "CMD", "curl", "-f", "http://minio:9000/minio/health/live" ]
14 | interval: 2s
15 | timeout: 10s
16 | retries: 5
17 | volumes:
18 | - ./volumes/storage:/data:z
19 |
20 | minio-createbucket:
21 | image: minio/mc
22 | depends_on:
23 | minio:
24 | condition: service_healthy
25 | entrypoint: >
26 | /bin/sh -c "
27 | /usr/bin/mc alias set supa-minio http://minio:9000 supa-storage secret1234;
28 | /usr/bin/mc mb supa-minio/stub;
29 | exit 0;
30 | "
31 |
32 | storage:
33 | container_name: supabase-storage
34 | image: supabase/storage-api:v1.11.13
35 | depends_on:
36 | db:
37 | # Disable this if you are using an external Postgres database
38 | condition: service_healthy
39 | rest:
40 | condition: service_started
41 | imgproxy:
42 | condition: service_started
43 | minio:
44 | condition: service_healthy
45 | healthcheck:
46 | test:
47 | [
48 | "CMD",
49 | "wget",
50 | "--no-verbose",
51 | "--tries=1",
52 | "--spider",
53 | "http://localhost:5000/status"
54 | ]
55 | timeout: 5s
56 | interval: 5s
57 | retries: 3
58 | restart: unless-stopped
59 | environment:
60 | ANON_KEY: ${ANON_KEY}
61 | SERVICE_KEY: ${SERVICE_ROLE_KEY}
62 | POSTGREST_URL: http://rest:3000
63 | PGRST_JWT_SECRET: ${JWT_SECRET}
64 | DATABASE_URL: postgres://supabase_storage_admin:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/${POSTGRES_DB}
65 | FILE_SIZE_LIMIT: 52428800
66 | STORAGE_BACKEND: s3
67 | GLOBAL_S3_BUCKET: stub
68 | GLOBAL_S3_ENDPOINT: http://minio:9000
69 | GLOBAL_S3_PROTOCOL: http
70 | GLOBAL_S3_FORCE_PATH_STYLE: true
71 | AWS_ACCESS_KEY_ID: supa-storage
72 | AWS_SECRET_ACCESS_KEY: secret1234
73 | AWS_DEFAULT_REGION: stub
74 | FILE_STORAGE_BACKEND_PATH: /var/lib/storage
75 | TENANT_ID: stub
76 | # TODO: https://github.com/supabase/storage-api/issues/55
77 | REGION: stub
78 | ENABLE_IMAGE_TRANSFORMATION: "true"
79 | IMGPROXY_URL: http://imgproxy:5001
80 | volumes:
81 | - ./volumes/storage:/var/lib/storage:z
82 |
83 | imgproxy:
84 | container_name: supabase-imgproxy
85 | image: darthsim/imgproxy:v3.8.0
86 | healthcheck:
87 | test: [ "CMD", "imgproxy", "health" ]
88 | timeout: 5s
89 | interval: 5s
90 | retries: 3
91 | environment:
92 | IMGPROXY_BIND: ":5001"
93 | IMGPROXY_USE_ETAG: "true"
94 | IMGPROXY_ENABLE_WEBP_DETECTION: ${IMGPROXY_ENABLE_WEBP_DETECTION}
95 |
--------------------------------------------------------------------------------
/supabase/code/reset.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | echo "WARNING: This will remove all containers and container data, and will reset the .env file. This action cannot be undone!"
4 | read -p "Are you sure you want to proceed? (y/N) " -n 1 -r
5 | echo # Move to a new line
6 | if [[ ! $REPLY =~ ^[Yy]$ ]]
7 | then
8 | echo "Operation cancelled."
9 | exit 1
10 | fi
11 |
12 | echo "Stopping and removing all containers..."
13 | docker compose -f docker-compose.yml -f ./dev/docker-compose.dev.yml down -v --remove-orphans
14 |
15 | echo "Cleaning up bind-mounted directories..."
16 | BIND_MOUNTS=(
17 | "./volumes/db/data"
18 | )
19 |
20 | for DIR in "${BIND_MOUNTS[@]}"; do
21 | if [ -d "$DIR" ]; then
22 | echo "Deleting $DIR..."
23 | rm -rf "$DIR"
24 | else
25 | echo "Directory $DIR does not exist. Skipping bind mount deletion step..."
26 | fi
27 | done
28 |
29 | echo "Resetting .env file..."
30 | if [ -f ".env" ]; then
31 | echo "Removing existing .env file..."
32 | rm -f .env
33 | else
34 | echo "No .env file found. Skipping .env removal step..."
35 | fi
36 |
37 | if [ -f ".env.example" ]; then
38 | echo "Copying .env.example to .env..."
39 | cp .env.example .env
40 | else
41 | echo ".env.example file not found. Skipping .env reset step..."
42 | fi
43 |
44 | echo "Cleanup complete!"
--------------------------------------------------------------------------------
/supabase/code/volumes/api/kong.yml:
--------------------------------------------------------------------------------
1 | _format_version: '2.1'
2 | _transform: true
3 |
4 | ###
5 | ### Consumers / Users
6 | ###
7 | consumers:
8 | - username: DASHBOARD
9 | - username: anon
10 | keyauth_credentials:
11 | - key: $SUPABASE_ANON_KEY
12 | - username: service_role
13 | keyauth_credentials:
14 | - key: $SUPABASE_SERVICE_KEY
15 |
16 | ###
17 | ### Access Control List
18 | ###
19 | acls:
20 | - consumer: anon
21 | group: anon
22 | - consumer: service_role
23 | group: admin
24 |
25 | ###
26 | ### Dashboard credentials
27 | ###
28 | basicauth_credentials:
29 | - consumer: DASHBOARD
30 | username: $DASHBOARD_USERNAME
31 | password: $DASHBOARD_PASSWORD
32 |
33 | ###
34 | ### API Routes
35 | ###
36 | services:
37 | ## Open Auth routes
38 | - name: auth-v1-open
39 | url: http://auth:9999/verify
40 | routes:
41 | - name: auth-v1-open
42 | strip_path: true
43 | paths:
44 | - /auth/v1/verify
45 | plugins:
46 | - name: cors
47 | - name: auth-v1-open-callback
48 | url: http://auth:9999/callback
49 | routes:
50 | - name: auth-v1-open-callback
51 | strip_path: true
52 | paths:
53 | - /auth/v1/callback
54 | plugins:
55 | - name: cors
56 | - name: auth-v1-open-authorize
57 | url: http://auth:9999/authorize
58 | routes:
59 | - name: auth-v1-open-authorize
60 | strip_path: true
61 | paths:
62 | - /auth/v1/authorize
63 | plugins:
64 | - name: cors
65 |
66 | ## Secure Auth routes
67 | - name: auth-v1
68 | _comment: 'GoTrue: /auth/v1/* -> http://auth:9999/*'
69 | url: http://auth:9999/
70 | routes:
71 | - name: auth-v1-all
72 | strip_path: true
73 | paths:
74 | - /auth/v1/
75 | plugins:
76 | - name: cors
77 | - name: key-auth
78 | config:
79 | hide_credentials: false
80 | - name: acl
81 | config:
82 | hide_groups_header: true
83 | allow:
84 | - admin
85 | - anon
86 |
87 | ## Secure REST routes
88 | - name: rest-v1
89 | _comment: 'PostgREST: /rest/v1/* -> http://rest:3000/*'
90 | url: http://rest:3000/
91 | routes:
92 | - name: rest-v1-all
93 | strip_path: true
94 | paths:
95 | - /rest/v1/
96 | plugins:
97 | - name: cors
98 | - name: key-auth
99 | config:
100 | hide_credentials: true
101 | - name: acl
102 | config:
103 | hide_groups_header: true
104 | allow:
105 | - admin
106 | - anon
107 |
108 | ## Secure GraphQL routes
109 | - name: graphql-v1
110 | _comment: 'PostgREST: /graphql/v1/* -> http://rest:3000/rpc/graphql'
111 | url: http://rest:3000/rpc/graphql
112 | routes:
113 | - name: graphql-v1-all
114 | strip_path: true
115 | paths:
116 | - /graphql/v1
117 | plugins:
118 | - name: cors
119 | - name: key-auth
120 | config:
121 | hide_credentials: true
122 | - name: request-transformer
123 | config:
124 | add:
125 | headers:
126 | - Content-Profile:graphql_public
127 | - name: acl
128 | config:
129 | hide_groups_header: true
130 | allow:
131 | - admin
132 | - anon
133 |
134 | ## Secure Realtime routes
135 | - name: realtime-v1-ws
136 | _comment: 'Realtime: /realtime/v1/* -> ws://realtime:4000/socket/*'
137 | url: http://realtime-dev.supabase-realtime:4000/socket
138 | protocol: ws
139 | routes:
140 | - name: realtime-v1-ws
141 | strip_path: true
142 | paths:
143 | - /realtime/v1/
144 | plugins:
145 | - name: cors
146 | - name: key-auth
147 | config:
148 | hide_credentials: false
149 | - name: acl
150 | config:
151 | hide_groups_header: true
152 | allow:
153 | - admin
154 | - anon
155 | - name: realtime-v1-rest
156 |     _comment: 'Realtime: /realtime/v1/api -> http://realtime:4000/api/*'
157 | url: http://realtime-dev.supabase-realtime:4000/api
158 | protocol: http
159 | routes:
160 | - name: realtime-v1-rest
161 | strip_path: true
162 | paths:
163 | - /realtime/v1/api
164 | plugins:
165 | - name: cors
166 | - name: key-auth
167 | config:
168 | hide_credentials: false
169 | - name: acl
170 | config:
171 | hide_groups_header: true
172 | allow:
173 | - admin
174 | - anon
175 | ## Storage routes: the storage server manages its own auth
176 | - name: storage-v1
177 | _comment: 'Storage: /storage/v1/* -> http://storage:5000/*'
178 | url: http://storage:5000/
179 | routes:
180 | - name: storage-v1-all
181 | strip_path: true
182 | paths:
183 | - /storage/v1/
184 | plugins:
185 | - name: cors
186 |
187 | ## Edge Functions routes
188 | - name: functions-v1
189 | _comment: 'Edge Functions: /functions/v1/* -> http://functions:9000/*'
190 | url: http://functions:9000/
191 | routes:
192 | - name: functions-v1-all
193 | strip_path: true
194 | paths:
195 | - /functions/v1/
196 | plugins:
197 | - name: cors
198 |
199 | ## Analytics routes
200 | - name: analytics-v1
201 | _comment: 'Analytics: /analytics/v1/* -> http://logflare:4000/*'
202 | url: http://analytics:4000/
203 | routes:
204 | - name: analytics-v1-all
205 | strip_path: true
206 | paths:
207 | - /analytics/v1/
208 |
209 | ## Secure Database routes
210 | - name: meta
211 | _comment: 'pg-meta: /pg/* -> http://pg-meta:8080/*'
212 | url: http://meta:8080/
213 | routes:
214 | - name: meta-all
215 | strip_path: true
216 | paths:
217 | - /pg/
218 | plugins:
219 | - name: key-auth
220 | config:
221 | hide_credentials: false
222 | - name: acl
223 | config:
224 | hide_groups_header: true
225 | allow:
226 | - admin
227 |
228 | ## Protected Dashboard - catch all remaining routes
229 | - name: dashboard
230 | _comment: 'Studio: /* -> http://studio:3000/*'
231 | url: http://studio:3000/
232 | routes:
233 | - name: dashboard-all
234 | strip_path: true
235 | paths:
236 | - /
237 | plugins:
238 | - name: cors
239 | - name: basic-auth
240 | config:
241 | hide_credentials: true
242 |
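As an illustration of how the key-auth and acl plugins above gate the rest-v1 route, a client request has to present one of the keys declared under consumers. A minimal TypeScript sketch, assuming Kong listens on localhost:8000; the key value and the "todos" table are placeholders, not values from this file:

    // Sketch: query PostgREST through the Kong rest-v1 route defined above.
    // KONG_URL, ANON_KEY and the "todos" table are placeholders for this example.
    const KONG_URL = 'http://localhost:8000'
    const ANON_KEY = '<SUPABASE_ANON_KEY from .env>'

    const res = await fetch(`${KONG_URL}/rest/v1/todos?select=*`, {
      headers: {
        apikey: ANON_KEY,                    // read by Kong's key-auth plugin
        Authorization: `Bearer ${ANON_KEY}`, // read by PostgREST itself
      },
    })
    console.log(res.status, await res.json())

Kong's key-auth plugin accepts the key via its default `apikey` header (or query parameter), and the acl plugin then admits the request because the anon consumer belongs to the allowed anon group.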
--------------------------------------------------------------------------------
/supabase/code/volumes/db/_supabase.sql:
--------------------------------------------------------------------------------
1 | \set pguser `echo "$POSTGRES_USER"`
2 |
3 | CREATE DATABASE _supabase WITH OWNER :pguser;
4 |
--------------------------------------------------------------------------------
/supabase/code/volumes/db/init/data.sql:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/easypanel-io/compose/405fd495347ebecae506e97d33616bec47f23089/supabase/code/volumes/db/init/data.sql
--------------------------------------------------------------------------------
/supabase/code/volumes/db/jwt.sql:
--------------------------------------------------------------------------------
1 | \set jwt_secret `echo "$JWT_SECRET"`
2 | \set jwt_exp `echo "$JWT_EXP"`
3 |
4 | ALTER DATABASE postgres SET "app.settings.jwt_secret" TO :'jwt_secret';
5 | ALTER DATABASE postgres SET "app.settings.jwt_exp" TO :'jwt_exp';
6 |
--------------------------------------------------------------------------------
/supabase/code/volumes/db/logs.sql:
--------------------------------------------------------------------------------
1 | \set pguser `echo "$POSTGRES_USER"`
2 |
3 | \c _supabase
4 | create schema if not exists _analytics;
5 | alter schema _analytics owner to :pguser;
6 | \c postgres
7 |
--------------------------------------------------------------------------------
/supabase/code/volumes/db/pooler.sql:
--------------------------------------------------------------------------------
1 | \set pguser `echo "$POSTGRES_USER"`
2 |
3 | \c _supabase
4 | create schema if not exists _supavisor;
5 | alter schema _supavisor owner to :pguser;
6 | \c postgres
7 |
--------------------------------------------------------------------------------
/supabase/code/volumes/db/realtime.sql:
--------------------------------------------------------------------------------
1 | \set pguser `echo "$POSTGRES_USER"`
2 |
3 | create schema if not exists _realtime;
4 | alter schema _realtime owner to :pguser;
5 |
--------------------------------------------------------------------------------
/supabase/code/volumes/db/roles.sql:
--------------------------------------------------------------------------------
1 | -- NOTE: change to your own passwords for production environments
2 | \set pgpass `echo "$POSTGRES_PASSWORD"`
3 |
4 | ALTER USER authenticator WITH PASSWORD :'pgpass';
5 | ALTER USER pgbouncer WITH PASSWORD :'pgpass';
6 | ALTER USER supabase_auth_admin WITH PASSWORD :'pgpass';
7 | ALTER USER supabase_functions_admin WITH PASSWORD :'pgpass';
8 | ALTER USER supabase_storage_admin WITH PASSWORD :'pgpass';
9 |
--------------------------------------------------------------------------------
/supabase/code/volumes/db/webhooks.sql:
--------------------------------------------------------------------------------
1 | BEGIN;
2 | -- Create pg_net extension
3 | CREATE EXTENSION IF NOT EXISTS pg_net SCHEMA extensions;
4 | -- Create supabase_functions schema
5 | CREATE SCHEMA supabase_functions AUTHORIZATION supabase_admin;
6 | GRANT USAGE ON SCHEMA supabase_functions TO postgres, anon, authenticated, service_role;
7 | ALTER DEFAULT PRIVILEGES IN SCHEMA supabase_functions GRANT ALL ON TABLES TO postgres, anon, authenticated, service_role;
8 | ALTER DEFAULT PRIVILEGES IN SCHEMA supabase_functions GRANT ALL ON FUNCTIONS TO postgres, anon, authenticated, service_role;
9 | ALTER DEFAULT PRIVILEGES IN SCHEMA supabase_functions GRANT ALL ON SEQUENCES TO postgres, anon, authenticated, service_role;
10 | -- supabase_functions.migrations definition
11 | CREATE TABLE supabase_functions.migrations (
12 | version text PRIMARY KEY,
13 | inserted_at timestamptz NOT NULL DEFAULT NOW()
14 | );
15 | -- Initial supabase_functions migration
16 | INSERT INTO supabase_functions.migrations (version) VALUES ('initial');
17 | -- supabase_functions.hooks definition
18 | CREATE TABLE supabase_functions.hooks (
19 | id bigserial PRIMARY KEY,
20 | hook_table_id integer NOT NULL,
21 | hook_name text NOT NULL,
22 | created_at timestamptz NOT NULL DEFAULT NOW(),
23 | request_id bigint
24 | );
25 | CREATE INDEX supabase_functions_hooks_request_id_idx ON supabase_functions.hooks USING btree (request_id);
26 | CREATE INDEX supabase_functions_hooks_h_table_id_h_name_idx ON supabase_functions.hooks USING btree (hook_table_id, hook_name);
27 | COMMENT ON TABLE supabase_functions.hooks IS 'Supabase Functions Hooks: Audit trail for triggered hooks.';
28 | CREATE FUNCTION supabase_functions.http_request()
29 | RETURNS trigger
30 | LANGUAGE plpgsql
31 | AS $function$
32 | DECLARE
33 | request_id bigint;
34 | payload jsonb;
35 | url text := TG_ARGV[0]::text;
36 | method text := TG_ARGV[1]::text;
37 | headers jsonb DEFAULT '{}'::jsonb;
38 | params jsonb DEFAULT '{}'::jsonb;
39 | timeout_ms integer DEFAULT 1000;
40 | BEGIN
41 | IF url IS NULL OR url = 'null' THEN
42 | RAISE EXCEPTION 'url argument is missing';
43 | END IF;
44 |
45 | IF method IS NULL OR method = 'null' THEN
46 | RAISE EXCEPTION 'method argument is missing';
47 | END IF;
48 |
49 | IF TG_ARGV[2] IS NULL OR TG_ARGV[2] = 'null' THEN
50 | headers = '{"Content-Type": "application/json"}'::jsonb;
51 | ELSE
52 | headers = TG_ARGV[2]::jsonb;
53 | END IF;
54 |
55 | IF TG_ARGV[3] IS NULL OR TG_ARGV[3] = 'null' THEN
56 | params = '{}'::jsonb;
57 | ELSE
58 | params = TG_ARGV[3]::jsonb;
59 | END IF;
60 |
61 | IF TG_ARGV[4] IS NULL OR TG_ARGV[4] = 'null' THEN
62 | timeout_ms = 1000;
63 | ELSE
64 | timeout_ms = TG_ARGV[4]::integer;
65 | END IF;
66 |
67 | CASE
68 | WHEN method = 'GET' THEN
69 | SELECT http_get INTO request_id FROM net.http_get(
70 | url,
71 | params,
72 | headers,
73 | timeout_ms
74 | );
75 | WHEN method = 'POST' THEN
76 | payload = jsonb_build_object(
77 | 'old_record', OLD,
78 | 'record', NEW,
79 | 'type', TG_OP,
80 | 'table', TG_TABLE_NAME,
81 | 'schema', TG_TABLE_SCHEMA
82 | );
83 |
84 | SELECT http_post INTO request_id FROM net.http_post(
85 | url,
86 | payload,
87 | params,
88 | headers,
89 | timeout_ms
90 | );
91 | ELSE
92 | RAISE EXCEPTION 'method argument % is invalid', method;
93 | END CASE;
94 |
95 | INSERT INTO supabase_functions.hooks
96 | (hook_table_id, hook_name, request_id)
97 | VALUES
98 | (TG_RELID, TG_NAME, request_id);
99 |
100 | RETURN NEW;
101 | END
102 | $function$;
103 | -- Supabase super admin
104 | DO
105 | $$
106 | BEGIN
107 | IF NOT EXISTS (
108 | SELECT 1
109 | FROM pg_roles
110 | WHERE rolname = 'supabase_functions_admin'
111 | )
112 | THEN
113 | CREATE USER supabase_functions_admin NOINHERIT CREATEROLE LOGIN NOREPLICATION;
114 | END IF;
115 | END
116 | $$;
117 | GRANT ALL PRIVILEGES ON SCHEMA supabase_functions TO supabase_functions_admin;
118 | GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA supabase_functions TO supabase_functions_admin;
119 | GRANT ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA supabase_functions TO supabase_functions_admin;
120 | ALTER USER supabase_functions_admin SET search_path = "supabase_functions";
121 | ALTER table "supabase_functions".migrations OWNER TO supabase_functions_admin;
122 | ALTER table "supabase_functions".hooks OWNER TO supabase_functions_admin;
123 | ALTER function "supabase_functions".http_request() OWNER TO supabase_functions_admin;
124 | GRANT supabase_functions_admin TO postgres;
125 | -- Remove unused supabase_pg_net_admin role
126 | DO
127 | $$
128 | BEGIN
129 | IF EXISTS (
130 | SELECT 1
131 | FROM pg_roles
132 | WHERE rolname = 'supabase_pg_net_admin'
133 | )
134 | THEN
135 | REASSIGN OWNED BY supabase_pg_net_admin TO supabase_admin;
136 | DROP OWNED BY supabase_pg_net_admin;
137 | DROP ROLE supabase_pg_net_admin;
138 | END IF;
139 | END
140 | $$;
141 | -- pg_net grants when extension is already enabled
142 | DO
143 | $$
144 | BEGIN
145 | IF EXISTS (
146 | SELECT 1
147 | FROM pg_extension
148 | WHERE extname = 'pg_net'
149 | )
150 | THEN
151 | GRANT USAGE ON SCHEMA net TO supabase_functions_admin, postgres, anon, authenticated, service_role;
152 | ALTER function net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) SECURITY DEFINER;
153 | ALTER function net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) SECURITY DEFINER;
154 | ALTER function net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) SET search_path = net;
155 | ALTER function net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) SET search_path = net;
156 | REVOKE ALL ON FUNCTION net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) FROM PUBLIC;
157 | REVOKE ALL ON FUNCTION net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) FROM PUBLIC;
158 | GRANT EXECUTE ON FUNCTION net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) TO supabase_functions_admin, postgres, anon, authenticated, service_role;
159 | GRANT EXECUTE ON FUNCTION net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) TO supabase_functions_admin, postgres, anon, authenticated, service_role;
160 | END IF;
161 | END
162 | $$;
163 | -- Event trigger for pg_net
164 | CREATE OR REPLACE FUNCTION extensions.grant_pg_net_access()
165 | RETURNS event_trigger
166 | LANGUAGE plpgsql
167 | AS $$
168 | BEGIN
169 | IF EXISTS (
170 | SELECT 1
171 | FROM pg_event_trigger_ddl_commands() AS ev
172 | JOIN pg_extension AS ext
173 | ON ev.objid = ext.oid
174 | WHERE ext.extname = 'pg_net'
175 | )
176 | THEN
177 | GRANT USAGE ON SCHEMA net TO supabase_functions_admin, postgres, anon, authenticated, service_role;
178 | ALTER function net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) SECURITY DEFINER;
179 | ALTER function net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) SECURITY DEFINER;
180 | ALTER function net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) SET search_path = net;
181 | ALTER function net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) SET search_path = net;
182 | REVOKE ALL ON FUNCTION net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) FROM PUBLIC;
183 | REVOKE ALL ON FUNCTION net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) FROM PUBLIC;
184 | GRANT EXECUTE ON FUNCTION net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) TO supabase_functions_admin, postgres, anon, authenticated, service_role;
185 | GRANT EXECUTE ON FUNCTION net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) TO supabase_functions_admin, postgres, anon, authenticated, service_role;
186 | END IF;
187 | END;
188 | $$;
189 | COMMENT ON FUNCTION extensions.grant_pg_net_access IS 'Grants access to pg_net';
190 | DO
191 | $$
192 | BEGIN
193 | IF NOT EXISTS (
194 | SELECT 1
195 | FROM pg_event_trigger
196 | WHERE evtname = 'issue_pg_net_access'
197 | ) THEN
198 | CREATE EVENT TRIGGER issue_pg_net_access ON ddl_command_end WHEN TAG IN ('CREATE EXTENSION')
199 | EXECUTE PROCEDURE extensions.grant_pg_net_access();
200 | END IF;
201 | END
202 | $$;
203 | INSERT INTO supabase_functions.migrations (version) VALUES ('20210809183423_update_grants');
204 | ALTER function supabase_functions.http_request() SECURITY DEFINER;
205 | ALTER function supabase_functions.http_request() SET search_path = supabase_functions;
206 | REVOKE ALL ON FUNCTION supabase_functions.http_request() FROM PUBLIC;
207 | GRANT EXECUTE ON FUNCTION supabase_functions.http_request() TO postgres, anon, authenticated, service_role;
208 | COMMIT;
209 |
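The POST branch of http_request() above posts a JSON object with old_record, record, type, table and schema to the target URL. A minimal TypeScript (Deno) sketch of an endpoint that could receive that payload; the handler and its URL are assumptions for illustration and are not part of this migration:

    // Sketch of a webhook receiver for the payload shape built in the
    // POST branch of supabase_functions.http_request() above.
    import { serve } from 'https://deno.land/std@0.177.1/http/server.ts'

    interface HookPayload {
      type: string                                // TG_OP: INSERT, UPDATE or DELETE
      table: string                               // TG_TABLE_NAME
      schema: string                              // TG_TABLE_SCHEMA
      record: Record<string, unknown> | null      // NEW row
      old_record: Record<string, unknown> | null  // OLD row
    }

    serve(async (req: Request) => {
      const payload: HookPayload = await req.json()
      console.log(`${payload.type} on ${payload.schema}.${payload.table}`)
      return new Response('ok')
    })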
--------------------------------------------------------------------------------
/supabase/code/volumes/functions/hello/index.ts:
--------------------------------------------------------------------------------
1 | // Follow this setup guide to integrate the Deno language server with your editor:
2 | // https://deno.land/manual/getting_started/setup_your_environment
3 | // This enables autocomplete, go to definition, etc.
4 |
5 | import { serve } from "https://deno.land/std@0.177.1/http/server.ts"
6 |
7 | serve(async () => {
8 | return new Response(
9 | `"Hello from Edge Functions!"`,
10 | { headers: { "Content-Type": "application/json" } },
11 | )
12 | })
13 |
14 | // To invoke:
15 | // curl 'http://localhost:/functions/v1/hello' \
16 | // --header 'Authorization: Bearer '
17 |
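The invocation comment above leaves the port and token out; a TypeScript equivalent goes through the functions-v1 route from kong.yml, with placeholder values for the gateway URL and key:

    // Sketch: call the hello function through the Kong functions-v1 route.
    // KONG_URL and ANON_KEY are placeholders; use your deployment's values.
    const KONG_URL = 'http://localhost:8000'
    const ANON_KEY = '<SUPABASE_ANON_KEY from .env>'

    const res = await fetch(`${KONG_URL}/functions/v1/hello`, {
      headers: { Authorization: `Bearer ${ANON_KEY}` },
    })
    console.log(await res.text()) // "Hello from Edge Functions!"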
--------------------------------------------------------------------------------
/supabase/code/volumes/functions/main/index.ts:
--------------------------------------------------------------------------------
1 | import { serve } from 'https://deno.land/std@0.131.0/http/server.ts'
2 | import * as jose from 'https://deno.land/x/jose@v4.14.4/index.ts'
3 |
4 | console.log('main function started')
5 |
6 | const JWT_SECRET = Deno.env.get('JWT_SECRET')
7 | const VERIFY_JWT = Deno.env.get('VERIFY_JWT') === 'true'
8 |
9 | function getAuthToken(req: Request) {
10 | const authHeader = req.headers.get('authorization')
11 | if (!authHeader) {
12 | throw new Error('Missing authorization header')
13 | }
14 | const [bearer, token] = authHeader.split(' ')
15 | if (bearer !== 'Bearer') {
16 | throw new Error(`Auth header is not 'Bearer {token}'`)
17 | }
18 | return token
19 | }
20 |
21 | async function verifyJWT(jwt: string): Promise<boolean> {
22 | const encoder = new TextEncoder()
23 | const secretKey = encoder.encode(JWT_SECRET)
24 | try {
25 | await jose.jwtVerify(jwt, secretKey)
26 | } catch (err) {
27 | console.error(err)
28 | return false
29 | }
30 | return true
31 | }
32 |
33 | serve(async (req: Request) => {
34 | if (req.method !== 'OPTIONS' && VERIFY_JWT) {
35 | try {
36 | const token = getAuthToken(req)
37 | const isValidJWT = await verifyJWT(token)
38 |
39 | if (!isValidJWT) {
40 | return new Response(JSON.stringify({ msg: 'Invalid JWT' }), {
41 | status: 401,
42 | headers: { 'Content-Type': 'application/json' },
43 | })
44 | }
45 | } catch (e) {
46 | console.error(e)
47 | return new Response(JSON.stringify({ msg: e.toString() }), {
48 | status: 401,
49 | headers: { 'Content-Type': 'application/json' },
50 | })
51 | }
52 | }
53 |
54 | const url = new URL(req.url)
55 | const { pathname } = url
56 | const path_parts = pathname.split('/')
57 | const service_name = path_parts[1]
58 |
59 | if (!service_name || service_name === '') {
60 | const error = { msg: 'missing function name in request' }
61 | return new Response(JSON.stringify(error), {
62 | status: 400,
63 | headers: { 'Content-Type': 'application/json' },
64 | })
65 | }
66 |
67 | const servicePath = `/home/deno/functions/${service_name}`
68 | console.error(`serving the request with ${servicePath}`)
69 |
70 | const memoryLimitMb = 150
71 | const workerTimeoutMs = 1 * 60 * 1000
72 | const noModuleCache = false
73 | const importMapPath = null
74 | const envVarsObj = Deno.env.toObject()
75 | const envVars = Object.keys(envVarsObj).map((k) => [k, envVarsObj[k]])
76 |
77 | try {
78 | const worker = await EdgeRuntime.userWorkers.create({
79 | servicePath,
80 | memoryLimitMb,
81 | workerTimeoutMs,
82 | noModuleCache,
83 | importMapPath,
84 | envVars,
85 | })
86 | return await worker.fetch(req)
87 | } catch (e) {
88 | const error = { msg: e.toString() }
89 | return new Response(JSON.stringify(error), {
90 | status: 500,
91 | headers: { 'Content-Type': 'application/json' },
92 | })
93 | }
94 | })
95 |
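verifyJWT() above accepts any token signed with the shared JWT_SECRET. A sketch of minting such a token with the same jose library; the claims and expiry are illustrative assumptions, not what GoTrue actually issues:

    // Sketch: sign an HS256 JWT with the shared secret that verifyJWT() checks.
    // The 'role' claim and the 1 hour expiry are illustrative assumptions.
    import * as jose from 'https://deno.land/x/jose@v4.14.4/index.ts'

    const secretKey = new TextEncoder().encode(Deno.env.get('JWT_SECRET'))

    const jwt = await new jose.SignJWT({ role: 'anon' })
      .setProtectedHeader({ alg: 'HS256' })
      .setIssuedAt()
      .setExpirationTime('1h')
      .sign(secretKey)

    console.log(jwt) // passes jose.jwtVerify(jwt, secretKey) in verifyJWT()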
--------------------------------------------------------------------------------
/supabase/code/volumes/logs/vector.yml:
--------------------------------------------------------------------------------
1 | api:
2 | enabled: true
3 | address: 0.0.0.0:9001
4 |
5 | sources:
6 | docker_host:
7 | type: docker_logs
8 | exclude_containers:
9 | - supabase-vector
10 |
11 | transforms:
12 | project_logs:
13 | type: remap
14 | inputs:
15 | - docker_host
16 | source: |-
17 | .project = "default"
18 | .event_message = del(.message)
19 | .appname = del(.container_name)
20 | del(.container_created_at)
21 | del(.container_id)
22 | del(.source_type)
23 | del(.stream)
24 | del(.label)
25 | del(.image)
26 | del(.host)
27 | del(.stream)
28 | router:
29 | type: route
30 | inputs:
31 | - project_logs
32 | route:
33 | kong: '.appname == "supabase-kong"'
34 | auth: '.appname == "supabase-auth"'
35 | rest: '.appname == "supabase-rest"'
36 | realtime: '.appname == "supabase-realtime"'
37 | storage: '.appname == "supabase-storage"'
38 | functions: '.appname == "supabase-functions"'
39 | db: '.appname == "supabase-db"'
40 |   # Ignore non-nginx log lines since they are related to Kong booting up
41 | kong_logs:
42 | type: remap
43 | inputs:
44 | - router.kong
45 | source: |-
46 | req, err = parse_nginx_log(.event_message, "combined")
47 | if err == null {
48 | .timestamp = req.timestamp
49 | .metadata.request.headers.referer = req.referer
50 | .metadata.request.headers.user_agent = req.agent
51 | .metadata.request.headers.cf_connecting_ip = req.client
52 | .metadata.request.method = req.method
53 | .metadata.request.path = req.path
54 | .metadata.request.protocol = req.protocol
55 | .metadata.response.status_code = req.status
56 | }
57 | if err != null {
58 | abort
59 | }
60 |   # Ignore non-nginx log lines since they are related to Kong booting up
61 | kong_err:
62 | type: remap
63 | inputs:
64 | - router.kong
65 | source: |-
66 | .metadata.request.method = "GET"
67 | .metadata.response.status_code = 200
68 | parsed, err = parse_nginx_log(.event_message, "error")
69 | if err == null {
70 | .timestamp = parsed.timestamp
71 | .severity = parsed.severity
72 | .metadata.request.host = parsed.host
73 | .metadata.request.headers.cf_connecting_ip = parsed.client
74 | url, err = split(parsed.request, " ")
75 | if err == null {
76 | .metadata.request.method = url[0]
77 | .metadata.request.path = url[1]
78 | .metadata.request.protocol = url[2]
79 | }
80 | }
81 | if err != null {
82 | abort
83 | }
84 |   # GoTrue logs are structured JSON strings which the frontend parses directly, but we keep metadata for consistency.
85 | auth_logs:
86 | type: remap
87 | inputs:
88 | - router.auth
89 | source: |-
90 | parsed, err = parse_json(.event_message)
91 | if err == null {
92 | .metadata.timestamp = parsed.time
93 | .metadata = merge!(.metadata, parsed)
94 | }
95 |   # PostgREST logs are structured, so we separate the timestamp from the message using a regex
96 | rest_logs:
97 | type: remap
98 | inputs:
99 | - router.rest
100 | source: |-
101 | parsed, err = parse_regex(.event_message, r'^(?P