├── .env.development.default
├── .github
├── FUNDING.yml
├── ISSUE_TEMPLATE
│ ├── bug_report.md
│ └── config.yml
└── workflows
│ ├── build-processors-image.yaml
│ ├── docker-ci.yml
│ ├── events-processor-tests.yml
│ ├── gh-page.yml
│ ├── release-docker-image.yml
│ └── release.yml
├── .gitignore
├── .gitmodules
├── CODE_OF_CONDUCT.md
├── CONTRIBUTING.md
├── LICENSE
├── PULL_REQUEST_TEMPLATE.md
├── README.md
├── deploy
├── .env.light.example
├── .env.production.example
├── README.md
├── deploy.sh
├── docker-compose.light.yml
├── docker-compose.local.yml
└── docker-compose.production.yml
├── docker-compose.dev.yml
├── docker-compose.yml
├── docker
├── Dockerfile
├── Procfile
├── README.md
├── nginx.conf
├── redis.conf
└── runner.sh
├── events-processor
├── .air.toml
├── .gitignore
├── Dockerfile
├── Dockerfile.dev
├── README.md
├── config
│ ├── database
│ │ ├── database.go
│ │ └── database_test.go
│ ├── kafka
│ │ ├── consumer.go
│ │ ├── consumer_test.go
│ │ ├── kafka.go
│ │ └── producer.go
│ ├── redis
│ │ └── redis.go
│ └── tracer.go
├── go.mod
├── go.sum
├── main.go
├── models
│ ├── billable_metrics.go
│ ├── billable_metrics_test.go
│ ├── charges.go
│ ├── charges_test.go
│ ├── event.go
│ ├── event_test.go
│ ├── stores.go
│ ├── stores_test.go
│ ├── subscriptions.go
│ └── subscriptions_test.go
├── processors
│ ├── events.go
│ ├── events_test.go
│ └── processors.go
├── tests
│ ├── mocked_flag_store.go
│ ├── mocked_producer.go
│ └── mocked_store.go
└── utils
│ ├── env.go
│ ├── env_test.go
│ ├── error_tracker.go
│ ├── result.go
│ ├── result_test.go
│ ├── time.go
│ └── time_test.go
├── extra
├── clickhouse
│ ├── config.d
│ │ └── config.xml
│ └── users.d
│ │ └── users.xml
├── init-letsencrypt.sh
├── init-selfsigned.sh
├── nginx-letsencrypt.conf
├── nginx-selfsigned.conf
└── ssl
│ └── .keep
├── scripts
├── bootstrap.sh
└── pg-init-scripts
│ ├── bootstrap.sh
│ └── create-multiple-postgresql-databases.sh
└── traefik
├── dynamic.yml
└── traefik.yml
/.env.development.default:
--------------------------------------------------------------------------------
1 | LAGO_API_URL=https://api.lago.dev
2 | LAGO_FRONT_URL=https://app.lago.dev
3 |
4 | # Feature flags
5 | LAGO_SIDEKIQ_WEB=true
6 | LAGO_CLICKHOUSE_ENABLED=true
7 | LAGO_CLICKHOUSE_MIGRATIONS_ENABLED=true
8 | LAGO_DISABLE_SEGMENT=true
9 | LAGO_DISABLE_PDF_GENERATION=false
10 | LAGO_DISABLE_WALLET_REFRESH=true
11 | LAGO_USE_AWS_S3=false
12 |
13 | # DB Seeding
14 | LAGO_CREATE_ORG=true
15 | LAGO_ORG_USER_EMAIL=email@example.com
16 | LAGO_ORG_USER_PASSWORD=password
17 | LAGO_ORG_NAME=Acme
18 |
19 | # Accessories (DB, pdf, kafka)
20 | POSTGRES_USER=lago
21 | POSTGRES_DB=lago
22 | POSTGRES_PASSWORD=changeme
23 | DATABASE_URL=postgresql://${POSTGRES_USER}:${POSTGRES_PASSWORD}@db:5432/${POSTGRES_DB}
24 | REDIS_URL=redis://redis:6379
25 | LAGO_REDIS_CACHE_URL=redis://redis:6379
26 | LAGO_PDF_URL=http://pdf:3000
27 | LAGO_DATA_API_URL=http://data_api
28 | LAGO_EVENTS_PROCESSOR_DATABASE_MAX_CONNECTIONS=200
29 | LAGO_REDIS_STORE_URL=redis:6379
30 | LAGO_REDIS_STORE_PASSWORD=
31 | LAGO_REDIS_STORE_DB=1
32 |
33 | # Misc
34 | LAGO_FROM_EMAIL=noreply@getlago.com
35 | LAGO_PARALLEL_THREADS_COUNT=4
36 |
37 | # Use dedicated services to process certain queues
38 | # If you enable one, make sure the related service is started
39 | # Ex: SIDEKIQ_WEBHOOK=true means `api-webhook-worker` must be running or webhook-related jobs won't be processed
40 | SIDEKIQ_EVENTS=false
41 | SIDEKIQ_PDFS=false
42 | SIDEKIQ_BILLING=false
43 | SIDEKIQ_CLOCK=false
44 | SIDEKIQ_WEBHOOK=false
45 |
46 | # External API keys
47 | LAGO_DATA_API_BEARER_TOKEN=changeme
48 | LAGO_LICENSE=
49 | NANGO_SECRET_KEY=
50 | SEGMENT_WRITE_KEY=
51 |
52 | # Salts and similar
53 | SECRET_KEY_BASE=your-secret-key-base-hex-64
54 | LAGO_ENCRYPTION_PRIMARY_KEY=your-encryption-primary-key
55 | LAGO_ENCRYPTION_DETERMINISTIC_KEY=your-encryption-deterministic-key
56 | LAGO_ENCRYPTION_KEY_DERIVATION_SALT=your-encryption-derivation-salt
57 |
58 | # Kafka
59 | LAGO_KAFKA_BOOTSTRAP_SERVERS=redpanda:9092
60 | LAGO_KAFKA_RAW_EVENTS_TOPIC=events-raw
61 | LAGO_KAFKA_ENRICHED_EVENTS_TOPIC=events_enriched
62 | LAGO_KAFKA_CLICKHOUSE_CONSUMER_GROUP=clickhouse
63 | LAGO_KAFKA_SCRAM_ALGORITHM=
64 | LAGO_KAFKA_TLS=
65 | LAGO_KAFKA_USERNAME=
66 | LAGO_KAFKA_PASSWORD=
67 | LAGO_KAFKA_EVENTS_CHARGED_IN_ADVANCE_TOPIC=events_charged_in_advance
68 | LAGO_KAFKA_EVENTS_DEAD_LETTER_TOPIC=events_dead_letter
69 | LAGO_KAFKA_ACTIVITY_LOGS_TOPIC=activity_logs
70 | LAGO_KAFKA_CONSUMER_GROUP=lago_dev
71 | LAGO_KARAFKA_WEB=
72 | LAGO_KARAFKA_PROCESSING=
73 | LAGO_KARAFKA_WEB_SECRET=
74 |
--------------------------------------------------------------------------------
/.github/FUNDING.yml:
--------------------------------------------------------------------------------
1 | custom: ['https://buy.stripe.com/7sIg2X2PVaA96v65kl']
2 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/bug_report.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Bug report
3 | about: Create a report to help us improve
4 | title: "[BUG]: "
5 | labels: bug
6 | assignees: ''
7 |
8 | ---
9 |
10 | **Describe the bug**
11 | A clear and concise description of what the bug is.
12 |
13 | **To Reproduce**
14 | Steps to reproduce the behavior:
15 | 1. Go to '...'
16 | 2. Click on '....'
17 | 3. Scroll down to '....'
18 | 4. See error
19 |
20 | **Expected behavior**
21 | A clear and concise description of what you expected to happen.
22 |
23 | **Screenshots**
24 | If applicable, add screenshots to help explain your problem.
25 |
26 | **Environment**
27 | - OS: [e.g. iOS]
28 | - Browser: [e.g. chrome, safari]
29 | - Version: [e.g. 22]
30 |
31 | **Additional context**
32 | Add any other context about the problem here.
33 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/config.yml:
--------------------------------------------------------------------------------
1 | blank_issues_enabled: false
2 | contact_links:
3 | - name: Feature Request
4 | url: https://getlago.canny.io/feature-requests
5 | about: Provide feedback to the Lago team and ask for new features or enhancements!
6 | - name: Join the Lago Community
7 | url: https://www.getlago.com/slack
8 | about: Join us on Slack to get answers to your questions and submit new feature requests.
9 |
--------------------------------------------------------------------------------
/.github/workflows/build-processors-image.yaml:
--------------------------------------------------------------------------------
1 | name: "Build Events Processor Production Image"
2 | on:
3 | push:
4 | branches:
5 | - main
6 | paths:
7 | - "events-processor/**"
8 | workflow_dispatch:
9 |
10 | jobs:
11 | build-processor-image:
12 | runs-on: ubuntu-latest
13 | name: Build Events Processor Image
14 | steps:
15 | - uses: actions/checkout@v4
16 | - name: Configure AWS Credentials
17 | uses: aws-actions/configure-aws-credentials@v4
18 | with:
19 | aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
20 | aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
21 | aws-region: us-east-1
22 | - name: Login to Amazon ECR
23 | id: login-ecr
24 | uses: aws-actions/amazon-ecr-login@v2
25 | - name: Set short sha
26 | id: sha_short
27 | run: echo "sha_short=$(git rev-parse --short HEAD)" >> $GITHUB_OUTPUT
28 | - name: Set up Docker Buildx
29 | uses: docker/setup-buildx-action@v3
30 | - name: Docker tag
31 | id: docker_tag
32 | env:
33 | ECR_REGISTRY: ${{ steps.login-ecr.outputs.registry }}
34 | ECR_REPOSITORY: lago-events-processor
35 | IMAGE_TAG: ${{ steps.sha_short.outputs.sha_short }}
36 | run: echo "tag=$(echo $ECR_REGISTRY/$ECR_REPOSITORY:$IMAGE_TAG)" >> $GITHUB_OUTPUT
37 | - name: Build and push
38 | uses: docker/build-push-action@v6
39 | with:
40 | context: ./events-processor
41 | push: true
42 | tags: ${{ steps.docker_tag.outputs.tag }}
43 |
--------------------------------------------------------------------------------
/.github/workflows/docker-ci.yml:
--------------------------------------------------------------------------------
1 | name: "Docker CI"
2 | on:
3 | push:
4 | branches:
5 | - main
6 | workflow_dispatch:
7 | jobs:
8 | docker-ci:
9 | name: Docker CI
10 | runs-on: ubuntu-latest
11 | steps:
12 | - uses: actions/checkout@v4
13 | with:
14 | sparse-checkout: |
15 | docker-compose.yml
16 | - name: Launch all services with default configuration
17 | env:
18 | LAGO_CREATE_ORG: "true"
19 | LAGO_ORG_NAME: "Lago"
20 | LAGO_ORG_USER_EMAIL: "foo@bar.com"
21 | LAGO_ORG_USER_PASSWORD: "foobar"
22 | LAGO_ORG_API_KEY: "test"
23 | run: |
24 | export LAGO_RSA_PRIVATE_KEY="`openssl genrsa 2048 | base64 | tr -d '\n'`"
25 | docker compose up -d --wait
26 | - name: cURL API
27 | run: curl -f http://localhost:3000/health
28 | - name: cURL Front
29 | run: curl -f http://localhost:80
30 | - name: cURL API Customers
31 | run: |
32 | curl -f http://localhost:3000/api/v1/customers -H "Authorization: Bearer test"
33 | - name: Down services
34 | run: docker compose down
35 |
--------------------------------------------------------------------------------
/.github/workflows/events-processor-tests.yml:
--------------------------------------------------------------------------------
1 | name: Events Processor Tests
2 |
3 | on:
4 | push:
5 | branches:
6 | - main
7 | pull_request:
8 | types:
9 | - opened
10 | - synchronize
11 | - reopened
12 | paths:
13 | - "events-processor/**"
14 |
15 | jobs:
16 | test:
17 | name: Run go tests
18 | runs-on: ubuntu-latest
19 | defaults:
20 | run:
21 | working-directory: ./events-processor
22 |
23 | services:
24 | postgres:
25 | image: postgres:14-alpine
26 | ports:
27 | - "5432:5432"
28 | env:
29 | POSTGRES_DB: lago
30 | POSTGRES_USER: lago
31 | POSTGRES_PASSWORD: lago
32 |
33 | env:
34 | DATABASE_URL: "postgres://lago:lago@localhost:5432/lago"
35 |
36 | steps:
37 | - name: Checkout code
38 | uses: actions/checkout@v3
39 |
40 | - name: Checkout lago expression code
41 | uses: actions/checkout@v3
42 | with:
43 | repository: getlago/lago-expression
44 | path: lago-expression
45 | ref: v0.1.4
46 |
47 | - name: Build lago-expression
48 | working-directory: ./lago-expression
49 | run: cargo build --release
50 |
51 | - name: Copy lago-expression shared library
52 | working-directory: ./lago-expression
53 | run: |
54 | mkdir -p /tmp/libs
55 | sudo cp ./target/release/libexpression_go.so /usr/local/lib/libexpression_go.so
56 | sudo ldconfig
57 |
58 | - name: Setup Go
59 | uses: actions/setup-go@v4
60 | with:
61 | go-version: "1.24.0"
62 |
63 | - name: Run tests
64 | run: go test -v ./...
65 |
--------------------------------------------------------------------------------
/.github/workflows/gh-page.yml:
--------------------------------------------------------------------------------
1 | name: "Deploy Github Page"
2 |
3 | on:
4 | push:
5 | branches:
6 | - main
7 | paths:
8 | - 'deploy/deploy.sh'
9 | workflow_dispatch:
10 |
11 | jobs:
12 | build:
13 | runs-on: ubuntu-latest
14 | steps:
15 | - name: Checkout
16 | uses: actions/checkout@v4
17 | with:
18 | sparse-checkout: |
19 | deploy/
20 | - name: Upload deploy script
21 | uses: actions/upload-pages-artifact@v3
22 | with:
23 | path: deploy/
24 |
25 | deploy:
26 | needs: build
27 | permissions:
28 | pages: write
29 | id-token: write
30 | environment:
31 | name: github-pages
32 |       url: ${{ steps.deployment.outputs.page_url }}
33 | runs-on: ubuntu-latest
34 | steps:
35 | - name: Deploy page
36 | id: deployment
37 | uses: actions/deploy-pages@v4
38 | with:
39 | token: ${{ secrets.GITHUB_TOKEN }}
--------------------------------------------------------------------------------
/.github/workflows/release-docker-image.yml:
--------------------------------------------------------------------------------
1 | name: "Release Single Docker Image"
2 | on:
3 | release:
4 | types: [released]
5 | workflow_dispatch:
6 | inputs:
7 | version:
8 | description: Version
9 | required: true
10 | env:
11 | REGISTRY_IMAGE: getlago/lago
12 | jobs:
13 | build-single-docker-image:
14 | strategy:
15 | matrix:
16 | platform:
17 | - version: linux/amd64
18 | runner: linux/amd64
19 | - version: linux/arm64
20 | runner: linux-arm64
21 | name: Build ${{ matrix.platform.version }} Image
22 | runs-on: ${{ matrix.platform.runner }}
23 | steps:
24 | - name: Prepare
25 | run: |
26 | platform=${{ matrix.platform.version }}
27 | echo "PLATFORM_PAIR=${platform//\//-}" >> $GITHUB_ENV
28 | - uses: actions/checkout@v4
29 | with:
30 | submodules: true
31 | - name: Docker Meta
32 | id: meta
33 | uses: docker/metadata-action@v5
34 | with:
35 | images: ${{ env.REGISTRY_IMAGE }}
36 | tags: |
37 | type=raw,value=${{ github.event_name == 'release' && github.event.release.tag_name || github.event.inputs.version }}
38 | type=raw,value=latest
39 | - name: Set up Docker Buildx
40 | uses: docker/setup-buildx-action@v3
41 | with:
42 | version: latest
43 | - name: Log In to Docker Hub
44 | uses: docker/login-action@v3
45 | with:
46 | username: ${{ secrets.DOCKERHUB_USERNAME }}
47 | password: ${{ secrets.DOCKERHUB_PASSWORD }}
48 | - name: Add version into docker image
49 | id: add_version
50 | run: |
51 | echo "${{ github.event_name == 'release' && github.event.release.tag_name || github.event.inputs.version }}" > LAGO_VERSION
52 | - name: Build and push Docker image
53 | uses: docker/build-push-action@v6
54 | id: build
55 | with:
56 | context: .
57 | file: ./docker/Dockerfile
58 | platforms: ${{ matrix.platform.version }}
59 | labels: ${{ steps.meta.outputs.labels }}
60 | outputs: type=image,name=${{ env.REGISTRY_IMAGE }},push-by-digest=true,name-canonical=true,push=true
61 | build-args: |
62 | SEGMENT_WRITE_KEY=${{ secrets.SEGMENT_WRITE_KEY }}
63 | - name: Export Digest
64 | run: |
65 | mkdir -p ./_tmp/${{ github.run_id }}/${{ github.run_attempt }}/digests
66 | digest="${{ steps.build.outputs.digest }}"
67 | touch "./_tmp/${{ github.run_id }}/${{ github.run_attempt }}/digests/${digest#sha256:}"
68 | - name: Upload Digest
69 | uses: actions/upload-artifact@v4
70 | with:
71 | name: digests-${{ env.PLATFORM_PAIR }}
72 | path: ./_tmp/${{ github.run_id }}/${{ github.run_attempt }}/digests/*
73 | if-no-files-found: error
74 | retention-days: 1
75 | - name: Clean up
76 | if: always()
77 | run: |
78 | [ -e ./_tmp/${{ github.run_id }}/${{ github.run_attempt }}/digests ] && \
79 | rm -rf ./_tmp/${{ github.run_id }}/${{ github.run_attempt }}/digests
80 | merge:
81 | name: Merge Images
82 | runs-on: lago-runner
83 | needs: [build-single-docker-image]
84 | steps:
85 | - name: Download Digests
86 | uses: actions/download-artifact@v4
87 | with:
88 |           path: ./_tmp/${{ github.run_id }}/${{ github.run_attempt }}/digests
89 | pattern: digests-*
90 | merge-multiple: true
91 | - name: Docker meta
92 | id: meta
93 | uses: docker/metadata-action@v5
94 | with:
95 | images: ${{ env.REGISTRY_IMAGE }}
96 | tags: |
97 | type=raw,value=${{ github.event_name == 'release' && github.event.release.tag_name || github.event.inputs.version }}
98 | type=raw,value=latest
99 | - name: Set up Docker buildx
100 | uses: docker/setup-buildx-action@v3
101 | - name: Log in to Docker Hub
102 | uses: docker/login-action@v3
103 | with:
104 | username: ${{ secrets.DOCKERHUB_USERNAME }}
105 | password: ${{ secrets.DOCKERHUB_PASSWORD }}
106 | - name: Create manifest and push
107 |         working-directory: ./_tmp/${{ github.run_id }}/${{ github.run_attempt }}/digests
108 | run: |
109 | docker buildx imagetools create $(jq -cr '.tags | map("-t " + .) | join(" ")' <<< "$DOCKER_METADATA_OUTPUT_JSON") \
110 | $(printf '${{ env.REGISTRY_IMAGE }}@sha256:%s ' *)
111 | - name: Inspect Image
112 | run: |
113 | docker buildx imagetools inspect ${{ env.REGISTRY_IMAGE }}:${{ steps.meta.outputs.version }}
114 | - name: Clean up
115 | if: always()
116 | run: |
117 | [ -e ./_tmp/${{ github.run_id }}/${{ github.run_attempt }}/digests ] && \
118 | rm -rf ./_tmp/${{ github.run_id }}/${{ github.run_attempt }}/digests
119 |
--------------------------------------------------------------------------------
/.github/workflows/release.yml:
--------------------------------------------------------------------------------
1 | name: "Release"
2 | on:
3 | release:
4 | types: [released]
5 | jobs:
6 | release-dispatch:
7 | name: Release Dispatch
8 | runs-on: ubuntu-latest
9 | steps:
10 | - name: API Dispatch
11 | uses: peter-evans/repository-dispatch@v2
12 | with:
13 |           token: ${{ secrets.GH_TOKEN }}
14 | repository: getlago/lago-api
15 | event-type: release
16 | client-payload: '{"version": "${{ github.event.release.tag_name }}"}'
17 | - name: Front Dispatch
18 | uses: peter-evans/repository-dispatch@v2
19 | with:
20 | token: ${{ secrets.GH_TOKEN }}
21 | repository: getlago/lago-front
22 | event-type: release
23 | client-payload: '{"version": "${{ github.event.release.tag_name }}"}'
24 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | *.code-workspace
2 | .DS_Store
3 | .env
4 | .env.development
5 | .rsa_private.pem
6 | .vscode
7 | .zed
8 | /extra/ssl/certbot
9 | /extra/ssl/dhparam.pem
10 | /extra/ssl/nginx*
11 | /traefik/certs/*
12 | /deploy/letsencrypt
--------------------------------------------------------------------------------
/.gitmodules:
--------------------------------------------------------------------------------
1 | [submodule "api"]
2 | path = api
3 | url = git@github.com:getlago/lago-api.git
4 | [submodule "front"]
5 | path = front
6 | url = git@github.com:getlago/lago-front.git
7 |
--------------------------------------------------------------------------------
/CODE_OF_CONDUCT.md:
--------------------------------------------------------------------------------
1 | # Contributor Covenant Code of Conduct
2 |
3 | ## Our Pledge
4 |
5 | We as members, contributors, and leaders pledge to make participation in our
6 | community a harassment-free experience for everyone, regardless of age, body
7 | size, visible or invisible disability, ethnicity, sex characteristics, gender
8 | identity and expression, level of experience, education, socio-economic status,
9 | nationality, personal appearance, race, religion, or sexual identity
10 | and orientation.
11 |
12 | We pledge to act and interact in ways that contribute to an open, welcoming,
13 | diverse, inclusive, and healthy community.
14 |
15 | ## Our Standards
16 |
17 | Examples of behavior that contributes to a positive environment for our
18 | community include:
19 |
20 | - Demonstrating empathy and kindness toward other people
21 | - Being respectful of differing opinions, viewpoints, and experiences
22 | - Giving and gracefully accepting constructive feedback
23 | - Accepting responsibility and apologizing to those affected by our mistakes,
24 | and learning from the experience
25 | - Focusing on what is best not just for us as individuals, but for the
26 | overall community
27 |
28 | Examples of unacceptable behavior include:
29 |
30 | - The use of sexualized language or imagery, and sexual attention or
31 | advances of any kind
32 | - Trolling, insulting or derogatory comments, and personal or political attacks
33 | - Public or private harassment
34 | - Publishing others' private information, such as a physical or email
35 | address, without their explicit permission
36 | - Other conduct which could reasonably be considered inappropriate in a
37 | professional setting
38 |
39 | ## Enforcement Responsibilities
40 |
41 | Community leaders are responsible for clarifying and enforcing our standards of
42 | acceptable behavior and will take appropriate and fair corrective action in
43 | response to any behavior that they deem inappropriate, threatening, offensive,
44 | or harmful.
45 |
46 | Community leaders have the right and responsibility to remove, edit, or reject
47 | comments, commits, code, wiki edits, issues, and other contributions that are
48 | not aligned to this Code of Conduct, and will communicate reasons for moderation
49 | decisions when appropriate.
50 |
51 | ## Scope
52 |
53 | This Code of Conduct applies within all community spaces, and also applies when
54 | an individual is officially representing the community in public spaces.
55 | Examples of representing our community include using an official e-mail address,
56 | posting via an official social media account, or acting as an appointed
57 | representative at an online or offline event.
58 |
59 | ## Enforcement
60 |
61 | Instances of abusive, harassing, or otherwise unacceptable behavior may be
62 | reported to the community leaders responsible for enforcement at
63 | dev@getlago.com.
64 | All complaints will be reviewed and investigated promptly and fairly.
65 |
66 | All community leaders are obligated to respect the privacy and security of the
67 | reporter of any incident.
68 |
69 | ## Enforcement Guidelines
70 |
71 | Community leaders will follow these Community Impact Guidelines in determining
72 | the consequences for any action they deem in violation of this Code of Conduct:
73 |
74 | ### 1. Correction
75 |
76 | **Community Impact**: Use of inappropriate language or other behavior deemed
77 | unprofessional or unwelcome in the community.
78 |
79 | **Consequence**: A private, written warning from community leaders, providing
80 | clarity around the nature of the violation and an explanation of why the
81 | behavior was inappropriate. A public apology may be requested.
82 |
83 | ### 2. Warning
84 |
85 | **Community Impact**: A violation through a single incident or series
86 | of actions.
87 |
88 | **Consequence**: A warning with consequences for continued behavior. No
89 | interaction with the people involved, including unsolicited interaction with
90 | those enforcing the Code of Conduct, for a specified period of time. This
91 | includes avoiding interactions in community spaces as well as external channels
92 | like social media. Violating these terms may lead to a temporary or
93 | permanent ban.
94 |
95 | ### 3. Temporary Ban
96 |
97 | **Community Impact**: A serious violation of community standards, including
98 | sustained inappropriate behavior.
99 |
100 | **Consequence**: A temporary ban from any sort of interaction or public
101 | communication with the community for a specified period of time. No public or
102 | private interaction with the people involved, including unsolicited interaction
103 | with those enforcing the Code of Conduct, is allowed during this period.
104 | Violating these terms may lead to a permanent ban.
105 |
106 | ### 4. Permanent Ban
107 |
108 | **Community Impact**: Demonstrating a pattern of violation of community
109 | standards, including sustained inappropriate behavior, harassment of an
110 | individual, or aggression toward or disparagement of classes of individuals.
111 |
112 | **Consequence**: A permanent ban from any sort of public interaction within
113 | the community.
114 |
115 | ## Attribution
116 |
117 | This Code of Conduct is adapted from the [Contributor Covenant][homepage],
118 | version 2.0, available at
119 | https://www.contributor-covenant.org/version/2/0/code_of_conduct.html.
120 |
121 | Community Impact Guidelines were inspired by [Mozilla's code of conduct
122 | enforcement ladder](https://github.com/mozilla/diversity).
123 |
124 | [homepage]: https://www.contributor-covenant.org
125 |
126 | For answers to common questions about this code of conduct, see the FAQ at
127 | https://www.contributor-covenant.org/faq. Translations are available at
128 | https://www.contributor-covenant.org/translations.
129 |
--------------------------------------------------------------------------------
/PULL_REQUEST_TEMPLATE.md:
--------------------------------------------------------------------------------
1 | ## Pull Request template
2 |
3 | Please go through these steps before you submit a PR.
4 |
5 | 1. Make sure that your PR is not a duplicate.
6 | 2. If not, then make sure that:
7 |
8 | a. You have done your changes in a separate branch. Branches MUST have descriptive names that start with either the `fix/` or `feature/` prefixes. Good examples are: `fix/signin-issue` or `feature/issue-templates`.
9 |
10 | b. You have a descriptive commit message with a short title (first line).
11 |
12 |    c. You have only one commit (if not, squash them into one commit; see the sketch below).
13 |
14 |    d. `pnpm test` doesn't throw any errors. If it does, fix them first and amend your commit (`git commit --amend`).
15 |
16 | 3. **After** these steps, you're ready to open a pull request.
17 |
18 | a. Give a descriptive title to your PR.
19 |
20 | b. Describe your changes.
21 |
22 |    c. Put `closes #XXXX` in your comment to auto-close the issue that your PR fixes (if there is one).
23 |
24 | d. Add the corresponding labels to your pull request (ex: feature, improvement, bug...)
25 |
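If you need to squash, here is a minimal sketch, assuming your branch was created from `main` (the branch name is illustrative):

```bash
# Interactively squash all commits on top of main into a single commit,
# then update your remote branch.
git checkout feature/issue-templates
git rebase -i main            # mark every commit after the first as "squash"
git push --force-with-lease
```
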
26 | IMPORTANT: Please review the [CONTRIBUTING.md](../CONTRIBUTING.md) file for detailed contributing guidelines.
27 |
28 | **PLEASE REMOVE THIS TEMPLATE BEFORE SUBMITTING**
29 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Lago
2 |
3 | Open Source Metering & Usage-Based Billing
4 |
5 | The best alternative to Chargebee, Recurly or Stripe Billing.
6 | For usage-based, subscription-based, and all the nuances of pricing in between.
7 |
8 | [Slack](https://www.getlago.com/slack) · [Website](https://www.getlago.com) · [Issues](https://github.com/getlago/lago/issues) · [Roadmap](https://getlago.canny.io/)
45 | ## The programmable API for usage-based billing
46 | [Watch the demo video](https://www.youtube.com/watch?v=dXnoMRetsr4)
47 |
48 | ### The problem: Billing systems are still a nightmare for engineers
49 | 
50 | Engineers be like…
51 |
52 | Read more first-hand experiences from Qonto, Algolia, Pleo, Segment, and others in the 350+ Hacker News comments [here](https://news.ycombinator.com/item?id=31424450).
53 |
54 | **The Solution:** Lago, the open-source billing API for product-led SaaS
55 | - Event-based: if you can track it, you can charge for it;
56 | - Built for product-led growth companies;
57 | - Hybrid pricing: subscription and usage;
58 | - Hybrid go-to-market motion: self-serve and sales-led.
59 |
60 | **Open-source, open architecture:**
61 | - Composable: connect Lago to any of your internal systems or tools (i.e. any payment gateway, CRM, CPQ, accounting software);
62 | - Pricing: we’re not rent seekers, we’re not asking for a % of your revenue. Our self-hosted version is free. Our cloud version is priced like a SaaS;
63 | - Privacy: your data never has to leave your infrastructure.
64 |
65 | ## ✨ Features
66 | - **[Usage metering](https://www.getlago.com/products/metering)**: Lago's event-based architecture provides a solid foundation for building a fair pricing model that scales with your business.
67 | - **[Price plans](https://www.getlago.com/products/plans)**: Lago supports all pricing models. Create pay-as-you-go and hybrid plans in no time with our intuitive user interface or API.
68 | - **[Coupons](https://www.getlago.com/products/coupons)**: Create engaging marketing campaigns and increase conversion with coupons that customers can redeem to get a discount.
69 | - **[Add-ons](https://www.getlago.com/products/add-on)**: Why wait until the end of the billing cycle to get paid? Lago allows you to create one-time charges that are invoiced on the fly.
70 | - **[Invoicing](https://www.getlago.com/products/invoicing)**: Depending on the configuration of your plans, Lago automatically calculates what each customer owes you and generates invoices.
71 | - **[Prepaid credits](https://www.getlago.com/products/prepaid-credits)**: Unlock recurring revenue opportunities for pay-as-you-go pricing models with Lago’s prepaid credit features.
72 |
73 | ## 🔔 Stay up to date
74 | Lago launched its v0.1 on June 2nd, 2022. Lots of new features are coming and are generally released on a bi-weekly basis. Watch this repository to be notified of future updates.
75 |
76 | [Check out our public roadmap](https://getlago.canny.io/)
77 |
78 | ## 🔖 License
79 | Distributed under the AGPLv3 License. Read more [here](https://www.getlago.com/blog/open-source-licensing-and-why-lago-chose-agplv3).
80 |
81 | ## Current Releases
82 |
83 | | Project | Release Badge |
84 | |--------------------|-----------------------------------------------------------------------------------------------------|
85 | | **Lago** | [Latest release](https://github.com/getlago/lago/releases) |
86 | | **Lago API** | [Latest release](https://github.com/getlago/lago-api/releases) |
87 | | **Lago front** | [Latest release](https://github.com/getlago/lago-front/releases) |
88 | | **Lago Go Client** | [Latest release](https://github.com/getlago/lago-go-client/releases) |
89 | | **lago-gotenberg** | [Latest release](https://github.com/getlago/lago-gotenberg/releases) |
90 | | **Lago JavaScript Client** | [Latest release](https://github.com/getlago/lago-javascript-client/releases) |
91 | | **Lago OpenAPI** | [Latest release](https://github.com/getlago/lago-openapi/releases) |
92 | | **Lago Python Client** | [Latest release](https://github.com/getlago/lago-python-client/releases) |
93 | | **Lago Ruby Client** | [Latest release](https://github.com/getlago/lago-ruby-client/releases) |
94 |
95 |
96 | ## 💻 Deploy locally
97 |
98 | ### Requirements
99 | 1. Install Docker on your machine;
100 | 2. Make sure Docker Compose is installed and available (it should be the case if you have chosen to install Docker via Docker Desktop); and
101 | 3. Make sure Git is installed on your machine.
102 |
103 | ### Run the app
104 | To start using Lago, run the following commands in a shell:
105 |
106 |
107 | #### On a fresh install
108 | ```bash
109 | # Get the code
110 | git clone --depth 1 https://github.com/getlago/lago.git
111 |
112 | # Go to Lago folder
113 | cd lago
114 |
115 | # Set up environment configuration
116 | echo "LAGO_RSA_PRIVATE_KEY=\"`openssl genrsa 2048 | base64`\"" >> .env
117 | source .env
118 |
119 | # Start all the components
120 | docker compose up
121 | ```
122 |
123 | #### After an update
124 |
125 | ```bash
126 | docker compose up
127 | ```
128 |
129 | You can now open your browser and go to http://localhost to connect to the application. Lago's API is exposed at http://localhost:3000.
130 |
131 | Note that if your Docker server is not at http://localhost, the following env variables must be set: `LAGO_API_URL` and `LAGO_FRONT_URL`. This may be done on the command line or in your .env file. For example:
132 |
133 | ```
134 | LAGO_API_URL="http://192.168.122.71:3000"
135 | LAGO_FRONT_URL="http://192.168.122.71"
136 | ```
137 |
138 | ### Find your API key
139 | Your API Key can be found directly in the UI:
140 |
141 | 1. Access the **Developer** section from the sidebar;
142 | 2. The first tab of this section is related to your **API keys**; and
143 | 3. Click the **Copy** button to copy it to the clipboard.
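
As a quick sanity check, you can call the API with your key. A minimal sketch (the endpoint and `Bearer` scheme mirror the smoke test in this repository's Docker CI workflow; replace `YOUR_API_KEY` with the key you copied):

```bash
# List customers; -f makes curl exit with an error on non-2xx responses
curl -f http://localhost:3000/api/v1/customers \
  -H "Authorization: Bearer YOUR_API_KEY"
```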
144 |
145 | ### Analytics and tracking
146 | Please note that Lago, by default, tracks basic actions performed on your self-hosted instance. If you do not disable tracking, you may receive specific communications or product updates. However, rest assured that Lago will not collect any personal information about your customers or financial information about your invoices.
147 |
148 | If you would like to know more about Lago's analytics or remove the entire tracking, please refer to [this page](https://doc.getlago.com/guide/self-hosted/tracking-analytics) for comprehensive information.
149 |
150 | ### Version, environment variables and components
151 | Docker images are always updated to the latest stable version in the docker-compose.yml file. You can use a different tag if needed by checking the releases list.
152 |
153 | Lago uses the following environment variables to configure the components of the application. You can override them to customise your setup. Take a closer look at our [documentation](https://doc.getlago.com/docs/guide/self-hosting/docker#configuration).
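
As an illustration, a minimal sketch of overriding a few variables at startup (the names come from .env.development.default in this repository; the values are examples only):

```bash
# docker compose reads these from the shell environment (or from .env)
export LAGO_DISABLE_SEGMENT=true
export LAGO_FROM_EMAIL=noreply@getlago.com
docker compose up
```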
154 |
155 | ## ☁️ Use our cloud-based product
156 | Contact our team at hello@getlago.com to get started with Lago Cloud. More information on [our website](https://www.getlago.com/pricing).
157 |
158 | ## 🚀 Getting the most out of Lago
159 | - See the [documentation](https://doc.getlago.com) to learn more about all the features;
160 | - Use our [templates](https://www.getlago.com/resources/templates/all) to get inspiration and learn how to reproduce Algolia’s, Segment’s and Klaviyo’s pricing models;
161 | - Join our [Slack community](https://www.getlago.com/slack) if you need help, or want to chat, we’re here to help;
162 | - Contribute on GitHub: read our [guidelines](https://github.com/getlago/lago/blob/main/CONTRIBUTING.md);
163 | - Follow us on [Twitter](https://twitter.com/GetLago) for the latest news;
164 | - You can email us as well: hello@getlago.com.
165 |
166 | ## 🧑💻 Contributions and development environment
167 |
168 | You can follow this [guide](https://github.com/getlago/lago/wiki/Development-Environment) to set up a Lago development environment on your machine. This guide is intended for people willing to contribute to Lago. If you want to try Lago on your local system, we recommend that you take a look at Lago's public documentation.
169 |
170 | You can contribute by following our [guidelines](https://github.com/getlago/lago/blob/main/CONTRIBUTING.md).
171 |
172 | ## 💡 Philosophy
173 | B2B SaaS has evolved, but billing has not caught up yet.
174 |
175 | ### 1- We’re not in the “subscription economy” anymore. And we won’t go “full usage-based pricing” quite yet
176 | Pricing is now mostly hybrid: it includes a usage-based component (i.e. “if you use more you pay more”) and a subscription component (i.e. a recurring fee for basic usage).
177 |
178 | Not all software companies will go full “usage-based” like Snowflake for instance. This model is the new standard for cloud infrastructure products. However, in other areas of SaaS, users want to know beforehand how much they will pay to control their spending and software companies want to be able to predict recurring revenues.
179 |
180 | ### 2- Go-to-market is not either bottom-up or top-down anymore
181 | SaaS used to be either self-service (SMBs) or sales-led (Enterprises).
182 | Go-to-market now mixes the self-service (all customers access the same price plans) and sales-led (customers get a custom quote from a sales representative) motions.
183 | A typical journey involves an individual contributor in a company who tests a new tool, puts their corporate credit card in, and starts spreading the use of the tool within the organization. At that point, the VP or head of department might want to upgrade to a custom plan tailored to the needs of the whole organization.
184 | As a result, billing needs to be flexible, automated, and transparent enough to embrace this hybrid go-to-market motion as well.
185 |
186 | ### 3- The “rent seeker” pricing of current billing solutions needs to stop
187 | Why do payment companies take a cut on revenues?
188 | Because the higher the amount, the higher the risk for them (e.g. fraud, disputes, etc.).
189 |
190 | Why did billing companies adopt the same pricing structure? We’re not able to provide an answer that makes sense. It’s been said on the internet that they did this because they could (read more [here](https://news.ycombinator.com/item?id=16766846)).
191 |
192 | ### One last thing…
193 | Lago is agnostic and we aim at being as transparent as possible, so we won’t nudge or lock you into using a specific tool in exchange for using our billing API ([learn more](https://www.gmass.co/blog/negotiating-stripe-fees/)).
194 |
--------------------------------------------------------------------------------
/deploy/.env.light.example:
--------------------------------------------------------------------------------
1 | LAGO_DOMAIN=
2 | LAGO_ACME_EMAIL=
--------------------------------------------------------------------------------
/deploy/.env.production.example:
--------------------------------------------------------------------------------
1 | LAGO_DOMAIN=
2 | LAGO_ACME_EMAIL=
3 | PORTAINER_USER=
4 | PORTAINER_PASSWORD=
--------------------------------------------------------------------------------
/deploy/README.md:
--------------------------------------------------------------------------------
1 | # Lago Deploy
2 |
3 | This repository contains the necessary files to deploy the Lago project.
4 |
5 | ## Docker Compose Local
6 |
7 | To deploy the project locally, you need to have Docker and Docker Compose installed on your machine.
8 | This configuration can be used for small production workloads, but it is not recommended for large-scale deployments.
9 |
10 | ### Get Started
11 |
12 | 1. Get the docker compose file
13 |
14 | ```bash
15 | curl -o docker-compose.yml https://raw.githubusercontent.com/getlago/lago/main/deploy/docker-compose.local.yml
16 | ```
17 |
18 | 2. Run the following command to start the project:
19 |
20 | ```bash
21 | docker compose --profile all up
22 |
23 | # If you want to run it in the background
24 | docker compose --profile all up -d
25 | ```
26 |
27 | ## Docker Compose Light
28 |
29 | This configuration provides Traefik as a reverse proxy to ease your deployment.
30 | It supports SSL with Let's Encrypt. :warning: You need a valid domain (with at least one A or AAAA record)!
31 |
32 | 1. Get the docker compose file
33 |
34 | ```bash
35 | curl -o docker-compose.yml https://raw.githubusercontent.com/getlago/lago/main/deploy/docker-compose.light.yml
36 | curl -o .env https://raw.githubusercontent.com/getlago/lago/main/deploy/.env.light.example
37 | ```
38 |
39 | 2. Replace the .env values with yours
40 |
41 | ```bash
42 | LAGO_DOMAIN=domain.tld
43 | LAGO_ACME_EMAIL=email@domain.tld
44 | ```
45 |
46 | 3. Run the following command to start the project
47 |
48 | ```bash
49 | docker compose --profile all up
50 |
51 | # If you want to run it in the background
52 | docker compose --profile all up -d
53 | ```
54 |
55 | ## Docker Compose Production
56 |
57 | This configuration provides Traefik as a reverse proxy to ease your deployment.
58 | It supports SSL with Let's Encrypt. :warning: You need a valid domain (with at least one A or AAAA record)!
59 | It also adds multiple services that will help you handle more load.
60 | Portainer is also packed to help you scale services and manage your Lago stack.
61 |
62 | 1. Get the docker compose file
63 |
64 | ```bash
63 | curl -o docker-compose.yml https://raw.githubusercontent.com/getlago/lago/main/deploy/docker-compose.production.yml
64 | curl -o .env https://raw.githubusercontent.com/getlago/lago/main/deploy/.env.production.example
65 | ```
66 |
67 | 2. Replace the .env values with yours
68 |
69 | ```bash
70 | LAGO_DOMAIN=domain.tld
71 | LAGO_ACME_EMAIL=email@domain.tld
72 | PORTAINER_USER=lago
73 | PORTAINER_PASSWORD=changeme
74 | ```
75 |
76 | 3. Run the following command to start the project
77 |
78 | ```bash
79 | docker compose --profile all up
80 |
81 | # If you want to run it in the background
82 | docker compose --profile all up -d
83 | ```
84 |
85 |
86 | ## Configuration
87 |
88 | ### Profiles
89 |
90 | The docker compose file contains multiple profiles to enable or disable some services.
91 | Here are the available profiles:
92 | - `all`: Enable all services
93 | - `all-no-pg`: Disable the PostgreSQL service
94 | - `all-no-redis`: Disable the Redis service
95 | - `all-no-db`: Disable both the PostgreSQL and Redis services
96 | - `all-no-keys`: Disable the RSA keys generation service
97 |
98 | This allows you to start only the services you want to use; please see the following sections for more information.
98 |
99 | ```bash
100 | # Start all services
101 | docker compose --profile all up
102 |
103 | # Start without PostgreSQL
104 | docker compose --profile all-no-pg up
105 |
106 | # Start without Redis
107 | docker compose --profile all-no-redis up
108 |
109 | # Start without PostgreSQL and Redis
110 | docker compose --profile all-no-db up
111 |
112 | # Start without RSA keys generation
113 | docker compose --profile all-no-keys up
114 |
115 | # Start without PostgreSQL, Redis and RSA keys generation
116 | docker compose up
117 | ```
118 |
119 | ### PostgreSQL
120 |
121 | It is possible to disable the usage of the PostgreSQL database to use an external database instance.
122 |
123 | 1. Set those environment variables:
124 |
125 | - `POSTGRES_USER`
126 | - `POSTGRES_PASSWORD`
127 | - `POSTGRES_DB`
128 | - `POSTGRES_HOST`
129 | - `POSTGRES_PORT`
130 | - `POSTGRES_SCHEMA` (optional)
131 |
132 | 2. Run the following command to start the project without PostgreSQL:
133 |
134 | ```bash
135 | docker compose --profile all-no-pg up
136 | ```
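
For instance, the corresponding .env entries might look like this (a sketch with illustrative values; only the variable names come from this configuration):

```bash
POSTGRES_HOST=pg.internal.example.com
POSTGRES_PORT=5432
POSTGRES_USER=lago
POSTGRES_PASSWORD=changeme
POSTGRES_DB=lago
POSTGRES_SCHEMA=public  # optional
```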
137 |
138 | ### Redis
139 |
140 | It is possible to disable the usage of the Redis database to use an external Redis instance.
141 |
142 | 1. Set those environment variables:
143 |
144 | - `REDIS_HOST`
145 | - `REDIS_PORT`
146 | - `REDIS_PASSWORD` (optional)
147 |
148 | 2. Run the following command to start the project without Redis:
149 |
150 | ```bash
151 | docker compose --profile all-no-redis up
152 | ```
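
For instance, the corresponding .env entries might look like this (a sketch with illustrative values; only the variable names come from this configuration):

```bash
REDIS_HOST=redis.internal.example.com
REDIS_PORT=6379
REDIS_PASSWORD=changeme  # optional
```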
153 |
154 | ### RSA Keys
155 |
156 | These docker compose files generate an RSA key pair used to sign JWT tokens.
157 | You can find the keys in the `lago_rsa_data` volume or in the `/app/config/keys` directory in the backend containers.
158 | If you do not want to use those keys:
159 | - Remove the `lago_rsa_data` volume
160 | - Generate your own key using `openssl genrsa 2048 | base64 | tr -d '\n'`
161 | - Export this generated key to the `LAGO_RSA_PRIVATE_KEY` env var.
162 | - Run the following command to start the project without the RSA keys generation:
163 |
164 | ```bash
165 | docker compose --profile all-no-keys up
166 | ```
167 |
168 | *All backend services use the same RSA key; they will exit immediately if no key is provided.*
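
Putting it together, a minimal sketch of the bring-your-own-key flow (the generation command is the same one used elsewhere in this repository):

```bash
# Generate a key and strip newlines so it fits in a single env var,
# then start the stack without the key-generation service.
export LAGO_RSA_PRIVATE_KEY="$(openssl genrsa 2048 | base64 | tr -d '\n')"
docker compose --profile all-no-keys up -d
```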
169 |
--------------------------------------------------------------------------------
/deploy/deploy.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | GREEN=$(tput setaf 2)
4 | CYAN=$(tput setaf 6)
5 | YELLOW=$(tput setaf 3)
6 | RED=$(tput setaf 1)
7 | NORMAL=$(tput sgr0)
8 | BOLD=$(tput bold)
9 |
10 | ENV_FILE=".env"
11 |
12 | check_command() {
13 | if ! command -v "$1" &> /dev/null; then
14 | echo "${RED}❌ Error:${NORMAL} ${BOLD}$1${NORMAL} is not installed."
15 | return 1
16 | else
17 | echo "${GREEN}✅ $1 is installed.${NORMAL}"
18 | return 0
19 | fi
20 | }
21 |
22 | ask_yes_no() {
23 | while true; do
24 | read -p "${YELLOW}👉 $1 [y/N]: ${NORMAL}" yn /dev/null; do
41 | local temp=${spinstr#?}
42 | printf " [%c] " "$spinstr"
43 | local spinstr=$temp${spinstr%"$temp"}
44 | sleep $delay
45 | printf "\b\b\b\b\b\b"
46 | done
47 | printf " \b\b\b\b"
48 | }
49 |
50 | echo "${CYAN}${BOLD}"
51 | echo "============================="
52 | echo "🚀 Lago Docker Deployments 🚀"
53 | echo "=============================${NORMAL}"
54 | echo ""
55 |
56 | echo "${CYAN}${BOLD}🔍 Checking Dependencies...${NORMAL}"
57 | check_command docker || MISSING_DOCKER=true
58 | check_command docker-compose || check_command "docker compose" || MISSING_DOCKER_COMPOSE=true
59 |
60 | if [[ "$MISSING_DOCKER" = true || "$MISSING_DOCKER_COMPOSE" = true ]]; then
61 | echo "${YELLOW}⚠️ Please install missing dependencies:${NORMAL}"
62 |
63 | if [ "$MISSING_DOCKER" = true ]; then
64 | echo "👉 Docker: https://docs.docker.com/get-docker/"
65 | fi
66 |
67 | if [ "$MISSING_DOCKER_COMPOSE" = true ]; then
68 |     echo "👉 Docker Compose: https://docs.docker.com/compose/install/"
69 | fi
70 | fi
71 |
72 | echo ""
73 |
74 | check_and_stop_containers(){
75 | containers_to_check=("lago-quickstart")
76 |
77 | for container in "${containers_to_check[@]}"; do
78 | if [ "$(docker ps -q -f name="^/${container}$")" ]; then
79 | echo "${YELLOW}⚠️ Detected running container: ${BOLD}$container${NORMAL}"
80 |
81 | if ask_yes_no "Do you want to stop ${BOLD}${container}${NORMAL}?"; then
82 | echo -n "${CYAN}⏳ Stopping ${container}...${NORMAL}"
83 |
84 | (docker stop "$container" &>/dev/null) &
85 | spinner $!
86 |
87 | echo "${GREEN}✅ done.${NORMAL}"
88 |
89 | if ask_yes_no "Do you want to remove ${BOLD}${container}${NORMAL}?"; then
90 | echo -n "${CYAN}⏳ Deleting ${container}...${NORMAL}"
91 |
92 | (docker rm "$container" &>/dev/null) &
93 | spinner $!
94 |
95 | echo "${GREEN}✅ done.${NORMAL}"
96 | fi
97 | else
98 | echo "${RED}⚠️ Please manually stop ${container} before proceeding.${NORMAL}"
99 | exit 1
100 | fi
101 | fi
102 | done
103 |
104 | compose_projects=("lago-local" "lago-light" "lago-production")
105 | for project in "${compose_projects[@]}"; do
106 |     running_services=$(docker compose -p "$project" ps -q 2>/dev/null || docker-compose -p "$project" ps -q 2>/dev/null)
107 | if [ -n "$running_services" ]; then
108 | echo "${YELLOW}⚠️ Detected running Docker Compose project: ${BOLD}$project${NORMAL}"
109 |
110 | if ask_yes_no "Do you want to stop ${BOLD}${project}${NORMAL}?"; then
111 | docker compose -p "$project" down &>/dev/null || docker-compose -p "$project" down &>/dev/null
112 | echo "${GREEN}✅ ${project} stopped.${NORMAL}"
113 |
114 | if ask_yes_no "Do you want to clean volumes and all data from ${BOLD}${project}${NORMAL}?"; then
115 | docker volume rm -f lago_rsa_data lago_postgres_data lago_redis_data lago_storage_data
116 | echo "${GREEN}✅ ${project} data has been cleaned up.${NORMAL}"
117 | fi
118 | else
119 | echo "${RED}⚠️ Please manually stop ${project} before proceeding.${NORMAL}"
120 | exit 1
121 | fi
122 | fi
123 | done
124 | }
125 |
126 | # Checks existing deployments
127 | echo "${CYAN}${BOLD}🔍 Checking for existing Lago deployments...${NORMAL}"
128 | check_and_stop_containers
129 | echo ""
130 |
131 | templates=(
132 | "Quickstart|One-line Docker run command, ideal for testing"
133 | "Local|Local installation of Lago, without SSL support"
134 | "Light|Light Lago installation, ideal for small production usage"
135 | "Production|Optimized Production Setup for scalability and performances"
136 | )
137 |
138 | # Display Templates
139 | echo "${BOLD}📋 Available Deployments:${NORMAL}"
140 | i=1
141 | for template in "${templates[@]}"; do
142 | IFS='|' read -r key desc <<< "$template"
143 | echo "${YELLOW}[$i]${NORMAL} ${BOLD}${key}${NORMAL} - ${desc}"
144 | ((i++))
145 | done
146 | echo ""
147 |
148 | while true; do
149 | read -p "${CYAN}👉 Enter your choice [1-$((${#templates[@]}))]: ${NORMAL}" choice = 1 && choice <= ${#templates[@]} )); then
151 | selected="${templates[$((choice-1))]}"
152 | IFS='|' read -r selected_key selected_desc <<< "$selected"
153 | break
154 | else
155 | echo ""
156 | echo "${RED}⚠️ Invalid choice, please try again.${NORMAL}"
157 | echo ""
158 | fi
159 | done
160 |
161 | echo ""
162 |
163 | profile="all"
164 |
165 | # Download docker-compose file based on choice
166 | case "$selected_key" in
167 | "Local")
168 | echo "${CYAN}${BOLD}🚀 Downloading Local deployment files...${NORMAL}"
169 | curl -s -o docker-compose.yml https://deploy.getlago.com/docker-compose.local.yml
170 | curl -s -o .env https://deploy.getlago.com/.env.local.example
171 | if [ $? -eq 0 ]; then
172 | echo "${GREEN}✅ Successfully downloaded Local deployment files${NORMAL}"
173 | else
174 | echo "${RED}❌ Failed to download Local deployment files${NORMAL}"
175 | exit 1
176 | fi
177 | ;;
178 | "Light")
179 | echo "${CYAN}${BOLD}🚀 Downloading Light deployment files...${NORMAL}"
180 | curl -s -o docker-compose.yml https://deploy.getlago.com/docker-compose.light.yml
181 | curl -s -o .env https://deploy.getlago.com/.env.light.example
182 | if [ $? -eq 0 ]; then
183 | echo "${GREEN}✅ Successfully downloaded Light deployment files${NORMAL}"
184 | else
185 | echo "${RED}❌ Failed to download Light deployment files${NORMAL}"
186 | exit 1
187 | fi
188 | ;;
189 | "Production")
190 | echo "${CYAN}${BOLD}🚀 Downloading Production deployment files...${NORMAL}"
191 | curl -s -o docker-compose.yml https://deploy.getlago.com/docker-compose.production.yml
192 | curl -s -o .env https://deploy.getlago.com/.env.production.example
193 | if [ $? -eq 0 ]; then
194 | echo "${GREEN}✅ Successfully downloaded Production deployment files${NORMAL}"
195 | else
196 | echo "${RED}❌ Failed to download Production deployment files${NORMAL}"
197 | exit 1
198 | fi
199 | ;;
200 | esac
201 |
202 | echo ""
203 |
204 |
205 | # Check Env Vars depending on the deployment
206 | if [[ "$selected_key" == "Light" || "$selected_key" == "Production" ]]; then
207 | mandatory_vars=("LAGO_DOMAIN" "LAGO_ACME_EMAIL" "PORTAINER_USER" "PORTAINER_PASSWORD")
208 | external_pg=false
209 | external_redis=false
210 |
211 | if [[ -n "$LAGO_DOMAIN" ]]; then
212 | check_domain_dns "$LAGO_DOMAIN"
213 | if [[ $? -eq 1 ]] && ! ask_yes_no "No valid DNS record found. Continue anyway?"; then
214 | echo "${YELLOW}⚠️ Deployment aborted.${NORMAL}"
215 | exit 1
216 | fi
217 | fi
218 |
219 | if ask_yes_no "Do you want to use an external PostgreSQL instance?"; then
220 | mandatory_vars+=("POSTGRES_HOST" "POSTGRES_USER" "POSTGRES_PASSWORD" "POSTGRES_PORT" "POSTGRES_DB")
221 | external_pg=true
222 |
223 | if ask_yes_no "Does your PG Database use an other schema than public?"; then
224 | mandatory_vars+=("POSTGRES_SCHEMA")
225 | fi
226 | fi
227 |
228 | if ask_yes_no "Do you want to use an external Redis instance?"; then
229 | mandatory_vars+=("REDIS_HOST" "REDIS_PORT")
230 | external_redis=true
231 |
232 | if ask_yes_no "Does you Redis instance need a password?"; then
233 | mandatory_vars+=("REDIS_PASSWORD")
234 | fi
235 | fi
236 |
237 | if $external_pg && $external_redis; then
238 | profile="all-no-db"
239 | elif $external_pg; then
240 | profile="all-no-pg"
241 | elif $external_redis; then
242 | profile="all-no-redis"
243 | fi
244 |
245 | echo ""
246 |
247 | echo "${CYAN}${BOLD}🔧 Checking mandatory environment variables...${NORMAL}"
248 |
249 | # Load Existing .env values
250 | if [ -f "$ENV_FILE" ]; then
251 | # shellcheck disable=SC2046
252 | export $(grep -v '^#' "$ENV_FILE" | xargs)
253 | echo "${GREEN}✅ Loaded existing .env file.${NORMAL}"
254 | else
255 | touch "$ENV_FILE"
256 | echo "${YELLOW}⚠️ No .env file found. Created a new one.${NORMAL}"
257 | fi
258 |
259 | {
260 | echo "# Updated by Lago Deploy"
261 | for var in "${mandatory_vars[@]}"; do
262 | if [ -z "${!var}" ]; then
263 | read -p "${YELLOW}⚠️ $var is missing. Enter value: ${NORMAL}" user_input "$ENV_FILE"
271 |
272 | echo "${GREEN}${BOLD}✅ .env file updated successfully.${NORMAL}"
273 | echo ""
274 | fi
275 |
276 | # Check if domain has A record
277 | check_domain_dns() {
278 | local domain="$1"
279 |
280 | # Remove protocol if present
281 | domain=$(echo "$domain" | sed -E 's|^https?://||')
282 |
283 | echo "${CYAN}${BOLD}🔍 Checking DNS A record for ${domain}...${NORMAL}"
284 |
285 | if command -v dig &> /dev/null; then
286 | if dig +short A "$domain" | grep -q '^[0-9]'; then
287 | echo "${GREEN}✅ Valid A record found for ${BOLD}${domain}${NORMAL}"
288 | return 0
289 | else
290 | echo "${RED}❌ No valid A record found for ${BOLD}${domain}${NORMAL}"
291 | return 1
292 | fi
293 | elif command -v nslookup &> /dev/null; then
294 | if nslookup "$domain" | grep -q 'Address: [0-9]'; then
295 | echo "${GREEN}✅ Valid A record found for ${BOLD}${domain}${NORMAL}"
296 | return 0
297 | else
298 | echo "${RED}❌ No valid A record found for ${BOLD}${domain}${NORMAL}"
299 | return 1
300 | fi
301 | else
302 | echo "${YELLOW}⚠️ Cannot check domain DNS record - neither dig nor nslookup available${NORMAL}"
303 | return 2
304 | fi
305 | }
306 |
307 | # Execute selected deployment
308 | case "$selected_key" in
309 | Quickstart)
310 | echo "${CYAN}🚧 Running quickstart Docker container...${NORMAL}"
311 | docker run -d --name lago-quickstart -p 3000:3000 -p 80:80 getlago/lago:latest &>/dev/null
312 | ;;
313 | Local)
314 | echo "${CYAN}🚧 Running Local Docker Compose deployment...${NORMAL}"
315 | docker compose -f docker-compose.local.yml up -d || docker-compose -f docker-compose.local.yml up -d &>/dev/null
316 | ;;
317 | Light)
318 | echo "${CYAN}🚧 Running Light Docker Compose deployment...${NORMAL}"
319 |
320 | docker compose -f docker-compose.light.yml --profile "$profile" up -d &>/dev/null || \
321 | docker-compose -f docker-compose.light.yml --profile "$profile" up -d &>/dev/null
322 | ;;
323 | Production)
324 | echo "${CYAN}🚧 Running Production Docker Compose deployment...${NORMAL}"
325 |
326 | docker compose -f docker-compose.production.yml --profile "$profile" up -d &>/dev/null || \
327 | docker-compose -f docker-compose.production.yml --profile "$profile" up -d &>/dev/null
328 | ;;
329 | esac
330 |
331 | echo ""
332 | echo "${GREEN}${BOLD}🎉 Lago deployment started successfully!${NORMAL}"
333 |
--------------------------------------------------------------------------------
/deploy/docker-compose.light.yml:
--------------------------------------------------------------------------------
1 | name: lago-light
2 |
3 | volumes:
4 | lago_rsa_data:
5 | lago_postgres_data:
6 | lago_redis_data:
7 | lago_storage_data:
8 |
9 | x-postgres-image: &postgres-image
10 | image: postgres:15-alpine
11 | x-redis-image: &redis-image
12 | image: redis:7-alpine
13 | x-backend-image: &backend-image
14 | image: getlago/api:v1.27.1
15 | x-frontend-image: &frontend-image
16 | image: getlago/front:v1.27.1
17 |
18 | x-lago-domain: &lago-domain
19 | "LAGO_DOMAIN": ${LAGO_DOMAIN}
20 |
21 | # TODO: Use only LAGO_DOMAIN
22 | x-backend-urls: &backend-urls
23 | "LAGO_FRONT_URL": https://${LAGO_DOMAIN}
24 | "LAGO_API_URL": https://${LAGO_DOMAIN}/api
25 |
26 | x-backend-environment: &backend-env
27 | "DATABASE_URL": postgresql://${POSTGRES_USER:-lago}:${POSTGRES_PASSWORD:-changeme}@${POSTGRES_HOST:-db}:${POSTGRES_PORT:-5432}/${POSTGRES_DB:-lago}?search_path=${POSTGRES_SCHEMA:-public}
28 | "REDIS_URL": redis://${REDIS_HOST:-redis}:${REDIS_PORT:-6379}
29 | "REDIS_PASSWORD": ${REDIS_PASSWORD:-}
30 | "SECRET_KEY_BASE": ${SECRET_KEY_BASE:-your-secret-key-base-hex-64}
31 | "RAILS_ENV": production
32 | "RAILS_LOG_TO_STDOUT": ${LAGO_RAILS_STDOUT:-true}
33 | "LAGO_RSA_PRIVATE_KEY": ${LAGO_RSA_PRIVATE_KEY:-}
34 | "LAGO_SIDEKIQ_WEB": ${LAGO_SIDEKIQ_WEB:-true}
35 | "LAGO_ENCRYPTION_PRIMARY_KEY": ${LAGO_ENCRYPTION_PRIMARY_KEY:-your-encryption-primary-key}
36 | "LAGO_ENCRYPTION_DETERMINISTIC_KEY": ${LAGO_ENCRYPTION_DETERMINISTIC_KEY:-your-encryption-deterministic-key}
37 | "LAGO_ENCRYPTION_KEY_DERIVATION_SALT": ${LAGO_ENCRYPTION_KEY_DERIVATION_SALT:-your-encryption-derivation-salt}
38 | "LAGO_USE_AWS_S3": ${LAGO_USE_AWS_S3:-false}
39 | "LAGO_AWS_S3_ACCESS_KEY_ID": ${LAGO_AWS_S3_ACCESS_KEY_ID:-azerty123456}
40 | "LAGO_AWS_S3_SECRET_ACCESS_KEY": ${LAGO_AWS_S3_SECRET_ACCESS_KEY:-azerty123456}
41 | "LAGO_AWS_S3_REGION": ${LAGO_AWS_S3_REGION:-us-east-1}
42 | "LAGO_AWS_S3_BUCKET": ${LAGO_AWS_S3_BUCKET:-bucket}
43 | "LAGO_AWS_S3_ENDPOINT": ${LAGO_AWS_S3_ENDPOINT:-}
44 | "LAGO_USE_GCS": ${LAGO_USE_GCS:-false}
45 | "LAGO_GCS_PROJECT": ${LAGO_GCS_PROJECT:-}
46 | "LAGO_GCS_BUCKET": ${LAGO_GCS_BUCKET:-}
47 | "LAGO_FROM_EMAIL": ${LAGO_FROM_EMAIL:-}
48 | "LAGO_SMTP_ADDRESS": ${LAGO_SMTP_ADDRESS:-}
49 | "LAGO_SMTP_PORT": ${LAGO_SMTP_PORT:-587}
50 | "LAGO_SMTP_USERNAME": ${LAGO_SMTP_USERNAME:-}
51 | "LAGO_SMTP_PASSWORD": ${LAGO_SMTP_PASSWORD:-}
52 | "LAGO_PDF_URL": http://pdf:3000
53 | "LAGO_REDIS_CACHE_URL": redis://${LAGO_REDIS_CACHE_HOST:-redis}:${LAGO_REDIS_CACHE_PORT:-6379}
54 | "LAGO_REDIS_CACHE_PASSWORD": ${LAGO_REDIS_CACHE_PASSWORD:-}
55 | "LAGO_DISABLE_SEGMENT": ${LAGO_DISABLE_SEGMENT:-}
56 | "LAGO_DISABLE_WALLET_REFRESH": ${LAGO_DISABLE_WALLET_REFRESH:-}
57 | "LAGO_DISABLE_SIGNUP": ${LAGO_DISABLE_SIGNUP:-false}
58 | "LAGO_OAUTH_PROXY_URL": https://proxy.getlago.com
59 | "LAGO_LICENSE": ${LAGO_LICENSE:-}
60 | "LAGO_CREATE_ORG": ${LAGO_CREATE_ORG:-false}
61 | "LAGO_ORG_USER_PASSWORD": ${LAGO_ORG_USER_PASSWORD:-}
62 | "LAGO_ORG_USER_EMAIL": ${LAGO_ORG_USER_EMAIL:-}
63 | "LAGO_ORG_NAME": ${LAGO_ORG_NAME:-}
64 | "LAGO_ORG_API_KEY": ${LAGO_ORG_API_KEY:-}
65 | "GOOGLE_AUTH_CLIENT_ID": ${GOOGLE_AUTH_CLIENT_ID:-}
66 | "GOOGLE_AUTH_CLIENT_SECRET": ${GOOGLE_AUTH_CLIENT_SECRET:-}
67 |
68 | x-frontend-environment: &frontend-env
69 | "API_URL": "https://${LAGO_DOMAIN}"
70 | "APP_ENV": production
71 | "LAGO_OAUTH_PROXY_URL": https://proxy.getlago.com
72 |
73 | services:
74 | traefik:
75 | image: traefik:v3.3
76 | container_name: lago-traefik
77 | restart: unless-stopped
78 | command:
79 | - "--api.insecure=true"
80 | - "--providers.docker=true"
81 | - "--providers.docker.exposedbydefault=false"
82 | - "--entrypoints.websecure.address=:443"
83 | - "--certificatesresolvers.letsencrypt.acme.tlschallenge=true"
84 | - "--certificatesresolvers.letsencrypt.acme.email=${LAGO_ACME_EMAIL:-your_email@example.com}"
85 | - "--certificatesresolvers.letsencrypt.acme.storage=/letsencrypt/acme.json"
86 | - "--certificatesresolvers.letsencrypt.acme.caServer=https://acme-staging-v02.api.letsencrypt.org/directory"
87 | ports:
88 | - 8080:8080
89 | - 443:443
90 | volumes:
91 | - "/var/run/docker.sock:/var/run/docker.sock:ro"
92 | - "./letsencrypt:/letsencrypt"
93 |
94 | db:
95 | <<: *postgres-image
96 | container_name: lago-db
97 | restart: unless-stopped
98 | healthcheck:
99 | test:
100 | [
101 | "CMD-SHELL",
102 | "pg_isready -U ${POSTGRES_USER:-lago} -d ${POSTGRES_DB:-lago} -h localhost -p ${POSTGRES_PORT:-5432}",
103 | ]
104 | interval: 10s
105 | timeout: 5s
106 | retries: 5
107 | environment:
108 | POSTGRES_DB: ${POSTGRES_DB:-lago}
109 | POSTGRES_USER: ${POSTGRES_USER:-lago}
110 | POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-changeme}
111 | PGDATA: /data/postgres
112 | PGPORT: ${POSTGRES_PORT:-5432}
113 | POSTGRES_SCHEMA: public
114 | volumes:
115 | - lago_postgres_data:/data/postgres
116 | ports:
117 | - ${POSTGRES_PORT:-5432}:${POSTGRES_PORT:-5432}
118 | profiles:
119 | - all
120 | - all-no-redis
121 | - all-no-keys
122 |
123 | redis:
124 | <<: *redis-image
125 | container_name: lago-redis
126 | restart: unless-stopped
127 | healthcheck:
128 |       test: ["CMD", "redis-cli", "-p", "${REDIS_PORT:-6379}", "ping"]
129 | interval: 10s
130 | timeout: 5s
131 | retries: 5
132 | command: --port ${REDIS_PORT:-6379}
133 | volumes:
134 | - lago_redis_data:/data
135 | ports:
136 | - ${REDIS_PORT:-6379}:${REDIS_PORT:-6379}
137 | profiles:
138 | - all
139 | - all-no-pg
140 | - all-no-keys
141 |
142 | rsa-keys:
143 | <<: *backend-image
144 | container_name: lago-rsa-keys
145 | command: ["./scripts/generate.rsa.sh"]
146 | volumes:
147 | - lago_rsa_data:/app/config/keys
148 | profiles:
149 | - all
150 | - all-no-pg
151 | - all-no-redis
152 | - all-no-db
153 |
154 | migrate:
155 | <<: *backend-image
156 | container_name: lago-migrate
157 |     restart: "no"
158 | depends_on:
159 | db:
160 | condition: service_healthy
161 | restart: true
162 | required: false
163 | command: ["./scripts/migrate.sh"]
164 | volumes:
165 | - lago_rsa_data:/app/config/keys
166 | environment:
167 | <<: *backend-env
168 |
169 | api:
170 | <<: *backend-image
171 | container_name: lago-api
172 | restart: unless-stopped
173 | depends_on:
174 | migrate:
175 | condition: service_completed_successfully
176 | db:
177 | condition: service_healthy
178 | restart: true
179 | required: false
180 | redis:
181 | condition: service_healthy
182 | restart: true
183 | required: false
184 | command: ["./scripts/start.api.sh"]
185 | healthcheck:
186 | test: curl -f http://localhost:3000/health || exit 1
187 | interval: 10s
188 | start_period: 30s
189 | timeout: 60s
190 | start_interval: 2s
191 | environment:
192 | <<: [*backend-env, *backend-urls]
193 | volumes:
194 | - lago_storage_data:/app/storage
195 | - lago_rsa_data:/app/config/keys
196 | labels:
197 | - "traefik.enable=true"
198 |
199 | # API Routes
200 | - "traefik.http.routers.lago-api.priority=100"
201 | - "traefik.http.routers.lago-api.entrypoints=websecure"
202 | - "traefik.http.routers.lago-api.rule=Host(`${LAGO_DOMAIN}`) && PathPrefix(`/api/`)"
203 | - "traefik.http.routers.lago-api.middlewares=lago-api-stripprefix"
204 | - "traefik.http.middlewares.lago-api-stripprefix.stripprefix.prefixes=/api"
205 | - "traefik.http.routers.lago-api.tls.certresolver=letsencrypt"
206 | - "traefik.http.routers.lago-api.service=lago-api-service"
207 | - "traefik.http.services.lago-api-service.loadbalancer.server.port=3000"
208 |
209 | # API Versioned Routes
210 | - "traefik.http.routers.lago-api-versioned.priority=110"
211 | - "traefik.http.routers.lago-api-versioned.entrypoints=websecure"
212 | - "traefik.http.routers.lago-api-versioned.rule=Host(`${LAGO_DOMAIN}`) && PathPrefix(`/api/v`)"
213 | - "traefik.http.routers.lago-api-versioned.tls.certresolver=letsencrypt"
214 | - "traefik.http.routers.lago-api-versioned.service=lago-api-service"
215 |
216 | # API Rails Assets
217 | - "traefik.http.routers.lago-api-rails.priority=100"
218 | - "traefik.http.routers.lago-api-rails.entrypoints=websecure"
219 | - "traefik.http.routers.lago-api-rails.rule=Host(`${LAGO_DOMAIN}`) && PathPrefix(`/rails`)"
220 | - "traefik.http.routers.lago-api-rails.tls.certresolver=letsencrypt"
221 | - "traefik.http.routers.lago-api-rails.service=lago-api-service"
222 |
223 | # GraphQL Routes
224 | - "traefik.http.routers.lago-graphql.priority=100"
225 | - "traefik.http.routers.lago-graphql.entrypoints=websecure"
226 | - "traefik.http.routers.lago-graphql.rule=Host(`${LAGO_DOMAIN}`) && Path(`/graphql`)"
227 | - "traefik.http.routers.lago-graphql.tls.certresolver=letsencrypt"
228 | - "traefik.http.routers.lago-graphql.service=lago-api-service"
229 |
230 | front:
231 | <<: *frontend-image
232 | container_name: lago-front
233 | restart: unless-stopped
234 | depends_on:
235 | api:
236 | condition: service_healthy
237 | restart: true
238 | environment:
239 | <<: [*frontend-env]
240 | labels:
241 | - "traefik.enable=true"
242 | - "traefik.http.routers.lago-front.priority=50"
243 | - "traefik.http.routers.lago-front.entrypoints=websecure"
244 | - "traefik.http.routers.lago-front.rule=Host(`${LAGO_DOMAIN}`)"
245 | - "traefik.http.routers.lago-front.tls.certresolver=letsencrypt"
246 | - "traefik.http.services.lago-front.loadbalancer.server.port=80"
247 |
248 |
249 | api-worker:
250 | <<: *backend-image
251 | container_name: lago-worker
252 | restart: unless-stopped
253 | depends_on:
254 | migrate:
255 | condition: service_completed_successfully
256 | db:
257 | condition: service_healthy
258 | restart: true
259 | required: false
260 | redis:
261 | condition: service_healthy
262 | restart: true
263 | required: false
264 | command: ["./scripts/start.worker.sh"]
265 | healthcheck:
266 | test: curl -f http://localhost:8080 || exit 1
267 | interval: 10s
268 | start_period: 30s
269 | timeout: 60s
270 | start_interval: 2s
271 | environment:
272 | <<: [*backend-env, *backend-urls]
273 | volumes:
274 | - lago_storage_data:/app/storage
275 | - lago_rsa_data:/app/config/keys
276 |
277 | api-clock:
278 | <<: *backend-image
279 | container_name: lago-clock
280 | restart: unless-stopped
281 | depends_on:
282 | migrate:
283 | condition: service_completed_successfully
284 | db:
285 | condition: service_healthy
286 | restart: true
287 | required: false
288 | redis:
289 | condition: service_healthy
290 | restart: true
291 | required: false
292 | command: ["./scripts/start.clock.sh"]
293 | volumes:
294 | - lago_rsa_data:/app/config/keys
295 | environment:
296 | <<: [*backend-env, *backend-urls]
297 |
298 | pdf:
299 | image: getlago/lago-gotenberg:8.15
300 | command:
301 | - gotenberg
302 | - --libreoffice-disable-routes=true
303 | - --chromium-ignore-certificate-errors=true
304 | - --chromium-disable-javascript=true
305 | - --api-timeout=300s
306 |
--------------------------------------------------------------------------------
/deploy/docker-compose.local.yml:
--------------------------------------------------------------------------------
1 | name: lago-local
2 |
3 | volumes:
4 | lago_rsa_data:
5 | lago_postgres_data:
6 | lago_redis_data:
7 | lago_storage_data:
8 |
9 | x-postgres-image: &postgres-image
10 | image: postgres:15-alpine
11 | x-redis-image: &redis-image
12 | image: redis:7-alpine
13 | x-backend-image: &backend-image
14 | image: getlago/api:v1.27.1
15 | x-frontend-image: &frontend-image
16 | image: getlago/front:v1.27.1
17 |
18 | x-lago-api-url: &lago-api-url
19 | "LAGO_API_URL": ${LAGO_API_URL:-http://localhost:3000}
20 | x-lago-front-url: &lago-front-url
21 | "LAGO_FRONT_URL": ${LAGO_FRONT_URL:-http://localhost}
22 |
23 | x-backend-environment: &backend-env
24 | "DATABASE_URL": postgresql://${POSTGRES_USER:-lago}:${POSTGRES_PASSWORD:-changeme}@${POSTGRES_HOST:-db}:${POSTGRES_PORT:-5432}/${POSTGRES_DB:-lago}?search_path=${POSTGRES_SCHEMA:-public}
25 | "REDIS_URL": redis://${REDIS_HOST:-redis}:${REDIS_PORT:-6379}
26 | "REDIS_PASSWORD": ${REDIS_PASSWORD:-}
27 | "SECRET_KEY_BASE": ${SECRET_KEY_BASE:-your-secret-key-base-hex-64}
28 | "RAILS_ENV": production
29 | "RAILS_LOG_TO_STDOUT": ${LAGO_RAILS_STDOUT:-true}
30 | "LAGO_RSA_PRIVATE_KEY": ${LAGO_RSA_PRIVATE_KEY:-}
31 | "LAGO_SIDEKIQ_WEB": ${LAGO_SIDEKIQ_WEB:-true}
32 | "LAGO_ENCRYPTION_PRIMARY_KEY": ${LAGO_ENCRYPTION_PRIMARY_KEY:-your-encryption-primary-key}
33 | "LAGO_ENCRYPTION_DETERMINISTIC_KEY": ${LAGO_ENCRYPTION_DETERMINISTIC_KEY:-your-encryption-deterministic-key}
34 | "LAGO_ENCRYPTION_KEY_DERIVATION_SALT": ${LAGO_ENCRYPTION_KEY_DERIVATION_SALT:-your-encryption-derivation-salt}
35 | "LAGO_USE_AWS_S3": ${LAGO_USE_AWS_S3:-false}
36 | "LAGO_AWS_S3_ACCESS_KEY_ID": ${LAGO_AWS_S3_ACCESS_KEY_ID:-azerty123456}
37 | "LAGO_AWS_S3_SECRET_ACCESS_KEY": ${LAGO_AWS_S3_SECRET_ACCESS_KEY:-azerty123456}
38 | "LAGO_AWS_S3_REGION": ${LAGO_AWS_S3_REGION:-us-east-1}
39 | "LAGO_AWS_S3_BUCKET": ${LAGO_AWS_S3_BUCKET:-bucket}
40 | "LAGO_AWS_S3_ENDPOINT": ${LAGO_AWS_S3_ENDPOINT:-}
41 | "LAGO_USE_GCS": ${LAGO_USE_GCS:-false}
42 | "LAGO_GCS_PROJECT": ${LAGO_GCS_PROJECT:-}
43 | "LAGO_GCS_BUCKET": ${LAGO_GCS_BUCKET:-}
44 | "LAGO_FROM_EMAIL": ${LAGO_FROM_EMAIL:-}
45 | "LAGO_SMTP_ADDRESS": ${LAGO_SMTP_ADDRESS:-}
46 | "LAGO_SMTP_PORT": ${LAGO_SMTP_PORT:-587}
47 | "LAGO_SMTP_USERNAME": ${LAGO_SMTP_USERNAME:-}
48 | "LAGO_SMTP_PASSWORD": ${LAGO_SMTP_PASSWORD:-}
49 | "LAGO_PDF_URL": ${LAGO_PDF_URL:-http://pdf:3000}
50 | "LAGO_REDIS_CACHE_URL": redis://${LAGO_REDIS_CACHE_HOST:-redis}:${LAGO_REDIS_CACHE_PORT:-6379}
51 | "LAGO_REDIS_CACHE_PASSWORD": ${LAGO_REDIS_CACHE_PASSWORD:-}
52 | "LAGO_DISABLE_SEGMENT": ${LAGO_DISABLE_SEGMENT:-}
53 | "LAGO_DISABLE_WALLET_REFRESH": ${LAGO_DISABLE_WALLET_REFRESH:-}
54 | "LAGO_DISABLE_SIGNUP": ${LAGO_DISABLE_SIGNUP:-false}
55 | "LAGO_OAUTH_PROXY_URL": https://proxy.getlago.com
56 | "LAGO_LICENSE": ${LAGO_LICENSE:-}
57 | "LAGO_CREATE_ORG": ${LAGO_CREATE_ORG:-false}
58 | "LAGO_ORG_USER_PASSWORD": ${LAGO_ORG_USER_PASSWORD:-}
59 | "LAGO_ORG_USER_EMAIL": ${LAGO_ORG_USER_EMAIL:-}
60 | "LAGO_ORG_NAME": ${LAGO_ORG_NAME:-}
61 | "LAGO_ORG_API_KEY": ${LAGO_ORG_API_KEY:-}
62 | "GOOGLE_AUTH_CLIENT_ID": ${GOOGLE_AUTH_CLIENT_ID:-}
63 | "GOOGLE_AUTH_CLIENT_SECRET": ${GOOGLE_AUTH_CLIENT_SECRET:-}
64 | x-frontend-environment: &frontend-env
65 | "API_URL": ${LAGO_API_URL:-http://localhost:3000}
66 | "APP_ENV": production
67 | "LAGO_OAUTH_PROXY_URL": https://proxy.getlago.com
68 |
69 | services:
70 | db:
71 | <<: *postgres-image
72 | container_name: lago-db
73 | restart: unless-stopped
74 | healthcheck:
75 | test:
76 | [
77 | "CMD-SHELL",
78 | "pg_isready -U ${POSTGRES_USER:-lago} -d ${POSTGRES_DB:-lago} -h localhost -p ${POSTGRES_PORT:-5432}",
79 | ]
80 | interval: 10s
81 | timeout: 5s
82 | retries: 5
83 | environment:
84 | POSTGRES_DB: ${POSTGRES_DB:-lago}
85 | POSTGRES_USER: ${POSTGRES_USER:-lago}
86 | POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-changeme}
87 | PGDATA: /data/postgres
88 | PGPORT: ${POSTGRES_PORT:-5432}
89 | POSTGRES_SCHEMA: public
90 | volumes:
91 | - lago_postgres_data:/data/postgres
92 | ports:
93 | - ${POSTGRES_PORT:-5432}:${POSTGRES_PORT:-5432}
94 | profiles:
95 | - all
96 | - all-no-redis
97 | - all-no-keys
98 |
99 | redis:
100 | <<: *redis-image
101 | container_name: lago-redis
102 | restart: unless-stopped
103 | healthcheck:
104 |       test: ["CMD", "redis-cli", "-p", "${REDIS_PORT:-6379}", "ping"]
105 | interval: 10s
106 | timeout: 5s
107 | retries: 5
108 | command: --port ${REDIS_PORT:-6379}
109 | volumes:
110 | - lago_redis_data:/data
111 | ports:
112 | - ${REDIS_PORT:-6379}:${REDIS_PORT:-6379}
113 | profiles:
114 | - all
115 | - all-no-pg
116 | - all-no-keys
117 |
118 | rsa-keys:
119 | <<: *backend-image
120 | container_name: lago-rsa-keys
121 | command: ["./scripts/generate.rsa.sh"]
122 | volumes:
123 | - lago_rsa_data:/app/config/keys
124 | profiles:
125 | - all
126 | - all-no-pg
127 | - all-no-redis
128 | - all-no-db
129 |
130 | migrate:
131 | <<: *backend-image
132 | container_name: lago-migrate
133 |     restart: "no"
134 | depends_on:
135 | db:
136 | condition: service_healthy
137 | restart: true
138 | required: false
139 | command: ["./scripts/migrate.sh"]
140 | volumes:
141 | - lago_rsa_data:/app/config/keys
142 | environment:
143 | <<: *backend-env
144 |
145 | api:
146 | <<: *backend-image
147 | container_name: lago-api
148 | restart: unless-stopped
149 | depends_on:
150 | migrate:
151 | condition: service_completed_successfully
152 | db:
153 | condition: service_healthy
154 | restart: true
155 | required: false
156 | redis:
157 | condition: service_healthy
158 | restart: true
159 | required: false
160 | command: ["./scripts/start.api.sh"]
161 | healthcheck:
162 | test: curl -f http://localhost:3000/health || exit 1
163 | interval: 10s
164 | start_period: 30s
165 | timeout: 60s
166 | start_interval: 2s
167 | environment:
168 | <<: [*backend-env, *lago-api-url, *lago-front-url]
169 | volumes:
170 | - lago_storage_data:/app/storage
171 | - lago_rsa_data:/app/config/keys
172 | ports:
173 | - ${API_PORT:-3000}:3000
174 |
175 | front:
176 | <<: *frontend-image
177 | container_name: lago-front
178 | restart: unless-stopped
179 | depends_on:
180 | api:
181 | condition: service_healthy
182 | restart: true
183 | environment:
184 | <<: [*frontend-env, *lago-api-url, *lago-front-url]
185 | ports:
186 | - ${FRONT_PORT:-80}:80
187 |
188 | api-worker:
189 | <<: *backend-image
190 | container_name: lago-worker
191 | restart: unless-stopped
192 | depends_on:
193 | migrate:
194 | condition: service_completed_successfully
195 | db:
196 | condition: service_healthy
197 | restart: true
198 | required: false
199 | redis:
200 | condition: service_healthy
201 | restart: true
202 | required: false
203 | command: ["./scripts/start.worker.sh"]
204 | healthcheck:
205 | test: curl -f http://localhost:8080 || exit 1
206 | interval: 10s
207 | start_period: 30s
208 | timeout: 60s
209 | start_interval: 2s
210 | environment:
211 | <<: [*backend-env, *lago-api-url]
212 | volumes:
213 | - lago_storage_data:/app/storage
214 | - lago_rsa_data:/app/config/keys
215 |
216 | api-clock:
217 | <<: *backend-image
218 | container_name: lago-clock
219 | restart: unless-stopped
220 | depends_on:
221 | migrate:
222 | condition: service_completed_successfully
223 | db:
224 | condition: service_healthy
225 | restart: true
226 | required: false
227 | redis:
228 | condition: service_healthy
229 | restart: true
230 | required: false
231 | command: ["./scripts/start.clock.sh"]
232 | volumes:
233 | - lago_rsa_data:/app/config/keys
234 | environment:
235 | <<: [*backend-env, *lago-api-url]
236 |
237 | pdf:
238 | image: getlago/lago-gotenberg:8.15
239 | command:
240 | - gotenberg
241 | - --libreoffice-disable-routes=true
242 | - --chromium-ignore-certificate-errors=true
243 | - --chromium-disable-javascript=true
244 | - --api-timeout=300s
245 |
--------------------------------------------------------------------------------
/docker-compose.dev.yml:
--------------------------------------------------------------------------------
1 | name: lago_dev
2 |
3 | volumes:
4 | front_node_modules_dev:
5 | front_dist_dev:
6 | postgres_data_dev:
7 | redis_data_dev:
8 | redpanda_data_dev:
9 | clickhouse_data_dev:
10 |
11 | services:
12 | traefik:
13 | image: "traefik:v2.5.4"
14 | container_name: lago_traefik_dev
15 | restart: unless-stopped
16 | ports:
17 | - 80:80
18 | - 443:443
19 | volumes:
20 | - ./traefik/traefik.yml:/etc/traefik/traefik.yml
21 | - ./traefik/dynamic.yml:/etc/traefik/dynamic.yml
22 | - ./traefik/certs:/etc/certs
23 | - "/var/run/docker.sock:/var/run/docker.sock:ro"
24 | labels:
25 | - "traefik.enable=true"
26 | - "traefik.http.routers.traefik.rule=Host(`traefik.lago.dev`)"
27 | - "traefik.http.routers.traefik.entrypoints=web,websecure"
28 | - "traefik.http.routers.traefik.tls=true"
29 | - "traefik.http.services.traefik.loadbalancer.server.port=8080"
30 |
31 | db:
32 | image: postgres:14.0-alpine
33 | container_name: lago_db_dev
34 | restart: unless-stopped
35 | environment:
36 | POSTGRES_USER: ${POSTGRES_USER:-lago}
37 | POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-changeme}
38 | PGDATA: /data/postgres
39 | POSTGRES_MULTIPLE_DATABASES: lago,lago_test
40 | volumes:
41 | - ./pg-init-scripts:/docker-entrypoint-initdb.d
42 | - postgres_data_dev:/data/postgres
43 | ports:
44 | - 5432:5432
45 |
46 | redis:
47 | image: redis:6.2-alpine
48 | container_name: lago_redis_dev
49 | restart: unless-stopped
50 | volumes:
51 | - redis_data_dev:/data
52 | ports:
53 | - 6379:6379
54 |
55 | front:
56 | image: front_dev
57 | container_name: lago_front_dev
58 | stdin_open: true
59 | restart: unless-stopped
60 | depends_on:
61 | - api
62 | build:
63 | context: ./front
64 | dockerfile: $LAGO_PATH/front/Dockerfile.dev
65 | volumes:
66 | - $LAGO_PATH/front:/app:delegated
67 | - front_node_modules_dev:/app/node_modules:delegated
68 | - front_dist_dev:/app/dist:delegated
69 | environment:
70 | - NODE_ENV=development
71 | - API_URL=https://api.lago.dev
72 | - APP_DOMAIN=https://app.lago.dev
73 | - CODEGEN_API=http://api:3000/graphql
74 | - LAGO_DISABLE_SIGNUP=${LAGO_DISABLE_SIGNUP:-}
75 | - LAGO_DISABLE_PDF_GENERATION=${LAGO_DISABLE_PDF_GENERATION:-false}
76 | - NANGO_SECRET_KEY=${NANGO_SECRET_KEY:-}
77 | labels:
78 | - "traefik.enable=true"
79 | - "traefik.http.routers.app.rule=Host(`app.lago.dev`)"
80 | - "traefik.http.routers.app.entrypoints=web,ws,websecure"
81 | - "traefik.http.routers.app.tls=true"
82 | - "traefik.http.services.app.loadbalancer.server.port=8080"
83 |
84 | api:
85 | image: api_dev
86 | container_name: lago_api_dev
87 | restart: unless-stopped
88 | command: ["./scripts/start.dev.sh"]
89 | depends_on:
90 | - db
91 | build:
92 | context: ./api
93 | dockerfile: $LAGO_PATH/api/Dockerfile.dev
94 | volumes:
95 | - $LAGO_PATH/api:/app:delegated
96 | env_file:
97 | - path: ./.env.development.default
98 | - path: ./.env.development
99 | required: false
100 | stdin_open: true
101 | tty: true
102 | environment:
103 | - DATABASE_TEST_URL=postgresql://${POSTGRES_USER:-lago}:${POSTGRES_PASSWORD:-changeme}@db:5432/lago_test
104 | - GOOGLE_AUTH_CLIENT_ID=${GOOGLE_AUTH_CLIENT_ID:-}
105 | - GOOGLE_AUTH_CLIENT_SECRET=${GOOGLE_AUTH_CLIENT_SECRET:-}
106 | labels:
107 | - "traefik.enable=true"
108 | - "traefik.http.routers.api_http.rule=Host(`api.lago.dev`)"
109 | - "traefik.http.routers.api_http.entrypoints=web"
110 | - "traefik.http.routers.api_http.service=api_http"
111 | - "traefik.http.services.api_http.loadbalancer.server.port=3000"
112 | - "traefik.http.routers.api.rule=Host(`api.lago.dev`)"
113 | - "traefik.http.routers.api.entrypoints=websecure"
114 | - "traefik.http.routers.api.service=api"
115 | - "traefik.http.routers.api.tls=true"
116 | - "traefik.http.services.api.loadbalancer.server.port=3000"
117 |
118 | api-worker: &api_worker
119 | image: api_dev
120 | container_name: lago_api_worker
121 | restart: unless-stopped
122 | command: bash -c "bundle install && ./scripts/start.worker.sh"
123 | depends_on:
124 | - api
125 | build:
126 | context: ./api
127 | dockerfile: $LAGO_PATH/api/Dockerfile.dev
128 | volumes:
129 | - $LAGO_PATH/api:/app:delegated
130 | env_file:
131 | - path: ./.env.development.default
132 | - path: ./.env.development
133 | required: false
134 | stdin_open: true
135 | tty: true
136 |
137 | api-events-worker:
138 | <<: *api_worker
139 | container_name: lago_api_events_worker_dev
140 | command: bash -c "bundle install && ./scripts/start.events.worker.sh"
141 |
142 | api-pdfs-worker:
143 | <<: *api_worker
144 | container_name: lago_api_pdfs_worker_dev
145 | command: bash -c "bundle install && ./scripts/start.pdfs.worker.sh"
146 |
147 | api-billing-worker:
148 | <<: *api_worker
149 | container_name: lago_api_billing_worker_dev
150 | command: bash -c "bundle install && ./scripts/start.billing.worker.sh"
151 |
152 | api-clock-worker:
153 | <<: *api_worker
154 | container_name: lago_api_clock_worker_dev
155 | command: bash -c "bundle install && ./scripts/start.clock.worker.sh"
156 |
157 | api-webhook-worker:
158 | <<: *api_worker
159 | container_name: lago_api_webhook_worker_dev
160 | command: bash -c "bundle install && ./scripts/start.webhook.worker.sh"
161 |
162 | api-clock:
163 | image: api_dev
164 | container_name: lago_api_clock_dev
165 | restart: unless-stopped
166 | command: bash -c "bundle install && ./scripts/start.clock.sh"
167 | depends_on:
168 | - api
169 | build:
170 | context: ./api
171 | dockerfile: $LAGO_PATH/api/Dockerfile.dev
172 | volumes:
173 | - $LAGO_PATH/api:/app:delegated
174 | env_file:
175 | - path: ./.env.development.default
176 | - path: ./.env.development
177 | required: false
178 |
179 | api-events-consumer:
180 | <<: *api_worker
181 | container_name: lago_api_events_consumer_dev
182 | command: bash -c "bundle install && ./scripts/start.events.consumer.sh"
183 |
184 | events-processor:
185 | image: events-processor_dev
186 | container_name: lago_events-processor
187 | restart: unless-stopped
188 | build:
189 | context: ./events-processor
190 | dockerfile: $LAGO_PATH/events-processor/Dockerfile.dev
191 | depends_on:
192 | - db
193 | - redpanda
194 | - redis
195 | env_file:
196 | - path: ./.env.development.default
197 | - path: ./.env.development
198 | required: false
199 | volumes:
200 | - $LAGO_PATH/events-processor:/app:delegated
201 |
202 | pdf:
203 | image: getlago/lago-gotenberg:7
204 | container_name: lago_pdf_dev
205 | restart: unless-stopped
206 | command:
207 | - "gotenberg"
208 | - "--log-level=debug"
209 | labels:
210 | - "traefik.enable=true"
211 | - "traefik.http.routers.pdf.rule=Host(`pdf.lago.dev`)"
212 | - "traefik.http.routers.pdf.entrypoints=web,websecure"
213 | - "traefik.http.routers.pdf.tls=true"
214 | - "traefik.http.services.pdf.loadbalancer.server.port=3000"
215 |
216 | mailhog:
217 | image: mailhog/mailhog
218 | container_name: lago_mailhog_dev
219 | restart: unless-stopped
220 | platform: linux/amd64
221 | labels:
222 | - "traefik.enable=true"
223 | - "traefik.http.routers.mail.rule=Host(`mail.lago.dev`)"
224 | - "traefik.http.routers.mail.entrypoints=websecure"
225 | - "traefik.http.routers.mail.tls=true"
226 | - "traefik.http.services.mail.loadbalancer.server.port=8025"
227 |
228 | redpanda:
229 | image: docker.redpanda.com/redpandadata/redpanda:v23.2.9
230 | container_name: lago_redpanda_dev
231 | restart: unless-stopped
232 | hostname: redpanda
233 | command:
234 | - redpanda start
235 | - --smp 1
236 | - --overprovisioned
237 | - --kafka-addr internal://0.0.0.0:9092,external://0.0.0.0:19092
238 | - --advertise-kafka-addr internal://redpanda:9092,external://localhost:19092
239 | volumes:
240 | - redpanda_data_dev:/var/lib/redpanda/data
241 | ports:
242 | - 9092:9092
243 | - 19092:19092
244 |
245 | redpandacreatetopics:
246 | image: docker.redpanda.com/redpandadata/redpanda:v23.2.9
247 | depends_on:
248 | - redpanda
249 | entrypoint: >
250 | rpk topic create events-raw events_enriched events_charged_in_advance events_dead_letter activity_logs --brokers redpanda:9092
251 |
252 | redpanda-console:
253 | image: docker.redpanda.com/redpandadata/console:v2.3.1
254 | container_name: lago_redpanda_console_dev
255 | entrypoint: /bin/sh
256 | command: -c "echo \"$$CONSOLE_CONFIG_FILE\" > /tmp/config.yml; /app/console"
257 | environment:
258 | CONFIG_FILEPATH: /tmp/config.yml
259 | CONSOLE_CONFIG_FILE: |
260 | kafka:
261 | brokers: ["redpanda:9092"]
262 | schemaRegistry:
263 | enabled: false
264 | urls: ["http://redpanda:8081"]
265 | redpanda:
266 | adminApi:
267 | enabled: true
268 | urls: ["http://redpanda:9644"]
269 | depends_on:
270 | - redpanda
271 | labels:
272 | - "traefik.enable=true"
273 | - "traefik.http.routers.console.rule=Host(`console.lago.dev`)"
274 | - "traefik.http.routers.console.entrypoints=websecure"
275 | - "traefik.http.routers.console.tls=true"
276 | - "traefik.http.services.console.loadbalancer.server.port=8080"
277 |
278 | clickhouse:
279 | image: clickhouse/clickhouse-server
280 | container_name: lago_clickhouse_dev
281 | restart: unless-stopped
282 | hostname: clickhouse
283 | user: "101:101"
284 | depends_on:
285 | - db
286 | - redpanda
287 | - redpandacreatetopics
288 | volumes:
289 | - clickhouse_data_dev:/var/lib/clickhouse
290 | - ./extra/clickhouse/config.d/config.xml:/etc/clickhouse-server/config.d/config.xml
291 | - ./extra/clickhouse/users.d/users.xml:/etc/clickhouse-server/users.d/users.xml
292 | ports:
293 | - 9000:9000
294 | - 8123:8123
295 |
296 | migrate:
297 | container_name: lago-migrate_dev
298 | image: api_dev
299 | depends_on:
300 | - db
301 | command: ["./scripts/migrate.dev.sh"]
302 | build:
303 | context: ./api
304 | dockerfile: $LAGO_PATH/api/Dockerfile.dev
305 | volumes:
306 | - $LAGO_PATH/api:/app:delegated
307 | environment:
308 | - DATABASE_URL=postgresql://${POSTGRES_USER:-lago}:${POSTGRES_PASSWORD:-changeme}@db:5432/${POSTGRES_DB:-lago}
309 |
--------------------------------------------------------------------------------
/docker-compose.yml:
--------------------------------------------------------------------------------
1 | volumes:
2 | lago_postgres_data:
3 | lago_redis_data:
4 | lago_storage_data:
5 |
6 | x-postgres-image: &postgres-image
7 | image: postgres:14-alpine
8 | x-redis-image: &redis-image
9 | image: redis:6-alpine
10 | x-backend-image: &backend-image
11 | image: getlago/api:v1.29.0
12 | x-frontend-image: &frontend-image
13 | image: getlago/front:v1.29.0
14 |
15 | x-lago-api-url: &lago-api-url
16 | "LAGO_API_URL": ${LAGO_API_URL:-http://localhost:3000}
17 | x-lago-front-url: &lago-front-url
18 | "LAGO_FRONT_URL": ${LAGO_FRONT_URL:-http://localhost}
19 |
20 | x-backend-environment: &backend-env
21 | "DATABASE_URL": postgresql://${POSTGRES_USER:-lago}:${POSTGRES_PASSWORD:-changeme}@${POSTGRES_HOST:-db}:${POSTGRES_PORT:-5432}/${POSTGRES_DB:-lago}?search_path=${POSTGRES_SCHEMA:-public}
22 | "REDIS_URL": redis://${REDIS_HOST:-redis}:${REDIS_PORT:-6379}
23 | "REDIS_PASSWORD": ${REDIS_PASSWORD:-}
24 | "SECRET_KEY_BASE": ${SECRET_KEY_BASE:-your-secret-key-base-hex-64}
25 | "RAILS_ENV": production
26 | "RAILS_LOG_TO_STDOUT": ${LAGO_RAILS_STDOUT:-true}
27 | "LAGO_RSA_PRIVATE_KEY": ${LAGO_RSA_PRIVATE_KEY}
28 | "LAGO_SIDEKIQ_WEB": ${LAGO_SIDEKIQ_WEB:-true}
29 | "LAGO_ENCRYPTION_PRIMARY_KEY": ${LAGO_ENCRYPTION_PRIMARY_KEY:-your-encryption-primary-key}
30 | "LAGO_ENCRYPTION_DETERMINISTIC_KEY": ${LAGO_ENCRYPTION_DETERMINISTIC_KEY:-your-encryption-deterministic-key}
31 | "LAGO_ENCRYPTION_KEY_DERIVATION_SALT": ${LAGO_ENCRYPTION_KEY_DERIVATION_SALT:-your-encryption-derivation-salt}
32 | "LAGO_USE_AWS_S3": ${LAGO_USE_AWS_S3:-false}
33 | "LAGO_AWS_S3_ACCESS_KEY_ID": ${LAGO_AWS_S3_ACCESS_KEY_ID:-azerty123456}
34 | "LAGO_AWS_S3_SECRET_ACCESS_KEY": ${LAGO_AWS_S3_SECRET_ACCESS_KEY:-azerty123456}
35 | "LAGO_AWS_S3_REGION": ${LAGO_AWS_S3_REGION:-us-east-1}
36 | "LAGO_AWS_S3_BUCKET": ${LAGO_AWS_S3_BUCKET:-bucket}
37 | "LAGO_AWS_S3_ENDPOINT": ${LAGO_AWS_S3_ENDPOINT}
38 | "LAGO_USE_GCS": ${LAGO_USE_GCS:-false}
39 | "LAGO_GCS_PROJECT": ${LAGO_GCS_PROJECT:-}
40 | "LAGO_GCS_BUCKET": ${LAGO_GCS_BUCKET:-}
41 | "LAGO_FROM_EMAIL": ${LAGO_FROM_EMAIL:-}
42 | "LAGO_SMTP_ADDRESS": ${LAGO_SMTP_ADDRESS:-}
43 | "LAGO_SMTP_PORT": ${LAGO_SMTP_PORT:-587}
44 | "LAGO_SMTP_USERNAME": ${LAGO_SMTP_USERNAME:-}
45 | "LAGO_SMTP_PASSWORD": ${LAGO_SMTP_PASSWORD:-}
46 | "LAGO_PDF_URL": ${LAGO_PDF_URL:-http://pdf:3000}
47 | "LAGO_DATA_API_URL": ${LAGO_DATA_API_URL:-http://data-api}
48 | "LAGO_DATA_API_BEARER_TOKEN": ${LAGO_DATA_API_BEARER_TOKEN:-}
49 | "LAGO_REDIS_CACHE_URL": redis://${LAGO_REDIS_CACHE_HOST:-redis}:${LAGO_REDIS_CACHE_PORT:-6379}
50 | "LAGO_REDIS_CACHE_PASSWORD": ${LAGO_REDIS_CACHE_PASSWORD}
51 | "LAGO_DISABLE_SEGMENT": ${LAGO_DISABLE_SEGMENT}
52 | "LAGO_DISABLE_WALLET_REFRESH": ${LAGO_DISABLE_WALLET_REFRESH}
53 | "LAGO_DISABLE_SIGNUP": ${LAGO_DISABLE_SIGNUP:-false}
54 | "LAGO_DISABLE_PDF_GENERATION": ${LAGO_DISABLE_PDF_GENERATION:-false}
55 | "LAGO_OAUTH_PROXY_URL": https://proxy.getlago.com
56 | "LAGO_LICENSE": ${LAGO_LICENSE:-}
57 | "LAGO_CREATE_ORG": ${LAGO_CREATE_ORG:-false}
58 | "LAGO_ORG_USER_PASSWORD": ${LAGO_ORG_USER_PASSWORD:-}
59 | "LAGO_ORG_USER_EMAIL": ${LAGO_ORG_USER_EMAIL:-}
60 | "LAGO_ORG_NAME": ${LAGO_ORG_NAME:-}
61 | "LAGO_ORG_API_KEY": ${LAGO_ORG_API_KEY:-}
62 | "GOOGLE_AUTH_CLIENT_ID": ${GOOGLE_AUTH_CLIENT_ID:-}
63 | "GOOGLE_AUTH_CLIENT_SECRET": ${GOOGLE_AUTH_CLIENT_SECRET:-}
64 |   # "SIDEKIQ_EVENTS": "true"
65 |   # "SIDEKIQ_PDFS": "true"
66 |   # "SIDEKIQ_BILLING": "true"
67 |   # "SIDEKIQ_CLOCK": "true"
68 |   # "SIDEKIQ_WEBHOOK": "true"
69 | x-frontend-environment: &frontend-env
70 | "API_URL": ${LAGO_API_URL:-http://localhost:3000}
71 | "APP_ENV": production
72 | "LAGO_OAUTH_PROXY_URL": https://proxy.getlago.com
73 | "LAGO_DISABLE_PDF_GENERATION": ${LAGO_DISABLE_PDF_GENERATION:-false}
74 |
75 | services:
76 | db:
77 | <<: *postgres-image
78 | container_name: lago-db
79 | restart: unless-stopped
80 | healthcheck:
81 | test:
82 | [
83 | "CMD-SHELL",
84 | "pg_isready -U ${POSTGRES_USER:-lago} -d ${POSTGRES_DB:-lago} -h localhost -p ${POSTGRES_PORT:-5432}",
85 | ]
86 | interval: 10s
87 | timeout: 5s
88 | retries: 5
89 | environment:
90 | POSTGRES_DB: ${POSTGRES_DB:-lago}
91 | POSTGRES_USER: ${POSTGRES_USER:-lago}
92 | POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-changeme}
93 | PGDATA: /data/postgres
94 | PGPORT: ${POSTGRES_PORT:-5432}
95 | POSTGRES_SCHEMA: public
96 | volumes:
97 | - lago_postgres_data:/data/postgres
98 | ports:
99 | - ${POSTGRES_PORT:-5432}:${POSTGRES_PORT:-5432}
100 |
101 | redis:
102 | <<: *redis-image
103 | container_name: lago-redis
104 | restart: unless-stopped
105 | healthcheck:
106 | test: ["CMD", "redis-cli", "-p", "${REDIS_PORT:-6379}", "ping"]
107 | interval: 10s
108 | timeout: 5s
109 | retries: 5
110 | command: --port ${REDIS_PORT:-6379}
111 | volumes:
112 | - lago_redis_data:/data
113 | ports:
114 | - ${REDIS_PORT:-6379}:${REDIS_PORT:-6379}
115 |
116 | migrate:
117 | <<: *backend-image
118 | container_name: lago-migrate
119 | depends_on:
120 | db:
121 | condition: service_healthy
122 | restart: true
123 | command: ["./scripts/migrate.sh"]
124 | environment:
125 | <<: *backend-env
126 |
127 | api:
128 | <<: *backend-image
129 | container_name: lago-api
130 | restart: unless-stopped
131 | depends_on:
132 | migrate:
133 | condition: service_completed_successfully
134 | db:
135 | condition: service_healthy
136 | restart: true
137 | redis:
138 | condition: service_healthy
139 | restart: true
140 | command: ["./scripts/start.api.sh"]
141 | healthcheck:
142 | test: curl -f http://localhost:3000/health || exit 1
143 | interval: 10s
144 | start_period: 30s
145 | timeout: 60s
146 | start_interval: 2s
147 | environment:
148 | <<: [*backend-env, *lago-api-url, *lago-front-url]
149 | volumes:
150 | - lago_storage_data:/app/storage
151 | ports:
152 | - ${API_PORT:-3000}:3000
153 |
154 | front:
155 | <<: *frontend-image
156 | container_name: lago-front
157 | restart: unless-stopped
158 | # Use this command if you want to use SSL with Let's Encrypt
159 | # command: "/bin/sh -c 'while :; do sleep 6h & wait $${!}; nginx -s reload; done & nginx -g \"daemon off;\"'"
160 | depends_on:
161 | api:
162 | condition: service_healthy
163 | restart: true
164 | environment:
165 | <<: [*frontend-env, *lago-api-url, *lago-front-url]
166 | ports:
167 | - ${FRONT_PORT:-80}:80
168 | # - 443:443
169 | # Using SSL with Let's Encrypt
170 | # volumes:
171 | # - ./extra/nginx-letsencrypt.conf:/etc/nginx/conf.d/default.conf
172 | # - ./extra/certbot/conf:/etc/letsencrypt
173 | # - ./extra/certbot/www:/var/www/certbot
174 | # Using SSL with self signed certificates
175 | # volumes:
176 | # - ./extra/nginx-selfsigned.conf:/etc/nginx/conf.d/default.conf
177 | # - ./extra/ssl/nginx-selfsigned.crt:/etc/ssl/certs/nginx-selfsigned.crt
178 | # - ./extra/ssl/nginx-selfsigned.key:/etc/ssl/private/nginx-selfsigned.key
179 | # - ./extra/ssl/dhparam.pem:/etc/ssl/certs/dhparam.pem
180 |
181 | # Only used for SSL support with Let's Encrypt
182 | # certbot:
183 | # image: certbot/certbot
184 | # entrypoint: "/bin/sh -c 'trap exit TERM; while :; do certbot renew; sleep 12h & wait $${!}; done;'"
185 | # volumes:
186 | # - ./extra/certbot/conf:/etc/letsencrypt
187 | # - ./extra/certbot/www:/var/www/certbot
188 |
189 | api-worker:
190 | <<: *backend-image
191 | container_name: lago-worker
192 | restart: unless-stopped
193 | depends_on:
194 | migrate:
195 | condition: service_completed_successfully
196 | db:
197 | condition: service_healthy
198 | restart: true
199 | redis:
200 | condition: service_healthy
201 | restart: true
202 | command: ["./scripts/start.worker.sh"]
203 | healthcheck:
204 | test: curl -f http://localhost:8080 || exit 1
205 | interval: 10s
206 | start_period: 30s
207 | timeout: 60s
208 | start_interval: 2s
209 | environment:
210 | <<: [*backend-env, *lago-api-url]
211 | volumes:
212 | - lago_storage_data:/app/storage
213 |
214 |   # You can uncomment this if you want to use a dedicated Sidekiq worker for event ingestion.
215 |   # It is recommended when you process a high volume of events, so the other Sidekiq jobs are not impacted.
216 | #api-events-worker:
217 | # <<: *backend-image
218 | # container_name: lago-events-worker
219 | # restart: unless-stopped
220 | # depends_on:
221 | # api:
222 | # condition: service_healthy
223 | # command: ["./scripts/start.events.worker.sh"]
224 | # environment:
225 | # <<: [*backend-env, *lago-api-url]
226 |
227 |   # You can uncomment this if you want to use a dedicated Sidekiq worker for invoice PDF creation.
228 |   # It is recommended when you generate a high volume of invoices, so the other Sidekiq jobs are not impacted.
229 | #api-pdfs-worker:
230 | # <<: *backend-image
231 | # container_name: lago-pdfs-worker
232 | # restart: unless-stopped
233 | # depends_on:
234 | # api:
235 | # condition: service_healthy
236 | # command: ["./scripts/start.pdfs.worker.sh"]
237 | # environment:
238 | # <<: [*backend-env, *lago-api-url]
239 |
240 |   # You can uncomment this if you want to use a dedicated Sidekiq worker for invoice creation.
241 |   # It is recommended when you create a high volume of invoices, so the other Sidekiq jobs are not impacted.
242 | #api-billing-worker:
243 | # <<: *backend-image
244 | # container_name: lago-billing-worker
245 | # restart: unless-stopped
246 | # depends_on:
247 | # api:
248 | # condition: service_healthy
249 | # command: ["./scripts/start.billing.worker.sh"]
250 | # environment:
251 | # <<: [*backend-env, *lago-api-url]
252 |
253 | # You can uncomment this if you want to use a dedicated Sidekiq worker for the clock jobs.
254 | #api-clock-worker:
255 | # <<: *backend-image
256 | # container_name: lago-clock-worker
257 | # restart: unless-stopped
258 | # depends_on:
259 | # api:
260 | # condition: service_healthy
261 | # command: ["./scripts/start.clock.worker.sh"]
262 | # environment:
263 | # <<: [*backend-env, *lago-api-url]
264 |
265 | # You can uncomment this if you want to use a dedicated Sidekiq worker for the webhook jobs.
266 | #api-webhook-worker:
267 | # <<: *backend-image
268 | # container_name: lago-webhook-worker
269 | # restart: unless-stopped
270 | # depends_on:
271 | # api:
272 | # condition: service_healthy
273 | # command: ["./scripts/start.webhook.worker.sh"]
274 | # environment:
275 | # <<: [*backend-env, *lago-api-url]
276 |
277 | api-clock:
278 | <<: *backend-image
279 | container_name: lago-clock
280 | restart: unless-stopped
281 | depends_on:
282 | migrate:
283 | condition: service_completed_successfully
284 | db:
285 | condition: service_healthy
286 | restart: true
287 | redis:
288 | condition: service_healthy
289 | restart: true
290 | command: ["./scripts/start.clock.sh"]
291 | environment:
292 | <<: [*backend-env, *lago-api-url]
293 |
294 | pdf:
295 | image: getlago/lago-gotenberg:7.8.2
296 |
--------------------------------------------------------------------------------
/docker/Dockerfile:
--------------------------------------------------------------------------------
1 | ARG NODE_VERSION=20
2 | ARG RUBY_VERSION=3.4.4
3 |
4 | # Front Build
5 | FROM node:$NODE_VERSION-alpine AS front_build
6 |
7 | WORKDIR /app
8 |
9 | COPY ./front/ .
10 |
11 | RUN apk add python3 build-base && \
12 | corepack enable && corepack prepare pnpm@latest --activate && \
13 | rm -rf /app/node_modules && \
14 | pnpm install && pnpm build && pnpm prune --prod
15 |
16 | # API Build
17 | FROM ruby:$RUBY_VERSION-slim AS api_build
18 |
19 | ENV BUNDLER_VERSION='2.5.5'
20 | ENV PATH="$PATH:/root/.cargo/bin/"
21 |
22 | WORKDIR /app
23 |
24 | RUN apt-get update && apt-get upgrade -y && \
25 |   apt-get install nodejs curl build-essential git pkg-config libpq-dev libclang-dev libyaml-dev -y && \
26 | curl https://sh.rustup.rs -sSf | bash -s -- -y
27 |
28 | COPY ./api/Gemfile /app/Gemfile
29 | COPY ./api/Gemfile.lock /app/Gemfile.lock
30 |
31 | RUN gem install bundler --no-document -v "$BUNDLER_VERSION" && \
32 | gem install foreman && \
33 | bundle config build.nokogiri --use-system-libraries &&\
34 | bundle install --jobs=3 --retry=3 --without development test
35 |
36 | # Final Image
37 | FROM ruby:$RUBY_VERSION-slim
38 |
39 | WORKDIR /app
40 |
41 | RUN apt-get update -y && \
42 | apt-get install curl ca-certificates gnupg software-properties-common -y && \
43 |   curl -fsSL https://www.postgresql.org/media/keys/ACCC4CF8.asc | gpg --dearmor | tee /usr/share/keyrings/postgresql-archive-keyring.gpg > /dev/null && \
44 |   echo "deb [arch=amd64,arm64,ppc64el signed-by=/usr/share/keyrings/postgresql-archive-keyring.gpg] http://apt.postgresql.org/pub/repos/apt $(lsb_release -cs)-pgdg main" | tee /etc/apt/sources.list.d/pgdg.list > /dev/null && \
45 | curl -fsSL https://download.docker.com/linux/debian/gpg -o /etc/apt/keyrings/docker.asc && \
46 | chmod a+r /etc/apt/keyrings/docker.asc && \
47 | echo "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.asc] https://download.docker.com/linux/debian $(. /etc/os-release && echo "$VERSION_CODENAME") stable" | tee /etc/apt/sources.list.d/docker.list > /dev/null && \
48 | apt-get update && \
49 | apt-get install nginx xz-utils git libpq-dev postgresql-15 redis-server docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin -y && \
50 | apt-get remove software-properties-common apt-transport-https -y
51 |
52 | COPY ./docker/nginx.conf /etc/nginx/sites-enabled/default
53 |
54 | COPY --from=front_build /app/dist /app/front
55 | COPY --from=api_build /usr/local/bundle/ /usr/local/bundle
56 |
57 | COPY ./front/.env.sh ./front/.env.sh
58 | COPY ./api ./api
59 | COPY ./docker/Procfile ./api/Procfile
60 | COPY ./docker/runner.sh ./runner.sh
61 |
62 | EXPOSE 80
63 | EXPOSE 3000
64 | VOLUME /data
65 |
66 | ENTRYPOINT ["./runner.sh"]
67 |
--------------------------------------------------------------------------------
/docker/Procfile:
--------------------------------------------------------------------------------
1 | web: bundle exec rails s -b :: -p 3000
2 | worker: bundle exec sidekiq -C config/sidekiq/sidekiq.yml
3 | clock: bundle exec clockwork ./clock.rb
4 |
--------------------------------------------------------------------------------
/docker/README.md:
--------------------------------------------------------------------------------
1 | # Lago Docker Image
2 |
3 | This is the official all-in-one Docker image for Lago.
4 |
5 | This docker image is designed for testing and staging environments only. It is not recommended for production use due to its simplified architecture and resource constraints. For production deployments, please refer to our deployment guides in the `deploy` folder or use our [helm chart](https://github.com/getlago/lago-helm-charts) for a more robust and scalable setup.
6 |
7 | ## Features
8 |
9 | This Docker image embeds everything needed to run Lago with a single command, to ease deployment.
10 | Here are the services running inside the container:
11 | - PostgreSQL
12 | - Redis
13 | - Lago UI
14 | - Lago API
15 | - Lago Worker
16 | - Lago Clock
17 | - PDF Service (optional)
18 |
19 | ## Get Started
20 |
21 | ```bash
22 | docker run -d --name lago -p 80:80 -p 3000:3000 getlago/lago:latest
23 | ```
24 |
25 | PDF generation is disabled by default. We use [Gotenberg](https://github.com/gotenberg/gotenberg), which is only available as a Docker image. To enable PDF generation, use the following command:
26 |
27 | ```bash
28 | docker run -d --name lago -v /var/run/docker.sock:/var/run/docker.sock -p 80:80 -p 3000:3000 getlago/lago:latest
29 | ```
30 |
31 | You will then see another Docker container named `lago-pdf` running.
32 |
33 | ## Using External Services
34 |
35 | You can use external services for the database and the Redis instance.
36 | Here are the environment variables to pass to the container to use them (see the example below):
37 |
38 | | Env Var | Description | Default |
39 | |---------|-------------|---------|
40 | | DATABASE_URL | The URL of the database | postgres://lago:lago@localhost:5432/lago |
41 | | REDIS_URL | The URL of the Redis instance | redis://localhost:6379/0 |
42 |
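43 | For example, assuming an external PostgreSQL server and Redis instance reachable from the container (the hostnames and password below are placeholders):
44 | 
45 | ```bash
46 | docker run -d --name lago \
47 |   -p 80:80 -p 3000:3000 \
48 |   -e DATABASE_URL="postgres://lago:changeme@db.example.internal:5432/lago" \
49 |   -e REDIS_URL="redis://redis.example.internal:6379/0" \
50 |   getlago/lago:latest
51 | ```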
43 |
44 | ## Storage
45 |
46 | The container uses a volume to store its data; you can mount it on your host to keep the data safe, as shown in the example below.
47 | You will find a folder for each service under `/data`.
48 |
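49 | A minimal sketch binding the `/data` volume to a host directory (the host path is an example):
50 | 
51 | ```bash
52 | docker run -d --name lago \
53 |   -p 80:80 -p 3000:3000 \
54 |   -v /opt/lago/data:/data \
55 |   getlago/lago:latest
56 | ```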
49 |
50 | ## SSL
51 |
52 | SSL is disabled by default in this development/staging image. To enable SSL support when using a proxy or load balancer:
53 |
54 | For new installations:
55 | - Add the environment variable `LAGO_DISABLE_SSL=false` when running the container
56 | ```bash
57 | docker run -e LAGO_DISABLE_SSL=false ...
58 | ```
59 |
60 | For existing installations:
61 | - Navigate to your `/data` volume
62 | - Edit the `.env` file to change `LAGO_DISABLE_SSL=true` to `LAGO_DISABLE_SSL=false`
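63 | 
64 | For example, a sketch using `docker exec` (adjust the container name if yours differs):
65 | 
66 | ```bash
67 | docker exec lago sed -i 's/LAGO_DISABLE_SSL=true/LAGO_DISABLE_SSL=false/' /data/.env
68 | docker restart lago
69 | ```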
63 |
64 | :warning: Note that this only enables SSL support - you must still configure SSL certificates and termination through your reverse proxy or load balancer.
65 |
66 | ## Logs
67 |
68 | Database logs (creation, migration) are stored in the `/data/db.log` file.
69 | Application logs are streamed to standard output.
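70 | 
71 | For example, to follow both (assuming the container is named `lago` as in the Get Started command):
72 | 
73 | ```bash
74 | # Application logs, streamed to stdout
75 | docker logs -f lago
76 | # Database creation/migration logs
77 | docker exec lago tail -f /data/db.log
78 | ```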
70 |
71 | ## Contributing
72 |
73 | This Docker image is a work in progress.
74 | Feel free to open issues or PRs to improve it or to request new features.
75 |
--------------------------------------------------------------------------------
/docker/nginx.conf:
--------------------------------------------------------------------------------
1 | server {
2 | listen 80;
3 | listen [::]:80;
4 |
5 | location / {
6 | root /app/front;
7 | index index.html index.htm;
8 | try_files $uri $uri/ /index.html =404;
9 | }
10 |
11 | include /etc/nginx/extra-conf.d/*.conf;
12 | }
13 |
--------------------------------------------------------------------------------
/docker/redis.conf:
--------------------------------------------------------------------------------
1 | bind 127.0.0.1 -::1
2 | protected-mode yes
3 | port 6379
4 | tcp-backlog 511
5 | timeout 0
6 | tcp-keepalive 300
7 | daemonize yes
8 | supervised auto
9 | pidfile /run/redis/redis-server.pid
10 | loglevel notice
11 | logfile /var/log/redis/redis-server.log
12 | databases 16
13 | always-show-logo no
14 | set-proc-title yes
15 | proc-title-template "{title} {listen-addr} {server-mode}"
16 | locale-collate ""
17 | stop-writes-on-bgsave-error yes
18 | rdbcompression yes
19 | rdbchecksum yes
20 | dbfilename dump.rdb
21 | rdb-del-sync-files no
22 | dir DATA_DIR/redis
23 | replica-serve-stale-data yes
24 | replica-read-only yes
25 | repl-diskless-sync yes
26 | repl-diskless-sync-delay 5
27 | repl-diskless-sync-max-replicas 0
28 | repl-diskless-load disabled
29 | repl-disable-tcp-nodelay no
30 | replica-priority 100
31 | acllog-max-len 128
32 | lazyfree-lazy-eviction no
33 | lazyfree-lazy-expire no
34 | lazyfree-lazy-server-del no
35 | replica-lazy-flush no
36 | lazyfree-lazy-user-del no
37 | lazyfree-lazy-user-flush no
38 | oom-score-adj no
39 | oom-score-adj-values 0 200 800
40 | disable-thp yes
41 | appendonly yes
42 | appendfilename "appendonly.aof"
43 | appenddirname "appendonlydir"
44 | appendfsync everysec
45 | no-appendfsync-on-rewrite no
46 | auto-aof-rewrite-percentage 100
47 | auto-aof-rewrite-min-size 64mb
48 | aof-load-truncated yes
49 | aof-use-rdb-preamble yes
50 | aof-timestamp-enabled no
51 | slowlog-log-slower-than 10000
52 | slowlog-max-len 128
53 | latency-monitor-threshold 0
54 | notify-keyspace-events ""
55 | hash-max-listpack-entries 512
56 | hash-max-listpack-value 64
57 | list-max-listpack-size -2
58 | list-compress-depth 0
59 | set-max-intset-entries 512
60 | set-max-listpack-entries 128
61 | set-max-listpack-value 64
62 | zset-max-listpack-entries 128
63 | zset-max-listpack-value 64
64 | hll-sparse-max-bytes 3000
65 | stream-node-max-bytes 4096
66 | stream-node-max-entries 100
67 | activerehashing yes
68 | client-output-buffer-limit normal 0 0 0
69 | client-output-buffer-limit replica 256mb 64mb 60
70 | client-output-buffer-limit pubsub 32mb 8mb 60
71 | hz 10
72 | dynamic-hz yes
73 | aof-rewrite-incremental-fsync yes
74 | rdb-save-incremental-fsync yes
75 | jemalloc-bg-thread yes
76 |
--------------------------------------------------------------------------------
/docker/runner.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | echo "Starting Lago..."
4 |
5 | declare -A ENV_VARS=(
6 | [RAILS_ENV]="production"
7 | [RAILS_LOG_TO_STDOUT]="true"
8 | [POSTGRES_PASSWORD]=$(openssl rand -hex 16)
9 | [SECRET_KEY_BASE]=$(openssl rand -base64 16)
10 | [LAGO_RSA_PRIVATE_KEY]=$(openssl genrsa 2048 | base64 | tr -d '\n')
11 | [LAGO_DISABLE_SSL]="true"
12 | [LAGO_ENCRYPTION_PRIMARY_KEY]=$(openssl rand -hex 16)
13 | [LAGO_ENCRYPTION_DETERMINISTIC_KEY]=$(openssl rand -hex 16)
14 | [LAGO_ENCRYPTION_KEY_DERIVATION_SALT]=$(openssl rand -hex 16)
15 | [REDIS_URL]="redis://localhost:6379/0"
16 | [LAGO_FRONT_URL]="http://localhost"
17 | [LAGO_API_URL]="http://localhost:3000"
18 | [API_URL]="http://localhost:3000"
19 | [LAGO_PDF_URL]="http://host.docker.internal:3001"
20 | [APP_ENV]="production"
21 | )
22 |
23 | if [ -f "/data/.env" ]; then
24 | for LINE in $(cat /data/.env); do export $LINE; done
25 | fi
26 |
27 | # Configure data directories
28 | if [ -z "${DATA_DIR}" ]; then
29 | export DATA_DIR=/data
30 | mkdir -p ${DATA_DIR}
31 | mkdir -p ${DATA_DIR}/redis
32 | chown redis:redis ${DATA_DIR}/redis
33 | mkdir -p ${DATA_DIR}/postgresql
34 | touch ${DATA_DIR}/db.log
35 | touch ${DATA_DIR}/.env
36 | echo "DATA_DIR=${DATA_DIR}" >> ${DATA_DIR}/.env
37 | fi
38 |
39 | # Configure Redis
40 | sed -i "s#DATA_DIR#${DATA_DIR}#g" /etc/redis/redis.conf
41 |
42 | # Configure PG
43 | export PGDATA="${DATA_DIR}/postgresql"
44 | export PGPORT=5432
45 |
46 | # Start Redis, PG and Nginx
47 | service redis-server start >> /dev/null
48 | service postgresql restart >> /dev/null
49 | service nginx restart >> /dev/null
50 |
51 | # PDF Service
52 | if df -hT | grep -q docker.sock; then
53 |   if docker ps --filter "name=lago-pdf" | grep -q lago-pdf; then
54 | docker stop lago-pdf > /dev/null
55 | docker rm lago-pdf > /dev/null
56 | fi
57 | docker run -d --name lago-pdf -p 3001:3000 getlago/lago-gotenberg:8 > /dev/null
58 | else
59 | echo "WARN: Docker socket is not mounted. Skipping PDF service."
60 | fi
61 |
62 | # Prepare Environment
63 | # Defaulting values
64 | for VAR in "${!ENV_VARS[@]}"; do
65 | if [ -z "${!VAR}" ]; then
66 | export $VAR=${ENV_VARS[$VAR]}
67 | echo "$VAR=${ENV_VARS[$VAR]}" >> ${DATA_DIR}/.env
68 | fi
69 | done
70 |
71 | if [ -z "${DATABASE_URL}" ]; then
72 | export DATABASE_URL=postgresql://lago:$POSTGRES_PASSWORD@localhost:5432/lago
73 | echo "DATABASE_URL=${DATABASE_URL}" >> ${DATA_DIR}/.env
74 | fi
75 |
76 | # Prepare Front Environment
77 | cd ./front
78 | bash -c ./.env.sh
79 | cd ..
80 |
81 | export RAILS_ENV=production
82 |
83 | # Create DB User
84 | su -c "psql -tc \"SELECT 1 FROM pg_user WHERE usename = 'lago';\" | grep -q 1 || psql -c \"CREATE ROLE lago PASSWORD '${POSTGRES_PASSWORD}' CREATEDB LOGIN;\"" postgres >> ${DATA_DIR}/db.log
85 |
86 | # Launch BE Services
87 | cd ./api
88 | bundle exec rake db:create >> ${DATA_DIR}/db.log
89 | bundle exec rake db:migrate >> ${DATA_DIR}/db.log
90 | bundle exec rails signup:seed_organization >> ${DATA_DIR}/db.log
91 | rm -f ./tmp/pids/server.pid
92 | foreman start
93 |
--------------------------------------------------------------------------------
/events-processor/.air.toml:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/events-processor/.gitignore:
--------------------------------------------------------------------------------
1 | # If you prefer the allow list template instead of the deny list, see community template:
2 | # https://github.com/github/gitignore/blob/main/community/Golang/Go.AllowList.gitignore
3 | #
4 | # Binaries for programs and plugins
5 | *.exe
6 | *.exe~
7 | *.dll
8 | *.so
9 | *.dylib
10 |
11 | # Test binary, built with `go test -c`
12 | *.test
13 |
14 | # Output of the go coverage tool, specifically when used with LiteIDE
15 | *.out
16 |
17 | # Dependency directories (remove the comment below to include it)
18 | # vendor/
19 |
20 | # Go workspace file
21 | go.work
22 |
23 | .env
24 | events-processor
25 | tmp/
26 |
--------------------------------------------------------------------------------
/events-processor/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM rust:1.82 AS rust-build
2 |
3 | RUN git clone https://github.com/getlago/lago-expression/
4 | WORKDIR /lago-expression/expression-go
5 | RUN git checkout v0.1.4 && cargo build --release
6 |
7 | FROM golang:1.24 AS go-build
8 |
9 | WORKDIR /app
10 |
11 | COPY . /app/
12 |
13 | RUN go mod download
14 | COPY --from=rust-build /lago-expression/target/release/libexpression_go.so /usr/lib/libexpression_go.so
15 | RUN go build -o event_processors .
16 |
17 | FROM debian:bookworm
18 |
19 | RUN apt-get update && apt-get upgrade -y && apt-get install -y ca-certificates
20 |
21 | WORKDIR /app
22 | COPY --from=rust-build /lago-expression/target/release/libexpression_go.so /usr/lib/libexpression_go.so
23 | COPY --from=go-build /app/event_processors /app/event_processors
24 | ENTRYPOINT ["./event_processors"]
25 |
--------------------------------------------------------------------------------
/events-processor/Dockerfile.dev:
--------------------------------------------------------------------------------
1 | FROM rust:1.85 AS rust-build
2 |
3 | RUN git clone https://github.com/getlago/lago-expression/
4 | WORKDIR /lago-expression/expression-go
5 | RUN git checkout v0.1.4 && cargo build --release
6 |
7 | FROM golang:1.24 AS go-build
8 |
9 | WORKDIR /app
10 |
11 | RUN go install github.com/go-delve/delve/cmd/dlv@latest
12 | RUN go install github.com/air-verse/air@latest
13 |
14 | COPY --from=rust-build /lago-expression/target/release/libexpression_go.so /usr/lib/libexpression_go.so
15 | COPY go.mod go.sum ./
16 | RUN go mod download
17 |
18 | # Expose the debugging port
19 | EXPOSE 2345
20 |
21 | CMD ["air", "-c", ".air.toml"]
22 |
--------------------------------------------------------------------------------
/events-processor/README.md:
--------------------------------------------------------------------------------
1 | # Lago Events Processor
2 |
3 | High-throughput events processor for Lago.
4 | This service is in charge of post-processing events in high-volume scenarios.
5 | 
6 | This service needs to be configured with ClickHouse and Redpanda. Please contact us for further information.
7 |
8 |
9 | ## How to run it
10 |
11 | With the Docker Compose environment running, build and start the processor:
12 |
13 | ```shell
14 | go build -o event_processors .
15 |
16 | ./event_processors
17 | ```
18 |
19 | ## In development
20 |
21 | ```shell
22 | lago up -d events-processor
23 | ```
24 |
25 | ## Configuration
26 |
27 | This app requires the following environment variables (a sample configuration follows the table):
28 |
29 | | Variable | Description |
30 | |---|---|
31 | | ENV | Set to `production` to skip loading the `.env` file |
32 | | DATABASE_URL | PostgreSQL server URL (eg: `postgresql://lago_user:lago_password@lago_server:5432/lago_db`) |
33 | | LAGO_KAFKA_BOOTSTRAP_SERVERS | Kafka broker URL with port (eg: `redpanda:9092`) |
34 | | LAGO_KAFKA_USERNAME | If your broker needs auth, your Kafka username |
35 | | LAGO_KAFKA_PASSWORD | If your broker needs auth, your Kafka password |
36 | | LAGO_KAFKA_SCRAM_ALGORITHM | Your broker's SCRAM algorithm; supported values are `SCRAM-SHA-256` and `SCRAM-SHA-512`. If you provide a SCRAM algorithm, `LAGO_KAFKA_USERNAME` and `LAGO_KAFKA_PASSWORD` are required |
37 | | LAGO_KAFKA_TLS | Set to `true` if your broker uses TLS termination |
38 | | LAGO_KAFKA_RAW_EVENTS_TOPIC | Raw events Kafka topic (eg: `events_raw`) |
39 | | LAGO_KAFKA_ENRICHED_EVENTS_TOPIC | Enriched events Kafka topic (eg: `events_enriched`) |
40 | | LAGO_KAFKA_EVENTS_CHARGED_IN_ADVANCE_TOPIC | Events charged in advance Kafka topic (eg: `events_charged_in_advance`) |
41 | | LAGO_KAFKA_EVENTS_DEAD_LETTER_TOPIC | Events dead letter queue topic (eg: `events_dead_letter`) |
42 | | LAGO_KAFKA_CONSUMER_GROUP | Kafka consumer group name for post-processing |
43 | | OTEL_SERVICE_NAME | OpenTelemetry service name (eg: `events-processor`) |
44 | | OTEL_EXPORTER_OTLP_ENDPOINT | OpenTelemetry server URL |
45 | | OTEL_INSECURE | Set to `true` to use the insecure mode of OpenTelemetry |
46 |
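47 | As a sketch, a minimal `.env` for running against the development Docker Compose accessories (all values are illustrative and must be adapted to your setup):
48 | 
49 | ```shell
50 | DATABASE_URL=postgresql://lago:changeme@db:5432/lago
51 | LAGO_KAFKA_BOOTSTRAP_SERVERS=redpanda:9092
52 | LAGO_KAFKA_RAW_EVENTS_TOPIC=events_raw
53 | LAGO_KAFKA_ENRICHED_EVENTS_TOPIC=events_enriched
54 | LAGO_KAFKA_EVENTS_CHARGED_IN_ADVANCE_TOPIC=events_charged_in_advance
55 | LAGO_KAFKA_EVENTS_DEAD_LETTER_TOPIC=events_dead_letter
56 | LAGO_KAFKA_CONSUMER_GROUP=events_processor
57 | OTEL_SERVICE_NAME=events-processor
58 | OTEL_EXPORTER_OTLP_ENDPOINT=http://localhost:4317
59 | OTEL_INSECURE=true
60 | ```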
--------------------------------------------------------------------------------
/events-processor/config/database/database.go:
--------------------------------------------------------------------------------
1 | package database
2 |
3 | import (
4 | "context"
5 | "log/slog"
6 |
7 | "github.com/jackc/pgx/v5/pgxpool"
8 | "github.com/jackc/pgx/v5/stdlib"
9 | slogGorm "github.com/orandin/slog-gorm"
10 | "gorm.io/driver/postgres"
11 | "gorm.io/gorm"
12 | )
13 |
14 | type DB struct {
15 | Connection *gorm.DB
16 | logger *slog.Logger
17 | pool *pgxpool.Pool
18 | }
19 |
20 | type DBConfig struct {
21 | Url string
22 | MaxConns int32
23 | }
24 |
25 | func NewConnection(config DBConfig) (*DB, error) {
26 | logger := slog.Default()
27 | logger = logger.With("component", "db")
28 |
29 | poolConfig, err := pgxpool.ParseConfig(config.Url)
30 | if err != nil {
31 | return nil, err
32 | }
33 |
34 | poolConfig.MaxConns = config.MaxConns
35 |
36 | pool, err := pgxpool.NewWithConfig(context.Background(), poolConfig)
37 | if err != nil {
38 | return nil, err
39 | }
40 |
41 | dialector := postgres.New(postgres.Config{
42 | Conn: stdlib.OpenDBFromPool(pool),
43 | })
44 |
45 | conn, err := OpenConnection(logger, dialector)
46 | if err == nil {
47 | conn.pool = pool
48 | }
49 | return conn, err
50 | }
51 |
52 | func OpenConnection(logger *slog.Logger, dialector gorm.Dialector) (*DB, error) {
53 | gormLogger := slogGorm.New(
54 | slogGorm.WithHandler(logger.Handler()),
55 | )
56 |
57 | db, err := gorm.Open(dialector, &gorm.Config{
58 | Logger: gormLogger,
59 | })
60 |
61 | if err != nil {
62 | return nil, err
63 | }
64 |
65 | return &DB{Connection: db, logger: logger}, nil
66 | }
67 |
68 | // Close releases the pgx pool when present; a DB opened via OpenConnection has no pool.
69 | func (db *DB) Close() {
70 | 	if db.pool != nil {
71 | 		db.pool.Close()
72 | 	}
73 | }
74 | 
--------------------------------------------------------------------------------
/events-processor/config/database/database_test.go:
--------------------------------------------------------------------------------
1 | package database
2 |
3 | import (
4 | "os"
5 | "testing"
6 |
7 | "github.com/stretchr/testify/assert"
8 | )
9 |
10 | func TestNewConnection(t *testing.T) {
11 | config := DBConfig{
12 | Url: "invalid connection",
13 | MaxConns: 200,
14 | }
15 |
16 | _, err := NewConnection(config)
17 | assert.Error(t, err)
18 |
19 | config.Url = os.Getenv("DATABASE_URL")
20 |
21 | db, err := NewConnection(config)
22 | assert.NoError(t, err)
23 | assert.NotNil(t, db)
24 | assert.NotNil(t, db.Connection)
25 | assert.NotNil(t, db.logger)
26 | }
27 |
--------------------------------------------------------------------------------
/events-processor/config/kafka/consumer.go:
--------------------------------------------------------------------------------
1 | package kafka
2 |
3 | import (
4 | "context"
5 | "fmt"
6 | "log/slog"
7 | "math"
8 | "sync"
9 |
10 | "github.com/twmb/franz-go/pkg/kgo"
11 | "go.opentelemetry.io/otel/attribute"
12 |
13 | tracer "github.com/getlago/lago/events-processor/config"
14 | "github.com/getlago/lago/events-processor/utils"
15 | )
16 |
17 | type ConsumerGroupConfig struct {
18 | Topic string
19 | ConsumerGroup string
20 | ProcessRecords func([]*kgo.Record) []*kgo.Record
21 | }
22 |
23 | type TopicPartition struct {
24 | topic string
25 | partition int32
26 | }
27 |
28 | type PartitionConsumer struct {
29 | client *kgo.Client
30 | logger *slog.Logger
31 | topic string
32 | partition int32
33 |
34 | quit chan struct{}
35 | done chan struct{}
36 | records chan []*kgo.Record
37 | processRecords func([]*kgo.Record) []*kgo.Record
38 | }
39 |
40 | type ConsumerGroup struct {
41 | consumers map[TopicPartition]*PartitionConsumer
42 | client *kgo.Client
43 | processRecords func([]*kgo.Record) []*kgo.Record
44 | logger *slog.Logger
45 | }
46 |
47 | func (pc *PartitionConsumer) consume() {
48 | defer close(pc.done)
49 |
50 | pc.logger.Info(fmt.Sprintf("Starting consume for topic %s partition %d", pc.topic, pc.partition))
51 | defer pc.logger.Info(fmt.Sprintf("Closing consume for topic %s partition %d", pc.topic, pc.partition))
52 |
53 | for {
54 | select {
55 | case <-pc.quit:
56 | pc.logger.Info("partition consumer quit")
57 | return
58 |
59 | case records := <-pc.records:
60 | ctx := context.Background()
61 | span := tracer.GetTracerSpan(ctx, "post_process", "Consumer.Consume")
62 | span.SetAttributes(attribute.Int("records.length", len(records)))
63 | processedRecords := pc.processRecords(records)
64 | commitableRecords := records
65 | 
66 | if len(processedRecords) != len(records) {
67 | // Only commit up to the last safely commitable record, so unprocessed records are re-consumed
68 | record := findMaxCommitableRecord(processedRecords, records)
69 | if record == nil {
70 | span.End()
71 | continue // nothing is safe to commit; the whole batch will be re-consumed
72 | }
73 | commitableRecords = []*kgo.Record{record}
74 | }
75 | err := pc.client.CommitRecords(ctx, commitableRecords...)
76 | if err != nil {
77 | pc.logger.Error(fmt.Sprintf("Error when committing offsets to kafka. Error: %v topic: %s partition: %d offset: %d", err, pc.topic, pc.partition, records[len(records)-1].Offset+1))
78 | utils.CaptureError(err)
79 | }
80 | span.End() // end the span per batch; a deferred End would only run when the goroutine exits
81 | }
82 | }
83 | }
84 |
85 | func (cg *ConsumerGroup) assigned(_ context.Context, cl *kgo.Client, assigned map[string][]int32) {
86 | for topic, partitions := range assigned {
87 | for _, partition := range partitions {
88 | pc := &PartitionConsumer{
89 | client: cl,
90 | topic: topic,
91 | partition: partition,
92 | logger: cg.logger,
93 |
94 | quit: make(chan struct{}),
95 | done: make(chan struct{}),
96 | records: make(chan []*kgo.Record),
97 | processRecords: cg.processRecords,
98 | }
99 | cg.consumers[TopicPartition{topic: topic, partition: partition}] = pc
100 | go pc.consume()
101 | }
102 | }
103 | }
104 |
105 | func (cg *ConsumerGroup) lost(_ context.Context, _ *kgo.Client, lost map[string][]int32) {
106 | var wg sync.WaitGroup
107 | defer wg.Wait()
108 |
109 | for topic, partitions := range lost {
110 | for _, partition := range partitions {
111 | tp := TopicPartition{topic: topic, partition: partition}
112 | pc := cg.consumers[tp]
113 | delete(cg.consumers, tp)
114 | close(pc.quit)
115 |
116 | pc.logger.Info(fmt.Sprintf("waiting for work to finish topic %s partition %d\n", topic, partition))
117 | wg.Add(1)
118 | go func() { <-pc.done; wg.Done() }()
119 | }
120 | }
121 | }
122 |
123 | func (cg *ConsumerGroup) poll() {
124 | for {
125 | fetches := cg.client.PollRecords(context.Background(), 10000)
126 | if fetches.IsClientClosed() {
127 | cg.logger.Info("client closed")
128 | return
129 | }
130 |
131 | fetches.EachError(func(_ string, _ int32, err error) {
132 | panic(err)
133 | })
134 |
135 | fetches.EachPartition(func(p kgo.FetchTopicPartition) {
136 | tp := TopicPartition{p.Topic, p.Partition}
137 | cg.consumers[tp].records <- p.Records
138 | })
139 |
140 | cg.client.AllowRebalance()
141 | }
142 | }
143 |
144 | func NewConsumerGroup(serverConfig ServerConfig, cfg *ConsumerGroupConfig) (*ConsumerGroup, error) {
145 | logger := slog.Default()
146 | logger = logger.With("kafka-topic-consumer", cfg.Topic)
147 |
148 | cg := &ConsumerGroup{
149 | consumers: make(map[TopicPartition]*PartitionConsumer),
150 | processRecords: cfg.ProcessRecords,
151 | logger: logger,
152 | }
153 |
154 | cgName := fmt.Sprintf("%s_%s", cfg.ConsumerGroup, cfg.Topic)
155 | opts := []kgo.Opt{
156 | kgo.ConsumerGroup(cgName),
157 | kgo.ConsumeTopics(cfg.Topic),
158 | kgo.OnPartitionsAssigned(cg.assigned),
159 | kgo.OnPartitionsLost(cg.lost),
160 | kgo.OnPartitionsRevoked(cg.lost),
161 | kgo.DisableAutoCommit(),
162 | kgo.BlockRebalanceOnPoll(),
163 | }
164 |
165 | kcl, err := NewKafkaClient(serverConfig, opts)
166 | if err != nil {
167 | return nil, err
168 | }
169 |
170 | if err = kcl.Ping(context.Background()); err != nil {
171 | return nil, err
172 | }
173 |
174 | cg.client = kcl
175 | return cg, nil
176 | }
177 |
178 | func (cg *ConsumerGroup) Start() {
179 | cg.poll()
180 | }
181 |
182 | func findMaxCommitableRecord(processedRecords []*kgo.Record, records []*kgo.Record) *kgo.Record {
183 | // Keep track of processed records
184 | processedMap := make(map[string]bool)
185 | for _, record := range processedRecords {
186 | key := fmt.Sprintf("%s-%d", string(record.Key), record.Offset)
187 | processedMap[key] = true
188 | }
189 |
190 | // Find the minimum offset of the unprocessed records
191 | minUnprocessedOffset := int64(math.MaxInt64)
192 | foundUnprocessed := false
193 | for _, record := range records {
194 | key := fmt.Sprintf("%s-%d", string(record.Key), record.Offset)
195 | if !processedMap[key] {
196 | if !foundUnprocessed || record.Offset < minUnprocessedOffset {
197 | minUnprocessedOffset = record.Offset
198 | foundUnprocessed = true
199 | }
200 | }
201 | }
202 |
203 | // Find the record with the offset just before the minimum unprocessed offset
204 | var maxRecord *kgo.Record
205 | for _, record := range processedRecords {
206 | if record.Offset < minUnprocessedOffset && (maxRecord == nil || record.Offset > maxRecord.Offset) {
207 | maxRecord = record
208 | }
209 | }
210 |
211 | return maxRecord
212 | }
213 |
--------------------------------------------------------------------------------
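A hedged sketch of wiring a ConsumerGroup; the broker, topic, and group names are placeholders. The ProcessRecords callback returns only the records it fully handled, and findMaxCommitableRecord above ensures the group never commits past the lowest unprocessed offset:

```go
package main

import (
	"log"

	"github.com/twmb/franz-go/pkg/kgo"

	"github.com/getlago/lago/events-processor/config/kafka"
)

func main() {
	server := kafka.ServerConfig{Server: "localhost:9092"} // placeholder broker

	cg, err := kafka.NewConsumerGroup(server, &kafka.ConsumerGroupConfig{
		Topic:         "events_raw",    // placeholder topic
		ConsumerGroup: "example_group", // placeholder group
		ProcessRecords: func(records []*kgo.Record) []*kgo.Record {
			// Return the records that were fully processed; any others
			// stay uncommitted and are re-consumed.
			return records
		},
	})
	if err != nil {
		log.Fatalf("building consumer group: %v", err)
	}

	cg.Start() // blocks: polls, dispatches per partition, commits offsets
}
```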
/events-processor/config/kafka/consumer_test.go:
--------------------------------------------------------------------------------
1 | package kafka
2 |
3 | import (
4 | "testing"
5 |
6 | "github.com/stretchr/testify/assert"
7 | "github.com/twmb/franz-go/pkg/kgo"
8 | )
9 |
10 | func createRecord(key string, offset int64) *kgo.Record {
11 | return &kgo.Record{
12 | Key: []byte(key),
13 | Value: []byte("value"),
14 | Offset: offset,
15 | }
16 | }
17 |
18 | func TestFindMaxCommitableRecord(t *testing.T) {
19 | {
20 | tests := []struct {
21 | name string
22 | processedRecords []*kgo.Record
23 | records []*kgo.Record
24 | expected *kgo.Record
25 | }{
26 | {
27 | name: "With continuous offsets",
28 | processedRecords: []*kgo.Record{
29 | createRecord("key1", 1),
30 | createRecord("key2", 2),
31 | },
32 | records: []*kgo.Record{
33 | createRecord("key1", 1),
34 | createRecord("key2", 2),
35 | createRecord("key3", 3),
36 | createRecord("key4", 4),
37 | },
38 | expected: createRecord("key2", 2),
39 | },
40 | {
41 | name: "With non-continuous offsets",
42 | processedRecords: []*kgo.Record{
43 | createRecord("key1", 1),
44 | createRecord("key5", 5),
45 | },
46 | records: []*kgo.Record{
47 | createRecord("key1", 1),
48 | createRecord("key3", 3),
49 | createRecord("key5", 5),
50 | createRecord("key7", 7),
51 | },
52 | expected: createRecord("key1", 1),
53 | },
54 | {
55 | name: "With empty processed records",
56 | processedRecords: []*kgo.Record{},
57 | records: []*kgo.Record{
58 | createRecord("key1", 1),
59 | createRecord("key3", 3),
60 | createRecord("key5", 5),
61 | createRecord("key7", 7),
62 | },
63 | expected: nil,
64 | },
65 | {
66 | name: "All records processed",
67 | processedRecords: []*kgo.Record{
68 | createRecord("key1", 1),
69 | createRecord("key2", 2),
70 | createRecord("key3", 3),
71 | createRecord("key4", 4),
72 | },
73 | records: []*kgo.Record{
74 | createRecord("key1", 1),
75 | createRecord("key2", 2),
76 | createRecord("key3", 3),
77 | createRecord("key4", 4),
78 | },
79 | expected: createRecord("key4", 4),
80 | },
81 | {
82 | name: "Only one processed record - not first",
83 | processedRecords: []*kgo.Record{
84 | createRecord("key5", 5),
85 | },
86 | records: []*kgo.Record{
87 | createRecord("key1", 1),
88 | createRecord("key3", 3),
89 | createRecord("key5", 5),
90 | createRecord("key7", 7),
91 | },
92 | expected: nil,
93 | },
94 | {
95 | name: "Only one processed record - first",
96 | processedRecords: []*kgo.Record{
97 | createRecord("key1", 1),
98 | },
99 | records: []*kgo.Record{
100 | createRecord("key1", 1),
101 | createRecord("key3", 3),
102 | createRecord("key5", 5),
103 | createRecord("key7", 7),
104 | },
105 | expected: createRecord("key1", 1),
106 | },
107 | }
108 |
109 | for _, test := range tests {
110 | t.Run(test.name, func(t *testing.T) {
111 | result := findMaxCommitableRecord(test.processedRecords, test.records)
112 |
113 | if test.expected == nil {
114 | assert.Nil(t, result)
115 | } else {
116 | assert.NotNil(t, result)
117 | assert.Equal(t, test.expected.Key, result.Key)
118 | assert.Equal(t, test.expected.Offset, result.Offset)
119 | }
120 | })
121 | }
122 | }
123 | }
124 |
--------------------------------------------------------------------------------
/events-processor/config/kafka/kafka.go:
--------------------------------------------------------------------------------
1 | package kafka
2 |
3 | import (
4 | "context"
5 | "log/slog"
6 | "time"
7 |
8 | "github.com/twmb/franz-go/pkg/kgo"
9 | "github.com/twmb/franz-go/pkg/sasl/scram"
10 | "github.com/twmb/franz-go/plugin/kotel"
11 | "github.com/twmb/franz-go/plugin/kslog"
12 | "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc"
13 | "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
14 | "go.opentelemetry.io/otel/propagation"
15 | "go.opentelemetry.io/otel/sdk/metric"
16 | "go.opentelemetry.io/otel/sdk/trace"
17 | )
18 |
19 | const (
20 | Scram256 string = "SCRAM-SHA-256"
21 | Scram512 string = "SCRAM-SHA-512"
22 | )
23 |
24 | type ServerConfig struct {
25 | ScramAlgorithm string
26 | TLS bool
27 | Server string
28 | UseTelemetry bool
29 | UserName string
30 | Password string
31 | }
32 |
33 | func NewKafkaClient(serverConfig ServerConfig, config []kgo.Opt) (*kgo.Client, error) {
34 | logger := slog.Default()
35 | logger = logger.With("component", "kafka")
36 |
37 | opts := []kgo.Opt{
38 | kgo.SeedBrokers(serverConfig.Server),
39 | kgo.WithLogger(kslog.New(logger)),
40 | }
41 |
42 | if len(config) > 0 {
43 | opts = append(opts, config...)
44 | }
45 |
46 | if serverConfig.UseTelemetry {
47 | meterProvider, err := initMeterProvider(context.Background())
48 | if err != nil {
49 | return nil, err
50 | }
51 | meterOpts := []kotel.MeterOpt{kotel.MeterProvider(meterProvider)}
52 | meter := kotel.NewMeter(meterOpts...)
53 |
54 | tracerProvider, err := initTracerProvider(context.Background())
55 | if err != nil {
56 | return nil, err
57 | }
58 | tracerOpts := []kotel.TracerOpt{
59 | kotel.TracerProvider(tracerProvider),
60 | kotel.TracerPropagator(propagation.NewCompositeTextMapPropagator(propagation.TraceContext{})),
61 | }
62 | tracer := kotel.NewTracer(tracerOpts...)
63 |
64 | kotelOps := []kotel.Opt{
65 | kotel.WithTracer(tracer),
66 | kotel.WithMeter(meter),
67 | }
68 |
69 | kotelService := kotel.NewKotel(kotelOps...)
70 | kotelOpt := kgo.WithHooks(kotelService.Hooks()...)
71 | opts = append(opts, kotelOpt)
72 | }
73 |
74 | if serverConfig.ScramAlgorithm != "" {
75 | var scramOpt kgo.Opt
76 |
77 | scramAuth := scram.Auth{
78 | User: serverConfig.UserName,
79 | Pass: serverConfig.Password,
80 | }
81 |
82 | switch serverConfig.ScramAlgorithm {
83 | case Scram256:
84 | scramOpt = kgo.SASL(scramAuth.AsSha256Mechanism())
85 | case Scram512:
86 | scramOpt = kgo.SASL(scramAuth.AsSha512Mechanism())
87 | }
88 |
89 | if scramOpt != nil { opts = append(opts, scramOpt) } // guard against a nil option when the algorithm is unknown
90 | }
91 |
92 | if serverConfig.TLS {
93 | tlsOpt := kgo.DialTLS()
94 | opts = append(opts, tlsOpt)
95 | }
96 |
97 | client, err := kgo.NewClient(opts...)
98 | if err != nil {
99 | return nil, err
100 | }
101 |
102 | return client, nil
103 | }
104 |
105 | func initTracerProvider(ctx context.Context) (*trace.TracerProvider, error) {
106 | traceExporter, err := otlptracegrpc.New(ctx)
107 | if err != nil {
108 | return nil, err
109 | }
110 |
111 | tracerProvider := trace.NewTracerProvider(
112 | trace.WithBatcher(traceExporter),
113 | )
114 |
115 | return tracerProvider, nil
116 | }
117 |
118 | func initMeterProvider(ctx context.Context) (*metric.MeterProvider, error) {
119 | metricExporter, err := otlpmetricgrpc.New(ctx)
120 | if err != nil {
121 | return nil, err
122 | }
123 |
124 | meterProvider := metric.NewMeterProvider(
125 | metric.WithReader(metric.NewPeriodicReader(metricExporter,
126 | metric.WithInterval(60*time.Second))),
127 | )
128 |
129 | return meterProvider, nil
130 | }
131 |
--------------------------------------------------------------------------------
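A sketch of a secured client configuration; the broker address and credentials are placeholders, and the SCRAM constants come from the file above:

```go
package main

import (
	"context"
	"log"

	"github.com/getlago/lago/events-processor/config/kafka"
)

func main() {
	cfg := kafka.ServerConfig{
		Server:         "broker.example.com:9092", // placeholder address
		ScramAlgorithm: kafka.Scram512,            // SASL/SCRAM-SHA-512
		UserName:       "user",                    // placeholder credentials
		Password:       "secret",
		TLS:            true, // dial over TLS
	}

	client, err := kafka.NewKafkaClient(cfg, nil)
	if err != nil {
		log.Fatalf("building kafka client: %v", err)
	}
	defer client.Close()

	if err := client.Ping(context.Background()); err != nil {
		log.Fatalf("kafka unreachable: %v", err)
	}
}
```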
/events-processor/config/kafka/producer.go:
--------------------------------------------------------------------------------
1 | package kafka
2 |
3 | import (
4 | "context"
5 | "log/slog"
6 |
7 | "github.com/twmb/franz-go/pkg/kgo"
8 |
9 | tracer "github.com/getlago/lago/events-processor/config"
10 | "github.com/getlago/lago/events-processor/utils"
11 | )
12 |
13 | type ProducerConfig struct {
14 | Topic string
15 | }
16 |
17 | type Producer struct {
18 | client *kgo.Client
19 | config ProducerConfig
20 | logger *slog.Logger
21 | }
22 |
23 | type ProducerMessage struct {
24 | Key []byte
25 | Value []byte
26 | }
27 |
28 | type MessageProducer interface {
29 | Produce(context.Context, *ProducerMessage) bool
30 | GetTopic() string
31 | }
32 |
33 | func NewProducer(serverConfig ServerConfig, cfg *ProducerConfig) (*Producer, error) {
34 | opts := make([]kgo.Opt, 0)
35 | kcl, err := NewKafkaClient(serverConfig, opts)
36 | if err != nil {
37 | return nil, err
38 | }
39 |
40 | logger := slog.Default()
41 | logger = logger.With("component", "kafka-producer")
42 |
43 | pdr := &Producer{
44 | client: kcl,
45 | config: *cfg,
46 | logger: logger,
47 | }
48 |
49 | return pdr, nil
50 | }
51 |
52 | func (p *Producer) Produce(ctx context.Context, msg *ProducerMessage) bool {
53 | span := tracer.GetTracerSpan(ctx, "post_process", "Producer.Produce")
54 | defer span.End()
55 |
56 | record := &kgo.Record{
57 | Topic: p.config.Topic,
58 | Key: msg.Key,
59 | Value: msg.Value,
60 | }
61 |
62 | pr := p.client.ProduceSync(ctx, record)
63 | if err := pr.FirstErr(); err != nil {
64 | p.logger.Error("record had a produce error while synchronously producing", slog.String("error", err.Error()))
65 | utils.CaptureError(err)
66 | return false
67 | }
68 |
69 | return true
70 | }
71 |
72 | func (p *Producer) Ping(ctx context.Context) error {
73 | return p.client.Ping(ctx)
74 | }
75 |
76 | func (p *Producer) GetTopic() string {
77 | return p.config.Topic
78 | }
79 |
--------------------------------------------------------------------------------
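A usage sketch for the synchronous producer; the topic, key, and payload are placeholders:

```go
package main

import (
	"context"
	"log"

	"github.com/getlago/lago/events-processor/config/kafka"
)

func main() {
	server := kafka.ServerConfig{Server: "localhost:9092"} // placeholder broker

	producer, err := kafka.NewProducer(server, &kafka.ProducerConfig{Topic: "events_enriched"})
	if err != nil {
		log.Fatalf("building producer: %v", err)
	}

	ok := producer.Produce(context.Background(), &kafka.ProducerMessage{
		Key:   []byte("org-id/sub-id"),
		Value: []byte(`{"code":"api_calls"}`),
	})
	if !ok {
		// Produce already logged and captured the error; callers only see a bool.
		log.Println("produce failed")
	}
}
```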
/events-processor/config/redis/redis.go:
--------------------------------------------------------------------------------
1 | package redis
2 |
3 | import (
4 | "context"
5 | "crypto/tls"
6 | "time"
7 |
8 | "github.com/redis/go-redis/extra/redisotel/v9"
9 | "github.com/redis/go-redis/v9"
10 | )
11 |
12 | type RedisConfig struct {
13 | Address string
14 | Password string
15 | DB int
16 | UseTracer bool
17 | UseTLS bool
18 | }
19 |
20 | type RedisDB struct {
21 | Client *redis.Client
22 | }
23 |
24 | func NewRedisDB(ctx context.Context, cfg RedisConfig) (*RedisDB, error) {
25 | // go-redis negotiates TLS whenever TLSConfig is non-nil, so it stays nil unless requested
26 | var tlsConfig *tls.Config
27 | if cfg.UseTLS {
28 | tlsConfig = &tls.Config{InsecureSkipVerify: true}
29 | }
30 | 
31 |
32 | redisClient := redis.NewClient(&redis.Options{
33 | Addr: cfg.Address,
34 | Password: cfg.Password,
35 | DB: cfg.DB,
36 | DialTimeout: 5 * time.Second,
37 | ReadTimeout: 3 * time.Second,
38 | WriteTimeout: 3 * time.Second,
39 | PoolSize: 10,
40 | PoolTimeout: 4 * time.Second,
41 | TLSConfig: tlsConfig,
42 | })
43 |
44 | status := redisClient.Ping(ctx)
45 | if status.Err() != nil {
46 | return nil, status.Err()
47 | }
48 |
49 | if cfg.UseTracer {
50 | if err := redisotel.InstrumentTracing(redisClient); err != nil {
51 | return nil, err
52 | }
53 | }
54 |
55 | store := &RedisDB{
56 | Client: redisClient,
57 | }
58 |
59 | return store, nil
60 | }
61 |
--------------------------------------------------------------------------------
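A usage sketch with a placeholder address and DB index; the wrapped go-redis client is used directly:

```go
package main

import (
	"context"
	"log"

	"github.com/getlago/lago/events-processor/config/redis"
)

func main() {
	ctx := context.Background()

	store, err := redis.NewRedisDB(ctx, redis.RedisConfig{
		Address: "localhost:6379", // placeholder address
		DB:      1,
	})
	if err != nil {
		log.Fatalf("connecting to redis: %v", err)
	}
	defer store.Client.Close()

	if err := store.Client.Set(ctx, "healthcheck", "ok", 0).Err(); err != nil {
		log.Printf("set failed: %v", err)
	}
}
```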
/events-processor/config/tracer.go:
--------------------------------------------------------------------------------
1 | package tracer
2 |
3 | import (
4 | "context"
5 | "log"
6 | "strings"
7 |
8 | "go.opentelemetry.io/otel"
9 | "go.opentelemetry.io/otel/attribute"
10 | "go.opentelemetry.io/otel/exporters/otlp/otlptrace"
11 | "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
12 | "go.opentelemetry.io/otel/sdk/resource"
13 | sdktrace "go.opentelemetry.io/otel/sdk/trace"
14 | "go.opentelemetry.io/otel/trace"
15 | "google.golang.org/grpc/credentials"
16 | )
17 |
18 | type TracerConfig struct {
19 | ServiceName string
20 | EndpointURL string
21 | Insecure string
22 | }
23 |
24 | func (tc TracerConfig) UseSecureMode() bool {
25 | return !(strings.ToLower(tc.Insecure) == "false" || tc.Insecure == "0" || strings.ToLower(tc.Insecure) == "f")
26 | }
27 |
28 | func GetTracerSpan(ctx context.Context, tracerName string, name string) trace.Span {
29 | tracer := otel.GetTracerProvider().Tracer(tracerName)
30 | _, span := tracer.Start(ctx, name)
31 | return span
32 | }
33 |
34 | func InitOTLPTracer(cfg TracerConfig) func(context.Context) error {
35 | var secureOption otlptracegrpc.Option
36 |
37 | if cfg.UseSecureMode() {
38 | secureOption = otlptracegrpc.WithTLSCredentials(credentials.NewClientTLSFromCert(nil, ""))
39 | } else {
40 | secureOption = otlptracegrpc.WithInsecure()
41 | }
42 |
43 | exporter, err := otlptrace.New(
44 | context.Background(),
45 | otlptracegrpc.NewClient(
46 | secureOption,
47 | otlptracegrpc.WithEndpoint(cfg.EndpointURL),
48 | ),
49 | )
50 |
51 | if err != nil {
52 | log.Fatalf("Failed to create exporter: %v", err)
53 | }
54 |
55 | resources, err := resource.New(
56 | context.Background(),
57 | resource.WithAttributes(
58 | attribute.String("service.name", cfg.ServiceName),
59 | attribute.String("library.language", "go"),
60 | ),
61 | )
62 |
63 | if err != nil {
64 | log.Fatalf("Could not set resource: %v", err)
65 | }
66 |
67 | otel.SetTracerProvider(
68 | sdktrace.NewTracerProvider(
69 | sdktrace.WithSampler(sdktrace.AlwaysSample()),
70 | sdktrace.WithBatcher(exporter),
71 | sdktrace.WithResource(resources),
72 | ),
73 | )
74 |
75 | return exporter.Shutdown
76 | }
77 |
--------------------------------------------------------------------------------
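A wiring sketch with a placeholder endpoint and service name. Note that, per UseSecureMode above, only an explicitly false-ish Insecure value ("false", "0", "f") selects the plaintext exporter; anything else uses TLS credentials:

```go
package main

import (
	"context"

	tracer "github.com/getlago/lago/events-processor/config"
)

func main() {
	shutdown := tracer.InitOTLPTracer(tracer.TracerConfig{
		ServiceName: "events-processor",
		EndpointURL: "localhost:4317", // placeholder OTLP gRPC endpoint
		Insecure:    "false",          // false-ish => plaintext, per UseSecureMode
	})
	defer func() { _ = shutdown(context.Background()) }()

	// Spans are then created through the global provider, as the consumers do.
	span := tracer.GetTracerSpan(context.Background(), "post_process", "Example.Operation")
	defer span.End()
}
```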
/events-processor/go.mod:
--------------------------------------------------------------------------------
1 | module github.com/getlago/lago/events-processor
2 |
3 | go 1.24.0
4 |
5 | require (
6 | github.com/DATA-DOG/go-sqlmock v1.5.2
7 | github.com/getlago/lago-expression/expression-go v0.1.4
8 | github.com/getsentry/sentry-go v0.31.1
9 | github.com/jackc/pgx/v5 v5.7.2
10 | github.com/orandin/slog-gorm v1.4.0
11 | github.com/redis/go-redis/extra/redisotel/v9 v9.7.3
12 | github.com/redis/go-redis/v9 v9.7.3
13 | github.com/stretchr/testify v1.10.0
14 | github.com/twmb/franz-go v1.18.1
15 | github.com/twmb/franz-go/plugin/kotel v1.5.0
16 | github.com/twmb/franz-go/plugin/kslog v1.0.0
17 | go.opentelemetry.io/otel v1.34.0
18 | go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.34.0
19 | go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.34.0
20 | go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.34.0
21 | go.opentelemetry.io/otel/sdk v1.34.0
22 | go.opentelemetry.io/otel/sdk/metric v1.34.0
23 | go.opentelemetry.io/otel/trace v1.34.0
24 | google.golang.org/grpc v1.69.4
25 | gorm.io/driver/postgres v1.5.11
26 | gorm.io/gorm v1.25.12
27 | )
28 |
29 | require (
30 | github.com/cenkalti/backoff/v4 v4.3.0 // indirect
31 | github.com/cespare/xxhash/v2 v2.3.0 // indirect
32 | github.com/davecgh/go-spew v1.1.1 // indirect
33 | github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect
34 | github.com/go-logr/logr v1.4.2 // indirect
35 | github.com/go-logr/stdr v1.2.2 // indirect
36 | github.com/google/uuid v1.6.0 // indirect
37 | github.com/grpc-ecosystem/grpc-gateway/v2 v2.25.1 // indirect
38 | github.com/jackc/pgpassfile v1.0.0 // indirect
39 | github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 // indirect
40 | github.com/jackc/puddle/v2 v2.2.2 // indirect
41 | github.com/jinzhu/inflection v1.0.0 // indirect
42 | github.com/jinzhu/now v1.1.5 // indirect
43 | github.com/klauspost/compress v1.17.11 // indirect
44 | github.com/pierrec/lz4/v4 v4.1.22 // indirect
45 | github.com/pmezard/go-difflib v1.0.0 // indirect
46 | github.com/redis/go-redis/extra/rediscmd/v9 v9.7.3 // indirect
47 | github.com/twmb/franz-go/pkg/kmsg v1.9.0 // indirect
48 | go.opentelemetry.io/auto/sdk v1.1.0 // indirect
49 | go.opentelemetry.io/otel/metric v1.34.0 // indirect
50 | go.opentelemetry.io/proto/otlp v1.5.0 // indirect
51 | golang.org/x/crypto v0.36.0 // indirect
52 | golang.org/x/net v0.38.0 // indirect
53 | golang.org/x/sync v0.12.0 // indirect
54 | golang.org/x/sys v0.31.0 // indirect
55 | golang.org/x/text v0.23.0 // indirect
56 | google.golang.org/genproto/googleapis/api v0.0.0-20250115164207-1a7da9e5054f // indirect
57 | google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f // indirect
58 | google.golang.org/protobuf v1.36.3 // indirect
59 | gopkg.in/yaml.v3 v3.0.1 // indirect
60 | )
61 |
--------------------------------------------------------------------------------
/events-processor/go.sum:
--------------------------------------------------------------------------------
1 | github.com/DATA-DOG/go-sqlmock v1.5.2 h1:OcvFkGmslmlZibjAjaHm3L//6LiuBgolP7OputlJIzU=
2 | github.com/DATA-DOG/go-sqlmock v1.5.2/go.mod h1:88MAG/4G7SMwSE3CeA0ZKzrT5CiOU3OJ+JlNzwDqpNU=
3 | github.com/bsm/ginkgo/v2 v2.12.0 h1:Ny8MWAHyOepLGlLKYmXG4IEkioBysk6GpaRTLC8zwWs=
4 | github.com/bsm/ginkgo/v2 v2.12.0/go.mod h1:SwYbGRRDovPVboqFv0tPTcG1sN61LM1Z4ARdbAV9g4c=
5 | github.com/bsm/gomega v1.27.10 h1:yeMWxP2pV2fG3FgAODIY8EiRE3dy0aeFYt4l7wh6yKA=
6 | github.com/bsm/gomega v1.27.10/go.mod h1:JyEr/xRbxbtgWNi8tIEVPUYZ5Dzef52k01W3YH0H+O0=
7 | github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8=
8 | github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
9 | github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
10 | github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
11 | github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
12 | github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
13 | github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
14 | github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=
15 | github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
16 | github.com/getlago/lago-expression/expression-go v0.1.4 h1:GISVFLxDhPZvQ5CVHTOS1OlfSizmgdodsHjClUDbx+Q=
17 | github.com/getlago/lago-expression/expression-go v0.1.4/go.mod h1:yFYUWUrkEBwx0RuNxVXPmGCNeqOIK3x2b8izhb3ma9c=
18 | github.com/getsentry/sentry-go v0.31.1 h1:ELVc0h7gwyhnXHDouXkhqTFSO5oslsRDk0++eyE0KJ4=
19 | github.com/getsentry/sentry-go v0.31.1/go.mod h1:CYNcMMz73YigoHljQRG+qPF+eMq8gG72XcGN/p71BAY=
20 | github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA=
21 | github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og=
22 | github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
23 | github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
24 | github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
25 | github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
26 | github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
27 | github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
28 | github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
29 | github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
30 | github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
31 | github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
32 | github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
33 | github.com/grpc-ecosystem/grpc-gateway/v2 v2.25.1 h1:VNqngBF40hVlDloBruUehVYC3ArSgIyScOAyMRqBxRg=
34 | github.com/grpc-ecosystem/grpc-gateway/v2 v2.25.1/go.mod h1:RBRO7fro65R6tjKzYgLAFo0t1QEXY1Dp+i/bvpRiqiQ=
35 | github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM=
36 | github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg=
37 | github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 h1:iCEnooe7UlwOQYpKFhBabPMi4aNAfoODPEFNiAnClxo=
38 | github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM=
39 | github.com/jackc/pgx/v5 v5.7.2 h1:mLoDLV6sonKlvjIEsV56SkWNCnuNv531l94GaIzO+XI=
40 | github.com/jackc/pgx/v5 v5.7.2/go.mod h1:ncY89UGWxg82EykZUwSpUKEfccBGGYq1xjrOpsbsfGQ=
41 | github.com/jackc/puddle/v2 v2.2.2 h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo=
42 | github.com/jackc/puddle/v2 v2.2.2/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4=
43 | github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E=
44 | github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc=
45 | github.com/jinzhu/now v1.1.5 h1:/o9tlHleP7gOFmsnYNz3RGnqzefHA47wQpKrrdTIwXQ=
46 | github.com/jinzhu/now v1.1.5/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8=
47 | github.com/kisielk/sqlstruct v0.0.0-20201105191214-5f3e10d3ab46/go.mod h1:yyMNCyc/Ib3bDTKd379tNMpB/7/H5TjM2Y9QJ5THLbE=
48 | github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc=
49 | github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0=
50 | github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
51 | github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
52 | github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
53 | github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
54 | github.com/orandin/slog-gorm v1.4.0 h1:FgA8hJufF9/jeNSYoEXmHPPBwET2gwlF3B85JdpsTUU=
55 | github.com/orandin/slog-gorm v1.4.0/go.mod h1:MoZ51+b7xE9lwGNPYEhxcUtRNrYzjdcKvA8QXQQGEPA=
56 | github.com/pierrec/lz4/v4 v4.1.22 h1:cKFw6uJDK+/gfw5BcDL0JL5aBsAFdsIT18eRtLj7VIU=
57 | github.com/pierrec/lz4/v4 v4.1.22/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
58 | github.com/pingcap/errors v0.11.4 h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4=
59 | github.com/pingcap/errors v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8=
60 | github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
61 | github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
62 | github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
63 | github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
64 | github.com/redis/go-redis/extra/rediscmd/v9 v9.7.3 h1:1AXQZkJkFxGV3f78mSnUI70l0orO6FHnYoSmBos8SZM=
65 | github.com/redis/go-redis/extra/rediscmd/v9 v9.7.3/go.mod h1:OgkpkwJYex1oyVAabK+VhVUKhUXw8uZUfewJYH1wG90=
66 | github.com/redis/go-redis/extra/redisotel/v9 v9.7.3 h1:ICBA9xYh+SmZqMfBtjKpp1ohi/V5R1TEZglLZc8IxTc=
67 | github.com/redis/go-redis/extra/redisotel/v9 v9.7.3/go.mod h1:DMzxd0CDyZ9VFw9sEPIVpIgKTAaubfGuaPQSUaS7/fo=
68 | github.com/redis/go-redis/v9 v9.7.3 h1:YpPyAayJV+XErNsatSElgRZZVCwXX9QzkKYNvO7x0wM=
69 | github.com/redis/go-redis/v9 v9.7.3/go.mod h1:bGUrSggJ9X9GUmZpZNEOQKaANxSGgOEBRltRTZHSvrA=
70 | github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
71 | github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
72 | github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
73 | github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
74 | github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
75 | github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
76 | github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
77 | github.com/twmb/franz-go v1.18.1 h1:D75xxCDyvTqBSiImFx2lkPduE39jz1vaD7+FNc+vMkc=
78 | github.com/twmb/franz-go v1.18.1/go.mod h1:Uzo77TarcLTUZeLuGq+9lNpSkfZI+JErv7YJhlDjs9M=
79 | github.com/twmb/franz-go/pkg/kmsg v1.9.0 h1:JojYUph2TKAau6SBtErXpXGC7E3gg4vGZMv9xFU/B6M=
80 | github.com/twmb/franz-go/pkg/kmsg v1.9.0/go.mod h1:CMbfazviCyY6HM0SXuG5t9vOwYDHRCSrJJyBAe5paqg=
81 | github.com/twmb/franz-go/plugin/kotel v1.5.0 h1:TiPfGUbQK384OO7ZYGdo7JuPCbJn+/8njQ/D9Je9CDE=
82 | github.com/twmb/franz-go/plugin/kotel v1.5.0/go.mod h1:wRXzRo76x1myOUMaVHAyraXoGBdEcvlLChGTVv5+DWU=
83 | github.com/twmb/franz-go/plugin/kslog v1.0.0 h1:I64oEmF+0PDvmyLgwrlOtg4mfpSE9GwlcLxM4af2t60=
84 | github.com/twmb/franz-go/plugin/kslog v1.0.0/go.mod h1:8pMjK3OJJJNNYddBSbnXZkIK5dCKFIk9GcVVCDgvnQc=
85 | go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
86 | go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
87 | go.opentelemetry.io/otel v1.34.0 h1:zRLXxLCgL1WyKsPVrgbSdMN4c0FMkDAskSTQP+0hdUY=
88 | go.opentelemetry.io/otel v1.34.0/go.mod h1:OWFPOQ+h4G8xpyjgqo4SxJYdDQ/qmRH+wivy7zzx9oI=
89 | go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.34.0 h1:ajl4QczuJVA2TU9W9AGw++86Xga/RKt//16z/yxPgdk=
90 | go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.34.0/go.mod h1:Vn3/rlOJ3ntf/Q3zAI0V5lDnTbHGaUsNUeF6nZmm7pA=
91 | go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.34.0 h1:OeNbIYk/2C15ckl7glBlOBp5+WlYsOElzTNmiPW/x60=
92 | go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.34.0/go.mod h1:7Bept48yIeqxP2OZ9/AqIpYS94h2or0aB4FypJTc8ZM=
93 | go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.34.0 h1:tgJ0uaNS4c98WRNUEx5U3aDlrDOI5Rs+1Vifcw4DJ8U=
94 | go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.34.0/go.mod h1:U7HYyW0zt/a9x5J1Kjs+r1f/d4ZHnYFclhYY2+YbeoE=
95 | go.opentelemetry.io/otel/metric v1.34.0 h1:+eTR3U0MyfWjRDhmFMxe2SsW64QrZ84AOhvqS7Y+PoQ=
96 | go.opentelemetry.io/otel/metric v1.34.0/go.mod h1:CEDrp0fy2D0MvkXE+dPV7cMi8tWZwX3dmaIhwPOaqHE=
97 | go.opentelemetry.io/otel/sdk v1.34.0 h1:95zS4k/2GOy069d321O8jWgYsW3MzVV+KuSPKp7Wr1A=
98 | go.opentelemetry.io/otel/sdk v1.34.0/go.mod h1:0e/pNiaMAqaykJGKbi+tSjWfNNHMTxoC9qANsCzbyxU=
99 | go.opentelemetry.io/otel/sdk/metric v1.34.0 h1:5CeK9ujjbFVL5c1PhLuStg1wxA7vQv7ce1EK0Gyvahk=
100 | go.opentelemetry.io/otel/sdk/metric v1.34.0/go.mod h1:jQ/r8Ze28zRKoNRdkjCZxfs6YvBTG1+YIqyFVFYec5w=
101 | go.opentelemetry.io/otel/trace v1.34.0 h1:+ouXS2V8Rd4hp4580a8q23bg0azF2nI8cqLYnC8mh/k=
102 | go.opentelemetry.io/otel/trace v1.34.0/go.mod h1:Svm7lSjQD7kG7KJ/MUHPVXSDGz2OX4h0M2jHBhmSfRE=
103 | go.opentelemetry.io/proto/otlp v1.5.0 h1:xJvq7gMzB31/d406fB8U5CBdyQGw4P399D1aQWU/3i4=
104 | go.opentelemetry.io/proto/otlp v1.5.0/go.mod h1:keN8WnHxOy8PG0rQZjJJ5A2ebUoafqWp0eVQ4yIXvJ4=
105 | go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
106 | go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
107 | golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34=
108 | golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc=
109 | golang.org/x/net v0.38.0 h1:vRMAPTMaeGqVhG5QyLJHqNDwecKTomGeqbnfZyKlBI8=
110 | golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8=
111 | golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw=
112 | golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
113 | golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik=
114 | golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
115 | golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY=
116 | golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4=
117 | google.golang.org/genproto/googleapis/api v0.0.0-20250115164207-1a7da9e5054f h1:gap6+3Gk41EItBuyi4XX/bp4oqJ3UwuIMl25yGinuAA=
118 | google.golang.org/genproto/googleapis/api v0.0.0-20250115164207-1a7da9e5054f/go.mod h1:Ic02D47M+zbarjYYUlK57y316f2MoN0gjAwI3f2S95o=
119 | google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f h1:OxYkA3wjPsZyBylwymxSHa7ViiW1Sml4ToBrncvFehI=
120 | google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f/go.mod h1:+2Yz8+CLJbIfL9z73EW45avw8Lmge3xVElCP9zEKi50=
121 | google.golang.org/grpc v1.69.4 h1:MF5TftSMkd8GLw/m0KM6V8CMOCY6NZ1NQDPGFgbTt4A=
122 | google.golang.org/grpc v1.69.4/go.mod h1:vyjdE6jLBI76dgpDojsFGNaHlxdjXN9ghpnd2o7JGZ4=
123 | google.golang.org/protobuf v1.36.3 h1:82DV7MYdb8anAVi3qge1wSnMDrnKK7ebr+I0hHRN1BU=
124 | google.golang.org/protobuf v1.36.3/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
125 | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
126 | gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
127 | gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
128 | gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
129 | gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
130 | gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
131 | gorm.io/driver/postgres v1.5.11 h1:ubBVAfbKEUld/twyKZ0IYn9rSQh448EdelLYk9Mv314=
132 | gorm.io/driver/postgres v1.5.11/go.mod h1:DX3GReXH+3FPWGrrgffdvCk3DQ1dwDPdmbenSkweRGI=
133 | gorm.io/gorm v1.25.12 h1:I0u8i2hWQItBq1WfE0o2+WuL9+8L21K9e2HHSTE/0f8=
134 | gorm.io/gorm v1.25.12/go.mod h1:xh7N7RHfYlNc5EmcI/El95gXusucDrQnHXe0+CgWcLQ=
135 |
--------------------------------------------------------------------------------
/events-processor/main.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "fmt"
5 | "os"
6 | "time"
7 |
8 | "github.com/getsentry/sentry-go"
9 |
10 | "github.com/getlago/lago/events-processor/processors"
11 | )
12 |
13 | func main() {
14 | err := sentry.Init(sentry.ClientOptions{
15 | Dsn: os.Getenv("SENTRY_DSN"),
16 | Environment: os.Getenv("ENV"),
17 | Debug: false,
18 | AttachStacktrace: true,
19 | })
20 |
21 | if err != nil {
22 | fmt.Printf("Sentry initialization failed: %v\n", err)
23 | }
24 |
25 | defer sentry.Flush(2 * time.Second)
26 |
27 | // start processing events & loop forever
28 | processors.StartProcessingEvents()
29 | }
30 |
--------------------------------------------------------------------------------
/events-processor/models/billable_metrics.go:
--------------------------------------------------------------------------------
1 | package models
2 |
3 | import (
4 | "time"
5 |
6 | "gorm.io/gorm"
7 |
8 | "github.com/getlago/lago/events-processor/utils"
9 | )
10 |
11 | type BillableMetric struct {
12 | ID string `gorm:"primaryKey;->"`
13 | OrganizationID string `gorm:"->"`
14 | Code string `gorm:"->"`
15 | FieldName string `gorm:"->"`
16 | Expression string `gorm:"->"`
17 | CreatedAt time.Time `gorm:"->"`
18 | UpdatedAt time.Time `gorm:"->"`
19 | DeletedAt gorm.DeletedAt `gorm:"index;->"`
20 | }
21 |
22 | func (store *ApiStore) FetchBillableMetric(organizationID string, code string) utils.Result[*BillableMetric] {
23 | var bm BillableMetric
24 | result := store.db.Connection.First(&bm, "organization_id = ? AND code = ?", organizationID, code)
25 | if result.Error != nil {
26 | return failedBillableMetricResult(result.Error)
27 | }
28 |
29 | return utils.SuccessResult(&bm)
30 | }
31 |
32 | func failedBillableMetricResult(err error) utils.Result[*BillableMetric] {
33 | result := utils.FailedResult[*BillableMetric](err)
34 |
35 | if err.Error() == gorm.ErrRecordNotFound.Error() {
36 | result = result.NonCapturable().NonRetryable()
37 | }
38 |
39 | return result
40 | }
41 |
--------------------------------------------------------------------------------
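A hedged end-to-end sketch (placeholder DSN, organization ID, and code) of the Result-based error handling this store exposes:

```go
package main

import (
	"fmt"
	"log"

	"github.com/getlago/lago/events-processor/config/database"
	"github.com/getlago/lago/events-processor/models"
)

func main() {
	db, err := database.NewConnection(database.DBConfig{
		Url:      "postgresql://user:password@localhost:5432/lago", // placeholder DSN
		MaxConns: 10,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	store := models.NewApiStore(db)

	result := store.FetchBillableMetric("org-id", "api_calls") // placeholder args
	if result.Failure() {
		// gorm.ErrRecordNotFound is marked non-capturable/non-retryable above,
		// so a missing metric is distinguishable from a transient DB error.
		fmt.Println("lookup failed:", result.ErrorMsg(), "retryable:", result.IsRetryable())
		return
	}
	fmt.Println("field name:", result.Value().FieldName)
}
```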
/events-processor/models/billable_metrics_test.go:
--------------------------------------------------------------------------------
1 | package models
2 |
3 | import (
4 | "errors"
5 | "regexp"
6 | "testing"
7 | "time"
8 |
9 | "github.com/DATA-DOG/go-sqlmock"
10 | "github.com/stretchr/testify/assert"
11 | "gorm.io/gorm"
12 | )
13 |
14 | var fetchBillableMetricQuery = regexp.QuoteMeta(`
15 | SELECT * FROM "billable_metrics"
16 | WHERE (organization_id = $1 AND code = $2)
17 | AND "billable_metrics"."deleted_at" IS NULL
18 | ORDER BY "billable_metrics"."id"
19 | LIMIT $3`,
20 | )
21 |
22 | func TestFetchBillableMetric(t *testing.T) {
23 | t.Run("should return billable metric when found", func(t *testing.T) {
24 | // Setup
25 | store, mock, cleanup := setupApiStore(t)
26 | defer cleanup()
27 |
28 | orgID := "1a901a90-1a90-1a90-1a90-1a901a901a90"
29 | code := "api_calls"
30 | now := time.Now()
31 |
32 | // Define expected rows and columns
33 | columns := []string{"id", "organization_id", "code", "field_name", "expression", "created_at", "updated_at", "deleted_at"}
34 | rows := sqlmock.NewRows(columns).
35 | AddRow("bm123", orgID, code, "api_requests", "count", now, now, nil)
36 |
37 | // Expect the query
38 | mock.ExpectQuery(fetchBillableMetricQuery).
39 | WithArgs(orgID, code, 1).
40 | WillReturnRows(rows)
41 |
42 | // Execute
43 | result := store.FetchBillableMetric(orgID, code)
44 |
45 | // Assert
46 | assert.True(t, result.Success())
47 |
48 | metric := result.Value()
49 | assert.NotNil(t, metric)
50 | assert.Equal(t, "bm123", metric.ID)
51 | assert.Equal(t, orgID, metric.OrganizationID)
52 | assert.Equal(t, code, metric.Code)
53 | assert.Equal(t, "api_requests", metric.FieldName)
54 | assert.Equal(t, "count", metric.Expression)
55 | })
56 |
57 | t.Run("should return error when billable metric not found", func(t *testing.T) {
58 | // Setup
59 | store, mock, cleanup := setupApiStore(t)
60 | defer cleanup()
61 |
62 | orgID := "1a901a90-1a90-1a90-1a90-1a901a901a90"
63 | code := "api_calls"
64 |
65 | // Expect the query but return error
66 | mock.ExpectQuery(fetchBillableMetricQuery).
67 | WithArgs(orgID, code, 1).
68 | WillReturnError(gorm.ErrRecordNotFound)
69 |
70 | // Execute
71 | result := store.FetchBillableMetric(orgID, code)
72 |
73 | // Assert
74 | assert.False(t, result.Success())
75 | assert.NotNil(t, result.Error())
76 | assert.Equal(t, gorm.ErrRecordNotFound, result.Error())
77 | assert.Nil(t, result.Value())
78 | assert.False(t, result.IsCapturable())
79 | assert.False(t, result.IsRetryable())
80 | })
81 |
82 | t.Run("should handle database connection error", func(t *testing.T) {
83 | // Setup
84 | store, mock, cleanup := setupApiStore(t)
85 | defer cleanup()
86 |
87 | orgID := "1a901a90-1a90-1a90-1a90-1a901a901a90"
88 | code := "api_calls"
89 | dbError := errors.New("database connection failed")
90 |
91 | // Expect the query but return error
92 | mock.ExpectQuery(fetchBillableMetricQuery).
93 | WithArgs(orgID, code, 1).
94 | WillReturnError(dbError)
95 |
96 | // Execute
97 | result := store.FetchBillableMetric(orgID, code)
98 |
99 | // Assert
100 | assert.False(t, result.Success())
101 | assert.NotNil(t, result.Error())
102 | assert.Equal(t, dbError, result.Error())
103 | assert.Nil(t, result.Value())
104 | assert.True(t, result.IsCapturable())
105 | assert.True(t, result.IsRetryable())
106 | })
107 | }
108 |
--------------------------------------------------------------------------------
/events-processor/models/charges.go:
--------------------------------------------------------------------------------
1 | package models
2 |
3 | import (
4 | "time"
5 |
6 | "gorm.io/gorm"
7 |
8 | "github.com/getlago/lago/events-processor/utils"
9 | )
10 |
11 | type Charge struct {
12 | ID string `gorm:"primaryKey;->"`
13 | BillableMetricID string `gorm:"->"`
14 | PlanID string `gorm:"->"`
15 | PayInAdvance bool `gorm:"->"`
16 | CreatedAt time.Time `gorm:"->"`
17 | UpdatedAt time.Time `gorm:"->"`
18 | DeletedAt gorm.DeletedAt `gorm:"index;->"`
19 | }
20 |
21 | func (store *ApiStore) AnyInAdvanceCharge(planID string, billableMetricID string) utils.Result[bool] {
22 | var count int64
23 |
24 | result := store.db.Connection.Model(&Charge{}).
25 | Where("plan_id = ? AND billable_metric_id = ?", planID, billableMetricID).
26 | Where("pay_in_advance = true").
27 | Count(&count)
28 | if result.Error != nil {
29 | return utils.FailedBoolResult(result.Error)
30 | }
31 |
32 | return utils.SuccessResult(count > 0)
33 | }
34 |
--------------------------------------------------------------------------------
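A small sketch of consuming the boolean Result; the store and IDs are assumed to come from elsewhere:

```go
package example

import (
	"fmt"

	"github.com/getlago/lago/events-processor/models"
)

// checkInAdvance shows how a caller might branch on AnyInAdvanceCharge.
func checkInAdvance(store *models.ApiStore, planID, metricID string) {
	result := store.AnyInAdvanceCharge(planID, metricID)
	if result.Failure() {
		fmt.Println("charge lookup failed:", result.ErrorMsg())
		return
	}
	if result.Value() {
		fmt.Println("plan has at least one pay-in-advance charge for this metric")
	}
}
```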
/events-processor/models/charges_test.go:
--------------------------------------------------------------------------------
1 | package models
2 |
3 | import (
4 | "errors"
5 | "regexp"
6 | "testing"
7 |
8 | "github.com/DATA-DOG/go-sqlmock"
9 | "github.com/stretchr/testify/assert"
10 | )
11 |
12 | var anyInAdvanceChargeQuery = regexp.QuoteMeta(`
13 | SELECT count(*) FROM "charges"
14 | WHERE (plan_id = $1 AND billable_metric_id = $2)
15 | AND pay_in_advance = true
16 | AND "charges"."deleted_at" IS NULL`,
17 | )
18 |
19 | func TestAnyInAdvanceCharge(t *testing.T) {
20 | t.Run("should return true when in advance charge exists", func(t *testing.T) {
21 | // Setup
22 | store, mock, cleanup := setupApiStore(t)
23 | defer cleanup()
24 |
25 | planID := "1a901a90-1a90-1a90-1a90-1a901a901a90"
26 | bmID := "1a901a90-1a90-1a90-1a90-1a901a901a91"
27 |
28 | countRows := sqlmock.NewRows([]string{"count"}).AddRow(3)
29 |
30 | // Expect the query but return error
31 | mock.ExpectQuery(anyInAdvanceChargeQuery).
32 | WithArgs(planID, bmID).
33 | WillReturnRows(countRows)
34 |
35 | // Execute
36 | result := store.AnyInAdvanceCharge(planID, bmID)
37 |
38 | // Assert
39 | assert.True(t, result.Success())
40 | assert.Equal(t, true, result.Value())
41 | })
42 |
43 | t.Run("should return false when no in advance charge exists", func(t *testing.T) {
44 | // Setup
45 | store, mock, cleanup := setupApiStore(t)
46 | defer cleanup()
47 |
48 | planID := "1a901a90-1a90-1a90-1a90-1a901a901a90"
49 | bmID := "1a901a90-1a90-1a90-1a90-1a901a901a91"
50 |
51 | countRows := sqlmock.NewRows([]string{"count"}).AddRow(0)
52 |
53 | // Expect the query but return error
54 | mock.ExpectQuery(anyInAdvanceChargeQuery).
55 | WithArgs(planID, bmID).
56 | WillReturnRows(countRows)
57 |
58 | // Execute
59 | result := store.AnyInAdvanceCharge(planID, bmID)
60 |
61 | // Assert
62 | assert.True(t, result.Success())
63 | assert.Equal(t, false, result.Value())
64 | })
65 |
66 | t.Run("should handle database connection error", func(t *testing.T) {
67 | // Setup
68 | store, mock, cleanup := setupApiStore(t)
69 | defer cleanup()
70 |
71 | planID := "1a901a90-1a90-1a90-1a90-1a901a901a90"
72 | bmID := "1a901a90-1a90-1a90-1a90-1a901a901a91"
73 | dbError := errors.New("database connection failed")
74 |
75 | // Expect the query but return error
76 | mock.ExpectQuery(anyInAdvanceChargeQuery).
77 | WithArgs(planID, bmID).
78 | WillReturnError(dbError)
79 |
80 | // Execute
81 | result := store.AnyInAdvanceCharge(planID, bmID)
82 |
83 | // Assert
84 | assert.False(t, result.Success())
85 | assert.NotNil(t, result.Error())
86 | assert.Equal(t, dbError, result.Error())
87 | assert.False(t, result.Value())
88 | })
89 | }
90 |
--------------------------------------------------------------------------------
/events-processor/models/event.go:
--------------------------------------------------------------------------------
1 | package models
2 |
3 | import (
4 | "fmt"
5 | "time"
6 |
7 | "github.com/getlago/lago/events-processor/utils"
8 | )
9 |
10 | const HTTP_RUBY string = "http_ruby"
11 |
12 | type Event struct {
13 | OrganizationID string `json:"organization_id"`
14 | ExternalSubscriptionID string `json:"external_subscription_id"`
15 | TransactionID string `json:"transaction_id"`
16 | Code string `json:"code"`
17 | Properties map[string]any `json:"properties"`
18 | PreciseTotalAmountCents string `json:"precise_total_amount_cents"`
19 | Source string `json:"source,omitempty"`
20 | Timestamp any `json:"timestamp"`
21 | SourceMetadata *SourceMetadata `json:"source_metadata"`
22 | IngestedAt utils.CustomTime `json:"ingested_at"`
23 | }
24 |
25 | type SourceMetadata struct {
26 | ApiPostProcess bool `json:"api_post_processed"`
27 | }
28 |
29 | type EnrichedEvent struct {
30 | IntialEvent *Event `json:"-"`
31 |
32 | OrganizationID string `json:"organization_id"`
33 | ExternalSubscriptionID string `json:"external_subscription_id"`
34 | TransactionID string `json:"transaction_id"`
35 | Code string `json:"code"`
36 | Properties map[string]any `json:"properties"`
37 | PreciseTotalAmountCents string `json:"precise_total_amount_cents"`
38 | Source string `json:"source,omitempty"`
39 | Value *string `json:"value"`
40 | Timestamp float64 `json:"timestamp"`
41 | TimestampStr string `json:"-"`
42 | Time time.Time `json:"-"`
43 | }
44 |
45 | type FailedEvent struct {
46 | Event Event `json:"event"`
47 | InitialErrorMessage string `json:"initial_error_message"`
48 | ErrorMessage string `json:"error_message"`
49 | ErrorCode string `json:"error_code"`
50 | FailedAt time.Time `json:"failed_at"`
51 | }
52 |
53 | func (ev *Event) ToEnrichedEvent() utils.Result[*EnrichedEvent] {
54 | er := &EnrichedEvent{
55 | IntialEvent: ev,
56 | OrganizationID: ev.OrganizationID,
57 | ExternalSubscriptionID: ev.ExternalSubscriptionID,
58 | TransactionID: ev.TransactionID,
59 | Code: ev.Code,
60 | Properties: ev.Properties,
61 | PreciseTotalAmountCents: ev.PreciseTotalAmountCents,
62 | Source: ev.Source,
63 | }
64 |
65 | timestampResult := utils.ToFloat64Timestamp(ev.Timestamp)
66 | if timestampResult.Failure() {
67 | return utils.FailedResult[*EnrichedEvent](timestampResult.Error()).NonRetryable()
68 | }
69 | er.Timestamp = timestampResult.Value()
70 | er.TimestampStr = fmt.Sprintf("%f", er.Timestamp)
71 |
72 | timeResult := utils.ToTime(ev.Timestamp)
73 | if timeResult.Failure() {
74 | return utils.FailedResult[*EnrichedEvent](timeResult.Error()).NonRetryable()
75 | }
76 | er.Time = timeResult.Value()
77 |
78 | return utils.SuccessResult(er)
79 | }
80 |
81 | func (ev *Event) NotAPIPostProcessed() bool {
82 | if ev.Source != HTTP_RUBY {
83 | return true
84 | }
85 |
86 | return ev.SourceMetadata == nil || !ev.SourceMetadata.ApiPostProcess
87 | }
88 |
--------------------------------------------------------------------------------
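A sketch, grounded in the tests that follow, of the timestamp normalization ToEnrichedEvent performs; numeric epoch timestamps succeed, anything else yields a non-retryable failure:

```go
package example

import (
	"fmt"

	"github.com/getlago/lago/events-processor/models"
)

func enrich() {
	event := models.Event{
		OrganizationID:         "org-id", // placeholder IDs
		ExternalSubscriptionID: "sub-id",
		Code:                   "api_calls",
		Timestamp:              1741007009, // seconds since epoch
	}

	result := event.ToEnrichedEvent()
	if result.Failure() {
		fmt.Println("non-retryable:", result.ErrorMsg())
		return
	}
	fmt.Println(result.Value().Time.UTC()) // 2025-03-03 13:03:29 +0000 UTC
}
```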
/events-processor/models/event_test.go:
--------------------------------------------------------------------------------
1 | package models
2 |
3 | import (
4 | "testing"
5 | "time"
6 |
7 | "github.com/stretchr/testify/assert"
8 | )
9 |
10 | func TestToEnrichedEvent(t *testing.T) {
11 | t.Run("With valid time format", func(t *testing.T) {
12 | expectedTime, _ := time.Parse(time.RFC3339, "2025-03-03T13:03:29Z")
13 |
14 | properties := map[string]any{
15 | "value": "12.12",
16 | }
17 |
18 | event := Event{
19 | OrganizationID: "1a901a90-1a90-1a90-1a90-1a901a901a90",
20 | ExternalSubscriptionID: "sub_id",
21 | Code: "api_calls",
22 | Properties: properties,
23 | PreciseTotalAmountCents: "100.00",
24 | Source: HTTP_RUBY,
25 | Timestamp: 1741007009,
26 | }
27 |
28 | result := event.ToEnrichedEvent()
29 | assert.True(t, result.Success())
30 |
31 | ere := result.Value()
32 | assert.Equal(t, event.OrganizationID, ere.OrganizationID)
33 | assert.Equal(t, event.ExternalSubscriptionID, ere.ExternalSubscriptionID)
34 | assert.Equal(t, event.Code, ere.Code)
35 | assert.Equal(t, event.Properties, ere.Properties)
36 | assert.Equal(t, event.PreciseTotalAmountCents, ere.PreciseTotalAmountCents)
37 | assert.Equal(t, event.Source, ere.Source)
38 | assert.Equal(t, 1741007009.0, ere.Timestamp)
39 | assert.Equal(t, expectedTime, ere.Time)
40 | })
41 |
42 | t.Run("With unsupported time format", func(t *testing.T) {
43 | event := Event{
44 | OrganizationID: "1a901a90-1a90-1a90-1a90-1a901a901a90",
45 | ExternalSubscriptionID: "sub_id",
46 | Code: "api_calls",
47 | PreciseTotalAmountCents: "100.00",
48 | Source: HTTP_RUBY,
49 | Timestamp: "2025-03-03T13:03:29Z",
50 | }
51 |
52 | result := event.ToEnrichedEvent()
53 | assert.False(t, result.Success())
54 | assert.Equal(t, "strconv.ParseFloat: parsing \"2025-03-03T13:03:29Z\": invalid syntax", result.ErrorMsg())
55 | assert.False(t, result.IsRetryable())
56 | })
57 | }
58 |
59 | func TestNotAPIPostProcessed(t *testing.T) {
60 | t.Run("When event source is not HTTP_RUBY", func(t *testing.T) {
61 | event := Event{
62 | Source: "REDPANDA_CONNECT",
63 | }
64 |
65 | assert.True(t, event.NotAPIPostProcessed())
66 | })
67 |
68 | t.Run("When event source is HTTP_RUBY without source metadata", func(t *testing.T) {
69 | event := Event{
70 | Source: HTTP_RUBY,
71 | }
72 |
73 | assert.True(t, event.NotAPIPostProcessed())
74 | })
75 |
76 | t.Run("When event source is HTTP_RUBY with source metadata", func(t *testing.T) {
77 | event := Event{
78 | Source: HTTP_RUBY,
79 | SourceMetadata: &SourceMetadata{
80 | ApiPostProcess: true,
81 | },
82 | }
83 | assert.False(t, event.NotAPIPostProcessed())
84 |
85 | event.SourceMetadata.ApiPostProcess = false
86 | assert.True(t, event.NotAPIPostProcessed())
87 | })
88 | }
89 |
--------------------------------------------------------------------------------
/events-processor/models/stores.go:
--------------------------------------------------------------------------------
1 | package models
2 |
3 | import (
4 | "context"
5 | "fmt"
6 |
7 | "github.com/getlago/lago/events-processor/config/database"
8 | "github.com/getlago/lago/events-processor/config/redis"
9 | )
10 |
11 | type ApiStore struct {
12 | db *database.DB
13 | }
14 |
15 | func NewApiStore(db *database.DB) *ApiStore {
16 | return &ApiStore{
17 | db: db,
18 | }
19 | }
20 |
21 | type FlagStore struct {
22 | name string
23 | context context.Context
24 | db *redis.RedisDB
25 | }
26 |
27 | type Flagger interface {
28 | Flag(value string) error
29 | }
30 |
31 | func NewFlagStore(ctx context.Context, redis *redis.RedisDB, name string) *FlagStore {
32 | return &FlagStore{
33 | name: name,
34 | context: ctx,
35 | db: redis,
36 | }
37 | }
38 |
39 | func (store *FlagStore) Flag(value string) error {
40 | result := store.db.Client.SAdd(store.context, store.name, fmt.Sprintf("%s", value))
41 | if err := result.Err(); err != nil {
42 | return err
43 | }
44 |
45 | return nil
46 | }
47 |
48 | func (store *FlagStore) Close() error {
49 | return store.db.Client.Close()
50 | }
51 |
--------------------------------------------------------------------------------
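A usage sketch for FlagStore; the Redis address and the set name are placeholders:

```go
package main

import (
	"context"
	"log"

	"github.com/getlago/lago/events-processor/config/redis"
	"github.com/getlago/lago/events-processor/models"
)

func main() {
	ctx := context.Background()

	rdb, err := redis.NewRedisDB(ctx, redis.RedisConfig{Address: "localhost:6379", DB: 1})
	if err != nil {
		log.Fatalf("connecting to redis: %v", err)
	}

	// Each Flag call adds a member to the named Redis set.
	flags := models.NewFlagStore(ctx, rdb, "flagged_subscriptions") // placeholder set name
	defer flags.Close()

	if err := flags.Flag("org-id/sub-external-id"); err != nil {
		log.Printf("flagging failed: %v", err)
	}
}
```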
/events-processor/models/stores_test.go:
--------------------------------------------------------------------------------
1 | package models
2 |
3 | import (
4 | "testing"
5 |
6 | "github.com/DATA-DOG/go-sqlmock"
7 |
8 | "github.com/getlago/lago/events-processor/tests"
9 | )
10 |
11 | func setupApiStore(t *testing.T) (*ApiStore, sqlmock.Sqlmock, func()) {
12 | db, mock, cleanup := tests.SetupMockStore(t)
13 |
14 | store := &ApiStore{
15 | db: db,
16 | }
17 |
18 | return store, mock, cleanup
19 | }
20 |
--------------------------------------------------------------------------------
/events-processor/models/subscriptions.go:
--------------------------------------------------------------------------------
1 | package models
2 |
3 | import (
4 | "database/sql"
5 | "time"
6 |
7 | "gorm.io/gorm"
8 |
9 | "github.com/getlago/lago/events-processor/utils"
10 | )
11 |
12 | type Subscription struct {
13 | ID string `gorm:"primaryKey;->"`
14 | ExternalID string `gorm:"->"`
15 | PlanID string `gorm:"->"`
16 | CreatedAt time.Time `gorm:"->"`
17 | UpdatedAt time.Time `gorm:"->"`
18 | StartedAt sql.NullTime `gorm:"->"`
19 | TerminatedAt sql.NullTime `gorm:"->"`
20 | }
21 |
22 | func (store *ApiStore) FetchSubscription(organizationID string, externalID string, timestamp time.Time) utils.Result[*Subscription] {
23 | var sub Subscription
24 |
25 | var conditions = `
26 | customers.organization_id = ?
27 | AND subscriptions.external_id = ?
28 | AND date_trunc('millisecond', subscriptions.started_at::timestamp) <= ?::timestamp
29 | AND (subscriptions.terminated_at IS NULL OR date_trunc('millisecond', subscriptions.terminated_at::timestamp) >= ?)
30 | `
31 | result := store.db.Connection.
32 | Table("subscriptions").
33 | Unscoped().
34 | Joins("INNER JOIN customers ON customers.id = subscriptions.customer_id").
35 | Where(conditions, organizationID, externalID, timestamp, timestamp).
36 | Order("terminated_at DESC NULLS FIRST, started_at DESC").
37 | Limit(1).
38 | Find(&sub)
39 |
40 | if result.Error != nil {
41 | return failedSubscriptionResult(result.Error)
42 | }
43 | if sub.ID == "" {
44 | return failedSubscriptionResult(gorm.ErrRecordNotFound)
45 | }
46 |
47 | return utils.SuccessResult(&sub)
48 | }
49 |
50 | func failedSubscriptionResult(err error) utils.Result[*Subscription] {
51 | result := utils.FailedResult[*Subscription](err)
52 |
53 | if err.Error() == gorm.ErrRecordNotFound.Error() {
54 | result = result.NonCapturable().NonRetryable()
55 | }
56 |
57 | return result
58 | }
59 |
--------------------------------------------------------------------------------
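A sketch of resolving the subscription that was active at an event's timestamp; IDs are placeholders:

```go
package example

import (
	"errors"
	"fmt"
	"time"

	"gorm.io/gorm"

	"github.com/getlago/lago/events-processor/models"
)

func activeSubscription(store *models.ApiStore, ts time.Time) {
	result := store.FetchSubscription("org-id", "sub-external-id", ts)
	if result.Failure() {
		if errors.Is(result.Error(), gorm.ErrRecordNotFound) {
			fmt.Println("no subscription covered this timestamp")
			return
		}
		fmt.Println("transient lookup error:", result.ErrorMsg())
		return
	}
	fmt.Println("plan:", result.Value().PlanID)
}
```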
/events-processor/models/subscriptions_test.go:
--------------------------------------------------------------------------------
1 | package models
2 |
3 | import (
4 | "errors"
5 | "regexp"
6 | "testing"
7 | "time"
8 |
9 | "github.com/DATA-DOG/go-sqlmock"
10 | "github.com/stretchr/testify/assert"
11 | "gorm.io/gorm"
12 | )
13 |
14 | var fetchSubscriptionQuery = regexp.QuoteMeta(`
15 | SELECT
16 | "subscriptions"."id","subscriptions"."external_id","subscriptions"."plan_id","subscriptions"."created_at","subscriptions"."updated_at","subscriptions"."started_at","subscriptions"."terminated_at"
17 | FROM "subscriptions"
18 | INNER JOIN customers ON customers.id = subscriptions.customer_id
19 | WHERE customers.organization_id = $1
20 | AND subscriptions.external_id = $2
21 | AND date_trunc('millisecond', subscriptions.started_at::timestamp) <= $3::timestamp
22 | AND (subscriptions.terminated_at IS NULL OR date_trunc('millisecond', subscriptions.terminated_at::timestamp) >= $4)
23 | ORDER BY terminated_at DESC NULLS FIRST, started_at DESC LIMIT $5`,
24 | )
25 |
26 | func TestFetchSubscription(t *testing.T) {
27 | t.Run("should return subscription when found", func(t *testing.T) {
28 | // Setup
29 | store, mock, cleanup := setupApiStore(t)
30 | defer cleanup()
31 |
32 | orgID := "1a901a90-1a90-1a90-1a90-1a901a901a90"
33 | externalID := "1a901a90-1a90-1a90-1a90-1a901a901a91"
34 | timestamp := time.Now()
35 |
36 | // Define expected rows and columns
37 | columns := []string{"id", "external_id", "plan_id", "created_at", "updated_at", "started_at", "terminated_at"}
38 | rows := sqlmock.NewRows(columns).
39 | AddRow("sub123", externalID, "plan123", timestamp, timestamp, timestamp, timestamp)
40 |
41 | // Expect the query
42 | mock.ExpectQuery(fetchSubscriptionQuery).
43 | WithArgs(orgID, externalID, timestamp, timestamp, 1).
44 | WillReturnRows(rows)
45 |
46 | // Execute
47 | result := store.FetchSubscription(orgID, externalID, timestamp)
48 |
49 | // Assert
50 | assert.True(t, result.Success())
51 |
52 | sub := result.Value()
53 | assert.NotNil(t, sub)
54 | assert.Equal(t, "sub123", sub.ID)
55 | assert.Equal(t, externalID, sub.ExternalID)
56 | assert.Equal(t, "plan123", sub.PlanID)
57 | })
58 |
59 | t.Run("should return error subscription not found", func(t *testing.T) {
60 | // Setup
61 | store, mock, cleanup := setupApiStore(t)
62 | defer cleanup()
63 |
64 | orgID := "1a901a90-1a90-1a90-1a90-1a901a901a90"
65 | externalID := "1a901a90-1a90-1a90-1a90-1a901a901a91"
66 | timestamp := time.Now()
67 |
68 | // Expect the query but return error
69 | mock.ExpectQuery(fetchSubscriptionQuery).
70 | WithArgs(orgID, externalID, timestamp, timestamp, 1).
71 | WillReturnError(gorm.ErrRecordNotFound)
72 |
73 | // Execute
74 | result := store.FetchSubscription(orgID, externalID, timestamp)
75 |
76 | // Assert
77 | assert.False(t, result.Success())
78 | assert.NotNil(t, result.Error())
79 | assert.Equal(t, gorm.ErrRecordNotFound, result.Error())
80 | assert.Nil(t, result.Value())
81 | assert.False(t, result.IsCapturable())
82 | assert.False(t, result.IsRetryable())
83 | })
84 |
85 | t.Run("should handle database connection error", func(t *testing.T) {
86 | // Setup
87 | store, mock, cleanup := setupApiStore(t)
88 | defer cleanup()
89 |
90 | orgID := "1a901a90-1a90-1a90-1a90-1a901a901a90"
91 | externalID := "1a901a90-1a90-1a90-1a90-1a901a901a91"
92 | timestamp := time.Now()
93 | dbError := errors.New("database connection failed")
94 |
95 | // Expect the query but return error
96 | mock.ExpectQuery(fetchSubscriptionQuery).
97 | WithArgs(orgID, externalID, timestamp, timestamp, 1).
98 | WillReturnError(dbError)
99 |
100 | // Execute
101 | result := store.FetchSubscription(orgID, externalID, timestamp)
102 |
103 | // Assert
104 | assert.False(t, result.Success())
105 | assert.NotNil(t, result.Error())
106 | assert.Equal(t, dbError, result.Error())
107 | assert.Nil(t, result.Value())
108 | assert.True(t, result.IsCapturable())
109 | assert.True(t, result.IsRetryable())
110 | })
111 | }
112 |
--------------------------------------------------------------------------------
/events-processor/processors/events.go:
--------------------------------------------------------------------------------
1 | package processors
2 |
3 | import (
4 | "context"
5 | "encoding/json"
6 | "fmt"
7 | "log/slog"
8 | "sync"
9 | "time"
10 |
11 | "github.com/getlago/lago-expression/expression-go"
12 | "github.com/twmb/franz-go/pkg/kgo"
13 | "go.opentelemetry.io/otel/attribute"
14 |
15 | tracer "github.com/getlago/lago/events-processor/config"
16 | "github.com/getlago/lago/events-processor/config/kafka"
17 | "github.com/getlago/lago/events-processor/models"
18 | "github.com/getlago/lago/events-processor/utils"
19 | )
20 |
21 | func processEvents(records []*kgo.Record) []*kgo.Record {
22 | ctx := context.Background()
23 | span := tracer.GetTracerSpan(ctx, "post_process", "PostProcess.ProcessEvents")
24 | recordsAttr := attribute.Int("records.length", len(records))
25 | span.SetAttributes(recordsAttr)
26 | defer span.End()
27 |
28 | wg := sync.WaitGroup{}
29 | wg.Add(len(records))
30 |
31 | var mu sync.Mutex
32 | processedRecords := make([]*kgo.Record, 0)
33 |
34 | for _, record := range records {
35 | go func(record *kgo.Record) {
36 | defer wg.Done()
37 |
38 | sp := tracer.GetTracerSpan(ctx, "post_process", "PostProcess.ProcessOneEvent")
39 | defer sp.End()
40 |
41 | event := models.Event{}
42 | err := json.Unmarshal(record.Value, &event)
43 | if err != nil {
44 | logger.Error("Error unmarshalling message", slog.String("error", err.Error()))
45 | utils.CaptureError(err)
46 |
47 | mu.Lock()
48 | 			// If we fail to unmarshal the record, we should commit it, as it will fail forever
49 | processedRecords = append(processedRecords, record)
50 | mu.Unlock()
51 | return
52 | }
53 |
54 | result := processEvent(&event)
55 | if result.Failure() {
56 | logger.Error(
57 | result.ErrorMessage(),
58 | slog.String("error_code", result.ErrorCode()),
59 | slog.String("error", result.ErrorMsg()),
60 | )
61 |
62 | if result.IsCapturable() {
63 | utils.CaptureErrorResultWithExtra(result, "event", event)
64 | }
65 |
66 | if result.IsRetryable() && time.Since(event.IngestedAt.Time()) < 12*time.Hour {
67 | 				// For retryable errors, we should avoid committing the record,
68 | 				// so it will be consumed again and reprocessed.
69 | 				// Events older than 12 hours fall through and are pushed to the dead letter queue instead.
70 | return
71 | }
72 |
73 | // Push failed records to the dead letter queue
74 | go produceToDeadLetterQueue(event, result)
75 | }
76 |
77 | // Track processed records
78 | mu.Lock()
79 | processedRecords = append(processedRecords, record)
80 | mu.Unlock()
81 | }(record)
82 | }
83 |
84 | wg.Wait()
85 |
86 | return processedRecords
87 | }
88 |
89 | func processEvent(event *models.Event) utils.Result[*models.EnrichedEvent] {
90 | enrichedEventResult := event.ToEnrichedEvent()
91 | if enrichedEventResult.Failure() {
92 | return failedResult(enrichedEventResult, "build_enriched_event", "Error while converting event to enriched event")
93 | }
94 | enrichedEvent := enrichedEventResult.Value()
95 |
96 | bmResult := apiStore.FetchBillableMetric(event.OrganizationID, event.Code)
97 | if bmResult.Failure() {
98 | return failedResult(bmResult, "fetch_billable_metric", "Error fetching billable metric")
99 | }
100 | bm := bmResult.Value()
101 |
102 | subResult := apiStore.FetchSubscription(event.OrganizationID, event.ExternalSubscriptionID, enrichedEvent.Time)
103 | if subResult.Failure() && subResult.IsCapturable() {
104 | 		// A missing subscription is expected and non-capturable; we keep processing the event and only abort on capturable errors
105 | return failedResult(subResult, "fetch_subscription", "Error fetching subscription")
106 | }
107 | sub := subResult.Value()
108 |
109 | if event.Source != models.HTTP_RUBY {
110 | expressionResult := evaluateExpression(enrichedEvent, bm)
111 | if expressionResult.Failure() {
112 | return failedResult(expressionResult, "evaluate_expression", "Error evaluating custom expression")
113 | }
114 | }
115 |
116 | 	value := fmt.Sprintf("%v", event.Properties[bm.FieldName])
117 | enrichedEvent.Value = &value
118 |
119 | go produceEnrichedEvent(enrichedEvent)
120 |
121 | if sub != nil && event.NotAPIPostProcessed() {
122 | hasInAdvanceChargeResult := apiStore.AnyInAdvanceCharge(sub.PlanID, bm.ID)
123 | if hasInAdvanceChargeResult.Failure() {
124 | return failedResult(hasInAdvanceChargeResult, "fetch_in_advance_charges", "Error fetching in advance charges")
125 | }
126 |
127 | if hasInAdvanceChargeResult.Value() {
128 | go produceChargedInAdvanceEvent(enrichedEvent)
129 | }
130 |
131 | flagResult := flagSubscriptionRefresh(event.OrganizationID, sub)
132 | if flagResult.Failure() {
133 | return failedResult(flagResult, "flag_subscription_refresh", "Error flagging subscription refresh")
134 | }
135 | }
136 |
137 | return utils.SuccessResult(enrichedEvent)
138 | }
139 |
140 | func failedResult(r utils.AnyResult, code string, message string) utils.Result[*models.EnrichedEvent] {
141 | result := utils.FailedResult[*models.EnrichedEvent](r.Error()).AddErrorDetails(code, message)
142 | result.Retryable = r.IsRetryable()
143 | result.Capture = r.IsCapturable()
144 | return result
145 | }
146 |
147 | func evaluateExpression(ev *models.EnrichedEvent, bm *models.BillableMetric) utils.Result[bool] {
148 | if bm.Expression == "" {
149 | return utils.SuccessResult(false)
150 | }
151 |
152 | eventJson, err := json.Marshal(ev)
153 | if err != nil {
154 | return utils.FailedBoolResult(err).NonRetryable()
155 | }
156 | 	eventJsonString := string(eventJson)
157 |
158 | result := expression.Evaluate(bm.Expression, eventJsonString)
159 | if result != nil {
160 | ev.Properties[bm.FieldName] = *result
161 | } else {
162 | return utils.
163 | FailedBoolResult(fmt.Errorf("Failed to evaluate expr: %s with json: %s", bm.Expression, eventJsonString)).
164 | NonRetryable()
165 | }
166 |
167 | return utils.SuccessResult(true)
168 | }
169 |
170 | func produceEnrichedEvent(ev *models.EnrichedEvent) {
171 | eventJson, err := json.Marshal(ev)
172 | if err != nil {
173 | 		logger.Error("error while marshaling enriched events")
174 | 		return
175 | 	}
176 | msgKey := fmt.Sprintf("%s-%s-%s", ev.OrganizationID, ev.ExternalSubscriptionID, ev.Code)
177 |
178 | pushed := eventsEnrichedProducer.Produce(ctx, &kafka.ProducerMessage{
179 | Key: []byte(msgKey),
180 | Value: eventJson,
181 | })
182 |
183 | if !pushed {
184 | produceToDeadLetterQueue(*ev.IntialEvent, utils.FailedBoolResult(fmt.Errorf("Failed to push to %s topic", eventsEnrichedProducer.GetTopic())))
185 | }
186 | }
187 |
188 | func produceChargedInAdvanceEvent(ev *models.EnrichedEvent) {
189 | eventJson, err := json.Marshal(ev)
190 | if err != nil {
191 | logger.Error("error while marshaling charged in advance events")
192 | utils.CaptureError(err)
193 | 		return
194 | 	}
195 | msgKey := fmt.Sprintf("%s-%s-%s", ev.OrganizationID, ev.ExternalSubscriptionID, ev.Code)
196 |
197 | pushed := eventsInAdvanceProducer.Produce(ctx, &kafka.ProducerMessage{
198 | Key: []byte(msgKey),
199 | Value: eventJson,
200 | })
201 |
202 | if !pushed {
203 | produceToDeadLetterQueue(*ev.IntialEvent, utils.FailedBoolResult(fmt.Errorf("Failed to push to %s topic", eventsInAdvanceProducer.GetTopic())))
204 | }
205 | }
206 |
207 | func produceToDeadLetterQueue(event models.Event, errorResult utils.AnyResult) {
208 | failedEvent := models.FailedEvent{
209 | Event: event,
210 | InitialErrorMessage: errorResult.ErrorMsg(),
211 | ErrorCode: errorResult.ErrorCode(),
212 | ErrorMessage: errorResult.ErrorMessage(),
213 | FailedAt: time.Now(),
214 | }
215 |
216 | eventJson, err := json.Marshal(failedEvent)
217 | if err != nil {
218 | logger.Error("error while marshaling failed event with error details")
219 | utils.CaptureError(err)
220 | 		return
221 | 	}
222 | pushed := eventsDeadLetterQueue.Produce(ctx, &kafka.ProducerMessage{
223 | Value: eventJson,
224 | })
225 |
226 | if !pushed {
227 | logger.Error("error while pushing to dead letter topic", slog.String("topic", eventsDeadLetterQueue.GetTopic()))
228 | utils.CaptureErrorResultWithExtra(errorResult, "event", event)
229 | }
230 | }
231 |
232 | func flagSubscriptionRefresh(orgID string, sub *models.Subscription) utils.Result[bool] {
233 | err := subscriptionFlagStore.Flag(fmt.Sprintf("%s:%s", orgID, sub.ID))
234 | if err != nil {
235 | return utils.FailedBoolResult(err)
236 | }
237 |
238 | return utils.SuccessResult(true)
239 | }
240 |
--------------------------------------------------------------------------------
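
One property of processEvents worth pinning down: records that cannot even be unmarshalled are still returned as processed, so the consumer group commits them and a poison message is not redelivered forever. A minimal test sketch of that behaviour, assuming it lives in the processors package (the test name and setup are illustrative, not part of the repository):

package processors

import (
	"log/slog"
	"os"
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/twmb/franz-go/pkg/kgo"
)

func TestProcessEventsCommitsUnparsableRecords(t *testing.T) {
	// processEvents logs through the package-level logger, so initialise it here
	logger = slog.New(slog.NewJSONHandler(os.Stdout, nil))

	// A record whose value is not valid JSON can never be processed successfully
	records := []*kgo.Record{{Value: []byte("not json")}}

	processed := processEvents(records)

	// The poison record is still returned, so the consumer group commits it
	assert.Len(t, processed, 1)
}

Retryable failures take the opposite path: the record is left out of the returned slice, stays uncommitted, and is redelivered.
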
/events-processor/processors/processors.go:
--------------------------------------------------------------------------------
1 | package processors
2 |
3 | import (
4 | "context"
5 | "fmt"
6 | "log/slog"
7 | "os"
8 |
9 | "github.com/twmb/franz-go/pkg/kgo"
10 |
11 | tracer "github.com/getlago/lago/events-processor/config"
12 | "github.com/getlago/lago/events-processor/config/database"
13 | "github.com/getlago/lago/events-processor/config/kafka"
14 | "github.com/getlago/lago/events-processor/config/redis"
15 | "github.com/getlago/lago/events-processor/models"
16 | "github.com/getlago/lago/events-processor/utils"
17 | )
18 |
19 | var (
20 | ctx context.Context
21 | logger *slog.Logger
22 | eventsEnrichedProducer kafka.MessageProducer
23 | eventsInAdvanceProducer kafka.MessageProducer
24 | eventsDeadLetterQueue kafka.MessageProducer
25 | apiStore *models.ApiStore
26 | subscriptionFlagStore models.Flagger
27 | kafkaConfig kafka.ServerConfig
28 | )
29 |
30 | func initProducer(ctx context.Context, topicEnv string) utils.Result[*kafka.Producer] {
31 | if os.Getenv(topicEnv) == "" {
32 | return utils.FailedResult[*kafka.Producer](fmt.Errorf("%s variable is required", topicEnv))
33 | }
34 |
35 | topic := os.Getenv(topicEnv)
36 |
37 | producer, err := kafka.NewProducer(
38 | kafkaConfig,
39 | &kafka.ProducerConfig{
40 | Topic: topic,
41 | })
42 | if err != nil {
43 | return utils.FailedResult[*kafka.Producer](err)
44 | }
45 |
46 | 	err = producer.Ping(ctx)
47 | if err != nil {
48 | return utils.FailedResult[*kafka.Producer](err)
49 | }
50 |
51 | return utils.SuccessResult(producer)
52 | }
53 |
54 | func initFlagStore(name string) (*models.FlagStore, error) {
55 | redisDb, err := utils.GetEnvAsInt("LAGO_REDIS_STORE_DB", 0)
56 | if err != nil {
57 | return nil, err
58 | }
59 |
60 | redisConfig := redis.RedisConfig{
61 | Address: os.Getenv("LAGO_REDIS_STORE_URL"),
62 | Password: os.Getenv("LAGO_REDIS_STORE_PASSWORD"),
63 | DB: redisDb,
64 | UseTLS: os.Getenv("ENV") == "production",
65 | }
66 |
67 | db, err := redis.NewRedisDB(ctx, redisConfig)
68 | if err != nil {
69 | return nil, err
70 | }
71 |
72 | return models.NewFlagStore(ctx, db, name), nil
73 | }
74 |
75 | func StartProcessingEvents() {
76 | ctx = context.Background()
77 |
78 | logger = slog.New(slog.NewJSONHandler(os.Stdout, nil)).
79 | With("service", "post_process")
80 | slog.SetDefault(logger)
81 |
82 | if os.Getenv("ENV") == "production" {
83 | telemetryCfg := tracer.TracerConfig{
84 | ServiceName: os.Getenv("OTEL_SERVICE_NAME"),
85 | EndpointURL: os.Getenv("OTEL_EXPORTER_OTLP_ENDPOINT"),
86 | Insecure: os.Getenv("OTEL_INSECURE"),
87 | }
88 | tracer.InitOTLPTracer(telemetryCfg)
89 | }
90 |
91 | kafkaConfig = kafka.ServerConfig{
92 | ScramAlgorithm: os.Getenv("LAGO_KAFKA_SCRAM_ALGORITHM"),
93 | TLS: os.Getenv("LAGO_KAFKA_TLS") == "true",
94 | Server: os.Getenv("LAGO_KAFKA_BOOTSTRAP_SERVERS"),
95 | UseTelemetry: os.Getenv("ENV") == "production",
96 | UserName: os.Getenv("LAGO_KAFKA_USERNAME"),
97 | Password: os.Getenv("LAGO_KAFKA_PASSWORD"),
98 | }
99 |
100 | eventsEnrichedProducerResult := initProducer(ctx, "LAGO_KAFKA_ENRICHED_EVENTS_TOPIC")
101 | if eventsEnrichedProducerResult.Failure() {
102 | logger.Error(eventsEnrichedProducerResult.ErrorMsg())
103 | utils.CaptureErrorResult(eventsEnrichedProducerResult)
104 | panic(eventsEnrichedProducerResult.ErrorMessage())
105 | }
106 | eventsEnrichedProducer = eventsEnrichedProducerResult.Value()
107 |
108 | eventsInAdvanceProducerResult := initProducer(ctx, "LAGO_KAFKA_EVENTS_CHARGED_IN_ADVANCE_TOPIC")
109 | if eventsInAdvanceProducerResult.Failure() {
110 | logger.Error(eventsInAdvanceProducerResult.ErrorMsg())
111 | utils.CaptureErrorResult(eventsInAdvanceProducerResult)
112 | panic(eventsInAdvanceProducerResult.ErrorMessage())
113 | }
114 | eventsInAdvanceProducer = eventsInAdvanceProducerResult.Value()
115 |
116 | eventsDeadLetterQueueResult := initProducer(ctx, "LAGO_KAFKA_EVENTS_DEAD_LETTER_TOPIC")
117 | if eventsDeadLetterQueueResult.Failure() {
118 | logger.Error(eventsDeadLetterQueueResult.ErrorMsg())
119 | utils.CaptureErrorResult(eventsDeadLetterQueueResult)
120 | panic(eventsDeadLetterQueueResult.ErrorMessage())
121 | }
122 | eventsDeadLetterQueue = eventsDeadLetterQueueResult.Value()
123 |
124 | cg, err := kafka.NewConsumerGroup(
125 | kafkaConfig,
126 | &kafka.ConsumerGroupConfig{
127 | Topic: os.Getenv("LAGO_KAFKA_RAW_EVENTS_TOPIC"),
128 | ConsumerGroup: os.Getenv("LAGO_KAFKA_CONSUMER_GROUP"),
129 | ProcessRecords: func(records []*kgo.Record) []*kgo.Record {
130 | return processEvents(records)
131 | },
132 | })
133 | if err != nil {
134 | logger.Error("Error starting the event consumer", slog.String("error", err.Error()))
135 | utils.CaptureError(err)
136 | panic(err.Error())
137 | }
138 |
139 | maxConns, err := utils.GetEnvAsInt("LAGO_EVENTS_PROCESSOR_DATABASE_MAX_CONNECTIONS", 200)
140 | if err != nil {
141 | logger.Error("Error converting max connections into integer", slog.String("error", err.Error()))
142 | utils.CaptureError(err)
143 | panic(err.Error())
144 | }
145 |
146 | dbConfig := database.DBConfig{
147 | Url: os.Getenv("DATABASE_URL"),
148 | MaxConns: int32(maxConns),
149 | }
150 |
151 | db, err := database.NewConnection(dbConfig)
152 | if err != nil {
153 | logger.Error("Error connecting to the database", slog.String("error", err.Error()))
154 | utils.CaptureError(err)
155 | panic(err.Error())
156 | }
157 | apiStore = models.NewApiStore(db)
158 | defer db.Close()
159 |
160 | flagger, err := initFlagStore("subscription_refreshed")
161 | if err != nil {
162 | logger.Error("Error connecting to the flag store", slog.String("error", err.Error()))
163 | utils.CaptureError(err)
164 | panic(err.Error())
165 | }
166 | subscriptionFlagStore = flagger
167 | defer flagger.Close()
168 |
169 | cg.Start()
170 | }
171 |
--------------------------------------------------------------------------------
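
StartProcessingEvents is configured entirely through environment variables and panics if any dependency is unreachable. A minimal sketch of how a main package might wire it up, with placeholder broker, topic, and connection values (the repository's own main.go may differ):

package main

import (
	"os"

	"github.com/getlago/lago/events-processor/processors"
)

func main() {
	// Kafka connectivity and topics (values are placeholders)
	os.Setenv("LAGO_KAFKA_BOOTSTRAP_SERVERS", "redpanda:9092")
	os.Setenv("LAGO_KAFKA_RAW_EVENTS_TOPIC", "events_raw")
	os.Setenv("LAGO_KAFKA_CONSUMER_GROUP", "events_processor")
	os.Setenv("LAGO_KAFKA_ENRICHED_EVENTS_TOPIC", "events_enriched")
	os.Setenv("LAGO_KAFKA_EVENTS_CHARGED_IN_ADVANCE_TOPIC", "events_charged_in_advance")
	os.Setenv("LAGO_KAFKA_EVENTS_DEAD_LETTER_TOPIC", "events_dead_letter")

	// Postgres and the Redis flag store (values are placeholders)
	os.Setenv("DATABASE_URL", "postgresql://lago:changeme@db:5432/lago")
	os.Setenv("LAGO_REDIS_STORE_URL", "redis:6379")

	// Blocks while consuming; panics if any dependency cannot be reached
	processors.StartProcessingEvents()
}
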
/events-processor/tests/mocked_flag_store.go:
--------------------------------------------------------------------------------
1 | package tests
2 |
3 | type MockFlagStore struct {
4 | Key string
5 | ExecutionCount int
6 | ReturnedError error
7 | }
8 |
9 | func (mfs *MockFlagStore) Flag(key string) error {
10 | mfs.ExecutionCount++
11 | mfs.Key = key
12 |
13 | return mfs.ReturnedError
14 | }
15 |
--------------------------------------------------------------------------------
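
A usage sketch for MockFlagStore, exercising both the success and error paths (the test itself is illustrative, not part of the repository):

package tests

import (
	"errors"
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestMockFlagStore(t *testing.T) {
	mock := &MockFlagStore{}

	// A successful call records the key and bumps the counter
	assert.NoError(t, mock.Flag("org-123:sub-456"))
	assert.Equal(t, 1, mock.ExecutionCount)
	assert.Equal(t, "org-123:sub-456", mock.Key)

	// Simulate a Redis failure by setting ReturnedError
	mock.ReturnedError = errors.New("redis unavailable")
	assert.Error(t, mock.Flag("org-123:sub-456"))
	assert.Equal(t, 2, mock.ExecutionCount)
}
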
/events-processor/tests/mocked_producer.go:
--------------------------------------------------------------------------------
1 | package tests
2 |
3 | import (
4 | "context"
5 |
6 | "github.com/getlago/lago/events-processor/config/kafka"
7 | )
8 |
9 | type MockMessageProducer struct {
10 | Key []byte
11 | Value []byte
12 | ExecutionCount int
13 | }
14 |
15 | func (mp *MockMessageProducer) Produce(ctx context.Context, msg *kafka.ProducerMessage) bool {
16 | mp.Key = msg.Key
17 | mp.Value = msg.Value
18 | mp.ExecutionCount++
19 | return true
20 | }
21 |
22 | func (mp *MockMessageProducer) GetTopic() string {
23 | return "mocked_topic"
24 | }
25 |
--------------------------------------------------------------------------------
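
A usage sketch for MockMessageProducer; it always reports success and retains the last key and value so tests can assert on what would have been produced (illustrative, not part of the repository):

package tests

import (
	"context"
	"testing"

	"github.com/stretchr/testify/assert"

	"github.com/getlago/lago/events-processor/config/kafka"
)

func TestMockMessageProducer(t *testing.T) {
	producer := &MockMessageProducer{}

	pushed := producer.Produce(context.Background(), &kafka.ProducerMessage{
		Key:   []byte("org-sub-code"),
		Value: []byte(`{"code":"api_calls"}`),
	})

	// The mock always reports success and keeps the last message for assertions
	assert.True(t, pushed)
	assert.Equal(t, 1, producer.ExecutionCount)
	assert.Equal(t, []byte("org-sub-code"), producer.Key)
}
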
/events-processor/tests/mocked_store.go:
--------------------------------------------------------------------------------
1 | package tests
2 |
3 | import (
4 | "log/slog"
5 | "os"
6 | "testing"
7 |
8 | "github.com/DATA-DOG/go-sqlmock"
9 | "gorm.io/driver/postgres"
10 |
11 | "github.com/getlago/lago/events-processor/config/database"
12 | )
13 |
14 | func SetupMockStore(t *testing.T) (*database.DB, sqlmock.Sqlmock, func()) {
15 | mockDB, mock, err := sqlmock.New()
16 | if err != nil {
17 | t.Fatalf("Failed to create mock database: %v", err)
18 | }
19 |
20 | dialector := postgres.New(postgres.Config{
21 | Conn: mockDB,
22 | DriverName: "postgres",
23 | })
24 |
25 | 	logger := slog.New(slog.NewJSONHandler(os.Stderr, &slog.HandlerOptions{Level: slog.LevelDebug}))
26 |
27 | db, err := database.OpenConnection(logger, dialector)
28 | if err != nil {
29 | t.Fatalf("Failed to open gorm connection: %v", err)
30 | }
31 |
32 | return db, mock, func() {
33 | mockDB.Close()
34 | }
35 | }
36 |
--------------------------------------------------------------------------------
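
The model tests earlier in this listing (for example subscriptions_test.go) call a setupApiStore helper that is not shown here; a plausible shape for it, wrapping SetupMockStore, would be the following hedged sketch (the actual helper may differ):

package models

import (
	"testing"

	"github.com/DATA-DOG/go-sqlmock"

	"github.com/getlago/lago/events-processor/tests"
)

// Hypothetical helper mirroring what the model tests use; the real one may differ
func setupApiStore(t *testing.T) (*ApiStore, sqlmock.Sqlmock, func()) {
	db, mock, cleanup := tests.SetupMockStore(t)
	return NewApiStore(db), mock, cleanup
}
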
/events-processor/utils/env.go:
--------------------------------------------------------------------------------
1 | package utils
2 |
3 | import (
4 | "os"
5 | "strconv"
6 | )
7 |
8 | func GetEnvAsInt(key string, defaultValue int) (int, error) {
9 | value := os.Getenv(key)
10 | if value == "" {
11 | return defaultValue, nil
12 | }
13 |
14 | intValue, err := strconv.Atoi(value)
15 | if err != nil {
16 | return defaultValue, err
17 | }
18 | return intValue, nil
19 | }
20 |
--------------------------------------------------------------------------------
/events-processor/utils/env_test.go:
--------------------------------------------------------------------------------
1 | package utils
2 |
3 | import (
4 | "testing"
5 |
6 | "github.com/stretchr/testify/assert"
7 | )
8 |
9 | func TestGetEnvAsInt(t *testing.T) {
10 | t.Run("When environment variable exists", func(t *testing.T) {
11 | t.Setenv("TEST_INT_ENV", "42")
12 | value, err := GetEnvAsInt("TEST_INT_ENV", 0)
13 | assert.Equal(t, 42, value)
14 | assert.NoError(t, err)
15 | })
16 |
17 | t.Run("When environment variable does not exist", func(t *testing.T) {
18 | value, err := GetEnvAsInt("NON_EXISTENT_INT_ENV", 100)
19 | assert.Equal(t, 100, value)
20 | assert.NoError(t, err)
21 | })
22 |
23 | t.Run("When environment variable is invalid", func(t *testing.T) {
24 | t.Setenv("INVALID_INT_ENV", "not_an_int")
25 | value, err := GetEnvAsInt("INVALID_INT_ENV", 0)
26 | assert.Equal(t, 0, value)
27 | assert.Error(t, err)
28 | })
29 | }
30 |
--------------------------------------------------------------------------------
/events-processor/utils/error_tracker.go:
--------------------------------------------------------------------------------
1 | package utils
2 |
3 | import "github.com/getsentry/sentry-go"
4 |
5 | func CaptureErrorResult(errResult AnyResult) {
6 | CaptureErrorResultWithExtra(errResult, "", nil)
7 | }
8 |
9 | func CaptureErrorResultWithExtra(errResult AnyResult, extraKey string, extraValue any) {
10 | sentry.WithScope(func(scope *sentry.Scope) {
11 | scope.SetExtra("error_code", errResult.ErrorCode())
12 | scope.SetExtra("error_message", errResult.ErrorMessage())
13 |
14 | if extraKey != "" {
15 | scope.SetExtra(extraKey, extraValue)
16 | }
17 |
18 | sentry.CaptureException(errResult.Error())
19 | })
20 | }
21 |
22 | func CaptureError(err error) {
23 | sentry.CaptureException(err)
24 | }
25 |
--------------------------------------------------------------------------------
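
These helpers pair with the Result type defined below: a failed result carries an error code and message that become Sentry extras. A minimal sketch of the intended flow (the error code and message are illustrative; without sentry.Init the capture call is a no-op, so the sketch is safe to run):

package main

import (
	"errors"

	"github.com/getlago/lago/events-processor/utils"
)

func main() {
	// Illustrative error code and message, in the style of failedResult in processors/events.go
	res := utils.FailedResult[string](errors.New("connection refused")).
		AddErrorDetails("fetch_billable_metric", "Error fetching billable metric")

	if res.Failure() && res.IsCapturable() {
		// Attaches error_code and error_message as Sentry extras before capturing
		utils.CaptureErrorResult(res)
	}
}
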
/events-processor/utils/result.go:
--------------------------------------------------------------------------------
1 | package utils
2 |
3 | type Result[T any] struct {
4 | value T
5 | err error
6 | details *ErrorDetails
7 | Retryable bool
8 | Capture bool
9 | }
10 |
11 | type ErrorDetails struct {
12 | Code string
13 | Message string
14 | }
15 |
16 | type AnyResult interface {
17 | Success() bool
18 | Failure() bool
19 | Error() error
20 | ErrorMsg() string
21 | ErrorCode() string
22 | ErrorMessage() string
23 | IsCapturable() bool
24 | IsRetryable() bool
25 | }
26 |
27 | func (r Result[T]) Success() bool {
28 | return r.err == nil
29 | }
30 |
31 | func (r Result[T]) Failure() bool {
32 | return r.err != nil
33 | }
34 |
35 | func (r Result[T]) Value() T {
36 | return r.value
37 | }
38 |
39 | func (r Result[T]) ValueOrPanic() T {
40 | if r.Failure() {
41 | panic(r.err)
42 | }
43 |
44 | return r.value
45 | }
46 |
47 | func (r Result[T]) Error() error {
48 | return r.err
49 | }
50 |
51 | func (r Result[T]) ErrorMsg() string {
52 | if r.Success() {
53 | return ""
54 | }
55 |
56 | return r.err.Error()
57 | }
58 |
59 | func (r Result[T]) AddErrorDetails(code string, message string) Result[T] {
60 | r.details = &ErrorDetails{
61 | Code: code,
62 | Message: message,
63 | }
64 | return r
65 | }
66 |
67 | func (r Result[T]) NonRetryable() Result[T] {
68 | r.Retryable = false
69 | return r
70 | }
71 |
72 | func (r Result[T]) IsRetryable() bool {
73 | return r.Retryable
74 | }
75 |
76 | func (r Result[T]) NonCapturable() Result[T] {
77 | r.Capture = false
78 | return r
79 | }
80 |
81 | func (r Result[T]) IsCapturable() bool {
82 | return r.Capture
83 | }
84 |
85 | func (r Result[T]) ErrorDetails() *ErrorDetails {
86 | return r.details
87 | }
88 |
89 | func (r Result[T]) ErrorCode() string {
90 | if r.details == nil {
91 | return ""
92 | }
93 |
94 | return r.details.Code
95 | }
96 |
97 | func (r Result[T]) ErrorMessage() string {
98 | if r.details == nil {
99 | return ""
100 | }
101 |
102 | return r.details.Message
103 | }
104 |
105 | func SuccessResult[T any](value T) Result[T] {
106 | result := Result[T]{
107 | value: value,
108 | err: nil,
109 | }
110 | return result
111 | }
112 |
113 | func FailedResult[T any](err error) Result[T] {
114 | result := Result[T]{
115 | err: err,
116 | Capture: true,
117 | Retryable: true,
118 | }
119 | return result
120 | }
121 |
122 | func FailedBoolResult(err error) Result[bool] {
123 | 	return FailedResult[bool](err)
124 | }
125 |
--------------------------------------------------------------------------------
/events-processor/utils/result_test.go:
--------------------------------------------------------------------------------
1 | package utils
2 |
3 | import (
4 | "fmt"
5 | "testing"
6 |
7 | "github.com/stretchr/testify/assert"
8 | )
9 |
10 | var successResult = Result[string]{value: "Success", err: nil}
11 | var failedResult = Result[string]{
12 | err: fmt.Errorf("Failed result"),
13 | Capture: true,
14 | Retryable: true,
15 | details: &ErrorDetails{
16 | Code: "failed_result",
17 | Message: "More details",
18 | },
19 | }
20 |
21 | type booleanTest struct {
22 | arg Result[string]
23 | expected bool
24 | }
25 |
26 | type stringTest struct {
27 | arg Result[string]
28 | expected string
29 | }
30 |
31 | var successTests = []booleanTest{
32 | 	{successResult, true},
33 | 	{failedResult, false},
34 | }
35 |
36 | func TestSuccess(t *testing.T) {
37 | for _, test := range successTests {
38 | assert.Equal(t, test.arg.Success(), test.expected)
39 | }
40 | }
41 |
42 | var failureTests = []booleanTest{
43 | 	{successResult, false},
44 | 	{failedResult, true},
45 | }
46 |
47 | func TestFailure(t *testing.T) {
48 | for _, test := range failureTests {
49 | assert.Equal(t, test.arg.Failure(), test.expected)
50 | }
51 | }
52 |
53 | var valueTests = []stringTest{
54 | {successResult, "Success"},
55 | {failedResult, ""},
56 | }
57 |
58 | func TestValue(t *testing.T) {
59 | for _, test := range valueTests {
60 | assert.Equal(t, test.arg.Value(), test.expected)
61 | }
62 | }
63 |
64 | func TestValueOrPanic(t *testing.T) {
65 | assert.Panics(t, func() { failedResult.ValueOrPanic() })
66 | assert.Equal(t, successResult.ValueOrPanic(), "Success")
67 | }
68 |
69 | func TestError(t *testing.T) {
70 | assert.Nil(t, successResult.Error())
71 | assert.Error(t, failedResult.Error())
72 | }
73 |
74 | var errorMsgTests = []stringTest{
75 | {successResult, ""},
76 | {failedResult, "Failed result"},
77 | }
78 |
79 | func TestErrorMsg(t *testing.T) {
80 | for _, test := range errorMsgTests {
81 | assert.Equal(t, test.arg.ErrorMsg(), test.expected)
82 | }
83 | }
84 |
85 | func TestErrorDetails(t *testing.T) {
86 | assert.Nil(t, successResult.ErrorDetails())
87 | assert.NotNil(t, failedResult.ErrorDetails())
88 | }
89 |
90 | type resultTest struct {
91 | arg Result[string]
92 | expectedSuccess bool
93 | expectedFailure bool
94 | expectedValue any
95 | expectedErrorMsg string
96 | }
97 |
98 | var successResultTests = []resultTest{
99 | {
100 | SuccessResult("Success"),
101 | true,
102 | false,
103 | "Success",
104 | "",
105 | },
106 | {
107 | FailedResult[string](fmt.Errorf("Failed result")),
108 | false,
109 | true,
110 | "",
111 | "Failed result",
112 | },
113 | }
114 |
115 | func TestResults(t *testing.T) {
116 | for _, test := range successResultTests {
117 | assert.Equal(t, test.arg.Success(), test.expectedSuccess)
118 | assert.Equal(t, test.arg.Failure(), test.expectedFailure)
119 | assert.Equal(t, test.arg.Value(), test.expectedValue)
120 | assert.Equal(t, test.arg.ErrorMsg(), test.expectedErrorMsg)
121 | }
122 | }
123 |
124 | func TestNonCapturable(t *testing.T) {
125 | assert.True(t, failedResult.Capture)
126 | assert.True(t, failedResult.IsCapturable())
127 | assert.False(t, failedResult.NonCapturable().Capture)
128 | assert.False(t, failedResult.NonCapturable().IsCapturable())
129 | }
130 |
131 | func TestNonRetryable(t *testing.T) {
132 | assert.True(t, failedResult.Retryable)
133 | assert.True(t, failedResult.IsRetryable())
134 | assert.False(t, failedResult.NonRetryable().Retryable)
135 | assert.False(t, failedResult.NonRetryable().IsRetryable())
136 | }
137 |
--------------------------------------------------------------------------------
/events-processor/utils/time.go:
--------------------------------------------------------------------------------
1 | package utils
2 |
3 | import (
4 | "fmt"
5 | "math"
6 | "strconv"
7 | "strings"
8 | "time"
9 | )
10 |
11 | func ToTime(timestamp any) Result[time.Time] {
12 | var seconds int64
13 | var nanoseconds int64
14 |
15 | switch timestamp := timestamp.(type) {
16 | case string:
17 | floatTimestamp, err := strconv.ParseFloat(timestamp, 64)
18 | if err != nil {
19 | return FailedResult[time.Time](err)
20 | }
21 |
22 | seconds = int64(floatTimestamp)
23 | nanoseconds = int64((floatTimestamp - float64(seconds)) * 1e9)
24 |
25 | case int:
26 | seconds = int64(timestamp)
27 | nanoseconds = 0
28 |
29 | case int64:
30 | seconds = timestamp
31 | nanoseconds = 0
32 |
33 | case float64:
34 | seconds = int64(timestamp)
35 | nanoseconds = int64((timestamp - float64(seconds)) * 1e9)
36 |
37 | default:
38 | return FailedResult[time.Time](fmt.Errorf("Unsupported timestamp type: %T", timestamp))
39 | }
40 |
41 | return SuccessResult(time.Unix(seconds, nanoseconds).In(time.UTC).Truncate(time.Millisecond))
42 | }
43 |
44 | func ToFloat64Timestamp(timeValue any) Result[float64] {
45 | var value float64
46 |
47 | switch timestamp := timeValue.(type) {
48 | case string:
49 | floatTimestamp, err := strconv.ParseFloat(timestamp, 64)
50 | if err != nil {
51 | return FailedResult[float64](err)
52 | }
53 | value = math.Trunc(floatTimestamp*1000) / 1000
54 | case int:
55 | value = float64(timestamp)
56 | case int64:
57 | value = float64(timestamp)
58 | case float64:
59 | 		value = timestamp
60 | default:
61 | return FailedResult[float64](fmt.Errorf("Unsupported timestamp type: %T", timestamp))
62 | }
63 |
64 | return SuccessResult(value)
65 | }
66 |
67 | type CustomTime time.Time
68 |
69 | func (ct *CustomTime) UnmarshalJSON(b []byte) error {
70 | s := strings.Trim(string(b), "\"")
71 | if s == "null" || s == "" {
72 | return nil
73 | }
74 |
75 | t, err := time.Parse("2006-01-02T15:04:05", s)
76 | if err != nil {
77 | // value could be a Unix timestamp encoded as a string
78 | timeResult := ToTime(s)
79 | if timeResult.Failure() {
80 | return err
81 | }
82 |
83 | t = timeResult.value
84 | }
85 |
86 | *ct = CustomTime(t)
87 | return nil
88 | }
89 |
90 | func (ct CustomTime) MarshalJSON() ([]byte, error) {
91 | t := time.Time(ct)
92 | if t.IsZero() {
93 | return []byte("null"), nil
94 | }
95 |
96 | data := make([]byte, 0, 21) // 19 characters for time format and 2 for quotes
97 | return fmt.Appendf(data, "\"%s\"", t.Format("2006-01-02T15:04:05")), nil
98 | }
99 |
100 | func (ct CustomTime) Time() time.Time {
101 | return time.Time(ct)
102 | }
103 |
104 | func (ct CustomTime) String() string {
105 | return ct.Time().Format("2006-01-02T15:04:05")
106 | }
107 |
--------------------------------------------------------------------------------
/events-processor/utils/time_test.go:
--------------------------------------------------------------------------------
1 | package utils
2 |
3 | import (
4 | "fmt"
5 | "testing"
6 | "time"
7 |
8 | "github.com/stretchr/testify/assert"
9 | )
10 |
11 | type expectedTime struct {
12 | timestamp any
13 | parsedValue time.Time
14 | }
15 |
16 | func TestToTime(t *testing.T) {
17 | t.Run("With supported time format", func(t *testing.T) {
18 | valueInt, _ := time.Parse(time.RFC3339, "2025-03-03T13:03:29Z")
19 | valueFloat, _ := time.Parse(time.RFC3339, "2025-03-03T13:03:29.344Z")
20 |
21 | expectations := []expectedTime{
22 | 			{
23 | 				timestamp:   1741007009,
24 | 				parsedValue: valueInt,
25 | 			},
26 | 			{
27 | 				timestamp:   int64(1741007009),
28 | 				parsedValue: valueInt,
29 | 			},
30 | 			{
31 | 				timestamp:   float64(1741007009.344),
32 | 				parsedValue: valueFloat,
33 | 			},
34 | 			{
35 | 				timestamp:   fmt.Sprintf("%f", 1741007009.344),
36 | 				parsedValue: valueFloat,
37 | 			},
38 | }
39 |
40 | for _, test := range expectations {
41 | result := ToTime(test.timestamp)
42 | assert.True(t, result.Success())
43 | assert.Equal(t, test.parsedValue, result.Value())
44 | }
45 | })
46 |
47 | 	t.Run("With unsupported time format", func(t *testing.T) {
48 | result := ToTime("2025-03-03T13:03:29Z")
49 | assert.False(t, result.Success())
50 | assert.Equal(t, "strconv.ParseFloat: parsing \"2025-03-03T13:03:29Z\": invalid syntax", result.ErrorMsg())
51 | })
52 | }
53 |
54 | type expectedTime64 struct {
55 | timestamp any
56 | parsedValue float64
57 | }
58 |
59 | func TestToFloat64Timestamp(t *testing.T) {
60 | t.Run("With supported time format", func(t *testing.T) {
61 | expectations := []expectedTime64{
62 | 			{
63 | 				timestamp:   1741007009,
64 | 				parsedValue: 1741007009.0,
65 | 			},
66 | 			{
67 | 				timestamp:   int64(1741007009),
68 | 				parsedValue: 1741007009.0,
69 | 			},
70 | 			{
71 | 				timestamp:   float64(1741007009.344),
72 | 				parsedValue: 1741007009.344,
73 | 			},
74 | 			{
75 | 				timestamp:   fmt.Sprintf("%f", 1741007009.344),
76 | 				parsedValue: 1741007009.344,
77 | 			},
78 | }
79 |
80 | for _, test := range expectations {
81 | result := ToFloat64Timestamp(test.timestamp)
82 | assert.True(t, result.Success())
83 | assert.Equal(t, test.parsedValue, result.Value())
84 | }
85 | })
86 |
87 | 	t.Run("With unsupported time format", func(t *testing.T) {
88 | result := ToFloat64Timestamp("2025-03-03T13:03:29Z")
89 | assert.False(t, result.Success())
90 | assert.Equal(t, "strconv.ParseFloat: parsing \"2025-03-03T13:03:29Z\": invalid syntax", result.ErrorMsg())
91 | })
92 | }
93 |
94 | func TestCustomTime(t *testing.T) {
95 | t.Run("With expected time format", func(t *testing.T) {
96 | ct := &CustomTime{}
97 |
98 | 		timeStr := "2025-03-03T13:03:29"
99 | 		err := ct.UnmarshalJSON([]byte(timeStr))
100 | 		assert.NoError(t, err)
101 | 		assert.Equal(t, timeStr, ct.String())
102 |
103 | json, err := ct.MarshalJSON()
104 | assert.NoError(t, err)
105 |
106 | data := make([]byte, 0, 21)
107 | 		assert.Equal(t, json, fmt.Appendf(data, "\"%s\"", timeStr))
108 | })
109 |
110 | t.Run("With invalid time format", func(t *testing.T) {
111 | ct := &CustomTime{}
112 |
113 | 		timeStr := "2025-03-03T13:03:29Z"
114 | 		err := ct.UnmarshalJSON([]byte(timeStr))
115 | assert.Error(t, err)
116 | })
117 |
118 | t.Run("When timestamp is a unix timestamp sent as string", func(t *testing.T) {
119 | ct := &CustomTime{}
120 | 		timeStr := "1744335427"
121 | expectedTime := "2025-04-11T01:37:07"
122 |
123 | 		err := ct.UnmarshalJSON([]byte(timeStr))
124 | assert.NoError(t, err)
125 | assert.Equal(t, expectedTime, ct.String())
126 |
127 | json, err := ct.MarshalJSON()
128 | assert.NoError(t, err)
129 |
130 | data := make([]byte, 0, 21)
131 | assert.Equal(t, json, fmt.Appendf(data, "\"%s\"", expectedTime))
132 | })
133 | }
134 |
--------------------------------------------------------------------------------
/extra/clickhouse/config.d/config.xml:
--------------------------------------------------------------------------------
1 | <clickhouse>
2 |     <logger>
3 |         <level>debug</level>
4 |         <log>/var/log/clickhouse-server/clickhouse-server.log</log>
5 |         <errorlog>/var/log/clickhouse-server/clickhouse-server.err.log</errorlog>
6 |         <size>1000M</size>
7 |         <count>3</count>
8 |     </logger>
9 |     <display_name>clickhouse_dev</display_name>
10 |     <listen_host>0.0.0.0</listen_host>
11 |     <http_port>8123</http_port>
12 |     <tcp_port>9000</tcp_port>
13 |     <user_directories>
14 |         <users_xml>
15 |             <path>users.xml</path>
16 |         </users_xml>
17 |         <local_directory>
18 |             <path>/var/lib/clickhouse/access/</path>
19 |         </local_directory>
20 |     </user_directories>
21 | </clickhouse>
22 |
--------------------------------------------------------------------------------
/extra/clickhouse/users.d/users.xml:
--------------------------------------------------------------------------------
1 | <clickhouse>
2 |     <profiles>
3 |         <default>
4 |             <max_memory_usage>10000000000</max_memory_usage>
5 |             <use_uncompressed_cache>0</use_uncompressed_cache>
6 |             <load_balancing>in_order</load_balancing>
7 |             <log_queries>1</log_queries>
8 |         </default>
9 |     </profiles>
10 |     <users>
11 |         <default>
12 |             <access_management>1</access_management>
13 |             <profile>default</profile>
14 |             <quota>default</quota>
15 |             <networks>
16 |                 <ip>::/0</ip>
17 |             </networks>
18 |             <password>default</password>
19 |             <named_collection_control>1</named_collection_control>
20 |             <show_named_collections>1</show_named_collections>
21 |             <show_named_collections_secrets>1</show_named_collections_secrets>
22 |             <allow_ddl>1</allow_ddl>
23 |         </default>
24 |     </users>
25 |     <quotas>
26 |         <default>
27 |             <interval>
28 |                 <duration>3600</duration>
29 |                 <queries>0</queries>
30 |                 <errors>0</errors>
31 |                 <result_rows>0</result_rows>
32 |                 <read_rows>0</read_rows>
33 |                 <execution_time>0</execution_time>
34 |             </interval>
35 |         </default>
36 |     </quotas>
37 | </clickhouse>
38 |
--------------------------------------------------------------------------------
/extra/init-letsencrypt.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | if ! [ -x "$(command -v docker-compose)" ]; then
4 | echo 'Error: docker-compose is not installed.' >&2
5 | exit 1
6 | fi
7 |
8 | domains=(lago.example www.lago.example)
9 | rsa_key_size=4096
10 | data_path="./extra/certbot"
11 | email="jeremy@getlago.com" # Adding a valid address is strongly recommended
12 | staging=0 # Set to 1 if you're testing your setup to avoid hitting request limits
13 |
14 | if [ -d "$data_path" ]; then
15 | read -p "Existing data found for $domains. Continue and replace existing certificate? (y/N) " decision
16 | if [ "$decision" != "Y" ] && [ "$decision" != "y" ]; then
17 | exit
18 | fi
19 | fi
20 |
21 |
22 | if [ ! -e "$data_path/conf/options-ssl-nginx.conf" ] || [ ! -e "$data_path/conf/ssl-dhparams.pem" ]; then
23 | echo "### Downloading recommended TLS parameters ..."
24 | mkdir -p "$data_path/conf"
25 | curl -s https://raw.githubusercontent.com/certbot/certbot/master/certbot-nginx/certbot_nginx/_internal/tls_configs/options-ssl-nginx.conf > "$data_path/conf/options-ssl-nginx.conf"
26 | curl -s https://raw.githubusercontent.com/certbot/certbot/master/certbot/certbot/ssl-dhparams.pem > "$data_path/conf/ssl-dhparams.pem"
27 | echo
28 | fi
29 |
30 | echo "### Creating dummy certificate for $domains ..."
31 | path="/etc/letsencrypt/live/$domains"
32 | mkdir -p "$data_path/conf/live/$domains"
33 | docker-compose run --rm --entrypoint "\
34 | openssl req -x509 -nodes -newkey rsa:$rsa_key_size -days 1\
35 | -keyout '$path/privkey.pem' \
36 | -out '$path/fullchain.pem' \
37 | -subj '/CN=localhost'" certbot
38 | echo
39 |
40 |
41 | echo "### Starting front ..."
42 | docker-compose up --force-recreate -d front
43 | echo
44 |
45 | echo "### Deleting dummy certificate for $domains ..."
46 | docker-compose run --rm --entrypoint "\
47 | rm -Rf /etc/letsencrypt/live/$domains && \
48 | rm -Rf /etc/letsencrypt/archive/$domains && \
49 | rm -Rf /etc/letsencrypt/renewal/$domains.conf" certbot
50 | echo
51 |
52 |
53 | echo "### Requesting Let's Encrypt certificate for $domains ..."
54 | # Join $domains into -d args
55 | domain_args=""
56 | for domain in "${domains[@]}"; do
57 | domain_args="$domain_args -d $domain"
58 | done
59 |
60 | # Select appropriate email arg
61 | case "$email" in
62 | "") email_arg="--register-unsafely-without-email" ;;
63 | *) email_arg="--email $email" ;;
64 | esac
65 |
66 | # Enable staging mode if needed
67 | if [ "$staging" != "0" ]; then staging_arg="--staging"; fi
68 |
69 | docker-compose run --rm --entrypoint "\
70 | certbot certonly --webroot -w /var/www/certbot \
71 | $staging_arg \
72 | $email_arg \
73 | $domain_args \
74 | --rsa-key-size $rsa_key_size \
75 | --agree-tos \
76 | --force-renewal" certbot
77 | echo
78 |
79 | echo "### Reloading nginx ..."
80 | docker-compose exec front nginx -s reload
--------------------------------------------------------------------------------
/extra/init-selfsigned.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | sudo openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout ./extra/ssl/nginx-selfsigned.key -out ./extra/ssl/nginx-selfsigned.crt
4 | sudo openssl dhparam -out ./extra/ssl/dhparam.pem 2048
5 |
--------------------------------------------------------------------------------
/extra/nginx-letsencrypt.conf:
--------------------------------------------------------------------------------
1 | server {
2 | listen 80;
3 | server_name lago.example;
4 |
5 | location / {
6 | root /usr/share/nginx/html;
7 | index index.html index.htm;
8 | try_files $uri $uri/ /index.html =404;
9 | return 301 https://$host$request_uri;
10 | }
11 |
12 | location /.well-known/acme-challenge/ {
13 | root /var/www/certbot;
14 | }
15 | }
16 |
17 | server {
18 | listen 443 ssl;
19 | server_name lago.example;
20 |
21 | location / {
22 | root /usr/share/nginx/html;
23 | index index.html index.htm;
24 | try_files $uri $uri/ /index.html =404;
25 | }
26 |
27 | ssl_certificate /etc/letsencrypt/live/lago.example/fullchain.pem;
28 | ssl_certificate_key /etc/letsencrypt/live/lago.example/privkey.pem;
29 |
30 | include /etc/letsencrypt/options-ssl-nginx.conf;
31 | ssl_dhparam /etc/letsencrypt/ssl-dhparams.pem;
32 | }
--------------------------------------------------------------------------------
/extra/nginx-selfsigned.conf:
--------------------------------------------------------------------------------
1 | server {
2 | listen 80;
3 |
4 | location / {
5 | root /usr/share/nginx/html;
6 | index index.html index.htm;
7 | try_files $uri $uri/ /index.html =404;
8 | return 301 https://$host$request_uri;
9 | }
10 |
11 | }
12 |
13 | server {
14 | listen 443 ssl;
15 |
16 | ssl_certificate /etc/ssl/certs/nginx-selfsigned.crt;
17 | ssl_certificate_key /etc/ssl/private/nginx-selfsigned.key;
18 | ssl_dhparam /etc/ssl/certs/dhparam.pem;
19 |
20 | location / {
21 | root /usr/share/nginx/html;
22 | index index.html index.htm;
23 | try_files $uri $uri/ /index.html =404;
24 | }
25 | }
--------------------------------------------------------------------------------
/extra/ssl/.keep:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/getlago/lago/31430c719d64834d1a1d237da5b300c2d16a9370/extra/ssl/.keep
--------------------------------------------------------------------------------
/scripts/bootstrap.sh:
--------------------------------------------------------------------------------
1 | #! /bin/sh
2 |
3 | apt update
4 | apt install -y git curl
5 | curl -sL https://deb.nodesource.com/setup_20.x | sh -
6 | apt update
7 | apt install -y nodejs npm
8 |
--------------------------------------------------------------------------------
/scripts/pg-init-scripts/bootstrap.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 |
3 | apt update
4 | apt install -y git curl
5 | curl -sL https://deb.nodesource.com/setup_20.x | sh
6 | apt update
7 | apt install -y build-essential nodejs npm
8 |
--------------------------------------------------------------------------------
/scripts/pg-init-scripts/create-multiple-postgresql-databases.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -e
4 | set -u
5 |
6 | function create_user_and_database() {
7 | local database=$1
8 | echo " Creating user and database '$database'"
9 | psql -v ON_ERROR_STOP=1 --username "$POSTGRES_USER" <<-EOSQL
10 | CREATE USER $database;
11 | CREATE DATABASE $database;
12 | GRANT ALL PRIVILEGES ON DATABASE $database TO $database;
13 | EOSQL
14 | }
15 |
16 | if [ -n "$POSTGRES_MULTIPLE_DATABASES" ]; then
17 | echo "Multiple database creation requested: $POSTGRES_MULTIPLE_DATABASES"
18 | for db in $(echo $POSTGRES_MULTIPLE_DATABASES | tr ',' ' '); do
19 | create_user_and_database $db
20 | done
21 | echo "Multiple databases created"
22 | fi
23 |
--------------------------------------------------------------------------------
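
For example, starting the Postgres container with POSTGRES_MULTIPLE_DATABASES=lago,example makes the init script call create_user_and_database once per comma-separated name, creating a matching user and database and granting that user full privileges on it.
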
/traefik/dynamic.yml:
--------------------------------------------------------------------------------
1 | tls:
2 | certificates:
3 | - certFile: "/etc/certs/lago.dev.pem"
4 | keyFile: "/etc/certs/lago.dev-key.pem"
--------------------------------------------------------------------------------
/traefik/traefik.yml:
--------------------------------------------------------------------------------
1 | log:
2 | level: debug
3 |
4 | providers: # You can add more than one provider if needed
5 | docker:
6 | endpoint: "unix:///var/run/docker.sock"
7 | exposedByDefault: false # Only expose explicitly enabled containers
8 |
9 | file:
10 | filename: /etc/traefik/dynamic.yml
11 | watch: true
12 |
13 | entryPoints:
14 | web:
15 | address: ":80"
16 | websecure:
17 | address: ":443"
18 |
19 | api:
20 | dashboard: true
21 | insecure: true
--------------------------------------------------------------------------------