├── .gitignore
├── changelog.zulipchat.png
├── dagger
│   ├── .gitignore
│   ├── .gitattributes
│   ├── proxy.go
│   ├── go.mod
│   ├── go.sum
│   └── main.go
├── how-it-started-changelog-friends-38.png
├── test
│   ├── acceptance
│   │   ├── fastly
│   │   │   ├── www.hurl
│   │   │   ├── feeds.hurl
│   │   │   ├── admin.hurl
│   │   │   ├── homepage.hurl
│   │   │   └── feed.hurl
│   │   ├── local
│   │   │   ├── www.hurl
│   │   │   └── http.hurl
│   │   ├── pipedream
│   │   │   ├── feeds.hurl
│   │   │   ├── admin.hurl
│   │   │   ├── assets.hurl
│   │   │   ├── health.hurl
│   │   │   ├── homepage.hurl
│   │   │   └── feed.hurl
│   │   ├── practicalai.hurl
│   │   ├── http.hurl
│   │   ├── assets.hurl
│   │   ├── feeds.hurl
│   │   └── news-mp3.hurl
│   └── vtc
│       ├── www.vtc
│       ├── http.vtc
│       ├── purge.vtc
│       ├── fly-request-id.vtc
│       ├── practicalai.vtc
│       ├── news-mp3.vtc
│       ├── client-ip.vtc
│       ├── health.vtc
│       ├── assets.vtc
│       ├── cache-status.vtc
│       └── feeds.vtc
├── vector
│   └── pipedream.changelog.com
│       ├── debug_varnish.yaml
│       ├── debug_varnish_geoip.yaml
│       ├── debug_s3.yaml
│       ├── default.yaml
│       └── geoip.yaml
├── dagger.json
├── docs
│   ├── README.md
│   └── local_dev.md
├── container
│   ├── welcome.bashrc
│   └── justfile
├── varnish
│   ├── vcl
│   │   ├── http.vcl
│   │   ├── fly-request-id.vcl
│   │   ├── www.vcl
│   │   ├── news-mp3.vcl
│   │   └── default.vcl
│   └── varnish-json-response.bash
├── .envrc
├── just
│   ├── dagger.just
│   ├── hurl.just
│   ├── op.just
│   └── _config.just
├── envrc.secrets.op
├── fly.toml
├── .github
│   └── workflows
│       ├── ship_it.yml
│       ├── _github.yml
│       └── _namespace.yml
├── LICENSE
├── regions.txt
├── justfile
└── README.md
/.gitignore:
--------------------------------------------------------------------------------
1 | /tmp
2 | .envrc.secrets
3 |
--------------------------------------------------------------------------------
/changelog.zulipchat.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/thechangelog/pipely/HEAD/changelog.zulipchat.png
--------------------------------------------------------------------------------
/dagger/.gitignore:
--------------------------------------------------------------------------------
1 | /dagger.gen.go
2 | /internal/dagger
3 | /internal/querybuilder
4 | /internal/telemetry
5 | /.env
6 |
--------------------------------------------------------------------------------
/how-it-started-changelog-friends-38.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/thechangelog/pipely/HEAD/how-it-started-changelog-friends-38.png
--------------------------------------------------------------------------------
/test/acceptance/fastly/www.hurl:
--------------------------------------------------------------------------------
1 | GET {{proto}}://www.{{host}}/
2 | HTTP 301 # expect moved permanently response
3 | [Asserts]
4 | header "Location" == "https://{{host}}/"
5 |
--------------------------------------------------------------------------------
/test/acceptance/local/www.hurl:
--------------------------------------------------------------------------------
1 | GET {{proto}}://www.{{host}}/
2 | HTTP 301 # expect moved permanently response
3 | [Asserts]
4 | header "Location" == "https://{{host}}/"
5 |
--------------------------------------------------------------------------------
/dagger/.gitattributes:
--------------------------------------------------------------------------------
1 | /dagger.gen.go linguist-generated
2 | /internal/dagger/** linguist-generated
3 | /internal/querybuilder/** linguist-generated
4 | /internal/telemetry/** linguist-generated
5 |
--------------------------------------------------------------------------------
/test/acceptance/fastly/feeds.hurl:
--------------------------------------------------------------------------------
1 | GET {{proto}}://{{host}}/rss
2 | HTTP 302 # expect Found response
3 | [Asserts]
4 | header "via" matches /[vV]arnish/ # served via Varnish
5 | header "location" contains "{{proto}}://{{host}}/feed" # redirects to /feed
6 |
--------------------------------------------------------------------------------
/vector/pipedream.changelog.com/debug_varnish.yaml:
--------------------------------------------------------------------------------
1 | sinks:
2 | # https://vector.dev/docs/reference/configuration/sinks/console/
3 | debug_varnish:
4 | type: "console"
5 | encoding:
6 | codec: "logfmt"
7 | inputs:
8 | - "varnish"
9 |
--------------------------------------------------------------------------------
/vector/pipedream.changelog.com/debug_varnish_geoip.yaml:
--------------------------------------------------------------------------------
1 | sinks:
2 | # https://vector.dev/docs/reference/configuration/sinks/console/
3 | debug_varnish_geoip:
4 | type: "console"
5 | encoding:
6 | codec: "logfmt"
7 | inputs:
8 | - "varnish_geoip"
9 |
--------------------------------------------------------------------------------
/vector/pipedream.changelog.com/debug_s3.yaml:
--------------------------------------------------------------------------------
1 | sinks:
2 | # https://vector.dev/docs/reference/configuration/sinks/console/
3 | debug_s3:
4 | type: "console"
5 | encoding:
6 | codec: "logfmt"
7 | inputs:
8 | - "s3_csv"
9 | - "s3_json_feeds"
10 |
--------------------------------------------------------------------------------
/test/acceptance/pipedream/feeds.hurl:
--------------------------------------------------------------------------------
1 | GET {{proto}}://{{host}}/rss
2 | HTTP 200 # expect OK response
3 | [Asserts]
4 | header "cf-ray" exists # served by Cloudflare
5 | header "via" matches /[vV]arnish/ # served via Varnish
6 | header "age" exists # cache age works
7 | header "content-type" contains "application/xml" # content type is XML
8 |
--------------------------------------------------------------------------------
/dagger.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "pipely",
3 | "engineVersion": "v0.18.12",
4 | "sdk": {
5 | "source": "go"
6 | },
7 | "dependencies": [
8 | {
9 | "name": "flyio",
10 | "source": "github.com/gerhard/daggerverse/flyio",
11 | "pin": "3cb1cd957782f7b841f6fc49a662073180021a29"
12 | }
13 | ],
14 | "source": "dagger"
15 | }
16 |
--------------------------------------------------------------------------------
/docs/README.md:
--------------------------------------------------------------------------------
1 | # Docs
2 |
3 | We aim to keep docs lean & simple. GitHub is the default front-end - search
4 | works well enough. Your code editor is the other option.
5 |
6 | Read next: [Developing & experimenting locally using Docker](local_dev.md)
7 |
8 | ---
9 |
10 | If something is missing, feel free to open a GitHub issue. Pull requests always welcome 👍
11 |
--------------------------------------------------------------------------------
/vector/pipedream.changelog.com/default.yaml:
--------------------------------------------------------------------------------
1 | # https://vector.dev/docs/reference/configuration/global-options/
2 | api:
3 | enabled: true
4 |
5 | timezone: "UTC"
6 |
7 | data_dir: "/var/lib/vector"
8 |
9 | sources:
10 | varnish:
11 | # https://vector.dev/docs/reference/configuration/sources/stdin/
12 | type: "stdin"
13 | decoding:
14 | codec: "json"
15 |
--------------------------------------------------------------------------------
/container/welcome.bashrc:
--------------------------------------------------------------------------------
1 | export PS1="[pipely-local] $PS1"
2 |
 3 | echo 🧑🏼‍🔧 Welcome to the Pipely local debug environment!
4 | echo 🧰 Available tools: just, hurl, httpstat, sasqwatch, gotop, oha, neovim, htop
5 | echo 📚 To learn more about how the above tools can be used for local development / debugging, see /docs/local_dev.md
6 | echo ""
7 | echo 💡 Run \'just\' to see available commands
8 | echo ""
9 |
--------------------------------------------------------------------------------
/test/acceptance/pipedream/admin.hurl:
--------------------------------------------------------------------------------
1 | # Get the admin homepage
2 | GET {{proto}}://{{host}}/admin
3 | [Options]
4 | repeat: 2 # repeat so that we confirm caching behaviour
5 | HTTP 302 # expect found redirect
6 | [Asserts]
7 | header "fly-request-id" exists # served by Fly
8 | header "via" matches /[vV]arnish/ # served via Varnish
9 | header "location" == "/" # redirect to homepage
10 | header "age" == "0" # NOT stored in cache
11 |
--------------------------------------------------------------------------------
/varnish/vcl/http.vcl:
--------------------------------------------------------------------------------
1 | sub vcl_recv {
2 | # Check if the proxy layer marked this as an http connection
3 | if (req.http.x-forwarded-proto == "http") {
4 | return (synth(301, "Moved Permanently"));
5 | }
6 | }
7 |
8 | sub vcl_synth {
9 | # Handle the redirect
10 | if (req.http.x-forwarded-proto == "http"
11 | && resp.status == 301) {
12 | set resp.http.Location = "https://" + req.http.host + req.url;
13 | return (deliver);
14 | }
15 | }
16 |
--------------------------------------------------------------------------------
/.envrc:
--------------------------------------------------------------------------------
1 | # Required for op to know which account to use
2 | export OP_ACCOUNT=changelog.1password.com
3 |
4 | # Load secrets if configured - only available to changelog.com team members:
5 | # https://github.com/orgs/thechangelog/people
6 | source_env_if_exists .envrc.secrets
7 |
8 | # Defining env vars which are changelog.com specific
9 | # Will need refactoring post 1.0
10 | export AWS_REGION="eu-west-1"
11 | export AWS_S3_BUCKET_SUFFIX="-pipedream-local"
12 | export HONEYCOMB_DATASET="pipedream-local"
13 |
--------------------------------------------------------------------------------
/test/acceptance/fastly/admin.hurl:
--------------------------------------------------------------------------------
1 | # Get the admin homepage
2 | GET {{proto}}://{{host}}/admin
3 | [Options]
4 | repeat: 2 # repeat so that we confirm caching behaviour
5 | HTTP 302 # expect found redirect
6 | [Asserts]
7 | header "fly-request-id" exists # served by Fly
8 | header "via" matches /[vV]arnish/ # served via Varnish
9 | header "location" == "/" # redirect to homepage
10 | header "x-cache" == "MISS" # double-check that it's NOT stored in cache
11 | header "x-cache-hits" == "0" # NOT served from cache
12 |
--------------------------------------------------------------------------------
/just/dagger.just:
--------------------------------------------------------------------------------
1 | # https://github.com/dagger/dagger/releases
2 |
3 | [private]
4 | DAGGER_VERSION := "0.19.2"
5 | [private]
6 | DAGGER_DIR := BIN_PATH / "pipely-dagger-" + DAGGER_VERSION
7 | [private]
8 | DAGGER := env("DAGGER_BIN", DAGGER_DIR / "dagger")
9 |
10 | [private]
11 | dagger *ARGS:
12 | @[ -x {{ DAGGER }} ] \
13 | || (curl -fsSL https://dl.dagger.io/dagger/install.sh | BIN_DIR={{ DAGGER_DIR }} DAGGER_VERSION={{ DAGGER_VERSION }} sh)
14 | {{ if ARGS != "" { DAGGER + " " + ARGS } else { DAGGER + " --help" } }}
15 |
--------------------------------------------------------------------------------
/varnish/vcl/fly-request-id.vcl:
--------------------------------------------------------------------------------
1 | sub vcl_recv {
2 | # Normalize the request headers early
3 | # If fly-request-id exists, enforce it as the x-request-id immediately.
4 | if (req.http.fly-request-id) {
5 | set req.http.x-request-id = req.http.fly-request-id;
6 | }
7 | }
8 |
9 | sub vcl_deliver {
10 | # Ensure the response header matches the request header
11 | # Because we normalized in vcl_recv, req.http.x-request-id is already set
12 | # to the fly-id value if it was present.
13 | if (req.http.x-request-id) {
14 | set resp.http.x-request-id = req.http.x-request-id;
15 | }
16 | }
--------------------------------------------------------------------------------
/test/vtc/www.vtc:
--------------------------------------------------------------------------------
1 | varnishtest "Test www redirects"
2 |
3 | server s1 {
4 | rxreq
5 | txresp
6 | } -start
7 |
8 | varnish v1 -vcl+backend {
9 | vcl 4.1;
10 | include "/etc/varnish/www.vcl";
11 |
12 | # Disable caching for testing
13 | sub vcl_backend_response {
14 | set beresp.uncacheable = true;
15 | return(deliver);
16 | }
17 | } -start
18 |
19 | # test basic redirect returning different path and status code
20 | client c1 {
21 | txreq -url "/" -hdr "Host: www.cdn.tld"
22 | rxresp
23 | expect resp.status == 301
24 | expect resp.http.Location == "https://cdn.tld/"
25 | } -run
26 |
--------------------------------------------------------------------------------
/envrc.secrets.op:
--------------------------------------------------------------------------------
1 | # This token gets us access to all others
2 | export OP_SERVICE_ACCOUNT_TOKEN="op://pipely/op/credential"
3 | # Working around: an internal error occurred, please contact 1Password at support@1password.com or https://developer.1password.com/joinslack: invalid client id
4 | export PURGE_TOKEN="op://pipely/purge/credential"
5 | export HONEYCOMB_API_KEY="op://pipely/honeycomb/credential"
6 | export AWS_ACCESS_KEY_ID="op://pipely/aws-s3-logs/access-key-id"
7 | export AWS_SECRET_ACCESS_KEY="op://pipely/aws-s3-logs/secret-access-key"
8 | export MAXMIND_AUTH="op://pipely/maxmind/credential"
9 | export GCHR_PASS="op://pipely/ghcr/credential"
10 |
--------------------------------------------------------------------------------
/dagger/proxy.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "errors"
5 | "strings"
6 | )
7 |
8 | type Proxy struct {
9 | TlsExterminator string
10 | Port string
11 | Fqdn string
12 | Host string
13 | }
14 |
15 | func NewProxy(hint string) (*Proxy, error) {
16 | proxyParts := strings.Split(hint, ":")
17 | if len(proxyParts) != 3 {
18 | return nil, errors.New("must be of format 'PORT:FQDN:HOST'")
19 | }
20 |
21 | return &Proxy{
22 | TlsExterminator: strings.Join(proxyParts[:2], ":"),
23 | Port: proxyParts[0],
24 | Fqdn: proxyParts[1],
25 | Host: proxyParts[2],
26 | }, nil
27 | }
28 |
--------------------------------------------------------------------------------
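Not part of the repository: a minimal usage sketch of the NewProxy constructor above, assuming it is compiled alongside proxy.go. The hint value is hypothetical; the field splitting follows the PORT:FQDN:HOST format that proxy.go enforces.

    package main

    import "fmt"

    func main() {
        // Hypothetical hint; NewProxy expects the PORT:FQDN:HOST format.
        proxy, err := NewProxy("9000:top1.nearest.of.my-app.internal:changelog.com")
        if err != nil {
            panic(err)
        }
        // TlsExterminator keeps the first two parts joined back with ":".
        fmt.Println(proxy.TlsExterminator) // 9000:top1.nearest.of.my-app.internal
        fmt.Println(proxy.Port)            // 9000
        fmt.Println(proxy.Fqdn)            // top1.nearest.of.my-app.internal
        fmt.Println(proxy.Host)            // changelog.com
    }
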
/test/acceptance/fastly/homepage.hurl:
--------------------------------------------------------------------------------
1 | # Get the homepage
2 | GET {{proto}}://{{host}}
3 | HTTP 200 # expect OK response
4 | [Asserts]
5 | duration < 1000 # ensure that it loads sub 1s when cache is cold...
6 | header "fly-request-id" exists # served by Fly
7 | header "via" matches /[vV]arnish/ # served via Varnish
8 | header "age" exists # cache age works
9 |
10 | # Get the homepage AGAIN
11 | GET {{proto}}://{{host}}
12 | HTTP 200 # expect OK response
13 | [Asserts]
14 | duration < 100 # ensure that it loads sub 100ms when cached...
15 | header "x-cache" == "HIT" # double-check that it's NOT stored in cache
16 | header "x-cache-hits" toInt > 0 # served from cache
17 |
--------------------------------------------------------------------------------
/test/acceptance/practicalai.hurl:
--------------------------------------------------------------------------------
1 | GET {{proto}}://{{host}}/practicalai
2 | HTTP 301 # expect moved permanently response
3 | [Asserts]
4 | header "location" == "https://practicalai.fm"
5 | body contains "
You are being redirected."
6 |
7 |
8 | GET {{proto}}://{{host}}/practicalai/feed
9 | HTTP 301 # expect moved permanently response
10 | [Asserts]
11 | header "location" == "https://feeds.transistor.fm/practical-ai-machine-learning-data-science-llm"
12 | body contains "You are being redirected."
13 |
--------------------------------------------------------------------------------
/varnish/vcl/www.vcl:
--------------------------------------------------------------------------------
1 | sub vcl_recv {
2 | # Check if the host starts with www.
3 | if (req.http.host ~ "^www\.") {
4 | # Remove www. from the host
5 | set req.http.host = regsub(req.http.host, "^www\.", "");
6 | set req.http.www = "true";
7 |
8 | # Return a 301 redirect to the non-www version
9 | return (synth(301, "Moved Permanently"));
10 | }
11 |
12 | if (req.http.X-Redirect) {
13 | return (synth(308, "Permanent Redirect"));
14 | }
15 | }
16 |
17 | sub vcl_synth {
18 | # Handle the redirect
19 | if (req.http.www == "true"
20 | && resp.status == 301) {
21 | set resp.http.Location = "https://" + req.http.host + req.url;
22 | return (deliver);
23 | }
24 | }
25 |
--------------------------------------------------------------------------------
/test/acceptance/fastly/feed.hurl:
--------------------------------------------------------------------------------
1 | # Get the changelog feed
2 | GET {{proto}}://{{host}}/podcast/feed
3 | HTTP 200 # expect OK response
4 | [Asserts]
5 | duration < 1000 # ensure that it loads sub 1s when cache is cold...
6 | header "cf-ray" exists # served by Cloudflare
7 | header "via" matches /[vV]arnish/ # served via Varnish
8 | header "age" exists # cache age works
9 | header "content-type" contains "application/xml" # content type is XML
10 |
11 | # Get the changelog feed AGAIN
12 | GET {{proto}}://{{host}}/podcast/feed
13 | [Options]
14 | HTTP 200 # expect OK response
15 | [Asserts]
16 | duration < 500 # ensure that it loads sub 500ms when cached...
17 | header "x-cache" == "HIT" # served from cache
18 | header "x-cache-hits" toInt > 0 # served from cache
19 |
--------------------------------------------------------------------------------
/test/acceptance/http.hurl:
--------------------------------------------------------------------------------
1 | GET http://{{host}}/
2 | # we only add the X-Forwarded-Proto header to test local behaviour.
3 | # Fastly ignores it.
4 | # Fly.io overwrites this at a platform level.
5 | X-Forwarded-Proto: http
6 | HTTP 301 # expect moved permanently response
7 | [Asserts]
8 | header "location" == "https://{{host}}/"
9 |
10 | # Test the assets_host
11 | GET http://{{host}}/friends/73-pipely-tech.jpg?hurl=true&http
12 | Host: {{assets_host}}
13 | # we only add the X-Forwarded-Proto header to test local behaviour.
14 | # Fastly ignores it.
15 | # Fly.io overwrites this at a platform level.
16 | X-Forwarded-Proto: http
17 | HTTP 301 # expect moved permanently response
18 | [Asserts]
19 | header "location" contains "https://{{assets_host}}/friends/73-pipely-tech.jpg?hurl=true&http"
20 |
--------------------------------------------------------------------------------
/just/hurl.just:
--------------------------------------------------------------------------------
1 | # https://github.com/Orange-OpenSource/hurl/releases
2 |
3 | [private]
4 | HURL_VERSION := "7.0.0"
5 | [private]
6 | HURL_NAME := "hurl-" + HURL_VERSION + "-" + arch() + "-" + OS + "-" + OS_ALT
7 | [private]
8 | HURL := LOCAL_PATH / HURL_NAME / "bin" / "hurl"
9 |
10 | [private]
11 | hurl *ARGS:
12 | @[ -x {{ HURL }} ] \
13 | || (echo {{ _GREEN }}🔀 Installing hurl {{ HURL_VERSION }} ...{{ _RESET }} \
14 | && mkdir -p {{ BIN_PATH }} \
15 | && (curl -LSsf "https://github.com/Orange-OpenSource/hurl/releases/download/{{ HURL_VERSION }}/{{ HURL_NAME }}.tar.gz" | tar zxv -C {{ LOCAL_PATH }}) \
16 | && chmod +x {{ HURL }} && echo {{ _MAGENTA }}{{ HURL }} {{ _RESET }} && {{ HURL }} --version \
17 | && ln -sf {{ HURL }} {{ BIN_PATH }}/hurl && echo {{ _MAGENTA }}hurl{{ _RESET }} && hurl --version)
18 | {{ if ARGS != "" { HURL + " " + ARGS } else { HURL + " --help" } }}
19 |
--------------------------------------------------------------------------------
/test/vtc/http.vtc:
--------------------------------------------------------------------------------
1 | varnishtest "Test http redirects"
2 |
3 | server s1 -repeat 3 {
4 | rxreq
5 | txresp
6 | } -start
7 |
8 | varnish v1 -vcl+backend {
9 | vcl 4.1;
10 | include "/etc/varnish/http.vcl";
11 |
12 | # Disable caching for testing
13 | sub vcl_backend_response {
14 | set beresp.uncacheable = true;
15 | return(deliver);
16 | }
17 | } -start
18 |
19 | # no x-forwarded-proto
20 | client c1 {
21 | txreq -url "/"
22 | rxresp
23 | expect resp.status == 200
24 | } -run
25 |
26 | # x-forwarded-proto=https
27 | client c2 {
28 | txreq -url "/" -hdr "X-Forwarded-Proto: https"
29 | rxresp
30 | expect resp.status == 200
31 | } -run
32 |
33 | # x-forwarded-proto=http
34 | client c3 {
35 | txreq -url "/" -hdr "X-Forwarded-Proto: http" -hdr "Host: cdn.tld"
36 | rxresp
37 | expect resp.status == 301
38 | expect resp.http.Location == "https://cdn.tld/"
39 | } -run
40 |
--------------------------------------------------------------------------------
/test/vtc/purge.vtc:
--------------------------------------------------------------------------------
1 | varnishtest "Test PURGE method handling"
2 |
3 | server s1 {
4 | rxreq
5 | txresp
6 | } -start
7 |
8 | varnish v1 -vcl+backend {
9 | sub vcl_recv {
10 | if (req.method == "PURGE") {
11 | if (req.http.purge-token == "doit") {
12 | return(purge);
13 | } else {
14 | return(synth(401, "Invalid PURGE token"));
15 | }
16 | }
17 | }
18 | } -start
19 |
20 | # PURGE with no token
21 | client c1 {
22 | txreq -method PURGE -url "/"
23 | rxresp
24 | expect resp.status == 401
25 | expect resp.body ~ "Invalid PURGE token"
26 | } -run
27 |
28 | # PURGE with invalid token
29 | client c2 {
30 | txreq -method PURGE -url "/" -hdr "purge-token: do"
31 | rxresp
32 | expect resp.status == 401
33 | expect resp.body ~ "Invalid PURGE token"
34 | } -run
35 |
36 | # PURGE with correct token
37 | client c3 {
38 | txreq -method PURGE -url "/" -hdr "purge-token: doit"
39 | rxresp
40 | expect resp.status == 200
41 | } -run
42 |
--------------------------------------------------------------------------------
/just/op.just:
--------------------------------------------------------------------------------
1 | # https://app-updates.agilebits.com/product_history/CLI2
2 |
3 | [private]
4 | OP_VERSION := "2.32.0"
5 | [private]
6 | OP_NAME := "op_" + OS_ALT2 + "_" + ARCH_ALT + "_v" + OP_VERSION
7 | [private]
8 | OP_DIR := LOCAL_PATH / OP_NAME / "bin"
9 | [private]
10 | OP := OP_DIR / "op"
11 |
12 | [private]
13 | [positional-arguments]
14 | op *ARGS:
15 | @[ -x {{ OP }} ] \
16 | || (echo {{ _GREEN }}🔐 Installing op {{ OP_VERSION }} ...{{ _RESET }} \
17 | && mkdir -p {{ BIN_PATH }} {{ OP_DIR }} \
18 | && curl -LSsfo {{ OP_NAME }}.zip "https://cache.agilebits.com/dist/1P/op2/pkg/v{{ OP_VERSION }}/{{ OP_NAME }}.zip" \
19 | && unzip {{ OP_NAME }}.zip -d {{ OP_DIR }} \
20 | && rm {{ OP_NAME }}.zip \
21 | && chmod +x {{ OP }} && echo {{ _MAGENTA }}{{ OP }} {{ _RESET }} && {{ OP }} --version \
22 | && ln -sf {{ OP }} {{ BIN_PATH }}/op && echo {{ _MAGENTA }}op{{ _RESET }} && op --version)
23 | {{ if ARGS != "" { OP + " " + ' "$@"' } else { OP + " --help" } }}
24 |
--------------------------------------------------------------------------------
/fly.toml:
--------------------------------------------------------------------------------
1 | # Full app config reference: https://fly.io/docs/reference/configuration/
2 | app = "cdn-2025-02-25"
3 | # Closest to the origin
4 | primary_region = "iad"
5 |
6 | kill_signal = "SIGTERM"
7 | kill_timeout = 30
8 |
9 | [env]
10 | # This leaves 20% headroom from the machine's total available memory
11 | # Any less than this and Varnish makes the machine crash due to OOM errors.
12 | VARNISH_SIZE = "6400M"
13 |
14 | [[vm]]
15 | size = "performance-1x"
16 | memory = "8GB"
17 |
18 | [deploy]
19 | strategy = "bluegreen"
20 |
21 | [[services]]
22 | internal_port = 9000
23 | protocol = "tcp"
24 |
25 | [[services.http_checks]]
26 | grace_period = "5s"
27 | interval = "5s"
28 | method = "get"
29 | path = "/health"
30 | protocol = "http"
31 | timeout = "3s"
32 |
33 | [[services.ports]]
34 | handlers = ["tls", "http"]
35 | port = 443
36 |
37 | [[services.ports]]
38 | handlers = ["http"]
39 | port = "80"
40 |
41 | [services.concurrency]
42 | hard_limit = 2500
43 | soft_limit = 2000
44 | type = "connections"
45 |
--------------------------------------------------------------------------------
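A quick sanity check of the headroom comment above: the performance-1x machine has 8 GB ≈ 8192 MB of memory, and VARNISH_SIZE = 6400M works out to 6400 / 8192 ≈ 78% of it, leaving roughly 20% for everything else on the machine.
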
/test/acceptance/local/http.hurl:
--------------------------------------------------------------------------------
1 | # We are simulating the Fly.io behaviour locally
2 | GET http://{{host}}/
3 | X-Forwarded-Proto: https
4 | HTTP 200
5 | [Asserts]
6 | header "via" matches /[vV]arnish/ # served via Varnish
7 | header "age" exists # cache age works
8 | header "cache-status" contains "region=" # region that served this request
9 | header "cache-status" contains "origin=" # origin that served this request
10 | header "cache-status" contains "ttl=" # ttl is set
11 | header "cache-status" contains "grace=" # grace is set
12 |
13 | # Test the assets_host
14 | GET http://{{host}}/friends/73-pipely-tech.jpg?hurl=true&https
15 | Host: {{assets_host}}
16 | # we only add the X-Forwarded-Proto header to test local behaviour.
17 | # Fastly ignores it.
18 | # Fly.io overwrites this at a platform level.
19 | X-Forwarded-Proto: https
20 | Content-Type: image/jpeg # expect JPG
21 | HTTP 200 # expect OK response
22 | [Asserts]
23 | header "cf-ray" exists # served by Cloudflare
24 | header "via" matches /[vV]arnish/ # served via Varnish
25 | header "age" exists # cache age works
26 |
--------------------------------------------------------------------------------
/test/acceptance/pipedream/assets.hurl:
--------------------------------------------------------------------------------
1 | # Get a static asset
2 | GET {{proto}}://{{host}}/friends/73-pipely-tech.jpg?hurl=true
3 | Host: {{assets_host}}
4 | Content-Type: image/jpeg # expect JPG
5 | HTTP 200 # expect OK response
6 | [Asserts]
7 | header "cf-ray" exists # served by Cloudflare
8 | header "via" matches /[vV]arnish/ # served via Varnish
9 | header "age" exists # cache age works
10 |
11 | # Purge the static asset
12 | PURGE {{proto}}://{{host}}/friends/73-pipely-tech.jpg?purge=true
13 | Host: {{assets_host}}
14 | Purge-Token: {{purge_token}}
15 | HTTP 200 # expect OK response
16 | [Asserts]
17 | header "x-varnish" exists # served by Varnish
18 | header "cache-status" contains "synth" # synthetic response
19 |
20 | # Get the static asset after PURGE
21 | GET {{proto}}://{{host}}/friends/73-pipely-tech.jpg?purge=true
22 | Host: {{assets_host}}
23 | Content-Type: image/jpeg # expect JPG
24 | HTTP 200 # expect OK response
25 | [Asserts]
26 | header "cf-ray" exists # served by Cloudflare
27 | header "via" matches /[vV]arnish/ # served via Varnish
28 | header "cache-status" contains "miss" # fresh after purge
29 |
--------------------------------------------------------------------------------
/test/vtc/fly-request-id.vtc:
--------------------------------------------------------------------------------
1 | varnishtest "Test fly-request-id header handling"
2 |
3 | server s1 {
4 | # Transaction 1: Generic check
5 | rxreq
6 | expect req.http.x-request-id != ""
7 | txresp -status 200 -hdr "x-request-id: generated-backend-id"
8 |
9 | # Transaction 2: Specific fly-request-id check
10 | rxreq
11 | expect req.http.x-request-id == "01KARNY9D13ZM9B7HB7YYEDW8Z-lhr"
12 | txresp -status 200 -hdr "x-request-id: 01KARNY9D13ZM9B7HB7YYEDW8Z-lhr"
13 | } -start
14 |
15 | varnish v1 -vcl+backend {
16 | vcl 4.1;
17 | include "/etc/varnish/fly-request-id.vcl";
18 |
19 | # Disable caching for testing
20 | sub vcl_backend_response {
21 | set beresp.uncacheable = true;
22 | return(deliver);
23 | }
24 | } -start
25 |
26 | # Test 1: The backend x-request-id header is used
27 | client c1 {
28 | txreq -url "/"
29 | rxresp
30 | expect resp.status == 200
31 | expect resp.http.x-request-id == "generated-backend-id"
32 | } -run
33 |
34 | # Test 2: Test fly-request-id header
35 | client c2 {
36 | txreq -url "/" -hdr "fly-request-id: 01KARNY9D13ZM9B7HB7YYEDW8Z-lhr"
37 | rxresp
38 | expect resp.status == 200
39 | expect resp.http.x-request-id == "01KARNY9D13ZM9B7HB7YYEDW8Z-lhr"
40 | } -run
--------------------------------------------------------------------------------
/.github/workflows/ship_it.yml:
--------------------------------------------------------------------------------
1 | name: 'Ship It!'
2 |
3 | on:
4 | push:
5 | branches:
6 | - main
7 | tags:
8 | - 'v**'
9 | pull_request:
10 | types:
11 | - opened
12 | - synchronize
13 | - reopened
14 | - ready_for_review
15 | workflow_dispatch:
16 |
17 | # All jobs have the same outcome. We define multiple for resiliency reasons.
18 | jobs:
19 | # In thechangelog/pipely repository (a.k.a. upstream),
20 | # this is the preferred default (i.e. custom runners, faster than GitHub):
21 | on-namespace:
22 | if: ${{ contains(vars.RUNS_ON, 'namespace') }}
23 | uses: ./.github/workflows/_namespace.yml
24 | secrets: inherit
25 |
26 | # Just in case Namespace.so misbehaves, we want a fallback.
27 | # Always run two of everything™
28 | on-github-fallback:
29 | needs: on-namespace
30 | if: ${{ failure() }}
31 | uses: ./.github/workflows/_github.yml
32 | secrets: inherit
33 |
34 | # As forks will not have access to our Namespace.so custom runners,
 35 |   # we fall back to the default GitHub free runners:
36 | on-github:
37 | if: ${{ !contains(vars.RUNS_ON, 'namespace') }}
38 | uses: ./.github/workflows/_github.yml
39 | secrets: inherit
40 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Content and Design Copyright (c) Changelog Media LLC. All rights reserved.
2 |
3 | Code Copyright (c) Changelog Media LLC and licensed under the following conditions:
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in
13 | all copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
21 | THE SOFTWARE.
22 |
--------------------------------------------------------------------------------
/varnish/varnish-json-response.bash:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | set -eu
3 |
4 | # https://varnish-cache.org/docs/trunk/reference/varnishncsa.html
5 | # https://varnish-cache.org/docs/trunk/reference/vsl.html
6 | #
7 | # NOTE: When GEOIP_ENRICHED=true env var, GeoIP enrichment is handled by Vector.dev using the `client_ip` value below:
8 |
9 | VARNISH_LOG_FORMAT='{"time": "%{%Y-%m-%dT%H:%M:%SZ}t", "request_id":"%{x-request-id}o", "request_referer": "%{referer}i", "request_user_agent": "%{user-agent}i", "request_accept_content": "%{accept}i", "req_header_size": "%{VSL:ReqAcct[1]}x", "req_body_size": "%{VSL:ReqAcct[2]}x", "req_total_size": "%{VSL:ReqAcct[3]}x", "client_ip": "%{VCL_Log:client_ip}x", "protocol": "%H", "request": "%m", "host": "%{host}i", "url": "%U", "content_type": "%{content-type}o", "status": "%s", "cache_status": "%{Varnish:handling}x", "hits": "%{VCL_Log:hits}x", "ttl": "%{VCL_Log:ttl}x", "grace": "%{VCL_Log:grace}x", "server_datacenter": "%{VCL_Log:server_datacenter}x", "origin": "%{VCL_Log:backend}x", "time_first_byte_s": "%{Varnish:time_firstbyte}x", "time_elapsed": "%D", "resp_header_size": "%{VSL:ReqAcct[4]}x", "resp_body_size": "%{VSL:ReqAcct[5]}x", "resp_total_size": "%{VSL:ReqAcct[6]}x"}'
10 |
11 | exec varnishncsa -jF "$VARNISH_LOG_FORMAT"
12 |
--------------------------------------------------------------------------------
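For illustration only, not part of the repository: a small Go sketch of the shape of one log line emitted by the varnishncsa format above, decoding a few of the fields from VARNISH_LOG_FORMAT. The sample line is made up; note that varnishncsa renders every value as a JSON string.

    package main

    import (
        "encoding/json"
        "fmt"
    )

    // A few of the fields emitted by VARNISH_LOG_FORMAT above.
    type VarnishLogLine struct {
        Time        string `json:"time"`
        RequestID   string `json:"request_id"`
        ClientIP    string `json:"client_ip"`
        Host        string `json:"host"`
        URL         string `json:"url"`
        Status      string `json:"status"`
        CacheStatus string `json:"cache_status"`
        Origin      string `json:"origin"`
        TimeElapsed string `json:"time_elapsed"`
    }

    func main() {
        // Hypothetical log line, shaped like the format string above.
        line := `{"time":"2025-01-01T00:00:00Z","request_id":"abc","client_ip":"151.101.1.162","host":"changelog.com","url":"/podcast/feed","status":"200","cache_status":"hit","origin":"feeds","time_elapsed":"1234"}`

        var entry VarnishLogLine
        if err := json.Unmarshal([]byte(line), &entry); err != nil {
            panic(err)
        }
        fmt.Println(entry.CacheStatus, entry.Origin) // hit feeds
    }
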
/test/vtc/practicalai.vtc:
--------------------------------------------------------------------------------
1 | varnishtest "Test practical.ai redirects"
2 |
3 | varnish v1 -vcl {
4 | vcl 4.1;
5 |
6 | import std;
7 |
8 | backend default none;
9 |
10 | sub vcl_recv {
11 | if (req.url == "/practicalai/feed"
12 | || req.url == "/practicalai") {
13 | return (synth(301, "Moved Permanently"));
14 | }
15 | }
16 |
17 | sub vcl_synth {
18 | if (req.url == "/practicalai/feed"
19 | && resp.status == 301) {
20 | set resp.http.location = "https://feeds.transistor.fm/practical-ai-machine-learning-data-science-llm";
21 | return (deliver);
22 | }
23 |
24 | if (req.url == "/practicalai"
25 | && resp.status == 301) {
26 | set resp.http.location = "https://practicalai.fm";
27 | return (deliver);
28 | }
29 | }
30 | } -start
31 |
32 | # /practicalai/feed redirect
33 | client c1 {
34 | txreq -url "/practicalai/feed"
35 | rxresp
36 | expect resp.status == 301
37 | expect resp.http.location == "https://feeds.transistor.fm/practical-ai-machine-learning-data-science-llm"
38 | } -run
39 |
40 | # /practicalai redirect
41 | client c2 {
42 | txreq -url "/practicalai"
43 | rxresp
44 | expect resp.status == 301
45 | expect resp.http.location == "https://practicalai.fm"
46 | } -run
47 |
--------------------------------------------------------------------------------
/test/acceptance/pipedream/health.hurl:
--------------------------------------------------------------------------------
1 | GET {{proto}}://{{host}}/health
2 | HTTP 204 # expect No Content response
3 | [Asserts]
4 | header "x-varnish" exists # served by Varnish
5 | header "cache-status" contains "region" # contains region info (a.k.a. server_datacentre)
6 | header "cache-status" contains "synth" # never cached
7 |
8 | GET {{proto}}://{{host}}/app_health
9 | HTTP 200 # expect OK response
10 | [Asserts]
11 | header "via" matches /[vV]arnish/ # served via Varnish
12 | header "cache-status" contains "region" # contains region info (a.k.a. server_datacentre)
13 | header "cache-status" contains "origin=app" # served by app backend
14 | header "cache-status" contains "bypass" # never cached
15 |
16 | GET {{proto}}://{{host}}/feeds_health
17 | HTTP 200 # expect OK response
18 | [Asserts]
19 | header "via" matches /[vV]arnish/ # served via Varnish
20 | header "cache-status" contains "region" # contains region info (a.k.a. server_datacentre)
21 | header "cache-status" contains "origin=feeds" # served by feeds backend
22 | header "cache-status" contains "bypass" # never cached
23 |
24 | GET {{proto}}://{{host}}/assets_health
25 | HTTP 200 # expect OK response
26 | [Asserts]
27 | header "via" matches /[vV]arnish/ # served via Varnish
28 | header "cache-status" contains "region" # contains region info (a.k.a. server_datacentre)
29 | header "cache-status" contains "origin=assets" # served by feeds backend
30 | header "cache-status" contains "bypass" # never cached
31 |
--------------------------------------------------------------------------------
/.github/workflows/_github.yml:
--------------------------------------------------------------------------------
1 | name: GitHub
2 |
3 | on:
4 | workflow_call:
5 |
6 | env:
7 | DAGGER_CLOUD_TOKEN: ${{ secrets.DAGGER_CLOUD_TOKEN }}
8 | OP_SERVICE_ACCOUNT_TOKEN: ${{ secrets.OP_SERVICE_ACCOUNT_TOKEN }}
9 | TERM: xterm
10 |
11 | jobs:
12 | run:
13 | runs-on: ubuntu-24.04
14 | steps:
15 | - name: 'Checkout code...'
16 | uses: actions/checkout@v4
17 |
18 | - uses: extractions/setup-just@v2
19 | with:
20 | just-version: '1.43.0'
21 |
22 | - name: 'Test all (including local acceptance)...'
23 | run: |
24 | just test
25 |
26 | - name: 'Test acceptance of current CDN deployment...'
27 | if: ${{ github.event_name == 'schedule' || github.event_name == 'workflow_dispatch' }}
28 | run: |
29 | just test-acceptance-fastly
30 |
31 | - name: 'Test acceptance of NEW CDN deployment...'
32 | if: ${{ github.event_name == 'schedule' || github.event_name == 'workflow_dispatch' }}
33 | run: |
34 | just test-acceptance-pipedream
35 |
36 | - name: 'Archive test acceptance reports...'
37 | uses: actions/upload-artifact@v4
38 | with:
39 | name: test-acceptance
40 | path: |
41 | tmp/test-acceptance-*
42 |
43 | - name: 'Publish & deploy tag...'
44 | if: ${{ startsWith(github.ref, 'refs/tags/v') && github.event_name == 'push' }}
45 | run: |
46 | just publish ${{ github.ref_name }}
47 | just deploy ${{ github.ref_name }}
48 |
--------------------------------------------------------------------------------
/test/acceptance/pipedream/homepage.hurl:
--------------------------------------------------------------------------------
1 | # Get the homepage
2 | GET {{proto}}://{{host}}?hurl=true
3 | HTTP 200 # expect OK response
4 | [Asserts]
5 | duration < 1000 # ensure that it loads sub 1s when cache is cold...
6 | header "fly-request-id" exists # served by Fly
7 | header "via" matches /[vV]arnish/ # served via Varnish
8 | header "age" exists # cache age works
9 | header "cache-status" contains "region=" # region that served this request
10 | header "cache-status" contains "origin=" # origin that served this request
11 | header "cache-status" contains "ttl=" # ttl is set
12 | header "cache-status" contains "grace=" # grace is set
13 |
14 | # Get the homepage AGAIN
15 | GET {{proto}}://{{host}}?hurl=true
16 | HTTP 200 # expect OK response
17 | [Asserts]
18 | duration < 100 # ensure that it loads sub 100ms when cached...
19 | header "fly-request-id" exists # served by Fly
20 | header "via" matches /[vV]arnish/ # served via Varnish
21 | header "cache-status" contains "hit" # definitely served from cache
22 |
23 | # Purge the homepage
24 | PURGE {{proto}}://{{host}}?purge=true
25 | Purge-Token: {{purge_token}}
26 | HTTP 200 # expect OK response
27 | [Asserts]
28 | header "x-varnish" exists # served by Varnish
29 | header "cache-status" contains "synth" # synthetic response
30 |
31 | # Get the homepage after PURGE
32 | GET {{proto}}://{{host}}?purge=true
33 | Purge-Token: {{purge_token}}
34 | HTTP 200 # expect OK response
35 | [Asserts]
36 | header "fly-request-id" exists # served by Fly
37 | header "via" matches /[vV]arnish/ # served via Varnish
38 | header "cache-status" contains "miss" # fresh after purge
39 |
--------------------------------------------------------------------------------
/test/vtc/news-mp3.vtc:
--------------------------------------------------------------------------------
1 | varnishtest "Test news mp3 redirects"
2 |
3 | # App mock server
4 | server s1 {
5 | rxreq
6 | txresp -status 404 -body "App backend"
7 | } -start
8 |
9 | # Start varnish with our VCL
10 | varnish v1 -vcl {
11 | vcl 4.1;
12 |
13 | import std;
14 |
15 | # include the separate redirects vcl file
16 | include "/etc/varnish/news-mp3.vcl";
17 |
18 | backend app {
19 | .host = "${s1_addr}";
20 | .port = "${s1_port}";
21 | }
22 |
23 | sub vcl_recv {
24 | set req.http.x-backend = "app";
25 | }
26 |
27 | # Disable caching for testing
28 | sub vcl_backend_response {
29 | set beresp.uncacheable = true;
30 | return(deliver);
31 | }
32 | } -start
33 |
34 | # / should go to app backend simulating a page not found
35 | client c1 {
36 | txreq -url "/not-found"
37 | rxresp
38 | expect resp.status == 404
39 | expect resp.body == "App backend"
40 | } -run
41 |
42 | # test basic redirect returning different path and status code
43 | client c2 {
44 | txreq -url "/uploads/podcast/news-2022-06-27/the-changelog-news-2022-06-27.mp3"
45 | rxresp
46 | expect resp.status == 308
47 | expect resp.http.Location == "https://127.0.0.1/uploads/news/1/changelog-news-1.mp3"
48 | } -run
49 |
50 | # test basic redirect returning different path and status code and including the query string
51 | client c3 {
52 | txreq -url "/uploads/podcast/news-2022-06-27/the-changelog-news-2022-06-27.mp3?this=is&a=query&string"
53 | rxresp
54 | expect resp.status == 308
55 | expect resp.http.Location == "https://127.0.0.1/uploads/news/1/changelog-news-1.mp3?this=is&a=query&string"
56 | } -run
57 |
58 | # we use acceptance tests for the other redirects, so that we compare the behaviour of both CDNs
59 |
--------------------------------------------------------------------------------
/.github/workflows/_namespace.yml:
--------------------------------------------------------------------------------
1 | name: Namespace.so
2 |
3 | on:
4 | workflow_call:
5 |
6 | env:
7 | DAGGER_BIN: /vendor/dagger/bin/dagger
8 | DAGGER_CLOUD_TOKEN: ${{ secrets.DAGGER_CLOUD_TOKEN }}
9 | OP_SERVICE_ACCOUNT_TOKEN: ${{ secrets.OP_SERVICE_ACCOUNT_TOKEN }}
10 | TERM: xterm
11 |
12 | jobs:
13 | run:
14 | runs-on:
15 | - nscloud-ubuntu-24.04-amd64-4x8-with-cache
16 | - nscloud-git-mirror-5gb
17 | - namespace-experiments:dagger.integration=enabled;dagger.version=0.19.2
18 | steps:
19 | - name: 'Checkout code...'
20 | uses: namespacelabs/nscloud-checkout-action@v5
21 |
22 | - uses: extractions/setup-just@v2
23 | with:
24 | just-version: '1.43.0'
25 |
26 | - name: 'Test all (including local acceptance)...'
27 | run: |
28 | just test
29 |
30 | - name: 'Test acceptance of current CDN deployment...'
31 | if: ${{ github.event_name == 'schedule' || github.event_name == 'workflow_dispatch' }}
32 | run: |
33 | just test-acceptance-fastly
34 |
35 | - name: 'Test acceptance of NEW CDN deployment...'
36 | if: ${{ github.event_name == 'schedule' || github.event_name == 'workflow_dispatch' }}
37 | run: |
38 | just test-acceptance-pipedream
39 |
40 | - name: 'Archive test acceptance reports...'
41 | uses: actions/upload-artifact@v4
42 | with:
43 | name: test-acceptance
44 | path: |
45 | tmp/test-acceptance-*
46 |
47 | - name: 'Publish & deploy tag...'
48 | if: ${{ startsWith(github.ref, 'refs/tags/v') && github.event_name == 'push' }}
49 | run: |
50 | just publish ${{ github.ref_name }}
51 | just deploy ${{ github.ref_name }}
52 |
--------------------------------------------------------------------------------
/just/_config.just:
--------------------------------------------------------------------------------
1 | set shell := ["bash", "-uc"]
2 |
3 | [private]
4 | OS := if os() == "macos" { "apple" } else { "unknown" }
5 |
6 | [private]
7 | OS_ALT := if os() == "macos" { "darwin" } else { "linux-gnu" }
8 |
9 | [private]
10 | OS_ALT2 := if os() == "macos" { "darwin" } else { "linux" }
11 |
12 | [private]
13 | ARCH_ALT := if arch() == "x86_64" { "amd64" } else { "arm64" }
14 |
15 | [private]
16 | LOCAL_PATH := "~/.local"
17 |
18 | [private]
19 | BIN_PATH := LOCAL_PATH / "bin"
20 |
21 | [private]
22 | _DEFAULT_TAG := "dev-" + env("USER", "piper")
23 |
24 | [private]
25 | DAGGER_FLY_MODULE := "github.com/gerhard/daggerverse/flyio@flyio/v0.2.0"
26 |
27 | [private]
28 | FLY_ORG := env("FLY_ORG", "changelog")
29 |
30 | [private]
31 | FLY_APP := env("FLY_APP", "cdn-2025-02-25")
32 |
33 | [private]
34 | FLY_APP_IMAGE := env("FLY_APP_IMAGE", "ghcr.io/thechangelog/pipely")
35 |
36 | [private]
37 | FLY_APP_REGIONS := env("FLY_APP_REGIONS", "sea,sjc,lax,dfw,ord,iad,ewr,scl,lhr,cdg,ams,fra,jnb,sin,nrt,syd")
38 |
39 | [private]
40 | export PURGE_TOKEN := env("PURGE_TOKEN", "local-purge")
41 |
42 | [private]
43 | LOCAL_CONTAINER_IMAGE := env("LOCAL_CONTAINER_IMAGE", "pipely.dev:" + datetime("%Y-%m-%d"))
44 |
45 | # https://linux.101hacks.com/ps1-examples/prompt-color-using-tput/
46 |
47 | [private]
48 | _RESET := "$(tput sgr0)"
49 | [private]
50 | _GREEN := "$(tput bold)$(tput setaf 2)"
51 | [private]
52 | _MAGENTA := "$(tput bold)$(tput setaf 5)"
53 | [private]
54 | _WHITEB := "$(tput bold)$(tput setaf 7)"
55 | [private]
56 | _YELLOWB := "$(tput bold)$(tput setaf 3)"
57 | [private]
58 | _CYANB := "$(tput bold)$(tput setaf 6)"
59 | [private]
60 | _MAGENTAB := "$(tput bold)$(tput setaf 5)"
61 | [private]
62 | _GREENB := "$(tput bold)$(tput setaf 2)"
63 | [private]
64 | _BLUEB := "$(tput bold)$(tput setaf 4)"
65 |
--------------------------------------------------------------------------------
/test/acceptance/pipedream/feed.hurl:
--------------------------------------------------------------------------------
1 | # Get the changelog feed
2 | GET {{proto}}://{{host}}/podcast/feed?hurl=true
3 | HTTP 200 # expect OK response
4 | [Asserts]
5 | duration < 1000 # ensure that it loads sub 1s when cache is cold...
6 | header "cf-ray" exists # served from Cloudflare
7 | header "via" matches /[vV]arnish/ # served via Varnish
8 | header "age" exists # cache age works
9 | header "cache-status" contains "region=" # region that served this request
10 | header "cache-status" contains "origin=" # origin that served this request
11 | header "cache-status" contains "ttl=" # ttl is set
12 | header "cache-status" contains "grace=" # grace is set
13 |
14 | # Get the changelog feed AGAIN
15 | GET {{proto}}://{{host}}/podcast/feed?hurl=true
16 | [Options]
17 | delay: {{delay_ms}} # wait more than TTL so that it becomes stale
18 | HTTP 200 # expect OK response
19 | [Asserts]
20 | duration < 500 # ensure that it loads sub 500ms when cached...
21 | header "cache-status" contains "hit" # served from cache
22 | header "cache-status" contains "stale" # will need to be refreshed from origin
23 | header "age" toInt >= {{delay_s}} # has been stored in cache for MORE than TTL
24 |
25 | # Get the changelog feed ONE MORE TIME
26 | GET {{proto}}://{{host}}/podcast/feed?hurl=true
27 | [Options]
28 | delay: 5s # wait a bit so that it refreshes from origin
29 | HTTP 200 # expect OK response
30 | [Asserts]
31 | duration < 500 # ensure that it loads sub 500ms when cached...
32 | header "cache-status" contains "hit" # served from cache
33 | header "cache-status" not contains "stale" # not stale
34 | header "age" toInt <= {{delay_s}} # has been stored in cache LESS than TTL
35 |
36 | # Purge the changelog feed
37 | PURGE {{proto}}://{{host}}/podcast/feed?hurl=true
38 | Purge-Token: {{purge_token}}
39 | HTTP 200 # expect OK response
40 | [Asserts]
41 | header "x-varnish" exists # served by Varnish
42 | header "cache-status" contains "synth" # synthetic response
43 |
44 | # Get the changelog feed after PURGE
45 | GET {{proto}}://{{host}}/podcast/feed?hurl=true
46 | HTTP 200 # expect OK response
47 | [Asserts]
48 | header "via" matches /[vV]arnish/ # served via Varnish
49 | header "cache-status" contains "miss" # fresh after purge
50 |
--------------------------------------------------------------------------------
/test/vtc/client-ip.vtc:
--------------------------------------------------------------------------------
1 | varnishtest "Test client-ip handling"
2 |
3 | server s1 {
4 | rxreq
5 | txresp
6 | } -start
7 |
8 | varnish v1 -vcl+backend {
9 | sub vcl_deliver {
10 | ### Figure out which is the best public IP to use
11 | # Prefer fly-client-ip header
12 | if (req.http.fly-client-ip) {
13 | set resp.http.x-client-ip = req.http.fly-client-ip;
14 | # If the above is not present, take x-forwarded-for
15 | } else if (req.http.x-forwarded-for) {
16 | set resp.http.x-client-ip = regsub(req.http.x-forwarded-for, "^([^,]+).*", "\1");
17 | # If neither are present, use the default
18 | } else {
19 | set resp.http.x-client-ip = client.ip;
20 | }
21 | }
22 | } -start
23 |
24 | # Test 1: Test fly-client-ip header
25 | client c1 {
26 | txreq -url "/" -hdr "fly-client-ip: 151.101.129.162"
27 | rxresp
28 | expect resp.status == 200
29 | expect resp.http.x-client-ip == "151.101.129.162"
30 | } -run
31 |
32 | # Test 2: fly-client-ip header is preferred when x-forwarded-for is set
33 | client c1 {
34 | txreq -url "/" -hdr "fly-client-ip: 151.101.129.162" -hdr "x-forwarded-for: 151.101.1.162, 66.241.124.108, 172.16.5.82"
35 | rxresp
36 | expect resp.status == 200
37 | expect resp.http.x-client-ip == "151.101.129.162"
38 | } -run
39 |
40 | # Test 3: uses the first ip when 3 ips are present in x-forwarded-for
41 | client c1 {
42 | txreq -url "/" -hdr "x-forwarded-for: 151.101.1.162, 66.241.124.108, 172.16.5.82"
43 | rxresp
44 | expect resp.status == 200
45 | expect resp.http.x-client-ip == "151.101.1.162"
46 | } -run
47 |
48 | # Test 4: uses the first ip when 2 ips are present in x-forwarded-for
49 | client c1 {
50 | txreq -url "/" -hdr "x-forwarded-for: 151.101.1.162, 66.241.124.108"
51 | rxresp
52 | expect resp.status == 200
53 | expect resp.http.x-client-ip == "151.101.1.162"
54 | } -run
55 |
56 | # Test 5: uses the ip in x-forwarded-for & it prefers it over the default client ip
57 | client c1 {
58 | txreq -url "/" -hdr "x-forwarded-for: 151.101.1.162"
59 | rxresp
60 | expect resp.status == 200
61 | expect resp.http.x-client-ip == "151.101.1.162"
62 | } -run
63 |
64 | # Test 6: uses the default client ip
65 | client c1 {
66 | txreq -url "/"
67 | rxresp
68 | expect resp.status == 200
69 | expect resp.http.x-client-ip == "127.0.0.1"
70 | } -run
71 |
--------------------------------------------------------------------------------
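For illustration only, not part of the repository: the selection order exercised above — fly-client-ip first, then the first x-forwarded-for entry, then the connection address — expressed as a small Go sketch. Function and parameter names are made up.

    package main

    import (
        "fmt"
        "strings"
    )

    // clientIP mirrors the selection order tested above: fly-client-ip wins,
    // then the first x-forwarded-for entry, then the connection address.
    func clientIP(flyClientIP, xForwardedFor, remoteAddr string) string {
        if flyClientIP != "" {
            return flyClientIP
        }
        if xForwardedFor != "" {
            first := strings.Split(xForwardedFor, ",")[0]
            return strings.TrimSpace(first)
        }
        return remoteAddr
    }

    func main() {
        fmt.Println(clientIP("151.101.129.162", "151.101.1.162, 66.241.124.108", "127.0.0.1")) // 151.101.129.162
        fmt.Println(clientIP("", "151.101.1.162, 66.241.124.108, 172.16.5.82", "127.0.0.1"))   // 151.101.1.162
        fmt.Println(clientIP("", "", "127.0.0.1"))                                             // 127.0.0.1
    }
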
/regions.txt:
--------------------------------------------------------------------------------
1 | NAME CODE GATEWAY LAUNCH PLAN + ONLY GPUS
2 | Amsterdam, Netherlands ams ✓ ✓
3 | Ashburn, Virginia (US) iad ✓ ✓
4 | Atlanta, Georgia (US) atl
5 | Bogotá, Colombia bog
6 | Boston, Massachusetts (US) bos
7 | Bucharest, Romania otp
8 | Chicago, Illinois (US) ord ✓
9 | Dallas, Texas (US) dfw ✓
10 | Denver, Colorado (US) den
11 | Ezeiza, Argentina eze
12 | Frankfurt, Germany fra ✓ ✓
13 | Guadalajara, Mexico gdl
14 | Hong Kong, Hong Kong hkg ✓
15 | Johannesburg, South Africa jnb
16 | London, United Kingdom lhr ✓
17 | Los Angeles, California (US) lax ✓
18 | Madrid, Spain mad
19 | Miami, Florida (US) mia
20 | Montreal, Canada yul
21 | Mumbai, India bom ✓
22 | Paris, France cdg ✓
23 | Phoenix, Arizona (US) phx
24 | Querétaro, Mexico qro ✓
25 | Rio de Janeiro, Brazil gig
26 | San Jose, California (US) sjc ✓ ✓
27 | Santiago, Chile scl ✓
28 | Sao Paulo, Brazil gru
29 | Seattle, Washington (US) sea ✓
30 | Secaucus, NJ (US) ewr ✓
31 | Singapore, Singapore sin ✓
32 | Stockholm, Sweden arn
33 | Sydney, Australia syd ✓ ✓
34 | Tokyo, Japan nrt ✓
35 | Toronto, Canada yyz ✓
36 | Warsaw, Poland waw
37 |
--------------------------------------------------------------------------------
/dagger/go.mod:
--------------------------------------------------------------------------------
1 | module dagger/pipely
2 |
3 | go 1.23.2
4 |
5 | require (
6 | github.com/99designs/gqlgen v0.17.75
7 | github.com/Khan/genqlient v0.8.1
8 | github.com/vektah/gqlparser/v2 v2.5.28
9 | go.opentelemetry.io/otel v1.36.0
10 | go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.12.2
11 | go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.12.2
12 | go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.32.0
13 | go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.32.0
14 | go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.32.0
15 | go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.32.0
16 | go.opentelemetry.io/otel/log v0.12.2
17 | go.opentelemetry.io/otel/metric v1.36.0
18 | go.opentelemetry.io/otel/sdk v1.36.0
19 | go.opentelemetry.io/otel/sdk/log v0.12.2
20 | go.opentelemetry.io/otel/sdk/metric v1.36.0
21 | go.opentelemetry.io/otel/trace v1.36.0
22 | go.opentelemetry.io/proto/otlp v1.6.0
23 | golang.org/x/sync v0.15.0
24 | google.golang.org/grpc v1.73.0
25 | )
26 |
27 | require (
28 | github.com/cenkalti/backoff/v4 v4.3.0 // indirect
29 | github.com/cenkalti/backoff/v5 v5.0.2 // indirect
30 | github.com/containerd/log v0.1.0 // indirect
31 | github.com/containerd/platforms v0.2.1
32 | github.com/go-logr/logr v1.4.2 // indirect
33 | github.com/go-logr/stdr v1.2.2 // indirect
34 | github.com/google/uuid v1.6.0 // indirect
35 | github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 // indirect
36 | github.com/opencontainers/go-digest v1.0.0 // indirect
37 | github.com/opencontainers/image-spec v1.1.0-rc5 // indirect
38 | github.com/sirupsen/logrus v1.9.3 // indirect
39 | github.com/sosodev/duration v1.3.1 // indirect
40 | go.opentelemetry.io/auto/sdk v1.1.0 // indirect
41 | go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.32.0 // indirect
42 | golang.org/x/net v0.41.0 // indirect
43 | golang.org/x/sys v0.33.0 // indirect
44 | golang.org/x/text v0.26.0 // indirect
45 | google.golang.org/genproto/googleapis/api v0.0.0-20250519155744-55703ea1f237 // indirect
46 | google.golang.org/genproto/googleapis/rpc v0.0.0-20250519155744-55703ea1f237 // indirect
47 | google.golang.org/protobuf v1.36.6 // indirect
48 | )
49 |
50 | replace go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc => go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.12.2
51 |
52 | replace go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp => go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.12.2
53 |
54 | replace go.opentelemetry.io/otel/log => go.opentelemetry.io/otel/log v0.12.2
55 |
56 | replace go.opentelemetry.io/otel/sdk/log => go.opentelemetry.io/otel/sdk/log v0.12.2
57 |
--------------------------------------------------------------------------------
/test/vtc/health.vtc:
--------------------------------------------------------------------------------
1 | varnishtest "Test health checks for different backends"
2 |
3 | # App mock server
4 | server s1 {
5 | rxreq
6 | expect req.url == "/health"
7 | txresp -status 200 -body "App backend is healthy"
8 | } -start
9 |
10 | # Feeds mock server
11 | server s2 {
12 | rxreq
13 | expect req.url == "/health"
14 | txresp -status 200 -body "Feeds backend is healthy"
15 | } -start
16 |
17 | # Assets mock server
18 | server s3 {
19 | rxreq
20 | expect req.url == "/health"
21 | txresp -status 200 -body "Assets backend is healthy"
22 | } -start
23 |
24 | # Start Varnish with a VCL close to our final one
25 | # FWIW, we don't use dynamic directors so that we don't need to handle variable timing & delay
26 | varnish v1 -vcl {
27 | vcl 4.1;
28 |
29 | import std;
30 |
31 | backend app {
32 | .host = "${s1_addr}";
33 | .port = "${s1_port}";
34 | .host_header = "app.tld";
35 | }
36 |
37 | backend feeds {
38 | .host = "${s2_addr}";
39 | .port = "${s2_port}";
40 | .host_header = "feeds.tld";
41 | }
42 |
43 | backend assets {
44 | .host = "${s3_addr}";
45 | .port = "${s3_port}";
46 | .host_header = "assets.tld";
47 | }
48 |
49 | sub vcl_recv {
50 | if (req.url == "/health") {
51 | return(synth(204));
52 | }
53 |
54 | if (req.url == "/app_health") {
55 | set req.http.x-backend = "app";
56 | set req.url = "/health";
57 | return(pass);
58 | }
59 |
60 | if (req.url == "/feeds_health") {
61 | set req.http.x-backend = "feeds";
62 | set req.url = "/health";
63 | return(pass);
64 | }
65 |
66 | if (req.url == "/assets_health") {
67 | set req.http.x-backend = "assets";
68 | set req.url = "/health";
69 | return(pass);
70 | }
71 | }
72 |
73 | sub vcl_pass {
74 | if (req.http.x-backend == "assets") {
75 | set req.backend_hint = assets;
76 | } else if (req.http.x-backend == "feeds") {
77 | set req.backend_hint = feeds;
78 | } else {
79 | set req.backend_hint = app;
80 | }
81 | }
82 | } -start
83 |
84 | # Check Varnish health endpoint
85 | client c1 {
86 | txreq -url "/health"
87 | rxresp
88 | expect resp.status == 204
89 | } -run
90 |
91 | # Check App health endpoint
92 | client c2 {
93 | txreq -url "/app_health"
94 | rxresp
95 | expect resp.status == 200
96 | expect resp.body == "App backend is healthy"
97 | } -run
98 |
99 | # Check Feeds health endpoint
100 | client c3 {
101 | txreq -url "/feeds_health"
102 | rxresp
103 | expect resp.status == 200
104 | expect resp.body == "Feeds backend is healthy"
105 | } -run
106 |
107 | # Check Assets health endpoint
108 | client c4 {
109 | txreq -url "/assets_health"
110 | rxresp
111 | expect resp.status == 200
112 | expect resp.body == "Assets backend is healthy"
113 | } -run
114 |
--------------------------------------------------------------------------------
/test/acceptance/assets.hurl:
--------------------------------------------------------------------------------
1 | GET {{proto}}://{{host}}/static/images/podcasts/podcast-original-f16d0363067166f241d080ee2e2d4a28.png
2 | Host: {{assets_host}}
3 | HTTP 200 # expect OK response
4 | Content-Type: image/png # expect PNG
5 | Access-Control-Allow-Origin: * # CORS
6 | [Asserts]
7 | header "cf-ray" exists # served by Cloudflare
8 | header "via" matches /[vV]arnish/ # served via Varnish
9 | header "age" exists # cache age works
10 |
11 | HEAD {{proto}}://{{host}}/static/images/podcasts/podcast-original-f16d0363067166f241d080ee2e2d4a28.png
12 | Host: {{assets_host}}
13 | HTTP 200 # expect OK response
14 | Content-Type: image/png # expect PNG
15 | Access-Control-Allow-Origin: * # CORS
16 | [Asserts]
17 | header "cf-ray" exists # served by Cloudflare
18 | header "via" matches /[vV]arnish/ # served via Varnish
19 | header "age" exists # cache age works
20 |
21 | POST {{proto}}://{{host}}/static/images/podcasts/podcast-original-f16d0363067166f241d080ee2e2d4a28.png
22 | Host: {{assets_host}}
23 | HTTP 405 # expect method not allowed
24 |
25 | GET {{proto}}://{{host}}/static/css/email-5690e09e20c0b25fefebbc5049362b39.css
26 | Host: {{assets_host}}
27 | HTTP 200 # expect OK response
28 | Content-Type: text/css # expect CSS
29 | Access-Control-Allow-Origin: * # CORS
30 | [Asserts]
31 | header "cf-ray" exists # served by Cloudflare
32 | header "via" matches /[vV]arnish/ # served via Varnish
33 | header "age" exists # cache age works
34 |
35 | GET {{proto}}://{{host}}/uploads/news/140/changelog-news-140.mp3
36 | Host: {{assets_host}}
37 | HTTP 200 # expect OK response
38 | Content-Type: audio/mpeg # expect MP3
39 | Access-Control-Allow-Origin: * # CORS
40 | [Asserts]
41 | header "cf-ray" exists # served by Cloudflare
42 | header "via" matches /[vV]arnish/ # served via Varnish
43 | header "age" exists # cache age works
44 | bytes count == 8575592 # full file size is correct
45 |
46 | # Request first mp3 chunk
47 | GET {{proto}}://{{host}}/uploads/news/140/changelog-news-140.mp3
48 | Range: bytes=0-1023
49 | Host: {{assets_host}}
50 | HTTP 206 # expect partial content response
51 | Content-Type: audio/mpeg # expect MP3
52 | [Asserts]
53 | header "cf-ray" exists # served by Cloudflare
54 | header "via" matches /[vV]arnish/ # served via Varnish
55 | header "age" exists # cache age works
56 | bytes count == 1024 # first chunk size is correct
57 |
58 | # Request middle mp3 chunk
59 | GET {{proto}}://{{host}}/uploads/news/140/changelog-news-140.mp3
60 | Range: bytes=1024-2047
61 | Host: {{assets_host}}
62 | HTTP 206 # expect partial content response
63 | Content-Type: audio/mpeg
64 | Content-Range: bytes 1024-2047/8575592
65 | [Asserts]
66 | header "cf-ray" exists # served by Cloudflare
67 | header "via" matches /[vV]arnish/ # served via Varnish
68 | header "age" exists # cache age works
69 | bytes count == 1024 # middle chunk size is correct
70 |
71 | # Request end mp3 chunk
72 | GET {{proto}}://{{host}}/uploads/news/140/changelog-news-140.mp3
73 | Range: bytes=-1024
74 | Host: {{assets_host}}
75 | HTTP 206 # expect partial content response
76 | Content-Type: audio/mpeg
77 | Content-Range: bytes 8574568-8575591/8575592
78 | [Asserts]
79 | header "cf-ray" exists # served by Cloudflare
80 | header "via" matches /[vV]arnish/ # served via Varnish
81 | header "age" exists # cache age works
82 | bytes count == 1024 # last chunk size is correct
83 |
--------------------------------------------------------------------------------
/container/justfile:
--------------------------------------------------------------------------------
1 | # vim: set tabstop=4 shiftwidth=4 expandtab:
2 |
3 | set shell := ["bash", "-uc"]
4 |
5 | [private]
6 | default:
7 | @just --list
8 |
9 | [private]
10 | fmt:
11 | just --fmt --check --unstable
12 | just --version
13 |
14 | # Start all processes
15 | up:
16 | overmind start --timeout=30 --no-port --auto-restart=all
17 |
18 | # Check $url
19 | check url="http://localhost:9000":
20 | httpstat {{ url }}
21 |
22 | # List Varnish backends
23 | backends:
24 | varnishadm backend.list
25 |
26 | # Tail Varnish backend_health
27 | health:
28 | varnishlog -g raw -i backend_health
29 |
30 | # Varnish top
31 | top:
32 | varnishtop
33 |
34 | # Varnish stat
35 | stat:
36 | varnishstat
37 |
38 | # Run VCL tests
39 | test-vtc *ARGS:
40 | varnishtest {{ ARGS }} test/vtc/*
41 |
42 | # Run acceptance tests
43 | test-acceptance-local *ARGS:
44 | hurl --test --color --continue-on-error --report-html /var/opt/hurl/test-acceptance-local \
45 | --variable proto=http \
46 | --variable host=localhost:9000 \
47 | --variable assets_host=cdn.changelog.com \
48 | --variable delay_ms=6000 \
49 | --variable delay_s=5 \
50 | --variable purge_token="{{ env("PURGE_TOKEN") }}" \
51 | {{ ARGS }} \
52 | test/acceptance/*.hurl test/acceptance/local/*.hurl test/acceptance/pipedream/*.hurl
53 |
54 | # Show Varnish cache stats
55 | cache:
56 | varnishncsa -c -f '%m %u %h %{x-cache}o %{x-cache-hits}o'
57 |
58 | [private]
59 | bench url="http://localhost:9000/" http="2" reqs="1000" conns="50":
60 | time oha -n {{ reqs }} -c {{ conns }} {{ url }} --http-version={{ http }}
61 |
62 | # Benchmark app origin
63 | bench-app-1-origin: (bench "https://changelog-2025-05-05.fly.dev/")
64 |
65 | # Benchmark app Fastly
66 | bench-app-2-fastly: (bench "https://changelog.com/" "2" "10000")
67 |
68 | # Benchmark app Bunny
69 | bench-app-3-bunny: (bench "https://bunny.changelog.com/")
70 |
71 | # Benchmark app Pipedream
72 | bench-app-4-pipedream: (bench "https://pipedream.changelog.com/" "2" "10000")
73 |
74 | # Benchmark app via local Varnish
75 | bench-app-5-local: (bench "http://localhost:9000/" "2" "10000")
76 |
77 | # Benchmark app TLS proxy
78 | bench-app-6-tls-proxy: (bench "http://localhost:5000/" "1.1")
79 |
80 | # Benchmark feed origin
81 | bench-feed-1-origin: (bench "https://feeds.changelog.place/podcast.xml")
82 |
83 | # Benchmark feed Fastly
84 | bench-feed-2-fastly: (bench "https://changelog.com/podcast/feed")
85 |
86 | # Benchmark feed Bunny CDN
87 | bench-feed-3-bunny: (bench "https://bunny.changelog.com/podcast/feed")
88 |
89 | # Benchmark feed Pipedream
90 | bench-feed-4-pipedream: (bench "https://pipedream.changelog.com/podcast/feed")
91 |
92 | # Benchmark feed via local Varnish
93 | bench-feed-5-local: (bench "http://localhost:9000/podcast/feed" "2" "10000" "50")
94 |
95 | # Benchmark feed TLS proxy
96 | bench-feed-6-tls-proxy: (bench "http://localhost:5010/podcast.xml" "1.1")
97 |
98 | # https://williamyaps.github.io/wlmjavascript/servercli.html
99 |
100 | # Speedtest Los Angeles
101 | speedtest-los-angeles:
102 | speedtest-go -s 9916
103 |
104 | # Speedtest Denver
105 | speedtest-denver:
106 | speedtest-go -s 9912
107 |
108 | # Speedtest Chicago
109 | speedtest-chicago:
110 | speedtest-go -s 11750
111 |
112 | # Speedtest Toronto
113 | speedtest-toronto:
114 | speedtest-go -s 9911
115 |
116 | # Speedtest Ashburn
117 | speedtest-ashburn:
118 | speedtest-go -s 6030
119 |
120 | # Speedtest London
121 | speedtest-london:
122 | speedtest-go -s 6032
123 |
124 | # Speedtest Paris
125 | speedtest-paris:
126 | speedtest-go -s 6027
127 |
128 | # Speedtest Amsterdam
129 | speedtest-amsterdam:
130 | speedtest-go -s 9913
131 |
132 | # Speedtest Frankfurt
133 | speedtest-frankfurt:
134 | speedtest-go -s 10010
135 |
--------------------------------------------------------------------------------
/test/vtc/assets.vtc:
--------------------------------------------------------------------------------
1 | varnishtest "Test Assets backend"
2 |
3 | # App mock server
4 | server s1 {
5 | rxreq
6 | txresp -status 200 -body "App backend"
7 | } -start
8 |
9 | # Assets mock server with responses for all asset requests
10 | server s2 {
11 | # Test for HEAD /static/images/podcasts/podcast-original-f16d0363067166f241d080ee2e2d4a28.png
12 | rxreq
13 | expect req.url == "/static/images/podcasts/podcast-original-f16d0363067166f241d080ee2e2d4a28.png"
14 | txresp -status 200
15 |
16 | # Test for /static/images/podcasts/podcast-original-f16d0363067166f241d080ee2e2d4a28.png
17 | rxreq
18 | expect req.url == "/static/images/podcasts/podcast-original-f16d0363067166f241d080ee2e2d4a28.png"
19 | txresp -status 200 -body "podcast-original-f16d0363067166f241d080ee2e2d4a28.png"
20 |
21 | # Test for /static/css/email-5690e09e20c0b25fefebbc5049362b39.css
22 | rxreq
23 | expect req.url == "/static/css/email-5690e09e20c0b25fefebbc5049362b39.css"
24 | txresp -status 200 -body "email-5690e09e20c0b25fefebbc5049362b39.css"
25 |
26 | # Test for /uploads/news/140/changelog-news-140.mp3
27 | rxreq
28 | expect req.url == "/uploads/news/140/changelog-news-140.mp3"
29 | txresp -status 200 -body "changelog-news-140.mp3"
30 | } -start
31 |
32 | # Start varnish with our VCL
33 | varnish v1 -vcl {
34 | vcl 4.1;
35 |
36 | import std;
37 |
38 | backend app {
39 | .host = "${s1_addr}";
40 | .port = "${s1_port}";
41 | }
42 |
43 | backend assets {
44 | .host = "${s2_addr}";
45 | .port = "${s2_port}";
46 | }
47 |
48 | sub vcl_recv {
49 | if (req.http.host == "cdn.tld") {
50 | if (req.method !~ "GET|HEAD|PURGE") {
51 | return(synth(405, "Method Not Allowed"));
52 | }
53 | set req.http.x-backend = "assets";
54 | }
55 | if (req.method == "PURGE") {
56 | return(purge);
57 | }
58 | }
59 |
60 | sub vcl_hash {
61 | if (req.http.x-backend == "assets") {
62 | set req.backend_hint = assets;
63 | } else {
64 | set req.backend_hint = app;
65 | }
66 | }
67 |
68 | sub vcl_synth {
69 | if (req.http.host == "cdn.tld"
70 | && resp.status == 405) {
71 | set resp.http.allow = "GET, HEAD, PURGE";
72 | return(deliver);
73 | }
74 | }
75 |
76 | sub vcl_deliver {
77 | if (req.http.x-backend == "assets") {
78 | set resp.http.access-control-allow-origin = "*";
79 | }
80 | }
81 |
82 | # Disable caching for testing
83 | sub vcl_backend_response {
84 | set beresp.uncacheable = true;
85 | return(deliver);
86 | }
87 | } -start
88 |
89 | # / should go to app backend
90 | client c1 {
91 | txreq -url "/"
92 | rxresp
93 | expect resp.status == 200
94 | expect resp.body == "App backend"
95 | } -run
96 |
97 | # POST /static/images/podcasts/podcast-original-f16d0363067166f241d080ee2e2d4a28.png should not be allowed
98 | client c2 {
99 | txreq -method "POST" -url "/static/images/podcasts/podcast-original-f16d0363067166f241d080ee2e2d4a28.png" -hdr "Host: cdn.tld"
100 | rxresp
101 | expect resp.status == 405
102 | expect resp.http.allow == "GET, HEAD, PURGE"
103 | } -run
104 |
105 | # HEAD /static/images/podcasts/podcast-original-f16d0363067166f241d080ee2e2d4a28.png should go to assets backend
106 | client c3 {
107 | txreq -method "HEAD" -url "/static/images/podcasts/podcast-original-f16d0363067166f241d080ee2e2d4a28.png" -hdr "Host: cdn.tld"
108 | rxresp
109 | expect resp.status == 200
110 | expect resp.http.access-control-allow-origin == "*"
111 | } -run
112 |
113 | # PURGE /static/images/podcasts/podcast-original-f16d0363067166f241d080ee2e2d4a28.png should work
114 | client c4 {
115 | txreq -method "PURGE" -url "/static/images/podcasts/podcast-original-f16d0363067166f241d080ee2e2d4a28.png" -hdr "Host: cdn.tld"
116 | rxresp
117 | expect resp.status == 200
118 | } -run
119 |
120 | # /static/images/podcasts/podcast-original-f16d0363067166f241d080ee2e2d4a28.png should go to assets backend
121 | client c5 {
122 | txreq -method "GET" -url "/static/images/podcasts/podcast-original-f16d0363067166f241d080ee2e2d4a28.png" -hdr "Host: cdn.tld"
123 | rxresp
124 | expect resp.status == 200
125 | expect resp.body == "podcast-original-f16d0363067166f241d080ee2e2d4a28.png"
126 | expect resp.http.access-control-allow-origin == "*"
127 | } -run
128 |
129 | # /static/css/email-5690e09e20c0b25fefebbc5049362b39.css should go to assets backend
130 | client c6 {
131 | txreq -method "GET" -url "/static/css/email-5690e09e20c0b25fefebbc5049362b39.css" -hdr "Host: cdn.tld"
132 | rxresp
133 | expect resp.status == 200
134 | expect resp.body == "email-5690e09e20c0b25fefebbc5049362b39.css"
135 | expect resp.http.access-control-allow-origin == "*"
136 | } -run
137 |
138 | # /uploads/news/140/changelog-news-140.mp3 should go to assets backend
139 | client c7 {
140 | txreq -method "GET" -url "/uploads/news/140/changelog-news-140.mp3" -hdr "Host: cdn.tld"
141 | rxresp
142 | expect resp.status == 200
143 | expect resp.body == "changelog-news-140.mp3"
144 | expect resp.http.access-control-allow-origin == "*"
145 | } -run
146 |
--------------------------------------------------------------------------------
/test/acceptance/feeds.hurl:
--------------------------------------------------------------------------------
1 | GET {{proto}}://{{host}}/podcast/feed
2 | HTTP 200 # expect OK response
3 | [Asserts]
4 | header "cf-ray" exists # served by Cloudflare
5 | header "via" matches /[vV]arnish/ # served via Varnish
6 | header "age" exists # cache age works
7 | header "content-type" contains "application/xml" # content type is XML
8 |
9 | GET {{proto}}://{{host}}/gotime/feed
10 | HTTP 200 # expect OK response
11 | [Asserts]
12 | header "cf-ray" exists # served by Cloudflare
13 | header "via" matches /[vV]arnish/ # served via Varnish
14 | header "age" exists # cache age works
15 | header "content-type" contains "application/xml" # content type is XML
16 |
17 | GET {{proto}}://{{host}}/master/feed
18 | HTTP 200 # expect OK response
19 | [Asserts]
20 | header "cf-ray" exists # served by Cloudflare
21 | header "via" matches /[vV]arnish/ # served via Varnish
22 | header "age" exists # cache age works
23 | header "content-type" contains "application/xml" # content type is XML
24 |
25 | GET {{proto}}://{{host}}/feed
26 | HTTP 200 # expect OK response
27 | [Asserts]
28 | header "cf-ray" exists # served by Cloudflare
29 | header "via" matches /[vV]arnish/ # served via Varnish
30 | header "age" exists # cache age works
31 | header "content-type" contains "application/xml" # content type is XML
32 |
33 | GET {{proto}}://{{host}}/jsparty/feed
34 | HTTP 200 # expect OK response
35 | [Asserts]
36 | header "cf-ray" exists # served by Cloudflare
37 | header "via" matches /[vV]arnish/ # served via Varnish
38 | header "age" exists # cache age works
39 | header "content-type" contains "application/xml" # content type is XML
40 |
41 | GET {{proto}}://{{host}}/shipit/feed
42 | HTTP 200 # expect OK response
43 | [Asserts]
44 | header "cf-ray" exists # served by Cloudflare
45 | header "via" matches /[vV]arnish/ # served via Varnish
46 | header "age" exists # cache age works
47 | header "content-type" contains "application/xml" # content type is XML
48 |
49 | GET {{proto}}://{{host}}/news/feed
50 | HTTP 200 # expect OK response
51 | [Asserts]
52 | header "cf-ray" exists # served by Cloudflare
53 | header "via" matches /[vV]arnish/ # served via Varnish
54 | header "age" exists # cache age works
55 | header "content-type" contains "application/xml" # content type is XML
56 |
57 | GET {{proto}}://{{host}}/brainscience/feed
58 | HTTP 200 # expect OK response
59 | [Asserts]
60 | header "cf-ray" exists # served by Cloudflare
61 | header "via" matches /[vV]arnish/ # served via Varnish
62 | header "age" exists # cache age works
63 | header "content-type" contains "application/xml" # content type is XML
64 |
65 | GET {{proto}}://{{host}}/founderstalk/feed
66 | HTTP 200 # expect OK response
67 | [Asserts]
68 | header "cf-ray" exists # served by Cloudflare
69 | header "via" matches /[vV]arnish/ # served via Varnish
70 | header "age" exists # cache age works
71 | header "content-type" contains "application/xml" # content type is XML
72 |
73 | GET {{proto}}://{{host}}/interviews/feed
74 | HTTP 200 # expect OK response
75 | [Asserts]
76 | header "cf-ray" exists # served by Cloudflare
77 | header "via" matches /[vV]arnish/ # served via Varnish
78 | header "age" exists # cache age works
79 | header "content-type" contains "application/xml" # content type is XML
80 |
81 | GET {{proto}}://{{host}}/feed/
82 | [Options]
83 | skip: true
84 | HTTP 200 # expect OK response
85 | [Asserts]
86 | header "cf-ray" exists # served by Cloudflare
87 | header "via" matches /[vV]arnish/ # served via Varnish
88 | header "age" exists # cache age works
89 | header "content-type" contains "application/xml" # content type is XML
90 |
91 | GET {{proto}}://{{host}}/rfc/feed
92 | HTTP 200 # expect OK response
93 | [Asserts]
94 | header "cf-ray" exists # served by Cloudflare
95 | header "via" matches /[vV]arnish/ # served via Varnish
96 | header "age" exists # cache age works
97 | header "content-type" contains "application/xml" # content type is XML
98 |
99 | GET {{proto}}://{{host}}/spotlight/feed
100 | HTTP 200 # expect OK response
101 | [Asserts]
102 | header "cf-ray" exists # served by Cloudflare
103 | header "via" matches /[vV]arnish/ # served via Varnish
104 | header "age" exists # cache age works
105 | header "content-type" contains "application/xml" # content type is XML
106 |
107 | GET {{proto}}://{{host}}/afk/feed
108 | HTTP 200 # expect OK response
109 | [Asserts]
110 | header "cf-ray" exists # served by Cloudflare
111 | header "via" matches /[vV]arnish/ # served via Varnish
112 | header "age" exists # cache age works
113 | header "content-type" contains "application/xml" # content type is XML
114 |
115 | GET {{proto}}://{{host}}/posts/feed
116 | HTTP 200 # expect OK response
117 | [Asserts]
118 | header "cf-ray" exists # served by Cloudflare
119 | header "via" matches /[vV]arnish/ # served via Varnish
120 | header "age" exists # cache age works
121 | header "content-type" contains "application/xml" # content type is XML
122 |
123 | GET {{proto}}://{{host}}/plusplus/xae9heiphohtupha1Ahha3aexoo0oo4W/feed
124 | HTTP 200 # expect OK response
125 | [Asserts]
126 | header "cf-ray" exists # served by Cloudflare
127 | header "via" matches /[vV]arnish/ # served via Varnish
128 | header "age" exists # cache age works
129 | header "content-type" contains "application/xml" # content type is XML
130 |
--------------------------------------------------------------------------------
/test/vtc/cache-status.vtc:
--------------------------------------------------------------------------------
1 | varnishtest "Test cache-status header"
2 |
3 | # App mock server
4 | server s1 {
5 | rxreq
6 | expect req.url == "/health"
7 | txresp -status 200 -body "App OK"
8 |
9 | rxreq
10 | expect req.url == "/"
11 | txresp -status 200 -body "App homepage"
12 | } -start
13 |
14 | # Feeds mock server
15 | server s2 {
16 | rxreq
17 | expect req.url == "/health"
18 | txresp -status 200 -body "Feeds OK"
19 |
20 | rxreq
21 | expect req.url == "/podcast.xml"
22 | txresp -status 200 -body "podcast.xml"
23 | } -start
24 |
25 | # Assets mock server
26 | server s3 {
27 | rxreq
28 | expect req.url == "/health"
29 | txresp -status 200 -body "Assets OK"
30 |
31 | rxreq
32 | expect req.url == "/static/images/podcasts/podcast-original-f16d0363067166f241d080ee2e2d4a28.png"
33 | txresp -status 200 -body "podcast-original-f16d0363067166f241d080ee2e2d4a28.png"
34 | } -start
35 |
36 | # Start Varnish with a VCL close to our final one
37 | # FWIW, we don't use dynamic directors so that we don't need to handle variable timing & delay
38 | varnish v1 -vcl {
39 | vcl 4.1;
40 |
41 | import std;
42 | import var;
43 |
44 | backend app {
45 | .host = "${s1_addr}";
46 | .port = "${s1_port}";
47 | }
48 |
49 | backend feeds {
50 | .host = "${s2_addr}";
51 | .port = "${s2_port}";
52 | }
53 |
54 | backend assets {
55 | .host = "${s3_addr}";
56 | .port = "${s3_port}";
57 | }
58 |
59 | sub vcl_recv {
60 | if (req.url == "/varnish_health") {
61 | return(synth(204));
62 | }
63 |
64 | if (req.http.host == "assets.tld") {
65 | set req.http.x-backend = "assets";
66 | return(hash);
67 | }
68 |
69 | if (req.url == "/podcast/feed") {
70 | set req.http.x-backend = "feeds";
71 | set req.url = "/podcast.xml";
72 | return(hash);
73 | }
74 |
75 | if (req.url == "/app_health") {
76 | set req.http.x-backend = "app";
77 | set req.url = "/health";
78 | return(pass);
79 | }
80 |
81 | if (req.url == "/feeds_health") {
82 | set req.http.x-backend = "feeds";
83 | set req.url = "/health";
84 | return(pass);
85 | }
86 |
87 | if (req.url == "/assets_health") {
88 | set req.http.x-backend = "assets";
89 | set req.url = "/health";
90 | return(pass);
91 | }
92 | }
93 |
94 | sub vcl_synth {
95 | # Which region is serving this request?
96 | var.set("region", std.getenv("FLY_REGION"));
97 | if (var.get("region") == "") {
98 | var.set("region", "LOCAL");
99 | }
100 | set resp.http.cache-status = "region=" + var.get("region") + "; synth";
101 | }
102 |
103 | sub vcl_hash {
104 | if (req.http.x-backend == "assets") {
105 | set req.backend_hint = assets;
106 | set req.http.x-backend-fqdn = "assets.tld";
107 | } else if (req.http.x-backend == "feeds") {
108 | set req.backend_hint = feeds;
109 | set req.http.x-backend-fqdn = "feeds.tld";
110 | } else {
111 | set req.backend_hint = app;
112 | set req.http.x-backend-fqdn = "app.tld";
113 | }
114 | }
115 |
116 | sub vcl_pass {
117 | # Bypass caching
118 | set req.http.x-bypass = "true";
119 | if (req.http.x-backend == "assets") {
120 | set req.backend_hint = assets;
121 | set req.http.x-backend-fqdn = "assets.tld";
122 | } else if (req.http.x-backend == "feeds") {
123 | set req.backend_hint = feeds;
124 | set req.http.x-backend-fqdn = "feeds.tld";
125 | } else {
126 | set req.backend_hint = app;
127 | set req.http.x-backend-fqdn = "app.tld";
128 | }
129 | }
130 |
131 | sub vcl_deliver {
132 | # Which region is serving this request?
133 | var.set("region", std.getenv("FLY_REGION"));
134 | if (var.get("region") == "") {
135 | var.set("region", "LOCAL");
136 | }
137 | set resp.http.cache-status = "region=" + var.get("region");
138 |
139 | # Which origin is serving this request?
140 | set resp.http.cache-status = resp.http.cache-status + "; origin=" + req.backend_hint + "," + req.http.x-backend-fqdn;
141 | unset req.http.x-backend-fqdn;
142 |
143 | if (req.http.x-bypass == "true") {
144 | set resp.http.cache-status = resp.http.cache-status + "; bypass";
145 | return(deliver);
146 | }
147 |
148 | # What is the remaining TTL for this object?
149 | set resp.http.cache-status = resp.http.cache-status + "; ttl=" + obj.ttl;
150 | # What is the max object staleness permitted?
151 | set resp.http.cache-status = resp.http.cache-status + "; grace=" + obj.grace;
152 |
153 | # Did the response come from Varnish or from the backend?
154 | if (obj.hits > 0) {
155 | set resp.http.cache-status = resp.http.cache-status + "; hit";
156 | } else {
157 | set resp.http.cache-status = resp.http.cache-status + "; miss";
158 | }
159 |
160 | # Is this object stale?
161 | if (obj.hits > 0 && obj.ttl < std.duration(integer=0)) {
162 | set resp.http.cache-status = resp.http.cache-status + "; stale";
163 | }
164 |
165 | # How many times has this response been served from Varnish?
166 | set resp.http.cache-status = resp.http.cache-status + "; hits=" + obj.hits;
167 | }
168 | } -start
169 |
170 | # Varnish
171 | client c1 {
172 | txreq -url "/varnish_health"
173 | rxresp
174 | expect resp.status == 204
175 | expect resp.http.cache-status == "region=LOCAL; synth"
176 | } -run
177 |
178 | # App health
179 | client c2 {
180 | txreq -url "/app_health"
181 | rxresp
182 | expect resp.status == 200
183 | expect resp.http.cache-status == "region=LOCAL; origin=app,app.tld; bypass"
184 | } -run
185 |
186 | # App homepage uncached
187 | client c3 {
188 | txreq -url "/"
189 | rxresp
190 | expect resp.status == 200
191 | expect resp.http.cache-status == "region=LOCAL; origin=app,app.tld; ttl=120.000; grace=10.000; miss; hits=0"
192 | } -run
193 |
194 | # App homepage cached
195 | client c4 {
196 | txreq -url "/"
197 | rxresp
198 | expect resp.status == 200
199 | expect resp.http.cache-status ~ "region=LOCAL; origin=app,app.tld; ttl=1.+; grace=10.000; hit; hits=1"
200 | } -run
201 |
202 | # Feeds health
203 | client c5 {
204 | txreq -url "/feeds_health"
205 | rxresp
206 | expect resp.status == 200
207 | expect resp.http.cache-status == "region=LOCAL; origin=feeds,feeds.tld; bypass"
208 | } -run
209 |
210 | # Feeds uncached
211 | client c6 {
212 | txreq -url "/podcast/feed"
213 | rxresp
214 | expect resp.status == 200
215 | expect resp.http.cache-status == "region=LOCAL; origin=feeds,feeds.tld; ttl=120.000; grace=10.000; miss; hits=0"
216 | } -run
217 |
218 | # Feeds cached
219 | client c7 {
220 | txreq -url "/podcast/feed"
221 | rxresp
222 | expect resp.status == 200
223 | expect resp.http.cache-status ~ "region=LOCAL; origin=feeds,feeds.tld; ttl=1.+; grace=10.000; hit; hits=1"
224 | } -run
225 |
226 | # Assets health
227 | client c9 {
228 | txreq -url "/assets_health"
229 | rxresp
230 | expect resp.status == 200
231 | expect resp.http.cache-status == "region=LOCAL; origin=assets,assets.tld; bypass"
232 | } -run
233 |
234 | # Assets uncached
235 | client c10 {
236 | txreq -url "/static/images/podcasts/podcast-original-f16d0363067166f241d080ee2e2d4a28.png" -hdr "Host: assets.tld"
237 | rxresp
238 | expect resp.status == 200
239 | expect resp.http.cache-status == "region=LOCAL; origin=assets,assets.tld; ttl=120.000; grace=10.000; miss; hits=0"
240 | } -run
241 |
242 | # Assets cached
243 | client c11 {
244 | txreq -url "/static/images/podcasts/podcast-original-f16d0363067166f241d080ee2e2d4a28.png" -hdr "Host: assets.tld"
245 | rxresp
246 | expect resp.status == 200
247 | expect resp.http.cache-status ~ "region=LOCAL; origin=assets,assets.tld; ttl=1.+; grace=10.000; hit; hits=1"
248 | } -run
249 |
--------------------------------------------------------------------------------
/varnish/vcl/news-mp3.vcl:
--------------------------------------------------------------------------------
1 | sub vcl_recv {
2 | # Remove x-redirect header from client requests to prevent header injection
3 | unset req.http.x-redirect;
4 |
5 | if (req.url ~ "^/uploads/podcast/news-2022-06-27/the-changelog-news-2022-06-27.mp3($|\?)") {
6 | set req.http.x-redirect = "/uploads/news/1/changelog-news-1.mp3";
7 | } else if (req.url ~ "^/uploads/podcast/news-2022-07-04/the-changelog-news-2022-07-04.mp3($|\?)") {
8 | set req.http.x-redirect = "/uploads/news/2/changelog-news-2.mp3";
9 | } else if (req.url ~ "^/uploads/podcast/news-2022-07-11/the-changelog-news-2022-07-11.mp3($|\?)") {
10 | set req.http.x-redirect = "/uploads/news/3/changelog-news-3.mp3";
11 | } else if (req.url ~ "^/uploads/podcast/news-2022-07-18/the-changelog-news-2022-07-18.mp3($|\?)") {
12 | set req.http.x-redirect = "/uploads/news/4/changelog-news-4.mp3";
13 | } else if (req.url ~ "^/uploads/podcast/news-2022-07-25/the-changelog-news-2022-07-25.mp3($|\?)") {
14 | set req.http.x-redirect = "/uploads/news/5/changelog-news-5.mp3";
15 | } else if (req.url ~ "^/uploads/podcast/news-2022-08-01/the-changelog-news-2022-08-01.mp3($|\?)") {
16 | set req.http.x-redirect = "/uploads/news/6/changelog-news-6.mp3";
17 | } else if (req.url ~ "^/uploads/podcast/news-2022-08-08/the-changelog-news-2022-08-08.mp3($|\?)") {
18 | set req.http.x-redirect = "/uploads/news/7/changelog-news-7.mp3";
19 | } else if (req.url ~ "^/uploads/podcast/news-2022-08-15/the-changelog-news-2022-08-15.mp3($|\?)") {
20 | set req.http.x-redirect = "/uploads/news/8/changelog-news-8.mp3";
21 | } else if (req.url ~ "^/uploads/podcast/news-2022-08-22/the-changelog-news-2022-08-22.mp3($|\?)") {
22 | set req.http.x-redirect = "/uploads/news/9/changelog-news-9.mp3";
23 | } else if (req.url ~ "^/uploads/podcast/news-2022-08-22/the-changelog-news-2022-08-22-j2g.mp3($|\?)") {
24 | set req.http.x-redirect = "/uploads/news/9/changelog-news-9-j2g.mp3";
25 | } else if (req.url ~ "^/uploads/podcast/news-2022-08-29/the-changelog-news-2022-08-29.mp3($|\?)") {
26 | set req.http.x-redirect = "/uploads/news/10/changelog-news-10.mp3";
27 | } else if (req.url ~ "^/uploads/podcast/news-2022-09-05/the-changelog-news-2022-09-05.mp3($|\?)") {
28 | set req.http.x-redirect = "/uploads/news/11/changelog-news-11.mp3";
29 | } else if (req.url ~ "^/uploads/podcast/news-2022-09-12/the-changelog-news-2022-09-12.mp3($|\?)") {
30 | set req.http.x-redirect = "/uploads/news/12/changelog-news-12.mp3";
31 | } else if (req.url ~ "^/uploads/podcast/news-2022-09-19/the-changelog-news-2022-09-19.mp3($|\?)") {
32 | set req.http.x-redirect = "/uploads/news/13/changelog-news-13.mp3";
33 | } else if (req.url ~ "^/uploads/podcast/news-2022-09-26/the-changelog-news-2022-09-26.mp3($|\?)") {
34 | set req.http.x-redirect = "/uploads/news/14/changelog-news-14.mp3";
35 | } else if (req.url ~ "^/uploads/podcast/news-2022-10-03/the-changelog-news-2022-10-03.mp3($|\?)") {
36 | set req.http.x-redirect = "/uploads/news/15/changelog-news-15.mp3";
37 | } else if (req.url ~ "^/uploads/podcast/news-2022-10-10/the-changelog-news-2022-10-10.mp3($|\?)") {
38 | set req.http.x-redirect = "/uploads/news/16/changelog-news-16.mp3";
39 | } else if (req.url ~ "^/uploads/podcast/news-2022-10-17/the-changelog-news-2022-10-17.mp3($|\?)") {
40 | set req.http.x-redirect = "/uploads/news/17/changelog-news-17.mp3";
41 | } else if (req.url ~ "^/uploads/podcast/news-2022-10-24/the-changelog-news-2022-10-24.mp3($|\?)") {
42 | set req.http.x-redirect = "/uploads/news/18/changelog-news-18.mp3";
43 | } else if (req.url ~ "^/uploads/podcast/news-2022-11-07/the-changelog-news-2022-11-07.mp3($|\?)") {
44 | set req.http.x-redirect = "/uploads/news/19/changelog-news-19.mp3";
45 | } else if (req.url ~ "^/uploads/podcast/news-2022-11-14/the-changelog-news-2022-11-14.mp3($|\?)") {
46 | set req.http.x-redirect = "/uploads/news/20/changelog-news-20.mp3";
47 | } else if (req.url ~ "^/uploads/podcast/news-2022-11-21/the-changelog-news-2022-11-21.mp3($|\?)") {
48 | set req.http.x-redirect = "/uploads/news/21/changelog-news-21.mp3";
49 | } else if (req.url ~ "^/uploads/podcast/news-2022-11-28/the-changelog-news-2022-11-28.mp3($|\?)") {
50 | set req.http.x-redirect = "/uploads/news/22/changelog-news-22.mp3";
51 | } else if (req.url ~ "^/uploads/podcast/news-2022-12-05/the-changelog-news-2022-12-05.mp3($|\?)") {
52 | set req.http.x-redirect = "/uploads/news/23/changelog-news-23.mp3";
53 | } else if (req.url ~ "^/uploads/podcast/news-2022-12-12/the-changelog-news-2022-12-12.mp3($|\?)") {
54 | set req.http.x-redirect = "/uploads/news/24/changelog-news-24.mp3";
55 | } else if (req.url ~ "^/uploads/podcast/news-2023-01-02/the-changelog-news-2023-01-02.mp3($|\?)") {
56 | set req.http.x-redirect = "/uploads/news/25/changelog-news-25.mp3";
57 | } else if (req.url ~ "^/uploads/podcast/news-2023-01-09/the-changelog-news-2023-01-09.mp3($|\?)") {
58 | set req.http.x-redirect = "/uploads/news/26/changelog-news-26.mp3";
59 | } else if (req.url ~ "^/uploads/podcast/news-2023-01-16/the-changelog-news-2023-01-16.mp3($|\?)") {
60 | set req.http.x-redirect = "/uploads/news/27/changelog-news-27.mp3";
61 | } else if (req.url ~ "^/uploads/podcast/news-2023-01-23/the-changelog-news-2023-01-23.mp3($|\?)") {
62 | set req.http.x-redirect = "/uploads/news/28/changelog-news-28.mp3";
63 | } else if (req.url ~ "^/uploads/podcast/news-2023-01-30/the-changelog-news-2023-01-30.mp3($|\?)") {
64 | set req.http.x-redirect = "/uploads/news/29/changelog-news-29.mp3";
65 | } else if (req.url ~ "^/uploads/podcast/news-2023-02-06/the-changelog-news-2023-02-06.mp3($|\?)") {
66 | set req.http.x-redirect = "/uploads/news/30/changelog-news-30.mp3";
67 | } else if (req.url ~ "^/uploads/podcast/news-2023-02-13/the-changelog-news-2023-02-13.mp3($|\?)") {
68 | set req.http.x-redirect = "/uploads/news/31/changelog-news-31.mp3";
69 | } else if (req.url ~ "^/uploads/podcast/news-2023-02-20/the-changelog-news-2023-02-20.mp3($|\?)") {
70 | set req.http.x-redirect = "/uploads/news/32/changelog-news-32.mp3";
71 | } else if (req.url ~ "^/uploads/podcast/news-2023-02-20/the-changelog-news-2023-02-20-p883.mp3($|\?)") {
72 | set req.http.x-redirect = "/uploads/news/32/changelog-news-32p883.mp3";
73 | } else if (req.url ~ "^/uploads/podcast/news-2023-02-27/the-changelog-news-2023-02-27.mp3($|\?)") {
74 | set req.http.x-redirect = "/uploads/news/33/changelog-news-33.mp3";
75 | } else if (req.url ~ "^/uploads/podcast/news-2023-03-06/the-changelog-news-2023-03-06.mp3($|\?)") {
76 | set req.http.x-redirect = "/uploads/news/34/changelog-news-34.mp3";
77 | } else if (req.url ~ "^/uploads/podcast/news-2023-03-06/the-changelog-news-2023-03-06-XXXL.mp3($|\?)") {
78 | set req.http.x-redirect = "/uploads/news/34/changelog-news-34-XXXL.mp3";
79 | } else if (req.url ~ "^/uploads/podcast/news-2023-03-13/the-changelog-news-2023-03-13.mp3($|\?)") {
80 | set req.http.x-redirect = "/uploads/news/35/changelog-news-35.mp3";
81 | } else if (req.url ~ "^/uploads/podcast/news-2023-03-20/the-changelog-news-2023-03-20.mp3($|\?)") {
82 | set req.http.x-redirect = "/uploads/news/36/changelog-news-36.mp3";
83 | } else if (req.url ~ "^/uploads/podcast/news-2023-03-27/the-changelog-news-2023-03-27.mp3($|\?)") {
84 | set req.http.x-redirect = "/uploads/news/37/changelog-news-37.mp3";
85 | } else if (req.url ~ "^/uploads/podcast/news-2023-04-03/the-changelog-news-2023-04-03.mp3($|\?)") {
86 | set req.http.x-redirect = "/uploads/news/38/changelog-news-38.mp3";
87 | }
88 |
89 | if (req.http.x-redirect) {
90 | return (synth(308, "Permanent Redirect"));
91 | }
92 | }
93 |
94 | sub vcl_synth {
95 | if (req.http.x-redirect
96 | && resp.status == 308) {
97 | set resp.http.location = "https://" + req.http.host + req.http.x-redirect;
98 |
99 | # If a query string exists, append it to the new path
100 | if (req.url ~ "\?.+") {
101 | set resp.http.location += regsub(req.url, "^[^?]*", "");
102 | }
103 |
104 | return (deliver);
105 | }
106 | }
107 |
--------------------------------------------------------------------------------
/docs/local_dev.md:
--------------------------------------------------------------------------------
1 | # Local Development and Testing
2 |
3 | You can start up a local instance of Pipely by running `just local-debug`. Once the container is built, it starts up with the name `pipely-debug` and drops you into a shell. From there, you have all the tools you need to run and experiment.
4 |
5 | ## Available Tools
6 |
7 | The following tools are available inside the container:
8 |
9 | - hurl - HTTP testing tool
10 | - httpstat - HTTP request statistics
11 | - htop - Process monitor
12 | - gotop - System monitor
13 | - oha - HTTP load testing
14 | - jq - JSON processor
15 | - neovim - Text editor
16 | - varnish tools - varnishadm, varnishlog, varnishtop, varnishstat
17 | - sasqwatch - Varnish log analysis
18 | - just - Task runner (available via container.justfile)
19 |
20 | ## Available Commands
21 |
22 | Like the project itself, the development container has its own justfile with several useful recipes. Simply type `just` to view all your options.
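
For example, inside the container (recipe names come from `/container/justfile`):

```bash
just            # list all available recipes
just backends   # list the Varnish backends
just check      # run httpstat against http://localhost:9000 (requires the server to be up)
```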
23 |
24 | ## Running the Server
25 |
26 | To work effectively inside the container, you're going to want to start up tmux. It lets you run the server in one window, run other commands in other windows, and switch between them quickly and easily.
27 |
28 | If you're not familiar with tmux, I highly recommend taking a quick tutorial, but to jump right in (the same sequence is sketched as shell commands after this list):
29 |
30 | - Get into the local container with `just local-debug`.
31 | - Type `tmux`.
32 | - Start the server with `just up`.
33 | - Create a new window by pressing `ctrl-b` followed by `c`.
34 | - Try fetching the front page from the locally running server with `curl http://localhost:9000`.
35 | - Run a benchmark test such as `just bench-app-4-pipedream`.
36 | - Switch back to the other window with `ctrl-b` followed by `l` (last window) and check out the live log output.
37 | - Quit the server with `ctrl-c`.
38 | - Close your two tmux windows with `exit` and `exit`.
39 | - Close your container prompt with `exit` again.
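
Here is the same walkthrough as a minimal shell sketch (assuming tmux's default `ctrl-b` prefix; recipe names come from the container's justfile):

```bash
just local-debug               # on the host: build the container and drop into a shell inside it
tmux                           # inside the container: start a tmux session
just up                        # window 1: start all processes
# press ctrl-b then c to open a second window
curl http://localhost:9000     # window 2: fetch the front page from the local server
just bench-app-4-pipedream     # window 2: run one of the benchmark recipes
# press ctrl-b then l to switch back to window 1 and watch the logs
```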
40 |
41 | ## Architecture
42 |
43 | ```
44 | [localhost:9000]
45 | ↓
46 | [Varnish Cache] ← health checks backends
47 | ↓
48 | [Dynamic Backend Selection]
49 | ↓
50 | ┌─────────────────┬─────────────────┬──────────────────┐
51 | │ App Proxy │ Feeds Proxy │ Assets Proxy │
52 | │ (localhost:5000)│ (localhost:5010)│ (localhost:5020) │
53 | │ ↓ │ ↓ │ ↓ │
54 | │ TLS Terminator │ TLS Terminator │ TLS Terminator │
55 | │ ↓ │ ↓ │ ↓ │
56 | │ External App │ External Feeds │ External Assets │
57 | └─────────────────┴─────────────────┴──────────────────┘
58 | ```
59 |
60 | ## Troubleshooting and Misc
61 |
62 | The `/justfile` contains the recipes listed when you run `just` from the host. A separate `/container/justfile` provides the recipes available when you run `just` inside the application container.
63 |
64 | To see what command a just recipe would run, use the `-n` flag. The `-n` (or `--dry-run`) option prints the command without executing it.
65 | ```bash
66 | just -n cache
67 | just -n local-run
68 | ```
69 |
70 | The Pipely application can be launched in one shell:
71 |
72 | ```bash
73 | just local-run
74 | ```
75 |
76 | Then, from a separate shell on the host, open a shell inside the nested container where the Pipely application is running:
77 |
78 | ```bash
79 | # Exec into nested container (broken down)
80 | docker_container_name="$(docker ps --format json | jq --slurp -r '[.[] | select((.Command | contains("dagger")) and (.Image | contains("dagger")) and (.Names | contains("dagger")))][0].Names')"
81 | nested_containers="$(docker exec "${docker_container_name}" runc list -f json)"
82 | nested_container_id="$(echo -E "${nested_containers}" | jq -r '[.[] | select(.status=="running" and (.bundle | contains("dagger/worker/executor")))][0].id')"
83 | docker exec -it "${docker_container_name}" runc exec -t "${nested_container_id}" bash
84 |
85 | # Crazy One-Liner to shell into nested container
86 | docker exec -it "$(docker ps --format json | jq --slurp -r '[.[] | select((.Command | contains("dagger")) and (.Image | contains("dagger")) and (.Names | contains("dagger")))][0].Names')" runc exec -t "$(echo -E "$(docker exec "$(docker ps --format json | jq --slurp -r '[.[] | select((.Command | contains("dagger")) and (.Image | contains("dagger")) and (.Names | contains("dagger")))][0].Names')" runc list -f json)" | jq -r '[.[] | select(.status=="running" and (.bundle | contains("dagger/worker/executor")))][0].id')" bash
87 | ```
88 |
89 | From within the application container, additional tools can be used to diagnose and troubleshoot the environment:
90 |
91 | ```bash
92 | # Monitor the full details of all the varnish events
93 | varnishlog
94 |
95 | # Monitoring vmod-dynamic with varnishlog
96 | # This will show you the DNS resolution that is occurring when the vmod is trying
97 | # to dynamically resolve the domain for the backends. If the varnish config has an
98 | # acl for only allowing IPv6 or IPv4 addresses, you will see errors when it gets a
99 | # response from the dns query that is not part of the acl.
100 | varnishlog -g raw -q '* ~ vmod-dynamic'
101 |
102 | # Tail Varnish backend_health
103 | varnishlog -g raw -i backend_health
104 | # or use the recipe provided in the container's justfile
105 | just health
106 |
107 | # review the processes within the application container
108 | ps -eo user,pid,ppid,%cpu,%mem,stat,start,time,cmd --forest
109 | ```
110 |
111 | ## MaxMind GeoIP Database for Log Enrichment
112 |
113 | If you want to work with the GeoIP data that Vector uses to enrich log data, you can get a license key from MaxMind in order to download the GeoIP database files.
114 |
115 | https://dev.maxmind.com/geoip/geolite2-free-geolocation-data/#sign-up-for-a-maxmind-account-to-get-geolite
116 | https://www.maxmind.com/en/geolite-free-ip-geolocation-data
117 | My Account -> Manage License Keys
118 |
119 | When some `just` commands call Dagger, they may pass in an option pointing at the MaxMind auth credential stored in 1Password at `op://pipely/maxmind/credential`, so the secret can be managed securely.
120 |
121 | justfile
122 | ```justfile
123 | # Debug production container locally - assumes envrc-secrets has already run
124 | [group('team')]
125 | local-production-debug:
126 | @PURGE_TOKEN="local-production" \
127 | just dagger call --beresp-ttl=5s \
128 | --honeycomb-dataset=pipely-dev --honeycomb-api-key=op://pipely/honeycomb/credential \
129 | --max-mind-auth=op://pipely/maxmind/credential \
130 | --purge-token=env:PURGE_TOKEN \
131 | local-production terminal --cmd=bash
132 | ```
133 |
134 | There is a section of the Dagger code that downloads the MaxMind database using a license key:
135 |
136 | dagger/main.go
137 | ```go
138 | geoLite2CityArchive := dag.HTTP("https://download.maxmind.com/geoip/databases/GeoLite2-City/download?suffix=tar.gz", dagger.HTTPOpts{
139 | AuthHeader: maxMindAuth,
140 | })
141 | ```
142 |
143 | Dagger sends the `AuthHeader` value along with the request to that URL so it can authenticate using basic credentials. The value kept in 1Password is expected to be the raw header value.
144 | https://github.com/dagger/dagger/blob/18701532b7a268ba42b542dff0fed0ce3db21419/core/schema/http.go#L84
145 |
146 | The raw authorization header for basic authentication is a base64-encoded value containing the username and the password separated by a `:`.
147 |
148 | ```bash
149 | curl -v -u YOUR_ACCOUNT_ID:YOUR_LICENSE_KEY https://download.maxmind.com/ 2>&1 | grep Authorization
150 | # > Authorization: Basic
151 | echo "" | base64 -d # paste the base64 value between the quotes to decode and inspect it
152 | ```
153 |
154 | To generate the value that needs to be stored in 1Password, prefix `Basic ` to the base64-encoded credentials:
155 | ```bash
156 | echo "Basic $(echo -n "YOUR_ACCOUNT_ID:YOUR_LICENSE_KEY" | base64)"
157 | ```
158 |
159 | This value can be put into 1Password at `op://pipely/maxmind/credential`, which maps to a `pipely` vault containing an item named `maxmind` with a custom password field named `credential`.
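
Once stored, a quick way to confirm the reference resolves is to read it back with the 1Password CLI (a hedged sketch; it assumes you are signed in to the account that owns the `pipely` vault):

```bash
op read "op://pipely/maxmind/credential"   # should print: Basic <base64-encoded credentials>
```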
160 |
161 | ## Honeycomb Credentials
162 |
163 | Some Dagger calls attempt to pull the secret at `op://pipely/honeycomb/credential`. It is possible to create a placeholder entry in 1Password to satisfy those calls even if the credential itself is invalid.
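
A minimal sketch of creating such a placeholder with the 1Password CLI (the vault, item, and field names follow the secret reference above; the field-assignment syntax may vary slightly between op CLI versions):

```bash
op item create --vault pipely --category password --title honeycomb 'credential[password]=placeholder'
```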
164 |
--------------------------------------------------------------------------------
/justfile:
--------------------------------------------------------------------------------
1 | # vim: set tabstop=4 shiftwidth=4 expandtab:
2 |
3 | import 'just/_config.just'
4 | import 'just/hurl.just'
5 | import 'just/dagger.just'
6 | import 'just/op.just'
7 |
8 | [private]
9 | default:
10 | @just --list
11 |
12 | [private]
13 | fmt:
14 | just --fmt --check --unstable
15 | just --version
16 |
17 | # Debug container locally
18 | local-debug:
19 | @just dagger call \
20 | --beresp-ttl=5s \
21 | --purge-token=env:PURGE_TOKEN \
22 | local-production terminal --cmd=bash
23 |
24 | # Run container locally: available on http://localhost:9000
25 | local-run:
26 | @just dagger call \
27 | --beresp-ttl=5s \
28 | --purge-token=env:PURGE_TOKEN \
29 | local-production as-service --use-entrypoint=true up
30 |
31 | # Run container in Docker (works on remote servers too): http://:9000
32 | docker-run *ARGS:
33 | @just dagger call \
34 | --beresp-ttl=5s \
35 | --purge-token=env:PURGE_TOKEN \
36 | local-production export --path=tmp/{{ LOCAL_CONTAINER_IMAGE }}
37 | @docker rm --force pipely.dev
38 | @docker tag $(docker load --input=tmp/{{ LOCAL_CONTAINER_IMAGE }} | awk -F: '{ print $3 }') {{ LOCAL_CONTAINER_IMAGE }}
39 | @docker run --detach --publish 9000:9000 --env PURGE_TOKEN=$PURGE_TOKEN --name pipely.dev {{ LOCAL_CONTAINER_IMAGE }}
40 | @docker container ls --filter name="pipely.dev" --format=json --no-trunc | jq .
41 | @rm -f tmp/{{ LOCAL_CONTAINER_IMAGE }}
42 |
43 | # Test VTC + acceptance locally
44 | test: test-vtc test-acceptance-local
45 |
46 | # Test VCL config
47 | test-vtc:
48 | @just dagger call test-varnish stdout
49 |
50 | # Test local setup
51 | test-acceptance-local:
52 | @just dagger call \
53 | --beresp-ttl=5s \
54 | --purge-token=env:PURGE_TOKEN \
55 | test-acceptance-report export \
56 | --path=./tmp/test-acceptance-local
57 |
58 | # Test NEW production - Pipedream, the Changelog variant of Pipely
59 | [group('team')]
60 | test-acceptance-pipedream *ARGS:
61 | HURL_purge_token="op://pipely/purge/credential" \
62 | just op run -- \
63 | just hurl --test --color --report-html tmp/test-acceptance-pipedream --continue-on-error \
64 | --variable proto=https \
65 | --variable host=changelog.com \
66 | --resolve changelog.com:443:137.66.2.20 \
67 | --variable assets_host=cdn.changelog.com \
68 | --resolve cdn.changelog.com:443:137.66.2.20 \
69 | --variable delay_ms=65000 \
70 | --variable delay_s=60 \
71 | {{ ARGS }} \
72 | test/acceptance/*.hurl test/acceptance/pipedream/*.hurl
73 |
74 | # Test CURRENT production
75 | test-acceptance-fastly *ARGS:
76 | @just hurl --test --color --report-html tmp/test-acceptance-fastly --continue-on-error \
77 | --variable proto=https \
78 | --variable host=changelog.com \
79 | --resolve changelog.com:443:151.101.129.162 \
80 | --variable assets_host=cdn.changelog.com \
81 | --resolve cdn.changelog.com:443:151.101.129.162 \
82 | {{ ARGS }} \
83 | test/acceptance/*.hurl test/acceptance/fastly/*.hurl
84 |
85 | # Open test reports
86 | test-reports:
87 | open tmp/*/index.html
88 |
89 | # Clear test reports
90 | test-reports-rm:
91 | rm -fr tmp/*
92 |
93 | # Debug production container locally - assumes envrc-secrets has already run
94 | [group('team')]
95 | local-debug-production:
96 | @PURGE_TOKEN="local-production" \
97 | just dagger call --beresp-ttl=5s \
98 | --honeycomb-dataset=${HONEYCOMB_DATASET} \
99 | --honeycomb-api-key=env:HONEYCOMB_API_KEY \
100 | --max-mind-auth=env:MAXMIND_AUTH \
101 | --purge-token=env:PURGE_TOKEN \
102 | --aws-region=${AWS_REGION} \
103 | --aws-local-production-s3-bucket-suffix=${AWS_S3_BUCKET_SUFFIX} \
104 | --aws-access-key-id=env:AWS_ACCESS_KEY_ID \
105 | --aws-secret-access-key=env:AWS_SECRET_ACCESS_KEY \
106 | local-production terminal --cmd=bash
107 |
108 | # Run production container locally - assumes envrc-secrets has already run - available on http://localhost:9000
109 | [group('team')]
110 | local-run-production:
111 | @PURGE_TOKEN="local-production" \
112 | just dagger call --beresp-ttl=5s \
113 | --honeycomb-dataset=${HONEYCOMB_DATASET} \
114 | --honeycomb-api-key=env:HONEYCOMB_API_KEY \
115 | --max-mind-auth=env:MAXMIND_AUTH \
116 | --purge-token=env:PURGE_TOKEN \
117 | --aws-region=${AWS_REGION} \
118 | --aws-local-production-s3-bucket-suffix=${AWS_S3_BUCKET_SUFFIX} \
119 | --aws-access-key-id=env:AWS_ACCESS_KEY_ID \
120 | --aws-secret-access-key=env:AWS_SECRET_ACCESS_KEY \
121 | local-production as-service --use-entrypoint=true up
122 |
123 | # Observe all HTTP timings - https://blog.cloudflare.com/a-question-of-timing
124 | http-profile url="https://pipedream.changelog.com/":
125 | @while sleep 1; do \
126 | curl -sL -o /dev/null \
127 | --write-out "%{url} http:%{http_version} status:%{http_code} {{ _WHITEB }}ip:%{remote_ip}{{ _RESET }} {{ _CYANB }}dns:%{time_namelookup}s{{ _RESET }} {{ _YELLOWB }}tcp:%{time_connect}s{{ _RESET }} {{ _MAGENTAB }}tls:%{time_appconnect}s{{ _RESET }} {{ _GREENB }}wait:%{time_starttransfer}s{{ _RESET }} {{ _BLUEB }}total:%{time_total}s{{ _RESET }}\n" \
128 | "{{ url }}"; \
129 | done
130 |
131 | # How many lines of Varnish config?
132 | how-many-lines:
133 | rg -c '' varnish/*.vcl
134 |
135 | # How many lines of Varnish config?
136 | how-many-lines-raw:
137 | rg -cv '^.*#|^\$' varnish/*.vcl
138 |
139 | # Publish container image - assumes envrc-secrets was already run
140 | [group('team')]
141 | publish tag=_DEFAULT_TAG:
142 | @just dagger call --tag={{ tag }} --max-mind-auth=op://pipely/maxmind/credential \
143 | publish --registry-username=$USER --registry-password=op://pipely/ghcr/credential --image={{ FLY_APP_IMAGE }}
144 |
145 | # Deploy container image
146 | [group('team')]
147 | deploy tag=_DEFAULT_TAG:
148 | @just dagger --mod={{ DAGGER_FLY_MODULE }} call \
149 | --token=op://pipely/fly/credential \
150 | --org={{ FLY_ORG }} \
151 | deploy --dir=. --image={{ FLY_APP_IMAGE }}:{{ tag }}
152 |
153 | # Scale production app
154 | [group('team')]
155 | scale:
156 | flyctl scale count $(echo {{ FLY_APP_REGIONS }}, | grep -o ',' | wc -l) --max-per-region 1 --region {{ FLY_APP_REGIONS }} --app {{ FLY_APP }}
157 |
158 | # Set app secrets - assumes envrc-secrets was already run
159 | [group('team')]
160 | secrets:
161 | PURGE_TOKEN="op://pipely/purge/credential" \
162 | HONEYCOMB_API_KEY="op://pipely/honeycomb/credential" \
163 | AWS_ACCESS_KEY_ID="op://pipely/aws-s3-logs/access-key-id" \
164 | AWS_SECRET_ACCESS_KEY="op://pipely/aws-s3-logs/secret-access-key" \
165 | just op run -- bash -c 'flyctl secrets set --stage HONEYCOMB_DATASET="pipedream" HONEYCOMB_API_KEY="$HONEYCOMB_API_KEY" PURGE_TOKEN="$PURGE_TOKEN" AWS_ACCESS_KEY_ID="$AWS_ACCESS_KEY_ID" AWS_SECRET_ACCESS_KEY="$AWS_SECRET_ACCESS_KEY"'
166 | flyctl secrets list
167 |
168 | # Add cert $fqdn to app
169 | [group('team')]
170 | cert-add fqdn:
171 | flyctl certs add {{ fqdn }} --app {{ FLY_APP }}
172 |
173 | # Show cert $fqdn for app
174 | [group('team')]
175 | cert fqdn:
176 | flyctl certs show {{ fqdn }} --app {{ FLY_APP }}
177 |
178 | # Show app certs
179 | [group('team')]
180 | certs:
181 | flyctl certs list --app {{ FLY_APP }}
182 |
183 | # Show app IPs
184 | [group('team')]
185 | ips:
186 | flyctl ips list --app {{ FLY_APP }}
187 |
188 | # Show app machines
189 | [group('team')]
190 | machines:
191 | flyctl machines list --app {{ FLY_APP }}
192 |
193 | # Restart ALL app machines, one-by-one
194 | [group('team')]
195 | restart:
196 | @just machines \
197 | | awk '/pipely/ { print $1 }' \
198 | | while read machine; do \
199 | echo -en "\n♻️ "; \
200 | flyctl machine stop $machine; \
201 | sleep 10; \
202 | flyctl machine start $machine \
203 | || (sleep 10; flyctl machine start $machine); \
204 | done
205 | @echo {{ _MAGENTA }}🧐 Any stopped machines?{{ _RESET }}
206 | @just machines | grep stop || echo ✨
207 |
208 | # Show app status
209 | [group('team')]
210 | status:
211 | flyctl status --app {{ FLY_APP }}
212 |
213 | # Tag a new release
214 | [group('team')]
215 | tag tag sha discussion:
216 |     git tag --force --sign --message="Discussed in {{ discussion }}" {{ tag }} {{ sha }}
217 |
218 | # Create .envrc.secrets with credentials from 1Password
219 | [group('team')]
220 | envrc-secrets:
221 | just op inject --in-file envrc.secrets.op --out-file .envrc.secrets
222 |
223 | [private]
224 | create:
225 | (flyctl apps list --org {{ FLY_ORG }} | grep {{ FLY_APP }}) \
226 | || flyctl apps create {{ FLY_APP }} --org {{ FLY_ORG }}
227 |
228 | [private]
229 | actions-runner:
230 | docker run --interactive --tty \
231 | --volume=pipely-linuxbrew:/home/linuxbrew/.linuxbrew \
232 | --volume=pipely-asdf:/home/runner/.asdf \
233 | --volume=.:/home/runner/work --workdir=/home/runner/work \
234 | --env=HOST=$(hostname) --publish=9090:9000 \
235 | --pull=always ghcr.io/actions/actions-runner
236 |
237 | [private]
238 | just0:
239 | curl --proto '=https' --tlsv1.2 -sSf https://just.systems/install.sh | sudo bash -s -- --to /usr/local/bin
240 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Pipely™ - single-purpose, single-tenant CDN
2 |
3 | Based on [Varnish Cache](https://varnish-cache.org/releases/index.html). This started as the simplest CDN running on [fly.io](https://fly.io/changelog)
4 | for [changelog.com](https://changelog.com).
5 |
6 | You are welcome to fork this and build your own - OSS FTW 💚
7 |
8 | ## How it started
9 |
10 | 
11 |
12 | > 🧢 Jerod Santo - March 29, 2024 - Changelog & Friends #38
13 |
14 | ## The Roadmap to `v2.0`
15 |
16 | - Tag & ship `v1.1`
17 | - ✅ Log & forward original `fly-request-id` header - [PR #42](https://github.com/thechangelog/pipely/pull/42)
18 | - Support websocket connections
19 | - Enable backend streaming so that we don't load more data in memory than the clients can handle
20 | - Update all dependencies to latest (hold Varnish at `v7.7.3`)
21 | - Split instances into HOT & COLD
22 | - Run periodic mp3 & feed checks against all regions
23 | - Add nightly.changelog.com backend
24 | - Split the default VCL into `include` files
25 |   - This will enable us to reuse the same configs in the tests [💪 @mttjohnson](https://github.com/thechangelog/pipely/pull/19#pullrequestreview-3013467499)
26 | - [Add logging acceptance tests](https://github.com/thechangelog/pipely/pull/27#issuecomment-3094684063)
27 | - Keep Dagger version in `.github/workflows/_namespace.yaml` in sync with `just/dagger.just`
28 |
29 | ## What went into `v1.0`
30 |
31 | - ✅ Static backend, 1 day stale, stale on error, `x`-headers - [Initial commit](https://github.com/thechangelog/pipely/commit/17d3899a52d9dc887efd7f49de92b24249431234)
32 | - ✅ Dynamic backend, `cache-status` header - [PR #1](https://github.com/thechangelog/pipely/pull/1)
33 | - ✅ Add tests - [PR #3](https://github.com/thechangelog/pipely/pull/3)
34 | - ✅ Make it easy to develop locally - [PR #7](https://github.com/thechangelog/pipely/pull/7)
35 | - ✅ Add support for TLS backends, publish & deploy to production - [PR #8](https://github.com/thechangelog/pipely/pull/8)
36 | - ✅ Add Feeds backend - [PR #10](https://github.com/thechangelog/pipely/pull/10)
37 | - ✅ Add Assets backend - [PR #11](https://github.com/thechangelog/pipely/pull/11)
38 | - ✅ Send Varnish logs to Honeycomb.io - [PR #12](https://github.com/thechangelog/pipely/pull/12)
39 | - ✅ Enrich Varnish logs with GeoIP data - [PR #13](https://github.com/thechangelog/pipely/pull/13)
40 | - ✅ Supervisor restarts crashed processes - [PR #14](https://github.com/thechangelog/pipely/pull/14)
41 | - ✅ Auth `PURGE` requests - [PR #16](https://github.com/thechangelog/pipely/pull/16)
42 | - ✅ Add redirects from [Fastly VCL](./varnish/changelog.com.vcl) - [PR #19](https://github.com/thechangelog/pipely/pull/19)
43 | - ✅ Send Varnish logs to S3 - [PR #27](https://github.com/thechangelog/pipely/pull/27)
44 | - ✅ All contributors review & clean-up
45 | - Is the VCL as clean & efficient as it could be?
46 | - Does everything work as expected?
47 | - Anything that can be removed?
48 | - How do we make this friendlier to new users?
49 | - What would make this more contribution-friendly?
50 | - How easy is this to use as your own deployment?
51 | - ✅ Tag & ship `v1.0-rc.1`
52 | - ✅ Update documentation and do some local dev tests - [PR #22](https://github.com/thechangelog/pipely/pull/22)
53 | - ✅ Add debug welcome message and prompt - [PR #25](https://github.com/thechangelog/pipely/pull/25)
54 | - ✅ Avoid using home_dir() due to Windows issues - [PR #26](https://github.com/thechangelog/pipely/pull/26)
55 | - ✅ Add troubleshooting and misc to local dev docs - [PR #29](https://github.com/thechangelog/pipely/pull/29)
56 | - ✅ Tag & ship `v1.0-rc.2`
57 | - ✅ Prepare for 20% of the production traffic - [PR #30](https://github.com/thechangelog/pipely/pull/30)
58 | - Route 20% of the production traffic through
59 | - Observe cold cache behaviour
60 | - ✅ Tag & ship `v1.0-rc.3`
61 | - ✅ Fix feeds URL rewrite - [PR #31](https://github.com/thechangelog/pipely/pull/31)
62 | - ✅ Increase instance size - [PR #32](https://github.com/thechangelog/pipely/pull/32)
63 | - ✅ Tag & ship `v1.0-rc.4`
64 | - ✅ Limit Varnish memory to 66% (out of `3200M` out of `4000M`) - [3553723](https://github.com/thechangelog/pipely/commit/355372334b602a0ad55a96a85a288409ad4b8d84)
65 | - ✅ Tag & ship `v1.0-rc.5`
66 | - ✅ Handle varnish-json-response failing on startup - [PR #33](https://github.com/thechangelog/pipely/pull/33)
67 | - ✅ Bump the instance size to performance-1x with 8GB of RAM - [PR #34](https://github.com/thechangelog/pipely/pull/34)
68 | - Route 50% of the production traffic through
69 | - ✅ Tag & ship `v1.0-rc.6`
70 | - ✅ Add more locations - [PR #35](https://github.com/thechangelog/pipely/pull/35)
71 | - ✅ Increase backend timeout - [PR #36](https://github.com/thechangelog/pipely/pull/36)
72 | - ✅ Tag & ship `v1.0-rc.7`
73 | - ✅ Update to Varnish v7.7.3 & Vector v0.49.0 - [PR #38](https://github.com/thechangelog/pipely/pull/38)
74 | - ✅ Support MP3 uploads - [PR #39](https://github.com/thechangelog/pipely/pull/39)
75 | - ✅ Tag & ship `v1.0`
76 | - ✅ Update all dependencies to latest (hold Varnish at v7.7.3) - [PR #40](https://github.com/thechangelog/pipely/pull/40)
77 | - ✅ Route 100% of the production traffic through `v1.0`
78 |
79 | ## Local development and testing
80 |
81 | While it's fun watching other people experiment with digital resin (varnish 😂), it's a whole lot more fun when you can repeat those experiments yourself, understand more about how it works, and make your own modifications.
82 |
83 | ### Prerequisites
84 |
85 | - 🐳 [Docker](https://docs.docker.com/engine/install/)
86 | - 🤖 [Just](https://github.com/casey/just?tab=readme-ov-file#installation) version `1.27.0` or higher
87 |
88 | And that's about it. Everything else is containerized with Dagger.
89 |
90 | > [!NOTE]
91 | > **For Windows Developers:**
92 | > The project's toolchain is made for Linux-like systems. On a Windows machine you will need to have the Windows Subsystem for Linux (WSL) installed in addition to Docker. `just` should be installed inside your WSL Linux operating system. You might be able to run Just natively from Windows, but there are some known bugs related to home directory filenames, so better to avoid that altogether and work directly in WSL.
93 |
94 | ```bash
95 | just
96 | Available recipes:
97 | how-many-lines # How many lines of Varnish config?
98 | how-many-lines-raw # How many lines of Varnish config?
99 | http-profile url="https://pipedream.changelog.com/" # Observe all HTTP timings - https://blog.cloudflare.com/a-question-of-timing
100 | local-debug # Debug container locally
101 | local-run # Run container locally: available on http://localhost:9000
102 | test # Test VTC + acceptance locally
103 | test-acceptance-fastly *ARGS # Test CURRENT production
104 | test-acceptance-local # Test local setup
105 | test-reports # Open test reports
106 | test-reports-rm # Clear test reports
107 | test-vtc # Test VCL config
108 |
109 | [team]
110 | cert fqdn # Show cert $fqdn for app
111 | cert-add fqdn # Add cert $fqdn to app
112 | certs # Show app certs
113 | deploy tag=_DEFAULT_TAG # Deploy container image
114 | envrc-secrets # Create .envrc.secrets with credentials from 1Password
115 | ips # Show app IPs
116 | local-debug-production # Debug production container locally - assumes envrc-secrets has already run
117 | local-run-production # Run production container locally - assumes envrc-secrets has already run - available on http://localhost:9000
118 | machines # Show app machines
119 | publish tag=_DEFAULT_TAG # Publish container image - assumes envrc-secrets was already run
120 | restart # Restart ALL app machines, one-by-one
121 | scale # Scale production app
122 | secrets # Set app secrets - assumes envrc-secrets was already run
123 | status # Show app status
124 | tag tag sha discussion # Tag a new release
125 | test-acceptance-pipedream *ARGS # Test NEW production - Pipedream, the Changelog variant of Pipely
126 |
127 | # Run the tests
128 | just test
129 | ```
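
To poke at things interactively, `local-run` starts the container and exposes it on port 9000. Here is a quick sketch of exercising it from a second terminal, based on the `/health` endpoint and `cache-status` header defined in this repo's VCL:

```bash
# Terminal 1: run the container locally (serves http://localhost:9000)
just local-run

# Terminal 2: Varnish answers its own health check with a 204
curl -i http://localhost:9000/health

# Other requests are routed to a backend and tagged with a cache-status
# header, e.g. "region=LOCAL; origin=...; ttl=...; hit" or "...; miss"
curl -sI http://localhost:9000/ | grep -i cache-status
```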
130 |
131 | ## How can you help
132 |
133 | If you have any ideas on how to improve this, please open an issue or go
134 | straight for a pull request. We make this as easy as possible:
135 | - All commits emphasize [good commit messages](https://cbea.ms/git-commit/) (more text for humans)
136 | - This repository is kept small & simple (single purpose: build the simplest CDN on Fly.io)
137 | - Slow & thoughtful approach - join our journey via [audio with transcripts](https://changelog.com/topic/kaizen) or [written](https://github.com/thechangelog/changelog.com/discussions/categories/kaizen)
138 |
139 | See you in our [Zulip Chat](https://changelog.zulipchat.com/#narrow/channel/513743-pipely) 👋
140 |
141 | > [!NOTE]
142 | > Join from [changelog.zulipchat.com](https://changelog.zulipchat.com). It requires signing up and requesting an invite before you can **Log in**.
143 |
144 | 
145 |
146 | ## Contributors
147 |
148 | - [Nabeel Sulieman](https://github.com/nabsul)
149 | - [Matt Johnson](https://github.com/mttjohnson)
150 | - [James A Rosen](https://www.jamesarosen.com/now)
151 | - [Gerhard Lazu](https://gerhard.io)
152 |
--------------------------------------------------------------------------------
/dagger/go.sum:
--------------------------------------------------------------------------------
1 | github.com/99designs/gqlgen v0.17.75 h1:GwHJsptXWLHeY7JO8b7YueUI4w9Pom6wJTICosDtQuI=
2 | github.com/99designs/gqlgen v0.17.75/go.mod h1:p7gbTpdnHyl70hmSpM8XG8GiKwmCv+T5zkdY8U8bLog=
3 | github.com/Khan/genqlient v0.8.1 h1:wtOCc8N9rNynRLXN3k3CnfzheCUNKBcvXmVv5zt6WCs=
4 | github.com/Khan/genqlient v0.8.1/go.mod h1:R2G6DzjBvCbhjsEajfRjbWdVglSH/73kSivC9TLWVjU=
5 | github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883 h1:bvNMNQO63//z+xNgfBlViaCIJKLlCJ6/fmUseuG0wVQ=
6 | github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8=
7 | github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8=
8 | github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
9 | github.com/cenkalti/backoff/v5 v5.0.2 h1:rIfFVxEf1QsI7E1ZHfp/B4DF/6QBAUhmgkxc0H7Zss8=
10 | github.com/cenkalti/backoff/v5 v5.0.2/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw=
11 | github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I=
12 | github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo=
13 | github.com/containerd/platforms v0.2.1 h1:zvwtM3rz2YHPQsF2CHYM8+KtB5dvhISiXh5ZpSBQv6A=
14 | github.com/containerd/platforms v0.2.1/go.mod h1:XHCb+2/hzowdiut9rkudds9bE5yJ7npe7dG/wG+uFPw=
15 | github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
16 | github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
17 | github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
18 | github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
19 | github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
20 | github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
21 | github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
22 | github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
23 | github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
24 | github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
25 | github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
26 | github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
27 | github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
28 | github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
29 | github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 h1:5ZPtiqj0JL5oKWmcsq4VMaAW5ukBEgSGXEN89zeH1Jo=
30 | github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3/go.mod h1:ndYquD05frm2vACXE1nsccT4oJzjhw2arTS2cpUD1PI=
31 | github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
32 | github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
33 | github.com/opencontainers/image-spec v1.1.0-rc5 h1:Ygwkfw9bpDvs+c9E34SdgGOj41dX/cbdlwvlWt0pnFI=
34 | github.com/opencontainers/image-spec v1.1.0-rc5/go.mod h1:X4pATf0uXsnn3g5aiGIsVnJBR4mxhKzfwmvK/B2NTm8=
35 | github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
36 | github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
37 | github.com/sergi/go-diff v1.3.1 h1:xkr+Oxo4BOQKmkn/B9eMK0g5Kg/983T9DqqPHwYqD+8=
38 | github.com/sergi/go-diff v1.3.1/go.mod h1:aMJSSKb2lpPvRNec0+w3fl7LP9IOFzdc9Pa4NFbPK1I=
39 | github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
40 | github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
41 | github.com/sosodev/duration v1.3.1 h1:qtHBDMQ6lvMQsL15g4aopM4HEfOaYuhWBw3NPTtlqq4=
42 | github.com/sosodev/duration v1.3.1/go.mod h1:RQIBBX0+fMLc/D9+Jb/fwvVmo0eZvDDEERAikUR6SDg=
43 | github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
44 | github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
45 | github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
46 | github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
47 | github.com/vektah/gqlparser/v2 v2.5.28 h1:bIulcl3LF69ba6EiZVGD88y4MkM+Jxrf3P2MX8xLRkY=
48 | github.com/vektah/gqlparser/v2 v2.5.28/go.mod h1:D1/VCZtV3LPnQrcPBeR/q5jkSQIPti0uYCP/RI0gIeo=
49 | go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
50 | go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
51 | go.opentelemetry.io/otel v1.36.0 h1:UumtzIklRBY6cI/lllNZlALOF5nNIzJVb16APdvgTXg=
52 | go.opentelemetry.io/otel v1.36.0/go.mod h1:/TcFMXYjyRNh8khOAO9ybYkqaDBb/70aVwkNML4pP8E=
53 | go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.12.2 h1:06ZeJRe5BnYXceSM9Vya83XXVaNGe3H1QqsvqRANQq8=
54 | go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.12.2/go.mod h1:DvPtKE63knkDVP88qpatBj81JxN+w1bqfVbsbCbj1WY=
55 | go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.12.2 h1:tPLwQlXbJ8NSOfZc4OkgU5h2A38M4c9kfHSVc4PFQGs=
56 | go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.12.2/go.mod h1:QTnxBwT/1rBIgAG1goq6xMydfYOBKU6KTiYF4fp5zL8=
57 | go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.32.0 h1:j7ZSD+5yn+lo3sGV69nW04rRR0jhYnBwjuX3r0HvnK0=
58 | go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.32.0/go.mod h1:WXbYJTUaZXAbYd8lbgGuvih0yuCfOFC5RJoYnoLcGz8=
59 | go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.32.0 h1:t/Qur3vKSkUCcDVaSumWF2PKHt85pc7fRvFuoVT8qFU=
60 | go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.32.0/go.mod h1:Rl61tySSdcOJWoEgYZVtmnKdA0GeKrSqkHC1t+91CH8=
61 | go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.32.0 h1:IJFEoHiytixx8cMiVAO+GmHR6Frwu+u5Ur8njpFO6Ac=
62 | go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.32.0/go.mod h1:3rHrKNtLIoS0oZwkY2vxi+oJcwFRWdtUyRII+so45p8=
63 | go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.32.0 h1:9kV11HXBHZAvuPUZxmMWrH8hZn/6UnHX4K0mu36vNsU=
64 | go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.32.0/go.mod h1:JyA0FHXe22E1NeNiHmVp7kFHglnexDQ7uRWDiiJ1hKQ=
65 | go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.32.0 h1:cMyu9O88joYEaI47CnQkxO1XZdpoTF9fEnW2duIddhw=
66 | go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.32.0/go.mod h1:6Am3rn7P9TVVeXYG+wtcGE7IE1tsQ+bP3AuWcKt/gOI=
67 | go.opentelemetry.io/otel/log v0.12.2 h1:yob9JVHn2ZY24byZeaXpTVoPS6l+UrrxmxmPKohXTwc=
68 | go.opentelemetry.io/otel/log v0.12.2/go.mod h1:ShIItIxSYxufUMt+1H5a2wbckGli3/iCfuEbVZi/98E=
69 | go.opentelemetry.io/otel/metric v1.36.0 h1:MoWPKVhQvJ+eeXWHFBOPoBOi20jh6Iq2CcCREuTYufE=
70 | go.opentelemetry.io/otel/metric v1.36.0/go.mod h1:zC7Ks+yeyJt4xig9DEw9kuUFe5C3zLbVjV2PzT6qzbs=
71 | go.opentelemetry.io/otel/sdk v1.36.0 h1:b6SYIuLRs88ztox4EyrvRti80uXIFy+Sqzoh9kFULbs=
72 | go.opentelemetry.io/otel/sdk v1.36.0/go.mod h1:+lC+mTgD+MUWfjJubi2vvXWcVxyr9rmlshZni72pXeY=
73 | go.opentelemetry.io/otel/sdk/log v0.12.2 h1:yNoETvTByVKi7wHvYS6HMcZrN5hFLD7I++1xIZ/k6W0=
74 | go.opentelemetry.io/otel/sdk/log v0.12.2/go.mod h1:DcpdmUXHJgSqN/dh+XMWa7Vf89u9ap0/AAk/XGLnEzY=
75 | go.opentelemetry.io/otel/sdk/log/logtest v0.0.0-20250521073539-a85ae98dcedc h1:uqxdywfHqqCl6LmZzI3pUnXT1RGFYyUgxj0AkWPFxi0=
76 | go.opentelemetry.io/otel/sdk/log/logtest v0.0.0-20250521073539-a85ae98dcedc/go.mod h1:TY/N/FT7dmFrP/r5ym3g0yysP1DefqGpAZr4f82P0dE=
77 | go.opentelemetry.io/otel/sdk/metric v1.36.0 h1:r0ntwwGosWGaa0CrSt8cuNuTcccMXERFwHX4dThiPis=
78 | go.opentelemetry.io/otel/sdk/metric v1.36.0/go.mod h1:qTNOhFDfKRwX0yXOqJYegL5WRaW376QbB7P4Pb0qva4=
79 | go.opentelemetry.io/otel/trace v1.36.0 h1:ahxWNuqZjpdiFAyrIoQ4GIiAIhxAunQR6MUoKrsNd4w=
80 | go.opentelemetry.io/otel/trace v1.36.0/go.mod h1:gQ+OnDZzrybY4k4seLzPAWNwVBBVlF2szhehOBB/tGA=
81 | go.opentelemetry.io/proto/otlp v1.6.0 h1:jQjP+AQyTf+Fe7OKj/MfkDrmK4MNVtw2NpXsf9fefDI=
82 | go.opentelemetry.io/proto/otlp v1.6.0/go.mod h1:cicgGehlFuNdgZkcALOCh3VE6K/u2tAjzlRhDwmVpZc=
83 | go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
84 | go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
85 | golang.org/x/net v0.41.0 h1:vBTly1HeNPEn3wtREYfy4GZ/NECgw2Cnl+nK6Nz3uvw=
86 | golang.org/x/net v0.41.0/go.mod h1:B/K4NNqkfmg07DQYrbwvSluqCJOOXwUjeb/5lOisjbA=
87 | golang.org/x/sync v0.15.0 h1:KWH3jNZsfyT6xfAfKiz6MRNmd46ByHDYaZ7KSkCtdW8=
88 | golang.org/x/sync v0.15.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
89 | golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
90 | golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw=
91 | golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
92 | golang.org/x/text v0.26.0 h1:P42AVeLghgTYr4+xUnTRKDMqpar+PtX7KWuNQL21L8M=
93 | golang.org/x/text v0.26.0/go.mod h1:QK15LZJUUQVJxhz7wXgxSy/CJaTFjd0G+YLonydOVQA=
94 | google.golang.org/genproto/googleapis/api v0.0.0-20250519155744-55703ea1f237 h1:Kog3KlB4xevJlAcbbbzPfRG0+X9fdoGM+UBRKVz6Wr0=
95 | google.golang.org/genproto/googleapis/api v0.0.0-20250519155744-55703ea1f237/go.mod h1:ezi0AVyMKDWy5xAncvjLWH7UcLBB5n7y2fQ8MzjJcto=
96 | google.golang.org/genproto/googleapis/rpc v0.0.0-20250519155744-55703ea1f237 h1:cJfm9zPbe1e873mHJzmQ1nwVEeRDU/T1wXDK2kUSU34=
97 | google.golang.org/genproto/googleapis/rpc v0.0.0-20250519155744-55703ea1f237/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A=
98 | google.golang.org/grpc v1.73.0 h1:VIWSmpI2MegBtTuFt5/JWy2oXxtjJ/e89Z70ImfD2ok=
99 | google.golang.org/grpc v1.73.0/go.mod h1:50sbHOUqWoCQGI8V2HQLJM0B+LMlIUjNSZmow7EVBQc=
100 | google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY=
101 | google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY=
102 | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
103 | gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
104 | gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
105 | gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
106 |
--------------------------------------------------------------------------------
/test/acceptance/news-mp3.hurl:
--------------------------------------------------------------------------------
1 | GET {{proto}}://{{host}}/uploads/podcast/news-2022-06-27/the-changelog-news-2022-06-27.mp3
2 | Host: {{assets_host}}
3 | HTTP 308 # expect permanent redirect response
4 | [Asserts]
5 | header "Location" == "https://{{assets_host}}/uploads/news/1/changelog-news-1.mp3"
6 |
7 | GET {{proto}}://{{host}}/uploads/podcast/news-2022-06-27/the-changelog-news-2022-06-27.mp3?this=is&a=query&string
8 | Host: {{assets_host}}
9 | HTTP 308 # expect permanent redirect response
10 | [Asserts]
11 | header "Location" == "https://{{assets_host}}/uploads/news/1/changelog-news-1.mp3?this=is&a=query&string"
12 |
13 | GET {{proto}}://{{host}}/uploads/podcast/news-2022-07-04/the-changelog-news-2022-07-04.mp3
14 | Host: {{assets_host}}
15 | HTTP 308 # expect permanent redirect response
16 | [Asserts]
17 | header "Location" == "https://{{assets_host}}/uploads/news/2/changelog-news-2.mp3"
18 |
19 | GET {{proto}}://{{host}}/uploads/podcast/news-2022-07-11/the-changelog-news-2022-07-11.mp3
20 | Host: {{assets_host}}
21 | HTTP 308 # expect permanent redirect response
22 | [Asserts]
23 | header "Location" == "https://{{assets_host}}/uploads/news/3/changelog-news-3.mp3"
24 |
25 | GET {{proto}}://{{host}}/uploads/podcast/news-2022-07-18/the-changelog-news-2022-07-18.mp3
26 | Host: {{assets_host}}
27 | HTTP 308 # expect permanent redirect response
28 | [Asserts]
29 | header "Location" == "https://{{assets_host}}/uploads/news/4/changelog-news-4.mp3"
30 |
31 | GET {{proto}}://{{host}}/uploads/podcast/news-2022-07-25/the-changelog-news-2022-07-25.mp3
32 | Host: {{assets_host}}
33 | HTTP 308 # expect permanent redirect response
34 | [Asserts]
35 | header "Location" == "https://{{assets_host}}/uploads/news/5/changelog-news-5.mp3"
36 |
37 | GET {{proto}}://{{host}}/uploads/podcast/news-2022-08-01/the-changelog-news-2022-08-01.mp3
38 | Host: {{assets_host}}
39 | HTTP 308 # expect permanent redirect response
40 | [Asserts]
41 | header "Location" == "https://{{assets_host}}/uploads/news/6/changelog-news-6.mp3"
42 |
43 | GET {{proto}}://{{host}}/uploads/podcast/news-2022-08-08/the-changelog-news-2022-08-08.mp3
44 | Host: {{assets_host}}
45 | HTTP 308 # expect permanent redirect response
46 | [Asserts]
47 | header "Location" == "https://{{assets_host}}/uploads/news/7/changelog-news-7.mp3"
48 |
49 | GET {{proto}}://{{host}}/uploads/podcast/news-2022-08-15/the-changelog-news-2022-08-15.mp3
50 | Host: {{assets_host}}
51 | HTTP 308 # expect permanent redirect response
52 | [Asserts]
53 | header "Location" == "https://{{assets_host}}/uploads/news/8/changelog-news-8.mp3"
54 |
55 | GET {{proto}}://{{host}}/uploads/podcast/news-2022-08-22/the-changelog-news-2022-08-22.mp3
56 | Host: {{assets_host}}
57 | HTTP 308 # expect permanent redirect response
58 | [Asserts]
59 | header "Location" == "https://{{assets_host}}/uploads/news/9/changelog-news-9.mp3"
60 |
61 | GET {{proto}}://{{host}}/uploads/podcast/news-2022-08-22/the-changelog-news-2022-08-22-j2g.mp3
62 | Host: {{assets_host}}
63 | HTTP 308 # expect permanent redirect response
64 | [Asserts]
65 | header "Location" == "https://{{assets_host}}/uploads/news/9/changelog-news-9-j2g.mp3"
66 |
67 | GET {{proto}}://{{host}}/uploads/podcast/news-2022-08-29/the-changelog-news-2022-08-29.mp3
68 | Host: {{assets_host}}
69 | HTTP 308 # expect permanent redirect response
70 | [Asserts]
71 | header "Location" == "https://{{assets_host}}/uploads/news/10/changelog-news-10.mp3"
72 |
73 | GET {{proto}}://{{host}}/uploads/podcast/news-2022-09-05/the-changelog-news-2022-09-05.mp3
74 | Host: {{assets_host}}
75 | HTTP 308 # expect permanent redirect response
76 | [Asserts]
77 | header "Location" == "https://{{assets_host}}/uploads/news/11/changelog-news-11.mp3"
78 |
79 | GET {{proto}}://{{host}}/uploads/podcast/news-2022-09-12/the-changelog-news-2022-09-12.mp3
80 | Host: {{assets_host}}
81 | HTTP 308 # expect permanent redirect response
82 | [Asserts]
83 | header "Location" == "https://{{assets_host}}/uploads/news/12/changelog-news-12.mp3"
84 |
85 | GET {{proto}}://{{host}}/uploads/podcast/news-2022-09-19/the-changelog-news-2022-09-19.mp3
86 | Host: {{assets_host}}
87 | HTTP 308 # expect permanent redirect response
88 | [Asserts]
89 | header "Location" == "https://{{assets_host}}/uploads/news/13/changelog-news-13.mp3"
90 |
91 | GET {{proto}}://{{host}}/uploads/podcast/news-2022-09-26/the-changelog-news-2022-09-26.mp3
92 | Host: {{assets_host}}
93 | HTTP 308 # expect permanent redirect response
94 | [Asserts]
95 | header "Location" == "https://{{assets_host}}/uploads/news/14/changelog-news-14.mp3"
96 |
97 | GET {{proto}}://{{host}}/uploads/podcast/news-2022-10-03/the-changelog-news-2022-10-03.mp3
98 | Host: {{assets_host}}
99 | HTTP 308 # expect permanent redirect response
100 | [Asserts]
101 | header "Location" == "https://{{assets_host}}/uploads/news/15/changelog-news-15.mp3"
102 |
103 | GET {{proto}}://{{host}}/uploads/podcast/news-2022-10-10/the-changelog-news-2022-10-10.mp3
104 | Host: {{assets_host}}
105 | HTTP 308 # expect permanent redirect response
106 | [Asserts]
107 | header "Location" == "https://{{assets_host}}/uploads/news/16/changelog-news-16.mp3"
108 |
109 | GET {{proto}}://{{host}}/uploads/podcast/news-2022-10-17/the-changelog-news-2022-10-17.mp3
110 | Host: {{assets_host}}
111 | HTTP 308 # expect permanent redirect response
112 | [Asserts]
113 | header "Location" == "https://{{assets_host}}/uploads/news/17/changelog-news-17.mp3"
114 |
115 | GET {{proto}}://{{host}}/uploads/podcast/news-2022-10-24/the-changelog-news-2022-10-24.mp3
116 | Host: {{assets_host}}
117 | HTTP 308 # expect permanent redirect response
118 | [Asserts]
119 | header "Location" == "https://{{assets_host}}/uploads/news/18/changelog-news-18.mp3"
120 |
121 | GET {{proto}}://{{host}}/uploads/podcast/news-2022-11-07/the-changelog-news-2022-11-07.mp3
122 | Host: {{assets_host}}
123 | HTTP 308 # expect permanent redirect response
124 | [Asserts]
125 | header "Location" == "https://{{assets_host}}/uploads/news/19/changelog-news-19.mp3"
126 |
127 | GET {{proto}}://{{host}}/uploads/podcast/news-2022-11-14/the-changelog-news-2022-11-14.mp3
128 | Host: {{assets_host}}
129 | HTTP 308 # expect permanent redirect response
130 | [Asserts]
131 | header "Location" == "https://{{assets_host}}/uploads/news/20/changelog-news-20.mp3"
132 |
133 | GET {{proto}}://{{host}}/uploads/podcast/news-2022-11-21/the-changelog-news-2022-11-21.mp3
134 | Host: {{assets_host}}
135 | HTTP 308 # expect permanent redirect response
136 | [Asserts]
137 | header "Location" == "https://{{assets_host}}/uploads/news/21/changelog-news-21.mp3"
138 |
139 | GET {{proto}}://{{host}}/uploads/podcast/news-2022-11-28/the-changelog-news-2022-11-28.mp3
140 | Host: {{assets_host}}
141 | HTTP 308 # expect permanent redirect response
142 | [Asserts]
143 | header "Location" == "https://{{assets_host}}/uploads/news/22/changelog-news-22.mp3"
144 |
145 | GET {{proto}}://{{host}}/uploads/podcast/news-2022-12-05/the-changelog-news-2022-12-05.mp3
146 | Host: {{assets_host}}
147 | HTTP 308 # expect permanent redirect response
148 | [Asserts]
149 | header "Location" == "https://{{assets_host}}/uploads/news/23/changelog-news-23.mp3"
150 |
151 | GET {{proto}}://{{host}}/uploads/podcast/news-2022-12-12/the-changelog-news-2022-12-12.mp3
152 | Host: {{assets_host}}
153 | HTTP 308 # expect permanent redirect response
154 | [Asserts]
155 | header "Location" == "https://{{assets_host}}/uploads/news/24/changelog-news-24.mp3"
156 |
157 | GET {{proto}}://{{host}}/uploads/podcast/news-2023-01-02/the-changelog-news-2023-01-02.mp3
158 | Host: {{assets_host}}
159 | HTTP 308 # expect permanent redirect response
160 | [Asserts]
161 | header "Location" == "https://{{assets_host}}/uploads/news/25/changelog-news-25.mp3"
162 |
163 | GET {{proto}}://{{host}}/uploads/podcast/news-2023-01-09/the-changelog-news-2023-01-09.mp3
164 | Host: {{assets_host}}
165 | HTTP 308 # expect permanent redirect response
166 | [Asserts]
167 | header "Location" == "https://{{assets_host}}/uploads/news/26/changelog-news-26.mp3"
168 |
169 | GET {{proto}}://{{host}}/uploads/podcast/news-2023-01-16/the-changelog-news-2023-01-16.mp3
170 | Host: {{assets_host}}
171 | HTTP 308 # expect permanent redirect response
172 | [Asserts]
173 | header "Location" == "https://{{assets_host}}/uploads/news/27/changelog-news-27.mp3"
174 |
175 | GET {{proto}}://{{host}}/uploads/podcast/news-2023-01-23/the-changelog-news-2023-01-23.mp3
176 | Host: {{assets_host}}
177 | HTTP 308 # expect permanent redirect response
178 | [Asserts]
179 | header "Location" == "https://{{assets_host}}/uploads/news/28/changelog-news-28.mp3"
180 |
181 | GET {{proto}}://{{host}}/uploads/podcast/news-2023-01-30/the-changelog-news-2023-01-30.mp3
182 | Host: {{assets_host}}
183 | HTTP 308 # expect permanent redirect response
184 | [Asserts]
185 | header "Location" == "https://{{assets_host}}/uploads/news/29/changelog-news-29.mp3"
186 |
187 | GET {{proto}}://{{host}}/uploads/podcast/news-2023-02-06/the-changelog-news-2023-02-06.mp3
188 | Host: {{assets_host}}
189 | HTTP 308 # expect permanent redirect response
190 | [Asserts]
191 | header "Location" == "https://{{assets_host}}/uploads/news/30/changelog-news-30.mp3"
192 |
193 | GET {{proto}}://{{host}}/uploads/podcast/news-2023-02-13/the-changelog-news-2023-02-13.mp3
194 | Host: {{assets_host}}
195 | HTTP 308 # expect permanent redirect response
196 | [Asserts]
197 | header "Location" == "https://{{assets_host}}/uploads/news/31/changelog-news-31.mp3"
198 |
199 | GET {{proto}}://{{host}}/uploads/podcast/news-2023-02-20/the-changelog-news-2023-02-20.mp3
200 | Host: {{assets_host}}
201 | HTTP 308 # expect permanent redirect response
202 | [Asserts]
203 | header "Location" == "https://{{assets_host}}/uploads/news/32/changelog-news-32.mp3"
204 |
205 | GET {{proto}}://{{host}}/uploads/podcast/news-2023-02-20/the-changelog-news-2023-02-20-p883.mp3
206 | Host: {{assets_host}}
207 | HTTP 308 # expect permanent redirect response
208 | [Asserts]
209 | header "Location" == "https://{{assets_host}}/uploads/news/32/changelog-news-32p883.mp3"
210 |
211 | GET {{proto}}://{{host}}/uploads/podcast/news-2023-02-27/the-changelog-news-2023-02-27.mp3
212 | Host: {{assets_host}}
213 | HTTP 308 # expect permanent redirect response
214 | [Asserts]
215 | header "Location" == "https://{{assets_host}}/uploads/news/33/changelog-news-33.mp3"
216 |
217 | GET {{proto}}://{{host}}/uploads/podcast/news-2023-03-06/the-changelog-news-2023-03-06.mp3
218 | Host: {{assets_host}}
219 | HTTP 308 # expect permanent redirect response
220 | [Asserts]
221 | header "Location" == "https://{{assets_host}}/uploads/news/34/changelog-news-34.mp3"
222 |
223 | GET {{proto}}://{{host}}/uploads/podcast/news-2023-03-06/the-changelog-news-2023-03-06-XXXL.mp3
224 | Host: {{assets_host}}
225 | HTTP 308 # expect permanent redirect response
226 | [Asserts]
227 | header "Location" == "https://{{assets_host}}/uploads/news/34/changelog-news-34-XXXL.mp3"
228 |
229 | GET {{proto}}://{{host}}/uploads/podcast/news-2023-03-13/the-changelog-news-2023-03-13.mp3
230 | Host: {{assets_host}}
231 | HTTP 308 # expect permanent redirect response
232 | [Asserts]
233 | header "Location" == "https://{{assets_host}}/uploads/news/35/changelog-news-35.mp3"
234 |
235 | GET {{proto}}://{{host}}/uploads/podcast/news-2023-03-20/the-changelog-news-2023-03-20.mp3
236 | Host: {{assets_host}}
237 | HTTP 308 # expect permanent redirect response
238 | [Asserts]
239 | header "Location" == "https://{{assets_host}}/uploads/news/36/changelog-news-36.mp3"
240 |
241 | GET {{proto}}://{{host}}/uploads/podcast/news-2023-03-27/the-changelog-news-2023-03-27.mp3
242 | Host: {{assets_host}}
243 | HTTP 308 # expect permanent redirect response
244 | [Asserts]
245 | header "Location" == "https://{{assets_host}}/uploads/news/37/changelog-news-37.mp3"
246 |
247 | GET {{proto}}://{{host}}/uploads/podcast/news-2023-04-03/the-changelog-news-2023-04-03.mp3
248 | Host: {{assets_host}}
249 | HTTP 308 # expect permanent redirect response
250 | [Asserts]
251 | header "Location" == "https://{{assets_host}}/uploads/news/38/changelog-news-38.mp3"
252 |
--------------------------------------------------------------------------------
/varnish/vcl/default.vcl:
--------------------------------------------------------------------------------
1 | # https://varnish-cache.org/docs/7.7/reference/vcl.html#versioning
2 | vcl 4.1;
3 |
4 | # For duration comparisons & access to env vars
5 | import std;
6 |
7 | # So that we can get & set variables
8 | import var;
9 |
10 | # So that we can resolve backend hosts via DNS
11 | import dynamic;
12 |
13 | include "fly-request-id.vcl";
14 | include "http.vcl";
15 | include "www.vcl";
16 | include "news-mp3.vcl";
17 |
18 | # Disable default backend as we are using dynamic backends **only** so that we
19 | # can handle new origin instances (e.g. new app version gets deployed)
20 | backend default none;
21 |
22 | probe backend_health {
23 | # The URL path to request during health checks
24 | # This should be a lightweight endpoint on your backend that returns a 200 status
25 | # when the service is healthy
26 | .url = "/health";
27 |
28 | # How frequently Varnish will poll the backend (in seconds)
29 | # Lower values provide faster detection of backend failures but increase load
30 | # Higher values reduce backend load but increase failure detection time
31 | .interval = 10s;
32 |
33 | # Maximum time to wait for a response from the backend
34 | # If the backend does not respond within this time, the probe is considered failed
35 | # Should be less than the interval to prevent probe overlap
36 | .timeout = 9s;
37 |
38 | # Number of most recent probes to consider when determining backend health
39 | # Varnish keeps a sliding window of the latest probe results
40 | # Higher values make the health determination more stable but slower to change
41 | .window = 6;
42 |
43 | # Minimum number of probes in the window that must succeed for the backend
44 | # to be considered healthy
45 | # In this case, at least 4 out of the 6 most recent probes must be successful
46 | # A threshold of roughly two-thirds of the window gives basic fault tolerance
47 | .threshold = 4;
48 |
49 | # Number of probes assumed to have succeeded when the backend is first added
50 | # Setting this to the threshold means the backend starts out considered healthy
51 | .initial = 4;
52 | }
53 |
54 | # Setup a dynamic director
55 | sub vcl_init {
56 | # https://github.com/nigoroll/libvmod-dynamic/blob/0590f76b05f9b83a5a2e1d246e67a12d66e55c27/src/vmod_dynamic.vcc#L234-L255
57 | new app = dynamic.director(
58 | ttl = 10s,
59 | probe = backend_health,
60 | host_header = std.getenv("BACKEND_APP_FQDN"),
61 | # Increase first_byte_timeout so that mp3 uploads work
62 | first_byte_timeout = 300s,
63 | connect_timeout = 10s,
64 | between_bytes_timeout = 60s
65 | );
66 |
67 | new feeds = dynamic.director(
68 | ttl = 10s,
69 | probe = backend_health,
70 | host_header = std.getenv("BACKEND_FEEDS_FQDN"),
71 | first_byte_timeout = 10s,
72 | connect_timeout = 10s,
73 | between_bytes_timeout = 60s
74 | );
75 |
76 | new assets = dynamic.director(
77 | ttl = 10s,
78 | probe = backend_health,
79 | host_header = std.getenv("BACKEND_ASSETS_FQDN"),
80 | first_byte_timeout = 10s,
81 | connect_timeout = 10s,
82 | between_bytes_timeout = 60s
83 | );
84 | }
85 |
86 | # NOTE: vcl_recv is called at the beginning of a request, after the complete
87 | # request has been received and parsed. Its purpose is to decide whether or not
88 | # to serve the request, how to do it, and, if applicable, which backend to use.
89 | sub vcl_recv {
90 | ### Figure out which is the best public IP to use
91 | # This needs to happen first, otherwise the health-checker IP will not be set correctly
92 | # Prefer fly-client-ip header
93 | if (req.http.fly-client-ip) {
94 | std.log("client_ip:" + req.http.fly-client-ip);
95 | # If the above is not present, take x-forwarded-for
96 | } else if (req.http.x-forwarded-for) {
97 | std.log("client_ip:" + regsub(req.http.x-forwarded-for, "^([^,]+).*", "\1"));
98 | # If neither are present, use the default
99 | } else {
100 | std.log("client_ip:" + client.ip);
101 | }
102 |
103 | ### Varnish health-check
104 | # This is the first HTTP endpoint that will get hit, before traffic arrives for any of the following HTTP endpoints.
105 | if (req.url == "/health") {
106 | return(synth(204));
107 | }
108 | ### Configure health-checks for all backends
109 | # APP
110 | if (req.url == "/app_health") {
111 | set req.http.x-backend = "app";
112 | set req.url = "/health";
113 | return(pass);
114 | }
115 | # FEEDS
116 | if (req.url == "/feeds_health") {
117 | set req.http.x-backend = "feeds";
118 | set req.url = "/health";
119 | return(pass);
120 | }
121 | # ASSETS
122 | if (req.url == "/assets_health") {
123 | set req.http.x-backend = "assets";
124 | set req.url = "/health";
125 | return(pass);
126 | }
127 |
128 | ### practical.ai redirects
129 | if (req.url == "/practicalai/feed"
130 | || req.url == "/practicalai") {
131 | return(synth(301, "Moved Permanently"));
132 | }
133 |
134 | ### Static assets requests
135 | #
136 | if (req.http.host == std.getenv("ASSETS_HOST")) {
137 | # Reject non-GET/HEAD/PURGE requests
138 | if (req.method !~ "GET|HEAD|PURGE") {
139 | return(synth(405, "Method Not Allowed"));
140 | }
141 | set req.http.x-backend = "assets";
142 | }
143 |
144 | ### Feed requests
145 | # Ordered by number of requests in April 2025 (most popular at the top)
146 | # https://ui.honeycomb.io/changelog/datasets/fastly/board-query/xCqdG5ysitw/result/da96aC9mAQf
147 | #
148 | # TODO: Upload feed.json too?
149 | #
150 | # FWIW 🤦 https://github.com/varnishcache/varnish-cache/issues/2355
151 | if (req.url ~ "^/podcast/feed/?(\?.*)?$") {
152 | set req.http.x-backend = "feeds";
153 | set req.url = "/podcast.xml";
154 | } else if (req.url ~ "^/gotime/feed/?(\?.*)?$") {
155 | set req.http.x-backend = "feeds";
156 | set req.url = "/gotime.xml";
157 | } else if (req.url ~ "^/master/feed/?(\?.*)?$") {
158 | set req.http.x-backend = "feeds";
159 | set req.url = "/master.xml";
160 | } else if (req.url ~ "^/feed/?(\?.*)?$") {
161 | set req.http.x-backend = "feeds";
162 | set req.url = "/feed.xml";
163 | } else if (req.url ~ "^/jsparty/feed/?(\?.*)?$") {
164 | set req.http.x-backend = "feeds";
165 | set req.url = "/jsparty.xml";
166 | } else if (req.url ~ "^/shipit/feed/?(\?.*)?$") {
167 | set req.http.x-backend = "feeds";
168 | set req.url = "/shipit.xml";
169 | } else if (req.url ~ "^/news/feed/?(\?.*)?$") {
170 | set req.http.x-backend = "feeds";
171 | set req.url = "/news.xml";
172 | } else if (req.url ~ "^/brainscience/feed/?(\?.*)?$") {
173 | set req.http.x-backend = "feeds";
174 | set req.url = "/brainscience.xml";
175 | } else if (req.url ~ "^/founderstalk/feed/?(\?.*)?$") {
176 | set req.http.x-backend = "feeds";
177 | set req.url = "/founderstalk.xml";
178 | } else if (req.url ~ "^/interviews/feed/?(\?.*)?$") {
179 | set req.http.x-backend = "feeds";
180 | set req.url = "/interviews.xml";
181 | } else if (req.url ~ "^/friends/feed/?(\?.*)?$") {
182 | set req.http.x-backend = "feeds";
183 | set req.url = "/friends.xml";
184 | } else if (req.url ~ "^/rfc/feed/?(\?.*)?$") {
185 | set req.http.x-backend = "feeds";
186 | set req.url = "/rfc.xml";
187 | } else if (req.url ~ "^/spotlight/feed/?(\?.*)?$") {
188 | set req.http.x-backend = "feeds";
189 | set req.url = "/spotlight.xml";
190 | } else if (req.url ~ "^/afk/feed/?(\?.*)?$") {
191 | set req.http.x-backend = "feeds";
192 | set req.url = "/afk.xml";
193 | } else if (req.url ~ "^/posts/feed/?(\?.*)?$") {
194 | set req.http.x-backend = "feeds";
195 | set req.url = "/posts.xml";
196 | } else if (req.url ~ "^/plusplus/xae9heiphohtupha1Ahha3aexoo0oo4W/feed/?(\?.*)?$") {
197 | set req.http.x-backend = "feeds";
198 | set req.url = "/plusplus.xml";
199 | } else if (req.url ~ "^/rss/?(\?.*)?$") {
200 | set req.http.x-backend = "feeds";
201 | set req.url = "/feed.xml";
202 | } else if (req.url ~ "^/feeds/.*(\?.*)?$") {
203 | set req.http.x-backend = "feeds";
204 | set req.url = regsub(req.url, "^/feeds/([^?]*)(\?.*)?$", "/\1.xml");
205 | }
206 |
207 | ### PURGE
208 | # https://varnish-cache.org/docs/7.7/users-guide/purging.html
209 | if (req.method == "PURGE") {
210 | # If no token is configured, allow un-authenticated PURGEs; otherwise require a matching token.
211 | if (std.getenv("PURGE_TOKEN") == "" || req.http.purge-token == std.getenv("PURGE_TOKEN")) {
212 | return(purge);
213 | } else {
214 | return(synth(401, "Invalid PURGE token"));
215 | }
216 | }
217 | }
218 |
219 | sub vcl_hash {
220 | if (req.http.x-backend == "assets") {
221 | set req.backend_hint = assets.backend(std.getenv("BACKEND_ASSETS_HOST"), std.getenv("BACKEND_ASSETS_PORT"));
222 | set req.http.x-backend-fqdn = std.getenv("BACKEND_ASSETS_FQDN");
223 | } else if (req.http.x-backend == "feeds") {
224 | set req.backend_hint = feeds.backend(std.getenv("BACKEND_FEEDS_HOST"), std.getenv("BACKEND_FEEDS_PORT"));
225 | set req.http.x-backend-fqdn = std.getenv("BACKEND_FEEDS_FQDN");
226 | } else {
227 | set req.backend_hint = app.backend(std.getenv("BACKEND_APP_HOST"), std.getenv("BACKEND_APP_PORT"));
228 | set req.http.x-backend-fqdn = std.getenv("BACKEND_APP_FQDN");
229 | }
230 | }
231 |
232 | sub vcl_pass {
233 | # Bypass caching
234 | set req.http.x-bypass = "true";
235 | if (req.http.x-backend == "assets") {
236 | set req.backend_hint = assets.backend(std.getenv("BACKEND_ASSETS_HOST"), std.getenv("BACKEND_ASSETS_PORT"));
237 | set req.http.x-backend-fqdn = std.getenv("BACKEND_ASSETS_FQDN");
238 | } else if (req.http.x-backend == "feeds") {
239 | set req.backend_hint = feeds.backend(std.getenv("BACKEND_FEEDS_HOST"), std.getenv("BACKEND_FEEDS_PORT"));
240 | set req.http.x-backend-fqdn = std.getenv("BACKEND_FEEDS_FQDN");
241 | } else {
242 | set req.backend_hint = app.backend(std.getenv("BACKEND_APP_HOST"), std.getenv("BACKEND_APP_PORT"));
243 | set req.http.x-backend-fqdn = std.getenv("BACKEND_APP_FQDN");
244 | }
245 | }
246 |
247 | sub vcl_synth {
248 | # Reject non-GET/HEAD/PURGE requests
249 | if (req.http.host == std.getenv("BACKEND_ASSETS_FQDN")
250 | && resp.status == 405) {
251 | set resp.http.allow = "GET, HEAD, PURGE";
252 | return(deliver);
253 | }
254 |
255 | # practical.ai redirects
256 | if (req.url == "/practicalai/feed"
257 | && resp.status == 301) {
258 | set resp.http.location = "https://feeds.transistor.fm/practical-ai-machine-learning-data-science-llm";
259 | set resp.body = {"
260 | You are being redirected.
261 | "};
262 | return(deliver);
263 | }
264 |
265 | if (req.url == "/practicalai"
266 | && resp.status == 301) {
267 | set resp.http.location = "https://practicalai.fm";
268 | set resp.body = {"
269 | You are being redirected.
270 | "};
271 | return(deliver);
272 | }
273 |
274 | # Which region is serving this request?
275 | var.set("region", std.getenv("FLY_REGION"));
276 | if (var.get("region") == "") {
277 | var.set("region", "LOCAL");
278 | }
279 | set resp.http.cache-status = "region=" + var.get("region") + "; synth";
280 | std.log("server_datacenter:" + var.get("region"));
281 | }
282 |
283 | # https://varnish-cache.org/docs/7.7/users-guide/vcl-grace.html
284 | # https://docs.varnish-software.com/tutorials/object-lifetime/
285 | # https://www.varnish-software.com/developers/tutorials/http-caching-basics/
286 | # https://blog.markvincze.com/how-to-gracefully-fall-back-to-cache-on-5xx-responses-with-varnish/
287 | sub vcl_backend_response {
288 | # Objects within ttl are considered fresh.
289 | set beresp.ttl = std.duration(std.getenv("BERESP_TTL"));
290 |
291 | # Objects within grace are considered stale.
292 | # Serve stale content while refreshing in the background.
293 | # 🤔 QUESTION: should we vary this based on backend health?
294 | set beresp.grace = std.duration(std.getenv("BERESP_GRACE"));
295 |
296 | if (beresp.status >= 500) {
297 | # Don't cache a 5xx response
298 | set beresp.uncacheable = true;
299 |
300 | # If is_bgfetch is true, it means that we've found and returned the cached
301 | # object to the client, and triggered an asynchronous background update. In
302 | # that case, since the backend returned a 5xx, we have to abandon, otherwise
303 | # the previously cached object would be erased from the cache (even if we
304 | # set uncacheable to true).
305 | if (bereq.is_bgfetch) {
306 | return (abandon);
307 | }
308 | }
309 |
310 | # 🤔 QUESTION: Should we configure beresp.keep?
311 | }
312 |
313 |
314 | # https://gist.github.com/leotsem/1246511/824cb9027a0a65d717c83e678850021dad84688d#file-default-vcl-pl
315 | # https://varnish-cache.org/docs/7.7/reference/vcl-var.html#obj
316 | sub vcl_deliver {
317 | # Add CORS * header for all assets responses
318 | if (req.http.x-backend == "assets") {
319 | set resp.http.access-control-allow-origin = "*";
320 | }
321 |
322 | # Which region is serving this request?
323 | var.set("region", std.getenv("FLY_REGION"));
324 | if (var.get("region") == "") {
325 | var.set("region", "LOCAL");
326 | }
327 | set resp.http.cache-status = "region=" + var.get("region");
328 | std.log("server_datacenter:" + var.get("region"));
329 |
330 | # Which origin is serving this request?
331 | set resp.http.cache-status = resp.http.cache-status + "; origin=" + req.backend_hint + "," + req.http.x-backend-fqdn;
332 | std.log("backend:" + req.http.x-backend-fqdn);
333 |
334 | if (req.http.x-bypass == "true") {
335 | set resp.http.cache-status = resp.http.cache-status + "; bypass";
336 | return(deliver);
337 | }
338 |
339 | # What is the remaining TTL for this object?
340 | set resp.http.cache-status = resp.http.cache-status + "; ttl=" + obj.ttl;
341 | std.log("ttl:" + obj.ttl);
342 |
343 | # What is the max object staleness permitted?
344 | set resp.http.cache-status = resp.http.cache-status + "; grace=" + obj.grace;
345 | std.log("grace:" + obj.grace);
346 |
347 | # Did the response come from Varnish or from the backend?
348 | if (obj.hits > 0) {
349 | set resp.http.cache-status = resp.http.cache-status + "; hit";
350 | } else {
351 | set resp.http.cache-status = resp.http.cache-status + "; miss";
352 | }
353 |
354 | # Is this object stale?
355 | if (obj.hits > 0 && obj.ttl < std.duration(integer=0)) {
356 | set resp.http.cache-status = resp.http.cache-status + "; stale";
357 | }
358 |
359 | # How many times has this response been served from Varnish?
360 | set resp.http.cache-status = resp.http.cache-status + "; hits=" + obj.hits;
361 | std.log("hits:" + obj.hits);
362 | }
363 |
364 | # LINKS:
365 | # - https://github.com/magento/magento2/blob/03621bbcd75cbac4ffa8266a51aa2606980f4830/app/code/Magento/PageCache/etc/varnish6.vcl
366 | # - https://abhishekjakhotiya.medium.com/magento-internals-cache-purging-and-cache-tags-bf7772e60797
367 |
--------------------------------------------------------------------------------
/dagger/main.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "context"
5 | "dagger/pipely/internal/dagger"
6 | "errors"
7 | "fmt"
8 |
9 | "github.com/containerd/platforms"
10 | )
11 |
12 | const (
13 | // https://hub.docker.com/_/golang/tags?name=1.24
14 | golangVersion = "1.24.9@sha256:02ce1d7ea7825dccb7cd10222e44e7c0565a08c5a38795e50fbf43936484507b"
15 |
16 | // https://github.com/nabsul/tls-exterminator
17 | tlsExterminatorVersion = "93583389e8bab9b519466d95e69e594682d8de5b"
18 |
19 | // https://github.com/DarthSim/overmind/releases
20 | overmindVersion = "2.5.1"
21 |
22 | // https://hub.docker.com/r/timberio/vector/tags?name=debian
23 | vectorVersion = "0.50.0-debian@sha256:8e81f992197125f736e1fe5d73117ca6b69a0bb69cf3633f82b9233c9769c9c1"
24 |
25 | // https://github.com/Orange-OpenSource/hurl/releases
26 | hurlVersion = "7.0.0"
27 |
28 | // https://github.com/hatoo/oha/releases
29 | ohaVersion = "1.10.0"
30 | )
31 |
32 | type Env int
33 |
34 | const (
35 | Dev Env = iota
36 | Test
37 | Prod
38 | )
39 |
40 | type Pipely struct {
41 | // Golang container
42 | Golang *dagger.Container
43 | // Varnish container
44 | Varnish *dagger.Container
45 | // Varnish PURGE token
46 | VarnishPurgeToken *dagger.Secret
47 | // Source code
48 | Source *dagger.Directory
49 | // Container image tag
50 | Tag string
51 | // App proxy
52 | AppProxy *Proxy
53 | // Feeds proxy
54 | FeedsProxy *Proxy
55 | // Assets proxy
56 | AssetsProxy *Proxy
57 | }
58 |
59 | func New(
60 | ctx context.Context,
61 |
62 | // +defaultPath="./"
63 | source *dagger.Directory,
64 |
65 | // +default="dev"
66 | tag string,
67 |
68 | // https://hub.docker.com/_/varnish/tags
69 | // +default="7.7.3@sha256:9310a9cbdb61b632afb83fff44fa7c71b90f987a0587d23fc159e3b22334d577"
70 | varnishVersion string,
71 |
72 | // +default=9000
73 | varnishPort int,
74 |
75 | // +default="60s"
76 | berespTtl string,
77 |
78 | // +default="24h"
79 | berespGrace string,
80 |
81 | // +optional
82 | purgeToken *dagger.Secret,
83 |
84 | // +default="5000:changelog-2025-05-05.fly.dev:"
85 | appProxy string,
86 |
87 | // +default="5010:feeds.changelog.place:"
88 | feedsProxy string,
89 |
90 | // +default="5020:changelog.place:cdn.changelog.com"
91 | assetsProxy string,
92 |
93 | // https://ui.honeycomb.io/changelog/datasets/pipely/overview
94 | // +default="pipely"
95 | honeycombDataset string,
96 |
97 | // +optional
98 | honeycombApiKey *dagger.Secret,
99 |
100 | // https://dev.maxmind.com/geoip/updating-databases/#directly-downloading-databases
101 | // +optional
102 | maxMindAuth *dagger.Secret,
103 |
104 | // +default="us-east-1"
105 | awsRegion string,
106 |
107 | // +optional
108 | awsLocalProductionS3BucketSuffix string,
109 |
110 | // +optional
111 | awsAccessKeyId *dagger.Secret,
112 |
113 | // +optional
114 | awsSecretAccessKey *dagger.Secret,
115 | ) (*Pipely, error) {
116 | pipely := &Pipely{
117 | Golang: dag.Container().From("golang:" + golangVersion),
118 | Tag: tag,
119 | Source: source,
120 | VarnishPurgeToken: purgeToken,
121 | }
122 |
123 | pipely.Varnish = dag.Container().From("varnish:"+varnishVersion).
124 | WithUser("root"). // a bunch of commands fail if we are not root, so YOLO & sandbox with Firecracker, Kata Containers, etc.
125 | WithEnvVariable("VARNISH_HTTP_PORT", fmt.Sprintf("%d", varnishPort)).
126 | WithExposedPort(varnishPort).
127 | WithEnvVariable("BERESP_TTL", berespTtl).
128 | WithEnvVariable("BERESP_GRACE", berespGrace).
129 | WithEnvVariable("HONEYCOMB_DATASET", honeycombDataset).
130 | WithEnvVariable("AWS_REGION", awsRegion)
131 |
132 | if pipely.VarnishPurgeToken != nil {
133 | pipely.Varnish = pipely.Varnish.
134 | WithSecretVariable("PURGE_TOKEN", pipely.VarnishPurgeToken)
135 | }
136 |
137 | if honeycombApiKey != nil {
138 | pipely.Varnish = pipely.Varnish.
139 | WithSecretVariable("HONEYCOMB_API_KEY", honeycombApiKey)
140 | }
141 |
142 | if awsAccessKeyId != nil {
143 | if awsSecretAccessKey == nil {
144 | return nil, errors.New("--aws-secret-access-key is required")
145 | }
146 |
147 | pipely.Varnish = pipely.Varnish.
148 | WithSecretVariable("AWS_ACCESS_KEY_ID", awsAccessKeyId).
149 | WithSecretVariable("AWS_SECRET_ACCESS_KEY", awsSecretAccessKey).
150 | WithEnvVariable("S3_BUCKET_SUFFIX", awsLocalProductionS3BucketSuffix)
151 | }
152 |
153 | if maxMindAuth != nil {
154 | geoLite2CityArchive := dag.HTTP("https://download.maxmind.com/geoip/databases/GeoLite2-City/download?suffix=tar.gz", dagger.HTTPOpts{
155 | AuthHeader: maxMindAuth,
156 | })
157 | pipely.Varnish = pipely.Varnish.
158 | WithExec([]string{"mkdir", "-p", "/usr/local/share/GeoIP"}).
159 | WithMountedFile("/tmp/geolite2-city.tar.gz", geoLite2CityArchive).
160 | WithExec([]string{"tar", "-zxvf", "/tmp/geolite2-city.tar.gz", "-C", "/usr/local/share/GeoIP", "--strip-components=1"}).
161 | WithExec([]string{"ls", "/usr/local/share/GeoIP/GeoLite2-City.mmdb"}).
162 | WithEnvVariable("GEOIP_ENRICHED", "true")
163 | }
164 |
165 | app, err := NewProxy(appProxy)
166 | if err != nil {
167 | return nil, err
168 | }
169 | pipely.AppProxy = app
170 |
171 | feeds, err := NewProxy(feedsProxy)
172 | if err != nil {
173 | return nil, err
174 | }
175 | pipely.FeedsProxy = feeds
176 |
177 | assets, err := NewProxy(assetsProxy)
178 | if err != nil {
179 | return nil, err
180 | }
181 | pipely.AssetsProxy = assets
182 |
183 | return pipely, nil
184 | }
185 |
186 | func (m *Pipely) app() *dagger.Container {
187 | tlsExterminator := m.Golang.
188 | WithExec([]string{"go", "install", "github.com/nabsul/tls-exterminator@" + tlsExterminatorVersion}).
189 | File("/go/bin/tls-exterminator")
190 |
191 | overmind := m.Golang.
192 | WithExec([]string{"go", "install", "github.com/DarthSim/overmind/v2@v" + overmindVersion}).
193 | File("/go/bin/overmind")
194 |
195 | vectorContainer := dag.Container().From("timberio/vector:" + vectorVersion)
196 |
197 | procfile := fmt.Sprintf(`varnish: docker-varnish-entrypoint
198 | app: tls-exterminator %s
199 | feeds: tls-exterminator %s
200 | assets: tls-exterminator %s
201 | logs: bash -c 'coproc VJS { varnish-json-response; }; if [ -z "${VJS_PID}" ]; then echo "ERROR: Failed to start varnish-json-response coprocess." >&2; exit 1; fi; trap "kill ${VJS_PID} 2>/dev/null" EXIT; vector <&${VJS[0]}'
202 | `, m.AppProxy.TlsExterminator, m.FeedsProxy.TlsExterminator, m.AssetsProxy.TlsExterminator)
203 |
204 | return m.Varnish.
205 | // Configure various environment variables
206 | WithEnvVariable("BACKEND_APP_FQDN", m.AppProxy.Fqdn).
207 | WithEnvVariable("BACKEND_APP_HOST", "localhost").
208 | WithEnvVariable("BACKEND_APP_PORT", m.AppProxy.Port).
209 | WithEnvVariable("BACKEND_FEEDS_FQDN", m.FeedsProxy.Fqdn).
210 | WithEnvVariable("BACKEND_FEEDS_HOST", "localhost").
211 | WithEnvVariable("BACKEND_FEEDS_PORT", m.FeedsProxy.Port).
212 | WithEnvVariable("BACKEND_ASSETS_FQDN", m.AssetsProxy.Fqdn).
213 | WithEnvVariable("BACKEND_ASSETS_HOST", "localhost").
214 | WithEnvVariable("BACKEND_ASSETS_PORT", m.AssetsProxy.Port).
215 | WithEnvVariable("ASSETS_HOST", m.AssetsProxy.Host).
216 | // Add tls-exterminator
217 | WithFile("/usr/local/bin/tls-exterminator", tlsExterminator).
218 | // Prepare apt packages
219 | WithEnvVariable("DEBIAN_FRONTEND", "noninteractive").
220 | WithEnvVariable("TERM", "xterm-256color").
221 | WithExec([]string{"apt-get", "update"}).
222 | // Install tmux
223 | WithExec([]string{"apt-get", "install", "--yes", "tmux"}).
224 | WithExec([]string{"tmux", "-V"}).
225 | // Install vector.dev
226 | WithFile("/usr/bin/vector", vectorContainer.File("/usr/bin/vector")).
227 | WithDirectory("/usr/share/vector", vectorContainer.Directory("/usr/share/vector")).
228 | WithDirectory("/usr/share/doc/vector", vectorContainer.Directory("/usr/share/doc/vector")).
229 | WithDirectory("/etc/vector", vectorContainer.Directory("/etc/vector")).
230 | WithDirectory("/var/lib/vector", vectorContainer.Directory("/var/lib/vector")).
231 | WithExec([]string{"vector", "--version"}).
232 | // Install & configure overmind
233 | WithFile("/usr/local/bin/overmind", overmind).
234 | WithNewFile("/Procfile", procfile).
235 | WithWorkdir("/").
236 | WithEntrypoint([]string{"overmind", "start", "--timeout=30", "--no-port", "--auto-restart=all"})
237 | }
238 |
239 | func (m *Pipely) withConfigs(c *dagger.Container, env Env) *dagger.Container {
240 | return m.withVectorConfig(
241 | m.withVarnishJsonResponse(
242 | m.withVarnishConfig(c),
243 | ),
244 | env)
245 | }
246 |
247 | func (m *Pipely) withVarnishConfig(c *dagger.Container) *dagger.Container {
248 | return c.
249 | WithDirectory(
250 | "/etc/varnish",
251 | m.Source.Directory("varnish/vcl"))
252 | }
253 |
254 | func (m *Pipely) withVarnishJsonResponse(c *dagger.Container) *dagger.Container {
255 | return c.WithFile(
256 | "/usr/local/bin/varnish-json-response",
257 | m.Source.File("varnish/varnish-json-response.bash"),
258 | dagger.ContainerWithFileOpts{
259 | Permissions: 755,
260 | })
261 | }
262 |
263 | func (m *Pipely) withVectorConfig(c *dagger.Container, env Env) *dagger.Container {
264 | ctx := context.Background()
265 |
266 | containerWithVectorConfigs := c.
267 | WithEnvVariable("VECTOR_CONFIG", "/etc/vector/*.yaml").
268 | WithFile(
269 | "/etc/vector/vector.yaml",
270 | m.Source.File("vector/pipedream.changelog.com/default.yaml"))
271 |
272 | if env != Prod {
273 | containerWithVectorConfigs = containerWithVectorConfigs.
274 | WithFile(
275 | "/etc/vector/debug_varnish.yaml",
276 | m.Source.File("vector/pipedream.changelog.com/debug_varnish.yaml"))
277 | }
278 |
279 | geoipEnriched, _ := c.EnvVariable(ctx, "GEOIP_ENRICHED")
280 | if geoipEnriched == "true" {
281 | containerWithVectorConfigs = containerWithVectorConfigs.
282 | WithFile(
283 | "/etc/vector/geoip.yaml",
284 | m.Source.File("vector/pipedream.changelog.com/geoip.yaml"))
285 | }
286 |
287 | if geoipEnriched == "true" && env != Prod {
288 | containerWithVectorConfigs = containerWithVectorConfigs.
289 | WithFile(
290 | "/etc/vector/debug_varnish_geoip.yaml",
291 | m.Source.File("vector/pipedream.changelog.com/debug_varnish_geoip.yaml")).
292 | WithFile(
293 | "/etc/vector/debug_s3.yaml",
294 | m.Source.File("vector/pipedream.changelog.com/debug_s3.yaml"))
295 | }
296 |
297 | return containerWithVectorConfigs.
298 | WithExec([]string{"vector", "validate", "--skip-healthchecks"})
299 | }
300 |
301 | // Test container with various useful tools - use `just` as the starting point
302 | func (m *Pipely) Test(ctx context.Context) *dagger.Container {
303 | return m.withConfigs(
304 | m.local(ctx),
305 | Test)
306 | }
307 |
308 | // Production container for local use with various useful debugging tools - use `just` as the starting point
309 | func (m *Pipely) LocalProduction(ctx context.Context) *dagger.Container {
310 | return m.withConfigs(
311 | m.local(ctx),
312 | Dev)
313 | }
314 |
315 | func (m *Pipely) local(ctx context.Context) *dagger.Container {
316 | hurlArchive := dag.HTTP("https://github.com/Orange-OpenSource/hurl/releases/download/" + hurlVersion + "/hurl-" + hurlVersion + "-" + altArchitecture(ctx) + "-unknown-linux-gnu.tar.gz")
317 |
318 | // https://github.com/davecheney/httpstat
319 | httpstat := m.Golang.
320 | WithExec([]string{"go", "install", "github.com/davecheney/httpstat@v1.2.1"}).
321 | File("/go/bin/httpstat")
322 |
323 | // https://github.com/fabio42/sasqwatch
324 | sasqwatch := m.Golang.
325 | WithExec([]string{"go", "install", "github.com/fabio42/sasqwatch@8564c29ceaa03d5211b8b6d7a3012f9acf691fd1"}).
326 | File("/go/bin/sasqwatch")
327 |
328 | // https://github.com/xxxserxxx/gotop
329 | gotop := m.Golang.
330 | WithExec([]string{"go", "install", "github.com/xxxserxxx/gotop/v4/cmd/gotop@bba42d08624edee8e339ac98c1a9c46810414f78"}).
331 | File("/go/bin/gotop")
332 |
333 | // https://github.com/showwin/speedtest-go
334 | speedtest := m.Golang.
335 | WithExec([]string{"go", "install", "github.com/showwin/speedtest-go@v1.7.10"}).
336 | File("/go/bin/speedtest-go")
337 |
338 | p, _ := dag.DefaultPlatform(ctx)
339 | platform := platforms.MustParse(string(p))
340 | oha := dag.HTTP("https://github.com/hatoo/oha/releases/download/v" + ohaVersion + "/oha-linux-" + platform.Architecture)
341 |
342 | return m.app().
343 | // Install hurl.dev + dependencies (curl & libxml2)
344 | WithExec([]string{"apt-get", "install", "--yes", "curl"}).
345 | WithExec([]string{"curl", "--version"}).
346 | WithExec([]string{"apt-get", "install", "--yes", "libxml2"}).
347 | WithExec([]string{"mkdir", "-p", "/opt/hurl"}).
348 | WithMountedFile("/opt/hurl.tar.gz", hurlArchive).
349 | WithExec([]string{"tar", "-zxvf", "/opt/hurl.tar.gz", "-C", "/opt/hurl", "--strip-components=1"}).
350 | WithExec([]string{"ln", "-sf", "/opt/hurl/bin/hurl", "/usr/local/bin/hurl"}).
351 | WithExec([]string{"hurl", "--version"}).
352 | // Install htop
353 | WithExec([]string{"apt-get", "install", "--yes", "htop"}).
354 | WithExec([]string{"htop", "-V"}).
355 | // Install procps
356 | WithExec([]string{"apt-get", "install", "--yes", "procps"}).
357 | WithExec([]string{"ps", "-V"}).
358 | // Install neovim
359 | WithExec([]string{"apt-get", "install", "--yes", "neovim"}).
360 | WithExec([]string{"nvim", "--version"}).
361 | // Install jq
362 | WithExec([]string{"apt-get", "install", "--yes", "jq"}).
363 | WithExec([]string{"jq", "--version"}).
364 | // Install httpstat
365 | WithFile("/usr/local/bin/httpstat", httpstat).
366 | WithExec([]string{"httpstat", "-v"}).
367 | // Install sasqwatch
368 | WithFile("/usr/local/bin/sasqwatch", sasqwatch).
369 | WithExec([]string{"sasqwatch", "--version"}).
370 | // Install gotop
371 | WithFile("/usr/local/bin/gotop", gotop).
372 | WithExec([]string{"gotop", "--version"}).
373 | // Install speedtest-go
374 | WithFile("/usr/local/bin/speedtest-go", speedtest).
375 | WithExec([]string{"speedtest-go", "--version"}).
376 | // Install oha
377 | WithFile("/usr/local/bin/oha", oha, dagger.ContainerWithFileOpts{
378 | Permissions: 755,
379 | }).
380 | WithExec([]string{"oha", "--version"}).
381 | // Install just.systems
382 | WithExec([]string{"bash", "-c", "curl --proto '=https' --tlsv1.2 -sSf https://just.systems/install.sh | bash -s -- --to /usr/local/bin"}).
383 | WithFile("/justfile", m.Source.File("container/justfile")).
384 | WithExec([]string{"just"}).
385 | // Add test directory
386 | WithDirectory("/test", m.Source.Directory("test")).
387 | // Add docs directory
388 | WithDirectory("/docs", m.Source.Directory("docs")).
389 | // Add welcome message
390 | WithMountedFile("/tmp/welcome.bashrc", m.Source.File("container/welcome.bashrc")).
391 | WithExec([]string{"bash", "-c", "cat /tmp/welcome.bashrc >> ~/.bashrc"})
392 | }
393 |
394 | func altArchitecture(ctx context.Context) string {
395 | p, _ := dag.DefaultPlatform(ctx)
396 | platform := platforms.MustParse(string(p))
397 |
398 | switch platform.Architecture {
399 | case "amd64":
400 | return "x86_64"
401 | case "arm64":
402 | return "aarch64"
403 | default:
404 | return platform.Architecture
405 | }
406 | }
407 |
408 | // Test VCL via VTC
409 | func (m *Pipely) TestVarnish(ctx context.Context) *dagger.Container {
410 | return m.Test(ctx).WithExec([]string{"just", "test-vtc"})
411 | }
412 |
413 | // Test acceptance
414 | func (m *Pipely) TestAcceptance(ctx context.Context) *dagger.Container {
415 | pipely := m.Test(ctx).
416 | AsService(dagger.ContainerAsServiceOpts{UseEntrypoint: true})
417 |
418 | testAcceptanceCmd := []string{"just", "test-acceptance-local", "--variable", "proto=http", "--variable", "host=pipely:9000"}
419 | if m.VarnishPurgeToken != nil {
420 | purgeToken, err := m.VarnishPurgeToken.Plaintext(ctx)
421 | if err != nil {
422 | panic(err)
423 | }
424 | testAcceptanceCmd = append(testAcceptanceCmd, "--variable", "purge_token="+purgeToken)
425 | }
426 |
427 | return m.Test(ctx).
428 | WithServiceBinding("pipely", pipely).
429 | WithServiceBinding("www.pipely", pipely).
430 | WithExec(testAcceptanceCmd)
431 | }
432 |
433 | // Test acceptance report
434 | func (m *Pipely) TestAcceptanceReport(ctx context.Context) *dagger.Directory {
435 | return m.TestAcceptance(ctx).Directory("/var/opt/hurl/test-acceptance-local")
436 | }
437 |
438 | // Publish app container
439 | func (m *Pipely) Publish(
440 | ctx context.Context,
441 |
442 | // +default="ghcr.io/thechangelog/pipely"
443 | image string,
444 |
445 | // +default="ghcr.io"
446 | registryAddress string,
447 |
448 | registryUsername string,
449 |
450 | registryPassword *dagger.Secret,
451 | ) (string, error) {
452 | return m.withConfigs(m.app(), Prod).
453 | WithLabel("org.opencontainers.image.url", "https://pipely.tech").
454 | WithLabel("org.opencontainers.image.description", "A single-purpose, single-tenant CDN running Varnish Cache (open source) on Fly.io").
455 | WithLabel("org.opencontainers.image.authors", "@"+registryUsername).
456 | WithRegistryAuth(registryAddress, registryUsername, registryPassword).
457 | Publish(ctx, image+":"+m.Tag)
458 | }
459 |
--------------------------------------------------------------------------------
/test/vtc/feeds.vtc:
--------------------------------------------------------------------------------
1 | varnishtest "Test Feeds backend"
2 |
3 | # App mock server
4 | server s1 {
5 | rxreq
6 | txresp -status 200 -body "App backend"
7 | } -start
8 |
9 | # Feeds mock server with responses for all feed requests
10 | server s2 {
11 | # Test for /podcast/feed
12 | rxreq
13 | expect req.url == "/podcast.xml"
14 | txresp -status 200 -body "podcast.xml"
15 |
16 | # Test for /podcast/feed/
17 | rxreq
18 | expect req.url == "/podcast.xml"
19 | txresp -status 200 -body "podcast.xml"
20 |
21 | # Test for /podcast/feed?arg=first&arg=second
22 | rxreq
23 | expect req.url == "/podcast.xml"
24 | txresp -status 200 -body "podcast.xml"
25 |
26 | # Test for /gotime/feed
27 | rxreq
28 | expect req.url == "/gotime.xml"
29 | txresp -status 200 -body "gotime.xml"
30 |
31 | # Test for /gotime/feed/
32 | rxreq
33 | expect req.url == "/gotime.xml"
34 | txresp -status 200 -body "gotime.xml"
35 |
36 | # Test for /gotime/feed?arg=first&arg=second
37 | rxreq
38 | expect req.url == "/gotime.xml"
39 | txresp -status 200 -body "gotime.xml"
40 |
41 | # Test for /master/feed
42 | rxreq
43 | expect req.url == "/master.xml"
44 | txresp -status 200 -body "master.xml"
45 |
46 | # Test for /master/feed/
47 | rxreq
48 | expect req.url == "/master.xml"
49 | txresp -status 200 -body "master.xml"
50 |
51 | # Test for /master/feed?arg=first&arg=second
52 | rxreq
53 | expect req.url == "/master.xml"
54 | txresp -status 200 -body "master.xml"
55 |
56 | # Test for /feed
57 | rxreq
58 | expect req.url == "/feed.xml"
59 | txresp -status 200 -body "feed.xml"
60 |
61 | # Test for /feed/
62 | rxreq
63 | expect req.url == "/feed.xml"
64 | txresp -status 200 -body "feed.xml"
65 |
66 | # Test for /feed?arg=first&arg=second
67 | rxreq
68 | expect req.url == "/feed.xml"
69 | txresp -status 200 -body "feed.xml"
70 |
71 | # Test for /jsparty/feed
72 | rxreq
73 | expect req.url == "/jsparty.xml"
74 | txresp -status 200 -body "jsparty.xml"
75 |
76 | # Test for /jsparty/feed/
77 | rxreq
78 | expect req.url == "/jsparty.xml"
79 | txresp -status 200 -body "jsparty.xml"
80 |
81 | # Test for /jsparty/feed?arg=first&arg=second
82 | rxreq
83 | expect req.url == "/jsparty.xml"
84 | txresp -status 200 -body "jsparty.xml"
85 |
86 | # Test for /shipit/feed
87 | rxreq
88 | expect req.url == "/shipit.xml"
89 | txresp -status 200 -body "shipit.xml"
90 |
91 | # Test for /shipit/feed/
92 | rxreq
93 | expect req.url == "/shipit.xml"
94 | txresp -status 200 -body "shipit.xml"
95 |
96 | # Test for /shipit/feed?arg=first&arg=second
97 | rxreq
98 | expect req.url == "/shipit.xml"
99 | txresp -status 200 -body "shipit.xml"
100 |
101 | # Test for /news/feed
102 | rxreq
103 | expect req.url == "/news.xml"
104 | txresp -status 200 -body "news.xml"
105 |
106 | # Test for /news/feed/
107 | rxreq
108 | expect req.url == "/news.xml"
109 | txresp -status 200 -body "news.xml"
110 |
111 | # Test for /news/feed?arg=first&arg=second
112 | rxreq
113 | expect req.url == "/news.xml"
114 | txresp -status 200 -body "news.xml"
115 |
116 | # Test for /brainscience/feed
117 | rxreq
118 | expect req.url == "/brainscience.xml"
119 | txresp -status 200 -body "brainscience.xml"
120 |
121 | # Test for /brainscience/feed/
122 | rxreq
123 | expect req.url == "/brainscience.xml"
124 | txresp -status 200 -body "brainscience.xml"
125 |
126 | # Test for /brainscience/feed?arg=first&arg=second
127 | rxreq
128 | expect req.url == "/brainscience.xml"
129 | txresp -status 200 -body "brainscience.xml"
130 |
131 | # Test for /founderstalk/feed
132 | rxreq
133 | expect req.url == "/founderstalk.xml"
134 | txresp -status 200 -body "founderstalk.xml"
135 |
136 | # Test for /founderstalk/feed/
137 | rxreq
138 | expect req.url == "/founderstalk.xml"
139 | txresp -status 200 -body "founderstalk.xml"
140 |
141 | # Test for /founderstalk/feed?arg=first&arg=second
142 | rxreq
143 | expect req.url == "/founderstalk.xml"
144 | txresp -status 200 -body "founderstalk.xml"
145 |
146 | # Test for /interviews/feed
147 | rxreq
148 | expect req.url == "/interviews.xml"
149 | txresp -status 200 -body "interviews.xml"
150 |
151 | # Test for /interviews/feed/
152 | rxreq
153 | expect req.url == "/interviews.xml"
154 | txresp -status 200 -body "interviews.xml"
155 |
156 | # Test for /interviews/feed?arg=first&arg=second
157 | rxreq
158 | expect req.url == "/interviews.xml"
159 | txresp -status 200 -body "interviews.xml"
160 |
161 | # Test for /friends/feed
162 | rxreq
163 | expect req.url == "/friends.xml"
164 | txresp -status 200 -body "friends.xml"
165 |
166 | # Test for /friends/feed/
167 | rxreq
168 | expect req.url == "/friends.xml"
169 | txresp -status 200 -body "friends.xml"
170 |
171 | # Test for /friends/feed?arg=first&arg=second
172 | rxreq
173 | expect req.url == "/friends.xml"
174 | txresp -status 200 -body "friends.xml"
175 |
176 | # Test for /rfc/feed
177 | rxreq
178 | expect req.url == "/rfc.xml"
179 | txresp -status 200 -body "rfc.xml"
180 |
181 | # Test for /rfc/feed/
182 | rxreq
183 | expect req.url == "/rfc.xml"
184 | txresp -status 200 -body "rfc.xml"
185 |
186 | # Test for /rfc/feed?arg=first&arg=second
187 | rxreq
188 | expect req.url == "/rfc.xml"
189 | txresp -status 200 -body "rfc.xml"
190 |
191 | # Test for /spotlight/feed
192 | rxreq
193 | expect req.url == "/spotlight.xml"
194 | txresp -status 200 -body "spotlight.xml"
195 |
196 | # Test for /spotlight/feed/
197 | rxreq
198 | expect req.url == "/spotlight.xml"
199 | txresp -status 200 -body "spotlight.xml"
200 |
201 | # Test for /spotlight/feed?arg=first&arg=second
202 | rxreq
203 | expect req.url == "/spotlight.xml"
204 | txresp -status 200 -body "spotlight.xml"
205 |
206 | # Test for /afk/feed
207 | rxreq
208 | expect req.url == "/afk.xml"
209 | txresp -status 200 -body "afk.xml"
210 |
211 | # Test for /afk/feed/
212 | rxreq
213 | expect req.url == "/afk.xml"
214 | txresp -status 200 -body "afk.xml"
215 |
216 | # Test for /afk/feed?arg=first&arg=second
217 | rxreq
218 | expect req.url == "/afk.xml"
219 | txresp -status 200 -body "afk.xml"
220 |
221 | # Test for /posts/feed
222 | rxreq
223 | expect req.url == "/posts.xml"
224 | txresp -status 200 -body "posts.xml"
225 |
226 | # Test for /posts/feed/
227 | rxreq
228 | expect req.url == "/posts.xml"
229 | txresp -status 200 -body "posts.xml"
230 |
231 | # Test for /posts/feed?arg=first&arg=second
232 | rxreq
233 | expect req.url == "/posts.xml"
234 | txresp -status 200 -body "posts.xml"
235 |
236 | # Test for /plusplus/xae9heiphohtupha1Ahha3aexoo0oo4W/feed
237 | rxreq
238 | expect req.url == "/plusplus.xml"
239 | txresp -status 200 -body "plusplus.xml"
240 |
241 | # Test for /plusplus/xae9heiphohtupha1Ahha3aexoo0oo4W/feed/
242 | rxreq
243 | expect req.url == "/plusplus.xml"
244 | txresp -status 200 -body "plusplus.xml"
245 |
246 | # Test for /plusplus/xae9heiphohtupha1Ahha3aexoo0oo4W/feed?arg=first&arg=second
247 | rxreq
248 | expect req.url == "/plusplus.xml"
249 | txresp -status 200 -body "plusplus.xml"
250 |
251 | # Test for /rss
252 | rxreq
253 | expect req.url == "/feed.xml"
254 | txresp -status 200 -body "feed.xml"
255 |
256 | # Test for /rss/
257 | rxreq
258 | expect req.url == "/feed.xml"
259 | txresp -status 200 -body "feed.xml"
260 |
261 | # Test for /rss?arg=first&arg=second
262 | rxreq
263 | expect req.url == "/feed.xml"
264 | txresp -status 200 -body "feed.xml"
265 |
266 | # Test for /feeds/* path
267 | rxreq
268 | expect req.url == "/0284CC5C777C51D158BBECCBBB56422A.xml"
269 | txresp -status 200 -body "0284CC5C777C51D158BBECCBBB56422A.xml"
270 |
271 | # Test for /feeds/*?arg=first&arg=second path
272 | rxreq
273 | expect req.url == "/0284CC5C777C51D158BBECCBBB56422A.xml"
274 | txresp -status 200 -body "0284CC5C777C51D158BBECCBBB56422A.xml"
275 | } -start
276 |
277 | # Start varnish with our VCL
278 | varnish v1 -vcl {
279 | vcl 4.1;
280 |
281 | import std;
282 |
283 | backend app {
284 | .host = "${s1_addr}";
285 | .port = "${s1_port}";
286 | }
287 |
288 | backend feeds {
289 | .host = "${s2_addr}";
290 | .port = "${s2_port}";
291 | }
292 |
293 | sub vcl_recv {
294 | set req.http.x-backend = "app";
295 |
296 | if (req.url ~ "^/podcast/feed/?(\?.*)?$") {
297 | set req.http.x-backend = "feeds";
298 | set req.url = "/podcast.xml";
299 | } else if (req.url ~ "^/gotime/feed/?(\?.*)?$") {
300 | set req.http.x-backend = "feeds";
301 | set req.url = "/gotime.xml";
302 | } else if (req.url ~ "^/master/feed/?(\?.*)?$") {
303 | set req.http.x-backend = "feeds";
304 | set req.url = "/master.xml";
305 | } else if (req.url ~ "^/feed/?(\?.*)?$") {
306 | set req.http.x-backend = "feeds";
307 | set req.url = "/feed.xml";
308 | } else if (req.url ~ "^/jsparty/feed/?(\?.*)?$") {
309 | set req.http.x-backend = "feeds";
310 | set req.url = "/jsparty.xml";
311 | } else if (req.url ~ "^/shipit/feed/?(\?.*)?$") {
312 | set req.http.x-backend = "feeds";
313 | set req.url = "/shipit.xml";
314 | } else if (req.url ~ "^/news/feed/?(\?.*)?$") {
315 | set req.http.x-backend = "feeds";
316 | set req.url = "/news.xml";
317 | } else if (req.url ~ "^/brainscience/feed/?(\?.*)?$") {
318 | set req.http.x-backend = "feeds";
319 | set req.url = "/brainscience.xml";
320 | } else if (req.url ~ "^/founderstalk/feed/?(\?.*)?$") {
321 | set req.http.x-backend = "feeds";
322 | set req.url = "/founderstalk.xml";
323 | } else if (req.url ~ "^/interviews/feed/?(\?.*)?$") {
324 | set req.http.x-backend = "feeds";
325 | set req.url = "/interviews.xml";
326 | } else if (req.url ~ "^/friends/feed/?(\?.*)?$") {
327 | set req.http.x-backend = "feeds";
328 | set req.url = "/friends.xml";
329 | } else if (req.url ~ "^/rfc/feed/?(\?.*)?$") {
330 | set req.http.x-backend = "feeds";
331 | set req.url = "/rfc.xml";
332 | } else if (req.url ~ "^/spotlight/feed/?(\?.*)?$") {
333 | set req.http.x-backend = "feeds";
334 | set req.url = "/spotlight.xml";
335 | } else if (req.url ~ "^/afk/feed/?(\?.*)?$") {
336 | set req.http.x-backend = "feeds";
337 | set req.url = "/afk.xml";
338 | } else if (req.url ~ "^/posts/feed/?(\?.*)?$") {
339 | set req.http.x-backend = "feeds";
340 | set req.url = "/posts.xml";
341 | } else if (req.url ~ "^/plusplus/xae9heiphohtupha1Ahha3aexoo0oo4W/feed/?(\?.*)?$") {
342 | set req.http.x-backend = "feeds";
343 | set req.url = "/plusplus.xml";
344 | } else if (req.url ~ "^/rss/?(\?.*)?$") {
345 | set req.http.x-backend = "feeds";
346 | set req.url = "/feed.xml";
347 | } else if (req.url ~ "^/feeds/.*(\?.*)?$") {
348 | set req.http.x-backend = "feeds";
349 | set req.url = regsub(req.url, "^/feeds/([^?]*)(\?.*)?$", "/\1.xml");
350 | }
351 | }
352 |
353 | sub vcl_hash {
354 | if (req.http.x-backend == "feeds") {
355 | set req.backend_hint = feeds;
356 | } else {
357 | set req.backend_hint = app;
358 | }
359 | unset req.http.x-backend;
360 | }
361 |
362 | # Disable caching for testing
363 | sub vcl_backend_response {
364 | set beresp.uncacheable = true;
365 | return(deliver);
366 | }
367 | } -start
368 |
369 | # / should go to app backend
370 | client c1 {
371 | txreq -url "/"
372 | rxresp
373 | expect resp.status == 200
374 | expect resp.body == "App backend"
375 | } -run
376 |
377 | # /podcast/feed should go to feeds backend
378 | client c2 {
379 | txreq -url "/podcast/feed"
380 | rxresp
381 | expect resp.status == 200
382 | expect resp.body == "podcast.xml"
383 | } -run
384 |
385 | # /podcast/feed/ should go to feeds backend
386 | client c3 {
387 | txreq -url "/podcast/feed/"
388 | rxresp
389 | expect resp.status == 200
390 | expect resp.body == "podcast.xml"
391 | } -run
392 |
393 | # /podcast/feed?arg=first&arg=second should go to feeds backend
394 | client c4 {
395 | txreq -url "/podcast/feed?arg=first&arg=second"
396 | rxresp
397 | expect resp.status == 200
398 | expect resp.body == "podcast.xml"
399 | } -run
400 |
401 | # /gotime/feed should go to feeds backend
402 | client c5 {
403 | txreq -url "/gotime/feed"
404 | rxresp
405 | expect resp.status == 200
406 | expect resp.body == "gotime.xml"
407 | } -run
408 |
409 | # /gotime/feed/ should go to feeds backend
410 | client c6 {
411 | txreq -url "/gotime/feed/"
412 | rxresp
413 | expect resp.status == 200
414 | expect resp.body == "gotime.xml"
415 | } -run
416 |
417 | # /gotime/feed?arg=first&arg=second should go to feeds backend
418 | client c7 {
419 | txreq -url "/gotime/feed?arg=first&arg=second"
420 | rxresp
421 | expect resp.status == 200
422 | expect resp.body == "gotime.xml"
423 | } -run
424 |
425 | # /master/feed should go to feeds backend
426 | client c8 {
427 | txreq -url "/master/feed"
428 | rxresp
429 | expect resp.status == 200
430 | expect resp.body == "master.xml"
431 | } -run
432 |
433 | # /master/feed/ should go to feeds backend
434 | client c9 {
435 | txreq -url "/master/feed/"
436 | rxresp
437 | expect resp.status == 200
438 | expect resp.body == "master.xml"
439 | } -run
440 |
441 | # /master/feed?arg=first&arg=second should go to feeds backend
442 | client c10 {
443 | txreq -url "/master/feed?arg=first&arg=second"
444 | rxresp
445 | expect resp.status == 200
446 | expect resp.body == "master.xml"
447 | } -run
448 |
449 | # /feed should go to feeds backend
450 | client c11 {
451 | txreq -url "/feed"
452 | rxresp
453 | expect resp.status == 200
454 | expect resp.body == "feed.xml"
455 | } -run
456 |
457 | # /feed/ should go to feeds backend
458 | client c12 {
459 | txreq -url "/feed/"
460 | rxresp
461 | expect resp.status == 200
462 | expect resp.body == "feed.xml"
463 | } -run
464 |
465 | # /feed?arg=first&arg=second should go to feeds backend
466 | client c13 {
467 | txreq -url "/feed?arg=first&arg=second"
468 | rxresp
469 | expect resp.status == 200
470 | expect resp.body == "feed.xml"
471 | } -run
472 |
473 | # /jsparty/feed should go to feeds backend
474 | client c14 {
475 | txreq -url "/jsparty/feed"
476 | rxresp
477 | expect resp.status == 200
478 | expect resp.body == "jsparty.xml"
479 | } -run
480 |
481 | # /jsparty/feed/ should go to feeds backend
482 | client c15 {
483 | txreq -url "/jsparty/feed/"
484 | rxresp
485 | expect resp.status == 200
486 | expect resp.body == "jsparty.xml"
487 | } -run
488 |
489 | # /jsparty/feed?arg=first&arg=second should go to feeds backend
490 | client c16 {
491 | txreq -url "/jsparty/feed?arg=first&arg=second"
492 | rxresp
493 | expect resp.status == 200
494 | expect resp.body == "jsparty.xml"
495 | } -run
496 |
497 | # /shipit/feed should go to feeds backend
498 | client c17 {
499 | txreq -url "/shipit/feed"
500 | rxresp
501 | expect resp.status == 200
502 | expect resp.body == "shipit.xml"
503 | } -run
504 |
505 | # /shipit/feed/ should go to feeds backend
506 | client c18 {
507 | txreq -url "/shipit/feed/"
508 | rxresp
509 | expect resp.status == 200
510 | expect resp.body == "shipit.xml"
511 | } -run
512 |
513 | # /shipit/feed?arg=first&arg=second should go to feeds backend
514 | client c19 {
515 | txreq -url "/shipit/feed?arg=first&arg=second"
516 | rxresp
517 | expect resp.status == 200
518 | expect resp.body == "shipit.xml"
519 | } -run
520 |
521 | # /news/feed should go to feeds backend
522 | client c20 {
523 | txreq -url "/news/feed"
524 | rxresp
525 | expect resp.status == 200
526 | expect resp.body == "news.xml"
527 | } -run
528 |
529 | # /news/feed/ should go to feeds backend
530 | client c21 {
531 | txreq -url "/news/feed/"
532 | rxresp
533 | expect resp.status == 200
534 | expect resp.body == "news.xml"
535 | } -run
536 |
537 | # /news/feed?arg=first&arg=second should go to feeds backend
538 | client c22 {
539 | txreq -url "/news/feed?arg=first&arg=second"
540 | rxresp
541 | expect resp.status == 200
542 | expect resp.body == "news.xml"
543 | } -run
544 |
545 | # /brainscience/feed should go to feeds backend
546 | client c23 {
547 | txreq -url "/brainscience/feed"
548 | rxresp
549 | expect resp.status == 200
550 | expect resp.body == "brainscience.xml"
551 | } -run
552 |
553 | # /brainscience/feed/ should go to feeds backend
554 | client c24 {
555 | txreq -url "/brainscience/feed/"
556 | rxresp
557 | expect resp.status == 200
558 | expect resp.body == "brainscience.xml"
559 | } -run
560 |
561 | # /brainscience/feed?arg=first&arg=second should go to feeds backend
562 | client c25 {
563 | txreq -url "/brainscience/feed?arg=first&arg=second"
564 | rxresp
565 | expect resp.status == 200
566 | expect resp.body == "brainscience.xml"
567 | } -run
568 |
569 | # /founderstalk/feed should go to feeds backend
570 | client c26 {
571 | txreq -url "/founderstalk/feed"
572 | rxresp
573 | expect resp.status == 200
574 | expect resp.body == "founderstalk.xml"
575 | } -run
576 |
577 | # /founderstalk/feed/ should go to feeds backend
578 | client c27 {
579 | txreq -url "/founderstalk/feed/"
580 | rxresp
581 | expect resp.status == 200
582 | expect resp.body == "founderstalk.xml"
583 | } -run
584 |
585 | # /founderstalk/feed?arg=first&arg=second should go to feeds backend
586 | client c28 {
587 | txreq -url "/founderstalk/feed?arg=first&arg=second"
588 | rxresp
589 | expect resp.status == 200
590 | expect resp.body == "founderstalk.xml"
591 | } -run
592 |
593 | # /interviews/feed should go to feeds backend
594 | client c29 {
595 | txreq -url "/interviews/feed"
596 | rxresp
597 | expect resp.status == 200
598 | expect resp.body == "interviews.xml"
599 | } -run
600 |
601 | # /interviews/feed/ should go to feeds backend
602 | client c30 {
603 | txreq -url "/interviews/feed/"
604 | rxresp
605 | expect resp.status == 200
606 | expect resp.body == "interviews.xml"
607 | } -run
608 |
609 | # /interviews/feed?arg=first&arg=second should go to feeds backend
610 | client c31 {
611 | txreq -url "/interviews/feed?arg=first&arg=second"
612 | rxresp
613 | expect resp.status == 200
614 | expect resp.body == "interviews.xml"
615 | } -run
616 |
617 | # /friends/feed should go to feeds backend
618 | client c32 {
619 | txreq -url "/friends/feed"
620 | rxresp
621 | expect resp.status == 200
622 | expect resp.body == "friends.xml"
623 | } -run
624 |
625 | # /friends/feed/ should go to feeds backend
626 | client c33 {
627 | txreq -url "/friends/feed/"
628 | rxresp
629 | expect resp.status == 200
630 | expect resp.body == "friends.xml"
631 | } -run
632 |
633 | # /friends/feed?arg=first&arg=second should go to feeds backend
634 | client c34 {
635 | txreq -url "/friends/feed?arg=first&arg=second"
636 | rxresp
637 | expect resp.status == 200
638 | expect resp.body == "friends.xml"
639 | } -run
640 |
641 | # /rfc/feed should go to feeds backend
642 | client c35 {
643 | txreq -url "/rfc/feed"
644 | rxresp
645 | expect resp.status == 200
646 | expect resp.body == "rfc.xml"
647 | } -run
648 |
649 | # /rfc/feed/ should go to feeds backend
650 | client c36 {
651 | txreq -url "/rfc/feed/"
652 | rxresp
653 | expect resp.status == 200
654 | expect resp.body == "rfc.xml"
655 | } -run
656 |
657 | # /rfc/feed?arg=first&arg=second should go to feeds backend
658 | client c37 {
659 | txreq -url "/rfc/feed?arg=first&arg=second"
660 | rxresp
661 | expect resp.status == 200
662 | expect resp.body == "rfc.xml"
663 | } -run
664 |
665 | # /spotlight/feed should go to feeds backend
666 | client c38 {
667 | txreq -url "/spotlight/feed"
668 | rxresp
669 | expect resp.status == 200
670 | expect resp.body == "spotlight.xml"
671 | } -run
672 |
673 | # /spotlight/feed/ should go to feeds backend
674 | client c39 {
675 | txreq -url "/spotlight/feed/"
676 | rxresp
677 | expect resp.status == 200
678 | expect resp.body == "spotlight.xml"
679 | } -run
680 |
681 | # /spotlight/feed?arg=first&arg=second should go to feeds backend
682 | client c40 {
683 | txreq -url "/spotlight/feed?arg=first&arg=second"
684 | rxresp
685 | expect resp.status == 200
686 | expect resp.body == "spotlight.xml"
687 | } -run
688 |
689 | # /afk/feed should go to feeds backend
690 | client c41 {
691 | txreq -url "/afk/feed"
692 | rxresp
693 | expect resp.status == 200
694 | expect resp.body == "afk.xml"
695 | } -run
696 |
697 | # /afk/feed/ should go to feeds backend
698 | client c42 {
699 | txreq -url "/afk/feed/"
700 | rxresp
701 | expect resp.status == 200
702 | expect resp.body == "afk.xml"
703 | } -run
704 |
705 | # /afk/feed?arg=first&arg=second should go to feeds backend
706 | client c43 {
707 | txreq -url "/afk/feed?arg=first&arg=second"
708 | rxresp
709 | expect resp.status == 200
710 | expect resp.body == "afk.xml"
711 | } -run
712 |
713 | # /posts/feed should go to feeds backend
714 | client c44 {
715 | txreq -url "/posts/feed"
716 | rxresp
717 | expect resp.status == 200
718 | expect resp.body == "posts.xml"
719 | } -run
720 |
721 | # /posts/feed/ should go to feeds backend
722 | client c45 {
723 | txreq -url "/posts/feed/"
724 | rxresp
725 | expect resp.status == 200
726 | expect resp.body == "posts.xml"
727 | } -run
728 |
729 | # /posts/feed?arg=first&arg=second should go to feeds backend
730 | client c46 {
731 | txreq -url "/posts/feed?arg=first&arg=second"
732 | rxresp
733 | expect resp.status == 200
734 | expect resp.body == "posts.xml"
735 | } -run
736 |
737 | # /plusplus/xae9heiphohtupha1Ahha3aexoo0oo4W/feed should go to feeds backend
738 | client c47 {
739 | txreq -url "/plusplus/xae9heiphohtupha1Ahha3aexoo0oo4W/feed"
740 | rxresp
741 | expect resp.status == 200
742 | expect resp.body == "plusplus.xml"
743 | } -run
744 |
745 | # /plusplus/xae9heiphohtupha1Ahha3aexoo0oo4W/feed/ should go to feeds backend
746 | client c48 {
747 | txreq -url "/plusplus/xae9heiphohtupha1Ahha3aexoo0oo4W/feed/"
748 | rxresp
749 | expect resp.status == 200
750 | expect resp.body == "plusplus.xml"
751 | } -run
752 |
753 | # /plusplus/xae9heiphohtupha1Ahha3aexoo0oo4W/feed?arg=first&arg=second should go to feeds backend
754 | client c49 {
755 | txreq -url "/plusplus/xae9heiphohtupha1Ahha3aexoo0oo4W/feed?arg=first&arg=second"
756 | rxresp
757 | expect resp.status == 200
758 | expect resp.body == "plusplus.xml"
759 | } -run
760 |
761 | # /rss should go to feeds backend
762 | client c50 {
763 | txreq -url "/rss"
764 | rxresp
765 | expect resp.status == 200
766 | expect resp.body == "feed.xml"
767 | } -run
768 |
769 | # /rss/ should go to feeds backend
770 | client c51 {
771 | txreq -url "/rss/"
772 | rxresp
773 | expect resp.status == 200
774 | expect resp.body == "feed.xml"
775 | } -run
776 |
777 | # /rss?arg=first&arg=second should go to feeds backend
778 | client c52 {
779 | txreq -url "/rss?arg=first&arg=second"
780 | rxresp
781 | expect resp.status == 200
782 | expect resp.body == "feed.xml"
783 | } -run
784 |
785 | # /feeds/* should go to feeds backend
786 | client c53 {
787 | txreq -url "/feeds/0284CC5C777C51D158BBECCBBB56422A"
788 | rxresp
789 | expect resp.status == 200
790 | expect resp.body == "0284CC5C777C51D158BBECCBBB56422A.xml"
791 | } -run
792 |
793 | # /feeds/*?arg=first&arg=second should go to feeds backend
794 | client c54 {
795 | txreq -url "/feeds/0284CC5C777C51D158BBECCBBB56422A?arg=first&arg=second"
796 | rxresp
797 | expect resp.status == 200
798 | expect resp.body == "0284CC5C777C51D158BBECCBBB56422A.xml"
799 | } -run
800 |
--------------------------------------------------------------------------------
/vector/pipedream.changelog.com/geoip.yaml:
--------------------------------------------------------------------------------
1 | enrichment_tables:
2 | # https://vector.dev/docs/reference/configuration/#enrichment-tables
3 | geoip:
4 | type: "mmdb"
5 | path: "/usr/local/share/GeoIP/GeoLite2-City.mmdb"
6 |
7 | transforms:
8 | varnish_geoip:
9 | # https://vector.dev/docs/reference/configuration/transforms/remap/
10 | type: "remap"
11 | inputs:
12 | - "varnish"
13 | source: |
14 | .transform = "varnish_geoip"
15 | geoip_data, err = get_enrichment_table_record("geoip", {"ip": .client_ip})
16 | if err == null {
17 | .geo_city = geoip_data.city.names.en
18 | .geo_country_code = geoip_data.country.iso_code
19 | .geo_country_name = geoip_data.country.names.en
20 | .geo_continent_code = geoip_data.continent.code
21 | .geo_latitude = geoip_data.location.latitude
22 | .geo_longitude = geoip_data.location.longitude
23 | }
24 |
25 | s3_json_feeds:
26 | # https://vector.dev/docs/reference/configuration/transforms/remap/
27 | type: "remap"
28 | inputs:
29 | - "varnish_geoip"
30 | source: |
31 | .transform = "s3_json_feeds"
32 | url_string = string!(.url) || ""
33 | url_parts = parse_url!("http://for.vector.parse-url.local" + url_string)
34 | url_parts.path = string!(url_parts.path)
35 | if starts_with(url_parts.path, "/feeds/") || ends_with(url_parts.path, "/feed") {
36 | .message = {
37 | "timestamp": parse_timestamp!(.time, format: "%Y-%m-%dT%H:%M:%SZ"),
38 | "client_ip": .client_ip,
39 | "geo_country": downcase!(.geo_country_name || ""),
40 | "geo_city": downcase!(.geo_city || ""),
41 | "host": .host,
42 | "url": .url,
43 | "request_method": .request,
44 | "request_protocol": .protocol,
45 | "request_referer": .request_referer,
46 | "request_user_agent": .request_user_agent,
47 | "response_state": .cache_status,
48 | "response_status": .status,
49 | "response_reason": "",
50 | "response_body_size": .resp_body_size,
51 | "server_datacenter": .server_datacenter
52 | }
53 | } else {
54 | abort
55 | }
56 |
57 | s3_csv:
58 | # https://vector.dev/docs/reference/configuration/transforms/remap/
59 | type: "remap"
60 | inputs:
61 | - "varnish_geoip"
62 | source: |
63 | .transform = "s3_csv"
64 | formatted_time = format_timestamp!(now(), "%d/%b/%Y:%H:%M:%S %z")
65 | parsed_time, err = parse_timestamp(.time, "%+")
66 | if err == null {
67 | formatted_time = format_timestamp!(parsed_time, "%d/%b/%Y:%H:%M:%S %z")
68 | }
69 | .formatted_time = "[" + formatted_time + "]"
70 | .geo_city = downcase!(.geo_city || "")
71 | .geo_country_name = downcase!(.geo_country_name || "")
72 |
73 | s3_csv_changelog:
74 | # https://vector.dev/docs/reference/configuration/transforms/filter/
75 | type: "filter"
76 | inputs:
77 | - "s3_csv"
78 | condition:
79 | type: "vrl"
80 | source: |
81 | starts_with(string!(.url), "/uploads/podcast/")
82 |
83 | s3_csv_gotime:
84 | # https://vector.dev/docs/reference/configuration/transforms/filter/
85 | type: "filter"
86 | inputs:
87 | - "s3_csv"
88 | condition:
89 | type: "vrl"
90 | source: |
91 | starts_with(string!(.url), "/uploads/gotime/")
92 |
93 | s3_csv_rfc:
94 | # https://vector.dev/docs/reference/configuration/transforms/filter/
95 | type: "filter"
96 | inputs:
97 | - "s3_csv"
98 | condition:
99 | type: "vrl"
100 | source: |
101 | starts_with(string!(.url), "/uploads/rfc/")
102 |
103 | s3_csv_founderstalk:
104 | # https://vector.dev/docs/reference/configuration/transforms/filter/
105 | type: "filter"
106 | inputs:
107 | - "s3_csv"
108 | condition:
109 | type: "vrl"
110 | source: |
111 | starts_with(string!(.url), "/uploads/founderstalk/")
112 |
113 | s3_csv_spotlight:
114 | # https://vector.dev/docs/reference/configuration/transforms/filter/
115 | type: "filter"
116 | inputs:
117 | - "s3_csv"
118 | condition:
119 | type: "vrl"
120 | source: |
121 | starts_with(string!(.url), "/uploads/spotlight/")
122 |
123 | s3_csv_jsparty:
124 | # https://vector.dev/docs/reference/configuration/transforms/filter/
125 | type: "filter"
126 | inputs:
127 | - "s3_csv"
128 | condition:
129 | type: "vrl"
130 | source: |
131 | starts_with(string!(.url), "/uploads/jsparty/")
132 |
133 | s3_csv_practicalai:
134 | # https://vector.dev/docs/reference/configuration/transforms/filter/
135 | type: "filter"
136 | inputs:
137 | - "s3_csv"
138 | condition:
139 | type: "vrl"
140 | source: |
141 | starts_with(string!(.url), "/uploads/practicalai/")
142 |
143 | s3_csv_reactpodcast:
144 | # https://vector.dev/docs/reference/configuration/transforms/filter/
145 | type: "filter"
146 | inputs:
147 | - "s3_csv"
148 | condition:
149 | type: "vrl"
150 | source: |
151 | starts_with(string!(.url), "/uploads/reactpodcast/")
152 |
153 | s3_csv_afk:
154 | # https://vector.dev/docs/reference/configuration/transforms/filter/
155 | type: "filter"
156 | inputs:
157 | - "s3_csv"
158 | condition:
159 | type: "vrl"
160 | source: |
161 | starts_with(string!(.url), "/uploads/afk/")
162 |
163 | s3_csv_backstage:
164 | # https://vector.dev/docs/reference/configuration/transforms/filter/
165 | type: "filter"
166 | inputs:
167 | - "s3_csv"
168 | condition:
169 | type: "vrl"
170 | source: |
171 | starts_with(string!(.url), "/uploads/backstage/")
172 |
173 | s3_csv_brainscience:
174 | # https://vector.dev/docs/reference/configuration/transforms/filter/
175 | type: "filter"
176 | inputs:
177 | - "s3_csv"
178 | condition:
179 | type: "vrl"
180 | source: |
181 | starts_with(string!(.url), "/uploads/brainscience/")
182 |
183 | s3_csv_shipit:
184 | # https://vector.dev/docs/reference/configuration/transforms/filter/
185 | type: "filter"
186 | inputs:
187 | - "s3_csv"
188 | condition:
189 | type: "vrl"
190 | source: |
191 | starts_with(string!(.url), "/uploads/shipit/")
192 |
193 | s3_csv_news:
194 | # https://vector.dev/docs/reference/configuration/transforms/filter/
195 | type: "filter"
196 | inputs:
197 | - "s3_csv"
198 | condition:
199 | type: "vrl"
200 | source: |
201 | starts_with(string!(.url), "/uploads/news/")
202 |
203 | s3_csv_friends:
204 | # https://vector.dev/docs/reference/configuration/transforms/filter/
205 | type: "filter"
206 | inputs:
207 | - "s3_csv"
208 | condition:
209 | type: "vrl"
210 | source: |
211 | starts_with(string!(.url), "/uploads/friends/")
212 |
213 | sinks:
214 | honeycomb:
215 | # https://vector.dev/docs/reference/configuration/sinks/honeycomb/
216 | type: "honeycomb"
217 | inputs:
218 | - "varnish_geoip"
219 | api_key: ${HONEYCOMB_API_KEY:-REMEMBER_TO_SET_THIS_IN_PRODUCTION}
220 | dataset: ${HONEYCOMB_DATASET:-REMEMBER_TO_SET_THIS_IN_PRODUCTION}
221 |
222 | s3_logs_feeds:
223 | # https://vector.dev/docs/reference/configuration/sinks/aws_s3/
224 | type: "aws_s3"
225 | inputs:
226 | - "s3_json_feeds"
227 | bucket: "changelog-logs-feeds${S3_BUCKET_SUFFIX:-}"
228 | region: ${AWS_REGION:-REMEMBER_TO_SET_THIS_IN_PRODUCTION}
229 | key_prefix: "%Y-%m-%dT%H:%M%z-${FLY_REGION:-LOCAL}-"
230 | filename_append_uuid: false
231 | encoding:
232 | codec: "text"
233 | compression: "none"
234 | batch:
235 | max_bytes: 102400 # write to S3 when 100KB worth of events are stored in memory
236 | timeout_secs: 60 # write to S3 once per minute if any events in memory (matches the key_prefix)
237 | auth:
238 | access_key_id: ${AWS_ACCESS_KEY_ID:-REMEMBER_TO_SET_THIS_IN_PRODUCTION}
239 | secret_access_key: ${AWS_SECRET_ACCESS_KEY:-REMEMBER_TO_SET_THIS_IN_PRODUCTION}
240 |
241 | s3_logs_changelog:
242 | # https://vector.dev/docs/reference/configuration/sinks/aws_s3/
243 | type: "aws_s3"
244 | inputs:
245 | - "s3_csv_changelog"
246 | bucket: "changelog-logs-podcast${S3_BUCKET_SUFFIX:-}"
247 | region: ${AWS_REGION:-REMEMBER_TO_SET_THIS_IN_PRODUCTION}
248 | key_prefix: "%Y-%m-%dT%H:%M%z-${FLY_REGION:-LOCAL}-"
249 | filename_append_uuid: false
250 | encoding:
251 | # https://vector.dev/docs/reference/configuration/sinks/file/#encoding.csv.fields
252 | codec: "csv"
253 | csv:
254 | fields:
255 | - client_ip
256 | - formatted_time
257 | - url
258 | - resp_body_size
259 | - status
260 | - request_user_agent
261 | - geo_latitude
262 | - geo_longitude
263 | - geo_city
264 | - geo_continent_code
265 | - geo_country_name
266 | compression: "none"
267 | batch:
268 | max_bytes: 102400 # write to S3 when 100KB worth of events are stored in memory
269 | timeout_secs: 60 # write to S3 once per minute if any events in memory (matches the key_prefix)
270 | auth:
271 | access_key_id: ${AWS_ACCESS_KEY_ID:-REMEMBER_TO_SET_THIS_IN_PRODUCTION}
272 | secret_access_key: ${AWS_SECRET_ACCESS_KEY:-REMEMBER_TO_SET_THIS_IN_PRODUCTION}
273 |
274 | s3_logs_gotime:
275 | # https://vector.dev/docs/reference/configuration/sinks/aws_s3/
276 | type: "aws_s3"
277 | inputs:
278 | - "s3_csv_gotime"
279 | bucket: "changelog-logs-gotime${S3_BUCKET_SUFFIX:-}"
280 | region: ${AWS_REGION:-REMEMBER_TO_SET_THIS_IN_PRODUCTION}
281 | key_prefix: "%Y-%m-%dT%H:%M%z-${FLY_REGION:-LOCAL}-"
282 | filename_append_uuid: false
283 | encoding:
284 | # https://vector.dev/docs/reference/configuration/sinks/file/#encoding.csv.fields
285 | codec: "csv"
286 | csv:
287 | fields:
288 | - client_ip
289 | - formatted_time
290 | - url
291 | - resp_body_size
292 | - status
293 | - request_user_agent
294 | - geo_latitude
295 | - geo_longitude
296 | - geo_city
297 | - geo_continent_code
298 | - geo_country_name
299 | compression: "none"
300 | batch:
301 | max_bytes: 102400 # write to S3 when 100KB worth of events are stored in memory
302 | timeout_secs: 60 # write to S3 once per minute if any events in memory (matches the key_prefix)
303 | auth:
304 | access_key_id: ${AWS_ACCESS_KEY_ID:-REMEMBER_TO_SET_THIS_IN_PRODUCTION}
305 | secret_access_key: ${AWS_SECRET_ACCESS_KEY:-REMEMBER_TO_SET_THIS_IN_PRODUCTION}
306 |
307 | s3_logs_rfc:
308 | # https://vector.dev/docs/reference/configuration/sinks/aws_s3/
309 | type: "aws_s3"
310 | inputs:
311 | - "s3_csv_rfc"
312 | bucket: "changelog-logs-rfc${S3_BUCKET_SUFFIX:-}"
313 | region: ${AWS_REGION:-REMEMBER_TO_SET_THIS_IN_PRODUCTION}
314 | key_prefix: "%Y-%m-%dT%H:%M%z-${FLY_REGION:-LOCAL}-"
315 | filename_append_uuid: false
316 | encoding:
317 | # https://vector.dev/docs/reference/configuration/sinks/file/#encoding.csv.fields
318 | codec: "csv"
319 | csv:
320 | fields:
321 | - client_ip
322 | - formatted_time
323 | - url
324 | - resp_body_size
325 | - status
326 | - request_user_agent
327 | - geo_latitude
328 | - geo_longitude
329 | - geo_city
330 | - geo_continent_code
331 | - geo_country_name
332 | compression: "none"
333 | batch:
334 | max_bytes: 102400 # write to S3 when 100KB worth of events are stored in memory
335 | timeout_secs: 60 # write to S3 once per minute if any events in memory (matches the key_prefix)
336 | auth:
337 | access_key_id: ${AWS_ACCESS_KEY_ID:-REMEMBER_TO_SET_THIS_IN_PRODUCTION}
338 | secret_access_key: ${AWS_SECRET_ACCESS_KEY:-REMEMBER_TO_SET_THIS_IN_PRODUCTION}
339 |
340 | s3_logs_founderstalk:
341 | # https://vector.dev/docs/reference/configuration/sinks/aws_s3/
342 | type: "aws_s3"
343 | inputs:
344 | - "s3_csv_founderstalk"
345 | bucket: "changelog-logs-founderstalk${S3_BUCKET_SUFFIX:-}"
346 | region: ${AWS_REGION:-REMEMBER_TO_SET_THIS_IN_PRODUCTION}
347 | key_prefix: "%Y-%m-%dT%H:%M%z-${FLY_REGION:-LOCAL}-"
348 | filename_append_uuid: false
349 | encoding:
350 | # https://vector.dev/docs/reference/configuration/sinks/file/#encoding.csv.fields
351 | codec: "csv"
352 | csv:
353 | fields:
354 | - client_ip
355 | - formatted_time
356 | - url
357 | - resp_body_size
358 | - status
359 | - request_user_agent
360 | - geo_latitude
361 | - geo_longitude
362 | - geo_city
363 | - geo_continent_code
364 | - geo_country_name
365 | compression: "none"
366 | batch:
367 | max_bytes: 102400 # write to S3 when 100KB worth of events are stored in memory
368 | timeout_secs: 60 # write to S3 once per minute if any events in memory (matches the key_prefix)
369 | auth:
370 | access_key_id: ${AWS_ACCESS_KEY_ID:-REMEMBER_TO_SET_THIS_IN_PRODUCTION}
371 | secret_access_key: ${AWS_SECRET_ACCESS_KEY:-REMEMBER_TO_SET_THIS_IN_PRODUCTION}
372 |
373 | s3_logs_spotlight:
374 | # https://vector.dev/docs/reference/configuration/sinks/aws_s3/
375 | type: "aws_s3"
376 | inputs:
377 | - "s3_csv_spotlight"
378 | bucket: "changelog-logs-spotlight${S3_BUCKET_SUFFIX:-}"
379 | region: ${AWS_REGION:-REMEMBER_TO_SET_THIS_IN_PRODUCTION}
380 | key_prefix: "%Y-%m-%dT%H:%M%z-${FLY_REGION:-LOCAL}-"
381 | filename_append_uuid: false
382 | encoding:
383 | # https://vector.dev/docs/reference/configuration/sinks/file/#encoding.csv.fields
384 | codec: "csv"
385 | csv:
386 | fields:
387 | - client_ip
388 | - formatted_time
389 | - url
390 | - resp_body_size
391 | - status
392 | - request_user_agent
393 | - geo_latitude
394 | - geo_longitude
395 | - geo_city
396 | - geo_continent_code
397 | - geo_country_name
398 | compression: "none"
399 | batch:
400 | max_bytes: 102400 # write to S3 when 100KB worth of events are stored in memory
401 | timeout_secs: 60 # write to S3 once per minute if any events in memory (matches the key_prefix)
402 | auth:
403 | access_key_id: ${AWS_ACCESS_KEY_ID:-REMEMBER_TO_SET_THIS_IN_PRODUCTION}
404 | secret_access_key: ${AWS_SECRET_ACCESS_KEY:-REMEMBER_TO_SET_THIS_IN_PRODUCTION}
405 |
406 | s3_logs_jsparty:
407 | # https://vector.dev/docs/reference/configuration/sinks/aws_s3/
408 | type: "aws_s3"
409 | inputs:
410 | - "s3_csv_jsparty"
411 | bucket: "changelog-logs-jsparty${S3_BUCKET_SUFFIX:-}"
412 | region: ${AWS_REGION:-REMEMBER_TO_SET_THIS_IN_PRODUCTION}
413 | key_prefix: "%Y-%m-%dT%H:%M%z-${FLY_REGION:-LOCAL}-"
414 | filename_append_uuid: false
415 | encoding:
416 | # https://vector.dev/docs/reference/configuration/sinks/file/#encoding.csv.fields
417 | codec: "csv"
418 | csv:
419 | fields:
420 | - client_ip
421 | - formatted_time
422 | - url
423 | - resp_body_size
424 | - status
425 | - request_user_agent
426 | - geo_latitude
427 | - geo_longitude
428 | - geo_city
429 | - geo_continent_code
430 | - geo_country_name
431 | compression: "none"
432 | batch:
433 | max_bytes: 102400 # write to S3 when 100KB worth of events are stored in memory
434 | timeout_secs: 60 # write to S3 once per minute if any events in memory (matches the key_prefix)
435 | auth:
436 | access_key_id: ${AWS_ACCESS_KEY_ID:-REMEMBER_TO_SET_THIS_IN_PRODUCTION}
437 | secret_access_key: ${AWS_SECRET_ACCESS_KEY:-REMEMBER_TO_SET_THIS_IN_PRODUCTION}
438 |
439 | s3_logs_practicalai:
440 | # https://vector.dev/docs/reference/configuration/sinks/aws_s3/
441 | type: "aws_s3"
442 | inputs:
443 | - "s3_csv_practicalai"
444 | bucket: "changelog-logs-practicalai${S3_BUCKET_SUFFIX:-}"
445 | region: ${AWS_REGION:-REMEMBER_TO_SET_THIS_IN_PRODUCTION}
446 | key_prefix: "%Y-%m-%dT%H:%M%z-${FLY_REGION:-LOCAL}-"
447 | filename_append_uuid: false
448 | encoding:
449 | # https://vector.dev/docs/reference/configuration/sinks/file/#encoding.csv.fields
450 | codec: "csv"
451 | csv:
452 | fields:
453 | - client_ip
454 | - formatted_time
455 | - url
456 | - resp_body_size
457 | - status
458 | - request_user_agent
459 | - geo_latitude
460 | - geo_longitude
461 | - geo_city
462 | - geo_continent_code
463 | - geo_country_name
464 | compression: "none"
465 | batch:
466 | max_bytes: 102400 # write to S3 when 100KB worth of events are stored in memory
467 | timeout_secs: 60 # write to S3 once per minute if any events in memory (matches the key_prefix)
468 | auth:
469 | access_key_id: ${AWS_ACCESS_KEY_ID:-REMEMBER_TO_SET_THIS_IN_PRODUCTION}
470 | secret_access_key: ${AWS_SECRET_ACCESS_KEY:-REMEMBER_TO_SET_THIS_IN_PRODUCTION}
471 |
472 | s3_logs_reactpodcast:
473 | # https://vector.dev/docs/reference/configuration/sinks/aws_s3/
474 | type: "aws_s3"
475 | inputs:
476 | - "s3_csv_reactpodcast"
477 | bucket: "changelog-logs-reactpodcast${S3_BUCKET_SUFFIX:-}"
478 | region: ${AWS_REGION:-REMEMBER_TO_SET_THIS_IN_PRODUCTION}
479 | key_prefix: "%Y-%m-%dT%H:%M%z-${FLY_REGION:-LOCAL}-"
480 | filename_append_uuid: false
481 | encoding:
482 | # https://vector.dev/docs/reference/configuration/sinks/file/#encoding.csv.fields
483 | codec: "csv"
484 | csv:
485 | fields:
486 | - client_ip
487 | - formatted_time
488 | - url
489 | - resp_body_size
490 | - status
491 | - request_user_agent
492 | - geo_latitude
493 | - geo_longitude
494 | - geo_city
495 | - geo_continent_code
496 | - geo_country_name
497 | compression: "none"
498 | batch:
499 | max_bytes: 102400 # write to S3 when 100KB worth of events are stored in memory
500 | timeout_secs: 60 # write to S3 once per minute if any events in memory (matches the key_prefix)
501 | auth:
502 | access_key_id: ${AWS_ACCESS_KEY_ID:-REMEMBER_TO_SET_THIS_IN_PRODUCTION}
503 | secret_access_key: ${AWS_SECRET_ACCESS_KEY:-REMEMBER_TO_SET_THIS_IN_PRODUCTION}
504 |
505 | s3_logs_afk:
506 | # https://vector.dev/docs/reference/configuration/sinks/aws_s3/
507 | type: "aws_s3"
508 | inputs:
509 | - "s3_csv_afk"
510 | bucket: "changelog-logs-afk${S3_BUCKET_SUFFIX:-}"
511 | region: ${AWS_REGION:-REMEMBER_TO_SET_THIS_IN_PRODUCTION}
512 | key_prefix: "%Y-%m-%dT%H:%M%z-${FLY_REGION:-LOCAL}-"
513 | filename_append_uuid: false
514 | encoding:
515 | # https://vector.dev/docs/reference/configuration/sinks/file/#encoding.csv.fields
516 | codec: "csv"
517 | csv:
518 | fields:
519 | - client_ip
520 | - formatted_time
521 | - url
522 | - resp_body_size
523 | - status
524 | - request_user_agent
525 | - geo_latitude
526 | - geo_longitude
527 | - geo_city
528 | - geo_continent_code
529 | - geo_country_name
530 | compression: "none"
531 | batch:
532 | max_bytes: 102400 # write to S3 when 100KB worth of events are stored in memory
533 | timeout_secs: 60 # write to S3 once per minute if any events in memory (matches the key_prefix)
534 | auth:
535 | access_key_id: ${AWS_ACCESS_KEY_ID:-REMEMBER_TO_SET_THIS_IN_PRODUCTION}
536 | secret_access_key: ${AWS_SECRET_ACCESS_KEY:-REMEMBER_TO_SET_THIS_IN_PRODUCTION}
537 |
538 | s3_logs_backstage:
539 | # https://vector.dev/docs/reference/configuration/sinks/aws_s3/
540 | type: "aws_s3"
541 | inputs:
542 | - "s3_csv_backstage"
543 | bucket: "changelog-logs-backstage${S3_BUCKET_SUFFIX:-}"
544 | region: ${AWS_REGION:-REMEMBER_TO_SET_THIS_IN_PRODUCTION}
545 | key_prefix: "%Y-%m-%dT%H:%M%z-${FLY_REGION:-LOCAL}-"
546 | filename_append_uuid: false
547 | encoding:
548 | # https://vector.dev/docs/reference/configuration/sinks/file/#encoding.csv.fields
549 | codec: "csv"
550 | csv:
551 | fields:
552 | - client_ip
553 | - formatted_time
554 | - url
555 | - resp_body_size
556 | - status
557 | - request_user_agent
558 | - geo_latitude
559 | - geo_longitude
560 | - geo_city
561 | - geo_continent_code
562 | - geo_country_name
563 | compression: "none"
564 | batch:
565 | max_bytes: 102400 # write to S3 when 100KB worth of events are stored in memory
566 | timeout_secs: 60 # write to S3 once per minute if any events in memory (matches the key_prefix)
567 | auth:
568 | access_key_id: ${AWS_ACCESS_KEY_ID:-REMEMBER_TO_SET_THIS_IN_PRODUCTION}
569 | secret_access_key: ${AWS_SECRET_ACCESS_KEY:-REMEMBER_TO_SET_THIS_IN_PRODUCTION}
570 |
571 | s3_logs_brainscience:
572 | # https://vector.dev/docs/reference/configuration/sinks/aws_s3/
573 | type: "aws_s3"
574 | inputs:
575 | - "s3_csv_brainscience"
576 | bucket: "changelog-logs-brainscience${S3_BUCKET_SUFFIX:-}"
577 | region: ${AWS_REGION:-REMEMBER_TO_SET_THIS_IN_PRODUCTION}
578 | key_prefix: "%Y-%m-%dT%H:%M%z-${FLY_REGION:-LOCAL}-"
579 | filename_append_uuid: false
580 | encoding:
581 | # https://vector.dev/docs/reference/configuration/sinks/file/#encoding.csv.fields
582 | codec: "csv"
583 | csv:
584 | fields:
585 | - client_ip
586 | - formatted_time
587 | - url
588 | - resp_body_size
589 | - status
590 | - request_user_agent
591 | - geo_latitude
592 | - geo_longitude
593 | - geo_city
594 | - geo_continent_code
595 | - geo_country_name
596 | compression: "none"
597 | batch:
598 | max_bytes: 102400 # write to S3 when 100KB worth of events are stored in memory
599 | timeout_secs: 60 # write to S3 once per minute if any events in memory (matches the key_prefix)
600 | auth:
601 | access_key_id: ${AWS_ACCESS_KEY_ID:-REMEMBER_TO_SET_THIS_IN_PRODUCTION}
602 | secret_access_key: ${AWS_SECRET_ACCESS_KEY:-REMEMBER_TO_SET_THIS_IN_PRODUCTION}
603 |
604 | s3_logs_shipit:
605 | # https://vector.dev/docs/reference/configuration/sinks/aws_s3/
606 | type: "aws_s3"
607 | inputs:
608 | - "s3_csv_shipit"
609 | bucket: "changelog-logs-shipit${S3_BUCKET_SUFFIX:-}"
610 | region: ${AWS_REGION:-REMEMBER_TO_SET_THIS_IN_PRODUCTION}
611 | key_prefix: "%Y-%m-%dT%H:%M%z-${FLY_REGION:-LOCAL}-"
612 | filename_append_uuid: false
613 | encoding:
614 | # https://vector.dev/docs/reference/configuration/sinks/file/#encoding.csv.fields
615 | codec: "csv"
616 | csv:
617 | fields:
618 | - client_ip
619 | - formatted_time
620 | - url
621 | - resp_body_size
622 | - status
623 | - request_user_agent
624 | - geo_latitude
625 | - geo_longitude
626 | - geo_city
627 | - geo_continent_code
628 | - geo_country_name
629 | compression: "none"
630 | batch:
631 | max_bytes: 102400 # write to S3 when 100KB worth of events are stored in memory
632 | timeout_secs: 60 # write to S3 once per minute if any events in memory (matches the key_prefix)
633 | auth:
634 | access_key_id: ${AWS_ACCESS_KEY_ID:-REMEMBER_TO_SET_THIS_IN_PRODUCTION}
635 | secret_access_key: ${AWS_SECRET_ACCESS_KEY:-REMEMBER_TO_SET_THIS_IN_PRODUCTION}
636 |
637 | s3_logs_news:
638 | # https://vector.dev/docs/reference/configuration/sinks/aws_s3/
639 | type: "aws_s3"
640 | inputs:
641 | - "s3_csv_news"
642 | bucket: "changelog-logs-news${S3_BUCKET_SUFFIX:-}"
643 | region: ${AWS_REGION:-REMEMBER_TO_SET_THIS_IN_PRODUCTION}
644 | key_prefix: "%Y-%m-%dT%H:%M%z-${FLY_REGION:-LOCAL}-"
645 | filename_append_uuid: false
646 | encoding:
647 | # https://vector.dev/docs/reference/configuration/sinks/file/#encoding.csv.fields
648 | codec: "csv"
649 | csv:
650 | fields:
651 | - client_ip
652 | - formatted_time
653 | - url
654 | - resp_body_size
655 | - status
656 | - request_user_agent
657 | - geo_latitude
658 | - geo_longitude
659 | - geo_city
660 | - geo_continent_code
661 | - geo_country_name
662 | compression: "none"
663 | batch:
664 | max_bytes: 102400 # write to S3 when 100KB worth of events are stored in memory
665 | timeout_secs: 60 # write to S3 once per minute if any events in memory (matches the key_prefix)
666 | auth:
667 | access_key_id: ${AWS_ACCESS_KEY_ID:-REMEMBER_TO_SET_THIS_IN_PRODUCTION}
668 | secret_access_key: ${AWS_SECRET_ACCESS_KEY:-REMEMBER_TO_SET_THIS_IN_PRODUCTION}
669 |
670 | s3_logs_friends:
671 | # https://vector.dev/docs/reference/configuration/sinks/aws_s3/
672 | type: "aws_s3"
673 | inputs:
674 | - "s3_csv_friends"
675 | bucket: "changelog-logs-friends${S3_BUCKET_SUFFIX:-}"
676 | region: ${AWS_REGION:-REMEMBER_TO_SET_THIS_IN_PRODUCTION}
677 | key_prefix: "%Y-%m-%dT%H:%M%z-${FLY_REGION:-LOCAL}-"
678 | filename_append_uuid: false
679 | encoding:
680 | # https://vector.dev/docs/reference/configuration/sinks/file/#encoding.csv.fields
681 | codec: "csv"
682 | csv:
683 | fields:
684 | - client_ip
685 | - formatted_time
686 | - url
687 | - resp_body_size
688 | - status
689 | - request_user_agent
690 | - geo_latitude
691 | - geo_longitude
692 | - geo_city
693 | - geo_continent_code
694 | - geo_country_name
695 | compression: "none"
696 | batch:
697 | max_bytes: 102400 # write to S3 when 100KB worth of events are stored in memory
698 | timeout_secs: 60 # write to S3 once per minute if any events in memory (matches the key_prefix)
699 | auth:
700 | access_key_id: ${AWS_ACCESS_KEY_ID:-REMEMBER_TO_SET_THIS_IN_PRODUCTION}
701 | secret_access_key: ${AWS_SECRET_ACCESS_KEY:-REMEMBER_TO_SET_THIS_IN_PRODUCTION}
702 |
--------------------------------------------------------------------------------