├── .dockerignore ├── .github └── workflows │ └── release.yaml ├── .gitignore ├── Dockerfile ├── README.md ├── fly.toml ├── start-fly-log-transporter.sh └── vector-configs ├── sinks ├── appsignal.toml ├── aws_s3.toml ├── axiom.toml ├── baselime.toml ├── better-stack.toml ├── datadog.toml ├── erasearch.toml ├── highlight.toml ├── honeybadger.toml ├── honeycomb.toml ├── http.toml ├── humio.toml ├── hyperdx.toml ├── logdna.toml ├── logflare.toml ├── loki.toml ├── new_relic.toml ├── opsverse.toml ├── papertrail.toml ├── sematext.toml ├── signoz.toml ├── slack.toml └── uptrace.toml └── vector.toml /.dockerignore: -------------------------------------------------------------------------------- 1 | .env -------------------------------------------------------------------------------- /.github/workflows/release.yaml: -------------------------------------------------------------------------------- 1 | name: Release image 2 | 3 | on: 4 | push: 5 | tags: 6 | - 'v*' 7 | 8 | jobs: 9 | push_to_registry: 10 | runs-on: ubuntu-latest 11 | name: Push image 12 | 13 | steps: 14 | - name: Checkout 15 | uses: actions/checkout@v2 16 | 17 | - name: Registry login 18 | uses: docker/login-action@f054a8b539a109f9f41c372932f1ae047eff08c9 19 | with: 20 | username: ${{ secrets.FLYIOBUILDS_DOCKERHUB_USERNAME }} 21 | password: ${{ secrets.FLYIOBUILDS_DOCKERHUB_TOKEN }} 22 | 23 | - name: Meta data 24 | id: meta 25 | uses: docker/metadata-action@98669ae865ea3cffbcbaa878cf57c20bbf1c6c38 26 | with: 27 | images: flyio/log-shipper 28 | 29 | - name: Build 30 | id: build 31 | uses: docker/build-push-action@ad44023a93711e3deb337508980b4b5e9bcdc5dc 32 | with: 33 | context: . 
34 | push: true 35 | tags: ${{ steps.meta.outputs.tags }} 36 | labels: ${{ steps.meta.outputs.labels }} 37 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .env -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM timberio/vector:0.29.1-debian 2 | COPY vector-configs /etc/vector/ 3 | COPY ./start-fly-log-transporter.sh . 4 | CMD ["bash", "start-fly-log-transporter.sh"] 5 | ENTRYPOINT [] 6 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # fly-log-shipper 2 | 3 | Ship logs from fly to other providers using [NATS](https://docs.nats.io/) and [Vector](https://vector.dev/) 4 | 5 | In this repo you will find various [Vector Sinks](https://vector.dev/docs/reference/configuration/sinks/) along with the required fly config. The end result is a Fly.io application that automatically reads your organisation logs and sends them to external providers. 6 | 7 | # Quick start 8 | 9 | 1. Create a new fly logger app based on our docker image 10 | 11 | ``` 12 | fly launch --image flyio/log-shipper:latest --no-public-ips 13 | ``` 14 | 15 | 2. Set [NATS source secrets](#nats-source-configuration) for your new app 16 | 3. Set your desired [provider](#provider-configuration) from below 17 | 18 | **That's it** - no need to set up NATS clients within your apps, as fly apps are already sending monitoring information back to fly which we can read. 19 | 20 | However for advanced uses you can still configure a NATS client in your apps to talk to this NATS server. 
See [NATS](#nats) 21 | 22 | ## NATS source configuration 23 | 24 | | Secret | Description | 25 | | -------------- | ---------------------------------------------------------------------------------------------------------------- | 26 | | `ORG` | Organisation slug (defaults to `personal`) | 27 | | `ACCESS_TOKEN` | Fly personal access token (required; set with `fly secrets set ACCESS_TOKEN=$(fly auth token)`) | 28 | | `SUBJECT` | Subject to subscribe to. See [NATS](#nats) below (defaults to `logs.>`) | 29 | | `QUEUE` | Arbitrary queue name if you want to run multiple log processes for HA and avoid duplicate messages being shipped | 30 | | `NETWORK` | 6PN network, if you want to run log-shipper through a WireGuard connection (defaults to `fdaa:0:0`) | 31 | 32 | After generating your `fly.toml`, remember to update the internal port to match the `vector` internal port 33 | defined in `vector-configs/vector.toml`. Not doing so will result in health checks failing on deployment. 34 | 35 | ``` 36 | [[services]] 37 | http_checks = [] 38 | internal_port = 8686 39 | ``` 40 | 41 | --- 42 | 43 | Set the secrets below associated with your desired log destination. 44 | 45 | ## Provider configuration 46 | 47 | ### AppSignal 48 | 49 | | Secret | Description | 50 | | ------------------------ | ---------------------- | 51 | | `APPSIGNAL_PUSH_API_KEY` | AppSignal push API key | 52 | 53 | ### AWS S3 54 | 55 | | Secret | Description | 56 | | ----------------------- | --------------------------------------------------------------------------------------- | 57 | | `AWS_ACCESS_KEY_ID` | AWS access key with access to the log bucket | 58 | | `AWS_SECRET_ACCESS_KEY` | AWS secret access key | 59 | | `AWS_BUCKET` | AWS S3 bucket to store logs in | 60 | | `AWS_REGION` | Region for the bucket | 61 | | `S3_ENDPOINT` | (optional) Endpoint URL for S3 compatible object stores such as Cloudflare R2 or Wasabi | 62 | 63 | ### Axiom 64 | 65 | | Secret | Description | 66 | | --------------- | ------------- | 67 
| | `AXIOM_TOKEN` | Axiom token | 68 | | `AXIOM_DATASET` | Axiom dataset | 69 | 70 | ### Baselime 71 | 72 | | Secret | Description | 73 | |---------------------|-----------------------------------------------| 74 | | `BASELIME_API_KEY` | Baselime API key | 75 | | `BASELIME_DATASET` | (optional) Baselime dataset (default "flyio") | 76 | 77 | ### Better Stack Logs (formerly Logtail) 78 | 79 | | Secret | Description | 80 | |-------------------------------|---------------------------------------------------------------------------| 81 | | `BETTER_STACK_SOURCE_TOKEN` | Better Stack Telemetry source token | 82 | | `BETTER_STACK_INGESTING_HOST` | Better Stack source ingesting host (default is `in.logs.betterstack.com`) | 83 | 84 | ### Datadog 85 | 86 | | Secret | Description | 87 | | ----------------- | --------------------------------------------- | 88 | | `DATADOG_API_KEY` | API key for your Datadog account | 89 | | `DATADOG_SITE` | (optional) The Datadog site. ie: datadoghq.eu | 90 | 91 | ### Highlight 92 | 93 | | Secret | Description | 94 | | ---------------------- | -------------------- | 95 | | `HIGHLIGHT_PROJECT_ID` | Highlight Project ID | 96 | 97 | ### Honeybadger 98 | 99 | | Secret | Description | 100 | | --------------------- | ------------------- | 101 | | `HONEYBADGER_API_KEY` | Honeybadger API key | 102 | 103 | ### Honeycomb 104 | 105 | | Secret | Description | 106 | | ------------------- | ----------------- | 107 | | `HONEYCOMB_API_KEY` | Honeycomb API key | 108 | | `HONEYCOMB_DATASET` | Honeycomb dataset | 109 | 110 | ### Humio 111 | 112 | | Secret | Description | 113 | | ---------------- | --------------------------------------- | 114 | | `HUMIO_TOKEN` | Humio token | 115 | | `HUMIO_ENDPOINT` | (optional) Endpoint URL to send logs to | 116 | 117 | ### HyperDX 118 | 119 | | Secret | Description | 120 | | ----------------- | --------------- | 121 | | `HYPERDX_API_KEY` | HyperDX API key | 122 | 123 | ### Logdna 124 | 125 | | Secret | Description | 126 | | 
---------------- | -------------- | 127 | | `LOGDNA_API_KEY` | LogDNA API key | 128 | 129 | ### Logflare 130 | 131 | | Secret | Description | 132 | | ----------------------- | ------------------------------------------------------- | 133 | | `LOGFLARE_API_KEY` | Logflare ingest API key | 134 | | `LOGFLARE_SOURCE_TOKEN` | Logflare source token (uuid on your Logflare dashboard) | 135 | 136 | ### Loki 137 | 138 | | Secret | Description | 139 | | --------------- | ------------- | 140 | | `LOKI_URL` | Loki Endpoint | 141 | | `LOKI_USERNAME` | Loki Username | 142 | | `LOKI_PASSWORD` | Loki Password | 143 | 144 | ### New Relic 145 | 146 | One of these is required for New Relic logs. New Relic recommend the license key be used (ref: https://docs.newrelic.com/docs/logs/enable-log-management-new-relic/enable-log-monitoring-new-relic/vector-output-sink-log-forwarding/) 147 | 148 | | Secret | Description | 149 | | ----------------------- | -------------------------------- | 150 | | `NEW_RELIC_INSERT_KEY` | (optional) New Relic Insert key | 151 | | `NEW_RELIC_LICENSE_KEY` | (optional) New Relic License key | 152 | | `NEW_RELIC_REGION` | (optional) eu or us (default us) | 153 | | `NEW_RELIC_ACCOUNT_ID` | New Relic Account Id | 154 | 155 | ### OpsVerse 156 | 157 | | Secret | Description | 158 | | ----------------------- | ---------------------- | 159 | | `OPSVERSE_LOGS_ENDPOINT`| OpsVerse Logs Endpoint | 160 | | `OPSVERSE_USERNAME` | OpsVerse Username | 161 | | `OPSVERSE_PASSWORD` | OpsVerse Password | 162 | 163 | ### Papertrail 164 | 165 | | Secret | Description | 166 | | --------------------- | ------------------- | 167 | | `PAPERTRAIL_ENDPOINT` | Papertrail endpoint | 168 | | `PAPERTRAIL_ENCODING_CODEC` | Papertrail codec (default is "json") | 169 | 170 | ### Sematext 171 | 172 | | Secret | Description | 173 | | ----------------- | --------------- | 174 | | `SEMATEXT_REGION` | Sematext region | 175 | | `SEMATEXT_TOKEN` | Sematext token | 176 | 177 | 178 | ### Signoz 179 | 180 
| | Secret | Description | 181 | | --------------------- | --------------------------------------------------------------- | 182 | | `SIGNOZ_INGESTION_KEY`| Signoz Access Token | 183 | | `SIGNOZ_URI` | Signoz URI (default is 'https://ingest.us.signoz.cloud/logs/vector') | 184 | 185 | ### Uptrace 186 | 187 | | Secret | Description | 188 | | ----------------- | ------------------ | 189 | | `UPTRACE_API_KEY` | Uptrace API key | 190 | | `UPTRACE_PROJECT` | Uptrace project ID | 191 | | `UPTRACE_SINK_INPUT` | `"log_json"`, etc. | 192 | | `UPTRACE_SINK_ENCODING` | `"json"`, etc. | 193 | 194 | For UPTRACE_SINK_ENCODING Vector expects one of `avro`, `gelf`, `json`, `logfmt`, `native`, 195 | `native_json`, `raw_message`, `text` for key `sinks.uptrace`. 196 | 197 | ### EraSearch 198 | 199 | | Secret | Description | 200 | | ----------------- | ------------------------------- | 201 | | `ERASEARCH_URL` | EraSearch Endpoint | 202 | | `ERASEARCH_AUTH` | EraSearch User | 203 | | `ERASEARCH_INDEX` | EraSearch Index you want to use | 204 | 205 | ### HTTP 206 | 207 | | Secret | Description | 208 | | ------------ | ---------------------- | 209 | | `HTTP_URL` | HTTP/HTTPS Endpoint | 210 | | `HTTP_TOKEN` | HTTP Bearer auth token | 211 | 212 | ### Slack ( experimental ) 213 | 214 | HTTP sink that can be used for sending log alerts to Slack. 215 | 216 | | Secret | Description | 217 | | ---------------------- | ---------------------- | 218 | | `SLACK_WEBHOOK_URL` | Slack WebHook URL | 219 | | `SLACK_ALERT_KEYWORDS` | Keywords to alert on | 220 | 221 | Example for setting keywords `fly secrets set SLACK_ALERT_KEYWORDS="[r'SIGTERM', r'reboot']"` 222 | 223 | --- 224 | 225 | # NATS 226 | 227 | The log stream is provided through the [NATS protocol](https://docs.nats.io/nats-protocol/nats-protocol) and is limited to subscriptions to logs in your organisations. 
228 | 229 | ## Connecting 230 | 231 | > Note: You do **not** have to manually connect a NATS client, see [Quick Start](#quick-start) 232 | 233 | If you want to add custom behaviours or modify the subject sent from your app, then you can connect your app to the NATS server manually. 234 | 235 | Any fly app can connect to the NATS server on `nats://[fdaa::3]:4223` (IPv6). 236 | 237 | **Note: you will need to supply a user / password.** 238 | 239 | > **User**: is your Fly organisation slug, which you can obtain from `fly orgs list` > **Password**: is your Fly token, which you can obtain from `fly auth token` 240 | 241 | ### Example using the NATS client 242 | 243 | Launch a nats client based on the nats-server image 244 | 245 | ``` 246 | fly launch --image="synadia/nats-server:nightly" --name="nats-client" 247 | ``` 248 | 249 | SSH into the new app 250 | 251 | ``` 252 | fly -a nats-client ssh console 253 | ``` 254 | 255 | ``` 256 | nats context add nats --server [fdaa::3]:4223 --description "NATS Demo" --select \ 257 | --user \ 258 | --password 259 | ``` 260 | 261 | ``` 262 | nats pub "logs.test" "hello world" 263 | ``` 264 | 265 | ## Subject 266 | 267 | The subject schema is `logs.<app_name>.<region>.<instance_id>` and the standard 268 | [NATS wildcards](https://docs.nats.io/nats-concepts/subjects#wildcards) can be used. 269 | In this app, the `SUBJECT` secret can be used to set the subject and limit the scope of the logs streamed. 270 | 271 | ## Queue 272 | 273 | If you would like to run multiple VMs for high availability, the NATS endpoint supports 274 | [subscription queues](https://docs.nats.io/nats-concepts/queue) to ensure messages are only sent to one 275 | subscriber of the named queue. The `QUEUE` secret can be set to configure a queue name for the client. 276 | 277 | --- 278 | 279 | # Vector 280 | 281 | The `nats` source component sends logs to other downstream transforms and sinks in the Vector config. 282 | This processes the log lines and sends them to various providers. 
283 | The config is generated from a shell wrapper script which uses conditionals on environment variables to 284 | decide which Vector sinks to configure in the final config. 285 | -------------------------------------------------------------------------------- /fly.toml: -------------------------------------------------------------------------------- 1 | app = "fly-log-shipper-example" 2 | 3 | [metrics] 4 | port = 9598 5 | path = "/metrics" 6 | -------------------------------------------------------------------------------- /start-fly-log-transporter.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -euo pipefail 3 | 4 | template() { eval $'cat <<_EOF\n'"$(awk '1;END{print"_EOF"}')"; } 5 | sponge() { cat <<<"$(cat)" >"$1"; } 6 | filter() { for i in "$@"; do template <"$i" | sponge "$i" || rm "$i"; done; } 7 | filter /etc/vector/sinks/*.toml 2>&- 8 | echo 'Configured sinks:' 9 | find /etc/vector/sinks -type f -exec basename -s '.toml' {} \; 10 | 11 | exec vector -c /etc/vector/vector.toml -C /etc/vector/sinks 12 | -------------------------------------------------------------------------------- /vector-configs/sinks/appsignal.toml: -------------------------------------------------------------------------------- 1 | [sinks.appsignal] 2 | type = "appsignal" 3 | inputs = [ "log_json" ] 4 | push_api_key = "${APPSIGNAL_PUSH_API_KEY}" 5 | -------------------------------------------------------------------------------- /vector-configs/sinks/aws_s3.toml: -------------------------------------------------------------------------------- 1 | [sinks.aws_s3] 2 | # General 3 | type = "aws_s3" 4 | inputs = ["log_json"] 5 | bucket = "${AWS_BUCKET:-$BUCKET_NAME}" 6 | compression = "gzip" 7 | region = "${AWS_REGION:-auto}" 8 | framing.method = "newline_delimited" 9 | encoding.codec = "json" 10 | key_prefix = "{{fly.app.name}}/%F/" # optional, default 11 | healthcheck.enabled = true # optional, default 12 | 
${S3_ENDPOINT+endpoint = "\"$S3_ENDPOINT"\"} 13 | ${AWS_ENDPOINT_URL_S3+endpoint = "\"$AWS_ENDPOINT_URL_S3"\"} -------------------------------------------------------------------------------- /vector-configs/sinks/axiom.toml: -------------------------------------------------------------------------------- 1 | [sinks.axiom] 2 | type = "axiom" 3 | inputs = ["log_json"] 4 | token = "${AXIOM_TOKEN}" 5 | dataset = "${AXIOM_DATASET}" 6 | 7 | -------------------------------------------------------------------------------- /vector-configs/sinks/baselime.toml: -------------------------------------------------------------------------------- 1 | [sinks.baselime] 2 | type = "http" 3 | inputs = ["log_json"] 4 | uri = "https://events.baselime.io/v1/${BASELIME_DATASET:-flyio}" 5 | encoding.codec = "json" 6 | auth.strategy = "bearer" 7 | auth.token = "${BASELIME_API_KEY}" 8 | 9 | -------------------------------------------------------------------------------- /vector-configs/sinks/better-stack.toml: -------------------------------------------------------------------------------- 1 | [transforms.remap_better_stack_timestamp] 2 | type = "remap" 3 | inputs = ["log_json"] 4 | source = ''' 5 | .dt = del(.timestamp) 6 | ''' 7 | 8 | [sinks.better_stack] 9 | type = "http" 10 | inputs = ["remap_better_stack_timestamp"] 11 | uri = "https://${BETTER_STACK_INGESTING_HOST:-in.logs.betterstack.com}" 12 | encoding.codec = "json" 13 | auth.strategy = "bearer" 14 | auth.token = "${BETTER_STACK_SOURCE_TOKEN:-$LOGTAIL_TOKEN}" 15 | -------------------------------------------------------------------------------- /vector-configs/sinks/datadog.toml: -------------------------------------------------------------------------------- 1 | [sinks.datadog] 2 | # General 3 | type = "datadog_logs" # required 4 | inputs = ["log_json"] # required 5 | default_api_key = "${DATADOG_API_KEY}" # required 6 | site = "${DATADOG_SITE:-datadoghq.com}" # optional 7 | compression = "gzip" # optional, default 8 | 9 | # 
Healthcheck 10 | healthcheck.enabled = true # optional, default 11 | 12 | -------------------------------------------------------------------------------- /vector-configs/sinks/erasearch.toml: -------------------------------------------------------------------------------- 1 | [transforms.trf_json] 2 | type = "remap" 3 | inputs = ["log_json"] 4 | source = ''' 5 | if starts_with(.message, "{") ?? false { 6 | # parse json messages 7 | json = object!(parse_json!(.message)) 8 | del(.message) 9 | . |= json 10 | } 11 | ''' 12 | 13 | [sinks.erasearch] 14 | type="elasticsearch" 15 | inputs = ["trf_json"] 16 | endpoint="${ERASEARCH_URL}" 17 | healthcheck.enabled = false 18 | mode = "bulk" 19 | bulk.index = "${ERASEARCH_INDEX}" 20 | 21 | [sinks.erasearch.request] 22 | concurrency = "adaptive" 23 | 24 | [sinks.erasearch.request.headers] 25 | #ERASEARCH_AUTH in the form of "Bearer " for cloud.era.co 26 | #ERASEARCH_AUTH in the form of "Basic " for basic Authorization 27 | Authorization = "${ERASEARCH_AUTH}" 28 | -------------------------------------------------------------------------------- /vector-configs/sinks/highlight.toml: -------------------------------------------------------------------------------- 1 | [sinks.highlight] 2 | type = "http" 3 | inputs = ["log_json"] 4 | encoding.codec = "json" 5 | framing.method = "newline_delimited" 6 | compression = "gzip" 7 | uri = "https://pub.highlight.io/v1/logs/json" 8 | headers.x-highlight-project = "${HIGHLIGHT_PROJECT_ID}" 9 | -------------------------------------------------------------------------------- /vector-configs/sinks/honeybadger.toml: -------------------------------------------------------------------------------- 1 | [transforms.remap_honeybadger_timestamp] 2 | type = "remap" 3 | inputs = ["log_json"] 4 | source = ''' 5 | .ts = del(.timestamp) 6 | ''' 7 | 8 | [sinks.honeybadger] 9 | type = "http" 10 | inputs = ["remap_honeybadger_timestamp"] 11 | uri = "https://api.honeybadger.io/v1/events" 12 | framing.method = 
"newline_delimited" 13 | encoding.codec = "json" 14 | request.headers = {"X-API-Key" = "${HONEYBADGER_API_KEY}"} 15 | -------------------------------------------------------------------------------- /vector-configs/sinks/honeycomb.toml: -------------------------------------------------------------------------------- 1 | [sinks.honeycomb] 2 | # General 3 | type = "honeycomb" 4 | inputs = ["log_json"] 5 | api_key = "${HONEYCOMB_API_KEY}" 6 | dataset = "${HONEYCOMB_DATASET}" 7 | 8 | -------------------------------------------------------------------------------- /vector-configs/sinks/http.toml: -------------------------------------------------------------------------------- 1 | [sinks.http] 2 | type = "http" 3 | inputs = ["log_json"] 4 | uri = "${HTTP_URL}" 5 | encoding.codec = "json" 6 | auth.strategy = "bearer" 7 | auth.token = "${HTTP_TOKEN}" 8 | 9 | -------------------------------------------------------------------------------- /vector-configs/sinks/humio.toml: -------------------------------------------------------------------------------- 1 | [sinks.humio] 2 | # General 3 | type = "humio_logs" 4 | inputs = ["log_json"] 5 | compression = "gzip" 6 | host_key = "fly.app.instance" 7 | token = "${HUMIO_TOKEN}" 8 | 9 | # Encoding 10 | encoding.codec = "json" 11 | 12 | ${HUMIO_ENDPOINT+endpoint = "$HUMIO_ENDPOINT"} -------------------------------------------------------------------------------- /vector-configs/sinks/hyperdx.toml: -------------------------------------------------------------------------------- 1 | [sinks.hyperdx] 2 | type = "http" 3 | inputs = ["log_json"] 4 | uri = "https://in.hyperdx.io?hdx_platform=flyio" 5 | encoding.codec = "json" 6 | auth.strategy = "bearer" 7 | auth.token = "${HYPERDX_API_KEY}" 8 | 9 | -------------------------------------------------------------------------------- /vector-configs/sinks/logdna.toml: -------------------------------------------------------------------------------- 1 | [sinks.logdna] 2 | # General 3 | type = 
"logdna" # required 4 | inputs = ["log_json"] # required 5 | api_key = "${LOGDNA_API_KEY}" # required 6 | hostname = "{{fly.app.instance}}" 7 | 8 | -------------------------------------------------------------------------------- /vector-configs/sinks/logflare.toml: -------------------------------------------------------------------------------- 1 | [transforms.remap_logflare_loglevel] 2 | type = "remap" 3 | inputs = ["log_json"] 4 | source = ''' 5 | .level = .log.level 6 | ''' 7 | 8 | [sinks.logflare] 9 | type = "http" 10 | inputs = ["remap_logflare_loglevel"] 11 | uri = "https://api.logflare.app/logs/vector?source=${LOGFLARE_SOURCE_TOKEN}" 12 | request.headers = {"X-API-Key" = "${LOGFLARE_API_KEY}"} 13 | encoding.codec = "json" 14 | compression = "none" 15 | 16 | -------------------------------------------------------------------------------- /vector-configs/sinks/loki.toml: -------------------------------------------------------------------------------- 1 | [transforms.loki_json] 2 | type = "remap" 3 | inputs = ["log_json"] 4 | source = ''' 5 | .level = .log.level 6 | if starts_with(.message, "{") ?? false { 7 | # parse json messages 8 | json = object!(parse_json!(.message)) 9 | del(.message) 10 | . 
|= json 11 | } 12 | ''' 13 | 14 | [sinks.loki] 15 | type = "loki" 16 | inputs = ["loki_json"] 17 | endpoint = "${LOKI_URL}" 18 | compression = "gzip" 19 | auth.strategy = "basic" 20 | auth.user = "${LOKI_USERNAME}" 21 | auth.password = "${LOKI_PASSWORD}" 22 | encoding.codec = "json" 23 | 24 | labels.event_provider = "{{event.provider}}" 25 | labels.fly_region = "{{fly.region}}" 26 | labels.fly_app_name = "{{fly.app.name}}" 27 | labels.fly_app_instance = "{{fly.app.instance}}" 28 | labels.host = "{{host}}" 29 | labels.level = "{{level}}" 30 | 31 | -------------------------------------------------------------------------------- /vector-configs/sinks/new_relic.toml: -------------------------------------------------------------------------------- 1 | [sinks.new_relic] 2 | # General 3 | type = "new_relic" 4 | inputs = ["log_json"] 5 | compression = "gzip" 6 | region = "${NEW_RELIC_REGION}" 7 | account_id = "${NEW_RELIC_ACCOUNT_ID}" 8 | api = "logs" 9 | # Key: ${NEW_RELIC_LICENSE_KEY-$NEW_RELIC_INSERT_KEY} 10 | ${NEW_RELIC_LICENSE_KEY+license_key = \"$NEW_RELIC_LICENSE_KEY\"} 11 | ${NEW_RELIC_INSERT_KEY+insert_key = \"$NEW_RELIC_INSERT_KEY\"} -------------------------------------------------------------------------------- /vector-configs/sinks/opsverse.toml: -------------------------------------------------------------------------------- 1 | [transforms.loki_json] 2 | type = "remap" 3 | inputs = ["log_json"] 4 | source = ''' 5 | .level = .log.level 6 | if starts_with(.message, "{") ?? false { 7 | # parse json messages 8 | json = object!(parse_json!(.message)) 9 | del(.message) 10 | . 
|= json 11 | } 12 | ''' 13 | 14 | [sinks.opsverse] 15 | type = "loki" 16 | inputs = ["loki_json"] 17 | endpoint = "${OPSVERSE_LOGS_ENDPOINT}" 18 | compression = "gzip" 19 | auth.strategy = "basic" 20 | auth.user = "${OPSVERSE_USERNAME}" 21 | auth.password = "${OPSVERSE_PASSWORD}" 22 | encoding.codec = "logfmt" 23 | out_of_order_action = "accept" 24 | 25 | labels.event_provider = "{{event.provider}}" 26 | labels.fly_region = "{{fly.region}}" 27 | labels.fly_app_name = "{{fly.app.name}}" 28 | labels.fly_app_instance = "{{fly.app.instance}}" 29 | labels.host = "{{host}}" 30 | labels.level = "{{level}}" 31 | 32 | -------------------------------------------------------------------------------- /vector-configs/sinks/papertrail.toml: -------------------------------------------------------------------------------- 1 | [sinks.papertrail] 2 | type = "papertrail" 3 | inputs = ["log_json"] 4 | endpoint = "${PAPERTRAIL_ENDPOINT}" 5 | encoding.codec = "${PAPERTRAIL_ENCODING_CODEC:-json}" 6 | -------------------------------------------------------------------------------- /vector-configs/sinks/sematext.toml: -------------------------------------------------------------------------------- 1 | [sinks.sematext] 2 | type = "sematext_logs" 3 | inputs = ["log_json"] 4 | region = "${SEMATEXT_REGION}" 5 | token = "${SEMATEXT_TOKEN}" 6 | 7 | -------------------------------------------------------------------------------- /vector-configs/sinks/signoz.toml: -------------------------------------------------------------------------------- 1 | [sinks.signoz] 2 | type = "http" 3 | inputs = ["log_json"] 4 | # Potential regions: "us", "eu", "in" 5 | # See https://signoz.io/docs/logs-management/send-logs/vector-logs-to-signoz/ 6 | # for more details 7 | uri = "${SIGNOZ_URI:-https://ingest.us.signoz.cloud/logs/vector}" 8 | 9 | [sinks.signoz.encoding] 10 | codec = "json" 11 | 12 | [sinks.signoz.request.headers] 13 | signoz-access-token = "${SIGNOZ_INGESTION_KEY}" 14 | 
-------------------------------------------------------------------------------- /vector-configs/sinks/slack.toml: -------------------------------------------------------------------------------- 1 | [transforms.log_filter] 2 | type = "filter" 3 | inputs = [ "log_json" ] 4 | condition = ''' 5 | match_any!(.message, ${SLACK_ALERT_KEYWORDS}) 6 | ''' 7 | 8 | [transforms.log_throttle] 9 | type = "throttle" 10 | inputs = [ "log_filter" ] 11 | threshold = 1 12 | window_secs = 1 13 | 14 | [transforms.log_parse] 15 | type = "remap" 16 | inputs = [ "log_throttle" ] 17 | source = ''' 18 | .blocks = [{"type": "section", "text": {"type": "mrkdwn", "text":"New alert from *" + .fly.app.name + "*"}},{ "type":"section", "text":{"type": "mrkdwn", "text": "*AppId:* " + .fly.app.instance + "\n*Message:*\n" + "\`\`\`" + .message + "\`\`\`"} }] ?? {} 19 | del(.message) 20 | del(.event) 21 | del(.fly) 22 | del(.host) 23 | del(.log) 24 | del(.timestamp) 25 | . = {"message": encode_json(.)} 26 | ''' 27 | 28 | [sinks.log_http] 29 | type = "http" 30 | inputs = ["log_parse"] 31 | method = "post" 32 | encoding.codec = "text" 33 | uri = "${SLACK_WEBHOOK_URL}" 34 | -------------------------------------------------------------------------------- /vector-configs/sinks/uptrace.toml: -------------------------------------------------------------------------------- 1 | [sinks.uptrace] 2 | type = "http" 3 | inputs = ["${UPTRACE_SINK_INPUT}"] 4 | encoding.codec = "${UPTRACE_SINK_ENCODING}" 5 | framing.method = "newline_delimited" 6 | compression = "gzip" 7 | uri = "https://api.uptrace.dev/api/v1/vector/logs" 8 | headers.uptrace-dsn = "https://${UPTRACE_API_KEY}@uptrace.dev/${UPTRACE_PROJECT}" 9 | -------------------------------------------------------------------------------- /vector-configs/vector.toml: -------------------------------------------------------------------------------- 1 | [api] 2 | enabled = true 3 | address = "[::]:8686" 4 | 5 | [sources.fly_log_metrics] 6 | type = "internal_metrics" 7 
| 8 | [sources.nats] 9 | type = "nats" 10 | url = "nats://[${NETWORK-fdaa}::3]:4223" 11 | queue = "${QUEUE-}" 12 | subject = "${SUBJECT-logs.>}" 13 | auth.strategy = "user_password" 14 | auth.user_password.user = "${ORG-personal}" 15 | auth.user_password.password = "${ACCESS_TOKEN?}" 16 | connection_name = "Fly logs stream" 17 | 18 | [transforms.log_json] 19 | type = "remap" 20 | inputs = ["nats"] 21 | source = ''' 22 | . = parse_json!(.message) 23 | ''' 24 | 25 | [sinks.fly_log_metrics_prometheus] 26 | type = "prometheus_exporter" # required 27 | inputs = ["fly_log_metrics"] # required 28 | address = "[::]:9598" # required 29 | default_namespace = "fly-logs" # optional, no default 30 | 31 | [sinks.blackhole] 32 | type = "blackhole" 33 | inputs = ["log_json"] 34 | print_interval_secs = 100000 35 | --------------------------------------------------------------------------------