├── .go-version ├── tools └── tools.go ├── examples ├── warehouse │ ├── versions.tf │ ├── variables.tf │ ├── outputs.tf │ └── main.tf ├── scrape │ ├── outputs.tf │ ├── versions.tf │ ├── variables.tf │ └── main.tf ├── basic │ ├── versions.tf │ ├── outputs.tf │ ├── variables.tf │ └── main.tf ├── advanced │ ├── versions.tf │ ├── variables.tf │ ├── outputs.tf │ └── main.tf ├── connection │ ├── versions.tf │ ├── variables.tf │ ├── main.tf │ └── outputs.tf └── README.md ├── .gitignore ├── docs ├── resources │ ├── metric.md │ ├── source_group.md │ ├── errors_application_group.md │ ├── warehouse_source_group.md │ ├── warehouse_embedding.md │ ├── warehouse_time_series.md │ ├── connection.md │ ├── errors_application.md │ ├── warehouse_source.md │ └── source.md ├── data-sources │ ├── source_group.md │ ├── metric.md │ ├── errors_application_group.md │ ├── warehouse_source_group.md │ ├── warehouse_embedding.md │ ├── connection.md │ ├── errors_application.md │ ├── warehouse_source.md │ └── source.md └── index.md ├── .github └── workflows │ ├── build.yml │ ├── release.yml │ └── test.yml ├── internal └── provider │ ├── data_metric.go │ ├── ptr.go │ ├── provider_test.go │ ├── data_source_source_group.go │ ├── provider.go │ ├── data_source.go │ ├── resource_warehouse_embedding_test.go │ ├── client.go │ ├── type_string_or_int.go │ ├── resource_warehouse_source_group_test.go │ ├── resource_errors_application_group_test.go │ ├── resource_warehouse_time_series_test.go │ ├── resource_source_group.go │ ├── data_connection.go │ ├── resource_connection_test.go │ ├── data_warehouse_embedding.go │ ├── data_source_test.go │ ├── resource_metric.go │ ├── resource_metric_test.go │ ├── resource.go │ ├── resource_warehouse_embedding.go │ ├── resource_source_group_test.go │ ├── resource_warehouse_source_group.go │ ├── resource_errors_application_group.go │ ├── resource_warehouse_time_series.go │ └── resource_connection.go ├── templates └── index.md.tmpl ├── main.go ├── .goreleaser.yml ├── README.md ├── Makefile ├── go.mod └── LICENSE /.go-version: -------------------------------------------------------------------------------- 1 | 1.23 2 | -------------------------------------------------------------------------------- /tools/tools.go: -------------------------------------------------------------------------------- 1 | // +build tools 2 | 3 | package tools 4 | 5 | import ( 6 | _ "github.com/hashicorp/terraform-plugin-docs/cmd/tfplugindocs" 7 | ) 8 | -------------------------------------------------------------------------------- /examples/warehouse/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_providers { 3 | logtail = { 4 | source = "BetterStackHQ/logtail" 5 | version = ">= 0.7.0" 6 | } 7 | } 8 | } 9 | -------------------------------------------------------------------------------- /examples/scrape/outputs.tf: -------------------------------------------------------------------------------- 1 | output "logtail_scrape_urls" { 2 | value = logtail_source.this.scrape_urls 3 | } 4 | output "logtail_data_region" { 5 | value = logtail_source.this.data_region 6 | } 7 | -------------------------------------------------------------------------------- /examples/basic/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 0.13" 3 | required_providers { 4 | logtail = { 5 | source = "BetterStackHQ/logtail" 6 | version = ">= 0.7.0" 7 | } 8 | } 9 | } 10 | 
-------------------------------------------------------------------------------- /examples/advanced/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 0.13" 3 | required_providers { 4 | logtail = { 5 | source = "BetterStackHQ/logtail" 6 | version = ">= 0.7.0" 7 | } 8 | } 9 | } 10 | -------------------------------------------------------------------------------- /examples/connection/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 0.13" 3 | required_providers { 4 | logtail = { 5 | source = "betterstackhq/logtail" 6 | version = "0.7.3" 7 | } 8 | } 9 | } 10 | -------------------------------------------------------------------------------- /examples/scrape/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 0.13" 3 | required_providers { 4 | logtail = { 5 | source = "BetterStackHQ/logtail" 6 | version = ">= 0.3.1" 7 | } 8 | } 9 | } 10 | -------------------------------------------------------------------------------- /examples/basic/outputs.tf: -------------------------------------------------------------------------------- 1 | output "logtail_source_token" { 2 | value = logtail_source.this.token 3 | } 4 | output "logtail_ingesting_host" { 5 | value = logtail_source.this.ingesting_host 6 | } 7 | output "logtail_data_region" { 8 | value = logtail_source.this.data_region 9 | } 10 | -------------------------------------------------------------------------------- /examples/advanced/variables.tf: -------------------------------------------------------------------------------- 1 | variable "logtail_api_token" { 2 | type = string 3 | description = < 16 | ## Schema 17 | 18 | ### Required 19 | 20 | - `aggregations` (List of String) The list of aggregations to perform on the metric. 21 | - `name` (String) The name of this metric. 22 | - `source_id` (String) The ID of the source this metric belongs to. 23 | - `sql_expression` (String) The SQL expression used to extract the metric value. 24 | - `type` (String) The type of the metric. 25 | 26 | ### Read-Only 27 | 28 | - `id` (String) The ID of this metric. 
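A minimal configuration sketch for this resource, adapted from the metric definitions in `examples/advanced/main.tf`; the source resource, metric name, and SQL expression are illustrative:

```terraform
resource "logtail_source" "this" {
  name     = "Production Server"
  platform = "ubuntu"
}

resource "logtail_metric" "duration_ms" {
  source_id      = logtail_source.this.id
  name           = "duration_ms"
  sql_expression = "getJSON(raw, 'duration_ms')"
  aggregations   = ["avg", "max", "min"]
  type           = "float64_delta"
}
```

String-typed metrics can pass an empty `aggregations` list, as the `service_name` metric in the advanced example does.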
29 | -------------------------------------------------------------------------------- /examples/warehouse/outputs.tf: -------------------------------------------------------------------------------- 1 | output "warehouse_source_group_id" { 2 | description = "ID of the created warehouse source group" 3 | value = logtail_warehouse_source_group.group.id 4 | } 5 | 6 | output "warehouse_source_id" { 7 | description = "ID of the created warehouse source" 8 | value = logtail_warehouse_source.this.id 9 | } 10 | 11 | output "warehouse_source_token" { 12 | description = "Token of the created warehouse source" 13 | value = logtail_warehouse_source.this.token 14 | sensitive = true 15 | } 16 | 17 | output "warehouse_embedding_message_id" { 18 | description = "ID of the message embedding" 19 | value = logtail_warehouse_embedding.message.id 20 | } 21 | 22 | output "warehouse_time_series_message_embedding_id" { 23 | description = "ID of the message embedding time series with vector index" 24 | value = logtail_warehouse_time_series.message_embedding.id 25 | } 26 | -------------------------------------------------------------------------------- /.github/workflows/build.yml: -------------------------------------------------------------------------------- 1 | name: build 2 | on: 3 | push: 4 | branches: 5 | - main 6 | pull_request: 7 | branches: 8 | - main 9 | jobs: 10 | lint: 11 | runs-on: ubuntu-24.04 12 | steps: 13 | - uses: actions/checkout@v3 14 | - uses: actions/setup-go@v4 15 | with: 16 | go-version: '1.23.x' 17 | - uses: hashicorp/setup-terraform@v3 18 | with: 19 | terraform_version: 'latest' 20 | - run: make lint 21 | check_docs: 22 | runs-on: ubuntu-24.04 23 | steps: 24 | - uses: actions/checkout@v3 25 | - uses: actions/setup-go@v4 26 | with: 27 | go-version: '1.23.x' 28 | - uses: hashicorp/setup-terraform@v3 29 | with: 30 | terraform_version: 'latest' 31 | - name: Generate docs automatically 32 | run: make gen 33 | - name: Check no versioned file has been updated 34 | uses: numtide/clean-git-action@v1 35 | -------------------------------------------------------------------------------- /internal/provider/data_metric.go: -------------------------------------------------------------------------------- 1 | package provider 2 | 3 | import ( 4 | "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" 5 | ) 6 | 7 | func newMetricDataSource() *schema.Resource { 8 | s := make(map[string]*schema.Schema) 9 | for k, v := range metricSchema { 10 | cp := *v 11 | switch k { 12 | case "source_id": 13 | case "name": 14 | cp.Computed = false 15 | cp.Optional = false 16 | cp.Required = true 17 | default: 18 | cp.Computed = true 19 | cp.Optional = false 20 | cp.Required = false 21 | cp.ValidateFunc = nil 22 | cp.ValidateDiagFunc = nil 23 | cp.Default = nil 24 | cp.DefaultFunc = nil 25 | cp.DiffSuppressFunc = nil 26 | } 27 | s[k] = &cp 28 | } 29 | return &schema.Resource{ 30 | ReadContext: metricLookup, 31 | Description: "This Data Source allows you to look up existing Metrics using their name. 
You can list all your existing metrics via the [Metrics API](https://betterstack.com/docs/logs/api/list-all-existing-metrics/).", 32 | Schema: s, 33 | } 34 | } 35 | -------------------------------------------------------------------------------- /docs/data-sources/source_group.md: -------------------------------------------------------------------------------- 1 | --- 2 | # generated by https://github.com/hashicorp/terraform-plugin-docs 3 | page_title: "logtail_source_group Data Source - terraform-provider-logtail" 4 | subcategory: "" 5 | description: |- 6 | This data source allows you to get information about a Source Group. For more information about the Source Groups API check https://betterstack.com/docs/logs/api/ 7 | --- 8 | 9 | # logtail_source_group (Data Source) 10 | 11 | This data source allows you to get information about a Source Group. For more information about the Source Groups API check https://betterstack.com/docs/logs/api/ 12 | 13 | 14 | 15 | 16 | ## Schema 17 | 18 | ### Required 19 | 20 | - `name` (String) The name of this source group. 21 | 22 | ### Read-Only 23 | 24 | - `created_at` (String) The time when this source group was created. 25 | - `id` (String) The ID of this source group. 26 | - `sort_index` (Number) The sort index of this source group. 27 | - `team_name` (String) Used to specify the team the resource should be created in when using global tokens. 28 | - `updated_at` (String) The time when this source group was updated. 29 | -------------------------------------------------------------------------------- /docs/data-sources/metric.md: -------------------------------------------------------------------------------- 1 | --- 2 | # generated by https://github.com/hashicorp/terraform-plugin-docs 3 | page_title: "logtail_metric Data Source - terraform-provider-logtail" 4 | subcategory: "" 5 | description: |- 6 | This Data Source allows you to look up existing Metrics using their name. You can list all your existing metrics via the Metrics API https://betterstack.com/docs/logs/api/list-all-existing-metrics/. 7 | --- 8 | 9 | # logtail_metric (Data Source) 10 | 11 | This Data Source allows you to look up existing Metrics using their name. You can list all your existing metrics via the [Metrics API](https://betterstack.com/docs/logs/api/list-all-existing-metrics/). 12 | 13 | 14 | 15 | 16 | ## Schema 17 | 18 | ### Required 19 | 20 | - `name` (String) The name of this metric. 21 | - `source_id` (String) The ID of the source this metric belongs to. 22 | 23 | ### Read-Only 24 | 25 | - `aggregations` (List of String) The list of aggregations to perform on the metric. 26 | - `id` (String) The ID of this metric. 27 | - `sql_expression` (String) The SQL expression used to extract the metric value. 28 | - `type` (String) The type of the metric. 29 | -------------------------------------------------------------------------------- /docs/resources/source_group.md: -------------------------------------------------------------------------------- 1 | --- 2 | # generated by https://github.com/hashicorp/terraform-plugin-docs 3 | page_title: "logtail_source_group Resource - terraform-provider-logtail" 4 | subcategory: "" 5 | description: |- 6 | This resource allows you to create, modify, and delete your Source Groups. For more information about the Source Groups API check https://betterstack.com/docs/logs/api/ 7 | --- 8 | 9 | # logtail_source_group (Resource) 10 | 11 | This resource allows you to create, modify, and delete your Source Groups. 
For more information about the Source Groups API check https://betterstack.com/docs/logs/api/ 12 | 13 | 14 | 15 | 16 | ## Schema 17 | 18 | ### Required 19 | 20 | - `name` (String) The name of this source group. 21 | 22 | ### Optional 23 | 24 | - `sort_index` (Number) The sort index of this source group. 25 | - `team_name` (String) Used to specify the team the resource should be created in when using global tokens. 26 | 27 | ### Read-Only 28 | 29 | - `created_at` (String) The time when this source group was created. 30 | - `id` (String) The ID of this source group. 31 | - `updated_at` (String) The time when this source group was updated. 32 | -------------------------------------------------------------------------------- /examples/README.md: -------------------------------------------------------------------------------- 1 | # Example usage 2 | [![build](https://github.com/BetterStackHQ/terraform-provider-logtail/actions/workflows/build.yml/badge.svg?branch=main)](https://github.com/BetterStackHQ/terraform-provider-logtail/actions/workflows/build.yml) 3 | [![tests](https://github.com/BetterStackHQ/terraform-provider-logtail/actions/workflows/test.yml/badge.svg?branch=main)](https://github.com/BetterStackHQ/terraform-provider-logtail/actions/workflows/test.yml) 4 | [![documentation](https://img.shields.io/badge/-documentation-blue)](https://registry.terraform.io/providers/BetterStackHQ/logtail/latest/docs) 5 | 6 | These examples demonstrate how to provision and manage resources such as sources and metrics in Better Stack using Terraform. 7 | 8 | For instructions how to try the examples for yourself, see the subdirectories. 9 | You can start with the [basic example](./basic). 10 | 11 | ## Documentation 12 | 13 | See [Better Stack Telemetry API docs](https://betterstack.com/docs/logs/api/getting-started/) to obtain API token and get the complete list of parameter options. 14 | Or explore the [Terraform Registry provider documentation](https://registry.terraform.io/providers/BetterStackHQ/logtail/latest/docs). 15 | -------------------------------------------------------------------------------- /templates/index.md.tmpl: -------------------------------------------------------------------------------- 1 | --- 2 | layout: "" 3 | page_title: "Provider: logtail" 4 | description: |- 5 | The Better Stack Telemetry provider. 6 | --- 7 | 8 | # Better Stack Telemetry Provider 9 | 10 | [Better Stack Telemetry](https://telemetry.betterstack.com) provider provides resources to interact with the [Telemetry API](https://betterstack.com/docs/logs/api/getting-started/). 11 | 12 | ## Installation 13 | 14 | ```terraform 15 | terraform { 16 | required_version = ">= 0.13" 17 | required_providers { 18 | logtail = { 19 | source = "BetterStackHQ/logtail" 20 | version = ">= 0.2.0" 21 | } 22 | } 23 | } 24 | ``` 25 | 26 | ## Example usage 27 | 28 | In our GitHub repository, you can [see multiple executable examples](https://github.com/BetterStackHQ/terraform-provider-logtail/tree/master/examples). 29 | Here's a simple one to get you started: 30 | 31 | ```terraform 32 | provider "logtail" { 33 | # `api_token` can be omitted if LOGTAIL_API_TOKEN env var is set. 
34 | api_token = "XXXXXXXXXXXXXXXXXXXXXXXX" 35 | } 36 | 37 | resource "logtail_source" "this" { 38 | name = "Production Server" 39 | platform = "ubuntu" 40 | } 41 | 42 | output "logtail_source_token" { 43 | value = logtail_source.this.token 44 | } 45 | ``` 46 | 47 | {{ .SchemaMarkdown | trimspace }} 48 | -------------------------------------------------------------------------------- /docs/data-sources/errors_application_group.md: -------------------------------------------------------------------------------- 1 | --- 2 | # generated by https://github.com/hashicorp/terraform-plugin-docs 3 | page_title: "logtail_errors_application_group Data Source - terraform-provider-logtail" 4 | subcategory: "" 5 | description: |- 6 | This Data Source allows you to look up existing Errors application groups using their name. For more information about the Errors API check https://betterstack.com/docs/errors/api/applications-groups/list/ 7 | --- 8 | 9 | # logtail_errors_application_group (Data Source) 10 | 11 | This Data Source allows you to look up existing Errors application groups using their name. For more information about the Errors API check https://betterstack.com/docs/errors/api/applications-groups/list/ 12 | 13 | 14 | 15 | 16 | ## Schema 17 | 18 | ### Required 19 | 20 | - `name` (String) Application group name. Must be unique within your team. 21 | 22 | ### Read-Only 23 | 24 | - `created_at` (String) The time when this application group was created. 25 | - `id` (String) The ID of this application group. 26 | - `sort_index` (Number) The sort index of this application group. 27 | - `team_name` (String) Used to specify the team the resource should be created in when using global tokens. 28 | - `updated_at` (String) The time when this application group was updated. 29 | -------------------------------------------------------------------------------- /docs/resources/errors_application_group.md: -------------------------------------------------------------------------------- 1 | --- 2 | # generated by https://github.com/hashicorp/terraform-plugin-docs 3 | page_title: "logtail_errors_application_group Resource - terraform-provider-logtail" 4 | subcategory: "" 5 | description: |- 6 | This resource allows you to create, modify, and delete your Errors application groups. For more information about the Errors API check https://betterstack.com/docs/errors/api/applications-groups/create/ 7 | --- 8 | 9 | # logtail_errors_application_group (Resource) 10 | 11 | This resource allows you to create, modify, and delete your Errors application groups. For more information about the Errors API check https://betterstack.com/docs/errors/api/applications-groups/create/ 12 | 13 | 14 | 15 | 16 | ## Schema 17 | 18 | ### Required 19 | 20 | - `name` (String) Application group name. Must be unique within your team. 21 | 22 | ### Optional 23 | 24 | - `sort_index` (Number) The sort index of this application group. 25 | - `team_name` (String) Used to specify the team the resource should be created in when using global tokens. 26 | 27 | ### Read-Only 28 | 29 | - `created_at` (String) The time when this application group was created. 30 | - `id` (String) The ID of this application group. 31 | - `updated_at` (String) The time when this application group was updated. 
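A short sketch adapted from `examples/advanced/main.tf`, showing a group and an application attached to it via `application_group_id`; the names and platform are illustrative:

```terraform
resource "logtail_errors_application_group" "errors_group" {
  name = "Terraform Advanced Errors Application Group"
}

resource "logtail_errors_application" "this" {
  name                 = "Terraform Advanced Errors Application"
  platform             = "ruby_errors"
  application_group_id = logtail_errors_application_group.errors_group.id
}
```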
32 | -------------------------------------------------------------------------------- /docs/data-sources/warehouse_source_group.md: -------------------------------------------------------------------------------- 1 | --- 2 | # generated by https://github.com/hashicorp/terraform-plugin-docs 3 | page_title: "logtail_warehouse_source_group Data Source - terraform-provider-logtail" 4 | subcategory: "" 5 | description: |- 6 | This Data Source allows you to look up existing Warehouse source groups using their name. For more information about the Warehouse API check https://betterstack.com/docs/warehouse/api/source-groups/index/ 7 | --- 8 | 9 | # logtail_warehouse_source_group (Data Source) 10 | 11 | This Data Source allows you to look up existing Warehouse source groups using their name. For more information about the Warehouse API check https://betterstack.com/docs/warehouse/api/source-groups/index/ 12 | 13 | 14 | 15 | 16 | ## Schema 17 | 18 | ### Required 19 | 20 | - `name` (String) The name of the warehouse source group. Can contain letters, numbers, spaces, and special characters. 21 | 22 | ### Read-Only 23 | 24 | - `created_at` (String) The time when this warehouse source group was created. 25 | - `id` (String) The ID of this warehouse source group. 26 | - `sort_index` (Number) The sort index of this warehouse source group. 27 | - `team_name` (String) Used to specify the team the resource should be created in when using global tokens. 28 | - `updated_at` (String) The time when this warehouse source group was updated. 29 | -------------------------------------------------------------------------------- /docs/resources/warehouse_source_group.md: -------------------------------------------------------------------------------- 1 | --- 2 | # generated by https://github.com/hashicorp/terraform-plugin-docs 3 | page_title: "logtail_warehouse_source_group Resource - terraform-provider-logtail" 4 | subcategory: "" 5 | description: |- 6 | This resource allows you to create, modify, and delete your Warehouse source groups. For more information about the Warehouse API check https://betterstack.com/docs/warehouse/api/source-groups/create/ 7 | --- 8 | 9 | # logtail_warehouse_source_group (Resource) 10 | 11 | This resource allows you to create, modify, and delete your Warehouse source groups. For more information about the Warehouse API check https://betterstack.com/docs/warehouse/api/source-groups/create/ 12 | 13 | 14 | 15 | 16 | ## Schema 17 | 18 | ### Required 19 | 20 | - `name` (String) The name of the warehouse source group. Can contain letters, numbers, spaces, and special characters. 21 | 22 | ### Optional 23 | 24 | - `sort_index` (Number) The sort index of this warehouse source group. 25 | - `team_name` (String) Used to specify the team the resource should be created in when using global tokens. 26 | 27 | ### Read-Only 28 | 29 | - `created_at` (String) The time when this warehouse source group was created. 30 | - `id` (String) The ID of this warehouse source group. 31 | - `updated_at` (String) The time when this warehouse source group was updated. 
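A sketch based on `examples/warehouse/main.tf`: the group is created first and then referenced from a warehouse source through `warehouse_source_group_id`. The source arguments shown here are an abridged, illustrative subset of that example:

```terraform
resource "logtail_warehouse_source_group" "group" {
  name = "Terraform Warehouse Source Group"
}

resource "logtail_warehouse_source" "this" {
  name                      = "Terraform Warehouse Source"
  data_region               = "us_east"
  warehouse_source_group_id = logtail_warehouse_source_group.group.id
}
```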
32 | -------------------------------------------------------------------------------- /docs/data-sources/warehouse_embedding.md: -------------------------------------------------------------------------------- 1 | --- 2 | # generated by https://github.com/hashicorp/terraform-plugin-docs 3 | page_title: "logtail_warehouse_embedding Data Source - terraform-provider-logtail" 4 | subcategory: "" 5 | description: |- 6 | This data source allows you to retrieve information about a Warehouse embedding. For more information about the Warehouse Embeddings API check https://betterstack.com/docs/warehouse/api/embeddings/ 7 | --- 8 | 9 | # logtail_warehouse_embedding (Data Source) 10 | 11 | This data source allows you to retrieve information about a Warehouse embedding. For more information about the Warehouse Embeddings API check https://betterstack.com/docs/warehouse/api/embeddings/ 12 | 13 | 14 | 15 | 16 | ## Schema 17 | 18 | ### Optional 19 | 20 | - `id` (String) The ID of the embedding to retrieve. 21 | - `source_id` (String) The ID of the warehouse source to filter embeddings by. 22 | 23 | ### Read-Only 24 | 25 | - `created_at` (String) The time when this embedding was created. 26 | - `dimension` (Number) The vector dimension of the embeddings to generate. 27 | - `embed_from` (String) The source column name containing the text to embed. 28 | - `embed_to` (String) The target column name where the generated embeddings will be stored. 29 | - `model` (String) The name of the embedding model to use. 30 | - `updated_at` (String) The time when this embedding was last updated. 31 | -------------------------------------------------------------------------------- /docs/resources/warehouse_embedding.md: -------------------------------------------------------------------------------- 1 | --- 2 | # generated by https://github.com/hashicorp/terraform-plugin-docs 3 | page_title: "logtail_warehouse_embedding Resource - terraform-provider-logtail" 4 | subcategory: "" 5 | description: |- 6 | This resource allows you to create and manage embeddings for vector similarity search in Warehouse. For more information about the Warehouse Embeddings API check https://betterstack.com/docs/warehouse/api/embeddings/ 7 | --- 8 | 9 | # logtail_warehouse_embedding (Resource) 10 | 11 | This resource allows you to create and manage embeddings for vector similarity search in Warehouse. For more information about the Warehouse Embeddings API check https://betterstack.com/docs/warehouse/api/embeddings/ 12 | 13 | 14 | 15 | 16 | ## Schema 17 | 18 | ### Required 19 | 20 | - `dimension` (Number) The vector dimension of the embeddings to generate. 21 | - `embed_from` (String) The source column name containing the text to embed. 22 | - `embed_to` (String) The target column name where the generated embeddings will be stored. 23 | - `model` (String) The name of the embedding model to use (e.g., `embeddinggemma:300m`). 24 | - `source_id` (String) The ID of the Warehouse source to create the embedding for. 25 | 26 | ### Read-Only 27 | 28 | - `created_at` (String) The time when this embedding was created. 29 | - `id` (String) The ID of this embedding. 30 | - `updated_at` (String) The time when this embedding was last updated. 
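A minimal sketch, assuming a `logtail_warehouse_source.this` resource exists (as in `examples/warehouse/main.tf`). The column names are illustrative, the model string is the example from the schema description above, and `768` is one of the dimensions listed for vector time series:

```terraform
resource "logtail_warehouse_embedding" "message" {
  source_id  = logtail_warehouse_source.this.id
  embed_from = "message"           # illustrative source column containing text
  embed_to   = "message_embedding" # illustrative target column for vectors
  model      = "embeddinggemma:300m"
  dimension  = 768
}
```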
31 | -------------------------------------------------------------------------------- /internal/provider/ptr.go: -------------------------------------------------------------------------------- 1 | package provider 2 | 3 | import ( 4 | "fmt" 5 | 6 | "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" 7 | ) 8 | 9 | // nolint 10 | func load(d *schema.ResourceData, key string, receiver interface{}) { 11 | switch x := receiver.(type) { 12 | case **string: 13 | if v, ok := d.GetOkExists(key); ok { 14 | t := v.(string) 15 | *x = &t 16 | } 17 | case **int: 18 | if v, ok := d.GetOkExists(key); ok { 19 | t := v.(int) 20 | *x = &t 21 | } 22 | case **bool: 23 | if v, ok := d.GetOkExists(key); ok { 24 | t := v.(bool) 25 | *x = &t 26 | } 27 | case **[]string: 28 | if v, ok := d.GetOkExists(key); ok { 29 | var t []string 30 | for _, v := range v.([]interface{}) { 31 | t = append(t, v.(string)) 32 | } 33 | *x = &t 34 | } 35 | case **[]int: 36 | if v, ok := d.GetOkExists(key); ok { 37 | var t []int 38 | for _, v := range v.([]interface{}) { 39 | t = append(t, v.(int)) 40 | } 41 | *x = &t 42 | } 43 | case **[]map[string]interface{}: 44 | if v, ok := d.GetOkExists(key); ok { 45 | var t []map[string]interface{} 46 | for _, v := range v.([]interface{}) { 47 | entry := v.(map[string]interface{}) 48 | newEntry := map[string]interface{}{} 49 | for mapKey, mapValue := range entry { 50 | newEntry[mapKey] = mapValue 51 | } 52 | t = append(t, newEntry) 53 | } 54 | *x = &t 55 | } 56 | 57 | default: 58 | panic(fmt.Errorf("unexpected type %T", receiver)) 59 | } 60 | } 61 | -------------------------------------------------------------------------------- /docs/index.md: -------------------------------------------------------------------------------- 1 | --- 2 | layout: "" 3 | page_title: "Provider: logtail" 4 | description: |- 5 | The Better Stack Telemetry provider. 6 | --- 7 | 8 | # Better Stack Telemetry Provider 9 | 10 | [Better Stack Telemetry](https://telemetry.betterstack.com) provider provides resources to interact with the [Telemetry API](https://betterstack.com/docs/logs/api/getting-started/). 11 | 12 | ## Installation 13 | 14 | ```terraform 15 | terraform { 16 | required_version = ">= 0.13" 17 | required_providers { 18 | logtail = { 19 | source = "BetterStackHQ/logtail" 20 | version = ">= 0.2.0" 21 | } 22 | } 23 | } 24 | ``` 25 | 26 | ## Example usage 27 | 28 | In our GitHub repository, you can [see multiple executable examples](https://github.com/BetterStackHQ/terraform-provider-logtail/tree/master/examples). 29 | Here's a simple one to get you started: 30 | 31 | ```terraform 32 | provider "logtail" { 33 | # `api_token` can be omitted if LOGTAIL_API_TOKEN env var is set. 34 | api_token = "XXXXXXXXXXXXXXXXXXXXXXXX" 35 | } 36 | 37 | resource "logtail_source" "this" { 38 | name = "Production Server" 39 | platform = "ubuntu" 40 | } 41 | 42 | output "logtail_source_token" { 43 | value = logtail_source.this.token 44 | } 45 | ``` 46 | 47 | 48 | ## Schema 49 | 50 | ### Required 51 | 52 | - `api_token` (String, Sensitive) Better Stack Telemetry API token. The value can be omitted if `LOGTAIL_API_TOKEN` environment variable is set. See https://betterstack.com/docs/logs/api/getting-started/#get-an-logs-api-token on how to obtain the API token for your team. 
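As a sketch of the environment-variable alternative described above, the provider block can be left empty when `LOGTAIL_API_TOKEN` is exported; the token value is a placeholder:

```terraform
# export LOGTAIL_API_TOKEN="XXXXXXXXXXXXXXXXXXXXXXXX"
provider "logtail" {}
```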
53 | -------------------------------------------------------------------------------- /main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "flag" 5 | "fmt" 6 | "io" 7 | "log" 8 | "os" 9 | 10 | "github.com/betterstackhq/terraform-provider-logtail/internal/provider" 11 | "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" 12 | "github.com/hashicorp/terraform-plugin-sdk/v2/plugin" 13 | ) 14 | 15 | // Format Terraform examples/. 16 | //go:generate terraform fmt -recursive ./examples/ 17 | 18 | // Generate Provider Documentation (see https://www.terraform.io/docs/registry/providers/docs.html for details). 19 | //go:generate go run github.com/hashicorp/terraform-plugin-docs/cmd/tfplugindocs 20 | 21 | // Value injected during build process via -ldflags "-X main.version=...". 22 | var version string 23 | 24 | func main() { 25 | var printVersionAndExit bool 26 | var debugMode bool 27 | 28 | flag.BoolVar(&printVersionAndExit, "version", false, "print version") 29 | flag.BoolVar(&debugMode, "debug", false, "set to true to run the provider with support for debuggers like delve") 30 | flag.Parse() 31 | 32 | if printVersionAndExit { 33 | fmt.Println(version) 34 | return 35 | } 36 | 37 | // Do not include a timestamp (terraform includes it already). 38 | log.SetFlags(log.Lshortfile) 39 | // Log request/response bodies (that may contain sensitive data) if (and only if) 40 | // TF_PROVIDER_LOGTAIL_LOG_INSECURE env var is set to 1. 41 | if os.Getenv("TF_PROVIDER_LOGTAIL_LOG_INSECURE") != "1" { 42 | log.SetOutput(io.Discard) 43 | } 44 | 45 | opts := &plugin.ServeOpts{ 46 | ProviderFunc: func() *schema.Provider { 47 | return provider.New(provider.WithVersion(version)) 48 | }, 49 | Debug: debugMode, 50 | } 51 | 52 | plugin.Serve(opts) 53 | } 54 | -------------------------------------------------------------------------------- /.goreleaser.yml: -------------------------------------------------------------------------------- 1 | # Visit https://goreleaser.com for documentation on how to customize this 2 | # behavior. 3 | before: 4 | hooks: 5 | # this is just an example and not a requirement for provider building/publishing 6 | - go mod tidy 7 | builds: 8 | - env: 9 | # goreleaser does not work with CGO, it could also complicate 10 | # usage by users in CI/CD systems like Terraform Cloud where 11 | # they are unable to install libraries. 12 | - CGO_ENABLED=0 13 | mod_timestamp: '{{ .CommitTimestamp }}' 14 | flags: 15 | - -trimpath 16 | ldflags: 17 | - '-s -w -X main.version={{.Version}} -X main.commit={{.Commit}}' 18 | goos: 19 | - freebsd 20 | - windows 21 | - linux 22 | - darwin 23 | goarch: 24 | - amd64 25 | - '386' 26 | - arm 27 | - arm64 28 | ignore: 29 | - goos: darwin 30 | goarch: '386' 31 | - goos: windows 32 | goarch: arm64 33 | binary: '{{ .ProjectName }}_v{{ .Version }}' 34 | archives: 35 | - format: zip 36 | name_template: '{{ .ProjectName }}_{{ .Version }}_{{ .Os }}_{{ .Arch }}' 37 | checksum: 38 | name_template: '{{ .ProjectName }}_{{ .Version }}_SHA256SUMS' 39 | algorithm: sha256 40 | signs: 41 | - artifacts: checksum 42 | args: 43 | # if you are using this in a GitHub action or some other automated pipeline, you 44 | # need to pass the batch flag to indicate its not interactive. 
45 | - "--batch" 46 | - "--local-user" 47 | - "{{ .Env.GPG_FINGERPRINT }}" # set this environment variable for your signing key 48 | - "--output" 49 | - "${signature}" 50 | - "--detach-sign" 51 | - "${artifact}" 52 | release: 53 | # If you want to manually examine the release before its live, uncomment this line: 54 | # draft: true 55 | changelog: 56 | skip: true 57 | -------------------------------------------------------------------------------- /.github/workflows/release.yml: -------------------------------------------------------------------------------- 1 | # This GitHub action can publish assets for release when a tag is created. 2 | # Currently its setup to run on any tag that matches the pattern "v*" (ie. v0.1.0). 3 | # 4 | # This uses an action (hashicorp/ghaction-import-gpg) that assumes you set your 5 | # private key in the `GPG_PRIVATE_KEY` secret and passphrase in the `PASSPHRASE` 6 | # secret. If you would rather own your own GPG handling, please fork this action 7 | # or use an alternative one for key handling. 8 | # 9 | # You will need to pass the `--batch` flag to `gpg` in your signing step 10 | # in `goreleaser` to indicate this is being used in a non-interactive mode. 11 | # 12 | name: release 13 | on: 14 | push: 15 | tags: 16 | - 'v*' 17 | workflow_dispatch: 18 | 19 | jobs: 20 | goreleaser: 21 | runs-on: ubuntu-latest 22 | steps: 23 | - 24 | name: Checkout 25 | uses: actions/checkout@v2.3.4 26 | - 27 | name: Unshallow 28 | run: git fetch --prune --unshallow 29 | - 30 | name: Set up Go 31 | uses: actions/setup-go@v2 32 | with: 33 | go-version: '1.23.x' 34 | - 35 | name: Import GPG key 36 | id: import_gpg 37 | uses: crazy-max/ghaction-import-gpg@v5 38 | with: 39 | # These secrets are configured for the repository: 40 | gpg_private_key: ${{ secrets.GPG_PRIVATE_KEY }} 41 | passphrase: ${{ secrets.PASSPHRASE }} 42 | - 43 | name: Run GoReleaser 44 | uses: goreleaser/goreleaser-action@v3.2.0 45 | with: 46 | version: '~> 1.26' 47 | args: release --clean 48 | env: 49 | GPG_FINGERPRINT: ${{ steps.import_gpg.outputs.fingerprint }} 50 | # GitHub sets this automatically 51 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 52 | -------------------------------------------------------------------------------- /internal/provider/provider_test.go: -------------------------------------------------------------------------------- 1 | package provider 2 | 3 | import ( 4 | "net/http" 5 | "net/http/httptest" 6 | "sync/atomic" 7 | "testing" 8 | 9 | "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" 10 | "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" 11 | ) 12 | 13 | func TestProvider(t *testing.T) { 14 | if err := New().InternalValidate(); err != nil { 15 | t.Fatal(err) 16 | } 17 | } 18 | 19 | func TestProviderInit(t *testing.T) { 20 | var success int32 21 | 22 | server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { 23 | t.Log("Received " + r.Method + " " + r.RequestURI) 24 | 25 | if r.Header.Get("Authorization") != "Bearer foo" { 26 | t.Fatal("Not authorized: " + r.Header.Get("Authorization")) 27 | } 28 | 29 | exptectedUA := "terraform-provider-logtail/0.0.0-0" 30 | if r.Header.Get("User-Agent") != exptectedUA { 31 | t.Fatalf("User-Agent: got %q, want %q", r.Header.Get("User-Agent"), exptectedUA) 32 | } 33 | 34 | atomic.StoreInt32(&success, 1) 35 | _, _ = w.Write([]byte(`{"data":[{"id":"1","attributes":{"name":"Test 
source","platform":"ubuntu","token":"token123","table_name":"abc","ingesting_paused":false}}],"pagination":{"next":null}}`)) 36 | })) 37 | defer server.Close() 38 | 39 | resource.Test(t, resource.TestCase{ 40 | IsUnitTest: true, 41 | ProviderFactories: map[string]func() (*schema.Provider, error){ 42 | "logtail": func() (*schema.Provider, error) { 43 | return New(WithURL(server.URL), WithVersion("0.0.0-0")), nil 44 | }, 45 | }, 46 | Steps: []resource.TestStep{ 47 | // Step 1 - create. 48 | { 49 | Config: ` 50 | provider "logtail" { 51 | api_token = "foo" 52 | } 53 | data "logtail_source" "this" { 54 | table_name = "abc" 55 | } 56 | `, 57 | }, 58 | }, 59 | }) 60 | 61 | if atomic.LoadInt32(&success) != int32(1) { 62 | t.Fatalf("HTTP server didn't receive any requests") 63 | } 64 | } 65 | -------------------------------------------------------------------------------- /docs/resources/warehouse_time_series.md: -------------------------------------------------------------------------------- 1 | --- 2 | # generated by https://github.com/hashicorp/terraform-plugin-docs 3 | page_title: "logtail_warehouse_time_series Resource - terraform-provider-logtail" 4 | subcategory: "" 5 | description: |- 6 | This resource allows you to create and delete your Warehouse time series. For more information about the Warehouse API check https://betterstack.com/docs/warehouse/api/time-series/create/ 7 | --- 8 | 9 | # logtail_warehouse_time_series (Resource) 10 | 11 | This resource allows you to create and delete your Warehouse time series. For more information about the Warehouse API check https://betterstack.com/docs/warehouse/api/time-series/create/ 12 | 13 | 14 | 15 | 16 | ## Schema 17 | 18 | ### Required 19 | 20 | - `name` (String) The name of the time series. Must contain only letters, numbers, and underscores. 21 | - `source_id` (String) The ID of the Warehouse source to create the time series for. 22 | - `sql_expression` (String) The SQL expression used to compute the time series. For example `JSONExtract(raw, 'response_time', 'Nullable(Float64)')`. 23 | - `type` (String) The data type of the time series. Valid types are: `string`, `string_low_cardinality`, `int64_delta`, `int64`, `uint64_delta`, `uint64`, `float64_delta`, `datetime64_delta`, `boolean`, `array_bfloat16`, `array_float32` 24 | 25 | ### Optional 26 | 27 | - `aggregations` (List of String) An array of aggregation functions (e.g., `avg`, `min`, `max`). If omitted, no aggregations are applied. 28 | - `expression_index` (String) The type of vector index to apply (e.g., `vector_similarity`). Only applicable for vector types (`array_bfloat16`, `array_float32`). 29 | - `vector_dimension` (Number) The vector dimension if `expression_index` is `vector_similarity` (e.g., `512`). Supported values: 256, 384, 512, 768, 1024, 1536, 3072, 4096, 10752. 30 | - `vector_distance_function` (String) The distance function to use for vector similarity (e.g., `cosine`, `l2`). 31 | 32 | ### Read-Only 33 | 34 | - `id` (String) The ID of this time series. 
35 | -------------------------------------------------------------------------------- /examples/warehouse/main.tf: -------------------------------------------------------------------------------- 1 | provider "logtail" { 2 | api_token = var.logtail_api_token 3 | } 4 | 5 | resource "logtail_warehouse_source_group" "group" { 6 | name = "Terraform Warehouse Source Group" 7 | } 8 | 9 | resource "logtail_warehouse_source" "this" { 10 | name = "Terraform Warehouse Source" 11 | data_region = "us_east" 12 | events_retention = 30 13 | time_series_retention = 60 14 | live_tail_pattern = "{status} {message}" 15 | warehouse_source_group_id = logtail_warehouse_source_group.group.id 16 | vrl_transformation = < 16 | ## Schema 17 | 18 | ### Optional 19 | 20 | - `id` (String) The ID of the connection to retrieve. 21 | 22 | ### Read-Only 23 | 24 | - `client_type` (String) Type of client connection. 25 | - `created_at` (String) The time when this connection was created. 26 | - `created_by` (Map of String) Information about the user who created this connection. 27 | - `data_region` (String) Data region of the connection. 28 | - `data_sources` (List of Object) List of available data sources for this connection. (see [below for nested schema](#nestedatt--data_sources)) 29 | - `host` (String) The connection hostname. 30 | - `ip_allowlist` (List of String) Array of IP addresses allowed to use this connection. 31 | - `note` (String) Descriptive note for the connection. 32 | - `port` (Number) The connection port. 33 | - `sample_query` (String) A sample query showing how to use this connection. 34 | - `team_ids` (List of Number) Array of team IDs associated with the connection. 35 | - `team_names` (List of String) Array of team names associated with the connection. 36 | - `username` (String) The connection username. 37 | - `valid_until` (String) Timestamp when the connection expires. 
38 | 39 | 40 | ### Nested Schema for `data_sources` 41 | 42 | Read-Only: 43 | 44 | - `data_sources` (List of String) 45 | - `source_id` (Number) 46 | - `source_name` (String) 47 | - `team_name` (String) 48 | -------------------------------------------------------------------------------- /examples/advanced/main.tf: -------------------------------------------------------------------------------- 1 | provider "logtail" { 2 | api_token = var.logtail_api_token 3 | } 4 | 5 | resource "logtail_source_group" "group" { 6 | name = "Terraform Advanced Source Group" 7 | } 8 | 9 | resource "logtail_source" "this" { 10 | name = "Terraform Advanced Source" 11 | platform = "http" 12 | ingesting_paused = true 13 | data_region = "germany" 14 | live_tail_pattern = "{level} {message}" 15 | logs_retention = 60 16 | metrics_retention = 90 17 | vrl_transformation = <[a-zA-Z_-]+)\] .* in (?P\d+(?:\.\d+)?)ms') 20 | if (err == null) { 21 | .service_name = parsed.service 22 | .duration_ms = to_float!(parsed.duration) 23 | } 24 | EOT 25 | source_group_id = logtail_source_group.group.id 26 | } 27 | 28 | resource "logtail_metric" "duration_ms" { 29 | source_id = logtail_source.this.id 30 | name = "duration_ms" 31 | sql_expression = "getJSON(raw, 'duration_ms')" 32 | aggregations = ["avg", "max", "min"] 33 | type = "float64_delta" 34 | } 35 | 36 | resource "logtail_metric" "service_name" { 37 | source_id = logtail_source.this.id 38 | name = "service_name" 39 | sql_expression = "getJSON(raw, 'service_name')" 40 | aggregations = [] 41 | type = "string_low_cardinality" 42 | } 43 | 44 | data "logtail_metric" "level" { 45 | source_id = logtail_source.this.id 46 | name = "level" 47 | } 48 | 49 | resource "logtail_errors_application_group" "errors_group" { 50 | name = "Terraform Advanced Errors Application Group" 51 | } 52 | 53 | resource "logtail_errors_application" "this" { 54 | name = "Terraform Advanced Errors Application" 55 | platform = "ruby_errors" 56 | ingesting_paused = true 57 | data_region = "germany" 58 | errors_retention = 90 59 | application_group_id = logtail_errors_application_group.errors_group.id 60 | } 61 | 62 | data "logtail_errors_application" "lookup" { 63 | name = logtail_errors_application.this.name 64 | } 65 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # terraform-provider-logtail 2 | [![build](https://github.com/BetterStackHQ/terraform-provider-logtail/actions/workflows/build.yml/badge.svg?branch=main)](https://github.com/BetterStackHQ/terraform-provider-logtail/actions/workflows/build.yml) 3 | [![tests](https://github.com/BetterStackHQ/terraform-provider-logtail/actions/workflows/test.yml/badge.svg?branch=main)](https://github.com/BetterStackHQ/terraform-provider-logtail/actions/workflows/test.yml) 4 | [![documentation](https://img.shields.io/badge/-documentation-blue)](https://registry.terraform.io/providers/BetterStackHQ/logtail/latest/docs) 5 | 6 | Terraform (0.13+) provider for [Better Stack Telemetry](https://betterstack.com/logs) (formerly *Logtail.com*). 7 | 8 | ## Installation 9 | 10 | ```terraform 11 | terraform { 12 | required_version = ">= 0.13" 13 | required_providers { 14 | logtail = { 15 | source = "BetterStackHQ/logtail" 16 | version = ">= 0.2.0" 17 | } 18 | } 19 | } 20 | ``` 21 | 22 | ## Example usage 23 | 24 | See [`/examples` directory](./examples) for multiple ready-to-use examples. 
25 | Here's a simple one to get you started: 26 | 27 | ```terraform 28 | provider "logtail" { 29 | # `api_token` can be omitted if LOGTAIL_API_TOKEN env var is set. 30 | api_token = "XXXXXXXXXXXXXXXXXXXXXXXX" 31 | } 32 | 33 | resource "logtail_source" "this" { 34 | name = "Production Server" 35 | platform = "ubuntu" 36 | } 37 | 38 | output "logtail_source_token" { 39 | value = logtail_source.this.token 40 | } 41 | ``` 42 | 43 | ## Documentation 44 | 45 | See [Better Stack Telemetry API docs](https://betterstack.com/docs/logs/api/getting-started/) to obtain API token and get the complete list of parameter options. 46 | Or explore the [Terraform Registry provider documentation](https://registry.terraform.io/providers/BetterStackHQ/logtail/latest/docs). 47 | 48 | ## Development 49 | 50 | > PREREQUISITE: [go1.23+](https://golang.org/dl/). 51 | 52 | ```shell script 53 | git clone https://github.com/betterstackhq/terraform-provider-logtail && \ 54 | cd terraform-provider-logtail 55 | 56 | make help 57 | ``` 58 | 59 | ## Releasing New Versions 60 | 61 | Simply push a new tag `vX.Y.Z` to GitHub and a new version will be built and released automatically through a GitHub action. 62 | -------------------------------------------------------------------------------- /docs/resources/connection.md: -------------------------------------------------------------------------------- 1 | --- 2 | # generated by https://github.com/hashicorp/terraform-plugin-docs 3 | page_title: "logtail_connection Resource - terraform-provider-logtail" 4 | subcategory: "" 5 | description: |- 6 | This resource allows you to create and manage ClickHouse connections for remote querying. For more information about the Connection API check https://betterstack.com/docs/logs/api/connections/ 7 | --- 8 | 9 | # logtail_connection (Resource) 10 | 11 | This resource allows you to create and manage ClickHouse connections for remote querying. For more information about the Connection API check https://betterstack.com/docs/logs/api/connections/ 12 | 13 | 14 | 15 | 16 | ## Schema 17 | 18 | ### Required 19 | 20 | - `client_type` (String) Type of client connection. Currently only `clickhouse` is supported. 21 | 22 | ### Optional 23 | 24 | - `data_region` (String) Data region or private cluster name. Permitted values: `us_east`, `us_west`, `germany`, `singapore`. 25 | - `ip_allowlist` (List of String) Array of IP addresses or CIDR ranges that are allowed to use this connection. 26 | - `note` (String) A descriptive note for the connection. 27 | - `team_ids` (List of Number) Array of team IDs to associate with the connection. Only one of `team_names` or `team_ids` should be provided. 28 | - `team_names` (List of String) Array of team names to associate with the connection. Only one of `team_names` or `team_ids` should be provided. 29 | - `valid_until` (String) ISO 8601 timestamp when the connection expires. 30 | 31 | ### Read-Only 32 | 33 | - `created_at` (String) The time when this connection was created. 34 | - `created_by` (Map of String) Information about the user who created this connection. 35 | - `data_sources` (List of Object) List of available data sources for this connection. (see [below for nested schema](#nestedatt--data_sources)) 36 | - `host` (String) The connection hostname. 37 | - `id` (String) The ID of this connection. 38 | - `password` (String, Sensitive) The connection password. Only available immediately after creation. 39 | - `port` (Number) The connection port. 
40 | - `sample_query` (String) A sample query showing how to use this connection. 41 | - `username` (String) The connection username. 42 | 43 | 44 | ### Nested Schema for `data_sources` 45 | 46 | Read-Only: 47 | 48 | - `data_sources` (List of String) 49 | - `source_id` (Number) 50 | - `source_name` (String) 51 | - `team_name` (String) 52 | -------------------------------------------------------------------------------- /.github/workflows/test.yml: -------------------------------------------------------------------------------- 1 | name: tests 2 | on: 3 | push: 4 | branches: 5 | - master 6 | pull_request: 7 | workflow_dispatch: 8 | schedule: 9 | - cron: '0 3 * * *' 10 | jobs: 11 | test: 12 | strategy: 13 | matrix: 14 | go-version: ['1.23.x'] 15 | os: [ubuntu-latest, macos-latest, windows-latest] 16 | fail-fast: false 17 | runs-on: ${{ matrix.os }} 18 | steps: 19 | - name: Install Go 20 | uses: actions/setup-go@v2 21 | with: 22 | go-version: ${{ matrix.go-version }} 23 | - name: Checkout code 24 | uses: actions/checkout@v2 25 | - name: Test 26 | run: go test ./... 27 | 28 | e2e_test: 29 | strategy: 30 | matrix: 31 | terraform-version: ["0.13", "1.0", "1.8", "latest"] 32 | config: [examples/basic, examples/advanced, examples/connection, examples/scrape, examples/warehouse] 33 | fail-fast: false 34 | runs-on: ubuntu-latest 35 | steps: 36 | - name: Install Go 37 | uses: actions/setup-go@v2 38 | with: 39 | go-version: '1.23.x' 40 | - name: Install Terraform 41 | uses: hashicorp/setup-terraform@v3 42 | with: 43 | terraform_version: ${{ matrix.terraform-version }} 44 | - name: Checkout code 45 | uses: actions/checkout@v2 46 | - name: Add resources via Terraform 47 | run: make terraform CONFIGURATION="${{ matrix.config }}" ARGS="apply --auto-approve --input=false" 48 | env: 49 | LOGTAIL_API_TOKEN: ${{ matrix.config == 'examples/connection' && secrets.GLOBAL_E2E_API_TOKEN || secrets.LOGS_E2E_TEAM_TOKEN }} 50 | - name: Plan resources via Terraform - must be empty 51 | run: | 52 | make terraform CONFIGURATION="${{ matrix.config }}" ARGS="plan --input=false --out=tfplan" 53 | make terraform CONFIGURATION="${{ matrix.config }}" ARGS="show --json tfplan > tfplan.json" 54 | CHANGES="$(jq "[.resource_changes[]? | select(.change.actions != [\"no-op\"])] | length" "${{ matrix.config }}/tfplan.json")" 55 | if [ "$CHANGES" == "0" ]; then 56 | echo "No planned changes detected after first apply. Success!" 57 | else 58 | echo "$CHANGES planned changes detected after first apply. Failure!" 59 | exit 1 60 | fi 61 | env: 62 | LOGTAIL_API_TOKEN: ${{ matrix.config == 'examples/connection' && secrets.GLOBAL_E2E_API_TOKEN || secrets.LOGS_E2E_TEAM_TOKEN }} 63 | - name: Destroy resources via Terraform 64 | if: always() 65 | run: make terraform CONFIGURATION="${{ matrix.config }}" ARGS="destroy --auto-approve --input=false" 66 | env: 67 | LOGTAIL_API_TOKEN: ${{ matrix.config == 'examples/connection' && secrets.GLOBAL_E2E_API_TOKEN || secrets.LOGS_E2E_TEAM_TOKEN }} 68 | -------------------------------------------------------------------------------- /docs/data-sources/errors_application.md: -------------------------------------------------------------------------------- 1 | --- 2 | # generated by https://github.com/hashicorp/terraform-plugin-docs 3 | page_title: "logtail_errors_application Data Source - terraform-provider-logtail" 4 | subcategory: "" 5 | description: |- 6 | This Data Source allows you to look up existing Errors applications using their name. 
For more information about the Errors API check https://betterstack.com/docs/errors/api/applications/list/ 7 | --- 8 | 9 | # logtail_errors_application (Data Source) 10 | 11 | This Data Source allows you to look up existing Errors applications using their name. For more information about the Errors API check https://betterstack.com/docs/errors/api/applications/list/ 12 | 13 | 14 | 15 | 16 | ## Schema 17 | 18 | ### Required 19 | 20 | - `name` (String) Application name. Must be unique within your team. 21 | 22 | ### Read-Only 23 | 24 | - `application_group_id` (Number) ID of the application group this application belongs to. 25 | - `created_at` (String) The time when this application was created. 26 | - `custom_bucket` (List of Object) Optional custom bucket configuration for the application. When provided, all fields (name, endpoint, access_key_id, secret_access_key) are required. (see [below for nested schema](#nestedatt--custom_bucket)) 27 | - `data_region` (String) Data region or cluster name where application data will be stored. If omitted, the default data region for your team will be used. 28 | - `errors_retention` (Number) Error data retention period in days. Default retention is 90 days. 29 | - `id` (String) The ID of this application. 30 | - `ingesting_host` (String) The host where the errors should be sent. See documentation for your specific platform for details. 31 | - `ingesting_paused` (Boolean) This property allows you to temporarily pause data ingesting for this application. 32 | - `platform` (String) The platform type for the application. This helps configure appropriate SDKs and integrations. Examples: dot_net_errors, dot_net_maui_errors, dot_net_http_errors, aiohttp_errors, asgi_errors, asp_dot_net_errors, asp_dot_net_core_errors, aws_lambda_dot_net_errors, aws_lambda_node_errors, aws_lambda_python_errors... Must be suffixed with _errors. 33 | - `table_name` (String) The table name generated for this application. 34 | - `team_name` (String) Used to specify the team the resource should be created in when using global tokens. 35 | - `token` (String) The token of this application. This token is used to identify and route the data you will send to Better Stack. 36 | - `updated_at` (String) The time when this application was updated. 
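A lookup sketch mirroring `examples/advanced/main.tf`, which resolves an application by name; it assumes the `logtail_errors_application.this` resource from that example exists:

```terraform
data "logtail_errors_application" "lookup" {
  name = logtail_errors_application.this.name
}
```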
37 | 38 | 39 | ### Nested Schema for `custom_bucket` 40 | 41 | Read-Only: 42 | 43 | - `access_key_id` (String) 44 | - `endpoint` (String) 45 | - `keep_data_after_retention` (Boolean) 46 | - `name` (String) 47 | - `secret_access_key` (String) 48 | -------------------------------------------------------------------------------- /internal/provider/data_source_source_group.go: -------------------------------------------------------------------------------- 1 | package provider 2 | 3 | import ( 4 | "context" 5 | "encoding/json" 6 | "fmt" 7 | "io" 8 | "net/http" 9 | "net/url" 10 | 11 | "github.com/hashicorp/terraform-plugin-sdk/v2/diag" 12 | "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" 13 | ) 14 | 15 | func newSourceGroupDataSource() *schema.Resource { 16 | s := make(map[string]*schema.Schema) 17 | 18 | for k, v := range sourceGroupSchema { 19 | cp := *v 20 | switch k { 21 | case "name": 22 | cp.Computed = false 23 | cp.Optional = false 24 | cp.Required = true 25 | default: 26 | cp.Computed = true 27 | cp.Optional = false 28 | cp.Required = false 29 | cp.ValidateFunc = nil 30 | cp.ValidateDiagFunc = nil 31 | cp.Default = nil 32 | cp.DefaultFunc = nil 33 | cp.DiffSuppressFunc = nil 34 | } 35 | s[k] = &cp 36 | } 37 | 38 | return &schema.Resource{ 39 | ReadContext: sourceGroupLookup, 40 | Description: "This data source allows you to get information about a Source Group. For more information about the Source Groups API check https://betterstack.com/docs/logs/api/", 41 | Schema: s, 42 | } 43 | } 44 | 45 | type sourceGroupsHTTPResponse struct { 46 | Data []struct { 47 | ID string `json:"id"` 48 | Attributes sourceGroup `json:"attributes"` 49 | } `json:"data"` 50 | Pagination struct { 51 | Next *string `json:"next"` 52 | } `json:"pagination"` 53 | } 54 | 55 | func sourceGroupLookup(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { 56 | name := d.Get("name").(string) 57 | 58 | fetch := func(u string) (*sourceGroupsHTTPResponse, error) { 59 | res, err := meta.(*client).Get(ctx, u) 60 | if err != nil { 61 | return nil, err 62 | } 63 | defer func() { 64 | _, _ = io.Copy(io.Discard, res.Body) 65 | _ = res.Body.Close() 66 | }() 67 | body, err := io.ReadAll(res.Body) 68 | if res.StatusCode != http.StatusOK { 69 | return nil, fmt.Errorf("GET %s returned %d: %s", res.Request.URL.String(), res.StatusCode, string(body)) 70 | } 71 | if err != nil { 72 | return nil, err 73 | } 74 | var out sourceGroupsHTTPResponse 75 | return &out, json.Unmarshal(body, &out) 76 | } 77 | 78 | page := "/api/v1/source-groups?page=1" 79 | for { 80 | out, err := fetch(page) 81 | if err != nil { 82 | return diag.FromErr(err) 83 | } 84 | 85 | for _, item := range out.Data { 86 | if item.Attributes.Name != nil && *item.Attributes.Name == name { 87 | d.SetId(item.ID) 88 | return sourceGroupCopyAttrs(d, &item.Attributes) 89 | } 90 | } 91 | 92 | if out.Pagination.Next == nil { 93 | break 94 | } 95 | 96 | if u, err := url.Parse(*out.Pagination.Next); err != nil { 97 | return diag.FromErr(err) 98 | } else { 99 | page = u.RequestURI() 100 | } 101 | } 102 | 103 | return diag.Errorf("Source group with name %q not found", name) 104 | } 105 | -------------------------------------------------------------------------------- /docs/resources/errors_application.md: -------------------------------------------------------------------------------- 1 | --- 2 | # generated by https://github.com/hashicorp/terraform-plugin-docs 3 | page_title: "logtail_errors_application Resource - terraform-provider-logtail" 4 | 
subcategory: "" 5 | description: |- 6 | This resource allows you to create, modify, and delete your Errors applications. For more information about the Errors API check https://betterstack.com/docs/errors/api/applications/create/ 7 | --- 8 | 9 | # logtail_errors_application (Resource) 10 | 11 | This resource allows you to create, modify, and delete your Errors applications. For more information about the Errors API check https://betterstack.com/docs/errors/api/applications/create/ 12 | 13 | 14 | 15 | 16 | ## Schema 17 | 18 | ### Required 19 | 20 | - `name` (String) Application name. Must be unique within your team. 21 | - `platform` (String) The platform type for the application. This helps configure appropriate SDKs and integrations. Examples: dot_net_errors, dot_net_maui_errors, dot_net_http_errors, aiohttp_errors, asgi_errors, asp_dot_net_errors, asp_dot_net_core_errors, aws_lambda_dot_net_errors, aws_lambda_node_errors, aws_lambda_python_errors... Must be suffixed with _errors. 22 | 23 | ### Optional 24 | 25 | - `application_group_id` (Number) ID of the application group this application belongs to. 26 | - `custom_bucket` (Block List, Max: 1) Optional custom bucket configuration for the application. When provided, all fields (name, endpoint, access_key_id, secret_access_key) are required. (see [below for nested schema](#nestedblock--custom_bucket)) 27 | - `data_region` (String) Data region or cluster name where application data will be stored. If omitted, the default data region for your team will be used. 28 | - `errors_retention` (Number) Error data retention period in days. Default retention is 90 days. 29 | - `ingesting_paused` (Boolean) This property allows you to temporarily pause data ingesting for this application. 30 | - `team_name` (String) Used to specify the team the resource should be created in when using global tokens. 31 | 32 | ### Read-Only 33 | 34 | - `created_at` (String) The time when this application was created. 35 | - `id` (String) The ID of this application. 36 | - `ingesting_host` (String) The host where the errors should be sent. See documentation for your specific platform for details. 37 | - `table_name` (String) The table name generated for this application. 38 | - `token` (String) The token of this application. This token is used to identify and route the data you will send to Better Stack. 39 | - `updated_at` (String) The time when this application was updated. 40 | 41 | 42 | ### Nested Schema for `custom_bucket` 43 | 44 | Required: 45 | 46 | - `access_key_id` (String) Access key ID 47 | - `endpoint` (String) Bucket endpoint 48 | - `name` (String) Bucket name 49 | - `secret_access_key` (String, Sensitive) Secret access key 50 | 51 | Optional: 52 | 53 | - `keep_data_after_retention` (Boolean) Whether we should keep data in the bucket after the retention period. 54 | -------------------------------------------------------------------------------- /docs/data-sources/warehouse_source.md: -------------------------------------------------------------------------------- 1 | --- 2 | # generated by https://github.com/hashicorp/terraform-plugin-docs 3 | page_title: "logtail_warehouse_source Data Source - terraform-provider-logtail" 4 | subcategory: "" 5 | description: |- 6 | This Data Source allows you to look up existing Warehouse sources using their name. 
For more information about the Warehouse API check https://betterstack.com/docs/warehouse/api/sources/index/ 7 | --- 8 | 9 | # logtail_warehouse_source (Data Source) 10 | 11 | This Data Source allows you to look up existing Warehouse sources using their name. For more information about the Warehouse API check https://betterstack.com/docs/warehouse/api/sources/index/ 12 | 13 | 14 | 15 | 16 | ## Schema 17 | 18 | ### Required 19 | 20 | - `name` (String) The name of the new Warehouse source. Can contain letters, numbers, spaces, and special characters. Source names do not need to be unique. 21 | 22 | ### Read-Only 23 | 24 | - `created_at` (String) The time when this warehouse source was created. 25 | - `custom_bucket` (List of Object) Optional custom bucket configuration for the source. When provided, all fields (name, endpoint, access_key_id, secret_access_key) are required. (see [below for nested schema](#nestedatt--custom_bucket)) 26 | - `data_region` (String) The data region or cluster name where the source's data will be stored. 27 | Possible values include `us_east`, `us_west`, `germany`, `singapore`, or a specific cluster name like `us-east-9`. 28 | The actual region created may differ slightly due to dynamic load balancing. 29 | - `events_retention` (Number) The retention period for event data in days. Default is 9999999 days (effectively infinite). 30 | - `id` (String) The ID of this warehouse source. 31 | - `ingesting_host` (String) The host where the data should be sent. See documentation for details. 32 | - `ingesting_paused` (Boolean) This property allows you to temporarily pause data ingesting for this source. 33 | - `live_tail_pattern` (String) A display template for live tail messages. Default is `"{status} {message}"`. 34 | - `table_name` (String) The table name generated for this warehouse source. 35 | - `team_name` (String) Used to specify the team the resource should be created in when using global tokens. 36 | - `time_series_retention` (Number) The retention period for time series data in days. Default is 9999999 days (effectively infinite). 37 | - `token` (String) The token of this warehouse source. This token is used to identify and route the data you will send to Better Stack. 38 | - `updated_at` (String) The time when this warehouse source was updated. 39 | - `vrl_transformation` (String) A VRL program for real-time data transformation. Read more about [VRL transformations](https://betterstack.com/docs/logs/using-logtail/transforming-ingested-data/logs-vrl/). 40 | - `warehouse_source_group_id` (Number) The ID of the warehouse source group this source belongs to. 
41 | 42 | 43 | ### Nested Schema for `custom_bucket` 44 | 45 | Read-Only: 46 | 47 | - `access_key_id` (String) 48 | - `endpoint` (String) 49 | - `keep_data_after_retention` (Boolean) 50 | - `name` (String) 51 | - `secret_access_key` (String) 52 | -------------------------------------------------------------------------------- /internal/provider/provider.go: -------------------------------------------------------------------------------- 1 | package provider 2 | 3 | import ( 4 | "context" 5 | "net/http" 6 | "time" 7 | 8 | "github.com/hashicorp/terraform-plugin-sdk/v2/diag" 9 | "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" 10 | ) 11 | 12 | type provider struct { 13 | url string 14 | version string 15 | } 16 | 17 | type Option func(*provider) 18 | 19 | func WithURL(v string) Option { 20 | return func(p *provider) { 21 | p.url = v 22 | } 23 | } 24 | 25 | func WithVersion(v string) Option { 26 | return func(p *provider) { 27 | p.version = v 28 | } 29 | } 30 | 31 | func New(opts ...Option) *schema.Provider { 32 | spec := provider{ 33 | url: "https://telemetry.betterstack.com", 34 | } 35 | for _, opt := range opts { 36 | opt(&spec) 37 | } 38 | return &schema.Provider{ 39 | Schema: map[string]*schema.Schema{ 40 | "api_token": { 41 | Type: schema.TypeString, 42 | Sensitive: true, 43 | Required: true, 44 | DefaultFunc: schema.EnvDefaultFunc("LOGTAIL_API_TOKEN", nil), 45 | Description: "Better Stack Telemetry API token. The value can be omitted if `LOGTAIL_API_TOKEN` environment variable is set. See https://betterstack.com/docs/logs/api/getting-started/#get-an-logs-api-token on how to obtain the API token for your team.", 46 | }, 47 | }, 48 | DataSourcesMap: map[string]*schema.Resource{ 49 | "logtail_source": newSourceDataSource(), 50 | "logtail_metric": newMetricDataSource(), 51 | "logtail_source_group": newSourceGroupDataSource(), 52 | "logtail_errors_application": newErrorsApplicationDataSource(), 53 | "logtail_errors_application_group": newErrorsApplicationGroupDataSource(), 54 | "logtail_warehouse_source": newWarehouseSourceDataSource(), 55 | "logtail_warehouse_source_group": newWarehouseSourceGroupDataSource(), 56 | "logtail_warehouse_embedding": newWarehouseEmbeddingDataSource(), 57 | "logtail_connection": newConnectionDataSource(), 58 | }, 59 | ResourcesMap: map[string]*schema.Resource{ 60 | "logtail_source": newSourceResource(), 61 | "logtail_metric": newMetricResource(), 62 | "logtail_source_group": newSourceGroupResource(), 63 | "logtail_errors_application": newErrorsApplicationResource(), 64 | "logtail_errors_application_group": newErrorsApplicationGroupResource(), 65 | "logtail_warehouse_source": newWarehouseSourceResource(), 66 | "logtail_warehouse_source_group": newWarehouseSourceGroupResource(), 67 | "logtail_warehouse_time_series": newWarehouseTimeSeriesResource(), 68 | "logtail_warehouse_embedding": newWarehouseEmbeddingResource(), 69 | "logtail_connection": newConnectionResource(), 70 | }, 71 | ConfigureContextFunc: func(ctx context.Context, r *schema.ResourceData) (interface{}, diag.Diagnostics) { 72 | var userAgent string 73 | if spec.version != "" { 74 | userAgent = "terraform-provider-logtail/" + spec.version 75 | } 76 | c, err := newClient(spec.url, r.Get("api_token").(string), 77 | withHTTPClient(&http.Client{ 78 | Timeout: time.Second * 60, 79 | }), 80 | withUserAgent(userAgent)) 81 | return c, diag.FromErr(err) 82 | }, 83 | } 84 | } 85 | -------------------------------------------------------------------------------- /internal/provider/data_source.go: 
-------------------------------------------------------------------------------- 1 | package provider 2 | 3 | import ( 4 | "context" 5 | "encoding/json" 6 | "fmt" 7 | "io" 8 | "net/http" 9 | 10 | "github.com/hashicorp/terraform-plugin-sdk/v2/diag" 11 | "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" 12 | ) 13 | 14 | func newSourceDataSource() *schema.Resource { 15 | s := make(map[string]*schema.Schema) 16 | for k, v := range sourceSchema { 17 | cp := *v 18 | switch k { 19 | case "table_name": 20 | cp.Computed = false 21 | cp.Optional = false 22 | cp.Required = true 23 | case "custom_bucket": 24 | cp.Computed = true 25 | cp.Optional = false 26 | cp.Required = false 27 | cp.ValidateFunc = nil 28 | cp.ValidateDiagFunc = nil 29 | cp.Default = nil 30 | cp.DefaultFunc = nil 31 | cp.DiffSuppressFunc = nil 32 | cp.MaxItems = 0 33 | default: 34 | cp.Computed = true 35 | cp.Optional = false 36 | cp.Required = false 37 | cp.ValidateFunc = nil 38 | cp.ValidateDiagFunc = nil 39 | cp.Default = nil 40 | cp.DefaultFunc = nil 41 | cp.DiffSuppressFunc = nil 42 | } 43 | s[k] = &cp 44 | } 45 | return &schema.Resource{ 46 | ReadContext: sourceLookup, 47 | Description: "This Data Source allows you to look up existing Sources using their table name. The table name is shown on the Source settings page on telemetry.betterstack.com or you can list all your existing sources via the [Sources API](https://betterstack.com/docs/logs/api/list-all-existing-sources/).", 48 | Schema: s, 49 | } 50 | } 51 | 52 | type sourcePageHTTPResponse struct { 53 | Data []struct { 54 | ID string `json:"id"` 55 | Attributes source `json:"attributes"` 56 | } `json:"data"` 57 | Pagination struct { 58 | First string `json:"first"` 59 | Last string `json:"last"` 60 | Prev string `json:"prev"` 61 | Next string `json:"next"` 62 | } `json:"pagination"` 63 | } 64 | 65 | func sourceLookup(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { 66 | fetch := func(page int) (*sourcePageHTTPResponse, error) { 67 | res, err := meta.(*client).Get(ctx, fmt.Sprintf("/api/v1/sources?page=%d", page)) 68 | if err != nil { 69 | return nil, err 70 | } 71 | defer func() { 72 | // Keep-Alive. 
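// Draining the response body before closing it lets the HTTP client reuse the underlying connection.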
73 | _, _ = io.Copy(io.Discard, res.Body) 74 | _ = res.Body.Close() 75 | }() 76 | body, err := io.ReadAll(res.Body) 77 | if res.StatusCode != http.StatusOK { 78 | return nil, fmt.Errorf("GET %s returned %d: %s", res.Request.URL.String(), res.StatusCode, string(body)) 79 | } 80 | if err != nil { 81 | return nil, err 82 | } 83 | var tr sourcePageHTTPResponse 84 | return &tr, json.Unmarshal(body, &tr) 85 | } 86 | table_name := d.Get("table_name").(string) 87 | page := 1 88 | for { 89 | res, err := fetch(page) 90 | if err != nil { 91 | return diag.FromErr(err) 92 | } 93 | for _, e := range res.Data { 94 | if *e.Attributes.TableName == table_name { 95 | if d.Id() != "" { 96 | return diag.Errorf("duplicate") 97 | } 98 | d.SetId(e.ID) 99 | if derr := sourceCopyAttrs(d, &e.Attributes); derr != nil { 100 | return derr 101 | } 102 | } 103 | } 104 | page++ 105 | if res.Pagination.Next == "" { 106 | return nil 107 | } 108 | } 109 | } 110 | -------------------------------------------------------------------------------- /docs/resources/warehouse_source.md: -------------------------------------------------------------------------------- 1 | --- 2 | # generated by https://github.com/hashicorp/terraform-plugin-docs 3 | page_title: "logtail_warehouse_source Resource - terraform-provider-logtail" 4 | subcategory: "" 5 | description: |- 6 | This resource allows you to create, modify, and delete your Warehouse sources. For more information about the Warehouse API check https://betterstack.com/docs/warehouse/api/sources/create/ 7 | --- 8 | 9 | # logtail_warehouse_source (Resource) 10 | 11 | This resource allows you to create, modify, and delete your Warehouse sources. For more information about the Warehouse API check https://betterstack.com/docs/warehouse/api/sources/create/ 12 | 13 | 14 | 15 | 16 | ## Schema 17 | 18 | ### Required 19 | 20 | - `name` (String) The name of the new Warehouse source. Can contain letters, numbers, spaces, and special characters. Source names do not need to be unique. 21 | - `warehouse_source_group_id` (Number) The ID of the warehouse source group this source belongs to. 22 | 23 | ### Optional 24 | 25 | - `custom_bucket` (Block List, Max: 1) Optional custom bucket configuration for the source. When provided, all fields (name, endpoint, access_key_id, secret_access_key) are required. (see [below for nested schema](#nestedblock--custom_bucket)) 26 | - `data_region` (String) The data region or cluster name where the source's data will be stored. 27 | Possible values include `us_east`, `us_west`, `germany`, `singapore`, or a specific cluster name like `us-east-9`. 28 | The actual region created may differ slightly due to dynamic load balancing. 29 | - `events_retention` (Number) The retention period for event data in days. Default is 9999999 days (effectively infinite). 30 | - `ingesting_paused` (Boolean) This property allows you to temporarily pause data ingesting for this source. 31 | - `live_tail_pattern` (String) A display template for live tail messages. Default is `"{status} {message}"`. 32 | - `team_name` (String) Used to specify the team the resource should be created in when using global tokens. 33 | - `time_series_retention` (Number) The retention period for time series data in days. Default is 9999999 days (effectively infinite). 34 | - `vrl_transformation` (String) A VRL program for real-time data transformation. Read more about [VRL transformations](https://betterstack.com/docs/logs/using-logtail/transforming-ingested-data/logs-vrl/). 
35 | 36 | ### Read-Only 37 | 38 | - `created_at` (String) The time when this warehouse source was created. 39 | - `id` (String) The ID of this warehouse source. 40 | - `ingesting_host` (String) The host where the data should be sent. See documentation for details. 41 | - `table_name` (String) The table name generated for this warehouse source. 42 | - `token` (String) The token of this warehouse source. This token is used to identify and route the data you will send to Better Stack. 43 | - `updated_at` (String) The time when this warehouse source was updated. 44 | 45 | 46 | ### Nested Schema for `custom_bucket` 47 | 48 | Required: 49 | 50 | - `access_key_id` (String) Access key ID 51 | - `endpoint` (String) Bucket endpoint 52 | - `name` (String) Bucket name 53 | - `secret_access_key` (String, Sensitive) Secret access key 54 | 55 | Optional: 56 | 57 | - `keep_data_after_retention` (Boolean) Whether we should keep data in the bucket after the retention period. 58 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | SHELL := /bin/bash 2 | GOLANGCI_LINT := golangci-lint run --disable-all \ 3 | -E errcheck \ 4 | -E goimports \ 5 | -E gosimple \ 6 | -E govet \ 7 | -E ineffassign \ 8 | -E staticcheck \ 9 | -E typecheck \ 10 | -E unused 11 | VERSION := 0.7.3 12 | .PHONY: test build 13 | 14 | help: 15 | @echo Usage: 16 | @echo 17 | @echo " make clean" 18 | @echo 19 | @echo " # Regenerate docs/." 20 | @echo " make gen" 21 | @echo 22 | @echo " make lint" 23 | @echo " make fmt" 24 | @echo 25 | @echo " make test" 26 | @echo " make test-coverage" 27 | @echo 28 | @echo " make terraform CONFIGURATION=examples/basic ARGS=apply" 29 | @echo 30 | @echo " # Run in \"Debug\" mode (connect debugger to port 2345)." 31 | @echo " make debug" 32 | @echo 33 | @echo " # Install terraform-provider-logtail locally." 34 | @echo " #" 35 | @echo " # terraform {" 36 | @echo " # required_providers {" 37 | @echo " # custom = {" 38 | @echo " # source = \"registry.terraform.io/BetterStackHQ/logtail\"" 39 | @echo " # version = \"0.0.0-0\"" 40 | @echo " # }" 41 | @echo " # }" 42 | @echo " # }" 43 | @echo " make install" 44 | @echo 45 | @echo " # Upload terraform-provider-logtail to GitHub." 46 | @echo " make VERSION=0.0.0-0 release" 47 | @echo 48 | 49 | clean: 50 | rm -f cover.out coverage.html terraform-provider-logtail 51 | rm -rf release/ 52 | 53 | lint-init: 54 | @test -n "$$(which golangci-lint)" || (curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $$(go env GOPATH)/bin v1.64.6) 55 | 56 | lint: lint-init 57 | $(GOLANGCI_LINT) 58 | terraform fmt -check -diff -recursive 59 | 60 | fmt: lint-init 61 | $(GOLANGCI_LINT) --fix 62 | terraform fmt -recursive 63 | 64 | gen: 65 | terraform fmt -check -diff -recursive 66 | go generate ./... 67 | @echo 68 | @echo "docs/ can be previewed at https://registry.terraform.io/tools/doc-preview" 69 | 70 | test: 71 | go test ./... 72 | 73 | test-race: 74 | go test -race ./... 75 | 76 | test-coverage: 77 | go test -coverprofile cover.out ./... 78 | go tool cover -html=cover.out -o coverage.html 79 | rm -f cover.out 80 | @echo 81 | @echo "open coverage.html to review the report" 82 | 83 | # https://www.terraform.io/docs/extend/testing/acceptance-tests/index.html 84 | testacc: 85 | TF_ACC=1 go test ./... 
-v $(TESTARGS) -timeout 120m 86 | 87 | terraform: install 88 | cd $(CONFIGURATION) && rm -f .terraform.lock.hcl && terraform init && \ 89 | TF_LOG=DEBUG TF_PROVIDER_LOGTAIL_LOG_INSECURE=1 terraform $(ARGS) 90 | 91 | build: 92 | # -gcflags "all=-N -l" is here for delve (`go tool compile -help` for more) 93 | go build -gcflags "all=-N -l" -ldflags "-X main.version=$(VERSION)" 94 | 95 | install: build 96 | PLUGIN_DIR="$$HOME/.terraform.d/plugins/registry.terraform.io/BetterStackHQ/logtail/$(VERSION)/$$(go env GOOS)_$$(go env GOARCH)" && \ 97 | mkdir -p "$$PLUGIN_DIR" && \ 98 | cp terraform-provider-logtail "$$PLUGIN_DIR/" 99 | 100 | uninstall: 101 | rm -rf "$$HOME/.terraform.d/plugins/registry.terraform.io/BetterStackHQ/logtail/$(VERSION)" 102 | 103 | debug: build 104 | # https://github.com/go-delve/delve/blob/master/Documentation/installation/README.md 105 | dlv --listen=:2345 --headless=true --api-version=2 --accept-multiclient exec ./terraform-provider-logtail -- --debug 106 | -------------------------------------------------------------------------------- /internal/provider/resource_warehouse_embedding_test.go: -------------------------------------------------------------------------------- 1 | package provider 2 | 3 | import ( 4 | "fmt" 5 | "io" 6 | "net/http" 7 | "net/http/httptest" 8 | "sync/atomic" 9 | "testing" 10 | 11 | "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" 12 | "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" 13 | ) 14 | 15 | func TestResourceWarehouseEmbedding(t *testing.T) { 16 | var data atomic.Value 17 | server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { 18 | t.Log("Received " + r.Method + " " + r.RequestURI) 19 | 20 | if r.Header.Get("Authorization") != "Bearer foo" { 21 | t.Fatal("Not authorized: " + r.Header.Get("Authorization")) 22 | } 23 | 24 | prefix := "/api/v1/sources/ws-123/embeddings" 25 | id := "1" 26 | 27 | switch { 28 | case r.Method == http.MethodPost && r.RequestURI == prefix: 29 | body, err := io.ReadAll(r.Body) 30 | if err != nil { 31 | t.Fatal(err) 32 | } 33 | body = inject(t, body, "created_at", "2025-01-26T09:40:00.000Z") 34 | body = inject(t, body, "updated_at", "2025-01-26T09:40:00.000Z") 35 | 36 | data.Store(body) 37 | w.WriteHeader(http.StatusCreated) 38 | _, _ = w.Write([]byte(fmt.Sprintf(`{"data":{"id":%q,"attributes":%s}}`, id, body))) 39 | case r.Method == http.MethodGet && (r.RequestURI == prefix || r.RequestURI == prefix+"?page=1"): 40 | // Return list of embeddings 41 | _, _ = w.Write([]byte(fmt.Sprintf(`{"data":[{"id":%q,"attributes":%s}],"pagination":{"first":"1","last":"1","prev":null,"next":null}}`, id, data.Load().([]byte)))) 42 | case r.Method == http.MethodDelete && r.RequestURI == prefix+"/"+id: 43 | w.WriteHeader(http.StatusNoContent) 44 | data.Store([]byte(nil)) 45 | default: 46 | t.Fatal("Unexpected " + r.Method + " " + r.RequestURI) 47 | } 48 | })) 49 | defer server.Close() 50 | 51 | var warehouseSourceID = "ws-123" 52 | var embedFrom = "message.text" 53 | var embedTo = "message.embedding" 54 | var model = "embeddinggemma:300m" 55 | var dimension = 512 56 | 57 | resource.Test(t, resource.TestCase{ 58 | IsUnitTest: true, 59 | ProviderFactories: map[string]func() (*schema.Provider, error){ 60 | "logtail": func() (*schema.Provider, error) { 61 | return New(WithURL(server.URL)), nil 62 | }, 63 | }, 64 | Steps: []resource.TestStep{ 65 | // Step 1 - create. 
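// The httptest server above stores the attributes it receives on POST (plus injected timestamps) and echoes them back on GET, so the checks below confirm the configured values land in state unchanged.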
66 | { 67 | Config: fmt.Sprintf(` 68 | provider "logtail" { 69 | api_token = "foo" 70 | } 71 | 72 | resource "logtail_warehouse_embedding" "this" { 73 | source_id = "%s" 74 | embed_from = "%s" 75 | embed_to = "%s" 76 | model = "%s" 77 | dimension = %d 78 | } 79 | `, warehouseSourceID, embedFrom, embedTo, model, dimension), 80 | Check: resource.ComposeTestCheckFunc( 81 | resource.TestCheckResourceAttrSet("logtail_warehouse_embedding.this", "id"), 82 | resource.TestCheckResourceAttr("logtail_warehouse_embedding.this", "source_id", warehouseSourceID), 83 | resource.TestCheckResourceAttr("logtail_warehouse_embedding.this", "embed_from", embedFrom), 84 | resource.TestCheckResourceAttr("logtail_warehouse_embedding.this", "embed_to", embedTo), 85 | resource.TestCheckResourceAttr("logtail_warehouse_embedding.this", "model", model), 86 | resource.TestCheckResourceAttr("logtail_warehouse_embedding.this", "dimension", "512"), 87 | ), 88 | }, 89 | }, 90 | }) 91 | } 92 | -------------------------------------------------------------------------------- /internal/provider/client.go: -------------------------------------------------------------------------------- 1 | package provider 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "io" 7 | "net/http" 8 | 9 | "golang.org/x/net/context/ctxhttp" 10 | ) 11 | 12 | type client struct { 13 | baseURL string 14 | errorsBaseURL string 15 | warehouseBaseURL string 16 | token string 17 | httpClient *http.Client 18 | userAgent string 19 | } 20 | 21 | type option func(c *client) 22 | 23 | func withHTTPClient(httpClient *http.Client) option { 24 | return func(c *client) { 25 | c.httpClient = httpClient 26 | } 27 | } 28 | 29 | func withUserAgent(userAgent string) option { 30 | return func(c *client) { 31 | c.userAgent = userAgent 32 | } 33 | } 34 | 35 | func newClient(baseURL, token string, opts ...option) (*client, error) { 36 | c := client{ 37 | baseURL: baseURL, 38 | errorsBaseURL: "https://errors.betterstack.com", 39 | warehouseBaseURL: "https://warehouse.betterstack.com", 40 | token: token, 41 | httpClient: http.DefaultClient, 42 | } 43 | // Override with test URL if baseURL is not the production URL 44 | if baseURL != "https://telemetry.betterstack.com" { 45 | c.errorsBaseURL = baseURL 46 | c.warehouseBaseURL = baseURL 47 | } 48 | for _, opt := range opts { 49 | opt(&c) 50 | } 51 | return &c, nil 52 | } 53 | 54 | func (c *client) Get(ctx context.Context, path string) (*http.Response, error) { 55 | return c.do(ctx, http.MethodGet, c.baseURL, path, nil) 56 | } 57 | 58 | func (c *client) Post(ctx context.Context, path string, body io.Reader) (*http.Response, error) { 59 | return c.do(ctx, http.MethodPost, c.baseURL, path, body) 60 | } 61 | 62 | func (c *client) Patch(ctx context.Context, path string, body io.Reader) (*http.Response, error) { 63 | return c.do(ctx, http.MethodPatch, c.baseURL, path, body) 64 | } 65 | 66 | func (c *client) Delete(ctx context.Context, path string) (*http.Response, error) { 67 | return c.do(ctx, http.MethodDelete, c.baseURL, path, nil) 68 | } 69 | 70 | func (c *client) GetWithBaseURL(ctx context.Context, baseURL, path string) (*http.Response, error) { 71 | return c.do(ctx, http.MethodGet, baseURL, path, nil) 72 | } 73 | 74 | func (c *client) PostWithBaseURL(ctx context.Context, baseURL, path string, body io.Reader) (*http.Response, error) { 75 | return c.do(ctx, http.MethodPost, baseURL, path, body) 76 | } 77 | 78 | func (c *client) PatchWithBaseURL(ctx context.Context, baseURL, path string, body io.Reader) (*http.Response, error) { 79 | 
return c.do(ctx, http.MethodPatch, baseURL, path, body) 80 | } 81 | 82 | func (c *client) DeleteWithBaseURL(ctx context.Context, baseURL, path string) (*http.Response, error) { 83 | return c.do(ctx, http.MethodDelete, baseURL, path, nil) 84 | } 85 | 86 | func (c *client) TelemetryBaseURL() string { 87 | return c.baseURL 88 | } 89 | 90 | func (c *client) ErrorsBaseURL() string { 91 | return c.errorsBaseURL 92 | } 93 | 94 | func (c *client) WarehouseBaseURL() string { 95 | return c.warehouseBaseURL 96 | } 97 | 98 | func (c *client) do(ctx context.Context, method, baseURL, path string, body io.Reader) (*http.Response, error) { 99 | req, err := http.NewRequest(method, fmt.Sprintf("%s%s", baseURL, path), body) 100 | if err != nil { 101 | return nil, err 102 | } 103 | req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", c.token)) 104 | if c.userAgent != "" { 105 | req.Header.Set("User-Agent", c.userAgent) 106 | } 107 | if method == http.MethodPost || method == http.MethodPatch { 108 | req.Header.Set("Content-Type", "application/json") 109 | } 110 | return ctxhttp.Do(ctx, c.httpClient, req) 111 | } 112 | -------------------------------------------------------------------------------- /internal/provider/type_string_or_int.go: -------------------------------------------------------------------------------- 1 | package provider 2 | 3 | import ( 4 | "encoding/json" 5 | "fmt" 6 | "strconv" 7 | 8 | "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" 9 | ) 10 | 11 | // StringOrInt handles JSON fields that can be either a string or an integer. 12 | // It unmarshals both "t1234" (string) and 123456 (number) into a string value, 13 | // and marshals back to the appropriate JSON type based on the value. 14 | // 15 | // This is used for fields like team_id in the Better Stack API which can return: 16 | // - A number: 12345 17 | // - A string: "t1234" 18 | // 19 | // When marshaling: 20 | // - Pure numeric strings like "1234" are marshaled as JSON numbers: 1234 21 | // - Non-numeric strings like "t1234" are marshaled as JSON strings: "t1234" 22 | // 23 | // Example usage in resource_source.go: 24 | // 25 | // type source struct { 26 | // TeamId *StringOrInt `json:"team_id,omitempty"` 27 | // } 28 | type StringOrInt string 29 | 30 | // MarshalJSON implements json.Marshaler interface. 31 | // Used when sending data to the Better Stack API: 32 | // - If the value is a pure number (e.g., "1234"), marshals as JSON number: 1234 33 | // - If the value contains non-numeric characters (e.g., "t1234"), marshals as JSON string: "t1234" 34 | func (s StringOrInt) MarshalJSON() ([]byte, error) { 35 | str := string(s) 36 | // Try to parse as integer - if successful, marshal as number 37 | if n, err := strconv.ParseInt(str, 10, 64); err == nil { 38 | return json.Marshal(n) 39 | } 40 | // Otherwise marshal as string 41 | return json.Marshal(str) 42 | } 43 | 44 | // UnmarshalJSON implements json.Unmarshaler interface. 
45 | // Used when receiving data from the Better Stack API: 46 | // - When the JSON value is a string (e.g., "t1234"), stores it as-is 47 | // - When the JSON value is a number (e.g., 123456), converts it to a string 48 | func (s *StringOrInt) UnmarshalJSON(data []byte) error { 49 | // Try string first (handles "t1234", "b654654") 50 | var str string 51 | if err := json.Unmarshal(data, &str); err == nil { 52 | *s = StringOrInt(str) 53 | return nil 54 | } 55 | // Try number (handles 6547, 123456) 56 | var n json.Number 57 | if err := json.Unmarshal(data, &n); err == nil { 58 | *s = StringOrInt(n.String()) 59 | return nil 60 | } 61 | return fmt.Errorf("cannot unmarshal %s into StringOrInt", data) 62 | } 63 | 64 | // String returns the underlying string value. 65 | func (s StringOrInt) String() string { 66 | return string(s) 67 | } 68 | 69 | // StringOrIntFromResourceData creates a StringOrInt from a Terraform resource field. 70 | // Used in sourceCreate and sourceUpdate to handle fields that can be string or int. 71 | // 72 | // Parameters: 73 | // - d: The Terraform resource data 74 | // - key: The field name (e.g., "team_id") 75 | // 76 | // Example: 77 | // 78 | // in.TeamId = StringOrIntFromResourceData(d, "team_id") 79 | func StringOrIntFromResourceData(d *schema.ResourceData, key string) *StringOrInt { 80 | if v, ok := d.GetOk(key); ok { 81 | if v == nil { 82 | return nil 83 | } 84 | str := v.(string) 85 | result := StringOrInt(str) 86 | return &result 87 | } 88 | return nil 89 | } 90 | 91 | // SetStringOrIntResourceData sets a Terraform resource field from a StringOrInt. 92 | // Used in sourceCopyAttrs to handle fields that can be string or int. 93 | // 94 | // Parameters: 95 | // - d: The Terraform resource data 96 | // - key: The field name (e.g., "team_id") 97 | // - value: The StringOrInt value from the API 98 | // 99 | // Example: 100 | // 101 | // SetStringOrIntResourceData(d, "team_id", in.TeamId) 102 | func SetStringOrIntResourceData(d *schema.ResourceData, key string, value *StringOrInt) error { 103 | if value == nil { 104 | return d.Set(key, nil) 105 | } 106 | return d.Set(key, string(*value)) 107 | } 108 | -------------------------------------------------------------------------------- /internal/provider/resource_warehouse_source_group_test.go: -------------------------------------------------------------------------------- 1 | package provider 2 | 3 | import ( 4 | "encoding/json" 5 | "fmt" 6 | "io" 7 | "net/http" 8 | "net/http/httptest" 9 | "sync/atomic" 10 | "testing" 11 | 12 | "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" 13 | "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" 14 | ) 15 | 16 | func TestResourceWarehouseSourceGroup(t *testing.T) { 17 | var data atomic.Value 18 | server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { 19 | t.Log("Received " + r.Method + " " + r.RequestURI) 20 | 21 | if r.Header.Get("Authorization") != "Bearer foo" { 22 | t.Fatal("Not authorized: " + r.Header.Get("Authorization")) 23 | } 24 | 25 | prefix := "/api/v1/source-groups" 26 | id := "1" 27 | 28 | switch { 29 | case r.Method == http.MethodPost && r.RequestURI == prefix: 30 | body, err := io.ReadAll(r.Body) 31 | if err != nil { 32 | t.Fatal(err) 33 | } 34 | 35 | data.Store(body) 36 | w.WriteHeader(http.StatusCreated) 37 | _, _ = w.Write([]byte(fmt.Sprintf(`{"data":{"id":%q,"attributes":%s}}`, id, body))) 38 | case r.Method == http.MethodGet && r.RequestURI == prefix+"/"+id: 39 | _, _ = 
w.Write([]byte(fmt.Sprintf(`{"data":{"id":%q,"attributes":%s}}`, id, data.Load().([]byte)))) 40 | case r.Method == http.MethodPatch && r.RequestURI == prefix+"/"+id: 41 | body, err := io.ReadAll(r.Body) 42 | if err != nil { 43 | t.Fatal(err) 44 | } 45 | patch := make(map[string]interface{}) 46 | if err = json.Unmarshal(data.Load().([]byte), &patch); err != nil { 47 | t.Fatal(err) 48 | } 49 | if err = json.Unmarshal(body, &patch); err != nil { 50 | t.Fatal(err) 51 | } 52 | patched, err := json.Marshal(patch) 53 | if err != nil { 54 | t.Fatal(err) 55 | } 56 | 57 | data.Store(patched) 58 | _, _ = w.Write([]byte(fmt.Sprintf(`{"data":{"id":%q,"attributes":%s}}`, id, patched))) 59 | case r.Method == http.MethodDelete && r.RequestURI == prefix+"/"+id: 60 | w.WriteHeader(http.StatusNoContent) 61 | data.Store([]byte(nil)) 62 | default: 63 | t.Fatal("Unexpected " + r.Method + " " + r.RequestURI) 64 | } 65 | })) 66 | defer server.Close() 67 | 68 | var name = "Test Warehouse Source Group" 69 | 70 | resource.Test(t, resource.TestCase{ 71 | IsUnitTest: true, 72 | ProviderFactories: map[string]func() (*schema.Provider, error){ 73 | "logtail": func() (*schema.Provider, error) { 74 | return New(WithURL(server.URL)), nil 75 | }, 76 | }, 77 | Steps: []resource.TestStep{ 78 | // Step 1 - create. 79 | { 80 | Config: fmt.Sprintf(` 81 | provider "logtail" { 82 | api_token = "foo" 83 | } 84 | 85 | resource "logtail_warehouse_source_group" "this" { 86 | name = "%s" 87 | } 88 | `, name), 89 | Check: resource.ComposeTestCheckFunc( 90 | resource.TestCheckResourceAttrSet("logtail_warehouse_source_group.this", "id"), 91 | resource.TestCheckResourceAttr("logtail_warehouse_source_group.this", "name", name), 92 | ), 93 | }, 94 | // Step 2 - update. 95 | { 96 | Config: fmt.Sprintf(` 97 | provider "logtail" { 98 | api_token = "foo" 99 | } 100 | 101 | resource "logtail_warehouse_source_group" "this" { 102 | name = "%s Updated" 103 | } 104 | `, name), 105 | Check: resource.ComposeTestCheckFunc( 106 | resource.TestCheckResourceAttrSet("logtail_warehouse_source_group.this", "id"), 107 | resource.TestCheckResourceAttr("logtail_warehouse_source_group.this", "name", name+" Updated"), 108 | ), 109 | }, 110 | // Step 3 - destroy. 
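// This step re-imports the resource by its ID and verifies the imported attributes match the state; the test framework then destroys everything automatically.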
111 | { 112 | ResourceName: "logtail_warehouse_source_group.this", 113 | ImportState: true, 114 | ImportStateVerify: true, 115 | }, 116 | }, 117 | }) 118 | } 119 | -------------------------------------------------------------------------------- /internal/provider/resource_errors_application_group_test.go: -------------------------------------------------------------------------------- 1 | package provider 2 | 3 | import ( 4 | "encoding/json" 5 | "fmt" 6 | "io" 7 | "net/http" 8 | "net/http/httptest" 9 | "sync/atomic" 10 | "testing" 11 | 12 | "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" 13 | "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" 14 | ) 15 | 16 | func TestResourceErrorsApplicationGroup(t *testing.T) { 17 | var data atomic.Value 18 | server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { 19 | t.Log("Received " + r.Method + " " + r.RequestURI) 20 | 21 | if r.Header.Get("Authorization") != "Bearer foo" { 22 | t.Fatal("Not authorized: " + r.Header.Get("Authorization")) 23 | } 24 | 25 | prefix := "/api/v1/application-groups" 26 | id := "1" 27 | 28 | switch { 29 | case r.Method == http.MethodPost && r.RequestURI == prefix: 30 | body, err := io.ReadAll(r.Body) 31 | if err != nil { 32 | t.Fatal(err) 33 | } 34 | 35 | data.Store(body) 36 | w.WriteHeader(http.StatusCreated) 37 | _, _ = w.Write([]byte(fmt.Sprintf(`{"data":{"id":%q,"attributes":%s}}`, id, body))) 38 | case r.Method == http.MethodGet && r.RequestURI == prefix+"/"+id: 39 | _, _ = w.Write([]byte(fmt.Sprintf(`{"data":{"id":%q,"attributes":%s}}`, id, data.Load().([]byte)))) 40 | case r.Method == http.MethodPatch && r.RequestURI == prefix+"/"+id: 41 | body, err := io.ReadAll(r.Body) 42 | if err != nil { 43 | t.Fatal(err) 44 | } 45 | patch := make(map[string]interface{}) 46 | if err = json.Unmarshal(data.Load().([]byte), &patch); err != nil { 47 | t.Fatal(err) 48 | } 49 | if err = json.Unmarshal(body, &patch); err != nil { 50 | t.Fatal(err) 51 | } 52 | patched, err := json.Marshal(patch) 53 | if err != nil { 54 | t.Fatal(err) 55 | } 56 | 57 | data.Store(patched) 58 | _, _ = w.Write([]byte(fmt.Sprintf(`{"data":{"id":%q,"attributes":%s}}`, id, patched))) 59 | case r.Method == http.MethodDelete && r.RequestURI == prefix+"/"+id: 60 | w.WriteHeader(http.StatusNoContent) 61 | data.Store([]byte(nil)) 62 | default: 63 | t.Fatal("Unexpected " + r.Method + " " + r.RequestURI) 64 | } 65 | })) 66 | defer server.Close() 67 | 68 | var name = "Test Errors Application Group" 69 | 70 | resource.Test(t, resource.TestCase{ 71 | IsUnitTest: true, 72 | ProviderFactories: map[string]func() (*schema.Provider, error){ 73 | "logtail": func() (*schema.Provider, error) { 74 | return New(WithURL(server.URL)), nil 75 | }, 76 | }, 77 | Steps: []resource.TestStep{ 78 | // Step 1 - create. 79 | { 80 | Config: fmt.Sprintf(` 81 | provider "logtail" { 82 | api_token = "foo" 83 | } 84 | 85 | resource "logtail_errors_application_group" "this" { 86 | name = "%s" 87 | } 88 | `, name), 89 | Check: resource.ComposeTestCheckFunc( 90 | resource.TestCheckResourceAttrSet("logtail_errors_application_group.this", "id"), 91 | resource.TestCheckResourceAttr("logtail_errors_application_group.this", "name", name), 92 | ), 93 | }, 94 | // Step 2 - update. 
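// The mock PATCH handler merges the request body into the previously stored attributes, so the renamed group is returned on the follow-up read.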
95 | { 96 | Config: fmt.Sprintf(` 97 | provider "logtail" { 98 | api_token = "foo" 99 | } 100 | 101 | resource "logtail_errors_application_group" "this" { 102 | name = "%s Updated" 103 | } 104 | `, name), 105 | Check: resource.ComposeTestCheckFunc( 106 | resource.TestCheckResourceAttrSet("logtail_errors_application_group.this", "id"), 107 | resource.TestCheckResourceAttr("logtail_errors_application_group.this", "name", name+" Updated"), 108 | ), 109 | }, 110 | // Step 3 - destroy. 111 | { 112 | ResourceName: "logtail_errors_application_group.this", 113 | ImportState: true, 114 | ImportStateVerify: true, 115 | }, 116 | }, 117 | }) 118 | } 119 | -------------------------------------------------------------------------------- /internal/provider/resource_warehouse_time_series_test.go: -------------------------------------------------------------------------------- 1 | package provider 2 | 3 | import ( 4 | "encoding/json" 5 | "fmt" 6 | "io" 7 | "net/http" 8 | "net/http/httptest" 9 | "strings" 10 | "sync/atomic" 11 | "testing" 12 | 13 | "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" 14 | "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" 15 | ) 16 | 17 | func TestResourceWarehouseTimeSeries(t *testing.T) { 18 | var data atomic.Value 19 | server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { 20 | t.Log("Received " + r.Method + " " + r.RequestURI) 21 | 22 | if r.Header.Get("Authorization") != "Bearer foo" { 23 | t.Fatal("Not authorized: " + r.Header.Get("Authorization")) 24 | } 25 | 26 | prefix := "/api/v1/sources/123/time_series" 27 | id := "1" 28 | 29 | switch { 30 | case r.Method == http.MethodPost && r.RequestURI == prefix: 31 | body, err := io.ReadAll(r.Body) 32 | if err != nil { 33 | t.Fatal(err) 34 | } 35 | 36 | data.Store(body) 37 | w.WriteHeader(http.StatusCreated) 38 | _, _ = w.Write([]byte(fmt.Sprintf(`{"data":{"id":%q,"attributes":%s}}`, id, body))) 39 | case r.Method == http.MethodGet && r.RequestURI == prefix+"/"+id: 40 | _, _ = w.Write([]byte(fmt.Sprintf(`{"data":{"id":%q,"attributes":%s}}`, id, data.Load().([]byte)))) 41 | case r.Method == http.MethodGet && strings.HasPrefix(r.RequestURI, prefix+"?"): 42 | // Handle list requests for the read operation 43 | _, _ = w.Write([]byte(fmt.Sprintf(`{"data":[{"id":%q,"attributes":%s}],"pagination":{"first":"1","last":"1","prev":null,"next":null}}`, id, data.Load().([]byte)))) 44 | case r.Method == http.MethodPatch && r.RequestURI == prefix+"/"+id: 45 | body, err := io.ReadAll(r.Body) 46 | if err != nil { 47 | t.Fatal(err) 48 | } 49 | patch := make(map[string]interface{}) 50 | if err = json.Unmarshal(data.Load().([]byte), &patch); err != nil { 51 | t.Fatal(err) 52 | } 53 | if err = json.Unmarshal(body, &patch); err != nil { 54 | t.Fatal(err) 55 | } 56 | patched, err := json.Marshal(patch) 57 | if err != nil { 58 | t.Fatal(err) 59 | } 60 | 61 | data.Store(patched) 62 | _, _ = w.Write([]byte(fmt.Sprintf(`{"data":{"id":%q,"attributes":%s}}`, id, patched))) 63 | case r.Method == http.MethodDelete && r.RequestURI == prefix+"/"+id: 64 | w.WriteHeader(http.StatusNoContent) 65 | data.Store([]byte(nil)) 66 | default: 67 | t.Fatal("Unexpected " + r.Method + " " + r.RequestURI) 68 | } 69 | })) 70 | defer server.Close() 71 | 72 | var name = "test_time_series" 73 | 74 | resource.Test(t, resource.TestCase{ 75 | IsUnitTest: true, 76 | ProviderFactories: map[string]func() (*schema.Provider, error){ 77 | "logtail": func() (*schema.Provider, error) { 78 | return New(WithURL(server.URL)), nil 79 
| }, 80 | }, 81 | Steps: []resource.TestStep{ 82 | // Step 1 - create. 83 | { 84 | Config: fmt.Sprintf(` 85 | provider "logtail" { 86 | api_token = "foo" 87 | } 88 | 89 | resource "logtail_warehouse_time_series" "this" { 90 | source_id = "123" 91 | name = "%s" 92 | type = "string_low_cardinality" 93 | sql_expression = "JSONExtract(raw, 'event_type', 'Nullable(String)')" 94 | aggregations = [] 95 | } 96 | `, name), 97 | Check: resource.ComposeTestCheckFunc( 98 | resource.TestCheckResourceAttrSet("logtail_warehouse_time_series.this", "id"), 99 | resource.TestCheckResourceAttr("logtail_warehouse_time_series.this", "source_id", "123"), 100 | resource.TestCheckResourceAttr("logtail_warehouse_time_series.this", "name", name), 101 | resource.TestCheckResourceAttr("logtail_warehouse_time_series.this", "type", "string_low_cardinality"), 102 | resource.TestCheckResourceAttr("logtail_warehouse_time_series.this", "sql_expression", "JSONExtract(raw, 'event_type', 'Nullable(String)')"), 103 | ), 104 | }, 105 | }, 106 | }) 107 | } 108 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/betterstackhq/terraform-provider-logtail 2 | 3 | go 1.23 4 | 5 | require ( 6 | github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320 7 | github.com/hashicorp/terraform-plugin-docs v0.21.0 8 | github.com/hashicorp/terraform-plugin-sdk/v2 v2.36.1 9 | golang.org/x/net v0.34.0 10 | ) 11 | 12 | require ( 13 | github.com/BurntSushi/toml v1.2.1 // indirect 14 | github.com/Kunde21/markdownfmt/v3 v3.1.0 // indirect 15 | github.com/Masterminds/goutils v1.1.1 // indirect 16 | github.com/Masterminds/semver/v3 v3.2.0 // indirect 17 | github.com/Masterminds/sprig/v3 v3.2.3 // indirect 18 | github.com/ProtonMail/go-crypto v1.1.3 // indirect 19 | github.com/agext/levenshtein v1.2.2 // indirect 20 | github.com/apparentlymart/go-textseg/v15 v15.0.0 // indirect 21 | github.com/armon/go-radix v1.0.0 // indirect 22 | github.com/bgentry/speakeasy v0.1.0 // indirect 23 | github.com/bmatcuk/doublestar/v4 v4.8.1 // indirect 24 | github.com/cloudflare/circl v1.3.7 // indirect 25 | github.com/fatih/color v1.16.0 // indirect 26 | github.com/golang/protobuf v1.5.4 // indirect 27 | github.com/google/go-cmp v0.6.0 // indirect 28 | github.com/google/uuid v1.6.0 // indirect 29 | github.com/hashicorp/cli v1.1.7 // indirect 30 | github.com/hashicorp/errwrap v1.1.0 // indirect 31 | github.com/hashicorp/go-checkpoint v0.5.0 // indirect 32 | github.com/hashicorp/go-cleanhttp v0.5.2 // indirect 33 | github.com/hashicorp/go-hclog v1.6.3 // indirect 34 | github.com/hashicorp/go-multierror v1.1.1 // indirect 35 | github.com/hashicorp/go-plugin v1.6.2 // indirect 36 | github.com/hashicorp/go-retryablehttp v0.7.7 // indirect 37 | github.com/hashicorp/go-uuid v1.0.3 // indirect 38 | github.com/hashicorp/go-version v1.7.0 // indirect 39 | github.com/hashicorp/hc-install v0.9.1 // indirect 40 | github.com/hashicorp/hcl/v2 v2.23.0 // indirect 41 | github.com/hashicorp/logutils v1.0.0 // indirect 42 | github.com/hashicorp/terraform-exec v0.22.0 // indirect 43 | github.com/hashicorp/terraform-json v0.24.0 // indirect 44 | github.com/hashicorp/terraform-plugin-go v0.26.0 // indirect 45 | github.com/hashicorp/terraform-plugin-log v0.9.0 // indirect 46 | github.com/hashicorp/terraform-registry-address v0.2.4 // indirect 47 | github.com/hashicorp/terraform-svchost v0.1.1 // indirect 48 | github.com/hashicorp/yamux v0.1.1 // 
indirect 49 | github.com/huandu/xstrings v1.3.3 // indirect 50 | github.com/imdario/mergo v0.3.15 // indirect 51 | github.com/mattn/go-colorable v0.1.14 // indirect 52 | github.com/mattn/go-isatty v0.0.20 // indirect 53 | github.com/mattn/go-runewidth v0.0.9 // indirect 54 | github.com/mitchellh/copystructure v1.2.0 // indirect 55 | github.com/mitchellh/go-testing-interface v1.14.1 // indirect 56 | github.com/mitchellh/go-wordwrap v1.0.0 // indirect 57 | github.com/mitchellh/mapstructure v1.5.0 // indirect 58 | github.com/mitchellh/reflectwalk v1.0.2 // indirect 59 | github.com/oklog/run v1.0.0 // indirect 60 | github.com/posener/complete v1.2.3 // indirect 61 | github.com/shopspring/decimal v1.3.1 // indirect 62 | github.com/spf13/cast v1.5.0 // indirect 63 | github.com/vmihailenco/msgpack v4.0.4+incompatible // indirect 64 | github.com/vmihailenco/msgpack/v5 v5.4.1 // indirect 65 | github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect 66 | github.com/yuin/goldmark v1.7.7 // indirect 67 | github.com/yuin/goldmark-meta v1.1.0 // indirect 68 | github.com/zclconf/go-cty v1.16.2 // indirect 69 | go.abhg.dev/goldmark/frontmatter v0.2.0 // indirect 70 | golang.org/x/crypto v0.33.0 // indirect 71 | golang.org/x/exp v0.0.0-20230626212559-97b1e661b5df // indirect 72 | golang.org/x/mod v0.22.0 // indirect 73 | golang.org/x/sync v0.11.0 // indirect 74 | golang.org/x/sys v0.30.0 // indirect 75 | golang.org/x/text v0.22.0 // indirect 76 | golang.org/x/tools v0.22.0 // indirect 77 | google.golang.org/appengine v1.6.8 // indirect 78 | google.golang.org/genproto v0.0.0-20200711021454-869866162049 // indirect 79 | google.golang.org/grpc v1.69.4 // indirect 80 | google.golang.org/protobuf v1.36.3 // indirect 81 | gopkg.in/yaml.v2 v2.3.0 // indirect 82 | gopkg.in/yaml.v3 v3.0.1 // indirect 83 | ) 84 | -------------------------------------------------------------------------------- /internal/provider/resource_source_group.go: -------------------------------------------------------------------------------- 1 | package provider 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "net/url" 7 | "reflect" 8 | 9 | "github.com/hashicorp/terraform-plugin-sdk/v2/diag" 10 | "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" 11 | ) 12 | 13 | var sourceGroupSchema = map[string]*schema.Schema{ 14 | "team_name": { 15 | Description: "Used to specify the team the resource should be created in when using global tokens.", 16 | Type: schema.TypeString, 17 | Optional: true, 18 | Default: nil, 19 | DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool { 20 | return d.Id() != "" 21 | }, 22 | }, 23 | "id": { 24 | Description: "The ID of this source group.", 25 | Type: schema.TypeString, 26 | Optional: false, 27 | Computed: true, 28 | }, 29 | "name": { 30 | Description: "The name of this source group.", 31 | Type: schema.TypeString, 32 | Required: true, 33 | }, 34 | "sort_index": { 35 | Description: "The sort index of this source group.", 36 | Type: schema.TypeInt, 37 | Optional: true, 38 | }, 39 | "created_at": { 40 | Description: "The time when this source group was created.", 41 | Type: schema.TypeString, 42 | Optional: false, 43 | Computed: true, 44 | }, 45 | "updated_at": { 46 | Description: "The time when this source group was updated.", 47 | Type: schema.TypeString, 48 | Optional: false, 49 | Computed: true, 50 | }, 51 | } 52 | 53 | func newSourceGroupResource() *schema.Resource { 54 | return &schema.Resource{ 55 | CreateContext: sourceGroupCreate, 56 | ReadContext: sourceGroupRead, 57 | UpdateContext: 
sourceGroupUpdate, 58 | DeleteContext: sourceGroupDelete, 59 | Importer: &schema.ResourceImporter{ 60 | StateContext: schema.ImportStatePassthroughContext, 61 | }, 62 | Description: "This resource allows you to create, modify, and delete your Source Groups. For more information about the Source Groups API check https://betterstack.com/docs/logs/api/", 63 | Schema: sourceGroupSchema, 64 | } 65 | } 66 | 67 | type sourceGroup struct { 68 | Name *string `json:"name,omitempty"` 69 | SortIndex *int `json:"sort_index,omitempty"` 70 | CreatedAt *string `json:"created_at,omitempty"` 71 | UpdatedAt *string `json:"updated_at,omitempty"` 72 | TeamName *string `json:"team_name,omitempty"` 73 | } 74 | 75 | type sourceGroupHTTPResponse struct { 76 | Data struct { 77 | ID string `json:"id"` 78 | Attributes sourceGroup `json:"attributes"` 79 | } `json:"data"` 80 | } 81 | 82 | func sourceGroupRef(in *sourceGroup) []struct { 83 | k string 84 | v interface{} 85 | } { 86 | return []struct { 87 | k string 88 | v interface{} 89 | }{ 90 | {k: "name", v: &in.Name}, 91 | {k: "sort_index", v: &in.SortIndex}, 92 | {k: "created_at", v: &in.CreatedAt}, 93 | {k: "updated_at", v: &in.UpdatedAt}, 94 | } 95 | } 96 | 97 | func sourceGroupCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { 98 | var in sourceGroup 99 | for _, e := range sourceGroupRef(&in) { 100 | load(d, e.k, e.v) 101 | } 102 | 103 | load(d, "team_name", &in.TeamName) 104 | 105 | var out sourceGroupHTTPResponse 106 | if err := resourceCreate(ctx, meta, "/api/v1/source-groups", &in, &out); err != nil { 107 | return err 108 | } 109 | d.SetId(out.Data.ID) 110 | return sourceGroupCopyAttrs(d, &out.Data.Attributes) 111 | } 112 | 113 | func sourceGroupRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { 114 | var out sourceGroupHTTPResponse 115 | if err, ok := resourceReadWithBaseURL(ctx, meta, meta.(*client).TelemetryBaseURL(), fmt.Sprintf("/api/v1/source-groups/%s", url.PathEscape(d.Id())), &out); err != nil { 116 | return err 117 | } else if !ok { 118 | d.SetId("") // Force "create" on 404. 
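// An empty ID drops the resource from state, so the next plan proposes recreating it instead of erroring on the missing record.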
119 | return nil 120 | } 121 | return sourceGroupCopyAttrs(d, &out.Data.Attributes) 122 | } 123 | 124 | func sourceGroupCopyAttrs(d *schema.ResourceData, in *sourceGroup) diag.Diagnostics { 125 | var derr diag.Diagnostics 126 | for _, e := range sourceGroupRef(in) { 127 | if err := d.Set(e.k, reflect.Indirect(reflect.ValueOf(e.v)).Interface()); err != nil { 128 | derr = append(derr, diag.FromErr(err)[0]) 129 | } 130 | } 131 | return derr 132 | } 133 | 134 | func sourceGroupUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { 135 | var in sourceGroup 136 | for _, e := range sourceGroupRef(&in) { 137 | if d.HasChange(e.k) { 138 | load(d, e.k, e.v) 139 | } 140 | } 141 | return resourceUpdate(ctx, meta, fmt.Sprintf("/api/v1/source-groups/%s", url.PathEscape(d.Id())), &in) 142 | } 143 | 144 | func sourceGroupDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { 145 | return resourceDelete(ctx, meta, fmt.Sprintf("/api/v1/source-groups/%s", url.PathEscape(d.Id()))) 146 | } 147 | -------------------------------------------------------------------------------- /internal/provider/data_connection.go: -------------------------------------------------------------------------------- 1 | package provider 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "net/url" 7 | 8 | "github.com/hashicorp/terraform-plugin-sdk/v2/diag" 9 | "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" 10 | ) 11 | 12 | func newConnectionDataSource() *schema.Resource { 13 | return &schema.Resource{ 14 | ReadContext: dataSourceConnectionRead, 15 | Description: "This data source allows you to retrieve information about ClickHouse connections. For more information about the Connection API check https://betterstack.com/docs/logs/api/connections/", 16 | Schema: map[string]*schema.Schema{ 17 | "id": { 18 | Description: "The ID of the connection to retrieve.", 19 | Type: schema.TypeString, 20 | Optional: true, 21 | Computed: true, 22 | }, 23 | "client_type": { 24 | Description: "Type of client connection.", 25 | Type: schema.TypeString, 26 | Computed: true, 27 | }, 28 | "team_names": { 29 | Description: "Array of team names associated with the connection.", 30 | Type: schema.TypeList, 31 | Computed: true, 32 | Elem: &schema.Schema{ 33 | Type: schema.TypeString, 34 | }, 35 | }, 36 | "team_ids": { 37 | Description: "Array of team IDs associated with the connection.", 38 | Type: schema.TypeList, 39 | Computed: true, 40 | Elem: &schema.Schema{ 41 | Type: schema.TypeInt, 42 | }, 43 | }, 44 | "data_region": { 45 | Description: "Data region of the connection.", 46 | Type: schema.TypeString, 47 | Computed: true, 48 | }, 49 | "ip_allowlist": { 50 | Description: "Array of IP addresses allowed to use this connection.", 51 | Type: schema.TypeList, 52 | Computed: true, 53 | Elem: &schema.Schema{ 54 | Type: schema.TypeString, 55 | }, 56 | }, 57 | "valid_until": { 58 | Description: "Timestamp when the connection expires.", 59 | Type: schema.TypeString, 60 | Computed: true, 61 | }, 62 | "note": { 63 | Description: "Descriptive note for the connection.", 64 | Type: schema.TypeString, 65 | Computed: true, 66 | }, 67 | "host": { 68 | Description: "The connection hostname.", 69 | Type: schema.TypeString, 70 | Computed: true, 71 | }, 72 | "port": { 73 | Description: "The connection port.", 74 | Type: schema.TypeInt, 75 | Computed: true, 76 | }, 77 | "username": { 78 | Description: "The connection username.", 79 | Type: schema.TypeString, 80 | Computed: true, 81 | }, 82 | "created_at": 
{ 83 | Description: "The time when this connection was created.", 84 | Type: schema.TypeString, 85 | Computed: true, 86 | }, 87 | "created_by": { 88 | Description: "Information about the user who created this connection.", 89 | Type: schema.TypeMap, 90 | Computed: true, 91 | Elem: &schema.Schema{ 92 | Type: schema.TypeString, 93 | }, 94 | }, 95 | "sample_query": { 96 | Description: "A sample query showing how to use this connection.", 97 | Type: schema.TypeString, 98 | Computed: true, 99 | }, 100 | "data_sources": { 101 | Description: "List of available data sources for this connection.", 102 | Type: schema.TypeList, 103 | Computed: true, 104 | Elem: &schema.Resource{ 105 | Schema: map[string]*schema.Schema{ 106 | "source_name": { 107 | Type: schema.TypeString, 108 | Computed: true, 109 | }, 110 | "source_id": { 111 | Type: schema.TypeInt, 112 | Computed: true, 113 | }, 114 | "team_name": { 115 | Type: schema.TypeString, 116 | Computed: true, 117 | }, 118 | "data_sources": { 119 | Type: schema.TypeList, 120 | Computed: true, 121 | Elem: &schema.Schema{ 122 | Type: schema.TypeString, 123 | }, 124 | }, 125 | }, 126 | }, 127 | }, 128 | }, 129 | } 130 | } 131 | 132 | func dataSourceConnectionRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { 133 | id := d.Get("id").(string) 134 | 135 | // If ID is provided, fetch specific connection 136 | if id != "" { 137 | var singleOut connectionHTTPResponse 138 | if err, ok := resourceReadWithBaseURL(ctx, meta, meta.(*client).TelemetryBaseURL(), fmt.Sprintf("/api/v1/connections/%s", url.PathEscape(id)), &singleOut); err != nil { 139 | if !ok { 140 | return diag.Errorf("connection with ID %s not found", id) 141 | } 142 | return err 143 | } 144 | d.SetId(singleOut.Data.ID) 145 | return connectionCopyAttrs(d, &singleOut.Data.Attributes) 146 | } 147 | 148 | // Otherwise, list connections (this would return all, which might be too many) 149 | return diag.Errorf("connection data source requires an ID to be specified") 150 | } 151 | -------------------------------------------------------------------------------- /internal/provider/resource_connection_test.go: -------------------------------------------------------------------------------- 1 | package provider 2 | 3 | import ( 4 | "fmt" 5 | "io" 6 | "net/http" 7 | "net/http/httptest" 8 | "sync/atomic" 9 | "testing" 10 | 11 | "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" 12 | "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" 13 | ) 14 | 15 | func TestResourceConnection(t *testing.T) { 16 | var data atomic.Value 17 | server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { 18 | if r.Header.Get("Authorization") != "Bearer foo" { 19 | t.Fatal("Not authorized: " + r.Header.Get("Authorization")) 20 | } 21 | 22 | prefix := "/api/v1/connections" 23 | id := "1" 24 | 25 | switch { 26 | case r.Method == http.MethodPost && r.RequestURI == prefix: 27 | body, err := io.ReadAll(r.Body) 28 | if err != nil { 29 | t.Fatal(err) 30 | } 31 | // Inject computed fields that API returns 32 | body = inject(t, body, "host", "us-east-9-connect.betterstackdata.com") 33 | body = inject(t, body, "port", 443) 34 | body = inject(t, body, "username", "u3PM1B7BEJgqHXBymIpnfCAs3K02XIZaE") 35 | body = inject(t, body, "password", "XNFT7RaKtjCyZiQIeR782kykeAxOa4U1eLaKxyd7KDN58xlgCwZ0wEkr7YdoBvXh") 36 | body = inject(t, body, "created_at", "2025-11-26T14:00:00.000Z") 37 | body = inject(t, body, "updated_at", "2025-11-26T14:00:00.000Z") 38 | body = 
inject(t, body, "sample_query", "curl command example") 39 | body = inject(t, body, "created_by", map[string]interface{}{"id": "123", "email": "test@example.com"}) 40 | body = inject(t, body, "data_sources", []interface{}{}) 41 | 42 | data.Store(body) 43 | w.WriteHeader(http.StatusCreated) 44 | _, _ = w.Write([]byte(fmt.Sprintf(`{"data":{"id":%q,"attributes":%s}}`, id, body))) 45 | case r.Method == http.MethodGet && r.RequestURI == prefix+"/"+id: 46 | // For GET, add the complex fields that might be missing 47 | body := data.Load().([]byte) 48 | body = inject(t, body, "created_by", map[string]interface{}{"id": "123", "email": "test@example.com"}) 49 | body = inject(t, body, "data_sources", []interface{}{}) 50 | _, _ = w.Write([]byte(fmt.Sprintf(`{"data":{"id":%q,"attributes":%s}}`, id, body))) 51 | case r.Method == http.MethodDelete && r.RequestURI == prefix+"/"+id: 52 | w.WriteHeader(http.StatusNoContent) 53 | data.Store([]byte(nil)) 54 | default: 55 | t.Fatal("Unexpected " + r.Method + " " + r.RequestURI) 56 | } 57 | })) 58 | defer server.Close() 59 | 60 | resource.Test(t, resource.TestCase{ 61 | IsUnitTest: true, 62 | ProviderFactories: map[string]func() (*schema.Provider, error){ 63 | "logtail": func() (*schema.Provider, error) { 64 | return New(WithURL(server.URL)), nil 65 | }, 66 | }, 67 | Steps: []resource.TestStep{ 68 | // Step 1 - create. 69 | { 70 | Config: ` 71 | provider "logtail" { 72 | api_token = "foo" 73 | } 74 | 75 | resource "logtail_connection" "this" { 76 | client_type = "clickhouse" 77 | team_names = ["Test Team"] 78 | data_region = "us_east" 79 | ip_allowlist = ["192.168.1.0/24", "10.0.0.1"] 80 | valid_until = "2025-12-31T23:59:59Z" 81 | note = "Test connection" 82 | } 83 | `, 84 | Check: resource.ComposeTestCheckFunc( 85 | resource.TestCheckResourceAttrSet("logtail_connection.this", "id"), 86 | resource.TestCheckResourceAttr("logtail_connection.this", "client_type", "clickhouse"), 87 | resource.TestCheckResourceAttr("logtail_connection.this", "team_names.#", "1"), 88 | resource.TestCheckResourceAttr("logtail_connection.this", "team_names.0", "Test Team"), 89 | resource.TestCheckResourceAttr("logtail_connection.this", "data_region", "us_east"), // Should preserve user value, not API normalized value 90 | resource.TestCheckResourceAttr("logtail_connection.this", "ip_allowlist.#", "2"), 91 | resource.TestCheckResourceAttr("logtail_connection.this", "ip_allowlist.0", "192.168.1.0/24"), 92 | resource.TestCheckResourceAttr("logtail_connection.this", "ip_allowlist.1", "10.0.0.1"), 93 | resource.TestCheckResourceAttr("logtail_connection.this", "valid_until", "2025-12-31T23:59:59Z"), // Should preserve user value 94 | resource.TestCheckResourceAttr("logtail_connection.this", "note", "Test connection"), 95 | resource.TestCheckResourceAttr("logtail_connection.this", "host", "us-east-9-connect.betterstackdata.com"), 96 | resource.TestCheckResourceAttr("logtail_connection.this", "port", "443"), 97 | resource.TestCheckResourceAttr("logtail_connection.this", "username", "u3PM1B7BEJgqHXBymIpnfCAs3K02XIZaE"), 98 | resource.TestCheckResourceAttr("logtail_connection.this", "password", "XNFT7RaKtjCyZiQIeR782kykeAxOa4U1eLaKxyd7KDN58xlgCwZ0wEkr7YdoBvXh"), 99 | resource.TestCheckResourceAttr("logtail_connection.this", "created_at", "2025-11-26T14:00:00.000Z"), 100 | resource.TestCheckResourceAttr("logtail_connection.this", "sample_query", "curl command example"), 101 | ), 102 | }, 103 | }, 104 | }) 105 | } 106 | 
-------------------------------------------------------------------------------- /docs/resources/source.md: -------------------------------------------------------------------------------- 1 | --- 2 | # generated by https://github.com/hashicorp/terraform-plugin-docs 3 | page_title: "logtail_source Resource - terraform-provider-logtail" 4 | subcategory: "" 5 | description: |- 6 | This resource allows you to create, modify, and delete your Sources. For more information about the Sources API check https://betterstack.com/docs/logs/api/list-all-existing-sources/ 7 | --- 8 | 9 | # logtail_source (Resource) 10 | 11 | This resource allows you to create, modify, and delete your Sources. For more information about the Sources API check https://betterstack.com/docs/logs/api/list-all-existing-sources/ 12 | 13 | 14 | 15 | 16 | ## Schema 17 | 18 | ### Required 19 | 20 | - `name` (String) The name of this source. 21 | - `platform` (String) The platform of this source. This value can be set only when you're creating a new source. You can't update this value later. Valid values are: 22 | - `apache2` 23 | - `aws_cloudwatch` 24 | - `aws_ecs` 25 | - `aws_elb` 26 | - `aws_fargate` 27 | - `azure_logs` 28 | - `cloudflare_logpush` 29 | - `cloudflare_worker` 30 | - `datadog_agent` 31 | - `digitalocean` 32 | - `docker` 33 | - `dokku` 34 | - `dotnet` 35 | - `elasticsearch` 36 | - `erlang` 37 | - `filebeat` 38 | - `flights` 39 | - `fluentbit` 40 | - `fluentd` 41 | - `fly_io` 42 | - `go` 43 | - `google_cloud_pubsub` 44 | - `haproxy` 45 | - `heroku` 46 | - `http` 47 | - `java` 48 | - `javascript` 49 | - `kubernetes` 50 | - `logstash` 51 | - `minio` 52 | - `mongodb` 53 | - `mysql` 54 | - `nginx` 55 | - `open_telemetry` 56 | - `php` 57 | - `postgresql` 58 | - `prometheus` 59 | - `prometheus_scrape` 60 | - `python` 61 | - `rabbitmq` 62 | - `redis` 63 | - `render` 64 | - `rsyslog` 65 | - `ruby` 66 | - `syslog-ng` 67 | - `traefik` 68 | - `ubuntu` 69 | - `vector` 70 | - `vercel_integration` 71 | 72 | ### Optional 73 | 74 | - `custom_bucket` (Block List, Max: 1) Optional custom bucket configuration for the source. When provided, all fields (name, endpoint, access_key_id, secret_access_key) are required. (see [below for nested schema](#nestedblock--custom_bucket)) 75 | - `data_region` (String) Data region or private cluster name to create the source in. Permitted values for most plans are: `us_east`, `us_west`, `germany`, `singapore`. 76 | - `ingesting_paused` (Boolean) This property allows you to temporarily pause data ingesting for this source (e.g., when you are reaching your plan's usage quota and you want to prioritize some sources over others). 77 | - `live_tail_pattern` (String) Freeform text template for formatting Live tail output with columns wrapped in {column} brackets. Example: "PID: {message_json.pid} {level} {message}" 78 | - `logs_retention` (Number) Data retention for logs in days. There might be additional charges for longer retention. 79 | - `metrics_retention` (Number) Data retention for metrics in days. There might be additional charges for longer retention. 80 | - `scrape_frequency_secs` (Number) For scrape platform types, how often to scrape the URLs. 81 | - `scrape_request_basic_auth_password` (String, Sensitive) Basic auth password for scraping. 82 | - `scrape_request_basic_auth_user` (String) Basic auth username for scraping. 83 | - `scrape_request_headers` (List of Map of String) An array of request headers, each containing `name` and `value` fields. 
84 | - `scrape_urls` (List of String) For scrape platform types, the set of urls to scrape. 85 | - `source_group_id` (Number) The ID of the source group this source belongs to. 86 | - `team_name` (String) Used to specify the team the resource should be created in when using global tokens. 87 | - `vrl_transformation` (String) The VRL code that's used to transform events. Read more about [VRL transformations](https://betterstack.com/docs/logs/using-logtail/transforming-ingested-data/logs-vrl/). 88 | 89 | ### Read-Only 90 | 91 | - `created_at` (String) The time when this monitor group was created. 92 | - `id` (String) The ID of this source. 93 | - `ingesting_host` (String) The host where the logs or metrics should be sent. See [documentation](https://betterstack.com/docs/logs/start/) for your specific source platform for details. 94 | - `table_name` (String) The table name generated for this source. 95 | - `team_id` (String) The team ID for this resource. Can be used with table_name in [Query API](https://betterstack.com/docs/logs/query-api/connect-remotely/). 96 | - `token` (String) The token of this source. This token is used to identify and route the data you will send to Better Stack. 97 | - `updated_at` (String) The time when this monitor group was updated. 98 | 99 | 100 | ### Nested Schema for `custom_bucket` 101 | 102 | Required: 103 | 104 | - `access_key_id` (String) Access key ID 105 | - `endpoint` (String) Bucket endpoint 106 | - `name` (String) Bucket name 107 | - `secret_access_key` (String, Sensitive) Secret access key 108 | 109 | Optional: 110 | 111 | - `keep_data_after_retention` (Boolean) Whether we should keep data in the bucket after the retention period. 112 | -------------------------------------------------------------------------------- /docs/data-sources/source.md: -------------------------------------------------------------------------------- 1 | --- 2 | # generated by https://github.com/hashicorp/terraform-plugin-docs 3 | page_title: "logtail_source Data Source - terraform-provider-logtail" 4 | subcategory: "" 5 | description: |- 6 | This Data Source allows you to look up existing Sources using their table name. The table name is shown on the Source settings page on telemetry.betterstack.com or you can list all your existing sources via the Sources API https://betterstack.com/docs/logs/api/list-all-existing-sources/. 7 | --- 8 | 9 | # logtail_source (Data Source) 10 | 11 | This Data Source allows you to look up existing Sources using their table name. The table name is shown on the Source settings page on telemetry.betterstack.com or you can list all your existing sources via the [Sources API](https://betterstack.com/docs/logs/api/list-all-existing-sources/). 12 | 13 | 14 | 15 | 16 | ## Schema 17 | 18 | ### Required 19 | 20 | - `table_name` (String) The table name generated for this source. 21 | 22 | ### Read-Only 23 | 24 | - `created_at` (String) The time when this monitor group was created. 25 | - `custom_bucket` (List of Object) Optional custom bucket configuration for the source. When provided, all fields (name, endpoint, access_key_id, secret_access_key) are required. (see [below for nested schema](#nestedatt--custom_bucket)) 26 | - `data_region` (String) Data region or private cluster name to create the source in. Permitted values for most plans are: `us_east`, `us_west`, `germany`, `singapore`. 27 | - `id` (String) The ID of this source. 28 | - `ingesting_host` (String) The host where the logs or metrics should be sent. 
See [documentation](https://betterstack.com/docs/logs/start/) for your specific source platform for details. 29 | - `ingesting_paused` (Boolean) This property allows you to temporarily pause data ingesting for this source (e.g., when you are reaching your plan's usage quota and you want to prioritize some sources over others). 30 | - `live_tail_pattern` (String) Freeform text template for formatting Live tail output with columns wrapped in {column} brackets. Example: "PID: {message_json.pid} {level} {message}" 31 | - `logs_retention` (Number) Data retention for logs in days. There might be additional charges for longer retention. 32 | - `metrics_retention` (Number) Data retention for metrics in days. There might be additional charges for longer retention. 33 | - `name` (String) The name of this source. 34 | - `platform` (String) The platform of this source. This value can be set only when you're creating a new source. You can't update this value later. Valid values are: 35 | - `apache2` 36 | - `aws_cloudwatch` 37 | - `aws_ecs` 38 | - `aws_elb` 39 | - `aws_fargate` 40 | - `azure_logs` 41 | - `cloudflare_logpush` 42 | - `cloudflare_worker` 43 | - `datadog_agent` 44 | - `digitalocean` 45 | - `docker` 46 | - `dokku` 47 | - `dotnet` 48 | - `elasticsearch` 49 | - `erlang` 50 | - `filebeat` 51 | - `flights` 52 | - `fluentbit` 53 | - `fluentd` 54 | - `fly_io` 55 | - `go` 56 | - `google_cloud_pubsub` 57 | - `haproxy` 58 | - `heroku` 59 | - `http` 60 | - `java` 61 | - `javascript` 62 | - `kubernetes` 63 | - `logstash` 64 | - `minio` 65 | - `mongodb` 66 | - `mysql` 67 | - `nginx` 68 | - `open_telemetry` 69 | - `php` 70 | - `postgresql` 71 | - `prometheus` 72 | - `prometheus_scrape` 73 | - `python` 74 | - `rabbitmq` 75 | - `redis` 76 | - `render` 77 | - `rsyslog` 78 | - `ruby` 79 | - `syslog-ng` 80 | - `traefik` 81 | - `ubuntu` 82 | - `vector` 83 | - `vercel_integration` 84 | - `scrape_frequency_secs` (Number) For scrape platform types, how often to scrape the URLs. 85 | - `scrape_request_basic_auth_password` (String, Sensitive) Basic auth password for scraping. 86 | - `scrape_request_basic_auth_user` (String) Basic auth username for scraping. 87 | - `scrape_request_headers` (List of Map of String) An array of request headers, each containing `name` and `value` fields. 88 | - `scrape_urls` (List of String) For scrape platform types, the set of urls to scrape. 89 | - `source_group_id` (Number) The ID of the source group this source belongs to. 90 | - `team_id` (String) The team ID for this resource. Can be used with table_name in [Query API](https://betterstack.com/docs/logs/query-api/connect-remotely/). 91 | - `team_name` (String) Used to specify the team the resource should be created in when using global tokens. 92 | - `token` (String) The token of this source. This token is used to identify and route the data you will send to Better Stack. 93 | - `updated_at` (String) The time when this monitor group was updated. 94 | - `vrl_transformation` (String) The VRL code that's used to transform events. Read more about [VRL transformations](https://betterstack.com/docs/logs/using-logtail/transforming-ingested-data/logs-vrl/). 
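
For example, an existing source can be looked up by its table name and its read-only attributes referenced elsewhere in the configuration; the table name below is illustrative:

```terraform
data "logtail_source" "this" {
  table_name = "t123456_example_logs" # illustrative value
}

output "logtail_source_token" {
  value = data.logtail_source.this.token
}
```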
95 | 96 | 97 | ### Nested Schema for `custom_bucket` 98 | 99 | Read-Only: 100 | 101 | - `access_key_id` (String) 102 | - `endpoint` (String) 103 | - `keep_data_after_retention` (Boolean) 104 | - `name` (String) 105 | - `secret_access_key` (String) 106 | -------------------------------------------------------------------------------- /internal/provider/data_warehouse_embedding.go: -------------------------------------------------------------------------------- 1 | package provider 2 | 3 | import ( 4 | "context" 5 | "encoding/json" 6 | "fmt" 7 | "io" 8 | "net/url" 9 | 10 | "github.com/hashicorp/terraform-plugin-sdk/v2/diag" 11 | "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" 12 | ) 13 | 14 | func newWarehouseEmbeddingDataSource() *schema.Resource { 15 | return &schema.Resource{ 16 | ReadContext: dataSourceWarehouseEmbeddingRead, 17 | Description: "This data source allows you to retrieve information about a Warehouse embedding. For more information about the Warehouse Embeddings API check https://betterstack.com/docs/warehouse/api/embeddings/", 18 | Schema: map[string]*schema.Schema{ 19 | "id": { 20 | Description: "The ID of the embedding to retrieve.", 21 | Type: schema.TypeString, 22 | Optional: true, 23 | Computed: true, 24 | }, 25 | "source_id": { 26 | Description: "The ID of the warehouse source to filter embeddings by.", 27 | Type: schema.TypeString, 28 | Optional: true, 29 | }, 30 | "embed_from": { 31 | Description: "The source column name containing the text to embed.", 32 | Type: schema.TypeString, 33 | Computed: true, 34 | }, 35 | "embed_to": { 36 | Description: "The target column name where the generated embeddings will be stored.", 37 | Type: schema.TypeString, 38 | Computed: true, 39 | }, 40 | "model": { 41 | Description: "The name of the embedding model to use.", 42 | Type: schema.TypeString, 43 | Computed: true, 44 | }, 45 | "dimension": { 46 | Description: "The vector dimension of the embeddings to generate.", 47 | Type: schema.TypeInt, 48 | Computed: true, 49 | }, 50 | "created_at": { 51 | Description: "The time when this embedding was created.", 52 | Type: schema.TypeString, 53 | Computed: true, 54 | }, 55 | "updated_at": { 56 | Description: "The time when this embedding was last updated.", 57 | Type: schema.TypeString, 58 | Computed: true, 59 | }, 60 | }, 61 | } 62 | } 63 | 64 | func dataSourceWarehouseEmbeddingRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { 65 | sourceID := d.Get("source_id").(string) 66 | id := d.Get("id").(string) 67 | 68 | // If ID is provided, fetch specific embedding (requires source_id) 69 | if id != "" { 70 | if sourceID == "" { 71 | return diag.Errorf("source_id must be specified when looking up embedding by ID") 72 | } 73 | var singleOut warehouseEmbeddingHTTPResponse 74 | if err, ok := resourceReadWithBaseURL(ctx, meta, meta.(*client).WarehouseBaseURL(), fmt.Sprintf("/api/v1/sources/%s/embeddings/%s", url.PathEscape(sourceID), url.PathEscape(id)), &singleOut); err != nil { 75 | if !ok { 76 | return diag.Errorf("embedding with ID %s not found", id) 77 | } 78 | return err 79 | } 80 | d.SetId(singleOut.Data.ID) 81 | return warehouseEmbeddingCopyAttrs(d, &singleOut.Data.Attributes) 82 | } 83 | 84 | // Otherwise, list embeddings for the specified source 85 | if sourceID == "" { 86 | return diag.Errorf("source_id must be specified to list embeddings") 87 | } 88 | 89 | fetch := func(page int) (*warehouseEmbeddingPageHTTPResponse, error) { 90 | params := url.Values{} 91 | if page > 1 { 92 | 
params.Set("page", fmt.Sprintf("%d", page)) 93 | } 94 | 95 | queryString := "" 96 | if len(params) > 0 { 97 | queryString = "?" + params.Encode() 98 | } 99 | 100 | res, err := meta.(*client).do(ctx, "GET", meta.(*client).WarehouseBaseURL(), fmt.Sprintf("/api/v1/sources/%s/embeddings", url.PathEscape(sourceID))+queryString, nil) 101 | if err != nil { 102 | return nil, err 103 | } 104 | defer func() { 105 | _, _ = io.ReadAll(res.Body) 106 | _ = res.Body.Close() 107 | }() 108 | 109 | body, err := io.ReadAll(res.Body) 110 | if err != nil { 111 | return nil, err 112 | } 113 | if res.StatusCode != 200 { 114 | return nil, fmt.Errorf("GET %s returned %d: %s", res.Request.URL.String(), res.StatusCode, string(body)) 115 | } 116 | var pageOut warehouseEmbeddingPageHTTPResponse 117 | if err := json.Unmarshal(body, &pageOut); err != nil { 118 | return nil, err 119 | } 120 | return &pageOut, nil 121 | } 122 | 123 | var allEmbeddings []struct { 124 | ID string `json:"id"` 125 | Attributes warehouseEmbedding `json:"attributes"` 126 | } 127 | page := 1 128 | for { 129 | pageData, err := fetch(page) 130 | if err != nil { 131 | return diag.FromErr(err) 132 | } 133 | for _, e := range pageData.Data { 134 | if sourceID == "" || *e.Attributes.EmbedFrom != "" { // Since we can't filter by embed_from directly, just collect all if no source_id filter 135 | allEmbeddings = append(allEmbeddings, struct { 136 | ID string `json:"id"` 137 | Attributes warehouseEmbedding `json:"attributes"` 138 | }{ 139 | ID: e.ID, 140 | Attributes: e.Attributes, 141 | }) 142 | } 143 | } 144 | if pageData.Pagination.Next == "" { 145 | break 146 | } 147 | page++ 148 | } 149 | 150 | if len(allEmbeddings) == 0 { 151 | return diag.Errorf("no embedding found matching the criteria") 152 | } 153 | if len(allEmbeddings) > 1 { 154 | return diag.Errorf("multiple embeddings found matching the criteria, please specify an ID") 155 | } 156 | 157 | embedding := allEmbeddings[0] 158 | d.SetId(embedding.ID) 159 | return warehouseEmbeddingCopyAttrs(d, &embedding.Attributes) 160 | } 161 | -------------------------------------------------------------------------------- /internal/provider/data_source_test.go: -------------------------------------------------------------------------------- 1 | package provider 2 | 3 | import ( 4 | "fmt" 5 | "net/http" 6 | "net/http/httptest" 7 | "regexp" 8 | "testing" 9 | 10 | "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" 11 | 12 | "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" 13 | ) 14 | 15 | func TestDataMonitor(t *testing.T) { 16 | server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { 17 | t.Log("Received " + r.Method + " " + r.RequestURI) 18 | 19 | if r.Header.Get("Authorization") != "Bearer foo" { 20 | t.Fatal("Not authorized: " + r.Header.Get("Authorization")) 21 | } 22 | 23 | prefix := "/api/v1/sources" 24 | 25 | switch { 26 | case r.Method == http.MethodGet && r.RequestURI == prefix+"?page=1": 27 | _, _ = w.Write([]byte(`{"data":[{"id":"1","attributes":{"name":"Test Source","token":"token123","table_name":"abc", "team_id": 123456,"platform":"ubuntu"}}],"pagination":{"next":"..."}}`)) 28 | case r.Method == http.MethodGet && r.RequestURI == prefix+"?page=2": 29 | _, _ = w.Write([]byte(`{"data":[{"id":"2","attributes":{"name":"Other Test Source","token":"token456","table_name":"def", "team_id": 123456,"platform":"ubuntu"}}],"pagination":{"next":null}}`)) 30 | default: 31 | t.Fatal("Unexpected " + r.Method + " " + r.RequestURI) 32 | } 33 | })) 34 | 
defer server.Close() 35 | 36 | var table_name = "def" 37 | 38 | resource.Test(t, resource.TestCase{ 39 | IsUnitTest: true, 40 | ProviderFactories: map[string]func() (*schema.Provider, error){ 41 | "logtail": func() (*schema.Provider, error) { 42 | return New(WithURL(server.URL)), nil 43 | }, 44 | }, 45 | Steps: []resource.TestStep{ 46 | { 47 | Config: fmt.Sprintf(` 48 | provider "logtail" { 49 | api_token = "foo" 50 | } 51 | 52 | data "logtail_source" "this" { 53 | table_name = "%s" 54 | } 55 | `, table_name), 56 | Check: resource.ComposeTestCheckFunc( 57 | resource.TestCheckResourceAttrSet("data.logtail_source.this", "id"), 58 | resource.TestCheckResourceAttr("data.logtail_source.this", "table_name", table_name), 59 | resource.TestCheckResourceAttr("data.logtail_source.this", "platform", "ubuntu"), 60 | resource.TestCheckResourceAttr("data.logtail_source.this", "team_id", "123456"), 61 | ), 62 | }, 63 | }, 64 | }) 65 | } 66 | 67 | func TestDataSourceGroup(t *testing.T) { 68 | server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { 69 | t.Log("Received " + r.Method + " " + r.RequestURI) 70 | 71 | if r.Header.Get("Authorization") != "Bearer foo" { 72 | t.Fatal("Not authorized: " + r.Header.Get("Authorization")) 73 | } 74 | 75 | prefix := "/api/v1/source-groups" 76 | 77 | switch { 78 | case r.Method == http.MethodGet && r.RequestURI == prefix+"?page=1": 79 | _, _ = w.Write([]byte(`{"data":[{"id":"1","attributes":{"name":"Test Group","sort_index":1,"created_at":"2023-01-01T00:00:00Z","updated_at":"2023-01-01T00:00:00Z"}}],"pagination":{"next":"` + prefix + `?page=2"}}`)) 80 | case r.Method == http.MethodGet && r.RequestURI == prefix+"?page=2": 81 | _, _ = w.Write([]byte(`{"data":[{"id":"2","attributes":{"name":"Production Group","sort_index":2,"created_at":"2023-01-02T00:00:00Z","updated_at":"2023-01-02T00:00:00Z"}}],"pagination":{"next":null}}`)) 82 | default: 83 | t.Fatal("Unexpected " + r.Method + " " + r.RequestURI) 84 | } 85 | })) 86 | defer server.Close() 87 | 88 | var groupName = "Production Group" 89 | 90 | resource.Test(t, resource.TestCase{ 91 | IsUnitTest: true, 92 | ProviderFactories: map[string]func() (*schema.Provider, error){ 93 | "logtail": func() (*schema.Provider, error) { 94 | return New(WithURL(server.URL)), nil 95 | }, 96 | }, 97 | Steps: []resource.TestStep{ 98 | { 99 | Config: fmt.Sprintf(` 100 | provider "logtail" { 101 | api_token = "foo" 102 | } 103 | 104 | data "logtail_source_group" "this" { 105 | name = "%s" 106 | } 107 | `, groupName), 108 | Check: resource.ComposeTestCheckFunc( 109 | resource.TestCheckResourceAttrSet("data.logtail_source_group.this", "id"), 110 | resource.TestCheckResourceAttr("data.logtail_source_group.this", "name", groupName), 111 | resource.TestCheckResourceAttr("data.logtail_source_group.this", "sort_index", "2"), 112 | resource.TestCheckResourceAttrSet("data.logtail_source_group.this", "created_at"), 113 | resource.TestCheckResourceAttrSet("data.logtail_source_group.this", "updated_at"), 114 | ), 115 | }, 116 | }, 117 | }) 118 | } 119 | 120 | func TestDataSourceGroupNotFound(t *testing.T) { 121 | server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { 122 | t.Log("Received " + r.Method + " " + r.RequestURI) 123 | 124 | if r.Header.Get("Authorization") != "Bearer foo" { 125 | t.Fatal("Not authorized: " + r.Header.Get("Authorization")) 126 | } 127 | 128 | prefix := "/api/v1/source-groups" 129 | 130 | switch { 131 | case r.Method == http.MethodGet && r.RequestURI == 
prefix+"?page=1": 132 | _, _ = w.Write([]byte(`{"data":[{"id":"1","attributes":{"name":"Test Group","sort_index":1}}],"pagination":{"next":null}}`)) 133 | default: 134 | t.Fatal("Unexpected " + r.Method + " " + r.RequestURI) 135 | } 136 | })) 137 | defer server.Close() 138 | 139 | resource.Test(t, resource.TestCase{ 140 | IsUnitTest: true, 141 | ProviderFactories: map[string]func() (*schema.Provider, error){ 142 | "logtail": func() (*schema.Provider, error) { 143 | return New(WithURL(server.URL)), nil 144 | }, 145 | }, 146 | Steps: []resource.TestStep{ 147 | { 148 | Config: ` 149 | provider "logtail" { 150 | api_token = "foo" 151 | } 152 | 153 | data "logtail_source_group" "this" { 154 | name = "Nonexistent Group" 155 | } 156 | `, 157 | ExpectError: regexp.MustCompile(`Source group with name "Nonexistent Group" not found`), 158 | }, 159 | }, 160 | }) 161 | } 162 | 163 | // TODO: test duplicate 164 | -------------------------------------------------------------------------------- /internal/provider/resource_metric.go: -------------------------------------------------------------------------------- 1 | package provider 2 | 3 | import ( 4 | "context" 5 | "encoding/json" 6 | "fmt" 7 | "io" 8 | "net/http" 9 | "net/url" 10 | "reflect" 11 | 12 | "github.com/hashicorp/terraform-plugin-sdk/v2/diag" 13 | "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" 14 | "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" 15 | ) 16 | 17 | var metricSchema = map[string]*schema.Schema{ 18 | "id": { 19 | Description: "The ID of this metric.", 20 | Type: schema.TypeString, 21 | Optional: false, 22 | Computed: true, 23 | }, 24 | "source_id": { 25 | Description: "The ID of the source this metric belongs to.", 26 | Type: schema.TypeString, 27 | Required: true, 28 | ForceNew: true, 29 | }, 30 | "name": { 31 | Description: "The name of this metric.", 32 | Type: schema.TypeString, 33 | Required: true, 34 | ForceNew: true, 35 | }, 36 | "sql_expression": { 37 | Description: "The SQL expression used to extract the metric value.", 38 | Type: schema.TypeString, 39 | Required: true, 40 | ForceNew: true, 41 | }, 42 | "aggregations": { 43 | Description: "The list of aggregations to perform on the metric.", 44 | Type: schema.TypeList, 45 | Required: true, 46 | ForceNew: true, 47 | Elem: &schema.Schema{Type: schema.TypeString, ValidateFunc: validation.StringInSlice([]string{"avg", "count", "uniq", "max", "min", "anyLast", "sum", "p50", "p90", "p95", "p99"}, false)}, 48 | }, 49 | "type": { 50 | Description: "The type of the metric.", 51 | Type: schema.TypeString, 52 | Required: true, 53 | ForceNew: true, 54 | ValidateFunc: validation.StringInSlice([]string{"string_low_cardinality", "int64_delta", "float64_delta", "datetime64_delta", "boolean"}, false), 55 | }, 56 | } 57 | 58 | func newMetricResource() *schema.Resource { 59 | return &schema.Resource{ 60 | CreateContext: metricCreate, 61 | ReadContext: metricLookup, 62 | DeleteContext: metricDelete, 63 | Importer: &schema.ResourceImporter{ 64 | StateContext: schema.ImportStatePassthroughContext, 65 | }, 66 | Description: "This resource allows you to create and delete Metrics.", 67 | Schema: metricSchema, 68 | } 69 | } 70 | 71 | type metric struct { 72 | SourceID *string `json:"source_id,omitempty"` 73 | Name *string `json:"name,omitempty"` 74 | SQLExpression *string `json:"sql_expression,omitempty"` 75 | Aggregations *[]string `json:"aggregations,omitempty"` 76 | Type *string `json:"type,omitempty"` 77 | } 78 | 79 | type metricHTTPResponse struct { 80 | Data 
struct { 81 | ID string `json:"id"` 82 | Attributes metric `json:"attributes"` 83 | } `json:"data"` 84 | } 85 | 86 | type metricPageHTTPResponse struct { 87 | Data []struct { 88 | ID string `json:"id"` 89 | Attributes metric `json:"attributes"` 90 | } `json:"data"` 91 | Pagination struct { 92 | First string `json:"first"` 93 | Last string `json:"last"` 94 | Prev string `json:"prev"` 95 | Next string `json:"next"` 96 | } `json:"pagination"` 97 | } 98 | 99 | func metricRef(in *metric) []struct { 100 | k string 101 | v interface{} 102 | } { 103 | return []struct { 104 | k string 105 | v interface{} 106 | }{ 107 | {k: "name", v: &in.Name}, 108 | {k: "sql_expression", v: &in.SQLExpression}, 109 | {k: "aggregations", v: &in.Aggregations}, 110 | {k: "type", v: &in.Type}, 111 | } 112 | } 113 | 114 | func metricCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { 115 | var in metric 116 | for _, e := range metricRef(&in) { 117 | load(d, e.k, e.v) 118 | } 119 | sourceId := d.Get("source_id").(string) 120 | var out metricHTTPResponse 121 | if err := resourceCreate(ctx, meta, fmt.Sprintf("/api/v2/sources/%s/metrics", url.PathEscape(sourceId)), &in, &out); err != nil { 122 | return err 123 | } 124 | d.SetId(out.Data.ID) 125 | return metricCopyAttrs(d, &out.Data.Attributes) 126 | } 127 | 128 | func metricLookup(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { 129 | d.SetId("") 130 | sourceId := d.Get("source_id").(string) 131 | fetch := func(page int) (*metricPageHTTPResponse, error) { 132 | res, err := meta.(*client).Get(ctx, fmt.Sprintf("/api/v2/sources/%s/metrics?page=%d", url.PathEscape(sourceId), page)) 133 | if err != nil { 134 | return nil, err 135 | } 136 | defer func() { 137 | // Keep-Alive. 
138 | _, _ = io.Copy(io.Discard, res.Body) 139 | _ = res.Body.Close() 140 | }() 141 | body, err := io.ReadAll(res.Body) 142 | if res.StatusCode != http.StatusOK { 143 | return nil, fmt.Errorf("GET %s returned %d: %s", res.Request.URL.String(), res.StatusCode, string(body)) 144 | } 145 | if err != nil { 146 | return nil, err 147 | } 148 | var tr metricPageHTTPResponse 149 | return &tr, json.Unmarshal(body, &tr) 150 | } 151 | name := d.Get("name").(string) 152 | page := 1 153 | for { 154 | res, err := fetch(page) 155 | if err != nil { 156 | return diag.FromErr(err) 157 | } 158 | for _, e := range res.Data { 159 | if *e.Attributes.Name == name { 160 | if d.Id() != "" { 161 | return diag.Errorf("duplicate") 162 | } 163 | d.SetId(e.ID) 164 | if derr := metricCopyAttrs(d, &e.Attributes); derr != nil { 165 | return derr 166 | } 167 | } 168 | } 169 | page++ 170 | if res.Pagination.Next == "" { 171 | return nil 172 | } 173 | } 174 | } 175 | 176 | func metricCopyAttrs(d *schema.ResourceData, in *metric) diag.Diagnostics { 177 | var derr diag.Diagnostics 178 | for _, e := range metricRef(in) { 179 | if err := d.Set(e.k, reflect.Indirect(reflect.ValueOf(e.v)).Interface()); err != nil { 180 | derr = append(derr, diag.FromErr(err)[0]) 181 | } 182 | } 183 | return derr 184 | } 185 | 186 | func metricDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { 187 | sourceId := d.Get("source_id").(string) 188 | return resourceDelete(ctx, meta, fmt.Sprintf("/api/v2/sources/%s/metrics/%s", url.PathEscape(sourceId), url.PathEscape(d.Id()))) 189 | } 190 | -------------------------------------------------------------------------------- /internal/provider/resource_metric_test.go: -------------------------------------------------------------------------------- 1 | package provider 2 | 3 | import ( 4 | "fmt" 5 | "io" 6 | "net/http" 7 | "net/http/httptest" 8 | "regexp" 9 | "sync/atomic" 10 | "testing" 11 | 12 | "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" 13 | "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" 14 | ) 15 | 16 | func TestResourceMetric(t *testing.T) { 17 | var data atomic.Value 18 | var id atomic.Value 19 | id.Store(0) 20 | server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { 21 | t.Log("Received " + r.Method + " " + r.RequestURI) 22 | 23 | if r.Header.Get("Authorization") != "Bearer foo" { 24 | t.Fatal("Not authorized: " + r.Header.Get("Authorization")) 25 | } 26 | 27 | prefix := "/api/v2/sources/source123/metrics" 28 | 29 | switch { 30 | case r.Method == http.MethodPost && r.RequestURI == prefix: 31 | body, err := io.ReadAll(r.Body) 32 | if err != nil { 33 | t.Fatal(err) 34 | } 35 | data.Store(body) 36 | w.WriteHeader(http.StatusCreated) 37 | id.Store(id.Load().(int) + 1) 38 | _, _ = w.Write([]byte(fmt.Sprintf(`{"data":{"id":"%d","attributes":%s}}`, id.Load(), body))) 39 | case r.Method == http.MethodGet && r.RequestURI == prefix+"?page=1": 40 | _, _ = w.Write([]byte(fmt.Sprintf(`{"data":[{"id":"%d","attributes":%s}],"pagination":{"next":null}}`, id.Load(), data.Load()))) 41 | case r.Method == http.MethodDelete && r.RequestURI == fmt.Sprintf(`%s/%d`, prefix, id.Load()): 42 | w.WriteHeader(http.StatusNoContent) 43 | data.Store([]byte(nil)) 44 | default: 45 | t.Fatal("Unexpected " + r.Method + " " + r.RequestURI) 46 | } 47 | })) 48 | defer server.Close() 49 | 50 | var sourceID = "source123" 51 | var name = "Test Metric" 52 | var sqlExpression = "JSONExtract(json, 'duration_ms', 'Nullable(Float)')" 53 | 
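	// sql_expression above is a ClickHouse-style expression; JSONExtract reads the
	// duration_ms key from the json column as Nullable(Float).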
var metricType = "float64_delta" 54 | 55 | resource.Test(t, resource.TestCase{ 56 | IsUnitTest: true, 57 | ProviderFactories: map[string]func() (*schema.Provider, error){ 58 | "logtail": func() (*schema.Provider, error) { 59 | return New(WithURL(server.URL)), nil 60 | }, 61 | }, 62 | Steps: []resource.TestStep{ 63 | // Step 1 - create 64 | { 65 | Config: fmt.Sprintf(` 66 | provider "logtail" { 67 | api_token = "foo" 68 | } 69 | 70 | resource "logtail_metric" "this" { 71 | source_id = "%s" 72 | name = "%s" 73 | sql_expression = "%s" 74 | type = "%s" 75 | aggregations = ["avg", "p50"] 76 | } 77 | `, sourceID, name, sqlExpression, metricType), 78 | Check: resource.ComposeTestCheckFunc( 79 | resource.TestCheckResourceAttr("logtail_metric.this", "id", "1"), 80 | resource.TestCheckResourceAttr("logtail_metric.this", "source_id", sourceID), 81 | resource.TestCheckResourceAttr("logtail_metric.this", "name", name), 82 | resource.TestCheckResourceAttr("logtail_metric.this", "sql_expression", sqlExpression), 83 | resource.TestCheckResourceAttr("logtail_metric.this", "type", metricType), 84 | resource.TestCheckResourceAttr("logtail_metric.this", "aggregations.#", "2"), 85 | resource.TestCheckResourceAttr("logtail_metric.this", "aggregations.0", "avg"), 86 | resource.TestCheckResourceAttr("logtail_metric.this", "aggregations.1", "p50"), 87 | ), 88 | PreConfig: func() { 89 | t.Log("step 1") 90 | }, 91 | }, 92 | // Step 2 - test validation on type and aggregations 93 | { 94 | Config: fmt.Sprintf(` 95 | provider "logtail" { 96 | api_token = "foo" 97 | } 98 | 99 | resource "logtail_metric" "this" { 100 | source_id = "%s" 101 | name = "%s" 102 | sql_expression = "%s" 103 | type = "mystery_column" 104 | aggregations = ["min", "max", "best"] 105 | } 106 | `, sourceID, name, sqlExpression), 107 | Check: resource.ComposeTestCheckFunc(), 108 | ExpectError: regexp.MustCompile(`expected type to be one of \["string_low_cardinality" "int64_delta" "float64_delta" "datetime64_delta" "boolean"], got mystery_column(.|\n)*expected aggregations\.2 to be one of \["avg" "count" "uniq" "max" "min" "anyLast" "sum" "p50" "p90" "p95" "p99"], got best`), 109 | PreConfig: func() { 110 | t.Log("step 2") 111 | }, 112 | }, 113 | // Step 3 - update, should change ID because it's a recreation 114 | { 115 | Config: fmt.Sprintf(` 116 | provider "logtail" { 117 | api_token = "foo" 118 | } 119 | 120 | resource "logtail_metric" "this" { 121 | source_id = "%s" 122 | name = "%s" 123 | sql_expression = "%s" 124 | type = "%s" 125 | aggregations = ["min", "max"] 126 | } 127 | `, sourceID, name, sqlExpression, metricType), 128 | Check: resource.ComposeTestCheckFunc( 129 | resource.TestCheckResourceAttr("logtail_metric.this", "id", "2"), 130 | resource.TestCheckResourceAttr("logtail_metric.this", "source_id", sourceID), 131 | resource.TestCheckResourceAttr("logtail_metric.this", "name", name), 132 | resource.TestCheckResourceAttr("logtail_metric.this", "sql_expression", sqlExpression), 133 | resource.TestCheckResourceAttr("logtail_metric.this", "type", metricType), 134 | resource.TestCheckResourceAttr("logtail_metric.this", "aggregations.#", "2"), 135 | resource.TestCheckResourceAttr("logtail_metric.this", "aggregations.0", "min"), 136 | resource.TestCheckResourceAttr("logtail_metric.this", "aggregations.1", "max"), 137 | ), 138 | PreConfig: func() { 139 | t.Log("step 3") 140 | }, 141 | }, 142 | // Step 4 - make no changes, check plan is empty 143 | { 144 | Config: fmt.Sprintf(` 145 | provider "logtail" { 146 | api_token = "foo" 147 | } 148 | 149 
| resource "logtail_metric" "this" { 150 | source_id = "%s" 151 | name = "%s" 152 | sql_expression = "%s" 153 | type = "%s" 154 | aggregations = ["min", "max"] 155 | } 156 | `, sourceID, name, sqlExpression, metricType), 157 | PlanOnly: true, 158 | PreConfig: func() { 159 | t.Log("step 4") 160 | }, 161 | }, 162 | }, 163 | }) 164 | } 165 | -------------------------------------------------------------------------------- /internal/provider/resource.go: -------------------------------------------------------------------------------- 1 | package provider 2 | 3 | import ( 4 | "bytes" 5 | "context" 6 | "encoding/json" 7 | "io" 8 | "log" 9 | "net/http" 10 | 11 | "github.com/hashicorp/terraform-plugin-sdk/v2/diag" 12 | ) 13 | 14 | func resourceCreate(ctx context.Context, meta interface{}, url string, in, out interface{}) diag.Diagnostics { 15 | reqBody, err := json.Marshal(&in) 16 | if err != nil { 17 | return diag.FromErr(err) 18 | } 19 | log.Printf("POST %s: %s", url, string(reqBody)) 20 | res, err := meta.(*client).Post(ctx, url, bytes.NewReader(reqBody)) 21 | if err != nil { 22 | return diag.FromErr(err) 23 | } 24 | defer func() { 25 | // Keep-Alive. 26 | _, _ = io.Copy(io.Discard, res.Body) 27 | _ = res.Body.Close() 28 | }() 29 | body, err := io.ReadAll(res.Body) 30 | if res.StatusCode != http.StatusCreated { 31 | return diag.Errorf("POST %s returned %d: %s", res.Request.URL.String(), res.StatusCode, string(body)) 32 | } 33 | if err != nil { 34 | return diag.FromErr(err) 35 | } 36 | log.Printf("POST %s returned %d: %s", res.Request.URL.String(), res.StatusCode, string(body)) 37 | if err := json.Unmarshal(body, &out); err != nil { 38 | return diag.FromErr(err) 39 | } 40 | return nil 41 | } 42 | 43 | func resourceCreateWithBaseURL(ctx context.Context, meta interface{}, baseURL, path string, in, out interface{}) diag.Diagnostics { 44 | reqBody, err := json.Marshal(&in) 45 | if err != nil { 46 | return diag.FromErr(err) 47 | } 48 | log.Printf("POST %s%s: %s", baseURL, path, string(reqBody)) 49 | res, err := meta.(*client).PostWithBaseURL(ctx, baseURL, path, bytes.NewReader(reqBody)) 50 | if err != nil { 51 | return diag.FromErr(err) 52 | } 53 | defer func() { 54 | // Keep-Alive. 55 | _, _ = io.Copy(io.Discard, res.Body) 56 | _ = res.Body.Close() 57 | }() 58 | body, err := io.ReadAll(res.Body) 59 | if res.StatusCode != http.StatusCreated { 60 | return diag.Errorf("POST %s returned %d: %s", res.Request.URL.String(), res.StatusCode, string(body)) 61 | } 62 | if err != nil { 63 | return diag.FromErr(err) 64 | } 65 | log.Printf("POST %s returned %d: %s", res.Request.URL.String(), res.StatusCode, string(body)) 66 | if err := json.Unmarshal(body, &out); err != nil { 67 | return diag.FromErr(err) 68 | } 69 | return nil 70 | } 71 | 72 | func resourceReadWithBaseURL(ctx context.Context, meta interface{}, baseURL, path string, out interface{}) (derr diag.Diagnostics, ok bool) { 73 | log.Printf("GET %s%s", baseURL, path) 74 | res, err := meta.(*client).GetWithBaseURL(ctx, baseURL, path) 75 | if err != nil { 76 | return diag.FromErr(err), false 77 | } 78 | defer func() { 79 | // Keep-Alive. 
80 | _, _ = io.Copy(io.Discard, res.Body) 81 | _ = res.Body.Close() 82 | }() 83 | if res.StatusCode == http.StatusNotFound { 84 | return nil, false 85 | } 86 | body, err := io.ReadAll(res.Body) 87 | if res.StatusCode != http.StatusOK { 88 | return diag.Errorf("GET %s returned %d: %s", res.Request.URL.String(), res.StatusCode, string(body)), false 89 | } 90 | if err != nil { 91 | return diag.FromErr(err), false 92 | } 93 | log.Printf("GET %s returned %d: %s", res.Request.URL.String(), res.StatusCode, string(body)) 94 | err = json.Unmarshal(body, &out) 95 | if err != nil { 96 | return diag.FromErr(err), false 97 | } 98 | return nil, true 99 | } 100 | 101 | func resourceUpdate(ctx context.Context, meta interface{}, url string, req interface{}) diag.Diagnostics { 102 | reqBody, err := json.Marshal(&req) 103 | if err != nil { 104 | return diag.FromErr(err) 105 | } 106 | log.Printf("PATCH %s: %s", url, string(reqBody)) 107 | res, err := meta.(*client).Patch(ctx, url, bytes.NewReader(reqBody)) 108 | if err != nil { 109 | return diag.FromErr(err) 110 | } 111 | defer func() { 112 | // Keep-Alive. 113 | _, _ = io.Copy(io.Discard, res.Body) 114 | _ = res.Body.Close() 115 | }() 116 | body, _ := io.ReadAll(res.Body) 117 | if res.StatusCode != http.StatusOK { 118 | return diag.Errorf("PATCH %s returned %d: %s", res.Request.URL.String(), res.StatusCode, string(body)) 119 | } 120 | log.Printf("PATCH %s returned %d: %s", res.Request.URL.String(), res.StatusCode, string(body)) 121 | return nil 122 | } 123 | 124 | func resourceUpdateWithBaseURL(ctx context.Context, meta interface{}, baseURL, path string, req interface{}) diag.Diagnostics { 125 | reqBody, err := json.Marshal(&req) 126 | if err != nil { 127 | return diag.FromErr(err) 128 | } 129 | log.Printf("PATCH %s%s: %s", baseURL, path, string(reqBody)) 130 | res, err := meta.(*client).PatchWithBaseURL(ctx, baseURL, path, bytes.NewReader(reqBody)) 131 | if err != nil { 132 | return diag.FromErr(err) 133 | } 134 | defer func() { 135 | // Keep-Alive. 136 | _, _ = io.Copy(io.Discard, res.Body) 137 | _ = res.Body.Close() 138 | }() 139 | body, _ := io.ReadAll(res.Body) 140 | if res.StatusCode != http.StatusOK { 141 | return diag.Errorf("PATCH %s returned %d: %s", res.Request.URL.String(), res.StatusCode, string(body)) 142 | } 143 | log.Printf("PATCH %s returned %d: %s", res.Request.URL.String(), res.StatusCode, string(body)) 144 | return nil 145 | } 146 | 147 | func resourceDelete(ctx context.Context, meta interface{}, url string) diag.Diagnostics { 148 | log.Printf("DELETE %s", url) 149 | res, err := meta.(*client).Delete(ctx, url) 150 | if err != nil { 151 | return diag.FromErr(err) 152 | } 153 | defer func() { 154 | // Keep-Alive. 155 | _, _ = io.Copy(io.Discard, res.Body) 156 | _ = res.Body.Close() 157 | }() 158 | body, _ := io.ReadAll(res.Body) 159 | if res.StatusCode != http.StatusNoContent && res.StatusCode != http.StatusNotFound { 160 | return diag.Errorf("DELETE %s returned %d: %s", res.Request.URL.String(), res.StatusCode, string(body)) 161 | } 162 | log.Printf("DELETE %s returned %d: %s", res.Request.URL.String(), res.StatusCode, string(body)) 163 | return nil 164 | } 165 | 166 | func resourceDeleteWithBaseURL(ctx context.Context, meta interface{}, baseURL, path string) diag.Diagnostics { 167 | log.Printf("DELETE %s%s", baseURL, path) 168 | res, err := meta.(*client).DeleteWithBaseURL(ctx, baseURL, path) 169 | if err != nil { 170 | return diag.FromErr(err) 171 | } 172 | defer func() { 173 | // Keep-Alive. 
174 | _, _ = io.Copy(io.Discard, res.Body) 175 | _ = res.Body.Close() 176 | }() 177 | body, _ := io.ReadAll(res.Body) 178 | if res.StatusCode != http.StatusNoContent && res.StatusCode != http.StatusNotFound { 179 | return diag.Errorf("DELETE %s returned %d: %s", res.Request.URL.String(), res.StatusCode, string(body)) 180 | } 181 | log.Printf("DELETE %s returned %d: %s", res.Request.URL.String(), res.StatusCode, string(body)) 182 | return nil 183 | } 184 | -------------------------------------------------------------------------------- /internal/provider/resource_warehouse_embedding.go: -------------------------------------------------------------------------------- 1 | package provider 2 | 3 | import ( 4 | "context" 5 | "encoding/json" 6 | "fmt" 7 | "io" 8 | "net/url" 9 | "reflect" 10 | 11 | "github.com/hashicorp/terraform-plugin-sdk/v2/diag" 12 | "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" 13 | ) 14 | 15 | var warehouseEmbeddingSchema = map[string]*schema.Schema{ 16 | "id": { 17 | Description: "The ID of this embedding.", 18 | Type: schema.TypeString, 19 | Optional: false, 20 | Computed: true, 21 | }, 22 | "source_id": { 23 | Description: "The ID of the Warehouse source to create the embedding for.", 24 | Type: schema.TypeString, 25 | Required: true, 26 | ForceNew: true, 27 | }, 28 | "embed_from": { 29 | Description: "The source column name containing the text to embed.", 30 | Type: schema.TypeString, 31 | Required: true, 32 | ForceNew: true, 33 | }, 34 | "embed_to": { 35 | Description: "The target column name where the generated embeddings will be stored.", 36 | Type: schema.TypeString, 37 | Required: true, 38 | ForceNew: true, 39 | }, 40 | "model": { 41 | Description: "The name of the embedding model to use (e.g., `embeddinggemma:300m`).", 42 | Type: schema.TypeString, 43 | Required: true, 44 | ForceNew: true, 45 | }, 46 | "dimension": { 47 | Description: "The vector dimension of the embeddings to generate.", 48 | Type: schema.TypeInt, 49 | Required: true, 50 | ForceNew: true, 51 | }, 52 | "created_at": { 53 | Description: "The time when this embedding was created.", 54 | Type: schema.TypeString, 55 | Optional: false, 56 | Computed: true, 57 | }, 58 | "updated_at": { 59 | Description: "The time when this embedding was last updated.", 60 | Type: schema.TypeString, 61 | Optional: false, 62 | Computed: true, 63 | }, 64 | } 65 | 66 | func newWarehouseEmbeddingResource() *schema.Resource { 67 | return &schema.Resource{ 68 | CreateContext: warehouseEmbeddingCreate, 69 | ReadContext: warehouseEmbeddingRead, 70 | DeleteContext: warehouseEmbeddingDelete, 71 | Description: "This resource allows you to create and manage embeddings for vector similarity search in Warehouse. 
For more information about the Warehouse Embeddings API check https://betterstack.com/docs/warehouse/api/embeddings/", 72 | Schema: warehouseEmbeddingSchema, 73 | } 74 | } 75 | 76 | type warehouseEmbedding struct { 77 | EmbedFrom *string `json:"embed_from,omitempty"` 78 | EmbedTo *string `json:"embed_to,omitempty"` 79 | Model *string `json:"model,omitempty"` 80 | Dimension *int `json:"dimension,omitempty"` 81 | CreatedAt *string `json:"created_at,omitempty"` 82 | UpdatedAt *string `json:"updated_at,omitempty"` 83 | } 84 | 85 | type warehouseEmbeddingHTTPResponse struct { 86 | Data struct { 87 | ID string `json:"id"` 88 | Attributes warehouseEmbedding `json:"attributes"` 89 | } `json:"data"` 90 | } 91 | 92 | type warehouseEmbeddingPageHTTPResponse struct { 93 | Data []struct { 94 | ID string `json:"id"` 95 | Attributes warehouseEmbedding `json:"attributes"` 96 | } `json:"data"` 97 | Pagination struct { 98 | First string `json:"first"` 99 | Last string `json:"last"` 100 | Prev string `json:"prev"` 101 | Next string `json:"next"` 102 | } `json:"pagination"` 103 | } 104 | 105 | func warehouseEmbeddingRef(in *warehouseEmbedding) []struct { 106 | k string 107 | v interface{} 108 | } { 109 | return []struct { 110 | k string 111 | v interface{} 112 | }{ 113 | {k: "embed_from", v: &in.EmbedFrom}, 114 | {k: "embed_to", v: &in.EmbedTo}, 115 | {k: "model", v: &in.Model}, 116 | {k: "dimension", v: &in.Dimension}, 117 | {k: "created_at", v: &in.CreatedAt}, 118 | {k: "updated_at", v: &in.UpdatedAt}, 119 | } 120 | } 121 | 122 | func warehouseEmbeddingCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { 123 | var in warehouseEmbedding 124 | for _, e := range warehouseEmbeddingRef(&in) { 125 | load(d, e.k, e.v) 126 | } 127 | 128 | sourceID := d.Get("source_id").(string) 129 | 130 | var out warehouseEmbeddingHTTPResponse 131 | if err := resourceCreateWithBaseURL(ctx, meta, meta.(*client).WarehouseBaseURL(), fmt.Sprintf("/api/v1/sources/%s/embeddings", url.PathEscape(sourceID)), &in, &out); err != nil { 132 | return err 133 | } 134 | d.SetId(out.Data.ID) 135 | return warehouseEmbeddingCopyAttrs(d, &out.Data.Attributes) 136 | } 137 | 138 | func warehouseEmbeddingRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { 139 | sourceID := d.Get("source_id").(string) 140 | 141 | // List embeddings and find the one with matching ID 142 | fetch := func(page int) (*warehouseEmbeddingPageHTTPResponse, error) { 143 | res, err := meta.(*client).do(ctx, "GET", meta.(*client).WarehouseBaseURL(), fmt.Sprintf("/api/v1/sources/%s/embeddings?page=%d", url.PathEscape(sourceID), page), nil) 144 | if err != nil { 145 | return nil, err 146 | } 147 | defer func() { 148 | _, _ = io.ReadAll(res.Body) 149 | _ = res.Body.Close() 150 | }() 151 | 152 | body, err := io.ReadAll(res.Body) 153 | if err != nil { 154 | return nil, err 155 | } 156 | if res.StatusCode != 200 { 157 | return nil, fmt.Errorf("GET %s returned %d: %s", res.Request.URL.String(), res.StatusCode, string(body)) 158 | } 159 | var pageOut warehouseEmbeddingPageHTTPResponse 160 | if err := json.Unmarshal(body, &pageOut); err != nil { 161 | return nil, err 162 | } 163 | return &pageOut, nil 164 | } 165 | 166 | page := 1 167 | for { 168 | pageData, err := fetch(page) 169 | if err != nil { 170 | return diag.FromErr(err) 171 | } 172 | for _, e := range pageData.Data { 173 | if e.ID == d.Id() { 174 | return warehouseEmbeddingCopyAttrs(d, &e.Attributes) 175 | } 176 | } 177 | if pageData.Pagination.Next == "" { 178 
| break 179 | } 180 | page++ 181 | } 182 | 183 | // If we get here, the embedding was not found 184 | d.SetId("") // Force "create" on 404. 185 | return nil 186 | } 187 | 188 | func warehouseEmbeddingDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { 189 | sourceID := d.Get("source_id").(string) 190 | return resourceDeleteWithBaseURL(ctx, meta, meta.(*client).WarehouseBaseURL(), fmt.Sprintf("/api/v1/sources/%s/embeddings/%s", url.PathEscape(sourceID), url.PathEscape(d.Id()))) 191 | } 192 | 193 | func warehouseEmbeddingCopyAttrs(d *schema.ResourceData, in *warehouseEmbedding) diag.Diagnostics { 194 | var derr diag.Diagnostics 195 | for _, e := range warehouseEmbeddingRef(in) { 196 | if err := d.Set(e.k, reflect.Indirect(reflect.ValueOf(e.v)).Interface()); err != nil { 197 | derr = append(derr, diag.FromErr(err)[0]) 198 | } 199 | } 200 | return derr 201 | } 202 | -------------------------------------------------------------------------------- /internal/provider/resource_source_group_test.go: -------------------------------------------------------------------------------- 1 | package provider 2 | 3 | import ( 4 | "encoding/json" 5 | "fmt" 6 | "io" 7 | "net/http" 8 | "net/http/httptest" 9 | "sync/atomic" 10 | "testing" 11 | 12 | "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" 13 | "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" 14 | ) 15 | 16 | // injectSourceGroup adds fields to JSON data for source group testing 17 | func injectSourceGroup(t *testing.T, data []byte, key, value string) []byte { 18 | var m map[string]interface{} 19 | if err := json.Unmarshal(data, &m); err != nil { 20 | t.Fatal(err) 21 | } 22 | m[key] = value 23 | result, err := json.Marshal(m) 24 | if err != nil { 25 | t.Fatal(err) 26 | } 27 | return result 28 | } 29 | 30 | func TestResourceSourceGroup(t *testing.T) { 31 | var data atomic.Value 32 | server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { 33 | t.Log("Received " + r.Method + " " + r.RequestURI) 34 | 35 | if r.Header.Get("Authorization") != "Bearer foo" { 36 | t.Fatal("Not authorized: " + r.Header.Get("Authorization")) 37 | } 38 | 39 | prefix := "/api/v1/source-groups" 40 | id := "1" 41 | 42 | switch { 43 | case r.Method == http.MethodPost && r.RequestURI == prefix: 44 | body, err := io.ReadAll(r.Body) 45 | if err != nil { 46 | t.Fatal(err) 47 | } 48 | // Inject server-generated fields 49 | body = injectSourceGroup(t, body, "created_at", "2023-01-01T00:00:00Z") 50 | body = injectSourceGroup(t, body, "updated_at", "2023-01-01T00:00:00Z") 51 | 52 | data.Store(body) 53 | w.WriteHeader(http.StatusCreated) 54 | _, _ = w.Write([]byte(fmt.Sprintf(`{"data":{"id":%q,"attributes":%s}}`, id, body))) 55 | case r.Method == http.MethodGet && r.RequestURI == prefix+"/"+id: 56 | _, _ = w.Write([]byte(fmt.Sprintf(`{"data":{"id":%q,"attributes":%s}}`, id, data.Load().([]byte)))) 57 | case r.Method == http.MethodPatch && r.RequestURI == prefix+"/"+id: 58 | body, err := io.ReadAll(r.Body) 59 | if err != nil { 60 | t.Fatal(err) 61 | } 62 | patch := make(map[string]interface{}) 63 | if err = json.Unmarshal(data.Load().([]byte), &patch); err != nil { 64 | t.Fatal(err) 65 | } 66 | if err = json.Unmarshal(body, &patch); err != nil { 67 | t.Fatal(err) 68 | } 69 | patched, err := json.Marshal(patch) 70 | if err != nil { 71 | t.Fatal(err) 72 | } 73 | // Update the updated_at timestamp 74 | patched = injectSourceGroup(t, patched, "updated_at", "2023-01-02T00:00:00Z") 75 | 76 | 
data.Store(patched) 77 | _, _ = w.Write([]byte(fmt.Sprintf(`{"data":{"id":%q,"attributes":%s}}`, id, patched))) 78 | case r.Method == http.MethodDelete && r.RequestURI == prefix+"/"+id: 79 | w.WriteHeader(http.StatusNoContent) 80 | data.Store([]byte(nil)) 81 | default: 82 | t.Fatal("Unexpected " + r.Method + " " + r.RequestURI) 83 | } 84 | })) 85 | defer server.Close() 86 | 87 | var name = "Test Source Group" 88 | var sortIndex = 1 89 | 90 | resource.Test(t, resource.TestCase{ 91 | IsUnitTest: true, 92 | ProviderFactories: map[string]func() (*schema.Provider, error){ 93 | "logtail": func() (*schema.Provider, error) { 94 | return New(WithURL(server.URL)), nil 95 | }, 96 | }, 97 | Steps: []resource.TestStep{ 98 | // Step 1 - create 99 | { 100 | Config: fmt.Sprintf(` 101 | provider "logtail" { 102 | api_token = "foo" 103 | } 104 | 105 | resource "logtail_source_group" "this" { 106 | name = "%s" 107 | sort_index = %d 108 | } 109 | `, name, sortIndex), 110 | Check: resource.ComposeTestCheckFunc( 111 | resource.TestCheckResourceAttrSet("logtail_source_group.this", "id"), 112 | resource.TestCheckResourceAttr("logtail_source_group.this", "name", name), 113 | resource.TestCheckResourceAttr("logtail_source_group.this", "sort_index", fmt.Sprintf("%d", sortIndex)), 114 | resource.TestCheckResourceAttrSet("logtail_source_group.this", "created_at"), 115 | resource.TestCheckResourceAttrSet("logtail_source_group.this", "updated_at"), 116 | ), 117 | }, 118 | // Step 2 - update 119 | { 120 | Config: fmt.Sprintf(` 121 | provider "logtail" { 122 | api_token = "foo" 123 | } 124 | 125 | resource "logtail_source_group" "this" { 126 | name = "%s Updated" 127 | sort_index = %d 128 | } 129 | `, name, sortIndex+1), 130 | Check: resource.ComposeTestCheckFunc( 131 | resource.TestCheckResourceAttrSet("logtail_source_group.this", "id"), 132 | resource.TestCheckResourceAttr("logtail_source_group.this", "name", name+" Updated"), 133 | resource.TestCheckResourceAttr("logtail_source_group.this", "sort_index", fmt.Sprintf("%d", sortIndex+1)), 134 | resource.TestCheckResourceAttrSet("logtail_source_group.this", "created_at"), 135 | resource.TestCheckResourceAttrSet("logtail_source_group.this", "updated_at"), 136 | ), 137 | }, 138 | // Step 3 - import 139 | { 140 | ResourceName: "logtail_source_group.this", 141 | ImportState: true, 142 | ImportStateVerify: true, 143 | }, 144 | }, 145 | }) 146 | } 147 | 148 | func TestResourceSourceGroupMinimal(t *testing.T) { 149 | var data atomic.Value 150 | server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { 151 | t.Log("Received " + r.Method + " " + r.RequestURI) 152 | 153 | if r.Header.Get("Authorization") != "Bearer foo" { 154 | t.Fatal("Not authorized: " + r.Header.Get("Authorization")) 155 | } 156 | 157 | prefix := "/api/v1/source-groups" 158 | id := "2" 159 | 160 | switch { 161 | case r.Method == http.MethodPost && r.RequestURI == prefix: 162 | body, err := io.ReadAll(r.Body) 163 | if err != nil { 164 | t.Fatal(err) 165 | } 166 | // Inject server-generated fields 167 | body = injectSourceGroup(t, body, "created_at", "2023-01-01T00:00:00Z") 168 | body = injectSourceGroup(t, body, "updated_at", "2023-01-01T00:00:00Z") 169 | 170 | data.Store(body) 171 | w.WriteHeader(http.StatusCreated) 172 | _, _ = w.Write([]byte(fmt.Sprintf(`{"data":{"id":%q,"attributes":%s}}`, id, body))) 173 | case r.Method == http.MethodGet && r.RequestURI == prefix+"/"+id: 174 | _, _ = w.Write([]byte(fmt.Sprintf(`{"data":{"id":%q,"attributes":%s}}`, id, 
data.Load().([]byte)))) 175 | case r.Method == http.MethodDelete && r.RequestURI == prefix+"/"+id: 176 | w.WriteHeader(http.StatusNoContent) 177 | data.Store([]byte(nil)) 178 | default: 179 | t.Fatal("Unexpected " + r.Method + " " + r.RequestURI) 180 | } 181 | })) 182 | defer server.Close() 183 | 184 | var name = "Minimal Source Group" 185 | 186 | resource.Test(t, resource.TestCase{ 187 | IsUnitTest: true, 188 | ProviderFactories: map[string]func() (*schema.Provider, error){ 189 | "logtail": func() (*schema.Provider, error) { 190 | return New(WithURL(server.URL)), nil 191 | }, 192 | }, 193 | Steps: []resource.TestStep{ 194 | // Test with only required fields 195 | { 196 | Config: fmt.Sprintf(` 197 | provider "logtail" { 198 | api_token = "foo" 199 | } 200 | 201 | resource "logtail_source_group" "this" { 202 | name = "%s" 203 | } 204 | `, name), 205 | Check: resource.ComposeTestCheckFunc( 206 | resource.TestCheckResourceAttrSet("logtail_source_group.this", "id"), 207 | resource.TestCheckResourceAttr("logtail_source_group.this", "name", name), 208 | resource.TestCheckResourceAttrSet("logtail_source_group.this", "created_at"), 209 | resource.TestCheckResourceAttrSet("logtail_source_group.this", "updated_at"), 210 | ), 211 | }, 212 | }, 213 | }) 214 | } 215 | -------------------------------------------------------------------------------- /internal/provider/resource_warehouse_source_group.go: -------------------------------------------------------------------------------- 1 | package provider 2 | 3 | import ( 4 | "context" 5 | "encoding/json" 6 | "fmt" 7 | "io" 8 | "net/http" 9 | "net/url" 10 | "reflect" 11 | 12 | "github.com/hashicorp/terraform-plugin-sdk/v2/diag" 13 | "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" 14 | ) 15 | 16 | var warehouseSourceGroupSchema = map[string]*schema.Schema{ 17 | "team_name": { 18 | Description: "Used to specify the team the resource should be created in when using global tokens.", 19 | Type: schema.TypeString, 20 | Optional: true, 21 | Default: nil, 22 | DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool { 23 | return d.Id() != "" 24 | }, 25 | }, 26 | "id": { 27 | Description: "The ID of this warehouse source group.", 28 | Type: schema.TypeString, 29 | Optional: false, 30 | Computed: true, 31 | }, 32 | "name": { 33 | Description: "The name of the warehouse source group. Can contain letters, numbers, spaces, and special characters.", 34 | Type: schema.TypeString, 35 | Required: true, 36 | }, 37 | "created_at": { 38 | Description: "The time when this warehouse source group was created.", 39 | Type: schema.TypeString, 40 | Optional: false, 41 | Computed: true, 42 | }, 43 | "updated_at": { 44 | Description: "The time when this warehouse source group was updated.", 45 | Type: schema.TypeString, 46 | Optional: false, 47 | Computed: true, 48 | }, 49 | "sort_index": { 50 | Description: "The sort index of this warehouse source group.", 51 | Type: schema.TypeInt, 52 | Optional: true, 53 | }, 54 | } 55 | 56 | func newWarehouseSourceGroupResource() *schema.Resource { 57 | return &schema.Resource{ 58 | CreateContext: warehouseSourceGroupCreate, 59 | ReadContext: warehouseSourceGroupRead, 60 | UpdateContext: warehouseSourceGroupUpdate, 61 | DeleteContext: warehouseSourceGroupDelete, 62 | Importer: &schema.ResourceImporter{ 63 | StateContext: schema.ImportStatePassthroughContext, 64 | }, 65 | Description: "This resource allows you to create, modify, and delete your Warehouse source groups. 
For more information about the Warehouse API check https://betterstack.com/docs/warehouse/api/source-groups/create/", 66 | Schema: warehouseSourceGroupSchema, 67 | } 68 | } 69 | 70 | type warehouseSourceGroup struct { 71 | Name *string `json:"name,omitempty"` 72 | CreatedAt *string `json:"created_at,omitempty"` 73 | UpdatedAt *string `json:"updated_at,omitempty"` 74 | TeamName *string `json:"team_name,omitempty"` 75 | SortIndex *int `json:"sort_index,omitempty"` 76 | } 77 | 78 | type warehouseSourceGroupHTTPResponse struct { 79 | Data struct { 80 | ID string `json:"id"` 81 | Attributes warehouseSourceGroup `json:"attributes"` 82 | } `json:"data"` 83 | } 84 | 85 | func warehouseSourceGroupRef(in *warehouseSourceGroup) []struct { 86 | k string 87 | v interface{} 88 | } { 89 | return []struct { 90 | k string 91 | v interface{} 92 | }{ 93 | {k: "name", v: &in.Name}, 94 | {k: "created_at", v: &in.CreatedAt}, 95 | {k: "updated_at", v: &in.UpdatedAt}, 96 | {k: "sort_index", v: &in.SortIndex}, 97 | } 98 | } 99 | 100 | func warehouseSourceGroupCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { 101 | var in warehouseSourceGroup 102 | for _, e := range warehouseSourceGroupRef(&in) { 103 | load(d, e.k, e.v) 104 | } 105 | 106 | load(d, "team_name", &in.TeamName) 107 | 108 | var out warehouseSourceGroupHTTPResponse 109 | if err := resourceCreateWithBaseURL(ctx, meta, meta.(*client).WarehouseBaseURL(), "/api/v1/source-groups", &in, &out); err != nil { 110 | return err 111 | } 112 | d.SetId(out.Data.ID) 113 | return warehouseSourceGroupCopyAttrs(d, &out.Data.Attributes) 114 | } 115 | 116 | func warehouseSourceGroupRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { 117 | var out warehouseSourceGroupHTTPResponse 118 | if err, ok := resourceReadWithBaseURL(ctx, meta, meta.(*client).WarehouseBaseURL(), fmt.Sprintf("/api/v1/source-groups/%s", url.PathEscape(d.Id())), &out); err != nil { 119 | return err 120 | } else if !ok { 121 | d.SetId("") // Force "create" on 404. 
122 | return nil 123 | } 124 | return warehouseSourceGroupCopyAttrs(d, &out.Data.Attributes) 125 | } 126 | 127 | func warehouseSourceGroupCopyAttrs(d *schema.ResourceData, in *warehouseSourceGroup) diag.Diagnostics { 128 | var derr diag.Diagnostics 129 | for _, e := range warehouseSourceGroupRef(in) { 130 | if err := d.Set(e.k, reflect.Indirect(reflect.ValueOf(e.v)).Interface()); err != nil { 131 | derr = append(derr, diag.FromErr(err)[0]) 132 | } 133 | } 134 | 135 | return derr 136 | } 137 | 138 | func warehouseSourceGroupUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { 139 | var in warehouseSourceGroup 140 | for _, e := range warehouseSourceGroupRef(&in) { 141 | if d.HasChange(e.k) { 142 | load(d, e.k, e.v) 143 | } 144 | } 145 | return resourceUpdateWithBaseURL(ctx, meta, meta.(*client).WarehouseBaseURL(), fmt.Sprintf("/api/v1/source-groups/%s", url.PathEscape(d.Id())), &in) 146 | } 147 | 148 | func warehouseSourceGroupDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { 149 | return resourceDeleteWithBaseURL(ctx, meta, meta.(*client).WarehouseBaseURL(), fmt.Sprintf("/api/v1/source-groups/%s", url.PathEscape(d.Id()))) 150 | } 151 | 152 | func newWarehouseSourceGroupDataSource() *schema.Resource { 153 | s := make(map[string]*schema.Schema) 154 | for k, v := range warehouseSourceGroupSchema { 155 | cp := *v 156 | switch k { 157 | case "name": 158 | cp.Computed = false 159 | cp.Optional = false 160 | cp.Required = true 161 | default: 162 | cp.Computed = true 163 | cp.Optional = false 164 | cp.Required = false 165 | cp.ValidateFunc = nil 166 | cp.ValidateDiagFunc = nil 167 | cp.Default = nil 168 | cp.DefaultFunc = nil 169 | cp.DiffSuppressFunc = nil 170 | } 171 | s[k] = &cp 172 | } 173 | return &schema.Resource{ 174 | ReadContext: warehouseSourceGroupLookup, 175 | Description: "This Data Source allows you to look up existing Warehouse source groups using their name. For more information about the Warehouse API check https://betterstack.com/docs/warehouse/api/source-groups/index/", 176 | Schema: s, 177 | } 178 | } 179 | 180 | type warehouseSourceGroupPageHTTPResponse struct { 181 | Data []struct { 182 | ID string `json:"id"` 183 | Attributes warehouseSourceGroup `json:"attributes"` 184 | } `json:"data"` 185 | Pagination struct { 186 | First string `json:"first"` 187 | Last string `json:"last"` 188 | Prev string `json:"prev"` 189 | Next string `json:"next"` 190 | } `json:"pagination"` 191 | } 192 | 193 | func warehouseSourceGroupLookup(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { 194 | fetch := func(page int) (*warehouseSourceGroupPageHTTPResponse, error) { 195 | res, err := meta.(*client).GetWithBaseURL(ctx, meta.(*client).WarehouseBaseURL(), fmt.Sprintf("/api/v1/source-groups?page=%d", page)) 196 | if err != nil { 197 | return nil, err 198 | } 199 | defer func() { 200 | // Keep-Alive. 
201 | _, _ = io.Copy(io.Discard, res.Body) 202 | _ = res.Body.Close() 203 | }() 204 | body, err := io.ReadAll(res.Body) 205 | if res.StatusCode != http.StatusOK { 206 | return nil, fmt.Errorf("GET %s returned %d: %s", res.Request.URL.String(), res.StatusCode, string(body)) 207 | } 208 | if err != nil { 209 | return nil, err 210 | } 211 | var tr warehouseSourceGroupPageHTTPResponse 212 | return &tr, json.Unmarshal(body, &tr) 213 | } 214 | name := d.Get("name").(string) 215 | page := 1 216 | for { 217 | res, err := fetch(page) 218 | if err != nil { 219 | return diag.FromErr(err) 220 | } 221 | for _, e := range res.Data { 222 | if *e.Attributes.Name == name { 223 | if d.Id() != "" { 224 | return diag.Errorf("duplicate") 225 | } 226 | d.SetId(e.ID) 227 | if derr := warehouseSourceGroupCopyAttrs(d, &e.Attributes); derr != nil { 228 | return derr 229 | } 230 | } 231 | } 232 | page++ 233 | if res.Pagination.Next == "" { 234 | return nil 235 | } 236 | } 237 | } 238 | -------------------------------------------------------------------------------- /internal/provider/resource_errors_application_group.go: -------------------------------------------------------------------------------- 1 | package provider 2 | 3 | import ( 4 | "context" 5 | "encoding/json" 6 | "fmt" 7 | "io" 8 | "net/http" 9 | "net/url" 10 | "reflect" 11 | 12 | "github.com/hashicorp/terraform-plugin-sdk/v2/diag" 13 | "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" 14 | ) 15 | 16 | var errorsApplicationGroupSchema = map[string]*schema.Schema{ 17 | "team_name": { 18 | Description: "Used to specify the team the resource should be created in when using global tokens.", 19 | Type: schema.TypeString, 20 | Optional: true, 21 | Default: nil, 22 | DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool { 23 | return d.Id() != "" 24 | }, 25 | }, 26 | "id": { 27 | Description: "The ID of this application group.", 28 | Type: schema.TypeString, 29 | Optional: false, 30 | Computed: true, 31 | }, 32 | "name": { 33 | Description: "Application group name. Must be unique within your team.", 34 | Type: schema.TypeString, 35 | Required: true, 36 | }, 37 | "created_at": { 38 | Description: "The time when this application group was created.", 39 | Type: schema.TypeString, 40 | Optional: false, 41 | Computed: true, 42 | }, 43 | "updated_at": { 44 | Description: "The time when this application group was updated.", 45 | Type: schema.TypeString, 46 | Optional: false, 47 | Computed: true, 48 | }, 49 | "sort_index": { 50 | Description: "The sort index of this application group.", 51 | Type: schema.TypeInt, 52 | Optional: true, 53 | }, 54 | } 55 | 56 | func newErrorsApplicationGroupResource() *schema.Resource { 57 | return &schema.Resource{ 58 | CreateContext: errorsApplicationGroupCreate, 59 | ReadContext: errorsApplicationGroupRead, 60 | UpdateContext: errorsApplicationGroupUpdate, 61 | DeleteContext: errorsApplicationGroupDelete, 62 | Importer: &schema.ResourceImporter{ 63 | StateContext: schema.ImportStatePassthroughContext, 64 | }, 65 | Description: "This resource allows you to create, modify, and delete your Errors application groups. 
For more information about the Errors API check https://betterstack.com/docs/errors/api/applications-groups/create/", 66 | Schema: errorsApplicationGroupSchema, 67 | } 68 | } 69 | 70 | type errorsApplicationGroup struct { 71 | Name *string `json:"name,omitempty"` 72 | CreatedAt *string `json:"created_at,omitempty"` 73 | UpdatedAt *string `json:"updated_at,omitempty"` 74 | TeamName *string `json:"team_name,omitempty"` 75 | SortIndex *int `json:"sort_index,omitempty"` 76 | } 77 | 78 | type errorsApplicationGroupHTTPResponse struct { 79 | Data struct { 80 | ID string `json:"id"` 81 | Attributes errorsApplicationGroup `json:"attributes"` 82 | } `json:"data"` 83 | } 84 | 85 | func errorsApplicationGroupRef(in *errorsApplicationGroup) []struct { 86 | k string 87 | v interface{} 88 | } { 89 | return []struct { 90 | k string 91 | v interface{} 92 | }{ 93 | {k: "name", v: &in.Name}, 94 | {k: "created_at", v: &in.CreatedAt}, 95 | {k: "updated_at", v: &in.UpdatedAt}, 96 | {k: "sort_index", v: &in.SortIndex}, 97 | } 98 | } 99 | 100 | func errorsApplicationGroupCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { 101 | var in errorsApplicationGroup 102 | for _, e := range errorsApplicationGroupRef(&in) { 103 | load(d, e.k, e.v) 104 | } 105 | 106 | load(d, "team_name", &in.TeamName) 107 | 108 | var out errorsApplicationGroupHTTPResponse 109 | if err := resourceCreateWithBaseURL(ctx, meta, meta.(*client).ErrorsBaseURL(), "/api/v1/application-groups", &in, &out); err != nil { 110 | return err 111 | } 112 | d.SetId(out.Data.ID) 113 | return errorsApplicationGroupCopyAttrs(d, &out.Data.Attributes) 114 | } 115 | 116 | func errorsApplicationGroupRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { 117 | var out errorsApplicationGroupHTTPResponse 118 | if err, ok := resourceReadWithBaseURL(ctx, meta, meta.(*client).ErrorsBaseURL(), fmt.Sprintf("/api/v1/application-groups/%s", url.PathEscape(d.Id())), &out); err != nil { 119 | return err 120 | } else if !ok { 121 | d.SetId("") // Force "create" on 404. 
122 | return nil 123 | } 124 | return errorsApplicationGroupCopyAttrs(d, &out.Data.Attributes) 125 | } 126 | 127 | func errorsApplicationGroupCopyAttrs(d *schema.ResourceData, in *errorsApplicationGroup) diag.Diagnostics { 128 | var derr diag.Diagnostics 129 | for _, e := range errorsApplicationGroupRef(in) { 130 | if err := d.Set(e.k, reflect.Indirect(reflect.ValueOf(e.v)).Interface()); err != nil { 131 | derr = append(derr, diag.FromErr(err)[0]) 132 | } 133 | } 134 | 135 | return derr 136 | } 137 | 138 | func errorsApplicationGroupUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { 139 | var in errorsApplicationGroup 140 | for _, e := range errorsApplicationGroupRef(&in) { 141 | if d.HasChange(e.k) { 142 | load(d, e.k, e.v) 143 | } 144 | } 145 | return resourceUpdateWithBaseURL(ctx, meta, meta.(*client).ErrorsBaseURL(), fmt.Sprintf("/api/v1/application-groups/%s", url.PathEscape(d.Id())), &in) 146 | } 147 | 148 | func errorsApplicationGroupDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { 149 | return resourceDeleteWithBaseURL(ctx, meta, meta.(*client).ErrorsBaseURL(), fmt.Sprintf("/api/v1/application-groups/%s", url.PathEscape(d.Id()))) 150 | } 151 | 152 | func newErrorsApplicationGroupDataSource() *schema.Resource { 153 | s := make(map[string]*schema.Schema) 154 | for k, v := range errorsApplicationGroupSchema { 155 | cp := *v 156 | switch k { 157 | case "name": 158 | cp.Computed = false 159 | cp.Optional = false 160 | cp.Required = true 161 | case "platform": 162 | cp.Computed = true 163 | cp.Optional = false 164 | cp.Required = false 165 | cp.Default = nil 166 | cp.ValidateFunc = nil 167 | cp.ValidateDiagFunc = nil 168 | cp.DefaultFunc = nil 169 | cp.DiffSuppressFunc = nil 170 | cp.ForceNew = false 171 | default: 172 | cp.Computed = true 173 | cp.Optional = false 174 | cp.Required = false 175 | cp.ValidateFunc = nil 176 | cp.ValidateDiagFunc = nil 177 | cp.Default = nil 178 | cp.DefaultFunc = nil 179 | cp.DiffSuppressFunc = nil 180 | } 181 | s[k] = &cp 182 | } 183 | return &schema.Resource{ 184 | ReadContext: errorsApplicationGroupLookup, 185 | Description: "This Data Source allows you to look up existing Errors application groups using their name. For more information about the Errors API check https://betterstack.com/docs/errors/api/applications-groups/list/", 186 | Schema: s, 187 | } 188 | } 189 | 190 | type errorsApplicationGroupPageHTTPResponse struct { 191 | Data []struct { 192 | ID string `json:"id"` 193 | Attributes errorsApplicationGroup `json:"attributes"` 194 | } `json:"data"` 195 | Pagination struct { 196 | First string `json:"first"` 197 | Last string `json:"last"` 198 | Prev string `json:"prev"` 199 | Next string `json:"next"` 200 | } `json:"pagination"` 201 | } 202 | 203 | func errorsApplicationGroupLookup(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { 204 | fetch := func(page int) (*errorsApplicationGroupPageHTTPResponse, error) { 205 | res, err := meta.(*client).GetWithBaseURL(ctx, meta.(*client).ErrorsBaseURL(), fmt.Sprintf("/api/v1/application-groups?page=%d", page)) 206 | if err != nil { 207 | return nil, err 208 | } 209 | defer func() { 210 | // Keep-Alive. 
211 | _, _ = io.Copy(io.Discard, res.Body) 212 | _ = res.Body.Close() 213 | }() 214 | body, err := io.ReadAll(res.Body) 215 | if res.StatusCode != http.StatusOK { 216 | return nil, fmt.Errorf("GET %s returned %d: %s", res.Request.URL.String(), res.StatusCode, string(body)) 217 | } 218 | if err != nil { 219 | return nil, err 220 | } 221 | var tr errorsApplicationGroupPageHTTPResponse 222 | return &tr, json.Unmarshal(body, &tr) 223 | } 224 | name := d.Get("name").(string) 225 | page := 1 226 | for { 227 | res, err := fetch(page) 228 | if err != nil { 229 | return diag.FromErr(err) 230 | } 231 | for _, e := range res.Data { 232 | if *e.Attributes.Name == name { 233 | if d.Id() != "" { 234 | return diag.Errorf("duplicate") 235 | } 236 | d.SetId(e.ID) 237 | if derr := errorsApplicationGroupCopyAttrs(d, &e.Attributes); derr != nil { 238 | return derr 239 | } 240 | } 241 | } 242 | page++ 243 | if res.Pagination.Next == "" { 244 | return nil 245 | } 246 | } 247 | } 248 | -------------------------------------------------------------------------------- /internal/provider/resource_warehouse_time_series.go: -------------------------------------------------------------------------------- 1 | package provider 2 | 3 | import ( 4 | "context" 5 | "encoding/json" 6 | "fmt" 7 | "io" 8 | "net/http" 9 | "net/url" 10 | "reflect" 11 | 12 | "github.com/hashicorp/go-cty/cty" 13 | "github.com/hashicorp/terraform-plugin-sdk/v2/diag" 14 | "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" 15 | "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" 16 | ) 17 | 18 | var timeSeriesTypes = []string{ 19 | "string", "string_low_cardinality", "int64_delta", "int64", "uint64_delta", "uint64", 20 | "float64_delta", "datetime64_delta", "boolean", "array_bfloat16", "array_float32", 21 | } 22 | 23 | var warehouseTimeSeriesSchema = map[string]*schema.Schema{ 24 | "id": { 25 | Description: "The ID of this time series.", 26 | Type: schema.TypeString, 27 | Optional: false, 28 | Computed: true, 29 | }, 30 | "source_id": { 31 | Description: "The ID of the Warehouse source to create the time series for.", 32 | Type: schema.TypeString, 33 | Required: true, 34 | ForceNew: true, 35 | }, 36 | "name": { 37 | Description: "The name of the time series. Must contain only letters, numbers, and underscores.", 38 | Type: schema.TypeString, 39 | Required: true, 40 | ForceNew: true, 41 | }, 42 | "type": { 43 | Description: `The data type of the time series. Valid types are: ` + "`string`, `string_low_cardinality`, `int64_delta`, `int64`, `uint64_delta`, `uint64`, `float64_delta`, `datetime64_delta`, `boolean`, `array_bfloat16`, `array_float32`", 44 | Type: schema.TypeString, 45 | Required: true, 46 | ForceNew: true, 47 | ValidateDiagFunc: func(v interface{}, path cty.Path) diag.Diagnostics { 48 | s := v.(string) 49 | for _, tsType := range timeSeriesTypes { 50 | if s == tsType { 51 | return nil 52 | } 53 | } 54 | return diag.Diagnostics{ 55 | diag.Diagnostic{ 56 | AttributePath: path, 57 | Severity: diag.Error, 58 | Summary: `Invalid "type"`, 59 | Detail: fmt.Sprintf("Expected one of %v", timeSeriesTypes), 60 | }, 61 | } 62 | }, 63 | }, 64 | "sql_expression": { 65 | Description: "The SQL expression used to compute the time series. For example `JSONExtract(raw, 'response_time', 'Nullable(Float64)')`.", 66 | Type: schema.TypeString, 67 | Required: true, 68 | ForceNew: true, 69 | }, 70 | "aggregations": { 71 | Description: "An array of aggregation functions (e.g., `avg`, `min`, `max`). 
If omitted, no aggregations are applied.", 72 | Type: schema.TypeList, 73 | Optional: true, 74 | ForceNew: true, 75 | Elem: &schema.Schema{ 76 | Type: schema.TypeString, 77 | }, 78 | }, 79 | "expression_index": { 80 | Description: "The type of vector index to apply (e.g., `vector_similarity`). Only applicable for vector types (`array_bfloat16`, `array_float32`).", 81 | Type: schema.TypeString, 82 | Optional: true, 83 | ForceNew: true, 84 | }, 85 | "vector_dimension": { 86 | Description: "The vector dimension if `expression_index` is `vector_similarity` (e.g., `512`). Supported values: 256, 384, 512, 768, 1024, 1536, 3072, 4096, 10752.", 87 | Type: schema.TypeInt, 88 | Optional: true, 89 | ForceNew: true, 90 | ValidateFunc: validation.IntInSlice([]int{256, 384, 512, 768, 1024, 1536, 3072, 4096, 10752}), 91 | }, 92 | "vector_distance_function": { 93 | Description: "The distance function to use for vector similarity (e.g., `cosine`, `l2`).", 94 | Type: schema.TypeString, 95 | Optional: true, 96 | ForceNew: true, 97 | ValidateFunc: validation.StringInSlice([]string{"cosine", "l2"}, false), 98 | }, 99 | } 100 | 101 | func newWarehouseTimeSeriesResource() *schema.Resource { 102 | return &schema.Resource{ 103 | CreateContext: warehouseTimeSeriesCreate, 104 | ReadContext: warehouseTimeSeriesRead, 105 | DeleteContext: warehouseTimeSeriesDelete, 106 | Importer: &schema.ResourceImporter{ 107 | StateContext: schema.ImportStatePassthroughContext, 108 | }, 109 | Description: "This resource allows you to create and delete your Warehouse time series. For more information about the Warehouse API check https://betterstack.com/docs/warehouse/api/time-series/create/", 110 | Schema: warehouseTimeSeriesSchema, 111 | } 112 | } 113 | 114 | type warehouseTimeSeries struct { 115 | Name *string `json:"name,omitempty"` 116 | SqlExpression *string `json:"sql_expression,omitempty"` 117 | Aggregations *[]string `json:"aggregations,omitempty"` 118 | Type *string `json:"type,omitempty"` 119 | ExpressionIndex *string `json:"expression_index,omitempty"` 120 | VectorDimension *int `json:"vector_dimension,omitempty"` 121 | VectorDistanceFunction *string `json:"vector_distance_function,omitempty"` 122 | } 123 | 124 | type warehouseTimeSeriesHTTPResponse struct { 125 | Data struct { 126 | ID string `json:"id"` 127 | Attributes warehouseTimeSeries `json:"attributes"` 128 | } `json:"data"` 129 | } 130 | 131 | type warehouseTimeSeriesPageHTTPResponse struct { 132 | Data []struct { 133 | ID string `json:"id"` 134 | Attributes warehouseTimeSeries `json:"attributes"` 135 | } `json:"data"` 136 | Pagination struct { 137 | First string `json:"first"` 138 | Last string `json:"last"` 139 | Prev string `json:"prev"` 140 | Next string `json:"next"` 141 | } `json:"pagination"` 142 | } 143 | 144 | func warehouseTimeSeriesRef(in *warehouseTimeSeries) []struct { 145 | k string 146 | v interface{} 147 | } { 148 | return []struct { 149 | k string 150 | v interface{} 151 | }{ 152 | {k: "name", v: &in.Name}, 153 | {k: "sql_expression", v: &in.SqlExpression}, 154 | {k: "aggregations", v: &in.Aggregations}, 155 | {k: "type", v: &in.Type}, 156 | {k: "expression_index", v: &in.ExpressionIndex}, 157 | {k: "vector_dimension", v: &in.VectorDimension}, 158 | {k: "vector_distance_function", v: &in.VectorDistanceFunction}, 159 | } 160 | } 161 | 162 | func warehouseTimeSeriesCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { 163 | var in warehouseTimeSeries 164 | for _, e := range warehouseTimeSeriesRef(&in) { 165 | 
load(d, e.k, e.v) 166 | } 167 | 168 | sourceID := d.Get("source_id").(string) 169 | 170 | var out warehouseTimeSeriesHTTPResponse 171 | if err := resourceCreateWithBaseURL(ctx, meta, meta.(*client).WarehouseBaseURL(), fmt.Sprintf("/api/v1/sources/%s/time_series", url.PathEscape(sourceID)), &in, &out); err != nil { 172 | return err 173 | } 174 | d.SetId(out.Data.ID) 175 | return warehouseTimeSeriesCopyAttrs(d, &out.Data.Attributes) 176 | } 177 | 178 | func warehouseTimeSeriesRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { 179 | sourceID := d.Get("source_id").(string) 180 | fetch := func(page int) (*warehouseTimeSeriesPageHTTPResponse, error) { 181 | res, err := meta.(*client).do(ctx, "GET", meta.(*client).WarehouseBaseURL(), fmt.Sprintf("/api/v1/sources/%s/time_series?page=%d", url.PathEscape(sourceID), page), nil) 182 | if err != nil { 183 | return nil, err 184 | } 185 | defer func() { 186 | // Keep-Alive. 187 | _, _ = io.Copy(io.Discard, res.Body) 188 | _ = res.Body.Close() 189 | }() 190 | body, err := io.ReadAll(res.Body) 191 | if res.StatusCode != http.StatusOK { 192 | return nil, fmt.Errorf("GET %s returned %d: %s", res.Request.URL.String(), res.StatusCode, string(body)) 193 | } 194 | if err != nil { 195 | return nil, err 196 | } 197 | var tr warehouseTimeSeriesPageHTTPResponse 198 | return &tr, json.Unmarshal(body, &tr) 199 | } 200 | page := 1 201 | for { 202 | res, err := fetch(page) 203 | if err != nil { 204 | return diag.FromErr(err) 205 | } 206 | for _, e := range res.Data { 207 | if e.ID == d.Id() { 208 | if derr := warehouseTimeSeriesCopyAttrs(d, &e.Attributes); derr != nil { 209 | return derr 210 | } 211 | return nil 212 | } 213 | } 214 | page++ 215 | if res.Pagination.Next == "" { 216 | break 217 | } 218 | } 219 | d.SetId("") // Not found, force "create". 220 | return nil 221 | } 222 | 223 | func warehouseTimeSeriesCopyAttrs(d *schema.ResourceData, in *warehouseTimeSeries) diag.Diagnostics { 224 | var derr diag.Diagnostics 225 | for _, e := range warehouseTimeSeriesRef(in) { 226 | if err := d.Set(e.k, reflect.Indirect(reflect.ValueOf(e.v)).Interface()); err != nil { 227 | derr = append(derr, diag.FromErr(err)[0]) 228 | } 229 | } 230 | 231 | return derr 232 | } 233 | 234 | func warehouseTimeSeriesDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { 235 | sourceID := d.Get("source_id").(string) 236 | return resourceDeleteWithBaseURL(ctx, meta, meta.(*client).WarehouseBaseURL(), fmt.Sprintf("/api/v1/sources/%s/time_series/%s", url.PathEscape(sourceID), url.PathEscape(d.Id()))) 237 | } 238 | -------------------------------------------------------------------------------- /internal/provider/resource_connection.go: -------------------------------------------------------------------------------- 1 | package provider 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "net/url" 7 | "reflect" 8 | 9 | "github.com/hashicorp/terraform-plugin-sdk/v2/diag" 10 | "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" 11 | ) 12 | 13 | var connectionSchema = map[string]*schema.Schema{ 14 | "id": { 15 | Description: "The ID of this connection.", 16 | Type: schema.TypeString, 17 | Optional: false, 18 | Computed: true, 19 | }, 20 | "client_type": { 21 | Description: "Type of client connection. Currently only `clickhouse` is supported.", 22 | Type: schema.TypeString, 23 | Required: true, 24 | ForceNew: true, 25 | }, 26 | "team_names": { 27 | Description: "Array of team names to associate with the connection. 
Only one of `team_names` or `team_ids` should be provided.", 28 | Type: schema.TypeList, 29 | Optional: true, 30 | Computed: true, 31 | ForceNew: true, 32 | Elem: &schema.Schema{ 33 | Type: schema.TypeString, 34 | }, 35 | }, 36 | "team_ids": { 37 | Description: "Array of team IDs to associate with the connection. Only one of `team_names` or `team_ids` should be provided.", 38 | Type: schema.TypeList, 39 | Optional: true, 40 | Computed: true, 41 | ForceNew: true, 42 | Elem: &schema.Schema{ 43 | Type: schema.TypeInt, 44 | }, 45 | }, 46 | "data_region": { 47 | Description: "Data region or private cluster name. Permitted values: `us_east`, `us_west`, `germany`, `singapore`.", 48 | Type: schema.TypeString, 49 | Optional: true, 50 | ForceNew: true, 51 | }, 52 | "ip_allowlist": { 53 | Description: "Array of IP addresses or CIDR ranges that are allowed to use this connection.", 54 | Type: schema.TypeList, 55 | Optional: true, 56 | ForceNew: true, 57 | Elem: &schema.Schema{ 58 | Type: schema.TypeString, 59 | }, 60 | }, 61 | "valid_until": { 62 | Description: "ISO 8601 timestamp when the connection expires.", 63 | Type: schema.TypeString, 64 | Optional: true, 65 | ForceNew: true, 66 | }, 67 | "note": { 68 | Description: "A descriptive note for the connection.", 69 | Type: schema.TypeString, 70 | Optional: true, 71 | ForceNew: true, 72 | }, 73 | "host": { 74 | Description: "The connection hostname.", 75 | Type: schema.TypeString, 76 | Computed: true, 77 | }, 78 | "port": { 79 | Description: "The connection port.", 80 | Type: schema.TypeInt, 81 | Computed: true, 82 | }, 83 | "username": { 84 | Description: "The connection username.", 85 | Type: schema.TypeString, 86 | Computed: true, 87 | }, 88 | "password": { 89 | Description: "The connection password. Only available immediately after creation.", 90 | Type: schema.TypeString, 91 | Computed: true, 92 | Sensitive: true, 93 | }, 94 | "created_at": { 95 | Description: "The time when this connection was created.", 96 | Type: schema.TypeString, 97 | Computed: true, 98 | }, 99 | "created_by": { 100 | Description: "Information about the user who created this connection.", 101 | Type: schema.TypeMap, 102 | Computed: true, 103 | Elem: &schema.Schema{ 104 | Type: schema.TypeString, 105 | }, 106 | }, 107 | "sample_query": { 108 | Description: "A sample query showing how to use this connection.", 109 | Type: schema.TypeString, 110 | Computed: true, 111 | }, 112 | "data_sources": { 113 | Description: "List of available data sources for this connection.", 114 | Type: schema.TypeList, 115 | Computed: true, 116 | Elem: &schema.Resource{ 117 | Schema: map[string]*schema.Schema{ 118 | "source_name": { 119 | Type: schema.TypeString, 120 | Computed: true, 121 | }, 122 | "source_id": { 123 | Type: schema.TypeInt, 124 | Computed: true, 125 | }, 126 | "team_name": { 127 | Type: schema.TypeString, 128 | Computed: true, 129 | }, 130 | "data_sources": { 131 | Type: schema.TypeList, 132 | Computed: true, 133 | Elem: &schema.Schema{ 134 | Type: schema.TypeString, 135 | }, 136 | }, 137 | }, 138 | }, 139 | }, 140 | } 141 | 142 | type connection struct { 143 | ClientType *string `json:"client_type,omitempty"` 144 | TeamNames *[]string `json:"team_names,omitempty"` 145 | TeamIds *[]int `json:"team_ids,omitempty"` 146 | DataRegion *string `json:"data_region,omitempty"` 147 | IpAllowlist *[]string `json:"ip_allowlist,omitempty"` 148 | ValidUntil *string `json:"valid_until,omitempty"` 149 | Note *string `json:"note,omitempty"` 150 | Host *string `json:"host,omitempty"` 151 | Port *int 
`json:"port,omitempty"` 152 | Username *string `json:"username,omitempty"` 153 | Password *string `json:"password,omitempty"` 154 | CreatedAt *string `json:"created_at,omitempty"` 155 | CreatedBy map[string]interface{} `json:"created_by,omitempty"` 156 | SampleQuery *string `json:"sample_query,omitempty"` 157 | DataSources *[]map[string]interface{} `json:"data_sources,omitempty"` 158 | } 159 | 160 | type connectionHTTPResponse struct { 161 | Data struct { 162 | ID string `json:"id"` 163 | Attributes connection `json:"attributes"` 164 | } `json:"data"` 165 | } 166 | 167 | func connectionRef(in *connection) []struct { 168 | k string 169 | v interface{} 170 | } { 171 | return []struct { 172 | k string 173 | v interface{} 174 | }{ 175 | {k: "client_type", v: &in.ClientType}, 176 | {k: "team_names", v: &in.TeamNames}, 177 | {k: "team_ids", v: &in.TeamIds}, 178 | {k: "data_region", v: &in.DataRegion}, 179 | {k: "ip_allowlist", v: &in.IpAllowlist}, 180 | {k: "valid_until", v: &in.ValidUntil}, 181 | {k: "note", v: &in.Note}, 182 | {k: "host", v: &in.Host}, 183 | {k: "port", v: &in.Port}, 184 | {k: "username", v: &in.Username}, 185 | {k: "password", v: &in.Password}, 186 | {k: "created_at", v: &in.CreatedAt}, 187 | {k: "sample_query", v: &in.SampleQuery}, 188 | } 189 | } 190 | 191 | func newConnectionResource() *schema.Resource { 192 | return &schema.Resource{ 193 | CreateContext: connectionCreate, 194 | ReadContext: connectionRead, 195 | DeleteContext: connectionDelete, 196 | Description: "This resource allows you to create and manage ClickHouse connections for remote querying. For more information about the Connection API check https://betterstack.com/docs/logs/api/connections/", 197 | Schema: connectionSchema, 198 | } 199 | } 200 | 201 | func connectionCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { 202 | // Store original user values before API call 203 | originalDataRegion := d.Get("data_region").(string) 204 | originalValidUntil := d.Get("valid_until").(string) 205 | 206 | var in connection 207 | for _, e := range connectionRef(&in) { 208 | load(d, e.k, e.v) 209 | } 210 | 211 | var out connectionHTTPResponse 212 | if err := resourceCreate(ctx, meta, "/api/v1/connections", &in, &out); err != nil { 213 | return err 214 | } 215 | d.SetId(out.Data.ID) 216 | 217 | // Copy attributes but preserve user-specified values 218 | derr := connectionCopyAttrs(d, &out.Data.Attributes) 219 | 220 | // Restore user-specified values that might have been normalized by API 221 | if originalDataRegion != "" { 222 | if err := d.Set("data_region", originalDataRegion); err != nil { 223 | derr = append(derr, diag.FromErr(err)[0]) 224 | } 225 | } 226 | if originalValidUntil != "" { 227 | if err := d.Set("valid_until", originalValidUntil); err != nil { 228 | derr = append(derr, diag.FromErr(err)[0]) 229 | } 230 | } 231 | 232 | return derr 233 | } 234 | 235 | func connectionRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { 236 | // Store current user values before API call 237 | currentDataRegion := d.Get("data_region").(string) 238 | currentValidUntil := d.Get("valid_until").(string) 239 | 240 | var out connectionHTTPResponse 241 | if err, ok := resourceReadWithBaseURL(ctx, meta, meta.(*client).TelemetryBaseURL(), fmt.Sprintf("/api/v1/connections/%s", url.PathEscape(d.Id())), &out); err != nil || !ok { 242 | if err == nil { 243 | d.SetId("") // Force "create" on 404.
244 | return nil 245 | } 246 | return err 247 | } 248 | 249 | // Copy attributes but preserve user-specified values 250 | derr := connectionCopyAttrs(d, &out.Data.Attributes) 251 | 252 | // Restore user-specified values that might have been normalized by API 253 | if currentDataRegion != "" { 254 | if err := d.Set("data_region", currentDataRegion); err != nil { 255 | derr = append(derr, diag.FromErr(err)[0]) 256 | } 257 | } 258 | if currentValidUntil != "" { 259 | if err := d.Set("valid_until", currentValidUntil); err != nil { 260 | derr = append(derr, diag.FromErr(err)[0]) 261 | } 262 | } 263 | 264 | return derr 265 | } 266 | 267 | func connectionDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { 268 | return resourceDelete(ctx, meta, fmt.Sprintf("/api/v1/connections/%s", url.PathEscape(d.Id()))) 269 | } 270 | 271 | func connectionCopyAttrs(d *schema.ResourceData, in *connection) diag.Diagnostics { 272 | var derr diag.Diagnostics 273 | for _, e := range connectionRef(in) { 274 | if e.k == "password" && d.Get("password").(string) != "" { 275 | // Don't update password from API if it's already set - password is only returned during creation 276 | continue 277 | } else if e.k == "data_region" && d.Get("data_region").(string) != "" { 278 | // Preserve user-specified data_region over API-normalized value 279 | continue 280 | } else if e.k == "valid_until" && d.Get("valid_until").(string) != "" { 281 | // Preserve user-specified valid_until over API-formatted value 282 | continue 283 | } else if err := d.Set(e.k, reflect.Indirect(reflect.ValueOf(e.v)).Interface()); err != nil { 284 | derr = append(derr, diag.FromErr(err)[0]) 285 | } 286 | } 287 | 288 | // Handle complex fields 289 | if err := d.Set("created_by", in.CreatedBy); err != nil { 290 | derr = append(derr, diag.FromErr(err)[0]) 291 | } 292 | if in.DataSources != nil && *in.DataSources != nil { 293 | if err := d.Set("data_sources", *in.DataSources); err != nil { 294 | derr = append(derr, diag.FromErr(err)[0]) 295 | } 296 | } else { 297 | if err := d.Set("data_sources", []interface{}{}); err != nil { 298 | derr = append(derr, diag.FromErr(err)[0]) 299 | } 300 | } 301 | 302 | return derr 303 | } 304 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 
25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. 
If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. 
Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | --------------------------------------------------------------------------------