├── pkg ├── fs │ └── fs.go ├── base_data.go ├── base_transform.go ├── terraform │ ├── lock.go │ ├── block.go │ ├── attribute.go │ ├── attribute_test.go │ ├── object.go │ ├── nested_block.go │ └── root_block.go ├── mptf_funcs.go ├── transform_remove_block.go ├── resource_schema_test.go ├── transform_remove_block_element_block.go ├── transform_move_block.go ├── transform_update_in_place_internal_test.go ├── init.go ├── data_local.go ├── data_output.go ├── mptf_plan_test.go ├── mptf_funcs_test.go ├── data_variable.go ├── mptf_plan.go ├── mptf_block.go ├── transform_regex_replace_expression.go ├── transform_append_block_body.go ├── data_terraform.go ├── data_data_source.go ├── data_resource.go ├── data_output_test.go ├── transform_ensure_local.go ├── resource_schema.go ├── transform_ensure_local_test.go ├── transform_append_block_body_test.go ├── data_local_test.go ├── data_variable_test.go ├── terraform_module_ref.go ├── transform_remove_block_test.go ├── data_provider_schema.go ├── transform_rename_block_element.go ├── mptf_config_test.go ├── data_terraform_test.go ├── backup │ ├── backup.go │ └── backup_test.go ├── transform_update_in_place.go ├── transform_new_block.go ├── data_data_source_test.go └── data_resource_test.go ├── .github ├── dependabot.yml └── workflows │ └── pr-check.yml ├── example ├── prevent_destroy │ ├── variables.tf │ ├── providers.tf │ ├── main.tf │ └── main.mptf.hcl ├── customize_aks_ignore_changes │ ├── main.mptf.hcl │ ├── main.tf │ └── readme.md ├── new_private_endpoint_for_cognitive_account │ ├── main.tf │ └── main.mptf.hcl └── tracing_tags │ ├── main.tf │ └── main.mptf.hcl ├── doc ├── index.md ├── d │ ├── terraform.md │ ├── provider_schema.md │ ├── resource.md │ └── data.md └── t │ ├── regex_replace_expression.md │ ├── append_block_body.md │ ├── new_block.md │ └── remove_block_element.md ├── CODE_OF_CONDUCT.md ├── main.go ├── cmd ├── reset.go ├── config_path.go ├── clean_backup.go ├── terraform_cmd_wrapper.go ├── clean_backup_test.go ├── 
var_flag_internal_test.go ├── reset_test.go ├── args.go ├── args_test.go ├── root.go ├── var_flag_internal.go ├── transform_test.go ├── debug.go ├── transform.go └── terraform.go ├── LICENSE ├── SECURITY.md ├── readme.md ├── .gitignore └── tutorial └── 1.MapoTF_DSL_Basics.md /pkg/fs/fs.go: -------------------------------------------------------------------------------- 1 | package fs 2 | 3 | import "github.com/spf13/afero" 4 | 5 | var Fs = afero.NewOsFs() 6 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | updates: 3 | - package-ecosystem: "gomod" 4 | directory: "/" 5 | schedule: 6 | interval: "weekly" -------------------------------------------------------------------------------- /example/prevent_destroy/variables.tf: -------------------------------------------------------------------------------- 1 | variable "location" { 2 | type = string 3 | default = "westus" 4 | } 5 | 6 | variable "vnet_location" { 7 | type = string 8 | default = "eastus" 9 | } 10 | -------------------------------------------------------------------------------- /doc/index.md: -------------------------------------------------------------------------------- 1 | ## `transform` blocks 2 | 3 | * [`append_block_body`](t/append_block_body.md) 4 | * [`new_block`](t/new_block.md) 5 | * [`regex_replace_expression`](t/regex_replace_expression.md) 6 | * [`update_in_place`](t/update_in_place.md) 7 | 8 | ## `data` blocks 9 | 10 | * [`data`](d/data.md) 11 | * [`provider_schema`](d/provider_schema.md) 12 | * [`resource`](d/resource.md) 13 | * [`terraform`](d/terraform.md) -------------------------------------------------------------------------------- /pkg/base_data.go: -------------------------------------------------------------------------------- 1 | package pkg 2 | 3 | import "github.com/Azure/golden" 4 | 5 | type Data interface { 6 | 
golden.PlanBlock 7 | Data() 8 | } 9 | 10 | type BaseData struct{} 11 | 12 | func (bd *BaseData) BlockType() string { 13 | return "data" 14 | } 15 | 16 | func (bd *BaseData) Data() {} 17 | 18 | func (rd *BaseData) AddressLength() int { return 3 } 19 | 20 | func (bd *BaseData) CanExecutePrePlan() bool { 21 | return false 22 | } 23 | -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | # Code of Conduct 2 | 3 | This code of conduct outlines expectations for participation in Microsoft-managed open source communities, as well as steps for reporting unacceptable behavior. We are committed to providing a welcoming and inspiring community for all. People violating this code of conduct may be banned from the community. 4 | 5 | Please read the full text at [https://opensource.microsoft.com/codeofconduct/](https://opensource.microsoft.com/codeofconduct/) -------------------------------------------------------------------------------- /pkg/base_transform.go: -------------------------------------------------------------------------------- 1 | package pkg 2 | 3 | import ( 4 | "github.com/Azure/golden" 5 | ) 6 | 7 | type Transform interface { 8 | golden.ApplyBlock 9 | Transform() 10 | } 11 | 12 | type BaseTransform struct{} 13 | 14 | func (bt *BaseTransform) BlockType() string { return "transform" } 15 | func (bt *BaseTransform) AddressLength() int { return 3 } 16 | func (bt *BaseTransform) CanExecutePrePlan() bool { return false } 17 | func (bt *BaseTransform) Transform() {} 18 | -------------------------------------------------------------------------------- /example/prevent_destroy/providers.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">=1.2" 3 | required_providers { 4 | azurerm = { 5 | source = "hashicorp/azurerm" 6 | version = ">=3.11.0, <4.0" 7 | } 8 | random = { 
9 | source = "hashicorp/random" 10 | version = "3.3.2" 11 | } 12 | } 13 | } 14 | 15 | provider "azurerm" { 16 | features { 17 | resource_group { 18 | prevent_deletion_if_contains_resources = false 19 | } 20 | } 21 | } 22 | 23 | provider "random" {} -------------------------------------------------------------------------------- /main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "context" 5 | "os" 6 | "os/signal" 7 | "syscall" 8 | 9 | "github.com/Azure/mapotf/cmd" 10 | ) 11 | 12 | func main() { 13 | mptfArgs, nonMptfArgs := cmd.FilterArgs(os.Args) 14 | os.Args = mptfArgs 15 | cmd.NonMptfArgs = nonMptfArgs 16 | ctx, cancelFunc := context.WithCancel(context.Background()) 17 | ch := make(chan os.Signal, 1) 18 | signal.Notify(ch, os.Interrupt, syscall.SIGTERM) 19 | go func() { 20 | <-ch 21 | cancelFunc() 22 | }() 23 | cmd.Execute(ctx) 24 | } 25 | -------------------------------------------------------------------------------- /example/customize_aks_ignore_changes/main.mptf.hcl: -------------------------------------------------------------------------------- 1 | data "resource" aks { 2 | resource_type = "azurerm_kubernetes_cluster" 3 | } 4 | 5 | transform "update_in_place" aks_ignore_changes { 6 | for_each = try(data.resource.aks.result.azurerm_kubernetes_cluster, {}) 7 | target_block_address = each.value.mptf.block_address 8 | asstring { 9 | lifecycle { 10 | ignore_changes = "[\nmicrosoft_defender[0].log_analytics_workspace_id, ${trimprefix(try(each.value.lifecycle.0.ignore_changes, "[\n]"), "[")}" 11 | } 12 | } 13 | } -------------------------------------------------------------------------------- /example/new_private_endpoint_for_cognitive_account/main.tf: -------------------------------------------------------------------------------- 1 | resource "azurerm_resource_group" "example" { 2 | name = "example-resources" 3 | location = "West Europe" 4 | } 5 | 6 | resource 
"azurerm_cognitive_account" "example" { 7 | name = "example-account" 8 | location = azurerm_resource_group.example.location 9 | resource_group_name = azurerm_resource_group.example.name 10 | kind = "Face" 11 | 12 | sku_name = "S0" 13 | 14 | tags = { 15 | Acceptance = "Test" 16 | } 17 | } -------------------------------------------------------------------------------- /example/customize_aks_ignore_changes/main.tf: -------------------------------------------------------------------------------- 1 | variable "resource_group_name" { 2 | type = string 3 | default = "aks_test" 4 | } 5 | 6 | provider "azurerm" { 7 | features {} 8 | } 9 | 10 | resource "random_pet" "this" {} 11 | 12 | resource "azurerm_resource_group" "rg" { 13 | location = "eastus" 14 | name = "${var.resource_group_name}-${random_pet.this.id}" 15 | } 16 | 17 | module "aks" { 18 | source = "Azure/aks/azurerm" 19 | version = "9.1.0" 20 | 21 | cluster_name = "aks-test" 22 | prefix = "akstest" 23 | resource_group_name = azurerm_resource_group.rg.name 24 | rbac_aad = false 25 | } -------------------------------------------------------------------------------- /pkg/terraform/lock.go: -------------------------------------------------------------------------------- 1 | package terraform 2 | 3 | import ( 4 | "sync" 5 | ) 6 | 7 | var lock = &keyLock{ 8 | keyLocks: make(map[string]*sync.Mutex), 9 | } 10 | 11 | type keyLock struct { 12 | glock sync.Mutex 13 | keyLocks map[string]*sync.Mutex 14 | } 15 | 16 | func (m *keyLock) Lock(key string) { 17 | m.get(key).Lock() 18 | } 19 | 20 | func (m *keyLock) Unlock(key string) { 21 | m.get(key).Unlock() 22 | } 23 | 24 | func (m *keyLock) get(key string) *sync.Mutex { 25 | m.glock.Lock() 26 | defer m.glock.Unlock() 27 | kl, ok := m.keyLocks[key] 28 | if !ok { 29 | kl = &sync.Mutex{} 30 | m.keyLocks[key] = kl 31 | } 32 | return kl 33 | } 34 | -------------------------------------------------------------------------------- /pkg/terraform/block.go: 
-------------------------------------------------------------------------------- 1 | package terraform 2 | 3 | import ( 4 | "github.com/hashicorp/hcl/v2" 5 | "github.com/hashicorp/hcl/v2/hclwrite" 6 | "github.com/zclconf/go-cty/cty" 7 | ) 8 | 9 | type Block interface { 10 | EvalContext() cty.Value 11 | GetAttributes() map[string]*Attribute 12 | GetNestedBlocks() NestedBlocks 13 | WriteBody() *hclwrite.Body 14 | SetAttributeRaw(name string, tokens hclwrite.Tokens) 15 | AppendBlock(block *hclwrite.Block) 16 | Range() hcl.Range 17 | RemoveContent(path string) 18 | } 19 | 20 | func lockBlockFile(b Block) func() { 21 | fn := b.Range().Filename 22 | lock.Lock(fn) 23 | return func() { 24 | lock.Unlock(fn) 25 | } 26 | } 27 | -------------------------------------------------------------------------------- /pkg/mptf_funcs.go: -------------------------------------------------------------------------------- 1 | package pkg 2 | 3 | import ( 4 | "github.com/hashicorp/hcl/v2/hclwrite" 5 | "github.com/zclconf/go-cty/cty" 6 | "github.com/zclconf/go-cty/cty/function" 7 | ) 8 | 9 | var ToHclFunc = function.New(&function.Spec{ 10 | Description: "Convert an cty.Value to HCL config in string format", 11 | Params: []function.Parameter{ 12 | { 13 | Name: "input", 14 | Type: cty.DynamicPseudoType, 15 | }, 16 | }, 17 | Type: function.StaticReturnType(cty.String), 18 | Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { 19 | input := args[0] 20 | return cty.StringVal(string(hclwrite.TokensForValue(input).BuildTokens(nil).Bytes())), nil 21 | }, 22 | }) 23 | -------------------------------------------------------------------------------- /pkg/transform_remove_block.go: -------------------------------------------------------------------------------- 1 | package pkg 2 | 3 | import ( 4 | "fmt" 5 | "github.com/Azure/golden" 6 | ) 7 | 8 | var _ Transform = &RemoveBlockTransform{} 9 | 10 | type RemoveBlockTransform struct { 11 | *golden.BaseBlock 12 | *BaseTransform 13 | 
TargetBlockAddress string `hcl:"target_block_address"` 14 | } 15 | 16 | func (r *RemoveBlockTransform) Type() string { 17 | return "remove_block" 18 | } 19 | 20 | func (r *RemoveBlockTransform) Apply() error { 21 | cfg := r.Config().(*MetaProgrammingTFConfig) 22 | b := cfg.RootBlock(r.TargetBlockAddress) 23 | if b == nil { 24 | return fmt.Errorf("cannot find block: %s", r.TargetBlockAddress) 25 | } 26 | cfg.module.RemoveBlock(b.WriteBlock) 27 | return nil 28 | } 29 | -------------------------------------------------------------------------------- /pkg/terraform/attribute.go: -------------------------------------------------------------------------------- 1 | package terraform 2 | 3 | import ( 4 | "strings" 5 | 6 | "github.com/hashicorp/hcl/v2/hclsyntax" 7 | "github.com/hashicorp/hcl/v2/hclwrite" 8 | ) 9 | 10 | type Attribute struct { 11 | Name string 12 | *hclsyntax.Attribute 13 | WriteAttribute *hclwrite.Attribute 14 | } 15 | 16 | func NewAttribute(name string, attribute *hclsyntax.Attribute, writeAttribute *hclwrite.Attribute) *Attribute { 17 | r := &Attribute{ 18 | Name: name, 19 | Attribute: attribute, 20 | WriteAttribute: writeAttribute, 21 | } 22 | return r 23 | } 24 | 25 | func (a *Attribute) String() string { 26 | return strings.TrimSpace(string(a.WriteAttribute.Expr().BuildTokens(hclwrite.Tokens{}).Bytes())) 27 | } 28 | -------------------------------------------------------------------------------- /example/tracing_tags/main.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_providers { 3 | azurerm = { 4 | source = "hashicorp/azurerm" 5 | version = "3.112.0" 6 | } 7 | } 8 | } 9 | 10 | resource "azurerm_resource_group" "this" { 11 | location = "West US" 12 | name = "example-resources" 13 | } 14 | 15 | resource "azurerm_storage_account" "this" { 16 | name = "storageaccountname" 17 | resource_group_name = azurerm_resource_group.this.name 18 | location = azurerm_resource_group.this.location 19 | 
account_tier = "Standard" 20 | account_replication_type = "LRS" 21 | tags = { 22 | env = "prod" 23 | } 24 | } 25 | 26 | resource "azurerm_subnet" "this" { 27 | address_prefixes = [] 28 | name = "" 29 | resource_group_name = "" 30 | virtual_network_name = "" 31 | } 32 | 33 | -------------------------------------------------------------------------------- /pkg/resource_schema_test.go: -------------------------------------------------------------------------------- 1 | package pkg_test 2 | 3 | import ( 4 | "context" 5 | "runtime" 6 | "testing" 7 | 8 | "github.com/Azure/mapotf/pkg" 9 | "github.com/stretchr/testify/assert" 10 | "github.com/stretchr/testify/require" 11 | ) 12 | 13 | func TestTerraformCliProviderSchemaRetriever_retrieveLocalProviderSchema(t *testing.T) { 14 | if runtime.GOOS == "windows" { 15 | t.Skip("Skipping test on Windows since setup Terraform on windows seems not work with this test") 16 | } 17 | sut := pkg.NewTerraformCliProviderSchemaRetriever(context.Background()) 18 | schema, err := sut.Get("hashicorp/local", "2.5.1") 19 | require.NoError(t, err) 20 | assert.Contains(t, schema.ResourceSchemas, "local_file") 21 | assert.Contains(t, schema.ResourceSchemas, "local_sensitive_file") 22 | assert.Contains(t, schema.DataSourceSchemas, "local_file") 23 | assert.Contains(t, schema.DataSourceSchemas, "local_sensitive_file") 24 | } 25 | -------------------------------------------------------------------------------- /pkg/transform_remove_block_element_block.go: -------------------------------------------------------------------------------- 1 | package pkg 2 | 3 | import ( 4 | "fmt" 5 | "github.com/Azure/golden" 6 | "strings" 7 | ) 8 | 9 | var _ Transform = &RemoveBlockContentBlockTransform{} 10 | 11 | type RemoveBlockContentBlockTransform struct { 12 | *golden.BaseBlock 13 | *BaseTransform 14 | TargetBlockAddress string `hcl:"target_block_address"` 15 | Paths []string `hcl:"paths"` 16 | } 17 | 18 | func (r *RemoveBlockContentBlockTransform) Type() string { 
19 | return "remove_block_element" 20 | } 21 | 22 | func (r *RemoveBlockContentBlockTransform) Apply() error { 23 | cfg := r.Config().(*MetaProgrammingTFConfig) 24 | b := cfg.RootBlock(r.TargetBlockAddress) 25 | if b == nil { 26 | return fmt.Errorf("cannot find block: %s", r.TargetBlockAddress) 27 | } 28 | for _, path := range r.Paths { 29 | path = strings.TrimSpace(path) 30 | b.RemoveContent(path) 31 | } 32 | return nil 33 | } 34 | -------------------------------------------------------------------------------- /example/prevent_destroy/main.tf: -------------------------------------------------------------------------------- 1 | resource "random_id" "rg_name" { 2 | byte_length = 8 3 | } 4 | 5 | resource "azurerm_resource_group" "example" { 6 | location = var.location 7 | name = "azure-subnets-${random_id.rg_name.hex}-rg" 8 | } 9 | 10 | locals { 11 | subnets = { 12 | for i in range(3) : "subnet${i}" => { 13 | address_prefixes = [cidrsubnet(local.virtual_network_address_space, 8, i)] 14 | } 15 | } 16 | virtual_network_address_space = "10.0.0.0/16" 17 | } 18 | 19 | module "vnet" { 20 | source = "Azure/subnets/azurerm" 21 | version = "1.0.0" 22 | resource_group_name = azurerm_resource_group.example.name 23 | subnets = local.subnets 24 | virtual_network_address_space = [local.virtual_network_address_space] 25 | virtual_network_location = var.vnet_location 26 | virtual_network_name = "azure-subnets-vnet" 27 | } -------------------------------------------------------------------------------- /cmd/reset.go: -------------------------------------------------------------------------------- 1 | package cmd 2 | 3 | import ( 4 | "fmt" 5 | 6 | "github.com/Azure/mapotf/pkg" 7 | "github.com/Azure/mapotf/pkg/backup" 8 | "github.com/spf13/cobra" 9 | ) 10 | 11 | func NewResetCmd() *cobra.Command { 12 | return &cobra.Command{ 13 | Use: "reset", 14 | Short: "Reset all transformed Terraform files, mapotf reset --tf-dir", 15 | FParseErrWhitelist: cobra.FParseErrWhitelist{ 16 | 
UnknownFlags: true, 17 | }, 18 | RunE: func(cmd *cobra.Command, args []string) error { 19 | return reset() 20 | }, 21 | } 22 | } 23 | 24 | func reset() error { 25 | moduleRefs, err := pkg.ModuleRefs(cf.tfDir) 26 | if err != nil { 27 | return err 28 | } 29 | for _, tfDir := range moduleRefs { 30 | d := tfDir 31 | err = backup.Reset(d.AbsDir) 32 | if err != nil { 33 | return err 34 | } 35 | } 36 | fmt.Println("All transforms have been reverted.") 37 | return nil 38 | } 39 | 40 | func init() { 41 | rootCmd.AddCommand(NewResetCmd()) 42 | } 43 | -------------------------------------------------------------------------------- /cmd/config_path.go: -------------------------------------------------------------------------------- 1 | package cmd 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | filesystem "github.com/Azure/mapotf/pkg/fs" 7 | "os" 8 | "path/filepath" 9 | 10 | "github.com/Azure/mapotf/pkg" 11 | "github.com/google/uuid" 12 | "github.com/hashicorp/go-getter/v2" 13 | "github.com/spf13/afero" 14 | ) 15 | 16 | func localizeConfigFolder(path string, ctx context.Context) (configPath string, onDefer func(), err error) { 17 | absPath, err := pkg.AbsDir(path) 18 | if err == nil { 19 | exists, err := afero.Exists(filesystem.Fs, absPath) 20 | if exists && err == nil { 21 | return path, nil, nil 22 | } 23 | } 24 | tmp := filepath.Join(os.TempDir(), uuid.NewString()) 25 | cleaner := func() { 26 | _ = os.RemoveAll(tmp) 27 | } 28 | result, err := getter.Get(ctx, tmp, path) 29 | if err != nil { 30 | return "", cleaner, err 31 | } 32 | if result == nil { 33 | return "", cleaner, fmt.Errorf("cannot get config path") 34 | } 35 | return result.Dst, cleaner, nil 36 | } 37 | -------------------------------------------------------------------------------- /example/prevent_destroy/main.mptf.hcl: -------------------------------------------------------------------------------- 1 | variable "prevent_destroy" { 2 | type = bool 3 | default = false 4 | } 5 | 6 | variable "root_only" { 7 | type = 
bool 8 | default = false 9 | } 10 | 11 | data "resource" all_resource { 12 | } 13 | 14 | locals { 15 | all_resource_blocks = flatten([ 16 | for resource_type, resource_blocks in data.resource.all_resource.result :resource_blocks 17 | ]) 18 | mptfs = flatten([for _, blocks in local.all_resource_blocks : [for b in blocks : b.mptf]]) 19 | addresses = var.root_only ? [for mptf in local.mptfs : mptf.block_address if mptf.module.dir == "."] : [for mptf in local.mptfs : mptf.block_address] 20 | } 21 | 22 | transform "update_in_place" set_prevent_destroy { 23 | for_each = try(local.addresses, []) 24 | target_block_address = each.value 25 | 26 | asstring { 27 | lifecycle { 28 | prevent_destroy = var.prevent_destroy 29 | } 30 | } 31 | } -------------------------------------------------------------------------------- /pkg/transform_move_block.go: -------------------------------------------------------------------------------- 1 | package pkg 2 | 3 | import ( 4 | "fmt" 5 | "github.com/Azure/golden" 6 | ) 7 | 8 | var _ Transform = &MoveBlockTransform{} 9 | 10 | type MoveBlockTransform struct { 11 | *golden.BaseBlock 12 | *BaseTransform 13 | TargetBlockAddress string `hcl:"target_block_address"` 14 | FileName string `hcl:"file_name" validate:"endswith=.tf"` 15 | } 16 | 17 | func (m *MoveBlockTransform) Type() string { 18 | return "move_block" 19 | } 20 | 21 | func (m *MoveBlockTransform) Apply() error { 22 | cfg := m.Config().(*MetaProgrammingTFConfig) 23 | block := cfg.RootBlock(m.TargetBlockAddress) 24 | if block == nil { 25 | return fmt.Errorf("cannot find block: %s", m.TargetBlockAddress) 26 | } 27 | 28 | // Get the write block from the found block 29 | writeBlock := block.WriteBlock 30 | 31 | if block.Range().Filename == m.FileName { 32 | return nil 33 | } 34 | cfg.AddBlock(m.FileName, writeBlock) 35 | cfg.module.RemoveBlock(writeBlock) 36 | return nil 37 | } 38 | -------------------------------------------------------------------------------- /cmd/clean_backup.go: 
-------------------------------------------------------------------------------- 1 | package cmd 2 | 3 | import ( 4 | "fmt" 5 | 6 | "github.com/Azure/mapotf/pkg" 7 | "github.com/Azure/mapotf/pkg/backup" 8 | "github.com/spf13/cobra" 9 | ) 10 | 11 | func NewClearBackupCmd() *cobra.Command { 12 | return &cobra.Command{ 13 | Use: "clean-backup", 14 | Short: "Reserve all transformed Terraform files, clear backup files, mapotf clean-backup --tf-dir [path to config files]", 15 | FParseErrWhitelist: cobra.FParseErrWhitelist{ 16 | UnknownFlags: true, 17 | }, 18 | RunE: func(cmd *cobra.Command, args []string) error { 19 | return cleanBackup() 20 | }, 21 | } 22 | } 23 | 24 | func cleanBackup() error { 25 | moduleRefs, err := pkg.ModuleRefs(cf.tfDir) 26 | if err != nil { 27 | return err 28 | } 29 | for _, tfDir := range moduleRefs { 30 | d := tfDir 31 | err = backup.ClearBackup(d.AbsDir) 32 | if err != nil { 33 | return err 34 | } 35 | } 36 | fmt.Println("All backups have been cleaned.") 37 | return nil 38 | } 39 | 40 | func init() { 41 | rootCmd.AddCommand(NewClearBackupCmd()) 42 | } 43 | -------------------------------------------------------------------------------- /pkg/transform_update_in_place_internal_test.go: -------------------------------------------------------------------------------- 1 | package pkg 2 | 3 | import ( 4 | "encoding/json" 5 | "testing" 6 | 7 | "github.com/Azure/golden" 8 | "github.com/hashicorp/hcl/v2/hclwrite" 9 | "github.com/stretchr/testify/assert" 10 | "github.com/stretchr/testify/require" 11 | ) 12 | 13 | func TestUpdateInPlaceTransform_String(t *testing.T) { 14 | // Initialize a UpdateInPlaceTransform instance 15 | updateBlock := hclwrite.NewBlock("patch", []string{}) 16 | u := &UpdateInPlaceTransform{ 17 | BaseBlock: golden.NewBaseBlock(nil, nil), 18 | TargetBlockAddress: "resource.fake_resource.this", 19 | updateBlock: updateBlock, 20 | } 21 | 22 | // Call the String() method 23 | result := u.String() 24 | 25 | // Parse the result as JSON 26 
| var parsed map[string]interface{} 27 | err := json.Unmarshal([]byte(result), &parsed) 28 | require.NoError(t, err) 29 | assert.Equal(t, u.Id(), parsed["id"]) 30 | assert.Equal(t, u.TargetBlockAddress, parsed["target_block_address"]) 31 | assert.Equal(t, `patch{ 32 | } 33 | `, parsed["patch"]) 34 | } 35 | -------------------------------------------------------------------------------- /cmd/terraform_cmd_wrapper.go: -------------------------------------------------------------------------------- 1 | package cmd 2 | 3 | import ( 4 | "github.com/spf13/cobra" 5 | "os" 6 | "os/exec" 7 | ) 8 | 9 | func wrapTerraformCommandWithEphemeralTransform(tfDir, tfCmd string, recursive *bool) func(*cobra.Command, []string) error { 10 | return func(cmd *cobra.Command, args []string) error { 11 | restores, err := transform(*recursive, cmd.Context()) 12 | if err != nil { 13 | return err 14 | } 15 | for _, restore := range restores { 16 | r := restore 17 | defer r() 18 | } 19 | return wrapTerraformCommand(tfDir, tfCmd)(cmd, args) 20 | } 21 | } 22 | 23 | func wrapTerraformCommand(tfDir, cmd string) func(*cobra.Command, []string) error { 24 | return func(c *cobra.Command, args []string) error { 25 | tfArgs := append([]string{cmd}, NonMptfArgs...) 26 | tfCmd := exec.CommandContext(c.Context(), "terraform", tfArgs...) 27 | tfCmd.Dir = tfDir 28 | tfCmd.Stdin = os.Stdin 29 | tfCmd.Stdout = os.Stdout 30 | tfCmd.Stderr = os.Stderr 31 | // Run the command and pass through exit code 32 | err := tfCmd.Run() 33 | return err 34 | } 35 | } 36 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) Microsoft Corporation. All rights reserved. 
4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE 22 | -------------------------------------------------------------------------------- /pkg/init.go: -------------------------------------------------------------------------------- 1 | package pkg 2 | 3 | import "github.com/Azure/golden" 4 | 5 | func init() { 6 | golden.RegisterBaseBlock(func() golden.BlockType { 7 | return new(BaseData) 8 | }) 9 | golden.RegisterBaseBlock(func() golden.BlockType { 10 | return new(BaseTransform) 11 | }) 12 | registerData() 13 | registerTransform() 14 | } 15 | 16 | func registerTransform() { 17 | golden.RegisterBlock(new(UpdateInPlaceTransform)) 18 | golden.RegisterBlock(new(NewBlockTransform)) 19 | golden.RegisterBlock(new(RemoveBlockTransform)) 20 | golden.RegisterBlock(new(RemoveBlockContentBlockTransform)) 21 | golden.RegisterBlock(new(RenameAttributeOrNestedBlockTransform)) 22 | golden.RegisterBlock(new(RegexReplaceExpressionTransform)) 23 | 
golden.RegisterBlock(new(AppendBlockBodyTransform)) 24 | golden.RegisterBlock(new(EnsureLocalTransform)) 25 | golden.RegisterBlock(new(MoveBlockTransform)) 26 | } 27 | 28 | func registerData() { 29 | golden.RegisterBlock(new(ResourceData)) 30 | golden.RegisterBlock(new(ProviderSchemaData)) 31 | golden.RegisterBlock(new(TerraformData)) 32 | golden.RegisterBlock(new(DataSourceData)) 33 | golden.RegisterBlock(new(DataVariable)) 34 | golden.RegisterBlock(new(DataOutput)) 35 | golden.RegisterBlock(new(DataLocal)) 36 | } 37 | -------------------------------------------------------------------------------- /cmd/clean_backup_test.go: -------------------------------------------------------------------------------- 1 | package cmd_test 2 | 3 | import ( 4 | "context" 5 | "github.com/Azure/mapotf/pkg" 6 | "testing" 7 | 8 | "github.com/Azure/mapotf/cmd" 9 | filesystem "github.com/Azure/mapotf/pkg/fs" 10 | "github.com/prashantv/gostub" 11 | "github.com/spf13/afero" 12 | "github.com/stretchr/testify/assert" 13 | "github.com/stretchr/testify/require" 14 | "os" 15 | ) 16 | 17 | func TestCleanBackup(t *testing.T) { 18 | fs := afero.NewMemMapFs() 19 | _ = afero.WriteFile(fs, "/testTerraform/main.tf.mptfbackup", []byte("backup content"), 0644) 20 | _ = afero.WriteFile(fs, "/testTerraform/another.tf.mptfbackup", []byte("another backup content"), 0644) 21 | stub := gostub.Stub(&filesystem.Fs, fs).Stub( 22 | &os.Args, []string{ 23 | "mapotf", 24 | "clean-backup", 25 | "--tf-dir", "/testTerraform", 26 | }).Stub(&pkg.AbsDir, func(dir string) (string, error) { 27 | return dir, nil 28 | }) 29 | defer stub.Reset() 30 | 31 | cmd.Execute(context.Background()) 32 | 33 | // Verify that all backup files have been cleaned 34 | exists, err := afero.Exists(fs, "/testTerraform/main.tf.mptfbackup") 35 | require.NoError(t, err) 36 | assert.False(t, exists) 37 | 38 | exists, err = afero.Exists(fs, "/testTerraform/another.tf.mptfbackup") 39 | require.NoError(t, err) 40 | assert.False(t, exists) 41 | } 42 
| -------------------------------------------------------------------------------- /cmd/var_flag_internal_test.go: -------------------------------------------------------------------------------- 1 | package cmd 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/Azure/golden" 7 | "github.com/stretchr/testify/assert" 8 | ) 9 | 10 | func TestVarFlagsWithoutEqualSign(t *testing.T) { 11 | args := []string{"--mptf-var", "testVar"} 12 | _, err := varFlags(args) 13 | assert.NotNil(t, err) 14 | assert.Contains(t, err.Error(), "is not correctly specified. Must be a variable name and value separated by an equals sign, like --mptf-var key=value") 15 | } 16 | 17 | func TestVarFlagsWithVarFile(t *testing.T) { 18 | args := []string{"--mptf-var-file", "testVarFile"} 19 | expected := []golden.CliFlagAssignedVariables{ 20 | golden.NewCliFlagAssignedVariableFile("testVarFile"), 21 | } 22 | 23 | result, err := varFlags(args) 24 | 25 | assert.NoError(t, err, "Unexpected error: %v", err) 26 | assert.Equal(t, expected, result, "Expected %+v, got %+v", expected, result) 27 | } 28 | 29 | func TestVarFlagsWithVarFile_incorrectFlag(t *testing.T) { 30 | args := []string{"--mptf-var-file"} 31 | _, err := varFlags(args) 32 | assert.NotNil(t, err, "Unexpected error: %v", err) 33 | assert.Contains(t, err.Error(), "missing value for --mptf-var-file") 34 | } 35 | 36 | func TestVarFlagsWithoutVarAssignment(t *testing.T) { 37 | args := []string{"--mptf-var"} 38 | _, err := varFlags(args) 39 | assert.NotNil(t, err, "Expected error but got nil") 40 | assert.Contains(t, err.Error(), "missing value for --mptf-var") 41 | } 42 | -------------------------------------------------------------------------------- /pkg/terraform/attribute_test.go: -------------------------------------------------------------------------------- 1 | package terraform_test 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/Azure/mapotf/pkg/terraform" 7 | "github.com/hashicorp/hcl/v2" 8 | "github.com/hashicorp/hcl/v2/hclsyntax" 9 | 
"github.com/hashicorp/hcl/v2/hclwrite" 10 | "github.com/stretchr/testify/assert" 11 | "github.com/stretchr/testify/require" 12 | ) 13 | 14 | func TestNewAttribute(t *testing.T) { 15 | // Define some Terraform code 16 | sut := newAttribute(t, ` 17 | resource "azurerm_resource_group" "example" { 18 | name = "test" 19 | } 20 | `, "name") 21 | 22 | // Assert that the returned attribute has the expected properties 23 | assert.Equal(t, "name", sut.Name) 24 | assert.Equal(t, `"test"`, sut.String()) 25 | } 26 | 27 | func newAttribute(t *testing.T, code string, attributeName string) *terraform.Attribute { 28 | // Parse the Terraform code 29 | readFile, diags := hclsyntax.ParseConfig([]byte(code), "test", hcl.InitialPos) 30 | require.False(t, diags.HasErrors()) 31 | writeFile, diags := hclwrite.ParseConfig([]byte(code), "test", hcl.InitialPos) 32 | require.False(t, diags.HasErrors()) 33 | 34 | // Get the first attribute from the parsed file 35 | rb := readFile.Body.(*hclsyntax.Body).Blocks[0].Body.Attributes[attributeName] 36 | wb := writeFile.Body().Blocks()[0].Body().GetAttribute(attributeName) 37 | 38 | // Call the function under test 39 | attribute := terraform.NewAttribute(attributeName, rb, wb) 40 | return attribute 41 | } 42 | -------------------------------------------------------------------------------- /cmd/reset_test.go: -------------------------------------------------------------------------------- 1 | package cmd_test 2 | 3 | import ( 4 | "context" 5 | "github.com/Azure/mapotf/pkg" 6 | "os" 7 | "testing" 8 | 9 | "github.com/Azure/mapotf/cmd" 10 | filesystem "github.com/Azure/mapotf/pkg/fs" 11 | "github.com/prashantv/gostub" 12 | "github.com/spf13/afero" 13 | "github.com/stretchr/testify/assert" 14 | "github.com/stretchr/testify/require" 15 | ) 16 | 17 | func TestReset(t *testing.T) { 18 | // Stub the filesystem 19 | fs := afero.NewMemMapFs() 20 | _ = afero.WriteFile(fs, "/testTerraform/main.tf", []byte("original content"), 0644) 21 | _ = afero.WriteFile(fs, 
"/testTerraform/main.tf.mptfbackup", []byte("backup content"), 0644) 22 | stub := gostub.Stub(&filesystem.Fs, fs).Stub( 23 | &os.Args, []string{ 24 | "mapotf", 25 | "reset", 26 | "--tf-dir", "/testTerraform", 27 | }).Stub(&pkg.AbsDir, func(dir string) (string, error) { 28 | return dir, nil 29 | }) 30 | defer stub.Reset() 31 | 32 | cmd.Execute(context.Background()) 33 | 34 | // Verify that the original file has been restored 35 | content, err := afero.ReadFile(fs, "/testTerraform/main.tf") 36 | require.NoError(t, err) 37 | assert.Equal(t, "backup content", string(content)) 38 | 39 | // Verify that the backup file no longer exists 40 | exists, err := afero.Exists(fs, "/testTerraform/main.tf.mptfbackup") 41 | require.NoError(t, err) 42 | assert.False(t, exists) 43 | tfFile, err := afero.ReadFile(fs, "/testTerraform/main.tf") 44 | require.NoError(t, err) 45 | assert.Equal(t, "backup content", string(tfFile)) 46 | } 47 | -------------------------------------------------------------------------------- /cmd/args.go: -------------------------------------------------------------------------------- 1 | package cmd 2 | 3 | import ( 4 | "github.com/spf13/cobra" 5 | "strings" 6 | ) 7 | 8 | var NonMptfArgs []string 9 | 10 | func FilterArgs(inputArgs []string) ([]string, []string) { 11 | var mptfArgs, nonMptfArgs []string 12 | mptfArgs = append(mptfArgs, inputArgs[0]) 13 | inputArgs = inputArgs[1:] 14 | var subCommands = make(map[string]struct{}) 15 | for _, cmd := range append([]*cobra.Command{ 16 | NewTransformCmd(), 17 | NewDebugCmd(), 18 | NewResetCmd(), 19 | NewClearBackupCmd(), 20 | }, terraformCommands...) 
{ 21 | subCommands[cmd.Use] = struct{}{} 22 | } 23 | mptfVarFlags := map[string]struct{}{ 24 | "--tf-dir": {}, 25 | "--mptf-dir": {}, 26 | "--mptf-var": {}, 27 | "--mptf-var-file": {}, 28 | "--help": {}, 29 | } 30 | mptfShortHands := map[string]struct{}{ 31 | "-r": {}, 32 | "-h": {}, 33 | } 34 | for i := 0; i < len(inputArgs); i++ { 35 | arg := inputArgs[i] 36 | if _, isSubCommand := subCommands[arg]; isSubCommand { 37 | mptfArgs = append(mptfArgs, arg) 38 | } else if _, isMptfVarFlag := mptfVarFlags[arg]; isMptfVarFlag { 39 | mptfArgs = append(mptfArgs, arg) 40 | if i != len(inputArgs)-1 && !strings.HasPrefix(inputArgs[i+1], "-") { 41 | mptfArgs = append(mptfArgs, inputArgs[i+1]) 42 | i++ 43 | } 44 | } else if _, isMptfShorthand := mptfShortHands[arg]; isMptfShorthand { 45 | mptfArgs = append(mptfArgs, arg) 46 | } else { 47 | nonMptfArgs = append(nonMptfArgs, arg) 48 | } 49 | } 50 | return mptfArgs, nonMptfArgs 51 | } 52 | -------------------------------------------------------------------------------- /pkg/data_local.go: -------------------------------------------------------------------------------- 1 | package pkg 2 | 3 | import ( 4 | "github.com/Azure/golden" 5 | "github.com/Azure/mapotf/pkg/terraform" 6 | "github.com/ahmetb/go-linq/v3" 7 | "github.com/zclconf/go-cty/cty" 8 | ctyjson "github.com/zclconf/go-cty/cty/json" 9 | ) 10 | 11 | var _ Data = &DataLocal{} 12 | 13 | type DataLocal struct { 14 | *BaseData 15 | *golden.BaseBlock 16 | 17 | ExpectedNameName string `hcl:"name,optional"` 18 | Result cty.Value `attribute:"result"` 19 | } 20 | 21 | func (dl *DataLocal) Type() string { 22 | return "local" 23 | } 24 | 25 | func (dl *DataLocal) ExecuteDuringPlan() error { 26 | src := dl.BaseBlock.Config().(*MetaProgrammingTFConfig).LocalBlocks() 27 | var matched []*terraform.RootBlock 28 | ds := linq.From(src) 29 | if dl.ExpectedNameName != "" { 30 | ds = ds.Where(func(i interface{}) bool { 31 | return i.(*terraform.RootBlock).Labels[0] == dl.ExpectedNameName 32 | 
}) 33 | } 34 | ds.ToSlice(&matched) 35 | localBlocks := make(map[string]cty.Value) 36 | for _, block := range matched { 37 | for name, attr := range block.Attributes { 38 | localBlocks[name] = cty.StringVal(attr.String()) 39 | } 40 | } 41 | dl.Result = cty.ObjectVal(localBlocks) 42 | return nil 43 | } 44 | 45 | func (dl *DataLocal) String() string { 46 | d := cty.ObjectVal(map[string]cty.Value{ 47 | "name": cty.StringVal(dl.ExpectedNameName), 48 | "result": dl.Result, 49 | }) 50 | r, err := ctyjson.Marshal(d, d.Type()) 51 | if err != nil { 52 | panic(err.Error()) 53 | } 54 | return string(r) 55 | } 56 | -------------------------------------------------------------------------------- /pkg/data_output.go: -------------------------------------------------------------------------------- 1 | package pkg 2 | 3 | import ( 4 | "github.com/Azure/golden" 5 | "github.com/Azure/mapotf/pkg/terraform" 6 | "github.com/ahmetb/go-linq/v3" 7 | "github.com/zclconf/go-cty/cty" 8 | ctyjson "github.com/zclconf/go-cty/cty/json" 9 | ) 10 | 11 | var _ Data = &DataOutput{} 12 | 13 | type DataOutput struct { 14 | *BaseData 15 | *golden.BaseBlock 16 | 17 | ExpectedOutputName string `attribute:"name"` 18 | Result cty.Value `attribute:"result"` 19 | } 20 | 21 | func (d *DataOutput) Type() string { 22 | return "output" 23 | } 24 | 25 | func (d *DataOutput) ExecuteDuringPlan() error { 26 | src := d.BaseBlock.Config().(*MetaProgrammingTFConfig).OutputBlocks() 27 | var matched []*terraform.RootBlock 28 | ds := linq.From(src) 29 | 30 | if d.ExpectedOutputName != "" { 31 | ds = ds.Where(func(i interface{}) bool { 32 | return i.(*terraform.RootBlock).Labels[0] == d.ExpectedOutputName 33 | }) 34 | } 35 | 36 | ds.ToSlice(&matched) 37 | 38 | outputBlocks := make(map[string]cty.Value) 39 | for _, block := range matched { 40 | outputName := block.Labels[0] 41 | outputBlocks[outputName] = block.EvalContext() 42 | } 43 | 44 | d.Result = cty.ObjectVal(outputBlocks) 45 | return nil 46 | } 47 | 48 | func (d 
*DataOutput) String() string { 49 | data := map[string]cty.Value{ 50 | "result": d.Result, 51 | } 52 | 53 | r, err := ctyjson.Marshal(cty.ObjectVal(data), cty.Object(map[string]cty.Type{ 54 | "result": d.Result.Type(), 55 | })) 56 | if err != nil { 57 | panic(err.Error()) 58 | } 59 | return string(r) 60 | } 61 | -------------------------------------------------------------------------------- /doc/d/terraform.md: -------------------------------------------------------------------------------- 1 | # Data `terraform` Block 2 | 3 | The `data "terraform"` data block is used to retrieve `terraform` block declared in Terraform config. This data block allows you to access various attributes related to the Terraform configuration, such as required providers and required versions. 4 | 5 | ## Example Usage 6 | 7 | ```hcl 8 | data "terraform" "example" { 9 | } 10 | ``` 11 | 12 | ## Attributes Reference 13 | 14 | The following attributes are exported: 15 | 16 | - **required_providers** (Optional): A map of provider configurations required by the module. Each provider configuration can include the following attributes: 17 | - **source** (Optional, string): The source of the provider, typically in the format `namespace/provider`. 18 | - **version** (Optional, string): The version constraint for the provider. 19 | 20 | - **required_version** (Optional, string): The required version of Terraform for the module. 21 | 22 | ## Example 23 | 24 | ```hcl 25 | data "terraform" version { 26 | 27 | } 28 | 29 | data "provider_schema" azurerm { 30 | provider_source = data.terraform.version.required_providers["azurerm"].source 31 | provider_version = data.terraform.version.required_providers["azurerm"].version 32 | } 33 | ``` 34 | 35 | In this example, the `data "terraform"` block is used to retrieve the `terraform` block declared in Terraform config, then we use retrieved `source` and `version` to get resource schemas of `azurerm` provider. 
36 | 37 | Please ensure that the provider configurations and version constraints are correctly specified to avoid runtime errors. 38 | -------------------------------------------------------------------------------- /.github/workflows/pr-check.yml: -------------------------------------------------------------------------------- 1 | name: build 2 | 3 | on: 4 | push: 5 | branches: 6 | - main 7 | pull_request: 8 | 9 | env: 10 | TOKEN: ${{secrets.GITHUB_TOKEN}} 11 | 12 | jobs: 13 | test: 14 | name: Build and Test 15 | runs-on: ${{ matrix.os }} 16 | strategy: 17 | matrix: 18 | os: [ubuntu-latest, macos-13, windows-latest] 19 | permissions: 20 | # required for all workflows 21 | security-events: write 22 | steps: 23 | - name: Checkout 24 | uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 #v4.2.2 25 | - name: Set up Go 26 | uses: actions/setup-go@0aaccfd150d50ccaeb58ebd88d36e91967a5f35b #v5.4.0 27 | with: 28 | go-version-file: go.mod 29 | - uses: hashicorp/setup-terraform@b9cd54a3c349d3f38e8881555d616ced269862dd #v3.1.2 30 | - name: Initialize CodeQL 31 | uses: github/codeql-action/init@45775bd8235c68ba998cffa5171334d58593da47 #v3.28.15 32 | with: 33 | languages: go 34 | - name: Run build 35 | run: | 36 | go build github.com/Azure/mapotf 37 | - name: Perform CodeQL Analysis 38 | uses: github/codeql-action/analyze@45775bd8235c68ba998cffa5171334d58593da47 #v3.28.15 39 | - name: Run tests 40 | 41 | run: go test -v github.com/Azure/mapotf/... 
42 | - name: golangci-lint 43 | if: runner.os == 'Linux' 44 | run: | 45 | docker run --rm -v $(pwd):/app -w /app golangci/golangci-lint:v2.1.2-alpine golangci-lint run -v --timeout=3600s 46 | - name: Run Gosec Security Scanner 47 | uses: securego/gosec@955a68d0d19f4afb7503068f95059f7d0c529017 #v2.22.3 48 | if: runner.os == 'Linux' -------------------------------------------------------------------------------- /pkg/mptf_plan_test.go: -------------------------------------------------------------------------------- 1 | package pkg_test 2 | 3 | import ( 4 | "context" 5 | "github.com/Azure/mapotf/pkg" 6 | filesystem "github.com/Azure/mapotf/pkg/fs" 7 | "github.com/stretchr/testify/assert" 8 | "github.com/stretchr/testify/require" 9 | "path/filepath" 10 | "testing" 11 | 12 | "github.com/prashantv/gostub" 13 | ) 14 | 15 | func TestMetaProgrammingTFPlan_OnlyTransformThatHasTargetShouldBeInThePlan(t *testing.T) { 16 | gostub.Stub(&filesystem.Fs, fakeFs(map[string]string{ 17 | filepath.Join("terraform", "main.tf"): `resource "fake_resource" this { 18 | }`, 19 | filepath.Join("mptf", "main.mptf.hcl"): `data "resource" fake_resource { 20 | resource_type = "fake_resource" 21 | } 22 | 23 | data "resource" fake_resource2 { 24 | resource_type = "fake_resource2" 25 | } 26 | 27 | transform "update_in_place" fake_resource { 28 | for_each = try(data.resource.fake_resource.result.fake_resource, []) 29 | target_block_address = each.value.mptf.block_address 30 | id = each.value.mptf.block_address 31 | } 32 | 33 | transform "update_in_place" fake_resource2 { 34 | for_each = try(data.resource.fake_resource2.result.fake_resource2, []) 35 | target_block_address = each.value.mptf.block_address 36 | id = each.value.mptf.block_address 37 | } 38 | `, 39 | })) 40 | hclBlocks, err := pkg.LoadMPTFHclBlocks(false, "mptf") 41 | require.NoError(t, err) 42 | cfg, err := pkg.NewMetaProgrammingTFConfig(&pkg.TerraformModuleRef{ 43 | Dir: "terraform", 44 | AbsDir: "terraform", 45 | }, nil, hclBlocks, nil, 
context.TODO()) 46 | require.NoError(t, err) 47 | plan, err := pkg.RunMetaProgrammingTFPlan(cfg) 48 | require.NoError(t, err) 49 | assert.Len(t, plan.Transforms, 1) 50 | assert.Equal(t, "resource.fake_resource.this", plan.Transforms[0].(*pkg.UpdateInPlaceTransform).TargetBlockAddress) 51 | } 52 | -------------------------------------------------------------------------------- /pkg/mptf_funcs_test.go: -------------------------------------------------------------------------------- 1 | package pkg_test 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/Azure/mapotf/pkg" 7 | "github.com/zclconf/go-cty/cty" 8 | ) 9 | 10 | func TestToHclFunc(t *testing.T) { 11 | tests := []struct { 12 | name string 13 | input cty.Value 14 | expected string 15 | }{ 16 | { 17 | name: "String", 18 | input: cty.StringVal("value"), 19 | expected: `"value"`, 20 | }, 21 | { 22 | name: "Number", 23 | input: cty.NumberIntVal(42), 24 | expected: `42`, 25 | }, 26 | { 27 | name: "Bool", 28 | input: cty.BoolVal(true), 29 | expected: `true`, 30 | }, 31 | { 32 | name: "List", 33 | input: cty.ListVal([]cty.Value{ 34 | cty.StringVal("one"), 35 | cty.StringVal("two"), 36 | }), 37 | expected: `["one", "two"]`, 38 | }, 39 | { 40 | name: "Set", 41 | input: cty.SetVal([]cty.Value{ 42 | cty.StringVal("one"), 43 | cty.StringVal("two"), 44 | }), 45 | expected: `["one", "two"]`, 46 | }, 47 | { 48 | name: "Map", 49 | input: cty.MapVal(map[string]cty.Value{ 50 | "key1": cty.StringVal("value1"), 51 | "key2": cty.StringVal("value2"), 52 | }), 53 | expected: `{ 54 | key1 = "value1" 55 | key2 = "value2" 56 | }`, 57 | }, 58 | { 59 | name: "Object", 60 | input: cty.ObjectVal(map[string]cty.Value{ 61 | "key": cty.StringVal("value"), 62 | }), 63 | expected: `{ 64 | key = "value" 65 | }`, 66 | }, 67 | } 68 | 69 | for _, tt := range tests { 70 | t.Run(tt.name, func(t *testing.T) { 71 | result, err := pkg.ToHclFunc.Call([]cty.Value{tt.input}) 72 | if err != nil { 73 | t.Fatalf("unexpected error: %s", err) 74 | } 75 | if 
result.AsString() != tt.expected { 76 | t.Errorf("expected %s, got %s", tt.expected, result.AsString()) 77 | } 78 | }) 79 | } 80 | } 81 | -------------------------------------------------------------------------------- /cmd/args_test.go: -------------------------------------------------------------------------------- 1 | package cmd_test 2 | 3 | import ( 4 | "github.com/Azure/mapotf/cmd" 5 | "testing" 6 | 7 | "github.com/stretchr/testify/assert" 8 | ) 9 | 10 | func TestFilterArgs(t *testing.T) { 11 | tests := []struct { 12 | name string 13 | inputArgs []string 14 | expectedMptf []string 15 | expectedNonMptf []string 16 | }{ 17 | { 18 | name: "Test with mapotf specific arguments", 19 | inputArgs: []string{"mapotf", "transform", "--tf-dir", "/testTerraform", "--mptf-dir", "/testMptf"}, 20 | expectedMptf: []string{"mapotf", "transform", "--tf-dir", "/testTerraform", "--mptf-dir", "/testMptf"}, 21 | expectedNonMptf: nil, 22 | }, 23 | { 24 | name: "Test with terraform specific arguments", 25 | inputArgs: []string{"mapotf", "apply", "-compact-warnings", "input=false", "-var", "a=b", "-auto-approve", "--tf-dir", "/testTerraform", "--mptf-dir", "/testMptf"}, 26 | expectedMptf: []string{"mapotf", "apply", "--tf-dir", "/testTerraform", "--mptf-dir", "/testMptf"}, 27 | expectedNonMptf: []string{"-compact-warnings", "input=false", "-var", "a=b", "-auto-approve"}, 28 | }, 29 | { 30 | name: "Test with terraform var along with mptf var", 31 | inputArgs: []string{"mapotf", "apply", "--mptf-var", "mptfa=b", "--mptf-var-file", "mptf.var", "-var", "a=b", "-var-file=\"terraform.tfvars\"", "-var", "c=d"}, 32 | expectedMptf: []string{"mapotf", "apply", "--mptf-var", "mptfa=b", "--mptf-var-file", "mptf.var"}, 33 | expectedNonMptf: []string{"-var", "a=b", "-var-file=\"terraform.tfvars\"", "-var", "c=d"}, 34 | }, 35 | } 36 | 37 | for _, tt := range tests { 38 | t.Run(tt.name, func(t *testing.T) { 39 | mptfArgs, nonMptfArgs := cmd.FilterArgs(tt.inputArgs) 40 | assert.Equal(t, 
tt.expectedMptf, mptfArgs) 41 | assert.Equal(t, tt.expectedNonMptf, nonMptfArgs) 42 | }) 43 | } 44 | } 45 | -------------------------------------------------------------------------------- /pkg/data_variable.go: -------------------------------------------------------------------------------- 1 | package pkg 2 | 3 | import ( 4 | "github.com/Azure/golden" 5 | "github.com/Azure/mapotf/pkg/terraform" 6 | "github.com/ahmetb/go-linq/v3" 7 | "github.com/zclconf/go-cty/cty" 8 | ctyjson "github.com/zclconf/go-cty/cty/json" 9 | ) 10 | 11 | var _ Data = &DataVariable{} 12 | 13 | type DataVariable struct { 14 | *BaseData 15 | *golden.BaseBlock 16 | 17 | ExpectedNameName string `hcl:"name,optional"` 18 | ExpectedType string `hcl:"type,optional"` 19 | Result cty.Value `attribute:"result"` 20 | } 21 | 22 | func (dd *DataVariable) Type() string { 23 | return "variable" 24 | } 25 | 26 | func (dd *DataVariable) ExecuteDuringPlan() error { 27 | src := dd.BaseBlock.Config().(*MetaProgrammingTFConfig).VariableBlocks() 28 | var matched []*terraform.RootBlock 29 | ds := linq.From(src) 30 | if dd.ExpectedNameName != "" { 31 | ds = ds.Where(func(i interface{}) bool { 32 | return i.(*terraform.RootBlock).Labels[0] == dd.ExpectedNameName 33 | }) 34 | } 35 | if dd.ExpectedType != "" { 36 | 37 | ds = ds.Where(func(i interface{}) bool { 38 | typeAttr, ok := i.(*terraform.RootBlock).Attributes["type"] 39 | if !ok { 40 | return false 41 | } 42 | typeVal := typeAttr.String() 43 | return typeVal == dd.ExpectedType 44 | }) 45 | } 46 | ds.ToSlice(&matched) 47 | variableBlocks := make(map[string]cty.Value) 48 | for _, block := range matched { 49 | variableName := block.Labels[0] 50 | variableBlocks[variableName] = block.EvalContext() 51 | } 52 | dd.Result = cty.ObjectVal(variableBlocks) 53 | return nil 54 | } 55 | 56 | func (dd *DataVariable) String() string { 57 | d := cty.ObjectVal(map[string]cty.Value{ 58 | "name": cty.StringVal(dd.ExpectedNameName), 59 | "type": cty.StringVal(dd.ExpectedType), 60 
| "result": dd.Result, 61 | }) 62 | r, err := ctyjson.Marshal(d, d.Type()) 63 | if err != nil { 64 | panic(err.Error()) 65 | } 66 | return string(r) 67 | } 68 | -------------------------------------------------------------------------------- /pkg/mptf_plan.go: -------------------------------------------------------------------------------- 1 | package pkg 2 | 3 | import ( 4 | "fmt" 5 | "github.com/Azure/golden" 6 | "strings" 7 | ) 8 | 9 | var _ golden.Plan = &MetaProgrammingTFPlan{} 10 | 11 | func RunMetaProgrammingTFPlan(c *MetaProgrammingTFConfig) (*MetaProgrammingTFPlan, error) { 12 | if err := c.RunPlan(); err != nil { 13 | return nil, err 14 | } 15 | plan := &MetaProgrammingTFPlan{ 16 | c: c, 17 | } 18 | plan.Transforms = append(plan.Transforms, golden.Blocks[Transform](c)...) 19 | return plan, nil 20 | } 21 | 22 | type MetaProgrammingTFPlan struct { 23 | c *MetaProgrammingTFConfig 24 | Transforms []Transform 25 | } 26 | 27 | func (m *MetaProgrammingTFPlan) String() string { 28 | sb := strings.Builder{} 29 | for _, t := range m.Transforms { 30 | sb.WriteString(fmt.Sprintf("%s would be apply:\n %s\n", t.Address(), golden.BlockToString(t))) 31 | sb.WriteString("\n---\n") 32 | } 33 | return sb.String() 34 | } 35 | 36 | func (m *MetaProgrammingTFPlan) Apply() error { 37 | var err error 38 | addresses := make(map[string]struct{}) 39 | for _, transform := range m.Transforms { 40 | addresses[transform.Address()] = struct{}{} 41 | } 42 | if err = golden.Traverse[Transform](m.c.BaseConfig, func(b Transform) error { 43 | if _, ok := addresses[b.Address()]; !ok { 44 | return nil 45 | } 46 | if err := golden.Decode(b); err != nil { 47 | return fmt.Errorf("%s(%s) decode error: %+v", b.Address(), b.HclBlock().Range().String(), err) 48 | } 49 | return nil 50 | }); err != nil { 51 | return err 52 | } 53 | 54 | if err = golden.Traverse[Transform](m.c.BaseConfig, func(b Transform) error { 55 | if _, ok := addresses[b.Address()]; !ok { 56 | return nil 57 | } 58 | return 
b.Apply() 59 | }); err != nil { 60 | return fmt.Errorf("errors applying transforms: %+v", err) 61 | } 62 | if err = m.c.SaveToDisk(); err != nil { 63 | return fmt.Errorf("errors saving changes: %+v", err) 64 | } 65 | 66 | return nil 67 | } 68 | -------------------------------------------------------------------------------- /pkg/mptf_block.go: -------------------------------------------------------------------------------- 1 | package pkg 2 | 3 | import ( 4 | "fmt" 5 | "github.com/Azure/golden" 6 | "github.com/hashicorp/hcl/v2" 7 | "github.com/hashicorp/hcl/v2/hclsyntax" 8 | "github.com/hashicorp/hcl/v2/hclwrite" 9 | "github.com/zclconf/go-cty/cty" 10 | "github.com/zclconf/go-cty/cty/convert" 11 | ) 12 | 13 | func decodeAsStringBlock(dest *hclwrite.Block, src *golden.HclBlock, depth int, context *hcl.EvalContext) error { 14 | for n, attribute := range src.Attributes() { 15 | value, err := attribute.Value(context) 16 | if err != nil { 17 | return err 18 | } 19 | valueType := value.Type() 20 | if valueType != cty.String { 21 | value, err = convert.Convert(value, cty.String) 22 | if err != nil { 23 | return fmt.Errorf("cannot convert value to string, got: %s", valueType.FriendlyName()) 24 | } 25 | } 26 | tokens, err := stringToHclWriteTokens(value.AsString()) 27 | if err != nil { 28 | return err 29 | } 30 | dest.Body().SetAttributeRaw(n, tokens) 31 | } 32 | for _, b := range src.NestedBlocks() { 33 | blockType := b.Type 34 | newNestedBlock := dest.Body().AppendNewBlock(blockType, b.Labels) 35 | if err := decodeAsStringBlock(newNestedBlock, b, depth+1, context); err != nil { 36 | return err 37 | } 38 | } 39 | return nil 40 | } 41 | 42 | func decodeAsRawBlock(dest *hclwrite.Block, src *golden.HclBlock) error { 43 | for n, attribute := range src.Attributes() { 44 | dest.Body().SetAttributeRaw(n, attribute.ExprTokens()) 45 | } 46 | for _, b := range src.NestedBlocks() { 47 | blockType := b.Type 48 | newNestedBlock := dest.Body().AppendNewBlock(blockType, b.Labels) 49 | if 
err := decodeAsRawBlock(newNestedBlock, b); err != nil { 50 | return err 51 | } 52 | } 53 | return nil 54 | } 55 | 56 | func stringToHclWriteTokens(exp string) (hclwrite.Tokens, error) { 57 | tokens, diag := hclsyntax.LexExpression([]byte(exp), "", hcl.InitialPos) 58 | if diag.HasErrors() { 59 | return nil, diag 60 | } 61 | return writerTokens(tokens), nil 62 | } 63 | -------------------------------------------------------------------------------- /pkg/transform_regex_replace_expression.go: -------------------------------------------------------------------------------- 1 | package pkg 2 | 3 | import ( 4 | "regexp" 5 | "strings" 6 | 7 | "github.com/Azure/golden" 8 | "github.com/hashicorp/go-multierror" 9 | "github.com/hashicorp/hcl/v2" 10 | "github.com/hashicorp/hcl/v2/hclsyntax" 11 | "github.com/hashicorp/hcl/v2/hclwrite" 12 | ) 13 | 14 | var _ Transform = &RegexReplaceExpressionTransform{} 15 | 16 | type RegexReplaceExpressionTransform struct { 17 | *golden.BaseBlock 18 | *BaseTransform 19 | Regex string `hcl:"regex" validate:"required"` 20 | Replacement string `hcl:"replacement"` 21 | } 22 | 23 | func (r *RegexReplaceExpressionTransform) Type() string { 24 | return "regex_replace_expression" 25 | } 26 | 27 | func (r *RegexReplaceExpressionTransform) Apply() error { 28 | cfg := r.Config().(*MetaProgrammingTFConfig) 29 | re, err := regexp.Compile(r.Regex) 30 | if err != nil { 31 | return err 32 | } 33 | for _, block := range cfg.allRootBlocks { 34 | if subErr := r.applyRegexReplace(block.WriteBlock.Body(), block.Range().Filename, re); subErr != nil { 35 | err = multierror.Append(err, subErr) 36 | } 37 | } 38 | return err 39 | } 40 | 41 | func (r *RegexReplaceExpressionTransform) applyRegexReplace(body *hclwrite.Body, filename string, re *regexp.Regexp) error { 42 | var err error 43 | for name, attr := range body.Attributes() { 44 | oldValue := strings.TrimSpace(string(attr.Expr().BuildTokens(nil).Bytes())) 45 | newValue := re.ReplaceAllString(oldValue, 
r.Replacement) 46 | if oldValue == newValue { 47 | continue 48 | } 49 | tokens, diag := hclsyntax.LexExpression([]byte(newValue), filename, hcl.InitialPos) 50 | if diag.HasErrors() { 51 | err = multierror.Append(err, diag) 52 | continue 53 | } 54 | body.SetAttributeRaw(name, writerTokens(tokens)) 55 | } 56 | 57 | for _, block := range body.Blocks() { 58 | if subErr := r.applyRegexReplace(block.Body(), filename, re); subErr != nil { 59 | err = multierror.Append(err, subErr) 60 | continue 61 | } 62 | } 63 | return err 64 | } 65 | -------------------------------------------------------------------------------- /cmd/root.go: -------------------------------------------------------------------------------- 1 | package cmd 2 | 3 | import ( 4 | "context" 5 | "errors" 6 | "fmt" 7 | "github.com/spf13/cobra" 8 | "os" 9 | "os/exec" 10 | ) 11 | 12 | // rootCmd represents the base command when called without any subcommands 13 | var rootCmd = &cobra.Command{ 14 | Use: "", 15 | Short: "A brief description of your application", 16 | Long: `A longer description that spans multiple lines and likely contains 17 | examples and usage of using your application. For example: 18 | 19 | Cobra is a CLI library for Go that empowers applications. 20 | This application is a tool to generate the needed files 21 | to quickly create a Cobra application.`, 22 | FParseErrWhitelist: cobra.FParseErrWhitelist{ 23 | UnknownFlags: true, 24 | }, 25 | SilenceErrors: false, 26 | SilenceUsage: true, 27 | } 28 | 29 | // Execute adds all child commands to the root command and sets flags appropriately. 30 | // This is called by main.main(). It only needs to happen once to the rootCmd. 
31 | func Execute(ctx context.Context) { 32 | err := rootCmd.ExecuteContext(ctx) 33 | if err != nil { 34 | var pe *exec.ExitError 35 | if errors.As(err, &pe) { 36 | os.Exit(pe.ExitCode()) 37 | } 38 | os.Exit(1) 39 | } 40 | } 41 | 42 | func init() { 43 | pwd, err := os.Getwd() 44 | if err != nil { 45 | panic(fmt.Sprintf("error on getting working dir:%s", err.Error())) 46 | } 47 | rootCmd.PersistentFlags().StringVar(&cf.tfDir, "tf-dir", pwd, "Terraform directory") 48 | rootCmd.PersistentFlags().StringSliceVar(&cf.mptfDirs, "mptf-dir", nil, "MPTF directory") 49 | 50 | rootCmd.PersistentFlags().StringSlice("mptf-var", cf.mptfVars, "Set a value for one of the input variables in the root module of the configuration. Use this option more than once to set more than one variable.") 51 | rootCmd.PersistentFlags().StringSlice("mptf-var-file", cf.mptfVarFiles, "Load variable values from the given file, in addition to the default files mptf.mptfvars and *.auto.mptfvars. Use this option more than once to include more than one variables file.") 52 | } 53 | -------------------------------------------------------------------------------- /cmd/var_flag_internal.go: -------------------------------------------------------------------------------- 1 | package cmd 2 | 3 | import ( 4 | "context" 5 | "errors" 6 | "fmt" 7 | "github.com/Azure/golden" 8 | "strings" 9 | ) 10 | 11 | var cf = &commonFlags{} 12 | 13 | type commonFlags struct { 14 | tfDir string 15 | mptfDirs []string 16 | mptfVars []string 17 | mptfVarFiles []string 18 | } 19 | 20 | type localizedMptfDir struct { 21 | path string 22 | dispose func() 23 | } 24 | 25 | func (l localizedMptfDir) Dispose() { 26 | if l.dispose != nil { 27 | l.dispose() 28 | } 29 | } 30 | 31 | func (c *commonFlags) MptfDirs(ctx context.Context) ([]localizedMptfDir, error) { 32 | var r []localizedMptfDir 33 | for _, originalDir := range c.mptfDirs { 34 | localizedPath, disposeFunc, err := localizeConfigFolder(originalDir, ctx) 35 | if err != nil { 36 | 
for _, localizedDir := range r { 37 | localizedDir.Dispose() 38 | } 39 | return nil, fmt.Errorf("cannot get config path: %s: %+v", originalDir, err) 40 | } 41 | r = append(r, localizedMptfDir{path: localizedPath, dispose: disposeFunc}) 42 | } 43 | return r, nil 44 | } 45 | 46 | func varFlags(args []string) ([]golden.CliFlagAssignedVariables, error) { 47 | var flags []golden.CliFlagAssignedVariables 48 | for i := 0; i < len(args); i++ { 49 | if args[i] != "--mptf-var" && args[i] != "--mptf-var-file" { 50 | continue 51 | } 52 | if i+1 == len(args) { 53 | return nil, errors.New("missing value for " + args[i]) 54 | } 55 | arg := args[i+1] 56 | if args[i] == "--mptf-var-file" { 57 | flags = append(flags, golden.NewCliFlagAssignedVariableFile(arg)) 58 | i++ 59 | continue 60 | } 61 | varAssignment := strings.Split(arg, "=") 62 | if len(varAssignment) != 2 { 63 | return nil, fmt.Errorf("the given --mptf option \"%s\" is not correctly specified. Must be a variable name and value separated by an equals sign, like --mptf-var key=value", arg) 64 | } 65 | flags = append(flags, golden.NewCliFlagAssignedVariable(varAssignment[0], varAssignment[1])) 66 | i++ // skip next arg 67 | } 68 | return flags, nil 69 | } 70 | -------------------------------------------------------------------------------- /doc/d/provider_schema.md: -------------------------------------------------------------------------------- 1 | # Data `"provider_schema"` block 2 | 3 | The `provider_schema` data source retrieves the schema from a specified Terraform provider. This schema includes information about the provider's resources, their attributes, and nested blocks. 4 | 5 | Only resource schemas would be exported this time. 
6 | 7 | ## Example Usage 8 | 9 | ```hcl 10 | data "provider_schema" azurerm { 11 | provider_source = "hashicorp/azurerm" 12 | provider_version = "~> 4.0" 13 | } 14 | 15 | locals { 16 | resources_support_tags = toset([for name, r in data.provider_schema.azurerm.resources : name if try(r.block.attributes["tags"].type == ["map", "string"], false)]) 17 | } 18 | ``` 19 | 20 | ## Arguments 21 | 22 | - `provider_source` (String, Required): The source of the provider, typically in the format `hashicorp/azurerm`. 23 | - `provider_version` (String, Required): The version constraint for the provider, e.g., `~> 4.0`. 24 | 25 | ## Attributes 26 | 27 | - `resources` (Map): A map of resource schemas provided by the specified provider. Each resource schema includes: 28 | - `version` (`number`): The version of the particular resource schema. 29 | - `block` (Object): The block schema of the resource, which includes: 30 | - `attributes` (Map): The attributes defined at the particular level of this block. 31 | - `block_types` (Map): Any nested blocks within this particular block. 32 | - `description` (String): The description for this block and format of the description. If no kind is provided, it can be assumed to be plain text. 33 | 34 | ## Schema Details 35 | 36 | The `resources` attribute contains detailed information about each resource's schema. This includes the attributes and nested blocks defined for the resource. Each attribute schema includes the type, description, and other metadata. 37 | 38 | ## Under the Hood 39 | 40 | Mapotf uses the Terraform provider schema to retrieve the schema for the specified provider source and version. The schema is retrieved by `terraform providers schema -json -no-color` command. 
41 | -------------------------------------------------------------------------------- /pkg/transform_append_block_body.go: -------------------------------------------------------------------------------- 1 | package pkg 2 | 3 | import ( 4 | "encoding/json" 5 | "fmt" 6 | 7 | "github.com/Azure/golden" 8 | "github.com/Azure/mapotf/pkg/terraform" 9 | "github.com/hashicorp/hcl/v2" 10 | "github.com/hashicorp/hcl/v2/hclwrite" 11 | ) 12 | 13 | var _ Transform = &AppendBlockBodyTransform{} 14 | 15 | type AppendBlockBodyTransform struct { 16 | *golden.BaseBlock 17 | *BaseTransform 18 | TargetBlockAddress string `hcl:"target_block_address" validate:"required"` 19 | BlockBody string `hcl:"block_body" validate:"required"` 20 | } 21 | 22 | func (u *AppendBlockBodyTransform) Type() string { 23 | return "append_block_body" 24 | } 25 | 26 | func (u *AppendBlockBodyTransform) Apply() error { 27 | c := u.Config().(*MetaProgrammingTFConfig) 28 | b := c.RootBlock(u.TargetBlockAddress) 29 | if b == nil { 30 | return fmt.Errorf("cannot find block: %s", u.TargetBlockAddress) 31 | } 32 | cfg, diag := hclwrite.ParseConfig([]byte("append {\n"+u.BlockBody+"\n}"), "append.hcl", hcl.InitialPos) 33 | if diag.HasErrors() { 34 | return fmt.Errorf("failed to parse block body in %s, body is %s: %s", u.Address(), u.BlockBody, diag.Error()) 35 | } 36 | u.PatchWriteBlock(b, cfg.Body().Blocks()[0].Body()) 37 | return nil 38 | } 39 | 40 | func (u *AppendBlockBodyTransform) PatchWriteBlock(dest terraform.Block, patch *hclwrite.Body) { 41 | // we cannot patch one-line block 42 | if dest.Range().Start.Line == dest.Range().End.Line { 43 | dest.WriteBody().AppendNewline() 44 | } 45 | for name, attr := range patch.Attributes() { 46 | dest.SetAttributeRaw(name, attr.Expr().BuildTokens(nil)) 47 | } 48 | for _, nb := range patch.Blocks() { 49 | dest.AppendBlock(nb) 50 | } 51 | } 52 | 53 | func (u *AppendBlockBodyTransform) String() string { 54 | content := make(map[string]any) 55 | content["id"] = u.Id() 56 | 
content["target_block_address"] = u.TargetBlockAddress 57 | content["concat"] = u.BlockBody 58 | str, err := json.Marshal(content) 59 | if err != nil { 60 | panic(err.Error()) 61 | } 62 | return string(str) 63 | } 64 | -------------------------------------------------------------------------------- /example/tracing_tags/main.mptf.hcl: -------------------------------------------------------------------------------- 1 | data "resource" all { 2 | } 3 | 4 | data "terraform" version { 5 | 6 | } 7 | 8 | data "provider_schema" azurerm { 9 | provider_source = data.terraform.version.required_providers["azurerm"].source 10 | provider_version = data.terraform.version.required_providers["azurerm"].version 11 | } 12 | 13 | locals { 14 | resources_support_tags = toset([for name, r in data.provider_schema.azurerm.resources : name if try(r.block.attributes["tags"].type == ["map", "string"], false)]) 15 | resource_support_tags = flatten([for resource_type, resource_blocks in data.resource.all.result : resource_blocks if contains(local.resources_support_tags, resource_type)]) 16 | mptfs = flatten([for _, blocks in local.resource_support_tags : [for b in blocks : b.mptf]]) 17 | addresses = [for mptf in local.mptfs : mptf.block_address] 18 | all_resources = { for obj in flatten([for obj in flatten([for b in data.resource.all.result.* : [for nb in b : nb]]) : [for body in obj : body]]) : obj.mptf.block_address => obj } 19 | } 20 | 21 | transform "update_in_place" tags { 22 | for_each = try(local.addresses, []) 23 | target_block_address = each.value 24 | asstring { 25 | tags = <<-TAGS 26 | %{if try(local.all_resources[each.value].tags != "", false)}merge(${local.all_resources[each.value].tags}, var.tracing_tags_enabled ? 
{ 27 | file = "${local.all_resources[each.value].mptf.range.file_name}" 28 | block = "${local.all_resources[each.value].mptf.terraform_address}" 29 | module_source = try(one(data.modtm_module_source.telemetry).module_source, "") 30 | module_version = try(one(data.modtm_module_source.telemetry).module_version, "") 31 | } : {}) %{else} var.tracing_tags_enabled ? { 32 | file = "${local.all_resources[each.value].mptf.range.file_name}" 33 | block = "${local.all_resources[each.value].mptf.terraform_address}" 34 | module_source = try(one(data.modtm_module_source.telemetry).module_source, "") 35 | module_version = try(one(data.modtm_module_source.telemetry).module_version, "") 36 | } : {}%{endif} 37 | TAGS 38 | } 39 | } -------------------------------------------------------------------------------- /pkg/data_terraform.go: -------------------------------------------------------------------------------- 1 | package pkg 2 | 3 | import ( 4 | "fmt" 5 | "github.com/Azure/golden" 6 | "github.com/hashicorp/hcl/v2" 7 | "github.com/zclconf/go-cty/cty" 8 | ) 9 | 10 | type RequiredProvider struct { 11 | Source *string `attribute:"source"` 12 | Version *string `attribute:"version"` 13 | } 14 | 15 | var _ Data = &TerraformData{} 16 | 17 | type TerraformData struct { 18 | *BaseData 19 | *golden.BaseBlock 20 | 21 | RequiredVersion *string `attribute:"required_version"` 22 | RequiredProviders map[string]RequiredProvider `attribute:"required_providers"` 23 | Block cty.Value `attribute:"block"` 24 | } 25 | 26 | func (d *TerraformData) Type() string { 27 | return "terraform" 28 | } 29 | 30 | func (d *TerraformData) ExecuteDuringPlan() error { 31 | d.Block = cty.NilVal 32 | tb := d.BaseBlock.Config().(*MetaProgrammingTFConfig).TerraformBlock() 33 | if tb == nil { 34 | return nil 35 | } 36 | d.Block = tb.EvalContext() 37 | requiredTerraformVersion, ok := tb.Attributes["required_version"] 38 | if ok { 39 | v, diag := requiredTerraformVersion.Expr.Value(&hcl.EvalContext{}) 40 | if 
diag.HasErrors() { 41 | return fmt.Errorf("error while evaluating terraform block's `required_version`: %+v", diag) 42 | } 43 | s := v.AsString() 44 | d.RequiredVersion = &s 45 | } 46 | rp, ok := tb.NestedBlocks["required_providers"] 47 | if !ok || len(rp) == 0 { 48 | return nil 49 | } 50 | d.RequiredProviders = make(map[string]RequiredProvider) 51 | for s, p := range rp[0].Body.Attributes { 52 | providerConfig, diag := p.Expr.Value(&hcl.EvalContext{}) 53 | if diag.HasErrors() { 54 | return fmt.Errorf("error while evaluating terraform block's `required_providers.%s`: %+v", s, diag) 55 | } 56 | p := RequiredProvider{} 57 | it := providerConfig.ElementIterator() 58 | for it.Next() { 59 | k, _ := it.Element() 60 | if k.AsString() == "source" { 61 | source := providerConfig.GetAttr("source").AsString() 62 | p.Source = &source 63 | } 64 | if k.AsString() == "version" { 65 | version := providerConfig.GetAttr("version").AsString() 66 | p.Version = &version 67 | } 68 | } 69 | d.RequiredProviders[s] = p 70 | } 71 | return nil 72 | } 73 | -------------------------------------------------------------------------------- /doc/t/regex_replace_expression.md: -------------------------------------------------------------------------------- 1 | # `regex_replace_expression` Transform Block 2 | 3 | The `regex_replace_expression` transform block is a tool in Mapotf that allows you to replace specific patterns in expressions using regular expressions. This is useful when you need to modify configurations by replacing certain patterns with new values. 4 | 5 | ## Arguments 6 | 7 | - `regex`: This argument specifies the regular expression pattern to match in the expressions. The pattern is a string that follows the syntax of Go's `regexp` package. 8 | - `replacement`: This argument specifies the replacement string for the matched patterns. The replacement string can include references to captured groups from the regular expression. 
9 | 10 | ## Example 11 | 12 | Here is an example of how to use the `regex_replace_expression` transform block to replace patterns in expressions: 13 | 14 | ```terraform 15 | transform "regex_replace_expression" this { 16 | regex = "azurerm_kubernetes_cluster\\.(\\s*\\r?\\n\\s*)?(\\w+)(\\[\\s*[^]]+\\s*\\])?(\\.)(\\s*\\r?\\n\\s*)?location" 17 | replacement = "azurerm_kubernetes_cluster.$${1}$${2}$${3}$${4}$${5}region" 18 | } 19 | ``` 20 | 21 | In this example, the `regex` argument specifies a pattern that matches the `location` attribute of `azurerm_kubernetes_cluster` resources. The `replacement` argument specifies that the matched pattern should be replaced with `region`. 22 | 23 | ## Detailed Behavior 24 | 25 | The `regex_replace_expression` transform block works by traversing all expressions in the Terraform configuration and applying the specified regular expression replacement. The replacement is applied to both attributes and nested blocks. 26 | 27 | ### Example Scenarios 28 | 29 | ```terraform 30 | locals { 31 | azurerm_kubernetes_cluster_location = azurerm_kubernetes_cluster.example[0].location 32 | } 33 | ``` 34 | 35 | After applying the transform: 36 | 37 | ```terraform 38 | locals { 39 | azurerm_kubernetes_cluster_location = azurerm_kubernetes_cluster.example[0].region 40 | } 41 | ``` 42 | 43 | In summary, the `regex_replace_expression` transform block is a powerful tool for modifying Terraform configurations by replacing patterns in expressions using regular expressions. This allows for flexible and precise updates to configuration files. 
-------------------------------------------------------------------------------- /pkg/data_data_source.go: -------------------------------------------------------------------------------- 1 | package pkg 2 | 3 | import ( 4 | "github.com/Azure/golden" 5 | "github.com/Azure/mapotf/pkg/terraform" 6 | "github.com/ahmetb/go-linq/v3" 7 | "github.com/zclconf/go-cty/cty" 8 | ctyjson "github.com/zclconf/go-cty/cty/json" 9 | ) 10 | 11 | var _ Data = &DataSourceData{} 12 | 13 | type DataSourceData struct { 14 | *BaseData 15 | *golden.BaseBlock 16 | 17 | DataSourceType string `hcl:"data_source_type,optional"` 18 | UseCount bool `hcl:"use_count,optional" default:"false"` 19 | UseForEach bool `hcl:"use_for_each,optional" default:"false"` 20 | Result cty.Value `attribute:"result"` 21 | } 22 | 23 | func (dd *DataSourceData) Type() string { 24 | return "data" 25 | } 26 | 27 | func (dd *DataSourceData) ExecuteDuringPlan() error { 28 | src := dd.BaseBlock.Config().(*MetaProgrammingTFConfig).DataBlocks() 29 | var matched []*terraform.RootBlock 30 | ds := linq.From(src) 31 | if dd.DataSourceType != "" { 32 | ds = ds.Where(func(i interface{}) bool { 33 | return i.(*terraform.RootBlock).Labels[0] == dd.DataSourceType 34 | }) 35 | } 36 | if dd.UseForEach { 37 | ds = ds.Where(func(i interface{}) bool { 38 | return i.(*terraform.RootBlock).ForEach != nil 39 | }) 40 | } 41 | if dd.UseCount { 42 | ds = ds.Where(func(i interface{}) bool { 43 | return i.(*terraform.RootBlock).Count != nil 44 | }) 45 | } 46 | ds.ToSlice(&matched) 47 | dataBlocks := make(map[string]map[string]cty.Value) 48 | for _, b := range matched { 49 | dataType := b.Labels[0] 50 | m, ok := dataBlocks[dataType] 51 | if !ok { 52 | m = make(map[string]cty.Value) 53 | dataBlocks[dataType] = m 54 | } 55 | m[b.Labels[1]] = b.EvalContext() 56 | } 57 | obj := make(map[string]cty.Value) 58 | for k, m := range dataBlocks { 59 | obj[k] = cty.ObjectVal(m) 60 | } 61 | dd.Result = cty.ObjectVal(obj) 62 | return nil 63 | } 64 | 65 | func 
(dd *DataSourceData) String() string { 66 | d := cty.ObjectVal(map[string]cty.Value{ 67 | "data_source_type": cty.StringVal(dd.DataSourceType), 68 | "use_count": cty.BoolVal(dd.UseCount), 69 | "use_for_each": cty.BoolVal(dd.UseForEach), 70 | "result": dd.Result, 71 | }) 72 | r, err := ctyjson.Marshal(d, d.Type()) 73 | if err != nil { 74 | panic(err.Error()) 75 | } 76 | return string(r) 77 | } 78 | -------------------------------------------------------------------------------- /pkg/data_resource.go: -------------------------------------------------------------------------------- 1 | package pkg 2 | 3 | import ( 4 | "github.com/Azure/golden" 5 | "github.com/Azure/mapotf/pkg/terraform" 6 | "github.com/ahmetb/go-linq/v3" 7 | "github.com/zclconf/go-cty/cty" 8 | ctyjson "github.com/zclconf/go-cty/cty/json" 9 | ) 10 | 11 | var _ Data = &ResourceData{} 12 | 13 | type ResourceData struct { 14 | *BaseData 15 | *golden.BaseBlock 16 | 17 | ResourceType string `hcl:"resource_type,optional"` 18 | UseCount bool `hcl:"use_count,optional" default:"false"` 19 | UseForEach bool `hcl:"use_for_each,optional" default:"false"` 20 | Result cty.Value `attribute:"result"` 21 | } 22 | 23 | func (rd *ResourceData) Type() string { 24 | return "resource" 25 | } 26 | 27 | func (rd *ResourceData) ExecuteDuringPlan() error { 28 | src := rd.BaseBlock.Config().(*MetaProgrammingTFConfig).ResourceBlocks() 29 | var matched []*terraform.RootBlock 30 | res := linq.From(src) 31 | if rd.ResourceType != "" { 32 | res = res.Where(func(i interface{}) bool { 33 | return i.(*terraform.RootBlock).Labels[0] == rd.ResourceType 34 | }) 35 | } 36 | if rd.UseForEach { 37 | res = res.Where(func(i interface{}) bool { 38 | return i.(*terraform.RootBlock).ForEach != nil 39 | }) 40 | } 41 | if rd.UseCount { 42 | res = res.Where(func(i interface{}) bool { 43 | return i.(*terraform.RootBlock).Count != nil 44 | }) 45 | } 46 | res.ToSlice(&matched) 47 | resourceBlocks := make(map[string]map[string]cty.Value) 48 | for _, b := 
range matched { 49 | resourceType := b.Labels[0] 50 | m, ok := resourceBlocks[resourceType] 51 | if !ok { 52 | m = make(map[string]cty.Value) 53 | resourceBlocks[resourceType] = m 54 | } 55 | m[b.Labels[1]] = b.EvalContext() 56 | } 57 | obj := make(map[string]cty.Value) 58 | for k, m := range resourceBlocks { 59 | obj[k] = cty.ObjectVal(m) 60 | } 61 | rd.Result = cty.ObjectVal(obj) 62 | return nil 63 | } 64 | 65 | func (rd *ResourceData) String() string { 66 | d := cty.ObjectVal(map[string]cty.Value{ 67 | "resource_type": cty.StringVal(rd.ResourceType), 68 | "use_count": cty.BoolVal(rd.UseCount), 69 | "use_for_each": cty.BoolVal(rd.UseForEach), 70 | "result": rd.Result, 71 | }) 72 | r, err := ctyjson.Marshal(d, d.Type()) 73 | if err != nil { 74 | panic(err.Error()) 75 | } 76 | return string(r) 77 | } 78 | -------------------------------------------------------------------------------- /pkg/data_output_test.go: -------------------------------------------------------------------------------- 1 | package pkg_test 2 | 3 | import ( 4 | "context" 5 | "testing" 6 | 7 | "github.com/Azure/golden" 8 | "github.com/Azure/mapotf/pkg" 9 | filesystem "github.com/Azure/mapotf/pkg/fs" 10 | "github.com/prashantv/gostub" 11 | "github.com/stretchr/testify/assert" 12 | "github.com/stretchr/testify/require" 13 | "github.com/zclconf/go-cty/cty" 14 | ) 15 | 16 | func TestDataOutput_ExecuteDuringPlan(t *testing.T) { 17 | cases := []struct { 18 | desc string 19 | tfCode string 20 | expectedOutputName string 21 | expectedNotFound bool 22 | expectedValue cty.Value 23 | }{ 24 | { 25 | desc: "single output without filter", 26 | tfCode: ` 27 | output "example_output" { 28 | value = "example_value" 29 | }`, 30 | expectedOutputName: "example_output", 31 | expectedValue: cty.StringVal(`"example_value"`), 32 | }, 33 | { 34 | desc: "filter by output name", 35 | tfCode: ` 36 | output "output_one" { 37 | value = "value_one" 38 | } 39 | 40 | output "output_two" { 41 | value = "value_two" 42 | }`, 43 | 
// TestDataOutput_ExecuteDuringPlan verifies that the `output` data block
// collects output blocks from the Terraform module and filters them by the
// expected output name.
func TestDataOutput_ExecuteDuringPlan(t *testing.T) {
	cases := []struct {
		desc               string
		tfCode             string
		expectedOutputName string
		expectedNotFound   bool
		expectedValue      cty.Value
	}{
		{
			desc: "single output without filter",
			tfCode: `
output "example_output" {
  value = "example_value"
}`,
			expectedOutputName: "example_output",
			// Output values are exposed as raw HCL source, hence the quoted string.
			expectedValue: cty.StringVal(`"example_value"`),
		},
		{
			desc: "filter by output name",
			tfCode: `
output "output_one" {
  value = "value_one"
}

output "output_two" {
  value = "value_two"
}`,
			expectedOutputName: "output_two",
			expectedValue:      cty.StringVal(`"value_two"`),
		},
		{
			desc: "no matching output",
			tfCode: `
output "existing_output" {
  value = "existing_value"
}`,
			expectedOutputName: "non_existing_output",
			expectedNotFound:   true,
		},
	}

	for _, c := range cases {
		t.Run(c.desc, func(t *testing.T) {
			// Replace the real filesystem with an in-memory one holding the TF code.
			stub := gostub.Stub(&filesystem.Fs, fakeFs(map[string]string{
				"/main.tf": c.tfCode,
			}))
			defer stub.Reset()

			cfg, err := pkg.NewMetaProgrammingTFConfig(&pkg.TerraformModuleRef{
				Dir:    "/",
				AbsDir: "/",
			}, nil, nil, nil, context.TODO())
			require.NoError(t, err)

			data := &pkg.DataOutput{
				BaseBlock:          golden.NewBaseBlock(cfg, nil),
				BaseData:           &pkg.BaseData{},
				ExpectedOutputName: c.expectedOutputName,
			}

			err = data.ExecuteDuringPlan()
			require.NoError(t, err)
			if c.expectedNotFound {
				// When no output matches, Result must be an empty object.
				assert.Equal(t, cty.ObjectVal(make(map[string]cty.Value)), data.Result)
			} else {
				outputBlock := data.Result.GetAttr(c.expectedOutputName)
				assert.Equal(t, c.expectedValue, outputBlock.GetAttr("value"))
			}

		})
	}
}
var _ Transform = &EnsureLocalTransform{}
var _ golden.CustomDecode = &EnsureLocalTransform{}

// EnsureLocalTransform guarantees that a local value with the given name
// exists in the module: it updates the existing `locals` block that defines
// it, or creates a new `locals` block in FallbackFileName when the local is
// not defined anywhere yet.
type EnsureLocalTransform struct {
	*golden.BaseBlock
	*BaseTransform
	LocalName        string          `hcl:"name" validate:"required"`
	FallbackFileName string          `hcl:"fallback_file_name" validate:"required"`
	writeBlock       *hclwrite.Block // the `locals` block that receives the value
	tokens           hclwrite.Tokens // raw tokens of the value expression
	newWriteBlock    bool            // true when writeBlock was created rather than found
}

// Type returns the transform type name used in mptf config files.
func (u *EnsureLocalTransform) Type() string {
	return "ensure_local"
}

// Apply writes the value tokens into the chosen `locals` block, adding the
// block to FallbackFileName first when it was newly created during Decode.
func (u *EnsureLocalTransform) Apply() error {
	u.writeBlock.Body().SetAttributeRaw(u.LocalName, u.tokens)
	if u.newWriteBlock {
		cfg := u.Config().(*MetaProgrammingTFConfig)
		cfg.AddBlock(u.FallbackFileName, u.writeBlock)
	}
	return nil
}

// Decode implements golden.CustomDecode. It resolves the target `locals`
// block and captures the value tokens from exactly one of `value_as_string`
// (a string of HCL source) or `value_as_raw` (a raw expression).
// NOTE(review): the order matters here — the write block must be resolved
// before the value attributes are read.
func (u *EnsureLocalTransform) Decode(block *golden.HclBlock, context *hcl.EvalContext) error {
	var err error
	u.LocalName, err = getRequiredStringAttribute("name", block, context)
	if err != nil {
		return err
	}
	u.FallbackFileName, err = getRequiredStringAttribute("fallback_file_name", block, context)
	if err != nil {
		return err
	}
	cfg := u.Config().(*MetaProgrammingTFConfig)
	// Reuse the locals block that already defines this local, if any;
	// otherwise prepare a fresh block to be added during Apply.
	if b, ok := cfg.localBlocks[fmt.Sprintf("local.%s", u.LocalName)]; ok {
		u.writeBlock = b.WriteBlock
	} else {
		u.writeBlock = hclwrite.NewBlock("locals", []string{})
		u.newWriteBlock = true
	}
	asString, err := getOptionalStringAttribute("value_as_string", block, context)
	if err != nil {
		return err
	}
	raw, asRaw := block.Attributes()["value_as_raw"]
	// The two value forms are mutually exclusive.
	if asString != nil && asRaw {
		return fmt.Errorf("cannot use both value_as_string and value_as_raw")
	}
	if asString != nil {
		u.tokens, err = stringToHclWriteTokens(*asString)
		if err != nil {
			return err
		}
	}
	if asRaw {
		u.tokens = raw.ExprTokens()
	}
	return nil
}

// String renders the transform as JSON, mainly for plan output.
func (u *EnsureLocalTransform) String() string {
	content := make(map[string]any)
	content["id"] = u.Id()
	content["name"] = u.LocalName
	content["value"] = string(u.tokens.Bytes())
	str, err := json.Marshal(content)
	if err != nil {
		panic(err.Error())
	}
	return string(str)
}
// TestSuccessfulTransformation runs the full `mapotf transform` command
// against an in-memory filesystem and verifies that the target Terraform
// file is rewritten with the injected tags and that the original content is
// preserved in a `.mptfbackup` file.
func TestSuccessfulTransformation(t *testing.T) {
	// Stub the filesystem
	fs := afero.NewMemMapFs()
	_ = afero.WriteFile(fs, "/testData/main.mptf.hcl", []byte(`
data resource "fake_resource" {
  resource_type = "fake_resource"
}

transform update_in_place "fake_resource" {
  for_each = data.resource.fake_resource.result.fake_resource
  target_block_address = each.value.mptf.block_address
  asstring{
    tags = "merge(${try(coalesce(each.value.tags, "{}"), "{}")}, { \n block_address = \"${each.value.mptf.block_address}\" \n file_name = \"${each.value.mptf.range.file_name}\"\n })"
  }
}
`), 0644)
	terraformCode := `
resource "fake_resource" this {
  tags = {}
}

resource "fake_resource" that {
}
`
	_ = afero.WriteFile(fs, "/testTerraform/main.tf", []byte(terraformCode), 0644)
	// Simulate the CLI invocation; AbsDir is stubbed so in-memory paths are
	// not resolved against the real working directory.
	stub := gostub.Stub(&filesystem.Fs, fs).Stub(
		&os.Args, []string{
			"mapotf",
			"transform",
			"--tf-dir", "/testTerraform",
			"--mptf-dir", "/testData",
		}).Stub(&pkg.AbsDir, func(dir string) (string, error) {
		return dir, nil
	})
	defer stub.Reset()

	mptfArgs, nonMptfArgs := cmd.FilterArgs(os.Args)
	os.Args = mptfArgs
	cmd.NonMptfArgs = nonMptfArgs
	cmd.Execute(context.Background())
	tfFile, err := afero.ReadFile(fs, "/testTerraform/main.tf")
	require.NoError(t, err)
	tfFileStr := string(tfFile)
	expected := `
resource "fake_resource" this {
  tags = merge({}, {
    block_address = "resource.fake_resource.this"
    file_name = "main.tf"
  })
}

resource "fake_resource" that {
  tags = merge({}, {
    block_address = "resource.fake_resource.that"
    file_name = "main.tf"
  })
}
`
	assert.Equal(t, expected, tfFileStr)
	// The transform must leave a backup of the untouched file behind.
	backupTfFilePath := "/testTerraform/main.tf.mptfbackup"
	exists, err := afero.Exists(fs, backupTfFilePath)
	require.NoError(t, err)
	assert.True(t, exists)
	backupFileContent, err := afero.ReadFile(fs, backupTfFilePath)
	require.NoError(t, err)
	assert.Equal(t, terraformCode, string(backupFileContent))
}
62 | block_address = "resource.fake_resource.this" 63 | file_name = "main.tf" 64 | }) 65 | } 66 | 67 | resource "fake_resource" that { 68 | tags = merge({}, { 69 | block_address = "resource.fake_resource.that" 70 | file_name = "main.tf" 71 | }) 72 | } 73 | ` 74 | assert.Equal(t, expected, tfFileStr) 75 | backupTfFilePath := "/testTerraform/main.tf.mptfbackup" 76 | exists, err := afero.Exists(fs, backupTfFilePath) 77 | require.NoError(t, err) 78 | assert.True(t, exists) 79 | backupFileContent, err := afero.ReadFile(fs, backupTfFilePath) 80 | require.NoError(t, err) 81 | assert.Equal(t, terraformCode, string(backupFileContent)) 82 | } 83 | -------------------------------------------------------------------------------- /pkg/resource_schema.go: -------------------------------------------------------------------------------- 1 | package pkg 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "github.com/hashicorp/terraform-exec/tfexec" 7 | tfjson "github.com/hashicorp/terraform-json" 8 | "os" 9 | "os/exec" 10 | "path/filepath" 11 | "runtime" 12 | "strings" 13 | ) 14 | 15 | type TerraformProviderSchemaRetriever interface { 16 | Get(providerSource, versionConstraint string) (*tfjson.ProviderSchema, error) 17 | } 18 | 19 | type TerraformCliProviderSchemaRetriever struct { 20 | ctx context.Context 21 | } 22 | 23 | func NewTerraformCliProviderSchemaRetriever(ctx context.Context) TerraformProviderSchemaRetriever { 24 | return TerraformCliProviderSchemaRetriever{ctx: ctx} 25 | } 26 | 27 | func (t TerraformCliProviderSchemaRetriever) Get(providerSource, versionConstraint string) (*tfjson.ProviderSchema, error) { 28 | tmpFolder, err := os.MkdirTemp("", "*") 29 | if err != nil { 30 | return nil, fmt.Errorf("error creating temp TF code folder: %s", err) 31 | } 32 | defer func() { 33 | _ = os.RemoveAll(tmpFolder) 34 | }() 35 | 36 | tfProviderCode := fmt.Sprintf(` 37 | terraform { 38 | required_providers { 39 | provider = { 40 | source = "%s" 41 | version = "%s" 42 | } 43 | } 44 | } 45 | 
`, providerSource, versionConstraint) 46 | 47 | err = os.WriteFile(filepath.Join(tmpFolder, "main.tf"), []byte(tfProviderCode), 0600) 48 | if err != nil { 49 | return nil, fmt.Errorf("error writing temp TF code file: %s", err) 50 | } 51 | 52 | execPath, err := t.getTerraformPath() 53 | if err != nil { 54 | return nil, err 55 | } 56 | workingDir := tmpFolder 57 | tf, err := tfexec.NewTerraform(workingDir, execPath) 58 | if err != nil { 59 | return nil, fmt.Errorf("error running NewTerraform: %w", err) 60 | } 61 | 62 | err = tf.Init(t.ctx, tfexec.Upgrade(true)) 63 | if err != nil { 64 | return nil, fmt.Errorf("error running Init: %s", err) 65 | } 66 | schema, err := tf.ProvidersSchema(t.ctx) 67 | if err != nil { 68 | return nil, fmt.Errorf("error running providers: %w", err) 69 | } 70 | src := fmt.Sprintf("registry.terraform.io/%s", providerSource) 71 | r, ok := schema.Schemas[src] 72 | if !ok { 73 | src = providerSource 74 | r = schema.Schemas[src] 75 | } 76 | 77 | return r, nil 78 | } 79 | 80 | func (t TerraformCliProviderSchemaRetriever) getTerraformPath() (string, error) { 81 | var cmd *exec.Cmd 82 | 83 | if t.isWindows() { 84 | cmd = exec.Command("where", "terraform") 85 | } else { 86 | cmd = exec.Command("which", "terraform") 87 | } 88 | 89 | out, err := cmd.Output() 90 | if err != nil { 91 | return "", err 92 | } 93 | 94 | path := strings.TrimSpace(string(out)) 95 | return path, nil 96 | } 97 | 98 | func (t TerraformCliProviderSchemaRetriever) isWindows() bool { 99 | return runtime.GOOS == "windows" 100 | } 101 | -------------------------------------------------------------------------------- /pkg/terraform/object.go: -------------------------------------------------------------------------------- 1 | package terraform 2 | 3 | import ( 4 | "github.com/ahmetb/go-linq/v3" 5 | "github.com/zclconf/go-cty/cty" 6 | "github.com/zclconf/go-cty/cty/convert" 7 | ) 8 | 9 | var _ Object = &RootBlock{} 10 | var _ Object = &NestedBlock{} 11 | 12 | type Object interface { 13 
| EvalContext() cty.Value 14 | } 15 | 16 | func ListOfObject[T Object](objs []T) cty.Value { 17 | var values []cty.Value 18 | allTypes := make(map[string]cty.Type) 19 | for _, b := range objs { 20 | value := b.EvalContext() 21 | values = append(values, value) 22 | attributeTypes := value.Type().AttributeTypes() 23 | for n, t := range attributeTypes { 24 | if _, ok := allTypes[n]; !ok { 25 | allTypes[n] = t 26 | continue 27 | } 28 | if !allTypes[n].Equals(t) { 29 | if allTypes[n].IsListType() && t.IsListType() { 30 | allTypes[n] = cty.List(mergeObjectType(allTypes[n].ElementType(), t.ElementType())) 31 | continue 32 | } 33 | allTypes[n] = mergeObjectType(allTypes[n], t) 34 | } 35 | } 36 | } 37 | var allFields []string 38 | linq.From(allTypes).Select(func(i interface{}) interface{} { 39 | return i.(linq.KeyValue).Key 40 | }).ToSlice(&allFields) 41 | finalType := cty.ObjectWithOptionalAttrs(allTypes, allFields) 42 | var convertedValues []cty.Value 43 | for _, v := range values { 44 | cv, err := convert.Convert(v, finalType) 45 | if err != nil { 46 | panic(err) 47 | } 48 | convertedValues = append(convertedValues, cv) 49 | } 50 | if len(convertedValues) == 0 { 51 | return cty.ListValEmpty(finalType) 52 | } 53 | return cty.ListVal(convertedValues) 54 | } 55 | 56 | func mergeObjectType(t1, t2 cty.Type) cty.Type { 57 | if t1.IsPrimitiveType() && t2.IsPrimitiveType() { 58 | return t1 59 | } 60 | if t1.IsCollectionType() && t2.IsCollectionType() { 61 | return mergeObjectTypeInCollection(t1, t2) 62 | } 63 | newAttriubtes := make(map[string]cty.Type) 64 | for n, t := range t1.AttributeTypes() { 65 | newAttriubtes[n] = t 66 | } 67 | for n, t := range t2.AttributeTypes() { 68 | if _, ok := newAttriubtes[n]; !ok { 69 | newAttriubtes[n] = t 70 | continue 71 | } 72 | newAttriubtes[n] = mergeObjectType(newAttriubtes[n], t) 73 | } 74 | var allFields []string 75 | for n := range newAttriubtes { 76 | allFields = append(allFields, n) 77 | } 78 | return 
cty.ObjectWithOptionalAttrs(newAttriubtes, allFields) 79 | } 80 | 81 | func mergeObjectTypeInCollection(t1, t2 cty.Type) cty.Type { 82 | if t1.ElementType().IsObjectType() && t2.ElementType().IsObjectType() { 83 | mergedElementType := mergeObjectType(t1.ElementType(), t2.ElementType()) 84 | if t1.IsListType() { 85 | return cty.List(mergedElementType) 86 | } 87 | if t1.IsMapType() { 88 | return cty.Map(mergedElementType) 89 | } 90 | if t1.IsSetType() { 91 | return cty.Set(mergedElementType) 92 | } 93 | } 94 | return t1 95 | } 96 | -------------------------------------------------------------------------------- /SECURITY.md: -------------------------------------------------------------------------------- 1 | 2 | 3 | ## Security 4 | 5 | Microsoft takes the security of our software products and services seriously, which includes all source code repositories managed through our GitHub organizations, which include [Microsoft](https://github.com/Microsoft), [Azure](https://github.com/Azure), [DotNet](https://github.com/dotnet), [AspNet](https://github.com/aspnet), [Xamarin](https://github.com/xamarin), and [our GitHub organizations](https://opensource.microsoft.com/). 6 | 7 | If you believe you have found a security vulnerability in any Microsoft-owned repository that meets [Microsoft's definition of a security vulnerability](https://aka.ms/opensource/security/definition), please report it to us as described below. 8 | 9 | ## Reporting Security Issues 10 | 11 | **Please do not report security vulnerabilities through public GitHub issues.** 12 | 13 | Instead, please report them to the Microsoft Security Response Center (MSRC) at [https://msrc.microsoft.com/create-report](https://aka.ms/opensource/security/create-report). 14 | 15 | If you prefer to submit without logging in, send email to [secure@microsoft.com](mailto:secure@microsoft.com). 
If possible, encrypt your message with our PGP key; please download it from the [Microsoft Security Response Center PGP Key page](https://aka.ms/opensource/security/pgpkey). 16 | 17 | You should receive a response within 24 hours. If for some reason you do not, please follow up via email to ensure we received your original message. Additional information can be found at [microsoft.com/msrc](https://aka.ms/opensource/security/msrc). 18 | 19 | Please include the requested information listed below (as much as you can provide) to help us better understand the nature and scope of the possible issue: 20 | 21 | * Type of issue (e.g. buffer overflow, SQL injection, cross-site scripting, etc.) 22 | * Full paths of source file(s) related to the manifestation of the issue 23 | * The location of the affected source code (tag/branch/commit or direct URL) 24 | * Any special configuration required to reproduce the issue 25 | * Step-by-step instructions to reproduce the issue 26 | * Proof-of-concept or exploit code (if possible) 27 | * Impact of the issue, including how an attacker might exploit the issue 28 | 29 | This information will help us triage your report more quickly. 30 | 31 | If you are reporting for a bug bounty, more complete reports can contribute to a higher bounty award. Please visit our [Microsoft Bug Bounty Program](https://aka.ms/opensource/security/bounty) page for more details about our active programs. 32 | 33 | ## Preferred Languages 34 | 35 | We prefer all communications to be in English. 36 | 37 | ## Policy 38 | 39 | Microsoft follows the principle of [Coordinated Vulnerability Disclosure](https://aka.ms/opensource/security/cvd). 
// TestTransformEnsureLocal covers the `ensure_local` transform: it either
// rewrites the value of an existing local, or creates the local (and, when
// needed, the file named by fallback_file_name) from scratch, using either
// the string or the raw-expression value form.
func TestTransformEnsureLocal(t *testing.T) {
	cases := []struct {
		desc          string
		mptfConfig    string
		tfConfig      string
		expectedFiles map[string]string
	}{
		{
			desc: "replace value astring",
			mptfConfig: `transform "ensure_local" this{
  name = "this"
  fallback_file_name = "main.tf"
  value_as_string = "local.that"
}`,
			tfConfig: `locals {
  this = "hello"
}`,
			expectedFiles: map[string]string{
				"/main.tf": `locals {
  this = local.that
}`,
			},
		},
		{
			desc: "replace value asraw",
			mptfConfig: `transform "ensure_local" this{
  name = "this"
  fallback_file_name = "main.tf"
  value_as_raw = local.that
}`,
			tfConfig: `locals {
  this = "hello"
}`,
			expectedFiles: map[string]string{
				"/main.tf": `locals {
  this = local.that
}`,
			},
		},
		{
			desc: "new local without create new file",
			mptfConfig: `transform "ensure_local" this{
  name = "this"
  fallback_file_name = "main.tf"
  value_as_raw = local.that
}`,
			tfConfig: ``,
			expectedFiles: map[string]string{
				"/main.tf": `locals {
  this = local.that
}`,
			},
		},
		{
			desc: "new local create new file",
			mptfConfig: `transform "ensure_local" this{
  name = "this"
  fallback_file_name = "locals.tf"
  value_as_raw = local.that
}`,
			tfConfig: ``,
			expectedFiles: map[string]string{
				"/locals.tf": `locals {
  this = local.that
}`,
			},
		},
	}
	for _, c := range cases {
		t.Run(c.desc, func(t *testing.T) {
			// Run plan + apply against an in-memory filesystem.
			mockFs := fakeFs(map[string]string{
				"/main.tf":       c.tfConfig,
				"/main.mptf.hcl": c.mptfConfig,
			})
			stub := gostub.Stub(&filesystem.Fs, mockFs)
			defer stub.Reset()

			hclBlocks, err := pkg.LoadMPTFHclBlocks(false, "/")
			require.NoError(t, err)
			cfg, err := pkg.NewMetaProgrammingTFConfig(&pkg.TerraformModuleRef{
				Dir:    "/",
				AbsDir: "/",
			}, nil, hclBlocks, nil, context.TODO())
			require.NoError(t, err)
			plan, err := pkg.RunMetaProgrammingTFPlan(cfg)
			require.NoError(t, err)
			require.NoError(t, plan.Apply())

			// Compare formatted HCL so whitespace differences do not matter.
			for name, content := range c.expectedFiles {
				file, err := afero.ReadFile(mockFs, name)
				require.NoError(t, err)
				assert.Equal(t, formatHcl(content), formatHcl(string(file)))
			}
		})
	}
}
9 | 10 | ## Example - Appending Attributes and Nested Blocks 11 | 12 | Here is an example of how to use the `append_block_body` transform block to add attributes and nested blocks to an existing resource block: 13 | 14 | ```terraform 15 | transform "append_block_body" example { 16 | target_block_address = "resource.fake_resource.example" 17 | block_body = <<-BODY 18 | tags = { 19 | environment = "production" 20 | } 21 | nested_block { 22 | id = 123 23 | } 24 | BODY 25 | } 26 | ``` 27 | 28 | In this example, the `target_block_address` is set to the block address of the `fake_resource` resource. The `block_body` argument specifies the content to be appended, which includes a `tags` attribute and a `nested_block`. 29 | 30 | ## Detailed Behavior 31 | 32 | The `append_block_body` transform block works by parsing the `block_body` content and appending it to the target block. The content can include both attributes and nested blocks. If the target block is a one-line block, it will be converted to a multi-line block before appending the content. 33 | 34 | ### Example Scenarios 35 | 36 | 1. **Appending Attributes**: 37 | ```terraform 38 | transform "append_block_body" example { 39 | target_block_address = "resource.fake_resource.example" 40 | block_body = "tags = { environment = \"production\" }" 41 | } 42 | ``` 43 | 44 | ```terraform 45 | resource "fake_resource" "example" { 46 | name = "example" 47 | } 48 | ``` 49 | 50 | After applying the transform: 51 | ```terraform 52 | resource "fake_resource" "example" { 53 | name = "example" 54 | tags = { environment = "production" } 55 | } 56 | ``` 57 | 58 | 2. 
**Appending Nested Blocks**: 59 | 60 | ```terraform 61 | transform "append_block_body" example { 62 | target_block_address = "resource.fake_resource.example" 63 | block_body = <<-BODY 64 | nested_block { 65 | id = 123 66 | } 67 | BODY 68 | } 69 | ``` 70 | 71 | ```terraform 72 | resource "fake_resource" "example" { 73 | name = "example" 74 | } 75 | ``` 76 | 77 | After applying the transform: 78 | ```terraform 79 | resource "fake_resource" "example" { 80 | name = "example" 81 | nested_block { 82 | id = 123 83 | } 84 | } 85 | ``` 86 | 87 | In summary, the `append_block_body` transform block is a powerful tool for dynamically modifying existing Terraform blocks by appending additional content. This allows for flexible and programmatic updates to Terraform configurations. -------------------------------------------------------------------------------- /cmd/debug.go: -------------------------------------------------------------------------------- 1 | package cmd 2 | 3 | import ( 4 | "errors" 5 | "fmt" 6 | "github.com/Azure/golden" 7 | "github.com/Azure/mapotf/pkg" 8 | "github.com/hashicorp/hcl/v2" 9 | "github.com/hashicorp/hcl/v2/hclsyntax" 10 | "github.com/peterh/liner" 11 | "github.com/spf13/cobra" 12 | "os" 13 | ) 14 | 15 | func NewDebugCmd() *cobra.Command { 16 | var tfDir, mptfDir string 17 | debugCmd := &cobra.Command{ 18 | Use: "debug", 19 | Short: "Start REPL mode, mapotf debug --mptf-dir [path to config files]", 20 | FParseErrWhitelist: cobra.FParseErrWhitelist{ 21 | UnknownFlags: true, 22 | }, 23 | RunE: replFunc(&tfDir, &mptfDir), 24 | } 25 | pwd, err := os.Getwd() 26 | if err != nil { 27 | panic(fmt.Sprintf("error on getting working dir:%s", err.Error())) 28 | } 29 | debugCmd.Flags().StringVar(&tfDir, "tf-dir", pwd, "Terraform directory") 30 | debugCmd.Flags().StringVar(&mptfDir, "mptf-dir", "", "MPTF directory, you can assign only one mptf-dir for debug command") 31 | err = debugCmd.MarkFlagRequired("mptf-dir") 32 | if err != nil { 33 | panic(err) 34 | } 35 
| return debugCmd 36 | } 37 | 38 | func replFunc(tfDir, mptfDir *string) func(c *cobra.Command, args []string) error { 39 | return func(c *cobra.Command, args []string) error { 40 | varFlags, err := varFlags(os.Args) 41 | if err != nil { 42 | return err 43 | } 44 | localizedDir, dispose, err := localizeConfigFolder(*mptfDir, c.Context()) 45 | if err != nil { 46 | return err 47 | } 48 | if dispose != nil { 49 | defer dispose() 50 | } 51 | hclBlocks, err := pkg.LoadMPTFHclBlocks(false, localizedDir) 52 | if err != nil { 53 | return err 54 | } 55 | mod, err := pkg.NewTerraformModuleRef(*tfDir, "", "", "") 56 | if err != nil { 57 | return err 58 | } 59 | cfg, err := pkg.NewMetaProgrammingTFConfig(mod, nil, hclBlocks, varFlags, c.Context()) 60 | if err != nil { 61 | return err 62 | } 63 | _, err = pkg.RunMetaProgrammingTFPlan(cfg) 64 | if err != nil { 65 | return err 66 | } 67 | line := liner.NewLiner() 68 | defer func() { 69 | _ = line.Close() 70 | }() 71 | 72 | line.SetCtrlCAborts(true) 73 | fmt.Println("Entering debuging mode, press `quit` or `exit` or Ctrl+C to quit.") 74 | 75 | for { 76 | if input, err := line.Prompt("debug> "); err == nil { 77 | if input == "quit" || input == "exit" { 78 | return nil 79 | } 80 | line.AppendHistory(input) 81 | expression, diag := hclsyntax.ParseExpression([]byte(input), "repl.hcl", hcl.InitialPos) 82 | if diag.HasErrors() { 83 | fmt.Printf("%s\n", diag.Error()) 84 | continue 85 | } 86 | value, diag := expression.Value(cfg.EvalContext()) 87 | if diag.HasErrors() { 88 | fmt.Printf("%s\n", diag.Error()) 89 | continue 90 | } 91 | fmt.Println(golden.CtyValueToString(value)) 92 | } else if errors.Is(err, liner.ErrPromptAborted) { 93 | fmt.Println("Aborted") 94 | break 95 | } else { 96 | fmt.Println("Error reading line: ", err) 97 | break 98 | } 99 | } 100 | 101 | return nil 102 | } 103 | } 104 | 105 | func init() { 106 | rootCmd.AddCommand(NewDebugCmd()) 107 | } 108 | 
-------------------------------------------------------------------------------- /pkg/transform_append_block_body_test.go: -------------------------------------------------------------------------------- 1 | package pkg_test 2 | 3 | import ( 4 | "context" 5 | "github.com/spf13/afero" 6 | "testing" 7 | 8 | "github.com/Azure/golden" 9 | "github.com/Azure/mapotf/pkg" 10 | filesystem "github.com/Azure/mapotf/pkg/fs" 11 | "github.com/hashicorp/hcl/v2" 12 | "github.com/hashicorp/hcl/v2/hclsyntax" 13 | "github.com/hashicorp/hcl/v2/hclwrite" 14 | "github.com/prashantv/gostub" 15 | "github.com/stretchr/testify/assert" 16 | "github.com/stretchr/testify/require" 17 | ) 18 | 19 | func TestConcatBlockBodyTransform_Apply(t *testing.T) { 20 | cases := []struct { 21 | desc string 22 | cfg string 23 | expectedConcatedBlock string 24 | }{ 25 | { 26 | desc: "concatenate attributes", 27 | cfg: ` 28 | transform "append_block_body" this { 29 | target_block_address = "resource.fake_resource.this" 30 | block_body = "tags = { hello = world }" 31 | } 32 | `, 33 | expectedConcatedBlock: `resource "fake_resource" this { 34 | tags = { hello = world } 35 | }`, 36 | }, 37 | { 38 | desc: "concatenate nested blocks", 39 | cfg: ` 40 | transform "append_block_body" this { 41 | target_block_address = "resource.fake_resource.this" 42 | block_body = "nested_block {\n id = 123\n }" 43 | } 44 | `, 45 | expectedConcatedBlock: `resource "fake_resource" this { 46 | tags = null 47 | nested_block { 48 | id = 123 49 | } 50 | }`, 51 | }, 52 | { 53 | desc: "concatenate attributes and nested blocks", 54 | cfg: ` 55 | transform "append_block_body" this { 56 | target_block_address = "resource.fake_resource.this" 57 | block_body = "tags = {\n hello = world\n } \n nested_block {\n id = 123\n }" 58 | } 59 | `, 60 | expectedConcatedBlock: `resource "fake_resource" this { 61 | tags = { 62 | hello = world 63 | } 64 | nested_block { 65 | id = 123 66 | } 67 | }`, 68 | }, 69 | } 70 | for _, c := range cases { 71 | 
t.Run(c.desc, func(t *testing.T) { 72 | stub := gostub.Stub(&filesystem.Fs, fakeFs(map[string]string{ 73 | "/main.tf": ` 74 | resource "fake_resource" this { 75 | tags = null 76 | }`, 77 | })) 78 | defer stub.Reset() 79 | readFile, diag := hclsyntax.ParseConfig([]byte(c.cfg), "test.hcl", hcl.InitialPos) 80 | require.Falsef(t, diag.HasErrors(), diag.Error()) 81 | writeFile, diag := hclwrite.ParseConfig([]byte(c.cfg), "test.hcl", hcl.InitialPos) 82 | require.Falsef(t, diag.HasErrors(), diag.Error()) 83 | hclBlock := golden.NewHclBlock(readFile.Body.(*hclsyntax.Body).Blocks[0], writeFile.Body().Blocks()[0], nil) 84 | cfg, err := pkg.NewMetaProgrammingTFConfig(&pkg.TerraformModuleRef{ 85 | Dir: "/", 86 | AbsDir: "/", 87 | }, nil, []*golden.HclBlock{hclBlock}, nil, context.TODO()) 88 | require.NoError(t, err) 89 | plan, err := pkg.RunMetaProgrammingTFPlan(cfg) 90 | require.NoError(t, err) 91 | require.NotEmpty(t, plan.String()) 92 | require.NoError(t, plan.Apply()) 93 | tfFile, err := afero.ReadFile(filesystem.Fs, "/main.tf") 94 | require.NoError(t, err) 95 | actual := string(tfFile) 96 | assert.Equal(t, formatHcl(c.expectedConcatedBlock), formatHcl(actual)) 97 | }) 98 | } 99 | } 100 | -------------------------------------------------------------------------------- /cmd/transform.go: -------------------------------------------------------------------------------- 1 | package cmd 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "github.com/Azure/golden" 7 | "github.com/Azure/mapotf/pkg" 8 | "github.com/Azure/mapotf/pkg/backup" 9 | "github.com/spf13/cobra" 10 | "os" 11 | ) 12 | 13 | func NewTransformCmd() *cobra.Command { 14 | recursive := false 15 | 16 | transformCmd := &cobra.Command{ 17 | Use: "transform", 18 | Short: "Apply the transforms, mapotf transform [-r] --tf-dir [] --mptf-dir [path to config files], support mutilple mptf dirs", 19 | FParseErrWhitelist: cobra.FParseErrWhitelist{ 20 | UnknownFlags: true, 21 | }, 22 | RunE: func(cmd *cobra.Command, args []string) 
error { 23 | _, err := transform(recursive, cmd.Context()) 24 | return err 25 | }, 26 | } 27 | 28 | transformCmd.Flags().BoolVarP(&recursive, "recursive", "r", false, "Apply transforms to all modules or not, default to the root module only.") 29 | return transformCmd 30 | } 31 | 32 | func transform(recursive bool, ctx context.Context) ([]func(), error) { 33 | var restore []func() 34 | varFlags, err := varFlags(os.Args) 35 | if err != nil { 36 | return nil, err 37 | } 38 | rootMod, err := pkg.NewTerraformRootModuleRef(cf.tfDir) 39 | if err != nil { 40 | return nil, err 41 | } 42 | moduleRefs := []*pkg.TerraformModuleRef{ 43 | rootMod, 44 | } 45 | if recursive { 46 | modulePaths, err := pkg.ModuleRefs(cf.tfDir) 47 | if err != nil { 48 | return nil, err 49 | } 50 | moduleRefs = modulePaths 51 | } 52 | for _, moduleRef := range moduleRefs { 53 | d := moduleRef 54 | err = backup.BackupFolder(d.AbsDir) 55 | restore = append(restore, func() { 56 | _ = backup.Reset(d.AbsDir) 57 | }) 58 | if err != nil { 59 | return restore, err 60 | } 61 | } 62 | var mptfDirs []string 63 | for _, dir := range cf.mptfDirs { 64 | localizedDir, dispose, err := localizeConfigFolder(dir, ctx) 65 | if err != nil { 66 | return restore, err 67 | } 68 | if dispose != nil { 69 | defer dispose() 70 | } 71 | mptfDirs = append(mptfDirs, localizedDir) 72 | } 73 | for _, mptfDir := range mptfDirs { 74 | for _, tfDir := range moduleRefs { 75 | hclBlocks, err := pkg.LoadMPTFHclBlocks(false, mptfDir) 76 | if err != nil { 77 | return nil, err 78 | } 79 | err = applyTransform(tfDir, hclBlocks, varFlags, ctx) 80 | if err != nil { 81 | return nil, err 82 | } 83 | } 84 | } 85 | fmt.Println("Transforms applied successfully.") 86 | return restore, nil 87 | } 88 | 89 | func applyTransform(m *pkg.TerraformModuleRef, hclBlocks []*golden.HclBlock, varFlags []golden.CliFlagAssignedVariables, ctx context.Context) error { 90 | cfg, err := pkg.NewMetaProgrammingTFConfig(m, &cf.tfDir, hclBlocks, varFlags, ctx) 91 | if err 
!= nil { 92 | return err 93 | } 94 | plan, err := pkg.RunMetaProgrammingTFPlan(cfg) 95 | if err != nil { 96 | return err 97 | } 98 | if len(plan.Transforms) == 0 { 99 | fmt.Println("No transforms to apply.") 100 | return nil 101 | } 102 | fmt.Println(plan.String()) 103 | err = plan.Apply() 104 | if err != nil { 105 | return fmt.Errorf("error applying plan: %s", err.Error()) 106 | } 107 | return nil 108 | } 109 | 110 | func init() { 111 | rootCmd.AddCommand(NewTransformCmd()) 112 | } 113 | -------------------------------------------------------------------------------- /pkg/data_local_test.go: -------------------------------------------------------------------------------- 1 | package pkg_test 2 | 3 | import ( 4 | "context" 5 | "testing" 6 | 7 | "github.com/Azure/golden" 8 | "github.com/Azure/mapotf/pkg" 9 | filesystem "github.com/Azure/mapotf/pkg/fs" 10 | "github.com/Azure/mapotf/pkg/terraform" 11 | "github.com/prashantv/gostub" 12 | "github.com/stretchr/testify/assert" 13 | "github.com/stretchr/testify/require" 14 | "github.com/zclconf/go-cty/cty" 15 | ) 16 | 17 | func TestDataLocal_QueryLocalBlocks(t *testing.T) { 18 | cases := []struct { 19 | desc string 20 | tfCode string 21 | name string 22 | expected cty.Value 23 | }{ 24 | { 25 | desc: "single local block", 26 | tfCode: `locals { 27 | foo = "bar" 28 | }`, 29 | expected: cty.ObjectVal(map[string]cty.Value{ 30 | "foo": cty.StringVal("\"bar\""), 31 | }), 32 | }, 33 | { 34 | desc: "multiple local blocks", 35 | tfCode: `locals { 36 | foo = "bar" 37 | baz = 123 38 | }`, 39 | expected: cty.ObjectVal(map[string]cty.Value{ 40 | "foo": cty.StringVal("\"bar\""), 41 | "baz": cty.StringVal("123"), 42 | }), 43 | }, 44 | { 45 | desc: "filter by name", 46 | tfCode: `locals { 47 | foo = "bar" 48 | baz = 123 49 | }`, 50 | name: "foo", 51 | expected: cty.ObjectVal(map[string]cty.Value{ 52 | "foo": cty.StringVal("\"bar\""), 53 | }), 54 | }, 55 | { 56 | desc: "empty locals", 57 | tfCode: `resource "fake_resource" "test" { 58 | id 
= 123 59 | }`, 60 | expected: cty.EmptyObjectVal, 61 | }, 62 | } 63 | for _, c := range cases { 64 | t.Run(c.desc, func(t *testing.T) { 65 | stub := gostub.Stub(&filesystem.Fs, fakeFs(map[string]string{ 66 | "/main.tf": c.tfCode, 67 | })).Stub(&terraform.RootBlockReflectionInformation, func(map[string]cty.Value, *terraform.RootBlock) {}) 68 | defer stub.Reset() 69 | cfg, err := pkg.NewMetaProgrammingTFConfig(&pkg.TerraformModuleRef{ 70 | Dir: "/", 71 | AbsDir: "/", 72 | }, nil, nil, nil, context.TODO()) 73 | require.NoError(t, err) 74 | 75 | // Use the config to create a DataLocal object 76 | data := &pkg.DataLocal{ 77 | BaseBlock: golden.NewBaseBlock(cfg, nil), 78 | ExpectedNameName: c.name, 79 | } 80 | 81 | err = data.ExecuteDuringPlan() 82 | require.NoError(t, err) 83 | 84 | result := golden.Value(data) 85 | 86 | expected := map[string]cty.Value{ 87 | "name": cty.StringVal(c.name), 88 | "result": c.expected, 89 | } 90 | assert.Equal(t, expected, result) 91 | }) 92 | } 93 | } 94 | 95 | func TestDataLocal_ComplexValues(t *testing.T) { 96 | stub := gostub.Stub(&filesystem.Fs, fakeFs(map[string]string{ 97 | "/main.tf": `locals { 98 | nested = { 99 | first = { 100 | second = "nested value" 101 | } 102 | } 103 | }`, 104 | })) 105 | defer stub.Reset() 106 | cfg, err := pkg.NewMetaProgrammingTFConfig(&pkg.TerraformModuleRef{ 107 | Dir: "/", 108 | AbsDir: "/", 109 | }, nil, nil, nil, context.TODO()) 110 | require.NoError(t, err) 111 | 112 | data := &pkg.DataLocal{ 113 | BaseBlock: golden.NewBaseBlock(cfg, nil), 114 | } 115 | 116 | err = data.ExecuteDuringPlan() 117 | require.NoError(t, err) 118 | 119 | result := data.Result.AsValueMap() 120 | require.Contains(t, result, "nested") 121 | nested := result["nested"].AsString() 122 | assert.Equal(t, `{ 123 | first = { 124 | second = "nested value" 125 | } 126 | }`, nested) 127 | } 128 | -------------------------------------------------------------------------------- /pkg/data_variable_test.go: 
-------------------------------------------------------------------------------- 1 | package pkg_test 2 | 3 | import ( 4 | "context" 5 | "testing" 6 | 7 | "github.com/Azure/golden" 8 | "github.com/Azure/mapotf/pkg" 9 | filesystem "github.com/Azure/mapotf/pkg/fs" 10 | "github.com/prashantv/gostub" 11 | "github.com/stretchr/testify/assert" 12 | "github.com/stretchr/testify/require" 13 | "github.com/zclconf/go-cty/cty" 14 | ) 15 | 16 | func TestDataVariable_ExecuteDuringPlan(t *testing.T) { 17 | cases := []struct { 18 | desc string 19 | tfCode string 20 | variableName string 21 | variableType string 22 | expectNoMatch bool 23 | expectedVariableName string 24 | expectedAttributes map[string]cty.Value 25 | }{ 26 | { 27 | desc: "single variable without type filter", 28 | tfCode: ` 29 | variable "example_var" { 30 | type = string 31 | default = "value" 32 | }`, 33 | expectedVariableName: "example_var", 34 | expectedAttributes: map[string]cty.Value{ 35 | "type": cty.StringVal("string"), 36 | "default": cty.StringVal(`"value"`), 37 | }, 38 | }, 39 | { 40 | desc: "filter by variable name", 41 | tfCode: ` 42 | variable "my_var" { 43 | type = string 44 | default = "hello" 45 | } 46 | 47 | variable "other_var" { 48 | type = number 49 | default = 42 50 | } 51 | `, 52 | variableName: "my_var", 53 | expectedVariableName: "my_var", 54 | expectedAttributes: map[string]cty.Value{ 55 | "type": cty.StringVal("string"), 56 | "default": cty.StringVal(`"hello"`), 57 | }, 58 | }, 59 | { 60 | desc: "filter by variable type", 61 | tfCode: ` 62 | variable "var1" { 63 | type = string 64 | } 65 | 66 | variable "var2" { 67 | type = number 68 | } 69 | `, 70 | variableType: "number", 71 | expectedVariableName: "var2", 72 | expectedAttributes: map[string]cty.Value{ 73 | "type": cty.StringVal("number"), 74 | }, 75 | }, 76 | { 77 | desc: "no match", 78 | tfCode: ` 79 | variable "var1" { 80 | type = string 81 | } 82 | `, 83 | expectNoMatch: true, 84 | variableType: "number", 85 | expectedAttributes: 
map[string]cty.Value{ 86 | "type": cty.StringVal("number"), 87 | }, 88 | }, 89 | } 90 | 91 | for _, c := range cases { 92 | t.Run(c.desc, func(t *testing.T) { 93 | stub := gostub.Stub(&filesystem.Fs, fakeFs(map[string]string{ 94 | "/main.tf": c.tfCode, 95 | })) 96 | defer stub.Reset() 97 | 98 | cfg, err := pkg.NewMetaProgrammingTFConfig(&pkg.TerraformModuleRef{ 99 | Dir: "/", 100 | AbsDir: "/", 101 | }, nil, nil, nil, context.TODO()) 102 | require.NoError(t, err) 103 | 104 | data := &pkg.DataVariable{ 105 | BaseBlock: golden.NewBaseBlock(cfg, nil), 106 | BaseData: &pkg.BaseData{}, 107 | ExpectedNameName: c.variableName, 108 | ExpectedType: c.variableType, 109 | } 110 | 111 | err = data.ExecuteDuringPlan() 112 | require.NoError(t, err) 113 | 114 | result := data.Result 115 | if c.expectNoMatch { 116 | assert.Equal(t, cty.ObjectVal(make(map[string]cty.Value)), result) 117 | return 118 | } 119 | object, ok := result.AsValueMap()[c.expectedVariableName] 120 | require.True(t, ok) 121 | for k, expected := range c.expectedAttributes { 122 | attr := object.GetAttr(k) 123 | assert.Equal(t, expected, attr) 124 | } 125 | }) 126 | } 127 | } 128 | -------------------------------------------------------------------------------- /pkg/terraform_module_ref.go: -------------------------------------------------------------------------------- 1 | package pkg 2 | 3 | import ( 4 | "fmt" 5 | "github.com/Azure/mapotf/pkg/terraform" 6 | "github.com/go-git/go-git/v5" 7 | "os" 8 | "path/filepath" 9 | ) 10 | 11 | var AbsDir func(string) (string, error) = filepath.Abs 12 | 13 | type TerraformModuleRef struct { 14 | Key string `json:"Key"` 15 | Source string `json:"Source"` 16 | Dir string `json:"Dir"` 17 | AbsDir string 18 | Version string `json:"Version"` 19 | GitHash string 20 | } 21 | 22 | func NewTerraformRootModuleRef(dir string) (*TerraformModuleRef, error) { 23 | return NewTerraformModuleRef(dir, "", "", "") 24 | } 25 | 26 | func NewTerraformModuleRef(dir, key, source, version string) 
(*TerraformModuleRef, error) { 27 | m := &TerraformModuleRef{ 28 | Key: key, 29 | Source: source, 30 | Dir: dir, 31 | Version: version, 32 | } 33 | if err := m.LoadAbsDir(); err != nil { 34 | return nil, err 35 | } 36 | m.LoadGitHash() 37 | return m, nil 38 | } 39 | 40 | func (m *TerraformModuleRef) Load() error { 41 | if err := m.LoadAbsDir(); err != nil { 42 | return err 43 | } 44 | m.LoadGitHash() 45 | return nil 46 | } 47 | 48 | func (m *TerraformModuleRef) LoadGitHash() { 49 | h, err := gitHash(m.AbsDir) 50 | if err != nil { 51 | //TODO:log error 52 | return 53 | } 54 | m.GitHash = h 55 | } 56 | 57 | func (m *TerraformModuleRef) LoadAbsDir() error { 58 | absDir, err := AbsDir(m.Dir) 59 | if err != nil { 60 | return fmt.Errorf("error getting absolute path for %s: %+v", m.Dir, err) 61 | } 62 | m.AbsDir = absDir 63 | return nil 64 | } 65 | 66 | func (r *TerraformModuleRef) toTerraformPkgType() terraform.ModuleRef { 67 | return terraform.ModuleRef{ 68 | Key: r.Key, 69 | Source: r.Source, 70 | Dir: r.Dir, 71 | AbsDir: r.AbsDir, 72 | Version: r.Version, 73 | GitHash: r.GitHash, 74 | } 75 | } 76 | 77 | func gitHash(dir string) (string, error) { 78 | gitPath, err := lookupGitPath(dir) 79 | if err != nil { 80 | return "", fmt.Errorf("cannot lookup git path: %+v", err) 81 | } 82 | r, err := git.PlainOpen(filepath.Dir(gitPath)) 83 | if err != nil { 84 | return "", err 85 | } 86 | ref, err := r.Head() 87 | if err != nil { 88 | return "", err 89 | } 90 | commit, err := r.CommitObject(ref.Hash()) 91 | if err != nil { 92 | return "", err 93 | } 94 | return commit.Hash.String(), nil 95 | } 96 | 97 | func lookupGitPath(path string) (string, error) { 98 | path, err := filepath.Abs(path) 99 | if err != nil { 100 | return "", err 101 | } 102 | fi, err := os.Stat(filepath.Join(path, ".git")) 103 | if err != nil { 104 | if !os.IsNotExist(err) { 105 | return "", err 106 | } 107 | isBare, err := isBareRepo(path) 108 | if err != nil { 109 | return "", err 110 | } 111 | if isBare { 112 
| return path, nil 113 | } 114 | parent := filepath.Dir(path) 115 | if parent == path { 116 | return "", fmt.Errorf(".git not found") 117 | } 118 | return lookupGitPath(parent) 119 | } 120 | if !fi.IsDir() { 121 | return "", fmt.Errorf(".git exist but is not a directory") 122 | } 123 | return filepath.Join(path, ".git"), nil 124 | } 125 | 126 | func isBareRepo(path string) (bool, error) { 127 | markers := []string{"HEAD", "objects", "refs"} 128 | for _, marker := range markers { 129 | _, err := os.Stat(filepath.Join(path, marker)) 130 | if err != nil && !os.IsNotExist(err) { 131 | return false, err 132 | } 133 | if err != nil { 134 | return false, nil 135 | } 136 | } 137 | 138 | return true, nil 139 | } 140 | -------------------------------------------------------------------------------- /pkg/transform_remove_block_test.go: -------------------------------------------------------------------------------- 1 | package pkg_test 2 | 3 | import ( 4 | "context" 5 | "testing" 6 | 7 | "github.com/Azure/golden" 8 | "github.com/Azure/mapotf/pkg" 9 | filesystem "github.com/Azure/mapotf/pkg/fs" 10 | "github.com/hashicorp/hcl/v2" 11 | "github.com/hashicorp/hcl/v2/hclsyntax" 12 | "github.com/hashicorp/hcl/v2/hclwrite" 13 | "github.com/prashantv/gostub" 14 | "github.com/spf13/afero" 15 | "github.com/stretchr/testify/assert" 16 | "github.com/stretchr/testify/require" 17 | ) 18 | 19 | func TestRemoveBlock(t *testing.T) { 20 | cases := []struct { 21 | desc string 22 | mptf string 23 | tfConfig string 24 | expected string 25 | wantErr bool 26 | }{ 27 | { 28 | desc: "remove_resource", 29 | mptf: ` 30 | transform "remove_block" this { 31 | target_block_address = "resource.fake_resource.this" 32 | } 33 | `, 34 | tfConfig: ` 35 | resource "fake_resource" "this" { 36 | attr = "value" 37 | } 38 | 39 | resource "other_resource" "keep" { 40 | attr = "keep" 41 | } 42 | `, 43 | expected: ` 44 | resource "other_resource" "keep" { 45 | attr = "keep" 46 | } 47 | `, 48 | }, 49 | { 50 | desc: 
"remove_data_block", 51 | mptf: ` 52 | transform "remove_block" this { 53 | target_block_address = "data.fake_data.this" 54 | } 55 | `, 56 | tfConfig: ` 57 | data "fake_data" "this" { 58 | attr = "value" 59 | } 60 | 61 | data "fake_data" "keep" { 62 | attr = "keep" 63 | } 64 | `, 65 | expected: ` 66 | data "fake_data" "keep" { 67 | attr = "keep" 68 | } 69 | `, 70 | }, 71 | { 72 | desc: "remove_module_block", 73 | mptf: ` 74 | transform "remove_block" this { 75 | target_block_address = "module.test_module" 76 | } 77 | `, 78 | tfConfig: ` 79 | module "test_module" { 80 | source = "./modules/test" 81 | attr = "value" 82 | } 83 | 84 | module "keep_module" { 85 | source = "./modules/keep" 86 | attr = "keep" 87 | } 88 | `, 89 | expected: ` 90 | module "keep_module" { 91 | source = "./modules/keep" 92 | attr = "keep" 93 | } 94 | `, 95 | }, 96 | { 97 | desc: "block_not_found", 98 | mptf: ` 99 | transform "remove_block" this { 100 | target_block_address = "resource.non_existent.block" 101 | } 102 | `, 103 | tfConfig: ` 104 | resource "fake_resource" "this" { 105 | attr = "value" 106 | } 107 | `, 108 | expected: ` 109 | resource "fake_resource" "this" { 110 | attr = "value" 111 | } 112 | `, 113 | wantErr: true, 114 | }, 115 | } 116 | 117 | for _, c := range cases { 118 | t.Run(c.desc, func(t *testing.T) { 119 | stub := gostub.Stub(&filesystem.Fs, fakeFs(map[string]string{ 120 | "/main.tf": c.tfConfig, 121 | })) 122 | defer stub.Reset() 123 | 124 | readFile, diag := hclsyntax.ParseConfig([]byte(c.mptf), "test.hcl", hcl.InitialPos) 125 | require.Falsef(t, diag.HasErrors(), diag.Error()) 126 | writeFile, diag := hclwrite.ParseConfig([]byte(c.mptf), "test.hcl", hcl.InitialPos) 127 | require.Falsef(t, diag.HasErrors(), diag.Error()) 128 | hclBlock := golden.NewHclBlock(readFile.Body.(*hclsyntax.Body).Blocks[0], writeFile.Body().Blocks()[0], nil) 129 | cfg, err := pkg.NewMetaProgrammingTFConfig(&pkg.TerraformModuleRef{ 130 | Dir: "/", 131 | AbsDir: "/", 132 | }, nil, 
[]*golden.HclBlock{hclBlock}, nil, context.TODO()) 133 | require.NoError(t, err) 134 | plan, err := pkg.RunMetaProgrammingTFPlan(cfg) 135 | require.NoError(t, err) 136 | 137 | err = plan.Apply() 138 | if c.wantErr { 139 | require.Error(t, err) 140 | return 141 | } 142 | require.NoError(t, err) 143 | 144 | after, err := afero.ReadFile(filesystem.Fs, "/main.tf") 145 | require.NoError(t, err) 146 | expected := formatHcl(c.expected) 147 | actual := formatHcl(string(after)) 148 | assert.Equal(t, expected, actual) 149 | }) 150 | } 151 | } 152 | -------------------------------------------------------------------------------- /cmd/terraform.go: -------------------------------------------------------------------------------- 1 | package cmd 2 | 3 | import ( 4 | "github.com/spf13/cobra" 5 | ) 6 | 7 | type terraformCommand struct { 8 | d string 9 | transform bool 10 | } 11 | 12 | var terraformCmds = map[string]terraformCommand{ 13 | "init": { 14 | d: "Prepare your working directory for other commands", 15 | transform: false, 16 | }, 17 | "plan": { 18 | d: "Generates a plan based on the specified configuration", 19 | transform: true, 20 | }, 21 | "apply": { 22 | d: "Create or update infrastructure", 23 | transform: true, 24 | }, 25 | "destroy": { 26 | d: "Destroy previously-created infrastructure", 27 | transform: true, 28 | }, 29 | "console": { 30 | d: "Try Terraform expressions at an interactive command prompt", 31 | transform: true, 32 | }, 33 | "validate": { 34 | d: "Check whether the configuration is valid", 35 | transform: true, 36 | }, 37 | "fmt": { 38 | d: "Reformat your configuration in the standard style", 39 | transform: false, 40 | }, 41 | "force-unlock": { 42 | d: "Release a stuck lock on the current workspace", 43 | transform: true, 44 | }, 45 | "get": { 46 | d: "Install or upgrade remote Terraform modules", 47 | transform: false, 48 | }, 49 | "graph": { 50 | d: "Generate a Graphviz graph of the steps in an operation", 51 | transform: true, 52 | }, 53 | "import": 
{ 54 | d: "Associate existing infrastructure with a Terraform resource", 55 | transform: true, 56 | }, 57 | "login": { 58 | d: "Obtain and save credentials for a remote host", 59 | transform: false, 60 | }, 61 | "logout": { 62 | d: "Remove locally-stored credentials for a remote host", 63 | transform: false, 64 | }, 65 | "metadata": { 66 | d: "Metadata related commands", 67 | transform: false, 68 | }, 69 | "output": { 70 | d: "Show output values from your root module", 71 | transform: true, 72 | }, 73 | "providers": { 74 | d: "Show the providers required for this configuration", 75 | transform: true, 76 | }, 77 | "refresh": { 78 | d: "Update the state to match remote systems", 79 | transform: true, 80 | }, 81 | "show": { 82 | d: "Show the current state or a saved plan", 83 | transform: true, 84 | }, 85 | "state": { 86 | d: "Advanced state management", 87 | transform: true, 88 | }, 89 | "taint": { 90 | d: "Mark a resource instance as not fully functional", 91 | transform: true, 92 | }, 93 | "test": { 94 | d: "Execute integration tests for Terraform modules", 95 | transform: true, 96 | }, 97 | "untaint": { 98 | d: "Remove the 'tainted' state from a resource instance", 99 | transform: true, 100 | }, 101 | "version": { 102 | d: "Show the current Terraform version", 103 | transform: false, 104 | }, 105 | "workspace": { 106 | d: "Workspace management", 107 | transform: false, 108 | }, 109 | } 110 | 111 | var terraformCommands []*cobra.Command 112 | 113 | func init() { 114 | for key, s := range terraformCmds { 115 | info := s 116 | cmd := key 117 | recursive := false 118 | run := wrapTerraformCommand(cf.tfDir, cmd) 119 | if info.transform { 120 | run = wrapTerraformCommandWithEphemeralTransform(cf.tfDir, cmd, &recursive) 121 | } 122 | c := &cobra.Command{ 123 | Use: cmd, 124 | Short: "[terraform]: " + info.d, 125 | FParseErrWhitelist: cobra.FParseErrWhitelist{ 126 | UnknownFlags: true, 127 | }, 128 | RunE: run, 129 | } 130 | 131 | c.Flags().BoolVarP(&recursive, 
"recursive", "r", false, "With transforms to all modules or not, default to the root module only.") 132 | rootCmd.AddCommand(c) 133 | terraformCommands = append(terraformCommands, c) 134 | } 135 | } 136 | -------------------------------------------------------------------------------- /pkg/data_provider_schema.go: -------------------------------------------------------------------------------- 1 | package pkg 2 | 3 | import ( 4 | "context" 5 | "encoding/json" 6 | "fmt" 7 | "github.com/Azure/golden" 8 | "github.com/hashicorp/go-multierror" 9 | tfjson "github.com/hashicorp/terraform-json" 10 | "github.com/zclconf/go-cty/cty" 11 | "github.com/zclconf/go-cty/cty/function/stdlib" 12 | ) 13 | 14 | var _ Data = &ProviderSchemaData{} 15 | var SchemaRetrieverFactory = func(ctx context.Context) TerraformProviderSchemaRetriever { 16 | return NewTerraformCliProviderSchemaRetriever(ctx) 17 | } 18 | 19 | type ProviderSchemaData struct { 20 | *BaseData 21 | *golden.BaseBlock 22 | 23 | Source string `hcl:"provider_source"` 24 | Version string `hcl:"provider_version"` 25 | Resources cty.Value `attribute:"resources"` 26 | } 27 | 28 | func (r *ProviderSchemaData) Type() string { 29 | return "provider_schema" 30 | } 31 | 32 | func (r *ProviderSchemaData) ExecuteDuringPlan() error { 33 | schemas, err := SchemaRetrieverFactory(r.Context()).Get(r.Source, r.Version) 34 | if err != nil { 35 | return fmt.Errorf("cannot read `terraform prviders schema` for source %s with version %s: %+v", r.Source, r.Version, err) 36 | } 37 | r.Resources, err = r.Convert(schemas.ResourceSchemas) 38 | return err 39 | } 40 | 41 | func (r *ProviderSchemaData) Convert(schemas map[string]*tfjson.Schema) (cty.Value, error) { 42 | resourcesMap := make(map[string]cty.Value) 43 | var convertErr error 44 | 45 | for resourceName, schema := range schemas { 46 | attributesMap, err := r.convertAttributeSchemas(schema.Block.Attributes) 47 | if err != nil { 48 | convertErr = multierror.Append(err, fmt.Errorf("cannot convert 
attribute schemas for resource %s: %+v", resourceName, err)) 49 | continue 50 | } 51 | nestedBlocksMap, err := r.convertNestedBlockSchemas(schema.Block.NestedBlocks) 52 | if err != nil { 53 | convertErr = multierror.Append(err, fmt.Errorf("cannot convert nested block schemas for resource %s: %+v", resourceName, err)) 54 | continue 55 | } 56 | resourcesMap[resourceName] = cty.ObjectVal(map[string]cty.Value{ 57 | "version": cty.NumberUIntVal(schema.Version), 58 | "block": cty.ObjectVal(map[string]cty.Value{ 59 | "attributes": cty.ObjectVal(attributesMap), 60 | "block_types": cty.ObjectVal(nestedBlocksMap), 61 | "description": cty.StringVal(schema.Block.Description), 62 | }), 63 | }) 64 | } 65 | if convertErr != nil { 66 | return cty.Value{}, convertErr 67 | } 68 | 69 | return cty.ObjectVal(resourcesMap), nil 70 | } 71 | 72 | func (r *ProviderSchemaData) convertAttributeSchemas(attrs map[string]*tfjson.SchemaAttribute) (map[string]cty.Value, error) { 73 | attributesMap := make(map[string]cty.Value) 74 | 75 | for attrName, attr := range attrs { 76 | marshal, err := json.Marshal(attr) 77 | if err != nil { 78 | return nil, fmt.Errorf("cannot marshal attribute schema for %s: %+v", attrName, err) 79 | } 80 | attrObj, err := stdlib.JSONDecode(cty.StringVal(string(marshal))) 81 | if err != nil { 82 | return nil, fmt.Errorf("cannot decode attribute schema for %s: %+v", attrName, err) 83 | } 84 | attributesMap[attrName] = attrObj 85 | } 86 | return attributesMap, nil 87 | } 88 | 89 | func (r *ProviderSchemaData) convertNestedBlockSchemas(blocks map[string]*tfjson.SchemaBlockType) (map[string]cty.Value, error) { 90 | nestedBlocksMap := make(map[string]cty.Value) 91 | 92 | for blockName, block := range blocks { 93 | marshal, err := json.Marshal(block) 94 | if err != nil { 95 | return nil, fmt.Errorf("cannot marshal block schema for %s: %+v", blockName, err) 96 | } 97 | nestedBlocksMap[blockName], err = stdlib.JSONDecode(cty.StringVal(string(marshal))) 98 | if err != nil { 99 | 
return nil, fmt.Errorf("cannot decode block schema for %s: %+v", blockName, err) 100 | } 101 | } 102 | return nestedBlocksMap, nil 103 | } 104 | -------------------------------------------------------------------------------- /doc/d/resource.md: -------------------------------------------------------------------------------- 1 | # Data "resource" Block 2 | 3 | The `data "resource"` block is used to query and retrieve `resource` blocks from a Terraform configuration. This block allows you to filter and collect resource blocks based on specific criteria such as the resource type, and whether `count` or `for_each` is used. 4 | 5 | ## Arguments 6 | 7 | - `resource_type`: This optional argument specifies the type of the resource to filter. It is a string attribute. 8 | - `use_count`: This optional argument is a boolean that, when set to `true`, filters resource blocks that use the `count` attribute. The default value is `false`. 9 | - `use_for_each`: This optional argument is a boolean that, when set to `true`, filters resource blocks that use the `for_each` attribute. The default value is `false`. 10 | 11 | ## Attributes 12 | 13 | - `result`: This attribute contains the filtered resource blocks, all expressions assigned to arguments are evaluated as strings. 14 | 15 | ## Example - Querying Resource Blocks 16 | 17 | Here is an example of how to use the `data "resource"` block to query resource blocks of a specific type: 18 | 19 | ```terraform 20 | data "resource" "aks" { 21 | resource_type = "azurerm_kubernetes_cluster" 22 | } 23 | ``` 24 | 25 | In this example, the `data "resource"` block queries all resource blocks of type `azurerm_kubernetes_cluster` and stores the result in the `example` data source. 
26 | 27 | The following Mapotf expressions could help you to access the results: 28 | 29 | ```terraform 30 | locals { 31 | kubernetes_cluster_resource_blocks = flatten([for _, blocks in flatten(data.resource.aks.result) : [for b in blocks : b]]) 32 | kubernetes_cluster_resource_blocks_map = { for block in local.kubernetes_cluster_resource_blocks : block.mptf.block_address => block } 33 | } 34 | ``` 35 | 36 | Assuming we have the following Terraform configuration: 37 | 38 | ```terraform 39 | resource "azurerm_kubernetes_cluster" "example" { 40 | name = "example-aks1" 41 | location = azurerm_resource_group.example.location 42 | resource_group_name = azurerm_resource_group.example.name 43 | dns_prefix = "exampleaks1" 44 | 45 | default_node_pool { 46 | name = "default" 47 | node_count = 1 48 | vm_size = "Standard_D2_v2" 49 | } 50 | 51 | identity { 52 | type = "SystemAssigned" 53 | } 54 | 55 | tags = { 56 | Environment = "Production" 57 | } 58 | } 59 | ``` 60 | 61 | In Mapotf, when you refer to `local.kubernetes_cluster_resource_blocks`, the result would be a `list(object)` that contains one element corresponding to the `resource.azurerm_kubernetes_cluster.example` block in the Terraform config, and `local.kubernetes_cluster_resource_blocks_map` converts this list into a `map(object)`, with one element and `"resource.azurerm_kubernetes_cluster.example"` as the key. 62 | 63 | ## Example - Filtering Resource Blocks with `count` 64 | 65 | Here is an example of how to use the `data "resource"` block to filter resource blocks that use the `count` attribute: 66 | 67 | ```terraform 68 | data "resource" "example" { 69 | resource_type = "fake_resource" 70 | use_count = true 71 | } 72 | ``` 73 | 74 | In this example, the `data "resource"` block filters resource blocks of type `fake_resource` that use the `count` attribute and stores the result in the `example` data source. 
75 | 76 | ## Example - Filtering Resource Blocks with `for_each` 77 | 78 | Here is an example of how to use the `data "resource"` block to filter resource blocks that use the `for_each` attribute: 79 | 80 | ```terraform 81 | data "resource" "example" { 82 | resource_type = "fake_resource" 83 | use_for_each = true 84 | } 85 | ``` 86 | 87 | In this example, the `data "resource"` block filters resource blocks of type `fake_resource` that use the `for_each` attribute and stores the result in the `example` data source. 88 | 89 | The data source's results would be aggregated by resource type first, then by the block labels. -------------------------------------------------------------------------------- /doc/d/data.md: -------------------------------------------------------------------------------- 1 | # Data "data" Block 2 | 3 | The `data` data block is used to query and retrieve `data` blocks from a Terraform configuration. This block allows you to filter and collect data blocks based on specific criteria such as the data source type, and whether `count` or `for_each` is used. 4 | 5 | ## Arguments 6 | 7 | - `data_source_type`: This optional argument specifies the type of the data source to filter. It is a string attribute. 8 | - `use_count`: This optional argument is a boolean that, when set to `true`, filters data blocks that use the `count` attribute. The default value is `false`. 9 | - `use_for_each`: This optional argument is a boolean that, when set to `true`, filters data blocks that use the `for_each` attribute. The default value is `false`. 10 | 11 | ## Attributes 12 | 13 | - `result`: This attribute contains the filtered data blocks, all expressions assigned to arguments are evaluated as strings. 
14 | 15 | ## Example - Querying Data Blocks 16 | 17 | Here is an example of how to use the `data "data"` block to query data blocks of a specific type: 18 | 19 | ```terraform 20 | data "data" "example" { 21 | data_source_type = "azurerm_client_config" 22 | } 23 | ``` 24 | 25 | In this example, the `data "data"` block queries all data blocks of type `azurerm_client_config` and stores the result in the `example` data source. 26 | 27 | ## Example - Filtering Data Blocks with `count` 28 | 29 | Here is an example of how to use the `data "data"` block to filter data blocks that use the `count` attribute: 30 | 31 | ```terraform 32 | data "data" "example" { 33 | data_source_type = "fake_data" 34 | use_count = true 35 | } 36 | ``` 37 | 38 | In this example, the `data "data"` block filters data blocks of type `fake_data` that use the `count` attribute and stores the result in the `example` data source. 39 | 40 | ## Example - Filtering Data Blocks with `for_each` 41 | 42 | Here is an example of how to use the `data "data"` block to filter data blocks that use the `for_each` attribute: 43 | 44 | ```terraform 45 | data "data" "example" { 46 | data_source_type = "fake_data" 47 | use_for_each = true 48 | } 49 | ``` 50 | 51 | In this example, the `data "data"` block filters data blocks of type `fake_data` that use the `for_each` attribute and stores the result in the `example` data source. 
52 | 53 | ## Example - Retrieving Results 54 | 55 | Assuming we have such Terraform config: 56 | 57 | ```terraform 58 | data "azurerm_resource_group" "example" { 59 | name = "existing" 60 | } 61 | ``` 62 | 63 | And our Mapotf config is: 64 | 65 | ```terraform 66 | data "data" "example" { 67 | data_source_type = "azurerm_resource_group" 68 | } 69 | ``` 70 | 71 | Here is an example of how to retrieve the results from the `data "data"` block: 72 | 73 | ```terraform 74 | locals { 75 | azurerm_resource_group_name_exp = data.data.example.result 76 | } 77 | ``` 78 | 79 | In this example, the object stored in `local.azurerm_resource_group_name_exp` looks like the following hcl object: 80 | 81 | ```text 82 | { 83 | azurerm_resource_group: { 84 | example: { 85 | mptf: { 86 | block_address: data.azurerm_resource_group.example, 87 | block_labels: [ 88 | azurerm_resource_group, 89 | example 90 | ], 91 | block_type: data, 92 | module: { 93 | abs_dir: xxx, 94 | dir: ., 95 | git_hash: xxx, 96 | key:, 97 | source:, 98 | version: 99 | }, 100 | range: { 101 | end_column: 2, 102 | end_line: 29, 103 | file_name: main.tf, 104 | start_column: 1, 105 | start_line: 27 106 | }, 107 | terraform_address: data.azurerm_resource_group.example 108 | }, 109 | name: "existing" 110 | } 111 | } 112 | } 113 | ``` 114 | 115 | The results would be aggregated by data type first, then by the block labels. 
116 | -------------------------------------------------------------------------------- /pkg/transform_rename_block_element.go: -------------------------------------------------------------------------------- 1 | package pkg 2 | 3 | import ( 4 | "strings" 5 | 6 | "github.com/Azure/golden" 7 | "github.com/Azure/mapotf/pkg/terraform" 8 | "github.com/hashicorp/hcl/v2/hclsyntax" 9 | "github.com/hashicorp/hcl/v2/hclwrite" 10 | ) 11 | 12 | var _ Transform = &RenameAttributeOrNestedBlockTransform{} 13 | 14 | type Rename struct { 15 | ResourceType string `hcl:"resource_type"` 16 | // We'll deprecate `attribute_path` in favor of `element_path`. 17 | AttributePath []string `hcl:"attribute_path,optional" validate:"excluded_with=element_path"` 18 | ElementPath []string `hcl:"element_path,optional" validate:"excluded_with=attribute_path,required_without=attribute_path"` 19 | NewName string `hcl:"new_name" validate:"required"` 20 | RenameOnlyNewNameAbsent bool `hcl:"rename_only_new_name_absent,optional" default:"false"` 21 | } 22 | 23 | type RenameAttributeOrNestedBlockTransform struct { 24 | *golden.BaseBlock 25 | *BaseTransform 26 | Renames []Rename `hcl:"rename,block"` 27 | } 28 | 29 | func (r *RenameAttributeOrNestedBlockTransform) Type() string { 30 | return "rename_block_element" 31 | } 32 | 33 | func (r *RenameAttributeOrNestedBlockTransform) Apply() error { 34 | cfg := r.Config().(*MetaProgrammingTFConfig) 35 | for _, rename := range r.Renames { 36 | r.applyRename(rename, cfg) 37 | } 38 | return nil 39 | } 40 | 41 | func (r *RenameAttributeOrNestedBlockTransform) applyRename(rename Rename, cfg *MetaProgrammingTFConfig) { 42 | resourceType := rename.ResourceType 43 | blocks := cfg.resourceBlocks 44 | if strings.HasPrefix(resourceType, "data.") { 45 | resourceType = strings.TrimPrefix(resourceType, "data.") 46 | blocks = cfg.dataBlocks 47 | } 48 | var matchedBlocks []*terraform.RootBlock 49 | for _, b := range blocks { 50 | if b.Labels[0] == resourceType { 51 | 
matchedBlocks = append(matchedBlocks, b) 52 | } 53 | } 54 | path := rename.AttributePath 55 | if len(path) == 0 { 56 | path = rename.ElementPath 57 | } 58 | r.rename(castBlockSlice(matchedBlocks), path, rename.NewName, rename.RenameOnlyNewNameAbsent) 59 | } 60 | 61 | func (r *RenameAttributeOrNestedBlockTransform) rename(blocks []terraform.Block, attributePath []string, newName string, renameOnlyNewNameAbsent bool) { 62 | if len(attributePath) == 1 { 63 | old := attributePath[0] 64 | for _, b := range blocks { 65 | body := b.WriteBody() 66 | attributes := body.Attributes() 67 | attr, ok := attributes[old] 68 | if ok { 69 | if _, newNameExist := attributes[newName]; !newNameExist || !renameOnlyNewNameAbsent { 70 | body.SetAttributeRaw(newName, attr.Expr().BuildTokens(nil)) 71 | } 72 | body.RemoveAttribute(old) 73 | continue 74 | } 75 | for _, nb := range body.Blocks() { 76 | if r.nestedBlockType(nb) != old { 77 | continue 78 | } 79 | r.setNestedBlockType(nb, old, newName) 80 | } 81 | } 82 | return 83 | } 84 | nbName := attributePath[0] 85 | for _, b := range blocks { 86 | nestedBlocks, ok := b.GetNestedBlocks()[nbName] 87 | if !ok { 88 | continue 89 | } 90 | r.rename(castBlockSlice(nestedBlocks), attributePath[1:], newName, false) 91 | } 92 | } 93 | 94 | func (r *RenameAttributeOrNestedBlockTransform) nestedBlockType(nb *hclwrite.Block) string { 95 | if nb.Type() == "dynamic" { 96 | return nb.Labels()[0] 97 | } 98 | return nb.Type() 99 | } 100 | 101 | func (r *RenameAttributeOrNestedBlockTransform) setNestedBlockType(nb *hclwrite.Block, oldName, newName string) { 102 | if nb.Type() == "dynamic" { 103 | nb.SetLabels([]string{newName}) 104 | nb.Body().SetAttributeRaw("iterator", hclwrite.Tokens{&hclwrite.Token{ 105 | Type: hclsyntax.TokenIdent, 106 | Bytes: []byte(oldName), 107 | }}) 108 | } else { 109 | nb.SetType(newName) 110 | } 111 | } 112 | 113 | func castBlockSlice[T terraform.Block](s []T) []terraform.Block { 114 | ret := make([]terraform.Block, len(s)) 115 | 
for i, v := range s { 116 | ret[i] = v 117 | } 118 | return ret 119 | } 120 | -------------------------------------------------------------------------------- /doc/t/new_block.md: -------------------------------------------------------------------------------- 1 | # `new_block` Transform Block 2 | 3 | The `new_block` transform block is a powerful tool in Mapotf that allows you to create new blocks dynamically. This is particularly useful when you want to add new resources, variables, or other Terraform blocks programmatically. 4 | 5 | ## Arguments 6 | 7 | - `new_block_type`: This argument specifies the type of the new block (e.g., `resource`, `variable`, etc.). It is a required string attribute. 8 | - `filename`: This argument indicates the file where the new block will be added. It must end with `.tf` and is a required string attribute. 9 | - `labels`: This optional argument allows you to specify labels for the new block. It is a list of strings. 10 | - `body`: This optional argument allows you to specify the body content for the new block as a string of HCL code. 11 | - `asstring`: This nested block is used to specify the transformation that will be applied to the new block. The transformation is defined as a string of Terraform code. 12 | - `asraw`: This nested block is used to specify the transformation that will be applied to the new block. The transformation is defined as raw HCL code. The code is not parsed or evaluated, but is directly inserted into the Terraform configuration. This allows you to write complex transformations that cannot be expressed as a single Terraform expression. 
13 | 14 | ## Example - Creating a New Resource Block 15 | 16 | Here is an example of how to use the `new_block` transform block to create a new resource block: 17 | 18 | ```terraform 19 | transform "new_block" example { 20 | new_block_type = "resource" 21 | filename = "main.tf" 22 | labels = ["azurerm_resource_group", "rg"] 23 | asraw { 24 | name = "example" 25 | location = "East US" 26 | } 27 | } 28 | ``` 29 | 30 | In this example, a new `resource` block of type `azurerm_resource_group` with the label `rg` is added to the `main.tf` file. The body of the block includes the `name` and `location` attributes. 31 | 32 | ## Example - Creating a New Variable Block, With `body` Argument 33 | 34 | Here is an example of how to use the `new_block` transform block to create a new variable block: 35 | 36 | ```terraform 37 | transform "new_block" example { 38 | new_block_type = "variable" 39 | filename = "variables.tf" 40 | labels = ["example"] 41 | body = <<-BODY 42 | type = string 43 | description = "This is an example variable" 44 | BODY 45 | } 46 | ``` 47 | 48 | In this example, a new `variable` block with the label `example` is added to the `variables.tf` file. The body of the block includes the `type` and `description` attributes. 49 | 50 | ## Example - Using `asstring` to Define the Block 51 | 52 | Here is an example of how to use the `asstring` nested block to define the new block: 53 | 54 | ```terraform 55 | transform "new_block" example { 56 | new_block_type = "resource" 57 | filename = "main.tf" 58 | labels = ["azurerm_resource_group", "example"] 59 | asstring { 60 | name = var.resource_group_name 61 | location = "East US" 62 | } 63 | } 64 | ``` 65 | 66 | In this example, the `asstring` nested block is used to define the body of the new `resource` block. `var.resource_group_name` refers to a variable defined elsewhere in the Mapotf configuration file, it would be evaluated as a string, and emitted as tokens in the resulting Terraform configuration. 
67 | 68 | ## Example - Error Handling 69 | 70 | The `new_block` transform block includes error handling to ensure that only one of `asraw`, `asstring`, or `body` is set. If more than one is set, an error will be raised. 71 | 72 | ```terraform 73 | transform "new_block" example { 74 | new_block_type = "resource" 75 | filename = "main.tf" 76 | labels = ["azurerm_resource_group", "example"] 77 | body = <<-BODY 78 | name = "example" 79 | BODY 80 | asraw { 81 | location = "East US" 82 | } 83 | } 84 | ``` 85 | 86 | In this example, an error will be raised because both `body` and `asraw` are set. Only one of these attributes should be set to avoid conflicts. -------------------------------------------------------------------------------- /pkg/mptf_config_test.go: -------------------------------------------------------------------------------- 1 | package pkg_test 2 | 3 | import ( 4 | "context" 5 | "os" 6 | "path/filepath" 7 | "testing" 8 | 9 | "github.com/Azure/mapotf/pkg" 10 | filesystem "github.com/Azure/mapotf/pkg/fs" 11 | "github.com/prashantv/gostub" 12 | "github.com/spf13/afero" 13 | "github.com/stretchr/testify/assert" 14 | "github.com/stretchr/testify/require" 15 | ) 16 | 17 | func TestNewMetaProgrammingTFConfigShouldLoadTerraformBlocks(t *testing.T) { 18 | stub := gostub.Stub(&filesystem.Fs, fakeFs(map[string]string{ 19 | "/main.tf": `resource "fake_resource" this {}`, 20 | })) 21 | defer stub.Reset() 22 | 23 | sut, err := pkg.NewMetaProgrammingTFConfig(&pkg.TerraformModuleRef{ 24 | Dir: "/", 25 | AbsDir: "/", 26 | }, nil, nil, nil, context.TODO()) 27 | require.NoError(t, err) 28 | assert.NotEmpty(t, sut.ResourceBlocks) 29 | } 30 | 31 | func TestNewMetaProgrammingTFConfigShouldLoadTerraformBlock(t *testing.T) { 32 | stub := gostub.Stub(&filesystem.Fs, fakeFs(map[string]string{ 33 | "/main.tf": `terraform {}`, 34 | })) 35 | defer stub.Reset() 36 | 37 | sut, err := pkg.NewMetaProgrammingTFConfig(&pkg.TerraformModuleRef{ 38 | Dir: "/", 39 | AbsDir: "/", 40 | }, 
nil, nil, nil, context.TODO()) 41 | require.NoError(t, err) 42 | assert.NotNil(t, sut.TerraformBlock()) 43 | } 44 | 45 | func TestMetaProgrammingTFConfigBlocks(t *testing.T) { 46 | stub := gostub.Stub(&filesystem.Fs, fakeFs(map[string]string{ 47 | "/main.tf": ` 48 | resource "fake_resource" "this" {} 49 | data "fake_data" "this" {} 50 | variable "fake_variable" {} 51 | output "fake_output" {} 52 | locals { 53 | fake_local = "value" 54 | } 55 | module "fake_module" { 56 | source = "./module" 57 | } 58 | terraform { 59 | required_version = ">= 0.12" 60 | } 61 | `, 62 | })) 63 | defer stub.Reset() 64 | 65 | sut, err := pkg.NewMetaProgrammingTFConfig(&pkg.TerraformModuleRef{ 66 | Dir: "/", 67 | AbsDir: "/", 68 | }, nil, nil, nil, context.TODO()) 69 | require.NoError(t, err) 70 | 71 | assert.NotEmpty(t, sut.ResourceBlocks(), "resourceBlocks should not be empty") 72 | assert.NotEmpty(t, sut.DataBlocks(), "dataBlocks should not be empty") 73 | assert.NotEmpty(t, sut.VariableBlocks(), "variableBlocks should not be empty") 74 | assert.NotEmpty(t, sut.OutputBlocks(), "outputBlocks should not be empty") 75 | assert.NotEmpty(t, sut.LocalBlocks(), "localBlocks should not be empty") 76 | assert.NotEmpty(t, sut.ModuleBlocks(), "moduleBlocks should not be empty") 77 | assert.NotNil(t, sut.TerraformBlock(), "terraformBlock should not be nil") 78 | } 79 | 80 | func TestModulePathsWhenModulesJsonExists(t *testing.T) { 81 | stub := gostub.Stub(&filesystem.Fs, fakeFs(map[string]string{ 82 | "/.terraform/modules/modules.json": `{ 83 | "Modules": [ 84 | { 85 | "Key": "", 86 | "Source": "", 87 | "Dir": "." 
88 | }, 89 | { 90 | "Key": "that", 91 | "Source": "./module", 92 | "Dir": "module" 93 | } 94 | ] 95 | }`, 96 | })) 97 | defer stub.Reset() 98 | 99 | refs, err := pkg.ModuleRefs("/") 100 | require.NoError(t, err) 101 | var paths []string 102 | for _, ref := range refs { 103 | paths = append(paths, ref.AbsDir) 104 | } 105 | pwd, err := os.Getwd() 106 | require.NoError(t, err) 107 | assert.Contains(t, paths, pwd) 108 | assert.Contains(t, paths, filepath.Join(pwd, "module")) 109 | } 110 | 111 | func TestModulePathsWhenModulesJsonDoesNotExist(t *testing.T) { 112 | stub := gostub.Stub(&filesystem.Fs, fakeFs(map[string]string{})) 113 | defer stub.Reset() 114 | 115 | refs, err := pkg.ModuleRefs(".") 116 | require.NoError(t, err) 117 | var paths []string 118 | for _, ref := range refs { 119 | paths = append(paths, ref.AbsDir) 120 | } 121 | pwd, err := os.Getwd() 122 | require.NoError(t, err) 123 | assert.Equal(t, []string{pwd}, paths) 124 | } 125 | 126 | func fakeFs(files map[string]string) afero.Fs { 127 | fs := afero.NewMemMapFs() 128 | for n, content := range files { 129 | _ = afero.WriteFile(fs, n, []byte(content), 0644) 130 | } 131 | return fs 132 | } 133 | -------------------------------------------------------------------------------- /readme.md: -------------------------------------------------------------------------------- 1 | # Meta Programming for Terraform 2 | 3 | `mapotf` stands for `MetA PrOgramming for TerraForm`. 4 | 5 | `mapotf` is a meta programming tool designed to work with Terraform. 6 | 7 | As Terraform module's author, you might meet such scenario: an user required a `ignore_changes` setting in your resource block because they've used their own customized Azure Policy (or AWS config), these remediation services could modify the resources out of band, which would bring config drift to their state. 
Different users need to ignore different attributes, but unfortunately, Terraform doesn't support `var` in some arguments, such as `prevent_destroy` or `ignore_changes`. 8 | 9 | Another scenario is, there are some common design patterns, such as creating private endpoints for RDS, S3 buckets and so on. Different users might work on their own modules, but the patterns are the same. If we can provide a common pattern library, then the module's author won't need to search for the examples or tutorials; all they need to do is search the patterns library, and apply. 10 | 11 | The `mapotf` tool has two phases, match and transform. You can use `data` blocks to match the Terraform blocks you're interested in, then you can define `transform` blocks to instruct how to mutate the original Terraform code; you can update the block in place, insert new blocks, or remove the given parts inside a Terraform block. 12 | 13 | ## How to install? 14 | 15 | `go install github.com/Azure/mapotf@latest` 16 | 17 | ## An example 18 | 19 | 1. Clone [terraform-azurerm-aks](https://github.com/Azure/terraform-azurerm-aks.git) 20 | 2. Switch into one of its examples, like `cd example/startup` 21 | 3. Run `mapotf init` or `terraform init` 22 | 4. Run `mapotf apply -r --mptf-dir git::https://github.com/Azure/mapotf.git//example/customize_aks_ignore_changes` 23 | 24 | `mapotf` would: 25 | 26 | 1. Download `example/customize_aks_ignore_changes` folder from `https://github.com/Azure/mapotf`, store the folder in a temp folder. 27 | 2. Match all `azurerm_kubernetes_cluster` resource blocks, patch them by adding `microsoft_defender[0].log_analytics_workspace_id` into its `ignore_changes` list. 28 | 3. Run `terraform apply` for you 29 | 30 | You'll be asked for permission to carry out the plan; this is output by Terraform: 31 | 32 | ```text 33 | Do you want to perform these actions? 34 | Terraform will perform the actions described above. 35 | Only 'yes' will be accepted to approve. 
36 | 37 | Enter a value: 38 | ``` 39 | 40 | Meanwhile, you would see `*.tf.mptfbackup` files for each `.tf` file, which contain the original content from those `.tf` files. If you check the `../../main.tf` file (referenced via `module` block's `source` `../../`), you would see the `ignore_changes` list of `azurerm_kubernetes_cluster` has been changed as expected. 41 | 42 | ```hcl 43 | lifecycle { 44 | ignore_changes = [ 45 | microsoft_defender[0].log_analytics_workspace_id, 46 | http_application_routing_enabled, 47 | http_proxy_config[0].no_proxy, 48 | kubernetes_version, 49 | public_network_access_enabled, 50 | # we might have a random suffix in cluster's name so we have to ignore it here, but we've traced user supplied cluster name by `null_resource.kubernetes_cluster_name_keeper` so when the name is changed we'll recreate this resource. 51 | name, 52 | ] 53 | ... 54 | } 55 | ``` 56 | 57 | If you press `no`, Terraform would quit, all `.tf` files would be reverted, and all `.tf.mptfbackup` files would be removed. 58 | 59 | You can also use the `transform` command to carry out the transforms without invoking Terraform: `mapotf transform -r --mptf-dir git::https://github.com/Azure/mapotf.git//example/customize_aks_ignore_changes`. Then, like `apply`, we'll leave the transformed `.tf` files along with `.tf.mptfbackup` files there for you; you can check them, apply them by calling the `terraform` command, or revert all changes by `mapotf reset`. If you decide to keep these changes and remove all backup files, you can run `mapotf clean-backup`. 60 | 61 | ## Override files 62 | 63 | Since blocks defined in `override.tf` and `*_override.tf` files are meant to be patch blocks and might contain only partial content, they might cause analysis errors in Mapotf, so we WON'T process these override files. 64 | 65 | This tool is still in development, but you're welcome to give it a try. 
-------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | ### JetBrains template 2 | # Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio, WebStorm and Rider 3 | # Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839 4 | 5 | # User-specific stuff 6 | .idea/**/workspace.xml 7 | .idea/**/tasks.xml 8 | .idea/**/usage.statistics.xml 9 | .idea/**/dictionaries 10 | .idea/**/shelf 11 | 12 | # AWS User-specific 13 | .idea/**/aws.xml 14 | 15 | # Generated files 16 | .idea/**/contentModel.xml 17 | 18 | # Sensitive or high-churn files 19 | .idea/**/dataSources/ 20 | .idea/**/dataSources.ids 21 | .idea/**/dataSources.local.xml 22 | .idea/**/sqlDataSources.xml 23 | .idea/**/dynamic.xml 24 | .idea/**/uiDesigner.xml 25 | .idea/**/dbnavigator.xml 26 | 27 | # Gradle 28 | .idea/**/gradle.xml 29 | .idea/**/libraries 30 | 31 | # Gradle and Maven with auto-import 32 | # When using Gradle or Maven with auto-import, you should exclude module files, 33 | # since they will be recreated, and may cause churn. Uncomment if using 34 | # auto-import. 
35 | # .idea/artifacts 36 | # .idea/compiler.xml 37 | # .idea/jarRepositories.xml 38 | # .idea/modules.xml 39 | # .idea/*.iml 40 | # .idea/modules 41 | # *.iml 42 | # *.ipr 43 | 44 | # CMake 45 | cmake-build-*/ 46 | 47 | # Mongo Explorer plugin 48 | .idea/**/mongoSettings.xml 49 | 50 | # File-based project format 51 | *.iws 52 | 53 | # IntelliJ 54 | out/ 55 | 56 | # mpeltonen/sbt-idea plugin 57 | .idea_modules/ 58 | 59 | # JIRA plugin 60 | atlassian-ide-plugin.xml 61 | 62 | # Cursive Clojure plugin 63 | .idea/replstate.xml 64 | 65 | # SonarLint plugin 66 | .idea/sonarlint/ 67 | 68 | # Crashlytics plugin (for Android Studio and IntelliJ) 69 | com_crashlytics_export_strings.xml 70 | crashlytics.properties 71 | crashlytics-build.properties 72 | fabric.properties 73 | 74 | # Editor-based Rest Client 75 | .idea/httpRequests 76 | 77 | # Android studio 3.1+ serialized cache file 78 | .idea/caches/build_file_checksums.ser 79 | 80 | ### Go template 81 | # If you prefer the allow list template instead of the deny list, see community template: 82 | # https://github.com/github/gitignore/blob/main/community/Golang/Go.AllowList.gitignore 83 | # 84 | # Binaries for programs and plugins 85 | *.exe 86 | *.exe~ 87 | *.dll 88 | *.so 89 | *.dylib 90 | 91 | # Test binary, built with `go test -c` 92 | *.test 93 | 94 | # Output of the go coverage tool, specifically when used with LiteIDE 95 | *.out 96 | 97 | # Dependency directories (remove the comment below to include it) 98 | # vendor/ 99 | 100 | # Go workspace file 101 | go.work 102 | 103 | ### GoLand+iml template 104 | # Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio, WebStorm and Rider 105 | # Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839 106 | 107 | # User-specific stuff 108 | .idea/**/workspace.xml 109 | .idea/**/tasks.xml 110 | .idea/**/usage.statistics.xml 111 | .idea/**/dictionaries 112 | .idea/**/shelf 113 | 114 | # AWS User-specific 115 | 
.idea/**/aws.xml 116 | 117 | # Generated files 118 | .idea/**/contentModel.xml 119 | 120 | # Sensitive or high-churn files 121 | .idea/**/dataSources/ 122 | .idea/**/dataSources.ids 123 | .idea/**/dataSources.local.xml 124 | .idea/**/sqlDataSources.xml 125 | .idea/**/dynamic.xml 126 | .idea/**/uiDesigner.xml 127 | .idea/**/dbnavigator.xml 128 | 129 | # Gradle 130 | .idea/**/gradle.xml 131 | .idea/**/libraries 132 | 133 | # Gradle and Maven with auto-import 134 | # When using Gradle or Maven with auto-import, you should exclude module files, 135 | # since they will be recreated, and may cause churn. Uncomment if using 136 | # auto-import. 137 | # .idea/artifacts 138 | # .idea/compiler.xml 139 | # .idea/jarRepositories.xml 140 | # .idea/modules.xml 141 | # .idea/*.iml 142 | # .idea/modules 143 | # *.iml 144 | # *.ipr 145 | 146 | # CMake 147 | cmake-build-*/ 148 | 149 | # Mongo Explorer plugin 150 | .idea/**/mongoSettings.xml 151 | 152 | # File-based project format 153 | *.iws 154 | 155 | # IntelliJ 156 | out/ 157 | 158 | # mpeltonen/sbt-idea plugin 159 | .idea_modules/ 160 | 161 | # JIRA plugin 162 | atlassian-ide-plugin.xml 163 | 164 | # Cursive Clojure plugin 165 | .idea/replstate.xml 166 | 167 | # SonarLint plugin 168 | .idea/sonarlint/ 169 | 170 | # Crashlytics plugin (for Android Studio and IntelliJ) 171 | com_crashlytics_export_strings.xml 172 | crashlytics.properties 173 | crashlytics-build.properties 174 | fabric.properties 175 | 176 | # Editor-based Rest Client 177 | .idea/httpRequests 178 | 179 | # Android studio 3.1+ serialized cache file 180 | .idea/caches/build_file_checksums.ser 181 | 182 | -------------------------------------------------------------------------------- /pkg/data_terraform_test.go: -------------------------------------------------------------------------------- 1 | package pkg_test 2 | 3 | import ( 4 | "context" 5 | "github.com/zclconf/go-cty/cty" 6 | "testing" 7 | 8 | "github.com/Azure/golden" 9 | "github.com/Azure/mapotf/pkg" 10 | 
filesystem "github.com/Azure/mapotf/pkg/fs" 11 | "github.com/prashantv/gostub" 12 | "github.com/stretchr/testify/require" 13 | ) 14 | 15 | func TestTerraformData_BlockMptfInfo(t *testing.T) { 16 | stub := gostub.Stub(&filesystem.Fs, fakeFs(map[string]string{ 17 | "/main.tf": `terraform { 18 | required_providers { 19 | mycloud = { 20 | source = "mycorp/mycloud" 21 | version = "~> 1.0" 22 | } 23 | } 24 | }`, 25 | })) 26 | defer stub.Reset() 27 | cfg, err := pkg.NewMetaProgrammingTFConfig(&pkg.TerraformModuleRef{ 28 | Dir: "/", 29 | AbsDir: "/", 30 | }, nil, nil, nil, context.TODO()) 31 | require.NoError(t, err) 32 | 33 | data := &pkg.TerraformData{ 34 | BaseBlock: golden.NewBaseBlock(cfg, nil), 35 | } 36 | 37 | err = data.ExecuteDuringPlan() 38 | require.NoError(t, err) 39 | require.NotEqual(t, cty.NilVal, data.Block) 40 | } 41 | 42 | func TestTerraformData_RequiredProviders(t *testing.T) { 43 | 44 | cases := []struct { 45 | desc string 46 | config string 47 | wantedTerraformVersion *string 48 | wantedRequiredProviders map[string]pkg.RequiredProvider 49 | }{ 50 | { 51 | desc: "required_providers only", 52 | config: `terraform { 53 | required_providers { 54 | mycloud = { 55 | source = "mycorp/mycloud" 56 | version = "~> 1.0" 57 | } 58 | } 59 | }`, 60 | wantedRequiredProviders: map[string]pkg.RequiredProvider{ 61 | "mycloud": pkg.RequiredProvider{ 62 | Source: p("mycorp/mycloud"), 63 | Version: p("~> 1.0"), 64 | }, 65 | }, 66 | }, 67 | { 68 | desc: "required_version only", 69 | config: `terraform { 70 | required_version = ">= 1.2" 71 | }`, 72 | wantedTerraformVersion: p(">= 1.2"), 73 | }, 74 | { 75 | desc: "required_providers with version only", 76 | config: `terraform { 77 | required_providers { 78 | mycloud = { 79 | version = "~> 1.0" 80 | } 81 | } 82 | }`, 83 | wantedRequiredProviders: map[string]pkg.RequiredProvider{ 84 | "mycloud": { 85 | Version: p("~> 1.0"), 86 | }, 87 | }, 88 | }, 89 | { 90 | desc: "required_providers with source only", 91 | config: `terraform 
{ 92 | required_providers { 93 | mycloud = { 94 | source = "mycorp/mycloud" 95 | } 96 | } 97 | }`, 98 | wantedRequiredProviders: map[string]pkg.RequiredProvider{ 99 | "mycloud": { 100 | Source: p("mycorp/mycloud"), 101 | }, 102 | }, 103 | }, 104 | { 105 | desc: "required_providers with all attributes and multiple providers", 106 | config: `terraform { 107 | required_version = ">= 1.2" 108 | required_providers { 109 | mycloud = { 110 | source = "mycorp/mycloud" 111 | version = ">= 1.0" 112 | } 113 | mycloud2 = { 114 | source = "mycorp/mycloud2" 115 | version = ">= 2.0" 116 | } 117 | } 118 | }`, 119 | wantedTerraformVersion: p(">= 1.2"), 120 | wantedRequiredProviders: map[string]pkg.RequiredProvider{ 121 | "mycloud": { 122 | Source: p("mycorp/mycloud"), 123 | Version: p(">= 1.0"), 124 | }, 125 | "mycloud2": { 126 | Source: p("mycorp/mycloud2"), 127 | Version: p(">= 2.0"), 128 | }, 129 | }, 130 | }, 131 | { 132 | desc: "empty terraform block", 133 | config: `terraform { 134 | }`, 135 | }, 136 | { 137 | desc: "terraform block with empty required providers block", 138 | config: `terraform { 139 | required_providers { 140 | } 141 | }`, 142 | wantedRequiredProviders: map[string]pkg.RequiredProvider{}, 143 | }, 144 | { 145 | desc: "no terraform block", 146 | config: ``, 147 | }, 148 | } 149 | for _, c := range cases { 150 | t.Run(c.desc, func(t *testing.T) { 151 | stub := gostub.Stub(&filesystem.Fs, fakeFs(map[string]string{ 152 | "/main.tf": c.config, 153 | })) 154 | defer stub.Reset() 155 | cfg, err := pkg.NewMetaProgrammingTFConfig(&pkg.TerraformModuleRef{ 156 | Dir: "/", 157 | AbsDir: "/", 158 | }, nil, nil, nil, context.TODO()) 159 | require.NoError(t, err) 160 | 161 | data := &pkg.TerraformData{ 162 | BaseBlock: golden.NewBaseBlock(cfg, nil), 163 | } 164 | 165 | err = data.ExecuteDuringPlan() 166 | require.NoError(t, err) 167 | require.Equal(t, c.wantedTerraformVersion, data.RequiredVersion) 168 | require.Equal(t, c.wantedRequiredProviders, data.RequiredProviders) 
169 | }) 170 | } 171 | } 172 | 173 | func p[T any](v T) *T { 174 | return &v 175 | } 176 | -------------------------------------------------------------------------------- /pkg/backup/backup.go: -------------------------------------------------------------------------------- 1 | package backup 2 | 3 | import ( 4 | "fmt" 5 | "os" 6 | "path/filepath" 7 | "strings" 8 | 9 | filesystem "github.com/Azure/mapotf/pkg/fs" 10 | "github.com/spf13/afero" 11 | ) 12 | 13 | const BackupExtension = ".mptfbackup" 14 | const NewFileExtension = ".mptfnew" 15 | 16 | func BackupFolder(dir string) error { 17 | terraformFile, err := afero.Glob(filesystem.Fs, filepath.Join(dir, "*.tf")) 18 | if err != nil { 19 | return fmt.Errorf("cannot list terraform files in %s:%+v", dir, err) 20 | } 21 | for _, file := range terraformFile { 22 | backupFile := file + BackupExtension 23 | exist, err := afero.Exists(filesystem.Fs, backupFile) 24 | if err != nil { 25 | return fmt.Errorf("cannot check backup file %s:%+v", backupFile, err) 26 | } 27 | if exist { 28 | continue 29 | } 30 | // create the backup file, then copy the content of the terraform file to the backup file, with the same permission 31 | content, err := afero.ReadFile(filesystem.Fs, file) 32 | if err != nil { 33 | return fmt.Errorf("cannot read terraform file %s:%+v", file, err) 34 | } 35 | // get permission of the terraform file 36 | info, err := filesystem.Fs.Stat(file) 37 | if err != nil { 38 | return fmt.Errorf("cannot get permission of terraform file %s:%+v", file, err) 39 | } 40 | // write the content to the backup file 41 | if err = afero.WriteFile(filesystem.Fs, backupFile, content, info.Mode()); err != nil { 42 | return fmt.Errorf("cannot write backup file %s:%+v", backupFile, err) 43 | } 44 | } 45 | return nil 46 | } 47 | 48 | func Reset(dir string) error { 49 | err := restoreBackup(dir) 50 | if err != nil { 51 | return err 52 | } 53 | return removeNewFiles(dir) 54 | } 55 | 56 | func removeNewFiles(dir string) error { 57 | 
newFileIndicators, err := afero.Glob(filesystem.Fs, filepath.Join(dir, "*"+NewFileExtension)) 58 | if err != nil { 59 | return fmt.Errorf("cannot list new file indicators in %s:%+v", dir, err) 60 | } 61 | for _, newFileIndicator := range newFileIndicators { 62 | newFile, _ := strings.CutSuffix(newFileIndicator, NewFileExtension) 63 | if err = filesystem.Fs.Remove(newFile); err != nil { 64 | return fmt.Errorf("cannot delete new file %s:%+v", newFile, err) 65 | } 66 | if err = filesystem.Fs.Remove(newFileIndicator); err != nil { 67 | return fmt.Errorf("cannot delete new file indicator in %s:%+v", newFileIndicator, err) 68 | } 69 | } 70 | return nil 71 | } 72 | 73 | func restoreBackup(dir string) error { 74 | backupFiles, err := afero.Glob(filesystem.Fs, filepath.Join(dir, "*"+BackupExtension)) 75 | if err != nil { 76 | return fmt.Errorf("cannot list backup files in %s:%+v", dir, err) 77 | } 78 | for _, backupFile := range backupFiles { 79 | // read the content of the backup file 80 | content, err := afero.ReadFile(filesystem.Fs, backupFile) 81 | if err != nil { 82 | return fmt.Errorf("cannot read backup file %s:%+v", backupFile, err) 83 | } 84 | // write the content to the original file 85 | originalFile := backupFile[:len(backupFile)-len(BackupExtension)] // remove the extension to get the original file name 86 | info, err := getFilePerm(originalFile, backupFile, err) 87 | if err != nil { 88 | return err 89 | } 90 | if err = afero.WriteFile(filesystem.Fs, originalFile, content, info.Mode()); err != nil { 91 | return fmt.Errorf("cannot write original file %s:%+v", originalFile, err) 92 | } 93 | // delete the backup file 94 | if err = filesystem.Fs.Remove(backupFile); err != nil { 95 | return fmt.Errorf("cannot delete backup file %s:%+v", backupFile, err) 96 | } 97 | } 98 | return nil 99 | } 100 | 101 | func ClearBackup(dir string) error { 102 | backupFiles, err := afero.Glob(filesystem.Fs, filepath.Join(dir, "*"+BackupExtension)) 103 | if err != nil { 104 | return 
fmt.Errorf("cannot list backup files in %s:%+v", dir, err) 105 | } 106 | newFileIndicators, err := afero.Glob(filesystem.Fs, filepath.Join(dir, "*"+NewFileExtension)) 107 | if err != nil { 108 | return fmt.Errorf("cannot list new file indicators in %s:%+v", dir, err) 109 | } 110 | files := append(backupFiles, newFileIndicators...) 111 | for _, backupFile := range files { 112 | // delete the backup file 113 | if err = filesystem.Fs.Remove(backupFile); err != nil { 114 | return fmt.Errorf("cannot delete backup file %s:%+v", backupFile, err) 115 | } 116 | } 117 | return nil 118 | } 119 | 120 | func getFilePerm(originalFile string, backupFile string, err error) (os.FileInfo, error) { 121 | var info os.FileInfo 122 | for _, path := range []string{originalFile, backupFile} { 123 | info, err = filesystem.Fs.Stat(path) 124 | if err == nil { 125 | break 126 | } 127 | } 128 | if err != nil { 129 | return nil, fmt.Errorf("cannot get permission of backup file %s:%+v", backupFile, err) 130 | } 131 | return info, nil 132 | } 133 | -------------------------------------------------------------------------------- /pkg/terraform/nested_block.go: -------------------------------------------------------------------------------- 1 | package terraform 2 | 3 | import ( 4 | "github.com/hashicorp/hcl/v2/hclsyntax" 5 | "github.com/hashicorp/hcl/v2/hclwrite" 6 | "github.com/zclconf/go-cty/cty" 7 | "strings" 8 | ) 9 | 10 | var _ Block = new(NestedBlock) 11 | 12 | type NestedBlocks map[string][]*NestedBlock 13 | 14 | type NestedBlock struct { 15 | Type string 16 | *hclsyntax.Block 17 | selfWriteBlock *hclwrite.Block 18 | WriteBlock *hclwrite.Block 19 | ForEach *Attribute 20 | Iterator *Attribute 21 | Attributes map[string]*Attribute 22 | NestedBlocks NestedBlocks 23 | } 24 | 25 | func (nb *NestedBlock) RemoveContent(path string) { 26 | segs := strings.Split(path, "/") 27 | current := segs[0] 28 | 29 | if len(segs) > 1 { 30 | myNbs, ok := nb.NestedBlocks[current] 31 | if !ok { 32 | return 33 | } 
34 | nextPath := strings.Join(segs[1:], "/") 35 | for _, myNb := range myNbs { 36 | myNb.RemoveContent(nextPath) 37 | } 38 | return 39 | } 40 | _, ok := nb.Attributes[current] 41 | if ok { 42 | nb.WriteBody().RemoveAttribute(current) 43 | return 44 | } 45 | myNbs, ok := nb.NestedBlocks[current] 46 | if !ok { 47 | return 48 | } 49 | block := nb.WriteBlock 50 | if nb.Type == "dynamic" { 51 | contentBlock := nb.WriteBlock.Body().Blocks()[0] 52 | block = contentBlock 53 | } 54 | for _, myNb := range myNbs { 55 | block.Body().RemoveBlock(myNb.selfWriteBlock) 56 | } 57 | } 58 | 59 | func (nb *NestedBlock) SetAttributeRaw(name string, tokens hclwrite.Tokens) { 60 | unlock := lockBlockFile(nb) 61 | defer unlock() 62 | nb.WriteBody().SetAttributeRaw(name, tokens) 63 | } 64 | 65 | func (nb *NestedBlock) AppendBlock(block *hclwrite.Block) { 66 | unlock := lockBlockFile(nb) 67 | defer unlock() 68 | nb.WriteBody().AppendBlock(block) 69 | } 70 | 71 | func (nb *NestedBlock) WriteBody() *hclwrite.Body { 72 | return nb.WriteBlock.Body() 73 | } 74 | 75 | func (nb *NestedBlock) GetAttributes() map[string]*Attribute { 76 | return nb.Attributes 77 | } 78 | 79 | func (nb *NestedBlock) GetNestedBlocks() NestedBlocks { 80 | return nb.NestedBlocks 81 | } 82 | 83 | func NewNestedBlock(rb *hclsyntax.Block, wb *hclwrite.Block) *NestedBlock { 84 | if rb.Type == "dynamic" { 85 | return dynamicNestedBlock(rb, wb) 86 | } 87 | return staticNestedBlock(rb, wb) 88 | } 89 | 90 | func (nb *NestedBlock) EvalContext() cty.Value { 91 | v := map[string]cty.Value{} 92 | for n, a := range nb.Attributes { 93 | v[n] = cty.StringVal(a.String()) 94 | } 95 | if nb.ForEach != nil { 96 | v["for_each"] = cty.StringVal(nb.ForEach.String()) 97 | } 98 | if nb.Iterator != nil { 99 | v["iterator"] = cty.StringVal(nb.Iterator.String()) 100 | } 101 | for k, nbv := range nb.NestedBlocks.Values() { 102 | v[k] = nbv 103 | } 104 | v["mptf"] = nb.MptfObject() 105 | 106 | return cty.ObjectVal(v) 107 | } 108 | 109 | func (nb 
*NestedBlock) String() string { 110 | return string(nb.selfWriteBlock.BuildTokens(nil).Bytes()) 111 | } 112 | 113 | func (nb *NestedBlock) MptfObject() cty.Value { 114 | v := map[string]cty.Value{} 115 | v["tostring"] = cty.StringVal(nb.String()) 116 | v["range"] = cty.ObjectVal(map[string]cty.Value{ 117 | "file_name": cty.StringVal(nb.Range().Filename), 118 | "start_line": cty.NumberIntVal(int64(nb.Range().Start.Line)), 119 | "start_column": cty.NumberIntVal(int64(nb.Range().Start.Column)), 120 | "end_line": cty.NumberIntVal(int64(nb.Range().End.Line)), 121 | "end_column": cty.NumberIntVal(int64(nb.Range().End.Column)), 122 | }) 123 | return cty.ObjectVal(v) 124 | } 125 | 126 | func (nbs NestedBlocks) Values() map[string]cty.Value { 127 | v := map[string]cty.Value{} 128 | for k, blocks := range nbs { 129 | v[k] = ListOfObject(blocks) 130 | } 131 | return v 132 | } 133 | 134 | func dynamicNestedBlock(rb *hclsyntax.Block, wb *hclwrite.Block) *NestedBlock { 135 | nb := &NestedBlock{ 136 | Type: rb.Labels[0], 137 | selfWriteBlock: wb, 138 | Block: rb.Body.Blocks[0], 139 | WriteBlock: wb.Body().Blocks()[0], 140 | ForEach: NewAttribute("for_each", rb.Body.Attributes["for_each"], wb.Body().GetAttribute("for_each")), 141 | Attributes: attributes(rb.Body.Blocks[0].Body, wb.Body().Blocks()[0].Body()), 142 | NestedBlocks: nestedBlocks(rb.Body.Blocks[0].Body, wb.Body().Blocks()[0].Body()), 143 | } 144 | if iteratorAttr, ok := rb.Body.Attributes["iterator"]; ok { 145 | nb.Iterator = NewAttribute("iterator", iteratorAttr, wb.Body().GetAttribute("iterator")) 146 | } 147 | return nb 148 | } 149 | 150 | func staticNestedBlock(rb *hclsyntax.Block, wb *hclwrite.Block) *NestedBlock { 151 | return &NestedBlock{ 152 | Type: rb.Type, 153 | Block: rb, 154 | selfWriteBlock: wb, 155 | WriteBlock: wb, 156 | Attributes: attributes(rb.Body, wb.Body()), 157 | NestedBlocks: nestedBlocks(rb.Body, wb.Body()), 158 | } 159 | } 160 | 
-------------------------------------------------------------------------------- /tutorial/1.MapoTF_DSL_Basics.md: -------------------------------------------------------------------------------- 1 | # MapoTF DSL Basics 2 | 3 | ## Introduction to the MapoTF DSL 4 | 5 | Welcome to the world of MapoTF DSL, a powerful tool that lets you customize your Terraform configurations like never before! MapoTF DSL is built on the HashiCorp Configuration Language (HCL), which means if you're already familiar with Terraform, you'll feel right at home. The main difference is that MapoTF DSL allows you to write code that can change other code – yes, it's meta-programming at its best! 6 | 7 | ## First Steps with MapoTF DSL 8 | 9 | Before we dive into writing our own MapoTF configurations, let's understand the basic structure. A typical MapoTF file will have one or more `transform` blocks, which are the heart of our DSL. Each `transform` block will look something like this: 10 | 11 | ```hcl 12 | transform "update_in_place" "my_transform" { 13 | // Our custom logic will go here 14 | } 15 | ``` 16 | 17 | Here, `update_in_place` is the type of transformation we want to apply, and `my_transform` is a unique name for this transformation. 18 | 19 | ## Querying Terraform Resources 20 | 21 | To apply transformations, we first need to know what we're working with. In MapoTF, we use data blocks to query and fetch information about resources defined in our Terraform configurations: 22 | 23 | ```hcl 24 | data "resource" "aks_clusters" { 25 | resource_type = "azurerm_kubernetes_cluster" 26 | } 27 | ``` 28 | 29 | This will allow us to access all Azure Kubernetes Service clusters defined in our Terraform state. 30 | 31 | ## Applying Transformations 32 | 33 | Now, let's apply a transformation. We'll use the transform block for this. 
Here's a simple example that adds some tags to our AKS clusters: 34 | 35 | ```hcl 36 | transform "update_in_place" "add_tracing_tags" { 37 | for_each = data.resource.aks_clusters.result 38 | 39 | target_block_address = each.value.mptf.block_address 40 | 41 | asstring { 42 | tags = <<-TAGS 43 | merge({ 44 | file = "${each.value.mptf.range.file_name}" 45 | block = "${each.value.mptf.terraform_address}" 46 | git_hash = "${each.value.mptf.module.git_hash}" 47 | module_source = "${each.value.mptf.module.source}" 48 | module_version = "${each.value.mptf.module.version}" 49 | }, ${try(each.value.tags, "{}")}) 50 | TAGS 51 | } 52 | } 53 | ``` 54 | 55 | In this block: 56 | 57 | * `for_each` iterates over each AKS cluster we fetched. 58 | * `target_block_address` tells MapoTF which Terraform block to apply our changes to. 59 | * Inside the `asstring` section, we define the logic for updating tags. The merge function combines existing tags with new ones we define. 60 | 61 | ## Understanding the Transformation Logic 62 | 63 | The `asstring` block is where we write our logic in string format. This might look a bit different from what you're used to in Terraform, but it's a powerful way to customize your configurations. The tags attribute is set using a heredoc (indicated by <<-TAGS and TAGS), which allows us to write multi-line strings easily. 64 | 65 | Inside the heredoc, we use the merge function to combine dictionaries. We're adding several key-value pairs that provide useful metadata about each block, such as the filename, block address, and Git hash of the module it comes from. 66 | 67 | ## Running Your First Transformation with Flags 68 | 69 | To apply your transformation, you'll use the MapoTF command-line interface (CLI). The CLI is designed to be flexible and is controlled through a set of flags that let you specify how transformations should be applied. Here's how you can run your first transformation: 70 | 71 | 1. Open your terminal or command prompt. 72 | 2. 
Navigate to your Terraform project directory where your .tf files and MapoTF configurations are located. 73 | 3. Run the MapoTF CLI with the transform command, using the appropriate flags: 74 | 75 | ```shell 76 | mapotf transform --tf-dir ./terraform --mptf-dir ./mapotf 77 | ``` 78 | 79 | 4. Review the output of the command. The CLI will provide feedback on what it's doing, including any transformations applied and if there are errors to be aware of. 80 | 5. (Optional) Use additional flags for more control: 81 | - `--recursive` or `-r`: Apply transformations to all Terraform modules recursively. This is useful for large projects with multiple modules. 82 | - `--help`: If you need help or want to see all available flags and their descriptions. 83 | 6. Check your Terraform .tf files to see the changes applied by your transformation. The MapoTF CLI modifies these files based on the logic you defined in your MapoTF configurations. 84 | 7. Cleanup: If you want to revert the changes, you can run `mapotf reset` 85 | 86 | ## Recap 87 | 88 | Congratulations! You've just learned the basics of the MapoTF DSL. You now know how to query resources, write transformation logic, and apply it to your Terraform configurations. In the next sections, we'll explore more advanced features and delve deeper into the power of MapoTF. Happy coding! 
-------------------------------------------------------------------------------- /pkg/transform_update_in_place.go: -------------------------------------------------------------------------------- 1 | package pkg 2 | 3 | import ( 4 | "encoding/json" 5 | "fmt" 6 | "github.com/Azure/golden" 7 | "github.com/Azure/mapotf/pkg/terraform" 8 | "github.com/hashicorp/hcl/v2" 9 | "github.com/hashicorp/hcl/v2/hclsyntax" 10 | "github.com/hashicorp/hcl/v2/hclwrite" 11 | ) 12 | 13 | var _ Transform = &UpdateInPlaceTransform{} 14 | var _ golden.CustomDecode = &UpdateInPlaceTransform{} 15 | 16 | type UpdateInPlaceTransform struct { 17 | *golden.BaseBlock 18 | *BaseTransform 19 | TargetBlockAddress string `hcl:"target_block_address" validate:"required"` 20 | DynamicBlockBody string `hcl:"dynamic_block_body,optional"` 21 | updateBlock *hclwrite.Block 22 | targetBlock *terraform.RootBlock 23 | } 24 | 25 | func (u *UpdateInPlaceTransform) Type() string { 26 | return "update_in_place" 27 | } 28 | 29 | func (u *UpdateInPlaceTransform) Apply() error { 30 | u.PatchWriteBlock(u.targetBlock, u.updateBlock) 31 | return nil 32 | } 33 | 34 | func (u *UpdateInPlaceTransform) Decode(block *golden.HclBlock, context *hcl.EvalContext) error { 35 | var err error 36 | u.TargetBlockAddress, err = getRequiredStringAttribute("target_block_address", block, context) 37 | if err != nil { 38 | return err 39 | } 40 | cfg := u.Config().(*MetaProgrammingTFConfig) 41 | b := cfg.RootBlock(u.TargetBlockAddress) 42 | if b == nil { 43 | return fmt.Errorf("cannot find block: %s", u.TargetBlockAddress) 44 | } 45 | u.targetBlock = b 46 | u.updateBlock = hclwrite.NewBlock("patch", []string{}) 47 | dynamicBlockBody, err := getOptionalStringAttribute("dynamic_block_body", block, context) 48 | if err != nil { 49 | return err 50 | } 51 | if dynamicBlockBody != nil { 52 | u.DynamicBlockBody = *dynamicBlockBody 53 | patch, diag := hclwrite.ParseConfig([]byte(fmt.Sprintf("patch {\n%s\n}", u.DynamicBlockBody)), u.Address(), 
hcl.InitialPos) 54 | if diag.HasErrors() { 55 | return fmt.Errorf("error while parsing patch body: %s", diag.Error()) 56 | } 57 | if err = decodeAsDynamicBlockBody(u.updateBlock, patch.Body().Blocks()[0]); err != nil { 58 | return err 59 | } 60 | } 61 | for _, b := range block.NestedBlocks() { 62 | switch b.Type { 63 | case "asraw": 64 | { 65 | if err = decodeAsRawBlock(u.updateBlock, b); err != nil { 66 | return err 67 | } 68 | continue 69 | } 70 | case "asstring": 71 | { 72 | if err = decodeAsStringBlock(u.updateBlock, b, 0, context); err != nil { 73 | return err 74 | } 75 | continue 76 | } 77 | } 78 | } 79 | return nil 80 | } 81 | 82 | func decodeAsDynamicBlockBody(dest *hclwrite.Block, patch *hclwrite.Block) error { 83 | for n, attribute := range patch.Body().Attributes() { 84 | dest.Body().SetAttributeRaw(n, attribute.Expr().BuildTokens(nil)) 85 | } 86 | for _, b := range patch.Body().Blocks() { 87 | blockType := b.Type() 88 | newNestedBlock := dest.Body().AppendNewBlock(blockType, b.Labels()) 89 | if err := decodeAsDynamicBlockBody(newNestedBlock, b); err != nil { 90 | return err 91 | } 92 | } 93 | return nil 94 | } 95 | 96 | func (u *UpdateInPlaceTransform) UpdateBlock() *hclwrite.Block { 97 | return u.updateBlock 98 | } 99 | 100 | func (u *UpdateInPlaceTransform) PatchWriteBlock(dest terraform.Block, patch *hclwrite.Block) { 101 | // we cannot patch one-line block 102 | if dest.Range().Start.Line == dest.Range().End.Line { 103 | dest.WriteBody().AppendNewline() 104 | } 105 | for name, attr := range patch.Body().Attributes() { 106 | dest.SetAttributeRaw(name, attr.Expr().BuildTokens(nil)) 107 | } 108 | // Handle nested blocks 109 | for _, patchNestedBlock := range patch.Body().Blocks() { 110 | destNestedBlocks := dest.GetNestedBlocks()[patchNestedBlock.Type()] 111 | if len(destNestedBlocks) == 0 { 112 | // If the nested block does not exist in dest, add it 113 | dest.AppendBlock(patchNestedBlock) 114 | } else { 115 | for _, nb := range destNestedBlocks { 116 
| u.PatchWriteBlock(nb, patchNestedBlock) 117 | } 118 | } 119 | } 120 | } 121 | 122 | func (u *UpdateInPlaceTransform) String() string { 123 | content := make(map[string]any) 124 | content["id"] = u.Id() 125 | content["target_block_address"] = u.TargetBlockAddress 126 | content["patch"] = string(u.updateBlock.BuildTokens(nil).Bytes()) 127 | str, err := json.Marshal(content) 128 | if err != nil { 129 | panic(err.Error()) 130 | } 131 | return string(str) 132 | } 133 | 134 | // Copy from https://github.com/hashicorp/hcl/blob/v2.20.1/hclwrite/parser.go#L478-L517 135 | func writerTokens(nativeTokens hclsyntax.Tokens) hclwrite.Tokens { 136 | tokBuf := make([]hclwrite.Token, len(nativeTokens)) 137 | var lastByteOffset int 138 | for i, mainToken := range nativeTokens { 139 | bytes := make([]byte, len(mainToken.Bytes)) 140 | copy(bytes, mainToken.Bytes) 141 | 142 | tokBuf[i] = hclwrite.Token{ 143 | Type: mainToken.Type, 144 | Bytes: bytes, 145 | 146 | SpacesBefore: mainToken.Range.Start.Byte - lastByteOffset, 147 | } 148 | 149 | lastByteOffset = mainToken.Range.End.Byte 150 | } 151 | 152 | ret := make(hclwrite.Tokens, len(tokBuf)) 153 | for i := range ret { 154 | ret[i] = &tokBuf[i] 155 | } 156 | 157 | return ret 158 | } 159 | -------------------------------------------------------------------------------- /pkg/transform_new_block.go: -------------------------------------------------------------------------------- 1 | package pkg 2 | 3 | import ( 4 | "fmt" 5 | "strings" 6 | 7 | "github.com/Azure/golden" 8 | "github.com/hashicorp/hcl/v2" 9 | "github.com/hashicorp/hcl/v2/hclsyntax" 10 | "github.com/hashicorp/hcl/v2/hclwrite" 11 | avmfix "github.com/lonegunmanb/avmfix/pkg" 12 | "github.com/zclconf/go-cty/cty" 13 | ) 14 | 15 | var _ golden.ApplyBlock = &NewBlockTransform{} 16 | var _ golden.CustomDecode = &NewBlockTransform{} 17 | 18 | type NewBlockTransform struct { 19 | *golden.BaseBlock 20 | *BaseTransform 21 | NewBlockType string `hcl:"new_block_type"` 22 | FileName string 
`hcl:"filename" validate:"endswith=.tf"` 23 | Labels []string `hcl:"labels,optional"` 24 | NewBody string `hcl:"body,optional"` 25 | newWriteBlock *hclwrite.Block 26 | } 27 | 28 | func (n *NewBlockTransform) Decode(block *golden.HclBlock, context *hcl.EvalContext) error { 29 | var err error 30 | n.NewBlockType, err = getRequiredStringAttribute("new_block_type", block, context) 31 | if err != nil { 32 | return err 33 | } 34 | n.FileName, err = getRequiredStringAttribute("filename", block, context) 35 | if err != nil { 36 | return err 37 | } 38 | var labels []string 39 | labelsAttr, ok := block.Attributes()["labels"] 40 | if ok { 41 | labelsValue, err := labelsAttr.Value(context) 42 | if err != nil { 43 | return fmt.Errorf("error while evaluating labels: %+v", err) 44 | } 45 | for i := 0; i < labelsValue.LengthInt(); i++ { 46 | labels = append(labels, labelsValue.Index(cty.NumberIntVal(int64(i))).AsString()) 47 | } 48 | } 49 | n.Labels = labels 50 | bodyStr, err := getOptionalStringAttribute("body", block, context) 51 | if err != nil { 52 | return err 53 | } 54 | if bodyStr != nil { 55 | n.NewBody = *bodyStr 56 | } 57 | n.newWriteBlock = hclwrite.NewBlock(n.NewBlockType, n.Labels) 58 | decodeByNestedBlock := false 59 | for _, b := range block.NestedBlocks() { 60 | if b.Type == "asraw" { 61 | decodeByNestedBlock = true 62 | if err := decodeAsRawBlock(n.newWriteBlock, b); err != nil { 63 | return err 64 | } 65 | continue 66 | } 67 | if b.Type == "asstring" { 68 | decodeByNestedBlock = true 69 | if err = decodeAsStringBlock(n.newWriteBlock, b, 0, context); err != nil { 70 | return err 71 | } 72 | continue 73 | } 74 | } 75 | if decodeByNestedBlock && n.NewBody != "" { 76 | return fmt.Errorf("can only set either one of `asraw`, `asstring` or `body`") 77 | } 78 | if n.NewBody != "" { 79 | newBody, diag := hclwrite.ParseConfig([]byte(fmt.Sprintf(`%s %s { 80 | %s 81 | }`, n.NewBlockType, strings.Join(n.Labels, " "), n.NewBody)), "", hcl.InitialPos) 82 | if diag.HasErrors() { 
83 | return fmt.Errorf("cannot decode body %s: %+v", n.NewBody, diag) 84 | } 85 | n.newWriteBlock = newBody.Body().Blocks()[0] 86 | } 87 | formattedBlock, err := n.Format(n.newWriteBlock) 88 | if err == nil { 89 | n.newWriteBlock = formattedBlock 90 | } 91 | return nil 92 | } 93 | 94 | func (n *NewBlockTransform) Type() string { 95 | return "new_block" 96 | } 97 | 98 | func (n *NewBlockTransform) Apply() error { 99 | n.Config().(*MetaProgrammingTFConfig).AddBlock(n.FileName, n.newWriteBlock) 100 | return nil 101 | } 102 | 103 | func (n *NewBlockTransform) NewWriteBlock() *hclwrite.Block { 104 | return n.newWriteBlock 105 | } 106 | 107 | func (n *NewBlockTransform) Format(block *hclwrite.Block) (*hclwrite.Block, error) { 108 | if block.Type() != "resource" && block.Type() != "data" && block.Type() != "variable" { 109 | return block, nil 110 | } 111 | bytes := block.BuildTokens(nil).Bytes() 112 | syntaxFile, diag := hclsyntax.ParseConfig(bytes, "dummy.hcl", hcl.InitialPos) 113 | if diag.HasErrors() { 114 | return nil, diag 115 | } 116 | syntaxBlock := syntaxFile.Body.(*hclsyntax.Body).Blocks[0] 117 | avmBlock := avmfix.NewHclBlock(syntaxBlock, block) 118 | if block.Type() == "resource" || block.Type() == "data" { 119 | resourceBlock := avmfix.BuildBlockWithSchema(avmBlock, &hcl.File{}) 120 | err := resourceBlock.AutoFix() 121 | return resourceBlock.HclBlock.WriteBlock, err 122 | } 123 | if block.Type() == "variable" { 124 | variableBlock := avmfix.BuildVariableBlock(&hcl.File{}, avmBlock) 125 | err := variableBlock.AutoFix() 126 | return variableBlock.Block.WriteBlock, err 127 | } 128 | return nil, nil 129 | } 130 | 131 | func getRequiredStringAttribute(name string, block *golden.HclBlock, context *hcl.EvalContext) (string, error) { 132 | attr, ok := block.Attributes()[name] 133 | if !ok { 134 | return "", fmt.Errorf("`%s` is required", name) 135 | } 136 | v, err := attr.Value(context) 137 | if err != nil { 138 | return "", err 139 | } 140 | if v.Type() != cty.String 
{ 141 | return "", fmt.Errorf("`%s` must be a string", name) 142 | } 143 | return v.AsString(), nil 144 | } 145 | 146 | func getOptionalStringAttribute(name string, block *golden.HclBlock, context *hcl.EvalContext) (*string, error) { 147 | attr, ok := block.Attributes()[name] 148 | if !ok { 149 | return nil, nil 150 | } 151 | v, err := attr.Value(context) 152 | if err != nil { 153 | return nil, err 154 | } 155 | if v.Type() != cty.String { 156 | return nil, fmt.Errorf("`%s` must be a string", name) 157 | } 158 | asString := v.AsString() 159 | return &asString, nil 160 | } 161 | -------------------------------------------------------------------------------- /pkg/data_data_source_test.go: -------------------------------------------------------------------------------- 1 | package pkg_test 2 | 3 | import ( 4 | "context" 5 | "encoding/json" 6 | "testing" 7 | 8 | "github.com/Azure/golden" 9 | "github.com/Azure/mapotf/pkg" 10 | filesystem "github.com/Azure/mapotf/pkg/fs" 11 | "github.com/Azure/mapotf/pkg/terraform" 12 | "github.com/prashantv/gostub" 13 | "github.com/stretchr/testify/assert" 14 | "github.com/stretchr/testify/require" 15 | "github.com/zclconf/go-cty/cty" 16 | ) 17 | 18 | func TestDataSourceData_QueryDataBlocks(t *testing.T) { 19 | cases := []struct { 20 | desc string 21 | tfCode string 22 | useForEach bool 23 | useCount bool 24 | expected cty.Value 25 | }{ 26 | { 27 | desc: "only one data block without count or for_each", 28 | tfCode: `data "fake_data" this { 29 | id = 123 30 | }`, 31 | expected: cty.ObjectVal(map[string]cty.Value{ 32 | "fake_data": cty.ObjectVal(map[string]cty.Value{ 33 | "this": cty.ObjectVal(map[string]cty.Value{ 34 | "id": cty.StringVal("123"), 35 | }), 36 | }), 37 | }), 38 | }, 39 | { 40 | desc: "count", 41 | tfCode: ` 42 | data "fake_data" this {} 43 | data "fake_data" that { 44 | count = 2 45 | } 46 | `, 47 | useCount: true, 48 | expected: cty.ObjectVal(map[string]cty.Value{ 49 | "fake_data": cty.ObjectVal(map[string]cty.Value{ 50 
| "that": cty.ObjectVal(map[string]cty.Value{ 51 | "count": cty.StringVal("2"), 52 | }), 53 | }), 54 | }), 55 | }, 56 | { 57 | desc: "for_each", 58 | tfCode: ` 59 | data "fake_data" this {} 60 | data "fake_data" that { 61 | for_each = toset([1,2,3]) 62 | } 63 | `, 64 | useForEach: true, 65 | expected: cty.ObjectVal(map[string]cty.Value{ 66 | "fake_data": cty.ObjectVal(map[string]cty.Value{ 67 | "that": cty.ObjectVal(map[string]cty.Value{ 68 | "for_each": cty.StringVal("toset([1,2,3])"), 69 | }), 70 | }), 71 | }), 72 | }, 73 | } 74 | for _, c := range cases { 75 | t.Run(c.desc, func(t *testing.T) { 76 | stub := gostub.Stub(&filesystem.Fs, fakeFs(map[string]string{ 77 | "/main.tf": c.tfCode, 78 | })).Stub(&terraform.RootBlockReflectionInformation, func(map[string]cty.Value, *terraform.RootBlock) {}) 79 | defer stub.Reset() 80 | cfg, err := pkg.NewMetaProgrammingTFConfig(&pkg.TerraformModuleRef{ 81 | Dir: "/", 82 | AbsDir: "/", 83 | }, nil, nil, nil, context.TODO()) 84 | require.NoError(t, err) 85 | 86 | // Use the config to create a DataSourceData object 87 | data := &pkg.DataSourceData{ 88 | BaseBlock: golden.NewBaseBlock(cfg, nil), 89 | DataSourceType: "fake_data", 90 | UseCount: c.useCount, 91 | UseForEach: c.useForEach, 92 | } 93 | 94 | err = data.ExecuteDuringPlan() 95 | require.NoError(t, err) 96 | 97 | result := golden.Value(data) 98 | 99 | expected := map[string]cty.Value{ 100 | "data_source_type": cty.StringVal("fake_data"), 101 | "use_count": cty.BoolVal(c.useCount), 102 | "use_for_each": cty.BoolVal(c.useForEach), 103 | "result": c.expected, 104 | } 105 | assert.Equal(t, expected, result) 106 | }) 107 | } 108 | } 109 | 110 | func TestDataSourceData_CustomizedToStringShouldContainsAllFields(t *testing.T) { 111 | stub := gostub.Stub(&filesystem.Fs, fakeFs(map[string]string{ 112 | "/main.tf": `data "fake_data" this { 113 | id = 123 114 | }`, 115 | })) 116 | defer stub.Reset() 117 | cfg, err := pkg.NewMetaProgrammingTFConfig(&pkg.TerraformModuleRef{ 118 | Dir: 
"/", 119 | AbsDir: "/", 120 | }, nil, nil, nil, context.TODO()) 121 | require.NoError(t, err) 122 | 123 | data := &pkg.DataSourceData{ 124 | BaseBlock: golden.NewBaseBlock(cfg, nil), 125 | DataSourceType: "fake_data", 126 | UseCount: false, 127 | UseForEach: false, 128 | } 129 | 130 | err = data.ExecuteDuringPlan() 131 | require.NoError(t, err) 132 | 133 | var sut map[string]any 134 | err = json.Unmarshal([]byte(data.String()), &sut) 135 | require.NoError(t, err) 136 | assert.Contains(t, sut, "data_source_type") 137 | assert.Contains(t, sut, "use_count") 138 | assert.Contains(t, sut, "use_for_each") 139 | assert.Contains(t, sut, "result") 140 | } 141 | 142 | func TestDataSourceData_DifferentDataSourcesHaveAttributesWithSameNameButDifferentSchema(t *testing.T) { 143 | stub := gostub.Stub(&filesystem.Fs, fakeFs(map[string]string{ 144 | "/main.tf": ` 145 | data "azurerm_application_gateway" this { 146 | sku { 147 | name = "Standard_v2" 148 | tier = "Standard_v2" 149 | capacity = 1 150 | } 151 | } 152 | 153 | data "azurerm_public_ip" "pip" { 154 | sku = "Standard" 155 | } 156 | 157 | `, 158 | })) 159 | defer stub.Reset() 160 | cfg, err := pkg.NewMetaProgrammingTFConfig(&pkg.TerraformModuleRef{ 161 | Dir: "/", 162 | AbsDir: "/", 163 | }, nil, nil, nil, context.TODO()) 164 | require.NoError(t, err) 165 | 166 | data := &pkg.DataSourceData{ 167 | BaseBlock: golden.NewBaseBlock(cfg, nil), 168 | } 169 | 170 | err = data.ExecuteDuringPlan() 171 | require.NoError(t, err) 172 | 173 | var sut map[string]any 174 | err = json.Unmarshal([]byte(data.String()), &sut) 175 | require.NoError(t, err) 176 | assert.Contains(t, sut, "data_source_type") 177 | assert.Contains(t, sut, "use_count") 178 | assert.Contains(t, sut, "use_for_each") 179 | assert.Contains(t, sut, "result") 180 | } 181 | -------------------------------------------------------------------------------- /pkg/data_resource_test.go: -------------------------------------------------------------------------------- 1 | package 
pkg_test 2 | 3 | import ( 4 | "context" 5 | "encoding/json" 6 | "testing" 7 | 8 | "github.com/Azure/golden" 9 | "github.com/Azure/mapotf/pkg" 10 | filesystem "github.com/Azure/mapotf/pkg/fs" 11 | "github.com/Azure/mapotf/pkg/terraform" 12 | "github.com/prashantv/gostub" 13 | "github.com/stretchr/testify/assert" 14 | "github.com/stretchr/testify/require" 15 | "github.com/zclconf/go-cty/cty" 16 | ) 17 | 18 | func TestResourceData_QueryResourceBlocks(t *testing.T) { 19 | cases := []struct { 20 | desc string 21 | tfCode string 22 | useForEach bool 23 | useCount bool 24 | expected cty.Value 25 | }{ 26 | { 27 | desc: "only one resource block without count or for_each", 28 | tfCode: `resource "fake_resource" this { 29 | id = 123 30 | }`, 31 | expected: cty.ObjectVal(map[string]cty.Value{ 32 | "fake_resource": cty.ObjectVal(map[string]cty.Value{ 33 | "this": cty.ObjectVal(map[string]cty.Value{ 34 | "id": cty.StringVal("123"), 35 | }), 36 | }), 37 | }), 38 | }, 39 | { 40 | desc: "count", 41 | tfCode: ` 42 | resource "fake_resource" this {} 43 | resource "fake_resource" that { 44 | count = 2 45 | } 46 | `, 47 | useCount: true, 48 | expected: cty.ObjectVal(map[string]cty.Value{ 49 | "fake_resource": cty.ObjectVal(map[string]cty.Value{ 50 | "that": cty.ObjectVal(map[string]cty.Value{ 51 | "count": cty.StringVal("2"), 52 | }), 53 | }), 54 | }), 55 | }, 56 | { 57 | desc: "for_each", 58 | tfCode: ` 59 | resource "fake_resource" this {} 60 | resource "fake_resource" that { 61 | for_each = toset([1,2,3]) 62 | } 63 | `, 64 | useForEach: true, 65 | expected: cty.ObjectVal(map[string]cty.Value{ 66 | "fake_resource": cty.ObjectVal(map[string]cty.Value{ 67 | "that": cty.ObjectVal(map[string]cty.Value{ 68 | "for_each": cty.StringVal("toset([1,2,3])"), 69 | }), 70 | }), 71 | }), 72 | }, 73 | } 74 | for _, c := range cases { 75 | t.Run(c.desc, func(t *testing.T) { 76 | stub := gostub.Stub(&filesystem.Fs, fakeFs(map[string]string{ 77 | "/main.tf": c.tfCode, 78 | 
})).Stub(&terraform.RootBlockReflectionInformation, func(map[string]cty.Value, *terraform.RootBlock) {}) 79 | defer stub.Reset() 80 | cfg, err := pkg.NewMetaProgrammingTFConfig(&pkg.TerraformModuleRef{ 81 | Dir: "/", 82 | AbsDir: "/", 83 | }, nil, nil, nil, context.TODO()) 84 | require.NoError(t, err) 85 | 86 | // Use the config to create a ResourceData object 87 | data := &pkg.ResourceData{ 88 | BaseBlock: golden.NewBaseBlock(cfg, nil), 89 | ResourceType: "fake_resource", 90 | UseCount: c.useCount, 91 | UseForEach: c.useForEach, 92 | } 93 | 94 | err = data.ExecuteDuringPlan() 95 | require.NoError(t, err) 96 | 97 | result := golden.Value(data) 98 | 99 | expected := map[string]cty.Value{ 100 | "resource_type": cty.StringVal("fake_resource"), 101 | "use_count": cty.BoolVal(c.useCount), 102 | "use_for_each": cty.BoolVal(c.useForEach), 103 | "result": c.expected, 104 | } 105 | assert.Equal(t, expected, result) 106 | }) 107 | } 108 | } 109 | 110 | func TestResourceData_CustomizedToStringShouldContainsAllFields(t *testing.T) { 111 | stub := gostub.Stub(&filesystem.Fs, fakeFs(map[string]string{ 112 | "/main.tf": `resource "fake_resource" this { 113 | id = 123 114 | }`, 115 | })) 116 | defer stub.Reset() 117 | cfg, err := pkg.NewMetaProgrammingTFConfig(&pkg.TerraformModuleRef{ 118 | Dir: "/", 119 | AbsDir: "/", 120 | }, nil, nil, nil, context.TODO()) 121 | require.NoError(t, err) 122 | 123 | data := &pkg.ResourceData{ 124 | BaseBlock: golden.NewBaseBlock(cfg, nil), 125 | ResourceType: "fake_resource", 126 | UseCount: false, 127 | UseForEach: false, 128 | } 129 | 130 | err = data.ExecuteDuringPlan() 131 | require.NoError(t, err) 132 | 133 | var sut map[string]any 134 | err = json.Unmarshal([]byte(data.String()), &sut) 135 | require.NoError(t, err) 136 | assert.Contains(t, sut, "resource_type") 137 | assert.Contains(t, sut, "use_count") 138 | assert.Contains(t, sut, "use_for_each") 139 | assert.Contains(t, sut, "result") 140 | } 141 | 142 | func 
TestResourceData_DifferentResourcesHaveAttributesWithSameNameButDifferentSchema(t *testing.T) { 143 | stub := gostub.Stub(&filesystem.Fs, fakeFs(map[string]string{ 144 | "/main.tf": ` 145 | resource "azurerm_application_gateway" this { 146 | sku { 147 | name = "Standard_v2" 148 | tier = "Standard_v2" 149 | capacity = 1 150 | } 151 | } 152 | 153 | resource "azurerm_public_ip" "pip" { 154 | sku = "Standard" 155 | } 156 | 157 | `, 158 | })) 159 | defer stub.Reset() 160 | cfg, err := pkg.NewMetaProgrammingTFConfig(&pkg.TerraformModuleRef{ 161 | Dir: "/", 162 | AbsDir: "/", 163 | }, nil, nil, nil, context.TODO()) 164 | require.NoError(t, err) 165 | 166 | data := &pkg.ResourceData{ 167 | BaseBlock: golden.NewBaseBlock(cfg, nil), 168 | } 169 | 170 | err = data.ExecuteDuringPlan() 171 | require.NoError(t, err) 172 | 173 | var sut map[string]any 174 | err = json.Unmarshal([]byte(data.String()), &sut) 175 | require.NoError(t, err) 176 | assert.Contains(t, sut, "resource_type") 177 | assert.Contains(t, sut, "use_count") 178 | assert.Contains(t, sut, "use_for_each") 179 | assert.Contains(t, sut, "result") 180 | } 181 | -------------------------------------------------------------------------------- /pkg/backup/backup_test.go: -------------------------------------------------------------------------------- 1 | package backup 2 | 3 | import ( 4 | filesystem "github.com/Azure/mapotf/pkg/fs" 5 | "github.com/stretchr/testify/assert" 6 | "github.com/stretchr/testify/require" 7 | "path/filepath" 8 | "testing" 9 | 10 | "github.com/prashantv/gostub" 11 | "github.com/spf13/afero" 12 | ) 13 | 14 | func TestMetaProgrammingTFPlan_OnlyTransformThatHasTargetShouldBeInThePlan(t *testing.T) { 15 | dir := "cfg" 16 | expectedContent := `resource "fake_resource" this { 17 | }` 18 | stub := gostub.Stub(&filesystem.Fs, fakeFs(map[string]string{ 19 | filepath.Join(dir, "main.tf"): expectedContent, 20 | filepath.Join(dir, "non-terraform-file.txt"): "", 21 | filepath.Join("etc", "terraform.tf"): 
"should_not_be_copied", 22 | })) 23 | defer stub.Reset() 24 | err := BackupFolder(dir) 25 | require.NoError(t, err) 26 | content, err := afero.ReadFile(filesystem.Fs, filepath.Join(dir, "main.tf"+BackupExtension)) 27 | require.NoError(t, err) 28 | assert.Equal(t, expectedContent, string(content)) 29 | exists, err := afero.Exists(filesystem.Fs, filepath.Join(dir, "non-terraform-file.txt"+BackupExtension)) 30 | require.NoError(t, err) 31 | assert.False(t, exists) 32 | exists, err = afero.Exists(filesystem.Fs, filepath.Join("etc", "terraform.tf"+BackupExtension)) 33 | require.NoError(t, err) 34 | assert.False(t, exists) 35 | } 36 | 37 | func TestBackupFolder_BackupFileAlreadyExists(t *testing.T) { 38 | dir := "cfg" 39 | originalContent := `resource "fake_resource" this { 40 | }` 41 | backupContent := `resource "fake_resource" this { 42 | } backup` 43 | stub := gostub.Stub(&filesystem.Fs, fakeFs(map[string]string{ 44 | filepath.Join(dir, "main.tf"): originalContent, 45 | filepath.Join(dir, "main.tf"+BackupExtension): backupContent, 46 | })) 47 | defer stub.Reset() 48 | err := BackupFolder(dir) 49 | require.NoError(t, err) 50 | content, err := afero.ReadFile(filesystem.Fs, filepath.Join(dir, "main.tf"+BackupExtension)) 51 | require.NoError(t, err) 52 | assert.Equal(t, backupContent, string(content)) 53 | } 54 | 55 | func TestRestoreBackup(t *testing.T) { 56 | dir := "cfg" 57 | originalContent := `resource "fake_resource" this { 58 | }` 59 | backupContent := `resource "fake_resource" this { 60 | } backup` 61 | stub := gostub.Stub(&filesystem.Fs, fakeFs(map[string]string{ 62 | filepath.Join(dir, "main.tf"): originalContent, 63 | filepath.Join(dir, "main.tf"+BackupExtension): backupContent, 64 | })) 65 | defer stub.Reset() 66 | err := Reset(dir) 67 | require.NoError(t, err) 68 | content, err := afero.ReadFile(filesystem.Fs, filepath.Join(dir, "main.tf")) 69 | require.NoError(t, err) 70 | assert.Equal(t, backupContent, string(content)) 71 | exists, err := 
afero.Exists(filesystem.Fs, filepath.Join(dir, "main.tf"+BackupExtension)) 72 | require.NoError(t, err) 73 | assert.False(t, exists) 74 | } 75 | 76 | func TestClearBackup_NewFileShouldBeRemoved(t *testing.T) { 77 | dir := "cfg" 78 | newFileContent := `resource "new_fake_resource" this { 79 | }` 80 | stub := gostub.Stub(&filesystem.Fs, fakeFs(map[string]string{ 81 | filepath.Join(dir, "main.tf"): newFileContent, 82 | filepath.Join(dir, "main.tf"+NewFileExtension): "", 83 | })) 84 | defer stub.Reset() 85 | err := ClearBackup(dir) 86 | require.NoError(t, err) 87 | exists, err := afero.Exists(filesystem.Fs, filepath.Join(dir, "main.tf"+NewFileExtension)) 88 | require.NoError(t, err) 89 | assert.False(t, exists) 90 | exists, err = afero.Exists(filesystem.Fs, filepath.Join(dir, "main.tf")) 91 | require.NoError(t, err) 92 | assert.True(t, exists) 93 | content, err := afero.ReadFile(filesystem.Fs, filepath.Join(dir, "main.tf")) 94 | require.NoError(t, err) 95 | assert.Equal(t, newFileContent, string(content)) 96 | } 97 | 98 | func TestReset_NewFileShouldBeRemoved(t *testing.T) { 99 | dir := "cfg" 100 | newFileContent := `resource "new_fake_resource" this { 101 | }` 102 | stub := gostub.Stub(&filesystem.Fs, fakeFs(map[string]string{ 103 | filepath.Join(dir, "main.tf"): newFileContent, 104 | filepath.Join(dir, "main.tf"+NewFileExtension): "", 105 | })) 106 | defer stub.Reset() 107 | err := Reset(dir) 108 | require.NoError(t, err) 109 | exists, err := afero.Exists(filesystem.Fs, filepath.Join(dir, "main.tf"+NewFileExtension)) 110 | require.NoError(t, err) 111 | assert.False(t, exists) 112 | exists, err = afero.Exists(filesystem.Fs, filepath.Join(dir, "main.tf")) 113 | require.NoError(t, err) 114 | assert.False(t, exists) 115 | } 116 | 117 | func fakeFs(files map[string]string) afero.Fs { 118 | fs := afero.NewMemMapFs() 119 | for n, content := range files { 120 | _ = afero.WriteFile(fs, n, []byte(content), 0644) 121 | } 122 | return fs 123 | } 124 | 125 | func 
TestClearBackup(t *testing.T) { 126 | dir := "cfg" 127 | stub := gostub.Stub(&filesystem.Fs, fakeFs(map[string]string{ 128 | filepath.Join(dir, "main.tf"): "terraform content", 129 | filepath.Join(dir, "main.tf"+BackupExtension): "backupContent", 130 | })) 131 | defer stub.Reset() 132 | err := ClearBackup(dir) 133 | require.NoError(t, err) 134 | exists, err := afero.Exists(filesystem.Fs, filepath.Join(dir, "main.tf"+BackupExtension)) 135 | require.NoError(t, err) 136 | assert.False(t, exists) 137 | exists, err = afero.Exists(filesystem.Fs, filepath.Join(dir, "main.tf")) 138 | require.NoError(t, err) 139 | assert.True(t, exists) 140 | } 141 | -------------------------------------------------------------------------------- /example/customize_aks_ignore_changes/readme.md: -------------------------------------------------------------------------------- 1 | # Customize AKS Ignore Changes Example 2 | 3 | This example demonstrates how to customize the `ignore_changes` lifecycle rule for an Azure Kubernetes Service (AKS) cluster using Terraform. 4 | 5 | ## Purpose 6 | 7 | The primary purpose of the `customize_aks_ignore_changes` configuration is, sometimes you're using a public AKS module which contains an `azurerm_kubernetes_cluster` resource block, and your organization has a policy that requires you to ignore changes to certain attributes of the AKS cluster. This configuration allows you to customize the `ignore_changes` lifecycle rule for the `azurerm_kubernetes_cluster` resource block based on your organization's policy. 8 | 9 | ## Configuration 10 | 11 | 1. **Fetch All AKS Cluster Resources**: The `data "resource" aks` block is used to fetch all `azurerm_kubernetes_cluster` resources in the Terraform configuration. 12 | 13 | ```terraform 14 | data "resource" aks { 15 | resource_type = "azurerm_kubernetes_cluster" 16 | } 17 | ``` 18 | 19 | 2. 
**Patch Resource's `lifecycle` attribute** 20 | 21 | ```terraform 22 | transform "update_in_place" aks_ignore_changes { 23 | for_each = try(data.resource.aks.result.azurerm_kubernetes_cluster, {}) 24 | target_block_address = each.value.mptf.block_address 25 | asstring { 26 | lifecycle { 27 | ignore_changes = "[\nmicrosoft_defender[0].log_analytics_workspace_id, ${trimprefix(try(each.value.lifecycle.0.ignore_changes, "[\n]"), "[")}" 28 | } 29 | } 30 | } 31 | ``` 32 | 33 | In this example, the `for_each` argument is set to the result of the `azurerm_kubernetes_cluster` data source. The `target_block_address` argument is set to the block address of each resource in the collection. The `asstring` attribute is used to define a transformation that updates the `ignore_changes` lifecycle setting of each resource. The `ignore_changes` attribute is set to a list of attributes that should be ignored when determining whether to update the resource. We've tried to merge the original `ignore_changes` list with the new attribute `microsoft_defender[0].log_analytics_workspace_id`. Of course, you can define your own Mapotf `variable` block here to make `microsoft_defender[0].log_analytics_workspace_id` configurable. 34 | 35 | ## Example 36 | 37 | ```terraform 38 | variable "resource_group_name" { 39 | type = string 40 | default = "aks_test" 41 | } 42 | 43 | provider "azurerm" { 44 | features {} 45 | } 46 | 47 | resource "random_pet" "this" {} 48 | 49 | resource "azurerm_resource_group" "rg" { 50 | location = "eastus" 51 | name = "${var.resource_group_name}-${random_pet.this.id}" 52 | } 53 | 54 | module "aks" { 55 | source = "Azure/aks/azurerm" 56 | version = "9.1.0" 57 | 58 | cluster_name = "aks-test" 59 | prefix = "akstest" 60 | resource_group_name = azurerm_resource_group.rg.name 61 | rbac_aad = false 62 | } 63 | ``` 64 | 65 | Run `terraform init`, or `mapotf init`, ensure that module `Azure/aks/azurerm` is downloaded at `.terraform/modules/aks` folder. 
66 | 67 | After running `mapotf transform -r --mptf-dir . --tf-dir .`, the `ignore_changes` argument in `lifecycle` setting would be applied to the resources based on the configuration. You can check configs under `.terraform/modules` to see the changes, all `azurerm_kubernetes_cluster` blocks declared in the referenced module would also be updated. 68 | 69 | Or, you can run `mapotf apply -r --mptf-dir . --tf-dir .`. `mapotf` would apply the transformation and then run `terraform apply` to apply the changes to the Terraform configuration. No matter how Terraform ends up applying the changes, after apply all transformations made by `mapotf` would be reverted. These transformations only exist while Terraform is running. 70 | 71 | Before transformation, the `ignore_changes` in `.terraform/modules/aks/main.tf` file would look like this: 72 | 73 | ```terraform 74 | ignore_changes = [ 75 | http_application_routing_enabled, 76 | http_proxy_config[0].no_proxy, 77 | kubernetes_version, 78 | public_network_access_enabled, 79 | # we might have a random suffix in cluster's name so we have to ignore it here, but we've traced user supplied cluster name by `null_resource.kubernetes_cluster_name_keeper` so when the name is changed we'll recreate this resource. 80 | name, 81 | ] 82 | ``` 83 | 84 | After transformation, the `ignore_changes` in `.terraform/modules/aks/main.tf` file would look like this: 85 | 86 | ```terraform 87 | ignore_changes = [ 88 | microsoft_defender[0].log_analytics_workspace_id, 89 | http_application_routing_enabled, 90 | http_proxy_config[0].no_proxy, 91 | kubernetes_version, 92 | public_network_access_enabled, 93 | # we might have a random suffix in cluster's name so we have to ignore it here, but we've traced user supplied cluster name by `null_resource.kubernetes_cluster_name_keeper` so when the name is changed we'll recreate this resource. 
94 | name, 95 | ] 96 | ``` 97 | 98 | You may notice that the `microsoft_defender[0].log_analytics_workspace_id` attribute is added to the `ignore_changes` list. 99 | 100 | In summary, this example demonstrates how to use the `mapotf` tool to customize the `ignore_changes` lifecycle setting of Terraform resources. The `update_in_place` transform block is a powerful tool that allows you to modify existing blocks in place, making it possible to add or modify lifecycle settings without recreating resources. -------------------------------------------------------------------------------- /doc/t/remove_block_element.md: -------------------------------------------------------------------------------- 1 | # `remove_block_element` Transform Block 2 | 3 | The `remove_block_element` transform block is a tool in Mapotf that allows you to remove specific content from existing blocks. This is useful when you need to clean up or modify configurations by removing unwanted nested blocks or attributes. 4 | 5 | ## Arguments 6 | 7 | - `target_block_address`: This argument specifies the address of the block from which the content will be removed. The block address is a string that uniquely identifies a block in a Terraform configuration. 8 | 9 | - `paths`: This argument is a list of strings, each representing a path to the content that should be removed. The path can point to nested blocks or attributes within the target block. 10 | 11 | ## Example 12 | 13 | Here is an example of how to use the `remove_block_element` transform block to remove nested blocks and attributes from a resource: 14 | 15 | ```terraform 16 | transform "remove_block_element" this { 17 | target_block_address = "resource.fake_resource.this" 18 | paths = ["nested_block", "nested_block2/attr"] 19 | } 20 | ``` 21 | 22 | In this example, the `target_block_address` is set to the block address of the `fake_resource` resource. The `paths` argument specifies two paths: `nested_block` and `nested_block2/attr`. 
The first path removes the `nested_block` block, and the second path removes the `attr` attribute from the `nested_block2` block. 23 | 24 | ## Detailed Behavior 25 | 26 | The `remove_block_element` transform block works by traversing the specified paths and removing the corresponding content from the target block. The paths can point to both nested blocks and attributes. If a path points to a nested block, the entire block is removed. If a path points to an attribute, only the attribute is removed. 27 | 28 | ### Example Scenarios 29 | 30 | 1. **Removing a Single Nested Block**: 31 | ```terraform 32 | transform "remove_block_element" this { 33 | target_block_address = "resource.fake_resource.this" 34 | paths = ["nested_block"] 35 | } 36 | ``` 37 | 38 | ```terraform 39 | resource "fake_resource" this { 40 | nested_block {} 41 | non_target_block {} 42 | } 43 | ``` 44 | 45 | After applying the transform: 46 | ```terraform 47 | resource "fake_resource" this { 48 | non_target_block {} 49 | } 50 | ``` 51 | 52 | 2. **Removing Multiple Nested Blocks**: 53 | ```terraform 54 | transform "remove_block_element" this { 55 | target_block_address = "resource.fake_resource.this" 56 | paths = ["nested_block", "nested_block2"] 57 | } 58 | ``` 59 | 60 | ```terraform 61 | resource "fake_resource" this { 62 | nested_block {} 63 | nested_block2 {} 64 | non_target_block {} 65 | } 66 | ``` 67 | 68 | After applying the transform: 69 | ```terraform 70 | resource "fake_resource" this { 71 | non_target_block {} 72 | } 73 | ``` 74 | 75 | 3. 
**Removing Deeply Nested Blocks**: 76 | ```terraform 77 | transform "remove_block_element" this { 78 | target_block_address = "resource.fake_resource.this" 79 | paths = ["nested_block/second_nested_block"] 80 | } 81 | ``` 82 | 83 | ```terraform 84 | resource "fake_resource" this { 85 | nested_block { 86 | non_target_block {} 87 | } 88 | nested_block { 89 | second_nested_block {} 90 | } 91 | non_target_block {} 92 | } 93 | ``` 94 | 95 | After applying the transform: 96 | ```terraform 97 | resource "fake_resource" this { 98 | nested_block { 99 | non_target_block {} 100 | } 101 | nested_block {} 102 | non_target_block {} 103 | } 104 | ``` 105 | 106 | 4. **Removing Attributes**: 107 | ```terraform 108 | transform "remove_block_element" this { 109 | target_block_address = "resource.fake_resource.this" 110 | paths = ["attr"] 111 | } 112 | ``` 113 | 114 | ```terraform 115 | resource "fake_resource" this { 116 | attr = 1 117 | nested_block { 118 | attr = "hello" 119 | } 120 | } 121 | ``` 122 | 123 | After applying the transform: 124 | ```terraform 125 | resource "fake_resource" this { 126 | nested_block { 127 | attr = "hello" 128 | } 129 | } 130 | ``` 131 | 132 | 5. **Removing Attributes in Nested Blocks**: 133 | ```terraform 134 | transform "remove_block_element" this { 135 | target_block_address = "resource.fake_resource.this" 136 | paths = ["nested_block/attr"] 137 | } 138 | ``` 139 | 140 | ```terraform 141 | resource "fake_resource" this { 142 | attr = 1 143 | nested_block { 144 | attr = "hello" 145 | } 146 | } 147 | ``` 148 | 149 | After applying the transform: 150 | ```terraform 151 | resource "fake_resource" this { 152 | attr = 1 153 | nested_block {} 154 | } 155 | ``` 156 | 157 | 6. 
**Removing Attributes in Dynamic Nested Blocks**: 158 | ```terraform 159 | transform "remove_block_element" this { 160 | target_block_address = "resource.fake_resource.this" 161 | paths = ["nested_block/attr"] 162 | } 163 | ``` 164 | 165 | ```terraform 166 | resource "fake_resource" this { 167 | attr = 1 168 | dynamic "nested_block" { 169 | for_each = [1] 170 | content { 171 | attr = "hello" 172 | } 173 | } 174 | } 175 | ``` 176 | 177 | After applying the transform: 178 | ```terraform 179 | resource "fake_resource" this { 180 | attr = 1 181 | dynamic "nested_block" { 182 | for_each = [1] 183 | content {} 184 | } 185 | } 186 | ``` 187 | 188 | In summary, the `remove_block_element` transform block is a versatile tool for cleaning up and modifying Terraform configurations by removing unwanted nested blocks and attributes. -------------------------------------------------------------------------------- /example/new_private_endpoint_for_cognitive_account/main.mptf.hcl: -------------------------------------------------------------------------------- 1 | transform "new_block" "private_endpoints_variable" { 2 | for_each = try(data.resource.cognitive_account.result.azurerm_cognitive_account, {}) 3 | new_block_type = "variable" 4 | filename = "main.tf" 5 | labels = ["private_endpoints"] 6 | asraw { 7 | type = map(object({ 8 | name = optional(string, null) 9 | role_assignments = optional(map(object({})), {}) 10 | lock = optional(object({}), {}) 11 | tags = optional(map(any), null) 12 | subnet_resource_id = string 13 | private_dns_zone_group_name = optional(string, "default") 14 | private_dns_zone_resource_ids = optional(set(string), []) 15 | application_security_group_associations = optional(map(string), {}) 16 | private_service_connection_name = optional(string, null) 17 | network_interface_name = optional(string, null) 18 | location = optional(string, null) 19 | resource_group_name = optional(string, null) 20 | ip_configurations = optional(map(object({ 21 | name = string 22 
| private_ip_address = string 23 | })), {}) 24 | })) 25 | default = {} 26 | description = <<-DESCRIPTION 27 | A map of private endpoints to create on the Key Vault. The map key is deliberately arbitrary to avoid issues where map keys maybe unknown at plan time. 28 | 29 | - `name` - (Optional) The name of the private endpoint. One will be generated if not set. 30 | - `role_assignments` - (Optional) A map of role assignments to create on the private endpoint. The map key is deliberately arbitrary to avoid issues where map keys maybe unknown at plan time. See `var.role_assignments` for more information. 31 | - `lock` - (Optional) The lock level to apply to the private endpoint. Default is `None`. Possible values are `None`, `CanNotDelete`, and `ReadOnly`. 32 | - `tags` - (Optional) A mapping of tags to assign to the private endpoint. 33 | - `subnet_resource_id` - The resource ID of the subnet to deploy the private endpoint in. 34 | - `private_dns_zone_group_name` - (Optional) The name of the private DNS zone group. One will be generated if not set. 35 | - `private_dns_zone_resource_ids` - (Optional) A set of resource IDs of private DNS zones to associate with the private endpoint. If not set, no zone groups will be created and the private endpoint will not be associated with any private DNS zones. DNS records must be managed external to this module. 36 | - `application_security_group_resource_ids` - (Optional) A map of resource IDs of application security groups to associate with the private endpoint. The map key is deliberately arbitrary to avoid issues where map keys maybe unknown at plan time. 37 | - `private_service_connection_name` - (Optional) The name of the private service connection. One will be generated if not set. 38 | - `network_interface_name` - (Optional) The name of the network interface. One will be generated if not set. 39 | - `location` - (Optional) The Azure location where the resources will be deployed. 
Defaults to the location of the resource group. 40 | - `resource_group_name` - (Optional) The resource group where the resources will be deployed. Defaults to the resource group of the Key Vault. 41 | - `ip_configurations` - (Optional) A map of IP configurations to create on the private endpoint. If not specified the platform will create one. The map key is deliberately arbitrary to avoid issues where map keys maybe unknown at plan time. 42 | - `name` - The name of the IP configuration. 43 | - `private_ip_address` - The private IP address of the IP configuration. 44 | DESCRIPTION 45 | nullable = false 46 | } 47 | } 48 | 49 | data "resource" "cognitive_account" { 50 | resource_type = "azurerm_cognitive_account" 51 | } 52 | 53 | transform "new_block" "private_endpoints_resource" { 54 | for_each = try(data.resource.cognitive_account.result.azurerm_cognitive_account, {}) 55 | filename = "main.tf" 56 | new_block_type = "resource" 57 | labels = ["azurerm_private_endpoint", "this"] 58 | asstring { 59 | location = "${each.value.mptf.terraform_address}.location" 60 | resource_group_name = "coalesce(each.value.resource_group_name, ${each.value.mptf.terraform_address}.resource_group_name)" 61 | name = "coalesce(each.value.name, ${each.value.mptf.terraform_address}.name)" 62 | private_service_connection { 63 | private_connection_resource_id = "${each.value.mptf.terraform_address}.id" 64 | name = "coalesce(each.value.private_service_connection_name, ${each.value.mptf.terraform_address}.name)" 65 | is_manual_connection = "false" 66 | subresource_names = "[\"account\"]" 67 | } 68 | } 69 | asraw { 70 | for_each = var.private_endpoints 71 | 72 | subnet_id = each.value.subnet_resource_id 73 | tags = each.value.tags 74 | 75 | dynamic "ip_configuration" { 76 | for_each = each.value.ip_configurations 77 | 78 | content { 79 | name = ip_configuration.value.name 80 | private_ip_address = ip_configuration.value.private_ip_address 81 | member_name = "account" 82 | subresource_name = 
"account" 83 | } 84 | } 85 | dynamic "private_dns_zone_group" { 86 | for_each = length(each.value.private_dns_zone_resource_ids) > 0 ? ["this"] : [] 87 | 88 | content { 89 | name = each.value.private_dns_zone_group_name 90 | private_dns_zone_ids = each.value.private_dns_zone_resource_ids 91 | } 92 | } 93 | } 94 | } 95 | -------------------------------------------------------------------------------- /pkg/terraform/root_block.go: -------------------------------------------------------------------------------- 1 | package terraform 2 | 3 | import ( 4 | "sort" 5 | "strings" 6 | 7 | "github.com/Azure/golden" 8 | "github.com/hashicorp/hcl/v2/hclsyntax" 9 | "github.com/hashicorp/hcl/v2/hclwrite" 10 | "github.com/zclconf/go-cty/cty" 11 | ) 12 | 13 | var _ Block = new(RootBlock) 14 | 15 | var RootBlockReflectionInformation = func(v map[string]cty.Value, b *RootBlock) { 16 | var moduleObj = cty.ObjectVal(map[string]cty.Value{ 17 | "key": cty.StringVal(""), 18 | "version": cty.StringVal(""), 19 | "source": cty.StringVal(""), 20 | "dir": cty.StringVal(""), 21 | "abs_dir": cty.StringVal(""), 22 | "git_hash": cty.StringVal(""), 23 | }) 24 | if b.module != nil { 25 | moduleObj = cty.ObjectVal(map[string]cty.Value{ 26 | "key": cty.StringVal(b.module.Key), 27 | "version": cty.StringVal(b.module.Version), 28 | "source": cty.StringVal(b.module.Source), 29 | "dir": cty.StringVal(b.module.Dir), 30 | "abs_dir": cty.StringVal(b.module.AbsDir), 31 | "git_hash": cty.StringVal(b.module.GitHash), 32 | }) 33 | } 34 | labels := golden.ToCtyValue(b.Labels) 35 | v["mptf"] = cty.ObjectVal(map[string]cty.Value{ 36 | "block_address": cty.StringVal(b.Address), 37 | "terraform_address": cty.StringVal(blockAddressToRef(b.Address)), 38 | "block_type": cty.StringVal(b.Type), 39 | "block_labels": labels, 40 | "module": moduleObj, 41 | "range": cty.ObjectVal(map[string]cty.Value{ 42 | "file_name": cty.StringVal(b.Range().Filename), 43 | "start_line": cty.NumberIntVal(int64(b.Range().Start.Line)), 44 | 
"start_column": cty.NumberIntVal(int64(b.Range().Start.Column)), 45 | "end_line": cty.NumberIntVal(int64(b.Range().End.Line)), 46 | "end_column": cty.NumberIntVal(int64(b.Range().End.Column)), 47 | }), 48 | }) 49 | } 50 | 51 | func blockAddressToRef(address string) string { 52 | if strings.HasPrefix(address, "resource.") { 53 | return strings.TrimPrefix(address, "resource.") 54 | } 55 | return address 56 | } 57 | 58 | type RootBlock struct { 59 | *hclsyntax.Block 60 | module *Module 61 | WriteBlock *hclwrite.Block 62 | Count *Attribute 63 | ForEach *Attribute 64 | Attributes map[string]*Attribute 65 | NestedBlocks NestedBlocks 66 | Type string 67 | Labels []string 68 | Address string 69 | } 70 | 71 | func (b *RootBlock) RemoveContent(path string) { 72 | unlock := lockBlockFile(b) 73 | defer unlock() 74 | b.removeContent(b.WriteBlock, path) 75 | } 76 | 77 | func (b *RootBlock) removeContent(wb *hclwrite.Block, path string) { 78 | segs := strings.Split(path, ".") 79 | var nbs []*hclwrite.Block 80 | if wb.Type() == "dynamic" { 81 | nbs = wb.Body().Blocks() 82 | } 83 | for _, nb := range nbs { 84 | if nb.Type() != "content" { 85 | continue 86 | } 87 | wb = nb 88 | break 89 | } 90 | _, ok := wb.Body().Attributes()[segs[0]] 91 | if ok { 92 | wb.Body().RemoveAttribute(segs[0]) 93 | return 94 | } 95 | 96 | nbs = make([]*hclwrite.Block, 0) 97 | for _, nb := range wb.Body().Blocks() { 98 | if nb.Type() == segs[0] || (nb.Type() == "dynamic" && nb.Labels()[0] == segs[0]) { 99 | nbs = append(nbs, nb) 100 | } 101 | } 102 | if len(nbs) == 0 { 103 | return 104 | } 105 | if len(segs) == 1 { 106 | for _, nb := range nbs { 107 | wb.Body().RemoveBlock(nb) 108 | } 109 | return 110 | } 111 | for _, nb := range nbs { 112 | b.removeContent(nb, strings.Join(segs[1:], ".")) 113 | } 114 | } 115 | 116 | func (b *RootBlock) SetAttributeRaw(name string, tokens hclwrite.Tokens) { 117 | unlock := lockBlockFile(b) 118 | defer unlock() 119 | b.WriteBody().SetAttributeRaw(name, tokens) 120 | } 121 | 
122 | func (b *RootBlock) AppendBlock(block *hclwrite.Block) { 123 | unlock := lockBlockFile(b) 124 | defer unlock() 125 | b.WriteBody().AppendBlock(block) 126 | } 127 | 128 | func (b *RootBlock) WriteBody() *hclwrite.Body { 129 | return b.WriteBlock.Body() 130 | } 131 | 132 | func (b *RootBlock) GetAttributes() map[string]*Attribute { 133 | return b.Attributes 134 | } 135 | 136 | func (b *RootBlock) GetNestedBlocks() NestedBlocks { 137 | return b.NestedBlocks 138 | } 139 | 140 | func NewBlock(m *Module, rb *hclsyntax.Block, wb *hclwrite.Block) *RootBlock { 141 | b := &RootBlock{ 142 | Type: rb.Type, 143 | Labels: rb.Labels, 144 | Address: strings.Join(append([]string{rb.Type}, rb.Labels...), "."), 145 | Block: rb, 146 | WriteBlock: wb, 147 | module: m, 148 | } 149 | if countAttr, ok := rb.Body.Attributes["count"]; ok { 150 | b.Count = NewAttribute("count", countAttr, wb.Body().GetAttribute("count")) 151 | } 152 | if forEachAttr, ok := rb.Body.Attributes["for_each"]; ok { 153 | b.ForEach = NewAttribute("for_each", forEachAttr, wb.Body().GetAttribute("for_each")) 154 | } 155 | b.Attributes = attributes(rb.Body, wb.Body()) 156 | b.NestedBlocks = nestedBlocks(rb.Body, wb.Body()) 157 | return b 158 | } 159 | 160 | func (b *RootBlock) EvalContext() cty.Value { 161 | v := map[string]cty.Value{} 162 | RootBlockReflectionInformation(v, b) 163 | for n, a := range b.Attributes { 164 | v[n] = cty.StringVal(a.String()) 165 | } 166 | if b.Count != nil { 167 | v["count"] = cty.StringVal(b.Count.String()) 168 | } 169 | if b.ForEach != nil { 170 | v["for_each"] = cty.StringVal(b.ForEach.String()) 171 | } 172 | for k, values := range b.NestedBlocks.Values() { 173 | v[k] = values 174 | } 175 | return cty.ObjectVal(v) 176 | } 177 | 178 | func attributes(rb *hclsyntax.Body, wb *hclwrite.Body) map[string]*Attribute { 179 | attributes := rb.Attributes 180 | r := make(map[string]*Attribute, len(attributes)) 181 | for name, attribute := range attributes { 182 | r[name] = 
NewAttribute(name, attribute, wb.GetAttribute(name)) 183 | } 184 | return r 185 | } 186 | 187 | func nestedBlocks(rb *hclsyntax.Body, wb *hclwrite.Body) NestedBlocks { 188 | blocks := rb.Blocks 189 | r := make(map[string][]*NestedBlock) 190 | for i, block := range blocks { 191 | nb := NewNestedBlock(block, wb.Blocks()[i]) 192 | r[nb.Type] = append(r[nb.Type], nb) 193 | } 194 | for _, v := range r { 195 | sort.Slice(v, func(i, j int) bool { 196 | return v[i].Range().Start.Line < v[j].Range().Start.Line 197 | }) 198 | } 199 | return r 200 | } 201 | --------------------------------------------------------------------------------