├── .go-version ├── diagnostics.go ├── doc.go ├── go.mod ├── go.sum ├── internal ├── dynfunc │ └── dynfunc.go ├── sdkdiags │ └── diagnostics.go └── tfplugin5 │ ├── generate.sh │ ├── tfplugin5.pb.go │ └── tfplugin5.proto ├── legacy_resource_type.go ├── legacy_schema.go ├── plugin.go ├── plugin_conv.go ├── provider.go ├── resource_type.go ├── schema.go ├── schema_test.go ├── terraform-provider-test ├── .gitignore ├── main.go ├── test │ ├── drt_echo.go │ ├── mrt_instance.go │ ├── mrt_instance_test.go │ ├── provider.go │ └── provider_test.go └── testdata │ └── valid │ └── simple │ ├── .gitignore │ └── simple.tf ├── testing.go ├── tflegacy ├── config.go ├── diff.go ├── doc.go ├── resource.go ├── resource_data.go ├── resource_diff.go ├── resource_importer.go ├── resource_mode_string.go ├── resource_timeout.go ├── schema.go ├── state.go └── valuetype_string.go ├── tfobj ├── doc.go ├── encode.go ├── object_builder.go ├── object_reader.go └── plan_builder.go └── tfschema ├── doc.go ├── nestingmode_string.go └── schema.go /.go-version: -------------------------------------------------------------------------------- 1 | 1.11.5 2 | -------------------------------------------------------------------------------- /diagnostics.go: -------------------------------------------------------------------------------- 1 | package tfsdk 2 | 3 | import ( 4 | "github.com/apparentlymart/terraform-sdk/internal/sdkdiags" 5 | "github.com/zclconf/go-cty/cty" 6 | ) 7 | 8 | // Diagnostics is a collection type used to report zero or more problems that 9 | // occurred during an operation. 10 | // 11 | // A nil Diagnostics indicates no problems. However, a non-nil Diagnostics 12 | // may contain only warnings, so use method HasErrors to recognize when an 13 | // error has occurred. 14 | type Diagnostics = sdkdiags.Diagnostics 15 | 16 | // Diagnostic represents a single problem that occurred during an operation. 
17 | // 18 | // Use an appropriate severity to allow the caller to properly react to the 19 | // problem. Error severity will tend to halt further processing of downstream 20 | // operations. 21 | // 22 | // If the error concerns a particular attribute within the configuration, use 23 | // the Path field to indicate that specific attribute. This allows the caller 24 | // to produce more specific problem reports, possibly containing direct 25 | // references to the problematic value. General problems, such as total 26 | // inability to reach a remote API, should be reported with a nil Path. 27 | type Diagnostic = sdkdiags.Diagnostic 28 | 29 | type DiagSeverity = sdkdiags.DiagSeverity 30 | 31 | const ( 32 | // Error is a diagnostic severity used to indicate that an option could 33 | // not be completed as requested. 34 | Error = sdkdiags.Error 35 | 36 | // Warning is a diagnostic severity used to indicate a problem that 37 | // did not block the completion of the requested operation but that the 38 | // user should be aware of nonetheless. 39 | Warning = sdkdiags.Warning 40 | ) 41 | 42 | // FormatError returns a string representation of the given error. For most 43 | // error types this is equivalent to calling .Error, but will augment a 44 | // cty.PathError by adding the indicated attribute path as a prefix. 45 | func FormatError(err error) string { 46 | return sdkdiags.FormatError(err) 47 | } 48 | 49 | // FormatPath returns a string representation of the given path using a syntax 50 | // that resembles an expression in the Terraform language. 51 | func FormatPath(path cty.Path) string { 52 | return sdkdiags.FormatPath(path) 53 | } 54 | 55 | // ValidationError is a helper for constructing a Diagnostic to report an 56 | // unsuitable value inside an attribute's ValidateFn. 57 | // 58 | // Use this function when reporting "unsuitable value" errors to ensure a 59 | // consistent user experience across providers. 
The error message for the given 60 | // error must make sense when used after a colon in a full English sentence. 61 | // 62 | // If the given error is a cty.PathError then it is assumed to be relative to 63 | // the value being validated and will be reported in that context. This will 64 | // be the case automatically if the cty.Value passed to the ValidateFn is used 65 | // with functions from the cty "convert" and "gocty" packages. 66 | func ValidationError(err error) Diagnostic { 67 | return sdkdiags.ValidationError(err) 68 | } 69 | 70 | // UpstreamAPIError is a helper for constructing a Diagnostic to report an 71 | // otherwise-unhandled error response from an upstream API/SDK. 72 | // 73 | // Although ideally providers will handle common error types and return 74 | // helpful, actionable error diagnostics for them, in practice there are always 75 | // errors that the provider cannot predict and, unfortunately, some SDKs do not 76 | // return errors in a way that allows providers to handle them carefully. 77 | // 78 | // In situations like these, pass the raw error value directly from the upstream 79 | // SDK to this function to produce a consistent generic error message that 80 | // adds the additional context about this being a problem reported by the 81 | // upstream API, rather than by the provider or Terraform itself directly. 82 | // 83 | // The language used in the diagnostics returned by this function is appropriate 84 | // only for errors returned when making calls to a remote API over a network. 85 | // Do not use this function for errors returned from local computation functions, 86 | // such as parsers, serializers, private key generators, etc. 
87 | func UpstreamAPIError(err error) Diagnostic { 88 | return sdkdiags.UpstreamAPIError(err) 89 | } 90 | -------------------------------------------------------------------------------- /doc.go: -------------------------------------------------------------------------------- 1 | // Package tfsdk is the main package of a library that implements Terraform's 2 | // plugin protocols and maps them on to a higher-level API intended to allow 3 | // convenient implementation of Terraform provider plugins in Go. 4 | // 5 | // This module is currently in an early exploration phase and not intended to 6 | // be used for production plugins. To develop plugins for Terraform today, 7 | // see the Terraform Extend documentation at https://www.terraform.io/docs/extend/ . 8 | package tfsdk 9 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/apparentlymart/terraform-sdk 2 | 3 | require ( 4 | github.com/apparentlymart/terraform-plugin-test v0.0.0-20190921180929-24e04c393f77 5 | github.com/davecgh/go-spew v1.1.1 6 | github.com/golang/protobuf v1.2.0 7 | github.com/google/go-cmp v0.2.0 8 | github.com/hashicorp/terraform-json v0.3.0 9 | github.com/zclconf/go-cty v1.1.0 10 | go.rpcplugin.org/rpcplugin v0.0.0-20190513032547-109b787efc73 11 | golang.org/x/net v0.0.0-20190328230028-74de082e2cca 12 | google.golang.org/grpc v1.19.1 13 | ) 14 | -------------------------------------------------------------------------------- /go.sum: -------------------------------------------------------------------------------- 1 | cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= 2 | github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= 3 | github.com/apparentlymart/go-ctxenv v1.0.0 h1:bsRTyED+PEcifljxBd/WhXRk/BNhgCigGYGZ0pVP4lM= 4 | github.com/apparentlymart/go-ctxenv v1.0.0/go.mod 
h1:Fxo441RKBr/C5JmbNRwdMSAUXs7k8M9ndNHBShdNCE4= 5 | github.com/apparentlymart/go-textseg v1.0.0/go.mod h1:z96Txxhf3xSFMPmb5X/1W05FF/Nj9VFpLOpjS5yuumk= 6 | github.com/apparentlymart/terraform-plugin-test v0.0.0-20190921180929-24e04c393f77 h1:YfdrN7Sa/PBxhoEqO+5dP+a+Yj/2joPHO6m5QPEW8n8= 7 | github.com/apparentlymart/terraform-plugin-test v0.0.0-20190921180929-24e04c393f77/go.mod h1:ywdJcxlX8KNuclunr9Y1Jl8GAYU0ZMbkwViaj9RNvj4= 8 | github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= 9 | github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= 10 | github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 11 | github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= 12 | github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= 13 | github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= 14 | github.com/golang/protobuf v1.1.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= 15 | github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= 16 | github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= 17 | github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ= 18 | github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= 19 | github.com/hashicorp/terraform-json v0.3.0 h1:DK5sWm+LJrBQtURN11EJuScdHHcTMq2gtvgP0dpXrRM= 20 | github.com/hashicorp/terraform-json v0.3.0/go.mod h1:wXV7+To33w9ism+RXUnaYk67mR6lnsflM2espC7RR58= 21 | github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= 22 | github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= 23 | github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= 24 | github.com/kr/text v0.1.0 
h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= 25 | github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= 26 | github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= 27 | github.com/vmihailenco/msgpack v3.3.3+incompatible h1:wapg9xDUZDzGCNFlwc5SqI1rvcciqcxEHac4CYj89xI= 28 | github.com/vmihailenco/msgpack v3.3.3+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk= 29 | github.com/zclconf/go-cty v0.0.0-20190430221426-d36a6f0dbffd h1:NZOOU7h+pDtcKo6xlqm8PwnarS8nJ+6+I83jT8ZfLPI= 30 | github.com/zclconf/go-cty v0.0.0-20190430221426-d36a6f0dbffd/go.mod h1:xnAOWiHeOqg2nWS62VtQ7pbOu17FtxJNW8RLEih+O3s= 31 | github.com/zclconf/go-cty v1.1.0 h1:uJwc9HiBOCpoKIObTQaLR+tsEXx1HBHnOsOOpcdhZgw= 32 | github.com/zclconf/go-cty v1.1.0/go.mod h1:xnAOWiHeOqg2nWS62VtQ7pbOu17FtxJNW8RLEih+O3s= 33 | go.rpcplugin.org/rpcplugin v0.0.0-20190513032547-109b787efc73 h1:1qKOCl+uxhXgE8fqu3CAcc5A8mhSF9Ny1XzyWTnrbvY= 34 | go.rpcplugin.org/rpcplugin v0.0.0-20190513032547-109b787efc73/go.mod h1:h/yfvithR6YwPtMVgF6avXE937Uyv/Sp2zkKheLAT8w= 35 | golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M= 36 | golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= 37 | golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= 38 | golang.org/x/net v0.0.0-20180811021610-c39426892332/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= 39 | golang.org/x/net v0.0.0-20180826012351-8a410e7b638d h1:g9qWBGx4puODJTMVyoPrpoxPFgVGd+z1DZwjfRu4d0I= 40 | golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= 41 | golang.org/x/net v0.0.0-20190328230028-74de082e2cca h1:hyA6yiAgbUwuWqtscNvWAI7U1CtlaD1KilQ6iudt1aI= 42 | golang.org/x/net v0.0.0-20190328230028-74de082e2cca/go.mod 
h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= 43 | golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= 44 | golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f h1:wMNYb4v58l5UBM7MYRLPG6ZhfOqbKu7X5eyFl8ZhKvA= 45 | golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 46 | golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= 47 | golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU= 48 | golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= 49 | golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= 50 | golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= 51 | golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= 52 | google.golang.org/appengine v1.1.0 h1:igQkv0AAhEIvTEpD5LIpAfav2eeVO9HBTjvKHVJPRSs= 53 | google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= 54 | google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8 h1:Nw54tB0rB7hY/N0NQvRW8DG4Yk3Q6T9cu9RcFQDu1tc= 55 | google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= 56 | google.golang.org/grpc v1.19.1 h1:TrBcJ1yqAl1G++wO39nD/qtgpsW9/1+QGrluyMGEYgM= 57 | google.golang.org/grpc v1.19.1/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= 58 | gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= 59 | gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 60 | honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= 61 | 
-------------------------------------------------------------------------------- /internal/dynfunc/dynfunc.go: -------------------------------------------------------------------------------- 1 | package dynfunc 2 | 3 | import ( 4 | "fmt" 5 | "reflect" 6 | 7 | "github.com/apparentlymart/terraform-sdk/internal/sdkdiags" 8 | "github.com/apparentlymart/terraform-sdk/tfobj" 9 | "github.com/zclconf/go-cty/cty" 10 | "github.com/zclconf/go-cty/cty/gocty" 11 | ) 12 | 13 | var diagnosticsType = reflect.TypeOf(sdkdiags.Diagnostics(nil)) 14 | var ctyValueType = reflect.TypeOf(cty.Value{}) 15 | var ctyPathSetType = reflect.TypeOf(cty.PathSet{}) 16 | var objectReaderType = reflect.TypeOf(tfobj.ObjectReader(nil)) 17 | var objectBuilderType = reflect.TypeOf(tfobj.ObjectBuilder(nil)) 18 | var planReaderType = reflect.TypeOf(tfobj.PlanReader(nil)) 19 | var planBuilderType = reflect.TypeOf(tfobj.PlanBuilder(nil)) 20 | 21 | // WrapSimpleFunction dynamically binds the given arguments to the given 22 | // function, or returns a developer-oriented error describing why it cannot. 23 | // The given function must return only a tfsdk.Diagnostics value. 24 | // 25 | // If the requested call is valid, the result is a function that takes no 26 | // arguments, executes the requested call, and returns any diagnostics that 27 | // result. 28 | // 29 | // As a convenience, if the given function is nil then a no-op function will 30 | // be returned, for the common situation where a dynamic function is optional. 
31 | func WrapSimpleFunction(f interface{}, args ...interface{}) (func() sdkdiags.Diagnostics, error) { 32 | if f == nil { 33 | return func() sdkdiags.Diagnostics { 34 | return nil 35 | }, nil 36 | } 37 | 38 | fv := reflect.ValueOf(f) 39 | if fv.Kind() != reflect.Func { 40 | return nil, fmt.Errorf("value is %s, not Func", fv.Kind().String()) 41 | } 42 | 43 | ft := fv.Type() 44 | if ft.NumOut() != 1 || !ft.Out(0).AssignableTo(diagnosticsType) { 45 | return nil, fmt.Errorf("must return Diagnostics") 46 | } 47 | 48 | convArgs, forceDiags, err := prepareDynamicCallArgs(f, args...) 49 | if err != nil { 50 | return nil, err 51 | } 52 | 53 | return func() sdkdiags.Diagnostics { 54 | if len(forceDiags) > 0 { 55 | return forceDiags 56 | } 57 | 58 | out := fv.Call(convArgs) 59 | return out[0].Interface().(sdkdiags.Diagnostics) 60 | }, nil 61 | } 62 | 63 | // WrapFunctionWithReturnValue is like WrapSimpleFunction but expects the 64 | // function to return another value alongside its diagnostics. The given 65 | // result pointer will receive the function's return value if no diagnostics 66 | // are returned. 67 | // 68 | // resultPtr must be a pointer, and the return type of the function must be 69 | // compatible with resultPtr's referent. 
70 | func WrapFunctionWithReturnValue(f interface{}, resultPtr interface{}, args ...interface{}) (func() sdkdiags.Diagnostics, error) { 71 | rv := reflect.ValueOf(resultPtr) 72 | if rv.Kind() != reflect.Ptr { 73 | return nil, fmt.Errorf("resultPtr is %s, not Ptr", rv.Kind().String()) 74 | } 75 | wantRT := rv.Type().Elem() 76 | 77 | if f == nil { 78 | return func() sdkdiags.Diagnostics { 79 | rv.Elem().Set(reflect.Zero(wantRT)) 80 | return nil 81 | }, nil 82 | } 83 | 84 | fv := reflect.ValueOf(f) 85 | if fv.Kind() != reflect.Func { 86 | return nil, fmt.Errorf("value is %s, not Func", fv.Kind().String()) 87 | } 88 | 89 | ft := fv.Type() 90 | if ft.NumOut() != 2 { 91 | return nil, fmt.Errorf("must have two return values") 92 | } 93 | if !ft.Out(1).AssignableTo(diagnosticsType) { 94 | return nil, fmt.Errorf("second return value must be diagnostics") 95 | } 96 | if gotRT := ft.Out(0); !gotRT.AssignableTo(wantRT) { 97 | return nil, fmt.Errorf("function return type %s cannot be assigned to result of type %s", gotRT, wantRT) 98 | } 99 | 100 | convArgs, forceDiags, err := prepareDynamicCallArgs(f, args...) 101 | if err != nil { 102 | return nil, err 103 | } 104 | 105 | return func() sdkdiags.Diagnostics { 106 | if len(forceDiags) > 0 { 107 | return forceDiags 108 | } 109 | 110 | out := fv.Call(convArgs) 111 | retVal := out[0] 112 | diags := out[1].Interface().(sdkdiags.Diagnostics) 113 | 114 | rv.Elem().Set(retVal) 115 | return diags 116 | }, nil 117 | } 118 | 119 | // WrapFunctionWithReturnValueCty is like WrapFunctionWithReturnValue but with 120 | // the return value specified as a cty value type rather than a Go pointer. 121 | // 122 | // Returns a function that will call the wrapped function, convert its result 123 | // to cty.Value using gocty, and return it. 
124 | func WrapFunctionWithReturnValueCty(f interface{}, wantTy cty.Type, args ...interface{}) (func() (cty.Value, sdkdiags.Diagnostics), error) { 125 | if f == nil { 126 | return func() (cty.Value, sdkdiags.Diagnostics) { 127 | return cty.NullVal(wantTy), nil 128 | }, nil 129 | } 130 | 131 | fv := reflect.ValueOf(f) 132 | if fv.Kind() != reflect.Func { 133 | return nil, fmt.Errorf("value is %s, not Func", fv.Kind().String()) 134 | } 135 | 136 | ft := fv.Type() 137 | if ft.NumOut() != 2 { 138 | return nil, fmt.Errorf("must have two return values") 139 | } 140 | if !ft.Out(1).AssignableTo(diagnosticsType) { 141 | return nil, fmt.Errorf("second return value must be diagnostics") 142 | } 143 | gotRT := ft.Out(0) 144 | passthruResult := false 145 | if ctyValueType.AssignableTo(gotRT) { 146 | passthruResult = true 147 | } 148 | 149 | convArgs, forceDiags, err := prepareDynamicCallArgs(f, args...) 150 | if err != nil { 151 | return nil, err 152 | } 153 | 154 | return func() (cty.Value, sdkdiags.Diagnostics) { 155 | if len(forceDiags) > 0 { 156 | return cty.NullVal(wantTy), forceDiags 157 | } 158 | 159 | out := fv.Call(convArgs) 160 | retValRaw := out[0].Interface() 161 | diags := out[1].Interface().(sdkdiags.Diagnostics) 162 | if passthruResult { 163 | return retValRaw.(cty.Value), diags 164 | } 165 | 166 | // If we're not just passing through then we need to run gocty first 167 | // to try to derive a suitable value from whatever we've been given. 168 | 169 | retVal, err := gocty.ToCtyValue(retValRaw, wantTy) 170 | if err != nil { 171 | if !diags.HasErrors() { // If the result was errored anyway then we'll tolerate this conversion failure. 
172 | diags = diags.Append(sdkdiags.Diagnostic{ 173 | Severity: sdkdiags.Error, 174 | Summary: "Invalid result from provider", 175 | Detail: fmt.Sprintf("The provider produced an invalid result: %s.\n\nThis is a bug in the provider; please report it in the provider's issue tracker.", sdkdiags.FormatError(err)), 176 | }) 177 | } 178 | retVal = cty.NullVal(wantTy) 179 | } 180 | return retVal, diags 181 | }, nil 182 | } 183 | 184 | // WrapFunctionWithReturnValueCtyAndPathSet is like WrapFunctionWithReturnValueCty 185 | // but with the function also returning an additional cty.PathSet value. This 186 | // is a more specialized situation suited to implementing "plan" functions, 187 | // where the pathset represents the attributes that require replacement. 188 | // 189 | // Returns a function that will call the wrapped function, convert its result 190 | // to cty.Value using gocty, and return it. 191 | func WrapFunctionWithReturnValueCtyAndPathSet(f interface{}, wantTy cty.Type, args ...interface{}) (func() (cty.Value, cty.PathSet, sdkdiags.Diagnostics), error) { 192 | if f == nil { 193 | return func() (cty.Value, cty.PathSet, sdkdiags.Diagnostics) { 194 | return cty.NullVal(wantTy), cty.NewPathSet(), nil 195 | }, nil 196 | } 197 | 198 | fv := reflect.ValueOf(f) 199 | if fv.Kind() != reflect.Func { 200 | return nil, fmt.Errorf("value is %s, not Func", fv.Kind().String()) 201 | } 202 | 203 | ft := fv.Type() 204 | if ft.NumOut() != 3 { 205 | return nil, fmt.Errorf("must have three return values") 206 | } 207 | if !ft.Out(1).AssignableTo(ctyPathSetType) { 208 | return nil, fmt.Errorf("second return value must be cty.PathSet") 209 | } 210 | if !ft.Out(2).AssignableTo(diagnosticsType) { 211 | return nil, fmt.Errorf("third return value must be diagnostics") 212 | } 213 | gotRT := ft.Out(0) 214 | passthruResult := false 215 | if ctyValueType.AssignableTo(gotRT) { 216 | passthruResult = true 217 | } 218 | 219 | convArgs, forceDiags, err := prepareDynamicCallArgs(f, args...) 
220 | if err != nil { 221 | return nil, err 222 | } 223 | 224 | return func() (cty.Value, cty.PathSet, sdkdiags.Diagnostics) { 225 | if len(forceDiags) > 0 { 226 | return cty.NullVal(wantTy), cty.NewPathSet(), forceDiags 227 | } 228 | 229 | out := fv.Call(convArgs) 230 | retValRaw := out[0].Interface() 231 | retPathSet := out[1].Interface().(cty.PathSet) 232 | diags := out[2].Interface().(sdkdiags.Diagnostics) 233 | if passthruResult { 234 | return retValRaw.(cty.Value), retPathSet, diags 235 | } 236 | 237 | // If we're not just passing through then we need to run gocty first 238 | // to try to derive a suitable value from whatever we've been given. 239 | 240 | retVal, err := gocty.ToCtyValue(retValRaw, wantTy) 241 | if err != nil { 242 | if !diags.HasErrors() { // If the result was errored anyway then we'll tolerate this conversion failure. 243 | diags = diags.Append(sdkdiags.Diagnostic{ 244 | Severity: sdkdiags.Error, 245 | Summary: "Invalid result from provider", 246 | Detail: fmt.Sprintf("The provider produced an invalid result: %s.\n\nThis is a bug in the provider; please report it in the provider's issue tracker.", sdkdiags.FormatError(err)), 247 | }) 248 | } 249 | retVal = cty.NullVal(wantTy) 250 | } 251 | return retVal, retPathSet, diags 252 | }, nil 253 | } 254 | 255 | func prepareDynamicCallArgs(f interface{}, args ...interface{}) ([]reflect.Value, sdkdiags.Diagnostics, error) { 256 | fv := reflect.ValueOf(f) 257 | if fv.Kind() != reflect.Func { 258 | return nil, nil, fmt.Errorf("value is %s, not Func", fv.Kind().String()) 259 | } 260 | 261 | ft := fv.Type() 262 | if got, want := ft.NumIn(), len(args); got != want { 263 | // (this error assumes that "args" is defined by the SDK code and thus 264 | // correct, while f comes from the provider and so is wrong.) 
265 | return nil, nil, fmt.Errorf("should have %d arguments, but has %d", want, got) 266 | } 267 | 268 | var forceDiags sdkdiags.Diagnostics 269 | 270 | convArgs := make([]reflect.Value, len(args)) 271 | for i, rawArg := range args { 272 | wantType := ft.In(i) 273 | switch arg := rawArg.(type) { 274 | case cty.Value: 275 | var moreDiags sdkdiags.Diagnostics 276 | convArgs[i], moreDiags = prepareCtyValueArg(arg, wantType) 277 | forceDiags = forceDiags.Append(moreDiags) 278 | case tfobj.ObjectReader: 279 | argVal := reflect.ValueOf(rawArg) 280 | if argVal.Type().AssignableTo(wantType) { 281 | // Easy case! We'll just pass it on verbatim 282 | convArgs[i] = argVal 283 | } else { 284 | // Otherwise we'll unpack the cty.Value inside the reader and 285 | // use gocty with it, just as we'd do for a plain cty.Value. 286 | var moreDiags sdkdiags.Diagnostics 287 | convArgs[i], moreDiags = prepareCtyValueArg(arg.ObjectVal(), wantType) 288 | forceDiags = forceDiags.Append(moreDiags) 289 | } 290 | default: 291 | // All other arguments must be directly assignable. 292 | argVal := reflect.ValueOf(rawArg) 293 | if !argVal.Type().AssignableTo(wantType) { 294 | return nil, nil, fmt.Errorf("argument %d must accept %T", i, rawArg) 295 | } 296 | convArgs[i] = argVal 297 | } 298 | } 299 | 300 | return convArgs, forceDiags, nil 301 | } 302 | 303 | func prepareCtyValueArg(arg cty.Value, wantType reflect.Type) (reflect.Value, sdkdiags.Diagnostics) { 304 | var diags sdkdiags.Diagnostics 305 | 306 | // As a special case, we handle cty.Value arguments through gocty. 307 | targetVal := reflect.New(wantType) 308 | err := gocty.FromCtyValue(arg, targetVal.Interface()) 309 | if err != nil { 310 | // While most of the errors in here are written as if the 311 | // f interface is wrong, for this particular case we invert 312 | // that to consider the f argument as a way to specify 313 | // constraints on the user-provided value. 
However, we don't 314 | // have much context here for what the wrapped function is for, 315 | // so our error message is necessarily generic. Providers should 316 | // generally not rely on this error form and should instead 317 | // ensure that all user-suppliable values can be accepted. 318 | diags = diags.Append(sdkdiags.Diagnostic{ 319 | Severity: sdkdiags.Error, 320 | Summary: "Unsuitable argument value", 321 | Detail: fmt.Sprintf("This value cannot be used: %s.", sdkdiags.FormatError(err)), 322 | }) 323 | } 324 | 325 | return targetVal.Elem(), diags // New created a pointer, but we want the referent 326 | } 327 | -------------------------------------------------------------------------------- /internal/sdkdiags/diagnostics.go: -------------------------------------------------------------------------------- 1 | // Package sdkdiags contains the diagnostics helpers for the SDK. 2 | // 3 | // They are separated into this separate package so that other packages in the 4 | // SDK module itself can use them without creating import cycles. The public 5 | // interface to all of these symbols is in the main tfsdk package. 6 | package sdkdiags 7 | 8 | import ( 9 | "fmt" 10 | "strings" 11 | 12 | "github.com/zclconf/go-cty/cty" 13 | ) 14 | 15 | // Diagnostics is a collection type used to report zero or more problems that 16 | // occurred during an operation. 17 | // 18 | // A nil Diagnostics indicates no problems. However, a non-nil Diagnostics 19 | // may contain only warnings, so use method HasErrors to recognize when an 20 | // error has occurred. 21 | type Diagnostics []Diagnostic 22 | 23 | // Diagnostic represents a single problem that occurred during an operation. 24 | // 25 | // Use an appropriate severity to allow the caller to properly react to the 26 | // problem. Error severity will tend to halt further processing of downstream 27 | // operations. 
28 | // 29 | // If the error concerns a particular attribute within the configuration, use 30 | // the Path field to indicate that specific attribute. This allows the caller 31 | // to produce more specific problem reports, possibly containing direct 32 | // references to the problematic value. General problems, such as total 33 | // inability to reach a remote API, should be reported with a nil Path. 34 | type Diagnostic struct { 35 | Severity DiagSeverity 36 | Summary string 37 | Detail string 38 | Path cty.Path 39 | } 40 | 41 | func (diags Diagnostics) Append(vals ...interface{}) Diagnostics { 42 | for _, rawVal := range vals { 43 | switch val := rawVal.(type) { 44 | case Diagnostics: 45 | diags = append(diags, val...) 46 | case Diagnostic: 47 | diags = append(diags, val) 48 | case error: 49 | // We'll generate a generic error diagnostic then, to more easily 50 | // adapt from existing APIs that deal only in errors. 51 | diags = append(diags, Diagnostic{ 52 | Severity: Error, 53 | Summary: "Error from provider", 54 | Detail: fmt.Sprintf("Provider error: %s", FormatError(val)), 55 | }) 56 | default: 57 | panic(fmt.Sprintf("Diagnostics.Append does not support %T", rawVal)) 58 | } 59 | } 60 | return diags 61 | } 62 | 63 | func (diags Diagnostics) HasErrors() bool { 64 | for _, diag := range diags { 65 | if diag.Severity == Error { 66 | return true 67 | } 68 | } 69 | return false 70 | } 71 | 72 | // UnderPath rewrites the Path fields of the receiving diagnostics to be 73 | // relative to the given path. This can be used to gradually build up 74 | // a full path while working backwards from leaf values, avoiding the 75 | // need to pass full paths throughout validation and other processing 76 | // walks. 77 | // 78 | // This function modifies the receiver in-place, but also returns the receiver 79 | // for convenient use in function return statements. 
80 | func (diags Diagnostics) UnderPath(base cty.Path) Diagnostics { 81 | for i, diag := range diags { 82 | path := make(cty.Path, 0, len(base)+len(diag.Path)) 83 | path = append(path, base...) 84 | path = append(path, diag.Path...) 85 | diags[i].Path = path 86 | } 87 | return diags 88 | } 89 | 90 | type DiagSeverity int 91 | 92 | const ( 93 | diagSeverityInvalid DiagSeverity = iota 94 | 95 | // Error is a diagnostic severity used to indicate that an option could 96 | // not be completed as requested. 97 | Error 98 | 99 | // Warning is a diagnostic severity used to indicate a problem that 100 | // did not block the completion of the requested operation but that the 101 | // user should be aware of nonetheless. 102 | Warning 103 | ) 104 | 105 | // FormatError returns a string representation of the given error. For most 106 | // error types this is equivalent to calling .Error, but will augment a 107 | // cty.PathError by adding the indicated attribute path as a prefix. 108 | func FormatError(err error) string { 109 | switch tErr := err.(type) { 110 | case cty.PathError: 111 | if len(tErr.Path) == 0 { 112 | // No prefix to render, then 113 | return tErr.Error() 114 | } 115 | 116 | return fmt.Sprintf("%s: %s", FormatPath(tErr.Path), tErr.Error()) 117 | default: 118 | return err.Error() 119 | } 120 | } 121 | 122 | // FormatPath returns a string representation of the given path using a syntax 123 | // that resembles an expression in the Terraform language. 124 | func FormatPath(path cty.Path) string { 125 | var buf strings.Builder 126 | for _, rawStep := range path { 127 | switch step := rawStep.(type) { 128 | case cty.GetAttrStep: 129 | fmt.Fprintf(&buf, ".%s", step.Name) 130 | case cty.IndexStep: 131 | switch step.Key.Type() { 132 | case cty.String: 133 | // fmt's %q isn't quite the same as Terraform quoted string syntax, 134 | // but it's close enough for error reporting. 
135 | fmt.Fprintf(&buf, "[%q]", step.Key.AsString()) 136 | case cty.Number: 137 | fmt.Fprintf(&buf, "[%s]", step.Key.AsBigFloat()) 138 | default: 139 | // A path through a set can contain a key of any type in principle, 140 | // but it will never be anything we can render compactly in a 141 | // path expression string, so we'll just use a placeholder. 142 | buf.WriteString("[...]") 143 | } 144 | default: 145 | // Should never happen because there are no other step types 146 | buf.WriteString(".(invalid path step)") 147 | } 148 | } 149 | return buf.String() 150 | } 151 | 152 | // ValidationError is a helper for constructing a Diagnostic to report an 153 | // unsuitable value inside an attribute's ValidateFn. 154 | // 155 | // Use this function when reporting "unsuitable value" errors to ensure a 156 | // consistent user experience across providers. The error message for the given 157 | // error must make sense when used after a colon in a full English sentence. 158 | // 159 | // If the given error is a cty.PathError then it is assumed to be relative to 160 | // the value being validated and will be reported in that context. This will 161 | // be the case automatically if the cty.Value passed to the ValidateFn is used 162 | // with functions from the cty "convert" and "gocty" packages. 163 | func ValidationError(err error) Diagnostic { 164 | var path cty.Path 165 | if pErr, ok := err.(cty.PathError); ok { 166 | path = pErr.Path 167 | } 168 | 169 | return Diagnostic{ 170 | Severity: Error, 171 | Summary: "Unsuitable argument value", 172 | Detail: fmt.Sprintf("This value cannot be used: %s.", FormatError(err)), 173 | Path: path, 174 | } 175 | } 176 | 177 | // UpstreamAPIError is a helper for constructing a Diagnostic to report an 178 | // otherwise-unhandled error response from an upstream API/SDK. 
179 | // 180 | // Although ideally providers will handle common error types and return 181 | // helpful, actionable error diagnostics for them, in practice there are always 182 | // errors that the provider cannot predict and, unfortunately, some SDKs do not 183 | // return errors in a way that allows providers to handle them carefully. 184 | // 185 | // In situations like these, pass the raw error value directly from the upstream 186 | // SDK to this function to produce a consistent generic error message that 187 | // adds the additional context about this being a problem reported by the 188 | // upstream API, rather than by the provider or Terraform itself directly. 189 | // 190 | // The language used in the diagnostics returned by this function is appropriate 191 | // only for errors returned when making calls to a remote API over a network. 192 | // Do not use this function for errors returned from local computation functions, 193 | // such as parsers, serializers, private key generators, etc. 194 | func UpstreamAPIError(err error) Diagnostic { 195 | return Diagnostic{ 196 | Severity: Error, 197 | Summary: "Remote operation failed", 198 | Detail: fmt.Sprintf("The remote API returned an error that the provider was unable to handle:\n\n%s", err), 199 | } 200 | } 201 | -------------------------------------------------------------------------------- /internal/tfplugin5/generate.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # We do not run protoc under go:generate because we want to ensure that all 4 | # dependencies of go:generate are "go get"-able for general dev environment 5 | # usability. 6 | # 7 | # To adopt a new minor version of plugin protocol 5: 8 | # - copy the new tfplugin5.proto from the commit associated with latest tagged 9 | # release of Terraform CLI over the top of this directory's tfplugin5.proto. 
10 | # - Run this generate.sh script to in turn run protoc to regenerate 11 | # tfplugin5.pb.go. 12 | # 13 | # Terraform's protocol versioning conventions call for all new minor releases 14 | # of protocol 5 to be supersets of all earlier versions. This procedure is not 15 | # appropriate for a hypothetical future major version, which should instead 16 | # have its own package alongside this one to allow the SDK to implement both 17 | # versions at once for a while before removing protocol 5. 18 | 19 | set -eu 20 | 21 | SOURCE="${BASH_SOURCE[0]}" 22 | while [ -h "$SOURCE" ] ; do SOURCE="$(readlink "$SOURCE")"; done 23 | DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )" 24 | 25 | cd "$DIR" 26 | 27 | protoc -I ./ tfplugin5.proto --go_out=plugins=grpc:./ 28 | -------------------------------------------------------------------------------- /internal/tfplugin5/tfplugin5.proto: -------------------------------------------------------------------------------- 1 | // Terraform Plugin RPC protocol version 5.0 2 | // 3 | // This file defines version 5.0 of the RPC protocol. To implement a plugin 4 | // against this protocol, copy this definition into your own codebase and 5 | // use protoc to generate stubs for your target language. 6 | // 7 | // This file will be updated in-place in the source Terraform repository for 8 | // any minor versions of protocol 5, but later minor versions will always be 9 | // backwards compatible. Breaking changes, if any are required, will come 10 | // in a subsequent major version with its own separate proto definition. 11 | // 12 | // Note that only the proto files included in a release tag of Terraform are 13 | // official protocol releases. Proto files taken from other commits may include 14 | // incomplete changes or features that did not make it into a final release. 
// In all reasonable cases, plugin developers should take the proto file from
// the tag of the most recent release of Terraform, and not from the master
// branch or any other development branch.
//
syntax = "proto3";

package tfplugin5;

// DynamicValue is an opaque encoding of terraform data, with the field name
// indicating the encoding scheme used.
message DynamicValue {
    bytes msgpack = 1;
    bytes json = 2;
}

message Diagnostic {
    enum Severity {
        INVALID = 0;
        ERROR = 1;
        WARNING = 2;
    }
    Severity severity = 1;
    string summary = 2;
    string detail = 3;
    AttributePath attribute = 4;
}

message AttributePath {
    message Step {
        oneof selector {
            // Set "attribute_name" to represent looking up an attribute
            // in the current object value.
            string attribute_name = 1;
            // Set "element_key_*" to represent looking up an element in
            // an indexable collection type.
            string element_key_string = 2;
            int64 element_key_int = 3;
        }
    }
    repeated Step steps = 1;
}

message Stop {
    message Request {
    }
    message Response {
        string Error = 1;
    }
}

// RawState holds the stored state for a resource to be upgraded by the
// provider. It can be in one of two formats, the current json encoded format
// in bytes, or the legacy flatmap format as a map of strings.
message RawState {
    bytes json = 1;
    map<string, string> flatmap = 2;
}

// Schema is the configuration schema for a Resource, Provider, or Provisioner.
74 | message Schema { 75 | message Block { 76 | int64 version = 1; 77 | repeated Attribute attributes = 2; 78 | repeated NestedBlock block_types = 3; 79 | } 80 | 81 | message Attribute { 82 | string name = 1; 83 | bytes type = 2; 84 | string description = 3; 85 | bool required = 4; 86 | bool optional = 5; 87 | bool computed = 6; 88 | bool sensitive = 7; 89 | } 90 | 91 | message NestedBlock { 92 | enum NestingMode { 93 | INVALID = 0; 94 | SINGLE = 1; 95 | LIST = 2; 96 | SET = 3; 97 | MAP = 4; 98 | GROUP = 5; 99 | } 100 | 101 | string type_name = 1; 102 | Block block = 2; 103 | NestingMode nesting = 3; 104 | int64 min_items = 4; 105 | int64 max_items = 5; 106 | } 107 | 108 | // The version of the schema. 109 | // Schemas are versioned, so that providers can upgrade a saved resource 110 | // state when the schema is changed. 111 | int64 version = 1; 112 | 113 | // Block is the top level configuration block for this schema. 114 | Block block = 2; 115 | } 116 | 117 | service Provider { 118 | //////// Information about what a provider supports/expects 119 | rpc GetSchema(GetProviderSchema.Request) returns (GetProviderSchema.Response); 120 | rpc PrepareProviderConfig(PrepareProviderConfig.Request) returns (PrepareProviderConfig.Response); 121 | rpc ValidateResourceTypeConfig(ValidateResourceTypeConfig.Request) returns (ValidateResourceTypeConfig.Response); 122 | rpc ValidateDataSourceConfig(ValidateDataSourceConfig.Request) returns (ValidateDataSourceConfig.Response); 123 | rpc UpgradeResourceState(UpgradeResourceState.Request) returns (UpgradeResourceState.Response); 124 | 125 | //////// One-time initialization, called before other functions below 126 | rpc Configure(Configure.Request) returns (Configure.Response); 127 | 128 | //////// Managed Resource Lifecycle 129 | rpc ReadResource(ReadResource.Request) returns (ReadResource.Response); 130 | rpc PlanResourceChange(PlanResourceChange.Request) returns (PlanResourceChange.Response); 131 | rpc 
ApplyResourceChange(ApplyResourceChange.Request) returns (ApplyResourceChange.Response);
    rpc ImportResourceState(ImportResourceState.Request) returns (ImportResourceState.Response);

    rpc ReadDataSource(ReadDataSource.Request) returns (ReadDataSource.Response);

    //////// Graceful Shutdown
    rpc Stop(Stop.Request) returns (Stop.Response);
}

message GetProviderSchema {
    message Request {
    }
    message Response {
        Schema provider = 1;
        map<string, Schema> resource_schemas = 2;
        map<string, Schema> data_source_schemas = 3;
        repeated Diagnostic diagnostics = 4;
    }
}

message PrepareProviderConfig {
    message Request {
        DynamicValue config = 1;
    }
    message Response {
        DynamicValue prepared_config = 1;
        repeated Diagnostic diagnostics = 2;
    }
}

message UpgradeResourceState {
    message Request {
        string type_name = 1;

        // version is the schema_version number recorded in the state file
        int64 version = 2;

        // raw_state is the raw states as stored for the resource. Core does
        // not have access to the schema of prior_version, so it's the
        // provider's responsibility to interpret this value using the
        // appropriate older schema. The raw_state will be the json encoded
        // state, or a legacy flat-mapped format.
        RawState raw_state = 3;
    }
    message Response {
        // new_state is a msgpack-encoded data structure that, when interpreted with
        // the _current_ schema for this resource type, is functionally equivalent to
        // that which was given in prior_state_raw.
        DynamicValue upgraded_state = 1;

        // diagnostics describes any errors encountered during migration that could not
        // be safely resolved, and warnings about any possibly-risky assumptions made
        // in the upgrade process.
184 | repeated Diagnostic diagnostics = 2; 185 | } 186 | } 187 | 188 | message ValidateResourceTypeConfig { 189 | message Request { 190 | string type_name = 1; 191 | DynamicValue config = 2; 192 | } 193 | message Response { 194 | repeated Diagnostic diagnostics = 1; 195 | } 196 | } 197 | 198 | message ValidateDataSourceConfig { 199 | message Request { 200 | string type_name = 1; 201 | DynamicValue config = 2; 202 | } 203 | message Response { 204 | repeated Diagnostic diagnostics = 1; 205 | } 206 | } 207 | 208 | message Configure { 209 | message Request { 210 | string terraform_version = 1; 211 | DynamicValue config = 2; 212 | } 213 | message Response { 214 | repeated Diagnostic diagnostics = 1; 215 | } 216 | } 217 | 218 | message ReadResource { 219 | message Request { 220 | string type_name = 1; 221 | DynamicValue current_state = 2; 222 | } 223 | message Response { 224 | DynamicValue new_state = 1; 225 | repeated Diagnostic diagnostics = 2; 226 | } 227 | } 228 | 229 | message PlanResourceChange { 230 | message Request { 231 | string type_name = 1; 232 | DynamicValue prior_state = 2; 233 | DynamicValue proposed_new_state = 3; 234 | DynamicValue config = 4; 235 | bytes prior_private = 5; 236 | } 237 | 238 | message Response { 239 | DynamicValue planned_state = 1; 240 | repeated AttributePath requires_replace = 2; 241 | bytes planned_private = 3; 242 | repeated Diagnostic diagnostics = 4; 243 | 244 | 245 | // This may be set only by the helper/schema "SDK" in the main Terraform 246 | // repository, to request that Terraform Core >=0.12 permit additional 247 | // inconsistencies that can result from the legacy SDK type system 248 | // and its imprecise mapping to the >=0.12 type system. 249 | // The change in behavior implied by this flag makes sense only for the 250 | // specific details of the legacy SDK type system, and are not a general 251 | // mechanism to avoid proper type handling in providers. 
252 | // 253 | // ==== DO NOT USE THIS ==== 254 | // ==== THIS MUST BE LEFT UNSET IN ALL OTHER SDKS ==== 255 | // ==== DO NOT USE THIS ==== 256 | bool legacy_type_system = 5; 257 | } 258 | } 259 | 260 | message ApplyResourceChange { 261 | message Request { 262 | string type_name = 1; 263 | DynamicValue prior_state = 2; 264 | DynamicValue planned_state = 3; 265 | DynamicValue config = 4; 266 | bytes planned_private = 5; 267 | } 268 | message Response { 269 | DynamicValue new_state = 1; 270 | bytes private = 2; 271 | repeated Diagnostic diagnostics = 3; 272 | 273 | // This may be set only by the helper/schema "SDK" in the main Terraform 274 | // repository, to request that Terraform Core >=0.12 permit additional 275 | // inconsistencies that can result from the legacy SDK type system 276 | // and its imprecise mapping to the >=0.12 type system. 277 | // The change in behavior implied by this flag makes sense only for the 278 | // specific details of the legacy SDK type system, and are not a general 279 | // mechanism to avoid proper type handling in providers. 
280 | // 281 | // ==== DO NOT USE THIS ==== 282 | // ==== THIS MUST BE LEFT UNSET IN ALL OTHER SDKS ==== 283 | // ==== DO NOT USE THIS ==== 284 | bool legacy_type_system = 4; 285 | } 286 | } 287 | 288 | message ImportResourceState { 289 | message Request { 290 | string type_name = 1; 291 | string id = 2; 292 | } 293 | 294 | message ImportedResource { 295 | string type_name = 1; 296 | DynamicValue state = 2; 297 | bytes private = 3; 298 | } 299 | 300 | message Response { 301 | repeated ImportedResource imported_resources = 1; 302 | repeated Diagnostic diagnostics = 2; 303 | } 304 | } 305 | 306 | message ReadDataSource { 307 | message Request { 308 | string type_name = 1; 309 | DynamicValue config = 2; 310 | } 311 | message Response { 312 | DynamicValue state = 1; 313 | repeated Diagnostic diagnostics = 2; 314 | } 315 | } 316 | 317 | service Provisioner { 318 | rpc GetSchema(GetProvisionerSchema.Request) returns (GetProvisionerSchema.Response); 319 | rpc ValidateProvisionerConfig(ValidateProvisionerConfig.Request) returns (ValidateProvisionerConfig.Response); 320 | rpc ProvisionResource(ProvisionResource.Request) returns (stream ProvisionResource.Response); 321 | rpc Stop(Stop.Request) returns (Stop.Response); 322 | } 323 | 324 | message GetProvisionerSchema { 325 | message Request { 326 | } 327 | message Response { 328 | Schema provisioner = 1; 329 | repeated Diagnostic diagnostics = 2; 330 | } 331 | } 332 | 333 | message ValidateProvisionerConfig { 334 | message Request { 335 | DynamicValue config = 1; 336 | } 337 | message Response { 338 | repeated Diagnostic diagnostics = 1; 339 | } 340 | } 341 | 342 | message ProvisionResource { 343 | message Request { 344 | DynamicValue config = 1; 345 | DynamicValue connection = 2; 346 | } 347 | message Response { 348 | string output = 1; 349 | repeated Diagnostic diagnostics = 2; 350 | } 351 | } 352 | -------------------------------------------------------------------------------- /legacy_resource_type.go: 
--------------------------------------------------------------------------------
package tfsdk

import (
	"context"

	"github.com/apparentlymart/terraform-sdk/tflegacy"
	"github.com/apparentlymart/terraform-sdk/tfschema"
	"github.com/zclconf/go-cty/cty"
)

// LegacyManagedResourceType wraps a managed resource type implemented as
// tflegacy.Resource (formerly schema.Resource) to behave as a
// ManagedResourceType, by applying shims from the new protocol to the older
// types that legacy implementations depend on.
func LegacyManagedResourceType(def *tflegacy.Resource) ManagedResourceType {
	return legacyManagedResourceType{def}
}

// LegacyDataResourceType wraps a data resource type implemented as
// tflegacy.Resource (formerly schema.Resource) to behave as a
// DataResourceType, by applying shims from the new protocol to the older
// types that legacy implementations depend on.
func LegacyDataResourceType(def *tflegacy.Resource) DataResourceType {
	return legacyDataResourceType{def}
}

// legacyManagedResourceType adapts a *tflegacy.Resource to the
// ManagedResourceType interface. Apart from getSchema, every method is
// currently an unimplemented stub that panics if called.
type legacyManagedResourceType struct {
	r *tflegacy.Resource
}

// getSchema converts the wrapped legacy resource's schema into the new
// tfschema representation and reports the legacy SchemaVersion so Terraform
// Core can detect when stored state needs upgrading.
func (rt legacyManagedResourceType) getSchema() (schema *tfschema.BlockType, version int64) {
	schema = prepareLegacyResourceTypeSchema(rt.r, false)
	version = int64(rt.r.SchemaVersion)
	return
}

// validate is not yet implemented for legacy managed resource types.
func (rt legacyManagedResourceType) validate(obj cty.Value) Diagnostics {
	// TODO: Implement
	panic("not implemented")
}

// upgradeState is not yet implemented for legacy managed resource types.
func (rt legacyManagedResourceType) upgradeState(oldJSON []byte, oldVersion int) (cty.Value, Diagnostics) {
	// TODO: Implement
	panic("not implemented")
}

// refresh is not yet implemented for legacy managed resource types.
func (rt legacyManagedResourceType) refresh(ctx context.Context, client interface{}, current cty.Value) (cty.Value, Diagnostics) {
	// TODO: Implement
	panic("not implemented")
}

// planChange is not yet implemented for legacy managed resource types.
func (rt legacyManagedResourceType) planChange(ctx context.Context, client interface{}, prior, config, proposed cty.Value) (cty.Value, cty.PathSet, Diagnostics) {
	// TODO: Implement
	panic("not implemented")
}

// applyChange is not yet implemented for legacy managed resource types.
func (rt legacyManagedResourceType) applyChange(ctx context.Context, client interface{}, prior, planned cty.Value) (cty.Value, Diagnostics) {
	// TODO: Implement
	panic("not implemented")
}

// importState is not yet implemented for legacy managed resource types.
func (rt legacyManagedResourceType) importState(ctx context.Context, client interface{}, id string) (cty.Value, Diagnostics) {
	// TODO: Implement
	panic("not implemented")
}

// legacyDataResourceType adapts a *tflegacy.Resource to the DataResourceType
// interface. Apart from getSchema, every method is currently an
// unimplemented stub that panics if called.
type legacyDataResourceType struct {
	r *tflegacy.Resource
}

// getSchema converts the wrapped legacy resource's schema map into the new
// tfschema representation (with the AsSingle shim disabled).
func (rt legacyDataResourceType) getSchema() *tfschema.BlockType {
	return prepareLegacySchema(rt.r.Schema, false)
}

// validate is not yet implemented for legacy data resource types.
func (rt legacyDataResourceType) validate(obj cty.Value) Diagnostics {
	// TODO: Implement
	panic("not implemented")
}

// read is not yet implemented for legacy data resource types.
func (rt legacyDataResourceType) read(ctx context.Context, client interface{}, config cty.Value) (cty.Value, Diagnostics) {
	// TODO: Implement
	panic("not implemented")
}
--------------------------------------------------------------------------------
/legacy_schema.go:
--------------------------------------------------------------------------------
package tfsdk

import (
	"fmt"

	"github.com/apparentlymart/terraform-sdk/tflegacy"
	"github.com/apparentlymart/terraform-sdk/tfschema"
	"github.com/zclconf/go-cty/cty"
)

// prepareLegacySchema converts a subset of the information in the given legacy
// schema map to the new schema representation. It converts only the minimal
// information required to support the shimming to the old API and to support
// returning an even smaller subset of the schema to Terraform Core when
// requested.
func prepareLegacySchema(old map[string]*tflegacy.Schema, enableAsSingle bool) *tfschema.BlockType {
	ret := &tfschema.BlockType{
		Attributes:       map[string]*tfschema.Attribute{},
		NestedBlockTypes: map[string]*tfschema.NestedBlockType{},
	}

	for name, schema := range old {
		// An entry with no Elem can only describe a plain attribute.
		if schema.Elem == nil {
			ret.Attributes[name] = prepareLegacySchemaAttribute(schema, enableAsSingle)
			continue
		}
		if schema.Type == tflegacy.TypeMap {
			// For TypeMap in particular, it isn't valid for Elem to be a
			// *Resource (since that would be ambiguous in flatmap) and
			// so Elem is treated as a TypeString schema if so. This matches
			// how the field readers treat this situation, for compatibility
			// with configurations targeting Terraform 0.11 and earlier.
			if _, isResource := schema.Elem.(*tflegacy.Resource); isResource {
				sch := *schema // shallow copy
				sch.Elem = &tflegacy.Schema{
					Type: tflegacy.TypeString,
				}
				ret.Attributes[name] = prepareLegacySchemaAttribute(&sch, enableAsSingle)
				continue
			}
		}
		// ConfigMode lets the provider author force attribute vs. block
		// treatment; otherwise we infer it from the Elem type below.
		switch schema.ConfigMode {
		case tflegacy.SchemaConfigModeAttr:
			ret.Attributes[name] = prepareLegacySchemaAttribute(schema, enableAsSingle)
		case tflegacy.SchemaConfigModeBlock:
			ret.NestedBlockTypes[name] = prepareLegacySchemaNestedBlockType(schema, enableAsSingle)
		default: // SchemaConfigModeAuto, or any other invalid value
			if schema.Computed && !schema.Optional {
				// Computed-only schemas are always handled as attributes,
				// because they never appear in configuration.
				ret.Attributes[name] = prepareLegacySchemaAttribute(schema, enableAsSingle)
				continue
			}
			switch schema.Elem.(type) {
			case *tflegacy.Schema, tflegacy.ValueType:
				ret.Attributes[name] = prepareLegacySchemaAttribute(schema, enableAsSingle)
			case *tflegacy.Resource:
				ret.NestedBlockTypes[name] = prepareLegacySchemaNestedBlockType(schema, enableAsSingle)
			default:
				// Should never happen for a valid schema
				panic(fmt.Errorf("invalid Schema.Elem %#v; need *Schema or *Resource", schema.Elem))
			}
		}
	}

	return ret
}

// prepareLegacySchemaAttribute converts a single legacy attribute schema into
// the new tfschema.Attribute representation, deriving the attribute's cty
// type and flattening the Required/Optional/DefaultFunc interactions.
func prepareLegacySchemaAttribute(legacy *tflegacy.Schema, enableAsSingle bool) *tfschema.Attribute {
	// The Schema.DefaultFunc capability adds some extra weirdness here since
	// it can be combined with "Required: true" to create a situation where
	// required-ness is conditional. Terraform Core doesn't share this concept,
	// so we must sniff for this possibility here and conditionally turn
	// off the "Required" flag if it looks like the DefaultFunc is going
	// to provide a value.
	// This is not 100% true to the original interface of DefaultFunc but
	// works well enough for the EnvDefaultFunc and MultiEnvDefaultFunc
	// situations, which are the main cases we care about.
	//
	// Note that this also has a consequence for commands that return schema
	// information for documentation purposes: running those for certain
	// providers will produce different results depending on which environment
	// variables are set. We accept that weirdness in order to keep this
	// interface to core otherwise simple.
	reqd := legacy.Required
	opt := legacy.Optional
	if reqd && legacy.DefaultFunc != nil {
		v, err := legacy.DefaultFunc()
		// We can't report errors from here, so we'll instead just force
		// "Required" to false and let the provider try calling its
		// DefaultFunc again during the validate step, where it can then
		// return the error.
		if err != nil || (err == nil && v != nil) {
			reqd = false
			opt = true
		}
	}

	return &tfschema.Attribute{
		Type:        prepareLegacySchemaType(legacy, enableAsSingle),
		Optional:    opt,
		Required:    reqd,
		Computed:    legacy.Computed,
		Sensitive:   legacy.Sensitive,
		Description: legacy.Description,
	}
}

// prepareLegacySchemaType maps a legacy schema's ValueType (and, for
// collections, its Elem) onto the corresponding cty.Type. Panics on types
// that cannot appear in a valid legacy schema.
func prepareLegacySchemaType(legacy *tflegacy.Schema, enableAsSingle bool) cty.Type {
	switch legacy.Type {
	case tflegacy.TypeString:
		return cty.String
	case tflegacy.TypeBool:
		return cty.Bool
	case tflegacy.TypeInt, tflegacy.TypeFloat:
		// configschema doesn't distinguish int and float, so helper/schema
		// will deal with this as an additional validation step after
		// configuration has been parsed and decoded.
		return cty.Number
	case tflegacy.TypeList, tflegacy.TypeSet, tflegacy.TypeMap:
		var elemType cty.Type
		switch set := legacy.Elem.(type) {
		case *tflegacy.Schema:
			elemType = prepareLegacySchemaType(set, enableAsSingle)
		case tflegacy.ValueType:
			// This represents a mistake in the provider code, but it's a
			// common one so we'll just shim it.
			elemType = prepareLegacySchemaType(&tflegacy.Schema{Type: set}, enableAsSingle)
		case *tflegacy.Resource:
			// By default we construct a NestedBlock in this case, but this
			// behavior is selected either for computed-only schemas or
			// when ConfigMode is explicitly SchemaConfigModeBlock.
			// See schemaMap.CoreConfigSchema for the exact rules.
			elemType = prepareLegacySchema(set.Schema, enableAsSingle).ImpliedCtyType()
		default:
			if set != nil {
				// Should never happen for a valid schema
				panic(fmt.Errorf("invalid Schema.Elem %#v; need *Schema or *Resource", legacy.Elem))
			}
			// Some pre-existing schemas assume string as default, so we need
			// to be compatible with them.
			elemType = cty.String
		}
		if legacy.AsSingle && enableAsSingle {
			// In AsSingle mode, we artificially force a TypeList or TypeSet
			// attribute in the SDK to be treated as a single value by Terraform Core.
			// This must then be fixed up in the shim code (in helper/plugin) so
			// that the SDK still sees the lists or sets it's expecting.
			return elemType
		}
		switch legacy.Type {
		case tflegacy.TypeList:
			return cty.List(elemType)
		case tflegacy.TypeSet:
			return cty.Set(elemType)
		case tflegacy.TypeMap:
			return cty.Map(elemType)
		default:
			// can never get here in practice, due to the case we're inside
			panic("invalid collection type")
		}
	default:
		// should never happen for a valid schema
		panic(fmt.Errorf("invalid Schema.Type %s", legacy.Type))
	}
}

// prepareLegacySchemaNestedBlockType converts a legacy schema entry whose
// Elem is a *tflegacy.Resource into a nested block type, translating the
// legacy Required/Optional/Computed flags into MinItems/MaxItems constraints.
func prepareLegacySchemaNestedBlockType(legacy *tflegacy.Schema, enableAsSingle bool) *tfschema.NestedBlockType {
	ret := &tfschema.NestedBlockType{}
	if nested := prepareLegacySchema(legacy.Elem.(*tflegacy.Resource).Schema, enableAsSingle); nested != nil {
		ret.Content = *nested
	}
	switch legacy.Type {
	case tflegacy.TypeList:
		ret.Nesting = tfschema.NestingList
	case tflegacy.TypeSet:
		ret.Nesting = tfschema.NestingSet
	case tflegacy.TypeMap:
		ret.Nesting = tfschema.NestingMap
	default:
		// Should never happen for a valid schema
		panic(fmt.Errorf("invalid s.Type %s for s.Elem being resource", legacy.Type))
	}

	ret.MinItems = legacy.MinItems
	ret.MaxItems = legacy.MaxItems

	if legacy.AsSingle && enableAsSingle {
		// In AsSingle mode, we artificially force a TypeList or TypeSet
		// attribute in the SDK to be treated as a single block by Terraform Core.
		// This must then be fixed up in the shim code (in helper/plugin) so
		// that the SDK still sees the lists or sets it's expecting.
		ret.Nesting = tfschema.NestingSingle
	}

	if legacy.Required && legacy.MinItems == 0 {
		// new schema doesn't have a "required" representation for nested
		// blocks, but we can fake it by requiring at least one item.
		ret.MinItems = 1
	}
	if legacy.Optional && legacy.MinItems > 0 {
		// Historically helper/schema would ignore MinItems if Optional were
		// set, so we must mimic this behavior here to ensure that providers
		// relying on that undocumented behavior can continue to operate as
		// they did before.
		ret.MinItems = 0
	}
	if legacy.Computed && !legacy.Optional {
		// MinItems/MaxItems are meaningless for computed nested blocks, since
		// they are never set by the user anyway. This ensures that we'll never
		// generate weird errors about them.
		ret.MinItems = 0
		ret.MaxItems = 0
	}

	return ret
}

// prepareLegacyResourceTypeSchema converts a whole legacy resource schema,
// then layers on the conventions Terraform expects of a resource type: an
// implicit "id" attribute and, when the resource declares Timeouts, a
// synthetic single "timeouts" nested block. When shimmed is true the
// AsSingle translation is disabled.
func prepareLegacyResourceTypeSchema(legacy *tflegacy.Resource, shimmed bool) *tfschema.BlockType {
	enableAsSingle := !shimmed
	block := prepareLegacySchema(legacy.Schema, enableAsSingle)

	if block.Attributes == nil {
		block.Attributes = map[string]*tfschema.Attribute{}
	}

	// Add the implicitly required "id" field if it doesn't exist
	if block.Attributes["id"] == nil {
		block.Attributes["id"] = &tfschema.Attribute{
			Type:     cty.String,
			Optional: true,
			Computed: true,
		}
	}

	_, timeoutsAttr := block.Attributes[tflegacy.TimeoutsConfigKey]
	_, timeoutsBlock := block.NestedBlockTypes[tflegacy.TimeoutsConfigKey]

	// Insert configured timeout values into the schema, as long as the schema
	// didn't define anything else by that name.
	if legacy.Timeouts != nil && !timeoutsAttr && !timeoutsBlock {
		timeouts := tfschema.BlockType{
			Attributes: map[string]*tfschema.Attribute{},
		}

		if legacy.Timeouts.Create != nil {
			timeouts.Attributes[tflegacy.TimeoutCreate] = &tfschema.Attribute{
				Type:     cty.String,
				Optional: true,
			}
		}

		if legacy.Timeouts.Read != nil {
			timeouts.Attributes[tflegacy.TimeoutRead] = &tfschema.Attribute{
				Type:     cty.String,
				Optional: true,
			}
		}

		if legacy.Timeouts.Update != nil {
			timeouts.Attributes[tflegacy.TimeoutUpdate] = &tfschema.Attribute{
				Type:     cty.String,
				Optional: true,
			}
		}

		if legacy.Timeouts.Delete != nil {
			timeouts.Attributes[tflegacy.TimeoutDelete] = &tfschema.Attribute{
				Type:     cty.String,
				Optional: true,
			}
		}

		if legacy.Timeouts.Default != nil {
			timeouts.Attributes[tflegacy.TimeoutDefault] = &tfschema.Attribute{
				Type:     cty.String,
				Optional: true,
			}
		}

		block.NestedBlockTypes[tflegacy.TimeoutsConfigKey] = &tfschema.NestedBlockType{
			Nesting: tfschema.NestingSingle,
			Content: timeouts,
		}
	}

	return block
}
--------------------------------------------------------------------------------
/plugin.go:
--------------------------------------------------------------------------------
package tfsdk

import (
	"context"
	"crypto/tls"
	"fmt"
	"log"
	"net"
	"os"

	"github.com/apparentlymart/terraform-sdk/internal/tfplugin5"
	"go.rpcplugin.org/rpcplugin"
	"go.rpcplugin.org/rpcplugin/plugintrace"
	"google.golang.org/grpc"
	grpcCodes "google.golang.org/grpc/codes"
)

// ServeProviderPlugin starts a plugin server for the given provider, which will
// first deal with the plugin protocol handshake and then, once initialized,
// serve RPC requests from the client (usually Terraform CLI).
//
// This should be called in the main function for the plugin program.
// ServeProviderPlugin returns only once the plugin has been requested to exit
// by its client.
func ServeProviderPlugin(p *Provider) {
	ctx := plugintrace.WithServerTracer(context.Background(), &plugintrace.ServerTracer{
		Listening: func(addr net.Addr, tlsConfig *tls.Config, protoVersion int) {
			log.Printf("[INFO] provider plugin server (protocol %d) listening on %s", protoVersion, addr)
		},
	})

	err := rpcplugin.Serve(ctx, &rpcplugin.ServerConfig{
		Handshake: rpcplugin.HandshakeConfig{
			// This cookie pair is how Terraform CLI recognizes a legitimate
			// plugin handshake; both values must match what Terraform expects.
			CookieKey:   "TF_PLUGIN_MAGIC_COOKIE",
			CookieValue: "d602bf8f470bc67ca7faa0386276bbdd4330efaf76d1a219cb4d6991ca9872b2",
		},
		ProtoVersions: map[int]rpcplugin.Server{
			5: protocolVersion5{p},
		},
	})

	if err != nil {
		fmt.Fprintln(os.Stderr, err.Error())
		os.Exit(1)
	}
}

// tfplugin5Server returns a protocol-5 gRPC server implementation wrapping
// the receiving provider.
func (p *Provider) tfplugin5Server() tfplugin5.ProviderServer {
	// This single shared context will be passed (directly or indirectly) to
	// each provider method that can make network requests and cancelled if
	// the Terraform operation receives an interrupt request.
	ctx, cancel := context.WithCancel(context.Background())

	return &tfplugin5Server{
		p:    p,
		ctx:  ctx,
		stop: cancel,
	}
}

// tfplugin5Server implements tfplugin5.ProviderServer in terms of a Provider.
type tfplugin5Server struct {
	p *Provider
	// ctx is the long-lived operation context; stop cancels it to implement
	// graceful shutdown.
	ctx  context.Context
	stop func()
}

// GetSchema returns the provider's own configuration schema along with the
// schemas (and, for managed resources, schema versions) of every resource
// type the provider supports.
func (s *tfplugin5Server) GetSchema(context.Context, *tfplugin5.GetProviderSchema_Request) (*tfplugin5.GetProviderSchema_Response, error) {
	resp := &tfplugin5.GetProviderSchema_Response{}

	resp.Provider = &tfplugin5.Schema{
		Block: convertSchemaBlockToTFPlugin5(s.p.ConfigSchema),
	}

	resp.ResourceSchemas = make(map[string]*tfplugin5.Schema)
	for name, rt := range s.p.ManagedResourceTypes {
		schema, version := rt.getSchema()
		resp.ResourceSchemas[name] = &tfplugin5.Schema{
			Version: version,
			Block:   convertSchemaBlockToTFPlugin5(schema),
		}
	}

	resp.DataSourceSchemas = make(map[string]*tfplugin5.Schema)
	for name, rt := range s.p.DataResourceTypes {
		schema := rt.getSchema()
		resp.DataSourceSchemas[name] = &tfplugin5.Schema{
			Block: convertSchemaBlockToTFPlugin5(schema),
		}
	}

	return resp, nil
}

// requireManagedResourceType is a helper to conveniently retrieve a particular
// managed resource type or produce an error message if it is invalid.
//
// The usage pattern for this method is:
//
//     var rt ManagedResourceType
//     if rt = s.requireManagedResourceType(req.TypeName, &resp.Diagnostics); rt == nil {
//         return resp, nil
//     }
func (s *tfplugin5Server) requireManagedResourceType(typeName string, diagsPtr *[]*tfplugin5.Diagnostic) ManagedResourceType {
	rt := s.p.managedResourceType(typeName)
	if rt == nil {
		var diags Diagnostics
		diags = diags.Append(Diagnostic{
			Severity: Error,
			Summary:  "Unsupported resource type",
			Detail:   fmt.Sprintf("This provider does not support managed resource type %q", typeName),
		})
		*diagsPtr = encodeDiagnosticsToTFPlugin5(diags)
	}
	return rt
}

// requireDataResourceType is a helper to conveniently retrieve a particular
// data resource type or produce an error message if it is invalid.
//
// The usage pattern for this method is:
//
//     var rt DataResourceType
//     if rt = s.requireDataResourceType(req.TypeName, &resp.Diagnostics); rt == nil {
//         return resp, nil
//     }
func (s *tfplugin5Server) requireDataResourceType(typeName string, diagsPtr *[]*tfplugin5.Diagnostic) DataResourceType {
	rt := s.p.dataResourceType(typeName)
	if rt == nil {
		var diags Diagnostics
		diags = diags.Append(Diagnostic{
			Severity: Error,
			Summary:  "Unsupported resource type",
			Detail:   fmt.Sprintf("This provider does not support data resource type %q", typeName),
		})
		*diagsPtr = encodeDiagnosticsToTFPlugin5(diags)
	}
	return rt
}

// PrepareProviderConfig decodes the proposed provider configuration, runs
// the provider's own preparation/validation step, and returns the prepared
// configuration. Protocol errors are reported as diagnostics, not Go errors.
func (s *tfplugin5Server) PrepareProviderConfig(ctx context.Context, req *tfplugin5.PrepareProviderConfig_Request) (*tfplugin5.PrepareProviderConfig_Response, error) {
	resp := &tfplugin5.PrepareProviderConfig_Response{}

	proposedVal, diags := decodeTFPlugin5DynamicValue(req.Config, s.p.ConfigSchema)
	if diags.HasErrors() {
		resp.Diagnostics = encodeDiagnosticsToTFPlugin5(diags)
		return resp, nil
	}

	preparedVal, diags := s.p.prepareConfig(proposedVal)
	resp.PreparedConfig = encodeTFPlugin5DynamicValue(preparedVal, s.p.ConfigSchema)
	resp.Diagnostics = encodeDiagnosticsToTFPlugin5(diags)
	return resp, nil
}

// ValidateResourceTypeConfig decodes a managed resource configuration against
// its schema and delegates to the resource type's validate implementation.
func (s *tfplugin5Server) ValidateResourceTypeConfig(ctx context.Context, req *tfplugin5.ValidateResourceTypeConfig_Request) (*tfplugin5.ValidateResourceTypeConfig_Response, error) {
	resp := &tfplugin5.ValidateResourceTypeConfig_Response{}

	var rt ManagedResourceType
	if rt = s.requireManagedResourceType(req.TypeName, &resp.Diagnostics); rt == nil {
		return resp, nil
	}

	schema, _ := rt.getSchema()
	configVal, diags := decodeTFPlugin5DynamicValue(req.Config, schema)
	if diags.HasErrors() {
		resp.Diagnostics = encodeDiagnosticsToTFPlugin5(diags)
		return resp, nil
	}

	diags = rt.validate(configVal)
	resp.Diagnostics = encodeDiagnosticsToTFPlugin5(diags)
	return resp, nil
}

// ValidateDataSourceConfig decodes a data resource configuration against its
// schema and delegates to the resource type's validate implementation.
func (s *tfplugin5Server) ValidateDataSourceConfig(ctx context.Context, req *tfplugin5.ValidateDataSourceConfig_Request) (*tfplugin5.ValidateDataSourceConfig_Response, error) {
	resp := &tfplugin5.ValidateDataSourceConfig_Response{}

	var rt DataResourceType
	if rt = s.requireDataResourceType(req.TypeName, &resp.Diagnostics); rt == nil {
		return resp, nil
	}

	schema := rt.getSchema()
	configVal, diags := decodeTFPlugin5DynamicValue(req.Config, schema)
	if diags.HasErrors() {
		resp.Diagnostics = encodeDiagnosticsToTFPlugin5(diags)
		return resp, nil
	}

	diags = rt.validate(configVal)
	resp.Diagnostics = encodeDiagnosticsToTFPlugin5(diags)
	return resp, nil
}

func (s *tfplugin5Server) UpgradeResourceState(ctx context.Context, req
// UpgradeResourceState converts a state object saved by an earlier schema
// version into the shape required by the current schema.
func (s *tfplugin5Server) UpgradeResourceState(ctx context.Context, req *tfplugin5.UpgradeResourceState_Request) (*tfplugin5.UpgradeResourceState_Response, error) {
	// TODO: Do some fixups we can do automatically, like transforming primitive
	// types, and then give the provider code an opportunity to do its own
	// fixups as needed.
	// Might also need to deal with converting flatmap to JSON here, but maybe
	// flatmap states will be rare enough that it's okay to just fail those?

	resp := &tfplugin5.UpgradeResourceState_Response{}

	var rt ManagedResourceType
	if rt = s.requireManagedResourceType(req.TypeName, &resp.Diagnostics); rt == nil {
		return resp, nil
	}

	schema, _ := rt.getSchema()
	stateVal, diags := decodeTFPlugin5RawState(req.RawState, schema)
	if diags.HasErrors() {
		resp.Diagnostics = encodeDiagnosticsToTFPlugin5(diags)
		return resp, nil
	}

	resp.UpgradedState = encodeTFPlugin5DynamicValue(stateVal, schema)
	return resp, nil
}

// Configure receives the finalized provider configuration and passes it to
// the provider's configure function, which typically constructs the client
// object used by subsequent resource operations.
func (s *tfplugin5Server) Configure(ctx context.Context, req *tfplugin5.Configure_Request) (*tfplugin5.Configure_Response, error) {
	resp := &tfplugin5.Configure_Response{}

	configVal, diags := decodeTFPlugin5DynamicValue(req.Config, s.p.ConfigSchema)
	if diags.HasErrors() {
		resp.Diagnostics = encodeDiagnosticsToTFPlugin5(diags)
		return resp, nil
	}

	stoppableCtx := s.stoppableContext(ctx)
	diags = s.p.configure(stoppableCtx, configVal)
	resp.Diagnostics = encodeDiagnosticsToTFPlugin5(diags)
	return resp, nil
}

// ReadResource refreshes a managed resource instance by asking the provider
// for its current upstream state.
func (s *tfplugin5Server) ReadResource(ctx context.Context, req *tfplugin5.ReadResource_Request) (*tfplugin5.ReadResource_Response, error) {
	resp := &tfplugin5.ReadResource_Response{}

	var rt ManagedResourceType
	if rt = s.requireManagedResourceType(req.TypeName, &resp.Diagnostics); rt == nil {
		return resp, nil
	}
	schema, _ := rt.getSchema()

	currentVal, diags := decodeTFPlugin5DynamicValue(req.CurrentState, schema)
	if diags.HasErrors() {
		resp.Diagnostics = encodeDiagnosticsToTFPlugin5(diags)
		return resp, nil
	}

	stoppableCtx := s.stoppableContext(ctx)
	newVal, diags := s.p.readResource(stoppableCtx, rt, currentVal)

	// Safety check: the provider must return a value conforming to the
	// schema's implied type, or Terraform Core would reject/misbehave.
	wantTy := schema.ImpliedCtyType()
	for _, err := range newVal.Type().TestConformance(wantTy) {
		diags = diags.Append(Diagnostic{
			Severity: Error,
			Summary:  "Invalid result from provider",
			Detail:   fmt.Sprintf("Provider produced an invalid new object for %s: %s", req.TypeName, FormatError(err)),
		})
	}

	resp.NewState = encodeTFPlugin5DynamicValue(newVal, schema)
	resp.Diagnostics = encodeDiagnosticsToTFPlugin5(diags)
	return resp, nil
}

// PlanResourceChange produces a planned new state for a managed resource
// instance, given the prior state, the configuration, and Terraform Core's
// proposed merge of the two.
func (s *tfplugin5Server) PlanResourceChange(ctx context.Context, req *tfplugin5.PlanResourceChange_Request) (*tfplugin5.PlanResourceChange_Response, error) {
	resp := &tfplugin5.PlanResourceChange_Response{}

	var rt ManagedResourceType
	if rt = s.requireManagedResourceType(req.TypeName, &resp.Diagnostics); rt == nil {
		return resp, nil
	}
	schema, _ := rt.getSchema()

	priorVal, diags := decodeTFPlugin5DynamicValue(req.PriorState, schema)
	if diags.HasErrors() {
		resp.Diagnostics = encodeDiagnosticsToTFPlugin5(diags)
		return resp, nil
	}
	configVal, diags := decodeTFPlugin5DynamicValue(req.Config, schema)
	if diags.HasErrors() {
		resp.Diagnostics = encodeDiagnosticsToTFPlugin5(diags)
		return resp, nil
	}
	proposedVal, diags := decodeTFPlugin5DynamicValue(req.ProposedNewState, schema)
	if diags.HasErrors() {
		resp.Diagnostics = encodeDiagnosticsToTFPlugin5(diags)
		return resp, nil
	}

	stoppableCtx := s.stoppableContext(ctx)
	plannedVal, requiresReplace, diags := s.p.planResourceChange(stoppableCtx, rt, priorVal, configVal, proposedVal)

	// Safety check: the planned value must conform to the schema's implied
	// type before we hand it back to Terraform Core.
	wantTy := schema.ImpliedCtyType()
	for _, err := range plannedVal.Type().TestConformance(wantTy) {
		diags = diags.Append(Diagnostic{
			Severity: Error,
			Summary:  "Invalid result from provider",
			Detail:   fmt.Sprintf("Provider produced an invalid planned new object for %s: %s", req.TypeName, FormatError(err)),
		})
	}

	resp.PlannedState = encodeTFPlugin5DynamicValue(plannedVal, schema)
	resp.RequiresReplace = encodeAttrPathSetToTFPlugin5(requiresReplace)
	resp.Diagnostics = encodeDiagnosticsToTFPlugin5(diags)
	return resp, nil
}

// ApplyResourceChange applies a previously planned change to a managed
// resource instance and returns the resulting new state.
func (s *tfplugin5Server) ApplyResourceChange(ctx context.Context, req *tfplugin5.ApplyResourceChange_Request) (*tfplugin5.ApplyResourceChange_Response, error) {
	resp := &tfplugin5.ApplyResourceChange_Response{}

	var rt ManagedResourceType
	if rt = s.requireManagedResourceType(req.TypeName, &resp.Diagnostics); rt == nil {
		return resp, nil
	}
	schema, _ := rt.getSchema()

	priorVal, diags := decodeTFPlugin5DynamicValue(req.PriorState, schema)
	if diags.HasErrors() {
		resp.Diagnostics = encodeDiagnosticsToTFPlugin5(diags)
		return resp, nil
	}
	plannedVal, diags := decodeTFPlugin5DynamicValue(req.PlannedState, schema)
	if diags.HasErrors() {
		resp.Diagnostics = encodeDiagnosticsToTFPlugin5(diags)
		return resp, nil
	}

	stoppableCtx := s.stoppableContext(ctx)
	newVal, diags := s.p.applyResourceChange(stoppableCtx, rt, priorVal, plannedVal)

	// Safety check: the new state must conform to the schema's implied type.
	wantTy := schema.ImpliedCtyType()
	for _, err := range newVal.Type().TestConformance(wantTy) {
		diags = diags.Append(Diagnostic{
			Severity: Error,
			Summary:  "Invalid result from provider",
			Detail:   fmt.Sprintf("Provider produced an invalid new object for %s: %s", req.TypeName, FormatError(err)),
		})
	}

	resp.NewState = encodeTFPlugin5DynamicValue(newVal, schema)
	resp.Diagnostics = encodeDiagnosticsToTFPlugin5(diags)
	return resp, nil
}

// ImportResourceState is not yet supported by this SDK.
func (s *tfplugin5Server) ImportResourceState(context.Context, *tfplugin5.ImportResourceState_Request) (*tfplugin5.ImportResourceState_Response, error) {
	return nil, grpc.Errorf(grpcCodes.Unimplemented, "not implemented")
}

// ReadDataSource reads the current value for a data resource instance, given
// its configuration.
func (s *tfplugin5Server) ReadDataSource(ctx context.Context, req *tfplugin5.ReadDataSource_Request) (*tfplugin5.ReadDataSource_Response, error) {
	resp := &tfplugin5.ReadDataSource_Response{}

	var rt DataResourceType
	if rt = s.requireDataResourceType(req.TypeName, &resp.Diagnostics); rt == nil {
		return resp, nil
	}
	schema := rt.getSchema()

	currentVal, diags := decodeTFPlugin5DynamicValue(req.Config, schema)
	if diags.HasErrors() {
		resp.Diagnostics = encodeDiagnosticsToTFPlugin5(diags)
		return resp, nil
	}

	stoppableCtx := s.stoppableContext(ctx)
	newVal, diags := s.p.readDataSource(stoppableCtx, rt, currentVal)

	// Safety check: the result must conform to the schema's implied type.
	wantTy := schema.ImpliedCtyType()
	for _, err := range newVal.Type().TestConformance(wantTy) {
		diags = diags.Append(Diagnostic{
			Severity: Error,
			Summary:  "Invalid result from provider",
			Detail:   fmt.Sprintf("Provider produced an invalid new object for %s: %s", req.TypeName, FormatError(err)),
		})
	}

	resp.State = encodeTFPlugin5DynamicValue(newVal, schema)
	resp.Diagnostics = encodeDiagnosticsToTFPlugin5(diags)
	return resp, nil
}
// actions and returning (possibly with an error) as quickly as possible. 391 | s.stop() 392 | return &tfplugin5.Stop_Response{}, nil 393 | } 394 | 395 | // stoppableContext returns a new context that will get cancelled if either the 396 | // given context is cancelled or if the provider is asked to stop. 397 | // 398 | // This function starts a goroutine that exits only when the given context is 399 | // cancelled, so it's important that the given context be cancelled shortly 400 | // after the request it represents is completed. 401 | func (s *tfplugin5Server) stoppableContext(ctx context.Context) context.Context { 402 | stoppable, cancel := context.WithCancel(s.ctx) 403 | go func() { 404 | <-ctx.Done() 405 | cancel() 406 | }() 407 | return stoppable 408 | } 409 | 410 | // protocolVersion5 is an implementation of rpcplugin.Server that implements 411 | // protocol version 5. 412 | type protocolVersion5 struct { 413 | p *Provider 414 | } 415 | 416 | var _ rpcplugin.Server = protocolVersion5{} 417 | 418 | func (p protocolVersion5) RegisterServer(server *grpc.Server) error { 419 | tfplugin5.RegisterProviderServer(server, p.p.tfplugin5Server()) 420 | return nil 421 | } 422 | -------------------------------------------------------------------------------- /plugin_conv.go: -------------------------------------------------------------------------------- 1 | package tfsdk 2 | 3 | import ( 4 | "fmt" 5 | "sort" 6 | 7 | "github.com/apparentlymart/terraform-sdk/internal/tfplugin5" 8 | "github.com/apparentlymart/terraform-sdk/tfschema" 9 | "github.com/zclconf/go-cty/cty" 10 | "github.com/zclconf/go-cty/cty/json" 11 | "github.com/zclconf/go-cty/cty/msgpack" 12 | ) 13 | 14 | func convertSchemaBlockToTFPlugin5(src *tfschema.BlockType) *tfplugin5.Schema_Block { 15 | ret := &tfplugin5.Schema_Block{} 16 | if src == nil { 17 | // Weird, but we'll allow it. 
18 | return ret 19 | } 20 | 21 | for name, attrS := range src.Attributes { 22 | tyJSON, err := attrS.Type.MarshalJSON() 23 | if err != nil { 24 | // Should never happen, since types should always be valid 25 | panic(fmt.Sprintf("failed to serialize %#v as JSON: %s", attrS.Type, err)) 26 | } 27 | ret.Attributes = append(ret.Attributes, &tfplugin5.Schema_Attribute{ 28 | Name: name, 29 | Type: tyJSON, 30 | Description: attrS.Description, 31 | Required: attrS.Required, 32 | Optional: attrS.Optional, 33 | Computed: attrS.Computed || attrS.Default != nil, 34 | Sensitive: attrS.Sensitive, 35 | }) 36 | } 37 | 38 | for name, blockS := range src.NestedBlockTypes { 39 | nested := convertSchemaBlockToTFPlugin5(&blockS.Content) 40 | var nesting tfplugin5.Schema_NestedBlock_NestingMode 41 | switch blockS.Nesting { 42 | case tfschema.NestingSingle: 43 | nesting = tfplugin5.Schema_NestedBlock_SINGLE 44 | case tfschema.NestingGroup: 45 | nesting = tfplugin5.Schema_NestedBlock_GROUP 46 | case tfschema.NestingList: 47 | nesting = tfplugin5.Schema_NestedBlock_LIST 48 | case tfschema.NestingMap: 49 | nesting = tfplugin5.Schema_NestedBlock_MAP 50 | case tfschema.NestingSet: 51 | nesting = tfplugin5.Schema_NestedBlock_SET 52 | default: 53 | // Should never happen because the above is exhaustive. 
54 | panic(fmt.Sprintf("unsupported block nesting mode %#v", blockS.Nesting)) 55 | } 56 | ret.BlockTypes = append(ret.BlockTypes, &tfplugin5.Schema_NestedBlock{ 57 | TypeName: name, 58 | Nesting: nesting, 59 | Block: nested, 60 | MaxItems: int64(blockS.MaxItems), 61 | MinItems: int64(blockS.MinItems), 62 | }) 63 | } 64 | 65 | sort.Slice(ret.Attributes, func(i, j int) bool { 66 | return ret.Attributes[i].Name < ret.Attributes[j].Name 67 | }) 68 | 69 | return ret 70 | } 71 | 72 | func decodeTFPlugin5DynamicValue(src *tfplugin5.DynamicValue, schema *tfschema.BlockType) (cty.Value, Diagnostics) { 73 | switch { 74 | case len(src.Json) > 0: 75 | return decodeJSONObject(src.Json, schema) 76 | default: 77 | return decodeMsgpackObject(src.Msgpack, schema) 78 | } 79 | } 80 | 81 | func encodeTFPlugin5DynamicValue(src cty.Value, schema *tfschema.BlockType) *tfplugin5.DynamicValue { 82 | msgpackSrc := encodeMsgpackObject(src, schema) 83 | return &tfplugin5.DynamicValue{ 84 | Msgpack: msgpackSrc, 85 | } 86 | } 87 | 88 | func decodeTFPlugin5RawState(src *tfplugin5.RawState, schema *tfschema.BlockType) (cty.Value, Diagnostics) { 89 | switch { 90 | case len(src.Json) > 0: 91 | return decodeJSONObject(src.Json, schema) 92 | default: 93 | diags := Diagnostics{ 94 | { 95 | Severity: Error, 96 | Summary: "Can't upgrade legacy state", 97 | // FIXME: Terrible unactionable error message 98 | Detail: "The state for this object is in a legacy format that is no longer supported. 
You must first apply a change to it with an older version of the provider.", 99 | }, 100 | } 101 | return cty.DynamicVal, diags 102 | } 103 | } 104 | 105 | func decodeJSONObject(src []byte, schema *tfschema.BlockType) (cty.Value, Diagnostics) { 106 | var diags Diagnostics 107 | wantTy := schema.ImpliedCtyType() 108 | ret, err := json.Unmarshal(src, wantTy) 109 | if err != nil { 110 | var path cty.Path 111 | if pErr, ok := err.(cty.PathError); ok { 112 | path = pErr.Path 113 | } 114 | diags = diags.Append(Diagnostic{ 115 | Severity: Error, 116 | Summary: "Invalid object from Terraform Core", 117 | Detail: fmt.Sprintf("Provider recieved an object value from Terraform Core that could not be decoded: %s.\n\nThis is a bug in either Terraform Core or in the plugin SDK; please report it in Terraform Core's repository.", err), 118 | Path: path, 119 | }) 120 | } 121 | return ret, diags 122 | } 123 | 124 | func decodeMsgpackObject(src []byte, schema *tfschema.BlockType) (cty.Value, Diagnostics) { 125 | var diags Diagnostics 126 | wantTy := schema.ImpliedCtyType() 127 | ret, err := msgpack.Unmarshal(src, wantTy) 128 | if err != nil { 129 | var path cty.Path 130 | if pErr, ok := err.(cty.PathError); ok { 131 | path = pErr.Path 132 | } 133 | diags = diags.Append(Diagnostic{ 134 | Severity: Error, 135 | Summary: "Invalid object from Terraform Core", 136 | Detail: fmt.Sprintf("Provider recieved an object value from Terraform Core that could not be decoded: %s.\n\nThis is a bug in either Terraform Core or in the plugin SDK; please report it in Terraform Core's repository.", err), 137 | Path: path, 138 | }) 139 | } 140 | return ret, diags 141 | } 142 | 143 | func encodeMsgpackObject(src cty.Value, schema *tfschema.BlockType) []byte { 144 | wantTy := schema.ImpliedCtyType() 145 | ret, err := msgpack.Marshal(src, wantTy) 146 | if err != nil { 147 | // Errors in _encoding_ always indicate programming errors in the SDK, 148 | // since it should be checking these things on the way out. 
// encodeMsgpackObject encodes src as msgpack using the cty type implied by
// the given schema. It panics on failure rather than returning an error.
func encodeMsgpackObject(src cty.Value, schema *tfschema.BlockType) []byte {
	wantTy := schema.ImpliedCtyType()
	ret, err := msgpack.Marshal(src, wantTy)
	if err != nil {
		// Errors in _encoding_ always indicate programming errors in the SDK,
		// since it should be checking these things on the way out.
		panic(fmt.Sprintf("invalid object to encode: %s", err))
	}
	return ret
}

// encodeDiagnosticsToTFPlugin5 converts SDK diagnostics into their
// protocol-version-5 wire representation.
func encodeDiagnosticsToTFPlugin5(src Diagnostics) []*tfplugin5.Diagnostic {
	var ret []*tfplugin5.Diagnostic
	for _, diag := range src {
		var severity tfplugin5.Diagnostic_Severity
		switch diag.Severity {
		case Error:
			severity = tfplugin5.Diagnostic_ERROR
		case Warning:
			severity = tfplugin5.Diagnostic_WARNING
		}

		ret = append(ret, &tfplugin5.Diagnostic{
			Severity:  severity,
			Summary:   diag.Summary,
			Detail:    diag.Detail,
			Attribute: encodeAttrPathToTFPlugin5(diag.Path),
		})
	}
	return ret
}

// encodeAttrPathToTFPlugin5 converts a cty.Path into the wire representation
// of an attribute path. String and number index keys are supported; any
// other key type produces a nil step for Terraform Core to report as invalid.
func encodeAttrPathToTFPlugin5(path cty.Path) *tfplugin5.AttributePath {
	ret := &tfplugin5.AttributePath{}
	for _, rawStep := range path {
		switch step := rawStep.(type) {
		case cty.GetAttrStep:
			ret.Steps = append(ret.Steps, &tfplugin5.AttributePath_Step{
				Selector: &tfplugin5.AttributePath_Step_AttributeName{
					AttributeName: step.Name,
				},
			})
		case cty.IndexStep:
			switch step.Key.Type() {
			case cty.String:
				ret.Steps = append(ret.Steps, &tfplugin5.AttributePath_Step{
					Selector: &tfplugin5.AttributePath_Step_ElementKeyString{
						ElementKeyString: step.Key.AsString(),
					},
				})
			case cty.Number:
				idx, _ := step.Key.AsBigFloat().Int64()
				ret.Steps = append(ret.Steps, &tfplugin5.AttributePath_Step{
					Selector: &tfplugin5.AttributePath_Step_ElementKeyInt{
						ElementKeyInt: idx,
					},
				})
			default:
				// no other key types are valid, so we'll produce garbage in this case
				// and have Terraform Core report it as such.
				ret.Steps = append(ret.Steps, nil)
			}
		}
	}
	return ret
}

// encodeAttrPathSetToTFPlugin5 converts every path in the given set using
// encodeAttrPathToTFPlugin5.
func encodeAttrPathSetToTFPlugin5(s cty.PathSet) []*tfplugin5.AttributePath {
	l := s.List()
	ret := make([]*tfplugin5.AttributePath, len(l))
	for i, path := range l {
		ret[i] = encodeAttrPathToTFPlugin5(path)
	}
	return ret
}
--------------------------------------------------------------------------------
/provider.go:
--------------------------------------------------------------------------------
package tfsdk

import (
	"context"
	"fmt"

	"github.com/apparentlymart/terraform-sdk/internal/dynfunc"
	"github.com/apparentlymart/terraform-sdk/tfschema"
	"github.com/zclconf/go-cty/cty"
)

// Provider is the main type for describing a Terraform provider
// implementation. The primary Go package for a provider should include
// a function that returns a pointer to a Provider object describing the
// resource types and other objects exposed by the provider.
type Provider struct {
	// ConfigSchema describes the provider's own configuration block.
	ConfigSchema *tfschema.BlockType

	// ManagedResourceTypes and DataResourceTypes map resource type names to
	// their implementations.
	ManagedResourceTypes map[string]ManagedResourceType
	DataResourceTypes    map[string]DataResourceType

	// ConfigureFn is called with the finalized provider configuration and is
	// expected to produce the client object stored in the client field below.
	ConfigureFn interface{}

	// client is the value produced by ConfigureFn, passed to each resource
	// operation.
	client interface{}
}

// ManagedResourceType is the interface implemented by managed resource type
// implementations.
//
// This is a closed interface, meaning that all of its implementations are
// inside this package. To implement a managed resource type, create a
// *ResourceTypeDef value and pass it to NewManagedResourceType.
type ManagedResourceType interface {
	getSchema() (schema *tfschema.BlockType, version int64)
	validate(obj cty.Value) Diagnostics
	upgradeState(oldJSON []byte, oldVersion int) (cty.Value, Diagnostics)
	refresh(ctx context.Context, client interface{}, old cty.Value) (cty.Value, Diagnostics)
	planChange(ctx context.Context, client interface{}, prior, config, proposed cty.Value) (planned cty.Value, requiresReplace cty.PathSet, diags Diagnostics)
	applyChange(ctx context.Context, client interface{}, prior, planned cty.Value) (cty.Value, Diagnostics)
	importState(ctx context.Context, client interface{}, id string) (cty.Value, Diagnostics)
}

// DataResourceType is an interface implemented by data resource type
// implementations.
//
// This is a closed interface, meaning that all of its implementations are
// inside this package. To implement a data resource type, create a
// *ResourceTypeDef value and pass it to NewDataResourceType.
type DataResourceType interface {
	getSchema() *tfschema.BlockType
	validate(obj cty.Value) Diagnostics
	read(ctx context.Context, client interface{}, config cty.Value) (cty.Value, Diagnostics)
}

// prepareConfig accepts an object decoded from the user-provided configuration
// (whose type must conform to the schema) and validates it, possibly also
// altering some of the values within to produce a final configuration for
// Terraform Core to use when interacting with this provider instance.
func (p *Provider) prepareConfig(proposedVal cty.Value) (cty.Value, Diagnostics) {
	diags := ValidateBlockObject(p.ConfigSchema, proposedVal)
	return proposedVal, diags
}

// configure receives the finalized configuration for the provider and passes
// it to the provider's configuration function to produce the client object
// that will be received by the various resource operations.
func (p *Provider) configure(ctx context.Context, config cty.Value) Diagnostics {
	var diags Diagnostics
	var client interface{}
	fn, err := dynfunc.WrapFunctionWithReturnValue(p.ConfigureFn, &client, ctx, config)
	if err != nil {
		diags = diags.Append(Diagnostic{
			Severity: Error,
			Summary:  "Invalid provider implementation",
			Detail:   fmt.Sprintf("Invalid ConfigureFn: %s.\nThis is a bug in the provider that should be reported in its own issue tracker.", err),
		})
		return diags
	}

	moreDiags := fn()
	diags = diags.Append(moreDiags)
	if !diags.HasErrors() {
		// Only retain the client if configuration fully succeeded.
		p.client = client
	}
	return diags
}

// managedResourceType looks up a managed resource type by name, returning nil
// if it is not supported.
func (p *Provider) managedResourceType(typeName string) ManagedResourceType {
	return p.ManagedResourceTypes[typeName]
}

// dataResourceType looks up a data resource type by name, returning nil if it
// is not supported.
func (p *Provider) dataResourceType(typeName string) DataResourceType {
	return p.DataResourceTypes[typeName]
}

// readResource delegates to the resource type's refresh, passing the
// configured client.
func (p *Provider) readResource(ctx context.Context, rt ManagedResourceType, currentVal cty.Value) (cty.Value, Diagnostics) {
	return rt.refresh(ctx, p.client, currentVal)
}

// readDataSource delegates to the data resource type's read, passing the
// configured client.
func (p *Provider) readDataSource(ctx context.Context, rt DataResourceType, configVal cty.Value) (cty.Value, Diagnostics) {
	return rt.read(ctx, p.client, configVal)
}

// planResourceChange delegates to the resource type's planChange, passing the
// configured client.
func (p *Provider) planResourceChange(ctx context.Context, rt ManagedResourceType, priorVal, configVal, proposedVal cty.Value) (cty.Value, cty.PathSet, Diagnostics) {
	return rt.planChange(ctx, p.client, priorVal, configVal, proposedVal)
}

// applyResourceChange delegates to the resource type's applyChange, passing
// the configured client.
func (p *Provider) applyResourceChange(ctx context.Context, rt ManagedResourceType, priorVal, plannedVal cty.Value) (cty.Value, Diagnostics) {
	return rt.applyChange(ctx, p.client, priorVal, plannedVal)
}
--------------------------------------------------------------------------------
/resource_type.go:
-------------------------------------------------------------------------------- 1 | package tfsdk 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | 7 | "github.com/apparentlymart/terraform-sdk/internal/dynfunc" 8 | "github.com/apparentlymart/terraform-sdk/tfobj" 9 | "github.com/apparentlymart/terraform-sdk/tfschema" 10 | "github.com/zclconf/go-cty/cty" 11 | ) 12 | 13 | // ResourceTypeDef is the type that provider packages should instantiate to 14 | // describe the implementation of a specific resource type. 15 | // 16 | // "Def" in the type name is short for "Definition"; a ResourceTypeDef is not 17 | // actually itself a resource type, but pointers to instances of this type can 18 | // be passed to the functions NewManagedResourceType and NewDataResourceType to 19 | // provide managed and data resource type implementations respectively. Each 20 | // specific resource type kind has its own constraints on what can and must 21 | // be set in a ResourceTypeDef for that kind; see the resource type constructor 22 | // functions' documentation for more information. 23 | type ResourceTypeDef struct { 24 | ConfigSchema *tfschema.BlockType 25 | SchemaVersion int64 // Only used for managed resource types; leave as zero otherwise 26 | 27 | // CreateFn is a function called when creating an instance of your resource 28 | // type for the first time. It must be a function compatible with the 29 | // following signature: 30 | // 31 | // func (ctx context.Context, client interface{}, planned tfobj.ObjectReader) (new cty.Value, diags tfsdk.Diagnostics) 32 | // 33 | // If the create was not completely successful, you may still return a 34 | // partially-created object alongside error diagnostics to retain the parts 35 | // that _were_ created. 36 | CreateFn interface{} 37 | 38 | // ReadFn is a function called to read the current upstream values for an 39 | // instance of your resource type. 
It must be a function compatible with the 40 | // following signature: 41 | // 42 | // func (ctx context.Context, client interface{}, planned tfobj.ObjectReader) (new cty.Value, diags tfsdk.Diagnostics) 43 | // 44 | // If the given object appears to have been deleted upstream, return a null 45 | // value to indicate that. The object will then be removed from the Terraform 46 | // state. 47 | ReadFn interface{} 48 | 49 | // UpdateFn is a function called when performing an in-place update of an 50 | // instance of your resource type. It must be a function compatible with the 51 | // following signature: 52 | // 53 | // func (ctx context.Context, client interface{}, prior tfobj.ObjectReader, planned tfobj.PlanReader) (new cty.Value, diags tfsdk.Diagnostics) 54 | // 55 | // If the update is not completely successful, you may still return a 56 | // partially-updated object alongside error diagnostics to retain the 57 | // parts that _were_ updated. If error diagnostics are returned and the 58 | // returned value is null then we assume that the update failed completely 59 | // and retain the prior value in the Terraform state. 60 | UpdateFn interface{} 61 | 62 | // DeleteFn is a function called to delete an instance of your resource type. 63 | // It must be a function compatible with the following signature: 64 | // 65 | // func (ctx context.Context, client interface{}, prior tfobj.ObjectReader) tfsdk.Diagnostics 66 | // 67 | // If error diagnostics are returned, the SDK will assume that the delete 68 | // failed and that the object still exists. If it actually was deleted 69 | // before the failure, this should be detected on the next Read call. 70 | DeleteFn interface{} 71 | 72 | // PlanFn can be set for managed resource types in order to make adjustments 73 | // to a planned change for an instance. 
It must be a function compatible 74 | // with the following signature: 75 | // 76 | // func (ctx context.Context, client interface{}, plan tfobj.PlanBuilder) (planned cty.Value, requiresReplace cty.PathSet, diags tfsdk.Diagnostics) 77 | // 78 | // If possible, the provider should also perform validation of the planned 79 | // change and return errors or warnings early, rather than waiting until 80 | // the apply step. 81 | PlanFn interface{} 82 | } 83 | 84 | // NewManagedResourceType prepares a ManagedResourceType implementation using 85 | // the definition from the given ResourceType instance. 86 | // 87 | // This function is intended to be called during startup with a valid 88 | // ResourceType, so it will panic if the given ResourceType is not valid. 89 | func NewManagedResourceType(def *ResourceTypeDef) ManagedResourceType { 90 | if def == nil { 91 | panic("NewManagedResourceType called with nil definition") 92 | } 93 | 94 | schema := def.ConfigSchema 95 | if schema == nil { 96 | schema = &tfschema.BlockType{} 97 | } 98 | 99 | readFn := def.ReadFn 100 | if readFn == nil { 101 | readFn = defaultReadFn 102 | } 103 | 104 | // TODO: Check thoroughly to make sure def is correctly populated for a 105 | // managed resource type, so we can panic early. 106 | 107 | return managedResourceType{ 108 | configSchema: schema, 109 | 110 | createFn: def.CreateFn, 111 | readFn: readFn, 112 | updateFn: def.UpdateFn, 113 | deleteFn: def.DeleteFn, 114 | planFn: def.PlanFn, 115 | } 116 | } 117 | 118 | // NewDataResourceType prepares a DataResourceType implementation using the 119 | // definition from the given ResourceType instance. 120 | // 121 | // This function is intended to be called during startup with a valid 122 | // ResourceType, so it will panic if the given ResourceType is not valid. 
func NewDataResourceType(def *ResourceTypeDef) DataResourceType {
	if def == nil {
		panic("NewDataResourceType called with nil definition")
	}

	schema := def.ConfigSchema
	if schema == nil {
		schema = &tfschema.BlockType{}
	}
	if def.SchemaVersion != 0 {
		panic("NewDataResourceType requires def.SchemaVersion == 0")
	}

	readFn := def.ReadFn
	if readFn == nil {
		readFn = defaultReadFn
	}

	// TODO: Check thoroughly to make sure def is correctly populated for a data
	// resource type, so we can panic early.

	return dataResourceType{
		configSchema: schema,
		readFn:       readFn,
	}
}

// managedResourceType is the internal ManagedResourceType implementation
// built by NewManagedResourceType from a ResourceTypeDef.
type managedResourceType struct {
	configSchema  *tfschema.BlockType
	schemaVersion int64

	createFn, readFn, updateFn, deleteFn interface{}
	planFn                               interface{}
}

// getSchema returns the resource type's schema and its schema version.
func (rt managedResourceType) getSchema() (schema *tfschema.BlockType, version int64) {
	return rt.configSchema, rt.schemaVersion
}

// validate checks the given configuration object against the schema's
// validation rules.
func (rt managedResourceType) validate(obj cty.Value) Diagnostics {
	return ValidateBlockObject(rt.configSchema, obj)
}

// upgradeState is currently a stub that returns cty.NilVal with no
// diagnostics; state upgrade logic is not implemented yet.
func (rt managedResourceType) upgradeState(oldJSON []byte, oldVersion int) (cty.Value, Diagnostics) {
	return cty.NilVal, nil
}

// refresh invokes the provider's ReadFn to fetch the current upstream value
// for the given object, normalizing null/unknown results to the schema's
// implied type.
func (rt managedResourceType) refresh(ctx context.Context, client interface{}, current cty.Value) (cty.Value, Diagnostics) {
	var diags Diagnostics
	wantTy := rt.configSchema.ImpliedCtyType()

	currentReader := tfobj.NewObjectReader(rt.configSchema, current)
	fn, err := dynfunc.WrapFunctionWithReturnValueCty(rt.readFn, wantTy, ctx, client, currentReader)
	if err != nil {
		diags = diags.Append(Diagnostic{
			Severity: Error,
			Summary:  "Invalid provider implementation",
			Detail:   fmt.Sprintf("Invalid ReadFn: %s.\nThis is a bug in the provider that should be reported in its own issue tracker.", err),
		})
		return rt.configSchema.Null(), diags
	}

	newVal, moreDiags := fn()
	diags = diags.Append(moreDiags)

	// We'll make life easier on the provider implementer by normalizing null
	// and unknown values to the correct type automatically, so they can just
	// return dynamically-typed nulls and unknowns.
	switch {
	case newVal.IsNull():
		newVal = cty.NullVal(wantTy)
	case !newVal.IsKnown():
		newVal = cty.UnknownVal(wantTy)
	}

	return newVal, diags
}

// planChange applies schema defaults to the proposed new value and, if a
// change is being planned, gives the provider's PlanFn a chance to refine it.
func (rt managedResourceType) planChange(ctx context.Context, client interface{}, prior, config, proposed cty.Value) (cty.Value, cty.PathSet, Diagnostics) {
	var diags Diagnostics
	requiresReplace := cty.NewPathSet()
	wantTy := rt.configSchema.ImpliedCtyType()

	// Terraform Core has already done a lot of the work in merging prior with
	// config to produce "proposed". Our main job here is inserting any additional
	// default values called for in the provider schema.
	planned := rt.configSchema.ApplyDefaults(proposed)

	if !planned.RawEquals(prior) {
		// If there are already changes planned then the provider code gets
		// an opportunity to refine the changeset in case there are any
		// side-effects of the configuration change that could affect any
		// pre-existing computed attribute values.
		planBuilder := tfobj.NewPlanBuilder(rt.configSchema, prior, config, planned)
		fn, err := dynfunc.WrapFunctionWithReturnValueCtyAndPathSet(rt.planFn, wantTy, ctx, client, planBuilder)
		if err != nil {
			diags = diags.Append(Diagnostic{
				Severity: Error,
				Summary:  "Invalid provider implementation",
				Detail:   fmt.Sprintf("Invalid PlanFn: %s.\nThis is a bug in the provider that should be reported in its own issue tracker.", err),
			})
			return rt.configSchema.Null(), requiresReplace, diags
		}

		var moreDiags Diagnostics
		planned, requiresReplace, moreDiags = fn()
		diags = diags.Append(moreDiags)

		// We'll make life easier on the provider implementer by normalizing null
		// and unknown values to the correct type automatically, so they can just
		// return dynamically-typed nulls and unknowns.
		switch {
		case planned.IsNull():
			planned = cty.NullVal(wantTy)
		case !planned.IsKnown():
			planned = cty.UnknownVal(wantTy)
		}
	}

	return planned, requiresReplace, diags
}

func (rt managedResourceType) applyChange(ctx context.Context, client interface{}, prior, planned cty.Value) (cty.Value, Diagnostics) {
	var diags Diagnostics
	wantTy := rt.configSchema.ImpliedCtyType()

	// The planned object will contain unknown values for anything that is to
	// be determined during the apply step, but we'll replace these with nulls
	// before calling the provider's operation implementation functions so that
	// they can easily use gocty to work with the whole object and not get
	// tripped up with dealing with those unknown values.
	//
	// FIXME: This is a bit unfortunate because it means that the apply functions
	// can't easily tell the difference between something that was returned as
	// explicitly null in the plan vs.
being unknown, but we're accepting that 258 | // for now because it seems unlikely that such a distinction would ever 259 | // matter in practice: the plan logic should just be consistent about whether 260 | // a particular attribute becomes unknown when it's unset. We might need to 261 | // do something better here if real-world experience indicates otherwise. 262 | // 263 | // This will also cause set values that differ only by being unknown to 264 | // be conflated together, but we're ignoring that here because we want to 265 | // phase out the idea of set-backed blocks with unknown attributes inside: 266 | // they cause too much ambiguity in our diffing logic. 267 | planned = cty.UnknownAsNull(planned) 268 | 269 | // We could actually be doing either a Create, an Update, or a Delete here 270 | // depending on the null-ness of the values we've been given. At least one 271 | // of them will always be non-null. 272 | var fn func() (cty.Value, Diagnostics) 273 | var err error 274 | var errMsg string 275 | switch { 276 | case prior.IsNull(): 277 | plannedReader := tfobj.NewObjectReader(rt.configSchema, planned) 278 | fn, err = dynfunc.WrapFunctionWithReturnValueCty(rt.createFn, wantTy, ctx, client, plannedReader) 279 | if err != nil { 280 | errMsg = fmt.Sprintf("Invalid CreateFn: %s.\nThis is a bug in the provider that should be reported in its own issue tracker.", err) 281 | } 282 | case planned.IsNull(): 283 | priorReader := tfobj.NewObjectReader(rt.configSchema, prior) 284 | fn, err = dynfunc.WrapFunctionWithReturnValueCty(rt.deleteFn, wantTy, ctx, client, priorReader) 285 | if err != nil { 286 | errMsg = fmt.Sprintf("Invalid DeleteFn: %s.\nThis is a bug in the provider that should be reported in its own issue tracker.", err) 287 | } 288 | default: 289 | priorReader := tfobj.NewObjectReader(rt.configSchema, prior) 290 | plannedReader := tfobj.NewPlanReader(rt.configSchema, prior, planned) 291 | fn, err = dynfunc.WrapFunctionWithReturnValueCty(rt.updateFn, wantTy, 
ctx, client, priorReader, plannedReader) 292 | if err != nil { 293 | errMsg = fmt.Sprintf("Invalid UpdateFn: %s.\nThis is a bug in the provider that should be reported in its own issue tracker.", err) 294 | } 295 | } 296 | if err != nil { 297 | diags = diags.Append(Diagnostic{ 298 | Severity: Error, 299 | Summary: "Invalid provider implementation", 300 | Detail: errMsg, 301 | }) 302 | return rt.configSchema.Null(), diags 303 | } 304 | 305 | newVal, moreDiags := fn() 306 | diags = diags.Append(moreDiags) 307 | 308 | // We'll make life easier on the provider implementer by normalizing null 309 | // and unknown values to the correct type automatically, so they can just 310 | // return dynamically-typed nulls and unknowns. 311 | switch { 312 | case newVal.IsNull(): 313 | newVal = cty.NullVal(wantTy) 314 | case !newVal.IsKnown(): 315 | newVal = cty.UnknownVal(wantTy) 316 | } 317 | 318 | return newVal, diags 319 | } 320 | 321 | func (rt managedResourceType) importState(ctx context.Context, client interface{}, id string) (cty.Value, Diagnostics) { 322 | return cty.NilVal, nil 323 | } 324 | 325 | type dataResourceType struct { 326 | configSchema *tfschema.BlockType 327 | 328 | readFn interface{} 329 | } 330 | 331 | func (rt dataResourceType) getSchema() *tfschema.BlockType { 332 | return rt.configSchema 333 | } 334 | 335 | func (rt dataResourceType) validate(obj cty.Value) Diagnostics { 336 | return ValidateBlockObject(rt.configSchema, obj) 337 | } 338 | 339 | func (rt dataResourceType) read(ctx context.Context, client interface{}, config cty.Value) (cty.Value, Diagnostics) { 340 | var diags Diagnostics 341 | wantTy := rt.configSchema.ImpliedCtyType() 342 | 343 | configReader := tfobj.NewObjectReader(rt.configSchema, config) 344 | fn, err := dynfunc.WrapFunctionWithReturnValueCty(rt.readFn, wantTy, ctx, client, configReader) 345 | if err != nil { 346 | diags = diags.Append(Diagnostic{ 347 | Severity: Error, 348 | Summary: "Invalid provider implementation", 349 | Detail: 
fmt.Sprintf("Invalid ReadFn: %s.\nThis is a bug in the provider that should be reported in its own issue tracker.", err), 350 | }) 351 | return rt.configSchema.Null(), diags 352 | } 353 | 354 | newVal, moreDiags := fn() 355 | diags = diags.Append(moreDiags) 356 | 357 | // We'll make life easier on the provider implementer by normalizing null 358 | // and unknown values to the correct type automatically, so they can just 359 | // return dynamically-typed nulls and unknowns. 360 | switch { 361 | case newVal.IsNull(): 362 | newVal = cty.NullVal(wantTy) 363 | case !newVal.IsKnown(): 364 | newVal = cty.UnknownVal(wantTy) 365 | } 366 | 367 | return newVal, diags 368 | } 369 | 370 | func defaultReadFn(ctx context.Context, client interface{}, v cty.Value) (cty.Value, Diagnostics) { 371 | return cty.UnknownAsNull(v), nil 372 | } 373 | -------------------------------------------------------------------------------- /schema.go: -------------------------------------------------------------------------------- 1 | package tfsdk 2 | 3 | import ( 4 | "fmt" 5 | 6 | "github.com/apparentlymart/terraform-sdk/internal/dynfunc" 7 | "github.com/apparentlymart/terraform-sdk/tfschema" 8 | "github.com/zclconf/go-cty/cty" 9 | "github.com/zclconf/go-cty/cty/convert" 10 | ) 11 | 12 | // ValidateBlockObject checks that the given object value is suitable for the 13 | // recieving block type, returning diagnostics if not. 14 | // 15 | // The given value must already have a type conforming to the schema. This 16 | // function validates instead the attribute values and block definitions within 17 | // the object. 
18 | func ValidateBlockObject(schema *tfschema.BlockType, val cty.Value) Diagnostics { 19 | var diags Diagnostics 20 | if !val.Type().IsObjectType() { 21 | diags = diags.Append(Diagnostic{ 22 | Severity: Error, 23 | Summary: "Invalid block object", 24 | Detail: "An object value is required to represent this block.", 25 | }) 26 | return diags 27 | } 28 | 29 | // Capacity 3 here is so that we have room for a nested block type, an 30 | // index, and a nested attribute name without allocating more. Each loop 31 | // below will mutate this backing array but not the original empty slice. 32 | path := make(cty.Path, 0, 3) 33 | 34 | for name, attrS := range schema.Attributes { 35 | path := path.GetAttr(name) 36 | av := val.GetAttr(name) 37 | if av.IsNull() && attrS.Required { 38 | diags = diags.Append(Diagnostic{ 39 | Severity: Error, 40 | Summary: "Missing required argument", 41 | Detail: fmt.Sprintf("The argument %q must be set.", name), 42 | }) 43 | continue 44 | } 45 | attrDiags := ValidateAttrValue(attrS, av) 46 | diags = diags.Append(attrDiags.UnderPath(path)) 47 | } 48 | 49 | for name, blockS := range schema.NestedBlockTypes { 50 | path := path.GetAttr(name) 51 | av := val.GetAttr(name) 52 | 53 | switch blockS.Nesting { 54 | case tfschema.NestingSingle, tfschema.NestingGroup: 55 | if !av.IsNull() { 56 | blockDiags := ValidateBlockObject(&blockS.Content, av) 57 | diags = diags.Append(blockDiags.UnderPath(path)) 58 | } 59 | case tfschema.NestingList, tfschema.NestingMap: 60 | for it := av.ElementIterator(); it.Next(); { 61 | ek, ev := it.Element() 62 | path := path.Index(ek) 63 | blockDiags := ValidateBlockObject(&blockS.Content, ev) 64 | diags = diags.Append(blockDiags.UnderPath(path)) 65 | } 66 | case tfschema.NestingSet: 67 | // We handle sets separately because we can't describe a path 68 | // through a set element (it has no key to use) and so any errors 69 | // in a set block are indicated at the set itself. 
Nested blocks 70 | // backed by sets are fraught with oddities like these, so providers 71 | // should avoid using them except for historical compatibilty. 72 | for it := av.ElementIterator(); it.Next(); { 73 | _, ev := it.Element() 74 | blockDiags := ValidateBlockObject(&blockS.Content, ev) 75 | diags = diags.Append(blockDiags.UnderPath(path)) 76 | } 77 | default: 78 | diags = diags.Append(Diagnostic{ 79 | Severity: Error, 80 | Summary: "Unsupported nested block mode", 81 | Detail: fmt.Sprintf("Block type %q has an unsupported nested block mode %#v. This is a bug in the provider; please report it in the provider's own issue tracker.", name, blockS.Nesting), 82 | Path: path, 83 | }) 84 | } 85 | } 86 | 87 | return diags 88 | } 89 | 90 | // ValidateAttrValue checks that the given value is a suitable value for the 91 | // given attribute schema, returning diagnostics if not. 92 | // 93 | // This method is usually used only indirectly via ValidateBlockObject. 94 | func ValidateAttrValue(schema *tfschema.Attribute, val cty.Value) Diagnostics { 95 | var diags Diagnostics 96 | 97 | if schema.Required && val.IsNull() { 98 | // This is a poor error message due to our lack of context here. In 99 | // normal use a whole-schema validation driver should detect this 100 | // case before calling SchemaAttribute.Validate and return a message 101 | // with better context. 
102 | diags = diags.Append(Diagnostic{ 103 | Severity: Error, 104 | Summary: "Missing required argument", 105 | Detail: "This argument is required.", 106 | }) 107 | } 108 | 109 | convVal, err := convert.Convert(val, schema.Type) 110 | if err != nil { 111 | diags = diags.Append(Diagnostic{ 112 | Severity: Error, 113 | Summary: "Invalid argument value", 114 | Detail: fmt.Sprintf("Incorrect value type: %s.", FormatError(err)), 115 | }) 116 | } 117 | 118 | if diags.HasErrors() { 119 | // If we've already got errors then we'll skip calling the provider's 120 | // custom validate function, since this avoids the need for that 121 | // function to be resilient to already-detected problems, and avoids 122 | // producing duplicate error messages. 123 | return diags 124 | } 125 | 126 | if convVal.IsNull() { 127 | // Null-ness is already handled by the a.Required flag, so if an 128 | // optional argument is null we'll save the validation function from 129 | // having to also deal with it. 130 | return diags 131 | } 132 | 133 | if !convVal.IsKnown() { 134 | // If the value isn't known yet then we'll defer any further validation 135 | // of it until it becomes known, since custom validation functions 136 | // are not expected to deal with unknown values. 137 | return diags 138 | } 139 | 140 | // The validation function gets the already-converted value, for convenience. 
141 | validate, err := dynfunc.WrapSimpleFunction(schema.ValidateFn, convVal) 142 | if err != nil { 143 | diags = diags.Append(Diagnostic{ 144 | Severity: Error, 145 | Summary: "Invalid provider schema", 146 | Detail: fmt.Sprintf("Invalid ValidateFn: %s.\nThis is a bug in the provider that should be reported in its own issue tracker.", err), 147 | }) 148 | return diags 149 | } 150 | 151 | moreDiags := validate() 152 | diags = diags.Append(moreDiags) 153 | return diags 154 | } 155 | -------------------------------------------------------------------------------- /schema_test.go: -------------------------------------------------------------------------------- 1 | package tfsdk_test 2 | 3 | import ( 4 | "strings" 5 | "testing" 6 | 7 | tfsdk "github.com/apparentlymart/terraform-sdk" 8 | "github.com/apparentlymart/terraform-sdk/tfschema" 9 | "github.com/google/go-cmp/cmp" 10 | "github.com/zclconf/go-cty/cty" 11 | ) 12 | 13 | func TestValidateAttrValue(t *testing.T) { 14 | tests := map[string]struct { 15 | Schema *tfschema.Attribute 16 | Try cty.Value 17 | WantDiags []string 18 | }{ 19 | "simple primitive type ok": { 20 | &tfschema.Attribute{ 21 | Type: cty.String, 22 | Optional: true, 23 | }, 24 | cty.StringVal("ok"), 25 | nil, 26 | }, 27 | "missing required argument": { 28 | &tfschema.Attribute{ 29 | Type: cty.String, 30 | Required: true, 31 | }, 32 | cty.NullVal(cty.String), 33 | []string{ 34 | `[ERROR] Missing required argument: This argument is required.`, 35 | }, 36 | }, 37 | "missing optional argument": { 38 | &tfschema.Attribute{ 39 | Type: cty.String, 40 | Optional: true, 41 | }, 42 | cty.NullVal(cty.String), 43 | nil, 44 | }, 45 | "simple primitive type conversion ok": { 46 | &tfschema.Attribute{ 47 | Type: cty.String, 48 | Optional: true, 49 | }, 50 | cty.True, // can become the string "true", so okay 51 | nil, 52 | }, 53 | "simple primitive type conversion fail": { 54 | &tfschema.Attribute{ 55 | Type: cty.Bool, 56 | Optional: true, 57 | }, 58 | 
cty.StringVal("not a bool"), 59 | []string{ 60 | `[ERROR] Invalid argument value: Incorrect value type: a bool is required.`, 61 | }, 62 | }, 63 | "object type missing attribute": { 64 | &tfschema.Attribute{ 65 | Type: cty.Object(map[string]cty.Type{ 66 | "foo": cty.String, 67 | }), 68 | Optional: true, 69 | }, 70 | cty.EmptyObjectVal, 71 | []string{ 72 | `[ERROR] Invalid argument value: Incorrect value type: attribute "foo" is required.`, 73 | }, 74 | }, 75 | "custom validate function ok": { 76 | &tfschema.Attribute{ 77 | Type: cty.String, 78 | Required: true, 79 | ValidateFn: func(v string) tfsdk.Diagnostics { 80 | if v != "ok" { 81 | return tfsdk.Diagnostics{ 82 | { 83 | Severity: tfsdk.Error, 84 | Summary: "Not ok", 85 | }, 86 | } 87 | } 88 | return nil 89 | }, 90 | }, 91 | cty.StringVal("ok"), 92 | nil, 93 | }, 94 | "custom validate function wrong": { 95 | &tfschema.Attribute{ 96 | Type: cty.String, 97 | Required: true, 98 | ValidateFn: func(v string) tfsdk.Diagnostics { 99 | if v != "ok" { 100 | return tfsdk.Diagnostics{ 101 | { 102 | Severity: tfsdk.Error, 103 | Summary: "Not ok", 104 | }, 105 | } 106 | } 107 | return nil 108 | }, 109 | }, 110 | cty.StringVal("not ok"), 111 | []string{ 112 | `[ERROR] Not ok`, 113 | }, 114 | }, 115 | "custom validate function type conversion error": { 116 | &tfschema.Attribute{ 117 | Type: cty.String, 118 | Required: true, 119 | // This is not something any provider should really do, but 120 | // we want to make sure it produces a reasonable result. 
121 | ValidateFn: func(v bool) tfsdk.Diagnostics { 122 | return nil 123 | }, 124 | }, 125 | cty.StringVal("not a bool"), 126 | []string{ 127 | `[ERROR] Unsuitable argument value: This value cannot be used: bool value is required.`, 128 | }, 129 | }, 130 | "custom validate function type with incorrect return type": { 131 | &tfschema.Attribute{ 132 | Type: cty.String, 133 | Optional: true, 134 | ValidateFn: func(string) string { 135 | return "" 136 | }, 137 | }, 138 | cty.StringVal("ok"), 139 | []string{ 140 | "[ERROR] Invalid provider schema: Invalid ValidateFn: must return Diagnostics.\nThis is a bug in the provider that should be reported in its own issue tracker.", 141 | }, 142 | }, 143 | "custom validate function type with no return type": { 144 | &tfschema.Attribute{ 145 | Type: cty.String, 146 | Optional: true, 147 | ValidateFn: func(string) {}, 148 | }, 149 | cty.StringVal("ok"), 150 | []string{ 151 | "[ERROR] Invalid provider schema: Invalid ValidateFn: must return Diagnostics.\nThis is a bug in the provider that should be reported in its own issue tracker.", 152 | }, 153 | }, 154 | } 155 | 156 | for name, test := range tests { 157 | t.Run(name, func(t *testing.T) { 158 | gotDiags := tfsdk.ValidateAttrValue(test.Schema, test.Try) 159 | 160 | if len(test.WantDiags) > 0 { 161 | gotDiagsStr := diagnosticStringsForTests(gotDiags) 162 | if !cmp.Equal(gotDiagsStr, test.WantDiags) { 163 | t.Fatalf("wrong diagnostics\n%s", cmp.Diff(test.WantDiags, gotDiagsStr)) 164 | } 165 | return 166 | } 167 | 168 | for _, diagStr := range diagnosticStringsForTests(gotDiags) { 169 | t.Errorf("unexpected problem: %s", diagStr) 170 | } 171 | }) 172 | } 173 | } 174 | 175 | // diagnosticStringForTests converts a diagnostic into a compact string that 176 | // is easier to use for matching in test assertions. 
177 | func diagnosticStringForTests(diag tfsdk.Diagnostic) string { 178 | var buf strings.Builder 179 | switch diag.Severity { 180 | case tfsdk.Error: 181 | buf.WriteString("[ERROR] ") 182 | case tfsdk.Warning: 183 | buf.WriteString("[WARNING] ") 184 | default: 185 | buf.WriteString("[???] ") 186 | } 187 | buf.WriteString(diag.Summary) 188 | if diag.Detail != "" { 189 | buf.WriteString(": ") 190 | buf.WriteString(diag.Detail) 191 | } 192 | if len(diag.Path) != 0 { 193 | buf.WriteString(" (in ") 194 | buf.WriteString(tfsdk.FormatPath(diag.Path)) 195 | buf.WriteString(")") 196 | } 197 | return buf.String() 198 | } 199 | 200 | func diagnosticStringsForTests(diags tfsdk.Diagnostics) []string { 201 | ret := make([]string, len(diags)) 202 | for i, diag := range diags { 203 | ret[i] = diagnosticStringForTests(diag) 204 | } 205 | return ret 206 | } 207 | -------------------------------------------------------------------------------- /terraform-provider-test/.gitignore: -------------------------------------------------------------------------------- 1 | .terraform/* 2 | crash.log 3 | -------------------------------------------------------------------------------- /terraform-provider-test/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | tfsdk "github.com/apparentlymart/terraform-sdk" 5 | "github.com/apparentlymart/terraform-sdk/terraform-provider-test/test" 6 | ) 7 | 8 | func main() { 9 | tfsdk.ServeProviderPlugin(test.Provider()) 10 | } 11 | -------------------------------------------------------------------------------- /terraform-provider-test/test/drt_echo.go: -------------------------------------------------------------------------------- 1 | package test 2 | 3 | import ( 4 | "context" 5 | "log" 6 | 7 | tfsdk "github.com/apparentlymart/terraform-sdk" 8 | "github.com/apparentlymart/terraform-sdk/tfschema" 9 | "github.com/zclconf/go-cty/cty" 10 | ) 11 | 12 | type echoDRT struct { 13 | Given 
string `cty:"given"` 14 | Result *string `cty:"result"` 15 | 16 | Dynamic cty.Value `cty:"dynamic"` 17 | } 18 | 19 | func echoDataResourceType() tfsdk.DataResourceType { 20 | return tfsdk.NewDataResourceType(&tfsdk.ResourceTypeDef{ 21 | ConfigSchema: &tfschema.BlockType{ 22 | Attributes: map[string]*tfschema.Attribute{ 23 | "given": {Type: cty.String, Required: true}, 24 | "result": {Type: cty.String, Computed: true}, 25 | 26 | "dynamic": {Type: cty.DynamicPseudoType, Optional: true}, 27 | }, 28 | }, 29 | 30 | ReadFn: func(cty context.Context, client *Client, obj *echoDRT) (*echoDRT, tfsdk.Diagnostics) { 31 | log.Printf("reading %#v", obj) 32 | obj.Result = &obj.Given 33 | return obj, nil 34 | }, 35 | }) 36 | } 37 | -------------------------------------------------------------------------------- /terraform-provider-test/test/mrt_instance.go: -------------------------------------------------------------------------------- 1 | package test 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "log" 7 | 8 | tfsdk "github.com/apparentlymart/terraform-sdk" 9 | "github.com/apparentlymart/terraform-sdk/tfobj" 10 | "github.com/apparentlymart/terraform-sdk/tfschema" 11 | "github.com/zclconf/go-cty/cty" 12 | ) 13 | 14 | type instanceMRT struct { 15 | ID *string `cty:"id"` 16 | Version *int `cty:"version"` 17 | Type string `cty:"type"` 18 | Image string `cty:"image"` 19 | 20 | Access *instanceMRTAccess `cty:"access"` 21 | NetworkInterfaces map[string]*instanceMRTNetworkInterface `cty:"network_interface"` 22 | } 23 | 24 | type instanceMRTNetworkInterface struct { 25 | CreatePublicAddrs bool `cty:"create_public_addrs"` 26 | } 27 | 28 | type instanceMRTAccess struct { 29 | Policy cty.Value `cty:"policy"` 30 | } 31 | 32 | func instanceManagedResourceType() tfsdk.ManagedResourceType { 33 | return tfsdk.NewManagedResourceType(&tfsdk.ResourceTypeDef{ 34 | ConfigSchema: &tfschema.BlockType{ 35 | Attributes: map[string]*tfschema.Attribute{ 36 | "id": {Type: cty.String, Computed: true}, 37 | 
"version": {Type: cty.Number, Computed: true}, 38 | 39 | "type": {Type: cty.String, Required: true}, 40 | "image": {Type: cty.String, Required: true}, 41 | }, 42 | NestedBlockTypes: map[string]*tfschema.NestedBlockType{ 43 | "network_interface": { 44 | Nesting: tfschema.NestingMap, 45 | Content: tfschema.BlockType{ 46 | Attributes: map[string]*tfschema.Attribute{ 47 | "create_public_addrs": { 48 | Type: cty.Bool, 49 | Optional: true, 50 | Default: true, 51 | }, 52 | }, 53 | }, 54 | }, 55 | "access": { 56 | Nesting: tfschema.NestingGroup, 57 | Content: tfschema.BlockType{ 58 | Attributes: map[string]*tfschema.Attribute{ 59 | "policy": { 60 | Type: cty.DynamicPseudoType, 61 | Optional: true, 62 | Default: cty.EmptyObjectVal, 63 | 64 | ValidateFn: func(val cty.Value) tfsdk.Diagnostics { 65 | var diags tfsdk.Diagnostics 66 | if !(val.Type().IsObjectType() || val.Type().IsMapType()) { 67 | diags = diags.Append( 68 | tfsdk.ValidationError(fmt.Errorf("must be an object, using { ... } syntax")), 69 | ) 70 | } 71 | return diags 72 | }, 73 | }, 74 | }, 75 | }, 76 | }, 77 | }, 78 | }, 79 | 80 | PlanFn: func(ctx context.Context, client *Client, plan tfobj.PlanBuilder) (cty.Value, cty.PathSet, tfsdk.Diagnostics) { 81 | prior, planned := plan.AttrChange("type") 82 | log.Printf("'type' value was %#v and is now %#v", prior, planned) 83 | switch plan.Action() { 84 | case tfobj.Create: 85 | plan.SetAttr("version", cty.NumberIntVal(1)) 86 | default: 87 | if plan.AttrHasChange("type") || plan.AttrHasChange("image") { 88 | plan.SetAttrUnknown("version") // we'll allocate a new version at apply time 89 | } 90 | } 91 | return plan.ObjectVal(), plan.RequiresReplace(), nil 92 | }, 93 | ReadFn: func(ctx context.Context, client *Client, current *instanceMRT) (*instanceMRT, tfsdk.Diagnostics) { 94 | log.Printf("reading %#v", current) 95 | return current, nil // No changes 96 | }, 97 | CreateFn: func(ctx context.Context, client *Client, new *instanceMRT) (*instanceMRT, tfsdk.Diagnostics) { 98 
| log.Printf("creating %#v", new) 99 | id := "placeholder" 100 | version := 1 101 | new.ID = &id 102 | new.Version = &version 103 | return new, nil 104 | }, 105 | UpdateFn: func(ctx context.Context, client *Client, prior, new *instanceMRT) (*instanceMRT, tfsdk.Diagnostics) { 106 | log.Printf("updating %#v", new) 107 | if new.Version == nil { 108 | newVersion := 1 109 | if prior.Version != nil { 110 | newVersion = *prior.Version + 1 111 | } 112 | new.Version = &newVersion 113 | } 114 | return new, nil 115 | }, 116 | DeleteFn: func(ctx context.Context, client *Client, prior *instanceMRT) (*instanceMRT, tfsdk.Diagnostics) { 117 | log.Printf("deleting %#v", prior) 118 | return nil, nil 119 | }, 120 | }) 121 | } 122 | -------------------------------------------------------------------------------- /terraform-provider-test/test/mrt_instance_test.go: -------------------------------------------------------------------------------- 1 | package test 2 | 3 | import ( 4 | "testing" 5 | ) 6 | 7 | func TestMRTInstance(t *testing.T) { 8 | wd := testHelper.RequireNewWorkingDir(t) 9 | defer wd.Close() 10 | 11 | wd.RequireSetConfig(t, ` 12 | resource "test_instance" "test" { 13 | type = "z2.wheezy" 14 | image = "img-abc123" 15 | 16 | access { 17 | policy = {} 18 | } 19 | 20 | network_interface "main" { 21 | create_public_addrs = false 22 | } 23 | network_interface "public" { 24 | create_public_addrs = true 25 | } 26 | } 27 | 28 | output "id" { 29 | value = test_instance.test.id 30 | } 31 | `) 32 | 33 | t.Log("init") 34 | wd.RequireInit(t) 35 | 36 | t.Log("create initial plan") 37 | wd.RequireCreatePlan(t) 38 | t.Log("read initial plan") 39 | plan := wd.RequireSavedPlan(t) 40 | if got, want := len(plan.PlannedValues.RootModule.Resources), 1; got != want { 41 | t.Fatalf("wrong number of planned resource changes %d; want %d", got, want) 42 | } 43 | plannedChange := plan.PlannedValues.RootModule.Resources[0] 44 | if got, want := plannedChange.Type, "test_instance"; got != want { 45 | 
t.Errorf("wrong resource type in plan\ngot: %s\nwant: %s", got, want) 46 | } 47 | if got, want := plannedChange.AttributeValues["version"], 1.0; got != want { 48 | // All numbers are decoded as float64 by RequireSavedPlan, because 49 | // Terraform itself does not distinguish number types. 50 | t.Errorf("wrong 'version' value in create plan\ngot: %#v (%T)\nwant: %#v (%T)", got, got, want, want) 51 | } 52 | 53 | t.Log("apply initial plan") 54 | wd.RequireApply(t) 55 | 56 | t.Log("read state after initial apply") 57 | state := wd.RequireState(t) 58 | outputs := state.Values.Outputs 59 | idOutput := outputs["id"] 60 | if idOutput == nil { 61 | t.Fatal("missing 'id' output") 62 | } 63 | if got, want := idOutput.Value, "placeholder"; got != want { 64 | t.Errorf("wrong value for id output\ngot: %s\nwant: %s", got, want) 65 | } 66 | if got, want := len(state.Values.RootModule.Resources), 1; got != want { 67 | t.Fatalf("wrong number of resource instance objects in state %d; want %d", got, want) 68 | } 69 | instanceState := state.Values.RootModule.Resources[0] 70 | if got, want := instanceState.Type, "test_instance"; got != want { 71 | t.Errorf("wrong resource type in state\ngot: %s\nwant: %s", got, want) 72 | } 73 | if got, want := instanceState.AttributeValues["version"], 1.0; got != want { 74 | // All numbers are decoded as float64 by RequireSavedPlan, because 75 | // Terraform itself does not distinguish number types. 
76 | t.Errorf("wrong 'version' value\ngot: %#v (%T)\nwant: %#v (%T)", got, got, want, want) 77 | } 78 | 79 | initialID := idOutput.Value 80 | 81 | // Update existing object 82 | wd.RequireSetConfig(t, ` 83 | resource "test_instance" "test" { 84 | type = "z2.wheezy" 85 | image = "img-abc456" # image has changed 86 | 87 | access { 88 | policy = {} 89 | } 90 | 91 | network_interface "main" { 92 | create_public_addrs = false 93 | } 94 | network_interface "public" { 95 | create_public_addrs = true 96 | } 97 | } 98 | 99 | output "id" { 100 | value = test_instance.test.id 101 | } 102 | `) 103 | 104 | t.Log("create followup plan") 105 | wd.RequireCreatePlan(t) 106 | t.Log("read followup plan") 107 | plan = wd.RequireSavedPlan(t) 108 | if got, want := len(plan.PlannedValues.RootModule.Resources), 1; got != want { 109 | t.Fatalf("wrong number of planned resource changes %d; want %d", got, want) 110 | } 111 | plannedChange = plan.PlannedValues.RootModule.Resources[0] 112 | if got, want := plannedChange.Type, "test_instance"; got != want { 113 | t.Errorf("wrong resource type in update plan\ngot: %s\nwant: %s", got, want) 114 | } 115 | if got, want := plannedChange.AttributeValues["version"], (interface{})(nil); got != want { 116 | // Version should not be present because we changed "image" and so now 117 | // it is unknown until after apply. 
118 | t.Errorf("wrong 'version' value in update plan\ngot: %#v (%T)\nwant: %#v (%T)", got, got, want, want) 119 | } 120 | 121 | t.Log("apply followup plan") 122 | wd.RequireApply(t) 123 | 124 | state = wd.RequireState(t) 125 | t.Log("read state after followup apply") 126 | outputs = state.Values.Outputs 127 | idOutput = outputs["id"] 128 | if idOutput == nil { 129 | t.Fatal("missing 'id' output") 130 | } 131 | if got, want := idOutput.Value, initialID; got != want { 132 | t.Errorf("wrong value for id output after update\ngot: %s\nwant: %s", got, want) 133 | } 134 | if got, want := len(state.Values.RootModule.Resources), 1; got != want { 135 | t.Fatalf("wrong number of resource instance objects in state %d; want %d", got, want) 136 | } 137 | instanceState = state.Values.RootModule.Resources[0] 138 | if got, want := instanceState.Type, "test_instance"; got != want { 139 | t.Errorf("wrong resource type in state after update\ngot: %s\nwant: %s", got, want) 140 | } 141 | if got, want := instanceState.AttributeValues["version"], 2.0; got != want { 142 | // Version should've been incremented because the 'image' changed 143 | t.Errorf("wrong 'version' value after update\ngot: %#v (%T)\nwant: %#v (%T)", got, got, want, want) 144 | } 145 | 146 | } 147 | -------------------------------------------------------------------------------- /terraform-provider-test/test/provider.go: -------------------------------------------------------------------------------- 1 | package test 2 | 3 | import ( 4 | "context" 5 | "log" 6 | "net/url" 7 | 8 | tfsdk "github.com/apparentlymart/terraform-sdk" 9 | "github.com/apparentlymart/terraform-sdk/tfschema" 10 | "github.com/davecgh/go-spew/spew" 11 | "github.com/zclconf/go-cty/cty" 12 | ) 13 | 14 | func Provider() *tfsdk.Provider { 15 | return &tfsdk.Provider{ 16 | ConfigSchema: &tfschema.BlockType{ 17 | Attributes: map[string]*tfschema.Attribute{ 18 | "optional_string": { 19 | Type: cty.String, 20 | Optional: true, 21 | }, 22 | "optional_url": { 
23 | Type: cty.String, 24 | Optional: true, 25 | ValidateFn: func(v string) tfsdk.Diagnostics { 26 | var diags tfsdk.Diagnostics 27 | u, err := url.Parse(v) 28 | if err != nil || u.Scheme != "https" { 29 | diags = diags.Append(tfsdk.Diagnostic{ 30 | Severity: tfsdk.Error, 31 | Summary: "Invalid URL", 32 | Detail: "Must be a valid absolute HTTPS URL.", 33 | }) 34 | } 35 | return diags 36 | }, 37 | }, 38 | }, 39 | }, 40 | ConfigureFn: func(ctx context.Context, config *Config) (*Client, tfsdk.Diagnostics) { 41 | var diags tfsdk.Diagnostics 42 | log.Printf("test provider configured with %s", spew.Sdump(config)) 43 | return &Client{}, diags 44 | }, 45 | 46 | ManagedResourceTypes: map[string]tfsdk.ManagedResourceType{ 47 | "test_instance": instanceManagedResourceType(), 48 | }, 49 | DataResourceTypes: map[string]tfsdk.DataResourceType{ 50 | "test_echo": echoDataResourceType(), 51 | }, 52 | } 53 | } 54 | 55 | type Config struct { 56 | OptionalString *string `cty:"optional_string"` 57 | OptionalURL *string `cty:"optional_url"` 58 | } 59 | 60 | type Client struct { 61 | } 62 | -------------------------------------------------------------------------------- /terraform-provider-test/test/provider_test.go: -------------------------------------------------------------------------------- 1 | package test 2 | 3 | import ( 4 | "os" 5 | "testing" 6 | 7 | tfsdk "github.com/apparentlymart/terraform-sdk" 8 | tftest "github.com/apparentlymart/terraform-plugin-test" 9 | ) 10 | 11 | var testHelper *tftest.Helper 12 | 13 | func TestMain(m *testing.M) { 14 | testHelper = tfsdk.InitProviderTesting("test", Provider()) 15 | status := m.Run() 16 | testHelper.Close() 17 | os.Exit(status) 18 | } 19 | -------------------------------------------------------------------------------- /terraform-provider-test/testdata/valid/simple/.gitignore: -------------------------------------------------------------------------------- 1 | .terraform/* 2 | crash.log 3 | 
provider "test" {
}

data "test_echo" "test" {
  given = "img-abc123"

  dynamic = {}
}

resource "test_instance" "test" {
  type  = "z4.weedy"
  image = data.test_echo.test.result

  access {
    policy = {
      statements: [
        {
          action: "foo:Create",
          principal: {
            service: "foo.example.com",
          },
        },
      ]
    }
  }

  network_interface "foo" {
  }
  network_interface "bar" {
    create_public_addrs = false
  }
}

package tfsdk

import (
	"os"

	tftest "github.com/apparentlymart/terraform-plugin-test"
)

// InitProviderTesting is the main entrypoint for testing provider plugins
// using this package. It is intended to be called during TestMain to prepare
// for provider testing.
//
// When testing a provider, the test executable (generated by "go test") serves
// double-duty both as the driver of the tests themselves and as the plugin
// executable, and InitProviderTesting deals with that switching as follows:
//
// On the initial test run, InitProviderTesting will discover the location of a
// current Terraform CLI executable to test against, detect whether a prior
// version of the plugin is available for upgrade tests, and then will
// return an object containing the results of that initialization which can
// then be stored in a global variable for use in other tests.
//
// If the test program detects that it is being run as a Terraform plugin
// server then it will instead call into the SDK to serve the given provider
// and then exit. This sub-process does not run any tests itself, but merely
// waits for provider RPC calls like any other provider plugin process.
//
// If a suitable Terraform CLI executable cannot be found, or some other
// environmental problem is detected, this function will print an error message
// to stderr and exit the process immediately with status 1.
//
// The usual pattern for initialization in TestMain is:
//
//     var testHelper *tftest.Helper
//
//     func TestMain(m *testing.M) {
//         testHelper = tfsdk.InitProviderTesting("example", exampleProvider())
//         status := m.Run()
//         testHelper.Close()
//         os.Exit(status)
//     }
//
func InitProviderTesting(name string, provider *Provider) *tftest.Helper {
	if tftest.RunningAsPlugin() {
		// The test program is being re-launched as a provider plugin via our
		// stub program. Serve provider RPC until the client disconnects and
		// then exit without running any tests in this process.
		ServeProviderPlugin(provider)
		os.Exit(0)
	}

	// Normal test-driver mode: locate Terraform CLI etc and return the
	// helper object the tests will share.
	return tftest.AutoInitProviderHelper(name)
}

package tflegacy

type ResourceMode int

//go:generate stringer -type=ResourceMode -output=resource_mode_string.go config.go

const (
	ManagedResourceMode ResourceMode = iota
	DataResourceMode
)

// ResourceConfig is a legacy type that was formerly used to represent
// interpolatable configuration blocks. It is now only used to shim to old
// APIs that still use this type, via NewResourceConfigShimmed.
15 | type ResourceConfig struct { 16 | ComputedKeys []string 17 | Raw map[string]interface{} 18 | Config map[string]interface{} 19 | } 20 | -------------------------------------------------------------------------------- /tflegacy/diff.go: -------------------------------------------------------------------------------- 1 | package tflegacy 2 | 3 | import ( 4 | "fmt" 5 | "sync" 6 | ) 7 | 8 | // InstanceDiff is the diff of a resource from some state to another. 9 | type InstanceDiff struct { 10 | mu sync.Mutex 11 | Attributes map[string]*ResourceAttrDiff 12 | Destroy bool 13 | DestroyDeposed bool 14 | DestroyTainted bool 15 | 16 | // Meta is a simple K/V map that is stored in a diff and persisted to 17 | // plans but otherwise is completely ignored by Terraform core. It is 18 | // meant to be used for additional data a resource may want to pass through. 19 | // The value here must only contain Go primitives and collections. 20 | Meta map[string]interface{} 21 | } 22 | 23 | func (d *InstanceDiff) Lock() { d.mu.Lock() } 24 | func (d *InstanceDiff) Unlock() { d.mu.Unlock() } 25 | 26 | // ResourceAttrDiff is the diff of a single attribute of a resource. 
27 | type ResourceAttrDiff struct { 28 | Old string // Old Value 29 | New string // New Value 30 | NewComputed bool // True if new value is computed (unknown currently) 31 | NewRemoved bool // True if this attribute is being removed 32 | NewExtra interface{} // Extra information for the provider 33 | RequiresNew bool // True if change requires new resource 34 | Sensitive bool // True if the data should not be displayed in UI output 35 | Type DiffAttrType 36 | } 37 | 38 | // Empty returns true if the diff for this attr is neutral 39 | func (d *ResourceAttrDiff) Empty() bool { 40 | return d.Old == d.New && !d.NewComputed && !d.NewRemoved 41 | } 42 | 43 | func (d *ResourceAttrDiff) GoString() string { 44 | return fmt.Sprintf("*%#v", *d) 45 | } 46 | 47 | // DiffAttrType is an enum type that says whether a resource attribute 48 | // diff is an input attribute (comes from the configuration) or an 49 | // output attribute (comes as a result of applying the configuration). An 50 | // example input would be "ami" for AWS and an example output would be 51 | // "private_ip". 52 | type DiffAttrType byte 53 | 54 | const ( 55 | DiffAttrUnknown DiffAttrType = iota 56 | DiffAttrInput 57 | DiffAttrOutput 58 | ) 59 | -------------------------------------------------------------------------------- /tflegacy/doc.go: -------------------------------------------------------------------------------- 1 | // Package tflegacy is the final resting place for various legacy Terraform 2 | // types and associated functions that used to live in Terraform packages like 3 | // "helper/schema", "helper/resource", and "terraform" itself, so that 4 | // resource type implementations written in terms of these can be fixed up just 5 | // by simple package selector rewriting (replace existing imports with this 6 | // tflegacy package) and then a provider can be adapted to the new SDK one 7 | // resource type at a time. 
package tflegacy

package tflegacy

import (
	"github.com/zclconf/go-cty/cty"
)

// Resource represents a thing in Terraform that has a set of configurable
// attributes and a lifecycle (create, read, update, delete).
//
// The Resource schema is an abstraction that allows provider writers to
// worry only about CRUD operations while off-loading validation, diff
// generation, etc. to this higher level library.
//
// In spite of the name, this struct is not used only for terraform resources,
// but also for data sources. In the case of data sources, the Create,
// Update and Delete functions must not be provided.
type Resource struct {
	// Schema is the schema for the configuration of this resource.
	//
	// The keys of this map are the configuration keys, and the values
	// describe the schema of the configuration value.
	//
	// The schema is used to represent both configurable data as well
	// as data that might be computed in the process of creating this
	// resource.
	Schema map[string]*Schema

	// SchemaVersion is the version number for this resource's Schema
	// definition. The current SchemaVersion stored in the state for each
	// resource. Provider authors can increment this version number
	// when Schema semantics change. If the State's SchemaVersion is less than
	// the current SchemaVersion, the InstanceState is yielded to the
	// MigrateState callback, where the provider can make whatever changes it
	// needs to update the state to be compatible to the latest version of the
	// Schema.
	//
	// When unset, SchemaVersion defaults to 0, so provider authors can start
	// their Versioning at any integer >= 1
	SchemaVersion int

	// MigrateState is deprecated and any new changes to a resource's schema
	// should be handled by StateUpgraders. Existing MigrateState implementations
	// should remain for compatibility with existing state. MigrateState will
	// still be called if the stored SchemaVersion is less than the
	// first version of the StateUpgraders.
	//
	// MigrateState is responsible for updating an InstanceState with an old
	// version to the format expected by the current version of the Schema.
	//
	// It is called during Refresh if the State's stored SchemaVersion is less
	// than the current SchemaVersion of the Resource.
	//
	// The function is yielded the state's stored SchemaVersion and a pointer to
	// the InstanceState that needs updating, as well as the configured
	// provider's configured meta interface{}, in case the migration process
	// needs to make any remote API calls.
	MigrateState StateMigrateFunc

	// StateUpgraders contains the functions responsible for upgrading an
	// existing state with an old schema version to a newer schema. It is
	// called specifically by Terraform when the stored schema version is less
	// than the current SchemaVersion of the Resource.
	//
	// StateUpgraders map specific schema versions to a StateUpgrader
	// function. The registered versions are expected to be ordered,
	// consecutive values. The initial value may be greater than 0 to account
	// for legacy schemas that weren't recorded and can be handled by
	// MigrateState.
	StateUpgraders []StateUpgrader

	// The functions below are the CRUD operations for this resource.
	//
	// The only optional operation is Update. If Update is not implemented,
	// then updates will not be supported for this resource.
	//
	// The ResourceData parameter in the functions below are used to
	// query configuration and changes for the resource as well as to set
	// the ID, computed data, etc.
	//
	// The interface{} parameter is the result of the ConfigureFunc in
	// the provider for this resource. If the provider does not define
	// a ConfigureFunc, this will be nil. This parameter should be used
	// to store API clients, configuration structures, etc.
	//
	// If any errors occur during any of the operations, an error should be
	// returned. If a resource was partially updated, be careful to enable
	// partial state mode for ResourceData and use it accordingly.
	//
	// Exists is a function that is called to check if a resource still
	// exists. If this returns false, then this will affect the diff
	// accordingly. If this function isn't set, it will not be called. It
	// is highly recommended to set it. The *ResourceData passed to Exists
	// should _not_ be modified.
	Create CreateFunc
	Read   ReadFunc
	Update UpdateFunc
	Delete DeleteFunc
	Exists ExistsFunc

	// CustomizeDiff is a custom function for working with the diff that
	// Terraform has created for this resource - it can be used to customize the
	// diff that has been created, diff values not controlled by configuration,
	// or even veto the diff altogether and abort the plan. It is passed a
	// *ResourceDiff, a structure similar to ResourceData but lacking most write
	// functions like Set, while introducing new functions that work with the
	// diff such as SetNew, SetNewComputed, and ForceNew.
	//
	// The phases Terraform runs this in, and the state available via functions
	// like Get and GetChange, are as follows:
	//
	//  * New resource: One run with no state
	//  * Existing resource: One run with state
	//  * Existing resource, forced new: One run with state (before ForceNew),
	//    then one run without state (as if new resource)
	//  * Tainted resource: No runs (custom diff logic is skipped)
	//  * Destroy: No runs (standard diff logic is skipped on destroy diffs)
	//
	// This function needs to be resilient to support all scenarios.
	//
	// If this function needs to access external API resources, remember to flag
	// the RequiresRefresh attribute mentioned below to ensure that
	// -refresh=false is blocked when running plan or apply, as this means that
	// this resource requires refresh-like behaviour to work effectively.
	//
	// For the most part, only computed fields can be customized by this
	// function.
	//
	// This function is only allowed on regular resources (not data sources).
	CustomizeDiff CustomizeDiffFunc

	// Importer is the ResourceImporter implementation for this resource.
	// If this is nil, then this resource does not support importing. If
	// this is non-nil, then it supports importing and ResourceImporter
	// must be validated. The validity of ResourceImporter is verified
	// by InternalValidate on Resource.
	Importer *ResourceImporter

	// If non-empty, this string is emitted as a warning during Validate.
	DeprecationMessage string

	// Timeouts allow users to specify specific time durations in which an
	// operation should time out, to allow them to extend an action to suit their
	// usage. For example, a user may specify a large Creation timeout for their
	// AWS RDS Instance due to its size, or restoring from a snapshot.
	// Resource implementors must enable Timeout support by adding the allowed
	// actions (Create, Read, Update, Delete, Default) to the Resource struct, and
	// accessing them in the matching methods.
	Timeouts *ResourceTimeout
}

// See Resource documentation.
type CreateFunc func(*ResourceData, interface{}) error

// See Resource documentation.
type ReadFunc func(*ResourceData, interface{}) error

// See Resource documentation.
type UpdateFunc func(*ResourceData, interface{}) error

// See Resource documentation.
type DeleteFunc func(*ResourceData, interface{}) error

// See Resource documentation.
type ExistsFunc func(*ResourceData, interface{}) (bool, error)

// See Resource documentation.
type StateMigrateFunc func(int, *InstanceState, interface{}) (*InstanceState, error)

// StateUpgrader pairs a schema version with the function that upgrades state
// from that version to the next.
type StateUpgrader struct {
	// Version is the version schema that this Upgrader will handle, converting
	// it to Version+1.
	Version int

	// Type describes the schema that this function can upgrade. Type is
	// required to decode the schema if the state was stored in a legacy
	// flatmap format.
	Type cty.Type

	// Upgrade takes the JSON encoded state and the provider meta value, and
	// upgrades the state one single schema version. The provided state is
	// decoded into the default json types using a map[string]interface{}. It
	// is up to the StateUpgradeFunc to ensure that the returned value can be
	// encoded using the new schema.
	Upgrade StateUpgradeFunc
}

// See StateUpgrader
type StateUpgradeFunc func(rawState map[string]interface{}, meta interface{}) (map[string]interface{}, error)

// See Resource documentation.
type CustomizeDiffFunc func(*ResourceDiff, interface{}) error

package tflegacy

// ResourceData is used to query and set the attributes of a resource.
//
// ResourceData is the primary argument received for CRUD operations on
// a resource as well as configuration of a provider. It is a powerful
// structure that can be used to not only query data, but check for changes,
// define partial state updates, etc.
//
// The most relevant methods to take a look at are Get, Set, and Partial.
type ResourceData struct {
	// Settable (internally)
	schema   map[string]*Schema
	config   *ResourceConfig
	state    *InstanceState
	diff     *InstanceDiff
	meta     map[string]interface{}
	timeouts *ResourceTimeout

	// Don't set
	//multiReader *MultiLevelFieldReader
	//setWriter   *MapFieldWriter
	//newState    *InstanceState
	//partial     bool
	//partialMap  map[string]struct{}
	//once        sync.Once
	//isNew       bool

	panicOnError bool
}

package tflegacy

// ResourceDiff is used to query and make custom changes to an in-flight diff.
// It can be used to veto particular changes in the diff, customize the diff
// that has been created, or diff values not controlled by config.
//
// The object functions similar to ResourceData, however most notably lacks
// Set, SetPartial, and Partial, as it should be used to change diff values
// only. Most other first-class ResourceData functions exist, namely Get,
// GetOk, HasChange, and GetChange exist.
//
// All functions in ResourceDiff, save for ForceNew, can only be used on
// computed fields.
type ResourceDiff struct {
	// The schema for the resource being worked on.
	schema map[string]*Schema

	// The current config for this resource.
	config *ResourceConfig

	// The state for this resource as it exists post-refresh, after the initial
	// diff.
	state *InstanceState

	// The diff created by Terraform. This diff is used, along with state,
	// config, and custom-set diff data, to provide a multi-level reader
	// experience similar to ResourceData.
	diff *InstanceDiff

	// The internal reader structure that contains the state, config, the default
	// diff, and the new diff.
	//multiReader *MultiLevelFieldReader

	// A writer that writes overridden new fields.
	//newWriter *newValueWriter

	// Tracks which keys have been updated by ResourceDiff to ensure that the
	// diff does not get re-run on keys that were not touched, or diffs that were
	// just removed (re-running on the latter would just roll back the removal).
	updatedKeys map[string]bool

	// Tracks which keys were flagged as forceNew. These keys are not saved in
	// newWriter, but we need to track them so that they can be re-diffed later.
	forcedNewKeys map[string]bool
}

package tflegacy

// ResourceImporter defines how a resource is imported in Terraform. This
// can be set onto a Resource struct to make it Importable. Not all resources
// have to be importable; if a Resource doesn't have a ResourceImporter then
// it won't be importable.
//
// "Importing" in Terraform is the process of taking an already-created
// resource and bringing it under Terraform management. This can include
// updating Terraform state, generating Terraform configuration, etc.
type ResourceImporter struct {
	// The functions below must all be implemented for importing to work.

	// State is called to convert an ID to one or more InstanceState to
	// insert into the Terraform state. If this isn't specified, then
	// the ID is passed straight through.
	State StateFunc
}

// StateFunc is the function called to import a resource into the
// Terraform state. It is given a ResourceData with only ID set. This
// ID is going to be an arbitrary value given by the user and may not map
// directly to the ID format that the resource expects, so that should
// be validated.
//
// This should return a slice of ResourceData that turn into the state
// that was imported. This might be as simple as returning only the argument
// that was given to the function. In other cases (such as AWS security groups),
// an import may fan out to multiple resources and this will have to return
// multiple.
//
// To create the ResourceData structures for other resource types (if
// you have to), instantiate your resource and call the Data function.
type StateFunc func(*ResourceData, interface{}) ([]*ResourceData, error)

// InternalValidate should be called to validate the structure of this
// importer. This should be called in a unit test.
//
// Resource.InternalValidate() will automatically call this, so this doesn't
// need to be called manually. Further, Resource.InternalValidate() is
// automatically called by Provider.InternalValidate(), so you only need
// to internal validate the provider.
func (r *ResourceImporter) InternalValidate() error {
	// There are currently no structural constraints to enforce, so every
	// importer is considered valid.
	return nil
}

// ImportStatePassthrough is an implementation of StateFunc that can be
// used to simply pass the ID directly through. This should be used only
// in the case that an ID-only refresh is possible.
func ImportStatePassthrough(d *ResourceData, m interface{}) ([]*ResourceData, error) {
	return []*ResourceData{d}, nil
}

// Code generated by "stringer -type=ResourceMode -output=resource_mode_string.go config.go"; DO NOT EDIT.

package tflegacy

import "strconv"

func _() {
	// An "invalid array index" compiler error signifies that the constant values have changed.
	// Re-run the stringer command to generate them again.
	var x [1]struct{}
	_ = x[ManagedResourceMode-0]
	_ = x[DataResourceMode-1]
}

const _ResourceMode_name = "ManagedResourceModeDataResourceMode"

var _ResourceMode_index = [...]uint8{0, 19, 35}

func (i ResourceMode) String() string {
	if i < 0 || i >= ResourceMode(len(_ResourceMode_index)-1) {
		return "ResourceMode(" + strconv.FormatInt(int64(i), 10) + ")"
	}
	return _ResourceMode_name[_ResourceMode_index[i]:_ResourceMode_index[i+1]]
}

package tflegacy

import (
	"time"
)

// TimeoutKey is the key used to store timeout metadata in instance state,
// and TimeoutsConfigKey is the name of the "timeouts" configuration block.
const TimeoutKey = "e2bfb730-ecaa-11e6-8f88-34363bc7c4c0"
const TimeoutsConfigKey = "timeouts"

// Names of the individual operations that may carry a timeout.
const (
	TimeoutCreate  = "create"
	TimeoutRead    = "read"
	TimeoutUpdate  = "update"
	TimeoutDelete  = "delete"
	TimeoutDefault = "default"
)

// ResourceTimeout holds the per-operation timeout durations for a resource;
// a nil field means no timeout was configured for that operation.
type ResourceTimeout struct {
	Create, Read, Update, Delete, Default *time.Duration
}
package tflegacy

import (
	"fmt"
	"os"
)

//go:generate stringer -type=ValueType

// ValueType is an enum of the type kinds that can be represented by a schema.
type ValueType int

const (
	TypeInvalid ValueType = iota
	TypeBool
	TypeInt
	TypeFloat
	TypeString
	TypeList
	TypeMap
	TypeSet
	typeObject
)

// Schema is used by older providers to describe a single attribute or a nested
// block type, depending on the value of ConfigMode.
//
// Read the documentation of the struct fields for important details.
type Schema struct {
	// Type is the type of the value and must be one of the ValueType values.
	//
	// This type not only determines what type is expected/valid in configuring
	// this value, but also what type is returned when ResourceData.Get is
	// called. The types returned by Get are:
	//
	//   TypeBool - bool
	//   TypeInt - int
	//   TypeFloat - float64
	//   TypeString - string
	//   TypeList - []interface{}
	//   TypeMap - map[string]interface{}
	//   TypeSet - *schema.Set
	//
	Type ValueType

	// ConfigMode allows for overriding the default behaviors for mapping
	// schema entries onto configuration constructs.
	//
	// By default, the Elem field is used to choose whether a particular
	// schema is represented in configuration as an attribute or as a nested
	// block; if Elem is a *schema.Resource then it's a block and it's an
	// attribute otherwise.
	//
	// If Elem is *schema.Resource then setting ConfigMode to
	// SchemaConfigModeAttr will force it to be represented in configuration
	// as an attribute, which means that the Computed flag can be used to
	// provide default elements when the argument isn't set at all, while still
	// allowing the user to force zero elements by explicitly assigning an
	// empty list.
	//
	// When Computed is set without Optional, the attribute is not settable
	// in configuration at all and so SchemaConfigModeAttr is the automatic
	// behavior, and SchemaConfigModeBlock is not permitted.
	ConfigMode SchemaConfigMode

	// If one of these is set, then this item can come from the configuration.
	// Both cannot be set. If Optional is set, the value is optional. If
	// Required is set, the value is required.
	//
	// One of these must be set if the value is not computed. That is:
	// value either comes from the config, is computed, or is both.
	Optional bool
	Required bool

	// If this is non-nil, the provided function will be used during diff
	// of this field. If this is nil, a default diff for the type of the
	// schema will be used.
	//
	// This allows comparison based on something other than primitive, list
	// or map equality - for example SSH public keys may be considered
	// equivalent regardless of trailing whitespace.
	DiffSuppressFunc SchemaDiffSuppressFunc

	// If this is non-nil, then this will be a default value that is used
	// when this item is not set in the configuration.
	//
	// DefaultFunc can be specified to compute a dynamic default.
	// Only one of Default or DefaultFunc can be set. If DefaultFunc is
	// used then its return value should be stable to avoid generating
	// confusing/perpetual diffs.
	//
	// Changing either Default or the return value of DefaultFunc can be
	// a breaking change, especially if the attribute in question has
	// ForceNew set. If a default needs to change to align with changing
	// assumptions in an upstream API then it may be necessary to also use
	// the MigrateState function on the resource to change the state to match,
	// or have the Read function adjust the state value to align with the
	// new default.
	//
	// If Required is true above, then Default cannot be set. DefaultFunc
	// can be set with Required. If the DefaultFunc returns nil, then there
	// will be no default and the user will be asked to fill it in.
	//
	// If either of these is set, then the user won't be asked for input
	// for this key if the default is not nil.
	Default     interface{}
	DefaultFunc SchemaDefaultFunc

	// Description is used as the description for docs or asking for user
	// input. It should be relatively short (a few sentences max) and should
	// be formatted to fit a CLI.
	Description string

	// InputDefault is the default value to use for when inputs are requested.
	// This differs from Default in that if Default is set, no input is
	// asked for. If Input is asked, this will be the default value offered.
	InputDefault string

	// The fields below relate to diffs.
	//
	// If Computed is true, then the result of this value is computed
	// (unless specified by config) on creation.
	//
	// If ForceNew is true, then a change in this resource necessitates
	// the creation of a new resource.
	//
	// StateFunc is a function called to change the value of this before
	// storing it in the state (and likewise before comparing for diffs).
	// The use for this is for example with large strings, you may want
	// to simply store the hash of it.
	Computed  bool
	ForceNew  bool
	StateFunc SchemaStateFunc

	// The following fields are only set for a TypeList, TypeSet, or TypeMap.
	//
	// Elem represents the element type. For a TypeMap, it must be a *Schema
	// with a Type of TypeString, otherwise it may be either a *Schema or a
	// *Resource. If it is *Schema, the element type is just a simple value.
	// If it is *Resource, the element type is a complex structure,
	// potentially with its own lifecycle.
	Elem interface{}

	// The following fields are only set for a TypeList or TypeSet.
	//
	// MaxItems defines a maximum amount of items that can exist within a
	// TypeSet or TypeList. Specific use cases would be if a TypeSet is being
	// used to wrap a complex structure, however more than one instance would
	// cause instability.
	//
	// MinItems defines a minimum amount of items that can exist within a
	// TypeSet or TypeList. Specific use cases would be if a TypeSet is being
	// used to wrap a complex structure, however less than one instance would
	// cause instability.
	//
	// If the field Optional is set to true then MinItems is ignored and thus
	// effectively zero.
	//
	// If MaxItems is 1, you may optionally also set AsSingle in order to have
	// Terraform v0.12 or later treat a TypeList or TypeSet as if it were a
	// single value. It will remain a list or set in Terraform v0.10 and v0.11.
	// Enabling this for an existing attribute after you've made at least one
	// v0.12-compatible provider release is a breaking change. AsSingle is
	// likely to misbehave when used with deeply-nested set structures due to
	// the imprecision of set diffs, so be sure to test it thoroughly,
	// including updates that change the set members at all levels. AsSingle
	// exists primarily to be used in conjunction with ConfigMode when forcing
	// a nested resource to be treated as an attribute, so it can be considered
	// an attribute of object type rather than of list/set of object.
	MaxItems int
	MinItems int
	AsSingle bool

	// PromoteSingle originally allowed for a single element to be assigned
	// where a primitive list was expected, but this no longer works from
	// Terraform v0.12 onwards (Terraform Core will require a list to be set
	// regardless of what this is set to) and so only applies to Terraform v0.11
	// and earlier, and so should be used only to retain this functionality
	// for those still using v0.11 with a provider that formerly used this.
	PromoteSingle bool

	// The following fields are only valid for a TypeSet type.
	//
	// Set defines a function to determine the unique ID of an item so that
	// a proper set can be built.
	Set SchemaSetFunc

	// ComputedWhen is a set of queries on the configuration. Whenever any
	// of these things is changed, it will require a recompute (this requires
	// that Computed is set to true).
	//
	// NOTE: This currently does not work.
	ComputedWhen []string

	// ConflictsWith is a set of schema keys that conflict with this schema.
	// This will only check that they're set in the _config_. This will not
	// raise an error for a malfunctioning resource that sets a conflicting
	// key.
	ConflictsWith []string

	// When Deprecated is set, this attribute is deprecated.
	//
	// A deprecated field still works, but will probably stop working in near
	// future. This string is the message shown to the user with instructions on
	// how to address the deprecation.
	Deprecated string

	// When Removed is set, this attribute has been removed from the schema
	//
	// Removed attributes can be left in the Schema to generate informative error
	// messages for the user when they show up in resource configurations.
	// This string is the message shown to the user with instructions on
	// what to do about the removed attribute.
	Removed string

	// ValidateFunc allows individual fields to define arbitrary validation
	// logic. It is yielded the provided config value as an interface{} that is
	// guaranteed to be of the proper Schema type, and it can yield warnings or
	// errors based on inspection of that value.
	//
	// ValidateFunc currently only works for primitive types.
	ValidateFunc SchemaValidateFunc

	// Sensitive ensures that the attribute's value does not get displayed in
	// logs or regular output. It should be used for passwords or other
	// secret fields. Future versions of Terraform may encrypt these
	// values.
	Sensitive bool
}

// SchemaConfigMode is used to influence how a schema item is mapped into a
// corresponding configuration construct, using the ConfigMode field of
// Schema.
type SchemaConfigMode int

const (
	SchemaConfigModeAuto SchemaConfigMode = iota
	SchemaConfigModeAttr
	SchemaConfigModeBlock
)

// SchemaDefaultFunc is a function called to return a default value for
// a field.
251 | type SchemaDefaultFunc func() (interface{}, error) 252 | 253 | // SchemaDiffSuppressFunc is a function which can be used to determine 254 | // whether a detected diff on a schema element is "valid" or not, and 255 | // suppress it from the plan if necessary. 256 | // 257 | // Return true if the diff should be suppressed, false to retain it. 258 | type SchemaDiffSuppressFunc func(k, old, new string, d *ResourceData) bool 259 | 260 | // EnvDefaultFunc is a helper function that returns the value of the 261 | // given environment variable, if one exists, or the default value 262 | // otherwise. 263 | func EnvDefaultFunc(k string, dv interface{}) SchemaDefaultFunc { 264 | return func() (interface{}, error) { 265 | if v := os.Getenv(k); v != "" { 266 | return v, nil 267 | } 268 | 269 | return dv, nil 270 | } 271 | } 272 | 273 | // MultiEnvDefaultFunc is a helper function that returns the value of the first 274 | // environment variable in the given list that returns a non-empty value. If 275 | // none of the environment variables return a value, the default value is 276 | // returned. 277 | func MultiEnvDefaultFunc(ks []string, dv interface{}) SchemaDefaultFunc { 278 | return func() (interface{}, error) { 279 | for _, k := range ks { 280 | if v := os.Getenv(k); v != "" { 281 | return v, nil 282 | } 283 | } 284 | return dv, nil 285 | } 286 | } 287 | 288 | // SchemaSetFunc is a function that must return a unique ID for the given 289 | // element. This unique ID is used to store the element in a hash. 290 | type SchemaSetFunc func(interface{}) int 291 | 292 | // SchemaStateFunc is a function used to convert some type to a string 293 | // to be stored in the state. 294 | type SchemaStateFunc func(interface{}) string 295 | 296 | // SchemaValidateFunc is a function used to validate a single field in the 297 | // schema. 
298 | type SchemaValidateFunc func(interface{}, string) ([]string, []error) 299 | 300 | func (s *Schema) GoString() string { 301 | return fmt.Sprintf("*%#v", *s) 302 | } 303 | -------------------------------------------------------------------------------- /tflegacy/state.go: -------------------------------------------------------------------------------- 1 | package tflegacy 2 | 3 | import ( 4 | "fmt" 5 | "strconv" 6 | "strings" 7 | "sync" 8 | ) 9 | 10 | // State is a snapshot of the state in a legacy format. 11 | type State struct { 12 | // Version is the state file protocol version. 13 | Version int `json:"version"` 14 | 15 | // TFVersion is the version of Terraform that wrote this state. 16 | TFVersion string `json:"terraform_version,omitempty"` 17 | 18 | // Serial is incremented on any operation that modifies 19 | // the State file. It is used to detect potentially conflicting 20 | // updates. 21 | Serial int64 `json:"serial"` 22 | 23 | // Lineage is set when a new, blank state is created and then 24 | // never updated. This allows us to determine whether the serials 25 | // of two states can be meaningfully compared. 26 | // Apart from the guarantee that collisions between two lineages 27 | // are very unlikely, this value is opaque and external callers 28 | // should only compare lineage strings byte-for-byte for equality. 29 | Lineage string `json:"lineage"` 30 | 31 | // Modules contains all the modules in a breadth-first order 32 | Modules []*ModuleState `json:"modules"` 33 | 34 | mu sync.Mutex 35 | } 36 | 37 | // OutputState is a legacy representation of an output value from a State. 38 | type OutputState struct { 39 | // Sensitive describes whether the output is considered sensitive, 40 | // which may lead to masking the value on screen in some cases. 41 | Sensitive bool `json:"sensitive"` 42 | // Type describes the structure of Value. 
Valid values are "string", 43 | // "map" and "list" 44 | Type string `json:"type"` 45 | // Value contains the value of the output, in the structure described 46 | // by the Type field. 47 | Value interface{} `json:"value"` 48 | 49 | mu sync.Mutex 50 | } 51 | 52 | // ModuleState is a legacy representation of a module from a State. 53 | type ModuleState struct { 54 | // Path is the import path from the root module. Module imports are 55 | // always disjoint, so the path represents a module tree 56 | Path []string `json:"path"` 57 | 58 | // Locals are kept only transiently in-memory, because we can always 59 | // re-compute them. 60 | Locals map[string]interface{} `json:"-"` 61 | 62 | // Outputs declared by the module and maintained for each module 63 | // even though only the root module technically needs to be kept. 64 | // This allows operators to inspect values at the boundaries. 65 | Outputs map[string]*OutputState `json:"outputs"` 66 | 67 | // Resources is a mapping of the logically named resource to 68 | // the state of the resource. Each resource may actually have 69 | // N instances underneath, although a user only needs to think 70 | // about the 1:1 case. 71 | Resources map[string]*ResourceState `json:"resources"` 72 | 73 | // Dependencies are a list of things that this module relies on 74 | // existing to remain intact. For example: a module may depend 75 | // on a VPC ID given by an aws_vpc resource. 76 | // 77 | // Terraform uses this information to build valid destruction 78 | // orders and to warn the user if they're destroying a module that 79 | // another resource depends on. 80 | // 81 | // Things can be put into this list that may not be managed by 82 | // Terraform. If Terraform doesn't find a matching ID in the 83 | // overall state, then it assumes it isn't managed and doesn't 84 | // worry about it.
85 | Dependencies []string `json:"depends_on"` 86 | 87 | mu sync.Mutex 88 | } 89 | 90 | // ResourceStateKey is a structured representation of the key used for the 91 | // ModuleState.Resources mapping 92 | type ResourceStateKey struct { 93 | Name string 94 | Type string 95 | Mode ResourceMode 96 | Index int 97 | } 98 | 99 | // Equal determines whether two ResourceStateKeys are the same 100 | func (rsk *ResourceStateKey) Equal(other *ResourceStateKey) bool { 101 | if rsk == nil || other == nil { 102 | return false 103 | } 104 | if rsk.Mode != other.Mode { 105 | return false 106 | } 107 | if rsk.Type != other.Type { 108 | return false 109 | } 110 | if rsk.Name != other.Name { 111 | return false 112 | } 113 | if rsk.Index != other.Index { 114 | return false 115 | } 116 | return true 117 | } 118 | 119 | func (rsk *ResourceStateKey) String() string { 120 | if rsk == nil { 121 | return "" 122 | } 123 | var prefix string 124 | switch rsk.Mode { 125 | case ManagedResourceMode: 126 | prefix = "" 127 | case DataResourceMode: 128 | prefix = "data." 129 | default: 130 | panic(fmt.Errorf("unknown resource mode %s", rsk.Mode)) 131 | } 132 | if rsk.Index == -1 { 133 | return fmt.Sprintf("%s%s.%s", prefix, rsk.Type, rsk.Name) 134 | } 135 | return fmt.Sprintf("%s%s.%s.%d", prefix, rsk.Type, rsk.Name, rsk.Index) 136 | } 137 | 138 | // ParseResourceStateKey accepts a key in the format used by 139 | // ModuleState.Resources and returns a resource name and resource index. In the 140 | // state, a resource has the format "type.name.index" or "type.name". In the 141 | // latter case, the index is returned as -1. 142 | func ParseResourceStateKey(k string) (*ResourceStateKey, error) { 143 | parts := strings.Split(k, ".") 144 | mode := ManagedResourceMode 145 | if len(parts) > 0 && parts[0] == "data" { 146 | mode = DataResourceMode 147 | // Don't need the constant "data" prefix for parsing 148 | // now that we've figured out the mode. 
149 | parts = parts[1:] 150 | } 151 | if len(parts) < 2 || len(parts) > 3 { 152 | return nil, fmt.Errorf("Malformed resource state key: %s", k) 153 | } 154 | rsk := &ResourceStateKey{ 155 | Mode: mode, 156 | Type: parts[0], 157 | Name: parts[1], 158 | Index: -1, 159 | } 160 | if len(parts) == 3 { 161 | index, err := strconv.Atoi(parts[2]) 162 | if err != nil { 163 | return nil, fmt.Errorf("Malformed resource state key index: %s", k) 164 | } 165 | rsk.Index = index 166 | } 167 | return rsk, nil 168 | } 169 | 170 | // ResourceState is a legacy representation of the state of a particular 171 | // resource instance. It should actually be called InstanceState, but the 172 | // bad legacy name is preserved for compatibility with existing callers. 173 | type ResourceState struct { 174 | // This is filled in and managed by Terraform, and is the resource 175 | // type itself such as "mycloud_instance". If a resource provider sets 176 | // this value, it won't be persisted. 177 | Type string `json:"type"` 178 | 179 | // Dependencies are a list of things that this resource relies on 180 | // existing to remain intact. For example: an AWS instance might 181 | // depend on a subnet (which itself might depend on a VPC, and so 182 | // on). 183 | // 184 | // Terraform uses this information to build valid destruction 185 | // orders and to warn the user if they're destroying a resource that 186 | // another resource depends on. 187 | // 188 | // Things can be put into this list that may not be managed by 189 | // Terraform. If Terraform doesn't find a matching ID in the 190 | // overall state, then it assumes it isn't managed and doesn't 191 | // worry about it. 192 | Dependencies []string `json:"depends_on"` 193 | 194 | // Primary is the current active instance for this resource. 195 | // It can be replaced but only after a successful creation. 196 | // This is the instances on which providers will act. 
197 | Primary *InstanceState `json:"primary"` 198 | 199 | // Deposed is used in the mechanics of CreateBeforeDestroy: the existing 200 | // Primary is Deposed to get it out of the way for the replacement Primary to 201 | // be created by Apply. If the replacement Primary creates successfully, the 202 | // Deposed instance is cleaned up. 203 | // 204 | // If there were problems creating the replacement Primary, the Deposed 205 | // instance and the (now tainted) replacement Primary will be swapped so the 206 | // tainted replacement will be cleaned up instead. 207 | // 208 | // An instance will remain in the Deposed list until it is successfully 209 | // destroyed and purged. 210 | Deposed []*InstanceState `json:"deposed"` 211 | 212 | // Provider is used when a resource is connected to a provider with an alias. 213 | // If this string is empty, the resource is connected to the default provider, 214 | // e.g. "aws_instance" goes with the "aws" provider. 215 | // If the resource block contained a "provider" key, that value will be set here. 216 | Provider string `json:"provider"` 217 | 218 | mu sync.Mutex 219 | } 220 | 221 | // InstanceState is a legacy representation of the state of a single resource 222 | // instance object. It should really be called InstanceObjectState, but the 223 | // bad legacy name is preserved for compatibility with existing callers. 224 | type InstanceState struct { 225 | // A unique ID for this resource. This is opaque to Terraform 226 | // and is only meant as a lookup mechanism for the providers. 227 | ID string `json:"id"` 228 | 229 | // Attributes are basic information about the resource. Any keys here 230 | // are accessible in variable format within Terraform configurations: 231 | // ${resourcetype.name.attribute}. 
232 | Attributes map[string]string `json:"attributes"` 233 | 234 | // Ephemeral is used to store any state associated with this instance 235 | // that is necessary for the Terraform run to complete, but is not 236 | // persisted to a state file. 237 | Ephemeral EphemeralState `json:"-"` 238 | 239 | // Meta is a simple K/V map that is persisted to the State but otherwise 240 | // ignored by Terraform core. It's meant to be used for accounting by 241 | // external client code. The value here must only contain Go primitives 242 | // and collections. 243 | Meta map[string]interface{} `json:"meta"` 244 | 245 | // Tainted is used to mark a resource for recreation. 246 | Tainted bool `json:"tainted"` 247 | 248 | mu sync.Mutex 249 | } 250 | 251 | // EphemeralState is a legacy type now used only to represent the resource type 252 | // of a new resource instance object during a "terraform import" operation. 253 | type EphemeralState struct { 254 | // ConnInfo is no longer used and is now ignored. 255 | ConnInfo map[string]string `json:"-"` 256 | 257 | // Type is used to specify the resource type for this instance. This is only 258 | // required for import operations (as documented). If the documentation 259 | // doesn't state that you need to set this, then don't worry about 260 | // setting it. 261 | Type string `json:"-"` 262 | } 263 | -------------------------------------------------------------------------------- /tflegacy/valuetype_string.go: -------------------------------------------------------------------------------- 1 | // Code generated by "stringer -type=ValueType"; DO NOT EDIT. 2 | 3 | package tflegacy 4 | 5 | import "strconv" 6 | 7 | func _() { 8 | // An "invalid array index" compiler error signifies that the constant values have changed. 9 | // Re-run the stringer command to generate them again. 
10 | var x [1]struct{} 11 | _ = x[TypeInvalid-0] 12 | _ = x[TypeBool-1] 13 | _ = x[TypeInt-2] 14 | _ = x[TypeFloat-3] 15 | _ = x[TypeString-4] 16 | _ = x[TypeList-5] 17 | _ = x[TypeMap-6] 18 | _ = x[TypeSet-7] 19 | _ = x[typeObject-8] 20 | } 21 | 22 | const _ValueType_name = "TypeInvalidTypeBoolTypeIntTypeFloatTypeStringTypeListTypeMapTypeSettypeObject" 23 | 24 | var _ValueType_index = [...]uint8{0, 11, 19, 26, 35, 45, 53, 60, 67, 77} 25 | 26 | func (i ValueType) String() string { 27 | if i < 0 || i >= ValueType(len(_ValueType_index)-1) { 28 | return "ValueType(" + strconv.FormatInt(int64(i), 10) + ")" 29 | } 30 | return _ValueType_name[_ValueType_index[i]:_ValueType_index[i+1]] 31 | } 32 | -------------------------------------------------------------------------------- /tfobj/doc.go: -------------------------------------------------------------------------------- 1 | // Package tfobj contains helper types for working with Terraform object values 2 | // (resource configurations, etc) in a higher-level way while still retaining 3 | // the Terraform schema concepts. 4 | // 5 | // This provides a middle-ground between working directly with the object value 6 | // representation (cty.Value) and directly decoding into a custom struct using 7 | // gocty. 8 | package tfobj 9 | -------------------------------------------------------------------------------- /tfobj/encode.go: -------------------------------------------------------------------------------- 1 | package tfobj 2 | 3 | import ( 4 | "github.com/zclconf/go-cty/cty/gocty" 5 | ) 6 | 7 | // Decode attempts to unpack the data from the given reader's underlying object 8 | // using the gocty package. 9 | func Decode(r ObjectReader, to interface{}) error { 10 | obj := r.ObjectVal() 11 | return gocty.FromCtyValue(obj, to) 12 | } 13 | 14 | // TODO: Also an Encode function that takes an ObjectBuilderFull and populates 15 | // it with the result of reverse-gocty on a given interface{}. 
16 | -------------------------------------------------------------------------------- /tfobj/object_builder.go: -------------------------------------------------------------------------------- 1 | package tfobj 2 | 3 | import ( 4 | "fmt" 5 | 6 | "github.com/apparentlymart/terraform-sdk/internal/sdkdiags" 7 | "github.com/apparentlymart/terraform-sdk/tfschema" 8 | "github.com/zclconf/go-cty/cty" 9 | "github.com/zclconf/go-cty/cty/convert" 10 | ) 11 | 12 | // An ObjectBuilder is a helper for gradually constructing a new value that 13 | // conforms to a particular schema through mutation. 14 | // 15 | // Terraform type system values are normally immutable, but ObjectBuilder 16 | // provides a mutable representation of an object value that can, once ready, 17 | // be frozen into an immutable object value. 18 | type ObjectBuilder interface { 19 | // ObjectBuilder extends ObjectReader, providing access to the current 20 | // state of the object under construction. 21 | // 22 | // Call ObjectVal for a cty.Value representation of the whole object, once 23 | // all mutations are complete. 24 | ObjectReader 25 | 26 | // SetAttr replaces the value of the specified attribute with the given 27 | // value. It will panic if the given name is not defined as an attribute 28 | // for this object or if the given value is not compatible with the 29 | // type constraint given for the attribute in the schema. 30 | SetAttr(name string, val cty.Value) 31 | 32 | // The Block... family of methods echoes the methods with similar names on 33 | // ObjectReader but each returns an ObjectBuilder that can be used to 34 | // mutate the content of the requested block. 35 | // 36 | // ObjectBuilder does not permit modifying the collection of nested blocks 37 | // itself, because most Terraform operations require the result to contain 38 | // exactly the same blocks as given in configuration. 
39 | BlockBuilderSingle(blockType string) ObjectBuilder 40 | BlockBuilderList(blockType string) []ObjectBuilder 41 | BlockBuilderMap(blockType string) map[string]ObjectBuilder 42 | BlockBuilderFromList(blockType string, idx int) ObjectBuilder 43 | BlockBuilderFromMap(blockType string, key string) ObjectBuilder 44 | } 45 | 46 | // NewObjectBuilder creates and returns a new ObjectBuilder with the receiving 47 | // schema, whose initial value is a copy of the given object value. 48 | // 49 | // The given value must be an object type conforming to the schema, or 50 | // this function may panic or have other undefined behavior. To start with 51 | // a value that has all attributes null and no nested blocks, pass cty.NilVal 52 | // as the initial value. 53 | func NewObjectBuilder(schema *tfschema.BlockType, initial cty.Value) ObjectBuilder { 54 | return newObjectBuilder(schema, initial) 55 | } 56 | 57 | // DeriveNewObject constructs an ObjectBuilderFull with the same schema as the 58 | // given ObjectReader and an initial object value equal to that of the reader. 59 | // 60 | // This is useful when a new value is mostly equal to an existing value but 61 | // needs a few surgical changes made in-place. 62 | func DeriveNewObject(r ObjectReader) ObjectBuilderFull { 63 | return objectBuilderFull{newObjectBuilder(r.Schema(), r.ObjectVal())} 64 | } 65 | 66 | // ObjectBuilderFull is an extension of ObjectBuilder that additionally allows 67 | // totally replacing the collection of nested blocks of a given type. 68 | // 69 | // This interface is separate because most Terraform operations do not permit 70 | // this change. For resource types, it is allowed only for the ReadFn 71 | // implementation in order to synchronize the collection of nested blocks with 72 | // the collection of corresponding objects in the remote system. 
73 | type ObjectBuilderFull interface { 74 | ObjectBuilder 75 | 76 | // NewBlockBuilder returns an ObjectBuilderFull that can construct an object 77 | // of a type suitable to build a new nested block of the given type. It will 78 | // panic if no nested block type of the given name is defined. 79 | // 80 | // The returned builder is disconnected from the object that creates it 81 | // in the sense that modifications won't be reflected anywhere in the 82 | // creator. To make use of the result, call ObjectVal to obtain an 83 | // object value and pass it to one of the "ReplaceBlock..." methods. 84 | NewBlockBuilder(blockType string) ObjectBuilderFull 85 | 86 | // The ReplaceBlock... family of methods remove all blocks of the given 87 | // type and then construct new blocks from the given object(s) in their 88 | // place. The given nested builders must have been originally returned 89 | // from NewBlockBuilder on the same builder or these methods will panic. 90 | // These will panic also if the method used doesn't correspond with the 91 | // nesting mode of the given nested block type. 92 | ReplaceBlockSingle(blockType string, nb ObjectBuilderFull) 93 | ReplaceBlocksList(blockType string, nbs []ObjectBuilderFull) 94 | ReplaceBlocksMap(blockType string, nbs map[string]ObjectBuilderFull) 95 | } 96 | 97 | // NewObjectBuilderFull is like NewObjectBuilder except that it constructs an 98 | // ObjectBuilderFull instead of just an ObjectBuilder. 
99 | func NewObjectBuilderFull(schema *tfschema.BlockType, initial cty.Value) ObjectBuilderFull { 100 | ob := newObjectBuilder(schema, initial) 101 | return objectBuilderFull{ob} 102 | } 103 | 104 | type objectBuilder struct { 105 | schema *tfschema.BlockType 106 | attrs map[string]cty.Value 107 | singleBlocks map[string]*objectBuilder 108 | listBlocks map[string][]*objectBuilder 109 | mapBlocks map[string]map[string]*objectBuilder 110 | } 111 | 112 | func newObjectBuilder(schema *tfschema.BlockType, initial cty.Value) *objectBuilder { 113 | ret := &objectBuilder{ 114 | schema: schema, 115 | attrs: make(map[string]cty.Value), 116 | singleBlocks: make(map[string]*objectBuilder), 117 | listBlocks: make(map[string][]*objectBuilder), 118 | mapBlocks: make(map[string]map[string]*objectBuilder), 119 | } 120 | 121 | for name, attrS := range schema.Attributes { 122 | if initial == cty.NilVal { 123 | ret.attrs[name] = cty.NullVal(attrS.Type) 124 | continue 125 | } 126 | ret.attrs[name] = initial.GetAttr(name) 127 | } 128 | 129 | for name, blockS := range schema.NestedBlockTypes { 130 | switch blockS.Nesting { 131 | case tfschema.NestingSingle, tfschema.NestingGroup: 132 | if initial == cty.NilVal { 133 | ret.singleBlocks[name] = nil 134 | continue 135 | } 136 | nv := initial.GetAttr(name) 137 | if nv.IsNull() { 138 | ret.singleBlocks[name] = nil 139 | continue 140 | } 141 | ret.singleBlocks[name] = newObjectBuilder(&blockS.Content, nv) 142 | case tfschema.NestingList, tfschema.NestingSet: 143 | if initial == cty.NilVal { 144 | ret.listBlocks[name] = make([]*objectBuilder, 0) 145 | continue 146 | } 147 | nv := initial.GetAttr(name) 148 | if nv.IsKnown() && !nv.IsNull() { 149 | ret.listBlocks[name] = make([]*objectBuilder, 0, nv.LengthInt()) 150 | for it := nv.ElementIterator(); it.Next(); { 151 | _, ev := it.Element() 152 | ret.listBlocks[name] = append( 153 | ret.listBlocks[name], 154 | newObjectBuilder(&blockS.Content, ev), 155 | ) 156 | } 157 | } 158 | case 
tfschema.NestingMap: 159 | if initial == cty.NilVal { 160 | ret.mapBlocks[name] = make(map[string]*objectBuilder) 161 | continue 162 | } 163 | nv := initial.GetAttr(name) 164 | if nv.IsKnown() && !nv.IsNull() { 165 | ret.mapBlocks[name] = make(map[string]*objectBuilder, nv.LengthInt()) 166 | for it := nv.ElementIterator(); it.Next(); { 167 | ek, ev := it.Element() 168 | ret.mapBlocks[name][ek.AsString()] = newObjectBuilder(&blockS.Content, ev) 169 | } 170 | } 171 | default: 172 | panic(fmt.Sprintf("unknown block type nesting mode %s for %q", blockS.Nesting, name)) 173 | } 174 | } 175 | 176 | return ret 177 | } 178 | 179 | func (b *objectBuilder) Schema() *tfschema.BlockType { 180 | return b.schema 181 | } 182 | 183 | func (b *objectBuilder) ObjectVal() cty.Value { 184 | vals := make(map[string]cty.Value, len(b.attrs)+len(b.singleBlocks)+len(b.listBlocks)+len(b.mapBlocks)) 185 | for name, val := range b.attrs { 186 | vals[name] = val 187 | } 188 | for name, nb := range b.singleBlocks { 189 | vals[name] = nb.ObjectVal() 190 | } 191 | for name, nbs := range b.listBlocks { 192 | blockS := b.schema.NestedBlockTypes[name] 193 | wantEty := blockS.Content.ImpliedCtyType() 194 | if len(nbs) == 0 { 195 | switch blockS.Nesting { 196 | case tfschema.NestingList: 197 | if wantEty.HasDynamicTypes() { 198 | vals[name] = cty.EmptyTupleVal 199 | } else { 200 | vals[name] = cty.ListValEmpty(wantEty) 201 | } 202 | case tfschema.NestingSet: 203 | vals[name] = cty.SetValEmpty(wantEty) 204 | } 205 | continue 206 | } 207 | subVals := make([]cty.Value, len(nbs)) 208 | for i, nb := range nbs { 209 | subVals[i] = nb.ObjectVal() 210 | } 211 | switch blockS.Nesting { 212 | case tfschema.NestingList: 213 | if wantEty.HasDynamicTypes() { 214 | vals[name] = cty.TupleVal(subVals) 215 | } else { 216 | vals[name] = cty.ListVal(subVals) 217 | } 218 | case tfschema.NestingSet: 219 | vals[name] = cty.SetVal(subVals) 220 | } 221 | } 222 | for name, nbs := range b.mapBlocks { 223 | blockS := 
b.schema.NestedBlockTypes[name] 224 | wantEty := blockS.Content.ImpliedCtyType() 225 | if len(nbs) == 0 { 226 | if wantEty.HasDynamicTypes() { 227 | vals[name] = cty.EmptyObjectVal 228 | } else { 229 | vals[name] = cty.MapValEmpty(wantEty) 230 | } 231 | continue 232 | } 233 | subVals := make(map[string]cty.Value, len(nbs)) 234 | for k, nb := range nbs { 235 | subVals[k] = nb.ObjectVal() 236 | } 237 | if wantEty.HasDynamicTypes() { 238 | vals[name] = cty.ObjectVal(subVals) 239 | } else { 240 | vals[name] = cty.MapVal(subVals) 241 | } 242 | } 243 | return cty.ObjectVal(vals) 244 | } 245 | 246 | func (b *objectBuilder) Attr(name string) cty.Value { 247 | if _, ok := b.schema.Attributes[name]; !ok { 248 | panic(fmt.Sprintf("no attribute named %q", name)) 249 | } 250 | return b.attrs[name] 251 | } 252 | 253 | func (b *objectBuilder) SetAttr(name string, val cty.Value) { 254 | attrS, ok := b.schema.Attributes[name] 255 | if !ok { 256 | panic(fmt.Sprintf("no attribute named %q", name)) 257 | } 258 | val, err := convert.Convert(val, attrS.Type) 259 | if err != nil { 260 | panic(fmt.Sprintf("unsuitable value for %q: %s", name, sdkdiags.FormatError(err))) 261 | } 262 | b.attrs[name] = val 263 | } 264 | 265 | func (b *objectBuilder) BlockCount(typeName string) int { 266 | blockS, ok := b.schema.NestedBlockTypes[typeName] 267 | if !ok { 268 | panic(fmt.Sprintf("no block type named %q", typeName)) 269 | } 270 | switch blockS.Nesting { 271 | case tfschema.NestingSingle: 272 | if b.singleBlocks[typeName] == nil { 273 | return 0 274 | } 275 | return 1 276 | case tfschema.NestingList, tfschema.NestingSet: 277 | return len(b.listBlocks[typeName]) 278 | case tfschema.NestingMap: 279 | return len(b.mapBlocks[typeName]) 280 | default: 281 | panic(fmt.Sprintf("unknown block type nesting mode %s for %q", blockS.Nesting, typeName)) 282 | } 283 | } 284 | 285 | func (b *objectBuilder) BlockSingle(typeName string) ObjectReader { 286 | ret := b.BlockBuilderSingle(typeName) 287 | if ret == nil 
{ 288 | return nil // avoid returning typed nil 289 | } 290 | return ret 291 | } 292 | 293 | func (b *objectBuilder) BlockList(typeName string) []ObjectReader { 294 | bbs := b.BlockBuilderList(typeName) 295 | if len(bbs) == 0 { 296 | return nil 297 | } 298 | ret := make([]ObjectReader, len(bbs)) 299 | for i, bb := range bbs { 300 | ret[i] = bb 301 | } 302 | return ret 303 | } 304 | 305 | func (b *objectBuilder) BlockFromList(typeName string, idx int) ObjectReader { 306 | ret := b.BlockBuilderFromList(typeName, idx) 307 | if ret == nil { 308 | return nil // avoid returning typed nil 309 | } 310 | return ret 311 | } 312 | 313 | func (b *objectBuilder) BlockMap(typeName string) map[string]ObjectReader { 314 | bbs := b.BlockBuilderMap(typeName) 315 | if len(bbs) == 0 { 316 | return nil 317 | } 318 | ret := make(map[string]ObjectReader, len(bbs)) 319 | for k, bb := range bbs { 320 | ret[k] = bb 321 | } 322 | return ret 323 | } 324 | 325 | func (b *objectBuilder) BlockFromMap(typeName string, key string) ObjectReader { 326 | ret := b.BlockBuilderFromMap(typeName, key) 327 | if ret == nil { 328 | return nil // avoid returning typed nil 329 | } 330 | return ret 331 | } 332 | 333 | func (b *objectBuilder) BlockBuilderSingle(typeName string) ObjectBuilder { 334 | if blockS, ok := b.schema.NestedBlockTypes[typeName]; !ok || (blockS.Nesting != tfschema.NestingSingle && blockS.Nesting != tfschema.NestingGroup) { 335 | panic(fmt.Sprintf("%q is not a nested block type of tfschema.NestingSingle or tfschema.NestingGroup", typeName)) 336 | } 337 | ret := b.singleBlocks[typeName] 338 | if ret == nil { 339 | return nil // avoid returning typed nil 340 | } 341 | return ret 342 | } 343 | 344 | func (b *objectBuilder) BlockBuilderList(typeName string) []ObjectBuilder { 345 | if blockS, ok := b.schema.NestedBlockTypes[typeName]; !ok || (blockS.Nesting != tfschema.NestingList && blockS.Nesting != tfschema.NestingSet) { 346 | panic(fmt.Sprintf("%q is not a nested block type of 
tfschema.NestingList or tfschema.NestingSet", typeName)) 347 | } 348 | nbs := b.listBlocks[typeName] 349 | if len(nbs) == 0 { 350 | return nil 351 | } 352 | ret := make([]ObjectBuilder, len(nbs)) 353 | for i, nb := range nbs { 354 | ret[i] = nb 355 | } 356 | return ret 357 | } 358 | 359 | func (b *objectBuilder) BlockBuilderFromList(typeName string, idx int) ObjectBuilder { 360 | if blockS, ok := b.schema.NestedBlockTypes[typeName]; !ok || blockS.Nesting != tfschema.NestingList { 361 | panic(fmt.Sprintf("%q is not a nested block type of tfschema.NestingList", typeName)) 362 | } 363 | ret := b.listBlocks[typeName][idx] 364 | if ret == nil { 365 | return nil // avoid returning typed nil 366 | } 367 | return ret 368 | } 369 | 370 | func (b *objectBuilder) BlockBuilderMap(typeName string) map[string]ObjectBuilder { 371 | if blockS, ok := b.schema.NestedBlockTypes[typeName]; !ok || blockS.Nesting != tfschema.NestingMap { 372 | panic(fmt.Sprintf("%q is not a nested block type of tfschema.NestingMap", typeName)) 373 | } 374 | nbs := b.mapBlocks[typeName] 375 | if len(nbs) == 0 { 376 | return nil 377 | } 378 | ret := make(map[string]ObjectBuilder, len(nbs)) 379 | for k, nb := range nbs { 380 | ret[k] = nb 381 | } 382 | return ret 383 | } 384 | 385 | func (b *objectBuilder) BlockBuilderFromMap(typeName string, key string) ObjectBuilder { 386 | if blockS, ok := b.schema.NestedBlockTypes[typeName]; !ok || blockS.Nesting != tfschema.NestingMap { 387 | panic(fmt.Sprintf("%q is not a nested block type of tfschema.NestingMap", typeName)) 388 | } 389 | ret := b.mapBlocks[typeName][key] 390 | if ret == nil { 391 | return nil // avoid returning typed nil 392 | } 393 | return ret 394 | } 395 | 396 | type objectBuilderFull struct { 397 | *objectBuilder 398 | } 399 | 400 | func (b objectBuilderFull) NewBlockBuilder(typeName string) ObjectBuilderFull { 401 | blockS, ok := b.schema.NestedBlockTypes[typeName] 402 | if !ok { 403 | panic(fmt.Sprintf("%q is not a nested block type", 
typeName)) 404 | } 405 | 406 | nb := newObjectBuilder(&blockS.Content, cty.NilVal) 407 | return objectBuilderFull{nb} 408 | } 409 | 410 | func (b objectBuilderFull) ReplaceBlockSingle(typeName string, nb ObjectBuilderFull) { 411 | blockS, ok := b.schema.NestedBlockTypes[typeName] 412 | if !ok || blockS.Nesting != tfschema.NestingSingle { 413 | panic(fmt.Sprintf("%q is not a nested block type of tfschema.NestingSingle", typeName)) 414 | } 415 | if nb == nil { 416 | b.objectBuilder.singleBlocks[typeName] = nil 417 | return 418 | } 419 | b.objectBuilder.singleBlocks[typeName] = nb.(objectBuilderFull).objectBuilder 420 | } 421 | 422 | func (b objectBuilderFull) ReplaceBlocksList(typeName string, nbs []ObjectBuilderFull) { 423 | blockS, ok := b.schema.NestedBlockTypes[typeName] 424 | if !ok || (blockS.Nesting != tfschema.NestingList && blockS.Nesting != tfschema.NestingSet) { 425 | panic(fmt.Sprintf("%q is not a nested block type of tfschema.NestingList or tfschema.NestingSet", typeName)) 426 | } 427 | if len(nbs) == 0 { 428 | b.objectBuilder.listBlocks[typeName] = make([]*objectBuilder, 0) 429 | return 430 | } 431 | new := make([]*objectBuilder, len(nbs)) 432 | for i, nb := range nbs { 433 | new[i] = nb.(objectBuilderFull).objectBuilder 434 | } 435 | b.objectBuilder.listBlocks[typeName] = new 436 | } 437 | 438 | func (b objectBuilderFull) ReplaceBlocksMap(typeName string, nbs map[string]ObjectBuilderFull) { 439 | blockS, ok := b.schema.NestedBlockTypes[typeName] 440 | if !ok || blockS.Nesting != tfschema.NestingMap { 441 | panic(fmt.Sprintf("%q is not a nested block type of tfschema.NestingMap", typeName)) 442 | } 443 | if len(nbs) == 0 { 444 | b.objectBuilder.listBlocks[typeName] = make([]*objectBuilder, 0) 445 | return 446 | } 447 | new := make(map[string]*objectBuilder, len(nbs)) 448 | for k, nb := range nbs { 449 | new[k] = nb.(objectBuilderFull).objectBuilder 450 | } 451 | b.objectBuilder.mapBlocks[typeName] = new 452 | } 453 | 
--------------------------------------------------------------------------------
/tfobj/object_reader.go:
--------------------------------------------------------------------------------
package tfobj

import (
	"fmt"

	"github.com/apparentlymart/terraform-sdk/tfschema"
	"github.com/zclconf/go-cty/cty"
)

// An ObjectReader has methods to read data from a value that conforms to a
// particular schema, such as a resource type configuration.
type ObjectReader interface {
	// Schema returns the schema that the object conforms to. Do not modify
	// any part of the returned schema.
	Schema() *tfschema.BlockType

	// ObjectVal returns the whole object that the ObjectReader is providing
	// access to. The result has a type that conforms to the reader's schema.
	ObjectVal() cty.Value

	// Attr returns the value for the attribute of the given name. It will
	// panic if the given name is not defined as an attribute for this object
	// in its schema.
	Attr(name string) cty.Value

	// BlockCount returns the number of blocks present of the given type, or
	// panics if the given name isn't declared as a block type in the schema.
	BlockCount(blockType string) int

	// The "Block..." family of methods all interact with nested blocks.
	//
	// BlockSingle, BlockList, and BlockMap allow reading all of the blocks of
	// a particular type, with each one being appropriate for a different
	// tfschema.NestingMode. These methods will panic if the method called isn't
	// compatible with the nesting mode. (BlockList can be used with NestingSet).
	//
	// BlockFromList and BlockFromMap similarly allow extracting a single nested
	// block from a collection of blocks of a particular type using a suitable
	// key. BlockFromList can be used only with NestingList block types and
	// BlockFromMap only with NestingMap block types. Neither method can be
	// used with NestingSet block types because set elements do not have keys.
	// These methods will panic if used with an incompatible block type.
	BlockSingle(blockType string) ObjectReader
	BlockList(blockType string) []ObjectReader
	BlockMap(blockType string) map[string]ObjectReader
	BlockFromList(blockType string, idx int) ObjectReader
	BlockFromMap(blockType string, key string) ObjectReader
}

// NewObjectReader constructs a new ObjectReader for reading the given object
// value, which must be a non-null, known value whose type conforms to the
// implied type of the receiving schema, or the results are undefined.
func NewObjectReader(schema *tfschema.BlockType, obj cty.Value) ObjectReader {
	if obj.IsNull() || !obj.IsKnown() {
		panic("ObjectReader called with object that isn't known and non-null")
	}
	if !obj.Type().IsObjectType() {
		panic("ObjectReader called with non-object value")
	}
	return &objectReaderVal{
		schema: schema,
		v:      obj,
	}
}

// objectReaderVal is the concrete ObjectReader implementation, reading
// directly from a cty.Value that conforms to the schema.
type objectReaderVal struct {
	schema *tfschema.BlockType // schema the value conforms to
	v      cty.Value           // the object value being read
}

// Compile-time check that objectReaderVal satisfies ObjectReader.
var _ ObjectReader = (*objectReaderVal)(nil)

func (r *objectReaderVal) Schema() *tfschema.BlockType {
	return r.schema
}

func (r *objectReaderVal) ObjectVal() cty.Value {
	return r.v
}

func (r *objectReaderVal) Attr(name string) cty.Value {
	// Check the schema first so a typo'd name panics with a clear message
	// rather than failing inside cty.
	_, exists := r.schema.Attributes[name]
	if !exists {
		panic(fmt.Sprintf("attempt to read non-attribute %q with Attr", name))
	}
	return r.v.GetAttr(name)
}

func (r *objectReaderVal) BlockCount(blockType string) int {
	blockS, obj := r.blockVal(blockType)
	switch blockS.Nesting {
	case tfschema.NestingSingle:
		// A single-nested block is present unless its value is null.
		if obj.IsNull() {
			return 0
		}
		return 1
	default:
		// NOTE(review): NestingGroup also reaches this arm, where LengthInt
		// is called on an object value — confirm that is intended.
		if obj.IsNull() || !obj.IsKnown() {
			// Should never happen when Terraform is behaving itself, but
			// we'll be robust to avoid a crash here.
			return 0
		}
		return obj.LengthInt()
	}
}

func (r *objectReaderVal) BlockSingle(blockType string) ObjectReader {
	blockS, obj := r.blockVal(blockType)
	if blockS.Nesting != tfschema.NestingSingle && blockS.Nesting != tfschema.NestingGroup {
		panic(fmt.Sprintf("attempt to read block type %q (%s) with BlockSingle method", blockType, blockS.Nesting))
	}
	return &objectReaderVal{
		schema: &blockS.Content,
		v:      obj,
	}
}

func (r *objectReaderVal) BlockList(blockType string) []ObjectReader {
	blockS, list := r.blockVal(blockType)
	if blockS.Nesting != tfschema.NestingList && blockS.Nesting != tfschema.NestingSet {
		panic(fmt.Sprintf("attempt to read block type %q (%s) with BlockList method", blockType, blockS.Nesting))
	}
	if list.IsNull() || !list.IsKnown() {
		// Should never happen when Terraform is behaving itself, but
		// we'll be robust to avoid a crash here.
		return nil
	}
	l := list.LengthInt()
	ret := make([]ObjectReader, 0, l)
	for it := list.ElementIterator(); it.Next(); {
		_, v := it.Element()
		ret = append(ret, &objectReaderVal{
			schema: &blockS.Content,
			v:      v,
		})
	}
	return ret
}

func (r *objectReaderVal) BlockMap(blockType string) map[string]ObjectReader {
	blockS, m := r.blockVal(blockType)
	if blockS.Nesting != tfschema.NestingMap {
		panic(fmt.Sprintf("attempt to read block type %q (%s) with BlockMap method", blockType, blockS.Nesting))
	}
	if m.IsNull() || !m.IsKnown() {
		// Should never happen when Terraform is behaving itself, but
		// we'll be robust to avoid a crash here.
		return nil
	}
	l := m.LengthInt()
	ret := make(map[string]ObjectReader, l)
	for it := m.ElementIterator(); it.Next(); {
		k, v := it.Element()
		ret[k.AsString()] = &objectReaderVal{
			schema: &blockS.Content,
			v:      v,
		}
	}
	return ret
}

func (r *objectReaderVal) BlockFromList(blockType string, idx int) ObjectReader {
	blockS, list := r.blockVal(blockType)
	if blockS.Nesting != tfschema.NestingList {
		panic(fmt.Sprintf("attempt to read block type %q (%s) with BlockFromList method", blockType, blockS.Nesting))
	}
	v := list.Index(cty.NumberIntVal(int64(idx)))
	return &objectReaderVal{
		schema: &blockS.Content,
		v:      v,
	}
}

func (r *objectReaderVal) BlockFromMap(blockType string, key string) ObjectReader {
	blockS, list := r.blockVal(blockType)
	if blockS.Nesting != tfschema.NestingMap {
		panic(fmt.Sprintf("attempt to read block type %q (%s) with BlockFromMap method", blockType, blockS.Nesting))
	}
	v := list.Index(cty.StringVal(key))
	return &objectReaderVal{
		schema: &blockS.Content,
		v:      v,
	}
}

// blockVal looks up the named nested block type in the schema and returns it
// along with the raw attribute value that holds that block type's data. It
// panics if the name is not declared as a nested block type.
func (r *objectReaderVal) blockVal(blockType string) (*tfschema.NestedBlockType, cty.Value) {
	blockS, exists := r.schema.NestedBlockTypes[blockType]
	if !exists {
		panic(fmt.Sprintf("attempt to read non-block-type %q with block method", blockType))
	}
	return blockS, r.v.GetAttr(blockType)
}
--------------------------------------------------------------------------------
/tfschema/doc.go:
--------------------------------------------------------------------------------
// Package tfschema contains types and functions for working with Terraform
// configuration schemas, such as the schema for a resource type in a provider.
package tfschema
--------------------------------------------------------------------------------
/tfschema/nestingmode_string.go:
--------------------------------------------------------------------------------
// Code generated by "stringer -type=NestingMode"; DO NOT EDIT.

package tfschema

import "strconv"

func _() {
	// An "invalid array index" compiler error signifies that the constant values have changed.
	// Re-run the stringer command to generate them again.
	var x [1]struct{}
	_ = x[nestingInvalid-0]
	_ = x[NestingSingle-1]
	_ = x[NestingGroup-2]
	_ = x[NestingList-3]
	_ = x[NestingMap-4]
	_ = x[NestingSet-5]
}

const _NestingMode_name = "nestingInvalidNestingSingleNestingGroupNestingListNestingMapNestingSet"

var _NestingMode_index = [...]uint8{0, 14, 27, 39, 50, 60, 70}

func (i NestingMode) String() string {
	if i < 0 || i >= NestingMode(len(_NestingMode_index)-1) {
		return "NestingMode(" + strconv.FormatInt(int64(i), 10) + ")"
	}
	return _NestingMode_name[_NestingMode_index[i]:_NestingMode_index[i+1]]
}
--------------------------------------------------------------------------------
/tfschema/schema.go:
--------------------------------------------------------------------------------
package tfschema

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
	"github.com/zclconf/go-cty/cty/gocty"
)

// BlockType describes the structure of a configuration block in terms of its
// direct attributes and any nested block types.
type BlockType struct {
	Attributes       map[string]*Attribute
	NestedBlockTypes map[string]*NestedBlockType
}

// Attribute describes a single attribute within a block type.
type Attribute struct {
	// Type defines the Terraform Language type that is required for values of
	// this attribute. Set Type to cty.DynamicPseudoType to indicate that any
	// type is allowed. The ValidateFn field can be used to provide more
	// specific constraints on acceptable values.
	Type cty.Type

	// Required, Optional, and Computed together define how this attribute
	// behaves in configuration and during change actions.
	//
	// Required and Optional are mutually exclusive. If Required is set then
	// a value for the attribute must always be provided as an argument in
	// the configuration. If Optional is set then the configuration may omit
	// definition of the attribute, causing it to be set to a null value.
	// Optional can also be used in conjunction with Computed, as described
	// below.
	//
	// Set Computed to indicate that the provider itself decides the value for
	// the attribute. When Computed is used in isolation, the attribute may not
	// be used as an argument in configuration at all. When Computed is combined
	// with Optional, the attribute may optionally be defined in configuration
	// but the provider supplies a default value when it is not set.
	//
	// Required may not be used in combination with either Optional or Computed.
	Required, Optional, Computed bool

	// Sensitive is a request to protect values of this attribute from casual
	// display in the default Terraform UI. It may also be used in future for
	// more complex propagation of derived sensitive values. Set this flag
	// for any attribute that may contain passwords, private keys, etc.
	Sensitive bool

	// Description is an English language description of the meaning of values
	// of this attribute, written as at least one full sentence with a leading
	// capital letter and trailing period. Use multiple full sentences if any
	// clarifying remarks are needed, but try to keep descriptions concise.
	Description string

	// ValidateFn, if non-nil, must be set to a function that takes a single
	// argument and returns Diagnostics. The function will be called during
	// validation and passed a representation of the attribute value converted
	// to the type of the function argument using package gocty.
	//
	// If a given value cannot be converted to the first argument type, the
	// function will not be called and instead a generic type-related error
	// will be returned automatically to the user. If the given function has
	// the wrong number of arguments or an incorrect return value, validation
	// will fail with an error indicating a bug in the provider.
	//
	// Diagnostics returned from the function must have Path values relative
	// to the given value, which will be appended to the base path by the
	// caller during a full validation walk. For primitive values (which have
	// no elements or attributes), set Path to nil.
	ValidateFn interface{}

	// Default, if non-nil, must be set to a value that can be converted to
	// the attribute's value type to be used as a default value for the
	// (presumably optional) attribute.
	//
	// For attributes whose "default" values cannot be assigned statically,
	// leave Default as nil and mark the attribute instead as Computed, allowing
	// the value to be assigned either during planning or during apply.
	Default interface{}
}

// NestedBlockType describes a block type nested inside a BlockType, giving
// the content schema for each block along with its nesting mode and item
// count constraints.
type NestedBlockType struct {
	Nesting NestingMode
	Content BlockType

	MaxItems, MinItems int
}

// NestingMode defines how multiple blocks of a nested block type are
// represented in the resulting object value.
type NestingMode int

const (
	nestingInvalid NestingMode = iota
	NestingSingle
	NestingGroup
	NestingList
	NestingMap
	NestingSet
)

//go:generate stringer -type=NestingMode

// DefaultValue returns the cty.Value representation of the receiving attribute's
// default, as specified in the Default field.
//
// Will panic if the configured default cannot be converted to the attribute's
// value type.
func (a *Attribute) DefaultValue() cty.Value {
	// No configured default: the default is a null of the attribute's type.
	if a.Default == nil {
		return cty.NullVal(a.Type)
	}

	v, err := gocty.ToCtyValue(a.Default, a.Type)
	if err != nil {
		// A non-convertible Default is a provider bug per the Default field's
		// documented contract, so panicking here is deliberate.
		panic(fmt.Sprintf("invalid default value %#v for %#v: %s", a.Default, a.Type, err))
	}
	return v
}

// Null returns a null value of the type implied by the receiving schema.
func (b *BlockType) Null() cty.Value {
	return cty.NullVal(b.ImpliedCtyType())
}

// Unknown returns an unknown value of the type implied by the receiving schema.
func (b *BlockType) Unknown() cty.Value {
	return cty.UnknownVal(b.ImpliedCtyType())
}

// ImpliedCtyType derives a cty.Type value to represent values conforming to
// the receiving schema. The returned type is always an object type, with its
// attributes derived from the attributes and nested block types defined in
// the schema.
//
// This corresponds with similar logic in Terraform itself, and so must be
// compatible enough with that logic to communicate with Terraform's own
// object serializer/deserializer.
//
// This function produces reasonable results only for a valid schema. Use
// InternalValidate on the schema in provider tests to check that it is correct.
// When called on an invalid schema, the result may be incorrect or incomplete.
func (b *BlockType) ImpliedCtyType() cty.Type {
	atys := make(map[string]cty.Type)
	for name, attrS := range b.Attributes {
		atys[name] = attrS.Type
	}
	// Nested block types also appear as attributes of the object type, with
	// the collection kind decided by each block type's nesting mode.
	for name, blockS := range b.NestedBlockTypes {
		atys[name] = blockS.impliedCtyType()
	}
	return cty.Object(atys)
}

// impliedCtyType returns the cty.Type of the attribute that represents all
// blocks of this nested block type, wrapping the content's object type in a
// collection type as appropriate for the nesting mode.
func (b *NestedBlockType) impliedCtyType() cty.Type {
	nested := b.Content.ImpliedCtyType()
	if b.Nesting == NestingSingle || b.Nesting == NestingGroup {
		return nested // easy case
	}

	if nested.HasDynamicTypes() {
		// If a multi-nesting block contains any dynamic-typed attributes then
		// it'll be passed in as either a tuple or an object type with full
		// type information in the payload, so for the purposes of our static
		// type constraint, the whole block type attribute is itself
		// dynamically-typed.
		return cty.DynamicPseudoType
	}

	switch b.Nesting {
	case NestingList:
		return cty.List(nested)
	case NestingSet:
		return cty.Set(nested)
	case NestingMap:
		return cty.Map(nested)
	default:
		// Invalid, so what we return here is undefined as far as our godoc is
		// concerned.
		return cty.DynamicPseudoType
	}
}

// ApplyDefaults takes an object value (that must conform to the receiving
// schema) and returns a new object value where any null attribute values in
// the given object are replaced with their default values from the schema.
//
// The result is guaranteed to also conform to the schema. This function may
// panic if the schema is incorrectly specified.
func (b *BlockType) ApplyDefaults(given cty.Value) cty.Value {
	vals := make(map[string]cty.Value)

	for name, attrS := range b.Attributes {
		gv := given.GetAttr(name)
		rv := gv
		if gv.IsNull() {
			switch {
			case attrS.Computed:
				// Computed attributes default to unknown, to be resolved by
				// the provider during plan or apply.
				rv = cty.UnknownVal(attrS.Type)
			default:
				rv = attrS.DefaultValue()
			}
		}
		vals[name] = rv
	}

	// Recursively apply defaults inside nested blocks too.
	for name, blockS := range b.NestedBlockTypes {
		gv := given.GetAttr(name)
		vals[name] = blockS.ApplyDefaults(gv)
	}

	return cty.ObjectVal(vals)
}

// ApplyDefaults takes a value conforming to the type that represents blocks of
// the receiving nested block type and returns a new value, also conforming
// to that type, with the result of BlockType.ApplyDefaults applied to
// each element.
//
// This function expects that the given value will meet the guarantees offered
// by Terraform Core for values representing nested block types: they will always
// be known, and (aside from NestingSingle) never be null. If these
// guarantees don't hold then this function will panic.
func (b *NestedBlockType) ApplyDefaults(given cty.Value) cty.Value {
	wantTy := b.impliedCtyType()
	switch b.Nesting {
	case NestingSingle, NestingGroup:
		if given.IsNull() {
			return given
		}
		return b.Content.ApplyDefaults(given)
	case NestingList:
		vals := make([]cty.Value, 0, given.LengthInt())
		for it := given.ElementIterator(); it.Next(); {
			_, gv := it.Element()
			vals = append(vals, b.Content.ApplyDefaults(gv))
		}
		if !wantTy.IsListType() {
			// Schema must contain dynamically-typed attributes then, so we'll
			// return a tuple to properly capture the possibly-inconsistent
			// element object types.
			return cty.TupleVal(vals)
		}
		if len(vals) == 0 {
			return cty.ListValEmpty(wantTy.ElementType())
		}
		return cty.ListVal(vals)
	case NestingMap:
		vals := make(map[string]cty.Value, given.LengthInt())
		for it := given.ElementIterator(); it.Next(); {
			k, gv := it.Element()
			vals[k.AsString()] = b.Content.ApplyDefaults(gv)
		}
		if !wantTy.IsMapType() {
			// Schema must contain dynamically-typed attributes then, so we'll
			// return an object to properly capture the possibly-inconsistent
			// element object types.
			return cty.ObjectVal(vals)
		}
		if len(vals) == 0 {
			return cty.MapValEmpty(wantTy.ElementType())
		}
		return cty.MapVal(vals)
	case NestingSet:
		vals := make([]cty.Value, 0, given.LengthInt())
		for it := given.ElementIterator(); it.Next(); {
			_, gv := it.Element()
			vals = append(vals, b.Content.ApplyDefaults(gv))
		}
		// Dynamically-typed attributes are not supported with NestingSet,
		// so we just always return a set value for these.
		if len(vals) == 0 {
			return cty.SetValEmpty(wantTy.ElementType())
		}
		return cty.SetVal(vals)
	default:
		panic(fmt.Sprintf("invalid block nesting mode %#v", b.Nesting))
	}
}
--------------------------------------------------------------------------------