├── go.mod
├── generic
│   ├── go.mod
│   ├── README.md
│   ├── register.go
│   ├── register_test.go
│   ├── go.sum
│   ├── api_test.go
│   ├── arena_test.go
│   ├── api.go
│   └── arena.go
├── arena.go
├── arena_go120.go
├── mapiter_go112.go
├── headers.go
├── memory.go
├── .gitignore
├── mapiter.go
├── clone_benchmark_test.go
├── cloner.go
├── go.sum
├── clone_test.go
├── .github
│   └── workflows
│       └── go.yml
├── wrapper_benchmark_test.go
├── wrapper_sample_test.go
├── LICENSE
├── interfacedata.go
├── clone_sample_test.go
├── allocator_test.go
├── atomic_go119.go
├── wrapper_test.go
├── allocator_sample_test.go
├── allocatormethods.go
├── allocatormethods_test.go
├── wrapper.go
├── structtype_sample_test.go
├── structtype_test.go
├── allocator.go
├── structtype.go
├── README.md
├── clone_common_test.go
└── clone.go

/go.mod:
--------------------------------------------------------------------------------
1 | module github.com/huandu/go-clone
2 | 
3 | go 1.13
4 | 
5 | require github.com/huandu/go-assert v1.1.5
6 | 
--------------------------------------------------------------------------------
/generic/go.mod:
--------------------------------------------------------------------------------
1 | module github.com/huandu/go-clone/generic
2 | 
3 | go 1.18
4 | 
5 | require (
6 | 	github.com/huandu/go-assert v1.1.5
7 | 	github.com/huandu/go-clone v1.7.3
8 | )
9 | 
10 | require github.com/davecgh/go-spew v1.1.1 // indirect
11 | 
--------------------------------------------------------------------------------
/arena.go:
--------------------------------------------------------------------------------
1 | // Copyright 2023 Huan Du. All rights reserved.
2 | // Licensed under the MIT license that can be found in the LICENSE file.
3 | 
4 | //go:build !(go1.20 && goexperiment.arenas)
5 | // +build !go1.20 !goexperiment.arenas
6 | 
7 | package clone
8 | 
9 | const arenaIsEnabled = false
10 | 
--------------------------------------------------------------------------------
/arena_go120.go:
--------------------------------------------------------------------------------
1 | // Copyright 2023 Huan Du. All rights reserved.
2 | // Licensed under the MIT license that can be found in the LICENSE file.
3 | 
4 | //go:build go1.20 && goexperiment.arenas
5 | // +build go1.20,goexperiment.arenas
6 | 
7 | package clone
8 | 
9 | const arenaIsEnabled = true
10 | 
--------------------------------------------------------------------------------
/mapiter_go112.go:
--------------------------------------------------------------------------------
1 | // Copyright 2019 Huan Du. All rights reserved.
2 | // Licensed under the MIT license that can be found in the LICENSE file.
3 | 
4 | // +build go1.12
5 | 
6 | package clone
7 | 
8 | import (
9 | 	"reflect"
10 | )
11 | 
12 | func mapIter(m reflect.Value) *reflect.MapIter {
13 | 	return m.MapRange()
14 | }
15 | 
--------------------------------------------------------------------------------
/headers.go:
--------------------------------------------------------------------------------
1 | // Copyright 2019 Huan Du. All rights reserved.
2 | // Licensed under the MIT license that can be found in the LICENSE file.
3 | 
4 | package clone
5 | 
6 | import "reflect"
7 | 
8 | // As golint reports a warning on possible misuse of these headers,
9 | // avoid using these header types directly to silence golint.
10 | 
11 | type sliceHeader reflect.SliceHeader
12 | type stringHeader reflect.StringHeader
13 | 
--------------------------------------------------------------------------------
/memory.go:
--------------------------------------------------------------------------------
1 | // Copyright 2019 Huan Du. All rights reserved.
2 | // Licensed under the MIT license that can be found in the LICENSE file.
3 | 
4 | package clone
5 | 
6 | // maxByteSize is a value large enough to cheat the Go compiler
7 | // when converting an unsafe address to []byte.
8 | // It's not actually used at runtime.
9 | //
10 | // The value 2^30 is the max value AFAIK that makes the Go compiler happy on all archs.
11 | const maxByteSize = 1 << 30
12 | 
--------------------------------------------------------------------------------
/generic/README.md:
--------------------------------------------------------------------------------
1 | # Generic `go-clone` API
2 | 
3 | [![Go](https://github.com/huandu/go-clone/workflows/Go/badge.svg)](https://github.com/huandu/go-clone/actions)
4 | [![Go Doc](https://godoc.org/github.com/huandu/go-clone/generic?status.svg)](https://pkg.go.dev/github.com/huandu/go-clone/generic)
5 | 
6 | This package is a set of generic APIs for `go-clone`. Almost all methods are simple proxies, with a few exceptions. Building this package requires `go1.18` or later.
7 | 
8 | Please read the documentation in [the main project](../README.md) for more information.
9 | 
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Compiled Object files, Static and Dynamic libs (Shared Objects)
2 | *.o
3 | *.a
4 | *.so
5 | 
6 | # Folders
7 | _obj
8 | _test
9 | 
10 | # Architecture specific extensions/prefixes
11 | *.[568vq]
12 | [568vq].out
13 | 
14 | *.cgo1.go
15 | *.cgo2.c
16 | _cgo_defun.c
17 | _cgo_gotypes.go
18 | _cgo_export.*
19 | 
20 | _testmain.go
21 | 
22 | *.exe
23 | *.test
24 | *.prof
25 | 
26 | # Intellij
27 | *.iml
28 | .idea/
29 | 
30 | # VS Code
31 | debug
32 | debug_test
33 | .vscode/
34 | 
35 | # Mac
36 | .DS_Store
37 | 
38 | # go workspace
39 | go.work
40 | go.work.sum
41 | 
--------------------------------------------------------------------------------
/mapiter.go:
--------------------------------------------------------------------------------
1 | // Copyright 2019 Huan Du. All rights reserved.
2 | // Licensed under the MIT license that can be found in the LICENSE file.
3 | 
4 | // +build !go1.12
5 | 
6 | package clone
7 | 
8 | import (
9 | 	"reflect"
10 | )
11 | 
12 | type iter struct {
13 | 	m    reflect.Value
14 | 	k    reflect.Value
15 | 	keys []reflect.Value
16 | }
17 | 
18 | func mapIter(m reflect.Value) *iter {
19 | 	return &iter{
20 | 		m:    m,
21 | 		keys: m.MapKeys(),
22 | 	}
23 | }
24 | 
25 | func (it *iter) Next() bool {
26 | 	if len(it.keys) == 0 {
27 | 		return false
28 | 	}
29 | 
30 | 	it.k = it.keys[0]
31 | 	it.keys = it.keys[1:]
32 | 	return true
33 | }
34 | 
35 | func (it *iter) Key() reflect.Value {
36 | 	return it.k
37 | }
38 | 
39 | func (it *iter) Value() reflect.Value {
40 | 	return it.m.MapIndex(it.k)
41 | }
42 | 
--------------------------------------------------------------------------------
/clone_benchmark_test.go:
--------------------------------------------------------------------------------
1 | // Copyright 2019 Huan Du. All rights reserved.
2 | // Licensed under the MIT license that can be found in the LICENSE file.
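//
// The benchmarks below measure Clone on a small flat struct and on a nested
// map. A typical local invocation with the standard Go tooling (flags are
// adjustable as needed) is:
//
//	go test -run=NONE -bench=. -benchmem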
3 | 
4 | package clone
5 | 
6 | import "testing"
7 | 
8 | func BenchmarkSimpleClone(b *testing.B) {
9 | 	orig := &testSimple{
10 | 		Foo: 123,
11 | 		Bar: "abcd",
12 | 	}
13 | 	b.ResetTimer()
14 | 
15 | 	for i := 0; i < b.N; i++ {
16 | 		Clone(orig)
17 | 	}
18 | }
19 | 
20 | func BenchmarkComplexClone(b *testing.B) {
21 | 	m := map[string]*T{
22 | 		"abc": {
23 | 			Foo: 123,
24 | 			Bar: map[string]interface{}{
25 | 				"abc": 321,
26 | 			},
27 | 		},
28 | 		"def": {
29 | 			Foo: 456,
30 | 			Bar: map[string]interface{}{
31 | 				"def": 789,
32 | 			},
33 | 		},
34 | 	}
35 | 	b.ResetTimer()
36 | 
37 | 	for i := 0; i < b.N; i++ {
38 | 		Clone(m)
39 | 	}
40 | }
41 | 
--------------------------------------------------------------------------------
/cloner.go:
--------------------------------------------------------------------------------
1 | // Copyright 2023 Huan Du. All rights reserved.
2 | // Licensed under the MIT license that can be found in the LICENSE file.
3 | 
4 | package clone
5 | 
6 | // Cloner implements the clone API with a given allocator.
7 | type Cloner struct {
8 | 	allocator *Allocator
9 | }
10 | 
11 | // MakeCloner creates a cloner with the given allocator.
12 | func MakeCloner(allocator *Allocator) Cloner {
13 | 	return Cloner{
14 | 		allocator: allocator,
15 | 	}
16 | }
17 | 
18 | // Clone clones v with the given allocator.
19 | func (c Cloner) Clone(v interface{}) interface{} {
20 | 	return clone(c.allocator, v)
21 | }
22 | 
23 | // CloneSlowly clones v with the given allocator.
24 | // It can clone v even if v contains pointer cycles.
25 | func (c Cloner) CloneSlowly(v interface{}) interface{} {
26 | 	return cloneSlowly(c.allocator, v)
27 | }
28 | 
--------------------------------------------------------------------------------
/go.sum:
--------------------------------------------------------------------------------
1 | github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
2 | github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
3 | github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
4 | github.com/huandu/go-assert v1.1.5 h1:fjemmA7sSfYHJD7CUqs9qTwwfdNAx7/j2/ZlHXzNB3c=
5 | github.com/huandu/go-assert v1.1.5/go.mod h1:yOLvuqZwmcHIC5rIzrBhT7D3Q9c3GFnd0JrPVhn/06U=
6 | github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
7 | github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
8 | github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
9 | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
10 | gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
11 | 
--------------------------------------------------------------------------------
/generic/register.go:
--------------------------------------------------------------------------------
1 | // Copyright 2022 Huan Du. All rights reserved.
2 | // Licensed under the MIT license that can be found in the LICENSE file.
3 | 
4 | //go:build go1.19
5 | // +build go1.19
6 | 
7 | package clone
8 | 
9 | import (
10 | 	"reflect"
11 | 	"sync/atomic"
12 | )
13 | 
14 | // registerAtomicPointerCalled records the count of atomic.Pointer[T] clones, for test purposes only.
15 | var registerAtomicPointerCalled int32
16 | 
17 | // RegisterAtomicPointer registers a custom clone function for atomic.Pointer[T].
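//
// A minimal usage sketch (Payload is a hypothetical type used purely for
// illustration):
//
//	type Payload struct{ Value int }
//
//	RegisterAtomicPointer[Payload]()
//	// Struct fields of type atomic.Pointer[Payload] are now handled
//	// by the registered custom func when cloning.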
18 | func RegisterAtomicPointer[T any]() {
19 | 	SetCustomFunc(reflect.TypeOf(atomic.Pointer[T]{}), func(allocator *Allocator, old, new reflect.Value) {
20 | 		if !old.CanAddr() {
21 | 			return
22 | 		}
23 | 
24 | 		// Clone value inside atomic.Pointer[T].
25 | 		oldValue := old.Addr().Interface().(*atomic.Pointer[T])
26 | 		newValue := new.Addr().Interface().(*atomic.Pointer[T])
27 | 		v := oldValue.Load()
28 | 		newValue.Store(v)
29 | 
30 | 		atomic.AddInt32(&registerAtomicPointerCalled, 1)
31 | 	})
32 | }
33 | 
--------------------------------------------------------------------------------
/generic/register_test.go:
--------------------------------------------------------------------------------
1 | // Copyright 2022 Huan Du. All rights reserved.
2 | // Licensed under the MIT license that can be found in the LICENSE file.
3 | 
4 | //go:build go1.19
5 | // +build go1.19
6 | 
7 | package clone
8 | 
9 | import (
10 | 	"sync/atomic"
11 | 	"testing"
12 | 
13 | 	"github.com/huandu/go-assert"
14 | )
15 | 
16 | type RegisteredPayload struct {
17 | 	T string
18 | }
19 | 
20 | type UnregisteredPayload struct {
21 | 	T string
22 | }
23 | 
24 | type Pointers struct {
25 | 	P1 atomic.Pointer[RegisteredPayload]
26 | 	P2 atomic.Pointer[UnregisteredPayload]
27 | }
28 | 
29 | func TestRegisterAtomicPointer(t *testing.T) {
30 | 	a := assert.New(t)
31 | 	s := &Pointers{}
32 | 	stackPointerCannotBeCloned := atomic.Pointer[RegisteredPayload]{}
33 | 
34 | 	// Register atomic.Pointer[RegisteredPayload] only.
35 | 	RegisterAtomicPointer[RegisteredPayload]()
36 | 
37 | 	prev := registerAtomicPointerCalled
38 | 	Clone(s)
39 | 	Clone(stackPointerCannotBeCloned)
40 | 	a.Equal(registerAtomicPointerCalled, prev+1)
41 | }
42 | 
--------------------------------------------------------------------------------
/clone_test.go:
--------------------------------------------------------------------------------
1 | // Copyright 2023 Huan Du. All rights reserved.
2 | // Licensed under the MIT license that can be found in the LICENSE file.
3 | 
4 | package clone
5 | 
6 | import (
7 | 	"sync/atomic"
8 | 	"testing"
9 | 
10 | 	"github.com/huandu/go-assert"
11 | )
12 | 
13 | func TestCloneAll(t *testing.T) {
14 | 	for name, fn := range testFuncMap {
15 | 		t.Run(name, func(t *testing.T) {
16 | 			fn(t, defaultAllocator)
17 | 		})
18 | 	}
19 | }
20 | 
21 | // TestIssue21 tests issue #21.
22 | func TestIssue21(t *testing.T) {
23 | 	a := assert.New(t)
24 | 
25 | 	type Foo string
26 | 	type Bar struct {
27 | 		foo *Foo
28 | 	}
29 | 
30 | 	foo := Foo("hello")
31 | 
32 | 	src := Bar{
33 | 		foo: &foo,
34 | 	}
35 | 
36 | 	dst := Clone(src).(Bar)
37 | 
38 | 	a.Equal(dst.foo, src.foo)
39 | 	a.Assert(dst.foo != src.foo)
40 | 	a.Equal(dst, src)
41 | }
42 | 
43 | // TestIssue25 tests issue #25.
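// Cloning a zero *atomic.Value should yield a new, equally empty value:
// Load on the clone returns nil, as asserted below.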
44 | func TestIssue25(t *testing.T) {
45 | 	a := assert.New(t)
46 | 	cloned := Clone(new(atomic.Value)).(*atomic.Value)
47 | 
48 | 	a.Equal(cloned.Load(), nil)
49 | }
50 | 
--------------------------------------------------------------------------------
/.github/workflows/go.yml:
--------------------------------------------------------------------------------
1 | name: Go
2 | 
3 | on:
4 |   push:
5 |     branches: [master]
6 |   pull_request:
7 |     branches: [master]
8 | 
9 | jobs:
10 |   build:
11 |     name: Build
12 |     runs-on: ubuntu-latest
13 |     steps:
14 |       - name: Set up Go 1.x
15 |         uses: actions/setup-go@v2
16 |         with:
17 |           go-version: ^1.13
18 | 
19 |       - name: Check out code into the Go module directory
20 |         uses: actions/checkout@v2
21 | 
22 |       - name: Get dependencies
23 |         run: |
24 |           go mod download
25 |           go get
26 | 
27 |       - name: Test
28 |         run: go test -v -coverprofile=covprofile.cov ./...
29 | 
30 |       - name: Test generic
31 |         run: |
32 |           cd generic
33 |           go test -v ./...
34 |           cd ..
35 | 
36 |       - name: Send coverage
37 |         env:
38 |           COVERALLS_TOKEN: ${{ secrets.GITHUB_TOKEN }}
39 |         run: |
40 |           go get github.com/mattn/goveralls
41 |           go run github.com/mattn/goveralls -coverprofile=covprofile.cov -service=github
42 | 
--------------------------------------------------------------------------------
/generic/go.sum:
--------------------------------------------------------------------------------
1 | github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
2 | github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
3 | github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
4 | github.com/huandu/go-assert v1.1.5 h1:fjemmA7sSfYHJD7CUqs9qTwwfdNAx7/j2/ZlHXzNB3c=
5 | github.com/huandu/go-assert v1.1.5/go.mod h1:yOLvuqZwmcHIC5rIzrBhT7D3Q9c3GFnd0JrPVhn/06U=
6 | github.com/huandu/go-clone v1.7.3 h1:rtQODA+ABThEn6J5LBTppJfKmZy/FwfpMUWa8d01TTQ=
7 | github.com/huandu/go-clone v1.7.3/go.mod h1:ReGivhG6op3GYr+UY3lS6mxjKp7MIGTknuU5TbTVaXE=
8 | github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
9 | github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
10 | github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
11 | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
12 | gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
13 | 
--------------------------------------------------------------------------------
/wrapper_benchmark_test.go:
--------------------------------------------------------------------------------
1 | // Copyright 2019 Huan Du. All rights reserved.
2 | // Licensed under the MIT license that can be found in the LICENSE file.
3 | 
4 | package clone
5 | 
6 | import "testing"
7 | 
8 | func BenchmarkUnwrap(b *testing.B) {
9 | 	orig := &testType{
10 | 		Foo: "abcd",
11 | 		Bar: map[string]interface{}{
12 | 			"def": 123,
13 | 			"ghi": 78.9,
14 | 		},
15 | 		Player: []float64{
16 | 			12.3, 45.6, -78.9,
17 | 		},
18 | 	}
19 | 	wrapped := Wrap(orig)
20 | 	b.ResetTimer()
21 | 
22 | 	for i := 0; i < b.N; i++ {
23 | 		Unwrap(wrapped)
24 | 	}
25 | }
26 | 
27 | func BenchmarkSimpleWrap(b *testing.B) {
28 | 	orig := &testSimple{
29 | 		Foo: 123,
30 | 		Bar: "abcd",
31 | 	}
32 | 	b.ResetTimer()
33 | 
34 | 	for i := 0; i < b.N; i++ {
35 | 		Wrap(orig)
36 | 	}
37 | }
38 | 
39 | func BenchmarkComplexWrap(b *testing.B) {
40 | 	orig := &testType{
41 | 		Foo: "abcd",
42 | 		Bar: map[string]interface{}{
43 | 			"def": 123,
44 | 			"ghi": 78.9,
45 | 		},
46 | 		Player: []float64{
47 | 			12.3, 45.6, -78.9,
48 | 		},
49 | 	}
50 | 	b.ResetTimer()
51 | 
52 | 	for i := 0; i < b.N; i++ {
53 | 		Wrap(orig)
54 | 	}
55 | }
56 | 
--------------------------------------------------------------------------------
/wrapper_sample_test.go:
--------------------------------------------------------------------------------
1 | // Copyright 2019 Huan Du. All rights reserved.
2 | // Licensed under the MIT license that can be found in the LICENSE file.
3 | 
4 | package clone
5 | 
6 | import "fmt"
7 | 
8 | func ExampleWrap() {
9 | 	// Suppose we have a type T defined as follows.
10 | 	//     type T struct {
11 | 	//         Foo int
12 | 	//     }
13 | 	v := &T{
14 | 		Foo: 123,
15 | 	}
16 | 	w := Wrap(v).(*T) // Wrap value to protect it.
17 | 
18 | 	// Use w freely. The type of w is the same as that of v.
19 | 
20 | 	// It's OK to modify w. The change will not affect v.
21 | 	w.Foo = 456
22 | 	fmt.Println(w.Foo) // 456
23 | 	fmt.Println(v.Foo) // 123
24 | 
25 | 	// Once we need the original value stored in w, call `Unwrap`.
26 | 	orig := Unwrap(w).(*T)
27 | 	fmt.Println(orig == v) // true
28 | 	fmt.Println(orig.Foo)  // 123
29 | 
30 | 	// Or, we can simply undo any change made in w.
31 | 	// Note that `Undo` is significantly slower than `Unwrap`, so
32 | 	// the latter is always preferred.
33 | 	Undo(w)
34 | 	fmt.Println(w.Foo) // 123
35 | 
36 | 	// Output:
37 | 	// 456
38 | 	// 123
39 | 	// true
40 | 	// 123
41 | 	// 123
42 | }
43 | 
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Copyright (c) 2019 Huan Du
2 | 
3 | Permission is hereby granted, free of charge, to any person obtaining a copy
4 | of this software and associated documentation files (the "Software"), to deal
5 | in the Software without restriction, including without limitation the rights
6 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
7 | copies of the Software, and to permit persons to whom the Software is
8 | furnished to do so, subject to the following conditions:
9 | 
10 | The above copyright notice and this permission notice shall be included in
11 | all copies or substantial portions of the Software.
12 | 
13 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
18 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
19 | THE SOFTWARE.
20 | 
--------------------------------------------------------------------------------
/generic/api_test.go:
--------------------------------------------------------------------------------
1 | // Copyright 2022 Huan Du. All rights reserved.
2 | // Licensed under the MIT license that can be found in the LICENSE file.
3 | 
4 | package clone
5 | 
6 | import (
7 | 	"reflect"
8 | 	"testing"
9 | 
10 | 	"github.com/huandu/go-assert"
11 | )
12 | 
13 | type MyType struct {
14 | 	Foo int
15 | 	bar string
16 | }
17 | 
18 | func TestGenericAPI(t *testing.T) {
19 | 	a := assert.New(t)
20 | 	original := &MyType{
21 | 		Foo: 123,
22 | 		bar: "player",
23 | 	}
24 | 
25 | 	var v *MyType = Clone(original)
26 | 	a.Equal(v, original)
27 | 
28 | 	v = Slowly(original)
29 | 	a.Equal(v, original)
30 | 
31 | 	v = Wrap(original)
32 | 	a.Equal(v, original)
33 | 	a.Assert(Unwrap(v) == original)
34 | 
35 | 	v.Foo = 777
36 | 	a.Equal(Unwrap(v).Foo, original.Foo)
37 | 
38 | 	Undo(v)
39 | 	a.Equal(v, original)
40 | }
41 | 
42 | type MyPointer struct {
43 | 	Foo *int
44 | 	P   *MyPointer
45 | }
46 | 
47 | func TestMarkAsAPI(t *testing.T) {
48 | 	a := assert.New(t)
49 | 	MarkAsScalar(reflect.TypeOf(MyPointer{}))
50 | 	MarkAsOpaquePointer(reflect.TypeOf(&MyPointer{}))
51 | 
52 | 	n := 0
53 | 	original := MyPointer{
54 | 		Foo: &n,
55 | 	}
56 | 	original.P = &original
57 | 
58 | 	v := Clone(original)
59 | 	a.Assert(v.Foo == original.Foo)
60 | 	a.Assert(v.P == &original)
61 | }
62 | 
--------------------------------------------------------------------------------
/interfacedata.go:
--------------------------------------------------------------------------------
1 | package clone
2 | 
3 | import (
4 | 	"reflect"
5 | 	"unsafe"
6 | )
7 | 
8 | const sizeOfPointers = unsafe.Sizeof((interface{})(0)) / unsafe.Sizeof(uintptr(0))
9 | 
10 | // interfaceData is the underlying data of an interface.
11 | // As the reflect.Value's interfaceData method is deprecated,
12 | // it may be broken in any Go release.
13 | // It's better to create a custom type to hold the data.
14 | //
15 | // The types of interfaceData fields must be pointers.
16 | // It's a way to cheat the Go compiler into generating write barrier calls
17 | // when copying interfaces.
18 | type interfaceData struct {
19 | 	_ [sizeOfPointers]unsafe.Pointer
20 | }
21 | 
22 | var reflectValuePtrOffset uintptr
23 | 
24 | func init() {
25 | 	t := reflect.TypeOf(reflect.Value{})
26 | 	found := false
27 | 	fields := t.NumField()
28 | 
29 | 	for i := 0; i < fields; i++ {
30 | 		field := t.Field(i)
31 | 
32 | 		if field.Type.Kind() == reflect.UnsafePointer {
33 | 			found = true
34 | 			reflectValuePtrOffset = field.Offset
35 | 			break
36 | 		}
37 | 	}
38 | 
39 | 	if !found {
40 | 		panic("go-clone: fail to find internal ptr field in reflect.Value")
41 | 	}
42 | }
43 | 
44 | // parseReflectValue returns the underlying interface data in a reflect value.
45 | // It assumes that v is an interface value.
46 | func parseReflectValue(v reflect.Value) interfaceData {
47 | 	pv := (unsafe.Pointer)(uintptr(unsafe.Pointer(&v)) + reflectValuePtrOffset)
48 | 	ptr := *(*unsafe.Pointer)(pv)
49 | 	return *(*interfaceData)(ptr)
50 | }
51 | 
--------------------------------------------------------------------------------
/clone_sample_test.go:
--------------------------------------------------------------------------------
1 | // Copyright 2019 Huan Du. All rights reserved.
2 | // Licensed under the MIT license that can be found in the LICENSE file.
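//
// The Example functions in this file double as documentation: `go test`
// compiles them and checks the text after each "Output:" comment against the
// actual standard output, so the printed values shown here are verified on
// every run.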
3 | 
4 | package clone
5 | 
6 | import (
7 | 	"fmt"
8 | )
9 | 
10 | func ExampleSlowly() {
11 | 	type ListNode struct {
12 | 		Data int
13 | 		Next *ListNode
14 | 	}
15 | 	node1 := &ListNode{
16 | 		Data: 1,
17 | 	}
18 | 	node2 := &ListNode{
19 | 		Data: 2,
20 | 	}
21 | 	node3 := &ListNode{
22 | 		Data: 3,
23 | 	}
24 | 	node1.Next = node2
25 | 	node2.Next = node3
26 | 	node3.Next = node1
27 | 
28 | 	// We must use `Slowly` to clone a circular linked list.
29 | 	node := Slowly(node1).(*ListNode)
30 | 
31 | 	for i := 0; i < 10; i++ {
32 | 		fmt.Println(node.Data)
33 | 		node = node.Next
34 | 	}
35 | 
36 | 	// Output:
37 | 	// 1
38 | 	// 2
39 | 	// 3
40 | 	// 1
41 | 	// 2
42 | 	// 3
43 | 	// 1
44 | 	// 2
45 | 	// 3
46 | 	// 1
47 | }
48 | 
49 | func ExampleClone_tags() {
50 | 	type T struct {
51 | 		Normal *int
52 | 		Foo    *int `clone:"skip"`       // Skip cloning this field so that Foo will be nil in the cloned value.
53 | 		Bar    *int `clone:"-"`          // "-" is an alias of skip.
54 | 		Baz    *int `clone:"shadowcopy"` // Copy this field by value so that Baz will be the same pointer as the original one.
55 | 	}
56 | 
57 | 	a := 1
58 | 	t := &T{
59 | 		Normal: &a,
60 | 		Foo:    &a,
61 | 		Bar:    &a,
62 | 		Baz:    &a,
63 | 	}
64 | 	v := Clone(t).(*T)
65 | 
66 | 	fmt.Println(v.Normal == t.Normal) // false
67 | 	fmt.Println(v.Foo == nil)         // true
68 | 	fmt.Println(v.Bar == nil)         // true
69 | 	fmt.Println(v.Baz == t.Baz)       // true
70 | 
71 | 	// Output:
72 | 	// false
73 | 	// true
74 | 	// true
75 | 	// true
76 | }
77 | 
--------------------------------------------------------------------------------
/generic/arena_test.go:
--------------------------------------------------------------------------------
1 | // Copyright 2023 Huan Du. All rights reserved.
2 | // Licensed under the MIT license that can be found in the LICENSE file.
3 | 
4 | //go:build go1.20 && goexperiment.arenas
5 | // +build go1.20,goexperiment.arenas
6 | 
7 | package clone
8 | 
9 | import (
10 | 	"arena"
11 | 	"reflect"
12 | 	"runtime"
13 | 	"testing"
14 | 	"unsafe"
15 | 
16 | 	"github.com/huandu/go-assert"
17 | )
18 | 
19 | func TestArenaClone(t *testing.T) {
20 | 	a := assert.New(t)
21 | 
22 | 	type FooInner struct {
23 | 		Value float64
24 | 	}
25 | 
26 | 	type Foo struct {
27 | 		A string
28 | 		B []int
29 | 		C *FooInner
30 | 		D map[int]string
31 | 	}
32 | 
33 | 	foo := &Foo{
34 | 		A: "hello",
35 | 		B: []int{1, 2, 3},
36 | 		C: &FooInner{
37 | 			Value: 45.6,
38 | 		},
39 | 		D: map[int]string{
40 | 			7: "7",
41 | 		},
42 | 	}
43 | 
44 | 	ar := arena.NewArena()
45 | 
46 | 	cloned := ArenaClone(ar, foo)
47 | 	a.Equal(foo, cloned)
48 | 
49 | 	// If a pointer is not allocated by arena, arena.Clone() will return the pointer as it is.
50 | 	// Use this feature to check whether a pointer is allocated by arena.
51 | 	prevStr := foo.A
52 | 	str := arena.Clone(cloned.A)
53 | 	a.Assert(((*reflect.StringHeader)(unsafe.Pointer(&str))).Data != ((*reflect.StringHeader)(unsafe.Pointer(&prevStr))).Data)
54 | 
55 | 	a.Assert(arena.Clone(cloned) != foo)
56 | 
57 | 	slice := arena.Clone(cloned.B)
58 | 	a.Assert(&slice[0] != &foo.B[0])
59 | 
60 | 	a.Assert(arena.Clone(cloned.C) != foo.C)
61 | 
62 | 	prevStr = foo.D[7]
63 | 	str = arena.Clone(cloned.D[7])
64 | 	a.Assert(((*reflect.StringHeader)(unsafe.Pointer(&str))).Data != ((*reflect.StringHeader)(unsafe.Pointer(&prevStr))).Data)
65 | 
66 | 	// Make sure ar is alive.
67 | 	runtime.KeepAlive(ar)
68 | }
69 | 
--------------------------------------------------------------------------------
/generic/api.go:
--------------------------------------------------------------------------------
1 | // Copyright 2022 Huan Du. All rights reserved.
2 | // Licensed under the MIT license that can be found in the LICENSE file.
3 | 
4 | // Package clone provides functions to deep clone any Go data.
5 | // It also provides a wrapper to protect a pointer from any unexpected mutation.
6 | //
7 | // This package is only a proxy to the original go-clone package, with generic support.
8 | // To minimize the maintenance cost, there is no doc in this package.
9 | // Please read the documentation at https://pkg.go.dev/github.com/huandu/go-clone instead.
10 | package clone
11 | 
12 | import (
13 | 	"reflect"
14 | 	"unsafe"
15 | 
16 | 	"github.com/huandu/go-clone"
17 | )
18 | 
19 | type Func = clone.Func
20 | type Allocator = clone.Allocator
21 | type AllocatorMethods = clone.AllocatorMethods
22 | type Cloner = clone.Cloner
23 | 
24 | func Clone[T any](t T) T {
25 | 	return clone.Clone(t).(T)
26 | }
27 | 
28 | func Slowly[T any](t T) T {
29 | 	return clone.Slowly(t).(T)
30 | }
31 | 
32 | func Wrap[T any](t T) T {
33 | 	return clone.Wrap(t).(T)
34 | }
35 | 
36 | func Unwrap[T any](t T) T {
37 | 	return clone.Unwrap(t).(T)
38 | }
39 | 
40 | func Undo[T any](t T) {
41 | 	clone.Undo(t)
42 | }
43 | 
44 | func MarkAsOpaquePointer(t reflect.Type) {
45 | 	clone.MarkAsOpaquePointer(t)
46 | }
47 | 
48 | func MarkAsScalar(t reflect.Type) {
49 | 	clone.MarkAsScalar(t)
50 | }
51 | 
52 | func SetCustomFunc(t reflect.Type, fn Func) {
53 | 	clone.SetCustomFunc(t, fn)
54 | }
55 | 
56 | func FromHeap() *Allocator {
57 | 	return clone.FromHeap()
58 | }
59 | 
60 | func NewAllocator(pool unsafe.Pointer, methods *AllocatorMethods) (allocator *Allocator) {
61 | 	return clone.NewAllocator(pool, methods)
62 | }
63 | 
64 | func IsScalar(k reflect.Kind) bool {
65 | 	return clone.IsScalar(k)
66 | }
67 | 
68 | func MakeCloner(allocator *Allocator) Cloner {
69 | 	return clone.MakeCloner(allocator)
70 | }
71 | 
--------------------------------------------------------------------------------
/allocator_test.go:
--------------------------------------------------------------------------------
1 | // Copyright 2023 Huan Du. All rights reserved.
2 | // Licensed under the MIT license that can be found in the LICENSE file.
3 | 
4 | package clone
5 | 
6 | import (
7 | 	"reflect"
8 | 	"testing"
9 | 	"unsafe"
10 | 
11 | 	"github.com/huandu/go-assert"
12 | )
13 | 
14 | func TestAllocatorClone(t *testing.T) {
15 | 	a := assert.New(t)
16 | 	cnt := 0
17 | 	allocator := NewAllocator(nil, &AllocatorMethods{
18 | 		New: func(pool unsafe.Pointer, t reflect.Type) reflect.Value {
19 | 			cnt++
20 | 			return heapNew(pool, t)
21 | 		},
22 | 	})
23 | 
24 | 	type dataNode struct {
25 | 		Data int
26 | 		Next *dataNode
27 | 	}
28 | 	data := &dataNode{
29 | 		Data: 1,
30 | 		Next: &dataNode{
31 | 			Data: 2,
32 | 		},
33 | 	}
34 | 	cloned := allocator.Clone(reflect.ValueOf(data)).Interface().(*dataNode)
35 | 	a.Equal(data, cloned)
36 | 
37 | 	// Should allocate the following values:
38 | 	// - allocator
39 | 	// - data
40 | 	// - data.Next
41 | 	a.Equal(cnt, 3)
42 | }
43 | 
44 | func TestAllocatorCloneSlowly(t *testing.T) {
45 | 	a := assert.New(t)
46 | 	cnt := 0
47 | 	allocator := NewAllocator(nil, &AllocatorMethods{
48 | 		New: func(pool unsafe.Pointer, t reflect.Type) reflect.Value {
49 | 			cnt++
50 | 			return heapNew(pool, t)
51 | 		},
52 | 	})
53 | 
54 | 	type dataNode struct {
55 | 		Data int
56 | 		Next *dataNode
57 | 	}
58 | 
59 | 	// data is a circular linked list.
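	// After the assignment data.Next.Next.Next = data below, the nodes form
	// the ring 1 -> 2 -> 3 -> 1, which is exactly the case CloneSlowly handles.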
60 | 	data := &dataNode{
61 | 		Data: 1,
62 | 		Next: &dataNode{
63 | 			Data: 2,
64 | 			Next: &dataNode{
65 | 				Data: 3,
66 | 			},
67 | 		},
68 | 	}
69 | 	data.Next.Next.Next = data
70 | 
71 | 	cloned := allocator.CloneSlowly(reflect.ValueOf(data)).Interface().(*dataNode)
72 | 
73 | 	a.Equal(data.Data, cloned.Data)
74 | 	a.Equal(data.Next.Data, cloned.Next.Data)
75 | 	a.Equal(data.Next.Next.Data, cloned.Next.Next.Data)
76 | 	a.Equal(data.Next.Next.Next.Data, cloned.Next.Next.Next.Data)
77 | 	a.Equal(data.Next.Next.Next.Next.Data, cloned.Next.Next.Next.Next.Data)
78 | 	a.Assert(cloned.Next.Next.Next == cloned)
79 | 
80 | 	// Should allocate the following values:
81 | 	// - allocator
82 | 	// - data
83 | 	// - data.Next
84 | 	// - data.Next.Next
85 | 	a.Equal(cnt, 4)
86 | }
87 | 
--------------------------------------------------------------------------------
/atomic_go119.go:
--------------------------------------------------------------------------------
1 | // Copyright 2019 Huan Du. All rights reserved.
2 | // Licensed under the MIT license that can be found in the LICENSE file.
3 | 
4 | //go:build go1.19
5 | // +build go1.19
6 | 
7 | package clone
8 | 
9 | import (
10 | 	"reflect"
11 | 	"sync/atomic"
12 | )
13 | 
14 | func init() {
15 | 	SetCustomFunc(reflect.TypeOf(atomic.Bool{}), func(allocator *Allocator, old, new reflect.Value) {
16 | 		if !old.CanAddr() {
17 | 			return
18 | 		}
19 | 
20 | 		// Clone value inside atomic.Bool.
21 | 		oldValue := old.Addr().Interface().(*atomic.Bool)
22 | 		newValue := new.Addr().Interface().(*atomic.Bool)
23 | 		v := oldValue.Load()
24 | 		newValue.Store(v)
25 | 	})
26 | 	SetCustomFunc(reflect.TypeOf(atomic.Int32{}), func(allocator *Allocator, old, new reflect.Value) {
27 | 		if !old.CanAddr() {
28 | 			return
29 | 		}
30 | 
31 | 		// Clone value inside atomic.Int32.
32 | 		oldValue := old.Addr().Interface().(*atomic.Int32)
33 | 		newValue := new.Addr().Interface().(*atomic.Int32)
34 | 		v := oldValue.Load()
35 | 		newValue.Store(v)
36 | 	})
37 | 	SetCustomFunc(reflect.TypeOf(atomic.Int64{}), func(allocator *Allocator, old, new reflect.Value) {
38 | 		if !old.CanAddr() {
39 | 			return
40 | 		}
41 | 
42 | 		// Clone value inside atomic.Int64.
43 | 		oldValue := old.Addr().Interface().(*atomic.Int64)
44 | 		newValue := new.Addr().Interface().(*atomic.Int64)
45 | 		v := oldValue.Load()
46 | 		newValue.Store(v)
47 | 	})
48 | 	SetCustomFunc(reflect.TypeOf(atomic.Uint32{}), func(allocator *Allocator, old, new reflect.Value) {
49 | 		if !old.CanAddr() {
50 | 			return
51 | 		}
52 | 
53 | 		// Clone value inside atomic.Uint32.
54 | 		oldValue := old.Addr().Interface().(*atomic.Uint32)
55 | 		newValue := new.Addr().Interface().(*atomic.Uint32)
56 | 		v := oldValue.Load()
57 | 		newValue.Store(v)
58 | 	})
59 | 	SetCustomFunc(reflect.TypeOf(atomic.Uint64{}), func(allocator *Allocator, old, new reflect.Value) {
60 | 		if !old.CanAddr() {
61 | 			return
62 | 		}
63 | 
64 | 		// Clone value inside atomic.Uint64.
65 | 		oldValue := old.Addr().Interface().(*atomic.Uint64)
66 | 		newValue := new.Addr().Interface().(*atomic.Uint64)
67 | 		v := oldValue.Load()
68 | 		newValue.Store(v)
69 | 	})
70 | 	SetCustomFunc(reflect.TypeOf(atomic.Uintptr{}), func(allocator *Allocator, old, new reflect.Value) {
71 | 		if !old.CanAddr() {
72 | 			return
73 | 		}
74 | 
75 | 		// Clone value inside atomic.Uintptr.
76 | 		oldValue := old.Addr().Interface().(*atomic.Uintptr)
77 | 		newValue := new.Addr().Interface().(*atomic.Uintptr)
78 | 		v := oldValue.Load()
79 | 		newValue.Store(v)
80 | 	})
81 | }
82 | 
--------------------------------------------------------------------------------
/wrapper_test.go:
--------------------------------------------------------------------------------
1 | // Copyright 2019 Huan Du. All rights reserved.
2 | // Licensed under the MIT license that can be found in the LICENSE file.
3 | 
4 | package clone
5 | 
6 | import (
7 | 	"testing"
8 | 
9 | 	"github.com/huandu/go-assert"
10 | )
11 | 
12 | type testType struct {
13 | 	Foo    string
14 | 	Bar    map[string]interface{}
15 | 	Player []float64
16 | }
17 | 
18 | type testSimple struct {
19 | 	Foo int
20 | 	Bar string
21 | }
22 | 
23 | func TestWrap(t *testing.T) {
24 | 	a := assert.New(t)
25 | 	a.Equal(Wrap(nil), nil)
26 | 
27 | 	orig := &testType{
28 | 		Foo: "abcd",
29 | 		Bar: map[string]interface{}{
30 | 			"def": 123,
31 | 			"ghi": 78.9,
32 | 		},
33 | 		Player: []float64{
34 | 			12.3, 45.6, -78.9,
35 | 		},
36 | 	}
37 | 	wrapped := Wrap(orig).(*testType)
38 | 	a.Use(&orig, &wrapped)
39 | 
40 | 	a.Equal(orig, wrapped)
41 | 	a.Equal(Wrap(wrapped), wrapped)
42 | 
43 | 	wrapped.Foo = "xyz"
44 | 	wrapped.Bar["ghi"] = 98.7
45 | 	wrapped.Player[1] = 65.4
46 | 
47 | 	a.Equal(orig.Foo, "abcd")
48 | 	a.Equal(orig.Bar["ghi"], 78.9)
49 | 	a.Equal(orig.Player[1], 45.6)
50 | 
51 | 	actual := Unwrap(wrapped).(*testType)
52 | 	a.Assert(orig == actual)
53 | }
54 | 
55 | func TestWrapScalarPtr(t *testing.T) {
56 | 	a := assert.New(t)
57 | 	i := 123
58 | 	c := &i
59 | 	v := Wrap(c).(*int)
60 | 	orig := Unwrap(v).(*int)
61 | 	a.Use(&a, &i, &c, &v)
62 | 
63 | 	a.Assert(*v == *c)
64 | 	a.Assert(orig == c)
65 | }
66 | 
67 | func TestWrapNonePtr(t *testing.T) {
68 | 	a := assert.New(t)
69 | 	cases := []interface{}{
70 | 		123, nil, "abcd", []string{"ghi"}, map[string]int{"xyz": 123},
71 | 	}
72 | 
73 | 	for _, c := range cases {
74 | 		v := Wrap(c)
75 | 		a.Equal(c, v)
76 | 	}
77 | }
78 | 
79 | func TestUnwrapValueWhichIsNotWrapped(t *testing.T) {
80 | 	a := assert.New(t)
81 | 	s := &testType{
82 | 		Foo: "abcd",
83 | 		Bar: map[string]interface{}{
84 | 			"def": 123,
85 | 			"ghi": 78.9,
86 | 		},
87 | 		Player: []float64{
88 | 			12.3, 45.6, -78.9,
89 | 		},
90 | 	}
91 | 	v := Unwrap(s).(*testType)
92 | 	v.Foo = "xyz"
93 | 
94 | 	a.Equal(s, v)
95 | }
96 | 
97 | func TestUnwrapPlainValueWhichIsNotWrapped(t *testing.T) {
98 | 	a := assert.New(t)
99 | 	i := 0
100 | 	cases := []interface{}{
101 | 		123, "abc", nil, &i,
102 | 	}
103 | 
104 | 	for _, c := range cases {
105 | 		v := Unwrap(c)
106 | 
107 | 		a.Equal(c, v)
108 | 
109 | 		old := c
110 | 		Undo(c)
111 | 
112 | 		a.Equal(c, old)
113 | 	}
114 | }
115 | 
116 | func TestUndo(t *testing.T) {
117 | 	a := assert.New(t)
118 | 	orig := &testType{
119 | 		Foo: "abcd",
120 | 		Bar: map[string]interface{}{
121 | 			"def": 123,
122 | 			"ghi": 78.9,
123 | 		},
124 | 		Player: []float64{
125 | 			12.3, 45.6, -78.9,
126 | 		},
127 | 	}
128 | 	wrapped := Wrap(orig).(*testType)
129 | 	a.Use(&orig, &wrapped)
130 | 
131 | 	wrapped.Foo = "xyz"
132 | 	wrapped.Bar["ghi"] = 98.7
133 | 	wrapped.Player[1] = 65.4
134 | 
135 | 	a.Equal(orig.Foo, "abcd")
136 | 	a.Equal(orig.Bar["ghi"], 78.9)
137 | 	a.Equal(orig.Player[1], 45.6)
138 | 
139 | 	Undo(wrapped)
140 | 	a.Equal(orig, wrapped)
141 | }
142 | 
--------------------------------------------------------------------------------
/allocator_sample_test.go:
--------------------------------------------------------------------------------
1 | // Copyright 2023 Huan Du. All rights reserved.
2 | // Licensed under the MIT license that can be found in the LICENSE file.
3 | 
4 | //go:build !goexperiment.arenas
5 | 
6 | package clone
7 | 
8 | import (
9 | 	"fmt"
10 | 	"reflect"
11 | 	"runtime"
12 | 	"sync"
13 | 	"unsafe"
14 | )
15 | 
16 | func ExampleAllocator() {
17 | 	// We can create a new allocator to hold customized config without polluting the default allocator.
18 | 	// Calling FromHeap() is a convenient way to create a new allocator which allocates memory from the heap.
19 | 	allocator := FromHeap()
20 | 
21 | 	// Mark T as scalar only in the allocator.
22 | 	type T struct {
23 | 		Value *int
24 | 	}
25 | 	allocator.MarkAsScalar(reflect.TypeOf(new(T)))
26 | 
27 | 	t := &T{
28 | 		Value: new(int),
29 | 	}
30 | 	cloned1 := allocator.Clone(reflect.ValueOf(t)).Interface().(*T)
31 | 	cloned2 := Clone(t).(*T)
32 | 
33 | 	fmt.Println(t.Value == cloned1.Value)
34 | 	fmt.Println(t.Value == cloned2.Value)
35 | 
36 | 	// Output:
37 | 	// true
38 | 	// false
39 | }
40 | 
41 | func ExampleAllocator_syncPool() {
42 | 	type Foo struct {
43 | 		Bar int
44 | 	}
45 | 
46 | 	typeOfFoo := reflect.TypeOf(Foo{})
47 | 	poolUsed := 0 // For test only.
48 | 
49 | 	// A sync pool to allocate Foo.
50 | 	p := &sync.Pool{
51 | 		New: func() interface{} {
52 | 			return &Foo{}
53 | 		},
54 | 	}
55 | 
56 | 	// Create a custom allocator using p as the pool.
57 | 	allocator := NewAllocator(unsafe.Pointer(p), &AllocatorMethods{
58 | 		New: func(pool unsafe.Pointer, t reflect.Type) reflect.Value {
59 | 			// If t is Foo, allocate the value from the sync pool p.
60 | 			if t == typeOfFoo {
61 | 				poolUsed++ // For test only.
62 | 
63 | 				p := (*sync.Pool)(pool)
64 | 				v := p.Get()
65 | 				runtime.SetFinalizer(v, func(v *Foo) {
66 | 					*v = Foo{}
67 | 					p.Put(v)
68 | 				})
69 | 
70 | 				return reflect.ValueOf(v)
71 | 			}
72 | 
73 | 			// Fall back to the reflect API.
74 | 			return reflect.New(t)
75 | 		},
76 | 	})
77 | 
78 | 	// Do clone.
79 | 	target := []*Foo{
80 | 		{Bar: 1},
81 | 		{Bar: 2},
82 | 	}
83 | 	cloned := allocator.Clone(reflect.ValueOf(target)).Interface().([]*Foo)
84 | 
85 | 	fmt.Println(reflect.DeepEqual(target, cloned))
86 | 	fmt.Println(poolUsed)
87 | 
88 | 	// Output:
89 | 	// true
90 | 	// 2
91 | }
92 | 
93 | func ExampleAllocator_deepCloneString() {
94 | 	// By default, string is considered a scalar and copied by value.
95 | 	// In some cases, we may need to clone a string deeply, that is, copy its underlying bytes.
96 | 	// We can use a custom allocator to do this.
97 | 	allocator := NewAllocator(nil, &AllocatorMethods{
98 | 		IsScalar: func(t reflect.Kind) bool {
99 | 			return t != reflect.String && IsScalar(t)
100 | 		},
101 | 	})
102 | 	cloner := MakeCloner(allocator)
103 | 
104 | 	data := []byte("bytes")
105 | 	s1 := *(*string)(unsafe.Pointer(&data)) // Unsafe conversion from []byte to string.
106 | 	s2 := Clone(s1).(string)                // s2 shares the same underlying bytes with s1.
107 | 	s3 := cloner.Clone(s1).(string)         // s3 has its own underlying bytes.
108 | 
109 | 	copy(data, "magic") // Change the underlying bytes.
110 | 	fmt.Println(s1)
111 | 	fmt.Println(s2)
112 | 	fmt.Println(s3)
113 | 
114 | 	// Output:
115 | 	// magic
116 | 	// magic
117 | 	// bytes
118 | }
119 | 
--------------------------------------------------------------------------------
/generic/arena.go:
--------------------------------------------------------------------------------
1 | // Copyright 2023 Huan Du. All rights reserved.
2 | // Licensed under the MIT license that can be found in the LICENSE file.
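//
// A minimal usage sketch for the arena-aware API below (requires building
// with GOEXPERIMENT=arenas; someValue stands for any cloneable value):
//
//	a := arena.NewArena()
//	defer a.Free()
//	cloned := ArenaClone(a, someValue)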
3 | 
4 | //go:build go1.20 && goexperiment.arenas
5 | // +build go1.20,goexperiment.arenas
6 | 
7 | package clone
8 | 
9 | import (
10 | 	"arena"
11 | 	"reflect"
12 | 	"runtime"
13 | 	"unsafe"
14 | 
15 | 	"github.com/huandu/go-clone"
16 | )
17 | 
18 | // arenaAllocatorMethods allocates memory from an arena.
19 | var arenaAllocatorMethods = &clone.AllocatorMethods{
20 | 	New:       arenaNew,
21 | 	MakeSlice: arenaMakeSlice,
22 | 	MakeMap:   arenaMakeMap,
23 | 	MakeChan:  arenaMakeChan,
24 | }
25 | 
26 | // FromArena creates an allocator using arena a to allocate memory.
27 | func FromArena(a *arena.Arena) *clone.Allocator {
28 | 	return clone.NewAllocator(unsafe.Pointer(a), arenaAllocatorMethods)
29 | }
30 | 
31 | // ArenaClone recursively deep clones v to a new value in arena a.
32 | // It works in the same way as Clone, except it allocates all memory from the arena.
33 | func ArenaClone[T any](a *arena.Arena, v T) (nv T) {
34 | 	src := reflect.ValueOf(v)
35 | 	cloned := FromArena(a).Clone(src)
36 | 
37 | 	if !cloned.IsValid() {
38 | 		return
39 | 	}
40 | 
41 | 	dst := reflect.ValueOf(&nv).Elem()
42 | 	dst.Set(cloned)
43 | 	return
44 | }
45 | 
46 | // ArenaCloneSlowly recursively deep clones v to a new value in arena a.
47 | // It works in the same way as Slowly, except it allocates all memory from the arena.
48 | func ArenaCloneSlowly[T any](a *arena.Arena, v T) (nv T) {
49 | 	src := reflect.ValueOf(v)
50 | 	cloned := FromArena(a).CloneSlowly(src)
51 | 
52 | 	if !cloned.IsValid() {
53 | 		return
54 | 	}
55 | 
56 | 	dst := reflect.ValueOf(&nv).Elem()
57 | 	dst.Set(cloned)
58 | 	return
59 | }
60 | 
61 | func arenaNew(pool unsafe.Pointer, t reflect.Type) reflect.Value {
62 | 	return reflect.ArenaNew((*arena.Arena)(pool), reflect.PtrTo(t))
63 | }
64 | 
65 | // Define the slice header again to mute golint's warning.
66 | type sliceHeader reflect.SliceHeader
67 | 
68 | func arenaMakeSlice(pool unsafe.Pointer, t reflect.Type, len, cap int) reflect.Value {
69 | 	a := (*arena.Arena)(pool)
70 | 
71 | 	// As of go1.20, there is no reflect method to allocate a slice in an arena.
72 | 	// The following code is a hack: it allocates a large enough byte buffer
73 | 	// and then casts it to []T.
74 | 	et := t.Elem()
75 | 	l := int(et.Size())
76 | 	total := l * cap
77 | 
78 | 	data := arena.MakeSlice[byte](a, total, total)
79 | 	ptr := unsafe.Pointer(&data[0])
80 | 	elem := reflect.NewAt(et, ptr)
81 | 	slicePtr := reflect.ArenaNew(a, reflect.PtrTo(t))
82 | 	*(*sliceHeader)(slicePtr.UnsafePointer()) = sliceHeader{
83 | 		Data: elem.Pointer(),
84 | 		Len:  l,
85 | 		Cap:  cap,
86 | 	}
87 | 	runtime.KeepAlive(elem)
88 | 
89 | 	slice := slicePtr.Elem()
90 | 	return slice.Slice3(0, len, cap)
91 | }
92 | 
93 | func arenaMakeMap(pool unsafe.Pointer, t reflect.Type, n int) reflect.Value {
94 | 	// As of go1.20, there is no way to allocate a map in an arena.
95 | 	// Fall back to heap allocation.
96 | 	return reflect.MakeMapWithSize(t, n)
97 | }
98 | 
99 | func arenaMakeChan(pool unsafe.Pointer, t reflect.Type, buffer int) reflect.Value {
100 | 	// As of go1.20, there is no way to allocate a chan in an arena.
101 | 	// Fall back to heap allocation.
102 | 	return reflect.MakeChan(t, buffer)
103 | }
104 | 
--------------------------------------------------------------------------------
/allocatormethods.go:
--------------------------------------------------------------------------------
1 | // Copyright 2023 Huan Du. All rights reserved.
2 | // Licensed under the MIT license that can be found in the LICENSE file.
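//
// Method resolution in this file falls through a chain: a method set on
// AllocatorMethods wins; otherwise the Parent allocator handles it; otherwise
// the default heap-based allocator does. A sketch of a child allocator that
// only overrides New (parentAllocator is assumed to exist; the log call is
// purely illustrative):
//
//	child := NewAllocator(nil, &AllocatorMethods{
//		Parent: parentAllocator,
//		New: func(pool unsafe.Pointer, t reflect.Type) reflect.Value {
//			log.Printf("allocating %v", t)
//			return reflect.New(t)
//		},
//	})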
3 | 
4 | package clone
5 | 
6 | import (
7 | 	"reflect"
8 | 	"unsafe"
9 | )
10 | 
11 | // AllocatorMethods defines all methods required by an allocator.
12 | // If any of these methods is nil, the allocator falls back to a default method, which allocates memory from the heap.
13 | type AllocatorMethods struct {
14 | 	// Parent is the allocator that handles all methods not defined here.
15 | 	// If it's nil, the default allocator is used.
16 | 	Parent *Allocator
17 | 
18 | 	New       func(pool unsafe.Pointer, t reflect.Type) reflect.Value
19 | 	MakeSlice func(pool unsafe.Pointer, t reflect.Type, len, cap int) reflect.Value
20 | 	MakeMap   func(pool unsafe.Pointer, t reflect.Type, n int) reflect.Value
21 | 	MakeChan  func(pool unsafe.Pointer, t reflect.Type, buffer int) reflect.Value
22 | 	IsScalar  func(k reflect.Kind) bool
23 | }
24 | 
25 | func (am *AllocatorMethods) parent() *Allocator {
26 | 	if am != nil && am.Parent != nil {
27 | 		return am.Parent
28 | 	}
29 | 
30 | 	return nil
31 | }
32 | 
33 | func (am *AllocatorMethods) new(parent *Allocator, pool unsafe.Pointer) func(pool unsafe.Pointer, t reflect.Type) reflect.Value {
34 | 	if am != nil && am.New != nil {
35 | 		return am.New
36 | 	}
37 | 
38 | 	if parent != nil {
39 | 		if parent.pool == pool {
40 | 			return parent.new
41 | 		} else {
42 | 			return func(pool unsafe.Pointer, t reflect.Type) reflect.Value {
43 | 				return parent.New(t)
44 | 			}
45 | 		}
46 | 	}
47 | 
48 | 	return defaultAllocator.new
49 | }
50 | 
51 | func (am *AllocatorMethods) makeSlice(parent *Allocator, pool unsafe.Pointer) func(pool unsafe.Pointer, t reflect.Type, len, cap int) reflect.Value {
52 | 	if am != nil && am.MakeSlice != nil {
53 | 		return am.MakeSlice
54 | 	}
55 | 
56 | 	if parent != nil {
57 | 		if parent.pool == pool {
58 | 			return parent.makeSlice
59 | 		} else {
60 | 			return func(pool unsafe.Pointer, t reflect.Type, len, cap int) reflect.Value {
61 | 				return parent.MakeSlice(t, len, cap)
62 | 			}
63 | 		}
64 | 	}
65 | 
66 | 	return defaultAllocator.makeSlice
67 | }
68 | 
69 | func (am *AllocatorMethods) makeMap(parent *Allocator, pool unsafe.Pointer) func(pool unsafe.Pointer, t reflect.Type, n int) reflect.Value {
70 | 	if am != nil && am.MakeMap != nil {
71 | 		return am.MakeMap
72 | 	}
73 | 
74 | 	if parent != nil {
75 | 		if parent.pool == pool {
76 | 			return parent.makeMap
77 | 		} else {
78 | 			return func(pool unsafe.Pointer, t reflect.Type, n int) reflect.Value {
79 | 				return parent.MakeMap(t, n)
80 | 			}
81 | 		}
82 | 	}
83 | 
84 | 	return defaultAllocator.makeMap
85 | }
86 | 
87 | func (am *AllocatorMethods) makeChan(parent *Allocator, pool unsafe.Pointer) func(pool unsafe.Pointer, t reflect.Type, buffer int) reflect.Value {
88 | 	if am != nil && am.MakeChan != nil {
89 | 		return am.MakeChan
90 | 	}
91 | 
92 | 	if parent != nil {
93 | 		if parent.pool == pool {
94 | 			return parent.makeChan
95 | 		} else {
96 | 			return func(pool unsafe.Pointer, t reflect.Type, buffer int) reflect.Value {
97 | 				return parent.MakeChan(t, buffer)
98 | 			}
99 | 		}
100 | 	}
101 | 
102 | 	return defaultAllocator.makeChan
103 | }
104 | 
105 | func (am *AllocatorMethods) isScalar(parent *Allocator) func(t reflect.Kind) bool {
106 | 	if am != nil && am.IsScalar != nil {
107 | 		return am.IsScalar
108 | 	}
109 | 
110 | 	if parent != nil {
111 | 		return parent.isScalar
112 | 	}
113 | 
114 | 	return defaultAllocator.isScalar
115 | }
116 | 
--------------------------------------------------------------------------------
/allocatormethods_test.go:
--------------------------------------------------------------------------------
1 | // Copyright 2023 Huan Du. All rights reserved.
2 | // Licensed under the MIT license that can be found in the LICENSE file.
3 | 
4 | package clone
5 | 
6 | import (
7 | 	"reflect"
8 | 	"sync"
9 | 	"testing"
10 | 	"unsafe"
11 | 
12 | 	"github.com/huandu/go-assert"
13 | )
14 | 
15 | func TestAllocatorMethodsParent(t *testing.T) {
16 | 	a := assert.New(t)
17 | 	parent := NewAllocator(nil, &AllocatorMethods{
18 | 		IsScalar: func(k reflect.Kind) bool {
19 | 			return k == reflect.Int
20 | 		},
21 | 	})
22 | 	allocator := NewAllocator(nil, &AllocatorMethods{
23 | 		Parent: parent,
24 | 	})
25 | 
26 | 	a.Assert(parent.parent == defaultAllocator)
27 | 	a.Assert(allocator.parent == parent)
28 | 
29 | 	// Set up customizations in parent.
30 | 	type T1 struct {
31 | 		Data []byte
32 | 	}
33 | 	type T2 struct {
34 | 		Data []byte
35 | 	}
36 | 	type T3 struct {
37 | 		Data []byte
38 | 	}
39 | 	typeOfT1 := reflect.TypeOf(new(T1))
40 | 	typeOfT2 := reflect.TypeOf(new(T2))
41 | 	typeOfT3 := reflect.TypeOf(new(T3))
42 | 	customFuncCalled := 0
43 | 	parent.MarkAsScalar(typeOfT1)
44 | 	parent.MarkAsOpaquePointer(typeOfT2)
45 | 	parent.SetCustomFunc(typeOfT3, func(allocator *Allocator, old, new reflect.Value) {
46 | 		customFuncCalled++
47 | 	})
48 | 
49 | 	// All customizations should be inherited from parent.
50 | 	st1 := allocator.loadStructType(typeOfT1.Elem())
51 | 	st2 := allocator.loadStructType(typeOfT2.Elem())
52 | 	st3 := allocator.loadStructType(typeOfT3.Elem())
53 | 	a.Equal(len(st1.PointerFields), 0)
54 | 	a.Assert(st1.fn == nil)
55 | 	a.Equal(len(st2.PointerFields), 1)
56 | 	a.Assert(st2.fn == nil)
57 | 	a.Equal(len(st3.PointerFields), 1)
58 | 	a.Assert(st3.fn != nil)
59 | 	a.Assert(!allocator.isOpaquePointer(typeOfT1))
60 | 	a.Assert(allocator.isOpaquePointer(typeOfT2))
61 | 	a.Assert(!allocator.isOpaquePointer(typeOfT3))
62 | 	a.Assert(allocator.isScalar(reflect.Int))
63 | 	a.Assert(!allocator.isScalar(reflect.Uint))
64 | }
65 | 
66 | func TestAllocatorMethodsPool(t *testing.T) {
67 | 	a := assert.New(t)
68 | 	pool1Called := 0
69 | 	pool1 := &sync.Pool{
70 | 		New: func() interface{} {
71 | 			pool1Called++
72 | 			return nil
73 | 		},
74 | 	}
75 | 	pool2Called := 0
76 | 	pool2 := &sync.Pool{
77 | 		New: func() interface{} {
78 | 			pool2Called++
79 | 			return nil
80 | 		},
81 | 	}
82 | 	parent := NewAllocator(unsafe.Pointer(pool1), &AllocatorMethods{
83 | 		New: func(pool unsafe.Pointer, t reflect.Type) reflect.Value {
84 | 			p := (*sync.Pool)(pool)
85 | 			p.Get()
86 | 			return defaultAllocator.New(t)
87 | 		},
88 | 		MakeSlice: func(pool unsafe.Pointer, t reflect.Type, len, cap int) reflect.Value {
89 | 			p := (*sync.Pool)(pool)
90 | 			p.Get()
91 | 			return defaultAllocator.MakeSlice(t, len, cap)
92 | 		},
93 | 		MakeMap: func(pool unsafe.Pointer, t reflect.Type, size int) reflect.Value {
94 | 			p := (*sync.Pool)(pool)
95 | 			p.Get()
96 | 			return defaultAllocator.MakeMap(t, size)
97 | 		},
98 | 	})
99 | 	allocator := NewAllocator(unsafe.Pointer(pool2), &AllocatorMethods{
100 | 		Parent: parent,
101 | 		MakeChan: func(pool unsafe.Pointer, t reflect.Type, size int) reflect.Value {
102 | 			p := (*sync.Pool)(pool)
103 | 			p.Get()
104 | 			return defaultAllocator.MakeChan(t, size)
105 | 		},
106 | 	})
107 | 
108 | 	// All allocations should be delegated to the parent.
109 | 	allocator.New(reflect.TypeOf(1))
110 | 	allocator.MakeSlice(reflect.TypeOf([]int{}), 0, 0)
111 | 	allocator.MakeMap(reflect.TypeOf(map[int]int{}), 0)
112 | 	allocator.MakeChan(reflect.TypeOf(make(chan int)), 0)
113 | 
114 | 	// 1 for new parent allocator itself.
115 | 	// 1 for new allocator itself.
116 | 	// 3 for New, MakeSlice and MakeMap.
117 | 	a.Equal(pool1Called, 5)
118 | 
119 | 	// 1 for MakeChan.
120 | 	a.Equal(pool2Called, 1)
121 | }
122 | 
--------------------------------------------------------------------------------
/wrapper.go:
--------------------------------------------------------------------------------
1 | // Copyright 2019 Huan Du. All rights reserved.
2 | // Licensed under the MIT license that can be found in the LICENSE file.
3 | 
4 | package clone
5 | 
6 | import (
7 | 	"encoding/binary"
8 | 	"hash/crc64"
9 | 	"reflect"
10 | 	"sync"
11 | 	"unsafe"
12 | )
13 | 
14 | var (
15 | 	sizeOfChecksum = unsafe.Sizeof(uint64(0))
16 | 
17 | 	crc64Table = crc64.MakeTable(crc64.ECMA)
18 | 
19 | 	cachedWrapperTypes sync.Map
20 | )
21 | 
22 | // Wrap creates a wrapper of v, which must be a pointer.
23 | // If v is not a pointer, Wrap simply returns v and does nothing.
24 | //
25 | // The wrapper is a deep clone of v's value. It holds a shadow copy of v internally.
26 | //
27 | //	t := &T{Foo: 123}
28 | //	v := Wrap(t).(*T)               // v is a clone of t.
29 | //	reflect.DeepEqual(t, v) == true // v equals t.
30 | //	v.Foo = 456                     // v.Foo is changed, but t.Foo doesn't change.
31 | //	orig := Unwrap(v)               // Use `Unwrap` to discard the wrapper and return the original value, which is t.
32 | //	orig.(*T) == t                  // orig and t are exactly the same.
33 | //	Undo(v)                         // Use `Undo` to discard any change made to v.
34 | //	v.Foo == t.Foo                  // Now, the values of v and t are the same again.
35 | func Wrap(v interface{}) interface{} {
36 | 	if v == nil {
37 | 		return v
38 | 	}
39 | 
40 | 	val := reflect.ValueOf(v)
41 | 	pt := val.Type()
42 | 
43 | 	if val.Kind() != reflect.Ptr {
44 | 		return v
45 | 	}
46 | 
47 | 	t := pt.Elem()
48 | 	elem := val.Elem()
49 | 	ptr := unsafe.Pointer(val.Pointer())
50 | 	cache, ok := cachedWrapperTypes.Load(t)
51 | 
52 | 	if !ok {
53 | 		cache = reflect.StructOf([]reflect.StructField{
54 | 			{
55 | 				Name:      "T",
56 | 				Type:      t,
57 | 				Anonymous: true,
58 | 			},
59 | 			{
60 | 				Name: "Checksum",
61 | 				Type: reflect.TypeOf(uint64(0)),
62 | 			},
63 | 			{
64 | 				Name: "Origin",
65 | 				Type: pt,
66 | 			},
67 | 		})
68 | 		cachedWrapperTypes.Store(t, cache)
69 | 	}
70 | 
71 | 	wrapperType := cache.(reflect.Type)
72 | 	pw := defaultAllocator.New(wrapperType)
73 | 
74 | 	wrapperPtr := unsafe.Pointer(pw.Pointer())
75 | 	wrapper := pw.Elem()
76 | 
77 | 	// Equivalent code: wrapper.T = Clone(v)
78 | 	field := wrapper.Field(0)
79 | 	field.Set(heapCloneState.clone(elem))
80 | 
81 | 	// Equivalent code: wrapper.Checksum = makeChecksum(v)
82 | 	checksumPtr := unsafe.Pointer((uintptr(wrapperPtr) + t.Size()))
83 | 	*(*uint64)(checksumPtr) = makeChecksum(t, uintptr(wrapperPtr), uintptr(ptr))
84 | 
85 | 	// Equivalent code: wrapper.Origin = v
86 | 	originPtr := unsafe.Pointer((uintptr(wrapperPtr) + t.Size() + sizeOfChecksum))
87 | 	*(*uintptr)(originPtr) = uintptr(ptr)
88 | 
89 | 	return field.Addr().Interface()
90 | }
91 | 
92 | func validateChecksum(t reflect.Type, ptr unsafe.Pointer) bool {
93 | 	pw := uintptr(ptr)
94 | 	orig := uintptr(getOrigin(t, ptr))
95 | 	checksum := *(*uint64)(unsafe.Pointer(uintptr(ptr) + t.Size()))
96 | 	expected := makeChecksum(t, pw, orig)
97 | 
98 | 	return checksum == expected
99 | }
100 | 
101 | func makeChecksum(t reflect.Type, pw uintptr, orig uintptr) uint64 {
102 | 	var data [binary.MaxVarintLen64 * 2]byte
103 | 	binary.PutUvarint(data[:binary.MaxVarintLen64], uint64(pw))
104 | 	binary.PutUvarint(data[binary.MaxVarintLen64:], uint64(orig))
105 | 	return crc64.Checksum(data[:], crc64Table)
106 | }
107 | 
108 | func getOrigin(t reflect.Type, ptr unsafe.Pointer) unsafe.Pointer {
109 | 	return *(*unsafe.Pointer)(unsafe.Pointer(uintptr(ptr) + t.Size() + sizeOfChecksum))
110 | }
111 | 
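// Memory layout of a wrapper allocated in Wrap (informal sketch derived from
// the reflect.StructOf call above):
//
//	+--------------+-----------------+------------+
//	| T (embedded) | Checksum uint64 | Origin *T  |
//	+--------------+-----------------+------------+
//	               ^ offset t.Size() ^ offset t.Size()+sizeOfChecksum
//
// Unwrap and Undo below rely on this layout to locate the checksum and the
// original pointer behind a wrapped *T.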
112 | // Unwrap returns v's original value if v is a wrapped value.
113 | // Otherwise, it simply returns v itself.
114 | func Unwrap(v interface{}) interface{} {
115 | 	if v == nil {
116 | 		return v
117 | 	}
118 | 
119 | 	val := reflect.ValueOf(v)
120 | 
121 | 	if !isWrapped(val) {
122 | 		return v
123 | 	}
124 | 
125 | 	origVal := origin(val)
126 | 	return origVal.Interface()
127 | }
128 | 
129 | func origin(val reflect.Value) reflect.Value {
130 | 	pt := val.Type()
131 | 	t := pt.Elem()
132 | 	ptr := unsafe.Pointer(val.Pointer())
133 | 	orig := getOrigin(t, ptr)
134 | 	origVal := reflect.NewAt(t, orig)
135 | 	return origVal
136 | }
137 | 
138 | // Undo discards any change made in a wrapped value.
139 | // If v is not a wrapped value, nothing happens.
140 | func Undo(v interface{}) {
141 | 	if v == nil {
142 | 		return
143 | 	}
144 | 
145 | 	val := reflect.ValueOf(v)
146 | 
147 | 	if !isWrapped(val) {
148 | 		return
149 | 	}
150 | 
151 | 	origVal := origin(val)
152 | 	elem := val.Elem()
153 | 	elem.Set(heapCloneState.clone(origVal.Elem()))
154 | }
155 | 
156 | func isWrapped(val reflect.Value) bool {
157 | 	pt := val.Type()
158 | 
159 | 	if pt.Kind() != reflect.Ptr {
160 | 		return false
161 | 	}
162 | 
163 | 	t := pt.Elem()
164 | 	ptr := unsafe.Pointer(val.Pointer())
165 | 	return validateChecksum(t, ptr)
166 | }
167 | 
--------------------------------------------------------------------------------
/structtype_sample_test.go:
--------------------------------------------------------------------------------
1 | // Copyright 2019 Huan Du. All rights reserved.
2 | // Licensed under the MIT license that can be found in the LICENSE file.
3 | 
4 | package clone
5 | 
6 | import (
7 | 	"encoding/json"
8 | 	"fmt"
9 | 	"os"
10 | 	"reflect"
11 | )
12 | 
13 | func ExampleMarkAsScalar() {
14 | 	type ScalarType struct {
15 | 		stderr *os.File
16 | 	}
17 | 
18 | 	MarkAsScalar(reflect.TypeOf(new(ScalarType)))
19 | 
20 | 	scalar := &ScalarType{
21 | 		stderr: os.Stderr,
22 | 	}
23 | 	cloned := Clone(scalar).(*ScalarType)
24 | 
25 | 	// cloned is a shadow copy of scalar,
26 | 	// so the pointer value should be the same.
27 | 	fmt.Println(scalar.stderr == cloned.stderr)
28 | 
29 | 	// Output:
30 | 	// true
31 | }
32 | 
33 | func ExampleMarkAsOpaquePointer() {
34 | 	type OpaquePointerType struct {
35 | 		foo int
36 | 	}
37 | 
38 | 	MarkAsOpaquePointer(reflect.TypeOf(new(OpaquePointerType)))
39 | 
40 | 	opaque := &OpaquePointerType{
41 | 		foo: 123,
42 | 	}
43 | 	cloned := Clone(opaque).(*OpaquePointerType)
44 | 
45 | 	// cloned is a shadow copy of opaque,
46 | 	// so opaque and cloned should be the same.
47 | 	fmt.Println(opaque == cloned)
48 | 
49 | 	// Output:
50 | 	// true
51 | }
52 | 
53 | func ExampleSetCustomFunc() {
54 | 	type MyStruct struct {
55 | 		Data []interface{}
56 | 	}
57 | 
58 | 	// Filter nil values in Data when cloning the old value.
59 | 	SetCustomFunc(reflect.TypeOf(MyStruct{}), func(allocator *Allocator, old, new reflect.Value) {
60 | 		// new is a zero value of MyStruct.
61 | 		// We can get its address to update it.
62 | 		value := new.Addr().Interface().(*MyStruct)
63 | 
64 | 		// old is guaranteed to be a MyStruct.
65 | 		// As old.CanAddr() may be false, we'd better read the Data field directly.
66 | 		data := old.FieldByName("Data")
67 | 		l := data.Len()
68 | 
69 | 		for i := 0; i < l; i++ {
70 | 			val := data.Index(i)
71 | 
72 | 			if val.IsNil() {
73 | 				continue
74 | 			}
75 | 
76 | 			n := allocator.Clone(val).Interface()
77 | 			value.Data = append(value.Data, n)
78 | 		}
79 | 	})
80 | 
81 | 	slice := &MyStruct{
82 | 		Data: []interface{}{
83 | 			"abc", nil, 123, nil,
84 | 		},
85 | 	}
86 | 	cloned := Clone(slice).(*MyStruct)
87 | 	fmt.Println(cloned.Data)
88 | 
89 | 	// Output:
90 | 	// [abc 123]
91 | }
92 | 
93 | func ExampleSetCustomFunc_partiallyClone() {
94 | 	type T struct {
95 | 		Value int
96 | 	}
97 | 
98 | 	type MyStruct struct {
99 | 		S1 *T
100 | 		S2 string
101 | 		S3 int
102 | 	}
103 | 
104 | 	SetCustomFunc(reflect.TypeOf(T{}), func(allocator *Allocator, old, new reflect.Value) {
105 | 		oldField := old.FieldByName("Value")
106 | 		newField := new.FieldByName("Value")
107 | 		newField.SetInt(oldField.Int() + 100)
108 | 	})
109 | 
110 | 	SetCustomFunc(reflect.TypeOf(MyStruct{}), func(allocator *Allocator, old, new reflect.Value) {
111 | 		// We can call allocator.Clone to clone the old value without worrying about infinite recursion.
112 | 		// This custom func is temporarily disabled for the old value in the allocator.
113 | 		new.Set(allocator.Clone(old))
114 | 
115 | 		oldField := old.FieldByName("S2")
116 | 		newField := new.FieldByName("S2")
117 | 		newField.SetString(oldField.String() + "_suffix")
118 | 	})
119 | 
120 | 	st := &MyStruct{
121 | 		S1: &T{
122 | 			Value: 1,
123 | 		},
124 | 		S2: "abc",
125 | 		S3: 2,
126 | 	}
127 | 	cloned := Clone(st).(*MyStruct)
128 | 
129 | 	data, _ := json.Marshal(st)
130 | 	fmt.Println(string(data))
131 | 	data, _ = json.Marshal(cloned)
132 | 	fmt.Println(string(data))
133 | 
134 | 	// Output:
135 | 	// {"S1":{"Value":1},"S2":"abc","S3":2}
136 | 	// {"S1":{"Value":101},"S2":"abc_suffix","S3":2}
137 | }
138 | 
139 | func ExampleSetCustomFunc_conditionalClonePointer() {
140 | 	type T struct {
141 | 		shouldClone bool
142 | 		data        []string
143 | 	}
144 | 
145 | 	type Pointer struct {
146 | 		*T
147 | 	}
148 | 
149 | 	values := map[string]Pointer{
150 | 		"shouldClone": {
151 | 			T: &T{
152 | 				shouldClone: true,
153 | 				data:        []string{"a", "b", "c"},
154 | 			},
155 | 		},
156 | 		"shouldNotClone": {
157 | 			T: &T{
158 | 				shouldClone: false,
159 | 				data:        []string{"a", "b", "c"},
160 | 			},
161 | 		},
162 | 	}
163 | 	SetCustomFunc(reflect.TypeOf(Pointer{}), func(allocator *Allocator, old, new reflect.Value) {
164 | 		p := old.Interface().(Pointer)
165 | 
166 | 		if p.shouldClone {
167 | 			np := allocator.Clone(old).Interface().(Pointer)
168 | 
169 | 			// Update the cloned value to make the change very obvious.
170 | 			np.shouldClone = false
171 | 			np.data = append(np.data, "cloned")
172 | 			new.Set(reflect.ValueOf(np))
173 | 		} else {
174 | 			new.Set(old)
175 | 		}
176 | 	})
177 | 
178 | 	cloned := Clone(values).(map[string]Pointer)
179 | 	fmt.Println(cloned["shouldClone"].data)
180 | 	fmt.Println(cloned["shouldNotClone"].data)
181 | 
182 | 	// Output:
183 | 	// [a b c cloned]
184 | 	// [a b c]
185 | }
186 | 
--------------------------------------------------------------------------------
/structtype_test.go:
--------------------------------------------------------------------------------
1 | // Copyright 2019 Huan Du. All rights reserved.
2 | // Licensed under the MIT license that can be found in the LICENSE file.
3 | 
4 | package clone
5 | 
6 | import (
7 | 	"crypto/elliptic"
8 | 	"reflect"
9 | 	"sync"
10 | 	"sync/atomic"
11 | 	"testing"
12 | 	"time"
13 | 	"unsafe"
14 | 
15 | 	"github.com/huandu/go-assert"
16 | )
17 | 
18 | type NoPointer struct {
19 | 	Foo int
20 | 	Bar string
21 | }
22 | 
23 | type WithPointer struct {
24 | 	foo map[string]string
25 | 	bar []int
26 | }
27 | 
28 | func TestMarkAsScalar(t *testing.T) {
29 | 	a := assert.New(t)
30 | 	oldCnt := 0
31 | 	newCnt := 0
32 | 	a.Use(&oldCnt, &newCnt)
33 | 
34 | 	// Count cache.
35 | 	defaultAllocator.cachedStructTypes.Range(func(key, value interface{}) bool {
36 | 		oldCnt++
37 | 		return true
38 | 	})
39 | 
40 | 	// Add 2 valid types.
41 | 	MarkAsScalar(reflect.TypeOf(new(NoPointer)))
42 | 	MarkAsScalar(reflect.TypeOf(new(WithPointer)))
43 | 	MarkAsScalar(reflect.TypeOf(new(int))) // Should be ignored.
44 | 
45 | 	// Count cache again.
46 | 	defaultAllocator.cachedStructTypes.Range(func(key, value interface{}) bool {
47 | 		newCnt++
48 | 		return true
49 | 	})
50 | 
51 | 	a.Assert(oldCnt+2 == newCnt)
52 | 
53 | 	// As WithPointer is marked as scalar, Clone returns a shadow copy.
54 | 	value := &WithPointer{
55 | 		foo: map[string]string{
56 | 			"key": "value",
57 | 		},
58 | 		bar: []int{1, 2, 3},
59 | 	}
60 | 	cloned := Clone(value).(*WithPointer)
61 | 	a.Use(&value, &cloned)
62 | 
63 | 	// cloned is a shadow copy.
64 | 	a.Equal(value, cloned)
65 | 	value.foo["key"] = "modified"
66 | 	value.bar[1] = 2000
67 | 	a.Equal(value, cloned)
68 | }
69 | 
70 | type MapKeys struct {
71 | 	mb map[bool]interface{}
72 | 	mi map[int]interface{}
73 | 	mi8 map[int8]interface{}
74 | 	mi16 map[int16]interface{}
75 | 	mi32 map[int32]interface{}
76 | 	mi64 map[int64]interface{}
77 | 	mui map[uint]interface{}
78 | 	mu8 map[uint8]interface{}
79 | 	mu16 map[uint16]interface{}
80 | 	mu32 map[uint32]interface{}
81 | 	mu64 map[uint64]interface{}
82 | 	muintptr map[uintptr]interface{}
83 | 	ms map[string]interface{}
84 | 	mf32 map[float32]interface{}
85 | 	mf64 map[float64]interface{}
86 | 	mc64 map[complex64]interface{}
87 | 	mc128 map[complex128]interface{}
88 | 	miface map[interface{}]interface{}
89 | 	mis map[Simple]interface{}
90 | 	// misp map[*Simple]interface{}
91 | 	munsafe map[unsafe.Pointer]interface{}
92 | }
93 | 
94 | func TestCopyScalarValue(t *testing.T) {
95 | 	a := assert.New(t)
96 | 	st := &MapKeys{
97 | 		mb: map[bool]interface{}{true: 2},
98 | 		mi: map[int]interface{}{-1: 2},
99 | 		mi8: map[int8]interface{}{-8: 2},
100 | 		mi16: map[int16]interface{}{-16: 2},
101 | 		mi32: map[int32]interface{}{-32: 2},
102 | 		mi64: map[int64]interface{}{-64: 2},
103 | 		mui: map[uint]interface{}{1: 2},
104 | 		mu8: map[uint8]interface{}{8: 2},
105 | 		mu16: map[uint16]interface{}{16: 2},
106 | 		mu32: map[uint32]interface{}{32: 2},
107 | 		mu64: map[uint64]interface{}{64: 2},
108 | 		muintptr: map[uintptr]interface{}{0xDEADC0DE: 2},
109 | 		ms: map[string]interface{}{"str": 2},
110 | 		mf32: map[float32]interface{}{3.2: 2},
111 | 		mf64: map[float64]interface{}{6.4: 2},
112 | 		mc64: map[complex64]interface{}{complex(6, 4): 2},
113 | 		mc128: map[complex128]interface{}{complex(1.2, 8): 2},
114 | 		miface: map[interface{}]interface{}{"iface": 2},
115 | 		mis: map[Simple]interface{}{{Foo: 123}: 2},
116 | 		munsafe: map[unsafe.Pointer]interface{}{unsafe.Pointer(t): 2},
117 | 	}
118 | 	cloned := Clone(st).(*MapKeys)
119 | 
120 | 	a.Equal(st, cloned)
121 | }
122 | 
123 | type noCopyValues struct {
124 | 	syncMutex sync.Mutex
125 | 	syncRWMutex sync.RWMutex
126 | 	syncWaitGroup sync.WaitGroup
127 | 	syncCond *sync.Cond
128 | 	syncPool sync.Pool
129 | 	syncMap sync.Map
130 | 	syncOnce sync.Once
131 | 	atomicValue atomic.Value
132 | }
133 | 
134 | func TestCloneNoCopyValues(t *testing.T) {
135 | 	a := assert.New(t)
136 | 	v := &noCopyValues{
137 | 		syncCond: sync.NewCond(func() *sync.Mutex {
138 | 			return &sync.Mutex{}
139 | 		}()),
140 | 		syncPool: sync.Pool{
141 | 			New: func() interface{} {
142 | 				return "pool"
143 | 			},
144 | 		},
145 | 	}
146 | 
147 | 	v.syncMutex.Lock()
148 | 	defer v.syncMutex.Unlock()
149 | 	v.syncRWMutex.RLock()
150 | 	defer v.syncRWMutex.RUnlock()
151 | 	v.syncWaitGroup.Add(1)
152 | 	defer v.syncWaitGroup.Done()
153 | 	v.syncCond.L.Lock()
154 | 	defer v.syncCond.L.Unlock()
155 | 	poolValue := v.syncPool.Get()
156 | 	v.syncPool.Put(poolValue)
157 | 	v.syncMap.Store("foo", "bar")
158 | 	v.syncOnce.Do(func() {})
159 | 	v.atomicValue.Store("value")
160 | 
161 | 	cloned := Clone(v).(*noCopyValues)
162 | 	done := make(chan bool, 1)
163 | 	ticker := time.NewTicker(100 * time.Millisecond)
164 | 	defer ticker.Stop()
165 | 
166 | 	a.Run("race", func(t *testing.T) {
167 | 		a := assert.New(t)
168 | 
169 | 		cloned.syncMutex.Lock()
170 | 		_ = 0
171 | 		cloned.syncMutex.Unlock()
172 | 
173 | 		cloned.syncRWMutex.RLock()
174 | 		_ = 0
175 | 		cloned.syncRWMutex.RUnlock()
176 | 
177 | 		cloned.syncWaitGroup.Add(1)
178 | 		cloned.syncWaitGroup.Done()
179 | 		cloned.syncWaitGroup.Wait()
180 | 
181 | 		cloned.syncCond.L.Lock()
182 | 		_ = 0
183 | 		cloned.syncCond.L.Unlock()
184 | 
185 | 		poolValue := cloned.syncPool.Get()
186 | 		a.Equal(poolValue, "pool")
187 | 
188 | 		mapValue, ok := cloned.syncMap.Load("foo")
189 | 		a.Equal(mapValue, "bar")
190 | 		a.Assert(ok)
191 | 
192 | 		onceValueShouldBeTrue := true
193 | 		cloned.syncOnce.Do(func() {
194 | 			onceValueShouldBeTrue = false
195 | 		})
196 | 		a.Assert(onceValueShouldBeTrue)
197 | 
198 | 		value := cloned.atomicValue.Load()
199 | 		a.Equal(value, "value")
200 | 
201 | 		done <- true
202 | 	})
203 | 
204 | 	select {
205 | 	case <-done:
206 | 	case <-ticker.C:
207 | 		a.Fatalf("unexpected lock is detected.")
208 | 	}
209 | }
210 | 
211 | func TestCloneCurveAsScalar(t *testing.T) {
212 | 	a := assert.New(t)
213 | 	curves := []elliptic.Curve{elliptic.P224(), elliptic.P256(), elliptic.P384(), elliptic.P521()}
214 | 	cloned := Clone(curves).([]elliptic.Curve)
215 | 
216 | 	for i, curve := range curves {
217 | 		c := cloned[i]
218 | 		a.Assert(curve == c)
219 | 	}
220 | }
221 | 
222 | type testOpaquePointer struct {
223 | 	foo int
224 | }
225 | 
226 | func TestMarkAsOpaquePointer(t *testing.T) {
227 | 	a := assert.New(t)
228 | 
229 | 	// Mark *testOpaquePointer as an opaque pointer.
230 | 	MarkAsOpaquePointer(reflect.TypeOf(&testOpaquePointer{}))
231 | 
232 | 	// No-op if a struct type is set as opaque.
233 | 	MarkAsOpaquePointer(reflect.TypeOf(testOpaquePointer{}))
234 | 
235 | 	opaque := &testOpaquePointer{
236 | 		foo: 1234,
237 | 	}
238 | 	cloned := Clone(&opaque).(**testOpaquePointer)
239 | 
240 | 	a.Assert(&opaque != cloned)
241 | 	a.Assert(opaque == *cloned)
242 | }
243 | 
--------------------------------------------------------------------------------
/allocator.go:
--------------------------------------------------------------------------------
1 | // Copyright 2023 Huan Du. All rights reserved.
2 | // Licensed under the MIT license that can be found in the LICENSE file.
3 | 
4 | package clone
5 | 
6 | import (
7 | 	"reflect"
8 | 	"runtime"
9 | 	"sync"
10 | 	"unsafe"
11 | )
12 | 
13 | const fieldTagName = "clone"
14 | const fieldTagValueSkip = "skip"
15 | const fieldTagValueSkipAlias = "-"
16 | const fieldTagValueShadowCopy = "shadowcopy"
17 | 
18 | var typeOfAllocator = reflect.TypeOf(Allocator{})
19 | 
20 | // defaultAllocator is the default allocator and allocates memory from the heap.
21 | var defaultAllocator = &Allocator{
22 | 	new: heapNew,
23 | 	makeSlice: heapMakeSlice,
24 | 	makeMap: heapMakeMap,
25 | 	makeChan: heapMakeChan,
26 | 	isScalar: IsScalar,
27 | }
28 | 
29 | // Allocator is a utility type for memory allocation.
30 | type Allocator struct {
31 | 	parent *Allocator
32 | 
33 | 	pool unsafe.Pointer
34 | 	new func(pool unsafe.Pointer, t reflect.Type) reflect.Value
35 | 	makeSlice func(pool unsafe.Pointer, t reflect.Type, len, cap int) reflect.Value
36 | 	makeMap func(pool unsafe.Pointer, t reflect.Type, n int) reflect.Value
37 | 	makeChan func(pool unsafe.Pointer, t reflect.Type, buffer int) reflect.Value
38 | 	isScalar func(t reflect.Kind) bool
39 | 
40 | 	cachedStructTypes sync.Map
41 | 	cachedPointerTypes sync.Map
42 | 	cachedCustomFuncTypes sync.Map
43 | }
44 | 
45 | // FromHeap creates an allocator which allocates memory from the heap.
46 | func FromHeap() *Allocator {
47 | 	return NewAllocator(nil, nil)
48 | }
49 | 
50 | // NewAllocator creates an allocator which allocates memory from the pool.
51 | // Both pool and methods are optional.
52 | //
53 | // If methods.New is not nil, the allocator itself is created by calling methods.New.
54 | //
55 | // The pool is a pointer to the memory pool which is opaque to the allocator.
56 | // It's the methods' responsibility to allocate memory from the pool properly.
57 | func NewAllocator(pool unsafe.Pointer, methods *AllocatorMethods) (allocator *Allocator) {
58 | 	parent := methods.parent()
59 | 	new := methods.new(parent, pool)
60 | 
61 | 	// Allocate the allocator from the pool.
62 | 	val := new(pool, typeOfAllocator)
63 | 	allocator = (*Allocator)(unsafe.Pointer(val.Pointer()))
64 | 	runtime.KeepAlive(val)
65 | 
66 | 	allocator.pool = pool
67 | 	allocator.new = new
68 | 	allocator.makeSlice = methods.makeSlice(parent, pool)
69 | 	allocator.makeMap = methods.makeMap(parent, pool)
70 | 	allocator.makeChan = methods.makeChan(parent, pool)
71 | 	allocator.isScalar = methods.isScalar(parent)
72 | 
73 | 	if parent == nil {
74 | 		parent = defaultAllocator
75 | 	}
76 | 
77 | 	allocator.parent = parent
78 | 	return
79 | }
80 | 
81 | // New returns a new zero value of t.
82 | func (a *Allocator) New(t reflect.Type) reflect.Value {
83 | 	return a.new(a.pool, t)
84 | }
85 | 
86 | // MakeSlice creates a new zero-initialized slice value of t with len and cap.
87 | func (a *Allocator) MakeSlice(t reflect.Type, len, cap int) reflect.Value {
88 | 	return a.makeSlice(a.pool, t, len, cap)
89 | }
90 | 
91 | // MakeMap creates a new map with minimum size n.
92 | func (a *Allocator) MakeMap(t reflect.Type, n int) reflect.Value {
93 | 	return a.makeMap(a.pool, t, n)
94 | }
95 | 
96 | // MakeChan creates a new chan with buffer.
97 | func (a *Allocator) MakeChan(t reflect.Type, buffer int) reflect.Value {
98 | 	return a.makeChan(a.pool, t, buffer)
99 | }
100 | 
101 | // Clone recursively deep clones val to a new value with memory allocated from a.
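// It's safe to call Clone on a value from within a custom clone function:
// the custom func is temporarily disabled for that value, which avoids infinite recursion.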
102 | func (a *Allocator) Clone(val reflect.Value) reflect.Value {
103 | 	return a.clone(val, true)
104 | }
105 | 
106 | func (a *Allocator) clone(val reflect.Value, inCustomFunc bool) reflect.Value {
107 | 	if !val.IsValid() {
108 | 		return val
109 | 	}
110 | 
111 | 	state := &cloneState{
112 | 		allocator: a,
113 | 	}
114 | 
115 | 	if inCustomFunc {
116 | 		state.skipCustomFuncValue = val
117 | 	}
118 | 
119 | 	return state.clone(val)
120 | }
121 | 
122 | // CloneSlowly recursively deep clones val to a new value with memory allocated from a.
123 | // It marks all cloned values internally, thus it can clone values containing pointer cycles.
124 | func (a *Allocator) CloneSlowly(val reflect.Value) reflect.Value {
125 | 	return a.cloneSlowly(val, true)
126 | }
127 | 
128 | func (a *Allocator) cloneSlowly(val reflect.Value, inCustomFunc bool) reflect.Value {
129 | 	if !val.IsValid() {
130 | 		return val
131 | 	}
132 | 
133 | 	state := &cloneState{
134 | 		allocator: a,
135 | 		visited: visitMap{},
136 | 		invalid: invalidPointers{},
137 | 	}
138 | 
139 | 	if inCustomFunc {
140 | 		state.skipCustomFuncValue = val
141 | 	}
142 | 
143 | 	cloned := state.clone(val)
144 | 	state.fix(cloned)
145 | 	return cloned
146 | }
147 | 
148 | func (a *Allocator) loadStructType(t reflect.Type) (st structType) {
149 | 	st, ok := a.lookupStructType(t)
150 | 
151 | 	if ok {
152 | 		return
153 | 	}
154 | 
155 | 	num := t.NumField()
156 | 	zeroFields := make([]structFieldSize, 0, num)
157 | 	pointerFields := make([]structFieldType, 0, num)
158 | 
159 | 	// Find pointer fields in depth-first order.
160 | 	for i := 0; i < num; i++ {
161 | 		field := t.Field(i)
162 | 		ft := field.Type
163 | 		k := ft.Kind()
164 | 		tag := field.Tag.Get(fieldTagName)
165 | 
166 | 		if tag == fieldTagValueSkip || tag == fieldTagValueSkipAlias {
167 | 			zeroFields = append(zeroFields, structFieldSize{
168 | 				Offset: field.Offset,
169 | 				Size: uintptr(ft.Size()),
170 | 			})
171 | 			continue
172 | 		}
173 | 
174 | 		if tag == fieldTagValueShadowCopy || a.isScalar(k) {
175 | 			continue
176 | 		}
177 | 
178 | 		switch k {
179 | 		case reflect.Array:
180 | 			if ft.Len() == 0 {
181 | 				continue
182 | 			}
183 | 
184 | 			elem := ft.Elem()
185 | 
186 | 			if a.isScalar(elem.Kind()) {
187 | 				continue
188 | 			}
189 | 
190 | 			if elem.Kind() == reflect.Struct {
191 | 				if fst := a.loadStructType(elem); fst.CanShadowCopy() {
192 | 					continue
193 | 				}
194 | 			}
195 | 		case reflect.Struct:
196 | 			if fst := a.loadStructType(ft); fst.CanShadowCopy() {
197 | 				continue
198 | 			}
199 | 		}
200 | 
201 | 		pointerFields = append(pointerFields, structFieldType{
202 | 			Offset: field.Offset,
203 | 			Index: i,
204 | 		})
205 | 	}
206 | 
207 | 	st = structType{}
208 | 
209 | 	if len(zeroFields) != 0 {
210 | 		st.ZeroFields = append(st.ZeroFields, zeroFields...)
211 | 	}
212 | 
213 | 	if len(pointerFields) != 0 {
214 | 		st.PointerFields = append(st.PointerFields, pointerFields...)
215 | 	}
216 | 
217 | 	// Load custom function.
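// Walk up the parent chain so that custom funcs registered on ancestor allocators are inherited.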
218 | 	current := a
219 | 
220 | 	for current != nil {
221 | 		if fn, ok := current.cachedCustomFuncTypes.Load(t); ok {
222 | 			st.fn = fn.(Func)
223 | 			break
224 | 		}
225 | 
226 | 		current = current.parent
227 | 	}
228 | 
229 | 	a.cachedStructTypes.LoadOrStore(t, st)
230 | 	return
231 | }
232 | 
233 | func (a *Allocator) lookupStructType(t reflect.Type) (st structType, ok bool) {
234 | 	var v interface{}
235 | 	current := a
236 | 
237 | 	for current != nil {
238 | 		v, ok = current.cachedStructTypes.Load(t)
239 | 
240 | 		if ok {
241 | 			st = v.(structType)
242 | 			return
243 | 		}
244 | 
245 | 		current = current.parent
246 | 	}
247 | 
248 | 	return
249 | }
250 | 
251 | func (a *Allocator) isOpaquePointer(t reflect.Type) (ok bool) {
252 | 	current := a
253 | 
254 | 	for current != nil {
255 | 		if _, ok = current.cachedPointerTypes.Load(t); ok {
256 | 			return
257 | 		}
258 | 
259 | 		current = current.parent
260 | 	}
261 | 
262 | 	return
263 | }
264 | 
265 | // MarkAsScalar marks t as a scalar type so that all clone methods will copy t by value.
266 | // If t is not a struct or a pointer to struct, MarkAsScalar ignores t.
267 | //
268 | // In most cases, it's not necessary to call it explicitly.
269 | // If a struct type contains scalar type fields only, the struct will be marked as scalar automatically.
270 | //
271 | // Here is a list of types marked as scalar by default:
272 | // - time.Time
273 | // - reflect.Value
274 | func (a *Allocator) MarkAsScalar(t reflect.Type) {
275 | 	for t.Kind() == reflect.Ptr {
276 | 		t = t.Elem()
277 | 	}
278 | 
279 | 	if t.Kind() != reflect.Struct {
280 | 		return
281 | 	}
282 | 
283 | 	a.cachedStructTypes.Store(t, zeroStructType)
284 | }
285 | 
286 | // MarkAsOpaquePointer marks t as an opaque pointer so that all clone methods will copy t by value.
287 | // If t is not a pointer, MarkAsOpaquePointer ignores t.
288 | //
289 | // Here is a list of types marked as opaque pointers by default:
290 | // - `elliptic.Curve`, which is `*elliptic.CurveParams` or `elliptic.p256Curve`;
291 | // - `reflect.Type`, which is `*reflect.rtype` defined in `runtime`.
292 | func (a *Allocator) MarkAsOpaquePointer(t reflect.Type) {
293 | 	if t.Kind() != reflect.Ptr {
294 | 		return
295 | 	}
296 | 
297 | 	a.cachedPointerTypes.Store(t, struct{}{})
298 | }
299 | 
300 | // SetCustomFunc sets a custom clone function for type t.
301 | // If t is not a struct or a pointer to struct, SetCustomFunc ignores t.
302 | //
303 | // If fn is nil, remove the custom clone function for type t.
304 | func (a *Allocator) SetCustomFunc(t reflect.Type, fn Func) {
305 | 	if fn == nil {
306 | 		a.cachedCustomFuncTypes.Delete(t)
307 | 		return
308 | 	}
309 | 
310 | 	for t.Kind() == reflect.Ptr {
311 | 		t = t.Elem()
312 | 	}
313 | 
314 | 	if t.Kind() != reflect.Struct {
315 | 		return
316 | 	}
317 | 
318 | 	a.cachedCustomFuncTypes.Store(t, fn)
319 | }
320 | 
321 | func heapNew(pool unsafe.Pointer, t reflect.Type) reflect.Value {
322 | 	return reflect.New(t)
323 | }
324 | 
325 | func heapMakeSlice(pool unsafe.Pointer, t reflect.Type, len, cap int) reflect.Value {
326 | 	return reflect.MakeSlice(t, len, cap)
327 | }
328 | 
329 | func heapMakeMap(pool unsafe.Pointer, t reflect.Type, n int) reflect.Value {
330 | 	return reflect.MakeMapWithSize(t, n)
331 | }
332 | 
333 | func heapMakeChan(pool unsafe.Pointer, t reflect.Type, buffer int) reflect.Value {
334 | 	return reflect.MakeChan(t, buffer)
335 | }
336 | 
--------------------------------------------------------------------------------
/structtype.go:
--------------------------------------------------------------------------------
1 | // Copyright 2019 Huan Du. All rights reserved.
2 | // Licensed under the MIT license that can be found in the LICENSE file.
3 | 
4 | package clone
5 | 
6 | import (
7 | 	"crypto/elliptic"
8 | 	"fmt"
9 | 	"reflect"
10 | 	"sync"
11 | 	"sync/atomic"
12 | 	"time"
13 | 	"unsafe"
14 | )
15 | 
16 | type structType struct {
17 | 	ZeroFields []structFieldSize
18 | 	PointerFields []structFieldType
19 | 	fn Func
20 | }
21 | 
22 | type structFieldSize struct {
23 | 	Offset uintptr // The offset from the beginning of the struct.
24 | 	Size uintptr // The size of the field.
25 | }
26 | 
27 | type structFieldType struct {
28 | 	Offset uintptr // The offset from the beginning of the struct.
29 | 	Index int // The index of the field.
30 | }
31 | 
32 | var zeroStructType = structType{}
33 | 
34 | func init() {
35 | 	// Some well-known scalar-like structs.
36 | 	MarkAsScalar(reflect.TypeOf(time.Time{}))
37 | 	MarkAsScalar(reflect.TypeOf(reflect.Value{}))
38 | 
39 | 	// Special case for elliptic.Curve which is used by TLS ECC certificates.
40 | 	// Package crypto/tls uses elliptic.Curve values as enums,
41 | 	// so they should be treated as opaque pointers.
42 | 	//
43 | 	// As elliptic.Curve is an interface, it can be *elliptic.CurveParams or elliptic.p256Curve.
44 | 	MarkAsOpaquePointer(reflect.TypeOf(&elliptic.CurveParams{}))
45 | 	curves := []elliptic.Curve{
46 | 		elliptic.P224(),
47 | 		elliptic.P256(),
48 | 		elliptic.P384(),
49 | 		elliptic.P521(),
50 | 	}
51 | 
52 | 	for _, curve := range curves {
53 | 		MarkAsOpaquePointer(reflect.ValueOf(curve).Type())
54 | 	}
55 | 
56 | 	// Special case for reflect.Type (actually *reflect.rtype):
57 | 	// The *reflect.rtype should not be copied as it is immutable and
58 | 	// may point to a variable whose actual type is not reflect.rtype,
59 | 	// e.g. *reflect.arrayType or *reflect.chanType.
60 | 	MarkAsOpaquePointer(reflect.TypeOf(reflect.TypeOf(0)))
61 | 
62 | 	// Some well-known no-copy structs.
63 | 	//
64 | 	// Almost all structs defined in packages "sync" and "sync/atomic" are set,
65 | 	// except `sync.Once` which can be safely cloned with a correct done value.
66 | 	SetCustomFunc(reflect.TypeOf(sync.Mutex{}), emptyCloneFunc)
67 | 	SetCustomFunc(reflect.TypeOf(sync.RWMutex{}), emptyCloneFunc)
68 | 	SetCustomFunc(reflect.TypeOf(sync.WaitGroup{}), emptyCloneFunc)
69 | 	SetCustomFunc(reflect.TypeOf(sync.Cond{}), func(allocator *Allocator, old, new reflect.Value) {
70 | 		// Clone the locker L from the old value.
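// All other fields of the cond stay zero, so the cloned cond starts in an unlocked state.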
71 | 		oldL := old.FieldByName("L")
72 | 		newL := allocator.Clone(oldL)
73 | 		new.FieldByName("L").Set(newL)
74 | 	})
75 | 	SetCustomFunc(reflect.TypeOf(sync.Pool{}), func(allocator *Allocator, old, new reflect.Value) {
76 | 		// Copy the New func from the old value.
77 | 		oldFn := old.FieldByName("New")
78 | 		newFn := allocator.Clone(oldFn)
79 | 		new.FieldByName("New").Set(newFn)
80 | 	})
81 | 	SetCustomFunc(reflect.TypeOf(sync.Map{}), func(allocator *Allocator, old, new reflect.Value) {
82 | 		if !old.CanAddr() {
83 | 			return
84 | 		}
85 | 
86 | 		// Clone all values inside sync.Map.
87 | 		oldMap := old.Addr().Interface().(*sync.Map)
88 | 		newMap := new.Addr().Interface().(*sync.Map)
89 | 		oldMap.Range(func(key, value interface{}) bool {
90 | 			k := clone(allocator, key)
91 | 			v := clone(allocator, value)
92 | 			newMap.Store(k, v)
93 | 			return true
94 | 		})
95 | 	})
96 | 	SetCustomFunc(reflect.TypeOf(atomic.Value{}), func(allocator *Allocator, old, new reflect.Value) {
97 | 		if !old.CanAddr() {
98 | 			return
99 | 		}
100 | 
101 | 		// Clone the value inside atomic.Value.
102 | 		oldValue := old.Addr().Interface().(*atomic.Value)
103 | 		newValue := new.Addr().Interface().(*atomic.Value)
104 | 
105 | 		if v := oldValue.Load(); v != nil {
106 | 			cloned := clone(allocator, v)
107 | 			newValue.Store(cloned)
108 | 		}
109 | 	})
110 | }
111 | 
112 | // MarkAsScalar marks t as a scalar type in the heap allocator,
113 | // so that all clone methods will copy t by value.
114 | // If t is not a struct or a pointer to struct, MarkAsScalar ignores t.
115 | //
116 | // In most cases, it's not necessary to call it explicitly.
117 | // If a struct type contains scalar type fields only, the struct will be marked as scalar automatically.
118 | //
119 | // Here is a list of types marked as scalar by default:
120 | // - time.Time
121 | // - reflect.Value
122 | func MarkAsScalar(t reflect.Type) {
123 | 	defaultAllocator.MarkAsScalar(t)
124 | }
125 | 
126 | // MarkAsOpaquePointer marks t as an opaque pointer in the heap allocator,
127 | // so that all clone methods will copy t by value.
128 | // If t is not a pointer, MarkAsOpaquePointer ignores t.
129 | //
130 | // Here is a list of types marked as opaque pointers by default:
131 | // - `elliptic.Curve`, which is `*elliptic.CurveParams` or `elliptic.p256Curve`;
132 | // - `reflect.Type`, which is `*reflect.rtype` defined in `runtime`.
133 | func MarkAsOpaquePointer(t reflect.Type) {
134 | 	defaultAllocator.MarkAsOpaquePointer(t)
135 | }
136 | 
137 | // Func is a custom func to clone value from old to new.
138 | // The new is a zero value
139 | // for which `new.CanSet()` and `new.CanAddr()` are guaranteed to be true.
140 | //
141 | // Func must update the new to return the result.
142 | type Func func(allocator *Allocator, old, new reflect.Value)
143 | 
144 | // emptyCloneFunc is used to disable shadow copy.
145 | // It's useful when cloning sync.Mutex as the cloned value must be a zero value.
146 | func emptyCloneFunc(allocator *Allocator, old, new reflect.Value) {}
147 | 
148 | // SetCustomFunc sets a custom clone function for type t in the heap allocator.
149 | // If t is not a struct or a pointer to struct, SetCustomFunc ignores t.
150 | //
151 | // If fn is nil, remove the custom clone function for type t.
152 | func SetCustomFunc(t reflect.Type, fn Func) {
153 | 	defaultAllocator.SetCustomFunc(t, fn)
154 | }
155 | 
156 | // Init creates a new value of src.Type() and shadow copies all content from src.
157 | // If noCustomFunc is set to true, the custom clone function will be ignored.
158 | //
159 | // Init returns true if the value is cloned by a custom func.
160 | // In that case, the caller should skip cloning struct fields recursively.
161 | func (st *structType) Init(allocator *Allocator, src, nv reflect.Value, noCustomFunc bool) (done bool) {
162 | 	dst := nv.Elem()
163 | 
164 | 	if !noCustomFunc && st.fn != nil {
165 | 		if !src.CanInterface() {
166 | 			src = forceClearROFlag(src)
167 | 		}
168 | 
169 | 		st.fn(allocator, src, dst)
170 | 		done = true
171 | 		return
172 | 	}
173 | 
174 | 	ptr := unsafe.Pointer(nv.Pointer())
175 | 	shadowCopy(src, ptr)
176 | 	done = len(st.PointerFields) == 0
177 | 	return
178 | }
179 | 
180 | func (st *structType) CanShadowCopy() bool {
181 | 	return len(st.PointerFields) == 0 && st.fn == nil
182 | }
183 | 
184 | // IsScalar returns true if k should be considered as a scalar type.
185 | //
186 | // For the sake of performance, string is considered as a scalar type unless arena is enabled.
187 | // If we need to deep copy string values in some cases, we can create a new allocator with a custom isScalar function
188 | // in which we can return false when k is reflect.String.
189 | //
190 | // 	// Create a new allocator which treats string as a non-scalar type.
191 | // 	allocator := NewAllocator(nil, &AllocatorMethods{
192 | // 		IsScalar: func(k reflect.Kind) bool {
193 | // 			return k != reflect.String && IsScalar(k)
194 | // 		},
195 | // 	})
196 | func IsScalar(k reflect.Kind) bool {
197 | 	switch k {
198 | 	case reflect.Bool,
199 | 		reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
200 | 		reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr,
201 | 		reflect.Float32, reflect.Float64,
202 | 		reflect.Complex64, reflect.Complex128,
203 | 		reflect.Func,
204 | 		reflect.UnsafePointer,
205 | 		reflect.Invalid:
206 | 		return true
207 | 
208 | 	case reflect.String:
209 | 		// If arena is not enabled, string can be copied as scalar safely
210 | 		// as it's immutable by design.
211 | 		return !arenaIsEnabled
212 | 	}
213 | 
214 | 	return false
215 | }
216 | 
217 | func copyScalarValue(src reflect.Value) reflect.Value {
218 | 	if src.CanInterface() {
219 | 		return src
220 | 	}
221 | 
222 | 	dst := newScalarValue(src)
223 | 	return dst.Convert(src.Type())
224 | }
225 | 
226 | func newScalarValue(src reflect.Value) reflect.Value {
227 | 	// src is an unexported field value. Copy its value.
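	// Rebuilding the value through its typed accessor (Bool, Int, ...) drops the read-only flag set by reflect.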
228 | switch src.Kind() { 229 | case reflect.Bool: 230 | return reflect.ValueOf(src.Bool()) 231 | 232 | case reflect.Int: 233 | return reflect.ValueOf(int(src.Int())) 234 | case reflect.Int8: 235 | return reflect.ValueOf(int8(src.Int())) 236 | case reflect.Int16: 237 | return reflect.ValueOf(int16(src.Int())) 238 | case reflect.Int32: 239 | return reflect.ValueOf(int32(src.Int())) 240 | case reflect.Int64: 241 | return reflect.ValueOf(src.Int()) 242 | 243 | case reflect.Uint: 244 | return reflect.ValueOf(uint(src.Uint())) 245 | case reflect.Uint8: 246 | return reflect.ValueOf(uint8(src.Uint())) 247 | case reflect.Uint16: 248 | return reflect.ValueOf(uint16(src.Uint())) 249 | case reflect.Uint32: 250 | return reflect.ValueOf(uint32(src.Uint())) 251 | case reflect.Uint64: 252 | return reflect.ValueOf(src.Uint()) 253 | case reflect.Uintptr: 254 | return reflect.ValueOf(uintptr(src.Uint())) 255 | 256 | case reflect.Float32: 257 | return reflect.ValueOf(float32(src.Float())) 258 | case reflect.Float64: 259 | return reflect.ValueOf(src.Float()) 260 | 261 | case reflect.Complex64: 262 | return reflect.ValueOf(complex64(src.Complex())) 263 | case reflect.Complex128: 264 | return reflect.ValueOf(src.Complex()) 265 | 266 | case reflect.String: 267 | return reflect.ValueOf(src.String()) 268 | case reflect.Func: 269 | t := src.Type() 270 | 271 | if src.IsNil() { 272 | return reflect.Zero(t) 273 | } 274 | 275 | // Don't use this trick unless we have no choice. 276 | return forceClearROFlag(src) 277 | case reflect.UnsafePointer: 278 | return reflect.ValueOf(unsafe.Pointer(src.Pointer())) 279 | } 280 | 281 | panic(fmt.Errorf("go-clone: impossible type `%v` when cloning private field", src.Type())) 282 | } 283 | 284 | var typeOfInterface = reflect.TypeOf((*interface{})(nil)).Elem() 285 | 286 | // forceClearROFlag clears all RO flags in v to make v accessible. 287 | // It's a hack based on the fact that InterfaceData is always available on RO data. 288 | // This hack can be broken in any Go version. 289 | // Don't use it unless we have no choice, e.g. copying func in some edge cases. 290 | func forceClearROFlag(v reflect.Value) reflect.Value { 291 | var i interface{} 292 | indirect := 0 293 | 294 | // Save flagAddr. 295 | for v.CanAddr() { 296 | v = v.Addr() 297 | indirect++ 298 | } 299 | 300 | v = v.Convert(typeOfInterface) 301 | nv := reflect.ValueOf(&i) 302 | *(*interfaceData)(unsafe.Pointer(nv.Pointer())) = parseReflectValue(v) 303 | cleared := nv.Elem().Elem() 304 | 305 | for indirect > 0 { 306 | cleared = cleared.Elem() 307 | indirect-- 308 | } 309 | 310 | return cleared 311 | } 312 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # go-clone: Clone any Go data structure deeply and thoroughly 2 | 3 | [![Go](https://github.com/huandu/go-clone/workflows/Go/badge.svg)](https://github.com/huandu/go-clone/actions) 4 | [![Go Doc](https://godoc.org/github.com/huandu/go-clone?status.svg)](https://pkg.go.dev/github.com/huandu/go-clone) 5 | [![Go Report](https://goreportcard.com/badge/github.com/huandu/go-clone)](https://goreportcard.com/report/github.com/huandu/go-clone) 6 | [![Coverage Status](https://coveralls.io/repos/github/huandu/go-clone/badge.svg?branch=master)](https://coveralls.io/github/huandu/go-clone?branch=master) 7 | 8 | Package `clone` provides functions to deep clone any Go data. It also provides a wrapper to protect a pointer from any unexpected mutation. 
9 | 
10 | For users who use Go 1.18+, it's recommended to import `github.com/huandu/go-clone/generic` for generic APIs and arena support.
11 | 
12 | `Clone`/`Slowly` can clone unexported fields and "no-copy" structs as well. Use this feature wisely.
13 | 
14 | ## Install
15 | 
16 | Use `go get` to install this package.
17 | 
18 | ```shell
19 | go get github.com/huandu/go-clone
20 | ```
21 | 
22 | ## Usage
23 | 
24 | ### `Clone` and `Slowly`
25 | 
26 | If we want to clone any Go value, use `Clone`.
27 | 
28 | ```go
29 | t := &T{...}
30 | v := clone.Clone(t).(*T)
31 | reflect.DeepEqual(t, v) // true
32 | ```
33 | 
34 | For the sake of performance, `Clone` doesn't deal with values containing pointer cycles.
35 | If we need to clone such values, use `Slowly` instead.
36 | 
37 | ```go
38 | type ListNode struct {
39 |     Data int
40 |     Next *ListNode
41 | }
42 | node1 := &ListNode{
43 |     Data: 1,
44 | }
45 | node2 := &ListNode{
46 |     Data: 2,
47 | }
48 | node3 := &ListNode{
49 |     Data: 3,
50 | }
51 | node1.Next = node2
52 | node2.Next = node3
53 | node3.Next = node1
54 | 
55 | // We must use `Slowly` to clone a circular linked list.
56 | node := Slowly(node1).(*ListNode)
57 | 
58 | for i := 0; i < 10; i++ {
59 |     fmt.Println(node.Data)
60 |     node = node.Next
61 | }
62 | ```
63 | 
64 | ### Generic APIs
65 | 
66 | Starting from Go 1.18, generics are supported. With generic syntax, `Clone`/`Slowly` and other APIs can be called much more cleanly, as follows.
67 | 
68 | ```go
69 | import "github.com/huandu/go-clone/generic"
70 | 
71 | type MyType struct {
72 |     Foo string
73 | }
74 | 
75 | original := &MyType{
76 |     Foo: "bar",
77 | }
78 | 
79 | // The type of cloned is *MyType instead of interface{}.
80 | cloned := Clone(original)
81 | println(cloned.Foo) // Output: bar
82 | ```
83 | 
84 | Opting in to generic syntax requires raising this package's minimal Go version to 1.18. It may not be a wise choice to update this package's `go.mod` and drop support for so many old Go compilers just for syntax candy. Therefore, I decided to create a new standalone package `github.com/huandu/go-clone/generic` to provide APIs with generic syntax.
85 | 
86 | For new users who use Go 1.18+, the generic package is preferred and recommended.
87 | 
88 | ### Arena support
89 | 
90 | Starting from Go 1.20, arenas are introduced as a new way to allocate memory. They are quite useful for improving overall performance in special scenarios.
91 | In order to clone a value with memory allocated from an arena, there are new methods `ArenaClone` and `ArenaCloneSlowly` available in `github.com/huandu/go-clone/generic`; a usage sketch appears at the end of this section.
92 | 
93 | ```go
94 | // ArenaClone recursively deep clones v to a new value in arena a.
95 | // It works in the same way as Clone, except it allocates all memory from arena.
96 | func ArenaClone[T any](a *arena.Arena, v T) (nv T)
97 | 
98 | // ArenaCloneSlowly recursively deep clones v to a new value in arena a.
99 | // It works in the same way as Slowly, except it allocates all memory from arena.
100 | func ArenaCloneSlowly[T any](a *arena.Arena, v T) (nv T)
101 | ```
102 | 
103 | Due to limitations in the arena API, memory for the internal data structures of `map` and `chan` is always allocated in the heap by the Go runtime ([see this issue](https://github.com/golang/go/issues/56230)).
104 | 
105 | **Warning**: Per [discussion in the arena proposal](https://github.com/golang/go/issues/51317), the arena package may be changed incompatibly or removed in the future. All arena related APIs in this package will be changed accordingly.
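To make this concrete, here is a minimal, hypothetical sketch of how `ArenaClone` could be used. The `Payload` type is made up for illustration, and the program must be built with `GOEXPERIMENT=arenas` on Go 1.20+.

```go
//go:build goexperiment.arenas

package main

import (
	"arena"
	"fmt"

	clone "github.com/huandu/go-clone/generic"
)

// Payload is a hypothetical type used only for this sketch.
type Payload struct {
	Name string
	Data []int
}

func main() {
	a := arena.NewArena()
	defer a.Free() // Memory allocated from the arena is released here.

	original := &Payload{Name: "sample", Data: []int{1, 2, 3}}

	// Deep clone original with memory allocated from the arena
	// (map and chan internals still come from the heap, as noted above).
	cloned := clone.ArenaClone(a, original)
	cloned.Data[0] = 100

	fmt.Println(original.Data[0]) // 1
	fmt.Println(cloned.Data[0])   // 100
}
```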
106 | 
107 | ### Struct tags
108 | 
109 | There are some struct tags to control how to clone a struct field.
110 | 
111 | ```go
112 | type T struct {
113 |     Normal *int
114 |     Foo    *int `clone:"skip"`       // Skip cloning this field so that Foo will be zero in cloned value.
115 |     Bar    *int `clone:"-"`          // "-" is an alias of skip.
116 |     Baz    *int `clone:"shadowcopy"` // Copy this field by shadow copy.
117 | }
118 | 
119 | a := 1
120 | t := &T{
121 |     Normal: &a,
122 |     Foo:    &a,
123 |     Bar:    &a,
124 |     Baz:    &a,
125 | }
126 | v := clone.Clone(t).(*T)
127 | 
128 | fmt.Println(v.Normal == t.Normal) // false
129 | fmt.Println(v.Foo == nil)         // true
130 | fmt.Println(v.Bar == nil)         // true
131 | fmt.Println(v.Baz == t.Baz)       // true
132 | ```
133 | 
134 | ### Memory allocations and the `Allocator`
135 | 
136 | The `Allocator` is designed to allocate memory when cloning. It's also used to hold all customizations: custom clone functions, scalar types, opaque pointers, etc. There is a default allocator which allocates memory from the heap. Almost all public APIs in this package use this default allocator to do their job.
137 | 
138 | We can control how to allocate memory by creating a new `Allocator` with `NewAllocator`. It enables us to take full control over memory allocation when cloning. See [Allocator sample code](https://pkg.go.dev/github.com/huandu/go-clone#example-Allocator) to understand how to customize an allocator.
139 | 
140 | Let's take a closer look at the `NewAllocator` function.
141 | 
142 | ```go
143 | func NewAllocator(pool unsafe.Pointer, methods *AllocatorMethods) *Allocator
144 | ```
145 | 
146 | - The first parameter `pool` is a pointer to a memory pool. It's used to allocate memory for cloning. It can be `nil` if we don't need a memory pool.
147 | - The second parameter `methods` is a pointer to a struct which contains all methods to allocate memory. It can be `nil` if we don't need to customize memory allocation.
148 | - The `Allocator` struct itself is allocated by `methods.New`, by the `methods.Parent` allocator, or from the heap.
149 | 
150 | The `Parent` in `AllocatorMethods` is used to indicate the parent of the new allocator. With this feature, we can organize allocators into a tree structure. All customizations, including custom clone functions, scalar types and opaque pointers, are inherited from parent allocators.
151 | 
152 | There are some APIs designed for convenience.
153 | 
154 | - We can create dedicated allocators for heap or arena by calling `FromHeap()` or `FromArena(a *arena.Arena)`.
155 | - We can call `MakeCloner(allocator)` to create a helper struct with `Clone` and `CloneSlowly` methods in which the type of in and out parameters is `interface{}`.
156 | 
157 | ### Mark struct type as scalar
158 | 
159 | Some struct types can be considered scalars.
160 | 
161 | A well-known case is `time.Time`.
162 | Although there is a pointer `loc *time.Location` inside `time.Time`, we always use `time.Time` by value in all methods.
163 | When cloning `time.Time`, it should be OK to return a shadow copy.
164 | 
165 | Currently, the following types are marked as scalar by default.
166 | 
167 | - `time.Time`
168 | - `reflect.Value`
169 | 
170 | If any type defined in a built-in package should be considered scalar, please open a new issue to let me know.
171 | I will update the defaults.
172 | 
173 | If any custom type should be considered scalar, call `MarkAsScalar` to mark it manually, as sketched below.
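As a sketch of that call, suppose a hypothetical `Config` struct holds a `*os.File` that is never mutated, so a shadow copy is good enough:

```go
package main

import (
	"fmt"
	"os"
	"reflect"

	"github.com/huandu/go-clone"
)

// Config is a hypothetical type; its file handle is never mutated,
// so sharing it between clones is safe.
type Config struct {
	output *os.File
}

func main() {
	// From now on, Clone copies Config by value.
	clone.MarkAsScalar(reflect.TypeOf(new(Config)))

	cfg := &Config{output: os.Stdout}
	cloned := clone.Clone(cfg).(*Config)

	// The pointer field is shared because Config is treated as a scalar.
	fmt.Println(cfg.output == cloned.output) // true
}
```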
See [MarkAsScalar sample code](https://pkg.go.dev/github.com/huandu/go-clone#example-MarkAsScalar) for more details.
174 | 
175 | ### Mark pointer type as opaque
176 | 
177 | Some pointer values are used as enumerable const values.
178 | 
179 | A well-known case is `elliptic.Curve`. In package `crypto/tls`, the curve type of a certificate is checked by comparing values against pre-defined curve values, e.g. `elliptic.P521()`. In this case, the curve values, which are pointers or structs, cannot be cloned deeply.
180 | 
181 | Currently, the following types are marked as opaque pointers by default.
182 | 
183 | - `elliptic.Curve`, which is `*elliptic.CurveParams` or `elliptic.p256Curve`.
184 | - `reflect.Type`, which is `*reflect.rtype` defined in `runtime`.
185 | 
186 | If any pointer type defined in a built-in package should be considered opaque, please open a new issue to let me know.
187 | I will update the defaults.
188 | 
189 | If any custom pointer type should be considered opaque, call `MarkAsOpaquePointer` to mark it manually. See [MarkAsOpaquePointer sample code](https://pkg.go.dev/github.com/huandu/go-clone#example-MarkAsOpaquePointer) for more details.
190 | 
191 | ### Clone "no-copy" types defined in `sync` and `sync/atomic`
192 | 
193 | There are some "no-copy" types like `sync.Mutex`, `atomic.Value`, etc.
194 | They cannot be cloned by copying all fields one by one, but we can allocate a new zero value and call methods to do proper initialization.
195 | 
196 | Currently, all "no-copy" types defined in `sync` and `sync/atomic` can be cloned properly using the following strategies.
197 | 
198 | - `sync.Mutex`: Cloned value is a newly allocated zero mutex.
199 | - `sync.RWMutex`: Cloned value is a newly allocated zero mutex.
200 | - `sync.WaitGroup`: Cloned value is a newly allocated zero wait group.
201 | - `sync.Cond`: Cloned value is a cond with a newly allocated zero lock.
202 | - `sync.Pool`: Cloned value is an empty pool with the same `New` function.
203 | - `sync.Map`: Cloned value is a sync map with cloned key/value pairs.
204 | - `sync.Once`: Cloned value is a once type with the same done flag.
205 | - `atomic.Value`/`atomic.Bool`/`atomic.Int32`/`atomic.Int64`/`atomic.Uint32`/`atomic.Uint64`/`atomic.Uintptr`: Cloned value is a new atomic value with the same value.
206 | 
207 | If any type defined in a built-in package should be considered a "no-copy" type, please open a new issue to let me know.
208 | I will update the defaults.
209 | 
210 | ### Set custom clone functions
211 | 
212 | If the default clone strategy doesn't work for a struct type, we can call `SetCustomFunc` to register a custom clone function.
213 | 
214 | ```go
215 | SetCustomFunc(reflect.TypeOf(MyType{}), func(allocator *Allocator, old, new reflect.Value) {
216 |     // Customized logic to copy the old to the new.
217 |     // The old's type is MyType.
218 |     // The new is a zero value of MyType and new.CanAddr() always returns true.
219 | })
220 | ```
221 | 
222 | We can use `allocator` to clone any value or allocate new memory.
223 | It's allowed to call `allocator.Clone` or `allocator.CloneSlowly` on `old` to clone its struct fields recursively without worrying about an infinite loop.
224 | 
225 | See [SetCustomFunc sample code](https://pkg.go.dev/github.com/huandu/go-clone#example-SetCustomFunc) for more details, and a short sketch follows below.
226 | 
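For instance, the sketch below (with a hypothetical `Versioned` type) deep clones the struct first and then patches a single field on the clone:

```go
package main

import (
	"fmt"
	"reflect"

	"github.com/huandu/go-clone"
)

// Versioned is a hypothetical type whose clones should bump Version.
type Versioned struct {
	Version int
	Tags    []string
}

func main() {
	clone.SetCustomFunc(reflect.TypeOf(Versioned{}), func(allocator *clone.Allocator, old, new reflect.Value) {
		// Deep clone everything first. The custom func is temporarily
		// disabled for old inside allocator.Clone, so this never recurses forever.
		new.Set(allocator.Clone(old))

		// Then patch the Version field on the clone.
		v := new.FieldByName("Version")
		v.SetInt(v.Int() + 1)
	})

	orig := &Versioned{Version: 1, Tags: []string{"a"}}
	cloned := clone.Clone(orig).(*Versioned)

	fmt.Println(orig.Version, cloned.Version)     // 1 2
	fmt.Println(&orig.Tags[0] != &cloned.Tags[0]) // true: Tags is deep cloned
}
```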
227 | ### Clone `atomic.Pointer[T]`
228 | 
229 | As there is no way to predefine a custom clone function for the generic type `atomic.Pointer[T]`, cloning such atomic types is not supported by default. If we want to support it, we need to register a custom clone function manually.
230 | 
231 | Suppose we instantiate `atomic.Pointer[T]` with types `MyType1` and `MyType2` in a project. We can register custom clone functions as follows.
232 | 
233 | ```go
234 | import "github.com/huandu/go-clone/generic"
235 | 
236 | func init() {
237 |     // Register all instantiated atomic.Pointer[T] types in this project.
238 |     clone.RegisterAtomicPointer[MyType1]()
239 |     clone.RegisterAtomicPointer[MyType2]()
240 | }
241 | ```
242 | 
243 | ### `Wrap`, `Unwrap` and `Undo`
244 | 
245 | Package `clone` provides `Wrap`/`Unwrap` functions to protect a pointer value from any unexpected mutation.
246 | It's useful when we want to protect a variable which should be immutable by design,
247 | e.g. a global config, a value stored in a context, a value sent to a chan, etc.
248 | 
249 | ```go
250 | // Suppose we have a type T defined as follows.
251 | // type T struct {
252 | //     Foo int
253 | // }
254 | v := &T{
255 |     Foo: 123,
256 | }
257 | w := Wrap(v).(*T) // Wrap value to protect it.
258 | 
259 | // Use w freely. The type of w is the same as that of v.
260 | 
261 | // It's OK to modify w. The change will not affect v.
262 | w.Foo = 456
263 | fmt.Println(w.Foo) // 456
264 | fmt.Println(v.Foo) // 123
265 | 
266 | // Once we need the original value stored in w, call `Unwrap`.
267 | orig := Unwrap(w).(*T)
268 | fmt.Println(orig == v) // true
269 | fmt.Println(orig.Foo)  // 123
270 | 
271 | // Or, we can simply undo any change made in w.
272 | // Note that `Undo` is significantly slower than `Unwrap`, thus
273 | // the latter is always preferred.
274 | Undo(w)
275 | fmt.Println(w.Foo) // 123
276 | ```
277 | 
278 | ## Performance
279 | 
280 | Here is the performance data running on my dev machine.
281 | 
282 | ```text
283 | go 1.20.1
284 | goos: darwin
285 | goarch: amd64
286 | cpu: Intel(R) Core(TM) i7-9750H CPU @ 2.60GHz
287 | BenchmarkSimpleClone-12     7164530      156.7 ns/op      24 B/op     1 allocs/op
288 | BenchmarkComplexClone-12     628056       1871 ns/op    1488 B/op    21 allocs/op
289 | BenchmarkUnwrap-12         15498139      78.02 ns/op       0 B/op     0 allocs/op
290 | BenchmarkSimpleWrap-12      3882360      309.7 ns/op      72 B/op     2 allocs/op
291 | BenchmarkComplexWrap-12      949654       1245 ns/op     736 B/op    15 allocs/op
292 | ```
293 | 
294 | ## License
295 | 
296 | This package is licensed under MIT license. See LICENSE for details.
297 | 
--------------------------------------------------------------------------------
/clone_common_test.go:
--------------------------------------------------------------------------------
1 | // Copyright 2019 Huan Du. All rights reserved.
2 | // Licensed under the MIT license that can be found in the LICENSE file.
3 | 4 | package clone 5 | 6 | import ( 7 | "bytes" 8 | "container/list" 9 | "io" 10 | "reflect" 11 | "testing" 12 | "unsafe" 13 | 14 | "github.com/huandu/go-assert" 15 | ) 16 | 17 | var testFuncMap = map[string]func(t *testing.T, allocator *Allocator){ 18 | "Basic Clone": testClone, 19 | "Slowly linked list": testSlowlyLinkedList, 20 | "Slowly cycle linked list": testSlowlyCycleLinkedList, 21 | "Slowly fix invalid cycle pointers": testSlowlyFixInvalidCyclePointers, 22 | "Slowly fix invalid linked pointers": testSlowlyFixInvalidLinkedPointers, 23 | "Clone array": testCloneArray, 24 | "Clone map": testCloneMap, 25 | "Clone bytes buffer": testCloneBytesBuffer, 26 | "Clone unexported fields": testCloneUnexportedFields, 27 | "Clone unexported struct method": testCloneUnexportedStructMethod, 28 | "Clone reflect type": testCloneReflectType, 29 | "Clone with skip fields": testCloneSkipFields, 30 | } 31 | 32 | type T struct { 33 | Foo int 34 | Bar map[string]interface{} 35 | } 36 | 37 | func testClone(t *testing.T, allocator *Allocator) { 38 | arr := [4]string{"abc", "def", "ghi"} 39 | ch := make(chan int, 2) 40 | fn := func(int) {} 41 | var it io.Writer = &bytes.Buffer{} 42 | m := map[interface{}]string{ 43 | "abc": "efg", 44 | 123: "ghi", 45 | } 46 | slice := []string{"xyz", "opq"} 47 | st := T{ 48 | Foo: 1234, 49 | Bar: map[string]interface{}{ 50 | "abc": 123, 51 | "def": "ghi", 52 | }, 53 | } 54 | ptr := &st 55 | complex := []map[string][]*T{ 56 | { 57 | "abc": { 58 | {Foo: 456, Bar: map[string]interface{}{"abc": "def"}}, 59 | }, 60 | }, 61 | { 62 | "def": { 63 | {Foo: 987, Bar: map[string]interface{}{"abc": "def"}}, 64 | {Foo: 321, Bar: map[string]interface{}{"ghi": "xyz"}}, 65 | }, 66 | "ghi": { 67 | {Foo: 654, Bar: map[string]interface{}{"def": "abc"}}, 68 | }, 69 | }, 70 | } 71 | nested := func() interface{} { 72 | var nested []map[string][]*T 73 | var nestedPtr *T 74 | var nestedIf interface{} 75 | var nestedMap map[string]interface{} 76 | nested = []map[string][]*T{ 77 | { 78 | "abc": { 79 | {Foo: 987, Bar: map[string]interface{}{"def": nil, "nil": nil}}, 80 | {Foo: 321, Bar: map[string]interface{}{"ghi": nil, "def": nil, "cba": nil}}, 81 | {Foo: 456}, 82 | nil, 83 | }, 84 | }, 85 | } 86 | nestedPtr = &T{ 87 | Foo: 654, 88 | Bar: map[string]interface{}{ 89 | "xyz": nested, 90 | "opq": nil, 91 | }, 92 | } 93 | nestedIf = map[string]interface{}{ 94 | "rst": nested, 95 | } 96 | nestedMap = map[string]interface{}{} 97 | 98 | // Don't test it due to bug in Go. 99 | // https://github.com/golang/go/issues/33907 100 | //nestedMap["opq"] = nestedMap 101 | 102 | nested[0]["abc"][0].Bar["def"] = nested 103 | nested[0]["abc"][1].Bar["ghi"] = nestedPtr 104 | nested[0]["abc"][1].Bar["def"] = nestedIf 105 | nested[0]["abc"][1].Bar["cba"] = nested 106 | nested[0]["abc"][2].Bar = nestedMap 107 | nested[0]["abc"][3] = nestedPtr 108 | nestedPtr.Bar["opq"] = nestedPtr 109 | return nested 110 | }() 111 | var nilSlice []int 112 | var nilChan chan bool 113 | var nilPtr *float64 114 | cases := []interface{}{ 115 | 123, "abc", nil, true, testing.TB(nil), 116 | arr, ch, fn, it, m, ptr, slice, st, nested, 117 | complex, nilSlice, nilChan, nilPtr, 118 | } 119 | 120 | for _, c := range cases { 121 | var v1, v2 interface{} 122 | 123 | if reflect.DeepEqual(c, nested) { 124 | // Clone doesn't work on nested data. 
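			// (nested contains pointer cycles, so only the slow path can clone it.)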
125 | v1 = c 126 | } else { 127 | v1 = clone(allocator, c) 128 | } 129 | 130 | v2 = cloneSlowly(allocator, c) 131 | deepEqual(t, c, v1) 132 | deepEqual(t, c, v2) 133 | } 134 | } 135 | 136 | func deepEqual(t *testing.T, expected, actual interface{}) { 137 | a := assert.New(t) 138 | a.Use(&expected, &actual) 139 | 140 | val := reflect.ValueOf(actual) 141 | 142 | // It's not possible to compare chan value. 143 | if val.Kind() == reflect.Chan { 144 | cval := reflect.ValueOf(expected) 145 | a.Equal(cval.Type(), val.Type()) 146 | a.Equal(cval.Cap(), val.Cap()) 147 | return 148 | } 149 | 150 | if val.Kind() == reflect.Func { 151 | // It's not possible to compare func value either. 152 | cval := reflect.ValueOf(expected) 153 | a.Assert(cval.Type() == val.Type()) 154 | return 155 | } 156 | 157 | a.Equal(actual, expected) 158 | } 159 | 160 | func testSlowlyLinkedList(t *testing.T, allocator *Allocator) { 161 | a := assert.New(t) 162 | l := list.New() 163 | l.PushBack("v1") 164 | l.PushBack("v2") 165 | cloned := cloneSlowly(allocator, l).(*list.List) 166 | 167 | a.Equal(l.Len(), cloned.Len()) 168 | a.Equal(l.Front().Value, cloned.Front().Value) 169 | a.Equal(l.Back().Value, cloned.Back().Value) 170 | 171 | // There must be only two elements in cloned. 172 | a.Equal(cloned.Back(), cloned.Front().Next()) 173 | a.Equal(cloned.Back().Next(), nil) 174 | } 175 | 176 | type cycleLinkedList struct { 177 | elems []*list.Element 178 | elem *list.Element 179 | 180 | list *list.List 181 | } 182 | 183 | func testSlowlyCycleLinkedList(t *testing.T, allocator *Allocator) { 184 | a := assert.New(t) 185 | l := list.New() 186 | elem := l.PushBack("123") 187 | cycle := &cycleLinkedList{ 188 | elems: []*list.Element{elem}, 189 | elem: elem, 190 | list: l, 191 | } 192 | cloned := cloneSlowly(allocator, cycle).(*cycleLinkedList) 193 | 194 | a.Equal(l.Len(), cloned.list.Len()) 195 | a.Equal(elem.Value, cloned.list.Front().Value) 196 | 197 | // There must be only one element in cloned. 198 | a.Equal(cloned.list.Front(), cloned.list.Back()) 199 | a.Equal(cloned.list.Front().Next(), nil) 200 | a.Equal(cloned.list.Back().Next(), nil) 201 | } 202 | 203 | type cycleList struct { 204 | root cycleElement 205 | elem *cycleElement 206 | } 207 | 208 | type cycleElement struct { 209 | next *cycleElement 210 | list *cycleList 211 | } 212 | 213 | type cycleComplex struct { 214 | ch chan bool 215 | scalar int 216 | scalarArray *[1]int 217 | scalarSlice []string 218 | scalarStruct *reflect.Value 219 | 220 | _ []*cycleElement 221 | _ map[*cycleElement]*cycleElement 222 | _ interface{} 223 | 224 | array [2]*cycleElement 225 | slice []*cycleElement 226 | iface1, iface2 interface{} 227 | ptr1, ptr2 *cycleElement 228 | 229 | scalarMap map[string]int 230 | plainMap map[int]*cycleElement 231 | simpleMap map[*cycleList]*cycleElement 232 | complexMap map[*cycleElement]*cycleElement 233 | 234 | pair cycleElementPair 235 | pairValue interface{} 236 | 237 | refSlice *[]*cycleElement 238 | refComplexMap *map[*cycleElement]*cycleElement 239 | } 240 | 241 | type cycleElementPair struct { 242 | elem1, elem2 *cycleElement 243 | } 244 | 245 | func makeCycleElement() *cycleElement { 246 | list := &cycleList{} 247 | elem := &cycleElement{ 248 | next: &list.root, 249 | list: list, 250 | } 251 | list.root.next = elem 252 | list.root.list = list 253 | list.elem = elem 254 | return &list.root 255 | } 256 | 257 | func (elem *cycleElement) validateCycle(t *testing.T) { 258 | a := assert.New(t) 259 | 260 | // elem is the &list.root. 
261 | a.Assert(elem == &elem.list.root) 262 | a.Assert(elem.next == elem.list.elem) 263 | a.Assert(elem.next.next == elem) 264 | } 265 | 266 | func testSlowlyFixInvalidCyclePointers(t *testing.T, allocator *Allocator) { 267 | var scalarArray [1]int 268 | scalarStruct := reflect.ValueOf(1) 269 | value := &cycleComplex{ 270 | ch: make(chan bool), 271 | scalar: 123, 272 | scalarArray: &scalarArray, 273 | scalarSlice: []string{"hello"}, 274 | scalarStruct: &scalarStruct, 275 | 276 | array: [2]*cycleElement{makeCycleElement(), makeCycleElement()}, 277 | slice: []*cycleElement{makeCycleElement(), makeCycleElement()}, 278 | iface1: makeCycleElement(), 279 | iface2: makeCycleElement(), 280 | ptr1: makeCycleElement(), 281 | ptr2: makeCycleElement(), 282 | 283 | scalarMap: map[string]int{ 284 | "foo": 123, 285 | }, 286 | plainMap: map[int]*cycleElement{ 287 | 123: makeCycleElement(), 288 | }, 289 | simpleMap: map[*cycleList]*cycleElement{ 290 | makeCycleElement().list: makeCycleElement(), 291 | }, 292 | complexMap: map[*cycleElement]*cycleElement{ 293 | makeCycleElement(): makeCycleElement(), 294 | }, 295 | } 296 | value.refSlice = &value.slice 297 | value.refComplexMap = &value.complexMap 298 | cloned := cloneSlowly(allocator, value).(*cycleComplex) 299 | 300 | cloned.array[0].validateCycle(t) 301 | cloned.array[1].validateCycle(t) 302 | cloned.slice[0].validateCycle(t) 303 | cloned.slice[1].validateCycle(t) 304 | 305 | cloned.iface1.(*cycleElement).validateCycle(t) 306 | cloned.iface2.(*cycleElement).validateCycle(t) 307 | cloned.ptr1.validateCycle(t) 308 | cloned.ptr2.validateCycle(t) 309 | cloned.plainMap[123].validateCycle(t) 310 | 311 | for k, v := range cloned.simpleMap { 312 | k.root.validateCycle(t) 313 | k.elem.next.validateCycle(t) 314 | v.validateCycle(t) 315 | } 316 | 317 | for k, v := range cloned.complexMap { 318 | k.validateCycle(t) 319 | v.validateCycle(t) 320 | } 321 | 322 | a := assert.New(t) 323 | a.Assert(cloned.refSlice == &cloned.slice) 324 | a.Assert(cloned.refComplexMap == &cloned.complexMap) 325 | } 326 | 327 | func makeLinkedElements() (elem1, elem2 *cycleElement) { 328 | list := &cycleList{} 329 | elem1 = &list.root 330 | elem2 = &cycleElement{ 331 | next: &list.root, 332 | list: list, 333 | } 334 | list.root.next = &cycleElement{} 335 | list.elem = elem2 336 | 337 | return 338 | } 339 | 340 | func (elem *cycleElement) validateLinked(t *testing.T) { 341 | a := assert.New(t) 342 | 343 | // elem is the elem2. 
344 | a.Assert(elem == elem.list.elem) 345 | a.Assert(elem.next == &elem.list.root) 346 | a.Assert(elem.next.next.next == nil) 347 | } 348 | 349 | func testSlowlyFixInvalidLinkedPointers(t *testing.T, allocator *Allocator) { 350 | value := &cycleComplex{ 351 | array: func() (elems [2]*cycleElement) { 352 | elems[0], elems[1] = makeLinkedElements() 353 | return 354 | }(), 355 | slice: func() []*cycleElement { 356 | elem1, elem2 := makeLinkedElements() 357 | return []*cycleElement{elem1, elem2} 358 | }(), 359 | 360 | scalarMap: map[string]int{ 361 | "foo": 123, 362 | }, 363 | plainMap: func() map[int]*cycleElement { 364 | elem1, elem2 := makeLinkedElements() 365 | return map[int]*cycleElement{ 366 | 1: elem1, 367 | 2: elem2, 368 | } 369 | }(), 370 | simpleMap: func() map[*cycleList]*cycleElement { 371 | elem1, elem2 := makeLinkedElements() 372 | return map[*cycleList]*cycleElement{ 373 | elem2.list: elem1, 374 | } 375 | }(), 376 | complexMap: func() map[*cycleElement]*cycleElement { 377 | elem1, elem2 := makeLinkedElements() 378 | return map[*cycleElement]*cycleElement{ 379 | elem1: elem2, 380 | } 381 | }(), 382 | } 383 | value.refSlice = &value.slice 384 | value.refComplexMap = &value.complexMap 385 | value.iface1, value.iface2 = makeLinkedElements() 386 | value.ptr1, value.ptr2 = makeLinkedElements() 387 | value.pair.elem1, value.pair.elem2 = makeLinkedElements() 388 | var pair cycleElementPair 389 | pair.elem1, pair.elem2 = makeLinkedElements() 390 | value.pairValue = pair 391 | cloned := cloneSlowly(allocator, value).(*cycleComplex) 392 | 393 | cloned.array[1].validateLinked(t) 394 | cloned.slice[1].validateLinked(t) 395 | 396 | cloned.iface2.(*cycleElement).validateLinked(t) 397 | cloned.ptr2.validateLinked(t) 398 | cloned.plainMap[2].validateLinked(t) 399 | 400 | for k := range cloned.simpleMap { 401 | k.elem.validateLinked(t) 402 | } 403 | 404 | for _, v := range cloned.complexMap { 405 | v.validateLinked(t) 406 | } 407 | 408 | value.pair.elem2.validateLinked(t) 409 | value.pairValue.(cycleElementPair).elem2.validateLinked(t) 410 | 411 | a := assert.New(t) 412 | a.Assert(cloned.refSlice == &cloned.slice) 413 | a.Assert(cloned.refComplexMap == &cloned.complexMap) 414 | } 415 | 416 | func testCloneArray(t *testing.T, allocator *Allocator) { 417 | a := assert.New(t) 418 | arr := [2]*T{ 419 | { 420 | Foo: 123, 421 | Bar: map[string]interface{}{ 422 | "abc": 123, 423 | }, 424 | }, 425 | { 426 | Foo: 456, 427 | Bar: map[string]interface{}{ 428 | "def": 456, 429 | "ghi": 789, 430 | }, 431 | }, 432 | } 433 | cloned := clone(allocator, arr).([2]*T) 434 | a.Use(&arr, &cloned) 435 | 436 | a.Equal(arr, cloned) 437 | 438 | // arr is not changed if cloned is mutated. 439 | cloned[0].Foo = 987 440 | cloned[1].Bar["ghi"] = 321 441 | a.Equal(arr[0].Foo, 123) 442 | a.Equal(arr[1].Bar["ghi"], 789) 443 | } 444 | 445 | func testCloneMap(t *testing.T, allocator *Allocator) { 446 | a := assert.New(t) 447 | m := map[string]*T{ 448 | "abc": { 449 | Foo: 123, 450 | Bar: map[string]interface{}{ 451 | "abc": 321, 452 | }, 453 | }, 454 | "def": { 455 | Foo: 456, 456 | Bar: map[string]interface{}{ 457 | "def": 789, 458 | }, 459 | }, 460 | } 461 | cloned := clone(allocator, m).(map[string]*T) 462 | a.Use(&m, &cloned) 463 | 464 | a.Equal(m, cloned) 465 | 466 | // m is not changed if cloned is mutated. 
467 | cloned["abc"].Foo = 321 468 | cloned["def"].Bar["def"] = 987 469 | a.Equal(m["abc"].Foo, 123) 470 | a.Equal(m["def"].Bar["def"], 789) 471 | } 472 | 473 | func testCloneBytesBuffer(t *testing.T, allocator *Allocator) { 474 | a := assert.New(t) 475 | buf := &bytes.Buffer{} 476 | buf.WriteString("Hello, world!") 477 | dummy := make([]byte, len("Hello, ")) 478 | buf.Read(dummy) 479 | cloned := clone(allocator, buf).(*bytes.Buffer) 480 | a.Use(&buf, &cloned) 481 | 482 | // Data must be cloned. 483 | a.Equal(buf.Len(), cloned.Len()) 484 | a.Equal(buf.String(), cloned.String()) 485 | 486 | // Data must not share the same address. 487 | from := buf.Bytes() 488 | to := cloned.Bytes() 489 | a.Assert(&from[0] != &to[0]) 490 | 491 | buf.WriteString("!!!!!") 492 | a.NotEqual(buf.Len(), cloned.Len()) 493 | a.NotEqual(buf.String(), cloned.String()) 494 | } 495 | 496 | type Simple struct { 497 | Foo int 498 | Bar string 499 | } 500 | 501 | type Unexported struct { 502 | insider 503 | } 504 | 505 | type myString string 506 | 507 | type insider struct { 508 | i int 509 | i8 int8 510 | i16 int16 511 | i32 int32 512 | i64 int64 513 | u uint 514 | u8 uint8 515 | u16 uint16 516 | u32 uint32 517 | u64 uint64 518 | uptr uintptr 519 | b bool 520 | s string 521 | f32 float32 522 | f64 float64 523 | c64 complex64 524 | c128 complex128 525 | sptr *myString 526 | arr [4]string 527 | arrPtr *[10]byte 528 | ch chan bool 529 | fn func(s string) string 530 | method func([]byte) (int, error) 531 | iface io.Writer 532 | ifaceScalar io.Writer 533 | _ interface{} 534 | m map[string]interface{} 535 | ptr *Unexported 536 | _ *Unexported 537 | slice []*Unexported 538 | st Simple 539 | unsafePointer unsafe.Pointer 540 | t reflect.Type 541 | 542 | Simple 543 | } 544 | 545 | type scalarWriter int8 546 | 547 | func (scalarWriter) Write(p []byte) (n int, err error) { return } 548 | 549 | func testCloneUnexportedFields(t *testing.T, allocator *Allocator) { 550 | a := assert.New(t) 551 | var myStr myString = "myString" 552 | unexported := &Unexported{ 553 | insider: insider{ 554 | i: -1, 555 | i8: -8, 556 | i16: -16, 557 | i32: -32, 558 | i64: -64, 559 | u: 1, 560 | u8: 8, 561 | u16: 16, 562 | u32: 32, 563 | u64: 64, 564 | uptr: uintptr(0xDEADC0DE), 565 | b: true, 566 | s: "hello", 567 | f32: 3.2, 568 | f64: 6.4, 569 | c64: complex(6, 4), 570 | c128: complex(12, 8), 571 | sptr: &myStr, 572 | arr: [4]string{ 573 | "a", "b", "c", "d", 574 | }, 575 | arrPtr: &[10]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, 576 | ch: make(chan bool, 5), 577 | fn: func(s string) string { 578 | return s + ", world!" 579 | }, 580 | method: bytes.NewBufferString("method").Write, 581 | iface: bytes.NewBufferString("interface"), 582 | ifaceScalar: scalarWriter(123), 583 | m: map[string]interface{}{ 584 | "key": "value", 585 | }, 586 | unsafePointer: unsafe.Pointer(&Unexported{}), 587 | st: Simple{ 588 | Foo: 123, 589 | Bar: "bar1", 590 | }, 591 | Simple: Simple{ 592 | Foo: 456, 593 | Bar: "bar2", 594 | }, 595 | t: reflect.TypeOf(&Simple{}), 596 | }, 597 | } 598 | unexported.m["loop"] = &unexported.m 599 | 600 | // Make pointer cycles. 601 | unexported.ptr = unexported 602 | unexported.slice = []*Unexported{unexported} 603 | cloned := cloneSlowly(allocator, unexported).(*Unexported) 604 | a.Use(&unexported, &cloned) 605 | 606 | // unsafe.Pointer is shadow copied. 
607 | 	a.Assert(cloned.unsafePointer == unexported.unsafePointer)
608 | 	unexported.unsafePointer = nil
609 | 	cloned.unsafePointer = nil
610 | 
611 | 	// chan cannot be compared, but its buffer can be verified.
612 | 	a.Equal(cap(cloned.ch), cap(unexported.ch))
613 | 	unexported.ch = nil
614 | 	cloned.ch = nil
615 | 
616 | 	// fn cannot be compared, but it can be called.
617 | 	a.Equal(cloned.fn("Hello"), unexported.fn("Hello"))
618 | 	unexported.fn = nil
619 | 	cloned.fn = nil
620 | 
621 | 	// method cannot be compared, but it can be called.
622 | 	a.Assert(cloned.method != nil)
623 | 	a.NilError(cloned.method([]byte("1234")))
624 | 	unexported.method = nil
625 | 	cloned.method = nil
626 | 
627 | 	// cloned.m["loop"] must be exactly the same map as cloned.m.
628 | 	a.Assert(reflect.ValueOf(cloned.m["loop"]).Elem().Pointer() == reflect.ValueOf(cloned.m).Pointer())
629 | 
630 | 	// Don't test this map in reflect.DeepEqual due to a bug in Go.
631 | 	// https://github.com/golang/go/issues/33907
632 | 	unexported.m["loop"] = nil
633 | 	cloned.m["loop"] = nil
634 | 
635 | 	// reflect.Type should be copied by value.
636 | 	a.Equal(reflect.ValueOf(cloned.t).Pointer(), reflect.ValueOf(unexported.t).Pointer())
637 | 
638 | 	// Finally, everything else should be equal.
639 | 	a.Equal(unexported, cloned)
640 | }
641 | 
642 | func testCloneUnexportedStructMethod(t *testing.T, allocator *Allocator) {
643 | 	a := assert.New(t)
644 | 
645 | 	// Another complex case: clone a struct and a map of structs instead of a ptr to a struct.
646 | 	st := insider{
647 | 		m: map[string]interface{}{
648 | 			"insider": insider{
649 | 				method: bytes.NewBufferString("method").Write,
650 | 			},
651 | 		},
652 | 	}
653 | 	cloned := clone(allocator, st).(insider)
654 | 	a.Use(&st, &cloned)
655 | 
656 | 	// For a struct copy, there is a tricky way to copy a method. Test it.
657 | 	a.Assert(cloned.m["insider"].(insider).method != nil)
658 | 	n, err := cloned.m["insider"].(insider).method([]byte("1234"))
659 | 	a.NilError(err)
660 | 	a.Equal(n, 4)
661 | }
662 | 
663 | func testCloneReflectType(t *testing.T, allocator *Allocator) {
664 | 	a := assert.New(t)
665 | 
666 | 	// reflect.rtype should not be deeply cloned.
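	// Cloning must return the identical type descriptor: reflect compares
	// types by pointer, so a deep copy would break t1 == t2 checks.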
667 | foo := reflect.TypeOf("foo") 668 | cloned := clone(allocator, foo).(reflect.Type) 669 | a.Use(&foo, &cloned) 670 | 671 | from := reflect.ValueOf(foo) 672 | to := reflect.ValueOf(cloned) 673 | 674 | a.Assert(from.Pointer() == to.Pointer()) 675 | } 676 | 677 | const testBytes = 332 678 | 679 | type skipFields struct { 680 | Int int 681 | IntSkip int `clone:"-"` 682 | privateInt64 int64 683 | privateUint64Skip uint64 `clone:"skip"` 684 | str string 685 | StrSkip string `clone:"-"` 686 | privateStrSkip string `clone:"-"` 687 | float float32 688 | FloatSkip float32 `clone:"-"` 689 | privateFloatSkip float64 `clone:"-"` 690 | t *T 691 | TSkip *T `clone:"-"` 692 | privateTSkip *T `clone:"-"` 693 | privateTShadow *T `clone:"shadowcopy"` 694 | privateTSlice []*T 695 | privateTSliceSkip []*T `clone:"-"` 696 | bytes [testBytes]byte 697 | bytesSkip [testBytes]byte `clone:"-"` 698 | } 699 | 700 | func testCloneSkipFields(t *testing.T, allocator *Allocator) { 701 | a := assert.New(t) 702 | 703 | from := &skipFields{ 704 | Int: 123, 705 | IntSkip: 456, 706 | privateInt64: 789, 707 | privateUint64Skip: 987, 708 | str: "abc", 709 | StrSkip: "def", 710 | privateStrSkip: "ghi", 711 | float: 3.2, 712 | FloatSkip: 6.4, 713 | privateFloatSkip: 9.6, 714 | t: &T{ 715 | Foo: 123, 716 | Bar: map[string]interface{}{ 717 | "abc": 123, 718 | }, 719 | }, 720 | TSkip: &T{ 721 | Foo: 456, 722 | Bar: map[string]interface{}{ 723 | "def": 456, 724 | "ghi": 789, 725 | }, 726 | }, 727 | privateTSkip: &T{ 728 | Foo: 789, 729 | Bar: map[string]interface{}{ 730 | "jkl": 987, 731 | "mno": 654, 732 | }, 733 | }, 734 | privateTShadow: &T{ 735 | Foo: 321, 736 | Bar: map[string]interface{}{ 737 | "pqr": 321, 738 | "stu": 654, 739 | }, 740 | }, 741 | privateTSlice: []*T{ 742 | { 743 | Foo: 123, 744 | Bar: map[string]interface{}{ 745 | "abc": 123, 746 | }, 747 | }, 748 | { 749 | Foo: 456, 750 | Bar: map[string]interface{}{ 751 | "def": 456, 752 | "ghi": 789, 753 | }, 754 | }, 755 | }, 756 | privateTSliceSkip: []*T{ 757 | { 758 | Foo: 789, 759 | Bar: map[string]interface{}{ 760 | "jkl": 987, 761 | "mno": 654, 762 | }, 763 | }, 764 | { 765 | Foo: 321, 766 | Bar: map[string]interface{}{ 767 | "pqr": 321, 768 | "stu": 654, 769 | }, 770 | }, 771 | }, 772 | } 773 | 774 | for i := 0; i < testBytes; i++ { 775 | from.bytes[i] = byte(3 + (i % 128)) 776 | from.bytesSkip[i] = byte(3 + (i % 128)) 777 | } 778 | 779 | to := clone(allocator, from).(*skipFields) 780 | 781 | a.Equal(from.Int, to.Int) 782 | a.Equal(to.IntSkip, int(0)) 783 | a.Equal(from.privateInt64, to.privateInt64) 784 | a.Equal(to.privateUint64Skip, uint64(0)) 785 | a.Equal(from.str, to.str) 786 | a.Equal(to.StrSkip, "") 787 | a.Equal(to.privateStrSkip, "") 788 | a.Equal(from.float, to.float) 789 | a.Equal(to.FloatSkip, float32(0)) 790 | a.Equal(to.privateFloatSkip, float64(0)) 791 | a.Equal(from.t, to.t) 792 | a.Equal(to.TSkip, (*T)(nil)) 793 | a.Equal(to.privateTSkip, (*T)(nil)) 794 | a.Assert(from.privateTShadow == to.privateTShadow) 795 | a.Equal(from.privateTSlice, to.privateTSlice) 796 | a.Equal(to.privateTSliceSkip, ([]*T)(nil)) 797 | a.Equal(from.bytes, to.bytes) 798 | a.Equal(to.bytesSkip, [testBytes]byte{}) 799 | } 800 | -------------------------------------------------------------------------------- /clone.go: -------------------------------------------------------------------------------- 1 | // Copyright 2019 Huan Du. All rights reserved. 2 | // Licensed under the MIT license that can be found in the LICENSE file. 
3 | 
4 | // Package clone provides functions to deep clone any Go data.
5 | // It also provides a wrapper to protect a pointer from any unexpected mutation.
6 | package clone
7 | 
8 | import (
9 | 	"fmt"
10 | 	"reflect"
11 | 	"unsafe"
12 | )
13 | 
14 | var heapCloneState = &cloneState{
15 | 	allocator: defaultAllocator,
16 | }
17 | var cloner = MakeCloner(defaultAllocator)
18 | 
19 | const zeroBytesCount = 256
20 | 
21 | var zeroBytes [zeroBytesCount]byte
22 | var zero = zeroBytes[:]
23 | 
24 | // Clone recursively deep clones v into a new value on the heap.
25 | // It assumes that there is no pointer cycle in v,
26 | // e.g. a pointer in v that points back to v itself.
27 | // If there is a pointer cycle, use Slowly instead.
28 | //
29 | // Clone allocates memory and deeply copies values inside v in depth-first sequence.
30 | // There are a few special rules for the following types.
31 | //
32 | // - Scalar types: all number-like types are copied by value.
33 | // - func: Copied by value as func is an opaque pointer at runtime.
34 | // - string: Copied by value as string is immutable by design.
35 | // - unsafe.Pointer: Copied by value as we don't know what's in it.
36 | // - chan: A new empty chan is created as we cannot read data inside the old chan.
37 | //
38 | // Unlike many other packages, Clone is able to clone unexported fields of any struct.
39 | // Use this feature wisely.
40 | func Clone(v interface{}) interface{} {
41 | 	return cloner.Clone(v)
42 | }
43 | 
44 | func clone(allocator *Allocator, v interface{}) interface{} {
45 | 	if v == nil {
46 | 		return nil
47 | 	}
48 | 
49 | 	val := reflect.ValueOf(v)
50 | 	cloned := allocator.clone(val, false)
51 | 	return cloned.Interface()
52 | }
53 | 
54 | // Slowly recursively deep clones v into a new value on the heap.
55 | // It marks all cloned values internally, so it can clone v even if v contains pointer cycles.
56 | //
57 | // Slowly works exactly the same as Clone. See Clone doc for more details.
58 | func Slowly(v interface{}) interface{} {
59 | 	return cloner.CloneSlowly(v)
60 | }
61 | 
62 | func cloneSlowly(allocator *Allocator, v interface{}) interface{} {
63 | 	if v == nil {
64 | 		return nil
65 | 	}
66 | 
67 | 	val := reflect.ValueOf(v)
68 | 	cloned := allocator.cloneSlowly(val, false)
69 | 	return cloned.Interface()
70 | }
71 | 
72 | type cloneState struct {
73 | 	allocator *Allocator
74 | 	visited   visitMap
75 | 	invalid   invalidPointers
76 | 
77 | 	// The value that should not be cloned by a custom func.
78 | 	// It's useful to avoid an infinite loop when a custom func calls allocator.Clone().
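	//
	// A sketch of the loop being avoided, assuming a custom func registered
	// through SetCustomFunc (MyType is a made-up type for illustration):
	//
	//	SetCustomFunc(reflect.TypeOf(MyType{}), func(allocator *Allocator, old, new reflect.Value) {
	//	    // Without skipCustomFuncValue, this nested Clone would re-enter
	//	    // the same custom func with the same value and never terminate.
	//	    new.Set(allocator.Clone(old))
	//	})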
79 | skipCustomFuncValue reflect.Value 80 | } 81 | 82 | type visit struct { 83 | p uintptr 84 | extra int 85 | t reflect.Type 86 | } 87 | 88 | type visitMap map[visit]reflect.Value 89 | type invalidPointers map[visit]reflect.Value 90 | 91 | func (state *cloneState) clone(v reflect.Value) reflect.Value { 92 | if state.allocator.isScalar(v.Kind()) { 93 | return copyScalarValue(v) 94 | } 95 | 96 | switch v.Kind() { 97 | case reflect.Array: 98 | return state.cloneArray(v) 99 | case reflect.Chan: 100 | return state.allocator.MakeChan(v.Type(), v.Cap()) 101 | case reflect.Interface: 102 | return state.cloneInterface(v) 103 | case reflect.Map: 104 | return state.cloneMap(v) 105 | case reflect.Ptr: 106 | return state.clonePtr(v) 107 | case reflect.Slice: 108 | return state.cloneSlice(v) 109 | case reflect.Struct: 110 | return state.cloneStruct(v) 111 | case reflect.String: 112 | return state.cloneString(v) 113 | default: 114 | panic(fmt.Errorf("go-clone: unsupported type `%v`", v.Type())) 115 | } 116 | } 117 | 118 | func (state *cloneState) cloneArray(v reflect.Value) reflect.Value { 119 | dst := state.allocator.New(v.Type()) 120 | state.copyArray(v, dst) 121 | return dst.Elem() 122 | } 123 | 124 | func (state *cloneState) copyArray(src, nv reflect.Value) { 125 | p := unsafe.Pointer(nv.Pointer()) // dst must be a Ptr. 126 | dst := nv.Elem() 127 | num := src.Len() 128 | 129 | if state.allocator.isScalar(src.Type().Elem().Kind()) { 130 | shadowCopy(src, p) 131 | return 132 | } 133 | 134 | for i := 0; i < num; i++ { 135 | dst.Index(i).Set(state.clone(src.Index(i))) 136 | } 137 | } 138 | 139 | func (state *cloneState) cloneInterface(v reflect.Value) reflect.Value { 140 | if v.IsNil() { 141 | return reflect.Zero(v.Type()) 142 | } 143 | 144 | t := v.Type() 145 | elem := v.Elem() 146 | return state.clone(elem).Convert(elem.Type()).Convert(t) 147 | } 148 | 149 | func (state *cloneState) cloneMap(v reflect.Value) reflect.Value { 150 | if v.IsNil() { 151 | return reflect.Zero(v.Type()) 152 | } 153 | 154 | t := v.Type() 155 | 156 | if state.visited != nil { 157 | vst := visit{ 158 | p: v.Pointer(), 159 | t: t, 160 | } 161 | 162 | if val, ok := state.visited[vst]; ok { 163 | return val 164 | } 165 | } 166 | 167 | nv := state.allocator.MakeMap(t, v.Len()) 168 | 169 | if state.visited != nil { 170 | vst := visit{ 171 | p: v.Pointer(), 172 | t: t, 173 | } 174 | state.visited[vst] = nv 175 | } 176 | 177 | for iter := mapIter(v); iter.Next(); { 178 | key := state.clone(iter.Key()) 179 | value := state.clone(iter.Value()) 180 | nv.SetMapIndex(key, value) 181 | } 182 | 183 | return nv 184 | } 185 | 186 | func (state *cloneState) clonePtr(v reflect.Value) reflect.Value { 187 | if v.IsNil() { 188 | return reflect.Zero(v.Type()) 189 | } 190 | 191 | t := v.Type() 192 | 193 | if state.allocator.isOpaquePointer(t) { 194 | if v.CanInterface() { 195 | return v 196 | } 197 | 198 | ptr := state.allocator.New(t) 199 | p := unsafe.Pointer(ptr.Pointer()) 200 | shadowCopy(v, p) 201 | return ptr.Elem() 202 | } 203 | 204 | if state.visited != nil { 205 | vst := visit{ 206 | p: v.Pointer(), 207 | t: t, 208 | } 209 | 210 | if val, ok := state.visited[vst]; ok { 211 | return val 212 | } 213 | } 214 | 215 | src := v.Elem() 216 | elemType := src.Type() 217 | elemKind := src.Kind() 218 | nv := state.allocator.New(elemType) 219 | 220 | if state.visited != nil { 221 | vst := visit{ 222 | p: v.Pointer(), 223 | t: t, 224 | } 225 | state.visited[vst] = nv 226 | } 227 | 228 | switch elemKind { 229 | case reflect.Struct: 230 | 
state.copyStruct(src, nv)
231 | 	case reflect.Array:
232 | 		state.copyArray(src, nv)
233 | 	default:
234 | 		nv.Elem().Set(state.clone(src))
235 | 	}
236 | 
237 | 	// If this pointer is the address of a struct field and it's a cycle pointer,
238 | 	// its visited entry may have been updated while cloning the element; re-read it.
239 | 	if state.visited != nil {
240 | 		vst := visit{
241 | 			p: v.Pointer(),
242 | 			t: t,
243 | 		}
244 | 		nv = state.visited[vst]
245 | 	}
246 | 
247 | 	return nv
248 | }
249 | 
250 | func (state *cloneState) cloneSlice(v reflect.Value) reflect.Value {
251 | 	if v.IsNil() {
252 | 		return reflect.Zero(v.Type())
253 | 	}
254 | 
255 | 	t := v.Type()
256 | 	num := v.Len()
257 | 
258 | 	if state.visited != nil {
259 | 		vst := visit{
260 | 			p:     v.Pointer(),
261 | 			extra: num,
262 | 			t:     t,
263 | 		}
264 | 
265 | 		if val, ok := state.visited[vst]; ok {
266 | 			return val
267 | 		}
268 | 	}
269 | 
270 | 	c := v.Cap()
271 | 	nv := state.allocator.MakeSlice(t, num, c)
272 | 
273 | 	if state.visited != nil {
274 | 		vst := visit{
275 | 			p:     v.Pointer(),
276 | 			extra: num,
277 | 			t:     t,
278 | 		}
279 | 		state.visited[vst] = nv
280 | 	}
281 | 
282 | 	// For scalar slices, copy the underlying values directly.
283 | 	if state.allocator.isScalar(t.Elem().Kind()) {
284 | 		src := unsafe.Pointer(v.Pointer())
285 | 		dst := unsafe.Pointer(nv.Pointer())
286 | 		sz := int(t.Elem().Size())
287 | 		l := num * sz
288 | 		cc := c * sz
289 | 		copy((*[maxByteSize]byte)(dst)[:l:cc], (*[maxByteSize]byte)(src)[:l:cc])
290 | 	} else {
291 | 		for i := 0; i < num; i++ {
292 | 			nv.Index(i).Set(state.clone(v.Index(i)))
293 | 		}
294 | 	}
295 | 
296 | 	return nv
297 | }
298 | 
299 | func (state *cloneState) cloneStruct(v reflect.Value) reflect.Value {
300 | 	t := v.Type()
301 | 	nv := state.allocator.New(t)
302 | 	state.copyStruct(v, nv)
303 | 	return nv.Elem()
304 | }
305 | 
306 | var typeOfByteSlice = reflect.TypeOf([]byte(nil))
307 | 
308 | func (state *cloneState) cloneString(v reflect.Value) reflect.Value {
309 | 	t := v.Type()
310 | 	l := v.Len()
311 | 	data := state.allocator.MakeSlice(typeOfByteSlice, l, l)
312 | 
313 | 	// v may be an unexported struct field which cannot be read through Interface().
314 | 	if !v.CanInterface() {
315 | 		v = reflect.ValueOf(v.String())
316 | 	}
317 | 
318 | 	reflect.Copy(data, v)
319 | 
320 | 	nv := state.allocator.New(t)
321 | 	slice := data.Interface().([]byte)
322 | 	*(*stringHeader)(unsafe.Pointer(nv.Pointer())) = *(*stringHeader)(unsafe.Pointer(&slice))
323 | 
324 | 	return nv.Elem()
325 | }
326 | 
327 | func (state *cloneState) copyStruct(src, nv reflect.Value) {
328 | 	t := src.Type()
329 | 	st := state.allocator.loadStructType(t)
330 | 	ptr := unsafe.Pointer(nv.Pointer())
331 | 
332 | 	if st.Init(state.allocator, src, nv, state.skipCustomFuncValue == src) {
333 | 		return
334 | 	}
335 | 
336 | 	for _, pf := range st.ZeroFields {
337 | 		p := unsafe.Pointer(uintptr(ptr) + pf.Offset)
338 | 		sz := pf.Size
339 | 
340 | 		for sz > zeroBytesCount {
341 | 			copy((*[zeroBytesCount]byte)(p)[:zeroBytesCount:zeroBytesCount], zero)
342 | 			sz -= zeroBytesCount
343 | 			p = unsafe.Pointer(uintptr(p) + zeroBytesCount)
344 | 		}
345 | 
346 | 		copy((*[zeroBytesCount]byte)(p)[:sz:sz], zero)
347 | 	}
348 | 
349 | 	for _, pf := range st.PointerFields {
350 | 		i := int(pf.Index)
351 | 		p := unsafe.Pointer(uintptr(ptr) + pf.Offset)
352 | 		field := src.Field(i)
353 | 
354 | 		// This field can be referenced by a pointer or interface inside itself.
355 | 		// Put the pointer to this field into visited to avoid any error.
356 | 		//
357 | 		// See https://github.com/huandu/go-clone/issues/3.
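		//
		// A minimal shape that reaches this path (illustrative only):
		//
		//	type node struct{ next *node }
		//	n := &node{}
		//	n.next = n // &n.next is a cycle pointer back into the struct being cloned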
358 | if state.visited != nil && field.CanAddr() { 359 | ft := field.Type() 360 | fp := field.Addr().Pointer() 361 | vst := visit{ 362 | p: fp, 363 | t: reflect.PtrTo(ft), 364 | } 365 | nv := reflect.NewAt(ft, p) 366 | 367 | // The address of this field was visited, so fp must be a cycle pointer. 368 | // As this field is not fully cloned, the val stored in visited[visit] must be wrong. 369 | // It must be replaced by nv which will be the right value (it's incomplete right now). 370 | // 371 | // Unfortunately, if the val was used by previous clone routines, 372 | // there is no easy way to fix wrong values - all pointers must be traversed and fixed. 373 | if val, ok := state.visited[vst]; ok { 374 | state.invalid[visit{ 375 | p: val.Pointer(), 376 | t: vst.t, 377 | }] = nv 378 | } 379 | 380 | state.visited[vst] = nv 381 | } 382 | 383 | v := state.clone(field) 384 | shadowCopy(v, p) 385 | } 386 | } 387 | 388 | var typeOfString = reflect.TypeOf("") 389 | 390 | func shadowCopy(src reflect.Value, p unsafe.Pointer) { 391 | switch src.Kind() { 392 | case reflect.Bool: 393 | *(*bool)(p) = src.Bool() 394 | case reflect.Int: 395 | *(*int)(p) = int(src.Int()) 396 | case reflect.Int8: 397 | *(*int8)(p) = int8(src.Int()) 398 | case reflect.Int16: 399 | *(*int16)(p) = int16(src.Int()) 400 | case reflect.Int32: 401 | *(*int32)(p) = int32(src.Int()) 402 | case reflect.Int64: 403 | *(*int64)(p) = src.Int() 404 | case reflect.Uint: 405 | *(*uint)(p) = uint(src.Uint()) 406 | case reflect.Uint8: 407 | *(*uint8)(p) = uint8(src.Uint()) 408 | case reflect.Uint16: 409 | *(*uint16)(p) = uint16(src.Uint()) 410 | case reflect.Uint32: 411 | *(*uint32)(p) = uint32(src.Uint()) 412 | case reflect.Uint64: 413 | *(*uint64)(p) = src.Uint() 414 | case reflect.Uintptr: 415 | *(*uintptr)(p) = uintptr(src.Uint()) 416 | case reflect.Float32: 417 | *(*float32)(p) = float32(src.Float()) 418 | case reflect.Float64: 419 | *(*float64)(p) = src.Float() 420 | case reflect.Complex64: 421 | *(*complex64)(p) = complex64(src.Complex()) 422 | case reflect.Complex128: 423 | *(*complex128)(p) = src.Complex() 424 | 425 | case reflect.Array: 426 | t := src.Type() 427 | 428 | if src.CanAddr() { 429 | srcPtr := unsafe.Pointer(src.UnsafeAddr()) 430 | sz := t.Size() 431 | copy((*[maxByteSize]byte)(p)[:sz:sz], (*[maxByteSize]byte)(srcPtr)[:sz:sz]) 432 | return 433 | } 434 | 435 | val := reflect.NewAt(t, p).Elem() 436 | 437 | if src.CanInterface() { 438 | val.Set(src) 439 | return 440 | } 441 | 442 | sz := t.Elem().Size() 443 | num := src.Len() 444 | 445 | for i := 0; i < num; i++ { 446 | elemPtr := unsafe.Pointer(uintptr(p) + uintptr(i)*sz) 447 | shadowCopy(src.Index(i), elemPtr) 448 | } 449 | case reflect.Chan: 450 | *((*uintptr)(p)) = src.Pointer() 451 | case reflect.Func: 452 | t := src.Type() 453 | src = copyScalarValue(src) 454 | val := reflect.NewAt(t, p).Elem() 455 | val.Set(src) 456 | case reflect.Interface: 457 | *((*interfaceData)(p)) = parseReflectValue(src) 458 | case reflect.Map: 459 | *((*uintptr)(p)) = src.Pointer() 460 | case reflect.Ptr: 461 | *((*uintptr)(p)) = src.Pointer() 462 | case reflect.Slice: 463 | *(*sliceHeader)(p) = sliceHeader{ 464 | Data: src.Pointer(), 465 | Len: src.Len(), 466 | Cap: src.Cap(), 467 | } 468 | case reflect.String: 469 | s := src.String() 470 | val := reflect.NewAt(typeOfString, p).Elem() 471 | val.SetString(s) 472 | case reflect.Struct: 473 | t := src.Type() 474 | val := reflect.NewAt(t, p).Elem() 475 | 476 | if src.CanInterface() { 477 | val.Set(src) 478 | return 479 | } 480 | 481 | num := 
t.NumField()
482 | 
483 | 		for i := 0; i < num; i++ {
484 | 			field := t.Field(i)
485 | 			fieldPtr := unsafe.Pointer(uintptr(p) + field.Offset)
486 | 			shadowCopy(src.Field(i), fieldPtr)
487 | 		}
488 | 	case reflect.UnsafePointer:
489 | 		// There is no way to deep copy an unsafe.Pointer value.
490 | 		*((*uintptr)(p)) = src.Pointer()
491 | 
492 | 	default:
493 | 		panic(fmt.Errorf("go-clone: impossible type `%v` when cloning private field", src.Type()))
494 | 	}
495 | }
496 | 
497 | // fix traverses v to update all pointer values in state.invalid.
498 | func (state *cloneState) fix(v reflect.Value) {
499 | 	if state == nil || len(state.invalid) == 0 {
500 | 		return
501 | 	}
502 | 
503 | 	fix := &fixState{
504 | 		allocator: state.allocator,
505 | 		fixed:     fixMap{},
506 | 		invalid:   state.invalid,
507 | 	}
508 | 	fix.fix(v)
509 | }
510 | 
511 | type fixState struct {
512 | 	allocator *Allocator
513 | 	fixed     fixMap
514 | 	invalid   invalidPointers
515 | }
516 | 
517 | type fixMap map[visit]struct{}
518 | 
519 | func (fix *fixState) new(t reflect.Type) reflect.Value {
520 | 	return fix.allocator.New(t)
521 | }
522 | 
523 | func (fix *fixState) fix(v reflect.Value) (copied reflect.Value, changed int) {
524 | 	if fix.allocator.isScalar(v.Kind()) {
525 | 		return
526 | 	}
527 | 
528 | 	switch v.Kind() {
529 | 	case reflect.Array:
530 | 		return fix.fixArray(v)
531 | 	case reflect.Chan:
532 | 		// Do nothing.
533 | 		return
534 | 	case reflect.Interface:
535 | 		return fix.fixInterface(v)
536 | 	case reflect.Map:
537 | 		return fix.fixMap(v)
538 | 	case reflect.Ptr:
539 | 		return fix.fixPtr(v)
540 | 	case reflect.Slice:
541 | 		return fix.fixSlice(v)
542 | 	case reflect.Struct:
543 | 		return fix.fixStruct(v)
544 | 	case reflect.String:
545 | 		// Do nothing.
546 | 		return
547 | 	default:
548 | 		panic(fmt.Errorf("go-clone: unsupported type `%v`", v.Type()))
549 | 	}
550 | }
551 | 
552 | func (fix *fixState) fixArray(v reflect.Value) (copied reflect.Value, changed int) {
553 | 	t := v.Type()
554 | 	et := t.Elem()
555 | 	kind := et.Kind()
556 | 
557 | 	if fix.allocator.isScalar(kind) {
558 | 		return
559 | 	}
560 | 
561 | 	l := v.Len()
562 | 
563 | 	for i := 0; i < l; i++ {
564 | 		elem := v.Index(i)
565 | 
566 | 		if kind == reflect.Ptr {
567 | 			vst := visit{
568 | 				p: elem.Pointer(),
569 | 				t: et,
570 | 			}
571 | 
572 | 			if nv, ok := fix.invalid[vst]; ok {
573 | 				// If elem cannot be set, v must be copied to make it settable.
574 | 				// Don't do it unless there is no other choice.
575 | 				if !elem.CanSet() {
576 | 					copied = fix.new(t).Elem()
577 | 					shadowCopy(v, unsafe.Pointer(copied.Addr().Pointer()))
578 | 					_, changed = fix.fixArray(copied)
579 | 					return
580 | 				}
581 | 
582 | 				elem.Set(nv)
583 | 				changed++
584 | 				continue
585 | 			}
586 | 		}
587 | 
588 | 		fixed, c := fix.fix(elem)
589 | 		changed += c
590 | 
591 | 		if fixed.IsValid() {
592 | 			// If elem cannot be set, v must be copied to make it settable.
593 | 			// Don't do it unless there is no other choice.
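			// (Array values reached through a map element or an interface are
			// not addressable in reflect, so their elements cannot be set in place.)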
594 | if !elem.CanSet() { 595 | copied = fix.new(t).Elem() 596 | shadowCopy(v, unsafe.Pointer(copied.Addr().Pointer())) 597 | _, changed = fix.fixArray(copied) 598 | return 599 | } 600 | 601 | elem.Set(fixed) 602 | } 603 | } 604 | 605 | return 606 | } 607 | 608 | func (fix *fixState) fixInterface(v reflect.Value) (copied reflect.Value, changed int) { 609 | if v.IsNil() { 610 | return 611 | } 612 | 613 | elem := v.Elem() 614 | t := elem.Type() 615 | kind := elem.Kind() 616 | 617 | if kind == reflect.Ptr { 618 | vst := visit{ 619 | p: elem.Pointer(), 620 | t: t, 621 | } 622 | 623 | if nv, ok := fix.invalid[vst]; ok { 624 | copied = nv.Convert(v.Type()) 625 | changed++ 626 | return 627 | } 628 | } 629 | 630 | copied, changed = fix.fix(elem) 631 | 632 | if copied.IsValid() { 633 | copied = copied.Convert(v.Type()) 634 | } 635 | 636 | return 637 | } 638 | 639 | func (fix *fixState) fixMap(v reflect.Value) (copied reflect.Value, changed int) { 640 | if v.IsNil() { 641 | return 642 | } 643 | 644 | t := v.Type() 645 | vst := visit{ 646 | p: v.Pointer(), 647 | t: t, 648 | } 649 | 650 | if _, ok := fix.fixed[vst]; ok { 651 | return 652 | } 653 | 654 | fix.fixed[vst] = struct{}{} 655 | 656 | kt := t.Key() 657 | et := t.Elem() 658 | keyKind := kt.Kind() 659 | elemKind := et.Kind() 660 | 661 | if isScalar := fix.allocator.isScalar; isScalar(keyKind) && isScalar(elemKind) { 662 | return 663 | } 664 | 665 | invalidKeys := map[reflect.Value][2]reflect.Value{} 666 | 667 | for iter := mapIter(v); iter.Next(); { 668 | key := iter.Key() 669 | elem := iter.Value() 670 | var fixed reflect.Value 671 | c := 0 672 | 673 | if elemKind == reflect.Ptr { 674 | vst := visit{ 675 | p: elem.Pointer(), 676 | t: et, 677 | } 678 | 679 | if nv, ok := fix.invalid[vst]; ok { 680 | fixed = nv 681 | c++ 682 | } else { 683 | fixed, c = fix.fixPtr(elem) 684 | } 685 | } else { 686 | fixed, c = fix.fix(elem) 687 | } 688 | 689 | changed += c 690 | c = 0 691 | 692 | if fixed.IsValid() { 693 | v = forceSetMapIndex(v, key, fixed) 694 | elem = fixed 695 | fixed = reflect.Value{} 696 | } 697 | 698 | if keyKind == reflect.Ptr { 699 | vst := visit{ 700 | p: key.Pointer(), 701 | t: kt, 702 | } 703 | 704 | if nv, ok := fix.invalid[vst]; ok { 705 | fixed = nv 706 | c++ 707 | } else { 708 | fixed, c = fix.fixPtr(key) 709 | } 710 | } else { 711 | fixed, c = fix.fix(key) 712 | } 713 | 714 | changed += c 715 | 716 | // Key cannot be changed immediately inside map range iteration. 717 | // Do it later. 
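		// (Entries added to a map during iteration may or may not be visited
		// later in that same iteration, so in-loop key replacement would be unpredictable.)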
718 | 		if fixed.IsValid() {
719 | 			invalidKeys[key] = [2]reflect.Value{fixed, elem}
720 | 		}
721 | 	}
722 | 
723 | 	for key, kv := range invalidKeys {
724 | 		v = forceSetMapIndex(v, key, reflect.Value{})
725 | 		v = forceSetMapIndex(v, kv[0], kv[1])
726 | 	}
727 | 
728 | 	return
729 | }
730 | 
731 | func forceSetMapIndex(v, key, elem reflect.Value) (nv reflect.Value) {
732 | 	nv = v
733 | 
734 | 	if !v.CanInterface() {
735 | 		nv = forceClearROFlag(v)
736 | 	}
737 | 
738 | 	if !key.CanInterface() {
739 | 		key = forceClearROFlag(key)
740 | 	}
741 | 
742 | 	if elem.IsValid() && !elem.CanInterface() {
743 | 		elem = forceClearROFlag(elem)
744 | 	}
745 | 
746 | 	nv.SetMapIndex(key, elem)
747 | 	return
748 | }
749 | 
750 | func (fix *fixState) fixPtr(v reflect.Value) (copied reflect.Value, changed int) {
751 | 	if v.IsNil() {
752 | 		return
753 | 	}
754 | 
755 | 	vst := visit{
756 | 		p: v.Pointer(),
757 | 		t: v.Type(),
758 | 	}
759 | 
760 | 	if _, ok := fix.invalid[vst]; ok {
761 | 		panic(fmt.Errorf("go-clone: invalid pointers must have been fixed in other methods"))
762 | 	}
763 | 
764 | 	if _, ok := fix.fixed[vst]; ok {
765 | 		return
766 | 	}
767 | 
768 | 	fix.fixed[vst] = struct{}{}
769 | 
770 | 	elem := v.Elem()
771 | 	_, changed = fix.fix(elem)
772 | 	return
773 | }
774 | 
775 | func (fix *fixState) fixSlice(v reflect.Value) (copied reflect.Value, changed int) {
776 | 	if v.IsNil() {
777 | 		return
778 | 	}
779 | 
780 | 	t := v.Type()
781 | 	et := t.Elem()
782 | 	kind := et.Kind()
783 | 
784 | 	if fix.allocator.isScalar(kind) {
785 | 		return
786 | 	}
787 | 
788 | 	l := v.Len()
789 | 	p := unsafe.Pointer(v.Pointer())
790 | 	vst := visit{
791 | 		p:     uintptr(p),
792 | 		extra: l,
793 | 		t:     t,
794 | 	}
795 | 
796 | 	if _, ok := fix.fixed[vst]; ok {
797 | 		return
798 | 	}
799 | 
800 | 	fix.fixed[vst] = struct{}{}
801 | 
802 | 	for i := 0; i < l; i++ {
803 | 		elem := v.Index(i)
804 | 		var fixed reflect.Value
805 | 		c := 0
806 | 
807 | 		if kind == reflect.Ptr {
808 | 			vst := visit{
809 | 				p: elem.Pointer(),
810 | 				t: et,
811 | 			}
812 | 
813 | 			if nv, ok := fix.invalid[vst]; ok {
814 | 				fixed = nv
815 | 			} else {
816 | 				fixed, c = fix.fixPtr(elem)
817 | 			}
818 | 		} else {
819 | 			fixed, c = fix.fix(elem)
820 | 		}
821 | 
822 | 		changed += c
823 | 
824 | 		if fixed.IsValid() {
825 | 			sz := et.Size()
826 | 			elemPtr := unsafe.Pointer(uintptr(p) + sz*uintptr(i))
827 | 			shadowCopy(fixed, elemPtr)
828 | 		}
829 | 	}
830 | 
831 | 	return
832 | }
833 | 
834 | func (fix *fixState) fixStruct(v reflect.Value) (copied reflect.Value, changed int) {
835 | 	t := v.Type()
836 | 	st := fix.allocator.loadStructType(t)
837 | 
838 | 	if len(st.PointerFields) == 0 {
839 | 		return
840 | 	}
841 | 
842 | 	for _, pf := range st.PointerFields {
843 | 		i := int(pf.Index)
844 | 		field := v.Field(i)
845 | 
846 | 		ft := field.Type()
847 | 
848 | 		if ft.Kind() == reflect.Ptr {
849 | 			vst := visit{
850 | 				p: field.Pointer(),
851 | 				t: ft,
852 | 			}
853 | 
854 | 			if nv, ok := fix.invalid[vst]; ok {
855 | 				// If v is not addressable, a new struct must be allocated.
856 | 				// Don't do it unless there is no other choice.
857 | 				if !v.CanAddr() {
858 | 					copied = fix.new(t).Elem()
859 | 					shadowCopy(v, unsafe.Pointer(copied.Addr().Pointer()))
860 | 					_, changed = fix.fixStruct(copied)
861 | 					return
862 | 				}
863 | 
864 | 				ptr := unsafe.Pointer(v.Addr().Pointer())
865 | 				p := unsafe.Pointer(uintptr(ptr) + pf.Offset)
866 | 				shadowCopy(nv, p)
867 | 				continue
868 | 			}
869 | 		}
870 | 
871 | 		fixed, c := fix.fix(field)
872 | 		changed += c
873 | 
874 | 		if fixed.IsValid() {
875 | 			// If v is not addressable, a new struct must be allocated.
876 | 			// Don't do it unless there is no other choice.
877 | if !v.CanAddr() { 878 | copied = fix.new(t).Elem() 879 | shadowCopy(v, unsafe.Pointer(copied.Addr().Pointer())) 880 | _, changed = fix.fixStruct(copied) 881 | return 882 | } 883 | 884 | ptr := unsafe.Pointer(v.Addr().Pointer()) 885 | p := unsafe.Pointer(uintptr(ptr) + pf.Offset) 886 | shadowCopy(fixed, p) 887 | } 888 | } 889 | 890 | return 891 | } 892 | --------------------------------------------------------------------------------
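For reference, a minimal usage sketch of the two entry points defined in clone.go above. It is illustrative only: ListNode is a made-up type, and the remark about Clone follows from its documented no-cycle assumption.

    package main

    import (
        "fmt"

        "github.com/huandu/go-clone"
    )

    type ListNode struct {
        Data int
        Next *ListNode
    }

    func main() {
        node := &ListNode{Data: 42}
        node.Next = node // a pointer cycle: node points back to itself

        // Slowly tracks visited values, so it preserves the cycle in the clone.
        cloned := clone.Slowly(node).(*ListNode)
        fmt.Println(cloned != node)        // true: a new value was allocated
        fmt.Println(cloned.Next == cloned) // true: the cycle now points at the clone

        // clone.Clone(node) must not be called here: Clone assumes there is
        // no pointer cycle in its input and would recurse without end.
    }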