├── .github ├── ISSUE_TEMPLATE │ ├── config.yml │ └── open_an_issue.md ├── config.yml └── workflows │ ├── go-check.yml │ ├── go-test-config.json │ ├── go-test.yml │ ├── release-check.yml │ ├── releaser.yml │ ├── stale.yml │ └── tagpush.yml ├── .gitignore ├── .gx └── lastpubver ├── LICENSE ├── Makefile ├── README.md ├── cli_test.go ├── config ├── util.go ├── validate.go └── validate_test.go ├── convert ├── checks_test.go ├── convert.go ├── convert_test.go ├── copy.go ├── copy_test.go ├── prepare.go └── util.go ├── go.mod ├── go.sum ├── main.go ├── repo ├── badgerds.go ├── const.go ├── flatfsds.go ├── levelds.go ├── logds.go ├── measureds.go ├── memds.go ├── mountds.go └── open.go ├── revert ├── log.go ├── log_test.go ├── revert.go ├── revert_test.go ├── steps.go └── steps_test.go ├── strategy ├── strategies.go ├── strategies_test.go ├── strategy.go ├── strategy_test.go └── strategy_util.go ├── testfiles ├── absPathSpec ├── badgerSpec ├── defaultSpec ├── equalSpec ├── invalidSpec ├── lossySpec ├── reusePathSpec ├── singleSpec ├── skipableDstSpec └── skipableSpec ├── testutil ├── basictest.go ├── ds_utils.go ├── repo.go └── repo_test.go └── version.json /.github/ISSUE_TEMPLATE/config.yml: -------------------------------------------------------------------------------- 1 | blank_issues_enabled: false 2 | contact_links: 3 | - name: Getting Help on IPFS 4 | url: https://ipfs.io/help 5 | about: All information about how and where to get help on IPFS. 6 | - name: IPFS Official Forum 7 | url: https://discuss.ipfs.io 8 | about: Please post general questions, support requests, and discussions here. 9 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/open_an_issue.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Open an issue 3 | about: Only for actionable issues relevant to this repository. 4 | title: '' 5 | labels: need/triage 6 | assignees: '' 7 | 8 | --- 9 | 20 | -------------------------------------------------------------------------------- /.github/config.yml: -------------------------------------------------------------------------------- 1 | # Configuration for welcome - https://github.com/behaviorbot/welcome 2 | 3 | # Configuration for new-issue-welcome - https://github.com/behaviorbot/new-issue-welcome 4 | # Comment to be posted to on first time issues 5 | newIssueWelcomeComment: > 6 | Thank you for submitting your first issue to this repository! A maintainer 7 | will be here shortly to triage and review. 8 | 9 | In the meantime, please double-check that you have provided all the 10 | necessary information to make this process easy! Any information that can 11 | help save additional round trips is useful! We currently aim to give 12 | initial feedback within **two business days**. If this does not happen, feel 13 | free to leave a comment. 14 | 15 | Please keep an eye on how this issue will be labeled, as labels give an 16 | overview of priorities, assignments and additional actions requested by the 17 | maintainers: 18 | 19 | - "Priority" labels will show how urgent this is for the team. 20 | - "Status" labels will show if this is ready to be worked on, blocked, or in progress. 21 | - "Need" labels will indicate if additional input or analysis is required. 22 | 23 | Finally, remember to use https://discuss.ipfs.io if you just need general 24 | support. 
25 | 26 | # Configuration for new-pr-welcome - https://github.com/behaviorbot/new-pr-welcome 27 | # Comment to be posted to on PRs from first time contributors in your repository 28 | newPRWelcomeComment: > 29 | Thank you for submitting this PR! 30 | 31 | A maintainer will be here shortly to review it. 32 | 33 | We are super grateful, but we are also overloaded! Help us by making sure 34 | that: 35 | 36 | * The context for this PR is clear, with relevant discussion, decisions 37 | and stakeholders linked/mentioned. 38 | 39 | * Your contribution itself is clear (code comments, self-review for the 40 | rest) and in its best form. Follow the [code contribution 41 | guidelines](https://github.com/ipfs/community/blob/master/CONTRIBUTING.md#code-contribution-guidelines) 42 | if they apply. 43 | 44 | Getting other community members to do a review would be great help too on 45 | complex PRs (you can ask in the chats/forums). If you are unsure about 46 | something, just leave us a comment. 47 | 48 | Next steps: 49 | 50 | * A maintainer will triage and assign priority to this PR, commenting on 51 | any missing things and potentially assigning a reviewer for high 52 | priority items. 53 | 54 | * The PR gets reviews, discussed and approvals as needed. 55 | 56 | * The PR is merged by maintainers when it has been approved and comments addressed. 57 | 58 | We currently aim to provide initial feedback/triaging within **two business 59 | days**. Please keep an eye on any labelling actions, as these will indicate 60 | priorities and status of your contribution. 61 | 62 | We are very grateful for your contribution! 63 | 64 | 65 | # Configuration for first-pr-merge - https://github.com/behaviorbot/first-pr-merge 66 | # Comment to be posted to on pull requests merged by a first time user 67 | # Currently disabled 68 | #firstPRMergeComment: "" 69 | -------------------------------------------------------------------------------- /.github/workflows/go-check.yml: -------------------------------------------------------------------------------- 1 | name: Go Checks 2 | 3 | on: 4 | pull_request: 5 | push: 6 | branches: ["master"] 7 | workflow_dispatch: 8 | 9 | permissions: 10 | contents: read 11 | 12 | concurrency: 13 | group: ${{ github.workflow }}-${{ github.event_name }}-${{ github.event_name == 'push' && github.sha || github.ref }} 14 | cancel-in-progress: true 15 | 16 | jobs: 17 | go-check: 18 | uses: ipdxco/unified-github-workflows/.github/workflows/go-check.yml@v1.0 19 | -------------------------------------------------------------------------------- /.github/workflows/go-test-config.json: -------------------------------------------------------------------------------- 1 | { 2 | "shuffle": false 3 | } 4 | -------------------------------------------------------------------------------- /.github/workflows/go-test.yml: -------------------------------------------------------------------------------- 1 | name: Go Test 2 | 3 | on: 4 | pull_request: 5 | push: 6 | branches: ["master"] 7 | workflow_dispatch: 8 | 9 | permissions: 10 | contents: read 11 | 12 | concurrency: 13 | group: ${{ github.workflow }}-${{ github.event_name }}-${{ github.event_name == 'push' && github.sha || github.ref }} 14 | cancel-in-progress: true 15 | 16 | jobs: 17 | go-test: 18 | uses: ipdxco/unified-github-workflows/.github/workflows/go-test.yml@v1.0 19 | secrets: 20 | CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }} 21 | -------------------------------------------------------------------------------- /.github/workflows/release-check.yml: 
-------------------------------------------------------------------------------- 1 | name: Release Checker 2 | 3 | on: 4 | pull_request_target: 5 | paths: [ 'version.json' ] 6 | types: [ opened, synchronize, reopened, labeled, unlabeled ] 7 | workflow_dispatch: 8 | 9 | permissions: 10 | contents: write 11 | pull-requests: write 12 | 13 | concurrency: 14 | group: ${{ github.workflow }}-${{ github.ref }} 15 | cancel-in-progress: true 16 | 17 | jobs: 18 | release-check: 19 | uses: ipdxco/unified-github-workflows/.github/workflows/release-check.yml@v1.0 20 | -------------------------------------------------------------------------------- /.github/workflows/releaser.yml: -------------------------------------------------------------------------------- 1 | name: Releaser 2 | 3 | on: 4 | push: 5 | paths: [ 'version.json' ] 6 | workflow_dispatch: 7 | 8 | permissions: 9 | contents: write 10 | 11 | concurrency: 12 | group: ${{ github.workflow }}-${{ github.sha }} 13 | cancel-in-progress: true 14 | 15 | jobs: 16 | releaser: 17 | uses: ipdxco/unified-github-workflows/.github/workflows/releaser.yml@v1.0 18 | -------------------------------------------------------------------------------- /.github/workflows/stale.yml: -------------------------------------------------------------------------------- 1 | name: Close and mark stale issue 2 | 3 | on: 4 | schedule: 5 | - cron: '0 0 * * *' 6 | 7 | permissions: 8 | issues: write 9 | pull-requests: write 10 | 11 | jobs: 12 | stale: 13 | uses: pl-strflt/.github/.github/workflows/reusable-stale-issue.yml@v0.3 14 | -------------------------------------------------------------------------------- /.github/workflows/tagpush.yml: -------------------------------------------------------------------------------- 1 | name: Tag Push Checker 2 | 3 | on: 4 | push: 5 | tags: 6 | - v* 7 | 8 | permissions: 9 | contents: read 10 | issues: write 11 | 12 | concurrency: 13 | group: ${{ github.workflow }}-${{ github.ref }} 14 | cancel-in-progress: true 15 | 16 | jobs: 17 | releaser: 18 | uses: ipdxco/unified-github-workflows/.github/workflows/tagpush.yml@v1.0 19 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *.out 2 | *.coverprofile 3 | *.test 4 | *~ 5 | 6 | coverage.txt 7 | -------------------------------------------------------------------------------- /.gx/lastpubver: -------------------------------------------------------------------------------- 1 | 0.4.0: QmZZ9PEJcYYXkhpW7Es7jKEU92ZwdMZQsxDd8GLJSHHDmF 2 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2017 Łukasz Magiera 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in 13 | all copies or substantial portions of the Software. 
14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 21 | THE SOFTWARE. 22 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | 2 | covertools: 3 | go get github.com/mattn/goveralls 4 | go get golang.org/x/tools/cmd/cover 5 | go get github.com/wadey/gocovmerge 6 | 7 | deps: covertools 8 | go get golang.org/x/sys/unix 9 | go get golang.org/x/net/trace 10 | 11 | test: 12 | go test ./... -v 13 | 14 | install: 15 | go install 16 | 17 | circle: deps 18 | go vet 19 | $(eval PKGS := $(shell go list ./...)) 20 | $(eval PKGS_DELIM := $(shell echo $(PKGS) | sed -e 's/ /,/g')) 21 | go list -f '{{if or (len .TestGoFiles) (len .XTestGoFiles)}}go test -test.v -covermode=atomic -coverprofile={{.Name}}_{{len .Imports}}_{{len .Deps}}.coverprofile -coverpkg $(PKGS_DELIM) {{.ImportPath}}{{end}}' $(PKGS) | xargs -I {} bash -c {} 22 | gocovmerge `ls *.coverprofile` > coverage.out 23 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | > [!WARNING] 2 | > 3 | > # ⚠️ `ipfs-ds-convert` is no longer maintained 4 | > 5 | > This tool has not been updated since 2021; it targets go-ipfs v0.8.0 and repo version 11. 6 | > Badger v1 is considered unstable and is deprecated. 7 | > 8 | > Modern [Kubo](https://github.com/ipfs/kubo) supports various datastores via its plugin system, 9 | > so instead of using a conversion tool like this, users are advised to create a new repository. 10 | 11 | ipfs-ds-convert 12 | ================== 13 | 14 | [![](https://img.shields.io/badge/made%20by-Protocol%20Labs-blue.svg?style=flat-square)](http://ipn.io) 15 | [![](https://img.shields.io/badge/project-IPFS-blue.svg?style=flat-square)](http://ipfs.io/) 16 | [![](https://img.shields.io/badge/freenode-%23ipfs-blue.svg?style=flat-square)](http://webchat.freenode.net/?channels=%23ipfs) 17 | [![Coverage Status](https://coveralls.io/repos/github/ipfs/ipfs-ds-convert/badge.svg)](https://coveralls.io/github/ipfs/ipfs-ds-convert) 18 | [![CircleCI](https://circleci.com/gh/ipfs/ipfs-ds-convert/tree/master.svg?style=shield)](https://circleci.com/gh/ipfs/ipfs-ds-convert/tree/master) 19 | 20 | > Datastore converter for go-ipfs 21 | 22 | This tool is a work in progress and may damage your data. Make sure to make a backup first. 
23 | 24 | TODO: 25 | - [x] Finish basic conversion code 26 | - [x] package.json for gx 27 | - [ ] Tests 28 | - [x] CI (needs https://github.com/ipfs/go-ipfs/pull/4007, https://github.com/ipfs/go-ipfs/pull/3575) 29 | - [ ] Coverage > 80% 30 | - [ ] Review 31 | - [ ] Standard readme 32 | - [x] Revert on error / from backup 33 | - [x] Cleanup backup subcommand 34 | - [x] Optimize some standard cases 35 | - [x] Don't copy directories when not needed 36 | - [ ] Detect renames 37 | - Not that common 38 | - [x] Report progress 39 | - [ ] Don't depend on go-ipfs 40 | 41 | ## Install 42 | 43 | ### Build From Source 44 | 45 | These instructions assume that Go has been installed as described [here](https://github.com/ipfs/go-ipfs#install-go). 46 | 47 | ``` 48 | $ GO111MODULE=on go get github.com/ipfs/ipfs-ds-convert@latest 49 | ``` 50 | 51 | ## Usage 52 | 53 | ### Convert to Badger Datastore 54 | 55 | Apply the Badger Datastore profile: 56 | 57 | 58 | ``` 59 | ipfs config profile apply badgerds 60 | ``` 61 | 62 | Then, start the conversion with: 63 | 64 | ``` 65 | $ ipfs-ds-convert convert 66 | ``` 67 | 68 | This can take a very long time to complete depending on the size of the datastore. If running this on a headless server, it's recommended to use something like `screen` or `tmux` to run this command in a persistent shell. 69 | 70 | ## Contribute 71 | 72 | PRs are welcome! 73 | 74 | Small note: If editing the Readme, please conform to the [standard-readme](https://github.com/RichardLitt/standard-readme) specification. 75 | 76 | ## License 77 | 78 | MIT © Łukasz Magiera 79 | -------------------------------------------------------------------------------- /cli_test.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "path" 5 | "testing" 6 | 7 | "github.com/ipfs/ipfs-ds-convert/testutil" 8 | "os" 9 | ) 10 | 11 | func TestBasicConvert(t *testing.T) { 12 | dir, _close, s1, s2 := testutil.PrepareTest(t, 2000, 2000) 13 | defer _close(t) 14 | 15 | testutil.PatchConfig(t, path.Join(dir, "config"), "testfiles/badgerSpec") 16 | 17 | os.Setenv(EnvDir, dir) 18 | run([]string{".", "convert"}) 19 | 20 | testutil.FinishTest(t, dir, s1, s2, 2000, 2000) 21 | } 22 | 23 | func TestBasicRevert(t *testing.T) { 24 | dir, _close, s1, s2 := testutil.PrepareTest(t, 200, 200) 25 | defer _close(t) 26 | 27 | testutil.PatchConfig(t, path.Join(dir, "config"), "testfiles/badgerSpec") 28 | 29 | os.Setenv(EnvDir, dir) 30 | run([]string{".", "convert", "--keep"}) 31 | 32 | testutil.FinishTest(t, dir, s1, s2, 200, 200) 33 | 34 | os.Setenv(EnvDir, dir) 35 | run([]string{".", "revert", "--force", "--fix-config"}) 36 | 37 | testutil.FinishTest(t, dir, s1, s2, 200, 200) 38 | } 39 | 40 | func TestBasicCleanup(t *testing.T) { 41 | dir, _close, s1, s2 := testutil.PrepareTest(t, 200, 200) 42 | defer _close(t) 43 | 44 | testutil.PatchConfig(t, path.Join(dir, "config"), "testfiles/badgerSpec") 45 | 46 | os.Setenv(EnvDir, dir) 47 | run([]string{".", "convert", "--keep"}) 48 | 49 | testutil.FinishTest(t, dir, s1, s2, 200, 200) 50 | 51 | os.Setenv(EnvDir, dir) 52 | run([]string{".", "cleanup"}) 53 | 54 | testutil.FinishTest(t, dir, s1, s2, 200, 200) 55 | } 56 | -------------------------------------------------------------------------------- /config/util.go: -------------------------------------------------------------------------------- 1 | package config 2 | 3 | import ( 4 | "encoding/json" 5 | "io/ioutil" 6 | ) 7 | 8 | func Load(path string, out 
*map[string]interface{}) error { 9 | cfgbytes, err := ioutil.ReadFile(path) 10 | if err != nil { 11 | return err 12 | } 13 | 14 | err = json.Unmarshal(cfgbytes, out) 15 | if err != nil { 16 | return err 17 | } 18 | 19 | return nil 20 | } 21 | -------------------------------------------------------------------------------- /config/validate.go: -------------------------------------------------------------------------------- 1 | package config 2 | 3 | import ( 4 | "errors" 5 | "fmt" 6 | "path/filepath" 7 | ) 8 | 9 | var ( 10 | ErrInvalidType = errors.New("invalid type entry in config") 11 | ) 12 | 13 | type validatorContext struct { 14 | usedPaths map[string]bool 15 | fillDefault bool 16 | } 17 | 18 | var validators = map[string]func(*validatorContext, map[string]interface{}) error{} 19 | 20 | func init() { 21 | validators["badgerds"] = badgerdsValidator 22 | validators["flatfs"] = flatfsValidator 23 | validators["levelds"] = leveldsValidator 24 | validators["log"] = logValidator 25 | validators["measure"] = measureValidator 26 | validators["mount"] = mountValidator 27 | } 28 | 29 | func Validate(dsConfiguration map[string]interface{}, fillDefault bool) (dirs []string, err error) { 30 | ctx := validatorContext{ 31 | usedPaths: map[string]bool{}, 32 | fillDefault: fillDefault, 33 | } 34 | err = validate(&ctx, dsConfiguration) 35 | 36 | paths := make([]string, 0, len(ctx.usedPaths)) 37 | for k := range ctx.usedPaths { 38 | paths = append(paths, k) 39 | } 40 | 41 | return paths, err 42 | } 43 | 44 | func validate(ctx *validatorContext, dsConfiguration map[string]interface{}) error { 45 | t, ok := dsConfiguration["type"].(string) 46 | if !ok { 47 | return ErrInvalidType 48 | } 49 | 50 | validator := validators[t] 51 | if validator == nil { 52 | return fmt.Errorf("unsupported type entry in config: %s", t) 53 | } 54 | 55 | return validator(ctx, dsConfiguration) 56 | } 57 | 58 | func checkPath(ctx *validatorContext, p interface{}) error { 59 | path, ok := p.(string) 60 | if !ok { 61 | return errors.New("invalid 'path' type in datastore") 62 | } 63 | 64 | clean := filepath.Clean(path) 65 | if clean[0] == '/' || clean[0] == '.' 
{ 66 | return errors.New("only paths inside ipfs repo are supported") 67 | } 68 | 69 | if ctx.usedPaths[path] { 70 | return fmt.Errorf("path '%s' is already in use", path) 71 | } 72 | 73 | ctx.usedPaths[path] = true 74 | 75 | return nil 76 | } 77 | 78 | ////////////// 79 | 80 | func flatfsValidator(ctx *validatorContext, dsConfiguration map[string]interface{}) error { 81 | err := checkPath(ctx, dsConfiguration["path"]) 82 | if err != nil { 83 | return err 84 | } 85 | 86 | _, ok := dsConfiguration["sync"] 87 | if !ok && ctx.fillDefault { 88 | dsConfiguration["sync"] = true 89 | } else { 90 | if !ok { 91 | return errors.New("no sync field in flatfs spec") 92 | } 93 | _, ok := dsConfiguration["sync"].(bool) 94 | if !ok { 95 | return errors.New("invalid sync field type in flatfs spec") 96 | } 97 | } 98 | 99 | return nil 100 | } 101 | 102 | func leveldsValidator(ctx *validatorContext, dsConfiguration map[string]interface{}) error { 103 | err := checkPath(ctx, dsConfiguration["path"]) 104 | if err != nil { 105 | return err 106 | } 107 | 108 | _, ok := dsConfiguration["compression"] 109 | if !ok && ctx.fillDefault { 110 | dsConfiguration["compression"] = "none" 111 | } else { 112 | if !ok { 113 | return errors.New("no compression field in leveldb spec") 114 | } 115 | _, ok := dsConfiguration["compression"].(string) 116 | if !ok { 117 | return errors.New("invalid compression field type in leveldb spec") 118 | } 119 | } 120 | 121 | return nil 122 | } 123 | 124 | func badgerdsValidator(ctx *validatorContext, dsConfiguration map[string]interface{}) error { 125 | err := checkPath(ctx, dsConfiguration["path"]) 126 | if err != nil { 127 | return err 128 | } 129 | 130 | return nil 131 | } 132 | 133 | func mountValidator(ctx *validatorContext, dsConfiguration map[string]interface{}) error { 134 | mounts, ok := dsConfiguration["mounts"].([]interface{}) 135 | if !ok { 136 | return errors.New("invalid 'mounts' in mount datastore") 137 | } 138 | 139 | mountPoints := map[string]bool{} 140 | 141 | for _, m := range mounts { 142 | mount, ok := m.(map[string]interface{}) 143 | if !ok { 144 | return errors.New("mounts entry has invalid type") 145 | } 146 | 147 | mountPoint, ok := mount["mountpoint"].(string) 148 | if !ok { 149 | return errors.New("'mountpoint' must be a string") 150 | } 151 | 152 | if mountPoints[mountPoint] { 153 | return errors.New("multiple mounts under one path are not allowed") 154 | } 155 | 156 | mountPoints[mountPoint] = true 157 | 158 | err := validate(ctx, mount) 159 | if err != nil { 160 | return err 161 | } 162 | } 163 | 164 | return nil 165 | } 166 | 167 | func measureValidator(ctx *validatorContext, dsConfiguration map[string]interface{}) error { 168 | _, ok := dsConfiguration["prefix"].(string) 169 | if !ok { 170 | return errors.New("invalid 'prefix' in measure datastore") 171 | } 172 | 173 | child, ok := dsConfiguration["child"].(map[string]interface{}) 174 | if !ok { 175 | return errors.New("child of measure datastore has invalid type") 176 | } 177 | 178 | return validate(ctx, child) 179 | } 180 | 181 | func logValidator(ctx *validatorContext, dsConfiguration map[string]interface{}) error { 182 | _, ok := dsConfiguration["name"].(string) 183 | if !ok { 184 | return errors.New("invalid 'name' in log datastore") 185 | } 186 | 187 | child, ok := dsConfiguration["child"].(map[string]interface{}) 188 | if !ok { 189 | return errors.New("child of log datastore has invalid type") 190 | } 191 | 192 | return validate(ctx, child) 193 | } 194 | 
-------------------------------------------------------------------------------- /config/validate_test.go: -------------------------------------------------------------------------------- 1 | package config 2 | 3 | import ( 4 | "sort" 5 | "strings" 6 | "testing" 7 | ) 8 | 9 | var ( 10 | TestSpec = map[string]interface{}{ 11 | "type": "mount", 12 | "mounts": []interface{}{ 13 | map[string]interface{}{ 14 | "mountpoint": "/blocks", 15 | "type": "measure", 16 | "prefix": "flatfs.datastore", 17 | "child": map[string]interface{}{ 18 | "type": "flatfs", 19 | "path": "blocks", 20 | "sync": true, 21 | "shardFunc": "/repo/flatfs/shard/v1/next-to-last/2", 22 | }, 23 | }, 24 | map[string]interface{}{ 25 | "mountpoint": "/", 26 | "type": "measure", 27 | "prefix": "leveldb.datastore", 28 | "child": map[string]interface{}{ 29 | "type": "levelds", 30 | "path": "levelDatastore", 31 | "compression": "none", 32 | }, 33 | }, 34 | map[string]interface{}{ 35 | "mountpoint": "/other", 36 | "type": "measure", 37 | "prefix": "badger.datastore", 38 | "child": map[string]interface{}{ 39 | "type": "badgerds", 40 | "path": "badgerDatastore", 41 | "compression": "none", 42 | }, 43 | }, 44 | }, 45 | } 46 | 47 | EmptySpec = map[string]interface{}{} 48 | 49 | InvalidTypeSpec = map[string]interface{}{ 50 | "type": 2, 51 | } 52 | 53 | InvalidFlatfsPathSpec = map[string]interface{}{ 54 | "type": "flatfs", 55 | "sync": true, 56 | "shardFunc": "/repo/flatfs/shard/v1/next-to-last/2", 57 | } 58 | 59 | InvalidBadgerdsPathSpec = map[string]interface{}{ 60 | "type": "badgerds", 61 | "compression": "none", 62 | } 63 | 64 | LeveldbNoCompression = map[string]interface{}{ 65 | "type": "levelds", 66 | "path": "levelDatastore", 67 | } 68 | 69 | LeveldbNumericCompression = map[string]interface{}{ 70 | "type": "levelds", 71 | "path": "levelDatastore", 72 | "compression": 2, 73 | } 74 | 75 | MountlessMount = map[string]interface{}{ 76 | "type": "mount", 77 | } 78 | 79 | InvalidMount = map[string]interface{}{ 80 | "type": "mount", 81 | "mounts": []interface{}{ 82 | 3, 83 | }, 84 | } 85 | 86 | NoMountpoint = map[string]interface{}{ 87 | "type": "mount", 88 | "mounts": []interface{}{ 89 | map[string]interface{}{ 90 | "type": "levelds", 91 | "path": "levelDatastore", 92 | "compression": 2, 93 | }, 94 | }, 95 | } 96 | 97 | DoubledMountpoint = map[string]interface{}{ 98 | "type": "mount", 99 | "mounts": []interface{}{ 100 | map[string]interface{}{ 101 | "type": "levelds", 102 | "path": "levelDatastore1", 103 | "compression": "none", 104 | "mountpoint": "/", 105 | }, 106 | map[string]interface{}{ 107 | "type": "levelds", 108 | "path": "levelDatastore2", 109 | "compression": "none", 110 | "mountpoint": "/", 111 | }, 112 | }, 113 | } 114 | 115 | PrefixlessMeasure = map[string]interface{}{ 116 | "mountpoint": "/blocks", 117 | "type": "measure", 118 | "child": map[string]interface{}{ 119 | "type": "flatfs", 120 | "path": "blocks", 121 | "sync": true, 122 | "shardFunc": "/repo/flatfs/shard/v1/next-to-last/2", 123 | }, 124 | } 125 | 126 | ChildlessMeasure = map[string]interface{}{ 127 | "mountpoint": "/blocks", 128 | "type": "measure", 129 | "prefix": "foo", 130 | } 131 | 132 | LogSpec = map[string]interface{}{ 133 | "mountpoint": "/blocks", 134 | "type": "log", 135 | "name": "blocks", 136 | "child": map[string]interface{}{ 137 | "type": "flatfs", 138 | "path": "blocks", 139 | "sync": true, 140 | "shardFunc": "/repo/flatfs/shard/v1/next-to-last/2", 141 | }, 142 | } 143 | 144 | NamelessLogSpec = map[string]interface{}{ 145 | "mountpoint": "/blocks", 146 
| "type": "log", 147 | "child": map[string]interface{}{ 148 | "type": "flatfs", 149 | "path": "blocks", 150 | "sync": true, 151 | "shardFunc": "/repo/flatfs/shard/v1/next-to-last/2", 152 | }, 153 | } 154 | 155 | ChildlessLogSpec = map[string]interface{}{ 156 | "mountpoint": "/blocks", 157 | "type": "log", 158 | "name": "foo", 159 | } 160 | ) 161 | 162 | func TestValidate(t *testing.T) { 163 | dirs, err := Validate(TestSpec, false) 164 | if err != nil { 165 | t.Errorf("should not return error: %s", err) 166 | } 167 | 168 | sort.Strings(dirs) 169 | 170 | if dirs[0] != "badgerDatastore" { 171 | t.Errorf(`dirs[0] != "badgerDatastore" got %s `, dirs[0]) 172 | } 173 | if dirs[1] != "blocks" { 174 | t.Errorf(`dirs[0] != "blocks" got %s `, dirs[1]) 175 | } 176 | if dirs[2] != "levelDatastore" { 177 | t.Errorf(`dirs[0] != "levelDatastore" got %s `, dirs[2]) 178 | } 179 | } 180 | 181 | func TestEmptySpec(t *testing.T) { 182 | _, err := Validate(EmptySpec, false) 183 | if err != nil { 184 | if strings.Contains(err.Error(), "invalid type entry in config") { 185 | return 186 | } 187 | t.Errorf("unexpected error: %s", err) 188 | } 189 | 190 | t.Errorf("expected error") 191 | } 192 | 193 | func TestInvalidTypeSpec(t *testing.T) { 194 | _, err := Validate(InvalidTypeSpec, false) 195 | if err != nil { 196 | if strings.Contains(err.Error(), "invalid type entry in config") { 197 | return 198 | } 199 | t.Errorf("unexpected error: %s", err) 200 | } 201 | 202 | t.Errorf("expected error") 203 | } 204 | 205 | func TestInvalidFlatfsPathSpec(t *testing.T) { 206 | _, err := Validate(InvalidFlatfsPathSpec, false) 207 | if err != nil { 208 | if strings.Contains(err.Error(), "invalid 'path' type in datastore") { 209 | return 210 | } 211 | t.Errorf("unexpected error: %s", err) 212 | } 213 | 214 | t.Errorf("expected error") 215 | } 216 | 217 | func TestInvalidBadgerdsPathSpec(t *testing.T) { 218 | _, err := Validate(InvalidBadgerdsPathSpec, false) 219 | if err != nil { 220 | if strings.Contains(err.Error(), "invalid 'path' type in datastore") { 221 | return 222 | } 223 | t.Errorf("unexpected error: %s", err) 224 | } 225 | 226 | t.Errorf("expected error") 227 | } 228 | 229 | func TestLeveldbSpec(t *testing.T) { 230 | _, err := Validate(LeveldbNoCompression, false) 231 | if err != nil { 232 | if strings.Contains(err.Error(), "no compression field in leveldb spec") { 233 | _, err := Validate(LeveldbNoCompression, true) 234 | if err != nil { 235 | t.Errorf("unexpected error: %s", err) 236 | } 237 | 238 | if LeveldbNoCompression["compression"] != "none" { 239 | t.Errorf("compression field not injected to leveldb spec") 240 | } 241 | return 242 | } 243 | t.Errorf("unexpected error: %s", err) 244 | } 245 | 246 | t.Errorf("expected error") 247 | } 248 | 249 | func TestLeveldbNumSpec(t *testing.T) { 250 | _, err := Validate(LeveldbNumericCompression, false) 251 | if err != nil { 252 | if strings.Contains(err.Error(), "invalid compression field type in leveldb spec") { 253 | return 254 | } 255 | t.Errorf("unexpected error: %s", err) 256 | } 257 | 258 | t.Errorf("expected error") 259 | } 260 | 261 | func TestMountlessMountSpec(t *testing.T) { 262 | _, err := Validate(MountlessMount, false) 263 | if err != nil { 264 | if strings.Contains(err.Error(), "invalid 'mounts' in mount datastore") { 265 | return 266 | } 267 | t.Errorf("unexpected error: %s", err) 268 | } 269 | 270 | t.Errorf("expected error") 271 | } 272 | 273 | func TestInvalidMountSpec(t *testing.T) { 274 | _, err := Validate(InvalidMount, false) 275 | if err != nil { 276 
| if strings.Contains(err.Error(), "mounts entry has invalid type") { 277 | return 278 | } 279 | t.Errorf("unexpected error: %s", err) 280 | } 281 | 282 | t.Errorf("expected error") 283 | } 284 | 285 | func TestNoMountpointSpec(t *testing.T) { 286 | _, err := Validate(NoMountpoint, false) 287 | if err != nil { 288 | if strings.Contains(err.Error(), "'mountpoint' must be a string") { 289 | return 290 | } 291 | t.Errorf("unexpected error: %s", err) 292 | } 293 | 294 | t.Errorf("expected error") 295 | } 296 | 297 | func TestDoubledMountpointSpec(t *testing.T) { 298 | _, err := Validate(DoubledMountpoint, false) 299 | if err != nil { 300 | if strings.Contains(err.Error(), "multiple mounts under one path are not allowed") { 301 | return 302 | } 303 | t.Errorf("unexpected error: %s", err) 304 | } 305 | 306 | t.Errorf("expected error") 307 | } 308 | 309 | func TestPrefixlessMeasureSpec(t *testing.T) { 310 | _, err := Validate(PrefixlessMeasure, false) 311 | if err != nil { 312 | if strings.Contains(err.Error(), "invalid 'prefix' in measure datastore") { 313 | return 314 | } 315 | t.Errorf("unexpected error: %s", err) 316 | } 317 | 318 | t.Errorf("expected error") 319 | } 320 | 321 | func TestChildlessMeasureSpec(t *testing.T) { 322 | _, err := Validate(ChildlessMeasure, false) 323 | if err != nil { 324 | if strings.Contains(err.Error(), "child of measure datastore has invalid type") { 325 | return 326 | } 327 | t.Errorf("unexpected error: %s", err) 328 | } 329 | 330 | t.Errorf("expected error") 331 | } 332 | 333 | func TestLogSpec(t *testing.T) { 334 | _, err := Validate(LogSpec, false) 335 | if err != nil { 336 | t.Errorf("unexpected error: %s", err) 337 | } 338 | } 339 | 340 | func TestNamelessLogSpec(t *testing.T) { 341 | _, err := Validate(NamelessLogSpec, false) 342 | if err != nil { 343 | if strings.Contains(err.Error(), "invalid 'name' in log datastore") { 344 | return 345 | } 346 | t.Errorf("unexpected error: %s", err) 347 | } 348 | 349 | t.Errorf("expected error") 350 | } 351 | 352 | func TestChildlessLogSpec(t *testing.T) { 353 | _, err := Validate(ChildlessLogSpec, false) 354 | if err != nil { 355 | if strings.Contains(err.Error(), "child of log datastore has invalid type") { 356 | return 357 | } 358 | t.Errorf("unexpected error: %s", err) 359 | } 360 | 361 | t.Errorf("expected error") 362 | } 363 | -------------------------------------------------------------------------------- /convert/checks_test.go: -------------------------------------------------------------------------------- 1 | package convert_test 2 | 3 | import ( 4 | "fmt" 5 | "io/ioutil" 6 | "os" 7 | "path" 8 | "runtime" 9 | "strings" 10 | "testing" 11 | 12 | "github.com/ipfs/ipfs-ds-convert/convert" 13 | "github.com/ipfs/ipfs-ds-convert/repo" 14 | "github.com/ipfs/ipfs-ds-convert/revert" 15 | "github.com/ipfs/ipfs-ds-convert/testutil" 16 | 17 | lock "github.com/ipfs/go-fs-lock" 18 | ) 19 | 20 | func TestInvalidRepoVersion(t *testing.T) { 21 | //Prepare repo 22 | dir, _close, _, _ := testutil.PrepareTest(t, 10, 10) 23 | defer _close(t) 24 | 25 | err := ioutil.WriteFile(path.Join(dir, "version"), []byte("147258369"), 0664) 26 | if err != nil { 27 | t.Fatal(err) 28 | } 29 | 30 | //Convert! 
31 | err = convert.Convert(dir, false) 32 | if err == nil { 33 | t.Fatal(fmt.Errorf("No error, expected invalid repo version")) 34 | } 35 | 36 | if !strings.Contains(err.Error(), "unsupported fsrepo version: 147258369") { 37 | t.Fatal(fmt.Errorf("unexpected error: %s", err)) 38 | } 39 | } 40 | 41 | func TestLockedRepo(t *testing.T) { 42 | //Prepare repo 43 | dir, _close, _, _ := testutil.PrepareTest(t, 10, 10) 44 | defer _close(t) 45 | 46 | unlock, err := lock.Lock(dir, "repo.lock") 47 | if err != nil { 48 | t.Fatal(err) 49 | } 50 | defer unlock.Close() 51 | 52 | //Convert! 53 | err = convert.Convert(dir, false) 54 | if err == nil { 55 | t.Fatal(fmt.Errorf("No error, expected invalid repo version")) 56 | } 57 | 58 | if !strings.Contains(err.Error(), "lock is already held") { 59 | t.Fatal(fmt.Errorf("unexpected error: %s", err)) 60 | } 61 | } 62 | 63 | func TestNoSpec(t *testing.T) { 64 | //Prepare repo 65 | dir, _close, _, _ := testutil.PrepareTest(t, 10, 10) 66 | defer _close(t) 67 | 68 | err := os.Remove(path.Join(dir, repo.SpecsFile)) 69 | if err != nil { 70 | t.Fatal(err) 71 | } 72 | 73 | //Convert! 74 | err = convert.Convert(dir, false) 75 | if err == nil { 76 | t.Fatal(fmt.Errorf("No error, expected no such file or directory")) 77 | } 78 | 79 | if !strings.Contains(err.Error(), "datastore_spec: ") { 80 | t.Fatal(fmt.Errorf("unexpected error: %s", err)) 81 | } 82 | } 83 | 84 | func TestNoVersion(t *testing.T) { 85 | //Prepare repo 86 | dir, _close, _, _ := testutil.PrepareTest(t, 10, 10) 87 | defer _close(t) 88 | 89 | err := os.Remove(path.Join(dir, "version")) 90 | if err != nil { 91 | t.Fatal(err) 92 | } 93 | 94 | //Convert! 95 | err = convert.Convert(dir, false) 96 | if err == nil { 97 | t.Fatal(fmt.Errorf("No error, expected no such file or directory")) 98 | } 99 | 100 | if !strings.Contains(err.Error(), "version: ") { 101 | t.Fatal(fmt.Errorf("unexpected error: %s", err)) 102 | } 103 | } 104 | 105 | func TestInvalidVersion(t *testing.T) { 106 | //Prepare repo 107 | dir, _close, _, _ := testutil.PrepareTest(t, 10, 10) 108 | defer _close(t) 109 | 110 | err := ioutil.WriteFile(path.Join(dir, "version"), []byte("a"), 0600) 111 | if err != nil { 112 | t.Fatal(err) 113 | } 114 | 115 | //Convert! 116 | err = convert.Convert(dir, false) 117 | if err == nil { 118 | t.Fatal(fmt.Errorf("No error, expected strconv.Atoi: parsing")) 119 | } 120 | 121 | if !strings.Contains(err.Error(), `strconv.Atoi: parsing "a": invalid syntax`) { 122 | t.Fatal(fmt.Errorf("unexpected error: %s", err)) 123 | } 124 | } 125 | 126 | func TestInvalidSpecJson(t *testing.T) { 127 | //Prepare repo 128 | dir, _close, _, _ := testutil.PrepareTest(t, 10, 10) 129 | defer _close(t) 130 | 131 | err := ioutil.WriteFile(path.Join(dir, repo.SpecsFile), []byte("}"), 0600) 132 | if err != nil { 133 | t.Fatal(err) 134 | } 135 | 136 | //Convert! 137 | err = convert.Convert(dir, false) 138 | if err == nil { 139 | t.Fatal(fmt.Errorf("No error, expected invalid character '}' looking for beginning of value")) 140 | } 141 | 142 | if !strings.Contains(err.Error(), "invalid character '}' looking for beginning of value") { 143 | t.Fatal(fmt.Errorf("unexpected error: %s", err)) 144 | } 145 | } 146 | 147 | func TestInvalidSpecFile(t *testing.T) { 148 | //Prepare repo 149 | dir, _close, _, _ := testutil.PrepareTest(t, 10, 10) 150 | defer _close(t) 151 | 152 | err := ioutil.WriteFile(path.Join(dir, repo.SpecsFile), []byte("{}"), 0600) 153 | if err != nil { 154 | t.Fatal(err) 155 | } 156 | 157 | //Convert! 
158 | err = convert.Convert(dir, false) 159 | if err == nil { 160 | t.Fatal(fmt.Errorf("No error, expected validating datastore_spec spec: invalid type entry in config")) 161 | } 162 | 163 | if !strings.Contains(err.Error(), "validating datastore_spec spec: invalid type entry in config") { 164 | t.Fatal(fmt.Errorf("unexpected error: %s", err)) 165 | } 166 | } 167 | 168 | func TestNoConfig(t *testing.T) { 169 | //Prepare repo 170 | dir, _close, _, _ := testutil.PrepareTest(t, 10, 10) 171 | defer _close(t) 172 | 173 | err := os.Remove(path.Join(dir, repo.ConfigFile)) 174 | if err != nil { 175 | t.Fatal(err) 176 | } 177 | 178 | //Convert! 179 | err = convert.Convert(dir, false) 180 | if err == nil { 181 | t.Fatal(fmt.Errorf("No error, expected no such file or directory")) 182 | } 183 | 184 | if !strings.Contains(err.Error(), "config: ") { 185 | t.Fatal(fmt.Errorf("unexpected error: %s", err)) 186 | } 187 | } 188 | 189 | func TestInvalidConfigJson(t *testing.T) { 190 | //Prepare repo 191 | dir, _close, _, _ := testutil.PrepareTest(t, 10, 10) 192 | defer _close(t) 193 | 194 | err := ioutil.WriteFile(path.Join(dir, repo.ConfigFile), []byte("}"), 0600) 195 | if err != nil { 196 | t.Fatal(err) 197 | } 198 | 199 | //Convert! 200 | err = convert.Convert(dir, false) 201 | if err == nil { 202 | t.Fatal(fmt.Errorf("No error, expected invalid character '}' looking for beginning of value")) 203 | } 204 | 205 | if !strings.Contains(err.Error(), "invalid character '}' looking for beginning of value") { 206 | t.Fatal(fmt.Errorf("unexpected error: %s", err)) 207 | } 208 | } 209 | 210 | func TestInvalidConfigFile(t *testing.T) { 211 | //Prepare repo 212 | dir, _close, _, _ := testutil.PrepareTest(t, 10, 10) 213 | defer _close(t) 214 | 215 | err := ioutil.WriteFile(path.Join(dir, repo.ConfigFile), []byte("{}"), 0600) 216 | if err != nil { 217 | t.Fatal(err) 218 | } 219 | 220 | err = convert.Convert(dir, false) 221 | if err == nil { 222 | t.Fatal(fmt.Errorf("No error, expected no 'Datastore' or invalid type in")) 223 | } 224 | 225 | if !strings.Contains(err.Error(), "no 'Datastore' or invalid type") { 226 | t.Fatal(fmt.Errorf("unexpected error: %s", err)) 227 | } 228 | 229 | err = os.Remove(path.Join(dir, revert.ConvertLog)) 230 | if err != nil { 231 | t.Fatal(err) 232 | } 233 | 234 | err = ioutil.WriteFile(path.Join(dir, repo.ConfigFile), []byte(`{"Datastore":{}}`), 0600) 235 | if err != nil { 236 | t.Fatal(err) 237 | } 238 | 239 | //Convert! 240 | err = convert.Convert(dir, false) 241 | if err == nil { 242 | t.Fatal(fmt.Errorf("No error, expected no 'Datastore.Spec' or invalid type")) 243 | } 244 | 245 | if !strings.Contains(err.Error(), "no 'Datastore.Spec' or invalid type in") { 246 | t.Fatal(fmt.Errorf("unexpected error: %s", err)) 247 | } 248 | } 249 | 250 | func TestInvalidSpec(t *testing.T) { 251 | //Prepare repo 252 | dir, _close, _, _ := testutil.PrepareTest(t, 10, 10) 253 | defer _close(t) 254 | 255 | testutil.PatchConfig(t, path.Join(dir, "config"), "../testfiles/invalidSpec") 256 | 257 | //Convert! 
258 | err := convert.Convert(dir, false) 259 | if err == nil { 260 | t.Fatal(fmt.Errorf("No error, expected error validating datastore spec")) 261 | } 262 | 263 | if !strings.Contains(err.Error(), "unsupported type entry in config: notAValidDatastoreType") { 264 | t.Fatal(fmt.Errorf("unexpected error: %s", err)) 265 | } 266 | } 267 | 268 | func TestAbsolutePathSpec(t *testing.T) { 269 | if runtime.GOOS == "windows" { 270 | t.Skip("the test uses unix paths in test vectors") 271 | } 272 | 273 | //Prepare repo 274 | dir, _close, _, _ := testutil.PrepareTest(t, 10, 10) 275 | defer _close(t) 276 | 277 | testutil.PatchConfig(t, path.Join(dir, "config"), "../testfiles/absPathSpec") 278 | 279 | //Convert! 280 | err := convert.Convert(dir, false) 281 | if err == nil { 282 | t.Fatal(fmt.Errorf("No error, expected error validating datastore spec")) 283 | } 284 | 285 | if !strings.Contains(err.Error(), "only paths inside ipfs repo are supported") { 286 | t.Fatal(fmt.Errorf("unexpected error: %s", err)) 287 | } 288 | } 289 | 290 | func TestReusePathSpec(t *testing.T) { 291 | //Prepare repo 292 | dir, _close, _, _ := testutil.PrepareTest(t, 10, 10) 293 | defer _close(t) 294 | 295 | testutil.PatchConfig(t, path.Join(dir, "config"), "../testfiles/reusePathSpec") 296 | 297 | //Convert! 298 | err := convert.Convert(dir, false) 299 | if err == nil { 300 | t.Fatal(fmt.Errorf("No error, expected error validating datastore spec")) 301 | } 302 | 303 | if !strings.Contains(err.Error(), "path 'datastore' is already in use") { 304 | t.Fatal(fmt.Errorf("unexpected error: %s", err)) 305 | } 306 | } 307 | 308 | func TestROSpec(t *testing.T) { 309 | //Prepare repo 310 | dir, _close, _, _ := testutil.PrepareTest(t, 10, 10) 311 | defer _close(t) 312 | 313 | testutil.PatchConfig(t, path.Join(dir, "config"), "../testfiles/badgerSpec") 314 | if err := os.Chmod(path.Join(dir, repo.SpecsFile), 0400); err != nil { 315 | t.Fatal(err) 316 | } 317 | 318 | //Convert! 
319 | err := convert.Convert(dir, false) 320 | if err == nil { 321 | t.Fatal(fmt.Errorf("No error, expected error validating datastore spec")) 322 | } 323 | 324 | if !strings.Contains(err.Error(), "datastore_spec is not writable") { 325 | t.Fatal(fmt.Errorf("unexpected error: %s", err)) 326 | } 327 | } 328 | -------------------------------------------------------------------------------- /convert/convert.go: -------------------------------------------------------------------------------- 1 | package convert 2 | 3 | import ( 4 | "fmt" 5 | "io/ioutil" 6 | "os" 7 | "path/filepath" 8 | 9 | logging "log" 10 | 11 | "github.com/ipfs/ipfs-ds-convert/repo" 12 | "github.com/ipfs/ipfs-ds-convert/revert" 13 | "github.com/ipfs/ipfs-ds-convert/strategy" 14 | 15 | lock "github.com/ipfs/go-fs-lock" 16 | ) 17 | 18 | var Log = logging.New(os.Stderr, "convert ", logging.LstdFlags) 19 | 20 | // Conversion holds Conversion state and progress 21 | type Conversion struct { 22 | steps []string 23 | log *revert.ActionLogger 24 | 25 | path string 26 | 27 | fromSpec map[string]interface{} 28 | toSpec map[string]interface{} 29 | } 30 | 31 | func Convert(repoPath string, keepBackup bool) error { 32 | c := Conversion{ 33 | path: repoPath, 34 | } 35 | 36 | c.addStep("begin with tool version %s", repo.ToolVersion) 37 | 38 | err := c.checkRepoVersion() 39 | if err != nil { 40 | return err 41 | } 42 | 43 | unlock, err := lock.Lock(c.path, repo.LockFile) 44 | if err != nil { 45 | return err 46 | } 47 | defer unlock.Close() 48 | 49 | c.log, err = revert.NewActionLogger(c.path) 50 | if err != nil { 51 | return err 52 | } 53 | defer c.log.Close() 54 | 55 | err = c.loadSpecs() 56 | if err != nil { 57 | return err 58 | } 59 | 60 | s, err := strategy.NewStrategy(c.fromSpec, c.toSpec) 61 | if err != nil { 62 | return c.wrapErr(err) 63 | } 64 | 65 | strat := s.Spec() 66 | conversionType, _ := strat.Type() 67 | switch conversionType { 68 | case "copy": 69 | from, _ := strat.Sub("from") 70 | to, _ := strat.Sub("to") 71 | 72 | copy := NewCopy(c.path, from, to, c.log, c.addStep) 73 | err := copy.Run() 74 | if err != nil { 75 | return c.wrapErr(err) 76 | } 77 | 78 | err = copy.Verify() 79 | if err != nil { 80 | return c.wrapErr(err) 81 | } 82 | 83 | if !keepBackup { 84 | err = copy.Clean() 85 | if err != nil { 86 | return c.wrapErr(err) 87 | } 88 | } 89 | case "noop": 90 | default: 91 | panic(fmt.Sprintf("unexpected strategy %s", conversionType)) 92 | } 93 | 94 | Log.Println("Saving new spec") 95 | err = c.saveNewSpec(keepBackup) 96 | if err != nil { 97 | return c.wrapErr(err) 98 | } 99 | 100 | c.log.Log(revert.ActionDone) 101 | 102 | if !keepBackup { 103 | err = c.log.CloseFinal() 104 | if err != nil { 105 | return err 106 | } 107 | } 108 | 109 | if keepBackup { 110 | Log.Println(">> Backup files were not removed <<") 111 | Log.Println(">> To revert to previous state run 'revert' subcommand <<") 112 | Log.Println(">> To remove backup files run 'cleanup' subcommand <<") 113 | } 114 | 115 | Log.Println("All tasks finished") 116 | return nil 117 | } 118 | 119 | func (c *Conversion) saveNewSpec(backup bool) (err error) { 120 | 121 | if backup { 122 | err = c.backupSpec() 123 | if err != nil { 124 | return err 125 | } 126 | } else { 127 | err = c.log.Log(revert.ActionManual, "restore datastore_spec to previous state") 128 | if err != nil { 129 | return err 130 | } 131 | } 132 | 133 | toDiskId, err := repo.DatastoreSpec(c.toSpec) 134 | if err != nil { 135 | return err 136 | } 137 | 138 | err = ioutil.WriteFile(filepath.Join(c.path, 
repo.SpecsFile), []byte(toDiskId), 0660) 139 | if err != nil { 140 | return err 141 | } 142 | 143 | if backup { 144 | err = c.log.Log(revert.ActionRemove, filepath.Join(c.path, repo.SpecsFile)) 145 | if err != nil { 146 | return err 147 | } 148 | } 149 | 150 | return nil 151 | } 152 | 153 | func (c *Conversion) backupSpec() error { 154 | backupFile, err := ioutil.TempFile(c.path, "datastore_spec_backup") 155 | if err != nil { 156 | return err 157 | } 158 | 159 | specData, err := ioutil.ReadFile(filepath.Join(c.path, repo.SpecsFile)) 160 | if err != nil { 161 | return err 162 | } 163 | 164 | n, err := backupFile.Write(specData) 165 | if err != nil { 166 | return err 167 | } 168 | 169 | if n != len(specData) { 170 | return fmt.Errorf("failed to create backup of datastore_spec") 171 | } 172 | 173 | err = c.log.Log(revert.ActionMove, backupFile.Name(), filepath.Join(c.path, repo.SpecsFile)) 174 | if err != nil { 175 | return err 176 | } 177 | 178 | err = c.log.Log(revert.ActionCleanup, backupFile.Name()) 179 | if err != nil { 180 | return err 181 | } 182 | 183 | err = backupFile.Close() 184 | if err != nil { 185 | return err 186 | } 187 | 188 | return nil 189 | } 190 | -------------------------------------------------------------------------------- /convert/convert_test.go: -------------------------------------------------------------------------------- 1 | package convert_test 2 | 3 | import ( 4 | "fmt" 5 | "path" 6 | "strings" 7 | "testing" 8 | 9 | "github.com/ipfs/ipfs-ds-convert/config" 10 | 11 | convert "github.com/ipfs/ipfs-ds-convert/convert" 12 | testutil "github.com/ipfs/ipfs-ds-convert/testutil" 13 | ) 14 | 15 | func TestBasicConvert(t *testing.T) { 16 | //Prepare repo 17 | dir, _close, s1, s2 := testutil.PrepareTest(t, 3000, 3000) 18 | defer _close(t) 19 | 20 | testutil.PatchConfig(t, path.Join(dir, "config"), "../testfiles/badgerSpec") 21 | 22 | //Convert! 23 | err := convert.Convert(dir, false) 24 | if err != nil { 25 | t.Fatal(err) 26 | } 27 | 28 | testutil.FinishTest(t, dir, s1, s2, 3000, 3000) 29 | } 30 | 31 | func TestLossyConvert(t *testing.T) { 32 | //Prepare repo 33 | dir, _close, _, _ := testutil.PrepareTest(t, 100, 100) 34 | defer _close(t) 35 | 36 | testutil.PatchConfig(t, path.Join(dir, "config"), "../testfiles/lossySpec") 37 | 38 | //Convert! 39 | err := convert.Convert(dir, false) 40 | if err != nil { 41 | if !strings.Contains(err.Error(), "adding missing to src spec: couldn't find best match for specA /") { 42 | t.Fatal(err) 43 | } 44 | return 45 | } 46 | 47 | t.Errorf("expected error 'adding missing to src spec: couldn't find best match for specA /'") 48 | } 49 | 50 | //should cover noop case in convert.go 51 | func TestNoopConvert(t *testing.T) { 52 | //Prepare repo 53 | dir, _close, s1, s2 := testutil.PrepareTest(t, 3000, 3000) 54 | defer _close(t) 55 | 56 | testutil.PatchConfig(t, path.Join(dir, "config"), "../testfiles/equalSpec") 57 | 58 | //Convert! 
59 | err := convert.Convert(dir, false) 60 | if err != nil { 61 | t.Fatal(err) 62 | } 63 | 64 | testutil.FinishTest(t, dir, s1, s2, 3000, 3000) 65 | } 66 | 67 | func TestSkipCopyConvert(t *testing.T) { 68 | spec := make(map[string]interface{}) 69 | err := config.Load("../testfiles/skipableSpec", &spec) 70 | if err != nil { 71 | t.Fatal(err) 72 | } 73 | 74 | dir, _close := testutil.NewTestRepo(t, spec) 75 | defer _close(t) 76 | 77 | r, err := testutil.OpenRepo(dir) 78 | if err != nil { 79 | t.Fatal(err) 80 | } 81 | 82 | prefixes := []string{"a/", "b/", "c/", "d/", "e/"} 83 | seeds := []int64{} 84 | 85 | for _, prefix := range prefixes { 86 | fmt.Println("Generating " + prefix) 87 | seed, err := testutil.InsertRandomKeys(prefix, 1000, r) 88 | if err != nil { 89 | t.Fatal(err) 90 | } 91 | seeds = append(seeds, seed) 92 | } 93 | 94 | err = r.Close() 95 | if err != nil { 96 | t.Fatal(err) 97 | } 98 | 99 | testutil.PatchConfig(t, path.Join(dir, "config"), "../testfiles/skipableDstSpec") 100 | 101 | err = convert.Convert(dir, false) 102 | if err != nil { 103 | t.Fatal(err) 104 | } 105 | 106 | r, err = testutil.OpenRepo(dir) 107 | if err != nil { 108 | t.Fatal(err) 109 | } 110 | 111 | for i, prefix := range prefixes { 112 | err = testutil.Verify(prefix, 1000, seeds[i], r) 113 | if err != nil { 114 | t.Fatal(err) 115 | } 116 | } 117 | 118 | err = r.Close() 119 | if err != nil { 120 | t.Fatal(err) 121 | } 122 | } 123 | -------------------------------------------------------------------------------- /convert/copy.go: -------------------------------------------------------------------------------- 1 | package convert 2 | 3 | import ( 4 | "fmt" 5 | "io" 6 | "io/ioutil" 7 | "os" 8 | "path" 9 | "path/filepath" 10 | 11 | "github.com/ipfs/ipfs-ds-convert/config" 12 | "github.com/ipfs/ipfs-ds-convert/repo" 13 | "github.com/ipfs/ipfs-ds-convert/revert" 14 | "github.com/ipfs/ipfs-ds-convert/strategy" 15 | 16 | ds "github.com/ipfs/go-datastore" 17 | dsq "github.com/ipfs/go-datastore/query" 18 | errors "github.com/pkg/errors" 19 | ) 20 | 21 | type Copy struct { 22 | path string 23 | 24 | fromSpec strategy.Spec 25 | toSpec strategy.Spec 26 | 27 | newDsDir string 28 | oldDsDir string //used after conversion 29 | 30 | oldPaths []string 31 | newPaths []string 32 | 33 | fromDs repo.Datastore 34 | toDs repo.Datastore 35 | 36 | log *revert.ActionLogger 37 | logStep func(string, ...interface{}) 38 | } 39 | 40 | func NewCopy(path string, fromSpec strategy.Spec, toSpec strategy.Spec, log *revert.ActionLogger, logStep func(string, ...interface{})) *Copy { 41 | return &Copy{ 42 | path: path, 43 | fromSpec: fromSpec, 44 | toSpec: toSpec, 45 | log: log, 46 | logStep: logStep, 47 | } 48 | } 49 | 50 | func (c *Copy) Run() error { 51 | err := c.validateSpecs() 52 | if err != nil { 53 | return err 54 | } 55 | 56 | Log.Println("Checks OK") 57 | 58 | err = c.openDatastores() 59 | if err != nil { 60 | return err 61 | } 62 | 63 | Log.Println("Copying keys, this can take a long time") 64 | 65 | err = CopyKeys(c.fromDs, c.toDs) 66 | if err != nil { 67 | return err 68 | } 69 | 70 | err = c.closeDatastores() 71 | if err != nil { 72 | return err 73 | } 74 | 75 | Log.Println("All data copied, swapping repo") 76 | 77 | err = c.swapDatastores() 78 | if err != nil { 79 | return err 80 | } 81 | 82 | return nil 83 | } 84 | 85 | func (c *Copy) Verify() error { 86 | err := c.openSwappedDatastores() 87 | if err != nil { 88 | return err 89 | } 90 | 91 | Log.Println("Verifying key integrity") 92 | verified, err := c.verifyKeys() 93 | if err != nil 
{ 94 | err2 := c.closeDatastores() 95 | if err2 != nil { 96 | return err2 97 | } 98 | 99 | return err 100 | } 101 | Log.Printf("%d keys OK\n", verified) 102 | 103 | err = c.closeDatastores() 104 | if err != nil { 105 | return err 106 | } 107 | 108 | return nil 109 | } 110 | 111 | func (c *Copy) validateSpecs() error { 112 | oldPaths, err := config.Validate(c.fromSpec, false) 113 | if err != nil { 114 | return errors.Wrapf(err, "error validating datastore spec in %s", filepath.Join(c.path, repo.SpecsFile)) 115 | } 116 | c.oldPaths = oldPaths 117 | 118 | newPaths, err := config.Validate(c.toSpec, false) 119 | if err != nil { 120 | return errors.Wrapf(err, "error validating datastore spec in %s", filepath.Join(c.path, repo.ConfigFile)) 121 | } 122 | c.newPaths = newPaths 123 | 124 | return nil 125 | } 126 | 127 | func (c *Copy) openDatastores() (err error) { 128 | c.fromDs, err = repo.OpenDatastore(c.path, c.fromSpec) 129 | if err != nil { 130 | return errors.Wrapf(err, "error opening datastore at %s", c.path) 131 | } 132 | c.logStep("open datastore at %s", c.path) 133 | 134 | c.newDsDir, err = ioutil.TempDir(c.path, "ds-convert") 135 | if err != nil { 136 | return errors.Wrapf(err, "error creating temp datastore at %s", c.path) 137 | } 138 | 139 | err = c.log.Log(revert.ActionRemove, c.newDsDir) 140 | if err != nil { 141 | return err 142 | } 143 | 144 | c.logStep("create temp datastore directory at %s", c.newDsDir) 145 | 146 | c.toDs, err = repo.OpenDatastore(c.newDsDir, c.toSpec) 147 | if err != nil { 148 | return errors.Wrapf(err, "error opening new datastore at %s", c.newDsDir) 149 | } 150 | c.logStep("open new datastore at %s", c.newDsDir) 151 | 152 | return nil 153 | } 154 | 155 | func CopyKeys(fromDs repo.Datastore, toDs repo.Datastore) error { 156 | //flatfs only supports KeysOnly:true 157 | //TODO: try to optimize this 158 | res, err := fromDs.Query(dsq.Query{Prefix: "/", KeysOnly: true}) 159 | if err != nil { 160 | return errors.Wrapf(err, "error opening query") 161 | } 162 | defer res.Close() 163 | 164 | maxBatchEntries := 1024 165 | maxBatchSize := 16 << 20 166 | 167 | doneEntries := 0 168 | curEntries := 0 169 | curSize := 0 170 | 171 | var curBatch ds.Batch 172 | 173 | for { 174 | entry, ok := res.NextSync() 175 | if entry.Error != nil { 176 | return errors.Wrapf(entry.Error, "entry.Error was not nil") 177 | } 178 | if !ok { 179 | break 180 | } 181 | 182 | if curBatch == nil { 183 | curBatch, err = toDs.Batch() 184 | if entry.Error != nil { 185 | return errors.Wrapf(err, "error creating batch") 186 | } 187 | if curBatch == nil { 188 | return errors.New("failed to create new batch") 189 | } 190 | } 191 | 192 | val, err := fromDs.Get(ds.RawKey(entry.Key)) 193 | if err != nil { 194 | return errors.Wrapf(err, "get from old datastore failed (dskey %s)", entry.Key) 195 | } 196 | 197 | curBatch.Put(ds.RawKey(entry.Key), val) 198 | curEntries++ 199 | 200 | curSize += len(val) 201 | 202 | if curEntries == maxBatchEntries || curSize >= maxBatchSize { 203 | err := curBatch.Commit() 204 | if err != nil { 205 | return errors.Wrapf(err, "batch commit failed") 206 | } 207 | 208 | doneEntries += curEntries 209 | fmt.Printf("\rcopied %d keys", doneEntries) 210 | 211 | curEntries = 0 212 | curSize = 0 213 | curBatch = nil 214 | } 215 | } 216 | 217 | fmt.Printf("\rcopied %d keys", doneEntries+curEntries) 218 | fmt.Printf("\n") 219 | 220 | if curEntries > 0 { 221 | if curBatch == nil { 222 | return errors.New("nil curBatch when there are unflushed entries") 223 | } 224 | 225 | err := 
curBatch.Commit() 226 | if err != nil { 227 | return errors.Wrapf(err, "batch commit failed") 228 | } 229 | } 230 | return nil 231 | } 232 | 233 | func (c *Copy) swapDatastores() (err error) { 234 | c.oldDsDir, err = ioutil.TempDir(c.path, "ds-convert-old") 235 | if err != nil { 236 | return errors.Wrapf(err, "error creating temp datastore at %s", c.path) 237 | } 238 | 239 | err = c.log.Log(revert.ActionRemove, c.oldDsDir) 240 | if err != nil { 241 | return err 242 | } 243 | 244 | err = c.log.Log(revert.ActionCleanup, c.oldDsDir) 245 | if err != nil { 246 | return err 247 | } 248 | 249 | c.logStep("create temp datastore directory at %s", c.oldDsDir) 250 | 251 | //TODO: Check if old dirs aren't mount points 252 | for _, dir := range c.oldPaths { 253 | err := os.Rename(path.Join(c.path, dir), path.Join(c.oldDsDir, dir)) 254 | if err != nil { 255 | return errors.Wrapf(err, "error moving old datastore dir %s to %s", dir, c.oldDsDir) 256 | } 257 | 258 | err = c.log.Log(revert.ActionMove, path.Join(c.oldDsDir, dir), path.Join(c.path, dir)) 259 | if err != nil { 260 | return err 261 | } 262 | 263 | c.logStep("> move %s to %s", path.Join(c.path, dir), path.Join(c.oldDsDir, dir)) 264 | 265 | //Those are theoretically not needed, but having them won't hurt 266 | if _, err := os.Stat(path.Join(c.path, dir)); !os.IsNotExist(err) { 267 | return fmt.Errorf("failed to move old datastore dir %s from %s", dir, c.path) 268 | } 269 | 270 | if s, err := os.Stat(path.Join(c.oldDsDir, dir)); err != nil || !s.IsDir() { 271 | return fmt.Errorf("failed to move old datastore dir %s to %s", dir, c.oldDsDir) 272 | } 273 | } 274 | c.logStep("move old DS to %s", c.oldDsDir) 275 | 276 | for _, dir := range c.newPaths { 277 | err := os.Rename(path.Join(c.newDsDir, dir), path.Join(c.path, dir)) 278 | if err != nil { 279 | return errors.Wrapf(err, "error moving new datastore dir %s from %s", dir, c.newDsDir) 280 | } 281 | 282 | err = c.log.Log(revert.ActionMove, path.Join(c.path, dir), path.Join(c.newDsDir, dir)) 283 | if err != nil { 284 | return err 285 | } 286 | 287 | c.logStep("> move %s to %s", path.Join(c.newDsDir, dir), path.Join(c.path, dir)) 288 | } 289 | c.logStep("move new DS from %s", c.oldDsDir) 290 | 291 | //check if toDs dir is empty 292 | err = checkDirEmpty(c.newDsDir) 293 | if err != nil { 294 | return err 295 | } 296 | 297 | err = os.Remove(c.newDsDir) 298 | if err != nil { 299 | return fmt.Errorf("failed to remove toDs temp directory after swapping repos") 300 | } 301 | 302 | err = c.log.Log(revert.ActionMkdir, c.newDsDir) 303 | if err != nil { 304 | return err 305 | } 306 | 307 | c.logStep("remove temp toDs directory %s", c.newDsDir) 308 | 309 | return nil 310 | } 311 | 312 | func (c *Copy) openSwappedDatastores() (err error) { 313 | c.fromDs, err = repo.OpenDatastore(c.oldDsDir, c.fromSpec) 314 | if err != nil { 315 | return errors.Wrapf(err, "error opening datastore at %s", c.oldDsDir) 316 | } 317 | c.logStep("open datastore at %s", c.oldDsDir) 318 | 319 | c.toDs, err = repo.OpenDatastore(c.path, c.toSpec) 320 | if err != nil { 321 | return errors.Wrapf(err, "error opening new datastore at %s", c.path) 322 | } 323 | c.logStep("open new datastore at %s", c.path) 324 | 325 | return nil 326 | } 327 | 328 | func (c *Copy) verifyKeys() (n int, err error) { 329 | c.logStep("verify keys") 330 | 331 | res, err := c.fromDs.Query(dsq.Query{Prefix: "/", KeysOnly: true}) 332 | if err != nil { 333 | return n, errors.Wrapf(err, "error opening query") 334 | } 335 | defer res.Close() 336 | 337 | for { 338 | entry, 
ok := res.NextSync() 339 | if entry.Error != nil { 340 | return n, errors.Wrapf(entry.Error, "entry.Error was not nil") 341 | } 342 | if !ok { 343 | break 344 | } 345 | 346 | has, err := c.toDs.Has(ds.RawKey(entry.Key)) 347 | if err != nil { 348 | return n, errors.Wrapf(err, "toDs.Has returned error") 349 | } 350 | 351 | if !has { 352 | return n, fmt.Errorf("key %s was not present in new datastore", entry.Key) 353 | } 354 | 355 | n++ 356 | } 357 | 358 | return n, nil 359 | } 360 | 361 | func (c *Copy) closeDatastores() error { 362 | err := c.fromDs.Close() 363 | if err != nil { 364 | return errors.Wrapf(err, "error closing old datastore") 365 | } 366 | c.logStep("close old datastore") 367 | 368 | err = c.toDs.Close() 369 | if err != nil { 370 | return errors.Wrapf(err, "error closing new datastore") 371 | } 372 | c.logStep("close new datastore") 373 | return nil 374 | } 375 | 376 | func (c *Copy) Clean() error { 377 | err := c.log.Log(revert.ActionManual, "no backup data present for revert") 378 | if err != nil { 379 | return err 380 | } 381 | 382 | err = os.RemoveAll(c.oldDsDir) 383 | if err != nil { 384 | return fmt.Errorf("failed to remove oldDsDir temp directory") 385 | } 386 | 387 | return nil 388 | } 389 | 390 | func checkDirEmpty(path string) error { 391 | dir, err := os.Open(path) 392 | if err != nil { 393 | return errors.Wrapf(err, "failed to open %s", path) 394 | } 395 | 396 | _, err = dir.Readdirnames(1) 397 | if err != io.EOF { 398 | dir.Close() 399 | return fmt.Errorf("%s is not empty", path) 400 | } 401 | return dir.Close() 402 | } 403 | -------------------------------------------------------------------------------- /convert/copy_test.go: -------------------------------------------------------------------------------- 1 | package convert 2 | 3 | import ( 4 | "fmt" 5 | "io/ioutil" 6 | "os" 7 | "path/filepath" 8 | "strings" 9 | "testing" 10 | 11 | "github.com/ipfs/ipfs-ds-convert/repo" 12 | "github.com/ipfs/ipfs-ds-convert/testutil" 13 | 14 | ds "github.com/ipfs/go-datastore" 15 | ) 16 | 17 | var ( 18 | ValidSpec = map[string]interface{}{ 19 | "type": "flatfs", 20 | "path": "blocks", 21 | "sync": true, 22 | "shardFunc": "/repo/flatfs/shard/v1/next-to-last/2", 23 | } 24 | 25 | InvalidSpec = map[string]interface{}{} 26 | 27 | DefaultSpec = map[string]interface{}{ 28 | "type": "mount", 29 | "mounts": []interface{}{ 30 | map[string]interface{}{ 31 | "mountpoint": "/blocks", 32 | "type": "flatfs", 33 | "path": "blocks", 34 | "sync": true, 35 | "shardFunc": "/repo/flatfs/shard/v1/next-to-last/2", 36 | }, 37 | map[string]interface{}{ 38 | "mountpoint": "/", 39 | "type": "levelds", 40 | "path": "levelDatastore", 41 | "compression": "none", 42 | }, 43 | }, 44 | } 45 | 46 | SingleSpec = map[string]interface{}{ 47 | "type": "mount", 48 | "mounts": []interface{}{ 49 | map[string]interface{}{ 50 | "mountpoint": "/", 51 | "type": "levelds", 52 | "path": "levelDatastore", 53 | "compression": "none", 54 | }, 55 | }, 56 | } 57 | ) 58 | 59 | func TestInvalidSpecLeft(t *testing.T) { 60 | d, err := ioutil.TempDir(os.TempDir(), "ds-convert-test-") 61 | if err != nil { 62 | t.Fatalf(err.Error()) 63 | } 64 | 65 | c := NewCopy(d, InvalidSpec, ValidSpec, nil, func(string, ...interface{}) {}) 66 | err = c.Run() 67 | if err != nil { 68 | expect := fmt.Sprintf("error validating datastore spec in %s: invalid type entry in config", filepath.Join(d, "datastore_spec")) 69 | if strings.Contains(err.Error(), expect) { 70 | return 71 | } 72 | t.Errorf("unexpected error: '%s', expected to get '%s'", err, expect) 
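		// Note: a non-matching error is reported here and then falls through
		// to the unconditional "expected error" failure below as well.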
73 | } 74 | 75 | t.Errorf("expected error") 76 | } 77 | 78 | func TestInvalidSpecRight(t *testing.T) { 79 | d, err := ioutil.TempDir(os.TempDir(), "ds-convert-test-") 80 | if err != nil { 81 | t.Fatalf(err.Error()) 82 | } 83 | 84 | c := NewCopy(d, ValidSpec, InvalidSpec, nil, func(string, ...interface{}) {}) 85 | err = c.Run() 86 | if err != nil { 87 | if strings.Contains(err.Error(), fmt.Sprintf("error validating datastore spec in %s: invalid type entry in config", filepath.Join(d, "config"))) { 88 | return 89 | } 90 | t.Errorf("unexpected error: %s", err) 91 | } 92 | 93 | t.Errorf("expected error") 94 | } 95 | 96 | func TestOpenNonexist(t *testing.T) { 97 | d, err := ioutil.TempDir(os.TempDir(), "ds-convert-test-") 98 | if err != nil { 99 | t.Fatalf(err.Error()) 100 | } 101 | 102 | p := filepath.Join(d, "hopefully/nonexistent/repo") 103 | expect := fmt.Sprintf("error opening datastore at %s: mkdir %s: ", p, filepath.Join(p, "blocks")) 104 | 105 | c := NewCopy(p, ValidSpec, ValidSpec, nil, func(string, ...interface{}) {}) 106 | err = c.Run() 107 | if err != nil { 108 | if strings.Contains(err.Error(), expect) { 109 | return 110 | } 111 | t.Errorf("unexpected error: %s", err) 112 | t.Errorf("expected : %s", expect) 113 | } 114 | 115 | t.Errorf("expected error") 116 | } 117 | 118 | func TestVerifyKeysFail(t *testing.T) { 119 | dir, _close, _, _ := testutil.PrepareTest(t, 100, 100) 120 | defer _close(t) 121 | 122 | testutil.PatchConfig(t, filepath.Join(dir, "config"), "../testfiles/singleSpec") 123 | 124 | c := NewCopy(dir, DefaultSpec, SingleSpec, nil, func(string, ...interface{}) {}) 125 | if err := c.Run(); err != nil { 126 | t.Fatal(err) 127 | } 128 | 129 | r, err := repo.OpenDatastore(dir, SingleSpec) 130 | if err != nil { 131 | t.Fatal(err) 132 | } 133 | 134 | if err := r.Delete(ds.NewKey("/blocks/NOTARANDOMKEY")); err != nil { 135 | t.Fatal(err) 136 | } 137 | 138 | if err := r.Close(); err != nil { 139 | t.Fatal(err) 140 | } 141 | 142 | if err := c.Verify(); err.Error() != "key /blocks/NOTARANDOMKEY was not present in new datastore" { 143 | t.Fatal(err) 144 | } 145 | } 146 | -------------------------------------------------------------------------------- /convert/prepare.go: -------------------------------------------------------------------------------- 1 | package convert 2 | 3 | import ( 4 | "fmt" 5 | "io/ioutil" 6 | "os" 7 | "path/filepath" 8 | "strconv" 9 | "strings" 10 | 11 | "github.com/ipfs/ipfs-ds-convert/config" 12 | "github.com/ipfs/ipfs-ds-convert/repo" 13 | 14 | "github.com/pkg/errors" 15 | ) 16 | 17 | func (c *Conversion) checkRepoVersion() error { 18 | vstr, err := ioutil.ReadFile(filepath.Join(c.path, "version")) 19 | if err != nil { 20 | return err 21 | } 22 | 23 | version, err := strconv.Atoi(strings.TrimSpace(string(vstr))) 24 | if err != nil { 25 | return err 26 | } 27 | 28 | if version != repo.SupportedRepoVersion { 29 | return fmt.Errorf("unsupported fsrepo version: %d", version) 30 | } 31 | 32 | return nil 33 | } 34 | 35 | func (c *Conversion) loadSpecs() error { 36 | specStat, err := os.Stat(filepath.Join(c.path, repo.SpecsFile)) 37 | if os.IsNotExist(err) { 38 | return err 39 | } 40 | 41 | if specStat.Mode()&0200 == 0 { 42 | return errors.New("datastore_spec is not writable") 43 | } 44 | 45 | oldSpec := make(map[string]interface{}) 46 | err = config.Load(filepath.Join(c.path, repo.SpecsFile), &oldSpec) 47 | if err != nil { 48 | return err 49 | } 50 | 51 | _, err = config.Validate(oldSpec, true) 52 | if err != nil { 53 | return errors.Wrapf(err, "validating 
datastore_spec spec") 54 | } 55 | 56 | c.fromSpec = oldSpec 57 | 58 | repoConfig := make(map[string]interface{}) 59 | err = config.Load(filepath.Join(c.path, repo.ConfigFile), &repoConfig) 60 | if err != nil { 61 | return err 62 | } 63 | 64 | dsConfig, ok := repoConfig["Datastore"].(map[string]interface{}) 65 | if !ok { 66 | return fmt.Errorf("no 'Datastore' or invalid type in %s", filepath.Join(c.path, repo.ConfigFile)) 67 | } 68 | 69 | dsSpec, ok := dsConfig["Spec"].(map[string]interface{}) 70 | if !ok { 71 | return fmt.Errorf("no 'Datastore.Spec' or invalid type in %s", filepath.Join(c.path, repo.ConfigFile)) 72 | } 73 | 74 | _, err = config.Validate(dsSpec, false) 75 | if err != nil { 76 | return errors.Wrapf(err, "validating new spec") 77 | } 78 | 79 | c.toSpec = dsSpec 80 | return nil 81 | } 82 | -------------------------------------------------------------------------------- /convert/util.go: -------------------------------------------------------------------------------- 1 | package convert 2 | 3 | import ( 4 | "fmt" 5 | "strings" 6 | 7 | errors "github.com/pkg/errors" 8 | ) 9 | 10 | func (c *Conversion) addStep(format string, args ...interface{}) { 11 | c.steps = append(c.steps, fmt.Sprintf(format, args...)) 12 | } 13 | 14 | func (c *Conversion) wrapErr(err error) error { 15 | s := strings.Join(c.steps, "\n") 16 | 17 | return errors.Wrapf(err, "CONVERSION ERROR\n----------\nConversion steps done so far:\n%s\n----------\n", s) 18 | } 19 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/ipfs/ipfs-ds-convert 2 | 3 | go 1.16 4 | 5 | require ( 6 | github.com/ipfs/go-datastore v0.4.6 7 | github.com/ipfs/go-ds-badger v0.2.7 8 | github.com/ipfs/go-ds-flatfs v0.4.5 9 | github.com/ipfs/go-ds-leveldb v0.4.2 10 | github.com/ipfs/go-ds-measure v0.1.0 11 | github.com/ipfs/go-fs-lock v0.0.7 12 | github.com/ipfs/go-ipfs v0.10.0 13 | github.com/ipfs/go-ipfs-config v0.16.0 14 | github.com/mitchellh/go-homedir v1.1.0 15 | github.com/pkg/errors v0.9.1 16 | github.com/syndtr/goleveldb v1.0.0 17 | github.com/urfave/cli v1.22.4 18 | ) 19 | -------------------------------------------------------------------------------- /main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "os" 5 | "path" 6 | 7 | "github.com/ipfs/ipfs-ds-convert/convert" 8 | "github.com/ipfs/ipfs-ds-convert/repo" 9 | "github.com/ipfs/ipfs-ds-convert/revert" 10 | homedir "github.com/mitchellh/go-homedir" 11 | 12 | cli "github.com/urfave/cli" 13 | ) 14 | 15 | const ( 16 | DefaultPathName = ".ipfs" 17 | DefaultPathRoot = "~/" + DefaultPathName 18 | DefaultConfigFile = "config" 19 | EnvDir = "IPFS_PATH" 20 | ) 21 | 22 | func main() { 23 | run(os.Args) 24 | } 25 | 26 | func run(args []string) { 27 | app := cli.NewApp() 28 | 29 | app.Version = repo.ToolVersion 30 | 31 | app.Flags = []cli.Flag{ 32 | cli.BoolFlag{ 33 | Name: "verbose", 34 | Usage: "print verbose logging information", 35 | }, 36 | } 37 | 38 | app.Before = func(c *cli.Context) error { 39 | return nil 40 | } 41 | 42 | app.Commands = []cli.Command{ 43 | ConvertCommand, 44 | RevertCommand, 45 | CleanupCommand, 46 | } 47 | 48 | if err := app.Run(args); err != nil { 49 | convert.Log.Fatal(err) 50 | } 51 | } 52 | 53 | var ConvertCommand = cli.Command{ 54 | Name: "convert", 55 | Usage: "convert datastore ", 56 | Description: `'convert' converts existing ipfs datastore 
setup to another based on the 57 | ipfs configuration and repo specs. 58 | 59 | Note that depending on configuration you are converting to up to double the 60 | disk space may be required. 61 | 62 | If you have any doubts about your configuration, run the tool conversion with 63 | --keep option enabled 64 | 65 | IPFS_PATH environmental variable is respected 66 | `, 67 | Flags: []cli.Flag{ 68 | cli.BoolFlag{ 69 | Name: "keep", 70 | Usage: "don't remove backup files after successful conversion", 71 | }, 72 | }, 73 | Action: func(c *cli.Context) error { 74 | baseDir, err := getBaseDir() 75 | if err != nil { 76 | convert.Log.Fatal(err) 77 | } 78 | 79 | err = convert.Convert(baseDir, c.Bool("keep")) 80 | if err != nil { 81 | convert.Log.Fatal(err) 82 | } 83 | return err 84 | }, 85 | } 86 | 87 | var RevertCommand = cli.Command{ 88 | Name: "revert", 89 | Usage: "revert conversion steps", 90 | Description: `'reverts' attempts to revert changes done to ipfs repo by 'convert'. 91 | It's possible to run revert when conversion failed in middle of the process or 92 | if it was run with --keep option enabled. 93 | 94 | Note that in some cases revert may fail in a non-graceful way. When running 95 | revert after other programs used the datastore (like ipfs daemon), changes made 96 | by it between 'convert' and 'revert' may be lost. This may lead to repo 97 | corruption in extreme cases. 98 | 99 | Use this command with care, make sure you have some free disk space. 100 | If you have any important data in the repo it's highly recommended to backup the 101 | repo before running this command if you haven't already. 102 | 103 | IPFS_PATH environmental variable is respected 104 | `, 105 | Flags: []cli.Flag{ 106 | cli.BoolFlag{ 107 | Name: "force", 108 | Usage: "revert even if last conversion was successful", 109 | }, 110 | cli.BoolFlag{ 111 | Name: "fix-config", 112 | Usage: "revert repo config from datastore_spec", 113 | }, 114 | }, 115 | Action: func(c *cli.Context) error { 116 | baseDir, err := getBaseDir() 117 | if err != nil { 118 | convert.Log.Fatal(err) 119 | } 120 | 121 | err = revert.Revert(baseDir, c.Bool("force"), c.Bool("fix-config"), false) 122 | if err != nil { 123 | convert.Log.Fatal(err) 124 | } 125 | return err 126 | }, 127 | } 128 | 129 | var CleanupCommand = cli.Command{ 130 | Name: "cleanup", 131 | Usage: "remove leftover backup files", 132 | Description: `'cleanup' removes backup files left after successful convert --keep 133 | was run. 
134 | 135 | IPFS_PATH environmental variable is respected 136 | `, 137 | Flags: []cli.Flag{}, 138 | Action: func(c *cli.Context) error { 139 | baseDir, err := getBaseDir() 140 | if err != nil { 141 | convert.Log.Fatal(err) 142 | } 143 | 144 | err = revert.Revert(baseDir, c.Bool("force"), false, true) 145 | if err != nil { 146 | convert.Log.Fatal(err) 147 | } 148 | return err 149 | }, 150 | } 151 | 152 | //TODO: Patch config util command 153 | 154 | func getBaseDir() (string, error) { 155 | baseDir := os.Getenv(EnvDir) 156 | if baseDir == "" { 157 | baseDir = DefaultPathRoot 158 | } 159 | 160 | baseDir, err := homedir.Expand(baseDir) 161 | if err != nil { 162 | return "", err 163 | } 164 | 165 | configFile := path.Join(baseDir, DefaultConfigFile) 166 | 167 | _, err = os.Stat(configFile) 168 | if err != nil { 169 | return "", err 170 | } 171 | 172 | return baseDir, nil 173 | } 174 | -------------------------------------------------------------------------------- /repo/badgerds.go: -------------------------------------------------------------------------------- 1 | package repo 2 | 3 | import ( 4 | "errors" 5 | "os" 6 | "path/filepath" 7 | 8 | badgerds "github.com/ipfs/go-ds-badger" 9 | ) 10 | 11 | type badgerdsDatastoreConfig struct { 12 | path string 13 | syncWrites bool 14 | } 15 | 16 | // BadgerdsDatastoreConfig returns a configuration stub for a badger datastore 17 | // from the given parameters 18 | func BadgerdsDatastoreConfig(params map[string]interface{}) (DatastoreConfig, error) { 19 | var c badgerdsDatastoreConfig 20 | var ok bool 21 | 22 | c.path, ok = params["path"].(string) 23 | if !ok { 24 | return nil, errors.New("'path' field is missing or not string") 25 | } 26 | 27 | sw, ok := params["syncWrites"] 28 | if !ok { 29 | c.syncWrites = true 30 | } else { 31 | if swb, ok := sw.(bool); ok { 32 | c.syncWrites = swb 33 | } else { 34 | return nil, errors.New("'syncWrites' field was not a boolean") 35 | } 36 | } 37 | 38 | return &c, nil 39 | } 40 | 41 | func (c *badgerdsDatastoreConfig) DiskSpec() DiskSpec { 42 | return map[string]interface{}{ 43 | "type": "badgerds", 44 | "path": c.path, 45 | } 46 | } 47 | 48 | func (c *badgerdsDatastoreConfig) Create(path string) (Datastore, error) { 49 | p := c.path 50 | if !filepath.IsAbs(p) { 51 | p = filepath.Join(path, p) 52 | } 53 | 54 | err := os.MkdirAll(p, 0755) 55 | if err != nil { 56 | return nil, err 57 | } 58 | 59 | defopts := badgerds.DefaultOptions 60 | defopts.SyncWrites = c.syncWrites 61 | 62 | return badgerds.NewDatastore(p, &defopts) 63 | } 64 | -------------------------------------------------------------------------------- /repo/const.go: -------------------------------------------------------------------------------- 1 | package repo 2 | 3 | const ( 4 | LockFile = "repo.lock" 5 | ConfigFile = "config" 6 | SpecsFile = "datastore_spec" 7 | 8 | SupportedRepoVersion = 11 9 | ToolVersion = "0.6.0" 10 | ) 11 | -------------------------------------------------------------------------------- /repo/flatfsds.go: -------------------------------------------------------------------------------- 1 | package repo 2 | 3 | import ( 4 | "fmt" 5 | "path/filepath" 6 | 7 | flatfs "github.com/ipfs/go-ds-flatfs" 8 | ) 9 | 10 | type flatfsDatastoreConfig struct { 11 | path string 12 | shardFun *flatfs.ShardIdV1 13 | syncField bool 14 | } 15 | 16 | // FlatfsDatastoreConfig returns a flatfs DatastoreConfig from a spec 17 | func FlatfsDatastoreConfig(params map[string]interface{}) (DatastoreConfig, error) { 18 | var c flatfsDatastoreConfig 19 | var ok 
bool 20 | var err error 21 | 22 | c.path, ok = params["path"].(string) 23 | if !ok { 24 | return nil, fmt.Errorf("'path' field is missing or not boolean") 25 | } 26 | 27 | sshardFun, ok := params["shardFunc"].(string) 28 | if !ok { 29 | return nil, fmt.Errorf("'shardFunc' field is missing or not a string") 30 | } 31 | c.shardFun, err = flatfs.ParseShardFunc(sshardFun) 32 | if err != nil { 33 | return nil, err 34 | } 35 | 36 | c.syncField, ok = params["sync"].(bool) 37 | if !ok { 38 | return nil, fmt.Errorf("'sync' field is missing or not boolean") 39 | } 40 | return &c, nil 41 | } 42 | 43 | func (c *flatfsDatastoreConfig) DiskSpec() DiskSpec { 44 | return map[string]interface{}{ 45 | "type": "flatfs", 46 | "path": c.path, 47 | "shardFunc": c.shardFun.String(), 48 | } 49 | } 50 | 51 | func (c *flatfsDatastoreConfig) Create(path string) (Datastore, error) { 52 | p := c.path 53 | if !filepath.IsAbs(p) { 54 | p = filepath.Join(path, p) 55 | } 56 | 57 | return flatfs.CreateOrOpen(p, c.shardFun, c.syncField) 58 | } 59 | -------------------------------------------------------------------------------- /repo/levelds.go: -------------------------------------------------------------------------------- 1 | package repo 2 | 3 | import ( 4 | "errors" 5 | "fmt" 6 | "path/filepath" 7 | 8 | levelds "github.com/ipfs/go-ds-leveldb" 9 | ldbopts "github.com/syndtr/goleveldb/leveldb/opt" 10 | ) 11 | 12 | type leveldsDatastoreConfig struct { 13 | path string 14 | compression ldbopts.Compression 15 | } 16 | 17 | // LeveldsDatastoreConfig returns a levelds DatastoreConfig from a spec 18 | func LeveldsDatastoreConfig(params map[string]interface{}) (DatastoreConfig, error) { 19 | var c leveldsDatastoreConfig 20 | var ok bool 21 | 22 | c.path, ok = params["path"].(string) 23 | if !ok { 24 | return nil, errors.New("'path' field is missing or not string") 25 | } 26 | 27 | switch cm := params["compression"].(string); cm { 28 | case "none": 29 | c.compression = ldbopts.NoCompression 30 | case "snappy": 31 | c.compression = ldbopts.SnappyCompression 32 | case "": 33 | c.compression = ldbopts.DefaultCompression 34 | default: 35 | return nil, fmt.Errorf("unrecognized value for compression: %s", cm) 36 | } 37 | 38 | return &c, nil 39 | } 40 | 41 | func (c *leveldsDatastoreConfig) DiskSpec() DiskSpec { 42 | return map[string]interface{}{ 43 | "type": "levelds", 44 | "path": c.path, 45 | } 46 | } 47 | 48 | func (c *leveldsDatastoreConfig) Create(path string) (Datastore, error) { 49 | p := c.path 50 | if !filepath.IsAbs(p) { 51 | p = filepath.Join(path, p) 52 | } 53 | 54 | return levelds.NewDatastore(p, &levelds.Options{ 55 | Compression: c.compression, 56 | }) 57 | } 58 | -------------------------------------------------------------------------------- /repo/logds.go: -------------------------------------------------------------------------------- 1 | package repo 2 | 3 | import ( 4 | "errors" 5 | 6 | ds "github.com/ipfs/go-datastore" 7 | ) 8 | 9 | type logDatastoreConfig struct { 10 | child DatastoreConfig 11 | name string 12 | } 13 | 14 | // LogDatastoreConfig returns a log DatastoreConfig from a spec 15 | func LogDatastoreConfig(params map[string]interface{}) (DatastoreConfig, error) { 16 | childField, ok := params["child"].(map[string]interface{}) 17 | if !ok { 18 | return nil, errors.New("'child' field is missing or not a map") 19 | } 20 | child, err := AnyDatastoreConfig(childField) 21 | if err != nil { 22 | return nil, err 23 | } 24 | name, ok := params["name"].(string) 25 | if !ok { 26 | return nil, errors.New("'name' 
field was missing or not a string") 27 | } 28 | return &logDatastoreConfig{child, name}, nil 29 | 30 | } 31 | 32 | func (c *logDatastoreConfig) Create(path string) (Datastore, error) { 33 | child, err := c.child.Create(path) 34 | if err != nil { 35 | return nil, err 36 | } 37 | return ds.NewLogDatastore(child, c.name), nil 38 | } 39 | 40 | func (c *logDatastoreConfig) DiskSpec() DiskSpec { 41 | return c.child.DiskSpec() 42 | } 43 | -------------------------------------------------------------------------------- /repo/measureds.go: -------------------------------------------------------------------------------- 1 | package repo 2 | 3 | import ( 4 | "errors" 5 | 6 | measure "github.com/ipfs/go-ds-measure" 7 | ) 8 | 9 | // MeasureDatastoreConfig returns a measure DatastoreConfig from a spec 10 | func MeasureDatastoreConfig(params map[string]interface{}) (DatastoreConfig, error) { 11 | childField, ok := params["child"].(map[string]interface{}) 12 | if !ok { 13 | return nil, errors.New("'child' field is missing or not a map") 14 | } 15 | child, err := AnyDatastoreConfig(childField) 16 | if err != nil { 17 | return nil, err 18 | } 19 | prefix, ok := params["prefix"].(string) 20 | if !ok { 21 | return nil, errors.New("'prefix' field was missing or not a string") 22 | } 23 | return &measureDatastoreConfig{child, prefix}, nil 24 | } 25 | 26 | func (c *measureDatastoreConfig) DiskSpec() DiskSpec { 27 | return c.child.DiskSpec() 28 | } 29 | 30 | func (c measureDatastoreConfig) Create(path string) (Datastore, error) { 31 | child, err := c.child.Create(path) 32 | if err != nil { 33 | return nil, err 34 | } 35 | return measure.New(c.prefix, child), nil 36 | } 37 | -------------------------------------------------------------------------------- /repo/memds.go: -------------------------------------------------------------------------------- 1 | package repo 2 | 3 | import ( 4 | ds "github.com/ipfs/go-datastore" 5 | ) 6 | 7 | type memDatastoreConfig struct { 8 | cfg map[string]interface{} 9 | } 10 | 11 | // MemDatastoreConfig returns a memory DatastoreConfig from a spec 12 | func MemDatastoreConfig(params map[string]interface{}) (DatastoreConfig, error) { 13 | return &memDatastoreConfig{params}, nil 14 | } 15 | 16 | func (c *memDatastoreConfig) DiskSpec() DiskSpec { 17 | return nil 18 | } 19 | 20 | func (c *memDatastoreConfig) Create(string) (Datastore, error) { 21 | return ds.NewMapDatastore(), nil 22 | } 23 | 24 | type measureDatastoreConfig struct { 25 | child DatastoreConfig 26 | prefix string 27 | } 28 | -------------------------------------------------------------------------------- /repo/mountds.go: -------------------------------------------------------------------------------- 1 | package repo 2 | 3 | import ( 4 | "errors" 5 | "sort" 6 | 7 | ds "github.com/ipfs/go-datastore" 8 | mount "github.com/ipfs/go-datastore/mount" 9 | ) 10 | 11 | type mountDatastoreConfig struct { 12 | mounts []premount 13 | } 14 | 15 | type premount struct { 16 | ds DatastoreConfig 17 | prefix ds.Key 18 | } 19 | 20 | // MountDatastoreConfig returns a mount DatastoreConfig from a spec 21 | func MountDatastoreConfig(params map[string]interface{}) (DatastoreConfig, error) { 22 | var res mountDatastoreConfig 23 | mounts, ok := params["mounts"].([]interface{}) 24 | if !ok { 25 | return nil, errors.New("'mounts' field is missing or not an array") 26 | } 27 | for _, iface := range mounts { 28 | cfg, ok := iface.(map[string]interface{}) 29 | if !ok { 30 | return nil, errors.New("expected map for mountpoint") 31 | } 32 | 33 | 
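		// For illustration, each element is a child spec plus a "mountpoint"
		// key; the default profile (see DefaultSpec in copy_test.go) uses
		// entries such as
		//   {"mountpoint": "/blocks", "type": "flatfs", "path": "blocks",
		//    "sync": true, "shardFunc": "/repo/flatfs/shard/v1/next-to-last/2"}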
child, err := AnyDatastoreConfig(cfg) 34 | if err != nil { 35 | return nil, err 36 | } 37 | 38 | prefix, found := cfg["mountpoint"] 39 | if !found { 40 | return nil, errors.New("no 'mountpoint' on mount") 41 | } 42 | 43 | res.mounts = append(res.mounts, premount{ 44 | ds: child, 45 | prefix: ds.NewKey(prefix.(string)), 46 | }) 47 | } 48 | sort.Slice(res.mounts, 49 | func(i, j int) bool { 50 | return res.mounts[i].prefix.String() > res.mounts[j].prefix.String() 51 | }) 52 | 53 | return &res, nil 54 | } 55 | 56 | func (c *mountDatastoreConfig) DiskSpec() DiskSpec { 57 | cfg := map[string]interface{}{"type": "mount"} 58 | mounts := make([]interface{}, len(c.mounts)) 59 | for i, m := range c.mounts { 60 | c := m.ds.DiskSpec() 61 | if c == nil { 62 | c = make(map[string]interface{}) 63 | } 64 | c["mountpoint"] = m.prefix.String() 65 | mounts[i] = c 66 | } 67 | cfg["mounts"] = mounts 68 | return cfg 69 | } 70 | 71 | func (c *mountDatastoreConfig) Create(path string) (Datastore, error) { 72 | mounts := make([]mount.Mount, len(c.mounts)) 73 | for i, m := range c.mounts { 74 | ds, err := m.ds.Create(path) 75 | if err != nil { 76 | return nil, err 77 | } 78 | mounts[i].Datastore = ds 79 | mounts[i].Prefix = m.prefix 80 | } 81 | return mount.New(mounts), nil 82 | } 83 | -------------------------------------------------------------------------------- /repo/open.go: -------------------------------------------------------------------------------- 1 | package repo 2 | 3 | import ( 4 | "bytes" 5 | "encoding/json" 6 | "fmt" 7 | "io" 8 | "os" 9 | "syscall" 10 | "time" 11 | 12 | ds "github.com/ipfs/go-datastore" 13 | retry "github.com/ipfs/go-datastore/retrystore" 14 | ) 15 | 16 | //TODO: extract and use fsrepo from go-ipfs 17 | 18 | type Datastore interface { 19 | ds.Batching 20 | } 21 | 22 | type Retry struct { 23 | *retry.Datastore 24 | io.Closer 25 | } 26 | 27 | func (ds *Retry) Close() error { 28 | return ds.Batching.(Datastore).Close() 29 | } 30 | 31 | func isTooManyFDError(err error) bool { 32 | perr, ok := err.(*os.PathError) 33 | if ok && perr.Err == syscall.EMFILE { 34 | return true 35 | } 36 | 37 | return false 38 | } 39 | 40 | func OpenDatastore(path string, params map[string]interface{}) (Datastore, error) { 41 | dsc, err := AnyDatastoreConfig(params) 42 | if err != nil { 43 | return nil, err 44 | } 45 | 46 | d, err := dsc.Create(path) 47 | if err != nil { 48 | return nil, err 49 | } 50 | 51 | rds := &retry.Datastore{ 52 | Batching: d, 53 | Delay: time.Millisecond * 200, 54 | Retries: 6, 55 | TempErrFunc: isTooManyFDError, 56 | } 57 | 58 | return &Retry{ 59 | Datastore: rds, 60 | Closer: d, 61 | }, nil 62 | } 63 | 64 | func DatastoreSpec(params map[string]interface{}) (string, error) { 65 | dsc, err := AnyDatastoreConfig(params) 66 | if err != nil { 67 | return "", err 68 | } 69 | return dsc.DiskSpec().String(), nil 70 | } 71 | 72 | // From https://github.com/ipfs/go-ipfs/blob/8525be5990d3a0b5ece0d6773764756f9cbf15e9/repo/fsrepo/datastores.go 73 | // ConfigFromMap creates a new datastore config from a map 74 | type ConfigFromMap func(map[string]interface{}) (DatastoreConfig, error) 75 | 76 | // DatastoreConfig is an abstraction of a datastore config. A "spec" 77 | // is first converted to a DatastoreConfig and then Create() is called 78 | // to instantiate a new datastore 79 | type DatastoreConfig interface { 80 | // DiskSpec returns a minimal configuration of the datastore 81 | // represting what is stored on disk. Run time values are 82 | // excluded. 
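	// For example, a flatfs config typically reduces to something like
	// {"path":"blocks","shardFunc":"/repo/flatfs/shard/v1/next-to-last/2","type":"flatfs"}
	// once serialized via DiskSpec.Bytes below.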
83 | DiskSpec() DiskSpec 84 | 85 | // Create instantiate a new datastore from this config 86 | Create(path string) (Datastore, error) 87 | } 88 | 89 | // DiskSpec is the type returned by the DatastoreConfig's DiskSpec method 90 | type DiskSpec map[string]interface{} 91 | 92 | // Bytes returns a minimal JSON encoding of the DiskSpec 93 | func (spec DiskSpec) Bytes() []byte { 94 | b, err := json.Marshal(spec) 95 | if err != nil { 96 | // should not happen 97 | panic(err) 98 | } 99 | return bytes.TrimSpace(b) 100 | } 101 | 102 | // String returns a minimal JSON encoding of the DiskSpec 103 | func (spec DiskSpec) String() string { 104 | return string(spec.Bytes()) 105 | } 106 | 107 | var datastores map[string]ConfigFromMap 108 | 109 | func init() { 110 | datastores = map[string]ConfigFromMap{ 111 | "mount": MountDatastoreConfig, 112 | "flatfs": FlatfsDatastoreConfig, 113 | "levelds": LeveldsDatastoreConfig, 114 | "badgerds": BadgerdsDatastoreConfig, 115 | "mem": MemDatastoreConfig, 116 | "log": LogDatastoreConfig, 117 | "measure": MeasureDatastoreConfig, 118 | } 119 | } 120 | 121 | // AnyDatastoreConfig returns a DatastoreConfig from a spec based on 122 | // the "type" parameter 123 | func AnyDatastoreConfig(params map[string]interface{}) (DatastoreConfig, error) { 124 | which, ok := params["type"].(string) 125 | if !ok { 126 | return nil, fmt.Errorf("'type' field missing or not a string") 127 | } 128 | fun, ok := datastores[which] 129 | if !ok { 130 | return nil, fmt.Errorf("unknown datastore type: %s", which) 131 | } 132 | return fun(params) 133 | } 134 | -------------------------------------------------------------------------------- /revert/log.go: -------------------------------------------------------------------------------- 1 | package revert 2 | 3 | import ( 4 | "encoding/json" 5 | "fmt" 6 | "os" 7 | "path" 8 | ) 9 | 10 | const ( 11 | ConvertLog = "convertlog" 12 | 13 | ActionRemove = Action("rm") 14 | ActionMove = Action("mv") 15 | ActionMkdir = Action("mkdir") 16 | ActionDone = Action("done") 17 | 18 | //For breaking things that can't be easily recovered from, say writing new spec 19 | ActionManual = Action("manual") 20 | 21 | //ActionManual marks backup files that can be cleaned up after conversion with --keep 22 | ActionCleanup = Action("cleanup") 23 | ) 24 | 25 | type Action string 26 | 27 | type ActionLogger struct { 28 | repo string 29 | file *os.File 30 | } 31 | 32 | //NewActionLogger creates revert action logger which logs actions needed to 33 | //revert conversion steps 34 | func NewActionLogger(repoPath string) (*ActionLogger, error) { 35 | if _, err := os.Stat(path.Join(repoPath, ConvertLog)); !os.IsNotExist(err) { 36 | return nil, fmt.Errorf("Log file %s already exists, you may want to run revert", path.Join(repoPath, ConvertLog)) 37 | } 38 | 39 | f, err := os.Create(path.Join(repoPath, ConvertLog)) 40 | if err != nil { 41 | return nil, err 42 | } 43 | 44 | return &ActionLogger{ 45 | repo: repoPath, 46 | file: f, 47 | }, nil 48 | } 49 | 50 | func (a *ActionLogger) Log(action Action, params ...string) error { 51 | if a == nil { 52 | return nil 53 | } 54 | 55 | d, err := action.Line(params...) 
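	// Line serializes one action per JSON line, e.g. roughly
	// {"action":"mv","arg":["<from>","<to>"]}, which revert later replays
	// as a rename of arg[0] onto arg[1].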
56 | if err != nil { 57 | return err 58 | } 59 | 60 | n, err := a.file.Write(d) 61 | if err != nil { 62 | return err 63 | } 64 | 65 | if n != len(d) { 66 | return fmt.Errorf("failed to write steps, wrote %d, expected %d", n, len(d)) 67 | } 68 | 69 | return a.file.Sync() 70 | } 71 | 72 | func (a *ActionLogger) Close() { 73 | a.file.Close() 74 | } 75 | 76 | func (a *ActionLogger) CloseFinal() error { 77 | a.file.Close() 78 | 79 | return os.Remove(path.Join(a.repo, ConvertLog)) 80 | } 81 | 82 | func (a Action) Line(arg ...string) ([]byte, error) { 83 | b, err := json.Marshal(map[string]interface{}{ 84 | "action": a, 85 | "arg": arg, 86 | }) 87 | 88 | if err != nil { 89 | return nil, err 90 | } 91 | 92 | return append(b, "\n"...), nil 93 | } 94 | -------------------------------------------------------------------------------- /revert/log_test.go: -------------------------------------------------------------------------------- 1 | package revert_test 2 | 3 | import ( 4 | "io/ioutil" 5 | "os" 6 | "path" 7 | "strings" 8 | "testing" 9 | 10 | "github.com/ipfs/ipfs-ds-convert/revert" 11 | ) 12 | 13 | func TestNewActionLogger(t *testing.T) { 14 | d, err := ioutil.TempDir(os.TempDir(), "ds-convert-test-") 15 | if err != nil { 16 | t.Fatal(err) 17 | } 18 | 19 | err = ioutil.WriteFile(path.Join(d, revert.ConvertLog), []byte{}, 0664) 20 | if err != nil { 21 | t.Fatal(err) 22 | } 23 | 24 | _, err = revert.NewActionLogger(d) 25 | if !strings.Contains(err.Error(), "convertlog already exists, you may want to run revert") { 26 | t.Fatalf("expected error, got %s", err) 27 | } 28 | 29 | err = os.RemoveAll(d) 30 | if err != nil { 31 | t.Fatal(err) 32 | } 33 | 34 | _, err = revert.NewActionLogger(path.Join(d, "non/existent/path")) 35 | if !strings.Contains(err.Error(), "/non/existent/path/convertlog:") { 36 | t.Fatalf("expected error, got %s", err) 37 | } 38 | } 39 | 40 | func TestLog(t *testing.T) { 41 | d, err := ioutil.TempDir(os.TempDir(), "ds-convert-test-") 42 | if err != nil { 43 | t.Fatal(err) 44 | } 45 | //defer os.RemoveAll(d) 46 | 47 | lg, err := revert.NewActionLogger(d) 48 | if err != nil { 49 | t.Fatal(err) 50 | } 51 | 52 | err = lg.Log("abc", "def") 53 | if err != nil { 54 | t.Fatal(err) 55 | } 56 | 57 | b, err := ioutil.ReadFile(path.Join(d, revert.ConvertLog)) 58 | if err != nil { 59 | t.Fatal(err) 60 | } 61 | 62 | if !strings.Contains(string(b), `{"action":"abc","arg":["def"]}`) { 63 | t.Errorf("unexpected revert log, got: `%s`", string(b)) 64 | } 65 | } 66 | -------------------------------------------------------------------------------- /revert/revert.go: -------------------------------------------------------------------------------- 1 | package revert 2 | 3 | import ( 4 | "fmt" 5 | "os" 6 | "path/filepath" 7 | 8 | logging "log" 9 | 10 | "github.com/ipfs/ipfs-ds-convert/repo" 11 | 12 | "encoding/json" 13 | lock "github.com/ipfs/go-fs-lock" 14 | "github.com/ipfs/ipfs-ds-convert/config" 15 | "github.com/pkg/errors" 16 | "io/ioutil" 17 | ) 18 | 19 | var Log = logging.New(os.Stderr, "revert ", logging.LstdFlags) 20 | 21 | type process struct { 22 | repo string 23 | force bool 24 | 25 | steps Steps 26 | } 27 | 28 | func Revert(repoPath string, force bool, fixSpec bool, cleanupMode bool) (err error) { 29 | //TODO: validate repo dir 30 | 31 | p := process{ 32 | repo: repoPath, 33 | force: force, 34 | } 35 | 36 | unlock, err := lock.Lock(p.repo, repo.LockFile) 37 | if err != nil { 38 | return err 39 | } 40 | defer unlock.Close() 41 | 42 | p.steps, err = loadLog(p.repo) 43 | if err != nil { 44 | 
return err 45 | } 46 | 47 | if cleanupMode { 48 | Log.Println("Start cleanup") 49 | } else { 50 | Log.Println("Start revert") 51 | } 52 | 53 | n := 0 54 | for { 55 | step := p.steps.top() 56 | if step.action == "" { 57 | break 58 | } 59 | 60 | if !cleanupMode { 61 | err = p.executeStep(step) 62 | } else { 63 | err = p.executeCleanupStep(step, n) 64 | } 65 | 66 | if err != nil { 67 | return err 68 | } 69 | 70 | err = p.steps.pop(p.repo) 71 | if err != nil { 72 | return err 73 | } 74 | 75 | n++ 76 | } 77 | 78 | p.steps.write(p.repo) 79 | 80 | if fixSpec { 81 | Log.Println("Save datastore_spec into config") 82 | 83 | err := fixConfig(p.repo) 84 | if err != nil { 85 | return err 86 | } 87 | } 88 | 89 | Log.Println("All tasks finished") 90 | return nil 91 | } 92 | 93 | func (p *process) executeStep(step Step) error { 94 | switch step.action { 95 | case ActionDone: 96 | if !p.force { 97 | return fmt.Errorf("last conversion was successful, run with --force to revert") 98 | } 99 | 100 | case ActionRemove: 101 | if len(step.arg) != 1 { 102 | return fmt.Errorf("revert remove: arg count %d != 1", len(step.arg)) 103 | } 104 | Log.Printf("remove '%s'", step.arg[0]) 105 | 106 | err := os.RemoveAll(step.arg[0]) 107 | if err != nil { 108 | return err //TODO: wrap with more context? 109 | } 110 | 111 | Log.Println("\\-> ok") 112 | 113 | case ActionMove: 114 | if len(step.arg) != 2 { 115 | return fmt.Errorf("revert move: arg count %d != 2", len(step.arg)) 116 | } 117 | Log.Printf("move '%s' -> '%s': ", step.arg[0], step.arg[1]) 118 | 119 | if _, err := os.Stat(step.arg[0]); os.IsNotExist(err) { 120 | return fmt.Errorf("revert move: source file '%s' didn't exist", step.arg[0]) 121 | } 122 | 123 | if _, err := os.Stat(step.arg[1]); !os.IsNotExist(err) { 124 | return fmt.Errorf("revert move: destination file '%s' did exist", step.arg[1]) 125 | } 126 | 127 | err := os.Rename(step.arg[0], step.arg[1]) 128 | if err != nil { 129 | return err //TODO: wrap with more context? 130 | } 131 | 132 | Log.Println("\\-> ok") 133 | 134 | case ActionMkdir: 135 | if len(step.arg) != 1 { 136 | return fmt.Errorf("revert mkdir: arg count %d != 1", len(step.arg)) 137 | } 138 | Log.Printf("mkdir '%s': ", step.arg[0]) 139 | 140 | if _, err := os.Stat(step.arg[0]); !os.IsNotExist(err) { 141 | return fmt.Errorf("revert mkdir: destination '%s' did exist", step.arg[0]) 142 | } 143 | 144 | err := os.MkdirAll(step.arg[0], 0755) 145 | if err != nil { 146 | return err //TODO: wrap with more context? 147 | } 148 | 149 | Log.Println("\\-> ok") 150 | 151 | case ActionCleanup: 152 | default: 153 | return fmt.Errorf("unknown revert step '%s'", step.action) 154 | } 155 | 156 | return nil 157 | } 158 | 159 | func (p *process) executeCleanupStep(step Step, n int) error { 160 | if n == 0 && step.action != ActionDone { 161 | return fmt.Errorf("cannot cleanup after failed conversion") 162 | } 163 | 164 | switch step.action { 165 | case ActionDone: 166 | case ActionRemove: 167 | case ActionMove: 168 | case ActionMkdir: 169 | 170 | case ActionCleanup: 171 | if len(step.arg) != 1 { 172 | return fmt.Errorf("cleanup arg count %d != 1", len(step.arg)) 173 | } 174 | Log.Printf("cleanup '%s'", step.arg[0]) 175 | 176 | err := os.RemoveAll(step.arg[0]) 177 | if err != nil { 178 | return err //TODO: wrap with more context? 
179 | } 180 | 181 | Log.Println("\\-> ok") 182 | 183 | default: 184 | return fmt.Errorf("unknown cleanup step '%s'", step.action) 185 | } 186 | 187 | return nil 188 | } 189 | 190 | func fixConfig(repoPath string) error { 191 | spec := make(map[string]interface{}) 192 | err := config.Load(filepath.Join(repoPath, repo.SpecsFile), &spec) 193 | if err != nil { 194 | return err 195 | } 196 | 197 | _, err = config.Validate(spec, true) 198 | if err != nil { 199 | return errors.Wrapf(err, "validating datastore_spec spec") 200 | } 201 | 202 | repoConfig := make(map[string]interface{}) 203 | err = config.Load(filepath.Join(repoPath, repo.ConfigFile), &repoConfig) 204 | if err != nil { 205 | return err 206 | } 207 | 208 | confDatastore, ok := repoConfig["Datastore"].(map[string]interface{}) 209 | if !ok { 210 | return fmt.Errorf("invalid Datastore field in config") 211 | } 212 | 213 | confDatastore["Spec"] = spec 214 | 215 | err = os.Rename(filepath.Join(repoPath, repo.ConfigFile), filepath.Join(repoPath, "config-old")) 216 | if err != nil { 217 | return err 218 | } 219 | 220 | confBytes, err := json.MarshalIndent(repoConfig, "", " ") 221 | if err != nil { 222 | return err 223 | } 224 | 225 | err = ioutil.WriteFile(filepath.Join(repoPath, repo.ConfigFile), []byte(confBytes), 0660) 226 | if err != nil { 227 | return err 228 | } 229 | 230 | //TODO: might try opening the datastore to soo if config works and revert to old 231 | //config. 232 | 233 | err = os.Remove(filepath.Join(repoPath, "config-old")) 234 | 235 | return err 236 | } 237 | -------------------------------------------------------------------------------- /revert/revert_test.go: -------------------------------------------------------------------------------- 1 | package revert_test 2 | 3 | import ( 4 | "io/ioutil" 5 | "path" 6 | "strings" 7 | "testing" 8 | 9 | "github.com/ipfs/ipfs-ds-convert/convert" 10 | "github.com/ipfs/ipfs-ds-convert/repo" 11 | "github.com/ipfs/ipfs-ds-convert/revert" 12 | "github.com/ipfs/ipfs-ds-convert/testutil" 13 | 14 | lock "github.com/ipfs/go-fs-lock" 15 | ) 16 | 17 | func TestBasicConvertRevert(t *testing.T) { 18 | //Prepare repo 19 | dir, _close, s1, s2 := testutil.PrepareTest(t, 1000, 1000) 20 | defer _close(t) 21 | 22 | testutil.PatchConfig(t, path.Join(dir, "config"), "../testfiles/badgerSpec") 23 | 24 | //Convert! 25 | err := convert.Convert(dir, true) 26 | if err != nil { 27 | t.Fatal(err) 28 | } 29 | 30 | err = revert.Revert(dir, true, false, false) 31 | if err != nil { 32 | t.Fatal(err) 33 | } 34 | 35 | testutil.PatchConfig(t, path.Join(dir, "config"), "../testfiles/defaultSpec") 36 | 37 | testutil.FinishTest(t, dir, s1, s2, 1000, 1000) 38 | } 39 | 40 | func TestBasicConvertCleanup(t *testing.T) { 41 | //Prepare repo 42 | dir, _close, s1, s2 := testutil.PrepareTest(t, 100, 100) 43 | defer _close(t) 44 | 45 | testutil.PatchConfig(t, path.Join(dir, "config"), "../testfiles/badgerSpec") 46 | 47 | //Convert! 48 | err := convert.Convert(dir, true) 49 | if err != nil { 50 | t.Fatal(err) 51 | } 52 | 53 | err = revert.Revert(dir, true, false, true) 54 | if err != nil { 55 | t.Fatal(err) 56 | } 57 | 58 | testutil.FinishTest(t, dir, s1, s2, 100, 100) 59 | } 60 | 61 | func TestBasicConvertRevertFix(t *testing.T) { 62 | //Prepare repo 63 | dir, _close, s1, s2 := testutil.PrepareTest(t, 100, 100) 64 | defer _close(t) 65 | 66 | testutil.PatchConfig(t, path.Join(dir, "config"), "../testfiles/badgerSpec") 67 | 68 | //Convert! 
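	// Convert with keep=true so the convert log (ending in a "done" entry)
	// and backup data are still present for the forced, fix-config revert below.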
69 | err := convert.Convert(dir, true) 70 | if err != nil { 71 | t.Fatal(err) 72 | } 73 | 74 | err = revert.Revert(dir, true, true, false) 75 | if err != nil { 76 | t.Fatal(err) 77 | } 78 | 79 | testutil.FinishTest(t, dir, s1, s2, 100, 100) 80 | } 81 | 82 | func TestConvertRevertLocked(t *testing.T) { 83 | //Prepare repo 84 | dir, _close, _, _ := testutil.PrepareTest(t, 100, 100) 85 | defer _close(t) 86 | 87 | testutil.PatchConfig(t, path.Join(dir, "config"), "../testfiles/badgerSpec") 88 | 89 | //Convert! 90 | err := convert.Convert(dir, true) 91 | if err != nil { 92 | t.Fatal(err) 93 | } 94 | 95 | unlock, err := lock.Lock(dir, "repo.lock") 96 | if err != nil { 97 | t.Fatal(err) 98 | } 99 | defer unlock.Close() 100 | 101 | err = revert.Revert(dir, true, false, false) 102 | if !strings.Contains(err.Error(), "lock is already held") { 103 | t.Fatal(err) 104 | } 105 | } 106 | 107 | func TestConvertNoKeepRevert(t *testing.T) { 108 | //Prepare repo 109 | dir, _close, _, _ := testutil.PrepareTest(t, 100, 100) 110 | defer _close(t) 111 | 112 | testutil.PatchConfig(t, path.Join(dir, "config"), "../testfiles/badgerSpec") 113 | 114 | //Convert! 115 | err := convert.Convert(dir, false) 116 | if err != nil { 117 | t.Fatal(err) 118 | } 119 | 120 | err = revert.Revert(dir, true, false, false) 121 | if !strings.Contains(err.Error(), "/convertlog: ") { 122 | t.Fatal(err) 123 | } 124 | } 125 | 126 | func TestBasicConvertRevertUnknownStep(t *testing.T) { 127 | //Prepare repo 128 | dir, _close, _, _ := testutil.PrepareTest(t, 100, 100) 129 | defer _close(t) 130 | 131 | testutil.PatchConfig(t, path.Join(dir, "config"), "../testfiles/badgerSpec") 132 | 133 | //Convert! 134 | err := convert.Convert(dir, true) 135 | if err != nil { 136 | t.Fatal(err) 137 | } 138 | 139 | ioutil.WriteFile(path.Join(dir, revert.ConvertLog), []byte(`{"action":"unknownactiontype","arg":[]}`), 0600) 140 | 141 | err = revert.Revert(dir, true, false, false) 142 | if !strings.Contains(err.Error(), "unknown revert step 'unknownactiontype'") { 143 | t.Fatal(err) 144 | } 145 | } 146 | 147 | func TestBasicConvertRevertNoForce(t *testing.T) { 148 | //Prepare repo 149 | dir, _close, _, _ := testutil.PrepareTest(t, 100, 100) 150 | defer _close(t) 151 | 152 | testutil.PatchConfig(t, path.Join(dir, "config"), "../testfiles/badgerSpec") 153 | 154 | //Convert! 155 | err := convert.Convert(dir, true) 156 | if err != nil { 157 | t.Fatal(err) 158 | } 159 | 160 | err = revert.Revert(dir, false, false, false) 161 | if !strings.Contains(err.Error(), "last conversion was successful, run with --force to revert") { 162 | t.Fatal(err) 163 | } 164 | } 165 | 166 | func TestBasicConvertRevertInvalidArgs(t *testing.T) { 167 | //Prepare repo 168 | dir, _close, _, _ := testutil.PrepareTest(t, 100, 100) 169 | defer _close(t) 170 | 171 | testutil.PatchConfig(t, path.Join(dir, "config"), "../testfiles/badgerSpec") 172 | 173 | //Convert! 
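	// Each WriteFile below installs a single hand-crafted convertlog entry
	// to exercise one argument-count check (and, for cleanup, the
	// requirement that the log ends with a "done" entry).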
174 | err := convert.Convert(dir, true) 175 | if err != nil { 176 | t.Fatal(err) 177 | } 178 | 179 | ioutil.WriteFile(path.Join(dir, revert.ConvertLog), []byte(`{"action":"`+revert.ActionRemove+`","arg":[]}`), 0600) 180 | 181 | err = revert.Revert(dir, true, false, false) 182 | if !strings.Contains(err.Error(), "revert remove: arg count 0 != 1") { 183 | t.Fatal(err) 184 | } 185 | 186 | ioutil.WriteFile(path.Join(dir, revert.ConvertLog), []byte(`{"action":"`+revert.ActionMove+`","arg":[]}`), 0600) 187 | 188 | err = revert.Revert(dir, true, false, false) 189 | if !strings.Contains(err.Error(), "revert move: arg count 0 != 2") { 190 | t.Fatal(err) 191 | } 192 | 193 | ioutil.WriteFile(path.Join(dir, revert.ConvertLog), []byte(`{"action":"`+revert.ActionMkdir+`","arg":[]}`), 0600) 194 | 195 | err = revert.Revert(dir, true, false, false) 196 | if !strings.Contains(err.Error(), "revert mkdir: arg count 0 != 1") { 197 | t.Fatal(err) 198 | } 199 | 200 | ioutil.WriteFile(path.Join(dir, revert.ConvertLog), []byte(`{"action":"`+revert.ActionCleanup+`","arg":["a"]}`), 0600) 201 | 202 | err = revert.Revert(dir, true, false, true) 203 | if !strings.Contains(err.Error(), "cannot cleanup after failed conversion") { 204 | t.Fatal(err) 205 | } 206 | 207 | ioutil.WriteFile(path.Join(dir, revert.ConvertLog), []byte(`{"action":"`+revert.ActionCleanup+`","arg":[]}`+"\n"+`{"action":"`+revert.ActionDone+`","arg":[]}`), 0600) 208 | 209 | err = revert.Revert(dir, true, false, true) 210 | if !strings.Contains(err.Error(), "cleanup arg count 0 != 1") { 211 | t.Fatal(err) 212 | } 213 | } 214 | 215 | func TestRevertMkdirChecks(t *testing.T) { 216 | //Prepare repo 217 | dir, _close, _, _ := testutil.PrepareTest(t, 10, 10) 218 | defer _close(t) 219 | 220 | l, err := revert.ActionMkdir.Line(path.Join(dir, revert.ConvertLog)) 221 | if err != nil { 222 | t.Fatal(err) 223 | } 224 | 225 | err = ioutil.WriteFile(path.Join(dir, revert.ConvertLog), []byte(l), 0600) 226 | if err != nil { 227 | t.Fatal(err) 228 | } 229 | 230 | err = revert.Revert(dir, true, false, false) 231 | if !strings.Contains(err.Error(), "revert mkdir: destination ") || !strings.Contains(err.Error(), " did exist") { 232 | t.Fatal(err) 233 | } 234 | } 235 | 236 | func TestRevertMoveChecks(t *testing.T) { 237 | //Prepare repo 238 | dir, _close, _, _ := testutil.PrepareTest(t, 10, 10) 239 | defer _close(t) 240 | 241 | l, err := revert.ActionMove.Line("nonexistentfile", "config") 242 | if err != nil { 243 | t.Fatal(err) 244 | } 245 | 246 | err = ioutil.WriteFile(path.Join(dir, revert.ConvertLog), []byte(l), 0600) 247 | if err != nil { 248 | t.Fatal(err) 249 | } 250 | 251 | err = revert.Revert(dir, true, false, false) 252 | if err.Error() != "revert move: source file 'nonexistentfile' didn't exist" { 253 | t.Fatal(err) 254 | } 255 | 256 | l, err = revert.ActionMove.Line(path.Join(dir, repo.ConfigFile), path.Join(dir, repo.ConfigFile)) 257 | if err != nil { 258 | t.Fatal(err) 259 | } 260 | 261 | err = ioutil.WriteFile(path.Join(dir, revert.ConvertLog), []byte(l), 0600) 262 | if err != nil { 263 | t.Fatal(err) 264 | } 265 | 266 | err = revert.Revert(dir, true, false, false) 267 | if !strings.Contains(err.Error(), "revert move: destination ") || !strings.Contains(err.Error(), " did exist") { 268 | t.Fatal(err) 269 | } 270 | } 271 | -------------------------------------------------------------------------------- /revert/steps.go: -------------------------------------------------------------------------------- 1 | package revert 2 | 3 | import ( 4 | "bufio" 5 | 
"encoding/json" 6 | "fmt" 7 | "io" 8 | "os" 9 | "path" 10 | ) 11 | 12 | type Step struct { 13 | action Action 14 | arg []string 15 | } 16 | 17 | type Steps []Step 18 | 19 | func loadLog(repo string) (Steps, error) { 20 | var steps []Step 21 | 22 | f, err := os.Open(path.Join(repo, ConvertLog)) 23 | if err != nil { 24 | return nil, err 25 | } 26 | defer f.Close() 27 | 28 | log := bufio.NewReader(f) 29 | var readErr error 30 | for { 31 | var line string 32 | line, readErr = log.ReadString('\n') 33 | 34 | if line == "" { 35 | break 36 | } 37 | 38 | stepJson := map[string]interface{}{} 39 | err := json.Unmarshal([]byte(line), &stepJson) 40 | if err != nil { 41 | return nil, err 42 | } 43 | 44 | action, ok := stepJson["action"].(string) 45 | if !ok { 46 | return nil, fmt.Errorf("invalid action type in convert steps: %s", line) 47 | } 48 | 49 | rawArgs, ok := stepJson["arg"].([]interface{}) 50 | var args []string 51 | if ok { 52 | args = make([]string, 0, len(rawArgs)) 53 | for i := range rawArgs { 54 | arg, ok := rawArgs[i].(string) 55 | if !ok { 56 | return nil, fmt.Errorf("invalid arg %d in convert steps: %s", i, line) 57 | } 58 | args = append(args, arg) 59 | } 60 | } 61 | 62 | steps = append(steps, Step{ 63 | action: Action(action), 64 | arg: args, 65 | }) 66 | 67 | if readErr != nil { 68 | break 69 | } 70 | } 71 | 72 | if readErr != io.EOF { 73 | return nil, readErr 74 | } 75 | 76 | return steps, nil 77 | } 78 | 79 | func (s *Steps) top() Step { 80 | if len(*s) == 0 { 81 | return Step{} 82 | } 83 | return (*s)[len(*s)-1] 84 | } 85 | 86 | func (s *Steps) pop(repo string) error { 87 | if len(*s) == 0 { 88 | return nil 89 | } 90 | *s = (*s)[:len(*s)-1] 91 | 92 | return s.write(repo) 93 | } 94 | 95 | func (s *Steps) write(repo string) error { 96 | if len(*s) == 0 { 97 | return os.Remove(path.Join(repo, ConvertLog)) 98 | } 99 | 100 | f, err := os.Create(path.Join(repo, ConvertLog)) 101 | if err != nil { 102 | return err 103 | } 104 | defer f.Close() 105 | 106 | for _, step := range *s { 107 | d, err := step.action.Line(step.arg...) 
108 | if err != nil { 109 | return err 110 | } 111 | 112 | n, err := f.Write(d) 113 | if err != nil { 114 | return err 115 | } 116 | 117 | if n != len(d) { 118 | return fmt.Errorf("failed to write steps, wrote %d, expected %d", n, len(d)) 119 | } 120 | } 121 | 122 | return nil 123 | } 124 | -------------------------------------------------------------------------------- /revert/steps_test.go: -------------------------------------------------------------------------------- 1 | package revert 2 | 3 | import ( 4 | "io/ioutil" 5 | "os" 6 | "path" 7 | "strings" 8 | "testing" 9 | ) 10 | 11 | func TestLoadNonexistentLog(t *testing.T) { 12 | _, err := loadLog(path.Join(os.TempDir(), "non/existent/path")) 13 | if !os.IsNotExist(err) { 14 | t.Errorf("unexpected error %s, expected no such file or directory", err) 15 | } 16 | } 17 | 18 | func TestLoadInvalidLog(t *testing.T) { 19 | dname, _ := ioutil.TempDir(os.TempDir(), "ds-convert-test-") 20 | _ = ioutil.WriteFile(path.Join(dname, ConvertLog), []byte("not a json\n"), 0600) 21 | 22 | _, err := loadLog(dname) 23 | if !strings.Contains(err.Error(), "invalid character 'o' in literal null (expecting 'u')") { 24 | t.Errorf("unexpected error %s, expected invalid character...", err) 25 | } 26 | 27 | _ = ioutil.WriteFile(path.Join(dname, ConvertLog), []byte(`{"action":5}`), 0600) 28 | 29 | _, err = loadLog(dname) 30 | if !strings.Contains(err.Error(), "invalid action type in convert steps") { 31 | t.Errorf("unexpected error %s, expected invalid action type in convert steps", err) 32 | } 33 | 34 | _ = ioutil.WriteFile(path.Join(dname, ConvertLog), []byte(`{"action":"a","arg":[3]}`), 0600) 35 | 36 | _, err = loadLog(dname) 37 | if !strings.Contains(err.Error(), "invalid arg 0 in convert steps") { 38 | t.Errorf("unexpected error %s, expected invalid arg 0 in convert steps", err) 39 | } 40 | 41 | os.Remove(dname) 42 | } 43 | -------------------------------------------------------------------------------- /strategy/strategies.go: -------------------------------------------------------------------------------- 1 | package strategy 2 | 3 | import ( 4 | "encoding/json" 5 | 6 | "github.com/ipfs/ipfs-ds-convert/config" 7 | 8 | "github.com/pkg/errors" 9 | ) 10 | 11 | type Strategy interface { 12 | Spec() Spec 13 | Id() string 14 | } 15 | 16 | type copyStrategy struct { 17 | fromSpec Spec 18 | toSpec Spec 19 | } 20 | 21 | func validateCopySpec(spec Spec) error { 22 | t, ok := spec.Type() 23 | if !ok { 24 | return errors.New("copy spec has no type or field type is invalid") 25 | } 26 | 27 | if t == "mount" { 28 | mnts, ok := spec["mounts"] 29 | if !ok { 30 | return errors.New("copy spec has no mounts field") 31 | } 32 | 33 | mounts, ok := mnts.([]interface{}) 34 | if !ok { 35 | return errors.New("copy spec has invalid mounts field type") 36 | } 37 | 38 | if len(mounts) == 0 { 39 | return errors.New("copy spec has empty mounts field") 40 | } 41 | } 42 | 43 | _, err := config.Validate(spec, false) 44 | return err 45 | } 46 | 47 | func NewCopyStrategy(fromSpec Spec, toSpec Spec) (Strategy, error) { 48 | if err := validateCopySpec(fromSpec); err != nil { 49 | return nil, errors.Wrapf(err, "validating old copy spec") 50 | } 51 | if err := validateCopySpec(toSpec); err != nil { 52 | return nil, errors.Wrapf(err, "validating new copy spec") 53 | } 54 | 55 | return ©Strategy{ 56 | fromSpec: fromSpec, 57 | toSpec: toSpec, 58 | }, nil 59 | } 60 | 61 | func (s *copyStrategy) Spec() Spec { 62 | return Spec{ 63 | "type": "copy", 64 | "from": s.fromSpec, 65 | "to": s.toSpec, 66 | } 
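	// Id below marshals this map, yielding strings of the form
	// {"from":{...},"to":{...},"type":"copy"}; compare the expected
	// strategies in strategy_test.go.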
67 | } 68 | 69 | func (s *copyStrategy) Id() string { 70 | b, _ := json.Marshal(s.Spec()) 71 | return string(b) 72 | } 73 | 74 | type noopStrategy struct { 75 | } 76 | 77 | func NewNoopStrategy() (Strategy, error) { 78 | return &noopStrategy{}, nil 79 | } 80 | 81 | func (s *noopStrategy) Spec() Spec { 82 | return Spec{ 83 | "type": "noop", 84 | } 85 | } 86 | 87 | func (s *noopStrategy) Id() string { 88 | b, _ := json.Marshal(s.Spec()) 89 | return string(b) 90 | } 91 | -------------------------------------------------------------------------------- /strategy/strategies_test.go: -------------------------------------------------------------------------------- 1 | package strategy 2 | 3 | import ( 4 | "strings" 5 | "testing" 6 | ) 7 | 8 | var ( 9 | EmptySpec = map[string]interface{}{} 10 | 11 | NoMountSpec = map[string]interface{}{ 12 | "type": "mount", 13 | } 14 | 15 | InvalidMountSpec = map[string]interface{}{ 16 | "type": "mount", 17 | "mounts": "/", 18 | } 19 | 20 | EmptyMountSpec = map[string]interface{}{ 21 | "type": "mount", 22 | "mounts": []interface{}{}, 23 | } 24 | ) 25 | 26 | func TestValidateCopyEmptySpec(t *testing.T) { 27 | err := validateCopySpec(EmptySpec) 28 | if err != nil { 29 | if strings.Contains(err.Error(), "copy spec has no type or field type is invalid") { 30 | return 31 | } 32 | t.Errorf("unexpected error: %s", err) 33 | } 34 | 35 | t.Errorf("expected error") 36 | } 37 | 38 | func TestNoMountEmptySpec(t *testing.T) { 39 | err := validateCopySpec(NoMountSpec) 40 | if err != nil { 41 | if strings.Contains(err.Error(), "copy spec has no mounts field") { 42 | return 43 | } 44 | t.Errorf("unexpected error: %s", err) 45 | } 46 | 47 | t.Errorf("expected error") 48 | } 49 | 50 | func TestInvalidMountsEmptySpec(t *testing.T) { 51 | err := validateCopySpec(InvalidMountSpec) 52 | if err != nil { 53 | if strings.Contains(err.Error(), "copy spec has invalid mounts field type") { 54 | return 55 | } 56 | t.Errorf("unexpected error: %s", err) 57 | } 58 | 59 | t.Errorf("expected error") 60 | } 61 | 62 | func TestEmptyMountsEmptySpec(t *testing.T) { 63 | err := validateCopySpec(EmptyMountSpec) 64 | if err != nil { 65 | if strings.Contains(err.Error(), "copy spec has empty mounts field") { 66 | return 67 | } 68 | t.Errorf("unexpected error: %s", err) 69 | } 70 | 71 | t.Errorf("expected error") 72 | } 73 | -------------------------------------------------------------------------------- /strategy/strategy.go: -------------------------------------------------------------------------------- 1 | package strategy 2 | 3 | import ( 4 | "fmt" 5 | 6 | "github.com/ipfs/ipfs-ds-convert/repo" 7 | 8 | ds "github.com/ipfs/go-datastore" 9 | errors "github.com/pkg/errors" 10 | ) 11 | 12 | var ErrMountNotSimple = errors.New("mount entry is not simple, mount datastores can't be nested") 13 | 14 | var skipTypes = map[string]string{ 15 | "measure": "child", 16 | "log": "child", 17 | } 18 | 19 | var dsTypes = map[string]bool{ 20 | "flatfs": true, 21 | "levelds": true, 22 | "badgerds": true, 23 | } 24 | 25 | //datastors that have one directory inside IPFS repo 26 | var simpleTypes = map[string]bool{ 27 | "flatfs": true, 28 | "levelds": true, 29 | "badgerds": true, 30 | } 31 | 32 | func NewStrategy(fromSpecIn, toSpecIn map[string]interface{}) (Strategy, error) { 33 | var fromSpec Spec 34 | var toSpec Spec 35 | 36 | fromSpec, err := cleanUp(fromSpecIn) 37 | if err != nil { 38 | return nil, err 39 | } 40 | 41 | toSpec, err = cleanUp(toSpecIn) 42 | if err != nil { 43 | return nil, err 44 | } 45 | 46 | fromType, _ 
:= fromSpec.Type() 47 | toType, _ := toSpec.Type() 48 | 49 | if _, ok := dsTypes[fromType]; ok { 50 | if toType == fromType { 51 | //TODO: check if dirs match, can just skip conversion, else just move directories 52 | return NewCopyStrategy(fromSpec, toSpec) 53 | } 54 | 55 | //TODO: might still be able to optimize if toType is single element mount 56 | return NewCopyStrategy(fromSpec, toSpec) 57 | } 58 | 59 | if fromType == "mount" { 60 | if toType != "mount" { 61 | //TODO: this can be possible to optimize in case there is only one element 62 | //in mount, but it's probably not worth it 63 | return NewCopyStrategy(fromSpec, toSpec) 64 | } 65 | 66 | return newMountStrategy(fromSpec, toSpec) 67 | } 68 | 69 | //should not normally happen 70 | return nil, errors.New("unable to create conversion strategy") 71 | } 72 | 73 | func cleanUp(specIn Spec) (map[string]interface{}, error) { 74 | t, ok := specIn.Type() 75 | if !ok { 76 | return nil, errors.New("invalid or missing 'type' in datastore spec") 77 | } 78 | 79 | childField, skip := skipTypes[t] 80 | if skip { 81 | ch, ok := specIn[childField] 82 | if !ok { 83 | return nil, fmt.Errorf("missing '%s' field in datastore spec", childField) 84 | } 85 | 86 | var child Spec 87 | child, ok = ch.(map[string]interface{}) 88 | if !ok { 89 | return nil, fmt.Errorf("invalid '%s' field type in datastore spec", childField) 90 | } 91 | 92 | mountpoint, has := specIn.str("mountpoint") 93 | if has { 94 | child["mountpoint"] = mountpoint 95 | } 96 | 97 | return cleanUp(child) 98 | } 99 | 100 | _, isDs := dsTypes[t] 101 | if isDs { 102 | return specIn, nil 103 | } 104 | 105 | switch { 106 | case t == "mount": 107 | mounts, ok := specIn["mounts"].([]interface{}) 108 | if !ok { 109 | return nil, fmt.Errorf("'mounts' field is missing or not an array") 110 | } 111 | var outSpec = Spec{} 112 | outSpec["type"] = "mount" 113 | var outMounts []interface{} 114 | 115 | for _, m := range mounts { 116 | var mount Spec 117 | mount, ok = m.(map[string]interface{}) 118 | if !ok { 119 | return nil, fmt.Errorf("'mounts' element is of invalid type") 120 | } 121 | 122 | cleanMount, err := cleanUp(mount) 123 | if err != nil { 124 | return nil, err 125 | } 126 | 127 | outMounts = append(outMounts, cleanMount) 128 | } 129 | 130 | outSpec["mounts"] = outMounts 131 | 132 | return outSpec, nil 133 | default: 134 | return nil, fmt.Errorf("unknown or unsupported type '%s' in datasotre spec", t) 135 | } 136 | } 137 | 138 | func simpleMountInfo(mountSpec Spec) (SimpleMounts, error) { 139 | mounts, ok := mountSpec["mounts"].([]interface{}) 140 | if !ok { 141 | return nil, errors.New("'mounts' field is missing or not an array") 142 | } 143 | 144 | var simpleMounts []SimpleMount 145 | for _, m := range mounts { 146 | var mount Spec 147 | mount, ok := m.(map[string]interface{}) 148 | if !ok { 149 | return nil, fmt.Errorf("'mounts' element is of invalid type") 150 | } 151 | 152 | dsType, ok := mount.Type() 153 | if !ok { 154 | return nil, fmt.Errorf("mount type is not defined or of invalid type") 155 | } 156 | 157 | if _, ok := simpleTypes[dsType]; !ok { 158 | return nil, ErrMountNotSimple 159 | } 160 | 161 | prefix, ok := mount.str("mountpoint") 162 | if !ok { 163 | fmt.Println(mount) 164 | return nil, fmt.Errorf("mount field 'mountpoint' is not defined or of invalid type") 165 | } 166 | 167 | diskId, err := repo.DatastoreSpec(mount) 168 | if err != nil { 169 | return nil, err 170 | } 171 | 172 | simpleMounts = append(simpleMounts, SimpleMount{prefix: ds.NewKey(prefix), diskId: diskId, spec: 
mount}) 173 | } 174 | 175 | return simpleMounts, nil 176 | } 177 | 178 | // addMissingParents adds missing roots to filtered specs 179 | // spec A (source) 180 | // /a 181 | // /a/b 182 | // 183 | // spec B (dest) 184 | // /a 185 | // 186 | // Assuming /a are matching, they are filtered out and so data from /a/b would 187 | // be lost. This function adds missing mounts back to optimized spec. 188 | // Returns fixed SpecAOpt, SpecBOpt 189 | func addMissingParents(specA SimpleMounts, specB SimpleMounts, specAOpt SimpleMounts, specBOpt SimpleMounts) (SimpleMounts, SimpleMounts, error) { 190 | for _, mountA := range specA { 191 | if specB.hasPrefixed(mountA) == -1 { 192 | var bestMatch SimpleMount 193 | bestMatched := -1 194 | toParts := mountA.prefix.List() 195 | 196 | for _, mountB := range specB { 197 | matched := matchKeyPartsPrefix(toParts, mountB.prefix.List()) 198 | if matched > bestMatched { 199 | bestMatched = matched 200 | bestMatch = mountB 201 | } 202 | } 203 | 204 | if bestMatched == -1 { 205 | return nil, nil, fmt.Errorf("couldn't find best match for specA %s", mountA.prefix.String()) 206 | } 207 | 208 | if specBOpt.hasPrefixed(bestMatch) == -1 { 209 | specBOpt = append(specBOpt, bestMatch) 210 | } 211 | if specAOpt.hasPrefixed(bestMatch) == -1 { 212 | ti := specA.hasPrefixed(bestMatch) 213 | if ti == -1 { 214 | //TODO: fallback to copyAll 215 | return nil, nil, fmt.Errorf("couldn't find %s in specA, parent of %s", bestMatch.prefix.String(), mountA.prefix.String()) 216 | } 217 | specAOpt = append(specAOpt, specA[ti]) 218 | } 219 | } 220 | } 221 | return specAOpt, specBOpt, nil 222 | } 223 | 224 | func newMountStrategy(fromSpec, toSpec map[string]interface{}) (Strategy, error) { 225 | var skipable []SimpleMount 226 | 227 | fromMounts, err := simpleMountInfo(fromSpec) 228 | if err != nil { 229 | return nil, errors.Wrapf(err, "parsing old spec") 230 | } 231 | 232 | toMounts, err := simpleMountInfo(toSpec) 233 | if err != nil { 234 | return nil, errors.Wrapf(err, "parsing new spec") 235 | } 236 | 237 | for _, from := range fromMounts { 238 | if toMounts.hasMatching(from) { 239 | skipable = append(skipable, from) 240 | } 241 | } 242 | 243 | //TODO: handle renames, somehow 244 | 245 | fromMountsOpt := fromMounts.filter(skipable) 246 | toMountsOpt := toMounts.filter(skipable) 247 | 248 | fromMountsOpt.sort() 249 | toMountsOpt.sort() 250 | 251 | toMountsOpt, fromMountsOpt, err = addMissingParents(fromMounts, toMounts, fromMountsOpt, toMountsOpt) 252 | if err != nil { 253 | return nil, errors.Wrapf(err, "adding missing to src spec") 254 | } 255 | 256 | fromMountsOpt, toMountsOpt, err = addMissingParents(toMounts, fromMounts, toMountsOpt, fromMountsOpt) 257 | if err != nil { 258 | return nil, errors.Wrapf(err, "adding missing to dest spec") 259 | } 260 | 261 | if len(fromMountsOpt) == 0 { 262 | if len(toMountsOpt) != 0 { 263 | return nil, fmt.Errorf("strategy error: len(toMounts) != 0, please report") 264 | } 265 | 266 | return NewNoopStrategy() 267 | } 268 | if len(toMountsOpt) == 0 { 269 | return nil, fmt.Errorf("strategy error: len(toMounts) == 0, please report") 270 | } 271 | 272 | return NewCopyStrategy(fromMountsOpt.spec(), toMountsOpt.spec()) 273 | } 274 | 275 | func matchKeyPartsPrefix(pattern, to []string) int { 276 | if len(pattern) == 1 && pattern[0] == "" { 277 | pattern = []string{} 278 | } 279 | 280 | if len(to) == 1 && to[0] == "" { 281 | to = []string{} 282 | } 283 | 284 | if len(to) > len(pattern) { 285 | return -1 286 | } 287 | 288 | for i, part := range to { 289 | if 
part != pattern[i] { 290 | if i == 0 { 291 | return -1 292 | } 293 | return i 294 | } 295 | } 296 | 297 | return len(to) 298 | } 299 | -------------------------------------------------------------------------------- /strategy/strategy_test.go: -------------------------------------------------------------------------------- 1 | package strategy_test 2 | 3 | import ( 4 | "strings" 5 | "testing" 6 | 7 | "github.com/ipfs/ipfs-ds-convert/strategy" 8 | ) 9 | 10 | type testCase struct { 11 | baseSpec map[string]interface{} 12 | destSpec map[string]interface{} 13 | strategy string 14 | err string 15 | } 16 | 17 | var ( 18 | basicSpec = map[string]interface{}{ 19 | "type": "mount", 20 | "mounts": []interface{}{ 21 | map[string]interface{}{ 22 | "mountpoint": "/blocks", 23 | "type": "measure", 24 | "prefix": "flatfs.datastore", 25 | "child": map[string]interface{}{ 26 | "type": "flatfs", 27 | "path": "blocks", 28 | "sync": true, 29 | "shardFunc": "/repo/flatfs/shard/v1/next-to-last/2", 30 | }, 31 | }, 32 | map[string]interface{}{ 33 | "mountpoint": "/", 34 | "type": "measure", 35 | "prefix": "leveldb.datastore", 36 | "child": map[string]interface{}{ 37 | "type": "levelds", 38 | "path": "levelDatastore", 39 | "compression": "none", 40 | }, 41 | }, 42 | }, 43 | } 44 | 45 | testCases = []testCase{ 46 | //////////////////// 47 | // MAIN LOGIC CASES 48 | 49 | { 50 | //Only 'transparent' layers are changed, no action should be taken 51 | baseSpec: basicSpec, 52 | destSpec: map[string]interface{}{ 53 | "type": "mount", 54 | "mounts": []interface{}{ 55 | map[string]interface{}{ 56 | "mountpoint": "/blocks", 57 | "type": "log", 58 | "name": "flatfs", 59 | "child": map[string]interface{}{ 60 | "type": "flatfs", 61 | "path": "blocks", 62 | "sync": true, 63 | "shardFunc": "/repo/flatfs/shard/v1/next-to-last/2", 64 | }, 65 | }, 66 | map[string]interface{}{ 67 | "mountpoint": "/", 68 | "type": "measure", 69 | "prefix": "otherprefix.datastore", 70 | "child": map[string]interface{}{ 71 | "type": "levelds", 72 | "path": "levelDatastore", 73 | "compression": "none", 74 | }, 75 | }, 76 | }, 77 | }, 78 | strategy: `{"type":"noop"}`, 79 | }, 80 | { 81 | //Removed 'transparent' layers, no action should be taken 82 | baseSpec: basicSpec, 83 | destSpec: map[string]interface{}{ 84 | "type": "mount", 85 | "mounts": []interface{}{ 86 | map[string]interface{}{ 87 | "mountpoint": "/blocks", 88 | "type": "flatfs", 89 | "path": "blocks", 90 | "sync": true, 91 | "shardFunc": "/repo/flatfs/shard/v1/next-to-last/2", 92 | }, 93 | map[string]interface{}{ 94 | "mountpoint": "/", 95 | "type": "levelds", 96 | "path": "levelDatastore", 97 | "compression": "none", 98 | }, 99 | }, 100 | }, 101 | strategy: `{"type":"noop"}`, 102 | }, 103 | { 104 | //changed /blocks, rest untouched 105 | baseSpec: basicSpec, 106 | destSpec: map[string]interface{}{ 107 | "type": "mount", 108 | "mounts": []interface{}{ 109 | map[string]interface{}{ 110 | "mountpoint": "/blocks", 111 | "type": "badgerds", 112 | "path": "blocks", 113 | }, 114 | map[string]interface{}{ 115 | "mountpoint": "/", 116 | "type": "levelds", 117 | "path": "levelDatastore", 118 | "compression": "none", 119 | }, 120 | }, 121 | }, 122 | strategy: `{"from":{"mounts":[{"mountpoint":"/blocks","path":"blocks","shardFunc":"/repo/flatfs/shard/v1/next-to-last/2","sync":true,"type":"flatfs"}],"type":"mount"},"to":{"mounts":[{"mountpoint":"/blocks","path":"blocks","type":"badgerds"}],"type":"mount"},"type":"copy"}`, 123 | }, 124 | { 125 | //adds /foo mount, needs to copy [/,/foo] 126 | baseSpec: 
basicSpec, 127 | destSpec: map[string]interface{}{ 128 | "type": "mount", 129 | "mounts": []interface{}{ 130 | map[string]interface{}{ 131 | "mountpoint": "/blocks", 132 | "type": "flatfs", 133 | "path": "blocks", 134 | "sync": true, 135 | "shardFunc": "/repo/flatfs/shard/v1/next-to-last/2", 136 | }, 137 | map[string]interface{}{ 138 | "mountpoint": "/foo", 139 | "type": "badgerds", 140 | "path": "foo", 141 | }, 142 | map[string]interface{}{ 143 | "mountpoint": "/", 144 | "type": "levelds", 145 | "path": "levelDatastore", 146 | "compression": "none", 147 | }, 148 | }, 149 | }, 150 | strategy: `{"from":{"mounts":[{"compression":"none","mountpoint":"/","path":"levelDatastore","type":"levelds"}],"type":"mount"},"to":{"mounts":[{"mountpoint":"/foo","path":"foo","type":"badgerds"},{"compression":"none","mountpoint":"/","path":"levelDatastore","type":"levelds"}],"type":"mount"},"type":"copy"}`, 151 | }, 152 | { 153 | //has single / mount, needs to copy [/,/blocks] 154 | baseSpec: basicSpec, 155 | destSpec: map[string]interface{}{ 156 | "type": "mount", 157 | "mounts": []interface{}{ 158 | map[string]interface{}{ 159 | "mountpoint": "/", 160 | "type": "levelds", 161 | "path": "levelDatastore", 162 | "compression": "none", 163 | }, 164 | }, 165 | }, 166 | strategy: `{"from":{"mounts":[{"mountpoint":"/blocks","path":"blocks","shardFunc":"/repo/flatfs/shard/v1/next-to-last/2","sync":true,"type":"flatfs"},{"compression":"none","mountpoint":"/","path":"levelDatastore","type":"levelds"}],"type":"mount"},"to":{"mounts":[{"compression":"none","mountpoint":"/","path":"levelDatastore","type":"levelds"}],"type":"mount"},"type":"copy"}`, 167 | }, 168 | { 169 | //skippable spec from testfiles 170 | baseSpec: map[string]interface{}{ 171 | "type": "mount", 172 | "mounts": []interface{}{ 173 | map[string]interface{}{ 174 | "mountpoint": "/a", 175 | "type": "badgerds", 176 | "path": "dsa", 177 | }, 178 | map[string]interface{}{ 179 | "mountpoint": "/b", 180 | "type": "badgerds", 181 | "path": "dsb", 182 | }, 183 | map[string]interface{}{ 184 | "mountpoint": "/c", 185 | "type": "badgerds", 186 | "path": "dsc", 187 | }, 188 | map[string]interface{}{ 189 | "mountpoint": "/", 190 | "type": "badgerds", 191 | "path": "ds", 192 | }, 193 | }, 194 | }, 195 | destSpec: map[string]interface{}{ 196 | "type": "mount", 197 | "mounts": []interface{}{ 198 | map[string]interface{}{ 199 | "mountpoint": "/a", 200 | "type": "badgerds", 201 | "path": "dsa", 202 | }, 203 | map[string]interface{}{ 204 | "mountpoint": "/b", 205 | "type": "levelds", 206 | "path": "dsb", 207 | "compression": "none", 208 | }, 209 | map[string]interface{}{ 210 | "mountpoint": "/", 211 | "type": "badgerds", 212 | "path": "ds", 213 | }, 214 | map[string]interface{}{ 215 | "mountpoint": "/d", 216 | "type": "badgerds", 217 | "path": "dsc", 218 | }, 219 | }, 220 | }, 221 | strategy: `{"from":{"mounts":[{"mountpoint":"/c","path":"dsc","type":"badgerds"},{"mountpoint":"/b","path":"dsb","type":"badgerds"},{"mountpoint":"/","path":"ds","type":"badgerds"}],"type":"mount"},"to":{"mounts":[{"mountpoint":"/d","path":"dsc","type":"badgerds"},{"compression":"none","mountpoint":"/b","path":"dsb","type":"levelds"},{"mountpoint":"/","path":"ds","type":"badgerds"}],"type":"mount"},"type":"copy"}`, 222 | }, 223 | { 224 | //from nested mount 225 | baseSpec: map[string]interface{}{ 226 | "type": "mount", 227 | "mounts": []interface{}{ 228 | map[string]interface{}{ 229 | "mountpoint": "/a", 230 | "type": "badgerds", 231 | "path": "dsa", 232 | }, 233 | map[string]interface{}{ 234 
| "mountpoint": "/b", 235 | "type": "badgerds", 236 | "path": "dsb", 237 | }, 238 | map[string]interface{}{ 239 | "type": "mount", 240 | "mountpoint": "/c", 241 | "mounts": []interface{}{ 242 | map[string]interface{}{ 243 | "mountpoint": "/a", 244 | "type": "badgerds", 245 | "path": "dsc", 246 | }, 247 | map[string]interface{}{ 248 | "mountpoint": "/", 249 | "type": "badgerds", 250 | "path": "ds", 251 | }, 252 | }, 253 | }, 254 | }, 255 | }, 256 | destSpec: map[string]interface{}{ 257 | "type": "mount", 258 | "mounts": []interface{}{ 259 | map[string]interface{}{ 260 | "mountpoint": "/a", 261 | "type": "badgerds", 262 | "path": "dsa", 263 | }, 264 | map[string]interface{}{ 265 | "mountpoint": "/b", 266 | "type": "levelds", 267 | "path": "dsb", 268 | "compression": "none", 269 | }, 270 | map[string]interface{}{ 271 | "mountpoint": "/", 272 | "type": "badgerds", 273 | "path": "ds", 274 | }, 275 | map[string]interface{}{ 276 | "mountpoint": "/d", 277 | "type": "badgerds", 278 | "path": "dsc", 279 | }, 280 | }, 281 | }, 282 | err: "parsing old spec: mount entry is not simple, mount datastores can't be nested", 283 | }, 284 | //////////////////// 285 | //EDGE CASES 286 | 287 | { 288 | //no dest type 289 | baseSpec: basicSpec, 290 | destSpec: map[string]interface{}{ 291 | "mounts": []interface{}{ 292 | map[string]interface{}{ 293 | "mountpoint": "/", 294 | "type": "levelds", 295 | "path": "levelDatastore", 296 | "compression": "none", 297 | }, 298 | }, 299 | }, 300 | err: "invalid or missing 'type' in datastore spec", 301 | }, 302 | { 303 | //childless measure 304 | baseSpec: basicSpec, 305 | destSpec: map[string]interface{}{ 306 | "type": "measure", 307 | "prefix": "flatfs.datastore", 308 | }, 309 | err: "missing 'child' field in datastore spec", 310 | }, 311 | { 312 | //invalid child in measure 313 | baseSpec: basicSpec, 314 | destSpec: map[string]interface{}{ 315 | "type": "measure", 316 | "prefix": "flatfs.datastore", 317 | "child": "foo", 318 | }, 319 | err: "invalid 'child' field type in datastore spec", 320 | }, 321 | { 322 | //mountless mount 323 | baseSpec: basicSpec, 324 | destSpec: map[string]interface{}{ 325 | "type": "mount", 326 | }, 327 | err: "'mounts' field is missing or not an array", 328 | }, 329 | { 330 | //invalid mount mounts type 331 | baseSpec: basicSpec, 332 | destSpec: map[string]interface{}{ 333 | "type": "mount", 334 | "mounts": "Foo", 335 | }, 336 | err: "'mounts' field is missing or not an array", 337 | }, 338 | { 339 | //invalid mount 340 | baseSpec: basicSpec, 341 | destSpec: map[string]interface{}{ 342 | "type": "mount", 343 | "mounts": []interface{}{ 344 | "Foo", 345 | }, 346 | }, 347 | err: "'mounts' element is of invalid type", 348 | }, 349 | { 350 | //invalid mount element 351 | baseSpec: basicSpec, 352 | destSpec: map[string]interface{}{ 353 | "type": "mount", 354 | "mounts": []interface{}{ 355 | map[string]interface{}{ 356 | "type": "measure", 357 | "prefix": "flatfs.datastore", 358 | }, 359 | }, 360 | }, 361 | err: "missing 'child' field in datastore spec", 362 | }, 363 | { 364 | //invalid datastore 365 | baseSpec: basicSpec, 366 | destSpec: map[string]interface{}{ 367 | "type": "not a valid ds type", 368 | }, 369 | err: "unknown or unsupported type 'not a valid ds type' in datasotre spec", 370 | }, 371 | { 372 | //missing dest point 373 | baseSpec: map[string]interface{}{ 374 | "type": "mount", 375 | "mounts": []interface{}{ 376 | map[string]interface{}{ 377 | "mountpoint": "/foo", 378 | "type": "badgerds", 379 | "path": "foo", 380 | }, 381 | 
map[string]interface{}{ 382 | "mountpoint": "/bar", 383 | "type": "levelds", 384 | "path": "bar", 385 | "compression": "none", 386 | }, 387 | }, 388 | }, 389 | destSpec: map[string]interface{}{ 390 | "type": "mount", 391 | "mounts": []interface{}{ 392 | map[string]interface{}{ 393 | "mountpoint": "/foo", 394 | "type": "badgerds", 395 | "path": "foo", 396 | }, 397 | }, 398 | }, 399 | err: "adding missing to src spec: couldn't find best match for specA /bar", 400 | }, 401 | } 402 | ) 403 | 404 | func TestNewStrategy(t *testing.T) { 405 | for _, c := range testCases { 406 | strat, err := strategy.NewStrategy(c.baseSpec, c.destSpec) 407 | assert(t, (err == nil && c.err == "") || (c.err != "" && strings.Contains(err.Error(), c.err)), err) 408 | if c.err == "" { 409 | assert(t, strat.Id() == c.strategy, strat.Id()) 410 | } 411 | } 412 | } 413 | 414 | func TestStrategyReverse(t *testing.T) { 415 | for _, c := range testCases { 416 | _, err := strategy.NewStrategy(c.destSpec, c.baseSpec) 417 | assert(t, err == nil || c.err != "", err) 418 | } 419 | } 420 | 421 | func assert(t *testing.T, cond bool, err interface{}) { 422 | if !cond { 423 | t.Fatalf("assertion failed: %s", err) 424 | } 425 | } 426 | -------------------------------------------------------------------------------- /strategy/strategy_util.go: -------------------------------------------------------------------------------- 1 | package strategy 2 | 3 | import ( 4 | "sort" 5 | 6 | "github.com/ipfs/ipfs-ds-convert/repo" 7 | 8 | ds "github.com/ipfs/go-datastore" 9 | ) 10 | 11 | type Spec map[string]interface{} 12 | 13 | func (s *Spec) Type() (string, bool) { 14 | return s.str("type") 15 | } 16 | 17 | func (s *Spec) str(key string) (string, bool) { 18 | t, ok := (*s)[key] 19 | if !ok { 20 | return "", false 21 | } 22 | ts, ok := t.(string) 23 | return ts, ok 24 | } 25 | 26 | func (s *Spec) Sub(key string) (Spec, bool) { 27 | t, ok := (*s)[key] 28 | if !ok { 29 | return nil, false 30 | } 31 | ts, ok := t.(Spec) 32 | return ts, ok 33 | } 34 | 35 | func (s *Spec) Id() (string, error) { 36 | return repo.DatastoreSpec(*s) 37 | } 38 | 39 | type SimpleMount struct { 40 | prefix ds.Key 41 | diskId string 42 | 43 | spec Spec 44 | } 45 | 46 | type SimpleMounts []SimpleMount 47 | 48 | func (m *SimpleMounts) hasPrefixed(searched SimpleMount) int { 49 | for i, mnt := range *m { 50 | if mnt.prefix.Equal(searched.prefix) { 51 | return i 52 | } 53 | } 54 | 55 | return -1 56 | } 57 | 58 | func (m *SimpleMounts) hasMatching(searched SimpleMount) bool { 59 | i := m.hasPrefixed(searched) 60 | 61 | if i != -1 { 62 | return (*m)[i].diskId == searched.diskId 63 | } 64 | 65 | return false 66 | } 67 | 68 | //filter removes matching mounts from this mounts 69 | func (m *SimpleMounts) filter(filter SimpleMounts) SimpleMounts { 70 | out := make([]SimpleMount, 0, len(*m)) 71 | 72 | for _, mount := range *m { 73 | if !filter.hasMatching(mount) { 74 | out = append(out, mount) 75 | } 76 | } 77 | 78 | return out 79 | } 80 | 81 | func (m *SimpleMounts) sort() { 82 | sort.Slice(*m, func(i, j int) bool { return (*m)[i].prefix.String() > (*m)[j].prefix.String() }) 83 | } 84 | 85 | func (m *SimpleMounts) spec() Spec { 86 | mounts := make([]interface{}, 0, len(*m)) 87 | 88 | for _, mount := range *m { 89 | var spec map[string]interface{} = mount.spec 90 | mounts = append(mounts, spec) 91 | } 92 | 93 | return map[string]interface{}{ 94 | "type": "mount", 95 | "mounts": mounts, 96 | } 97 | } 98 | -------------------------------------------------------------------------------- 
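Taken together, the strategy package above exposes NewStrategy, which strips the 'transparent' wrapper layers (measure, log) out of two datastore spec maps and then picks either a noop or a copy plan; Strategy.Id() returns that plan JSON-encoded, which is exactly the string the test cases compare against. The following is a minimal sketch of driving it directly, assuming the module is importable as github.com/ipfs/ipfs-ds-convert; the mountpoints, paths and datastore types are illustrative placeholders in the same shape as the fixtures above, not configuration shipped with the tool.

package main

import (
	"fmt"
	"log"

	"github.com/ipfs/ipfs-ds-convert/strategy"
)

func main() {
	// Source spec: a single levelds mount at "/" (same shape as testfiles/singleSpec).
	from := map[string]interface{}{
		"type": "mount",
		"mounts": []interface{}{
			map[string]interface{}{
				"mountpoint":  "/",
				"type":        "levelds",
				"path":        "levelDatastore",
				"compression": "none",
			},
		},
	}

	// Destination spec: the same mountpoint backed by badgerds instead.
	to := map[string]interface{}{
		"type": "mount",
		"mounts": []interface{}{
			map[string]interface{}{
				"mountpoint": "/",
				"type":       "badgerds",
				"path":       "badgerDatastore",
			},
		},
	}

	strat, err := strategy.NewStrategy(from, to)
	if err != nil {
		log.Fatal(err)
	}

	// Id() is the JSON-encoded plan; for these specs it should describe a
	// "copy" strategy from the levelds mount to the badgerds mount.
	fmt.Println(strat.Id())
}

Because Id() is just the marshalled spec, equal plans compare as plain strings, which is how strategy_test.go asserts the expected strategies.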
/testfiles/absPathSpec: -------------------------------------------------------------------------------- 1 | { 2 | "child": { 3 | "compression": "none", 4 | "path": "/srv/datastore", 5 | "type": "levelds" 6 | }, 7 | "mountpoint": "/", 8 | "prefix": "leveldb.datastore", 9 | "type": "measure" 10 | } 11 | -------------------------------------------------------------------------------- /testfiles/badgerSpec: -------------------------------------------------------------------------------- 1 | { 2 | "mounts": [ 3 | { 4 | "child": { 5 | "path": "blocks", 6 | "shardFunc": "/repo/flatfs/shard/v1/next-to-last/2", 7 | "sync": true, 8 | "type": "flatfs" 9 | }, 10 | "mountpoint": "/blocks", 11 | "prefix": "flatfs.datastore", 12 | "type": "measure" 13 | }, 14 | { 15 | "child": { 16 | "path": "badgerstore", 17 | "type": "badgerds" 18 | }, 19 | "mountpoint": "/", 20 | "prefix": "badgerdb.datastore", 21 | "type": "measure" 22 | } 23 | ], 24 | "type": "mount" 25 | } 26 | -------------------------------------------------------------------------------- /testfiles/defaultSpec: -------------------------------------------------------------------------------- 1 | { 2 | "mounts": [ 3 | { 4 | "child": { 5 | "path": "blocks", 6 | "shardFunc": "/repo/flatfs/shard/v1/next-to-last/2", 7 | "sync": true, 8 | "type": "flatfs" 9 | }, 10 | "mountpoint": "/blocks", 11 | "prefix": "flatfs.datastore", 12 | "type": "measure" 13 | }, 14 | { 15 | "child": { 16 | "compression": "none", 17 | "path": "datastore", 18 | "type": "levelds" 19 | }, 20 | "mountpoint": "/", 21 | "prefix": "leveldb.datastore", 22 | "type": "measure" 23 | } 24 | ], 25 | "type": "mount" 26 | } 27 | -------------------------------------------------------------------------------- /testfiles/equalSpec: -------------------------------------------------------------------------------- 1 | { 2 | "mounts": [ 3 | { 4 | "child": { 5 | "compression": "none", 6 | "path": "datastore", 7 | "type": "levelds" 8 | }, 9 | "mountpoint": "/", 10 | "prefix": "otherprefix.datastore", 11 | "type": "measure" 12 | }, 13 | { 14 | "child": { 15 | "path": "blocks", 16 | "shardFunc": "/repo/flatfs/shard/v1/next-to-last/2", 17 | "sync": true, 18 | "type": "flatfs" 19 | }, 20 | "mountpoint": "/blocks", 21 | "prefix": "flatfs.datastore", 22 | "type": "measure" 23 | } 24 | ], 25 | "type": "mount" 26 | } -------------------------------------------------------------------------------- /testfiles/invalidSpec: -------------------------------------------------------------------------------- 1 | { 2 | "child": { 3 | "compression": "none", 4 | "path": "datastore", 5 | "type": "notAValidDatastoreType" 6 | }, 7 | "prefix": "leveldb.datastore", 8 | "type": "measure" 9 | } 10 | -------------------------------------------------------------------------------- /testfiles/lossySpec: -------------------------------------------------------------------------------- 1 | { 2 | "mounts": [ 3 | { 4 | "child": { 5 | "path": "blocks", 6 | "shardFunc": "/repo/flatfs/shard/v1/next-to-last/2", 7 | "sync": true, 8 | "type": "flatfs" 9 | }, 10 | "mountpoint": "/blocks", 11 | "prefix": "flatfs.datastore", 12 | "type": "measure" 13 | } 14 | ], 15 | "type": "mount" 16 | } 17 | -------------------------------------------------------------------------------- /testfiles/reusePathSpec: -------------------------------------------------------------------------------- 1 | { 2 | "mounts": [ 3 | { 4 | "child": { 5 | "path": "datastore", 6 | "shardFunc": "/repo/flatfs/shard/v1/next-to-last/2", 7 | "sync": true, 8 | "type": "flatfs" 
9 | }, 10 | "mountpoint": "/blocks", 11 | "prefix": "flatfs.datastore", 12 | "type": "measure" 13 | }, 14 | { 15 | "child": { 16 | "compression": "none", 17 | "path": "datastore", 18 | "type": "levelds" 19 | }, 20 | "mountpoint": "/", 21 | "prefix": "leveldb.datastore", 22 | "type": "measure" 23 | } 24 | ], 25 | "type": "mount" 26 | } -------------------------------------------------------------------------------- /testfiles/singleSpec: -------------------------------------------------------------------------------- 1 | { 2 | "mounts": [ 3 | { 4 | "compression": "none", 5 | "path": "datastore", 6 | "type": "levelds", 7 | "mountpoint": "/" 8 | } 9 | ], 10 | "type": "mount" 11 | } -------------------------------------------------------------------------------- /testfiles/skipableDstSpec: -------------------------------------------------------------------------------- 1 | { 2 | "mounts": [ 3 | { 4 | "compression": "none", 5 | "path": "dsa", 6 | "type": "badgerds", 7 | "mountpoint": "/a" 8 | }, 9 | { 10 | "compression": "none", 11 | "path": "dsb", 12 | "type": "levelds", 13 | "mountpoint": "/b" 14 | }, 15 | { 16 | "compression": "none", 17 | "path": "ds", 18 | "type": "badgerds", 19 | "mountpoint": "/" 20 | }, 21 | { 22 | "compression": "none", 23 | "path": "dsc", 24 | "type": "badgerds", 25 | "mountpoint": "/d" 26 | } 27 | ], 28 | "type": "mount" 29 | } -------------------------------------------------------------------------------- /testfiles/skipableSpec: -------------------------------------------------------------------------------- 1 | { 2 | "mounts": [ 3 | { 4 | "compression": "none", 5 | "path": "dsa", 6 | "type": "badgerds", 7 | "mountpoint": "/a" 8 | }, 9 | { 10 | "compression": "none", 11 | "path": "dsb", 12 | "type": "badgerds", 13 | "mountpoint": "/b" 14 | }, 15 | { 16 | "compression": "none", 17 | "path": "dsc", 18 | "type": "badgerds", 19 | "mountpoint": "/c" 20 | }, 21 | { 22 | "compression": "none", 23 | "path": "ds", 24 | "type": "badgerds", 25 | "mountpoint": "/" 26 | } 27 | ], 28 | "type": "mount" 29 | } 30 | -------------------------------------------------------------------------------- /testutil/basictest.go: -------------------------------------------------------------------------------- 1 | package testutil 2 | 3 | import ( 4 | "fmt" 5 | "testing" 6 | ) 7 | 8 | func PrepareTest(t *testing.T, keys, blocks int) (string, func(t *testing.T), int64, int64) { 9 | dir, _close := NewTestRepo(t, nil) 10 | 11 | r, err := OpenRepo(dir) 12 | if err != nil { 13 | t.Fatal(err) 14 | } 15 | 16 | seed1, err := InsertRandomKeys("", keys, r) 17 | if err != nil { 18 | t.Fatal(err) 19 | } 20 | 21 | seed2, err := InsertRandomKeys("blocks/", blocks, r) 22 | if err != nil { 23 | t.Fatal(err) 24 | } 25 | 26 | err = r.Close() 27 | if err != nil { 28 | t.Fatal(err) 29 | } 30 | 31 | return dir, _close, seed1, seed2 32 | } 33 | 34 | func FinishTest(t *testing.T, dir string, seed1, seed2 int64, keys, blocks int) { 35 | //Test if repo can be opened 36 | r, err := OpenRepo(dir) 37 | if err != nil { 38 | t.Fatal(err) 39 | } 40 | 41 | fmt.Println("Verifying keys") 42 | err = Verify("", keys, seed1, r) 43 | if err != nil { 44 | t.Fatal(err) 45 | } 46 | 47 | fmt.Println("Verifying blocks") 48 | err = Verify("blocks/", blocks, seed2, r) 49 | if err != nil { 50 | t.Fatal(err) 51 | } 52 | 53 | err = r.Close() 54 | if err != nil { 55 | t.Fatal(err) 56 | } 57 | } 58 | -------------------------------------------------------------------------------- /testutil/ds_utils.go: 
-------------------------------------------------------------------------------- 1 | package testutil 2 | 3 | import ( 4 | crand "crypto/rand" 5 | "encoding/base32" 6 | "encoding/binary" 7 | "fmt" 8 | "math/rand" 9 | 10 | repo "github.com/ipfs/go-ipfs/repo" 11 | fsrepo "github.com/ipfs/go-ipfs/repo/fsrepo" 12 | 13 | "bytes" 14 | 15 | ds "github.com/ipfs/go-datastore" 16 | ) 17 | 18 | // OpenRepo opens a repo. 19 | func OpenRepo(repoPath string) (repo.Repo, error) { 20 | return fsrepo.Open(repoPath) 21 | } 22 | 23 | func getSeed() int64 { 24 | b := make([]byte, 8) 25 | crand.Read(b) 26 | return int64(binary.LittleEndian.Uint64(b)) 27 | } 28 | 29 | // InsertRandomKeys puts random keys in a repo. 30 | func InsertRandomKeys(prefix string, n int, r repo.Repo) (int64, error) { 31 | seed := getSeed() 32 | rnd := rand.New(rand.NewSource(seed)) 33 | 34 | batch, err := r.Datastore().Batch() 35 | if err != nil { 36 | return 0, err 37 | } 38 | 39 | for i := 1; i <= n; i++ { 40 | keyBytes := make([]byte, 16) 41 | rnd.Read(keyBytes) 42 | dataBytes := make([]byte, 1024) 43 | rnd.Read(dataBytes) 44 | 45 | err := batch.Put(ds.NewKey(fmt.Sprintf("/%s%s", prefix, base32.StdEncoding.EncodeToString(keyBytes))), dataBytes) 46 | if err != nil { 47 | return 0, err 48 | } 49 | 50 | if (i+1)%127 == 0 { 51 | err := batch.Commit() 52 | if err != nil { 53 | return 0, err 54 | } 55 | 56 | batch, err = r.Datastore().Batch() 57 | if err != nil { 58 | return 0, err 59 | } 60 | } 61 | } 62 | 63 | err = batch.Put(ds.NewKey(fmt.Sprintf("/%s%s", prefix, "NOTARANDOMKEY")), []byte("data")) 64 | if err != nil { 65 | return 0, err 66 | } 67 | 68 | err = batch.Commit() 69 | if err != nil { 70 | return 0, err 71 | } 72 | 73 | return seed, nil 74 | } 75 | 76 | // Verify checks that keys in the repository look as expected. 77 | func Verify(prefix string, n int, seed int64, r repo.Repo) error { 78 | rnd := rand.New(rand.NewSource(seed)) 79 | 80 | for i := 1; i <= n; i++ { 81 | keyBytes := make([]byte, 16) 82 | rnd.Read(keyBytes) 83 | dataBytes := make([]byte, 1024) 84 | rnd.Read(dataBytes) 85 | 86 | k := ds.NewKey(fmt.Sprintf("/%s%s", prefix, base32.StdEncoding.EncodeToString(keyBytes))) 87 | val, err := r.Datastore().Get(k) 88 | if err != nil { 89 | return err 90 | } 91 | 92 | if !bytes.Equal(dataBytes, val) { 93 | return fmt.Errorf("non-matching data for key %s", k) 94 | } 95 | } 96 | 97 | return nil 98 | } 99 | -------------------------------------------------------------------------------- /testutil/repo.go: -------------------------------------------------------------------------------- 1 | package testutil 2 | 3 | import ( 4 | "encoding/json" 5 | "fmt" 6 | "io/ioutil" 7 | "os" 8 | "testing" 9 | 10 | "github.com/ipfs/go-ipfs/plugin/loader" 11 | 12 | conf "github.com/ipfs/ipfs-ds-convert/config" 13 | 14 | config "github.com/ipfs/go-ipfs-config" 15 | fsrepo "github.com/ipfs/go-ipfs/repo/fsrepo" 16 | ) 17 | 18 | func init() { 19 | // Datastores are (by default preloaded) plugins 20 | 21 | pl, err := loader.NewPluginLoader("") 22 | if err != nil { 23 | panic(err) 24 | } 25 | if err := pl.Initialize(); err != nil { 26 | panic(err) 27 | } 28 | if err := pl.Inject(); err != nil { 29 | panic(err) 30 | } 31 | } 32 | 33 | // NewTestRepo creates a new repo for testing. 
34 | func NewTestRepo(t *testing.T, spec map[string]interface{}) (string, func(t *testing.T)) { 35 | conf, err := config.Init(os.Stdout, 2048) 36 | if err != nil { 37 | t.Fatal(err) 38 | } 39 | 40 | err = config.Profiles["test"].Transform(conf) 41 | if err != nil { 42 | t.Fatal(err) 43 | } 44 | 45 | if spec != nil { 46 | conf.Datastore.Spec = spec 47 | } 48 | 49 | repoRoot, err := ioutil.TempDir(os.TempDir(), "ds-convert-test-") 50 | if err != nil { 51 | t.Fatal(err) 52 | } 53 | 54 | if err := fsrepo.Init(repoRoot, conf); err != nil { 55 | t.Fatal(err) 56 | } 57 | 58 | return repoRoot, func(t *testing.T) { 59 | err := os.RemoveAll(repoRoot) 60 | if err != nil { 61 | t.Fatal(err) 62 | } 63 | } 64 | } 65 | 66 | // PatchConfig replaces the datastore configuration in an existing 67 | // configuration file. 68 | func PatchConfig(t *testing.T, configPath string, newSpecPath string) { 69 | newSpec := make(map[string]interface{}) 70 | err := conf.Load(newSpecPath, &newSpec) 71 | if err != nil { 72 | t.Fatal(err) 73 | } 74 | 75 | repoConfig := make(map[string]interface{}) 76 | err = conf.Load(configPath, &repoConfig) 77 | if err != nil { 78 | t.Fatal(err) 79 | } 80 | 81 | dsConfig, ok := repoConfig["Datastore"].(map[string]interface{}) 82 | if !ok { 83 | t.Fatal(fmt.Errorf("no 'Datastore' or invalid type in %s", configPath)) 84 | } 85 | 86 | _, ok = dsConfig["Spec"].(map[string]interface{}) 87 | if !ok { 88 | t.Fatal(fmt.Errorf("no 'Datastore.Spec' or invalid type in %s", configPath)) 89 | } 90 | 91 | dsConfig["Spec"] = newSpec 92 | 93 | b, err := json.MarshalIndent(repoConfig, "", " ") 94 | if err != nil { 95 | t.Fatal(err) 96 | } 97 | ioutil.WriteFile(configPath, b, 0660) 98 | } 99 | -------------------------------------------------------------------------------- /testutil/repo_test.go: -------------------------------------------------------------------------------- 1 | package testutil_test 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/ipfs/ipfs-ds-convert/testutil" 7 | ) 8 | 9 | func TestNewTestRepo(t *testing.T) { 10 | _, cl := testutil.NewTestRepo(t, nil) 11 | cl(t) 12 | } 13 | -------------------------------------------------------------------------------- /version.json: -------------------------------------------------------------------------------- 1 | { 2 | "version": "v0.6.0" 3 | } 4 | --------------------------------------------------------------------------------
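One hedged way to exercise the testutil helpers above end to end: PrepareTest seeds a fresh temporary repo with random keys and blocks and returns the repo path, a cleanup function, and the two RNG seeds, while FinishTest reopens the repo and verifies that the same data reads back. The sketch below uses a hypothetical test name and arbitrary counts, and simply round-trips the two helpers without running a conversion in between, which is the degenerate case where the seeded data should verify unchanged.

package testutil_test

import (
	"testing"

	"github.com/ipfs/ipfs-ds-convert/testutil"
)

// TestPrepareFinishRoundTrip seeds a repo and immediately verifies it.
// No conversion happens in between, so verification should pass as-is.
func TestPrepareFinishRoundTrip(t *testing.T) {
	keys, blocks := 100, 100

	dir, done, seed1, seed2 := testutil.PrepareTest(t, keys, blocks)
	defer done(t)

	testutil.FinishTest(t, dir, seed1, seed2, keys, blocks)
}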