├── .github └── workflows │ ├── docker.yml │ └── main.yml ├── .gitignore ├── .golangci.yaml ├── Dockerfile ├── LICENSE ├── Makefile ├── README.md ├── cmd_gen_password.go ├── cmd_gen_seed.go ├── cmd_init_wallet.go ├── cmd_init_wallet_test.go ├── cmd_load_secret.go ├── cmd_migrate_db.go ├── cmd_migrate_db_postgres_test.go ├── cmd_migrate_db_sqlite_test.go ├── cmd_migrate_db_test.go ├── cmd_store_configmap.go ├── cmd_store_secret.go ├── cmd_wait_ready.go ├── dev.Dockerfile ├── docs └── data-migration.md ├── example-init-wallet-k8s.sh ├── go.mod ├── go.sum ├── k8s.go ├── k8s_test.go ├── log.go ├── main.go ├── migratekvdb ├── bucket_path.go ├── errors.go ├── helper.go ├── migration.go ├── migration_test.go └── state.go ├── release.sh ├── testdata └── data │ ├── chain │ └── bitcoin │ │ └── regtest │ │ ├── macaroons.db │ │ └── wallet.db │ ├── graph │ └── regtest │ │ ├── channel.db │ │ ├── sphinxreplay.db │ │ └── wtclient.db │ └── watchtower │ └── bitcoin │ └── regtest │ └── watchtower.db ├── tools ├── Dockerfile ├── go.mod ├── go.sum └── tools.go ├── utils.go └── version.go /.github/workflows/docker.yml: -------------------------------------------------------------------------------- 1 | name: Docker image build 2 | 3 | on: 4 | push: 5 | tags: 6 | - 'docker/v*' 7 | schedule: 8 | # Every day at 1AM (UTC). 
9 | - cron: '0 1 * * *' 10 | 11 | defaults: 12 | run: 13 | shell: bash 14 | 15 | env: 16 | DOCKER_REPO: lightninglabs 17 | DOCKER_IMAGE: lndinit 18 | 19 | jobs: 20 | main: 21 | runs-on: ubuntu-latest 22 | steps: 23 | - name: Set up QEMU 24 | uses: lightninglabs/gh-actions/setup-qemu-action@39555064b3ae5c6d5c71a8ab304355faeaf3f4d4 25 | 26 | - name: Set up Docker Buildx 27 | uses: lightninglabs/gh-actions/setup-buildx-action@39555064b3ae5c6d5c71a8ab304355faeaf3f4d4 28 | 29 | - name: Login to DockerHub 30 | uses: lightninglabs/gh-actions/login-action@39555064b3ae5c6d5c71a8ab304355faeaf3f4d4 31 | with: 32 | username: ${{ secrets.DOCKER_USERNAME }} 33 | password: ${{ secrets.DOCKER_API_KEY }} 34 | 35 | - name: Set env for RELEASE_VERSION 36 | run: echo "RELEASE_VERSION=${GITHUB_REF#refs/*/docker/}" >> $GITHUB_ENV 37 | 38 | # We will push tags with the format docker/v0.x.y-beta-lnd-v0.aa.bb-beta 39 | # where x and y are lndinit's version and aa and bb are lnd's version. 40 | # This variable LND_VERSION extracts everything after v0.x.y-lnd- so we 41 | # know which base image we need to build on top of. 
42 | - name: Set env for LND_VERSION 43 | run: echo "LND_VERSION=${RELEASE_VERSION##v*\.*\.*-beta-lnd-}" >> $GITHUB_ENV 44 | 45 | - name: Set env for LNDINIT_VERSION 46 | run: echo "LNDINIT_VERSION=${RELEASE_VERSION%%-lnd-v*\.*\.*-beta*}" >> $GITHUB_ENV 47 | 48 | - name: Set daily tag 49 | if: github.event.schedule == '0 1 * * *' 50 | run: | 51 | echo "LND_VERSION=daily-testing-only" >> $GITHUB_ENV 52 | echo "LNDINIT_VERSION=main" >> $GITHUB_ENV 53 | echo "RELEASE_VERSION=daily-testing-$(date -u +%Y%m%d),${DOCKER_REPO}/${DOCKER_IMAGE}:daily-testing-only" >> $GITHUB_ENV 54 | 55 | - name: Build and push 56 | id: docker_build 57 | uses: lightninglabs/gh-actions/build-push-action@39555064b3ae5c6d5c71a8ab304355faeaf3f4d4 58 | with: 59 | push: true 60 | platforms: linux/amd64,linux/arm64 61 | tags: "${{ env.DOCKER_REPO }}/${{ env.DOCKER_IMAGE }}:${{ env.RELEASE_VERSION }}" 62 | build-args: | 63 | checkout=${{ env.LNDINIT_VERSION }} 64 | BASE_IMAGE_VERSION=${{ env.LND_VERSION }} 65 | 66 | - name: Image digest 67 | run: echo ${{ steps.docker_build.outputs.digest }} 68 | -------------------------------------------------------------------------------- /.github/workflows/main.yml: -------------------------------------------------------------------------------- 1 | name: CI 2 | 3 | on: 4 | push: 5 | branches: 6 | - "master" 7 | pull_request: 8 | branches: 9 | - "*" 10 | 11 | defaults: 12 | run: 13 | shell: bash 14 | 15 | env: 16 | # go needs absolute directories, using the $HOME variable doesn't work here. 
17 |   GOCACHE: /home/runner/work/go/pkg/build
18 |   GOPATH: /home/runner/work/go
19 | 
20 |   GO_VERSION: 1.23.6
21 | 
22 | jobs:
23 |   ########################
24 |   # lint code
25 |   ########################
26 |   lint:
27 |     name: lint code
28 |     runs-on: ubuntu-latest
29 |     steps:
30 |       - name: git checkout
31 |         uses: actions/checkout@v2
32 |         with:
33 |           fetch-depth: 0
34 | 
35 |       - name: lint
36 |         run: make lint
37 | 
38 |   ########################
39 |   # run unit tests
40 |   ########################
41 |   unit-test:
42 |     name: run unit tests
43 |     runs-on: ubuntu-latest
44 |     steps:
45 |       - name: git checkout
46 |         uses: actions/checkout@v2
47 | 
48 |       - name: setup go ${{ env.GO_VERSION }}
49 |         uses: actions/setup-go@v5
50 |         with:
51 |           go-version: '${{ env.GO_VERSION }}'
52 |           cache: 'true'
53 | 
54 |       - name: run unit test
55 |         run: make unit
56 | 
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # ---> Go
2 | # Compiled Object files, Static and Dynamic libs (Shared Objects)
3 | *.o
4 | *.a
5 | *.so
6 | 
7 | # Folders
8 | _obj
9 | _test
10 | 
11 | # Test data directories
12 | testdata/
13 | 
14 | # Architecture specific extensions/prefixes
15 | *.[568vq]
16 | [568vq].out
17 | 
18 | *.cgo1.go
19 | *.cgo2.c
20 | _cgo_defun.c
21 | _cgo_gotypes.go
22 | _cgo_export.*
23 | 
24 | _testmain.go
25 | 
26 | *.exe
27 | *.test
28 | *.prof
29 | 
30 | /lndinit
31 | /lndinit-debug
32 | 
33 | lndinit-v*/
34 | 
35 | *.key
36 | *.hex
37 | 
38 | # vim
39 | *.swp
40 | 
41 | *.hex
42 | *.db
43 | *.bin
44 | 
45 | vendor
46 | *.idea
47 | *.iml
48 | profile.cov
49 | profile.tmp
50 | 
51 | .DS_Store
52 | 
53 | .vscode
54 | 
55 | # Coverage test
56 | coverage.txt
57 | 
58 | # Visual Studio cache/options directory
59 | .vs/
--------------------------------------------------------------------------------
/.golangci.yaml:
-------------------------------------------------------------------------------- 1 | run: 2 | # timeout for analysis 3 | timeout: 10m 4 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | ARG GO_VERSION=1.23.6 2 | ARG BASE_IMAGE=lightninglabs/lnd 3 | ARG BASE_IMAGE_VERSION=v0.19.0-beta 4 | 5 | FROM golang:${GO_VERSION}-alpine as builder 6 | 7 | # Force Go to use the cgo based DNS resolver. This is required to ensure DNS 8 | # queries required to connect to linked containers succeed. 9 | ENV GODEBUG netdns=cgo 10 | 11 | # Pass a tag, branch or a commit using build-arg. This allows a docker image to 12 | # be built from a specified Git state. The default image will use the Git tip of 13 | # main by default. 14 | ARG checkout="main" 15 | ARG git_url="https://github.com/lightninglabs/lndinit" 16 | 17 | # Install dependencies and build the binaries. 18 | RUN apk add --no-cache --update alpine-sdk \ 19 | git \ 20 | make \ 21 | && git clone $git_url /go/src/github.com/lightninglabs/lndinit \ 22 | && cd /go/src/github.com/lightninglabs/lndinit \ 23 | && git checkout $checkout \ 24 | && make release-install 25 | 26 | # Start a new, final image. 27 | FROM ${BASE_IMAGE}:${BASE_IMAGE_VERSION} as final 28 | 29 | # Copy the binary from the builder image. 
30 | COPY --from=builder /go/bin/lndinit /bin/ 31 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2022 Lightning Labs 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | PKG := github.com/lightninglabs/lndinit 2 | ESCPKG := github.com\/lightninglabs\/lndinit 3 | TOOLS_DIR := tools 4 | 5 | GO_BIN := ${GOPATH}/bin 6 | GOIMPORTS_BIN := $(GO_BIN)/gosimports 7 | 8 | COMMIT := $(shell git describe --tags --dirty) 9 | COMMIT_HASH := $(shell git rev-parse HEAD) 10 | 11 | GOBUILD := go build -v 12 | GOINSTALL := go install -v 13 | GOTEST := go test -v 14 | DOCKER_TOOLS := docker run -v $$(pwd):/build lndinit-tools 15 | 16 | GOFILES_NOVENDOR = $(shell find . -type f -name '*.go' -not -path "./vendor/*") 17 | 18 | RM := rm -f 19 | CP := cp 20 | MAKE := make 21 | XARGS := xargs -L 1 22 | 23 | VERSION_TAG = $(shell git describe --tags) 24 | 25 | DEV_TAGS = kvdb_etcd kvdb_postgres kvdb_sqlite 26 | RELEASE_TAGS = $(DEV_TAGS) 27 | 28 | BUILD_SYSTEM = darwin-amd64 \ 29 | darwin-arm64 \ 30 | linux-386 \ 31 | linux-amd64 \ 32 | linux-armv6 \ 33 | linux-armv7 \ 34 | linux-arm64 \ 35 | windows-386 \ 36 | windows-amd64 \ 37 | windows-arm 38 | 39 | # By default we will build all systems. But with the 'sys' tag, a specific 40 | # system can be specified. This is useful to release for a subset of 41 | # systems/architectures. 42 | ifneq ($(sys),) 43 | BUILD_SYSTEM = $(sys) 44 | endif 45 | 46 | ifneq ($(tag),) 47 | VERSION_TAG = $(tag) 48 | endif 49 | 50 | # We only return the part inside the double quote here to avoid escape issues 51 | # when calling the external release script. The second parameter can be used to 52 | # add additional ldflags if needed (currently only used for the release). 
53 | make_ldflags = $(2) -X main.Commit=$(COMMIT)
54 | 
55 | DEV_GCFLAGS := -gcflags "all=-N -l"
56 | LDFLAGS := -ldflags "$(call make_ldflags, $(DEV_TAGS), -s -w)"
57 | DEV_LDFLAGS := -ldflags "$(call make_ldflags, $(DEV_TAGS))"
58 | 
59 | # For the release, we want to remove the symbol table and debug information (-s)
60 | # and omit the DWARF symbol table (-w). Also we clear the build ID.
61 | RELEASE_LDFLAGS := $(call make_ldflags, $(RELEASE_TAGS), -s -w -buildid=)
62 | 
63 | GREEN := "\\033[0;32m"
64 | NC := "\\033[0m"
65 | define print
66 | 	echo $(GREEN)$1$(NC)
67 | endef
68 | 
69 | default: scratch
70 | 
71 | all: scratch install
72 | 
73 | # ============
74 | # DEPENDENCIES
75 | # ============
76 | $(GOIMPORTS_BIN):
77 | 	@$(call print, "Installing goimports.")
78 | 	cd $(TOOLS_DIR); go install -trimpath github.com/rinchsan/gosimports/cmd/gosimports
79 | 
80 | # ============
81 | # INSTALLATION
82 | # ============
83 | 
84 | build:
85 | 	@$(call print, "Building debug lndinit.")
86 | 	$(GOBUILD) -tags="$(DEV_TAGS)" -o lndinit-debug $(DEV_GCFLAGS) $(DEV_LDFLAGS) $(PKG)
87 | 
88 | install:
89 | 	@$(call print, "Installing lndinit.")
90 | 	$(GOINSTALL) -tags="$(DEV_TAGS)" $(LDFLAGS) $(PKG)
91 | 
92 | release-install:
93 | 	@$(call print, "Installing release lndinit.")
94 | 	env CGO_ENABLED=0 $(GOINSTALL) -v -trimpath -ldflags="$(RELEASE_LDFLAGS)" -tags="$(RELEASE_TAGS)" $(PKG)
95 | 
96 | release:
97 | 	@$(call print, "Creating release of lndinit.")
98 | 	./release.sh build-release "$(VERSION_TAG)" "$(BUILD_SYSTEM)" "$(RELEASE_LDFLAGS)" "$(RELEASE_TAGS)"
99 | 
100 | docker-tools:
101 | 	@$(call print, "Building tools docker image.")
102 | 	docker build -q -t lndinit-tools $(TOOLS_DIR)
103 | 
104 | scratch: build
105 | 
106 | # =========
107 | # UTILITIES
108 | # =========
109 | 
110 | unit:
111 | 	@$(call print, "Running unit tests.")
112 | 	$(GOTEST) -tags="$(DEV_TAGS)" ./...
113 | 
114 | fmt: $(GOIMPORTS_BIN)
115 | 	@$(call print, "Fixing imports.")
116 | 	gosimports -w $(GOFILES_NOVENDOR)
117 | 	@$(call print, "Formatting source.")
118 | 	gofmt -l -w -s $(GOFILES_NOVENDOR)
119 | 
120 | lint: docker-tools
121 | 	@$(call print, "Linting source.")
122 | 	$(DOCKER_TOOLS) golangci-lint run -v --build-tags="$(DEV_TAGS)" $(LINT_WORKERS)
123 | 
124 | vendor:
125 | 	@$(call print, "Re-creating vendor directory.")
126 | 	rm -r vendor/; go mod vendor
127 | 
128 | clean:
129 | 	@$(call print, "Cleaning source.")
130 | 	$(RM) ./lndinit-debug
131 | 	$(RM) -r ./vendor .vendor-new
132 | 
133 | .PHONY: all \
134 | 	default \
135 | 	build \
136 | 	install \
137 | 	scratch \
138 | 	fmt \
139 | 	lint \
140 | 	vendor \
141 | 	clean
142 | 
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # lndinit: a wallet initializer utility for lnd
2 | 
3 | This repository contains the source for the `lndinit` command.
4 | The main purpose of `lndinit` is to help automate the `lnd` wallet
5 | initialization, including seed and password generation.
6 | 
7 | - [Requirements](#requirements)
8 | - [Subcommands](#subcommands)
9 |   - [`gen-password`](#gen-password)
10 |   - [`gen-seed`](#gen-seed)
11 |   - [`load-secret`](#load-secret)
12 |   - [`store-secret`](#store-secret)
13 |   - [`store-configmap`](#store-configmap)
14 |   - [`init-wallet`](#init-wallet)
15 |   - [`wait-ready`](#wait-ready)
16 |   - [`migrate-db`](#migrate-db)
17 | - [Example usage](#example-usage)
18 |   - [Basic setup](#example-use-case-1-basic-setup)
19 |   - [Kubernetes](#example-use-case-2-kubernetes)
20 |   - [Logging and idempotent operations](#logging-and-idempotent-operations)
21 | 
22 | ## Requirements
23 | 
24 | Most commands of this tool operate independently of `lnd` and therefore don't
25 | require a specific version to be installed.
26 | 27 | The commands `wait-ready` and `init-wallet` only work with `lnd v0.14.2-beta` 28 | and later though. 29 | 30 | A recent version of Kubernetes is needed when interacting with secrets stored in 31 | k8s. Any version `>= v1.8` should work. 32 | 33 | --- 34 | 35 | ## Subcommands 36 | 37 | Most commands work without `lnd` running, as they are designed to do some provisioning work _before_ `lnd` is started. 38 | 39 | ### gen-password 40 | `gen-password` generates a random password (no `lnd` needed) 41 | 42 | ### gen-seed 43 | `gen-seed` generates a random seed phrase 44 | 45 | No `lnd` needed, but seed will be in `lnd`-specific [`aezeed` format](https://github.com/lightningnetwork/lnd/blob/master/aezeed/README.md) 46 | 47 | ### load-secret 48 | `load-secret` interacts with kubernetes to read from secrets (no `lnd` needed) 49 | 50 | ### store-secret 51 | `store-secret` interacts with kubernetes to write to secrets (no `lnd` needed) 52 | 53 | ### store-configmap 54 | `store-configmap` interacts with kubernetes to write to configmaps (no `lnd` needed) 55 | 56 | ### init-wallet 57 | `init-wallet` has two modes: 58 | - `--init-type=file` creates an `lnd` specific `wallet.db` file 59 | - Only works if `lnd` is NOT running yet 60 | - `--init-type=rpc` calls the `lnd` RPC to create a wallet 61 | - Use this mode if you are using a remote database as `lnd`'s storage backend instead of bolt DB based file databases 62 | - Needs `lnd` to be running and no wallet to exist 63 | 64 | ### wait-ready 65 | `wait-ready` waits for `lnd` to be ready by connecting to `lnd`'s status RPC 66 | - Needs `lnd` to run, eventually 67 | 68 | ### migrate-db 69 | `migrate-db` migrates the content of one `lnd` database to another, for example 70 | from `bbolt` to Postgres. See [data migration guide](docs/data-migration.md) for 71 | more information. 
72 | 73 | --- 74 | 75 | ## Example Usage 76 | 77 | ### Example use case 1: Basic setup 78 | 79 | This is a very basic example that shows the purpose of the different sub 80 | commands of the `lndinit` binary. In this example, all secrets are stored in 81 | files. This is normally not a good security practice as potentially other users 82 | or processes on a system can read those secrets if the permissions aren't set 83 | correctly. It is advised to store secrets in dedicated secret storage services 84 | like Kubernetes Secrets or HashiCorp Vault. 85 | 86 | #### 1. Generate a seed without a seed passphrase 87 | 88 | Create a new seed if one does not exist yet. 89 | 90 | ```shell 91 | $ if [[ ! -f /safe/location/seed.txt ]]; then 92 | lndinit gen-seed > /safe/location/seed.txt 93 | fi 94 | ``` 95 | 96 | #### 2. Generate a wallet password 97 | 98 | Create a new wallet password if one does not exist yet. 99 | 100 | ```shell 101 | $ if [[ ! -f /safe/location/walletpassword.txt ]]; then 102 | lndinit gen-password > /safe/location/walletpassword.txt 103 | fi 104 | ``` 105 | 106 | #### 3. Initialize the wallet 107 | 108 | Create the wallet database with the given seed and password files. If the wallet 109 | already exists, we make sure we can actually unlock it with the given password 110 | file. This will take a few seconds in any case. 111 | 112 | ```shell 113 | $ lndinit -v init-wallet \ 114 | --secret-source=file \ 115 | --file.seed=/safe/location/seed.txt \ 116 | --file.wallet-password=/safe/location/walletpassword.txt \ 117 | --init-file.output-wallet-dir=$HOME/.lnd/data/chain/bitcoin/mainnet \ 118 | --init-file.validate-password 119 | ``` 120 | 121 | #### 4. Start and auto unlock lnd 122 | 123 | With everything prepared, we can now start lnd and instruct it to auto unlock 124 | itself with the password in the file we prepared. 125 | 126 | ```shell 127 | $ lnd \ 128 | --bitcoin.active \ 129 | ... 
130 | --wallet-unlock-password-file=/safe/location/walletpassword.txt 131 | ``` 132 | 133 | ### Example use case 2: Kubernetes 134 | 135 | This example shows how Kubernetes (k8s) Secrets can be used to store the wallet 136 | seed and password. The pod running those commands must be provisioned with a 137 | service account that has permissions to read/create/modify secrets in a given 138 | namespace. 139 | 140 | Here's an example of a service account, role provision and pod definition: 141 | 142 | ```yaml 143 | apiVersion: v1 144 | kind: ServiceAccount 145 | metadata: 146 | name: lnd-provision-account 147 | 148 | 149 | --- 150 | apiVersion: rbac.authorization.k8s.io/v1 151 | kind: Role 152 | metadata: 153 | name: lnd-update-secrets-role 154 | namespace: default 155 | rules: 156 | - apiGroups: [ "" ] 157 | resources: [ "secrets" ] 158 | verbs: [ "get", "list", "create", "watch", "update", "patch" ] 159 | 160 | 161 | --- 162 | apiVersion: rbac.authorization.k8s.io/v1 163 | kind: RoleBinding 164 | metadata: 165 | name: lnd-update-secrets-role-binding 166 | namespace: default 167 | roleRef: 168 | kind: Role 169 | name: lnd-update-secrets-role 170 | apiGroup: rbac.authorization.k8s.io 171 | subjects: 172 | - kind: ServiceAccount 173 | name: lnd-provision-account 174 | namespace: default 175 | 176 | 177 | --- 178 | apiVersion: apps/v1 179 | kind: Deployment 180 | metadata: 181 | name: lnd-pod 182 | spec: 183 | strategy: 184 | type: Recreate 185 | replicas: 1 186 | template: 187 | spec: 188 | # We use the special service account created, so the init script is able 189 | # to update the secret as expected. 190 | serviceAccountName: lnd-provision-account 191 | 192 | containers: 193 | # The main lnd container 194 | - name: lnd 195 | 196 | # The lndinit image is an image based on the main lnd image that just 197 | # adds the lndinit binary to it. 
The tag name is simply: 198 | # -lnd- 199 | image: lightninglabs/lndinit:v0.1.0-lnd-v0.14.2-beta 200 | env: 201 | - name: WALLET_SECRET_NAME 202 | value: lnd-wallet-secret 203 | - name: WALLET_DIR 204 | value: /root/.lnd/data/chain/bitcoin/mainnet 205 | - name: CERT_DIR 206 | value: /root/.lnd 207 | - name: UPLOAD_RPC_SECRETS 208 | value: '1' 209 | - name: RPC_SECRETS_NAME 210 | value: lnd-rpc-secrets 211 | command: [ '/init-wallet-k8s.sh' ] 212 | args: [ 213 | '--bitcoin.mainnet', 214 | '...', 215 | '--wallet-unlock-password-file=/tmp/wallet-password', 216 | ] 217 | ``` 218 | 219 | The `/init-wallet-k8s.sh` script that is invoked in the example above can be 220 | found in this repository: 221 | [`example-init-wallet-k8s.sh`](example-init-wallet-k8s.sh) 222 | The script executes the steps described in this example and also uploads the 223 | RPC secrets (`tls.cert` and all `*.macaroon` files) to another secret so apps 224 | using the `lnd` node can access those secrets. 225 | 226 | #### 1. Generate a seed passphrase (optional) 227 | 228 | Generate a new seed passphrase. If an entry with the key already exists in the 229 | k8s secret, it is not overwritten, and the operation is a no-op. 230 | 231 | ```shell 232 | $ lndinit gen-password \ 233 | | lndinit -v store-secret \ 234 | --target=k8s \ 235 | --k8s.secret-name=lnd-secrets \ 236 | --k8s.secret-key-name=seed-passphrase 237 | ``` 238 | 239 | #### 2. Generate a seed using the passphrase 240 | 241 | Generate a new seed with the passphrase created before. If an entry with that 242 | key already exists in the k8s secret, it is not overwritten, and the operation 243 | is a no-op. 244 | 245 | ```shell 246 | $ lndinit -v gen-seed \ 247 | --passphrase-k8s.secret-name=lnd-secrets \ 248 | --passphrase-k8s.secret-key-name=seed-passphrase \ 249 | | lndinit -v store-secret \ 250 | --target=k8s \ 251 | --k8s.secret-name=lnd-secrets \ 252 | --k8s.secret-key-name=seed 253 | ``` 254 | 255 | #### 3. 
Generate a wallet password 256 | 257 | Generate a new wallet password. If an entry with that key already exists in the 258 | k8s secret, it is not overwritten, and the operation is a no-op. 259 | 260 | ```shell 261 | $ lndinit gen-password \ 262 | | lndinit -v store-secret \ 263 | --target=k8s \ 264 | --k8s.secret-name=lnd-secrets \ 265 | --k8s.secret-key-name=wallet-password 266 | ``` 267 | 268 | #### 4. Initialize the wallet, attempting a test unlock with the password 269 | 270 | Create the wallet database with the given seed, seed passphrase and wallet 271 | password loaded from a k8s secret. If the wallet already exists, we make sure we 272 | can actually unlock it with the given password file. This will take a few 273 | seconds in any case. 274 | 275 | ```shell 276 | $ lndinit -v init-wallet \ 277 | --secret-source=k8s \ 278 | --k8s.secret-name=lnd-secrets \ 279 | --k8s.seed-key-name=seed \ 280 | --k8s.seed-passphrase-key-name=seed-passphrase \ 281 | --k8s.wallet-password-key-name=wallet-password \ 282 | --init-file.output-wallet-dir=$HOME/.lnd/data/chain/bitcoin/mainnet \ 283 | --init-file.validate-password 284 | ``` 285 | 286 | The above is an example for a file/bbolt based node. For such a node creating 287 | the wallet directly as a file is the most secure option, since it doesn't 288 | require the node to spin up the wallet unlocker RPC (which doesn't use macaroons 289 | and is therefore un-authenticated). 290 | 291 | But in setups where the wallet isn't a file (since all state is in a remote 292 | database such as etcd or Postgres), this method cannot be used. 
293 | Instead, the wallet needs to be initialized through RPC, as shown in the next 294 | example: 295 | 296 | ```shell 297 | $ lndinit -v init-wallet \ 298 | --secret-source=k8s \ 299 | --k8s.secret-name=lnd-secrets \ 300 | --k8s.seed-key-name=seed \ 301 | --k8s.seed-passphrase-key-name=seed-passphrase \ 302 | --k8s.wallet-password-key-name=wallet-password \ 303 | --init-type=rpc \ 304 | --init-rpc.server=localhost:10009 \ 305 | --init-rpc.tls-cert-path=$HOME/.lnd/tls.cert 306 | ``` 307 | 308 | **NOTE**: If this is used in combination with the 309 | `--wallet-unlock-password-file=` flag in `lnd` for automatic unlocking, then the 310 | `--wallet-unlock-allow-create` flag also needs to be set. Otherwise, `lnd` won't 311 | be starting the wallet unlocking RPC that is used for initializing the wallet. 312 | 313 | The following example shows how to use the `lndinit init-wallet` command to 314 | create a watch-only wallet from a previously exported accounts JSON file: 315 | 316 | ```shell 317 | $ lndinit -v init-wallet \ 318 | --secret-source=k8s \ 319 | --k8s.secret-name=lnd-secrets \ 320 | --k8s.seed-key-name=seed \ 321 | --k8s.seed-passphrase-key-name=seed-passphrase \ 322 | --k8s.wallet-password-key-name=wallet-password \ 323 | --init-type=rpc \ 324 | --init-rpc.server=localhost:10009 \ 325 | --init-rpc.tls-cert-path=$HOME/.lnd/tls.cert \ 326 | --init-rpc.watch-only \ 327 | --init-rpc.accounts-file=/tmp/accounts.json 328 | ``` 329 | 330 | #### 5. Store the wallet password in a file 331 | 332 | Because we now only have the wallet password as a value in a k8s secret, we need 333 | to retrieve it and store it in a file that `lnd` can read to auto unlock. 
334 | 335 | ```shell 336 | $ lndinit -v load-secret \ 337 | --source=k8s \ 338 | --k8s.secret-name=lnd-secrets \ 339 | --k8s.secret-key-name=wallet-password > /safe/location/walletpassword.txt 340 | ``` 341 | 342 | **Security notice**: 343 | 344 | Any process or user that has access to the file system of the container can 345 | potentially read the password if it's stored as a plain file. 346 | For an extra bump in security, a named pipe can be used instead of a file. That 347 | way the password can only be read exactly once from the pipe during `lnd`'s 348 | startup. 349 | 350 | ```shell 351 | # Create a FIFO pipe first. This will behave like a file except that writes to 352 | # it will only occur once there's a reader on the other end. 353 | $ mkfifo /tmp/wallet-password 354 | 355 | # Read the secret from Kubernetes and write it to the pipe. This will only 356 | # return once lnd is actually reading from the pipe. Therefore we need to run 357 | # the command as a background process (using the ampersand notation). 358 | $ lndinit load-secret \ 359 | --source=k8s \ 360 | --k8s.secret-name=lnd-secrets \ 361 | --k8s.secret-key-name=wallet-password > /tmp/wallet-password & 362 | 363 | # Now run lnd and point it to the named pipe. 364 | $ lnd \ 365 | --bitcoin.active \ 366 | ... 367 | --wallet-unlock-password-file=/tmp/wallet-password 368 | ``` 369 | 370 | #### 6. Start and auto unlock lnd 371 | 372 | With everything prepared, we can now start lnd and instruct it to auto unlock 373 | itself with the password in the file we prepared. 374 | 375 | ```shell 376 | $ lnd \ 377 | --bitcoin.active \ 378 | ... 379 | --wallet-unlock-password-file=/safe/location/walletpassword.txt 380 | ``` 381 | 382 | --- 383 | 384 | ## Logging and idempotent operations 385 | 386 | By default, `lndinit` aborts and exits with a zero return code if the desired 387 | result is already achieved (e.g. a secret key or a wallet database already 388 | exist). 
This can make it hard to follow exactly what is happening when debugging 389 | the initialization. To assist with debugging, the following two flags can be 390 | used: 391 | 392 | - `--verbose (-v)`: Log debug information to `stderr`. 393 | - `--error-on-existing (-e)`: Exit with a non-zero return code (128) if the 394 | result of an operation already exists. See example below. 395 | 396 | **Example**: 397 | 398 | ```shell 399 | # Treat every non-zero return code as abort condition (default for k8s container 400 | # commands). 401 | $ set -e 402 | 403 | # Run the command and catch any non-zero return code in the ret variable. The 404 | # logical OR is required to not fail because of above setting. 405 | $ ret=0 406 | $ lndinit --error-on-existing init-wallet ... || ret=$? 407 | $ if [[ $ret -eq 0 ]]; then 408 | echo "Successfully initialized wallet." 409 | elif [[ $ret -eq 128 ]]; then 410 | echo "Wallet already exists, skipping initialization." 411 | else 412 | echo "Failed to initialize wallet!" 413 | exit 1 414 | fi 415 | ``` 416 | 417 | --- 418 | 419 | ## Release Process 420 | 421 | This project is updated less often than `lnd`, so there are two main aspects to 422 | the release process. When a new `lnd` is released, and it's compatible with 423 | existing `lndinit` binary, only the container image needs to be built. 424 | 425 | ### Binary release 426 | 427 | When binary changes are required (either for dependency upgrades, or to maintain 428 | compatibility with a new `lnd` release) the `lndinit` binary must be rebuilt: 429 | 430 | 1. Apply the necessary code changes. 431 | 2. Adjust the relevant version constant(s) in `version.go`. 432 | - Usually this will be incrementing `AppPatch`. 433 | 3. Open PR and have it merged. 434 | 4. 
A maintainer must push a git-tag with the new version: 435 | - `git checkout main && git pull` 436 | - `TAG=v0..-beta` (e.g.: `TAG=v0.1.15-beta`) 437 | - `git tag $TAG && git push $TAG` 438 | 439 | Then proceed to the container image release process. 440 | 441 | ### Container image release 442 | 443 | When a new version of `lnd` is released, a new `lndinit` container image build 444 | is triggered by pushing a tag with the format: 445 | `docker/-lnd-` 446 | 447 | For example, to build an image based on lnd `v0.16.4-beta`, which includes 448 | lndinit `v0.1.15-beta`: 449 | 450 | ``` 451 | LNDINIT_VERSION=v0.1.15-beta 452 | LND_VERSION=v0.16.4-beta 453 | 454 | git checkout $LNDINIT_VERSION 455 | git tag docker/${LNDINIT_VERSION}-lnd-${LND_VERSION} 456 | git push docker/${LNDINIT_VERSION}-lnd-${LND_VERSION} 457 | ``` 458 | 459 | If lnd `v0.16.5-beta` is released and does not require additional `lndinit` 460 | binary changes, the desired image can be built by re-running the previous 461 | command with the lnd version adjusted. _In this case, there's no need to modify 462 | any code in this repo._ 463 | 464 | For more detail, refer to the [docker.yml](.github/workflows/docker.yml) 465 | Github workflow. 
466 | -------------------------------------------------------------------------------- /cmd_gen_password.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "crypto/rand" 5 | "fmt" 6 | "strings" 7 | 8 | "github.com/jessevdk/go-flags" 9 | "github.com/kkdai/bstream" 10 | "github.com/lightningnetwork/lnd/aezeed" 11 | ) 12 | 13 | const ( 14 | defaultNumMnemonicWords = 8 15 | defaultPasswordEntropyBits = aezeed.BitsPerWord * defaultNumMnemonicWords 16 | defaultPasswordEntropyBytes = defaultPasswordEntropyBits / 8 17 | ) 18 | 19 | type jsonPassword struct { 20 | Password string `json:"password"` 21 | } 22 | 23 | type genPasswordCommand struct { 24 | Output string `long:"output" short:"o" description:"Output format" choice:"raw" choice:"json"` 25 | } 26 | 27 | func newGenPasswordCommand() *genPasswordCommand { 28 | return &genPasswordCommand{ 29 | Output: outputFormatRaw, 30 | } 31 | } 32 | 33 | func (x *genPasswordCommand) Register(parser *flags.Parser) error { 34 | _, err := parser.AddCommand( 35 | "gen-password", 36 | "Generate a strong password", 37 | "Generate a strong password with 11 bytes of entropy and "+ 38 | "print it to stdout, either as raw text or "+ 39 | "formatted as JSON", 40 | x, 41 | ) 42 | return err 43 | } 44 | 45 | func (x *genPasswordCommand) Execute(_ []string) error { 46 | // Read a few bytes of random entropy. 47 | var password [defaultPasswordEntropyBytes]byte 48 | if _, err := rand.Read(password[:]); err != nil { 49 | return fmt.Errorf("unable get password entropy: %v", err) 50 | } 51 | 52 | // Then turn the password bytes into a human readable password by using 53 | // the aezeed default mnemonic wordlist. 
54 | cipherBits := bstream.NewBStreamReader(password[:]) 55 | passwordWords := make([]string, defaultNumMnemonicWords) 56 | for i := 0; i < defaultNumMnemonicWords; i++ { 57 | index, err := cipherBits.ReadBits(aezeed.BitsPerWord) 58 | if err != nil { 59 | return err 60 | } 61 | 62 | passwordWords[i] = aezeed.DefaultWordList[index] 63 | } 64 | 65 | passwordString := strings.Join(passwordWords, "-") 66 | if x.Output == outputFormatJSON { 67 | var err error 68 | passwordString, err = asJSON(&jsonPassword{ 69 | Password: passwordString, 70 | }) 71 | if err != nil { 72 | return err 73 | } 74 | } 75 | 76 | fmt.Printf("%s", passwordString) 77 | 78 | return nil 79 | } 80 | -------------------------------------------------------------------------------- /cmd_gen_seed.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "crypto/rand" 5 | "fmt" 6 | "os" 7 | "strings" 8 | "time" 9 | 10 | "github.com/jessevdk/go-flags" 11 | "github.com/lightningnetwork/lnd/aezeed" 12 | ) 13 | 14 | const ( 15 | defaultEntropyBytes = 16 16 | ) 17 | 18 | type jsonSeed struct { 19 | Seed string `json:"seed"` 20 | Birthday int64 `json:"birthday_timestamp"` 21 | } 22 | 23 | type genSeedCommand struct { 24 | EntropySourceFile string `long:"entropy-source-file" description:"The file descriptor to read the seed entropy from; if set lndinit will read exactly 16 bytes from the file, otherwise the default crypto/rand source will be used"` 25 | PassphraseFile string `long:"passphrase-file" description:"The file to read the seed passphrase from; if not set, no seed passphrase will be used, unless --passhprase-k8s is used"` 26 | PassphraseK8s *k8sSecretOptions `group:"Flags for reading seed passphrase from Kubernetes" namespace:"passphrase-k8s"` 27 | Output string `long:"output" short:"o" description:"Output format" choice:"raw" choice:"json"` 28 | } 29 | 30 | func newGenSeedCommand() *genSeedCommand { 31 | return &genSeedCommand{ 32 | 
Output: outputFormatRaw, 33 | PassphraseK8s: &k8sSecretOptions{ 34 | Namespace: defaultK8sNamespace, 35 | }, 36 | } 37 | } 38 | 39 | func (x *genSeedCommand) Register(parser *flags.Parser) error { 40 | _, err := parser.AddCommand( 41 | "gen-seed", 42 | "Generate an lnd wallet seed", 43 | "Generate a fresh lnd wallet seed (aezeed) with 16 bytes of "+ 44 | "entropy read from the given entropy file or the "+ 45 | "system's default cryptographic entropy source; the "+ 46 | "seed is printed to stdout, either as raw text or "+ 47 | "formatted as JSON", 48 | x, 49 | ) 50 | return err 51 | } 52 | 53 | func (x *genSeedCommand) Execute(_ []string) error { 54 | // First find out if we want to set a seed passphrase. 55 | var ( 56 | passPhrase string 57 | err error 58 | ) 59 | switch { 60 | // Both file and Kubernetes input set. 61 | case x.PassphraseFile != "" && x.PassphraseK8s.AnySet(): 62 | return fmt.Errorf("invalid passphrase input, either use file " + 63 | "or k8s but not both") 64 | 65 | // Read passphrase from file. 66 | case x.PassphraseFile != "": 67 | passPhrase, err = readFile(x.PassphraseFile) 68 | 69 | // Read passphrase from Kubernetes secret. 70 | case x.PassphraseK8s.AnySet(): 71 | k8sSecret := &k8sObjectOptions{ 72 | Namespace: x.PassphraseK8s.Namespace, 73 | Name: x.PassphraseK8s.SecretName, 74 | KeyName: x.PassphraseK8s.SecretKeyName, 75 | Base64: x.PassphraseK8s.Base64, 76 | ObjectType: ObjectTypeSecret, 77 | } 78 | 79 | passPhrase, _, err = readK8s(k8sSecret) 80 | 81 | } 82 | if err != nil { 83 | return err 84 | } 85 | 86 | // Next read our entropy either from the given source or the default 87 | // crypto/rand source. 
88 | var entropy [defaultEntropyBytes]byte 89 | if x.EntropySourceFile != "" { 90 | file, err := os.Open(x.EntropySourceFile) 91 | if err != nil { 92 | return fmt.Errorf("unable to open entropy source file "+ 93 | "%s: %v", x.EntropySourceFile, err) 94 | } 95 | 96 | // Try to read exactly the number of bytes we require and make 97 | // sure we've actually also read that many. 98 | numRead, err := file.Read(entropy[:]) 99 | if err != nil { 100 | return fmt.Errorf("unable to read from entropy source "+ 101 | "file %s: %v", x.EntropySourceFile, err) 102 | } 103 | if numRead != defaultEntropyBytes { 104 | return fmt.Errorf("unable to read %d bytes from "+ 105 | "entropy source, only got %d", 106 | defaultEntropyBytes, numRead) 107 | } 108 | } else { 109 | if _, err := rand.Read(entropy[:]); err != nil { 110 | return fmt.Errorf("unable get seed entropy: %v", err) 111 | } 112 | } 113 | 114 | // We now have everything we need for creating the cipher seed. 115 | seed, err := aezeed.New(aezeed.CipherSeedVersion, &entropy, time.Now()) 116 | if err != nil { 117 | return fmt.Errorf("error creating cipher seed: %v", err) 118 | } 119 | mnemonic, err := seed.ToMnemonic([]byte(passPhrase)) 120 | if err != nil { 121 | return fmt.Errorf("error encrypting cipher seed: %v", err) 122 | } 123 | 124 | seedWords := strings.Join(mnemonic[:], " ") 125 | if x.Output == outputFormatJSON { 126 | seedWords, err = asJSON(&jsonSeed{ 127 | Seed: seedWords, 128 | Birthday: seed.BirthdayTime().Unix(), 129 | }) 130 | if err != nil { 131 | return fmt.Errorf("error encoding as JSON: %v", err) 132 | } 133 | } 134 | 135 | fmt.Printf("%s", seedWords) 136 | 137 | return nil 138 | } 139 | -------------------------------------------------------------------------------- /cmd_init_wallet.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "math" 7 | "os" 8 | "path/filepath" 9 | "strings" 10 | "time" 11 | 12 | 
	"github.com/btcsuite/btcd/chaincfg"
	"github.com/btcsuite/btcwallet/wallet"
	"github.com/jessevdk/go-flags"
	"github.com/lightninglabs/protobuf-hex-display/jsonpb" // nolint
	"github.com/lightningnetwork/lnd/aezeed"
	"github.com/lightningnetwork/lnd/lncfg"
	"github.com/lightningnetwork/lnd/lnrpc"
	"github.com/lightningnetwork/lnd/lnrpc/walletrpc"
	"github.com/lightningnetwork/lnd/signal"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials"
)

const (
	// defaultNoFreelistSync is passed to the btcwallet loader when opening
	// the wallet database.
	defaultNoFreelistSync = true

	// defaultDirPermissions is the mode used when creating the output
	// wallet directory (owner-only access).
	defaultDirPermissions os.FileMode = 0700

	// defaultBitcoinNetwork is the network the wallet is initialized for
	// if no --network flag is given.
	defaultBitcoinNetwork = "mainnet"

	// typeFile and typeRpc are the valid values for the --init-type flag.
	typeFile = "file"
	typeRpc  = "rpc"
)

var (
	// defaultWalletDBTimeout is the bbolt open timeout used when creating
	// or validating the wallet database file.
	defaultWalletDBTimeout = 2 * time.Second
)

// secretSourceFile holds the file paths used when secrets are read from
// individual files (--secret-source=file).
type secretSourceFile struct {
	Seed           string `long:"seed" description:"The full path to the file that contains the seed; if the file does not exist, lndinit will exit with code EXIT_CODE_INPUT_MISSING (129)"`
	SeedPassphrase string `long:"seed-passphrase" description:"The full path to the file that contains the seed passphrase; if not set, no passphrase will be used; if set but the file does not exist, lndinit will exit with code EXIT_CODE_INPUT_MISSING (129)"`
	WalletPassword string `long:"wallet-password" description:"The full path to the file that contains the wallet password; if the file does not exist, lndinit will exit with code EXIT_CODE_INPUT_MISSING (129)"`
}

// secretSourceK8s identifies the Kubernetes secret and the keys within it that
// hold the seed, seed passphrase and wallet password (--secret-source=k8s).
type secretSourceK8s struct {
	Namespace             string `long:"namespace" description:"The Kubernetes namespace the secret is located in"`
	SecretName            string `long:"secret-name" description:"The name of the Kubernetes secret"`
	SeedKeyName           string `long:"seed-key-name" description:"The name of the key within the secret that contains the seed"`
	SeedPassphraseKeyName string `long:"seed-passphrase-key-name" description:"The name of the key within the secret that contains the seed passphrase"`
	WalletPasswordKeyName string `long:"wallet-password-key-name" description:"The name of the key within the secret that contains the wallet password"`
	Base64                bool   `long:"base64" description:"Encode as base64 when storing and decode as base64 when reading"`
}

// initTypeFile holds the flags for initializing the wallet directly as a
// wallet.db file on disk (--init-type=file).
type initTypeFile struct {
	OutputWalletDir  string `long:"output-wallet-dir" description:"The directory in which the wallet.db file should be initialized"`
	ValidatePassword bool   `long:"validate-password" description:"If a wallet file already exists in the output wallet directory, validate that it can be unlocked with the given password; this will try to decrypt the wallet and will take several seconds to complete"`
}

// initTypeRpc holds the flags for initializing the wallet through lnd's wallet
// unlocker RPC (--init-type=rpc).
type initTypeRpc struct {
	Server       string `long:"server" description:"The host:port of the RPC server to connect to"`
	TLSCertPath  string `long:"tls-cert-path" description:"The full path to the RPC server's TLS certificate"`
	WatchOnly    bool   `long:"watch-only" description:"Don't require a seed to be set, initialize the wallet as watch-only; requires the accounts-file flag to be specified"`
	AccountsFile string `long:"accounts-file" description:"The JSON file that contains all accounts xpubs for initializing a watch-only wallet"`
}

// initWalletCommand holds all flags of the init-wallet sub command.
type initWalletCommand struct {
	Network      string            `long:"network" description:"The Bitcoin network to initialize the wallet for, required for wallet internals" choice:"mainnet" choice:"testnet" choice:"testnet3" choice:"regtest" choice:"simnet"`
	SecretSource string            `long:"secret-source" description:"Where to read the secrets from to initialize the wallet with" choice:"file" choice:"k8s"`
	File         *secretSourceFile `group:"Flags for reading the secrets from files (use when --secret-source=file)" namespace:"file"`
	K8s          *secretSourceK8s  `group:"Flags for reading the secrets from Kubernetes (use when --secret-source=k8s)" namespace:"k8s"`
	InitType     string            `long:"init-type" description:"How to initialize the wallet" choice:"file" choice:"rpc"`
	InitFile     *initTypeFile     `group:"Flags for initializing the wallet as a file (use when --init-type=file)" namespace:"init-file"`
	InitRpc      *initTypeRpc      `group:"Flags for initializing the wallet through RPC (use when --init-type=rpc)" namespace:"init-rpc"`
}

// newInitWalletCommand returns an init-wallet command instance with the
// defaults: mainnet, file-based secrets and file-based initialization.
func newInitWalletCommand() *initWalletCommand {
	return &initWalletCommand{
		Network:      defaultBitcoinNetwork,
		SecretSource: storageFile,
		File:         &secretSourceFile{},
		K8s: &secretSourceK8s{
			Namespace: defaultK8sNamespace,
		},
		InitType: typeFile,
		InitFile: &initTypeFile{},
		InitRpc: &initTypeRpc{
			Server: defaultRPCServer,
		},
	}
}

// Register adds the init-wallet sub command to the given CLI parser.
func (x *initWalletCommand) Register(parser *flags.Parser) error {
	_, err := parser.AddCommand(
		"init-wallet",
		"Initialize an lnd wallet database",
		"Create an lnd wallet.db database file initialized with the "+
			"given wallet seed and password",
		x,
	)
	return err
}

// Execute reads the configured secrets and initializes the wallet, either by
// writing a wallet.db file directly or by calling lnd's wallet unlocker RPC.
func (x *initWalletCommand) Execute(_ []string) error {
	// Do we require a seed? We don't if we do an RPC based, watch-only
	// initialization.
	requireSeed := (x.InitType == typeFile) ||
		(x.InitType == typeRpc && !x.InitRpc.WatchOnly)

	seed, seedPassPhrase, walletPassword, err := x.readInput(requireSeed)
	if err != nil {
		return fmt.Errorf("error reading input parameters: %v", err)
	}

	switch x.InitType {
	case typeFile:
		cipherSeed, err := checkSeed(seed, seedPassPhrase)
		if err != nil {
			return err
		}

		// The output directory must be specified explicitly. We don't
		// want to assume any defaults here!
124 | walletDir := lncfg.CleanAndExpandPath( 125 | x.InitFile.OutputWalletDir, 126 | ) 127 | if walletDir == "" { 128 | return fmt.Errorf("must specify output wallet " + 129 | "directory") 130 | } 131 | if strings.HasSuffix(walletDir, ".db") { 132 | return fmt.Errorf("output wallet directory must not " + 133 | "be a file") 134 | } 135 | 136 | return createWalletFile( 137 | cipherSeed, walletPassword, walletDir, x.Network, 138 | x.InitFile.ValidatePassword, 139 | ) 140 | 141 | case typeRpc: 142 | var ( 143 | seedWords []string 144 | watchOnly *lnrpc.WatchOnly 145 | ) 146 | 147 | if requireSeed { 148 | _, err = checkSeed(seed, seedPassPhrase) 149 | if err != nil { 150 | return err 151 | } 152 | seedWords = strings.Split(seed, " ") 153 | } 154 | 155 | // Only when initializing the wallet through RPC is it possible 156 | // to create a watch-only wallet. If we do, we don't require a 157 | // seed to be present but instead want to read an accounts JSON 158 | // file that contains all the wallet's xpubs. 159 | if x.InitRpc.WatchOnly { 160 | // For initializing a watch-only wallet we need the 161 | // accounts JSON file. 
162 | logger.Info("Reading accounts from file") 163 | accountsBytes, err := readFile(x.InitRpc.AccountsFile) 164 | if err != nil { 165 | return err 166 | } 167 | 168 | jsonAccts := &walletrpc.ListAccountsResponse{} 169 | err = jsonpb.Unmarshal( 170 | strings.NewReader(accountsBytes), jsonAccts, 171 | ) 172 | if err != nil { 173 | return fmt.Errorf("error parsing JSON: %v", err) 174 | } 175 | if len(jsonAccts.Accounts) == 0 { 176 | return fmt.Errorf("cannot import empty " + 177 | "account list") 178 | } 179 | 180 | rpcAccounts, err := walletrpc.AccountsToWatchOnly( 181 | jsonAccts.Accounts, 182 | ) 183 | if err != nil { 184 | return fmt.Errorf("error converting JSON "+ 185 | "accounts to RPC: %v", err) 186 | } 187 | 188 | watchOnly = &lnrpc.WatchOnly{ 189 | MasterKeyBirthdayTimestamp: 0, 190 | Accounts: rpcAccounts, 191 | } 192 | } 193 | 194 | return createWalletRpc( 195 | seedWords, seedPassPhrase, walletPassword, 196 | x.InitRpc.Server, x.InitRpc.TLSCertPath, watchOnly, 197 | ) 198 | 199 | default: 200 | return fmt.Errorf("invalid init type %s", x.InitType) 201 | } 202 | } 203 | 204 | func (x *initWalletCommand) readInput(requireSeed bool) (string, string, string, 205 | error) { 206 | 207 | // First find out where we want to read the secrets from. 208 | var ( 209 | seed string 210 | seedPassPhrase string 211 | walletPassword string 212 | err error 213 | ) 214 | switch x.SecretSource { 215 | // Read all secrets from individual files. 216 | case storageFile: 217 | if requireSeed { 218 | logger.Info("Reading seed from file") 219 | seed, err = readFile(x.File.Seed) 220 | if err != nil { 221 | return "", "", "", err 222 | } 223 | } 224 | 225 | // The seed passphrase is optional. 
226 | if x.File.SeedPassphrase != "" { 227 | logger.Info("Reading seed passphrase from file") 228 | seedPassPhrase, err = readFile(x.File.SeedPassphrase) 229 | if err != nil { 230 | return "", "", "", err 231 | } 232 | } 233 | 234 | logger.Info("Reading wallet password from file") 235 | walletPassword, err = readFile(x.File.WalletPassword) 236 | if err != nil { 237 | return "", "", "", err 238 | } 239 | 240 | // Read passphrase from Kubernetes secret. 241 | case storageK8s: 242 | k8sSecret := &k8sObjectOptions{ 243 | Namespace: x.K8s.Namespace, 244 | Name: x.K8s.SecretName, 245 | KeyName: x.K8s.SeedKeyName, 246 | Base64: x.K8s.Base64, 247 | ObjectType: ObjectTypeSecret, 248 | } 249 | 250 | if requireSeed { 251 | logger.Infof("Reading seed from k8s secret %s (namespace %s)", 252 | x.K8s.SecretName, x.K8s.Namespace) 253 | seed, _, err = readK8s(k8sSecret) 254 | if err != nil { 255 | return "", "", "", err 256 | } 257 | } 258 | 259 | // The seed passphrase is optional. 260 | if x.K8s.SeedPassphraseKeyName != "" { 261 | logger.Infof("Reading seed passphrase from k8s secret %s "+ 262 | "(namespace %s)", x.K8s.SecretName, 263 | x.K8s.Namespace) 264 | k8sSecret.KeyName = x.K8s.SeedPassphraseKeyName 265 | seedPassPhrase, _, err = readK8s(k8sSecret) 266 | if err != nil { 267 | return "", "", "", err 268 | } 269 | } 270 | 271 | logger.Infof("Reading wallet password from k8s secret %s (namespace %s)", 272 | x.K8s.SecretName, x.K8s.Namespace) 273 | k8sSecret.KeyName = x.K8s.WalletPasswordKeyName 274 | walletPassword, _, err = readK8s(k8sSecret) 275 | if err != nil { 276 | return "", "", "", err 277 | } 278 | } 279 | 280 | // The seed, its passphrase and the wallet password should all never 281 | // have a newline at their end, otherwise that might lead to errors 282 | // further down the line. 
283 | seed = stripNewline(seed) 284 | seedPassPhrase = stripNewline(seedPassPhrase) 285 | walletPassword = stripNewline(walletPassword) 286 | 287 | return seed, seedPassPhrase, walletPassword, nil 288 | } 289 | 290 | func createWalletFile(cipherSeed *aezeed.CipherSeed, walletPassword, walletDir, 291 | network string, validatePassword bool) error { 292 | 293 | // The wallet directory must either not exist yet or be a directory. 294 | stat, err := os.Stat(walletDir) 295 | switch { 296 | case os.IsNotExist(err): 297 | err = os.MkdirAll(walletDir, defaultDirPermissions) 298 | if err != nil { 299 | return fmt.Errorf("error creating directory %s: %v", 300 | walletDir, err) 301 | } 302 | 303 | case !stat.IsDir(): 304 | return fmt.Errorf("output wallet directory must not be a file") 305 | } 306 | 307 | // We should now be able to properly determine if a wallet already 308 | // exists or not. Depending on the flags, we either create or validate 309 | // the wallet now. 310 | walletFile := filepath.Join(walletDir, wallet.WalletDBName) 311 | switch { 312 | case lnrpc.FileExists(walletFile) && !validatePassword: 313 | return fmt.Errorf("wallet file %s exists: %v", walletFile, 314 | errTargetExists) 315 | 316 | case !lnrpc.FileExists(walletFile): 317 | return createWallet( 318 | walletDir, cipherSeed, []byte(walletPassword), 319 | network, 320 | ) 321 | 322 | default: 323 | return validateWallet( 324 | walletDir, []byte(walletPassword), network, 325 | ) 326 | } 327 | } 328 | 329 | func createWallet(walletDir string, cipherSeed *aezeed.CipherSeed, 330 | walletPassword []byte, network string) error { 331 | 332 | logger.Infof("Creating new wallet in %s", walletDir) 333 | 334 | // The network parameters are needed for some wallet internal things 335 | // like the chain genesis hash and timestamp. 336 | netParams, err := getNetworkParams(network) 337 | if err != nil { 338 | return err 339 | } 340 | 341 | // Create the wallet now. 
342 | loader := wallet.NewLoader( 343 | netParams, walletDir, defaultNoFreelistSync, 344 | defaultWalletDBTimeout, 0, 345 | ) 346 | 347 | _, err = loader.CreateNewWallet( 348 | walletPassword, walletPassword, cipherSeed.Entropy[:], 349 | cipherSeed.BirthdayTime(), 350 | ) 351 | if err != nil { 352 | return fmt.Errorf("error creating wallet from seed: %v", err) 353 | } 354 | 355 | // Close the wallet properly to release the file lock on the DB. 356 | if err := loader.UnloadWallet(); err != nil { 357 | return fmt.Errorf("error unloading wallet after creation: %v", 358 | err) 359 | } 360 | 361 | logger.Infof("Wallet created successfully in %s", walletDir) 362 | 363 | return nil 364 | } 365 | 366 | func validateWallet(walletDir string, walletPassword []byte, 367 | network string) error { 368 | 369 | logger.Infof("Validating password for wallet in %s", walletDir) 370 | 371 | // The network parameters are needed for some wallet internal things 372 | // like the chain genesis hash and timestamp. 373 | netParams, err := getNetworkParams(network) 374 | if err != nil { 375 | return err 376 | } 377 | 378 | // Try to load the wallet now. This will fail if the wallet is already 379 | // loaded by another process or does not exist yet. 
380 | loader := wallet.NewLoader( 381 | netParams, walletDir, defaultNoFreelistSync, 382 | defaultWalletDBTimeout, 0, 383 | ) 384 | _, err = loader.OpenExistingWallet(walletPassword, false) 385 | if err != nil { 386 | return fmt.Errorf("error validating wallet password: %v", err) 387 | } 388 | 389 | if err := loader.UnloadWallet(); err != nil { 390 | return fmt.Errorf("error unloading wallet after validation: %v", 391 | err) 392 | } 393 | 394 | logger.Info("Wallet password validated successfully") 395 | 396 | return nil 397 | } 398 | 399 | func createWalletRpc(seedWords []string, seedPassword, walletPassword, 400 | rpcServer, tlsPath string, watchOnly *lnrpc.WatchOnly) error { 401 | 402 | // Since this will potentially run for a while (we need to wait for 403 | // compaction), make sure we catch any interrupt signals. 404 | shutdown, err := signal.Intercept() 405 | if err != nil { 406 | return fmt.Errorf("error intercepting signals: %v", err) 407 | } 408 | 409 | // First, we want to make sure the wallet doesn't actually exist. We 410 | // wait until we either get the NON_EXISTING code or an error because 411 | // the desired state wasn't achieved (a state _greater_ than 412 | // NON_EXISTING was returned, which means the wallet exists). 413 | timeout := time.Duration(math.MaxInt64) 414 | err = waitUntilStatus( 415 | rpcServer, lnrpc.WalletState_NON_EXISTING, 416 | timeout, shutdown.ShutdownChannel(), 417 | ) 418 | if err != nil { 419 | return fmt.Errorf("error waiting for lnd startup: %v", err) 420 | } 421 | 422 | // We are now certain that the wallet doesn't exist yet, so we can go 423 | // ahead and try to create it. 
424 | client, err := getUnlockerConnection(rpcServer, tlsPath) 425 | if err != nil { 426 | return fmt.Errorf("error creating wallet unlocker connection: "+ 427 | "%v", err) 428 | } 429 | 430 | ctxb := context.Background() 431 | _, err = client.InitWallet(ctxb, &lnrpc.InitWalletRequest{ 432 | CipherSeedMnemonic: seedWords, 433 | AezeedPassphrase: []byte(seedPassword), 434 | WalletPassword: []byte(walletPassword), 435 | WatchOnly: watchOnly, 436 | }) 437 | return err 438 | } 439 | 440 | func checkSeed(seed, seedPassPhrase string) (*aezeed.CipherSeed, error) { 441 | // Decrypt the seed now to make sure we got valid data before we 442 | // check anything else. 443 | seedWords := strings.Split(seed, " ") 444 | if len(seedWords) != aezeed.NumMnemonicWords { 445 | return nil, fmt.Errorf("invalid seed, expected %d words but "+ 446 | "got %d", aezeed.NumMnemonicWords, len(seedWords)) 447 | } 448 | var seedMnemonic aezeed.Mnemonic 449 | copy(seedMnemonic[:], seedWords) 450 | cipherSeed, err := seedMnemonic.ToCipherSeed([]byte(seedPassPhrase)) 451 | if err != nil { 452 | return nil, fmt.Errorf("error decrypting seed with "+ 453 | "passphrase: %v", err) 454 | } 455 | 456 | return cipherSeed, nil 457 | } 458 | 459 | func getNetworkParams(network string) (*chaincfg.Params, error) { 460 | switch strings.ToLower(network) { 461 | case "mainnet": 462 | return &chaincfg.MainNetParams, nil 463 | 464 | case "testnet", "testnet3": 465 | return &chaincfg.TestNet3Params, nil 466 | 467 | case "regtest": 468 | return &chaincfg.RegressionNetParams, nil 469 | 470 | case "simnet": 471 | return &chaincfg.SimNetParams, nil 472 | 473 | default: 474 | return nil, fmt.Errorf("unknown network: %v", network) 475 | } 476 | } 477 | 478 | func getUnlockerConnection(rpcServer, 479 | tlsPath string) (lnrpc.WalletUnlockerClient, error) { 480 | 481 | creds, err := credentials.NewClientTLSFromFile(tlsPath, "") 482 | if err != nil { 483 | return nil, fmt.Errorf("error loading TLS certificate "+ 484 | "from 
%s: %v", tlsPath, err) 485 | } 486 | 487 | // We need to use a custom dialer so we can also connect to unix sockets 488 | // and not just TCP addresses. 489 | genericDialer := lncfg.ClientAddressDialer(defaultRPCPort) 490 | opts := []grpc.DialOption{ 491 | grpc.WithTransportCredentials(creds), 492 | grpc.WithContextDialer(genericDialer), 493 | } 494 | 495 | conn, err := grpc.Dial(rpcServer, opts...) 496 | if err != nil { 497 | return nil, fmt.Errorf("unable to connect to RPC server: %v", 498 | err) 499 | } 500 | 501 | return lnrpc.NewWalletUnlockerClient(conn), nil 502 | } 503 | -------------------------------------------------------------------------------- /cmd_init_wallet_test.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "os" 5 | "testing" 6 | 7 | "github.com/stretchr/testify/require" 8 | ) 9 | 10 | var ( 11 | testSeedWithNewline = []byte("seed phrase with newline\n") 12 | testPasswordWithNewline = []byte("p4ssw0rd\r\n\n\r\r\n") 13 | ) 14 | 15 | // TestReadInput makes sure input files are always trimmed so we don't have any 16 | // newline characters left over. 
17 | func TestReadInput(t *testing.T) { 18 | cmd := newInitWalletCommand() 19 | 20 | cmd.File.Seed = writeToTempFile(t, testSeedWithNewline) 21 | cmd.File.WalletPassword = writeToTempFile(t, testPasswordWithNewline) 22 | 23 | seed, seedPassphrase, walletPassword, err := cmd.readInput(true) 24 | require.NoError(t, err) 25 | require.Equal(t, "seed phrase with newline", seed) 26 | require.Equal(t, "", seedPassphrase) 27 | require.Equal(t, "p4ssw0rd", walletPassword) 28 | } 29 | 30 | func writeToTempFile(t *testing.T, data []byte) string { 31 | tempFileName, err := os.CreateTemp("", "*.txt") 32 | require.NoError(t, err) 33 | 34 | err = os.WriteFile(tempFileName.Name(), data, 0600) 35 | require.NoError(t, err) 36 | 37 | return tempFileName.Name() 38 | } 39 | -------------------------------------------------------------------------------- /cmd_load_secret.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | 6 | "github.com/jessevdk/go-flags" 7 | ) 8 | 9 | type loadSecretCommand struct { 10 | Source string `long:"source" short:"s" description:"Secret storage source" choice:"k8s"` 11 | K8s *k8sSecretOptions `group:"Flags for looking up the secret as a value inside a Kubernetes Secret (use when --source=k8s)" namespace:"k8s"` 12 | Output string `long:"output" short:"o" description:"Output format" choice:"raw" choice:"json"` 13 | } 14 | 15 | func newLoadSecretCommand() *loadSecretCommand { 16 | return &loadSecretCommand{ 17 | Source: storageK8s, 18 | K8s: &k8sSecretOptions{ 19 | Namespace: defaultK8sNamespace, 20 | }, 21 | Output: outputFormatRaw, 22 | } 23 | } 24 | 25 | func (x *loadSecretCommand) Register(parser *flags.Parser) error { 26 | _, err := parser.AddCommand( 27 | "load-secret", 28 | "Load a secret from external secrets storage", 29 | "Load a secret from the selected external secrets storage and "+ 30 | "print it to stdout, either as raw text or formatted "+ 31 | "as JSON", 32 | x, 33 
| ) 34 | return err 35 | } 36 | 37 | func (x *loadSecretCommand) Execute(_ []string) error { 38 | switch x.Source { 39 | case storageK8s: 40 | objectOpts := &k8sObjectOptions{ 41 | Namespace: x.K8s.Namespace, 42 | Name: x.K8s.SecretName, 43 | KeyName: x.K8s.SecretKeyName, 44 | Base64: x.K8s.Base64, 45 | ObjectType: ObjectTypeSecret, 46 | } 47 | 48 | content, secret, err := readK8s(objectOpts) 49 | if err != nil { 50 | return fmt.Errorf("error reading secret %s in "+ 51 | "namespace %s: %v", x.K8s.SecretName, 52 | x.K8s.Namespace, err) 53 | } 54 | 55 | if x.Output == outputFormatJSON { 56 | content, err = asJSON(&struct { 57 | *jsonK8sObject `json:",inline"` 58 | Value string `json:"value"` 59 | }{ 60 | jsonK8sObject: secret, 61 | Value: content, 62 | }) 63 | if err != nil { 64 | return fmt.Errorf("error encoding as JSON: %v", 65 | err) 66 | } 67 | } 68 | 69 | fmt.Printf("%s\n", content) 70 | 71 | return nil 72 | 73 | default: 74 | return fmt.Errorf("invalid secret storage source %s", x.Source) 75 | } 76 | } 77 | -------------------------------------------------------------------------------- /cmd_migrate_db.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "context" 5 | "errors" 6 | "fmt" 7 | "net/http" 8 | _ "net/http/pprof" // Register pprof handlers 9 | "os" 10 | "path/filepath" 11 | "strings" 12 | "time" 13 | 14 | "github.com/btcsuite/btcd/btcutil" 15 | "github.com/btcsuite/btclog/v2" 16 | "github.com/jessevdk/go-flags" 17 | "github.com/lightninglabs/lndinit/migratekvdb" 18 | "github.com/lightningnetwork/lnd/channeldb" 19 | "github.com/lightningnetwork/lnd/healthcheck" 20 | "github.com/lightningnetwork/lnd/kvdb" 21 | "github.com/lightningnetwork/lnd/kvdb/postgres" 22 | "github.com/lightningnetwork/lnd/kvdb/sqlbase" 23 | "github.com/lightningnetwork/lnd/kvdb/sqlite" 24 | "github.com/lightningnetwork/lnd/lncfg" 25 | "github.com/lightningnetwork/lnd/lnrpc" 26 | 
	"github.com/lightningnetwork/lnd/signal"
	"github.com/lightningnetwork/lnd/watchtower/wtdb"
	"go.etcd.io/bbolt"
)

var (
	// alreadyMigratedKey is the key under which we add a tag in the target/
	// destination DB after we've successfully and completely migrated it
	// from a source DB.
	alreadyMigratedKey = []byte("data-migration-already-migrated")

	// defaultDataDir is the default data directory for lnd.
	defaultDataDir = filepath.Join(btcutil.AppDataDir("lnd", false), "data")
)

const (
	// walletMetaBucket is the name of the meta bucket in the wallet db
	// for the wallet ready marker.
	walletMetaBucket = "lnwallet"

	// walletReadyKey is the key in the wallet meta bucket for the wallet
	// ready marker.
	walletReadyKey = "ready"
)

// Bolt is the configuration for a bolt database.
type Bolt struct {
	DBTimeout time.Duration `long:"dbtimeout" description:"Specify the timeout value used when opening the database."`
	DataDir   string        `long:"data-dir" description:"Lnd data dir where bolt dbs are located."`
	TowerDir  string        `long:"tower-dir" description:"Lnd watchtower dir where bolt dbs for the watchtower server are located."`
}

// Sqlite is the configuration for a sqlite database.
type Sqlite struct {
	DataDir  string         `long:"data-dir" description:"Lnd data dir where sqlite dbs are located."`
	TowerDir string         `long:"tower-dir" description:"Lnd watchtower dir where sqlite dbs for the watchtower server are located."`
	Config   *sqlite.Config `group:"sqlite-config" namespace:"sqlite-config" description:"Sqlite config."`
}

// SourceDB represents the source database, which can only be bolt for now.
type SourceDB struct {
	Backend string `long:"backend" description:"The source database backend." choice:"bolt"`
	Bolt    *Bolt  `group:"bolt" namespace:"bolt" description:"Bolt settings."`
}

// DestDB represents the destination database, which can be either postgres or
// sqlite.
type DestDB struct {
	Backend  string           `long:"backend" description:"The destination database backend." choice:"postgres" choice:"sqlite"`
	Postgres *postgres.Config `group:"postgres" namespace:"postgres" description:"Postgres settings."`
	Sqlite   *Sqlite          `group:"sqlite" namespace:"sqlite" description:"Sqlite settings."`
}

// Init should be called upon start to pre-initialize database for sql
// backends. If max connections are not set, the amount of connections will be
// unlimited however we only use one connection during the migration.
//
// NOTE: this currently always returns nil; the error return is kept for
// forward compatibility.
func (db *DestDB) Init() error {
	switch {
	case db.Backend == lncfg.PostgresBackend:
		sqlbase.Init(db.Postgres.MaxConnections)

	case db.Backend == lncfg.SqliteBackend:
		sqlbase.Init(db.Sqlite.Config.MaxConnections)
	}

	return nil
}

// migrateDBCommand holds all flags of the migrate-db sub command.
type migrateDBCommand struct {
	Source            *SourceDB `group:"source" namespace:"source" long:"" short:"" description:""`
	Dest              *DestDB   `group:"dest" namespace:"dest" long:"" short:"" description:""`
	Network           string    `long:"network" short:"n" description:"Network of the db files to migrate (used to navigate into the right directory)"`
	PprofPort         int       `long:"pprof-port" description:"Enable pprof profiling on the specified port"`
	ForceNewMigration bool      `long:"force-new-migration" description:"Force a new migration from the beginning of the source DB so the resume state will be discarded"`
	ForceVerifyDB     bool      `long:"force-verify-db" description:"Force a verification verifies two already marked (tombstoned and already migrated) dbs to make sure that the source db equals the content of the destination db"`
	ChunkSize         uint64    `long:"chunk-size" description:"Chunk size for the migration in bytes"`
}

// newMigrateDBCommand returns a migrate-db command instance with the defaults:
// bolt source in lnd's default data dir, postgres destination, mainnet.
func newMigrateDBCommand() *migrateDBCommand {
	return &migrateDBCommand{
		Source: &SourceDB{
			Backend: lncfg.BoltBackend,
			Bolt: &Bolt{
				DBTimeout: kvdb.DefaultDBTimeout,
				TowerDir:  defaultDataDir,
				DataDir:   defaultDataDir,
			},
		},
		Dest: &DestDB{
			Backend:  lncfg.PostgresBackend,
			Postgres: &postgres.Config{},
			Sqlite: &Sqlite{
				Config:   &sqlite.Config{},
				TowerDir: defaultDataDir,
				DataDir:  defaultDataDir,
			},
		},
		Network: "mainnet",
	}
}

// Register adds the migrate-db sub command to the given CLI parser.
func (x *migrateDBCommand) Register(parser *flags.Parser) error {
	_, err := parser.AddCommand(
		"migrate-db",
		"Migrate the complete database state of lnd to a new backend",
		`
Migrate the full database state of lnd from a source (for example the
set of bolt database files such as channel.db and wallet.db) database
to a SQL destination database.

IMPORTANT: Please read the data migration guide located in the file
docs/data-migration.md of the main lnd repository before using this
command!

NOTE: The migration can take a long time depending on the amount of data
that needs to be written! The migration happens in chunks therefore it
can be resumed in case of an interruption. The migration also includes
a verification to assure that the migration is consistent.
As long as NEITHER the source nor destination database has been started/
run with lnd, the migration can be repeated/resumed in case of an error
since the data will just be overwritten again in the destination.

Once a database was successfully and completely migrated from the source
to the destination, the source will be marked with a 'tombstone' tag
while the destination will get an 'already migrated' tag.
151 | A database with a tombstone cannot be started with lnd anymore to 152 | prevent from an old state being used by accident. 153 | To prevent overwriting a destination database by accident, the same 154 | database/namespace pair cannot be used as the target of a data migration 155 | twice, which is checked through the 'already migrated' tag.`, 156 | x, 157 | ) 158 | return err 159 | } 160 | 161 | // optionalDBs are the databases that can be skipped if they don't 162 | // exist. 163 | var ( 164 | optionalDBs = map[string]bool{ 165 | lncfg.NSTowerClientDB: true, 166 | lncfg.NSTowerServerDB: true, 167 | lncfg.NSNeutrinoDB: true, 168 | } 169 | 170 | // allDBPrefixes defines all databases that should be migrated. 171 | allDBPrefixes = []string{ 172 | lncfg.NSChannelDB, 173 | lncfg.NSMacaroonDB, 174 | lncfg.NSDecayedLogDB, 175 | lncfg.NSTowerClientDB, 176 | lncfg.NSTowerServerDB, 177 | lncfg.NSWalletDB, 178 | lncfg.NSNeutrinoDB, 179 | } 180 | ) 181 | 182 | func (x *migrateDBCommand) Execute(_ []string) error { 183 | // We currently only allow migrations from bolt to sqlite/postgres. 184 | if err := x.validateDBBackends(); err != nil { 185 | return fmt.Errorf("invalid database configuration: %w", err) 186 | } 187 | 188 | // We keep track of the DBs that we have migrated. 189 | migratedDBs := []string{} 190 | 191 | // Add pprof server if enabled. 192 | if x.PprofPort > 0 { 193 | go func() { 194 | pprofAddr := fmt.Sprintf("localhost:%d", x.PprofPort) 195 | logger.Infof("Starting pprof server on %s", pprofAddr) 196 | err := http.ListenAndServe(pprofAddr, nil) 197 | if err != nil { 198 | logger.Errorf("Error starting pprof "+ 199 | "server: %v", err) 200 | } 201 | }() 202 | } 203 | 204 | // get the context for the migration which is tied to the signal 205 | // interceptor. 
206 | ctx := getContext() 207 | 208 | for _, prefix := range allDBPrefixes { 209 | logger.Infof("Attempting to migrate DB with prefix `%s`", prefix) 210 | 211 | // Create a separate meta db for each db to store the 212 | // migration/verification state. This db will be deleted 213 | // after the migration is successful. 214 | metaDBPath := filepath.Join( 215 | x.Source.Bolt.DataDir, prefix+"-migration-meta.db", 216 | ) 217 | 218 | srcDb, err := openSourceDb( 219 | x.Source, prefix, x.Network, true, 220 | ) 221 | if err == kvdb.ErrDbDoesNotExist { 222 | // Only skip if it's an optional because it's not 223 | // required to run a wtclient or wtserver for example. 224 | if optionalDBs[prefix] { 225 | logger.Warnf("Skipping optional DB %s: not "+ 226 | "found", prefix) 227 | continue 228 | } 229 | } 230 | if err != nil { 231 | return fmt.Errorf("failed to open source db with "+ 232 | "prefix `%s`: %w", prefix, err) 233 | } 234 | defer srcDb.Close() 235 | logger.Infof("Opened source DB with prefix `%s` successfully", 236 | prefix) 237 | 238 | // We open the destination DB as well to make sure both that 239 | // both DBs are either marked or not. 240 | destDb, err := openDestDb(ctx, x.Dest, prefix, x.Network) 241 | if err != nil { 242 | return fmt.Errorf("failed to open destination "+ 243 | "db with prefix `%s`: %w", prefix, err) 244 | } 245 | defer destDb.Close() 246 | 247 | logger.Infof("Opened destination DB with prefix `%s` "+ 248 | "successfully", prefix) 249 | 250 | // Check that the source database and the destination database 251 | // are either both marked with a tombstone or a migrated marker. 
252 | logger.Infof("Checking tombstone marker on source DB and "+ 253 | "migrated marker on destination DB with prefix `%s`", 254 | prefix) 255 | 256 | sourceMarker, err := checkMarkerPresent( 257 | srcDb, channeldb.TombstoneKey, 258 | ) 259 | sourceDbTombstone := err == nil 260 | if err != nil && !errors.Is(err, channeldb.ErrMarkerNotPresent) { 261 | return err 262 | } 263 | 264 | // Also make sure that the destination DB hasn't been marked as 265 | // successfully having been the target of a migration. We only 266 | // mark a destination DB as successfully migrated at the end of 267 | // a successful and complete migration. 268 | destMarker, err := checkMarkerPresent( 269 | destDb, alreadyMigratedKey, 270 | ) 271 | destDbMigrated := err == nil 272 | if err != nil && !errors.Is(err, channeldb.ErrMarkerNotPresent) { 273 | return err 274 | } 275 | switch { 276 | case sourceDbTombstone && destDbMigrated: 277 | if x.ForceVerifyDB { 278 | // Make sure the meta db is not deleted so we 279 | // can get the migration stats. 280 | // We delete the verification complete marker 281 | // only when this marker is set we clear the 282 | // verification state. then we should be good 283 | // to go. 284 | if !lnrpc.FileExists(metaDBPath) { 285 | return fmt.Errorf("cannot verify migration "+ 286 | "for db with prefix `%s` because the "+ 287 | "migration meta db does not exist", 288 | prefix) 289 | } 290 | 291 | // Open the db where we store the migration/verification state. 292 | metaDB, err := bbolt.Open(metaDBPath, 0600, nil) 293 | if err != nil { 294 | logger.Errorf("Error opening db: %v", err) 295 | } 296 | defer metaDB.Close() 297 | 298 | logger.Infof("Opened meta db at path: %s", metaDBPath) 299 | 300 | // Verify the migration. 
301 | migrator, err := migratekvdb.New(migratekvdb.Config{ 302 | Logger: logger.SubSystem("MIGKV-" + prefix), 303 | ChunkSize: x.ChunkSize, 304 | MetaDB: metaDB, 305 | DBPrefixName: prefix, 306 | }) 307 | if err != nil { 308 | return err 309 | } 310 | 311 | err = migrator.VerifyMigration(ctx, srcDb, destDb, true) 312 | if err != nil { 313 | return err 314 | } 315 | 316 | logger.Infof("Verification of migration of db with prefix "+ 317 | "`%s` completed", prefix) 318 | 319 | } 320 | logger.Infof("Skipping DB with prefix `%s` because the "+ 321 | "source DB is marked with a tombstone and the "+ 322 | "destination DB is marked as already migrated. "+ 323 | "Tag reads: source: `%s`, destination: `%s`", 324 | prefix, sourceMarker, destMarker) 325 | 326 | migratedDBs = append(migratedDBs, prefix) 327 | 328 | continue 329 | 330 | case sourceDbTombstone && !destDbMigrated: 331 | return fmt.Errorf("DB with prefix `%s` source DB is "+ 332 | "marked with a tombstone but the "+ 333 | "destination DB is not marked as already "+ 334 | "migrated. This is not allowed. Tag reads: "+ 335 | "source: `%s`, destination: `%s`", 336 | prefix, sourceMarker, destMarker) 337 | 338 | case !sourceDbTombstone && destDbMigrated: 339 | return fmt.Errorf("DB with prefix `%s` source DB is "+ 340 | "not marked with a tombstone but the "+ 341 | "destination DB is marked as already migrated. "+ 342 | "This is not allowed. Tag reads: source: `%s`, "+ 343 | "destination: `%s`", 344 | prefix, sourceMarker, destMarker) 345 | } 346 | 347 | // Check that the source DB has had all its schema migrations 348 | // applied before we migrate any of its data. Currently only 349 | // migration of the channel.db and the watchtower.db exist. 350 | // Check channel.db migrations. 
351 | if prefix == lncfg.NSChannelDB { 352 | logger.Info("Checking DB version of source DB " + 353 | "(channel.db)") 354 | 355 | err := checkChannelDBMigrationsApplied(srcDb) 356 | if err != nil { 357 | return err 358 | } 359 | } 360 | 361 | // Check watchtower client DB migrations. 362 | if prefix == lncfg.NSTowerClientDB { 363 | logger.Info("Checking DB version of source DB " + 364 | "(wtclient.db)") 365 | 366 | err := checkWTClientDBMigrationsApplied(srcDb) 367 | if err != nil { 368 | return err 369 | } 370 | } 371 | 372 | // In case we want to start a new migration we delete the 373 | // migration meta db if it exits. This can only be done if 374 | // the db is not already successfully migrated otherwise 375 | // previous marker checks will prevent us to reach this point. 376 | if x.ForceNewMigration { 377 | // Before proceeding with the migration we check that 378 | // the destination db is empty. 379 | var topLevelBuckets [][]byte 380 | err := kvdb.View(destDb, func(tx kvdb.RTx) error { 381 | return tx.ForEachBucket(func(bucket []byte) error { 382 | bucketCopy := make([]byte, len(bucket)) 383 | copy(bucketCopy, bucket) 384 | topLevelBuckets = append( 385 | topLevelBuckets, bucketCopy, 386 | ) 387 | 388 | return nil 389 | }) 390 | }, func() {}) 391 | if err != nil { 392 | return err 393 | } 394 | 395 | if len(topLevelBuckets) > 0 { 396 | logger.Infof("Cannot force new migration "+ 397 | "of db with prefix `%s` because the "+ 398 | "destination db has data - delete "+ 399 | "it manually first", prefix) 400 | 401 | return fmt.Errorf("destination db with prefix `%s` "+ 402 | "has data, refusing to overwrite it", prefix) 403 | } 404 | 405 | logger.Info("Forcing new migration, deleting " + 406 | "migration meta db and all previous data from" + 407 | " the destination db") 408 | 409 | if lnrpc.FileExists(metaDBPath) { 410 | err := os.Remove(metaDBPath) 411 | if err != nil { 412 | return fmt.Errorf("failed to delete "+ 413 | "migration meta db: %v", err) 414 | } 415 
| 416 | logger.Infof("Deleted migration meta db at "+ 417 | "path: %s", metaDBPath) 418 | } 419 | } 420 | 421 | // Open the db where we store the migration/verification state. 422 | metaDB, err := bbolt.Open(metaDBPath, 0600, nil) 423 | if err != nil { 424 | logger.Errorf("Error opening db: %v", err) 425 | } 426 | defer metaDB.Close() 427 | 428 | logger.Infof("Opened meta db at path: %s", metaDBPath) 429 | 430 | // Configure and run migration. 431 | cfg := migratekvdb.Config{ 432 | Logger: logger.SubSystem("MIGKV-" + prefix), 433 | ChunkSize: x.ChunkSize, 434 | MetaDB: metaDB, 435 | DBPrefixName: prefix, 436 | } 437 | 438 | migrator, err := migratekvdb.New(cfg) 439 | if err != nil { 440 | return err 441 | } 442 | 443 | err = migrator.Migrate(ctx, srcDb, destDb) 444 | if err != nil { 445 | return err 446 | } 447 | logger.Infof("Migration of db with prefix %s completed", prefix) 448 | 449 | // We migrated the DB successfully, now we verify the migration. 450 | err = migrator.VerifyMigration( 451 | ctx, srcDb, destDb, false, 452 | ) 453 | if err != nil { 454 | return err 455 | } 456 | 457 | logger.Infof("Verification of migration of db with prefix "+ 458 | "`%s` completed", prefix) 459 | 460 | // Migrate wallet created marker. This is done after the 461 | // migration to ensure the verification of the migration 462 | // succeeds. 463 | // 464 | // NOTE: We always need to add the wallet marker if the db is 465 | // not a `bolt` db, which is already resticted by the 466 | // destination db config. 467 | if prefix == lncfg.NSWalletDB { 468 | err := createWalletMarker(destDb, logger) 469 | if err != nil { 470 | return err 471 | } 472 | } 473 | 474 | // If we get here, we've successfully migrated the DB and can 475 | // now set the tombstone marker on the source database and the 476 | // already migrated marker on the target database. 477 | // We need to reopen the db in write mode. 
478 | srcDb.Close() 479 | logger.Infof("We are now opening the source db with prefix `%s` "+ 480 | "in write mode to set the tombstone marker. This "+ 481 | "might take a while (~10 minutes for large databases) "+ 482 | "to sync the freelist..., so please be patient it is the "+ 483 | "final step for this db.", prefix) 484 | 485 | srcDb, err = openSourceDb(x.Source, prefix, x.Network, false) 486 | if err != nil { 487 | return err 488 | } 489 | 490 | if err := addMarker(srcDb, channeldb.TombstoneKey); err != nil { 491 | return err 492 | } 493 | 494 | // Add already migrated marker to the destination DB. 495 | if err := addMarker(destDb, alreadyMigratedKey); err != nil { 496 | return err 497 | } 498 | 499 | logger.Infof("Migration of DB with prefix `%s` completed "+ 500 | "successfully", prefix) 501 | 502 | migratedDBs = append(migratedDBs, prefix) 503 | 504 | // Removing meta db. 505 | err = metaDB.Close() 506 | if err != nil { 507 | logger.Errorf("Error closing meta db: %v", err) 508 | } 509 | 510 | // Close the db connection to cleanup the state. 511 | err = srcDb.Close() 512 | if err != nil { 513 | logger.Errorf("Error closing source db: %v", err) 514 | } 515 | err = destDb.Close() 516 | if err != nil { 517 | logger.Errorf("Error closing destination db: %v", err) 518 | } 519 | 520 | // Create migration completed file, this will only create the 521 | // file for bolt databases. 522 | if err := createMigrationCompletedFile(x.Source, prefix, 523 | x.Network, x.Dest.Backend); err != nil { 524 | return err 525 | } 526 | } 527 | 528 | logger.Info("!!!Migration of all mandatory db parts completed " + 529 | "successfully!!!") 530 | 531 | logger.Infof("Migrated DBs: %v", migratedDBs) 532 | 533 | return nil 534 | } 535 | 536 | // validateDBBackends ensures that only migrations from bolt to sqlite/postgres 537 | // are allowed. 
// validateDBBackends enforces the only supported migration direction:
// the source must be a bolt database and the destination must be either
// sqlite or postgres.
func (x *migrateDBCommand) validateDBBackends() error {
	// Source must be bolt
	if x.Source.Backend != lncfg.BoltBackend {
		return fmt.Errorf("source database must be bolt, got: %s",
			x.Source.Backend)
	}

	// Destination must be sqlite or postgres.
	switch x.Dest.Backend {
	case lncfg.SqliteBackend, lncfg.PostgresBackend:
		return nil
	default:
		return fmt.Errorf("destination database must be sqlite or "+
			"postgres, got: %s", x.Dest.Backend)
	}
}

// openSourceDb opens the source database and also checks if there is enough
// free space on the source directory to hold a copy of the database.
//
// Parameters:
//   - cfg: source (bolt) configuration used to resolve the db file path.
//   - prefix: one of the lncfg.NS* namespace prefixes.
//   - network: the bitcoin network sub-directory (e.g. "mainnet").
//   - readonly: whether to open the bolt file read-only.
func openSourceDb(cfg *SourceDB, prefix, network string,
	readonly bool) (kvdb.Backend, error) {

	path := getBoltDBPath(cfg, prefix, network)
	if path == "" {
		return nil, fmt.Errorf("unknown prefix: %s", prefix)
	}

	const (
		noFreelistSync = true
		timeout        = time.Minute
	)

	// kvdb.Open takes backend-specific variadic arguments; for bolt these
	// are: path, no-freelist-sync, timeout and the read-only flag.
	args := []interface{}{
		path, noFreelistSync, timeout, readonly,
	}
	backend := kvdb.BoltBackendName
	logger.Infof("Opening bolt backend at %s for prefix '%s'",
		path, prefix)

	db, err := kvdb.Open(backend, args...)
	if err != nil {
		return nil, err
	}

	// Get the size of the database.
	fi, err := os.Stat(path)
	if err != nil {
		return nil, fmt.Errorf("error determining source database "+
			"with prefix %s: %v", prefix, err)
	}
	dbSize := fi.Size()

	// Because the destination can also just be a postgres dsn, we just
	// check if the source dir has enough free space to hold a copy of the
	// db.
	freeSpace, err := healthcheck.AvailableDiskSpace(cfg.Bolt.DataDir)
	if err != nil {
		return nil, fmt.Errorf("error determining source directory "+
			"free space: %v", err)
	}

	if freeSpace < uint64(dbSize) {
		return nil, fmt.Errorf("not enough free space on source "+
			"directory to migrate db: %d bytes required, "+
			"%d bytes available", dbSize, freeSpace)
	}

	logger.Debugf("Source DB size: %d bytes", dbSize)

	return db, nil
}

// openDestDb opens the different types of databases.
//
// For postgres the namespace (prefix) maps to a schema/table prefix in a
// single DSN; for sqlite the prefix selects which db file (and directory)
// the data lands in, mirroring lnd's own file layout.
func openDestDb(ctx context.Context, cfg *DestDB, prefix,
	network string) (kvdb.Backend, error) {

	backend := cfg.Backend

	// Init the db connections for sql backends.
	err := cfg.Init()
	if err != nil {
		return nil, err
	}

	// Settings to open a particular db backend.
	var args []interface{}

	switch backend {
	case kvdb.PostgresBackendName:
		args = []interface{}{
			ctx,
			&postgres.Config{
				Dsn:            cfg.Postgres.Dsn,
				Timeout:        time.Minute,
				MaxConnections: 10,
			},
			prefix,
		}

		logger.Infof("Opening postgres backend at `%s` with prefix `%s`",
			cfg.Postgres.Dsn, prefix)

	case kvdb.SqliteBackendName:
		// Directories where the db files are located.
		graphDir := lncfg.CleanAndExpandPath(
			filepath.Join(cfg.Sqlite.DataDir, "graph", network),
		)
		walletDir := lncfg.CleanAndExpandPath(
			filepath.Join(
				cfg.Sqlite.DataDir, "chain", "bitcoin", network,
			),
		)

		// In case the data directory was set but the watchtower is
		// still the default one, we use the data directory for the
		// watchtower as well.
		towerServerDir := lncfg.CleanAndExpandPath(
			filepath.Join(
				cfg.Sqlite.TowerDir, "watchtower", "bitcoin",
				network,
			),
		)
		if cfg.Sqlite.DataDir != defaultDataDir &&
			cfg.Sqlite.TowerDir == defaultDataDir {

			towerServerDir = lncfg.CleanAndExpandPath(
				filepath.Join(
					cfg.Sqlite.DataDir, "watchtower",
					"bitcoin", network,
				),
			)
		}

		// Map each namespace prefix to the sqlite file it lives in.
		var dbName string
		var path string
		switch prefix {
		case lncfg.NSChannelDB:
			path = graphDir
			dbName = lncfg.SqliteChannelDBName

		case lncfg.NSWalletDB:
			path = walletDir
			dbName = lncfg.SqliteChainDBName

		case lncfg.NSMacaroonDB:
			path = walletDir
			dbName = lncfg.SqliteChainDBName

		case lncfg.NSDecayedLogDB:
			path = graphDir
			dbName = lncfg.SqliteChannelDBName

		case lncfg.NSTowerClientDB:
			path = graphDir
			dbName = lncfg.SqliteChannelDBName

		case lncfg.NSTowerServerDB:
			path = towerServerDir
			dbName = lncfg.SqliteChannelDBName

		case lncfg.NSNeutrinoDB:
			path = walletDir
			dbName = lncfg.SqliteNeutrinoDBName
		}

		// We check if the path exists to avoid receiving sqlite
		// misleading errors. Because sqlite will report out of
		// memory issues if the path does not exist.
		if err := checkPathExists(path); err != nil {
			return nil, fmt.Errorf("destination directory (%s) "+
				"not found: %v", path, err)
		}

		args = []interface{}{
			ctx,
			&sqlite.Config{
				Timeout: time.Minute,
			},
			path,
			dbName,
			prefix,
		}

		logger.Infof("Opening sqlite backend at %s "+
			"for prefix '%s'", filepath.Join(path, dbName),
			prefix)

	default:
		return nil, fmt.Errorf("unknown backend: %v", backend)
	}

	return kvdb.Open(backend, args...)
}

// checkMarkerPresent checks if a marker is present in the database.
// It returns the marker's value if found, or channeldb.ErrMarkerNotPresent
// (wrapped) if the marker does not exist.
func checkMarkerPresent(db kvdb.Backend, markerKey []byte) ([]byte, error) {
	var (
		markerValue []byte
		err         error
	)
	err = kvdb.View(db, func(tx kvdb.RTx) error {
		markerValue, err = channeldb.CheckMarkerPresent(tx, markerKey)
		return err
	}, func() {})
	if err != nil {
		return nil, err
	}

	return markerValue, nil
}

// addMarker adds a marker to the database. The marker value records the tool
// name and an RFC3339 timestamp of when it was written.
func addMarker(db kvdb.Backend, markerKey []byte) error {
	err := kvdb.Update(db, func(tx kvdb.RwTx) error {
		markerValue := []byte(
			fmt.Sprintf("lndinit migrate-db %s", time.Now().
				Format(time.RFC3339)),
		)

		return channeldb.AddMarker(tx, markerKey, markerValue)
	}, func() {})
	if err != nil {
		return err
	}

	return nil
}

// createWalletMarker creates a marker in the wallet database to indicate it's
// ready for use. This is only needed for non-bolt databases.
func createWalletMarker(db kvdb.Backend, logger btclog.Logger) error {
	logger.Info("Creating 'wallet created' marker")

	err := kvdb.Update(db, func(tx kvdb.RwTx) error {
		metaBucket, err := tx.CreateTopLevelBucket(
			[]byte(walletMetaBucket),
		)
		if err != nil {
			return fmt.Errorf("failed to create meta "+
				"bucket: %w", err)
		}

		// Key and value are identical by convention; only the key's
		// presence is checked by readers.
		return metaBucket.Put(
			[]byte(walletReadyKey), []byte(walletReadyKey),
		)
	}, func() {})
	if err != nil {
		return fmt.Errorf("failed to create wallet marker: %w", err)
	}

	logger.Info("Successfully created 'wallet created' marker")

	return nil
}

// checkChannelDBMigrationsApplied checks if the channel DB migrations are
// applied.
// checkChannelDBMigrationsApplied returns an error if the channel DB's stored
// schema version is not the latest version known to the linked channeldb
// package, in which case the user must upgrade with lnd first.
func checkChannelDBMigrationsApplied(db kvdb.Backend) error {
	var meta channeldb.Meta
	err := kvdb.View(db, func(tx kvdb.RTx) error {
		return channeldb.FetchMeta(&meta, tx)
	}, func() {
		// Reset closure: clear any partial state on tx retry.
		meta = channeldb.Meta{}
	})
	if err != nil {
		return err
	}

	if meta.DbVersionNumber != channeldb.LatestDBVersion() {
		return fmt.Errorf("refusing to migrate source database with "+
			"version %d while latest known DB version is %d; "+
			"please upgrade the DB before using the data "+
			"migration tool", meta.DbVersionNumber,
			channeldb.LatestDBVersion())
	}

	return nil
}

// checkWTClientDBMigrationsApplied checks if the watchtower client DB
// migrations are applied.
func checkWTClientDBMigrationsApplied(db kvdb.Backend) error {
	version, err := wtdb.CurrentDatabaseVersion(db)
	if err != nil {
		return err
	}

	if version != wtdb.LatestDBMigrationVersion() {
		return fmt.Errorf("refusing to migrate source database with "+
			"version %d while latest known DB version is %d; "+
			"please upgrade the DB before using the data "+
			"migration tool", version, wtdb.LatestDBMigrationVersion())
	}

	return nil
}

// getBoltDBPath returns the full path for a given database type and prefix.
// An empty string is returned for unknown prefixes; callers must check this.
func getBoltDBPath(cfg *SourceDB, prefix, network string) string {
	// Directories where the db files are located.
	graphDir := lncfg.CleanAndExpandPath(
		filepath.Join(cfg.Bolt.DataDir, "graph", network),
	)
	walletDir := lncfg.CleanAndExpandPath(
		filepath.Join(
			cfg.Bolt.DataDir, "chain", "bitcoin", network,
		),
	)

	towerServerDir := lncfg.CleanAndExpandPath(
		filepath.Join(
			cfg.Bolt.TowerDir, "watchtower", "bitcoin",
			network,
		),
	)
	// If a custom data dir was given but the tower dir was left at its
	// default, derive the tower dir from the data dir (mirrors the
	// equivalent logic for the sqlite destination).
	if cfg.Bolt.DataDir != defaultDataDir &&
		cfg.Bolt.TowerDir == defaultDataDir {

		towerServerDir = lncfg.CleanAndExpandPath(
			filepath.Join(
				cfg.Bolt.DataDir, "watchtower",
				"bitcoin", network,
			),
		)
	}

	switch prefix {
	case lncfg.NSChannelDB:
		return filepath.Join(graphDir, lncfg.ChannelDBName)

	case lncfg.NSWalletDB:
		return filepath.Join(walletDir, lncfg.WalletDBName)

	case lncfg.NSMacaroonDB:
		return filepath.Join(walletDir, lncfg.MacaroonDBName)

	case lncfg.NSDecayedLogDB:
		return filepath.Join(graphDir, lncfg.DecayedLogDbName)

	case lncfg.NSTowerClientDB:
		return filepath.Join(graphDir, lncfg.TowerClientDBName)

	case lncfg.NSTowerServerDB:
		return filepath.Join(towerServerDir, lncfg.TowerServerDBName)

	case lncfg.NSNeutrinoDB:
		// TODO(ziggie): Can be updated as soon as new LND vesion is
		// available.
		return filepath.Join(walletDir, "neutrino.db")
	}

	return ""
}

// createMigrationCompletedFile creates an empty file indicating that a bolt
// database was successfully migrated to a different backend. This is only
// created when migrating FROM a bolt database TO another backend type.
func createMigrationCompletedFile(sourceDB *SourceDB, prefix,
	network, targetType string) error {

	// Only create completion file when migrating FROM bolt.
	if sourceDB.Backend != lncfg.BoltBackend {
		return nil
	}

	dbPath := getBoltDBPath(sourceDB, prefix, network)
	dir := filepath.Dir(dbPath)
	dbName := filepath.Base(dbPath)

	// Marker name encodes the original db file, the target backend and a
	// minute-resolution timestamp, e.g. "channel.db.migrated-to-postgres-...".
	timestamp := time.Now().Format("2006-01-02-15-04")
	markerName := fmt.Sprintf(
		"%s.migrated-to-%s-%s", dbName, targetType, timestamp,
	)
	markerPath := filepath.Join(dir, markerName)

	f, err := os.Create(markerPath)
	if err != nil {
		return fmt.Errorf("failed to create migration completed "+
			"file at %s: %w", markerPath, err)
	}
	defer f.Close()

	logger.Infof("Created migration completed file at %s", markerPath)

	return nil
}

// checkPathExists verifies that the directory exists.
// NOTE: it checks the parent directory of the given path (filepath.Dir), so
// callers may pass either a directory or a file path inside it.
func checkPathExists(path string) error {
	dir := filepath.Dir(path)

	if _, err := os.Stat(dir); os.IsNotExist(err) {
		return fmt.Errorf("directory %s does not exist, "+
			"please create it first", dir)
	} else if err != nil {
		return fmt.Errorf("failed to check directory %s: %v", dir, err)
	}

	return nil
}

// getContext returns a context that is cancelled when an os signal is caught
// by the signal interceptor.
func getContext() context.Context {
	// Hook interceptor for os signals. We need to except the case where
	// we call the function multiple times.
	shutdownInterceptor, err := signal.Intercept()
	// NOTE(review): this bare ShutdownChannel() call discards its result
	// and runs before err is checked — it looks like dead code; confirm
	// whether it can be removed.
	shutdownInterceptor.ShutdownChannel()

	// TODO(ziggie): This is a hack to avoid an error when running the
	// migration tests for sqlite and postgres, because both are using the
	// same main routine.
	if err != nil && !strings.Contains(err.Error(), "intercept "+
		"already started") {

		_, _ = fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}

	// Cancel the returned context as soon as a shutdown signal arrives.
	ctxc, cancel := context.WithCancel(context.Background())
	go func() {
		<-shutdownInterceptor.ShutdownChannel()
		cancel()
	}()
	return ctxc
}
--------------------------------------------------------------------------------
/cmd_migrate_db_postgres_test.go:
--------------------------------------------------------------------------------
//go:build kvdb_postgres

package main

import (
	"context"
	"crypto/rand"
	"database/sql"
	"encoding/hex"
	"fmt"
	"testing"

	embeddedpostgres "github.com/fergusstrange/embedded-postgres"
	"github.com/lightningnetwork/lnd/kvdb"
	"github.com/lightningnetwork/lnd/kvdb/postgres"
	"github.com/lightningnetwork/lnd/kvdb/sqlbase"
	"github.com/lightningnetwork/lnd/lncfg"
	"github.com/stretchr/testify/require"
)

const (
	testMaxConnections = 100
	testDsnTemplate    = "postgres://postgres:postgres@127.0.0.1:9877/%s?sslmode=disable"
)

// PostgresTestSetup holds the test configuration for Postgres.
type PostgresTestSetup struct {
	tempDir  string               // temp dir holding the copied bolt test data
	dbName   string               // randomly generated per-test database name
	postgres *embeddedpostgres.EmbeddedPostgres
	stopFunc func() error         // stops the embedded postgres instance
}

// setupEmbeddedPostgres initializes and starts the embedded postgres instance.
// The instance listens on the fixed port 9877 used by testDsnTemplate.
func setupEmbeddedPostgres(t *testing.T) *PostgresTestSetup {
	sqlbase.Init(testMaxConnections)

	setup := &PostgresTestSetup{}

	// Initialize embedded postgres
	setup.postgres = embeddedpostgres.NewDatabase(
		embeddedpostgres.DefaultConfig().
			Port(9877).
			StartParameters(map[string]string{
				"max_connections": fmt.Sprintf("%d", testMaxConnections),
			}),
	)

	// Start postgres
	err := setup.postgres.Start()
	require.NoError(t, err, "failed to start postgres")

	setup.stopFunc = setup.postgres.Stop

	return setup
}

// createTestDatabase creates a new random test database.
func (p *PostgresTestSetup) createTestDatabase(t *testing.T) {
	// Generate random database name
	randBytes := make([]byte, 8)
	_, err := rand.Read(randBytes)
	require.NoError(t, err)
	p.dbName = "test_" + hex.EncodeToString(randBytes)

	// Create the database, connecting through the default "postgres" db.
	dbConn, err := sql.Open("pgx", p.getDsn("postgres"))
	require.NoError(t, err)
	defer dbConn.Close()

	_, err = dbConn.ExecContext(
		context.Background(),
		"CREATE DATABASE "+p.dbName,
	)
	require.NoError(t, err)
}

// setupTestDir creates and sets up the temporary test directory.
func (p *PostgresTestSetup) setupTestDir(t *testing.T) {
	p.tempDir = setupTestData(t)
}

// getDsn returns the DSN for the specified database.
func (p *PostgresTestSetup) getDsn(dbName string) string {
	return fmt.Sprintf(testDsnTemplate, dbName)
}

// cleanup performs necessary cleanup.
func (p *PostgresTestSetup) cleanup() error {
	if p.stopFunc != nil {
		return p.stopFunc()
	}
	return nil
}

// getDBConfigs returns the source and destination DB configs.
// getDBConfigs returns the bolt source config (rooted at the temp dir) and
// the postgres destination config (pointing at the per-test database).
func (p *PostgresTestSetup) getDBConfigs() (*SourceDB, *DestDB) {
	sourceDB := &SourceDB{
		Backend: lncfg.BoltBackend,
		Bolt: &Bolt{
			DBTimeout: kvdb.DefaultDBTimeout,
			DataDir:   p.tempDir,
			TowerDir:  p.tempDir,
		},
	}

	destDB := &DestDB{
		Backend: lncfg.PostgresBackend,
		Postgres: &postgres.Config{
			Dsn: p.getDsn(p.dbName),
		},
	}

	return sourceDB, destDB
}

// TestMigrateDBPostgres tests the migration of a database from Bolt to
// Postgres.
func TestMigrateDBPostgres(t *testing.T) {
	t.Parallel()

	// Setup postgres.
	setup := setupEmbeddedPostgres(t)
	defer func() {
		require.NoError(t, setup.cleanup())
	}()

	// Setup test environment.
	setup.setupTestDir(t)
	setup.createTestDatabase(t)

	sourceDB, destDB := setup.getDBConfigs()

	// Create and run migration command.
	cmd := &migrateDBCommand{
		Source:    sourceDB,
		Dest:      destDB,
		Network:   "regtest",
		ChunkSize: 1024,
	}

	err := cmd.Execute(nil)
	require.NoError(t, err, "failed to execute migration")
}
--------------------------------------------------------------------------------
/cmd_migrate_db_sqlite_test.go:
--------------------------------------------------------------------------------
//go:build kvdb_sqlite

package main

import (
	"fmt"
	"testing"

	"github.com/lightningnetwork/lnd/kvdb"
	"github.com/lightningnetwork/lnd/kvdb/sqlite"
	"github.com/lightningnetwork/lnd/lncfg"
	"github.com/stretchr/testify/require"
)

// TestMigrateDBSqlite tests the migration of a database from Bolt to SQLite.
func TestMigrateDBSqlite(t *testing.T) {
	t.Parallel()

	// Create temp dir for test databases.
	tempDir := setupTestData(t)

	// NOTE(review): debug print — consider removing or using t.Log.
	fmt.Println("tempDir", tempDir)

	// Copy entire test directory structure.
	// NOTE(review): setupTestData above already copied "testdata/data"
	// into tempDir, so this second copy appears redundant — confirm.
	err := copyTestDataDir("testdata/data", tempDir)
	require.NoError(t, err, "failed to copy test data")

	// Set up source DB config (bolt).
	sourceDB := &SourceDB{
		Backend: lncfg.BoltBackend,
		Bolt: &Bolt{
			DBTimeout: kvdb.DefaultDBTimeout,
			DataDir:   tempDir,
			TowerDir:  tempDir,
		},
	}

	// Set up destination DB config (sqlite).
	destDB := &DestDB{
		Backend: lncfg.SqliteBackend,
		Sqlite: &Sqlite{
			DataDir:  tempDir,
			TowerDir: tempDir,
			Config:   &sqlite.Config{},
		},
	}

	// Create and run migration command.
	cmd := &migrateDBCommand{
		Source:  sourceDB,
		Dest:    destDB,
		Network: "regtest",
		// Select a small chunk size to test the chunking.
		ChunkSize: 1024,
	}

	err = cmd.Execute(nil)

	require.NoError(t, err, "migration failed")
}
--------------------------------------------------------------------------------
/cmd_migrate_db_test.go:
--------------------------------------------------------------------------------
package main

import (
	"testing"

	"github.com/stretchr/testify/require"
)

// testDataPath is the source directory containing test data.
const testDataPath = "testdata/data"

// setupTestData creates a new temp directory and copies test data into it.
// It returns the path to the new temp directory.
func setupTestData(t *testing.T) string {
	// Create unique temp dir for this test.
	tempDir := t.TempDir()
	err := copyTestDataDir(testDataPath, tempDir)

	require.NoError(t, err, "failed to copy test data")

	return tempDir
}
--------------------------------------------------------------------------------
/cmd_store_configmap.go:
--------------------------------------------------------------------------------
package main

import (
	"fmt"
	"io"
	"os"
	"path/filepath"

	"github.com/jessevdk/go-flags"
)

// targetK8sConfigmap bundles the configmap options with the optional Helm
// annotation flags.
type targetK8sConfigmap struct {
	k8sConfigmapOptions

	Helm *helmOptions `group:"Flags for configuring the Helm annotations (use when --target=k8s)" namespace:"helm"`
}

type storeConfigmapCommand struct {
	Batch     bool                `long:"batch" description:"Instead of reading one configmap from stdin, read all files of the argument list and store them as entries in the configmap"`
	Overwrite bool                `long:"overwrite" description:"Overwrite existing configmap entries instead of aborting"`
	Target    string              `long:"target" short:"t" description:"Configmap storage target" choice:"k8s"`
	K8s       *targetK8sConfigmap `group:"Flags for storing the key/value pair inside a Kubernetes Configmap (use when --target=k8s)" namespace:"k8s"`
}

// newStoreConfigmapCommand returns a storeConfigmapCommand with the default
// k8s target and namespace.
// NOTE(review): unlike newStoreSecretCommand, no default Helm options are set
// here — confirm whether that asymmetry is intentional.
func newStoreConfigmapCommand() *storeConfigmapCommand {
	return &storeConfigmapCommand{
		Target: storageK8s,
		K8s: &targetK8sConfigmap{
			k8sConfigmapOptions: k8sConfigmapOptions{
				Namespace: defaultK8sNamespace,
			},
		},
	}
}

// Register adds the store-configmap sub-command to the given flags parser.
func (x *storeConfigmapCommand) Register(parser *flags.Parser) error {
	_, err := parser.AddCommand(
		"store-configmap",
		"Write key/value pairs to a Kubernetes configmap",
		"Read a configmap from stdin and store it to the "+
			"external configmaps storage indicated by the --target "+
			"flag; if the --batch flag is used, instead of "+
			"reading a single configmap entry from stdin, each command "+
			"line argument is treated as a file and each file's "+
			"content is added to the configmap with the file's name "+
			"as the key name for the configmap entry",
		x,
	)
	return err
}

// Execute collects the configmap entries (from stdin, or from the files given
// as arguments in --batch mode) and stores them to the selected target.
func (x *storeConfigmapCommand) Execute(args []string) error {
	var entries []*entry

	switch {
	case x.Batch && len(args) == 0:
		return fmt.Errorf("at least one command line argument is " +
			"required when using --batch flag")

	case x.Batch:
		// One entry per file; the file's base name becomes the key.
		for _, file := range args {
			logger.Infof("Reading value/entry from file %s", file)
			content, err := readFile(file)
			if err != nil {
				return fmt.Errorf("cannot read file %s: %v",
					file, err)
			}

			entries = append(entries, &entry{
				key:   filepath.Base(file),
				value: content,
			})
		}

	default:
		logger.Info("Reading value/entry from stdin")
		value, err := io.ReadAll(os.Stdin)
		if err != nil {
			return fmt.Errorf("error reading entry from stdin: %v", err)
		}
		entries = append(entries, &entry{value: string(value)})
	}

	switch x.Target {
	case storageK8s:
		// Take the actual entry key from the options if we aren't in
		// batch mode.
		if len(entries) == 1 && entries[0].key == "" {
			entries[0].key = x.K8s.KeyName
		}

		return storeConfigmapsK8s(entries, x.K8s, x.Overwrite)

	default:
		return fmt.Errorf("invalid configmap storage target %s", x.Target)
	}
}

// storeConfigmapsK8s stores each entry as a key in the named Kubernetes
// configmap, honoring the overwrite flag.
func storeConfigmapsK8s(entries []*entry, opts *targetK8sConfigmap,
	overwrite bool) error {

	if opts.Name == "" {
		return fmt.Errorf("configmap name is required")
	}

	for _, entry := range entries {
		if entry.key == "" {
			return fmt.Errorf("configmap entry key name is required")
		}

		entryOpts := &k8sObjectOptions{
			Namespace:  opts.Namespace,
			Name:       opts.Name,
			KeyName:    entry.key,
			ObjectType: ObjectTypeConfigMap,
		}

		logger.Infof("Storing key with name %s to configmap %s in namespace %s",
			entryOpts.KeyName, entryOpts.Name,
			entryOpts.Namespace)

		err := saveK8s(entry.value, entryOpts, overwrite, opts.Helm)
		if err != nil {
			return fmt.Errorf("error storing key %s in configmap %s: "+
				"%v", entry.key, opts.Name, err)
		}
	}

	return nil
}
--------------------------------------------------------------------------------
/cmd_store_secret.go:
--------------------------------------------------------------------------------
package main

import (
	"bufio"
	"fmt"
	"io"
	"os"
	"path/filepath"

	"github.com/jessevdk/go-flags"
)

// targetK8sSecret bundles the secret options with the optional Helm
// annotation flags.
type targetK8sSecret struct {
	k8sSecretOptions

	Helm *helmOptions `group:"Flags for configuring the Helm annotations (use when --target=k8s)" namespace:"helm"`
}

// entry is a single key/value pair destined for a secret or configmap.
type entry struct {
	key   string
	value string
}

type storeSecretCommand struct {
	Batch     bool `long:"batch" description:"Instead of reading one secret from stdin, read all files of the argument list and store them as entries in the secret"`
	Overwrite 
bool `long:"overwrite" description:"Overwrite existing secret entries instead of aborting"` 27 | Target string `long:"target" short:"t" description:"Secret storage target" choice:"k8s"` 28 | K8s *targetK8sSecret `group:"Flags for storing the secret as a value inside a Kubernetes Secret (use when --target=k8s)" namespace:"k8s"` 29 | } 30 | 31 | func newStoreSecretCommand() *storeSecretCommand { 32 | return &storeSecretCommand{ 33 | Target: storageK8s, 34 | K8s: &targetK8sSecret{ 35 | k8sSecretOptions: k8sSecretOptions{ 36 | Namespace: defaultK8sNamespace, 37 | }, 38 | Helm: &helmOptions{ 39 | ResourcePolicy: defaultK8sResourcePolicy, 40 | }, 41 | }, 42 | } 43 | } 44 | 45 | func (x *storeSecretCommand) Register(parser *flags.Parser) error { 46 | _, err := parser.AddCommand( 47 | "store-secret", 48 | "Store secret(s) to an external secrets storage", 49 | "Read a one line secret from stdin and store it to the "+ 50 | "external secrets storage indicated by the --target "+ 51 | "flag; if the --batch flag is used, instead of "+ 52 | "reading a single secret from stdin, each command "+ 53 | "line argument is treated as a file and each file's "+ 54 | "content is added to the secret with the file's name "+ 55 | "as the secret's key name", 56 | x, 57 | ) 58 | return err 59 | } 60 | 61 | func (x *storeSecretCommand) Execute(args []string) error { 62 | var entries []*entry 63 | 64 | switch { 65 | case x.Batch && len(args) == 0: 66 | return fmt.Errorf("at least one command line argument is " + 67 | "required when using --batch flag") 68 | 69 | case x.Batch: 70 | for _, file := range args { 71 | logger.Infof("Reading secret from file %s", file) 72 | content, err := readFile(file) 73 | if err != nil { 74 | return fmt.Errorf("cannot read file %s: %v", 75 | file, err) 76 | } 77 | 78 | entries = append(entries, &entry{ 79 | key: filepath.Base(file), 80 | value: content, 81 | }) 82 | } 83 | 84 | default: 85 | logger.Info("Reading secret from stdin") 86 | secret, err := 
bufio.NewReader(os.Stdin).ReadString('\n') 87 | if err != nil && err != io.EOF { 88 | return fmt.Errorf("error reading secret from stdin: %v", 89 | err) 90 | } 91 | entries = append(entries, &entry{value: secret}) 92 | } 93 | 94 | switch x.Target { 95 | case storageK8s: 96 | // Take the actual entry key from the options if we aren't in 97 | // batch mode. 98 | if len(entries) == 1 && entries[0].key == "" { 99 | entries[0].key = x.K8s.SecretKeyName 100 | } 101 | 102 | return storeSecretsK8s(entries, x.K8s, x.Overwrite) 103 | 104 | default: 105 | return fmt.Errorf("invalid secret storage target %s", x.Target) 106 | } 107 | } 108 | 109 | func storeSecretsK8s(entries []*entry, opts *targetK8sSecret, 110 | overwrite bool) error { 111 | 112 | if opts.SecretName == "" { 113 | return fmt.Errorf("secret name is required") 114 | } 115 | 116 | for _, entry := range entries { 117 | if entry.key == "" { 118 | return fmt.Errorf("secret key name is required") 119 | } 120 | 121 | entryOpts := &k8sObjectOptions{ 122 | Namespace: opts.Namespace, 123 | Name: opts.SecretName, 124 | KeyName: entry.key, 125 | Base64: opts.Base64, 126 | ObjectType: ObjectTypeSecret, 127 | } 128 | 129 | logger.Infof("Storing key with name %s to secret %s in namespace %s", 130 | entryOpts.KeyName, entryOpts.Name, 131 | entryOpts.Namespace) 132 | err := saveK8s(entry.value, entryOpts, overwrite, opts.Helm) 133 | if err != nil { 134 | return fmt.Errorf("error storing secret %s key %s: "+ 135 | "%v", opts.SecretName, entry.key, err) 136 | } 137 | } 138 | 139 | return nil 140 | } 141 | -------------------------------------------------------------------------------- /cmd_wait_ready.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "context" 5 | "crypto/tls" 6 | "fmt" 7 | "math" 8 | "time" 9 | 10 | "github.com/jessevdk/go-flags" 11 | "github.com/lightningnetwork/lnd/lncfg" 12 | "github.com/lightningnetwork/lnd/lnrpc" 13 | 
"github.com/lightningnetwork/lnd/signal" 14 | "google.golang.org/grpc" 15 | "google.golang.org/grpc/credentials" 16 | ) 17 | 18 | var ( 19 | connectionRetryInterval = time.Millisecond * 250 20 | ) 21 | 22 | type waitReadyCommand struct { 23 | RPCServer string `long:"rpcserver" description:"The host:port of lnd's RPC listener"` 24 | Timeout time.Duration `long:"timeout" description:"The maximum time we'll wait for lnd to become ready; 0 means wait forever"` 25 | } 26 | 27 | func newWaitReadyCommand() *waitReadyCommand { 28 | return &waitReadyCommand{ 29 | RPCServer: defaultRPCServer, 30 | } 31 | } 32 | 33 | func (x *waitReadyCommand) Register(parser *flags.Parser) error { 34 | _, err := parser.AddCommand( 35 | "wait-ready", 36 | "Wait for lnd to be fully ready", 37 | "Wait for lnd to be fully started, unlocked and ready to "+ 38 | "serve RPC requests; will wait and block forever "+ 39 | "until either lnd reports its status as ready or the "+ 40 | "given timeout is reached; the RPC connection to lnd "+ 41 | "is re-tried indefinitely and errors are ignored (or "+ 42 | "logged in verbose mode) until success or timeout; "+ 43 | "requires lnd v0.13.0-beta or later to work", 44 | x, 45 | ) 46 | return err 47 | } 48 | 49 | func (x *waitReadyCommand) Execute(_ []string) error { 50 | // Since this will potentially run forever, make sure we catch any 51 | // interrupt signals. 
52 | shutdown, err := signal.Intercept() 53 | if err != nil { 54 | return fmt.Errorf("error intercepting signals: %v", err) 55 | } 56 | 57 | started := time.Now() 58 | timeout := time.Duration(math.MaxInt64) 59 | if x.Timeout > 0 { 60 | timeout = x.Timeout 61 | logger.Infof("Will time out in %v (%s)", timeout, started.Add(timeout)) 62 | } 63 | 64 | return waitUntilStatus( 65 | x.RPCServer, lnrpc.WalletState_SERVER_ACTIVE, timeout, 66 | shutdown.ShutdownChannel(), 67 | ) 68 | } 69 | 70 | func waitUntilStatus(rpcServer string, desiredState lnrpc.WalletState, 71 | timeout time.Duration, shutdown <-chan struct{}) error { 72 | 73 | logger.Infof("Waiting for lnd to become ready (want state %v)", desiredState) 74 | 75 | connectionRetryTicker := time.NewTicker(connectionRetryInterval) 76 | timeoutChan := time.After(timeout) 77 | 78 | connectionLoop: 79 | for { 80 | logger.Infof("Attempting to connect to RPC server %s", rpcServer) 81 | conn, err := getStatusConnection(rpcServer) 82 | if err != nil { 83 | logger.Errorf("Connection to lnd not successful: %v", err) 84 | 85 | select { 86 | case <-connectionRetryTicker.C: 87 | case <-timeoutChan: 88 | return fmt.Errorf("timeout reached") 89 | case <-shutdown: 90 | return nil 91 | } 92 | 93 | continue 94 | } 95 | 96 | logger.Info("Attempting to subscribe to the wallet state") 97 | statusStream, err := conn.SubscribeState( 98 | context.Background(), &lnrpc.SubscribeStateRequest{}, 99 | ) 100 | if err != nil { 101 | logger.Errorf("Status subscription for lnd not successful: %v", 102 | err) 103 | 104 | select { 105 | case <-connectionRetryTicker.C: 106 | case <-timeoutChan: 107 | return fmt.Errorf("timeout reached") 108 | case <-shutdown: 109 | return nil 110 | } 111 | 112 | continue 113 | } 114 | 115 | for { 116 | // Have we reached the global timeout yet? 
117 | select { 118 | case <-timeoutChan: 119 | return fmt.Errorf("timeout reached") 120 | case <-shutdown: 121 | return nil 122 | default: 123 | } 124 | 125 | msg, err := statusStream.Recv() 126 | if err != nil { 127 | logger.Errorf("Error receiving status update: %v", err) 128 | 129 | select { 130 | case <-connectionRetryTicker.C: 131 | case <-timeoutChan: 132 | return fmt.Errorf("timeout reached") 133 | case <-shutdown: 134 | return nil 135 | } 136 | 137 | // Something went wrong, perhaps lnd shut down 138 | // before becoming active. Let's retry the whole 139 | // connection again. 140 | continue connectionLoop 141 | } 142 | 143 | logger.Infof("Received update from lnd, wallet status is now: "+ 144 | "%v", msg.State) 145 | 146 | // We've arrived at the final state! 147 | if msg.State == desiredState { 148 | return nil 149 | } 150 | 151 | // If we're waiting for a state that is at the very 152 | // beginning of the list (e.g. NON_EXISTENT) then we 153 | // need to return an error if a state further down the 154 | // list is returned, as that would mean we skipped over 155 | // it. The only exception is the WAITING_TO_START since 156 | // that is actually the largest number a state can have. 157 | if msg.State != lnrpc.WalletState_WAITING_TO_START && 158 | msg.State > desiredState { 159 | 160 | return fmt.Errorf("received state %v which "+ 161 | "is greater than %v", msg.State, 162 | desiredState) 163 | } 164 | 165 | // Let's wait for another status message to arrive. 166 | } 167 | } 168 | } 169 | 170 | func getStatusConnection(rpcServer string) (lnrpc.StateClient, error) { 171 | // Don't bother with checking the cert, we're not sending any macaroons 172 | // to the server anyway. 173 | creds := credentials.NewTLS(&tls.Config{InsecureSkipVerify: true}) 174 | 175 | // We need to use a custom dialer so we can also connect to unix sockets 176 | // and not just TCP addresses. 
177 | genericDialer := lncfg.ClientAddressDialer(defaultRPCPort) 178 | opts := []grpc.DialOption{ 179 | grpc.WithTransportCredentials(creds), 180 | grpc.WithContextDialer(genericDialer), 181 | } 182 | 183 | conn, err := grpc.Dial(rpcServer, opts...) 184 | if err != nil { 185 | return nil, fmt.Errorf("unable to connect to RPC server: %v", 186 | err) 187 | } 188 | 189 | return lnrpc.NewStateClient(conn), nil 190 | } 191 | -------------------------------------------------------------------------------- /dev.Dockerfile: -------------------------------------------------------------------------------- 1 | ARG GO_VERSION=1.23.6 2 | ARG BASE_IMAGE=lightninglabs/lnd 3 | ARG BASE_IMAGE_VERSION=v0.19.0-beta 4 | 5 | FROM golang:${GO_VERSION}-alpine as builder 6 | 7 | # Force Go to use the cgo based DNS resolver. This is required to ensure DNS 8 | # queries required to connect to linked containers succeed. 9 | ENV GODEBUG netdns=cgo 10 | 11 | # Copy in the local repository to build from. 12 | COPY . /go/src/github.com/lightninglabs/lndinit 13 | 14 | # Install dependencies and build the binaries. 15 | RUN apk add --no-cache --update alpine-sdk \ 16 | git \ 17 | make \ 18 | && cd /go/src/github.com/lightninglabs/lndinit \ 19 | && make install 20 | 21 | # Start a new, final image. 22 | FROM ${BASE_IMAGE}:${BASE_IMAGE_VERSION} as final 23 | 24 | # Copy the binary from the builder image. 25 | COPY --from=builder /go/bin/lndinit /bin/ 26 | -------------------------------------------------------------------------------- /docs/data-migration.md: -------------------------------------------------------------------------------- 1 | # Data migration 2 | 3 | This document describes the process of migrating `LND`'s database state from one 4 | type of database backend (for example the `bbolt` based database files `*.db` 5 | such as the `channel.db` or `wallet.db` files) to another (for example the new 6 | `postgres` or `sqlite` databases introduced in `lnd v0.14.0-beta`). 
7 | 8 | **Note:** Currently only migrations from `bolt` to either `postgres` or 9 | `sqlite` are supported. KV based databases will be phased out in the future. We 10 | are planning to add additional support to migrate all `etcd` databases to `sql` 11 | databases. Moreover it is currently also not supported to move the database 12 | from `sqlite` to `postgres` or vice versa, because this migration only takes 13 | care of the key value data and there already exists the possibility to have the 14 | invoices in native sql which this tool does not support. 15 | 16 | 17 | ## Prepare the destination database 18 | 19 | To be able to execute the migration successfully you have to update to [LND 20 | v0.19.0](https://github.com/lightningnetwork/lnd/releases/tag/untagged-6824d65b23dc77e94e22) 21 | which makes sure that the initial pre-migration state of the kv-db is up to date. 22 | 23 | ### Using postgres as the destination remote database 24 | 25 | Prepare a user and database as described in the [Postgres]( 26 | https://github.com/lightningnetwork/lnd/blob/master/docs/postgres.md) 27 | documentation. You'll need the Data Source Name (DSN) for both the data 28 | migration and then the `lnd` configuration, so keep that string somewhere 29 | (should be something with the format of `postgres://xx:yy@localhost:5432/zz`). 30 | 31 | No additional steps are required to prepare the Postgres database for the data 32 | migration. The migration tool will create the database schema automatically, so 33 | no DDL scripts need to be run in advance. But to speed up the migration process 34 | you should take a look at the following [postgres server tuning guide](https://gist.github.com/djkazic/526fa3e032aea9578997f88b45b91fb9) 35 | 36 | ### Using sqlite as the destination remote database 37 | 38 | No particular preparation is needed for `sqlite` compared to the `postgres` 39 | case. Similar to the `bolt` case there will be separate db files created for 40 | each individual `bolt` database.
41 | 42 | 43 | 44 | ## Prepare the source database 45 | 46 | Assuming we want to migrate the database state from the pre-0.19.0 individual 47 | `bbolt` based `*.db` files to a remote database, we first need to make sure the 48 | source files are in the correct state. 49 | 50 | The following steps should be performed *before* running the data migration ( 51 | some of them are marked as optional and can be neglected for small databases 52 | e.g. 200MB 53 | ): 54 | 1. Stop `lnd` 55 | 2. Upgrade the `lnd` binary to the latest version (e.g. `v0.19.0-beta` or later) 56 | 3. (optional) Make sure to add config options like 57 | `gc-canceled-invoices-on-startup=true` and `db.bolt.auto-compact=true` to 58 | your `lnd.conf` to optimize the source database size by removing canceled 59 | invoices and compacting it on startup. 60 | 4. Remove any data from the source database that you can. The fewer 61 | entries are in the source database, the quicker the migration will complete. 62 | For example failed payments (or their failed HTLC attempts) can be removed 63 | with `lncli deletepayments --all`. This can make a huge difference for 64 | routing nodes which rebalance a lot. Make sure you restart LND and compact 65 | the db after the failed payments were deleted so it has an effect on the size 66 | of the db. 67 | 5. (optional) Also make sure to migrate the revocation log for all channels 68 | active prior to `lnd@0.15.0` by activating the config setting `--db.prune-revocation`. 69 | This version introduced an optimized revocation log storage system that 70 | reduces the storage footprint. All channels will be automatically migrated 71 | to this new format when the setting is enabled. 72 | 6. Start `lnd` normally, using the flags mentioned above but not yet changing 73 | any database backend related configuration options. Check the log that the 74 | database schema was migrated successfully, for example: `Checking for 75 | schema update: latest_version=XX, db_version=XX`. 
This relates to the 76 | kv-schema NOT any SQL schema, this makes sure all inital migration of the 77 | old database were performed. This migration tool is only allowed for DBs 78 | which have all the latest db migrations applied up to LND 19. This makes sure 79 | that all LND nodes which want to migrate to the SQL world have the latest db 80 | modifications in place before migrating to a different DB type. If that is 81 | not the case the migration will be refused. 82 | 7. Stop `lnd` again and make sure it isn't started again by accident during the 83 | data migration (e.g. disable any `systemd` or other scripts that start/stop 84 | `lnd`). 85 | 86 | ## Run the migration 87 | 88 | Depending on the destination database type, run the migration with a command 89 | similar to one of the following examples: 90 | 91 | **Example: Migrate from `bbolt` to `sqlite`:** 92 | 93 | ```shell 94 | lndinit --debuglevel info migrate-db \ 95 | --source.bolt.data-dir /home/myuser/.lnd/data \ 96 | --dest.backend sqlite \ 97 | --dest.sqlite.data-dir /home/myuser/.lnd/data --network mainnet 98 | ``` 99 | If you were running a watchtower server, and it had a different directory set 100 | compared to the default `LND` directory make sure you also add the tower dir 101 | setting. It has to be the directory to the `watchtower` dir, excluding the 102 | `watchtower` name e.g.: 103 | `--source.bolt.tower-dir /home/myuser/towerdir`. 104 | 105 | 106 | **Example: Migrate from `bbolt` to `postgres`:** 107 | 108 | ```shell 109 | lndinit --debuglevel info migrate-db \ 110 | --source.bolt.data-dir /home/myuser/.lnd/data \ 111 | --dest.backend postgres \ 112 | --dest.postgres.dsn=postgres://postgres:postgres@localhost:5432/postgres 113 | ``` 114 | 115 | Also set the watchtower directory in case you used a different path, see above. 116 | 117 | This migration tool depends on the directory structure of 118 | the LND software. 
This means make sure you link the correct folder because the 119 | `bolt` database has several database files in serveral subfolders. This 120 | migration tool will make sure that all the required databases are migrated. It 121 | will not require a `wtclient.db` or a `watchtower.db`. 122 | 123 | The migration is resumable and happens in chunks of default 20MB to make it 124 | compatible with most of the systems including low power devices like 125 | raspberry pis. However if you have better setup with way more RAM feel free to 126 | increase the `--chunk-size` to something like 127 | 200MB which should speed up the migration. You can also change the chunk size 128 | during the migration as well. If you want to start the migration from the 129 | beginning (can only be done if the migration did still not succeed) use the 130 | flag `--force-new-migration` which can be used in combination with a 131 | new `chunksize` limit. 132 | 133 | In case you have successfully migrated several nodes and are not sure anymore 134 | which source db corresponds to which destination db there is a flag called 135 | `force-verify-db` which only works if both dbs are marked as successfully 136 | migrated. It will verify the contents of the db. 137 | 138 | 139 | ## After the migration was successful 140 | 141 | Make sure the whole migration process succeeds before starting the new node with 142 | the new underlying database. As mentioned above there are several database files 143 | at play here so all of them have to succeed to garantee a successful migration. 144 | 145 | If the migration succeeded successfully and you see the following log entry of 146 | the migration tool you are good to go to start-up your lnd node with the new 147 | database. 148 | 149 | ```shell 150 | [INF]: LNDINIT !!!Migration of all mandatory db parts completed successfully!!! 
151 | ``` 152 | 153 | The mandatory dbs are: 154 | * `channel.db` 155 | * `macaroons.db` 156 | * `sphinxreplay.db` 157 | * `wallet.db` 158 | 159 | The optional dbs are: 160 | 161 | * `wtclient.db` 162 | * `watchtower.db` 163 | * `neutrino.db` 164 | 165 | ### LND config setting for `sqlite` backend 166 | 167 | ```shell 168 | [db] 169 | db.backend=sqlite 170 | ``` 171 | 172 | There are several other sqlite setttings you can tweak to make it fit your 173 | needs, take a look at the [lnd-sample-config](https://github.com/lightningnetwork/lnd/blob/b6d8ecc7479f7517368814c398b0fbe0e8c52fed/sample-lnd.conf). 174 | 175 | ### LND config setting for `postgres` backend 176 | 177 | ```shell 178 | [db] 179 | db.backend=postgres 180 | 181 | [postgres] 182 | 183 | db.postgres.dsn=postgres://xx:yy@localhost:5432/zz 184 | ``` 185 | 186 | Use the same connection string you used for the migration. Also take a look at 187 | the postgres knobs in the [lnd-sample-config](https://github.com/lightningnetwork/lnd/blob/b6d8ecc7479f7517368814c398b0fbe0e8c52fed/sample-lnd.conf). 188 | 189 | 190 | This is the output of the migration cmd help settings: 191 | 192 | ```shell 193 | lndinit migrate-db -h 194 | Usage: 195 | lndinit [OPTIONS] migrate-db [migrate-db-OPTIONS] 196 | 197 | Migrate the full database state of lnd from a source (for example the 198 | set of bolt database files such as channel.db and wallet.db) database 199 | to a SQL destination database. 200 | 201 | IMPORTANT: Please read the data migration guide located in the file 202 | docs/data-migration.md of the main lnd repository before using this 203 | command! 204 | 205 | NOTE: The migration can take a long time depending on the amount of data 206 | that needs to be written! The migration happens in chunks therefore it 207 | can be resumed in case of an interruption. The migration also includes 208 | a verification to assure that the migration is consistent. 
209 | As long as NEITHER the source nor destination database has been started/ 210 | run with lnd, the migration can be repeated/resumed in case of an error 211 | since the data will just be overwritten again in the destination. 212 | 213 | Once a database was successfully and completely migrated from the source 214 | to the destination, the source will be marked with a 'tombstone' tag 215 | while the destination will get an 'already migrated' tag. 216 | A database with a tombstone cannot be started with lnd anymore to 217 | prevent from an old state being used by accident. 218 | To prevent overwriting a destination database by accident, the same 219 | database/namespace pair cannot be used as the target of a data migration 220 | twice, which is checked through the 'already migrated' tag. 221 | 222 | Application Options: 223 | -e, --error-on-existing Exit with code EXIT_CODE_TARGET_EXISTS (128) instead of 0 if the result of an action is already present 224 | -d, --debuglevel= Set the log level (Off, Critical, Error, Warn, Info, Debug, Trace) 225 | 226 | Help Options: 227 | -h, --help Show this help message 228 | 229 | [migrate-db command options] 230 | -n, --network= Network of the db files to migrate (used to navigate into the right directory) (default: mainnet) 231 | --pprof-port= Enable pprof profiling on the specified port 232 | --force-new-migration Force a new migration from the beginning of the source DB so the resume state will be discarded 233 | --force-verify-db Force a verification verifies two already marked (tombstoned and already migrated) dbs to make sure that the source db equals the 234 | content of the destination db 235 | --chunk-size= Chunk size for the migration in bytes 236 | 237 | source: 238 | --source.backend=[bolt] The source database backend. (default: bolt) 239 | 240 | bolt: 241 | --source.bolt.dbtimeout= Specify the timeout value used when opening the database. 
(default: 1m0s) 242 | --source.bolt.data-dir= Lnd data dir where bolt dbs are located. 243 | --source.bolt.tower-dir= Lnd watchtower dir where bolt dbs for the watchtower server are located. 244 | 245 | dest: 246 | --dest.backend=[postgres|sqlite] The destination database backend. (default: postgres) 247 | 248 | postgres: 249 | --dest.postgres.dsn= Database connection string. 250 | --dest.postgres.timeout= Database connection timeout. Set to zero to disable. 251 | --dest.postgres.maxconnections= The maximum number of open connections to the database. Set to zero for unlimited. 252 | 253 | sqlite: 254 | --dest.sqlite.data-dir= Lnd data dir where sqlite dbs are located. 255 | --dest.sqlite.tower-dir= Lnd watchtower dir where sqlite dbs for the watchtower server are located. 256 | 257 | sqlite-config: 258 | --dest.sqlite.sqlite-config.timeout= The time after which a database query should be timed out. 259 | --dest.sqlite.sqlite-config.busytimeout= The maximum amount of time to wait for a database connection to become available for a query. 260 | --dest.sqlite.sqlite-config.maxconnections= The maximum number of open connections to the database. Set to zero for unlimited. 261 | --dest.sqlite.sqlite-config.pragmaoptions= A list of pragma options to set on a database connection. For example, 'auto_vacuum=incremental'. Note that the flag must be specified multiple times if multiple options are to be set. 
262 | ```
--------------------------------------------------------------------------------
/example-init-wallet-k8s.sh:
--------------------------------------------------------------------------------
#!/bin/bash

set -e

# All settings below can be overridden from the environment; the ${VAR:-default}
# form substitutes the default only when the variable is unset or empty.
WALLET_SECRET_NAME=${WALLET_SECRET_NAME:-lnd-wallet-secret}
WALLET_SECRET_NAMESPACE=${WALLET_SECRET_NAMESPACE:-default}
WALLET_DIR=${WALLET_DIR:-~/.lnd/data/chain/bitcoin/mainnet}
WALLET_PASSWORD_FILE=${WALLET_PASSWORD_FILE:-/tmp/wallet-password}
CERT_DIR=${CERT_DIR:-~/.lnd}
UPLOAD_RPC_SECRETS=${UPLOAD_RPC_SECRETS:-0}
RPC_SECRETS_NAME=${RPC_SECRETS_NAME:-lnd-rpc-secret}
RPC_SECRETS_NAMESPACE=${RPC_SECRETS_NAMESPACE:-default}
NETWORK=${NETWORK:-mainnet}
RPC_SERVER=${RPC_SERVER:-localhost:10009}
# BUG FIX: the original used ${REMOTE_SIGNING:0}, which is *substring*
# expansion (always yields the current, possibly empty, value) rather than
# default-value expansion. The :- operator is required to default to 0.
REMOTE_SIGNING=${REMOTE_SIGNING:-0}
REMOTE_SIGNER_RPC_SECRETS_DIR=${REMOTE_SIGNER_RPC_SECRETS_DIR:-/tmp}
REMOTE_SIGNER_RPC_SECRETS_NAME=${REMOTE_SIGNER_RPC_SECRETS_NAME:-lnd-signer-rpc-secret}
REMOTE_SIGNER_RPC_SECRETS_NAMESPACE=${REMOTE_SIGNER_RPC_SECRETS_NAMESPACE:-signer}

echo "[STARTUP] Asserting wallet password exists in secret ${WALLET_SECRET_NAME}"
lndinit gen-password \
  | lndinit -v store-secret \
    --target=k8s \
    --k8s.base64 \
    --k8s.namespace="${WALLET_SECRET_NAMESPACE}" \
    --k8s.secret-name="${WALLET_SECRET_NAME}" \
    --k8s.secret-key-name=walletpassword

echo ""
echo "[STARTUP] Asserting seed exists in secret ${WALLET_SECRET_NAME}"
lndinit gen-seed \
  | lndinit -v store-secret \
    --target=k8s \
    --k8s.base64 \
    --k8s.namespace="${WALLET_SECRET_NAMESPACE}" \
    --k8s.secret-name="${WALLET_SECRET_NAME}" \
    --k8s.secret-key-name=walletseed

echo ""
echo "[STARTUP] Asserting wallet is created with values from secret ${WALLET_SECRET_NAME}"
lndinit -v init-wallet \
  --secret-source=k8s \
  --k8s.base64 \
  --k8s.namespace="${WALLET_SECRET_NAMESPACE}" \
--k8s.secret-name="${WALLET_SECRET_NAME}" \ 46 | --k8s.seed-key-name=walletseed \ 47 | --k8s.wallet-password-key-name=walletpassword \ 48 | --init-file.output-wallet-dir="${WALLET_DIR}" \ 49 | --init-file.validate-password 50 | 51 | echo "" 52 | echo "[STARTUP] Preparing lnd auto unlock file" 53 | 54 | # To make sure the password can be read exactly once (by lnd itself), we create 55 | # a named pipe. Because we can only write to such a pipe if there's a reader on 56 | # the other end, we need to run this in a sub process in the background. 57 | mkfifo "${WALLET_PASSWORD_FILE}" 58 | lndinit -v load-secret \ 59 | --source=k8s \ 60 | --k8s.base64 \ 61 | --k8s.namespace="${WALLET_SECRET_NAMESPACE}" \ 62 | --k8s.secret-name="${WALLET_SECRET_NAME}" \ 63 | --k8s.secret-key-name=walletpassword > "${WALLET_PASSWORD_FILE}" & 64 | 65 | # In case we have a remote signing setup, we also need to provision the RPC 66 | # secrets of the remote signer. 67 | if [[ "${REMOTE_SIGNING}" == "1" ]]; then 68 | echo "[STARTUP] Provisioning remote signer RPC secrets" 69 | lndinit -v load-secret \ 70 | --source=k8s \ 71 | --k8s.base64 \ 72 | --k8s.namespace="${REMOTE_SIGNER_RPC_SECRETS_NAMESPACE}" \ 73 | --k8s.secret-name="${REMOTE_SIGNER_RPC_SECRETS_NAME}" \ 74 | --k8s.secret-key-name=tls.cert > "${REMOTE_SIGNER_RPC_SECRETS_DIR}/tls.cert" 75 | lndinit -v load-secret \ 76 | --source=k8s \ 77 | --k8s.base64 \ 78 | --k8s.namespace="${REMOTE_SIGNER_RPC_SECRETS_NAMESPACE}" \ 79 | --k8s.secret-name="${REMOTE_SIGNER_RPC_SECRETS_NAME}" \ 80 | --k8s.secret-key-name=admin.macaroon > "${REMOTE_SIGNER_RPC_SECRETS_DIR}/admin.macaroon" 81 | fi 82 | 83 | # In case we want to upload the TLS certificate and macaroons to k8s secrets as 84 | # well once lnd is ready, we can use the wait-ready and store-secret commands in 85 | # combination to wait until lnd is ready and then batch upload the files to k8s. 
86 | if [[ "${UPLOAD_RPC_SECRETS}" == "1" ]]; then 87 | echo "" 88 | echo "[STARTUP] Starting RPC secret uploader process in background" 89 | lndinit -v wait-ready \ 90 | && lncli --network "${NETWORK}" --rpcserver "${RPC_SERVER}" \ 91 | --tlscertpath "${CERT_DIR}/tls.cert" \ 92 | --macaroonpath "${WALLET_DIR}/walletkit.macaroon" \ 93 | wallet accounts list > /tmp/accounts.json \ 94 | && lndinit -v store-secret \ 95 | --batch \ 96 | --overwrite \ 97 | --target=k8s \ 98 | --k8s.base64 \ 99 | --k8s.namespace="${RPC_SECRETS_NAMESPACE}" \ 100 | --k8s.secret-name="${RPC_SECRETS_NAME}" \ 101 | "${CERT_DIR}/tls.cert" \ 102 | "${WALLET_DIR}"/*.macaroon \ 103 | /tmp/accounts.json & 104 | fi 105 | 106 | # And finally start lnd. We need to use "exec" here to make sure all signals are 107 | # forwarded correctly. 108 | echo "" 109 | echo "[STARTUP] Starting lnd with flags: $@" 110 | exec lnd "$@" 111 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/lightninglabs/lndinit 2 | 3 | require ( 4 | github.com/btcsuite/btcd v0.24.3-0.20250318170759-4f4ea81776d6 5 | github.com/btcsuite/btcd/btcutil v1.1.5 6 | github.com/btcsuite/btclog/v2 v2.0.1-0.20250602222548-9967d19bb084 7 | github.com/btcsuite/btcwallet v0.16.14 8 | github.com/btcsuite/btcwallet/walletdb v1.5.1 // indirect 9 | github.com/fergusstrange/embedded-postgres v1.25.0 10 | github.com/jessevdk/go-flags v1.4.0 11 | github.com/kkdai/bstream v1.0.0 12 | github.com/lightninglabs/protobuf-hex-display v1.4.3-hex-display 13 | github.com/lightningnetwork/lnd v0.19.1-beta.rc1 14 | github.com/lightningnetwork/lnd/kvdb v1.4.16 15 | github.com/stretchr/testify v1.9.0 16 | go.etcd.io/bbolt v1.3.11 17 | google.golang.org/grpc v1.59.0 18 | k8s.io/api v0.18.3 19 | k8s.io/apimachinery v0.18.3 20 | k8s.io/client-go v0.18.3 21 | ) 22 | 23 | require github.com/lightningnetwork/lnd/healthcheck v1.2.6 
24 | 25 | require ( 26 | github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect 27 | github.com/Microsoft/go-winio v0.6.1 // indirect 28 | github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 // indirect 29 | github.com/Yawning/aez v0.0.0-20211027044916-e49e68abd344 // indirect 30 | github.com/aead/chacha20 v0.0.0-20180709150244-8b13a72661da // indirect 31 | github.com/aead/siphash v1.0.1 // indirect 32 | github.com/beorn7/perks v1.0.1 // indirect 33 | github.com/btcsuite/btcd/btcec/v2 v2.3.4 // indirect 34 | github.com/btcsuite/btcd/btcutil/psbt v1.1.8 // indirect 35 | github.com/btcsuite/btcd/chaincfg/chainhash v1.1.0 // indirect 36 | github.com/btcsuite/btclog v0.0.0-20241003133417-09c4e92e319c // indirect 37 | github.com/btcsuite/btcwallet/wallet/txauthor v1.3.5 // indirect 38 | github.com/btcsuite/btcwallet/wallet/txrules v1.2.2 // indirect 39 | github.com/btcsuite/btcwallet/wallet/txsizes v1.2.5 // indirect 40 | github.com/btcsuite/btcwallet/wtxmgr v1.5.6 // indirect 41 | github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd // indirect 42 | github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792 // indirect 43 | github.com/btcsuite/winsvc v1.0.0 // indirect 44 | github.com/cenkalti/backoff/v4 v4.2.1 // indirect 45 | github.com/cespare/xxhash/v2 v2.2.0 // indirect 46 | github.com/containerd/continuity v0.3.0 // indirect 47 | github.com/coreos/go-semver v0.3.0 // indirect 48 | github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f // indirect 49 | github.com/coreos/go-systemd/v22 v22.3.2 // indirect 50 | github.com/davecgh/go-spew v1.1.1 // indirect 51 | github.com/decred/dcrd/crypto/blake256 v1.0.1 // indirect 52 | github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0 // indirect 53 | github.com/decred/dcrd/lru v1.1.2 // indirect 54 | github.com/docker/cli v20.10.17+incompatible // indirect 55 | github.com/docker/docker v24.0.7+incompatible // indirect 56 | github.com/docker/go-connections v0.4.0 // indirect 
57 | github.com/docker/go-units v0.5.0 // indirect 58 | github.com/dustin/go-humanize v1.0.1 // indirect 59 | github.com/go-errors/errors v1.0.1 // indirect 60 | github.com/go-logr/logr v1.3.0 // indirect 61 | github.com/go-logr/stdr v1.2.2 // indirect 62 | github.com/gofrs/uuid v4.2.0+incompatible // indirect 63 | github.com/gogo/protobuf v1.3.2 // indirect 64 | github.com/golang-jwt/jwt/v4 v4.4.2 // indirect 65 | github.com/golang-migrate/migrate/v4 v4.17.0 // indirect 66 | github.com/golang/protobuf v1.5.3 // indirect 67 | github.com/golang/snappy v0.0.4 // indirect 68 | github.com/google/btree v1.0.1 // indirect 69 | github.com/google/gofuzz v1.1.0 // indirect 70 | github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect 71 | github.com/google/uuid v1.6.0 // indirect 72 | github.com/googleapis/gnostic v0.1.0 // indirect 73 | github.com/gorilla/websocket v1.5.0 // indirect 74 | github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 // indirect 75 | github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect 76 | github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect 77 | github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 // indirect 78 | github.com/hashicorp/errwrap v1.1.0 // indirect 79 | github.com/hashicorp/go-multierror v1.1.1 // indirect 80 | github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect 81 | github.com/imdario/mergo v0.3.12 // indirect 82 | github.com/jackc/chunkreader/v2 v2.0.1 // indirect 83 | github.com/jackc/pgconn v1.14.3 // indirect 84 | github.com/jackc/pgerrcode v0.0.0-20240316143900-6e2875d9b438 // indirect 85 | github.com/jackc/pgio v1.0.0 // indirect 86 | github.com/jackc/pgpassfile v1.0.0 // indirect 87 | github.com/jackc/pgproto3/v2 v2.3.3 // indirect 88 | github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a // indirect 89 | github.com/jackc/pgtype v1.14.0 // indirect 90 | github.com/jackc/pgx/v4 v4.18.2 // indirect 91 | github.com/jackc/pgx/v5 v5.3.1 // indirect 92 | github.com/jonboulle/clockwork v0.2.2 
// indirect 93 | github.com/jrick/logrotate v1.1.2 // indirect 94 | github.com/json-iterator/go v1.1.11 // indirect 95 | github.com/juju/clock v0.0.0-20220203021603-d9deb868a28a // indirect 96 | github.com/juju/collections v0.0.0-20220203020748-febd7cad8a7a // indirect 97 | github.com/juju/errors v0.0.0-20220622220526-54a94488269b // indirect 98 | github.com/juju/loggo v0.0.0-20210728185423-eebad3a902c4 // indirect 99 | github.com/juju/mgo/v2 v2.0.0-20220111072304-f200228f1090 // indirect 100 | github.com/juju/retry v0.0.0-20220204093819-62423bf33287 // indirect 101 | github.com/juju/utils/v3 v3.0.0-20220203023959-c3fbc78a33b0 // indirect 102 | github.com/juju/version/v2 v2.0.0-20220204124744-fc9915e3d935 // indirect 103 | github.com/klauspost/compress v1.17.9 // indirect 104 | github.com/lib/pq v1.10.9 // indirect 105 | github.com/lightninglabs/gozmq v0.0.0-20191113021534-d20a764486bf // indirect 106 | github.com/lightninglabs/neutrino v0.16.1 // indirect 107 | github.com/lightninglabs/neutrino/cache v1.1.2 // indirect 108 | github.com/lightningnetwork/lightning-onion v1.2.1-0.20240712235311-98bd56499dfb // indirect 109 | github.com/lightningnetwork/lnd/clock v1.1.1 // indirect 110 | github.com/lightningnetwork/lnd/fn/v2 v2.0.8 // indirect 111 | github.com/lightningnetwork/lnd/queue v1.1.1 // indirect 112 | github.com/lightningnetwork/lnd/sqldb v1.0.9 // indirect 113 | github.com/lightningnetwork/lnd/ticker v1.1.1 // indirect 114 | github.com/lightningnetwork/lnd/tlv v1.3.1 // indirect 115 | github.com/lightningnetwork/lnd/tor v1.1.6 // indirect 116 | github.com/ltcsuite/ltcd v0.0.0-20190101042124-f37f8bf35796 // indirect 117 | github.com/mattn/go-isatty v0.0.20 // indirect 118 | github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect 119 | github.com/miekg/dns v1.1.43 // indirect 120 | github.com/mitchellh/mapstructure v1.4.1 // indirect 121 | github.com/moby/term v0.5.0 // indirect 122 | github.com/modern-go/concurrent 
v0.0.0-20180306012644-bacd9c7ef1dd // indirect 123 | github.com/modern-go/reflect2 v1.0.1 // indirect 124 | github.com/ncruces/go-strftime v0.1.9 // indirect 125 | github.com/opencontainers/go-digest v1.0.0 // indirect 126 | github.com/opencontainers/image-spec v1.0.2 // indirect 127 | github.com/opencontainers/runc v1.1.12 // indirect 128 | github.com/ory/dockertest/v3 v3.10.0 // indirect 129 | github.com/pkg/errors v0.9.1 // indirect 130 | github.com/pmezard/go-difflib v1.0.0 // indirect 131 | github.com/prometheus/client_golang v1.11.1 // indirect 132 | github.com/prometheus/client_model v0.2.0 // indirect 133 | github.com/prometheus/common v0.26.0 // indirect 134 | github.com/prometheus/procfs v0.6.0 // indirect 135 | github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect 136 | github.com/rogpeppe/fastuuid v1.2.0 // indirect 137 | github.com/sirupsen/logrus v1.9.2 // indirect 138 | github.com/soheilhy/cmux v0.1.5 // indirect 139 | github.com/spf13/pflag v1.0.5 // indirect 140 | github.com/stretchr/objx v0.5.2 // indirect 141 | github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 // indirect 142 | github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802 // indirect 143 | github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f // indirect 144 | github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect 145 | github.com/xeipuuv/gojsonschema v1.2.0 // indirect 146 | github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8 // indirect 147 | github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 // indirect 148 | gitlab.com/yawning/bsaes.git v0.0.0-20190805113838-0a714cd429ec // indirect 149 | go.etcd.io/etcd/api/v3 v3.5.12 // indirect 150 | go.etcd.io/etcd/client/pkg/v3 v3.5.12 // indirect 151 | go.etcd.io/etcd/client/v2 v2.305.12 // indirect 152 | go.etcd.io/etcd/client/v3 v3.5.12 // indirect 153 | go.etcd.io/etcd/pkg/v3 v3.5.12 // indirect 154 | go.etcd.io/etcd/raft/v3 v3.5.12 
// indirect 155 | go.etcd.io/etcd/server/v3 v3.5.12 // indirect 156 | go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.0 // indirect 157 | go.opentelemetry.io/otel v1.20.0 // indirect 158 | go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.20.0 // indirect 159 | go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.20.0 // indirect 160 | go.opentelemetry.io/otel/metric v1.20.0 // indirect 161 | go.opentelemetry.io/otel/sdk v1.20.0 // indirect 162 | go.opentelemetry.io/otel/trace v1.20.0 // indirect 163 | go.opentelemetry.io/proto/otlp v1.0.0 // indirect 164 | go.uber.org/atomic v1.7.0 // indirect 165 | go.uber.org/multierr v1.6.0 // indirect 166 | go.uber.org/zap v1.17.0 // indirect 167 | golang.org/x/crypto v0.22.0 // indirect 168 | golang.org/x/exp v0.0.0-20240325151524-a685a6edb6d8 // indirect 169 | golang.org/x/mod v0.16.0 // indirect 170 | golang.org/x/net v0.24.0 // indirect 171 | golang.org/x/oauth2 v0.14.0 // indirect 172 | golang.org/x/sync v0.7.0 // indirect 173 | golang.org/x/sys v0.19.0 // indirect 174 | golang.org/x/term v0.19.0 // indirect 175 | golang.org/x/text v0.14.0 // indirect 176 | golang.org/x/time v0.3.0 // indirect 177 | golang.org/x/tools v0.19.0 // indirect 178 | google.golang.org/appengine v1.6.7 // indirect 179 | google.golang.org/genproto v0.0.0-20231016165738-49dd2c1f3d0b // indirect 180 | google.golang.org/genproto/googleapis/api v0.0.0-20231016165738-49dd2c1f3d0b // indirect 181 | google.golang.org/genproto/googleapis/rpc v0.0.0-20231030173426-d783a09b4405 // indirect 182 | google.golang.org/protobuf v1.33.0 // indirect 183 | gopkg.in/errgo.v1 v1.0.1 // indirect 184 | gopkg.in/inf.v0 v0.9.1 // indirect 185 | gopkg.in/macaroon-bakery.v2 v2.0.1 // indirect 186 | gopkg.in/macaroon.v2 v2.0.0 // indirect 187 | gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect 188 | gopkg.in/yaml.v2 v2.4.0 // indirect 189 | gopkg.in/yaml.v3 v3.0.1 // indirect 190 | k8s.io/klog v1.0.0 // indirect 191 | 
k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89 // indirect 192 | modernc.org/gc/v3 v3.0.0-20240107210532-573471604cb6 // indirect 193 | modernc.org/libc v1.49.3 // indirect 194 | modernc.org/mathutil v1.6.0 // indirect 195 | modernc.org/memory v1.8.0 // indirect 196 | modernc.org/sqlite v1.29.10 // indirect 197 | modernc.org/strutil v1.2.0 // indirect 198 | modernc.org/token v1.1.0 // indirect 199 | pgregory.net/rapid v1.2.0 // indirect 200 | sigs.k8s.io/structured-merge-diff/v3 v3.0.0 // indirect 201 | sigs.k8s.io/yaml v1.2.0 // indirect 202 | ) 203 | 204 | // We want to format raw bytes as hex instead of base64. The forked version 205 | // allows us to specify that as an option. 206 | replace google.golang.org/protobuf => github.com/lightninglabs/protobuf-go-hex-display v1.30.0-hex-display 207 | 208 | go 1.23.6 209 | -------------------------------------------------------------------------------- /k8s.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "context" 5 | "encoding/base64" 6 | "fmt" 7 | 8 | api "k8s.io/api/core/v1" 9 | "k8s.io/apimachinery/pkg/api/errors" 10 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 11 | "k8s.io/client-go/kubernetes" 12 | "k8s.io/client-go/rest" 13 | ) 14 | 15 | const ( 16 | defaultK8sNamespace = "default" 17 | defaultK8sResourcePolicy = "keep" 18 | ) 19 | 20 | type k8sObjectType string 21 | 22 | const ( 23 | ObjectTypeSecret k8sObjectType = "Secret" 24 | ObjectTypeConfigMap k8sObjectType = "ConfigMap" 25 | ) 26 | 27 | type k8sSecretOptions struct { 28 | Namespace string `long:"namespace" description:"The Kubernetes namespace the secret is located in"` 29 | SecretName string `long:"secret-name" description:"The name of the Kubernetes secret"` 30 | SecretKeyName string `long:"secret-key-name" description:"The name of the key/entry within the secret"` 31 | Base64 bool `long:"base64" description:"Encode as base64 when storing and decode as base64 when reading"` 
32 | } 33 | 34 | func (s *k8sSecretOptions) AnySet() bool { 35 | return s.Namespace != defaultK8sNamespace || s.SecretName != "" || 36 | s.SecretKeyName != "" 37 | } 38 | 39 | type k8sConfigmapOptions struct { 40 | Namespace string `long:"namespace" description:"The Kubernetes namespace the configmap is located in"` 41 | Name string `long:"configmap-name" description:"The name of the Kubernetes configmap"` 42 | KeyName string `long:"configmap-key-name" description:"The name of the key/entry within the configmap"` 43 | } 44 | 45 | type k8sObjectOptions struct { 46 | Namespace string 47 | Name string 48 | KeyName string 49 | Base64 bool 50 | ObjectType k8sObjectType 51 | } 52 | 53 | type helmOptions struct { 54 | Annotate bool `long:"annotate" description:"Whether Helm annotations should be added to the created secret"` 55 | ReleaseName string `long:"release-name" description:"The value for the meta.helm.sh/release-name annotation"` 56 | ResourcePolicy string `long:"resource-policy" description:"The value for the helm.sh/resource-policy annotation"` 57 | } 58 | 59 | type jsonK8sObject struct { 60 | metav1.TypeMeta `json:",inline"` 61 | metav1.ObjectMeta `json:"metadata,omitempty"` 62 | } 63 | 64 | func saveK8s(content string, opts *k8sObjectOptions, 65 | overwrite bool, helm *helmOptions) error { 66 | 67 | client, err := getClientK8s() 68 | if err != nil { 69 | return err 70 | } 71 | 72 | switch opts.ObjectType { 73 | case ObjectTypeSecret: 74 | return saveSecretK8s(client, content, opts, overwrite, helm) 75 | case ObjectTypeConfigMap: 76 | return saveConfigMapK8s(client, content, opts, overwrite, helm) 77 | default: 78 | return fmt.Errorf("unsupported object type: %s", opts.ObjectType) 79 | } 80 | } 81 | 82 | func saveSecretK8s(client *kubernetes.Clientset, content string, 83 | opts *k8sObjectOptions, overwrite bool, helm *helmOptions) error { 84 | 85 | secret, exists, err := getSecretK8s(client, opts.Namespace, opts.Name) 86 | if err != nil { 87 | return err 88 | } 
89 | 90 | if exists { 91 | return updateSecretValueK8s( 92 | client, secret, opts, overwrite, content, 93 | ) 94 | } 95 | 96 | return createSecretK8s(client, opts, helm, content) 97 | } 98 | 99 | func saveConfigMapK8s(client *kubernetes.Clientset, content string, 100 | opts *k8sObjectOptions, overwrite bool, helm *helmOptions) error { 101 | 102 | configMap, exists, err := getConfigMapK8s( 103 | client, opts.Namespace, opts.Name, 104 | ) 105 | if err != nil { 106 | return err 107 | } 108 | 109 | if exists { 110 | return updateConfigMapValueK8s( 111 | client, configMap, opts, overwrite, content, 112 | ) 113 | } 114 | 115 | return createConfigMapK8s(client, opts, helm, content) 116 | } 117 | 118 | func readK8s(opts *k8sObjectOptions) (string, *jsonK8sObject, error) { 119 | client, err := getClientK8s() 120 | if err != nil { 121 | return "", nil, err 122 | } 123 | 124 | switch opts.ObjectType { 125 | case ObjectTypeSecret: 126 | return readSecretK8s(client, opts) 127 | default: 128 | return "", nil, fmt.Errorf("unsupported object type: %s", 129 | opts.ObjectType) 130 | } 131 | } 132 | 133 | func readSecretK8s(client *kubernetes.Clientset, 134 | opts *k8sObjectOptions) (string, *jsonK8sObject, error) { 135 | 136 | // Existing logic to read a secret 137 | secret, exists, err := getSecretK8s( 138 | client, opts.Namespace, opts.Name, 139 | ) 140 | if err != nil { 141 | return "", nil, err 142 | } 143 | 144 | if !exists { 145 | return "", nil, fmt.Errorf("secret %s does not exist in "+ 146 | "namespace %s", opts.Name, opts.Namespace) 147 | } 148 | 149 | if len(secret.Data) == 0 { 150 | return "", nil, fmt.Errorf("secret %s exists but contains no "+ 151 | "data", opts.Name) 152 | } 153 | 154 | if len(secret.Data[opts.KeyName]) == 0 { 155 | return "", nil, fmt.Errorf("secret %s exists but does not "+ 156 | "contain the key %s", opts.Name, 157 | opts.KeyName) 158 | } 159 | 160 | // There is an additional layer of base64 encoding applied to each of 161 | // the secrets. 
Try to de-code it now. 162 | content, err := secretToString( 163 | secret.Data[opts.KeyName], opts.Base64, 164 | ) 165 | if err != nil { 166 | return "", nil, fmt.Errorf("failed to decode raw secret %s "+ 167 | "key %s: %v", opts.Name, opts.KeyName, err) 168 | } 169 | 170 | return content, &jsonK8sObject{ 171 | TypeMeta: secret.TypeMeta, 172 | ObjectMeta: secret.ObjectMeta, 173 | }, nil 174 | } 175 | 176 | func getClientK8s() (*kubernetes.Clientset, error) { 177 | logger.Info("Creating k8s cluster config") 178 | config, err := rest.InClusterConfig() 179 | if err != nil { 180 | return nil, fmt.Errorf("unable to grab cluster config: %v", err) 181 | } 182 | 183 | logger.Info("Creating k8s cluster client") 184 | client, err := kubernetes.NewForConfig(config) 185 | if err != nil { 186 | return nil, fmt.Errorf("error creating cluster config: %v", err) 187 | } 188 | 189 | logger.Info("Cluster client created successfully") 190 | return client, nil 191 | } 192 | 193 | func getSecretK8s(client *kubernetes.Clientset, namespace, 194 | name string) (*api.Secret, bool, error) { 195 | 196 | logger.Infof("Attempting to load secret %s from namespace %s", name, namespace) 197 | secret, err := client.CoreV1().Secrets(namespace).Get( 198 | context.Background(), name, metav1.GetOptions{}, 199 | ) 200 | 201 | switch { 202 | case err == nil: 203 | logger.Infof("Secret %s loaded successfully", name) 204 | return secret, true, nil 205 | 206 | case errors.IsNotFound(err): 207 | logger.Infof("Secret %s not found in namespace %s", name, namespace) 208 | return nil, false, nil 209 | 210 | default: 211 | return nil, false, fmt.Errorf("error querying secret "+ 212 | "existence: %v", err) 213 | } 214 | } 215 | 216 | func updateSecretValueK8s(client *kubernetes.Clientset, secret *api.Secret, 217 | opts *k8sObjectOptions, overwrite bool, content string) error { 218 | 219 | if len(secret.Data) == 0 { 220 | logger.Infof("Data of secret %s is empty, initializing", opts.Name) 221 | secret.Data = 
make(map[string][]byte) 222 | } 223 | 224 | if len(secret.Data[opts.KeyName]) > 0 && !overwrite { 225 | return fmt.Errorf("key %s in secret %s already exists: %v", 226 | opts.KeyName, opts.Name, 227 | errTargetExists) 228 | } 229 | 230 | // Do we need to add an extra layer of base64? 231 | if opts.Base64 { 232 | content = base64.StdEncoding.EncodeToString([]byte(content)) 233 | } 234 | secret.Data[opts.KeyName] = []byte(content) 235 | 236 | logger.Infof("Attempting to update key %s of secret %s in namespace %s", 237 | opts.KeyName, opts.Name, opts.Namespace) 238 | updatedSecret, err := client.CoreV1().Secrets(opts.Namespace).Update( 239 | context.Background(), secret, metav1.UpdateOptions{}, 240 | ) 241 | if err != nil { 242 | return fmt.Errorf("error updating secret %s in namespace %s: "+ 243 | "%v", opts.Name, opts.Namespace, err) 244 | } 245 | 246 | jsonSecret, _ := asJSON(jsonK8sObject{ 247 | TypeMeta: updatedSecret.TypeMeta, 248 | ObjectMeta: updatedSecret.ObjectMeta, 249 | }) 250 | logger.Infof("Updated secret: %s", jsonSecret) 251 | 252 | return nil 253 | } 254 | 255 | func createSecretK8s(client *kubernetes.Clientset, opts *k8sObjectOptions, 256 | helm *helmOptions, content string) error { 257 | 258 | meta := metav1.ObjectMeta{ 259 | Name: opts.Name, 260 | } 261 | 262 | if helm != nil && helm.Annotate { 263 | meta.Labels = map[string]string{ 264 | "app.kubernetes.io/managed-by": "Helm", 265 | } 266 | meta.Annotations = map[string]string{ 267 | "helm.sh/resource-policy": helm.ResourcePolicy, 268 | "meta.helm.sh/release-name": helm.ReleaseName, 269 | "meta.helm.sh/release-namespace": opts.Namespace, 270 | } 271 | } 272 | 273 | // Do we need to add an extra layer of base64? 
274 | if opts.Base64 { 275 | content = base64.StdEncoding.EncodeToString([]byte(content)) 276 | } 277 | 278 | newSecret := &api.Secret{ 279 | Type: api.SecretTypeOpaque, 280 | ObjectMeta: meta, 281 | Data: map[string][]byte{ 282 | opts.KeyName: []byte(content), 283 | }, 284 | } 285 | 286 | updatedSecret, err := client.CoreV1().Secrets(opts.Namespace).Create( 287 | context.Background(), newSecret, metav1.CreateOptions{}, 288 | ) 289 | if err != nil { 290 | return fmt.Errorf("error creating secret %s in namespace %s: "+ 291 | "%v", opts.Name, opts.Namespace, err) 292 | } 293 | 294 | jsonSecret, _ := asJSON(jsonK8sObject{ 295 | TypeMeta: updatedSecret.TypeMeta, 296 | ObjectMeta: updatedSecret.ObjectMeta, 297 | }) 298 | logger.Infof("Created secret: %s", jsonSecret) 299 | 300 | return nil 301 | } 302 | 303 | // secretToString turns the raw bytes of a secret into a string, removing the 304 | // additional layer of base64 encoding if there is expected to be one. 305 | func secretToString(rawSecret []byte, doubleBase64 bool) (string, error) { 306 | content := string(rawSecret) 307 | if doubleBase64 { 308 | decoded, err := base64.StdEncoding.DecodeString(content) 309 | if err != nil { 310 | return "", fmt.Errorf("failed to base64 decode: %v", 311 | err) 312 | } 313 | 314 | content = string(decoded) 315 | } 316 | 317 | return content, nil 318 | } 319 | 320 | func getConfigMapK8s(client *kubernetes.Clientset, 321 | namespace, name string) (*api.ConfigMap, bool, error) { 322 | 323 | logger.Infof("Attempting to load configmap %s from namespace %s", name, namespace) 324 | configMap, err := client.CoreV1().ConfigMaps(namespace).Get( 325 | context.Background(), name, metav1.GetOptions{}, 326 | ) 327 | 328 | switch { 329 | case err == nil: 330 | logger.Infof("ConfigMap %s loaded successfully", name) 331 | return configMap, true, nil 332 | 333 | case errors.IsNotFound(err): 334 | logger.Infof("ConfigMap %s not found in namespace %s", name, namespace) 335 | return nil, false, nil 
336 | 337 | default: 338 | return nil, false, fmt.Errorf("error querying configmap "+ 339 | "existence: %v", err) 340 | } 341 | } 342 | 343 | func updateConfigMapValueK8s(client *kubernetes.Clientset, 344 | configMap *api.ConfigMap, opts *k8sObjectOptions, 345 | overwrite bool, content string) error { 346 | 347 | if configMap.Data == nil { 348 | logger.Infof("Data of configmap %s is empty, initializing", opts.Name) 349 | configMap.Data = make(map[string]string) 350 | } 351 | 352 | if _, exists := configMap.Data[opts.KeyName]; exists && !overwrite { 353 | return fmt.Errorf("key %s in configmap %s already exists", 354 | opts.KeyName, opts.Name) 355 | } 356 | 357 | logger.Infof("Attempting to update key %s of configmap %s in namespace %s", 358 | opts.KeyName, opts.Name, opts.Namespace) 359 | 360 | configMap.Data[opts.KeyName] = content 361 | updatedConfigMap, err := client.CoreV1().ConfigMaps(opts.Namespace).Update( 362 | context.Background(), configMap, metav1.UpdateOptions{}, 363 | ) 364 | if err != nil { 365 | return fmt.Errorf("error updating configmap %s in namespace %s: %v", 366 | opts.Name, opts.Namespace, err) 367 | } 368 | 369 | jsonConfigMap, _ := asJSON(jsonK8sObject{ 370 | TypeMeta: updatedConfigMap.TypeMeta, 371 | ObjectMeta: updatedConfigMap.ObjectMeta, 372 | }) 373 | logger.Infof("Updated configmap: %s", jsonConfigMap) 374 | 375 | return nil 376 | } 377 | 378 | func createConfigMapK8s(client *kubernetes.Clientset, 379 | opts *k8sObjectOptions, helm *helmOptions, content string) error { 380 | 381 | meta := metav1.ObjectMeta{ 382 | Name: opts.Name, 383 | } 384 | 385 | if helm != nil && helm.Annotate { 386 | meta.Labels = map[string]string{ 387 | "app.kubernetes.io/managed-by": "Helm", 388 | } 389 | meta.Annotations = map[string]string{ 390 | "helm.sh/resource-policy": helm.ResourcePolicy, 391 | "meta.helm.sh/release-name": helm.ReleaseName, 392 | "meta.helm.sh/release-namespace": opts.Namespace, 393 | } 394 | } 395 | 396 | newConfigMap := &api.ConfigMap{ 
397 | ObjectMeta: meta, 398 | Data: map[string]string{ 399 | opts.KeyName: content, 400 | }, 401 | } 402 | 403 | updatedConfigMap, err := client.CoreV1().ConfigMaps(opts.Namespace).Create( 404 | context.Background(), newConfigMap, metav1.CreateOptions{}, 405 | ) 406 | if err != nil { 407 | return fmt.Errorf("error creating configmap %s in namespace %s: %v", 408 | opts.Name, opts.Namespace, err) 409 | } 410 | 411 | jsonConfigMap, _ := asJSON(jsonK8sObject{ 412 | TypeMeta: updatedConfigMap.TypeMeta, 413 | ObjectMeta: updatedConfigMap.ObjectMeta, 414 | }) 415 | logger.Infof("Created configmap: %s", jsonConfigMap) 416 | 417 | return nil 418 | } 419 | -------------------------------------------------------------------------------- /k8s_test.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "testing" 5 | 6 | "encoding/base64" 7 | "github.com/stretchr/testify/require" 8 | ) 9 | 10 | var ( 11 | dummyString = []byte("This is a simple string") 12 | dummyStringB64 = base64.StdEncoding.EncodeToString(dummyString) 13 | dummyStringNewline = []byte("This is a simple string newline\n") 14 | dummyStringNewlineB64 = base64.StdEncoding.EncodeToString( 15 | dummyStringNewline, 16 | ) 17 | ) 18 | 19 | // TestSecretToString makes sure that a raw secret can be turned into a string 20 | // correctly. 
21 | func TestSecretToString(t *testing.T) { 22 | testCases := []struct { 23 | name string 24 | input []byte 25 | base64 bool 26 | expectErr bool 27 | result string 28 | }{{ 29 | name: "plain string", 30 | input: dummyString, 31 | result: string(dummyString), 32 | }, { 33 | name: "plain base64", 34 | input: []byte(dummyStringB64), 35 | base64: true, 36 | result: string(dummyString), 37 | }, { 38 | name: "invalid base64", 39 | input: dummyString, 40 | base64: true, 41 | expectErr: true, 42 | }, { 43 | name: "plain base64 with newline in encoded", 44 | input: []byte(dummyStringB64 + "\r\n"), 45 | base64: true, 46 | result: string(dummyString), 47 | }, { 48 | name: "string with newline", 49 | input: dummyStringNewline, 50 | result: string(dummyStringNewline), 51 | }, { 52 | name: "base64 with newline in original", 53 | input: []byte(dummyStringNewlineB64), 54 | base64: true, 55 | result: string(dummyStringNewline), 56 | }, { 57 | name: "base64 with newline in encoded", 58 | input: []byte(dummyStringNewlineB64 + "\r\n"), 59 | base64: true, 60 | result: string(dummyStringNewline), 61 | }} 62 | 63 | for _, tc := range testCases { 64 | tc := tc 65 | 66 | t.Run(tc.name, func(tt *testing.T) { 67 | result, err := secretToString(tc.input, tc.base64) 68 | 69 | if tc.expectErr { 70 | require.Error(t, err) 71 | return 72 | } 73 | 74 | require.NoError(t, err) 75 | require.Equal(t, tc.result, result) 76 | }) 77 | } 78 | } 79 | -------------------------------------------------------------------------------- /log.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "os" 5 | 6 | "github.com/btcsuite/btclog/v2" 7 | ) 8 | 9 | var ( 10 | // backend is the logging backend used to create all loggers. 11 | backend = btclog.NewDefaultHandler(os.Stderr) 12 | 13 | // logger is logger for the main package of the lndinit tool. 
14 | logger = btclog.NewSLogger(backend).WithPrefix("LNDINIT") 15 | ) 16 | 17 | // NewSubLogger creates a new sub logger with the given prefix. 18 | func NewSubLogger(prefix string) btclog.Logger { 19 | return logger.SubSystem(prefix) 20 | } 21 | -------------------------------------------------------------------------------- /main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "encoding/json" 5 | "fmt" 6 | "os" 7 | "strings" 8 | 9 | "github.com/btcsuite/btclog/v2" 10 | "github.com/jessevdk/go-flags" 11 | "github.com/lightningnetwork/lnd/lncfg" 12 | "github.com/lightningnetwork/lnd/lnrpc" 13 | ) 14 | 15 | const ( 16 | ExitCodeSuccess int = 0 17 | ExitCodeTargetExists int = 128 18 | ExitCodeInputMissing int = 129 19 | ExitCodeFailure int = 255 20 | 21 | outputFormatRaw = "raw" 22 | outputFormatJSON = "json" 23 | 24 | storageFile = "file" 25 | storageK8s = "k8s" 26 | 27 | errTargetExists = "target exists error" 28 | errInputMissing = "input missing error" 29 | 30 | defaultRPCPort = "10009" 31 | defaultRPCServer = "localhost:" + defaultRPCPort 32 | ) 33 | 34 | type globalOptions struct { 35 | ErrorOnExisting bool `long:"error-on-existing" short:"e" description:"Exit with code EXIT_CODE_TARGET_EXISTS (128) instead of 0 if the result of an action is already present"` 36 | DebugLevel string `long:"debuglevel" short:"d" description:"Set the log level (Off, Critical, Error, Warn, Info, Debug, Trace)"` 37 | Verbose bool `long:"verbose" short:"v" description:"Turn on logging to stderr"` 38 | } 39 | 40 | func main() { 41 | globalOpts := &globalOptions{} 42 | 43 | // Set up a very minimal logging to stderr if verbose mode is on. To get 44 | // just the global options, we do a pre-parsing without any commands 45 | // registered yet. We ignore any errors as that'll be handled later. 
46 | _, _ = flags.NewParser(globalOpts, flags.IgnoreUnknown).Parse() 47 | if globalOpts.Verbose { 48 | globalOpts.DebugLevel = "error" 49 | } 50 | 51 | logger.Infof("Version %s commit=%s, debuglevel=%s", Version(), Commit, 52 | globalOpts.DebugLevel) 53 | 54 | if globalOpts.DebugLevel != "" { 55 | level, ok := btclog.LevelFromString(globalOpts.DebugLevel) 56 | if !ok { 57 | logger.Errorf("Invalid debug level %s, "+ 58 | "using info", globalOpts.DebugLevel) 59 | level = btclog.LevelInfo 60 | } 61 | 62 | logger.SetLevel(level) 63 | } 64 | 65 | parser := flags.NewParser( 66 | globalOpts, flags.HelpFlag|flags.PassDoubleDash, 67 | ) 68 | if err := registerCommands(parser); err != nil { 69 | logger.Errorf("Command parser error: %v", err) 70 | os.Exit(ExitCodeFailure) 71 | } 72 | 73 | if _, err := parser.Parse(); err != nil { 74 | flagErr, isFlagErr := err.(*flags.Error) 75 | switch { 76 | case isFlagErr: 77 | if flagErr.Type != flags.ErrHelp { 78 | // Print error if not due to help request. 79 | logger.Errorf("Config error: %v", err) 80 | os.Exit(ExitCodeFailure) 81 | } else { 82 | // Help was requested, print without any log 83 | // prefix and exit normally. 84 | _, _ = fmt.Fprintln(os.Stderr, flagErr.Message) 85 | os.Exit(ExitCodeSuccess) 86 | } 87 | 88 | // Ugh, can't use errors.Is() here because the flag parser does 89 | // not wrap the returned errors properly. 90 | case strings.Contains(err.Error(), errTargetExists): 91 | // Respect the user's choice of verbose/non-verbose 92 | // logging here. The default is quietly aborting if the 93 | // target already exists. 94 | if globalOpts.ErrorOnExisting { 95 | logger.Errorf("Failing on state error: %v", err) 96 | os.Exit(ExitCodeTargetExists) 97 | } 98 | 99 | logger.Errorf("Ignoring non-fatal error: %v", err) 100 | os.Exit(ExitCodeSuccess) 101 | 102 | // Ugh, can't use errors.Is() here because the flag parser does 103 | // not wrap the returned errors properly. 
104 | case strings.Contains(err.Error(), errInputMissing): 105 | logger.Errorf("Input error: %v", err) 106 | os.Exit(ExitCodeInputMissing) 107 | 108 | default: 109 | logger.Errorf("Runtime error: %v", err) 110 | os.Exit(ExitCodeFailure) 111 | } 112 | } 113 | 114 | os.Exit(ExitCodeSuccess) 115 | } 116 | 117 | type subCommand interface { 118 | Register(parser *flags.Parser) error 119 | } 120 | 121 | func registerCommands(parser *flags.Parser) error { 122 | commands := []subCommand{ 123 | newGenPasswordCommand(), 124 | newGenSeedCommand(), 125 | newInitWalletCommand(), 126 | newLoadSecretCommand(), 127 | newStoreSecretCommand(), 128 | newStoreConfigmapCommand(), 129 | newWaitReadyCommand(), 130 | newMigrateDBCommand(), 131 | } 132 | 133 | for _, command := range commands { 134 | if err := command.Register(parser); err != nil { 135 | return err 136 | } 137 | } 138 | 139 | return nil 140 | } 141 | 142 | func asJSON(resp interface{}) (string, error) { 143 | b, err := json.Marshal(resp) 144 | if err != nil { 145 | return "", err 146 | } 147 | 148 | return string(b), nil 149 | } 150 | 151 | func readFile(fileName string) (string, error) { 152 | fileName = lncfg.CleanAndExpandPath(fileName) 153 | if !lnrpc.FileExists(fileName) { 154 | return "", fmt.Errorf("input file %s missing: %v", fileName, 155 | errInputMissing) 156 | } 157 | 158 | byteContent, err := os.ReadFile(fileName) 159 | if err != nil { 160 | return "", fmt.Errorf("error reading file %s: %v", fileName, 161 | err) 162 | } 163 | 164 | return string(byteContent), nil 165 | } 166 | 167 | func stripNewline(str string) string { 168 | return strings.TrimRight(strings.TrimRight(str, "\r\n"), "\n") 169 | } 170 | -------------------------------------------------------------------------------- /migratekvdb/bucket_path.go: -------------------------------------------------------------------------------- 1 | package migratekvdb 2 | 3 | import ( 4 | "bytes" 5 | "encoding/json" 6 | "fmt" 7 | "strings" 8 | ) 9 | 10 | // 
BucketPath represents a path in the database with both string and raw 11 | // representations. 12 | type BucketPath struct { 13 | // StringPath is the hex-encoded path with / delimiters for logging. 14 | StringPath string 15 | 16 | // RawPath contains the original bucket names as raw bytes. 17 | RawPath [][]byte 18 | } 19 | 20 | // NewBucketPath creates a bucket path from raw bucket names. 21 | func NewBucketPath(buckets [][]byte) BucketPath { 22 | // Create hex encoded version for string representation. 23 | stringParts := make([]string, len(buckets)) 24 | for i, bucket := range buckets { 25 | stringParts[i] = loggableKeyName(bucket) 26 | } 27 | 28 | return BucketPath{ 29 | StringPath: strings.Join(stringParts, "/"), 30 | RawPath: buckets, 31 | } 32 | } 33 | 34 | // HasPath returns true if the BucketPath contains any path elements. 35 | func (bp BucketPath) HasPath() bool { 36 | return len(bp.RawPath) > 0 37 | } 38 | 39 | // AppendBucket creates a new BucketPath with an additional bucket. 40 | func (bp BucketPath) AppendBucket(bucket []byte) BucketPath { 41 | newRawPath := make([][]byte, len(bp.RawPath)+1) 42 | copy(newRawPath, bp.RawPath) 43 | newRawPath[len(bp.RawPath)] = bucket 44 | 45 | // Create new string path. 46 | newStringPath := bp.StringPath 47 | if newStringPath != "" { 48 | newStringPath += "/" 49 | } 50 | newStringPath += loggableKeyName(bucket) 51 | 52 | return BucketPath{ 53 | StringPath: newStringPath, 54 | RawPath: newRawPath, 55 | } 56 | } 57 | 58 | // Equal compares two bucket paths for equality. 59 | func (bp BucketPath) Equal(other BucketPath) bool { 60 | if len(bp.RawPath) != len(other.RawPath) { 61 | return false 62 | } 63 | 64 | for i := range bp.RawPath { 65 | if !bytes.Equal(bp.RawPath[i], other.RawPath[i]) { 66 | return false 67 | } 68 | } 69 | 70 | return true 71 | } 72 | 73 | // String implements the Stringer interface. 
74 | func (bp BucketPath) String() string { 75 | return bp.StringPath 76 | } 77 | 78 | // MarshalJSON implements the json.Marshaler interface by marshaling 79 | // the raw byte arrays directly. 80 | func (bp BucketPath) MarshalJSON() ([]byte, error) { 81 | // Marshal the raw paths directly. 82 | return json.Marshal(bp.RawPath) 83 | } 84 | 85 | // UnmarshalJSON implements the json.Unmarshaler interface by unmarshaling 86 | // directly into the raw byte arrays. 87 | func (bp *BucketPath) UnmarshalJSON(data []byte) error { 88 | // Unmarshal into raw paths. 89 | var rawPath [][]byte 90 | if err := json.Unmarshal(data, &rawPath); err != nil { 91 | return fmt.Errorf("failed to unmarshal bucket path: %w", err) 92 | } 93 | 94 | // Create the string path from the raw paths. 95 | stringParts := make([]string, len(rawPath)) 96 | for i, part := range rawPath { 97 | stringParts[i] = loggableKeyName(part) 98 | } 99 | 100 | // Set both representations. 101 | bp.RawPath = rawPath 102 | bp.StringPath = strings.Join(stringParts, "/") 103 | return nil 104 | } 105 | -------------------------------------------------------------------------------- /migratekvdb/errors.go: -------------------------------------------------------------------------------- 1 | package migratekvdb 2 | 3 | import "errors" 4 | 5 | var ( 6 | // errNoMetaBucket is returned when the migration metadata bucket is 7 | // not found. 8 | errNoMetaBucket = errors.New("migration metadata bucket not " + 9 | "found") 10 | 11 | // errNoStateFound is returned when the migration state is not found. 12 | errNoStateFound = errors.New("no migration state found") 13 | 14 | // errChunkSizeExceeded is returned when the chunk size limit is reached 15 | // during migration, indicating that the migration should continue 16 | // with a new transaction. It should close the reading and write 17 | // transaction and continue where it stopped. 
18 | errChunkSizeExceeded = errors.New("chunk size exceeded") 19 | 20 | // errMigrationComplete is returned when the migration is already 21 | // completed. 22 | errMigrationComplete = errors.New("migration already completed") 23 | 24 | // errVerificationComplete is returned when the verification is already 25 | // completed. 26 | errVerificationComplete = errors.New("verification already completed") 27 | ) 28 | -------------------------------------------------------------------------------- /migratekvdb/helper.go: -------------------------------------------------------------------------------- 1 | package migratekvdb 2 | 3 | import ( 4 | "encoding/hex" 5 | ) 6 | 7 | // loggableKeyName returns a string representation of a bucket key suitable for 8 | // logging. 9 | func loggableKeyName(key []byte) string { 10 | // For known bucket names, return as string if printable ASCII. 11 | if isPrintableASCII(key) { 12 | return string(key) 13 | } 14 | 15 | // Otherwise return hex encoding. 16 | return "0x" + hex.EncodeToString(key) 17 | } 18 | 19 | // hasSpecialChars returns true if any of the characters in the given string 20 | // cannot be printed. 21 | func isPrintableASCII(b []byte) bool { 22 | for _, c := range b { 23 | if c < 32 || c > 126 { 24 | return false 25 | } 26 | } 27 | return true 28 | } 29 | -------------------------------------------------------------------------------- /migratekvdb/migration_test.go: -------------------------------------------------------------------------------- 1 | package migratekvdb 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "os" 7 | "path/filepath" 8 | "strconv" 9 | "testing" 10 | "time" 11 | 12 | "github.com/btcsuite/btclog/v2" 13 | "github.com/lightningnetwork/lnd/kvdb" 14 | "github.com/stretchr/testify/require" 15 | "go.etcd.io/bbolt" 16 | ) 17 | 18 | // TestMigration tests the migration of a test database including the 19 | // verification of the migration. 
20 | func TestMigration(t *testing.T) { 21 | // Create temporary directory for test databases. 22 | tempDir, err := os.MkdirTemp("", "boltdb_migration_test") 23 | require.NoError(t, err) 24 | defer os.RemoveAll(tempDir) 25 | 26 | // Create source and target database paths. 27 | sourceDBPath := filepath.Join(tempDir, "source.db") 28 | targetDBPath := filepath.Join(tempDir, "target.db") 29 | 30 | // Create and populate source database. 31 | sourceDB, err := createTestDatabase(sourceDBPath) 32 | require.NoError(t, err) 33 | defer sourceDB.Close() 34 | 35 | // Cleanup the test database files. 36 | defer os.Remove(sourceDBPath) 37 | defer os.Remove(targetDBPath) 38 | 39 | const ( 40 | noFreelistSync = true 41 | timeout = time.Minute 42 | readonly = false 43 | ) 44 | 45 | args := []interface{}{ 46 | targetDBPath, noFreelistSync, timeout, readonly, 47 | } 48 | backend := kvdb.BoltBackendName 49 | 50 | // Create empty target database. 51 | targetDB, err := kvdb.Create(backend, args...) 52 | require.NoError(t, err) 53 | defer targetDB.Close() 54 | 55 | consoleLogHandler := btclog.NewDefaultHandler( 56 | os.Stdout, 57 | ) 58 | consoleLogger := btclog.NewSLogger(consoleLogHandler) 59 | consoleLogger.SetLevel(btclog.LevelDebug) 60 | 61 | dbPath := filepath.Join(tempDir, "migration-meta.db") 62 | metaDb, err := bbolt.Open(dbPath, 0600, nil) 63 | require.NoError(t, err) 64 | defer metaDb.Close() 65 | 66 | // Configure and run migration. 67 | cfg := Config{ 68 | // Chunksize in bytes. 
69 | ChunkSize: 2, 70 | Logger: consoleLogger, 71 | MetaDB: metaDb, 72 | } 73 | 74 | migrator, err := New(cfg) 75 | require.NoError(t, err) 76 | 77 | err = migrator.Migrate(context.Background(), sourceDB, targetDB) 78 | require.NoError(t, err) 79 | 80 | err = migrator.VerifyMigration(context.Background(), sourceDB, targetDB, false) 81 | require.NoError(t, err) 82 | 83 | // Verify migration by comparing values in the source and target 84 | // databases as a sanity check that the previous hash verification has 85 | // no errors. 86 | err = verifyDatabases(t, sourceDB, targetDB) 87 | require.NoError(t, err) 88 | } 89 | 90 | // createTestDatabase creates a test database with some test data. 91 | func createTestDatabase(dbPath string) (kvdb.Backend, error) { 92 | 93 | fmt.Println("creating test database") 94 | 95 | args := []interface{}{ 96 | dbPath, true, time.Minute, false, 97 | } 98 | backend := kvdb.BoltBackendName 99 | 100 | db, err := kvdb.Create(backend, args...) 101 | if err != nil { 102 | return nil, err 103 | } 104 | 105 | // Create test data structure. 106 | err = db.Update(func(tx kvdb.RwTx) error { 107 | fmt.Println("Creating test data structure...") 108 | // Create root bucket "accounts". 109 | accounts, err := tx.CreateTopLevelBucket([]byte("accounts")) 110 | if err != nil { 111 | fmt.Print("bucket creation failed.") 112 | } 113 | 114 | // Create nested buckets and add some key-value pairs. 115 | for i := 1; i <= 3; i++ { 116 | userBucket, err := accounts.CreateBucketIfNotExists( 117 | []byte("user" + strconv.Itoa(i)), 118 | ) 119 | if err != nil { 120 | return err 121 | } 122 | 123 | err = userBucket.Put([]byte("name"), []byte("Alice")) 124 | if err != nil { 125 | return err 126 | } 127 | 128 | err = userBucket.Put( 129 | []byte("email"), 130 | []byte("alice@example.com"), 131 | ) 132 | if err != nil { 133 | return err 134 | } 135 | 136 | // Create a nested bucket for transactions. 
137 | txBucket, err := userBucket.CreateBucketIfNotExists( 138 | []byte("transactions"), 139 | ) 140 | if err != nil { 141 | return err 142 | } 143 | 144 | err = txBucket.Put([]byte("tx1"), []byte("100 BTC")) 145 | if err != nil { 146 | return err 147 | } 148 | } 149 | 150 | return nil 151 | }, func() {}) 152 | 153 | return db, err 154 | } 155 | 156 | // verifyDatabases verifies the migration by comparing the values in the 157 | // source and target databases. This checks every value to make sure we do not 158 | // have an error in our resume logic. So it walks the entire database without 159 | // any chunking, so we have a redundant check. 160 | func verifyDatabases(t *testing.T, sourceDB, targetDB kvdb.Backend) error { 161 | return sourceDB.View(func(sourceTx kvdb.RTx) error { 162 | return targetDB.View(func(targetTx kvdb.RTx) error { 163 | // Helper function to compare buckets recursively. 164 | var compareBuckets func(source, target kvdb.RBucket) error 165 | compareBuckets = func(source, target kvdb.RBucket) error { 166 | // Compare all key-value pairs. 167 | return source.ForEach(func(k, v []byte) error { 168 | if v == nil { 169 | // This is a nested bucket. 170 | sourceBucket := source.NestedReadBucket(k) 171 | targetBucket := target.NestedReadBucket(k) 172 | require.NotNil(t, targetBucket) 173 | return compareBuckets(sourceBucket, targetBucket) 174 | } 175 | 176 | // This is a key-value pair. 177 | targetValue := target.Get(k) 178 | require.Equal(t, v, targetValue) 179 | return nil 180 | }) 181 | } 182 | 183 | // Compare root buckets. 
184 | return sourceTx.ForEachBucket(func(name []byte) error { 185 | sourceBucket := sourceTx.ReadBucket(name) 186 | targetBucket := targetTx.ReadBucket(name) 187 | require.NotNil(t, targetBucket) 188 | return compareBuckets(sourceBucket, targetBucket) 189 | }) 190 | }, func() {}) 191 | }, func() {}) 192 | } 193 | -------------------------------------------------------------------------------- /migratekvdb/state.go: -------------------------------------------------------------------------------- 1 | package migratekvdb 2 | 3 | import ( 4 | "encoding/json" 5 | "fmt" 6 | "time" 7 | 8 | "go.etcd.io/bbolt" 9 | ) 10 | 11 | // persistedState tracks the migration or verification progress for 12 | // resumability of the process. 13 | type persistedState struct { 14 | // Currently processing bucket 15 | CurrentBucketPath BucketPath `json:"current_bucket_path"` 16 | 17 | // Last key processed in current bucket 18 | LastUnprocessedKey []byte `json:"last_unprocessed_key"` 19 | 20 | // Total processed keys 21 | ProcessedKeys int64 `json:"processed_keys"` 22 | 23 | // Number of buckets processed 24 | ProcessedBuckets int64 `json:"processed_buckets"` 25 | 26 | // Timestamp of the migration only set when the migration is started 27 | // from the beginning. In case of a resume the start time will still 28 | // be the initial time when the migration was started. 29 | StartTime time.Time `json:"start_time"` 30 | 31 | // Finished is set to true in case the verification is finished. 32 | Finished bool `json:"finished"` 33 | 34 | // FinishedTime is the time when the verification is finished. 35 | FinishedTime time.Time `json:"finished_time"` 36 | } 37 | 38 | // String returns a string representation of the persisted state. 
39 | func (s *persistedState) String() string { 40 | return fmt.Sprintf("Path: %s, LastKey: %x, ProcessedKeys: %d, "+ 41 | "ProcessedBuckets: %d, Time: %s", 42 | s.CurrentBucketPath, 43 | s.LastUnprocessedKey, 44 | s.ProcessedKeys, 45 | s.ProcessedBuckets, 46 | s.StartTime.Format(time.RFC3339), 47 | ) 48 | } 49 | 50 | // newPersistedState creates a new persisted state with the required chunk size. 51 | // The chunk size needs to be persisted because the verification depends on the 52 | // same chunk size. 53 | func newPersistedState() persistedState { 54 | return persistedState{ 55 | CurrentBucketPath: NewBucketPath([][]byte{}), 56 | LastUnprocessedKey: nil, 57 | StartTime: time.Now(), 58 | } 59 | } 60 | 61 | // MigrationState holds migration-specific state for resumability of the 62 | // process. 63 | type MigrationState struct { 64 | persistedState 65 | currentChunkBytes uint64 66 | resuming bool 67 | 68 | db *bbolt.DB 69 | } 70 | 71 | // newMigrationState creates a new migration state. 72 | func newMigrationState(db *bbolt.DB) *MigrationState { 73 | return &MigrationState{ 74 | db: db, 75 | } 76 | } 77 | 78 | // read reads the migration state from the database. 
func (m *MigrationState) read() error {
	return m.db.View(func(tx *bbolt.Tx) error {
		metaBucket := tx.Bucket([]byte(migrationMetaBucket))
		if metaBucket == nil {
			return errNoMetaBucket
		}

		stateBytes := metaBucket.Get([]byte(migrationStateKey))
		if stateBytes == nil {
			return errNoStateFound
		}

		// Decode the JSON snapshot into a fresh value before adopting
		// it, so a failed unmarshal leaves the in-memory state intact.
		var state persistedState
		if err := json.Unmarshal(stateBytes, &state); err != nil {
			return fmt.Errorf("failed to unmarshal state: %w", err)
		}

		m.persistedState = state

		return nil
	})
}

// write persists the current migration state to the meta database, creating
// the metadata bucket if it does not exist yet.
func (m *MigrationState) write() error {
	return m.db.Update(func(tx *bbolt.Tx) error {
		metaBucket, err := tx.CreateBucketIfNotExists(
			[]byte(migrationMetaBucket),
		)
		if err != nil {
			return fmt.Errorf("failed to create meta "+
				"bucket: %w", err)
		}

		encoded, err := json.Marshal(m.persistedState)
		if err != nil {
			return err
		}

		return metaBucket.Put([]byte(migrationStateKey), encoded)
	})
}

// setFinalState sets the final state of the migration. The sentinel path and
// key "complete" mark the migration as done for subsequent resume attempts.
func (m *MigrationState) setFinalState() {
	m.persistedState.CurrentBucketPath = NewBucketPath([][]byte{
		[]byte("complete"),
	})
	m.persistedState.LastUnprocessedKey = []byte("complete")
	m.currentChunkBytes = 0
	m.persistedState.Finished = true
	m.persistedState.FinishedTime = time.Now()
}

// newChunk creates a new chunk for the migration by resetting the
// non-persistent state.
func (m *MigrationState) newChunk() {
	m.currentChunkBytes = 0
	m.resuming = true
}

// VerificationState holds verification-specific state for resumability of the
// process.
141 | type VerificationState struct { 142 | persistedState persistedState 143 | currentChunkBytes uint64 144 | resuming bool 145 | 146 | db *bbolt.DB 147 | } 148 | 149 | // newVerificationState creates a new verification state. 150 | func newVerificationState(db *bbolt.DB) *VerificationState { 151 | return &VerificationState{ 152 | db: db, 153 | } 154 | } 155 | 156 | // read reads the verification state from the database. 157 | func (v *VerificationState) read() error { 158 | return v.db.View(func(tx *bbolt.Tx) error { 159 | metaBucket := tx.Bucket([]byte(verificationMetaBucket)) 160 | if metaBucket == nil { 161 | return errNoMetaBucket 162 | } 163 | 164 | stateBytes := metaBucket.Get([]byte(verificationStateKey)) 165 | if stateBytes == nil { 166 | return errNoStateFound 167 | } 168 | 169 | var state persistedState 170 | if err := json.Unmarshal(stateBytes, &state); err != nil { 171 | return fmt.Errorf("failed to unmarshal state: %w", err) 172 | } 173 | 174 | v.persistedState = state 175 | return nil 176 | }) 177 | } 178 | 179 | // write writes the verification state to the database. 180 | func (v *VerificationState) write() error { 181 | return v.db.Update(func(tx *bbolt.Tx) error { 182 | metaBucket, err := tx.CreateBucketIfNotExists( 183 | []byte(verificationMetaBucket), 184 | ) 185 | if err != nil { 186 | return fmt.Errorf("failed to get meta bucket: %w", err) 187 | } 188 | 189 | encoded, err := json.Marshal(v.persistedState) 190 | if err != nil { 191 | return err 192 | } 193 | 194 | return metaBucket.Put([]byte(verificationStateKey), encoded) 195 | }) 196 | } 197 | 198 | // setFinalState sets the final state of the verification. 
func (v *VerificationState) setFinalState() {
	// The sentinel path and key "complete" mark the verification as done
	// for subsequent resume attempts.
	v.persistedState.CurrentBucketPath = NewBucketPath([][]byte{
		[]byte("complete"),
	})
	v.persistedState.LastUnprocessedKey = []byte("complete")
	v.currentChunkBytes = 0
	v.persistedState.Finished = true
	v.persistedState.FinishedTime = time.Now()
}
-------------------------------------------------------------------------------- /release.sh: --------------------------------------------------------------------------------
#!/bin/bash

# Simple bash script to build basic lndinit for all the platforms we support
# with the golang cross-compiler.
#
# Copyright (c) 2016 Company 0, LLC.
# Use of this source code is governed by the ISC
# license.

# Exit immediately on any command failure.
set -e

PKG="github.com/lightninglabs/lndinit"
PACKAGE=lndinit

# green prints one line of green text (if the terminal supports it).
function green() {
  echo -e "\e[0;32m${1}\e[0m"
}

# red prints one line of red text (if the terminal supports it).
function red() {
  echo -e "\e[0;31m${1}\e[0m"
}

# build_release builds the actual release binaries.
# arguments:
#   $1 - version tag, $2 - space-separated os-arch build systems,
#   $3 - go ldflags, $4 - go build tags.
function build_release() {
  local tag=$1
  local sys=$2
  local ldflags=$3
  local tags=$4

  green " - Packaging vendor"
  go mod vendor
  tar -czf vendor.tar.gz vendor

  maindir=$PACKAGE-$tag
  mkdir -p $maindir

  cp vendor.tar.gz $maindir/
  rm vendor.tar.gz
  rm -r vendor

  package_source="${maindir}/${PACKAGE}-source-${tag}.tar"
  git archive -o "${package_source}" HEAD
  # NOTE(review): "gzip -f FILE" already writes FILE.gz itself; the stdout
  # redirect below only truncates the same file beforehand and looks
  # redundant — confirm before changing.
  gzip -f "${package_source}" >"${package_source}.gz"

  cd "${maindir}"

  # Build every requested os-arch combination in its own directory.
  for i in $sys; do
    os=$(echo $i | cut -f1 -d-)
    arch=$(echo $i | cut -f2 -d-)
    arm=

    # armv6/armv7 map to GOARCH=arm plus an explicit GOARM level.
    if [[ $arch == "armv6" ]]; then
      arch=arm
      arm=6
    elif [[ $arch == "armv7" ]]; then
      arch=arm
      arm=7
    fi

    dir="${PACKAGE}-${i}-${tag}"
    mkdir "${dir}"
    pushd "${dir}"

    green " - Building: ${os} ${arch} ${arm}"
    env CGO_ENABLED=0 GOOS=$os GOARCH=$arch GOARM=$arm go build -v -trimpath -ldflags="${ldflags}" -tags="${tags}" ${PKG}
    popd

    # Windows artifacts ship as zip, everything else as tar.gz.
    if [[ $os == "windows" ]]; then
      zip -r "${dir}.zip" "${dir}"
    else
      tar -cvzf "${dir}.tar.gz" "${dir}"
    fi

    rm -r "${dir}"
  done

  # Write SHA256 sums of all produced artifacts.
  shasum -a 256 * >manifest-$tag.txt
}

# usage prints the usage of the whole script.
# NOTE(review): the argument placeholders after "build-release" appear to have
# been lost (likely stripped angle brackets) — confirm against the upstream
# script.
function usage() {
  red "Usage: "
  red "release.sh build-release "
}

# Whatever sub command is passed in, we need at least 2 arguments.
if [ "$#" -lt 2 ]; then
  usage
  exit 1
fi

# Extract the sub command and remove it from the list of parameters by shifting
# them to the left.
SUBCOMMAND=$1
shift

# Call the function corresponding to the specified sub command or print the
# usage if the sub command was not found.
102 | case $SUBCOMMAND in 103 | build-release) 104 | green "Building release" 105 | build_release "$@" 106 | ;; 107 | *) 108 | usage 109 | exit 1 110 | ;; 111 | esac 112 | -------------------------------------------------------------------------------- /testdata/data/chain/bitcoin/regtest/macaroons.db: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lightninglabs/lndinit/076ade53c3548d703073c400f3aaa13715ae348d/testdata/data/chain/bitcoin/regtest/macaroons.db -------------------------------------------------------------------------------- /testdata/data/chain/bitcoin/regtest/wallet.db: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lightninglabs/lndinit/076ade53c3548d703073c400f3aaa13715ae348d/testdata/data/chain/bitcoin/regtest/wallet.db -------------------------------------------------------------------------------- /testdata/data/graph/regtest/channel.db: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lightninglabs/lndinit/076ade53c3548d703073c400f3aaa13715ae348d/testdata/data/graph/regtest/channel.db -------------------------------------------------------------------------------- /testdata/data/graph/regtest/sphinxreplay.db: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lightninglabs/lndinit/076ade53c3548d703073c400f3aaa13715ae348d/testdata/data/graph/regtest/sphinxreplay.db -------------------------------------------------------------------------------- /testdata/data/graph/regtest/wtclient.db: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lightninglabs/lndinit/076ade53c3548d703073c400f3aaa13715ae348d/testdata/data/graph/regtest/wtclient.db -------------------------------------------------------------------------------- 
/testdata/data/watchtower/bitcoin/regtest/watchtower.db: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lightninglabs/lndinit/076ade53c3548d703073c400f3aaa13715ae348d/testdata/data/watchtower/bitcoin/regtest/watchtower.db -------------------------------------------------------------------------------- /tools/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM golang:1.23.6 2 | 3 | RUN apt-get update && apt-get install -y git 4 | ENV GOCACHE=/tmp/build/.cache 5 | ENV GOMODCACHE=/tmp/build/.modcache 6 | ENV GOFLAGS="-buildvcs=false" 7 | 8 | COPY . /tmp/tools 9 | 10 | RUN cd /tmp \ 11 | && mkdir -p /tmp/build/.cache \ 12 | && mkdir -p /tmp/build/.modcache \ 13 | && cd /tmp/tools \ 14 | && go install -trimpath github.com/golangci/golangci-lint/cmd/golangci-lint \ 15 | && chmod -R 777 /tmp/build/ 16 | 17 | WORKDIR /build 18 | -------------------------------------------------------------------------------- /tools/go.mod: -------------------------------------------------------------------------------- 1 | module github.com/lightninglabs/lndinit/tools 2 | 3 | go 1.23.6 4 | 5 | require ( 6 | github.com/golangci/golangci-lint v1.64.8 7 | github.com/rinchsan/gosimports v0.1.5 8 | ) 9 | 10 | require ( 11 | 4d63.com/gocheckcompilerdirectives v1.3.0 // indirect 12 | 4d63.com/gochecknoglobals v0.2.2 // indirect 13 | github.com/4meepo/tagalign v1.4.2 // indirect 14 | github.com/Abirdcfly/dupword v0.1.3 // indirect 15 | github.com/Antonboom/errname v1.0.0 // indirect 16 | github.com/Antonboom/nilnil v1.0.1 // indirect 17 | github.com/Antonboom/testifylint v1.5.2 // indirect 18 | github.com/BurntSushi/toml v1.4.1-0.20240526193622-a339e1f7089c // indirect 19 | github.com/Crocmagnon/fatcontext v0.7.1 // indirect 20 | github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24 // indirect 21 | github.com/GaijinEntertainment/go-exhaustruct/v3 v3.3.1 // indirect 22 | 
github.com/Masterminds/semver/v3 v3.3.0 // indirect 23 | github.com/OpenPeeDeeP/depguard/v2 v2.2.1 // indirect 24 | github.com/alecthomas/go-check-sumtype v0.3.1 // indirect 25 | github.com/alexkohler/nakedret/v2 v2.0.5 // indirect 26 | github.com/alexkohler/prealloc v1.0.0 // indirect 27 | github.com/alingse/asasalint v0.0.11 // indirect 28 | github.com/alingse/nilnesserr v0.1.2 // indirect 29 | github.com/ashanbrown/forbidigo v1.6.0 // indirect 30 | github.com/ashanbrown/makezero v1.2.0 // indirect 31 | github.com/beorn7/perks v1.0.1 // indirect 32 | github.com/bkielbasa/cyclop v1.2.3 // indirect 33 | github.com/blizzy78/varnamelen v0.8.0 // indirect 34 | github.com/bombsimon/wsl/v4 v4.5.0 // indirect 35 | github.com/breml/bidichk v0.3.2 // indirect 36 | github.com/breml/errchkjson v0.4.0 // indirect 37 | github.com/butuzov/ireturn v0.3.1 // indirect 38 | github.com/butuzov/mirror v1.3.0 // indirect 39 | github.com/catenacyber/perfsprint v0.8.2 // indirect 40 | github.com/ccojocar/zxcvbn-go v1.0.2 // indirect 41 | github.com/cespare/xxhash/v2 v2.3.0 // indirect 42 | github.com/charithe/durationcheck v0.0.10 // indirect 43 | github.com/chavacava/garif v0.1.0 // indirect 44 | github.com/ckaznocha/intrange v0.3.0 // indirect 45 | github.com/curioswitch/go-reassign v0.3.0 // indirect 46 | github.com/daixiang0/gci v0.13.5 // indirect 47 | github.com/davecgh/go-spew v1.1.1 // indirect 48 | github.com/denis-tingaikin/go-header v0.5.0 // indirect 49 | github.com/ettle/strcase v0.2.0 // indirect 50 | github.com/fatih/color v1.18.0 // indirect 51 | github.com/fatih/structtag v1.2.0 // indirect 52 | github.com/firefart/nonamedreturns v1.0.5 // indirect 53 | github.com/fsnotify/fsnotify v1.5.4 // indirect 54 | github.com/fzipp/gocyclo v0.6.0 // indirect 55 | github.com/ghostiam/protogetter v0.3.9 // indirect 56 | github.com/go-critic/go-critic v0.12.0 // indirect 57 | github.com/go-toolsmith/astcast v1.1.0 // indirect 58 | github.com/go-toolsmith/astcopy v1.1.0 // indirect 
59 | github.com/go-toolsmith/astequal v1.2.0 // indirect 60 | github.com/go-toolsmith/astfmt v1.1.0 // indirect 61 | github.com/go-toolsmith/astp v1.1.0 // indirect 62 | github.com/go-toolsmith/strparse v1.1.0 // indirect 63 | github.com/go-toolsmith/typep v1.1.0 // indirect 64 | github.com/go-viper/mapstructure/v2 v2.2.1 // indirect 65 | github.com/go-xmlfmt/xmlfmt v1.1.3 // indirect 66 | github.com/gobwas/glob v0.2.3 // indirect 67 | github.com/gofrs/flock v0.12.1 // indirect 68 | github.com/golang/protobuf v1.5.3 // indirect 69 | github.com/golangci/dupl v0.0.0-20250308024227-f665c8d69b32 // indirect 70 | github.com/golangci/go-printf-func-name v0.1.0 // indirect 71 | github.com/golangci/gofmt v0.0.0-20250106114630-d62b90e6713d // indirect 72 | github.com/golangci/misspell v0.6.0 // indirect 73 | github.com/golangci/plugin-module-register v0.1.1 // indirect 74 | github.com/golangci/revgrep v0.8.0 // indirect 75 | github.com/golangci/unconvert v0.0.0-20240309020433-c5143eacb3ed // indirect 76 | github.com/google/go-cmp v0.7.0 // indirect 77 | github.com/gordonklaus/ineffassign v0.1.0 // indirect 78 | github.com/gostaticanalysis/analysisutil v0.7.1 // indirect 79 | github.com/gostaticanalysis/comment v1.5.0 // indirect 80 | github.com/gostaticanalysis/forcetypeassert v0.2.0 // indirect 81 | github.com/gostaticanalysis/nilerr v0.1.1 // indirect 82 | github.com/hashicorp/go-immutable-radix/v2 v2.1.0 // indirect 83 | github.com/hashicorp/go-version v1.7.0 // indirect 84 | github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect 85 | github.com/hashicorp/hcl v1.0.0 // indirect 86 | github.com/hexops/gotextdiff v1.0.3 // indirect 87 | github.com/inconshreveable/mousetrap v1.1.0 // indirect 88 | github.com/jgautheron/goconst v1.7.1 // indirect 89 | github.com/jingyugao/rowserrcheck v1.1.1 // indirect 90 | github.com/jjti/go-spancheck v0.6.4 // indirect 91 | github.com/julz/importas v0.2.0 // indirect 92 | github.com/karamaru-alpha/copyloopvar v1.2.1 // indirect 93 | 
github.com/kisielk/errcheck v1.9.0 // indirect 94 | github.com/kkHAIKE/contextcheck v1.1.6 // indirect 95 | github.com/kulti/thelper v0.6.3 // indirect 96 | github.com/kunwardeep/paralleltest v1.0.10 // indirect 97 | github.com/lasiar/canonicalheader v1.1.2 // indirect 98 | github.com/ldez/exptostd v0.4.2 // indirect 99 | github.com/ldez/gomoddirectives v0.6.1 // indirect 100 | github.com/ldez/grignotin v0.9.0 // indirect 101 | github.com/ldez/tagliatelle v0.7.1 // indirect 102 | github.com/ldez/usetesting v0.4.2 // indirect 103 | github.com/leonklingele/grouper v1.1.2 // indirect 104 | github.com/macabu/inamedparam v0.1.3 // indirect 105 | github.com/magiconair/properties v1.8.6 // indirect 106 | github.com/maratori/testableexamples v1.0.0 // indirect 107 | github.com/maratori/testpackage v1.1.1 // indirect 108 | github.com/matoous/godox v1.1.0 // indirect 109 | github.com/mattn/go-colorable v0.1.14 // indirect 110 | github.com/mattn/go-isatty v0.0.20 // indirect 111 | github.com/mattn/go-runewidth v0.0.16 // indirect 112 | github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect 113 | github.com/mgechev/revive v1.7.0 // indirect 114 | github.com/mitchellh/go-homedir v1.1.0 // indirect 115 | github.com/mitchellh/mapstructure v1.5.0 // indirect 116 | github.com/moricho/tparallel v0.3.2 // indirect 117 | github.com/nakabonne/nestif v0.3.1 // indirect 118 | github.com/nishanths/exhaustive v0.12.0 // indirect 119 | github.com/nishanths/predeclared v0.2.2 // indirect 120 | github.com/nunnatsa/ginkgolinter v0.19.1 // indirect 121 | github.com/olekukonko/tablewriter v0.0.5 // indirect 122 | github.com/pelletier/go-toml v1.9.5 // indirect 123 | github.com/pelletier/go-toml/v2 v2.2.3 // indirect 124 | github.com/pmezard/go-difflib v1.0.0 // indirect 125 | github.com/polyfloyd/go-errorlint v1.7.1 // indirect 126 | github.com/prometheus/client_golang v1.12.1 // indirect 127 | github.com/prometheus/client_model v0.2.0 // indirect 128 | github.com/prometheus/common 
v0.32.1 // indirect 129 | github.com/prometheus/procfs v0.7.3 // indirect 130 | github.com/quasilyte/go-ruleguard v0.4.3-0.20240823090925-0fe6f58b47b1 // indirect 131 | github.com/quasilyte/go-ruleguard/dsl v0.3.22 // indirect 132 | github.com/quasilyte/gogrep v0.5.0 // indirect 133 | github.com/quasilyte/regex/syntax v0.0.0-20210819130434-b3f0c404a727 // indirect 134 | github.com/quasilyte/stdinfo v0.0.0-20220114132959-f7386bf02567 // indirect 135 | github.com/raeperd/recvcheck v0.2.0 // indirect 136 | github.com/rivo/uniseg v0.4.7 // indirect 137 | github.com/rogpeppe/go-internal v1.14.1 // indirect 138 | github.com/ryancurrah/gomodguard v1.3.5 // indirect 139 | github.com/ryanrolds/sqlclosecheck v0.5.1 // indirect 140 | github.com/sanposhiho/wastedassign/v2 v2.1.0 // indirect 141 | github.com/santhosh-tekuri/jsonschema/v6 v6.0.1 // indirect 142 | github.com/sashamelentyev/interfacebloat v1.1.0 // indirect 143 | github.com/sashamelentyev/usestdlibvars v1.28.0 // indirect 144 | github.com/securego/gosec/v2 v2.22.2 // indirect 145 | github.com/sirupsen/logrus v1.9.3 // indirect 146 | github.com/sivchari/containedctx v1.0.3 // indirect 147 | github.com/sivchari/tenv v1.12.1 // indirect 148 | github.com/sonatard/noctx v0.1.0 // indirect 149 | github.com/sourcegraph/go-diff v0.7.0 // indirect 150 | github.com/spf13/afero v1.12.0 // indirect 151 | github.com/spf13/cast v1.5.0 // indirect 152 | github.com/spf13/cobra v1.9.1 // indirect 153 | github.com/spf13/jwalterweatherman v1.1.0 // indirect 154 | github.com/spf13/pflag v1.0.6 // indirect 155 | github.com/spf13/viper v1.12.0 // indirect 156 | github.com/ssgreg/nlreturn/v2 v2.2.1 // indirect 157 | github.com/stbenjam/no-sprintf-host-port v0.2.0 // indirect 158 | github.com/stretchr/objx v0.5.2 // indirect 159 | github.com/stretchr/testify v1.10.0 // indirect 160 | github.com/subosito/gotenv v1.4.1 // indirect 161 | github.com/tdakkota/asciicheck v0.4.1 // indirect 162 | github.com/tetafro/godot v1.5.0 // indirect 163 
| github.com/timakin/bodyclose v0.0.0-20241017074812-ed6a65f985e3 // indirect 164 | github.com/timonwong/loggercheck v0.10.1 // indirect 165 | github.com/tomarrell/wrapcheck/v2 v2.10.0 // indirect 166 | github.com/tommy-muehle/go-mnd/v2 v2.5.1 // indirect 167 | github.com/ultraware/funlen v0.2.0 // indirect 168 | github.com/ultraware/whitespace v0.2.0 // indirect 169 | github.com/uudashr/gocognit v1.2.0 // indirect 170 | github.com/uudashr/iface v1.3.1 // indirect 171 | github.com/xen0n/gosmopolitan v1.2.2 // indirect 172 | github.com/yagipy/maintidx v1.0.0 // indirect 173 | github.com/yeya24/promlinter v0.3.0 // indirect 174 | github.com/ykadowak/zerologlint v0.1.5 // indirect 175 | gitlab.com/bosi/decorder v0.4.2 // indirect 176 | go-simpler.org/musttag v0.13.0 // indirect 177 | go-simpler.org/sloglint v0.9.0 // indirect 178 | go.uber.org/atomic v1.7.0 // indirect 179 | go.uber.org/automaxprocs v1.6.0 // indirect 180 | go.uber.org/multierr v1.6.0 // indirect 181 | go.uber.org/zap v1.24.0 // indirect 182 | golang.org/x/exp/typeparams v0.0.0-20250210185358-939b2ce775ac // indirect 183 | golang.org/x/mod v0.24.0 // indirect 184 | golang.org/x/sync v0.12.0 // indirect 185 | golang.org/x/sys v0.31.0 // indirect 186 | golang.org/x/text v0.22.0 // indirect 187 | golang.org/x/tools v0.31.0 // indirect 188 | google.golang.org/protobuf v1.36.5 // indirect 189 | gopkg.in/ini.v1 v1.67.0 // indirect 190 | gopkg.in/yaml.v2 v2.4.0 // indirect 191 | gopkg.in/yaml.v3 v3.0.1 // indirect 192 | honnef.co/go/tools v0.6.1 // indirect 193 | mvdan.cc/gofumpt v0.7.0 // indirect 194 | mvdan.cc/unparam v0.0.0-20240528143540-8a5130ca722f // indirect 195 | ) 196 | 197 | // Can be removed once this version is included in golangci-lint. 
198 | replace github.com/ultraware/whitespace => github.com/ultraware/whitespace v0.1.0 199 | -------------------------------------------------------------------------------- /tools/tools.go: -------------------------------------------------------------------------------- 1 | //go:build tools 2 | 3 | package tools 4 | 5 | // The other imports represent our build tools. Instead of defining a commit we 6 | // want to use for those golang based tools, we use the go mod versioning system 7 | // to unify the way we manage dependencies. So we define our build tool 8 | // dependencies here and pin the version in go.mod. 9 | import ( 10 | _ "github.com/golangci/golangci-lint/cmd/golangci-lint" 11 | _ "github.com/rinchsan/gosimports/cmd/gosimports" 12 | ) 13 | -------------------------------------------------------------------------------- /utils.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "io" 5 | "os" 6 | "path/filepath" 7 | ) 8 | 9 | // copyTestDataDir copies the entire test directory structure so that we do not 10 | // alter any original files. 11 | func copyTestDataDir(src, dst string) error { 12 | return filepath.Walk(src, func(path string, info os.FileInfo, err error) error { 13 | if err != nil { 14 | return err 15 | } 16 | 17 | relPath, err := filepath.Rel(src, path) 18 | if err != nil { 19 | return err 20 | } 21 | 22 | dstPath := filepath.Join(dst, relPath) 23 | 24 | if info.IsDir() { 25 | return os.MkdirAll(dstPath, info.Mode()) 26 | } 27 | 28 | return copyFile(path, dstPath) 29 | }) 30 | } 31 | 32 | // copyFile copies a file from src to dst. 
33 | func copyFile(src, dst string) error { 34 | srcFile, err := os.Open(src) 35 | if err != nil { 36 | return err 37 | } 38 | defer srcFile.Close() 39 | 40 | dstFile, err := os.Create(dst) 41 | if err != nil { 42 | return err 43 | } 44 | defer dstFile.Close() 45 | 46 | _, err = io.Copy(dstFile, srcFile) 47 | return err 48 | } 49 | -------------------------------------------------------------------------------- /version.go: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2013-2017 The btcsuite developers 2 | // Copyright (c) 2015-2016 The Decred developers 3 | // Heavily inspired by https://github.com/btcsuite/btcd/blob/master/version.go 4 | // Copyright (C) 2015-2022 The Lightning Network Developers 5 | 6 | package main 7 | 8 | import ( 9 | "fmt" 10 | "strings" 11 | ) 12 | 13 | var ( 14 | // Commit stores the current commit of this build, which includes the 15 | // most recent tag, the number of commits since that tag (if non-zero), 16 | // the commit hash, and a dirty marker. This should be set using the 17 | // -ldflags during compilation. 18 | Commit string 19 | ) 20 | 21 | // semanticAlphabet is the set of characters that are permitted for use in an 22 | // AppPreRelease. 23 | const semanticAlphabet = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz-." 24 | 25 | // These constants define the application version and follow the semantic 26 | // versioning 2.0.0 spec (http://semver.org/). 27 | const ( 28 | // AppMajor defines the major version of this binary. 29 | AppMajor uint = 0 30 | 31 | // AppMinor defines the minor version of this binary. 32 | AppMinor uint = 1 33 | 34 | // AppPatch defines the application patch for this binary. 35 | AppPatch uint = 29 36 | 37 | // AppPreRelease MUST only contain characters from semanticAlphabet 38 | // per the semantic versioning spec. 
39 | AppPreRelease = "beta" 40 | ) 41 | 42 | func init() { 43 | // Assert that AppPreRelease is valid according to the semantic 44 | // versioning guidelines for pre-release version and build metadata 45 | // strings. In particular it MUST only contain characters in 46 | // semanticAlphabet. 47 | for _, r := range AppPreRelease { 48 | if !strings.ContainsRune(semanticAlphabet, r) { 49 | panic(fmt.Errorf("rune: %v is not in the semantic "+ 50 | "alphabet", r)) 51 | } 52 | } 53 | } 54 | 55 | // Version returns the application version as a properly formed string per the 56 | // semantic versioning 2.0.0 spec (http://semver.org/). 57 | func Version() string { 58 | // Start with the major, minor, and patch versions. 59 | version := fmt.Sprintf("%d.%d.%d", AppMajor, AppMinor, AppPatch) 60 | 61 | // Append pre-release version if there is one. The hyphen called for by 62 | // the semantic versioning spec is automatically appended and should not 63 | // be contained in the pre-release string. 64 | if AppPreRelease != "" { 65 | version = fmt.Sprintf("%s-%s", version, AppPreRelease) 66 | } 67 | 68 | return version 69 | } 70 | --------------------------------------------------------------------------------