├── .github
│   └── workflows
│       ├── build-release.yaml
│       └── docs-release.yaml
├── .gitignore
├── LICENSE
├── Makefile
├── README.md
├── assets
│   └── logo.svg
├── bpf
│   ├── headers
│   │   ├── LICENSE.BSD-2-Clause
│   │   ├── bpf_endian.h
│   │   ├── bpf_helper_defs.h
│   │   ├── bpf_helpers.h
│   │   ├── bpf_tracing.h
│   │   ├── common.h
│   │   ├── update.sh
│   │   ├── vmlinux.h
│   │   └── vmlinux.oldh
│   └── trace-blocks.c
├── cmd
│   ├── dispatcher.go
│   ├── root.go
│   └── start.go
├── config.yaml
├── disconfig.yaml
├── docs
│   ├── CNAME
│   ├── architecture.md
│   ├── assets
│   │   ├── blxrepIcon.png
│   │   ├── blxrepIcon.svg
│   │   ├── blxrepsocialxm.jpg
│   │   └── logo.svg
│   ├── index.md
│   ├── motivation.md
│   ├── setup.md
│   ├── troubleshoot.md
│   └── tui.md
├── go.mod
├── go.sum
├── main.go
├── mkdocs.yml
├── package
│   ├── etc
│   │   ├── blxrep
│   │   │   ├── config.yaml
│   │   │   └── policies
│   │   │       └── default.yaml
│   │   └── systemd
│   │       └── system
│   │           └── blxrep.service
│   └── usr
│       └── local
│           └── bin
│               └── blxrep
├── pkg
│   ├── agent
│   │   ├── agent.go
│   │   ├── clone.go
│   │   ├── footprint_linux.go
│   │   ├── footprint_windows.go
│   │   ├── live.go
│   │   ├── restore.go
│   │   ├── scheduled_jobs.go
│   │   └── websocket.go
│   └── dispatcher
│       ├── actions.go
│       ├── backup.go
│       ├── cleanup_jobs.go
│       ├── compress.go
│       ├── config.go
│       ├── dispatcher.go
│       ├── live.go
│       └── restore.go
├── plans
│   ├── lab.yaml
│   └── testplan.yml
├── postinstall.sh
├── service
│   ├── action_db_utils.go
│   ├── agent_db_utils.go
│   ├── backend_utils.go
│   └── dirtyblock_db_utils.go
├── storage
│   ├── boltdb
│   │   └── boltdb.go
│   └── storage.go
├── tui
│   ├── actions.go
│   ├── agents.go
│   ├── checkpoints.go
│   ├── disks.go
│   ├── dispatcher.go
│   ├── filebrowser.go
│   └── partitions.go
└── utils
    ├── banner.go
    ├── bpf_bpfel_x86.o
    ├── bpf_helpers.go
    ├── config.go
    ├── constants.go
    ├── dev_info.go
    ├── disk.go
    ├── duration.go
    ├── file_helper.go
    ├── logger.go
    ├── stream.go
    ├── types.go
    └── user_details.go
/.github/workflows/build-release.yaml: -------------------------------------------------------------------------------- 1 | name: Build and Release Packages 2 | 3 | on: 4 | release: 5 | types: [created] 6 | workflow_dispatch: 7 | inputs: 8 | branch: 9 | description: 'Branch to run the workflow on' 10 | required: true 11 | default: 'main' 12 | version: 13 | description: 'Version to use for packages' 14 | required: true 15 | default: '1.0.0' 16 | 17 | jobs: 18 | build: 19 | runs-on: ubuntu-latest 20 | steps: 21 | - uses: actions/checkout@v3 22 | 23 | - name: Set up Go 24 | uses: actions/setup-go@v3 25 | with: 26 | go-version: '1.23' 27 | 28 | # Set version based on release tag or manual input 29 | - name: Set Version 30 | run: | 31 | if [ "${{ github.event_name }}" = "release" ]; then 32 | VERSION=${GITHUB_REF#refs/tags/v} 33 | else 34 | VERSION=${{ github.event.inputs.version }} 35 | fi 36 | echo "VERSION=$VERSION" >> $GITHUB_ENV 37 | echo "Using version: $VERSION" 38 | 39 | - name: Build Go Application 40 | run: | 41 | GOOS=linux GOARCH=amd64 CGO_ENABLED=1 go build -o blxrep main.go 42 | cp blxrep package/usr/local/bin/ 43 | chmod +x package/usr/local/bin/blxrep 44 | 45 | - name: Package with FPM for deb 46 | uses: bpicode/github-action-fpm@master 47 | with: 48 | fpm_args: | 49 | -s dir 50 | -t deb 51 | -n blxrep 52 | -v ${{ env.VERSION }} 53 | --prefix / 54 | --config-files /etc/blxrep/config.yaml 55 | --post-install postinstall.sh 56 | -C package . 
57 | 58 | - name: Package with FPM for rpm 59 | uses: bpicode/github-action-fpm@master 60 | with: 61 | fpm_args: | 62 | -s dir 63 | -t rpm 64 | -n blxrep 65 | -v ${{ env.VERSION }} 66 | --prefix / 67 | --config-files /etc/blxrep/config.yaml 68 | --post-install postinstall.sh 69 | -C package . 70 | 71 | # Rename packages to ensure consistent naming 72 | - name: Rename packages 73 | run: | 74 | mv blxrep_${{ env.VERSION }}_amd64.deb blxrep-${{ env.VERSION }}-amd64.deb 75 | mv blxrep-${{ env.VERSION }}-1.x86_64.rpm blxrep-${{ env.VERSION }}-x86_64.rpm 76 | 77 | # Upload to GitHub Release 78 | - name: Upload Release Assets 79 | if: github.event_name == 'release' 80 | uses: softprops/action-gh-release@v1 81 | with: 82 | files: | 83 | blxrep-${{ env.VERSION }}-amd64.deb 84 | blxrep-${{ env.VERSION }}-x86_64.rpm 85 | env: 86 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 87 | -------------------------------------------------------------------------------- /.github/workflows/docs-release.yaml: -------------------------------------------------------------------------------- 1 | name: ci 2 | on: 3 | push: 4 | branches: 5 | - main 6 | permissions: 7 | contents: write 8 | jobs: 9 | deploy: 10 | runs-on: ubuntu-latest 11 | steps: 12 | - uses: actions/checkout@v4 13 | - name: Configure Git Credentials 14 | run: | 15 | git config user.name github-actions[bot] 16 | git config user.email 41898282+github-actions[bot]@users.noreply.github.com 17 | - uses: actions/setup-python@v5 18 | with: 19 | python-version: 3.x 20 | - run: echo "cache_id=$(date --utc '+%V')" >> $GITHUB_ENV 21 | - uses: actions/cache@v4 22 | with: 23 | key: mkdocs-material-${{ env.cache_id }} 24 | path: .cache 25 | restore-keys: | 26 | mkdocs-material- 27 | - run: pip install mkdocs-material 28 | - run: pip install mkdocs-material[imaging] 29 | - run: mkdocs gh-deploy --force -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # If you prefer the allow list template instead of the deny list, see community template: 2 | # https://github.com/github/gitignore/blob/main/community/Golang/Go.AllowList.gitignore 3 | # 4 | # Binaries for programs and plugins 5 | *.exe 6 | *.exe~ 7 | *.dll 8 | *.so 9 | *.dylib 10 | 11 | # Test binary, built with `go test -c` 12 | *.test 13 | 14 | # Output of the go coverage tool, specifically when used with LiteIDE 15 | *.out 16 | 17 | # Dependency directories (remove the comment below to include it) 18 | # vendor/ 19 | 20 | # Go workspace file 21 | go.work 22 | go.work.sum 23 | 24 | # env file 25 | .env 26 | .cache 27 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Copyright (c) 2025-Present XMIGRATE CONSULTANCY SERVICES PRIVATE LIMITED 2 | 3 | MIT License 4 | 5 | Portions of this software are licensed as follows: 6 | 7 | * All content that resides under the "enterprise/" directory of this repository, if that directory exists, is licensed under the license defined in "enterprise/LICENSE". 8 | * All third party components incorporated into the blxrep software are licensed under the original license provided by the owner of the applicable component. 9 | * Content outside of the above mentioned directories or restrictions above is available under the "MIT Expat" license as defined below. 
10 | 11 | 12 | Permission is hereby granted, free of charge, to any person obtaining a copy 13 | of this software and associated documentation files (the "Software"), to deal 14 | in the Software without restriction, including without limitation the rights 15 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 16 | copies of the Software, and to permit persons to whom the Software is 17 | furnished to do so, subject to the following conditions: 18 | 19 | The above copyright notice and this permission notice shall be included in all 20 | copies or substantial portions of the Software. 21 | 22 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 23 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 24 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 25 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 26 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 27 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 28 | SOFTWARE. 29 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | GO := go 2 | GO_BUILD = CGO_ENABLED=0 $(GO) build 3 | GO_GENERATE = $(GO) generate 4 | GO_TAGS ?= bpf 5 | TARGET = blxrep-bpf 6 | BINDIR ?= /usr/local/bin 7 | VERSION=$(shell git describe --tags --always) 8 | 9 | # BPF-specific variables 10 | CLANG ?= clang 11 | CFLAGS := -O2 -g -Wall -Werror 12 | BPF_CFLAGS := $(CFLAGS) \ 13 | -target bpf \ 14 | -D__TARGET_ARCH_x86 15 | 16 | .PHONY: all clean generate $(TARGET) 17 | 18 | all: generate $(TARGET) 19 | 20 | $(TARGET): 21 | $(GO_GENERATE) 22 | $(GO_BUILD) $(if $(GO_TAGS),-tags $(GO_TAGS)) \ 23 | -ldflags "-w -s \ 24 | -X 'github.com/xmigrate/blxrep.Version=${VERSION}'" 25 | 26 | # Generate BPF code from .c to .o 27 | %.bpf.o: %.bpf.c 28 | $(CLANG) $(BPF_CFLAGS) -c $< -o $@ 29 | 30 | # Generate Go files from BPF objects 31 | generate: export BPF_CLANG := $(CLANG) 32 | generate: export BPF_CFLAGS := $(BPF_CFLAGS) 33 | generate: 34 | $(GO_GENERATE) 35 | 36 | clean: 37 | rm -f $(TARGET) 38 | rm -f *.o 39 | rm -f *.bpf.o 40 | rm -f *_bpf_*.go 41 | rm -rf ./release -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 |

2 | ![blxrep logo](assets/logo.svg) 3 |

4 | 5 | # blxrep 6 | 7 | blxrep is a powerful tool designed for live data replication of disks over a network. It operates in two modes: dispatcher and agent, allowing for efficient and flexible disaster recovery setup. 8 | blxrep tracks the changes that happen on disk at sector level using eBPF tracepoints. 9 | 10 | ## Table of Contents 11 | 12 | - [Overview](#overview) 13 | - [Installation](#installation) 14 | - [Configuration](#configuration) 15 | - [Usage](#usage) 16 | - [Starting blxrep](#starting-blxrep) 17 | - [Dispatcher Commands](#dispatcher-commands) 18 | - [Modes of Operation](#modes-of-operation) 19 | - [TUI mode](#tui-mode) 20 | 21 | ## Overview 22 | 23 | Traditionally, companies have relied on kernel modules for change block tracking and bitmap-based incremental backups. While functional, this approach has several limitations: 24 | 25 | 1. Complex kernel module development and maintenance requiring deep kernel expertise 26 | 2. Challenging debugging process due to kernel space operations 27 | 3. Limited testing capabilities in kernel space 28 | 4. Scalability constraints due to kernel-level implementation 29 | 5. Steep learning curve for kernel module development 30 | 6. System reboots required for kernel module loading and updates 31 | 7. Potential for system instability and security vulnerabilities due to unrestricted kernel access 32 | 33 | blxrep modernizes this approach by leveraging eBPF tracepoints to track disk changes at the sector level. This brings several advantages: 34 | 35 | 1. Simplified development through eBPF's modern tooling, extensive documentation, and active community support 36 | 2. Enhanced debugging capabilities with user-space tools and eBPF maps 37 | 3. Comprehensive testing framework support 38 | 4. Better scalability through efficient event processing 39 | 5. More approachable learning curve with high-level eBPF programming interfaces 40 | 6. Dynamic loading without system reboots 41 | 7. Improved safety through eBPF's verifier and sandboxed execution environment 42 | 43 | ## Installation 44 | 45 | ### For Debian/Ubuntu based systems (.deb) 46 | 47 | 1. Download the package: 48 | ```bash 49 | wget https://github.com/xmigrate/blxrep/releases/download/v0.1.0/blxrep-0.1.0-amd64.deb 50 | ``` 51 | 2. Install the package: 52 | ```bash 53 | sudo dpkg -i blxrep-0.1.0-amd64.deb 54 | ``` 55 | > Note: If you get an error about missing dependencies, you can install them with: 56 | ```bash 57 | sudo apt-get install -f 58 | ``` 59 | 60 | ### For Redhat/CentOS based systems (.rpm) 61 | 62 | 1. Download the package: 63 | ```bash 64 | wget https://github.com/xmigrate/blxrep/releases/download/v0.1.0/blxrep-0.1.0-x86_64.rpm 65 | ``` 66 | 2. Install the package: 67 | ```bash 68 | sudo rpm -i blxrep-0.1.0-x86_64.rpm 69 | ``` 70 | 71 | ## Verify the installation: 72 | ```bash 73 | sudo blxrep --help 74 | ``` 75 | 76 | Configuration file is located at `/etc/blxrep/config.yaml` by default. 77 | Policy directory is located at `/etc/blxrep/policies` by default. 78 | 79 | ## Policy Configuration 80 | 81 | Default Policy configuration is located at `/etc/blxrep/policies/default.yaml` by default. 
82 | ```yaml 83 | name: "default-backup-policy" # name of the policy 84 | description: "Backup policy for all servers" # description of the policy 85 | archive_interval: 48h # archive interval 86 | snapshot_frequency: "daily" # snapshot frequency 87 | snapshot_time: "12:00" # snapshot time 88 | bandwidth_limit: 100 # bandwidth limit 89 | snapshot_retention: 30 # snapshot retention 90 | live_sync_frequency: 2m # live sync frequency 91 | transition_after_days: 30 # transition after days 92 | delete_after_days: 90 # delete after days 93 | 94 | targets: 95 | # Range pattern 96 | - pattern: "*" # pattern of the targets which is mentioned on agent /etc/blxrep/config.yaml 97 | disks_excluded: 98 | - "/dev/xvda" # disks excluded from the policy 99 | ``` 100 | You can create your own policy by creating a new yaml file in the `/etc/blxrep/policies` directory. 101 | 102 | ## Post Installation 103 | After installation, enable and start the blxrep service: 104 | 105 | ```bash 106 | sudo systemctl enable blxrep 107 | sudo systemctl start blxrep 108 | ``` 109 | 110 | After starting the blxrep service, you can see the status of the blxrep service with the following command: 111 | 112 | ```bash 113 | sudo systemctl status blxrep 114 | ``` 115 | 116 | 117 | ## Uninstallation 118 | 119 | To uninstall blxrep, use the following command: 120 | 121 | For Debian/Ubuntu: 122 | ```bash 123 | sudo dpkg -r blxrep 124 | ``` 125 | 126 | For Redhat/CentOS: 127 | ```bash 128 | sudo rpm -e blxrep 129 | ``` 130 | 131 | ## Configuration 132 | 133 | blxrep uses a configuration file located at `/etc/blxrep/config.yaml` by default. You can specify a different configuration file using the `--config` flag. 134 | 135 | Example configuration for agent: 136 | 137 | ```yaml 138 | mode: "agent" 139 | id: "hostname" 140 | dispatcher-addr: "localhost:8080" 141 | data-dir: "/data" 142 | ``` 143 | 144 | Example configuration for dispatcher: 145 | 146 | ```yaml 147 | mode: "dispatcher" 148 | data-dir: "/data" 149 | policy-dir: "/etc/blxrep/policies" 150 | ``` 151 | 152 | ## Usage 153 | 154 | ### Starting blxrep 155 | 156 | To start blxrep, use the `start` command: 157 | 158 | ```bash 159 | blxrep start [flags] 160 | ``` 161 | 162 | Flags: 163 | | Flag | Description | Required For | 164 | |------|-------------|--------------| 165 | | `--mode` | Start mode ('dispatcher' or 'agent') | Both | 166 | | `--id` | Agent ID | Agent mode | 167 | | `--dispatcher-addr` | Dispatcher address (format: host:port) | Agent mode | 168 | | `--data-dir` | Data directory | Dispatcher mode | 169 | | `--policy-dir` | Policy directory | Dispatcher mode | 170 | | `--config` | Configuration file | Optional | 171 | 172 | 173 | ## Modes of Operation 174 | 175 | ### Dispatcher Mode 176 | 177 | In dispatcher mode, blxrep manages the overall replication process. It acts as a central collector for replicating disk data from multiple servers. It requires a data directory and policy directory to be specified. All types of disk backups are collected and stored in the specified data directory. Policy directory is used to specify the policy for the disk backups for each agent. 178 | 179 | 180 | ### Agent Mode 181 | 182 | In agent mode, blxrep runs on individual servers to send snapshot backups and live changes to the dispatcher. It requires an agent ID, dispatcher address, and device to be specified. We need the agent ID to be unique if we are connecting multiple servers to the same dispatcher. Device is the disk that needs to be backed up and monitored for live changes. 
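For example, with the flags described above (the agent ID and addresses below are placeholders, not defaults), the two modes can be started like this:

```bash
# On the dispatcher (data and policy directories are required)
sudo blxrep start --mode=dispatcher --data-dir=/data --policy-dir=/etc/blxrep/policies

# On each server to be protected (a unique agent ID and the dispatcher address are required)
sudo blxrep start --mode=agent --id=web-01 --dispatcher-addr=10.0.0.5:8080
```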
183 | 184 | ### TUI mode 185 | 186 | blxrep uses tcell, a terminal UI library for Go that is easy to use and highly customizable, to interact with the dispatcher and agents. With TUI mode, you can navigate through the agents that are connected to the dispatcher and see the status of the disk backups. You can also mount the disk backups to any available point in time and restore files or partitions with the help of the TUI. 187 | 188 | To start the TUI, use the `tui` command: 189 | 190 | ```bash 191 | blxrep tui --data-dir=<data-dir> 192 | ``` 193 | -------------------------------------------------------------------------------- /assets/logo.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | blxrep 17 | 18 | -------------------------------------------------------------------------------- /bpf/headers/LICENSE.BSD-2-Clause: -------------------------------------------------------------------------------- 1 | Valid-License-Identifier: BSD-2-Clause 2 | SPDX-URL: https://spdx.org/licenses/BSD-2-Clause.html 3 | Usage-Guide: 4 | To use the BSD 2-clause "Simplified" License put the following SPDX 5 | tag/value pair into a comment according to the placement guidelines in 6 | the licensing rules documentation: 7 | SPDX-License-Identifier: BSD-2-Clause 8 | License-Text: 9 | 10 | Copyright (c) <year> <owner> . All rights reserved. 11 | 12 | Redistribution and use in source and binary forms, with or without 13 | modification, are permitted provided that the following conditions are met: 14 | 15 | 1. Redistributions of source code must retain the above copyright notice, 16 | this list of conditions and the following disclaimer. 17 | 18 | 2. Redistributions in binary form must reproduce the above copyright 19 | notice, this list of conditions and the following disclaimer in the 20 | documentation and/or other materials provided with the distribution. 21 | 22 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 23 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 24 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 25 | ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE 26 | LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 27 | CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 28 | SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 29 | INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 30 | CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 31 | ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 32 | POSSIBILITY OF SUCH DAMAGE. 33 | -------------------------------------------------------------------------------- /bpf/headers/bpf_endian.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ 2 | #ifndef __BPF_ENDIAN__ 3 | #define __BPF_ENDIAN__ 4 | 5 | /* 6 | * Isolate byte #n and put it into byte #m, for __u##b type. 
7 | * E.g., moving byte #6 (nnnnnnnn) into byte #1 (mmmmmmmm) for __u64: 8 | * 1) xxxxxxxx nnnnnnnn xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx mmmmmmmm xxxxxxxx 9 | * 2) nnnnnnnn xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx mmmmmmmm xxxxxxxx 00000000 10 | * 3) 00000000 00000000 00000000 00000000 00000000 00000000 00000000 nnnnnnnn 11 | * 4) 00000000 00000000 00000000 00000000 00000000 00000000 nnnnnnnn 00000000 12 | */ 13 | #define ___bpf_mvb(x, b, n, m) ((__u##b)(x) << (b-(n+1)*8) >> (b-8) << (m*8)) 14 | 15 | #define ___bpf_swab16(x) ((__u16)( \ 16 | ___bpf_mvb(x, 16, 0, 1) | \ 17 | ___bpf_mvb(x, 16, 1, 0))) 18 | 19 | #define ___bpf_swab32(x) ((__u32)( \ 20 | ___bpf_mvb(x, 32, 0, 3) | \ 21 | ___bpf_mvb(x, 32, 1, 2) | \ 22 | ___bpf_mvb(x, 32, 2, 1) | \ 23 | ___bpf_mvb(x, 32, 3, 0))) 24 | 25 | #define ___bpf_swab64(x) ((__u64)( \ 26 | ___bpf_mvb(x, 64, 0, 7) | \ 27 | ___bpf_mvb(x, 64, 1, 6) | \ 28 | ___bpf_mvb(x, 64, 2, 5) | \ 29 | ___bpf_mvb(x, 64, 3, 4) | \ 30 | ___bpf_mvb(x, 64, 4, 3) | \ 31 | ___bpf_mvb(x, 64, 5, 2) | \ 32 | ___bpf_mvb(x, 64, 6, 1) | \ 33 | ___bpf_mvb(x, 64, 7, 0))) 34 | 35 | /* LLVM's BPF target selects the endianness of the CPU 36 | * it compiles on, or the user specifies (bpfel/bpfeb), 37 | * respectively. The used __BYTE_ORDER__ is defined by 38 | * the compiler, we cannot rely on __BYTE_ORDER from 39 | * libc headers, since it doesn't reflect the actual 40 | * requested byte order. 41 | * 42 | * Note, LLVM's BPF target has different __builtin_bswapX() 43 | * semantics. It does map to BPF_ALU | BPF_END | BPF_TO_BE 44 | * in bpfel and bpfeb case, which means below, that we map 45 | * to cpu_to_be16(). We could use it unconditionally in BPF 46 | * case, but better not rely on it, so that this header here 47 | * can be used from application and BPF program side, which 48 | * use different targets. 49 | */ 50 | #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ 51 | # define __bpf_ntohs(x) __builtin_bswap16(x) 52 | # define __bpf_htons(x) __builtin_bswap16(x) 53 | # define __bpf_constant_ntohs(x) ___bpf_swab16(x) 54 | # define __bpf_constant_htons(x) ___bpf_swab16(x) 55 | # define __bpf_ntohl(x) __builtin_bswap32(x) 56 | # define __bpf_htonl(x) __builtin_bswap32(x) 57 | # define __bpf_constant_ntohl(x) ___bpf_swab32(x) 58 | # define __bpf_constant_htonl(x) ___bpf_swab32(x) 59 | # define __bpf_be64_to_cpu(x) __builtin_bswap64(x) 60 | # define __bpf_cpu_to_be64(x) __builtin_bswap64(x) 61 | # define __bpf_constant_be64_to_cpu(x) ___bpf_swab64(x) 62 | # define __bpf_constant_cpu_to_be64(x) ___bpf_swab64(x) 63 | #elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ 64 | # define __bpf_ntohs(x) (x) 65 | # define __bpf_htons(x) (x) 66 | # define __bpf_constant_ntohs(x) (x) 67 | # define __bpf_constant_htons(x) (x) 68 | # define __bpf_ntohl(x) (x) 69 | # define __bpf_htonl(x) (x) 70 | # define __bpf_constant_ntohl(x) (x) 71 | # define __bpf_constant_htonl(x) (x) 72 | # define __bpf_be64_to_cpu(x) (x) 73 | # define __bpf_cpu_to_be64(x) (x) 74 | # define __bpf_constant_be64_to_cpu(x) (x) 75 | # define __bpf_constant_cpu_to_be64(x) (x) 76 | #else 77 | # error "Fix your compiler's __BYTE_ORDER__?!" 78 | #endif 79 | 80 | #define bpf_htons(x) \ 81 | (__builtin_constant_p(x) ? \ 82 | __bpf_constant_htons(x) : __bpf_htons(x)) 83 | #define bpf_ntohs(x) \ 84 | (__builtin_constant_p(x) ? \ 85 | __bpf_constant_ntohs(x) : __bpf_ntohs(x)) 86 | #define bpf_htonl(x) \ 87 | (__builtin_constant_p(x) ? \ 88 | __bpf_constant_htonl(x) : __bpf_htonl(x)) 89 | #define bpf_ntohl(x) \ 90 | (__builtin_constant_p(x) ? 
\ 91 | __bpf_constant_ntohl(x) : __bpf_ntohl(x)) 92 | #define bpf_cpu_to_be64(x) \ 93 | (__builtin_constant_p(x) ? \ 94 | __bpf_constant_cpu_to_be64(x) : __bpf_cpu_to_be64(x)) 95 | #define bpf_be64_to_cpu(x) \ 96 | (__builtin_constant_p(x) ? \ 97 | __bpf_constant_be64_to_cpu(x) : __bpf_be64_to_cpu(x)) 98 | 99 | #endif /* __BPF_ENDIAN__ */ 100 | -------------------------------------------------------------------------------- /bpf/headers/bpf_helpers.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ 2 | #ifndef __BPF_HELPERS__ 3 | #define __BPF_HELPERS__ 4 | 5 | /* 6 | * Note that bpf programs need to include either 7 | * vmlinux.h (auto-generated from BTF) or linux/types.h 8 | * in advance since bpf_helper_defs.h uses such types 9 | * as __u64. 10 | */ 11 | #include "bpf_helper_defs.h" 12 | 13 | #define __uint(name, val) int (*name)[val] 14 | #define __type(name, val) typeof(val) *name 15 | #define __array(name, val) typeof(val) *name[] 16 | 17 | /* 18 | * Helper macro to place programs, maps, license in 19 | * different sections in elf_bpf file. Section names 20 | * are interpreted by libbpf depending on the context (BPF programs, BPF maps, 21 | * extern variables, etc). 22 | * To allow use of SEC() with externs (e.g., for extern .maps declarations), 23 | * make sure __attribute__((unused)) doesn't trigger compilation warning. 24 | */ 25 | #define SEC(name) \ 26 | _Pragma("GCC diagnostic push") \ 27 | _Pragma("GCC diagnostic ignored \"-Wignored-attributes\"") \ 28 | __attribute__((section(name), used)) \ 29 | _Pragma("GCC diagnostic pop") \ 30 | 31 | /* Avoid 'linux/stddef.h' definition of '__always_inline'. */ 32 | #undef __always_inline 33 | #define __always_inline inline __attribute__((always_inline)) 34 | 35 | #ifndef __noinline 36 | #define __noinline __attribute__((noinline)) 37 | #endif 38 | #ifndef __weak 39 | #define __weak __attribute__((weak)) 40 | #endif 41 | 42 | /* 43 | * Use __hidden attribute to mark a non-static BPF subprogram effectively 44 | * static for BPF verifier's verification algorithm purposes, allowing more 45 | * extensive and permissive BPF verification process, taking into account 46 | * subprogram's caller context. 47 | */ 48 | #define __hidden __attribute__((visibility("hidden"))) 49 | 50 | /* When utilizing vmlinux.h with BPF CO-RE, user BPF programs can't include 51 | * any system-level headers (such as stddef.h, linux/version.h, etc), and 52 | * commonly-used macros like NULL and KERNEL_VERSION aren't available through 53 | * vmlinux.h. This just adds unnecessary hurdles and forces users to re-define 54 | * them on their own. So as a convenience, provide such definitions here. 55 | */ 56 | #ifndef NULL 57 | #define NULL ((void *)0) 58 | #endif 59 | 60 | #ifndef KERNEL_VERSION 61 | #define KERNEL_VERSION(a, b, c) (((a) << 16) + ((b) << 8) + ((c) > 255 ? 255 : (c))) 62 | #endif 63 | 64 | /* 65 | * Helper macros to manipulate data structures 66 | */ 67 | #ifndef offsetof 68 | #define offsetof(TYPE, MEMBER) ((unsigned long)&((TYPE *)0)->MEMBER) 69 | #endif 70 | #ifndef container_of 71 | #define container_of(ptr, type, member) \ 72 | ({ \ 73 | void *__mptr = (void *)(ptr); \ 74 | ((type *)(__mptr - offsetof(type, member))); \ 75 | }) 76 | #endif 77 | 78 | /* 79 | * Helper macro to throw a compilation error if __bpf_unreachable() gets 80 | * built into the resulting code. This works given BPF back end does not 81 | * implement __builtin_trap(). 
This is useful to assert that certain paths 82 | * of the program code are never used and hence eliminated by the compiler. 83 | * 84 | * For example, consider a switch statement that covers known cases used by 85 | * the program. __bpf_unreachable() can then reside in the default case. If 86 | * the program gets extended such that a case is not covered in the switch 87 | * statement, then it will throw a build error due to the default case not 88 | * being compiled out. 89 | */ 90 | #ifndef __bpf_unreachable 91 | # define __bpf_unreachable() __builtin_trap() 92 | #endif 93 | 94 | /* 95 | * Helper function to perform a tail call with a constant/immediate map slot. 96 | */ 97 | #if __clang_major__ >= 8 && defined(__bpf__) 98 | static __always_inline void 99 | bpf_tail_call_static(void *ctx, const void *map, const __u32 slot) 100 | { 101 | if (!__builtin_constant_p(slot)) 102 | __bpf_unreachable(); 103 | 104 | /* 105 | * Provide a hard guarantee that LLVM won't optimize setting r2 (map 106 | * pointer) and r3 (constant map index) from _different paths_ ending 107 | * up at the _same_ call insn as otherwise we won't be able to use the 108 | * jmpq/nopl retpoline-free patching by the x86-64 JIT in the kernel 109 | * given they mismatch. See also d2e4c1e6c294 ("bpf: Constant map key 110 | * tracking for prog array pokes") for details on verifier tracking. 111 | * 112 | * Note on clobber list: we need to stay in-line with BPF calling 113 | * convention, so even if we don't end up using r0, r4, r5, we need 114 | * to mark them as clobber so that LLVM doesn't end up using them 115 | * before / after the call. 116 | */ 117 | asm volatile("r1 = %[ctx]\n\t" 118 | "r2 = %[map]\n\t" 119 | "r3 = %[slot]\n\t" 120 | "call 12" 121 | :: [ctx]"r"(ctx), [map]"r"(map), [slot]"i"(slot) 122 | : "r0", "r1", "r2", "r3", "r4", "r5"); 123 | } 124 | #endif 125 | 126 | /* 127 | * Helper structure used by eBPF C program 128 | * to describe BPF map attributes to libbpf loader 129 | */ 130 | struct bpf_map_def { 131 | unsigned int type; 132 | unsigned int key_size; 133 | unsigned int value_size; 134 | unsigned int max_entries; 135 | unsigned int map_flags; 136 | }; 137 | 138 | enum libbpf_pin_type { 139 | LIBBPF_PIN_NONE, 140 | /* PIN_BY_NAME: pin maps by name (in /sys/fs/bpf by default) */ 141 | LIBBPF_PIN_BY_NAME, 142 | }; 143 | 144 | enum libbpf_tristate { 145 | TRI_NO = 0, 146 | TRI_YES = 1, 147 | TRI_MODULE = 2, 148 | }; 149 | 150 | #define __kconfig __attribute__((section(".kconfig"))) 151 | #define __ksym __attribute__((section(".ksyms"))) 152 | 153 | #ifndef ___bpf_concat 154 | #define ___bpf_concat(a, b) a ## b 155 | #endif 156 | #ifndef ___bpf_apply 157 | #define ___bpf_apply(fn, n) ___bpf_concat(fn, n) 158 | #endif 159 | #ifndef ___bpf_nth 160 | #define ___bpf_nth(_, _1, _2, _3, _4, _5, _6, _7, _8, _9, _a, _b, _c, N, ...) N 161 | #endif 162 | #ifndef ___bpf_narg 163 | #define ___bpf_narg(...) \ 164 | ___bpf_nth(_, ##__VA_ARGS__, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0) 165 | #endif 166 | 167 | #define ___bpf_fill0(arr, p, x) do {} while (0) 168 | #define ___bpf_fill1(arr, p, x) arr[p] = x 169 | #define ___bpf_fill2(arr, p, x, args...) arr[p] = x; ___bpf_fill1(arr, p + 1, args) 170 | #define ___bpf_fill3(arr, p, x, args...) arr[p] = x; ___bpf_fill2(arr, p + 1, args) 171 | #define ___bpf_fill4(arr, p, x, args...) arr[p] = x; ___bpf_fill3(arr, p + 1, args) 172 | #define ___bpf_fill5(arr, p, x, args...) arr[p] = x; ___bpf_fill4(arr, p + 1, args) 173 | #define ___bpf_fill6(arr, p, x, args...) 
arr[p] = x; ___bpf_fill5(arr, p + 1, args) 174 | #define ___bpf_fill7(arr, p, x, args...) arr[p] = x; ___bpf_fill6(arr, p + 1, args) 175 | #define ___bpf_fill8(arr, p, x, args...) arr[p] = x; ___bpf_fill7(arr, p + 1, args) 176 | #define ___bpf_fill9(arr, p, x, args...) arr[p] = x; ___bpf_fill8(arr, p + 1, args) 177 | #define ___bpf_fill10(arr, p, x, args...) arr[p] = x; ___bpf_fill9(arr, p + 1, args) 178 | #define ___bpf_fill11(arr, p, x, args...) arr[p] = x; ___bpf_fill10(arr, p + 1, args) 179 | #define ___bpf_fill12(arr, p, x, args...) arr[p] = x; ___bpf_fill11(arr, p + 1, args) 180 | #define ___bpf_fill(arr, args...) \ 181 | ___bpf_apply(___bpf_fill, ___bpf_narg(args))(arr, 0, args) 182 | 183 | /* 184 | * BPF_SEQ_PRINTF to wrap bpf_seq_printf to-be-printed values 185 | * in a structure. 186 | */ 187 | #define BPF_SEQ_PRINTF(seq, fmt, args...) \ 188 | ({ \ 189 | static const char ___fmt[] = fmt; \ 190 | unsigned long long ___param[___bpf_narg(args)]; \ 191 | \ 192 | _Pragma("GCC diagnostic push") \ 193 | _Pragma("GCC diagnostic ignored \"-Wint-conversion\"") \ 194 | ___bpf_fill(___param, args); \ 195 | _Pragma("GCC diagnostic pop") \ 196 | \ 197 | bpf_seq_printf(seq, ___fmt, sizeof(___fmt), \ 198 | ___param, sizeof(___param)); \ 199 | }) 200 | 201 | /* 202 | * BPF_SNPRINTF wraps the bpf_snprintf helper with variadic arguments instead of 203 | * an array of u64. 204 | */ 205 | #define BPF_SNPRINTF(out, out_size, fmt, args...) \ 206 | ({ \ 207 | static const char ___fmt[] = fmt; \ 208 | unsigned long long ___param[___bpf_narg(args)]; \ 209 | \ 210 | _Pragma("GCC diagnostic push") \ 211 | _Pragma("GCC diagnostic ignored \"-Wint-conversion\"") \ 212 | ___bpf_fill(___param, args); \ 213 | _Pragma("GCC diagnostic pop") \ 214 | \ 215 | bpf_snprintf(out, out_size, ___fmt, \ 216 | ___param, sizeof(___param)); \ 217 | }) 218 | 219 | #ifdef BPF_NO_GLOBAL_DATA 220 | #define BPF_PRINTK_FMT_MOD 221 | #else 222 | #define BPF_PRINTK_FMT_MOD static const 223 | #endif 224 | 225 | #define __bpf_printk(fmt, ...) \ 226 | ({ \ 227 | BPF_PRINTK_FMT_MOD char ____fmt[] = fmt; \ 228 | bpf_trace_printk(____fmt, sizeof(____fmt), \ 229 | ##__VA_ARGS__); \ 230 | }) 231 | 232 | /* 233 | * __bpf_vprintk wraps the bpf_trace_vprintk helper with variadic arguments 234 | * instead of an array of u64. 235 | */ 236 | #define __bpf_vprintk(fmt, args...) \ 237 | ({ \ 238 | static const char ___fmt[] = fmt; \ 239 | unsigned long long ___param[___bpf_narg(args)]; \ 240 | \ 241 | _Pragma("GCC diagnostic push") \ 242 | _Pragma("GCC diagnostic ignored \"-Wint-conversion\"") \ 243 | ___bpf_fill(___param, args); \ 244 | _Pragma("GCC diagnostic pop") \ 245 | \ 246 | bpf_trace_vprintk(___fmt, sizeof(___fmt), \ 247 | ___param, sizeof(___param)); \ 248 | }) 249 | 250 | /* Use __bpf_printk when bpf_printk call has 3 or fewer fmt args 251 | * Otherwise use __bpf_vprintk 252 | */ 253 | #define ___bpf_pick_printk(...) \ 254 | ___bpf_nth(_, ##__VA_ARGS__, __bpf_vprintk, __bpf_vprintk, __bpf_vprintk, \ 255 | __bpf_vprintk, __bpf_vprintk, __bpf_vprintk, __bpf_vprintk, \ 256 | __bpf_vprintk, __bpf_vprintk, __bpf_printk /*3*/, __bpf_printk /*2*/,\ 257 | __bpf_printk /*1*/, __bpf_printk /*0*/) 258 | 259 | /* Helper macro to print out debug messages */ 260 | #define bpf_printk(fmt, args...) 
___bpf_pick_printk(args)(fmt, ##args) 261 | 262 | #endif 263 | -------------------------------------------------------------------------------- /bpf/headers/common.h: -------------------------------------------------------------------------------- 1 | // This is a compact version of `vmlinux.h` to be used in the examples using C code. 2 | 3 | #pragma once 4 | 5 | typedef unsigned char __u8; 6 | typedef short int __s16; 7 | typedef short unsigned int __u16; 8 | typedef int __s32; 9 | typedef unsigned int __u32; 10 | typedef long long int __s64; 11 | typedef long long unsigned int __u64; 12 | typedef __u8 u8; 13 | typedef __s16 s16; 14 | typedef __u16 u16; 15 | typedef __s32 s32; 16 | typedef __u32 u32; 17 | typedef __s64 s64; 18 | typedef __u64 u64; 19 | typedef __u16 __le16; 20 | typedef __u16 __be16; 21 | typedef __u32 __be32; 22 | typedef __u64 __be64; 23 | typedef __u32 __wsum; 24 | 25 | #include "bpf_helpers.h" 26 | #include "bpf_tracing.h" 27 | #include "vmlinux.h" 28 | 29 | typedef __u16 __sum16; 30 | 31 | #define ETH_P_IP 0x0800 32 | 33 | /* BPF_FUNC_perf_event_output, BPF_FUNC_perf_event_read and 34 | * BPF_FUNC_perf_event_read_value flags. 35 | */ 36 | #define BPF_F_INDEX_MASK 0xffffffffULL 37 | #define BPF_F_CURRENT_CPU BPF_F_INDEX_MASK 38 | -------------------------------------------------------------------------------- /bpf/headers/update.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # Version of libbpf to fetch headers from 4 | LIBBPF_VERSION=0.6.1 5 | 6 | # The headers we want 7 | prefix=libbpf-"$LIBBPF_VERSION" 8 | headers=( 9 | "$prefix"/LICENSE.BSD-2-Clause 10 | "$prefix"/src/bpf_endian.h 11 | "$prefix"/src/bpf_helper_defs.h 12 | "$prefix"/src/bpf_helpers.h 13 | "$prefix"/src/bpf_tracing.h 14 | ) 15 | 16 | # Fetch libbpf release and extract the desired headers 17 | curl -sL "https://github.com/libbpf/libbpf/archive/refs/tags/v${LIBBPF_VERSION}.tar.gz" | \ 18 | tar -xz --xform='s#.*/##' "${headers[@]}" 19 | -------------------------------------------------------------------------------- /bpf/trace-blocks.c: -------------------------------------------------------------------------------- 1 | #include "common.h" 2 | #define MINORBITS 20 3 | #define MINOR(dev) ((unsigned int)((dev) & ((1U << MINORBITS) - 1))) 4 | #define MAJOR(dev) ((unsigned int)((dev) >> MINORBITS)) 5 | 6 | char __license[] SEC("license") = "Dual MIT/GPL"; 7 | 8 | struct event { 9 | // u64 pid; 10 | // u64 bi_sector; 11 | u64 block_start; 12 | u64 block_end; 13 | // u32 major; 14 | // u32 minor; 15 | }; 16 | 17 | struct { 18 | __uint(type, BPF_MAP_TYPE_RINGBUF); 19 | __uint(max_entries, 1 << 24); 20 | } events SEC(".maps"); 21 | 22 | struct { 23 | __uint(type, BPF_MAP_TYPE_ARRAY); 24 | __type(key, u32); 25 | __type(value, u32); 26 | __uint(max_entries, 2); 27 | } target_disk_map SEC(".maps"); 28 | 29 | // Force emitting struct event into the ELF. 
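// (Keeping a live reference to the type below preserves it in the object
// file's BTF, so tooling such as cilium/ebpf's bpf2go can generate a
// matching Go type for the userspace side.)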
30 | const struct event *unused __attribute__((unused)); 31 | 32 | 33 | SEC("tracepoint/block/block_rq_complete") 34 | int block_rq_complete(struct trace_event_raw_block_rq_completion *ctx) { 35 | u64 id = bpf_get_current_pid_tgid(); 36 | u32 tgid = id >> 32; 37 | u32 major_key = 0; // index 0 for major number 38 | u32 minor_key = 1; // index 1 for minor number 39 | unsigned int *target_major = bpf_map_lookup_elem(&target_disk_map, &major_key); 40 | unsigned int *target_minor = bpf_map_lookup_elem(&target_disk_map, &minor_key); 41 | unsigned int major = MAJOR(ctx->dev); 42 | unsigned int minor = MINOR(ctx->dev); 43 | char local_rwbs[sizeof(ctx->rwbs)]; 44 | bpf_probe_read_str(&local_rwbs, sizeof(local_rwbs), ctx->rwbs); 45 | 46 | if (!target_major || !target_minor) { 47 | return 0; // If values are not present in the map 48 | } 49 | if (major == *target_major && minor == *target_minor) { 50 | // Manually check if 'W' is in the rwbs string 51 | int found_w = 0; 52 | for (int i = 0; i < sizeof(local_rwbs); i++) { 53 | if (local_rwbs[i] == 'W') { 54 | found_w = 1; 55 | break; 56 | } 57 | } 58 | if (!found_w) { 59 | return 0; // Exit if there is no write operation 60 | } 61 | struct event *task_info = bpf_ringbuf_reserve(&events, sizeof(struct event), 0); 62 | if (!task_info) { 63 | return 0; 64 | } 65 | 66 | task_info->block_start = ctx->sector; 67 | task_info->block_end = ctx->sector + ctx->nr_sector; 68 | // task_info->pid = tgid; 69 | // task_info->bi_sector = ctx->sector; 70 | // task_info->major = major; 71 | // task_info->minor = minor; 72 | 73 | bpf_ringbuf_submit(task_info, 0); 74 | } 75 | 76 | return 0; 77 | } -------------------------------------------------------------------------------- /cmd/dispatcher.go: -------------------------------------------------------------------------------- 1 | package cmd 2 | 3 | import ( 4 | "github.com/xmigrate/blxrep/pkg/dispatcher" 5 | "github.com/xmigrate/blxrep/tui" 6 | 7 | "github.com/spf13/cobra" 8 | ) 9 | 10 | var tuiCmd = &cobra.Command{ 11 | Use: "tui", 12 | Short: "TUI commands", 13 | Long: `TUI commands for interacting with the dispatcher and agents.`, 14 | Run: func(cmd *cobra.Command, args []string) { 15 | dataDir, _ := cmd.Flags().GetString("data-dir") 16 | tui.RunDispatcherTUI(dataDir) 17 | }, 18 | } 19 | 20 | var showCmd = &cobra.Command{ 21 | Use: "show", 22 | Short: "Show dispatcher checkpoints", 23 | Run: func(cmd *cobra.Command, args []string) { 24 | start, _ := cmd.Flags().GetString("start") 25 | end, _ := cmd.Flags().GetString("end") 26 | agent, _ := cmd.Flags().GetString("agent") 27 | dataDir, _ := cmd.Flags().GetString("data-dir") 28 | disk, _ := cmd.Flags().GetString("disk") 29 | dispatcher.ShowCheckpoints(start, end, agent, dataDir, disk) 30 | }, 31 | } 32 | 33 | var restoreCmd = &cobra.Command{ 34 | Use: "restore", 35 | Short: "Restore dispatcher checkpoint", 36 | Run: func(cmd *cobra.Command, args []string) { 37 | checkpoint, _ := cmd.Flags().GetString("checkpoint") 38 | dataDir, _ := cmd.Flags().GetString("data-dir") 39 | agent, _ := cmd.Flags().GetString("agent") 40 | dispatcher.Restore(checkpoint, dataDir, agent) 41 | }, 42 | } 43 | 44 | func init() { 45 | rootCmd.AddCommand(tuiCmd) 46 | tuiCmd.Flags().String("data-dir", "", "Data directory") 47 | 48 | rootCmd.AddCommand(showCmd) 49 | showCmd.Flags().String("start", "", "Start timestamp format: YYYYMMDDHHMM") 50 | showCmd.Flags().String("end", "", "End timestamp format: YYYYMMDDHHMM") 51 | showCmd.Flags().String("agent", "", "Agent name") 52 | 
showCmd.Flags().String("disk", "", "Disk name") 53 | showCmd.Flags().String("data-dir", "", "Data directory") 54 | showCmd.MarkFlagRequired("agent") 55 | showCmd.MarkFlagRequired("data-dir") 56 | rootCmd.AddCommand(restoreCmd) 57 | restoreCmd.Flags().String("checkpoint", "", "Checkpoint timestamp") 58 | restoreCmd.Flags().String("agent", "", "Agent name") 59 | restoreCmd.Flags().String("data-dir", "", "Data directory") 60 | restoreCmd.MarkFlagRequired("checkpoint") 61 | restoreCmd.MarkFlagRequired("agent") 62 | restoreCmd.MarkFlagRequired("data-dir") 63 | } 64 | -------------------------------------------------------------------------------- /cmd/root.go: -------------------------------------------------------------------------------- 1 | package cmd 2 | 3 | import ( 4 | "fmt" 5 | "os" 6 | 7 | "github.com/newrelic/go-agent/v3/newrelic" 8 | "github.com/spf13/cobra" 9 | ) 10 | 11 | var nrApp *newrelic.Application 12 | var rootCmd = &cobra.Command{ 13 | Use: "blxrep", 14 | Short: "blxrep CLI application", 15 | Long: `This tool is used to do live data replication for disks over network.`, 16 | PersistentPreRun: func(cmd *cobra.Command, args []string) { 17 | if nrApp != nil { 18 | txn := nrApp.StartTransaction(cmd.Name()) 19 | cmd.SetContext(newrelic.NewContext(cmd.Context(), txn)) 20 | } 21 | }, 22 | PersistentPostRun: func(cmd *cobra.Command, args []string) { 23 | if nrApp != nil { 24 | if txn := newrelic.FromContext(cmd.Context()); txn != nil { 25 | txn.End() 26 | } 27 | } 28 | }, 29 | } 30 | 31 | func GetRootCmd() *cobra.Command { 32 | return rootCmd 33 | } 34 | 35 | func Execute() { 36 | if err := rootCmd.Execute(); err != nil { 37 | fmt.Println(err) 38 | os.Exit(1) 39 | } 40 | } 41 | 42 | func init() { 43 | rootCmd.Flags().BoolP("toggle", "t", false, "Help message for toggle") 44 | } 45 | -------------------------------------------------------------------------------- /cmd/start.go: -------------------------------------------------------------------------------- 1 | package cmd 2 | 3 | import ( 4 | "fmt" 5 | "os" 6 | 7 | "github.com/xmigrate/blxrep/pkg/agent" 8 | "github.com/xmigrate/blxrep/pkg/dispatcher" 9 | 10 | "github.com/spf13/cobra" 11 | "github.com/spf13/viper" 12 | ) 13 | 14 | var cfgFile string 15 | 16 | var startCmd = &cobra.Command{ 17 | Use: "start", 18 | Short: "Start blxrep", 19 | Run: func(cmd *cobra.Command, args []string) { 20 | mode := viper.GetString("mode") 21 | switch mode { 22 | case "dispatcher": 23 | dataDir := viper.GetString("data-dir") 24 | targets := viper.GetStringSlice("targets") 25 | policyDir := viper.GetString("policy-dir") 26 | fmt.Println("Dispatcher started...") 27 | if dataDir == "" { 28 | fmt.Println("Data directory is required") 29 | return 30 | } 31 | if policyDir == "" { 32 | fmt.Println("Policy directory is required") 33 | return 34 | } 35 | dispatcher.Start(dataDir, targets, policyDir) 36 | case "agent": 37 | agentID := viper.GetString("id") 38 | dispatcherAddr := viper.GetString("dispatcher-addr") 39 | if agentID == "" { 40 | fmt.Println("Agent ID is required") 41 | return 42 | } 43 | if dispatcherAddr == "" { 44 | fmt.Println("Dispatcher address is required") 45 | return 46 | } 47 | fmt.Printf("Starting agent with ID: %s, connecting to dispatcher at: %s\n", agentID, dispatcherAddr) 48 | agent.Start(agentID, dispatcherAddr) 49 | default: 50 | fmt.Println("Invalid mode. 
Use 'dispatcher' or 'agent' in config or --mode flag") 51 | } 52 | }, 53 | } 54 | 55 | func init() { 56 | cobra.OnInitialize(initConfig) 57 | 58 | rootCmd.PersistentFlags().StringVar(&cfgFile, "config", "", "config file (default is /etc/blxrep/config.yaml)") 59 | 60 | startCmd.Flags().String("mode", "", "Start mode: 'dispatcher' or 'agent'") 61 | startCmd.Flags().String("id", "", "Agent ID (required for agent)") 62 | startCmd.Flags().String("dispatcher-addr", "", "Dispatcher address (required for agent, format: host:port)") 63 | startCmd.Flags().String("data-dir", "", "Data directory (required for dispatcher)") 64 | startCmd.Flags().String("policy-dir", "", "Policy directory (required for dispatcher)") 65 | viper.BindPFlag("mode", startCmd.Flags().Lookup("mode")) 66 | viper.BindPFlag("id", startCmd.Flags().Lookup("id")) 67 | viper.BindPFlag("dispatcher-addr", startCmd.Flags().Lookup("dispatcher-addr")) 68 | viper.BindPFlag("data-dir", startCmd.Flags().Lookup("data-dir")) 69 | viper.BindPFlag("policy-dir", startCmd.Flags().Lookup("policy-dir")) 70 | rootCmd.AddCommand(startCmd) 71 | } 72 | 73 | func initConfig() { 74 | if cfgFile != "" { 75 | viper.SetConfigFile(cfgFile) 76 | } else { 77 | viper.AddConfigPath("/etc/blxrep") 78 | viper.SetConfigName("config") 79 | viper.SetConfigType("yaml") 80 | } 81 | 82 | viper.AutomaticEnv() 83 | 84 | if err := viper.ReadInConfig(); err == nil { 85 | fmt.Fprintln(os.Stderr, "Using config file:", viper.ConfigFileUsed()) 86 | } else { 87 | fmt.Fprintln(os.Stderr, "Warning: Could not read config file:", err) 88 | } 89 | } 90 | -------------------------------------------------------------------------------- /config.yaml: -------------------------------------------------------------------------------- 1 | mode: "agent" 2 | id: "ip-172-31-46-49" 3 | dispatcher-addr: "3.110.56.28:8080" 4 | 5 | name: "vishnu ks" 6 | email: "vishnu@xmigrate.cloud" 7 | organization: "xmigrate" -------------------------------------------------------------------------------- /disconfig.yaml: -------------------------------------------------------------------------------- 1 | mode: "dispatcher" 2 | data-dir: "/data" 3 | targets: 4 | - "AWSDemo" 5 | backend-url: "https://app.predator00758.com" 6 | backend-token: "gAAAAABnXAY8QkIodehFb2L5AeuQjkHtsC6V_RMvq92Vi6_szscXiIWtJ0GwBgy5sS8c6Tv3BsV2l89H2TNKGbc2BHyn5zReTota93VXEYLK7ikOtI_fSU2DaLSe5AyTZhXab0q__PBQ3TrpEwKt42RPfvAoiFSLWmVoOV-asIjEORuKBpdiQovFcDdNjTMw1c2hlXRVuDymrPqHDIXt_X1yJ0SjCnEs4AHn9xGkZG7JHKJtFR9TsTmLLh-kqkkG28o6GXRNVeTdWiCqNjOfgx3Y_Sai4ECwOAiWFA19HMBhe6lQIaWQ5gBEMNpd6gEHGJGUakmLPEIsf5F3T1A7UKbox8Iy1QqqAdMsI9-Vxxf6_Gqke2Usz1ui3A2oZ5J8cBa4BjwaszU8Oet_W_9Y3zLwaakZCshRzgJNWOp5FJWTwpOaW1cDoeQlfmFzQjp0NORHGZY9-6ceWiHep1UmdJkGPkejbEakD49916jkRXZaNaxxv9RL6743cKHrUQntz5D08lA3pnQ2u2Veu4dIZIwMStzhHoOcFQ==" 7 | # Add user configuration for registration 8 | name: "Vishnu ks" 9 | email: "ksvishnu56@gmail.com" 10 | organization: "appmend" 11 | license-file: "/data/xmigrate.license" 12 | -------------------------------------------------------------------------------- /docs/CNAME: -------------------------------------------------------------------------------- 1 | blxrep.xmigrate.cloud -------------------------------------------------------------------------------- /docs/architecture.md: -------------------------------------------------------------------------------- 1 | # Architecture 2 | ## Tech stack 3 | 4 | We have chosen to use the following tech stack: 5 | 6 | - Golang 1.23 7 | - BoltDB 8 | - eBPF 9 | 10 | We only have one code base for 
the Agent, the Dispatcher, and the TUI application, and at the moment they all ship as a single binary. BoltDB is used to store the status of various actions such as clone, pause, resume, and restore, the metadata of the servers where the agent is running, and the record of dirty sectors. 11 | 12 | ## Overview 13 | 14 | The blxrep architecture establishes a robust, real-time connection between Agents and the Dispatcher through multiple WebSocket channels. This design enables both full disk backups and continuous incremental change tracking to operate simultaneously. When an Agent connects, it authenticates through a secure WebSocket connection and immediately begins two parallel processes: creating a complete disk image and monitoring disk sectors for changes. 15 | During the full backup process, the Agent streams disk data to the Dispatcher, which stores it as an .img file in the snapshot directory while tracking progress in the xmactions database. Simultaneously, the Agent monitors disk sectors for changes, sending these sector numbers to the Dispatcher through a dedicated WebSocket channel. The Dispatcher preserves these sector changes as .cst files in the incremental directory. 16 | 17 | At regular intervals defined by the live_sync_frequency, the Dispatcher reads the collected sector numbers and requests the corresponding data from the Agent. Upon receiving this data, the Dispatcher stores it in .bak files within the incremental directory, ensuring all changes are captured and preserved. 18 | 19 | In the current implementation, if network connectivity between the Agent and Dispatcher is interrupted, the Agent initiates a new full disk snapshot upon reconnection. While this approach ensures data consistency, it's not optimized for network efficiency or storage resources. We are actively exploring more efficient approaches that would capture only the incremental changes that occurred during the network downtime, alongside the existing live change sector tracking mechanism. 20 | 21 | This optimization would significantly reduce network bandwidth usage and backup time during reconnection scenarios. Instead of transferring the entire disk image again, the system would only need to synchronize the specific sectors that changed during the disconnection period. This enhancement would be particularly valuable in environments with unstable network connections or when dealing with large disk volumes.
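To make the on-disk layout described above concrete, a dispatcher's data directory might look roughly like this (the file names are illustrative; only the snapshot/incremental split and the .img/.cst/.bak extensions come from the description above):

```
/data
├── snapshot/
│   └── web-01_vda.img                # full disk image streamed by the agent
└── incremental/
    ├── web-01_vda_202501011200.cst   # changed-sector numbers
    └── web-01_vda_202501011200.bak   # data read back for those sectors
```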
22 | 23 | 24 | ```mermaid 25 | sequenceDiagram 26 | participant A as Agent 27 | participant D as Dispatcher 28 | participant XMA as xmactions DB 29 | participant XMD as xmdispatcher DB 30 | participant SNAP as /data-dir/snapshot 31 | participant INC as /data-dir/incremental 32 | 33 | A->>+D: WS: /ws/config, /ws/snapshot, /ws/live, /ws/restore 34 | A->>D: Auth (secret) 35 | D-->>-A: Auth Success 36 | A->>D: Footprint Data 37 | D->>XMD: Store Footprint 38 | 39 | par Full Backup 40 | A->>A: Start disk clone 41 | A->>+D: Metadata (/ws/snapshot) 42 | D->>XMA: Create action 43 | D-->>-A: ACK 44 | loop Backup Progress 45 | A->>D: Disk chunks 46 | D->>XMA: Update progress 47 | D->>SNAP: Write .img file 48 | end 49 | and Change Monitor 50 | A->>A: Monitor sectors 51 | loop On Changes 52 | A->>D: Changed sectors (/ws/live) 53 | D->>INC: Write sectors (.cst) 54 | end 55 | end 56 | 57 | loop Live Sync (live_sync_frequency) 58 | D->>INC: Read .cst file 59 | D->>A: Request sector data 60 | A->>A: Read sectors 61 | A->>D: Send sector data 62 | D->>INC: Write .bak file 63 | end 64 | ``` 65 | 66 | The architecture utilizes four distinct WebSocket endpoints: 67 | 68 | - /ws/config for configuration management 69 | - /ws/snapshot for full disk backup operations 70 | - /ws/live for real-time change tracking 71 | - /ws/restore for data restoration processes 72 | 73 | This separation of concerns allows for efficient handling of different types of operations while maintaining persistent connections between the Agent and Dispatcher. The combination of continuous change tracking and dedicated communication channels makes blxrep particularly effective for maintaining synchronized disk states across systems. The planned optimizations for handling network interruptions will further enhance the system's efficiency and reliability in real-world deployment scenarios. 74 | 75 | ## Deployment Architecture 76 | 77 | ```mermaid 78 | architecture-beta 79 | group dispatcher_system(cloud)[Dispatcher System] 80 | service dispatcher_core(server)[Dispatcher] in dispatcher_system 81 | service backup_storage(disk)[Backup Storage] in dispatcher_system 82 | 83 | group target_servers(server)[Target Servers] 84 | service agent1(server)[Agent 1] in target_servers 85 | 86 | 87 | service admin(internet)[Backup Administrator] 88 | 89 | dispatcher_core:B -- T:backup_storage 90 | 91 | agent1:R -- L:dispatcher_core 92 | admin:R -- L:dispatcher_core 93 | 94 | ``` 95 | 96 | Dispatcher is deployed in a different subnet or the datacenter than the target servers. The target servers can be connected to the dispatcher privately or publicly. The backup storage is a disk that is mounted to the dispatcher server where the backups are stored. 
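As a closing illustration, here is a minimal Go sketch of the live-sync cycle described in the overview; the `sectorRange` type and the three function parameters are hypothetical stand-ins for blxrep's actual .cst reading, /ws/live request, and .bak writing logic, not its real API:

```go
package livesync

import (
	"log"
	"time"
)

// sectorRange mirrors the changed-sector pairs the agent reports (hypothetical).
type sectorRange struct{ Start, End uint64 }

// liveSync runs one sync cycle per tick of the policy's live_sync_frequency.
func liveSync(
	agentID string,
	freq time.Duration,
	readChangedSectors func(agentID string) ([]sectorRange, error), // drain collected .cst entries
	fetchSectorData func(agentID string, ranges []sectorRange) ([]byte, error), // ask the agent over /ws/live
	appendIncremental func(agentID string, data []byte) error, // append to a .bak file
) {
	ticker := time.NewTicker(freq)
	defer ticker.Stop()
	for range ticker.C {
		ranges, err := readChangedSectors(agentID)
		if err != nil || len(ranges) == 0 {
			continue // nothing tracked since the last cycle
		}
		data, err := fetchSectorData(agentID, ranges)
		if err != nil {
			log.Printf("live sync for %s failed: %v", agentID, err)
			continue
		}
		if err := appendIncremental(agentID, data); err != nil {
			log.Printf("writing .bak for %s failed: %v", agentID, err)
		}
	}
}
```

Passing the I/O as function parameters keeps the sketch self-contained without pretending to know the dispatcher's real storage or WebSocket layer.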
97 | -------------------------------------------------------------------------------- /docs/assets/blxrepIcon.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/xmigrate/blxrep/3a931c6b216b0ed28f18729ba9fc626f42b9fcf5/docs/assets/blxrepIcon.png -------------------------------------------------------------------------------- /docs/assets/blxrepIcon.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | -------------------------------------------------------------------------------- /docs/assets/blxrepsocialxm.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/xmigrate/blxrep/3a931c6b216b0ed28f18729ba9fc626f42b9fcf5/docs/assets/blxrepsocialxm.jpg -------------------------------------------------------------------------------- /docs/assets/logo.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | blxrep 17 | 18 | -------------------------------------------------------------------------------- /docs/index.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: An eBPF based disk replication tool 3 | description: Get started with blxrep, an eBPF based disk replication tool designed for live data replication of disks over a network. 4 | --- 5 |

6 | ![blxrep logo](assets/logo.svg) 7 |

8 | 9 | # blxrep 10 | 11 | blxrep is a powerful tool designed for live data replication of disks over a network. It operates in two modes: dispatcher and agent, allowing for efficient and flexible disaster recovery setup. 12 | blxrep tracks the changes that happen on disk at sector level using eBPF tracepoints. 13 | 14 | ## Modes of Operation 15 | blxrep can be run in three modes: dispatcher, agent, and TUI. Each mode has its own purpose and configuration. 16 | 17 | ### Dispatcher Mode 18 | 19 | In dispatcher mode, blxrep manages the overall replication process. It acts as a central collector for replicating disk data from multiple servers. It requires a data directory and policy directory to be specified. All types of disk backups are collected and stored in the specified data directory. The policy directory is used to specify the policy for the disk backups for each agent. 20 | 21 | ### Agent Mode 22 | 23 | In agent mode, blxrep runs on individual servers to send snapshot backups and live changes to the dispatcher. It requires an agent ID, dispatcher address, and device to be specified. The agent ID must be unique if multiple servers connect to the same dispatcher. The device is the disk that needs to be backed up and monitored for live changes. 24 | 25 | ### TUI mode 26 | blxrep also provides a TUI mode to interact with the dispatcher and agents. 27 | We use tcell, a terminal UI library for Go that is easy to use and highly customizable. With TUI mode, you can navigate through the agents that are connected to the dispatcher and see the status of the disk backups. You can also mount the disk backups to any available point in time and restore files or partitions with the help of the TUI. 28 | 29 | To start the TUI, use the `tui` command: 30 | 31 | ```bash 32 | blxrep tui --data-dir=<data-dir> 33 | ``` 34 | -------------------------------------------------------------------------------- /docs/motivation.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: Why did we build blxrep? 3 | description: Why did we build blxrep, and why with eBPF? 4 | --- 5 | # Background and Motivation 6 | 7 | Traditionally, companies have relied on kernel modules for change block tracking and bitmap-based incremental backups. While functional, this approach has several limitations: 8 | 9 | 1. Complex kernel module development and maintenance requiring deep kernel expertise 10 | 2. Challenging debugging process due to kernel space operations 11 | 3. Limited testing capabilities in kernel space 12 | 4. Scalability constraints due to kernel-level implementation 13 | 5. Steep learning curve for kernel module development 14 | 6. System reboots required for kernel module loading and updates 15 | 7. Potential for system instability and security vulnerabilities due to unrestricted kernel access 16 | 17 | blxrep modernizes this approach by leveraging eBPF tracepoints to track disk changes at the sector level. This brings several advantages: 18 | 19 | 1. Simplified development through eBPF's modern tooling, extensive documentation, and active community support 20 | 2. Enhanced debugging capabilities with user-space tools and eBPF maps 21 | 3. Comprehensive testing framework support 22 | 4. Better scalability through efficient event processing 23 | 5. More approachable learning curve with high-level eBPF programming interfaces 24 | 6. Dynamic loading without system reboots 25 | 7. 
Improved safety through eBPF's verifier and sandboxed execution environment -------------------------------------------------------------------------------- /docs/setup.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: Let's set up blxrep! 3 | description: Setting up blxrep is easy! Get started here. 4 | --- 5 | # Setup guide 6 | ## Agent Setup 7 | 8 | ### Agent Prerequisites 9 | - Requires a Linux kernel 5.10 or higher with eBPF support 10 | - Supported on Linux only 11 | 12 | ### Agent Installation 13 | === "Debian/Ubuntu (.deb)" 14 | 1. Download the package: 15 | ```bash {.copy} 16 | wget https://github.com/xmigrate/blxrep/releases/download/v0.1.0/blxrep-0.1.0-amd64.deb 17 | ``` 18 | 19 | 2. Install the package: 20 | ```bash {.copy} 21 | sudo dpkg -i blxrep-0.1.0-amd64.deb 22 | ``` 23 | > Note: If you get an error about missing dependencies, you can install them with: 24 | ```bash {.copy} 25 | sudo apt-get install -f 26 | ``` 27 | 28 | === "RedHat/CentOS (.rpm)" 29 | 1. Download the package: 30 | ```bash {.copy} 31 | wget https://github.com/xmigrate/blxrep/releases/download/v0.1.0/blxrep-0.1.0-x86_64.rpm 32 | ``` 33 | 2. Install the package: 34 | ```bash {.copy} 35 | sudo rpm -i blxrep-0.1.0-x86_64.rpm 36 | ``` 37 | 38 | ### Verify Installation 39 | ```bash {.copy} 40 | sudo systemctl status blxrep 41 | ``` 42 | ### Agent Configuration 43 | 44 | The agent configuration file is located at `/etc/blxrep/config.yaml` by default. 45 | 46 | Below is an example configuration file: 47 | 48 | ```yaml {.copy} 49 | mode: "agent" 50 | id: "hostname" 51 | dispatcher-addr: "ip:port" 52 | ``` 53 | 54 | #### Configuration Parameters 55 | 56 | | Parameter | Value | Description | 57 | |-----------|--------|-------------| 58 | | `mode` | `"agent"` | Specifies the operation mode | 59 | | `id` | `"hostname"` | A unique identifier for the agent, usually the hostname | 60 | | `dispatcher-addr` | `"ip:port"` | IP address and port of the dispatcher (default port: 8080) | 61 | 62 | ### Agent Post Installation and Configuration 63 | 64 | ```bash {.copy} 65 | sudo systemctl restart blxrep 66 | sudo systemctl enable blxrep 67 | ``` 68 | 69 | ## Dispatcher Setup 70 | 71 | ### Dispatcher Prerequisites 72 | - Linux OS 73 | - An additional disk mounted to a dedicated directory to store the full and incremental backups 74 | 75 | ### Dispatcher Installation 76 | 77 | === "Debian/Ubuntu (.deb)" 78 | 1. Download the package: 79 | ```bash {.copy} 80 | wget https://github.com/xmigrate/blxrep/releases/download/v0.1.0/blxrep-0.1.0-amd64.deb 81 | ``` 82 | 83 | 2. Install the package: 84 | ```bash {.copy} 85 | sudo dpkg -i blxrep-0.1.0-amd64.deb 86 | ``` 87 | > Note: If you get an error about missing dependencies, you can install them with: 88 | ```bash {.copy} 89 | sudo apt-get install -f 90 | ``` 91 | 92 | === "RedHat/CentOS (.rpm)" 93 | 1. Download the package: 94 | ```bash {.copy} 95 | wget https://github.com/xmigrate/blxrep/releases/download/v0.1.0/blxrep-0.1.0-x86_64.rpm 96 | ``` 97 | 2. Install the package: 98 | ```bash {.copy} 99 | sudo rpm -i blxrep-0.1.0-x86_64.rpm 100 | ``` 101 | 102 | ### Verify Installation 103 | ```bash {.copy} 104 | sudo systemctl status blxrep 105 | ``` 106 | 107 | ### Dispatcher Configuration 108 | 109 | The dispatcher configuration file is located at `/etc/blxrep/config.yaml` by default. 
110 | 111 | Below is an example configuration file: 112 | 113 | ```yaml {.copy} 114 | mode: "dispatcher" 115 | data-dir: "/data" 116 | policy-dir: "/etc/blxrep/policies" 117 | ``` 118 | 119 | #### Configuration Parameters 120 | 121 | | Parameter | Value | Description | 122 | |-----------|--------|-------------| 123 | | `mode` | `"dispatcher"` | Specifies the operation mode | 124 | | `data-dir` | `"/data"` | Directory to store the full and incremental backups | 125 | | `policy-dir` | `"/etc/blxrep/policies"` | Directory to store the backup policies | 126 | 127 | ### Backup policy 128 | 129 | A backup policy is a YAML file that defines the backup schedule, retention policy, and other backup settings. Policies are located in `/etc/blxrep/policies` by default. As you add new servers for backup, create additional YAML policy files in this directory. 130 | 131 | Below is an example backup policy file: 132 | 133 | ```yaml {.copy} 134 | name: "default-backup-policy" 135 | description: "Backup policy for all servers" 136 | archive_interval: 48h 137 | snapshot_frequency: "daily" 138 | snapshot_time: "12:00:00" 139 | bandwidth_limit: 100 140 | snapshot_retention: 30 141 | live_sync_frequency: 2m 142 | transition_after_days: 30 143 | delete_after_days: 90 144 | 145 | targets: 146 | # Range pattern 147 | - pattern: "*" 148 | disks_excluded: 149 | - "/dev/xvdb" 150 | ``` 151 | 152 | #### Policy Parameters 153 | 154 | | Parameter | Description | 155 | |-----------|-------------| 156 | | `name` | Name of the policy | 157 | | `description` | Description of the policy | 158 | | `archive_interval` | Interval at which backups are archived, e.g. `48h`, `1d`, `1w`, `1m` | 159 | | `snapshot_frequency` | Frequency of the snapshots (daily, weekly, monthly) | 160 | | `snapshot_time` | Time of day (HH:MM:SS, 24-hour) to take the snapshots | 161 | | `bandwidth_limit` | Bandwidth limit for the backup in MB/s | 162 | | `snapshot_retention` | Number of days to keep the snapshots | 163 | | `live_sync_frequency` | Frequency of the live sync | 164 | | `transition_after_days` | Number of days after which full and incremental backups are transitioned to archive storage | 165 | | `delete_after_days` | Number of days after which full and incremental backups are deleted | 166 | | `targets` | List of targets to back up | 167 | | `targets[].pattern` | Pattern matching target agents, e.g. `"*"` or a hostname | 168 | | `targets[].disks_excluded` | List of disks to exclude from the backup | 169 | 170 | ### Dispatcher Post-Installation Configuration 171 | 172 | ```bash {.copy} 173 | sudo systemctl restart blxrep 174 | sudo systemctl enable blxrep 175 | ``` 176 | -------------------------------------------------------------------------------- /docs/troubleshoot.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: What could go wrong? 3 | description: Let's see what could go wrong with blxrep... 4 | --- 5 | # Troubleshooting 6 | 7 | ## Agent not connecting to the dispatcher 8 | 9 | If the agent is not connecting to the dispatcher, check the logs of the agent and the dispatcher for errors. 10 | 11 | To check the logs of the agent, use the following command: 12 | 13 | ```bash 14 | journalctl -xeu blxrep -f 15 | ``` 16 | 17 | To check the logs of the dispatcher, use the following command: 18 | 19 | ```bash 20 | tail -f /logs/blxrep.log 21 | ``` 22 | 23 | If you see errors, try to fix them with the help of the documentation, or ask for help in the community.
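If the logs are inconclusive, rule out basic network problems with a TCP reachability check against the dispatcher. Below is a minimal sketch in Go; the address is a placeholder and 8080 is only the default dispatcher port:

```go
package main

import (
	"log"
	"net"
	"time"
)

func main() {
	// Replace with your dispatcher's address; 8080 is the default port.
	addr := "dispatcher.example.com:8080"

	conn, err := net.DialTimeout("tcp", addr, 5*time.Second)
	if err != nil {
		log.Fatalf("dispatcher unreachable at %s: %v", addr, err)
	}
	defer conn.Close()
	log.Printf("TCP connection to %s succeeded; check the service logs next", addr)
}
```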
24 | 25 | -------------------------------------------------------------------------------- /docs/tui.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: An interactive terminal UI 3 | description: blxrep provides an interactive terminal UI to navigate through the backups of an agent. 4 | --- 5 | # TUI Operations 6 | 7 | ## Navigating through the backups of an agent 8 | 9 | TUI is provided by the `blxrep tui` command. It is a terminal UI that allows you to navigate through the backups of an agent. You can do the following operations: 10 | 11 | - Navigate through the backups of an agent 12 | - Mount a backup to a point in time 13 | - Restore a file or partition from a backup 14 | - Check the scheduled backups progress and status 15 | - Check the status of the agent 16 | 17 | ### Quick start video 18 | 19 | 20 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/xmigrate/blxrep 2 | 3 | go 1.23.0 4 | 5 | require ( 6 | github.com/cilium/ebpf v0.17.1 7 | github.com/fatih/color v1.18.0 8 | github.com/gdamore/tcell/v2 v2.8.1 9 | github.com/google/uuid v1.6.0 10 | github.com/gorilla/websocket v1.5.3 11 | github.com/klauspost/compress v1.17.11 12 | github.com/newrelic/go-agent/v3 v3.36.0 13 | github.com/rivo/tview v0.0.0-20241227133733-17b7edb88c57 14 | github.com/shirou/gopsutil v3.21.11+incompatible 15 | github.com/spf13/cobra v1.8.1 16 | github.com/spf13/viper v1.19.0 17 | go.etcd.io/bbolt v1.3.11 18 | golang.org/x/sys v0.29.0 19 | gopkg.in/yaml.v3 v3.0.1 20 | ) 21 | 22 | require ( 23 | github.com/fsnotify/fsnotify v1.7.0 // indirect 24 | github.com/gdamore/encoding v1.0.1 // indirect 25 | github.com/go-ole/go-ole v1.2.6 // indirect 26 | github.com/hashicorp/hcl v1.0.0 // indirect 27 | github.com/inconshreveable/mousetrap v1.1.0 // indirect 28 | github.com/lucasb-eyer/go-colorful v1.2.0 // indirect 29 | github.com/magiconair/properties v1.8.7 // indirect 30 | github.com/mattn/go-colorable v0.1.13 // indirect 31 | github.com/mattn/go-isatty v0.0.20 // indirect 32 | github.com/mattn/go-runewidth v0.0.16 // indirect 33 | github.com/mitchellh/mapstructure v1.5.0 // indirect 34 | github.com/pelletier/go-toml/v2 v2.2.2 // indirect 35 | github.com/rivo/uniseg v0.4.7 // indirect 36 | github.com/sagikazarmark/locafero v0.4.0 // indirect 37 | github.com/sagikazarmark/slog-shim v0.1.0 // indirect 38 | github.com/sourcegraph/conc v0.3.0 // indirect 39 | github.com/spf13/afero v1.11.0 // indirect 40 | github.com/spf13/cast v1.6.0 // indirect 41 | github.com/spf13/pflag v1.0.5 // indirect 42 | github.com/subosito/gotenv v1.6.0 // indirect 43 | github.com/tklauser/go-sysconf v0.3.14 // indirect 44 | github.com/tklauser/numcpus v0.8.0 // indirect 45 | github.com/yusufpapurcu/wmi v1.2.4 // indirect 46 | go.uber.org/atomic v1.9.0 // indirect 47 | go.uber.org/multierr v1.9.0 // indirect 48 | golang.org/x/exp v0.0.0-20230905200255-921286631fa9 // indirect 49 | golang.org/x/net v0.25.0 // indirect 50 | golang.org/x/term v0.28.0 // indirect 51 | golang.org/x/text v0.21.0 // indirect 52 | google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157 // indirect 53 | google.golang.org/grpc v1.65.0 // indirect 54 | google.golang.org/protobuf v1.34.2 // indirect 55 | gopkg.in/ini.v1 v1.67.0 // indirect 56 | ) 57 | -------------------------------------------------------------------------------- /main.go: 
-------------------------------------------------------------------------------- 1 | /* 2 | Copyright © 2024 Vishnu KS vishnu@xmigrate.cloud 3 | */ 4 | package main 5 | 6 | import ( 7 | "fmt" 8 | "os" 9 | 10 | "github.com/xmigrate/blxrep/cmd" 11 | "github.com/xmigrate/blxrep/tui" 12 | "github.com/xmigrate/blxrep/utils" 13 | 14 | _ "embed" 15 | 16 | "github.com/spf13/cobra" 17 | ) 18 | 19 | // $BPF_CLANG and $BPF_CFLAGS are set by the Makefile. 20 | //go:generate go run github.com/cilium/ebpf/cmd/bpf2go -target native bpf bpf/trace-blocks.c -- -I./bpf/headers 21 | 22 | var publicKeyData []byte 23 | 24 | func main() { 25 | utils.PrintAnimatedLogo() 26 | 27 | utils.PublicKeyData = publicKeyData 28 | 29 | rootCmd := cmd.GetRootCmd() 30 | 31 | // Wrap the tui command so it launches the dispatcher TUI when the data-dir and agent flags are set 32 | for _, subCmd := range rootCmd.Commands() { 33 | if subCmd.Use == "tui" { 34 | originalRun := subCmd.Run 35 | subCmd.Run = func(cmd *cobra.Command, args []string) { 36 | dataDir, _ := cmd.Flags().GetString("data-dir") 37 | agent, _ := cmd.Flags().GetString("agent") 38 | if dataDir != "" && agent != "" { 39 | tui.RunDispatcherTUI(dataDir) 40 | } else { 41 | // Fall back to original behavior if flags are not set 42 | originalRun(cmd, args) 43 | } 44 | } 45 | break 46 | } 47 | } 48 | 49 | if err := rootCmd.Execute(); err != nil { 50 | fmt.Println(err) 51 | os.Exit(1) 52 | } 53 | } 54 | -------------------------------------------------------------------------------- /mkdocs.yml: -------------------------------------------------------------------------------- 1 | site_name: blxrep 2 | site_url: https://blxrep.xmigrate.cloud 3 | repo_url: https://github.com/xmigrate/blxrep 4 | repo_name: xmigrate/blxrep 5 | 6 | plugins: 7 | - search 8 | - social: 9 | cards_layout_options: 10 | background_color: "#dde0e6" 11 | color: "#4c1d95" 12 | background_image: assets/blxrepsocialxm.jpg 13 | 14 | theme: 15 | name: material 16 | favicon: assets/blxrepIcon.svg 17 | palette: 18 | - scheme: default 19 | primary: white 20 | accent: deep purple 21 | toggle: 22 | icon: material/brightness-7 23 | name: Switch to dark mode 24 | 25 | # Palette toggle for dark mode 26 | - scheme: slate 27 | primary: black 28 | accent: deep purple 29 | toggle: 30 | icon: material/brightness-4 31 | name: Switch to light mode 32 | # primary: white 33 | # accent: deep purple 34 | logo: assets/blxrepIcon.svg 35 | features: 36 | - navigation.sections 37 | - content.code.copy 38 | - navigation.footer 39 | nav: 40 | - Home: index.md 41 | - Motivation: motivation.md 42 | - Architecture: architecture.md 43 | - Setup: setup.md 44 | - TUI: tui.md 45 | - Troubleshoot: troubleshoot.md 46 | 47 | markdown_extensions: 48 | - pymdownx.tabbed: 49 | alternate_style: true 50 | - pymdownx.superfences: 51 | custom_fences: 52 | - name: mermaid 53 | class: mermaid 54 | format: !!python/name:pymdownx.superfences.fence_code_format 55 | - pymdownx.superfences 56 | - attr_list 57 | 58 | extra: 59 | analytics: 60 | provider: google 61 | property: G-XRMVNLVYE1 62 | 63 | copyright: Copyright © 2025 Xmigrate Inc.
64 | 65 | -------------------------------------------------------------------------------- /package/etc/blxrep/config.yaml: -------------------------------------------------------------------------------- 1 | # Sample agent configuration 2 | # mode: "agent" 3 | # id: "hostname" 4 | # dispatcher-addr: "localhost:8080" 5 | 6 | # Sample dispatcher configuration 7 | # mode: "dispatcher" 8 | # data-dir: "/data" 9 | # policy-dir: "/etc/blxrep/policies" -------------------------------------------------------------------------------- /package/etc/blxrep/policies/default.yaml: -------------------------------------------------------------------------------- 1 | name: "default-backup-policy" 2 | description: "Backup policy for all servers" 3 | archive_interval: 48h 4 | snapshot_frequency: "daily" 5 | snapshot_time: "12:00" 6 | bandwidth_limit: 100 7 | snapshot_retention: 30 8 | live_sync_frequency: 2m 9 | transition_after_days: 30 10 | delete_after_days: 90 11 | 12 | targets: 13 | # Range pattern 14 | - pattern: "*" 15 | disks_excluded: 16 | - "/dev/xvdz" # If you don't want to exclude any disks from backup, put a disk name that doesn't exist 17 | -------------------------------------------------------------------------------- /package/etc/systemd/system/blxrep.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=blxrep service 3 | After=network.target 4 | 5 | [Service] 6 | ExecStart=/usr/local/bin/blxrep start --config /etc/blxrep/config.yaml 7 | Restart=always 8 | User=root 9 | Group=root 10 | 11 | [Install] 12 | WantedBy=multi-user.target 13 | -------------------------------------------------------------------------------- /package/usr/local/bin/blxrep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/xmigrate/blxrep/3a931c6b216b0ed28f18729ba9fc626f42b9fcf5/package/usr/local/bin/blxrep -------------------------------------------------------------------------------- /pkg/agent/agent.go: -------------------------------------------------------------------------------- 1 | package agent 2 | 3 | import ( 4 | "log" 5 | ) 6 | 7 | func Start(agentID string, dispatcherAddr string) { 8 | log.Printf("Agent %s is running...", agentID) 9 | // Connect to the dispatcher 10 | go ConnectToDispatcher(agentID, dispatcherAddr) 11 | // Keep the main goroutine alive 12 | select {} 13 | } 14 | -------------------------------------------------------------------------------- /pkg/agent/clone.go: -------------------------------------------------------------------------------- 1 | package agent 2 | 3 | import ( 4 | "bufio" 5 | "context" 6 | "fmt" 7 | "io" 8 | "log" 9 | "os" 10 | "sync" 11 | "time" 12 | 13 | "github.com/xmigrate/blxrep/utils" 14 | 15 | "github.com/gorilla/websocket" 16 | ) 17 | 18 | func Clone(ctx context.Context, blockSize int, srcPath string, channelSize int, websock *websocket.Conn, cloneMutex *sync.Mutex, isCloning *bool) { 19 | 20 | // Open the source disk. 21 | src, err := os.Open(srcPath) 22 | if err != nil { 23 | log.Printf("Failed to open source disk: %v", err); return // src is unusable if the open failed 24 | } 25 | defer src.Close() 26 | 27 | // Use a buffered reader to minimize system calls. 28 | bufReader := bufio.NewReaderSize(src, blockSize*8000) 29 | 30 | // Allocate a buffer for one block.
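// The loop below slices each buffered read into blockSize-sized
// AgentDataBlock records; batchSize counts the bytes accumulated so far,
// and once it reaches channelSize the batch is streamed over the
// websocket and reset. Cancellation and EOF both flush any remainder.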
31 | buf := make([]byte, blockSize) 32 | 33 | var blocks []utils.AgentDataBlock 34 | var blockCount uint64 35 | var batchSize int 36 | log.Printf("Cloning started for %s", srcPath) 37 | startTime := time.Now().Unix() 38 | for { 39 | select { 40 | case <-ctx.Done(): 41 | // Handle context cancellation and exit the goroutine 42 | log.Println("Cloning was paused/cancelled and goroutine is exiting.") 43 | if len(blocks) > 0 { 44 | utils.StreamData(blocks, websock, false, srcPath, utils.CONST_AGENT_ACTION_CLONE, startTime) 45 | } 46 | cloneMutex.Lock() 47 | *isCloning = false 48 | cloneMutex.Unlock() 49 | return 50 | default: 51 | // Read data in larger chunks to reduce syscall overhead 52 | n, err := bufReader.Read(buf) 53 | if n > 0 { 54 | for i := 0; i < n; i += blockSize { 55 | end := i + blockSize 56 | if end > n { 57 | end = n 58 | } 59 | blockData := utils.AgentDataBlock{ 60 | BlockNumber: blockCount, 61 | BlockData: append([]byte(nil), buf[i:end]...), 62 | } 63 | blocks = append(blocks, blockData) 64 | blockCount++ 65 | batchSize += end - i 66 | 67 | if batchSize >= channelSize { 68 | utils.StreamData(blocks, websock, false, srcPath, utils.CONST_AGENT_ACTION_CLONE, startTime) 69 | blocks = nil 70 | batchSize = 0 71 | } 72 | } 73 | } 74 | if err != nil { 75 | if err == io.EOF { 76 | if len(blocks) > 0 { 77 | utils.StreamData(blocks, websock, false, srcPath, utils.CONST_AGENT_ACTION_CLONE, startTime) 78 | } 79 | cloneMutex.Lock() 80 | *isCloning = false 81 | cloneMutex.Unlock() 82 | return 83 | } 84 | log.Fatalf("Failed to read block: %v", err) 85 | } 86 | } 87 | } 88 | } 89 | 90 | func Resume(ctx context.Context, blockSize int, srcPath string, channelSize int, readFrom int64, websock *websocket.Conn, cloneMutex *sync.Mutex, isCloning *bool) { 91 | log.Printf("Resume started block: %d", readFrom) 92 | // Open the source disk. 93 | src, err := os.Open(srcPath) 94 | if err != nil { 95 | log.Printf("Failed to open source disk: %v", err) 96 | } 97 | defer src.Close() 98 | var blocks []utils.AgentDataBlock 99 | 100 | // Loop over the blocks in the source disk. 101 | var blockCount int64 = 0 // Initialize counter to 0 102 | var batchSize int = 0 103 | // Seek to correct block number 104 | for { 105 | _, err := src.Seek(int64(blockSize), io.SeekCurrent) 106 | if err != nil && err != io.EOF { 107 | fmt.Println("Error reading from snapshot:", err) 108 | return 109 | } 110 | 111 | if blockCount == readFrom { 112 | log.Printf("Seeked to %d", blockCount) 113 | break 114 | } 115 | blockCount++ 116 | } 117 | // Loop over the blocks in the source disk. 
118 | startTime := time.Now().Unix() 119 | for { 120 | select { 121 | case <-ctx.Done(): 122 | // Handle context cancellation and exit the goroutine 123 | log.Println("Cloning was paused/cancelled and goroutine is exiting.") 124 | if len(blocks) > 0 { 125 | utils.StreamData(blocks, websock, true, srcPath, utils.CONST_AGENT_ACTION_CLONE, startTime) 126 | utils.LogDebug(fmt.Sprintf("Flush remaining data of size %d", batchSize)) 127 | } 128 | return 129 | default: 130 | var bytesRead int 131 | buf := make([]byte, blockSize) 132 | for bytesRead < blockSize { 133 | n, err := src.Read(buf[bytesRead:]) 134 | if n > 0 { 135 | bytesRead += n 136 | } 137 | if err != nil { 138 | if err == io.EOF { 139 | break 140 | } 141 | log.Fatalf("Failed to read block: %v", err) 142 | } 143 | } 144 | if bytesRead > 0 { 145 | blockData := utils.AgentDataBlock{ 146 | BlockNumber: uint64(blockCount), 147 | BlockData: append([]byte(nil), buf[:bytesRead]...), 148 | } 149 | blocks = append(blocks, blockData) 150 | if batchSize >= channelSize { 151 | //Code to send data to websocket 152 | utils.StreamData(blocks, websock, true, srcPath, utils.CONST_AGENT_ACTION_CLONE, startTime) 153 | batchSize = 0 154 | blocks = nil 155 | } 156 | } else { 157 | log.Printf("No more data to read from the source %d", blockCount) 158 | if len(blocks) > 0 { 159 | utils.StreamData(blocks, websock, true, srcPath, utils.CONST_AGENT_ACTION_CLONE, startTime) 160 | log.Printf("Flush remaining data of size %d", batchSize) 161 | } 162 | cloneMutex.Lock() 163 | *isCloning = false 164 | cloneMutex.Unlock() 165 | return 166 | } 167 | blockCount++ 168 | batchSize++ 169 | } 170 | } 171 | 172 | } 173 | -------------------------------------------------------------------------------- /pkg/agent/footprint_linux.go: -------------------------------------------------------------------------------- 1 | //go:build linux 2 | // +build linux 3 | 4 | package agent 5 | 6 | import ( 7 | "fmt" 8 | "io/ioutil" 9 | "net" 10 | "os" 11 | "os/exec" 12 | "regexp" 13 | "strings" 14 | 15 | "github.com/xmigrate/blxrep/utils" 16 | 17 | "github.com/shirou/gopsutil/cpu" 18 | "github.com/shirou/gopsutil/disk" 19 | "github.com/shirou/gopsutil/mem" 20 | ) 21 | 22 | func Footprint() (*utils.VMInfo, error) { 23 | 24 | hostname, err := os.Hostname() 25 | if err != nil { 26 | return nil, err 27 | } 28 | 29 | cpuInfo, err := cpu.Info() 30 | if err != nil { 31 | return nil, err 32 | } 33 | 34 | cpuCores := len(cpuInfo) 35 | 36 | memInfo, err := mem.VirtualMemory() 37 | if err != nil { 38 | return nil, err 39 | } 40 | 41 | interfaces, err := net.Interfaces() 42 | if err != nil { 43 | return nil, err 44 | } 45 | 46 | var InterfaceInfo []struct { 47 | Name string `json:"name"` 48 | IPAddress string `json:"ip_address"` 49 | SubnetMask string `json:"subnet_mask"` 50 | CIDRNotation string `json:"cidr_notation"` 51 | NetworkCIDR string `json:"network_cidr"` 52 | } 53 | 54 | // var ipAddress, network, subnet string 55 | for _, iface := range interfaces { 56 | addrs, err := iface.Addrs() 57 | if err != nil { 58 | fmt.Println(err) 59 | continue 60 | } 61 | 62 | for _, addr := range addrs { 63 | ipNet, ok := addr.(*net.IPNet) 64 | if !ok { 65 | continue 66 | } 67 | 68 | ip4 := ipNet.IP.To4() 69 | if ip4 == nil { 70 | continue 71 | } 72 | 73 | mask := ipNet.Mask 74 | networkIP := net.IP(make([]byte, 4)) 75 | for i := range ip4 { 76 | networkIP[i] = ip4[i] & mask[i] 77 | } 78 | 79 | cidr, _ := ipNet.Mask.Size() 80 | 81 | InterfaceInfo = append(InterfaceInfo, struct { 82 | Name string `json:"name"` 83 
| IPAddress string `json:"ip_address"` 84 | SubnetMask string `json:"subnet_mask"` 85 | CIDRNotation string `json:"cidr_notation"` 86 | NetworkCIDR string `json:"network_cidr"` 87 | }{ 88 | Name: iface.Name, 89 | IPAddress: ip4.String(), 90 | SubnetMask: net.IP(mask).String(), 91 | CIDRNotation: fmt.Sprintf("%s/%d", ip4, cidr), 92 | NetworkCIDR: fmt.Sprintf("%s/%d", networkIP, cidr), 93 | }) 94 | } 95 | } 96 | 97 | partitions, err := disk.Partitions(true) 98 | if err != nil { 99 | return nil, err 100 | } 101 | 102 | var diskDetails []utils.DiskDetailsStruct 103 | for _, partition := range partitions { 104 | mountpoint := partition.Mountpoint 105 | if !strings.HasPrefix(mountpoint, "/var/") && !strings.HasPrefix(mountpoint, "/run/") { 106 | usage, err := disk.Usage(mountpoint) 107 | if err == nil { 108 | fsType := partition.Fstype 109 | if fsType == "xfs" || strings.HasPrefix(fsType, "ext") { 110 | partitionName := partition.Device 111 | diskName := strings.TrimRightFunc(partitionName, func(r rune) bool { 112 | return '0' <= r && r <= '9' 113 | }) 114 | 115 | if strings.Contains(diskName, "nvme") { 116 | diskName = strings.TrimRightFunc(diskName, func(r rune) bool { 117 | return 'p' <= r 118 | }) 119 | } 120 | 121 | cmd := exec.Command("sudo", "blkid", partitionName) 122 | output, err := cmd.CombinedOutput() 123 | if err != nil { 124 | return nil, err 125 | } 126 | 127 | re := regexp.MustCompile(`\b(?i)UUID="([a-f0-9-]+)"`) 128 | match := re.FindStringSubmatch(string(output)) 129 | diskDetails = append(diskDetails, 130 | utils.DiskDetailsStruct{ 131 | FsType: fsType, 132 | Size: usage.Total, 133 | Uuid: match[1], 134 | Name: diskName, 135 | MountPoint: mountpoint, 136 | }) 137 | } 138 | } 139 | } 140 | } 141 | 142 | content, err := ioutil.ReadFile("/etc/os-release") 143 | if err != nil { 144 | return nil, err 145 | } 146 | 147 | osRelease := string(content) 148 | distro := getValue(osRelease, "ID") 149 | majorVersion := getValue(osRelease, "VERSION_ID") 150 | 151 | vmInfo := utils.VMInfo{ 152 | Hostname: hostname, 153 | CpuModel: cpuInfo[0].ModelName, 154 | CpuCores: cpuCores, 155 | Ram: memInfo.Total, 156 | InterfaceInfo: InterfaceInfo, 157 | DiskDetails: diskDetails, 158 | OsDistro: distro, 159 | OsVersion: majorVersion, 160 | } 161 | 162 | return &vmInfo, nil 163 | } 164 | 165 | func getValue(osRelease, key string) string { 166 | lines := strings.Split(osRelease, "\n") 167 | for _, line := range lines { 168 | if strings.HasPrefix(line, key+"=") { 169 | return strings.Trim(strings.TrimPrefix(line, key+"="), `"`) 170 | } 171 | } 172 | return "" 173 | } 174 | -------------------------------------------------------------------------------- /pkg/agent/footprint_windows.go: -------------------------------------------------------------------------------- 1 | //go:build windows 2 | // +build windows 3 | 4 | package agent 5 | 6 | import ( 7 | "fmt" 8 | "log" 9 | "net" 10 | "os" 11 | "os/exec" 12 | "regexp" 13 | "strconv" 14 | "strings" 15 | 16 | "blxrep/utils" 17 | 18 | "github.com/shirou/gopsutil/cpu" 19 | "github.com/shirou/gopsutil/host" 20 | "github.com/shirou/gopsutil/mem" 21 | ) 22 | 23 | func Footprint() (*utils.VMInfo, error) { 24 | 25 | hostname, err := os.Hostname() 26 | if err != nil { 27 | return nil, err 28 | } 29 | 30 | cpuInfo, err := cpu.Info() 31 | if err != nil { 32 | return nil, err 33 | } 34 | 35 | cpuCores := len(cpuInfo) 36 | 37 | memInfo, err := mem.VirtualMemory() 38 | if err != nil { 39 | return nil, err 40 | } 41 | 42 | interfaces, err := net.Interfaces() 43 | if err != 
nil { 44 | return nil, err 45 | } 46 | 47 | var InterfaceInfo []struct { 48 | Name string `json:"name"` 49 | IPAddress string `json:"ip_address"` 50 | SubnetMask string `json:"subnet_mask"` 51 | CIDRNotation string `json:"cidr_notation"` 52 | NetworkCIDR string `json:"network_cidr"` 53 | } 54 | 55 | for _, iface := range interfaces { 56 | addrs, err := iface.Addrs() 57 | if err != nil { 58 | fmt.Println(err) 59 | continue 60 | } 61 | 62 | for _, addr := range addrs { 63 | ipNet, ok := addr.(*net.IPNet) 64 | if !ok { 65 | continue 66 | } 67 | 68 | ip4 := ipNet.IP.To4() 69 | if ip4 == nil { 70 | continue 71 | } 72 | 73 | mask := ipNet.Mask 74 | networkIP := net.IP(make([]byte, 4)) 75 | for i := range ip4 { 76 | networkIP[i] = ip4[i] & mask[i] 77 | } 78 | 79 | cidr, _ := ipNet.Mask.Size() 80 | 81 | InterfaceInfo = append(InterfaceInfo, struct { 82 | Name string `json:"name"` 83 | IPAddress string `json:"ip_address"` 84 | SubnetMask string `json:"subnet_mask"` 85 | CIDRNotation string `json:"cidr_notation"` 86 | NetworkCIDR string `json:"network_cidr"` 87 | }{ 88 | Name: iface.Name, 89 | IPAddress: ip4.String(), 90 | SubnetMask: net.IP(mask).String(), 91 | CIDRNotation: fmt.Sprintf("%s/%d", ip4, cidr), 92 | NetworkCIDR: fmt.Sprintf("%s/%d", networkIP, cidr), 93 | }) 94 | } 95 | } 96 | 97 | diskDetails, err := getDiskDetails() 98 | if err != nil { 99 | return nil, err 100 | } 101 | 102 | osVersion, err := host.Info() 103 | if err != nil { 104 | return nil, err 105 | } 106 | 107 | vmInfo := utils.VMInfo{ 108 | Hostname: hostname, 109 | CpuModel: cpuInfo[0].ModelName, 110 | CpuCores: cpuCores, 111 | Ram: memInfo.Total, 112 | InterfaceInfo: InterfaceInfo, 113 | DiskDetails: diskDetails, 114 | OsDistro: osVersion.Platform, 115 | OsVersion: osVersion.PlatformVersion, 116 | } 117 | utils.LogDebug(fmt.Sprintf("Footprint %s", vmInfo)) 118 | return &vmInfo, nil 119 | } 120 | 121 | func getDiskDetails() ([]utils.DiskDetailsStruct, error) { 122 | physicalDrives, err := getPhysicalDrives() 123 | if err != nil { 124 | return nil, err 125 | } 126 | 127 | cDriveDiskIndex, err := getCDriveDiskIndex() 128 | if err != nil { 129 | return nil, err 130 | } 131 | 132 | var diskDetails []utils.DiskDetailsStruct 133 | for _, drive := range physicalDrives { 134 | size := drive.BytesPerSector * drive.TotalSectors 135 | mountPoint := "/data" 136 | if drive.Index == cDriveDiskIndex { 137 | mountPoint = "/" 138 | } 139 | 140 | diskDetails = append(diskDetails, utils.DiskDetailsStruct{ 141 | FsType: "NTFS", 142 | Size: size, 143 | Uuid: "", // UUID not retrieved in this approach 144 | Name: drive.DeviceID, 145 | MountPoint: mountPoint, 146 | }) 147 | } 148 | 149 | return diskDetails, nil 150 | } 151 | 152 | type PhysicalDrive struct { 153 | DeviceID string 154 | BytesPerSector uint64 155 | Partitions int 156 | TotalSectors uint64 157 | Index int 158 | } 159 | 160 | func getPhysicalDrives() ([]PhysicalDrive, error) { 161 | cmd := exec.Command("wmic", "diskdrive", "get", "DeviceID,BytesPerSector,Partitions,TotalSectors,Index") 162 | output, err := cmd.CombinedOutput() 163 | if err != nil { 164 | return nil, err 165 | } 166 | 167 | var drives []PhysicalDrive 168 | lines := strings.Split(string(output), "\n") 169 | for i, line := range lines { 170 | log.Printf("line physical drive %s", line) 171 | if i == 0 || strings.TrimSpace(line) == "" { // Skip the header and empty lines 172 | continue 173 | } 174 | 175 | fields := strings.Fields(line) 176 | if len(fields) < 5 { 177 | continue 178 | } 179 | 180 | deviceID := fields[1] 181 
| bytesPerSector, err := strconv.ParseUint(fields[0], 10, 64) 182 | if err != nil { 183 | log.Printf("Error parsing BytesPerSector for %s: %v", deviceID, err) 184 | continue 185 | } 186 | partitions, err := strconv.Atoi(fields[3]) 187 | if err != nil { 188 | log.Printf("Error parsing Partitions for %s: %v", deviceID, err) 189 | continue 190 | } 191 | totalSectors, err := strconv.ParseUint(fields[4], 10, 64) 192 | if err != nil { 193 | log.Printf("Error parsing TotalSectors for %s: %v", deviceID, err) 194 | continue 195 | } 196 | index, err := strconv.Atoi(fields[2]) 197 | if err != nil { 198 | log.Printf("Error parsing Index for %s: %v", deviceID, err) 199 | continue 200 | } 201 | 202 | drives = append(drives, PhysicalDrive{ 203 | DeviceID: deviceID, 204 | BytesPerSector: bytesPerSector, 205 | Partitions: partitions, 206 | TotalSectors: totalSectors, 207 | Index: index, 208 | }) 209 | } 210 | 211 | return drives, nil 212 | } 213 | 214 | type Partition struct { 215 | DeviceID string 216 | DiskIndex int 217 | } 218 | 219 | func getPartitions() ([]Partition, error) { 220 | cmd := exec.Command("wmic", "partition", "get", "DeviceID,DiskIndex") 221 | output, err := cmd.CombinedOutput() 222 | if err != nil { 223 | return nil, err 224 | } 225 | 226 | var partitions []Partition 227 | lines := strings.Split(string(output), "\n") 228 | for i, line := range lines { 229 | log.Printf("line partitions %s", line) 230 | if i == 0 || strings.TrimSpace(line) == "" { // Skip the header and empty lines 231 | continue 232 | } 233 | 234 | fields := strings.Fields(line) 235 | if len(fields) < 2 { 236 | continue 237 | } 238 | 239 | deviceID := fields[0] 240 | diskIndex, err := strconv.Atoi(fields[1]) 241 | if err != nil { 242 | log.Printf("Error parsing DiskIndex for %s: %v", deviceID, err) 243 | continue 244 | } 245 | 246 | partitions = append(partitions, Partition{ 247 | DeviceID: deviceID, 248 | DiskIndex: diskIndex, 249 | }) 250 | } 251 | 252 | return partitions, nil 253 | } 254 | 255 | func getCDriveDiskIndex() (int, error) { 256 | cmd := exec.Command("wmic", "path", "Win32_LogicalDiskToPartition", "get", "Antecedent,Dependent") 257 | output, err := cmd.CombinedOutput() 258 | if err != nil { 259 | return -1, err 260 | } 261 | 262 | lines := strings.Split(string(output), "\n") 263 | re := regexp.MustCompile(`Win32_DiskPartition\.DeviceID="([^"]+)"`) 264 | for i, line := range lines { 265 | log.Printf("line c drive disk index: %s", line) 266 | if i == 0 || strings.TrimSpace(line) == "" { // Skip the header and empty lines 267 | continue 268 | } 269 | 270 | parts := strings.Fields(line) 271 | if len(parts) < 2 { 272 | log.Printf("Unexpected output from wmic: %s length: %d", line, len(parts)) 273 | continue 274 | } 275 | 276 | // Combine parts for parsing 277 | combinedParts := strings.Join(parts, " ") 278 | if strings.Contains(combinedParts, "C:") { 279 | match := re.FindStringSubmatch(combinedParts) 280 | if len(match) > 1 { 281 | partitionID := match[1] 282 | diskIndex, err := getDiskIndexFromPartition(partitionID) 283 | if err != nil { 284 | return -1, err 285 | } 286 | return diskIndex, nil 287 | } 288 | } 289 | } 290 | return -1, fmt.Errorf("C: drive not found") 291 | } 292 | 293 | func getDiskIndexFromPartition(partitionID string) (int, error) { 294 | // Construct the command string 295 | command := fmt.Sprintf("wmic partition where (DeviceID='%s') get DiskIndex", partitionID) 296 | log.Printf("Executing command: %s", command) 297 | 298 | // Execute the command 299 | cmd := exec.Command("cmd", "/C", 
command) 300 | output, err := cmd.CombinedOutput() 301 | if err != nil { 302 | log.Printf("Error getting DiskIndex for partition %s", partitionID) 303 | log.Printf("Command output: %s", string(output)) 304 | log.Printf("Error message: %s", err.Error()) 305 | return -1, err 306 | } 307 | 308 | // Log the output for debugging 309 | log.Printf("Command output: %s", string(output)) 310 | 311 | lines := strings.Split(string(output), "\n") 312 | for i, line := range lines { 313 | log.Printf("line disk index from partition: %s", line) 314 | if i == 0 || strings.TrimSpace(line) == "" { // Skip the header and empty lines 315 | continue 316 | } 317 | 318 | fields := strings.Fields(line) 319 | if len(fields) == 1 { 320 | return strconv.Atoi(fields[0]) 321 | } 322 | } 323 | 324 | return -1, fmt.Errorf("DiskIndex not found for partition: %s", partitionID) 325 | } 326 | -------------------------------------------------------------------------------- /pkg/agent/live.go: -------------------------------------------------------------------------------- 1 | package agent 2 | 3 | import ( 4 | "bytes" 5 | "context" 6 | "crypto/sha256" 7 | "encoding/binary" 8 | "encoding/gob" 9 | "encoding/hex" 10 | "errors" 11 | "fmt" 12 | "io" 13 | "log" 14 | "os" 15 | "os/exec" 16 | "os/signal" 17 | "runtime" 18 | "syscall" 19 | "unsafe" 20 | 21 | "github.com/cilium/ebpf/link" 22 | "github.com/cilium/ebpf/ringbuf" 23 | "github.com/cilium/ebpf/rlimit" 24 | "github.com/gorilla/websocket" 25 | "github.com/xmigrate/blxrep/utils" 26 | "golang.org/x/sys/unix" 27 | ) 28 | 29 | func setTargetDiskMajorMinor(objs *utils.BpfObjects, major uint32, minor uint32) error { 30 | majorKey := uint32(0) 31 | minorKey := uint32(1) 32 | 33 | // Put major number at index 0 34 | if err := objs.TargetDiskMap.Put(majorKey, major); err != nil { 35 | return err 36 | } 37 | 38 | // Put minor number at index 1 39 | if err := objs.TargetDiskMap.Put(minorKey, minor); err != nil { 40 | return err 41 | } 42 | 43 | return nil 44 | } 45 | 46 | func GetBlocks(ctx context.Context, blockSize int, srcPath string, websock *websocket.Conn, agentId string) { 47 | log.Printf("Block size: %d", blockSize) 48 | // Subscribe to signals for terminating the program. 49 | stopper := make(chan os.Signal, 1) 50 | signal.Notify(stopper, os.Interrupt, syscall.SIGTERM) 51 | 52 | // Allow the current process to lock memory for eBPF resources. 53 | if err := rlimit.RemoveMemlock(); err != nil { 54 | log.Printf("Error removing memlock: %v", err) 55 | } 56 | 57 | fileInfo, err := os.Stat(srcPath) 58 | if err != nil { 59 | log.Printf("Error retrieving information for /dev/xvda: %v", err) 60 | } 61 | 62 | // Asserting type to sys stat to get Sys() method 63 | sysInfo, ok := fileInfo.Sys().(*syscall.Stat_t) 64 | if !ok { 65 | log.Println("Error asserting type to syscall.Stat_t") 66 | } 67 | 68 | // Extracting major and minor numbers 69 | desiredMajor := uint32(sysInfo.Rdev / 256) 70 | desiredMinor := uint32(sysInfo.Rdev % 256) 71 | log.Printf("Major/minor: %d %d", desiredMajor, desiredMinor) 72 | 73 | // Load pre-compiled programs and maps into the kernel. 
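// BpfObjects and LoadBpfObjects are provided by the bpf2go-generated
// bindings for bpf/trace-blocks.c (see the go:generate directive in
// main.go); they bundle the compiled tracepoint program and its maps.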
74 | objs := utils.BpfObjects{} 75 | if err := utils.LoadBpfObjects(&objs, nil); err != nil { 76 | log.Printf("loading objects: %v", err) 77 | } 78 | defer objs.Close() 79 | 80 | if err := setTargetDiskMajorMinor(&objs, desiredMajor, desiredMinor); err != nil { 81 | log.Printf("setting major/minor: %v", err) 82 | } 83 | // create a Tracepoint link 84 | tp, err := link.Tracepoint("block", "block_rq_complete", objs.BlockRqComplete, nil) 85 | if err != nil { 86 | log.Printf("opening tracepoint: %s", err) 87 | } 88 | defer tp.Close() 89 | 90 | // Open a ringbuf reader from userspace RINGBUF map described in the 91 | // eBPF C program. 92 | rd, err := ringbuf.NewReader(objs.Events) 93 | if err != nil { 94 | log.Printf("opening ringbuf reader: %s", err) 95 | } 96 | defer rd.Close() 97 | 98 | // Close the reader when the process receives a signal, which will exit 99 | // the read loop. 100 | go func() { 101 | for { 102 | select { 103 | case <-stopper: 104 | if err := rd.Close(); err != nil { 105 | log.Printf("closing ringbuf reader: %s", err) 106 | } 107 | return 108 | 109 | case <-ctx.Done(): 110 | 111 | if err := rd.Close(); err != nil { 112 | log.Printf("closing ringbuf reader: %s", err) 113 | } 114 | 115 | return 116 | 117 | } 118 | } 119 | }() 120 | 121 | log.Println("Waiting for events..") 122 | for { 123 | record, err := rd.Read() 124 | if err != nil { 125 | if errors.Is(err, ringbuf.ErrClosed) { 126 | log.Println("Received signal, exiting..") 127 | log.Println(err.Error()) 128 | os.Exit(0) 129 | 130 | } 131 | log.Printf("reading from reader: %s", err) 132 | continue 133 | } 134 | 135 | var event utils.Event 136 | // Parse the ringbuf event entry into a bpfEvent structure. 137 | if err := binary.Read(bytes.NewBuffer(record.RawSample), binary.LittleEndian, &event); err != nil { 138 | log.Printf("parsing ringbuf event: %s", err) 139 | continue 140 | } 141 | var binaryBuffer bytes.Buffer 142 | var liveSectors utils.AgentBulkMessage 143 | liveSectors.AgentID = agentId 144 | liveSectors.SrcPath = srcPath 145 | liveSectors.DataType = "cst" 146 | liveSectors.StartSector = event.Block 147 | liveSectors.EndSector = event.EndBlock 148 | enc := gob.NewEncoder(&binaryBuffer) 149 | if err := enc.Encode(liveSectors); err != nil { 150 | log.Printf("Could not encode: %v", err) 151 | continue 152 | } 153 | binaryData := binaryBuffer.Bytes() 154 | 155 | if err := websock.WriteMessage(websocket.BinaryMessage, binaryData); err != nil { 156 | log.Printf("Could not send blocks: %v", err) 157 | continue 158 | } 159 | log.Printf("Sent sectors: %d-%d", event.Block, event.EndBlock) 160 | } 161 | } 162 | 163 | func syncAndClearCache(srcPath string) error { 164 | // For future, if filesystem changes are larger then we only have to use sync, if changes are smaller then we have to do all this 165 | cmd := exec.Command("sudo", "sync") 166 | if err := cmd.Run(); err != nil { 167 | return fmt.Errorf("failed to sync: %w", err) 168 | } 169 | 170 | cmd = exec.Command("sudo", "sh", "-c", "echo 3 > /proc/sys/vm/drop_caches") 171 | if err := cmd.Run(); err != nil { 172 | return fmt.Errorf("failed to clear cache: %w", err) 173 | } 174 | 175 | // Open device with read-write permissions 176 | file, err := os.OpenFile(srcPath, os.O_RDWR, 0666) 177 | if err != nil { 178 | return fmt.Errorf("failed to open device: %w", err) 179 | } 180 | defer file.Close() 181 | 182 | // File-specific sync 183 | if err := file.Sync(); err != nil { 184 | return fmt.Errorf("fsync failed: %w", err) 185 | } 186 | 187 | // Clear the buffer cache 188 | _, 
_, errno := unix.Syscall(unix.SYS_IOCTL, file.Fd(), unix.BLKFLSBUF, uintptr(unsafe.Pointer(nil))) 189 | if errno != 0 { 190 | return errno 191 | } 192 | 193 | return nil 194 | } 195 | 196 | func ReadDataBlocks(blockSize int, srcPath string, pair utils.BlockPair, websock *websocket.Conn) { 197 | log.Printf("ReadDataBlocks started for blocks %d to %d on %s", pair.Start, pair.End, srcPath) 198 | 199 | var agentBlocks utils.AgentBulkMessage 200 | agentBlocks.AgentID, _ = os.Hostname() 201 | agentBlocks.DataType = "incremental" 202 | agentBlocks.SrcPath = srcPath 203 | 204 | src, err := os.OpenFile(srcPath, os.O_RDONLY|unix.O_DIRECT, 0) 205 | if err != nil { 206 | log.Printf("Error opening source: %v", err) 207 | return 208 | } 209 | defer src.Close() 210 | 211 | _, err = src.Seek(int64(pair.Start)*int64(blockSize), io.SeekStart) 212 | if err != nil { 213 | log.Printf("Error seeking to block %d: %v", pair.Start, err) 214 | return 215 | } 216 | 217 | for blockNumber := pair.Start; blockNumber <= pair.End; blockNumber++ { 218 | buf := make([]byte, blockSize) 219 | n, err := src.Read(buf) 220 | if err != nil && err != io.EOF { 221 | log.Printf("Error reading block %d: %v", blockNumber, err) 222 | continue 223 | } 224 | 225 | block := utils.AgentDataBlock{ 226 | BlockNumber: blockNumber, 227 | BlockData: buf[:n], 228 | } 229 | checksumArray := sha256.Sum256(block.BlockData) 230 | block.Checksum = hex.EncodeToString(checksumArray[:]) 231 | agentBlocks.Data = append(agentBlocks.Data, block) 232 | 233 | // Send data if we've accumulated enough or if this is the last block; TODO: replace 4096 with the configured bandwidth limit 234 | if len(agentBlocks.Data) >= 4096 || blockNumber == pair.End { 235 | // Serialize and send data 236 | var binaryBuffer bytes.Buffer 237 | enc := gob.NewEncoder(&binaryBuffer) 238 | if err := enc.Encode(agentBlocks); err != nil { 239 | log.Printf("Could not encode: %v", err) 240 | continue 241 | } 242 | binaryData := binaryBuffer.Bytes() 243 | 244 | if err := websock.WriteMessage(websocket.BinaryMessage, binaryData); err != nil { 245 | log.Printf("Could not send blocks data: %v", err) 246 | continue 247 | } 248 | 249 | log.Printf("Sent batch of %d blocks", len(agentBlocks.Data)) 250 | agentBlocks.Data = []utils.AgentDataBlock{} // Clear the data for the next batch 251 | } 252 | 253 | // Allow other goroutines to run 254 | runtime.Gosched() 255 | } 256 | } 257 | 258 | func ReadBlocks(ctx context.Context, blockSize int, blockPairs []utils.BlockPair, srcPath string, websock *websocket.Conn) { 259 | log.Printf("Reading and sending block pairs for %s", srcPath) 260 | if err := syncAndClearCache(srcPath); err != nil { 261 | log.Printf("Failed to sync and clear cache: %v", err) 262 | } 263 | for _, pair := range blockPairs { 264 | select { 265 | case <-ctx.Done(): 266 | log.Println("Context cancelled, stopping ReadBlocks") 267 | return 268 | default: 269 | ReadDataBlocks(blockSize, srcPath, pair, websock) 270 | } 271 | } 272 | log.Println("Finished reading and sending block pairs") 273 | } 274 | 275 | func processSyncAction(msg utils.Message, ws *websocket.Conn) { 276 | syncData := msg.SyncMessage 277 | log.Printf("Start syncing from: %s", syncData.SrcPath) 278 | ctx, cancel := context.WithCancel(context.Background()); cancelSync = cancel // retain the cancel func so processStopSyncAction can stop this sync 279 | go ReadBlocks(ctx, syncData.BlockSize, syncData.Blocks, syncData.SrcPath, ws) 280 | } 281 | 282 | func processStopSyncAction() { 283 | log.Println("Stopping sync action") 284 | if cancelSync != nil { 285 | log.Println("Stopping live migrations") 286 | cancelSync() 287 | } 288
syncMutex.Lock() 289 | isSyncing = false 290 | syncMutex.Unlock() 291 | } 292 | -------------------------------------------------------------------------------- /pkg/agent/restore.go: -------------------------------------------------------------------------------- 1 | package agent 2 | 3 | import ( 4 | "archive/tar" 5 | "bytes" 6 | "compress/gzip" 7 | "fmt" 8 | "io" 9 | "log" 10 | "os" 11 | "path/filepath" 12 | "strings" 13 | "time" 14 | 15 | "github.com/xmigrate/blxrep/utils" 16 | ) 17 | 18 | func processChunk(state *utils.RestoreState, data []byte, chunkIndex int) error { 19 | log.Printf("Received chunk %d, size: %d bytes", chunkIndex, len(data)) 20 | 21 | if chunkIndex != state.ChunksReceived { 22 | return fmt.Errorf("received out-of-order chunk: expected %d, got %d", state.ChunksReceived, chunkIndex) 23 | } 24 | 25 | _, err := state.Buffer.Write(data) 26 | if err != nil { 27 | return fmt.Errorf("error writing to buffer: %v", err) 28 | } 29 | 30 | state.ChunksReceived++ 31 | 32 | // If we've received all chunks, process the data 33 | if state.ChunksReceived == state.TotalChunks { 34 | return processCompleteData(state) 35 | } 36 | 37 | return nil 38 | } 39 | 40 | func processCompleteData(state *utils.RestoreState) error { 41 | utils.LogDebug(fmt.Sprintf("Processing complete data, total size: %d bytes", state.Buffer.Len())) 42 | 43 | gzipReader, err := gzip.NewReader(bytes.NewReader(state.Buffer.Bytes())) 44 | if err != nil { 45 | return fmt.Errorf("error creating gzip reader: %v", err) 46 | } 47 | defer gzipReader.Close() 48 | 49 | tarReader := tar.NewReader(gzipReader) 50 | 51 | for { 52 | header, err := tarReader.Next() 53 | if err == io.EOF { 54 | break // End of archive 55 | } 56 | if err != nil { 57 | return fmt.Errorf("error reading tar: %v", err) 58 | } 59 | 60 | err = extractEntry(state.FilePath, header, tarReader) 61 | if err != nil { 62 | return fmt.Errorf("error extracting entry: %v", err) 63 | } 64 | } 65 | 66 | log.Println("Restore process completed successfully") 67 | return nil 68 | } 69 | 70 | func extractEntry(basePath string, header *tar.Header, tarReader *tar.Reader) error { 71 | // Handle both cases: with and without "RESTORE" prefix 72 | relPath := header.Name 73 | if strings.HasPrefix(relPath, "RESTORE/") { 74 | relPath = strings.TrimPrefix(relPath, "RESTORE/") 75 | } 76 | 77 | relPath = filepath.FromSlash(relPath) 78 | target := filepath.Join(basePath, relPath) 79 | 80 | log.Printf("Extracting: %s, type: %c, size: %d bytes", target, header.Typeflag, header.Size) 81 | 82 | switch header.Typeflag { 83 | case tar.TypeDir: 84 | return os.MkdirAll(target, 0755) 85 | case tar.TypeReg: 86 | return extractRegularFile(target, header, tarReader) 87 | default: 88 | log.Printf("Unsupported file type: %c for %s", header.Typeflag, target) 89 | return nil // Skipping unsupported types 90 | } 91 | } 92 | 93 | func extractRegularFile(target string, header *tar.Header, tarReader *tar.Reader) error { 94 | dir := filepath.Dir(target) 95 | if err := os.MkdirAll(dir, 0755); err != nil { 96 | return fmt.Errorf("error creating directory %s: %v", dir, err) 97 | } 98 | 99 | file, err := os.OpenFile(target, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.FileMode(header.Mode)) 100 | if err != nil { 101 | return fmt.Errorf("error creating file %s: %v", target, err) 102 | } 103 | defer file.Close() 104 | 105 | _, err = io.Copy(file, tarReader) 106 | if err != nil { 107 | return fmt.Errorf("error writing file %s: %v", target, err) 108 | } 109 | 110 | log.Printf("File extracted: %s", target) 111 | return 
nil 112 | } 113 | 114 | func cleanupRestore(state *utils.RestoreState) { 115 | log.Println("Cleaning up restore process") 116 | if state.GzipReader != nil { 117 | state.GzipReader.Close() 118 | } 119 | // Consider removing partially extracted files here 120 | } 121 | 122 | const BlockSize = 512 123 | 124 | func RestorePartition(c *chan utils.AgentBulkMessage, agentID string, progress *int, destPath string) { 125 | log.Printf("Starting RestorePartition process for agent %s, destPath: %s", agentID, destPath) 126 | 127 | f, err := os.OpenFile(destPath, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644) 128 | if err != nil { 129 | log.Printf("Cannot open partition %s for agent %s: %v", destPath, agentID, err) 130 | return 131 | } 132 | defer f.Close() 133 | 134 | timeoutDuration := 20 * time.Second 135 | 136 | totalBytesWritten := int64(0) 137 | totalMessagesReceived := 0 138 | lastActivityTime := time.Now() 139 | for { 140 | select { 141 | case msg, ok := <-*c: 142 | if !ok { 143 | log.Printf("Channel closed, exiting RestorePartition for agent %s", agentID) 144 | return 145 | } 146 | lastActivityTime = time.Now() 147 | totalMessagesReceived++ 148 | batchBytesWritten := int64(0) 149 | 150 | for _, blockData := range msg.Data { 151 | n, err := f.Write(blockData.BlockData) 152 | if err != nil { 153 | log.Printf("Failed to write to file for agent %s: %v", agentID, err) 154 | return 155 | } 156 | batchBytesWritten += int64(n) 157 | totalBytesWritten += int64(n) 158 | 159 | } 160 | log.Printf("Batch received for agent %s: Wrote %d bytes", agentID, batchBytesWritten) 161 | log.Printf("Total for agent %s: Messages received: %d, Bytes written: %d", agentID, totalMessagesReceived, totalBytesWritten) 162 | // TODO: Below code might not be needed, hence commenting it out. No point in updating progress for restore from agent in dispatcher boltdb. 163 | // actionId := strings.Join([]string{agentID, fmt.Sprintf("%d", msg.StartTime)}, "_") 164 | // dispatcher.UpdateProgress(msg, progress, actionId, agentID, destPath) 165 | 166 | case <-time.After(timeoutDuration): 167 | log.Printf("No data for %v. Exiting RestorePartition for agent %s", timeoutDuration, agentID) 168 | log.Printf("Final stats for agent %s: Total messages received: %d, Total bytes written: %d", agentID, totalMessagesReceived, totalBytesWritten) 169 | log.Printf("Time since last activity: %v", time.Since(lastActivityTime)) 170 | return 171 | } 172 | } 173 | } 174 | -------------------------------------------------------------------------------- /pkg/agent/scheduled_jobs.go: -------------------------------------------------------------------------------- 1 | package agent 2 | 3 | import ( 4 | "fmt" 5 | "log" 6 | "strconv" 7 | "strings" 8 | "sync" 9 | "time" 10 | 11 | "github.com/xmigrate/blxrep/utils" 12 | ) 13 | 14 | type Scheduler struct { 15 | snapshotTime string 16 | frequency utils.Frequency 17 | stopChan chan struct{} 18 | wg sync.WaitGroup 19 | } 20 | 21 | func NewScheduler(snapshotTime string, frequency utils.Frequency) (*Scheduler, error) { 22 | // Validate time format 23 | if _, err := time.Parse("15:04:00", snapshotTime); err != nil { 24 | return nil, fmt.Errorf("invalid time format. 
Use HH:MM:SS in 24-hour format: %v", err) 25 | } 26 | 27 | return &Scheduler{ 28 | snapshotTime: snapshotTime, 29 | frequency: frequency, 30 | stopChan: make(chan struct{}), 31 | }, nil 32 | } 33 | 34 | func (s *Scheduler) Start(job func()) error { 35 | s.wg.Add(1) 36 | go func() { 37 | defer s.wg.Done() 38 | 39 | for { 40 | nextRun := s.calculateNextRun() 41 | log.Printf("Next snapshot scheduled for: %v", nextRun) 42 | 43 | select { 44 | case <-time.After(time.Until(nextRun)): 45 | log.Println("Starting snapshot job") 46 | job() 47 | 48 | case <-s.stopChan: 49 | log.Println("Scheduler stopped") 50 | return 51 | } 52 | } 53 | }() 54 | 55 | return nil 56 | } 57 | 58 | func (s *Scheduler) Stop() { 59 | close(s.stopChan) 60 | s.wg.Wait() 61 | } 62 | 63 | func (s *Scheduler) calculateNextRun() time.Time { 64 | now := time.Now().UTC() 65 | parts := strings.Split(s.snapshotTime, ":") 66 | hour, _ := strconv.Atoi(parts[0]) 67 | minute, _ := strconv.Atoi(parts[1]) 68 | 69 | var scheduled time.Time 70 | 71 | switch s.frequency { 72 | case utils.Daily: 73 | // For daily, start from current day 74 | scheduled = time.Date( 75 | now.Year(), now.Month(), now.Day(), 76 | hour, minute, 0, 0, time.UTC, 77 | ) 78 | if now.After(scheduled) { 79 | scheduled = scheduled.AddDate(0, 0, 1) 80 | } 81 | 82 | case utils.Weekly: 83 | // For weekly, start from the first day of the week (assuming Monday is first) 84 | scheduled = time.Date( 85 | now.Year(), now.Month(), now.Day(), 86 | hour, minute, 0, 0, time.UTC, 87 | ) 88 | // Adjust to the most recent Monday 89 | for scheduled.Weekday() != time.Monday { 90 | scheduled = scheduled.AddDate(0, 0, -1) 91 | } 92 | if now.After(scheduled) { 93 | scheduled = scheduled.AddDate(0, 0, 7) 94 | } 95 | 96 | case utils.Monthly: 97 | // For monthly, always start from the first day of the month 98 | scheduled = time.Date( 99 | now.Year(), now.Month(), 1, // Use day 1 for first of month 100 | hour, minute, 0, 0, time.UTC, 101 | ) 102 | if now.After(scheduled) { 103 | scheduled = time.Date( 104 | now.Year(), now.Month()+1, 1, // Move to first day of next month 105 | hour, minute, 0, 0, time.UTC, 106 | ) 107 | } 108 | } 109 | 110 | return scheduled 111 | } 112 | 113 | func StartScheduledJobs(agentID string, snapshotURL string) error { 114 | scheduler, err := NewScheduler(utils.AgentConfiguration.SnapshotTime, utils.Frequency(utils.AgentConfiguration.SnapshotFreq)) 115 | if err != nil { 116 | return fmt.Errorf("failed to create scheduler: %v", err) 117 | } 118 | log.Printf("Scheduled job started to run at %s every %s", utils.AgentConfiguration.SnapshotTime, utils.AgentConfiguration.SnapshotFreq) 119 | // Start as a goroutine 120 | go func() { 121 | if err := scheduler.Start(func() { 122 | log.Printf("Scheduled job %s: agent %s connecting to snapshot endpoint at %s", time.Now().Format(time.RFC3339), agentID, snapshotURL) 123 | if err := connectAndHandle(agentID, snapshotURL); err != nil { 124 | log.Printf("Snapshot connection error: %v", err) 125 | } 126 | log.Printf("Snapshot job is running") 127 | }); err != nil { 128 | log.Printf("Scheduler error: %v", err) 129 | } 130 | }() 131 | 132 | return nil 133 | } 134 | -------------------------------------------------------------------------------- /pkg/dispatcher/cleanup_jobs.go: -------------------------------------------------------------------------------- 1 | package dispatcher 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "os" 7 | "path/filepath" 8 | "strings" 9 | "time" 10 | 11 | "github.com/xmigrate/blxrep/service" 12 | 
"github.com/xmigrate/blxrep/utils" 13 | ) 14 | 15 | func StartSnapshotCleanupJobs() error { 16 | utils.LogDebug(fmt.Sprintf("Cleanup jobs started to run at %s", time.Now().Format(time.RFC3339))) 17 | 18 | go func() { 19 | ctx, cancel := context.WithCancel(context.Background()) 20 | defer cancel() 21 | 22 | checkInterval := 5 * time.Minute 23 | ticker := time.NewTicker(checkInterval) 24 | defer ticker.Stop() 25 | 26 | // Run initial cleanup for all agents 27 | if err := cleanupAllAgents(ctx); err != nil { 28 | utils.LogError(fmt.Sprintf("Initial cleanup for all agents failed: %v", err)) 29 | } 30 | 31 | // Then run periodic cleanup for all agents 32 | for { 33 | select { 34 | case <-ctx.Done(): 35 | utils.LogDebug("Cleanup jobs stopped") 36 | return 37 | case <-ticker.C: 38 | if err := cleanupAllAgents(ctx); err != nil { 39 | utils.LogError(fmt.Sprintf("Periodic cleanup for all agents failed: %v", err)) 40 | } 41 | } 42 | } 43 | }() 44 | 45 | return nil 46 | } 47 | 48 | func cleanupAllAgents(ctx context.Context) error { 49 | agents, err := service.GetAllAgents(-1) 50 | if err != nil { 51 | return fmt.Errorf("failed to get agents: %w", err) 52 | } 53 | 54 | for _, agent := range agents { 55 | select { 56 | case <-ctx.Done(): 57 | return ctx.Err() 58 | default: 59 | if err := performCleanup(ctx, agent.AgentId, agent.SnapshotRetention); err != nil { 60 | utils.LogError(fmt.Sprintf("Cleanup failed for agent %s: %v", agent.AgentId, err)) 61 | // Continue with other agents even if one fails 62 | continue 63 | } 64 | utils.LogDebug(fmt.Sprintf("Cleanup completed for agent %s", agent.AgentId)) 65 | } 66 | } 67 | 68 | return nil 69 | } 70 | 71 | func performCleanup(ctx context.Context, agentID string, snapshotRetention int) error { 72 | // Get latest snapshot times per disk first 73 | latestSnapshotTimes, err := getLatestSnapshotTimes(ctx, agentID) 74 | if err != nil { 75 | return fmt.Errorf("error getting latest snapshot times: %w", err) 76 | } 77 | 78 | // Perform both cleanups 79 | if err := cleanupSnapshots(ctx, agentID, latestSnapshotTimes, snapshotRetention); err != nil { 80 | utils.LogError(fmt.Sprintf("Snapshot cleanup failed: %v", err)) 81 | } 82 | 83 | if err := cleanupIncrementals(ctx, agentID, latestSnapshotTimes); err != nil { 84 | utils.LogError(fmt.Sprintf("Incremental cleanup failed: %v", err)) 85 | } 86 | 87 | return nil 88 | } 89 | 90 | func getLatestSnapshotTimes(ctx context.Context, agentID string) (map[string]time.Time, error) { 91 | snapshotFolder := filepath.Join(utils.AppConfiguration.DataDir, "snapshot") 92 | files, err := os.ReadDir(snapshotFolder) 93 | if err != nil { 94 | return nil, fmt.Errorf("error reading snapshot directory: %w", err) 95 | } 96 | 97 | latestTimes := make(map[string]time.Time) 98 | 99 | for _, file := range files { 100 | if ctx.Err() != nil { 101 | return nil, ctx.Err() 102 | } 103 | 104 | fileName := file.Name() 105 | if !strings.HasPrefix(fileName, agentID) || !strings.HasSuffix(fileName, ".img") { 106 | continue 107 | } 108 | 109 | baseName := strings.TrimSuffix(fileName, filepath.Ext(fileName)) 110 | parts := strings.Split(baseName, "_") 111 | if len(parts) < 2 { 112 | continue 113 | } 114 | diskID := parts[len(parts)-2] 115 | 116 | fileInfo, err := file.Info() 117 | if err != nil { 118 | utils.LogError(fmt.Sprintf("Error getting file info for %s: %v", fileName, err)) 119 | continue 120 | } 121 | 122 | if currentTime, exists := latestTimes[diskID]; !exists || fileInfo.ModTime().After(currentTime) { 123 | latestTimes[diskID] = fileInfo.ModTime() 124 
| } 125 | } 126 | 127 | return latestTimes, nil 128 | } 129 | 130 | func cleanupSnapshots(ctx context.Context, agentID string, latestSnapshotTimes map[string]time.Time, snapshotRetention int) error { 131 | snapshotFolder := filepath.Join(utils.AppConfiguration.DataDir, "snapshot") 132 | 133 | files, err := os.ReadDir(snapshotFolder) 134 | if err != nil { 135 | return fmt.Errorf("error reading snapshot directory: %w", err) 136 | } 137 | 138 | currentTime := time.Now() 139 | cleanupCount := 0 140 | var totalSpaceFreed int64 141 | 142 | diskSnapshots := make(map[string][]utils.SnapshotInfo) 143 | mostRecentPerDisk := make(map[string]utils.SnapshotInfo) 144 | 145 | // First pass: collect all valid snapshot pairs and organize by disk 146 | for _, file := range files { 147 | if ctx.Err() != nil { 148 | return ctx.Err() 149 | } 150 | 151 | if file.IsDir() { 152 | continue 153 | } 154 | 155 | fileName := file.Name() 156 | if !strings.HasPrefix(fileName, agentID) { 157 | continue 158 | } 159 | 160 | baseName := strings.TrimSuffix(fileName, filepath.Ext(fileName)) 161 | parts := strings.Split(baseName, "_") 162 | if len(parts) < 2 { 163 | utils.LogError(fmt.Sprintf("Invalid snapshot filename format: %s", fileName)) 164 | continue 165 | } 166 | diskID := parts[len(parts)-2] 167 | 168 | // Skip if already processed 169 | alreadyProcessed := false 170 | for _, info := range diskSnapshots[diskID] { 171 | if info.BaseName == baseName { 172 | alreadyProcessed = true 173 | break 174 | } 175 | } 176 | if alreadyProcessed { 177 | continue 178 | } 179 | 180 | imgPath := filepath.Join(snapshotFolder, baseName+".img") 181 | logPath := filepath.Join(snapshotFolder, baseName+".log") 182 | 183 | imgInfo, imgErr := os.Stat(imgPath) 184 | logInfo, logErr := os.Stat(logPath) 185 | 186 | if imgErr != nil || logErr != nil { 187 | utils.LogError(fmt.Sprintf("Error accessing snapshot files for base %s: img error: %v, log error: %v", 188 | baseName, imgErr, logErr)) 189 | continue 190 | } 191 | 192 | // Use the older of the two timestamps 193 | fileTime := imgInfo.ModTime() 194 | if logInfo.ModTime().Before(fileTime) { 195 | fileTime = logInfo.ModTime() 196 | } 197 | 198 | snapshotInfo := utils.SnapshotInfo{ 199 | Timestamp: fileTime, 200 | BaseName: baseName, 201 | ImgSize: imgInfo.Size(), 202 | LogSize: logInfo.Size(), 203 | } 204 | 205 | diskSnapshots[diskID] = append(diskSnapshots[diskID], snapshotInfo) 206 | 207 | // Update most recent snapshot for this disk 208 | currentMostRecent, exists := mostRecentPerDisk[diskID] 209 | if !exists || fileTime.After(currentMostRecent.Timestamp) { 210 | mostRecentPerDisk[diskID] = snapshotInfo 211 | } 212 | } 213 | 214 | // Second pass: delete old snapshots while preserving the most recent one per disk 215 | for diskID, snapshots := range diskSnapshots { 216 | if ctx.Err() != nil { 217 | return ctx.Err() 218 | } 219 | 220 | hasRecentSnapshot := false 221 | for _, snapshot := range snapshots { 222 | if currentTime.Sub(snapshot.Timestamp) <= (time.Duration(snapshotRetention) * time.Hour) { 223 | hasRecentSnapshot = true 224 | break 225 | } 226 | } 227 | 228 | for _, snapshot := range snapshots { 229 | fileAge := currentTime.Sub(snapshot.Timestamp) 230 | 231 | // Skip most recent snapshot if no recent snapshots exist 232 | if !hasRecentSnapshot && snapshot.BaseName == mostRecentPerDisk[diskID].BaseName { 233 | utils.LogDebug(fmt.Sprintf("Preserving most recent snapshot %s for disk %s (age: %v)", 234 | snapshot.BaseName, diskID, fileAge)) 235 | continue 236 | } 237 | 238 | // Delete if 
older than retention period 239 | if fileAge > (time.Duration(snapshotRetention) * time.Hour) { 240 | imgPath := filepath.Join(snapshotFolder, snapshot.BaseName+".img") 241 | logPath := filepath.Join(snapshotFolder, snapshot.BaseName+".log") 242 | 243 | imgErr := os.Remove(imgPath) 244 | logErr := os.Remove(logPath) 245 | 246 | if imgErr != nil || logErr != nil { 247 | utils.LogError(fmt.Sprintf("Error deleting snapshot files for base %s: img error: %v, log error: %v", 248 | snapshot.BaseName, imgErr, logErr)) 249 | continue 250 | } 251 | 252 | cleanupCount += 2 253 | totalSpaceFreed += snapshot.ImgSize + snapshot.LogSize 254 | utils.LogDebug(fmt.Sprintf("Deleted snapshot files for base %s (age: %v, freed: %d bytes)", 255 | snapshot.BaseName, fileAge, snapshot.ImgSize+snapshot.LogSize)) 256 | } 257 | } 258 | } 259 | 260 | utils.LogDebug(fmt.Sprintf("Snapshot cleanup completed for agent %s. Deleted %d files, freed %d bytes", 261 | agentID, cleanupCount, totalSpaceFreed)) 262 | 263 | return nil 264 | } 265 | 266 | func cleanupIncrementals(ctx context.Context, agentID string, latestSnapshotTimes map[string]time.Time) error { 267 | incrementalFolder := filepath.Join(utils.AppConfiguration.DataDir, "incremental") 268 | files, err := os.ReadDir(incrementalFolder) 269 | if err != nil { 270 | return fmt.Errorf("error reading incremental directory: %w", err) 271 | } 272 | 273 | var totalSpaceFreed int64 274 | cleanupCount := 0 275 | 276 | for _, file := range files { 277 | if ctx.Err() != nil { 278 | return ctx.Err() 279 | } 280 | 281 | fileName := file.Name() 282 | if !strings.HasPrefix(fileName, agentID) { 283 | continue 284 | } 285 | 286 | // Parse the disk ID from the filename 287 | parts := strings.Split(fileName, "_") 288 | if len(parts) < 2 { 289 | utils.LogError(fmt.Sprintf("Invalid incremental filename format: %s", fileName)) 290 | continue 291 | } 292 | diskID := parts[1] // Get the disk identifier part 293 | 294 | // Get the latest snapshot time for this disk 295 | latestSnapshotTime, exists := latestSnapshotTimes[diskID] 296 | if !exists { 297 | utils.LogDebug(fmt.Sprintf("No snapshot found for disk %s, skipping incremental cleanup", diskID)) 298 | continue 299 | } 300 | 301 | fileInfo, err := file.Info() 302 | if err != nil { 303 | utils.LogError(fmt.Sprintf("Error getting file info for %s: %v", fileName, err)) 304 | continue 305 | } 306 | 307 | // If the incremental file is older than the latest snapshot, delete it 308 | if fileInfo.ModTime().Before(latestSnapshotTime) { 309 | filePath := filepath.Join(incrementalFolder, fileName) 310 | fileSize := fileInfo.Size() 311 | 312 | if err := os.Remove(filePath); err != nil { 313 | utils.LogError(fmt.Sprintf("Error deleting incremental file %s: %v", fileName, err)) 314 | continue 315 | } 316 | 317 | cleanupCount++ 318 | totalSpaceFreed += fileSize 319 | utils.LogDebug(fmt.Sprintf("Deleted incremental file %s (created: %v, latest snapshot: %v)", 320 | fileName, fileInfo.ModTime(), latestSnapshotTime)) 321 | } 322 | } 323 | 324 | if cleanupCount > 0 { 325 | utils.LogDebug(fmt.Sprintf("Incremental cleanup completed for agent %s. 
Deleted %d files, freed %d bytes", 326 | agentID, cleanupCount, totalSpaceFreed)) 327 | } 328 | 329 | return nil 330 | } 331 | -------------------------------------------------------------------------------- /pkg/dispatcher/config.go: -------------------------------------------------------------------------------- 1 | package dispatcher 2 | 3 | import ( 4 | "fmt" 5 | "io/fs" 6 | "os" 7 | "path/filepath" 8 | "regexp" 9 | "strconv" 10 | "strings" 11 | 12 | "github.com/xmigrate/blxrep/service" 13 | "github.com/xmigrate/blxrep/utils" 14 | "gopkg.in/yaml.v3" 15 | ) 16 | 17 | func Contains(slice []string, item string) bool { 18 | for _, s := range slice { 19 | if s == item { 20 | return true 21 | } 22 | } 23 | return false 24 | } 25 | 26 | // ConfigScheduler reads backup policies from files and updates agent configurations 27 | func ConfigScheduler(policyDir string) error { 28 | // Get all agents from DB as Map 29 | agentMap, err := service.GetAllAgentsMap(-1) 30 | if err != nil { 31 | return fmt.Errorf("failed to get agents from DB: %w", err) 32 | } 33 | // Walk through all YAML files in the policy directory 34 | err = filepath.WalkDir(policyDir, func(path string, d fs.DirEntry, err error) error { 35 | if err != nil { 36 | return err 37 | } 38 | 39 | // Skip if not a YAML file 40 | if !d.IsDir() && (filepath.Ext(path) == ".yaml" || filepath.Ext(path) == ".yml") { 41 | if err := processBackupPolicy(path, agentMap); err != nil { 42 | return fmt.Errorf("failed to process policy file %s: %w", path, err) 43 | } 44 | } 45 | return nil 46 | }) 47 | 48 | if err != nil { 49 | return fmt.Errorf("failed to walk policy directory: %w", err) 50 | } 51 | 52 | // Update agents in the database 53 | if err := service.InsertOrUpdateAgentsMap(agentMap); err != nil { 54 | return fmt.Errorf("failed to update agents in DB: %w", err) 55 | } 56 | 57 | return nil 58 | } 59 | 60 | func processBackupPolicy(filePath string, agentMap map[string]utils.Agent) error { 61 | // Read policy file 62 | data, err := os.ReadFile(filePath) 63 | if err != nil { 64 | return fmt.Errorf("failed to read policy file: %w", err) 65 | } 66 | 67 | // Parse YAML 68 | var policy utils.BackupPolicy 69 | if err := yaml.Unmarshal(data, &policy); err != nil { 70 | return fmt.Errorf("failed to parse policy file: %w", err) 71 | } 72 | utils.LogDebug(fmt.Sprintf("Processed policy: %s", filePath)) 73 | // Process each target group in the policy 74 | for _, targetGroup := range policy.Targets { 75 | // Expand the pattern to get all matching hostnames 76 | hostnames := expandPattern(targetGroup.Pattern, agentMap) 77 | // Update configuration for all matching agents 78 | for _, hostname := range hostnames { 79 | if agent, exists := agentMap[hostname]; exists { 80 | updateAgentConfig(&agent, policy, targetGroup) 81 | agentMap[hostname] = agent 82 | } 83 | } 84 | } 85 | 86 | return nil 87 | } 88 | 89 | // expandPattern expands patterns like "web[1-3].example.com" to ["web1.example.com", "web2.example.com", "web3.example.com"] 90 | func expandPattern(pattern string, agentMap map[string]utils.Agent) []string { 91 | // First check if pattern contains range expression 92 | rangeRegex := regexp.MustCompile(`\[(\d+)-(\d+)\]`) 93 | matches := rangeRegex.FindStringSubmatch(pattern) 94 | 95 | if len(matches) == 3 { 96 | // We found a range expression 97 | start, _ := strconv.Atoi(matches[1]) 98 | end, _ := strconv.Atoi(matches[2]) 99 | 100 | var result []string 101 | prefix := pattern[:strings.Index(pattern, "[")] 102 | suffix := pattern[strings.Index(pattern, 
"]")+1:] 103 | 104 | // Generate all hostnames in the range 105 | for i := start; i <= end; i++ { 106 | hostname := fmt.Sprintf("%s%d%s", prefix, i, suffix) 107 | result = append(result, hostname) 108 | } 109 | return result 110 | } 111 | 112 | // Handle comma-separated lists [1,2,3] 113 | listRegex := regexp.MustCompile(`\[([^\]]+)\]`) 114 | matches = listRegex.FindStringSubmatch(pattern) 115 | 116 | if len(matches) == 2 { 117 | items := strings.Split(matches[1], ",") 118 | var result []string 119 | prefix := pattern[:strings.Index(pattern, "[")] 120 | suffix := pattern[strings.Index(pattern, "]")+1:] 121 | 122 | for _, item := range items { 123 | hostname := fmt.Sprintf("%s%s%s", prefix, strings.TrimSpace(item), suffix) 124 | result = append(result, hostname) 125 | } 126 | return result 127 | } 128 | 129 | // Handle wildcards (* and ?) 130 | if strings.ContainsAny(pattern, "*?") { 131 | // Convert glob pattern to regex for matching 132 | regexPattern := strings.ReplaceAll(pattern, ".", "\\.") 133 | regexPattern = strings.ReplaceAll(regexPattern, "*", ".*") 134 | regexPattern = strings.ReplaceAll(regexPattern, "?", ".") 135 | regexPattern = "^" + regexPattern + "$" 136 | 137 | reg, err := regexp.Compile(regexPattern) 138 | if err != nil { 139 | utils.LogError(fmt.Sprintf("Invalid pattern %s: %v", pattern, err)) 140 | return []string{pattern} 141 | } 142 | 143 | var result []string 144 | // Match against all known hostnames 145 | for hostname := range agentMap { 146 | if reg.MatchString(hostname) { 147 | result = append(result, hostname) 148 | } 149 | } 150 | return result 151 | } 152 | 153 | // If no special pattern, return as is 154 | return []string{pattern} 155 | } 156 | 157 | func updateAgentConfig(agent *utils.Agent, policy utils.BackupPolicy, targetGroup utils.TargetGroup) { 158 | agent.CloneSchedule.Frequency = policy.SnapshotFrequency 159 | agent.CloneSchedule.Time = policy.SnapshotTime 160 | agent.CloneSchedule.Bandwidth = policy.BandwidthLimit 161 | agent.Prerequisites = true 162 | agent.SnapshotRetention = policy.SnapshotRetention 163 | agent.ArchiveInterval = policy.ArchiveInterval 164 | agent.LiveSyncFreq = policy.LiveSyncFrequency 165 | agent.TransitionAfterDays = policy.TransitionAfterDays 166 | agent.DeleteAfterDays = policy.DeleteAfterDays 167 | utils.AppConfiguration.ArchiveInterval = policy.ArchiveInterval 168 | utils.LogDebug(fmt.Sprintf("Updated agent config: %+v", agent.AgentId)) 169 | // Update disk configuration 170 | for _, disk := range agent.Footprint.DiskDetails { 171 | if !Contains(targetGroup.DisksExcluded, disk.Name) { 172 | if !Contains(agent.Disks, disk.Name) { 173 | agent.Disks = append(agent.Disks, disk.Name) 174 | } 175 | } 176 | } 177 | } 178 | -------------------------------------------------------------------------------- /pkg/dispatcher/restore.go: -------------------------------------------------------------------------------- 1 | package dispatcher 2 | 3 | import ( 4 | "archive/tar" 5 | "bufio" 6 | "bytes" 7 | "compress/gzip" 8 | "context" 9 | "encoding/base64" 10 | "fmt" 11 | "io" 12 | "log" 13 | "os" 14 | "path/filepath" 15 | "sync" 16 | "time" 17 | 18 | "github.com/xmigrate/blxrep/service" 19 | "github.com/xmigrate/blxrep/utils" 20 | 21 | "golang.org/x/sys/unix" 22 | ) 23 | 24 | const chunkSize = 1 * 1024 * 1024 // 1MB chunks 25 | 26 | const ( 27 | maxRetries = 5 28 | retryDelay = 2 * time.Second 29 | ) 30 | 31 | func RestoreFiles(agentID string, sourcePath string, destPath string, action utils.Action) error { 32 | conn := 
agents[agentID].RestoreConn 33 | agent, exists := agents[agentID] 34 | if !exists { 35 | utils.LogError("Agent with ID " + agentID + " does not exist in the agents map") 36 | return fmt.Errorf("agent with ID " + agentID + " not found") 37 | } 38 | 39 | utils.LogDebug("Agent details for ID " + agentID + ":") 40 | utils.LogDebug(" Agent: " + fmt.Sprintf("%+v", agent)) 41 | 42 | // Create a buffer to store the compressed data 43 | var buf bytes.Buffer 44 | gzipWriter := gzip.NewWriter(&buf) 45 | tarWriter := tar.NewWriter(gzipWriter) 46 | 47 | // Compress the file or directory 48 | err := compressPath(sourcePath, tarWriter) 49 | if err != nil { 50 | return fmt.Errorf("error compressing data: %v", err) 51 | } 52 | 53 | // Close the tar and gzip writers 54 | if err := tarWriter.Close(); err != nil { 55 | return fmt.Errorf("error closing tar writer: %v", err) 56 | } 57 | if err := gzipWriter.Close(); err != nil { 58 | return fmt.Errorf("error closing gzip writer: %v", err) 59 | } 60 | 61 | // Get the compressed data 62 | compressedData := buf.Bytes() 63 | totalSize := len(compressedData) 64 | totalChunks := (totalSize + chunkSize - 1) / chunkSize 65 | 66 | // Send start message 67 | startMsg := utils.Message{ 68 | Action: utils.CONST_AGENT_ACTION_RESTORE, 69 | RestoreMessage: utils.RestoreData{ 70 | Type: "start", 71 | TotalChunks: totalChunks, 72 | TotalSize: int64(totalSize), 73 | FilePath: destPath, 74 | }, 75 | } 76 | if err := conn.WriteJSON(startMsg); err != nil { 77 | return fmt.Errorf("error sending start message: %v", err) 78 | } 79 | lastReportedProgress := 0 80 | // Send the data in chunks 81 | for i := 0; i < totalSize; i += chunkSize { 82 | end := i + chunkSize 83 | if end > totalSize { 84 | end = totalSize 85 | } 86 | chunk := compressedData[i:end] 87 | 88 | // Encode the chunk in base64 89 | encodedChunk := base64.StdEncoding.EncodeToString(chunk) 90 | 91 | // Send the chunk 92 | chunkMsg := utils.Message{ 93 | Action: utils.CONST_AGENT_ACTION_RESTORE, 94 | RestoreMessage: utils.RestoreData{ 95 | FilePath: destPath, 96 | Type: "chunk", 97 | ChunkIndex: i / chunkSize, 98 | Data: encodedChunk, 99 | }, 100 | } 101 | 102 | // Retry loop for sending the chunk 103 | for retry := 0; retry < maxRetries; retry++ { 104 | conn := agents[agentID].RestoreConn 105 | err := conn.WriteJSON(chunkMsg) 106 | if err == nil { 107 | // Chunk sent successfully 108 | utils.LogDebug("Sent chunk " + fmt.Sprintf("%d", i/chunkSize+1) + " of " + fmt.Sprintf("%d", totalChunks)) 109 | break 110 | } 111 | 112 | if retry == maxRetries-1 { 113 | // Last retry attempt failed 114 | return fmt.Errorf("error sending chunk %d after %d attempts: %v", i/chunkSize+1, maxRetries, err) 115 | } 116 | 117 | utils.LogError("Error sending chunk " + fmt.Sprintf("%d", i/chunkSize+1) + " (attempt " + fmt.Sprintf("%d", retry+1) + " of " + fmt.Sprintf("%d", maxRetries) + "): " + err.Error() + ". 
Retrying...") 118 | time.Sleep(retryDelay) 119 | } 120 | 121 | // Log progress 122 | progress := int(float64(i) / float64(totalSize) * 100) 123 | utils.LogDebug("Sent chunk " + fmt.Sprintf("%d", i/chunkSize+1) + " of " + fmt.Sprintf("%d", totalChunks) + " (" + fmt.Sprintf("%d", progress) + "%)") 124 | if progress >= lastReportedProgress+5 || progress == 100 { 125 | action.ActionProgress = progress 126 | service.InsertOrUpdateAction(action) 127 | lastReportedProgress = progress 128 | } 129 | time.Sleep(100 * time.Millisecond) 130 | } 131 | 132 | // Send complete message 133 | completeMsg := utils.Message{ 134 | Action: utils.CONST_AGENT_ACTION_RESTORE, 135 | RestoreMessage: utils.RestoreData{ 136 | Type: "complete", 137 | FilePath: destPath, 138 | }, 139 | } 140 | action.ActionProgress = 100 141 | action.ActionStatus = "Completed" 142 | service.InsertOrUpdateAction(action) 143 | if err := conn.WriteJSON(completeMsg); err != nil { 144 | return fmt.Errorf("error sending complete message: %v", err) 145 | } 146 | 147 | utils.LogDebug("File transfer completed: " + destPath) 148 | return nil 149 | } 150 | 151 | func compressPath(sourcePath string, tarWriter *tar.Writer) error { 152 | return filepath.Walk(sourcePath, func(path string, info os.FileInfo, err error) error { 153 | if err != nil { 154 | return err 155 | } 156 | 157 | header, err := tar.FileInfoHeader(info, path) 158 | if err != nil { 159 | return fmt.Errorf("error creating tar header: %v", err) 160 | } 161 | 162 | // Create relative path 163 | relPath, err := filepath.Rel(sourcePath, path) 164 | if err != nil { 165 | return fmt.Errorf("error creating relative path: %v", err) 166 | } 167 | 168 | // Set the header name to the relative path, preserving directory structure 169 | header.Name = filepath.ToSlash(relPath) 170 | 171 | if err := tarWriter.WriteHeader(header); err != nil { 172 | return fmt.Errorf("error writing tar header: %v", err) 173 | } 174 | 175 | if !info.IsDir() { 176 | file, err := os.Open(path) 177 | if err != nil { 178 | return fmt.Errorf("error opening file: %v", err) 179 | } 180 | defer file.Close() 181 | 182 | if _, err := io.Copy(tarWriter, file); err != nil { 183 | return fmt.Errorf("error writing file to tar: %v", err) 184 | } 185 | } 186 | 187 | return nil 188 | }) 189 | } 190 | 191 | func getBlockDeviceSize(path string) (int64, error) { 192 | file, err := os.Open(path) 193 | if err != nil { 194 | return 0, err 195 | } 196 | defer file.Close() 197 | 198 | size, err := unix.IoctlGetInt(int(file.Fd()), unix.BLKGETSIZE64) 199 | if err != nil { 200 | return 0, err 201 | } 202 | 203 | return int64(size), nil 204 | } 205 | 206 | func RestorePartition(agentID string, sourcePath string, destPath string, blockSize int, channelSize int, ctx context.Context, restorePartition *sync.Mutex, isPartitionRestore *bool, action utils.Action) error { 207 | websock := agents[agentID].RestoreConn 208 | src, err := os.Open(sourcePath) 209 | if err != nil { 210 | utils.LogError(fmt.Sprintf("Failed to open source disk: %v", err)) 211 | } 212 | defer src.Close() 213 | bufReader := bufio.NewReaderSize(src, blockSize*8000) 214 | 215 | // Allocate a buffer for one block. 
216 | buf := make([]byte, blockSize) 217 | 218 | var blocks []utils.AgentDataBlock 219 | var blockCount uint64 220 | var batchSize int 221 | var totalDataSent int64 222 | var lastReportedProgress int 223 | 224 | totalSize, err := getBlockDeviceSize(sourcePath) 225 | if err != nil { 226 | return fmt.Errorf("failed to get block device size: %v", err) 227 | } 228 | if totalSize == 0 { 229 | return fmt.Errorf("block device is empty or size couldn't be determined") 230 | } 231 | utils.LogDebug(fmt.Sprintf("Restoration started for %s", sourcePath)) 232 | for { 233 | select { 234 | case <-ctx.Done(): 235 | // Handle context cancellation and exit the goroutine 236 | utils.LogDebug("Restoration was paused/cancelled and goroutine is exiting.") 237 | if len(blocks) > 0 { 238 | utils.StreamData(blocks, websock, false, destPath, utils.CONST_AGENT_ACTION_PARTITION_RESTORE, time.Now().Unix()) 239 | totalDataSent += int64(len(blocks) * blockSize) 240 | } 241 | restorePartition.Lock() 242 | *isPartitionRestore = false 243 | restorePartition.Unlock() 244 | action.ActionProgress = int(float64(totalDataSent) / float64(totalSize) * 100) 245 | action.ActionStatus = "Paused" 246 | service.InsertOrUpdateAction(action) 247 | return nil 248 | default: 249 | // Read data in larger chunks to reduce syscall overhead 250 | n, err := bufReader.Read(buf) 251 | if n > 0 { 252 | for i := 0; i < n; i += blockSize { 253 | end := i + blockSize 254 | if end > n { 255 | end = n 256 | } 257 | blockData := utils.AgentDataBlock{ 258 | BlockNumber: blockCount, 259 | BlockData: append([]byte(nil), buf[i:end]...), 260 | } 261 | blocks = append(blocks, blockData) 262 | blockCount++ 263 | batchSize += end - i 264 | 265 | if batchSize >= channelSize { 266 | utils.StreamData(blocks, websock, false, destPath, utils.CONST_AGENT_ACTION_PARTITION_RESTORE, time.Now().Unix()) 267 | totalDataSent += int64(batchSize) 268 | // Update action progress 269 | progress := float64(totalDataSent) / float64(totalSize) * 100 270 | utils.LogDebug(fmt.Sprintf("Batch sent. Total data sent so far: %d bytes percentage: %.2f%%", totalDataSent, progress)) 271 | 272 | if int(progress) >= lastReportedProgress+2 || int(progress) == 100 { 273 | action.ActionProgress = int(progress) 274 | service.InsertOrUpdateAction(action) 275 | lastReportedProgress = int(progress) 276 | } 277 | 278 | blocks = nil 279 | batchSize = 0 280 | time.Sleep(100 * time.Millisecond) 281 | } 282 | } 283 | 284 | } 285 | if err != nil { 286 | if err == io.EOF { 287 | if len(blocks) > 0 { 288 | utils.StreamData(blocks, websock, false, sourcePath, utils.CONST_AGENT_ACTION_PARTITION_RESTORE, time.Now().Unix()) 289 | totalDataSent += int64(len(blocks) * blockSize) 290 | } 291 | utils.LogDebug(fmt.Sprintf("Restore completed. 
Total data sent: %d bytes", totalDataSent)) 292 | restorePartition.Lock() 293 | *isPartitionRestore = false 294 | restorePartition.Unlock() 295 | action.ActionProgress = 100 296 | action.ActionStatus = "Completed" 297 | service.InsertOrUpdateAction(action) 298 | return nil 299 | } 300 | action.ActionStatus = "Failed" 301 | service.InsertOrUpdateAction(action) 302 | log.Fatalf("Failed to read block: %v", err) 303 | } 304 | } 305 | } 306 | } 307 | -------------------------------------------------------------------------------- /plans/lab.yaml: -------------------------------------------------------------------------------- 1 | name: "lab-backup-policy" 2 | description: "Backup policy for lab servers" 3 | archive_interval: 48h 4 | snapshot_frequency: "daily" 5 | snapshot_time: "12:00" 6 | bandwidth_limit: 100 7 | snapshot_retention: 30 8 | live_sync_frequency: 2m 9 | transition_after_days: 30 10 | delete_after_days: 90 11 | 12 | targets: 13 | # Range pattern 14 | - pattern: "ip-172-[31-32]-46-49" 15 | disks_excluded: 16 | - "/dev/xvda" 17 | -------------------------------------------------------------------------------- /plans/testplan.yml: -------------------------------------------------------------------------------- 1 | name: "production-backup-policy" 2 | description: "Backup policy for production servers" 3 | archive_interval: 24h 4 | snapshot_frequency: "daily" 5 | snapshot_time: "00:00" 6 | bandwidth_limit: 50 7 | snapshot_retention: 7 8 | live_sync_frequency: 1m 9 | transition_after_days: 30 10 | delete_after_days: 90 11 | 12 | targets: 13 | # Range pattern 14 | - pattern: "web[1-5].prod.example.com" 15 | disks_excluded: 16 | - "/dev/sdb" 17 | 18 | # List pattern 19 | - pattern: "db[master,slave1,slave2].prod.example.com" 20 | disks_excluded: 21 | - "/dev/sdc" 22 | 23 | # Mixed pattern 24 | - pattern: "cache[1-3,backup].prod.example.com" 25 | disks_excluded: 26 | - "/dev/sdd" 27 | 28 | # Regular wildcard pattern 29 | - pattern: "monitor-*.prod.example.com" 30 | disks_excluded: 31 | - "/dev/sde" -------------------------------------------------------------------------------- /postinstall.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | systemctl daemon-reload 3 | # systemctl enable blxrep 4 | # systemctl start blxrep 5 | -------------------------------------------------------------------------------- /service/action_db_utils.go: -------------------------------------------------------------------------------- 1 | package service 2 | 3 | import ( 4 | "encoding/json" 5 | "fmt" 6 | 7 | "github.com/xmigrate/blxrep/storage" 8 | "github.com/xmigrate/blxrep/storage/boltdb" 9 | "github.com/xmigrate/blxrep/utils" 10 | ) 11 | 12 | func getActionDBInstance() storage.Service { 13 | return boltdb.New(utils.AppConfiguration.DataDir + "/xmaction.db") 14 | } 15 | 16 | func GetAction(actionID string) (utils.Action, error) { 17 | db := getActionDBInstance() 18 | 19 | if err := db.Open(); err != nil { 20 | return utils.Action{}, err 21 | } 22 | //close db and handle error inside defer 23 | defer func() { 24 | if err := db.Close(); err != nil { 25 | utils.LogError("unable to close DB : " + err.Error()) 26 | } 27 | }() 28 | 29 | actionObj, err := db.Get(actionID) 30 | 31 | if err != nil { 32 | return utils.Action{}, err 33 | } 34 | 35 | var action utils.Action 36 | err = json.Unmarshal(actionObj, &action) 37 | if err != nil { 38 | return utils.Action{}, err 39 | } 40 | 41 | return action, nil 42 | } 43 | 44 | func GetActionWithId(actionID string) 
(utils.Action, error) { 45 | db := getActionDBInstance() 46 | 47 | if err := db.Open(); err != nil { 48 | return utils.Action{}, err 49 | } 50 | //close db and handle error inside defer 51 | defer func() { 52 | if err := db.Close(); err != nil { 53 | utils.LogError("unable to close DB : " + err.Error()) 54 | } 55 | }() 56 | 57 | actionObj, err := db.SelectAll(-1) 58 | 59 | if err != nil { 60 | return utils.Action{}, err 61 | } 62 | for _, v := range actionObj { 63 | var action utils.Action 64 | err = json.Unmarshal(v, &action) 65 | if err != nil { 66 | return utils.Action{}, err 67 | } 68 | if action.ActionId == actionID { 69 | return action, nil 70 | } 71 | } 72 | 73 | return utils.Action{}, fmt.Errorf("action not found") 74 | } 75 | 76 | func InsertOrUpdateAction(action utils.Action) error { 77 | db := getActionDBInstance() 78 | 79 | if err := db.Open(); err != nil { 80 | return err 81 | } 82 | //close db and handle error inside defer 83 | defer func() { 84 | if err := db.Close(); err != nil { 85 | utils.LogError("unable to close DB : " + err.Error()) 86 | } 87 | }() 88 | 89 | actionObj, err := json.Marshal(action) 90 | if err != nil { 91 | return err 92 | } 93 | actionId := action.Id 94 | err = db.Insert(actionId, actionObj) 95 | if err != nil { 96 | return err 97 | } 98 | 99 | return nil 100 | } 101 | 102 | func GetAllActions(limit int) ([]utils.Action, error) { 103 | db := getActionDBInstance() 104 | 105 | if err := db.Open(); err != nil { 106 | return nil, err 107 | } 108 | //close db and handle error inside defer 109 | defer func() { 110 | if err := db.Close(); err != nil { 111 | utils.LogError("unable to close DB : " + err.Error()) 112 | } 113 | }() 114 | 115 | actions, err := db.SelectAll(limit) 116 | 117 | if err != nil { 118 | return nil, err 119 | } 120 | 121 | actionSlice := make([]utils.Action, 0) 122 | for _, v := range actions { 123 | action := utils.Action{} 124 | if err := json.Unmarshal(v, &action); err != nil { 125 | return nil, err 126 | } 127 | actionSlice = append(actionSlice, action) 128 | } 129 | 130 | return actionSlice, nil 131 | } 132 | 133 | func GetAllActionsWithStatus(limit int, status utils.CONST_ACTION_STATUS_TYPE) ([]utils.Action, error) { 134 | 135 | actions, err := GetAllActions(limit) 136 | if err != nil { 137 | return nil, err 138 | } 139 | 140 | filteredActions := make([]utils.Action, 0) 141 | for _, action := range actions { 142 | if action.ActionStatus == string(status) { 143 | filteredActions = append(filteredActions, action) 144 | } 145 | } 146 | 147 | return filteredActions, nil 148 | } 149 | 150 | func GetAllActionsWithUpdateStatus(limit int, status bool) ([]utils.Action, error) { 151 | actions, err := GetAllActions(limit) 152 | if err != nil { 153 | return nil, err 154 | } 155 | 156 | filteredActions := make([]utils.Action, 0) 157 | for _, action := range actions { 158 | if action.UpdateBackend == status { 159 | filteredActions = append(filteredActions, action) 160 | } 161 | } 162 | 163 | return filteredActions, nil 164 | 165 | } 166 | -------------------------------------------------------------------------------- /service/agent_db_utils.go: -------------------------------------------------------------------------------- 1 | package service 2 | 3 | import ( 4 | "encoding/json" 5 | "fmt" 6 | "time" 7 | 8 | "github.com/xmigrate/blxrep/storage" 9 | "github.com/xmigrate/blxrep/storage/boltdb" 10 | "github.com/xmigrate/blxrep/utils" 11 | ) 12 | 13 | func getDBInstance() storage.Service { 14 | return boltdb.New(utils.AppConfiguration.DataDir + "/xmdispatcher.db") 15 | } 16 | 17 | func 
GetAgent(agentID string) (utils.Agent, error) { 18 | 19 | db := getDBInstance() 20 | 21 | if err := db.Open(); err != nil { 22 | return utils.Agent{}, err 23 | } 24 | //close db and handle error inside defer 25 | defer func() { 26 | if err := db.Close(); err != nil { 27 | utils.LogError("unable to close DB : " + err.Error()) 28 | } 29 | }() 30 | 31 | agentObj, err := db.Get(agentID) 32 | 33 | if err != nil { 34 | return utils.Agent{}, err 35 | } 36 | var agent utils.Agent 37 | 38 | err = json.Unmarshal(agentObj, &agent) 39 | if err != nil { 40 | return utils.Agent{}, err 41 | } 42 | 43 | return agent, nil 44 | } 45 | 46 | func InsertOrUpdateAgent(agent utils.Agent) error { 47 | 48 | db := getDBInstance() 49 | 50 | if err := db.Open(); err != nil { 51 | return err 52 | } 53 | 54 | defer func() { 55 | if err := db.Close(); err != nil { 56 | utils.LogError("unable to close DB : " + err.Error()) 57 | } 58 | }() 59 | 60 | data, err := json.Marshal(agent) 61 | if err != nil { 62 | return err 63 | } 64 | 65 | if err := db.Insert(agent.AgentId, data); err != nil { 66 | return err 67 | } 68 | 69 | return nil 70 | } 71 | 72 | func InsertOrUpdateAgents(agents []utils.Agent) error { 73 | 74 | db := getDBInstance() 75 | 76 | if err := db.Open(); err != nil { 77 | return err 78 | } 79 | 80 | defer func() { 81 | if err := db.Close(); err != nil { 82 | utils.LogError("unable to close DB : " + err.Error()) 83 | } 84 | }() 85 | 86 | for _, agent := range agents { 87 | ag, err := json.Marshal(agent) 88 | if err != nil { 89 | return err 90 | } 91 | if err := db.Insert(agent.AgentId, ag); err != nil { 92 | return err 93 | } 94 | } 95 | 96 | return nil 97 | } 98 | 99 | func InsertOrUpdateAgentsMap(agents map[string]utils.Agent) error { 100 | 101 | // convert map to slice 102 | var agentsSlice []utils.Agent 103 | for _, v := range agents { 104 | agentsSlice = append(agentsSlice, v) 105 | } 106 | return InsertOrUpdateAgents(agentsSlice) 107 | } 108 | 109 | func GetAllAgents(limit int) ([]utils.Agent, error) { 110 | 111 | db := getDBInstance() 112 | 113 | if err := db.Open(); err != nil { 114 | return []utils.Agent{}, err 115 | } 116 | defer func() { 117 | if err := db.Close(); err != nil { 118 | utils.LogError("unable to close DB : " + err.Error()) 119 | } 120 | }() 121 | 122 | agents, err := db.SelectAll(limit) 123 | 124 | if err != nil { 125 | return []utils.Agent{}, err 126 | } 127 | 128 | // convert map to agents slice 129 | agentsSlice := make([]utils.Agent, 0) 130 | for _, v := range agents { 131 | agent := utils.Agent{} 132 | if err := json.Unmarshal(v, &agent); err != nil { 133 | return []utils.Agent{}, err 134 | } 135 | agentsSlice = append(agentsSlice, agent) 136 | } 137 | 138 | return agentsSlice, nil 139 | } 140 | 141 | func GetAllAgentsMap(limit int) (map[string]utils.Agent, error) { 142 | 143 | agents, err := GetAllAgents(limit) 144 | if err != nil { 145 | return nil, err 146 | } 147 | 148 | agentMap := make(map[string]utils.Agent) 149 | for _, i := range agents { 150 | agentMap[i.AgentId] = i 151 | } 152 | 153 | return agentMap, nil 154 | } 155 | 156 | func SetAgentAction(agentId string, action utils.CONST_AGENT_ACTION) error { 157 | db := getDBInstance() 158 | 159 | if err := db.Open(); err != nil { 160 | return err 161 | } 162 | 163 | defer func() { 164 | if err := db.Close(); err != nil { 165 | utils.LogError("unable to close DB : " + err.Error()) 166 | } 167 | }() 168 | 169 | agentObj, err := db.Get(agentId) 170 | var agent utils.Agent 171 | 172 | if err != nil { 173 | return err 174 | } 175 | 
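// Read-modify-write (descriptive comment, not in the original source):
// decode the stored agent, stamp the requested action on it, and write
// it back under the same key.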
176 | err = json.Unmarshal(agentObj, &agent) 177 | if err != nil { 178 | return err 179 | } 180 | 181 | agent.Action = action 182 | 183 | ag, err := json.Marshal(agent) 184 | if err != nil { 185 | return err 186 | } 187 | 188 | if err := db.Insert(agent.AgentId, ag); err != nil { 189 | return err 190 | } 191 | 192 | return nil 193 | } 194 | 195 | func GetConnectedAgents() ([]utils.Agent, error) { 196 | db := getDBInstance() 197 | if db == nil { 198 | return nil, fmt.Errorf("failed to get database instance") 199 | } 200 | 201 | if err := db.Open(); err != nil { 202 | return nil, fmt.Errorf("failed to open database: %v", err) 203 | } 204 | defer db.Close() // Use defer to ensure db is closed 205 | 206 | agents, err := db.SelectAll(-1) 207 | if err != nil { 208 | return nil, fmt.Errorf("failed to select agents: %v", err) 209 | } 210 | 211 | if agents == nil { 212 | return make([]utils.Agent, 0), nil 213 | } 214 | 215 | utils.LogDebug(fmt.Sprintf("Retrieved %d raw agents from database", len(agents))) 216 | 217 | agentSlice := make([]utils.Agent, 0, len(agents)) 218 | for i, v := range agents { 219 | if v == nil { 220 | utils.LogError(fmt.Sprintf("Warning: nil agent data at index %d", i)) 221 | continue 222 | } 223 | 224 | var agent utils.Agent 225 | if err := json.Unmarshal(v, &agent); err != nil { 226 | utils.LogError(fmt.Sprintf("Error unmarshaling agent at index %d: %v", i, err)) 227 | continue // Skip invalid agents instead of failing completely 228 | } 229 | 230 | // Validate critical fields 231 | if agent.AgentId == "" { 232 | utils.LogError(fmt.Sprintf("Warning: agent at index %d has empty AgentId", i)) 233 | continue 234 | } 235 | 236 | agentSlice = append(agentSlice, agent) 237 | } 238 | 239 | // Filter connected agents 240 | connectedAgents := make([]utils.Agent, 0, len(agentSlice)) 241 | for _, agent := range agentSlice { 242 | if agent.Connected { 243 | // Ensure LastSeen is not zero 244 | if agent.LastSeen.IsZero() { 245 | agent.LastSeen = time.Now() 246 | } 247 | 248 | // Initialize maps if nil 249 | if agent.CloneStatus == nil { 250 | agent.CloneStatus = make(map[string]int) 251 | } 252 | 253 | connectedAgents = append(connectedAgents, agent) 254 | } 255 | } 256 | 257 | utils.LogDebug(fmt.Sprintf("Returning %d connected agents", len(connectedAgents))) 258 | return connectedAgents, nil 259 | } 260 | 261 | func GetConnectedAgentsMap() (map[string]utils.Agent, error) { 262 | agents, err := GetConnectedAgents() 263 | if err != nil { 264 | utils.LogError("Error in GetConnectedAgents: " + err.Error()) 265 | return nil, fmt.Errorf("failed to get connected agents: %v", err) 266 | } 267 | 268 | agentMap := make(map[string]utils.Agent) 269 | for _, agent := range agents { 270 | // Double check AgentId is not empty 271 | if agent.AgentId == "" { 272 | utils.LogError("Found agent with empty AgentId") 273 | continue 274 | } 275 | agentMap[agent.AgentId] = agent 276 | } 277 | 278 | // Log the result for debugging 279 | utils.LogDebug(fmt.Sprintf("Created agent map with %d entries", len(agentMap))) 280 | 281 | return agentMap, nil 282 | } 283 | -------------------------------------------------------------------------------- /service/backend_utils.go: -------------------------------------------------------------------------------- 1 | package service 2 | 3 | import ( 4 | "bytes" 5 | "encoding/json" 6 | "fmt" 7 | "io" 8 | "net/http" 9 | 10 | "github.com/xmigrate/blxrep/utils" 11 | ) 12 | 13 | func GetAgentConfigFromBackend(token, url string) (utils.ApiResponse, error) { 14 | 15 | req, err := 
http.NewRequest("GET", url, nil) 16 | if err != nil { 17 | return utils.ApiResponse{}, err 18 | } 19 | 20 | query := req.URL.Query() 21 | query.Add("hostname", "all") 22 | req.URL.RawQuery = query.Encode() 23 | req.Header.Set("accept", "application/json") 24 | req.Header.Set("token", token) 25 | 26 | client := &http.Client{} 27 | resp, err := client.Do(req) 28 | if err != nil { 29 | return utils.ApiResponse{}, err 30 | } 31 | defer resp.Body.Close() 32 | // Read response body 33 | body, err := io.ReadAll(resp.Body) 34 | if err != nil { 35 | return utils.ApiResponse{}, err 36 | } 37 | 38 | // Unmarshal (parse) the JSON response 39 | var apiResponse utils.ApiResponse 40 | err = json.Unmarshal(body, &apiResponse) 41 | if err != nil { 42 | return utils.ApiResponse{}, err 43 | } 44 | 45 | if resp.StatusCode != http.StatusOK { 46 | return utils.ApiResponse{}, fmt.Errorf("error fetching configuration status code :- %d", apiResponse.Status) 47 | } 48 | return apiResponse, nil 49 | 50 | } 51 | 52 | func GetAgentActionFromBackend(token string, url string, action_type utils.CONST_ACTION_TYPE) ([]utils.Action, error) { 53 | 54 | req, err := http.NewRequest("GET", url, nil) 55 | if err != nil { 56 | return []utils.Action{}, err 57 | } 58 | query := req.URL.Query() 59 | query.Add("action_status", string(utils.CONST_ACTION_STATUS_WAITING)) 60 | query.Add("action_status", string(utils.CONST_ACTION_STATUS_PAUSED)) 61 | query.Add("action_status", string(utils.CONST_ACTION_STATUS_RESUMED)) 62 | req.URL.RawQuery = query.Encode() 63 | req.Header.Set("accept", "application/json") 64 | req.Header.Set("token", token) 65 | 66 | client := &http.Client{} 67 | resp, err := client.Do(req) 68 | if err != nil { 69 | return []utils.Action{}, err 70 | } 71 | defer resp.Body.Close() 72 | 73 | body, err := io.ReadAll(resp.Body) 74 | if err != nil { 75 | return []utils.Action{}, err 76 | } 77 | 78 | var action utils.ApiActionResponse 79 | err = json.Unmarshal(body, &action) 80 | if err != nil { 81 | return []utils.Action{}, err 82 | } 83 | 84 | if resp.StatusCode != http.StatusOK { 85 | return []utils.Action{}, fmt.Errorf("error fetching action status code :- %d", action.Status) 86 | } 87 | actions := []utils.Action{} 88 | for _, backendAction := range action.Actions { 89 | disk := map[string]utils.DiskSnapshot{ 90 | backendAction.Disk: { 91 | Name: backendAction.Disk, 92 | }, 93 | } 94 | actions = append(actions, utils.Action{ 95 | Id: backendAction.Id, 96 | ActionId: backendAction.ActionId, 97 | SnapshotId: backendAction.SnapshotId, 98 | AgentId: backendAction.AgentId, 99 | OsVersion: backendAction.OsVersion, 100 | FileSystem: backendAction.FileSystem, 101 | Distro: backendAction.Distro, 102 | Disk: disk, 103 | Action: backendAction.Action, 104 | ActionType: backendAction.ActionType, 105 | ActionStatus: backendAction.ActionStatus, 106 | ActionProgress: backendAction.ActionProgress, 107 | Hostname: backendAction.Hostname, 108 | TargetName: backendAction.TargetName, 109 | TimeCreated: backendAction.TimeCreated, 110 | TimeStarted: backendAction.TimeStarted, 111 | TimeUpdated: backendAction.TimeUpdated, 112 | TimeFinished: backendAction.TimeFinished, 113 | SourceFilePath: backendAction.SourceFilePath, 114 | TargetFilePath: backendAction.TargetFilePath, 115 | }) 116 | } 117 | return actions, nil 118 | 119 | } 120 | 121 | func PushActionToBackend(token string, url string, action utils.ActionPutRequest) error { 122 | 123 | reqBody, err := json.Marshal(action) 124 | if err != nil { 125 | return err 126 | } 127 | 128 | req, err := 
http.NewRequest("PUT", url, bytes.NewBuffer(reqBody)) 129 | 130 | if err != nil { 131 | return err 132 | } 133 | req.Header.Set("Content-Type", "application/json") 134 | req.Header.Set("token", token) 135 | 136 | client := &http.Client{} 137 | resp, err := client.Do(req) 138 | if err != nil { 139 | return err 140 | } 141 | defer resp.Body.Close() 142 | 143 | body, err := io.ReadAll(resp.Body) 144 | if err != nil { 145 | return err 146 | } 147 | utils.LogDebug(fmt.Sprintf("Response: %s", string(body))) 148 | if resp.StatusCode != http.StatusAccepted { 149 | return fmt.Errorf("Error pushing action status code: %s", string(body)) 150 | } 151 | return nil 152 | } 153 | 154 | func PostActionToBackend(token string, url string, action utils.ActionPostRequest) (string, error) { 155 | reqBody, err := json.Marshal(action) 156 | if err != nil { 157 | return "", err 158 | } 159 | 160 | req, err := http.NewRequest("POST", url, bytes.NewBuffer(reqBody)) 161 | if err != nil { 162 | return "", err 163 | } 164 | req.Header.Set("Content-Type", "application/json") 165 | req.Header.Set("token", token) 166 | 167 | client := &http.Client{} 168 | resp, err := client.Do(req) 169 | if err != nil { 170 | return "", err 171 | } 172 | defer resp.Body.Close() 173 | 174 | body, err := io.ReadAll(resp.Body) 175 | if err != nil { 176 | return "", err 177 | } 178 | 179 | if resp.StatusCode != http.StatusCreated { 180 | return "", fmt.Errorf("Error pushing action status code: %s", string(body)) 181 | } 182 | var actionId utils.ActionIdResponse 183 | err = json.Unmarshal(body, &actionId) 184 | if err != nil { 185 | return "", err 186 | } 187 | return actionId.ActionId, nil 188 | } 189 | -------------------------------------------------------------------------------- /service/dirtyblock_db_utils.go: -------------------------------------------------------------------------------- 1 | package service 2 | 3 | import ( 4 | "encoding/json" 5 | "fmt" 6 | "strings" 7 | "time" 8 | 9 | "github.com/xmigrate/blxrep/storage" 10 | "github.com/xmigrate/blxrep/storage/boltdb" 11 | "github.com/xmigrate/blxrep/utils" 12 | ) 13 | 14 | // getDirtyBlockDBInstance returns a new instance of the BoltDB service 15 | func getDirtyBlockDBInstance() storage.Service { 16 | dbPath := utils.AppConfiguration.DataDir + "/xmdirtyblocks.db" 17 | utils.LogDebug(fmt.Sprintf("Getting DirtyBlock DB instance at: %s", dbPath)) 18 | return boltdb.New(dbPath) 19 | } 20 | 21 | // DirtyBlock represents a block that needs to be retried 22 | type DirtyBlock struct { 23 | BlockNumber int64 `json:"block_number"` 24 | TimeCreated time.Time `json:"time_created"` 25 | LastRetried time.Time `json:"last_retried"` 26 | RetryCount int `json:"retry_count"` 27 | AgentID string `json:"agent_id"` 28 | DiskPath string `json:"disk_path"` 29 | } 30 | 31 | // AddDirtyBlock adds a new dirty block to the database 32 | func AddDirtyBlock(agentID, diskPath string, blockNum int64) error { 33 | db := getDirtyBlockDBInstance() 34 | if err := db.Open(); err != nil { 35 | utils.LogError(fmt.Sprintf("DirtyBlock DB: Failed to open DB for adding dirty block: %v", err)) 36 | return err 37 | } 38 | defer func() { 39 | if err := db.Close(); err != nil { 40 | utils.LogError("DirtyBlock DB: unable to close DB: " + err.Error()) 41 | } 42 | }() 43 | 44 | block := DirtyBlock{ 45 | BlockNumber: blockNum, 46 | TimeCreated: time.Now().UTC(), 47 | LastRetried: time.Now().UTC(), 48 | RetryCount: 1, 49 | AgentID: agentID, 50 | DiskPath: diskPath, 51 | } 52 | 53 | key := fmt.Sprintf("%s_%s_%d", agentID, diskPath, 
blockNum) 54 | blockData, err := json.Marshal(block) 55 | if err != nil { 56 | utils.LogError(fmt.Sprintf("DirtyBlock DB: Failed to marshal dirty block data: %v", err)) 57 | return err 58 | } 59 | 60 | utils.LogDebug(fmt.Sprintf("DirtyBlock DB: Adding dirty block - Key: %s, Block: %+v", key, block)) 61 | err = db.Insert(key, blockData) 62 | if err != nil { 63 | utils.LogError(fmt.Sprintf("DirtyBlock DB: Failed to insert dirty block into DB: %v", err)) 64 | return err 65 | } 66 | 67 | utils.LogDebug(fmt.Sprintf("DirtyBlock DB: Successfully added dirty block - Key: %s", key)) 68 | return nil 69 | } 70 | 71 | // GetDirtyBlocks retrieves all dirty blocks for a specific agent and disk path 72 | func GetDirtyBlocks(agentID, diskPath string) ([]DirtyBlock, error) { 73 | db := getDirtyBlockDBInstance() 74 | if err := db.Open(); err != nil { 75 | utils.LogError(fmt.Sprintf("DirtyBlock DB: Failed to open DB for getting dirty blocks: %v", err)) 76 | return nil, err 77 | } 78 | defer func() { 79 | if err := db.Close(); err != nil { 80 | utils.LogError("DirtyBlock DB: unable to close DB: " + err.Error()) 81 | } 82 | }() 83 | 84 | utils.LogDebug(fmt.Sprintf("DirtyBlock DB: Getting dirty blocks for agent: %s, disk: %s", agentID, diskPath)) 85 | 86 | allBlocks, err := db.SelectAll(-1) 87 | if err != nil { 88 | utils.LogError(fmt.Sprintf("DirtyBlock DB: Failed to select all blocks from DB: %v", err)) 89 | return nil, err 90 | } 91 | 92 | utils.LogDebug(fmt.Sprintf("DirtyBlock DB: Found %d total blocks in DB", len(allBlocks))) 93 | 94 | var blocks []DirtyBlock 95 | prefix := fmt.Sprintf("%s_%s_", agentID, diskPath) 96 | 97 | for key, blockData := range allBlocks { 98 | // Unmarshal the key since BoltDB stores it as JSON 99 | var keyStr string 100 | if err := json.Unmarshal([]byte(key), &keyStr); err != nil { 101 | utils.LogError(fmt.Sprintf("DirtyBlock DB: Failed to unmarshal key: %v", err)) 102 | continue 103 | } 104 | 105 | // Check if the key starts with our prefix 106 | if strings.HasPrefix(keyStr, prefix) { 107 | var block DirtyBlock 108 | if err := json.Unmarshal(blockData, &block); err != nil { 109 | utils.LogError(fmt.Sprintf("DirtyBlock DB: Failed to unmarshal block data: %v", err)) 110 | continue 111 | } 112 | blocks = append(blocks, block) 113 | } 114 | } 115 | 116 | utils.LogDebug(fmt.Sprintf("DirtyBlock DB: Found %d dirty blocks for agent: %s, disk: %s", len(blocks), agentID, diskPath)) 117 | return blocks, nil 118 | } 119 | 120 | // RemoveBlock removes a specific dirty block from the database 121 | func RemoveBlock(agentID, diskPath string, blockNum int64) error { 122 | db := getDirtyBlockDBInstance() 123 | if err := db.Open(); err != nil { 124 | utils.LogError(fmt.Sprintf("DirtyBlock DB: Failed to open DB for removing dirty block: %v", err)) 125 | return err 126 | } 127 | defer func() { 128 | if err := db.Close(); err != nil { 129 | utils.LogError("DirtyBlock DB: unable to close DB: " + err.Error()) 130 | } 131 | }() 132 | 133 | key := fmt.Sprintf("%s_%s_%d", agentID, diskPath, blockNum) 134 | err := db.Delete(key) 135 | if err != nil { 136 | if err.Error() == "key not found" { 137 | utils.LogDebug(fmt.Sprintf("DirtyBlock DB: Block already removed - Key: %s", key)) 138 | return nil 139 | } 140 | utils.LogError(fmt.Sprintf("DirtyBlock DB: Failed to remove block: %v", err)) 141 | return err 142 | } 143 | 144 | utils.LogDebug(fmt.Sprintf("DirtyBlock DB: Successfully removed block - Key: %s", key)) 145 | return nil 146 | } 147 | 148 | // UpdateBlockRetry updates the retry count and last retried 
time for a specific block 149 | func UpdateBlockRetry(agentID, diskPath string, blockNum int64) error { 150 | db := getDirtyBlockDBInstance() 151 | if err := db.Open(); err != nil { 152 | utils.LogError(fmt.Sprintf("DirtyBlock DB: Failed to open DB for updating retry count: %v", err)) 153 | return err 154 | } 155 | defer func() { 156 | if err := db.Close(); err != nil { 157 | utils.LogError("DirtyBlock DB: unable to close DB: " + err.Error()) 158 | } 159 | }() 160 | 161 | key := fmt.Sprintf("%s_%s_%d", agentID, diskPath, blockNum) 162 | utils.LogDebug(fmt.Sprintf("DirtyBlock DB: Updating retry count for block - Key: %s", key)) 163 | 164 | blockData, err := db.Get(key) 165 | if err != nil { 166 | if err.Error() == "key does not exists" { 167 | utils.LogDebug(fmt.Sprintf("DirtyBlock DB: Block not found for retry update - Key: %s", key)) 168 | return nil // Not an error, block might have been removed 169 | } 170 | utils.LogError(fmt.Sprintf("DirtyBlock DB: Failed to get block for retry update: %v", err)) 171 | return err 172 | } 173 | 174 | var block DirtyBlock 175 | if err := json.Unmarshal(blockData, &block); err != nil { 176 | utils.LogError(fmt.Sprintf("DirtyBlock DB: Failed to unmarshal block data for retry update: %v", err)) 177 | return err 178 | } 179 | 180 | block.LastRetried = time.Now().UTC() 181 | block.RetryCount++ 182 | 183 | utils.LogDebug(fmt.Sprintf("DirtyBlock DB: Updating block retry count - Key: %s, New Count: %d", key, block.RetryCount)) 184 | 185 | updatedBlockData, err := json.Marshal(block) 186 | if err != nil { 187 | utils.LogError(fmt.Sprintf("DirtyBlock DB: Failed to marshal updated block data: %v", err)) 188 | return err 189 | } 190 | 191 | if err := db.Insert(key, updatedBlockData); err != nil { 192 | utils.LogError(fmt.Sprintf("DirtyBlock DB: Failed to save updated retry count: %v", err)) 193 | return err 194 | } 195 | 196 | utils.LogDebug(fmt.Sprintf("DirtyBlock DB: Successfully updated retry count - Key: %s", key)) 197 | return nil 198 | } 199 | 200 | // IsBlockDirty checks if a specific block is marked as dirty 201 | func IsBlockDirty(agentID, diskPath string, blockNum int64) (bool, error) { 202 | db := getDirtyBlockDBInstance() 203 | if err := db.Open(); err != nil { 204 | utils.LogError(fmt.Sprintf("DirtyBlock DB: Failed to open DB for checking dirty block: %v", err)) 205 | return false, err 206 | } 207 | defer func() { 208 | if err := db.Close(); err != nil { 209 | utils.LogError("DirtyBlock DB: unable to close DB: " + err.Error()) 210 | } 211 | }() 212 | 213 | key := fmt.Sprintf("%s_%s_%d", agentID, diskPath, blockNum) 214 | utils.LogDebug(fmt.Sprintf("DirtyBlock DB: Checking if block is dirty - Key: %s", key)) 215 | 216 | _, err := db.Get(key) 217 | if err != nil { 218 | if strings.Contains(err.Error(), "does not exists") { 219 | // This is an expected case for most blocks 220 | return false, nil 221 | } 222 | utils.LogError(fmt.Sprintf("DirtyBlock DB: Error checking dirty block: %v", err)) 223 | return false, err 224 | } 225 | 226 | utils.LogDebug(fmt.Sprintf("DirtyBlock DB: Block found in dirty list - Key: %s", key)) 227 | return true, nil 228 | } 229 | 230 | // GetAllDirtyBlocks retrieves all dirty blocks from the database 231 | func GetAllDirtyBlocks() ([]DirtyBlock, error) { 232 | db := getDirtyBlockDBInstance() 233 | if err := db.Open(); err != nil { 234 | utils.LogError(fmt.Sprintf("DirtyBlock DB: Failed to open DB for getting all dirty blocks: %v", err)) 235 | return nil, err 236 | } 237 | defer func() { 238 | if err := db.Close(); err != nil { 
239 | utils.LogError("DirtyBlock DB: unable to close DB: " + err.Error()) 240 | } 241 | }() 242 | 243 | allBlocks, err := db.SelectAll(-1) 244 | if err != nil { 245 | return nil, err 246 | } 247 | 248 | var blocks []DirtyBlock 249 | for _, blockData := range allBlocks { 250 | var block DirtyBlock 251 | if err := json.Unmarshal(blockData, &block); err != nil { 252 | continue 253 | } 254 | blocks = append(blocks, block) 255 | } 256 | 257 | return blocks, nil 258 | } 259 | -------------------------------------------------------------------------------- /storage/boltdb/boltdb.go: -------------------------------------------------------------------------------- 1 | package boltdb 2 | 3 | import ( 4 | "encoding/json" 5 | "fmt" 6 | 7 | "github.com/xmigrate/blxrep/storage" 8 | "github.com/xmigrate/blxrep/utils" 9 | 10 | bolt "go.etcd.io/bbolt" 11 | ) 12 | 13 | type BoltDB struct { 14 | DB *bolt.DB 15 | Path string 16 | } 17 | 18 | func New(path string) storage.Service { 19 | return &BoltDB{Path: path} 20 | } 21 | 22 | func (db *BoltDB) Open() error { 23 | 24 | dbInstance, err := bolt.Open(db.Path, 0600, nil) 25 | if err != nil { 26 | utils.LogError("unable to open db for path: " + db.Path) 27 | return err 28 | } 29 | 30 | db.DB = dbInstance 31 | 32 | err = db.DB.Update(func(tx *bolt.Tx) error { 33 | _, err := tx.CreateBucketIfNotExists([]byte("default")) 34 | if err != nil { 35 | return err 36 | } 37 | return nil 38 | }) 39 | 40 | if err != nil { 41 | utils.LogError("unable to create 'default' bucket: " + err.Error()) 42 | return err 43 | } 44 | 45 | return nil 46 | } 47 | 48 | func (db *BoltDB) Close() error { return db.DB.Close() } 49 | 50 | func (db *BoltDB) SelectAll(limit int) (map[string][]byte, error) { 51 | if limit == 0 { 52 | limit = 10 53 | } 54 | 55 | blocks := make(map[string][]byte, 0) 56 | 57 | err := db.DB.View(func(tx *bolt.Tx) error { 58 | b := tx.Bucket([]byte("default")) 59 | if b == nil { 60 | return fmt.Errorf("Bucket %q not found!", "default") 61 | } 62 | 63 | c := b.Cursor() 64 | for k, v := c.First(); k != nil; k, v = c.Next() { 65 | if limit != -1 && len(blocks) >= limit { 66 | break 67 | } 68 | 69 | blocks[string(k)] = v 70 | 71 | } 72 | 73 | return nil 74 | }) 75 | 76 | return blocks, err 77 | } 78 | 79 | func (db *BoltDB) Insert(id string, data []byte) error { 80 | 81 | return db.DB.Update(func(tx *bolt.Tx) error { 82 | b, err := tx.CreateBucketIfNotExists([]byte("default")) 83 | if err != nil { 84 | return err 85 | } 86 | 87 | key, err := json.Marshal(id) 88 | if err != nil { 89 | return err 90 | } 91 | 92 | return b.Put(key, data) 93 | }) 94 | } 95 | 96 | func (db *BoltDB) Get(agentId string) ([]byte, error) { 97 | 98 | var agent []byte 99 | 100 | err := db.DB.View(func(tx *bolt.Tx) error { 101 | // Retrieve the bucket (assumes it exists) 102 | bucket := tx.Bucket([]byte("default")) 103 | 104 | key, err := json.Marshal(agentId) 105 | if err != nil { 106 | return err 107 | } 108 | 109 | // Check if the key exists 110 | if value := bucket.Get(key); value != nil { 111 | // Key exists, print the value 112 | 113 | agent = value 114 | } else { 115 | // Key does not exist 116 | return fmt.Errorf("key %s does not exists", agentId) 117 | } 118 | 119 | return nil 120 | }) 121 | 122 | return agent, err 123 | } 124 | 125 | func (db *BoltDB) GetKeyCount() (uint64, error) { 126 | var keyCount uint64 127 | 128 | err := db.DB.View(func(tx *bolt.Tx) error { 129 | b := tx.Bucket([]byte("default")) 130 | if b == nil { 131 | return fmt.Errorf("Bucket %s not found!", "default") 132 | } 
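// Descriptive comment (not in the original source): counting requires a
// full cursor walk over the bucket, so this is O(n) in the number of
// stored keys.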
133 | 134 | c := b.Cursor() 135 | for k, _ := c.First(); k != nil; k, _ = c.Next() { 136 | keyCount++ 137 | } 138 | 139 | return nil 140 | }) 141 | 142 | if err != nil { 143 | return 0, err 144 | } 145 | 146 | return keyCount, nil 147 | } 148 | 149 | func (db *BoltDB) Delete(agentId string) error { 150 | return db.DB.Update(func(tx *bolt.Tx) error { 151 | b := tx.Bucket([]byte("default")) 152 | if b == nil { 153 | return fmt.Errorf("Bucket %s not found!", "default") 154 | } 155 | 156 | key, err := json.Marshal(agentId) 157 | if err != nil { 158 | return err 159 | } 160 | 161 | return b.Delete(key) 162 | }) 163 | } 164 | -------------------------------------------------------------------------------- /storage/storage.go: -------------------------------------------------------------------------------- 1 | package storage 2 | 3 | type Storage struct { 4 | } 5 | 6 | type Service interface { 7 | Open() error 8 | Close() error 9 | SelectAll(limit int) (map[string][]byte, error) 10 | Insert(string, []byte) error 11 | Get(string) ([]byte, error) 12 | GetKeyCount() (uint64, error) 13 | Delete(string) error 14 | } 15 | -------------------------------------------------------------------------------- /tui/actions.go: -------------------------------------------------------------------------------- 1 | package tui 2 | 3 | import ( 4 | "fmt" 5 | "sort" 6 | "strings" 7 | "time" 8 | 9 | "github.com/xmigrate/blxrep/pkg/dispatcher" 10 | "github.com/xmigrate/blxrep/service" 11 | 12 | "github.com/gdamore/tcell/v2" 13 | "github.com/rivo/tview" 14 | ) 15 | 16 | func (t *DispatcherTUI) showActions() { 17 | t.viewState = viewActions 18 | t.updateInfoBar([]string{ 19 | "[green]
[white] Pause", 20 | "[green][white] Resume", 21 | "[green][white] Quit", 22 | "[green][white] Back", 23 | }) 24 | // Create a new table for actions 25 | actionsTable := tview.NewTable(). 26 | SetBorders(false). 27 | SetSelectable(true, false) 28 | 29 | actionsTable.SetTitle(""). 30 | SetBorder(true). 31 | SetTitleColor(tcell.ColorPurple). 32 | SetBorderColor(tcell.ColorYellowGreen).SetBorderColor(tcell.ColorGreen) 33 | 34 | // Set up table headers 35 | actionsTable.SetCell(0, 0, tview.NewTableCell("Agent ID").SetTextColor(tcell.ColorYellow).SetSelectable(false)) 36 | actionsTable.SetCell(0, 1, tview.NewTableCell("Action ID").SetTextColor(tcell.ColorYellow).SetSelectable(false)) 37 | actionsTable.SetCell(0, 2, tview.NewTableCell("Action").SetTextColor(tcell.ColorYellow).SetSelectable(false)) 38 | actionsTable.SetCell(0, 3, tview.NewTableCell("Status").SetTextColor(tcell.ColorYellow).SetSelectable(false)) 39 | actionsTable.SetCell(0, 4, tview.NewTableCell("Type").SetTextColor(tcell.ColorYellow).SetSelectable(false)) 40 | actionsTable.SetCell(0, 5, tview.NewTableCell("Created").SetTextColor(tcell.ColorYellow).SetSelectable(false)) 41 | actionsTable.SetCell(0, 6, tview.NewTableCell("Progress").SetTextColor(tcell.ColorYellow).SetSelectable(false)) 42 | 43 | t.content.Clear() 44 | t.content.AddItem(actionsTable, 0, 1, true) 45 | t.table = actionsTable 46 | t.app.SetFocus(actionsTable) 47 | 48 | // Start a goroutine to update the actions periodically 49 | go t.updateActionsPeriodicallly(actionsTable) 50 | } 51 | 52 | func (t *DispatcherTUI) updateActionsPeriodicallly(actionsTable *tview.Table) { 53 | ticker := time.NewTicker(1 * time.Second) 54 | defer ticker.Stop() 55 | 56 | for { 57 | select { 58 | case <-ticker.C: 59 | t.app.QueueUpdateDraw(func() { 60 | t.updateActionsTable(actionsTable) 61 | }) 62 | } 63 | } 64 | } 65 | 66 | func (t *DispatcherTUI) updateActionsTable(actionsTable *tview.Table) { 67 | // t.tableMutex.Lock() 68 | // defer t.tableMutex.Unlock() 69 | 70 | actions, err := service.GetAllActions(100) 71 | if err != nil { 72 | t.showError(fmt.Sprintf("Error fetching actions: %v", err)) 73 | return 74 | } 75 | 76 | // Sort actions by TimeStarted in descending order (most recent first) 77 | sort.Slice(actions, func(i, j int) bool { 78 | return actions[i].TimeStarted > actions[j].TimeStarted 79 | }) 80 | 81 | // Update or add rows for each action 82 | for i, action := range actions { 83 | row := i + 1 // +1 because row 0 is the header 84 | 85 | // Ensure we have enough rows 86 | for r := actionsTable.GetRowCount(); r <= row; r++ { 87 | actionsTable.SetCell(r, 0, tview.NewTableCell("")) 88 | actionsTable.SetCell(r, 1, tview.NewTableCell("")) 89 | actionsTable.SetCell(r, 2, tview.NewTableCell("")) 90 | actionsTable.SetCell(r, 3, tview.NewTableCell("")) 91 | actionsTable.SetCell(r, 4, tview.NewTableCell("")) 92 | actionsTable.SetCell(r, 5, tview.NewTableCell("")) 93 | actionsTable.SetCell(r, 6, tview.NewTableCell("")) 94 | } 95 | 96 | actionsTable.GetCell(row, 0).SetText(action.AgentId) 97 | actionsTable.GetCell(row, 1).SetText(action.Id) 98 | actionsTable.GetCell(row, 2).SetText(action.Action) 99 | actionsTable.GetCell(row, 3).SetText(action.ActionStatus) 100 | actionsTable.GetCell(row, 4).SetText(action.ActionType) 101 | actionsTable.GetCell(row, 5).SetText(time.Unix(action.TimeStarted, 0).Format("2006-01-02 15:04:05")) 102 | progressBar := t.createProgressBar(action.ActionProgress, 20) // 20 is the width of the progress bar 103 | 104 | actionsTable.GetCell(row, 6).SetText(progressBar) 
105 | 106 | } 107 | 108 | // Clear any extra rows 109 | for row := len(actions) + 1; row < actionsTable.GetRowCount(); row++ { 110 | for col := 0; col < actionsTable.GetColumnCount(); col++ { 111 | actionsTable.GetCell(row, col).SetText("") 112 | } 113 | } 114 | 115 | if len(actions) == 0 { 116 | actionsTable.GetCell(1, 0).SetText("No actions in progress").SetTextColor(tcell.ColorRed) 117 | } 118 | } 119 | 120 | func (t *DispatcherTUI) createProgressBar(progress int, width int) string { 121 | if progress < 0 { 122 | progress = 0 123 | } 124 | if progress > 100 { 125 | progress = 100 126 | } 127 | 128 | completed := int(float64(width) * float64(progress) / 100.0) 129 | remaining := width - completed 130 | 131 | bar := "[" 132 | bar += strings.Repeat("[green]█[white]", completed) 133 | if remaining > 0 { 134 | bar += strings.Repeat("[green]░[white]", remaining) 135 | } 136 | bar += "]" 137 | 138 | return fmt.Sprintf("%s %3d%%", bar, progress) 139 | } 140 | 141 | func (t *DispatcherTUI) pauseSelectedAction(agentID string, actionID string, actionStatus string) { 142 | // Debug: Print the action details 143 | t.showMessage(fmt.Sprintf("Debug: AgentID: '%s', ActionID: '%s', Status: '%s'", agentID, actionID, actionStatus)) 144 | 145 | if agentID == "" || actionID == "" { 146 | t.showError("Error: AgentID or ActionID is empty") 147 | return 148 | } 149 | 150 | if actionStatus == "" { 151 | t.showError("Error: Action status is empty") 152 | return 153 | } 154 | 155 | if strings.ToLower(actionStatus) != "in progress" { 156 | t.showMessage(fmt.Sprintf("Only actions in progress can be paused. Current status: %s", actionStatus)) 157 | return 158 | } 159 | 160 | // Send pause message to the agent 161 | err := dispatcher.PauseAction(actionID, agentID) 162 | if err != nil { 163 | t.showError(fmt.Sprintf("Failed to pause action: %v", err)) 164 | return 165 | } 166 | 167 | t.showMessage("Action paused successfully") 168 | } 169 | 170 | func (t *DispatcherTUI) resumeSelectedAction(agentID string, actionID string, actionStatus string) { 171 | // Debug: Print the action details 172 | t.showMessage(fmt.Sprintf("Debug: AgentID: '%s', ActionID: '%s', Status: '%s'", agentID, actionID, actionStatus)) 173 | 174 | if agentID == "" || actionID == "" { 175 | t.showError("Error: AgentID or ActionID is empty") 176 | return 177 | } 178 | 179 | if actionStatus == "" { 180 | t.showError("Error: Action status is empty") 181 | return 182 | } 183 | 184 | if strings.ToLower(actionStatus) != "paused" { 185 | t.showMessage(fmt.Sprintf("Only paused actions can be resumed. 
Current status: %s", actionStatus)) 186 | return 187 | } 188 | 189 | // Send resume message to the agent 190 | err := dispatcher.ResumeAction(actionID, agentID) 191 | if err != nil { 192 | t.showError(fmt.Sprintf("Failed to resume action: %v", err)) 193 | return 194 | } 195 | 196 | t.showMessage("Action resumed successfully") 197 | } 198 | -------------------------------------------------------------------------------- /tui/agents.go: -------------------------------------------------------------------------------- 1 | package tui 2 | 3 | import ( 4 | "fmt" 5 | 6 | "github.com/gdamore/tcell/v2" 7 | "github.com/rivo/tview" 8 | "github.com/xmigrate/blxrep/service" 9 | ) 10 | 11 | func (t *DispatcherTUI) showAgents() { 12 | t.viewState = viewAgents 13 | t.updateInfoBar([]string{ 14 | "[green][white] Actions", 15 | "[green][white] Browse", 16 | "[green][white] Quit", 17 | }) 18 | var err error 19 | t.agents, err = service.GetConnectedAgentsMap() 20 | 21 | if err != nil { 22 | t.showError(fmt.Sprintf("Error fetching connected agents: %v", err)) 23 | return 24 | } 25 | 26 | agentsTable := tview.NewTable(). 27 | SetBorders(false). 28 | SetSelectable(true, false) 29 | 30 | agentsTable.SetTitle(""). 31 | SetBorder(true).SetTitleColor(tcell.ColorPurple).SetBorderColor(tcell.ColorGreen) 32 | 33 | agentsTable.SetCell(0, 0, tview.NewTableCell("Agent ID").SetTextColor(tcell.ColorYellow).SetSelectable(false)) 34 | agentsTable.SetCell(0, 1, tview.NewTableCell("Status").SetTextColor(tcell.ColorYellow).SetSelectable(false)) 35 | 36 | row := 1 37 | for id, agent := range t.agents { 38 | agentsTable.SetCell(row, 0, tview.NewTableCell(id)) 39 | status := "Disconnected" 40 | if agent.Connected { 41 | status = "Connected" 42 | } 43 | agentsTable.SetCell(row, 1, tview.NewTableCell(status)) 44 | row++ 45 | } 46 | 47 | if len(t.agents) == 0 { 48 | agentsTable.SetCell(1, 0, tview.NewTableCell("No connected agents found").SetTextColor(tcell.ColorRed)) 49 | } 50 | 51 | agentsTable.Select(1, 0).SetFixed(1, 0) 52 | 53 | t.content.Clear() 54 | t.content.AddItem(agentsTable, 0, 1, true) 55 | t.table = agentsTable 56 | t.app.SetFocus(agentsTable) 57 | } 58 | 59 | func (t *DispatcherTUI) showCheckpointsForSelectedAgent() { 60 | if t.table == nil { 61 | t.showError("No agent table available") 62 | return 63 | } 64 | 65 | row, _ := t.table.GetSelection() 66 | if row == 0 { 67 | t.showError("Please select an agent") 68 | return 69 | } 70 | 71 | agentID := t.table.GetCell(row, 0).Text 72 | t.currentAgentID = agentID 73 | t.showDisks(agentID) 74 | } 75 | -------------------------------------------------------------------------------- /tui/checkpoints.go: -------------------------------------------------------------------------------- 1 | package tui 2 | 3 | import ( 4 | "fmt" 5 | "time" 6 | 7 | "github.com/gdamore/tcell/v2" 8 | "github.com/rivo/tview" 9 | "github.com/xmigrate/blxrep/pkg/dispatcher" 10 | "github.com/xmigrate/blxrep/utils" 11 | ) 12 | 13 | func (t *DispatcherTUI) showCheckpoints(agentID string, disk string) { 14 | checkpoints, err := dispatcher.ShowCheckpoints("", "", agentID, t.dataDir, disk) 15 | if err != nil { 16 | t.showError(fmt.Sprintf("Error fetching checkpoints: %v", err)) 17 | return 18 | } 19 | 20 | t.viewState = viewCheckpoints 21 | t.updateInfoBar([]string{ 22 | "[green][white] Select", 23 | "[green][white] Back", 24 | "[green][white] Quit", 25 | }) 26 | t.currentAgentID = agentID 27 | 28 | t.table.Clear() 29 | t.table.SetTitle(fmt.Sprintf("", agentID)) 30 | 31 | t.table.SetCell(0, 0, 
tview.NewTableCell("Timestamp").SetTextColor(tcell.ColorYellow).SetSelectable(false)) 32 | t.table.SetCell(0, 1, tview.NewTableCell("Filename").SetTextColor(tcell.ColorYellow).SetSelectable(false)) 33 | 34 | for i, cp := range checkpoints { 35 | t.table.SetCell(i+1, 0, tview.NewTableCell(cp.Timestamp.Format("2006-01-02 15:04:05"))) 36 | t.table.SetCell(i+1, 1, tview.NewTableCell(cp.Filename)) 37 | } 38 | 39 | if len(checkpoints) == 0 { 40 | t.table.SetCell(1, 0, tview.NewTableCell("No checkpoints found").SetTextColor(tcell.ColorRed)) 41 | } 42 | 43 | t.table.Select(1, 0).SetFixed(1, 0) 44 | t.app.SetFocus(t.table) 45 | } 46 | 47 | func (t *DispatcherTUI) selectCheckpoint() { 48 | row, _ := t.table.GetSelection() 49 | if row > 0 && row <= t.table.GetRowCount() { 50 | t.selectedCheckpoint = &utils.Checkpoint{ 51 | Filename: t.table.GetCell(row, 1).Text, 52 | Timestamp: t.parseTimestamp(t.table.GetCell(row, 0).Text), 53 | } 54 | t.showCheckpointOptions() 55 | } 56 | } 57 | 58 | func (t *DispatcherTUI) showCheckpointOptions() { 59 | t.viewState = viewCheckpointOptions 60 | 61 | t.table.Clear() 62 | t.table.SetBorders(false) 63 | 64 | t.table.SetTitle(fmt.Sprintf("", t.selectedCheckpoint.Filename)) 65 | 66 | t.table.SetCell(0, 0, tview.NewTableCell("Option").SetTextColor(tcell.ColorYellow).SetSelectable(false)) 67 | t.table.SetCell(0, 1, tview.NewTableCell("Description").SetTextColor(tcell.ColorYellow).SetSelectable(false)) 68 | 69 | t.table.SetCell(1, 0, tview.NewTableCell("Restore").SetTextColor(tcell.ColorWhite)) 70 | t.table.SetCell(1, 1, tview.NewTableCell("Restore this checkpoint")) 71 | 72 | t.table.SetCell(2, 0, tview.NewTableCell("Browse").SetTextColor(tcell.ColorWhite)) 73 | t.table.SetCell(2, 1, tview.NewTableCell("Browse files in this checkpoint")) 74 | 75 | t.table.Select(1, 0).SetFixed(1, 0) 76 | 77 | t.content.Clear() 78 | t.content.AddItem(t.table, 0, 1, true) 79 | t.app.SetFocus(t.table) 80 | } 81 | 82 | func (t *DispatcherTUI) selectOption() { 83 | row, _ := t.table.GetSelection() 84 | switch row { 85 | case 1: 86 | t.restoreCheckpoint() 87 | case 2: 88 | t.browseCheckpoint() 89 | } 90 | } 91 | 92 | func (t *DispatcherTUI) restoreCheckpoint() { 93 | t.viewState = viewRestoreOptions 94 | 95 | t.table.Clear() 96 | t.table.SetBorders(false) 97 | 98 | t.table.SetTitle(fmt.Sprintf("", t.selectedCheckpoint.Filename)) 99 | 100 | t.table.SetCell(0, 0, tview.NewTableCell("Option").SetTextColor(tcell.ColorYellow).SetSelectable(false)) 101 | t.table.SetCell(0, 1, tview.NewTableCell("Description").SetTextColor(tcell.ColorYellow).SetSelectable(false)) 102 | 103 | t.table.SetCell(1, 0, tview.NewTableCell("Restore Partition").SetTextColor(tcell.ColorWhite)) 104 | t.table.SetCell(1, 1, tview.NewTableCell("Restore a specific partition from the checkpoint")) 105 | 106 | t.table.SetCell(2, 0, tview.NewTableCell("Restore Disk").SetTextColor(tcell.ColorWhite)) 107 | t.table.SetCell(2, 1, tview.NewTableCell("Restore the entire disk from the checkpoint")) 108 | 109 | t.table.Select(1, 0).SetFixed(1, 0) 110 | 111 | t.content.Clear() 112 | t.content.AddItem(t.table, 0, 1, true) 113 | t.app.SetFocus(t.table) 114 | 115 | } 116 | 117 | func (t *DispatcherTUI) parseTimestamp(timeStr string) time.Time { 118 | timestamp, err := time.Parse("2006-01-02 15:04:05", timeStr) 119 | if err != nil { 120 | // Handle the error, maybe log it or use a default time 121 | return time.Now() 122 | } 123 | return timestamp 124 | } 125 | -------------------------------------------------------------------------------- 
/tui/disks.go: -------------------------------------------------------------------------------- 1 | package tui 2 | 3 | import ( 4 | "fmt" 5 | 6 | "github.com/gdamore/tcell/v2" 7 | "github.com/rivo/tview" 8 | "github.com/xmigrate/blxrep/pkg/dispatcher" 9 | ) 10 | 11 | func (t *DispatcherTUI) showDisks(agentID string) { 12 | disks, err := dispatcher.ShowDisks(agentID, t.dataDir) 13 | if err != nil { 14 | t.showError(fmt.Sprintf("Error fetching disks: %v", err)) 15 | return 16 | } 17 | 18 | t.viewState = viewDisks 19 | t.updateInfoBar([]string{ 20 | "[green]<Enter>[white] Select", 21 | "[green]<Esc>[white] Back", 22 | "[green]<Q>[white] Quit", 23 | }) 24 | t.currentAgentID = agentID 25 | 26 | t.table.Clear() 27 | t.table.SetTitle(fmt.Sprintf("<Disks: %s>", agentID)) 28 | 29 | t.table.SetCell(0, 0, tview.NewTableCell("Disk").SetTextColor(tcell.ColorYellow).SetSelectable(false)) 30 | 31 | for i, disk := range disks { 32 | t.table.SetCell(i+1, 0, tview.NewTableCell(disk)) 33 | } 34 | 35 | if len(disks) == 0 { 36 | t.table.SetCell(1, 0, tview.NewTableCell("No disks found").SetTextColor(tcell.ColorRed)) 37 | } 38 | 39 | t.table.Select(1, 0).SetFixed(1, 0) 40 | t.app.SetFocus(t.table) 41 | } 42 | 43 | func (t *DispatcherTUI) selectDisk() { 44 | row, _ := t.table.GetSelection() 45 | if row > 0 && row <= t.table.GetRowCount() { 46 | t.selectedDisk = t.table.GetCell(row, 0).Text 47 | t.showCheckpoints(t.currentAgentID, t.selectedDisk) 48 | } 49 | } 50 | 51 | func (t *DispatcherTUI) showDiskOptions() { 52 | t.viewState = viewDiskOptions 53 | 54 | t.table.Clear() 55 | t.table.SetBorders(false) 56 | 57 | t.table.SetTitle(fmt.Sprintf("<Disk: %s>", t.selectedDisk)) 58 | 59 | t.table.SetCell(0, 0, tview.NewTableCell("Option").SetTextColor(tcell.ColorYellow).SetSelectable(false)) 60 | t.table.SetCell(0, 1, tview.NewTableCell("Description").SetTextColor(tcell.ColorYellow).SetSelectable(false)) 61 | 62 | t.table.SetCell(1, 0, tview.NewTableCell("Restore").SetTextColor(tcell.ColorWhite)) 63 | t.table.SetCell(1, 1, tview.NewTableCell("Restore this disk")) 64 | 65 | t.table.SetCell(2, 0, tview.NewTableCell("Browse").SetTextColor(tcell.ColorWhite)) 66 | t.table.SetCell(2, 1, tview.NewTableCell("Browse files in this disk")) 67 | 68 | t.table.Select(1, 0).SetFixed(1, 0) 69 | 70 | t.content.Clear() 71 | t.content.AddItem(t.table, 0, 1, true) 72 | t.app.SetFocus(t.table) 73 | } 74 | 75 | func (t *DispatcherTUI) selectDisks() { 76 | row, _ := t.table.GetSelection() 77 | switch row { 78 | case 1: 79 | t.restoreDiskOptions() 80 | case 2: 81 | t.showCheckpoints(t.currentAgentID, t.selectedDisk) 82 | } 83 | } 84 | 85 | func (t *DispatcherTUI) restoreDiskOptions() { 86 | t.viewState = viewRestoreOptions 87 | 88 | t.table.Clear() 89 | t.table.SetBorders(false) 90 | 91 | t.table.SetTitle(fmt.Sprintf("<Restore: %s>", t.selectedDisk)) 92 | 93 | t.table.SetCell(0, 0, tview.NewTableCell("Option").SetTextColor(tcell.ColorYellow).SetSelectable(false)) 94 | t.table.SetCell(0, 1, tview.NewTableCell("Description").SetTextColor(tcell.ColorYellow).SetSelectable(false)) 95 | 96 | t.table.SetCell(1, 0, tview.NewTableCell("Restore Partition").SetTextColor(tcell.ColorWhite)) 97 | t.table.SetCell(1, 1, tview.NewTableCell("Restore a specific partition from the disk")) 98 | 99 | t.table.SetCell(2, 0, tview.NewTableCell("Restore Disk").SetTextColor(tcell.ColorWhite)) 100 | t.table.SetCell(2, 1, tview.NewTableCell("Restore the entire disk")) 101 | 102 | t.table.Select(1, 0).SetFixed(1, 0) 103 | 104 | t.content.Clear() 105 | t.content.AddItem(t.table, 0, 1, true) 106 | 
t.app.SetFocus(t.table) 107 | 108 | } 109 | -------------------------------------------------------------------------------- /tui/dispatcher.go: -------------------------------------------------------------------------------- 1 | package tui 2 | 3 | import ( 4 | "fmt" 5 | "os" 6 | "os/exec" 7 | "path/filepath" 8 | "strings" 9 | "sync" 10 | 11 | "github.com/xmigrate/blxrep/utils" 12 | 13 | "github.com/gdamore/tcell/v2" 14 | "github.com/rivo/tview" 15 | ) 16 | 17 | type DispatcherTUI struct { 18 | app *tview.Application 19 | mainFlex *tview.Flex 20 | infoBarLeft *tview.TextView 21 | infoBarRight *tview.TextView 22 | content *tview.Flex 23 | cmdInput *tview.InputField 24 | dataDir string 25 | agents map[string]utils.Agent 26 | viewState viewState 27 | table *tview.Table 28 | selectedCheckpoint *utils.Checkpoint 29 | selectedDisk string 30 | currentAgentID string 31 | currentDir string 32 | partitions []Partition 33 | loopDev string 34 | mountDir string 35 | isRestoreFormActive bool 36 | tableMutex sync.RWMutex 37 | } 38 | 39 | type viewState int 40 | 41 | const ( 42 | viewAgents viewState = iota 43 | viewCheckpoints 44 | viewCheckpointOptions 45 | viewPartitions 46 | viewFileBrowser 47 | viewRestoreOptions 48 | viewActions 49 | viewDisks 50 | viewDiskOptions 51 | ) 52 | 53 | func RunDispatcherTUI(dataDir string) { 54 | utils.AppConfiguration.DataDir = dataDir 55 | tui := &DispatcherTUI{ 56 | app: tview.NewApplication(), 57 | dataDir: dataDir, 58 | viewState: viewAgents, 59 | } 60 | 61 | tui.setup() 62 | 63 | if err := tui.app.Run(); err != nil { 64 | panic(err) 65 | } 66 | } 67 | 68 | func (t *DispatcherTUI) setup() { 69 | dataDir := fmt.Sprintf("Data Dir: %s", t.dataDir) 70 | banner := utils.GetDiskBanner() 71 | infoText := fmt.Sprintf("%s \n %s", banner, dataDir) 72 | 73 | t.infoBarLeft = tview.NewTextView(). 74 | SetTextColor(tcell.ColorPurple). 75 | SetDynamicColors(true). 76 | SetRegions(true). 77 | SetWrap(false). 78 | SetText(infoText) 79 | t.infoBarLeft.SetBorder(false) 80 | 81 | t.infoBarRight = tview.NewTextView(). 82 | SetDynamicColors(true). 83 | SetRegions(true). 84 | SetWrap(false). 85 | SetTextAlign(tview.AlignLeft) 86 | t.infoBarRight.SetBorder(false) 87 | infoBarFlex := tview.NewFlex(). 88 | AddItem(t.infoBarLeft, 0, 2, false). 89 | AddItem(t.infoBarRight, 0, 1, false) 90 | 91 | t.content = tview.NewFlex().SetDirection(tview.FlexColumn) 92 | 93 | t.cmdInput = tview.NewInputField(). 94 | SetLabel(" Command: "). 95 | SetFieldWidth(0). 96 | SetDoneFunc(t.handleCommand).SetFieldBackgroundColor(tcell.ColorBlack).SetLabelColor(tcell.ColorWhite) 97 | // Calculate height based on banner lines plus data dir line 98 | bannerLines := len(strings.Split(banner, "\n")) 99 | totalHeight := bannerLines + 2 // +2 for data dir line and padding 100 | 101 | t.mainFlex = tview.NewFlex(). 102 | SetDirection(tview.FlexRow). 103 | AddItem(infoBarFlex, totalHeight, 1, false). 
// Dynamic height based on content 104 | AddItem(t.content, 0, 1, true) 105 | 106 | t.app.SetRoot(t.mainFlex, true) 107 | 108 | t.app.SetInputCapture(t.globalInputHandler) 109 | 110 | t.showAgents() 111 | } 112 | 113 | func (t *DispatcherTUI) updateInfoBar(shortcuts []string) { 114 | banner := utils.GetDiskBanner() 115 | dataDir := fmt.Sprintf("[orange]Data Dir:[white] %s", t.dataDir) 116 | 117 | // Update left column 118 | leftText := fmt.Sprintf("%s\n%s", banner, dataDir) 119 | t.infoBarLeft.SetText(leftText) 120 | 121 | // Update right column 122 | rightText := "\n\n\n" + strings.Join(shortcuts, "\n") 123 | t.infoBarRight.SetText(rightText) 124 | } 125 | 126 | func (t *DispatcherTUI) globalInputHandler(event *tcell.EventKey) *tcell.EventKey { 127 | if t.isRestoreFormActive { 128 | return event 129 | } 130 | switch event.Key() { 131 | case tcell.KeyRune: 132 | switch event.Rune() { 133 | case ':': 134 | t.showCommandInput() 135 | return nil 136 | case 'q', 'Q': 137 | t.app.Stop() 138 | return nil 139 | 140 | case 'a', 'A': 141 | if t.viewState == viewAgents { 142 | t.showActions() 143 | return nil 144 | } 145 | case 'p', 'P': 146 | if t.viewState == viewActions { 147 | row, _ := t.table.GetSelection() 148 | agentID := t.table.GetCell(row, 0).Text 149 | actionID := t.table.GetCell(row, 1).Text 150 | actionStatus := t.table.GetCell(row, 3).Text 151 | t.pauseSelectedAction(agentID, actionID, actionStatus) 152 | return nil 153 | } 154 | case 'r', 'R': 155 | if t.viewState == viewActions { 156 | row, _ := t.table.GetSelection() 157 | agentID := t.table.GetCell(row, 0).Text 158 | actionID := t.table.GetCell(row, 1).Text 159 | actionStatus := t.table.GetCell(row, 3).Text 160 | t.resumeSelectedAction(agentID, actionID, actionStatus) 161 | return nil 162 | } 163 | } 164 | case tcell.KeyEscape: 165 | switch t.viewState { 166 | case viewActions: 167 | t.showAgents() 168 | return nil 169 | case viewCheckpoints: 170 | t.showDisks(t.currentAgentID) 171 | return nil 172 | case viewDisks: 173 | t.showAgents() 174 | return nil 175 | case viewDiskOptions: 176 | t.showDisks(t.currentAgentID) 177 | return nil 178 | case viewCheckpointOptions: 179 | t.showCheckpoints(t.currentAgentID, t.selectedDisk) 180 | return nil 181 | case viewPartitions: 182 | exec.Command("losetup", "-d", t.loopDev).Run() 183 | t.showCheckpointOptions() 184 | return nil 185 | case viewFileBrowser: 186 | // Go back to partition selection when in file browser 187 | exec.Command("umount", t.mountDir).Run() 188 | t.selectPartitionTUI() 189 | return nil 190 | case viewRestoreOptions: 191 | t.showCheckpointOptions() 192 | return nil 193 | 194 | } 195 | case tcell.KeyEnter: 196 | switch t.viewState { 197 | case viewAgents: 198 | t.showCheckpointsForSelectedAgent() 199 | return nil 200 | case viewCheckpoints: 201 | t.selectCheckpoint() 202 | return nil 203 | case viewDisks: 204 | t.selectDisk() 205 | return nil 206 | case viewDiskOptions: 207 | t.selectDisks() // disk options have their own handler (see tui/disks.go); selectOption is for checkpoint options 208 | return nil 209 | case viewCheckpointOptions: 210 | t.selectOption() 211 | return nil 212 | case viewPartitions: 213 | row, _ := t.table.GetSelection() 214 | if row > 0 && row <= len(t.partitions) { 215 | selectedPartition := t.partitions[row-1] 216 | t.mountSelectedPartition(selectedPartition) 217 | } 218 | return nil 219 | case viewFileBrowser: 220 | table := t.content.GetItem(0).(*tview.Table) 221 | row, _ := table.GetSelection() 222 | if row == 1 { 223 | // Go to parent directory 224 | parentDir := filepath.Dir(t.currentDir) 225 | if parentDir != t.currentDir { 226 | 
t.updateFileTable(table, parentDir) 227 | t.currentDir = parentDir 228 | } 229 | } else if row > 1 { 230 | cellContent := table.GetCell(row, 0).Text 231 | fileName := strings.TrimPrefix(cellContent, "[::b]") // Remove bold formatting if present 232 | filePath := filepath.Join(t.currentDir, fileName) 233 | fileInfo, err := os.Stat(filePath) 234 | if err != nil { 235 | t.showError(fmt.Sprintf("Error accessing file: %v", err)) 236 | return nil 237 | } 238 | if fileInfo.IsDir() { 239 | t.updateFileTable(table, filePath) 240 | t.currentDir = filePath 241 | } else { 242 | // You can add file viewing functionality here if needed 243 | t.showMessage(fmt.Sprintf("Selected file: %s", fileName)) 244 | } 245 | } 246 | return nil 247 | case viewRestoreOptions: 248 | row, _ := t.table.GetSelection() 249 | switch row { 250 | case 1: 251 | t.restorePartition() 252 | case 2: 253 | t.restoreDisk() 254 | } 255 | return nil 256 | } 257 | } 258 | return event 259 | } 260 | 261 | func (t *DispatcherTUI) restoreDisk() { 262 | // Implement full disk restoration logic here 263 | t.showMessage("Restoring full disk... (Not yet implemented)") 264 | } 265 | 266 | func (t *DispatcherTUI) showCommandInput() { 267 | t.mainFlex.RemoveItem(t.content) 268 | t.mainFlex.AddItem(t.cmdInput, 1, 1, true) 269 | t.mainFlex.AddItem(t.content, 0, 1, false) 270 | t.app.SetFocus(t.cmdInput) 271 | } 272 | 273 | func (t *DispatcherTUI) hideCommandInput() { 274 | t.mainFlex.RemoveItem(t.cmdInput) 275 | t.mainFlex.RemoveItem(t.content) 276 | t.mainFlex.AddItem(t.content, 0, 1, true) 277 | t.app.SetFocus(t.content) 278 | } 279 | 280 | func (t *DispatcherTUI) handleCommand(key tcell.Key) { 281 | if key != tcell.KeyEnter { 282 | return 283 | } 284 | 285 | cmd := strings.TrimSpace(t.cmdInput.GetText()) 286 | t.cmdInput.SetText("") 287 | t.hideCommandInput() 288 | 289 | switch cmd { 290 | case "refresh": 291 | t.showAgents() 292 | default: 293 | t.showError(fmt.Sprintf("Unknown command: %s", cmd)) 294 | } 295 | } 296 | 297 | func (t *DispatcherTUI) showError(message string) { 298 | t.table.Clear() 299 | t.table.SetBorders(false) 300 | t.table.SetTitle("Error") 301 | 302 | // Split the message into words 303 | words := strings.Fields(message) 304 | lines := []string{} 305 | currentLine := "" 306 | 307 | // Create lines with a maximum width of 80 characters 308 | for _, word := range words { 309 | if len(currentLine)+len(word)+1 > 80 { 310 | lines = append(lines, strings.TrimSpace(currentLine)) 311 | currentLine = word 312 | } else { 313 | if currentLine != "" { 314 | currentLine += " " 315 | } 316 | currentLine += word 317 | } 318 | } 319 | if currentLine != "" { 320 | lines = append(lines, strings.TrimSpace(currentLine)) 321 | } 322 | 323 | // Add each line to the table 324 | for i, line := range lines { 325 | t.table.SetCell(i, 0, tview.NewTableCell(line).SetTextColor(tcell.ColorRed)) 326 | } 327 | 328 | t.app.SetFocus(t.table) 329 | } 330 | 331 | func (t *DispatcherTUI) showMessage(message string) { 332 | modal := tview.NewModal(). 333 | SetText(message). 334 | AddButtons([]string{"OK"}). 
335 | SetDoneFunc(func(buttonIndex int, buttonLabel string) { 336 | t.app.SetRoot(t.mainFlex, true) 337 | }) 338 | 339 | t.app.SetRoot(modal, true) 340 | } 341 | -------------------------------------------------------------------------------- /tui/filebrowser.go: -------------------------------------------------------------------------------- 1 | package tui 2 | 3 | import ( 4 | "fmt" 5 | "os" 6 | "path/filepath" 7 | "strings" 8 | "time" 9 | 10 | "github.com/xmigrate/blxrep/service" 11 | "github.com/xmigrate/blxrep/utils" 12 | 13 | "github.com/gdamore/tcell/v2" 14 | "github.com/rivo/tview" 15 | ) 16 | 17 | func (t *DispatcherTUI) showFileBrowser(rootDir string) { 18 | t.viewState = viewFileBrowser 19 | t.updateInfoBar([]string{ 20 | "[green]<Ctrl+R>[white] Restore", 21 | "[green]<Enter>[white] View/Browse", 22 | }) 23 | table := tview.NewTable(). 24 | SetBorders(false). 25 | SetSelectable(true, false) 26 | 27 | table.SetTitle("<File Browser>"). 28 | SetBorder(true).SetBorderColor(tcell.ColorGreen) 29 | 30 | t.updateFileTable(table, rootDir) 31 | 32 | t.content.Clear() 33 | t.content.AddItem(table, 0, 1, true) 34 | t.app.SetFocus(table) 35 | 36 | table.SetInputCapture(func(event *tcell.EventKey) *tcell.EventKey { 37 | if event.Key() == tcell.KeyCtrlR { 38 | row, _ := table.GetSelection() 39 | if row > 0 { // Ignore header row 40 | cellContent := table.GetCell(row, 0).Text 41 | fileName := strings.TrimPrefix(cellContent, "[::b]") 42 | filePath := filepath.Join(t.currentDir, fileName) 43 | t.showRestorePrompt(filePath) 44 | return nil 45 | } 46 | } 47 | return event 48 | }) 49 | } 50 | 51 | func (t *DispatcherTUI) showRestorePrompt(sourcePath string) { 52 | t.isRestoreFormActive = true 53 | 54 | form := tview.NewForm() 55 | 56 | form.AddInputField("Source Path", sourcePath, 0, nil, nil) 57 | form.AddInputField("Destination Path", sourcePath, 0, nil, nil) 58 | 59 | form.AddButton("Restore", func() { 60 | sourceInput := form.GetFormItemByLabel("Source Path").(*tview.InputField) 61 | destInput := form.GetFormItemByLabel("Destination Path").(*tview.InputField) 62 | source := sourceInput.GetText() 63 | dest := destInput.GetText() 64 | t.showRestoreConfirmation(source, dest) 65 | }) 66 | 67 | form.AddButton("Cancel", func() { 68 | t.isRestoreFormActive = false 69 | t.app.SetRoot(t.mainFlex, true) 70 | }) 71 | 72 | form.SetBorder(true).SetTitle("Create Restore Action") 73 | 74 | // Set custom input capture for the form 75 | form.SetInputCapture(func(event *tcell.EventKey) *tcell.EventKey { 76 | switch event.Key() { 77 | case tcell.KeyEscape: 78 | // Exit the form 79 | t.isRestoreFormActive = false 80 | t.app.SetRoot(t.mainFlex, true) 81 | return nil 82 | } 83 | // For all other keys, including Enter, let the form handle them 84 | return event 85 | }) 86 | 87 | t.app.SetRoot(form, true) 88 | } 89 | 90 | func (t *DispatcherTUI) showRestoreConfirmation(sourcePath, destPath string) { 91 | modal := tview.NewModal(). 92 | SetText(fmt.Sprintf("Are you sure you want to restore?\nFrom: %s\nTo: %s", sourcePath, destPath)). 93 | AddButtons([]string{"Restore", "Cancel"}). 
94 | SetDoneFunc(func(buttonIndex int, buttonLabel string) { 95 | if buttonLabel == "Restore" { 96 | utils.LogDebug("Restore button pressed") 97 | actionId, err := t.createRestoreAction(sourcePath, destPath) 98 | if err != nil { 99 | utils.LogError(fmt.Sprintf("Failed to create restore action: %v", err)) 100 | t.showError(fmt.Sprintf("Failed to create restore action: %v", err)) 101 | } else { 102 | utils.LogDebug(fmt.Sprintf("Restore action created with ID: %s", actionId)) 103 | t.showRestoreProgress(actionId) 104 | } 105 | } else { 106 | utils.LogDebug("Cancel button pressed") 107 | t.isRestoreFormActive = false 108 | t.app.SetRoot(t.mainFlex, true) 109 | } 110 | }) 111 | 112 | t.app.SetRoot(modal, true) 113 | } 114 | 115 | func (t *DispatcherTUI) showRestoreProgress(actionId string) { 116 | utils.LogDebug(fmt.Sprintf("Showing restore progress for action ID: %s", actionId)) 117 | 118 | progressText := tview.NewTextView(). 119 | SetDynamicColors(true). 120 | SetTextAlign(tview.AlignCenter). 121 | SetText("Status: [yellow]Starting[white]\nProgress: [yellow]0%[white]") 122 | 123 | // Custom progress bar 124 | progressBar := tview.NewTextView(). 125 | SetDynamicColors(true). 126 | SetTextAlign(tview.AlignLeft) 127 | 128 | progressFlex := tview.NewFlex(). 129 | SetDirection(tview.FlexRow). 130 | AddItem(tview.NewTextView().SetText("Restore in progress...").SetTextAlign(tview.AlignCenter), 0, 1, false). 131 | AddItem(progressText, 0, 1, false). 132 | AddItem(progressBar, 1, 1, false) 133 | 134 | progressFlex.SetBorder(true).SetTitle("Restore Progress") 135 | 136 | t.app.SetRoot(progressFlex, true) 137 | 138 | go func() { 139 | ticker := time.NewTicker(1 * time.Second) 140 | defer ticker.Stop() 141 | 142 | for { 143 | select { 144 | case <-ticker.C: 145 | action, err := service.GetAction(actionId) 146 | if err != nil { 147 | utils.LogError(fmt.Sprintf("Error fetching action: %v", err)) 148 | t.app.QueueUpdateDraw(func() { 149 | progressText.SetText(fmt.Sprintf("Error: %v", err)) 150 | }) 151 | return 152 | } 153 | 154 | t.app.QueueUpdateDraw(func() { 155 | status := action.ActionStatus 156 | progress := action.ActionProgress 157 | 158 | statusColor := "yellow" 159 | if status == string(utils.CONST_ACTION_STATUS_COMPLETED) { 160 | statusColor = "green" 161 | } else if status == string(utils.CONST_ACTION_STATUS_FAILED) { 162 | statusColor = "red" 163 | } 164 | 165 | progressText.SetText(fmt.Sprintf("Status: [%s]%s[white]\nProgress: [%s]%d%%[white]", statusColor, status, statusColor, progress)) 166 | 167 | // Update custom progress bar 168 | _, _, width, _ := progressFlex.GetInnerRect() 169 | progressBarWidth := width 170 | completedWidth := int(float64(progress) / 100 * float64(progressBarWidth)) 171 | progressBar.SetText(fmt.Sprintf("[green]%s[white]%s", 172 | strings.Repeat("█", completedWidth), 173 | strings.Repeat("░", progressBarWidth-completedWidth))) 174 | 175 | if status == string(utils.CONST_ACTION_STATUS_COMPLETED) || status == string(utils.CONST_ACTION_STATUS_FAILED) { 176 | time.Sleep(2 * time.Second) // Show the final status for 2 seconds 177 | t.isRestoreFormActive = false 178 | t.app.SetRoot(t.mainFlex, true) 179 | return 180 | } 181 | }) 182 | } 183 | } 184 | }() 185 | 186 | } 187 | 188 | func (t *DispatcherTUI) createRestoreAction(sourcePath, destPath string) (string, error) { 189 | action := utils.Action{ 190 | Id: utils.GenerateUUID(), 191 | AgentId: t.currentAgentID, 192 | Action: string(utils.CONST_AGENT_ACTION_RESTORE), 193 | ActionType: 
string(utils.CONST_AGENT_ACTION_RESTORE), 194 | ActionStatus: string(utils.CONST_ACTION_STATUS_WAITING), 195 | SourceFilePath: sourcePath, 196 | TargetFilePath: destPath, 197 | TimeCreated: utils.NewUTCTime(time.Now()), 198 | } 199 | 200 | err := service.InsertOrUpdateAction(action) 201 | if err != nil { 202 | return "", err 203 | } 204 | 205 | return action.Id, nil 206 | } 207 | 208 | func (t *DispatcherTUI) updateFileTable(table *tview.Table, dir string) { 209 | table.Clear() 210 | 211 | table.SetCell(0, 0, tview.NewTableCell("Name").SetTextColor(tcell.ColorYellow).SetSelectable(false)) 212 | table.SetCell(0, 1, tview.NewTableCell("Type").SetTextColor(tcell.ColorYellow).SetSelectable(false)) 213 | table.SetCell(0, 2, tview.NewTableCell("Size").SetTextColor(tcell.ColorYellow).SetSelectable(false)) 214 | table.SetCell(0, 3, tview.NewTableCell("Modified").SetTextColor(tcell.ColorYellow).SetSelectable(false)) 215 | 216 | files, err := os.ReadDir(dir) 217 | if err != nil { 218 | t.showError(fmt.Sprintf("Error reading directory: %v", err)) 219 | return 220 | } 221 | 222 | table.SetCell(1, 0, tview.NewTableCell("..").SetTextColor(tcell.ColorDarkCyan)) 223 | table.SetCell(1, 1, tview.NewTableCell("Directory")) 224 | table.SetCell(1, 2, tview.NewTableCell("")) 225 | table.SetCell(1, 3, tview.NewTableCell("")) 226 | 227 | row := 2 228 | for _, file := range files { 229 | info, err := file.Info() 230 | if err != nil { 231 | continue 232 | } 233 | 234 | name := file.Name() 235 | fileType := "File" 236 | size := fmt.Sprintf("%d", info.Size()) 237 | modified := info.ModTime().Format("2006-01-02 15:04:05") 238 | 239 | if file.IsDir() { 240 | fileType = "Directory" 241 | size = "" 242 | name = "[::b]" + name // Make directories bold 243 | } 244 | 245 | table.SetCell(row, 0, tview.NewTableCell(name).SetTextColor(tcell.ColorWhite)) 246 | table.SetCell(row, 1, tview.NewTableCell(fileType)) 247 | table.SetCell(row, 2, tview.NewTableCell(size)) 248 | table.SetCell(row, 3, tview.NewTableCell(modified)) 249 | 250 | row++ 251 | } 252 | 253 | table.SetTitle(fmt.Sprintf("<%s>", dir)).SetBorderColor(tcell.ColorGreen) 254 | table.Select(1, 0).SetFixed(1, 0).SetDoneFunc(func(key tcell.Key) { 255 | if key == tcell.KeyEnter { 256 | row, _ := table.GetSelection() 257 | if row == 1 { 258 | // Go to parent directory 259 | parentDir := filepath.Dir(dir) 260 | if parentDir != dir { 261 | t.updateFileTable(table, parentDir) 262 | } 263 | } else if row > 1 && row <= len(files)+1 { 264 | selectedFile := files[row-2] 265 | if selectedFile.IsDir() { 266 | t.updateFileTable(table, filepath.Join(dir, selectedFile.Name())) 267 | } else { 268 | // You can add file viewing functionality here if needed 269 | // t.showMessage(fmt.Sprintf("Selected file: %s", selectedFile.Name())) 270 | } 271 | } 272 | } 273 | }) 274 | } 275 | -------------------------------------------------------------------------------- /utils/banner.go: -------------------------------------------------------------------------------- 1 | package utils 2 | 3 | import ( 4 | "fmt" 5 | 6 | "github.com/fatih/color" 7 | ) 8 | 9 | const logoTemplate = ` 10 | _ _ 11 | | | | | 12 | | |__ | | _ _ ____ _____ ____ 13 | | _ \ | | ( \ / ) / ___)| ___ || _ \ 14 | | |_) )| | ) X ( | | | ____|| |_| | 15 | |____/ \_)(_/ \_)|_| |_____)| __/ 16 | |_| 17 | made with ♥ by team xmigrate 18 | ` 19 | 20 | func PrintAnimatedLogo() { 21 | cyan := color.New(color.FgMagenta).SprintFunc() 22 | 23 | // Clear the console (this may not work on all systems) 24 | fmt.Print("\033[H\033[2J") 25 | 26 | logo
:= fmt.Sprint(logoTemplate) 27 | fmt.Println(cyan(logo)) 28 | 29 | } 30 | 31 | func GetDiskBanner() string { 32 | return ` 33 | _ _ 34 | | | | | 35 | | |__ | | _ _ ____ _____ ____ 36 | | _ \ | | ( \ / ) / ___)| ___ || _ \ 37 | | |_) )| | ) X ( | | | ____|| |_| | 38 | |____/ \_)(_/ \_)|_| |_____)| __/ 39 | |_| 40 | ` 41 | } 42 | -------------------------------------------------------------------------------- /utils/bpf_bpfel_x86.o: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/xmigrate/blxrep/3a931c6b216b0ed28f18729ba9fc626f42b9fcf5/utils/bpf_bpfel_x86.o -------------------------------------------------------------------------------- /utils/bpf_helpers.go: -------------------------------------------------------------------------------- 1 | // Code generated by bpf2go; DO NOT EDIT. 2 | //go:build 386 || amd64 3 | 4 | package utils 5 | 6 | import ( 7 | "bytes" 8 | _ "embed" 9 | "fmt" 10 | "io" 11 | 12 | "github.com/cilium/ebpf" 13 | ) 14 | 15 | // loadBpf returns the embedded CollectionSpec for bpf. 16 | func loadBpf() (*ebpf.CollectionSpec, error) { 17 | reader := bytes.NewReader(_BpfBytes) 18 | spec, err := ebpf.LoadCollectionSpecFromReader(reader) 19 | if err != nil { 20 | return nil, fmt.Errorf("can't load bpf: %w", err) 21 | } 22 | 23 | return spec, err 24 | } 25 | 26 | // loadBpfObjects loads bpf and converts it into a struct. 27 | // 28 | // The following types are suitable as obj argument: 29 | // 30 | // *bpfObjects 31 | // *bpfPrograms 32 | // *bpfMaps 33 | // 34 | // See ebpf.CollectionSpec.LoadAndAssign documentation for details. 35 | func LoadBpfObjects(obj interface{}, opts *ebpf.CollectionOptions) error { 36 | spec, err := loadBpf() 37 | if err != nil { 38 | return err 39 | } 40 | 41 | return spec.LoadAndAssign(obj, opts) 42 | } 43 | 44 | // bpfSpecs contains maps and programs before they are loaded into the kernel. 45 | // 46 | // It can be passed ebpf.CollectionSpec.Assign. 47 | type bpfSpecs struct { 48 | bpfProgramSpecs 49 | bpfMapSpecs 50 | } 51 | 52 | // bpfSpecs contains programs before they are loaded into the kernel. 53 | // 54 | // It can be passed ebpf.CollectionSpec.Assign. 55 | type bpfProgramSpecs struct { 56 | BlockRqComplete *ebpf.ProgramSpec `ebpf:"block_rq_complete"` 57 | } 58 | 59 | // bpfMapSpecs contains maps before they are loaded into the kernel. 60 | // 61 | // It can be passed ebpf.CollectionSpec.Assign. 62 | type bpfMapSpecs struct { 63 | Events *ebpf.MapSpec `ebpf:"events"` 64 | TargetDiskMap *ebpf.MapSpec `ebpf:"target_disk_map"` 65 | } 66 | 67 | // bpfObjects contains all objects after they have been loaded into the kernel. 68 | // 69 | // It can be passed to loadBpfObjects or ebpf.CollectionSpec.LoadAndAssign. 70 | type BpfObjects struct { 71 | bpfPrograms 72 | bpfMaps 73 | } 74 | 75 | func (o *BpfObjects) Close() error { 76 | return _BpfClose( 77 | &o.bpfPrograms, 78 | &o.bpfMaps, 79 | ) 80 | } 81 | 82 | // bpfMaps contains all maps after they have been loaded into the kernel. 83 | // 84 | // It can be passed to loadBpfObjects or ebpf.CollectionSpec.LoadAndAssign. 85 | type bpfMaps struct { 86 | Events *ebpf.Map `ebpf:"events"` 87 | TargetDiskMap *ebpf.Map `ebpf:"target_disk_map"` 88 | } 89 | 90 | func (m *bpfMaps) Close() error { 91 | return _BpfClose( 92 | m.Events, 93 | m.TargetDiskMap, 94 | ) 95 | } 96 | 97 | // bpfPrograms contains all programs after they have been loaded into the kernel. 
98 | // 99 | // It can be passed to loadBpfObjects or ebpf.CollectionSpec.LoadAndAssign. 100 | type bpfPrograms struct { 101 | BlockRqComplete *ebpf.Program `ebpf:"block_rq_complete"` 102 | } 103 | 104 | func (p *bpfPrograms) Close() error { 105 | return _BpfClose( 106 | p.BlockRqComplete, 107 | ) 108 | } 109 | 110 | func _BpfClose(closers ...io.Closer) error { 111 | for _, closer := range closers { 112 | if err := closer.Close(); err != nil { 113 | return err 114 | } 115 | } 116 | return nil 117 | } 118 | 119 | // Do not access this directly. 120 | // 121 | //go:embed bpf_bpfel_x86.o 122 | var _BpfBytes []byte 123 | -------------------------------------------------------------------------------- /utils/config.go: -------------------------------------------------------------------------------- 1 | package utils 2 | 3 | var AppConfiguration AppConfig 4 | var AgentConfiguration AgentConfig 5 | var PublicKeyData []byte 6 | -------------------------------------------------------------------------------- /utils/constants.go: -------------------------------------------------------------------------------- 1 | package utils 2 | 3 | type CONST_AGENT_ACTION string 4 | type CONST_DISPATCHER_ACTION string 5 | type CONST_ACTION_TYPE string 6 | type CONST_ACTION_STATUS_TYPE string 7 | 8 | const ( 9 | CONST_ADHOC_ACTION CONST_ACTION_TYPE = "ADHOC_ACTION" 10 | CONST_SCHEDULED_ACTION CONST_ACTION_TYPE = "SCHEDULED_ACTION" 11 | CONST_START_ACTION CONST_AGENT_ACTION = "started" 12 | CONST_COMPRESS_ACTION CONST_AGENT_ACTION = "compress" 13 | CONST_AGENT_ACTION_CLONE CONST_AGENT_ACTION = "clone" 14 | CONST_AGENT_ACTION_PAUSE CONST_AGENT_ACTION = "pause" 15 | CONST_AGENT_ACTION_RESUME CONST_AGENT_ACTION = "resume" 16 | CONST_AGENT_ACTION_LIVE CONST_AGENT_ACTION = "live" 17 | CONST_AGENT_ACTION_STOP_LIVE CONST_AGENT_ACTION = "stop_live" 18 | CONST_AGENT_ACTION_SYNC CONST_AGENT_ACTION = "sync" 19 | CONST_AGENT_ACTION_RESTORE CONST_AGENT_ACTION = "restore" 20 | CONST_AGENT_ACTION_PREPARE CONST_AGENT_ACTION = "prepare" 21 | CONST_AGENT_ACTION_PARTITION_RESTORE CONST_AGENT_ACTION = "partition_restore" 22 | CONST_ACTION_STATUS_IN_PROGRESS CONST_ACTION_STATUS_TYPE = "in_progress" 23 | CONST_ACTION_STATUS_COMPLETED CONST_ACTION_STATUS_TYPE = "completed" 24 | CONST_ACTION_STATUS_FAILED CONST_ACTION_STATUS_TYPE = "failed" 25 | CONST_ACTION_STATUS_PAUSED CONST_ACTION_STATUS_TYPE = "paused" 26 | CONST_ACTION_STATUS_WAITING CONST_ACTION_STATUS_TYPE = "waiting" 27 | CONST_ACTION_STATUS_RESUMED CONST_ACTION_STATUS_TYPE = "resumed" 28 | CONST_BLOCK_SIZE uint64 = 512 29 | CONST_CHANNEL_SIZE uint64 = 12000 30 | CONST_MAX_ACTIONS_TO_PROCESS uint32 = 1000 31 | Daily Frequency = "daily" 32 | Weekly Frequency = "weekly" 33 | Monthly Frequency = "monthly" 34 | ) 35 | -------------------------------------------------------------------------------- /utils/dev_info.go: -------------------------------------------------------------------------------- 1 | package utils 2 | 3 | import ( 4 | "fmt" 5 | "os" 6 | "syscall" 7 | "unsafe" 8 | ) 9 | 10 | const ( 11 | BLKGETSIZE64 = 0x80081272 12 | ) 13 | 14 | func GetTotalSectors(devicePath string) (uint64, error) { 15 | file, err := os.Open(devicePath) 16 | if err != nil { 17 | return 0, fmt.Errorf("failed to open device %s: %v", devicePath, err) 18 | } 19 | defer file.Close() 20 | 21 | var size uint64 22 | _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, file.Fd(), BLKGETSIZE64, uintptr(unsafe.Pointer(&size))) 23 | if errno != 0 { 24 | return 0, fmt.Errorf("ioctl error: %v", errno) 25 | } 26 | 
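// The BLKGETSIZE64 ioctl reports the device capacity in bytes (not sectors), so the count below is capacity divided by the assumed logical sector size.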
27 | // Assuming 512-byte sectors, which is common 28 | sectorSize := uint64(512) 29 | totalSectors := size / sectorSize 30 | 31 | return totalSectors, nil 32 | } 33 | -------------------------------------------------------------------------------- /utils/disk.go: -------------------------------------------------------------------------------- 1 | package utils 2 | 3 | import ( 4 | "fmt" 5 | "os" 6 | "os/exec" 7 | "strings" 8 | ) 9 | 10 | func MountImage(imagePath, mountDir string) (bool, error) { 11 | // Step 1: Create a loopback device 12 | loopDev, err := createLoopbackDevice(imagePath) 13 | if err != nil { 14 | return false, fmt.Errorf("failed to create loopback device: %v", err) 15 | } 16 | defer func() { 17 | if err != nil { 18 | // If an error occurred, try to clean up the loopback device 19 | exec.Command("losetup", "-d", loopDev).Run() 20 | } 21 | }() 22 | 23 | // Step 2: Mount the loopback device 24 | err = mountLoopbackDevice(loopDev, mountDir) 25 | if err != nil { 26 | // the deferred cleanup above detaches the loopback device, since err is non-nil here 27 | return false, fmt.Errorf("failed to mount loopback device: %v", err) 28 | } 29 | 30 | fmt.Printf("Successfully mounted %s to %s using loopback device %s\n", imagePath, mountDir, loopDev) 31 | return true, nil 32 | } 33 | 34 | func createLoopbackDevice(imagePath string) (string, error) { 35 | cmd := exec.Command("losetup", "--partscan", "--find", "--show", imagePath) 36 | output, err := cmd.Output() 37 | if err != nil { 38 | return "", fmt.Errorf("failed to create loopback device: %v", err) 39 | } 40 | return strings.TrimSpace(string(output)), nil 41 | } 42 | 43 | func mountLoopbackDevice(loopDev, mountDir string) error { 44 | // Ensure the mount directory exists 45 | if err := os.MkdirAll(mountDir, 0755); err != nil { 46 | return fmt.Errorf("failed to create mount directory: %v", err) 47 | } 48 | 49 | cmd := exec.Command("mount", loopDev, mountDir) 50 | return cmd.Run() 51 | } 52 | -------------------------------------------------------------------------------- /utils/duration.go: -------------------------------------------------------------------------------- 1 | package utils 2 | 3 | import ( 4 | "fmt" 5 | "regexp" 6 | "strconv" 7 | "time" 8 | ) 9 | 10 | // ParseDuration parses a duration string that supports h,m,s,d,w,y units 11 | func ParseDuration(duration string) (time.Duration, error) { 12 | // Regular expression to match number followed by unit 13 | re := regexp.MustCompile(`^(\d+)([hmsdwy])$`) 14 | matches := re.FindStringSubmatch(duration) 15 | 16 | if matches == nil { 17 | return 0, fmt.Errorf("invalid duration format: %s. 
Expected format: number followed by h,m,s,d,w, or y", duration) 18 | } 19 | 20 | value, err := strconv.Atoi(matches[1]) 21 | if err != nil { 22 | return 0, fmt.Errorf("invalid number in duration: %v", err) 23 | } 24 | 25 | unit := matches[2] 26 | 27 | switch unit { 28 | case "h": 29 | return time.Duration(value) * time.Hour, nil 30 | case "m": 31 | return time.Duration(value) * time.Minute, nil 32 | case "s": 33 | return time.Duration(value) * time.Second, nil 34 | case "d": 35 | return time.Duration(value) * 24 * time.Hour, nil 36 | case "w": 37 | return time.Duration(value) * 7 * 24 * time.Hour, nil 38 | case "y": 39 | return time.Duration(value) * 365 * 24 * time.Hour, nil 40 | default: 41 | return 0, fmt.Errorf("unsupported duration unit: %s", unit) 42 | } 43 | } 44 | -------------------------------------------------------------------------------- /utils/file_helper.go: -------------------------------------------------------------------------------- 1 | package utils 2 | 3 | import ( 4 | "bufio" 5 | "io" 6 | "os" 7 | 8 | "github.com/google/uuid" 9 | ) 10 | 11 | func CopyFile(source, destination string) error { 12 | srcFile, err := os.Open(source) 13 | if err != nil { 14 | return err 15 | } 16 | defer srcFile.Close() 17 | 18 | destFile, err := os.Create(destination) 19 | if err != nil { 20 | return err 21 | } 22 | defer destFile.Close() 23 | 24 | _, err = io.Copy(destFile, srcFile) 25 | if err != nil { 26 | return err 27 | } 28 | 29 | err = destFile.Sync() 30 | if err != nil { 31 | return err 32 | } 33 | 34 | return nil 35 | } 36 | 37 | func Contains(slice []string, item string) bool { 38 | for _, s := range slice { 39 | if s == item { 40 | return true 41 | } 42 | } 43 | return false 44 | } 45 | 46 | func GenerateUUID() string { 47 | id, err := uuid.NewRandom() 48 | if err != nil { 49 | 50 | return "" 51 | } 52 | return id.String() 53 | } 54 | 55 | func ReadLastLine(filename string) (string, error) { 56 | file, err := os.Open(filename) 57 | if err != nil { 58 | return "", err 59 | } 60 | defer file.Close() 61 | 62 | var lastLine string 63 | scanner := bufio.NewScanner(file) 64 | for scanner.Scan() { 65 | lastLine = scanner.Text() 66 | } 67 | 68 | if err := scanner.Err(); err != nil { 69 | return "", err 70 | } 71 | 72 | LogDebug("Last line: " + lastLine) 73 | return lastLine, nil 74 | } 75 | -------------------------------------------------------------------------------- /utils/logger.go: -------------------------------------------------------------------------------- 1 | package utils 2 | 3 | import ( 4 | "fmt" 5 | "log" 6 | "os" 7 | "path/filepath" 8 | "sync" 9 | ) 10 | 11 | var ( 12 | logFile *os.File 13 | logger *log.Logger 14 | logMu sync.Mutex 15 | ) 16 | 17 | func InitLogging(logDir string) error { 18 | logMu.Lock() 19 | defer logMu.Unlock() 20 | 21 | if logger != nil { 22 | return nil // Already initialized 23 | } 24 | 25 | if err := os.MkdirAll(logDir, 0755); err != nil { 26 | return fmt.Errorf("failed to create log directory: %v", err) 27 | } 28 | 29 | logPath := filepath.Join(logDir, "blxrep.log") 30 | file, err := os.OpenFile(logPath, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) 31 | if err != nil { 32 | return fmt.Errorf("failed to open log file: %v", err) 33 | } 34 | 35 | logFile = file 36 | logger = log.New(file, "", log.LstdFlags) 37 | return nil 38 | } 39 | 40 | func LogDebug(message string) { 41 | logMu.Lock() 42 | defer logMu.Unlock() 43 | 44 | if logger != nil { 45 | logger.Printf("[DEBUG] %s", message) 46 | } 47 | } 48 | 49 | func LogError(message string) { 50 | 
logMu.Lock() 51 | defer logMu.Unlock() 52 | 53 | if logger != nil { 54 | logger.Printf("[ERROR] %s", message) 55 | } 56 | } 57 | 58 | func CloseLogFile() { 59 | logMu.Lock() 60 | defer logMu.Unlock() 61 | 62 | if logFile != nil { 63 | logFile.Close() 64 | } 65 | } 66 | -------------------------------------------------------------------------------- /utils/stream.go: -------------------------------------------------------------------------------- 1 | package utils 2 | 3 | import ( 4 | "log" 5 | "os" 6 | 7 | "github.com/gorilla/websocket" 8 | ) 9 | 10 | func StreamData(blocks []AgentDataBlock, websock *websocket.Conn, resume bool, srcPath string, action CONST_AGENT_ACTION, startTime int64) { 11 | var agentBlocks AgentBulkMessage 12 | agentBlocks.StartTime = startTime 13 | agentBlocks.AgentID, _ = os.Hostname() 14 | agentBlocks.Data = blocks 15 | agentBlocks.SrcPath = srcPath 16 | agentBlocks.Action = action 17 | agentBlocks.TotalBlocks, _ = GetTotalSectors(srcPath) 18 | 19 | if resume { 20 | agentBlocks.DataType = "resume" 21 | } else { 22 | agentBlocks.DataType = "snapshot" 23 | } 24 | err := websock.WriteJSON(agentBlocks) 25 | if err != nil { 26 | log.Fatalf("Could not send snapshot data: %v", err) 27 | } 28 | } 29 | -------------------------------------------------------------------------------- /utils/user_details.go: -------------------------------------------------------------------------------- 1 | package utils 2 | 3 | import ( 4 | "bufio" 5 | "fmt" 6 | "os" 7 | "regexp" 8 | "strings" 9 | 10 | "gopkg.in/yaml.v3" 11 | ) 12 | 13 | type UserConfig struct { // persisted to /etc/blxrep/config.yaml via yaml.v3; explicit yaml tags make the field mapping clear 14 | Name string `json:"name" yaml:"name"` 15 | Email string `json:"email" yaml:"email"` 16 | Organization string `json:"organization" yaml:"organization"` 17 | } 18 | 19 | func getConfigFilePath() string { 20 | filePath := "/etc/blxrep/config.yaml" 21 | 22 | if _, err := os.Stat(filePath); os.IsNotExist(err) { 23 | LogError("Config file not found at " + filePath) 24 | return filePath 25 | } 26 | return filePath 27 | } 28 | 29 | func loadUserConfig() (UserConfig, error) { 30 | configPath := getConfigFilePath() 31 | file, err := os.Open(configPath) 32 | if err != nil { 33 | return UserConfig{}, err 34 | } 35 | defer file.Close() 36 | 37 | var config UserConfig 38 | decoder := yaml.NewDecoder(file) 39 | err = decoder.Decode(&config) 40 | return config, err 41 | } 42 | 43 | func saveUserConfig(config UserConfig) error { 44 | configPath := getConfigFilePath() 45 | file, err := os.Create(configPath) 46 | if err != nil { 47 | return err 48 | } 49 | defer file.Close() 50 | 51 | encoder := yaml.NewEncoder(file) 52 | return encoder.Encode(config) 53 | } 54 | 55 | func isValidEmail(email string) bool { 56 | emailRegex := regexp.MustCompile(`(?i)^[a-z0-9._%+\-]+@[a-z0-9.\-]+\.[a-z]{2,4}$`) // (?i) accepts upper- and lowercase addresses 57 | return emailRegex.MatchString(email) 58 | } 59 | 60 | func GetUserInfo() UserConfig { 61 | config, err := loadUserConfig() 62 | if err == nil { 63 | fmt.Println("Existing user configuration found.") 64 | return config 65 | } 66 | 67 | reader := bufio.NewReader(os.Stdin) 68 | 69 | for config.Name == "" { 70 | fmt.Print("Enter your name: ") 71 | config.Name, _ = reader.ReadString('\n') 72 | config.Name = strings.TrimSpace(config.Name) 73 | if config.Name == "" { 74 | fmt.Println("Name cannot be empty. 
Please try again.") 75 | } 76 | } 77 | 78 | for config.Email == "" || !isValidEmail(config.Email) { 79 | fmt.Print("Enter your email: ") 80 | config.Email, _ = reader.ReadString('\n') 81 | config.Email = strings.TrimSpace(config.Email) 82 | if !isValidEmail(config.Email) { 83 | fmt.Println("Invalid email format. Please try again.") 84 | } 85 | } 86 | 87 | for config.Organization == "" { 88 | fmt.Print("Enter your organization: ") 89 | config.Organization, _ = reader.ReadString('\n') 90 | config.Organization = strings.TrimSpace(config.Organization) 91 | if config.Organization == "" { 92 | fmt.Println("Organization cannot be empty. Please try again.") 93 | } 94 | } 95 | 96 | err = saveUserConfig(config) 97 | if err != nil { 98 | LogError("Error saving user config: " + err.Error()) 99 | } 100 | 101 | return config 102 | } 103 | --------------------------------------------------------------------------------