/logs/blxrep.log
21 | ```
22 |
23 | If you see any errors, check the documentation or ask for help in the community.
24 |
25 |
--------------------------------------------------------------------------------
/docs/tui.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: An interactive terminal UI
3 | description: blxrep provides an interactive terminal UI to navigate through the backups of an agent.
4 | ---
5 | # TUI Operations
6 |
7 | ## Navigating through the backups of an agent
8 |
9 | The TUI is provided by the `blxrep tui` command. It is a terminal UI for navigating the backups of an agent and supports the following operations (a sample invocation follows the list):
10 |
11 | - Navigate through the backups of an agent
12 | - Mount a backup to a point in time
13 | - Restore a file or partition from a backup
14 | - Check the scheduled backups progress and status
15 | - Check the status of the agent
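
For example, a minimal invocation might look like the following, assuming the dispatcher keeps its backups under `/data` and the agent registered itself with the ID `my-hostname` (both values are placeholders; check `blxrep tui --help` for the exact flags in your build):

```
sudo blxrep tui --data-dir /data --agent my-hostname
```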
16 |
17 | ### Quick start video
18 |
19 |
20 |
--------------------------------------------------------------------------------
/go.mod:
--------------------------------------------------------------------------------
1 | module github.com/xmigrate/blxrep
2 |
3 | go 1.23.0
4 |
5 | require (
6 | github.com/cilium/ebpf v0.17.1
7 | github.com/fatih/color v1.18.0
8 | github.com/gdamore/tcell/v2 v2.8.1
9 | github.com/google/uuid v1.6.0
10 | github.com/gorilla/websocket v1.5.3
11 | github.com/klauspost/compress v1.17.11
12 | github.com/newrelic/go-agent/v3 v3.36.0
13 | github.com/rivo/tview v0.0.0-20241227133733-17b7edb88c57
14 | github.com/shirou/gopsutil v3.21.11+incompatible
15 | github.com/spf13/cobra v1.8.1
16 | github.com/spf13/viper v1.19.0
17 | go.etcd.io/bbolt v1.3.11
18 | golang.org/x/sys v0.29.0
19 | gopkg.in/yaml.v3 v3.0.1
20 | )
21 |
22 | require (
23 | github.com/fsnotify/fsnotify v1.7.0 // indirect
24 | github.com/gdamore/encoding v1.0.1 // indirect
25 | github.com/go-ole/go-ole v1.2.6 // indirect
26 | github.com/hashicorp/hcl v1.0.0 // indirect
27 | github.com/inconshreveable/mousetrap v1.1.0 // indirect
28 | github.com/lucasb-eyer/go-colorful v1.2.0 // indirect
29 | github.com/magiconair/properties v1.8.7 // indirect
30 | github.com/mattn/go-colorable v0.1.13 // indirect
31 | github.com/mattn/go-isatty v0.0.20 // indirect
32 | github.com/mattn/go-runewidth v0.0.16 // indirect
33 | github.com/mitchellh/mapstructure v1.5.0 // indirect
34 | github.com/pelletier/go-toml/v2 v2.2.2 // indirect
35 | github.com/rivo/uniseg v0.4.7 // indirect
36 | github.com/sagikazarmark/locafero v0.4.0 // indirect
37 | github.com/sagikazarmark/slog-shim v0.1.0 // indirect
38 | github.com/sourcegraph/conc v0.3.0 // indirect
39 | github.com/spf13/afero v1.11.0 // indirect
40 | github.com/spf13/cast v1.6.0 // indirect
41 | github.com/spf13/pflag v1.0.5 // indirect
42 | github.com/subosito/gotenv v1.6.0 // indirect
43 | github.com/tklauser/go-sysconf v0.3.14 // indirect
44 | github.com/tklauser/numcpus v0.8.0 // indirect
45 | github.com/yusufpapurcu/wmi v1.2.4 // indirect
46 | go.uber.org/atomic v1.9.0 // indirect
47 | go.uber.org/multierr v1.9.0 // indirect
48 | golang.org/x/exp v0.0.0-20230905200255-921286631fa9 // indirect
49 | golang.org/x/net v0.25.0 // indirect
50 | golang.org/x/term v0.28.0 // indirect
51 | golang.org/x/text v0.21.0 // indirect
52 | google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157 // indirect
53 | google.golang.org/grpc v1.65.0 // indirect
54 | google.golang.org/protobuf v1.34.2 // indirect
55 | gopkg.in/ini.v1 v1.67.0 // indirect
56 | )
57 |
--------------------------------------------------------------------------------
/main.go:
--------------------------------------------------------------------------------
1 | /*
2 | Copyright © 2024 Vishnu KS vishnu@xmigrate.cloud
3 | */
4 | package main
5 |
6 | import (
7 | "fmt"
8 | "os"
9 |
10 | "github.com/xmigrate/blxrep/cmd"
11 | "github.com/xmigrate/blxrep/tui"
12 | "github.com/xmigrate/blxrep/utils"
13 |
14 | _ "embed"
15 |
16 | "github.com/spf13/cobra"
17 | )
18 |
19 | // $BPF_CLANG and $BPF_CFLAGS are set by the Makefile.
20 | //go:generate go run github.com/cilium/ebpf/cmd/bpf2go -target native bpf bpf/trace-blocks.c -- -I./bpf/headers
21 |
22 | var publicKeyData []byte
23 |
24 | func main() {
25 | utils.PrintAnimatedLogo()
26 |
27 | utils.PublicKeyData = publicKeyData
28 |
29 | rootCmd := cmd.GetRootCmd()
30 |
31 | // Modify the dispatcher command to use the TUI
32 | for _, subCmd := range rootCmd.Commands() {
33 | if subCmd.Use == "tui" {
34 | originalRun := subCmd.Run
35 | subCmd.Run = func(cmd *cobra.Command, args []string) {
36 | dataDir, _ := cmd.Flags().GetString("data-dir")
37 | agent, _ := cmd.Flags().GetString("agent")
38 | if dataDir != "" && agent != "" {
39 | tui.RunDispatcherTUI(dataDir)
40 | } else {
41 | // Fall back to original behavior if flags are not set
42 | originalRun(cmd, args)
43 | }
44 | }
45 | break
46 | }
47 | }
48 |
49 | if err := rootCmd.Execute(); err != nil {
50 | fmt.Println(err)
51 | os.Exit(1)
52 | }
53 | }
54 |
--------------------------------------------------------------------------------
/mkdocs.yml:
--------------------------------------------------------------------------------
1 | site_name: blxrep
2 | site_url: https://blxrep.xmigrate.cloud
3 | repo_url: https://github.com/xmigrate/blxrep
4 | repo_name: xmigrate/blxrep
5 |
6 | plugins:
7 | - search
8 | - social:
9 | cards_layout_options:
10 | background_color: "#dde0e6"
11 | color: "#4c1d95"
12 | background_image: assets/blxrepsocialxm.jpg
13 |
14 | theme:
15 | name: material
16 | favicon: assets/blxrepIcon.svg
17 | palette:
18 | - scheme: default
19 | primary: white
20 | accent: deep purple
21 | toggle:
22 | icon: material/brightness-7
23 | name: Switch to dark mode
24 |
25 | # Palette toggle for dark mode
26 | - scheme: slate
27 | primary: black
28 | accent: deep purple
29 | toggle:
30 | icon: material/brightness-4
31 | name: Switch to light mode
32 | # primary: white
33 | # accent: deep purple
34 | logo: assets/blxrepIcon.svg
35 | features:
36 | - navigation.sections
37 | - content.code.copy
38 | - navigation.footer
39 | nav:
40 | - Home: index.md
41 | - Motivation: motivation.md
42 | - Architecture: architecture.md
43 | - Setup: setup.md
44 | - TUI: tui.md
45 | - Troubleshoot: troubleshoot.md
46 |
47 | markdown_extensions:
48 | - pymdownx.tabbed:
49 | alternate_style: true
50 | - pymdownx.superfences:
51 | custom_fences:
52 | - name: mermaid
53 | class: mermaid
54 | format: !!python/name:pymdownx.superfences.fence_code_format
56 | - attr_list
57 |
58 | extra:
59 | analytics:
60 | provider: google
61 | property: G-XRMVNLVYE1
62 |
63 | copyright: Copyright © 2025 Xmigrate Inc.
64 |
65 |
--------------------------------------------------------------------------------
/package/etc/blxrep/config.yaml:
--------------------------------------------------------------------------------
1 | # Sample agent configuration
2 | # mode: "agent"
3 | # id: "hostname"
4 | # dispatcher-addr: "localhost:8080"
5 |
6 | # Sample dispatcher configuration
7 | # mode: "dispatcher"
8 | # data-dir: "/data"
9 | # policy-dir: "/etc/blxrep/policies"
--------------------------------------------------------------------------------
/package/etc/blxrep/policies/default.yaml:
--------------------------------------------------------------------------------
1 | name: "default-backup-policy"
2 | description: "Backup policy for all servers"
3 | archive_interval: 48h
4 | snapshot_frequency: "daily"
5 | snapshot_time: "12:00"
6 | bandwidth_limit: 100
7 | snapshot_retention: 30
8 | live_sync_frequency: 2m
9 | transition_after_days: 30
10 | delete_after_days: 90
11 |
12 | targets:
13 |   # Wildcard pattern that matches all agents
14 | - pattern: "*"
15 | disks_excluded:
16 | - "/dev/xvdz" # If you don't want to exclude any disks from backup put a disk name that doesn't exist
17 |
--------------------------------------------------------------------------------
/package/etc/systemd/system/blxrep.service:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=blxrep service
3 | After=network.target
4 |
5 | [Service]
6 | ExecStart=/usr/local/bin/blxrep start --config /etc/blxrep/config.yaml
7 | Restart=always
8 | User=root
9 | Group=root
10 |
11 | [Install]
12 | WantedBy=multi-user.target
13 |
--------------------------------------------------------------------------------
/package/usr/local/bin/blxrep:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/xmigrate/blxrep/3a931c6b216b0ed28f18729ba9fc626f42b9fcf5/package/usr/local/bin/blxrep
--------------------------------------------------------------------------------
/pkg/agent/agent.go:
--------------------------------------------------------------------------------
1 | package agent
2 |
3 | import (
4 | "log"
5 | )
6 |
7 | func Start(agentID string, dispatcherAddr string) {
8 | log.Printf("Agent %s is running...", agentID)
9 | // Connect to snapshot endpoint
10 | go ConnectToDispatcher(agentID, dispatcherAddr)
11 | // Keep the main goroutine alive
12 | select {}
13 | }
14 |
--------------------------------------------------------------------------------
/pkg/agent/clone.go:
--------------------------------------------------------------------------------
1 | package agent
2 |
3 | import (
4 | "bufio"
5 | "context"
6 | "fmt"
7 | "io"
8 | "log"
9 | "os"
10 | "sync"
11 | "time"
12 |
13 | "github.com/xmigrate/blxrep/utils"
14 |
15 | "github.com/gorilla/websocket"
16 | )
17 |
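// Clone streams the full contents of srcPath over the websocket in blocks of
// blockSize bytes, batching roughly channelSize bytes per StreamData call.
// It stops early when ctx is cancelled and clears the isCloning flag on exit.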
18 | func Clone(ctx context.Context, blockSize int, srcPath string, channelSize int, websock *websocket.Conn, cloneMutex *sync.Mutex, isCloning *bool) {
19 |
20 | // Open the source disk.
21 | src, err := os.Open(srcPath)
22 | if err != nil {
23 | log.Printf("Failed to open source disk: %v", err)
24 | }
25 | defer src.Close()
26 |
27 | // Use a buffered reader to minimize system calls.
28 | bufReader := bufio.NewReaderSize(src, blockSize*8000)
29 |
30 | // Allocate a buffer for one block.
31 | buf := make([]byte, blockSize)
32 |
33 | var blocks []utils.AgentDataBlock
34 | var blockCount uint64
35 | var batchSize int
36 | log.Printf("Cloning started for %s", srcPath)
37 | startTime := time.Now().Unix()
38 | for {
39 | select {
40 | case <-ctx.Done():
41 | // Handle context cancellation and exit the goroutine
42 | log.Println("Cloning was paused/cancelled and goroutine is exiting.")
43 | if len(blocks) > 0 {
44 | utils.StreamData(blocks, websock, false, srcPath, utils.CONST_AGENT_ACTION_CLONE, startTime)
45 | }
46 | cloneMutex.Lock()
47 | *isCloning = false
48 | cloneMutex.Unlock()
49 | return
50 | default:
51 | // Read data in larger chunks to reduce syscall overhead
52 | n, err := bufReader.Read(buf)
53 | if n > 0 {
54 | for i := 0; i < n; i += blockSize {
55 | end := i + blockSize
56 | if end > n {
57 | end = n
58 | }
59 | blockData := utils.AgentDataBlock{
60 | BlockNumber: blockCount,
61 | BlockData: append([]byte(nil), buf[i:end]...),
62 | }
63 | blocks = append(blocks, blockData)
64 | blockCount++
65 | batchSize += end - i
66 |
67 | if batchSize >= channelSize {
68 | utils.StreamData(blocks, websock, false, srcPath, utils.CONST_AGENT_ACTION_CLONE, startTime)
69 | blocks = nil
70 | batchSize = 0
71 | }
72 | }
73 | }
74 | if err != nil {
75 | if err == io.EOF {
76 | if len(blocks) > 0 {
77 | utils.StreamData(blocks, websock, false, srcPath, utils.CONST_AGENT_ACTION_CLONE, startTime)
78 | }
79 | cloneMutex.Lock()
80 | *isCloning = false
81 | cloneMutex.Unlock()
82 | return
83 | }
84 | log.Fatalf("Failed to read block: %v", err)
85 | }
86 | }
87 | }
88 | }
89 |
90 | func Resume(ctx context.Context, blockSize int, srcPath string, channelSize int, readFrom int64, websock *websocket.Conn, cloneMutex *sync.Mutex, isCloning *bool) {
91 | log.Printf("Resume started block: %d", readFrom)
92 | // Open the source disk.
93 | src, err := os.Open(srcPath)
94 | if err != nil {
95 | log.Printf("Failed to open source disk: %v", err)
96 | }
97 | defer src.Close()
98 | var blocks []utils.AgentDataBlock
99 |
100 | // Loop over the blocks in the source disk.
101 | var blockCount int64 = 0 // Initialize counter to 0
102 | var batchSize int = 0
103 | // Seek to correct block number
104 | for {
105 | _, err := src.Seek(int64(blockSize), io.SeekCurrent)
106 | if err != nil && err != io.EOF {
107 | fmt.Println("Error reading from snapshot:", err)
108 | return
109 | }
110 |
111 | if blockCount == readFrom {
112 | log.Printf("Seeked to %d", blockCount)
113 | break
114 | }
115 | blockCount++
116 | }
117 | // Loop over the blocks in the source disk.
118 | startTime := time.Now().Unix()
119 | for {
120 | select {
121 | case <-ctx.Done():
122 | // Handle context cancellation and exit the goroutine
123 | log.Println("Cloning was paused/cancelled and goroutine is exiting.")
124 | if len(blocks) > 0 {
125 | utils.StreamData(blocks, websock, true, srcPath, utils.CONST_AGENT_ACTION_CLONE, startTime)
126 | utils.LogDebug(fmt.Sprintf("Flush remaining data of size %d", batchSize))
127 | }
128 | return
129 | default:
130 | var bytesRead int
131 | buf := make([]byte, blockSize)
132 | for bytesRead < blockSize {
133 | n, err := src.Read(buf[bytesRead:])
134 | if n > 0 {
135 | bytesRead += n
136 | }
137 | if err != nil {
138 | if err == io.EOF {
139 | break
140 | }
141 | log.Fatalf("Failed to read block: %v", err)
142 | }
143 | }
144 | if bytesRead > 0 {
145 | blockData := utils.AgentDataBlock{
146 | BlockNumber: uint64(blockCount),
147 | BlockData: append([]byte(nil), buf[:bytesRead]...),
148 | }
149 | blocks = append(blocks, blockData)
150 | if batchSize >= channelSize {
151 | //Code to send data to websocket
152 | utils.StreamData(blocks, websock, true, srcPath, utils.CONST_AGENT_ACTION_CLONE, startTime)
153 | batchSize = 0
154 | blocks = nil
155 | }
156 | } else {
157 | log.Printf("No more data to read from the source %d", blockCount)
158 | if len(blocks) > 0 {
159 | utils.StreamData(blocks, websock, true, srcPath, utils.CONST_AGENT_ACTION_CLONE, startTime)
160 | log.Printf("Flush remaining data of size %d", batchSize)
161 | }
162 | cloneMutex.Lock()
163 | *isCloning = false
164 | cloneMutex.Unlock()
165 | return
166 | }
167 | blockCount++
168 | batchSize++
169 | }
170 | }
171 |
172 | }
173 |
--------------------------------------------------------------------------------
/pkg/agent/footprint_linux.go:
--------------------------------------------------------------------------------
1 | //go:build linux
2 | // +build linux
3 |
4 | package agent
5 |
6 | import (
7 | "fmt"
8 | "io/ioutil"
9 | "net"
10 | "os"
11 | "os/exec"
12 | "regexp"
13 | "strings"
14 |
15 | "github.com/xmigrate/blxrep/utils"
16 |
17 | "github.com/shirou/gopsutil/cpu"
18 | "github.com/shirou/gopsutil/disk"
19 | "github.com/shirou/gopsutil/mem"
20 | )
21 |
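// Footprint collects details about the host: hostname, CPU model and core
// count, memory, IPv4 network interfaces, mounted ext/xfs disks and the OS
// release information from /etc/os-release.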
22 | func Footprint() (*utils.VMInfo, error) {
23 |
24 | hostname, err := os.Hostname()
25 | if err != nil {
26 | return nil, err
27 | }
28 |
29 | cpuInfo, err := cpu.Info()
30 | if err != nil {
31 | return nil, err
32 | }
33 |
34 | cpuCores := len(cpuInfo)
35 |
36 | memInfo, err := mem.VirtualMemory()
37 | if err != nil {
38 | return nil, err
39 | }
40 |
41 | interfaces, err := net.Interfaces()
42 | if err != nil {
43 | return nil, err
44 | }
45 |
46 | var InterfaceInfo []struct {
47 | Name string `json:"name"`
48 | IPAddress string `json:"ip_address"`
49 | SubnetMask string `json:"subnet_mask"`
50 | CIDRNotation string `json:"cidr_notation"`
51 | NetworkCIDR string `json:"network_cidr"`
52 | }
53 |
54 | // var ipAddress, network, subnet string
55 | for _, iface := range interfaces {
56 | addrs, err := iface.Addrs()
57 | if err != nil {
58 | fmt.Println(err)
59 | continue
60 | }
61 |
62 | for _, addr := range addrs {
63 | ipNet, ok := addr.(*net.IPNet)
64 | if !ok {
65 | continue
66 | }
67 |
68 | ip4 := ipNet.IP.To4()
69 | if ip4 == nil {
70 | continue
71 | }
72 |
73 | mask := ipNet.Mask
74 | networkIP := net.IP(make([]byte, 4))
75 | for i := range ip4 {
76 | networkIP[i] = ip4[i] & mask[i]
77 | }
78 |
79 | cidr, _ := ipNet.Mask.Size()
80 |
81 | InterfaceInfo = append(InterfaceInfo, struct {
82 | Name string `json:"name"`
83 | IPAddress string `json:"ip_address"`
84 | SubnetMask string `json:"subnet_mask"`
85 | CIDRNotation string `json:"cidr_notation"`
86 | NetworkCIDR string `json:"network_cidr"`
87 | }{
88 | Name: iface.Name,
89 | IPAddress: ip4.String(),
90 | SubnetMask: net.IP(mask).String(),
91 | CIDRNotation: fmt.Sprintf("%s/%d", ip4, cidr),
92 | NetworkCIDR: fmt.Sprintf("%s/%d", networkIP, cidr),
93 | })
94 | }
95 | }
96 |
97 | partitions, err := disk.Partitions(true)
98 | if err != nil {
99 | return nil, err
100 | }
101 |
102 | var diskDetails []utils.DiskDetailsStruct
103 | for _, partition := range partitions {
104 | mountpoint := partition.Mountpoint
105 | if !strings.HasPrefix(mountpoint, "/var/") && !strings.HasPrefix(mountpoint, "/run/") {
106 | usage, err := disk.Usage(mountpoint)
107 | if err == nil {
108 | fsType := partition.Fstype
109 | if fsType == "xfs" || strings.HasPrefix(fsType, "ext") {
110 | partitionName := partition.Device
111 | diskName := strings.TrimRightFunc(partitionName, func(r rune) bool {
112 | return '0' <= r && r <= '9'
113 | })
114 |
115 | if strings.Contains(diskName, "nvme") {
116 | diskName = strings.TrimRightFunc(diskName, func(r rune) bool {
117 | return 'p' <= r
118 | })
119 | }
120 |
121 | cmd := exec.Command("sudo", "blkid", partitionName)
122 | output, err := cmd.CombinedOutput()
123 | if err != nil {
124 | return nil, err
125 | }
126 |
127 | re := regexp.MustCompile(`\b(?i)UUID="([a-f0-9-]+)"`)
128 | 				match := re.FindStringSubmatch(string(output))
				if len(match) < 2 {
					// No UUID found in blkid output; skip this partition instead of panicking below
					continue
				}
129 | diskDetails = append(diskDetails,
130 | utils.DiskDetailsStruct{
131 | FsType: fsType,
132 | Size: usage.Total,
133 | Uuid: match[1],
134 | Name: diskName,
135 | MountPoint: mountpoint,
136 | })
137 | }
138 | }
139 | }
140 | }
141 |
142 | content, err := ioutil.ReadFile("/etc/os-release")
143 | if err != nil {
144 | return nil, err
145 | }
146 |
147 | osRelease := string(content)
148 | distro := getValue(osRelease, "ID")
149 | majorVersion := getValue(osRelease, "VERSION_ID")
150 |
151 | vmInfo := utils.VMInfo{
152 | Hostname: hostname,
153 | CpuModel: cpuInfo[0].ModelName,
154 | CpuCores: cpuCores,
155 | Ram: memInfo.Total,
156 | InterfaceInfo: InterfaceInfo,
157 | DiskDetails: diskDetails,
158 | OsDistro: distro,
159 | OsVersion: majorVersion,
160 | }
161 |
162 | return &vmInfo, nil
163 | }
164 |
165 | func getValue(osRelease, key string) string {
166 | lines := strings.Split(osRelease, "\n")
167 | for _, line := range lines {
168 | if strings.HasPrefix(line, key+"=") {
169 | return strings.Trim(strings.TrimPrefix(line, key+"="), `"`)
170 | }
171 | }
172 | return ""
173 | }
174 |
--------------------------------------------------------------------------------
/pkg/agent/footprint_windows.go:
--------------------------------------------------------------------------------
1 | //go:build windows
2 | // +build windows
3 |
4 | package agent
5 |
6 | import (
7 | "fmt"
8 | "log"
9 | "net"
10 | "os"
11 | "os/exec"
12 | "regexp"
13 | "strconv"
14 | "strings"
15 |
16 | "blxrep/utils"
17 |
18 | "github.com/shirou/gopsutil/cpu"
19 | "github.com/shirou/gopsutil/host"
20 | "github.com/shirou/gopsutil/mem"
21 | )
22 |
23 | func Footprint() (*utils.VMInfo, error) {
24 |
25 | hostname, err := os.Hostname()
26 | if err != nil {
27 | return nil, err
28 | }
29 |
30 | cpuInfo, err := cpu.Info()
31 | if err != nil {
32 | return nil, err
33 | }
34 |
35 | cpuCores := len(cpuInfo)
36 |
37 | memInfo, err := mem.VirtualMemory()
38 | if err != nil {
39 | return nil, err
40 | }
41 |
42 | interfaces, err := net.Interfaces()
43 | if err != nil {
44 | return nil, err
45 | }
46 |
47 | var InterfaceInfo []struct {
48 | Name string `json:"name"`
49 | IPAddress string `json:"ip_address"`
50 | SubnetMask string `json:"subnet_mask"`
51 | CIDRNotation string `json:"cidr_notation"`
52 | NetworkCIDR string `json:"network_cidr"`
53 | }
54 |
55 | for _, iface := range interfaces {
56 | addrs, err := iface.Addrs()
57 | if err != nil {
58 | fmt.Println(err)
59 | continue
60 | }
61 |
62 | for _, addr := range addrs {
63 | ipNet, ok := addr.(*net.IPNet)
64 | if !ok {
65 | continue
66 | }
67 |
68 | ip4 := ipNet.IP.To4()
69 | if ip4 == nil {
70 | continue
71 | }
72 |
73 | mask := ipNet.Mask
74 | networkIP := net.IP(make([]byte, 4))
75 | for i := range ip4 {
76 | networkIP[i] = ip4[i] & mask[i]
77 | }
78 |
79 | cidr, _ := ipNet.Mask.Size()
80 |
81 | InterfaceInfo = append(InterfaceInfo, struct {
82 | Name string `json:"name"`
83 | IPAddress string `json:"ip_address"`
84 | SubnetMask string `json:"subnet_mask"`
85 | CIDRNotation string `json:"cidr_notation"`
86 | NetworkCIDR string `json:"network_cidr"`
87 | }{
88 | Name: iface.Name,
89 | IPAddress: ip4.String(),
90 | SubnetMask: net.IP(mask).String(),
91 | CIDRNotation: fmt.Sprintf("%s/%d", ip4, cidr),
92 | NetworkCIDR: fmt.Sprintf("%s/%d", networkIP, cidr),
93 | })
94 | }
95 | }
96 |
97 | diskDetails, err := getDiskDetails()
98 | if err != nil {
99 | return nil, err
100 | }
101 |
102 | osVersion, err := host.Info()
103 | if err != nil {
104 | return nil, err
105 | }
106 |
107 | vmInfo := utils.VMInfo{
108 | Hostname: hostname,
109 | CpuModel: cpuInfo[0].ModelName,
110 | CpuCores: cpuCores,
111 | Ram: memInfo.Total,
112 | InterfaceInfo: InterfaceInfo,
113 | DiskDetails: diskDetails,
114 | OsDistro: osVersion.Platform,
115 | OsVersion: osVersion.PlatformVersion,
116 | }
117 | utils.LogDebug(fmt.Sprintf("Footprint %s", vmInfo))
118 | return &vmInfo, nil
119 | }
120 |
121 | func getDiskDetails() ([]utils.DiskDetailsStruct, error) {
122 | physicalDrives, err := getPhysicalDrives()
123 | if err != nil {
124 | return nil, err
125 | }
126 |
127 | cDriveDiskIndex, err := getCDriveDiskIndex()
128 | if err != nil {
129 | return nil, err
130 | }
131 |
132 | var diskDetails []utils.DiskDetailsStruct
133 | for _, drive := range physicalDrives {
134 | size := drive.BytesPerSector * drive.TotalSectors
135 | mountPoint := "/data"
136 | if drive.Index == cDriveDiskIndex {
137 | mountPoint = "/"
138 | }
139 |
140 | diskDetails = append(diskDetails, utils.DiskDetailsStruct{
141 | FsType: "NTFS",
142 | Size: size,
143 | Uuid: "", // UUID not retrieved in this approach
144 | Name: drive.DeviceID,
145 | MountPoint: mountPoint,
146 | })
147 | }
148 |
149 | return diskDetails, nil
150 | }
151 |
152 | type PhysicalDrive struct {
153 | DeviceID string
154 | BytesPerSector uint64
155 | Partitions int
156 | TotalSectors uint64
157 | Index int
158 | }
159 |
160 | func getPhysicalDrives() ([]PhysicalDrive, error) {
161 | cmd := exec.Command("wmic", "diskdrive", "get", "DeviceID,BytesPerSector,Partitions,TotalSectors,Index")
162 | output, err := cmd.CombinedOutput()
163 | if err != nil {
164 | return nil, err
165 | }
166 |
167 | var drives []PhysicalDrive
168 | lines := strings.Split(string(output), "\n")
169 | for i, line := range lines {
170 | log.Printf("line physical drive %s", line)
171 | if i == 0 || strings.TrimSpace(line) == "" { // Skip the header and empty lines
172 | continue
173 | }
174 |
175 | fields := strings.Fields(line)
176 | if len(fields) < 5 {
177 | continue
178 | }
179 |
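		// wmic prints the requested properties in alphabetical order
		// (BytesPerSector, DeviceID, Index, Partitions, TotalSectors),
		// which is why the field indices below do not match the order
		// given on the command line.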
180 | deviceID := fields[1]
181 | bytesPerSector, err := strconv.ParseUint(fields[0], 10, 64)
182 | if err != nil {
183 | log.Printf("Error parsing BytesPerSector for %s: %v", deviceID, err)
184 | continue
185 | }
186 | partitions, err := strconv.Atoi(fields[3])
187 | if err != nil {
188 | log.Printf("Error parsing Partitions for %s: %v", deviceID, err)
189 | continue
190 | }
191 | totalSectors, err := strconv.ParseUint(fields[4], 10, 64)
192 | if err != nil {
193 | log.Printf("Error parsing TotalSectors for %s: %v", deviceID, err)
194 | continue
195 | }
196 | index, err := strconv.Atoi(fields[2])
197 | if err != nil {
198 | log.Printf("Error parsing Index for %s: %v", deviceID, err)
199 | continue
200 | }
201 |
202 | drives = append(drives, PhysicalDrive{
203 | DeviceID: deviceID,
204 | BytesPerSector: bytesPerSector,
205 | Partitions: partitions,
206 | TotalSectors: totalSectors,
207 | Index: index,
208 | })
209 | }
210 |
211 | return drives, nil
212 | }
213 |
214 | type Partition struct {
215 | DeviceID string
216 | DiskIndex int
217 | }
218 |
219 | func getPartitions() ([]Partition, error) {
220 | cmd := exec.Command("wmic", "partition", "get", "DeviceID,DiskIndex")
221 | output, err := cmd.CombinedOutput()
222 | if err != nil {
223 | return nil, err
224 | }
225 |
226 | var partitions []Partition
227 | lines := strings.Split(string(output), "\n")
228 | for i, line := range lines {
229 | log.Printf("line partitions %s", line)
230 | if i == 0 || strings.TrimSpace(line) == "" { // Skip the header and empty lines
231 | continue
232 | }
233 |
234 | fields := strings.Fields(line)
235 | if len(fields) < 2 {
236 | continue
237 | }
238 |
239 | deviceID := fields[0]
240 | diskIndex, err := strconv.Atoi(fields[1])
241 | if err != nil {
242 | log.Printf("Error parsing DiskIndex for %s: %v", deviceID, err)
243 | continue
244 | }
245 |
246 | partitions = append(partitions, Partition{
247 | DeviceID: deviceID,
248 | DiskIndex: diskIndex,
249 | })
250 | }
251 |
252 | return partitions, nil
253 | }
254 |
255 | func getCDriveDiskIndex() (int, error) {
256 | cmd := exec.Command("wmic", "path", "Win32_LogicalDiskToPartition", "get", "Antecedent,Dependent")
257 | output, err := cmd.CombinedOutput()
258 | if err != nil {
259 | return -1, err
260 | }
261 |
262 | lines := strings.Split(string(output), "\n")
263 | re := regexp.MustCompile(`Win32_DiskPartition\.DeviceID="([^"]+)"`)
264 | for i, line := range lines {
265 | log.Printf("line c drive disk index: %s", line)
266 | if i == 0 || strings.TrimSpace(line) == "" { // Skip the header and empty lines
267 | continue
268 | }
269 |
270 | parts := strings.Fields(line)
271 | if len(parts) < 2 {
272 | log.Printf("Unexpected output from wmic: %s length: %d", line, len(parts))
273 | continue
274 | }
275 |
276 | // Combine parts for parsing
277 | combinedParts := strings.Join(parts, " ")
278 | if strings.Contains(combinedParts, "C:") {
279 | match := re.FindStringSubmatch(combinedParts)
280 | if len(match) > 1 {
281 | partitionID := match[1]
282 | diskIndex, err := getDiskIndexFromPartition(partitionID)
283 | if err != nil {
284 | return -1, err
285 | }
286 | return diskIndex, nil
287 | }
288 | }
289 | }
290 | return -1, fmt.Errorf("C: drive not found")
291 | }
292 |
293 | func getDiskIndexFromPartition(partitionID string) (int, error) {
294 | // Construct the command string
295 | command := fmt.Sprintf("wmic partition where (DeviceID='%s') get DiskIndex", partitionID)
296 | log.Printf("Executing command: %s", command)
297 |
298 | // Execute the command
299 | cmd := exec.Command("cmd", "/C", command)
300 | output, err := cmd.CombinedOutput()
301 | if err != nil {
302 | log.Printf("Error getting DiskIndex for partition %s", partitionID)
303 | log.Printf("Command output: %s", string(output))
304 | log.Printf("Error message: %s", err.Error())
305 | return -1, err
306 | }
307 |
308 | // Log the output for debugging
309 | log.Printf("Command output: %s", string(output))
310 |
311 | lines := strings.Split(string(output), "\n")
312 | for i, line := range lines {
313 | log.Printf("line disk index from partition: %s", line)
314 | if i == 0 || strings.TrimSpace(line) == "" { // Skip the header and empty lines
315 | continue
316 | }
317 |
318 | fields := strings.Fields(line)
319 | if len(fields) == 1 {
320 | return strconv.Atoi(fields[0])
321 | }
322 | }
323 |
324 | return -1, fmt.Errorf("DiskIndex not found for partition: %s", partitionID)
325 | }
326 |
--------------------------------------------------------------------------------
/pkg/agent/live.go:
--------------------------------------------------------------------------------
1 | package agent
2 |
3 | import (
4 | "bytes"
5 | "context"
6 | "crypto/sha256"
7 | "encoding/binary"
8 | "encoding/gob"
9 | "encoding/hex"
10 | "errors"
11 | "fmt"
12 | "io"
13 | "log"
14 | "os"
15 | "os/exec"
16 | "os/signal"
17 | "runtime"
18 | "syscall"
19 | "unsafe"
20 |
21 | "github.com/cilium/ebpf/link"
22 | "github.com/cilium/ebpf/ringbuf"
23 | "github.com/cilium/ebpf/rlimit"
24 | "github.com/gorilla/websocket"
25 | "github.com/xmigrate/blxrep/utils"
26 | "golang.org/x/sys/unix"
27 | )
28 |
29 | func setTargetDiskMajorMinor(objs *utils.BpfObjects, major uint32, minor uint32) error {
30 | majorKey := uint32(0)
31 | minorKey := uint32(1)
32 |
33 | // Put major number at index 0
34 | if err := objs.TargetDiskMap.Put(majorKey, major); err != nil {
35 | return err
36 | }
37 |
38 | // Put minor number at index 1
39 | if err := objs.TargetDiskMap.Put(minorKey, minor); err != nil {
40 | return err
41 | }
42 |
43 | return nil
44 | }
45 |
46 | func GetBlocks(ctx context.Context, blockSize int, srcPath string, websock *websocket.Conn, agentId string) {
47 | log.Printf("Block size: %d", blockSize)
48 | // Subscribe to signals for terminating the program.
49 | stopper := make(chan os.Signal, 1)
50 | signal.Notify(stopper, os.Interrupt, syscall.SIGTERM)
51 |
52 | // Allow the current process to lock memory for eBPF resources.
53 | if err := rlimit.RemoveMemlock(); err != nil {
54 | log.Printf("Error removing memlock: %v", err)
55 | }
56 |
57 | fileInfo, err := os.Stat(srcPath)
58 | if err != nil {
59 | log.Printf("Error retrieving information for /dev/xvda: %v", err)
60 | }
61 |
62 | // Asserting type to sys stat to get Sys() method
63 | sysInfo, ok := fileInfo.Sys().(*syscall.Stat_t)
64 | if !ok {
65 | log.Println("Error asserting type to syscall.Stat_t")
66 | }
67 |
68 | // Extracting major and minor numbers
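	// Note: this assumes the classic 8-bit minor encoding of dev_t; for large
	// device numbers, unix.Major/unix.Minor would be the safer decoding.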
69 | desiredMajor := uint32(sysInfo.Rdev / 256)
70 | desiredMinor := uint32(sysInfo.Rdev % 256)
71 | log.Printf("Major/minor: %d %d", desiredMajor, desiredMinor)
72 |
73 | // Load pre-compiled programs and maps into the kernel.
74 | objs := utils.BpfObjects{}
75 | if err := utils.LoadBpfObjects(&objs, nil); err != nil {
76 | log.Printf("loading objects: %v", err)
77 | }
78 | defer objs.Close()
79 |
80 | if err := setTargetDiskMajorMinor(&objs, desiredMajor, desiredMinor); err != nil {
81 | log.Printf("setting major/minor: %v", err)
82 | }
83 | // create a Tracepoint link
84 | tp, err := link.Tracepoint("block", "block_rq_complete", objs.BlockRqComplete, nil)
85 | if err != nil {
86 | log.Printf("opening tracepoint: %s", err)
87 | }
88 | defer tp.Close()
89 |
90 | // Open a ringbuf reader from userspace RINGBUF map described in the
91 | // eBPF C program.
92 | rd, err := ringbuf.NewReader(objs.Events)
93 | if err != nil {
94 | log.Printf("opening ringbuf reader: %s", err)
95 | }
96 | defer rd.Close()
97 |
98 | // Close the reader when the process receives a signal, which will exit
99 | // the read loop.
100 | go func() {
101 | for {
102 | select {
103 | case <-stopper:
104 | if err := rd.Close(); err != nil {
105 | log.Printf("closing ringbuf reader: %s", err)
106 | }
107 | return
108 |
109 | case <-ctx.Done():
110 |
111 | if err := rd.Close(); err != nil {
112 | log.Printf("closing ringbuf reader: %s", err)
113 | }
114 |
115 | return
116 |
117 | }
118 | }
119 | }()
120 |
121 | log.Println("Waiting for events..")
122 | for {
123 | record, err := rd.Read()
124 | if err != nil {
125 | if errors.Is(err, ringbuf.ErrClosed) {
126 | log.Println("Received signal, exiting..")
127 | log.Println(err.Error())
128 | os.Exit(0)
129 |
130 | }
131 | log.Printf("reading from reader: %s", err)
132 | continue
133 | }
134 |
135 | var event utils.Event
136 | // Parse the ringbuf event entry into a bpfEvent structure.
137 | if err := binary.Read(bytes.NewBuffer(record.RawSample), binary.LittleEndian, &event); err != nil {
138 | log.Printf("parsing ringbuf event: %s", err)
139 | continue
140 | }
141 | var binaryBuffer bytes.Buffer
142 | var liveSectors utils.AgentBulkMessage
143 | liveSectors.AgentID = agentId
144 | liveSectors.SrcPath = srcPath
145 | liveSectors.DataType = "cst"
146 | liveSectors.StartSector = event.Block
147 | liveSectors.EndSector = event.EndBlock
148 | enc := gob.NewEncoder(&binaryBuffer)
149 | if err := enc.Encode(liveSectors); err != nil {
150 | log.Printf("Could not encode: %v", err)
151 | continue
152 | }
153 | binaryData := binaryBuffer.Bytes()
154 |
155 | if err := websock.WriteMessage(websocket.BinaryMessage, binaryData); err != nil {
156 | log.Printf("Could not send blocks: %v", err)
157 | continue
158 | }
159 | log.Printf("Sent sectors: %d-%d", event.Block, event.EndBlock)
160 | }
161 | }
162 |
163 | func syncAndClearCache(srcPath string) error {
164 | // For future, if filesystem changes are larger then we only have to use sync, if changes are smaller then we have to do all this
165 | cmd := exec.Command("sudo", "sync")
166 | if err := cmd.Run(); err != nil {
167 | return fmt.Errorf("failed to sync: %w", err)
168 | }
169 |
170 | cmd = exec.Command("sudo", "sh", "-c", "echo 3 > /proc/sys/vm/drop_caches")
171 | if err := cmd.Run(); err != nil {
172 | return fmt.Errorf("failed to clear cache: %w", err)
173 | }
174 |
175 | // Open device with read-write permissions
176 | file, err := os.OpenFile(srcPath, os.O_RDWR, 0666)
177 | if err != nil {
178 | return fmt.Errorf("failed to open device: %w", err)
179 | }
180 | defer file.Close()
181 |
182 | // File-specific sync
183 | if err := file.Sync(); err != nil {
184 | return fmt.Errorf("fsync failed: %w", err)
185 | }
186 |
187 | // Clear the buffer cache
188 | _, _, errno := unix.Syscall(unix.SYS_IOCTL, file.Fd(), unix.BLKFLSBUF, uintptr(unsafe.Pointer(nil)))
189 | if errno != 0 {
190 | return errno
191 | }
192 |
193 | return nil
194 | }
195 |
196 | func ReadDataBlocks(blockSize int, srcPath string, pair utils.BlockPair, websock *websocket.Conn) {
197 | log.Printf("ReadDataBlocks started for blocks %d to %d on %s", pair.Start, pair.End, srcPath)
198 |
199 | var agentBlocks utils.AgentBulkMessage
200 | agentBlocks.AgentID, _ = os.Hostname()
201 | agentBlocks.DataType = "incremental"
202 | agentBlocks.SrcPath = srcPath
203 |
204 | src, err := os.OpenFile(srcPath, os.O_RDONLY|unix.O_DIRECT, 0)
205 | if err != nil {
206 | log.Printf("Error opening source: %v", err)
207 | return
208 | }
209 | defer src.Close()
210 |
211 | _, err = src.Seek(int64(pair.Start)*int64(blockSize), io.SeekStart)
212 | if err != nil {
213 | log.Printf("Error seeking to block %d: %v", pair.Start, err)
214 | return
215 | }
216 |
217 | for blockNumber := pair.Start; blockNumber <= pair.End; blockNumber++ {
218 | buf := make([]byte, blockSize)
219 | n, err := src.Read(buf)
220 | if err != nil && err != io.EOF {
221 | log.Printf("Error reading block %d: %v", blockNumber, err)
222 | continue
223 | }
224 |
225 | block := utils.AgentDataBlock{
226 | BlockNumber: blockNumber,
227 | BlockData: buf[:n],
228 | }
229 | checksumArray := sha256.Sum256(block.BlockData)
230 | block.Checksum = hex.EncodeToString(checksumArray[:])
231 | agentBlocks.Data = append(agentBlocks.Data, block)
232 |
233 | // Send data if we've accumulated enough or if this is the last block, 4096 should be changed to bandwidth limit
234 | if len(agentBlocks.Data) >= 4096 || blockNumber == pair.End {
235 | // Serialize and send data
236 | var binaryBuffer bytes.Buffer
237 | enc := gob.NewEncoder(&binaryBuffer)
238 | if err := enc.Encode(agentBlocks); err != nil {
239 | log.Printf("Could not encode: %v", err)
240 | continue
241 | }
242 | binaryData := binaryBuffer.Bytes()
243 |
244 | if err := websock.WriteMessage(websocket.BinaryMessage, binaryData); err != nil {
245 | log.Printf("Could not send blocks data: %v", err)
246 | continue
247 | }
248 |
249 | log.Printf("Sent batch of %d blocks", len(agentBlocks.Data))
250 | agentBlocks.Data = []utils.AgentDataBlock{} // Clear the data for the next batch
251 | }
252 |
253 | // Allow other goroutines to run
254 | runtime.Gosched()
255 | }
256 | }
257 |
258 | func ReadBlocks(ctx context.Context, blockSize int, blockPairs []utils.BlockPair, srcPath string, websock *websocket.Conn) {
259 | log.Printf("Reading and sending block pairs for %s", srcPath)
260 | if err := syncAndClearCache(srcPath); err != nil {
261 | log.Printf("Failed to sync and clear cache: %v", err)
262 | }
263 | for _, pair := range blockPairs {
264 | select {
265 | case <-ctx.Done():
266 | log.Println("Context cancelled, stopping ReadBlocks")
267 | return
268 | default:
269 | ReadDataBlocks(blockSize, srcPath, pair, websock)
270 | }
271 | }
272 | log.Println("Finished reading and sending block pairs")
273 | }
274 |
275 | func processSyncAction(msg utils.Message, ws *websocket.Conn) {
276 | syncData := msg.SyncMessage
277 | log.Printf("Start syncing from: %s", syncData.SrcPath)
278 | 	ctx, cancel := context.WithCancel(context.Background())
	cancelSync = cancel // let processStopSyncAction cancel this sync
279 | go ReadBlocks(ctx, syncData.BlockSize, syncData.Blocks, syncData.SrcPath, ws)
280 | }
281 |
282 | func processStopSyncAction() {
283 | log.Println("Stoping sync action")
284 | if cancelSync != nil {
285 | log.Println("Stopping live migrations")
286 | cancelSync()
287 | }
288 | syncMutex.Lock()
289 | isSyncing = false
290 | syncMutex.Unlock()
291 | }
292 |
--------------------------------------------------------------------------------
/pkg/agent/restore.go:
--------------------------------------------------------------------------------
1 | package agent
2 |
3 | import (
4 | "archive/tar"
5 | "bytes"
6 | "compress/gzip"
7 | "fmt"
8 | "io"
9 | "log"
10 | "os"
11 | "path/filepath"
12 | "strings"
13 | "time"
14 |
15 | "github.com/xmigrate/blxrep/utils"
16 | )
17 |
18 | func processChunk(state *utils.RestoreState, data []byte, chunkIndex int) error {
19 | log.Printf("Received chunk %d, size: %d bytes", chunkIndex, len(data))
20 |
21 | if chunkIndex != state.ChunksReceived {
22 | return fmt.Errorf("received out-of-order chunk: expected %d, got %d", state.ChunksReceived, chunkIndex)
23 | }
24 |
25 | _, err := state.Buffer.Write(data)
26 | if err != nil {
27 | return fmt.Errorf("error writing to buffer: %v", err)
28 | }
29 |
30 | state.ChunksReceived++
31 |
32 | // If we've received all chunks, process the data
33 | if state.ChunksReceived == state.TotalChunks {
34 | return processCompleteData(state)
35 | }
36 |
37 | return nil
38 | }
39 |
40 | func processCompleteData(state *utils.RestoreState) error {
41 | utils.LogDebug(fmt.Sprintf("Processing complete data, total size: %d bytes", state.Buffer.Len()))
42 |
43 | gzipReader, err := gzip.NewReader(bytes.NewReader(state.Buffer.Bytes()))
44 | if err != nil {
45 | return fmt.Errorf("error creating gzip reader: %v", err)
46 | }
47 | defer gzipReader.Close()
48 |
49 | tarReader := tar.NewReader(gzipReader)
50 |
51 | for {
52 | header, err := tarReader.Next()
53 | if err == io.EOF {
54 | break // End of archive
55 | }
56 | if err != nil {
57 | return fmt.Errorf("error reading tar: %v", err)
58 | }
59 |
60 | err = extractEntry(state.FilePath, header, tarReader)
61 | if err != nil {
62 | return fmt.Errorf("error extracting entry: %v", err)
63 | }
64 | }
65 |
66 | log.Println("Restore process completed successfully")
67 | return nil
68 | }
69 |
70 | func extractEntry(basePath string, header *tar.Header, tarReader *tar.Reader) error {
71 | // Handle both cases: with and without "RESTORE" prefix
72 | relPath := header.Name
73 | if strings.HasPrefix(relPath, "RESTORE/") {
74 | relPath = strings.TrimPrefix(relPath, "RESTORE/")
75 | }
76 |
77 | relPath = filepath.FromSlash(relPath)
78 | target := filepath.Join(basePath, relPath)
79 |
80 | log.Printf("Extracting: %s, type: %c, size: %d bytes", target, header.Typeflag, header.Size)
81 |
82 | switch header.Typeflag {
83 | case tar.TypeDir:
84 | return os.MkdirAll(target, 0755)
85 | case tar.TypeReg:
86 | return extractRegularFile(target, header, tarReader)
87 | default:
88 | log.Printf("Unsupported file type: %c for %s", header.Typeflag, target)
89 | return nil // Skipping unsupported types
90 | }
91 | }
92 |
93 | func extractRegularFile(target string, header *tar.Header, tarReader *tar.Reader) error {
94 | dir := filepath.Dir(target)
95 | if err := os.MkdirAll(dir, 0755); err != nil {
96 | return fmt.Errorf("error creating directory %s: %v", dir, err)
97 | }
98 |
99 | file, err := os.OpenFile(target, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.FileMode(header.Mode))
100 | if err != nil {
101 | return fmt.Errorf("error creating file %s: %v", target, err)
102 | }
103 | defer file.Close()
104 |
105 | _, err = io.Copy(file, tarReader)
106 | if err != nil {
107 | return fmt.Errorf("error writing file %s: %v", target, err)
108 | }
109 |
110 | log.Printf("File extracted: %s", target)
111 | return nil
112 | }
113 |
114 | func cleanupRestore(state *utils.RestoreState) {
115 | log.Println("Cleaning up restore process")
116 | if state.GzipReader != nil {
117 | state.GzipReader.Close()
118 | }
119 | // Consider removing partially extracted files here
120 | }
121 |
122 | const BlockSize = 512
123 |
124 | func RestorePartition(c *chan utils.AgentBulkMessage, agentID string, progress *int, destPath string) {
125 | log.Printf("Starting RestorePartition process for agent %s, destPath: %s", agentID, destPath)
126 |
127 | f, err := os.OpenFile(destPath, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644)
128 | if err != nil {
129 | log.Printf("Cannot open partition %s for agent %s: %v", destPath, agentID, err)
130 | return
131 | }
132 | defer f.Close()
133 |
134 | timeoutDuration := 20 * time.Second
135 |
136 | totalBytesWritten := int64(0)
137 | totalMessagesReceived := 0
138 | lastActivityTime := time.Now()
139 | for {
140 | select {
141 | case msg, ok := <-*c:
142 | if !ok {
143 | log.Printf("Channel closed, exiting RestorePartition for agent %s", agentID)
144 | return
145 | }
146 | lastActivityTime = time.Now()
147 | totalMessagesReceived++
148 | batchBytesWritten := int64(0)
149 |
150 | for _, blockData := range msg.Data {
151 | n, err := f.Write(blockData.BlockData)
152 | if err != nil {
153 | log.Printf("Failed to write to file for agent %s: %v", agentID, err)
154 | return
155 | }
156 | batchBytesWritten += int64(n)
157 | totalBytesWritten += int64(n)
158 |
159 | }
160 | log.Printf("Batch received for agent %s: Wrote %d bytes", agentID, batchBytesWritten)
161 | log.Printf("Total for agent %s: Messages received: %d, Bytes written: %d", agentID, totalMessagesReceived, totalBytesWritten)
162 | // TODO: Below code might not be needed, hence commenting it out. No point in updating progress for restore from agent in dispatcher boltdb.
163 | // actionId := strings.Join([]string{agentID, fmt.Sprintf("%d", msg.StartTime)}, "_")
164 | // dispatcher.UpdateProgress(msg, progress, actionId, agentID, destPath)
165 |
166 | case <-time.After(timeoutDuration):
167 | log.Printf("No data for %v. Exiting RestorePartition for agent %s", timeoutDuration, agentID)
168 | log.Printf("Final stats for agent %s: Total messages received: %d, Total bytes written: %d", agentID, totalMessagesReceived, totalBytesWritten)
169 | log.Printf("Time since last activity: %v", time.Since(lastActivityTime))
170 | return
171 | }
172 | }
173 | }
174 |
--------------------------------------------------------------------------------
/pkg/agent/scheduled_jobs.go:
--------------------------------------------------------------------------------
1 | package agent
2 |
3 | import (
4 | "fmt"
5 | "log"
6 | "strconv"
7 | "strings"
8 | "sync"
9 | "time"
10 |
11 | "github.com/xmigrate/blxrep/utils"
12 | )
13 |
14 | type Scheduler struct {
15 | snapshotTime string
16 | frequency utils.Frequency
17 | stopChan chan struct{}
18 | wg sync.WaitGroup
19 | }
20 |
21 | func NewScheduler(snapshotTime string, frequency utils.Frequency) (*Scheduler, error) {
22 | // Validate time format
23 | if _, err := time.Parse("15:04:00", snapshotTime); err != nil {
24 | return nil, fmt.Errorf("invalid time format. Use HH:MM:SS in 24-hour format: %v", err)
25 | }
26 |
27 | return &Scheduler{
28 | snapshotTime: snapshotTime,
29 | frequency: frequency,
30 | stopChan: make(chan struct{}),
31 | }, nil
32 | }
33 |
34 | func (s *Scheduler) Start(job func()) error {
35 | s.wg.Add(1)
36 | go func() {
37 | defer s.wg.Done()
38 |
39 | for {
40 | nextRun := s.calculateNextRun()
41 | log.Printf("Next snapshot scheduled for: %v", nextRun)
42 |
43 | select {
44 | case <-time.After(time.Until(nextRun)):
45 | log.Println("Starting snapshot job")
46 | job()
47 |
48 | case <-s.stopChan:
49 | log.Println("Scheduler stopped")
50 | return
51 | }
52 | }
53 | }()
54 |
55 | return nil
56 | }
57 |
58 | func (s *Scheduler) Stop() {
59 | close(s.stopChan)
60 | s.wg.Wait()
61 | }
62 |
63 | func (s *Scheduler) calculateNextRun() time.Time {
64 | now := time.Now().UTC()
65 | parts := strings.Split(s.snapshotTime, ":")
66 | hour, _ := strconv.Atoi(parts[0])
67 | minute, _ := strconv.Atoi(parts[1])
68 |
69 | var scheduled time.Time
70 |
71 | switch s.frequency {
72 | case utils.Daily:
73 | // For daily, start from current day
74 | scheduled = time.Date(
75 | now.Year(), now.Month(), now.Day(),
76 | hour, minute, 0, 0, time.UTC,
77 | )
78 | if now.After(scheduled) {
79 | scheduled = scheduled.AddDate(0, 0, 1)
80 | }
81 |
82 | case utils.Weekly:
83 | // For weekly, start from the first day of the week (assuming Monday is first)
84 | scheduled = time.Date(
85 | now.Year(), now.Month(), now.Day(),
86 | hour, minute, 0, 0, time.UTC,
87 | )
88 | // Adjust to the most recent Monday
89 | for scheduled.Weekday() != time.Monday {
90 | scheduled = scheduled.AddDate(0, 0, -1)
91 | }
92 | if now.After(scheduled) {
93 | scheduled = scheduled.AddDate(0, 0, 7)
94 | }
95 |
96 | case utils.Monthly:
97 | // For monthly, always start from the first day of the month
98 | scheduled = time.Date(
99 | now.Year(), now.Month(), 1, // Use day 1 for first of month
100 | hour, minute, 0, 0, time.UTC,
101 | )
102 | if now.After(scheduled) {
103 | scheduled = time.Date(
104 | now.Year(), now.Month()+1, 1, // Move to first day of next month
105 | hour, minute, 0, 0, time.UTC,
106 | )
107 | }
108 | }
109 |
110 | return scheduled
111 | }
112 |
113 | func StartScheduledJobs(agentID string, snapshotURL string) error {
114 | scheduler, err := NewScheduler(utils.AgentConfiguration.SnapshotTime, utils.Frequency(utils.AgentConfiguration.SnapshotFreq))
115 | if err != nil {
116 | return fmt.Errorf("failed to create scheduler: %v", err)
117 | }
118 | log.Printf("Scheduled job started to run at %s every %s", utils.AgentConfiguration.SnapshotTime, utils.AgentConfiguration.SnapshotFreq)
119 | // Start as a goroutine
120 | go func() {
121 | if err := scheduler.Start(func() {
122 | log.Printf("Scheduled job %s: agent %s connecting to snapshot endpoint at %s", time.Now().Format(time.RFC3339), agentID, snapshotURL)
123 | if err := connectAndHandle(agentID, snapshotURL); err != nil {
124 | log.Printf("Snapshot connection error: %v", err)
125 | }
126 | log.Printf("Snapshot job is running")
127 | }); err != nil {
128 | log.Printf("Scheduler error: %v", err)
129 | }
130 | }()
131 |
132 | return nil
133 | }
134 |
--------------------------------------------------------------------------------
/pkg/dispatcher/cleanup_jobs.go:
--------------------------------------------------------------------------------
1 | package dispatcher
2 |
3 | import (
4 | "context"
5 | "fmt"
6 | "os"
7 | "path/filepath"
8 | "strings"
9 | "time"
10 |
11 | "github.com/xmigrate/blxrep/service"
12 | "github.com/xmigrate/blxrep/utils"
13 | )
14 |
15 | func StartSnapshotCleanupJobs() error {
16 | utils.LogDebug(fmt.Sprintf("Cleanup jobs started to run at %s", time.Now().Format(time.RFC3339)))
17 |
18 | go func() {
19 | ctx, cancel := context.WithCancel(context.Background())
20 | defer cancel()
21 |
22 | checkInterval := 5 * time.Minute
23 | ticker := time.NewTicker(checkInterval)
24 | defer ticker.Stop()
25 |
26 | // Run initial cleanup for all agents
27 | if err := cleanupAllAgents(ctx); err != nil {
28 | utils.LogError(fmt.Sprintf("Initial cleanup for all agents failed: %v", err))
29 | }
30 |
31 | // Then run periodic cleanup for all agents
32 | for {
33 | select {
34 | case <-ctx.Done():
35 | utils.LogDebug("Cleanup jobs stopped")
36 | return
37 | case <-ticker.C:
38 | if err := cleanupAllAgents(ctx); err != nil {
39 | utils.LogError(fmt.Sprintf("Periodic cleanup for all agents failed: %v", err))
40 | }
41 | }
42 | }
43 | }()
44 |
45 | return nil
46 | }
47 |
48 | func cleanupAllAgents(ctx context.Context) error {
49 | agents, err := service.GetAllAgents(-1)
50 | if err != nil {
51 | return fmt.Errorf("failed to get agents: %w", err)
52 | }
53 |
54 | for _, agent := range agents {
55 | select {
56 | case <-ctx.Done():
57 | return ctx.Err()
58 | default:
59 | if err := performCleanup(ctx, agent.AgentId, agent.SnapshotRetention); err != nil {
60 | utils.LogError(fmt.Sprintf("Cleanup failed for agent %s: %v", agent.AgentId, err))
61 | // Continue with other agents even if one fails
62 | continue
63 | }
64 | utils.LogDebug(fmt.Sprintf("Cleanup completed for agent %s", agent.AgentId))
65 | }
66 | }
67 |
68 | return nil
69 | }
70 |
71 | func performCleanup(ctx context.Context, agentID string, snapshotRetention int) error {
72 | // Get latest snapshot times per disk first
73 | latestSnapshotTimes, err := getLatestSnapshotTimes(ctx, agentID)
74 | if err != nil {
75 | return fmt.Errorf("error getting latest snapshot times: %w", err)
76 | }
77 |
78 | // Perform both cleanups
79 | if err := cleanupSnapshots(ctx, agentID, latestSnapshotTimes, snapshotRetention); err != nil {
80 | utils.LogError(fmt.Sprintf("Snapshot cleanup failed: %v", err))
81 | }
82 |
83 | if err := cleanupIncrementals(ctx, agentID, latestSnapshotTimes); err != nil {
84 | utils.LogError(fmt.Sprintf("Incremental cleanup failed: %v", err))
85 | }
86 |
87 | return nil
88 | }
89 |
90 | func getLatestSnapshotTimes(ctx context.Context, agentID string) (map[string]time.Time, error) {
91 | snapshotFolder := filepath.Join(utils.AppConfiguration.DataDir, "snapshot")
92 | files, err := os.ReadDir(snapshotFolder)
93 | if err != nil {
94 | return nil, fmt.Errorf("error reading snapshot directory: %w", err)
95 | }
96 |
97 | latestTimes := make(map[string]time.Time)
98 |
99 | for _, file := range files {
100 | if ctx.Err() != nil {
101 | return nil, ctx.Err()
102 | }
103 |
104 | fileName := file.Name()
105 | if !strings.HasPrefix(fileName, agentID) || !strings.HasSuffix(fileName, ".img") {
106 | continue
107 | }
108 |
109 | baseName := strings.TrimSuffix(fileName, filepath.Ext(fileName))
110 | parts := strings.Split(baseName, "_")
111 | if len(parts) < 2 {
112 | continue
113 | }
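		// Snapshot filenames are assumed to end with "<diskID>_<timestamp>",
		// so the disk identifier is the second-to-last underscore-separated field.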
114 | diskID := parts[len(parts)-2]
115 |
116 | fileInfo, err := file.Info()
117 | if err != nil {
118 | utils.LogError(fmt.Sprintf("Error getting file info for %s: %v", fileName, err))
119 | continue
120 | }
121 |
122 | if currentTime, exists := latestTimes[diskID]; !exists || fileInfo.ModTime().After(currentTime) {
123 | latestTimes[diskID] = fileInfo.ModTime()
124 | }
125 | }
126 |
127 | return latestTimes, nil
128 | }
129 |
130 | func cleanupSnapshots(ctx context.Context, agentID string, latestSnapshotTimes map[string]time.Time, snapshotRetention int) error {
131 | snapshotFolder := filepath.Join(utils.AppConfiguration.DataDir, "snapshot")
132 |
133 | files, err := os.ReadDir(snapshotFolder)
134 | if err != nil {
135 | return fmt.Errorf("error reading snapshot directory: %w", err)
136 | }
137 |
138 | currentTime := time.Now()
139 | cleanupCount := 0
140 | var totalSpaceFreed int64
141 |
142 | diskSnapshots := make(map[string][]utils.SnapshotInfo)
143 | mostRecentPerDisk := make(map[string]utils.SnapshotInfo)
144 |
145 | // First pass: collect all valid snapshot pairs and organize by disk
146 | for _, file := range files {
147 | if ctx.Err() != nil {
148 | return ctx.Err()
149 | }
150 |
151 | if file.IsDir() {
152 | continue
153 | }
154 |
155 | fileName := file.Name()
156 | if !strings.HasPrefix(fileName, agentID) {
157 | continue
158 | }
159 |
160 | baseName := strings.TrimSuffix(fileName, filepath.Ext(fileName))
161 | parts := strings.Split(baseName, "_")
162 | if len(parts) < 2 {
163 | utils.LogError(fmt.Sprintf("Invalid snapshot filename format: %s", fileName))
164 | continue
165 | }
166 | diskID := parts[len(parts)-2]
167 |
168 | // Skip if already processed
169 | alreadyProcessed := false
170 | for _, info := range diskSnapshots[diskID] {
171 | if info.BaseName == baseName {
172 | alreadyProcessed = true
173 | break
174 | }
175 | }
176 | if alreadyProcessed {
177 | continue
178 | }
179 |
180 | imgPath := filepath.Join(snapshotFolder, baseName+".img")
181 | logPath := filepath.Join(snapshotFolder, baseName+".log")
182 |
183 | imgInfo, imgErr := os.Stat(imgPath)
184 | logInfo, logErr := os.Stat(logPath)
185 |
186 | if imgErr != nil || logErr != nil {
187 | utils.LogError(fmt.Sprintf("Error accessing snapshot files for base %s: img error: %v, log error: %v",
188 | baseName, imgErr, logErr))
189 | continue
190 | }
191 |
192 | // Use the older of the two timestamps
193 | fileTime := imgInfo.ModTime()
194 | if logInfo.ModTime().Before(fileTime) {
195 | fileTime = logInfo.ModTime()
196 | }
197 |
198 | snapshotInfo := utils.SnapshotInfo{
199 | Timestamp: fileTime,
200 | BaseName: baseName,
201 | ImgSize: imgInfo.Size(),
202 | LogSize: logInfo.Size(),
203 | }
204 |
205 | diskSnapshots[diskID] = append(diskSnapshots[diskID], snapshotInfo)
206 |
207 | // Update most recent snapshot for this disk
208 | currentMostRecent, exists := mostRecentPerDisk[diskID]
209 | if !exists || fileTime.After(currentMostRecent.Timestamp) {
210 | mostRecentPerDisk[diskID] = snapshotInfo
211 | }
212 | }
213 |
214 | // Second pass: delete old snapshots while preserving the most recent one per disk
215 | for diskID, snapshots := range diskSnapshots {
216 | if ctx.Err() != nil {
217 | return ctx.Err()
218 | }
219 |
220 | hasRecentSnapshot := false
221 | for _, snapshot := range snapshots {
222 | if currentTime.Sub(snapshot.Timestamp) <= (time.Duration(snapshotRetention) * time.Hour) {
223 | hasRecentSnapshot = true
224 | break
225 | }
226 | }
227 |
228 | for _, snapshot := range snapshots {
229 | fileAge := currentTime.Sub(snapshot.Timestamp)
230 |
231 | // Skip most recent snapshot if no recent snapshots exist
232 | if !hasRecentSnapshot && snapshot.BaseName == mostRecentPerDisk[diskID].BaseName {
233 | utils.LogDebug(fmt.Sprintf("Preserving most recent snapshot %s for disk %s (age: %v)",
234 | snapshot.BaseName, diskID, fileAge))
235 | continue
236 | }
237 |
238 | // Delete if older than retention period
239 | if fileAge > (time.Duration(snapshotRetention) * time.Hour) {
240 | imgPath := filepath.Join(snapshotFolder, snapshot.BaseName+".img")
241 | logPath := filepath.Join(snapshotFolder, snapshot.BaseName+".log")
242 |
243 | imgErr := os.Remove(imgPath)
244 | logErr := os.Remove(logPath)
245 |
246 | if imgErr != nil || logErr != nil {
247 | utils.LogError(fmt.Sprintf("Error deleting snapshot files for base %s: img error: %v, log error: %v",
248 | snapshot.BaseName, imgErr, logErr))
249 | continue
250 | }
251 |
252 | cleanupCount += 2
253 | totalSpaceFreed += snapshot.ImgSize + snapshot.LogSize
254 | utils.LogDebug(fmt.Sprintf("Deleted snapshot files for base %s (age: %v, freed: %d bytes)",
255 | snapshot.BaseName, fileAge, snapshot.ImgSize+snapshot.LogSize))
256 | }
257 | }
258 | }
259 |
260 | utils.LogDebug(fmt.Sprintf("Snapshot cleanup completed for agent %s. Deleted %d files, freed %d bytes",
261 | agentID, cleanupCount, totalSpaceFreed))
262 |
263 | return nil
264 | }
265 |
266 | func cleanupIncrementals(ctx context.Context, agentID string, latestSnapshotTimes map[string]time.Time) error {
267 | incrementalFolder := filepath.Join(utils.AppConfiguration.DataDir, "incremental")
268 | files, err := os.ReadDir(incrementalFolder)
269 | if err != nil {
270 | return fmt.Errorf("error reading incremental directory: %w", err)
271 | }
272 |
273 | var totalSpaceFreed int64
274 | cleanupCount := 0
275 |
276 | for _, file := range files {
277 | if ctx.Err() != nil {
278 | return ctx.Err()
279 | }
280 |
281 | fileName := file.Name()
282 | if !strings.HasPrefix(fileName, agentID) {
283 | continue
284 | }
285 |
286 | // Parse the disk ID from the filename
287 | parts := strings.Split(fileName, "_")
288 | if len(parts) < 2 {
289 | utils.LogError(fmt.Sprintf("Invalid incremental filename format: %s", fileName))
290 | continue
291 | }
292 | diskID := parts[1] // Get the disk identifier part
293 |
294 | // Get the latest snapshot time for this disk
295 | latestSnapshotTime, exists := latestSnapshotTimes[diskID]
296 | if !exists {
297 | utils.LogDebug(fmt.Sprintf("No snapshot found for disk %s, skipping incremental cleanup", diskID))
298 | continue
299 | }
300 |
301 | fileInfo, err := file.Info()
302 | if err != nil {
303 | utils.LogError(fmt.Sprintf("Error getting file info for %s: %v", fileName, err))
304 | continue
305 | }
306 |
307 | // If the incremental file is older than the latest snapshot, delete it
308 | if fileInfo.ModTime().Before(latestSnapshotTime) {
309 | filePath := filepath.Join(incrementalFolder, fileName)
310 | fileSize := fileInfo.Size()
311 |
312 | if err := os.Remove(filePath); err != nil {
313 | utils.LogError(fmt.Sprintf("Error deleting incremental file %s: %v", fileName, err))
314 | continue
315 | }
316 |
317 | cleanupCount++
318 | totalSpaceFreed += fileSize
319 | utils.LogDebug(fmt.Sprintf("Deleted incremental file %s (created: %v, latest snapshot: %v)",
320 | fileName, fileInfo.ModTime(), latestSnapshotTime))
321 | }
322 | }
323 |
324 | if cleanupCount > 0 {
325 | utils.LogDebug(fmt.Sprintf("Incremental cleanup completed for agent %s. Deleted %d files, freed %d bytes",
326 | agentID, cleanupCount, totalSpaceFreed))
327 | }
328 |
329 | return nil
330 | }
331 |
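332 | // Retention summary for the cleanup code above (a reading of the code, not a
333 | // spec): snapshot .img/.log pairs older than snapshotRetention hours are
334 | // deleted, except that the most recent snapshot of a disk is preserved when no
335 | // snapshot falls inside the retention window; cleanupIncrementals then removes
336 | // any incremental file whose modification time predates the latest snapshot
337 | // for the same disk.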
--------------------------------------------------------------------------------
/pkg/dispatcher/config.go:
--------------------------------------------------------------------------------
1 | package dispatcher
2 |
3 | import (
4 | "fmt"
5 | "io/fs"
6 | "os"
7 | "path/filepath"
8 | "regexp"
9 | "strconv"
10 | "strings"
11 |
12 | "github.com/xmigrate/blxrep/service"
13 | "github.com/xmigrate/blxrep/utils"
14 | "gopkg.in/yaml.v3"
15 | )
16 |
17 | func Contains(slice []string, item string) bool {
18 | for _, s := range slice {
19 | if s == item {
20 | return true
21 | }
22 | }
23 | return false
24 | }
25 |
26 | // ConfigScheduler reads backup policies from files and updates agent configurations
27 | func ConfigScheduler(policyDir string) error {
28 | // Get all agents from DB as Map
29 | agentMap, err := service.GetAllAgentsMap(-1)
30 | if err != nil {
31 | return fmt.Errorf("failed to get agents from DB: %w", err)
32 | }
33 | // Walk through all YAML files in the policy directory
34 | err = filepath.WalkDir(policyDir, func(path string, d fs.DirEntry, err error) error {
35 | if err != nil {
36 | return err
37 | }
38 |
39 | // Skip if not a YAML file
40 | if !d.IsDir() && (filepath.Ext(path) == ".yaml" || filepath.Ext(path) == ".yml") {
41 | if err := processBackupPolicy(path, agentMap); err != nil {
42 | return fmt.Errorf("failed to process policy file %s: %w", path, err)
43 | }
44 | }
45 | return nil
46 | })
47 |
48 | if err != nil {
49 | return fmt.Errorf("failed to walk policy directory: %w", err)
50 | }
51 |
52 | // Update agents in the database
53 | if err := service.InsertOrUpdateAgentsMap(agentMap); err != nil {
54 | return fmt.Errorf("failed to update agents in DB: %w", err)
55 | }
56 |
57 | return nil
58 | }
59 |
60 | func processBackupPolicy(filePath string, agentMap map[string]utils.Agent) error {
61 | // Read policy file
62 | data, err := os.ReadFile(filePath)
63 | if err != nil {
64 | return fmt.Errorf("failed to read policy file: %w", err)
65 | }
66 |
67 | // Parse YAML
68 | var policy utils.BackupPolicy
69 | if err := yaml.Unmarshal(data, &policy); err != nil {
70 | return fmt.Errorf("failed to parse policy file: %w", err)
71 | }
72 | utils.LogDebug(fmt.Sprintf("Processed policy: %s", filePath))
73 | // Process each target group in the policy
74 | for _, targetGroup := range policy.Targets {
75 | // Expand the pattern to get all matching hostnames
76 | hostnames := expandPattern(targetGroup.Pattern, agentMap)
77 | // Update configuration for all matching agents
78 | for _, hostname := range hostnames {
79 | if agent, exists := agentMap[hostname]; exists {
80 | updateAgentConfig(&agent, policy, targetGroup)
81 | agentMap[hostname] = agent
82 | }
83 | }
84 | }
85 |
86 | return nil
87 | }
88 |
89 | // expandPattern expands patterns like "web[1-3].example.com" to ["web1.example.com", "web2.example.com", "web3.example.com"]
90 | func expandPattern(pattern string, agentMap map[string]utils.Agent) []string {
91 | // First check if pattern contains range expression
92 | rangeRegex := regexp.MustCompile(`\[(\d+)-(\d+)\]`)
93 | matches := rangeRegex.FindStringSubmatch(pattern)
94 |
95 | if len(matches) == 3 {
96 | // We found a range expression
97 | start, _ := strconv.Atoi(matches[1])
98 | end, _ := strconv.Atoi(matches[2])
99 |
100 | var result []string
101 | prefix := pattern[:strings.Index(pattern, "[")]
102 | suffix := pattern[strings.Index(pattern, "]")+1:]
103 |
104 | // Generate all hostnames in the range
105 | for i := start; i <= end; i++ {
106 | hostname := fmt.Sprintf("%s%d%s", prefix, i, suffix)
107 | result = append(result, hostname)
108 | }
109 | return result
110 | }
111 |
112 | // Handle comma-separated lists [1,2,3]
113 | listRegex := regexp.MustCompile(`\[([^\]]+)\]`)
114 | matches = listRegex.FindStringSubmatch(pattern)
115 |
116 | if len(matches) == 2 {
117 | items := strings.Split(matches[1], ",")
118 | var result []string
119 | prefix := pattern[:strings.Index(pattern, "[")]
120 | suffix := pattern[strings.Index(pattern, "]")+1:]
121 |
122 | for _, item := range items {
123 | hostname := fmt.Sprintf("%s%s%s", prefix, strings.TrimSpace(item), suffix)
124 | result = append(result, hostname)
125 | }
126 | return result
127 | }
128 |
129 | // Handle wildcards (* and ?)
130 | if strings.ContainsAny(pattern, "*?") {
131 | // Convert glob pattern to regex for matching
132 | regexPattern := strings.ReplaceAll(pattern, ".", "\\.")
133 | regexPattern = strings.ReplaceAll(regexPattern, "*", ".*")
134 | regexPattern = strings.ReplaceAll(regexPattern, "?", ".")
135 | regexPattern = "^" + regexPattern + "$"
136 |
137 | reg, err := regexp.Compile(regexPattern)
138 | if err != nil {
139 | utils.LogError(fmt.Sprintf("Invalid pattern %s: %v", pattern, err))
140 | return []string{pattern}
141 | }
142 |
143 | var result []string
144 | // Match against all known hostnames
145 | for hostname := range agentMap {
146 | if reg.MatchString(hostname) {
147 | result = append(result, hostname)
148 | }
149 | }
150 | return result
151 | }
152 |
153 | // If no special pattern, return as is
154 | return []string{pattern}
155 | }
156 |
157 | func updateAgentConfig(agent *utils.Agent, policy utils.BackupPolicy, targetGroup utils.TargetGroup) {
158 | agent.CloneSchedule.Frequency = policy.SnapshotFrequency
159 | agent.CloneSchedule.Time = policy.SnapshotTime
160 | agent.CloneSchedule.Bandwidth = policy.BandwidthLimit
161 | agent.Prerequisites = true
162 | agent.SnapshotRetention = policy.SnapshotRetention
163 | agent.ArchiveInterval = policy.ArchiveInterval
164 | agent.LiveSyncFreq = policy.LiveSyncFrequency
165 | agent.TransitionAfterDays = policy.TransitionAfterDays
166 | agent.DeleteAfterDays = policy.DeleteAfterDays
167 | utils.AppConfiguration.ArchiveInterval = policy.ArchiveInterval
168 | utils.LogDebug(fmt.Sprintf("Updated agent config: %+v", agent.AgentId))
169 | // Update disk configuration
170 | for _, disk := range agent.Footprint.DiskDetails {
171 | if !Contains(targetGroup.DisksExcluded, disk.Name) {
172 | if !Contains(agent.Disks, disk.Name) {
173 | agent.Disks = append(agent.Disks, disk.Name)
174 | }
175 | }
176 | }
177 | }
178 |
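179 | // Illustrative expansions of expandPattern (hostnames are examples only;
180 | // wildcard patterns are matched against hostnames already present in the
181 | // agent map):
182 | //
183 | //	expandPattern("web[1-3].example.com", agents)            // -> web1/web2/web3.example.com
184 | //	expandPattern("db[master,replica].example.com", agents)  // -> dbmaster/dbreplica.example.com
185 | //	expandPattern("cache-*.example.com", agents)             // -> every known hostname matching the glob
186 | //	expandPattern("standalone.example.com", agents)          // -> returned as-is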
--------------------------------------------------------------------------------
/pkg/dispatcher/restore.go:
--------------------------------------------------------------------------------
1 | package dispatcher
2 |
3 | import (
4 | "archive/tar"
5 | "bufio"
6 | "bytes"
7 | "compress/gzip"
8 | "context"
9 | "encoding/base64"
10 | "fmt"
11 | "io"
12 | "log"
13 | "os"
14 | "path/filepath"
15 | "sync"
16 | "time"
17 |
18 | "github.com/xmigrate/blxrep/service"
19 | "github.com/xmigrate/blxrep/utils"
20 |
21 | "golang.org/x/sys/unix"
22 | )
23 |
24 | const chunkSize = 1 * 1024 * 1024 // 1MB chunks
25 |
26 | const (
27 | maxRetries = 5
28 | retryDelay = 2 * time.Second
29 | )
30 |
31 | func RestoreFiles(agentID string, sourcePath string, destPath string, action utils.Action) error {
32 | 	agent, exists := agents[agentID]
33 | 	if !exists {
34 | 		utils.LogError("Agent with ID " + agentID + " does not exist in the agents map")
35 | 		return fmt.Errorf("agent with ID %s not found", agentID)
36 | 	}
37 | 	conn := agent.RestoreConn
38 |
39 | utils.LogDebug("Agent details for ID " + agentID + ":")
40 | utils.LogDebug(" Agent: " + fmt.Sprintf("%+v", agent))
41 |
42 | // Create a buffer to store the compressed data
43 | var buf bytes.Buffer
44 | gzipWriter := gzip.NewWriter(&buf)
45 | tarWriter := tar.NewWriter(gzipWriter)
46 |
47 | // Compress the file or directory
48 | err := compressPath(sourcePath, tarWriter)
49 | if err != nil {
50 | return fmt.Errorf("error compressing data: %v", err)
51 | }
52 |
53 | // Close the tar and gzip writers
54 | if err := tarWriter.Close(); err != nil {
55 | return fmt.Errorf("error closing tar writer: %v", err)
56 | }
57 | if err := gzipWriter.Close(); err != nil {
58 | return fmt.Errorf("error closing gzip writer: %v", err)
59 | }
60 |
61 | // Get the compressed data
62 | compressedData := buf.Bytes()
63 | totalSize := len(compressedData)
64 | totalChunks := (totalSize + chunkSize - 1) / chunkSize
65 |
66 | // Send start message
67 | startMsg := utils.Message{
68 | Action: utils.CONST_AGENT_ACTION_RESTORE,
69 | RestoreMessage: utils.RestoreData{
70 | Type: "start",
71 | TotalChunks: totalChunks,
72 | TotalSize: int64(totalSize),
73 | FilePath: destPath,
74 | },
75 | }
76 | if err := conn.WriteJSON(startMsg); err != nil {
77 | return fmt.Errorf("error sending start message: %v", err)
78 | }
79 | lastReportedProgress := 0
80 | // Send the data in chunks
81 | for i := 0; i < totalSize; i += chunkSize {
82 | end := i + chunkSize
83 | if end > totalSize {
84 | end = totalSize
85 | }
86 | chunk := compressedData[i:end]
87 |
88 | // Encode the chunk in base64
89 | encodedChunk := base64.StdEncoding.EncodeToString(chunk)
90 |
91 | // Send the chunk
92 | chunkMsg := utils.Message{
93 | Action: utils.CONST_AGENT_ACTION_RESTORE,
94 | RestoreMessage: utils.RestoreData{
95 | FilePath: destPath,
96 | Type: "chunk",
97 | ChunkIndex: i / chunkSize,
98 | Data: encodedChunk,
99 | },
100 | }
101 |
102 | // Retry loop for sending the chunk
103 | for retry := 0; retry < maxRetries; retry++ {
104 | conn := agents[agentID].RestoreConn
105 | err := conn.WriteJSON(chunkMsg)
106 | if err == nil {
107 | // Chunk sent successfully
108 | utils.LogDebug("Sent chunk " + fmt.Sprintf("%d", i/chunkSize+1) + " of " + fmt.Sprintf("%d", totalChunks))
109 | break
110 | }
111 |
112 | if retry == maxRetries-1 {
113 | // Last retry attempt failed
114 | return fmt.Errorf("error sending chunk %d after %d attempts: %v", i/chunkSize+1, maxRetries, err)
115 | }
116 |
117 | utils.LogError("Error sending chunk " + fmt.Sprintf("%d", i/chunkSize+1) + " (attempt " + fmt.Sprintf("%d", retry+1) + " of " + fmt.Sprintf("%d", maxRetries) + "): " + err.Error() + ". Retrying...")
118 | time.Sleep(retryDelay)
119 | }
120 |
121 | // Log progress
122 | progress := int(float64(i) / float64(totalSize) * 100)
123 | utils.LogDebug("Sent chunk " + fmt.Sprintf("%d", i/chunkSize+1) + " of " + fmt.Sprintf("%d", totalChunks) + " (" + fmt.Sprintf("%d", progress) + "%)")
124 | if progress >= lastReportedProgress+5 || progress == 100 {
125 | action.ActionProgress = progress
126 | service.InsertOrUpdateAction(action)
127 | lastReportedProgress = progress
128 | }
129 | time.Sleep(100 * time.Millisecond)
130 | }
131 |
132 | // Send complete message
133 | completeMsg := utils.Message{
134 | Action: utils.CONST_AGENT_ACTION_RESTORE,
135 | RestoreMessage: utils.RestoreData{
136 | Type: "complete",
137 | FilePath: destPath,
138 | },
139 | }
140 | action.ActionProgress = 100
141 | action.ActionStatus = "Completed"
142 | service.InsertOrUpdateAction(action)
143 | if err := conn.WriteJSON(completeMsg); err != nil {
144 | return fmt.Errorf("error sending complete message: %v", err)
145 | }
146 |
147 | utils.LogDebug("File transfer completed: " + destPath)
148 | return nil
149 | }
150 |
151 | func compressPath(sourcePath string, tarWriter *tar.Writer) error {
152 | return filepath.Walk(sourcePath, func(path string, info os.FileInfo, err error) error {
153 | if err != nil {
154 | return err
155 | }
156 |
157 | header, err := tar.FileInfoHeader(info, path)
158 | if err != nil {
159 | return fmt.Errorf("error creating tar header: %v", err)
160 | }
161 |
162 | // Create relative path
163 | relPath, err := filepath.Rel(sourcePath, path)
164 | if err != nil {
165 | return fmt.Errorf("error creating relative path: %v", err)
166 | }
167 |
168 | // Set the header name to the relative path, preserving directory structure
169 | header.Name = filepath.ToSlash(relPath)
170 |
171 | if err := tarWriter.WriteHeader(header); err != nil {
172 | return fmt.Errorf("error writing tar header: %v", err)
173 | }
174 |
175 | if !info.IsDir() {
176 | file, err := os.Open(path)
177 | if err != nil {
178 | return fmt.Errorf("error opening file: %v", err)
179 | }
180 | defer file.Close()
181 |
182 | if _, err := io.Copy(tarWriter, file); err != nil {
183 | return fmt.Errorf("error writing file to tar: %v", err)
184 | }
185 | }
186 |
187 | return nil
188 | })
189 | }
190 |
191 | func getBlockDeviceSize(path string) (int64, error) {
192 | file, err := os.Open(path)
193 | if err != nil {
194 | return 0, err
195 | }
196 | defer file.Close()
197 |
198 | size, err := unix.IoctlGetInt(int(file.Fd()), unix.BLKGETSIZE64)
199 | if err != nil {
200 | return 0, err
201 | }
202 |
203 | return int64(size), nil
204 | }
205 |
206 | func RestorePartition(agentID string, sourcePath string, destPath string, blockSize int, channelSize int, ctx context.Context, restorePartition *sync.Mutex, isPartitionRestore *bool, action utils.Action) error {
207 | websock := agents[agentID].RestoreConn
208 | src, err := os.Open(sourcePath)
209 | 	if err != nil {
210 | 		return fmt.Errorf("failed to open source disk %s: %w", sourcePath, err)
211 | 	}
212 | 	defer src.Close()
213 | bufReader := bufio.NewReaderSize(src, blockSize*8000)
214 |
215 | // Allocate a buffer for one block.
216 | buf := make([]byte, blockSize)
217 |
218 | var blocks []utils.AgentDataBlock
219 | var blockCount uint64
220 | var batchSize int
221 | var totalDataSent int64
222 | var lastReportedProgress int
223 |
224 | totalSize, err := getBlockDeviceSize(sourcePath)
225 | if err != nil {
226 | return fmt.Errorf("failed to get block device size: %v", err)
227 | }
228 | if totalSize == 0 {
229 | return fmt.Errorf("block device is empty or size couldn't be determined")
230 | }
231 | utils.LogDebug(fmt.Sprintf("Restoration started for %s", sourcePath))
232 | for {
233 | select {
234 | case <-ctx.Done():
235 | // Handle context cancellation and exit the goroutine
236 | utils.LogDebug("Restoration was paused/cancelled and goroutine is exiting.")
237 | if len(blocks) > 0 {
238 | utils.StreamData(blocks, websock, false, destPath, utils.CONST_AGENT_ACTION_PARTITION_RESTORE, time.Now().Unix())
239 | totalDataSent += int64(len(blocks) * blockSize)
240 | }
241 | restorePartition.Lock()
242 | *isPartitionRestore = false
243 | restorePartition.Unlock()
244 | action.ActionProgress = int(float64(totalDataSent) / float64(totalSize) * 100)
245 | action.ActionStatus = "Paused"
246 | service.InsertOrUpdateAction(action)
247 | return nil
248 | default:
249 | // Read data in larger chunks to reduce syscall overhead
250 | n, err := bufReader.Read(buf)
251 | if n > 0 {
252 | for i := 0; i < n; i += blockSize {
253 | end := i + blockSize
254 | if end > n {
255 | end = n
256 | }
257 | blockData := utils.AgentDataBlock{
258 | BlockNumber: blockCount,
259 | BlockData: append([]byte(nil), buf[i:end]...),
260 | }
261 | blocks = append(blocks, blockData)
262 | blockCount++
263 | batchSize += end - i
264 |
265 | if batchSize >= channelSize {
266 | utils.StreamData(blocks, websock, false, destPath, utils.CONST_AGENT_ACTION_PARTITION_RESTORE, time.Now().Unix())
267 | totalDataSent += int64(batchSize)
268 | // Update action progress
269 | progress := float64(totalDataSent) / float64(totalSize) * 100
270 | utils.LogDebug(fmt.Sprintf("Batch sent. Total data sent so far: %d bytes percentage: %.2f%%", totalDataSent, progress))
271 |
272 | if int(progress) >= lastReportedProgress+2 || int(progress) == 100 {
273 | action.ActionProgress = int(progress)
274 | service.InsertOrUpdateAction(action)
275 | lastReportedProgress = int(progress)
276 | }
277 |
278 | blocks = nil
279 | batchSize = 0
280 | time.Sleep(100 * time.Millisecond)
281 | }
282 | }
283 |
284 | }
285 | if err != nil {
286 | if err == io.EOF {
287 | if len(blocks) > 0 {
288 | 					utils.StreamData(blocks, websock, false, destPath, utils.CONST_AGENT_ACTION_PARTITION_RESTORE, time.Now().Unix())
289 | totalDataSent += int64(len(blocks) * blockSize)
290 | }
291 | utils.LogDebug(fmt.Sprintf("Restore completed. Total data sent: %d bytes", totalDataSent))
292 | restorePartition.Lock()
293 | *isPartitionRestore = false
294 | restorePartition.Unlock()
295 | action.ActionProgress = 100
296 | action.ActionStatus = "Completed"
297 | service.InsertOrUpdateAction(action)
298 | return nil
299 | }
300 | action.ActionStatus = "Failed"
301 | service.InsertOrUpdateAction(action)
302 | log.Fatalf("Failed to read block: %v", err)
303 | }
304 | }
305 | }
306 | }
307 |
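308 | // Wire sketch of the file-restore flow implemented by RestoreFiles above
309 | // (field names are the Go struct fields of utils.RestoreData; values are
310 | // illustrative): the source path is tar+gzip compressed in memory, split into
311 | // 1MB chunks, base64 encoded, and each chunk is sent over the agent's
312 | // RestoreConn wrapped in a utils.Message with Action CONST_AGENT_ACTION_RESTORE:
313 | //
314 | //	RestoreData{Type: "start", TotalChunks: n, TotalSize: s, FilePath: destPath}
315 | //	RestoreData{Type: "chunk", ChunkIndex: i, Data: <base64 of chunk i>}   // repeated n times
316 | //	RestoreData{Type: "complete", FilePath: destPath}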
--------------------------------------------------------------------------------
/plans/lab.yaml:
--------------------------------------------------------------------------------
1 | name: "lab-backup-policy"
2 | description: "Backup policy for lab servers"
3 | archive_interval: 48h
4 | snapshot_frequency: "daily"
5 | snapshot_time: "12:00"
6 | bandwidth_limit: 100
7 | snapshot_retention: 30
8 | live_sync_frequency: 2m
9 | transition_after_days: 30
10 | delete_after_days: 90
11 |
12 | targets:
13 | # Range pattern
14 | - pattern: "ip-172-[31-32]-46-49"
15 | disks_excluded:
16 | - "/dev/xvda"
17 |
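18 | # Wildcard targets are also accepted by the pattern expansion in
19 | # pkg/dispatcher/config.go; the pattern below is only an example:
20 | #
21 | # - pattern: "lab-*"
22 | #   disks_excluded:
23 | #     - "/dev/xvda"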
--------------------------------------------------------------------------------
/plans/testplan.yml:
--------------------------------------------------------------------------------
1 | name: "production-backup-policy"
2 | description: "Backup policy for production servers"
3 | archive_interval: 24h
4 | snapshot_frequency: "daily"
5 | snapshot_time: "00:00"
6 | bandwidth_limit: 50
7 | snapshot_retention: 7
8 | live_sync_frequency: 1m
9 | transition_after_days: 30
10 | delete_after_days: 90
11 |
12 | targets:
13 | # Range pattern
14 | - pattern: "web[1-5].prod.example.com"
15 | disks_excluded:
16 | - "/dev/sdb"
17 |
18 | # List pattern
19 | - pattern: "db[master,slave1,slave2].prod.example.com"
20 | disks_excluded:
21 | - "/dev/sdc"
22 |
23 | # Mixed pattern
24 | - pattern: "cache[1-3,backup].prod.example.com"
25 | disks_excluded:
26 | - "/dev/sdd"
27 |
28 | # Regular wildcard pattern
29 | - pattern: "monitor-*.prod.example.com"
30 | disks_excluded:
31 | - "/dev/sde"
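32 |
33 | # Note: expanded hostnames only take effect for agents that are already
34 | # registered in the dispatcher database; processBackupPolicy silently skips
35 | # any hostname it cannot find in the agent map.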
--------------------------------------------------------------------------------
/postinstall.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | systemctl daemon-reload
3 | # systemctl enable blxrep
4 | # systemctl start blxrep
5 |
--------------------------------------------------------------------------------
/service/action_db_utils.go:
--------------------------------------------------------------------------------
1 | package service
2 |
3 | import (
4 | "encoding/json"
5 | "fmt"
6 |
7 | "github.com/xmigrate/blxrep/storage"
8 | "github.com/xmigrate/blxrep/storage/boltdb"
9 | "github.com/xmigrate/blxrep/utils"
10 | )
11 |
12 | func getActionDBInstance() storage.Service {
13 | return boltdb.New(utils.AppConfiguration.DataDir + "/xmaction.db")
14 | }
15 |
16 | func GetAction(actionID string) (utils.Action, error) {
17 | db := getActionDBInstance()
18 |
19 | if err := db.Open(); err != nil {
20 | return utils.Action{}, err
21 | }
22 | //close db and handle error inside defer
23 | defer func() {
24 | if err := db.Close(); err != nil {
25 | utils.LogError("unable to close DB : " + err.Error())
26 | }
27 | }()
28 |
29 | actionObj, err := db.Get(actionID)
30 |
31 | if err != nil {
32 | return utils.Action{}, err
33 | }
34 |
35 | var action utils.Action
36 | err = json.Unmarshal(actionObj, &action)
37 | if err != nil {
38 | return utils.Action{}, err
39 | }
40 |
41 | return action, nil
42 | }
43 |
44 | func GetActionWithId(actionID string) (utils.Action, error) {
45 | db := getActionDBInstance()
46 |
47 | if err := db.Open(); err != nil {
48 | return utils.Action{}, err
49 | }
50 | //close db and handle error inside defer
51 | defer func() {
52 | if err := db.Close(); err != nil {
53 | utils.LogError("unable to close DB : " + err.Error())
54 | }
55 | }()
56 |
57 | actionObj, err := db.SelectAll(-1)
58 |
59 | if err != nil {
60 | return utils.Action{}, err
61 | }
62 | for _, v := range actionObj {
63 | var action utils.Action
64 | err = json.Unmarshal(v, &action)
65 | if err != nil {
66 | return utils.Action{}, err
67 | }
68 | if action.ActionId == actionID {
69 | return action, nil
70 | }
71 | }
72 |
73 | return utils.Action{}, fmt.Errorf("action not found")
74 | }
75 |
76 | func InsertOrUpdateAction(action utils.Action) error {
77 | 	db := getActionDBInstance()
78 |
79 | 	if err := db.Open(); err != nil {
80 | 		return err
81 | 	}
82 | 	//close db and handle error inside defer
83 | 	defer func() {
84 | 		if err := db.Close(); err != nil {
85 | 			utils.LogError("unable to close DB : " + err.Error())
86 | 		}
87 | 	}()
88 |
89 | 	actionObj, err := json.Marshal(action)
90 | 	if err != nil {
91 | 		return err
92 | 	}
93 |
94 | 	err = db.Insert(action.Id, actionObj)
95 | 	if err != nil {
96 | 		return err
97 | 	}
98 |
99 | 	return nil
100 | }
101 |
102 | func GetAllActions(limit int) ([]utils.Action, error) {
103 | db := getActionDBInstance()
104 |
105 | if err := db.Open(); err != nil {
106 | return nil, err
107 | }
108 | //close db and handle error inside defer
109 | defer func() {
110 | if err := db.Close(); err != nil {
111 | utils.LogError("unable to close DB : " + err.Error())
112 | }
113 | }()
114 |
115 | actions, err := db.SelectAll(limit)
116 |
117 | if err != nil {
118 | return nil, err
119 | }
120 |
121 | actionSlice := make([]utils.Action, 0)
122 | for _, v := range actions {
123 | action := utils.Action{}
124 | if err := json.Unmarshal(v, &action); err != nil {
125 | return nil, err
126 | }
127 | actionSlice = append(actionSlice, action)
128 | }
129 |
130 | return actionSlice, nil
131 | }
132 |
133 | func GetAllActionsWithStatus(limit int, status utils.CONST_ACTION_STATUS_TYPE) ([]utils.Action, error) {
134 |
135 | actions, err := GetAllActions(limit)
136 | if err != nil {
137 | return nil, err
138 | }
139 |
140 | filteredActions := make([]utils.Action, 0)
141 | for _, action := range actions {
142 | if action.ActionStatus == string(status) {
143 | filteredActions = append(filteredActions, action)
144 | }
145 | }
146 |
147 | return filteredActions, nil
148 | }
149 |
150 | func GetAllActionsWithUpdateStatus(limit int, status bool) ([]utils.Action, error) {
151 | actions, err := GetAllActions(limit)
152 | if err != nil {
153 | return nil, err
154 | }
155 |
156 | filteredActions := make([]utils.Action, 0)
157 | for _, action := range actions {
158 | if action.UpdateBackend == status {
159 | filteredActions = append(filteredActions, action)
160 | }
161 | }
162 |
163 | return filteredActions, nil
164 |
165 | }
166 |
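167 | // Key usage note (derived from the functions above): InsertOrUpdateAction
168 | // stores each record under Action.Id, so GetAction expects that same Id,
169 | // while GetActionWithId scans every record for a matching Action.ActionId,
170 | // the identifier used by the backend sync code in service/backend_utils.go.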
--------------------------------------------------------------------------------
/service/agent_db_utils.go:
--------------------------------------------------------------------------------
1 | package service
2 |
3 | import (
4 | "encoding/json"
5 | "fmt"
6 | "time"
7 |
8 | "github.com/xmigrate/blxrep/storage"
9 | "github.com/xmigrate/blxrep/storage/boltdb"
10 | "github.com/xmigrate/blxrep/utils"
11 | )
12 |
13 | func getDBInstance() storage.Service {
14 | return boltdb.New(utils.AppConfiguration.DataDir + "/xmdispatcher.db")
15 | }
16 |
17 | func GetAgent(agentID string) (utils.Agent, error) {
18 |
19 | db := getDBInstance()
20 |
21 | if err := db.Open(); err != nil {
22 | return utils.Agent{}, err
23 | }
24 | //close db and handle error inside defer
25 | defer func() {
26 | if err := db.Close(); err != nil {
27 | utils.LogError("unable to close DB : " + err.Error())
28 | }
29 | }()
30 |
31 | agentObj, err := db.Get(agentID)
32 |
33 | if err != nil {
34 | return utils.Agent{}, err
35 | }
36 | var agent utils.Agent
37 |
38 | err = json.Unmarshal(agentObj, &agent)
39 | if err != nil {
40 | return utils.Agent{}, err
41 | }
42 |
43 | return agent, nil
44 | }
45 |
46 | func InsertOrUpdateAgent(agent utils.Agent) error {
47 |
48 | db := getDBInstance()
49 |
50 | if err := db.Open(); err != nil {
51 | return err
52 | }
53 |
54 | defer func() {
55 | if err := db.Close(); err != nil {
56 | utils.LogError("unable to close DB : " + err.Error())
57 | }
58 | }()
59 |
60 | data, err := json.Marshal(agent)
61 | if err != nil {
62 | return err
63 | }
64 |
65 | if err := db.Insert(agent.AgentId, data); err != nil {
66 | return err
67 | }
68 |
69 | return nil
70 | }
71 |
72 | func InsertOrUpdateAgents(agents []utils.Agent) error {
73 |
74 | db := getDBInstance()
75 |
76 | if err := db.Open(); err != nil {
77 | return err
78 | }
79 |
80 | defer func() {
81 | if err := db.Close(); err != nil {
82 | utils.LogError("unable to close DB : " + err.Error())
83 | }
84 | }()
85 |
86 | for _, agent := range agents {
87 | ag, err := json.Marshal(agent)
88 | if err != nil {
89 | return err
90 | }
91 | if err := db.Insert(agent.AgentId, ag); err != nil {
92 | return err
93 | }
94 | }
95 |
96 | return nil
97 | }
98 |
99 | func InsertOrUpdateAgentsMap(agents map[string]utils.Agent) error {
100 |
101 | // convert map to slice
102 | var agentsSlice []utils.Agent
103 | for _, v := range agents {
104 | agentsSlice = append(agentsSlice, v)
105 | }
106 | return InsertOrUpdateAgents(agentsSlice)
107 | }
108 |
109 | func GetAllAgents(limit int) ([]utils.Agent, error) {
110 |
111 | db := getDBInstance()
112 |
113 | if err := db.Open(); err != nil {
114 | 		return []utils.Agent{}, err
115 | }
116 | defer func() {
117 | if err := db.Close(); err != nil {
118 | utils.LogError("unable to close DB : " + err.Error())
119 | }
120 | }()
121 |
122 | agents, err := db.SelectAll(limit)
123 |
124 | if err != nil {
125 | return []utils.Agent{}, err
126 | }
127 |
128 | // convert map to agents slice
129 | agentsSlice := make([]utils.Agent, 0)
130 | for _, v := range agents {
131 | agent := utils.Agent{}
132 | if err := json.Unmarshal(v, &agent); err != nil {
133 | return []utils.Agent{}, err
134 | }
135 | agentsSlice = append(agentsSlice, agent)
136 | }
137 |
138 | return agentsSlice, nil
139 | }
140 |
141 | func GetAllAgentsMap(limit int) (map[string]utils.Agent, error) {
142 |
143 | agents, err := GetAllAgents(limit)
144 | if err != nil {
145 | return nil, err
146 | }
147 |
148 | agentMap := make(map[string]utils.Agent)
149 | for _, i := range agents {
150 | agentMap[i.AgentId] = i
151 | }
152 |
153 | return agentMap, nil
154 | }
155 |
156 | func SetAgentAction(agentId string, action utils.CONST_AGENT_ACTION) error {
157 | db := getDBInstance()
158 |
159 | if err := db.Open(); err != nil {
160 | 		return err
161 | }
162 |
163 | defer func() {
164 | if err := db.Close(); err != nil {
165 | utils.LogError("unable to close DB : " + err.Error())
166 | }
167 | }()
168 |
169 | agentObj, err := db.Get(agentId)
170 | var agent utils.Agent
171 |
172 | if err != nil {
173 | return err
174 | }
175 |
176 | err = json.Unmarshal(agentObj, &agent)
177 | if err != nil {
178 | return err
179 | }
180 |
181 | agent.Action = action
182 |
183 | ag, err := json.Marshal(agent)
184 | if err != nil {
185 | return err
186 | }
187 |
188 | if err := db.Insert(agent.AgentId, ag); err != nil {
189 | return err
190 | }
191 |
192 | return nil
193 | }
194 |
195 | func GetConnectedAgents() ([]utils.Agent, error) {
196 | db := getDBInstance()
197 | if db == nil {
198 | return nil, fmt.Errorf("failed to get database instance")
199 | }
200 |
201 | if err := db.Open(); err != nil {
202 | return nil, fmt.Errorf("failed to open database: %v", err)
203 | }
204 | defer db.Close() // Use defer to ensure db is closed
205 |
206 | agents, err := db.SelectAll(-1)
207 | if err != nil {
208 | return nil, fmt.Errorf("failed to select agents: %v", err)
209 | }
210 |
211 | if agents == nil {
212 | return make([]utils.Agent, 0), nil
213 | }
214 |
215 | utils.LogDebug(fmt.Sprintf("Retrieved %d raw agents from database", len(agents)))
216 |
217 | agentSlice := make([]utils.Agent, 0, len(agents))
218 | for i, v := range agents {
219 | if v == nil {
220 | utils.LogError(fmt.Sprintf("Warning: nil agent data at index %d", i))
221 | continue
222 | }
223 |
224 | var agent utils.Agent
225 | if err := json.Unmarshal(v, &agent); err != nil {
226 | utils.LogError(fmt.Sprintf("Error unmarshaling agent at index %d: %v", i, err))
227 | continue // Skip invalid agents instead of failing completely
228 | }
229 |
230 | // Validate critical fields
231 | if agent.AgentId == "" {
232 | utils.LogError(fmt.Sprintf("Warning: agent at index %d has empty AgentId", i))
233 | continue
234 | }
235 |
236 | agentSlice = append(agentSlice, agent)
237 | }
238 |
239 | // Filter connected agents
240 | connectedAgents := make([]utils.Agent, 0, len(agentSlice))
241 | for _, agent := range agentSlice {
242 | if agent.Connected {
243 | // Ensure LastSeen is not zero
244 | if agent.LastSeen.IsZero() {
245 | agent.LastSeen = time.Now()
246 | }
247 |
248 | // Initialize maps if nil
249 | if agent.CloneStatus == nil {
250 | agent.CloneStatus = make(map[string]int)
251 | }
252 |
253 | connectedAgents = append(connectedAgents, agent)
254 | }
255 | }
256 |
257 | utils.LogDebug(fmt.Sprintf("Returning %d connected agents", len(connectedAgents)))
258 | return connectedAgents, nil
259 | }
260 |
261 | func GetConnectedAgentsMap() (map[string]utils.Agent, error) {
262 | agents, err := GetConnectedAgents()
263 | if err != nil {
264 | utils.LogError("Error in GetConnectedAgents: " + err.Error())
265 | return nil, fmt.Errorf("failed to get connected agents: %v", err)
266 | }
267 |
268 | agentMap := make(map[string]utils.Agent)
269 | for _, agent := range agents {
270 | // Double check AgentId is not empty
271 | if agent.AgentId == "" {
272 | utils.LogError("Found agent with empty AgentId")
273 | continue
274 | }
275 | agentMap[agent.AgentId] = agent
276 | }
277 |
278 | // Log the result for debugging
279 | utils.LogDebug(fmt.Sprintf("Created agent map with %d entries", len(agentMap)))
280 |
281 | return agentMap, nil
282 | }
283 |
--------------------------------------------------------------------------------
/service/backend_utils.go:
--------------------------------------------------------------------------------
1 | package service
2 |
3 | import (
4 | "bytes"
5 | "encoding/json"
6 | "fmt"
7 | "io"
8 | "net/http"
9 |
10 | "github.com/xmigrate/blxrep/utils"
11 | )
12 |
13 | func GetAgentConfigFromBackend(token, url string) (utils.ApiResponse, error) {
14 |
15 | req, err := http.NewRequest("GET", url, nil)
16 | if err != nil {
17 | return utils.ApiResponse{}, err
18 | }
19 |
20 | query := req.URL.Query()
21 | query.Add("hostname", "all")
22 | req.URL.RawQuery = query.Encode()
23 | req.Header.Set("accept", "application/json")
24 | req.Header.Set("token", token)
25 |
26 | client := &http.Client{}
27 | resp, err := client.Do(req)
28 | if err != nil {
29 | return utils.ApiResponse{}, err
30 | }
31 | defer resp.Body.Close()
32 | // Read response body
33 | body, err := io.ReadAll(resp.Body)
34 | if err != nil {
35 | return utils.ApiResponse{}, err
36 | }
37 |
38 | // Unmarshal (parse) the JSON response
39 | var apiResponse utils.ApiResponse
40 | err = json.Unmarshal(body, &apiResponse)
41 | if err != nil {
42 | return utils.ApiResponse{}, err
43 | }
44 |
45 | if resp.StatusCode != http.StatusOK {
46 | 		return utils.ApiResponse{}, fmt.Errorf("error fetching configuration, status code: %d", apiResponse.Status)
47 | }
48 | return apiResponse, nil
49 |
50 | }
51 |
52 | func GetAgentActionFromBackend(token string, url string, action_type utils.CONST_ACTION_TYPE) ([]utils.Action, error) {
53 |
54 | req, err := http.NewRequest("GET", url, nil)
55 | if err != nil {
56 | return []utils.Action{}, err
57 | }
58 | query := req.URL.Query()
59 | query.Add("action_status", string(utils.CONST_ACTION_STATUS_WAITING))
60 | query.Add("action_status", string(utils.CONST_ACTION_STATUS_PAUSED))
61 | query.Add("action_status", string(utils.CONST_ACTION_STATUS_RESUMED))
62 | req.URL.RawQuery = query.Encode()
63 | req.Header.Set("accept", "application/json")
64 | req.Header.Set("token", token)
65 |
66 | client := &http.Client{}
67 | resp, err := client.Do(req)
68 | if err != nil {
69 | return []utils.Action{}, err
70 | }
71 | defer resp.Body.Close()
72 |
73 | body, err := io.ReadAll(resp.Body)
74 | if err != nil {
75 | return []utils.Action{}, err
76 | }
77 |
78 | var action utils.ApiActionResponse
79 | err = json.Unmarshal(body, &action)
80 | if err != nil {
81 | return []utils.Action{}, err
82 | }
83 |
84 | if resp.StatusCode != http.StatusOK {
85 | 		return []utils.Action{}, fmt.Errorf("error fetching actions, status code: %d", action.Status)
86 | }
87 | actions := []utils.Action{}
88 | for _, backendAction := range action.Actions {
89 | disk := map[string]utils.DiskSnapshot{
90 | backendAction.Disk: {
91 | Name: backendAction.Disk,
92 | },
93 | }
94 | actions = append(actions, utils.Action{
95 | Id: backendAction.Id,
96 | ActionId: backendAction.ActionId,
97 | SnapshotId: backendAction.SnapshotId,
98 | AgentId: backendAction.AgentId,
99 | OsVersion: backendAction.OsVersion,
100 | FileSystem: backendAction.FileSystem,
101 | Distro: backendAction.Distro,
102 | Disk: disk,
103 | Action: backendAction.Action,
104 | ActionType: backendAction.ActionType,
105 | ActionStatus: backendAction.ActionStatus,
106 | ActionProgress: backendAction.ActionProgress,
107 | Hostname: backendAction.Hostname,
108 | TargetName: backendAction.TargetName,
109 | TimeCreated: backendAction.TimeCreated,
110 | TimeStarted: backendAction.TimeStarted,
111 | TimeUpdated: backendAction.TimeUpdated,
112 | TimeFinished: backendAction.TimeFinished,
113 | SourceFilePath: backendAction.SourceFilePath,
114 | TargetFilePath: backendAction.TargetFilePath,
115 | })
116 | }
117 | return actions, nil
118 |
119 | }
120 |
121 | func PushActionToBackend(token string, url string, action utils.ActionPutRequest) error {
122 |
123 | reqBody, err := json.Marshal(action)
124 | if err != nil {
125 | return err
126 | }
127 |
128 | req, err := http.NewRequest("PUT", url, bytes.NewBuffer(reqBody))
129 |
130 | if err != nil {
131 | return err
132 | }
133 | req.Header.Set("Content-Type", "application/json")
134 | req.Header.Set("token", token)
135 |
136 | client := &http.Client{}
137 | resp, err := client.Do(req)
138 | if err != nil {
139 | return err
140 | }
141 | defer resp.Body.Close()
142 |
143 | body, err := io.ReadAll(resp.Body)
144 | if err != nil {
145 | return err
146 | }
147 | utils.LogDebug(fmt.Sprintf("Response: %s", string(body)))
148 | if resp.StatusCode != http.StatusAccepted {
149 | 		return fmt.Errorf("error pushing action, response: %s", string(body))
150 | }
151 | return nil
152 | }
153 |
154 | func PostActionToBackend(token string, url string, action utils.ActionPostRequest) (string, error) {
155 | reqBody, err := json.Marshal(action)
156 | if err != nil {
157 | return "", err
158 | }
159 |
160 | req, err := http.NewRequest("POST", url, bytes.NewBuffer(reqBody))
161 | if err != nil {
162 | return "", err
163 | }
164 | req.Header.Set("Content-Type", "application/json")
165 | req.Header.Set("token", token)
166 |
167 | client := &http.Client{}
168 | resp, err := client.Do(req)
169 | if err != nil {
170 | return "", err
171 | }
172 | defer resp.Body.Close()
173 |
174 | body, err := io.ReadAll(resp.Body)
175 | if err != nil {
176 | return "", err
177 | }
178 |
179 | if resp.StatusCode != http.StatusCreated {
180 | 		return "", fmt.Errorf("error posting action, response: %s", string(body))
181 | }
182 | var actionId utils.ActionIdResponse
183 | err = json.Unmarshal(body, &actionId)
184 | if err != nil {
185 | return "", err
186 | }
187 | return actionId.ActionId, nil
188 | }
189 |
--------------------------------------------------------------------------------
/service/dirtyblock_db_utils.go:
--------------------------------------------------------------------------------
1 | package service
2 |
3 | import (
4 | "encoding/json"
5 | "fmt"
6 | "strings"
7 | "time"
8 |
9 | "github.com/xmigrate/blxrep/storage"
10 | "github.com/xmigrate/blxrep/storage/boltdb"
11 | "github.com/xmigrate/blxrep/utils"
12 | )
13 |
14 | // getDirtyBlockDBInstance returns a new instance of the BoltDB service
15 | func getDirtyBlockDBInstance() storage.Service {
16 | dbPath := utils.AppConfiguration.DataDir + "/xmdirtyblocks.db"
17 | utils.LogDebug(fmt.Sprintf("Getting DirtyBlock DB instance at: %s", dbPath))
18 | return boltdb.New(dbPath)
19 | }
20 |
21 | // DirtyBlock represents a block that needs to be retried
22 | type DirtyBlock struct {
23 | BlockNumber int64 `json:"block_number"`
24 | TimeCreated time.Time `json:"time_created"`
25 | LastRetried time.Time `json:"last_retried"`
26 | RetryCount int `json:"retry_count"`
27 | AgentID string `json:"agent_id"`
28 | DiskPath string `json:"disk_path"`
29 | }
30 |
31 | // AddDirtyBlock adds a new dirty block to the database
32 | func AddDirtyBlock(agentID, diskPath string, blockNum int64) error {
33 | db := getDirtyBlockDBInstance()
34 | if err := db.Open(); err != nil {
35 | utils.LogError(fmt.Sprintf("DirtyBlock DB: Failed to open DB for adding dirty block: %v", err))
36 | return err
37 | }
38 | defer func() {
39 | if err := db.Close(); err != nil {
40 | utils.LogError("DirtyBlock DB: unable to close DB: " + err.Error())
41 | }
42 | }()
43 |
44 | block := DirtyBlock{
45 | BlockNumber: blockNum,
46 | TimeCreated: time.Now().UTC(),
47 | LastRetried: time.Now().UTC(),
48 | RetryCount: 1,
49 | AgentID: agentID,
50 | DiskPath: diskPath,
51 | }
52 |
53 | key := fmt.Sprintf("%s_%s_%d", agentID, diskPath, blockNum)
54 | blockData, err := json.Marshal(block)
55 | if err != nil {
56 | utils.LogError(fmt.Sprintf("DirtyBlock DB: Failed to marshal dirty block data: %v", err))
57 | return err
58 | }
59 |
60 | utils.LogDebug(fmt.Sprintf("DirtyBlock DB: Adding dirty block - Key: %s, Block: %+v", key, block))
61 | err = db.Insert(key, blockData)
62 | if err != nil {
63 | utils.LogError(fmt.Sprintf("DirtyBlock DB: Failed to insert dirty block into DB: %v", err))
64 | return err
65 | }
66 |
67 | utils.LogDebug(fmt.Sprintf("DirtyBlock DB: Successfully added dirty block - Key: %s", key))
68 | return nil
69 | }
70 |
71 | // GetDirtyBlocks retrieves all dirty blocks for a specific agent and disk path
72 | func GetDirtyBlocks(agentID, diskPath string) ([]DirtyBlock, error) {
73 | db := getDirtyBlockDBInstance()
74 | if err := db.Open(); err != nil {
75 | utils.LogError(fmt.Sprintf("DirtyBlock DB: Failed to open DB for getting dirty blocks: %v", err))
76 | return nil, err
77 | }
78 | defer func() {
79 | if err := db.Close(); err != nil {
80 | utils.LogError("DirtyBlock DB: unable to close DB: " + err.Error())
81 | }
82 | }()
83 |
84 | utils.LogDebug(fmt.Sprintf("DirtyBlock DB: Getting dirty blocks for agent: %s, disk: %s", agentID, diskPath))
85 |
86 | allBlocks, err := db.SelectAll(-1)
87 | if err != nil {
88 | utils.LogError(fmt.Sprintf("DirtyBlock DB: Failed to select all blocks from DB: %v", err))
89 | return nil, err
90 | }
91 |
92 | utils.LogDebug(fmt.Sprintf("DirtyBlock DB: Found %d total blocks in DB", len(allBlocks)))
93 |
94 | var blocks []DirtyBlock
95 | prefix := fmt.Sprintf("%s_%s_", agentID, diskPath)
96 |
97 | for key, blockData := range allBlocks {
98 | // Unmarshal the key since BoltDB stores it as JSON
99 | var keyStr string
100 | if err := json.Unmarshal([]byte(key), &keyStr); err != nil {
101 | utils.LogError(fmt.Sprintf("DirtyBlock DB: Failed to unmarshal key: %v", err))
102 | continue
103 | }
104 |
105 | // Check if the key starts with our prefix
106 | if strings.HasPrefix(keyStr, prefix) {
107 | var block DirtyBlock
108 | if err := json.Unmarshal(blockData, &block); err != nil {
109 | utils.LogError(fmt.Sprintf("DirtyBlock DB: Failed to unmarshal block data: %v", err))
110 | continue
111 | }
112 | blocks = append(blocks, block)
113 | }
114 | }
115 |
116 | utils.LogDebug(fmt.Sprintf("DirtyBlock DB: Found %d dirty blocks for agent: %s, disk: %s", len(blocks), agentID, diskPath))
117 | return blocks, nil
118 | }
119 |
120 | // RemoveBlock removes a specific dirty block from the database
121 | func RemoveBlock(agentID, diskPath string, blockNum int64) error {
122 | db := getDirtyBlockDBInstance()
123 | if err := db.Open(); err != nil {
124 | utils.LogError(fmt.Sprintf("DirtyBlock DB: Failed to open DB for removing dirty block: %v", err))
125 | return err
126 | }
127 | defer func() {
128 | if err := db.Close(); err != nil {
129 | utils.LogError("DirtyBlock DB: unable to close DB: " + err.Error())
130 | }
131 | }()
132 |
133 | key := fmt.Sprintf("%s_%s_%d", agentID, diskPath, blockNum)
134 | err := db.Delete(key)
135 | if err != nil {
136 | if err.Error() == "key not found" {
137 | utils.LogDebug(fmt.Sprintf("DirtyBlock DB: Block already removed - Key: %s", key))
138 | return nil
139 | }
140 | utils.LogError(fmt.Sprintf("DirtyBlock DB: Failed to remove block: %v", err))
141 | return err
142 | }
143 |
144 | utils.LogDebug(fmt.Sprintf("DirtyBlock DB: Successfully removed block - Key: %s", key))
145 | return nil
146 | }
147 |
148 | // UpdateBlockRetry updates the retry count and last retried time for a specific block
149 | func UpdateBlockRetry(agentID, diskPath string, blockNum int64) error {
150 | db := getDirtyBlockDBInstance()
151 | if err := db.Open(); err != nil {
152 | utils.LogError(fmt.Sprintf("DirtyBlock DB: Failed to open DB for updating retry count: %v", err))
153 | return err
154 | }
155 | defer func() {
156 | if err := db.Close(); err != nil {
157 | utils.LogError("DirtyBlock DB: unable to close DB: " + err.Error())
158 | }
159 | }()
160 |
161 | key := fmt.Sprintf("%s_%s_%d", agentID, diskPath, blockNum)
162 | utils.LogDebug(fmt.Sprintf("DirtyBlock DB: Updating retry count for block - Key: %s", key))
163 |
164 | blockData, err := db.Get(key)
165 | if err != nil {
166 | 		if strings.Contains(err.Error(), "does not exists") {
167 | utils.LogDebug(fmt.Sprintf("DirtyBlock DB: Block not found for retry update - Key: %s", key))
168 | return nil // Not an error, block might have been removed
169 | }
170 | utils.LogError(fmt.Sprintf("DirtyBlock DB: Failed to get block for retry update: %v", err))
171 | return err
172 | }
173 |
174 | var block DirtyBlock
175 | if err := json.Unmarshal(blockData, &block); err != nil {
176 | utils.LogError(fmt.Sprintf("DirtyBlock DB: Failed to unmarshal block data for retry update: %v", err))
177 | return err
178 | }
179 |
180 | block.LastRetried = time.Now().UTC()
181 | block.RetryCount++
182 |
183 | utils.LogDebug(fmt.Sprintf("DirtyBlock DB: Updating block retry count - Key: %s, New Count: %d", key, block.RetryCount))
184 |
185 | updatedBlockData, err := json.Marshal(block)
186 | if err != nil {
187 | utils.LogError(fmt.Sprintf("DirtyBlock DB: Failed to marshal updated block data: %v", err))
188 | return err
189 | }
190 |
191 | if err := db.Insert(key, updatedBlockData); err != nil {
192 | utils.LogError(fmt.Sprintf("DirtyBlock DB: Failed to save updated retry count: %v", err))
193 | return err
194 | }
195 |
196 | utils.LogDebug(fmt.Sprintf("DirtyBlock DB: Successfully updated retry count - Key: %s", key))
197 | return nil
198 | }
199 |
200 | // IsBlockDirty checks if a specific block is marked as dirty
201 | func IsBlockDirty(agentID, diskPath string, blockNum int64) (bool, error) {
202 | db := getDirtyBlockDBInstance()
203 | if err := db.Open(); err != nil {
204 | utils.LogError(fmt.Sprintf("DirtyBlock DB: Failed to open DB for checking dirty block: %v", err))
205 | return false, err
206 | }
207 | defer func() {
208 | if err := db.Close(); err != nil {
209 | utils.LogError("DirtyBlock DB: unable to close DB: " + err.Error())
210 | }
211 | }()
212 |
213 | key := fmt.Sprintf("%s_%s_%d", agentID, diskPath, blockNum)
214 | utils.LogDebug(fmt.Sprintf("DirtyBlock DB: Checking if block is dirty - Key: %s", key))
215 |
216 | _, err := db.Get(key)
217 | if err != nil {
218 | if strings.Contains(err.Error(), "does not exists") {
219 | // This is an expected case for most blocks
220 | return false, nil
221 | }
222 | utils.LogError(fmt.Sprintf("DirtyBlock DB: Error checking dirty block: %v", err))
223 | return false, err
224 | }
225 |
226 | utils.LogDebug(fmt.Sprintf("DirtyBlock DB: Block found in dirty list - Key: %s", key))
227 | return true, nil
228 | }
229 |
230 | // GetAllDirtyBlocks retrieves all dirty blocks from the database
231 | func GetAllDirtyBlocks() ([]DirtyBlock, error) {
232 | db := getDirtyBlockDBInstance()
233 | if err := db.Open(); err != nil {
234 | utils.LogError(fmt.Sprintf("DirtyBlock DB: Failed to open DB for getting all dirty blocks: %v", err))
235 | return nil, err
236 | }
237 | defer func() {
238 | if err := db.Close(); err != nil {
239 | utils.LogError("DirtyBlock DB: unable to close DB: " + err.Error())
240 | }
241 | }()
242 |
243 | allBlocks, err := db.SelectAll(-1)
244 | if err != nil {
245 | return nil, err
246 | }
247 |
248 | var blocks []DirtyBlock
249 | for _, blockData := range allBlocks {
250 | var block DirtyBlock
251 | if err := json.Unmarshal(blockData, &block); err != nil {
252 | continue
253 | }
254 | blocks = append(blocks, block)
255 | }
256 |
257 | return blocks, nil
258 | }
259 |
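260 | // Typical lifecycle sketch for the helpers above (agent, disk and block
261 | // values are hypothetical):
262 | //
263 | //	_ = AddDirtyBlock("agent-1", "/dev/xvda", 42)         // record a block that failed to sync
264 | //	dirty, _ := IsBlockDirty("agent-1", "/dev/xvda", 42)  // true until the entry is removed
265 | //	_ = UpdateBlockRetry("agent-1", "/dev/xvda", 42)      // bump retry count on each re-send
266 | //	_ = RemoveBlock("agent-1", "/dev/xvda", 42)           // clear the entry once the block succeeds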
--------------------------------------------------------------------------------
/storage/boltdb/boltdb.go:
--------------------------------------------------------------------------------
1 | package boltdb
2 |
3 | import (
4 | "encoding/json"
5 | "fmt"
6 |
7 | "github.com/xmigrate/blxrep/storage"
8 | "github.com/xmigrate/blxrep/utils"
9 |
10 | bolt "go.etcd.io/bbolt"
11 | )
12 |
13 | type BoltDB struct {
14 | DB *bolt.DB
15 | Path string
16 | }
17 |
18 | func New(path string) storage.Service {
19 | return &BoltDB{Path: path}
20 | }
21 |
22 | func (db *BoltDB) Open() error {
23 |
24 | dbInstance, err := bolt.Open(db.Path, 0600, nil)
25 | if err != nil {
26 | utils.LogError("unable to open db for path: " + db.Path)
27 | return err
28 | }
29 |
30 | db.DB = dbInstance
31 |
32 | err = db.DB.Update(func(tx *bolt.Tx) error {
33 | _, err := tx.CreateBucketIfNotExists([]byte("default"))
34 | if err != nil {
35 | return err
36 | }
37 | return nil
38 | })
39 |
40 | if err != nil {
41 | utils.LogError("unable to create 'default' bucket: " + err.Error())
42 | return err
43 | }
44 |
45 | return nil
46 | }
47 |
48 | func (db *BoltDB) Close() error { return db.DB.Close() }
49 |
50 | func (db *BoltDB) SelectAll(limit int) (map[string][]byte, error) {
51 | if limit == 0 {
52 | limit = 10
53 | }
54 |
55 | blocks := make(map[string][]byte, 0)
56 |
57 | err := db.DB.View(func(tx *bolt.Tx) error {
58 | b := tx.Bucket([]byte("default"))
59 | if b == nil {
60 | return fmt.Errorf("Bucket %q not found!", "default")
61 | }
62 |
63 | c := b.Cursor()
64 | for k, v := c.First(); k != nil; k, v = c.Next() {
65 | if limit != -1 && len(blocks) >= limit {
66 | break
67 | }
68 |
69 | blocks[string(k)] = v
70 |
71 | }
72 |
73 | return nil
74 | })
75 |
76 | return blocks, err
77 | }
78 |
79 | func (db *BoltDB) Insert(id string, data []byte) error {
80 |
81 | return db.DB.Update(func(tx *bolt.Tx) error {
82 | b, err := tx.CreateBucketIfNotExists([]byte("default"))
83 | if err != nil {
84 | return err
85 | }
86 |
87 | key, err := json.Marshal(id)
88 | if err != nil {
89 | return err
90 | }
91 |
92 | return b.Put(key, data)
93 | })
94 | }
95 |
96 | func (db *BoltDB) Get(agentId string) ([]byte, error) {
97 |
98 | var agent []byte
99 |
100 | err := db.DB.View(func(tx *bolt.Tx) error {
101 | // Retrieve the bucket (assumes it exists)
102 | bucket := tx.Bucket([]byte("default"))
103 |
104 | key, err := json.Marshal(agentId)
105 | if err != nil {
106 | return err
107 | }
108 |
109 | // Check if the key exists
110 | if value := bucket.Get(key); value != nil {
111 | // Key exists, print the value
112 |
113 | agent = value
114 | } else {
115 | // Key does not exist
116 | return fmt.Errorf("key %s does not exists", agentId)
117 | }
118 |
119 | return nil
120 | })
121 |
122 | return agent, err
123 | }
124 |
125 | func (db *BoltDB) GetKeyCount() (uint64, error) {
126 | var keyCount uint64
127 |
128 | err := db.DB.View(func(tx *bolt.Tx) error {
129 | b := tx.Bucket([]byte("default"))
130 | if b == nil {
131 | return fmt.Errorf("Bucket %s not found!", "default")
132 | }
133 |
134 | c := b.Cursor()
135 | for k, _ := c.First(); k != nil; k, _ = c.Next() {
136 | keyCount++
137 | }
138 |
139 | return nil
140 | })
141 |
142 | if err != nil {
143 | return 0, err
144 | }
145 |
146 | return keyCount, nil
147 | }
148 |
149 | func (db *BoltDB) Delete(agentId string) error {
150 | return db.DB.Update(func(tx *bolt.Tx) error {
151 | b := tx.Bucket([]byte("default"))
152 | if b == nil {
153 | return fmt.Errorf("Bucket %s not found!", "default")
154 | }
155 |
156 | key, err := json.Marshal(agentId)
157 | if err != nil {
158 | return err
159 | }
160 |
161 | return b.Delete(key)
162 | })
163 | }
164 |
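165 | // Minimal usage sketch of this storage.Service implementation (the database
166 | // path is hypothetical; keys are stored JSON-encoded in the "default" bucket):
167 | //
168 | //	db := boltdb.New("/var/lib/blxrep/example.db")
169 | //	if err := db.Open(); err != nil {
170 | //		// handle error
171 | //	}
172 | //	defer db.Close()
173 | //	_ = db.Insert("agent-1", []byte("payload"))
174 | //	raw, _ := db.Get("agent-1")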
--------------------------------------------------------------------------------
/storage/storage.go:
--------------------------------------------------------------------------------
1 | package storage
2 |
3 | type Storage struct {
4 | }
5 |
6 | type Service interface {
7 | Open() error
8 | Close() error
9 | SelectAll(limit int) (map[string][]byte, error)
10 | Insert(string, []byte) error
11 | Get(string) ([]byte, error)
12 | GetKeyCount() (uint64, error)
13 | Delete(string) error
14 | }
15 |
--------------------------------------------------------------------------------
/tui/actions.go:
--------------------------------------------------------------------------------
1 | package tui
2 |
3 | import (
4 | "fmt"
5 | "sort"
6 | "strings"
7 | "time"
8 |
9 | "github.com/xmigrate/blxrep/pkg/dispatcher"
10 | "github.com/xmigrate/blxrep/service"
11 |
12 | "github.com/gdamore/tcell/v2"
13 | "github.com/rivo/tview"
14 | )
15 |
16 | func (t *DispatcherTUI) showActions() {
17 | t.viewState = viewActions
18 | t.updateInfoBar([]string{
19 | "[green][white] Pause",
20 | "[green][white] Resume",
21 | "[green][white] Quit",
22 | "[green][white] Back",
23 | })
24 | // Create a new table for actions
25 | actionsTable := tview.NewTable().
26 | SetBorders(false).
27 | SetSelectable(true, false)
28 |
29 | actionsTable.SetTitle("").
30 | SetBorder(true).
31 | SetTitleColor(tcell.ColorPurple).
32 | 		SetBorderColor(tcell.ColorGreen)
33 |
34 | // Set up table headers
35 | actionsTable.SetCell(0, 0, tview.NewTableCell("Agent ID").SetTextColor(tcell.ColorYellow).SetSelectable(false))
36 | actionsTable.SetCell(0, 1, tview.NewTableCell("Action ID").SetTextColor(tcell.ColorYellow).SetSelectable(false))
37 | actionsTable.SetCell(0, 2, tview.NewTableCell("Action").SetTextColor(tcell.ColorYellow).SetSelectable(false))
38 | actionsTable.SetCell(0, 3, tview.NewTableCell("Status").SetTextColor(tcell.ColorYellow).SetSelectable(false))
39 | actionsTable.SetCell(0, 4, tview.NewTableCell("Type").SetTextColor(tcell.ColorYellow).SetSelectable(false))
40 | actionsTable.SetCell(0, 5, tview.NewTableCell("Created").SetTextColor(tcell.ColorYellow).SetSelectable(false))
41 | actionsTable.SetCell(0, 6, tview.NewTableCell("Progress").SetTextColor(tcell.ColorYellow).SetSelectable(false))
42 |
43 | t.content.Clear()
44 | t.content.AddItem(actionsTable, 0, 1, true)
45 | t.table = actionsTable
46 | t.app.SetFocus(actionsTable)
47 |
48 | // Start a goroutine to update the actions periodically
49 | 	go t.updateActionsPeriodically(actionsTable)
50 | }
51 |
52 | func (t *DispatcherTUI) updateActionsPeriodically(actionsTable *tview.Table) {
53 | ticker := time.NewTicker(1 * time.Second)
54 | defer ticker.Stop()
55 |
56 | for {
57 | select {
58 | case <-ticker.C:
59 | t.app.QueueUpdateDraw(func() {
60 | t.updateActionsTable(actionsTable)
61 | })
62 | }
63 | }
64 | }
65 |
66 | func (t *DispatcherTUI) updateActionsTable(actionsTable *tview.Table) {
67 | // t.tableMutex.Lock()
68 | // defer t.tableMutex.Unlock()
69 |
70 | actions, err := service.GetAllActions(100)
71 | if err != nil {
72 | t.showError(fmt.Sprintf("Error fetching actions: %v", err))
73 | return
74 | }
75 |
76 | // Sort actions by TimeStarted in descending order (most recent first)
77 | sort.Slice(actions, func(i, j int) bool {
78 | return actions[i].TimeStarted > actions[j].TimeStarted
79 | })
80 |
81 | // Update or add rows for each action
82 | for i, action := range actions {
83 | row := i + 1 // +1 because row 0 is the header
84 |
85 | // Ensure we have enough rows
86 | for r := actionsTable.GetRowCount(); r <= row; r++ {
87 | actionsTable.SetCell(r, 0, tview.NewTableCell(""))
88 | actionsTable.SetCell(r, 1, tview.NewTableCell(""))
89 | actionsTable.SetCell(r, 2, tview.NewTableCell(""))
90 | actionsTable.SetCell(r, 3, tview.NewTableCell(""))
91 | actionsTable.SetCell(r, 4, tview.NewTableCell(""))
92 | actionsTable.SetCell(r, 5, tview.NewTableCell(""))
93 | actionsTable.SetCell(r, 6, tview.NewTableCell(""))
94 | }
95 |
96 | actionsTable.GetCell(row, 0).SetText(action.AgentId)
97 | actionsTable.GetCell(row, 1).SetText(action.Id)
98 | actionsTable.GetCell(row, 2).SetText(action.Action)
99 | actionsTable.GetCell(row, 3).SetText(action.ActionStatus)
100 | actionsTable.GetCell(row, 4).SetText(action.ActionType)
101 | actionsTable.GetCell(row, 5).SetText(time.Unix(action.TimeStarted, 0).Format("2006-01-02 15:04:05"))
102 | progressBar := t.createProgressBar(action.ActionProgress, 20) // 20 is the width of the progress bar
103 |
104 | actionsTable.GetCell(row, 6).SetText(progressBar)
105 |
106 | }
107 |
108 | // Clear any extra rows
109 | for row := len(actions) + 1; row < actionsTable.GetRowCount(); row++ {
110 | for col := 0; col < actionsTable.GetColumnCount(); col++ {
111 | actionsTable.GetCell(row, col).SetText("")
112 | }
113 | }
114 |
115 | if len(actions) == 0 {
116 | actionsTable.GetCell(1, 0).SetText("No actions in progress").SetTextColor(tcell.ColorRed)
117 | }
118 | }
119 |
120 | func (t *DispatcherTUI) createProgressBar(progress int, width int) string {
121 | if progress < 0 {
122 | progress = 0
123 | }
124 | if progress > 100 {
125 | progress = 100
126 | }
127 |
128 | completed := int(float64(width) * float64(progress) / 100.0)
129 | remaining := width - completed
130 |
131 | bar := "["
132 | bar += strings.Repeat("[green]█[white]", completed)
133 | if remaining > 0 {
134 | bar += strings.Repeat("[green]░[white]", remaining)
135 | }
136 | bar += "]"
137 |
138 | return fmt.Sprintf("%s %3d%%", bar, progress)
139 | }
140 |
141 | func (t *DispatcherTUI) pauseSelectedAction(agentID string, actionID string, actionStatus string) {
142 | // Debug: Print the action details
143 | t.showMessage(fmt.Sprintf("Debug: AgentID: '%s', ActionID: '%s', Status: '%s'", agentID, actionID, actionStatus))
144 |
145 | if agentID == "" || actionID == "" {
146 | t.showError("Error: AgentID or ActionID is empty")
147 | return
148 | }
149 |
150 | if actionStatus == "" {
151 | t.showError("Error: Action status is empty")
152 | return
153 | }
154 |
155 | if strings.ToLower(actionStatus) != "in progress" {
156 | t.showMessage(fmt.Sprintf("Only actions in progress can be paused. Current status: %s", actionStatus))
157 | return
158 | }
159 |
160 | // Send pause message to the agent
161 | err := dispatcher.PauseAction(actionID, agentID)
162 | if err != nil {
163 | t.showError(fmt.Sprintf("Failed to pause action: %v", err))
164 | return
165 | }
166 |
167 | t.showMessage("Action paused successfully")
168 | }
169 |
170 | func (t *DispatcherTUI) resumeSelectedAction(agentID string, actionID string, actionStatus string) {
171 | // Debug: Print the action details
172 | t.showMessage(fmt.Sprintf("Debug: AgentID: '%s', ActionID: '%s', Status: '%s'", agentID, actionID, actionStatus))
173 |
174 | if agentID == "" || actionID == "" {
175 | t.showError("Error: AgentID or ActionID is empty")
176 | return
177 | }
178 |
179 | if actionStatus == "" {
180 | t.showError("Error: Action status is empty")
181 | return
182 | }
183 |
184 | if strings.ToLower(actionStatus) != "paused" {
185 | t.showMessage(fmt.Sprintf("Only paused actions can be resumed. Current status: %s", actionStatus))
186 | return
187 | }
188 |
189 | // Send resume message to the agent
190 | err := dispatcher.ResumeAction(actionID, agentID)
191 | if err != nil {
192 | t.showError(fmt.Sprintf("Failed to resume action: %v", err))
193 | return
194 | }
195 |
196 | t.showMessage("Action resumed successfully")
197 | }
198 |
--------------------------------------------------------------------------------
/tui/agents.go:
--------------------------------------------------------------------------------
1 | package tui
2 |
3 | import (
4 | "fmt"
5 |
6 | "github.com/gdamore/tcell/v2"
7 | "github.com/rivo/tview"
8 | "github.com/xmigrate/blxrep/service"
9 | )
10 |
11 | func (t *DispatcherTUI) showAgents() {
12 | t.viewState = viewAgents
13 | t.updateInfoBar([]string{
14 | "[green]<a>[white] Actions",
15 | "[green]<enter>[white] Browse",
16 | "[green]<q>[white] Quit",
17 | })
18 | var err error
19 | t.agents, err = service.GetConnectedAgentsMap()
20 |
21 | if err != nil {
22 | t.showError(fmt.Sprintf("Error fetching connected agents: %v", err))
23 | return
24 | }
25 |
26 | agentsTable := tview.NewTable().
27 | SetBorders(false).
28 | SetSelectable(true, false)
29 |
30 | agentsTable.SetTitle("<Agents>").
31 | SetBorder(true).SetTitleColor(tcell.ColorPurple).SetBorderColor(tcell.ColorGreen)
32 |
33 | agentsTable.SetCell(0, 0, tview.NewTableCell("Agent ID").SetTextColor(tcell.ColorYellow).SetSelectable(false))
34 | agentsTable.SetCell(0, 1, tview.NewTableCell("Status").SetTextColor(tcell.ColorYellow).SetSelectable(false))
35 |
36 | row := 1
37 | for id, agent := range t.agents {
38 | agentsTable.SetCell(row, 0, tview.NewTableCell(id))
39 | status := "Disconnected"
40 | if agent.Connected {
41 | status = "Connected"
42 | }
43 | agentsTable.SetCell(row, 1, tview.NewTableCell(status))
44 | row++
45 | }
46 |
47 | if len(t.agents) == 0 {
48 | agentsTable.SetCell(1, 0, tview.NewTableCell("No connected agents found").SetTextColor(tcell.ColorRed))
49 | }
50 |
51 | agentsTable.Select(1, 0).SetFixed(1, 0)
52 |
53 | t.content.Clear()
54 | t.content.AddItem(agentsTable, 0, 1, true)
55 | t.table = agentsTable
56 | t.app.SetFocus(agentsTable)
57 | }
58 |
59 | func (t *DispatcherTUI) showCheckpointsForSelectedAgent() {
60 | if t.table == nil {
61 | t.showError("No agent table available")
62 | return
63 | }
64 |
65 | row, _ := t.table.GetSelection()
66 | if row == 0 {
67 | t.showError("Please select an agent")
68 | return
69 | }
70 |
71 | agentID := t.table.GetCell(row, 0).Text
72 | t.currentAgentID = agentID
73 | t.showDisks(agentID)
74 | }
75 |
--------------------------------------------------------------------------------
/tui/checkpoints.go:
--------------------------------------------------------------------------------
1 | package tui
2 |
3 | import (
4 | "fmt"
5 | "time"
6 |
7 | "github.com/gdamore/tcell/v2"
8 | "github.com/rivo/tview"
9 | "github.com/xmigrate/blxrep/pkg/dispatcher"
10 | "github.com/xmigrate/blxrep/utils"
11 | )
12 |
13 | func (t *DispatcherTUI) showCheckpoints(agentID string, disk string) {
14 | checkpoints, err := dispatcher.ShowCheckpoints("", "", agentID, t.dataDir, disk)
15 | if err != nil {
16 | t.showError(fmt.Sprintf("Error fetching checkpoints: %v", err))
17 | return
18 | }
19 |
20 | t.viewState = viewCheckpoints
21 | t.updateInfoBar([]string{
22 | "[green]<enter>[white] Select",
23 | "[green]<esc>[white] Back",
24 | "[green]<q>[white] Quit",
25 | })
26 | t.currentAgentID = agentID
27 |
28 | t.table.Clear()
29 | t.table.SetTitle(fmt.Sprintf("<Checkpoints: %s>", agentID))
30 |
31 | t.table.SetCell(0, 0, tview.NewTableCell("Timestamp").SetTextColor(tcell.ColorYellow).SetSelectable(false))
32 | t.table.SetCell(0, 1, tview.NewTableCell("Filename").SetTextColor(tcell.ColorYellow).SetSelectable(false))
33 |
34 | for i, cp := range checkpoints {
35 | t.table.SetCell(i+1, 0, tview.NewTableCell(cp.Timestamp.Format("2006-01-02 15:04:05")))
36 | t.table.SetCell(i+1, 1, tview.NewTableCell(cp.Filename))
37 | }
38 |
39 | if len(checkpoints) == 0 {
40 | t.table.SetCell(1, 0, tview.NewTableCell("No checkpoints found").SetTextColor(tcell.ColorRed))
41 | }
42 |
43 | t.table.Select(1, 0).SetFixed(1, 0)
44 | t.app.SetFocus(t.table)
45 | }
46 |
47 | func (t *DispatcherTUI) selectCheckpoint() {
48 | row, _ := t.table.GetSelection()
49 | if row > 0 && row <= t.table.GetRowCount() {
50 | t.selectedCheckpoint = &utils.Checkpoint{
51 | Filename: t.table.GetCell(row, 1).Text,
52 | Timestamp: t.parseTimestamp(t.table.GetCell(row, 0).Text),
53 | }
54 | t.showCheckpointOptions()
55 | }
56 | }
57 |
58 | func (t *DispatcherTUI) showCheckpointOptions() {
59 | t.viewState = viewCheckpointOptions
60 |
61 | t.table.Clear()
62 | t.table.SetBorders(false)
63 |
64 | t.table.SetTitle(fmt.Sprintf("<Checkpoint: %s>", t.selectedCheckpoint.Filename))
65 |
66 | t.table.SetCell(0, 0, tview.NewTableCell("Option").SetTextColor(tcell.ColorYellow).SetSelectable(false))
67 | t.table.SetCell(0, 1, tview.NewTableCell("Description").SetTextColor(tcell.ColorYellow).SetSelectable(false))
68 |
69 | t.table.SetCell(1, 0, tview.NewTableCell("Restore").SetTextColor(tcell.ColorWhite))
70 | t.table.SetCell(1, 1, tview.NewTableCell("Restore this checkpoint"))
71 |
72 | t.table.SetCell(2, 0, tview.NewTableCell("Browse").SetTextColor(tcell.ColorWhite))
73 | t.table.SetCell(2, 1, tview.NewTableCell("Browse files in this checkpoint"))
74 |
75 | t.table.Select(1, 0).SetFixed(1, 0)
76 |
77 | t.content.Clear()
78 | t.content.AddItem(t.table, 0, 1, true)
79 | t.app.SetFocus(t.table)
80 | }
81 |
82 | func (t *DispatcherTUI) selectOption() {
83 | row, _ := t.table.GetSelection()
84 | switch row {
85 | case 1:
86 | t.restoreCheckpoint()
87 | case 2:
88 | t.browseCheckpoint()
89 | }
90 | }
91 |
92 | func (t *DispatcherTUI) restoreCheckpoint() {
93 | t.viewState = viewRestoreOptions
94 |
95 | t.table.Clear()
96 | t.table.SetBorders(false)
97 |
98 | t.table.SetTitle(fmt.Sprintf("<Restore: %s>", t.selectedCheckpoint.Filename))
99 |
100 | t.table.SetCell(0, 0, tview.NewTableCell("Option").SetTextColor(tcell.ColorYellow).SetSelectable(false))
101 | t.table.SetCell(0, 1, tview.NewTableCell("Description").SetTextColor(tcell.ColorYellow).SetSelectable(false))
102 |
103 | t.table.SetCell(1, 0, tview.NewTableCell("Restore Partition").SetTextColor(tcell.ColorWhite))
104 | t.table.SetCell(1, 1, tview.NewTableCell("Restore a specific partition from the checkpoint"))
105 |
106 | t.table.SetCell(2, 0, tview.NewTableCell("Restore Disk").SetTextColor(tcell.ColorWhite))
107 | t.table.SetCell(2, 1, tview.NewTableCell("Restore the entire disk from the checkpoint"))
108 |
109 | t.table.Select(1, 0).SetFixed(1, 0)
110 |
111 | t.content.Clear()
112 | t.content.AddItem(t.table, 0, 1, true)
113 | t.app.SetFocus(t.table)
114 |
115 | }
116 |
117 | func (t *DispatcherTUI) parseTimestamp(timeStr string) time.Time {
118 | timestamp, err := time.Parse("2006-01-02 15:04:05", timeStr)
119 | if err != nil {
120 | // Handle the error, maybe log it or use a default time
121 | return time.Now()
122 | }
123 | return timestamp
124 | }
125 |
--------------------------------------------------------------------------------
/tui/disks.go:
--------------------------------------------------------------------------------
1 | package tui
2 |
3 | import (
4 | "fmt"
5 |
6 | "github.com/gdamore/tcell/v2"
7 | "github.com/rivo/tview"
8 | "github.com/xmigrate/blxrep/pkg/dispatcher"
9 | )
10 |
11 | func (t *DispatcherTUI) showDisks(agentID string) {
12 | disks, err := dispatcher.ShowDisks(agentID, t.dataDir)
13 | if err != nil {
14 | t.showError(fmt.Sprintf("Error fetching disks: %v", err))
15 | return
16 | }
17 |
18 | t.viewState = viewDisks
19 | t.updateInfoBar([]string{
20 | "[green]<enter>[white] Select",
21 | "[green]<esc>[white] Back",
22 | "[green]<q>[white] Quit",
23 | })
24 | t.currentAgentID = agentID
25 |
26 | t.table.Clear()
27 | t.table.SetTitle(fmt.Sprintf("<Disks: %s>", agentID))
28 |
29 | t.table.SetCell(0, 0, tview.NewTableCell("Disk").SetTextColor(tcell.ColorYellow).SetSelectable(false))
30 |
31 | for i, disk := range disks {
32 | t.table.SetCell(i+1, 0, tview.NewTableCell(disk))
33 | }
34 |
35 | if len(disks) == 0 {
36 | t.table.SetCell(1, 0, tview.NewTableCell("No disks found").SetTextColor(tcell.ColorRed))
37 | }
38 |
39 | t.table.Select(1, 0).SetFixed(1, 0)
40 | t.app.SetFocus(t.table)
41 | }
42 |
43 | func (t *DispatcherTUI) selectDisk() {
44 | row, _ := t.table.GetSelection()
45 | if row > 0 && row <= t.table.GetRowCount() {
46 | t.selectedDisk = t.table.GetCell(row, 0).Text
47 | t.showCheckpoints(t.currentAgentID, t.selectedDisk)
48 | }
49 | }
50 |
51 | func (t *DispatcherTUI) showDiskOptions() {
52 | t.viewState = viewDiskOptions
53 |
54 | t.table.Clear()
55 | t.table.SetBorders(false)
56 |
57 | t.table.SetTitle(fmt.Sprintf("<Disk: %s>", t.selectedDisk))
58 |
59 | t.table.SetCell(0, 0, tview.NewTableCell("Option").SetTextColor(tcell.ColorYellow).SetSelectable(false))
60 | t.table.SetCell(0, 1, tview.NewTableCell("Description").SetTextColor(tcell.ColorYellow).SetSelectable(false))
61 |
62 | t.table.SetCell(1, 0, tview.NewTableCell("Restore").SetTextColor(tcell.ColorWhite))
63 | t.table.SetCell(1, 1, tview.NewTableCell("Restore this disk"))
64 |
65 | t.table.SetCell(2, 0, tview.NewTableCell("Browse").SetTextColor(tcell.ColorWhite))
66 | t.table.SetCell(2, 1, tview.NewTableCell("Browse files in this disk"))
67 |
68 | t.table.Select(1, 0).SetFixed(1, 0)
69 |
70 | t.content.Clear()
71 | t.content.AddItem(t.table, 0, 1, true)
72 | t.app.SetFocus(t.table)
73 | }
74 |
75 | func (t *DispatcherTUI) selectDisks() {
76 | row, _ := t.table.GetSelection()
77 | switch row {
78 | case 1:
79 | t.restoreDiskOptions()
80 | case 2:
81 | t.showCheckpoints(t.currentAgentID, t.selectedDisk)
82 | }
83 | }
84 |
85 | func (t *DispatcherTUI) restoreDiskOptions() {
86 | t.viewState = viewRestoreOptions
87 |
88 | t.table.Clear()
89 | t.table.SetBorders(false)
90 |
91 | t.table.SetTitle(fmt.Sprintf("<Restore: %s>", t.selectedDisk))
92 |
93 | t.table.SetCell(0, 0, tview.NewTableCell("Option").SetTextColor(tcell.ColorYellow).SetSelectable(false))
94 | t.table.SetCell(0, 1, tview.NewTableCell("Description").SetTextColor(tcell.ColorYellow).SetSelectable(false))
95 |
96 | t.table.SetCell(1, 0, tview.NewTableCell("Restore Partition").SetTextColor(tcell.ColorWhite))
97 | t.table.SetCell(1, 1, tview.NewTableCell("Restore a specific partition from the disk"))
98 |
99 | t.table.SetCell(2, 0, tview.NewTableCell("Restore Disk").SetTextColor(tcell.ColorWhite))
100 | t.table.SetCell(2, 1, tview.NewTableCell("Restore the entire disk from the backup"))
101 |
102 | t.table.Select(1, 0).SetFixed(1, 0)
103 |
104 | t.content.Clear()
105 | t.content.AddItem(t.table, 0, 1, true)
106 | t.app.SetFocus(t.table)
107 |
108 | }
109 |
--------------------------------------------------------------------------------
/tui/dispatcher.go:
--------------------------------------------------------------------------------
1 | package tui
2 |
3 | import (
4 | "fmt"
5 | "os"
6 | "os/exec"
7 | "path/filepath"
8 | "strings"
9 | "sync"
10 |
11 | "github.com/xmigrate/blxrep/utils"
12 |
13 | "github.com/gdamore/tcell/v2"
14 | "github.com/rivo/tview"
15 | )
16 |
17 | type DispatcherTUI struct {
18 | app *tview.Application
19 | mainFlex *tview.Flex
20 | infoBarLeft *tview.TextView
21 | infoBarRight *tview.TextView
22 | content *tview.Flex
23 | cmdInput *tview.InputField
24 | dataDir string
25 | agents map[string]utils.Agent
26 | viewState viewState
27 | table *tview.Table
28 | selectedCheckpoint *utils.Checkpoint
29 | selectedDisk string
30 | currentAgentID string
31 | currentDir string
32 | partitions []Partition
33 | loopDev string
34 | mountDir string
35 | isRestoreFormActive bool
36 | tableMutex sync.RWMutex
37 | }
38 |
39 | type viewState int
40 |
41 | const (
42 | viewAgents viewState = iota
43 | viewCheckpoints
44 | viewCheckpointOptions
45 | viewPartitions
46 | viewFileBrowser
47 | viewRestoreOptions
48 | viewActions
49 | viewDisks
50 | viewDiskOptions
51 | )
52 |
53 | func RunDispatcherTUI(dataDir string) {
54 | utils.AppConfiguration.DataDir = dataDir
55 | tui := &DispatcherTUI{
56 | app: tview.NewApplication(),
57 | dataDir: dataDir,
58 | viewState: viewAgents,
59 | }
60 |
61 | tui.setup()
62 |
63 | if err := tui.app.Run(); err != nil {
64 | panic(err)
65 | }
66 | }
67 |
68 | func (t *DispatcherTUI) setup() {
69 | dataDir := fmt.Sprintf("Data Dir: %s", t.dataDir)
70 | banner := utils.GetDiskBanner()
71 | infoText := fmt.Sprintf("%s \n %s", banner, dataDir)
72 |
73 | t.infoBarLeft = tview.NewTextView().
74 | SetTextColor(tcell.ColorPurple).
75 | SetDynamicColors(true).
76 | SetRegions(true).
77 | SetWrap(false).
78 | SetText(infoText)
79 | t.infoBarLeft.SetBorder(false)
80 |
81 | t.infoBarRight = tview.NewTextView().
82 | SetDynamicColors(true).
83 | SetRegions(true).
84 | SetWrap(false).
85 | SetTextAlign(tview.AlignLeft)
86 | t.infoBarRight.SetBorder(false)
87 | infoBarFlex := tview.NewFlex().
88 | AddItem(t.infoBarLeft, 0, 2, false).
89 | AddItem(t.infoBarRight, 0, 1, false)
90 |
91 | t.content = tview.NewFlex().SetDirection(tview.FlexColumn)
92 |
93 | t.cmdInput = tview.NewInputField().
94 | SetLabel(" Command: ").
95 | SetFieldWidth(0).
96 | SetDoneFunc(t.handleCommand).SetFieldBackgroundColor(tcell.ColorBlack).SetLabelColor(tcell.ColorWhite)
97 | // Calculate height based on banner lines plus data dir line
98 | bannerLines := len(strings.Split(banner, "\n"))
99 | totalHeight := bannerLines + 2 // +2 for data dir line and padding
100 |
101 | t.mainFlex = tview.NewFlex().
102 | SetDirection(tview.FlexRow).
103 | AddItem(infoBarFlex, totalHeight, 1, false). // Dynamic height based on content
104 | AddItem(t.content, 0, 1, true)
105 |
106 | t.app.SetRoot(t.mainFlex, true)
107 |
108 | t.app.SetInputCapture(t.globalInputHandler)
109 |
110 | t.showAgents()
111 | }
112 |
113 | func (t *DispatcherTUI) updateInfoBar(shortcuts []string) {
114 | banner := utils.GetDiskBanner()
115 | dataDir := fmt.Sprintf("[orange]Data Dir:[white] %s", t.dataDir)
116 |
117 | // Update left column
118 | leftText := fmt.Sprintf("%s\n%s", banner, dataDir)
119 | t.infoBarLeft.SetText(leftText)
120 |
121 | // Update right column
122 | rightText := "\n\n\n" + strings.Join(shortcuts, "\n")
123 | t.infoBarRight.SetText(rightText)
124 | }
125 |
126 | func (t *DispatcherTUI) globalInputHandler(event *tcell.EventKey) *tcell.EventKey {
127 | if t.isRestoreFormActive {
128 | return event
129 | }
130 | switch event.Key() {
131 | case tcell.KeyRune:
132 | switch event.Rune() {
133 | case ':':
134 | t.showCommandInput()
135 | return nil
136 | case 'q', 'Q':
137 | t.app.Stop()
138 | return nil
139 |
140 | case 'a', 'A':
141 | if t.viewState == viewAgents {
142 | t.showActions()
143 | return nil
144 | }
145 | case 'p', 'P':
146 | if t.viewState == viewActions {
147 | row, _ := t.table.GetSelection()
148 | agentID := t.table.GetCell(row, 0).Text
149 | actionID := t.table.GetCell(row, 1).Text
150 | actionStatus := t.table.GetCell(row, 3).Text
151 | t.pauseSelectedAction(agentID, actionID, actionStatus)
152 | return nil
153 | }
154 | case 'r', 'R':
155 | if t.viewState == viewActions {
156 | row, _ := t.table.GetSelection()
157 | agentID := t.table.GetCell(row, 0).Text
158 | actionID := t.table.GetCell(row, 1).Text
159 | actionStatus := t.table.GetCell(row, 3).Text
160 | t.resumeSelectedAction(agentID, actionID, actionStatus)
161 | return nil
162 | }
163 | }
164 | case tcell.KeyEscape:
165 | switch t.viewState {
166 | case viewActions:
167 | t.showAgents()
168 | return nil
169 | case viewCheckpoints:
170 | t.showDisks(t.currentAgentID)
171 | return nil
172 | case viewDisks:
173 | t.showAgents()
174 | return nil
175 | case viewDiskOptions:
176 | t.showDisks(t.currentAgentID)
177 | return nil
178 | case viewCheckpointOptions:
179 | t.showCheckpoints(t.currentAgentID, t.selectedDisk)
180 | return nil
181 | case viewPartitions:
182 | exec.Command("losetup", "-d", t.loopDev).Run()
183 | t.showCheckpointOptions()
184 | return nil
185 | case viewFileBrowser:
186 | // Go back to partition selection when in file browser
187 | exec.Command("umount", t.mountDir).Run()
188 | t.selectPartitionTUI()
189 | return nil
190 | case viewRestoreOptions:
191 | t.showCheckpointOptions()
192 | return nil
193 |
194 | }
195 | case tcell.KeyEnter:
196 | switch t.viewState {
197 | case viewAgents:
198 | t.showCheckpointsForSelectedAgent()
199 | return nil
200 | case viewCheckpoints:
201 | t.selectCheckpoint()
202 | return nil
203 | case viewDisks:
204 | t.selectDisk()
205 | return nil
206 | case viewDiskOptions:
207 | t.selectOption()
208 | return nil
209 | case viewCheckpointOptions:
210 | t.selectOption()
211 | return nil
212 | case viewPartitions:
213 | row, _ := t.table.GetSelection()
214 | if row > 0 && row <= len(t.partitions) {
215 | selectedPartition := t.partitions[row-1]
216 | t.mountSelectedPartition(selectedPartition)
217 | }
218 | return nil
219 | case viewFileBrowser:
220 | table := t.content.GetItem(0).(*tview.Table)
221 | row, _ := table.GetSelection()
222 | if row == 1 {
223 | // Go to parent directory
224 | parentDir := filepath.Dir(t.currentDir)
225 | if parentDir != t.currentDir {
226 | t.updateFileTable(table, parentDir)
227 | t.currentDir = parentDir
228 | }
229 | } else if row > 1 {
230 | cellContent := table.GetCell(row, 0).Text
231 | fileName := strings.TrimPrefix(cellContent, "[::b]") // Remove bold formatting if present
232 | filePath := filepath.Join(t.currentDir, fileName)
233 | fileInfo, err := os.Stat(filePath)
234 | if err != nil {
235 | t.showError(fmt.Sprintf("Error accessing file: %v", err))
236 | return nil
237 | }
238 | if fileInfo.IsDir() {
239 | t.updateFileTable(table, filePath)
240 | t.currentDir = filePath
241 | } else {
242 | // You can add file viewing functionality here if needed
243 | t.showMessage(fmt.Sprintf("Selected file: %s", fileName))
244 | }
245 | }
246 | return nil
247 | case viewRestoreOptions:
248 | row, _ := t.table.GetSelection()
249 | switch row {
250 | case 1:
251 | t.restorePartition()
252 | case 2:
253 | t.restoreDisk()
254 | }
255 | return nil
256 | }
257 | }
258 | return event
259 | }
260 |
261 | func (t *DispatcherTUI) restoreDisk() {
262 | // Implement full disk restoration logic here
263 | t.showMessage("Restoring full disk... (Not yet implemented)")
264 | }
265 |
266 | func (t *DispatcherTUI) showCommandInput() {
267 | t.mainFlex.RemoveItem(t.content)
268 | t.mainFlex.AddItem(t.cmdInput, 1, 1, true)
269 | t.mainFlex.AddItem(t.content, 0, 1, false)
270 | t.app.SetFocus(t.cmdInput)
271 | }
272 |
273 | func (t *DispatcherTUI) hideCommandInput() {
274 | t.mainFlex.RemoveItem(t.cmdInput)
275 | t.mainFlex.RemoveItem(t.content)
276 | t.mainFlex.AddItem(t.content, 0, 1, true)
277 | t.app.SetFocus(t.content)
278 | }
279 |
280 | func (t *DispatcherTUI) handleCommand(key tcell.Key) {
281 | if key != tcell.KeyEnter {
282 | return
283 | }
284 |
285 | cmd := strings.TrimSpace(t.cmdInput.GetText())
286 | t.cmdInput.SetText("")
287 | t.hideCommandInput()
288 |
289 | switch cmd {
290 | case "refresh":
291 | t.showAgents()
292 | default:
293 | t.showError(fmt.Sprintf("Unknown command: %s", cmd))
294 | }
295 | }
296 |
297 | func (t *DispatcherTUI) showError(message string) {
298 | t.table.Clear()
299 | t.table.SetBorders(false)
300 | t.table.SetTitle("Error")
301 |
302 | // Split the message into words
303 | words := strings.Fields(message)
304 | lines := []string{}
305 | currentLine := ""
306 |
307 | // Create lines with a maximum width of 80 characters
308 | for _, word := range words {
309 | if len(currentLine)+len(word)+1 > 80 {
310 | lines = append(lines, strings.TrimSpace(currentLine))
311 | currentLine = word
312 | } else {
313 | if currentLine != "" {
314 | currentLine += " "
315 | }
316 | currentLine += word
317 | }
318 | }
319 | if currentLine != "" {
320 | lines = append(lines, strings.TrimSpace(currentLine))
321 | }
322 |
323 | // Add each line to the table
324 | for i, line := range lines {
325 | t.table.SetCell(i, 0, tview.NewTableCell(line).SetTextColor(tcell.ColorRed))
326 | }
327 |
328 | t.app.SetFocus(t.table)
329 | }
330 |
331 | func (t *DispatcherTUI) showMessage(message string) {
332 | modal := tview.NewModal().
333 | SetText(message).
334 | AddButtons([]string{"OK"}).
335 | SetDoneFunc(func(buttonIndex int, buttonLabel string) {
336 | t.app.SetRoot(t.mainFlex, true)
337 | })
338 |
339 | t.app.SetRoot(modal, true)
340 | }
341 |
--------------------------------------------------------------------------------
/tui/filebrowser.go:
--------------------------------------------------------------------------------
1 | package tui
2 |
3 | import (
4 | "fmt"
5 | "os"
6 | "path/filepath"
7 | "strings"
8 | "time"
9 |
10 | "github.com/xmigrate/blxrep/service"
11 | "github.com/xmigrate/blxrep/utils"
12 |
13 | "github.com/gdamore/tcell/v2"
14 | "github.com/rivo/tview"
15 | )
16 |
17 | func (t *DispatcherTUI) showFileBrowser(rootDir string) {
18 | t.viewState = viewFileBrowser
19 | t.updateInfoBar([]string{
20 | "[green]<ctrl+r>[white] Restore",
21 | "[green]<enter>[white] View/Browse",
22 | })
23 | table := tview.NewTable().
24 | SetBorders(false).
25 | SetSelectable(true, false)
26 |
27 | table.SetTitle("<Files>").
28 | SetBorder(true).SetBorderColor(tcell.ColorGreen)
29 |
30 | t.updateFileTable(table, rootDir)
31 |
32 | t.content.Clear()
33 | t.content.AddItem(table, 0, 1, true)
34 | t.app.SetFocus(table)
35 |
36 | table.SetInputCapture(func(event *tcell.EventKey) *tcell.EventKey {
37 | if event.Key() == tcell.KeyCtrlR {
38 | row, _ := table.GetSelection()
39 | if row > 0 { // Ignore header row
40 | cellContent := table.GetCell(row, 0).Text
41 | fileName := strings.TrimPrefix(cellContent, "[::b]")
42 | filePath := filepath.Join(t.currentDir, fileName)
43 | t.showRestorePrompt(filePath)
44 | return nil
45 | }
46 | }
47 | return event
48 | })
49 | }
50 |
51 | func (t *DispatcherTUI) showRestorePrompt(sourcePath string) {
52 | t.isRestoreFormActive = true
53 |
54 | form := tview.NewForm()
55 |
56 | form.AddInputField("Source Path", sourcePath, 0, nil, nil)
57 | form.AddInputField("Destination Path", sourcePath, 0, nil, nil)
58 |
59 | form.AddButton("Restore", func() {
60 | sourceInput := form.GetFormItemByLabel("Source Path").(*tview.InputField)
61 | destInput := form.GetFormItemByLabel("Destination Path").(*tview.InputField)
62 | source := sourceInput.GetText()
63 | dest := destInput.GetText()
64 | t.showRestoreConfirmation(source, dest)
65 | })
66 |
67 | form.AddButton("Cancel", func() {
68 | t.isRestoreFormActive = false
69 | t.app.SetRoot(t.mainFlex, true)
70 | })
71 |
72 | form.SetBorder(true).SetTitle("Create Restore Action")
73 |
74 | // Set custom input capture for the form
75 | form.SetInputCapture(func(event *tcell.EventKey) *tcell.EventKey {
76 | switch event.Key() {
77 | case tcell.KeyEscape:
78 | // Exit the form
79 | t.isRestoreFormActive = false
80 | t.app.SetRoot(t.mainFlex, true)
81 | return nil
82 | }
83 | // For all other keys, including Enter, let the form handle them
84 | return event
85 | })
86 |
87 | t.app.SetRoot(form, true)
88 | }
89 |
90 | func (t *DispatcherTUI) showRestoreConfirmation(sourcePath, destPath string) {
91 | modal := tview.NewModal().
92 | SetText(fmt.Sprintf("Are you sure you want to restore?\nFrom: %s\nTo: %s", sourcePath, destPath)).
93 | AddButtons([]string{"Restore", "Cancel"}).
94 | SetDoneFunc(func(buttonIndex int, buttonLabel string) {
95 | if buttonLabel == "Restore" {
96 | utils.LogDebug("Restore button pressed")
97 | actionId, err := t.createRestoreAction(sourcePath, destPath)
98 | if err != nil {
99 | utils.LogError(fmt.Sprintf("Failed to create restore action: %v", err))
100 | t.showError(fmt.Sprintf("Failed to create restore action: %v", err))
101 | } else {
102 | utils.LogDebug(fmt.Sprintf("Restore action created with ID: %s", actionId))
103 | t.showRestoreProgress(actionId)
104 | }
105 | } else {
106 | utils.LogDebug("Cancel button pressed")
107 | t.isRestoreFormActive = false
108 | t.app.SetRoot(t.mainFlex, true)
109 | }
110 | })
111 |
112 | t.app.SetRoot(modal, true)
113 | }
114 |
115 | func (t *DispatcherTUI) showRestoreProgress(actionId string) {
116 | utils.LogDebug(fmt.Sprintf("Showing restore progress for action ID: %s", actionId))
117 |
118 | progressText := tview.NewTextView().
119 | SetDynamicColors(true).
120 | SetTextAlign(tview.AlignCenter).
121 | SetText("Status: [yellow]Starting[white]\nProgress: [yellow]0%[white]")
122 |
123 | // Custom progress bar
124 | progressBar := tview.NewTextView().
125 | SetDynamicColors(true).
126 | SetTextAlign(tview.AlignLeft)
127 |
128 | progressFlex := tview.NewFlex().
129 | SetDirection(tview.FlexRow).
130 | AddItem(tview.NewTextView().SetText("Restore in progress...").SetTextAlign(tview.AlignCenter), 0, 1, false).
131 | AddItem(progressText, 0, 1, false).
132 | AddItem(progressBar, 1, 1, false)
133 |
134 | progressFlex.SetBorder(true).SetTitle("Restore Progress")
135 |
136 | t.app.SetRoot(progressFlex, true)
137 |
138 | go func() {
139 | ticker := time.NewTicker(1 * time.Second)
140 | defer ticker.Stop()
141 |
142 | for {
143 | select {
144 | case <-ticker.C:
145 | action, err := service.GetAction(actionId)
146 | if err != nil {
147 | utils.LogError(fmt.Sprintf("Error fetching action: %v", err))
148 | t.app.QueueUpdateDraw(func() {
149 | progressText.SetText(fmt.Sprintf("Error: %v", err))
150 | })
151 | return
152 | }
153 |
154 | t.app.QueueUpdateDraw(func() {
155 | status := action.ActionStatus
156 | progress := action.ActionProgress
157 |
158 | statusColor := "yellow"
159 | if status == string(utils.CONST_ACTION_STATUS_COMPLETED) {
160 | statusColor = "green"
161 | } else if status == string(utils.CONST_ACTION_STATUS_FAILED) {
162 | statusColor = "red"
163 | }
164 |
165 | progressText.SetText(fmt.Sprintf("Status: [%s]%s[white]\nProgress: [%s]%d%%[white]", statusColor, status, statusColor, progress))
166 |
167 | // Update custom progress bar
168 | _, _, width, _ := progressFlex.GetInnerRect()
169 | progressBarWidth := width
170 | completedWidth := int(float64(progress) / 100 * float64(progressBarWidth))
171 | progressBar.SetText(fmt.Sprintf("[green]%s[white]%s",
172 | strings.Repeat("█", completedWidth),
173 | strings.Repeat("░", progressBarWidth-completedWidth)))
174 |
175 | if status == string(utils.CONST_ACTION_STATUS_COMPLETED) || status == string(utils.CONST_ACTION_STATUS_FAILED) {
176 | time.Sleep(2 * time.Second) // Show the final status for 2 seconds
177 | t.isRestoreFormActive = false
178 | t.app.SetRoot(t.mainFlex, true)
179 | return
180 | }
181 | })
182 | }
183 | }
184 | }()
185 |
186 | }
187 |
188 | func (t *DispatcherTUI) createRestoreAction(sourcePath, destPath string) (string, error) {
189 | action := utils.Action{
190 | Id: utils.GenerateUUID(),
191 | AgentId: t.currentAgentID,
192 | Action: string(utils.CONST_AGENT_ACTION_RESTORE),
193 | ActionType: string(utils.CONST_AGENT_ACTION_RESTORE),
194 | ActionStatus: string(utils.CONST_ACTION_STATUS_WAITING),
195 | SourceFilePath: sourcePath,
196 | TargetFilePath: destPath,
197 | TimeCreated: utils.NewUTCTime(time.Now()),
198 | }
199 |
200 | err := service.InsertOrUpdateAction(action)
201 | if err != nil {
202 | return "", err
203 | }
204 |
205 | return action.Id, nil
206 | }
207 |
208 | func (t *DispatcherTUI) updateFileTable(table *tview.Table, dir string) {
209 | table.Clear()
210 |
211 | table.SetCell(0, 0, tview.NewTableCell("Name").SetTextColor(tcell.ColorYellow).SetSelectable(false))
212 | table.SetCell(0, 1, tview.NewTableCell("Type").SetTextColor(tcell.ColorYellow).SetSelectable(false))
213 | table.SetCell(0, 2, tview.NewTableCell("Size").SetTextColor(tcell.ColorYellow).SetSelectable(false))
214 | table.SetCell(0, 3, tview.NewTableCell("Modified").SetTextColor(tcell.ColorYellow).SetSelectable(false))
215 |
216 | files, err := os.ReadDir(dir)
217 | if err != nil {
218 | t.showError(fmt.Sprintf("Error reading directory: %v", err))
219 | return
220 | }
221 |
222 | table.SetCell(1, 0, tview.NewTableCell("..").SetTextColor(tcell.ColorDarkCyan))
223 | table.SetCell(1, 1, tview.NewTableCell("Directory"))
224 | table.SetCell(1, 2, tview.NewTableCell(""))
225 | table.SetCell(1, 3, tview.NewTableCell(""))
226 |
227 | row := 2
228 | for _, file := range files {
229 | info, err := file.Info()
230 | if err != nil {
231 | continue
232 | }
233 |
234 | name := file.Name()
235 | fileType := "File"
236 | size := fmt.Sprintf("%d", info.Size())
237 | modified := info.ModTime().Format("2006-01-02 15:04:05")
238 |
239 | if file.IsDir() {
240 | fileType = "Directory"
241 | size = ""
242 | name = "[::b]" + name // Make directories bold
243 | }
244 |
245 | table.SetCell(row, 0, tview.NewTableCell(name).SetTextColor(tcell.ColorWhite))
246 | table.SetCell(row, 1, tview.NewTableCell(fileType))
247 | table.SetCell(row, 2, tview.NewTableCell(size))
248 | table.SetCell(row, 3, tview.NewTableCell(modified))
249 |
250 | row++
251 | }
252 |
253 | table.SetTitle(fmt.Sprintf("<%s>", dir)).SetBorderColor(tcell.ColorGreen)
254 | table.Select(1, 0).SetFixed(1, 0).SetDoneFunc(func(key tcell.Key) {
255 | if key == tcell.KeyEnter {
256 | row, _ := table.GetSelection()
257 | if row == 1 {
258 | // Go to parent directory
259 | parentDir := filepath.Dir(dir)
260 | if parentDir != dir {
261 | t.updateFileTable(table, parentDir)
262 | }
263 | } else if row > 1 && row <= len(files)+1 {
264 | selectedFile := files[row-2]
265 | if selectedFile.IsDir() {
266 | t.updateFileTable(table, filepath.Join(dir, selectedFile.Name()))
267 | } else {
268 | // You can add file viewing functionality here if needed
269 | // t.showMessage(fmt.Sprintf("Selected file: %s", selectedFile.Name()))
270 | }
271 | }
272 | }
273 | })
274 | }
275 |
--------------------------------------------------------------------------------
/utils/banner.go:
--------------------------------------------------------------------------------
1 | package utils
2 |
3 | import (
4 | "fmt"
5 |
6 | "github.com/fatih/color"
7 | )
8 |
9 | const logoTemplate = `
10 | _ _
11 | | | | |
12 | | |__ | | _ _ ____ _____ ____
13 | | _ \ | | ( \ / ) / ___)| ___ || _ \
14 | | |_) )| | ) X ( | | | ____|| |_| |
15 | |____/ \_)(_/ \_)|_| |_____)| __/
16 | |_|
17 | made with ♥ by team xmigrate
18 | `
19 |
20 | func PrintAnimatedLogo() {
21 | magenta := color.New(color.FgMagenta).SprintFunc()
22 |
23 | // Clear the console (this may not work on all systems)
24 | fmt.Print("\033[H\033[2J")
25 |
26 | logo := fmt.Sprint(logoTemplate)
27 | fmt.Println(magenta(logo))
28 |
29 | }
30 |
31 | func GetDiskBanner() string {
32 | return `
33 | _ _
34 | | | | |
35 | | |__ | | _ _ ____ _____ ____
36 | | _ \ | | ( \ / ) / ___)| ___ || _ \
37 | | |_) )| | ) X ( | | | ____|| |_| |
38 | |____/ \_)(_/ \_)|_| |_____)| __/
39 | |_|
40 | `
41 | }
42 |
--------------------------------------------------------------------------------
/utils/bpf_bpfel_x86.o:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/xmigrate/blxrep/3a931c6b216b0ed28f18729ba9fc626f42b9fcf5/utils/bpf_bpfel_x86.o
--------------------------------------------------------------------------------
/utils/bpf_helpers.go:
--------------------------------------------------------------------------------
1 | // Code generated by bpf2go; DO NOT EDIT.
2 | //go:build 386 || amd64
3 |
4 | package utils
5 |
6 | import (
7 | "bytes"
8 | _ "embed"
9 | "fmt"
10 | "io"
11 |
12 | "github.com/cilium/ebpf"
13 | )
14 |
15 | // loadBpf returns the embedded CollectionSpec for bpf.
16 | func loadBpf() (*ebpf.CollectionSpec, error) {
17 | reader := bytes.NewReader(_BpfBytes)
18 | spec, err := ebpf.LoadCollectionSpecFromReader(reader)
19 | if err != nil {
20 | return nil, fmt.Errorf("can't load bpf: %w", err)
21 | }
22 |
23 | return spec, err
24 | }
25 |
26 | // loadBpfObjects loads bpf and converts it into a struct.
27 | //
28 | // The following types are suitable as obj argument:
29 | //
30 | // *bpfObjects
31 | // *bpfPrograms
32 | // *bpfMaps
33 | //
34 | // See ebpf.CollectionSpec.LoadAndAssign documentation for details.
35 | func LoadBpfObjects(obj interface{}, opts *ebpf.CollectionOptions) error {
36 | spec, err := loadBpf()
37 | if err != nil {
38 | return err
39 | }
40 |
41 | return spec.LoadAndAssign(obj, opts)
42 | }
43 |
44 | // bpfSpecs contains maps and programs before they are loaded into the kernel.
45 | //
46 | // It can be passed ebpf.CollectionSpec.Assign.
47 | type bpfSpecs struct {
48 | bpfProgramSpecs
49 | bpfMapSpecs
50 | }
51 |
52 | // bpfSpecs contains programs before they are loaded into the kernel.
53 | //
54 | // It can be passed ebpf.CollectionSpec.Assign.
55 | type bpfProgramSpecs struct {
56 | BlockRqComplete *ebpf.ProgramSpec `ebpf:"block_rq_complete"`
57 | }
58 |
59 | // bpfMapSpecs contains maps before they are loaded into the kernel.
60 | //
61 | // It can be passed ebpf.CollectionSpec.Assign.
62 | type bpfMapSpecs struct {
63 | Events *ebpf.MapSpec `ebpf:"events"`
64 | TargetDiskMap *ebpf.MapSpec `ebpf:"target_disk_map"`
65 | }
66 |
67 | // bpfObjects contains all objects after they have been loaded into the kernel.
68 | //
69 | // It can be passed to loadBpfObjects or ebpf.CollectionSpec.LoadAndAssign.
70 | type BpfObjects struct {
71 | bpfPrograms
72 | bpfMaps
73 | }
74 |
75 | func (o *BpfObjects) Close() error {
76 | return _BpfClose(
77 | &o.bpfPrograms,
78 | &o.bpfMaps,
79 | )
80 | }
81 |
82 | // bpfMaps contains all maps after they have been loaded into the kernel.
83 | //
84 | // It can be passed to loadBpfObjects or ebpf.CollectionSpec.LoadAndAssign.
85 | type bpfMaps struct {
86 | Events *ebpf.Map `ebpf:"events"`
87 | TargetDiskMap *ebpf.Map `ebpf:"target_disk_map"`
88 | }
89 |
90 | func (m *bpfMaps) Close() error {
91 | return _BpfClose(
92 | m.Events,
93 | m.TargetDiskMap,
94 | )
95 | }
96 |
97 | // bpfPrograms contains all programs after they have been loaded into the kernel.
98 | //
99 | // It can be passed to loadBpfObjects or ebpf.CollectionSpec.LoadAndAssign.
100 | type bpfPrograms struct {
101 | BlockRqComplete *ebpf.Program `ebpf:"block_rq_complete"`
102 | }
103 |
104 | func (p *bpfPrograms) Close() error {
105 | return _BpfClose(
106 | p.BlockRqComplete,
107 | )
108 | }
109 |
110 | func _BpfClose(closers ...io.Closer) error {
111 | for _, closer := range closers {
112 | if err := closer.Close(); err != nil {
113 | return err
114 | }
115 | }
116 | return nil
117 | }
118 |
119 | // Do not access this directly.
120 | //
121 | //go:embed bpf_bpfel_x86.o
122 | var _BpfBytes []byte
123 |
--------------------------------------------------------------------------------
/utils/config.go:
--------------------------------------------------------------------------------
1 | package utils
2 |
3 | var AppConfiguration AppConfig
4 | var AgentConfiguration AgentConfig
5 | var PublicKeyData []byte
6 |
--------------------------------------------------------------------------------
/utils/constants.go:
--------------------------------------------------------------------------------
1 | package utils
2 |
3 | type CONST_AGENT_ACTION string
4 | type CONST_DISPATCHER_ACTION string
5 | type CONST_ACTION_TYPE string
6 | type CONST_ACTION_STATUS_TYPE string
7 |
8 | const (
9 | CONST_ADHOC_ACTION CONST_ACTION_TYPE = "ADHOC_ACTION"
10 | CONST_SCHEDULED_ACTION CONST_ACTION_TYPE = "SCHEDULED_ACTION"
11 | CONST_START_ACTION CONST_AGENT_ACTION = "started"
12 | CONST_COMPRESS_ACTION CONST_AGENT_ACTION = "compress"
13 | CONST_AGENT_ACTION_CLONE CONST_AGENT_ACTION = "clone"
14 | CONST_AGENT_ACTION_PAUSE CONST_AGENT_ACTION = "pause"
15 | CONST_AGENT_ACTION_RESUME CONST_AGENT_ACTION = "resume"
16 | CONST_AGENT_ACTION_LIVE CONST_AGENT_ACTION = "live"
17 | CONST_AGENT_ACTION_STOP_LIVE CONST_AGENT_ACTION = "stop_live"
18 | CONST_AGENT_ACTION_SYNC CONST_AGENT_ACTION = "sync"
19 | CONST_AGENT_ACTION_RESTORE CONST_AGENT_ACTION = "restore"
20 | CONST_AGENT_ACTION_PREPARE CONST_AGENT_ACTION = "prepare"
21 | CONST_AGENT_ACTION_PARTITION_RESTORE CONST_AGENT_ACTION = "partition_restore"
22 | CONST_ACTION_STATUS_IN_PROGRESS CONST_ACTION_STATUS_TYPE = "in_progress"
23 | CONST_ACTION_STATUS_COMPLETED CONST_ACTION_STATUS_TYPE = "completed"
24 | CONST_ACTION_STATUS_FAILED CONST_ACTION_STATUS_TYPE = "failed"
25 | CONST_ACTION_STATUS_PAUSED CONST_ACTION_STATUS_TYPE = "paused"
26 | CONST_ACTION_STATUS_WAITING CONST_ACTION_STATUS_TYPE = "waiting"
27 | CONST_ACTION_STATUS_RESUMED CONST_ACTION_STATUS_TYPE = "resumed"
28 | CONST_BLOCK_SIZE uint64 = 512
29 | CONST_CHANNEL_SIZE uint64 = 12000
30 | CONST_MAX_ACTIONS_TO_PROCESS uint32 = 1000
31 | Daily Frequency = "daily"
32 | Weekly Frequency = "weekly"
33 | Monthly Frequency = "monthly"
34 | )
35 |
--------------------------------------------------------------------------------
/utils/dev_info.go:
--------------------------------------------------------------------------------
1 | package utils
2 |
3 | import (
4 | "fmt"
5 | "os"
6 | "syscall"
7 | "unsafe"
8 | )
9 |
10 | const (
11 | BLKGETSIZE64 = 0x80081272
12 | )
13 |
14 | func GetTotalSectors(devicePath string) (uint64, error) {
15 | file, err := os.Open(devicePath)
16 | if err != nil {
17 | return 0, fmt.Errorf("failed to open device %s: %v", devicePath, err)
18 | }
19 | defer file.Close()
20 |
21 | var size uint64
22 | _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, file.Fd(), BLKGETSIZE64, uintptr(unsafe.Pointer(&size)))
23 | if errno != 0 {
24 | return 0, fmt.Errorf("ioctl error: %v", errno)
25 | }
26 |
27 | // Assuming 512-byte sectors, which is common
28 | sectorSize := uint64(512)
29 | totalSectors := size / sectorSize
30 |
31 | return totalSectors, nil
32 | }
33 |
--------------------------------------------------------------------------------
/utils/disk.go:
--------------------------------------------------------------------------------
1 | package utils
2 |
3 | import (
4 | "fmt"
5 | "os"
6 | "os/exec"
7 | "strings"
8 | )
9 |
10 | func MountImage(imagePath, mountDir string) (bool, error) {
11 | // Step 1: Create a loopback device
12 | loopDev, err := createLoopbackDevice(imagePath)
13 | if err != nil {
14 | return false, fmt.Errorf("failed to create loopback device: %v", err)
15 | }
16 | defer func() {
17 | if err != nil {
18 | // If an error occurred, try to clean up the loopback device
19 | exec.Command("losetup", "-d", loopDev).Run()
20 | }
21 | }()
22 |
23 | // Step 2: Mount the loopback device
24 | err = mountLoopbackDevice(loopDev, mountDir)
25 | if err != nil {
26 | exec.Command("losetup", "-d", loopDev).Run()
27 | return false, fmt.Errorf("failed to mount loopback device: %v", err)
28 | }
29 |
30 | fmt.Printf("Successfully mounted %s to %s using loopback device %s\n", imagePath, mountDir, loopDev)
31 | return true, nil
32 | }
33 |
34 | func createLoopbackDevice(imagePath string) (string, error) {
35 | cmd := exec.Command("losetup", "--partscan", "--find", "--show", imagePath)
36 | output, err := cmd.Output()
37 | if err != nil {
38 | return "", fmt.Errorf("failed to create loopback device: %v", err)
39 | }
40 | return strings.TrimSpace(string(output)), nil
41 | }
42 |
43 | func mountLoopbackDevice(loopDev, mountDir string) error {
44 | // Ensure the mount directory exists
45 | if err := os.MkdirAll(mountDir, 0755); err != nil {
46 | return fmt.Errorf("failed to create mount directory: %v", err)
47 | }
48 |
49 | cmd := exec.Command("mount", loopDev, mountDir)
50 | return cmd.Run()
51 | }
52 |
--------------------------------------------------------------------------------
/utils/duration.go:
--------------------------------------------------------------------------------
1 | package utils
2 |
3 | import (
4 | "fmt"
5 | "regexp"
6 | "strconv"
7 | "time"
8 | )
9 |
10 | // ParseDuration parses a duration string that supports h,m,s,d,w,y units
11 | func ParseDuration(duration string) (time.Duration, error) {
12 | // Regular expression to match number followed by unit
13 | re := regexp.MustCompile(`^(\d+)([hmsdwy])$`)
14 | matches := re.FindStringSubmatch(duration)
15 |
16 | if matches == nil {
17 | return 0, fmt.Errorf("invalid duration format: %s. Expected format: number followed by h,m,s,d,w, or y", duration)
18 | }
19 |
20 | value, err := strconv.Atoi(matches[1])
21 | if err != nil {
22 | return 0, fmt.Errorf("invalid number in duration: %v", err)
23 | }
24 |
25 | unit := matches[2]
26 |
27 | switch unit {
28 | case "h":
29 | return time.Duration(value) * time.Hour, nil
30 | case "m":
31 | return time.Duration(value) * time.Minute, nil
32 | case "s":
33 | return time.Duration(value) * time.Second, nil
34 | case "d":
35 | return time.Duration(value) * 24 * time.Hour, nil
36 | case "w":
37 | return time.Duration(value) * 7 * 24 * time.Hour, nil
38 | case "y":
39 | return time.Duration(value) * 365 * 24 * time.Hour, nil
40 | default:
41 | return 0, fmt.Errorf("unsupported duration unit: %s", unit)
42 | }
43 | }
44 |
--------------------------------------------------------------------------------
/utils/file_helper.go:
--------------------------------------------------------------------------------
1 | package utils
2 |
3 | import (
4 | "bufio"
5 | "io"
6 | "os"
7 |
8 | "github.com/google/uuid"
9 | )
10 |
11 | func CopyFile(source, destination string) error {
12 | srcFile, err := os.Open(source)
13 | if err != nil {
14 | return err
15 | }
16 | defer srcFile.Close()
17 |
18 | destFile, err := os.Create(destination)
19 | if err != nil {
20 | return err
21 | }
22 | defer destFile.Close()
23 |
24 | _, err = io.Copy(destFile, srcFile)
25 | if err != nil {
26 | return err
27 | }
28 |
29 | err = destFile.Sync()
30 | if err != nil {
31 | return err
32 | }
33 |
34 | return nil
35 | }
36 |
37 | func Contains(slice []string, item string) bool {
38 | for _, s := range slice {
39 | if s == item {
40 | return true
41 | }
42 | }
43 | return false
44 | }
45 |
46 | func GenerateUUID() string {
47 | id, err := uuid.NewRandom()
48 | if err != nil {
49 |
50 | return ""
51 | }
52 | return id.String()
53 | }
54 |
55 | func ReadLastLine(filename string) (string, error) {
56 | file, err := os.Open(filename)
57 | if err != nil {
58 | return "", err
59 | }
60 | defer file.Close()
61 |
62 | var lastLine string
63 | scanner := bufio.NewScanner(file)
64 | for scanner.Scan() {
65 | lastLine = scanner.Text()
66 | }
67 |
68 | if err := scanner.Err(); err != nil {
69 | return "", err
70 | }
71 |
72 | LogDebug("Last line: " + lastLine)
73 | return lastLine, nil
74 | }
75 |
--------------------------------------------------------------------------------
/utils/logger.go:
--------------------------------------------------------------------------------
1 | package utils
2 |
3 | import (
4 | "fmt"
5 | "log"
6 | "os"
7 | "path/filepath"
8 | "sync"
9 | )
10 |
11 | var (
12 | logFile *os.File
13 | logger *log.Logger
14 | logMu sync.Mutex
15 | )
16 |
17 | func InitLogging(logDir string) error {
18 | logMu.Lock()
19 | defer logMu.Unlock()
20 |
21 | if logger != nil {
22 | return nil // Already initialized
23 | }
24 |
25 | if err := os.MkdirAll(logDir, 0755); err != nil {
26 | return fmt.Errorf("failed to create log directory: %v", err)
27 | }
28 |
29 | logPath := filepath.Join(logDir, "blxrep.log")
30 | file, err := os.OpenFile(logPath, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
31 | if err != nil {
32 | return fmt.Errorf("failed to open log file: %v", err)
33 | }
34 |
35 | logFile = file
36 | logger = log.New(file, "", log.LstdFlags)
37 | return nil
38 | }
39 |
40 | func LogDebug(message string) {
41 | logMu.Lock()
42 | defer logMu.Unlock()
43 |
44 | if logger != nil {
45 | logger.Printf("[DEBUG] %s", message)
46 | }
47 | }
48 |
49 | func LogError(message string) {
50 | logMu.Lock()
51 | defer logMu.Unlock()
52 |
53 | if logger != nil {
54 | logger.Printf("[ERROR] %s", message)
55 | }
56 | }
57 |
58 | func CloseLogFile() {
59 | logMu.Lock()
60 | defer logMu.Unlock()
61 |
62 | if logFile != nil {
63 | logFile.Close()
64 | }
65 | }
66 |
--------------------------------------------------------------------------------
/utils/stream.go:
--------------------------------------------------------------------------------
1 | package utils
2 |
3 | import (
4 | "log"
5 | "os"
6 |
7 | "github.com/gorilla/websocket"
8 | )
9 |
10 | func StreamData(blocks []AgentDataBlock, websock *websocket.Conn, resume bool, srcPath string, action CONST_AGENT_ACTION, startTime int64) {
11 | var agentBlocks AgentBulkMessage
12 | agentBlocks.StartTime = startTime
13 | agentBlocks.AgentID, _ = os.Hostname()
14 | agentBlocks.Data = blocks
15 | agentBlocks.SrcPath = srcPath
16 | agentBlocks.Action = action
17 | agentBlocks.TotalBlocks, _ = GetTotalSectors(srcPath)
18 |
19 | if resume {
20 | agentBlocks.DataType = "resume"
21 | } else {
22 | agentBlocks.DataType = "snapshot"
23 | }
24 | err := websock.WriteJSON(agentBlocks)
25 | if err != nil {
26 | log.Fatalf("Could not send snapshot data: %v", err)
27 | }
28 | }
29 |
--------------------------------------------------------------------------------
/utils/user_details.go:
--------------------------------------------------------------------------------
1 | package utils
2 |
3 | import (
4 | "bufio"
5 | "fmt"
6 | "os"
7 | "regexp"
8 | "strings"
9 |
10 | "gopkg.in/yaml.v3"
11 | )
12 |
13 | type UserConfig struct {
14 | Name         string `yaml:"name"`
15 | Email        string `yaml:"email"`
16 | Organization string `yaml:"organization"`
17 | }
18 |
19 | func getConfigFilePath() string {
20 | filePath := "/etc/blxrep/config.yaml"
21 |
22 | if _, err := os.Stat(filePath); os.IsNotExist(err) {
23 | LogError("Config file not found at " + filePath)
24 | return filePath
25 | }
26 | return filePath
27 | }
28 |
29 | func loadUserConfig() (UserConfig, error) {
30 | configPath := getConfigFilePath()
31 | file, err := os.Open(configPath)
32 | if err != nil {
33 | return UserConfig{}, err
34 | }
35 | defer file.Close()
36 |
37 | var config UserConfig
38 | decoder := yaml.NewDecoder(file)
39 | err = decoder.Decode(&config)
40 | return config, err
41 | }
42 |
43 | func saveUserConfig(config UserConfig) error {
44 | configPath := getConfigFilePath()
45 | file, err := os.Create(configPath)
46 | if err != nil {
47 | return err
48 | }
49 | defer file.Close()
50 |
51 | encoder := yaml.NewEncoder(file)
52 | return encoder.Encode(config)
53 | }
54 |
55 | func isValidEmail(email string) bool {
56 | emailRegex := regexp.MustCompile(`^[a-z0-9._%+\-]+@[a-z0-9.\-]+\.[a-z]{2,4}$`)
57 | return emailRegex.MatchString(email)
58 | }
59 |
60 | func GetUserInfo() UserConfig {
61 | config, err := loadUserConfig()
62 | if err == nil {
63 | fmt.Println("Existing user configuration found.")
64 | return config
65 | }
66 |
67 | reader := bufio.NewReader(os.Stdin)
68 |
69 | for config.Name == "" {
70 | fmt.Print("Enter your name: ")
71 | config.Name, _ = reader.ReadString('\n')
72 | config.Name = strings.TrimSpace(config.Name)
73 | if config.Name == "" {
74 | fmt.Println("Name cannot be empty. Please try again.")
75 | }
76 | }
77 |
78 | for config.Email == "" || !isValidEmail(config.Email) {
79 | fmt.Print("Enter your email: ")
80 | config.Email, _ = reader.ReadString('\n')
81 | config.Email = strings.TrimSpace(config.Email)
82 | if !isValidEmail(config.Email) {
83 | fmt.Println("Invalid email format. Please try again.")
84 | }
85 | }
86 |
87 | for config.Organization == "" {
88 | fmt.Print("Enter your organization: ")
89 | config.Organization, _ = reader.ReadString('\n')
90 | config.Organization = strings.TrimSpace(config.Organization)
91 | if config.Organization == "" {
92 | fmt.Println("Organization cannot be empty. Please try again.")
93 | }
94 | }
95 |
96 | err = saveUserConfig(config)
97 | if err != nil {
98 | LogError("Error saving user config: " + err.Error())
99 | }
100 |
101 | return config
102 | }
103 |
--------------------------------------------------------------------------------