├── src ├── system │ ├── const.go │ ├── rate.go │ ├── rate_test.go │ ├── locker.go │ ├── utils_test.go │ ├── system.go │ ├── sink_pool.go │ └── locker_test.go ├── server │ ├── filesystem │ │ ├── testdata │ │ │ ├── test.rar │ │ │ ├── test.zip │ │ │ ├── test.tar.gz │ │ │ └── test.tar │ │ ├── stat_linux.go │ │ ├── compress_test.go │ │ ├── errors_test.go │ │ ├── path.go │ │ ├── stat.go │ │ ├── archiverext │ │ │ └── compressed.go │ │ ├── archive_test.go │ │ └── errors.go │ ├── transfer │ │ ├── doc.go │ │ ├── manager.go │ │ ├── archive.go │ │ ├── transfer.go │ │ └── source.go │ ├── installer │ │ ├── errors.go │ │ └── installer.go │ ├── power_test.go │ ├── errors.go │ ├── config_parser.go │ ├── console_test.go │ ├── websockets.go │ ├── events.go │ ├── resources.go │ ├── activity.go │ ├── console.go │ ├── mounts.go │ ├── update.go │ ├── configuration.go │ ├── backup │ │ ├── backup_local.go │ │ └── rustic_factory.go │ └── crash.go ├── router │ ├── tokens │ │ ├── transfer.go │ │ ├── file.go │ │ ├── upload.go │ │ ├── parser.go │ │ ├── token_store.go │ │ ├── backup.go │ │ └── websocket.go │ ├── middleware.go │ ├── middleware │ │ └── job_queue.go │ ├── websocket │ │ └── message.go │ ├── router_jobs.go │ ├── router_server_ws.go │ └── router_server_transfer.go ├── cmd │ └── elytra │ │ └── main.go ├── internal │ ├── rustic │ │ └── rustic.go │ ├── ufs │ │ ├── doc.go │ │ ├── README.md │ │ ├── LICENSE │ │ ├── go.LICENSE │ │ ├── file_posix.go │ │ ├── mkdir_unix.go │ │ ├── stat_unix.go │ │ ├── path_unix.go │ │ ├── quota_writer.go │ │ └── walk.go │ ├── models │ │ ├── models.go │ │ └── activity.go │ ├── progress │ │ ├── progress_test.go │ │ └── progress.go │ ├── database │ │ └── database.go │ └── cron │ │ ├── cron.go │ │ └── activity_cron.go ├── jobs │ └── setup.go ├── sftp │ ├── utils.go │ └── event.go ├── environment │ ├── stats.go │ ├── config.go │ ├── docker.go │ ├── docker │ │ └── api.go │ ├── allocations.go │ └── environment.go ├── remote │ ├── errors.go │ └── http_test.go ├── events │ ├── events.go │ └── events_test.go └── loggers │ └── cli │ └── cli.go ├── .envrc ├── .editorconfig ├── .gitmessage ├── Dockerfile ├── .dockerignore ├── ORIGINAL_LICENSE ├── .gitignore ├── docker-compose.example.yml ├── README.md ├── docker-compose.dev.yml ├── .github └── workflows │ ├── ci.yml │ └── release.yml ├── flake.nix ├── flake.lock ├── Makefile └── CONTRIBUTING.md /src/system/const.go: -------------------------------------------------------------------------------- 1 | package system 2 | 3 | var Version = "dev" 4 | -------------------------------------------------------------------------------- /src/server/filesystem/testdata/test.rar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pyrohost/elytra/HEAD/src/server/filesystem/testdata/test.rar -------------------------------------------------------------------------------- /src/server/filesystem/testdata/test.zip: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pyrohost/elytra/HEAD/src/server/filesystem/testdata/test.zip -------------------------------------------------------------------------------- /src/server/filesystem/testdata/test.tar.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pyrohost/elytra/HEAD/src/server/filesystem/testdata/test.tar.gz -------------------------------------------------------------------------------- /src/server/transfer/doc.go: 
-------------------------------------------------------------------------------- 1 | // Package transfer handles all logic related to transferring servers between 2 | // two nodes. This includes the logic for archiving a server on the source node 3 | // and logic for importing a server from the source node into the target node. 4 | package transfer 5 | -------------------------------------------------------------------------------- /src/router/tokens/transfer.go: -------------------------------------------------------------------------------- 1 | package tokens 2 | 3 | import ( 4 | "github.com/gbrlsnchs/jwt/v3" 5 | ) 6 | 7 | type TransferPayload struct { 8 | jwt.Payload 9 | } 10 | 11 | // GetPayload returns the JWT payload. 12 | func (p *TransferPayload) GetPayload() *jwt.Payload { 13 | return &p.Payload 14 | } 15 | -------------------------------------------------------------------------------- /.envrc: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | 3 | # Load the flake's `devShells.${currentSystem}.default`. 4 | if ! use flake .; then 5 | echo 'The development shell was unable to be built.' >&2 6 | echo 'The development environment was not loaded.' >&2 7 | echo 'Please make the necessary changes in flake.nix to fix any issues and hit enter to try again.' >&2 8 | fi 9 | -------------------------------------------------------------------------------- /.editorconfig: -------------------------------------------------------------------------------- 1 | root = true 2 | 3 | [*] 4 | indent_style = tab 5 | indent_size = 4 6 | tab_width = 4 7 | end_of_line = lf 8 | charset = utf-8 9 | trim_trailing_whitespace = true 10 | insert_final_newline = true 11 | 12 | [*.go] 13 | max_line_length = 100 14 | 15 | [*.md] 16 | trim_trailing_whitespace = false 17 | 18 | [*.{md,nix,yaml}] 19 | indent_style = space 20 | indent_size = 2 21 | tab_width = 2 22 | -------------------------------------------------------------------------------- /src/server/installer/errors.go: -------------------------------------------------------------------------------- 1 | package installer 2 | 3 | type validationError struct { 4 | msg string 5 | } 6 | 7 | func (e *validationError) Error() string { 8 | return e.msg 9 | } 10 | 11 | func IsValidationError(err error) bool { 12 | _, ok := err.(*validationError) 13 | 14 | return ok 15 | } 16 | 17 | func NewValidationError(msg string) error { 18 | return &validationError{msg: msg} 19 | } 20 | -------------------------------------------------------------------------------- /src/cmd/elytra/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "math/rand" 5 | 6 | "time" 7 | 8 | "github.com/pyrohost/elytra/src/cmd" 9 | ) 10 | 11 | func main() { 12 | 13 | // Since we make use of the math/rand package in the code, especially for generating 14 | 15 | // non-cryptographically secure random strings we need to seed the RNG. Just make use 16 | 17 | // of the current time for this. 18 | 19 | rand.Seed(time.Now().UnixNano()) 20 | 21 | // Execute the main binary code. 
22 | 23 | cmd.Execute() 24 | 25 | } 26 | -------------------------------------------------------------------------------- /src/internal/rustic/rustic.go: -------------------------------------------------------------------------------- 1 | package rustic 2 | 3 | import ( 4 | "os/exec" 5 | 6 | "emperror.dev/errors" 7 | "github.com/apex/log" 8 | ) 9 | 10 | func GetBinaryPath() (string, error) { 11 | path, err := exec.LookPath("rustic") 12 | if err != nil { 13 | return "", errors.Wrap(err, "rustic binary not found in PATH") 14 | } 15 | 16 | log.WithField("path", path).Debug("found rustic binary in PATH") 17 | return path, nil 18 | } 19 | 20 | func IsAvailable() bool { 21 | _, err := exec.LookPath("rustic") 22 | return err == nil 23 | } 24 | -------------------------------------------------------------------------------- /src/server/power_test.go: -------------------------------------------------------------------------------- 1 | package server 2 | 3 | import ( 4 | "testing" 5 | 6 | . "github.com/franela/goblin" 7 | 8 | "github.com/pyrohost/elytra/src/system" 9 | ) 10 | 11 | func TestPower(t *testing.T) { 12 | g := Goblin(t) 13 | 14 | g.Describe("Server#ExecutingPowerAction", func() { 15 | g.It("should return based on locker status", func() { 16 | s := &Server{powerLock: system.NewLocker()} 17 | 18 | g.Assert(s.ExecutingPowerAction()).IsFalse() 19 | s.powerLock.Acquire() 20 | g.Assert(s.ExecutingPowerAction()).IsTrue() 21 | }) 22 | }) 23 | } 24 | -------------------------------------------------------------------------------- /src/router/middleware.go: -------------------------------------------------------------------------------- 1 | package router 2 | 3 | import ( 4 | "github.com/gin-gonic/gin" 5 | 6 | "github.com/pyrohost/elytra/src/router/middleware" 7 | "github.com/pyrohost/elytra/src/server" 8 | ) 9 | 10 | // ExtractServer returns the server instance from the gin context. If there is 11 | // no server set in the context (e.g. calling from a controller not protected 12 | // by ServerExists) this function will panic. 13 | // 14 | // This function is deprecated. Use middleware.ExtractServer. 15 | func ExtractServer(c *gin.Context) *server.Server { 16 | return middleware.ExtractServer(c) 17 | } 18 | -------------------------------------------------------------------------------- /src/router/middleware/job_queue.go: -------------------------------------------------------------------------------- 1 | package middleware 2 | 3 | import ( 4 | "github.com/gin-gonic/gin" 5 | "github.com/pyrohost/elytra/src/jobs" 6 | ) 7 | 8 | const JobManagerKey = "job_manager" 9 | 10 | func AttachJobManager(manager *jobs.Manager) gin.HandlerFunc { 11 | return func(c *gin.Context) { 12 | c.Set(JobManagerKey, manager) 13 | c.Next() 14 | } 15 | } 16 | 17 | func ExtractJobManager(c *gin.Context) *jobs.Manager { 18 | if v, ok := c.Get(JobManagerKey); ok { 19 | return v.(*jobs.Manager) 20 | } 21 | panic("request does not have a job manager set in the context") 22 | } 23 | -------------------------------------------------------------------------------- /src/internal/ufs/doc.go: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: MIT 2 | // SPDX-FileCopyrightText: Copyright (c) 2024 Matthew Penner 3 | 4 | // Package ufs provides an abstraction layer for performing I/O on filesystems. 
5 | // This package is designed to be used in-place of standard `os` package I/O 6 | // calls, and is not designed to be used as a generic filesystem abstraction 7 | // like the `io/fs` package. 8 | // 9 | // The primary use-case of this package was to provide a "chroot-like" `os` 10 | // wrapper, so we can safely sandbox I/O operations within a directory and 11 | // use untrusted arbitrary paths. 12 | package ufs 13 | -------------------------------------------------------------------------------- /src/internal/ufs/README.md: -------------------------------------------------------------------------------- 1 | # Filesystem 2 | 3 | Coming Soon™ 4 | 5 | > TODO 6 | 7 | ## Licensing 8 | 9 | Most code in this package is licensed under `MIT` with some exceptions. 10 | 11 | The following files are licensed under `BSD-3-Clause` due to them being copied 12 | verbatim or derived from [Go](https://go.dev)'s source code. 13 | 14 | - [`file_posix.go`](./file_posix.go) 15 | - [`mkdir_unix.go`](./mkdir_unix.go) 16 | - [`path_unix.go`](./path_unix.go) 17 | - [`removeall_unix.go`](./removeall_unix.go) 18 | - [`stat_unix.go`](./stat_unix.go) 19 | - [`walk.go`](./walk.go) 20 | 21 | These changes are not associated with nor endorsed by The Go Authors. 22 | -------------------------------------------------------------------------------- /src/internal/models/models.go: -------------------------------------------------------------------------------- 1 | package models 2 | 3 | import ( 4 | "database/sql" 5 | "encoding/json" 6 | 7 | "emperror.dev/errors" 8 | ) 9 | 10 | type JsonNullString struct { 11 | sql.NullString 12 | } 13 | 14 | func (v JsonNullString) MarshalJSON() ([]byte, error) { 15 | if v.Valid { 16 | return json.Marshal(v.String) 17 | } else { 18 | return json.Marshal(nil) 19 | } 20 | } 21 | 22 | func (v *JsonNullString) UnmarshalJSON(data []byte) error { 23 | var s *string 24 | if err := json.Unmarshal(data, &s); err != nil { 25 | return errors.WithStack(err) 26 | } 27 | if s != nil { 28 | v.String = *s 29 | } 30 | v.Valid = s != nil 31 | return nil 32 | } 33 | -------------------------------------------------------------------------------- /src/router/tokens/file.go: -------------------------------------------------------------------------------- 1 | package tokens 2 | 3 | import ( 4 | "github.com/gbrlsnchs/jwt/v3" 5 | ) 6 | 7 | type FilePayload struct { 8 | jwt.Payload 9 | FilePath string `json:"file_path"` 10 | ServerUuid string `json:"server_uuid"` 11 | UniqueId string `json:"unique_id"` 12 | } 13 | 14 | // Returns the JWT payload. 15 | func (p *FilePayload) GetPayload() *jwt.Payload { 16 | return &p.Payload 17 | } 18 | 19 | // Determines if this JWT is valid for the given request cycle. If the 20 | // unique ID passed in the token has already been seen before this will 21 | // return false. This allows us to use this JWT as a one-time token that 22 | // validates all of the request. 23 | func (p *FilePayload) IsUniqueRequest() bool { 24 | return getTokenStore().IsValidToken(p.UniqueId) 25 | } 26 | -------------------------------------------------------------------------------- /src/router/tokens/upload.go: -------------------------------------------------------------------------------- 1 | package tokens 2 | 3 | import ( 4 | "github.com/gbrlsnchs/jwt/v3" 5 | ) 6 | 7 | type UploadPayload struct { 8 | jwt.Payload 9 | 10 | ServerUuid string `json:"server_uuid"` 11 | UserUuid string `json:"user_uuid"` 12 | UniqueId string `json:"unique_id"` 13 | } 14 | 15 | // Returns the JWT payload. 
16 | func (p *UploadPayload) GetPayload() *jwt.Payload { 17 | return &p.Payload 18 | } 19 | 20 | // Determines if this JWT is valid for the given request cycle. If the 21 | // unique ID passed in the token has already been seen before this will 22 | // return false. This allows us to use this JWT as a one-time token that 23 | // validates all of the request. 24 | func (p *UploadPayload) IsUniqueRequest() bool { 25 | return getTokenStore().IsValidToken(p.UniqueId) 26 | } 27 | -------------------------------------------------------------------------------- /src/server/filesystem/stat_linux.go: -------------------------------------------------------------------------------- 1 | package filesystem 2 | 3 | import ( 4 | "syscall" 5 | "time" 6 | 7 | "golang.org/x/sys/unix" 8 | ) 9 | 10 | // CTime returns the time that the file/folder was created. 11 | // 12 | // TODO: remove. Ctim is not actually ever been correct and doesn't actually 13 | // return the creation time. 14 | func (s *Stat) CTime() time.Time { 15 | if st, ok := s.Sys().(*unix.Stat_t); ok { 16 | // Do not remove these "redundant" type-casts, they are required for 32-bit builds to work. 17 | return time.Unix(int64(st.Ctim.Sec), int64(st.Ctim.Nsec)) 18 | } 19 | if st, ok := s.Sys().(*syscall.Stat_t); ok { 20 | // Do not remove these "redundant" type-casts, they are required for 32-bit builds to work. 21 | return time.Unix(int64(st.Ctim.Sec), int64(st.Ctim.Nsec)) 22 | } 23 | return time.Time{} 24 | } 25 | -------------------------------------------------------------------------------- /src/router/websocket/message.go: -------------------------------------------------------------------------------- 1 | package websocket 2 | 3 | const ( 4 | AuthenticationSuccessEvent = "auth success" 5 | TokenExpiringEvent = "token expiring" 6 | TokenExpiredEvent = "token expired" 7 | AuthenticationEvent = "auth" 8 | SetStateEvent = "set state" 9 | SendServerLogsEvent = "send logs" 10 | SendCommandEvent = "send command" 11 | SendStatsEvent = "send stats" 12 | ErrorEvent = "daemon error" 13 | JwtErrorEvent = "jwt error" 14 | ) 15 | 16 | type Message struct { 17 | // The event to perform. 18 | Event string `json:"event"` 19 | 20 | // The data to pass along, only used by power/command currently. Other requests 21 | // should either omit the field or pass an empty value as it is ignored. 22 | Args []string `json:"args,omitempty"` 23 | } 24 | -------------------------------------------------------------------------------- /src/router/tokens/parser.go: -------------------------------------------------------------------------------- 1 | package tokens 2 | 3 | import ( 4 | "time" 5 | 6 | "github.com/gbrlsnchs/jwt/v3" 7 | 8 | "github.com/pyrohost/elytra/src/config" 9 | ) 10 | 11 | type TokenData interface { 12 | GetPayload() *jwt.Payload 13 | } 14 | 15 | // Validates the provided JWT against the known secret for the Daemon and returns the 16 | // parsed data. This function DOES NOT validate that the token is valid for the connected 17 | // server, nor does it ensure that the user providing the token is able to actually do things. 18 | // 19 | // This simply returns a parsed token. 
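//
// A minimal usage sketch (illustrative only; the raw token bytes would come
// from the incoming request, and the concrete payload type depends on the
// route being protected):
//
//	payload := &FilePayload{}
//	if err := ParseToken([]byte(rawToken), payload); err != nil {
//		// signature or expiration validation failed
//	}
//	// payload.FilePath, payload.ServerUuid, etc. are now populated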
20 | func ParseToken(token []byte, data TokenData) error { 21 | verifyOptions := jwt.ValidatePayload( 22 | data.GetPayload(), 23 | jwt.ExpirationTimeValidator(time.Now()), 24 | ) 25 | 26 | _, err := jwt.Verify(token, config.GetJwtAlgorithm(), &data, verifyOptions) 27 | 28 | return err 29 | } 30 | -------------------------------------------------------------------------------- /.gitmessage: -------------------------------------------------------------------------------- 1 | # [optional scope]: 2 | # 3 | # [optional body] 4 | # 5 | # [optional footer(s)] 6 | # 7 | # Type must be one of the following: 8 | # feat: A new feature 9 | # fix: A bug fix 10 | # docs: Documentation only changes 11 | # style: Changes that do not affect the meaning of the code 12 | # refactor: A code change that neither fixes a bug nor adds a feature 13 | # perf: A performance improvement 14 | # test: Adding missing tests or correcting existing tests 15 | # chore: Changes to the build process or auxiliary tools 16 | # 17 | # Breaking changes: 18 | # feat!: A new feature with breaking changes 19 | # fix!: A bug fix with breaking changes 20 | # 21 | # Examples: 22 | # feat: add backup encryption support 23 | # fix: resolve server startup crash on invalid config 24 | # feat!: change default configuration format to YAML 25 | # docs: update installation guide for new version -------------------------------------------------------------------------------- /src/router/tokens/token_store.go: -------------------------------------------------------------------------------- 1 | package tokens 2 | 3 | import ( 4 | "sync" 5 | "time" 6 | 7 | "github.com/patrickmn/go-cache" 8 | ) 9 | 10 | type TokenStore struct { 11 | sync.Mutex 12 | cache *cache.Cache 13 | } 14 | 15 | var _tokens *TokenStore 16 | 17 | // Returns the global unique token store cache. This is used to validate 18 | // one time token usage by storing any received tokens in a local memory 19 | // cache until they are ready to expire. 20 | func getTokenStore() *TokenStore { 21 | if _tokens == nil { 22 | _tokens = &TokenStore{ 23 | cache: cache.New(time.Minute*60, time.Minute*5), 24 | } 25 | } 26 | 27 | return _tokens 28 | } 29 | 30 | // Checks if a token is valid or not. 31 | func (t *TokenStore) IsValidToken(token string) bool { 32 | t.Lock() 33 | defer t.Unlock() 34 | 35 | _, exists := t.cache.Get(token) 36 | 37 | if !exists { 38 | t.cache.Add(token, "", time.Minute*60) 39 | } 40 | 41 | return !exists 42 | } 43 | -------------------------------------------------------------------------------- /src/router/tokens/backup.go: -------------------------------------------------------------------------------- 1 | package tokens 2 | 3 | import ( 4 | "github.com/gbrlsnchs/jwt/v3" 5 | ) 6 | 7 | type BackupPayload struct { 8 | jwt.Payload 9 | 10 | ServerUuid string `json:"server_uuid"` 11 | BackupUuid string `json:"backup_uuid"` 12 | SnapshotId string `json:"snapshot_id,omitempty"` 13 | UniqueId string `json:"unique_id"` 14 | BackupDisk string `json:"backup_disk,omitempty"` 15 | RepositoryType string `json:"repository_type,omitempty"` 16 | } 17 | 18 | // Returns the JWT payload. 19 | func (p *BackupPayload) GetPayload() *jwt.Payload { 20 | return &p.Payload 21 | } 22 | 23 | // Determines if this JWT is valid for the given request cycle. If the 24 | // unique ID passed in the token has already been seen before this will 25 | // return false. This allows us to use this JWT as a one-time token that 26 | // validates all of the request. 
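//
// Replay behaviour, sketched (the unique ID below is made up): the first call
// for a given UniqueId returns true and records the ID in the shared token
// store; any later call with the same ID returns false until the cached entry
// expires roughly an hour later.
//
//	p := &BackupPayload{UniqueId: "0d9b6de2"}
//	p.IsUniqueRequest() // true: first sighting, the ID is now cached
//	p.IsUniqueRequest() // false: treated as a replayed token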
27 | func (p *BackupPayload) IsUniqueRequest() bool { 28 | return getTokenStore().IsValidToken(p.UniqueId) 29 | } 30 | -------------------------------------------------------------------------------- /src/jobs/setup.go: -------------------------------------------------------------------------------- 1 | package jobs 2 | 3 | import ( 4 | "github.com/pyrohost/elytra/src/remote" 5 | "github.com/pyrohost/elytra/src/server" 6 | ) 7 | 8 | // SetupJobs registers all available job types with the manager 9 | func SetupJobs(manager *Manager, serverManager *server.Manager, client remote.Client) { 10 | manager.RegisterJobType("backup_create", func(data map[string]interface{}) (Job, error) { 11 | return NewBackupCreateJob(data, serverManager, client) 12 | }) 13 | 14 | manager.RegisterJobType("backup_delete", func(data map[string]interface{}) (Job, error) { 15 | return NewBackupDeleteJob(data, serverManager, client) 16 | }) 17 | 18 | manager.RegisterJobType("backup_restore", func(data map[string]interface{}) (Job, error) { 19 | return NewBackupRestoreJob(data, serverManager, client) 20 | }) 21 | 22 | manager.RegisterJobType("backup_delete_all", func(data map[string]interface{}) (Job, error) { 23 | return NewBackupDeleteAllJob(data, serverManager, client) 24 | }) 25 | 26 | } 27 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | # Stage 1 (Build) 2 | FROM --platform=$BUILDPLATFORM golang:1.24-alpine AS builder 3 | 4 | ARG TARGETPLATFORM 5 | ARG BUILDPLATFORM 6 | ARG TARGETOS 7 | ARG TARGETARCH 8 | ARG VERSION 9 | 10 | RUN apk add --update --no-cache git make mailcap 11 | 12 | WORKDIR /app/ 13 | COPY go.mod go.sum /app/ 14 | COPY . /app/ 15 | 16 | RUN CGO_ENABLED=0 GOOS=$TARGETOS GOARCH=$TARGETARCH go build \ 17 | -ldflags="-s -w -X github.com/pyrohost/elytra/src/system.Version=$VERSION" \ 18 | -v \ 19 | -trimpath \ 20 | -o elytra \ 21 | ./src/cmd/elytra 22 | 23 | RUN echo "ID=\"distroless\"" > /etc/os-release 24 | 25 | # Stage 2 (Final) 26 | FROM gcr.io/distroless/static:latest 27 | 28 | COPY --from=builder /etc/os-release /etc/os-release 29 | COPY --from=builder /etc/mime.types /etc/mime.types 30 | COPY --from=builder /app/elytra /usr/bin/ 31 | COPY --from=ghcr.io/rustic-rs/rustic:v0.10.0 /rustic /usr/local/bin/rustic 32 | 33 | ENTRYPOINT ["/usr/bin/elytra"] 34 | CMD ["--config", "/etc/elytra/config.yml"] 35 | 36 | EXPOSE 8080 2022 -------------------------------------------------------------------------------- /src/sftp/utils.go: -------------------------------------------------------------------------------- 1 | package sftp 2 | 3 | import ( 4 | "io" 5 | "os" 6 | ) 7 | 8 | const ( 9 | // ErrSSHQuotaExceeded extends the default SFTP server to return a quota exceeded error to the client. 10 | // 11 | // @see https://tools.ietf.org/id/draft-ietf-secsh-filexfer-13.txt 12 | ErrSSHQuotaExceeded = fxErr(15) 13 | ) 14 | 15 | type ListerAt []os.FileInfo 16 | 17 | // ListAt returns the number of entries copied and an io.EOF error if we made it to the end of the file list. 18 | // Take a look at the pkg/sftp godoc for more information about how this function should work. 
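//
// Sketch of the expected behaviour (sizes are illustrative): for a ListerAt
// holding 5 entries and a destination slice of length 3, ListAt(dst, 0) copies
// 3 entries and returns (3, nil); ListAt(dst, 3) copies the remaining 2 and
// returns (2, io.EOF); ListAt(dst, 5) copies nothing and returns (0, io.EOF).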
19 | func (l ListerAt) ListAt(f []os.FileInfo, offset int64) (int, error) { 20 | if offset >= int64(len(l)) { 21 | return 0, io.EOF 22 | } 23 | 24 | if n := copy(f, l[offset:]); n < len(f) { 25 | return n, io.EOF 26 | } else { 27 | return n, nil 28 | } 29 | } 30 | 31 | type fxErr uint32 32 | 33 | func (e fxErr) Error() string { 34 | switch e { 35 | case ErrSSHQuotaExceeded: 36 | return "Quota Exceeded" 37 | default: 38 | return "Failure" 39 | } 40 | } 41 | -------------------------------------------------------------------------------- /src/server/errors.go: -------------------------------------------------------------------------------- 1 | package server 2 | 3 | import ( 4 | "emperror.dev/errors" 5 | ) 6 | 7 | var ( 8 | ErrIsRunning = errors.New("server is running") 9 | ErrSuspended = errors.New("server is currently in a suspended state") 10 | ErrServerIsInstalling = errors.New("server is currently installing") 11 | ErrServerIsTransferring = errors.New("server is currently being transferred") 12 | ErrServerIsRestoring = errors.New("server is currently being restored") 13 | ) 14 | 15 | type crashTooFrequent struct{} 16 | 17 | func (e *crashTooFrequent) Error() string { 18 | return "server has crashed too soon after the last detected crash" 19 | } 20 | 21 | func IsTooFrequentCrashError(err error) bool { 22 | _, ok := err.(*crashTooFrequent) 23 | 24 | return ok 25 | } 26 | 27 | type serverDoesNotExist struct{} 28 | 29 | func (e *serverDoesNotExist) Error() string { 30 | return "server does not exist on remote system" 31 | } 32 | 33 | func IsServerDoesNotExistError(err error) bool { 34 | _, ok := err.(*serverDoesNotExist) 35 | 36 | return ok 37 | } 38 | -------------------------------------------------------------------------------- /.dockerignore: -------------------------------------------------------------------------------- 1 | # Binaries for programs and plugins 2 | *.exe 3 | *.dll 4 | *.so 5 | *.dylib 6 | .idea/* 7 | 8 | # Test binary, build with `go test -c` 9 | *.test 10 | 11 | # Output of the go coverage tool, specifically when used with LiteIDE 12 | *.out 13 | 14 | # Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736 15 | .glide/ 16 | 17 | # dep related files and folders 18 | /vendor* 19 | 20 | # ignore logfiles (/* so the .gitkeep override works) 21 | /logs/* 22 | 23 | # ignore configuration file 24 | /config.yml 25 | 26 | # Ignore Vagrant stuff 27 | /.vagrant 28 | 29 | # Builds by gox 30 | /build 31 | 32 | # Go Coverage tool 33 | /coverage.out 34 | 35 | # The built executable 36 | elytra 37 | elytra.exe 38 | 39 | # IDE/Editor files (VS Code) 40 | /.vscode 41 | 42 | # test files 43 | test_*/ 44 | 45 | # Keep all gitkeep files (This needs to stay at the bottom) 46 | !.gitkeep 47 | debug 48 | .DS_Store 49 | *.pprof 50 | *.pdf 51 | 52 | Dockerfile 53 | CHANGELOG.md 54 | README.md 55 | elytra-api.paw 56 | -------------------------------------------------------------------------------- /src/internal/ufs/LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2024 Matthew Penner 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to 
do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /ORIGINAL_LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2018 - 2021 Dane Everitt and Contributors 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Binaries for programs and plugins 2 | *.exe 3 | *.dll 4 | *.so 5 | *.dylib 6 | .idea/* 7 | 8 | # Test binary, build with `go test -c` 9 | *.test 10 | 11 | # Output of the go coverage tool, specifically when used with LiteIDE 12 | *.out 13 | 14 | # Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736 15 | .glide/ 16 | 17 | # dep related files and folders 18 | /vendor* 19 | 20 | # ignore logfiles (/* so the .gitkeep override works) 21 | /logs/* 22 | 23 | # ignore configuration file 24 | /config.yml 25 | /config*.yml 26 | /config.yaml 27 | /config*.yaml 28 | 29 | # Ignore Vagrant stuff 30 | /.vagrant 31 | 32 | # Builds by gox 33 | /build 34 | 35 | # Go Coverage tool 36 | /coverage.out 37 | 38 | # The built executable 39 | elytra 40 | elytra.exe 41 | !elytra/ 42 | 43 | # IDE/Editor files (VS Code) 44 | /.vscode 45 | 46 | # test files 47 | test_*/ 48 | 49 | # Embedded rustic binaries (downloaded at build time) 50 | /internal/rustic/binaries/rustic-* 51 | 52 | # Keep all gitkeep files (This needs to stay at the bottom) 53 | !.gitkeep 54 | debug 55 | .DS_Store 56 | *.pprof 57 | *.pdf 58 | pprof.* 59 | data/elytra.db 60 | .secrets 61 | 62 | # Nix output files 63 | /result 64 | /result-* 65 | -------------------------------------------------------------------------------- /src/system/rate.go: -------------------------------------------------------------------------------- 1 | package system 2 | 3 | import ( 4 | "sync" 5 | "time" 6 | ) 7 | 8 | // Rate defines a rate limiter of n items (limit) per duration of time. 9 | type Rate struct { 10 | mu sync.Mutex 11 | limit uint64 12 | duration time.Duration 13 | count uint64 14 | last time.Time 15 | } 16 | 17 | func NewRate(limit uint64, duration time.Duration) *Rate { 18 | return &Rate{ 19 | limit: limit, 20 | duration: duration, 21 | last: time.Now(), 22 | } 23 | } 24 | 25 | // Try returns true if under the rate limit defined, or false if the rate limit 26 | // has been exceeded for the current duration. 27 | func (r *Rate) Try() bool { 28 | r.mu.Lock() 29 | defer r.mu.Unlock() 30 | now := time.Now() 31 | // If it has been more than the duration, reset the timer and count. 32 | if now.Sub(r.last) > r.duration { 33 | r.count = 0 34 | r.last = now 35 | } 36 | if (r.count + 1) > r.limit { 37 | return false 38 | } 39 | // Hit this once, and return. 40 | r.count = r.count + 1 41 | return true 42 | } 43 | 44 | // Reset resets the internal state of the rate limiter back to zero. 45 | func (r *Rate) Reset() { 46 | r.mu.Lock() 47 | r.count = 0 48 | r.last = time.Now() 49 | r.mu.Unlock() 50 | } 51 | -------------------------------------------------------------------------------- /src/server/config_parser.go: -------------------------------------------------------------------------------- 1 | package server 2 | 3 | import ( 4 | "runtime" 5 | 6 | "github.com/gammazero/workerpool" 7 | 8 | "github.com/pyrohost/elytra/src/internal/ufs" 9 | ) 10 | 11 | // UpdateConfigurationFiles updates all the defined configuration files for 12 | // a server automatically to ensure that they always use the specified values. 
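//
// The files are processed concurrently on a worker pool sized to
// runtime.NumCPU(); each file is opened (created if missing) with 0644
// permissions, parsed in place, and the call blocks until every file has been
// handled via pool.StopWait().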
13 | func (s *Server) UpdateConfigurationFiles() { 14 | pool := workerpool.New(runtime.NumCPU()) 15 | 16 | s.Log().Debug("acquiring process configuration files...") 17 | files := s.ProcessConfiguration().ConfigurationFiles 18 | s.Log().Debug("acquired process configuration files") 19 | for _, cf := range files { 20 | f := cf 21 | 22 | pool.Submit(func() { 23 | file, err := s.Filesystem().UnixFS().Touch(f.FileName, ufs.O_RDWR|ufs.O_CREATE, 0o644) 24 | if err != nil { 25 | s.Log().WithField("file_name", f.FileName).WithField("error", err).Error("failed to open file for configuration") 26 | return 27 | } 28 | defer file.Close() 29 | 30 | if err := f.Parse(file); err != nil { 31 | s.Log().WithField("error", err).Error("failed to parse and update server configuration file") 32 | } 33 | 34 | s.Log().WithField("file_name", f.FileName).Debug("finished processing server configuration file") 35 | }) 36 | } 37 | 38 | pool.StopWait() 39 | } 40 | -------------------------------------------------------------------------------- /src/environment/stats.go: -------------------------------------------------------------------------------- 1 | package environment 2 | 3 | // Stats defines the current resource usage for a given server instance. 4 | type Stats struct { 5 | // The total amount of memory, in bytes, that this server instance is consuming. This is 6 | // calculated slightly differently than just using the raw Memory field that the stats 7 | // return from the container, so please check the code setting this value for how that 8 | // is calculated. 9 | Memory uint64 `json:"memory_bytes"` 10 | 11 | // The total amount of memory this container or resource can use. Inside Docker this is 12 | // going to be higher than you'd expect because we're automatically allocating overhead 13 | // abilities for the container, so it's not going to be a perfect match. 14 | MemoryLimit uint64 `json:"memory_limit_bytes"` 15 | 16 | // The absolute CPU usage is the amount of CPU used in relation to the entire system and 17 | // does not take into account any limits on the server process itself. 18 | CpuAbsolute float64 `json:"cpu_absolute"` 19 | 20 | // Current network transmit in & out for a container. 21 | Network NetworkStats `json:"network"` 22 | 23 | // The current uptime of the container, in milliseconds. 24 | Uptime int64 `json:"uptime"` 25 | } 26 | 27 | type NetworkStats struct { 28 | RxBytes uint64 `json:"rx_bytes"` 29 | TxBytes uint64 `json:"tx_bytes"` 30 | } 31 | -------------------------------------------------------------------------------- /src/server/transfer/manager.go: -------------------------------------------------------------------------------- 1 | package transfer 2 | 3 | import ( 4 | "sync" 5 | ) 6 | 7 | var ( 8 | incomingTransfers = NewManager() 9 | outgoingTransfers = NewManager() 10 | ) 11 | 12 | // Incoming returns a transfer manager for incoming transfers. 13 | func Incoming() *Manager { 14 | return incomingTransfers 15 | } 16 | 17 | // Outgoing returns a transfer manager for outgoing transfers. 18 | func Outgoing() *Manager { 19 | return outgoingTransfers 20 | } 21 | 22 | // Manager manages transfers. 23 | type Manager struct { 24 | mu sync.RWMutex 25 | transfers map[string]*Transfer 26 | } 27 | 28 | // NewManager returns a new transfer manager. 29 | func NewManager() *Manager { 30 | return &Manager{ 31 | transfers: make(map[string]*Transfer), 32 | } 33 | } 34 | 35 | // Add adds a transfer to the manager. 
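//
// Transfers are keyed by the server's ID, so a typical caller registers the
// transfer for its lifetime and looks it up elsewhere by that ID (a sketch;
// the transfer value and server ID would come from whichever handler started
// the transfer):
//
//	Outgoing().Add(t)
//	defer Outgoing().Remove(t)
//
//	// e.g. in a status endpoint, using whichever manager the transfer
//	// was registered with:
//	if t := Outgoing().Get(serverID); t != nil {
//		// a transfer for this server is still in flight
//	}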
36 | func (m *Manager) Add(transfer *Transfer) { 37 | m.mu.Lock() 38 | defer m.mu.Unlock() 39 | 40 | m.transfers[transfer.Server.ID()] = transfer 41 | } 42 | 43 | // Remove removes a transfer from the manager. 44 | func (m *Manager) Remove(transfer *Transfer) { 45 | m.mu.Lock() 46 | defer m.mu.Unlock() 47 | 48 | delete(m.transfers, transfer.Server.ID()) 49 | } 50 | 51 | // Get gets a transfer from the manager using a server ID. 52 | func (m *Manager) Get(id string) *Transfer { 53 | m.mu.RLock() 54 | defer m.mu.RUnlock() 55 | 56 | return m.transfers[id] 57 | } 58 | -------------------------------------------------------------------------------- /src/server/console_test.go: -------------------------------------------------------------------------------- 1 | package server 2 | 3 | import ( 4 | "testing" 5 | "time" 6 | 7 | "github.com/franela/goblin" 8 | ) 9 | 10 | func TestName(t *testing.T) { 11 | g := goblin.Goblin(t) 12 | 13 | g.Describe("ConsoleThrottler", func() { 14 | g.It("keeps count of the number of overages in a time period", func() { 15 | t := newConsoleThrottle(1, time.Second) 16 | g.Assert(t.Allow()).IsTrue() 17 | g.Assert(t.Allow()).IsFalse() 18 | g.Assert(t.Allow()).IsFalse() 19 | }) 20 | 21 | g.It("calls strike once per time period", func() { 22 | t := newConsoleThrottle(1, time.Millisecond*20) 23 | 24 | var times int 25 | t.strike = func() { 26 | times = times + 1 27 | } 28 | 29 | t.Allow() 30 | t.Allow() 31 | t.Allow() 32 | time.Sleep(time.Millisecond * 100) 33 | t.Allow() 34 | t.Reset() 35 | t.Allow() 36 | t.Allow() 37 | t.Allow() 38 | 39 | g.Assert(times).Equal(2) 40 | }) 41 | 42 | g.It("is properly reset", func() { 43 | t := newConsoleThrottle(10, time.Second) 44 | 45 | for i := 0; i < 10; i++ { 46 | g.Assert(t.Allow()).IsTrue() 47 | } 48 | g.Assert(t.Allow()).IsFalse() 49 | t.Reset() 50 | g.Assert(t.Allow()).IsTrue() 51 | }) 52 | }) 53 | } 54 | 55 | func BenchmarkConsoleThrottle(b *testing.B) { 56 | t := newConsoleThrottle(10, time.Millisecond*10) 57 | 58 | b.ReportAllocs() 59 | for i := 0; i < b.N; i++ { 60 | t.Allow() 61 | } 62 | } 63 | -------------------------------------------------------------------------------- /docker-compose.example.yml: -------------------------------------------------------------------------------- 1 | version: '3.8' 2 | services: 3 | elytra: 4 | image: ghcr.io/pyrohost/elytra:latest 5 | restart: unless-stopped 6 | tty: true 7 | ports: 8 | - "8080:8080" 9 | - "2022:2022" 10 | environment: 11 | # Required: Panel connection and token 12 | ELYTRA_PANEL_LOCATION: "https://panel.example.com" 13 | ELYTRA_TOKEN_ID: "panel-token-id" 14 | ELYTRA_TOKEN: "panel-token-value" 15 | 16 | # Optional overrides / useful defaults 17 | TZ: "UTC" 18 | ELYTRA_DEBUG: "true" 19 | ELYTRA_API_HOST: "0.0.0.0" 20 | ELYTRA_API_PORT: "8080" 21 | ELYTRA_UID: "988" 22 | ELYTRA_GID: "988" 23 | 24 | # System path overrides (inside container) 25 | ELYTRA_SYSTEM_ROOT_DIRECTORY: "/var/lib/elytra" 26 | ELYTRA_SYSTEM_DATA: "/var/lib/elytra/volumes" 27 | ELYTRA_SYSTEM_LOG_DIRECTORY: "/var/log/elytra" 28 | 29 | # Docker integration (required for managing containers) 30 | ELYTRA_DOCKER_TMPFS_SIZE: "100" 31 | # Disable rustic backups in container mode unless you have the 'rustic' binary available 32 | ELYTRA_SYSTEM_BACKUPS_RUSTIC_LOCAL_ENABLED: "false" 33 | 34 | volumes: 35 | - "/var/run/docker.sock:/var/run/docker.sock:ro" 36 | - "/var/lib/docker/containers/:/var/lib/docker/containers/:ro" 37 | - "./data:/var/lib/elytra" 38 | - "./logs:/var/log/elytra" 39 | 40 | networks: 41 | 
default: 42 | driver: bridge 43 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Elytra 2 | 3 | [![GitHub release](https://img.shields.io/github/v/release/pyrohost/elytra?style=flat-square)](https://github.com/pyrohost/elytra/releases) 4 | [![GitHub issues](https://img.shields.io/github/issues/pyrohost/elytra?style=flat-square)](https://github.com/pyrohost/elytra/issues) 5 | [![GitHub stars](https://img.shields.io/github/stars/pyrohost/elytra?style=flat-square)](https://github.com/pyrohost/elytra/stargazers) 6 | [![License](https://img.shields.io/github/license/pyrohost/elytra?style=flat-square)](LICENSE) 7 | 8 | **Next-generation daemon for the Pyrodactyl ecosystem** 9 | 10 | Elytra is an actively maintained fork of Pterodactyl Elytra by Pyro Inc., designed specifically for the Pyrodactyl panel. Born from the need for continued innovation and development, Elytra delivers enhanced features and reliable updates that keep your server infrastructure running smoothly. 11 | 12 | ## 📚 Documentation 13 | 14 | - [Pyrodactyl Panel Documentation](https://pyrodactyl.dev) 15 | - [Installation Guide](https://pyrodactyl.dev/docs/elytra/installation) 16 | - [Configuration Guide](https://pyrodactyl.dev/docs/elytra/configuration) 17 | 18 | ## 🛠️ Issues & Support 19 | 20 | Report bugs and request features on our [GitHub repository](https://github.com/pyrohost/elytra). 21 | 22 | ## 📄 License 23 | 24 | Elytra is licensed under the [GNU Affero General Public License v3.0](LICENSE). Original Pterodactyl Wings code retains its [MIT License](ORIGINAL_LICENSE). 25 | -------------------------------------------------------------------------------- /src/server/websockets.go: -------------------------------------------------------------------------------- 1 | package server 2 | 3 | import ( 4 | "context" 5 | "sync" 6 | 7 | "github.com/google/uuid" 8 | ) 9 | 10 | type WebsocketBag struct { 11 | mu sync.Mutex 12 | conns map[uuid.UUID]*context.CancelFunc 13 | } 14 | 15 | // Websockets returns the websocket bag which contains all the currently open websocket connections 16 | // for the server instance. 17 | func (s *Server) Websockets() *WebsocketBag { 18 | s.wsBagLocker.Lock() 19 | defer s.wsBagLocker.Unlock() 20 | 21 | if s.wsBag == nil { 22 | s.wsBag = &WebsocketBag{} 23 | } 24 | 25 | return s.wsBag 26 | } 27 | 28 | // Push adds a new websocket connection to the end of the stack. 29 | func (w *WebsocketBag) Push(u uuid.UUID, cancel *context.CancelFunc) { 30 | w.mu.Lock() 31 | defer w.mu.Unlock() 32 | 33 | if w.conns == nil { 34 | w.conns = make(map[uuid.UUID]*context.CancelFunc) 35 | } 36 | 37 | w.conns[u] = cancel 38 | } 39 | 40 | // Remove removes a connection from the stack. 41 | func (w *WebsocketBag) Remove(u uuid.UUID) { 42 | w.mu.Lock() 43 | delete(w.conns, u) 44 | w.mu.Unlock() 45 | } 46 | 47 | // CancelAll cancels all the stored cancel functions which has the effect of 48 | // disconnecting every listening websocket for the server. 49 | func (w *WebsocketBag) CancelAll() { 50 | w.mu.Lock() 51 | defer w.mu.Unlock() 52 | 53 | if w.conns != nil { 54 | for _, cancel := range w.conns { 55 | (*cancel)() 56 | } 57 | } 58 | 59 | // Reset the connections. 
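// A fresh, empty map replaces the old one so the bag drops its references to
// the cancelled connections and can keep accepting Push calls afterwards.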
60 | w.conns = make(map[uuid.UUID]*context.CancelFunc) 61 | } 62 | -------------------------------------------------------------------------------- /src/internal/progress/progress_test.go: -------------------------------------------------------------------------------- 1 | package progress_test 2 | 3 | import ( 4 | "bytes" 5 | "testing" 6 | 7 | "github.com/franela/goblin" 8 | 9 | "github.com/pyrohost/elytra/src/internal/progress" 10 | ) 11 | 12 | func TestProgress(t *testing.T) { 13 | g := goblin.Goblin(t) 14 | 15 | g.Describe("Progress", func() { 16 | g.It("properly initializes", func() { 17 | total := uint64(1000) 18 | p := progress.NewProgress(total) 19 | g.Assert(p).IsNotNil() 20 | g.Assert(p.Total()).Equal(total) 21 | g.Assert(p.Written()).Equal(uint64(0)) 22 | }) 23 | 24 | g.It("increments written when Write is called", func() { 25 | v := []byte("hello") 26 | p := progress.NewProgress(1000) 27 | _, err := p.Write(v) 28 | g.Assert(err).IsNil() 29 | g.Assert(p.Written()).Equal(uint64(len(v))) 30 | }) 31 | 32 | g.It("renders a progress bar", func() { 33 | v := bytes.Repeat([]byte{' '}, 100) 34 | p := progress.NewProgress(1000) 35 | _, err := p.Write(v) 36 | g.Assert(err).IsNil() 37 | g.Assert(p.Written()).Equal(uint64(len(v))) 38 | g.Assert(p.Progress(25)).Equal("[== ] 100 B / 1000 B") 39 | }) 40 | 41 | g.It("renders a progress bar when written exceeds total", func() { 42 | v := bytes.Repeat([]byte{' '}, 1001) 43 | p := progress.NewProgress(1000) 44 | _, err := p.Write(v) 45 | g.Assert(err).IsNil() 46 | g.Assert(p.Written()).Equal(uint64(len(v))) 47 | g.Assert(p.Progress(25)).Equal("[=========================] 1001 B / 1000 B") 48 | }) 49 | }) 50 | } 51 | -------------------------------------------------------------------------------- /src/server/filesystem/compress_test.go: -------------------------------------------------------------------------------- 1 | package filesystem 2 | 3 | import ( 4 | "context" 5 | "os" 6 | "testing" 7 | 8 | . "github.com/franela/goblin" 9 | ) 10 | 11 | // Given an archive named test.{ext}, with the following file structure: 12 | // 13 | // test/ 14 | // |──inside/ 15 | // |────finside.txt 16 | // |──outside.txt 17 | // 18 | // this test will ensure that it's being decompressed as expected 19 | func TestFilesystem_DecompressFile(t *testing.T) { 20 | g := Goblin(t) 21 | fs, rfs := NewFs() 22 | 23 | g.Describe("Decompress", func() { 24 | for _, ext := range []string{"zip", "rar", "tar", "tar.gz"} { 25 | g.It("can decompress a "+ext, func() { 26 | // copy the file to the new FS 27 | c, err := os.ReadFile("./testdata/test." 
+ ext) 28 | g.Assert(err).IsNil() 29 | err = rfs.CreateServerFile("./test."+ext, c) 30 | g.Assert(err).IsNil() 31 | 32 | // decompress 33 | err = fs.DecompressFile(context.Background(), "/", "test."+ext) 34 | g.Assert(err).IsNil() 35 | 36 | // make sure everything is where it is supposed to be 37 | _, err = rfs.StatServerFile("test/outside.txt") 38 | g.Assert(err).IsNil() 39 | 40 | st, err := rfs.StatServerFile("test/inside") 41 | g.Assert(err).IsNil() 42 | g.Assert(st.IsDir()).IsTrue() 43 | 44 | _, err = rfs.StatServerFile("test/inside/finside.txt") 45 | g.Assert(err).IsNil() 46 | g.Assert(st.IsDir()).IsTrue() 47 | }) 48 | } 49 | 50 | g.AfterEach(func() { 51 | _ = fs.TruncateRootDirectory() 52 | }) 53 | }) 54 | } 55 | -------------------------------------------------------------------------------- /src/internal/ufs/go.LICENSE: -------------------------------------------------------------------------------- 1 | Copyright (c) 2009 The Go Authors. All rights reserved. 2 | 3 | Redistribution and use in source and binary forms, with or without 4 | modification, are permitted provided that the following conditions are 5 | met: 6 | 7 | * Redistributions of source code must retain the above copyright 8 | notice, this list of conditions and the following disclaimer. 9 | * Redistributions in binary form must reproduce the above 10 | copyright notice, this list of conditions and the following disclaimer 11 | in the documentation and/or other materials provided with the 12 | distribution. 13 | * Neither the name of Google Inc. nor the names of its 14 | contributors may be used to endorse or promote products derived from 15 | this software without specific prior written permission. 16 | 17 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 18 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 19 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 20 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 21 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 22 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 23 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 24 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 25 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 26 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 27 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 28 | -------------------------------------------------------------------------------- /src/system/rate_test.go: -------------------------------------------------------------------------------- 1 | package system 2 | 3 | import ( 4 | "testing" 5 | "time" 6 | 7 | . "github.com/franela/goblin" 8 | ) 9 | 10 | func TestRate(t *testing.T) { 11 | g := Goblin(t) 12 | 13 | g.Describe("Rate", func() { 14 | g.It("properly rate limits a bucket", func() { 15 | r := NewRate(10, time.Millisecond*100) 16 | 17 | for i := 0; i < 100; i++ { 18 | ok := r.Try() 19 | if i < 10 && !ok { 20 | g.Failf("should not have allowed take on try %d", i) 21 | } else if i >= 10 && ok { 22 | g.Failf("should have blocked take on try %d", i) 23 | } 24 | } 25 | }) 26 | 27 | g.It("handles rate limiting in chunks", func() { 28 | var out []int 29 | r := NewRate(12, time.Millisecond*10) 30 | 31 | for i := 0; i < 100; i++ { 32 | if i%20 == 0 { 33 | // Give it time to recover. 
34 | time.Sleep(time.Millisecond * 10) 35 | } 36 | if r.Try() { 37 | out = append(out, i) 38 | } 39 | } 40 | 41 | g.Assert(len(out)).Equal(60) 42 | g.Assert(out[0]).Equal(0) 43 | g.Assert(out[12]).Equal(20) 44 | g.Assert(out[len(out)-1]).Equal(91) 45 | }) 46 | 47 | g.It("resets back to zero when called", func() { 48 | r := NewRate(10, time.Second) 49 | for i := 0; i < 100; i++ { 50 | if i%10 == 0 { 51 | r.Reset() 52 | } 53 | g.Assert(r.Try()).IsTrue() 54 | } 55 | g.Assert(r.Try()).IsFalse("final attempt should not allow taking") 56 | }) 57 | }) 58 | } 59 | 60 | func BenchmarkRate_Try(b *testing.B) { 61 | r := NewRate(10, time.Millisecond*100) 62 | 63 | b.ReportAllocs() 64 | for i := 0; i < b.N; i++ { 65 | r.Try() 66 | } 67 | } 68 | -------------------------------------------------------------------------------- /src/internal/ufs/file_posix.go: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: BSD-3-Clause 2 | 3 | // Code in this file was copied from `go/src/os/file_posix.go`. 4 | 5 | // Copyright 2009 The Go Authors. All rights reserved. 6 | // Use of this source code is governed by a BSD-style 7 | // license that can be found in the `go.LICENSE` file. 8 | 9 | //go:build unix || (js && wasm) || wasip1 || windows 10 | 11 | package ufs 12 | 13 | import ( 14 | "golang.org/x/sys/unix" 15 | ) 16 | 17 | // ignoringEINTR makes a function call and repeats it if it returns an 18 | // EINTR error. This appears to be required even though we install all 19 | // signal handlers with SA_RESTART: see https://go.dev/issue/22838, 20 | // https://go.dev/issue/38033, https://go.dev/issue/38836, 21 | // https://go.dev/issue/40846. Also, https://go.dev/issue/20400 and 22 | // https://go.dev/issue/36644 are issues in which a signal handler is 23 | // installed without setting SA_RESTART. None of these are the common case, 24 | // but there are enough of them that it seems that we can't avoid 25 | // an EINTR loop. 26 | func ignoringEINTR(fn func() error) error { 27 | for { 28 | err := fn() 29 | if err != unix.EINTR { 30 | return err 31 | } 32 | } 33 | } 34 | 35 | // syscallMode returns the syscall-specific mode bits from Go's portable mode bits. 36 | func syscallMode(i FileMode) (o FileMode) { 37 | o |= i.Perm() 38 | if i&ModeSetuid != 0 { 39 | o |= unix.S_ISUID 40 | } 41 | if i&ModeSetgid != 0 { 42 | o |= unix.S_ISGID 43 | } 44 | if i&ModeSticky != 0 { 45 | o |= unix.S_ISVTX 46 | } 47 | // No mapping for Go's ModeTemporary (plan9 only). 48 | return 49 | } 50 | -------------------------------------------------------------------------------- /src/server/transfer/archive.go: -------------------------------------------------------------------------------- 1 | package transfer 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "io" 7 | 8 | "github.com/pyrohost/elytra/src/internal/progress" 9 | "github.com/pyrohost/elytra/src/server/filesystem" 10 | ) 11 | 12 | // Archive returns an archive that can be used to stream the contents of the 13 | // contents of a server. 14 | func (t *Transfer) Archive() (*Archive, error) { 15 | if t.archive == nil { 16 | // Get the disk usage of the server (used to calculate the progress of the archive process) 17 | rawSize, err := t.Server.Filesystem().DiskUsage(true) 18 | if err != nil { 19 | return nil, fmt.Errorf("transfer: failed to get server disk usage: %w", err) 20 | } 21 | 22 | // Create a new archive instance and assign it to the transfer. 
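// The instance is cached on the transfer, so repeated calls to Archive()
// reuse the same underlying archive and progress tracker instead of
// recalculating the disk usage.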
23 | t.archive = NewArchive(t, uint64(rawSize)) 24 | } 25 | 26 | return t.archive, nil 27 | } 28 | 29 | // Archive represents an archive used to transfer the contents of a server. 30 | type Archive struct { 31 | archive *filesystem.Archive 32 | } 33 | 34 | // NewArchive returns a new archive associated with the given transfer. 35 | func NewArchive(t *Transfer, size uint64) *Archive { 36 | return &Archive{ 37 | archive: &filesystem.Archive{ 38 | Filesystem: t.Server.Filesystem(), 39 | Progress: progress.NewProgress(size), 40 | }, 41 | } 42 | } 43 | 44 | // Stream returns a reader that can be used to stream the contents of the archive. 45 | func (a *Archive) Stream(ctx context.Context, w io.Writer) error { 46 | return a.archive.Stream(ctx, w) 47 | } 48 | 49 | // Progress returns the current progress of the archive. 50 | func (a *Archive) Progress() *progress.Progress { 51 | return a.archive.Progress 52 | } 53 | -------------------------------------------------------------------------------- /docker-compose.dev.yml: -------------------------------------------------------------------------------- 1 | services: 2 | elytra: 3 | build: 4 | context: . 5 | dockerfile: Dockerfile 6 | args: 7 | VERSION: dev 8 | RUSTIC_VERSION: v0.10.0 9 | RUSTIC_TARGET: x86_64-unknown-linux-musl 10 | image: elytra:dev 11 | restart: unless-stopped 12 | tty: true 13 | stdin_open: true 14 | ports: 15 | - "8080:8080" 16 | - "2022:2022" 17 | environment: 18 | # Required: Panel connection and token 19 | ELYTRA_PANEL_LOCATION: "https://panel.example.com" 20 | ELYTRA_TOKEN_ID: "local-token-id" 21 | ELYTRA_TOKEN: "local-token-secret" 22 | 23 | # Optional overrides / useful defaults 24 | TZ: "UTC" 25 | ELYTRA_DEBUG: "true" 26 | ELYTRA_API_HOST: "0.0.0.0" 27 | ELYTRA_API_PORT: "8080" 28 | ELYTRA_UID: "988" 29 | ELYTRA_GID: "988" 30 | 31 | # System path overrides (inside container) 32 | ELYTRA_SYSTEM_ROOT_DIRECTORY: "/var/lib/elytra" 33 | ELYTRA_SYSTEM_DATA: "/var/lib/elytra/volumes" 34 | ELYTRA_SYSTEM_LOG_DIRECTORY: "/var/log/elytra" 35 | 36 | # Docker integration (required for managing containers) 37 | ELYTRA_DOCKER_TMPFS_SIZE: "100" 38 | # Disable rustic backups in container mode unless you have the 'rustic' binary available 39 | ELYTRA_SYSTEM_BACKUPS_RUSTIC_LOCAL_ENABLED: "false" 40 | 41 | volumes: 42 | - "/var/run/docker.sock:/var/run/docker.sock:ro" 43 | - "/var/lib/docker/containers/:/var/lib/docker/containers/:ro" 44 | - "./data:/var/lib/elytra" 45 | - "./logs:/var/log/elytra" 46 | - "./config.dev.yml:/etc/elytra/config.yml:ro" 47 | 48 | networks: 49 | default: 50 | driver: bridge 51 | -------------------------------------------------------------------------------- /src/remote/errors.go: -------------------------------------------------------------------------------- 1 | package remote 2 | 3 | import ( 4 | "fmt" 5 | "net/http" 6 | 7 | "emperror.dev/errors" 8 | ) 9 | 10 | type RequestErrors struct { 11 | Errors []RequestError `json:"errors"` 12 | } 13 | 14 | type RequestError struct { 15 | response *http.Response 16 | Code string `json:"code"` 17 | Status string `json:"status"` 18 | Detail string `json:"detail"` 19 | } 20 | 21 | // IsRequestError checks if the given error is of the RequestError type. 22 | func IsRequestError(err error) bool { 23 | var rerr *RequestError 24 | if err == nil { 25 | return false 26 | } 27 | return errors.As(err, &rerr) 28 | } 29 | 30 | // AsRequestError transforms the error into a RequestError if it is currently 31 | // one, checking the wrap status from the other error handlers. 
If the error 32 | // is not a RequestError nil is returned. 33 | func AsRequestError(err error) *RequestError { 34 | if err == nil { 35 | return nil 36 | } 37 | var rerr *RequestError 38 | if errors.As(err, &rerr) { 39 | return rerr 40 | } 41 | return nil 42 | } 43 | 44 | // Error returns the error response in a string form that can be more easily 45 | // consumed. 46 | func (re *RequestError) Error() string { 47 | c := 0 48 | if re.response != nil { 49 | c = re.response.StatusCode 50 | } 51 | 52 | return fmt.Sprintf("Error response from Panel: %s: %s (HTTP/%d)", re.Code, re.Detail, c) 53 | } 54 | 55 | // StatusCode returns the status code of the response. 56 | func (re *RequestError) StatusCode() int { 57 | return re.response.StatusCode 58 | } 59 | 60 | type SftpInvalidCredentialsError struct{} 61 | 62 | func (ice SftpInvalidCredentialsError) Error() string { 63 | return "the credentials provided were invalid" 64 | } 65 | -------------------------------------------------------------------------------- /src/server/installer/installer.go: -------------------------------------------------------------------------------- 1 | package installer 2 | 3 | import ( 4 | "context" 5 | 6 | "emperror.dev/errors" 7 | "github.com/asaskevich/govalidator" 8 | 9 | "github.com/pyrohost/elytra/src/remote" 10 | "github.com/pyrohost/elytra/src/server" 11 | ) 12 | 13 | type Installer struct { 14 | server *server.Server 15 | StartOnCompletion bool 16 | } 17 | 18 | type ServerDetails struct { 19 | UUID string `json:"uuid"` 20 | StartOnCompletion bool `json:"start_on_completion"` 21 | } 22 | 23 | // New validates the received data to ensure that all the required fields 24 | // have been passed along in the request. This should be manually run before 25 | // calling Execute(). 26 | func New(ctx context.Context, manager *server.Manager, details ServerDetails) (*Installer, error) { 27 | if !govalidator.IsUUIDv4(details.UUID) { 28 | return nil, NewValidationError("uuid provided was not in a valid format") 29 | } 30 | 31 | c, err := manager.Client().GetServerConfiguration(ctx, details.UUID) 32 | if err != nil { 33 | if !remote.IsRequestError(err) { 34 | return nil, errors.WithStackIf(err) 35 | } 36 | return nil, errors.WrapIf(err, "installer: could not get server configuration from remote API") 37 | } 38 | 39 | // Create a new server instance using the configuration we wrote to the disk 40 | // so that everything gets instantiated correctly on the struct. 41 | s, err := manager.InitServer(c) 42 | if err != nil { 43 | return nil, errors.WrapIf(err, "installer: could not init server instance") 44 | } 45 | i := Installer{server: s, StartOnCompletion: details.StartOnCompletion} 46 | return &i, nil 47 | } 48 | 49 | // Server returns the server instance. 50 | func (i *Installer) Server() *server.Server { 51 | return i.server 52 | } 53 | -------------------------------------------------------------------------------- /src/internal/ufs/mkdir_unix.go: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: BSD-3-Clause 2 | 3 | // Code in this file was derived from `go/src/os/path.go`. 4 | 5 | // Copyright 2009 The Go Authors. All rights reserved. 6 | // Use of this source code is governed by a BSD-style 7 | // license that can be found in the `go.LICENSE` file. 8 | 9 | //go:build unix 10 | 11 | package ufs 12 | 13 | // mkdirAll is a recursive Mkdir implementation that properly handles symlinks. 
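//
// The shape mirrors os.MkdirAll: the fast path Lstats the target (resolving a
// trailing symlink via Stat) and returns early if a directory already exists;
// the slow path trims any trailing separator, recursively ensures the parent
// exists, and finally calls Mkdir, tolerating the race where another caller
// created the directory in the meantime.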
14 | func (fs *UnixFS) mkdirAll(name string, mode FileMode) error { 15 | // Fast path: if we can tell whether path is a directory or file, stop with success or error. 16 | dir, err := fs.Lstat(name) 17 | if err == nil { 18 | if dir.Mode()&ModeSymlink != 0 { 19 | // If the final path is a symlink, resolve its target and use that 20 | // to check instead. 21 | dir, err = fs.Stat(name) 22 | if err != nil { 23 | return err 24 | } 25 | } 26 | if dir.IsDir() { 27 | return nil 28 | } 29 | return &PathError{Op: "mkdir", Path: name, Err: ErrNotDirectory} 30 | } 31 | 32 | // Slow path: make sure parent exists and then call Mkdir for path. 33 | i := len(name) 34 | for i > 0 && name[i-1] == '/' { // Skip trailing path separator. 35 | i-- 36 | } 37 | 38 | j := i 39 | for j > 0 && name[j-1] != '/' { // Scan backward over element. 40 | j-- 41 | } 42 | 43 | if j > 1 { 44 | // Create parent. 45 | err = fs.mkdirAll(name[:j-1], mode) 46 | if err != nil { 47 | return err 48 | } 49 | } 50 | 51 | // Parent now exists; invoke Mkdir and use its result. 52 | err = fs.Mkdir(name, mode) 53 | if err != nil { 54 | // Handle arguments like "foo/." by 55 | // double-checking that directory doesn't exist. 56 | dir, err1 := fs.Lstat(name) 57 | if err1 == nil && dir.IsDir() { 58 | return nil 59 | } 60 | return err 61 | } 62 | return nil 63 | } 64 | -------------------------------------------------------------------------------- /src/internal/database/database.go: -------------------------------------------------------------------------------- 1 | package database 2 | 3 | import ( 4 | "path/filepath" 5 | "time" 6 | 7 | "emperror.dev/errors" 8 | "github.com/glebarez/sqlite" 9 | "gorm.io/gorm" 10 | "gorm.io/gorm/logger" 11 | 12 | "github.com/pyrohost/elytra/src/config" 13 | "github.com/pyrohost/elytra/src/internal/models" 14 | "github.com/pyrohost/elytra/src/system" 15 | ) 16 | 17 | var ( 18 | o system.AtomicBool 19 | db *gorm.DB 20 | ) 21 | 22 | // Initialize configures the local SQLite database for Elytra and ensures that the models have 23 | // been fully migrated. 24 | func Initialize() error { 25 | if !o.SwapIf(true) { 26 | panic("database: attempt to initialize more than once during application lifecycle") 27 | } 28 | p := filepath.Join(config.Get().System.RootDirectory, "elytra.db") 29 | instance, err := gorm.Open(sqlite.Open(p), &gorm.Config{ 30 | Logger: logger.Default.LogMode(logger.Silent), 31 | }) 32 | if err != nil { 33 | return errors.Wrap(err, "database: could not open database file") 34 | } 35 | db = instance 36 | if sql, err := db.DB(); err != nil { 37 | return errors.WithStack(err) 38 | } else { 39 | sql.SetMaxOpenConns(1) 40 | sql.SetConnMaxLifetime(time.Hour) 41 | } 42 | if tx := db.Exec("PRAGMA synchronous = OFF"); tx.Error != nil { 43 | return errors.WithStack(tx.Error) 44 | } 45 | if tx := db.Exec("PRAGMA journal_mode = MEMORY"); tx.Error != nil { 46 | return errors.WithStack(tx.Error) 47 | } 48 | if err := db.AutoMigrate(&models.Activity{}); err != nil { 49 | return errors.WithStack(err) 50 | } 51 | return nil 52 | } 53 | 54 | // Instance returns the gorm database instance that was configured when the application was 55 | // booted. 
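// A minimal usage sketch (assuming Initialize has already run during boot;
// the activity values shown are placeholders):
//
//	a := models.Activity{Server: serverUUID, Event: event, IP: ip}
//	if tx := database.Instance().Create(&a); tx.Error != nil {
//		return errors.WithStack(tx.Error)
//	}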
56 | func Instance() *gorm.DB { 57 | if db == nil { 58 | panic("database: attempt to access instance before initialized") 59 | } 60 | return db 61 | } 62 | -------------------------------------------------------------------------------- /src/server/filesystem/errors_test.go: -------------------------------------------------------------------------------- 1 | package filesystem 2 | 3 | import ( 4 | "io" 5 | "testing" 6 | 7 | "emperror.dev/errors" 8 | . "github.com/franela/goblin" 9 | ) 10 | 11 | type stackTracer interface { 12 | StackTrace() errors.StackTrace 13 | } 14 | 15 | func TestFilesystem_PathResolutionError(t *testing.T) { 16 | g := Goblin(t) 17 | 18 | g.Describe("NewFilesystemError", func() { 19 | g.It("includes a stack trace for the error", func() { 20 | err := newFilesystemError(ErrCodeUnknownError, nil) 21 | 22 | _, ok := err.(stackTracer) 23 | g.Assert(ok).IsTrue() 24 | }) 25 | 26 | g.It("properly wraps the underlying error cause", func() { 27 | underlying := io.EOF 28 | err := newFilesystemError(ErrCodeUnknownError, underlying) 29 | 30 | _, ok := err.(stackTracer) 31 | g.Assert(ok).IsTrue() 32 | 33 | _, ok = err.(*Error) 34 | g.Assert(ok).IsFalse() 35 | 36 | fserr, ok := errors.Unwrap(err).(*Error) 37 | g.Assert(ok).IsTrue() 38 | g.Assert(fserr.Unwrap()).IsNotNil() 39 | g.Assert(fserr.Unwrap()).Equal(underlying) 40 | }) 41 | }) 42 | 43 | g.Describe("NewBadPathResolutionError", func() { 44 | g.It("is can detect itself as an error correctly", func() { 45 | err := NewBadPathResolution("foo", "bar") 46 | g.Assert(IsErrorCode(err, ErrCodePathResolution)).IsTrue() 47 | g.Assert(err.Error()).Equal("filesystem: server path [foo] resolves to a location outside the server root: bar") 48 | g.Assert(IsErrorCode(&Error{code: ErrCodeIsDirectory}, ErrCodePathResolution)).IsFalse() 49 | }) 50 | 51 | g.It("returns if no destination path is provided", func() { 52 | err := NewBadPathResolution("foo", "") 53 | g.Assert(err).IsNotNil() 54 | g.Assert(err.Error()).Equal("filesystem: server path [foo] resolves to a location outside the server root: ") 55 | }) 56 | }) 57 | } 58 | -------------------------------------------------------------------------------- /src/server/filesystem/path.go: -------------------------------------------------------------------------------- 1 | package filesystem 2 | 3 | import ( 4 | "path/filepath" 5 | "strings" 6 | 7 | "emperror.dev/errors" 8 | ) 9 | 10 | // Checks if the given file or path is in the server's file denylist. If so, an Error 11 | // is returned, otherwise nil is returned. 12 | func (fs *Filesystem) IsIgnored(paths ...string) error { 13 | for _, p := range paths { 14 | //sp, err := fs.SafePath(p) 15 | //if err != nil { 16 | // return err 17 | //} 18 | // TODO: update logic to use unixFS 19 | if fs.denylist.MatchesPath(p) { 20 | return errors.WithStack(&Error{code: ErrCodeDenylistFile, path: p, resolved: p}) 21 | } 22 | } 23 | return nil 24 | } 25 | 26 | // Generate a path to the file by cleaning it up and appending the root server path to it. This 27 | // DOES NOT guarantee that the file resolves within the server data directory. You'll want to use 28 | // the fs.unsafeIsInDataDirectory(p) function to confirm. 29 | func (fs *Filesystem) unsafeFilePath(p string) string { 30 | // Calling filepath.Clean on the joined directory will resolve it to the absolute path, 31 | // removing any ../ type of resolution arguments, and leaving us with a direct path link. 
32 | // 33 | // This will also trim the existing root path off the beginning of the path passed to 34 | // the function since that can get a bit messy. 35 | return filepath.Clean(filepath.Join(fs.Path(), strings.TrimPrefix(p, fs.Path()))) 36 | } 37 | 38 | // Check that that path string starts with the server data directory path. This function DOES NOT 39 | // validate that the rest of the path does not end up resolving out of this directory, or that the 40 | // targeted file or folder is not a symlink doing the same thing. 41 | func (fs *Filesystem) unsafeIsInDataDirectory(p string) bool { 42 | return strings.HasPrefix(strings.TrimSuffix(p, "/")+"/", strings.TrimSuffix(fs.Path(), "/")+"/") 43 | } 44 | -------------------------------------------------------------------------------- /src/internal/ufs/stat_unix.go: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: BSD-3-Clause 2 | 3 | // Code in this file was copied from `go/src/os/stat_linux.go` 4 | // and `go/src/os/types_unix.go`. 5 | 6 | // Copyright 2009 The Go Authors. All rights reserved. 7 | // Use of this source code is governed by a BSD-style 8 | // license that can be found in the `go.LICENSE` file. 9 | 10 | //go:build unix 11 | 12 | package ufs 13 | 14 | import ( 15 | "time" 16 | 17 | "golang.org/x/sys/unix" 18 | ) 19 | 20 | type fileStat struct { 21 | name string 22 | size int64 23 | mode FileMode 24 | modTime time.Time 25 | sys unix.Stat_t 26 | } 27 | 28 | var _ FileInfo = (*fileStat)(nil) 29 | 30 | func (fs *fileStat) Size() int64 { return fs.size } 31 | func (fs *fileStat) Mode() FileMode { return fs.mode } 32 | func (fs *fileStat) ModTime() time.Time { return fs.modTime } 33 | func (fs *fileStat) Sys() any { return &fs.sys } 34 | func (fs *fileStat) Name() string { return fs.name } 35 | func (fs *fileStat) IsDir() bool { return fs.Mode().IsDir() } 36 | 37 | func fillFileStatFromSys(fs *fileStat, name string) { 38 | fs.name = basename(name) 39 | fs.size = fs.sys.Size 40 | fs.modTime = time.Unix(fs.sys.Mtim.Unix()) 41 | fs.mode = FileMode(fs.sys.Mode & 0o777) 42 | switch fs.sys.Mode & unix.S_IFMT { 43 | case unix.S_IFBLK: 44 | fs.mode |= ModeDevice 45 | case unix.S_IFCHR: 46 | fs.mode |= ModeDevice | ModeCharDevice 47 | case unix.S_IFDIR: 48 | fs.mode |= ModeDir 49 | case unix.S_IFIFO: 50 | fs.mode |= ModeNamedPipe 51 | case unix.S_IFLNK: 52 | fs.mode |= ModeSymlink 53 | case unix.S_IFREG: 54 | // nothing to do 55 | case unix.S_IFSOCK: 56 | fs.mode |= ModeSocket 57 | } 58 | if fs.sys.Mode&unix.S_ISGID != 0 { 59 | fs.mode |= ModeSetgid 60 | } 61 | if fs.sys.Mode&unix.S_ISUID != 0 { 62 | fs.mode |= ModeSetuid 63 | } 64 | if fs.sys.Mode&unix.S_ISVTX != 0 { 65 | fs.mode |= ModeSticky 66 | } 67 | } 68 | -------------------------------------------------------------------------------- /src/server/events.go: -------------------------------------------------------------------------------- 1 | package server 2 | 3 | import ( 4 | "github.com/pyrohost/elytra/src/events" 5 | "github.com/pyrohost/elytra/src/system" 6 | ) 7 | 8 | // Defines all the possible output events for a server. 
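// Consumers typically receive these through the server's event bus; a small
// sketch of a listener (channel management is up to the caller):
//
//	ch := make(chan []byte)
//	s.Events().On(ch)
//	defer s.Events().Off(ch)
//
//	e := events.MustDecode(<-ch)
//	if e.Topic == StatusEvent {
//		// react to a state change
//	}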
9 | const ( 10 | DaemonMessageEvent = "daemon message" 11 | InstallOutputEvent = "install output" 12 | InstallStartedEvent = "install started" 13 | InstallCompletedEvent = "install completed" 14 | ConsoleOutputEvent = "console output" 15 | StatusEvent = "status" 16 | StatsEvent = "stats" 17 | BackupRestoreCompletedEvent = "backup restore completed" 18 | BackupCompletedEvent = "backup completed" 19 | TransferLogsEvent = "transfer logs" 20 | TransferStatusEvent = "transfer status" 21 | DeletedEvent = "deleted" 22 | ) 23 | 24 | // Events returns the server's emitter instance. 25 | func (s *Server) Events() *events.Bus { 26 | s.emitterLock.Lock() 27 | defer s.emitterLock.Unlock() 28 | 29 | if s.emitter == nil { 30 | s.emitter = events.NewBus() 31 | } 32 | 33 | return s.emitter 34 | } 35 | 36 | // Sink returns the instantiated and named sink for a server. If the sink has 37 | // not been configured yet this function will cause a panic condition. 38 | func (s *Server) Sink(name system.SinkName) *system.SinkPool { 39 | sink, ok := s.sinks[name] 40 | if !ok { 41 | s.Log().Fatalf("attempt to access nil sink: %s", name) 42 | } 43 | return sink 44 | } 45 | 46 | // DestroyAllSinks iterates over all of the sinks configured for the server and 47 | // destroys their instances. Note that this will cause a panic if you attempt 48 | // to call Server.Sink() again after. This function is only used when a server 49 | // is being deleted from the system. 50 | func (s *Server) DestroyAllSinks() { 51 | s.Log().Info("destroying all registered sinks for server instance") 52 | for _, sink := range s.sinks { 53 | sink.Destroy() 54 | } 55 | } 56 | -------------------------------------------------------------------------------- /src/sftp/event.go: -------------------------------------------------------------------------------- 1 | package sftp 2 | 3 | import ( 4 | "emperror.dev/errors" 5 | "github.com/apex/log" 6 | 7 | "github.com/pyrohost/elytra/src/internal/database" 8 | "github.com/pyrohost/elytra/src/internal/models" 9 | ) 10 | 11 | type eventHandler struct { 12 | ip string 13 | user string 14 | server string 15 | } 16 | 17 | type FileAction struct { 18 | // Entity is the targeted file or directory (depending on the event) that the action 19 | // is being performed _against_, such as "/foo/test.txt". This will always be the full 20 | // path to the element. 21 | Entity string 22 | // Target is an optional (often blank) field that only has a value in it when the event 23 | // is specifically modifying the entity, such as a rename or move event. In that case 24 | // the Target field will be the final value, such as "/bar/new.txt" 25 | Target string 26 | } 27 | 28 | // Log parses a SFTP specific file activity event and then passes it off to be stored 29 | // in the normal activity database. 30 | func (eh *eventHandler) Log(e models.Event, fa FileAction) error { 31 | metadata := map[string]interface{}{ 32 | "files": []string{fa.Entity}, 33 | } 34 | if fa.Target != "" { 35 | metadata["files"] = []map[string]string{ 36 | {"from": fa.Entity, "to": fa.Target}, 37 | } 38 | } 39 | 40 | a := models.Activity{ 41 | Server: eh.server, 42 | Event: e, 43 | Metadata: metadata, 44 | IP: eh.ip, 45 | } 46 | 47 | if tx := database.Instance().Create(a.SetUser(eh.user)); tx.Error != nil { 48 | return errors.WithStack(tx.Error) 49 | } 50 | return nil 51 | } 52 | 53 | // MustLog is a wrapper around log that will trigger a fatal error and exit the application 54 | // if an error is encountered during the logging of the event. 
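// Note that the implementation below logs the failure at the error level and
// does not exit. A typical call looks something like the following sketch;
// the event name and path are illustrative:
//
//	eh.MustLog(models.Event("server:sftp.write"), FileAction{Entity: "/files/config.yml"})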
55 | func (eh *eventHandler) MustLog(e models.Event, fa FileAction) { 56 | if err := eh.Log(e, fa); err != nil { 57 | log.WithField("error", errors.WithStack(err)).WithField("event", e).Error("sftp: failed to log event") 58 | } 59 | } 60 | -------------------------------------------------------------------------------- /src/internal/ufs/path_unix.go: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: BSD-3-Clause 2 | 3 | // Code in this file was copied from `go/src/os/path.go` 4 | // and `go/src/os/path_unix.go`. 5 | 6 | // Copyright 2009 The Go Authors. All rights reserved. 7 | // Use of this source code is governed by a BSD-style 8 | // license that can be found in the `go.LICENSE` file. 9 | 10 | //go:build unix 11 | 12 | package ufs 13 | 14 | import ( 15 | "os" 16 | ) 17 | 18 | // basename removes trailing slashes and the leading directory name from path name. 19 | func basename(name string) string { 20 | i := len(name) - 1 21 | // Remove trailing slashes 22 | for ; i > 0 && name[i] == '/'; i-- { 23 | name = name[:i] 24 | } 25 | // Remove leading directory name 26 | for i--; i >= 0; i-- { 27 | if name[i] == '/' { 28 | name = name[i+1:] 29 | break 30 | } 31 | } 32 | return name 33 | } 34 | 35 | // endsWithDot reports whether the final component of path is ".". 36 | func endsWithDot(path string) bool { 37 | if path == "." { 38 | return true 39 | } 40 | if len(path) >= 2 && path[len(path)-1] == '.' && os.IsPathSeparator(path[len(path)-2]) { 41 | return true 42 | } 43 | return false 44 | } 45 | 46 | // splitPath returns the base name and parent directory. 47 | func splitPath(path string) (string, string) { 48 | // if no better parent is found, the path is relative from "here" 49 | dirname := "." 50 | 51 | // Remove all but one leading slash. 52 | for len(path) > 1 && path[0] == '/' && path[1] == '/' { 53 | path = path[1:] 54 | } 55 | 56 | i := len(path) - 1 57 | 58 | // Remove trailing slashes. 59 | for ; i > 0 && path[i] == '/'; i-- { 60 | path = path[:i] 61 | } 62 | 63 | // if no slashes in path, base is path 64 | basename := path 65 | 66 | // Remove leading directory path 67 | for i--; i >= 0; i-- { 68 | if path[i] == '/' { 69 | if i == 0 { 70 | dirname = path[:1] 71 | } else { 72 | dirname = path[:i] 73 | } 74 | basename = path[i+1:] 75 | break 76 | } 77 | } 78 | 79 | return dirname, basename 80 | } 81 | -------------------------------------------------------------------------------- /.github/workflows/ci.yml: -------------------------------------------------------------------------------- 1 | name: CI/CD 2 | 3 | on: 4 | push: 5 | branches: [main] 6 | pull_request: 7 | branches: [main] 8 | 9 | permissions: 10 | contents: read 11 | 12 | jobs: 13 | test: 14 | name: Test 15 | runs-on: ubuntu-latest 16 | steps: 17 | - uses: actions/checkout@v4 18 | 19 | - uses: actions/setup-go@v5 20 | with: 21 | go-version-file: go.mod 22 | cache: true 23 | check-latest: true 24 | 25 | - name: Download dependencies 26 | run: go mod download 27 | 28 | - name: Run tests 29 | run: go test -race ./... 
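      # An additional static-analysis step could slot in here if desired,
      # for example (sketch; not part of the current workflow):
      #
      # - name: Vet
      #   run: go vet ./...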
30 | 31 | docker: 32 | name: Docker Build & Push 33 | runs-on: ubuntu-latest 34 | needs: test 35 | if: github.event_name == 'push' && github.ref == 'refs/heads/main' 36 | permissions: 37 | contents: read 38 | packages: write 39 | id-token: write 40 | steps: 41 | - uses: actions/checkout@v4 42 | 43 | - uses: docker/setup-qemu-action@v3 44 | 45 | - uses: docker/setup-buildx-action@v3 46 | 47 | - uses: docker/login-action@v3 48 | with: 49 | registry: ghcr.io 50 | username: ${{ github.actor }} 51 | password: ${{ secrets.GITHUB_TOKEN }} 52 | 53 | - name: Extract metadata 54 | id: meta 55 | uses: docker/metadata-action@v5 56 | with: 57 | images: ghcr.io/pyrohost/elytra 58 | tags: | 59 | type=sha,format=short 60 | type=raw,value=dev,enable={{is_default_branch}} 61 | 62 | - name: Build and push 63 | uses: docker/build-push-action@v6 64 | with: 65 | context: . 66 | platforms: linux/amd64,linux/arm64 67 | push: true 68 | tags: ${{ steps.meta.outputs.tags }} 69 | labels: ${{ steps.meta.outputs.labels }} 70 | build-args: VERSION=dev-${{ github.sha }} 71 | cache-from: type=gha 72 | cache-to: type=gha,mode=max 73 | provenance: true 74 | sbom: true 75 | -------------------------------------------------------------------------------- /src/events/events.go: -------------------------------------------------------------------------------- 1 | package events 2 | 3 | import ( 4 | "encoding/json" 5 | "strings" 6 | 7 | "emperror.dev/errors" 8 | 9 | "github.com/pyrohost/elytra/src/system" 10 | ) 11 | 12 | // Event represents an Event sent over a Bus. 13 | type Event struct { 14 | Topic string 15 | Data interface{} 16 | } 17 | 18 | // Bus represents an Event Bus. 19 | type Bus struct { 20 | *system.SinkPool 21 | } 22 | 23 | // NewBus returns a new empty Bus. This is simply a nicer wrapper around the 24 | // system.SinkPool implementation that allows for more simplistic usage within 25 | // the codebase. 26 | // 27 | // All of the events emitted out of this bus are byte slices that can be decoded 28 | // back into an events.Event interface. 29 | func NewBus() *Bus { 30 | return &Bus{ 31 | system.NewSinkPool(), 32 | } 33 | } 34 | 35 | // Publish publishes a message to the Bus. 36 | func (b *Bus) Publish(topic string, data interface{}) { 37 | // Some of our actions for the socket support passing a more specific namespace, 38 | // such as "backup completed:1234" to indicate which specific backup was completed. 39 | // 40 | // In these cases, we still need to send the event using the standard listener 41 | // name of "backup completed". 42 | if strings.Contains(topic, ":") { 43 | parts := strings.SplitN(topic, ":", 2) 44 | if len(parts) == 2 { 45 | topic = parts[0] 46 | } 47 | } 48 | 49 | enc, err := json.Marshal(Event{Topic: topic, Data: data}) 50 | if err != nil { 51 | panic(errors.WithStack(err)) 52 | } 53 | b.Push(enc) 54 | } 55 | 56 | // MustDecode decodes the event byte slice back into an events.Event struct or 57 | // panics if an error is encountered during this process. 58 | func MustDecode(data []byte) (e Event) { 59 | if err := DecodeTo(data, &e); err != nil { 60 | panic(err) 61 | } 62 | return 63 | } 64 | 65 | // DecodeTo decodes a byte slice of event data into the given interface. 
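// For example (sketch; raw is a placeholder for bytes received from a sink):
//
//	var e Event
//	if err := DecodeTo(raw, &e); err != nil {
//		// handle the malformed payload
//	}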
66 | func DecodeTo(data []byte, v interface{}) error { 67 | if err := json.Unmarshal(data, &v); err != nil { 68 | return errors.Wrap(err, "events: failed to decode byte slice") 69 | } 70 | return nil 71 | } 72 | -------------------------------------------------------------------------------- /src/system/locker.go: -------------------------------------------------------------------------------- 1 | package system 2 | 3 | import ( 4 | "context" 5 | "sync" 6 | 7 | "emperror.dev/errors" 8 | ) 9 | 10 | var ErrLockerLocked = errors.Sentinel("locker: cannot acquire lock, already locked") 11 | 12 | type Locker struct { 13 | mu sync.RWMutex 14 | ch chan bool 15 | } 16 | 17 | // NewLocker returns a new Locker instance. 18 | func NewLocker() *Locker { 19 | return &Locker{ 20 | ch: make(chan bool, 1), 21 | } 22 | } 23 | 24 | // IsLocked returns the current state of the locker channel. If there is 25 | // currently a value in the channel, it is assumed to be locked. 26 | func (l *Locker) IsLocked() bool { 27 | l.mu.RLock() 28 | defer l.mu.RUnlock() 29 | return len(l.ch) == 1 30 | } 31 | 32 | // Acquire will acquire the power lock if it is not currently locked. If it is 33 | // already locked, acquire will fail to acquire the lock, and will return false. 34 | func (l *Locker) Acquire() error { 35 | l.mu.Lock() 36 | defer l.mu.Unlock() 37 | select { 38 | case l.ch <- true: 39 | default: 40 | return ErrLockerLocked 41 | } 42 | return nil 43 | } 44 | 45 | // TryAcquire will attempt to acquire a power-lock until the context provided 46 | // is canceled. 47 | func (l *Locker) TryAcquire(ctx context.Context) error { 48 | select { 49 | case l.ch <- true: 50 | return nil 51 | case <-ctx.Done(): 52 | if err := ctx.Err(); err != nil { 53 | if errors.Is(err, context.DeadlineExceeded) || errors.Is(err, context.Canceled) { 54 | return ErrLockerLocked 55 | } 56 | } 57 | return nil 58 | } 59 | } 60 | 61 | // Release will drain the locker channel so that we can properly re-acquire it 62 | // at a later time. If the channel is not currently locked this function is a 63 | // no-op and will immediately return. 64 | func (l *Locker) Release() { 65 | l.mu.Lock() 66 | select { 67 | case <-l.ch: 68 | default: 69 | } 70 | l.mu.Unlock() 71 | } 72 | 73 | // Destroy cleans up the power locker by closing the channel. 74 | func (l *Locker) Destroy() { 75 | l.mu.Lock() 76 | if l.ch != nil { 77 | select { 78 | case <-l.ch: 79 | default: 80 | } 81 | close(l.ch) 82 | } 83 | l.mu.Unlock() 84 | } 85 | -------------------------------------------------------------------------------- /flake.nix: -------------------------------------------------------------------------------- 1 | { 2 | description = "Elytra"; 3 | 4 | inputs = { 5 | flake-parts.url = "github:hercules-ci/flake-parts"; 6 | nixpkgs.url = "github:NixOS/nixpkgs/nixos-unstable"; 7 | 8 | treefmt-nix = { 9 | url = "github:numtide/treefmt-nix"; 10 | inputs.nixpkgs.follows = "nixpkgs"; 11 | }; 12 | }; 13 | 14 | outputs = {...} @ inputs: 15 | inputs.flake-parts.lib.mkFlake {inherit inputs;} { 16 | systems = inputs.nixpkgs.lib.systems.flakeExposed; 17 | 18 | imports = [ 19 | inputs.treefmt-nix.flakeModule 20 | inputs.flake-parts.flakeModules.easyOverlay 21 | ]; 22 | 23 | perSystem = { 24 | system, 25 | config, 26 | ... 
27 | }: let 28 | pkgs = import inputs.nixpkgs {inherit system;}; 29 | in { 30 | devShells.default = pkgs.mkShell { 31 | buildInputs = with pkgs; [ 32 | go 33 | golangci-lint 34 | gotools 35 | config.treefmt.build.wrapper 36 | ]; 37 | }; 38 | 39 | packages.elytra = pkgs.buildGoModule rec { 40 | pname = "elytra"; 41 | version = inputs.self.rev or "dirty"; 42 | src = inputs.self; 43 | vendorHash = "sha256-scQQP9hRezE0pB6zh36IEqETktWwE8PH4gBCel8L3hQ="; 44 | ldflags = [ 45 | "-s" 46 | "-w" 47 | "-X" 48 | "github.com/pyrohost/elytra/system.Version=${version}" 49 | ]; 50 | }; 51 | 52 | treefmt = { 53 | projectRootFile = "flake.nix"; 54 | 55 | programs = { 56 | alejandra.enable = true; 57 | deadnix.enable = true; 58 | gofmt.enable = true; 59 | shellcheck.enable = true; 60 | shfmt = { 61 | enable = true; 62 | indent_size = 0; # 0 causes shfmt to use tabs 63 | }; 64 | yamlfmt = { 65 | enable = true; 66 | settings = { 67 | formatter.retain_line_breaks = true; 68 | }; 69 | }; 70 | }; 71 | }; 72 | }; 73 | }; 74 | } 75 | -------------------------------------------------------------------------------- /src/server/resources.go: -------------------------------------------------------------------------------- 1 | package server 2 | 3 | import ( 4 | "sync" 5 | "sync/atomic" 6 | 7 | "github.com/pyrohost/elytra/src/environment" 8 | "github.com/pyrohost/elytra/src/system" 9 | ) 10 | 11 | // ResourceUsage defines the current resource usage for a given server instance. If a server is offline you 12 | // should obviously expect memory and CPU usage to be 0. However, disk will always be returned 13 | // since that is not dependent on the server being running to collect that data. 14 | type ResourceUsage struct { 15 | mu sync.RWMutex 16 | 17 | // Embed the current environment stats into this server specific resource usage struct. 18 | environment.Stats 19 | 20 | // The current server status. 21 | State *system.AtomicString `json:"state"` 22 | 23 | // The current disk space being used by the server. This value is not guaranteed to be accurate 24 | // at all times. It is "manually" set whenever server.Proc() is called. This is kind of just a 25 | // hacky solution for now to avoid passing events all over the place. 26 | Disk int64 `json:"disk_bytes"` 27 | } 28 | 29 | // Proc returns the current resource usage stats for the server instance. This returns 30 | // a copy of the tracked resources, so making any changes to the response will not 31 | // have the desired outcome for you most likely. 32 | func (s *Server) Proc() ResourceUsage { 33 | s.resources.mu.Lock() 34 | defer s.resources.mu.Unlock() 35 | // Store the updated disk usage when requesting process usage. 36 | atomic.StoreInt64(&s.resources.Disk, s.Filesystem().CachedUsage()) 37 | //goland:noinspection GoVetCopyLock 38 | return s.resources 39 | } 40 | 41 | // UpdateStats updates the current stats for the server's resource usage. 42 | func (ru *ResourceUsage) UpdateStats(stats environment.Stats) { 43 | ru.mu.Lock() 44 | ru.Stats = stats 45 | ru.mu.Unlock() 46 | } 47 | 48 | // Reset resets the usages values to zero, used when a server is stopped to ensure we don't hold 49 | // onto any values incorrectly. 
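// For reference, callers read these values through s.Proc(), which returns a
// copy of the tracked usage, so a snapshot such as the following (sketch) is
// safe to hold onto:
//
//	usage := s.Proc()
//	diskBytes, memBytes := usage.Disk, usage.Memory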
50 | func (ru *ResourceUsage) Reset() { 51 | ru.mu.Lock() 52 | defer ru.mu.Unlock() 53 | 54 | ru.Memory = 0 55 | ru.CpuAbsolute = 0 56 | ru.Uptime = 0 57 | ru.Network.TxBytes = 0 58 | ru.Network.RxBytes = 0 59 | } 60 | -------------------------------------------------------------------------------- /src/server/filesystem/stat.go: -------------------------------------------------------------------------------- 1 | package filesystem 2 | 3 | import ( 4 | "encoding/json" 5 | "io" 6 | "strconv" 7 | "time" 8 | 9 | "github.com/gabriel-vasile/mimetype" 10 | 11 | "github.com/pyrohost/elytra/src/internal/ufs" 12 | ) 13 | 14 | type Stat struct { 15 | ufs.FileInfo 16 | Mimetype string 17 | } 18 | 19 | func (s *Stat) MarshalJSON() ([]byte, error) { 20 | return json.Marshal(struct { 21 | Name string `json:"name"` 22 | Created string `json:"created"` 23 | Modified string `json:"modified"` 24 | Mode string `json:"mode"` 25 | ModeBits string `json:"mode_bits"` 26 | Size int64 `json:"size"` 27 | Directory bool `json:"directory"` 28 | File bool `json:"file"` 29 | Symlink bool `json:"symlink"` 30 | Mime string `json:"mime"` 31 | }{ 32 | Name: s.Name(), 33 | Created: s.CTime().Format(time.RFC3339), 34 | Modified: s.ModTime().Format(time.RFC3339), 35 | Mode: s.Mode().String(), 36 | // Using `&ModePerm` on the file's mode will cause the mode to only have the permission values, and nothing else. 37 | ModeBits: strconv.FormatUint(uint64(s.Mode()&ufs.ModePerm), 8), 38 | Size: s.Size(), 39 | Directory: s.IsDir(), 40 | File: !s.IsDir(), 41 | Symlink: s.Mode().Type()&ufs.ModeSymlink != 0, 42 | Mime: s.Mimetype, 43 | }) 44 | } 45 | 46 | func statFromFile(f ufs.File) (Stat, error) { 47 | s, err := f.Stat() 48 | if err != nil { 49 | return Stat{}, err 50 | } 51 | var m *mimetype.MIME 52 | if !s.IsDir() { 53 | m, err = mimetype.DetectReader(f) 54 | if err != nil { 55 | return Stat{}, err 56 | } 57 | if _, err := f.Seek(0, io.SeekStart); err != nil { 58 | return Stat{}, err 59 | } 60 | } 61 | st := Stat{ 62 | FileInfo: s, 63 | Mimetype: "inode/directory", 64 | } 65 | if m != nil { 66 | st.Mimetype = m.String() 67 | } 68 | return st, nil 69 | } 70 | 71 | // Stat stats a file or folder and returns the base stat object from go along 72 | // with the MIME data that can be used for editing files. 
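// A small usage sketch (the file name is illustrative):
//
//	st, err := fs.Stat("configs/server.properties")
//	if err != nil {
//		return err
//	}
//	out, _ := json.Marshal(&st) // uses the MarshalJSON implementation above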
73 | func (fs *Filesystem) Stat(p string) (Stat, error) { 74 | f, err := fs.unixFS.Open(p) 75 | if err != nil { 76 | return Stat{}, err 77 | } 78 | defer f.Close() 79 | st, err := statFromFile(f) 80 | if err != nil { 81 | return Stat{}, err 82 | } 83 | return st, nil 84 | } 85 | -------------------------------------------------------------------------------- /flake.lock: -------------------------------------------------------------------------------- 1 | { 2 | "nodes": { 3 | "flake-parts": { 4 | "inputs": { 5 | "nixpkgs-lib": "nixpkgs-lib" 6 | }, 7 | "locked": { 8 | "lastModified": 1760948891, 9 | "narHash": "sha256-TmWcdiUUaWk8J4lpjzu4gCGxWY6/Ok7mOK4fIFfBuU4=", 10 | "owner": "hercules-ci", 11 | "repo": "flake-parts", 12 | "rev": "864599284fc7c0ba6357ed89ed5e2cd5040f0c04", 13 | "type": "github" 14 | }, 15 | "original": { 16 | "owner": "hercules-ci", 17 | "repo": "flake-parts", 18 | "type": "github" 19 | } 20 | }, 21 | "nixpkgs": { 22 | "locked": { 23 | "lastModified": 1761672384, 24 | "narHash": "sha256-o9KF3DJL7g7iYMZq9SWgfS1BFlNbsm6xplRjVlOCkXI=", 25 | "owner": "NixOS", 26 | "repo": "nixpkgs", 27 | "rev": "08dacfca559e1d7da38f3cf05f1f45ee9bfd213c", 28 | "type": "github" 29 | }, 30 | "original": { 31 | "owner": "NixOS", 32 | "ref": "nixos-unstable", 33 | "repo": "nixpkgs", 34 | "type": "github" 35 | } 36 | }, 37 | "nixpkgs-lib": { 38 | "locked": { 39 | "lastModified": 1754788789, 40 | "narHash": "sha256-x2rJ+Ovzq0sCMpgfgGaaqgBSwY+LST+WbZ6TytnT9Rk=", 41 | "owner": "nix-community", 42 | "repo": "nixpkgs.lib", 43 | "rev": "a73b9c743612e4244d865a2fdee11865283c04e6", 44 | "type": "github" 45 | }, 46 | "original": { 47 | "owner": "nix-community", 48 | "repo": "nixpkgs.lib", 49 | "type": "github" 50 | } 51 | }, 52 | "root": { 53 | "inputs": { 54 | "flake-parts": "flake-parts", 55 | "nixpkgs": "nixpkgs", 56 | "treefmt-nix": "treefmt-nix" 57 | } 58 | }, 59 | "treefmt-nix": { 60 | "inputs": { 61 | "nixpkgs": [ 62 | "nixpkgs" 63 | ] 64 | }, 65 | "locked": { 66 | "lastModified": 1761311587, 67 | "narHash": "sha256-Msq86cR5SjozQGCnC6H8C+0cD4rnx91BPltZ9KK613Y=", 68 | "owner": "numtide", 69 | "repo": "treefmt-nix", 70 | "rev": "2eddae033e4e74bf581c2d1dfa101f9033dbd2dc", 71 | "type": "github" 72 | }, 73 | "original": { 74 | "owner": "numtide", 75 | "repo": "treefmt-nix", 76 | "type": "github" 77 | } 78 | } 79 | }, 80 | "root": "root", 81 | "version": 7 82 | } 83 | -------------------------------------------------------------------------------- /src/environment/config.go: -------------------------------------------------------------------------------- 1 | package environment 2 | 3 | import ( 4 | "sync" 5 | ) 6 | 7 | type Settings struct { 8 | Mounts []Mount 9 | Allocations Allocations 10 | Limits Limits 11 | Labels map[string]string 12 | } 13 | 14 | // Defines the actual configuration struct for the environment with all of the settings 15 | // defined within it. 16 | type Configuration struct { 17 | mu sync.RWMutex 18 | 19 | environmentVariables []string 20 | settings Settings 21 | } 22 | 23 | // Returns a new environment configuration with the given settings and environment variables 24 | // defined within it. 25 | func NewConfiguration(s Settings, envVars []string) *Configuration { 26 | return &Configuration{ 27 | environmentVariables: envVars, 28 | settings: s, 29 | } 30 | } 31 | 32 | // Updates the settings struct for this environment on the fly. This allows modified servers to 33 | // automatically push those changes to the environment. 
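// For example (sketch; the values are placeholders):
//
//	cfg := NewConfiguration(Settings{Limits: limits, Mounts: mounts}, envVars)
//	// ...later, after the server definition changes:
//	cfg.SetSettings(Settings{Limits: newLimits, Mounts: mounts})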
34 | func (c *Configuration) SetSettings(s Settings) { 35 | c.mu.Lock() 36 | c.settings = s 37 | c.mu.Unlock() 38 | } 39 | 40 | // Updates the environment variables associated with this environment by replacing the entire 41 | // array of them with a new one. 42 | func (c *Configuration) SetEnvironmentVariables(ev []string) { 43 | c.mu.Lock() 44 | c.environmentVariables = ev 45 | c.mu.Unlock() 46 | } 47 | 48 | // Returns the limits assigned to this environment. 49 | func (c *Configuration) Limits() Limits { 50 | c.mu.RLock() 51 | defer c.mu.RUnlock() 52 | 53 | return c.settings.Limits 54 | } 55 | 56 | // Returns the allocations associated with this environment. 57 | func (c *Configuration) Allocations() Allocations { 58 | c.mu.RLock() 59 | defer c.mu.RUnlock() 60 | 61 | return c.settings.Allocations 62 | } 63 | 64 | // Returns all of the mounts associated with this environment. 65 | func (c *Configuration) Mounts() []Mount { 66 | c.mu.RLock() 67 | defer c.mu.RUnlock() 68 | 69 | return c.settings.Mounts 70 | } 71 | 72 | // Labels returns the container labels associated with this instance. 73 | func (c *Configuration) Labels() map[string]string { 74 | c.mu.RLock() 75 | defer c.mu.RUnlock() 76 | 77 | return c.settings.Labels 78 | } 79 | 80 | // Returns the environment variables associated with this instance. 81 | func (c *Configuration) EnvironmentVariables() []string { 82 | c.mu.RLock() 83 | defer c.mu.RUnlock() 84 | 85 | return c.environmentVariables 86 | } 87 | -------------------------------------------------------------------------------- /src/events/events_test.go: -------------------------------------------------------------------------------- 1 | package events 2 | 3 | import ( 4 | "testing" 5 | "time" 6 | 7 | . "github.com/franela/goblin" 8 | ) 9 | 10 | func TestNewBus(t *testing.T) { 11 | g := Goblin(t) 12 | 13 | g.Describe("Events", func() { 14 | var bus *Bus 15 | g.BeforeEach(func() { 16 | bus = NewBus() 17 | }) 18 | 19 | g.Describe("NewBus", func() { 20 | g.It("is not nil", func() { 21 | g.Assert(bus).IsNotNil("Bus expected to not be nil") 22 | }) 23 | }) 24 | 25 | g.Describe("Publish", func() { 26 | const topic = "test" 27 | const message = "this is a test message!" 
28 | 29 | g.It("publishes message", func() { 30 | bus := NewBus() 31 | 32 | listener := make(chan []byte) 33 | bus.On(listener) 34 | 35 | done := make(chan struct{}, 1) 36 | go func() { 37 | select { 38 | case v := <-listener: 39 | m := MustDecode(v) 40 | g.Assert(m.Topic).Equal(topic) 41 | g.Assert(m.Data).Equal(message) 42 | case <-time.After(1 * time.Second): 43 | g.Fail("listener did not receive message in time") 44 | } 45 | done <- struct{}{} 46 | }() 47 | bus.Publish(topic, message) 48 | <-done 49 | 50 | // Cleanup 51 | bus.Off(listener) 52 | }) 53 | 54 | g.It("publishes message to all listeners", func() { 55 | bus := NewBus() 56 | 57 | listener := make(chan []byte) 58 | listener2 := make(chan []byte) 59 | listener3 := make(chan []byte) 60 | bus.On(listener) 61 | bus.On(listener2) 62 | bus.On(listener3) 63 | 64 | done := make(chan struct{}, 1) 65 | go func() { 66 | for i := 0; i < 3; i++ { 67 | select { 68 | case v := <-listener: 69 | m := MustDecode(v) 70 | g.Assert(m.Topic).Equal(topic) 71 | g.Assert(m.Data).Equal(message) 72 | case v := <-listener2: 73 | m := MustDecode(v) 74 | g.Assert(m.Topic).Equal(topic) 75 | g.Assert(m.Data).Equal(message) 76 | case v := <-listener3: 77 | m := MustDecode(v) 78 | g.Assert(m.Topic).Equal(topic) 79 | g.Assert(m.Data).Equal(message) 80 | case <-time.After(1 * time.Second): 81 | g.Fail("all listeners did not receive the message in time") 82 | i = 3 83 | } 84 | } 85 | 86 | done <- struct{}{} 87 | }() 88 | bus.Publish(topic, message) 89 | <-done 90 | 91 | // Cleanup 92 | bus.Off(listener) 93 | bus.Off(listener2) 94 | bus.Off(listener3) 95 | }) 96 | }) 97 | }) 98 | } 99 | -------------------------------------------------------------------------------- /src/router/router_jobs.go: -------------------------------------------------------------------------------- 1 | package router 2 | 3 | import ( 4 | "net/http" 5 | 6 | "github.com/gin-gonic/gin" 7 | 8 | "github.com/pyrohost/elytra/src/jobs" 9 | "github.com/pyrohost/elytra/src/router/middleware" 10 | ) 11 | 12 | func getJobStatus(c *gin.Context) { 13 | manager := middleware.ExtractJobManager(c) 14 | jobID := c.Param("job_id") 15 | 16 | instance, exists := manager.GetJob(jobID) 17 | if !exists { 18 | c.JSON(http.StatusNotFound, gin.H{ 19 | "error": "Job not found", 20 | }) 21 | return 22 | } 23 | 24 | c.JSON(http.StatusOK, gin.H{ 25 | "job_id": instance.Job.GetID(), 26 | "type": instance.Job.GetType(), 27 | "status": instance.Status, 28 | "progress": instance.Progress, 29 | "message": instance.Message, 30 | "error": instance.Error, 31 | "result": instance.Result, 32 | "created_at": instance.CreatedAt, 33 | "updated_at": instance.UpdatedAt, 34 | }) 35 | } 36 | 37 | func postCreateJob(c *gin.Context) { 38 | manager := middleware.ExtractJobManager(c) 39 | 40 | var request struct { 41 | JobType string `json:"job_type" binding:"required"` 42 | JobData map[string]interface{} `json:"job_data" binding:"required"` 43 | } 44 | 45 | if err := c.BindJSON(&request); err != nil { 46 | return 47 | } 48 | 49 | jobID, err := manager.CreateJob(request.JobType, request.JobData) 50 | if err != nil { 51 | c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()}) 52 | return 53 | } 54 | 55 | c.JSON(http.StatusAccepted, gin.H{ 56 | "job_id": jobID, 57 | "status": "pending", 58 | }) 59 | } 60 | 61 | func putUpdateJob(c *gin.Context) { 62 | manager := middleware.ExtractJobManager(c) 63 | jobID := c.Param("job_id") 64 | 65 | var request struct { 66 | Status string `json:"status"` 67 | Progress int `json:"progress"` 68 | 
Message string `json:"message"` 69 | Result interface{} `json:"result"` 70 | } 71 | 72 | if err := c.BindJSON(&request); err != nil { 73 | return 74 | } 75 | 76 | if err := manager.UpdateJob(jobID, jobs.Status(request.Status), request.Progress, request.Message, request.Result); err != nil { 77 | c.JSON(http.StatusNotFound, gin.H{"error": err.Error()}) 78 | return 79 | } 80 | 81 | c.JSON(http.StatusOK, gin.H{"success": true}) 82 | } 83 | 84 | func deleteJob(c *gin.Context) { 85 | manager := middleware.ExtractJobManager(c) 86 | jobID := c.Param("job_id") 87 | 88 | if err := manager.CancelJob(jobID); err != nil { 89 | c.JSON(http.StatusNotFound, gin.H{"error": err.Error()}) 90 | return 91 | } 92 | 93 | c.JSON(http.StatusOK, gin.H{"success": true}) 94 | } 95 | -------------------------------------------------------------------------------- /src/internal/cron/cron.go: -------------------------------------------------------------------------------- 1 | package cron 2 | 3 | import ( 4 | "context" 5 | "time" 6 | 7 | "emperror.dev/errors" 8 | "github.com/apex/log" 9 | "github.com/go-co-op/gocron" 10 | 11 | "github.com/pyrohost/elytra/src/config" 12 | "github.com/pyrohost/elytra/src/server" 13 | "github.com/pyrohost/elytra/src/system" 14 | ) 15 | 16 | const ErrCronRunning = errors.Sentinel("cron: job already running") 17 | 18 | var o system.AtomicBool 19 | 20 | // Scheduler configures the internal cronjob system for Elytra and returns the scheduler 21 | // instance to the caller. This should only be called once per application lifecycle, additional 22 | // calls will result in an error being returned. 23 | func Scheduler(ctx context.Context, m *server.Manager) (*gocron.Scheduler, error) { 24 | if !o.SwapIf(true) { 25 | return nil, errors.New("cron: cannot call scheduler more than once in application lifecycle") 26 | } 27 | location, err := time.LoadLocation(config.Get().System.Timezone) 28 | if err != nil { 29 | return nil, errors.Wrap(err, "cron: failed to parse configured system timezone") 30 | } 31 | 32 | activity := activityCron{ 33 | mu: system.NewAtomicBool(false), 34 | manager: m, 35 | max: config.Get().System.ActivitySendCount, 36 | } 37 | 38 | sftp := sftpCron{ 39 | mu: system.NewAtomicBool(false), 40 | manager: m, 41 | max: config.Get().System.ActivitySendCount, 42 | } 43 | 44 | s := gocron.NewScheduler(location) 45 | l := log.WithField("subsystem", "cron") 46 | 47 | interval := time.Duration(config.Get().System.ActivitySendInterval) * time.Second 48 | l.WithField("interval", interval).Info("configuring system crons") 49 | 50 | _, _ = s.Tag("activity").Every(interval).Do(func() { 51 | l.WithField("cron", "activity").Debug("sending internal activity events to Panel") 52 | if err := activity.Run(ctx); err != nil { 53 | if errors.Is(err, ErrCronRunning) { 54 | l.WithField("cron", "activity").Warn("activity process is already running, skipping...") 55 | } else { 56 | l.WithField("cron", "activity").WithField("error", err).Error("activity process failed to execute") 57 | } 58 | } 59 | }) 60 | 61 | _, _ = s.Tag("sftp").Every(interval).Do(func() { 62 | l.WithField("cron", "sftp").Debug("sending sftp events to Panel") 63 | if err := sftp.Run(ctx); err != nil { 64 | if errors.Is(err, ErrCronRunning) { 65 | l.WithField("cron", "sftp").Warn("sftp events process already running, skipping...") 66 | } else { 67 | l.WithField("cron", "sftp").WithField("error", err).Error("sftp events process failed to execute") 68 | } 69 | } 70 | }) 71 | 72 | return s, nil 73 | } 74 | 
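// The returned scheduler is not started here; the caller is expected to start
// it once configuration succeeds, roughly along these lines (sketch):
//
//	s, err := cron.Scheduler(ctx, manager)
//	if err != nil {
//		log.WithField("error", err).Fatal("failed to configure cron scheduler")
//	}
//	s.StartAsync()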
-------------------------------------------------------------------------------- /src/server/activity.go: -------------------------------------------------------------------------------- 1 | package server 2 | 3 | import ( 4 | "context" 5 | "time" 6 | 7 | "emperror.dev/errors" 8 | 9 | "github.com/pyrohost/elytra/src/internal/database" 10 | "github.com/pyrohost/elytra/src/internal/models" 11 | ) 12 | 13 | const ActivityPowerPrefix = "server:power." 14 | 15 | const ( 16 | ActivityConsoleCommand = models.Event("server:console.command") 17 | ActivitySftpWrite = models.Event("server:sftp.write") 18 | ActivitySftpCreate = models.Event("server:sftp.create") 19 | ActivitySftpCreateDirectory = models.Event("server:sftp.create-directory") 20 | ActivitySftpRename = models.Event("server:sftp.rename") 21 | ActivitySftpDelete = models.Event("server:sftp.delete") 22 | ActivityFileUploaded = models.Event("server:file.uploaded") 23 | ) 24 | 25 | // RequestActivity is a wrapper around a LoggedEvent that is able to track additional request 26 | // specific metadata including the specific user and IP address associated with all subsequent 27 | // events. The internal logged event structure can be extracted by calling RequestEvent.Event(). 28 | type RequestActivity struct { 29 | server string 30 | user string 31 | ip string 32 | } 33 | 34 | // Event returns the underlying logged event from the RequestEvent instance and sets the 35 | // specific event and metadata on it. 36 | func (ra RequestActivity) Event(event models.Event, metadata models.ActivityMeta) *models.Activity { 37 | a := models.Activity{Server: ra.server, IP: ra.ip, Event: event, Metadata: metadata} 38 | 39 | return a.SetUser(ra.user) 40 | } 41 | 42 | // SetUser clones the RequestActivity struct and sets a new user value on the copy 43 | // before returning it. 44 | func (ra RequestActivity) SetUser(u string) RequestActivity { 45 | c := ra 46 | c.user = u 47 | return c 48 | } 49 | 50 | func (s *Server) NewRequestActivity(user string, ip string) RequestActivity { 51 | return RequestActivity{server: s.ID(), user: user, ip: ip} 52 | } 53 | 54 | // SaveActivity saves an activity entry to the database in a background routine. If an error is 55 | // encountered it is logged but not returned to the caller. 56 | func (s *Server) SaveActivity(a RequestActivity, event models.Event, metadata models.ActivityMeta) { 57 | ctx, cancel := context.WithTimeout(s.Context(), time.Second*3) 58 | go func() { 59 | defer cancel() 60 | if tx := database.Instance().WithContext(ctx).Create(a.Event(event, metadata)); tx.Error != nil { 61 | s.Log().WithField("error", errors.WithStack(tx.Error)). 62 | WithField("event", event). 63 | Error("activity: failed to save event") 64 | } 65 | }() 66 | } 67 | -------------------------------------------------------------------------------- /src/server/console.go: -------------------------------------------------------------------------------- 1 | package server 2 | 3 | import ( 4 | "fmt" 5 | "sync" 6 | "time" 7 | 8 | "github.com/mitchellh/colorstring" 9 | 10 | "github.com/pyrohost/elytra/src/config" 11 | "github.com/pyrohost/elytra/src/system" 12 | ) 13 | 14 | // appName is a local cache variable to avoid having to make expensive copies of 15 | // the configuration every time we need to send output along to the websocket for 16 | // a server. 17 | var appName string 18 | var appNameSync sync.Once 19 | 20 | // PublishConsoleOutputFromDaemon sends output to the server console formatted 21 | // to appear correctly as being sent from Elytra. 
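// For example (sketch), a call such as
//
//	s.PublishConsoleOutputFromDaemon("Backup completed successfully.")
//
// publishes a console output event prefixed with the configured application
// name, e.g. "[Elytra Daemon]: Backup completed successfully.".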
22 | func (s *Server) PublishConsoleOutputFromDaemon(data string) { 23 | appNameSync.Do(func() { 24 | appName = config.Get().AppName 25 | }) 26 | s.Events().Publish( 27 | ConsoleOutputEvent, 28 | colorstring.Color(fmt.Sprintf("[yellow][bold][%s Daemon]:[default] %s", appName, data)), 29 | ) 30 | } 31 | 32 | // Throttler returns the throttler instance for the server or creates a new one. 33 | func (s *Server) Throttler() *ConsoleThrottle { 34 | s.throttleOnce.Do(func() { 35 | throttles := config.Get().Throttles 36 | period := time.Duration(throttles.Period) * time.Millisecond 37 | 38 | s.throttler = newConsoleThrottle(throttles.Lines, period) 39 | s.throttler.strike = func() { 40 | s.PublishConsoleOutputFromDaemon("Server is outputting console data too quickly -- throttling...") 41 | } 42 | }) 43 | return s.throttler 44 | } 45 | 46 | type ConsoleThrottle struct { 47 | limit *system.Rate 48 | lock *system.Locker 49 | strike func() 50 | } 51 | 52 | func newConsoleThrottle(lines uint64, period time.Duration) *ConsoleThrottle { 53 | return &ConsoleThrottle{ 54 | limit: system.NewRate(lines, period), 55 | lock: system.NewLocker(), 56 | } 57 | } 58 | 59 | // Allow checks if the console is allowed to process more output data, or if too 60 | // much has already been sent over the line. If there is too much output the 61 | // strike callback function is triggered, but only if it has not already been 62 | // triggered at this point in the process. 63 | // 64 | // If output is allowed, the lock on the throttler is released and the next time 65 | // it is triggered the strike function will be re-executed. 66 | func (ct *ConsoleThrottle) Allow() bool { 67 | if !ct.limit.Try() { 68 | if err := ct.lock.Acquire(); err == nil { 69 | if ct.strike != nil { 70 | ct.strike() 71 | } 72 | } 73 | return false 74 | } 75 | ct.lock.Release() 76 | return true 77 | } 78 | 79 | // Reset resets the console throttler internal rate limiter and overage counter. 80 | func (ct *ConsoleThrottle) Reset() { 81 | ct.limit.Reset() 82 | } 83 | -------------------------------------------------------------------------------- /src/internal/models/activity.go: -------------------------------------------------------------------------------- 1 | package models 2 | 3 | import ( 4 | "net" 5 | "strings" 6 | "time" 7 | 8 | "gorm.io/gorm" 9 | ) 10 | 11 | type Event string 12 | 13 | type ActivityMeta map[string]interface{} 14 | 15 | // Activity defines an activity log event for a server entity performed by a user. This is 16 | // used for tracking commands, power actions, and SFTP events so that they can be reconciled 17 | // and sent back to the Panel instance to be displayed to the user. 18 | type Activity struct { 19 | ID int `gorm:"primaryKey;not null" json:"-"` 20 | // User is UUID of the user that triggered this event, or an empty string if the event 21 | // cannot be tied to a specific user, in which case we will assume it was the system 22 | // user. 23 | User JsonNullString `gorm:"type:uuid" json:"user"` 24 | // Server is the UUID of the server this event is associated with. 25 | Server string `gorm:"type:uuid;not null" json:"server"` 26 | // Event is a string that describes what occurred, and is used by the Panel instance to 27 | // properly associate this event in the activity logs. 28 | Event Event `gorm:"index;not null" json:"event"` 29 | // Metadata is either a null value, string, or a JSON blob with additional event specific 30 | // metadata that can be provided. 
31 | Metadata ActivityMeta `gorm:"serializer:json" json:"metadata"` 32 | // IP is the IP address that triggered this event, or an empty string if it cannot be 33 | // determined properly. This should be the connecting user's IP address, and not the 34 | // internal system IP. 35 | IP string `gorm:"not null" json:"ip"` 36 | Timestamp time.Time `gorm:"not null" json:"timestamp"` 37 | } 38 | 39 | // SetUser sets the current user that performed the action. If an empty string is provided 40 | // it is cast into a null value when stored. 41 | func (a Activity) SetUser(u string) *Activity { 42 | var ns JsonNullString 43 | if u == "" { 44 | if err := ns.Scan(nil); err != nil { 45 | panic(err) 46 | } 47 | } else { 48 | if err := ns.Scan(u); err != nil { 49 | panic(err) 50 | } 51 | } 52 | a.User = ns 53 | return &a 54 | } 55 | 56 | // BeforeCreate executes before we create any activity entry to ensure the IP address 57 | // is trimmed down to remove any extraneous data, and the timestamp is set to the current 58 | // system time and then stored as UTC. 59 | func (a *Activity) BeforeCreate(_ *gorm.DB) error { 60 | if ip, _, err := net.SplitHostPort(strings.TrimSpace(a.IP)); err == nil { 61 | a.IP = ip 62 | } 63 | if a.Timestamp.IsZero() { 64 | a.Timestamp = time.Now() 65 | } 66 | a.Timestamp = a.Timestamp.UTC() 67 | if a.Metadata == nil { 68 | a.Metadata = ActivityMeta{} 69 | } 70 | return nil 71 | } 72 | -------------------------------------------------------------------------------- /src/remote/http_test.go: -------------------------------------------------------------------------------- 1 | package remote 2 | 3 | import ( 4 | "context" 5 | "net/http" 6 | "net/http/httptest" 7 | "testing" 8 | 9 | "github.com/stretchr/testify/assert" 10 | ) 11 | 12 | func createTestClient(h http.HandlerFunc) (*client, *httptest.Server) { 13 | s := httptest.NewServer(h) 14 | c := &client{ 15 | httpClient: s.Client(), 16 | baseUrl: s.URL, 17 | maxAttempts: 1, 18 | tokenId: "testid", 19 | token: "testtoken", 20 | } 21 | return c, s 22 | } 23 | 24 | func TestRequest(t *testing.T) { 25 | c, _ := createTestClient(func(rw http.ResponseWriter, r *http.Request) { 26 | assert.Equal(t, "application/vnd.pterodactyl.v1+json", r.Header.Get("Accept")) 27 | assert.Equal(t, "application/json", r.Header.Get("Content-Type")) 28 | assert.Equal(t, "Bearer testid.testtoken", r.Header.Get("Authorization")) 29 | assert.Equal(t, "/test", r.URL.Path) 30 | 31 | rw.WriteHeader(http.StatusOK) 32 | }) 33 | r, err := c.requestOnce(context.Background(), "", "/test", nil) 34 | assert.NoError(t, err) 35 | assert.NotNil(t, r) 36 | } 37 | 38 | func TestRequestRetry(t *testing.T) { 39 | // Test if the client attempts failed requests 40 | i := 0 41 | c, _ := createTestClient(func(rw http.ResponseWriter, r *http.Request) { 42 | if i < 1 { 43 | rw.WriteHeader(http.StatusInternalServerError) 44 | } else { 45 | rw.WriteHeader(http.StatusOK) 46 | } 47 | i++ 48 | }) 49 | c.maxAttempts = 2 50 | r, err := c.request(context.Background(), "", "", nil) 51 | assert.NoError(t, err) 52 | assert.NotNil(t, r) 53 | assert.Equal(t, http.StatusOK, r.StatusCode) 54 | assert.Equal(t, 2, i) 55 | 56 | // Test whether the client returns the last request after retry limit is reached 57 | i = 0 58 | c, _ = createTestClient(func(rw http.ResponseWriter, r *http.Request) { 59 | rw.WriteHeader(http.StatusInternalServerError) 60 | i++ 61 | }) 62 | c.maxAttempts = 2 63 | r, err = c.request(context.Background(), "get", "", nil) 64 | assert.Error(t, err) 65 | assert.Nil(t, r) 66 | 67 | 
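	// After the retry budget is exhausted, the wrapped error should still
	// unwrap into a *RequestError exposing the final 500 response.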
v := AsRequestError(err) 68 | assert.NotNil(t, v) 69 | assert.Equal(t, http.StatusInternalServerError, v.StatusCode()) 70 | assert.Equal(t, 3, i) 71 | } 72 | 73 | func TestGet(t *testing.T) { 74 | c, _ := createTestClient(func(rw http.ResponseWriter, r *http.Request) { 75 | assert.Equal(t, http.MethodGet, r.Method) 76 | assert.Len(t, r.URL.Query(), 1) 77 | assert.Equal(t, "world", r.URL.Query().Get("hello")) 78 | }) 79 | r, err := c.Get(context.Background(), "/test", q{"hello": "world"}) 80 | assert.NoError(t, err) 81 | assert.NotNil(t, r) 82 | } 83 | 84 | func TestPost(t *testing.T) { 85 | test := map[string]string{ 86 | "hello": "world", 87 | } 88 | c, _ := createTestClient(func(rw http.ResponseWriter, r *http.Request) { 89 | assert.Equal(t, http.MethodPost, r.Method) 90 | }) 91 | r, err := c.Post(context.Background(), "/test", test) 92 | assert.NoError(t, err) 93 | assert.NotNil(t, r) 94 | } 95 | -------------------------------------------------------------------------------- /src/server/filesystem/testdata/test.tar: -------------------------------------------------------------------------------- 1 | test/0040777000000000000000000000000014041373127006775 5ustar00test/inside/0040777000000000000000000000000014041373121010242 5ustar00test/inside/finside.txt0100777000000000000000000000000014041373113012413 0ustar00test/outside.txt0100777000000000000000000000000014041373125011176 0ustar00 -------------------------------------------------------------------------------- /src/router/router_server_ws.go: -------------------------------------------------------------------------------- 1 | package router 2 | 3 | import ( 4 | "context" 5 | "encoding/json" 6 | "time" 7 | 8 | "github.com/gin-gonic/gin" 9 | ws "github.com/gorilla/websocket" 10 | 11 | "github.com/pyrohost/elytra/src/router/middleware" 12 | "github.com/pyrohost/elytra/src/router/websocket" 13 | ) 14 | 15 | var expectedCloseCodes = []int{ 16 | ws.CloseGoingAway, 17 | ws.CloseAbnormalClosure, 18 | ws.CloseNormalClosure, 19 | ws.CloseNoStatusReceived, 20 | ws.CloseServiceRestart, 21 | } 22 | 23 | // Upgrades a connection to a websocket and passes events along between. 24 | func getServerWebsocket(c *gin.Context) { 25 | manager := middleware.ExtractManager(c) 26 | s, _ := manager.Get(c.Param("server")) 27 | 28 | // Create a context that can be canceled when the user disconnects from this 29 | // socket that will also cancel listeners running in separate threads. If the 30 | // connection itself is terminated listeners using this context will also be 31 | // closed. 32 | ctx, cancel := context.WithCancel(c.Request.Context()) 33 | defer cancel() 34 | 35 | handler, err := websocket.GetHandler(s, c.Writer, c.Request, c) 36 | if err != nil { 37 | middleware.CaptureAndAbort(c, err) 38 | return 39 | } 40 | defer handler.Connection.Close() 41 | 42 | // Track this open connection on the server so that we can close them all programmatically 43 | // if the server is deleted. 44 | s.Websockets().Push(handler.Uuid(), &cancel) 45 | handler.Logger().Debug("opening connection to server websocket") 46 | 47 | defer func() { 48 | s.Websockets().Remove(handler.Uuid()) 49 | handler.Logger().Debug("closing connection to server websocket") 50 | }() 51 | 52 | // If the server is deleted we need to send a close message to the connected client 53 | // so that they disconnect since there will be no more events sent along. Listen for 54 | // the request context being closed to break this loop, otherwise this routine will 55 | // be left hanging in the background. 
56 | go func() { 57 | select { 58 | case <-ctx.Done(): 59 | break 60 | case <-s.Context().Done(): 61 | _ = handler.Connection.WriteControl(ws.CloseMessage, ws.FormatCloseMessage(ws.CloseGoingAway, "server deleted"), time.Now().Add(time.Second*5)) 62 | break 63 | } 64 | }() 65 | 66 | for { 67 | j := websocket.Message{} 68 | 69 | _, p, err := handler.Connection.ReadMessage() 70 | if err != nil { 71 | if ws.IsUnexpectedCloseError(err, expectedCloseCodes...) { 72 | handler.Logger().WithField("error", err).Warn("error handling websocket message for server") 73 | } 74 | break 75 | } 76 | 77 | // Discard and JSON parse errors into the void and don't continue processing this 78 | // specific socket request. If we did a break here the client would get disconnected 79 | // from the socket, which is NOT what we want to do. 80 | if err := json.Unmarshal(p, &j); err != nil { 81 | continue 82 | } 83 | 84 | go func(msg websocket.Message) { 85 | if err := handler.HandleInbound(ctx, msg); err != nil { 86 | _ = handler.SendErrorJson(msg, err) 87 | } 88 | }(j) 89 | } 90 | } 91 | -------------------------------------------------------------------------------- /src/server/mounts.go: -------------------------------------------------------------------------------- 1 | package server 2 | 3 | import ( 4 | "path/filepath" 5 | "strings" 6 | 7 | "github.com/apex/log" 8 | 9 | "github.com/pyrohost/elytra/src/config" 10 | "github.com/pyrohost/elytra/src/environment" 11 | ) 12 | 13 | // To avoid confusion when working with mounts, assume that a server.Mount has not been properly 14 | // cleaned up and had the paths set. An environment.Mount should only be returned with valid paths 15 | // that have been checked. 16 | type Mount environment.Mount 17 | 18 | // Returns the default container mounts for the server instance. This includes the data directory 19 | // for the server. Previously this would also mount in host timezone files, however we've moved from 20 | // that approach to just setting `TZ=Timezone` environment values in containers which should work 21 | // in most scenarios. 22 | func (s *Server) Mounts() []environment.Mount { 23 | m := []environment.Mount{ 24 | { 25 | Default: true, 26 | Target: "/home/container", 27 | Source: s.Filesystem().Path(), 28 | ReadOnly: false, 29 | }, 30 | } 31 | 32 | // Handle mounting a generated `/etc/passwd` if the feature is enabled. 33 | if passwd := config.Get().System.Passwd; passwd.Enable { 34 | s.Log().WithFields(log.Fields{"source_path": passwd.Directory}).Info("mouting generated /etc/{group,passwd} to workaround UID/GID issues") 35 | m = append(m, environment.Mount{ 36 | Source: filepath.Join(passwd.Directory, "group"), 37 | Target: "/etc/group", 38 | ReadOnly: true, 39 | }) 40 | m = append(m, environment.Mount{ 41 | Source: filepath.Join(passwd.Directory, "passwd"), 42 | Target: "/etc/passwd", 43 | ReadOnly: true, 44 | }) 45 | } 46 | 47 | // Also include any of this server's custom mounts when returning them. 48 | return append(m, s.customMounts()...) 49 | } 50 | 51 | // Returns the custom mounts for a given server after verifying that they are within a list of 52 | // allowed mount points for the node. 53 | func (s *Server) customMounts() []environment.Mount { 54 | var mounts []environment.Mount 55 | 56 | // TODO: probably need to handle things trying to mount directories that do not exist. 
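	// For example, with AllowedMounts containing "/mnt/shared", a mount whose
	// cleaned Source is "/mnt/shared/maps" passes the prefix check below, while
	// one pointing at "/etc" is skipped and logged as a warning.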
57 | for _, m := range s.Config().Mounts { 58 | source := filepath.Clean(m.Source) 59 | target := filepath.Clean(m.Target) 60 | 61 | logger := s.Log().WithFields(log.Fields{ 62 | "source_path": source, 63 | "target_path": target, 64 | "read_only": m.ReadOnly, 65 | }) 66 | 67 | mounted := false 68 | for _, allowed := range config.Get().AllowedMounts { 69 | // Check if the source path is included in the allowed mounts list. 70 | // filepath.Clean will strip all trailing slashes (unless the path is a root directory). 71 | if !strings.HasPrefix(source, filepath.Clean(allowed)) { 72 | continue 73 | } 74 | mounted = true 75 | mounts = append(mounts, environment.Mount{ 76 | Source: source, 77 | Target: target, 78 | ReadOnly: m.ReadOnly, 79 | }) 80 | break 81 | } 82 | 83 | if !mounted { 84 | logger.Warn("skipping custom server mount, not in list of allowed mount points") 85 | } 86 | } 87 | 88 | return mounts 89 | } 90 | -------------------------------------------------------------------------------- /src/server/update.go: -------------------------------------------------------------------------------- 1 | package server 2 | 3 | import ( 4 | "time" 5 | 6 | "github.com/pyrohost/elytra/src/environment/docker" 7 | 8 | "github.com/pyrohost/elytra/src/environment" 9 | ) 10 | 11 | // SyncWithEnvironment updates the environment for the server to match any of 12 | // the changed data. This pushes new settings and environment variables to the 13 | // environment. In addition, the in-situ update method is called on the 14 | // environment which will allow environments that make use of it (such as Docker) 15 | // to immediately apply some settings without having to wait on a server to 16 | // restart. 17 | // 18 | // This functionality allows a server's resources limits to be modified on the 19 | // fly and have them apply right away allowing for dynamic resource allocation 20 | // and responses to abusive server processes. 21 | func (s *Server) SyncWithEnvironment() { 22 | s.Log().Debug("syncing server settings with environment") 23 | 24 | cfg := s.Config() 25 | 26 | // Update the environment settings using the new information from this server. 27 | s.Environment.Config().SetSettings(environment.Settings{ 28 | Mounts: s.Mounts(), 29 | Allocations: cfg.Allocations, 30 | Limits: cfg.Build, 31 | Labels: cfg.Labels, 32 | }) 33 | 34 | // For Docker specific environments we also want to update the configured image 35 | // and stop configuration. 36 | if e, ok := s.Environment.(*docker.Environment); ok { 37 | s.Log().Debug("syncing stop configuration with configured docker environment") 38 | e.SetImage(cfg.Container.Image) 39 | e.SetStopConfiguration(s.ProcessConfiguration().Stop) 40 | } 41 | 42 | // If build limits are changed, environment variables also change. Plus, any modifications to 43 | // the startup command also need to be properly propagated to this environment. 44 | // 45 | // @see https://github.com/pterodactyl/panel/issues/2255 46 | s.Environment.Config().SetEnvironmentVariables(s.GetEnvironmentVariables()) 47 | 48 | if !s.IsSuspended() { 49 | // Update the environment in place, allowing memory and CPU usage to be adjusted 50 | // on the fly without the user needing to reboot (theoretically). 51 | s.Log().Info("performing server limit modification on-the-fly") 52 | if err := s.Environment.InSituUpdate(); err != nil { 53 | // This is not a failure, the process is still running fine and will fix itself on the 54 | // next boot, or fail out entirely in a more logical position. 
55 | s.Log().WithField("error", err).Warn("failed to perform on-the-fly update of the server environment") 56 | } 57 | } else { 58 | // Checks if the server is now in a suspended state. If so and a server process is currently running it 59 | // will be gracefully stopped (and terminated if it refuses to stop). 60 | if s.Environment.State() != environment.ProcessOfflineState { 61 | s.Log().Info("server suspended with running process state, terminating now") 62 | 63 | go func(s *Server) { 64 | if err := s.Environment.WaitForStop(s.Context(), time.Minute, true); err != nil { 65 | s.Log().WithField("error", err).Warn("failed to terminate server environment after suspension") 66 | } 67 | }(s) 68 | } 69 | } 70 | } 71 | -------------------------------------------------------------------------------- /src/internal/cron/activity_cron.go: -------------------------------------------------------------------------------- 1 | package cron 2 | 3 | import ( 4 | "context" 5 | "net" 6 | 7 | "emperror.dev/errors" 8 | 9 | "github.com/pyrohost/elytra/src/internal/database" 10 | "github.com/pyrohost/elytra/src/internal/models" 11 | "github.com/pyrohost/elytra/src/server" 12 | "github.com/pyrohost/elytra/src/system" 13 | ) 14 | 15 | type activityCron struct { 16 | mu *system.AtomicBool 17 | manager *server.Manager 18 | max int 19 | } 20 | 21 | // Run executes the cronjob and ensures we fetch and send all the stored activity to the 22 | // Panel instance. Once activity is sent it is deleted from the local database instance. Any 23 | // SFTP specific events are not handled in this cron, they're handled separately to account 24 | // for de-duplication and event merging. 25 | func (ac *activityCron) Run(ctx context.Context) error { 26 | // Don't execute this cron if there is currently one running. Once this task is completed 27 | // go ahead and mark it as no longer running. 28 | if !ac.mu.SwapIf(true) { 29 | return errors.WithStack(ErrCronRunning) 30 | } 31 | defer ac.mu.Store(false) 32 | 33 | var activity []models.Activity 34 | tx := database.Instance().WithContext(ctx). 35 | Where("event NOT LIKE ?", "server:sftp.%"). 36 | Limit(ac.max). 37 | Find(&activity) 38 | if tx.Error != nil { 39 | return errors.WithStack(tx.Error) 40 | } 41 | if len(activity) == 0 { 42 | return nil 43 | } 44 | 45 | // ids to delete from the database. 46 | ids := make([]int, 0, len(activity)) 47 | // activities to send to the panel. 48 | activities := make([]models.Activity, 0, len(activity)) 49 | for _, v := range activity { 50 | // Delete any activity that has an invalid IP address. This is a fix for 51 | // a bug that truncated the last octet of an IPv6 address in the database. 
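The guard that follows works because net.ParseIP returns nil for malformed input, which is exactly what a truncated IPv6 value is. A quick standalone illustration with made-up addresses:

package main

import (
	"fmt"
	"net"
)

func main() {
	// A well-formed IPv6 address parses normally.
	fmt.Println(net.ParseIP("2001:db8::ff00:42:8329") != nil) // true

	// The same address with its last group cut off fails to parse, so the
	// cron treats the row as unrecoverable and queues it for deletion.
	fmt.Println(net.ParseIP("2001:db8::ff00:42:") != nil) // false
}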
52 | if ip := net.ParseIP(v.IP); ip == nil { 53 | ids = append(ids, v.ID) 54 | continue 55 | } 56 | activities = append(activities, v) 57 | } 58 | 59 | // Delete any invalid activities 60 | if len(ids) > 0 { 61 | tx = database.Instance().WithContext(ctx).Where("id IN ?", ids).Delete(&models.Activity{}) 62 | if tx.Error != nil { 63 | return errors.WithStack(tx.Error) 64 | } 65 | } 66 | 67 | if len(activities) == 0 { 68 | return nil 69 | } 70 | 71 | if err := ac.manager.Client().SendActivityLogs(ctx, activities); err != nil { 72 | return errors.WrapIf(err, "cron: failed to send activity events to Panel") 73 | } 74 | 75 | ids = make([]int, len(activities)) 76 | for i, v := range activities { 77 | ids[i] = v.ID 78 | } 79 | 80 | // SQLite has a limitation of how many parameters we can specify in a single 81 | // query, so we need to delete the activities in chunks of 32,000 instead of 82 | // all at once. 83 | i := 0 84 | idsLen := len(ids) 85 | for i < idsLen { 86 | start := i 87 | end := min(i+32000, idsLen) 88 | batchSize := end - start 89 | 90 | tx = database.Instance().WithContext(ctx).Where("id IN ?", ids[start:end]).Delete(&models.Activity{}) 91 | if tx.Error != nil { 92 | return errors.WithStack(tx.Error) 93 | } 94 | 95 | i += batchSize 96 | } 97 | 98 | return nil 99 | } 100 | -------------------------------------------------------------------------------- /src/server/configuration.go: -------------------------------------------------------------------------------- 1 | package server 2 | 3 | import ( 4 | "sync" 5 | 6 | "github.com/pyrohost/elytra/src/environment" 7 | ) 8 | 9 | type EggConfiguration struct { 10 | // The internal UUID of the Egg on the Panel. 11 | ID string `json:"id"` 12 | 13 | // Maintains a list of files that are blacklisted for opening/editing/downloading 14 | // or basically any type of access on the server by any user. This is NOT the same 15 | // as a per-user denylist, this is defined at the Egg level. 16 | FileDenylist []string `json:"file_denylist"` 17 | } 18 | 19 | type ConfigurationMeta struct { 20 | Name string `json:"name"` 21 | Description string `json:"description"` 22 | } 23 | 24 | type Configuration struct { 25 | mu sync.RWMutex 26 | 27 | // The unique identifier for the server that should be used when referencing 28 | // it against the Panel API (and internally). This will be used when naming 29 | // docker containers as well as in log output. 30 | Uuid string `json:"uuid"` 31 | 32 | Meta ConfigurationMeta `json:"meta"` 33 | 34 | // Whether or not the server is in a suspended state. Suspended servers cannot 35 | // be started or modified except in certain scenarios by an admin user. 36 | Suspended bool `json:"suspended"` 37 | 38 | // The command that should be used when booting up the server instance. 39 | Invocation string `json:"invocation"` 40 | 41 | // By default this is false, however if selected within the Panel while installing or re-installing a 42 | // server, specific installation scripts will be skipped for the server process. 43 | SkipEggScripts bool `json:"skip_egg_scripts"` 44 | 45 | // An array of environment variables that should be passed along to the running 46 | // server process. 47 | EnvVars environment.Variables `json:"environment"` 48 | 49 | // Labels is a map of container labels that should be applied to the running server process.
50 | Labels map[string]string `json:"labels"` 51 | 52 | Allocations environment.Allocations `json:"allocations"` 53 | Build environment.Limits `json:"build"` 54 | CrashDetectionEnabled bool `json:"crash_detection_enabled"` 55 | Mounts []Mount `json:"mounts"` 56 | Egg EggConfiguration `json:"egg,omitempty"` 57 | 58 | Container struct { 59 | // Defines the Docker image that will be used for this server 60 | Image string `json:"image,omitempty"` 61 | } `json:"container,omitempty"` 62 | } 63 | 64 | func (s *Server) Config() *Configuration { 65 | s.cfg.mu.RLock() 66 | defer s.cfg.mu.RUnlock() 67 | return &s.cfg 68 | } 69 | 70 | // DiskSpace returns the amount of disk space available to a server in bytes. 71 | func (s *Server) DiskSpace() int64 { 72 | s.cfg.mu.RLock() 73 | defer s.cfg.mu.RUnlock() 74 | return s.cfg.Build.DiskSpace * 1024.0 * 1024.0 75 | } 76 | 77 | func (s *Server) MemoryLimit() int64 { 78 | s.cfg.mu.RLock() 79 | defer s.cfg.mu.RUnlock() 80 | return s.cfg.Build.MemoryLimit 81 | } 82 | 83 | func (c *Configuration) GetUuid() string { 84 | c.mu.RLock() 85 | defer c.mu.RUnlock() 86 | return c.Uuid 87 | } 88 | 89 | func (c *Configuration) SetSuspended(s bool) { 90 | c.mu.Lock() 91 | defer c.mu.Unlock() 92 | c.Suspended = s 93 | } 94 | -------------------------------------------------------------------------------- /src/internal/ufs/quota_writer.go: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: MIT 2 | // SPDX-FileCopyrightText: Copyright (c) 2024 Matthew Penner 3 | 4 | package ufs 5 | 6 | import ( 7 | "io" 8 | "sync/atomic" 9 | ) 10 | 11 | // CountedWriter is a writer that counts the amount of data written to the 12 | // underlying writer. 13 | type CountedWriter struct { 14 | File 15 | 16 | counter atomic.Int64 17 | err error 18 | } 19 | 20 | // NewCountedWriter returns a new countedWriter that counts the amount of bytes 21 | // written to the underlying writer. 22 | func NewCountedWriter(f File) *CountedWriter { 23 | return &CountedWriter{File: f} 24 | } 25 | 26 | // BytesWritten returns the amount of bytes that have been written to the 27 | // underlying writer. 28 | func (w *CountedWriter) BytesWritten() int64 { 29 | return w.counter.Load() 30 | } 31 | 32 | // Error returns the error from the writer if any. If the error is an EOF, nil 33 | // will be returned. 34 | func (w *CountedWriter) Error() error { 35 | if w.err == io.EOF { 36 | return nil 37 | } 38 | return w.err 39 | } 40 | 41 | // Write writes bytes to the underlying writer while tracking the total amount 42 | // of bytes written. 43 | func (w *CountedWriter) Write(p []byte) (int, error) { 44 | if w.err != nil { 45 | return 0, io.EOF 46 | } 47 | 48 | // Write is a very simple operation for us to handle. 49 | n, err := w.File.Write(p) 50 | w.counter.Add(int64(n)) 51 | w.err = err 52 | 53 | // TODO: is this how we actually want to handle errors with this? 54 | if err == io.EOF { 55 | return n, io.EOF 56 | } 57 | return n, nil 58 | } 59 | 60 | func (w *CountedWriter) ReadFrom(r io.Reader) (n int64, err error) { 61 | cr := NewCountedReader(r) 62 | n, err = w.File.ReadFrom(cr) 63 | w.counter.Add(n) 64 | return 65 | } 66 | 67 | // CountedReader is a reader that counts the amount of data read from the 68 | // underlying reader. 
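A caller-side sketch of the reader described above; ufs is an internal package, so the import only resolves from inside this module, and the sample data is arbitrary.

package main

import (
	"fmt"
	"io"
	"strings"

	"github.com/pyrohost/elytra/src/internal/ufs"
)

func main() {
	cr := ufs.NewCountedReader(strings.NewReader("hello, world"))

	// Drain the reader; the wrapper tallies every byte it hands out.
	if _, err := io.Copy(io.Discard, cr); err != nil {
		fmt.Println("copy failed:", err)
		return
	}

	// Error() reports nil here because a plain EOF is not treated as a failure.
	fmt.Println(cr.BytesRead(), cr.Error()) // 12 <nil>
}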
69 | type CountedReader struct { 70 | reader io.Reader 71 | 72 | counter atomic.Int64 73 | err error 74 | } 75 | 76 | var _ io.Reader = (*CountedReader)(nil) 77 | 78 | // NewCountedReader returns a new countedReader that counts the amount of bytes 79 | // read from the underlying reader. 80 | func NewCountedReader(r io.Reader) *CountedReader { 81 | return &CountedReader{reader: r} 82 | } 83 | 84 | // BytesRead returns the amount of bytes that have been read from the underlying 85 | // reader. 86 | func (r *CountedReader) BytesRead() int64 { 87 | return r.counter.Load() 88 | } 89 | 90 | // Error returns the error from the reader if any. If the error is an EOF, nil 91 | // will be returned. 92 | func (r *CountedReader) Error() error { 93 | if r.err == io.EOF { 94 | return nil 95 | } 96 | return r.err 97 | } 98 | 99 | // Read reads bytes from the underlying reader while tracking the total amount 100 | // of bytes read. 101 | func (r *CountedReader) Read(p []byte) (int, error) { 102 | if r.err != nil { 103 | return 0, io.EOF 104 | } 105 | 106 | n, err := r.reader.Read(p) 107 | r.counter.Add(int64(n)) 108 | r.err = err 109 | 110 | // TODO: is this how we actually want to handle errors with this? 111 | if err == io.EOF { 112 | return n, io.EOF 113 | } 114 | return n, nil 115 | } 116 | -------------------------------------------------------------------------------- /src/server/filesystem/archiverext/compressed.go: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: MIT 2 | // SPDX-FileCopyrightText: Copyright (c) 2016 Matthew Holt 3 | 4 | // Code in this file was derived from 5 | // https://github.com/mholt/archiver/blob/v4.0.0-alpha.8/fs.go 6 | // 7 | // These modifications were necessary to allow us to use an already open file 8 | // with archiver.FileFS. 9 | 10 | package archiverext 11 | 12 | import ( 13 | "io" 14 | "io/fs" 15 | 16 | "github.com/mholt/archives" 17 | ) 18 | 19 | // FileFS allows accessing a file on disk using a consistent file system interface. 20 | // The value should be the path to a regular file, not a directory. This file will 21 | // be the only entry in the file system and will be at its root. It can be accessed 22 | // within the file system by the name of "." or the filename. 23 | // 24 | // If the file is compressed, set the Compression field so that reads from the 25 | // file will be transparently decompressed. 26 | type FileFS struct { 27 | // File is the compressed file backing the FileFS. 28 | File fs.File 29 | 30 | // If file is compressed, setting this field will 31 | // transparently decompress reads. 32 | Compression archives.Decompressor 33 | } 34 | 35 | // Open opens the named file, which must be the file used to create the file system. 36 | func (f FileFS) Open(name string) (fs.File, error) { 37 | if err := f.checkName(name, "open"); err != nil { 38 | return nil, err 39 | } 40 | if f.Compression == nil { 41 | return f.File, nil 42 | } 43 | r, err := f.Compression.OpenReader(f.File) 44 | if err != nil { 45 | return nil, err 46 | } 47 | return compressedFile{f.File, r}, nil 48 | } 49 | 50 | // ReadDir returns a directory listing with the file as the singular entry. 
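A usage sketch for the wrapper, with no compression configured so that only the behaviour shown in this file is exercised; the file name is arbitrary. If the file were gzip-compressed, setting Compression to the package's gzip decompressor would make the same reads transparent.

package main

import (
	"fmt"
	"io/fs"
	"os"

	"github.com/pyrohost/elytra/src/server/filesystem/archiverext"
)

func main() {
	f, err := os.Open("server.log") // any regular, uncompressed file
	if err != nil {
		fmt.Println(err)
		return
	}
	defer f.Close()

	// Wrap the already-open file so it can be handed to code that expects an
	// fs.FS; with a nil Compression the bytes are passed through unchanged.
	ffs := archiverext.FileFS{File: f}

	b, err := fs.ReadFile(ffs, ".")
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Printf("read %d bytes through the fs.FS wrapper\n", len(b))
}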
51 | func (f FileFS) ReadDir(name string) ([]fs.DirEntry, error) { 52 | if err := f.checkName(name, "stat"); err != nil { 53 | return nil, err 54 | } 55 | info, err := f.Stat(name) 56 | if err != nil { 57 | return nil, err 58 | } 59 | return []fs.DirEntry{fs.FileInfoToDirEntry(info)}, nil 60 | } 61 | 62 | // Stat stats the named file, which must be the file used to create the file system. 63 | func (f FileFS) Stat(name string) (fs.FileInfo, error) { 64 | if err := f.checkName(name, "stat"); err != nil { 65 | return nil, err 66 | } 67 | return f.File.Stat() 68 | } 69 | 70 | func (f FileFS) checkName(name, op string) error { 71 | if !fs.ValidPath(name) { 72 | return &fs.PathError{Op: "open", Path: name, Err: fs.ErrInvalid} 73 | } 74 | // TODO: we may need better name validation. 75 | if name != "." { 76 | return &fs.PathError{Op: op, Path: name, Err: fs.ErrNotExist} 77 | } 78 | return nil 79 | } 80 | 81 | // compressedFile is an fs.File that specially reads 82 | // from a decompression reader, and which closes both 83 | // that reader and the underlying file. 84 | type compressedFile struct { 85 | fs.File 86 | decomp io.ReadCloser 87 | } 88 | 89 | func (cf compressedFile) Read(p []byte) (int, error) { 90 | return cf.decomp.Read(p) 91 | } 92 | 93 | func (cf compressedFile) Close() error { 94 | err := cf.File.Close() 95 | err2 := cf.decomp.Close() 96 | if err2 != nil && err == nil { 97 | err = err2 98 | } 99 | return err 100 | } 101 | -------------------------------------------------------------------------------- /src/internal/progress/progress.go: -------------------------------------------------------------------------------- 1 | package progress 2 | 3 | import ( 4 | "io" 5 | "strings" 6 | "sync/atomic" 7 | 8 | "github.com/pyrohost/elytra/src/system" 9 | ) 10 | 11 | // Progress is used to track the progress of any I/O operation that are being 12 | // performed. 13 | type Progress struct { 14 | // written is the total size of the files that have been written to the writer. 15 | written uint64 16 | // Total is the total size of the archive in bytes. 17 | total uint64 18 | 19 | // Writer . 20 | Writer io.Writer 21 | } 22 | 23 | // NewProgress returns a new progress tracker for the given total size. 24 | func NewProgress(total uint64) *Progress { 25 | return &Progress{total: total} 26 | } 27 | 28 | // Written returns the total number of bytes written. 29 | // This function should be used when the progress is tracking data being written. 30 | func (p *Progress) Written() uint64 { 31 | return atomic.LoadUint64(&p.written) 32 | } 33 | 34 | // Total returns the total size in bytes. 35 | func (p *Progress) Total() uint64 { 36 | return atomic.LoadUint64(&p.total) 37 | } 38 | 39 | // SetTotal sets the total size of the archive in bytes. This function is safe 40 | // to call concurrently and can be used to update the total size if it changes, 41 | // such as when the total size is simultaneously being calculated as data is 42 | // being written through the progress writer. 43 | func (p *Progress) SetTotal(total uint64) { 44 | atomic.StoreUint64(&p.total, total) 45 | } 46 | 47 | // Write totals the number of bytes that have been written to the writer. 48 | func (p *Progress) Write(v []byte) (int, error) { 49 | n := len(v) 50 | atomic.AddUint64(&p.written, uint64(n)) 51 | if p.Writer != nil { 52 | return p.Writer.Write(v) 53 | } 54 | return n, nil 55 | } 56 | 57 | // Progress returns a formatted progress string for the current progress. 
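A caller-side sketch of the tracker (progress is an internal package, so the import only resolves inside this module); the byte counts are arbitrary, and the width argument controls how many ticks make up the bar rendered by the method below.

package main

import (
	"fmt"

	"github.com/pyrohost/elytra/src/internal/progress"
)

func main() {
	p := progress.NewProgress(1000)

	// Route data through the tracker; every write is tallied before being
	// forwarded to p.Writer (nil here, so the bytes are only counted).
	_, _ = p.Write(make([]byte, 100))

	// 100 of 1000 bytes with a 25-tick bar resolves to two ticks, matching
	// the worked example in the comments below.
	fmt.Println(p.Progress(25))
}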
58 | func (p *Progress) Progress(width int) string { 59 | // current = 100 (Progress, dynamic) 60 | // total = 1000 (Content-Length, dynamic) 61 | // width = 25 (Number of ticks to display, static) 62 | // widthPercentage = 100 / width (What percentage does each tick represent, static) 63 | // 64 | // percentageDecimal = current / total = 0.1 65 | // percentage = percentageDecimal * 100 = 10% 66 | // ticks = percentage / widthPercentage = 2.5 67 | // 68 | // ticks is a float64, so we cast it to an int which rounds it down to 2. 69 | 70 | // Values are cast to floats to prevent integer division. 71 | current := p.Written() 72 | total := p.Total() 73 | // width := is passed as a parameter 74 | widthPercentage := float64(100) / float64(width) 75 | percentageDecimal := float64(current) / float64(total) 76 | percentage := percentageDecimal * 100 77 | ticks := int(percentage / widthPercentage) 78 | 79 | // Ensure that we never get a negative number of ticks, this will prevent strings#Repeat 80 | // from panicking. A negative number of ticks is likely to happen when the total size is 81 | // inaccurate, such as when we are going off of rough disk usage calculation. 82 | if ticks < 0 { 83 | ticks = 0 84 | } else if ticks > width { 85 | ticks = width 86 | } 87 | 88 | bar := strings.Repeat("=", ticks) + strings.Repeat(" ", width-ticks) 89 | return "[" + bar + "] " + system.FormatBytes(current) + " / " + system.FormatBytes(total) 90 | } 91 | -------------------------------------------------------------------------------- /src/server/filesystem/archive_test.go: -------------------------------------------------------------------------------- 1 | package filesystem 2 | 3 | import ( 4 | "context" 5 | iofs "io/fs" 6 | "os" 7 | "path/filepath" 8 | "sort" 9 | "strings" 10 | "testing" 11 | 12 | . "github.com/franela/goblin" 13 | "github.com/mholt/archives" 14 | ) 15 | 16 | func TestArchive_Stream(t *testing.T) { 17 | g := Goblin(t) 18 | fs, rfs := NewFs() 19 | 20 | g.Describe("Archive", func() { 21 | g.AfterEach(func() { 22 | // Reset the filesystem after each run. 23 | _ = fs.TruncateRootDirectory() 24 | }) 25 | 26 | g.It("creates archive with intended files", func() { 27 | g.Assert(fs.CreateDirectory("test", "/")).IsNil() 28 | g.Assert(fs.CreateDirectory("test2", "/")).IsNil() 29 | 30 | r := strings.NewReader("hello, world!\n") 31 | err := fs.Write("test/file.txt", r, r.Size(), 0o644) 32 | g.Assert(err).IsNil() 33 | 34 | r = strings.NewReader("hello, world!\n") 35 | err = fs.Write("test2/file.txt", r, r.Size(), 0o644) 36 | g.Assert(err).IsNil() 37 | 38 | r = strings.NewReader("hello, world!\n") 39 | err = fs.Write("test_file.txt", r, r.Size(), 0o644) 40 | g.Assert(err).IsNil() 41 | 42 | r = strings.NewReader("hello, world!\n") 43 | err = fs.Write("test_file.txt.old", r, r.Size(), 0o644) 44 | g.Assert(err).IsNil() 45 | 46 | a := &Archive{ 47 | Filesystem: fs, 48 | Files: []string{ 49 | "test", 50 | "test_file.txt", 51 | }, 52 | } 53 | 54 | // Create the archive. 55 | archivePath := filepath.Join(rfs.root, "archive.tar.gz") 56 | g.Assert(a.Create(context.Background(), archivePath)).IsNil() 57 | 58 | // Ensure the archive exists. 59 | _, err = os.Stat(archivePath) 60 | g.Assert(err).IsNil() 61 | 62 | // Open the archive. 63 | genericFs, err := archives.FileSystem(context.Background(), archivePath, nil) 64 | g.Assert(err).IsNil() 65 | 66 | // Assert that we are opening an archive. 
67 | afs, ok := genericFs.(iofs.ReadDirFS) 68 | g.Assert(ok).IsTrue() 69 | 70 | // Get the names of the files recursively from the archive. 71 | files, err := getFiles(afs, ".") 72 | g.Assert(err).IsNil() 73 | 74 | // Ensure the files in the archive match what we are expecting. 75 | expected := []string{ 76 | "test_file.txt", 77 | "test/file.txt", 78 | } 79 | 80 | // Sort the slices to ensure the comparison never fails if the 81 | // contents are sorted differently. 82 | sort.Strings(expected) 83 | sort.Strings(files) 84 | 85 | g.Assert(files).Equal(expected) 86 | }) 87 | }) 88 | } 89 | 90 | func getFiles(f iofs.ReadDirFS, name string) ([]string, error) { 91 | var v []string 92 | 93 | entries, err := f.ReadDir(name) 94 | if err != nil { 95 | return nil, err 96 | } 97 | 98 | for _, e := range entries { 99 | entryName := e.Name() 100 | if name != "." { 101 | entryName = filepath.Join(name, entryName) 102 | } 103 | 104 | if e.IsDir() { 105 | files, err := getFiles(f, entryName) 106 | if err != nil { 107 | return nil, err 108 | } 109 | 110 | if files == nil { 111 | return nil, nil 112 | } 113 | 114 | v = append(v, files...) 115 | continue 116 | } 117 | 118 | v = append(v, entryName) 119 | } 120 | 121 | return v, nil 122 | } 123 | -------------------------------------------------------------------------------- /src/server/backup/backup_local.go: -------------------------------------------------------------------------------- 1 | package backup 2 | 3 | import ( 4 | "context" 5 | "io" 6 | "os" 7 | 8 | "emperror.dev/errors" 9 | "github.com/juju/ratelimit" 10 | "github.com/mholt/archives" 11 | 12 | "github.com/pyrohost/elytra/src/config" 13 | "github.com/pyrohost/elytra/src/remote" 14 | "github.com/pyrohost/elytra/src/server/filesystem" 15 | ) 16 | 17 | type LocalBackup struct { 18 | Backup 19 | } 20 | 21 | var _ BackupInterface = (*LocalBackup)(nil) 22 | 23 | func NewLocal(client remote.Client, uuid string, ignore string) *LocalBackup { 24 | return &LocalBackup{ 25 | Backup{ 26 | client: client, 27 | Uuid: uuid, 28 | Ignore: ignore, 29 | adapter: LocalBackupAdapter, 30 | }, 31 | } 32 | } 33 | 34 | // LocateLocal finds the backup for a server and returns the local path. This 35 | // will obviously only work if the backup was created as a local backup. 36 | func LocateLocal(client remote.Client, uuid string) (*LocalBackup, os.FileInfo, error) { 37 | b := NewLocal(client, uuid, "") 38 | st, err := os.Stat(b.Path()) 39 | if err != nil { 40 | return nil, nil, err 41 | } 42 | 43 | if st.IsDir() { 44 | return nil, nil, errors.New("invalid archive, is directory") 45 | } 46 | 47 | return b, st, nil 48 | } 49 | 50 | // Remove removes a backup from the system. 51 | func (b *LocalBackup) Remove() error { 52 | return os.Remove(b.Path()) 53 | } 54 | 55 | // WithLogContext attaches additional context to the log output for this backup. 56 | func (b *LocalBackup) WithLogContext(c map[string]interface{}) { 57 | b.logContext = c 58 | } 59 | 60 | // Generate generates a backup of the selected files and pushes it to the 61 | // defined location for this instance. 
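Restores of local backups are throttled so that extraction cannot saturate the disk: the Restore method further down wraps its file reader in a token bucket sized from the backups WriteLimit setting (MiB converted to bytes). The same pattern as a standalone sketch, with made-up sizes:

package main

import (
	"fmt"
	"io"
	"strings"
	"time"

	"github.com/juju/ratelimit"
)

func main() {
	src := strings.NewReader(strings.Repeat("x", 64*1024))

	// Cap reads at 32 KiB/s; the bucket capacity doubles as the burst size.
	limit := int64(32 * 1024)
	r := ratelimit.Reader(src, ratelimit.NewBucketWithRate(float64(limit), limit))

	start := time.Now()
	n, _ := io.Copy(io.Discard, r)
	fmt.Printf("copied %d bytes in %s\n", n, time.Since(start).Round(time.Millisecond))
}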
62 | func (b *LocalBackup) Generate(ctx context.Context, fsys *filesystem.Filesystem, ignore string) (*ArchiveDetails, error) { 63 | a := &filesystem.Archive{ 64 | Filesystem: fsys, 65 | Ignore: ignore, 66 | } 67 | 68 | b.log().WithField("path", b.Path()).Info("creating backup for server") 69 | if err := a.Create(ctx, b.Path()); err != nil { 70 | return nil, err 71 | } 72 | b.log().Info("created backup successfully") 73 | 74 | ad, err := b.Details(ctx, nil) 75 | if err != nil { 76 | return nil, errors.WrapIf(err, "backup: failed to get archive details for local backup") 77 | } 78 | return ad, nil 79 | } 80 | 81 | // Restore will walk over the archive and call the callback function for each 82 | // file encountered. 83 | func (b *LocalBackup) Restore(ctx context.Context, _ io.Reader, callback RestoreCallback) error { 84 | f, err := os.Open(b.Path()) 85 | if err != nil { 86 | return err 87 | } 88 | defer f.Close() 89 | 90 | var reader io.Reader = f 91 | // Steal the logic we use for making backups which will be applied when restoring 92 | // this specific backup. This allows us to prevent overloading the disk unintentionally. 93 | if writeLimit := int64(config.Get().System.Backups.WriteLimit * 1024 * 1024); writeLimit > 0 { 94 | reader = ratelimit.Reader(f, ratelimit.NewBucketWithRate(float64(writeLimit), writeLimit)) 95 | } 96 | if err := format.Extract(ctx, reader, func(ctx context.Context, f archives.FileInfo) error { 97 | r, err := f.Open() 98 | if err != nil { 99 | return err 100 | } 101 | defer r.Close() 102 | 103 | return callback(f.NameInArchive, f.FileInfo, r) 104 | }); err != nil { 105 | return err 106 | } 107 | return nil 108 | } 109 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | # Elytra Makefile 2 | 3 | .PHONY: help build test clean download-rustic release dev docker 4 | 5 | # Default target 6 | help: ## Show this help message 7 | @echo "Elytra Build System" 8 | @echo "" 9 | @echo "Available targets:" 10 | @awk 'BEGIN {FS = ":.*?## "} /^[a-zA-Z_-]+:.*?## / {printf " %-15s %s\n", $$1, $$2}' $(MAKEFILE_LIST) 11 | 12 | # Variables 13 | VERSION ?= $(shell git describe --tags --always --dirty) 14 | BINARY_NAME = elytra 15 | BUILD_DIR = build 16 | GO_VERSION = 1.24.1 17 | 18 | # Build configuration 19 | LDFLAGS = -s -w -X github.com/pyrohost/elytra/src/system.Version=$(VERSION) 20 | BUILD_FLAGS = -v -trimpath -ldflags="$(LDFLAGS)" 21 | 22 | build: ## Build binary for current platform 23 | @echo "Building $(BINARY_NAME) $(VERSION)..." 24 | @mkdir -p $(BUILD_DIR) 25 | CGO_ENABLED=0 go build $(BUILD_FLAGS) -o $(BUILD_DIR)/$(BINARY_NAME) ./src/cmd/elytra 26 | 27 | build-all: ## Build binaries for all supported platforms 28 | @echo "Building $(BINARY_NAME) $(VERSION) for all platforms..." 29 | @mkdir -p $(BUILD_DIR) 30 | @for GOOS in linux; do \ 31 | for GOARCH in amd64 arm64; do \ 32 | echo "Building $${GOOS}/$${GOARCH}..."; \ 33 | env GOOS=$${GOOS} GOARCH=$${GOARCH} CGO_ENABLED=0 go build $(BUILD_FLAGS) \ 34 | -o $(BUILD_DIR)/$(BINARY_NAME)_$${GOOS}_$${GOARCH} ./src/cmd/elytra; \ 35 | done \ 36 | done 37 | @echo "Build complete!" 38 | 39 | test: ## Run tests 40 | @echo "Running tests..." 41 | go test -race -cover ./... 
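The -X flag inside LDFLAGS is what turns the version stub into a real value: at link time it overwrites the package-level string github.com/pyrohost/elytra/src/system.Version with whatever git describe produced, with no code generation involved. The same mechanism reproduced outside this repository (module, variable, and version string are illustrative):

// version.go
package main

import "fmt"

// Version is overwritten at link time, for example:
//
//	go build -ldflags "-X main.Version=v1.2.3" -o demo .
var Version = "dev"

func main() {
	fmt.Println("running", Version)
}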
42 | 43 | debug: ## Build and run in debug mode 44 | go build -ldflags="-X github.com/pyrohost/elytra/src/system.Version=$(VERSION)" -o elytra ./src/cmd/elytra 45 | sudo ./elytra --debug --ignore-certificate-errors --config config.yml --pprof --pprof-block-rate 1 46 | 47 | # Runs a remotely debuggable session for Elytra allowing an IDE to connect and target 48 | # different breakpoints. 49 | rmdebug: ## Run with remote debugger 50 | go build -gcflags "all=-N -l" -ldflags="-X github.com/pyrohost/elytra/src/system.Version=$(VERSION)" -race -o elytra ./src/cmd/elytra 51 | sudo dlv --listen=:2345 --headless=true --api-version=2 --accept-multiclient exec ./elytra -- --debug --ignore-certificate-errors --config config.yml 52 | 53 | dev: build ## Build and run in development mode 54 | @echo "Starting $(BINARY_NAME) in development mode..." 55 | ./$(BUILD_DIR)/$(BINARY_NAME) --help 56 | 57 | docker: ## Build Docker image 58 | @echo "Building Docker image..." 59 | docker build -t ghcr.io/pyrohost/elytra:dev --build-arg VERSION=$(VERSION) . 60 | 61 | fmt: ## Format Go code 62 | @echo "Formatting Go code..." 63 | go fmt ./... 64 | 65 | lint: ## Run linters 66 | @echo "Running linters..." 67 | @if command -v golangci-lint >/dev/null 2>&1; then \ 68 | golangci-lint run; \ 69 | else \ 70 | echo "golangci-lint not found, running go vet instead..."; \ 71 | go vet ./...; \ 72 | fi 73 | 74 | deps: ## Download dependencies 75 | @echo "Downloading dependencies..." 76 | go mod download 77 | go mod tidy 78 | 79 | # Show build info 80 | info: ## Show build information 81 | @echo "Build Information:" 82 | @echo " Version: $(VERSION)" 83 | @echo " Binary: $(BINARY_NAME)" 84 | @echo " Build Dir: $(BUILD_DIR)" 85 | @echo " Go Version: $(shell go version)" 86 | @echo " Git Commit: $(shell git rev-parse HEAD)" 87 | @echo " Git Branch: $(shell git rev-parse --abbrev-ref HEAD)" 88 | 89 | 90 | clean: ## Clean build artifacts 91 | @echo "Cleaning build artifacts..." 92 | rm -rf $(BUILD_DIR)/* 93 | 94 | .PHONY: help build build-all test debug rmdebug dev docker fmt lint deps info clean -------------------------------------------------------------------------------- /src/environment/docker.go: -------------------------------------------------------------------------------- 1 | package environment 2 | 3 | import ( 4 | "context" 5 | "strconv" 6 | "sync" 7 | 8 | "emperror.dev/errors" 9 | "github.com/apex/log" 10 | "github.com/docker/docker/api/types/network" 11 | "github.com/docker/docker/client" 12 | 13 | "github.com/pyrohost/elytra/src/config" 14 | ) 15 | 16 | var ( 17 | _conce sync.Once 18 | _client *client.Client 19 | ) 20 | 21 | // Docker returns a docker client to be used throughout the codebase. Once a 22 | // client has been created it will be returned for all subsequent calls to this 23 | // function. 24 | func Docker() (*client.Client, error) { 25 | var err error 26 | _conce.Do(func() { 27 | _client, err = client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation()) 28 | }) 29 | return _client, errors.Wrap(err, "environment/docker: could not create client") 30 | } 31 | 32 | // ConfigureDocker configures the required network for the docker environment. 33 | func ConfigureDocker(ctx context.Context) error { 34 | // Ensure the required docker network exists on the system. 
35 | cli, err := Docker() 36 | if err != nil { 37 | return err 38 | } 39 | 40 | nw := config.Get().Docker.Network 41 | resource, err := cli.NetworkInspect(ctx, nw.Name, network.InspectOptions{}) 42 | if err != nil { 43 | if !client.IsErrNotFound(err) { 44 | return err 45 | } 46 | 47 | log.Info("creating missing pterodactyl0 interface, this could take a few seconds...") 48 | if err := createDockerNetwork(ctx, cli); err != nil { 49 | return err 50 | } 51 | } 52 | 53 | config.Update(func(c *config.Configuration) { 54 | c.Docker.Network.Driver = resource.Driver 55 | switch c.Docker.Network.Driver { 56 | case "host": 57 | c.Docker.Network.Interface = "127.0.0.1" 58 | c.Docker.Network.ISPN = false 59 | case "overlay": 60 | fallthrough 61 | case "weavemesh": 62 | c.Docker.Network.Interface = "" 63 | c.Docker.Network.ISPN = true 64 | default: 65 | c.Docker.Network.ISPN = false 66 | } 67 | }) 68 | return nil 69 | } 70 | 71 | // Creates a new network on the machine if one does not exist already. 72 | func createDockerNetwork(ctx context.Context, cli *client.Client) error { 73 | nw := config.Get().Docker.Network 74 | enableIPv6 := true 75 | _, err := cli.NetworkCreate(ctx, nw.Name, network.CreateOptions{ 76 | Driver: nw.Driver, 77 | EnableIPv6: &enableIPv6, 78 | Internal: nw.IsInternal, 79 | IPAM: &network.IPAM{ 80 | Config: []network.IPAMConfig{{ 81 | Subnet: nw.Interfaces.V4.Subnet, 82 | Gateway: nw.Interfaces.V4.Gateway, 83 | }, { 84 | Subnet: nw.Interfaces.V6.Subnet, 85 | Gateway: nw.Interfaces.V6.Gateway, 86 | }}, 87 | }, 88 | Options: map[string]string{ 89 | "encryption": "false", 90 | "com.docker.network.bridge.default_bridge": "false", 91 | "com.docker.network.bridge.enable_icc": strconv.FormatBool(nw.EnableICC), 92 | "com.docker.network.bridge.enable_ip_masquerade": "true", 93 | "com.docker.network.bridge.host_binding_ipv4": "0.0.0.0", 94 | "com.docker.network.bridge.name": "pterodactyl0", 95 | "com.docker.network.driver.mtu": strconv.FormatInt(nw.NetworkMTU, 10), 96 | }, 97 | }) 98 | if err != nil { 99 | return err 100 | } 101 | if nw.Driver != "host" && nw.Driver != "overlay" && nw.Driver != "weavemesh" { 102 | config.Update(func(c *config.Configuration) { 103 | c.Docker.Network.Interface = c.Docker.Network.Interfaces.V4.Gateway 104 | }) 105 | } 106 | return nil 107 | } 108 | -------------------------------------------------------------------------------- /src/environment/docker/api.go: -------------------------------------------------------------------------------- 1 | package docker 2 | 3 | import ( 4 | "context" 5 | "encoding/json" 6 | "io" 7 | "net/http" 8 | "reflect" 9 | "strings" 10 | "sync" 11 | 12 | "emperror.dev/errors" 13 | "github.com/docker/docker/api/types" 14 | "github.com/docker/docker/api/types/versions" 15 | "github.com/docker/docker/client" 16 | "github.com/docker/docker/errdefs" 17 | 18 | "github.com/pyrohost/elytra/src/config" 19 | ) 20 | 21 | var ( 22 | o sync.Once 23 | cli cliSettings 24 | fastEnabled bool 25 | ) 26 | 27 | type cliSettings struct { 28 | enabled bool 29 | proto string 30 | host string 31 | scheme string 32 | version string 33 | } 34 | 35 | func configure(c *client.Client) { 36 | o.Do(func() { 37 | fastEnabled = config.Get().Docker.UsePerformantInspect 38 | 39 | r := reflect.ValueOf(c).Elem() 40 | cli.proto = r.FieldByName("proto").String() 41 | cli.host = r.FieldByName("addr").String() 42 | cli.scheme = r.FieldByName("scheme").String() 43 | cli.version = r.FieldByName("version").String() 44 | }) 45 | } 46 | 47 | // ContainerInspect is a rough 
equivalent of Docker's client.ContainerInspect() 48 | // but re-written to use a more performant JSON decoder. This is important since 49 | // a large number of requests to this endpoint are spawned by Elytra, and the 50 | // standard "encoding/json" shows its performance woes badly even with single 51 | // containers running. 52 | func (e *Environment) ContainerInspect(ctx context.Context) (types.ContainerJSON, error) { 53 | configure(e.client) 54 | 55 | // Support feature flagging of this functionality so that if something goes 56 | // wrong for now it is easy enough for people to switch back to the older method 57 | // of fetching stats. 58 | if !fastEnabled { 59 | return e.client.ContainerInspect(ctx, e.Id) 60 | } 61 | 62 | var st types.ContainerJSON 63 | req, err := http.NewRequestWithContext(ctx, http.MethodGet, "/containers/"+e.Id+"/json", nil) 64 | if err != nil { 65 | return st, errors.WithStack(err) 66 | } 67 | 68 | if cli.proto == "unix" || cli.proto == "npipe" { 69 | req.Host = "docker" 70 | } 71 | 72 | req.URL.Host = cli.host 73 | req.URL.Scheme = cli.scheme 74 | 75 | res, err := e.client.HTTPClient().Do(req) 76 | if err != nil { 77 | if res == nil { 78 | return st, errdefs.Unknown(err) 79 | } 80 | return st, errdefs.FromStatusCode(err, res.StatusCode) 81 | } 82 | 83 | body, err := io.ReadAll(res.Body) 84 | if err != nil { 85 | return st, errors.Wrap(err, "failed to read response body from Docker") 86 | } 87 | if err := parseErrorFromResponse(res, body); err != nil { 88 | return st, errdefs.FromStatusCode(err, res.StatusCode) 89 | } 90 | if err := json.Unmarshal(body, &st); err != nil { 91 | return st, errors.WithStack(err) 92 | } 93 | return st, nil 94 | } 95 | 96 | // parseErrorFromResponse is a re-implementation of Docker's 97 | // client.checkResponseErr() function. 98 | func parseErrorFromResponse(res *http.Response, body []byte) error { 99 | if res.StatusCode >= 200 && res.StatusCode < 400 { 100 | return nil 101 | } 102 | 103 | var ct string 104 | if res.Header != nil { 105 | ct = res.Header.Get("Content-Type") 106 | } 107 | 108 | var emsg string 109 | if (cli.version == "" || versions.GreaterThan(cli.version, "1.23")) && ct == "application/json" { 110 | var errResp types.ErrorResponse 111 | if err := json.Unmarshal(body, &errResp); err != nil { 112 | return errors.WithStack(err) 113 | } 114 | emsg = strings.TrimSpace(errResp.Message) 115 | } else { 116 | emsg = strings.TrimSpace(string(body)) 117 | } 118 | 119 | return errors.Wrap(errors.New(emsg), "Error response from daemon") 120 | } 121 | -------------------------------------------------------------------------------- /src/router/tokens/websocket.go: -------------------------------------------------------------------------------- 1 | package tokens 2 | 3 | import ( 4 | "strings" 5 | "sync" 6 | "time" 7 | 8 | "github.com/apex/log" 9 | "github.com/gbrlsnchs/jwt/v3" 10 | ) 11 | 12 | // The time at which Elytra was booted. No JWT's created before this time are allowed to 13 | // connect to the socket since they may have been marked as denied already and therefore 14 | // could be invalid at this point. 15 | // 16 | // By doing this we make it so that a user who gets disconnected from Elytra due to a Elytra 17 | // reboot just needs to request a new token as if their old token had expired naturally. 18 | var elytraBootTime = time.Now() 19 | 20 | // A map that contains any JTI's that have been denied by the Panel and the time at which 21 | // they were marked as denied. 
Therefore any JWT with the same JTI and an IssuedTime that 22 | // is the same as or before this time should be considered invalid. 23 | // 24 | // This is used to allow the Panel to revoke tokens en-masse for a given user & server 25 | // combination since the JTI for tokens is just MD5(user.id + server.uuid). When a server 26 | // is booted this listing is fetched from the panel and the Websocket is dynamically updated. 27 | var denylist sync.Map 28 | 29 | // Adds a JTI to the denylist by marking any JWTs generated before the current time as 30 | // being invalid if they use the same JTI. 31 | func DenyJTI(jti string) { 32 | log.WithField("jti", jti).Debugf("adding \"%s\" to JTI denylist", jti) 33 | 34 | denylist.Store(jti, time.Now()) 35 | } 36 | 37 | // WebsocketPayload defines the JWT payload for a websocket connection. This JWT is passed along to 38 | // the websocket after it has been connected to by sending an "auth" event. 39 | type WebsocketPayload struct { 40 | jwt.Payload 41 | sync.RWMutex 42 | 43 | UserUUID string `json:"user_uuid"` 44 | ServerUUID string `json:"server_uuid"` 45 | Permissions []string `json:"permissions"` 46 | } 47 | 48 | // Returns the JWT payload. 49 | func (p *WebsocketPayload) GetPayload() *jwt.Payload { 50 | p.RLock() 51 | defer p.RUnlock() 52 | 53 | return &p.Payload 54 | } 55 | 56 | // Returns the UUID of the server associated with this JWT. 57 | func (p *WebsocketPayload) GetServerUuid() string { 58 | p.RLock() 59 | defer p.RUnlock() 60 | 61 | return p.ServerUUID 62 | } 63 | 64 | // Check if the JWT has been marked as denied by the instance due to either being issued 65 | // before Elytra was booted, or because we have denied all tokens with the same JTI 66 | // occurring before a set time. 67 | func (p *WebsocketPayload) Denylisted() bool { 68 | // If there is no IssuedAt present for the token, we cannot validate the token so 69 | // just immediately mark it as not valid. 70 | if p.IssuedAt == nil { 71 | return true 72 | } 73 | 74 | // If the time that the token was issued is before the time at which Elytra was booted 75 | // then the token is invalid for our purposes, even if the token "has permission". 76 | if p.IssuedAt.Time.Before(elytraBootTime) { 77 | return true 78 | } 79 | 80 | // Finally, if the token was issued before a time that is currently denied for this 81 | // token instance, ignore the permissions response. 82 | if t, ok := denylist.Load(p.JWTID); ok { 83 | if p.IssuedAt.Time.Before(t.(time.Time)) { 84 | return true 85 | } 86 | } 87 | 88 | return false 89 | } 90 | 91 | // Checks if the given token payload has a permission string. 92 | func (p *WebsocketPayload) HasPermission(permission string) bool { 93 | p.RLock() 94 | defer p.RUnlock() 95 | 96 | for _, k := range p.Permissions { 97 | if k == permission || (!strings.HasPrefix(permission, "admin") && k == "*") { 98 | return !p.Denylisted() 99 | } 100 | } 101 | 102 | return false 103 | } 104 | -------------------------------------------------------------------------------- /src/server/crash.go: -------------------------------------------------------------------------------- 1 | package server 2 | 3 | import ( 4 | "fmt" 5 | "strconv" 6 | "sync" 7 | "time" 8 | 9 | "emperror.dev/errors" 10 | 11 | "github.com/pyrohost/elytra/src/config" 12 | "github.com/pyrohost/elytra/src/environment" 13 | ) 14 | 15 | type CrashHandler struct { 16 | mu sync.RWMutex 17 | 18 | // Tracks the time of the last server crash event. 
19 | lastCrash time.Time 20 | } 21 | 22 | // Returns the time of the last crash for this server instance. 23 | func (cd *CrashHandler) LastCrashTime() time.Time { 24 | cd.mu.RLock() 25 | defer cd.mu.RUnlock() 26 | 27 | return cd.lastCrash 28 | } 29 | 30 | // Sets the last crash time for a server. 31 | func (cd *CrashHandler) SetLastCrash(t time.Time) { 32 | cd.mu.Lock() 33 | cd.lastCrash = t 34 | cd.mu.Unlock() 35 | } 36 | 37 | // Looks at the environment exit state to determine if the process exited cleanly or 38 | // if it was the result of an event that we should try to recover from. 39 | // 40 | // This function assumes it is called under circumstances where a crash is suspected 41 | // of occurring. It will not do anything to determine if it was actually a crash, just 42 | // look at the exit state and check if it meets the criteria of being called a crash 43 | // by Elytra. 44 | // 45 | // If the server is determined to have crashed, the process will be restarted and the 46 | // counter for the server will be incremented. 47 | func (s *Server) handleServerCrash() error { 48 | // No point in doing anything here if the server isn't currently offline, there 49 | // is no reason to do a crash detection event. If the server crash detection is 50 | // disabled we want to skip anything after this as well. 51 | if s.Environment.State() != environment.ProcessOfflineState || !s.Config().CrashDetectionEnabled { 52 | if !s.Config().CrashDetectionEnabled { 53 | s.Log().Debug("server triggered crash detection but handler is disabled for server process") 54 | s.PublishConsoleOutputFromDaemon("Aborting automatic restart, crash detection is disabled for this instance.") 55 | } 56 | 57 | return nil 58 | } 59 | 60 | exitCode, oomKilled, err := s.Environment.ExitState() 61 | if err != nil { 62 | return errors.Wrap(err, "failed to get exit state for server process") 63 | } 64 | 65 | // If the system is not configured to detect a clean exit code as a crash, and the 66 | // crash is not the result of the program running out of memory, do nothing. 67 | if exitCode == 0 && !oomKilled && !config.Get().System.CrashDetection.DetectCleanExitAsCrash { 68 | s.Log().Debug("server exited with successful exit code; system is configured to not detect this as a crash") 69 | return nil 70 | } 71 | 72 | s.PublishConsoleOutputFromDaemon("---------- Detected server process in a crashed state! ----------") 73 | s.PublishConsoleOutputFromDaemon(fmt.Sprintf("Exit code: %d", exitCode)) 74 | s.PublishConsoleOutputFromDaemon(fmt.Sprintf("Out of memory: %t", oomKilled)) 75 | 76 | c := s.crasher.LastCrashTime() 77 | timeout := config.Get().System.CrashDetection.Timeout 78 | 79 | // If the last crash time was within the last `timeout` seconds we do not want to perform 80 | // an automatic reboot of the process. Return an error that can be handled. 
81 | // 82 | // If timeout is set to 0, always reboot the server (this is probably a terrible idea, but some people want it) 83 | if timeout != 0 && !c.IsZero() && c.Add(time.Second*time.Duration(config.Get().System.CrashDetection.Timeout)).After(time.Now()) { 84 | s.PublishConsoleOutputFromDaemon("Aborting automatic restart, last crash occurred less than " + strconv.Itoa(timeout) + " seconds ago.") 85 | return &crashTooFrequent{} 86 | } 87 | 88 | s.crasher.SetLastCrash(time.Now()) 89 | 90 | return errors.Wrap(s.HandlePowerAction(PowerActionStart), "failed to start server after crash detection") 91 | } 92 | -------------------------------------------------------------------------------- /src/system/utils_test.go: -------------------------------------------------------------------------------- 1 | package system 2 | 3 | import ( 4 | "encoding/json" 5 | "math/rand" 6 | "strings" 7 | "sync" 8 | "testing" 9 | "time" 10 | 11 | . "github.com/franela/goblin" 12 | ) 13 | 14 | func Test_Utils(t *testing.T) { 15 | g := Goblin(t) 16 | 17 | g.Describe("ScanReader", func() { 18 | g.BeforeEach(func() { 19 | maxBufferSize = 10 20 | }) 21 | 22 | g.It("should truncate and return long lines", func() { 23 | reader := strings.NewReader("hello world this is a long line\nof text that should be truncated\nnot here\nbut definitely on this line") 24 | 25 | var lines []string 26 | err := ScanReader(reader, func(line []byte) { 27 | lines = append(lines, string(line)) 28 | }) 29 | 30 | g.Assert(err).IsNil() 31 | g.Assert(lines).Equal([]string{"hello worl", "of text th", "not here", "but defini"}) 32 | }) 33 | 34 | g.It("should replace carriage returns with newlines", func() { 35 | reader := strings.NewReader("test\rstring\r\nanother\rline\nhodor\r\r\rheld the door\nmaterial gourl\n") 36 | var lines []string 37 | err := ScanReader(reader, func(line []byte) { 38 | lines = append(lines, string(line)) 39 | }) 40 | 41 | g.Assert(err).IsNil() 42 | g.Assert(lines).Equal([]string{"test\rstrin", "another\rli", "hodor\r\r\rhe", "material g"}) 43 | }) 44 | }) 45 | 46 | g.Describe("AtomicBool", func() { 47 | var b *AtomicBool 48 | g.BeforeEach(func() { 49 | b = NewAtomicBool(false) 50 | }) 51 | 52 | g.It("initializes with the provided start value", func() { 53 | b = NewAtomicBool(true) 54 | g.Assert(b.Load()).IsTrue() 55 | 56 | b = NewAtomicBool(false) 57 | g.Assert(b.Load()).IsFalse() 58 | }) 59 | 60 | g.Describe("AtomicBool#Store", func() { 61 | g.It("stores the provided value", func() { 62 | g.Assert(b.Load()).IsFalse() 63 | b.Store(true) 64 | g.Assert(b.Load()).IsTrue() 65 | }) 66 | 67 | // This test makes no assertions, it just expects to not hit a race condition 68 | // by having multiple things writing at the same time.
69 | g.It("handles contention from multiple routines", func() { 70 | var wg sync.WaitGroup 71 | 72 | wg.Add(100) 73 | for i := 0; i < 100; i++ { 74 | go func(i int) { 75 | b.Store(i%2 == 0) 76 | wg.Done() 77 | }(i) 78 | } 79 | wg.Wait() 80 | }) 81 | }) 82 | 83 | g.Describe("AtomicBool#SwapIf", func() { 84 | g.It("swaps the value out if different than what is stored", func() { 85 | o := b.SwapIf(false) 86 | g.Assert(o).IsFalse() 87 | g.Assert(b.Load()).IsFalse() 88 | 89 | o = b.SwapIf(true) 90 | g.Assert(o).IsTrue() 91 | g.Assert(b.Load()).IsTrue() 92 | 93 | o = b.SwapIf(true) 94 | g.Assert(o).IsFalse() 95 | g.Assert(b.Load()).IsTrue() 96 | 97 | o = b.SwapIf(false) 98 | g.Assert(o).IsTrue() 99 | g.Assert(b.Load()).IsFalse() 100 | }) 101 | }) 102 | 103 | g.Describe("can be marshaled with JSON", func() { 104 | type testStruct struct { 105 | Value AtomicBool `json:"value"` 106 | } 107 | 108 | var o testStruct 109 | err := json.Unmarshal([]byte(`{"value":true}`), &o) 110 | 111 | g.Assert(err).IsNil() 112 | g.Assert(o.Value.Load()).IsTrue() 113 | 114 | b, err2 := json.Marshal(&o) 115 | g.Assert(err2).IsNil() 116 | g.Assert(b).Equal([]byte(`{"value":true}`)) 117 | }) 118 | }) 119 | } 120 | 121 | func Benchmark_ScanReader(b *testing.B) { 122 | r := rand.New(rand.NewSource(time.Now().UnixNano())) 123 | var str string 124 | for i := 0; i < 10; i++ { 125 | str += strings.Repeat("hello \rworld", r.Intn(2000)) + "\n" 126 | } 127 | reader := strings.NewReader(str) 128 | 129 | b.ResetTimer() 130 | for i := 0; i < b.N; i++ { 131 | _ = ScanReader(reader, func(line []byte) { 132 | // no op 133 | }) 134 | } 135 | } 136 | -------------------------------------------------------------------------------- /.github/workflows/release.yml: -------------------------------------------------------------------------------- 1 | name: Release 2 | 3 | on: 4 | push: 5 | tags: ['v*'] 6 | 7 | permissions: 8 | contents: write 9 | packages: write 10 | id-token: write 11 | attestations: write 12 | 13 | jobs: 14 | release: 15 | name: Release 16 | runs-on: ubuntu-latest 17 | steps: 18 | - uses: actions/checkout@v4 19 | with: 20 | fetch-depth: 0 21 | 22 | - uses: actions/setup-go@v5 23 | with: 24 | go-version-file: go.mod 25 | cache: true 26 | check-latest: true 27 | 28 | - name: Download dependencies 29 | run: go mod download 30 | 31 | - name: Build release binaries 32 | env: 33 | CGO_ENABLED: 0 34 | run: | 35 | VERSION=${GITHUB_REF#refs/tags/} 36 | mkdir -p dist 37 | 38 | # Build for multiple platforms 39 | for GOOS in linux; do 40 | for GOARCH in amd64 arm64; do 41 | echo "Building ${GOOS}/${GOARCH}..." 
42 | env GOOS=${GOOS} GOARCH=${GOARCH} go build -v -trimpath \ 43 | -ldflags="-s -w -X github.com/pyrohost/elytra/src/system.Version=${VERSION}" \ 44 | -o dist/elytra_${GOOS}_${GOARCH} \ 45 | ./src/cmd/elytra 46 | done 47 | done 48 | 49 | # Make binaries executable 50 | chmod +x dist/* 51 | 52 | - name: Generate checksums 53 | run: | 54 | cd dist 55 | sha256sum * > checksums.txt 56 | cat checksums.txt 57 | 58 | - name: Generate attestations 59 | uses: actions/attest-build-provenance@v1 60 | with: 61 | subject-path: 'dist/*' 62 | 63 | - name: Create release 64 | uses: softprops/action-gh-release@v2 65 | if: github.ref_type == 'tag' 66 | with: 67 | draft: false 68 | generate_release_notes: true 69 | files: | 70 | dist/* 71 | env: 72 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 73 | 74 | docker: 75 | name: Docker Release 76 | runs-on: ubuntu-latest 77 | permissions: 78 | contents: read 79 | packages: write 80 | id-token: write 81 | attestations: write 82 | steps: 83 | - uses: actions/checkout@v4 84 | 85 | - uses: docker/setup-qemu-action@v3 86 | 87 | - uses: docker/setup-buildx-action@v3 88 | 89 | - uses: docker/login-action@v3 90 | with: 91 | registry: ghcr.io 92 | username: ${{ github.actor }} 93 | password: ${{ secrets.GITHUB_TOKEN }} 94 | 95 | - name: Extract metadata 96 | id: meta 97 | uses: docker/metadata-action@v5 98 | with: 99 | images: ghcr.io/pyrohost/elytra 100 | tags: | 101 | type=semver,pattern={{version}} 102 | type=semver,pattern={{major}}.{{minor}} 103 | type=semver,pattern={{major}} 104 | type=raw,value=latest 105 | 106 | - name: Build and push 107 | id: build 108 | uses: docker/build-push-action@v6 109 | with: 110 | context: . 111 | platforms: linux/amd64,linux/arm64 112 | push: true 113 | tags: ${{ steps.meta.outputs.tags }} 114 | labels: ${{ steps.meta.outputs.labels }} 115 | build-args: VERSION=${{ github.ref_name }} 116 | cache-from: type=gha 117 | cache-to: type=gha,mode=max 118 | provenance: true 119 | sbom: true 120 | 121 | - name: Generate attestation 122 | uses: actions/attest-build-provenance@v1 123 | with: 124 | subject-name: ghcr.io/pyrohost/elytra 125 | subject-digest: ${{ steps.build.outputs.digest }} 126 | push-to-registry: true 127 | -------------------------------------------------------------------------------- /src/loggers/cli/cli.go: -------------------------------------------------------------------------------- 1 | package cli 2 | 3 | import ( 4 | "fmt" 5 | "io" 6 | "os" 7 | "strings" 8 | "sync" 9 | "time" 10 | 11 | "emperror.dev/errors" 12 | "github.com/apex/log" 13 | "github.com/apex/log/handlers/cli" 14 | color2 "github.com/fatih/color" 15 | "github.com/mattn/go-colorable" 16 | ) 17 | 18 | var ( 19 | Default = New(os.Stderr, true) 20 | bold = color2.New(color2.Bold) 21 | boldred = color2.New(color2.Bold, color2.FgRed) 22 | ) 23 | 24 | var Strings = [...]string{ 25 | log.DebugLevel: "DEBUG", 26 | log.InfoLevel: " INFO", 27 | log.WarnLevel: " WARN", 28 | log.ErrorLevel: "ERROR", 29 | log.FatalLevel: "FATAL", 30 | } 31 | 32 | type Handler struct { 33 | mu sync.Mutex 34 | Writer io.Writer 35 | Padding int 36 | } 37 | 38 | func New(w io.Writer, useColors bool) *Handler { 39 | if f, ok := w.(*os.File); ok { 40 | if useColors { 41 | return &Handler{Writer: colorable.NewColorable(f), Padding: 2} 42 | } 43 | } 44 | 45 | return &Handler{Writer: colorable.NewNonColorable(w), Padding: 2} 46 | } 47 | 48 | // HandleLog implements log.Handler. 
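Wiring the handler into apex/log is a one-liner; a minimal sketch using the package-level Default handler defined above (the field values are made up):

package main

import (
	"errors"

	"github.com/apex/log"

	"github.com/pyrohost/elytra/src/loggers/cli"
)

func main() {
	// Route all apex/log output through the colorized CLI handler.
	log.SetHandler(cli.Default)
	log.SetLevel(log.DebugLevel)

	log.WithField("server", "0e4059ca").Info("server is booting")
	log.WithError(errors.New("disk quota exceeded")).Error("failed to write file")
}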
49 | func (h *Handler) HandleLog(e *log.Entry) error { 50 | color := cli.Colors[e.Level] 51 | level := Strings[e.Level] 52 | names := e.Fields.Names() 53 | 54 | h.mu.Lock() 55 | defer h.mu.Unlock() 56 | 57 | color.Fprintf(h.Writer, "%s: [%s] %-25s", bold.Sprintf("%*s", h.Padding+1, level), time.Now().Format(time.StampMilli), e.Message) 58 | 59 | for _, name := range names { 60 | if name == "source" { 61 | continue 62 | } 63 | fmt.Fprintf(h.Writer, " %s=%v", color.Sprint(name), e.Fields.Get(name)) 64 | } 65 | 66 | fmt.Fprintln(h.Writer) 67 | 68 | for _, name := range names { 69 | if name != "error" { 70 | continue 71 | } 72 | 73 | if err, ok := e.Fields.Get("error").(error); ok { 74 | // Attach the stacktrace if it is missing at this point, but don't point 75 | // it specifically to this line since that is irrelevant. 76 | err = errors.WithStackDepthIf(err, 4) 77 | formatted := fmt.Sprintf("\n%s\n%+v\n\n", boldred.Sprintf("Stacktrace:"), err) 78 | 79 | if !strings.Contains(formatted, "runtime.goexit") { 80 | _, _ = fmt.Fprint(h.Writer, formatted) 81 | break 82 | } 83 | 84 | // Inserts a new-line between sections of a stack. 85 | // When wrapping errors, you get multiple separate stacks that start with their message, 86 | // this allows us to separate them with a new-line and view them more easily. 87 | // 88 | // For example: 89 | // 90 | // Stacktrace: 91 | // readlink test: no such file or directory 92 | // failed to read symlink target for 'test' 93 | // github.com/pyrohost/elytra/src/server/filesystem.(*Archive).addToArchive 94 | // github.com/pyrohost/elytra/src/server/filesystem/archive.go:166 95 | // ... (Truncated the stack for easier reading) 96 | // runtime.goexit 97 | // runtime/asm_amd64.s:1374 98 | // **NEW LINE INSERTED HERE** 99 | // backup: error while generating server backup 100 | // github.com/pyrohost/elytra/src/server.(*Server).Backup 101 | // github.com/pyrohost/elytra/src/server/backup.go:84 102 | // ... (Truncated the stack for easier reading) 103 | // runtime.goexit 104 | // runtime/asm_amd64.s:1374 105 | // 106 | var b strings.Builder 107 | var endOfStack bool 108 | for _, s := range strings.Split(formatted, "\n") { 109 | b.WriteString(s + "\n") 110 | 111 | if s == "runtime.goexit" { 112 | endOfStack = true 113 | continue 114 | } 115 | 116 | if !endOfStack { 117 | continue 118 | } 119 | 120 | b.WriteString("\n") 121 | endOfStack = false 122 | } 123 | 124 | _, _ = fmt.Fprint(h.Writer, b.String()) 125 | } 126 | 127 | // Only one key with the name "error" can be in the map. 128 | break 129 | } 130 | 131 | return nil 132 | } 133 | -------------------------------------------------------------------------------- /src/server/transfer/transfer.go: -------------------------------------------------------------------------------- 1 | package transfer 2 | 3 | import ( 4 | "context" 5 | "time" 6 | 7 | "github.com/apex/log" 8 | "github.com/mitchellh/colorstring" 9 | 10 | "github.com/pyrohost/elytra/src/server" 11 | "github.com/pyrohost/elytra/src/system" 12 | ) 13 | 14 | // Status represents the current status of a transfer. 15 | type Status string 16 | 17 | // String satisfies the fmt.Stringer interface. 18 | func (s Status) String() string { 19 | return string(s) 20 | } 21 | 22 | const ( 23 | // StatusPending is the status of a transfer when it is first created. 24 | StatusPending Status = "pending" 25 | // StatusProcessing is the status of a transfer when it is currently in 26 | // progress, such as when the archive is being streamed to the target node. 
27 | StatusProcessing Status = "processing" 28 | 29 | // StatusCancelling is the status of a transfer when it is in the process of 30 | // being cancelled. 31 | StatusCancelling Status = "cancelling" 32 | 33 | // StatusCancelled is the final status of a transfer when it has been 34 | // cancelled. 35 | StatusCancelled Status = "cancelled" 36 | // StatusFailed is the final status of a transfer when it has failed. 37 | StatusFailed Status = "failed" 38 | // StatusCompleted is the final status of a transfer when it has completed. 39 | StatusCompleted Status = "completed" 40 | ) 41 | 42 | // Transfer represents a transfer of a server from one node to another. 43 | type Transfer struct { 44 | // ctx is the context for the transfer. 45 | ctx context.Context 46 | // cancel is used to cancel all ongoing transfer operations for the server. 47 | cancel *context.CancelFunc 48 | 49 | // Server associated with the transfer. 50 | Server *server.Server 51 | // status of the transfer. 52 | status *system.Atomic[Status] 53 | 54 | // archive is the archive that is being created for the transfer. 55 | archive *Archive 56 | } 57 | 58 | // New returns a new transfer instance for the given server. 59 | func New(ctx context.Context, s *server.Server) *Transfer { 60 | ctx, cancel := context.WithCancel(ctx) 61 | 62 | return &Transfer{ 63 | ctx: ctx, 64 | cancel: &cancel, 65 | 66 | Server: s, 67 | status: system.NewAtomic(StatusPending), 68 | } 69 | } 70 | 71 | // Context returns the context for the transfer. 72 | func (t *Transfer) Context() context.Context { 73 | return t.ctx 74 | } 75 | 76 | // Cancel cancels the transfer. 77 | func (t *Transfer) Cancel() { 78 | status := t.Status() 79 | if status == StatusCancelling || 80 | status == StatusCancelled || 81 | status == StatusCompleted || 82 | status == StatusFailed { 83 | return 84 | } 85 | 86 | if t.cancel == nil { 87 | return 88 | } 89 | 90 | t.SetStatus(StatusCancelling) 91 | (*t.cancel)() 92 | } 93 | 94 | // Status returns the current status of the transfer. 95 | func (t *Transfer) Status() Status { 96 | return t.status.Load() 97 | } 98 | 99 | // SetStatus sets the status of the transfer. 100 | func (t *Transfer) SetStatus(s Status) { 101 | // TODO: prevent certain status changes from happening. 102 | // If we are cancelling, then we can't go back to processing. 103 | t.status.Store(s) 104 | 105 | t.Server.Events().Publish(server.TransferStatusEvent, s) 106 | } 107 | 108 | // SendMessage sends a message to the server's console. 109 | func (t *Transfer) SendMessage(v string) { 110 | t.Server.Events().Publish( 111 | server.TransferLogsEvent, 112 | colorstring.Color("[yellow][bold]"+time.Now().Format(time.RFC1123)+" [Transfer System] [Source Node]:[default] "+v), 113 | ) 114 | } 115 | 116 | // Error logs an error that occurred on the source node. 117 | func (t *Transfer) Error(err error, v string) { 118 | t.Log().WithError(err).Error(v) 119 | t.SendMessage(v) 120 | } 121 | 122 | // Log returns a logger for the transfer. 
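// If the transfer has no server attached, a bare logger tagged with the
// transfer subsystem is returned instead.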
123 | func (t *Transfer) Log() *log.Entry { 124 | if t.Server == nil { 125 | return log.WithField("subsystem", "transfer") 126 | } 127 | return t.Server.Log().WithField("subsystem", "transfer") 128 | } 129 | -------------------------------------------------------------------------------- /src/system/system.go: -------------------------------------------------------------------------------- 1 | package system 2 | 3 | import ( 4 | "context" 5 | "runtime" 6 | 7 | "github.com/acobaugh/osrelease" 8 | "github.com/docker/docker/api/types" 9 | "github.com/docker/docker/api/types/system" 10 | "github.com/docker/docker/client" 11 | "github.com/docker/docker/pkg/parsers/kernel" 12 | ) 13 | 14 | type Information struct { 15 | Version string `json:"version"` 16 | Docker DockerInformation `json:"docker"` 17 | System System `json:"system"` 18 | } 19 | 20 | type DockerInformation struct { 21 | Version string `json:"version"` 22 | Cgroups DockerCgroups `json:"cgroups"` 23 | Containers DockerContainers `json:"containers"` 24 | Storage DockerStorage `json:"storage"` 25 | Runc DockerRunc `json:"runc"` 26 | } 27 | 28 | type DockerCgroups struct { 29 | Driver string `json:"driver"` 30 | Version string `json:"version"` 31 | } 32 | 33 | type DockerContainers struct { 34 | Total int `json:"total"` 35 | Running int `json:"running"` 36 | Paused int `json:"paused"` 37 | Stopped int `json:"stopped"` 38 | } 39 | 40 | type DockerStorage struct { 41 | Driver string `json:"driver"` 42 | Filesystem string `json:"filesystem"` 43 | } 44 | 45 | type DockerRunc struct { 46 | Version string `json:"version"` 47 | } 48 | 49 | type System struct { 50 | Architecture string `json:"architecture"` 51 | CPUThreads int `json:"cpu_threads"` 52 | MemoryBytes int64 `json:"memory_bytes"` 53 | KernelVersion string `json:"kernel_version"` 54 | OS string `json:"os"` 55 | OSType string `json:"os_type"` 56 | } 57 | 58 | func GetSystemInformation() (*Information, error) { 59 | k, err := kernel.GetKernelVersion() 60 | if err != nil { 61 | return nil, err 62 | } 63 | 64 | version, info, err := GetDockerInfo(context.Background()) 65 | if err != nil { 66 | return nil, err 67 | } 68 | 69 | release, err := osrelease.Read() 70 | if err != nil { 71 | return nil, err 72 | } 73 | 74 | var os string 75 | if release["PRETTY_NAME"] != "" { 76 | os = release["PRETTY_NAME"] 77 | } else if release["NAME"] != "" { 78 | os = release["NAME"] 79 | } else { 80 | os = info.OperatingSystem 81 | } 82 | 83 | var filesystem string 84 | for _, v := range info.DriverStatus { 85 | if v[0] != "Backing Filesystem" { 86 | continue 87 | } 88 | filesystem = v[1] 89 | break 90 | } 91 | 92 | return &Information{ 93 | Version: Version, 94 | Docker: DockerInformation{ 95 | Version: version.Version, 96 | Cgroups: DockerCgroups{ 97 | Driver: info.CgroupDriver, 98 | Version: info.CgroupVersion, 99 | }, 100 | Containers: DockerContainers{ 101 | Total: info.Containers, 102 | Running: info.ContainersRunning, 103 | Paused: info.ContainersPaused, 104 | Stopped: info.ContainersStopped, 105 | }, 106 | Storage: DockerStorage{ 107 | Driver: info.Driver, 108 | Filesystem: filesystem, 109 | }, 110 | Runc: DockerRunc{ 111 | Version: info.RuncCommit.ID, 112 | }, 113 | }, 114 | System: System{ 115 | Architecture: runtime.GOARCH, 116 | CPUThreads: runtime.NumCPU(), 117 | MemoryBytes: info.MemTotal, 118 | KernelVersion: k.String(), 119 | OS: os, 120 | OSType: runtime.GOOS, 121 | }, 122 | }, nil 123 | } 124 | 125 | func GetDockerInfo(ctx context.Context) (types.Version, system.Info, error) { 126 | // 
TODO: find a way to re-use the client from the docker environment.
127 | c, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
128 | if err != nil {
129 | return types.Version{}, system.Info{}, err
130 | }
131 | defer c.Close()
132 |
133 | dockerVersion, err := c.ServerVersion(ctx)
134 | if err != nil {
135 | return types.Version{}, system.Info{}, err
136 | }
137 |
138 | dockerInfo, err := c.Info(ctx)
139 | if err != nil {
140 | return types.Version{}, system.Info{}, err
141 | }
142 |
143 | return dockerVersion, dockerInfo, nil
144 | }
145 | -------------------------------------------------------------------------------- /src/system/sink_pool.go: --------------------------------------------------------------------------------
1 | package system
2 |
3 | import (
4 | "sync"
5 | "time"
6 | )
7 |
8 | // SinkName represents one of the registered sinks for a server.
9 | type SinkName string
10 |
11 | const (
12 | // LogSink handles console output for game servers, including messages being
13 | // sent via Elytra to the console instance.
14 | LogSink SinkName = "log"
15 | // InstallSink handles installation output for a server.
16 | InstallSink SinkName = "install"
17 | )
18 |
19 | // SinkPool represents a pool with sinks.
20 | type SinkPool struct {
21 | mu sync.RWMutex
22 | sinks []chan []byte
23 | }
24 |
25 | // NewSinkPool returns a new empty SinkPool. A sink pool generally lives with a
26 | // server instance for its full lifetime.
27 | func NewSinkPool() *SinkPool {
28 | return &SinkPool{}
29 | }
30 |
31 | // On adds a channel to the sink pool instance.
32 | func (p *SinkPool) On(c chan []byte) {
33 | p.mu.Lock()
34 | defer p.mu.Unlock()
35 | p.sinks = append(p.sinks, c)
36 | }
37 |
38 | // Off removes a given channel from the sink pool. If no matching sink is found
39 | // this function is a no-op. If a matching channel is found, it will be removed.
40 | func (p *SinkPool) Off(c chan []byte) {
41 | p.mu.Lock()
42 | defer p.mu.Unlock()
43 |
44 | sinks := p.sinks
45 | for i, sink := range sinks {
46 | if c != sink {
47 | continue
48 | }
49 |
50 | // We need to maintain the order of the sinks in the slice we're tracking,
51 | // so shift everything to the left, rather than changing the order of the
52 | // elements.
53 | copy(sinks[i:], sinks[i+1:])
54 | sinks[len(sinks)-1] = nil
55 | sinks = sinks[:len(sinks)-1]
56 | p.sinks = sinks
57 |
58 | // Avoid a panic if the sink channel is nil at this point.
59 | if c != nil {
60 | close(c)
61 | }
62 |
63 | return
64 | }
65 | }
66 |
67 | // Destroy destroys the pool by removing and closing all sinks and destroying
68 | // all of the channels that are present.
69 | func (p *SinkPool) Destroy() {
70 | p.mu.Lock()
71 | defer p.mu.Unlock()
72 | for _, c := range p.sinks {
73 | if c != nil {
74 | close(c)
75 | }
76 | }
77 | p.sinks = nil
78 | }
79 |
80 | // Push sends a given message to each of the channels registered in the pool.
81 | // This will use a Ring Buffer channel in order to avoid blocking the channel
82 | // sends, and attempt to push through the most recent messages in the queue in
83 | // favor of the oldest messages.
84 | //
85 | // If the channel becomes full and isn't being drained fast enough, this
86 | // function will remove the oldest message in the channel, and then push the
87 | // message that it got onto the end, effectively making the channel a rolling
88 | // buffer.
89 | //
90 | // There is a potential for data to be lost when passing it through this
91 | // function, but only in instances where the channel buffer is full and the
92 | // channel is not drained fast enough, in which case dropping messages is most
93 | // likely the best option anyway. This uses waitgroups to allow every channel
94 | // to attempt its send concurrently, thus making the total blocking time of this
95 | // function "O(1)" instead of "O(n)".
96 | func (p *SinkPool) Push(data []byte) {
97 | p.mu.RLock()
98 | defer p.mu.RUnlock()
99 |
100 | var wg sync.WaitGroup
101 | wg.Add(len(p.sinks))
102 | for _, c := range p.sinks {
103 | go func(c chan []byte) {
104 | defer wg.Done()
105 | select {
106 | case c <- data:
107 | case <-time.After(10 * time.Millisecond):
108 | // If we cannot send the message to the channel within 10ms,
109 | // then try to drop the oldest message from the channel, then
110 | // send our message.
111 | select {
112 | case <-c:
113 | // Only attempt to send the message if we were able to make
114 | // space for it on the channel.
115 | select {
116 | case c <- data:
117 | default:
118 | }
119 | default:
120 | // Do nothing, this is a fallthrough if there is nothing to
121 | // read from c.
122 | }
123 | }
124 | }(c)
125 | }
126 | wg.Wait()
127 | }
128 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: --------------------------------------------------------------------------------
1 | # Contributing to Elytra
2 |
3 | Thank you for your interest in contributing to Elytra! This document provides guidelines for contributing to the project.
4 |
5 | ## Development Setup
6 |
7 | 1. Fork the repository
8 | 2. Clone your fork: `git clone https://github.com/yourusername/elytra.git`
9 | 3. Create a feature branch: `git checkout -b feature/your-feature-name`
10 | 4. Make your changes
11 | 5. Test your changes: `go test ./...`
12 | 6. Commit your changes using conventional commits (see below)
13 | 7. Push to your fork and create a pull request
14 |
15 | ## Conventional Commits
16 |
17 | This project uses [Conventional Commits](https://www.conventionalcommits.org/) for automatic versioning and changelog generation. All commit messages must follow this format:
18 |
19 | ```none
20 | <type>[optional scope]: <description>
21 |
22 | [optional body]
23 |
24 | [optional footer(s)]
25 | ```
26 |
27 | ### Commit Types
28 |
29 | - **feat**: A new feature (triggers MINOR version bump)
30 | - **fix**: A bug fix (triggers PATCH version bump)
31 | - **docs**: Documentation only changes (no version bump)
32 | - **style**: Changes that do not affect the meaning of the code (no version bump)
33 | - **refactor**: A code change that neither fixes a bug nor adds a feature (no version bump)
34 | - **perf**: A performance improvement (triggers PATCH version bump)
35 | - **test**: Adding missing tests or correcting existing tests (no version bump)
36 | - **chore**: Changes to the build process or auxiliary tools (no version bump)
37 |
38 | ### Breaking Changes
39 |
40 | To indicate breaking changes, add an exclamation mark after the type:
41 |
42 | - **feat!**: A new feature with breaking changes (triggers MAJOR version bump)
43 | - **fix!**: A bug fix with breaking changes (triggers MAJOR version bump)
44 |
45 | You can also include `BREAKING CHANGE:` in the commit body or footer.
46 | 47 | ### Examples 48 | 49 | ```bash 50 | # Feature addition (minor bump) 51 | feat: add support for rustic backup encryption 52 | 53 | # Bug fix (patch bump) 54 | fix: resolve server startup crash on invalid config 55 | 56 | # Breaking change (major bump) 57 | feat!: change default configuration format from TOML to YAML 58 | 59 | # Breaking change with body 60 | feat: add new authentication system 61 | 62 | BREAKING CHANGE: the old authentication tokens are no longer supported 63 | ``` 64 | 65 | ### Scopes 66 | 67 | Optionally, you can add a scope to provide additional context: 68 | 69 | - `feat(backup): add compression options` 70 | - `fix(sftp): resolve permission issues` 71 | - `docs(readme): update installation instructions` 72 | 73 | ## Git Message Template 74 | 75 | To help with conventional commits, you can use our git message template: 76 | 77 | ```bash 78 | git config commit.template .gitmessage 79 | ``` 80 | 81 | ## Release Process 82 | 83 | Our release process is fully automated: 84 | 85 | 1. **Development**: Work on the `main` branch 86 | 2. **Release**: Merge to `production` branch 87 | 3. **Automatic**: CI generates version, builds, and releases based on conventional commits 88 | 89 | ### Version Bumping 90 | 91 | - **MAJOR** (1.0.0 → 2.0.0): Breaking changes (`feat!`, `fix!`, or `BREAKING CHANGE:`) 92 | - **MINOR** (1.0.0 → 1.1.0): New features (`feat:`) 93 | - **PATCH** (1.0.0 → 1.0.1): Bug fixes (`fix:`, `perf:`) 94 | 95 | ## Pull Request Guidelines 96 | 97 | 1. Use conventional commit format in your PR title 98 | 2. Provide a clear description of your changes 99 | 3. Include tests for new features 100 | 4. Update documentation if needed 101 | 5. Ensure all tests pass 102 | 6. Keep PRs focused on a single change 103 | 104 | ## Testing 105 | 106 | Run tests before submitting your PR: 107 | 108 | ```bash 109 | # Run all tests 110 | go test ./... 111 | 112 | # Run tests with race detection 113 | go test -race ./... 114 | 115 | # Build the project 116 | make build 117 | ``` 118 | 119 | ## Questions? 120 | 121 | If you have questions about contributing, please open an issue or reach out to the maintainers. 122 | 123 | Thank you for contributing to Elytra! 124 | -------------------------------------------------------------------------------- /src/environment/allocations.go: -------------------------------------------------------------------------------- 1 | package environment 2 | 3 | import ( 4 | "fmt" 5 | "strconv" 6 | 7 | "github.com/docker/go-connections/nat" 8 | 9 | "github.com/pyrohost/elytra/src/config" 10 | ) 11 | 12 | // Defines the allocations available for a given server. When using the Docker environment 13 | // driver these correspond to mappings for the container that allow external connections. 14 | type Allocations struct { 15 | // ForceOutgoingIP causes a dedicated bridge network to be created for the 16 | // server with a special option, causing Docker to SNAT outgoing traffic to 17 | // the DefaultMapping's IP. This is important to servers which rely on external 18 | // services that check the IP of the server (Source Engine servers, for example). 19 | ForceOutgoingIP bool `json:"force_outgoing_ip"` 20 | // Defines the default allocation that should be used for this server. This is 21 | // what will be used for {SERVER_IP} and {SERVER_PORT} when modifying configuration 22 | // files or the startup arguments for a server. 
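// For example, a default mapping of 192.168.1.10 on port 25565 is what gets
// substituted for {SERVER_IP} and {SERVER_PORT} respectively.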
23 | DefaultMapping struct { 24 | Ip string `json:"ip"` 25 | Port int `json:"port"` 26 | } `json:"default"` 27 | 28 | // Mappings contains all the ports that should be assigned to a given server 29 | // attached to the IP they correspond to. 30 | Mappings map[string][]int `json:"mappings"` 31 | } 32 | 33 | // Converts the server allocation mappings into a format that can be understood by Docker. While 34 | // we do strive to support multiple environments, using Docker's standardized format for the 35 | // bindings certainly makes life a little easier for managing things. 36 | // 37 | // You'll want to use DockerBindings() if you need to re-map 127.0.0.1 to the Docker interface. 38 | func (a *Allocations) Bindings() nat.PortMap { 39 | out := nat.PortMap{} 40 | 41 | for ip, ports := range a.Mappings { 42 | for _, port := range ports { 43 | // Skip over invalid ports. 44 | if port < 1 || port > 65535 { 45 | continue 46 | } 47 | 48 | binding := nat.PortBinding{ 49 | HostIP: ip, 50 | HostPort: strconv.Itoa(port), 51 | } 52 | 53 | tcp := nat.Port(fmt.Sprintf("%d/tcp", port)) 54 | udp := nat.Port(fmt.Sprintf("%d/udp", port)) 55 | 56 | out[tcp] = append(out[tcp], binding) 57 | out[udp] = append(out[udp], binding) 58 | } 59 | } 60 | 61 | return out 62 | } 63 | 64 | // Returns the bindings for the server in a way that is supported correctly by Docker. This replaces 65 | // any reference to 127.0.0.1 with the IP of the pterodactyl0 network interface which will allow the 66 | // server to operate on a local address while still being accessible by other containers. 67 | func (a *Allocations) DockerBindings() nat.PortMap { 68 | iface := config.Get().Docker.Network.Interface 69 | 70 | out := a.Bindings() 71 | // Loop over all the bindings for this container, and convert any that reference 127.0.0.1 72 | // to use the pterodactyl0 network interface IP, as that is the true local for what people are 73 | // trying to do when creating servers. 74 | for p, binds := range out { 75 | for i, alloc := range binds { 76 | if alloc.HostIP != "127.0.0.1" { 77 | continue 78 | } 79 | 80 | // If using ISPN just delete the local allocation from the server. 81 | if config.Get().Docker.Network.ISPN { 82 | out[p] = append(out[p][:i], out[p][i+1:]...) 83 | } else { 84 | out[p][i] = nat.PortBinding{ 85 | HostIP: iface, 86 | HostPort: alloc.HostPort, 87 | } 88 | } 89 | } 90 | } 91 | 92 | return out 93 | } 94 | 95 | // Converts the server allocation mappings into a PortSet that can be understood 96 | // by Docker. This formatting is slightly different than "Bindings" as it should 97 | // return an empty struct rather than a binding. 98 | // 99 | // To accomplish this, we'll just get the values from "DockerBindings" and then set them 100 | // to empty structs. Because why not. 101 | func (a *Allocations) Exposed() nat.PortSet { 102 | out := nat.PortSet{} 103 | 104 | for port := range a.DockerBindings() { 105 | out[port] = struct{}{} 106 | } 107 | 108 | return out 109 | } 110 | -------------------------------------------------------------------------------- /src/system/locker_test.go: -------------------------------------------------------------------------------- 1 | package system 2 | 3 | import ( 4 | "context" 5 | "testing" 6 | "time" 7 | 8 | "emperror.dev/errors" 9 | . 
"github.com/franela/goblin" 10 | ) 11 | 12 | func TestPower(t *testing.T) { 13 | g := Goblin(t) 14 | 15 | g.Describe("Locker", func() { 16 | var l *Locker 17 | g.BeforeEach(func() { 18 | l = NewLocker() 19 | }) 20 | 21 | g.Describe("PowerLocker#IsLocked", func() { 22 | g.It("should return false when the channel is empty", func() { 23 | g.Assert(cap(l.ch)).Equal(1) 24 | g.Assert(l.IsLocked()).IsFalse() 25 | }) 26 | 27 | g.It("should return true when the channel is at capacity", func() { 28 | l.ch <- true 29 | 30 | g.Assert(l.IsLocked()).IsTrue() 31 | <-l.ch 32 | g.Assert(l.IsLocked()).IsFalse() 33 | 34 | // We don't care what the channel value is, just that there is 35 | // something in it. 36 | l.ch <- false 37 | g.Assert(l.IsLocked()).IsTrue() 38 | g.Assert(cap(l.ch)).Equal(1) 39 | }) 40 | }) 41 | 42 | g.Describe("PowerLocker#Acquire", func() { 43 | g.It("should acquire a lock when channel is empty", func() { 44 | err := l.Acquire() 45 | 46 | g.Assert(err).IsNil() 47 | g.Assert(cap(l.ch)).Equal(1) 48 | g.Assert(len(l.ch)).Equal(1) 49 | }) 50 | 51 | g.It("should return an error when the channel is full", func() { 52 | l.ch <- true 53 | 54 | err := l.Acquire() 55 | 56 | g.Assert(err).IsNotNil() 57 | g.Assert(errors.Is(err, ErrLockerLocked)).IsTrue() 58 | g.Assert(cap(l.ch)).Equal(1) 59 | g.Assert(len(l.ch)).Equal(1) 60 | }) 61 | }) 62 | 63 | g.Describe("PowerLocker#TryAcquire", func() { 64 | g.It("should acquire a lock when channel is empty", func() { 65 | g.Timeout(time.Second) 66 | 67 | err := l.TryAcquire(context.Background()) 68 | 69 | g.Assert(err).IsNil() 70 | g.Assert(cap(l.ch)).Equal(1) 71 | g.Assert(len(l.ch)).Equal(1) 72 | g.Assert(l.IsLocked()).IsTrue() 73 | }) 74 | 75 | g.It("should block until context is canceled if channel is full", func() { 76 | g.Timeout(time.Second) 77 | ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond*500) 78 | defer cancel() 79 | 80 | l.ch <- true 81 | err := l.TryAcquire(ctx) 82 | 83 | g.Assert(err).IsNotNil() 84 | g.Assert(errors.Is(err, ErrLockerLocked)).IsTrue() 85 | g.Assert(cap(l.ch)).Equal(1) 86 | g.Assert(len(l.ch)).Equal(1) 87 | g.Assert(l.IsLocked()).IsTrue() 88 | }) 89 | 90 | g.It("should block until lock can be acquired", func() { 91 | g.Timeout(time.Second) 92 | 93 | ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond*200) 94 | defer cancel() 95 | 96 | l.Acquire() 97 | go func() { 98 | time.AfterFunc(time.Millisecond*50, func() { 99 | l.Release() 100 | }) 101 | }() 102 | 103 | err := l.TryAcquire(ctx) 104 | g.Assert(err).IsNil() 105 | g.Assert(cap(l.ch)).Equal(1) 106 | g.Assert(len(l.ch)).Equal(1) 107 | g.Assert(l.IsLocked()).IsTrue() 108 | }) 109 | }) 110 | 111 | g.Describe("PowerLocker#Release", func() { 112 | g.It("should release when channel is full", func() { 113 | l.Acquire() 114 | g.Assert(l.IsLocked()).IsTrue() 115 | l.Release() 116 | g.Assert(cap(l.ch)).Equal(1) 117 | g.Assert(len(l.ch)).Equal(0) 118 | g.Assert(l.IsLocked()).IsFalse() 119 | }) 120 | 121 | g.It("should release when channel is empty", func() { 122 | g.Assert(l.IsLocked()).IsFalse() 123 | l.Release() 124 | g.Assert(cap(l.ch)).Equal(1) 125 | g.Assert(len(l.ch)).Equal(0) 126 | g.Assert(l.IsLocked()).IsFalse() 127 | }) 128 | }) 129 | 130 | g.Describe("PowerLocker#Destroy", func() { 131 | g.It("should unlock and close the channel", func() { 132 | l.Acquire() 133 | g.Assert(l.IsLocked()).IsTrue() 134 | l.Destroy() 135 | g.Assert(l.IsLocked()).IsFalse() 136 | 137 | defer func() { 138 | r := recover() 139 | 140 | 
g.Assert(r).IsNotNil() 141 | g.Assert(r.(error).Error()).Equal("send on closed channel") 142 | }() 143 | 144 | l.Acquire() 145 | }) 146 | }) 147 | }) 148 | } 149 | -------------------------------------------------------------------------------- /src/router/router_server_transfer.go: -------------------------------------------------------------------------------- 1 | package router 2 | 3 | import ( 4 | "context" 5 | "net/http" 6 | "strings" 7 | "time" 8 | 9 | "emperror.dev/errors" 10 | "github.com/gin-gonic/gin" 11 | 12 | "github.com/pyrohost/elytra/src/environment" 13 | "github.com/pyrohost/elytra/src/router/middleware" 14 | "github.com/pyrohost/elytra/src/server" 15 | "github.com/pyrohost/elytra/src/server/installer" 16 | "github.com/pyrohost/elytra/src/server/transfer" 17 | ) 18 | 19 | // Data passed over to initiate a server transfer. 20 | type serverTransferRequest struct { 21 | URL string `binding:"required" json:"url"` 22 | Token string `binding:"required" json:"token"` 23 | Server installer.ServerDetails `json:"server"` 24 | } 25 | 26 | // postServerTransfer handles the start of a transfer for a server. 27 | func postServerTransfer(c *gin.Context) { 28 | var data serverTransferRequest 29 | if err := c.BindJSON(&data); err != nil { 30 | return 31 | } 32 | 33 | s := ExtractServer(c) 34 | 35 | // Check if the server is already being transferred. 36 | // There will be another endpoint for resetting this value either by deleting the 37 | // server, or by canceling the transfer. 38 | if s.IsTransferring() { 39 | c.AbortWithStatusJSON(http.StatusConflict, gin.H{ 40 | "error": "A transfer is already in progress for this server.", 41 | }) 42 | return 43 | } 44 | 45 | manager := middleware.ExtractManager(c) 46 | 47 | notifyPanelOfFailure := func() { 48 | if err := manager.Client().SetTransferStatus(context.Background(), s.ID(), false); err != nil { 49 | s.Log().WithField("subsystem", "transfer"). 50 | WithField("status", false). 51 | WithError(err). 52 | Error("failed to set transfer status") 53 | } 54 | 55 | s.Events().Publish(server.TransferStatusEvent, "failure") 56 | s.SetTransferring(false) 57 | } 58 | 59 | // Block the server from starting while we are transferring it. 60 | s.SetTransferring(true) 61 | 62 | // Ensure the server is offline. Sometimes a "No such container" error gets through 63 | // which means the server is already stopped. We can ignore that. 64 | if s.Environment.State() != environment.ProcessOfflineState { 65 | if err := s.Environment.WaitForStop( 66 | s.Context(), 67 | time.Second*15, 68 | false, 69 | ); err != nil && !strings.Contains(strings.ToLower(err.Error()), "no such container") { 70 | s.SetTransferring(false) 71 | middleware.CaptureAndAbort(c, errors.Wrap(err, "failed to stop server for transfer")) 72 | return 73 | } 74 | } 75 | 76 | // Create a new transfer instance for this server. 77 | trnsfr := transfer.New(context.Background(), s) 78 | transfer.Outgoing().Add(trnsfr) 79 | 80 | go func() { 81 | defer transfer.Outgoing().Remove(trnsfr) 82 | 83 | if _, err := trnsfr.PushArchiveToTarget(data.URL, data.Token); err != nil { 84 | notifyPanelOfFailure() 85 | 86 | if err == context.Canceled { 87 | trnsfr.Log().Debug("canceled") 88 | trnsfr.SendMessage("Canceled.") 89 | return 90 | } 91 | 92 | trnsfr.Log().WithError(err).Error("failed to push archive to target") 93 | return 94 | } 95 | 96 | // DO NOT NOTIFY THE PANEL OF SUCCESS HERE. The only node that should send 97 | // a success status is the destination node. 
When we send a failure status, 98 | // the panel will automatically cancel the transfer and attempt to reset 99 | // the server state on the destination node, we just need to make sure 100 | // we clean up our statuses for failure. 101 | 102 | trnsfr.Log().Debug("transfer complete") 103 | }() 104 | 105 | c.Status(http.StatusAccepted) 106 | } 107 | 108 | // deleteServerTransfer cancels an outgoing transfer for a server. 109 | func deleteServerTransfer(c *gin.Context) { 110 | s := ExtractServer(c) 111 | 112 | if !s.IsTransferring() { 113 | c.AbortWithStatusJSON(http.StatusConflict, gin.H{ 114 | "error": "Server is not currently being transferred.", 115 | }) 116 | return 117 | } 118 | 119 | trnsfr := transfer.Outgoing().Get(s.ID()) 120 | if trnsfr == nil { 121 | c.AbortWithStatusJSON(http.StatusConflict, gin.H{ 122 | "error": "Server is not currently being transferred.", 123 | }) 124 | return 125 | } 126 | 127 | trnsfr.Cancel() 128 | 129 | c.Status(http.StatusAccepted) 130 | } 131 | -------------------------------------------------------------------------------- /src/server/backup/rustic_factory.go: -------------------------------------------------------------------------------- 1 | package backup 2 | 3 | import ( 4 | "context" 5 | 6 | "emperror.dev/errors" 7 | 8 | "github.com/pyrohost/elytra/src/remote" 9 | ) 10 | 11 | // NewRustic creates a new rustic backup instance (legacy interface) 12 | // somebody please refactor this - ellie 13 | func NewRustic(client remote.Client, uuid string, ignore string, backupType string, s3Creds *remote.S3Credentials, password string) *RusticBackup { 14 | config := Config{ 15 | BackupUUID: uuid, 16 | Password: password, 17 | BackupType: backupType, 18 | IgnorePatterns: ignore, 19 | } 20 | 21 | // Convert S3 credentials if provided 22 | if s3Creds != nil { 23 | config.S3Config = &S3Config{ 24 | Bucket: s3Creds.Bucket, 25 | Region: s3Creds.Region, 26 | Endpoint: s3Creds.Endpoint, 27 | AccessKeyID: s3Creds.AccessKeyID, 28 | SecretAccessKey: s3Creds.SecretAccessKey, 29 | SessionToken: s3Creds.SessionToken, 30 | ForcePathStyle: s3Creds.ForcePathStyle, 31 | } 32 | } 33 | 34 | backup, err := NewRusticBackup(client, config) 35 | if err != nil { 36 | // Return a broken backup for backward compatibility 37 | return &RusticBackup{ 38 | Backup: Backup{ 39 | client: client, 40 | Uuid: uuid, 41 | Ignore: ignore, 42 | adapter: AdapterType("rustic_" + backupType), 43 | }, 44 | config: config, 45 | } 46 | } 47 | 48 | return backup 49 | } 50 | 51 | // NewRusticWithServerPath creates a new rustic backup instance with server path (legacy interface) 52 | func NewRusticWithServerPath(client remote.Client, serverUuid string, backupUuid string, ignore string, backupType string, s3Creds *remote.S3Credentials, password string, repoPath string) *RusticBackup { 53 | config := Config{ 54 | BackupUUID: backupUuid, 55 | ServerUUID: serverUuid, 56 | Password: password, 57 | BackupType: backupType, 58 | LocalPath: repoPath, 59 | IgnorePatterns: ignore, 60 | } 61 | 62 | // Convert S3 credentials if provided 63 | if s3Creds != nil { 64 | config.S3Config = &S3Config{ 65 | Bucket: s3Creds.Bucket, 66 | Region: s3Creds.Region, 67 | Endpoint: s3Creds.Endpoint, 68 | AccessKeyID: s3Creds.AccessKeyID, 69 | SecretAccessKey: s3Creds.SecretAccessKey, 70 | SessionToken: s3Creds.SessionToken, 71 | ForcePathStyle: s3Creds.ForcePathStyle, 72 | } 73 | } 74 | 75 | backup, err := NewRusticBackup(client, config) 76 | if err != nil { 77 | // Return a broken backup for backward compatibility 78 | return 
&RusticBackup{ 79 | Backup: Backup{ 80 | client: client, 81 | Uuid: backupUuid, 82 | Ignore: ignore, 83 | adapter: AdapterType("rustic_" + backupType), 84 | }, 85 | config: config, 86 | } 87 | } 88 | 89 | return backup 90 | } 91 | 92 | // LocateRusticBySnapshotID finds a rustic backup by snapshot ID (preferred method) 93 | func LocateRusticBySnapshotID(client remote.Client, serverUuid string, snapshotID string, backupType string, s3Creds *remote.S3Credentials, password string, repoPath string) (*RusticBackup, error) { 94 | config := Config{ 95 | ServerUUID: serverUuid, 96 | Password: password, 97 | BackupType: backupType, 98 | LocalPath: repoPath, 99 | } 100 | 101 | // Convert S3 credentials if provided 102 | if s3Creds != nil { 103 | config.S3Config = &S3Config{ 104 | Bucket: s3Creds.Bucket, 105 | Region: s3Creds.Region, 106 | Endpoint: s3Creds.Endpoint, 107 | AccessKeyID: s3Creds.AccessKeyID, 108 | SecretAccessKey: s3Creds.SecretAccessKey, 109 | SessionToken: s3Creds.SessionToken, 110 | ForcePathStyle: s3Creds.ForcePathStyle, 111 | } 112 | } 113 | 114 | backup, err := NewRusticBackup(client, config) 115 | if err != nil { 116 | return nil, err 117 | } 118 | 119 | // For deletion or restore, set the snapshot ID directly 120 | if snapshotID == "" { 121 | // NULL snapshot ID means failed backup - nothing to delete 122 | backup.snapshot = nil 123 | return backup, nil 124 | } 125 | 126 | // Get the snapshot details from the repository 127 | snapshot, err := backup.repository.GetSnapshot(context.Background(), snapshotID) 128 | if err != nil { 129 | return nil, errors.Wrap(err, "failed to get snapshot details") 130 | } 131 | 132 | backup.snapshot = snapshot 133 | return backup, nil 134 | } 135 | -------------------------------------------------------------------------------- /src/internal/ufs/walk.go: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: BSD-3-Clause 2 | 3 | // Code in this file was derived from `go/src/io/fs/walk.go`. 4 | 5 | // Copyright 2020 The Go Authors. All rights reserved. 6 | // Use of this source code is governed by a BSD-style 7 | // license that can be found in the `go.LICENSE` file. 8 | 9 | package ufs 10 | 11 | import ( 12 | iofs "io/fs" 13 | "path" 14 | ) 15 | 16 | // SkipDir is used as a return value from [WalkDirFunc] to indicate that 17 | // the directory named in the call is to be skipped. It is not returned 18 | // as an error by any function. 19 | var SkipDir = iofs.SkipDir 20 | 21 | // SkipAll is used as a return value from [WalkDirFunc] to indicate that 22 | // all remaining files and directories are to be skipped. It is not returned 23 | // as an error by any function. 24 | var SkipAll = iofs.SkipAll 25 | 26 | // WalkDirFunc is the type of the function called by [WalkDir] to visit 27 | // each file or directory. 28 | // 29 | // The path argument contains the argument to [WalkDir] as a prefix. 30 | // That is, if WalkDir is called with root argument "dir" and finds a file 31 | // named "a" in that directory, the walk function will be called with 32 | // argument "dir/a". 33 | // 34 | // The d argument is the [DirEntry] for the named path. 35 | // 36 | // The error result returned by the function controls how [WalkDir] 37 | // continues. If the function returns the special value [SkipDir], WalkDir 38 | // skips the current directory (path if d.IsDir() is true, otherwise 39 | // path's parent directory). 
If the function returns the special value 40 | // [SkipAll], WalkDir skips all remaining files and directories. Otherwise, 41 | // if the function returns a non-nil error, WalkDir stops entirely and 42 | // returns that error. 43 | // 44 | // The err argument reports an error related to path, signaling that 45 | // [WalkDir] will not walk into that directory. The function can decide how 46 | // to handle that error; as described earlier, returning the error will 47 | // cause WalkDir to stop walking the entire tree. 48 | // 49 | // [WalkDir] calls the function with a non-nil err argument in two cases. 50 | // 51 | // First, if the initial [Stat] on the root directory fails, WalkDir 52 | // calls the function with path set to root, d set to nil, and err set to 53 | // the error from [fs.Stat]. 54 | // 55 | // Second, if a directory's ReadDir method (see [ReadDirFile]) fails, WalkDir calls the 56 | // function with path set to the directory's path, d set to an 57 | // [DirEntry] describing the directory, and err set to the error from 58 | // ReadDir. In this second case, the function is called twice with the 59 | // path of the directory: the first call is before the directory read is 60 | // attempted and has err set to nil, giving the function a chance to 61 | // return [SkipDir] or [SkipAll] and avoid the ReadDir entirely. The second call 62 | // is after a failed ReadDir and reports the error from ReadDir. 63 | // (If ReadDir succeeds, there is no second call.) 64 | type WalkDirFunc func(path string, d DirEntry, err error) error 65 | 66 | // WalkDir walks the file tree rooted at root, calling fn for each file or 67 | // directory in the tree, including root. 68 | // 69 | // All errors that arise visiting files and directories are filtered by fn: 70 | // see the [WalkDirFunc] documentation for details. 71 | // 72 | // The files are walked in lexical order, which makes the output deterministic 73 | // but requires WalkDir to read an entire directory into memory before proceeding 74 | // to walk that directory. 75 | // 76 | // WalkDir does not follow symbolic links found in directories, 77 | // but if root itself is a symbolic link, its target will be walked. 78 | func WalkDir(fs Filesystem, root string, fn WalkDirFunc) error { 79 | info, err := fs.Stat(root) 80 | if err != nil { 81 | err = fn(root, nil, err) 82 | } else { 83 | err = walkDir(fs, root, iofs.FileInfoToDirEntry(info), fn) 84 | } 85 | if err == SkipDir || err == SkipAll { 86 | return nil 87 | } 88 | return err 89 | } 90 | 91 | // walkDir recursively descends path, calling walkDirFn. 92 | func walkDir(fs Filesystem, name string, d DirEntry, walkDirFn WalkDirFunc) error { 93 | if err := walkDirFn(name, d, nil); err != nil || !d.IsDir() { 94 | if err == SkipDir && d.IsDir() { 95 | // Successfully skipped directory. 96 | err = nil 97 | } 98 | return err 99 | } 100 | 101 | dirs, err := fs.ReadDir(name) 102 | if err != nil { 103 | // Second call, to report ReadDir error. 
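// If walkDirFn handles the ReadDir error by returning nil, dirs is empty and
// the loop below is effectively a no-op.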
104 | err = walkDirFn(name, d, err) 105 | if err != nil { 106 | if err == SkipDir && d.IsDir() { 107 | err = nil 108 | } 109 | return err 110 | } 111 | } 112 | 113 | for _, d1 := range dirs { 114 | name1 := path.Join(name, d1.Name()) 115 | if err := walkDir(fs, name1, d1, walkDirFn); err != nil { 116 | if err == SkipDir { 117 | break 118 | } 119 | return err 120 | } 121 | } 122 | 123 | return nil 124 | } 125 | -------------------------------------------------------------------------------- /src/environment/environment.go: -------------------------------------------------------------------------------- 1 | package environment 2 | 3 | import ( 4 | "context" 5 | "time" 6 | 7 | "github.com/pyrohost/elytra/src/events" 8 | ) 9 | 10 | const ( 11 | StateChangeEvent = "state change" 12 | ResourceEvent = "resources" 13 | DockerImagePullStarted = "docker image pull started" 14 | DockerImagePullStatus = "docker image pull status" 15 | DockerImagePullCompleted = "docker image pull completed" 16 | ) 17 | 18 | const ( 19 | ProcessOfflineState = "offline" 20 | ProcessStartingState = "starting" 21 | ProcessRunningState = "running" 22 | ProcessStoppingState = "stopping" 23 | ) 24 | 25 | // Defines the basic interface that all environments need to implement so that 26 | // a server can be properly controlled. 27 | type ProcessEnvironment interface { 28 | // Returns the name of the environment. 29 | Type() string 30 | 31 | // Returns the environment configuration to the caller. 32 | Config() *Configuration 33 | 34 | // Returns an event emitter instance that can be hooked into to listen for different 35 | // events that are fired by the environment. This should not allow someone to publish 36 | // events, only subscribe to them. 37 | Events() *events.Bus 38 | 39 | // Determines if the server instance exists. For example, in a docker environment 40 | // this should confirm that the container is created and in a bootable state. In 41 | // a basic CLI environment this can probably just return true right away. 42 | Exists() (bool, error) 43 | 44 | // IsRunning determines if the environment is currently active and running 45 | // a server process for this specific server instance. 46 | IsRunning(ctx context.Context) (bool, error) 47 | 48 | // Performs an update of server resource limits without actually stopping the server 49 | // process. This only executes if the environment supports it, otherwise it is 50 | // a no-op. 51 | InSituUpdate() error 52 | 53 | // Runs before the environment is started. If an error is returned starting will 54 | // not occur, otherwise proceeds as normal. 55 | OnBeforeStart(ctx context.Context) error 56 | 57 | // Starts a server instance. If the server instance is not in a state where it 58 | // can be started an error should be returned. 59 | Start(ctx context.Context) error 60 | 61 | // Stop stops a server instance. If the server is already stopped an error will 62 | // not be returned, this function will act as a no-op. 63 | Stop(ctx context.Context) error 64 | 65 | // WaitForStop waits for a server instance to stop gracefully. If the server is 66 | // still detected as running after "duration", an error will be returned, or the server 67 | // will be terminated depending on the value of the second argument. If the context 68 | // provided is canceled the underlying wait conditions will be stopped and the 69 | // entire loop will be ended (potentially without stopping or terminating). 
70 | WaitForStop(ctx context.Context, duration time.Duration, terminate bool) error 71 | 72 | // Terminate stops a running server instance using the provided signal. This function 73 | // is a no-op if the server is already stopped. 74 | Terminate(ctx context.Context, signal string) error 75 | 76 | // Destroys the environment removing any containers that were created (in Docker 77 | // environments at least). 78 | Destroy() error 79 | 80 | // Returns the exit state of the process. The first result is the exit code, the second 81 | // determines if the process was killed by the system OOM killer. 82 | ExitState() (uint32, bool, error) 83 | 84 | // Creates the necessary environment for running the server process. For example, 85 | // in the Docker environment create will create a new container instance for the 86 | // server. 87 | Create() error 88 | 89 | // Attach attaches to the server console environment and allows piping the output 90 | // to a websocket or other internal tool to monitor output. Also allows you to later 91 | // send data into the environment's stdin. 92 | Attach(ctx context.Context) error 93 | 94 | // Sends the provided command to the running server instance. 95 | SendCommand(string) error 96 | 97 | // Reads the log file for the process from the end backwards until the provided 98 | // number of lines is met. 99 | Readlog(int) ([]string, error) 100 | 101 | // Returns the current state of the environment. 102 | State() string 103 | 104 | // Sets the current state of the environment. In general you should let the environment 105 | // handle this itself, but there are some scenarios where it is helpful for the server 106 | // to update the state externally (e.g. starting -> started). 107 | SetState(string) 108 | 109 | // Uptime returns the current environment uptime in milliseconds. This is 110 | // the time that has passed since it was last started. 111 | Uptime(ctx context.Context) (int64, error) 112 | 113 | // SetLogCallback sets the callback that the container's log output will be passed to. 114 | SetLogCallback(func([]byte)) 115 | } 116 | -------------------------------------------------------------------------------- /src/server/transfer/source.go: -------------------------------------------------------------------------------- 1 | package transfer 2 | 3 | import ( 4 | "context" 5 | "crypto/sha256" 6 | "encoding/hex" 7 | "errors" 8 | "fmt" 9 | "io" 10 | "mime/multipart" 11 | "net/http" 12 | "time" 13 | 14 | "github.com/pyrohost/elytra/src/internal/progress" 15 | ) 16 | 17 | // PushArchiveToTarget POSTs the archive to the target node and returns the 18 | // response body. 19 | func (t *Transfer) PushArchiveToTarget(url, token string) ([]byte, error) { 20 | ctx, cancel := context.WithCancel(t.ctx) 21 | defer cancel() 22 | 23 | t.SendMessage("Preparing to stream server data to destination...") 24 | t.SetStatus(StatusProcessing) 25 | 26 | a, err := t.Archive() 27 | if err != nil { 28 | t.Error(err, "Failed to get archive for transfer.") 29 | return nil, errors.New("failed to get archive for transfer") 30 | } 31 | 32 | t.SendMessage("Streaming archive to destination...") 33 | 34 | // Send the upload progress to the websocket every 5 seconds. 
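// A child context is used so the reporter goroutine can be stopped via
// cancel2 as soon as the upload finishes, without cancelling the transfer itself.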
35 | ctx2, cancel2 := context.WithCancel(ctx) 36 | defer cancel2() 37 | go func(ctx context.Context, p *progress.Progress, tc *time.Ticker) { 38 | defer tc.Stop() 39 | 40 | for { 41 | select { 42 | case <-ctx.Done(): 43 | return 44 | case <-tc.C: 45 | t.SendMessage("Uploading " + p.Progress(25)) 46 | } 47 | } 48 | }(ctx2, a.Progress(), time.NewTicker(5*time.Second)) 49 | 50 | // Create a new request using the pipe as the body. 51 | body, writer := io.Pipe() 52 | defer body.Close() 53 | defer writer.Close() 54 | req, err := http.NewRequestWithContext(ctx, http.MethodPost, url, body) 55 | if err != nil { 56 | return nil, err 57 | } 58 | req.Header.Set("Authorization", token) 59 | 60 | // Create a new multipart writer that writes the archive to the pipe. 61 | mp := multipart.NewWriter(writer) 62 | defer mp.Close() 63 | req.Header.Set("Content-Type", mp.FormDataContentType()) 64 | 65 | // Create a new goroutine to write the archive to the pipe used by the 66 | // multipart writer. 67 | errChan := make(chan error) 68 | go func() { 69 | defer close(errChan) 70 | defer writer.Close() 71 | defer mp.Close() 72 | 73 | src, pw := io.Pipe() 74 | defer src.Close() 75 | defer pw.Close() 76 | 77 | h := sha256.New() 78 | tee := io.TeeReader(src, h) 79 | 80 | dest, err := mp.CreateFormFile("archive", "archive.tar.gz") 81 | if err != nil { 82 | errChan <- errors.New("failed to create form file") 83 | return 84 | } 85 | 86 | ch := make(chan error) 87 | go func() { 88 | defer close(ch) 89 | 90 | if _, err := io.Copy(dest, tee); err != nil { 91 | ch <- fmt.Errorf("failed to stream archive to destination: %w", err) 92 | return 93 | } 94 | 95 | t.Log().Debug("finished copying dest to tee") 96 | }() 97 | 98 | if err := a.Stream(ctx, pw); err != nil { 99 | errChan <- errors.New("failed to stream archive to pipe") 100 | return 101 | } 102 | t.Log().Debug("finished streaming archive to pipe") 103 | 104 | // Close the pipe writer early to release resources and ensure that the data gets flushed. 105 | _ = pw.Close() 106 | 107 | // Wait for the copy to finish before we continue. 
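// The copy goroutine above can only finish once pw has been closed, because
// the TeeReader reads from the pipe and sees EOF only after the writer closes.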
108 | t.Log().Debug("waiting on copy to finish") 109 | if err := <-ch; err != nil { 110 | errChan <- err 111 | return 112 | } 113 | 114 | if err := mp.WriteField("checksum", hex.EncodeToString(h.Sum(nil))); err != nil { 115 | errChan <- errors.New("failed to stream checksum") 116 | return 117 | } 118 | 119 | cancel2() 120 | t.SendMessage("Finished streaming archive to destination.") 121 | 122 | if err := mp.Close(); err != nil { 123 | t.Log().WithError(err).Error("error while closing multipart writer") 124 | } 125 | t.Log().Debug("closed multipart writer") 126 | }() 127 | 128 | t.Log().Debug("sending archive to destination") 129 | client := http.Client{Timeout: 0} 130 | res, err := client.Do(req) 131 | if err != nil { 132 | t.Log().Debug("error while sending archive to destination") 133 | return nil, err 134 | } 135 | if res.StatusCode != http.StatusOK { 136 | return nil, fmt.Errorf("unexpected status code from destination: %d", res.StatusCode) 137 | } 138 | t.Log().Debug("waiting for stream to complete") 139 | select { 140 | case <-ctx.Done(): 141 | return nil, ctx.Err() 142 | case err2 := <-errChan: 143 | t.Log().Debug("stream completed") 144 | if err != nil || err2 != nil { 145 | if err == context.Canceled { 146 | return nil, err 147 | } 148 | 149 | t.Log().WithError(err).Debug("failed to send archive to destination") 150 | return nil, fmt.Errorf("http error: %w, multipart error: %v", err, err2) 151 | } 152 | defer res.Body.Close() 153 | t.Log().Debug("received response from destination") 154 | 155 | v, err := io.ReadAll(res.Body) 156 | if err != nil { 157 | return nil, fmt.Errorf("failed to read response body: %w", err) 158 | } 159 | 160 | if res.StatusCode != http.StatusOK { 161 | return nil, errors.New(string(v)) 162 | } 163 | 164 | return v, nil 165 | } 166 | } 167 | -------------------------------------------------------------------------------- /src/server/filesystem/errors.go: -------------------------------------------------------------------------------- 1 | package filesystem 2 | 3 | import ( 4 | "fmt" 5 | "path/filepath" 6 | 7 | "emperror.dev/errors" 8 | "github.com/apex/log" 9 | 10 | "github.com/pyrohost/elytra/src/internal/ufs" 11 | ) 12 | 13 | type ErrorCode string 14 | 15 | const ( 16 | ErrCodeIsDirectory ErrorCode = "E_ISDIR" 17 | ErrCodeDiskSpace ErrorCode = "E_NODISK" 18 | ErrCodeUnknownArchive ErrorCode = "E_UNKNFMT" 19 | ErrCodePathResolution ErrorCode = "E_BADPATH" 20 | ErrCodeDenylistFile ErrorCode = "E_DENYLIST" 21 | ErrCodeUnknownError ErrorCode = "E_UNKNOWN" 22 | ErrNotExist ErrorCode = "E_NOTEXIST" 23 | ) 24 | 25 | type Error struct { 26 | code ErrorCode 27 | // Contains the underlying error leading to this. This value may or may not be 28 | // present, it is entirely dependent on how this error was triggered. 29 | err error 30 | // This contains the value of the final destination that triggered this specific 31 | // error event. 32 | resolved string 33 | // This value is generally only present on errors stemming from a path resolution 34 | // error. For everything else you should be setting and reading the resolved path 35 | // value which will be far more useful. 36 | path string 37 | } 38 | 39 | // newFilesystemError returns a new error instance with a stack trace associated. 
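// A nil err is allowed here, in which case only the error code is recorded.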
40 | func newFilesystemError(code ErrorCode, err error) error {
41 | if err != nil {
42 | return errors.WithStackDepth(&Error{code: code, err: err}, 1)
43 | }
44 | return errors.WithStackDepth(&Error{code: code}, 1)
45 | }
46 |
47 | // Code returns the ErrorCode for this specific error instance.
48 | func (e *Error) Code() ErrorCode {
49 | return e.code
50 | }
51 |
52 | // Returns a human-readable error string to identify the Error by.
53 | func (e *Error) Error() string {
54 | switch e.code {
55 | case ErrCodeIsDirectory:
56 | return fmt.Sprintf("filesystem: cannot perform action: [%s] is a directory", e.resolved)
57 | case ErrCodeDiskSpace:
58 | return "filesystem: not enough disk space"
59 | case ErrCodeUnknownArchive:
60 | return "filesystem: unknown archive format"
61 | case ErrCodeDenylistFile:
62 | r := e.resolved
63 | if r == "" {
64 | r = "<empty>"
65 | }
66 | return fmt.Sprintf("filesystem: file access prohibited: [%s] is on the denylist", r)
67 | case ErrCodePathResolution:
68 | r := e.resolved
69 | if r == "" {
70 | r = "<empty>"
71 | }
72 | return fmt.Sprintf("filesystem: server path [%s] resolves to a location outside the server root: %s", e.path, r)
73 | case ErrNotExist:
74 | return "filesystem: does not exist"
75 | case ErrCodeUnknownError:
76 | fallthrough
77 | default:
78 | return fmt.Sprintf("filesystem: an error occurred: %s", e.Unwrap())
79 | }
80 | }
81 |
82 | // Unwrap returns the underlying cause of this filesystem error. In some cases
83 | // there may not be a cause present, in which case nil will be returned.
84 | func (e *Error) Unwrap() error {
85 | return e.err
86 | }
87 |
88 | // Generates an error logger instance with some basic information.
89 | func (fs *Filesystem) error(err error) *log.Entry {
90 | return log.WithField("subsystem", "filesystem").WithField("root", fs.Path()).WithField("error", err)
91 | }
92 |
93 | // Handle errors encountered when walking through directories.
94 | //
95 | // If there is a path resolution error just skip the item entirely. Only return this for a
96 | // directory, otherwise return nil. Returning this error for a file will stop the walking
97 | // for the remainder of the directory. This is assuming a FileInfo struct was even returned.
98 | func (fs *Filesystem) handleWalkerError(err error, f ufs.FileInfo) error {
99 | if !IsErrorCode(err, ErrCodePathResolution) {
100 | return err
101 | }
102 | if f != nil && f.IsDir() {
103 | return filepath.SkipDir
104 | }
105 | return nil
106 | }
107 |
108 | // IsFilesystemError checks if the given error is one of the Filesystem errors.
109 | func IsFilesystemError(err error) bool {
110 | var fserr *Error
111 | if err != nil && errors.As(err, &fserr) {
112 | return true
113 | }
114 | return false
115 | }
116 |
117 | // IsErrorCode checks if "err" is a filesystem Error type. If so, it will then
118 | // drop in and check that the error code is the same as the provided ErrorCode
119 | // passed in "code".
120 | func IsErrorCode(err error, code ErrorCode) bool {
121 | var fserr *Error
122 | if err != nil && errors.As(err, &fserr) {
123 | return fserr.code == code
124 | }
125 | return false
126 | }
127 |
128 | // NewBadPathResolution returns a new BadPathResolution error.
129 | func NewBadPathResolution(path string, resolved string) error {
130 | return errors.WithStackDepth(&Error{code: ErrCodePathResolution, path: path, resolved: resolved}, 1)
131 | }
132 |
133 | // wrapError wraps the provided error as a Filesystem error and attaches the
134 | // provided resolved source to it.
If the error is already a Filesystem error 135 | // no action is taken. 136 | func wrapError(err error, resolved string) error { 137 | if err == nil || IsFilesystemError(err) { 138 | return err 139 | } 140 | return errors.WithStackDepth(&Error{code: ErrCodeUnknownError, err: err, resolved: resolved}, 1) 141 | } 142 | --------------------------------------------------------------------------------
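As a closing illustration of the filesystem error helpers above, here is a minimal, hypothetical usage sketch. It is not part of the repository: only the exported identifiers it references (`IsErrorCode`, `IsFilesystemError`, `NewBadPathResolution`, and the error codes) come from `src/server/filesystem/errors.go`; the `classify` helper and the sample values are invented for illustration.

```go
package main

import (
	"fmt"

	"github.com/pyrohost/elytra/src/server/filesystem"
)

// classify shows how a caller can branch on the error codes defined in errors.go.
func classify(err error) string {
	switch {
	case err == nil:
		return "ok"
	case filesystem.IsErrorCode(err, filesystem.ErrCodePathResolution):
		// The requested path resolved to a location outside the server root.
		return "bad path"
	case filesystem.IsErrorCode(err, filesystem.ErrCodeDiskSpace):
		// The server has exceeded its allotted disk space.
		return "out of disk space"
	case filesystem.IsFilesystemError(err):
		// Some other filesystem error; Error() already renders a readable message.
		return err.Error()
	default:
		return "unexpected error"
	}
}

func main() {
	// NewBadPathResolution is exported, so it can produce a representative error.
	err := filesystem.NewBadPathResolution("../outside", "/etc/passwd")
	fmt.Println(classify(err)) // prints "bad path"
}
```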