├── img └── LogLynx-Overview-Demo.png ├── internal ├── version │ └── version.go ├── database │ ├── migrations.go │ ├── models │ │ ├── log_source.go │ │ ├── ip_reputation.go │ │ └── http_request.go │ ├── repositories │ │ └── log_source.go │ ├── connection.go │ ├── optimize.go │ ├── cleanup.go │ └── pool_monitor.go ├── parser │ ├── parser.go │ ├── registry.go │ ├── traefik │ │ ├── models.go │ │ └── parser_test.go │ └── useragent │ │ └── parser.go ├── banner │ └── banner.go ├── discovery │ ├── discovery.go │ └── traefik.go ├── ingestion │ └── watcher.go ├── config │ └── config.go └── api │ ├── handlers │ ├── system.go │ └── realtime.go │ └── server.go ├── .dockerignore ├── web ├── static │ ├── images │ │ └── favicon.svg │ ├── js │ │ └── core │ │ │ └── refresh-manager.js │ └── css │ │ ├── geographic.css │ │ └── tooltip.css └── templates │ └── pages │ ├── system.html │ └── realtime.html ├── .github ├── ISSUE_TEMPLATE │ ├── feature_request.md │ └── bug_report.md └── workflows │ ├── ci.yml │ ├── docker-publish.yml │ ├── dependency-check.yml │ └── greetings.yaml ├── .gitignore ├── LICENSE ├── Dockerfile ├── go.mod ├── .env.example ├── SECURITY.md ├── CODE_OF_CONDUCT.md ├── cmd └── server │ └── main.go └── README.md /img/LogLynx-Overview-Demo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/K0lin/loglynx/HEAD/img/LogLynx-Overview-Demo.png -------------------------------------------------------------------------------- /internal/version/version.go: -------------------------------------------------------------------------------- 1 | package version 2 | 3 | // Version represents the current LogLynx application version. 4 | const Version = "1.0.4" 5 | -------------------------------------------------------------------------------- /.dockerignore: -------------------------------------------------------------------------------- 1 | # Ignore files not needed for Docker build 2 | bin/ 3 | dist/ 4 | .git 5 | .github 6 | .idea 7 | *.md 8 | *.log 9 | *.sqlite 10 | *.db 11 | vendor/ 12 | node_modules/ 13 | **/.cache 14 | **/.DS_Store 15 | coverage.out 16 | tmp/ 17 | build/ 18 | *.swp 19 | -------------------------------------------------------------------------------- /internal/database/migrations.go: -------------------------------------------------------------------------------- 1 | package database 2 | 3 | import ( 4 | "loglynx/internal/database/models" 5 | 6 | "gorm.io/gorm" 7 | ) 8 | 9 | func RunMigrations(db *gorm.DB) error { 10 | return db.AutoMigrate( 11 | &models.LogSource{}, 12 | &models.HTTPRequest{}, 13 | &models.IPReputation{}, 14 | ) 15 | } -------------------------------------------------------------------------------- /internal/parser/parser.go: -------------------------------------------------------------------------------- 1 | package parsers 2 | 3 | import ( 4 | "time" 5 | ) 6 | 7 | type Event interface { 8 | GetTimestamp() time.Time 9 | GetSourceName() string 10 | } 11 | 12 | type LogParser interface { 13 | Name() string 14 | Parse(line string) (Event, error) 15 | CanParse(line string) bool 16 | } -------------------------------------------------------------------------------- /web/static/images/favicon.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | -------------------------------------------------------------------------------- /internal/database/models/log_source.go: -------------------------------------------------------------------------------- 1 | package models 
2 | 3 | import ( 4 | "time" 5 | ) 6 | 7 | type LogSource struct { 8 | Name string `gorm:"primaryKey"` 9 | Path string `gorm:"not null"` 10 | ParserType string `gorm:"not null;index"` 11 | LastLineContent string 12 | LastPosition int64 `gorm:"default:0"` 13 | LastInode int64 `gorm:"default:0"` // File inode for identity tracking (SQLite only supports int64) 14 | LastReadAt *time.Time 15 | CreatedAt time.Time 16 | UpdatedAt time.Time 17 | } 18 | 19 | func (LogSource) TableName() string { 20 | return "log_sources" 21 | } -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Feature request 3 | about: Suggest an idea for this project 4 | title: "[FEATURE REQUEST]" 5 | labels: enhancement 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Is your feature request related to a problem? Please describe.** 11 | A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] 12 | 13 | **Describe the solution you'd like** 14 | A clear and concise description of what you want to happen. 15 | 16 | **Describe alternatives you've considered** 17 | A clear and concise description of any alternative solutions or features you've considered. 18 | 19 | **Additional context** 20 | Add any other context or screenshots about the feature request here. 21 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug_report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug report 3 | about: Create a report to help us improve 4 | title: "[BUG]" 5 | labels: bug 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Describe the bug** 11 | A clear and concise description of what the bug is. 12 | 13 | **Expected behavior** 14 | A clear and concise description of what you expected to happen. 15 | 16 | **Screenshots** 17 | If applicable, add screenshots to help explain your problem. 18 | 19 | **Desktop (please complete the following information):** 20 | - OS: [e.g. iOS] 21 | - Browser [e.g. chrome, safari] 22 | - Version [e.g. 22] 23 | 24 | **Smartphone (please complete the following information):** 25 | - Device: [e.g. iPhone17] 26 | - OS: [e.g. iOS8.1] 27 | - Browser [e.g. stock browser, safari] 28 | - Version [e.g. 22] 29 | 30 | **Additional context** 31 | Add any other context about the problem here. 
32 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # ---> Go 2 | # 3 | # Binaries for programs and plugins 4 | *.exe 5 | *.exe~ 6 | *.dll 7 | *.so 8 | *.dylib 9 | 10 | # Test binary, built with `go test -c` 11 | *.test 12 | 13 | # Output of the go coverage tool, specifically when used with LiteIDE 14 | *.out 15 | 16 | 17 | # Go workspace file 18 | go.work 19 | go.work.sum 20 | 21 | # Environment and configuration files 22 | .env 23 | .env.* 24 | *.env 25 | config.local.* 26 | 27 | # Database files 28 | *.db 29 | *.db-journal 30 | *.db-shm 31 | *.db-wal 32 | loglynx.db 33 | *.mmdb 34 | 35 | # Log files 36 | *.log 37 | b 38 | 39 | 40 | # IDE and editor directories/files 41 | .vscode/ 42 | .idea/ 43 | *.iml 44 | *.swp 45 | *.swo 46 | *~ 47 | 48 | # Backup and temporary files 49 | *.old 50 | *.original 51 | *.backup 52 | *.tmp 53 | *.temp 54 | grep 55 | 56 | # Standalone example files 57 | web/static/*-examples.html 58 | -------------------------------------------------------------------------------- /internal/database/models/ip_reputation.go: -------------------------------------------------------------------------------- 1 | package models 2 | 3 | import ( 4 | "time" 5 | ) 6 | 7 | // IPReputation stores GeoIP and reputation data for IP addresses 8 | type IPReputation struct { 9 | ID uint `gorm:"primaryKey;autoIncrement"` 10 | IPAddress string `gorm:"uniqueIndex;not null;index:idx_ip_lookup"` 11 | 12 | // GeoIP data 13 | Country string `gorm:"index"` 14 | CountryName string 15 | City string 16 | Latitude float64 17 | Longitude float64 18 | 19 | // ASN data 20 | ASN int `gorm:"index"` 21 | ASNOrg string 22 | 23 | // Reputation/Usage tracking 24 | FirstSeen time.Time `gorm:"not null"` 25 | LastSeen time.Time `gorm:"not null;index"` 26 | LookupCount int64 `gorm:"default:0;index:idx_lookup_count"` 27 | 28 | CreatedAt time.Time `gorm:"autoCreateTime"` 29 | UpdatedAt time.Time `gorm:"autoUpdateTime"` 30 | } 31 | 32 | func (IPReputation) TableName() string { 33 | return "ip_reputation" 34 | } 35 | -------------------------------------------------------------------------------- /internal/banner/banner.go: -------------------------------------------------------------------------------- 1 | package banner 2 | 3 | import ( 4 | "fmt" 5 | 6 | "loglynx/internal/version" 7 | 8 | "github.com/pterm/pterm" 9 | "github.com/pterm/pterm/putils" 10 | ) 11 | 12 | func Print() { 13 | ptermLogo, _ := pterm.DefaultBigText.WithLetters( 14 | putils.LettersFromStringWithRGB("Log", pterm.NewRGB(255, 107, 53)), 15 | putils.LettersFromStringWithRGB("Lynx", pterm.NewRGB(0, 0, 0))). 16 | Srender() 17 | 18 | pterm.DefaultCenter.Print(ptermLogo) 19 | 20 | pterm.DefaultCenter.Print( 21 | pterm.DefaultHeader. 22 | WithFullWidth(). 23 | WithBackgroundStyle(pterm.NewStyle(pterm.BgLightRed)). 24 | WithMargin(5). 25 | Sprint(pterm.White("🐱 LogLynx - Fast & Precise Log Analytics")), 26 | ) 27 | 28 | pterm.Info.Println( 29 | "Swift log monitoring and analytics. Fast like a lynx, precise like a predator." + 30 | "\nBuilt for speed and accuracy in production environments." 
+ 31 | fmt.Sprintf("\nVersion %s.", version.Version), 32 | ) 33 | } 34 | -------------------------------------------------------------------------------- /.github/workflows/ci.yml: -------------------------------------------------------------------------------- 1 | name: CI 2 | 3 | on: 4 | push: 5 | branches: [ main, dev ] 6 | pull_request: 7 | branches: [ main ] 8 | 9 | jobs: 10 | build: 11 | name: Build and test 12 | runs-on: ubuntu-latest 13 | 14 | steps: 15 | - name: Checkout 16 | uses: actions/checkout@v4 17 | 18 | - name: Set up Go 19 | uses: actions/setup-go@v4 20 | with: 21 | go-version: '1.25.5' 22 | 23 | - name: Cache Go build 24 | uses: actions/cache@v4 25 | with: 26 | path: | 27 | ~/.cache/go-build 28 | ~/go/pkg/mod 29 | key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }} 30 | restore-keys: | 31 | ${{ runner.os }}-go- 32 | 33 | - name: Install dependencies 34 | run: go mod download 35 | 36 | - name: Build 37 | run: go build ./... 38 | 39 | - name: Vet 40 | run: go vet ./... 41 | 42 | - name: Test 43 | run: go test ./... -v 44 | 45 | 46 | 47 | 48 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2025 Kolin 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | # Multi-stage Dockerfile for LogLynx 2 | # Builder stage: compiles the linux/amd64 binary (dynamically linked, since CGO is enabled) 3 | FROM golang:1.25.5 AS builder 4 | 5 | WORKDIR /src 6 | 7 | # Copy go.mod and go.sum first to leverage Docker layer cache 8 | COPY go.mod go.sum ./ 9 | RUN go mod download 10 | 11 | # Copy rest of the sources 12 | COPY . . 13 | 14 | # Build the server binary (CGO enabled for sqlite/geoip native deps) 15 | RUN CGO_ENABLED=1 GOOS=linux GOARCH=amd64 \ 16 | go build -ldflags "-s -w" -o /out/loglynx ./cmd/server 17 | 18 | 19 | # Final image: small, secure runtime that still ships glibc for CGO 20 | FROM gcr.io/distroless/base-debian12 21 | 22 | # Create application directory and set as working dir so relative paths like 23 | # `web/templates/**/*.html` and `geoip/*` resolve inside the container.
24 | WORKDIR /app 25 | 26 | # Copy binary from builder 27 | COPY --from=builder /out/loglynx /usr/local/bin/loglynx 28 | 29 | # Copy web assets (templates + static) so Gin can load templates from 30 | # the expected relative path `web/templates/**/*.html`. 31 | COPY --from=builder /src/web ./web 32 | 33 | # Optional: create directories for volumes 34 | VOLUME ["/data", "/app/geoip", "/traefik/logs"] 35 | 36 | EXPOSE 8080 37 | 38 | ENTRYPOINT ["/usr/local/bin/loglynx"] 39 | 40 | 41 | -------------------------------------------------------------------------------- /internal/database/repositories/log_source.go: -------------------------------------------------------------------------------- 1 | package repositories 2 | 3 | import ( 4 | "loglynx/internal/database/models" 5 | "time" 6 | 7 | "gorm.io/gorm" 8 | ) 9 | 10 | type LogSourceRepository interface { 11 | Create(source *models.LogSource) error 12 | FindByName(name string) (*models.LogSource, error) 13 | FindAll() ([]*models.LogSource, error) 14 | Update(source *models.LogSource) error 15 | UpdateTracking(name string, position int64, inode int64, lastLine string) error 16 | } 17 | 18 | type logSourceRepo struct { 19 | db *gorm.DB 20 | } 21 | 22 | func NewLogSourceRepository(db *gorm.DB) LogSourceRepository { 23 | return &logSourceRepo{db: db} 24 | } 25 | 26 | func (r *logSourceRepo) Create(source *models.LogSource) error { 27 | return r.db.Create(source).Error 28 | } 29 | 30 | func (r *logSourceRepo) FindByName(name string) (*models.LogSource, error) { 31 | var source models.LogSource 32 | err := r.db.Where("name = ?", name).First(&source).Error 33 | if err != nil { 34 | return nil, err 35 | } 36 | return &source, nil 37 | } 38 | 39 | func (r *logSourceRepo) FindAll() ([]*models.LogSource, error) { 40 | var sources []*models.LogSource 41 | err := r.db.Find(&sources).Error 42 | return sources, err 43 | } 44 | 45 | func (r *logSourceRepo) Update(source *models.LogSource) error { 46 | return r.db.Save(source).Error 47 | } 48 | 49 | func (r *logSourceRepo) UpdateTracking(name string, position int64, inode int64, lastLine string) error { 50 | // Use Exec for better performance with direct SQL execution 51 | return r.db.Exec( 52 | "UPDATE log_sources SET last_position = ?, last_inode = ?, last_line_content = ?, last_read_at = ?, updated_at = ? 
WHERE name = ?", 53 | position, inode, lastLine, time.Now(), time.Now(), name, 54 | ).Error 55 | } 56 | -------------------------------------------------------------------------------- /internal/parser/registry.go: -------------------------------------------------------------------------------- 1 | package parsers 2 | 3 | import ( 4 | "fmt" 5 | "loglynx/internal/parser/traefik" 6 | 7 | "github.com/pterm/pterm" 8 | ) 9 | 10 | // Registry manages all available log parsers 11 | type Registry struct { 12 | parsers map[string]LogParser 13 | logger *pterm.Logger 14 | } 15 | 16 | // traefikParserWrapper wraps traefik.Parser to implement LogParser interface 17 | type traefikParserWrapper struct { 18 | *traefik.Parser 19 | } 20 | 21 | // Parse adapts traefik.Parser.Parse to return Event interface 22 | func (w *traefikParserWrapper) Parse(line string) (Event, error) { 23 | return w.Parser.Parse(line) 24 | } 25 | 26 | // NewRegistry creates a new parser registry with all built-in parsers 27 | func NewRegistry(logger *pterm.Logger) *Registry { 28 | registry := &Registry{ 29 | parsers: make(map[string]LogParser), 30 | logger: logger, 31 | } 32 | 33 | // Register built-in parsers with wrappers 34 | traefikParser := traefik.NewParser(logger) 35 | registry.Register("traefik", &traefikParserWrapper{traefikParser}) 36 | logger.Debug("Registered parser", logger.Args("type", "traefik")) 37 | 38 | return registry 39 | } 40 | 41 | // Register adds a parser to the registry 42 | func (r *Registry) Register(name string, parser LogParser) { 43 | r.parsers[name] = parser 44 | } 45 | 46 | // Get retrieves a parser by type 47 | func (r *Registry) Get(parserType string) (LogParser, error) { 48 | parser, exists := r.parsers[parserType] 49 | if !exists { 50 | r.logger.WithCaller().Warn("Parser not found", r.logger.Args("type", parserType)) 51 | return nil, fmt.Errorf("parser not found: %s", parserType) 52 | } 53 | return parser, nil 54 | } 55 | 56 | // GetAll returns all registered parsers 57 | func (r *Registry) GetAll() map[string]LogParser { 58 | return r.parsers 59 | } -------------------------------------------------------------------------------- /.github/workflows/docker-publish.yml: -------------------------------------------------------------------------------- 1 | name: Docker Publish 2 | 3 | on: 4 | push: 5 | branches: [ main, dev ] 6 | release: 7 | types: [ published ] 8 | 9 | jobs: 10 | build-and-push: 11 | name: Build and push Docker image 12 | runs-on: ubuntu-latest 13 | 14 | steps: 15 | - name: Checkout 16 | uses: actions/checkout@v4 17 | 18 | - name: Set up QEMU 19 | uses: docker/setup-qemu-action@v2 20 | 21 | - name: Set up Docker Buildx 22 | uses: docker/setup-buildx-action@v2 23 | 24 | - name: Log in to Docker Hub 25 | uses: docker/login-action@v2 26 | with: 27 | username: ${{ secrets.DOCKERHUB_USERNAME }} 28 | password: ${{ secrets.DOCKERHUB_TOKEN }} 29 | 30 | - name: Prepare image name 31 | run: echo "IMAGE=${{ secrets.DOCKERHUB_USERNAME }}/loglynx" >> $GITHUB_ENV 32 | 33 | - name: Build and push (dev branch -> dev-k) 34 | if: github.event_name == 'push' && github.ref == 'refs/heads/dev' 35 | uses: docker/build-push-action@v5 36 | with: 37 | context: . 38 | push: true 39 | tags: ${{ env.IMAGE }}:dev-k 40 | 41 | - name: Build and push (main -> dev) 42 | if: github.event_name == 'push' && github.ref == 'refs/heads/main' 43 | uses: docker/build-push-action@v5 44 | with: 45 | context: . 
46 | push: true 47 | tags: ${{ env.IMAGE }}:dev 48 | 49 | - name: Build and push (release -> latest and version) 50 | if: github.event_name == 'release' 51 | uses: docker/build-push-action@v5 52 | with: 53 | context: . 54 | push: true 55 | tags: | 56 | ${{ env.IMAGE }}:latest 57 | ${{ env.IMAGE }}:${{ github.ref_name }} 58 | -------------------------------------------------------------------------------- /internal/discovery/discovery.go: -------------------------------------------------------------------------------- 1 | package discovery 2 | 3 | import ( 4 | "loglynx/internal/database/models" 5 | "loglynx/internal/database/repositories" 6 | 7 | "github.com/pterm/pterm" 8 | ) 9 | 10 | type ServiceDetector interface { 11 | Name() string 12 | Detect() ([]*models.LogSource, error) 13 | } 14 | 15 | type Engine struct { 16 | repo repositories.LogSourceRepository 17 | detectors []ServiceDetector 18 | } 19 | 20 | func NewEngine(repo repositories.LogSourceRepository, logger *pterm.Logger) *Engine { 21 | return &Engine{ 22 | repo: repo, 23 | detectors: []ServiceDetector{ 24 | NewTraefikDetector(logger), 25 | }, 26 | } 27 | } 28 | 29 | func (e *Engine) Run(logger *pterm.Logger) error { 30 | logger.Trace("Check if the discovery is needed.") 31 | existing, err := e.repo.FindAll() 32 | if err != nil { 33 | return err 34 | } 35 | 36 | if len(existing) > 0 { 37 | logger.Trace("Discovery is not needed.") 38 | return nil 39 | } 40 | 41 | logger.Debug("Starting discovery...") 42 | 43 | logger.Trace("Running service detectors...") 44 | for _, detector := range e.detectors { 45 | sources, err := detector.Detect() 46 | logger.Trace("Detector executed.", logger.Args("Name", detector.Name())) 47 | if err != nil { 48 | logger.WithCaller().Warn("Detection failed", logger.Args("detector", detector.Name(), "error", err)) 49 | continue 50 | } 51 | 52 | logger.Trace("Registering discovered sources...") 53 | for _, source := range sources { 54 | if err := e.repo.Create(source); err != nil { 55 | logger.WithCaller().Error("Failed to register log source", logger.Args("source", source.Name, "error", err)) 56 | } else { 57 | logger.Info("Registered new log source.", logger.Args("Name", source.Name, "Path", source.Path)) 58 | } 59 | } 60 | } 61 | 62 | logger.Debug("Discovery completed") 63 | return nil 64 | } -------------------------------------------------------------------------------- /internal/parser/traefik/models.go: -------------------------------------------------------------------------------- 1 | package traefik 2 | 3 | import ( 4 | "time" 5 | ) 6 | 7 | // HTTPRequestEvent represents a complete Traefik HTTP request log entry 8 | type HTTPRequestEvent struct { 9 | Timestamp time.Time 10 | SourceName string 11 | 12 | // Client info 13 | ClientIP string 14 | ClientPort int 15 | ClientUser string 16 | 17 | // Request info 18 | Method string 19 | Protocol string 20 | Host string 21 | Path string 22 | QueryString string 23 | RequestLength int64 24 | RequestScheme string // Request scheme: http, https (from request_X-Forwarded-Proto) 25 | 26 | // Response info 27 | StatusCode int 28 | ResponseSize int64 29 | ResponseTimeMs float64 30 | ResponseContentType string // downstream_Content-Type 31 | 32 | // Detailed timing (for hash calculation precision) 33 | Duration int64 // Duration in nanoseconds (Traefik's Duration field) 34 | StartUTC string // Start timestamp with nanosecond precision (Traefik's StartUTC field) 35 | UpstreamResponseTimeMs float64 36 | RetryAttempts int // Number of retry attempts 37 | RequestsTotal int // 
Total number of requests at router level (Traefik CLF field) 38 | 39 | // Headers 40 | UserAgent string 41 | Referer string 42 | 43 | // Proxy/Upstream info 44 | BackendName string 45 | BackendURL string 46 | RouterName string 47 | UpstreamStatus int 48 | UpstreamContentType string // origin_Content-Type 49 | ClientHostname string // ClientHost field (may contain hostname) 50 | 51 | // TLS info 52 | TLSVersion string 53 | TLSCipher string 54 | TLSServerName string 55 | 56 | // Tracing & IDs 57 | RequestID string 58 | TraceID string 59 | 60 | // GeoIP enrichment (populated later by enrichment layer) 61 | GeoCountry string 62 | GeoCity string 63 | GeoLat float64 64 | GeoLon float64 65 | ASN int 66 | ASNOrg string 67 | 68 | // Proxy-specific metadata 69 | ProxyMetadata string 70 | } 71 | 72 | func (e *HTTPRequestEvent) GetTimestamp() time.Time { 73 | return e.Timestamp 74 | } 75 | 76 | func (e *HTTPRequestEvent) GetSourceName() string { 77 | return e.SourceName 78 | } -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module loglynx 2 | 3 | go 1.25 4 | 5 | require ( 6 | github.com/fsnotify/fsnotify v1.9.0 7 | github.com/gin-gonic/gin v1.11.0 8 | github.com/joho/godotenv v1.5.1 9 | github.com/oschwald/geoip2-golang v1.13.0 10 | github.com/pterm/pterm v0.12.82 11 | gorm.io/driver/sqlite v1.6.0 12 | gorm.io/gorm v1.31.0 13 | ) 14 | 15 | require ( 16 | atomicgo.dev/cursor v0.2.0 // indirect 17 | atomicgo.dev/keyboard v0.2.9 // indirect 18 | atomicgo.dev/schedule v0.1.0 // indirect 19 | github.com/bytedance/gopkg v0.1.3 // indirect 20 | github.com/bytedance/sonic v1.14.2 // indirect 21 | github.com/bytedance/sonic/loader v0.4.0 // indirect 22 | github.com/cloudwego/base64x v0.1.6 // indirect 23 | github.com/containerd/console v1.0.5 // indirect 24 | github.com/gabriel-vasile/mimetype v1.4.10 // indirect 25 | github.com/gin-contrib/sse v1.1.0 // indirect 26 | github.com/go-playground/locales v0.14.1 // indirect 27 | github.com/go-playground/universal-translator v0.18.1 // indirect 28 | github.com/go-playground/validator/v10 v10.28.0 // indirect 29 | github.com/goccy/go-json v0.10.5 // indirect 30 | github.com/goccy/go-yaml v1.18.0 // indirect 31 | github.com/gookit/color v1.5.4 // indirect 32 | github.com/jinzhu/inflection v1.0.0 // indirect 33 | github.com/jinzhu/now v1.1.5 // indirect 34 | github.com/json-iterator/go v1.1.12 // indirect 35 | github.com/klauspost/cpuid/v2 v2.3.0 // indirect 36 | github.com/leodido/go-urn v1.4.0 // indirect 37 | github.com/lithammer/fuzzysearch v1.1.8 // indirect 38 | github.com/mattn/go-isatty v0.0.20 // indirect 39 | github.com/mattn/go-runewidth v0.0.16 // indirect 40 | github.com/mattn/go-sqlite3 v1.14.24 // indirect 41 | github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect 42 | github.com/modern-go/reflect2 v1.0.2 // indirect 43 | github.com/oschwald/maxminddb-golang v1.13.1 // indirect 44 | github.com/pelletier/go-toml/v2 v2.2.4 // indirect 45 | github.com/quic-go/qpack v0.6.0 // indirect 46 | github.com/quic-go/quic-go v0.57.0 // indirect 47 | github.com/rivo/uniseg v0.4.7 // indirect 48 | github.com/twitchyliquid64/golang-asm v0.15.1 // indirect 49 | github.com/ugorji/go/codec v1.3.1 // indirect 50 | github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e // indirect 51 | go.uber.org/mock v0.6.0 // indirect 52 | golang.org/x/arch v0.22.0 // indirect 53 | golang.org/x/crypto v0.45.0 // indirect 54 | 
golang.org/x/net v0.47.0 // indirect 55 | golang.org/x/sys v0.38.0 // indirect 56 | golang.org/x/term v0.37.0 // indirect 57 | golang.org/x/text v0.31.0 // indirect 58 | google.golang.org/protobuf v1.36.10 // indirect 59 | ) 60 | -------------------------------------------------------------------------------- /.github/workflows/dependency-check.yml: -------------------------------------------------------------------------------- 1 | name: Dependency Security Check 2 | 3 | on: 4 | schedule: 5 | # Run every day at 2 AM UTC 6 | - cron: '0 2 * * *' 7 | workflow_dispatch: # Allow manual triggering 8 | 9 | jobs: 10 | vulnerability-scan: 11 | name: Check for known vulnerabilities 12 | runs-on: ubuntu-latest 13 | 14 | steps: 15 | - name: Checkout 16 | uses: actions/checkout@v4 17 | 18 | - name: Set up Go 19 | uses: actions/setup-go@v4 20 | with: 21 | go-version: '1.25.5' 22 | 23 | - name: Install govulncheck 24 | run: go install golang.org/x/vuln/cmd/govulncheck@latest 25 | 26 | - name: Run vulnerability check 27 | run: | 28 | echo "Checking for known vulnerabilities in dependencies..." 29 | govulncheck ./... 30 | 31 | outdated-check: 32 | name: Check for outdated dependencies 33 | runs-on: ubuntu-latest 34 | 35 | steps: 36 | - name: Checkout 37 | uses: actions/checkout@v4 38 | 39 | - name: Set up Go 40 | uses: actions/setup-go@v4 41 | with: 42 | go-version: '1.25' 43 | 44 | - name: Check for outdated direct dependencies 45 | run: | 46 | echo "Checking for outdated dependencies..." 47 | echo "Direct dependencies:" 48 | go list -u -m all | grep -E "\[direct\]" -A1 -B1 || echo "No outdated direct dependencies found" 49 | 50 | - name: List all dependencies with available updates 51 | run: | 52 | echo "All dependencies with available updates:" 53 | go list -u -m -json all | jq -r '"\(.Path): current=\(.Version) latest=\(.Update.Version // "none")"' | grep -v "none$" || echo "No dependencies have updates available" 54 | 55 | license-check: 56 | name: Check dependency licenses 57 | runs-on: ubuntu-latest 58 | 59 | steps: 60 | - name: Checkout 61 | uses: actions/checkout@v4 62 | 63 | - name: Set up Go 64 | uses: actions/setup-go@v4 65 | with: 66 | go-version: '1.25' 67 | 68 | - name: Install go-licenses 69 | run: go install github.com/google/go-licenses@latest 70 | 71 | - name: Check licenses 72 | run: | 73 | echo "Checking dependency licenses..." 74 | go-licenses check ./... 75 | echo "" 76 | echo "License summary:" 77 | go-licenses csv ./... | sort | uniq -c | sort -nr 78 | 79 | - name: Save license report 80 | run: | 81 | go-licenses csv ./... 
> license-report.csv 82 | 83 | - name: Upload license report 84 | uses: actions/upload-artifact@v4 85 | with: 86 | name: license-report 87 | path: license-report.csv 88 | retention-days: 30 89 | -------------------------------------------------------------------------------- /.env.example: -------------------------------------------------------------------------------- 1 | # LogLynx Configuration File 2 | # Copy this file to .env and customize the values 3 | 4 | # ================================ 5 | # Database Configuration 6 | # ================================ 7 | DB_PATH=loglynx.db 8 | DB_MAX_OPEN_CONNS=10 9 | DB_MAX_IDLE_CONNS=3 10 | DB_CONN_MAX_LIFE=1h 11 | 12 | # Data Retention (NEW - Automatic cleanup) 13 | # Set to 0 to disable automatic cleanup (database will grow indefinitely) 14 | DB_RETENTION_DAYS=60 15 | 16 | # Cleanup schedule - how often to check if cleanup should run 17 | DB_CLEANUP_INTERVAL=1h 18 | 19 | # Time of day to run cleanup (24-hour format HH:MM) 20 | # Default: 02:00 (2 AM) - recommended for low-traffic time 21 | DB_CLEANUP_TIME=02:00 22 | 23 | # Run VACUUM after cleanup to reclaim disk space 24 | # VACUUM briefly locks the database (~1 minute per GB freed) 25 | DB_VACUUM_ENABLED=true 26 | 27 | # ================================ 28 | # GeoIP Configuration 29 | # ================================ 30 | # Download GeoIP databases from MaxMind: 31 | # https://dev.maxmind.com/geoip/geolite2-free-geolocation-data 32 | GEOIP_ENABLED=true 33 | GEOIP_CITY_DB=geoip/GeoLite2-City.mmdb 34 | GEOIP_COUNTRY_DB=geoip/GeoLite2-Country.mmdb 35 | GEOIP_ASN_DB=geoip/GeoLite2-ASN.mmdb 36 | 37 | # ================================ 38 | # Log Sources Configuration 39 | # ================================ 40 | # Path to Traefik access log file 41 | TRAEFIK_LOG_PATH=traefik/logs/access.log 42 | 43 | # Traefik log format (auto, json, clf) 44 | # auto: automatically detects format (default) 45 | # json: expects JSON formatted logs only 46 | # clf: expects Common Log Format (text) only 47 | TRAEFIK_LOG_FORMAT=auto 48 | 49 | # Auto-discover log files in directories 50 | LOG_AUTO_DISCOVER=true 51 | 52 | # Initial Import Limiting (NEW) 53 | # On first run, only import last N days from log files 54 | # This prevents overwhelming the database with years of old logs 55 | # Set to 0 to import all historical data 56 | INITIAL_IMPORT_DAYS=60 57 | 58 | # Enable/disable initial import limiting 59 | INITIAL_IMPORT_ENABLE=true 60 | 61 | # ================================ 62 | # Web Server Configuration 63 | # ================================ 64 | SERVER_HOST=0.0.0.0 65 | SERVER_PORT=8080 66 | SERVER_PRODUCTION=false 67 | 68 | # Dashboard UI enabled (set to false for API-only mode) 69 | # When disabled, only API routes at /api/v1 are accessible 70 | # Useful for headless/API-only deployments or security-focused setups 71 | DASHBOARD_ENABLED=true 72 | 73 | # Splash screen on startup (set to false to disable) 74 | # When enabled, shows a loading screen while initial logs are being processed 75 | # Default: true 76 | SPLASH_SCREEN_ENABLED=true 77 | 78 | # Application log level (trace, debug, info, warn, error, fatal) 79 | # Default: info 80 | LOG_LEVEL=info 81 | 82 | # ================================ 83 | # Performance Tuning 84 | # ================================ 85 | # How often to collect real-time metrics 86 | # 1s recommended for best real-time responsiveness 87 | METRICS_INTERVAL=1s 88 | 89 | # GeoIP cache size (number of IPs to cache) 90 | GEOIP_CACHE_SIZE=10000 #Loaded but not implemented yet 
(reserved for future LRU caching) 91 | 92 | # Batch size for bulk inserts 93 | BATCH_SIZE=1000 94 | 95 | # Number of worker goroutines for log processing 96 | WORKER_POOL_SIZE=4 -------------------------------------------------------------------------------- /.github/workflows/greetings.yaml: -------------------------------------------------------------------------------- 1 | name: Greetings 2 | 3 | on: 4 | pull_request: 5 | types: 6 | - opened 7 | issues: 8 | types: 9 | - opened 10 | 11 | permissions: 12 | issues: write 13 | pull-requests: write 14 | 15 | jobs: 16 | greeting: 17 | name: Greet First-Time Contributors 18 | runs-on: ubuntu-latest 19 | 20 | steps: 21 | - uses: actions/first-interaction@v3 22 | with: 23 | issue_message: | 24 | # 🎉 Welcome to LogLynx! 25 | 26 | Thank you for opening your first issue in the LogLynx project! We appreciate you taking the time to help improve our advanced log analytics platform. 27 | 28 | ## 📝 What to expect 29 | - Our maintainers will review your issue shortly 30 | - We may ask for additional information to better understand the issue 31 | - We'll label the issue appropriately to track its progress 32 | 33 | ## 🔍 Useful Resources 34 | - [Project Documentation](../tree/main/README.md) 35 | - [Contributing Guidelines](../tree/main/CONTRIBUTING.md) 36 | - [Code of Conduct](../tree/main/CODE_OF_CONDUCT.md) 37 | - [Security Policy](../tree/main/SECURITY.md) 38 | 39 | ## 🚀 Getting Started with LogLynx 40 | LogLynx is a powerful log analytics platform that provides real-time insights into your web traffic. If you're new to the project, you might want to: 41 | - Read about our [features](../tree/main/README.md#-features) 42 | - Learn how to [get started](../tree/main/README.md#-quick-start) 43 | 44 | Thanks again for your contribution! 🙏 45 | 46 | _The LogLynx Team_ 47 | pr_message: | 48 | # 🎉 Welcome to LogLynx! 49 | 50 | Thank you for your first pull request to LogLynx! We're excited to have you contribute to our advanced log analytics platform. 51 | 52 | ## 📋 Review Process 53 | Your PR will be reviewed by our maintainers who will: 54 | - Check that it follows our [contributing guidelines](../tree/main/CONTRIBUTING.md) 55 | - Ensure tests pass and code quality standards are met 56 | - Verify the PR addresses an existing issue or brings valuable improvements 57 | - May suggest changes or request additional information 58 | 59 | ## 🔧 Development Checklist 60 | - [ ] Code follows the project's style guidelines 61 | - [ ] Tests have been added/updated if applicable 62 | - [ ] Documentation has been updated if needed 63 | - [ ] Commit messages are clear and descriptive 64 | - [ ] PR is linked to any relevant issues 65 | 66 | ## 📚 Helpful Resources 67 | - [Contributing Guidelines](../tree/main/CONTRIBUTING.md) 68 | - [Code of Conduct](../tree/main/CODE_OF_CONDUCT.md) 69 | - [Project Architecture](../tree/main/README.md#architecture) 70 | - [API Documentation](../wiki/API-Documentation) 71 | 72 | ## 🚀 About LogLynx 73 | Your contribution helps improve LogLynx, a comprehensive log analytics solution that provides: 74 | - Real-time log monitoring and visualization 75 | - Geographic analytics and IP intelligence 76 | - Performance metrics and system statistics 77 | - Advanced filtering and search capabilities 78 | 79 | We appreciate your time and effort to make LogLynx better! 
🙏 80 | 81 | _The LogLynx Team_ 82 | -------------------------------------------------------------------------------- /internal/parser/useragent/parser.go: -------------------------------------------------------------------------------- 1 | package useragent 2 | 3 | import ( 4 | "regexp" 5 | "strings" 6 | ) 7 | 8 | // UserAgentInfo contains parsed information from a User-Agent string 9 | type UserAgentInfo struct { 10 | Browser string 11 | BrowserVersion string 12 | OS string 13 | OSVersion string 14 | DeviceType string 15 | } 16 | 17 | var ( 18 | // Browser patterns (order matters - more specific first) 19 | browserPatterns = []struct { 20 | name string 21 | pattern *regexp.Regexp 22 | }{ 23 | {"Edge", regexp.MustCompile(`(?i)Edg/(\d+\.\d+)`)}, 24 | {"Chrome", regexp.MustCompile(`(?i)Chrome/(\d+\.\d+)`)}, 25 | {"Safari", regexp.MustCompile(`(?i)Version/(\d+\.\d+).*Safari`)}, 26 | {"Firefox", regexp.MustCompile(`(?i)Firefox/(\d+\.\d+)`)}, 27 | {"Opera", regexp.MustCompile(`(?i)(?:Opera|OPR)/(\d+\.\d+)`)}, 28 | {"IE", regexp.MustCompile(`(?i)MSIE\s+(\d+\.\d+)`)}, 29 | {"IE", regexp.MustCompile(`(?i)Trident/.*rv:(\d+\.\d+)`)}, 30 | } 31 | 32 | // OS patterns 33 | osPatterns = []struct { 34 | name string 35 | pattern *regexp.Regexp 36 | }{ 37 | {"Windows", regexp.MustCompile(`(?i)Windows NT (\d+\.\d+)`)}, 38 | {"macOS", regexp.MustCompile(`(?i)Mac OS X (\d+[._]\d+)`)}, 39 | {"iOS", regexp.MustCompile(`(?i)(?:iPhone|iPad).*OS (\d+[._]\d+)`)}, 40 | {"Android", regexp.MustCompile(`(?i)Android (\d+\.\d+)`)}, 41 | {"Linux", regexp.MustCompile(`(?i)Linux`)}, 42 | {"ChromeOS", regexp.MustCompile(`(?i)CrOS`)}, 43 | } 44 | 45 | // Bot patterns 46 | botPatterns = regexp.MustCompile(`(?i)bot|crawler|spider|scraper|curl|wget|python|go-http`) 47 | 48 | // Mobile patterns 49 | mobilePatterns = regexp.MustCompile(`(?i)mobile|android|iphone|ipad|ipod|blackberry|windows phone`) 50 | ) 51 | 52 | // Parse extracts browser, OS, and device information from a User-Agent string 53 | func Parse(userAgent string) *UserAgentInfo { 54 | if userAgent == "" { 55 | return &UserAgentInfo{ 56 | Browser: "Unknown", 57 | OS: "Unknown", 58 | DeviceType: "unknown", 59 | } 60 | } 61 | 62 | info := &UserAgentInfo{} 63 | 64 | // Check if it's a bot 65 | if botPatterns.MatchString(userAgent) { 66 | info.DeviceType = "bot" 67 | info.Browser = detectBot(userAgent) 68 | info.OS = "Bot" 69 | return info 70 | } 71 | 72 | // Detect browser 73 | for _, bp := range browserPatterns { 74 | if matches := bp.pattern.FindStringSubmatch(userAgent); matches != nil { 75 | info.Browser = bp.name 76 | if len(matches) > 1 { 77 | info.BrowserVersion = matches[1] 78 | } 79 | break 80 | } 81 | } 82 | if info.Browser == "" { 83 | info.Browser = "Unknown" 84 | } 85 | 86 | // Detect OS 87 | for _, op := range osPatterns { 88 | if matches := op.pattern.FindStringSubmatch(userAgent); matches != nil { 89 | info.OS = op.name 90 | if len(matches) > 1 { 91 | // Replace underscores with dots for iOS versions 92 | info.OSVersion = strings.ReplaceAll(matches[1], "_", ".") 93 | } 94 | break 95 | } 96 | } 97 | if info.OS == "" { 98 | info.OS = "Unknown" 99 | } 100 | 101 | // Detect device type 102 | if mobilePatterns.MatchString(userAgent) { 103 | info.DeviceType = "mobile" 104 | } else { 105 | info.DeviceType = "desktop" 106 | } 107 | 108 | return info 109 | } 110 | 111 | // detectBot tries to identify the specific bot from User-Agent 112 | func detectBot(userAgent string) string { 113 | botNames := map[string]string{ 114 | "googlebot": "Googlebot", 115 | "bingbot": 
"Bingbot", 116 | "slurp": "Yahoo Slurp", 117 | "duckduckbot": "DuckDuckBot", 118 | "baiduspider": "Baidu Spider", 119 | "yandexbot": "YandexBot", 120 | "facebookbot": "Facebookbot", 121 | "twitterbot": "Twitterbot", 122 | "linkedinbot": "LinkedInBot", 123 | "applebot": "Applebot", 124 | "python": "Python Client", 125 | "go-http": "Go HTTP Client", 126 | "curl": "cURL", 127 | "wget": "Wget", 128 | "postman": "Postman", 129 | } 130 | 131 | lowerUA := strings.ToLower(userAgent) 132 | for pattern, name := range botNames { 133 | if strings.Contains(lowerUA, pattern) { 134 | return name 135 | } 136 | } 137 | 138 | return "Bot" 139 | } 140 | 141 | // GetWindowsVersion converts Windows NT version to friendly name 142 | func GetWindowsVersion(ntVersion string) string { 143 | versions := map[string]string{ 144 | "10.0": "Windows 10/11", 145 | "6.3": "Windows 8.1", 146 | "6.2": "Windows 8", 147 | "6.1": "Windows 7", 148 | "6.0": "Windows Vista", 149 | "5.1": "Windows XP", 150 | } 151 | 152 | if friendly, ok := versions[ntVersion]; ok { 153 | return friendly 154 | } 155 | return "Windows " + ntVersion 156 | } 157 | -------------------------------------------------------------------------------- /web/static/js/core/refresh-manager.js: -------------------------------------------------------------------------------- 1 | const RefreshManager = (() => { 2 | const STORAGE_KEY = 'loglynx_refresh_global'; 3 | const DEFAULT_INTERVAL = 30; 4 | const DEFAULT_AUTO_START = true; 5 | 6 | // Run the check on script load 7 | checkAndHideFilters(); 8 | 9 | // Session-only state (cleared on navigation) 10 | let pageOverride = null; 11 | let currentPage = null; 12 | 13 | /** 14 | * Get current page identifier from URL 15 | */ 16 | function getCurrentPageId() { 17 | const path = window.location.pathname; 18 | if (path === '/' || path === '/index' || path === '/overview') return 'overview'; 19 | return path.replace(/^\//, '').replace(/\//g, '-') || 'overview'; 20 | } 21 | 22 | /** 23 | * Check current path and hide refresh filters if needed 24 | */ 25 | function checkAndHideFilters() { 26 | const currentPath = window.location.pathname; 27 | 28 | const shouldHideFilters = 29 | currentPath === '/system' || 30 | currentPath === '/backends' || 31 | currentPath === '/content' || 32 | currentPath === '/security' || 33 | currentPath === '/realtime' || 34 | currentPath.startsWith('/ip/'); 35 | 36 | if (shouldHideFilters) { 37 | // Hide the entire refresh-controls container 38 | const refreshControls = document.querySelector('.header-refresh-controls'); 39 | if (refreshControls) { 40 | refreshControls.style.display = 'none'; 41 | } 42 | } 43 | } 44 | 45 | 46 | /** 47 | * Load global settings from localStorage 48 | */ 49 | function loadGlobalSettings() { 50 | try { 51 | const stored = localStorage.getItem(STORAGE_KEY); 52 | if (stored) { 53 | const parsed = JSON.parse(stored); 54 | return { 55 | interval: parsed.interval || DEFAULT_INTERVAL, 56 | autoStart: parsed.autoStart !== undefined ? 
parsed.autoStart : DEFAULT_AUTO_START 57 | }; 58 | } 59 | } catch (e) { 60 | console.warn('[RefreshManager] Failed to load global settings from localStorage:', e); 61 | } 62 | 63 | return { 64 | interval: DEFAULT_INTERVAL, 65 | autoStart: DEFAULT_AUTO_START 66 | }; 67 | } 68 | 69 | /** 70 | * Save global settings to localStorage 71 | */ 72 | function saveGlobalSettings(settings) { 73 | try { 74 | localStorage.setItem(STORAGE_KEY, JSON.stringify(settings)); 75 | console.log('[RefreshManager] Global settings saved:', settings); 76 | } catch (e) { 77 | console.warn('[RefreshManager] Failed to save global settings to localStorage:', e); 78 | } 79 | } 80 | 81 | /** 82 | * Initialize RefreshManager for current page 83 | */ 84 | function init() { 85 | currentPage = getCurrentPageId(); 86 | // Clear any previous page override when initializing 87 | pageOverride = null; 88 | 89 | console.log('[RefreshManager] Initialized for page:', currentPage); 90 | } 91 | 92 | /** 93 | * Get current settings (page override OR global) 94 | */ 95 | function getCurrentSettings() { 96 | if (pageOverride) { 97 | return { 98 | ...pageOverride, 99 | isPageSpecific: true, 100 | pageId: currentPage 101 | }; 102 | } 103 | 104 | const global = loadGlobalSettings(); 105 | return { 106 | ...global, 107 | isPageSpecific: false, 108 | pageId: null 109 | }; 110 | } 111 | 112 | /** 113 | * Update global settings 114 | */ 115 | function updateGlobal(interval, autoStart) { 116 | const settings = { 117 | interval: interval, 118 | autoStart: autoStart !== undefined ? autoStart : true 119 | }; 120 | 121 | saveGlobalSettings(settings); 122 | console.log('[RefreshManager] Global settings updated:', settings); 123 | } 124 | 125 | /** 126 | * Enable page-specific override (session-only) 127 | */ 128 | function enablePageOverride(interval, autoStart) { 129 | pageOverride = { 130 | interval: interval, 131 | autoStart: autoStart !== undefined ? autoStart : true 132 | }; 133 | 134 | console.log('[RefreshManager] Page override enabled for', currentPage, ':', pageOverride); 135 | } 136 | 137 | /** 138 | * Disable page-specific override (revert to global) 139 | */ 140 | function disablePageOverride() { 141 | pageOverride = null; 142 | console.log('[RefreshManager] Page override disabled for', currentPage); 143 | } 144 | 145 | /** 146 | * Check if page has active override 147 | */ 148 | function hasPageOverride() { 149 | return pageOverride !== null; 150 | } 151 | 152 | /** 153 | * Update current settings (global or page-specific) 154 | */ 155 | function updateCurrent(interval, autoStart) { 156 | if (hasPageOverride()) { 157 | // Update page override 158 | enablePageOverride(interval, autoStart); 159 | } else { 160 | // Update global 161 | updateGlobal(interval, autoStart); 162 | } 163 | } 164 | 165 | // Public API 166 | return { 167 | init, 168 | getCurrentSettings, 169 | updateGlobal, 170 | enablePageOverride, 171 | disablePageOverride, 172 | hasPageOverride, 173 | updateCurrent, 174 | getCurrentPageId 175 | }; 176 | })(); 177 | 178 | // Export for use in other modules 179 | window.RefreshManager = RefreshManager; 180 | -------------------------------------------------------------------------------- /SECURITY.md: -------------------------------------------------------------------------------- 1 | # Security Policy 2 | 3 | ## Supported Versions 4 | 5 | We release patches for security vulnerabilities in the latest version. 6 | 7 | ## Reporting a Vulnerability 8 | 9 | We take the security of LogLynx seriously. 
If you believe you have found a security vulnerability, please report it to us as described below. 10 | 11 | ### Please DO: 12 | 13 | 1. **Open a GitHub issue** for security vulnerabilities 14 | 2. **Provide detailed information** including: 15 | - Type of vulnerability (e.g., SQL injection, XSS) 16 | - Full paths of source file(s) related to the vulnerability 17 | - Location of the affected source code (tag/branch/commit or direct URL) 18 | - Step-by-step instructions to reproduce the issue 19 | - Proof-of-concept or exploit code (if possible) 20 | - Impact of the vulnerability and how an attacker might exploit it 21 | 22 | ### What to Expect: 23 | 24 | - **Initial Response**: We will acknowledge receipt of your vulnerability report within 48 hours 25 | - **Status Updates**: We will send you regular updates about our progress (at least every 5 business days) 26 | - **Disclosure Timeline**: We aim to address critical vulnerabilities within 7-14 days 27 | - **Credit**: We will credit you in our security advisory (unless you prefer to remain anonymous) 28 | 29 | ## Security Best Practices for Users 30 | 31 | ### Database Security 32 | 33 | 1. **Access Control** 34 | - Restrict file system access to the SQLite database file 35 | - Use proper file permissions (600 or 640) 36 | - Never expose the database file via web server 37 | 38 | 2. **Network Security** 39 | - Run LogLynx behind a reverse proxy (Traefik, Nginx, Caddy) 40 | - Use HTTPS/TLS for all connections 41 | - Implement authentication at the proxy level if needed 42 | - Restrict access by IP/network when possible 43 | 44 | 3. **Log File Security** 45 | - Ensure log files don't contain sensitive information (passwords, tokens, etc.) 46 | - Use log rotation to manage file sizes 47 | - Restrict read access to log directories 48 | 49 | ### Docker Security 50 | 51 | 1. **Container Hardening** 52 | - Run the container as a non-root user (e.g. the distroless `nonroot` image variant) 53 | - Use read-only file systems where possible 54 | - Limit container resources (memory, CPU) 55 | 56 | 2. **Secrets Management** 57 | - Never commit `.env` files to version control 58 | - Use Docker secrets for sensitive configuration 59 | - Rotate credentials regularly 60 | 61 | 3. **Image Security** 62 | - Use official base images (the provided Dockerfile uses distroless Debian) 63 | - Keep images updated regularly 64 | - Scan images for vulnerabilities (e.g. `docker scout`) 65 | 66 | ### Configuration Security 67 | 68 | 1. **Environment Variables** 69 | ```bash 70 | # Production settings 71 | SERVER_PRODUCTION=true 72 | LOG_LEVEL=warn # Don't use 'trace' or 'debug' in production 73 | ``` 74 | 75 | 2. **Database Configuration** 76 | - Enable WAL mode for better concurrency (enabled by default) 77 | - Set appropriate connection pool limits 78 | - Monitor for database locks 79 | 80 | 3. 
**GeoIP Database** 81 | - Keep MaxMind GeoLite2 databases updated 82 | - Download from official sources only 83 | 84 | ## Known Security Considerations 85 | 86 | ### SQLite Limitations 87 | 88 | - **Concurrent Writes**: SQLite has limited concurrent write support 89 | - Use connection pooling (implemented) 90 | - Monitor for database locks 91 | - Consider PostgreSQL/MySQL for high-write workloads 92 | 93 | - **File-Based**: Database is a file on disk 94 | - Ensure proper file permissions 95 | - Implement regular backups 96 | - Protect against unauthorized access 97 | 98 | ### Input Validation 99 | 100 | - **Log Parsing**: Input validation is performed on parsed log entries 101 | - Status codes are validated (0-599) 102 | - Request schemes are validated (http, https, ws, wss) 103 | - IP addresses are validated for GeoIP lookup 104 | 105 | - **API Endpoints**: All API endpoints validate input parameters 106 | - Query parameters are sanitized 107 | - Pagination limits are enforced 108 | - SQL injection protection via ORM (GORM) 109 | 110 | ### Dependencies 111 | 112 | We regularly update dependencies to address security vulnerabilities: 113 | 114 | - **Go Modules**: Updated regularly via `go get -u` 115 | - **MaxMind GeoIP**: Update databases monthly 116 | - **JavaScript Libraries**: Frontend uses minimal dependencies 117 | 118 | ## Security Features 119 | 120 | ### Implemented Protections 121 | 122 | 1. **SQL Injection Protection** 123 | - GORM ORM with parameterized queries 124 | - No raw SQL concatenation 125 | 126 | 2. **Path Traversal Protection** 127 | - File paths are validated and sanitized 128 | - No user-controllable file paths 129 | 130 | 3. **Resource Limits** 131 | - Connection pool limits prevent resource exhaustion 132 | - Query timeouts prevent long-running queries 133 | - Batch size limits for inserts 134 | 135 | 4. **Rate Limiting** (Recommended) 136 | - Implement at reverse proxy level 137 | - Use tools like Traefik, Nginx rate limiting 138 | - Protect against DoS attacks 139 | 140 | ### Logging Security 141 | 142 | - **Sensitive Data**: No sensitive data is logged by default 143 | - **Log Levels**: Configurable via `LOG_LEVEL` environment variable 144 | - **Audit Trail**: All API requests are logged by Gin framework 145 | 146 | ## Vulnerability Disclosure Policy 147 | 148 | When we receive a security vulnerability report, we will: 149 | 150 | 1. **Confirm** the vulnerability and determine its severity 151 | 2. **Develop** a fix in a private repository 152 | 3. **Notify** affected users if the vulnerability is critical 153 | 4. **Release** a patched version 154 | 5. **Publish** a security advisory on GitHub 155 | 6. **Credit** the reporter (with permission) 156 | 157 | ## Security Updates 158 | 159 | Security updates are released as patch versions and announced via: 160 | 161 | - GitHub Discussion 162 | - Release notes 163 | - README.md updates 164 | 165 | Subscribe to GitHub repository notifications to stay informed. 
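For reference, the parameterized-query pattern listed under Implemented Protections is the same one used throughout the repositories layer. A condensed sketch, adapted from `internal/database/repositories/log_source.go`:

```go
// GORM binds `name` as a SQL parameter; user input is never concatenated
// into the query string, which is what prevents SQL injection here.
func (r *logSourceRepo) FindByName(name string) (*models.LogSource, error) {
	var source models.LogSource
	if err := r.db.Where("name = ?", name).First(&source).Error; err != nil {
		return nil, err
	}
	return &source, nil
}
```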
166 | 167 | **Last Updated**: November 2025 168 | -------------------------------------------------------------------------------- /internal/ingestion/watcher.go: -------------------------------------------------------------------------------- 1 | package ingestion 2 | 3 | import ( 4 | "os" 5 | "sync" 6 | 7 | "github.com/fsnotify/fsnotify" 8 | "github.com/pterm/pterm" 9 | ) 10 | 11 | // FileWatcher monitors log files for changes using fsnotify 12 | type FileWatcher struct { 13 | watcher *fsnotify.Watcher 14 | paths []string 15 | events chan string // Channel for file modification events 16 | errors chan error 17 | logger *pterm.Logger 18 | stopCh chan struct{} 19 | wg sync.WaitGroup 20 | } 21 | 22 | // NewFileWatcher creates a new file watcher for the specified paths 23 | func NewFileWatcher(paths []string, logger *pterm.Logger) (*FileWatcher, error) { 24 | watcher, err := fsnotify.NewWatcher() 25 | if err != nil { 26 | logger.WithCaller().Error("Failed to create file watcher", logger.Args("error", err)) 27 | return nil, err 28 | } 29 | 30 | fw := &FileWatcher{ 31 | watcher: watcher, 32 | paths: paths, 33 | events: make(chan string, 100), 34 | errors: make(chan error, 10), 35 | logger: logger, 36 | stopCh: make(chan struct{}), 37 | } 38 | 39 | // Add all paths to watch 40 | successCount := 0 41 | for _, path := range paths { 42 | // Check if file exists before trying to watch 43 | if _, err := os.Stat(path); os.IsNotExist(err) { 44 | logger.Warn("Log file does not exist yet, skipping watch (will retry on file creation)", 45 | logger.Args("path", path)) 46 | continue 47 | } 48 | 49 | // Check if we have permission to read the file 50 | if file, err := os.Open(path); err != nil { 51 | if os.IsPermission(err) { 52 | logger.Error("Permission denied for log file", 53 | logger.Args("path", path, "error", err)) 54 | } else { 55 | logger.Warn("Cannot access log file, skipping watch", 56 | logger.Args("path", path, "error", err)) 57 | } 58 | continue 59 | } else { 60 | file.Close() 61 | } 62 | 63 | if err := watcher.Add(path); err != nil { 64 | logger.Warn("Failed to watch file", logger.Args("path", path, "error", err)) 65 | continue 66 | } 67 | logger.Debug("Started watching file", logger.Args("path", path)) 68 | successCount++ 69 | } 70 | 71 | if successCount == 0 { 72 | logger.Warn("No log files are currently available to watch. 
Will continue running and watch for new files.") 73 | } 74 | 75 | // Start event loop 76 | fw.wg.Add(1) 77 | go fw.eventLoop() 78 | 79 | logger.Info("File watcher initialized", 80 | logger.Args("files_watched", successCount, "files_pending", len(paths)-successCount)) 81 | return fw, nil 82 | } 83 | 84 | // eventLoop processes file system events 85 | func (fw *FileWatcher) eventLoop() { 86 | defer fw.wg.Done() 87 | 88 | for { 89 | select { 90 | case <-fw.stopCh: 91 | fw.logger.Debug("File watcher stopped") 92 | return 93 | 94 | case event, ok := <-fw.watcher.Events: 95 | if !ok { 96 | fw.logger.Warn("File watcher events channel closed") 97 | return 98 | } 99 | 100 | // Handle different event types 101 | switch { 102 | case event.Op&fsnotify.Write == fsnotify.Write: 103 | fw.logger.Trace("File write detected", fw.logger.Args("file", event.Name)) 104 | select { 105 | case fw.events <- event.Name: 106 | default: 107 | fw.logger.Warn("Event channel full, dropping event", fw.logger.Args("file", event.Name)) 108 | } 109 | 110 | case event.Op&fsnotify.Create == fsnotify.Create: 111 | fw.logger.Debug("File created", fw.logger.Args("file", event.Name)) 112 | // Re-add watch in case of log rotation 113 | if err := fw.watcher.Add(event.Name); err != nil { 114 | fw.logger.WithCaller().Warn("Failed to watch new file", fw.logger.Args("file", event.Name, "error", err)) 115 | } 116 | 117 | case event.Op&fsnotify.Remove == fsnotify.Remove: 118 | fw.logger.Debug("File removed (possible rotation)", fw.logger.Args("file", event.Name)) 119 | select { 120 | case fw.events <- event.Name: 121 | default: 122 | fw.logger.Warn("Event channel full, dropping remove event", fw.logger.Args("file", event.Name)) 123 | } 124 | 125 | case event.Op&fsnotify.Rename == fsnotify.Rename: 126 | fw.logger.Debug("File renamed (possible rotation)", fw.logger.Args("file", event.Name)) 127 | select { 128 | case fw.events <- event.Name: 129 | default: 130 | fw.logger.Warn("Event channel full, dropping rename event", fw.logger.Args("file", event.Name)) 131 | } 132 | } 133 | 134 | case err, ok := <-fw.watcher.Errors: 135 | if !ok { 136 | fw.logger.Warn("File watcher errors channel closed") 137 | return 138 | } 139 | fw.logger.WithCaller().Error("File watcher error", fw.logger.Args("error", err)) 140 | select { 141 | case fw.errors <- err: 142 | default: 143 | fw.logger.Warn("Error channel full, dropping error") 144 | } 145 | } 146 | } 147 | } 148 | 149 | // Events returns the channel for file modification events 150 | func (fw *FileWatcher) Events() <-chan string { 151 | return fw.events 152 | } 153 | 154 | // Errors returns the channel for watcher errors 155 | func (fw *FileWatcher) Errors() <-chan error { 156 | return fw.errors 157 | } 158 | 159 | // AddPath adds a new path to watch 160 | func (fw *FileWatcher) AddPath(path string) error { 161 | if err := fw.watcher.Add(path); err != nil { 162 | fw.logger.WithCaller().Warn("Failed to add watch path", fw.logger.Args("path", path, "error", err)) 163 | return err 164 | } 165 | fw.paths = append(fw.paths, path) 166 | fw.logger.Info("Added new watch path", fw.logger.Args("path", path)) 167 | return nil 168 | } 169 | 170 | // RemovePath removes a path from watching 171 | func (fw *FileWatcher) RemovePath(path string) error { 172 | if err := fw.watcher.Remove(path); err != nil { 173 | fw.logger.WithCaller().Warn("Failed to remove watch path", fw.logger.Args("path", path, "error", err)) 174 | return err 175 | } 176 | fw.logger.Info("Removed watch path", fw.logger.Args("path", path)) 177 | 
return nil 178 | } 179 | 180 | // Close stops the file watcher and cleans up resources 181 | func (fw *FileWatcher) Close() error { 182 | fw.logger.Debug("Closing file watcher...") 183 | close(fw.stopCh) 184 | fw.wg.Wait() 185 | 186 | if err := fw.watcher.Close(); err != nil { 187 | fw.logger.WithCaller().Error("Failed to close file watcher", fw.logger.Args("error", err)) 188 | return err 189 | } 190 | 191 | close(fw.events) 192 | close(fw.errors) 193 | fw.logger.Info("File watcher closed") 194 | return nil 195 | } 196 | -------------------------------------------------------------------------------- /web/static/css/geographic.css: -------------------------------------------------------------------------------- 1 | /** 2 | * Geographic Analytics Specific Styles 3 | * Includes map controls, popups, and legend styling 4 | */ 5 | 6 | /* Leaflet Popup Styling for Dark Theme */ 7 | .leaflet-popup-content-wrapper { 8 | background: #1A1A1D !important; 9 | color: #E8E8E8 !important; 10 | border: 1px solid #444 !important; 11 | border-radius: 8px !important; 12 | } 13 | 14 | .leaflet-popup-tip { 15 | background: #1A1A1D !important; 16 | } 17 | 18 | .leaflet-popup-close-button { 19 | color: #E8E8E8 !important; 20 | font-size: 20px !important; 21 | } 22 | 23 | .leaflet-popup-close-button:hover { 24 | color: #FF6B35 !important; 25 | } 26 | 27 | /* Map Legend Styling */ 28 | .leaflet-control-layers, 29 | .leaflet-bar, 30 | .leaflet-control { 31 | background: #1A1A1D !important; 32 | border: 1px solid #444 !important; 33 | } 34 | 35 | .leaflet-control-layers-toggle { 36 | background-color: #1A1A1D !important; 37 | border: 1px solid #444 !important; 38 | } 39 | 40 | .leaflet-control-layers-expanded { 41 | color: #E8E8E8 !important; 42 | background: #1A1A1D !important; 43 | } 44 | 45 | .leaflet-control-layers label { 46 | color: #E8E8E8 !important; 47 | } 48 | 49 | .leaflet-control-layers-separator { 50 | border-top: 1px solid #444 !important; 51 | } 52 | 53 | /* Zoom Control Buttons */ 54 | .leaflet-bar a { 55 | background-color: #1A1A1D !important; 56 | border-bottom: 1px solid #444 !important; 57 | color: #E8E8E8 !important; 58 | } 59 | 60 | .leaflet-bar a:hover { 61 | background-color: #2A2A2D !important; 62 | color: #FF6B35 !important; 63 | } 64 | 65 | .leaflet-bar a:first-child { 66 | border-top-left-radius: 4px; 67 | border-top-right-radius: 4px; 68 | } 69 | 70 | .leaflet-bar a:last-child { 71 | border-bottom-left-radius: 4px; 72 | border-bottom-right-radius: 4px; 73 | border-bottom: none; 74 | } 75 | 76 | .leaflet-touch .leaflet-bar a { 77 | width: 34px; 78 | height: 34px; 79 | line-height: 34px; 80 | } 81 | 82 | /* Attribution */ 83 | .leaflet-control-attribution { 84 | background: rgba(26, 26, 29, 0.8) !important; 85 | color: #999 !important; 86 | } 87 | 88 | .leaflet-control-attribution a { 89 | color: #FF6B35 !important; 90 | } 91 | 92 | /* Custom Marker Cluster Styling */ 93 | .marker-cluster-small, 94 | .marker-cluster-medium, 95 | .marker-cluster-large { 96 | background-color: rgba(255, 107, 53, 0.6) !important; 97 | } 98 | 99 | .marker-cluster-small div, 100 | .marker-cluster-medium div, 101 | .marker-cluster-large div { 102 | background-color: rgba(255, 107, 53, 0.8) !important; 103 | color: #FFFFFF !important; 104 | font-weight: bold; 105 | } 106 | 107 | /* Map Container */ 108 | #worldMap { 109 | background: #0A0A0D; 110 | border-radius: 8px; 111 | border: 1px solid #2A2A2D; 112 | } 113 | 114 | /* Map Loading State */ 115 | .leaflet-container .leaflet-tile-pane { 116 | opacity: 0; 117 | 
transition: opacity 0.3s; 118 | } 119 | 120 | .leaflet-container.leaflet-fade-anim .leaflet-tile-pane { 121 | opacity: 1; 122 | } 123 | 124 | /* Custom Map Controls in Card Header */ 125 | .map-controls { 126 | display: flex; 127 | gap: 8px; 128 | align-items: center; 129 | } 130 | 131 | .map-controls .btn { 132 | padding: 6px 12px; 133 | font-size: 12px; 134 | border: 1px solid #444; 135 | background: #1A1A1D; 136 | color: #E8E8E8; 137 | transition: all 0.2s; 138 | } 139 | 140 | .map-controls .btn:hover { 141 | background: #2A2A2D; 142 | border-color: #FF6B35; 143 | color: #FF6B35; 144 | } 145 | 146 | .map-controls .btn.active { 147 | background: #FF6B35; 148 | border-color: #FF6B35; 149 | color: #FFFFFF; 150 | } 151 | 152 | /* Heatmap Legend (if shown) */ 153 | .heatmap-legend { 154 | background: rgba(26, 26, 29, 0.9); 155 | padding: 10px; 156 | border-radius: 4px; 157 | border: 1px solid #444; 158 | color: #E8E8E8; 159 | } 160 | 161 | .heatmap-legend .legend-item { 162 | display: flex; 163 | align-items: center; 164 | margin-bottom: 5px; 165 | } 166 | 167 | .heatmap-legend .legend-color { 168 | width: 20px; 169 | height: 20px; 170 | margin-right: 8px; 171 | border-radius: 3px; 172 | border: 1px solid #666; 173 | } 174 | 175 | .heatmap-legend .legend-label { 176 | font-size: 12px; 177 | color: #E8E8E8; 178 | } 179 | 180 | /* Traffic Intensity Legend */ 181 | .traffic-legend { 182 | position: absolute; 183 | bottom: 30px; 184 | right: 10px; 185 | background: rgba(26, 26, 29, 0.95); 186 | padding: 12px; 187 | border-radius: 6px; 188 | border: 1px solid #444; 189 | z-index: 1000; 190 | min-width: 150px; 191 | } 192 | 193 | .traffic-legend h6 { 194 | margin: 0 0 8px 0; 195 | font-size: 12px; 196 | font-weight: bold; 197 | color: #FF6B35; 198 | text-transform: uppercase; 199 | } 200 | 201 | .traffic-legend .legend-item { 202 | display: flex; 203 | align-items: center; 204 | margin-bottom: 6px; 205 | font-size: 11px; 206 | color: #E8E8E8; 207 | } 208 | 209 | .traffic-legend .legend-item:last-child { 210 | margin-bottom: 0; 211 | } 212 | 213 | .traffic-legend .legend-dot { 214 | width: 12px; 215 | height: 12px; 216 | border-radius: 50%; 217 | border: 2px solid white; 218 | margin-right: 8px; 219 | flex-shrink: 0; 220 | } 221 | 222 | .traffic-legend .legend-dot.very-high { background-color: #dc3545; } 223 | .traffic-legend .legend-dot.high { background-color: #F46319; } 224 | .traffic-legend .legend-dot.medium { background-color: #ffc107; } 225 | .traffic-legend .legend-dot.low { background-color: #28a745; } 226 | 227 | /* Responsive Design */ 228 | @media (max-width: 768px) { 229 | .traffic-legend { 230 | bottom: 10px; 231 | right: 10px; 232 | min-width: 120px; 233 | padding: 8px; 234 | } 235 | 236 | .traffic-legend h6 { 237 | font-size: 10px; 238 | } 239 | 240 | .traffic-legend .legend-item { 241 | font-size: 10px; 242 | } 243 | 244 | .map-controls { 245 | flex-wrap: wrap; 246 | } 247 | 248 | .map-controls .btn { 249 | padding: 4px 8px; 250 | font-size: 11px; 251 | } 252 | } 253 | -------------------------------------------------------------------------------- /internal/database/models/http_request.go: -------------------------------------------------------------------------------- 1 | package models 2 | 3 | import ( 4 | "time" 5 | 6 | "gorm.io/gorm" 7 | ) 8 | 9 | type HTTPRequest struct { 10 | ID uint `gorm:"primaryKey;autoIncrement"` 11 | SourceName string `gorm:"type:varchar(255);not null"` // index created by OptimizeDatabase 12 | Timestamp time.Time `gorm:"not null"` // index created by 
OptimizeDatabase 13 | RequestHash string `gorm:"type:char(64);uniqueIndex:idx_request_hash"` // SHA256 hash for deduplication (UNIQUE index is essential) 14 | 15 | // Partition key for future scaling (YYYY-MM format) 16 | PartitionKey string `gorm:"type:varchar(7)"` // index created by OptimizeDatabase 17 | 18 | // Client info 19 | ClientIP string `gorm:"type:varchar(45);not null"` // index created by OptimizeDatabase (IPv6 support) 20 | ClientPort int `gorm:"check:client_port >= 0 AND client_port <= 65535"` 21 | ClientUser string `gorm:"type:varchar(255)"` // HTTP authentication user (NPM: remote_user) 22 | 23 | // Request info 24 | Method string `gorm:"type:varchar(10);not null"` // GET, POST, PUT, DELETE, etc. 25 | Protocol string `gorm:"type:varchar(10)"` // HTTP/1.1, HTTP/2.0, HTTP/3.0 26 | Host string `gorm:"type:varchar(255);not null"` // index created by OptimizeDatabase 27 | Path string `gorm:"type:varchar(2048);not null"` // Paths can be long 28 | QueryString string `gorm:"type:text"` // Can be very long 29 | RequestLength int64 `gorm:"check:request_length >= 0"` // Request size in bytes (NPM/Caddy) 30 | RequestScheme string `gorm:"type:varchar(10);check:request_scheme IN ('http', 'https', 'ws', 'wss', '')"` // Request scheme: http, https, ws (WebSocket), wss (WebSocket Secure) 31 | 32 | // Response info 33 | // Allow 0 to represent unknown/invalid status codes produced by some parsers 34 | // Parser code may set StatusCode=0 for invalid values, so permit 0 in DB 35 | StatusCode int `gorm:"not null;check:status_code >= 0 AND status_code < 600"` // index created by OptimizeDatabase 36 | ResponseSize int64 `gorm:"check:response_size >= 0"` 37 | ResponseTimeMs float64 `gorm:"check:response_time_ms >= 0"` // index created by OptimizeDatabase - Total response time 38 | ResponseContentType string `gorm:"type:varchar(255)"` // downstream Content-Type 39 | 40 | // Detailed timing (optional, for advanced proxies) 41 | Duration int64 `gorm:"check:duration >= 0"` // Duration in nanoseconds (for precise hash calculation) 42 | StartUTC string `gorm:"type:varchar(35)"` // Start timestamp with nanosecond precision (RFC3339Nano format) 43 | UpstreamResponseTimeMs float64 `gorm:"check:upstream_response_time_ms >= 0"` // Time spent waiting for upstream/backend 44 | RetryAttempts int `gorm:"check:retry_attempts >= 0"` // Number of retry attempts (Traefik) - index created by OptimizeDatabase 45 | RequestsTotal int `gorm:"check:requests_total >= 0"` // Total number of requests at router level (Traefik CLF field) 46 | 47 | // Headers 48 | UserAgent string `gorm:"type:varchar(512)"` // Most user agents are <512 chars 49 | Referer string `gorm:"type:varchar(512)"` // Most referers are <512 chars 50 | 51 | // Parsed User-Agent fields 52 | Browser string `gorm:"type:varchar(50)"` // index created by OptimizeDatabase 53 | BrowserVersion string `gorm:"type:varchar(20)"` 54 | OS string `gorm:"type:varchar(50)"` // index created by OptimizeDatabase 55 | OSVersion string `gorm:"type:varchar(20)"` 56 | DeviceType string `gorm:"type:varchar(20)"` // desktop, mobile, tablet, bot - index created by OptimizeDatabase 57 | 58 | // Proxy/Upstream info (proxy-agnostic naming) 59 | // These fields work for Traefik, NPM, Caddy, HAProxy, etc. 
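// (Illustration, not schema: a hedged sketch of how a parser might fill these
// proxy-agnostic fields from a decoded JSON log entry. The NPM/Caddy key names
// are taken from the per-field comments below; the helper itself does not
// exist in this repo and is an assumption.)
//
//	func mapBackendFields(r *HTTPRequest, entry map[string]any) {
//		pick := func(keys ...string) string {
//			for _, k := range keys {
//				if v, ok := entry[k].(string); ok && v != "" {
//					return v
//				}
//			}
//			return ""
//		}
//		r.BackendName = pick("BackendName", "proxy_upstream_name", "upstream_addr") // Traefik, NPM, Caddy
//		r.RouterName = pick("RouterName", "server_name")                            // Traefik, NPM
//	}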
60 | BackendName string `gorm:"type:varchar(255)"` // Traefik: BackendName, NPM: proxy_upstream_name, Caddy: upstream_addr 61 | BackendURL string `gorm:"type:varchar(512)"` // Full backend URL (Traefik: BackendURL, others: constructed) 62 | RouterName string `gorm:"type:varchar(255)"` // Traefik: RouterName, NPM: server_name, Caddy: logger name - index created by OptimizeDatabase 63 | UpstreamStatus int `gorm:"check:upstream_status >= 0 AND upstream_status < 600"` // Upstream/backend response status 64 | UpstreamContentType string `gorm:"type:varchar(255)"` // Origin/backend Content-Type (origin_Content-Type in Traefik) 65 | ClientHostname string `gorm:"type:varchar(255)"` // Client hostname (if reverse DNS available, from ClientHost) 66 | 67 | // TLS info 68 | TLSVersion string `gorm:"type:varchar(10)"` // 1.2, 1.3 69 | TLSCipher string `gorm:"type:varchar(255)"` 70 | TLSServerName string `gorm:"type:varchar(255)"` // SNI server name 71 | 72 | // Tracing & IDs 73 | RequestID string `gorm:"type:varchar(100)"` // X-Request-ID or similar - index created by OptimizeDatabase 74 | TraceID string `gorm:"type:varchar(100)"` // Distributed tracing ID (optional) - index created by OptimizeDatabase 75 | 76 | // GeoIP enrichment 77 | GeoCountry string `gorm:"type:varchar(2)"` // ISO 3166-1 alpha-2 - index created by OptimizeDatabase 78 | GeoCity string `gorm:"type:varchar(100)"` 79 | GeoLat float64 80 | GeoLon float64 81 | ASN int 82 | ASNOrg string `gorm:"type:varchar(255)"` 83 | 84 | // Extensibility: JSON field for proxy-specific data 85 | // This allows storing proxy-specific fields without schema changes 86 | // Examples: Traefik middlewares, NPM custom fields, Caddy logger details 87 | ProxyMetadata string `gorm:"type:text"` // JSON string for flexible data 88 | 89 | CreatedAt time.Time `gorm:"autoCreateTime"` // index created by OptimizeDatabase 90 | 91 | // Foreign key 92 | LogSource LogSource `gorm:"foreignKey:SourceName;references:Name"` 93 | } 94 | 95 | func (HTTPRequest) TableName() string { 96 | return "http_requests" 97 | } 98 | 99 | // BeforeCreate hook to automatically set partition key 100 | func (r *HTTPRequest) BeforeCreate(tx *gorm.DB) error { 101 | // Set partition key for future partitioning support (YYYY-MM format) 102 | if r.PartitionKey == "" { 103 | r.PartitionKey = r.Timestamp.Format("2006-01") 104 | } 105 | return nil 106 | } 107 | -------------------------------------------------------------------------------- /internal/config/config.go: -------------------------------------------------------------------------------- 1 | package config 2 | 3 | import ( 4 | "os" 5 | "strconv" 6 | "time" 7 | 8 | "github.com/joho/godotenv" 9 | ) 10 | 11 | // Config holds all application configuration 12 | type Config struct { 13 | // Database Configuration 14 | Database DatabaseConfig 15 | 16 | // GeoIP Configuration 17 | GeoIP GeoIPConfig 18 | 19 | // Log configuration 20 | LogLevel string 21 | 22 | // Log Sources Configuration 23 | LogSources LogSourcesConfig 24 | 25 | // Server Configuration 26 | Server ServerConfig 27 | 28 | // Performance Configuration 29 | Performance PerformanceConfig 30 | } 31 | 32 | // DatabaseConfig contains database-related settings 33 | type DatabaseConfig struct { 34 | Path string 35 | MaxOpenConns int 36 | MaxIdleConns int 37 | ConnMaxLife time.Duration 38 | RetentionDays int // Number of days to retain data (0 = unlimited) 39 | CleanupInterval time.Duration // How often to check for cleanup (default: 1 hour) 40 | CleanupTime string // Time of day to run 
cleanup (24-hour format, e.g., "02:00") 41 | VacuumEnabled bool // Run VACUUM after cleanup to reclaim space 42 | 43 | // Connection Pool Monitoring 44 | PoolMonitoringEnabled bool // Enable connection pool monitoring 45 | PoolMonitoringInterval time.Duration // How often to check pool stats 46 | PoolSaturationThreshold float64 // Alert threshold (0.0-1.0, default: 0.85) 47 | AutoTuning bool // Enable auto-tuning based on CPU cores 48 | } 49 | 50 | // GeoIPConfig contains GeoIP database paths 51 | type GeoIPConfig struct { 52 | CityDBPath string 53 | CountryDBPath string 54 | ASNDBPath string 55 | Enabled bool 56 | } 57 | 58 | // LogSourcesConfig contains log source paths 59 | type LogSourcesConfig struct { 60 | TraefikLogPath string 61 | TraefikLogFormat string // auto, json, clf 62 | AutoDiscover bool 63 | InitialImportDays int // Only import last N days on first run (0 = import all) 64 | InitialImportEnable bool // Enable initial import limiting 65 | } 66 | 67 | // ServerConfig contains web server settings 68 | type ServerConfig struct { 69 | Host string 70 | Port int 71 | Production bool 72 | DashboardEnabled bool // If false, only API routes are exposed 73 | SplashScreenEnabled bool // If false, splash screen is disabled on startup 74 | } 75 | 76 | // PerformanceConfig contains performance tuning settings 77 | type PerformanceConfig struct { 78 | RealtimeMetricsInterval time.Duration 79 | GeoIPCacheSize int 80 | BatchSize int 81 | WorkerPoolSize int 82 | } 83 | 84 | // Load reads configuration from .env file and environment variables 85 | func Load() (*Config, error) { 86 | // Try to load .env file (ignore error if file doesn't exist) 87 | _ = godotenv.Load() 88 | 89 | cfg := &Config{ 90 | Database: DatabaseConfig{ 91 | Path: getEnv("DB_PATH", "loglynx.db"), 92 | MaxOpenConns: getEnvAsInt("DB_MAX_OPEN_CONNS", 25), 93 | MaxIdleConns: getEnvAsInt("DB_MAX_IDLE_CONNS", 10), 94 | ConnMaxLife: getEnvAsDuration("DB_CONN_MAX_LIFE", time.Hour), 95 | RetentionDays: getEnvAsInt("DB_RETENTION_DAYS", 60), 96 | CleanupInterval: getEnvAsDuration("DB_CLEANUP_INTERVAL", 1*time.Hour), 97 | CleanupTime: getEnv("DB_CLEANUP_TIME", "02:00"), 98 | VacuumEnabled: getEnvAsBool("DB_VACUUM_ENABLED", true), 99 | 100 | // Connection Pool Monitoring 101 | PoolMonitoringEnabled: getEnvAsBool("DB_POOL_MONITORING", true), 102 | PoolMonitoringInterval: getEnvAsDuration("DB_POOL_MONITOR_INTERVAL", 30*time.Second), 103 | PoolSaturationThreshold: getEnvAsFloat("DB_POOL_SATURATION_THRESHOLD", 0.85), 104 | AutoTuning: getEnvAsBool("DB_AUTO_TUNING", true), 105 | }, 106 | GeoIP: GeoIPConfig{ 107 | CityDBPath: getEnv("GEOIP_CITY_DB", "geoip/GeoLite2-City.mmdb"), 108 | CountryDBPath: getEnv("GEOIP_COUNTRY_DB", "geoip/GeoLite2-Country.mmdb"), 109 | ASNDBPath: getEnv("GEOIP_ASN_DB", "geoip/GeoLite2-ASN.mmdb"), 110 | Enabled: getEnvAsBool("GEOIP_ENABLED", true), 111 | }, 112 | LogSources: LogSourcesConfig{ 113 | TraefikLogPath: getEnv("TRAEFIK_LOG_PATH", "traefik/logs/access.log"), 114 | TraefikLogFormat: getEnv("TRAEFIK_LOG_FORMAT", "auto"), 115 | AutoDiscover: getEnvAsBool("LOG_AUTO_DISCOVER", true), 116 | InitialImportDays: getEnvAsInt("INITIAL_IMPORT_DAYS", 60), 117 | InitialImportEnable: getEnvAsBool("INITIAL_IMPORT_ENABLE", true), 118 | }, 119 | Server: ServerConfig{ 120 | Host: getEnv("SERVER_HOST", "0.0.0.0"), 121 | Port: getEnvAsInt("SERVER_PORT", 8080), 122 | Production: getEnvAsBool("SERVER_PRODUCTION", false), 123 | DashboardEnabled: getEnvAsBool("DASHBOARD_ENABLED", true), 124 | SplashScreenEnabled: 
getEnvAsBool("SPLASH_SCREEN_ENABLED", true), 125 | }, 126 | Performance: PerformanceConfig{ 127 | RealtimeMetricsInterval: getEnvAsDuration("METRICS_INTERVAL", 1*time.Second), 128 | GeoIPCacheSize: getEnvAsInt("GEOIP_CACHE_SIZE", 10000), 129 | BatchSize: getEnvAsInt("BATCH_SIZE", 1000), 130 | WorkerPoolSize: getEnvAsInt("WORKER_POOL_SIZE", 4), 131 | }, 132 | LogLevel: getEnv("LOG_LEVEL", "info"), 133 | } 134 | 135 | return cfg, nil 136 | } 137 | 138 | // Helper functions to read environment variables with defaults 139 | 140 | func getEnv(key, defaultValue string) string { 141 | if value := os.Getenv(key); value != "" { 142 | return value 143 | } 144 | return defaultValue 145 | } 146 | 147 | func getEnvAsInt(key string, defaultValue int) int { 148 | valueStr := os.Getenv(key) 149 | if valueStr == "" { 150 | return defaultValue 151 | } 152 | if value, err := strconv.Atoi(valueStr); err == nil { 153 | return value 154 | } 155 | return defaultValue 156 | } 157 | 158 | func getEnvAsBool(key string, defaultValue bool) bool { 159 | valueStr := os.Getenv(key) 160 | if valueStr == "" { 161 | return defaultValue 162 | } 163 | if value, err := strconv.ParseBool(valueStr); err == nil { 164 | return value 165 | } 166 | return defaultValue 167 | } 168 | 169 | func getEnvAsDuration(key string, defaultValue time.Duration) time.Duration { 170 | valueStr := os.Getenv(key) 171 | if valueStr == "" { 172 | return defaultValue 173 | } 174 | if value, err := time.ParseDuration(valueStr); err == nil { 175 | return value 176 | } 177 | return defaultValue 178 | } 179 | 180 | func getEnvAsFloat(key string, defaultValue float64) float64 { 181 | valueStr := os.Getenv(key) 182 | if valueStr == "" { 183 | return defaultValue 184 | } 185 | if value, err := strconv.ParseFloat(valueStr, 64); err == nil { 186 | return value 187 | } 188 | return defaultValue 189 | } 190 | -------------------------------------------------------------------------------- /internal/discovery/traefik.go: -------------------------------------------------------------------------------- 1 | package discovery 2 | 3 | import ( 4 | "bufio" 5 | "encoding/json" 6 | "os" 7 | "regexp" 8 | "loglynx/internal/database/models" 9 | "strings" 10 | 11 | "github.com/pterm/pterm" 12 | ) 13 | 14 | type TraefikDetector struct{ 15 | logger *pterm.Logger 16 | configuredPath string 17 | autoDiscover bool 18 | } 19 | 20 | func NewTraefikDetector(logger *pterm.Logger) ServiceDetector { 21 | // Read LOG_AUTO_DISCOVER setting (default: true for backward compatibility) 22 | autoDiscover := true 23 | if autoDiscoverEnv := os.Getenv("LOG_AUTO_DISCOVER"); autoDiscoverEnv != "" { 24 | autoDiscover = autoDiscoverEnv == "true" 25 | } 26 | 27 | return &TraefikDetector{ 28 | logger: logger, 29 | configuredPath: os.Getenv("TRAEFIK_LOG_PATH"), 30 | autoDiscover: autoDiscover, 31 | } 32 | } 33 | 34 | func (d *TraefikDetector) Name() string { 35 | return "traefik" 36 | } 37 | 38 | func (d *TraefikDetector) Detect() ([]*models.LogSource, error) { 39 | sources := []*models.LogSource{} 40 | d.logger.Trace("Detecting Traefik log sources...") 41 | 42 | // Build paths list with priority logic: 43 | // 1. If TRAEFIK_LOG_PATH is set, use ONLY that path (no auto-discovery) 44 | // 2. 
If TRAEFIK_LOG_PATH is not set OR points to non-existent file, use auto-discovery 45 | paths := []string{} 46 | 47 | // Check if configured path exists and is valid 48 | configuredPathValid := false 49 | if d.configuredPath != "" { 50 | d.logger.Debug("Checking configured Traefik log path", d.logger.Args("path", d.configuredPath)) 51 | if fileInfo, err := os.Stat(d.configuredPath); err == nil && !fileInfo.IsDir() { 52 | configuredPathValid = true 53 | d.logger.Info("Using configured TRAEFIK_LOG_PATH (auto-discovery disabled)", 54 | d.logger.Args("path", d.configuredPath)) 55 | } else { 56 | d.logger.Warn("Configured TRAEFIK_LOG_PATH not accessible, falling back to auto-discovery", 57 | d.logger.Args("path", d.configuredPath, "error", err)) 58 | } 59 | } 60 | 61 | // Priority 1: Use configured path if valid (disables auto-discovery) 62 | if configuredPathValid { 63 | paths = append(paths, d.configuredPath) 64 | } else if d.autoDiscover { 65 | // Priority 2: Auto-discovery - only if enabled AND configured path is not set or invalid 66 | d.logger.Debug("Using auto-discovery for Traefik log sources", 67 | d.logger.Args("LOG_AUTO_DISCOVER", true)) 68 | paths = append(paths, "traefik/logs/access.log", "traefik/logs/error.log") 69 | } else { 70 | // Auto-discovery disabled and no valid configured path 71 | d.logger.Info("Auto-discovery disabled and no valid TRAEFIK_LOG_PATH configured", 72 | d.logger.Args("LOG_AUTO_DISCOVER", false, "TRAEFIK_LOG_PATH", d.configuredPath)) 73 | } 74 | 75 | for _, path := range paths { 76 | d.logger.Trace("Checking", d.logger.Args("path", path)) 77 | if fileInfo, err := os.Stat(path); err == nil { 78 | d.logger.Trace("File found", d.logger.Args("path", path)) 79 | if !fileInfo.IsDir() && fileInfo.Size() > 0 { 80 | d.logger.Trace("Validating format", d.logger.Args("path", path)) 81 | if isTraefikFormat(path) { 82 | d.logger.Info("✓ Traefik log source detected", d.logger.Args("path", path)) 83 | sources = append(sources, &models.LogSource{ 84 | Name: generateName(path), 85 | Path: path, 86 | ParserType: "traefik", 87 | }) 88 | // Only use the first valid source found 89 | break 90 | }else{ 91 | d.logger.WithCaller().Warn("Format invalid - not a Traefik access log", d.logger.Args("path", path)) 92 | } 93 | } else { 94 | d.logger.Trace("File is directory or empty", d.logger.Args("path", path, "size", fileInfo.Size())) 95 | } 96 | } else { 97 | d.logger.Trace("File not accessible", d.logger.Args("path", path, "error", err.Error())) 98 | } 99 | } 100 | 101 | if len(sources) == 0 { 102 | if d.configuredPath != "" { 103 | d.logger.Warn("No valid Traefik log source found at configured path", 104 | d.logger.Args("TRAEFIK_LOG_PATH", d.configuredPath)) 105 | } else if d.autoDiscover { 106 | d.logger.Warn("No Traefik log sources found via auto-discovery", 107 | d.logger.Args("hint", "Set TRAEFIK_LOG_PATH in .env or ensure traefik/logs/access.log exists")) 108 | } else { 109 | d.logger.Warn("No Traefik log sources configured", 110 | d.logger.Args("hint", "Set TRAEFIK_LOG_PATH in .env or enable LOG_AUTO_DISCOVER=true")) 111 | } 112 | } 113 | 114 | return sources, nil 115 | } 116 | 117 | func isTraefikFormat(path string) bool { 118 | file, err := os.Open(path) 119 | if err != nil { 120 | return false 121 | } 122 | defer file.Close() 123 | 124 | scanner := bufio.NewScanner(file) 125 | if scanner.Scan() { 126 | line := scanner.Text() 127 | 128 | // Try JSON format first 129 | var logEntry map[string]any 130 | if err := json.Unmarshal([]byte(line), &logEntry); err == nil { 131 | // 
Check for multiple Traefik-specific fields to improve detection accuracy 132 | // Traefik access logs typically contain these fields 133 | traefikFields := []string{"ClientHost", "RequestMethod", "RequestPath", "DownstreamStatus", "RouterName"} 134 | matchCount := 0 135 | 136 | for _, field := range traefikFields { 137 | if _, ok := logEntry[field]; ok { 138 | matchCount++ 139 | } 140 | } 141 | 142 | // If we find at least 2 Traefik-specific fields, consider it a Traefik log 143 | if matchCount >= 2 { 144 | return true 145 | } 146 | } 147 | 148 | // Try CLF format (both Traefik and generic) 149 | // Traefik CLF pattern: <client> - <user> [<timestamp>] "<method> <path> HTTP/<version>" <status> <size> "<referer>" "<user-agent>" <count> "<router>" "<server-URL>" <duration>ms 150 | traefikCLFPattern := `^(\S+) \S+ (\S+) \[([^\]]+)\] "([A-Z]+) ([^ "]+)? HTTP/[0-9.]+" (\d{3}) (\d+|-) "([^"]*)" "([^"]*)" (\d+) "([^"]*)" "([^"]*)" (\d+)ms` 151 | if matched, _ := regexp.MatchString(traefikCLFPattern, line); matched { 152 | return true 153 | } 154 | 155 | // Generic CLF pattern: <client> - <user> [<timestamp>] "<method> <path> HTTP/<version>" <status> <size> "<referer>" "<user-agent>" 156 | genericCLFPattern := `^(\S+) \S+ (\S+) \[([^\]]+)\] "([A-Z]+) ([^ "]+)? HTTP/[0-9.]+" (\d{3}) (\d+|-) "([^"]*)" "([^"]*)"` 157 | if matched, _ := regexp.MatchString(genericCLFPattern, line); matched { 158 | return true 159 | } 160 | } 161 | return false 162 | } 163 | 164 | func generateName(path string) string { 165 | pathSplit := strings.Split(path, "/") 166 | fileNameExtension := pathSplit[(len(pathSplit)-1)] 167 | fileName := strings.Split(fileNameExtension, ".")[0] 168 | return "traefik-" + fileName 169 | } -------------------------------------------------------------------------------- /web/static/css/tooltip.css: -------------------------------------------------------------------------------- 1 | /** 2 | * LogLynx Tooltip Component 3 | * Reusable information tooltips with hover effects 4 | */ 5 | 6 | /* Tooltip Container */ 7 | .info-tooltip { 8 | position: relative; 9 | display: inline-flex; 10 | align-items: center; 11 | justify-content: center; 12 | margin-left: 0.5rem; 13 | cursor: help; 14 | vertical-align: middle; 15 | } 16 | 17 | /* Info Icon */ 18 | .info-tooltip-icon { 19 | display: inline-flex; 20 | align-items: center; 21 | justify-content: center; 22 | width: 18px; 23 | height: 18px; 24 | border-radius: 50%; 25 | background: rgba(255, 107, 53, 0.2); 26 | color: var(--loglynx-primary); 27 | font-size: 11px; 28 | transition: all var(--transition-fast); 29 | border: 1px solid var(--loglynx-primary); 30 | } 31 | 32 | .info-tooltip:hover .info-tooltip-icon { 33 | background: var(--loglynx-primary); 34 | color: white; 35 | transform: scale(1.1); 36 | box-shadow: 0 0 10px rgba(255, 107, 53, 0.5); 37 | } 38 | 39 | /* Tooltip Content */ 40 | .info-tooltip-content { 41 | position: absolute; 42 | bottom: calc(100% + 8px); 43 | left: 50%; 44 | transform: translateX(-50%); 45 | background: var(--loglynx-card); 46 | color: var(--loglynx-text); 47 | padding: 0.75rem 1rem; 48 | border-radius: var(--radius-md); 49 | box-shadow: 0 4px 20px rgba(0, 0, 0, 0.5); 50 | border: 1px solid var(--loglynx-primary); 51 | min-width: 200px; 52 | max-width: 350px; 53 | font-size: var(--font-size-sm); 54 | line-height: 1.5; 55 | z-index: 9999; 56 | opacity: 0; 57 | visibility: hidden; 58 | transition: all var(--transition-base); 59 | pointer-events: none; 60 | white-space: normal; 61 | text-align: left; 62 | } 63 | 64 | /* Tooltip Arrow */ 65 | .info-tooltip-content::after { 66 | content: ''; 67 | position: absolute; 68 | top: 100%; 69 | left: 50%; 70 | transform: translateX(-50%); 71 | border: 6px solid transparent; 72 | border-top-color:
var(--loglynx-primary); 73 | } 74 | 75 | .info-tooltip-content::before { 76 | content: ''; 77 | position: absolute; 78 | top: 100%; 79 | left: 50%; 80 | transform: translateX(-50%); 81 | border: 7px solid transparent; 82 | border-top-color: var(--loglynx-card); 83 | margin-top: 1px; 84 | } 85 | 86 | /* Show tooltip on hover */ 87 | .info-tooltip:hover .info-tooltip-content { 88 | opacity: 1; 89 | visibility: visible; 90 | bottom: calc(100% + 12px); 91 | } 92 | 93 | /* Tooltip variations for different positions */ 94 | .info-tooltip.tooltip-bottom .info-tooltip-content { 95 | bottom: auto; 96 | top: calc(100% + 8px); 97 | } 98 | 99 | .info-tooltip.tooltip-bottom .info-tooltip-content::after { 100 | top: auto; 101 | bottom: 100%; 102 | border-top-color: transparent; 103 | border-bottom-color: var(--loglynx-primary); 104 | } 105 | 106 | .info-tooltip.tooltip-bottom .info-tooltip-content::before { 107 | top: auto; 108 | bottom: 100%; 109 | border-top-color: transparent; 110 | border-bottom-color: var(--loglynx-card); 111 | margin-top: 0; 112 | margin-bottom: 1px; 113 | } 114 | 115 | .info-tooltip.tooltip-bottom:hover .info-tooltip-content { 116 | bottom: auto; 117 | top: calc(100% + 12px); 118 | } 119 | 120 | /* Force page header tooltips below the icon to stay away from the sticky top bar */ 121 | .page-header .info-tooltip-content, 122 | .top-header .info-tooltip-content { 123 | bottom: auto; 124 | top: calc(100% + 6px); 125 | transform: translateX(-50%); 126 | } 127 | 128 | .page-header .info-tooltip:hover .info-tooltip-content, 129 | .top-header .info-tooltip:hover .info-tooltip-content { 130 | bottom: auto; 131 | top: calc(100% + 12px); 132 | } 133 | 134 | .page-header .info-tooltip-content::after, 135 | .top-header .info-tooltip-content::after { 136 | top: auto; 137 | bottom: 100%; 138 | border-top-color: transparent; 139 | border-bottom-color: var(--loglynx-primary); 140 | } 141 | 142 | .page-header .info-tooltip-content::before, 143 | .top-header .info-tooltip-content::before { 144 | top: auto; 145 | bottom: 100%; 146 | border-top-color: transparent; 147 | border-bottom-color: var(--loglynx-card); 148 | margin-top: 0; 149 | margin-bottom: 1px; 150 | } 151 | 152 | /* Tooltip variations for right position */ 153 | .info-tooltip.tooltip-right .info-tooltip-content { 154 | bottom: auto; 155 | left: calc(100% + 8px); 156 | top: 50%; 157 | transform: translateY(-50%); 158 | } 159 | 160 | .info-tooltip.tooltip-right .info-tooltip-content::after { 161 | top: 50%; 162 | left: auto; 163 | right: 100%; 164 | transform: translateY(-50%); 165 | border-top-color: transparent; 166 | border-right-color: var(--loglynx-primary); 167 | } 168 | 169 | .info-tooltip.tooltip-right .info-tooltip-content::before { 170 | top: 50%; 171 | left: auto; 172 | right: 100%; 173 | transform: translateY(-50%); 174 | border-top-color: transparent; 175 | border-right-color: var(--loglynx-card); 176 | margin-top: 0; 177 | margin-right: 1px; 178 | } 179 | 180 | .info-tooltip.tooltip-right:hover .info-tooltip-content { 181 | left: calc(100% + 12px); 182 | } 183 | 184 | /* Tooltip variations for left position */ 185 | .info-tooltip.tooltip-left .info-tooltip-content { 186 | bottom: auto; 187 | left: auto; 188 | right: calc(100% + 8px); 189 | top: 50%; 190 | transform: translateY(-50%); 191 | } 192 | 193 | .info-tooltip.tooltip-left .info-tooltip-content::after { 194 | top: 50%; 195 | right: auto; 196 | left: 100%; 197 | transform: translateY(-50%); 198 | border-top-color: transparent; 199 | border-left-color: 
var(--loglynx-primary); 200 | } 201 | 202 | .info-tooltip.tooltip-left .info-tooltip-content::before { 203 | top: 50%; 204 | right: auto; 205 | left: 100%; 206 | transform: translateY(-50%); 207 | border-top-color: transparent; 208 | border-left-color: var(--loglynx-card); 209 | margin-top: 0; 210 | margin-left: 1px; 211 | } 212 | 213 | .info-tooltip.tooltip-left:hover .info-tooltip-content { 214 | right: calc(100% + 12px); 215 | } 216 | 217 | /* Tooltip title and description */ 218 | .info-tooltip-title { 219 | font-weight: 600; 220 | color: var(--loglynx-primary); 221 | margin-bottom: 0.25rem; 222 | font-size: var(--font-size-sm); 223 | } 224 | 225 | .info-tooltip-description { 226 | color: var(--loglynx-text-light); 227 | font-size: var(--font-size-xs); 228 | } 229 | 230 | /* Tooltip in headers */ 231 | .card-title .info-tooltip, 232 | .chart-title .info-tooltip, 233 | .stat-label .info-tooltip, 234 | .page-title .info-tooltip { 235 | vertical-align: middle; 236 | } 237 | 238 | /* Responsive adjustments */ 239 | @media (max-width: 768px) { 240 | .info-tooltip-content { 241 | max-width: 250px; 242 | font-size: 0.75rem; 243 | padding: 0.5rem 0.75rem; 244 | } 245 | 246 | .info-tooltip-icon { 247 | width: 16px; 248 | height: 16px; 249 | font-size: 10px; 250 | } 251 | } 252 | 253 | /* Animation for tooltip appearance */ 254 | @keyframes tooltipFadeIn { 255 | from { 256 | opacity: 0; 257 | transform: translateX(-50%) translateY(-5px); 258 | } 259 | to { 260 | opacity: 1; 261 | transform: translateX(-50%) translateY(0); 262 | } 263 | } 264 | 265 | .info-tooltip:hover .info-tooltip-content { 266 | animation: tooltipFadeIn 0.2s ease; 267 | } 268 | 269 | /* Inline tooltip for text elements */ 270 | .inline-tooltip { 271 | border-bottom: 1px dotted var(--loglynx-primary); 272 | cursor: help; 273 | position: relative; 274 | } 275 | 276 | .inline-tooltip:hover { 277 | color: var(--loglynx-primary); 278 | } 279 | -------------------------------------------------------------------------------- /internal/api/handlers/system.go: -------------------------------------------------------------------------------- 1 | package handlers 2 | 3 | import ( 4 | "fmt" 5 | "net/http" 6 | "os" 7 | "runtime" 8 | "strconv" 9 | "time" 10 | 11 | "loglynx/internal/database" 12 | "loglynx/internal/database/repositories" 13 | "loglynx/internal/version" 14 | 15 | "github.com/gin-gonic/gin" 16 | "github.com/pterm/pterm" 17 | ) 18 | 19 | // SystemHandler handles system statistics requests 20 | type SystemHandler struct { 21 | statsRepo repositories.StatsRepository 22 | httpRepo repositories.HTTPRequestRepository 23 | cleanupService *database.CleanupService 24 | logger *pterm.Logger 25 | startTime time.Time 26 | dbPath string 27 | retentionDays int 28 | } 29 | 30 | // SystemStats holds comprehensive system statistics 31 | type SystemStats struct { 32 | // Process Info 33 | AppVersion string `json:"app_version"` 34 | Uptime string `json:"uptime"` 35 | UptimeSeconds int64 `json:"uptime_seconds"` 36 | StartTime string `json:"start_time"` 37 | GoVersion string `json:"go_version"` 38 | NumCPU int `json:"num_cpu"` 39 | NumGoroutines int `json:"num_goroutines"` 40 | MemoryAllocMB float64 `json:"memory_alloc_mb"` 41 | MemoryTotalMB float64 `json:"memory_total_mb"` 42 | MemorySysMB float64 `json:"memory_sys_mb"` 43 | GCPauseMs float64 `json:"gc_pause_ms"` 44 | 45 | // Database Info 46 | TotalRecords int64 `json:"total_records"` 47 | RecordsToCleanup int64 `json:"records_to_cleanup"` 48 | DatabaseSizeMB float64 `json:"database_size_mb"` 
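// (Illustration only: marshaled to JSON, this struct yields a payload shaped
// roughly as below — field names come from the json tags above and below,
// values are invented for the example.)
//
//	{
//	  "app_version": "1.0.4",
//	  "uptime": "2 days, 3 hours",
//	  "num_goroutines": 24,
//	  "memory_alloc_mb": 42.7,
//	  "total_records": 1250000,
//	  "database_size_mb": 512.3,
//	  "retention_days": 60,
//	  "requests_per_second": 14.2
//	}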
49 | DatabasePath string `json:"database_path"` 50 | 51 | // Cleanup Info 52 | RetentionDays int `json:"retention_days"` 53 | NextCleanupTime string `json:"next_cleanup_time"` 54 | NextCleanupCountdown string `json:"next_cleanup_countdown"` 55 | LastCleanupTime string `json:"last_cleanup_time"` 56 | 57 | // Additional Stats 58 | OldestRecordAge string `json:"oldest_record_age"` 59 | NewestRecordAge string `json:"newest_record_age"` 60 | RequestsPerSecond float64 `json:"requests_per_second"` 61 | } 62 | 63 | // NewSystemHandler creates a new system handler 64 | func NewSystemHandler( 65 | statsRepo repositories.StatsRepository, 66 | httpRepo repositories.HTTPRequestRepository, 67 | cleanupService *database.CleanupService, 68 | logger *pterm.Logger, 69 | dbPath string, 70 | retentionDays int, 71 | ) *SystemHandler { 72 | return &SystemHandler{ 73 | statsRepo: statsRepo, 74 | httpRepo: httpRepo, 75 | cleanupService: cleanupService, 76 | logger: logger, 77 | startTime: time.Now(), 78 | dbPath: dbPath, 79 | retentionDays: retentionDays, 80 | } 81 | } 82 | 83 | // HandleSystemStatsPage renders the system stats page 84 | func (h *SystemHandler) HandleSystemStatsPage(c *gin.Context) { 85 | c.HTML(http.StatusOK, "system.html", gin.H{ 86 | "title": "System Stats", 87 | }) 88 | } 89 | 90 | // GetSystemStats returns comprehensive system statistics 91 | func (h *SystemHandler) GetSystemStats(c *gin.Context) { 92 | stats, err := h.collectSystemStats() 93 | if err != nil { 94 | h.logger.WithCaller().Error("Failed to collect system stats", h.logger.Args("error", err)) 95 | c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to collect system stats"}) 96 | return 97 | } 98 | 99 | c.JSON(http.StatusOK, stats) 100 | } 101 | 102 | // GetRecordsTimeline returns records count timeline for system stats chart 103 | func (h *SystemHandler) GetRecordsTimeline(c *gin.Context) { 104 | // Get days parameter (default 30) 105 | days := 30 106 | if daysParam := c.Query("days"); daysParam != "" { 107 | if d, err := strconv.Atoi(daysParam); err == nil && d > 0 { 108 | if d <= 365 { 109 | days = d 110 | } else { 111 | days = 365 // Cap at 1 year 112 | } 113 | } 114 | } 115 | 116 | timeline, err := h.statsRepo.GetRecordsTimeline(days) 117 | if err != nil { 118 | h.logger.WithCaller().Error("Failed to get records timeline", h.logger.Args("error", err)) 119 | c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to get records timeline"}) 120 | return 121 | } 122 | 123 | c.JSON(http.StatusOK, timeline) 124 | } 125 | 126 | // collectSystemStats gathers all system statistics 127 | func (h *SystemHandler) collectSystemStats() (*SystemStats, error) { 128 | stats := &SystemStats{ 129 | AppVersion: version.Version, 130 | StartTime: h.startTime.Format(time.RFC3339), 131 | GoVersion: runtime.Version(), 132 | NumCPU: runtime.NumCPU(), 133 | NumGoroutines: runtime.NumGoroutine(), 134 | DatabasePath: h.dbPath, 135 | RetentionDays: h.retentionDays, 136 | } 137 | 138 | // Calculate uptime 139 | uptime := time.Since(h.startTime) 140 | stats.UptimeSeconds = int64(uptime.Seconds()) 141 | stats.Uptime = formatDuration(uptime) 142 | 143 | // Memory stats 144 | var m runtime.MemStats 145 | runtime.ReadMemStats(&m) 146 | stats.MemoryAllocMB = float64(m.Alloc) / 1024 / 1024 147 | stats.MemoryTotalMB = float64(m.TotalAlloc) / 1024 / 1024 148 | stats.MemorySysMB = float64(m.Sys) / 1024 / 1024 149 | stats.GCPauseMs = float64(m.PauseNs[(m.NumGC+255)%256]) / 1000000 150 | 151 | // Database record count 152 | totalRecords, err := 
h.httpRepo.Count() 153 | if err != nil { 154 | h.logger.WithCaller().Warn("Failed to get total records", h.logger.Args("error", err)) 155 | } 156 | stats.TotalRecords = totalRecords 157 | 158 | // Calculate records to cleanup (if retention is enabled) 159 | if h.retentionDays > 0 { 160 | cutoffDate := time.Now().AddDate(0, 0, -h.retentionDays) 161 | recordsToCleanup, err := h.statsRepo.CountRecordsOlderThan(cutoffDate) 162 | if err != nil { 163 | h.logger.WithCaller().Warn("Failed to count records to cleanup", h.logger.Args("error", err)) 164 | } 165 | stats.RecordsToCleanup = recordsToCleanup 166 | } 167 | 168 | // Database file size 169 | if fileInfo, err := os.Stat(h.dbPath); err == nil { 170 | stats.DatabaseSizeMB = float64(fileInfo.Size()) / 1024 / 1024 171 | } 172 | 173 | // Cleanup schedule info 174 | if h.cleanupService != nil && h.retentionDays > 0 { 175 | cleanupStats := h.cleanupService.GetStats() 176 | stats.NextCleanupTime = cleanupStats.NextScheduledRun.Format(time.DateTime) 177 | 178 | timeUntilCleanup := time.Until(cleanupStats.NextScheduledRun) 179 | stats.NextCleanupCountdown = formatDuration(timeUntilCleanup) 180 | 181 | if !cleanupStats.LastRunTime.IsZero() { 182 | stats.LastCleanupTime = cleanupStats.LastRunTime.Format(time.DateTime) 183 | } else { 184 | stats.LastCleanupTime = "Never" 185 | } 186 | } else { 187 | stats.NextCleanupTime = "Disabled" 188 | stats.NextCleanupCountdown = "N/A" 189 | stats.LastCleanupTime = "N/A" 190 | } 191 | 192 | // Oldest and newest record ages 193 | oldestTime, newestTime, err := h.statsRepo.GetRecordTimeRange() 194 | if err == nil { 195 | if !oldestTime.IsZero() { 196 | stats.OldestRecordAge = formatDuration(time.Since(oldestTime)) 197 | } else { 198 | stats.OldestRecordAge = "No records" 199 | } 200 | 201 | if !newestTime.IsZero() { 202 | stats.NewestRecordAge = formatDuration(time.Since(newestTime)) 203 | } else { 204 | stats.NewestRecordAge = "No records" 205 | } 206 | } 207 | 208 | // Calculate requests per second since startup 209 | if stats.TotalRecords > 0 && stats.UptimeSeconds > 0 { 210 | stats.RequestsPerSecond = float64(stats.TotalRecords) / float64(stats.UptimeSeconds) 211 | } 212 | 213 | return stats, nil 214 | } 215 | 216 | // formatDuration formats a duration into a human-readable string 217 | func formatDuration(d time.Duration) string { 218 | if d < 0 { 219 | d = -d 220 | } 221 | 222 | days := int(d.Hours() / 24) 223 | hours := int(d.Hours()) % 24 224 | minutes := int(d.Minutes()) % 60 225 | seconds := int(d.Seconds()) % 60 226 | 227 | if days > 0 { 228 | return formatPlural(days, "day", hours, "hour") 229 | } 230 | if hours > 0 { 231 | return formatPlural(hours, "hour", minutes, "minute") 232 | } 233 | if minutes > 0 { 234 | return formatPlural(minutes, "minute", seconds, "second") 235 | } 236 | return formatPlural(seconds, "second", 0, "") 237 | } 238 | 239 | // formatPlural formats numbers with proper pluralization 240 | func formatPlural(n1 int, unit1 string, n2 int, unit2 string) string { 241 | result := formatSingle(n1, unit1) 242 | if n2 > 0 && unit2 != "" { 243 | result += ", " + formatSingle(n2, unit2) 244 | } 245 | return result 246 | } 247 | 248 | // formatSingle formats a single value with pluralization 249 | func formatSingle(n int, unit string) string { 250 | if n == 1 { 251 | return "1 " + unit 252 | } 253 | return fmt.Sprintf("%d %ss", n, unit) 254 | } 255 | -------------------------------------------------------------------------------- /internal/database/connection.go: 
-------------------------------------------------------------------------------- 1 | package database 2 | 3 | import ( 4 | "context" 5 | "errors" 6 | "loglynx/internal/database/repositories" 7 | "loglynx/internal/discovery" 8 | "os" 9 | "runtime" 10 | "strings" 11 | "time" 12 | 13 | "github.com/pterm/pterm" 14 | "gorm.io/driver/sqlite" 15 | "gorm.io/gorm" 16 | "gorm.io/gorm/logger" 17 | ) 18 | 19 | type Config struct { 20 | Path string 21 | MaxOpenConns int 22 | MaxIdleConns int 23 | ConnMaxLife time.Duration 24 | 25 | // Pool Monitoring 26 | PoolMonitoringEnabled bool 27 | PoolMonitoringInterval time.Duration 28 | PoolSaturationThreshold float64 29 | AutoTuning bool 30 | } 31 | 32 | // SlowQueryLogger logs slow database queries for performance monitoring 33 | type SlowQueryLogger struct { 34 | logger *pterm.Logger 35 | slowThreshold time.Duration 36 | logLevel logger.LogLevel 37 | ignoreNotFoundErr bool 38 | } 39 | 40 | func NewSlowQueryLogger(ptermLogger *pterm.Logger, slowThreshold time.Duration) *SlowQueryLogger { 41 | return &SlowQueryLogger{ 42 | logger: ptermLogger, 43 | slowThreshold: slowThreshold, 44 | logLevel: logger.Warn, 45 | ignoreNotFoundErr: true, 46 | } 47 | } 48 | 49 | func (l *SlowQueryLogger) LogMode(level logger.LogLevel) logger.Interface { 50 | l.logLevel = level 51 | return l 52 | } 53 | 54 | func (l *SlowQueryLogger) Info(ctx context.Context, msg string, data ...interface{}) { 55 | if l.logLevel >= logger.Info { 56 | l.logger.Info(msg, l.logger.Args("data", data)) 57 | } 58 | } 59 | 60 | func (l *SlowQueryLogger) Warn(ctx context.Context, msg string, data ...interface{}) { 61 | if l.logLevel >= logger.Warn { 62 | l.logger.Warn(msg, l.logger.Args("data", data)) 63 | } 64 | } 65 | 66 | func (l *SlowQueryLogger) Error(ctx context.Context, msg string, data ...interface{}) { 67 | if l.logLevel >= logger.Error { 68 | l.logger.Error(msg, l.logger.Args("data", data)) 69 | } 70 | } 71 | 72 | func (l *SlowQueryLogger) Trace(ctx context.Context, begin time.Time, fc func() (string, int64), err error) { 73 | elapsed := time.Since(begin) 74 | sql, rows := fc() 75 | 76 | // Log slow queries (debug level to avoid console noise in normal runs) 77 | if elapsed >= l.slowThreshold { 78 | l.logger.Debug("SLOW QUERY DETECTED", 79 | l.logger.Args( 80 | "duration_ms", elapsed.Milliseconds(), 81 | "rows", rows, 82 | "sql", sql, 83 | )) 84 | } else if l.logLevel >= logger.Info { 85 | // Trace all queries in debug mode 86 | l.logger.Trace("Database query", 87 | l.logger.Args( 88 | "duration_ms", elapsed.Milliseconds(), 89 | "rows", rows, 90 | "sql", sql, 91 | )) 92 | } 93 | 94 | // Log errors (but ignore UNIQUE constraint violations - they're handled by the application) 95 | if err != nil && (!l.ignoreNotFoundErr || !errors.Is(err, gorm.ErrRecordNotFound)) { 96 | // Ignore UNIQUE constraint errors - these are expected during deduplication 97 | // The application handles them gracefully in the repository layer 98 | errStr := err.Error() 99 | if strings.Contains(errStr, "UNIQUE constraint failed") || strings.Contains(errStr, "request_hash") { 100 | // This is a duplicate - silently skip logging (summary is logged in repository) 101 | return 102 | } 103 | 104 | l.logger.Error("Database query error", 105 | l.logger.Args( 106 | "error", err, 107 | "duration_ms", elapsed.Milliseconds(), 108 | "sql", sql, 109 | )) 110 | } 111 | } 112 | 113 | func NewConnection(cfg *Config, logger *pterm.Logger) (*gorm.DB, error) { 114 | // Optimized DSN with: 115 | // - WAL mode for concurrent reads/writes 116 | // 
- NORMAL synchronous for balance between safety and speed 117 | // - cache_size=-64000 (negative means KB, 64MB) for better query performance 118 | // - busy_timeout=5000ms (5 seconds) to prevent SQLITE_BUSY errors 119 | // Note: mattn/go-sqlite3 uses different parameter names than glebarez 120 | dsn := cfg.Path + "?_journal_mode=WAL&_synchronous=NORMAL&_cache_size=-64000&_busy_timeout=5000" 121 | _, err := os.Stat(cfg.Path) 122 | 123 | if errors.Is(err, os.ErrPermission) { 124 | logger.WithCaller().Fatal("Permission denied to access database file.", logger.Args("error", err)) 125 | // Fatal() terminates the program, so no code after this will execute 126 | } 127 | 128 | logger.Debug("Permission to access database file granted.", logger.Args("path", cfg.Path)) 129 | logger.Debug("Initialization of the database with optimized settings (WAL mode, page_size=4096).") 130 | 131 | // Create slow query logger (log queries taking >100ms) 132 | slowQueryLogger := NewSlowQueryLogger(logger, 100*time.Millisecond) 133 | 134 | db, err := gorm.Open(sqlite.Open(dsn), &gorm.Config{ 135 | PrepareStmt: true, 136 | Logger: slowQueryLogger, 137 | }) 138 | 139 | if err != nil { 140 | logger.WithCaller().Fatal("Failed to connect to the database.", logger.Args("error", err)) 141 | // Fatal() terminates the program, so no code after this will execute 142 | } 143 | 144 | // Get underlying SQL DB for connection pool 145 | sqlDB, err := db.DB() 146 | if err != nil { 147 | logger.WithCaller().Fatal("Failed to get database instance.", logger.Args("error", err)) 148 | // Fatal() terminates the program, so no code after this will execute 149 | } 150 | 151 | // Configure connection pool with auto-tuning if enabled 152 | maxOpenConns := cfg.MaxOpenConns 153 | maxIdleConns := cfg.MaxIdleConns 154 | 155 | if cfg.AutoTuning { 156 | // Auto-tune based on CPU cores 157 | cpuCores := runtime.NumCPU() 158 | optimalMaxOpen := cpuCores * 3 // 3 connections per core for read-heavy workloads 159 | 160 | if optimalMaxOpen > maxOpenConns { 161 | maxOpenConns = optimalMaxOpen 162 | maxIdleConns = maxOpenConns * 40 / 100 // 40% idle 163 | 164 | if maxIdleConns < 10 { 165 | maxIdleConns = 10 166 | } 167 | 168 | logger.Info("🔧 Auto-tuned connection pool based on CPU cores", 169 | logger.Args( 170 | "cpu_cores", cpuCores, 171 | "max_open_conns", maxOpenConns, 172 | "max_idle_conns", maxIdleConns, 173 | )) 174 | } 175 | } 176 | 177 | sqlDB.SetMaxOpenConns(maxOpenConns) 178 | sqlDB.SetMaxIdleConns(maxIdleConns) 179 | sqlDB.SetConnMaxLifetime(cfg.ConnMaxLife) 180 | 181 | logger.Debug("Connection pool configured", 182 | logger.Args( 183 | "max_open_conns", maxOpenConns, 184 | "max_idle_conns", maxIdleConns, 185 | "conn_max_life", cfg.ConnMaxLife, 186 | )) 187 | 188 | // Run migrations 189 | logger.Trace("Running database migrations.") 190 | if err := RunMigrations(db); err != nil { 191 | logger.WithCaller().Fatal("Failed to run database migrations.", logger.Args("error", err)) 192 | // Fatal() terminates the program, so no code after this will execute 193 | } 194 | 195 | // Check if database is empty (first load) 196 | // We need to import models for this check 197 | var count int64 198 | db.Table("http_requests").Count(&count) 199 | isDatabaseEmpty := (count == 0) 200 | 201 | if isDatabaseEmpty { 202 | logger.Info("🚀 Empty database detected - deferring index creation until after first data load for optimal performance") 203 | logger.Info(" Indexes will be created automatically when initial data load completes") 204 | } else { 205 | // 
Database has data - create/verify indexes now 206 | logger.Debug("Existing data found - verifying database indexes") 207 | if err := OptimizeDatabase(db, logger); err != nil { 208 | logger.Warn("Database optimization had warnings", logger.Args("error", err)) 209 | // Don't fail on optimization errors, just warn 210 | } 211 | } 212 | 213 | // Run discovery engine in background to speed up startup 214 | go func() { 215 | logger.Debug("Running log source discovery in background...") 216 | engine := discovery.NewEngine(repositories.NewLogSourceRepository(db), logger) 217 | if err := engine.Run(logger); err != nil { 218 | logger.Warn("Failed to run discovery engine", logger.Args("error", err)) 219 | return 220 | } 221 | 222 | logSourceRepo, err := repositories.NewLogSourceRepository(db).FindAll() 223 | if err != nil { 224 | logger.Warn("Failed to retrieve log sources", logger.Args("error", err)) 225 | return 226 | } 227 | 228 | logger.Info("Discovered log sources", logger.Args("count", len(logSourceRepo))) 229 | }() 230 | 231 | // Start pool monitoring if enabled 232 | if cfg.PoolMonitoringEnabled { 233 | monitor := NewPoolMonitor( 234 | sqlDB, 235 | logger, 236 | cfg.PoolMonitoringInterval, 237 | cfg.PoolSaturationThreshold, 238 | cfg.AutoTuning, 239 | ) 240 | monitor.Start(context.Background()) 241 | 242 | // Log initial stats after a short delay 243 | go func() { 244 | time.Sleep(2 * time.Second) 245 | monitor.PrintSummary() 246 | }() 247 | } 248 | 249 | logger.Info("Database connection established successfully.") 250 | return db, nil 251 | } -------------------------------------------------------------------------------- /web/templates/pages/system.html: -------------------------------------------------------------------------------- 1 | {{define "system.html"}} 2 | 3 | 4 | 5 | 6 | 7 | {{.Title}} - LogLynx 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 |
[system.html lines 26-167: the HTML markup was lost to tag stripping during extraction; only the template directives and text labels survive. The page renders {{template "sidebar" .}} and {{template "header" .}}, then five dashboard cards:
  - "Process Information": Uptime ("Started at: -"), Memory Usage ("System: -"), Go Routines ("CPUs: -"), Runtime ("Go: -", "App: -")
  - "Database Information": Total Records ("HTTP requests stored"), Database Size, Records to Cleanup ("Retention: -"), Ingestion Rate ("requests/second")
  - "Records Timeline": a "Database Growth Over Time" chart
  - "Cleanup & Retention": Next Cleanup ("Scheduled: -"), Last Cleanup ("Last execution"), Record Age Range ("Newest: -")
  - "Detailed Information": a "Loading..." placeholder
followed by {{template "footer" .}}.]
168 | 169 | {{template "scripts-common" .}} 170 | 171 | 172 | 173 | 196 | 197 | 198 | {{end}} 199 | -------------------------------------------------------------------------------- /internal/database/optimize.go: -------------------------------------------------------------------------------- 1 | package database 2 | 3 | import ( 4 | "github.com/pterm/pterm" 5 | "gorm.io/gorm" 6 | ) 7 | 8 | // OptimizeDatabase applies additional optimizations after initial migrations 9 | // This includes creating performance indexes and verifying SQLite settings 10 | func OptimizeDatabase(db *gorm.DB, logger *pterm.Logger) error { 11 | logger.Debug("Applying database optimizations...") 12 | 13 | // Verify WAL mode is enabled (debug level - only show if there's a problem) 14 | var journalMode string 15 | if err := db.Raw("PRAGMA journal_mode").Scan(&journalMode).Error; err != nil { 16 | logger.Warn("Failed to check journal mode", logger.Args("error", err)) 17 | } else if journalMode != "wal" { 18 | logger.Warn("Database not in WAL mode", logger.Args("mode", journalMode)) 19 | } else { 20 | logger.Trace("Database journal mode verified", logger.Args("mode", journalMode)) 21 | } 22 | 23 | // Verify page size (trace level - not critical) 24 | var pageSize int 25 | if err := db.Raw("PRAGMA page_size").Scan(&pageSize).Error; err != nil { 26 | logger.Debug("Failed to check page size", logger.Args("error", err)) 27 | } else { 28 | logger.Trace("Database page size", logger.Args("bytes", pageSize)) 29 | } 30 | 31 | indexes := []string{ 32 | // ===== PRIMARY COMPOSITE INDEXES (for time-range queries) ===== 33 | 34 | // Main timeline index - covers most dashboard queries 35 | // Used by: GetTimelineStats, GetStatusCodeTimeline, GetSummary 36 | `CREATE INDEX IF NOT EXISTS idx_timestamp_status 37 | ON http_requests(timestamp DESC, status_code)`, 38 | 39 | // Time + Host - for per-service filtering 40 | // Used by: service-filtered queries 41 | `CREATE INDEX IF NOT EXISTS idx_time_host 42 | ON http_requests(timestamp DESC, host)`, 43 | 44 | // Time + Backend - for backend analytics 45 | // Used by: GetTopBackends, backend-filtered queries 46 | `CREATE INDEX IF NOT EXISTS idx_time_backend 47 | ON http_requests(timestamp DESC, backend_name, status_code)`, 48 | 49 | // ===== AGGREGATION INDEXES (for GROUP BY queries) ===== 50 | 51 | // Path aggregation - CRITICAL for GetTopPaths 52 | // Covering index includes all columns needed for the query 53 | `CREATE INDEX IF NOT EXISTS idx_path_agg 54 | ON http_requests(path, timestamp, client_ip, response_time_ms, response_size)`, 55 | 56 | // Country aggregation - CRITICAL for GetTopCountries 57 | // Partial index excludes empty geo_country for efficiency 58 | `CREATE INDEX IF NOT EXISTS idx_geo_agg 59 | ON http_requests(geo_country, timestamp, client_ip, response_size) 60 | WHERE geo_country != ''`, 61 | 62 | // Referrer aggregation - CRITICAL for GetTopReferrerDomains 63 | // Partial index excludes empty referrers 64 | `CREATE INDEX IF NOT EXISTS idx_referer_agg 65 | ON http_requests(referer, timestamp, client_ip) 66 | WHERE referer != ''`, 67 | 68 | // Service identification - for GetServices 69 | // Covers the UNION ALL query pattern 70 | `CREATE INDEX IF NOT EXISTS idx_service_id 71 | ON http_requests(backend_name, backend_url, host)`, 72 | 73 | // Backend aggregation - CRITICAL for GetTopBackends (was taking 3s) 74 | // Covering index for backend_name grouping with all aggregation columns 75 | `CREATE INDEX IF NOT EXISTS idx_backend_agg 76 | ON http_requests(backend_name, 
timestamp, backend_url, host, response_size, status_code) 77 | WHERE backend_name != ''`, 78 | 79 | // Backend URL fallback aggregation - for requests without backend_name 80 | `CREATE INDEX IF NOT EXISTS idx_backend_url_agg 81 | ON http_requests(backend_url, timestamp, host, response_size, status_code) 82 | WHERE backend_name = '' AND backend_url != ''`, 83 | 84 | // Host fallback aggregation - for requests without backend_name or backend_url 85 | `CREATE INDEX IF NOT EXISTS idx_host_agg 86 | ON http_requests(host, timestamp, response_size, status_code) 87 | WHERE backend_name = '' AND backend_url = '' AND host != ''`, 88 | 89 | // ===== LOOKUP INDEXES (for filtering and detail queries) ===== 90 | 91 | // Client IP aggregation - CRITICAL for GetTopIPAddresses and GetIPDetailedStats 92 | // Covering index includes geo data, ASN, and aggregation columns to avoid table lookups 93 | `CREATE INDEX IF NOT EXISTS idx_ip_agg 94 | ON http_requests(client_ip, timestamp, geo_country, geo_city, geo_lat, geo_lon, response_size, asn, asn_org, status_code)`, 95 | 96 | // IP + Browser aggregation - for GetIPTopBrowsers 97 | // Partial index matches WHERE condition in query exactly 98 | `CREATE INDEX IF NOT EXISTS idx_ip_browser_agg 99 | ON http_requests(client_ip, timestamp, browser) 100 | WHERE browser != ''`, 101 | 102 | // IP + Backend aggregation - for GetIPTopBackends 103 | // Partial index matches WHERE condition, includes all aggregation columns 104 | `CREATE INDEX IF NOT EXISTS idx_ip_backend_agg 105 | ON http_requests(client_ip, timestamp, backend_name, backend_url, response_size, response_time_ms, status_code) 106 | WHERE backend_name != ''`, 107 | 108 | // IP + Device aggregation - for GetIPDeviceTypeDistribution 109 | // Partial index matches WHERE condition in query 110 | `CREATE INDEX IF NOT EXISTS idx_ip_device_agg 111 | ON http_requests(client_ip, timestamp, device_type) 112 | WHERE device_type != ''`, 113 | 114 | // IP + OS aggregation - for GetIPTopOperatingSystems 115 | // Partial index matches WHERE condition in query 116 | `CREATE INDEX IF NOT EXISTS idx_ip_os_agg 117 | ON http_requests(client_ip, timestamp, os) 118 | WHERE os != ''`, 119 | 120 | // IP + Status Code aggregation - for GetIPStatusCodeDistribution 121 | // Covering index for status code grouping 122 | `CREATE INDEX IF NOT EXISTS idx_ip_status_agg 123 | ON http_requests(client_ip, timestamp, status_code)`, 124 | 125 | // IP + Path aggregation - for GetIPTopPaths 126 | // Covering index includes all columns needed for aggregation 127 | `CREATE INDEX IF NOT EXISTS idx_ip_path_agg 128 | ON http_requests(client_ip, timestamp, path, response_time_ms, response_size, backend_name, host, backend_url)`, 129 | 130 | // IP + Heatmap/Timeline aggregation - for GetIPTrafficHeatmap and GetIPTimelineStats 131 | // Covering index for time-based aggregations with response metrics 132 | `CREATE INDEX IF NOT EXISTS idx_ip_heatmap_agg 133 | ON http_requests(client_ip, timestamp, response_time_ms, response_size, backend_name)`, 134 | 135 | // Status code lookup - for distribution queries 136 | // Used by: GetStatusCodeDistribution 137 | `CREATE INDEX IF NOT EXISTS idx_status_code 138 | ON http_requests(status_code, timestamp)`, 139 | 140 | // Method lookup - for distribution queries 141 | // Used by: GetMethodDistribution 142 | `CREATE INDEX IF NOT EXISTS idx_method 143 | ON http_requests(method, timestamp)`, 144 | 145 | // ASN aggregation - for GetTopASNs 146 | // Partial index for records with valid ASN 147 | `CREATE INDEX IF NOT EXISTS 
idx_asn_agg 148 | ON http_requests(asn, timestamp, asn_org, geo_country, response_size) 149 | WHERE asn > 0`, 150 | 151 | // Device type distribution - for GetDeviceTypeDistribution 152 | // Partial index for records with device type 153 | `CREATE INDEX IF NOT EXISTS idx_device_type 154 | ON http_requests(device_type, timestamp) 155 | WHERE device_type != ''`, 156 | 157 | // Protocol distribution - for GetProtocolDistribution 158 | // Partial index for records with protocol 159 | `CREATE INDEX IF NOT EXISTS idx_protocol 160 | ON http_requests(protocol, timestamp) 161 | WHERE protocol != ''`, 162 | 163 | // TLS version distribution - for GetTLSVersionDistribution 164 | // Partial index for records with TLS version 165 | `CREATE INDEX IF NOT EXISTS idx_tls_version 166 | ON http_requests(tls_version, timestamp) 167 | WHERE tls_version != ''`, 168 | 169 | // ===== PARTIAL INDEXES (for specific filtered queries) ===== 170 | 171 | // Errors only - for error analysis dashboards 172 | // Partial index significantly reduces size (typically <5% of data) 173 | `CREATE INDEX IF NOT EXISTS idx_errors 174 | ON http_requests(timestamp DESC, status_code, path, client_ip) 175 | WHERE status_code >= 400`, 176 | 177 | // Slow requests - for performance analysis 178 | // Partial index for requests >1 second 179 | `CREATE INDEX IF NOT EXISTS idx_slow 180 | ON http_requests(timestamp DESC, response_time_ms, path, host) 181 | WHERE response_time_ms > 1000`, 182 | 183 | // Response time percentiles - for P50/P95/P99 calculations 184 | // Partial index excludes zero/null response times 185 | `CREATE INDEX IF NOT EXISTS idx_response_time 186 | ON http_requests(timestamp DESC, response_time_ms) 187 | WHERE response_time_ms > 0`, 188 | 189 | // ===== MAINTENANCE INDEX ===== 190 | 191 | // Cleanup index - for data retention queries 192 | // Simple timestamp index for DELETE operations 193 | `CREATE INDEX IF NOT EXISTS idx_cleanup 194 | ON http_requests(timestamp)`, 195 | } 196 | 197 | indexCount := 0 198 | for _, indexSQL := range indexes { 199 | if err := db.Exec(indexSQL).Error; err != nil { 200 | logger.Warn("Failed to create index", logger.Args("error", err)) 201 | return err 202 | } 203 | indexCount++ 204 | } 205 | 206 | logger.Debug("Performance indexes verified", logger.Args("count", indexCount)) 207 | 208 | // Analyze tables for query optimizer (only log if it fails) 209 | if err := db.Exec("ANALYZE").Error; err != nil { 210 | logger.Warn("Failed to analyze database", logger.Args("error", err)) 211 | } else { 212 | logger.Trace("Database statistics analyzed") 213 | } 214 | 215 | logger.Debug("Database optimizations completed") 216 | return nil 217 | } 218 | -------------------------------------------------------------------------------- /internal/api/handlers/realtime.go: -------------------------------------------------------------------------------- 1 | package handlers 2 | 3 | import ( 4 | "encoding/json" 5 | "fmt" 6 | "sync" 7 | "time" 8 | 9 | "loglynx/internal/database/repositories" 10 | "loglynx/internal/realtime" 11 | 12 | "github.com/gin-gonic/gin" 13 | "github.com/pterm/pterm" 14 | ) 15 | 16 | const ( 17 | // MaxSSEConnections is the maximum number of simultaneous SSE connections 18 | MaxSSEConnections = 100 19 | ) 20 | 21 | // RealtimeHandler handles real-time streaming endpoints 22 | type RealtimeHandler struct { 23 | collector *realtime.MetricsCollector 24 | logger *pterm.Logger 25 | activeConnections int 26 | maxConnections int 27 | connectionMutex sync.Mutex 28 | } 29 | 30 | // NewRealtimeHandler 
creates a new realtime handler 31 | func NewRealtimeHandler(collector *realtime.MetricsCollector, logger *pterm.Logger) *RealtimeHandler { 32 | return &RealtimeHandler{ 33 | collector: collector, 34 | logger: logger, 35 | maxConnections: MaxSSEConnections, 36 | } 37 | } 38 | 39 | // getServiceFilter extracts service filter parameters from request (legacy single service) 40 | // Returns serviceName and serviceType separately 41 | // Falls back to legacy "host" parameter for backward compatibility 42 | func (h *RealtimeHandler) getServiceFilter(c *gin.Context) (string, string) { 43 | // Try new parameters first 44 | service := c.Query("service") 45 | serviceType := c.Query("service_type") 46 | 47 | // Fallback to legacy "host" parameter 48 | if service == "" { 49 | service = c.Query("host") 50 | } 51 | 52 | // Default service type to "auto" if not specified 53 | if serviceType == "" { 54 | serviceType = "auto" 55 | } 56 | 57 | // Return the service filter when one was provided 58 | if service != "" { 59 | return service, serviceType 60 | } 61 | 62 | return "", "auto" 63 | } 64 | 65 | // getServiceFilters extracts multiple service filters from request 66 | // Returns array of {name, type} service filters 67 | func (h *RealtimeHandler) getServiceFilters(c *gin.Context) []realtime.ServiceFilter { 68 | // Try new multi-service parameters 69 | serviceNames := c.QueryArray("services[]") 70 | serviceTypes := c.QueryArray("service_types[]") 71 | 72 | // If we have multiple services, use them 73 | if len(serviceNames) > 0 && len(serviceNames) == len(serviceTypes) { 74 | filters := make([]realtime.ServiceFilter, len(serviceNames)) 75 | for i := range serviceNames { 76 | filters[i] = realtime.ServiceFilter{ 77 | Name: serviceNames[i], 78 | Type: serviceTypes[i], 79 | } 80 | } 81 | return filters 82 | } 83 | 84 | // Fall back to single service filter (legacy) 85 | service, serviceType := h.getServiceFilter(c) 86 | if service != "" { 87 | return []realtime.ServiceFilter{{Name: service, Type: serviceType}} 88 | } 89 | 90 | return nil 91 | } 92 | 93 | // getExcludeOwnIP extracts exclude_own_ip and related parameters 94 | // Returns ExcludeIPFilter or nil 95 | func (h *RealtimeHandler) getExcludeOwnIP(c *gin.Context) *realtime.ExcludeIPFilter { 96 | excludeIP := c.Query("exclude_own_ip") == "true" 97 | if !excludeIP { 98 | return nil 99 | } 100 | 101 | // Get client IP 102 | clientIP := c.ClientIP() 103 | 104 | // Get exclude services 105 | serviceNames := c.QueryArray("exclude_services[]") 106 | serviceTypes := c.QueryArray("exclude_service_types[]") 107 | 108 | var excludeServices []realtime.ServiceFilter 109 | if len(serviceNames) > 0 && len(serviceNames) == len(serviceTypes) { 110 | excludeServices = make([]realtime.ServiceFilter, len(serviceNames)) 111 | for i := range serviceNames { 112 | excludeServices[i] = realtime.ServiceFilter{ 113 | Name: serviceNames[i], 114 | Type: serviceTypes[i], 115 | } 116 | } 117 | } 118 | 119 | return &realtime.ExcludeIPFilter{ 120 | ClientIP: clientIP, 121 | ExcludeServices: excludeServices, 122 | } 123 | } 124 | 125 | // StreamMetrics streams real-time metrics via Server-Sent Events 126 | func (h *RealtimeHandler) StreamMetrics(c *gin.Context) { 127 | // Check connection limit 128 | h.connectionMutex.Lock() 129 | if h.activeConnections >= h.maxConnections { 130 | h.connectionMutex.Unlock() 131 | c.JSON(503, gin.H{"error": "Maximum concurrent connections reached. 
Please try again later."}) 132 | return 133 | } 134 | h.activeConnections++ 135 | currentConnections := h.activeConnections 136 | // Update collector with active connection count 137 | h.collector.SetActiveConnections(h.activeConnections) 138 | h.connectionMutex.Unlock() 139 | 140 | // Ensure we decrement on exit 141 | defer func() { 142 | h.connectionMutex.Lock() 143 | h.activeConnections-- 144 | // Update collector with active connection count 145 | h.collector.SetActiveConnections(h.activeConnections) 146 | h.connectionMutex.Unlock() 147 | 148 | // Recover from panics 149 | if r := recover(); r != nil { 150 | h.logger.Error("Panic in SSE stream", h.logger.Args("panic", r, "client_ip", c.ClientIP())) 151 | } 152 | }() 153 | 154 | // Get filters 155 | serviceName, _ := h.getServiceFilter(c) // Legacy single service filter 156 | serviceFilters := h.getServiceFilters(c) 157 | excludeIPFilter := h.getExcludeOwnIP(c) 158 | 159 | // Set SSE headers 160 | c.Header("Content-Type", "text/event-stream") 161 | c.Header("Cache-Control", "no-cache") 162 | c.Header("Connection", "keep-alive") 163 | c.Header("X-Accel-Buffering", "no") 164 | 165 | // Create a ticker for sending updates 166 | ticker := time.NewTicker(1 * time.Second) // Send updates every 1 second for real-time responsiveness 167 | defer ticker.Stop() 168 | 169 | h.logger.Debug("Client connected to real-time metrics stream", 170 | h.logger.Args("client_ip", c.ClientIP(), "host_filter", serviceName, "exclude_own_ip", excludeIPFilter != nil, "active_connections", currentConnections)) 171 | 172 | for { 173 | select { 174 | case <-c.Request.Context().Done(): 175 | // Server is shutting down or request context cancelled 176 | h.logger.Debug("Request context cancelled (server shutdown or timeout)", 177 | h.logger.Args("client_ip", c.ClientIP())) 178 | return 179 | 180 | case <-ticker.C: 181 | // Get current metrics with filters 182 | var metrics *realtime.RealtimeMetrics 183 | 184 | // Optimization: If no filters are active, use the cached global metrics JSON directly 185 | // This avoids hitting the database AND avoids JSON marshaling for every connected client 186 | if serviceName == "" && len(serviceFilters) == 0 && excludeIPFilter == nil { 187 | if cachedJSON := h.collector.GetCachedJSON(); cachedJSON != nil { 188 | // Write cached JSON directly to stream 189 | _, err := fmt.Fprintf(c.Writer, "data: %s\n\n", cachedJSON) 190 | if err != nil { 191 | h.logger.Debug("Failed to write SSE data", h.logger.Args("error", err)) 192 | return 193 | } 194 | c.Writer.Flush() 195 | continue 196 | } 197 | // Fallback if cache is empty (should rarely happen) 198 | metrics = h.collector.GetMetrics() 199 | } else if len(serviceFilters) > 0 || excludeIPFilter != nil { 200 | // Use new filter system 201 | metrics = h.collector.GetMetricsWithFilters(serviceName, serviceFilters, excludeIPFilter) 202 | } else { 203 | // Use legacy single service filter 204 | metrics = h.collector.GetMetricsWithHost(serviceName) 205 | } 206 | 207 | // Marshal to JSON 208 | data, err := json.Marshal(metrics) 209 | if err != nil { 210 | h.logger.Error("Failed to marshal metrics", h.logger.Args("error", err)) 211 | continue 212 | } 213 | 214 | // Send SSE event (always send for heartbeat, frontend handles duplicates) 215 | _, err = fmt.Fprintf(c.Writer, "data: %s\n\n", data) 216 | if err != nil { 217 | h.logger.Debug("Failed to write SSE data", h.logger.Args("error", err)) 218 | return 219 | } 220 | 221 | // Flush the data immediately 222 | c.Writer.Flush() 223 | } 224 | } 225 | } 
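// Example (hypothetical sketch, not part of this package): a minimal Go consumer
// for the stream above, assuming the /api/v1/realtime/stream route from server.go
// and a server listening on localhost:8080. Each "data:" line carries one
// JSON-encoded RealtimeMetrics snapshot.
//
//	resp, err := http.Get("http://localhost:8080/api/v1/realtime/stream")
//	if err != nil {
//		log.Fatal(err)
//	}
//	defer resp.Body.Close()
//	sc := bufio.NewScanner(resp.Body)
//	for sc.Scan() {
//		if line := sc.Text(); strings.HasPrefix(line, "data: ") {
//			fmt.Println(strings.TrimPrefix(line, "data: ")) // one metrics snapshot
//		}
//	}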
226 | 227 | // GetCurrentMetrics returns a single snapshot of current metrics 228 | func (h *RealtimeHandler) GetCurrentMetrics(c *gin.Context) { 229 | serviceName, _ := h.getServiceFilter(c) 230 | serviceFilters := h.getServiceFilters(c) 231 | excludeIPFilter := h.getExcludeOwnIP(c) 232 | 233 | var metrics *realtime.RealtimeMetrics 234 | if len(serviceFilters) > 0 || excludeIPFilter != nil { 235 | metrics = h.collector.GetMetricsWithFilters(serviceName, serviceFilters, excludeIPFilter) 236 | } else { 237 | metrics = h.collector.GetMetricsWithHost(serviceName) 238 | } 239 | 240 | c.JSON(200, metrics) 241 | } 242 | 243 | // GetPerServiceMetrics returns current metrics for each service 244 | func (h *RealtimeHandler) GetPerServiceMetrics(c *gin.Context) { 245 | serviceFilters := h.getServiceFilters(c) 246 | excludeIPFilter := h.getExcludeOwnIP(c) 247 | 248 | // Convert realtime filters to repositories filters 249 | var repoFilters []repositories.ServiceFilter 250 | if len(serviceFilters) > 0 { 251 | repoFilters = make([]repositories.ServiceFilter, len(serviceFilters)) 252 | for i, f := range serviceFilters { 253 | repoFilters[i] = repositories.ServiceFilter{Name: f.Name, Type: f.Type} 254 | } 255 | } 256 | 257 | var repoExcludeIP *repositories.ExcludeIPFilter 258 | if excludeIPFilter != nil { 259 | repoExcludeIP = &repositories.ExcludeIPFilter{ 260 | ClientIP: excludeIPFilter.ClientIP, 261 | } 262 | if len(excludeIPFilter.ExcludeServices) > 0 { 263 | repoExcludeIP.ExcludeServices = make([]repositories.ServiceFilter, len(excludeIPFilter.ExcludeServices)) 264 | for i, f := range excludeIPFilter.ExcludeServices { 265 | repoExcludeIP.ExcludeServices[i] = repositories.ServiceFilter{Name: f.Name, Type: f.Type} 266 | } 267 | } 268 | } 269 | 270 | metrics := h.collector.GetPerServiceMetrics(repoFilters, repoExcludeIP) 271 | c.JSON(200, metrics) 272 | } 273 | -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | # Contributor Covenant Code of Conduct 2 | 3 | ## Our Pledge 4 | 5 | We as members, contributors, and leaders pledge to make participation in our 6 | community a harassment-free experience for everyone, regardless of age, body 7 | size, visible or invisible disability, ethnicity, sex characteristics, gender 8 | identity and expression, level of experience, education, socio-economic status, 9 | nationality, personal appearance, race, caste, color, religion, or sexual 10 | identity and orientation. 11 | 12 | We pledge to act and interact in ways that contribute to an open, welcoming, 13 | diverse, inclusive, and healthy community. 
14 | 15 | ## Our Standards 16 | 17 | Examples of behavior that contributes to a positive environment for our 18 | community include: 19 | 20 | * Demonstrating empathy and kindness toward other people 21 | * Being respectful of differing opinions, viewpoints, and experiences 22 | * Giving and gracefully accepting constructive feedback 23 | * Accepting responsibility and apologizing to those affected by our mistakes, 24 | and learning from the experience 25 | * Focusing on what is best not just for us as individuals, but for the overall 26 | community 27 | 28 | Examples of unacceptable behavior include: 29 | 30 | * The use of sexualized language or imagery, and sexual attention or advances of 31 | any kind 32 | * Trolling, insulting or derogatory comments, and personal or political attacks 33 | * Public or private harassment 34 | * Publishing others' private information, such as a physical or email address, 35 | without their explicit permission 36 | * Other conduct which could reasonably be considered inappropriate in a 37 | professional setting 38 | 39 | ## Enforcement Responsibilities 40 | 41 | Community leaders are responsible for clarifying and enforcing our standards of 42 | acceptable behavior and will take appropriate and fair corrective action in 43 | response to any behavior that they deem inappropriate, threatening, offensive, 44 | or harmful. 45 | 46 | Community leaders have the right and responsibility to remove, edit, or reject 47 | comments, commits, code, wiki edits, issues, and other contributions that are 48 | not aligned to this Code of Conduct, and will communicate reasons for moderation 49 | decisions when appropriate. 50 | 51 | ## Scope 52 | 53 | This Code of Conduct applies within all community spaces, and also applies when 54 | an individual is officially representing the community in public spaces. 55 | Examples of representing our community include using an official e-mail address, 56 | posting via an official social media account, or acting as an appointed 57 | representative at an online or offline event. 58 | 59 | ## Enforcement 60 | 61 | Instances of abusive, harassing, or otherwise unacceptable behavior may be 62 | reported to the community leaders responsible for enforcement at 63 | [INSERT CONTACT EMAIL]. 64 | 65 | All complaints will be reviewed and investigated promptly and fairly. 66 | 67 | All community leaders are obligated to respect the privacy and security of the 68 | reporter of any incident. 69 | 70 | ## Enforcement Guidelines 71 | 72 | Community leaders will follow these Community Impact Guidelines in determining 73 | the consequences for any action they deem in violation of this Code of Conduct: 74 | 75 | ### 1. Correction 76 | 77 | **Community Impact**: Use of inappropriate language or other behavior deemed 78 | unprofessional or unwelcome in the community. 79 | 80 | **Consequence**: A private, written warning from community leaders, providing 81 | clarity around the nature of the violation and an explanation of why the 82 | behavior was inappropriate. A public apology may be requested. 83 | 84 | ### 2. Warning 85 | 86 | **Community Impact**: A violation through a single incident or series of 87 | actions. 88 | 89 | **Consequence**: A warning with consequences for continued behavior. No 90 | interaction with the people involved, including unsolicited interaction with 91 | those enforcing the Code of Conduct, for a specified period of time. 
This 92 | includes avoiding interactions in community spaces as well as external channels 93 | like social media. Violating these terms may lead to a temporary or permanent 94 | ban. 95 | 96 | ### 3. Temporary Ban 97 | 98 | **Community Impact**: A serious violation of community standards, including 99 | sustained inappropriate behavior. 100 | 101 | **Consequence**: A temporary ban from any sort of interaction or public 102 | communication with the community for a specified period of time. No public or 103 | private interaction with the people involved, including unsolicited interaction 104 | with those enforcing the Code of Conduct, is allowed during this period. 105 | Violating these terms may lead to a permanent ban. 106 | 107 | ### 4. Permanent Ban 108 | 109 | **Community Impact**: Demonstrating a pattern of violation of community 110 | standards, including sustained inappropriate behavior, harassment of an 111 | individual, or aggression toward or disparagement of classes of individuals. 112 | 113 | **Consequence**: A permanent ban from any sort of public interaction within the 114 | community. 115 | 116 | ## Attribution 117 | 118 | This Code of Conduct is adapted from the [Contributor Covenant][homepage], 119 | version 2.1, available at 120 | [https://www.contributor-covenant.org/version/2/1/code_of_conduct.html][v2.1]. 121 | 122 | Community Impact Guidelines were inspired by 123 | [Mozilla's code of conduct enforcement ladder][Mozilla CoC]. 124 | 125 | For answers to common questions about this code of conduct, see the FAQ at 126 | [https://www.contributor-covenant.org/faq][FAQ]. Translations are available at 127 | [https://www.contributor-covenant.org/translations][translations]. 128 | 129 | [homepage]: https://www.contributor-covenant.org 130 | [v2.1]: https://www.contributor-covenant.org/version/2/1/code_of_conduct.html 131 | [Mozilla CoC]: https://github.com/mozilla/diversity 132 | [FAQ]: https://www.contributor-covenant.org/faq 133 | [translations]: https://www.contributor-covenant.org/translations 134 | 135 | --- 136 | 137 | ## Additional Community Guidelines for LogLynx 138 | 139 | ### Technical Discussions 140 | 141 | * **Be constructive**: Focus on the technical merits of ideas and implementations 142 | * **Provide context**: When suggesting changes, explain the reasoning and benefits 143 | * **Share knowledge**: Help others learn from your experience and expertise 144 | * **Stay on topic**: Keep discussions relevant to LogLynx development 145 | 146 | ### Code Reviews 147 | 148 | * **Be respectful**: Remember there's a person behind every pull request 149 | * **Be specific**: Provide clear, actionable feedback 150 | * **Be helpful**: Suggest improvements rather than just pointing out problems 151 | * **Be timely**: Review pull requests promptly to keep contributions moving 152 | 153 | ### Issue Reporting 154 | 155 | * **Be clear**: Provide detailed information about bugs or feature requests 156 | * **Be patient**: Maintainers are often volunteers with limited time 157 | * **Be collaborative**: Work with maintainers to resolve issues 158 | * **Be thankful**: Acknowledge the work of others who help you 159 | 160 | ### Community Participation 161 | 162 | * **Welcome newcomers**: Help new contributors get started 163 | * **Share resources**: Point people to documentation and examples 164 | * **Celebrate contributions**: Acknowledge and thank contributors 165 | * **Stay positive**: Maintain an encouraging and supportive atmosphere 166 | 167 | ### Communication Channels 168 | 169 
| When participating in LogLynx community spaces: 170 | 171 | * **GitHub Issues**: For bug reports and feature requests 172 | * **GitHub Discussions**: For questions, ideas, and general discussion 173 | * **Pull Requests**: For code contributions and reviews 174 | 175 | ### Handling Disagreements 176 | 177 | Technical disagreements are normal and healthy: 178 | 179 | * **Assume good intentions**: Start with the belief that others want what's best 180 | * **Focus on facts**: Use data, examples, and evidence to support your position 181 | * **Seek consensus**: Try to find solutions that address everyone's concerns 182 | * **Escalate gracefully**: If you can't reach agreement, ask maintainers for input 183 | * **Accept decisions**: Once a decision is made, support it even if you disagreed 184 | 185 | ### Recognizing Contributions 186 | 187 | We value all types of contributions: 188 | 189 | * Code contributions (features, bug fixes, optimizations) 190 | * Documentation improvements 191 | * Bug reports and testing 192 | * Feature suggestions and design input 193 | * Community support and mentoring 194 | * Translation and localization 195 | 196 | All contributors will be acknowledged and appreciated. 197 | 198 | ## Reporting Issues 199 | 200 | ### How to Report 201 | 202 | If you experience or witness unacceptable behavior, or have any other concerns, 203 | please report it by contacting the project team at [INSERT CONTACT EMAIL]. 204 | 205 | ### What to Include 206 | 207 | When reporting, please include: 208 | 209 | * Your contact information 210 | * Names (real, usernames, or pseudonyms) of any individuals involved 211 | * Your account of what occurred 212 | * Whether you believe this incident is ongoing 213 | * Any additional context or information 214 | 215 | ### Confidentiality 216 | 217 | All reports will be handled with discretion. We will respect confidentiality 218 | requests for the purpose of protecting victims of abuse. 219 | 220 | ### Response Timeline 221 | 222 | * **Acknowledgment**: Within 24-48 hours 223 | * **Initial assessment**: Within 5 business days 224 | * **Resolution**: As appropriate for the situation 225 | 226 | --- 227 | 228 | **Last Updated**: November 2025 229 | 230 | Thank you for helping make LogLynx a welcoming, friendly community for everyone! 
🙏 231 | -------------------------------------------------------------------------------- /internal/api/server.go: -------------------------------------------------------------------------------- 1 | package api 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "net/http" 7 | "time" 8 | 9 | "loglynx/internal/api/handlers" 10 | "loglynx/internal/version" 11 | 12 | "github.com/gin-gonic/gin" 13 | "github.com/pterm/pterm" 14 | ) 15 | 16 | // Server represents the HTTP server 17 | type Server struct { 18 | router *gin.Engine 19 | server *http.Server 20 | logger *pterm.Logger 21 | port int 22 | splashScreenEnabled bool 23 | } 24 | 25 | // Config holds server configuration 26 | type Config struct { 27 | Host string 28 | Port int 29 | Production bool 30 | DashboardEnabled bool // If false, only API routes are exposed 31 | SplashScreenEnabled bool // If false, splash screen is disabled on startup 32 | } 33 | 34 | // NewServer creates a new HTTP server 35 | func NewServer(cfg *Config, dashboardHandler *handlers.DashboardHandler, realtimeHandler *handlers.RealtimeHandler, systemHandler *handlers.SystemHandler, logger *pterm.Logger) *Server { 36 | // Set Gin mode 37 | if cfg.Production { 38 | gin.SetMode(gin.ReleaseMode) 39 | } else { 40 | gin.SetMode(gin.DebugMode) 41 | } 42 | 43 | router := gin.New() 44 | 45 | // Middleware 46 | router.Use(gin.Logger()) 47 | router.Use(gin.Recovery()) 48 | router.Use(corsMiddleware()) 49 | 50 | // Health check 51 | router.GET("/health", func(c *gin.Context) { 52 | c.JSON(http.StatusOK, gin.H{ 53 | "status": "healthy", 54 | "timestamp": time.Now(), 55 | "version": version.Version, 56 | }) 57 | }) 58 | 59 | // Helper function to render pages with common config 60 | splashScreenEnabled := cfg.SplashScreenEnabled 61 | renderPage := func(c *gin.Context, pageName, pageTitle, pageIcon string) { 62 | c.HTML(http.StatusOK, pageName+".html", gin.H{ 63 | "Title": pageTitle, 64 | "PageName": pageName, 65 | "PageTitle": pageTitle, 66 | "PageIcon": pageIcon, 67 | "AppVersion": version.Version, 68 | "SplashScreenEnabled": splashScreenEnabled, 69 | }) 70 | } 71 | 72 | // Dashboard UI routes (only if dashboard is enabled) 73 | if cfg.DashboardEnabled { 74 | // Load HTML templates with pattern for nested directories 75 | router.LoadHTMLGlob("web/templates/**/*.html") 76 | 77 | // Static files 78 | router.Static("/static", "./web/static") 79 | 80 | // Dashboard pages (HTML) 81 | router.GET("/", func(c *gin.Context) { 82 | renderPage(c, "overview", "Executive Overview", "fas fa-home") 83 | }) 84 | 85 | router.GET("/realtime", func(c *gin.Context) { 86 | renderPage(c, "realtime", "Real-time Monitor", "fas fa-broadcast-tower") 87 | }) 88 | 89 | router.GET("/traffic", func(c *gin.Context) { 90 | renderPage(c, "traffic", "Traffic Analysis", "fas fa-globe") 91 | }) 92 | 93 | router.GET("/performance", func(c *gin.Context) { 94 | renderPage(c, "performance", "Performance Monitoring", "fas fa-tachometer-alt") 95 | }) 96 | 97 | router.GET("/security", func(c *gin.Context) { 98 | renderPage(c, "security", "Security & Network", "fas fa-shield-alt") 99 | }) 100 | 101 | router.GET("/users", func(c *gin.Context) { 102 | renderPage(c, "users", "User Analytics", "fas fa-users") 103 | }) 104 | 105 | router.GET("/content", func(c *gin.Context) { 106 | renderPage(c, "content", "Content Analytics", "fas fa-file-alt") 107 | }) 108 | 109 | router.GET("/backends", func(c *gin.Context) { 110 | renderPage(c, "backends", "Backend Health", "fas fa-server") 111 | }) 112 | 113 | router.GET("/geographic", func(c 
*gin.Context) { 114 | renderPage(c, "geographic", "Geographic Analytics", "fas fa-map-marked-alt") 115 | }) 116 | 117 | router.GET("/system", func(c *gin.Context) { 118 | renderPage(c, "system", "System Statistics", "fas fa-server") 119 | }) 120 | 121 | // IP Analytics page 122 | router.GET("/ip/:ip", func(c *gin.Context) { 123 | ip := c.Param("ip") 124 | c.HTML(http.StatusOK, "ip-detail.html", gin.H{ 125 | "Title": "IP Analytics - " + ip, 126 | "PageName": "ip-detail", 127 | "PageTitle": "IP Analytics", 128 | "PageIcon": "fas fa-network-wired", 129 | "AppVersion": version.Version, 130 | "IPAddress": ip, 131 | "SplashScreenEnabled": splashScreenEnabled, 132 | }) 133 | }) 134 | 135 | logger.Info("Dashboard UI routes enabled") 136 | } else { 137 | logger.Info("Dashboard UI disabled - API-only mode") 138 | // Serve a simple message at root when dashboard is disabled 139 | router.GET("/", func(c *gin.Context) { 140 | c.JSON(http.StatusOK, gin.H{ 141 | "message": "LogLynx API Server - Dashboard UI is disabled", 142 | "api": "/api/v1", 143 | "health": "/health", 144 | "version": version.Version, 145 | }) 146 | }) 147 | } 148 | 149 | // API routes 150 | api := router.Group("/api/v1") 151 | { 152 | api.GET("/version", func(c *gin.Context) { 153 | c.JSON(http.StatusOK, gin.H{"version": version.Version}) 154 | }) 155 | 156 | // Summary stats 157 | api.GET("/stats/summary", dashboardHandler.GetSummary) 158 | 159 | // Timeline data 160 | api.GET("/stats/timeline", dashboardHandler.GetTimeline) 161 | api.GET("/stats/timeline/status-codes", dashboardHandler.GetStatusCodeTimeline) 162 | api.GET("/stats/heatmap/traffic", dashboardHandler.GetTrafficHeatmap) 163 | 164 | // Top stats 165 | api.GET("/stats/top/paths", dashboardHandler.GetTopPaths) 166 | api.GET("/stats/top/countries", dashboardHandler.GetTopCountries) 167 | api.GET("/stats/top/ips", dashboardHandler.GetTopIPs) 168 | api.GET("/stats/top/user-agents", dashboardHandler.GetTopUserAgents) 169 | api.GET("/stats/top/browsers", dashboardHandler.GetTopBrowsers) 170 | api.GET("/stats/top/operating-systems", dashboardHandler.GetTopOperatingSystems) 171 | api.GET("/stats/top/asns", dashboardHandler.GetTopASNs) 172 | api.GET("/stats/top/backends", dashboardHandler.GetTopBackends) 173 | api.GET("/stats/top/referrers", dashboardHandler.GetTopReferrers) 174 | api.GET("/stats/top/referrer-domains", dashboardHandler.GetTopReferrerDomains) 175 | 176 | // Distribution stats 177 | api.GET("/stats/distribution/status-codes", dashboardHandler.GetStatusCodeDistribution) 178 | api.GET("/stats/distribution/methods", dashboardHandler.GetMethodDistribution) 179 | api.GET("/stats/distribution/protocols", dashboardHandler.GetProtocolDistribution) 180 | api.GET("/stats/distribution/tls-versions", dashboardHandler.GetTLSVersionDistribution) 181 | api.GET("/stats/distribution/device-types", dashboardHandler.GetDeviceTypeDistribution) 182 | 183 | // Performance stats 184 | api.GET("/stats/performance/response-time", dashboardHandler.GetResponseTimeStats) 185 | api.GET("/stats/log-processing", dashboardHandler.GetLogProcessingStats) 186 | 187 | // Recent requests 188 | api.GET("/requests/recent", dashboardHandler.GetRecentRequests) 189 | 190 | // Real-time metrics 191 | api.GET("/realtime/metrics", realtimeHandler.GetCurrentMetrics) 192 | api.GET("/realtime/stream", realtimeHandler.StreamMetrics) 193 | api.GET("/realtime/services", realtimeHandler.GetPerServiceMetrics) 194 | 195 | // Domains list (deprecated) 196 | api.GET("/domains", dashboardHandler.GetDomains) 197 | 198 | 
// Services list (with types) 199 | api.GET("/services", dashboardHandler.GetServices) 200 | 201 | // IP Analytics 202 | api.GET("/ip/:ip/stats", dashboardHandler.GetIPDetailedStats) 203 | api.GET("/ip/:ip/timeline", dashboardHandler.GetIPTimeline) 204 | api.GET("/ip/:ip/heatmap", dashboardHandler.GetIPHeatmap) 205 | api.GET("/ip/:ip/top/paths", dashboardHandler.GetIPTopPaths) 206 | api.GET("/ip/:ip/top/backends", dashboardHandler.GetIPTopBackends) 207 | api.GET("/ip/:ip/distribution/status-codes", dashboardHandler.GetIPStatusCodeDistribution) 208 | api.GET("/ip/:ip/top/browsers", dashboardHandler.GetIPTopBrowsers) 209 | api.GET("/ip/:ip/top/operating-systems", dashboardHandler.GetIPTopOperatingSystems) 210 | api.GET("/ip/:ip/distribution/device-types", dashboardHandler.GetIPDeviceTypeDistribution) 211 | api.GET("/ip/:ip/performance/response-time", dashboardHandler.GetIPResponseTimeStats) 212 | api.GET("/ip/:ip/recent-requests", dashboardHandler.GetIPRecentRequests) 213 | api.GET("/ip/search", dashboardHandler.SearchIPs) 214 | 215 | // System Statistics 216 | api.GET("/system/stats", systemHandler.GetSystemStats) 217 | api.GET("/system/timeline", systemHandler.GetRecordsTimeline) 218 | } 219 | 220 | addr := fmt.Sprintf("%s:%d", cfg.Host, cfg.Port) 221 | return &Server{ 222 | router: router, 223 | server: &http.Server{ 224 | Addr: addr, 225 | Handler: router, 226 | ReadTimeout: 10 * time.Second, 227 | WriteTimeout: 300 * time.Second, // Long timeout for SSE streams 228 | MaxHeaderBytes: 1 << 20, 229 | }, 230 | logger: logger, 231 | port: cfg.Port, 232 | splashScreenEnabled: cfg.SplashScreenEnabled, 233 | } 234 | } 235 | 236 | // Run starts the HTTP server 237 | func (s *Server) Run() error { 238 | s.logger.Info("Starting web server", s.logger.Args("address", s.server.Addr)) 239 | if err := s.server.ListenAndServe(); err != nil && err != http.ErrServerClosed { 240 | s.logger.WithCaller().Error("Web server failed", s.logger.Args("error", err)) 241 | return err 242 | } 243 | return nil 244 | } 245 | 246 | // Shutdown gracefully shuts down the server 247 | func (s *Server) Shutdown(ctx context.Context) error { 248 | s.logger.Info("Shutting down web server...") 249 | return s.server.Shutdown(ctx) 250 | } 251 | 252 | // corsMiddleware adds CORS headers 253 | func corsMiddleware() gin.HandlerFunc { 254 | return func(c *gin.Context) { 255 | c.Writer.Header().Set("Access-Control-Allow-Origin", "*") 256 | // Allow-Credentials is deliberately not set: "true" is invalid with the wildcard origin above 257 | c.Writer.Header().Set("Access-Control-Allow-Headers", "Content-Type, Content-Length, Accept-Encoding, X-CSRF-Token, Authorization, accept, origin, Cache-Control, X-Requested-With") 258 | c.Writer.Header().Set("Access-Control-Allow-Methods", "POST, OPTIONS, GET, PUT, DELETE") 259 | 260 | if c.Request.Method == "OPTIONS" { 261 | c.AbortWithStatus(204) 262 | return 263 | } 264 | 265 | c.Next() 266 | } 267 | } 268 | -------------------------------------------------------------------------------- /internal/parser/traefik/parser_test.go: -------------------------------------------------------------------------------- 1 | package traefik 2 | 3 | import ( 4 | "testing" 5 | "time" 6 | 7 | "github.com/pterm/pterm" 8 | ) 9 | 10 | func TestParser_CanParse_JSON(t *testing.T) { 11 | logger := pterm.DefaultLogger.WithLevel(pterm.LogLevelTrace) 12 | parser := NewParser(logger) 13 | 14 | jsonLog := 
`{"ClientHost":"103.4.250.66","DownstreamStatus":200,"Duration":299425702,"RequestMethod":"GET","RequestPath":"/","RequestProtocol":"HTTP/1.1","ServiceName":"next-service@file","TLSCipher":"TLS_AES_128_GCM_SHA256","TLSVersion":"1.3","request_User-Agent":"Mozilla/5.0","request_X-Real-Ip":"103.4.250.66","time":"2025-10-25T21:11:49Z"}` 15 | 16 | if !parser.CanParse(jsonLog) { 17 | t.Error("Expected parser to accept JSON log format") 18 | } 19 | } 20 | 21 | func TestParser_CanParse_TraefikCLF(t *testing.T) { 22 | logger := pterm.DefaultLogger.WithLevel(pterm.LogLevelTrace) 23 | parser := NewParser(logger) 24 | 25 | clfLog := `192.168.1.100 - - [15/May/2025:12:06:30 +0000] "GET /api/endpoint HTTP/1.1" 200 1024 "https://example.com" "Mozilla/5.0" 42 "my-router" "http://backend:8080" 150ms` 26 | 27 | if !parser.CanParse(clfLog) { 28 | t.Error("Expected parser to accept Traefik CLF format") 29 | } 30 | } 31 | 32 | func TestParser_CanParse_GenericCLF(t *testing.T) { 33 | logger := pterm.DefaultLogger.WithLevel(pterm.LogLevelTrace) 34 | parser := NewParser(logger) 35 | 36 | genericCLF := `192.168.1.100 - - [15/May/2025:12:06:30 +0000] "GET /api/endpoint HTTP/1.1" 200 1024 "https://example.com" "Mozilla/5.0"` 37 | 38 | if !parser.CanParse(genericCLF) { 39 | t.Error("Expected parser to accept generic CLF format") 40 | } 41 | } 42 | 43 | func TestParser_CanParse_Invalid(t *testing.T) { 44 | logger := pterm.DefaultLogger.WithLevel(pterm.LogLevelTrace) 45 | parser := NewParser(logger) 46 | 47 | tests := []string{ 48 | "", 49 | "invalid log line", 50 | "192.168.1.1 - just some random text", 51 | } 52 | 53 | for _, tc := range tests { 54 | if parser.CanParse(tc) { 55 | t.Errorf("Expected parser to reject invalid log: %q", tc) 56 | } 57 | } 58 | } 59 | 60 | func TestParser_ParseJSON(t *testing.T) { 61 | logger := pterm.DefaultLogger.WithLevel(pterm.LogLevelTrace) 62 | parser := NewParser(logger) 63 | 64 | jsonLog := `{"ClientHost":"103.4.250.66","DownstreamContentSize":31869,"DownstreamStatus":200,"Duration":299425702,"RequestMethod":"GET","RequestPath":"/test?redirect=https://example.com","RequestProtocol":"HTTP/1.1","ServiceName":"next-service@file","TLSCipher":"TLS_AES_128_GCM_SHA256","TLSVersion":"1.3","request_User-Agent":"Mozilla/5.0 (Test)","request_Referer":"https://referrer.com","request_X-Real-Ip":"103.4.250.66","time":"2025-10-25T21:11:49Z"}` 65 | 66 | event, err := parser.Parse(jsonLog) 67 | if err != nil { 68 | t.Fatalf("Failed to parse JSON log: %v", err) 69 | } 70 | 71 | // Verify key fields 72 | if event.ClientIP != "103.4.250.66" { 73 | t.Errorf("Expected ClientIP '103.4.250.66', got '%s'", event.ClientIP) 74 | } 75 | if event.Method != "GET" { 76 | t.Errorf("Expected Method 'GET', got '%s'", event.Method) 77 | } 78 | if event.Path != "/test" { 79 | t.Errorf("Expected Path '/test', got '%s'", event.Path) 80 | } 81 | if event.QueryString != "redirect=https://example.com" { 82 | t.Errorf("Expected QueryString 'redirect=https://example.com', got '%s'", event.QueryString) 83 | } 84 | if event.StatusCode != 200 { 85 | t.Errorf("Expected StatusCode 200, got %d", event.StatusCode) 86 | } 87 | if event.ResponseSize != 31869 { 88 | t.Errorf("Expected ResponseSize 31869, got %d", event.ResponseSize) 89 | } 90 | if event.TLSVersion != "1.3" { 91 | t.Errorf("Expected TLSVersion '1.3', got '%s'", event.TLSVersion) 92 | } 93 | if event.UserAgent != "Mozilla/5.0 (Test)" { 94 | t.Errorf("Expected UserAgent 'Mozilla/5.0 (Test)', got '%s'", event.UserAgent) 95 | } 96 | } 97 | 98 | func 
TestParser_ParseTraefikCLF(t *testing.T) { 99 | logger := pterm.DefaultLogger.WithLevel(pterm.LogLevelTrace) 100 | parser := NewParser(logger) 101 | 102 | clfLog := `192.168.1.100 - - [15/May/2025:12:06:30 +0000] "GET /api/endpoint HTTP/1.1" 200 1024 "https://example.com" "Mozilla/5.0" 42 "my-router" "http://backend:8080" 150ms` 103 | 104 | event, err := parser.Parse(clfLog) 105 | if err != nil { 106 | t.Fatalf("Failed to parse Traefik CLF log: %v", err) 107 | } 108 | 109 | // Verify key fields 110 | if event.ClientIP != "192.168.1.100" { 111 | t.Errorf("Expected ClientIP '192.168.1.100', got '%s'", event.ClientIP) 112 | } 113 | if event.Method != "GET" { 114 | t.Errorf("Expected Method 'GET', got '%s'", event.Method) 115 | } 116 | if event.Path != "/api/endpoint" { 117 | t.Errorf("Expected Path '/api/endpoint', got '%s'", event.Path) 118 | } 119 | if event.StatusCode != 200 { 120 | t.Errorf("Expected StatusCode 200, got %d", event.StatusCode) 121 | } 122 | if event.ResponseSize != 1024 { 123 | t.Errorf("Expected ResponseSize 1024, got %d", event.ResponseSize) 124 | } 125 | if event.ResponseTimeMs != 150 { 126 | t.Errorf("Expected ResponseTimeMs 150, got %f", event.ResponseTimeMs) 127 | } 128 | if event.RouterName != "my-router" { 129 | t.Errorf("Expected RouterName 'my-router', got '%s'", event.RouterName) 130 | } 131 | if event.BackendURL != "http://backend:8080" { 132 | t.Errorf("Expected BackendURL 'http://backend:8080', got '%s'", event.BackendURL) 133 | } 134 | if event.UserAgent != "Mozilla/5.0" { 135 | t.Errorf("Expected UserAgent 'Mozilla/5.0', got '%s'", event.UserAgent) 136 | } 137 | 138 | // Check timestamp parsing 139 | expectedTime, _ := time.Parse("02/Jan/2006:15:04:05 -0700", "15/May/2025:12:06:30 +0000") 140 | if !event.Timestamp.Equal(expectedTime) { 141 | t.Errorf("Expected Timestamp %v, got %v", expectedTime, event.Timestamp) 142 | } 143 | } 144 | 145 | func TestParser_ParseGenericCLF(t *testing.T) { 146 | logger := pterm.DefaultLogger.WithLevel(pterm.LogLevelTrace) 147 | parser := NewParser(logger) 148 | 149 | genericCLF := `192.168.1.100 - - [15/May/2025:12:06:30 +0000] "POST /api/users HTTP/1.1" 201 512 "https://app.example.com" "curl/7.68.0"` 150 | 151 | event, err := parser.Parse(genericCLF) 152 | if err != nil { 153 | t.Fatalf("Failed to parse generic CLF log: %v", err) 154 | } 155 | 156 | // Verify key fields 157 | if event.ClientIP != "192.168.1.100" { 158 | t.Errorf("Expected ClientIP '192.168.1.100', got '%s'", event.ClientIP) 159 | } 160 | if event.Method != "POST" { 161 | t.Errorf("Expected Method 'POST', got '%s'", event.Method) 162 | } 163 | if event.Path != "/api/users" { 164 | t.Errorf("Expected Path '/api/users', got '%s'", event.Path) 165 | } 166 | if event.StatusCode != 201 { 167 | t.Errorf("Expected StatusCode 201, got %d", event.StatusCode) 168 | } 169 | if event.ResponseSize != 512 { 170 | t.Errorf("Expected ResponseSize 512, got %d", event.ResponseSize) 171 | } 172 | if event.UserAgent != "curl/7.68.0" { 173 | t.Errorf("Expected UserAgent 'curl/7.68.0', got '%s'", event.UserAgent) 174 | } 175 | // Generic CLF doesn't have duration 176 | if event.ResponseTimeMs != 0 { 177 | t.Errorf("Expected ResponseTimeMs 0, got %f", event.ResponseTimeMs) 178 | } 179 | } 180 | 181 | func TestParser_DetectFormat(t *testing.T) { 182 | logger := pterm.DefaultLogger.WithLevel(pterm.LogLevelTrace) 183 | parser := NewParser(logger) 184 | 185 | tests := []struct { 186 | name string 187 | line string 188 | expected LogFormat 189 | }{ 190 | { 191 | name: "JSON format", 
192 | line: `{"time":"2025-10-25T21:11:49Z","request_X-Real-Ip":"103.4.250.66"}`, 193 | expected: FormatJSON, 194 | }, 195 | { 196 | name: "Traefik CLF format", 197 | line: `192.168.1.100 - - [15/May/2025:12:06:30 +0000] "GET /api HTTP/1.1" 200 1024 "-" "Mozilla" 42 "router" "http://backend" 150ms`, 198 | expected: FormatCLF, 199 | }, 200 | { 201 | name: "Generic CLF format", 202 | line: `192.168.1.100 - - [15/May/2025:12:06:30 +0000] "GET /api HTTP/1.1" 200 1024 "-" "Mozilla"`, 203 | expected: FormatCLF, 204 | }, 205 | { 206 | name: "Unknown format", 207 | line: `invalid log line`, 208 | expected: FormatUnknown, 209 | }, 210 | { 211 | name: "Empty line", 212 | line: "", 213 | expected: FormatUnknown, 214 | }, 215 | } 216 | 217 | for _, tc := range tests { 218 | t.Run(tc.name, func(t *testing.T) { 219 | format := parser.detectFormat(tc.line) 220 | if format != tc.expected { 221 | t.Errorf("Expected format %d, got %d", tc.expected, format) 222 | } 223 | }) 224 | } 225 | } 226 | 227 | func TestParser_ParseCLFWithDashValues(t *testing.T) { 228 | logger := pterm.DefaultLogger.WithLevel(pterm.LogLevelTrace) 229 | parser := NewParser(logger) 230 | 231 | // Test with "-" values for referer and user agent 232 | clfLog := `192.168.1.100 - - [15/May/2025:12:06:30 +0000] "GET /api HTTP/1.1" 200 - "-" "-" 42 "-" "-" 150ms` 233 | 234 | event, err := parser.Parse(clfLog) 235 | if err != nil { 236 | t.Fatalf("Failed to parse CLF log with dash values: %v", err) 237 | } 238 | 239 | // Verify "-" values are converted to empty strings 240 | if event.Referer != "" { 241 | t.Errorf("Expected empty Referer, got '%s'", event.Referer) 242 | } 243 | if event.UserAgent != "" { 244 | t.Errorf("Expected empty UserAgent, got '%s'", event.UserAgent) 245 | } 246 | if event.RouterName != "" { 247 | t.Errorf("Expected empty RouterName, got '%s'", event.RouterName) 248 | } 249 | if event.BackendURL != "" { 250 | t.Errorf("Expected empty BackendURL, got '%s'", event.BackendURL) 251 | } 252 | if event.ResponseSize != 0 { 253 | t.Errorf("Expected ResponseSize 0, got %d", event.ResponseSize) 254 | } 255 | } 256 | 257 | func TestParser_ParseCLFWithQueryString(t *testing.T) { 258 | logger := pterm.DefaultLogger.WithLevel(pterm.LogLevelTrace) 259 | parser := NewParser(logger) 260 | 261 | clfLog := `192.168.1.100 - - [15/May/2025:12:06:30 +0000] "GET /api/search?q=test&limit=10 HTTP/1.1" 200 1024 "-" "Mozilla" 42 "router" "http://backend" 150ms` 262 | 263 | event, err := parser.Parse(clfLog) 264 | if err != nil { 265 | t.Fatalf("Failed to parse CLF log with query string: %v", err) 266 | } 267 | 268 | if event.Path != "/api/search" { 269 | t.Errorf("Expected Path '/api/search', got '%s'", event.Path) 270 | } 271 | if event.QueryString != "q=test&limit=10" { 272 | t.Errorf("Expected QueryString 'q=test&limit=10', got '%s'", event.QueryString) 273 | } 274 | } 275 | -------------------------------------------------------------------------------- /internal/database/cleanup.go: -------------------------------------------------------------------------------- 1 | package database 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "time" 7 | 8 | "github.com/pterm/pterm" 9 | "gorm.io/gorm" 10 | ) 11 | 12 | // CoordinatorController interface for controlling ingestion during maintenance 13 | type CoordinatorController interface { 14 | Stop() 15 | Start() error 16 | GetProcessorCount() int 17 | } 18 | 19 | // CleanupService manages database cleanup and retention 20 | type CleanupService struct { 21 | db *gorm.DB 22 | logger *pterm.Logger 23 | 
retentionDays int 24 | cleanupInterval time.Duration 25 | cleanupTime string 26 | vacuumEnabled bool 27 | coordinator CoordinatorController 28 | stopChan chan struct{} 29 | running bool 30 | // Stats tracking 31 | lastRunTime time.Time 32 | recordsDeleted int64 33 | cleanupDuration time.Duration 34 | } 35 | 36 | // CleanupStats holds statistics about cleanup operations 37 | type CleanupStats struct { 38 | LastRunTime time.Time 39 | RecordsDeleted int64 40 | SpaceFreed int64 41 | VacuumDuration time.Duration 42 | CleanupDuration time.Duration 43 | NextScheduledRun time.Time 44 | } 45 | 46 | // NewCleanupService creates a new cleanup service 47 | func NewCleanupService(db *gorm.DB, logger *pterm.Logger, retentionDays int, cleanupInterval time.Duration, cleanupTime string, vacuumEnabled bool, coordinator CoordinatorController) *CleanupService { 48 | return &CleanupService{ 49 | db: db, 50 | logger: logger, 51 | retentionDays: retentionDays, 52 | cleanupInterval: cleanupInterval, 53 | cleanupTime: cleanupTime, 54 | vacuumEnabled: vacuumEnabled, 55 | coordinator: coordinator, 56 | stopChan: make(chan struct{}), 57 | running: false, 58 | } 59 | } 60 | 61 | // Start begins the cleanup service 62 | func (s *CleanupService) Start() { 63 | if s.retentionDays <= 0 { 64 | s.logger.Info("Data retention disabled (DB_RETENTION_DAYS=0), cleanup service not started") 65 | return 66 | } 67 | 68 | s.running = true 69 | s.logger.Info("Starting database cleanup service", 70 | s.logger.Args( 71 | "retention_days", s.retentionDays, 72 | "cleanup_time", s.cleanupTime, 73 | "vacuum_enabled", s.vacuumEnabled, 74 | )) 75 | 76 | go s.scheduledCleanupLoop() 77 | } 78 | 79 | // Stop stops the cleanup service 80 | func (s *CleanupService) Stop() { 81 | if !s.running { 82 | return 83 | } 84 | 85 | s.logger.Info("Stopping database cleanup service") 86 | close(s.stopChan) 87 | s.running = false 88 | } 89 | 90 | // scheduledCleanupLoop runs cleanup at scheduled time daily 91 | func (s *CleanupService) scheduledCleanupLoop() { 92 | // Run initial cleanup check after 1 minute 93 | time.Sleep(1 * time.Minute) 94 | 95 | for { 96 | select { 97 | case <-s.stopChan: 98 | return 99 | default: 100 | // Check if it's time to run cleanup 101 | now := time.Now() 102 | targetTime := s.parseCleanupTime(now) 103 | 104 | // If target time has passed today, schedule for tomorrow 105 | if now.After(targetTime) { 106 | targetTime = targetTime.Add(24 * time.Hour) 107 | } 108 | 109 | waitDuration := time.Until(targetTime) 110 | s.logger.Debug("Next cleanup scheduled", 111 | s.logger.Args("next_run", targetTime.Format(time.DateTime), "wait_duration", waitDuration.Round(time.Minute))) 112 | 113 | // Wait until target time or check interval 114 | select { 115 | case <-s.stopChan: 116 | return 117 | case <-time.After(min(waitDuration, s.cleanupInterval)): 118 | // Check if we're at target time 119 | if time.Now().After(targetTime.Add(-1 * time.Minute)) { 120 | s.runCleanup() 121 | } 122 | } 123 | } 124 | } 125 | } 126 | 127 | // parseCleanupTime parses the cleanup time string (HH:MM) and returns today's time 128 | func (s *CleanupService) parseCleanupTime(baseTime time.Time) time.Time { 129 | // Parse HH:MM format 130 | cleanupTime, err := time.Parse("15:04", s.cleanupTime) 131 | if err != nil { 132 | s.logger.Warn("Invalid cleanup time format, using 02:00", 133 | s.logger.Args("configured", s.cleanupTime, "error", err)) 134 | cleanupTime, _ = time.Parse("15:04", "02:00") 135 | } 136 | 137 | // Combine with today's date 138 | return time.Date( 139 
| baseTime.Year(), baseTime.Month(), baseTime.Day(), 140 | cleanupTime.Hour(), cleanupTime.Minute(), 0, 0, 141 | baseTime.Location(), 142 | ) 143 | } 144 | 145 | // runCleanup performs the cleanup operation 146 | func (s *CleanupService) runCleanup() { 147 | s.logger.Info("Starting scheduled database cleanup", 148 | s.logger.Args("retention_days", s.retentionDays)) 149 | 150 | startTime := time.Now() 151 | 152 | // Calculate cutoff date 153 | cutoffDate := time.Now().AddDate(0, 0, -s.retentionDays) 154 | 155 | // Delete old records in batches to avoid long locks 156 | totalDeleted, err := s.deleteOldRecords(cutoffDate) 157 | if err != nil { 158 | s.logger.WithCaller().Error("Failed to delete old records", 159 | s.logger.Args("error", err, "cutoff_date", cutoffDate.Format("2006-01-02"))) 160 | return 161 | } 162 | 163 | cleanupDuration := time.Since(startTime) 164 | 165 | // Update stats 166 | s.lastRunTime = startTime 167 | s.recordsDeleted = totalDeleted 168 | s.cleanupDuration = cleanupDuration 169 | 170 | s.logger.Info("Cleanup completed", 171 | s.logger.Args( 172 | "records_deleted", totalDeleted, 173 | "duration", cleanupDuration.Round(time.Second), 174 | "cutoff_date", cutoffDate.Format("2006-01-02"), 175 | )) 176 | 177 | // Run VACUUM if enabled and significant space was freed 178 | if s.vacuumEnabled && totalDeleted > 0 { 179 | s.runVacuum() 180 | } 181 | } 182 | 183 | // deleteOldRecords deletes records older than cutoff date in batches 184 | func (s *CleanupService) deleteOldRecords(cutoffDate time.Time) (int64, error) { 185 | const batchSize = 1000 186 | totalDeleted := int64(0) 187 | 188 | s.logger.Debug("Deleting records in batches", 189 | s.logger.Args("batch_size", batchSize, "cutoff_date", cutoffDate.Format("2006-01-02"))) 190 | 191 | for { 192 | // Delete in batches using subquery to avoid full table scan 193 | result := s.db.Exec(` 194 | DELETE FROM http_requests 195 | WHERE id IN ( 196 | SELECT id FROM http_requests 197 | WHERE timestamp < ? 198 | LIMIT ? 
199 | ) 200 | `, cutoffDate, batchSize) 201 | 202 | if result.Error != nil { 203 | return totalDeleted, result.Error 204 | } 205 | 206 | deleted := result.RowsAffected 207 | totalDeleted += deleted 208 | 209 | if deleted == 0 { 210 | break // No more records to delete 211 | } 212 | 213 | s.logger.Trace("Deleted batch", 214 | s.logger.Args("batch_deleted", deleted, "total_deleted", totalDeleted)) 215 | 216 | // Small pause between batches to avoid hogging the database 217 | time.Sleep(100 * time.Millisecond) 218 | } 219 | 220 | return totalDeleted, nil 221 | } 222 | 223 | // runVacuum runs VACUUM to reclaim space 224 | // It pauses ingestion to prevent "database locked" errors 225 | func (s *CleanupService) runVacuum() { 226 | s.logger.Info("Starting VACUUM maintenance window") 227 | 228 | startTime := time.Now() 229 | 230 | // Phase 1: Stop ingestion coordinator to prevent query conflicts 231 | if s.coordinator != nil { 232 | processorCount := s.coordinator.GetProcessorCount() 233 | if processorCount > 0 { 234 | s.logger.Info("Pausing ingestion for maintenance", 235 | s.logger.Args("active_processors", processorCount)) 236 | s.coordinator.Stop() 237 | 238 | // Give processors time to finish current operations 239 | time.Sleep(2 * time.Second) 240 | } 241 | } 242 | 243 | // Phase 2: Run VACUUM with exclusive database access 244 | s.logger.Info("Running VACUUM to reclaim disk space (maintenance window active)") 245 | 246 | vacuumStart := time.Now() 247 | 248 | // Create context with timeout (max 10 minutes for VACUUM) 249 | ctx, cancel := context.WithTimeout(context.Background(), 10*time.Minute) 250 | defer cancel() 251 | 252 | // Run VACUUM - no "database locked" errors because no active queries 253 | if err := s.db.WithContext(ctx).Exec("VACUUM").Error; err != nil { 254 | s.logger.WithCaller().Error("Failed to run VACUUM", 255 | s.logger.Args("error", err)) 256 | 257 | // Restart coordinator even if VACUUM failed 258 | if s.coordinator != nil { 259 | s.logger.Info("Restarting ingestion after VACUUM failure") 260 | if err := s.coordinator.Start(); err != nil { 261 | s.logger.WithCaller().Error("Failed to restart coordinator after VACUUM failure", 262 | s.logger.Args("error", err)) 263 | } 264 | } 265 | return 266 | } 267 | 268 | vacuumDuration := time.Since(vacuumStart) 269 | 270 | // Phase 3: Restart ingestion coordinator 271 | if s.coordinator != nil { 272 | s.logger.Info("Restarting ingestion after VACUUM") 273 | if err := s.coordinator.Start(); err != nil { 274 | s.logger.WithCaller().Error("Failed to restart coordinator", 275 | s.logger.Args("error", err)) 276 | // Critical error - coordinator should always restart 277 | return 278 | } 279 | 280 | // Verify processors restarted successfully 281 | processorCount := s.coordinator.GetProcessorCount() 282 | s.logger.Info("Ingestion resumed", 283 | s.logger.Args("active_processors", processorCount)) 284 | } 285 | 286 | totalDuration := time.Since(startTime) 287 | 288 | s.logger.Info("VACUUM maintenance completed", 289 | s.logger.Args( 290 | "vacuum_duration", vacuumDuration.Round(time.Second), 291 | "total_duration", totalDuration.Round(time.Second), 292 | )) 293 | } 294 | 295 | // GetStats returns cleanup statistics 296 | func (s *CleanupService) GetStats() *CleanupStats { 297 | // Calculate next scheduled run 298 | now := time.Now() 299 | targetTime := s.parseCleanupTime(now) 300 | 301 | // If target time has passed today, schedule for tomorrow 302 | if now.After(targetTime) { 303 | targetTime = targetTime.Add(24 * time.Hour) 304 | } 305 | 
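// Worked example (assumed values): with cleanupTime "02:00" and now at
// 2025-11-05 14:30, parseCleanupTime returns 2025-11-05 02:00; since that time
// has already passed, NextScheduledRun rolls over to 2025-11-06 02:00. Note
// that lastRunTime, recordsDeleted, and cleanupDuration are written by the
// cleanup goroutine without locking, so this is a best-effort snapshot.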
306 | return &CleanupStats{ 307 | LastRunTime: s.lastRunTime, 308 | RecordsDeleted: s.recordsDeleted, 309 | CleanupDuration: s.cleanupDuration, 310 | NextScheduledRun: targetTime, 311 | } 312 | } 313 | 314 | // ManualCleanup triggers cleanup immediately (useful for testing/admin) 315 | func (s *CleanupService) ManualCleanup() error { 316 | if s.retentionDays <= 0 { 317 | return fmt.Errorf("retention disabled (DB_RETENTION_DAYS=0)") 318 | } 319 | 320 | s.logger.Info("Manual cleanup triggered") 321 | go s.runCleanup() 322 | return nil 323 | } 324 | 325 | // min returns the minimum of two durations 326 | func min(a, b time.Duration) time.Duration { 327 | if a < b { 328 | return a 329 | } 330 | return b 331 | } 332 | -------------------------------------------------------------------------------- /cmd/server/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "context" 5 | "os" 6 | "os/signal" 7 | "runtime" 8 | "syscall" 9 | "time" 10 | 11 | "loglynx/internal/api" 12 | "loglynx/internal/api/handlers" 13 | "loglynx/internal/banner" 14 | "loglynx/internal/config" 15 | "loglynx/internal/database" 16 | "loglynx/internal/database/repositories" 17 | "loglynx/internal/discovery" 18 | "loglynx/internal/enrichment" 19 | "loglynx/internal/ingestion" 20 | parsers "loglynx/internal/parser" 21 | "loglynx/internal/realtime" 22 | 23 | "strings" 24 | 25 | "github.com/pterm/pterm" 26 | ) 27 | 28 | func main() { 29 | // Configure Go runtime to use all available CPU cores 30 | // This enables goroutines to run in parallel on all cores 31 | runtime.GOMAXPROCS(runtime.NumCPU()) 32 | 33 | // Initialize logger with INFO level for production as a sensible default 34 | // We'll reconfigure the level after loading the configuration (LOG_LEVEL) 35 | logger := pterm.DefaultLogger.WithLevel(pterm.LogLevelInfo) 36 | 37 | // Print banner 38 | banner.Print() 39 | 40 | logger.Info("Initializing LogLynx - Fast Log Analytics...", 41 | logger.Args("cpu_cores", runtime.NumCPU(), "gomaxprocs", runtime.GOMAXPROCS(0))) 42 | 43 | // Load configuration from .env file and environment variables 44 | cfg, err := config.Load() 45 | if err != nil { 46 | logger.WithCaller().Fatal("Failed to load configuration", logger.Args("error", err)) 47 | } 48 | 49 | // Apply configured log level from environment variable LOG_LEVEL (default: info) 50 | // Supported values: trace, debug, info, warn, error, fatal 51 | lvl := strings.ToLower(cfg.LogLevel) 52 | var ptermLevel pterm.LogLevel 53 | switch lvl { 54 | case "trace": 55 | ptermLevel = pterm.LogLevelTrace 56 | case "debug": 57 | ptermLevel = pterm.LogLevelDebug 58 | case "info": 59 | ptermLevel = pterm.LogLevelInfo 60 | case "warn", "warning": 61 | ptermLevel = pterm.LogLevelWarn 62 | case "error": 63 | ptermLevel = pterm.LogLevelError 64 | case "fatal": 65 | ptermLevel = pterm.LogLevelFatal 66 | default: 67 | ptermLevel = pterm.LogLevelInfo 68 | } 69 | logger = pterm.DefaultLogger.WithLevel(ptermLevel) 70 | logger.Debug("Log level set", logger.Args("level", lvl)) 71 | 72 | logger.Debug("Configuration loaded", 73 | logger.Args( 74 | "db_path", cfg.Database.Path, 75 | "server_port", cfg.Server.Port, 76 | "geoip_enabled", cfg.GeoIP.Enabled, 77 | )) 78 | 79 | // Initialize database connection with configured settings 80 | db, err := database.NewConnection(&database.Config{ 81 | Path: cfg.Database.Path, 82 | MaxOpenConns: cfg.Database.MaxOpenConns, 83 | MaxIdleConns: cfg.Database.MaxIdleConns, 84 | ConnMaxLife: 
cfg.Database.ConnMaxLife, 85 | 86 | // Pool Monitoring 87 | PoolMonitoringEnabled: cfg.Database.PoolMonitoringEnabled, 88 | PoolMonitoringInterval: cfg.Database.PoolMonitoringInterval, 89 | PoolSaturationThreshold: cfg.Database.PoolSaturationThreshold, 90 | AutoTuning: cfg.Database.AutoTuning, 91 | }, logger) 92 | if err != nil { 93 | logger.WithCaller().Fatal("Failed to connect to database", logger.Args("error", err)) 94 | } 95 | 96 | // Initialize repositories 97 | logger.Debug("Initializing repositories...") 98 | sourceRepo := repositories.NewLogSourceRepository(db) 99 | httpRepo := repositories.NewHTTPRequestRepository(db, logger) 100 | statsRepo := repositories.NewStatsRepository(db, logger) 101 | 102 | // Initialize GeoIP enricher (optional - will work without GeoIP databases) 103 | var geoIP *enrichment.GeoIPEnricher 104 | if cfg.GeoIP.Enabled { 105 | logger.Debug("Initializing GeoIP enricher...") 106 | geoIP, err = enrichment.NewGeoIPEnricher( 107 | cfg.GeoIP.CityDBPath, 108 | cfg.GeoIP.CountryDBPath, 109 | cfg.GeoIP.ASNDBPath, 110 | db, 111 | logger, 112 | cfg.Performance.GeoIPCacheSize, // Pass configured cache size 113 | ) 114 | if err != nil { 115 | logger.Warn("GeoIP enricher initialization failed, continuing without GeoIP", logger.Args("error", err)) 116 | } else if geoIP.IsEnabled() { 117 | logger.Info("GeoIP enrichment enabled successfully") 118 | // Load cache from database in background (non-blocking) 119 | go func() { 120 | logger.Debug("Loading GeoIP cache in background...") 121 | if err := geoIP.LoadCache(); err != nil { 122 | logger.Warn("Failed to load GeoIP cache", logger.Args("error", err)) 123 | } else { 124 | logger.Info("GeoIP cache loaded", logger.Args("entries", geoIP.GetCacheSize())) 125 | } 126 | }() 127 | } 128 | } else { 129 | logger.Info("GeoIP enrichment disabled by configuration") 130 | } 131 | 132 | // Initialize parser registry 133 | logger.Debug("Initializing parser registry...") 134 | parserRegistry := parsers.NewRegistry(logger) 135 | 136 | // Run initial discovery SYNCHRONOUSLY to ensure log sources are found before starting ingestion 137 | logger.Info("Discovering log sources...") 138 | discoveryEngine := discovery.NewEngine(sourceRepo, logger) 139 | if err := discoveryEngine.Run(logger); err != nil { 140 | logger.Warn("Initial discovery failed", logger.Args("error", err)) 141 | } else { 142 | logger.Info("Log source discovery completed") 143 | } 144 | 145 | // Run periodic discovery in background for late-arriving files 146 | go func() { 147 | ticker := time.NewTicker(5 * time.Minute) // Check every 5 minutes 148 | defer ticker.Stop() 149 | 150 | for range ticker.C { 151 | logger.Debug("Running periodic log source discovery...") 152 | if err := discoveryEngine.Run(logger); err != nil { 153 | logger.Warn("Periodic discovery failed", logger.Args("error", err)) 154 | } else { 155 | // Get updated source count 156 | sources, err := sourceRepo.FindAll() 157 | if err == nil { 158 | logger.Debug("Periodic discovery completed", logger.Args("sources", len(sources))) 159 | } 160 | } 161 | } 162 | }() 163 | 164 | // Initialize real-time metrics collector with configured interval 165 | logger.Info("Initializing real-time metrics collector...") 166 | metricsCollector := realtime.NewMetricsCollector(db, logger) 167 | metricsCollector.Start(cfg.Performance.RealtimeMetricsInterval) 168 | 169 | // Initialize ingestion coordinator with initial import limiting and performance config 170 | // NOTE: Coordinator is initialized before cleanup service because cleanup 
needs to pause ingestion during VACUUM 171 | logger.Debug("Initializing ingestion coordinator...") 172 | coordinator := ingestion.NewCoordinator( 173 | sourceRepo, 174 | httpRepo, 175 | parserRegistry, 176 | geoIP, 177 | metricsCollector, 178 | logger, 179 | cfg.LogSources.InitialImportDays, 180 | cfg.LogSources.InitialImportEnable, 181 | cfg.Performance.BatchSize, 182 | cfg.Performance.WorkerPoolSize, 183 | ) 184 | 185 | // Initialize database cleanup service with coordinator reference for maintenance windows 186 | logger.Debug("Initializing database cleanup service...") 187 | cleanupService := database.NewCleanupService( 188 | db, 189 | logger, 190 | cfg.Database.RetentionDays, 191 | cfg.Database.CleanupInterval, 192 | cfg.Database.CleanupTime, 193 | cfg.Database.VacuumEnabled, 194 | coordinator, // Pass coordinator to enable pause/resume during VACUUM 195 | ) 196 | cleanupService.Start() 197 | 198 | // Start ingestion engine 199 | logger.Info("Starting ingestion engine...") 200 | if err := coordinator.Start(); err != nil { 201 | logger.WithCaller().Fatal("Failed to start ingestion coordinator", logger.Args("error", err)) 202 | } 203 | 204 | logger.Info("Ingestion engine started", 205 | logger.Args("processors", coordinator.GetProcessorCount())) 206 | 207 | // Start database sync loop to automatically pick up new log sources 208 | // This ensures that when periodic discovery adds new sources to the database, 209 | // the coordinator will automatically start processing them 210 | coordinator.StartSyncLoop(30 * time.Second) 211 | 212 | // Give the ingestion engine a moment to start processing before accepting web requests 213 | // This improves initial user experience by ensuring some data is available 214 | if coordinator.GetProcessorCount() > 0 { 215 | logger.Info("Waiting for initial log processing to begin...") 216 | // Give processing a fixed 3 seconds to start 217 | time.Sleep(3 * time.Second) 218 | } 219 | 220 | // Initialize web server with configured settings 221 | logger.Info("Initializing web server...") 222 | dashboardHandler := handlers.NewDashboardHandler(statsRepo, httpRepo, logger) 223 | realtimeHandler := handlers.NewRealtimeHandler(metricsCollector, logger) 224 | systemHandler := handlers.NewSystemHandler( 225 | statsRepo, 226 | httpRepo, 227 | cleanupService, 228 | logger, 229 | cfg.Database.Path, 230 | cfg.Database.RetentionDays, 231 | ) 232 | webServer := api.NewServer(&api.Config{ 233 | Host: cfg.Server.Host, 234 | Port: cfg.Server.Port, 235 | Production: cfg.Server.Production, 236 | DashboardEnabled: cfg.Server.DashboardEnabled, 237 | SplashScreenEnabled: cfg.Server.SplashScreenEnabled, 238 | }, dashboardHandler, realtimeHandler, systemHandler, logger) 239 | 240 | // Start web server in goroutine 241 | go func() { 242 | if err := webServer.Run(); err != nil { 243 | logger.WithCaller().Error("Web server error", logger.Args("error", err)) 244 | } 245 | }() 246 | 247 | logger.Info("🐱 LogLynx is running", 248 | logger.Args( 249 | "url", pterm.Sprintf("http://localhost:%d", cfg.Server.Port), 250 | "processors", coordinator.GetProcessorCount(), 251 | )) 252 | 253 | // Setup graceful shutdown 254 | sigChan := make(chan os.Signal, 1) 255 | signal.Notify(sigChan, os.Interrupt, syscall.SIGTERM) 256 | 257 | // Wait for shutdown signal 258 | <-sigChan 259 | 260 | logger.Info("Shutdown signal received, stopping services...") 261 | 262 | // Stop ingestion coordinator first (prevents new data writes) 263 | logger.Debug("Stopping ingestion coordinator...") 264 | coordinator.Stop() 
265 | 266 | // Stop cleanup service 267 | logger.Debug("Stopping cleanup service...") 268 | cleanupService.Stop() 269 | 270 | // Create shutdown context with a timeout of 30x the realtime metrics interval (long enough to close SSE connections gracefully) 271 | shutdownCtx, cancel := context.WithTimeout(context.Background(), 30*cfg.Performance.RealtimeMetricsInterval) 272 | defer cancel() 273 | 274 | // Stop web server (this will close SSE connections) 275 | logger.Debug("Stopping web server...") 276 | if err := webServer.Shutdown(shutdownCtx); err != nil { 277 | logger.WithCaller().Error("Web server shutdown error", logger.Args("error", err)) 278 | } else { 279 | logger.Info("Web server stopped successfully") 280 | } 281 | 282 | // Close GeoIP 283 | if geoIP != nil { 284 | geoIP.Close() 285 | } 286 | 287 | logger.Info("LogLynx stopped gracefully") 288 | } 289 | -------------------------------------------------------------------------------- /internal/database/pool_monitor.go: -------------------------------------------------------------------------------- 1 | package database 2 | 3 | import ( 4 | "context" 5 | "database/sql" 6 | "fmt" 7 | "runtime" 8 | "sync" 9 | "time" 10 | 11 | "github.com/pterm/pterm" 12 | ) 13 | 14 | // PoolStats contains detailed connection pool statistics 15 | type PoolStats struct { 16 | MaxOpenConns int // Maximum number of open connections 17 | OpenConns int // Current number of open connections 18 | InUse int // Number of connections in use 19 | Idle int // Number of idle connections 20 | WaitCount int64 // Total number of connections waited for 21 | WaitDuration time.Duration // Total time waited for connections 22 | MaxIdleClosed int64 // Total number of connections closed due to SetMaxIdleConns 23 | MaxLifetimeClosed int64 // Total number of connections closed due to SetConnMaxLifetime 24 | Timestamp time.Time 25 | 26 | // Calculated metrics 27 | Utilization float64 // Fraction of connections in use (InUse / MaxOpenConns) 28 | IdleRatio float64 // Fraction of open connections that are idle (Idle / OpenConns) 29 | AvgWaitTime time.Duration // Average wait time per connection 30 | 31 | // Alert flags 32 | IsHighUtilization bool // True if utilization >= threshold 33 | IsSaturated bool // True if all connections in use 34 | } 35 | 36 | // PoolMonitor monitors database connection pool health 37 | type PoolMonitor struct { 38 | db *sql.DB 39 | logger *pterm.Logger 40 | interval time.Duration 41 | threshold float64 42 | autoTune bool 43 | cancel context.CancelFunc 44 | wg sync.WaitGroup 45 | 46 | // Stats tracking 47 | mu sync.RWMutex 48 | currentStats *PoolStats 49 | alertCount int64 50 | lastAlert, lastAdjustment time.Time 51 | totalAdjustments int 52 | } 53 | 54 | // NewPoolMonitor creates a new connection pool monitor 55 | func NewPoolMonitor(db *sql.DB, logger *pterm.Logger, interval time.Duration, threshold float64, autoTune bool) *PoolMonitor { 56 | return &PoolMonitor{ 57 | db: db, 58 | logger: logger, 59 | interval: interval, 60 | threshold: threshold, 61 | autoTune: autoTune, 62 | } 63 | } 64 | 65 | // Start begins monitoring the connection pool 66 | func (pm *PoolMonitor) Start(ctx context.Context) { 67 | ctx, cancel := context.WithCancel(ctx) 68 | pm.cancel = cancel 69 | 70 | pm.wg.Add(1) 71 | go pm.monitorLoop(ctx) 72 | 73 | pm.logger.Info("Connection pool monitoring started", 74 | pm.logger.Args( 75 | "interval", pm.interval, 76 | "threshold", pm.threshold, 77 | "auto_tuning", pm.autoTune, 78 | )) 79 | } 80 | 81 | // Stop stops the pool monitor 82 | func (pm *PoolMonitor) Stop() { 83 | if pm.cancel != nil { 84 | pm.cancel()
85 | } 86 | pm.wg.Wait() 87 | pm.logger.Info("Connection pool monitoring stopped") 88 | } 89 | 90 | // GetCurrentStats returns the current pool statistics 91 | func (pm *PoolMonitor) GetCurrentStats() *PoolStats { 92 | pm.mu.RLock() 93 | defer pm.mu.RUnlock() 94 | 95 | if pm.currentStats == nil { 96 | return nil 97 | } 98 | 99 | // Return a copy to prevent race conditions 100 | statsCopy := *pm.currentStats 101 | return &statsCopy 102 | } 103 | 104 | // monitorLoop continuously monitors the connection pool 105 | func (pm *PoolMonitor) monitorLoop(ctx context.Context) { 106 | defer pm.wg.Done() 107 | 108 | ticker := time.NewTicker(pm.interval) 109 | defer ticker.Stop() 110 | 111 | // Initial stats collection 112 | pm.collectAndAnalyze() 113 | 114 | for { 115 | select { 116 | case <-ctx.Done(): 117 | return 118 | case <-ticker.C: 119 | pm.collectAndAnalyze() 120 | } 121 | } 122 | } 123 | 124 | // collectAndAnalyze collects pool stats and performs analysis 125 | func (pm *PoolMonitor) collectAndAnalyze() { 126 | stats := pm.collectStats() 127 | 128 | pm.mu.Lock() 129 | pm.currentStats = stats 130 | pm.mu.Unlock() 131 | 132 | // Log stats at trace level 133 | pm.logger.Trace("Connection pool stats", 134 | pm.logger.Args( 135 | "max_open", stats.MaxOpenConns, 136 | "open", stats.OpenConns, 137 | "in_use", stats.InUse, 138 | "idle", stats.Idle, 139 | "utilization", fmt.Sprintf("%.1f%%", stats.Utilization*100), 140 | "wait_count", stats.WaitCount, 141 | )) 142 | 143 | // Check for high utilization 144 | if stats.IsHighUtilization { 145 | pm.mu.Lock() 146 | pm.alertCount++ 147 | pm.lastAlert = time.Now() 148 | pm.mu.Unlock() 149 | 150 | pm.logger.Warn("⚠️ Connection pool high utilization detected", 151 | pm.logger.Args( 152 | "utilization", fmt.Sprintf("%.1f%%", stats.Utilization*100), 153 | "in_use", stats.InUse, 154 | "max_open", stats.MaxOpenConns, 155 | "threshold", fmt.Sprintf("%.1f%%", pm.threshold*100), 156 | "wait_count", stats.WaitCount, 157 | )) 158 | 159 | // Auto-tune if enabled 160 | if pm.autoTune { 161 | pm.performAutoTuning(stats) 162 | } 163 | } 164 | 165 | // Check for saturation 166 | if stats.IsSaturated { 167 | pm.logger.Error("🚨 Connection pool SATURATED - all connections in use!", 168 | pm.logger.Args( 169 | "in_use", stats.InUse, 170 | "max_open", stats.MaxOpenConns, 171 | "wait_count", stats.WaitCount, 172 | "avg_wait_time", stats.AvgWaitTime, 173 | )) 174 | 175 | // Force auto-tune on saturation 176 | if pm.autoTune { 177 | pm.performAutoTuning(stats) 178 | } 179 | } 180 | 181 | // Log performance warnings 182 | if stats.WaitCount > 0 { 183 | pm.logger.Debug("Connections waiting for availability", 184 | pm.logger.Args( 185 | "wait_count", stats.WaitCount, 186 | "avg_wait_time", stats.AvgWaitTime, 187 | "total_wait_time", stats.WaitDuration, 188 | )) 189 | } 190 | } 191 | 192 | // collectStats collects current pool statistics 193 | func (pm *PoolMonitor) collectStats() *PoolStats { 194 | dbStats := pm.db.Stats() 195 | 196 | stats := &PoolStats{ 197 | MaxOpenConns: dbStats.MaxOpenConnections, 198 | OpenConns: dbStats.OpenConnections, 199 | InUse: dbStats.InUse, 200 | Idle: dbStats.Idle, 201 | WaitCount: dbStats.WaitCount, 202 | WaitDuration: dbStats.WaitDuration, 203 | MaxIdleClosed: dbStats.MaxIdleClosed, 204 | MaxLifetimeClosed: dbStats.MaxLifetimeClosed, 205 | Timestamp: time.Now(), 206 | } 207 | 208 | // Calculate metrics 209 | if stats.MaxOpenConns > 0 { 210 | stats.Utilization = float64(stats.InUse) / float64(stats.MaxOpenConns) 211 | } 212 | 213 | if stats.OpenConns 
> 0 { 214 | stats.IdleRatio = float64(stats.Idle) / float64(stats.OpenConns) 215 | } 216 | 217 | if stats.WaitCount > 0 { 218 | stats.AvgWaitTime = stats.WaitDuration / time.Duration(stats.WaitCount) 219 | } 220 | 221 | // Set alert flags 222 | stats.IsHighUtilization = stats.Utilization >= pm.threshold 223 | stats.IsSaturated = stats.InUse >= stats.MaxOpenConns 224 | 225 | return stats 226 | } 227 | 228 | // performAutoTuning adjusts connection pool settings based on system resources 229 | func (pm *PoolMonitor) performAutoTuning(stats *PoolStats) { 230 | // Only tune once every 5 minutes to avoid thrashing 231 | pm.mu.RLock() 232 | timeSinceLastAdjustment := time.Since(pm.lastAdjustment) 233 | pm.mu.RUnlock() 234 | 235 | if timeSinceLastAdjustment < 5*time.Minute { 236 | return 237 | } 238 | 239 | // Calculate optimal pool size based on CPU cores 240 | cpuCores := runtime.NumCPU() 241 | 242 | // SQLite with WAL mode: 243 | // - 1 writer + multiple readers 244 | // - Recommended: 2-3 connections per CPU core for read-heavy workloads 245 | optimalMaxOpen := cpuCores * 3 246 | if optimalMaxOpen < 25 { 247 | optimalMaxOpen = 25 // Minimum production setting 248 | } 249 | if optimalMaxOpen > 100 { 250 | optimalMaxOpen = 100 // Cap at 100 to prevent excessive overhead 251 | } 252 | 253 | // Adjust idle connections to 40% of max open 254 | optimalMaxIdle := optimalMaxOpen * 40 / 100 255 | if optimalMaxIdle < 10 { 256 | optimalMaxIdle = 10 257 | } 258 | 259 | currentMaxOpen := stats.MaxOpenConns 260 | 261 | // Only increase if current utilization is high 262 | if stats.Utilization >= pm.threshold && optimalMaxOpen > currentMaxOpen { 263 | pm.logger.Info("🔧 Auto-tuning connection pool (increasing capacity)", 264 | pm.logger.Args( 265 | "current_max_open", currentMaxOpen, 266 | "new_max_open", optimalMaxOpen, 267 | "current_max_idle", "auto", 268 | "new_max_idle", optimalMaxIdle, 269 | "cpu_cores", cpuCores, 270 | "utilization", fmt.Sprintf("%.1f%%", stats.Utilization*100), 271 | )) 272 | 273 | pm.db.SetMaxOpenConns(optimalMaxOpen) 274 | pm.db.SetMaxIdleConns(optimalMaxIdle) 275 | 276 | pm.mu.Lock() 277 | pm.totalAdjustments++; pm.lastAdjustment = time.Now() 278 | pm.mu.Unlock() 279 | } else if stats.IdleRatio > 0.7 && optimalMaxOpen < currentMaxOpen { 280 | // Decrease if too many idle connections (optimization) 281 | pm.logger.Info("🔧 Auto-tuning connection pool (optimizing idle)", 282 | pm.logger.Args( 283 | "current_max_open", currentMaxOpen, 284 | "new_max_open", optimalMaxOpen, 285 | "idle_ratio", fmt.Sprintf("%.1f%%", stats.IdleRatio*100), 286 | )) 287 | 288 | pm.db.SetMaxOpenConns(optimalMaxOpen) 289 | pm.db.SetMaxIdleConns(optimalMaxIdle) 290 | 291 | pm.mu.Lock() 292 | pm.totalAdjustments++; pm.lastAdjustment = time.Now() 293 | pm.mu.Unlock() 294 | } 295 | } 296 | 297 | // GetAlertCount returns the total number of high utilization alerts 298 | func (pm *PoolMonitor) GetAlertCount() int64 { 299 | pm.mu.RLock() 300 | defer pm.mu.RUnlock() 301 | return pm.alertCount 302 | } 303 | 304 | // GetTotalAdjustments returns the total number of auto-tuning adjustments 305 | func (pm *PoolMonitor) GetTotalAdjustments() int { 306 | pm.mu.RLock() 307 | defer pm.mu.RUnlock() 308 | return pm.totalAdjustments 309 | } 310 | 311 | // PrintSummary prints a human-readable summary of pool statistics 312 | func (pm *PoolMonitor) PrintSummary() { 313 | stats := pm.GetCurrentStats() 314 | if stats == nil { 315 | pm.logger.Info("No pool statistics available yet") 316 | return 317 | } 318 | 319 | pm.mu.RLock() 320 | alerts := pm.alertCount 321 | adjustments := pm.totalAdjustments 322 | pm.mu.RUnlock()
323 | 324 | pm.logger.Info("📊 Connection Pool Summary", 325 | pm.logger.Args( 326 | "max_open_conns", stats.MaxOpenConns, 327 | "current_open", stats.OpenConns, 328 | "in_use", stats.InUse, 329 | "idle", stats.Idle, 330 | "utilization", fmt.Sprintf("%.1f%%", stats.Utilization*100), 331 | "total_waits", stats.WaitCount, 332 | "avg_wait_time", stats.AvgWaitTime, 333 | "total_alerts", alerts, 334 | "total_adjustments", adjustments, 335 | )) 336 | } 337 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # LogLynx 🦁📊 2 | 3 | **Advanced Log Analytics Platform for Traefik and Beyond** 4 | 5 | LogLynx is a high-performance, real-time log analytics platform (it runs in under 50 MB of RAM) designed to provide deep insights into your web traffic. Built with Go and optimized for Traefik reverse proxy logs, it offers a beautiful dark-themed dashboard and comprehensive REST API. 6 | 7 | ![License](https://img.shields.io/badge/license-MIT-blue.svg) 8 | ![Go Version](https://img.shields.io/badge/go-%3E%3D1.25-blue.svg) 9 | ![Status](https://img.shields.io/badge/status-active-success.svg) 10 | 11 | > **📚 Important Documentation** 12 | > 13 | > - **[Traefik Setup Guide](../../wiki/Traefik)** - Recommended Traefik configuration for optimal LogLynx performance and complete field capture (no additional Traefik configuration is needed for the [Pangolin quick installation](https://docs.pangolin.net/self-host/quick-install)). 14 | > - **[Deduplication System](../../wiki/Deduplication-System)** - Learn how LogLynx prevents duplicate log entries and handles various scenarios (log rotation, crashes, re-imports) 15 | 16 | ![LogLynx-Overview-Demo](img/LogLynx-Overview-Demo.png) 17 | 18 | ## ✨ Features 19 | 20 | - 📊 **Real-time Analytics** - Live metrics with Server-Sent Events (SSE) 21 | - 🗺️ **Geographic Insights** - Interactive maps with traffic visualization 22 | - 📈 **Timeline Analysis** - Hourly, daily, and custom time ranges 23 | - 🔍 **Deep Filtering** - Filter by service, backend, or domain 24 | - 🚀 **High Performance** - Optimized batch processing and SQLite backend 25 | - 🎨 **Beautiful UI** - Dark-themed responsive dashboard 26 | - 🔌 **REST API** - Full-featured API for integrations 27 | - 📱 **Device Analytics** - Browser, OS, and device type detection 28 | - 🌐 **GeoIP Enrichment** - Country, city, and ASN information 29 | - 🔄 **Auto-Discovery** - Automatically detects Traefik log files 30 | 31 | ## 🚀 Quick Start 32 | 33 | ### Prerequisites 34 | 35 | - Go 1.25 or higher 36 | - Traefik access logs (optional for initial setup) 37 | 38 | ### Standalone installation 39 | 40 | ```bash 41 | # Clone the repository 42 | git clone https://github.com/k0lin/loglynx.git 43 | cd loglynx 44 | 45 | # Customize your installation (none of these settings is mandatory, but tuning them for your system is recommended)
46 | cp .env.example .env 47 | 48 | # Install dependencies 49 | go mod tidy 50 | ``` 51 | #### There are two ways to run it: 52 | Build the binary and execute it: 53 | ```bash 54 | # Build 55 | go build -o loglynx cmd/server/main.go 56 | 57 | # Start the server 58 | ./loglynx 59 | ``` 60 | Or run the service directly without building a binary: 61 | ```bash 62 | # Build and run 63 | go run cmd/server/main.go 64 | 65 | ``` 66 | 67 | ### Deployment with Docker Compose on a standard Pangolin installation 68 | If you used the installer from the official documentation, your Pangolin installation should look roughly like this: 69 | ``` 70 | your-folder/ 71 | ├── config/ # Pangolin configuration 72 | │ └── traefik/ 73 | │ │ └── logs/ 74 | │ │ └── access.log # Traefik access log 75 | │ ├── logs/ 76 | │ ├── letsencrypt/ 77 | │ ├── db/ 78 | │ ├── config.yml 79 | │ ├── GeoLite2-City.mmdb # optional 80 | │ ├── GeoLite2-ASN.mmdb # optional 81 | │ └── GeoLite2-Country.mmdb # optional 82 | ├── loglynx-data/ # database for the LogLynx service 83 | ├── GeoLite2-Country_20251024/ # MaxMind license 84 | └── docker-compose.yml 85 | ``` 86 | The Docker Compose deployment below sits alongside the other services in your stack (Pangolin, Traefik, etc.). The example configuration assumes the Pangolin layout described above. 87 | ```yml 88 | # other services related to Pangolin 89 | 90 | loglynx: 91 | image: k0lin/loglynx:latest 92 | container_name: loglynx 93 | restart: unless-stopped 94 | ports: 95 | - "8080:8080" 96 | volumes: 97 | - ./loglynx-data:/data 98 | - ./config:/app/geoip 99 | - ./config/traefik/logs:/traefik/logs 100 | environment: 101 | - DB_PATH=/data/loglynx.db 102 | - GEOIP_ENABLED=true # set to true only if the GeoLite databases are installed 103 | - GEOIP_CITY_DB=/app/geoip/GeoLite2-City.mmdb # only read when GEOIP_ENABLED=true; any subset of the three databases works (fewer databases means fewer features) 104 | - GEOIP_COUNTRY_DB=/app/geoip/GeoLite2-Country.mmdb # only read when GEOIP_ENABLED=true 105 | - GEOIP_ASN_DB=/app/geoip/GeoLite2-ASN.mmdb # only read when GEOIP_ENABLED=true 106 | - TRAEFIK_LOG_PATH=/traefik/logs/access.log 107 | - LOG_LEVEL=info 108 | - SERVER_PRODUCTION=true 109 | # There are several configurable environment variables to optimize program startup (check the wiki). 110 | ``` 111 | 112 | The dashboard will be available at `http://localhost:8080` 113 | 114 | ## 📊 Dashboard 115 | 116 | Access the web interface at `http://localhost:8080` to explore: 117 | 118 | - **Overview** - Executive summary with key metrics 119 | - **Real-time Monitor** - Live traffic monitoring 120 | - **Traffic Analysis** - Patterns and trends over time 121 | - **Geographic Analytics** - Interactive world map 122 | - **Performance** - Response times and backend health 123 | - **Security & Network** - IP analysis, ASN tracking, TLS versions 124 | - **User Analytics** - Browsers, OS, device types, referrers 125 | - **Content Analytics** - Top paths and referrers 126 | - **Backend Health** - Service performance monitoring 127 | 128 | ## 🔌 API Usage 129 | 130 | LogLynx provides a comprehensive REST API for programmatic access to all analytics.
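As a quick smoke test, any HTTP client will do. Below is a minimal Go sketch that calls the `/health` endpoint (documented in the next section); the same pattern applies to every `/api/v1/*` route, whose exact paths and response shapes are defined in `openapi.yaml` rather than invented here:

```go
// healthcheck.go - a minimal LogLynx API client sketch.
// Assumes LogLynx is reachable on localhost:8080 (the port used in the
// compose example above); swap the path for any /api/v1/* route from openapi.yaml.
package main

import (
	"fmt"
	"io"
	"log"
	"net/http"
	"time"
)

func main() {
	client := &http.Client{Timeout: 5 * time.Second}

	resp, err := client.Get("http://localhost:8080/health")
	if err != nil {
		log.Fatalf("request failed: %v", err)
	}
	defer resp.Body.Close()

	// Print the raw status and body; a real client would decode the response.
	body, err := io.ReadAll(resp.Body)
	if err != nil {
		log.Fatalf("reading response failed: %v", err)
	}
	fmt.Printf("status=%d body=%s\n", resp.StatusCode, body)
}
```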
131 | 132 | ### API-Only Mode 133 | 134 | You can disable the dashboard UI and run LogLynx in API-only mode by setting: 135 | 136 | ```bash 137 | DASHBOARD_ENABLED=false 138 | ``` 139 | 140 | When the dashboard is disabled: 141 | - All `/api/v1/*` endpoints remain fully accessible 142 | - `/health` endpoint continues to work for health checks 143 | - Dashboard routes (`/`, `/traffic`, etc.) are not exposed 144 | - Static assets are not loaded, reducing memory footprint 145 | 146 | ### OpenAPI Specification 147 | 148 | Full API documentation is available in `openapi.yaml`. View it with: 149 | 150 | - [Swagger Editor](https://editor.swagger.io/) - Paste in the file contents 151 | - [Swagger UI](https://petstore.swagger.io/) - Import the file 152 | - Generate clients: `npx @openapitools/openapi-generator-cli generate -i openapi.yaml -g python` 153 | 154 | See the [API Wiki](../../wiki/API-Documentation) for detailed examples and use cases. 155 | 156 | ## 🛠️ Configuration 157 | 158 | ### Environment Variables 159 | 160 | ```bash 161 | # ================================ 162 | # GeoIP Configuration 163 | # ================================ 164 | # Download GeoIP databases from MaxMind: 165 | # https://dev.maxmind.com/geoip/geolite2-free-geolocation-data 166 | 167 | GEOIP_ENABLED=true 168 | GEOIP_CITY_DB=geoip/GeoLite2-City.mmdb 169 | GEOIP_COUNTRY_DB=geoip/GeoLite2-Country.mmdb 170 | GEOIP_ASN_DB=geoip/GeoLite2-ASN.mmdb 171 | 172 | # ================================ 173 | # Log Sources Configuration 174 | # ================================ 175 | # Path to Traefik access log file 176 | TRAEFIK_LOG_PATH=traefik/logs/access.log 177 | ``` 178 | 179 | 180 | ### GeoIP databases 181 | 182 | Some community projects (for example, [`P3TERX/GeoLite.mmdb`](https://github.com/P3TERX/GeoLite.mmdb)) provide convenient downloads of GeoLite2 City/Country/ASN files. LogLynx does not ship GeoIP databases and is not responsible for third-party downloads. 183 | 184 | If you use third-party downloaders, please ensure you comply with MaxMind's license and, when required, register and accept the license on the official MaxMind site: [MaxMind GeoLite2](https://dev.maxmind.com/geoip/geolite2-free-geolocation-data). 185 | 186 | To use GeoIP with LogLynx, place the `.mmdb` files in a directory and mount that directory into the container at the paths configured by `GEOIP_CITY_DB`, `GEOIP_COUNTRY_DB` and `GEOIP_ASN_DB`. 187 | 188 | 189 | ### Traefik Log Format 190 | 191 | LogLynx works best with Traefik's JSON access log format.
Ensure Traefik is configured with: 192 | 193 | ```yaml 194 | accessLog: 195 | filePath: "/var/log/traefik/access.log" 196 | format: json # JSON format recommended 197 | ``` 198 | 199 | ## 📦 Project Structure 200 | 201 | ``` 202 | loglynx/ 203 | ├── cmd/server/ # Application entry point 204 | ├── internal/ 205 | │ ├── api/ # HTTP server and handlers 206 | │ ├── database/ # Database models and repositories 207 | │ ├── enrichment/ # GeoIP enrichment 208 | │ ├── ingestion/ # Log file processing 209 | │ ├── parser/ # Log format parsers 210 | │ └── realtime/ # Real-time metrics 211 | ├── web/ 212 | │ ├── static/ # CSS, JavaScript, images 213 | │ └── templates/ # HTML templates 214 | ├── openapi.yaml # API specification 215 | └── README.md 216 | ``` 217 | 218 | ## 🔒 Features in Detail 219 | 220 | ### Resilient Startup 221 | - ✅ Starts successfully even without log files 222 | - ✅ Automatic retry with clear error messages 223 | - ✅ Graceful handling of permission errors 224 | - ✅ Runs in standby mode until logs are available 225 | 226 | ### Real-time Monitoring 227 | - Live metrics updated every second 228 | - Server-Sent Events (SSE) streaming 229 | - Per-service breakdown 230 | - Active connections and error rates 231 | 232 | ### Geographic Analytics 233 | - Interactive Leaflet map with clustering 234 | - Country, city, and coordinate data 235 | - ASN (Autonomous System) tracking 236 | - Dark-themed map styling 237 | 238 | ### Performance Tracking 239 | - Response time percentiles (P50, P95, P99) 240 | - Backend health monitoring 241 | - Bandwidth analysis 242 | - Request rate tracking 243 | 244 | ## 🤝 Contributing 245 | 246 | Contributions are welcome! Please feel free to submit a Pull Request. For major changes, please open an issue first to discuss what you would like to change. 247 | 248 | ## 📝 License 249 | 250 | This project is licensed under the MIT License - see the [LICENSE](LICENSE) file for details. 251 | 252 | ## 🙏 Acknowledgments 253 | 254 | - [Traefik](https://traefik.io/) - Modern HTTP reverse proxy 255 | - [MaxMind GeoLite2](https://dev.maxmind.com/geoip/geolite2-free-geolocation-data) - GeoIP databases 256 | - [DataTables](https://datatables.net/) - Table plugin for jQuery 257 | - [Chart.js](https://www.chartjs.org/) - JavaScript charting 258 | - [Leaflet](https://leafletjs.com/) - Interactive maps 259 | 260 | ## 💬 Support 261 | 262 | - 🐛 [Report Issues](../../issues) 263 | - 💡 [Feature Requests](../../issues/new?labels=enhancement) 264 | - 📖 [Documentation Wiki](../../wiki) 265 | 266 | --- 267 | 268 | **Made with ❤️ for the community** -------------------------------------------------------------------------------- /web/templates/pages/realtime.html: -------------------------------------------------------------------------------- 1 | {{define "realtime.html"}} 2 | 3 | 4 | 5 | 6 | 7 | {{.Title}} - LogLynx 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 |
29 | {{template "sidebar" .}} 30 | 31 |
32 | {{template "header" .}} 33 |
34 | 51 | 52 | 53 |
54 |
55 |
Request Rate
56 |
0.00
57 |
req/sec
58 |
59 | 60 |
61 |
Error Rate
62 |
0.00
63 |
errors/sec
64 |
65 | 66 |
67 |
Avg Response Time
68 |
0
69 |
Last minute
70 |
71 | 72 |
73 |
Status Distribution
74 |
75 | 2xx: 0 | 76 | 4xx: 0 | 77 | 5xx: 0 78 |
79 |
Last minute
80 |
81 |
82 | 83 | 84 |
85 |
86 |
87 | 88 | Live Metrics Stream 89 | 90 | 91 | STREAMING 92 | 93 |
94 |
95 | Updates: 0 96 | 99 |
100 |
101 |
102 | 103 |
104 |
105 | 106 | 107 |
108 |
109 |
110 |
111 | 112 | Request Rate per Service 113 |
114 |
115 | 118 |
119 |
120 |
121 | 122 |
123 |
124 | 125 |
126 |
127 |
128 | 129 | Top Active Clients 130 |
131 |
132 |
133 | 134 | 135 | 136 | 137 | 138 | 139 | 140 | 141 | 142 | 143 | 144 |
IP AddressCountryReq/Sec
Waiting for data...
145 |
146 |
147 |
148 | 149 | 150 |
151 |
152 |
153 | 154 | Live Request Stream 155 |
156 |
157 |
158 | 159 | 165 |
166 | 169 | 172 |
173 |
174 |
175 |
176 | 177 | 178 | 179 | 180 | 181 | 182 | 183 | 184 | 185 | 186 | 187 | 188 | 189 | 190 | 191 | 192 |
TimeMethodHostPathStatusResponse TimeCountryIP
193 |
194 |
195 |
196 | 197 | 198 | 204 | 205 | 206 | 241 | 242 | 243 |
244 | 245 | 246 | Live Monitor 247 |
248 |
249 | {{template "footer" .}} 250 |
251 |
252 | 253 | {{template "scripts-common" .}} 254 | 255 | 256 | 261 | 262 | 263 | {{end}} 264 | --------------------------------------------------------------------------------