├── .github ├── dependabot.yml └── workflows │ └── go.yml ├── .gitignore ├── Dockerfile.test-race ├── LICENSE ├── README.md ├── bench_test.go ├── chunk.go ├── chunk_test.go ├── demo ├── .dockerignore ├── Dockerfile ├── README.md ├── driver-mattn.go.txt ├── driver-modernc.go.txt ├── go.mod.txt ├── main.go └── public │ ├── index.html │ └── main.js ├── docs ├── diagram.png ├── logo.png ├── repository-open-graph.png ├── repository-open-graph.psd └── ui.png ├── expr.go ├── expr_parse_state.go ├── expr_test.go ├── go.mod ├── go.sum ├── handler.go ├── handler_test.go ├── http.go ├── ingester.go ├── ingester_test.go ├── memory ├── README.md ├── memory.go ├── memory_expr.go ├── memory_expr_test.go ├── wildcard.go └── wildcard_test.go ├── sqlite ├── README.md ├── api_entries.go ├── api_ticks.go ├── expr.go ├── expr_test.go ├── storage.go ├── storage_db.go ├── storage_routine_checkpoint.go ├── storage_routine_size.go ├── storage_scheduler.go ├── storage_test.go ├── storage_test_driver_test.go └── utils.go ├── sqlog.go ├── sqlog_api.go ├── storage.go └── web ├── index.html ├── main.js ├── reset.css └── styles.css /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | # To get started with Dependabot version updates, you'll need to specify which 2 | # package ecosystems to update and where the package manifests are located. 3 | # Please see the documentation for all configuration options: 4 | # https://docs.github.com/github/administering-a-repository/configuration-options-for-dependency-updates 5 | 6 | version: 2 7 | updates: 8 | - package-ecosystem: "gomod" 9 | directory: "/" 10 | schedule: 11 | interval: "weekly" 12 | target-branch: "main" 13 | reviewers: 14 | - "nidorx" 15 | commit-message: 16 | prefix: "mod:" 17 | -------------------------------------------------------------------------------- /.github/workflows/go.yml: -------------------------------------------------------------------------------- 1 | name: Go 2 | on: 3 | push: 4 | branches: 5 | - main 6 | - dev 7 | - 'feature/**' 8 | paths: 9 | - '**.go' 10 | - 'go.mod' 11 | - '.golangci.yml' 12 | - '.github/workflows/go.yml' 13 | pull_request: 14 | paths: 15 | - '**.go' 16 | - 'go.mod' 17 | - '.golangci.yml' 18 | - '.github/workflows/go.yml' 19 | env: 20 | GOPROXY: "https://proxy.golang.org" 21 | 22 | permissions: 23 | contents: read 24 | 25 | jobs: 26 | test: 27 | name: Test 28 | strategy: 29 | matrix: 30 | go-version: [ 1.22.x ] 31 | platform: [ ubuntu-latest ] # macos-latest 32 | runs-on: ${{ matrix.platform }} 33 | steps: 34 | - name: Install Go 35 | uses: actions/setup-go@v3 36 | with: 37 | go-version: ${{ matrix.go-version }} 38 | - name: Checkout code 39 | uses: actions/checkout@v3 40 | - name: Run tests 41 | run: | 42 | go test -v -race ./... 43 | 44 | # Running tests with race detection consumes too much memory on Windows, 45 | # see https://github.com/golang/go/issues/46099 for details. 46 | test-windows: 47 | name: TestOnWindows 48 | strategy: 49 | matrix: 50 | go-version: [ 1.22.x ] 51 | platform: [ windows-latest ] 52 | runs-on: ${{ matrix.platform }} 53 | steps: 54 | - name: Install Go 55 | uses: actions/setup-go@v3 56 | with: 57 | go-version: ${{ matrix.go-version }} 58 | - name: Checkout code 59 | uses: actions/checkout@v3 60 | - name: Run tests 61 | run: | 62 | go test -v ./... 
63 | 
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | 
2 | 
3 | *.exe
4 | *.db
5 | **/*.db
6 | demo/logs
--------------------------------------------------------------------------------
/Dockerfile.test-race:
--------------------------------------------------------------------------------
1 | # syntax=docker/dockerfile:1
2 | 
3 | # used to test linux -race locally
4 | # docker build --progress plain -t sqlog-test -f Dockerfile.test-race .
5 | 
6 | FROM golang:1.23.2-alpine AS builder
7 | 
8 | RUN go version
9 | 
10 | RUN apk --no-cache add gcc g++
11 | 
12 | WORKDIR /go/src/demo/
13 | COPY . ./
14 | 
15 | RUN set -Eeux && go mod tidy && go mod download && go mod verify
16 | 
17 | ARG GO_TAGS
18 | ARG PROJECT_VERSION
19 | 
20 | RUN GOOS=linux CGO_ENABLED=1 go test -v -race ./...
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 | 
3 | Copyright (c) 2024 Alex Rodin
4 | 
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 | 
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 | 
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 | 
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | 
2 | 
3 | 
4 | 
5 | SQLog - Connecting the dots
6 | 
7 | 
8 | 9 | 10 | [![Go][actions-shield]][actions-url] 11 | [![Go Report Card][goreport-shield]][goreport-url] 12 | [![MIT License][license-shield]][license-url] 13 | 14 | [actions-shield]: https://github.com/nidorx/sqlog/actions/workflows/go.yml/badge.svg 15 | [actions-url]: https://github.com/nidorx/sqlog/actions/workflows/go.yml 16 | [goreport-shield]: https://goreportcard.com/badge/github.com/nidorx/sqlog 17 | [goreport-url]: https://goreportcard.com/report/github.com/nidorx/sqlog 18 | [license-shield]: https://img.shields.io/github/license/nidorx/sqlog.svg?style=flat 19 | [license-url]: https://github.com/nidorx/sqlog/blob/main/LICENSE.txt 20 | 21 | 22 | **SQLog** is a **Golang** library that simplifies log management using **slog**. **SQLog** offers a lightweight and reliable solution for logging, making it perfect for developers seeking a cost-effective, high-performance way to monitor their applications. 23 | 24 | ## Usage 25 | 26 | Below is an example of using **SQLog** with the SQLite storage, with the interface exposed on port `8080`. When you access `http://localhost:8080?msg=test`, any query parameter will be sent to the log. 27 | 28 | You can view the generated logs at `http://localhost:8080/logs/`. 29 | 30 | ```go 31 | import ( 32 | "log/slog" 33 | "net/http" 34 | "os" 35 | "strings" 36 | 37 | "github.com/nidorx/sqlog" 38 | 39 | // sqlite storage 40 | "github.com/nidorx/sqlog/sqlite" 41 | 42 | // sqlite driver 43 | _ "github.com/mattn/go-sqlite3" 44 | ) 45 | 46 | func main() { 47 | storage, _ := sqlite.New(nil) 48 | logger, _ := sqlog.New(&sqlog.Config{ 49 | Storage: storage, 50 | }) 51 | 52 | // magic 53 | slog.SetDefault(slog.New(logger.Handler())) 54 | 55 | if true { 56 | // In the local environment, you can send the log to standard output. 57 | logger.Fanout(slog.NewTextHandler(os.Stdout, nil)) 58 | } 59 | 60 | // SQLog ui/api handler 61 | logHttpHandler := logger.HttpHandler() 62 | 63 | // http handler (... nidorx/chain, gin-gonic/gin, gorilla/mux, julienschmidt/httprouter) 64 | httpHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { 65 | if idx := strings.LastIndex(r.URL.Path, "/logs"); idx >= 0 { 66 | logHttpHandler.ServeHTTP(w, r) 67 | } else { 68 | args := []any{} 69 | msg := "I ❤️ SQLog" 70 | for k, v := range r.URL.Query() { 71 | if k == "msg" { 72 | msg = strings.Join(v, ",") 73 | } else { 74 | args = append(args, slog.Any(k, strings.Join(v, ","))) 75 | } 76 | } 77 | 78 | slog.Log(r.Context(), slog.LevelInfo, msg, args...) 79 | w.Write([]byte("🫶")) 80 | } 81 | }) 82 | 83 | http.ListenAndServe(":8080", httpHandler) 84 | } 85 | ``` 86 | 87 | ## Demo 88 | 89 | You can view the current version of the SQLog demo at the links below: 90 | 91 | The demo project captures all mouse movements and sends them to the server, which logs the received parameters. 92 | 93 | - **Log generator**: https://sqlog-demo.onrender.com/ 94 | - **SQLog UI** : https://sqlog-demo.onrender.com/logs/ 95 | 96 | > **IMPORTANT**! I am using the [free version of Render](https://docs.render.com/free), so there is no guarantee of service stability or availability. 97 | 98 | [SQLog-demo.webm](https://github.com/user-attachments/assets/046b65c9-dd36-4779-8b15-915be5f7e3f3) 99 | 100 | The source code is in the [demo directory](./demo/). 101 | 102 | 103 | 104 |
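
The search field in the UI accepts a small filter language; a few of the shapes it supports (a sketch distilled from the cases in `expr_test.go`, not a full reference):

```
hello world           terms are matched against msg, joined with AND by default
error OR timeout      boolean operators, with ( ) for grouping
field:hello*          match a specific field, with * and ? wildcards
status:[400 TO 499]   numeric range on a field
field:"hello world"   quoted sequences are matched as a whole
```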
105 | 106 |
107 | 108 | 109 | ## How SQLog works? 110 | 111 |
112 | 113 |
114 | 115 | **SQLog** is an efficient library for capturing and managing logs, designed with a focus on performance and low latency. The architecture of the solution consists of four main components: **Handler**, **Ingester**, **Chunk**, and **Storage**. Each of these components plays a crucial role in the ingestion, storage, and retrieval of logs. 116 | 117 | 1. **Handler** 118 | 119 | The **Handler** is an implementation of `slog.Handler`, allowing **SQLog** to be easily integrated as the standard logger in Go. It transforms logs coming from `slog` into JSON format using the default encoder `slog.NewJSONHandler` or a custom encoder, providing flexibility in log formatting. 120 | 121 | 2. **Ingester** 122 | 123 | The **Ingester** manages the ingestion of logs from the **Handler** in a non-blocking manner. It maintains an in-memory buffer that stores log entries in blocks called **Chunks**. When a **Chunk** reaches its maximum capacity, the Ingester starts using the next **Chunk**, marking the current one for writing to **Storage**. At regular intervals, the Ingester invokes the `Flush` method of the **Storage** to persist filled **Chunks**. 124 | 125 | > **Performance**: One notable feature of the Ingester is its non-blocking implementation, which avoids using mutexes to ensure concurrency. Instead, it utilizes atomic operations from Go (`sync/atomic`), allowing multiple goroutines to write logs simultaneously without waiting on each other, resulting in superior performance and reduced latency. 126 | 127 | 3. **Chunk** 128 | 129 | The **Chunk** is a non-blocking structure that allows continuous writing of log entries. Each **Chunk** is a node in a linked list that has a reference to the next **Chunk**. The Ingester maintains references to two nodes: the current **Chunk**, which is still accepting new entries, and the flush **Chunk**, which contains completed entries that have not yet been persisted. 130 | 131 | > **Data Structure**: This linked list approach not only simplifies the management of **Chunks** but also facilitates fast, non-blocking writing. By updating the internal references during the flush process, the Ingester ensures that logs are written efficiently with minimal performance impact. 132 | 133 | 4. **Storage** 134 | 135 | The **Storage** is responsible for the persistence of logs. **SQLog**'s modular architecture allows for the implementation of different types of storage, such as disk files, databases, and external systems. Currently, there is a [native implementation for SQLite (see more details)](./sqlite) and another in development for in-memory persistence. 136 | 137 | **Responsibilities**: 138 | - **Persistence**: The Storage receives and stores **Chunks** from the Ingester. 139 | - **Search**: It also allows for the retrieval of records, which is used by the **SQLog** API for log visualization. 140 | 141 | The combination of these layers makes **SQLog** a robust and efficient solution for log management, optimizing performance through a non-blocking architecture and the use of atomic operations. This results in fast, real-time log capture capable of handling high workloads without compromising efficiency. 142 | 143 | ## Requirements 144 | 145 | To use the builtin SQLite Storage implementation, you need to register the driver of your choice. 
146 | 147 | Examples: 148 | 149 | - `github.com/mattn/go-sqlite3` 150 | - `modernc.org/sqlite` 151 | 152 | 153 | ## @TODO/IDEAS/ROADMAP 154 | 155 | If you would like to contribute to this project, here are some tasks and ideas listed below. Feel free to suggest and implement new features in **SQLog**. 156 | 157 | If you decide to work on a task, please leave a comment on the Issue so that others can collaborate. 158 | 159 | - **[InMemory Storage](https://github.com/nidorx/sqlog/issues/4)** 160 | - **[Alerts](https://github.com/nidorx/sqlog/issues/2)** - Enable the creation of alerts within SQLog. The solution should leverage the syntax of the language to evaluate logs at regular intervals and trigger alerts when specific conditions are met. 161 | - [Metrics (Count, AVG, Dashboards)](https://github.com/nidorx/sqlog/issues/3) 162 | - NOT, REGEX (https://www.sqlite.org/lang_expr.html) 163 | 164 | 165 | All kinds of contributions are welcome! 166 | 167 | 🐛 **Found a bug?** 168 | Let me know by [creating an issue][new-issue]. 169 | 170 | ## References 171 | 172 | - This project was inspired by [Blacklite](https://github.com/tersesystems/blacklite) 173 | - https://github.com/IGLOU-EU/go-wildcard 174 | - The interface is inspired by [Datadog](https://www.datadoghq.com/) 175 | 176 | 177 | [bugs]: https://github.com/nidorx/sqlog/issues?q=is%3Aissue+is%3Aopen+label%3Abug 178 | [features]: https://github.com/nidorx/sqlog/issues?q=is%3Aissue+is%3Aopen+label%3Afeature 179 | [new-issue]: https://github.com/nidorx/sqlog/issues/new/choose 180 | [discussions]: https://github.com/go-path/di/discussions 181 | 182 | ## License 183 | 184 | This code is distributed under the terms and conditions of the [MIT license](LICENSE). 185 | -------------------------------------------------------------------------------- /bench_test.go: -------------------------------------------------------------------------------- 1 | package sqlog 2 | 3 | import ( 4 | "io" 5 | "log/slog" 6 | "testing" 7 | ) 8 | 9 | // go test -v -cpu=4 -run=none -bench=. -benchtime=10s -benchmem . 
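// BenchmarkSlogSimple measures plain slog JSON encoding to io.Discard as a
// baseline; BenchmarkSQLogSimple pushes the same record through the SQLog
// handler and ingester, so the two results are directly comparable.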
10 | 
11 | const msg = "The quick brown fox jumps over the lazy dog"
12 | 
13 | func BenchmarkSlogSimple(b *testing.B) {
14 | 	logger := slog.New(slog.NewJSONHandler(io.Discard, nil))
15 | 	for i := 0; i < b.N; i++ {
16 | 		logger.Info(msg, "rate", "15", "low", 16, "high", 123.2)
17 | 	}
18 | }
19 | 
20 | func BenchmarkSQLogSimple(b *testing.B) {
21 | 	l, err := New(&Config{})
22 | 	if err != nil {
23 | 		b.Error(err)
24 | 		return
25 | 	}
26 | 	logger := slog.New(l.Handler())
27 | 	b.ResetTimer()
28 | 
29 | 	for i := 0; i < b.N; i++ {
30 | 		logger.Info(msg, "rate", "15", "low", 16, "high", 123.2)
31 | 	}
32 | 
33 | 	l.Stop()
34 | }
35 | 
--------------------------------------------------------------------------------
/chunk.go:
--------------------------------------------------------------------------------
1 | package sqlog
2 | 
3 | import (
4 | 	"sync"
5 | 	"sync/atomic"
6 | 	"time"
7 | )
8 | 
9 | // Entry represents a formatted log entry
10 | type Entry struct {
11 | 	Time    time.Time
12 | 	Level   int8
13 | 	Content []byte
14 | }
15 | 
16 | // Chunk stores up to 900 log entries that will be persisted in the storage
17 | type Chunk struct {
18 | 	mu         sync.RWMutex // (next, Depth()) only
19 | 	id         int32        // The identifier of this chunk
20 | 	cap        int32        // Configured batch size
21 | 	book       int32        // Number of scheduled writes in this chunk
22 | 	write      int32        // Number of writes completed
23 | 	size       int64        // Size of content (in bytes)
24 | 	epochStart int64        // First epoch
25 | 	epochEnd   int64        // Last epoch
26 | 	retries    int32        // Number of attempts to persist in the storage
27 | 	locked     atomic.Bool  // Indicates if this chunk no longer accepts writes
28 | 	next       *Chunk       // Pointer to the next chunk
29 | 	entries    [900]*Entry  // The log entries in this chunk
30 | }
31 | 
32 | // NewChunk creates a new chunk with the specified capacity
33 | func NewChunk(cap int32) *Chunk {
34 | 	return &Chunk{cap: min(max(1, cap), 900)}
35 | }
36 | 
37 | // ID returns the identifier of this chunk
38 | func (c *Chunk) ID() int32 {
39 | 	return c.id
40 | }
41 | 
42 | // Next returns the next chunk in the sequence
43 | func (c *Chunk) Next() *Chunk {
44 | 	c.Init(1) // ensures the next chunk is initialized
45 | 	return c.next
46 | }
47 | 
48 | // Size returns the size of this chunk (in bytes)
49 | func (c *Chunk) Size() int64 {
50 | 	return atomic.LoadInt64(&c.size)
51 | }
52 | 
53 | // Retries returns the number of flush retries for this chunk
54 | func (c *Chunk) Retries() int32 {
55 | 	return atomic.LoadInt32(&c.retries)
56 | }
57 | 
58 | // Init initializes the next chunks
59 | func (c *Chunk) Init(depth uint8) {
60 | 	if depth > 0 {
61 | 		if c.next == nil {
62 | 			c.mu.Lock()
63 | 			if c.cap <= 0 {
64 | 				c.cap = 900
65 | 			}
66 | 			if c.next == nil {
67 | 				c.next = &Chunk{cap: c.cap}
68 | 				c.next.id = c.id + 1
69 | 			}
70 | 			c.mu.Unlock()
71 | 		}
72 | 
73 | 		depth--
74 | 		if depth > 0 {
75 | 			c.next.Init(depth)
76 | 		}
77 | 	}
78 | }
79 | 
80 | // Depth retrieves the number of non-empty chunks
81 | func (c *Chunk) Depth() int {
82 | 	if c.Empty() {
83 | 		return 0
84 | 	}
85 | 	c.mu.RLock()
86 | 	n := c.next
87 | 	c.mu.RUnlock()
88 | 	if n == nil {
89 | 		return 1
90 | 	}
91 | 	return 1 + n.Depth()
92 | }
93 | 
94 | // First retrieves the epoch of the first entry in this chunk
95 | func (c *Chunk) First() int64 {
96 | 	return c.epochStart
97 | }
98 | 
99 | // Last retrieves the epoch of the last entry in this chunk
100 | func (c *Chunk) Last() int64 {
101 | 	return c.epochEnd
102 | }
103 | 
104 | // TTL retrieves the age of the last log entry inserted in this chunk
105 | func (c *Chunk) TTL() time.Duration {
106 | 
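	// write counts completed writes, so write-1 indexes the most recently
	// written entry; TTL reports how old that entry's timestamp is.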
index := atomic.LoadInt32(&c.write) - 1 107 | if index < 0 || c.Empty() { 108 | return 0 109 | } 110 | last := c.entries[index].Time 111 | if last.IsZero() { 112 | return 0 113 | } 114 | return time.Since(last) 115 | } 116 | 117 | // Ready indicates if this chunk is full and ready for a flush 118 | func (c *Chunk) Ready() bool { 119 | w := atomic.LoadInt32(&c.write) 120 | if w == c.cap { 121 | return true 122 | } 123 | 124 | b := atomic.LoadInt32(&c.book) 125 | return (b > 0 && w == b && c.locked.Load()) 126 | } 127 | 128 | // Empty indicates if no write attempts have been made 129 | func (c *Chunk) Empty() bool { 130 | return atomic.LoadInt32(&c.book) == 0 131 | } 132 | 133 | // Lock prevents further writes to this chunk. 134 | // From this point on, writes will occur in the next chunk. 135 | // This chunk will be ready for flushing after write confirmation. 136 | func (c *Chunk) Lock() { 137 | c.locked.Store(true) 138 | } 139 | 140 | // Locked checks if this chunk is locked for writing 141 | func (c *Chunk) Locked() bool { 142 | return c.locked.Load() 143 | } 144 | 145 | // List retrieves the list of written entries 146 | func (c *Chunk) List() []*Entry { 147 | return c.entries[:atomic.LoadInt32(&c.write)] 148 | } 149 | 150 | // Put attempts to write the log entry into this chunk. 151 | // Returns the chunk that accepted the entry. 152 | // If the chunk that accepted the entry is the same, it returns false in the second parameter. 153 | func (c *Chunk) Put(e *Entry) (into *Chunk, isFull bool) { 154 | if c.locked.Load() { 155 | // chunk is locked 156 | i, _ := c.Next().Put(e) 157 | return i, true 158 | } 159 | 160 | index := (atomic.AddInt32(&c.book, 1) - 1) 161 | if index > (c.cap - 1) { 162 | // chunk is full 163 | i, _ := c.Next().Put(e) 164 | return i, true 165 | } 166 | 167 | defer atomic.AddInt32(&c.write, 1) 168 | 169 | // safe write 170 | c.entries[index] = e // @TODO: test to ensure it is safe 171 | 172 | entryEpoch := e.Time.Unix() 173 | if last := c.epochEnd; entryEpoch > last { 174 | atomic.CompareAndSwapInt64(&c.epochEnd, last, entryEpoch) 175 | } 176 | 177 | if first := c.epochStart; (first == 0 || entryEpoch < first) && !atomic.CompareAndSwapInt64(&c.epochStart, first, entryEpoch) { 178 | // unlikely to happen 179 | for i := 0; i < 3; i++ { 180 | first = c.epochStart 181 | if entryEpoch < first { 182 | if atomic.CompareAndSwapInt64(&c.epochStart, first, entryEpoch) { 183 | break 184 | } 185 | } else { 186 | break 187 | } 188 | } 189 | } 190 | 191 | atomic.AddInt64(&c.size, int64(len(e.Content))) 192 | 193 | return c, false 194 | } 195 | -------------------------------------------------------------------------------- /chunk_test.go: -------------------------------------------------------------------------------- 1 | package sqlog 2 | 3 | import ( 4 | "testing" 5 | "time" 6 | 7 | "github.com/stretchr/testify/assert" 8 | ) 9 | 10 | func Test_Chunk_Depth(t *testing.T) { 11 | chunk := NewChunk(1) 12 | assert.Equal(t, 0, chunk.Depth(), "Empty chunk should have depth 0") 13 | 14 | chunk.Put(&Entry{Time: time.Now()}) 15 | assert.Equal(t, 1, chunk.Depth(), "Chunk with one entry should have depth 1") 16 | 17 | chunk.Init(1) 18 | chunk.Put(&Entry{Time: time.Now()}) 19 | assert.Equal(t, 2, chunk.Depth(), "Chunk with next initialized chunk should have depth 2") 20 | } 21 | 22 | func Test_Chunk_Init(t *testing.T) { 23 | chunk := &Chunk{} 24 | chunk.Init(2) 25 | assert.NotNil(t, chunk.Next(), "Next chunk should be initialized") 26 | assert.Equal(t, int32(1), chunk.Next().ID(), "ID of the 
next chunk should be 1") 27 | } 28 | 29 | func Test_Chunk_First(t *testing.T) { 30 | chunk := NewChunk(2) 31 | assert.Equal(t, int64(0), chunk.First(), "Empty chunk should return zero time for First") 32 | 33 | entry := &Entry{Time: time.Now()} 34 | chunk.Put(entry) 35 | assert.Equal(t, entry.Time.Unix(), chunk.First(), "Chunk with one entry should return the timestamp of the first entry") 36 | } 37 | 38 | func Test_Chunk_Last(t *testing.T) { 39 | chunk := NewChunk(2) 40 | assert.Equal(t, int64(0), chunk.Last(), "Empty chunk should return zero time for Last") 41 | 42 | entry := &Entry{Time: time.Now()} 43 | chunk.Put(entry) 44 | assert.Equal(t, entry.Time.Unix(), chunk.Last(), "Chunk with one entry should return the timestamp of the last entry") 45 | } 46 | 47 | func Test_Chunk_TTL(t *testing.T) { 48 | chunk := NewChunk(2) 49 | assert.Equal(t, time.Duration(0), chunk.TTL(), "TTL of empty chunk should be zero") 50 | 51 | entry := &Entry{Time: time.Now()} 52 | chunk.Put(entry) 53 | time.Sleep(1 * time.Second) 54 | assert.Greater(t, chunk.TTL(), time.Duration(0), "TTL of chunk with one entry should be greater than zero") 55 | } 56 | 57 | func Test_Chunk_Full(t *testing.T) { 58 | chunk := NewChunk(2) 59 | assert.False(t, chunk.Ready(), "Empty chunk should not be full") 60 | 61 | chunk.Put(&Entry{Time: time.Now()}) 62 | chunk.Put(&Entry{Time: time.Now()}) 63 | assert.True(t, chunk.Ready(), "Chunk should be full when it reaches capacity") 64 | } 65 | 66 | func Test_Chunk_Empty(t *testing.T) { 67 | chunk := NewChunk(2) 68 | assert.True(t, chunk.Empty(), "Newly created chunk should be empty") 69 | 70 | chunk.Put(&Entry{Time: time.Now()}) 71 | assert.False(t, chunk.Empty(), "Chunk with one entry should not be empty") 72 | } 73 | 74 | func Test_Chunk_Lock(t *testing.T) { 75 | chunk := NewChunk(2) 76 | chunk.Lock() 77 | 78 | assert.True(t, chunk.Locked(), "Chunk should be locked after calling Lock") 79 | 80 | new, isFull := chunk.Put(&Entry{Time: time.Now()}) 81 | assert.True(t, isFull, "Chunk should be full after calling Lock") 82 | assert.Equal(t, new, chunk.Next(), "Chunk.Put should return the next chunk when Locked") 83 | assert.NotEqual(t, chunk, new, "Chunk.Put should return the next chunk when Locked") 84 | } 85 | 86 | func Test_Chunk_List(t *testing.T) { 87 | chunk := NewChunk(5) 88 | assert.Empty(t, chunk.List(), "Empty chunk should return an empty list") 89 | 90 | entry := &Entry{Time: time.Now()} 91 | chunk.Put(entry) 92 | 93 | list := chunk.List() 94 | assert.Len(t, list, 1, "Chunk with one entry should return list with 1 item") 95 | assert.Equal(t, entry, list[0], "The first item in the list should be the inserted entry") 96 | } 97 | 98 | func Test_Chunk_Put(t *testing.T) { 99 | chunk := NewChunk(1) 100 | 101 | entry := &Entry{Time: time.Now()} 102 | resultChunk, isFull := chunk.Put(entry) 103 | assert.Equal(t, chunk, resultChunk, "The current chunk should accept the entry") 104 | assert.False(t, isFull, "Chunk should not be full after the first insertion") 105 | 106 | // Test when the chunk is full 107 | entry2 := &Entry{Time: time.Now()} 108 | resultChunk, isFull = chunk.Put(entry2) 109 | assert.NotEqual(t, chunk, resultChunk, "A new chunk should accept the entry after the current chunk is full") 110 | assert.True(t, isFull, "Original chunk should be full after the second insertion") 111 | } 112 | -------------------------------------------------------------------------------- /demo/.dockerignore: -------------------------------------------------------------------------------- 1 | 
.vscode 2 | *Dockerfile* 3 | logs 4 | -------------------------------------------------------------------------------- /demo/Dockerfile: -------------------------------------------------------------------------------- 1 | # syntax=docker/dockerfile:1 2 | 3 | # We use a multi-stage build setup. 4 | # (https://docs.docker.com/build/building/multi-stage/) 5 | 6 | # Stage 1 7 | FROM golang:1.23.2-alpine AS builder 8 | 9 | # smoke test to verify if golang is available 10 | RUN go version 11 | 12 | RUN apk --no-cache add gcc g++ 13 | 14 | WORKDIR /go/src/demo/ 15 | COPY . ./ 16 | COPY go.mod.txt ./go.mod 17 | COPY driver-mattn.go.txt ./driver-mattn.go 18 | COPY driver-modernc.go.txt ./driver-modernc.go 19 | 20 | RUN set -Eeux && go mod tidy && go mod download && go mod verify 21 | 22 | ARG GO_TAGS 23 | ARG PROJECT_VERSION 24 | 25 | RUN GOOS=linux CGO_ENABLED=1 go build -tags $GO_TAGS -trimpath -ldflags="-w -s -X 'main.Version=${PROJECT_VERSION}'" -o app . 26 | 27 | # Stage 2 28 | FROM alpine:3.20.3 29 | RUN apk --no-cache add ca-certificates 30 | 31 | WORKDIR /root/ 32 | COPY --from=builder /go/src/demo/app . 33 | 34 | EXPOSE 8080 35 | CMD ["/root/app"] -------------------------------------------------------------------------------- /demo/README.md: -------------------------------------------------------------------------------- 1 | # SQLog Demo 2 | 3 | 4 | 5 | You can view the current version of the SQLog demo at the links below: 6 | 7 | The demo project captures all mouse movements and sends them to the server, which logs the received parameters. 8 | 9 | - **Log generator**: https://sqlog-demo.onrender.com/ 10 | - **SQLog UI** : https://sqlog-demo.onrender.com/logs/ 11 | 12 | > **IMPORTANT**! I am using the [free version of Render](https://docs.render.com/free), so there is no guarantee of service stability or availability. 13 | 14 | [SQLog-demo.webm](https://github.com/user-attachments/assets/046b65c9-dd36-4779-8b15-915be5f7e3f3) 15 | 16 | 17 |
18 | 19 |
20 | 21 | ## Build Docker (To deploy on render.com) 22 | 23 | - **GO_TAGS=mattn**: Use github.com/mattn/go-sqlite3 driver 24 | - **GO_TAGS=modernc**: Use modernc.org/sqlite driver 25 | 26 | ``` 27 | docker build --build-arg PROJECT_VERSION=1.0.0 --build-arg GO_TAGS=mattn -t nidorx/sqlog-demo:latest . 28 | 29 | # run 30 | docker run -p 8080:8080 nidorx/sqlog-demo:latest 31 | 32 | # publish 33 | docker images nidorx/sqlog-demo 34 | docker push nidorx/sqlog-demo:latest 35 | ``` -------------------------------------------------------------------------------- /demo/driver-mattn.go.txt: -------------------------------------------------------------------------------- 1 | //go:build mattn 2 | // +build mattn 3 | 4 | package main 5 | 6 | import ( 7 | _ "github.com/mattn/go-sqlite3" 8 | ) 9 | -------------------------------------------------------------------------------- /demo/driver-modernc.go.txt: -------------------------------------------------------------------------------- 1 | //go:build modernc 2 | // +build modernc 3 | 4 | package main 5 | 6 | import ( 7 | _ "modernc.org/sqlite" 8 | ) 9 | -------------------------------------------------------------------------------- /demo/go.mod.txt: -------------------------------------------------------------------------------- 1 | module github.com/nidorx/sqlog-demo 2 | 3 | go 1.23 4 | 5 | toolchain go1.23.1 6 | 7 | require ( 8 | github.com/nidorx/sqlog v0.0.8 9 | modernc.org/sqlite v1.33.1 10 | github.com/mattn/go-sqlite3 v1.14.24 11 | ) 12 | 13 | -------------------------------------------------------------------------------- /demo/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "embed" 5 | "io/fs" 6 | "log/slog" 7 | "net/http" 8 | "os" 9 | "path" 10 | "strings" 11 | 12 | "github.com/nidorx/sqlog" 13 | "github.com/nidorx/sqlog/sqlite" 14 | ) 15 | 16 | var ( 17 | dev = false 18 | 19 | log sqlog.Log 20 | 21 | //go:embed public/* 22 | webFiles embed.FS 23 | ) 24 | 25 | func init() { 26 | 27 | // SQLite storage 28 | storage, err := sqlite.New(&sqlite.Config{ 29 | Dir: "logs", 30 | Prefix: "demo", 31 | MaxFilesizeMB: 10, 32 | MaxSizeTotalMB: 100, 33 | MaxOpenedDB: 2, 34 | MaxRunningTasks: 5, 35 | CloseIdleSec: 10, 36 | Driver: "sqlite3", 37 | }) 38 | if err != nil { 39 | panic(err) 40 | } 41 | 42 | config := &sqlog.Config{ 43 | Ingester: &sqlog.IngesterConfig{ 44 | Chunks: 5, 45 | ChunkSize: 250, 46 | }, 47 | Storage: storage, 48 | } 49 | 50 | if l, err := sqlog.New(config); err != nil { 51 | panic(err) 52 | } else { 53 | log = l 54 | slog.SetDefault(slog.New(l.Handler())) 55 | } 56 | } 57 | 58 | func main() { 59 | defer log.Stop() 60 | 61 | logHttpHandler := log.HttpHandler() 62 | staticHttpHandler := getStaticHandler() 63 | 64 | httpHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { 65 | p := r.URL.Path 66 | 67 | if idx := strings.LastIndex(p, "/logs"); idx >= 0 { 68 | 69 | logHttpHandler.ServeHTTP(w, r) 70 | 71 | } else if p == "/debug" { 72 | 73 | l := slog.LevelInfo 74 | args := []any{} 75 | msg := "debug log form screen" 76 | for k, v := range r.URL.Query() { 77 | if k == "msg" { 78 | msg = strings.Join(v, ",") 79 | } else if k == "level" { 80 | switch strings.Join(v, ",") { 81 | case "INFO": 82 | l = slog.LevelInfo 83 | case "WARN": 84 | l = slog.LevelWarn 85 | case "ERROR": 86 | l = slog.LevelError 87 | } 88 | } else { 89 | args = append(args, slog.Any(k, strings.Join(v, ","))) 90 | } 91 | } 92 | 93 | slog.Log(r.Context(), l, msg, args...) 
94 | 95 | } else { 96 | staticHttpHandler.ServeHTTP(w, r) 97 | } 98 | }) 99 | 100 | http.ListenAndServe(":8080", httpHandler) 101 | } 102 | 103 | func getStaticHandler() http.Handler { 104 | var wfs http.FileSystem 105 | 106 | if dev { 107 | PWD, _ := os.Getwd() 108 | wfs = http.Dir(path.Join(PWD, "/public")) 109 | } else { 110 | subfs, _ := fs.Sub(webFiles, "public") 111 | wfs = http.FS(subfs) 112 | } 113 | return http.FileServer(wfs) 114 | } 115 | -------------------------------------------------------------------------------- /demo/public/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | SQLog - Demo - Log generator 8 | 30 | 31 | 32 | 33 | 34 | https://github.com/nidorx/sqlog 35 | 36 | 37 | -------------------------------------------------------------------------------- /docs/diagram.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nidorx/sqlog/bdb97c97230725d8d3ca053ea6d6f08598b13a20/docs/diagram.png -------------------------------------------------------------------------------- /docs/logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nidorx/sqlog/bdb97c97230725d8d3ca053ea6d6f08598b13a20/docs/logo.png -------------------------------------------------------------------------------- /docs/repository-open-graph.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nidorx/sqlog/bdb97c97230725d8d3ca053ea6d6f08598b13a20/docs/repository-open-graph.png -------------------------------------------------------------------------------- /docs/repository-open-graph.psd: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nidorx/sqlog/bdb97c97230725d8d3ca053ea6d6f08598b13a20/docs/repository-open-graph.psd -------------------------------------------------------------------------------- /docs/ui.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nidorx/sqlog/bdb97c97230725d8d3ca053ea6d6f08598b13a20/docs/ui.png -------------------------------------------------------------------------------- /expr.go: -------------------------------------------------------------------------------- 1 | package sqlog 2 | 3 | import ( 4 | "bytes" 5 | "errors" 6 | "fmt" 7 | "strconv" 8 | "strings" 9 | "sync" 10 | ) 11 | 12 | // ExprBuilder expression builder interface 13 | type ExprBuilder[E any] interface { 14 | Build() E 15 | GroupStart() 16 | GroupEnd() 17 | Operator(op string) 18 | Text(field, term string, isSequence, isWildcard bool) 19 | Number(field, condition string, value float64) 20 | Between(field string, x, y float64) 21 | TextIn(field string, values []string) 22 | NumberIn(field string, values []float64) 23 | } 24 | 25 | type ExprBuilderFactory[E any] func(expression string) (ExprBuilder[E], string) 26 | 27 | // NewExprBuilder creates a new expression builder. 28 | // Allows the use of the same filter pattern in different dialects (Ex. 
Memory, Sqlite, PostgreSQL) 29 | func NewExprBuilder[E any](factory ExprBuilderFactory[E]) func(expression string) (E, error) { 30 | 31 | var parse func(expression string, builder ExprBuilder[E]) error 32 | 33 | parse = func(expression string, builder ExprBuilder[E]) error { 34 | var ( 35 | i int 36 | b byte 37 | qs = []byte(expression) 38 | last = len(qs) - 1 39 | s = &exprParseState[E]{ 40 | buf: bytes.NewBuffer(make([]byte, 0, 256)), 41 | field: bytes.NewBuffer(make([]byte, 0, 10)), 42 | builder: builder, 43 | } 44 | ) 45 | 46 | for ; i <= last; i++ { 47 | b = qs[i] 48 | 49 | if b == '(' && !s.inArray && !s.inQuote { 50 | // compiles all internal groups 51 | var ( 52 | inner = bytes.NewBuffer(make([]byte, 0, 512)) 53 | parenthesis = 1 // inner parenthesis 54 | innerInQuote = false 55 | j = i + 1 // ignore ( 56 | ) 57 | for ; j <= last; j++ { 58 | c := qs[j] 59 | 60 | if c == '(' && !innerInQuote { 61 | parenthesis++ 62 | inner.WriteByte(c) 63 | } else if c == ')' && !innerInQuote { 64 | parenthesis-- 65 | if parenthesis == 0 { 66 | break 67 | } else { 68 | inner.WriteByte(c) 69 | } 70 | } else if c == '"' { 71 | // is escaped (Ex. `error:myMethod\(\"trace\"\)`)? append a '"' 72 | if qs[j-1] != '\\' { 73 | if innerInQuote { 74 | innerInQuote = false 75 | } else { 76 | innerInQuote = true 77 | } 78 | } 79 | inner.WriteByte(c) 80 | } else { 81 | inner.WriteByte(c) 82 | } 83 | } 84 | 85 | s.addOperator() 86 | 87 | s.builder.GroupStart() 88 | 89 | substr := inner.String() 90 | subErr := parse(substr, builder) 91 | if subErr != nil { 92 | return errors.Join(fmt.Errorf("invalid expression %s ", substr), subErr) 93 | } 94 | 95 | s.builder.GroupEnd() 96 | s.dirty = true 97 | 98 | i = j 99 | } else if b == '[' && !s.inQuote { 100 | if i > 0 && qs[i-1] == '\\' { 101 | s.buf.Truncate(s.buf.Len() - 1) 102 | s.buf.WriteByte('[') 103 | } else if s.inArray { 104 | // a '[' while we're in a array is an error 105 | return errors.New("unexpected `[` at " + strconv.Itoa(i)) 106 | } else { 107 | s.inArray = true 108 | } 109 | } else if b == ']' && s.inArray && !s.inQuote { 110 | if i > 0 && qs[i-1] == '\\' { 111 | s.buf.Truncate(s.buf.Len() - 1) 112 | s.buf.WriteByte(']') 113 | } else { 114 | s.addTermSingle() 115 | if err := s.closeArray(); err != nil { 116 | return err 117 | } 118 | } 119 | } else if b == ' ' { 120 | if s.inQuote { 121 | s.buf.WriteByte(b) 122 | } else { 123 | s.addTermSingle() 124 | } 125 | } else if b == '"' { 126 | // is escaped (Ex. `error:myMethod\(\"trace\"\)`)? append a '"' 127 | if i > 0 && qs[i-1] == '\\' { 128 | s.buf.Truncate(s.buf.Len() - 1) 129 | s.buf.WriteByte('"') 130 | } else if s.inQuote { 131 | s.inQuote = false 132 | s.addTermSequence() 133 | } else { 134 | s.inQuote = true 135 | } 136 | } else if b == ':' && !s.inQuote { 137 | // is escaped (Ex. "path:c\:/my/path")? 
append a ':' 138 | if i > 0 && qs[i-1] == '\\' { 139 | s.buf.Truncate(s.buf.Len() - 1) 140 | s.buf.WriteByte(':') 141 | } else if s.field.Len() > 0 { 142 | // a ':' while we're in a name is an error 143 | return errors.New("unexpected `:` at " + strconv.Itoa(i)) 144 | } else { 145 | if f := strings.TrimSpace(s.buf.String()); f != "" { 146 | s.field.WriteString(f) 147 | } 148 | s.buf.Reset() 149 | } 150 | } else { 151 | // save buffer 152 | s.buf.WriteByte(b) 153 | } 154 | } 155 | 156 | // add last part 157 | if s.inQuote { 158 | s.addTermSequence() 159 | } else { 160 | s.addTermSingle() 161 | } 162 | 163 | if err := s.closeArray(); err != nil { 164 | return err 165 | } 166 | 167 | return nil 168 | } 169 | 170 | var cache = sync.Map{} 171 | 172 | // the builder 173 | return func(expression string) (exp E, err error) { 174 | expression = strings.TrimSpace(expression) 175 | 176 | if c, ok := cache.Load(expression); ok { 177 | return c.(E), nil 178 | } 179 | 180 | mapper, newExpression := factory(expression) 181 | 182 | err = parse(newExpression, mapper) 183 | if err != nil { 184 | return 185 | } 186 | 187 | exp = mapper.Build() 188 | cache.Store(expression, exp) 189 | 190 | return exp, nil 191 | } 192 | } 193 | -------------------------------------------------------------------------------- /expr_parse_state.go: -------------------------------------------------------------------------------- 1 | package sqlog 2 | 3 | import ( 4 | "bytes" 5 | "errors" 6 | "strconv" 7 | "strings" 8 | ) 9 | 10 | type exprParseState[E any] struct { 11 | builder ExprBuilder[E] 12 | inQuote bool 13 | inArray bool 14 | operator string 15 | arrayParts []string 16 | dirty bool 17 | buf *bytes.Buffer // current value 18 | field *bytes.Buffer // current field name 19 | } 20 | 21 | func (s *exprParseState[E]) addOperator() { 22 | if s.dirty { 23 | if s.operator == "" { 24 | s.builder.Operator("AND") 25 | } else { 26 | s.builder.Operator(s.operator) 27 | } 28 | } 29 | 30 | s.operator = "" 31 | } 32 | 33 | func (s *exprParseState[E]) closeArray() error { 34 | if !s.inArray { 35 | return nil 36 | } 37 | s.inArray = false 38 | 39 | if len(s.arrayParts) == 0 { 40 | return nil 41 | } 42 | 43 | s.addOperator() 44 | 45 | fieldName := "msg" 46 | if s.field.Len() > 0 { 47 | fieldName = s.field.String() 48 | } 49 | 50 | if len(s.arrayParts) == 3 && s.arrayParts[1] == "TO" { 51 | // field:[400 TO 499] 52 | 53 | x, err := strconv.ParseFloat(s.arrayParts[0], 64) 54 | if err != nil { 55 | return errors.New("invalid clause [" + strings.Join(s.arrayParts, "") + "]") 56 | } 57 | 58 | y, err := strconv.ParseFloat(s.arrayParts[2], 64) 59 | if err != nil { 60 | return errors.New("invalid clause [" + strings.Join(s.arrayParts, "") + "]") 61 | } 62 | 63 | s.builder.Between(fieldName, x, y) 64 | 65 | } else { 66 | 67 | var ( 68 | textArgs []string 69 | numberArgs []float64 70 | ) 71 | 72 | for _, v := range s.arrayParts { 73 | if n, err := strconv.ParseFloat(v, 64); err == nil { 74 | numberArgs = append(numberArgs, n) 75 | } else { 76 | textArgs = append(textArgs, v) 77 | } 78 | } 79 | 80 | group := len(numberArgs) > 0 && len(textArgs) > 0 81 | 82 | if group { 83 | s.builder.GroupStart() 84 | } 85 | 86 | if len(numberArgs) > 0 { 87 | if len(numberArgs) == 1 { 88 | s.builder.Number(fieldName, "=", numberArgs[0]) 89 | } else { 90 | s.builder.NumberIn(fieldName, numberArgs) 91 | } 92 | } 93 | 94 | if group { 95 | s.builder.Operator("OR") 96 | } 97 | 98 | if len(textArgs) > 0 { 99 | s.builder.TextIn(fieldName, textArgs) 100 | } 101 | 102 | if group 
{ 103 | s.builder.GroupEnd() 104 | } 105 | } 106 | 107 | s.dirty = true 108 | s.arrayParts = nil 109 | 110 | return nil 111 | } 112 | 113 | // addTermSingle a single term is a single word such as test or hello. 114 | func (s *exprParseState[E]) addTermSingle() { 115 | 116 | if s.inArray { 117 | if s.buf.Len() > 0 { 118 | s.arrayParts = append(s.arrayParts, s.buf.String()) 119 | } 120 | s.buf.Reset() 121 | return 122 | } 123 | 124 | if s.buf.Len() > 0 { 125 | 126 | var ( 127 | number float64 128 | isNumeric bool 129 | numberCondition string 130 | text = s.buf.String() 131 | textUpper = strings.ToUpper(text) 132 | ) 133 | if textUpper == "AND" || textUpper == "OR" { 134 | s.operator = textUpper 135 | s.buf.Reset() 136 | return 137 | } 138 | 139 | s.addOperator() 140 | 141 | fieldName := "msg" 142 | if s.field.Len() > 0 { 143 | fieldName = s.field.String() 144 | 145 | if strings.HasPrefix(text, ">") || strings.HasPrefix(text, "<") { 146 | // Numerical values ? 147 | var numberStr string 148 | for _, cond := range []string{">=", ">", "<=", "<"} { 149 | if strings.HasPrefix(text, cond) { 150 | numberCondition = cond 151 | numberStr = strings.TrimPrefix(text, cond) 152 | break 153 | } 154 | } 155 | 156 | if n, err := strconv.ParseFloat(numberStr, 64); err == nil { 157 | number = n 158 | isNumeric = true 159 | } 160 | } else if n, err := strconv.ParseFloat(strings.TrimSpace(text), 64); err == nil { 161 | number = n 162 | isNumeric = true 163 | numberCondition = "=" 164 | } 165 | } 166 | 167 | if isNumeric { 168 | s.builder.Number(fieldName, numberCondition, number) 169 | } else { 170 | s.builder.Text(fieldName, text, false, strings.LastIndexByte(text, '*') >= 0 || strings.LastIndexByte(text, '?') >= 0) 171 | } 172 | 173 | s.dirty = true 174 | } 175 | s.buf.Reset() 176 | s.field.Reset() 177 | } 178 | 179 | // addTermSequence a sequence is a group of words surrounded by double quotes, such as "hello world". 
180 | func (s *exprParseState[E]) addTermSequence() { 181 | if s.inArray { 182 | s.arrayParts = append(s.arrayParts, s.buf.String()) 183 | s.buf.Reset() 184 | return 185 | } 186 | 187 | if s.buf.Len() > 0 { 188 | s.addOperator() 189 | 190 | fieldName := "msg" 191 | if s.field.Len() > 0 { 192 | fieldName = s.field.String() 193 | } 194 | 195 | text := s.buf.String() 196 | 197 | s.builder.Text(fieldName, text, true, strings.LastIndexByte(text, '*') >= 0 || strings.LastIndexByte(text, '?') >= 0) 198 | s.dirty = true 199 | } 200 | s.buf.Reset() 201 | s.field.Reset() 202 | } 203 | -------------------------------------------------------------------------------- /expr_test.go: -------------------------------------------------------------------------------- 1 | package sqlog 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/stretchr/testify/assert" 7 | ) 8 | 9 | var ( 10 | testExprBuilderFn = NewExprBuilder(func(expression string) (ExprBuilder[[]any], string) { 11 | return &testExprBuilder{parts: []any{}}, expression 12 | }) 13 | ) 14 | 15 | type testExprBuilder struct { 16 | parts []any 17 | } 18 | 19 | func (s *testExprBuilder) Build() []any { 20 | return s.parts 21 | } 22 | 23 | func (s *testExprBuilder) GroupStart() { 24 | s.parts = append(s.parts, "(") 25 | } 26 | 27 | func (s *testExprBuilder) GroupEnd() { 28 | s.parts = append(s.parts, ")") 29 | } 30 | 31 | func (s *testExprBuilder) Operator(op string) { 32 | s.parts = append(s.parts, op) 33 | } 34 | 35 | func (s *testExprBuilder) Text(field, term string, isSequence, isWildcard bool) { 36 | if isSequence { 37 | if isWildcard { 38 | s.parts = append(s.parts, field, "LIKE SEQ", term) 39 | } else { 40 | s.parts = append(s.parts, field, "EQUAL", term) 41 | } 42 | } else { 43 | s.parts = append(s.parts, field, "LIKE", term) 44 | } 45 | } 46 | 47 | func (s *testExprBuilder) TextIn(field string, values []string) { 48 | s.parts = append(s.parts, field, "IN") 49 | for _, v := range values { 50 | s.parts = append(s.parts, v) 51 | } 52 | } 53 | 54 | func (s *testExprBuilder) Number(field, condition string, value float64) { 55 | s.parts = append(s.parts, field, condition, value) 56 | } 57 | 58 | func (s *testExprBuilder) Between(field string, x, y float64) { 59 | s.parts = append(s.parts, field, "BETWEEN", x, y) 60 | } 61 | 62 | func (s *testExprBuilder) NumberIn(field string, values []float64) { 63 | s.parts = append(s.parts, field) 64 | s.parts = append(s.parts, "IN NUMERIC") 65 | for _, v := range values { 66 | s.parts = append(s.parts, v) 67 | } 68 | } 69 | 70 | type testExprData struct { 71 | expr string 72 | parts []any 73 | } 74 | 75 | func Test_ExprBasic(t *testing.T) { 76 | testCases := []testExprData{ 77 | { 78 | "hello", 79 | []any{"msg", "LIKE", "hello"}, 80 | }, 81 | { 82 | "hello*", 83 | []any{"msg", "LIKE", "hello*"}, 84 | }, 85 | { 86 | "hello world", 87 | []any{"msg", "LIKE", "hello", "AND", "msg", "LIKE", "world"}, 88 | }, 89 | { 90 | "hello* *world", 91 | []any{"msg", "LIKE", "hello*", "AND", "msg", "LIKE", "*world"}, 92 | }, 93 | { 94 | `"hello world"`, 95 | []any{"msg", "EQUAL", "hello world"}, 96 | }, 97 | { 98 | `"hello world*"`, 99 | []any{"msg", "LIKE SEQ", "hello world*"}, 100 | }, 101 | { 102 | `"*hello*world*"`, 103 | []any{"msg", "LIKE SEQ", "*hello*world*"}, 104 | }, 105 | { 106 | `field:hello`, 107 | []any{"field", "LIKE", "hello"}, 108 | }, 109 | { 110 | "field:hello*", 111 | []any{"field", "LIKE", "hello*"}, 112 | }, 113 | { 114 | "field:hello world", 115 | []any{"field", "LIKE", "hello", "AND", "msg", "LIKE", "world"}, 
116 | }, 117 | { 118 | "field:hello* *world", 119 | []any{"field", "LIKE", "hello*", "AND", "msg", "LIKE", "*world"}, 120 | }, 121 | { 122 | `field:"hello world"`, 123 | []any{"field", "EQUAL", "hello world"}, 124 | }, 125 | { 126 | `field:"hello world*"`, 127 | []any{"field", "LIKE SEQ", "hello world*"}, 128 | }, 129 | { 130 | `field:"*hello*world*"`, 131 | []any{"field", "LIKE SEQ", "*hello*world*"}, 132 | }, 133 | } 134 | for _, tt := range testCases { 135 | t.Run(tt.expr, func(t *testing.T) { 136 | runExprTest(t, tt) 137 | }) 138 | } 139 | } 140 | 141 | // Numerical values 142 | func Test_ExprNumerical(t *testing.T) { 143 | testCases := []testExprData{ 144 | { 145 | `field:99`, 146 | []any{"field", "=", float64(99)}, 147 | }, 148 | { 149 | `field:>99`, 150 | []any{"field", ">", float64(99)}, 151 | }, 152 | { 153 | `field:<99`, 154 | []any{"field", "<", float64(99)}, 155 | }, 156 | { 157 | `field:>=99`, 158 | []any{"field", ">=", float64(99)}, 159 | }, 160 | { 161 | `field:<=99`, 162 | []any{"field", "<=", float64(99)}, 163 | }, 164 | } 165 | for _, tt := range testCases { 166 | t.Run(tt.expr, func(t *testing.T) { 167 | runExprTest(t, tt) 168 | }) 169 | } 170 | } 171 | 172 | func Test_ExprArray(t *testing.T) { 173 | testCases := []testExprData{ 174 | { 175 | `[hello world]`, 176 | []any{"msg", "IN", "hello", "world"}, 177 | }, 178 | { 179 | `[hello "beautiful world"]`, 180 | []any{"msg", "IN", "hello", "beautiful world"}, 181 | }, 182 | { 183 | `field:[hello world]`, 184 | []any{"field", "IN", "hello", "world"}, 185 | }, 186 | { 187 | `field:[hello "beautiful world"]`, 188 | []any{"field", "IN", "hello", "beautiful world"}, 189 | }, 190 | { 191 | `field:[400 TO 499]`, 192 | []any{"field", "BETWEEN", float64(400), float64(499)}, 193 | }, 194 | { 195 | `field:[100 200 300]`, 196 | []any{"field", "IN NUMERIC", float64(100), float64(200), float64(300)}, 197 | }, 198 | { 199 | `field:[100 hello "beautiful world" 200 300]`, 200 | []any{"(", "field", "IN NUMERIC", float64(100), float64(200), float64(300), "OR", "field", "IN", "hello", "beautiful world", ")"}, 201 | }, 202 | } 203 | for _, tt := range testCases { 204 | t.Run(tt.expr, func(t *testing.T) { 205 | runExprTest(t, tt) 206 | }) 207 | } 208 | } 209 | 210 | // Boolean Operators 211 | func Test_ExprBoolean(t *testing.T) { 212 | testCases := []testExprData{ 213 | { 214 | "hello AND world", 215 | []any{"msg", "LIKE", "hello", "AND", "msg", "LIKE", "world"}, 216 | }, 217 | { 218 | "hello AND beautiful AND world", 219 | []any{"msg", "LIKE", "hello", "AND", "msg", "LIKE", "beautiful", "AND", "msg", "LIKE", "world"}, 220 | }, 221 | { 222 | "hello OR world", 223 | []any{"msg", "LIKE", "hello", "OR", "msg", "LIKE", "world"}, 224 | }, 225 | { 226 | "field:hello AND world", 227 | []any{"field", "LIKE", "hello", "AND", "msg", "LIKE", "world"}, 228 | }, 229 | { 230 | "field:hello AND beautiful AND field:world", 231 | []any{"field", "LIKE", "hello", "AND", "msg", "LIKE", "beautiful", "AND", "field", "LIKE", "world"}, 232 | }, 233 | { 234 | "field:hello OR world", 235 | []any{"field", "LIKE", "hello", "OR", "msg", "LIKE", "world"}, 236 | }, 237 | { 238 | "hello AND (beautiful OR world)", 239 | []any{"msg", "LIKE", "hello", "AND", "(", "msg", "LIKE", "beautiful", "OR", "msg", "LIKE", "world", ")"}, 240 | }, 241 | { 242 | "hello AND (beautiful AND world)", 243 | []any{"msg", "LIKE", "hello", "AND", "(", "msg", "LIKE", "beautiful", "AND", "msg", "LIKE", "world", ")"}, 244 | }, 245 | { 246 | "field:hello AND (beautiful AND field:99)", 247 | []any{"field", 
"LIKE", "hello", "AND", "(", "msg", "LIKE", "beautiful", "AND", "field", "=", float64(99), ")"}, 248 | }, 249 | { 250 | `(field:hello* OR world*) AND (field:[hello "beautiful world"] OR (field:99 AND field:[100 200 300]) OR field:[400 TO 499])`, 251 | []any{ 252 | "(", "field", "LIKE", "hello*", "OR", "msg", "LIKE", "world*", ")", "AND", 253 | "(", 254 | "field", "IN", "hello", "beautiful world", "OR", 255 | "(", "field", "=", float64(99), "AND", "field", "IN NUMERIC", float64(100), float64(200), float64(300), ")", "OR", 256 | "field", "BETWEEN", float64(400), float64(499), 257 | ")", 258 | }, 259 | }, 260 | } 261 | for _, tt := range testCases { 262 | t.Run(tt.expr, func(t *testing.T) { 263 | runExprTest(t, tt) 264 | }) 265 | } 266 | } 267 | 268 | func Test_ExprEscape(t *testing.T) { 269 | testCases := []testExprData{ 270 | { 271 | `hell\"o`, 272 | []any{"msg", "LIKE", `hell"o`}, 273 | }, 274 | { 275 | `"hello \" world"`, 276 | []any{"msg", "EQUAL", `hello " world`}, 277 | }, 278 | { 279 | `"hello \" world*"`, 280 | []any{"msg", "LIKE SEQ", `hello " world*`}, 281 | }, 282 | { 283 | `field:hell\"o`, 284 | []any{"field", "LIKE", `hell"o`}, 285 | }, 286 | { 287 | `field:"hello \" world"`, 288 | []any{"field", "EQUAL", `hello " world`}, 289 | }, 290 | { 291 | `field:"hello \" world*"`, 292 | []any{"field", "LIKE SEQ", `hello " world*`}, 293 | }, 294 | { 295 | `field:"hello [beautiful] world*"`, 296 | []any{"field", "LIKE SEQ", `hello [beautiful] world*`}, 297 | }, 298 | { 299 | `field:he\[ll]\"o`, 300 | []any{"field", "LIKE", `he[ll]"o`}, 301 | }, 302 | { 303 | `field:[hell\"o "beautiful \" world"]`, 304 | []any{"field", "IN", `hell"o`, `beautiful " world`}, 305 | }, 306 | { 307 | `field:[hell\"o world\]]`, 308 | []any{"field", "IN", `hell"o`, `world]`}, 309 | }, 310 | { 311 | `path:c\:/dev/projects/*`, 312 | []any{"path", "LIKE", `c:/dev/projects/*`}, 313 | }, 314 | { 315 | `(hell\"o AND \"world)`, 316 | []any{"(", "msg", "LIKE", `hell"o`, "AND", "msg", "LIKE", `"world`, ")"}, 317 | }, 318 | } 319 | for _, tt := range testCases { 320 | t.Run(tt.expr, func(t *testing.T) { 321 | runExprTest(t, tt) 322 | }) 323 | } 324 | } 325 | 326 | func Test_ExprIncomplete(t *testing.T) { 327 | testCases := []testExprData{ 328 | { 329 | `"hello \" world`, 330 | []any{"msg", "EQUAL", `hello " world`}, 331 | }, 332 | { 333 | `field:[hell\"o "beautiful \" world"`, 334 | []any{"field", "IN", `hell"o`, `beautiful " world`}, 335 | }, 336 | { 337 | `field:[hell\"o world\]`, 338 | []any{"field", "IN", `hell"o`, `world]`}, 339 | }, 340 | { 341 | `field:[]`, 342 | []any{}, 343 | }, 344 | { 345 | `field:[ ]`, 346 | []any{}, 347 | }, 348 | { 349 | `(hell\"o AND \"world`, 350 | []any{"(", "msg", "LIKE", `hell"o`, "AND", "msg", "LIKE", `"world`, ")"}, 351 | }, 352 | { 353 | `(field:hello* OR world*) AND (field:[hello "beautiful world"] OR (field:99 AND field:[100 200 300`, 354 | []any{ 355 | "(", "field", "LIKE", "hello*", "OR", "msg", "LIKE", "world*", ")", "AND", 356 | "(", 357 | "field", "IN", "hello", "beautiful world", "OR", 358 | "(", "field", "=", float64(99), "AND", "field", "IN NUMERIC", float64(100), float64(200), float64(300), ")", 359 | ")", 360 | }, 361 | }, 362 | } 363 | for _, tt := range testCases { 364 | t.Run(tt.expr, func(t *testing.T) { 365 | runExprTest(t, tt) 366 | }) 367 | } 368 | } 369 | 370 | func runExprTest(t *testing.T, tt testExprData) { 371 | compiled, err := testExprBuilderFn(tt.expr) 372 | assert.NoError(t, err) 373 | assert.Equal(t, tt.parts, compiled, "exp=%s", tt.expr) 374 | } 375 | 
-------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/nidorx/sqlog 2 | 3 | go 1.23 4 | 5 | toolchain go1.23.1 6 | 7 | require ( 8 | github.com/pmezard/go-difflib v1.0.0 9 | github.com/stretchr/testify v1.9.0 10 | ) 11 | 12 | require ( 13 | github.com/davecgh/go-spew v1.1.1 // indirect 14 | gopkg.in/yaml.v3 v3.0.1 // indirect 15 | ) 16 | -------------------------------------------------------------------------------- /go.sum: -------------------------------------------------------------------------------- 1 | github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= 2 | github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 3 | github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= 4 | github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= 5 | github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= 6 | github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= 7 | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= 8 | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 9 | gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= 10 | gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= 11 | -------------------------------------------------------------------------------- /handler.go: -------------------------------------------------------------------------------- 1 | package sqlog 2 | 3 | import ( 4 | "bytes" 5 | "context" 6 | "io" 7 | "log/slog" 8 | "sync" 9 | ) 10 | 11 | const bbcap = 1 << 16 // 65536 12 | 13 | type Encoder func(w io.Writer, opts *slog.HandlerOptions) slog.Handler 14 | 15 | type HandlerConfig struct { 16 | Encoder Encoder // The slog.Handler used for JSON encoding 17 | Options *slog.HandlerOptions 18 | } 19 | 20 | // Handles the logic for writing logs 21 | type writer struct { 22 | buffer *bytes.Buffer 23 | encoder slog.Handler 24 | } 25 | 26 | type handler struct { 27 | mu sync.Mutex 28 | writers sync.Pool 29 | ingester Ingester 30 | config *HandlerConfig 31 | handlers []slog.Handler // Fanout handlers 32 | } 33 | 34 | // Creates a new handler with the given ingester and configuration 35 | func newHandler(ingester Ingester, config *HandlerConfig) *handler { 36 | if config == nil { 37 | config = &HandlerConfig{} 38 | } 39 | 40 | // Sets the default encoder to JSON if none is provided 41 | if config.Encoder == nil { 42 | config.Encoder = func(w io.Writer, opts *slog.HandlerOptions) slog.Handler { 43 | return slog.NewJSONHandler(w, opts) 44 | } 45 | } 46 | 47 | // Sets default handler options if none are provided 48 | if config.Options == nil { 49 | config.Options = &slog.HandlerOptions{ 50 | AddSource: false, 51 | Level: slog.LevelInfo, 52 | } 53 | } 54 | 55 | h := &handler{ 56 | config: config, 57 | ingester: ingester, 58 | } 59 | 60 | encoder := config.Encoder 61 | 62 | // Creates a new writer with a buffer and encoder 63 | h.writers.New = func() any { 64 | buf := bytes.NewBuffer(make([]byte, 0, 1024)) 65 | return &writer{buffer: buf, encoder: encoder(buf, config.Options)} 66 | } 67 | 68 | return h 69 | } 70 | 71 | // Adds handlers to the fanout list 72 | func (h *handler) fanout(handlers ...slog.Handler) { 73 | 
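	// The mutex only guards registration; Handle iterates h.handlers without
	// locking, so fanout should be called during setup, before logging starts.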
	h.mu.Lock()
74 | 	defer h.mu.Unlock()
75 | 	h.handlers = append(h.handlers, handlers...)
76 | }
77 | 
78 | // Processes a log record, encoding it and sending it to the ingester
79 | func (h *handler) Handle(ctx context.Context, record slog.Record) error {
80 | 
81 | 	// Sends the log record to fanout handlers
82 | 	for _, h2 := range h.handlers {
83 | 		if h2.Enabled(ctx, record.Level) {
84 | 			h2.Handle(ctx, record.Clone())
85 | 		}
86 | 	}
87 | 
88 | 	// Processes the log if it's enabled for this handler
89 | 	if h.enabledSelf(record.Level) {
90 | 		w := h.writers.Get().(*writer)
91 | 		w.buffer.Reset()
92 | 
93 | 		// Ensures the log time is in UTC
94 | 		record.Time = record.Time.UTC()
95 | 		if err := w.encoder.Handle(ctx, record); err != nil {
96 | 			return err
97 | 		}
98 | 
99 | 		// Ingests the log if there is data to write
100 | 		if w.buffer.Len() > 0 {
101 | 			if err := h.ingester.Ingest(record.Time, int8(record.Level), bytes.Clone(w.buffer.Bytes())); err != nil {
102 | 				return err
103 | 			}
104 | 		}
105 | 
106 | 		// Reuses the writer if its buffer capacity is below the limit
107 | 		if w.buffer.Cap() <= bbcap {
108 | 			h.writers.Put(w)
109 | 		}
110 | 	}
111 | 
112 | 	return nil
113 | }
114 | 
115 | // enabledSelf checks if the log level is greater than or equal to the minimum level
116 | func (h *handler) enabledSelf(l slog.Level) bool {
117 | 	minLevel := slog.LevelInfo
118 | 	if h.config.Options.Level != nil {
119 | 		minLevel = h.config.Options.Level.Level()
120 | 	}
121 | 	return l >= minLevel
122 | }
123 | 
124 | // Enabled checks whether the log level meets the minimum requirement
125 | func (h *handler) Enabled(ctx context.Context, l slog.Level) bool {
126 | 	if h.enabledSelf(l) {
127 | 		return true
128 | 	}
129 | 
130 | 	// Also check if any fanout handler is enabled
131 | 	for _, h2 := range h.handlers {
132 | 		if h2.Enabled(ctx, l) {
133 | 			return true
134 | 		}
135 | 	}
136 | 
137 | 	return false
138 | }
139 | 
140 | // WithAttrs returns a new Handler that includes the provided attributes
141 | func (h *handler) WithAttrs(attrs []slog.Attr) slog.Handler {
142 | 	o := &handler{
143 | 		config:   h.config,
144 | 		ingester: h.ingester,
145 | 	}
146 | 	parentPool := &h.writers
147 | 	o.writers.New = func() any {
148 | 		w := parentPool.Get().(*writer)
149 | 		w.encoder = w.encoder.WithAttrs(attrs)
150 | 		return w
151 | 	}
152 | 
153 | 	// Propagates attributes to fanout handlers
154 | 	for _, h2 := range h.handlers {
155 | 		o.handlers = append(o.handlers, h2.WithAttrs(attrs))
156 | 	}
157 | 	return o
158 | }
159 | 
160 | // WithGroup returns a new Handler with the provided group name
161 | func (h *handler) WithGroup(name string) slog.Handler {
162 | 	o := &handler{
163 | 		config:   h.config,
164 | 		ingester: h.ingester,
165 | 	}
166 | 	parentPool := &h.writers
167 | 	o.writers.New = func() any {
168 | 		w := parentPool.Get().(*writer)
169 | 		w.encoder = w.encoder.WithGroup(name)
170 | 		return w
171 | 	}
172 | 
173 | 	// Propagates the group to fanout handlers
174 | 	for _, h2 := range h.handlers {
175 | 		o.handlers = append(o.handlers, h2.WithGroup(name))
176 | 	}
177 | 	return o
178 | }
179 | 
--------------------------------------------------------------------------------
/handler_test.go:
--------------------------------------------------------------------------------
1 | package sqlog
2 | 
3 | import (
4 | 	"context"
5 | 	"log/slog"
6 | 	"testing"
7 | 	"time"
8 | 
9 | 	"github.com/stretchr/testify/assert"
10 | )
11 | 
12 | // Mock of the Ingester
13 | type testMockIngester struct {
14 | 	close  func() error
15 | 	ingest func(time time.Time, level int8, data []byte) error
16 | }
17 | 
18 | func 
(m *testMockIngester) Close() error { 19 | if m.close == nil { 20 | return nil 21 | } 22 | return m.close() 23 | } 24 | 25 | func (m *testMockIngester) Ingest(time time.Time, level int8, data []byte) error { 26 | if m.ingest == nil { 27 | return nil 28 | } 29 | return m.ingest(time, level, data) 30 | } 31 | 32 | func Test_Handler_Handle(t *testing.T) { 33 | 34 | var ( 35 | ingested bool 36 | ) 37 | 38 | ingester := &testMockIngester{ 39 | ingest: func(time time.Time, level int8, data []byte) error { 40 | ingested = true 41 | return nil 42 | }, 43 | } 44 | 45 | logger := slog.New(newHandler(ingester, nil)) 46 | 47 | logger.Info("test message") 48 | 49 | assert.True(t, ingested) 50 | } 51 | 52 | func Test_Handler_Enabled(t *testing.T) { 53 | ingester := new(testMockIngester) 54 | config := &HandlerConfig{ 55 | Options: &slog.HandlerOptions{ 56 | Level: slog.LevelWarn, 57 | }, 58 | } 59 | 60 | handler := newHandler(ingester, config) 61 | 62 | ctx := context.Background() 63 | 64 | assert.True(t, handler.Enabled(ctx, slog.LevelWarn)) 65 | assert.False(t, handler.Enabled(ctx, slog.LevelInfo)) 66 | } 67 | 68 | func Test_Handler_Fanout(t *testing.T) { 69 | 70 | var ( 71 | ingested int 72 | ingestCount = func(time time.Time, level int8, data []byte) error { 73 | ingested++ 74 | return nil 75 | } 76 | ) 77 | 78 | handler := newHandler(&testMockIngester{ingest: ingestCount}, &HandlerConfig{ 79 | Options: &slog.HandlerOptions{ 80 | Level: slog.LevelWarn, // will not ingest info msg 81 | }, 82 | }) 83 | logger := slog.New(handler) 84 | 85 | ctx := context.Background() 86 | assert.True(t, handler.Enabled(ctx, slog.LevelWarn)) 87 | assert.False(t, handler.Enabled(ctx, slog.LevelInfo)) 88 | 89 | handler1 := newHandler(&testMockIngester{ingest: ingestCount}, nil) 90 | handler2 := newHandler(&testMockIngester{ingest: ingestCount}, nil) 91 | 92 | handler.fanout(handler1, handler2) 93 | assert.Equal(t, 2, len(handler.handlers)) 94 | assert.True(t, handler.Enabled(ctx, slog.LevelWarn)) 95 | assert.True(t, handler.Enabled(ctx, slog.LevelInfo)) 96 | 97 | logger.Info("test message") 98 | assert.Equal(t, 2, ingested) // only fanout 99 | 100 | logger.Warn("test message") 101 | assert.Equal(t, 5, ingested) 102 | } 103 | -------------------------------------------------------------------------------- /http.go: -------------------------------------------------------------------------------- 1 | package sqlog 2 | 3 | import ( 4 | "embed" 5 | "encoding/json" 6 | "io/fs" 7 | "net/http" 8 | "net/url" 9 | "os" 10 | "path" 11 | "strconv" 12 | "strings" 13 | ) 14 | 15 | var ( 16 | // running "/demo/main.go" 17 | devDemo = false 18 | 19 | //go:embed web/* 20 | webFiles embed.FS 21 | ) 22 | 23 | func (l *sqlog) HttpHandler() http.Handler { 24 | 25 | var wfs http.FileSystem 26 | 27 | if devDemo { 28 | pwd, _ := os.Getwd() 29 | pwd = strings.TrimSuffix(pwd, "demo") + "web" 30 | wfs = http.Dir(pwd) 31 | } else { 32 | subfs, _ := fs.Sub(webFiles, "web") 33 | wfs = http.FS(subfs) 34 | } 35 | 36 | staticHandler := http.FileServer(wfs) 37 | 38 | return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { 39 | p := r.URL.Path 40 | 41 | if idx := strings.LastIndex(p, "/api/"); idx >= 0 { 42 | p = p[idx+len("/api/"):] 43 | switch p { 44 | case "ticks": 45 | l.ServeHTTPTicks(w, r) 46 | case "entries": 47 | l.ServeHTTPEntries(w, r) 48 | case "result": 49 | l.ServeHTTPResult(w, r) 50 | } 51 | } else { 52 | switch path.Ext(p) { 53 | case ".html", ".css", ".js": 54 | p = path.Base(p) 55 | default: 56 | p = "" 57 | } 58 | 59 | 
r.URL.Path = p
60 | 			staticHandler.ServeHTTP(w, r)
61 | 		}
62 | 	})
63 | }
64 | 
65 | // ServeHTTPTicks handles the ticks API.
66 | func (l *sqlog) ServeHTTPTicks(w http.ResponseWriter, r *http.Request) {
67 | 	var (
68 | 		q      = r.URL.Query()
69 | 		levels []string
70 | 	)
71 | 
72 | 	if level := q.Get("level"); level != "" {
73 | 		levels = strings.Split(level, ",")
74 | 	}
75 | 
76 | 	list, err := l.Ticks(&TicksInput{
77 | 		Expr:        q.Get("expr"),
78 | 		Level:       levels,
79 | 		EpochEnd:    getInt64(q, "epoch"),
80 | 		IntervalSec: getInt(q, "interval"),
81 | 		MaxResult:   getInt(q, "limit"),
82 | 	})
83 | 	sendJson(w, list, err)
84 | }
85 | 
86 | // ServeHTTPEntries handles the entries API (seek method / keyset pagination via {before, after}).
87 | func (l *sqlog) ServeHTTPEntries(w http.ResponseWriter, r *http.Request) {
88 | 	var (
89 | 		q      = r.URL.Query()
90 | 		levels []string
91 | 	)
92 | 
93 | 	if level := q.Get("level"); level != "" {
94 | 		levels = strings.Split(level, ",")
95 | 	}
96 | 
97 | 	entries, err := l.Entries(&EntriesInput{
98 | 		Expr:       q.Get("expr"),
99 | 		Level:      levels,
100 | 		Direction:  q.Get("dir"), // before, after
101 | 		EpochStart: getInt64(q, "epoch"),
102 | 		NanosStart: getInt(q, "nanos"),
103 | 		MaxResult:  getInt(q, "limit"),
104 | 	})
105 | 	sendJson(w, entries, err)
106 | }
107 | 
108 | // ServeHTTPResult handles the scheduled-result API.
109 | func (l *sqlog) ServeHTTPResult(w http.ResponseWriter, r *http.Request) {
110 | 	var q = r.URL.Query()
111 | 	result, err := l.Result(getInt32(q, "id"))
112 | 	sendJson(w, result, err)
113 | }
114 | 
115 | // ServeHTTPCancel cancels a scheduled result task.
116 | func (l *sqlog) ServeHTTPCancel(w http.ResponseWriter, r *http.Request) {
117 | 	var q = r.URL.Query()
118 | 	err := l.Cancel(getInt32(q, "id"))
119 | 	sendJson(w, nil, err)
120 | }
121 | 
122 | func sendJson(w http.ResponseWriter, data any, err error) {
123 | 	w.Header().Set("Content-Type", "application/json")
124 | 	if err != nil {
125 | 		w.WriteHeader(http.StatusBadRequest)
126 | 		json.NewEncoder(w).Encode(map[string]any{
127 | 			"error": err.Error(),
128 | 		})
129 | 	} else {
130 | 		if data != nil {
131 | 			json.NewEncoder(w).Encode(data)
132 | 		} else {
133 | 			w.WriteHeader(http.StatusNoContent)
134 | 		}
135 | 	}
136 | }
137 | 
138 | func getInt(q url.Values, key string) int {
139 | 	v, _ := strconv.Atoi(q.Get(key))
140 | 	return v
141 | }
142 | 
143 | func getInt64(q url.Values, key string) int64 {
144 | 	v, _ := strconv.ParseInt(q.Get(key), 10, 64)
145 | 	return v
146 | }
147 | 
148 | func getInt32(q url.Values, key string) int32 {
149 | 	v, _ := strconv.ParseInt(q.Get(key), 10, 32)
150 | 	return int32(v)
151 | }
152 | 
--------------------------------------------------------------------------------
/ingester.go:
--------------------------------------------------------------------------------
1 | package sqlog
2 | 
3 | import (
4 | 	"errors"
5 | 	"log/slog"
6 | 	"sync/atomic"
7 | 	"time"
8 | )
9 | 
10 | // ErrIngesterClosed is returned by methods of the `Ingester` interface
11 | // when they are called after the Ingester has already been closed.
12 | var ErrIngesterClosed = errors.New("[sqlog] the ingester was already closed")
13 | 
14 | // IngesterConfig contains configuration parameters for the ingester.
15 | type IngesterConfig struct {
16 | 	// Chunks is the size of the chunk buffer (default 3).
17 | 	Chunks uint8
18 | 
19 | 	// ChunkSize defines the maximum number of log records per chunk before
20 | 	// being persisted in storage (default|max 900).
21 | 	ChunkSize uint16
22 | 
23 | 	// MaxChunkSizeBytes sets the maximum desired chunk size in bytes.
24 | // If this size is exceeded, the chunk will be sent to storage (default 0). 25 | MaxChunkSizeBytes int64 26 | 27 | // MaxDirtyChunks specifies the maximum number of chunks with data in memory (default 50). 28 | // This prevents unlimited memory consumption in the event of a catastrophic failure 29 | // in writing logs. 30 | MaxDirtyChunks int 31 | 32 | // MaxFlushRetry defines the number of retry attempts to persist a chunk in case of failure (default 3). 33 | MaxFlushRetry int32 34 | 35 | // FlushAfterSec defines how long a chunk can remain inactive before being sent to storage (default 3 seconds). 36 | FlushAfterSec int 37 | 38 | // IntervalCheckMs sets the interval for chunk maintenance in milliseconds (default 100 ms). 39 | IntervalCheckMs int32 40 | } 41 | 42 | // Ingester is the interface that represents the behavior of the log ingester. 43 | type Ingester interface { 44 | // Close terminates the ingester and flushes any remaining log data. 45 | Close() (err error) 46 | 47 | // Ingest adds a new log entry with the given timestamp, level, and content. 48 | Ingest(t time.Time, level int8, content []byte) error 49 | } 50 | 51 | // ingester is the implementation of the Ingester interface, responsible for managing 52 | // log chunks and ensuring they are flushed to storage. 53 | type ingester struct { 54 | flushChunk *Chunk // The chunk that will be saved to the database 55 | writeChunk *Chunk // The chunk currently receiving log entries 56 | flushChunkId int32 // ID of the currently active flush chunk 57 | writeChunkId int32 // ID of the currently active write chunk 58 | config *IngesterConfig // Configuration options for the ingester 59 | storage Storage // The storage backend used to persist chunks 60 | quit chan struct{} // Channel used to signal termination 61 | shutdown chan struct{} // Channel used to signal shutdown completion 62 | } 63 | 64 | // NewIngester creates a new ingester with the given configuration and storage. 65 | // If no configuration is provided, default values are used. 66 | func NewIngester(config *IngesterConfig, storage Storage) (*ingester, error) { 67 | if config == nil { 68 | config = &IngesterConfig{} 69 | } 70 | 71 | // Set default values for config if necessary 72 | if config.Chunks <= 0 { 73 | config.Chunks = 3 74 | } 75 | 76 | if config.MaxDirtyChunks <= int(config.Chunks) { 77 | config.MaxDirtyChunks = 50 78 | } 79 | 80 | if config.FlushAfterSec <= 0 { 81 | config.FlushAfterSec = 3 82 | } 83 | 84 | if config.MaxFlushRetry <= 0 { 85 | config.MaxFlushRetry = 3 86 | } 87 | 88 | if config.ChunkSize <= 0 || config.ChunkSize > 900 { 89 | config.ChunkSize = 900 90 | } 91 | 92 | if config.IntervalCheckMs <= 0 { 93 | config.IntervalCheckMs = 100 94 | } 95 | 96 | root := NewChunk(int32(config.ChunkSize)) 97 | root.Init(config.Chunks) 98 | 99 | i := &ingester{ 100 | config: config, 101 | writeChunkId: root.id, 102 | flushChunk: root, 103 | writeChunk: root, 104 | storage: storage, 105 | quit: make(chan struct{}), 106 | shutdown: make(chan struct{}), 107 | } 108 | 109 | // Start the routine to regularly check chunk states 110 | go i.routineCheck() 111 | 112 | return i, nil 113 | } 114 | 115 | // Ingest adds a new log entry to the active write chunk. If the chunk becomes full, 116 | // the ingester switches to a new chunk. 
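//
// A minimal usage sketch (editor's addition, not part of the original source;
// assumes a Storage implementation `store` and an ingester built from it):
//
//	ing, _ := NewIngester(nil, store) // nil config falls back to the defaults above
//	err := ing.Ingest(time.Now(), int8(slog.LevelInfo), []byte(`{"msg":"hello"}`))
//	_ = err // Ingest currently always returns nil; checked for forward compatibility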
117 | func (i *ingester) Ingest(t time.Time, level int8, content []byte) error { 118 | lastWriteId := i.writeChunkId 119 | chunk, isFull := i.writeChunk.Put(&Entry{t, level, content}) 120 | if isFull && atomic.CompareAndSwapInt32(&i.writeChunkId, lastWriteId, chunk.id) { 121 | // The chunk is full, switch to the next one 122 | i.writeChunk = chunk 123 | } 124 | return nil 125 | } 126 | 127 | // routineCheck is responsible for periodically checking the status of chunks, 128 | // flushing them if necessary, and managing shutdown procedures. 129 | func (i *ingester) routineCheck() { 130 | defer close(i.shutdown) 131 | 132 | d := time.Duration(i.config.IntervalCheckMs) 133 | tick := time.NewTicker(d) 134 | defer tick.Stop() 135 | 136 | for { 137 | select { 138 | case <-tick.C: 139 | // Perform a routine check of chunk states 140 | i.doRoutineCheck() 141 | tick.Reset(d) 142 | 143 | case <-i.quit: 144 | // Flush all pending logs when termination is requested 145 | tick.Stop() 146 | 147 | chunk := i.flushChunk 148 | chunk.Lock() 149 | 150 | t := 0 151 | 152 | // Attempt to flush all chunks 153 | for { 154 | if chunk.Empty() { 155 | break 156 | } 157 | 158 | if chunk.Ready() { 159 | // If the chunk is ready to be written to storage, flush it 160 | if err := i.storage.Flush(chunk); err != nil { 161 | retries := atomic.AddInt32(&chunk.retries, 1) 162 | slog.Error("[sqlog] error writing chunk", slog.Any("error", err)) 163 | 164 | // If retries exceed the limit, move to the next chunk 165 | if retries > i.config.MaxFlushRetry { 166 | chunk = chunk.Next() 167 | chunk.Lock() 168 | } else { 169 | time.Sleep(10 * time.Millisecond) 170 | } 171 | } else { 172 | chunk = chunk.Next() 173 | chunk.Lock() 174 | } 175 | } else { 176 | // Unexpected state, continue checking next chunk 177 | t++ 178 | if t > 3 { 179 | chunk = chunk.Next() 180 | chunk.Lock() 181 | continue 182 | } 183 | t = 0 184 | chunk.Lock() 185 | time.Sleep(2 * time.Millisecond) 186 | } 187 | } 188 | 189 | i.flushChunk = chunk 190 | atomic.StoreInt32(&i.flushChunkId, chunk.id) 191 | 192 | // Close the storage after flushing all logs 193 | if err := i.storage.Close(); err != nil { 194 | slog.Warn( 195 | "[sqlog] error closing storage", 196 | slog.Any("error", err), 197 | ) 198 | } 199 | 200 | return 201 | } 202 | } 203 | } 204 | 205 | func (i *ingester) getFlushChunk() { 206 | 207 | } 208 | 209 | // doRoutineCheck handles the periodic maintenance of chunks, flushing them if they 210 | // meet the conditions for size or age, and ensuring memory usage stays within limits. 
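//
// Editor's illustration of the two flush triggers (hypothetical values): with
// FlushAfterSec=3 and MaxChunkSizeBytes=1<<20, a chunk is locked for flushing
// once it has been idle for more than 3s or grows past 1 MiB:
//
//	cfg := &IngesterConfig{FlushAfterSec: 3, MaxChunkSizeBytes: 1 << 20}
//	ing, _ := NewIngester(cfg, store) // store: any Storage implementation (assumed)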
211 | func (i *ingester) doRoutineCheck() { 212 | for { 213 | chunk := i.flushChunk 214 | if chunk.Empty() { 215 | break 216 | } 217 | 218 | // Flush the chunk if it's ready to be persisted 219 | if chunk.Ready() { 220 | if err := i.storage.Flush(chunk); err != nil { 221 | retries := atomic.AddInt32(&chunk.retries, 1) 222 | slog.Error("[sqlog] error writing chunk", slog.Any("error", err)) 223 | 224 | if retries > i.config.MaxFlushRetry { 225 | chunk.Init(i.config.Chunks + 1) 226 | } else { 227 | break 228 | } 229 | } else { 230 | chunk.Init(i.config.Chunks + 1) 231 | } 232 | } else { 233 | // If the chunk is inactive for too long or exceeds the size limit, prepare it for flushing 234 | if int(chunk.TTL().Seconds()) > i.config.FlushAfterSec { 235 | chunk.Lock() // Lock the chunk for flushing in the next routine 236 | chunk.Init(i.config.Chunks) 237 | } else if i.config.MaxChunkSizeBytes > 0 && chunk.Size() > i.config.MaxChunkSizeBytes { 238 | chunk.Lock() // Lock the chunk for flushing in the next routine 239 | chunk.Init(i.config.Chunks) 240 | } 241 | break 242 | } 243 | i.flushChunk = i.flushChunk.Next() 244 | atomic.StoreInt32(&i.flushChunkId, i.flushChunk.id) 245 | } 246 | 247 | // Limit memory consumption by discarding old chunks if necessary 248 | if !i.flushChunk.Empty() && i.flushChunk.Depth() > i.config.MaxDirtyChunks { 249 | for { 250 | if i.flushChunk.Depth() > i.config.MaxDirtyChunks { 251 | i.flushChunk = i.flushChunk.Next() 252 | atomic.StoreInt32(&i.flushChunkId, i.flushChunk.id) 253 | } else { 254 | break 255 | } 256 | } 257 | i.flushChunk.Init(i.config.Chunks) 258 | } 259 | } 260 | 261 | // Close flushes any pending log data and closes the storage. 262 | func (i *ingester) Close() (err error) { 263 | defer func() { 264 | // Always recover, as a panic could be raised if `i.quit` was closed, 265 | // indicating the method was called more than once. 
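		// Calling Close twice is therefore safe; the second call just
		// reports the sentinel error (editor's note):
		//
		//	_ = ing.Close()    // flushes pending chunks and closes storage
		//	err := ing.Close() // err == ErrIngesterClosed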
266 | if rec := recover(); rec != nil { 267 | err = ErrIngesterClosed 268 | } 269 | }() 270 | 271 | close(i.quit) 272 | 273 | <-i.shutdown 274 | 275 | return nil 276 | } 277 | -------------------------------------------------------------------------------- /ingester_test.go: -------------------------------------------------------------------------------- 1 | package sqlog 2 | 3 | import ( 4 | "errors" 5 | "sync" 6 | "sync/atomic" 7 | "testing" 8 | "time" 9 | 10 | "github.com/stretchr/testify/assert" 11 | ) 12 | 13 | type testMockStorage struct { 14 | close func() error 15 | flush func(chunk *Chunk) error 16 | } 17 | 18 | func (m *testMockStorage) Flush(chunk *Chunk) error { 19 | if m.flush != nil { 20 | return m.flush(chunk) 21 | } 22 | return nil 23 | } 24 | 25 | func (m *testMockStorage) Close() error { 26 | if m.close != nil { 27 | return m.close() 28 | } 29 | return nil 30 | } 31 | 32 | func Test_Ingester_FlushAfterSec(t *testing.T) { 33 | 34 | var ( 35 | mu sync.Mutex 36 | chunk *Chunk 37 | ) 38 | 39 | storage := &testMockStorage{ 40 | flush: func(c *Chunk) error { 41 | mu.Lock() 42 | defer mu.Unlock() 43 | chunk = c 44 | return nil 45 | }, 46 | } 47 | 48 | config := &IngesterConfig{ 49 | Chunks: 3, 50 | FlushAfterSec: 1, 51 | } 52 | 53 | ingester, _ := NewIngester(config, storage) 54 | defer ingester.Close() 55 | 56 | ingester.Ingest(time.Now(), 0, []byte(`{"msg":"test"}`)) 57 | 58 | waitMax(3*time.Second, func() bool { 59 | mu.Lock() 60 | defer mu.Unlock() 61 | return (chunk != nil) 62 | }) 63 | 64 | assert.NotNil(t, chunk, "Storage.Flush not called") 65 | } 66 | 67 | func Test_Ingester_MaxChunkSizeBytes(t *testing.T) { 68 | 69 | var ( 70 | mu sync.Mutex 71 | chunk *Chunk 72 | ) 73 | 74 | storage := &testMockStorage{ 75 | flush: func(c *Chunk) error { 76 | mu.Lock() 77 | defer mu.Unlock() 78 | chunk = c 79 | return nil 80 | }, 81 | } 82 | 83 | config := &IngesterConfig{ 84 | Chunks: 3, 85 | FlushAfterSec: 50, 86 | MaxChunkSizeBytes: 2, 87 | } 88 | 89 | ingester, _ := NewIngester(config, storage) 90 | defer ingester.Close() 91 | 92 | ingester.Ingest(time.Now(), 0, []byte(`{"msg":"test"}`)) 93 | 94 | waitMax(3*time.Second, func() bool { 95 | mu.Lock() 96 | defer mu.Unlock() 97 | return (chunk != nil) 98 | }) 99 | 100 | assert.NotNil(t, chunk, "Storage.Flush not called") 101 | } 102 | 103 | func Test_Ingester_MaxFlushRetry(t *testing.T) { 104 | 105 | var ( 106 | mu sync.Mutex 107 | chunk *Chunk 108 | ) 109 | 110 | storage := &testMockStorage{ 111 | flush: func(c *Chunk) error { 112 | mu.Lock() 113 | defer mu.Unlock() 114 | chunk = c 115 | return errors.New("test") 116 | }, 117 | } 118 | 119 | config := &IngesterConfig{ 120 | Chunks: 3, 121 | FlushAfterSec: 1, 122 | MaxFlushRetry: 1, 123 | } 124 | 125 | ingester, _ := NewIngester(config, storage) 126 | defer ingester.Close() 127 | 128 | ingester.Ingest(time.Now(), 0, []byte(`{"msg":"test"}`)) 129 | 130 | waitMax(5*time.Second, func() bool { 131 | mu.Lock() 132 | defer mu.Unlock() 133 | return (chunk != nil && chunk.Retries() > 1) 134 | }) 135 | 136 | assert.Equal(t, int32(2), chunk.Retries()) 137 | } 138 | 139 | func Test_Ingester_MaxDirtyChunks(t *testing.T) { 140 | 141 | storage := &testMockStorage{ 142 | flush: func(c *Chunk) error { 143 | return errors.New("test") 144 | }, 145 | } 146 | 147 | config := &IngesterConfig{ 148 | Chunks: 3, 149 | ChunkSize: 4, 150 | FlushAfterSec: 50, 151 | MaxDirtyChunks: 5, 152 | MaxFlushRetry: 50, // << misconfigured parameter 153 | } 154 | 155 | ingester, _ := NewIngester(config, storage) 156 | defer 
ingester.Close()
157 | 
158 | 	numEntries := 40
159 | 	for i := 0; i < numEntries; i++ {
160 | 		ingester.Ingest(time.Now(), 0, []byte(`{"msg":"test"}`))
161 | 	}
162 | 
163 | 	waitMax(5*time.Second, func() bool {
164 | 		return atomic.LoadInt32(&ingester.flushChunkId) == 10
165 | 	})
166 | 
167 | 	ingester.Close()
168 | 
169 | 	// lastChunk.id = 10 = (numEntries/ChunkSize)
170 | 	assert.Equal(t, int32(10), atomic.LoadInt32(&ingester.flushChunkId))
171 | }
172 | 
173 | func Test_Ingester_Close(t *testing.T) {
174 | 	storage := new(testMockStorage)
175 | 	config := &IngesterConfig{}
176 | 	ingester, _ := NewIngester(config, storage)
177 | 	defer ingester.Close()
178 | 
179 | 	err := ingester.Close()
180 | 	assert.NoError(t, err)
181 | }
182 | 
183 | func Test_Ingester_Close_Flush(t *testing.T) {
184 | 
185 | 	var (
186 | 		chunk  *Chunk
187 | 		closed bool
188 | 	)
189 | 
190 | 	storage := &testMockStorage{
191 | 		flush: func(c *Chunk) error {
192 | 			chunk = c
193 | 			return nil
194 | 		},
195 | 		close: func() error {
196 | 			closed = true
197 | 			return nil
198 | 		},
199 | 	}
200 | 
201 | 	config := &IngesterConfig{
202 | 		Chunks:        3,
203 | 		FlushAfterSec: 1,
204 | 	}
205 | 
206 | 	ingester, _ := NewIngester(config, storage)
207 | 	defer ingester.Close()
208 | 
209 | 	ingester.Ingest(time.Now(), 0, []byte(`{"msg":"test"}`))
210 | 
211 | 	err := ingester.Close()
212 | 	assert.NoError(t, err)
213 | 
214 | 	assert.NotNil(t, chunk, "Storage.Flush not called")
215 | 	assert.True(t, closed, "Storage.Close not called")
216 | }
217 | 
218 | func Test_Ingester_Close_MaxFlushRetry(t *testing.T) {
219 | 
220 | 	var (
221 | 		chunk  *Chunk
222 | 		closed bool
223 | 	)
224 | 
225 | 	storage := &testMockStorage{
226 | 		flush: func(c *Chunk) error {
227 | 			chunk = c
228 | 			return errors.New("test")
229 | 		},
230 | 		close: func() error {
231 | 			closed = true
232 | 			return nil
233 | 		},
234 | 	}
235 | 
236 | 	config := &IngesterConfig{
237 | 		Chunks:        3,
238 | 		FlushAfterSec: 1,
239 | 		MaxFlushRetry: 1,
240 | 	}
241 | 
242 | 	ingester, _ := NewIngester(config, storage)
243 | 	defer ingester.Close()
244 | 
245 | 	ingester.Ingest(time.Now(), 0, []byte(`{"msg":"test"}`))
246 | 
247 | 	err := ingester.Close()
248 | 	assert.NoError(t, err)
249 | 
250 | 	assert.True(t, closed, "Storage.Close not called")
251 | 	assert.Equal(t, int32(2), chunk.Retries())
252 | }
253 | 
254 | func waitMax(max time.Duration, condition func() bool) {
255 | 	init := time.Now()
256 | 	for {
257 | 		if condition() || time.Since(init) > max {
258 | 			break
259 | 		}
260 | 		time.Sleep(500 * time.Millisecond)
261 | 	}
262 | }
263 | 
--------------------------------------------------------------------------------
/memory/README.md:
--------------------------------------------------------------------------------
1 | # InMemory Storage for SQLog
2 | 
3 | WIP! Feel free to contribute. All the expression processing logic is already working (see memory_expr_test.go); all that's missing is the storage logic (an in-memory structure that allows fast searches). Candidate building blocks:
4 | 
5 | - LSM Trees
6 | - Bloom filters
7 | - HyperLogLog
--------------------------------------------------------------------------------
/memory/memory.go:
--------------------------------------------------------------------------------
1 | package memory
2 | 
3 | import "github.com/nidorx/sqlog"
4 | 
5 | type MemoryConfig struct {
6 | 	// Each time the current in-memory log reaches MaxSizeMB,
7 | 	// it will be archived (default 20).
8 | 	MaxSizeMB int32
9 | 
10 | 	// Closes databases that have been inactive (default 30)
11 | 	MaxAgeSec int64
12 | 
13 | 	// Storage maintenance interval in seconds (default 5)
14 | 	IntervalSizeCheckSec int32
15 | }
16 | 
17 | // MemoryStorage is a non-optimized storage implementation that keeps logs in memory.
18 | // Useful for systems with limited storage or for debugging
19 | // logs without disk persistence.
20 | type MemoryStorage struct {
21 | 	sqlog.Storage
22 | 	sqlog.StorageWithApi
23 | }
24 | 
25 | func (s *MemoryStorage) Flush(chunk *sqlog.Chunk) error {
26 | 	// var (
27 | 	// 	epochStart = chunk.First().Unix()
28 | 	// 	epochEnd   = chunk.Last().Unix()
29 | 	// )
30 | 	return nil
31 | }
32 | 
33 | func (s *MemoryStorage) Close() error {
34 | 	return nil
35 | }
36 | 
37 | func (s *MemoryStorage) Ticks(input *sqlog.TicksInput) (*sqlog.Output, error) {
38 | 	return nil, nil
39 | }
40 | 
41 | func (s *MemoryStorage) Entries(input *sqlog.EntriesInput) (*sqlog.Output, error) {
42 | 	return nil, nil
43 | }
44 | 
--------------------------------------------------------------------------------
/memory/memory_expr.go:
--------------------------------------------------------------------------------
1 | package memory
2 | 
3 | import (
4 | 	"encoding/json"
5 | 	"strconv"
6 | 	"unsafe"
7 | 
8 | 	"github.com/nidorx/sqlog"
9 | )
10 | 
11 | var (
12 | 	MemoryExprBuilderFn = sqlog.NewExprBuilder(func(expression string) (sqlog.ExprBuilder[MemoryExpr], string) {
13 | 		return &MemoryExprBuilder{}, expression
14 | 	})
15 | )
16 | 
17 | // MemoryExpr is the function type that represents the expression to be evaluated in memory.
18 | type MemoryExpr func(e *sqlog.Entry) bool
19 | 
20 | // MemoryExprBuilder is used to build memory expressions.
21 | type MemoryExprBuilder struct {
22 | 	stack      []memExpr
23 | 	groupStack [][]memExpr // Stack to handle grouping of expressions
24 | }
25 | 
26 | // Build returns the final composed expression to be applied to the Entry.
27 | func (m *MemoryExprBuilder) Build() MemoryExpr {
28 | 	stack := m.stack
29 | 	if len(stack) == 0 {
30 | 		return func(e *sqlog.Entry) bool {
31 | 			return true
32 | 		}
33 | 	}
34 | 	return func(e *sqlog.Entry) bool {
35 | 		var j map[string]any
36 | 		if err := json.Unmarshal(e.Content, &j); err != nil {
37 | 			return false
38 | 		}
39 | 		for _, expr := range stack {
40 | 			if !expr.eval(e, j) {
41 | 				return false
42 | 			}
43 | 		}
44 | 		return true
45 | 	}
46 | }
47 | 
48 | func (m *MemoryExprBuilder) add(expr memExpr) {
49 | 	if len(m.stack) > 0 {
50 | 		last := m.stack[len(m.stack)-1]
51 | 		if exprAnd, ok := last.(*memExprAND); ok {
52 | 			exprAnd.stack = append(exprAnd.stack, expr)
53 | 			return
54 | 		}
55 | 
56 | 		if exprOr, ok := last.(*memExprOR); ok {
57 | 			exprOr.stack = append(exprOr.stack, expr)
58 | 			return
59 | 		}
60 | 	}
61 | 
62 | 	m.stack = append(m.stack, expr)
63 | }
64 | 
65 | // Operator appends logical operators (AND, OR) between expressions.
66 | func (m *MemoryExprBuilder) Operator(op string) {
67 | 	if len(m.stack) == 0 {
68 | 		return
69 | 	}
70 | 
71 | 	last := m.stack[len(m.stack)-1]
72 | 
73 | 	if op == "AND" {
74 | 		if _, ok := last.(*memExprAND); !ok {
75 | 			m.stack[len(m.stack)-1] = &memExprAND{
76 | 				stack: []memExpr{last},
77 | 			}
78 | 		}
79 | 	} else {
80 | 		if _, ok := last.(*memExprOR); !ok {
81 | 			m.stack[len(m.stack)-1] = &memExprOR{
82 | 				stack: []memExpr{last},
83 | 			}
84 | 		}
85 | 	}
86 | }
87 | 
88 | // GroupStart creates a new group of expressions.
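//
// Editor's sketch of how the parser is assumed to drive grouping for an
// expression such as `a AND (b OR c)`; the resulting MemoryExpr matches an
// entry only if both sides hold:
//
//	b := &MemoryExprBuilder{}
//	b.Text("msg", "a", false, false)
//	b.Operator("AND")
//	b.GroupStart()
//	b.Text("msg", "b", false, false)
//	b.Operator("OR")
//	b.Text("msg", "c", false, false)
//	b.GroupEnd()
//	expr := b.Build()
//	ok := expr(&sqlog.Entry{Content: []byte(`{"msg":"a b"}`)}) // true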
89 | func (m *MemoryExprBuilder) GroupStart() { 90 | // Push the current stack to the group stack, and start a new group 91 | m.groupStack = append(m.groupStack, m.stack) 92 | m.stack = nil // Start a fresh group 93 | } 94 | 95 | // GroupEnd finalizes the grouping of expressions and merges with the parent stack. 96 | func (m *MemoryExprBuilder) GroupEnd() { 97 | if len(m.groupStack) == 0 { 98 | return // No group to end 99 | } 100 | 101 | // Get the last group (current group) 102 | groupExpr := &memExprGroup{stack: m.stack} 103 | 104 | // Restore the previous stack 105 | lastGroupIndex := len(m.groupStack) - 1 106 | m.stack = m.groupStack[lastGroupIndex] // Restore parent group 107 | m.groupStack = m.groupStack[:lastGroupIndex] // Remove the current group from stack 108 | 109 | // Add the group expression to the parent stack 110 | m.add(groupExpr) 111 | } 112 | 113 | // Text checks if a text field in the log matches the specified term. 114 | func (m *MemoryExprBuilder) Text(field, term string, isSequence, isWildcard bool) { 115 | m.add(&memExprText{ 116 | field: field, 117 | term: term, 118 | isSequence: isSequence, 119 | isWildcard: isWildcard, 120 | }) 121 | } 122 | 123 | // Number checks if a numeric field matches the condition with the specified value. 124 | func (m *MemoryExprBuilder) Number(field, condition string, value float64) { 125 | m.add(&memExprNumber{ 126 | field: field, 127 | condition: condition, 128 | value: value, 129 | }) 130 | } 131 | 132 | // Between checks if a numeric field is between two values. 133 | func (m *MemoryExprBuilder) Between(field string, x, y float64) { 134 | m.add(&memExprBetween{ 135 | field: field, 136 | x: x, 137 | y: y, 138 | }) 139 | } 140 | 141 | // TextIn checks if a text field matches one of the values in a list. 142 | func (m *MemoryExprBuilder) TextIn(field string, values []string) { 143 | m.add(&memExprTextIn{ 144 | field: field, 145 | values: values, 146 | }) 147 | } 148 | 149 | // NumberIn checks if a numeric field matches one of the values in a list. 
150 | func (m *MemoryExprBuilder) NumberIn(field string, values []float64) { 151 | m.add(&memExprNumberIn{ 152 | field: field, 153 | values: values, 154 | }) 155 | } 156 | 157 | type memExpr interface { 158 | eval(e *sqlog.Entry, j map[string]any) bool 159 | } 160 | 161 | type memExprGroup struct { 162 | stack []memExpr 163 | } 164 | 165 | func (m *memExprGroup) eval(e *sqlog.Entry, j map[string]any) bool { 166 | for _, expr := range m.stack { 167 | if !expr.eval(e, j) { 168 | return false 169 | } 170 | } 171 | return true 172 | } 173 | 174 | type memExprOR struct { 175 | stack []memExpr 176 | } 177 | 178 | func (m *memExprOR) eval(e *sqlog.Entry, j map[string]any) bool { 179 | for _, expr := range m.stack { 180 | if expr.eval(e, j) { 181 | return true 182 | } 183 | } 184 | return false 185 | } 186 | 187 | type memExprAND struct { 188 | stack []memExpr 189 | } 190 | 191 | func (m *memExprAND) eval(e *sqlog.Entry, j map[string]any) bool { 192 | for _, expr := range m.stack { 193 | if !expr.eval(e, j) { 194 | return false 195 | } 196 | } 197 | return true 198 | } 199 | 200 | type memExprText struct { 201 | field string 202 | term string 203 | isSequence bool 204 | isWildcard bool 205 | } 206 | 207 | func (m *memExprText) eval(e *sqlog.Entry, j map[string]any) bool { 208 | var ( 209 | fieldValue string 210 | field = m.field 211 | term = m.term 212 | isSequence = m.isSequence 213 | isWildcard = m.isWildcard 214 | ) 215 | 216 | fieldValue, valid := memExprGetText(j, field) 217 | if !valid { 218 | return false 219 | } 220 | 221 | if isSequence { 222 | if isWildcard { 223 | return wildcardMatch(term, fieldValue) 224 | } 225 | return fieldValue == term 226 | } 227 | 228 | if isWildcard { 229 | return wildcardMatch(term, fieldValue) 230 | } 231 | return wildcardMatch("*"+term+"*", fieldValue) 232 | } 233 | 234 | type memExprNumber struct { 235 | field string 236 | condition string 237 | value float64 238 | } 239 | 240 | func (m *memExprNumber) eval(e *sqlog.Entry, j map[string]any) bool { 241 | var ( 242 | fieldValue float64 243 | field = m.field 244 | condition = m.condition 245 | value = m.value 246 | ) 247 | 248 | fieldValue, valid := memExprGetNumber(j, field) 249 | if !valid { 250 | return false 251 | } 252 | 253 | switch condition { 254 | case "=": 255 | return fieldValue == value 256 | case ">": 257 | return fieldValue > value 258 | case "<": 259 | return fieldValue < value 260 | case ">=": 261 | return fieldValue >= value 262 | case "<=": 263 | return fieldValue <= value 264 | } 265 | 266 | return false 267 | } 268 | 269 | type memExprBetween struct { 270 | field string 271 | x, y float64 272 | } 273 | 274 | func (m *memExprBetween) eval(e *sqlog.Entry, j map[string]any) bool { 275 | var ( 276 | fieldValue float64 277 | field = m.field 278 | x = m.x 279 | y = m.y 280 | ) 281 | 282 | fieldValue, valid := memExprGetNumber(j, field) 283 | if !valid { 284 | return false 285 | } 286 | 287 | return fieldValue >= x && fieldValue <= y 288 | } 289 | 290 | type memExprTextIn struct { 291 | field string 292 | values []string 293 | } 294 | 295 | func (m *memExprTextIn) eval(e *sqlog.Entry, j map[string]any) bool { 296 | var ( 297 | fieldValue string 298 | field = m.field 299 | values = m.values 300 | ) 301 | 302 | fieldValue, valid := memExprGetText(j, field) 303 | if !valid { 304 | return false 305 | } 306 | 307 | for _, v := range values { 308 | if fieldValue == v { 309 | return true 310 | } 311 | } 312 | return false 313 | } 314 | 315 | type memExprNumberIn struct { 316 | field string 317 | values 
[]float64
318 | }
319 | 
320 | func (m *memExprNumberIn) eval(e *sqlog.Entry, j map[string]any) bool {
321 | 	var (
322 | 		fieldValue float64
323 | 		field      = m.field
324 | 		values     = m.values
325 | 	)
326 | 
327 | 	fieldValue, valid := memExprGetNumber(j, field)
328 | 	if !valid {
329 | 		return false
330 | 	}
331 | 
332 | 	for _, v := range values {
333 | 		if fieldValue == v {
334 | 			return true
335 | 		}
336 | 	}
337 | 	return false
338 | }
339 | 
340 | func memExprGetNumber(j map[string]any, field string) (fieldValue float64, valid bool) {
341 | 	if v, ok := j[field]; !ok {
342 | 		return 0, false
343 | 	} else {
344 | 		switch tv := v.(type) {
345 | 		case float64:
346 | 			fieldValue = tv
347 | 		case string:
348 | 			if n, err := strconv.ParseFloat(tv, 64); err != nil {
349 | 				return 0, false
350 | 			} else {
351 | 				fieldValue = n
352 | 			}
353 | 		default:
354 | 			return 0, false
355 | 		}
356 | 	}
357 | 
358 | 	return fieldValue, true
359 | }
360 | 
361 | func memExprGetText(j map[string]any, field string) (fieldValue string, valid bool) {
362 | 	if v, ok := j[field]; !ok {
363 | 		return "", false
364 | 	} else {
365 | 		switch tv := v.(type) {
366 | 		case string:
367 | 			fieldValue = tv
368 | 		default:
369 | 			// cache json.Marshal during this execution
370 | 			if s, ok := j["___cs"+field]; !ok {
371 | 				if b, err := json.Marshal(tv); err != nil {
372 | 					j["___cs"+field] = ""
373 | 					return "", false
374 | 				} else {
375 | 					fieldValue = unsafe.String(unsafe.SliceData(b), len(b))
376 | 					j["___cs"+field] = fieldValue
377 | 				}
378 | 				// fall through with the freshly marshaled value
379 | 			} else {
380 | 				fieldValue = s.(string)
381 | 			}
382 | 		}
383 | 	}
384 | 	if fieldValue == "" {
385 | 		return "", false
386 | 	}
387 | 
388 | 	return fieldValue, true
389 | }
390 | 
--------------------------------------------------------------------------------
/memory/memory_expr_test.go:
--------------------------------------------------------------------------------
1 | package memory
2 | 
3 | import (
4 | 	"encoding/json"
5 | 	"testing"
6 | 
7 | 	"github.com/nidorx/sqlog"
8 | 
9 | 	"github.com/stretchr/testify/assert"
10 | )
11 | 
12 | var testExpMemoryEntries = []*sqlog.Entry{}
13 | 
14 | func init() {
15 | 	for _, log := range testExpMemoryLogs {
16 | 		testExpMemoryEntries = append(testExpMemoryEntries, &sqlog.Entry{
17 | 			Content: []byte(log),
18 | 		})
19 | 	}
20 | }
21 | 
22 | type testExprMemoryData struct {
23 | 	expr string
24 | 	ids  []int
25 | }
26 | 
27 | // json entries
28 | var testExpMemoryLogs = []string{
29 | 	`{"id": 1, "msg":"hello"}`,
30 | 	`{"id": 2, "msg":"Hello"}`,
31 | 	`{"id": 3, "msg":"world"}`,
32 | 	`{"id": 4, "msg":"World"}`,
33 | 	`{"id": 5, "msg":"beautiful"}`,
34 | 	`{"id": 6, "msg":"Beautiful"}`,
35 | 	`{"id": 7, "msg":"hello world"}`,
36 | 	`{"id": 8, "msg":"hello world!"}`,
37 | 	`{"id": 9, "msg":"Hello World"}`,
38 | 	`{"id": 10, "msg":"Hello World!"}`,
39 | 	`{"id": 11, "msg":"hello beautiful world"}`,
40 | 	`{"id": 12, "msg":"hello beautiful world!"}`,
41 | 	`{"id": 13, "msg":"Hello Beautiful World"}`,
42 | 	`{"id": 14, "msg":"Hello Beautiful World!"}`,
43 | 	`{"id": 15, "msg":"", "field":"hello"}`,
44 | 	`{"id": 16, "msg":"", "field":"Hello"}`,
45 | 	`{"id": 17, "msg":"", "field":"world"}`,
46 | 	`{"id": 18, "msg":"", "field":"World"}`,
47 | 	`{"id": 19, "msg":"", "field":"beautiful"}`,
48 | 	`{"id": 20, "msg":"", "field":"Beautiful"}`,
49 | 	`{"id": 21, "msg":"", "field":"hello world"}`,
50 | 	`{"id": 22, "msg":"", "field":"hello world!"}`,
51 | 	`{"id": 23, "msg":"", "field":"Hello World"}`,
52 | 	`{"id": 24, "msg":"", "field":"Hello World!"}`,
53 | 	`{"id": 25, "msg":"", "field":"hello beautiful world"}`,
54 | 	`{"id": 
26, "msg":"", "field":"hello beautiful world!"}`, 55 | `{"id": 27, "msg":"", "field":"Hello Beautiful World"}`, 56 | `{"id": 28, "msg":"", "field":"Hello Beautiful World!"}`, 57 | `{"id": 30, "msg":"", "field":98}`, 58 | `{"id": 31, "msg":"", "field":"98"}`, 59 | `{"id": 32, "msg":"", "field":99}`, 60 | `{"id": 33, "msg":"", "field":"99"}`, 61 | `{"id": 34, "msg":"", "field":400}`, 62 | `{"id": 35, "msg":"", "field":"400"}`, 63 | `{"id": 36, "msg":"", "field":499}`, 64 | `{"id": 37, "msg":"", "field":"499"}`, 65 | `{"id": 38, "msg":"", "field":500}`, 66 | `{"id": 39, "msg":"", "field":"500"}`, 67 | `{"id": 40, "msg":"beautiful world"}`, 68 | `{"id": 41, "msg":"", "field":"beautiful world"}`, 69 | `{"id": 42, "msg":"world", "field":"hello"}`, 70 | `{"id": 43, "msg":"beautiful", "field":"hello world"}`, 71 | `{"id": 44, "msg":"beautiful", "field":"hello world", "count":99}`, 72 | `{"id": 45, "msg":"beautiful", "field":"hello world", "count":99, "status":200, "code":300}`, 73 | `{"id": 46, "msg":"beautiful", "field":"hello world", "count":99, "status":404, "code":300}`, 74 | `{"id": 47, "msg":"beautiful", "field":"hello world", "count":99, "status":404, "code":450}`, 75 | `{"id": 50, "msg":"hell\"o"}`, 76 | `{"id": 51, "msg":"hello \" world"}`, 77 | `{"id": 52, "msg":"hello \" world!"}`, 78 | `{"id": 53, "msg":"", "field":"hell\"o"}`, 79 | `{"id": 54, "msg":"", "field":"hello \" world"}`, 80 | `{"id": 55, "msg":"", "field":"hello \" world!"}`, 81 | `{"id": 56, "field":"my [beautiful] pet"}`, 82 | `{"id": 57, "field":"he[ll]\"o"}`, 83 | `{"id": 58, "field":"hell\"o"}`, 84 | `{"id": 59, "msg":"beautiful \" pet"}`, 85 | `{"id": 60, "msg":"pet]"}`, 86 | `{"id": 61, "msg":"hell\"o \"pet"}`, 87 | `{"id": 62, "path":"c:/dev/projects/sqlog"}`, 88 | `{"id": 63, "path":"c:/dev/projects/chain"}`, 89 | `{"id": 64, "msg":"hell\"o \"world"}`, 90 | } 91 | 92 | func Test_Memory_ExprBasic(t *testing.T) { 93 | testCases := []testExprMemoryData{ 94 | { 95 | "hello", 96 | []int{1, 7, 8, 11, 12, 51, 52}, 97 | }, 98 | { 99 | "hello*", 100 | []int{1, 7, 8, 11, 12, 51, 52}, 101 | }, 102 | { 103 | "hello world", 104 | []int{7, 8, 11, 12, 51, 52}, 105 | }, 106 | { 107 | "hello* *world", 108 | []int{7, 11, 51}, 109 | }, 110 | { 111 | `"hello world"`, 112 | []int{7}, 113 | }, 114 | { 115 | `"hello world*"`, 116 | []int{7, 8}, 117 | }, 118 | { 119 | `"*hello*world*"`, 120 | []int{7, 8, 11, 12, 51, 52}, 121 | }, 122 | { 123 | `field:hello`, 124 | []int{15, 21, 22, 25, 26, 42, 43, 44, 45, 46, 47, 54, 55}, 125 | }, 126 | { 127 | "field:hello*", 128 | []int{15, 21, 22, 25, 26, 42, 43, 44, 45, 46, 47, 54, 55}, 129 | }, 130 | { 131 | "field:hello world", 132 | []int{42}, 133 | }, 134 | { 135 | "field:hello* *world", 136 | []int{42}, 137 | }, 138 | { 139 | `field:"hello world"`, 140 | []int{21, 43, 44, 45, 46, 47}, 141 | }, 142 | { 143 | `field:"hello world*"`, 144 | []int{21, 22, 43, 44, 45, 46, 47}, 145 | }, 146 | { 147 | `field:"*hello*world*"`, 148 | []int{21, 22, 25, 26, 43, 44, 45, 46, 47, 54, 55}, 149 | }, 150 | } 151 | for _, tt := range testCases { 152 | t.Run(tt.expr, func(t *testing.T) { 153 | runMemoryExprTest(t, tt) 154 | }) 155 | } 156 | } 157 | 158 | // Numerical values 159 | func Test_Memory_ExprNumerical(t *testing.T) { 160 | testCases := []testExprMemoryData{ 161 | { 162 | `field:99`, 163 | []int{32, 33}, 164 | }, 165 | { 166 | `field:>99`, 167 | []int{34, 35, 36, 37, 38, 39}, 168 | }, 169 | { 170 | `field:<99`, 171 | []int{30, 31}, 172 | }, 173 | { 174 | `field:>=99`, 175 | []int{32, 33, 34, 35, 36, 37, 38, 
39}, 176 | }, 177 | { 178 | `field:<=99`, 179 | []int{30, 31, 32, 33}, 180 | }, 181 | } 182 | for _, tt := range testCases { 183 | t.Run(tt.expr, func(t *testing.T) { 184 | runMemoryExprTest(t, tt) 185 | }) 186 | } 187 | } 188 | 189 | func Test_Memory_ExprArray(t *testing.T) { 190 | testCases := []testExprMemoryData{ 191 | { 192 | `[hello world]`, 193 | []int{1, 3, 42}, 194 | }, 195 | { 196 | `[hello "beautiful world"]`, 197 | []int{1, 40}, 198 | }, 199 | { 200 | `field:[hello world]`, 201 | []int{15, 17, 42}, 202 | }, 203 | { 204 | `field:[hello "beautiful world"]`, 205 | []int{15, 41, 42}, 206 | }, 207 | { 208 | `field:[400 TO 499]`, 209 | []int{34, 35, 36, 37}, 210 | }, 211 | { 212 | `field:[99 400 500]`, 213 | []int{32, 33, 34, 35, 38, 39}, 214 | }, 215 | { 216 | `field:[99 hello "beautiful world" 400 500]`, 217 | []int{15, 32, 33, 34, 35, 38, 39, 41, 42}, 218 | }, 219 | } 220 | for _, tt := range testCases { 221 | t.Run(tt.expr, func(t *testing.T) { 222 | runMemoryExprTest(t, tt) 223 | }) 224 | } 225 | } 226 | 227 | // Boolean Operators 228 | func Test_Memory_ExprBoolean(t *testing.T) { 229 | testCases := []testExprMemoryData{ 230 | { 231 | "hello AND world", 232 | []int{7, 8, 11, 12, 51, 52}, 233 | }, 234 | { 235 | "hello AND beautiful AND world", 236 | []int{11, 12}, 237 | }, 238 | { 239 | "hello AND beautiful AND *world*", 240 | []int{11, 12}, 241 | }, 242 | { 243 | "hello OR world", 244 | []int{1, 3, 7, 8, 11, 12, 40, 42, 51, 52, 64}, 245 | }, 246 | { 247 | "field:hello AND world", 248 | []int{42}, 249 | }, 250 | { 251 | "field:hello AND beautiful AND field:world", 252 | []int{43, 44, 45, 46, 47}, 253 | }, 254 | { 255 | "field:hello OR world", 256 | []int{3, 7, 8, 11, 12, 15, 21, 22, 25, 26, 40, 42, 43, 44, 45, 46, 47, 51, 52, 54, 55, 64}, 257 | }, 258 | { 259 | "hello AND (beautiful world)", 260 | []int{11, 12}, 261 | }, 262 | { 263 | "hello AND (beautiful AND world)", 264 | []int{11, 12}, 265 | }, 266 | { 267 | "field:hello AND (beautiful AND count:99)", 268 | []int{44, 45, 46, 47}, 269 | }, 270 | { 271 | `(field:hello* OR world*) AND (field:[hello "beautiful world"] OR (count:99 AND status:[200 400 500]) OR code:[400 TO 499])`, 272 | []int{15, 42, 45, 47}, 273 | }, 274 | } 275 | for _, tt := range testCases { 276 | t.Run(tt.expr, func(t *testing.T) { 277 | runMemoryExprTest(t, tt) 278 | }) 279 | } 280 | } 281 | 282 | func Test_Memory_ExprEscape(t *testing.T) { 283 | testCases := []testExprMemoryData{ 284 | { 285 | `hell\"o`, 286 | []int{50, 61, 64}, 287 | }, 288 | { 289 | `"hello \" world"`, 290 | []int{51}, 291 | }, 292 | { 293 | `"hello \" world*"`, 294 | []int{51, 52}, 295 | }, 296 | { 297 | `field:hell\"o`, 298 | []int{53, 58}, 299 | }, 300 | { 301 | `field:"hello \" world"`, 302 | []int{54}, 303 | }, 304 | { 305 | `field:"hello \" world*"`, 306 | []int{54, 55}, 307 | }, 308 | { 309 | `field:"my [beautiful] pet*"`, 310 | []int{56}, 311 | }, 312 | { 313 | `field:he\[ll]\"o`, 314 | []int{57}, 315 | }, 316 | { 317 | `field:[hell\"o "beautiful \" pet"]`, 318 | []int{53, 58}, 319 | }, 320 | { 321 | `field:[hell\"o pet\]]`, 322 | []int{53, 58}, 323 | }, 324 | { 325 | `path:c\:/dev/projects/*`, 326 | []int{62, 63}, 327 | }, 328 | { 329 | `(hell\"o AND \"pet)`, 330 | []int{61}, 331 | }, 332 | } 333 | for _, tt := range testCases { 334 | t.Run(tt.expr, func(t *testing.T) { 335 | runMemoryExprTest(t, tt) 336 | }) 337 | } 338 | } 339 | 340 | func Test_Memory_ExprIncomplete(t *testing.T) { 341 | testCases := []testExprMemoryData{ 342 | { 343 | `"hello \" world`, 344 | []int{51}, 345 | 
}, 346 | { 347 | `field:[hell\"o "beautiful \" world"`, 348 | []int{53, 58}, 349 | }, 350 | { 351 | `field:[hell\"o world\]`, 352 | []int{53, 58}, 353 | }, 354 | { 355 | `field:[]`, 356 | []int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64}, 357 | }, 358 | { 359 | `field:[ ]`, 360 | []int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64}, 361 | }, 362 | { 363 | `(hell\"o AND \"world`, 364 | []int{64}, 365 | }, 366 | { 367 | `(field:hello* OR world*) AND (field:[hello "beautiful world"] OR (field:99 AND field:[100 200 300`, 368 | []int{15, 42}, 369 | }, 370 | } 371 | for _, tt := range testCases { 372 | t.Run(tt.expr, func(t *testing.T) { 373 | runMemoryExprTest(t, tt) 374 | }) 375 | } 376 | } 377 | 378 | func runMemoryExprTest(t *testing.T, tt testExprMemoryData) { 379 | expr, err := MemoryExprBuilderFn(tt.expr) 380 | assert.NoError(t, err) 381 | 382 | var ids []int 383 | 384 | for _, e := range testExpMemoryEntries { 385 | if expr(e) { 386 | var c map[string]interface{} 387 | err := json.Unmarshal(e.Content, &c) 388 | if err == nil { 389 | ids = append(ids, int(c["id"].(float64))) 390 | } 391 | 392 | } 393 | } 394 | 395 | assert.Equal(t, tt.ids, ids, "exp=%s", tt.expr) 396 | } 397 | -------------------------------------------------------------------------------- /memory/wildcard.go: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2023 Iglou.eu 3 | * Copyright (c) 2023 Adrien Kara 4 | * 5 | * Licensed under the BSD 3-Clause License, 6 | * see LICENSE.md for more details. 7 | * 8 | * Extracted from https://github.com/IGLOU-EU/go-wildcard/blob/master/wildcard.go 9 | */ 10 | package memory 11 | 12 | // wildcardMatch returns true if the pattern matches the s string. 13 | // The pattern can contain the wildcard characters '?' and '*'. 14 | func wildcardMatch(pattern, s string) bool { 15 | if pattern == "" { 16 | return s == pattern 17 | } 18 | if pattern == "*" || s == pattern { 19 | return true 20 | } 21 | 22 | var ( 23 | lastErotemeByte byte 24 | patternIndex int 25 | sIndex int 26 | lastStar int 27 | lastEroteme int 28 | patternLen = len(pattern) 29 | sLen = len(s) 30 | star = -1 31 | eroteme = -1 32 | ) 33 | 34 | Loop: 35 | if sIndex >= sLen { 36 | goto checkPattern 37 | } 38 | 39 | if patternIndex >= patternLen { 40 | if star != -1 { 41 | patternIndex = star + 1 42 | lastStar++ 43 | sIndex = lastStar 44 | goto Loop 45 | } 46 | return false 47 | } 48 | switch pattern[patternIndex] { 49 | case '?': 50 | // '?' matches one character. Store its position and match exactly one character in the string. 51 | eroteme = patternIndex 52 | lastEroteme = sIndex 53 | lastErotemeByte = s[sIndex] 54 | case '*': 55 | // '*' matches zero or more characters. Store its position and increment the pattern index. 56 | star = patternIndex 57 | lastStar = sIndex 58 | patternIndex++ 59 | goto Loop 60 | default: 61 | // If the characters don't match, check if there was a previous '?' or '*' to backtrack. 
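			// Editor's note: e.g. pattern "a*c?e" vs s "abcde": the mismatch of
			// 'b' against 'c' is resolved by jumping back to the stored '*'
			// position and retrying the match one string byte further along.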
62 | if pattern[patternIndex] != s[sIndex] { 63 | if eroteme != -1 { 64 | patternIndex = eroteme + 1 65 | sIndex = lastEroteme 66 | eroteme = -1 67 | goto Loop 68 | } 69 | 70 | if star != -1 { 71 | patternIndex = star + 1 72 | lastStar++ 73 | sIndex = lastStar 74 | goto Loop 75 | } 76 | 77 | return false 78 | } 79 | 80 | // If the characters match, check if it was not the same to validate the eroteme. 81 | if eroteme != -1 && lastErotemeByte != s[sIndex] { 82 | eroteme = -1 83 | } 84 | } 85 | 86 | patternIndex++ 87 | sIndex++ 88 | goto Loop 89 | 90 | // Check if the remaining pattern characters are '*' or '?', which can match the end of the string. 91 | checkPattern: 92 | if patternIndex < patternLen { 93 | if pattern[patternIndex] == '*' { 94 | patternIndex++ 95 | goto checkPattern 96 | } else if pattern[patternIndex] == '?' { 97 | if sIndex >= sLen { 98 | sIndex-- 99 | } 100 | patternIndex++ 101 | goto checkPattern 102 | } 103 | } 104 | 105 | return patternIndex == patternLen 106 | } 107 | -------------------------------------------------------------------------------- /memory/wildcard_test.go: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2023 Iglou.eu 3 | * Copyright (c) 2023 Adrien Kara 4 | * 5 | * Licensed under the BSD 3-Clause License, 6 | * see LICENSE.md for more details. 7 | * 8 | * Extracted from https://github.com/IGLOU-EU/go-wildcard/blob/master/wildcard.go 9 | */ 10 | package memory 11 | 12 | import ( 13 | "testing" 14 | ) 15 | 16 | // TestMatch - Tests validate the logic of wild card matching. 17 | // `Match` supports '*' and '?' wildcards. 18 | // Sample usage: In resource matching for bucket policy validation. 19 | func TestMatch(t *testing.T) { 20 | cases := []struct { 21 | s string 22 | pattern string 23 | result bool 24 | }{ 25 | {"", "", true}, 26 | {"", "*", true}, 27 | {"", "**", true}, 28 | {"", "?", true}, 29 | {"", "??", true}, 30 | {"", "?*", true}, 31 | {"", "*?", true}, 32 | {"a", "", false}, 33 | {"a", "a", true}, 34 | {"a", "*", true}, 35 | {"a", "**", true}, 36 | {"a", "?", true}, 37 | {"a", "??", true}, 38 | 39 | {"match the exact string", "match the exact string", true}, 40 | {"do not match a different string", "this is a different string", false}, 41 | {"Match The Exact String WITH DIFFERENT CASE", "Match The Exact String WITH DIFFERENT CASE", true}, 42 | {"do not match a different string WITH DIFFERENT CASE", "this is a different string WITH DIFFERENT CASE", false}, 43 | {"Do Not Match The Exact String With Different Case", "do not match the exact string with different case", false}, 44 | {"match an emoji 😃", "match an emoji 😃", true}, 45 | {"do not match because of different emoji 😃", "do not match because of different emoji 😄", false}, 46 | {"🌅☕️📰👨‍💼👩‍💼🏢🖥️💼💻📊📈📉👨‍👩‍👧‍👦🍝🕰️💪🏋️‍♂️🏋️‍♀️🏋️‍♂️💼🚴‍♂️🚴‍♀️🚴‍♂️🛀💤🌃", "🌅☕️📰👨‍💼👩‍💼🏢🖥️💼💻📊📈📉👨‍👩‍👧‍👦🍝🕰️💪🏋️‍♂️🏋️‍♀️🏋️‍♂️💼🚴‍♂️🚴‍♀️🚴‍♂️🛀💤🌃", true}, 47 | {"🌅☕️📰👨‍💼👩‍💼🏢🖥️💼💻📊📈📉👨‍👩‍👧‍👦🍝🕰️💪🏋️‍♂️🏋️‍♀️🏋️‍♂️💼🚴‍♂️🚴‍♀️🚴‍♂️🛀💤🌃", "🦌🐇🦡🐿️🌲🌳🏰🌳🌲🌞🌧️❄️🌬️⛈️🔥🎄🎅🎁🎉🎊🥳👨‍👩‍👧‍👦💏👪💖👩‍💼🛀", false}, 48 | 49 | {"match a string with a *", "match a string *", true}, 50 | {"match a string with a * at the beginning", "* at the beginning", true}, 51 | {"match a string with two *", "match * with *", true}, 52 | {"do not match a string with extra and a *", "do not match a string * with more", false}, 53 | 54 | {"match a string with a ?", "match ? string with a ?", true}, 55 | {"match a string with a ? at the beginning", "?atch a string with a ? 
at the beginning", true}, 56 | {"match a string with two ?", "match a string with two ??", true}, 57 | {"match a optional char with a ?", "match a optional? char with a ?", true}, 58 | {"match a optional char with a ?", "match a optional? char with a ?", true}, 59 | {"do not match a string with extra and a ?", "do not match ? string with extra and a ? like this", false}, 60 | 61 | {"A big brown fox jumps over the lazy dog, with all there wildcards friends", "? big?brown fox jumps over * wildcard? friend??", true}, 62 | {"A big brown fox fails to jump over the lazy dog, with all there wildcards friends", "? big?brown fox jumps over * wildcard? friend??", false}, 63 | } 64 | 65 | for i, c := range cases { 66 | result := wildcardMatch(c.pattern, c.s) 67 | if c.result != result { 68 | t.Errorf("Test %d: Expected `%v`, found `%v`; With Pattern: `%s` and String: `%s`", i+1, c.result, result, c.pattern, c.s) 69 | } 70 | } 71 | } 72 | 73 | func FuzzMatch(f *testing.F) { 74 | f.Fuzz(func(t *testing.T, s string) { 75 | if !wildcardMatch(s, s) { 76 | t.Fatalf("%s does not match %s", s, s) 77 | } 78 | }) 79 | } 80 | -------------------------------------------------------------------------------- /sqlite/README.md: -------------------------------------------------------------------------------- 1 | # SQLite Storage for SQLog 2 | 3 | 4 | @TODO: Docs -------------------------------------------------------------------------------- /sqlite/api_entries.go: -------------------------------------------------------------------------------- 1 | package sqlite 2 | 3 | import ( 4 | "bytes" 5 | "sort" 6 | "strings" 7 | "time" 8 | 9 | "github.com/nidorx/sqlog" 10 | ) 11 | 12 | var ( 13 | sqlSeekPageAfter = []byte("SELECT e.epoch_secs, e.nanos, e.level, e.content FROM entries e WHERE (e.epoch_secs > ? OR (e.epoch_secs = ? AND e.nanos > ?)) ") 14 | sqlSeekPageBefore = []byte("SELECT e.epoch_secs, e.nanos, e.level, e.content FROM entries e WHERE (e.epoch_secs < ? OR (e.epoch_secs = ? 
AND e.nanos < ?)) ") 15 | sqlSeekPageAfterOrder = []byte(" ORDER BY e.epoch_secs ASC, e.nanos ASC LIMIT ?") 16 | sqlSeekPageBeforeOrder = []byte(" ORDER BY e.epoch_secs DESC, e.nanos DESC LIMIT ?") 17 | ) 18 | 19 | func (s *storage) Entries(input *sqlog.EntriesInput) (*sqlog.Output, error) { 20 | 21 | var ( 22 | levels map[string]bool 23 | expr = input.Expr 24 | direction = input.Direction 25 | epochStart = input.EpochStart 26 | nanosStart = input.NanosStart 27 | maxResult = input.MaxResult 28 | ) 29 | 30 | if epochStart == 0 { 31 | epochStart = time.Now().Unix() 32 | } 33 | 34 | if len(input.Level) > 0 { 35 | levels = make(map[string]bool) 36 | for _, v := range input.Level { 37 | levels[v] = true 38 | } 39 | } 40 | 41 | buf := bytes.NewBuffer(make([]byte, 0, 128)) 42 | 43 | if direction == "before" { 44 | buf.Write(sqlSeekPageBefore) 45 | } else { 46 | buf.Write(sqlSeekPageAfter) 47 | } 48 | args := []any{epochStart, epochStart, nanosStart} 49 | 50 | if len(levels) > 0 && len(levels) != 4 { 51 | buf.WriteString(" AND (") 52 | 53 | if levels["error"] && levels["warn"] && levels["info"] { 54 | buf.WriteString(" e.level >= 0 ") 55 | } else { 56 | clause := "" 57 | 58 | if levels["error"] && levels["warn"] { 59 | buf.WriteString(" e.level >= 4 ") 60 | clause = " OR " 61 | } else { 62 | if levels["error"] { 63 | buf.WriteString(" e.level >= 8 ") 64 | clause = " OR " 65 | } else if levels["warn"] { 66 | buf.WriteString(" (e.level BETWEEN 4 AND 7) ") 67 | clause = " OR " 68 | } 69 | } 70 | 71 | if levels["info"] { 72 | buf.WriteString(clause) 73 | buf.WriteString(" (e.level BETWEEN 0 AND 3) ") 74 | clause = " OR " 75 | } 76 | 77 | if levels["debug"] { 78 | buf.WriteString(clause) 79 | buf.WriteString(" e.level < 0 ") 80 | } 81 | } 82 | buf.WriteString(") ") 83 | } 84 | 85 | if expr = strings.TrimSpace(expr); expr != "" { 86 | if compiled, err := s.config.ExprBuilder(expr); err != nil { 87 | return nil, err 88 | } else if compiled.Sql != "" { 89 | buf.WriteString(" AND (") 90 | buf.WriteString(compiled.Sql) 91 | buf.WriteByte(')') 92 | args = append(args, compiled.Args...) 93 | } 94 | } 95 | 96 | if direction == "before" { 97 | buf.Write(sqlSeekPageBeforeOrder) 98 | } else { 99 | buf.Write(sqlSeekPageAfterOrder) 100 | } 101 | args = append(args, min(max(maxResult, 10), 100)) 102 | 103 | var ( 104 | sql = buf.String() 105 | list = []any{} 106 | dbs []*storageDb 107 | ) 108 | 109 | //fmt.Printf("[sqlog] Entries\nSQL: %s\n\nARG: %v\n", sql, args) // debug 110 | 111 | for _, d := range s.dbs { 112 | if direction == "before" { 113 | if d.epochStart <= epochStart { 114 | // er | 115 | // ds |--------| 116 | // ds |---| 117 | dbs = append(dbs, d) 118 | } 119 | } else { 120 | // from older to new 121 | if d.epochEnd == 0 || d.epochEnd >= epochStart { 122 | // er | 123 | // ds |--------| 124 | // ds |---| 125 | dbs = append(dbs, d) 126 | } 127 | } 128 | } 129 | 130 | if direction == "before" { 131 | sort.SliceStable(dbs, func(i, j int) bool { 132 | return dbs[i].epochStart > dbs[j].epochStart 133 | }) 134 | } else { 135 | sort.SliceStable(dbs, func(i, j int) bool { 136 | return dbs[i].epochStart < dbs[j].epochStart 137 | }) 138 | } 139 | 140 | out := &sqlog.Output{} 141 | 142 | for _, db := range dbs { 143 | if db.isOpen() { 144 | if ll, err := listEntries(db, sql, args); err != nil { 145 | return nil, err 146 | } else { 147 | list = append(list, ll...) 
148 | } 149 | 150 | if len(list) >= maxResult { 151 | break 152 | } 153 | } else { 154 | // schedule more result 155 | out.Scheduled = true 156 | out.TaskIds = s.schedule([]*storageDb{db}, func(db *storageDb, o *sqlog.Output) error { 157 | if list, err := listEntries(db, sql, args); err != nil { 158 | return err 159 | } else { 160 | o.Entries = list 161 | return nil 162 | } 163 | }) 164 | break 165 | } 166 | } 167 | 168 | out.Entries = list 169 | return out, nil 170 | } 171 | 172 | func listEntries(db *storageDb, sql string, args []any) ([]any, error) { 173 | var list []any 174 | 175 | stm, rows, err := db.query(sql, args) 176 | if err != nil { 177 | return nil, err 178 | } 179 | defer stm.Close() 180 | defer rows.Close() 181 | 182 | for rows.Next() { 183 | var ( 184 | epoch int64 185 | nanos int 186 | level int 187 | content string 188 | ) 189 | if err = rows.Scan(&epoch, &nanos, &level, &content); err != nil { 190 | rows.Close() 191 | stm.Close() 192 | return nil, err 193 | } 194 | 195 | list = append(list, []any{epoch, nanos, level, content}) 196 | } 197 | 198 | return list, nil 199 | } 200 | -------------------------------------------------------------------------------- /sqlite/api_ticks.go: -------------------------------------------------------------------------------- 1 | package sqlite 2 | 3 | import ( 4 | "bytes" 5 | "strings" 6 | "time" 7 | 8 | "github.com/nidorx/sqlog" 9 | ) 10 | 11 | var ( 12 | sqlTicksInit = []byte(` 13 | WITH RECURSIVE series(idx, epoch_start, epoch_end) AS ( 14 | SELECT ?-1, ? - ?, ? 15 | UNION ALL 16 | SELECT idx-1, epoch_start - ?, epoch_end - ? FROM series LIMIT ? 17 | ) 18 | SELECT 19 | c.idx, 20 | c.epoch_start, 21 | c.epoch_end, 22 | COUNT(e.epoch_secs) AS count, 23 | COUNT(CASE WHEN e.level < 0 THEN 1 END) AS count_debug, 24 | COUNT(CASE WHEN e.level >= 0 AND e.level < 4 THEN 1 END) AS count_info, 25 | COUNT(CASE WHEN e.level >= 4 AND e.level < 8 THEN 1 END) AS count_warn, 26 | COUNT(CASE WHEN e.level >= 8 THEN 1 END) AS count_error 27 | FROM series c 28 | JOIN entries e ON e.epoch_secs >= c.epoch_start AND e.epoch_secs < c.epoch_end 29 | `) 30 | sqlTicksEnd = []byte(`GROUP BY c.epoch_start, c.epoch_end`) 31 | ) 32 | 33 | func (s *storage) Ticks(input *sqlog.TicksInput) (*sqlog.Output, error) { 34 | 35 | var ( 36 | levels map[string]bool 37 | expr = input.Expr 38 | epochEnd = input.EpochEnd 39 | intervalSec = input.IntervalSec 40 | maxResult = input.MaxResult 41 | ) 42 | 43 | if epochEnd == 0 { 44 | epochEnd = time.Now().Unix() 45 | } 46 | 47 | if len(input.Level) > 0 { 48 | levels = make(map[string]bool) 49 | for _, v := range input.Level { 50 | levels[v] = true 51 | } 52 | } 53 | 54 | buf := bytes.NewBuffer(make([]byte, 0, 128)) 55 | buf.Write(sqlTicksInit) 56 | 57 | args := []any{ 58 | maxResult, 59 | epochEnd, 60 | intervalSec, 61 | epochEnd, 62 | intervalSec, 63 | intervalSec, 64 | maxResult, 65 | } 66 | 67 | clause := " WHERE " 68 | 69 | if len(levels) > 0 && len(levels) != 4 { 70 | buf.WriteString(clause) 71 | buf.WriteString(" (") 72 | 73 | if levels["error"] && levels["warn"] && levels["info"] { 74 | buf.WriteString(" e.level >= 0 ") 75 | } else { 76 | clause = "" 77 | 78 | if levels["error"] && levels["warn"] { 79 | buf.WriteString(" e.level >= 4 ") 80 | clause = " OR " 81 | } else { 82 | if levels["error"] { 83 | buf.WriteString(" e.level >= 8 ") 84 | clause = " OR " 85 | } else if levels["warn"] { 86 | buf.WriteString(" (e.level BETWEEN 4 AND 7) ") 87 | clause = " OR " 88 | } 89 | } 90 | 91 | if levels["info"] { 92 | 
buf.WriteString(clause) 93 | buf.WriteString(" (e.level BETWEEN 0 AND 3) ") 94 | clause = " OR " 95 | } 96 | 97 | if levels["debug"] { 98 | buf.WriteString(clause) 99 | buf.WriteString(" e.level < 0 ") 100 | } 101 | } 102 | buf.WriteString(") ") 103 | 104 | clause = " AND " 105 | } 106 | 107 | if expr = strings.TrimSpace(expr); expr != "" { 108 | if compiled, err := s.config.ExprBuilder(expr); err != nil { 109 | return nil, err 110 | } else if compiled.Sql != "" { 111 | buf.WriteString(clause) 112 | buf.WriteString(compiled.Sql) 113 | buf.WriteString(" ") 114 | args = append(args, compiled.Args...) 115 | } 116 | } 117 | buf.Write(sqlTicksEnd) 118 | 119 | var ( 120 | sql = buf.String() 121 | epochStart = epochEnd - int64((intervalSec * maxResult)) 122 | dbs []*storageDb 123 | closedDbs []*storageDb 124 | list []*sqlog.Tick 125 | tickByIndex = map[int]*sqlog.Tick{} 126 | ) 127 | 128 | // fmt.Printf("[sqlog] Ticks\nSQL: %s\n\nARG: %v\n", sql, args) // debug 129 | 130 | for _, d := range s.dbs { 131 | if epochEnd < d.epochStart || (d.epochEnd != 0 && d.epochEnd < epochStart) { 132 | // es |---| 133 | // es |---| 134 | // ds |----| 135 | continue 136 | } 137 | dbs = append(dbs, d) 138 | } 139 | 140 | for _, db := range dbs { 141 | if db.isOpen() { 142 | if ll, err := listTicks(db, sql, args); err != nil { 143 | return nil, err 144 | } else { 145 | for _, t := range ll { 146 | if o, exists := tickByIndex[t.Index]; exists { 147 | t.Count += o.Count 148 | t.Debug += o.Debug 149 | t.Info += o.Info 150 | t.Warn += o.Warn 151 | t.Error += o.Error 152 | } else { 153 | list = append(list, t) 154 | } 155 | } 156 | } 157 | } else { 158 | closedDbs = append(closedDbs, db) 159 | } 160 | } 161 | 162 | out := &sqlog.Output{ 163 | Ticks: list, 164 | } 165 | 166 | if len(closedDbs) > 0 { 167 | // schedule more result 168 | out.Scheduled = true 169 | out.TaskIds = s.schedule(closedDbs, func(db *storageDb, o *sqlog.Output) error { 170 | if list, err := listTicks(db, sql, args); err != nil { 171 | return err 172 | } else { 173 | o.Ticks = list 174 | return nil 175 | } 176 | }) 177 | } 178 | 179 | return out, nil 180 | } 181 | 182 | func listTicks(db *storageDb, sql string, args []any) ([]*sqlog.Tick, error) { 183 | var list []*sqlog.Tick 184 | 185 | stm, rows, err := db.query(sql, args) 186 | if err != nil { 187 | return nil, err 188 | } 189 | defer stm.Close() 190 | defer rows.Close() 191 | 192 | for rows.Next() { 193 | t := &sqlog.Tick{} 194 | if err = rows.Scan(&t.Index, &t.Start, &t.End, &t.Count, &t.Debug, &t.Info, &t.Warn, &t.Error); err != nil { 195 | return nil, err 196 | } 197 | list = append(list, t) 198 | } 199 | 200 | return list, nil 201 | } 202 | -------------------------------------------------------------------------------- /sqlite/expr.go: -------------------------------------------------------------------------------- 1 | package sqlite 2 | 3 | import ( 4 | "bytes" 5 | 6 | "github.com/nidorx/sqlog" 7 | ) 8 | 9 | var ( 10 | ExpBuilderFn = sqlog.NewExprBuilder(func(expression string) (sqlog.ExprBuilder[*Expr], string) { 11 | return &SqliteExprBuilder{ 12 | args: []any{}, 13 | sql: bytes.NewBuffer(make([]byte, 0, 512)), 14 | }, expression 15 | }) 16 | ) 17 | 18 | type Expr struct { 19 | Sql string 20 | Args []any 21 | } 22 | 23 | type SqliteExprBuilder struct { 24 | args []any 25 | sql *bytes.Buffer 26 | groups []*bytes.Buffer 27 | } 28 | 29 | func (s *SqliteExprBuilder) Build() *Expr { 30 | // @TODO: write all opened s.groups 31 | return &Expr{ 32 | Sql: s.sql.String(), 33 | Args: s.args, 34 | } 
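// For example (mirroring Test_ExprBasic in expr_test.go below), the
// expression `hello world` builds to:
//
//	Sql:  json_extract(e.content, ?) GLOB ? AND json_extract(e.content, ?) GLOB ?
//	Args: []any{"$.msg", "*hello*", "$.msg", "*world*"}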
35 | } 36 | 37 | func (s *SqliteExprBuilder) GroupStart() { 38 | s.sql.WriteByte('(') 39 | s.groups = append(s.groups, s.sql) 40 | s.sql = bytes.NewBuffer(make([]byte, 0, 512)) 41 | } 42 | 43 | func (s *SqliteExprBuilder) GroupEnd() { 44 | if len(s.groups) > 0 { 45 | // this is always expected to be true 46 | last := len(s.groups) - 1 47 | parent := s.groups[last] 48 | s.groups = s.groups[:last] 49 | parent.Write(s.sql.Bytes()) 50 | s.sql = parent 51 | } 52 | s.sql.WriteByte(')') 53 | } 54 | 55 | func (s *SqliteExprBuilder) Operator(op string) { 56 | if s.sql.Len() > 0 { 57 | s.sql.WriteByte(' ') 58 | s.sql.WriteString(op) // AND|OR 59 | s.sql.WriteByte(' ') 60 | } 61 | } 62 | 63 | func (s *SqliteExprBuilder) Text(field, term string, isSequence, isWildcard bool) { 64 | field = "$." + field 65 | if isSequence { 66 | if isWildcard { 67 | s.sql.WriteString("json_extract(e.content, ?) GLOB ?") 68 | s.args = append(s.args, field, term) 69 | } else { 70 | s.sql.WriteString("json_extract(e.content, ?) = ?") 71 | s.args = append(s.args, field, term) 72 | } 73 | } else { 74 | s.sql.WriteString("json_extract(e.content, ?) GLOB ?") 75 | if isWildcard { 76 | s.args = append(s.args, field, term) 77 | } else { 78 | s.args = append(s.args, field, "*"+term+"*") 79 | } 80 | } 81 | } 82 | 83 | func (s *SqliteExprBuilder) TextIn(field string, values []string) { 84 | s.args = append(s.args, "$."+field) 85 | s.sql.WriteString("json_extract(e.content, ?) IN (") 86 | for i, v := range values { 87 | if i > 0 { 88 | s.sql.WriteByte(',') 89 | } 90 | s.sql.WriteByte('?') 91 | s.args = append(s.args, v) 92 | } 93 | s.sql.WriteByte(')') 94 | } 95 | 96 | func (s *SqliteExprBuilder) Number(field, condition string, value float64) { 97 | s.sql.WriteString("CAST(json_extract(e.content, ?) AS NUMERIC) ") 98 | s.sql.WriteString(condition) 99 | s.sql.WriteString(" ? ") 100 | s.args = append(s.args, "$."+field, value) 101 | } 102 | 103 | func (s *SqliteExprBuilder) Between(field string, x, y float64) { 104 | s.sql.WriteString("CAST(json_extract(e.content, ?) AS NUMERIC) BETWEEN ? AND ?") 105 | s.args = append(s.args, "$."+field, x, y) 106 | } 107 | 108 | func (s *SqliteExprBuilder) NumberIn(field string, values []float64) { 109 | s.sql.WriteString("CAST(json_extract(e.content, ?) AS NUMERIC) IN (") 110 | s.args = append(s.args, "$."+field) 111 | for i, v := range values { 112 | if i > 0 { 113 | s.sql.WriteByte(',') 114 | } 115 | s.sql.WriteByte('?') 116 | s.args = append(s.args, v) 117 | } 118 | s.sql.WriteByte(')') 119 | } 120 | -------------------------------------------------------------------------------- /sqlite/expr_test.go: -------------------------------------------------------------------------------- 1 | package sqlite 2 | 3 | import ( 4 | "fmt" 5 | "reflect" 6 | "strings" 7 | "testing" 8 | 9 | "github.com/pmezard/go-difflib/difflib" 10 | "github.com/stretchr/testify/assert" 11 | ) 12 | 13 | type testExprData struct { 14 | expr string 15 | sql string 16 | args []any 17 | } 18 | 19 | func Test_ExprBasic(t *testing.T) { 20 | testCases := []testExprData{ 21 | { 22 | "hello", 23 | "json_extract(e.content, ?) GLOB ?", 24 | []any{"$.msg", "*hello*"}, 25 | }, 26 | { 27 | "hello*", 28 | "json_extract(e.content, ?) GLOB ?", 29 | []any{"$.msg", "hello*"}, 30 | }, 31 | { 32 | "hello world", 33 | "json_extract(e.content, ?) GLOB ? AND json_extract(e.content, ?) GLOB ?", 34 | []any{"$.msg", "*hello*", "$.msg", "*world*"}, 35 | }, 36 | { 37 | "hello* *world", 38 | "json_extract(e.content, ?) GLOB ? AND json_extract(e.content, ?)
GLOB ?", 39 | []any{"$.msg", "hello*", "$.msg", "*world"}, 40 | }, 41 | { 42 | `"hello world"`, 43 | "json_extract(e.content, ?) = ?", 44 | []any{"$.msg", "hello world"}, 45 | }, 46 | { 47 | `"hello world*"`, 48 | "json_extract(e.content, ?) GLOB ?", 49 | []any{"$.msg", "hello world*"}, 50 | }, 51 | { 52 | `"*hello*world*"`, 53 | "json_extract(e.content, ?) GLOB ?", 54 | []any{"$.msg", "*hello*world*"}, 55 | }, 56 | { 57 | `field:hello`, 58 | "json_extract(e.content, ?) GLOB ?", 59 | []any{"$.field", "*hello*"}, 60 | }, 61 | { 62 | "field:hello*", 63 | "json_extract(e.content, ?) GLOB ?", 64 | []any{"$.field", "hello*"}, 65 | }, 66 | { 67 | "field:hello world", 68 | "json_extract(e.content, ?) GLOB ? AND json_extract(e.content, ?) GLOB ?", 69 | []any{"$.field", "*hello*", "$.msg", "*world*"}, 70 | }, 71 | { 72 | "field:hello* *world", 73 | "json_extract(e.content, ?) GLOB ? AND json_extract(e.content, ?) GLOB ?", 74 | []any{"$.field", "hello*", "$.msg", "*world"}, 75 | }, 76 | { 77 | `field:"hello world"`, 78 | "json_extract(e.content, ?) = ?", 79 | []any{"$.field", "hello world"}, 80 | }, 81 | { 82 | `field:"hello world*"`, 83 | "json_extract(e.content, ?) GLOB ?", 84 | []any{"$.field", "hello world*"}, 85 | }, 86 | { 87 | `field:"*hello*world*"`, 88 | "json_extract(e.content, ?) GLOB ?", 89 | []any{"$.field", "*hello*world*"}, 90 | }, 91 | } 92 | for _, tt := range testCases { 93 | t.Run(tt.expr, func(t *testing.T) { 94 | runExprTest(t, tt) 95 | }) 96 | } 97 | } 98 | 99 | // Numerical values 100 | func Test_ExprNumerical(t *testing.T) { 101 | testCases := []testExprData{ 102 | { 103 | `field:99`, 104 | `CAST(json_extract(e.content, ?) AS NUMERIC) = ?`, 105 | []any{"$.field", float64(99)}, 106 | }, 107 | { 108 | `field:>99`, 109 | `CAST(json_extract(e.content, ?) AS NUMERIC) > ?`, 110 | []any{"$.field", float64(99)}, 111 | }, 112 | { 113 | `field:<99`, 114 | `CAST(json_extract(e.content, ?) AS NUMERIC) < ?`, 115 | []any{"$.field", float64(99)}, 116 | }, 117 | { 118 | `field:>=99`, 119 | `CAST(json_extract(e.content, ?) AS NUMERIC) >= ?`, 120 | []any{"$.field", float64(99)}, 121 | }, 122 | { 123 | `field:<=99`, 124 | `CAST(json_extract(e.content, ?) AS NUMERIC) <= ?`, 125 | []any{"$.field", float64(99)}, 126 | }, 127 | } 128 | for _, tt := range testCases { 129 | t.Run(tt.expr, func(t *testing.T) { 130 | runExprTest(t, tt) 131 | }) 132 | } 133 | } 134 | 135 | func Test_ExprArray(t *testing.T) { 136 | testCases := []testExprData{ 137 | { 138 | `[hello world]`, 139 | "json_extract(e.content, ?) IN (?, ?)", 140 | []any{"$.msg", "hello", "world"}, 141 | }, 142 | { 143 | `[hello "beautiful world"]`, 144 | "json_extract(e.content, ?) IN (?, ?)", 145 | []any{"$.msg", "hello", "beautiful world"}, 146 | }, 147 | { 148 | `field:[hello world]`, 149 | "json_extract(e.content, ?) IN (?, ?)", 150 | []any{"$.field", "hello", "world"}, 151 | }, 152 | { 153 | `field:[hello "beautiful world"]`, 154 | "json_extract(e.content, ?) IN (?, ?)", 155 | []any{"$.field", "hello", "beautiful world"}, 156 | }, 157 | { 158 | `field:[400 TO 499]`, 159 | `CAST(json_extract(e.content, ?) AS NUMERIC) BETWEEN ? AND ?`, 160 | []any{"$.field", float64(400), float64(499)}, 161 | }, 162 | { 163 | `field:[100 200 300]`, 164 | `CAST(json_extract(e.content, ?) AS NUMERIC) IN (?, ?, ?)`, 165 | []any{"$.field", float64(100), float64(200), float64(300)}, 166 | }, 167 | { 168 | `field:[100 hello "beautiful world" 200 300]`, 169 | `(CAST(json_extract(e.content, ?) AS NUMERIC) IN (?, ?, ?) OR json_extract(e.content, ?) 
IN (?, ?))`, 170 | []any{"$.field", float64(100), float64(200), float64(300), "$.field", "hello", "beautiful world"}, 171 | }, 172 | } 173 | for _, tt := range testCases { 174 | t.Run(tt.expr, func(t *testing.T) { 175 | runExprTest(t, tt) 176 | }) 177 | } 178 | } 179 | 180 | // Boolean Operators 181 | func Test_ExprBoolean(t *testing.T) { 182 | testCases := []testExprData{ 183 | { 184 | "hello AND world", 185 | `json_extract(e.content, ?) GLOB ? AND json_extract(e.content, ?) GLOB ?`, 186 | []any{"$.msg", "*hello*", "$.msg", "*world*"}, 187 | }, 188 | { 189 | "hello AND beautiful AND world", 190 | `json_extract(e.content, ?) GLOB ? AND json_extract(e.content, ?) GLOB ? AND json_extract(e.content, ?) GLOB ?`, 191 | []any{"$.msg", "*hello*", "$.msg", "*beautiful*", "$.msg", "*world*"}, 192 | }, 193 | { 194 | "hello OR world", 195 | `json_extract(e.content, ?) GLOB ? OR json_extract(e.content, ?) GLOB ?`, 196 | []any{"$.msg", "*hello*", "$.msg", "*world*"}, 197 | }, 198 | { 199 | "field:hello AND world", 200 | `json_extract(e.content, ?) GLOB ? AND json_extract(e.content, ?) GLOB ?`, 201 | []any{"$.field", "*hello*", "$.msg", "*world*"}, 202 | }, 203 | { 204 | "field:hello AND beautiful AND field:world", 205 | `json_extract(e.content, ?) GLOB ? AND json_extract(e.content, ?) GLOB ? AND json_extract(e.content, ?) GLOB ?`, 206 | []any{"$.field", "*hello*", "$.msg", "*beautiful*", "$.field", "*world*"}, 207 | }, 208 | { 209 | "field:hello OR world", 210 | `json_extract(e.content, ?) GLOB ? OR json_extract(e.content, ?) GLOB ?`, 211 | []any{"$.field", "*hello*", "$.msg", "*world*"}, 212 | }, 213 | { 214 | "hello AND (beautiful OR world)", 215 | `json_extract(e.content, ?) GLOB ? AND (json_extract(e.content, ?) GLOB ? OR json_extract(e.content, ?) GLOB ?)`, 216 | []any{"$.msg", "*hello*", "$.msg", "*beautiful*", "$.msg", "*world*"}, 217 | }, 218 | { 219 | "hello AND (beautiful AND world)", 220 | `json_extract(e.content, ?) GLOB ? AND (json_extract(e.content, ?) GLOB ? AND json_extract(e.content, ?) GLOB ?)`, 221 | []any{"$.msg", "*hello*", "$.msg", "*beautiful*", "$.msg", "*world*"}, 222 | }, 223 | { 224 | "field:hello AND (beautiful AND field:99)", 225 | `json_extract(e.content, ?) GLOB ? AND (json_extract(e.content, ?) GLOB ? AND CAST(json_extract(e.content, ?) AS NUMERIC) = ?)`, 226 | []any{"$.field", "*hello*", "$.msg", "*beautiful*", "$.field", float64(99)}, 227 | }, 228 | { 229 | `(field:hello* OR world*) AND (field:[hello "beautiful world"] OR (field:99 AND field:[100 200 300]) OR field:[400 TO 499])`, 230 | `(json_extract(e.content, ?) GLOB ? OR json_extract(e.content, ?) GLOB ?) AND ` + 231 | `( json_extract(e.content, ?) IN (?,?) OR ` + 232 | ` (CAST(json_extract(e.content, ?) AS NUMERIC) = ? AND CAST(json_extract(e.content, ?) AS NUMERIC) IN (?,?,?)) OR` + 233 | ` CAST(json_extract(e.content, ?) AS NUMERIC) BETWEEN ? AND ?` + 234 | `)`, 235 | []any{ 236 | "$.field", "hello*", "$.msg", "world*", 237 | "$.field", "hello", "beautiful world", 238 | "$.field", float64(99), "$.field", float64(100), float64(200), float64(300), 239 | "$.field", float64(400), float64(499), 240 | }, 241 | }, 242 | } 243 | for _, tt := range testCases { 244 | t.Run(tt.expr, func(t *testing.T) { 245 | runExprTest(t, tt) 246 | }) 247 | } 248 | } 249 | 250 | func Test_ExprEscape(t *testing.T) { 251 | testCases := []testExprData{ 252 | { 253 | `hell\"o`, 254 | "json_extract(e.content, ?) GLOB ?", 255 | []any{"$.msg", `*hell"o*`}, 256 | }, 257 | { 258 | `"hello \" world"`, 259 | "json_extract(e.content, ?) 
= ?", 260 | []any{"$.msg", `hello " world`}, 261 | }, 262 | { 263 | `"hello \" world*"`, 264 | "json_extract(e.content, ?) GLOB ?", 265 | []any{"$.msg", `hello " world*`}, 266 | }, 267 | { 268 | `field:hell\"o`, 269 | "json_extract(e.content, ?) GLOB ?", 270 | []any{"$.field", `*hell"o*`}, 271 | }, 272 | { 273 | `field:"hello \" world"`, 274 | "json_extract(e.content, ?) = ?", 275 | []any{"$.field", `hello " world`}, 276 | }, 277 | { 278 | `field:"hello \" world*"`, 279 | "json_extract(e.content, ?) GLOB ?", 280 | []any{"$.field", `hello " world*`}, 281 | }, 282 | { 283 | `field:"hello [beautiful] world*"`, 284 | "json_extract(e.content, ?) GLOB ?", 285 | []any{"$.field", `hello [beautiful] world*`}, 286 | }, 287 | { 288 | `field:he\[ll]\"o`, 289 | "json_extract(e.content, ?) GLOB ?", 290 | []any{"$.field", `*he[ll]"o*`}, 291 | }, 292 | { 293 | `field:[hell\"o "beautiful \" world"]`, 294 | "json_extract(e.content, ?) IN (?, ?)", 295 | []any{"$.field", `hell"o`, `beautiful " world`}, 296 | }, 297 | { 298 | `field:[hell\"o world\]]`, 299 | "json_extract(e.content, ?) IN (?, ?)", 300 | []any{"$.field", `hell"o`, `world]`}, 301 | }, 302 | { 303 | `path:c\:/dev/projects/*`, 304 | "json_extract(e.content, ?) GLOB ?", 305 | []any{"$.path", `c:/dev/projects/*`}, 306 | }, 307 | { 308 | `(hell\"o AND \"world)`, 309 | "(json_extract(e.content, ?) GLOB ? AND json_extract(e.content, ?) GLOB ?)", 310 | []any{"$.msg", `*hell"o*`, "$.msg", `*"world*`}, 311 | }, 312 | } 313 | for _, tt := range testCases { 314 | t.Run(tt.expr, func(t *testing.T) { 315 | runExprTest(t, tt) 316 | }) 317 | } 318 | } 319 | 320 | func Test_ExprIncomplete(t *testing.T) { 321 | testCases := []testExprData{ 322 | { 323 | `"hello \" world`, 324 | "json_extract(e.content, ?) = ?", 325 | []any{"$.msg", `hello " world`}, 326 | }, 327 | { 328 | `field:[hell\"o "beautiful \" world"`, 329 | "json_extract(e.content, ?) IN (?, ?)", 330 | []any{"$.field", `hell"o`, `beautiful " world`}, 331 | }, 332 | { 333 | `field:[hell\"o world\]`, 334 | "json_extract(e.content, ?) IN (?, ?)", 335 | []any{"$.field", `hell"o`, `world]`}, 336 | }, 337 | { 338 | `field:[]`, 339 | "", 340 | []any{}, 341 | }, 342 | { 343 | `field:[ ]`, 344 | "", 345 | []any{}, 346 | }, 347 | { 348 | `(hell\"o AND \"world`, 349 | "(json_extract(e.content, ?) GLOB ? AND json_extract(e.content, ?) GLOB ?)", 350 | []any{"$.msg", `*hell"o*`, "$.msg", `*"world*`}, 351 | }, 352 | { 353 | `(field:hello* OR world*) AND (field:[hello "beautiful world"] OR (field:99 AND field:[100 200 300`, 354 | `(json_extract(e.content, ?) GLOB ? OR json_extract(e.content, ?) GLOB ?) AND ` + 355 | `( json_extract(e.content, ?) IN (?,?) OR ` + 356 | ` (CAST(json_extract(e.content, ?) AS NUMERIC) = ? AND CAST(json_extract(e.content, ?) 
AS NUMERIC) IN (?,?,?))` + 357 | `)`, 358 | []any{ 359 | "$.field", "hello*", "$.msg", "world*", 360 | "$.field", "hello", "beautiful world", 361 | "$.field", float64(99), "$.field", float64(100), float64(200), float64(300), 362 | }, 363 | }, 364 | } 365 | for _, tt := range testCases { 366 | t.Run(tt.expr, func(t *testing.T) { 367 | runExprTest(t, tt) 368 | }) 369 | } 370 | } 371 | 372 | func runExprTest(t *testing.T, tt testExprData) { 373 | compiled, err := ExpBuilderFn(tt.expr) 374 | assert.NoError(t, err) 375 | 376 | actual := compiled.Sql 377 | expected := tt.sql 378 | 379 | a := strings.Join(strings.Fields(actual), "") 380 | b := strings.Join(strings.Fields(expected), "") 381 | if !reflect.DeepEqual(a, b) { 382 | 383 | diff, _ := difflib.GetUnifiedDiffString(difflib.UnifiedDiff{ 384 | A: difflib.SplitLines(reflect.ValueOf(expected).String()), 385 | B: difflib.SplitLines(reflect.ValueOf(actual).String()), 386 | FromFile: "Expected", 387 | FromDate: "", 388 | ToFile: "Actual", 389 | ToDate: "", 390 | Context: 1, 391 | }) 392 | 393 | assert.Fail(t, fmt.Sprintf("Not equal: \n"+ 394 | "expected: %s\n"+ 395 | "actual : %s\n\n"+ 396 | "Diff: %s\n", expected, actual, diff)) 397 | return 398 | } 399 | assert.Equal(t, tt.args, compiled.Args) 400 | } 401 | -------------------------------------------------------------------------------- /sqlite/storage.go: -------------------------------------------------------------------------------- 1 | package sqlite 2 | 3 | import ( 4 | "errors" 5 | "strings" 6 | "sync" 7 | "sync/atomic" 8 | "time" 9 | 10 | "github.com/nidorx/sqlog" 11 | ) 12 | 13 | type Config struct { 14 | Dir string // Directory for the database files (default "./logs") 15 | Prefix string // Prefix for the database name (default "sqlog") 16 | SQLiteOptions map[string]string // SQLite connection string options (https://github.com/mattn/go-sqlite3#connection-string) 17 | 18 | Driver string // SQLite driver name (default sqlite3) 19 | 20 | // Allows defining a custom expression processor. 21 | ExprBuilder func(expression string) (*Expr, error) 22 | 23 | // Allows the database to accept older logs. 24 | // Useful for log migration or receiving delayed logs from integrations. 25 | // (Default: 3600 seconds = 1 hour) 26 | // 27 | // @TODO: Implement a solution to enable moving logs to the correct time range. 28 | MaxChunkAgeSec int64 29 | 30 | // When the current log file reaches this size (in MB), it will be archived. 31 | // (Default: 20MB). 32 | MaxFilesizeMB int32 33 | 34 | // The total size of all archived files. Once this cap is exceeded, 35 | // the oldest archive files will be deleted asynchronously. 36 | // (Default: 1000MB). 37 | MaxSizeTotalMB int32 38 | 39 | // The maximum number of databases that can be opened simultaneously. 40 | MaxOpenedDB int32 41 | 42 | // The maximum number of goroutines for scheduled task processing. 43 | // (Default: 200). 44 | MaxRunningTasks int32 45 | 46 | // Time (in seconds) before closing idle databases. 47 | // (Default: 30 seconds). 48 | CloseIdleSec int64 49 | 50 | // Interval (in seconds) for storage maintenance checks. 51 | // (Default: 5 seconds). 52 | IntervalSizeCheckSec int32 53 | 54 | // Interval (in milliseconds) for scheduled task maintenance. 55 | // (Default: 100ms). 56 | IntervalScheduledTasksMs int32 57 | 58 | // Interval (in seconds) to execute a WAL checkpoint. 59 | // Set to 0 or less to disable. 
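	// Note: with the WAL journal mode set in StorageSQLiteOptions, a periodic
	// checkpoint keeps the -wal file from growing without bound; the TRUNCATE
	// mode (the default below) additionally resets the -wal file to zero bytes.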
60 | // 61 | // See https://www.sqlite.org/wal.html#ckpt 62 | IntervalWalCheckpointSec int32 63 | 64 | // WAL checkpoint modes: PASSIVE, FULL, RESTART, or TRUNCATE. 65 | // (Default: TRUNCATE). 66 | // 67 | // See https://www.sqlite.org/wal.html#ckpt 68 | WalCheckpointMode string 69 | } 70 | 71 | // Storage represents a connection to a SQLite database. 72 | // Reference: https://ferrous-systems.com/blog/lock-free-ring-buffer/ 73 | type storage struct { 74 | sqlog.Storage 75 | sqlog.StorageWithApi 76 | mu sync.Mutex 77 | dbs []*storageDb // All databases managed by this storage 78 | liveDbs []*storageDb // Currently active databases receiving logs 79 | config *Config // 80 | taskIdSeq int32 // Last task ID 81 | taskMap sync.Map // Stores task execution outputs 82 | numActiveTasks int32 // Tracks the number of active goroutines for scheduled tasks 83 | closed atomic.Bool 84 | quit chan struct{} 85 | shutdown chan struct{} 86 | } 87 | 88 | // New initializes a new storage instance. 89 | func New(config *Config) (*storage, error) { 90 | if config == nil { 91 | config = &Config{} 92 | } 93 | 94 | if len(config.SQLiteOptions) == 0 { 95 | config.SQLiteOptions = StorageSQLiteOptions 96 | } 97 | 98 | if config.Dir == "" { 99 | config.Dir = "./logs" 100 | } 101 | 102 | config.Prefix = strings.TrimSpace(config.Prefix) 103 | if config.Prefix == "" { 104 | config.Prefix = "sqlog" 105 | } else { 106 | config.Prefix = strings.ToLower(strings.Join(strings.Fields(config.Prefix), "_")) 107 | } 108 | 109 | if config.Driver == "" { 110 | config.Driver = "sqlite3" 111 | } 112 | 113 | if config.MaxFilesizeMB <= 0 { 114 | config.MaxFilesizeMB = 20 // ~20MB 115 | } 116 | 117 | if config.MaxSizeTotalMB <= 0 { 118 | config.MaxSizeTotalMB = 1000 // ~1GB 119 | } 120 | 121 | if config.IntervalScheduledTasksMs <= 0 { 122 | config.IntervalScheduledTasksMs = 100 123 | } 124 | 125 | if config.MaxRunningTasks <= 0 { 126 | config.MaxRunningTasks = 500 127 | } 128 | 129 | if config.CloseIdleSec <= 0 { 130 | config.CloseIdleSec = 30 131 | } 132 | 133 | if config.IntervalSizeCheckSec <= 0 { 134 | config.IntervalSizeCheckSec = 5 135 | } 136 | 137 | if config.ExprBuilder == nil { 138 | config.ExprBuilder = ExpBuilderFn 139 | } 140 | 141 | if config.MaxChunkAgeSec <= 0 { 142 | config.MaxChunkAgeSec = 3600 143 | } 144 | 145 | dbs, err := initDbs(config.Driver, config.Dir, config.Prefix) 146 | if err != nil { 147 | return nil, err 148 | } 149 | 150 | if len(dbs) == 0 { 151 | dbs = append(dbs, newDb(config.Driver, config.Dir, config.Prefix, time.Now(), config.MaxChunkAgeSec)) 152 | } 153 | 154 | // Initialize the active database (live) 155 | live := dbs[len(dbs)-1] 156 | if err := live.connect(config.SQLiteOptions); err != nil { 157 | return nil, errors.Join(errors.New("[sqlog] unable to start live db"), err) 158 | } 159 | live.live = true 160 | 161 | s := &storage{ 162 | config: config, 163 | dbs: dbs, 164 | liveDbs: []*storageDb{live}, 165 | quit: make(chan struct{}), 166 | shutdown: make(chan struct{}), 167 | } 168 | 169 | go s.routineSizeCheck() 170 | go s.routineScheduledTasks() 171 | 172 | if s.config.IntervalWalCheckpointSec > 0 { 173 | config.WalCheckpointMode = strings.ToUpper(config.WalCheckpointMode) 174 | switch config.WalCheckpointMode { 175 | case "PASSIVE", "FULL", "RESTART", "TRUNCATE": 176 | default: 177 | config.WalCheckpointMode = "TRUNCATE" 178 | } 179 | 180 | go s.routineWalCheckpoint() 181 | } 182 | 183 | return s, nil 184 | } 185 | 186 | // Flush saves the chunk records to the current live database. 
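// A minimal manual flush, mirroring how storage_test.go below drives it
// (sketch only; sqlog.NewChunk/Put/Entry as used in those tests):
//
//	chunk := sqlog.NewChunk(32)
//	chunk.Put(&sqlog.Entry{Time: time.Now(), Level: 0, Content: []byte(`{"msg":"hello"}`)})
//	if err := storage.Flush(chunk); err != nil {
//		// handle the error
//	}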
187 | func (s *storage) Flush(chunk *sqlog.Chunk) error { 188 | var ( 189 | db *storageDb 190 | epochStart = chunk.First() 191 | epochEnd = chunk.Last() 192 | ) 193 | for _, d := range s.liveDbs { 194 | if d.epochStart <= epochStart && (d.epochEnd == 0 || d.epochEnd >= epochEnd) { 195 | db = d 196 | break 197 | } 198 | } 199 | if db == nil { 200 | db = s.liveDbs[len(s.liveDbs)-1] 201 | } 202 | 203 | if !db.isOpen() { 204 | // Should not happen 205 | return errors.New("db is closed") 206 | } 207 | 208 | return db.flush(chunk) 209 | } 210 | 211 | // Close closes all databases and cleans up. 212 | func (s *storage) Close() error { 213 | 214 | // stop routines 215 | close(s.quit) 216 | <-s.shutdown 217 | 218 | // close dbs 219 | for _, db := range s.dbs { 220 | db.close() 221 | } 222 | 223 | s.closed.Store(true) 224 | return nil 225 | } 226 | -------------------------------------------------------------------------------- /sqlite/storage_db.go: -------------------------------------------------------------------------------- 1 | package sqlite 2 | 3 | import ( 4 | "bytes" 5 | "database/sql" 6 | "errors" 7 | "fmt" 8 | "log/slog" 9 | "os" 10 | "path" 11 | "sync" 12 | "sync/atomic" 13 | "time" 14 | 15 | "github.com/nidorx/sqlog" 16 | ) 17 | 18 | var ( 19 | // SQLite performance optimizations and tips 20 | // https://sqlite.org/forum/forum 21 | // https://www.powersync.com/blog/sqlite-optimizations-for-ultra-high-performance 22 | // https://phiresky.github.io/blog/2020/sqlite-performance-tuning/ 23 | // https://stackoverflow.com/questions/1711631/improve-insert-per-second-performance-of-sqlite 24 | // https://sqlite.org/pragma.html#pragma_journal_mode 25 | // https://wiki.mozilla.org/Performance/Avoid_SQLite_In_Your_Next_Firefox_Feature 26 | // https://www.deconstructconf.com/2019/dan-luu-files 27 | StorageSQLiteOptions = map[string]string{ 28 | "_journal_mode": "WAL", // Write-ahead logging for better performance 29 | "_synchronous": "NORMAL", // Balance between performance and safety 30 | "_cache_size": "409600", // 4MB 31 | } 32 | 33 | sqlCreateTable = `CREATE TABLE IF NOT EXISTS entries ( 34 | epoch_secs LONG, 35 | nanos INTEGER, 36 | level INTEGER, 37 | content BLOB 38 | )` 39 | 40 | sqlCreateIndex = `CREATE INDEX IF NOT EXISTS entries_epoch_desc ON entries(epoch_secs DESC)` 41 | 42 | sqlInsert = []byte(`INSERT INTO entries(epoch_secs, nanos, level, content) VALUES `) 43 | sqlInsertValues = []byte(`(?,?,?,?)`) 44 | ) 45 | 46 | const ( 47 | db_closed int32 = iota // Database is closed 48 | db_loading // Initializing the database 49 | db_open // Database is open 50 | db_closing // Closing the database 51 | db_removing // Removing the database 52 | ) 53 | 54 | type storageDb struct { 55 | mu sync.Mutex // Mutex for checkpoint and flush operations 56 | live bool // Indicates if the database is live and receiving logs 57 | size int64 // Size of the database in bytes 58 | status int32 // Connection status (closed, loading, open, closing, removing) 59 | epochStart int64 // Epoch of the oldest entry in this database 60 | newEpochStart int64 // When accepting an old log, adjust file name when closing the DB 61 | epochEnd int64 // Epoch of the newest entry in this database 62 | lastUsedEpoch int64 // Last usage timestamp of this storage (query, flush) 63 | maxChunkAgeSec int64 // Maximum allowed chunk age 64 | fileDir string // Directory of the database file 65 | filePath string // Path to the database file 66 | filePrefix string // Prefix for the database file name 67 | db *sql.DB // SQLite connection 
object 68 | taskCount int32 // Number of scheduled tasks 69 | taskMap sync.Map // Map of scheduled tasks 70 | driver string // SQLite driver name 71 | } 72 | 73 | // schedule schedules a query execution on this instance 74 | func (s *storageDb) schedule(id int32, task *dbTask) { 75 | s.taskMap.Store(id, task) 76 | atomic.AddInt32(&s.taskCount, 1) 77 | } 78 | 79 | // cancel cancels an asynchronous process 80 | func (s *storageDb) cancel(id int32) bool { 81 | _, loaded := s.taskMap.LoadAndDelete(id) 82 | if loaded { 83 | atomic.AddInt32(&s.taskCount, -1) 84 | } 85 | return loaded 86 | } 87 | 88 | // tasks returns the number of scheduled queries for this database 89 | func (s *storageDb) tasks() int32 { 90 | return s.taskCount 91 | } 92 | 93 | // execute runs the pending scheduled callbacks on this database 94 | func (s *storageDb) execute(f func(id int32, task *dbTask) bool) { 95 | s.taskMap.Range(func(key, value any) bool { 96 | atomic.AddInt32(&s.taskCount, -1) 97 | s.taskMap.Delete(key) 98 | 99 | id, isIdOk := key.(int32) 100 | task, isTaskOk := value.(*dbTask) 101 | if isIdOk && isTaskOk { 102 | return f(id, task) 103 | } else { 104 | return f(-1, nil) 105 | } 106 | }) 107 | } 108 | 109 | // isOpen checks if the database is open 110 | func (s *storageDb) isOpen() bool { 111 | return atomic.LoadInt32(&s.status) == db_open 112 | } 113 | 114 | // lastUsedSec returns the time elapsed since the last use of this database 115 | func (s *storageDb) lastUsedSec() int64 { 116 | return time.Now().Unix() - s.lastUsedEpoch 117 | } 118 | 119 | // updateSize updates the size of the database 120 | // https://til.simonwillison.net/sqlite/database-file-size 121 | // https://www.powersync.com/blog/sqlite-optimizations-for-ultra-high-performance 122 | func (s *storageDb) updateSize() error { 123 | stm, err := s.db.Prepare(` 124 | SELECT 125 | page_count * page_size as total_size, 126 | freelist_count * page_size as freelist_size 127 | FROM pragma_page_count(), pragma_freelist_count(), pragma_page_size() 128 | `) 129 | if err != nil { 130 | return err 131 | } 132 | defer stm.Close() 133 | 134 | rows, err := stm.Query() 135 | if err != nil { 136 | return err 137 | } 138 | defer rows.Close() 139 | 140 | var ( 141 | totalSize int64 142 | freelistSize int64 143 | ) 144 | 145 | if rows.Next() { 146 | if err = rows.Scan(&totalSize, &freelistSize); err != nil { 147 | return err 148 | } 149 | } 150 | 151 | atomic.StoreInt64(&s.size, totalSize-freelistSize) 152 | return nil 153 | } 154 | 155 | // closeSafe checks if the database can be safely closed 156 | func (s *storageDb) closeSafe() bool { 157 | if s.lastUsedSec() < 2 { 158 | return false 159 | } 160 | return s.close() 161 | } 162 | 163 | // remove deletes the database file 164 | func (s *storageDb) remove() { 165 | if s.close() && atomic.CompareAndSwapInt32(&s.status, db_closed, db_removing) { 166 | if err := os.Remove(s.filePath); err != nil { 167 | slog.Warn( 168 | "[sqlog] error removing database", 169 | slog.String("file", s.filePath), 170 | slog.Any("error", err), 171 | ) 172 | } 173 | } 174 | } 175 | 176 | // connect establishes the connection to the database 177 | func (s *storageDb) connect(options map[string]string) error { 178 | if atomic.CompareAndSwapInt32(&s.status, db_closed, db_loading) { 179 | 180 | // file:test.db?cache=shared&mode=memory 181 | connString := "file:" + s.filePath 182 | if len(options) > 0 { 183 | connString += "?"
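// With the default StorageSQLiteOptions this yields a DSN such as
//
//	file:./logs/sqlog_1700000000.db?_journal_mode=WAL&_synchronous=NORMAL&_cache_size=409600
//
// (the epoch in the file name is illustrative; map iteration order is random,
// so the option order varies between runs)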
184 | i := 0 185 | for k, v := range options { 186 | if i > 0 { 187 | connString += "&" 188 | } 189 | connString += k + "=" + v 190 | i++ 191 | } 192 | } 193 | 194 | db, err := sql.Open(s.driver, connString) 195 | if err != nil { 196 | atomic.StoreInt32(&s.status, db_closed) 197 | return err 198 | } 199 | 200 | if _, err := db.Exec(sqlCreateTable); err != nil { 201 | db.Close() 202 | atomic.StoreInt32(&s.status, db_closed) 203 | return err 204 | } 205 | 206 | if _, err := db.Exec(sqlCreateIndex); err != nil { 207 | db.Close() 208 | atomic.StoreInt32(&s.status, db_closed) 209 | return err 210 | } 211 | 212 | s.db = db 213 | atomic.StoreInt64(&s.lastUsedEpoch, time.Now().Unix()) 214 | atomic.StoreInt32(&s.status, db_open) 215 | } 216 | return nil 217 | } 218 | 219 | // query prepares and executes a query on the database 220 | func (s *storageDb) query(sql string, args []any) (*sql.Stmt, *sql.Rows, error) { 221 | if s.db == nil { 222 | return nil, nil, errors.New("db is closed") 223 | } 224 | 225 | stm, err := s.db.Prepare(sql) 226 | if err != nil { 227 | return nil, nil, err 228 | } 229 | 230 | rows, err := stm.Query(args...) 231 | if err != nil { 232 | stm.Close() 233 | return nil, nil, err 234 | } 235 | 236 | return stm, rows, nil 237 | } 238 | 239 | // flush saves the chunk records to this database 240 | func (s *storageDb) flush(chunk *sqlog.Chunk) error { 241 | values := []any{} 242 | 243 | sql := bytes.NewBuffer(make([]byte, 0, 1952)) 244 | sql.Write(sqlInsert) 245 | 246 | var size int64 247 | maxChunkAge := s.epochStart - s.maxChunkAgeSec 248 | 249 | for _, e := range chunk.List() { 250 | if e == nil { 251 | continue 252 | } 253 | epoch := e.Time.Unix() 254 | if epoch < maxChunkAge { 255 | continue // skip entries older than this db accepts, keeping placeholders and args in sync 256 | } 257 | if len(values) > 0 { 258 | sql.WriteByte(',') 259 | } 260 | sql.Write(sqlInsertValues) 261 | values = append(values, epoch, e.Time.Nanosecond(), e.Level, e.Content) 262 | size += int64(len(e.Content)) 263 | } 264 | 265 | if len(values) == 0 { 266 | // should never happen 267 | slog.Warn("[sqlog] trying to flush an empty chunk") 268 | return nil 269 | } 270 | 271 | s.mu.Lock() 272 | defer s.mu.Unlock() 273 | 274 | if tx, err := s.db.Begin(); err != nil { 275 | return err 276 | } else if stmt, err := tx.Prepare(sql.String()); err != nil { 277 | tx.Rollback() 278 | return err 279 | } else if _, err = stmt.Exec(values...); err != nil { 280 | stmt.Close() 281 | tx.Rollback() 282 | return err 283 | } else { 284 | stmt.Close() 285 | tx.Commit() 286 | 287 | atomic.AddInt64(&s.size, size) 288 | s.epochEnd = max(chunk.Last(), s.epochEnd) 289 | if chunkEpochStart := chunk.First(); chunkEpochStart < s.newEpochStart { 290 | // db will be renamed during close 291 | s.newEpochStart = chunkEpochStart 292 | } 293 | atomic.StoreInt64(&s.lastUsedEpoch, time.Now().Unix()) 294 | 295 | return nil 296 | } 297 | } 298 | 299 | // See https://www.sqlite.org/wal.html#ckpt 300 | func (s *storageDb) checkpoint(mode string) { 301 | if s.isOpen() { 302 | s.mu.Lock() 303 | defer s.mu.Unlock() 304 | 305 | // "PASSIVE", "FULL", "RESTART", "TRUNCATE" 306 | // https://www.sqlite.org/pragma.html#pragma_wal_checkpoint 307 | _, err := s.db.Exec(fmt.Sprintf("PRAGMA wal_checkpoint(%s)", mode)) 308 | if err != nil { 309 | slog.Warn( 310 | "[sqlog] error checkpoint", 311 | slog.String("path", s.filePath), 312 | slog.Any("error", err), 313 | ) 314 | } 315 | } 316 | } 317 | 318 | func (s *storageDb) close() bool { 319 | if atomic.CompareAndSwapInt32(&s.status, db_open, db_closing) { 320 | 321 | if s.live { 322 | if err := s.vacuum();
err != nil { 323 | slog.Warn( 324 | "[sqlog] error vacuum database", 325 | slog.String("path", s.filePath), 326 | slog.Any("error", err), 327 | ) 328 | } 329 | } 330 | 331 | s.db.Close() 332 | s.db = nil 333 | 334 | if s.newEpochStart < s.epochStart { 335 | // need to rename DB 336 | newPath := path.Join(s.fileDir, fmt.Sprintf("%s_%d.db", s.filePrefix, s.newEpochStart)) 337 | if err := os.Rename(s.filePath, newPath); err != nil { 338 | slog.Warn( 339 | "[sqlog] error renaming database", 340 | slog.String("path", s.filePath), 341 | slog.String("newpath", newPath), 342 | slog.Any("error", err), 343 | ) 344 | } 345 | } 346 | 347 | atomic.StoreInt32(&s.status, db_closed) 348 | } 349 | return atomic.LoadInt32(&s.status) == db_closed 350 | } 351 | 352 | // vacuum the database. 353 | func (s *storageDb) vacuum() error { 354 | // Do a full vacuum of the live repository. This 355 | // should be fairly fast as it's deliberately size constrained. 356 | 357 | // 1) maintain an in-memory operation-queue, so you can copy the DB when idle, 358 | // vacuum as long as necessary on the copy, and then switch to the vacuumed copy 359 | // after replaying the queue. 360 | // (SQLite allows only a single writer, so statement-replay is safe, unlike 361 | // concurrent-writer databases in some cases since you can't recreate the DB's 362 | // row-visibility logic) 363 | // https://news.ycombinator.com/item?id=23521079 364 | 365 | // Use dbstat to find out what fraction of the pages in a database are sequential 366 | // if there's a significant degree of fragmentation, then vacuum. 367 | // https://www.sqlite.org/dbstat.html 368 | _, err := s.db.Exec("VACUUM") 369 | return err 370 | } 371 | -------------------------------------------------------------------------------- /sqlite/storage_routine_checkpoint.go: -------------------------------------------------------------------------------- 1 | package sqlite 2 | 3 | import ( 4 | "time" 5 | ) 6 | 7 | // See https://www.sqlite.org/wal.html#ckpt 8 | func (s *storage) routineWalCheckpoint() { 9 | d := time.Duration(s.config.IntervalWalCheckpointSec) * time.Second 10 | tick := time.NewTicker(d) 11 | defer tick.Stop() 12 | 13 | for { 14 | select { 15 | 16 | case <-tick.C: 17 | var liveOpenDbs []*storageDb 18 | s.mu.Lock() 19 | for _, db := range s.liveDbs { 20 | if db.isOpen() { 21 | liveOpenDbs = append(liveOpenDbs, db) 22 | } 23 | } 24 | s.mu.Unlock() 25 | 26 | for _, db := range liveOpenDbs { 27 | db.checkpoint(s.config.WalCheckpointMode) 28 | } 29 | 30 | tick.Reset(d) 31 | case <-s.quit: 32 | return 33 | } 34 | 35 | } 36 | } 37 | -------------------------------------------------------------------------------- /sqlite/storage_routine_size.go: -------------------------------------------------------------------------------- 1 | package sqlite 2 | 3 | import ( 4 | "log/slog" 5 | "time" 6 | ) 7 | 8 | func (s *storage) routineSizeCheck() { 9 | d := time.Duration(s.config.IntervalSizeCheckSec) * time.Second 10 | tick := time.NewTicker(d) 11 | defer tick.Stop() 12 | 13 | for { 14 | select { 15 | 16 | case <-tick.C: 17 | s.doRoutineSizeCheck() 18 | tick.Reset(d) 19 | 20 | // @TODO: compress DB 21 | case <-s.quit: 22 | return 23 | } 24 | 25 | } 26 | } 27 | 28 | func (s *storage) doRoutineSizeCheck() { 29 | // archiving dbs 30 | if s.liveDbs[len(s.liveDbs)-1].size > int64(s.config.MaxFilesizeMB)*1000000 { 31 | nextStart := time.Now().Add(time.Duration(s.config.IntervalSizeCheckSec*2) * time.Second) 32 | ndb := newDb(s.config.Driver, s.config.Dir, s.config.Prefix,
nextStart, s.config.MaxChunkAgeSec) 33 | ndb.live = true 34 | if err := ndb.connect(s.config.SQLiteOptions); err != nil { 35 | slog.Warn( 36 | "[sqlog] error creating live database", 37 | slog.String("file", ndb.filePath), 38 | slog.Any("error", err), 39 | ) 40 | } else { 41 | s.mu.Lock() 42 | s.dbs = append(s.dbs, ndb) 43 | s.liveDbs = append(s.liveDbs, ndb) 44 | s.mu.Unlock() 45 | } 46 | } 47 | 48 | // update live-dbs 49 | var ( 50 | maxLiveEpochEnd = time.Now().Unix() - s.config.MaxChunkAgeSec 51 | liveDbsValid []*storageDb 52 | liveDbsInvalid []*storageDb 53 | ) 54 | // maxChunkAge := d.epochStart - d.maxChunkAgeSec 55 | for _, d := range s.liveDbs { 56 | d.updateSize() 57 | 58 | if d.epochEnd == 0 || d.epochEnd >= maxLiveEpochEnd { 59 | liveDbsValid = append(liveDbsValid, d) 60 | } else { 61 | liveDbsInvalid = append(liveDbsInvalid, d) 62 | } 63 | } 64 | 65 | if len(liveDbsValid) > 0 && len(liveDbsValid) != len(s.liveDbs) && len(s.liveDbs) > 1 { 66 | s.mu.Lock() 67 | s.liveDbs = liveDbsValid 68 | for _, d := range liveDbsInvalid { 69 | d.live = false // can be closed 70 | } 71 | s.mu.Unlock() 72 | } 73 | 74 | totalSizeBytes := int64(0) 75 | for _, db := range s.dbs { 76 | totalSizeBytes += db.size 77 | } 78 | 79 | if totalSizeBytes > int64(s.config.MaxSizeTotalMB)*1000000 { 80 | if olderDb := s.dbs[0]; !olderDb.live { 81 | olderDb.remove() 82 | s.mu.Lock() 83 | s.dbs = s.dbs[1:] 84 | s.mu.Unlock() 85 | } 86 | } 87 | } 88 | -------------------------------------------------------------------------------- /sqlite/storage_scheduler.go: -------------------------------------------------------------------------------- 1 | package sqlite 2 | 3 | import ( 4 | "sort" 5 | "sync/atomic" 6 | "time" 7 | 8 | "github.com/nidorx/sqlog" 9 | ) 10 | 11 | const ( 12 | task_created int32 = iota // Task has been created 13 | task_process // Task is being processed 14 | task_finished // Task has been completed 15 | task_canceled // Task has been canceled 16 | ) 17 | 18 | type dbTask struct { 19 | db *storageDb // Reference to the storage database associated with the task 20 | state int32 // Task state: 0=created, 1=processing, 2=finished, 3=canceled 21 | output *sqlog.Output // Output of the task 22 | callback func(*storageDb, *sqlog.Output) error // Callback function to execute the task logic 23 | } 24 | 25 | // Result retrieves the result of an asynchronous task processing. 26 | // If the task has finished or has been canceled, it returns the task output and removes the task from taskMap. 27 | // Otherwise, it returns a status indicating that the task is still scheduled. 28 | func (s *storage) Result(taskId int32) (*sqlog.Output, error) { 29 | if v, loaded := s.taskMap.Load(taskId); loaded { 30 | task := v.(*dbTask) 31 | if task.state == task_finished || task.state == task_canceled { 32 | s.taskMap.Delete(taskId) 33 | return task.output, nil 34 | } else { 35 | return &sqlog.Output{Scheduled: true, TaskIds: []int32{taskId}}, nil 36 | } 37 | } 38 | return nil, nil 39 | } 40 | 41 | // Cancel aborts an asynchronous task processing. 42 | func (s *storage) Cancel(taskId int32) error { 43 | if v, loaded := s.taskMap.Load(taskId); loaded { 44 | task := v.(*dbTask) 45 | atomic.StoreInt32(&task.state, task_canceled) 46 | task.db.cancel(taskId) 47 | s.taskMap.Delete(taskId) 48 | } 49 | return nil 50 | } 51 | 52 | // schedule creates and schedules a task for each database in the list. 53 | // The provided callback will be executed for each scheduled task. 
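// Typical round-trip (sketch): a query that touches a closed database returns
// Output{Scheduled: true, TaskIds: [...]}; the caller then polls
// storage.Result(id) until the routine below has executed the callback, or
// abandons the task with storage.Cancel(id).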
54 | func (s *storage) schedule(dbs []*storageDb, callback func(*storageDb, *sqlog.Output) error) (taskIds []int32) { 55 | for _, db := range dbs { 56 | id := atomic.AddInt32(&s.taskIdSeq, 1) 57 | task := &dbTask{callback: callback, output: &sqlog.Output{}} 58 | s.taskMap.Store(id, task) 59 | db.schedule(id, task) 60 | taskIds = append(taskIds, id) 61 | } 62 | return 63 | } 64 | 65 | // routineScheduledTasks manages the scheduled tasks processing loop. 66 | // It runs on a separate goroutine, executing tasks at regular intervals. 67 | func (s *storage) routineScheduledTasks() { 68 | defer close(s.shutdown) 69 | 70 | d := time.Duration(s.config.IntervalScheduledTasksMs) * time.Millisecond 71 | // d := 5 * time.Second 72 | tick := time.NewTicker(d) 73 | defer tick.Stop() 74 | 75 | for { 76 | select { 77 | 78 | case <-tick.C: 79 | s.doRoutineScheduledTasks() 80 | 81 | tick.Reset(d) 82 | case <-s.quit: 83 | return 84 | } 85 | } 86 | } 87 | 88 | // doRoutineScheduledTasks processes tasks in the databases. 89 | // It also manages the lifecycle of databases by closing idle ones 90 | // and ensuring the right number of databases are open. 91 | func (s *storage) doRoutineScheduledTasks() { 92 | var ( 93 | totalTasks int32 94 | totalOpen int32 95 | openWithTasks []*storageDb 96 | openWithoutTask []*storageDb 97 | closedWithTasks []*storageDb 98 | ) 99 | 100 | // Gather information about tasks and databases 101 | for _, db := range s.dbs { 102 | tasks := db.tasks() 103 | totalTasks += tasks 104 | if db.isOpen() { 105 | totalOpen++ 106 | if tasks > 0 { 107 | openWithTasks = append(openWithTasks, db) 108 | } else { 109 | openWithoutTask = append(openWithoutTask, db) 110 | } 111 | } else if tasks > 0 { 112 | closedWithTasks = append(closedWithTasks, db) 113 | } 114 | } 115 | 116 | closedAnyDb := false 117 | 118 | // Process tasks in the currently open databases 119 | if totalTasks > 0 { 120 | 121 | // Maximum number of tasks that can be processed in parallel 122 | qtMaxTasks := s.config.MaxRunningTasks - s.numActiveTasks 123 | if qtMaxTasks > 0 { 124 | 125 | onDbTask := func(taskId int32, complete bool) { 126 | if complete { 127 | // @TODO: remove result from taskMap after n seconds 128 | atomic.AddInt32(&s.numActiveTasks, -1) 129 | } else { 130 | atomic.AddInt32(&s.numActiveTasks, 1) 131 | } 132 | } 133 | 134 | for _, db := range openWithTasks { 135 | numTasks := db.tasks() 136 | if numTasks == 0 { 137 | continue 138 | } 139 | maxForThisDb := max(1, numTasks*qtMaxTasks/totalTasks) 140 | s.executeDbTasks(db, maxForThisDb, onDbTask) 141 | } 142 | } 143 | } 144 | 145 | // Close idle databases 146 | for _, db := range s.dbs { 147 | if !db.live && db.lastUsedSec() > s.config.CloseIdleSec && db.closeSafe() { 148 | totalOpen-- 149 | closedAnyDb = true 150 | } 151 | } 152 | 153 | // Ensure the number of open databases is within the limit 154 | if totalOpen > s.config.MaxOpenedDB { 155 | 156 | // Close databases that have no tasks 157 | for _, db := range openWithoutTask { 158 | if totalOpen > s.config.MaxOpenedDB { 159 | if !db.live && db.closeSafe() { 160 | totalOpen-- 161 | closedAnyDb = true 162 | } 163 | } 164 | } 165 | 166 | // Close databases with the fewest tasks 167 | if totalOpen > s.config.MaxOpenedDB { 168 | sort.SliceStable(openWithTasks, func(i, j int) bool { 169 | return openWithTasks[i].tasks() < openWithTasks[j].tasks() 170 | }) 171 | for _, db := range openWithTasks { 172 | if totalOpen > s.config.MaxOpenedDB { 173 | if !db.live && db.closeSafe() { 174 | totalOpen-- 175 | closedAnyDb = true
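// Worked example of the task budget computed earlier in this function: with
// MaxRunningTasks=500, no tasks currently running, and two open databases
// holding 30 and 10 pending tasks, qtMaxTasks is 500 and the first database
// may execute up to max(1, 30*500/40) = 375 tasks in this cycle.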
176 | } 177 | } 178 | } 179 | } 180 | } 181 | 182 | // Open closed databases that have pending tasks 183 | if len(closedWithTasks) > 0 { 184 | sort.SliceStable(closedWithTasks, func(i, j int) bool { 185 | return closedWithTasks[i].tasks() < closedWithTasks[j].tasks() 186 | }) 187 | 188 | if totalOpen > s.config.MaxOpenedDB { 189 | // Open the database with the fewest tasks 190 | for _, db := range closedWithTasks { 191 | if err := db.connect(s.config.SQLiteOptions); err == nil { 192 | break 193 | } 194 | } 195 | } else { 196 | // Open as many databases as allowed 197 | for _, db := range closedWithTasks { 198 | if totalOpen > s.config.MaxOpenedDB { 199 | break 200 | } 201 | if err := db.connect(s.config.SQLiteOptions); err == nil { 202 | totalOpen++ 203 | } 204 | } 205 | } 206 | } 207 | 208 | // If any database was closed, re-sort the database lists 209 | if closedAnyDb { 210 | s.mu.Lock() 211 | sortDbs(s.dbs) 212 | sortDbs(s.liveDbs) 213 | s.mu.Unlock() 214 | } 215 | } 216 | 217 | // executeDbTasks executes a set number of tasks for the given database. 218 | // Tasks are executed asynchronously, and the task completion is managed via the onTask callback. 219 | func (s *storage) executeDbTasks(db *storageDb, maxForThisDb int32, onTask func(taskId int32, complete bool)) { 220 | i := int32(0) 221 | db.execute(func(id int32, task *dbTask) (stops bool) { 222 | i++ 223 | stops = i < maxForThisDb 224 | if id < 0 { 225 | return 226 | } 227 | 228 | retries := 0 229 | var exec func() 230 | exec = func() { 231 | if !atomic.CompareAndSwapInt32(&task.state, task_created, task_process) { 232 | onTask(id, true) 233 | return 234 | } 235 | 236 | if !db.isOpen() { 237 | // If the database closed during the task execution 238 | if atomic.CompareAndSwapInt32(&task.state, task_process, task_created) { 239 | db.schedule(id, task) // Reschedule the task 240 | } 241 | onTask(id, true) 242 | return 243 | } 244 | 245 | if err := task.callback(db, task.output); err != nil { 246 | if !db.isOpen() { 247 | // If the database closed during task execution 248 | if atomic.CompareAndSwapInt32(&task.state, task_process, task_created) { 249 | db.schedule(id, task) // Reschedule the task 250 | } 251 | onTask(id, true) 252 | return 253 | } 254 | 255 | if retries < 3 { 256 | retries++ 257 | go exec() 258 | } else { 259 | task.output.Error = err 260 | atomic.CompareAndSwapInt32(&task.state, task_process, task_finished) 261 | onTask(id, true) 262 | } 263 | } else { 264 | atomic.CompareAndSwapInt32(&task.state, task_process, task_finished) 265 | onTask(id, true) 266 | } 267 | } 268 | onTask(id, false) 269 | 270 | go exec() 271 | 272 | return 273 | }) 274 | } 275 | -------------------------------------------------------------------------------- /sqlite/storage_test.go: -------------------------------------------------------------------------------- 1 | package sqlite 2 | 3 | import ( 4 | "database/sql" 5 | "fmt" 6 | "log/slog" 7 | "os" 8 | "testing" 9 | "time" 10 | "unsafe" 11 | 12 | "math/rand" 13 | 14 | "github.com/nidorx/sqlog" 15 | "github.com/stretchr/testify/assert" 16 | ) 17 | 18 | const ( 19 | storageDir = "testdata/logs" 20 | storagePrefix = "test" 21 | ) 22 | 23 | func init() { 24 | sql.Register("sqlite3", &mockDriver{ 25 | conns: make(map[string]*mockDriverConn), 26 | }) 27 | } 28 | 29 | func Test_Sqlite_Simple(t *testing.T) { 30 | testClearDir(storageDir) 31 | defer testClearDir(storageDir) 32 | 33 | storage, err := New(&Config{ 34 | Dir: storageDir, 35 | Prefix: storagePrefix, 36 | }) 37 | assert.Nil(t, err) 38 | 39 | 
log, err := sqlog.New(&sqlog.Config{Storage: storage}) 40 | assert.Nil(t, err) 41 | defer log.Stop() 42 | 43 | logger := slog.New(log.Handler()) 44 | 45 | for i := 0; i < 100; i++ { 46 | logger.Info(randString(256)) 47 | } 48 | 49 | log.Stop() 50 | 51 | waitMax(3*time.Second, func() bool { 52 | return storage.closed.Load() 53 | }) 54 | 55 | assert.Equal(t, 1, len(storage.dbs)) 56 | db := storage.dbs[0] 57 | assert.Equal(t, db_closed, db.status) 58 | assert.Nil(t, db.db) 59 | } 60 | 61 | func Test_Sqlite_MaxFilesize(t *testing.T) { 62 | testClearDir(storageDir) 63 | defer testClearDir(storageDir) 64 | 65 | storage, err := New(&Config{ 66 | Dir: storageDir, 67 | Prefix: storagePrefix, 68 | MaxFilesizeMB: 1, 69 | IntervalSizeCheckSec: 1000, 70 | IntervalScheduledTasksMs: 1000000, 71 | MaxChunkAgeSec: 1, 72 | CloseIdleSec: 1, 73 | }) 74 | assert.Nil(t, err) 75 | defer storage.Close() 76 | 77 | chunk := sqlog.NewChunk(99) 78 | 79 | for i := 0; i < 1000; i++ { 80 | chunk.Put(&sqlog.Entry{ 81 | Time: time.Now(), 82 | Level: 0, 83 | Content: []byte(fmt.Sprintf(`{"msg":"%s"}`, randString(1024))), 84 | }) 85 | } 86 | 87 | for ; !chunk.Empty(); chunk = chunk.Next() { 88 | storage.Flush(chunk) 89 | } 90 | 91 | time.Sleep(2 * time.Second) 92 | 93 | storage.doRoutineSizeCheck() 94 | 95 | assert.Equal(t, 2, len(storage.dbs)) 96 | assert.Equal(t, 1, len(storage.liveDbs)) 97 | 98 | storage.doRoutineScheduledTasks() 99 | 100 | db := storage.dbs[0] 101 | assert.Equal(t, db_closed, db.status) 102 | assert.Nil(t, db.db) 103 | assert.Equal(t, len(storage.liveDbs), 1) 104 | } 105 | 106 | func Test_Sqlite_TotalSizeCap(t *testing.T) { 107 | testClearDir(storageDir) 108 | defer testClearDir(storageDir) 109 | 110 | storage, err := New(&Config{ 111 | Dir: storageDir, 112 | Prefix: storagePrefix, 113 | MaxFilesizeMB: 1, 114 | MaxSizeTotalMB: 3, 115 | IntervalSizeCheckSec: 1000, 116 | IntervalScheduledTasksMs: 1000000, 117 | MaxChunkAgeSec: 1, 118 | CloseIdleSec: 1, 119 | }) 120 | assert.Nil(t, err) 121 | defer storage.Close() 122 | 123 | firstDb := storage.dbs[0] // remove 124 | 125 | for j := 0; j < 3; j++ { 126 | root := sqlog.NewChunk(100) 127 | chunk := root 128 | for i := 0; i < 1001; i++ { 129 | chunk, _ = chunk.Put(&sqlog.Entry{ 130 | Time: time.Now(), //.Add(time.Duration(-1) * time.Second), 131 | Level: 0, 132 | Content: []byte(fmt.Sprintf(`{"msg":"%s"}`, randString(1024))), 133 | }) 134 | } 135 | for c := root; !c.Empty(); c = c.Next() { 136 | storage.Flush(c) 137 | } 138 | time.Sleep(1200 * time.Millisecond) 139 | storage.doRoutineSizeCheck() 140 | } 141 | 142 | time.Sleep(1200 * time.Millisecond) 143 | storage.doRoutineSizeCheck() 144 | storage.doRoutineScheduledTasks() 145 | 146 | assert.GreaterOrEqual(t, len(storage.dbs), 2) 147 | assert.Equal(t, 1, len(storage.liveDbs)) 148 | 149 | assert.Equal(t, db_closed, storage.dbs[0].status) 150 | assert.Nil(t, storage.dbs[0].db) 151 | 152 | assert.Equal(t, len(storage.liveDbs), 1) 153 | 154 | assert.NoFileExists(t, firstDb.filePath) 155 | assert.FileExists(t, storage.dbs[1].filePath) 156 | 157 | } 158 | 159 | func Test_Sqlite_WALCheckpoint(t *testing.T) { 160 | testClearDir(storageDir) 161 | defer testClearDir(storageDir) 162 | 163 | storage, err := New(&Config{ 164 | Dir: storageDir, 165 | Prefix: storagePrefix, 166 | MaxFilesizeMB: 1, 167 | MaxSizeTotalMB: 3, 168 | IntervalSizeCheckSec: 1000, 169 | IntervalScheduledTasksMs: 1000000, 170 | IntervalWalCheckpointSec: 1, 171 | MaxChunkAgeSec: 1, 172 | CloseIdleSec: 1, 173 | }) 174 | assert.Nil(t, err) 175 | defer 
storage.Close() 176 | 177 | db := storage.dbs[0] 178 | walFile := db.filePath + "-wal" 179 | 180 | for j := 0; j < 1; j++ { 181 | root := sqlog.NewChunk(99) 182 | chunk := root 183 | for i := 0; i < 1001; i++ { 184 | chunk, _ = chunk.Put(&sqlog.Entry{ 185 | Time: time.Now(), //.Add(time.Duration(-1) * time.Second), 186 | Level: 0, 187 | Content: []byte(fmt.Sprintf(`{"msg":"%s"}`, randString(1024))), 188 | }) 189 | } 190 | for c := root; !c.Empty(); c = c.Next() { 191 | storage.Flush(c) 192 | } 193 | time.Sleep(1 * time.Second) 194 | storage.doRoutineSizeCheck() 195 | } 196 | 197 | time.Sleep(1 * time.Second) 198 | storage.doRoutineSizeCheck() 199 | storage.doRoutineScheduledTasks() 200 | 201 | // log, err := sqlog.New(&sqlog.Config{Storage: storage}) 202 | // assert.Nil(t, err) 203 | // defer log.Stop() 204 | 205 | // logger := slog.New(log.Handler()) 206 | 207 | // for i := 0; i < 1000; i++ { 208 | // logger.Info(randString(1024)) 209 | // } 210 | 211 | // waitMax(5*time.Second, func() bool { 212 | // return testGetFileSize(walFile) > 0 213 | // }) 214 | 215 | // waitMax(5*time.Second, func() bool { 216 | // return testGetFileSize(walFile) == 0 217 | // }) 218 | 219 | assert.Equal(t, int64(0), testGetFileSize(walFile)) 220 | assert.Greater(t, testGetFileSize(db.filePath), int64(1000*1024)) 221 | } 222 | 223 | func testClearDir(dir string) { 224 | if err := os.RemoveAll(dir); err != nil { 225 | panic(err) 226 | } 227 | } 228 | 229 | func waitMax(max time.Duration, condition func() bool) { 230 | init := time.Now() 231 | for { 232 | if condition() || time.Since(init) > max { 233 | break 234 | } 235 | time.Sleep(500 * time.Millisecond) 236 | } 237 | } 238 | 239 | var src = rand.NewSource(time.Now().UnixNano()) 240 | 241 | const letterBytes = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ" 242 | const ( 243 | letterIdxBits = 6 // 6 bits to represent a letter index 244 | letterIdxMask = 1<<letterIdxBits - 1 // All 1-bits, as many as letterIdxBits 245 | letterIdxMax = 63 / letterIdxBits // # of letter indices fitting in 63 bits 246 | ) 247 | 248 | // randString returns a random string of n letters 249 | func randString(n int) string { 250 | b := make([]byte, n) 251 | // each src.Int63() yields 63 random bits, enough for letterIdxMax letter indices 252 | for i, cache, remain := n-1, src.Int63(), letterIdxMax; i >= 0; { 253 | if remain == 0 { 254 | cache, remain = src.Int63(), letterIdxMax 255 | } 256 | if idx := int(cache & letterIdxMask); idx < len(letterBytes) { 257 | b[i] = letterBytes[idx] 258 | i-- 259 | } 260 | cache >>= letterIdxBits 261 | remain-- 262 | } 263 | 264 | return *(*string)(unsafe.Pointer(&b)) 265 | } 266 | -------------------------------------------------------------------------------- /sqlite/storage_test_driver_test.go: -------------------------------------------------------------------------------- 1 | package sqlite 2 | 3 | import ( 4 | "database/sql/driver" 5 | "errors" 6 | "fmt" 7 | "io" 8 | "net/url" 9 | "os" 10 | "strings" 11 | "sync" 12 | ) 13 | 14 | type mockDriver struct { 15 | mu sync.Mutex 16 | conns map[string]*mockDriverConn 17 | } 18 | 19 | func (d *mockDriver) Open(dsn string) (driver.Conn, error) { 20 | d.mu.Lock() 21 | defer d.mu.Unlock() 22 | if c, exists := d.conns[dsn]; exists { 23 | c.count++ 24 | return c, nil 25 | } 26 | 27 | url, err := url.Parse(dsn) 28 | if err != nil { 29 | return nil, err 30 | } 31 | 32 | // simulate sqlite wal 33 | file, err := os.OpenFile(url.Opaque, os.O_CREATE|os.O_APPEND|os.O_RDWR, 0755) 34 | if err != nil { 35 | return nil, err 36 | } 37 | 38 | wal, err := os.OpenFile(url.Opaque+"-wal", os.O_CREATE|os.O_RDWR, 0755) 39 | if err != nil { 40 | return nil, err 41 | } 42 | 43 | d.conns[dsn] = &mockDriverConn{ 44 | count: 1, 45 | dsn: dsn, 46 | file: file, 47 | wal: wal, 48 | driver: d, 49 | } 50 | return d.conns[dsn], nil 51 | } 52 | 53 | type mockDriverConn struct { 54 | mu sync.Mutex 55 | tx *mockTx 56 | count int 57 | dsn string 58 | file
*os.File 59 | wal *os.File 60 | driver *mockDriver 61 | } 62 | 63 | func (c *mockDriverConn) Prepare(query string) (stmt driver.Stmt, err error) { 64 | return &mockStmt{query: query, conn: c}, nil 65 | } 66 | 67 | func (c *mockDriverConn) Begin() (tx driver.Tx, err error) { 68 | c.mu.Lock() 69 | c.tx = &mockTx{conn: c} 70 | return c.tx, nil 71 | } 72 | 73 | func (c *mockDriverConn) Close() error { 74 | c.driver.mu.Lock() 75 | defer c.driver.mu.Unlock() 76 | c.count-- 77 | if c.count == 0 { 78 | delete(c.driver.conns, c.dsn) 79 | c.wal.Close() 80 | c.file.Close() 81 | } 82 | return nil 83 | } 84 | 85 | type mockTxEntry struct { 86 | epoch int64 87 | nanos int64 88 | level int64 89 | content []byte 90 | } 91 | 92 | // Tx is a transaction. 93 | type mockTx struct { 94 | conn *mockDriverConn 95 | commit []func() error 96 | } 97 | 98 | func (t *mockTx) Commit() error { 99 | defer t.conn.mu.Unlock() 100 | for _, fn := range t.commit { 101 | if err := fn(); err != nil { 102 | return err 103 | } 104 | } 105 | t.conn.tx = nil 106 | return nil 107 | } 108 | 109 | func (t *mockTx) Rollback() error { 110 | defer t.conn.mu.Unlock() 111 | t.conn.tx = nil 112 | return nil 113 | } 114 | 115 | // Stmt is a prepared statement. It is bound to a [Conn] and not 116 | // used by multiple goroutines concurrently. 117 | type mockStmt struct { 118 | conn *mockDriverConn 119 | query string 120 | } 121 | 122 | func (s *mockStmt) Close() error { 123 | return nil 124 | } 125 | 126 | func (s *mockStmt) NumInput() int { 127 | return -1 128 | } 129 | 130 | func (s *mockStmt) Exec(args []driver.Value) (driver.Result, error) { 131 | if strings.Contains(s.query, "INSERT INTO entries") { // (epoch_secs, nanos, level, content) 132 | if s.conn.tx == nil { 133 | return nil, errors.New("transaction required") 134 | } 135 | 136 | id := int64(0) 137 | rows := int64(0) 138 | 139 | var entries []*mockTxEntry 140 | 141 | for i := 0; i < len(args); i++ { 142 | entries = append(entries, &mockTxEntry{ 143 | epoch: args[i].(int64), 144 | nanos: args[i+1].(int64), 145 | level: args[i+2].(int64), 146 | content: args[i+3].([]byte), 147 | }) 148 | id++ 149 | rows++ 150 | i += 3 151 | } 152 | 153 | s.conn.tx.commit = append(s.conn.tx.commit, func() error { 154 | for _, entry := range entries { 155 | _, err := s.conn.wal.WriteString(fmt.Sprintf("%d,%d,%d", entry.epoch, entry.nanos, entry.level)) 156 | if err != nil { 157 | return err 158 | } 159 | _, err = s.conn.wal.Write(entry.content) 160 | if err != nil { 161 | return err 162 | } 163 | _, err = s.conn.wal.WriteString("\n") 164 | if err != nil { 165 | return err 166 | } 167 | } 168 | return nil 169 | }) 170 | 171 | return &mockResult{last: id, rows: rows}, nil 172 | } else if s.query == "VACUUM" || strings.Contains(s.query, "PRAGMA wal_checkpoint") { 173 | // copy from wal to db 174 | if s.conn.tx != nil { 175 | // https://sqlite-users.sqlite.narkive.com/8rtWJfAH/vacuum-command-in-a-transaction 176 | return nil, errors.New("transaction not allowed") 177 | } 178 | s.conn.mu.Lock() 179 | defer s.conn.mu.Unlock() 180 | 181 | b, err := os.ReadFile(s.conn.wal.Name()) 182 | if err != nil { 183 | return nil, err 184 | } 185 | _, err = s.conn.file.Write(b) 186 | if err != nil { 187 | return nil, err 188 | } 189 | 190 | if err := os.Truncate(s.conn.wal.Name(), 0); err != nil { 191 | return nil, err 192 | } 193 | return &mockResult{}, nil 194 | } 195 | return &mockResult{}, nil 196 | } 197 | 198 | func (s *mockStmt) Query(args []driver.Value) (driver.Rows, error) { 199 | if strings.Contains(s.query, 
"page_count * page_size") { 200 | s.conn.mu.Lock() 201 | defer s.conn.mu.Unlock() 202 | 203 | return &mockRows{ 204 | columns: []string{"total_size", "freelist_size"}, 205 | values: [][]any{{ 206 | testGetFileSize(s.conn.wal.Name()) + testGetFileSize(s.conn.file.Name()), 207 | 0, 208 | }}, 209 | }, nil 210 | } 211 | return &mockRows{}, nil 212 | } 213 | 214 | type mockResult struct { 215 | last int64 216 | rows int64 217 | } 218 | 219 | func (r *mockResult) LastInsertId() (int64, error) { 220 | return r.last, nil 221 | } 222 | 223 | func (r *mockResult) RowsAffected() (int64, error) { 224 | return r.rows, nil 225 | } 226 | 227 | // Rows is an iterator over an executed query's results. 228 | type mockRows struct { 229 | query string 230 | index int 231 | args []driver.Value 232 | values [][]any 233 | columns []string 234 | } 235 | 236 | func (r *mockRows) Columns() []string { 237 | return r.columns 238 | } 239 | 240 | func (r *mockRows) Close() error { 241 | return nil 242 | } 243 | 244 | func (r *mockRows) Next(dest []driver.Value) error { 245 | if r.index > len(r.values)-1 { 246 | return io.EOF 247 | } 248 | values := r.values[r.index] 249 | r.index++ 250 | for i, _ := range r.columns { 251 | dest[i] = values[i] 252 | } 253 | return nil 254 | } 255 | 256 | func testGetFileSize(file string) int64 { 257 | info, _ := os.Stat(file) 258 | return info.Size() 259 | } 260 | -------------------------------------------------------------------------------- /sqlite/utils.go: -------------------------------------------------------------------------------- 1 | package sqlite 2 | 3 | import ( 4 | "errors" 5 | "fmt" 6 | "log/slog" 7 | "os" 8 | "path" 9 | "path/filepath" 10 | "sort" 11 | "strconv" 12 | "strings" 13 | "time" 14 | ) 15 | 16 | func newDb(driver, dir, prefix string, start time.Time, maxChunkAgeSec int64) *storageDb { 17 | 18 | epochStart := start.Unix() 19 | name := fmt.Sprintf("%s_%d.db", prefix, epochStart) 20 | 21 | return &storageDb{ 22 | fileDir: dir, 23 | filePath: path.Join(dir, name), 24 | filePrefix: prefix, 25 | size: 0, // live db will updated during execution 26 | status: db_closed, 27 | epochStart: epochStart, 28 | newEpochStart: epochStart, 29 | maxChunkAgeSec: maxChunkAgeSec, 30 | epochEnd: 0, 31 | driver: driver, 32 | } 33 | } 34 | 35 | func initDbs(driver, dir, prefix string) (dbs []*storageDb, err error) { 36 | 37 | if err = os.MkdirAll(dir, 0755); err != nil { 38 | return 39 | } 40 | 41 | err = filepath.Walk(dir, func(filepath string, info os.FileInfo, err error) error { 42 | if err != nil { 43 | return err 44 | } 45 | if info.IsDir() { 46 | return nil 47 | } 48 | 49 | name := info.Name() 50 | if !strings.HasPrefix(name, prefix) || path.Ext(name) != ".db" { 51 | return nil 52 | } 53 | 54 | var ( 55 | epochStart int64 56 | epochEnd int64 57 | ) 58 | 59 | epochs := strings.Split(strings.TrimSuffix(strings.TrimPrefix(name, prefix+"_"), ".db"), "_") 60 | 61 | epochStart, err = strconv.ParseInt(epochs[0], 10, 64) 62 | if err != nil { 63 | slog.Warn("[sqlog] invalid database name", slog.String("filepath", filepath), slog.Any("err", err)) 64 | return nil 65 | } 66 | 67 | if len(epochs) > 1 { 68 | epochEnd, err = strconv.ParseInt(epochs[1], 10, 64) 69 | if err != nil { 70 | slog.Warn("[sqlog] invalid database name", slog.String("filepath", filepath), slog.Any("err", err)) 71 | return nil 72 | } 73 | } 74 | 75 | dbs = append(dbs, &storageDb{ 76 | filePath: path.Join(dir, name), 77 | size: info.Size(), // live db will updated during execution 78 | status: db_closed, 79 | epochStart: 
epochStart, 80 | epochEnd: epochEnd, 81 | driver: driver, 82 | }) 83 | 84 | return nil 85 | }) 86 | if err != nil { 87 | err = errors.Join(errors.New("[sqlog] error initializing the storage"), err) 88 | } 89 | 90 | if len(dbs) > 1 { 91 | sortDbs(dbs) 92 | } 93 | 94 | return 95 | } 96 | 97 | func sortDbs(dbs []*storageDb) { 98 | sort.SliceStable(dbs, func(i, j int) bool { 99 | ae := dbs[i].epochEnd 100 | be := dbs[j].epochEnd 101 | if ae == 0 && be == 0 { // both are live (no epochEnd): oldest epochStart first 102 | return dbs[i].epochStart < dbs[j].epochStart 103 | } 104 | 105 | if be == 0 { // only j is live: sealed databases sort before live ones 106 | return true 107 | } 108 | 109 | if ae == 0 { // only i is live: sealed databases sort before live ones 110 | return false 111 | } 112 | 113 | return ae > be // both sealed: most recent epochEnd first 114 | }) 115 | } 116 | -------------------------------------------------------------------------------- /sqlog.go: -------------------------------------------------------------------------------- 1 | package sqlog 2 | 3 | import ( 4 | "log/slog" 5 | "net/http" 6 | "os" 7 | "sync" 8 | ) 9 | 10 | type Config struct { 11 | Storage Storage 12 | Handler *HandlerConfig 13 | Ingester *IngesterConfig 14 | } 15 | 16 | // Log is the SQLog interface 17 | type Log interface { 18 | 19 | // Stop terminates any ongoing logging operations. It should be called 20 | // to release resources and stop log collection or processing. 21 | Stop() 22 | 23 | // Handler returns the primary log handler 24 | Handler() slog.Handler 25 | 26 | // Fanout distributes logs to multiple slog.Handler instances in parallel. 27 | // This allows logs to be processed by several handlers simultaneously. 28 | Fanout(...slog.Handler) 29 | 30 | // Ticks queries the ticks api 31 | Ticks(*TicksInput) (*Output, error) 32 | 33 | // Entries queries the entries api 34 | Entries(*EntriesInput) (*Output, error) 35 | 36 | // Result retrieves the output of a scheduled task 37 | Result(taskId int32) (*Output, error) 38 | 39 | // Cancel cancels a scheduled task 40 | Cancel(taskId int32) error 41 | 42 | // HttpHandler returns an http.Handler responsible for handling 43 | // HTTP requests related to the api 44 | HttpHandler() http.Handler 45 | 46 | // ServeHTTPTicks handles HTTP requests for Ticks api 47 | ServeHTTPTicks(w http.ResponseWriter, r *http.Request) 48 | 49 | // ServeHTTPEntries handles HTTP requests for Entries api 50 | ServeHTTPEntries(w http.ResponseWriter, r *http.Request) 51 | 52 | // ServeHTTPResult handles HTTP requests for the scheduled result api 53 | ServeHTTPResult(w http.ResponseWriter, r *http.Request) 54 | 55 | // ServeHTTPCancel handles HTTP requests to cancel a scheduled result 56 | ServeHTTPCancel(w http.ResponseWriter, r *http.Request) 57 | } 58 | 59 | type sqlog struct { 60 | close sync.Once 61 | config *Config 62 | handler *handler 63 | storage Storage 64 | ingester *ingester 65 | } 66 | 67 | func New(config *Config) (*sqlog, error) { 68 | if config == nil { 69 | config = &Config{} 70 | } 71 | 72 | storage := config.Storage 73 | if storage == nil { 74 | storage = &DummyStorage{} 75 | } 76 | 77 | ingester, err := NewIngester(config.Ingester, storage) 78 | if err != nil { 79 | return nil, err 80 | } 81 | 82 | return &sqlog{ 83 | config: config, 84 | storage: storage, 85 | ingester: ingester, 86 | handler: newHandler(ingester, config.Handler), 87 | }, nil 88 | } 89 | 90 | func (l *sqlog) Handler() slog.Handler { 91 | return l.handler 92 | } 93 | 94 | func (l *sqlog) Fanout(handlers ...slog.Handler) { 95 | l.handler.fanout(handlers...) 
96 | } 97 | 98 | func (l *sqlog) Stop() { 99 | l.close.Do(func() { 100 | if slog.Default().Handler() == l.handler { 101 | // after close we can no longer persist logs; restore a stdout handler 102 | slog.SetDefault(slog.New(slog.NewJSONHandler(os.Stdout, nil))) 103 | } 104 | if err := l.ingester.Close(); err != nil { 105 | slog.Warn( 106 | "[sqlog] error closing", 107 | slog.Any("error", err), 108 | ) 109 | } 110 | }) 111 | } 112 | -------------------------------------------------------------------------------- /sqlog_api.go: -------------------------------------------------------------------------------- 1 | package sqlog 2 | 3 | type Tick struct { 4 | Index int `json:"index"` 5 | Start int64 `json:"epoch_start"` 6 | End int64 `json:"epoch_end"` 7 | Count int64 `json:"count"` 8 | Debug int64 `json:"debug"` 9 | Info int64 `json:"info"` 10 | Warn int64 `json:"warn"` 11 | Error int64 `json:"error"` 12 | } 13 | 14 | type TicksInput struct { 15 | Expr string `json:"expr"` 16 | Level []string `json:"level"` // ["debug","info","warn","error"] 17 | EpochEnd int64 `json:"epoch"` 18 | IntervalSec int `json:"interval"` 19 | MaxResult int `json:"limit"` 20 | } 21 | 22 | type EntriesInput struct { 23 | Expr string `json:"expr"` 24 | Level []string `json:"level"` // ["debug","info","warn","error"] 25 | Direction string `json:"dir"` // before|after 26 | EpochStart int64 `json:"epoch"` // @TODO: add epochMax 27 | NanosStart int `json:"nanos"` 28 | MaxResult int `json:"limit"` 29 | } 30 | 31 | type Output struct { 32 | Scheduled bool `json:"scheduled,omitempty"` // Indicates that this is a partial result 33 | TaskIds []int32 `json:"tasks,omitempty"` // The ids so that the results can be retrieved in the future 34 | Error error `json:"-"` // The last error that occurred 35 | Ticks []*Tick `json:"ticks,omitempty"` // The ticks available in this response 36 | Entries []any `json:"entries,omitempty"` // The log records available in this response 37 | } 38 | 39 | func (l *sqlog) Entries(input *EntriesInput) (*Output, error) { 40 | if s, ok := l.storage.(StorageWithApi); ok { 41 | return s.Entries(input) 42 | } 43 | return &Output{}, nil 44 | } 45 | 46 | func (l *sqlog) Ticks(input *TicksInput) (*Output, error) { 47 | if s, ok := l.storage.(StorageWithApi); ok { 48 | return s.Ticks(input) 49 | } 50 | return &Output{}, nil 51 | } 52 | 53 | func (l *sqlog) Result(taskId int32) (*Output, error) { 54 | if s, ok := l.storage.(StorageWithApi); ok { 55 | return s.Result(taskId) 56 | } 57 | return &Output{}, nil 58 | } 59 | 60 | func (l *sqlog) Cancel(taskId int32) error { 61 | if s, ok := l.storage.(StorageWithApi); ok { 62 | return s.Cancel(taskId) 63 | } 64 | return nil 65 | } 66 | -------------------------------------------------------------------------------- /storage.go: -------------------------------------------------------------------------------- 1 | package sqlog 2 | 3 | // Storage is the storage contract 4 | type Storage interface { 5 | Close() error // Close must perform cleanup during shutdown 6 | Flush(chunk *Chunk) error // Flush must persist the chunk 7 | } 8 | 9 | // StorageWithApi is the contract for storage that supports search 10 | type StorageWithApi interface { 11 | Storage 12 | 13 | // Fetches information about all series within the range. 14 | Ticks(input *TicksInput) (*Output, error) 15 | 16 | // Fetches a page of results (seek method or keyset pagination). 17 | // The sorting is reversed, with the oldest result coming first. 
18 | Entries(input *EntriesInput) (*Output, error) 19 | Result(taskId int32) (*Output, error) 20 | Cancel(taskId int32) error 21 | } 22 | 23 | type DummyStorage struct { 24 | } 25 | 26 | func (s *DummyStorage) Flush(chunk *Chunk) error { 27 | return nil 28 | } 29 | 30 | func (s *DummyStorage) Close() error { 31 | return nil 32 | } 33 | -------------------------------------------------------------------------------- /web/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | SQLog - Connecting the dots 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 |
17 |
18 |
19 |
20 |
21 |
22 |
23 |
24 |   25 |
26 | 27 | 28 |
29 | 30 | 31 | 32 | 33 |
34 | 35 |
36 |
37 | 38 | 43 |
44 |
45 |
46 |
47 | 48 | 54 |
55 |
56 |
57 |
58 | 59 | 65 |
66 |
67 |
68 |
69 | 70 | 76 |
77 |
78 |
79 |
80 |
81 |
82 |
83 |
84 |
85 |
86 |
87 | 94 | 101 |
102 |
103 |
104 |
105 |
106 | 107 |
108 |
109 | 110 | 111 | 112 | 113 | 114 | 115 | 116 | 117 | 118 | 119 |
LEVELDATEMESSAGEOVERVIEW
120 |
121 |
122 |
123 |
124 |
125 |

126 |                         
127 |
128 |
129 |
130 |
131 |
132 | 135 | 136 | 144 | 145 |
146 |
147 |

Search Syntax

148 | 149 | 150 |
151 |
152 |
153 |

154 | 155 | In addition to the UI, SQLog's search syntax is also 156 | inspired by Datadog. 157 | 158 |

159 | 160 |

Overview

161 | 162 |

A query filter is composed of terms and operators.

163 |

There are two types of terms:

164 |
    165 |
  • 166 |

    A single term is a single word such as test or hello.

    167 |
  • 168 |
  • 169 |

    170 | A sequence is a group of words surrounded by double quotes, such as "hello dolly". 171 |

    172 |
  • 173 |
174 | 175 |

176 | To combine multiple terms into a complex query, you can use any of the following case-insensitive Boolean operators: 177 | 

178 | 179 | 180 | 181 | 182 | 183 | 184 | 185 | 186 | 187 | 188 | 189 | 192 | 193 | 194 | 195 | 196 | 197 | 198 | 199 | 200 |
OperatorDescriptionExample
AND 190 | Intersection: both terms are in the selected events (if nothing is added, AND is taken by default) 191 | authentication AND failure
ORUnion: either term is contained in the selected eventsauthentication OR password
201 | 202 |

Search wildcard

203 |

204 | You can combine text with wildcards to enhance your searches. 205 |

206 | 207 | 208 | 209 | 210 | 211 | 212 | 213 | 214 | 215 | 216 | 220 | 221 | 222 | 223 | 233 | 234 | 235 |
WildcardDescription
? 217 | Match a single special character or space. For example, to search 218 | for an attribute my_attribute with the value hello world, use my_attribute:hello?world. 219 | 
* 224 | Perform a multi-character wildcard search. 225 |
    226 |
  • 227 | service:web* matches every log message that has a service starting with web. 228 |
  • 229 |
  • web* matches all log messages starting with web.
  • 230 |
  • *web matches all log messages that end with web.
  • 231 |
232 |
236 |

237 | Note: Wildcards work as wildcards inside and outside of double quotes. For example, 238 | both "*test*" and *test* match a log which has the string test in its message. 239 | You can escape the wildcard with the \ character (example "\*test\*"). 240 | 

241 | 242 |

Single term example

243 |
244 | 245 | 246 | 247 | 248 | 249 | 250 | 251 | 252 | 253 | 254 | 255 | 256 | 257 | 258 | 259 | 260 | 261 | 262 | 263 | 264 | 265 |
Search syntaxDescription
helloSearches only the log message for the term hello.
hello*Searches all log attributes for strings that start with hello. For example, hello_world.
*worldSearches all log attributes for strings that end with world. For example, hello_world.
266 |
267 | 268 |

Multiple terms with exact match example

269 | 270 | 271 | 272 | 273 | 274 | 275 | 276 | 277 | 278 | 279 | 280 | 281 | 282 |
Search syntaxDescription
"hello world"Searches only the log message for the exact term hello world.
283 | 284 |

Multiple terms without exact match example

285 | 286 | 287 | 288 | 289 | 290 | 291 | 292 | 293 | 294 | 295 | 299 | 300 | 301 | 302 | 305 | 306 | 307 |
Search syntaxDescription
hello world 296 | Is equivalent to hello AND world. 297 | It searches only the log message for the terms hello and world. 298 |
"hello world" "i am here" 303 | It searches all log attributes for the terms hello world and i am here. 304 |
308 | 309 | 310 | 311 |

Attributes search

312 |

To search on a specific attribute, add : between the attribute name and the value you are searching for.

313 |

314 | For instance, if your attribute name is url and you want to filter on the 315 | url value https://github.com/nidorx/sqlog, enter: url:https://github.com/nidorx/sqlog 316 |

317 | 318 | 319 | 320 | 321 | 322 | 323 | 324 | 325 | 326 | 327 | 328 | 331 | 332 | 333 | 334 | 337 | 338 | 339 | 340 | 345 | 346 | 347 |
Search queryDescription
http.url_details.path:"/api/v1/test" 329 | Searches all logs matching /api/v1/test in the attribute http.url_details.path. 330 |
http.url:/api-v1/* 335 | Searches all logs containing a value in the http.url attribute that starts with /api-v1/ 336 | 
http.status_code:[200 TO 299] http.url_details.path:/api-v1/* 341 | Searches all logs containing an http.status_code value between 200 and 299, and 342 | containing a value in the http.url_details.path attribute that starts with 343 | /api-v1/ 344 | 
348 | 349 |

Numerical values

350 |

351 | You can use numerical operators (<, >, <=, or >=) to 352 | perform a search. 353 | For instance, retrieve all logs that have a response time over 100ms with: http.response_time:>100 354 | 

355 |

356 | You can search for a numerical attribute within a specific range. For instance, retrieve all your 4xx errors with: http.status_code:[400 TO 499] 357 | 
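As a worked example that combines only the operators documented above: to retrieve all 4xx responses on the v1 API that took longer than 100ms, you can write: http.status_code:[400 TO 499] AND http.url:/api-v1/* AND http.response_time:>100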

358 |
359 |
360 |
361 | 362 | 363 | 364 | 365 | 366 | 367 | 368 | -------------------------------------------------------------------------------- /web/reset.css: -------------------------------------------------------------------------------- 1 | /*** 2 | The new CSS reset - version 1.11.2 (last updated 15.11.2023) 3 | GitHub page: https://github.com/elad2412/the-new-css-reset 4 | ***/ 5 | 6 | /* 7 | Remove all the styles of the "User-Agent-Stylesheet", except for the 'display' property 8 | - The "symbol *" part is to solve Firefox SVG sprite bug 9 | - The "html" element is excluded, otherwise a bug in Chrome breaks the CSS hyphens property (https://github.com/elad2412/the-new-css-reset/issues/36) 10 | */ 11 | *:where(:not(html, iframe, canvas, img, svg, video, audio):not(svg *, symbol *)) { 12 | all: unset; 13 | display: revert; 14 | } 15 | 16 | /* Preferred box-sizing value */ 17 | *, 18 | *::before, 19 | *::after { 20 | box-sizing: border-box; 21 | } 22 | 23 | /* Fix mobile Safari increase font-size on landscape mode */ 24 | html { 25 | -moz-text-size-adjust: none; 26 | -webkit-text-size-adjust: none; 27 | text-size-adjust: none; 28 | } 29 | 30 | /* Reapply the pointer cursor for anchor tags */ 31 | a, button { 32 | cursor: revert; 33 | } 34 | 35 | /* Remove list styles (bullets/numbers) */ 36 | ol, ul, menu, summary { 37 | list-style: none; 38 | } 39 | 40 | /* For images to not be able to exceed their container */ 41 | img { 42 | max-inline-size: 100%; 43 | max-block-size: 100%; 44 | } 45 | 46 | /* removes spacing between cells in tables */ 47 | table { 48 | border-collapse: collapse; 49 | } 50 | 51 | /* Safari - solving issue when using user-select:none on the text input doesn't working */ 52 | input, textarea { 53 | -webkit-user-select: auto; 54 | } 55 | 56 | /* revert the 'white-space' property for textarea elements on Safari */ 57 | textarea { 58 | white-space: revert; 59 | } 60 | 61 | /* minimum style to allow to style meter element */ 62 | meter { 63 | -webkit-appearance: revert; 64 | appearance: revert; 65 | } 66 | 67 | /* preformatted text - use only for this feature */ 68 | :where(pre) { 69 | all: revert; 70 | box-sizing: border-box; 71 | } 72 | 73 | /* reset default text opacity of input placeholder */ 74 | ::placeholder { 75 | color: unset; 76 | } 77 | 78 | /* fix the feature of 'hidden' attribute. 79 | display:revert; revert to element instead of attribute */ 80 | :where([hidden]) { 81 | display: none; 82 | } 83 | 84 | /* revert for bug in Chromium browsers 85 | - fix for the content editable attribute will work properly. 
86 | - webkit-user-select: auto; added for Safari in case of using user-select:none on wrapper element*/ 87 | :where([contenteditable]:not([contenteditable="false"])) { 88 | -moz-user-modify: read-write; 89 | -webkit-user-modify: read-write; 90 | overflow-wrap: break-word; 91 | -webkit-line-break: after-white-space; 92 | -webkit-user-select: auto; 93 | } 94 | 95 | /* apply back the draggable feature - exist only in Chromium and Safari */ 96 | :where([draggable="true"]) { 97 | -webkit-user-drag: element; 98 | } 99 | 100 | /* Revert Modal native behavior */ 101 | :where(dialog:modal) { 102 | all: revert; 103 | box-sizing: border-box; 104 | } 105 | 106 | /* Remove details summary webkit styles */ 107 | ::-webkit-details-marker { 108 | display: none; 109 | } -------------------------------------------------------------------------------- /web/styles.css: -------------------------------------------------------------------------------- 1 | @import url('https://fonts.googleapis.com/css2?family=Roboto:wght@400;700&display=swap'); 2 | 3 | html, 4 | body { 5 | font-family: 'Roboto', sans-serif; 6 | font-size: 12px; 7 | } 8 | 9 | body.no-scroll { 10 | overflow: hidden; 11 | } 12 | 13 | ::-webkit-scrollbar { 14 | width: 5px; 15 | } 16 | 17 | ::-webkit-scrollbar-track { 18 | background: #f1f1f1; 19 | } 20 | 21 | ::-webkit-scrollbar-thumb { 22 | background: #bec4c4; 23 | } 24 | 25 | ::-webkit-scrollbar-thumb:hover { 26 | background: #555; 27 | } 28 | 29 | strong { 30 | font-weight: bold; 31 | } 32 | 33 | .clear { 34 | position: absolute; 35 | top: 2px; 36 | right: 5px; 37 | cursor: pointer; 38 | } 39 | 40 | .textbox { 41 | width: 100%; 42 | } 43 | 44 | .wrapper { 45 | width: 200px; 46 | position: relative; 47 | } 48 | 49 | #date-range { 50 | min-width: 240px; 51 | max-width: 240px; 52 | text-align: center; 53 | } 54 | 55 | 56 | 57 | #tab-content { 58 | padding: 0 10px; 59 | overflow: hidden; 60 | overflow-y: scroll; 61 | display: block; 62 | height: calc(100vh - 194px); 63 | } 64 | 65 | #tab-content>table { 66 | display: none; 67 | width: 100% 68 | } 69 | 70 | #tab-content>table.active { 71 | display: table; 72 | } 73 | 74 | #tab-content>table td { 75 | padding: 6px 4px; 76 | border-bottom: 1px solid #ccc; 77 | background: #fff; 78 | transition: background 0.2s; 79 | vertical-align: middle; 80 | } 81 | 82 | #tab-content>table tr { 83 | cursor: pointer; 84 | } 85 | 86 | #tab-content>table thead tr { 87 | position: sticky; 88 | top: 0; 89 | background: #fff; 90 | z-index: 5; 91 | } 92 | 93 | #tab-content>table thead td { 94 | font-weight: bold; 95 | } 96 | 97 | #tab-content>table thead td.level { 98 | width: 40px; 99 | } 100 | 101 | #tab-content>table thead td.date { 102 | width: 145px; 103 | } 104 | 105 | #tab-content>table thead td.message { 106 | /* message */ 107 | /* width: 200px; */ 108 | } 109 | 110 | #tab-content>table thead td.overview { 111 | width: 500px; 112 | } 113 | 114 | #tab-content>table tbody tr td.level { 115 | position: relative; 116 | padding-left: 7px; 117 | } 118 | 119 | #tab-content>table tbody tr td.level::before { 120 | content: ''; 121 | position: absolute; 122 | left: 0; 123 | bottom: 50%; 124 | width: 2px; 125 | height: 12px; 126 | background-color: rgb(249 250 251); 127 | margin-bottom: -5px; 128 | } 129 | 130 | #tab-content>table tbody tr:hover td { 131 | background-color: rgb(249 250 251); 132 | } 133 | 134 | #tab-content>table tbody tr.debug .level { 135 | color: rgb(4 120 87); 136 | } 137 | 138 | #tab-content>table tbody tr.debug:hover td { 139 | background-color: rgb(236 
253 245); 140 | } 141 | 142 | #tab-content>table tbody tr.debug td.level::before { 143 | background-color: rgb(4 120 87); 144 | } 145 | 146 | #tab-content>table tbody tr.info .level { 147 | color: rgb(3 105 161); 148 | } 149 | 150 | #tab-content>table tbody tr.info:hover td { 151 | background-color: rgb(240 249 255); 152 | } 153 | 154 | #tab-content>table tbody tr.info td.level::before { 155 | background-color: rgb(3 105 161); 156 | } 157 | 158 | #tab-content>table tbody tr.warn .level { 159 | color: rgb(180 83 9); 160 | } 161 | 162 | #tab-content>table tbody tr.warn:hover td { 163 | background-color: rgb(255 251 235); 164 | } 165 | 166 | #tab-content>table tbody tr.warn td.level::before { 167 | background-color: rgb(180 83 9); 168 | } 169 | 170 | #tab-content>table tbody tr.error .level { 171 | color: rgb(190 18 60); 172 | } 173 | 174 | #tab-content>table tbody tr.error:hover td { 175 | background-color: rgb(255 241 242); 176 | } 177 | 178 | #tab-content>table tbody tr.error td.level::before { 179 | background-color: rgb(190 18 60); 180 | } 181 | 182 | #tab-content>table#all td:nth-child(3) { 183 | display: table-cell; 184 | } 185 | 186 | 187 | #chart { 188 | top: 0; 189 | position: sticky; 190 | padding: 10px 0; 191 | background: #fff; 192 | border-bottom: 2px solid #d2d2d2; 193 | user-select: none; 194 | z-index: 500; 195 | } 196 | 197 | #chart.hidden { 198 | display: none; 199 | } 200 | 201 | #chart .count { 202 | position: absolute; 203 | left: 0; 204 | z-index: 5; 205 | top: -10px; 206 | } 207 | 208 | #chart .zoom { 209 | position: absolute; 210 | right: 0; 211 | z-index: 5; 212 | top: -10px; 213 | } 214 | 215 | #chart .zoom .btn { 216 | --bs-btn-padding-y: 2px; 217 | --bs-btn-padding-x: 4px; 218 | --bs-btn-font-size: .75rem; 219 | } 220 | 221 | #chart .dates { 222 | display: block; 223 | width: 100%; 224 | bottom: 10px; 225 | margin-top: 10px; 226 | position: relative; 227 | padding-top: 10px; 228 | } 229 | 230 | #chart .dates>div { 231 | position: absolute; 232 | width: 80px; 233 | margin-left: -40px; 234 | font-size: 9px; 235 | text-align: center; 236 | } 237 | 238 | #chart .dates::before { 239 | content: ''; 240 | position: absolute; 241 | top: 0px; 242 | left: 10px; 243 | width: calc(100% - 20px); 244 | height: 1px; 245 | background: #bbb; 246 | } 247 | 248 | #chart .dates>div::before { 249 | content: ''; 250 | position: absolute; 251 | top: -9px; 252 | left: 50%; 253 | width: 1px; 254 | height: 7px; 255 | background: #bbb; 256 | } 257 | 258 | #chart .bars { 259 | display: flex; 260 | height: 80px; 261 | align-items: flex-end; 262 | position: relative; 263 | } 264 | 265 | #chart .bars .tick { 266 | width: 10px; 267 | position: absolute; 268 | transition: all 0.2s; 269 | height: 100%; 270 | display: inline-flex; 271 | flex-direction: column; 272 | cursor: pointer; 273 | } 274 | 275 | #chart .bars .tick.highlight { 276 | background: #d2d2d2; 277 | } 278 | 279 | 280 | #chart .bars .tick .content { 281 | height: var(--height); 282 | width: 10px; 283 | position: absolute; 284 | bottom: 0; 285 | background: #b3e5fc; 286 | display: flex; 287 | flex-direction: column; 288 | } 289 | 290 | #chart .bars .tick.highlight .content { 291 | background: #db999a; 292 | } 293 | 294 | /* #chart .bars .tick::before { 295 | content: ''; 296 | background: #b3e5fc; 297 | height: var(--height); 298 | width: 10px; 299 | position: absolute; 300 | bottom: 0; 301 | } 302 | 303 | #chart .bars .tick.highlight::before { 304 | background: #db999a; 305 | } */ 306 | 307 | 308 | /* 309 | 310 | rgb(249 250 251) 311 
| rgb(236 253 245) 312 | rgb(240 249 255) 313 | rgb(255 251 235) 314 | rgb(255 241 242) 315 | */ 316 | 317 | #chart .bars .tick>div { 318 | width: 100%; 319 | } 320 | 321 | #chart .bars .tick .debug { 322 | background-color: rgb(110 231 183); 323 | } 324 | 325 | #chart .bars .tick .info { 326 | background-color: rgb(147 197 253); 327 | } 328 | 329 | #chart .bars .tick .warn { 330 | background-color: rgb(252 211 77); 331 | } 332 | 333 | #chart .bars .tick .error { 334 | background-color: rgb(251 113 133); 335 | } 336 | 337 | #chart .bars .tick.hr { 338 | display: block; 339 | width: calc(100% - 10px); 340 | background: transparent; 341 | position: absolute; 342 | transition: all 0.2s; 343 | height: 20px; 344 | bottom: calc(50% - 10px); 345 | line-height: 20px; 346 | overflow: hidden; 347 | padding-left: 5px; 348 | font-size: 10px; 349 | } 350 | 351 | #chart .bars .tick.hr::before { 352 | background: #eee; 353 | height: 1px; 354 | width: 100%; 355 | bottom: 10px; 356 | left: 18px; 357 | } 358 | 359 | #chart .needle { 360 | position: absolute; 361 | top: 0; 362 | bottom: 0; 363 | right: 0; 364 | width: 38px; 365 | pointer-events: none; 366 | } 367 | 368 | #chart .needle::before { 369 | content: ''; 370 | width: 1px; 371 | height: 100%; 372 | left: 19px; 373 | background: #3D91F5; 374 | opacity: 0.5; 375 | position: absolute; 376 | } 377 | 378 | #chart .needle .handle { 379 | left: 0px; 380 | bottom: 0; 381 | position: absolute; 382 | width: 38px; 383 | height: 30px; 384 | cursor: col-resize; 385 | pointer-events: all; 386 | } 387 | 388 | #chart .needle .handle::before { 389 | content: ''; 390 | left: 11px; 391 | bottom: 7px; 392 | background: #3D91F5; 393 | position: absolute; 394 | border-radius: 100%; 395 | width: 17px; 396 | height: 17px; 397 | transition: all 0.2s; 398 | scale: 0; 399 | } 400 | 401 | #chart .needle.ui-draggable-dragging .handle::before, 402 | #chart .needle .handle:hover::before { 403 | scale: 1; 404 | } 405 | 406 | 407 | #tab-content>table tbody tr.highlight td { 408 | background: #eee; 409 | } 410 | 411 | #tab-content>table tbody tr:hover td { 412 | background: #d2d2d2; 413 | } 414 | 415 | .chart-container { 416 | position: relative; 417 | } 418 | 419 | #highlight-date { 420 | position: absolute; 421 | top: -17px; 422 | background: #000; 423 | color: #fff; 424 | padding: 3px 4px; 425 | left: 10px; 426 | font-size: 10px; 427 | transition: all 0.2s; 428 | text-align: center; 429 | word-wrap: inherit; 430 | white-space: nowrap; 431 | z-index: 1000; 432 | } 433 | 434 | #highlight-date::before { 435 | content: ''; 436 | width: 0; 437 | height: 0; 438 | border-left: 5px solid transparent; 439 | border-right: 5px solid transparent; 440 | border-top: 5px solid black; 441 | bottom: -5px; 442 | position: absolute; 443 | } 444 | 445 | #highlight-date.left::before { 446 | left: 1px; 447 | } 448 | 449 | #highlight-date.right::before { 450 | right: 1px; 451 | } 452 | 453 | #highlight-date.center::before { 454 | margin-left: -5px; 455 | left: 50%; 456 | } 457 | 458 | #highlight-date.hidden { 459 | display: none; 460 | } 461 | 462 | 463 | #event-attributes { 464 | position: fixed; 465 | top: 0; 466 | right: 0; 467 | bottom: 0; 468 | width: 60%; 469 | z-index: 1500; 470 | pointer-events: none; 471 | } 472 | 473 | #event-attributes .overlay { 474 | position: fixed; 475 | top: 0; 476 | bottom: 0; 477 | left: 0; 478 | right: 0; 479 | background: #000; 480 | z-index: 0; 481 | opacity: 0.0; 482 | transition: all 0.2s; 483 | cursor: pointer; 484 | } 485 | 486 | #event-attributes .container { 
487 | width: 100%; 488 | position: relative; 489 | height: 100%; 490 | overflow: hidden; 491 | overflow-y: auto; 492 | z-index: 5; 493 | background: #fff; 494 | border-left: 1px solid #888; 495 | box-shadow: 0px 0px 0px rgba(0, 0, 0, 0.0); 496 | opacity: 0.0; 497 | transform: translateX(400px); 498 | padding: 10px; 499 | transition: all 0.2s; 500 | max-width: unset; 501 | } 502 | 503 | #event-attributes.active { 504 | pointer-events: all; 505 | } 506 | 507 | #event-attributes.active .container { 508 | opacity: 1.0; 509 | transform: translateX(0px); 510 | box-shadow: -10px 0px 10px rgba(0, 0, 0, 0.1); 511 | } 512 | 513 | #event-attributes.active .overlay { 514 | opacity: 0.3; 515 | } 516 | 517 | #event-attributes .container .info { 518 | font-size: 14px; 519 | } 520 | 521 | #event-attributes .container .json { 522 | background: #eee; 523 | padding: 5px; 524 | font-family: monospace; 525 | font-size: 14px; 526 | line-height: 20px; 527 | } 528 | 529 | .tag { 530 | background-color: #eee; 531 | padding: 2px 7px; 532 | border-radius: 6px; 533 | margin: 4px 5px 0 0; 534 | display: inline-block; 535 | opacity: 0.5; 536 | transition: all 0.2s; 537 | } 538 | 539 | .tag .key { 540 | font-weight: bold; 541 | } 542 | 543 | .tag .value {} 544 | 545 | #tab-content>table tbody tr:hover .tag { 546 | opacity: 1.0; 547 | } 548 | 549 | .daterangepicker .ranges { 550 | overflow: hidden; 551 | height: 265px; 552 | overflow-y: scroll; 553 | } 554 | 555 | .daterangepicker.show-calendar .ranges { 556 | margin: 0; 557 | padding-top: 8px; 558 | border-right: 1px solid #ddd; 559 | } 560 | 561 | .daterangepicker.show-ranges.ltr .drp-calendar.left { 562 | border-left: none; 563 | } 564 | 565 | 566 | .offcanvas-body { 567 | font-size: 14px; 568 | } 569 | 570 | 571 | #off-canvas-syntax { 572 | --bs-offcanvas-width: 50% 573 | } 574 | 575 | @media (max-width: 767.98px) { 576 | #date-range { 577 | width: 100%; 578 | max-width: unset; 579 | border-radius: 0; 580 | margin-bottom: 3px; 581 | } 582 | 583 | #expression { 584 | width: 100%; 585 | border-radius: 0; 586 | margin-left: 0; 587 | margin-bottom: 3px; 588 | } 589 | 590 | #expression-help-button { 591 | margin-left: 0; 592 | border-radius: var(--bs-border-radius); 593 | border-top-right-radius: 0; 594 | border-bottom-right-radius: 0; 595 | } 596 | 597 | .inputs { 598 | justify-content: center; 599 | } 600 | 601 | #tab-content { 602 | height: calc(100vh - 262px); 603 | } 604 | 605 | #event-attributes { 606 | width: 80%; 607 | } 608 | 609 | #tab-content>table td:nth-child(4) { 610 | display: none; 611 | } 612 | 613 | #off-canvas-syntax { 614 | --bs-offcanvas-width: 90% 615 | } 616 | } --------------------------------------------------------------------------------
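
A minimal usage sketch based only on the APIs visible in this excerpt (sqlog.New, Config, Handler, Entries, Stop, and the EntriesInput fields). Assumptions: the import path is inferred from the repository URL; HttpHandler is declared in the Log interface above and assumed to be implemented by *sqlog in http.go (not shown here); the /logs/ mount point and port are arbitrary choices. With the default DummyStorage, Entries returns an empty Output; a StorageWithApi implementation (such as the sqlite storage) is required for real queries.

package main

import (
	"log/slog"
	"net/http"
	"time"

	"github.com/nidorx/sqlog" // import path assumed from the repository URL
)

func main() {
	// New accepts an empty Config; Storage falls back to DummyStorage, so
	// logs are ingested but not persisted or searchable in this sketch.
	logger, err := sqlog.New(&sqlog.Config{})
	if err != nil {
		panic(err)
	}
	defer logger.Stop() // closes the ingester and restores a stdout handler

	// Route all slog output through SQLog.
	slog.SetDefault(slog.New(logger.Handler()))
	slog.Info("hello", slog.String("service", "web"))

	// Seek the 10 entries before "now" (keyset pagination). With the
	// default DummyStorage this returns an empty Output.
	if out, err := logger.Entries(&sqlog.EntriesInput{
		Expr:       "service:web* AND hello",
		Level:      []string{"info", "warn", "error"},
		Direction:  "before",
		EpochStart: time.Now().Unix(),
		MaxResult:  10,
	}); err == nil {
		slog.Info("query done", slog.Int("entries", len(out.Entries)))
	}

	// Expose the HTTP API; the mount point is an arbitrary choice here.
	http.Handle("/logs/", logger.HttpHandler())
	_ = http.ListenAndServe(":8080", nil)
}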