├── AUTHORS
├── LICENSE
├── README.md
├── config.go
├── config_test.go
├── engine.go
├── entrypoints
│   ├── ik
│   │   ├── main.go
│   │   ├── registry.go
│   │   └── scoreboard_html.go
│   └── ikb
│       └── main.go
├── fanout.go
├── fluent_router.go
├── fluent_router_test.go
├── glob.go
├── ik.go
├── intvector.go
├── intvector_test.go
├── journal
│   ├── file.go
│   ├── file_test.go
│   ├── path_builder.go
│   └── path_builder_test.go
├── markup
│   ├── html.go
│   ├── html_test.go
│   ├── markup.go
│   ├── plain.go
│   └── term.go
├── parsers
│   ├── regexp.go
│   └── registry.go
├── plugins
│   ├── in_forward.go
│   ├── in_tail.go
│   ├── in_tail_test.go
│   ├── out_file.go
│   ├── out_forward.go
│   ├── out_stdout.go
│   └── registry.go
├── record_pump.go
├── scorekeeper.go
├── slicer.go
├── spawner.go
├── spawner_test.go
├── stringvector.go
├── stringvector_test.go
├── task
│   ├── recurring_scheduler.go
│   ├── recurring_scheduler_test.go
│   ├── simple.go
│   └── task.go
└── utils.go
/AUTHORS:
--------------------------------------------------------------------------------
1 | Moriyoshi Koizumi
2 | Shinji Tanaka
3 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Copyright (c) 2014 Ik authors.
2 |
3 | Permission is hereby granted, free of charge, to any person obtaining a copy
4 | of this software and associated documentation files (the "Software"), to deal
5 | in the Software without restriction, including without limitation the rights
6 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
7 | copies of the Software, and to permit persons to whom the Software is
8 | furnished to do so, subject to the following conditions:
9 |
10 | The above copyright notice and this permission notice shall be included in
11 | all copies or substantial portions of the Software.
12 |
13 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
18 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
19 | THE SOFTWARE.
20 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | Ik
2 | ==
3 |
4 | Ik (pronounced ai-kay) is planned to be a humble alternative implementation of fluentd.
5 |
6 | "Ik" is also a Dutch word for "I" in English and it concidentally looks very much like the Japanese word "Iku" that literally means "Go".
7 |
8 | Installation
9 | ------------
10 |
11 | ```shell
12 | $ go get github.com/moriyoshi/ik/entrypoints/ik
13 | ```
14 |
15 | Authors
16 | -------
17 |
18 | In alphabetical order:
19 |
20 | - Moriyoshi Koizumi
21 | - Shinji Tanaka
22 |
23 |
--------------------------------------------------------------------------------
/config.go:
--------------------------------------------------------------------------------
1 | package ik
2 |
3 | import (
4 | "bufio"
5 | "errors"
6 | "fmt"
7 | "io"
8 | "net/http"
9 | "net/url"
10 | "path"
11 | "regexp"
12 | "strings"
13 | )
14 |
15 | type Config struct {
16 | Root *ConfigElement
17 | }
18 |
19 | type ConfigElement struct {
20 | Name string
21 | Args string
22 | Attrs map[string]string
23 | Elems []*ConfigElement
24 | }
25 |
26 | type LineReader interface {
27 | Next() (string, error)
28 | Close() error
29 | Filename() string
30 | LineNumber() int
31 | }
32 |
33 | type parserContext struct {
34 | tag string
35 | tagArgs string
36 | elems []*ConfigElement
37 | attrs map[string]string
38 | opener Opener
39 | }
40 |
41 | type DefaultLineReader struct {
42 | filename string
43 | lineNumber int
44 | inner *bufio.Reader
45 | closer io.Closer
46 | }
47 |
48 | type DefaultOpener http.Dir
49 |
50 | var (
51 | stripCommentRegexp = regexp.MustCompile("\\s*(?:#.*)?$")
52 | startTagRegexp = regexp.MustCompile("^<([a-zA-Z0-9_]+)\\s*(.+?)?>$")
53 | attrRegExp = regexp.MustCompile("^([a-zA-Z0-9_]+)\\s+(.*)$")
54 | )
55 |
56 | func (reader *DefaultLineReader) Next() (string, error) {
57 | reader.lineNumber += 1
58 | line, isPrefix, err := reader.inner.ReadLine()
59 | if isPrefix {
60 | return "", errors.New(fmt.Sprintf("Line too long in %s at line %d", reader.filename, reader.lineNumber))
61 | }
62 | if err != nil {
63 | return "", err
64 | }
65 | return string(line), err
66 | }
67 |
68 | func (reader *DefaultLineReader) Close() error {
69 | if reader.closer != nil {
70 | return reader.closer.Close()
71 | }
72 | return nil
73 | }
74 |
75 | func (reader *DefaultLineReader) Filename() string {
76 | return reader.filename
77 | }
78 |
79 | func (reader *DefaultLineReader) LineNumber() int {
80 | return reader.lineNumber
81 | }
82 |
83 | func NewDefaultLineReader(filename string, reader io.Reader) *DefaultLineReader {
84 | closer, ok := reader.(io.Closer)
85 | if !ok {
86 | closer = nil
87 | }
88 | return &DefaultLineReader{
89 | filename: filename,
90 | lineNumber: 0,
91 | inner: bufio.NewReader(reader),
92 | closer: closer,
93 | }
94 | }
95 |
96 | func (opener DefaultOpener) FileSystem() http.FileSystem {
97 | return http.Dir(opener)
98 | }
99 |
100 | func (opener DefaultOpener) BasePath() string {
101 | return string(opener)
102 | }
103 |
104 | func (opener DefaultOpener) NewOpener(path_ string) Opener {
105 | if !path.IsAbs(path_) {
106 | path_ = path.Join(string(opener), path_)
107 | }
108 | return DefaultOpener(path_)
109 | }
110 |
111 | func NewLineReader(opener Opener, filename string) (LineReader, error) {
112 | file, err := opener.FileSystem().Open(filename)
113 | if err != nil {
114 | return nil, err
115 | }
116 | return NewDefaultLineReader(filename, file), nil
117 | }
118 |
119 | func (opener DefaultOpener) Open(filename string) (http.File, error) {
120 | return http.Dir(opener).Open(filename)
121 | }
122 |
123 | func makeParserContext(tag string, tagArgs string, opener Opener) *parserContext {
124 | return &parserContext{
125 | tag: tag,
126 | tagArgs: tagArgs,
127 | elems: make([]*ConfigElement, 0),
128 | attrs: make(map[string]string),
129 | opener: opener,
130 | }
131 | }
132 |
133 | func makeConfigElementFromContext(context *parserContext) *ConfigElement {
134 | return &ConfigElement{
135 | Name: context.tag,
136 | Args: context.tagArgs,
137 | Attrs: context.attrs,
138 | Elems: context.elems,
139 | }
140 | }
141 |
142 | func handleInclude(reader LineReader, context *parserContext, attrValue string) error {
143 | url_, err := url.Parse(attrValue)
144 | if err != nil {
145 | return err
146 | }
147 | if url_.Scheme == "file" || url_.Path == attrValue {
148 | var abspathPattern string
149 | if path.IsAbs(url_.Path) {
150 | abspathPattern = url_.Path
151 | } else {
152 | abspathPattern = path.Join(context.opener.BasePath(), url_.Path)
153 | }
154 | files, err := Glob(context.opener.FileSystem(), abspathPattern)
155 | if err != nil {
156 | return err
157 | }
158 | for _, file := range files {
159 | newReader, err := NewLineReader(context.opener, file)
160 | if err != nil {
161 | return err
162 | }
163 | defer newReader.Close()
164 | parseConfig(newReader, &parserContext{
165 | tag: context.tag,
166 | tagArgs: context.tagArgs,
167 | elems: context.elems,
168 | attrs: context.attrs,
169 | opener: context.opener.NewOpener(path.Dir(file)),
170 | })
171 | }
172 | return nil
173 | } else {
174 | return errors.New("Not implemented!")
175 | }
176 | }
177 |
178 | func handleSpecialAttrs(reader LineReader, context *parserContext, attrName string, attrValue string) (bool, error) {
179 | if attrName == "include" {
180 | return true, handleInclude(reader, context, attrValue)
181 | }
182 | return false, nil
183 | }
184 |
185 | func parseConfig(reader LineReader, context *parserContext) error {
186 | tagEnd := "</" + context.tag + ">"
187 | for {
188 | line, err := reader.Next()
189 | if err != nil {
190 | if err == io.EOF {
191 | break
192 | } else {
193 | return err
194 | }
195 | }
196 |
197 | line = strings.TrimLeft(line, " \t\r\n")
198 | line = stripCommentRegexp.ReplaceAllLiteralString(line, "")
199 | if len(line) == 0 {
200 | continue
201 | } else if submatch := startTagRegexp.FindStringSubmatch(line); submatch != nil {
202 | subcontext := makeParserContext(
203 | submatch[1],
204 | submatch[2],
205 | nil,
206 | )
207 | err = parseConfig(reader, subcontext)
208 | if err != nil {
209 | return err
210 | }
211 | context.elems = append(context.elems, makeConfigElementFromContext(subcontext))
212 | } else if line == tagEnd {
213 | break
214 | } else if submatch := attrRegExp.FindStringSubmatch(line); submatch != nil {
215 | handled, err := handleSpecialAttrs(reader, context, submatch[1], submatch[2])
216 | if err != nil {
217 | return err
218 | }
219 | if !handled {
220 | context.attrs[submatch[1]] = submatch[2]
221 | }
222 | } else {
223 | return errors.New(fmt.Sprintf("Parse error in %s at line %s", reader.Filename(), reader.LineNumber()))
224 | }
225 | }
226 | return nil
227 | }
228 |
229 | func ParseConfig(opener Opener, filename string) (*Config, error) {
230 | context := makeParserContext("(root)", "", opener)
231 | reader, err := NewLineReader(opener, filename)
232 | if err != nil {
233 | return nil, err
234 | }
235 | defer reader.Close()
236 | err = parseConfig(reader, context)
237 | if err != nil {
238 | return nil, err
239 | }
240 | return &Config{Root: makeConfigElementFromContext(context)}, nil
241 | }
242 |
243 | type FluentConfigurer struct {
244 | logger Logger
245 | router *FluentRouter
246 | inputFactoryRegistry InputFactoryRegistry
247 | outputFactoryRegistry OutputFactoryRegistry
248 | }
249 |
250 | func (configurer *FluentConfigurer) Configure(engine Engine, config *Config) error {
251 | for _, v := range config.Root.Elems {
252 | switch v.Name {
253 | case "source":
254 | type_ := v.Attrs["type"]
255 | inputFactory := configurer.inputFactoryRegistry.LookupInputFactory(type_)
256 | if inputFactory == nil {
257 | return errors.New("Could not find input factory: " + type_)
258 | }
259 | input, err := inputFactory.New(engine, v)
260 | if err != nil {
261 | return err
262 | }
263 | err = engine.Launch(input)
264 | if err != nil {
265 | return err
266 | }
267 | configurer.logger.Info("Input plugin loaded: %s", inputFactory.Name())
268 | case "match":
269 | type_ := v.Attrs["type"]
270 | outputFactory := configurer.outputFactoryRegistry.LookupOutputFactory(type_)
271 | if outputFactory == nil {
272 | return errors.New("Could not find output factory: " + type_)
273 | }
274 | output, err := outputFactory.New(engine, v)
275 | if err != nil {
276 | return err
277 | }
278 | configurer.router.AddRule(v.Args, output)
279 | err = engine.Launch(output)
280 | if err != nil {
281 | return err
282 | }
283 | configurer.logger.Info("Output plugin loaded: %s, with Args '%s'", outputFactory.Name(), v.Args)
284 | }
285 | }
286 | return nil
287 | }
288 |
289 | func NewFluentConfigurer(logger Logger, inputFactoryRegistry InputFactoryRegistry, outputFactoryRegistry OutputFactoryRegistry, router *FluentRouter) *FluentConfigurer {
290 | return &FluentConfigurer{
291 | logger: logger,
292 | router: router,
293 | inputFactoryRegistry: inputFactoryRegistry,
294 | outputFactoryRegistry: outputFactoryRegistry,
295 | }
296 | }
297 |
--------------------------------------------------------------------------------
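
The parser above accepts a fluentd-style configuration: sections are delimited by `<name args>` ... `</name>`, attributes are whitespace-separated `key value` pairs, `#` starts a comment, and an `include` attribute pulls other files in (glob patterns are expanded through `Glob`). Below is a minimal sketch of loading such a file and walking the resulting `ConfigElement` tree; the file path and the plugin type names in the comment are illustrative only.

```go
package main

import (
	"fmt"
	"path"

	"github.com/moriyoshi/ik"
)

func main() {
	// Assuming /etc/fluent/fluent.conf contains something like:
	//
	//   <source>
	//     type forward
	//   </source>
	//   <match myapp.**>
	//     type stdout
	//   </match>
	//
	dir, file := path.Split("/etc/fluent/fluent.conf")
	config, err := ik.ParseConfig(ik.DefaultOpener(dir), file)
	if err != nil {
		panic(err)
	}
	// Root.Elems holds one ConfigElement per top-level section;
	// FluentConfigurer later dispatches on Name ("source"/"match")
	// and on the "type" attribute.
	for _, elem := range config.Root.Elems {
		fmt.Printf("<%s %s> type=%q\n", elem.Name, elem.Args, elem.Attrs["type"])
	}
}
```
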
/config_test.go:
--------------------------------------------------------------------------------
1 | package ik
2 |
3 | import (
4 | "bytes"
5 | "net/http"
6 | "os"
7 | "strings"
8 | "testing"
9 | )
10 |
11 | type myOpener string
12 | type myFile struct{ b *bytes.Buffer }
13 | type myFileSystem struct{ s string }
14 |
15 | func (f *myFile) Close() error { return nil }
16 | func (f *myFile) Read(b []byte) (int, error) { return f.b.Read(b) }
17 | func (f *myFile) Readdir(int) ([]os.FileInfo, error) { return nil, nil }
18 | func (f *myFile) Seek(int64, int) (int64, error) { return 0, nil }
19 | func (f *myFile) Stat() (os.FileInfo, error) { return nil, nil }
20 |
21 | func (fs *myFileSystem) Open(string) (http.File, error) {
22 | return &myFile{b: bytes.NewBuffer(([]byte)(fs.s))}, nil
23 | }
24 |
25 | func (opener myOpener) NewLineReader(filename string) (LineReader, error) {
26 | return NewDefaultLineReader(filename, strings.NewReader(string(opener))), nil
27 | }
28 |
29 | func (s myOpener) FileSystem() http.FileSystem { return &myFileSystem{s: string(s)} }
30 | func (myOpener) BasePath() string { return "" }
31 | func (myOpener) NewOpener(path string) Opener { return myOpener("") }
32 |
33 | func TestParseConfig(t *testing.T) {
34 | const data = "<test>\n" +
35 | "attr_name1 attr_value1\n" +
36 | "attr_name2 attr_value2\n" +
37 | "</test>\n"
38 | config, err := ParseConfig(myOpener(data), "test.cfg")
39 | if err != nil {
40 | panic(err.Error())
41 | }
42 | if len(config.Root.Elems) != 1 {
43 | t.Fail()
44 | }
45 | if config.Root.Elems[0].Name != "test" {
46 | t.Fail()
47 | }
48 | if len(config.Root.Elems[0].Attrs) != 2 {
49 | t.Fail()
50 | }
51 | if config.Root.Elems[0].Attrs["attr_name1"] != "attr_value1" {
52 | t.Fail()
53 | }
54 | if config.Root.Elems[0].Attrs["attr_name2"] != "attr_value2" {
55 | t.Fail()
56 | }
57 | }
58 |
59 | // vim: sts=4 sw=4 ts=4 noet
60 |
--------------------------------------------------------------------------------
/engine.go:
--------------------------------------------------------------------------------
1 | package ik
2 |
3 | import (
4 | "github.com/moriyoshi/ik/task"
5 | "math/rand"
6 | "time"
7 | )
8 |
9 | type recurringTaskDaemon struct {
10 | engine *engineImpl
11 | shutdown bool
12 | }
13 |
14 | func (daemon *recurringTaskDaemon) Run() error {
15 | daemon.engine.recurringTaskScheduler.ProcessEvent()
16 | if daemon.shutdown {
17 | return nil
18 | }
19 | time.Sleep(1 * time.Second)
20 | return Continue
21 | }
22 |
23 | func (daemon *recurringTaskDaemon) Shutdown() error {
24 | daemon.shutdown = true
25 | daemon.engine.recurringTaskScheduler.NoOp()
26 | return nil
27 | }
28 |
29 | type engineImpl struct {
30 | logger Logger
31 | opener Opener
32 | lineParserPluginRegistry LineParserPluginRegistry
33 | randSource rand.Source
34 | scorekeeper *Scorekeeper
35 | defaultPort Port
36 | spawner *Spawner
37 | pluginInstances []PluginInstance
38 | taskRunner task.TaskRunner
39 | recurringTaskScheduler *task.RecurringTaskScheduler
40 | }
41 |
42 | func (engine *engineImpl) Logger() Logger {
43 | return engine.logger
44 | }
45 |
46 | func (engine *engineImpl) Opener() Opener {
47 | return engine.opener
48 | }
49 |
50 | func (engine *engineImpl) LineParserPluginRegistry() LineParserPluginRegistry {
51 | return engine.lineParserPluginRegistry
52 | }
53 |
54 | func (engine *engineImpl) RandSource() rand.Source {
55 | return engine.randSource
56 | }
57 |
58 | func (engine *engineImpl) Scorekeeper() *Scorekeeper {
59 | return engine.scorekeeper
60 | }
61 |
62 | func (engine *engineImpl) SpawneeStatuses() ([]SpawneeStatus, error) {
63 | return engine.spawner.GetSpawneeStatuses()
64 | }
65 |
66 | func (engine *engineImpl) DefaultPort() Port {
67 | return engine.defaultPort
68 | }
69 |
70 | func (engine *engineImpl) Dispose() error {
71 | spawnees, err := engine.spawner.GetRunningSpawnees()
72 | if err != nil {
73 | return err
74 | }
75 | for _, spawnee := range spawnees {
76 | engine.spawner.Kill(spawnee)
77 | }
78 | engine.spawner.PollMultiple(spawnees)
79 | return nil
80 | }
81 |
82 | func (engine *engineImpl) Spawn(spawnee Spawnee) error {
83 | return engine.spawner.Spawn(spawnee)
84 | }
85 |
86 | func (engine *engineImpl) Launch(pluginInstance PluginInstance) error {
87 | var err error
88 | spawnee, ok := pluginInstance.(Spawnee)
89 | if ok {
90 | err = engine.Spawn(spawnee)
91 | if err != nil {
92 | return err
93 | }
94 | }
95 | engine.pluginInstances = append(engine.pluginInstances, pluginInstance)
96 | return nil
97 | }
98 |
99 | func (engine *engineImpl) PluginInstances() []PluginInstance {
100 | retval := make([]PluginInstance, len(engine.pluginInstances))
101 | copy(retval, engine.pluginInstances)
102 | return retval
103 | }
104 |
105 | func (engine *engineImpl) RecurringTaskScheduler() *task.RecurringTaskScheduler {
106 | return engine.recurringTaskScheduler
107 | }
108 |
109 | func (engine *engineImpl) Start() error {
110 | spawnees, err := engine.spawner.GetRunningSpawnees()
111 | if err != nil {
112 | return err
113 | }
114 | return engine.spawner.PollMultiple(spawnees)
115 | }
116 |
117 | func NewEngine(logger Logger, opener Opener, lineParserPluginRegistry LineParserPluginRegistry, scorekeeper *Scorekeeper, defaultPort Port) *engineImpl {
118 | taskRunner := &task.SimpleTaskRunner{}
119 | recurringTaskScheduler := task.NewRecurringTaskScheduler(
120 | func() time.Time { return time.Now() },
121 | taskRunner,
122 | )
123 | engine := &engineImpl{
124 | logger: logger,
125 | opener: opener,
126 | lineParserPluginRegistry: lineParserPluginRegistry,
127 | randSource: NewRandSourceWithTimestampSeed(),
128 | scorekeeper: scorekeeper,
129 | defaultPort: defaultPort,
130 | spawner: NewSpawner(),
131 | pluginInstances: make([]PluginInstance, 0),
132 | taskRunner: taskRunner,
133 | recurringTaskScheduler: recurringTaskScheduler,
134 | }
135 | engine.Spawn(&recurringTaskDaemon{engine, false})
136 | return engine
137 | }
138 |
--------------------------------------------------------------------------------
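
Launch registers a PluginInstance with the engine and, when the instance also implements Spawnee, hands it to the internal Spawner, just as NewEngine does with recurringTaskDaemon above. The sketch below shows the Spawnee contract; periodicTicker is a hypothetical type, and the use of the Continue sentinel to request another Run call is an assumption drawn from recurringTaskDaemon.

```go
package main

import (
	"time"

	"github.com/moriyoshi/ik"
)

// periodicTicker is a hypothetical Spawnee that logs once a second
// until it is asked to shut down.
type periodicTicker struct {
	logger   ik.Logger
	shutdown bool
}

func (t *periodicTicker) Run() error {
	if t.shutdown {
		return nil // a nil return ends the spawnee normally
	}
	t.logger.Info("tick")
	time.Sleep(1 * time.Second)
	return ik.Continue // assumption: Continue asks the spawner to invoke Run again
}

func (t *periodicTicker) Shutdown() error {
	t.shutdown = true
	return nil
}
```

Passing such a value to engine.Spawn (or, for a full PluginInstance, to Launch) runs it under the spawner and makes it visible through SpawneeStatuses.
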
/entrypoints/ik/main.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "errors"
5 | "flag"
6 | "fmt"
7 | "github.com/moriyoshi/ik"
8 | "github.com/moriyoshi/ik/parsers"
9 | "github.com/moriyoshi/ik/plugins"
10 | "github.com/op/go-logging"
11 | "os"
12 | "path"
13 | )
14 |
15 | func usage() {
16 | fmt.Fprintf(os.Stderr, "Usage of %s:\n", os.Args[0])
17 | flag.PrintDefaults()
18 | os.Exit(255)
19 | }
20 |
21 | func configureScoreboards(logger ik.Logger, registry *MultiFactoryRegistry, engine ik.Engine, config *ik.Config) error {
22 | for _, v := range config.Root.Elems {
23 | switch v.Name {
24 | case "scoreboard":
25 | type_ := v.Attrs["type"]
26 | scoreboardFactory := registry.LookupScoreboardFactory(type_)
27 | if scoreboardFactory == nil {
28 | return errors.New("Could not find scoreboard factory: " + type_)
29 | }
30 | scoreboard, err := scoreboardFactory.New(engine, registry, v)
31 | if err != nil {
32 | return err
33 | }
34 | err = engine.Launch(scoreboard)
35 | if err != nil {
36 | return err
37 | }
38 | logger.Info("Scoreboard plugin loaded: %s", scoreboardFactory.Name())
39 | }
40 | }
41 | return nil
42 | }
43 |
44 | func main() {
45 | logger := logging.MustGetLogger("ik")
46 |
47 | var config_file string
48 | var help bool
49 | flag.StringVar(&config_file, "c", "/etc/fluent/fluent.conf", "config file path (default: /etc/fluent/fluent.conf)")
50 | flag.BoolVar(&help, "h", false, "show help")
51 | flag.Parse()
52 |
53 | if help || config_file == "" {
54 | usage()
55 | }
56 |
57 | dir, file := path.Split(config_file)
58 | opener := ik.DefaultOpener(dir)
59 | config, err := ik.ParseConfig(opener, file)
60 | if err != nil {
61 | println(err.Error())
62 | return
63 | }
64 |
65 | scorekeeper := ik.NewScorekeeper(logger)
66 |
67 | registry := NewMultiFactoryRegistry(scorekeeper)
68 |
69 | for _, _plugin := range plugins.GetPlugins() {
70 | switch plugin := _plugin.(type) {
71 | case ik.InputFactory:
72 | registry.RegisterInputFactory(plugin)
73 | case ik.OutputFactory:
74 | registry.RegisterOutputFactory(plugin)
75 | }
76 | }
77 |
78 | for _, _plugin := range parsers.GetPlugins() {
79 | registry.RegisterLineParserPlugin(_plugin)
80 | }
81 |
82 | registry.RegisterScoreboardFactory(&HTMLHTTPScoreboardFactory{})
83 |
84 | router := ik.NewFluentRouter()
85 | engine := ik.NewEngine(logger, opener, registry, scorekeeper, router)
86 | defer func() {
87 | err := engine.Dispose()
88 | if err != nil {
89 | engine.Logger().Error("%s", err.Error())
90 | }
91 | }()
92 |
93 | err = ik.NewFluentConfigurer(logger, registry, registry, router).Configure(engine, config)
94 | if err != nil {
95 | println(err.Error())
96 | return
97 | }
98 | err = configureScoreboards(logger, registry, engine, config)
99 | if err != nil {
100 | println(err.Error())
101 | return
102 | }
103 | engine.Start()
104 | }
105 |
106 | // vim: sts=4 sw=4 ts=4 noet
107 |
--------------------------------------------------------------------------------
/entrypoints/ik/registry.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "errors"
5 | "fmt"
6 | "github.com/moriyoshi/ik"
7 | )
8 |
9 | type MultiFactoryRegistry struct {
10 | scorekeeper *ik.Scorekeeper
11 | inputFactories map[string]ik.InputFactory
12 | outputFactories map[string]ik.OutputFactory
13 | scoreboardFactories map[string]ik.ScoreboardFactory
14 | lineParserPlugins map[string]ik.LineParserPlugin
15 | lineParserFactoryFactories map[string]ik.LineParserFactoryFactory
16 | plugins []ik.Plugin
17 | }
18 |
19 | func (registry *MultiFactoryRegistry) RegisterInputFactory(factory ik.InputFactory) error {
20 | _, alreadyExists := registry.inputFactories[factory.Name()]
21 | if alreadyExists {
22 | return errors.New(fmt.Sprintf("InputFactory named %s already registered", factory.Name()))
23 | }
24 | registry.inputFactories[factory.Name()] = factory
25 | registry.plugins = append(registry.plugins, factory)
26 | factory.BindScorekeeper(registry.scorekeeper)
27 | return nil
28 | }
29 |
30 | func (registry *MultiFactoryRegistry) LookupInputFactory(name string) ik.InputFactory {
31 | factory, ok := registry.inputFactories[name]
32 | if !ok {
33 | return nil
34 | }
35 | return factory
36 | }
37 |
38 | func (registry *MultiFactoryRegistry) RegisterOutputFactory(factory ik.OutputFactory) error {
39 | _, alreadyExists := registry.outputFactories[factory.Name()]
40 | if alreadyExists {
41 | return errors.New(fmt.Sprintf("OutputFactory named %s already registered", factory.Name()))
42 | }
43 | registry.outputFactories[factory.Name()] = factory
44 | registry.plugins = append(registry.plugins, factory)
45 | factory.BindScorekeeper(registry.scorekeeper)
46 | return nil
47 | }
48 |
49 | func (registry *MultiFactoryRegistry) LookupOutputFactory(name string) ik.OutputFactory {
50 | factory, ok := registry.outputFactories[name]
51 | if !ok {
52 | return nil
53 | }
54 | return factory
55 | }
56 |
57 | func (registry *MultiFactoryRegistry) RegisterScoreboardFactory(factory ik.ScoreboardFactory) error {
58 | _, alreadyExists := registry.scoreboardFactories[factory.Name()]
59 | if alreadyExists {
60 | return errors.New(fmt.Sprintf("ScoreboardFactory named %s already registered", factory.Name()))
61 | }
62 | registry.scoreboardFactories[factory.Name()] = factory
63 | registry.plugins = append(registry.plugins, factory)
64 | factory.BindScorekeeper(registry.scorekeeper)
65 | return nil
66 | }
67 |
68 | func (registry *MultiFactoryRegistry) LookupScoreboardFactory(name string) ik.ScoreboardFactory {
69 | factory, ok := registry.scoreboardFactories[name]
70 | if !ok {
71 | return nil
72 | }
73 | return factory
74 | }
75 |
76 | func (registry *MultiFactoryRegistry) RegisterLineParserPlugin(plugin ik.LineParserPlugin) error {
77 | _, alreadyExists := registry.lineParserPlugins[plugin.Name()]
78 | if alreadyExists {
79 | return errors.New(fmt.Sprintf("LineParserPlugin named %s already registered", plugin.Name()))
80 | }
81 | err := plugin.OnRegistering(func(name string, factory ik.LineParserFactoryFactory) error {
82 | _, alreadyExists := registry.lineParserFactoryFactories[name]
83 | if alreadyExists {
84 | return errors.New(fmt.Sprintf("LineParserFactoryFactory named %s already registered", name))
85 | }
86 | registry.lineParserFactoryFactories[name] = factory
87 | return nil
88 | })
89 | if err != nil {
90 | return err
91 | }
92 | registry.lineParserPlugins[plugin.Name()] = plugin
93 | return nil
94 | }
95 |
96 | func (registry *MultiFactoryRegistry) LookupLineParserFactoryFactory(name string) ik.LineParserFactoryFactory {
97 | factory, ok := registry.lineParserFactoryFactories[name]
98 | if !ok {
99 | return nil
100 | }
101 | return factory
102 | }
103 |
104 | func (registry *MultiFactoryRegistry) Plugins() []ik.Plugin {
105 | retval := make([]ik.Plugin, len(registry.plugins))
106 | copy(retval, registry.plugins)
107 | return retval
108 | }
109 |
110 | func NewMultiFactoryRegistry(scorekeeper *ik.Scorekeeper) *MultiFactoryRegistry {
111 | return &MultiFactoryRegistry{
112 | scorekeeper: scorekeeper,
113 | inputFactories: make(map[string]ik.InputFactory),
114 | outputFactories: make(map[string]ik.OutputFactory),
115 | scoreboardFactories: make(map[string]ik.ScoreboardFactory),
116 | lineParserPlugins: make(map[string]ik.LineParserPlugin),
117 | lineParserFactoryFactories: make(map[string]ik.LineParserFactoryFactory),
118 | }
119 | }
120 |
--------------------------------------------------------------------------------
/entrypoints/ik/scoreboard_html.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "bytes"
5 | "fmt"
6 | "github.com/moriyoshi/ik"
7 | "github.com/moriyoshi/ik/markup"
8 | "html"
9 | "html/template"
10 | "net"
11 | "net/http"
12 | "reflect"
13 | "strconv"
14 | "sync/atomic"
15 | "time"
16 | "unsafe"
17 | )
18 |
19 | var mainTemplate = `
20 |
21 |
22 | Ik Scoreboard
23 |
104 |
105 |
106 |
107 | Ik Scoreboard
108 |
109 |
110 | Plugins
111 |
112 | - Input Plugins
113 | -
114 | {{range .InputPlugins}}
115 | {{.Name}}
116 | {{end}}
117 |
- Output Plugins
118 | -
119 | {{range .OutputPlugins}}
120 | {{.Name}}
121 | {{end}}
122 |
123 | - Scoreboard Plugins
124 | -
125 | {{range .ScoreboardPlugins}}
126 | {{.Name}}
127 | {{end}}
128 |
129 |
130 | Plugin Statuses
131 | {{range $plugin, $pluginInstanceStatuses := .PluginInstanceStatusesPerPlugin}}
132 | {{renderPluginName $plugin}}
133 | {{range $_, $pluginInstanceStatus := $pluginInstanceStatuses}}
134 | Instance #{{$pluginInstanceStatus.Id}}
135 | {{with .SpawneeStatus}}
136 |
137 |
138 |
139 | Spawnee ID |
140 | {{.Id}} |
141 |
142 |
143 | Status |
144 | {{renderExitStatusLabel .ExitStatus}} |
145 |
146 |
147 |
148 | {{end}}
149 | Topics
150 | {{if len $pluginInstanceStatus.Topics}}
151 |
152 |
153 |
154 | Name |
155 | Value |
156 | Description |
157 |
158 |
159 |
160 | {{range $pluginInstanceStatus.Topics}}
161 |
162 | {{.DisplayName}} ({{.Name}}) |
163 | {{renderMarkup .Value}} |
164 | {{.Description}} |
165 |
166 | {{end}}
167 |
168 |
169 | {{else}}
170 | No topics available
171 | {{end}}
172 | {{end}}
173 |
174 | {{end}}
175 |
176 |
177 | `
178 |
179 | type HTMLHTTPScoreboard struct {
180 | template *template.Template
181 | factory *HTMLHTTPScoreboardFactory
182 | logger ik.Logger
183 | engine ik.Engine
184 | registry ik.PluginRegistry
185 | listener net.Listener
186 | server http.Server
187 | requests int64
188 | }
189 |
190 | type HTMLHTTPScoreboardFactory struct {
191 | }
192 |
193 | type pluginInstanceStatusTopic struct {
194 | Name string
195 | DisplayName string
196 | Description string
197 | Value ik.Markup
198 | }
199 |
200 | type pluginInstanceStatus struct {
201 | Id int
202 | PluginInstance ik.PluginInstance
203 | SpawneeStatus *ik.SpawneeStatus
204 | Topics []pluginInstanceStatusTopic
205 | }
206 |
207 | type viewModel struct {
208 | InputPlugins []ik.InputFactory
209 | OutputPlugins []ik.OutputFactory
210 | ScoreboardPlugins []ik.ScoreboardFactory
211 | Plugins []ik.Plugin
212 | PluginInstanceStatusesPerPlugin map[ik.Plugin][]pluginInstanceStatus
213 | SpawneeStatuses map[ik.Spawnee]ik.SpawneeStatus
214 | }
215 |
216 | type requestCountFetcher struct{}
217 |
218 | func (fetcher *requestCountFetcher) Markup(scoreboard_ ik.PluginInstance) (ik.Markup, error) {
219 | text, err := fetcher.PlainText(scoreboard_)
220 | if err != nil {
221 | return ik.Markup{}, err
222 | }
223 | return ik.Markup{[]ik.MarkupChunk{{Attrs: 0, Text: text}}}, nil
224 | }
225 |
226 | func (fetcher *requestCountFetcher) PlainText(scoreboard_ ik.PluginInstance) (string, error) {
227 | scoreboard := scoreboard_.(*HTMLHTTPScoreboard)
228 | return strconv.FormatInt(scoreboard.requests, 10), nil
229 | }
230 |
231 | func spawneeName(spawnee ik.Spawnee) string {
232 | switch spawnee_ := spawnee.(type) {
233 | case ik.PluginInstance:
234 | return spawnee_.Factory().Name()
235 | default:
236 | return reflect.TypeOf(spawnee_).Name()
237 | }
238 | }
239 |
240 | func renderExitStatusStyle(err error) string {
241 | switch err_ := err.(type) {
242 | case *ik.ContinueType:
243 | _ = err_
244 | return "running"
245 | default:
246 | return "error"
247 | }
248 | }
249 |
250 | func renderExitStatusLabel(err error) string {
251 | switch err_ := err.(type) {
252 | case *ik.ContinueType:
253 | _ = err_
254 | return `Running`
255 | default:
256 | return err.Error()
257 | }
258 | }
259 |
260 | func renderMarkup(markup_ ik.Markup) template.HTML {
261 | buf := &bytes.Buffer{}
262 | renderer := &markup.HTMLRenderer{Out: buf}
263 | renderer.Render(&markup_)
264 | return template.HTML(buf.String())
265 | }
266 |
267 | func renderPluginType(plugin ik.Plugin) string {
268 | switch plugin.(type) {
269 | case ik.InputFactory:
270 | return "input"
271 | case ik.OutputFactory:
272 | return "output"
273 | case ik.ScoreboardFactory:
274 | return "scoreboard"
275 | default:
276 | return "unknown"
277 | }
278 | }
279 |
280 | func renderPluginName(plugin ik.Plugin) template.HTML {
281 | return template.HTML(
282 | fmt.Sprintf(`%s`,
283 | html.EscapeString(renderPluginType(plugin)),
284 | html.EscapeString(html.EscapeString(plugin.Name())),
285 | ),
286 | )
287 | }
288 |
289 | func (scoreboard *HTMLHTTPScoreboard) Run() error {
290 | scoreboard.server.Serve(scoreboard.listener)
291 | return nil
292 | }
293 |
294 | func (scoreboard *HTMLHTTPScoreboard) Shutdown() error {
295 | return scoreboard.listener.Close()
296 | }
297 |
298 | func (scoreboard *HTMLHTTPScoreboard) Factory() ik.Plugin {
299 | return scoreboard.factory
300 | }
301 |
302 | func (factory *HTMLHTTPScoreboardFactory) Name() string {
303 | return "html_http"
304 | }
305 |
306 | func (scoreboard *HTMLHTTPScoreboard) ServeHTTP(resp http.ResponseWriter, req *http.Request) {
307 | atomic.AddInt64(&scoreboard.requests, 1)
308 | resp.Header().Set("Content-Type", "text/html; charset=utf-8")
309 | resp.WriteHeader(200)
310 | spawneeStatuses_, err := scoreboard.engine.SpawneeStatuses()
311 | spawneeStatuses := make(map[ik.Spawnee]ik.SpawneeStatus)
312 | if err == nil {
313 | for _, spawneeStatus := range spawneeStatuses_ {
314 | spawneeStatuses[spawneeStatus.Spawnee] = spawneeStatus
315 | }
316 | }
317 | plugins := scoreboard.registry.Plugins()
318 | inputPlugins := make([]ik.InputFactory, 0)
319 | outputPlugins := make([]ik.OutputFactory, 0)
320 | scoreboardPlugins := make([]ik.ScoreboardFactory, 0)
321 | for _, plugin := range plugins {
322 | switch plugin_ := plugin.(type) {
323 | case ik.InputFactory:
324 | inputPlugins = append(inputPlugins, plugin_)
325 | case ik.OutputFactory:
326 | outputPlugins = append(outputPlugins, plugin_)
327 | case ik.ScoreboardFactory:
328 | scoreboardPlugins = append(scoreboardPlugins, plugin_)
329 | }
330 | }
331 | pluginInstances := scoreboard.engine.PluginInstances()
332 | pluginInstanceStatusesPerPlugin := make(map[ik.Plugin][]pluginInstanceStatus)
333 | for i, pluginInstance := range pluginInstances {
334 | plugin := pluginInstance.Factory()
335 | topics_ := scoreboard.engine.Scorekeeper().GetTopics(plugin)
336 | topics := make([]pluginInstanceStatusTopic, len(topics_))
337 | for i, topic_ := range topics_ {
338 | topics[i].Name = topic_.Name
339 | topics[i].DisplayName = topic_.DisplayName
340 | topics[i].Description = topic_.Description
341 | topics[i].Value, err = topic_.Fetcher.Markup(pluginInstance)
342 | if err != nil {
343 | errorMessage := err.Error()
344 | scoreboard.logger.Error("%s", errorMessage)
345 | topics[i].Value = ik.Markup{[]ik.MarkupChunk{{Attrs: ik.Embolden, Text: fmt.Sprintf("Error: %s", errorMessage)}}}
346 | }
347 | }
348 | pluginInstanceStatus_ := pluginInstanceStatus{
349 | Id: i + 1,
350 | PluginInstance: pluginInstance,
351 | Topics: topics,
352 | }
353 | spawneeStatus, ok := spawneeStatuses[pluginInstance]
354 | if ok {
355 | pluginInstanceStatus_.SpawneeStatus = &spawneeStatus
356 | }
357 | pluginInstanceStatuses_, ok := pluginInstanceStatusesPerPlugin[plugin]
358 | if !ok {
359 | pluginInstanceStatuses_ = make([]pluginInstanceStatus, 0)
360 | }
361 | pluginInstanceStatuses_ = append(pluginInstanceStatuses_, pluginInstanceStatus_)
362 | pluginInstanceStatusesPerPlugin[plugin] = pluginInstanceStatuses_
363 | }
364 | scoreboard.template.Execute(resp, viewModel{
365 | InputPlugins: inputPlugins,
366 | OutputPlugins: outputPlugins,
367 | ScoreboardPlugins: scoreboardPlugins,
368 | Plugins: plugins,
369 | PluginInstanceStatusesPerPlugin: pluginInstanceStatusesPerPlugin,
370 | SpawneeStatuses: spawneeStatuses,
371 | })
372 | }
373 |
374 | func newHTMLHTTPScoreboard(factory *HTMLHTTPScoreboardFactory, logger ik.Logger, engine ik.Engine, registry ik.PluginRegistry, bind string, readTimeout time.Duration, writeTimeout time.Duration) (*HTMLHTTPScoreboard, error) {
375 | template_, err := template.New("main").Funcs(template.FuncMap{
376 | "spawneeName": spawneeName,
377 | "renderExitStatusStyle": renderExitStatusStyle,
378 | "renderExitStatusLabel": renderExitStatusLabel,
379 | "renderMarkup": renderMarkup,
380 | "renderPluginName": renderPluginName,
381 | "renderPluginType": renderPluginType,
382 | }).Parse(mainTemplate)
383 | if err != nil {
384 | logger.Error("%s", err.Error())
385 | return nil, err
386 | }
387 | server := http.Server{
388 | Addr: bind,
389 | Handler: nil,
390 | ReadTimeout: readTimeout,
391 | WriteTimeout: writeTimeout,
392 | MaxHeaderBytes: 0,
393 | TLSConfig: nil,
394 | TLSNextProto: nil,
395 | }
396 | listener, err := net.Listen("tcp", bind)
397 | if err != nil {
398 | logger.Error("%s", err.Error())
399 | return nil, err
400 | }
401 | retval := &HTMLHTTPScoreboard{
402 | template: template_,
403 | factory: factory,
404 | logger: logger,
405 | engine: engine,
406 | registry: registry,
407 | server: server,
408 | listener: listener,
409 | requests: 0,
410 | }
411 | retval.server.Handler = retval
412 | return retval, nil
413 | }
414 |
415 | var durationSize = unsafe.Sizeof(time.Duration(0))
416 |
417 | func (factory *HTMLHTTPScoreboardFactory) New(engine ik.Engine, registry ik.PluginRegistry, config *ik.ConfigElement) (ik.Scoreboard, error) {
418 | listen, ok := config.Attrs["listen"]
419 | if !ok {
420 | listen = ""
421 | }
422 | netPort, ok := config.Attrs["port"]
423 | if !ok {
424 | netPort = "24226"
425 | }
426 | bind := listen + ":" + netPort
427 | readTimeout := time.Duration(0)
428 | {
429 | valueStr, ok := config.Attrs["read_timeout"]
430 | if ok {
431 | value, err := strconv.ParseInt(valueStr, 10, int(durationSize*8))
432 | if err != nil {
433 | return nil, err
434 | }
435 | readTimeout = time.Duration(value)
436 | }
437 | }
438 | writeTimeout := time.Duration(0)
439 | {
440 | writeTimeoutStr, ok := config.Attrs["write_timeout"]
441 | if ok {
442 | value, err := strconv.ParseInt(writeTimeoutStr, 10, int(durationSize*8))
443 | if err != nil {
444 | return nil, err
445 | }
446 | writeTimeout = time.Duration(value)
447 | }
448 | }
449 | return newHTMLHTTPScoreboard(factory, engine.Logger(), engine, registry, bind, readTimeout, writeTimeout)
450 | }
451 |
452 | func (factory *HTMLHTTPScoreboardFactory) BindScorekeeper(scorekeeper *ik.Scorekeeper) {
453 | scorekeeper.AddTopic(ik.ScorekeeperTopic{
454 | Plugin: factory,
455 | Name: "requests",
456 | DisplayName: "Requests",
457 | Description: "Number of requests accepted",
458 | Fetcher: &requestCountFetcher{},
459 | })
460 | }
461 |
--------------------------------------------------------------------------------
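
For reference, the factory's New above reads its settings from the `<scoreboard>` element's attributes: `listen` (default empty, i.e. all interfaces), `port` (default 24226), and optional `read_timeout`/`write_timeout`, which are parsed as integers and passed straight to time.Duration, so they are interpreted as nanoseconds. A hypothetical section that configureScoreboards in entrypoints/ik/main.go would accept (values are illustrative; 30000000000 ns is 30 seconds):

```
<scoreboard>
  type html_http
  listen 127.0.0.1
  port 24226
  read_timeout 30000000000
  write_timeout 30000000000
</scoreboard>
```
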
/entrypoints/ikb/main.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "bytes"
5 | "encoding/json"
6 | "flag"
7 | "fmt"
8 | termutil "github.com/andrew-d/go-termutil"
9 | "github.com/moriyoshi/ik"
10 | "github.com/moriyoshi/ik/markup"
11 | "github.com/op/go-logging"
12 | "github.com/ugorji/go/codec"
13 | "math"
14 | "net"
15 | "os"
16 | "reflect"
17 | "strconv"
18 | "sync/atomic"
19 | "time"
20 | )
21 |
22 | type Record struct {
23 | Timestamp uint64
24 | Data map[string]interface{}
25 | }
26 |
27 | type IkBench struct {
28 | codec codec.MsgpackHandle
29 | }
30 |
31 | type IkBenchReportData struct {
32 | NumberOfRecordsSent int64
33 | LongestSubmissionTime time.Duration
34 | ShortestSubmissionTime time.Duration
35 | Now time.Time
36 | Start time.Time
37 | }
38 |
39 | type IkBenchReporter interface {
40 | ReportRecordsSent(data IkBenchReportData)
41 | ReportFinal(data IkBenchReportData)
42 | }
43 |
44 | type IkBenchParams struct {
45 | Host string
46 | Simple bool
47 | NumberOfRecordsToSubmit int
48 | NumberOfRecordsSentAtOnce int
49 | Concurrency int
50 | Tag string
51 | Data map[string]interface{}
52 | MaxRetryCount int
53 | ReportingFrequency int
54 | Reporter IkBenchReporter
55 | }
56 |
57 | func (ikb *IkBench) encodeEntrySingle(buf *bytes.Buffer, tag string, record Record) error {
58 | enc := codec.NewEncoder(buf, &ikb.codec)
59 | return enc.Encode([]interface{}{tag, record.Timestamp, record.Data})
60 | }
61 |
62 | func (ikb *IkBench) encodeEntryBulk(buf *bytes.Buffer, tag string, records []Record) error {
63 | enc := codec.NewEncoder(buf, &ikb.codec)
64 | return enc.Encode([]interface{}{tag, records})
65 | }
66 |
67 | func (ikb *IkBench) Submit(conn net.Conn, params *IkBenchParams) error {
68 | time_ := time.Now().Unix()
69 | records := make([]Record, params.NumberOfRecordsSentAtOnce)
70 | for i := 0; i < params.NumberOfRecordsSentAtOnce; i += 1 {
71 | records[i] = Record{Timestamp: uint64(time_), Data: params.Data}
72 | }
73 | buf := bytes.Buffer{}
74 | if params.Simple {
75 | for _, record := range records {
76 | err := ikb.encodeEntrySingle(&buf, params.Tag, record)
77 | if err != nil {
78 | return err
79 | }
80 | }
81 | } else {
82 | err := ikb.encodeEntryBulk(&buf, params.Tag, records)
83 | if err != nil {
84 | return err
85 | }
86 | }
87 | _, err := buf.WriteTo(conn)
88 | return err
89 | }
90 |
91 | func (ikb *IkBench) Run(logger ik.Logger, params *IkBenchParams) {
92 | numberOfRecordsSentAtOnce := params.NumberOfRecordsSentAtOnce
93 | numberOfAttempts := params.NumberOfRecordsToSubmit / numberOfRecordsSentAtOnce
94 | numberOfAttemptsPerProc := numberOfAttempts / params.Concurrency
95 | remainder := numberOfAttempts % params.Concurrency
96 | reportingFrequency := params.ReportingFrequency
97 | numberOfRecordsSent := int64(0)
98 | sync := make(chan int)
99 | start := time.Now()
100 | shortestSubmissionTime := time.Duration(-1)
101 | longestSubmissionTime := time.Duration(-1)
102 | var submissionStart time.Time
103 | for i := 0; i < params.Concurrency; i += 1 {
104 | r := 0
105 | if i < remainder {
106 | r = 1
107 | }
108 | go func(id int, attempts int) {
109 | retryCount := params.MaxRetryCount
110 | var conn net.Conn
111 | var err error
112 | defer func() {
113 | if conn != nil {
114 | conn.Close()
115 | }
116 | }()
117 | outer:
118 | for i := 0; i < attempts; i += 1 {
119 | for {
120 | if conn == nil {
121 | for {
122 | conn, err = net.Dial("tcp", params.Host)
123 | if err != nil {
124 | logger.Error(err.Error())
125 | retryCount -= 1
126 | if retryCount < 0 {
127 | logger.Critical("retry count exceeded") // FIXME
128 | break outer
129 | }
130 | continue
131 | }
132 | break
133 | }
134 | }
135 | if submissionStart.IsZero() {
136 | submissionStart = time.Now()
137 | }
138 | err = ikb.Submit(conn, params)
139 | if err != nil {
140 | err_, ok := err.(net.Error)
141 | if ok {
142 | if err_.Temporary() {
143 | continue
144 | }
145 | closeErr := conn.Close()
146 | if closeErr != nil {
147 | logger.Warning(closeErr.Error())
148 | }
149 | conn = nil
150 | }
151 | logger.Error(err.Error()) // FIXME
152 | break outer
153 | }
154 | now := time.Now()
155 | submissionTime := now.Sub(submissionStart)
156 | submissionStart = now
157 | if shortestSubmissionTime < 0 || shortestSubmissionTime > submissionTime {
158 | shortestSubmissionTime = submissionTime
159 | }
160 | if longestSubmissionTime < submissionTime {
161 | longestSubmissionTime = submissionTime
162 | }
163 | if atomic.AddInt64(&numberOfRecordsSent, int64(numberOfRecordsSentAtOnce))%int64(reportingFrequency) == 0 {
164 | params.Reporter.ReportRecordsSent(IkBenchReportData{
165 | NumberOfRecordsSent: numberOfRecordsSent,
166 | ShortestSubmissionTime: shortestSubmissionTime,
167 | LongestSubmissionTime: longestSubmissionTime,
168 | Now: now,
169 | Start: start,
170 | })
171 | }
172 | break
173 | }
174 | }
175 | sync <- id
176 | }(i, numberOfAttemptsPerProc+r)
177 | }
178 | for i := 0; i < params.Concurrency; i += 1 {
179 | <-sync
180 | }
181 | params.Reporter.ReportFinal(IkBenchReportData{
182 | NumberOfRecordsSent: numberOfRecordsSent,
183 | ShortestSubmissionTime: shortestSubmissionTime,
184 | LongestSubmissionTime: longestSubmissionTime,
185 | Now: time.Now(),
186 | Start: start,
187 | })
188 | }
189 |
190 | func NewIkBench() *IkBench {
191 | codec_ := codec.MsgpackHandle{}
192 | codec_.MapType = reflect.TypeOf(map[string]interface{}(nil))
193 | codec_.RawToString = false
194 | codec_.StructToArray = true
195 | return &IkBench{codec: codec_}
196 | }
197 |
198 | func usage() {
199 | fmt.Fprintf(os.Stderr, "usage: %s [-concurrent N] [-multi N] [-no-packed] [-host HOST] [-data JSON] tag count\n", os.Args[0])
200 | flag.PrintDefaults()
201 | os.Exit(255)
202 | }
203 |
204 | func exitWithMessage(message string, exitStatus int) {
205 | fmt.Fprintf(os.Stderr, "%s: %s\n", os.Args[0], message)
206 | os.Exit(exitStatus)
207 | }
208 |
209 | func exitWithError(err error, exitStatus int) {
210 | exitWithMessage(err.Error(), exitStatus)
211 | }
212 |
213 | type defaultReporter struct {
214 | renderer markup.MarkupRenderer
215 | }
216 |
217 | func (reporter *defaultReporter) ReportRecordsSent(data IkBenchReportData) {
218 | elapsed := float64(data.Now.Sub(data.Start)) / 1e9
219 | reporter.renderer.Render(&ik.Markup{[]ik.MarkupChunk{
220 | ik.MarkupChunk{
221 | Attrs: 0,
222 | Text: fmt.Sprintf("%d records sent (%.3f seconds elapsed, %.3f records per second)\n", data.NumberOfRecordsSent, elapsed, float64(data.NumberOfRecordsSent)/elapsed),
223 | },
224 | }})
225 | }
226 |
227 | func (reporter *defaultReporter) ReportFinal(data IkBenchReportData) {
228 | elapsed := float64(data.Now.Sub(data.Start)) / 1e9
229 | reporter.renderer.Render(&ik.Markup{[]ik.MarkupChunk{
230 | ik.MarkupChunk{
231 | Attrs: ik.Embolden | ik.Yellow,
232 | Text: "Benchmark Duration: ",
233 | },
234 | ik.MarkupChunk{
235 | Attrs: ik.Embolden,
236 | Text: fmt.Sprintf("%.3f seconds\n", elapsed),
237 | },
238 | ik.MarkupChunk{
239 | Attrs: ik.Embolden | ik.Yellow,
240 | Text: "Number of Records Submitted: ",
241 | },
242 | ik.MarkupChunk{
243 | Attrs: ik.Embolden,
244 | Text: fmt.Sprintf("%d\n", data.NumberOfRecordsSent),
245 | },
246 | ik.MarkupChunk{
247 | Attrs: ik.Embolden | ik.Yellow,
248 | Text: "Records per Second: ",
249 | },
250 | ik.MarkupChunk{
251 | Attrs: ik.Embolden,
252 | Text: fmt.Sprintf("%.3f\n", float64(data.NumberOfRecordsSent)/elapsed),
253 | },
254 | ik.MarkupChunk{
255 | Attrs: ik.Embolden | ik.Yellow,
256 | Text: "Average Submission Time: ",
257 | },
258 | ik.MarkupChunk{
259 | Attrs: ik.Embolden,
260 | Text: fmt.Sprintf("%.10f seconds\n", elapsed/float64(data.NumberOfRecordsSent)),
261 | },
262 | ik.MarkupChunk{
263 | Attrs: ik.Embolden,
264 | Text: " Shortest: ",
265 | },
266 | ik.MarkupChunk{
267 | Attrs: ik.Embolden,
268 | Text: fmt.Sprintf("%.10f seconds\n", float64(data.ShortestSubmissionTime)/1e9),
269 | },
270 | ik.MarkupChunk{
271 | Attrs: ik.Embolden,
272 | Text: " Longest: ",
273 | },
274 | ik.MarkupChunk{
275 | Attrs: ik.Embolden,
276 | Text: fmt.Sprintf("%.10f seconds\n", float64(data.LongestSubmissionTime)/1e9),
277 | },
278 | }})
279 | }
280 |
281 | func main() {
282 | var host string
283 | var simple bool
284 | var numberOfRecordsToSubmit int
285 | var numberOfRecordsSentAtOnce int
286 | var concurrency int
287 | var tag string
288 | var jsonString string
289 | flag.IntVar(&concurrency, "concurrent", 1, "number of goroutines")
290 | flag.IntVar(&numberOfRecordsSentAtOnce, "multi", 1, "send multiple records at once")
291 | flag.BoolVar(&simple, "no-packed", false, "don't use lazy deserialization optimize")
292 | flag.StringVar(&host, "host", "localhost:24224", "fluent host")
293 | flag.StringVar(&jsonString, "data", `{ "message": "test" }`, "data to send (in JSON)")
294 | flag.Parse()
295 | args := flag.Args()
296 | if len(args) < 2 {
297 | usage()
298 | }
299 | tag = args[0]
300 | numberOfRecordsToSubmit, err := strconv.Atoi(args[1])
301 | if err != nil {
302 | exitWithError(err, 255)
303 | }
304 | data := make(map[string]interface{})
305 | err = json.Unmarshal([]byte(jsonString), &data)
306 | if err != nil {
307 | exitWithError(err, 255)
308 | }
309 | if numberOfRecordsToSubmit%numberOfRecordsSentAtOnce != 0 {
310 | exitWithMessage("the value of 'count' must be a multiple of 'multi'", 255)
311 | }
312 | if numberOfRecordsToSubmit/numberOfRecordsSentAtOnce < concurrency {
313 | exitWithMessage("the value of 'concurrency' must be equal to or greater than the division of 'count' by 'multi'", 255)
314 | }
315 | var renderer markup.MarkupRenderer
316 | if termutil.Isatty(os.Stdout.Fd()) {
317 | renderer = &markup.TerminalEscapeRenderer{os.Stdout}
318 | } else {
319 | renderer = &markup.PlainRenderer{os.Stdout}
320 | }
321 | ikb := NewIkBench()
322 | ikb.Run(
323 | logging.MustGetLogger("ikb"),
324 | &IkBenchParams{
325 | Host: host,
326 | Simple: simple,
327 | NumberOfRecordsToSubmit: numberOfRecordsToSubmit,
328 | NumberOfRecordsSentAtOnce: numberOfRecordsSentAtOnce,
329 | Concurrency: concurrency,
330 | Tag: tag,
331 | Data: data,
332 | MaxRetryCount: 5,
333 | ReportingFrequency: int(math.Max(math.Pow(10, math.Ceil(math.Log10(float64(numberOfRecordsToSubmit)))-1), 100)),
334 | Reporter: &defaultReporter{renderer: renderer},
335 | },
336 | )
337 | }
338 |
--------------------------------------------------------------------------------
/fanout.go:
--------------------------------------------------------------------------------
1 | package ik
2 |
3 | type Fanout struct {
4 | ports []Port
5 | }
6 |
7 | func (fanout *Fanout) AddPort(port Port) {
8 | fanout.ports = append(fanout.ports, port)
9 | }
10 |
11 | func (fanout *Fanout) Emit(recordSets []FluentRecordSet) error {
12 | for _, port := range fanout.ports {
13 | err := port.Emit(recordSets)
14 | if err != nil {
15 | panic("MUST DO SOMETHING GOOD") // TODO
16 | }
17 | }
18 | return nil
19 | }
20 |
--------------------------------------------------------------------------------
/fluent_router.go:
--------------------------------------------------------------------------------
1 | package ik
2 |
3 | import (
4 | "regexp"
5 | )
6 |
7 | type fluentRouterRule struct {
8 | re *regexp.Regexp
9 | port Port
10 | }
11 |
12 | type FluentRouter struct {
13 | rules []*fluentRouterRule
14 | }
15 |
16 | type PatternError struct {
17 | message string
18 | }
19 |
20 | func (self *PatternError) Error() string {
21 | return self.message
22 | }
23 |
24 | func buildRegexpFromGlobPatternInner(pattern string, startPos int) (string, int, error) {
25 | state := 0
26 | chunk := ""
27 |
28 | patternLength := len(pattern)
29 | var i int
30 | for i = startPos; i < patternLength; i += 1 {
31 | if state == 0 {
32 | c := pattern[i]
33 | if c == '*' {
34 | state = 2
35 | } else if c == '}' || c == ',' {
36 | break
37 | } else if c == '{' {
38 | chunk += "(?:"
39 | first := true
40 | for {
41 | i += 1
42 | subchunk, lastPos, err := buildRegexpFromGlobPatternInner(pattern, i)
43 | if err != nil {
44 | return "", 0, err
45 | }
46 | if lastPos == patternLength {
47 | return "", 0, &PatternError{"unexpected end of pattern (in expectation of '}' or ',')"}
48 | }
49 | if !first {
50 | chunk += "|"
51 | }
52 | i = lastPos
53 | chunk += "(?:" + subchunk + ")"
54 | first = false
55 | if pattern[lastPos] == '}' {
56 | break
57 | } else if pattern[lastPos] != ',' {
58 | return "", 0, &PatternError{"never get here"}
59 | }
60 | }
61 | chunk += ")"
62 | } else {
63 | chunk += regexp.QuoteMeta(string(c))
64 | }
65 | } else if state == 1 {
66 | // escape
67 | c := pattern[i]
68 | chunk += regexp.QuoteMeta(string(c))
69 | state = 0
70 | } else if state == 2 {
71 | c := pattern[i]
72 | if c == '*' {
73 | state = 3
74 | } else {
75 | chunk += "[^.]*" + regexp.QuoteMeta(string(c))
76 | state = 0
77 | }
78 | } else if state == 3 {
79 | // recursive any
80 | c := pattern[i]
81 | if c == '*' {
82 | return "", 0, &PatternError{"unexpected *"}
83 | } else if c == '.' {
84 | chunk += "(?:.*\\.|^)"
85 | } else {
86 | chunk += ".*" + regexp.QuoteMeta(string(c))
87 | }
88 | state = 0
89 | }
90 | }
91 | if state == 2 {
92 | chunk += "[^.]*"
93 | } else if state == 3 {
94 | chunk += ".*"
95 | }
96 | return chunk, i, nil
97 | }
98 |
99 | func BuildRegexpFromGlobPattern(pattern string) (string, error) {
100 | chunk, pos, err := buildRegexpFromGlobPatternInner(pattern, 0)
101 | if err != nil {
102 | return "", err
103 | }
104 | if pos != len(pattern) {
105 | return "", &PatternError{"unexpected '" + string(pattern[pos]) + "'"}
106 | }
107 | return "^" + chunk + "$", nil
108 | }
109 |
110 | func (router *FluentRouter) AddRule(pattern string, port Port) error {
111 | chunk, err := BuildRegexpFromGlobPattern(pattern)
112 | if err != nil {
113 | return err
114 | }
115 | re, err := regexp.Compile(chunk)
116 | if err != nil {
117 | return err
118 | }
119 | newRule := &fluentRouterRule{re, port}
120 | router.rules = append(router.rules, newRule)
121 | return nil
122 | }
123 |
124 | func (router *FluentRouter) Emit(recordSets []FluentRecordSet) error {
125 | recordSetsMap := make(map[Port][]FluentRecordSet)
126 | for i := range recordSets {
127 | recordSet := &recordSets[i]
128 | for _, rule := range router.rules {
129 | if rule.re.MatchString(recordSet.Tag) {
130 | recordSetsForPort, ok := recordSetsMap[rule.port]
131 | if !ok {
132 | recordSetsForPort = make([]FluentRecordSet, 0)
133 | } else {
134 | lastRecordSets := &recordSetsForPort[len(recordSetsForPort)-1]
135 | if &lastRecordSets.Records == &recordSet.Records {
136 | continue
137 | }
138 | }
139 | recordSetsMap[rule.port] = append(recordSetsForPort, *recordSet)
140 | }
141 | }
142 | }
143 | for port, recordSets := range recordSetsMap {
144 | err := port.Emit(recordSets)
145 | if err != nil {
146 | return err
147 | }
148 | }
149 | return nil
150 | }
151 |
152 | func NewFluentRouter() *FluentRouter {
153 | return &FluentRouter{make([]*fluentRouterRule, 0)}
154 | }
155 |
--------------------------------------------------------------------------------
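
FluentRouter itself implements Port: AddRule compiles a fluentd-style tag glob (`*`, `**`, `{a,b}`) into an anchored regexp, and Emit forwards each record set to every port whose pattern matches its tag. A minimal usage sketch with a hypothetical collecting port:

```go
package main

import "github.com/moriyoshi/ik"

// tagCollector is a hypothetical Port that remembers which tags reached it.
type tagCollector struct{ tags []string }

func (p *tagCollector) Emit(recordSets []ik.FluentRecordSet) error {
	for _, rs := range recordSets {
		p.tags = append(p.tags, rs.Tag)
	}
	return nil
}

func routeExample() error {
	router := ik.NewFluentRouter()
	access := &tagCollector{}
	everything := &tagCollector{}
	if err := router.AddRule("myapp.access.*", access); err != nil {
		return err
	}
	if err := router.AddRule("**", everything); err != nil {
		return err
	}
	// access ends up with only "myapp.access.web";
	// everything ends up with both tags.
	return router.Emit([]ik.FluentRecordSet{
		{Tag: "myapp.access.web", Records: nil},
		{Tag: "system.syslog", Records: nil},
	})
}
```
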
/fluent_router_test.go:
--------------------------------------------------------------------------------
1 | package ik
2 |
3 | import (
4 | "testing"
5 | )
6 |
7 | func TestBuildRegexpFromGlobPattern_0(t *testing.T) {
8 | a, err := BuildRegexpFromGlobPattern("a.b.c")
9 | if err != nil {
10 | t.Fail()
11 | }
12 | if a != "^a\\.b\\.c$" {
13 | t.Fail()
14 | }
15 | }
16 |
17 | func TestBuildRegexpFromGlobPattern_1(t *testing.T) {
18 | a, err := BuildRegexpFromGlobPattern("a.*.c")
19 | if err != nil {
20 | t.Fail()
21 | }
22 | if a != "^a\\.[^.]*\\.c$" {
23 | t.Fail()
24 | }
25 | }
26 |
27 | func TestBuildRegexpFromGlobPattern_2(t *testing.T) {
28 | a, err := BuildRegexpFromGlobPattern("**c")
29 | if err != nil {
30 | t.Fail()
31 | }
32 | if a != "^.*c$" {
33 | t.Fail()
34 | }
35 | }
36 |
37 | func TestBuildRegexpFromGlobPattern_3(t *testing.T) {
38 | a, err := BuildRegexpFromGlobPattern("**.c")
39 | if err != nil {
40 | t.Fail()
41 | }
42 | if a != "^(?:.*\\.|^)c$" {
43 | t.Fail()
44 | }
45 | }
46 |
47 | func TestBuildRegexpFromGlobPattern_4(t *testing.T) {
48 | a, err := BuildRegexpFromGlobPattern("a.{b,c}.d")
49 | println(a)
50 | if err != nil {
51 | t.Fail()
52 | }
53 | if a != "^a\\.(?:(?:b)|(?:c))\\.d$" {
54 | t.Fail()
55 | }
56 | }
57 |
--------------------------------------------------------------------------------
/glob.go:
--------------------------------------------------------------------------------
1 | package ik
2 |
3 | import (
4 | "net/http"
5 | "path"
6 | "strings"
7 | )
8 |
9 | type globMatcherContext struct {
10 | fs http.FileSystem
11 | path string
12 | restOfComponents []string
13 | resultCollector func(string) error
14 | }
15 |
16 | func doMatch(context globMatcherContext) error {
17 | if len(context.restOfComponents) == 0 {
18 | return context.resultCollector(context.path)
19 | }
20 |
21 | f, err := context.fs.Open(context.path)
22 | if err != nil {
23 | return err
24 | }
25 | defer f.Close()
26 |
27 | info, err := f.Stat()
28 | if err != nil {
29 | return err
30 | }
31 | if !info.IsDir() {
32 | return nil
33 | }
34 |
35 | entries, err := f.Readdir(-1)
36 | if err != nil {
37 | return err
38 | }
39 |
40 | for _, entry := range entries {
41 | name := entry.Name()
42 | matched, err := path.Match(context.restOfComponents[0], name)
43 | if err != nil {
44 | return err
45 | }
46 | if matched {
47 | err := doMatch(globMatcherContext{
48 | fs: context.fs,
49 | path: path.Join(context.path, name),
50 | restOfComponents: context.restOfComponents[1:],
51 | resultCollector: context.resultCollector,
52 | })
53 | if err != nil {
54 | return err
55 | }
56 | }
57 | }
58 | return nil
59 | }
60 |
61 | func Glob(fs http.FileSystem, pattern string) ([]string, error) {
62 | retval := make([]string, 0)
63 | components := strings.Split(pattern, "/")
64 | var path_ string
65 | if len(components) > 0 && components[0] == "" {
66 | path_ = "/"
67 | components = components[1:]
68 | } else {
69 | path_ = "."
70 | }
71 | err := doMatch(globMatcherContext{
72 | fs: fs,
73 | path: path_,
74 | restOfComponents: components,
75 | resultCollector: func(match string) error {
76 | retval = append(retval, match)
77 | return nil
78 | },
79 | })
80 | return retval, err
81 | }
--------------------------------------------------------------------------------
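
Glob splits the pattern on `/` and walks the http.FileSystem one component at a time, matching each directory entry with path.Match; this is what the config parser's include handling uses to expand file patterns. A small sketch against an on-disk directory (the directory and pattern are illustrative):

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/moriyoshi/ik"
)

func main() {
	// Expand a relative pattern against the root of the http.Dir.
	matches, err := ik.Glob(http.Dir("/etc/fluent"), "conf.d/*.conf")
	if err != nil {
		panic(err)
	}
	for _, m := range matches {
		fmt.Println(m) // e.g. "conf.d/apache.conf"
	}
}
```
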
/ik.go:
--------------------------------------------------------------------------------
1 | package ik
2 |
3 | import (
4 | "github.com/moriyoshi/ik/task"
5 | "io"
6 | "math/rand"
7 | "net/http"
8 | )
9 |
10 | type FluentRecord struct {
11 | Tag string
12 | Timestamp uint64
13 | Data map[string]interface{}
14 | }
15 |
16 | type TinyFluentRecord struct {
17 | Timestamp uint64
18 | Data map[string]interface{}
19 | }
20 |
21 | type FluentRecordSet struct {
22 | Tag string
23 | Records []TinyFluentRecord
24 | }
25 |
26 | type Port interface {
27 | Emit(recordSets []FluentRecordSet) error
28 | }
29 |
30 | type Spawnee interface {
31 | Run() error
32 | Shutdown() error
33 | }
34 |
35 | type PluginInstance interface {
36 | Spawnee
37 | Factory() Plugin
38 | }
39 |
40 | type Input interface {
41 | PluginInstance
42 | Port() Port
43 | }
44 |
45 | type Output interface {
46 | PluginInstance
47 | Port
48 | }
49 |
50 | type MarkupAttributes int
51 |
52 | const (
53 | Red = 0x00001
54 | Green = 0x00002
55 | Yellow = 0x00003
56 | Blue = 0x00004
57 | Magenta = 0x00005
58 | Cyan = 0x00006
59 | White = 0x00007
60 | Embolden = 0x10000
61 | Underlined = 0x20000
62 | )
63 |
64 | type MarkupChunk struct {
65 | Attrs MarkupAttributes
66 | Text string
67 | }
68 |
69 | type Markup struct {
70 | Chunks []MarkupChunk
71 | }
72 |
73 | type ScoreValueFetcher interface {
74 | PlainText(PluginInstance) (string, error)
75 | Markup(PluginInstance) (Markup, error)
76 | }
77 |
78 | type Disposable interface {
79 | Dispose() error
80 | }
81 |
82 | type Plugin interface {
83 | Name() string
84 | BindScorekeeper(*Scorekeeper)
85 | }
86 |
87 | type ScorekeeperTopic struct {
88 | Plugin Plugin
89 | Name string
90 | DisplayName string
91 | Description string
92 | Fetcher ScoreValueFetcher
93 | }
94 |
95 | type Opener interface {
96 | FileSystem() http.FileSystem
97 | BasePath() string
98 | NewOpener(path string) Opener
99 | }
100 |
101 | type Engine interface {
102 | Disposable
103 | Logger() Logger
104 | Opener() Opener
105 | LineParserPluginRegistry() LineParserPluginRegistry
106 | RandSource() rand.Source
107 | Scorekeeper() *Scorekeeper
108 | DefaultPort() Port
109 | Spawn(Spawnee) error
110 | Launch(PluginInstance) error
111 | SpawneeStatuses() ([]SpawneeStatus, error)
112 | PluginInstances() []PluginInstance
113 | RecurringTaskScheduler() *task.RecurringTaskScheduler
114 | }
115 |
116 | type InputFactory interface {
117 | Plugin
118 | New(engine Engine, config *ConfigElement) (Input, error)
119 | }
120 |
121 | type InputFactoryRegistry interface {
122 | RegisterInputFactory(factory InputFactory) error
123 | LookupInputFactory(name string) InputFactory
124 | }
125 |
126 | type OutputFactory interface {
127 | Plugin
128 | New(engine Engine, config *ConfigElement) (Output, error)
129 | }
130 |
131 | type OutputFactoryRegistry interface {
132 | RegisterOutputFactory(factory OutputFactory) error
133 | LookupOutputFactory(name string) OutputFactory
134 | }
135 |
136 | type PluginRegistry interface {
137 | Plugins() []Plugin
138 | }
139 |
140 | type Scoreboard interface {
141 | PluginInstance
142 | }
143 |
144 | type ScoreboardFactory interface {
145 | Plugin
146 | New(engine Engine, pluginRegistry PluginRegistry, config *ConfigElement) (Scoreboard, error)
147 | }
148 |
149 | type JournalChunk interface {
150 | Disposable
151 | GetReader() (io.Reader, error)
152 | GetNextChunk() JournalChunk
153 | TakeOwnership() bool
154 | }
155 |
156 | type JournalChunkListener func(JournalChunk) error
157 |
158 | type Journal interface {
159 | Disposable
160 | Key() string
161 | Write(data []byte) error
162 | GetTailChunk() JournalChunk
163 | AddNewChunkListener(JournalChunkListener)
164 | AddFlushListener(JournalChunkListener)
165 | Flush(func(JournalChunk) error) error
166 | }
167 |
168 | type JournalGroup interface {
169 | Disposable
170 | GetJournal(key string) Journal
171 | GetJournalKeys() []string
172 | }
173 |
174 | type JournalGroupFactory interface {
175 | GetJournalGroup() JournalGroup
176 | }
177 |
178 | type RecordPacker interface {
179 | Pack(record FluentRecord) ([]byte, error)
180 | }
181 |
182 | type LineParser interface {
183 | Feed(line string) error
184 | }
185 |
186 | type LineParserFactory interface {
187 | New(receiver func(FluentRecord) error) (LineParser, error)
188 | }
189 |
190 | type LineParserFactoryFactory func(engine Engine, config *ConfigElement) (LineParserFactory, error)
191 |
192 | type LineParserPlugin interface {
193 | Name() string
194 | OnRegistering(func(name string, factory LineParserFactoryFactory) error) error
195 | }
196 |
197 | type LineParserPluginRegistry interface {
198 | RegisterLineParserPlugin(plugin LineParserPlugin) error
199 | LookupLineParserFactoryFactory(name string) LineParserFactoryFactory
200 | }
201 |
202 | type Logger interface {
203 | Critical(format string, args ...interface{})
204 | Error(format string, args ...interface{})
205 | Warning(format string, args ...interface{})
206 | Notice(format string, args ...interface{})
207 | Info(format string, args ...interface{})
208 | Debug(format string, args ...interface{})
209 | }
210 |
--------------------------------------------------------------------------------
/intvector.go:
--------------------------------------------------------------------------------
1 | package ik
2 |
3 | type IntVector []int
4 |
5 | func (sv *IntVector) Append(v int) {
6 | oldLength := len(*sv)
7 | sv.ensureCapacity(oldLength + 1)
8 | (*sv)[oldLength] = v
9 | }
10 |
11 | func (sv *IntVector) Push(v int) {
12 | sv.Append(v)
13 | }
14 |
15 | func (sv *IntVector) Pop() int {
16 | retval := (*sv)[len(*sv)-1]
17 | *sv = (*sv)[0 : len(*sv)-1]
18 | return retval
19 | }
20 |
21 | func (sv *IntVector) Shift() int {
22 | retval := (*sv)[0]
23 | *sv = (*sv)[1:len(*sv)]
24 | return retval
25 | }
26 |
27 | func (sv *IntVector) Last() int {
28 | return (*sv)[len(*sv)-1]
29 | }
30 |
31 | func (sv *IntVector) First() int {
32 | return (*sv)[0]
33 | }
34 |
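   | // ensureCapacity grows the backing array so the vector can hold l elements:
   | // below 256 elements it reallocates to exactly the requested length, beyond
   | // that it doubles the capacity until it fits, then reslices to length l.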
35 | func (sv *IntVector) ensureCapacity(l int) {
36 | if l < 256 {
37 | if l > cap(*sv) {
38 | newSlice := make([]int, l)
39 | copy(newSlice, *sv)
40 | *sv = newSlice
41 | }
42 | } else {
43 | newCapacity := cap(*sv)
44 | if newCapacity < 256 {
45 | newCapacity = 128
46 | }
47 | for l > newCapacity {
48 | newCapacity = 2 * newCapacity
49 | if newCapacity < cap(*sv) {
50 | /* unlikely */
51 | panic("out of memory")
52 | }
53 | }
54 | newSlice := make([]int, newCapacity)
55 | copy(newSlice, *sv)
56 | *sv = newSlice
57 | }
58 | *sv = (*sv)[0:l]
59 | }
60 |
--------------------------------------------------------------------------------
/intvector_test.go:
--------------------------------------------------------------------------------
1 | package ik
2 |
3 | import "testing"
4 |
5 | func TestIntVector_Append(t *testing.T) {
6 | sv := make(IntVector, 0)
7 | if len(sv) != 0 {
8 | t.Fail()
9 | }
10 | if cap(sv) != 0 {
11 | t.Fail()
12 | }
13 | for i := 0; i < 512; i += 3 {
14 | if i > 1 && sv[i-1] != 19 {
15 | t.Fail()
16 | }
17 | sv.Append(11)
18 | if len(sv) != i+1 {
19 | t.Fail()
20 | }
21 | if i < 253 {
22 | if cap(sv) != i+1 {
23 | t.Fail()
24 | }
25 | }
26 | if sv[i] != 11 {
27 | t.Fail()
28 | }
29 | sv.Append(17)
30 | if len(sv) != i+2 {
31 | t.Fail()
32 | }
33 | if i < 253 {
34 | if cap(sv) != i+2 {
35 | t.Fail()
36 | }
37 | }
38 | if sv[i] != 11 {
39 | t.Fail()
40 | }
41 | if sv[i+1] != 17 {
42 | t.Fail()
43 | }
44 | sv.Append(19)
45 | if len(sv) != i+3 {
46 | t.Fail()
47 | }
48 | if i < 253 {
49 | if cap(sv) != i+3 {
50 | t.Fail()
51 | }
52 | }
53 | if sv[i] != 11 {
54 | t.Fail()
55 | }
56 | if sv[i+1] != 17 {
57 | t.Fail()
58 | }
59 | if sv[i+2] != 19 {
60 | t.Fail()
61 | }
62 | }
63 | }
64 |
65 | func TestIntVector_Pop(t *testing.T) {
66 | sv := make(IntVector, 2)
67 | sv[0] = 11
68 | sv[1] = 17
69 | if len(sv) != 2 {
70 | t.Fail()
71 | }
72 | if cap(sv) != 2 {
73 | t.Fail()
74 | }
75 | if sv.Pop() != 17 {
76 | t.Fail()
77 | }
78 | if len(sv) != 1 {
79 | t.Fail()
80 | }
81 | if cap(sv) != 2 {
82 | t.Fail()
83 | }
84 | if sv.Pop() != 11 {
85 | t.Fail()
86 | }
87 | if len(sv) != 0 {
88 | t.Fail()
89 | }
90 | if cap(sv) != 2 {
91 | t.Fail()
92 | }
93 | }
94 |
95 | func TestIntVector_Shift(t *testing.T) {
96 | sv := make(IntVector, 2)
97 | sv[0] = 11
98 | sv[1] = 17
99 | if len(sv) != 2 {
100 | t.Fail()
101 | }
102 | if cap(sv) != 2 {
103 | t.Fail()
104 | }
105 | if sv.Shift() != 11 {
106 | t.Fail()
107 | }
108 | if len(sv) != 1 {
109 | t.Fail()
110 | }
111 | if cap(sv) != 1 {
112 | t.Fail()
113 | }
114 | if sv.Shift() != 17 {
115 | t.Fail()
116 | }
117 | if len(sv) != 0 {
118 | t.Fail()
119 | }
120 | if cap(sv) != 0 {
121 | t.Fail()
122 | }
123 | }
124 |
--------------------------------------------------------------------------------
/journal/file.go:
--------------------------------------------------------------------------------
1 | package journal
2 |
3 | import (
4 | "errors"
5 | "fmt"
6 | "github.com/moriyoshi/ik"
7 | "io"
8 | "math/rand"
9 | "os"
10 | "path"
11 | "strings"
12 | "sync"
13 | "sync/atomic"
14 | "time"
15 | "unsafe"
16 | )
17 |
18 | type FileJournalChunkDequeueHead struct {
19 | next *FileJournalChunk
20 | prev *FileJournalChunk
21 | }
22 |
23 | type FileJournalChunkDequeue struct {
24 | first *FileJournalChunk
25 | last *FileJournalChunk
26 | count int
27 | mtx sync.Mutex
28 | }
29 |
30 | type FileJournalChunk struct {
31 | head FileJournalChunkDequeueHead
32 | Path string
33 | Type JournalFileType
34 | TSuffix string
35 | Timestamp int64
36 | UniqueId []byte
37 | refcount int32
38 | }
39 |
40 | type FileJournal struct {
41 | group *FileJournalGroup
42 | key string
43 | chunks FileJournalChunkDequeue
44 | writer io.WriteCloser
45 | position int64
46 | newChunkListeners map[uintptr]ik.JournalChunkListener
47 | flushListeners map[uintptr]ik.JournalChunkListener
48 | mtx sync.Mutex
49 | }
50 |
51 | type FileJournalGroup struct {
52 | factory *FileJournalGroupFactory
53 | pluginInstance ik.PluginInstance
54 | timeGetter func() time.Time
55 | logger ik.Logger
56 | rand *rand.Rand
57 | fileMode os.FileMode
58 | maxSize int64
59 | pathPrefix string
60 | pathSuffix string
61 | journals map[string]*FileJournal
62 | mtx sync.Mutex
63 | }
64 |
65 | type FileJournalGroupFactory struct {
66 | logger ik.Logger
67 | paths map[string]*FileJournalGroup
68 | randSource rand.Source
69 | timeGetter func() time.Time
70 | defaultPathSuffix string
71 | defaultFileMode os.FileMode
72 | maxSize int64
73 | }
74 |
75 | type FileJournalChunkWrapper struct {
76 | journal *FileJournal
77 | chunk *FileJournalChunk
78 | ownershipTaken int64
79 | }
80 |
81 | func (wrapper *FileJournalChunkWrapper) Path() string {
82 | return wrapper.chunk.Path
83 | }
84 |
85 | func (wrapper *FileJournalChunkWrapper) GetReader() (io.Reader, error) {
86 | chunk := (*FileJournalChunk)(atomic.LoadPointer((*unsafe.Pointer)((unsafe.Pointer)(&wrapper.chunk))))
87 | if chunk == nil {
88 | return nil, errors.New("already disposed")
89 | }
90 | return chunk.getReader()
91 | }
92 |
93 | func (wrapper *FileJournalChunkWrapper) GetNextChunk() ik.JournalChunk {
94 | 	chunk := (*FileJournalChunk)(atomic.LoadPointer((*unsafe.Pointer)((unsafe.Pointer)(&wrapper.chunk))))
95 | 	if chunk == nil {
96 | 		return nil
97 | 	}
98 | 	journal := wrapper.journal
99 | 	journal.chunks.mtx.Lock()
100 | 	defer journal.chunks.mtx.Unlock()
101 | 	if chunk.head.prev == nil {
102 | 		return nil // untyped nil, so callers can compare the result against nil
103 | 	}
104 | 	return journal.newChunkWrapper(chunk.head.prev)
105 | }
106 |
107 | func (wrapper *FileJournalChunkWrapper) TakeOwnership() bool {
108 | chunk := (*FileJournalChunk)(atomic.LoadPointer((*unsafe.Pointer)((unsafe.Pointer)(&wrapper.chunk))))
109 | if chunk == nil {
110 | return false
111 | }
112 | if atomic.CompareAndSwapInt64(&wrapper.ownershipTaken, 0, 1) {
113 | wrapper.journal.deleteRef((*FileJournalChunk)(chunk))
114 | return true
115 | } else {
116 | return false
117 | }
118 | }
119 |
120 | func (wrapper *FileJournalChunkWrapper) Dispose() error {
121 | chunk := (*FileJournalChunk)(atomic.SwapPointer((*unsafe.Pointer)((unsafe.Pointer)(&wrapper.chunk)), nil))
122 | if chunk == nil {
123 | return errors.New("already disposed")
124 | }
125 | err, destroyed := wrapper.journal.deleteRef((*FileJournalChunk)(chunk))
126 | if err != nil {
127 | return err
128 | }
129 | if destroyed && wrapper.ownershipTaken != 0 && chunk.head.next == nil {
130 | // increment the refcount of the last chunk
131 | // to rehold the reference
132 | prevChunk := chunk.head.prev
133 | if prevChunk != nil {
134 | atomic.AddInt32(&prevChunk.refcount, 1)
135 | }
136 | }
137 | return nil
138 | }
139 |
140 | func (journal *FileJournal) newChunkWrapper(chunk *FileJournalChunk) *FileJournalChunkWrapper {
141 | atomic.AddInt32(&chunk.refcount, 1)
142 | return &FileJournalChunkWrapper{journal, chunk, 0}
143 | }
144 |
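   | // deleteRef drops one reference to chunk. When the count reaches zero the
   | // release is first propagated to the newer (prev) chunk, then the chunk's
   | // file is removed and the chunk is unlinked from the dequeue. The second
   | // return value reports whether the chunk was actually destroyed.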
145 | func (journal *FileJournal) deleteRef(chunk *FileJournalChunk) (error, bool) {
146 | refcount := atomic.AddInt32(&chunk.refcount, -1)
147 | if refcount == 0 {
148 | // first propagate to newer chunk
149 | if prevChunk := chunk.head.prev; prevChunk != nil {
150 | err, _ := journal.deleteRef(prevChunk)
151 | if err != nil {
152 | // undo the change
153 | atomic.AddInt32(&chunk.refcount, 1)
154 | return err, false
155 | }
156 | }
157 | err := os.Remove(chunk.Path)
158 | if err != nil {
159 | // undo the change
160 | atomic.AddInt32(&chunk.refcount, 1)
161 | return err, false
162 | }
163 | {
164 | journal.chunks.mtx.Lock()
165 | prevChunk := chunk.head.prev
166 | nextChunk := chunk.head.next
167 | if prevChunk == nil {
168 | journal.chunks.first = nextChunk
169 | } else {
170 | prevChunk.head.next = nextChunk
171 | }
172 | if nextChunk == nil {
173 | journal.chunks.last = prevChunk
174 | } else {
175 | nextChunk.head.prev = prevChunk
176 | }
177 | journal.chunks.count -= 1
178 | journal.chunks.mtx.Unlock()
179 | }
180 | return nil, true
181 | } else if refcount < 0 {
182 | // should never happen
183 | panic(fmt.Sprintf("something went wrong! chunk=%v, chunks.count=%d", chunk, journal.chunks.count))
184 | }
185 | return nil, false
186 | }
187 |
188 | func (chunk *FileJournalChunk) getReader() (io.Reader, error) {
189 | return os.OpenFile(chunk.Path, os.O_RDONLY, 0)
190 | }
191 |
192 | func (journal *FileJournal) Key() string {
193 | return journal.key
194 | }
195 |
196 | func (journal *FileJournal) notifyFlushListeners(chunk *FileJournalChunk) {
197 | // lock for listener container must be acquired by caller
198 | for _, listener := range journal.flushListeners {
199 | err := listener(journal.newChunkWrapper(chunk))
200 | if err != nil {
201 | 			journal.group.logger.Error("error occurred while notifying flush listeners: %s", err.Error())
202 | }
203 | }
204 | }
205 |
206 | func (journal *FileJournal) notifyNewChunkListeners(chunk *FileJournalChunk) {
207 | // lock for listener container must be acquired by caller
208 | for _, listener := range journal.newChunkListeners {
209 | err := listener(journal.newChunkWrapper(chunk))
210 | if err != nil {
211 | 			journal.group.logger.Error("error occurred while notifying new chunk listeners: %s", err.Error())
212 | }
213 | }
214 | }
215 |
216 | func (journal *FileJournal) finalizeChunk(chunk *FileJournalChunk) error {
217 | group := journal.group
218 | variablePortion := BuildJournalPathWithTSuffix(
219 | journal.key,
220 | Rest,
221 | chunk.TSuffix,
222 | )
223 | newPath := group.pathPrefix + variablePortion + group.pathSuffix
224 | err := os.Rename(chunk.Path, newPath)
225 | if err != nil {
226 | return err
227 | }
228 | chunk.Type = Rest
229 | chunk.Path = newPath
230 | journal.notifyFlushListeners(chunk)
231 | return nil
232 | }
233 |
234 | func (journal *FileJournal) Purge() error {
235 | journal.mtx.Lock()
236 | defer journal.mtx.Unlock()
237 | lastChunk := (*FileJournalChunk)(nil)
238 | {
239 | journal.chunks.mtx.Lock()
240 | lastChunk = journal.chunks.last
241 | journal.chunks.mtx.Unlock()
242 | }
243 | // initiate GC
244 | if lastChunk != nil {
245 | err, _ := journal.deleteRef(lastChunk)
246 | if err != nil {
247 | return err
248 | }
249 | }
250 | // journal.chunks can change during the call to deleteRef()
251 | {
252 | journal.chunks.mtx.Lock()
253 | lastChunk = journal.chunks.last
254 | journal.chunks.mtx.Unlock()
255 | }
256 | if lastChunk != nil {
257 | atomic.AddInt32(&lastChunk.refcount, 1)
258 | }
259 | return nil
260 | }
261 |
262 | func (journal *FileJournal) Flush(visitor func(ik.JournalChunk) error) error {
263 |
264 | if visitor != nil {
265 | chunks := make([]*FileJournalChunk, journal.chunks.count)
266 | {
267 | journal.chunks.mtx.Lock()
268 | i := 0
269 | for chunk := journal.chunks.last; chunk != nil; chunk = chunk.head.prev {
270 | chunks[i] = chunk
271 | i += 1
272 | }
273 | journal.chunks.mtx.Unlock()
274 | }
275 | for _, chunk := range chunks {
276 | err := visitor(journal.newChunkWrapper(chunk))
277 | if err != nil {
278 | return err
279 | }
280 | }
281 | }
282 | 	// propagate any error from releasing the flushed chunks
283 | 	return journal.Purge()
284 | }
285 |
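   | // newChunk rotates the journal: it creates a new head chunk file and makes
   | // it the current write target, then renames the previous head to a "rest"
   | // chunk, notifying flush and new-chunk listeners along the way.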
286 | func (journal *FileJournal) newChunk() (*FileJournalChunk, error) {
287 | group := journal.group
288 | info := BuildJournalPath(
289 | journal.key,
290 | Head,
291 | group.timeGetter(),
292 | group.rand.Int63n(0xfff),
293 | )
294 | chunk := &FileJournalChunk{
295 | head: FileJournalChunkDequeueHead{journal.chunks.first, nil},
296 | Path: (group.pathPrefix + info.VariablePortion + group.pathSuffix),
297 | Type: info.Type,
298 | TSuffix: info.TSuffix,
299 | UniqueId: info.UniqueId,
300 | refcount: 1,
301 | }
302 | file, err := os.OpenFile(chunk.Path, os.O_WRONLY|os.O_APPEND|os.O_CREATE|os.O_EXCL, journal.group.fileMode)
303 | if err != nil {
304 | return nil, err
305 | }
306 | if journal.writer != nil {
307 | err := journal.writer.Close()
308 | if err != nil {
309 | return nil, err
310 | }
311 | }
312 |
313 | oldHead := (*FileJournalChunk)(nil)
314 | {
315 | journal.chunks.mtx.Lock()
316 | oldHead = journal.chunks.first
317 | if oldHead != nil {
318 | oldHead.head.prev = chunk
319 | } else {
320 | journal.chunks.last = chunk
321 | }
322 | chunk.head.next = journal.chunks.first
323 | journal.chunks.first = chunk
324 | journal.chunks.count += 1
325 | journal.chunks.mtx.Unlock()
326 | }
327 | chunk.refcount += 1 // for writer
328 |
329 | if oldHead != nil {
330 | err := journal.finalizeChunk(oldHead)
331 | if err != nil {
332 | file.Close()
333 | os.Remove(chunk.Path)
334 | return nil, err
335 | }
336 | err, _ = journal.deleteRef(oldHead) // writer-holding ref
337 | if err != nil {
338 | file.Close()
339 | os.Remove(chunk.Path)
340 | return nil, err
341 | }
342 | }
343 |
344 | journal.writer = file
345 | journal.position = 0
346 | journal.notifyNewChunkListeners(chunk)
347 | return chunk, nil
348 | }
349 |
350 | func (journal *FileJournal) AddFlushListener(listener ik.JournalChunkListener) {
351 | journal.mtx.Lock()
352 | defer journal.mtx.Unlock()
353 | // XXX hack!
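   | 	// (the map key is the address of the func value that the listener
   | 	// variable points at, which deduplicates the same listener added twice)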
354 | journal.flushListeners[uintptr(*(*unsafe.Pointer)(unsafe.Pointer(&listener)))] = listener
355 | }
356 |
357 | func (journal *FileJournal) AddNewChunkListener(listener ik.JournalChunkListener) {
358 | journal.mtx.Lock()
359 | defer journal.mtx.Unlock()
360 | // XXX hack!
361 | journal.newChunkListeners[uintptr(*(*unsafe.Pointer)(unsafe.Pointer(&listener)))] = listener
362 | }
363 |
364 | func (journal *FileJournal) Write(data []byte) error {
365 | journal.mtx.Lock()
366 | defer journal.mtx.Unlock()
367 |
368 | if journal.writer == nil {
369 | if journal.chunks.first == nil {
370 | _, err := journal.newChunk()
371 | if err != nil {
372 | return err
373 | }
374 | }
375 | } else {
376 | if journal.group.maxSize-journal.position < int64(len(data)) {
377 | _, err := journal.newChunk()
378 | if err != nil {
379 | return err
380 | }
381 | }
382 | }
383 |
384 | n, err := journal.writer.Write(data)
385 | if err != nil {
386 | return err
387 | }
388 | if n != len(data) {
389 | return errors.New("not all data could be written")
390 | }
391 | journal.position += int64(n)
392 | return nil
393 | }
394 |
395 | func (journal *FileJournal) GetTailChunk() ik.JournalChunk {
396 | 	journal.chunks.mtx.Lock()
397 | 	defer journal.chunks.mtx.Unlock()
398 | 	if journal.chunks.last == nil {
399 | 		// Return an untyped nil rather than a typed nil
400 | 		// *FileJournalChunkWrapper so that callers can simply
401 | 		// compare the result against nil.
402 | 		return nil
403 | 	}
404 | 	return journal.newChunkWrapper(journal.chunks.last)
405 | }
406 |
407 | func (journal *FileJournal) Dispose() error {
408 | journal.mtx.Lock()
409 | defer journal.mtx.Unlock()
410 | if journal.writer != nil {
411 | err := journal.writer.Close()
412 | if err != nil {
413 | return err
414 | }
415 | journal.writer = nil
416 | }
417 | return nil
418 | }
419 |
420 | func (journalGroup *FileJournalGroup) Dispose() error {
421 | for _, journal := range journalGroup.journals {
422 | journal.Dispose()
423 | }
424 | return nil
425 | }
426 |
427 | func (journalGroup *FileJournalGroup) GetFileJournal(key string) *FileJournal {
428 | journalGroup.mtx.Lock()
429 | defer journalGroup.mtx.Unlock()
430 |
431 | journal, ok := journalGroup.journals[key]
432 | if ok {
433 | return journal
434 | }
435 | journal = &FileJournal{
436 | group: journalGroup,
437 | key: key,
438 | chunks: FileJournalChunkDequeue{nil, nil, 0, sync.Mutex{}},
439 | writer: nil,
440 | newChunkListeners: make(map[uintptr]ik.JournalChunkListener),
441 | flushListeners: make(map[uintptr]ik.JournalChunkListener),
442 | }
443 | journalGroup.journals[key] = journal
444 | return journal
445 | }
446 |
447 | func (journalGroup *FileJournalGroup) GetJournal(key string) ik.Journal {
448 | return journalGroup.GetFileJournal(key)
449 | }
450 |
451 | func (journalGroup *FileJournalGroup) GetJournalKeys() []string {
452 | journalGroup.mtx.Lock()
453 | defer journalGroup.mtx.Unlock()
454 |
455 | retval := make([]string, len(journalGroup.journals))
456 | i := 0
457 | for k := range journalGroup.journals {
458 | retval[i] = k
459 | i += 1
460 | }
461 | return retval
462 | }
463 |
464 | // http://stackoverflow.com/questions/1525117/whats-the-fastest-algorithm-for-sorting-a-linked-list
465 | // http://www.chiark.greenend.org.uk/~sgtatham/algorithms/listsort.html
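   | // sortChunksByTimestamp is a bottom-up (iterative) merge sort performed
   | // in place on the doubly-linked dequeue, ordering chunks newest-first by
   | // Timestamp.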
466 | func sortChunksByTimestamp(chunks *FileJournalChunkDequeue) {
467 | k := 1
468 | lhs := chunks.first
469 | if lhs == nil {
470 | return
471 | }
472 | for {
473 | result := FileJournalChunkDequeue{nil, nil, chunks.count, sync.Mutex{}}
474 | first := true
475 | for {
476 | picked := (*FileJournalChunk)(nil)
477 | lhsSize := 0
478 | rhsSize := k
479 | rhs := lhs
480 | i := k
481 | for i > 0 && rhs.head.next != nil {
482 | i -= 1
483 | rhs = rhs.head.next
484 | }
485 | lhsSize = k - i
486 | for {
487 | if lhsSize != 0 {
488 | if rhsSize != 0 && rhs != nil && lhs.Timestamp < rhs.Timestamp {
489 | picked = rhs
490 | rhs = rhs.head.next
491 | rhsSize -= 1
492 | } else {
493 | picked = lhs
494 | lhs = lhs.head.next
495 | lhsSize -= 1
496 | }
497 | } else {
498 | if rhsSize != 0 && rhs != nil {
499 | picked = rhs
500 | rhs = rhs.head.next
501 | rhsSize -= 1
502 | } else {
503 | break
504 | }
505 | }
506 | if picked.head.prev != nil {
507 | picked.head.prev.head.next = picked.head.next
508 | }
509 | if picked.head.next != nil {
510 | picked.head.next.head.prev = picked.head.prev
511 | }
512 | if result.last == nil {
513 | result.first = picked
514 | } else {
515 | result.last.head.next = picked
516 | }
517 | picked.head.prev = result.last
518 | picked.head.next = nil
519 | result.last = picked
520 | }
521 | lhs = rhs
522 | if lhs == nil {
523 | break
524 | }
525 | first = false
526 | }
527 | if first {
528 | *chunks = result
529 | break
530 | }
531 | k *= 2
532 | lhs = result.first
533 | }
534 | }
535 |
536 | func validateChunks(chunks *FileJournalChunkDequeue) error {
537 | chunkHead := (*FileJournalChunk)(nil)
538 | for chunk := chunks.first; chunk != nil; chunk = chunk.head.next {
539 | if chunk.Type == Head {
540 | if chunkHead != nil {
541 | return errors.New("multiple chunk heads found")
542 | }
543 | chunkHead = chunk
544 | }
545 | }
546 | if chunkHead != chunks.first {
547 | return errors.New("chunk head does not have the newest timestamp")
548 | }
549 | return nil
550 | }
551 |
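   | // scanJournals rebuilds the journal map from chunk files left over from a
   | // previous run: it lists the directory, decodes each matching file name,
   | // groups chunks by key, sorts them newest-first, and validates that each
   | // journal has exactly one head chunk and that it is the newest.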
552 | func scanJournals(logger ik.Logger, pathPrefix string, pathSuffix string) (map[string]*FileJournal, error) {
553 | journals := make(map[string]*FileJournal)
554 | dirname, basename := path.Split(pathPrefix)
555 | if dirname == "" {
556 | dirname = "."
557 | }
558 | d, err := os.OpenFile(dirname, os.O_RDONLY, 0)
559 | if err != nil {
560 | return nil, err
561 | }
562 | finfo, err := d.Stat()
563 | if err != nil {
564 | return nil, err
565 | }
566 | if !finfo.IsDir() {
567 | 		return nil, fmt.Errorf("%s is not a directory", dirname)
568 | }
569 | for {
570 | files_, err := d.Readdirnames(100)
571 | if err == io.EOF {
572 | break
573 | } else if err != nil {
574 | return nil, err
575 | }
576 | for _, file := range files_ {
577 | if !strings.HasSuffix(file, pathSuffix) {
578 | continue
579 | }
580 | variablePortion := file[len(basename) : len(file)-len(pathSuffix)]
581 | info, err := DecodeJournalPath(variablePortion)
582 | if err != nil {
583 | 				logger.Warning("unexpected file in the journal directory (%s): %s", dirname, file)
584 | continue
585 | }
586 | journalProto, ok := journals[info.Key]
587 | if !ok {
588 | journalProto = &FileJournal{
589 | key: info.Key,
590 | chunks: FileJournalChunkDequeue{nil, nil, 0, sync.Mutex{}},
591 | writer: nil,
592 | }
593 | journals[info.Key] = journalProto
594 | }
595 | chunk := &FileJournalChunk{
596 | head: FileJournalChunkDequeueHead{nil, journalProto.chunks.last},
597 | Type: info.Type,
598 | Path: pathPrefix + info.VariablePortion + pathSuffix,
599 | TSuffix: info.TSuffix,
600 | Timestamp: info.Timestamp,
601 | UniqueId: info.UniqueId,
602 | refcount: 1,
603 | }
604 | if journalProto.chunks.last == nil {
605 | journalProto.chunks.first = chunk
606 | } else {
607 | journalProto.chunks.last.head.next = chunk
608 | }
609 | journalProto.chunks.last = chunk
610 | journalProto.chunks.count += 1
611 | }
612 | }
613 | for _, journalProto := range journals {
614 | sortChunksByTimestamp(&journalProto.chunks)
615 | err := validateChunks(&journalProto.chunks)
616 | if err != nil {
617 | return nil, err
618 | }
619 | }
620 | return journals, nil
621 | }
622 |
623 | func (factory *FileJournalGroupFactory) GetJournalGroup(path string, pluginInstance ik.PluginInstance) (*FileJournalGroup, error) {
624 | registered, ok := factory.paths[path]
625 | if ok {
626 | if registered.pluginInstance == pluginInstance {
627 | return registered, nil
628 | } else {
629 | 			return nil, fmt.Errorf(
630 | 				"another '%s' plugin already uses the same buffer_path: %s",
631 | 				registered.pluginInstance.Factory().Name(),
632 | 				path,
633 | 			)
634 | }
635 | }
636 |
637 | var pathPrefix string
638 | var pathSuffix string
639 |
640 | pos := strings.Index(path, "*")
641 | if pos >= 0 {
642 | pathPrefix = path[0:pos]
643 | pathSuffix = path[pos+1:]
644 | } else {
645 | pathPrefix = path + "."
646 | pathSuffix = factory.defaultPathSuffix
647 | }
648 |
649 | journals, err := scanJournals(factory.logger, pathPrefix, pathSuffix)
650 | if err != nil {
651 | return nil, err
652 | }
653 |
654 | journalGroup := &FileJournalGroup{
655 | factory: factory,
656 | pluginInstance: pluginInstance,
657 | timeGetter: factory.timeGetter,
658 | logger: factory.logger,
659 | rand: rand.New(factory.randSource),
660 | fileMode: factory.defaultFileMode,
661 | maxSize: factory.maxSize,
662 | pathPrefix: pathPrefix,
663 | pathSuffix: pathSuffix,
664 | journals: journals,
665 | mtx: sync.Mutex{},
666 | }
667 | for _, journal := range journals {
668 | journal.group = journalGroup
669 | journal.newChunkListeners = make(map[uintptr]ik.JournalChunkListener)
670 | journal.flushListeners = make(map[uintptr]ik.JournalChunkListener)
671 | chunk := journal.chunks.first
672 | file, err := os.OpenFile(chunk.Path, os.O_WRONLY|os.O_APPEND, journal.group.fileMode)
673 | if err != nil {
674 | journalGroup.Dispose()
675 | return nil, err
676 | }
677 | position, err := file.Seek(0, os.SEEK_END)
678 | if err != nil {
679 | file.Close()
680 | journalGroup.Dispose()
681 | return nil, err
682 | }
683 | chunk.refcount += 1 // for writer
684 | journal.writer = file
685 | journal.position = position
686 | }
687 | 	factory.logger.Info("Path %s is assigned to plugin instance %s", path, pluginInstance.Factory().Name())
688 | factory.paths[path] = journalGroup
689 | return journalGroup, nil
690 | }
691 |
692 | func NewFileJournalGroupFactory(
693 | logger ik.Logger,
694 | randSource rand.Source,
695 | timeGetter func() time.Time,
696 | defaultPathSuffix string,
697 | defaultFileMode os.FileMode,
698 | maxSize int64,
699 | ) *FileJournalGroupFactory {
700 | return &FileJournalGroupFactory{
701 | logger: logger,
702 | paths: make(map[string]*FileJournalGroup),
703 | randSource: randSource,
704 | timeGetter: timeGetter,
705 | defaultPathSuffix: defaultPathSuffix,
706 | defaultFileMode: defaultFileMode,
707 | maxSize: maxSize,
708 | }
709 | }
710 |
--------------------------------------------------------------------------------
/journal/file_test.go:
--------------------------------------------------------------------------------
1 | package journal
2 |
3 | import (
4 | "fmt"
5 | "github.com/moriyoshi/ik"
6 | "io/ioutil"
7 | "log"
8 | "math/rand"
9 | "os"
10 | "testing"
11 | "time"
12 | )
13 |
14 | type DummyPluginInstance struct{ v int }
15 |
16 | type DummyPlugin struct{}
17 |
18 | func (*DummyPlugin) Name() string { return "dummy" }
19 | func (*DummyPlugin) BindScorekeeper(*ik.Scorekeeper) {}
20 |
21 | func (*DummyPluginInstance) Run() error { return nil }
22 | func (*DummyPluginInstance) Shutdown() error { return nil }
23 | func (*DummyPluginInstance) Factory() ik.Plugin { return &DummyPlugin{} }
24 |
25 | func Test_GetJournalGroup(t *testing.T) {
26 | logger := log.New(os.Stderr, "[journal] ", 0)
27 | tempDir, err := ioutil.TempDir("", "ik.journal")
28 | if err != nil {
29 | t.FailNow()
30 | }
31 | factory := NewFileJournalGroupFactory(
32 | logger,
33 | rand.NewSource(0),
34 | func() time.Time { return time.Date(2014, 1, 1, 0, 0, 0, 0, time.UTC) },
35 | ".log",
36 | os.FileMode(0644),
37 | 0,
38 | )
39 | dummyPluginInstance := &DummyPluginInstance{}
40 | t.Log(tempDir + "/test")
41 | journalGroup1, err := factory.GetJournalGroup(tempDir+"/test", dummyPluginInstance)
42 | if err != nil {
43 | t.FailNow()
44 | }
45 | journalGroup2, err := factory.GetJournalGroup(tempDir+"/test", dummyPluginInstance)
46 | if err != nil {
47 | t.Fail()
48 | }
49 | if journalGroup1 != journalGroup2 {
50 | t.Fail()
51 | }
52 | anotherDummyPluginInstance := &DummyPluginInstance{}
53 | if dummyPluginInstance == anotherDummyPluginInstance {
54 | t.Log("WTF?")
55 | t.Fail()
56 | }
57 | _, err = factory.GetJournalGroup(tempDir+"/test", anotherDummyPluginInstance)
58 | if err == nil {
59 | t.Fail()
60 | }
61 | }
62 |
63 | func Test_Journal_GetJournal(t *testing.T) {
64 | logger := log.New(os.Stderr, "[journal] ", 0)
65 | tempDir, err := ioutil.TempDir("", "ik.journal")
66 | if err != nil {
67 | t.FailNow()
68 | }
69 | factory := NewFileJournalGroupFactory(
70 | logger,
71 | rand.NewSource(0),
72 | func() time.Time { return time.Date(2014, 1, 1, 0, 0, 0, 0, time.UTC) },
73 | ".log",
74 | os.FileMode(0644),
75 | 0,
76 | )
77 | dummyPluginInstance := &DummyPluginInstance{}
78 | t.Log(tempDir + "/test")
79 | journalGroup, err := factory.GetJournalGroup(tempDir+"/test", dummyPluginInstance)
80 | if err != nil {
81 | t.FailNow()
82 | }
83 | journal1 := journalGroup.GetJournal("key")
84 | if journal1 == nil {
85 | t.FailNow()
86 | }
87 | journal2 := journalGroup.GetJournal("key")
88 | if journal2 == nil {
89 | t.Fail()
90 | }
91 | if journal1 != journal2 {
92 | t.Fail()
93 | }
94 | }
95 |
96 | func Test_Journal_EmitVeryFirst(t *testing.T) {
97 | logger := log.New(os.Stderr, "[journal] ", 0)
98 | tempDir, err := ioutil.TempDir("", "ik.journal")
99 | if err != nil {
100 | t.FailNow()
101 | }
102 | factory := NewFileJournalGroupFactory(
103 | logger,
104 | rand.NewSource(0),
105 | func() time.Time { return time.Date(2014, 1, 1, 0, 0, 0, 0, time.UTC) },
106 | ".log",
107 | os.FileMode(0644),
108 | 10,
109 | )
110 | dummyPluginInstance := &DummyPluginInstance{}
111 | t.Log(tempDir + "/test")
112 | journalGroup, err := factory.GetJournalGroup(tempDir+"/test", dummyPluginInstance)
113 | if err != nil {
114 | t.FailNow()
115 | }
116 | journal := journalGroup.GetFileJournal("key")
117 | err = journal.Write([]byte("test"))
118 | if err != nil {
119 | t.FailNow()
120 | }
121 | if journal.position != 4 {
122 | t.Fail()
123 | }
124 | if journal.chunks.count != 1 {
125 | t.Fail()
126 | }
127 | }
128 |
129 | func Test_Journal_EmitTwice(t *testing.T) {
130 | logger := log.New(os.Stderr, "[journal] ", 0)
131 | tempDir, err := ioutil.TempDir("", "ik.journal")
132 | if err != nil {
133 | t.FailNow()
134 | }
135 | factory := NewFileJournalGroupFactory(
136 | logger,
137 | rand.NewSource(0),
138 | func() time.Time { return time.Date(2014, 1, 1, 0, 0, 0, 0, time.UTC) },
139 | ".log",
140 | os.FileMode(0644),
141 | 10,
142 | )
143 | dummyPluginInstance := &DummyPluginInstance{}
144 | t.Log(tempDir + "/test")
145 | journalGroup, err := factory.GetJournalGroup(tempDir+"/test", dummyPluginInstance)
146 | if err != nil {
147 | t.FailNow()
148 | }
149 | journal := journalGroup.GetFileJournal("key")
150 | err = journal.Write([]byte("test1"))
151 | if err != nil {
152 | t.FailNow()
153 | }
154 | err = journal.Write([]byte("test2"))
155 | if err != nil {
156 | t.FailNow()
157 | }
158 | if journal.position != 10 {
159 | t.Fail()
160 | }
161 | if journal.chunks.count != 1 {
162 | t.Fail()
163 | }
164 | }
165 |
166 | func Test_Journal_EmitRotating(t *testing.T) {
167 | logger := log.New(os.Stderr, "[journal] ", 0)
168 | tempDir, err := ioutil.TempDir("", "ik.journal")
169 | if err != nil {
170 | t.FailNow()
171 | }
172 | factory := NewFileJournalGroupFactory(
173 | logger,
174 | rand.NewSource(0),
175 | func() time.Time { return time.Date(2014, 1, 1, 0, 0, 0, 0, time.UTC) },
176 | ".log",
177 | os.FileMode(0644),
178 | 8,
179 | )
180 | dummyPluginInstance := &DummyPluginInstance{}
181 | t.Log(tempDir + "/test")
182 | journalGroup, err := factory.GetJournalGroup(tempDir+"/test", dummyPluginInstance)
183 | if err != nil {
184 | t.FailNow()
185 | }
186 | journal := journalGroup.GetFileJournal("key")
187 | err = journal.Write([]byte("test1"))
188 | if err != nil {
189 | t.FailNow()
190 | }
191 | err = journal.Write([]byte("test2"))
192 | if err != nil {
193 | t.FailNow()
194 | }
195 | err = journal.Write([]byte("test3"))
196 | if err != nil {
197 | t.FailNow()
198 | }
199 | err = journal.Write([]byte("test4"))
200 | if err != nil {
201 | t.FailNow()
202 | }
203 | err = journal.Write([]byte("test5"))
204 | if err != nil {
205 | t.FailNow()
206 | }
207 | t.Logf("journal.position=%d, journal.chunks.count=%d", journal.position, journal.chunks.count)
208 | if journal.position != 5 {
209 | t.Fail()
210 | }
211 | if journal.chunks.count != 5 {
212 | t.Fail()
213 | }
214 | }
215 |
216 | func shuffle(x []string) {
217 | rng := rand.New(rand.NewSource(0))
218 | for i := 0; i < len(x); i += 1 {
219 | j := rng.Intn(i + 1)
220 | x[i], x[j] = x[j], x[i]
221 | }
222 | }
223 |
224 | func Test_Journal_Scanning_Ok(t *testing.T) {
225 | logger := log.New(os.Stderr, "[journal] ", 0)
226 | tm := time.Date(2014, 1, 1, 0, 0, 0, 0, time.UTC)
227 |
228 | for i := 1; i < 100; i++ {
229 | tempDir, err := ioutil.TempDir("", "ik.journal")
230 | if err != nil {
231 | t.FailNow()
232 | }
233 | prefix := tempDir + "/test"
234 | suffix := ".log"
235 | makePaths := func(n int, key string) []string {
236 | paths := make([]string, n)
237 | for i := 0; i < len(paths); i += 1 {
238 | type_ := JournalFileType('q')
239 | if i == 0 {
240 | type_ = JournalFileType('b')
241 | }
242 | path := prefix + "." + BuildJournalPath(key, type_, tm.Add(time.Duration(-i*1e9)), 0).VariablePortion + suffix
243 | paths[i] = path
244 | }
245 | return paths
246 | }
247 |
248 | paths := makePaths(i, "key")
249 | shuffledPaths := make([]string, len(paths))
250 | copy(shuffledPaths, paths)
251 | shuffle(shuffledPaths)
252 | for j, path := range shuffledPaths {
253 | file, err := os.Create(path)
254 | if err != nil {
255 | t.FailNow()
256 | }
257 | _, err = file.Write([]byte(fmt.Sprintf("%08d", j)))
258 | if err != nil {
259 | t.FailNow()
260 | }
261 | file.Close()
262 | }
263 | factory := NewFileJournalGroupFactory(
264 | logger,
265 | rand.NewSource(0),
266 | func() time.Time { return tm },
267 | suffix,
268 | os.FileMode(0644),
269 | 8,
270 | )
271 | dummyPluginInstance := &DummyPluginInstance{}
272 | journalGroup, err := factory.GetJournalGroup(prefix, dummyPluginInstance)
273 | if err != nil {
274 | t.FailNow()
275 | }
276 | journal := journalGroup.GetFileJournal("key")
277 | t.Logf("journal.position=%d, journal.chunks.count=%d", journal.position, journal.chunks.count)
278 | if journal.chunks.count != i {
279 | t.Fail()
280 | }
281 | j := 0
282 | for chunk := journal.chunks.first; chunk != nil; chunk = chunk.head.next {
283 | if chunk.Path != paths[j] {
284 | t.Fail()
285 | }
286 | j += 1
287 | }
288 | journal.Purge()
289 | if journal.chunks.count != 1 {
290 | t.Fail()
291 | }
292 | }
293 | }
294 |
295 | func Test_Journal_Scanning_MultipleHead(t *testing.T) {
296 | logger := log.New(os.Stderr, "[journal] ", 0)
297 | tempDir, err := ioutil.TempDir("", "ik.journal")
298 | if err != nil {
299 | t.FailNow()
300 | }
301 | tm := time.Date(2014, 1, 1, 0, 0, 0, 0, time.UTC)
302 | prefix := tempDir + "/test"
303 | suffix := ".log"
304 | createFile := func(key string, type_ JournalFileType, o int) (string, error) {
305 | path := prefix + "." + BuildJournalPath(key, type_, tm.Add(time.Duration(-o*1e9)), 0).VariablePortion + suffix
306 | file, err := os.Create(path)
307 | if err != nil {
308 | return "", err
309 | }
310 | _, err = file.Write([]byte(fmt.Sprintf("%08d", o)))
311 | if err != nil {
312 | return "", err
313 | }
314 | file.Close()
315 | t.Log(path)
316 | return path, nil
317 | }
318 |
319 | paths := make([]string, 4)
320 | {
321 | path, err := createFile("key", JournalFileType('b'), 0)
322 | if err != nil {
323 | t.FailNow()
324 | }
325 | paths[0] = path
326 | }
327 | {
328 | path, err := createFile("key", JournalFileType('b'), 1)
329 | if err != nil {
330 | t.FailNow()
331 | }
332 | paths[1] = path
333 | }
334 | {
335 | path, err := createFile("key", JournalFileType('q'), 2)
336 | if err != nil {
337 | t.FailNow()
338 | }
339 | paths[2] = path
340 | }
341 | {
342 | path, err := createFile("key", JournalFileType('q'), 3)
343 | if err != nil {
344 | t.FailNow()
345 | }
346 | paths[3] = path
347 | }
348 |
349 | factory := NewFileJournalGroupFactory(
350 | logger,
351 | rand.NewSource(0),
352 | func() time.Time { return tm },
353 | suffix,
354 | os.FileMode(0644),
355 | 8,
356 | )
357 | dummyPluginInstance := &DummyPluginInstance{}
358 | _, err = factory.GetJournalGroup(prefix, dummyPluginInstance)
359 | if err == nil {
360 | t.FailNow()
361 | }
362 | t.Log(err.Error())
363 | if err.Error() != "multiple chunk heads found" {
364 | t.Fail()
365 | }
366 | }
367 |
368 | func Test_Journal_FlushListener(t *testing.T) {
369 | logger := log.New(os.Stderr, "[journal] ", 0)
370 | tempDir, err := ioutil.TempDir("", "ik.journal")
371 | if err != nil {
372 | t.FailNow()
373 | }
374 | factory := NewFileJournalGroupFactory(
375 | logger,
376 | rand.NewSource(0),
377 | func() time.Time { return time.Date(2014, 1, 1, 0, 0, 0, 0, time.UTC) },
378 | ".log",
379 | os.FileMode(0644),
380 | 8,
381 | )
382 | dummyPluginInstance := &DummyPluginInstance{}
383 | t.Log(tempDir + "/test")
384 | journalGroup, err := factory.GetJournalGroup(tempDir+"/test", dummyPluginInstance)
385 | if err != nil {
386 | t.FailNow()
387 | }
388 | journal := journalGroup.GetFileJournal("key")
389 | chunks := make([]ik.JournalChunk, 0, 5)
390 | listener := func(chunk ik.JournalChunk) error {
391 | chunks = append(chunks, chunk)
392 | t.Logf("flush %d", len(chunks))
393 | return nil
394 | }
395 | journal.AddFlushListener(listener)
396 | journal.AddFlushListener(listener)
397 | err = journal.Write([]byte("test1"))
398 | if err != nil {
399 | t.FailNow()
400 | }
401 | err = journal.Write([]byte("test2"))
402 | if err != nil {
403 | t.FailNow()
404 | }
405 | err = journal.Write([]byte("test3"))
406 | if err != nil {
407 | t.FailNow()
408 | }
409 | err = journal.Write([]byte("test4"))
410 | if err != nil {
411 | t.FailNow()
412 | }
413 | err = journal.Write([]byte("test5"))
414 | if err != nil {
415 | t.FailNow()
416 | }
417 | t.Logf("journal.position=%d, journal.chunks.count=%d", journal.position, journal.chunks.count)
418 | if journal.position != 5 {
419 | t.Fail()
420 | }
421 | if journal.chunks.count != 5 {
422 | t.Fail()
423 | }
424 | if len(chunks) != 4 {
425 | t.Fail()
426 | }
427 | readAll := func(chunk ik.JournalChunk) string {
428 | reader, err := chunk.GetReader()
429 | if err != nil {
430 | t.FailNow()
431 | }
432 | bytes, err := ioutil.ReadAll(reader)
433 | if err != nil {
434 | t.FailNow()
435 | }
436 | return string(bytes)
437 | }
438 | if readAll(chunks[0]) != "test1" {
439 | t.Fail()
440 | }
441 | if readAll(chunks[1]) != "test2" {
442 | t.Fail()
443 | }
444 | if readAll(chunks[2]) != "test3" {
445 | t.Fail()
446 | }
447 | if readAll(chunks[3]) != "test4" {
448 | t.Fail()
449 | }
450 | journal.Purge()
451 | if journal.chunks.count != 5 {
452 | t.Fail()
453 | }
454 | for _, chunk := range chunks {
455 | chunk.Dispose()
456 | }
457 | t.Logf("journal.position=%d, journal.chunks.count=%d", journal.position, journal.chunks.count)
458 | journal.Purge()
459 | if journal.chunks.count != 1 {
460 | t.Fail()
461 | }
462 | if journal.chunks.first.Type != JournalFileType('b') {
463 | t.Fail()
464 | }
465 | }
466 |
--------------------------------------------------------------------------------
/journal/path_builder.go:
--------------------------------------------------------------------------------
1 | package journal
2 |
3 | import (
4 | "errors"
5 | "fmt"
6 | "net/url"
7 | "regexp"
8 | "strconv"
9 | "strings"
10 | "time"
11 | )
12 |
13 | type JournalFileType rune
14 |
15 | const (
16 | Head = JournalFileType('b')
17 | Rest = JournalFileType('q')
18 | )
19 |
20 | type JournalPathInfo struct {
21 | Key string
22 | Type JournalFileType
23 | VariablePortion string
24 | TSuffix string
25 | Timestamp int64 // elapsed time in msec since epoch
26 | UniqueId []byte
27 | }
28 |
29 | var NilJournalPathInfo = JournalPathInfo{"", 0, "", "", 0, nil}
30 |
31 | var pathRegexp, _ = regexp.Compile(fmt.Sprintf("^(.*)[._](%c|%c)([0-9a-fA-F]{1,32})$", Head, Rest))
32 |
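   | // encodeKey percent-encodes every byte outside [-_.0-9A-Za-z] so that an
   | // arbitrary journal key can be embedded in a file name; decodeKey reverses
   | // this with url.QueryUnescape.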
33 | func encodeKey(key string) string {
34 | keyLen := len(key)
35 | retval := make([]byte, keyLen*2)
36 | i := 0
37 | for j := 0; j < keyLen; j += 1 {
38 | c := key[j]
39 | if c == '-' || c == '_' || c == '.' || (c >= '0' && c <= '9') || (c >= 'A' && c <= 'Z') || (c >= 'a' && c <= 'z') {
40 | cap_ := cap(retval)
41 | if i >= cap_ {
42 | newRetval := make([]byte, cap_+len(key))
43 | copy(newRetval, retval)
44 | retval = newRetval
45 | }
46 | retval[i] = c
47 | i += 1
48 | } else {
49 | cap_ := cap(retval)
50 | if i+3 > cap_ {
51 | newSize := cap_
52 | for {
53 | newSize += len(key)
54 | if i+3 <= newSize {
55 | break
56 | }
57 | if newSize < cap_ {
58 | // unlikely
59 | panic("?")
60 | }
61 | }
62 | newRetval := make([]byte, newSize)
63 | copy(newRetval, retval)
64 | retval = newRetval
65 | }
66 | retval[i] = '%'
67 | retval[i+1] = "0123456789abcdef"[(c>>4)&15]
68 | retval[i+2] = "0123456789abcdef"[c&15]
69 | i += 3
70 | }
71 | }
72 | return string(retval[0:i])
73 | }
74 |
75 | func decodeKey(encoded string) (string, error) {
76 | return url.QueryUnescape(encoded)
77 | }
78 |
79 | func convertTSuffixToUniqueId(tSuffix string) ([]byte, error) {
80 | tSuffixLen := len(tSuffix)
81 | buf := make([]byte, tSuffixLen)
82 | for i := 0; i < tSuffixLen; i += 2 {
83 | ii, err := strconv.ParseUint(tSuffix[i:i+2], 16, 8)
84 | if err != nil {
85 | return nil, err
86 | }
87 | buf[i/2] = byte(ii)
88 | buf[(i+tSuffixLen)/2] = byte(ii)
89 | }
90 | return buf, nil
91 | }
92 |
93 | func convertTSuffixToUnixNano(tSuffix string) (int64, error) {
94 | t, err := strconv.ParseInt(tSuffix, 16, 64)
95 | return t >> 12, err
96 | }
97 |
98 | func IsValidJournalPathInfo(info JournalPathInfo) bool {
99 | return len(info.Key) > 0 && info.Type != 0
100 | }
101 |
102 | func BuildJournalPathWithTSuffix(key string, bq JournalFileType, tSuffix string) string {
103 | encodedKey := encodeKey(key)
104 | return fmt.Sprintf(
105 | "%s.%c%s",
106 | encodedKey,
107 | rune(bq),
108 | tSuffix,
109 | )
110 | }
111 |
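   | // BuildJournalPath derives the variable portion of a chunk file name from
   | // the key, the chunk type and the given time: the timestamp (in
   | // microseconds) is shifted left by 12 bits, OR-ed with a 12-bit random
   | // value, and formatted as a zero-padded 16-digit hex suffix.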
112 | func BuildJournalPath(key string, bq JournalFileType, time_ time.Time, randValue int64) JournalPathInfo {
113 | timestamp := time_.UnixNano()
114 | t := (timestamp/1000)<<12 | (randValue & 0xfff)
115 | tSuffix := strconv.FormatInt(t, 16)
116 | if pad := 16 - len(tSuffix); pad > 0 {
117 | // unlikely
118 | tSuffix = strings.Repeat("0", pad) + tSuffix
119 | }
120 | uniqueId, err := convertTSuffixToUniqueId(tSuffix)
121 | if err != nil {
122 | panic("WTF? " + err.Error())
123 | } // should never happen
124 | return JournalPathInfo{
125 | Key: key,
126 | Type: bq,
127 | VariablePortion: BuildJournalPathWithTSuffix(key, bq, tSuffix),
128 | TSuffix: tSuffix,
129 | Timestamp: timestamp,
130 | UniqueId: uniqueId,
131 | }
132 | }
133 |
134 | func firstRune(s string) rune {
135 | for _, r := range s {
136 | return r
137 | }
138 | return -1 // in case the string is empty
139 | }
140 |
141 | func DecodeJournalPath(variablePortion string) (JournalPathInfo, error) {
142 | m := pathRegexp.FindStringSubmatch(variablePortion)
143 | if m == nil {
144 | return NilJournalPathInfo, errors.New("malformed path string")
145 | }
146 | key, err := decodeKey(m[1])
147 | if err != nil {
148 | return NilJournalPathInfo, errors.New("malformed path string")
149 | }
150 | uniqueId, err := convertTSuffixToUniqueId(m[3])
151 | if err != nil {
152 | return NilJournalPathInfo, errors.New("malformed path string")
153 | }
154 | timestamp, err := convertTSuffixToUnixNano(m[3])
155 | if err != nil {
156 | return NilJournalPathInfo, errors.New("malformed path string")
157 | }
158 | return JournalPathInfo{
159 | Key: key,
160 | Type: JournalFileType(firstRune(m[2])),
161 | VariablePortion: variablePortion,
162 | TSuffix: m[3],
163 | Timestamp: timestamp,
164 | UniqueId: uniqueId,
165 | }, nil
166 | }
167 |
--------------------------------------------------------------------------------
/journal/path_builder_test.go:
--------------------------------------------------------------------------------
1 | package journal
2 |
3 | import (
4 | "testing"
5 | "time"
6 | )
7 |
8 | func Test_BuildJournalPath(t *testing.T) {
9 | info := BuildJournalPath(
10 | "test",
11 | JournalFileType('b'),
12 | time.Date(2014, 1, 1, 0, 0, 0, 0, time.UTC),
13 | 0x0,
14 | )
15 | t.Logf("%+v", info)
16 | if len(info.UniqueId) != len(info.TSuffix) {
17 | t.Fail()
18 | }
19 | if info.VariablePortion != "test.b4eedd5baba000000" {
20 | t.Fail()
21 | }
22 | }
23 |
--------------------------------------------------------------------------------
/markup/html.go:
--------------------------------------------------------------------------------
1 | package markup
2 |
3 | import (
4 | "github.com/moriyoshi/ik"
5 | "html"
6 | )
7 |
8 | type HTMLRenderer struct {
9 | Out Writer
10 | }
11 |
12 | var supportedAttrs = []int{ik.Embolden, ik.Underlined}
13 |
14 | func (renderer *HTMLRenderer) tag(style int) string {
15 | if style == ik.Embolden {
16 | return "b"
17 | } else if style == ik.Underlined {
18 | return "u"
19 | } else {
20 | panic("never get here")
21 | }
22 | }
23 |
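   | // Render walks the chunks while maintaining a stack of currently open tags:
   | // attributes that disappear are closed in LIFO order, newly required ones
   | // are opened, and chunk text is HTML-escaped before being written.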
24 | func (renderer *HTMLRenderer) Render(markup *ik.Markup) {
25 | out := renderer.Out
26 | appliedAttrs := 0
27 | styleStack := make(ik.IntVector, 0)
28 | for _, chunk := range markup.Chunks {
29 | chunkAttrs := int(chunk.Attrs)
30 | removedAttrs := ^chunkAttrs & appliedAttrs
31 | for _, supportedAttr := range supportedAttrs {
32 | for removedAttrs&supportedAttr != 0 {
33 | poppedStyle := styleStack.Pop()
34 | out.WriteString("")
35 | out.WriteString(renderer.tag(poppedStyle))
36 | out.WriteString(">")
37 | appliedAttrs &= ^poppedStyle
38 | removedAttrs &= ^poppedStyle
39 | }
40 | }
41 | newAttrs := chunkAttrs & ^appliedAttrs
42 | for _, supportedAttr := range supportedAttrs {
43 | if newAttrs&supportedAttr != 0 {
44 | styleStack.Append(supportedAttr)
45 | out.WriteString("<")
46 | out.WriteString(renderer.tag(supportedAttr))
47 | out.WriteString(">")
48 | }
49 | }
50 | appliedAttrs |= newAttrs
51 | out.WriteString(html.EscapeString(chunk.Text))
52 | }
53 | for len(styleStack) > 0 {
54 | poppedStyle := styleStack.Pop()
55 | out.WriteString("")
56 | out.WriteString(renderer.tag(poppedStyle))
57 | out.WriteString(">")
58 | }
59 | }
60 |
--------------------------------------------------------------------------------
/markup/html_test.go:
--------------------------------------------------------------------------------
1 | package markup
2 |
3 | import (
4 | "bytes"
5 | "github.com/moriyoshi/ik"
6 | "testing"
7 | )
8 |
9 | func TestHTMLRenderer_Render1(t *testing.T) {
10 | out := bytes.Buffer{}
11 | renderer := &HTMLRenderer{&out}
12 | renderer.Render(&ik.Markup{
13 | []ik.MarkupChunk{
14 | {0, "test"},
15 | {ik.Embolden, "EMBOLDEN"},
16 | {ik.Underlined, "_underlined_"},
17 | },
18 | })
19 | 	if out.String() != "test<b>EMBOLDEN</b><u>_underlined_</u>" {
20 | t.Fail()
21 | }
22 | }
23 |
24 | func TestHTMLRenderer_Render2(t *testing.T) {
25 | out := bytes.Buffer{}
26 | renderer := &HTMLRenderer{&out}
27 | renderer.Render(&ik.Markup{
28 | []ik.MarkupChunk{
29 | {0, "test"},
30 | {ik.Embolden, "EMBOLDEN"},
31 | {ik.Embolden | ik.Underlined, "_UNDERLINED_"},
32 | {ik.Underlined, "_underlined_"},
33 | },
34 | })
35 | 	if out.String() != "test<b>EMBOLDEN<u>_UNDERLINED_</u></b><u>_underlined_</u>" {
36 | t.Fail()
37 | }
38 | }
39 |
--------------------------------------------------------------------------------
/markup/markup.go:
--------------------------------------------------------------------------------
1 | package markup
2 |
3 | import "github.com/moriyoshi/ik"
4 |
5 | type Writer interface {
6 | WriteString(s string) (int, error)
7 | }
8 |
9 | type MarkupRenderer interface {
10 | Render(markup *ik.Markup)
11 | }
12 |
--------------------------------------------------------------------------------
/markup/plain.go:
--------------------------------------------------------------------------------
1 | package markup
2 |
3 | import (
4 | "github.com/moriyoshi/ik"
5 | )
6 |
7 | type PlainRenderer struct {
8 | Out Writer
9 | }
10 |
11 | func (renderer *PlainRenderer) Render(markup *ik.Markup) {
12 | for _, chunk := range markup.Chunks {
13 | renderer.Out.WriteString(chunk.Text)
14 | }
15 | }
16 |
--------------------------------------------------------------------------------
/markup/term.go:
--------------------------------------------------------------------------------
1 | package markup
2 |
3 | import (
4 | "github.com/moriyoshi/ik"
5 | "strings"
6 | )
7 |
8 | type TerminalEscapeRenderer struct {
9 | Out Writer
10 | }
11 |
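   | // Render emits ANSI SGR escape sequences: "0" resets when attributes are
   | // dropped, "1" turns on bold, "4" underline, and "30".."37" select the
   | // foreground color encoded in the low bits of the attributes.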
12 | func (renderer *TerminalEscapeRenderer) Render(markup *ik.Markup) {
13 | out := renderer.Out
14 | appliedAttrs := 0
15 | _codes := [4]string{}
16 | for _, chunk := range markup.Chunks {
17 | codes := _codes[:0]
18 | chunkAttrs := int(chunk.Attrs)
19 | removedAttrs := ^chunkAttrs & appliedAttrs
20 | if chunkAttrs&ik.White != 0 {
21 | appliedAttrs &= ^ik.White
22 | removedAttrs |= ik.White
23 | }
24 | newAttrs := chunkAttrs
25 | if removedAttrs != 0 {
26 | codes = append(codes, "0")
27 | }
28 | if newAttrs&ik.Embolden != 0 {
29 | codes = append(codes, "1")
30 | }
31 | if newAttrs&ik.Underlined != 0 {
32 | codes = append(codes, "4")
33 | }
34 | if newAttrs&ik.White != 0 {
35 | codes = append(codes, "3031323334353637"[(newAttrs&ik.White)*2:][:2])
36 | }
37 | if len(codes) > 0 {
38 | out.WriteString("\x1b[" + strings.Join(codes, ";") + "m")
39 | }
40 | appliedAttrs |= newAttrs
41 | out.WriteString(chunk.Text)
42 | }
43 | if appliedAttrs != 0 {
44 | out.WriteString("\x1b[0m")
45 | }
46 | }
47 |
--------------------------------------------------------------------------------
/parsers/regexp.go:
--------------------------------------------------------------------------------
1 | package parsers
2 |
3 | import (
4 | "errors"
5 | "github.com/moriyoshi/ik"
6 | "github.com/pbnjay/strptime"
7 | "regexp"
8 | "time"
9 | )
10 |
11 | type RegexpLineParserPlugin struct{}
12 |
13 | type RegexpLineParserFactory struct {
14 | plugin *RegexpLineParserPlugin
15 | logger ik.Logger
16 | timeParser func(value string) (time.Time, error)
17 | regex *regexp.Regexp
18 | }
19 |
20 | type RegexpLineParser struct {
21 | factory *RegexpLineParserFactory
22 | receiver func(ik.FluentRecord) error
23 | }
24 |
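   | // Feed matches a single line against the configured regexp and maps each
   | // named capture group to a field of the emitted record; lines that do not
   | // match are logged as errors and skipped.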
25 | func (parser *RegexpLineParser) Feed(line string) error {
26 | regex := parser.factory.regex
27 | g := regex.FindStringSubmatch(line)
28 | data := make(map[string]interface{})
29 | if g == nil {
30 | parser.factory.logger.Error("Unparsed line: " + line)
31 | return nil
32 | }
33 | 	for i, name := range regex.SubexpNames() {
34 | 		if i == 0 || name == "" { continue } // skip the whole match and unnamed groups
35 | 		data[name] = g[i]
36 | 	}
37 | 	return parser.receiver(ik.FluentRecord{
38 | 		Tag:       "", // tag and timestamp extraction is not implemented yet
39 | 		Timestamp: 0,
40 | 		Data:      data,
41 | 	})
42 | }
43 |
44 | func (*RegexpLineParserPlugin) Name() string {
45 | return "regexp"
46 | }
47 |
48 | func (factory *RegexpLineParserFactory) New(receiver func(ik.FluentRecord) error) (ik.LineParser, error) {
49 | return &RegexpLineParser{
50 | factory: factory,
51 | receiver: receiver,
52 | }, nil
53 | }
54 |
55 | func (plugin *RegexpLineParserPlugin) OnRegistering(visitor func(name string, factoryFactory ik.LineParserFactoryFactory) error) error {
56 | return visitor("regexp", func(engine ik.Engine, config *ik.ConfigElement) (ik.LineParserFactory, error) {
57 | return plugin.New(engine, config)
58 | })
59 | }
60 |
61 | func (plugin *RegexpLineParserPlugin) newRegexpLineParserFactory(logger ik.Logger, timeParser func(value string) (time.Time, error), regex *regexp.Regexp) (*RegexpLineParserFactory, error) {
62 | return &RegexpLineParserFactory{
63 | plugin: plugin,
64 | logger: logger,
65 | timeParser: timeParser,
66 | regex: regex,
67 | }, nil
68 | }
69 |
70 | func (plugin *RegexpLineParserPlugin) New(engine ik.Engine, config *ik.ConfigElement) (ik.LineParserFactory, error) {
71 | timeFormatStr, ok := config.Attrs["time_format"]
72 | var timeParser func(value string) (time.Time, error)
73 | if ok {
74 | timeParser = func(value string) (time.Time, error) {
75 | return strptime.Parse(value, timeFormatStr)
76 | }
77 | } else {
78 | timeParser = func(value string) (time.Time, error) {
79 | // FIXME
80 | return time.Parse(time.RFC3339, value)
81 | }
82 | }
83 | regexStr, ok := config.Attrs["regexp"]
84 | if !ok {
85 | return nil, errors.New("Required attribute `regexp' not found")
86 | }
87 | regex, err := regexp.Compile(regexStr)
88 | if err != nil {
89 | return nil, err
90 | }
91 | return plugin.newRegexpLineParserFactory(engine.Logger(), timeParser, regex)
92 | }
93 |
94 | var _ = AddPlugin(&RegexpLineParserPlugin{})
95 |
--------------------------------------------------------------------------------
/parsers/registry.go:
--------------------------------------------------------------------------------
1 | package parsers
2 |
3 | import "github.com/moriyoshi/ik"
4 |
5 | var _plugins []ik.LineParserPlugin = make([]ik.LineParserPlugin, 0)
6 |
7 | func AddPlugin(plugin ik.LineParserPlugin) bool {
8 | _plugins = append(_plugins, plugin)
9 | return false
10 | }
11 |
12 | func GetPlugins() []ik.LineParserPlugin {
13 | return _plugins
14 | }
15 |
--------------------------------------------------------------------------------
/plugins/in_forward.go:
--------------------------------------------------------------------------------
1 | package plugins
2 |
3 | import (
4 | "errors"
5 | "fmt"
6 | "github.com/moriyoshi/ik"
7 | "github.com/ugorji/go/codec"
8 | "io"
9 | "net"
10 | "reflect"
11 | "strconv"
12 | "sync/atomic"
13 | )
14 |
15 | type forwardClient struct {
16 | input *ForwardInput
17 | logger ik.Logger
18 | conn net.Conn
19 | codec *codec.MsgpackHandle
20 | enc *codec.Encoder
21 | dec *codec.Decoder
22 | }
23 |
24 | type ForwardInput struct {
25 | factory *ForwardInputFactory
26 | port ik.Port
27 | logger ik.Logger
28 | bind string
29 | listener net.Listener
30 | codec *codec.MsgpackHandle
31 | clients map[net.Conn]*forwardClient
32 | entries int64
33 | }
34 |
35 | type EntryCountTopic struct{}
36 |
37 | type ConnectionCountTopic struct{}
38 |
39 | type ForwardInputFactory struct {
40 | }
41 |
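   | // coerceInPlace recursively converts []byte values produced by the msgpack
   | // decoder into strings, presumably so that downstream consumers see
   | // ordinary string fields.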
42 | func coerceInPlace(data map[string]interface{}) {
43 | for k, v := range data {
44 | switch v_ := v.(type) {
45 | case []byte:
46 | data[k] = string(v_) // XXX: byte => rune
47 | case map[string]interface{}:
48 | coerceInPlace(v_)
49 | }
50 | }
51 | }
52 |
53 | func decodeRecordSet(tag []byte, entries []interface{}) (ik.FluentRecordSet, error) {
54 | records := make([]ik.TinyFluentRecord, len(entries))
55 | for i, _entry := range entries {
56 | entry, ok := _entry.([]interface{})
57 | if !ok {
58 | return ik.FluentRecordSet{}, errors.New("Failed to decode recordSet")
59 | }
60 | timestamp, ok := entry[0].(uint64)
61 | if !ok {
62 | return ik.FluentRecordSet{}, errors.New("Failed to decode timestamp field")
63 | }
64 | data, ok := entry[1].(map[string]interface{})
65 | if !ok {
66 | 			return ik.FluentRecordSet{}, fmt.Errorf("Failed to decode data field (got %v)", entry[1])
67 | }
68 | coerceInPlace(data)
69 | records[i] = ik.TinyFluentRecord{
70 | Timestamp: timestamp,
71 | Data: data,
72 | }
73 | }
74 | return ik.FluentRecordSet{
75 | Tag: string(tag), // XXX: byte => rune
76 | Records: records,
77 | }, nil
78 | }
79 |
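   | // decodeEntries reads one message from the connection and normalizes the
   | // wire formats of the fluentd forward protocol: a single event of
   | // (tag, timestamp, record), an array of such events, or a msgpack-encoded
   | // byte blob containing such an array (the "PackedForward" format).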
80 | func (c *forwardClient) decodeEntries() ([]ik.FluentRecordSet, error) {
81 | v := []interface{}{nil, nil, nil}
82 | err := c.dec.Decode(&v)
83 | if err != nil {
84 | return nil, err
85 | }
86 | tag, ok := v[0].([]byte)
87 | if !ok {
88 | return nil, errors.New("Failed to decode tag field")
89 | }
90 |
91 | var retval []ik.FluentRecordSet
92 | switch timestamp_or_entries := v[1].(type) {
93 | case uint64:
94 | timestamp := timestamp_or_entries
95 | data, ok := v[2].(map[string]interface{})
96 | if !ok {
97 | 			return nil, fmt.Errorf("Failed to decode data field (got %T)", v[2])
98 | }
99 | coerceInPlace(data)
100 | retval = []ik.FluentRecordSet{
101 | {
102 | Tag: string(tag), // XXX: byte => rune
103 | Records: []ik.TinyFluentRecord{
104 | {
105 | Timestamp: timestamp,
106 | Data: data,
107 | },
108 | },
109 | },
110 | }
111 | case float64:
112 | timestamp := uint64(timestamp_or_entries)
113 | data, ok := v[2].(map[string]interface{})
114 | if !ok {
115 | 			return nil, fmt.Errorf("Failed to decode data field (got %T)", v[2])
116 | }
117 | retval = []ik.FluentRecordSet{
118 | {
119 | Tag: string(tag), // XXX: byte => rune
120 | Records: []ik.TinyFluentRecord{
121 | {
122 | Timestamp: timestamp,
123 | Data: data,
124 | },
125 | },
126 | },
127 | }
128 | case []interface{}:
129 | 		// "Forward" format: v[1] is already a decoded array of
130 | 		// [timestamp, record] entries, so it can be handed to
131 | 		// decodeRecordSet as is.
132 | recordSet, err := decodeRecordSet(tag, timestamp_or_entries)
133 | if err != nil {
134 | return nil, err
135 | }
136 | retval = []ik.FluentRecordSet{recordSet}
137 | case []byte:
138 | entries := make([]interface{}, 0)
139 | err := codec.NewDecoderBytes(timestamp_or_entries, c.codec).Decode(&entries)
140 | if err != nil {
141 | return nil, err
142 | }
143 | recordSet, err := decodeRecordSet(tag, entries)
144 | if err != nil {
145 | return nil, err
146 | }
147 | retval = []ik.FluentRecordSet{recordSet}
148 | default:
149 | 		return nil, errors.New(fmt.Sprintf("Unknown type: %T", timestamp_or_entries))
150 | }
151 | atomic.AddInt64(&c.input.entries, int64(len(retval)))
152 | return retval, nil
153 | }
154 |
155 | func handleInner(c *forwardClient) bool {
156 | recordSets, err := c.decodeEntries()
157 | defer func() {
158 | if len(recordSets) > 0 {
159 | err_ := c.input.Port().Emit(recordSets)
160 | if err_ != nil {
161 | c.logger.Error("%s", err_.Error())
162 | }
163 | }
164 | }()
165 | if err == nil {
166 | return true
167 | }
168 |
169 | err_, ok := err.(net.Error)
170 | if ok {
171 | if err_.Temporary() {
172 | c.logger.Warning("Temporary failure: %s", err_.Error())
173 | return true
174 | }
175 | }
176 | if err == io.EOF {
177 | c.logger.Info("Client %s closed the connection", c.conn.RemoteAddr().String())
178 | } else {
179 | c.logger.Error("%s", err.Error())
180 | }
181 | return false
182 | }
183 |
184 | func (c *forwardClient) handle() {
185 | for handleInner(c) {
186 | }
187 | err := c.conn.Close()
188 | if err != nil {
189 | c.logger.Warning("%s", err.Error())
190 | }
191 | c.input.markDischarged(c)
192 | }
193 |
194 | func newForwardClient(input *ForwardInput, logger ik.Logger, conn net.Conn, _codec *codec.MsgpackHandle) *forwardClient {
195 | c := &forwardClient{
196 | input: input,
197 | logger: logger,
198 | conn: conn,
199 | codec: _codec,
200 | enc: codec.NewEncoder(conn, _codec),
201 | dec: codec.NewDecoder(conn, _codec),
202 | }
203 | input.markCharged(c)
204 | return c
205 | }
206 |
207 | func (input *ForwardInput) Factory() ik.Plugin {
208 | return input.factory
209 | }
210 |
211 | func (input *ForwardInput) Port() ik.Port {
212 | return input.port
213 | }
214 |
215 | func (input *ForwardInput) Run() error {
216 | conn, err := input.listener.Accept()
217 | if err != nil {
218 | input.logger.Warning("%s", err.Error())
219 | return err
220 | }
221 | go newForwardClient(input, input.logger, conn, input.codec).handle()
222 | return ik.Continue
223 | }
224 |
225 | func (input *ForwardInput) Shutdown() error {
226 | 	for conn := range input.clients {
227 | err := conn.Close()
228 | if err != nil {
229 | input.logger.Warning("Error during closing connection: %s", err.Error())
230 | }
231 | }
232 | return input.listener.Close()
233 | }
234 |
235 | func (input *ForwardInput) Dispose() {
236 | input.Shutdown()
237 | }
238 |
239 | func (input *ForwardInput) markCharged(c *forwardClient) {
240 | input.clients[c.conn] = c
241 | }
242 |
243 | func (input *ForwardInput) markDischarged(c *forwardClient) {
244 | delete(input.clients, c.conn)
245 | }
246 |
247 | func newForwardInput(factory *ForwardInputFactory, logger ik.Logger, engine ik.Engine, bind string, port ik.Port) (*ForwardInput, error) {
248 | _codec := codec.MsgpackHandle{}
249 | _codec.MapType = reflect.TypeOf(map[string]interface{}(nil))
250 | _codec.RawToString = false
251 | listener, err := net.Listen("tcp", bind)
252 | if err != nil {
253 | logger.Warning("%s", err.Error())
254 | return nil, err
255 | }
256 | return &ForwardInput{
257 | factory: factory,
258 | port: port,
259 | logger: logger,
260 | bind: bind,
261 | listener: listener,
262 | codec: &_codec,
263 | clients: make(map[net.Conn]*forwardClient),
264 | entries: 0,
265 | }, nil
266 | }
267 |
268 | func (factory *ForwardInputFactory) Name() string {
269 | return "forward"
270 | }
271 |
272 | func (factory *ForwardInputFactory) New(engine ik.Engine, config *ik.ConfigElement) (ik.Input, error) {
273 | listen, ok := config.Attrs["listen"]
274 | if !ok {
275 | listen = ""
276 | }
277 | netPort, ok := config.Attrs["port"]
278 | if !ok {
279 | netPort = "24224"
280 | }
281 | bind := listen + ":" + netPort
282 | return newForwardInput(factory, engine.Logger(), engine, bind, engine.DefaultPort())
283 | }
284 |
285 | func (factory *ForwardInputFactory) BindScorekeeper(scorekeeper *ik.Scorekeeper) {
286 | scorekeeper.AddTopic(ik.ScorekeeperTopic{
287 | Plugin: factory,
288 | Name: "entries",
289 | DisplayName: "Total number of entries",
290 | Description: "Total number of entries received so far",
291 | Fetcher: &EntryCountTopic{},
292 | })
293 | scorekeeper.AddTopic(ik.ScorekeeperTopic{
294 | Plugin: factory,
295 | Name: "connections",
296 | DisplayName: "Connections",
297 | Description: "Number of connections currently handled",
298 | Fetcher: &ConnectionCountTopic{},
299 | })
300 | }
301 |
302 | func (topic *EntryCountTopic) Markup(input_ ik.PluginInstance) (ik.Markup, error) {
303 | text, err := topic.PlainText(input_)
304 | if err != nil {
305 | return ik.Markup{}, err
306 | }
307 | return ik.Markup{[]ik.MarkupChunk{{Text: text}}}, nil
308 | }
309 |
310 | func (topic *EntryCountTopic) PlainText(input_ ik.PluginInstance) (string, error) {
311 | input := input_.(*ForwardInput)
312 | 	return strconv.FormatInt(atomic.LoadInt64(&input.entries), 10), nil // entries is updated concurrently by client goroutines
313 | }
314 |
315 | func (topic *ConnectionCountTopic) Markup(input_ ik.PluginInstance) (ik.Markup, error) {
316 | text, err := topic.PlainText(input_)
317 | if err != nil {
318 | return ik.Markup{}, err
319 | }
320 | return ik.Markup{[]ik.MarkupChunk{{Text: text}}}, nil
321 | }
322 |
323 | func (topic *ConnectionCountTopic) PlainText(input_ ik.PluginInstance) (string, error) {
324 | input := input_.(*ForwardInput)
325 | return strconv.Itoa(len(input.clients)), nil // XXX: race
326 | }
327 |
328 | var _ = AddPlugin(&ForwardInputFactory{})
329 |
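
For reference, a minimal sketch of a client speaking the wire format this input decodes: a msgpack array of `[tag, [[timestamp, record], ...]]`, which is handled by the `[]interface{}` branch of `decodeEntries`. The address, tag and record below are illustrative only; `24224` is just the default port `ForwardInputFactory.New` falls back to.

```go
package main

import (
	"net"
	"time"

	"github.com/ugorji/go/codec"
)

func main() {
	// Connect to a running "forward" input (hypothetical local instance).
	conn, err := net.Dial("tcp", "127.0.0.1:24224")
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	enc := codec.NewEncoder(conn, &codec.MsgpackHandle{})
	// One entry: [timestamp, record].
	entry := []interface{}{uint64(time.Now().Unix()), map[string]interface{}{"message": "hello"}}
	// The whole message: [tag, [entry, ...]].
	if err := enc.Encode([]interface{}{"test.tag", []interface{}{entry}}); err != nil {
		panic(err)
	}
}
```
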
--------------------------------------------------------------------------------
/plugins/in_tail.go:
--------------------------------------------------------------------------------
1 | package plugins
2 |
3 | import (
4 | "bytes"
5 | "encoding/hex"
6 | "errors"
7 | "fmt"
8 | "github.com/howeyc/fsnotify"
9 | fileid "github.com/moriyoshi/go-fileid"
10 | "github.com/moriyoshi/ik"
11 | "io"
12 | "net/http"
13 | "os"
14 | "reflect"
15 | "strconv"
16 | "strings"
17 | "sync"
18 | "time"
19 | )
20 |
21 | const DefaultBacklogSize = 2048
22 |
23 | type MyBufferedReader struct {
24 | inner io.Reader
25 | b []byte
26 | r, w int
27 | position int64
28 | eofReached bool
29 | }
30 |
31 | func (b *MyBufferedReader) shift() {
32 | copy(b.b, b.b[b.r:b.w])
33 | b.w -= b.r
34 | b.r = 0
35 | }
36 |
37 | func (b *MyBufferedReader) fillUp() (bool, error) {
38 | if b.eofReached {
39 | return false, nil
40 | }
41 | if b.w == len(b.b) {
42 | return false, nil
43 | }
44 | n, err := b.inner.Read(b.b[b.w:])
45 | if err != nil {
46 | if err == io.EOF {
47 | b.eofReached = true
48 | return true, nil
49 | } else {
50 | return false, err
51 | }
52 | }
53 | b.w += n
54 | return true, nil
55 | }
56 |
57 | func (b *MyBufferedReader) Reset(position int64) error {
58 | b.position = position
59 | b.r = 0
60 | b.w = 0
61 | return nil
62 | }
63 |
64 | func (b *MyBufferedReader) Continue() {
65 | b.eofReached = false
66 | }
67 |
68 | func (b *MyBufferedReader) ReadRest() []byte {
69 | if !b.eofReached {
70 | return nil
71 | }
72 | line := b.b[b.r:b.w]
73 | b.r = b.w
74 | return line
75 | }
76 |
77 | func (b *MyBufferedReader) ReadLine() ([]byte, bool, bool, error) {
78 | for {
79 | i := bytes.IndexByte(b.b[b.r:b.w], '\n')
80 | if i >= 0 {
81 | e := b.r + i
82 | if e > 0 && b.b[e-1] == '\r' {
83 | e -= 1
84 | }
85 | line := b.b[b.r:e]
86 | b.position += int64(i + 1)
87 | b.r += i + 1
88 | return line, false, false, nil
89 | }
90 |
91 | if b.eofReached {
92 | if b.r == b.w {
93 | return b.b[b.r:b.r], false, false, io.EOF
94 | } else {
95 | return b.b[b.r:b.r], false, true, nil
96 | }
97 | }
98 |
99 | b.shift()
100 | more, err := b.fillUp()
101 | if err != nil {
102 | return nil, false, false, err
103 | }
104 |
105 | if !more {
106 | break
107 | }
108 | }
109 | {
110 | line := b.b[b.r:]
111 | b.r = 0
112 | b.w = 0
113 | b.position += int64(len(line))
114 | return line, true, false, nil
115 | }
116 | }
117 |
118 | func NewMyBufferedReader(reader io.Reader, bufSize int, position int64) *MyBufferedReader {
119 | return &MyBufferedReader{
120 | inner: reader,
121 | b: make([]byte, bufSize),
122 | r: 0,
123 | w: 0,
124 | position: position,
125 | eofReached: false,
126 | }
127 | }
128 |
129 | type TailTarget struct {
130 | path string
131 | f *os.File
132 | size int64
133 | id fileid.FileId
134 | }
135 |
136 | type TailEventHandler struct {
137 | logger ik.Logger
138 | path string
139 | rotateWait time.Duration
140 | target TailTarget
141 | pending bool
142 | expiry time.Time
143 | bf *MyBufferedReader
144 | readBufferSize int
145 | decoder func([]byte) (string, error)
146 | stateSaver func(target TailTarget, position int64) error
147 | lineReceiver func(line string) error
148 | closer func() error
149 | }
150 |
151 | func openTarget(path string) (TailTarget, error) {
152 | f, err := os.OpenFile(path, os.O_RDONLY, 0)
153 | if err != nil {
154 | if os.IsNotExist(err) {
155 | return TailTarget{
156 | path: path,
157 | f: nil,
158 | }, nil
159 | } else {
160 | return TailTarget{}, err
161 | }
162 | }
163 |
164 | info, err := f.Stat()
165 | if err != nil {
166 | return TailTarget{}, err
167 | }
168 |
169 | id, err := fileid.GetFileId(path, true)
170 | if err != nil {
171 | return TailTarget{}, err
172 | }
173 |
174 | return TailTarget{
175 | path: path,
176 | f: f,
177 | size: info.Size(),
178 | id: id,
179 | }, nil
180 | }
181 |
182 | func (target *TailTarget) UpdatedOne() (TailTarget, error) {
183 | info, err := target.f.Stat()
184 | if err != nil {
185 | return TailTarget{}, err
186 | }
187 | return TailTarget{
188 | path: target.path,
189 | f: target.f,
190 | size: info.Size(),
191 | id: target.id,
192 | }, nil
193 | }
194 |
195 | func (handler *TailEventHandler) decode(in []byte) (string, error) {
196 | if handler.decoder != nil {
197 | return handler.decoder(in)
198 | } else {
199 | return string(in), nil
200 | }
201 | }
202 |
203 | func (handler *TailEventHandler) fetch() error {
204 | for {
205 | line, ispfx, tryAgain, err := handler.bf.ReadLine()
206 | if err != nil {
207 | if err == io.EOF {
208 | handler.bf.Continue()
209 | break
210 | } else {
211 | return err
212 | }
213 | }
214 | if tryAgain {
215 | if !handler.pending {
216 | break
217 | } else {
218 | line = handler.bf.ReadRest()
219 | if line == nil {
220 | panic("WTF?")
221 | }
222 | handler.logger.Warning("the file was not terminated by line endings and the file seems to have been rotated: %s, position=%d", handler.target.path, handler.bf.position)
223 | }
224 | }
225 | if ispfx {
226 | handler.logger.Warning("line too long: %s, position=%d", handler.target.path, handler.bf.position)
227 | }
228 | stringizedLine, err := handler.decode(line)
229 | if err != nil {
230 | handler.logger.Error("failed to decode line")
231 | stringizedLine = hex.Dump(line)
232 | }
233 | err = handler.lineReceiver(stringizedLine)
234 | if err != nil {
235 | return err
236 | }
237 | }
238 | return nil
239 | }
240 |
241 | func (handler *TailEventHandler) saveState() error {
242 | return handler.stateSaver(
243 | handler.target,
244 | handler.bf.position,
245 | )
246 | }
247 |
248 | func (handler *TailEventHandler) Dispose() error {
249 | var err error
250 | if handler.target.f != nil {
251 | err = handler.target.f.Close()
252 | handler.target.f = nil
253 | }
254 | if handler.closer != nil {
255 | err_ := handler.closer()
256 | if err_ != nil {
257 | err = err_
258 | }
259 | }
260 | return err
261 | }
262 |
263 | func (handler *TailEventHandler) OnChange(now time.Time) error {
264 | var target TailTarget
265 | var err error
266 |
267 | if handler.pending {
268 | // there is pending rotation request
269 | if now.Sub(handler.expiry) >= 0 {
270 | // target was expired
271 | target, err = openTarget(handler.path)
272 | if err != nil {
273 | return err
274 | }
275 | handler.pending = false
276 | } else {
277 | target, err = (&handler.target).UpdatedOne()
278 | if err != nil {
279 | return err
280 | }
281 | }
282 | } else {
283 | target, err = openTarget(handler.path)
284 | if err != nil {
285 | return err
286 | }
287 | if handler.target.f != nil && target.f != nil {
288 | sameFile := fileid.IsSame(handler.target.id, target.id)
289 | err := target.f.Close()
290 | if err != nil {
291 | return err
292 | }
293 | if !sameFile {
294 | // file was replaced
295 | target, err = (&handler.target).UpdatedOne()
296 | if err != nil {
297 | return err
298 | }
299 | handler.logger.Notice("rotation detected: %s", target.path)
300 | handler.pending = true
301 | handler.expiry = now.Add(handler.rotateWait)
302 | } else {
303 | target.f = handler.target.f
304 | }
305 | } else if handler.target.f == nil && target.f != nil {
306 | // file was newly created
307 | handler.logger.Notice("file created: %s", target.path)
308 | } else if handler.target.f != nil && target.f == nil {
309 | // file was moved?
310 | handler.logger.Notice("file moved: %s", target.path)
311 | target, err = (&handler.target).UpdatedOne()
312 | if err != nil {
313 | return err
314 | }
315 | handler.pending = true
316 | handler.expiry = now.Add(handler.rotateWait)
317 | }
318 | }
319 |
320 | fetchNeeded := false
321 | if target.f != handler.target.f || target.size < handler.target.size {
322 | // file was replaced / moved / created / truncated
323 | newPosition := target.size
324 | if target.f != handler.target.f {
325 | if handler.target.f != nil {
326 | err = handler.target.f.Close()
327 | if err != nil {
328 | return err
329 | }
330 | }
331 | newPosition = 0
332 | }
333 | position, err := target.f.Seek(newPosition, os.SEEK_SET)
334 | if err != nil {
335 | return err
336 | }
337 | handler.bf = NewMyBufferedReader(target.f, handler.readBufferSize, position)
338 | fetchNeeded = true
339 | } else {
340 | fetchNeeded = target.size > handler.target.size
341 | }
342 | handler.target = target
343 |
344 | if fetchNeeded {
345 | err = handler.fetch()
346 | if err != nil {
347 | return err
348 | }
349 | err = handler.saveState()
350 | if err != nil {
351 | return err
352 | }
353 | }
354 | return nil
355 | }
356 |
357 | func NewTailEventHandler(
358 | logger ik.Logger,
359 | target TailTarget,
360 | position int64,
361 | rotateWait time.Duration,
362 | readBufferSize int,
363 | decoder func([]byte) (string, error),
364 | stateSaver func(target TailTarget, position int64) error,
365 | lineReceiver func(line string) error,
366 | closer func() error,
367 | ) (*TailEventHandler, error) {
368 | bf := (*MyBufferedReader)(nil)
369 | if target.f != nil {
370 | position, err := target.f.Seek(position, os.SEEK_SET)
371 | if err != nil {
372 | target.f.Close()
373 | return nil, err
374 | }
375 | bf = NewMyBufferedReader(target.f, readBufferSize, position)
376 | target.size = position
377 | } else {
378 | logger.Error("file does not exist: %s", target.path)
379 | }
380 | return &TailEventHandler{
381 | logger: logger,
382 | path: target.path,
383 | rotateWait: rotateWait,
384 | target: target,
385 | pending: false,
386 | expiry: time.Time{},
387 | bf: bf,
388 | readBufferSize: readBufferSize,
389 | decoder: decoder,
390 | stateSaver: stateSaver,
391 | lineReceiver: lineReceiver,
392 | closer: closer,
393 | }, nil
394 | }
395 |
396 | type TailFileInfo interface {
397 | GetPosition() int64
398 | SetPosition(int64)
399 | GetFileId() fileid.FileId
400 | SetFileId(fileid.FileId)
401 | Path() string
402 | IsNew() bool
403 | Save() error
404 | Duplicate() TailFileInfo
405 | Dispose() error
406 | }
407 |
408 | type tailPositionFileData struct {
409 | Path string
410 | Position int64
411 | Id fileid.FileId
412 | }
413 |
414 | type TailPositionFileEntry struct {
415 | positionFile *TailPositionFile
416 | offset int
417 | isNew bool
418 | data tailPositionFileData
419 | }
420 |
421 | type TailPositionFile struct {
422 | logger ik.Logger
423 | refcount int64
424 | path string
425 | f *os.File
426 | view []byte
427 | entries map[string]*TailPositionFileEntry
428 | controlChan chan bool
429 | mtx sync.Mutex
430 | }
431 |
432 | type concreateTailFileInfo struct {
433 | disposed bool
434 | impl *TailPositionFileEntry
435 | }
436 |
437 | func (wrapper *concreateTailFileInfo) Path() string {
438 | if wrapper.disposed {
439 | panic("already disposed")
440 | }
441 | return wrapper.impl.data.Path
442 | }
443 |
444 | func (wrapper *concreateTailFileInfo) IsNew() bool {
445 | if wrapper.disposed {
446 | panic("already disposed")
447 | }
448 | return wrapper.impl.isNew
449 | }
450 |
451 | func (wrapper *concreateTailFileInfo) GetPosition() int64 {
452 | if wrapper.disposed {
453 | panic("already disposed")
454 | }
455 | return wrapper.impl.data.Position
456 | }
457 |
458 | func (wrapper *concreateTailFileInfo) SetPosition(position int64) {
459 | if wrapper.disposed {
460 | panic("already disposed")
461 | }
462 | wrapper.impl.data.Position = position
463 | }
464 |
465 | func (wrapper *concreateTailFileInfo) GetFileId() fileid.FileId {
466 | if wrapper.disposed {
467 | panic("already disposed")
468 | }
469 | return wrapper.impl.data.Id
470 | }
471 |
472 | func (wrapper *concreateTailFileInfo) SetFileId(id fileid.FileId) {
473 | if wrapper.disposed {
474 | panic("already disposed")
475 | }
476 | wrapper.impl.data.Id = id
477 | }
478 |
479 | func (wrapper *concreateTailFileInfo) Duplicate() TailFileInfo {
480 | if wrapper.disposed {
481 | panic("already disposed")
482 | }
483 | return newConcreteTailFileInfo(wrapper.impl)
484 | }
485 |
486 | func (wrapper *concreateTailFileInfo) Save() error {
487 | if wrapper.disposed {
488 | panic("already disposed")
489 | }
490 | return wrapper.impl.save()
491 | }
492 |
493 | func (wrapper *concreateTailFileInfo) Dispose() error {
494 | if wrapper.disposed {
495 | panic("already disposed")
496 | }
497 | wrapper.disposed = true
498 | return wrapper.impl.deleteRef()
499 | }
500 |
501 | func newConcreteTailFileInfo(impl *TailPositionFileEntry) *concreateTailFileInfo {
502 | impl.addRef()
503 | return &concreateTailFileInfo{
504 | disposed: false,
505 | impl: impl,
506 | }
507 | }
508 |
509 | func appendZeroPaddedUint(b []byte, value uint64, size uintptr) []byte {
510 | fieldLen := 0
511 | switch size {
512 | case 1:
513 | fieldLen = 3
514 | case 2:
515 | fieldLen = 5
516 | case 4:
517 | fieldLen = 10
518 | case 8:
519 | fieldLen = 20
520 | default:
521 | panic("WTF?")
522 | }
523 | o := len(b)
524 | requiredCap := o + fieldLen
525 | if requiredCap > cap(b) {
526 | nb := make([]byte, requiredCap)
527 | copy(nb, b)
528 | b = nb
529 | }
530 | b = b[0:requiredCap]
531 | i := o + fieldLen
532 | for {
533 | i -= 1
534 | if i < o {
535 | break
536 | }
537 | b[i] = byte('0') + byte(value%10)
538 | value /= 10
539 | if value == 0 {
540 | break
541 | }
542 | }
543 | for {
544 | i -= 1
545 | if i < o {
546 | break
547 | }
548 | b[i] = '0'
549 | }
550 | return b
551 | }
552 |
553 | func marshalPositionFileData(data *tailPositionFileData) []byte {
554 | b := make([]byte, 0, 16)
555 | b = append(b, data.Path...)
556 | b = append(b, '\t')
557 | b = appendZeroPaddedUint(b, uint64(data.Position), 8)
558 | rv := reflect.ValueOf(data.Id)
559 | n := rv.NumField()
560 | for i := 0; i < n; i += 1 {
561 | v := rv.Field(i)
562 | b = append(b, '\t')
563 | t := v.Type()
564 | switch t.Kind() {
565 | case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
566 | b = appendZeroPaddedUint(b, uint64(v.Int()), t.Size())
567 | case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
568 | b = appendZeroPaddedUint(b, v.Uint(), t.Size())
569 | }
570 | }
571 | b = append(b, '\n')
572 | return b
573 | }
574 |
575 | func parseUint(b []byte) (uint64, int, error) {
576 | i := 0
577 | value := (uint64)(0)
578 | for i < len(b) && b[i] >= '0' && b[i] <= '9' {
579 | d := b[i] - '0'
580 | value *= 10
581 | value += uint64(d)
582 | i += 1
583 | }
584 | if i == 0 {
585 | return 0, 0, errors.New("no digits")
586 | }
587 | return value, i, nil
588 | }
589 |
590 | func unmarshalPositionFileData(retval *tailPositionFileData, blob []byte) (int, error) {
591 | i := bytes.IndexByte(blob, '\t')
592 | if i < 0 {
593 | return 0, errors.New("invalid format")
594 | }
595 | retval.Path = string(blob[0:i])
596 |
597 | i += 1
598 | if i >= len(blob) {
599 | return 0, errors.New("unexpected EOF")
600 | }
601 | {
602 | v, e, err := parseUint(blob[i:])
603 | if err != nil {
604 | return 0, err
605 | }
606 | i += e
607 | retval.Position = int64(v)
608 | }
609 |
610 | rv := reflect.ValueOf(&retval.Id).Elem()
611 | fi := 0
612 | n := rv.NumField()
613 | outer:
614 | for {
615 | switch blob[i] {
616 | case '\t':
617 | i += 1
618 | case '\n':
619 | i += 1
620 | break outer
621 | default:
622 | return 0, errors.New("unexpected character: " + string(rune(blob[i])))
623 | }
624 | if fi >= n {
625 | return 0, errors.New("too many fields")
626 | }
627 | if i >= len(blob) {
628 | return 0, errors.New("unexpected EOF")
629 | }
630 | v, e, err := parseUint(blob[i:])
631 | if err != nil {
632 | return 0, err
633 | }
634 | f := rv.Field(fi)
635 | switch f.Kind() {
636 | case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
637 | f.SetInt(int64(v))
638 | case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
639 | f.SetUint(v)
640 | }
641 | i += e
642 | fi += 1
643 | }
644 | return i, nil
645 | }
646 |
647 | func (entry *TailPositionFileEntry) save() error {
648 | entry.isNew = false
649 | return entry.positionFile.scheduleUpdate(entry)
650 | }
651 |
652 | func (entry *TailPositionFileEntry) addRef() {
653 | entry.positionFile.addRef()
654 | }
655 |
656 | func (entry *TailPositionFileEntry) deleteRef() error {
657 | return entry.positionFile.deleteRef()
658 | }
659 |
660 | func (positionFile *TailPositionFile) scheduleUpdate(entry *TailPositionFileEntry) error {
661 | blob := marshalPositionFileData(&entry.data)
662 | offset := entry.offset
663 | copy(positionFile.view[offset:offset+len(blob)], blob)
664 | positionFile.controlChan <- false
665 | return nil
666 | }
667 |
668 | func (positionFile *TailPositionFile) doUpdate() {
669 | for needsToBeStopped := range positionFile.controlChan {
670 | if needsToBeStopped {
671 | break
672 | }
673 | _, err := positionFile.f.Seek(0, os.SEEK_SET)
674 | if err != nil {
675 | positionFile.logger.Error("%s", err.Error())
676 | continue
677 | }
678 | n, err := positionFile.f.Write(positionFile.view)
679 | if err != nil {
680 | positionFile.logger.Error("%s", err.Error())
681 | continue
682 | }
683 | if n != len(positionFile.view) {
684 | positionFile.logger.Error("failed to update position file: %s", positionFile.path)
685 | continue
686 | }
687 | }
688 | }
689 |
690 | func (positionFile *TailPositionFile) Get(path string) TailFileInfo {
691 | positionFile.mtx.Lock()
692 | defer positionFile.mtx.Unlock()
693 | entry, ok := positionFile.entries[path]
694 | if !ok {
695 | offset := len(positionFile.view)
696 | entry = &TailPositionFileEntry{
697 | positionFile: positionFile,
698 | offset: offset,
699 | isNew: true,
700 | data: tailPositionFileData{
701 | Path: path,
702 | Position: 0,
703 | Id: fileid.FileId{},
704 | },
705 | }
706 | blob := marshalPositionFileData(&entry.data)
707 | newView := make([]byte, offset+len(blob))
708 | copy(newView, positionFile.view)
709 | copy(newView[offset:], blob)
710 | positionFile.view = newView
711 | positionFile.entries[path] = entry
712 | }
713 |
714 | return newConcreteTailFileInfo(entry)
715 | }
716 |
717 | func (positionFile *TailPositionFile) Dispose() error {
718 | return positionFile.deleteRef()
719 | }
720 |
721 | func readTailPositionFileEntries(positionFile *TailPositionFile, f *os.File) (map[string]*TailPositionFileEntry, error) {
722 | retval := make(map[string]*TailPositionFileEntry)
723 | _, err := f.Seek(0, os.SEEK_SET)
724 | if err != nil {
725 | return nil, err
726 | }
727 | info, err := f.Stat()
728 | if err != nil {
729 | return nil, err
730 | }
731 | blob := make([]byte, int(info.Size()))
732 | n, err := f.Read(blob)
733 | if err != nil {
734 | return nil, err
735 | }
736 | if n != len(blob) {
737 | return nil, errors.New("unexpected EOF")
738 | }
739 | consumed := 0
740 | for offset := 0; offset < len(blob); offset += consumed {
741 | entry := &TailPositionFileEntry{
742 | positionFile: positionFile,
743 | offset: offset,
744 | isNew: false,
745 | data: tailPositionFileData{},
746 | }
747 | consumed, err = unmarshalPositionFileData(&entry.data, blob[offset:])
748 | if err != nil {
749 | return nil, err
750 | }
751 | retval[entry.data.Path] = entry
752 | }
753 | positionFile.view = blob
754 | return retval, nil
755 | }
756 |
757 | func openPositionFile(logger ik.Logger, path string) (*TailPositionFile, error) {
758 | f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0666) // FIXME
759 | failed := true
760 | defer func() {
761 | if failed && f != nil {
762 | f.Close()
763 | }
764 | }()
765 | var entries map[string]*TailPositionFileEntry
766 | retval := &TailPositionFile{
767 | logger: logger,
768 | refcount: 1,
769 | path: path,
770 | controlChan: make(chan bool),
771 | mtx: sync.Mutex{},
772 | }
773 | if err != nil {
774 | if !os.IsExist(err) {
775 | return nil, err
776 | }
777 | f, err = os.OpenFile(path, os.O_RDWR, 0666)
778 | if err != nil {
779 | return nil, err
780 | }
781 | entries, err = readTailPositionFileEntries(retval, f)
782 | if err != nil {
783 | return nil, err
784 | }
785 | } else {
786 | entries = make(map[string]*TailPositionFileEntry)
787 | }
788 | retval.f = f
789 | retval.entries = entries
790 | failed = false
791 | go retval.doUpdate()
792 | return retval, nil
793 | }
794 |
795 | func (positionFile *TailPositionFile) save() error {
796 | _, err := positionFile.f.Seek(0, os.SEEK_SET)
797 | if err != nil {
798 | return err
799 | }
800 | l, err := positionFile.f.Write(positionFile.view)
801 | if err != nil {
802 | return err
803 | }
804 | if l != len(positionFile.view) {
805 | return errors.New("marshalled data not fully written")
806 | }
807 | return nil
808 | }
809 |
810 | func (positionFile *TailPositionFile) addRef() {
811 | positionFile.refcount += 1
812 | }
813 |
814 | func (positionFile *TailPositionFile) deleteRef() error {
815 | positionFile.refcount -= 1
816 | if positionFile.refcount == 0 {
817 | err := positionFile.f.Close()
818 | if err != nil {
819 | positionFile.refcount += 1 // undo the change
820 | return err
821 | }
822 | positionFile.f = nil
823 | positionFile.controlChan <- true
824 | close(positionFile.controlChan)
825 | } else if positionFile.refcount < 0 {
826 | panic("refcount < 0!")
827 | }
828 | return nil
829 | }
830 |
831 | type TailInputFactory struct {
832 | }
833 |
834 | type TailWatcher struct {
835 | input *TailInput
836 | synthesizedTag string
837 | positionFile *TailPositionFile
838 | tailFileInfo TailFileInfo
839 | lineParser ik.LineParser
840 | handler *TailEventHandler
841 | statWatcher *fsnotify.Watcher
842 | timer *time.Ticker
843 | controlChan chan bool
844 | }
845 |
846 | func (watcher *TailWatcher) cleanup() {
847 | if watcher.statWatcher != nil {
848 | watcher.statWatcher.Close()
849 | watcher.statWatcher = nil
850 | }
851 | if watcher.handler != nil {
852 | watcher.handler.Dispose()
853 | watcher.handler = nil
854 | }
855 | if watcher.timer != nil {
856 | watcher.timer.Stop()
857 | watcher.timer = nil
858 | }
859 | }
860 |
861 | func (watcher *TailWatcher) Run() error {
862 | for {
863 | select {
864 | case err := <-watcher.statWatcher.Error:
865 | watcher.input.logger.Error("%s", err.Error())
866 | case <-watcher.statWatcher.Event:
867 | now := time.Now()
868 | err := watcher.handler.OnChange(now)
869 | if err != nil {
870 | watcher.input.logger.Error("%s", err.Error())
871 | return err
872 | }
873 | return ik.Continue
874 | case now := <-watcher.timer.C:
875 | err := watcher.handler.OnChange(now)
876 | if err != nil {
877 | watcher.input.logger.Error("%s", err.Error())
878 | return err
879 | }
880 | return ik.Continue
881 | 		case needsToBeStopped := <-watcher.controlChan:
882 | 			if needsToBeStopped {
883 | 				// a bare break would only leave the select; clean up and stop here
884 | 				watcher.cleanup()
885 | 				return nil
886 | 			}
887 | 			now := time.Now()
888 | 			err := watcher.handler.OnChange(now)
889 | 			if err != nil {
890 | 				watcher.input.logger.Error("%s", err.Error())
891 | 				return err
892 | 			}
893 | 			return ik.Continue
894 | 		}
895 | 	}
896 | }
897 |
898 | func (watcher *TailWatcher) Shutdown() error {
899 | watcher.controlChan <- true
900 | return nil
901 | }
902 |
903 | func (watcher *TailWatcher) parseLine(line string) error {
904 | return watcher.lineParser.Feed(line)
905 | }
906 |
907 | func buildTagFromPath(path string) string {
908 | b := make([]byte, 0, len(path))
909 | state := 0
910 | for i := 0; i < len(path); i += 1 {
911 | c := path[i]
912 | 		if c == '/' {
913 | state = 1
914 | } else {
915 | if state == 1 {
916 | b = append(b, '.')
917 | state = 0
918 | }
919 | b = append(b, c)
920 | }
921 | }
922 | if state == 1 {
923 | b = append(b, '.')
924 | }
925 | 	if len(b) > 0 && b[0] == '.' {
926 | b = b[1:]
927 | }
928 | return string(b)
929 | }
930 |
931 | func (input *TailInput) openTailWatcher(path string) (*TailWatcher, error) {
932 | _, ok := input.watchers[path]
933 | if ok {
934 | return nil, errors.New(fmt.Sprintf("watcher for path %s already exists", path))
935 | }
936 | tailFileInfo := input.positionFile.Get(path)
937 | defer tailFileInfo.Dispose()
938 | target, err := openTarget(path)
939 | if err != nil {
940 | if os.IsNotExist(err) {
941 | target = TailTarget{path, nil, 0, fileid.FileId{}}
942 | } else {
943 | return nil, err
944 | }
945 | }
946 | if tailFileInfo.IsNew() {
947 | tailFileInfo.SetFileId(target.id)
948 | tailFileInfo.SetPosition(target.size)
949 | err = tailFileInfo.Save()
950 | if err != nil {
951 | return nil, err
952 | }
953 | }
954 | watcher := &TailWatcher{
955 | input: input,
956 | synthesizedTag: buildTagFromPath(path),
957 | }
958 | handler, err := NewTailEventHandler(
959 | input.logger,
960 | target,
961 | tailFileInfo.GetPosition(),
962 | input.rotateWait,
963 | input.readBufferSize,
964 | nil, // decoder
965 | func(target TailTarget, position int64) error {
966 | tailFileInfo := watcher.tailFileInfo
967 | tailFileInfo.SetFileId(target.id)
968 | tailFileInfo.SetPosition(position)
969 | return tailFileInfo.Save()
970 | },
971 | func(line string) error {
972 | return watcher.parseLine(line)
973 | },
974 | func() error {
975 | return watcher.tailFileInfo.Dispose()
976 | },
977 | )
978 | if err != nil {
979 | return nil, err
980 | }
981 | lineParser, err := input.lineParserFactory.New(func(record ik.FluentRecord) error {
982 | record.Tag = input.tagPrefix
983 | input.pump.EmitOne(record)
984 | return nil
985 | })
986 | if err != nil {
987 | handler.Dispose()
988 | return nil, err
989 | }
990 | newStatWatcher, err := fsnotify.NewWatcher()
991 | if err != nil {
992 | handler.Dispose()
993 | return nil, err
994 | }
995 | err = newStatWatcher.Watch(path)
996 | if err != nil {
997 | newStatWatcher.Close()
998 | handler.Dispose()
999 | return nil, err
1000 | }
1001 |
1002 | watcher.input = input
1003 | watcher.tailFileInfo = tailFileInfo.Duplicate()
1004 | watcher.handler = handler
1005 | watcher.statWatcher = newStatWatcher
1006 | watcher.lineParser = lineParser
1007 | 	watcher.timer = time.NewTicker(time.Second) // XXX: polling interval is hard-coded
1008 | watcher.controlChan = make(chan bool, 1)
1009 |
1010 | 	err = input.engine.Spawn(watcher)
1011 | 	if err != nil {
1012 | 		watcher.cleanup()
1013 | 		return nil, err
1014 | 	}
1015 | watcher.controlChan <- false
1016 | return watcher, nil
1017 | }
1018 |
1019 | type TailInput struct {
1020 | factory *TailInputFactory
1021 | engine ik.Engine
1022 | port ik.Port
1023 | logger ik.Logger
1024 | pathSet *PathSet
1025 | tagPrefix string
1026 | tagSuffix string
1027 | rotateWait time.Duration
1028 | readFromHead bool
1029 | refreshInterval time.Duration
1030 | readBufferSize int
1031 | lineParserFactory ik.LineParserFactory
1032 | positionFile *TailPositionFile
1033 | pump *ik.RecordPump
1034 | watchers map[string]*TailWatcher
1035 | refreshTimer *time.Ticker
1036 | controlChan chan struct{}
1037 | }
1038 |
1039 | func (input *TailInput) Factory() ik.Plugin {
1040 | return input.factory
1041 | }
1042 |
1043 | func (input *TailInput) Port() ik.Port {
1044 | return input.port
1045 | }
1046 |
1047 | func (input *TailInput) Run() error {
1048 | for {
1049 | select {
1050 | case <-input.refreshTimer.C:
1051 | err := input.refreshWatchers()
1052 | if err != nil {
1053 | return err
1054 | }
1055 | return ik.Continue
1056 | 		case <-input.controlChan:
1057 | 			// stop requested; release the pump and the position file before returning
1058 | 			return input.cleanup()
1059 | 		}
1060 | 	}
1061 | }
1062 |
1063 | func (input *TailInput) cleanup() error {
1064 | input.refreshTimer.Stop()
1065 | 	err := input.pump.Shutdown()
1066 | 	if err != nil {
1067 | 		input.logger.Error("%s", err.Error())
1068 | 	}
1069 | 	err_ := input.positionFile.Dispose()
1070 | 	if err_ != nil {
1071 | 		input.logger.Error("%s", err_.Error())
1072 | 		if err == nil {
1073 | 			err = err_
1074 | 		}
1075 | 	}
1076 | return err
1077 | }
1078 |
1079 | func (input *TailInput) Shutdown() error {
1080 | input.controlChan <- struct{}{}
1081 | return nil
1082 | }
1083 |
1084 | func (input *TailInput) Dispose() error {
1085 | err := input.Shutdown()
1086 | return err
1087 | }
1088 |
1089 | func (input *TailInput) refreshWatchers() error {
1090 | deleted := make([]*TailWatcher, 0, 8)
1091 | added := make([]*TailWatcher, 0, 8)
1092 | failed := true
1093 | defer func() {
1094 | if failed {
1095 | for _, watcher := range added {
1096 | watcher.Shutdown()
1097 | }
1098 | }
1099 | }()
1100 | newPaths, err := input.pathSet.Expand()
1101 | if err != nil {
1102 | return err
1103 | }
1104 | for path, watcher := range input.watchers {
1105 | _, ok := newPaths[path]
1106 | if !ok {
1107 | deleted = append(deleted, watcher)
1108 | }
1109 | }
1110 | for path, _ := range newPaths {
1111 | _, ok := input.watchers[path]
1112 | if !ok {
1113 | watcher, err := input.openTailWatcher(path)
1114 | if err != nil {
1115 | return err
1116 | }
1117 | added = append(added, watcher)
1118 | }
1119 | }
1120 | input.logger.Info("Refreshing watchers...")
1121 | for _, watcher := range deleted {
1122 | delete(input.watchers, watcher.tailFileInfo.Path())
1123 | watcher.Shutdown()
1124 | input.logger.Info("Deleted watcher for %s", watcher.tailFileInfo.Path())
1125 | }
1126 | failed = false
1127 | for _, watcher := range added {
1128 | input.watchers[watcher.tailFileInfo.Path()] = watcher
1129 | input.logger.Info("Added watcher for %s", watcher.tailFileInfo.Path())
1130 | }
1131 | return nil
1132 | }
1133 |
1134 | func newTailInput(
1135 | factory *TailInputFactory,
1136 | logger ik.Logger,
1137 | engine ik.Engine,
1138 | port ik.Port,
1139 | pathSet *PathSet,
1140 | lineParserFactory ik.LineParserFactory,
1141 | tagPrefix string,
1142 | tagSuffix string,
1143 | rotateWait time.Duration,
1144 | positionFilePath string,
1145 | readFromHead bool,
1146 | refreshInterval time.Duration,
1147 | readBufferSize int,
1148 | ) (*TailInput, error) {
1149 | failed := true
1150 | positionFile, err := openPositionFile(logger, positionFilePath)
1151 | if err != nil {
1152 | return nil, err
1153 | }
1154 | defer func() {
1155 | if failed {
1156 | positionFile.Dispose()
1157 | }
1158 | }()
1159 | pump := ik.NewRecordPump(port, DefaultBacklogSize)
1160 | defer func() {
1161 | if failed {
1162 | pump.Shutdown()
1163 | }
1164 | }()
1165 | input := &TailInput{
1166 | factory: factory,
1167 | engine: engine,
1168 | logger: logger,
1169 | port: port,
1170 | pathSet: pathSet,
1171 | tagPrefix: tagPrefix,
1172 | tagSuffix: tagSuffix,
1173 | rotateWait: rotateWait,
1174 | readFromHead: readFromHead,
1175 | refreshInterval: refreshInterval,
1176 | readBufferSize: readBufferSize,
1177 | lineParserFactory: lineParserFactory,
1178 | pump: pump,
1179 | positionFile: positionFile,
1180 | watchers: make(map[string]*TailWatcher),
1181 | controlChan: make(chan struct{}, 1),
1182 | }
1183 | err = engine.Spawn(pump)
1184 | if err != nil {
1185 | return nil, err
1186 | }
1187 | err = input.refreshWatchers()
1188 | if err != nil {
1189 | return nil, err
1190 | }
1191 | failed = false
1192 | input.refreshTimer = time.NewTicker(refreshInterval)
1193 | return input, nil
1194 | }
1195 |
1196 | func (factory *TailInputFactory) BindScorekeeper(*ik.Scorekeeper) {}
1197 |
1198 | func (factory *TailInputFactory) Name() string {
1199 | return "tail"
1200 | }
1201 |
1202 | type PathSet struct {
1203 | patterns []string
1204 | fs http.FileSystem
1205 | }
1206 |
1207 | func (pathSet *PathSet) Expand() (map[string]struct{}, error) {
1208 | s := make(map[string]struct{})
1209 | for _, pattern := range pathSet.patterns {
1210 | paths, err := ik.Glob(pathSet.fs, pattern)
1211 | if err != nil {
1212 | return nil, err
1213 | }
1214 | for _, path := range paths {
1215 | s[path] = struct{}{}
1216 | }
1217 | }
1218 | return s, nil
1219 | }
1220 |
1221 | func newPathSet(fs http.FileSystem, patterns []string) *PathSet {
1222 | return &PathSet{
1223 | patterns: patterns,
1224 | fs: fs,
1225 | }
1226 | }
1227 |
1228 | func splitAndStrip(s string) []string {
1229 | comps := strings.Split(s, ",")
1230 | retval := make([]string, len(comps))
1231 | for i, c := range comps {
1232 | retval[i] = strings.Trim(c, " \t")
1233 | }
1234 | return retval
1235 | }
1236 |
1237 | func (factory *TailInputFactory) New(engine ik.Engine, config *ik.ConfigElement) (ik.Input, error) {
1238 | tagPrefix := ""
1239 | tagSuffix := ""
1240 | rotateWait, _ := time.ParseDuration("5s")
1241 | positionFilePath := ""
1242 | readFromHead := false
1243 | refreshInterval, _ := time.ParseDuration("1m")
1244 | readBufferSize := 4096
1245 |
1246 | pathStr, ok := config.Attrs["path"]
1247 | if !ok {
1248 | return nil, errors.New("required attribute `path' is not specified")
1249 | }
1250 | pathSet := newPathSet(engine.Opener().FileSystem(), splitAndStrip(pathStr))
1251 | tag, ok := config.Attrs["tag"]
1252 | if !ok {
1253 | return nil, errors.New("required attribute `tag' is not specified")
1254 | }
1255 | i := strings.IndexRune(tag, rune('*'))
1256 | if i >= 0 {
1257 | tagPrefix = tag[:i]
1258 | tagSuffix = tag[i+1:]
1259 | } else {
1260 | tagPrefix = tag
1261 | }
1262 | rotateWaitStr, ok := config.Attrs["rotate_wait"]
1263 | if ok {
1264 | var err error
1265 | rotateWait, err = time.ParseDuration(rotateWaitStr)
1266 | if err != nil {
1267 | return nil, err
1268 | }
1269 | }
1270 | positionFilePath, ok = config.Attrs["pos_file"]
1271 | if !ok {
1272 | 		return nil, errors.New("required attribute `pos_file' is not specified")
1273 | }
1274 | readFromHeadStr, ok := config.Attrs["read_from_head"]
1275 | if ok {
1276 | var err error
1277 | readFromHead, err = strconv.ParseBool(readFromHeadStr)
1278 | if err != nil {
1279 | return nil, err
1280 | }
1281 | }
1282 | refreshIntervalStr, ok := config.Attrs["refresh_interval"]
1283 | if ok {
1284 | var err error
1285 | refreshInterval, err = time.ParseDuration(refreshIntervalStr)
1286 | if err != nil {
1287 | return nil, err
1288 | }
1289 | }
1290 | readBufferSizeStr, ok := config.Attrs["read_buffer_size"]
1291 | if ok {
1292 | var err error
1293 | readBufferSize, err = strconv.Atoi(readBufferSizeStr)
1294 | if err != nil {
1295 | return nil, err
1296 | }
1297 | }
1298 |
1299 | format, ok := config.Attrs["format"]
1300 | if !ok {
1301 | 		return nil, errors.New("required attribute `format' is not specified")
1302 | }
1303 |
1304 | lineParserFactoryFactory := engine.LineParserPluginRegistry().LookupLineParserFactoryFactory(format)
1305 | if lineParserFactoryFactory == nil {
1306 | return nil, errors.New(fmt.Sprintf("Format `%s' is not supported", format))
1307 | }
1308 | lineParserFactory, err := lineParserFactoryFactory(engine, config)
1309 | if err != nil {
1310 | return nil, err
1311 | }
1312 |
1313 | return newTailInput(
1314 | factory,
1315 | engine.Logger(),
1316 | engine,
1317 | engine.DefaultPort(),
1318 | pathSet,
1319 | lineParserFactory,
1320 | tagPrefix,
1321 | tagSuffix,
1322 | rotateWait,
1323 | positionFilePath,
1324 | readFromHead,
1325 | refreshInterval,
1326 | readBufferSize,
1327 | )
1328 | }
1329 |
1330 | var _ = AddPlugin(&TailInputFactory{})
1331 |
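
The position file written by this plugin keeps one fixed-width, tab-separated record per tailed path (path, a 20-digit zero-padded position, then one zero-padded column per `fileid.FileId` field), which is what lets `scheduleUpdate` overwrite an entry in place at its recorded offset. A rough illustration follows; the number and width of the trailing `FileId` columns are platform-dependent, and the path and values here are made up.

```go
package main

import "fmt"

func main() {
	// One record: path, TAB, 20-digit position, then the file-id columns
	// (two 64-bit columns are assumed here for a Unix-like device/inode pair).
	fmt.Printf("%s\t%020d\t%020d\t%020d\n", "/var/log/app.log", 1000, 2049, 40961)
}
```
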
--------------------------------------------------------------------------------
/plugins/in_tail_test.go:
--------------------------------------------------------------------------------
1 | package plugins
2 |
3 | import (
4 | fileid "github.com/moriyoshi/go-fileid"
5 | "io"
6 | "io/ioutil"
7 | "strings"
8 | "testing"
9 | )
10 |
11 | func Test_MyBufferedReader(t *testing.T) {
12 | inner := strings.NewReader("aaaaaaaaaaaa\nbbbb\nc")
13 | target := NewMyBufferedReader(inner, 16, 0)
14 | line, ispfx, tryAgain, err := target.ReadLine()
15 | if err != nil {
16 | t.FailNow()
17 | }
18 | if ispfx {
19 | t.Log(14)
20 | t.Fail()
21 | }
22 | if tryAgain {
23 | t.Log(15)
24 | t.Fail()
25 | }
26 | if target.position != 13 {
27 | t.Log(16)
28 | t.Fail()
29 | }
30 | t.Log(string(line))
31 | if string(line) != "aaaaaaaaaaaa" {
32 | t.Log(18)
33 | t.Fail()
34 | }
35 |
36 | line, ispfx, tryAgain, err = target.ReadLine()
37 | if err != nil {
38 | t.FailNow()
39 | }
40 | if ispfx {
41 | t.Log(22)
42 | t.Fail()
43 | }
44 | if tryAgain {
45 | t.Log(23)
46 | t.Fail()
47 | }
48 | if target.position != 18 {
49 | t.Log(24)
50 | t.Fail()
51 | }
52 | t.Log(string(line))
53 | if string(line) != "bbbb" {
54 | t.Log(26)
55 | t.Fail()
56 | }
57 |
58 | line, ispfx, tryAgain, err = target.ReadLine()
59 | if err != nil {
60 | t.FailNow()
61 | }
62 | if ispfx {
63 | t.Log(30)
64 | t.Fail()
65 | }
66 | if !tryAgain {
67 | t.Log(31)
68 | t.Fail()
69 | }
70 | if target.position != 18 {
71 | t.Log(32)
72 | t.Fail()
73 | }
74 | if len(line) != 0 {
75 | t.Log(33)
76 | t.Fail()
77 | }
78 |
79 | line, ispfx, tryAgain, err = target.ReadLine()
80 | if err != nil {
81 | t.FailNow()
82 | }
83 | if ispfx {
84 | t.Log(30)
85 | t.Fail()
86 | }
87 | if !tryAgain {
88 | t.Log(31)
89 | t.Fail()
90 | }
91 | if target.position != 18 {
92 | t.Log(32)
93 | t.Fail()
94 | }
95 | if len(line) != 0 {
96 | t.Log(33)
97 | t.Fail()
98 | }
99 | }
100 |
101 | func Test_MyBufferedReader_EOF(t *testing.T) {
102 | inner := strings.NewReader("aaaaaaaaaaaa\nbbbb\nc\n")
103 | target := NewMyBufferedReader(inner, 16, 0)
104 | line, ispfx, tryAgain, err := target.ReadLine()
105 | if err != nil {
106 | t.FailNow()
107 | }
108 | if ispfx {
109 | t.Log(14)
110 | t.Fail()
111 | }
112 | if tryAgain {
113 | t.Log(15)
114 | t.Fail()
115 | }
116 | if target.position != 13 {
117 | t.Log(16)
118 | t.Fail()
119 | }
120 | t.Log(string(line))
121 | if string(line) != "aaaaaaaaaaaa" {
122 | t.Log(18)
123 | t.Fail()
124 | }
125 |
126 | line, ispfx, tryAgain, err = target.ReadLine()
127 | if err != nil {
128 | t.FailNow()
129 | }
130 | if ispfx {
131 | t.Log(22)
132 | t.Fail()
133 | }
134 | if tryAgain {
135 | t.Log(23)
136 | t.Fail()
137 | }
138 | if target.position != 18 {
139 | t.Log(24)
140 | t.Fail()
141 | }
142 | t.Log(string(line))
143 | if string(line) != "bbbb" {
144 | t.Log(26)
145 | t.Fail()
146 | }
147 |
148 | line, ispfx, tryAgain, err = target.ReadLine()
149 | if err != nil {
150 | t.FailNow()
151 | }
152 | if ispfx {
153 | t.Log(30)
154 | t.Fail()
155 | }
156 | if tryAgain {
157 | t.Log(31)
158 | t.Fail()
159 | }
160 | if target.position != 20 {
161 | t.Log(32)
162 | t.Fail()
163 | }
164 | t.Log(string(line))
165 | if string(line) != "c" {
166 | t.Log(33)
167 | t.Fail()
168 | }
169 |
170 | line, ispfx, tryAgain, err = target.ReadLine()
171 | if err != io.EOF {
172 | t.FailNow()
173 | }
174 | if ispfx {
175 | t.Log(30)
176 | t.Fail()
177 | }
178 | if tryAgain {
179 | t.Log(31)
180 | t.Fail()
181 | }
182 | if target.position != 20 {
183 | t.Log(32)
184 | t.Fail()
185 | }
186 | t.Log(string(line))
187 | if string(line) != "" {
188 | t.Log(33)
189 | t.Fail()
190 | }
191 | }
192 |
193 | func Test_MyBufferedReader_crlf(t *testing.T) {
194 | inner := strings.NewReader("aaaaaaaaaaaa\r\nbbbb\r\nc")
195 | target := NewMyBufferedReader(inner, 16, 0)
196 | line, ispfx, tryAgain, err := target.ReadLine()
197 | if err != nil {
198 | t.FailNow()
199 | }
200 | if ispfx {
201 | t.Log(48)
202 | t.Fail()
203 | }
204 | if tryAgain {
205 | t.Log(49)
206 | t.Fail()
207 | }
208 | if target.position != 14 {
209 | t.Log(50)
210 | t.Fail()
211 | }
212 | t.Log(string(line))
213 | if string(line) != "aaaaaaaaaaaa" {
214 | t.Log(52)
215 | t.Fail()
216 | }
217 |
218 | line, ispfx, tryAgain, err = target.ReadLine()
219 | if err != nil {
220 | t.FailNow()
221 | }
222 | if ispfx {
223 | t.Log(56)
224 | t.Fail()
225 | }
226 | if tryAgain {
227 | t.Log(57)
228 | t.Fail()
229 | }
230 | if target.position != 20 {
231 | t.Log(58)
232 | t.Fail()
233 | }
234 | t.Log(string(line))
235 | if string(line) != "bbbb" {
236 | t.Log(60)
237 | t.Fail()
238 | }
239 |
240 | line, ispfx, tryAgain, err = target.ReadLine()
241 | if err != nil {
242 | t.FailNow()
243 | }
244 | if ispfx {
245 | t.Log(64)
246 | t.Fail()
247 | }
248 | if !tryAgain {
249 | t.Log(65)
250 | t.Fail()
251 | }
252 | if target.position != 20 {
253 | t.Log(66)
254 | t.Fail()
255 | }
256 | if len(line) != 0 {
257 | t.Log(67)
258 | t.Fail()
259 | }
260 |
261 | line, ispfx, tryAgain, err = target.ReadLine()
262 | if err != nil {
263 | t.FailNow()
264 | }
265 | if ispfx {
266 | t.Log(64)
267 | t.Fail()
268 | }
269 | if !tryAgain {
270 | t.Log(65)
271 | t.Fail()
272 | }
273 | if target.position != 20 {
274 | t.Log(66)
275 | t.Fail()
276 | }
277 | if len(line) != 0 {
278 | t.Log(67)
279 | t.Fail()
280 | }
281 | }
282 |
283 | func Test_MyBufferedReader_pfx(t *testing.T) {
284 | inner := strings.NewReader("aaaaaaaa\nbbbb")
285 | target := NewMyBufferedReader(inner, 8, 0)
286 | line, ispfx, tryAgain, err := target.ReadLine()
287 | if err != nil {
288 | t.FailNow()
289 | }
290 | if !ispfx {
291 | t.Log(82)
292 | t.Fail()
293 | }
294 | if tryAgain {
295 | t.Log(83)
296 | t.Fail()
297 | }
298 | if target.position != 8 {
299 | t.Log(84)
300 | t.Fail()
301 | }
302 | t.Log(string(line))
303 | if string(line) != "aaaaaaaa" {
304 | t.Log(86)
305 | t.Fail()
306 | }
307 |
308 | line, ispfx, tryAgain, err = target.ReadLine()
309 | if err != nil {
310 | t.FailNow()
311 | }
312 | if ispfx {
313 | t.Log(90)
314 | t.Fail()
315 | }
316 | if tryAgain {
317 | t.Log(91)
318 | t.Fail()
319 | }
320 | if target.position != 9 {
321 | t.Log(92)
322 | t.Fail()
323 | }
324 | t.Log(string(line))
325 | if string(line) != "" {
326 | t.Log(94)
327 | t.Fail()
328 | }
329 |
330 | line, ispfx, tryAgain, err = target.ReadLine()
331 | if err != nil {
332 | t.FailNow()
333 | }
334 | if ispfx {
335 | t.Log(98)
336 | t.Fail()
337 | }
338 | if !tryAgain {
339 | t.Log(99)
340 | t.Fail()
341 | }
342 | if target.position != 9 {
343 | t.Log(100)
344 | t.Fail()
345 | }
346 | if len(line) != 0 {
347 | t.Log(101)
348 | t.Fail()
349 | }
350 |
351 | line, ispfx, tryAgain, err = target.ReadLine()
352 | if err != nil {
353 | t.FailNow()
354 | }
355 | if ispfx {
356 | t.Log(98)
357 | t.Fail()
358 | }
359 | if !tryAgain {
360 | t.Log(99)
361 | t.Fail()
362 | }
363 | if target.position != 9 {
364 | t.Log(100)
365 | t.Fail()
366 | }
367 | if len(line) != 0 {
368 | t.Log(101)
369 | t.Fail()
370 | }
371 | }
372 |
373 | func Test_marshalledSizeOfPositionFileData(t *testing.T) {
374 | tempFile, err := ioutil.TempFile("", "in_tail")
375 | if err != nil {
376 | t.FailNow()
377 | }
378 | defer tempFile.Close()
379 | id, err := fileid.GetFileId(tempFile.Name(), true)
380 | if err != nil {
381 | t.FailNow()
382 | }
383 | blob := marshalPositionFileData(&tailPositionFileData{
384 | Path: tempFile.Name(),
385 | Position: 1000,
386 | Id: id,
387 | })
388 | t.Log(string(blob))
389 | x := tailPositionFileData{}
390 | consumed, err := unmarshalPositionFileData(&x, blob)
391 | if err != nil {
392 | t.Log(err.Error())
393 | t.FailNow()
394 | }
395 | t.Logf("%d %d", consumed, len(blob))
396 | if consumed != len(blob) {
397 | t.Fail()
398 | }
399 | if x.Position != 1000 {
400 | t.Fail()
401 | }
402 | if !fileid.IsSame(x.Id, id) {
403 | t.Fail()
404 | }
405 | }
406 |
407 | func Test_buildTagFromPath(t *testing.T) {
408 | var result string
409 | result = buildTagFromPath("a/b/c")
410 | t.Log(result)
411 | if result != "a.b.c" {
412 | t.Fail()
413 | }
414 | result = buildTagFromPath("a//b/c")
415 | t.Log(result)
416 | if result != "a.b.c" {
417 | t.Fail()
418 | }
419 | result = buildTagFromPath("/a//b/c")
420 | t.Log(result)
421 | if result != "a.b.c" {
422 | t.Fail()
423 | }
424 | }
425 |
--------------------------------------------------------------------------------
/plugins/out_file.go:
--------------------------------------------------------------------------------
1 | package plugins
2 |
3 | import (
4 | "compress/gzip"
5 | "encoding/json"
6 | "errors"
7 | "fmt"
8 | strftime "github.com/jehiah/go-strftime"
9 | "github.com/moriyoshi/ik"
10 | jnl "github.com/moriyoshi/ik/journal"
11 | "io"
12 | "math/rand"
13 | "os"
14 | "path"
15 | "strconv"
16 | "strings"
17 | "time"
18 | )
19 |
20 | type FileOutput struct {
21 | factory *FileOutputFactory
22 | logger ik.Logger
23 | pathPrefix string
24 | pathSuffix string
25 | symlinkPath string
26 | permission os.FileMode
27 | compressionFormat int
28 | journalGroup ik.JournalGroup
29 | slicer *ik.Slicer
30 | timeFormat string
31 | timeSliceFormat string
32 | location *time.Location
33 | c chan []ik.FluentRecordSet
34 | cancel chan bool
35 | disableDraining bool
36 | }
37 |
38 | type FileOutputPacker struct {
39 | output *FileOutput
40 | }
41 |
42 | type FileOutputFactory struct {
43 | }
44 |
45 | const (
46 | compressionNone = 0
47 | compressionGzip = 1
48 | )
49 |
50 | func (packer *FileOutputPacker) Pack(record ik.FluentRecord) ([]byte, error) {
51 | formattedData, err := packer.output.formatData(record.Data)
52 | if err != nil {
53 | return nil, err
54 | }
55 | return ([]byte)(fmt.Sprintf(
56 | "%s\t%s\t%s\n",
57 | packer.output.formatTime(record.Timestamp),
58 | record.Tag,
59 | formattedData,
60 | )), nil
61 | }
62 |
63 | func (output *FileOutput) formatTime(timestamp uint64) string {
64 | timestamp_ := time.Unix(int64(timestamp), 0)
65 | if output.timeFormat == "" {
66 | return timestamp_.Format(time.RFC3339)
67 | } else {
68 | return strftime.Format(output.timeFormat, timestamp_)
69 | }
70 | }
71 |
72 | func (output *FileOutput) formatData(data map[string]interface{}) (string, error) {
73 | b, err := json.Marshal(data)
74 | if err != nil {
75 | return "", err
76 | }
77 | return string(b), nil // XXX: byte => rune
78 | }
79 |
80 | func (output *FileOutput) Emit(recordSets []ik.FluentRecordSet) error {
81 | output.c <- recordSets
82 | return nil
83 | }
84 |
85 | func (output *FileOutput) Factory() ik.Plugin {
86 | return output.factory
87 | }
88 |
89 | func (output *FileOutput) Run() error {
90 | select {
91 | case <-output.cancel:
92 | return nil
93 | case recordSets := <-output.c:
94 | err := output.slicer.Emit(recordSets)
95 | if err != nil {
96 | return err
97 | }
98 | }
99 | return ik.Continue
100 | }
101 |
102 | func (output *FileOutput) Shutdown() error {
103 | output.cancel <- true
104 | return output.journalGroup.Dispose()
105 | }
106 |
107 | func (output *FileOutput) Dispose() {
108 | output.Shutdown()
109 | }
110 |
111 | func buildNextPathName(key string, pathPrefix string, pathSuffix string, suffix string) (string, error) {
112 | i := 0
113 | var path_ string
114 | for {
115 | path_ = fmt.Sprintf(
116 | "%s%s_%d%s%s",
117 | pathPrefix,
118 | key,
119 | i,
120 | pathSuffix,
121 | suffix,
122 | )
123 | _, err := os.Stat(path_)
124 | if err != nil {
125 | if os.IsNotExist(err) {
126 | err = nil
127 | break
128 | } else {
129 | return "", err
130 | }
131 | }
132 | i += 1
133 | }
134 | dir := path.Dir(path_)
135 | err := os.MkdirAll(dir, os.FileMode(os.ModePerm))
136 | if err != nil {
137 | return "", err
138 | }
139 | return path_, nil
140 | }
141 |
142 | func (output *FileOutput) flush(key string, chunk ik.JournalChunk) error {
143 | suffix := ""
144 | if output.compressionFormat == compressionGzip {
145 | suffix = ".gz"
146 | }
147 | outPath, err := buildNextPathName(
148 | key,
149 | output.pathPrefix,
150 | output.pathSuffix,
151 | suffix,
152 | )
153 | if err != nil {
154 | return err
155 | }
156 | var writer io.WriteCloser
157 | writer, err = os.OpenFile(outPath, os.O_CREATE|os.O_EXCL|os.O_WRONLY, output.permission)
158 | if err != nil {
159 | return err
160 | }
161 | defer writer.Close()
162 |
163 | if output.compressionFormat == compressionGzip {
164 | writer = gzip.NewWriter(writer)
165 | defer writer.Close()
166 | }
167 |
168 | reader, err := chunk.GetReader()
169 | closer, _ := reader.(io.Closer)
170 | if closer != nil {
171 | defer closer.Close()
172 | }
173 | if err != nil {
174 | return err
175 | }
176 | _, err = io.Copy(writer, reader)
177 | if err != nil {
178 | return err
179 | }
180 | return nil
181 | }
182 |
183 | func (output *FileOutput) attachListeners(journal ik.Journal) {
184 | if output.symlinkPath != "" {
185 | journal.AddNewChunkListener(func(chunk ik.JournalChunk) error {
186 | defer chunk.Dispose()
187 | wrapper, ok := chunk.(*jnl.FileJournalChunkWrapper)
188 | if !ok {
189 | return nil
190 | }
191 | err := os.Remove(output.symlinkPath)
192 | if err != nil && !os.IsNotExist(err) {
193 | return err
194 | } else {
195 | 			err = os.Symlink(wrapper.Path(), output.symlinkPath) // the link at symlinkPath points at the current chunk
196 | }
197 | if err != nil {
198 | output.logger.Error("Failed to create symbolic link %s", output.symlinkPath)
199 | }
200 | return err
201 | })
202 | }
203 | journal.AddFlushListener(func(chunk ik.JournalChunk) error {
204 | defer chunk.Dispose()
205 | chunk.TakeOwnership()
206 | return output.flush(journal.Key(), chunk)
207 | })
208 | }
209 |
210 | func newFileOutput(factory *FileOutputFactory, logger ik.Logger, randSource rand.Source, pathPrefix string, pathSuffix string, timeFormat string, compressionFormat int, symlinkPath string, permission os.FileMode, bufferChunkLimit int64, timeSliceFormat string, disableDraining bool) (*FileOutput, error) {
211 | if timeSliceFormat == "" {
212 | timeSliceFormat = "%Y%m%d"
213 | }
214 | journalGroupFactory := jnl.NewFileJournalGroupFactory(
215 | logger,
216 | randSource,
217 | func() time.Time { return time.Now() },
218 | pathSuffix,
219 | permission,
220 | bufferChunkLimit,
221 | )
222 | retval := &FileOutput{
223 | factory: factory,
224 | logger: logger,
225 | pathPrefix: pathPrefix,
226 | pathSuffix: pathSuffix,
227 | symlinkPath: symlinkPath,
228 | permission: permission,
229 | compressionFormat: compressionFormat,
230 | timeFormat: timeFormat,
231 | timeSliceFormat: timeSliceFormat,
232 | location: time.UTC,
233 | c: make(chan []ik.FluentRecordSet, 100 /* FIXME */),
234 | cancel: make(chan bool),
235 | disableDraining: disableDraining,
236 | }
237 | journalGroup, err := journalGroupFactory.GetJournalGroup(pathPrefix, retval)
238 | if err != nil {
239 | return nil, err
240 | }
241 |
242 | slicer := ik.NewSlicer(
243 | journalGroup,
244 | func(record ik.FluentRecord) string {
245 | timestamp_ := time.Unix(int64(record.Timestamp), 0)
246 | return strftime.Format(retval.timeSliceFormat, timestamp_)
247 | },
248 | &FileOutputPacker{retval},
249 | logger,
250 | )
251 | slicer.AddNewKeyEventListener(func(last ik.Journal, next ik.Journal) error {
252 | err := (error)(nil)
253 | if last != nil {
254 | err = last.Flush(func(chunk ik.JournalChunk) error {
255 | defer chunk.Dispose()
256 | chunk.TakeOwnership()
257 | return retval.flush(last.Key(), chunk)
258 | })
259 | }
260 | if next != nil {
261 | if !retval.disableDraining {
262 | retval.attachListeners(next)
263 | }
264 | }
265 | return err
266 | })
267 | retval.journalGroup = journalGroup
268 | retval.slicer = slicer
269 | if !disableDraining {
270 | currentKey := strftime.Format(timeSliceFormat, time.Now())
271 | for _, key := range journalGroup.GetJournalKeys() {
272 | if key == currentKey {
273 | journal := journalGroup.GetJournal(key)
274 | retval.attachListeners(journal)
275 | journal.Flush(nil)
276 | }
277 | }
278 | }
279 | return retval, nil
280 | }
281 |
282 | func (factory *FileOutputFactory) Name() string {
283 | return "file"
284 | }
285 |
286 | func (factory *FileOutputFactory) New(engine ik.Engine, config *ik.ConfigElement) (ik.Output, error) {
287 | pathPrefix := ""
288 | pathSuffix := ""
289 | timeFormat := ""
290 | compressionFormat := compressionNone
291 | symlinkPath := ""
292 | permission := 0666
293 | bufferChunkLimit := int64(8 * 1024 * 1024) // 8MB
294 | timeSliceFormat := ""
295 | disableDraining := false
296 |
297 | path, ok := config.Attrs["path"]
298 | if !ok {
299 | return nil, errors.New("'path' parameter is required on file output")
300 | }
301 | timeFormat, _ = config.Attrs["time_format"]
302 | compressionFormatStr, ok := config.Attrs["compress"]
303 | if ok {
304 | if compressionFormatStr == "gz" || compressionFormatStr == "gzip" {
305 | compressionFormat = compressionGzip
306 | } else {
307 | return nil, errors.New("unknown compression format: " + compressionFormatStr)
308 | }
309 | }
310 | symlinkPath, _ = config.Attrs["symlink_path"]
311 | permissionStr, ok := config.Attrs["permission"]
312 | if ok {
313 | 		permission_, err := strconv.ParseInt(permissionStr, 8, 32) // the value is octal, e.g. "0644"
314 | 		if err != nil {
315 | 			return nil, err
316 | 		}
317 | 		permission = int(permission_)
318 | }
319 | pos := strings.Index(path, "*")
320 | if pos >= 0 {
321 | pathPrefix = path[0:pos]
322 | pathSuffix = path[pos+1:]
323 | } else {
324 | pathPrefix = path + "."
325 | pathSuffix = ".log"
326 | }
327 | timeSliceFormat, _ = config.Attrs["time_slice_format"]
328 |
329 | bufferChunkLimitStr, ok := config.Attrs["buffer_chunk_limit"]
330 | if ok {
331 | var err error
332 | bufferChunkLimit, err = ik.ParseCapacityString(bufferChunkLimitStr)
333 | if err != nil {
334 | return nil, err
335 | }
336 | }
337 |
338 | disableDrainingStr, ok := config.Attrs["disable_draining"]
339 | if ok {
340 | var err error
341 | disableDraining, err = strconv.ParseBool(disableDrainingStr)
342 | if err != nil {
343 | return nil, err
344 | }
345 | }
346 |
347 | return newFileOutput(
348 | factory,
349 | engine.Logger(),
350 | engine.RandSource(),
351 | pathPrefix,
352 | pathSuffix,
353 | timeFormat,
354 | compressionFormat,
355 | symlinkPath,
356 | os.FileMode(permission),
357 | bufferChunkLimit,
358 | timeSliceFormat,
359 | disableDraining,
360 | )
361 | }
362 |
363 | func (factory *FileOutputFactory) BindScorekeeper(scorekeeper *ik.Scorekeeper) {
364 | }
365 |
366 | var _ = AddPlugin(&FileOutputFactory{})
367 |
--------------------------------------------------------------------------------
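Note on configuration: the attributes read by `FileOutputFactory.New` above map directly onto a `<match>` section of the configuration file. As a rough sketch, an equivalent `ik.ConfigElement` built by hand from inside the plugins package could look like the following; every value is made up for illustration, and `engine` stands for an `ik.Engine` supplied by the running core.

```go
// Hand-built equivalent of a parsed <match> section for the file output.
// All attribute values below are illustrative, not defaults.
conf := &ik.ConfigElement{
	Name: "match",
	Args: "**",
	Attrs: map[string]string{
		"path":               "/var/log/ik/access.*.log", // '*' splits into pathPrefix / pathSuffix
		"time_slice_format":  "%Y%m%d",
		"compress":           "gz",
		"symlink_path":       "/var/log/ik/access.log",
		"permission":         "0644", // parsed as octal
		"buffer_chunk_limit": "8MiB",
		"disable_draining":   "false",
	},
}
output, err := (&FileOutputFactory{}).New(engine, conf)
```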
/plugins/out_forward.go:
--------------------------------------------------------------------------------
1 | package plugins
2 |
3 | import (
4 | "bytes"
5 | "errors"
6 | "fmt"
7 | "github.com/moriyoshi/ik"
8 | "github.com/ugorji/go/codec"
9 | "net"
10 | "reflect"
11 | "strconv"
12 | "time"
13 | )
14 |
15 | type ForwardOutput struct {
16 | factory *ForwardOutputFactory
17 | logger ik.Logger
18 | codec *codec.MsgpackHandle
19 | bind string
20 | enc *codec.Encoder
21 | conn net.Conn
22 | buffer bytes.Buffer
23 | }
24 |
25 | func (output *ForwardOutput) encodeEntry(tag string, record ik.TinyFluentRecord) error {
26 | v := []interface{}{tag, record.Timestamp, record.Data}
27 | if output.enc == nil {
28 | output.enc = codec.NewEncoder(&output.buffer, output.codec)
29 | }
30 | err := output.enc.Encode(v)
31 | if err != nil {
32 | return err
33 | }
34 | return err
35 | }
36 |
37 | func (output *ForwardOutput) encodeRecordSet(recordSet ik.FluentRecordSet) error {
38 | v := []interface{}{recordSet.Tag, recordSet.Records}
39 | if output.enc == nil {
40 | output.enc = codec.NewEncoder(&output.buffer, output.codec)
41 | }
42 | err := output.enc.Encode(v)
43 | if err != nil {
44 | return err
45 | }
46 | return err
47 | }
48 |
49 | func (output *ForwardOutput) flush() error {
50 | if output.conn == nil {
51 | conn, err := net.Dial("tcp", output.bind)
52 | if err != nil {
53 | output.logger.Error("%#v", err.Error())
54 | return err
55 | } else {
56 | output.conn = conn
57 | }
58 | }
59 | n, err := output.buffer.WriteTo(output.conn)
60 | if err != nil {
61 | output.logger.Error("Write failed. size: %d, buf size: %d, error: %#v", n, output.buffer.Len(), err.Error())
62 | output.conn = nil
63 | return err
64 | }
65 | if n > 0 {
66 | output.logger.Notice("Forwarded: %d bytes (left: %d bytes)\n", n, output.buffer.Len())
67 | }
68 | output.conn.Close()
69 | output.conn = nil
70 | return nil
71 | }
72 |
73 | func (output *ForwardOutput) run_flush(flush_interval int) {
74 | ticker := time.NewTicker(time.Duration(flush_interval) * time.Second)
75 | go func() {
76 | for {
77 | select {
78 | case <-ticker.C:
79 | output.flush()
80 | }
81 | }
82 | }()
83 | }
84 |
85 | func (output *ForwardOutput) Emit(recordSets []ik.FluentRecordSet) error {
86 | 	for _, recordSet := range recordSets {
87 | err := output.encodeRecordSet(recordSet)
88 | if err != nil {
89 | output.logger.Error("%#v", err)
90 | return err
91 | }
92 | }
93 | return nil
94 | }
95 |
96 | func (output *ForwardOutput) Factory() ik.Plugin {
97 | return output.factory
98 | }
99 |
100 | func (output *ForwardOutput) Run() error {
101 | 	time.Sleep(time.Second)
102 | return ik.Continue
103 | }
104 |
105 | func (output *ForwardOutput) Shutdown() error {
106 | return nil
107 | }
108 |
109 | type ForwardOutputFactory struct {
110 | }
111 |
112 | func newForwardOutput(factory *ForwardOutputFactory, logger ik.Logger, bind string) (*ForwardOutput, error) {
113 | _codec := codec.MsgpackHandle{}
114 | _codec.MapType = reflect.TypeOf(map[string]interface{}(nil))
115 | _codec.RawToString = false
116 | _codec.StructToArray = true
117 | return &ForwardOutput{
118 | factory: factory,
119 | logger: logger,
120 | codec: &_codec,
121 | bind: bind,
122 | }, nil
123 | }
124 |
125 | func (factory *ForwardOutputFactory) Name() string {
126 | return "forward"
127 | }
128 |
129 | func (factory *ForwardOutputFactory) New(engine ik.Engine, config *ik.ConfigElement) (ik.Output, error) {
130 | host, ok := config.Attrs["host"]
131 | if !ok {
132 | host = "localhost"
133 | }
134 | netPort, ok := config.Attrs["port"]
135 | if !ok {
136 | netPort = "24224"
137 | }
138 | flush_interval_str, ok := config.Attrs["flush_interval"]
139 | if !ok {
140 | flush_interval_str = "60"
141 | }
142 | flush_interval, err := strconv.Atoi(flush_interval_str)
143 | if err != nil {
144 | 		return nil, errors.New(fmt.Sprintf("Failed to parse flush_interval: %v", err))
145 | }
146 | bind := host + ":" + netPort
147 | output, err := newForwardOutput(factory, engine.Logger(), bind)
148 | output.run_flush(flush_interval)
149 | return output, err
150 | }
151 |
152 | func (factory *ForwardOutputFactory) BindScorekeeper(scorekeeper *ik.Scorekeeper) {
153 | }
154 |
155 | var _ = AddPlugin(&ForwardOutputFactory{})
156 |
--------------------------------------------------------------------------------
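The forward output accumulates msgpack-encoded record sets in an in-memory buffer and writes the whole buffer to the peer on each timer-driven flush. The wire format produced by `encodeRecordSet` is the array `[tag, [[timestamp, data], ...]]`; below is a minimal, self-contained sketch of that encoding with made-up record contents, using the same codec package. In the plugin itself, `StructToArray` on the handle is what makes `TinyFluentRecord` values come out as `[timestamp, data]` arrays.

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/ugorji/go/codec"
)

func main() {
	var buf bytes.Buffer
	enc := codec.NewEncoder(&buf, &codec.MsgpackHandle{})
	// One record set: ["app.access", [[1400000000, {"code": 200}]]]
	recordSet := []interface{}{
		"app.access",
		[]interface{}{
			[]interface{}{1400000000, map[string]interface{}{"code": 200}},
		},
	}
	if err := enc.Encode(recordSet); err != nil {
		panic(err)
	}
	fmt.Printf("%d bytes of msgpack ready to be forwarded\n", buf.Len())
}
```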
/plugins/out_stdout.go:
--------------------------------------------------------------------------------
1 | package plugins
2 |
3 | import (
4 | "fmt"
5 | "github.com/moriyoshi/ik"
6 | "os"
7 | "time"
8 | )
9 |
10 | type StdoutOutput struct {
11 | factory *StdoutOutputFactory
12 | logger ik.Logger
13 | }
14 |
15 | func (output *StdoutOutput) Emit(recordSets []ik.FluentRecordSet) error {
16 | for _, recordSet := range recordSets {
17 | for _, record := range recordSet.Records {
18 | fmt.Fprintf(os.Stdout, "%d %s: %s\n", record.Timestamp, recordSet.Tag, record.Data)
19 | }
20 | }
21 | return nil
22 | }
23 |
24 | func (output *StdoutOutput) Factory() ik.Plugin {
25 | return output.factory
26 | }
27 |
28 | func (output *StdoutOutput) Run() error {
29 | 	time.Sleep(time.Second)
30 | return ik.Continue
31 | }
32 |
33 | func (output *StdoutOutput) Shutdown() error {
34 | return nil
35 | }
36 |
37 | func (output *StdoutOutput) Dispose() {
38 | output.Shutdown()
39 | }
40 |
41 | type StdoutOutputFactory struct {
42 | }
43 |
44 | func newStdoutOutput(factory *StdoutOutputFactory, logger ik.Logger) (*StdoutOutput, error) {
45 | return &StdoutOutput{
46 | factory: factory,
47 | logger: logger,
48 | }, nil
49 | }
50 |
51 | func (factory *StdoutOutputFactory) Name() string {
52 | return "stdout"
53 | }
54 |
55 | func (factory *StdoutOutputFactory) New(engine ik.Engine, _ *ik.ConfigElement) (ik.Output, error) {
56 | return newStdoutOutput(factory, engine.Logger())
57 | }
58 |
59 | func (factory *StdoutOutputFactory) BindScorekeeper(scorekeeper *ik.Scorekeeper) {
60 | }
61 |
62 | var _ = AddPlugin(&StdoutOutputFactory{})
63 |
--------------------------------------------------------------------------------
/plugins/registry.go:
--------------------------------------------------------------------------------
1 | package plugins
2 |
3 | import "github.com/moriyoshi/ik"
4 |
5 | var _plugins []ik.Plugin = make([]ik.Plugin, 0)
6 |
7 | func AddPlugin(plugin ik.Plugin) bool {
8 | _plugins = append(_plugins, plugin)
9 | return false
10 | }
11 |
12 | func GetPlugins() []ik.Plugin {
13 | return _plugins
14 | }
15 |
--------------------------------------------------------------------------------
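Plugins register themselves through the package-level `var _ = AddPlugin(...)` assignment, which runs at package initialization; the engine then discovers them via `GetPlugins`. A minimal, hypothetical output factory following the same pattern (the `null` plugin below is not part of ik):

```go
package plugins

import "github.com/moriyoshi/ik"

// NullOutputFactory is a hypothetical example, not an actual ik plugin.
type NullOutputFactory struct{}

func (factory *NullOutputFactory) Name() string { return "null" }

func (factory *NullOutputFactory) New(engine ik.Engine, config *ik.ConfigElement) (ik.Output, error) {
	// A real factory would read config.Attrs and construct its output here.
	return nil, nil
}

func (factory *NullOutputFactory) BindScorekeeper(scorekeeper *ik.Scorekeeper) {}

// Evaluated during package initialization, before main; this assignment is
// the whole registration mechanism.
var _ = AddPlugin(&NullOutputFactory{})
```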
/record_pump.go:
--------------------------------------------------------------------------------
1 | package ik
2 |
3 | import (
4 | "time"
5 | )
6 |
7 | type RecordPump struct {
8 | port Port
9 | ch chan FluentRecord
10 | control chan bool
11 | buffer map[string]*FluentRecordSet
12 | heartbeat *time.Ticker
13 | }
14 |
15 | func (pump *RecordPump) Port() Port {
16 | return pump.port
17 | }
18 |
19 | func (pump *RecordPump) EmitOne(record FluentRecord) {
20 | pump.ch <- record
21 | }
22 |
23 | func (pump *RecordPump) flush() error {
24 | recordSets := make([]FluentRecordSet, 0, len(pump.buffer))
25 | for _, recordSet := range pump.buffer {
26 | recordSets = append(recordSets, *recordSet)
27 | }
28 | pump.buffer = make(map[string]*FluentRecordSet)
29 | return pump.port.Emit(recordSets)
30 | }
31 |
32 | func (pump *RecordPump) Run() error {
33 | for {
34 | select {
35 | case record := <-pump.ch:
36 | buffer := pump.buffer
37 | recordSet, ok := buffer[record.Tag]
38 | if !ok {
39 | recordSet = &FluentRecordSet{
40 | Tag: record.Tag,
41 | Records: make([]TinyFluentRecord, 0, 16),
42 | }
43 | buffer[record.Tag] = recordSet
44 | }
45 | recordSet.Records = append(recordSet.Records, TinyFluentRecord{
46 | Timestamp: record.Timestamp,
47 | Data: record.Data,
48 | })
49 | break
50 | case <-pump.heartbeat.C:
51 | err := pump.flush()
52 | if err != nil {
53 | return err
54 | }
55 | break
56 | case needsToBeStopped := <-pump.control:
57 | if needsToBeStopped {
58 | pump.heartbeat.Stop()
59 | }
60 | return pump.flush()
61 | }
62 | }
63 | return Continue
64 |
65 | }
66 |
67 | func (pump *RecordPump) Shutdown() error {
68 | pump.control <- true
69 | return nil
70 | }
71 |
72 | func NewRecordPump(port Port, backlog int) *RecordPump {
73 | return &RecordPump{
74 | port: port,
75 | ch: make(chan FluentRecord, backlog),
76 | control: make(chan bool, 1),
77 | buffer: make(map[string]*FluentRecordSet),
78 | 		heartbeat: time.NewTicker(time.Second),
79 | }
80 | }
81 |
--------------------------------------------------------------------------------
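RecordPump groups individual records by tag and pushes the buffered record sets to its Port roughly once a second (the heartbeat ticker above). A minimal usage sketch follows; `countingPort` is a throwaway stand-in, and it assumes that `ik.Port` only requires `Emit` and that `FluentRecord.Data` is a `map[string]interface{}`.

```go
// countingPort is an illustrative stand-in for a real downstream port.
type countingPort struct{ records int }

func (port *countingPort) Emit(recordSets []ik.FluentRecordSet) error {
	for _, recordSet := range recordSets {
		port.records += len(recordSet.Records)
	}
	return nil
}

func pumpSketch() {
	pump := ik.NewRecordPump(&countingPort{}, 128) // up to 128 records may be queued
	go pump.Run()                                  // normally supervised by the Spawner
	pump.EmitOne(ik.FluentRecord{
		Tag:       "app.access",
		Timestamp: 1400000000,
		Data:      map[string]interface{}{"code": 200},
	})
	pump.Shutdown() // stops the heartbeat and flushes what is still buffered
}
```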
/scorekeeper.go:
--------------------------------------------------------------------------------
1 | package ik
2 |
3 | import (
4 | "errors"
5 | "fmt"
6 | )
7 |
8 | type Scorekeeper struct {
9 | logger Logger
10 | topics map[Plugin]map[string]ScorekeeperTopic
11 | }
12 |
13 | func (sk *Scorekeeper) GetPlugins() []Plugin {
14 | plugins := make([]Plugin, len(sk.topics))
15 | i := 0
16 | 	for plugin := range sk.topics {
17 | plugins[i] = plugin
18 | i += 1
19 | }
20 | return plugins
21 | }
22 |
23 | func (sk *Scorekeeper) GetTopics(plugin Plugin) []ScorekeeperTopic {
24 | entries, ok := sk.topics[plugin]
25 | if !ok {
26 | return []ScorekeeperTopic{}
27 | }
28 | topics := make([]ScorekeeperTopic, len(entries))
29 | i := 0
30 | for _, entry := range entries {
31 | topics[i] = entry
32 | i += 1
33 | }
34 | return topics
35 | }
36 |
37 | func (sk *Scorekeeper) AddTopic(topic ScorekeeperTopic) {
38 | sk.logger.Info("AddTopic: plugin=%s, name=%s", topic.Plugin.Name(), topic.Name)
39 | entries, ok := sk.topics[topic.Plugin]
40 | if !ok {
41 | entries = make(map[string]ScorekeeperTopic)
42 | sk.topics[topic.Plugin] = entries
43 | }
44 | entries[topic.Name] = topic
45 | }
46 |
47 | func (sk *Scorekeeper) Fetch(plugin Plugin, name string) (ScoreValueFetcher, error) {
48 | var ok bool
49 | var entries map[string]ScorekeeperTopic
50 | var entry ScorekeeperTopic
51 | entries, ok = sk.topics[plugin]
52 | if ok {
53 | entry, ok = entries[name]
54 | }
55 | if !ok {
56 | return nil, errors.New(fmt.Sprintf("unknown topic: plugin=%s, name=%s", plugin.Name(), name))
57 | }
58 | return entry.Fetcher, nil
59 | }
60 |
61 | func (sk *Scorekeeper) Dispose() {}
62 |
63 | func NewScorekeeper(logger Logger) *Scorekeeper {
64 | return &Scorekeeper{
65 | logger: logger,
66 | topics: make(map[Plugin]map[string]ScorekeeperTopic),
67 | }
68 | }
69 |
--------------------------------------------------------------------------------
/slicer.go:
--------------------------------------------------------------------------------
1 | package ik
2 |
3 | import (
4 | "sync"
5 | "unsafe"
6 | )
7 |
8 | type SlicerNewKeyEventListener func(last Journal, next Journal) error
9 |
10 | type Slicer struct {
11 | journalGroup JournalGroup
12 | keyGetter func(record FluentRecord) string
13 | packer RecordPacker
14 | logger Logger
15 | keys map[string]bool
16 | newKeyEventListeners map[uintptr]SlicerNewKeyEventListener
17 | mtx sync.Mutex
18 | }
19 |
20 | func (slicer *Slicer) notifySlicerNewKeyEventListeners(last Journal, next Journal) {
21 | // lock for slicer must be acquired by caller
22 | for _, listener := range slicer.newKeyEventListeners {
23 | err := listener(last, next)
24 | if err != nil {
25 | slicer.logger.Error("error occurred during notifying flush event: %s", err.Error())
26 | }
27 | }
28 | }
29 |
30 | func (slicer *Slicer) AddNewKeyEventListener(listener SlicerNewKeyEventListener) {
31 | slicer.mtx.Lock()
32 | defer slicer.mtx.Unlock()
33 | // XXX hack!
34 | slicer.newKeyEventListeners[uintptr(*(*unsafe.Pointer)(unsafe.Pointer(&listener)))] = listener
35 | }
36 |
37 | func (slicer *Slicer) Emit(recordSets []FluentRecordSet) error {
38 | journals := make(map[string]Journal)
39 | lastJournal := (Journal)(nil)
40 | for _, recordSet := range recordSets {
41 | tag := recordSet.Tag
42 | for _, record := range recordSet.Records {
43 | fullRecord := FluentRecord{
44 | tag,
45 | record.Timestamp,
46 | record.Data,
47 | }
48 | key := slicer.keyGetter(fullRecord)
49 | data, err := slicer.packer.Pack(fullRecord)
50 | if err != nil {
51 | return err
52 | }
53 | journal, ok := journals[key]
54 | if !ok {
55 | journal = slicer.journalGroup.GetJournal(key)
56 | journals[key] = journal
57 | slicer.mtx.Lock()
58 | _, ok := slicer.keys[key]
59 | slicer.keys[key] = true
60 | slicer.mtx.Unlock()
61 | if !ok {
62 | slicer.notifySlicerNewKeyEventListeners(lastJournal, journal)
63 | }
64 | lastJournal = journal
65 | }
66 | err = journal.Write(data)
67 | if err != nil {
68 | return err
69 | }
70 | }
71 | }
72 | return nil
73 | }
74 |
75 | func NewSlicer(journalGroup JournalGroup, keyGetter func(record FluentRecord) string, packer RecordPacker, logger Logger) *Slicer {
76 | keys := make(map[string]bool)
77 | for _, key := range journalGroup.GetJournalKeys() {
78 | keys[key] = true
79 | }
80 | return &Slicer{
81 | journalGroup: journalGroup,
82 | keyGetter: keyGetter,
83 | packer: packer,
84 | logger: logger,
85 | keys: keys,
86 | newKeyEventListeners: make(map[uintptr]SlicerNewKeyEventListener),
87 | mtx: sync.Mutex{},
88 | }
89 | }
90 |
--------------------------------------------------------------------------------
/spawner.go:
--------------------------------------------------------------------------------
1 | package ik
2 |
3 | import (
4 | "fmt"
5 | "reflect"
6 | "sync"
7 | )
8 |
9 | type descriptorListHead struct {
10 | next *spawneeDescriptor
11 | prev *spawneeDescriptor
12 | }
13 |
14 | type descriptorList struct {
15 | count int
16 | first *spawneeDescriptor
17 | last *spawneeDescriptor
18 | }
19 |
20 | type spawneeDescriptor struct {
21 | head_alive descriptorListHead
22 | head_dead descriptorListHead
23 | id int
24 | spawnee Spawnee
25 | exitStatus error
26 | shutdownRequested bool
27 | mtx sync.Mutex
28 | cond *sync.Cond
29 | }
30 |
31 | type SpawneeStatus struct {
32 | Id int
33 | Spawnee Spawnee
34 | ExitStatus error
35 | }
36 |
37 | type dispatchReturnValue struct {
38 | b bool
39 | s []Spawnee
40 | e error
41 | ss []SpawneeStatus
42 | }
43 |
44 | type dispatch struct {
45 | fn func(Spawnee, chan dispatchReturnValue)
46 | spawnee Spawnee
47 | retval chan dispatchReturnValue
48 | }
49 |
50 | const (
51 | Spawned = 1
52 | Stopped = 2
53 | )
54 |
55 | type ContinueType struct{}
56 |
57 | type Panicked struct {
58 | panic interface{}
59 | }
60 |
61 | func (_ *ContinueType) Error() string { return "" }
62 |
63 | func typeName(type_ reflect.Type) string {
64 | if type_.Kind() == reflect.Ptr {
65 | return "*" + typeName(type_.Elem())
66 | } else {
67 | return type_.Name()
68 | }
69 | }
70 |
71 | func (panicked *Panicked) Error() string {
72 | switch panic_ := panicked.panic.(type) {
73 | case string:
74 | return panic_
75 | case error:
76 | return fmt.Sprintf("(%s) %s", typeName(reflect.TypeOf(panic_)), panic_.Error())
77 | default:
78 | type_ := reflect.TypeOf(panic_)
79 | method, ok := type_.MethodByName("String")
80 | if ok && method.Type.NumIn() == 1 {
81 | result := method.Func.Call([]reflect.Value{reflect.ValueOf(panic_)})
82 | if len(result) == 1 && result[0].Type().Kind() == reflect.String {
83 | return fmt.Sprintf("(%s) %s", typeName(type_), result[0].String())
84 | }
85 | }
86 | return fmt.Sprintf("(%s)", typeName(type_))
87 | }
88 | }
89 |
90 | var Continue = &ContinueType{}
91 |
92 | type NotFoundType struct{}
93 |
94 | func (_ *NotFoundType) Error() string { return "not found" }
95 |
96 | var NotFound = &NotFoundType{}
97 |
98 | type spawnerEvent struct {
99 | t int
100 | spawnee Spawnee
101 | }
102 |
103 | type Spawner struct {
104 | alives descriptorList
105 | deads descriptorList
106 | m map[Spawnee]*spawneeDescriptor
107 | c chan dispatch
108 | mtx sync.Mutex
109 | cond *sync.Cond
110 | lastEvent *spawnerEvent
111 | }
112 |
113 | func newDescriptor(spawnee Spawnee, id int) *spawneeDescriptor {
114 | retval := &spawneeDescriptor{
115 | head_alive: descriptorListHead{nil, nil},
116 | head_dead: descriptorListHead{nil, nil},
117 | id: id,
118 | spawnee: spawnee,
119 | exitStatus: Continue,
120 | shutdownRequested: false,
121 | mtx: sync.Mutex{},
122 | cond: nil,
123 | }
124 | retval.cond = sync.NewCond(&retval.mtx)
125 | return retval
126 | }
127 |
128 | func (spawner *Spawner) spawn(spawnee Spawnee, retval chan dispatchReturnValue) {
129 | go func() {
130 | descriptor := newDescriptor(spawnee, len(spawner.m)+1)
131 | func() {
132 | spawner.mtx.Lock()
133 | defer spawner.mtx.Unlock()
134 | if spawner.alives.last != nil {
135 | spawner.alives.last.head_alive.next = descriptor
136 | descriptor.head_alive.prev = spawner.alives.last
137 | }
138 | if spawner.alives.first == nil {
139 | spawner.alives.first = descriptor
140 | }
141 | spawner.alives.last = descriptor
142 | spawner.alives.count += 1
143 | spawner.m[spawnee] = descriptor
144 | // notify the event
145 | spawner.lastEvent = &spawnerEvent{
146 | t: Spawned,
147 | spawnee: spawnee,
148 | }
149 | spawner.cond.Broadcast()
150 | retval <- dispatchReturnValue{true, nil, nil, nil}
151 | }()
152 | var exitStatus error = nil
153 | func() {
154 | defer func() {
155 | r := recover()
156 | if r != nil {
157 | exitStatus = &Panicked{r}
158 | }
159 | }()
160 | exitStatus = Continue
161 | for exitStatus == Continue {
162 | exitStatus = descriptor.spawnee.Run()
163 | }
164 | }()
165 | func() {
166 | spawner.mtx.Lock()
167 | defer spawner.mtx.Unlock()
168 | descriptor.exitStatus = exitStatus
169 | // remove from alive list
170 | if descriptor.head_alive.prev != nil {
171 | descriptor.head_alive.prev.head_alive.next = descriptor.head_alive.next
172 | } else {
173 | spawner.alives.first = descriptor.head_alive.next
174 | }
175 | if descriptor.head_alive.next != nil {
176 | descriptor.head_alive.next.head_alive.prev = descriptor.head_alive.prev
177 | } else {
178 | spawner.alives.last = descriptor.head_alive.prev
179 | }
180 | spawner.alives.count -= 1
181 | // append to dead list
182 | if spawner.deads.last != nil {
183 | spawner.deads.last.head_dead.next = descriptor
184 | descriptor.head_dead.prev = spawner.deads.last
185 | }
186 | if spawner.deads.first == nil {
187 | spawner.deads.first = descriptor
188 | }
189 | spawner.deads.last = descriptor
190 | spawner.deads.count += 1
191 |
192 | // notify the event
193 | spawner.lastEvent = &spawnerEvent{
194 | t: Stopped,
195 | spawnee: spawnee,
196 | }
197 | spawner.cond.Broadcast()
198 | descriptor.cond.Broadcast()
199 | }()
200 | }()
201 | }
202 |
203 | func (spawner *Spawner) kill(spawnee Spawnee, retval chan dispatchReturnValue) {
204 | spawner.mtx.Lock()
205 | descriptor, ok := spawner.m[spawnee]
206 | spawner.mtx.Unlock()
207 | 	if ok && descriptor.exitStatus == Continue { // only a spawnee that is still running can be shut down
208 | descriptor.shutdownRequested = true
209 | err := spawnee.Shutdown()
210 | retval <- dispatchReturnValue{true, nil, err, nil}
211 | } else {
212 | retval <- dispatchReturnValue{false, nil, nil, nil}
213 | }
214 | }
215 |
216 | func (spawner *Spawner) getStatus(spawnee Spawnee, retval chan dispatchReturnValue) {
217 | spawner.mtx.Lock()
218 | defer spawner.mtx.Unlock()
219 | descriptor, ok := spawner.m[spawnee]
220 | if ok {
221 | retval <- dispatchReturnValue{false, nil, descriptor.exitStatus, nil}
222 | } else {
223 | retval <- dispatchReturnValue{false, nil, nil, nil}
224 | }
225 | }
226 |
227 | func (spawner *Spawner) getRunningSpawnees(_ Spawnee, retval chan dispatchReturnValue) {
228 | spawner.mtx.Lock()
229 | defer spawner.mtx.Unlock()
230 | spawnees := make([]Spawnee, spawner.alives.count)
231 | descriptor := spawner.alives.first
232 | i := 0
233 | for descriptor != nil {
234 | spawnees[i] = descriptor.spawnee
235 | descriptor = descriptor.head_alive.next
236 | i += 1
237 | }
238 | retval <- dispatchReturnValue{false, spawnees, nil, nil}
239 | }
240 |
241 | func (spawner *Spawner) getStoppedSpawnees(_ Spawnee, retval chan dispatchReturnValue) {
242 | spawner.mtx.Lock()
243 | defer spawner.mtx.Unlock()
244 | spawnees := make([]Spawnee, spawner.deads.count)
245 | descriptor := spawner.deads.first
246 | i := 0
247 | for descriptor != nil {
248 | spawnees[i] = descriptor.spawnee
249 | descriptor = descriptor.head_dead.next
250 | i += 1
251 | }
252 | retval <- dispatchReturnValue{false, spawnees, nil, nil}
253 | }
254 |
255 | func (spawner *Spawner) getSpawneeStatuses(_ Spawnee, retval chan dispatchReturnValue) {
256 | spawner.mtx.Lock()
257 | defer spawner.mtx.Unlock()
258 | spawneeStatuses := make([]SpawneeStatus, len(spawner.m))
259 | i := 0
260 | for spawnee, descriptor := range spawner.m {
261 | spawneeStatuses[i] = SpawneeStatus{Id: descriptor.id, Spawnee: spawnee, ExitStatus: descriptor.exitStatus}
262 | i += 1
263 | }
264 | retval <- dispatchReturnValue{false, nil, nil, spawneeStatuses}
265 | }
266 |
267 | func (spawner *Spawner) Spawn(spawnee Spawnee) error {
268 | retval := make(chan dispatchReturnValue)
269 | spawner.c <- dispatch{spawner.spawn, spawnee, retval}
270 | retval_ := <-retval
271 | return retval_.e
272 | }
273 |
274 | func (spawner *Spawner) Kill(spawnee Spawnee) (bool, error) {
275 | retval := make(chan dispatchReturnValue)
276 | spawner.c <- dispatch{spawner.kill, spawnee, retval}
277 | retval_ := <-retval
278 | return retval_.b, retval_.e
279 | }
280 |
281 | func (spawner *Spawner) GetStatus(spawnee Spawnee) error {
282 | retval := make(chan dispatchReturnValue)
283 | spawner.c <- dispatch{spawner.getStatus, spawnee, retval}
284 | retval_ := <-retval
285 | return retval_.e
286 | }
287 |
288 | func (spawner *Spawner) GetRunningSpawnees() ([]Spawnee, error) {
289 | retval := make(chan dispatchReturnValue)
290 | spawner.c <- dispatch{spawner.getRunningSpawnees, nil, retval}
291 | retval_ := <-retval
292 | return retval_.s, retval_.e
293 | }
294 |
295 | func (spawner *Spawner) GetStoppedSpawnees() ([]Spawnee, error) {
296 | retval := make(chan dispatchReturnValue)
297 | spawner.c <- dispatch{spawner.getStoppedSpawnees, nil, retval}
298 | retval_ := <-retval
299 | return retval_.s, retval_.e
300 | }
301 |
302 | func (spawner *Spawner) GetSpawneeStatuses() ([]SpawneeStatus, error) {
303 | retval := make(chan dispatchReturnValue)
304 | spawner.c <- dispatch{spawner.getSpawneeStatuses, nil, retval}
305 | retval_ := <-retval
306 | return retval_.ss, retval_.e
307 | }
308 |
309 | func (spawner *Spawner) Poll(spawnee Spawnee) error {
310 | descriptor, ok := spawner.m[spawnee]
311 | if !ok {
312 | return NotFound
313 | }
314 | if func() bool {
315 | spawner.mtx.Lock()
316 | defer spawner.mtx.Unlock()
317 | if descriptor.exitStatus != Continue {
318 | return true
319 | }
320 | return false
321 | }() {
322 | return nil
323 | }
324 | 	descriptor.mtx.Lock()
325 | 	defer descriptor.mtx.Unlock()
326 | descriptor.cond.Wait()
327 | return nil
328 | }
329 |
330 | func (spawner *Spawner) PollMultiple(spawnees []Spawnee) error {
331 | spawnees_ := make(map[Spawnee]bool)
332 | for _, spawnee := range spawnees {
333 | spawnees_[spawnee] = true
334 | }
335 | count := len(spawnees_)
336 | for count > 0 {
337 | spawner.mtx.Lock()
338 | spawner.cond.Wait()
339 | lastEvent := spawner.lastEvent
340 | spawner.mtx.Unlock()
341 | if lastEvent.t == Stopped {
342 | spawnee := lastEvent.spawnee
343 | if alive, ok := spawnees_[spawnee]; alive && ok {
344 | spawnees_[spawnee] = false
345 | count -= 1
346 | }
347 | }
348 | }
349 | return nil
350 | }
351 |
352 | func NewSpawner() *Spawner {
353 | c := make(chan dispatch)
354 | // launch the supervisor
355 | go func() {
356 | for {
357 | select {
358 | case disp := <-c:
359 | disp.fn(disp.spawnee, disp.retval)
360 | }
361 | }
362 | }()
363 | spawner := &Spawner{
364 | alives: descriptorList{0, nil, nil},
365 | deads: descriptorList{0, nil, nil},
366 | m: make(map[Spawnee]*spawneeDescriptor),
367 | c: c,
368 | mtx: sync.Mutex{},
369 | cond: nil,
370 | lastEvent: nil,
371 | }
372 | spawner.cond = sync.NewCond(&spawner.mtx)
373 | return spawner
374 | }
375 |
--------------------------------------------------------------------------------
/spawner_test.go:
--------------------------------------------------------------------------------
1 | package ik
2 |
3 | import (
4 | "errors"
5 | "testing"
6 | )
7 |
8 | type Foo struct {
9 | state string
10 | c chan string
11 | }
12 |
13 | func (foo *Foo) Run() error {
14 | foo.state = "run"
15 | retval := <-foo.c
16 | foo.state = "stopped"
17 | return errors.New(retval)
18 | }
19 |
20 | func (foo *Foo) Shutdown() error {
21 | foo.c <- "ok"
22 | return nil
23 | }
24 |
25 | type Bar struct{ c chan interface{} }
26 |
27 | func (bar *Bar) Run() error {
28 | panic(<-bar.c)
29 | }
30 |
31 | func (bar *Bar) Shutdown() error {
32 | return nil
33 | }
34 |
35 | type Baz struct{ message string }
36 |
37 | func (baz *Baz) String() string {
38 | return baz.message
39 | }
40 |
41 | func TestSpawner_Spawn(t *testing.T) {
42 | spawner := NewSpawner()
43 | f := &Foo{"", make(chan string)}
44 | spawner.Spawn(f)
45 | err := spawner.GetStatus(f)
46 | if err != Continue {
47 | t.Fail()
48 | }
49 | f.c <- "result"
50 | spawner.Poll(f)
51 | err = spawner.GetStatus(f)
52 | if err == Continue {
53 | t.Fail()
54 | }
55 | if err.Error() != "result" {
56 | t.Fail()
57 | }
58 | }
59 |
60 | func TestSpawner_Panic1(t *testing.T) {
61 | spawner := NewSpawner()
62 | f := &Bar{make(chan interface{})}
63 | spawner.Spawn(f)
64 | err := spawner.GetStatus(f)
65 | if err != Continue {
66 | t.Fail()
67 | }
68 | f.c <- "PANIC"
69 | spawner.Poll(f)
70 | err = spawner.GetStatus(f)
71 | panicked, ok := err.(*Panicked)
72 | if !ok {
73 | t.Fail()
74 | }
75 | if panicked.Error() != "PANIC" {
76 | t.Fail()
77 | }
78 | }
79 |
80 | func TestSpawner_Panic2(t *testing.T) {
81 | spawner := NewSpawner()
82 | f := &Bar{make(chan interface{})}
83 | spawner.Spawn(f)
84 | err := spawner.GetStatus(f)
85 | if err != Continue {
86 | t.Fail()
87 | }
88 | f.c <- &Baz{"BAZ!"}
89 | spawner.Poll(f)
90 | err = spawner.GetStatus(f)
91 | panicked, ok := err.(*Panicked)
92 | if !ok {
93 | t.Fail()
94 | }
95 | if panicked.Error() != "(*Baz) BAZ!" {
96 | t.Fail()
97 | }
98 | }
99 |
--------------------------------------------------------------------------------
/stringvector.go:
--------------------------------------------------------------------------------
1 | package ik
2 |
3 | type StringVector []string
4 |
5 | func (sv *StringVector) Append(v string) {
6 | oldLength := len(*sv)
7 | sv.ensureCapacity(oldLength + 1)
8 | (*sv)[oldLength] = v
9 | }
10 |
11 | func (sv *StringVector) Push(v string) {
12 | sv.Append(v)
13 | }
14 |
15 | func (sv *StringVector) Pop() string {
16 | retval := (*sv)[len(*sv)-1]
17 | *sv = (*sv)[0 : len(*sv)-1]
18 | return retval
19 | }
20 |
21 | func (sv *StringVector) Shift() string {
22 | retval := (*sv)[0]
23 | *sv = (*sv)[1:len(*sv)]
24 | return retval
25 | }
26 |
27 | func (sv *StringVector) Last() string {
28 | return (*sv)[len(*sv)-1]
29 | }
30 |
31 | func (sv *StringVector) First() string {
32 | return (*sv)[0]
33 | }
34 |
35 | func (sv *StringVector) ensureCapacity(l int) {
36 | if l < 256 {
37 | if l > cap(*sv) {
38 | newSlice := make([]string, l)
39 | copy(newSlice, *sv)
40 | *sv = newSlice
41 | }
42 | } else {
43 | newCapacity := cap(*sv)
44 | if newCapacity < 256 {
45 | newCapacity = 128
46 | }
47 | for l > newCapacity {
48 | newCapacity = 2 * newCapacity
49 | if newCapacity < cap(*sv) {
50 | /* unlikely */
51 | panic("out of memory")
52 | }
53 | }
54 | newSlice := make([]string, newCapacity)
55 | copy(newSlice, *sv)
56 | *sv = newSlice
57 | }
58 | *sv = (*sv)[0:l]
59 | }
60 |
--------------------------------------------------------------------------------
/stringvector_test.go:
--------------------------------------------------------------------------------
1 | package ik
2 |
3 | import "testing"
4 |
5 | func TestStringVector_Append(t *testing.T) {
6 | sv := make(StringVector, 0)
7 | if len(sv) != 0 {
8 | t.Fail()
9 | }
10 | if cap(sv) != 0 {
11 | t.Fail()
12 | }
13 | for i := 0; i < 512; i += 3 {
14 | if i > 1 && sv[i-1] != "c" {
15 | t.Fail()
16 | }
17 | sv.Append("a")
18 | if len(sv) != i+1 {
19 | t.Fail()
20 | }
21 | if i < 253 {
22 | if cap(sv) != i+1 {
23 | t.Fail()
24 | }
25 | }
26 | if sv[i] != "a" {
27 | t.Fail()
28 | }
29 | sv.Append("b")
30 | if len(sv) != i+2 {
31 | t.Fail()
32 | }
33 | if i < 253 {
34 | if cap(sv) != i+2 {
35 | t.Fail()
36 | }
37 | }
38 | if sv[i] != "a" {
39 | t.Fail()
40 | }
41 | if sv[i+1] != "b" {
42 | t.Fail()
43 | }
44 | sv.Append("c")
45 | if len(sv) != i+3 {
46 | t.Fail()
47 | }
48 | if i < 253 {
49 | if cap(sv) != i+3 {
50 | t.Fail()
51 | }
52 | }
53 | if sv[i] != "a" {
54 | t.Fail()
55 | }
56 | if sv[i+1] != "b" {
57 | t.Fail()
58 | }
59 | if sv[i+2] != "c" {
60 | t.Fail()
61 | }
62 | }
63 | }
64 |
65 | func TestStringVector_Pop(t *testing.T) {
66 | sv := make(StringVector, 2)
67 | sv[0] = "a"
68 | sv[1] = "b"
69 | if len(sv) != 2 {
70 | t.Fail()
71 | }
72 | if cap(sv) != 2 {
73 | t.Fail()
74 | }
75 | if sv.Pop() != "b" {
76 | t.Fail()
77 | }
78 | if len(sv) != 1 {
79 | t.Fail()
80 | }
81 | if cap(sv) != 2 {
82 | t.Fail()
83 | }
84 | if sv.Pop() != "a" {
85 | t.Fail()
86 | }
87 | if len(sv) != 0 {
88 | t.Fail()
89 | }
90 | if cap(sv) != 2 {
91 | t.Fail()
92 | }
93 | }
94 |
95 | func TestStringVector_Shift(t *testing.T) {
96 | sv := make(StringVector, 2)
97 | sv[0] = "a"
98 | sv[1] = "b"
99 | if len(sv) != 2 {
100 | t.Fail()
101 | }
102 | if cap(sv) != 2 {
103 | t.Fail()
104 | }
105 | if sv.Shift() != "a" {
106 | t.Fail()
107 | }
108 | if len(sv) != 1 {
109 | t.Fail()
110 | }
111 | if cap(sv) != 1 {
112 | t.Fail()
113 | }
114 | if sv.Shift() != "b" {
115 | t.Fail()
116 | }
117 | if len(sv) != 0 {
118 | t.Fail()
119 | }
120 | if cap(sv) != 0 {
121 | t.Fail()
122 | }
123 | }
124 |
--------------------------------------------------------------------------------
/task/recurring_scheduler.go:
--------------------------------------------------------------------------------
1 | package task
2 |
3 | import (
4 | "errors"
5 | "sort"
6 | "sync/atomic"
7 | "time"
8 | )
9 |
10 | type RecurringTaskSpec struct {
11 | month []int
12 | dayOfWeek []int
13 | dayOfMonth []int
14 | hour []int
15 | minute []int
16 | rightAt time.Time
17 | }
18 |
19 | type RecurringTaskDescriptor struct {
20 | id int64
21 | spec RecurringTaskSpec
22 | nextTime time.Time
23 | status int
24 | fn func(int64, time.Time, *RecurringTaskSpec) (interface{}, error)
25 | }
26 |
27 | type RecurringTaskDescriptorHeap []*RecurringTaskDescriptor
28 |
29 | type RecurringTaskTimeResolution int
30 |
31 | const (
32 | Insert = iota
33 | Update
34 | TryPop
35 | Delete
36 | NoOp
37 | )
38 |
39 | const (
40 | Stopped = 0
41 | Running = 1
42 | )
43 |
44 | const (
45 | Nanosecond = RecurringTaskTimeResolution(0)
46 | Second = RecurringTaskTimeResolution(1)
47 | Minute = RecurringTaskTimeResolution(2)
48 | Hour = RecurringTaskTimeResolution(3)
49 | Day = RecurringTaskTimeResolution(4)
50 | Month = RecurringTaskTimeResolution(5)
51 | Year = RecurringTaskTimeResolution(6)
52 | )
53 |
54 | type RecurringTaskDaemonCommandResult struct {
55 | descriptor *RecurringTaskDescriptor
56 | diff time.Duration
57 | }
58 |
59 | type RecurringTaskDaemonCommand struct {
60 | command int
61 | descriptor *RecurringTaskDescriptor
62 | time time.Time
63 | result chan RecurringTaskDaemonCommandResult
64 | }
65 |
66 | type RecurringTaskScheduler struct {
67 | pQueue RecurringTaskDescriptorHeap
68 | nowGetter func() time.Time
69 | taskRunner TaskRunner
70 | daemonChan chan RecurringTaskDaemonCommand
71 | nextId int64
72 | }
73 |
74 | func (heap *RecurringTaskDescriptorHeap) insert(elem *RecurringTaskDescriptor) {
75 | l := len(*heap)
76 | if cap(*heap) < l+1 {
77 | 		newCap := cap(*heap) * 2
78 | 		if newCap < l+1 {
79 | 			newCap = l + 1
80 | 		}
81 | 		newHeap := make([]*RecurringTaskDescriptor, l+1, newCap)
82 | copy(newHeap, *heap)
83 | *heap = newHeap
84 | } else {
85 | *heap = (*heap)[0 : l+1]
86 | }
87 | (*heap)[l] = elem
88 | heap.bubbleUp(l)
89 | }
90 |
91 | func (heap *RecurringTaskDescriptorHeap) find(elem *RecurringTaskDescriptor) int {
92 | _heap := *heap
93 | for i := 0; i < len(_heap); i += 1 {
94 | if _heap[i] == elem {
95 | return i
96 | }
97 | }
98 | return -1
99 | }
100 |
101 | func (heap *RecurringTaskDescriptorHeap) update(elem *RecurringTaskDescriptor) {
102 | i := heap.find(elem)
103 | if i < 0 {
104 | panic("should never happen")
105 | }
106 | _heap := *heap
107 | copy(_heap[i:], _heap[i+1:])
108 | i = len(_heap) - 1
109 | _heap[i] = elem
110 | heap.bubbleUp(i)
111 | }
112 |
113 | func (heap *RecurringTaskDescriptorHeap) delete(elem *RecurringTaskDescriptor) {
114 | i := heap.find(elem)
115 | if i < 0 {
116 | panic("should never happen")
117 | }
118 | _heap := *heap
119 | copy(_heap[i:], _heap[i+1:])
120 | // readjust the capacity
121 | *heap = _heap[0 : len(_heap)-1]
122 | }
123 |
124 | func (heap *RecurringTaskDescriptorHeap) bubbleUp(i int) {
125 | for i > 0 {
126 | node := &(*heap)[i]
127 | parentIndex := (i - 1) >> 1
128 | parentNode := &(*heap)[parentIndex]
129 | if (*parentNode).status != Running && (*node).status == Running {
130 | break
131 | } else if ((*parentNode).status == Running && (*node).status != Running) || (*parentNode).nextTime.After((*node).nextTime) {
132 | *node, *parentNode = *parentNode, *node
133 | i = parentIndex
134 | } else {
135 | break
136 | }
137 | }
138 | }
139 |
140 | func (spec *RecurringTaskSpec) copy() RecurringTaskSpec {
141 | retval := RecurringTaskSpec{}
142 | if spec.month != nil {
143 | retval.month = make([]int, len(spec.month))
144 | copy(retval.month, spec.month)
145 | }
146 | if spec.dayOfMonth != nil {
147 | retval.dayOfMonth = make([]int, len(spec.dayOfMonth))
148 | copy(retval.dayOfMonth, spec.dayOfMonth)
149 | }
150 | if spec.dayOfWeek != nil {
151 | retval.dayOfWeek = make([]int, len(spec.dayOfWeek))
152 | copy(retval.dayOfWeek, spec.dayOfWeek)
153 | }
154 | if spec.hour != nil {
155 | retval.hour = make([]int, len(spec.hour))
156 | copy(retval.hour, spec.hour)
157 | }
158 | if spec.minute != nil {
159 | retval.minute = make([]int, len(spec.minute))
160 | copy(retval.minute, spec.minute)
161 | }
162 | retval.rightAt = spec.rightAt
163 | return retval
164 | }
165 |
166 | func (spec *RecurringTaskSpec) ensureSorted() {
167 | sort.IntSlice(spec.month).Sort()
168 | sort.IntSlice(spec.dayOfMonth).Sort()
169 | sort.IntSlice(spec.dayOfWeek).Sort()
170 | sort.IntSlice(spec.hour).Sort()
171 | sort.IntSlice(spec.minute).Sort()
172 | }
173 |
174 | func (spec *RecurringTaskSpec) isZero() bool {
175 | return spec.month == nil && spec.dayOfMonth == nil && spec.dayOfWeek == nil && spec.hour == nil && spec.minute == nil && spec.rightAt.IsZero()
176 | }
177 |
178 | type timeStruct struct {
179 | second int
180 | minute int
181 | hour int
182 | day int
183 | month time.Month
184 | year int
185 | location *time.Location
186 | }
187 |
188 | func newTimeStruct(t time.Time) timeStruct {
189 | currentYear, currentMonth, currentDay := t.Date()
190 | currentHour, currentMinute, currentSecond := t.Clock()
191 | return timeStruct{
192 | second: currentSecond,
193 | minute: currentMinute,
194 | hour: currentHour,
195 | day: currentDay,
196 | month: currentMonth,
197 | year: currentYear,
198 | location: t.Location(),
199 | }
200 | }
201 |
202 | func (tm *timeStruct) toTime() time.Time {
203 | return time.Date(tm.year, tm.month, tm.day, tm.hour, tm.minute, tm.second, 0, tm.location)
204 | }
205 |
206 | func daysIn(month time.Month, year int) int {
207 | return time.Date(year, month+1, 1, 0, 0, 0, 0, time.UTC).Add(-time.Hour).Day()
208 | }
209 |
210 | func (spec *RecurringTaskSpec) resolution() RecurringTaskTimeResolution {
211 | if !spec.rightAt.IsZero() {
212 | return Nanosecond
213 | }
214 | if spec.minute != nil {
215 | return Minute
216 | }
217 | if spec.hour != nil {
218 | return Hour
219 | }
220 | if spec.dayOfWeek != nil || spec.dayOfMonth != nil {
221 | return Day
222 | }
223 | if spec.month != nil {
224 | return Month
225 | }
226 | {
227 | panic("should never get here")
228 | }
229 | }
230 |
231 | func incrementByResolution(t time.Time, res RecurringTaskTimeResolution, amount int) time.Time {
232 | switch res {
233 | case Nanosecond:
234 | return t.Add(time.Duration(amount))
235 | case Second:
236 | return t.Add(time.Duration(amount * 1000000000))
237 | case Minute:
238 | return t.Add(time.Duration(amount * 1000000000 * 60))
239 | case Hour:
240 | return t.Add(time.Duration(amount * 1000000000 * 60 * 60))
241 | case Day:
242 | return t.AddDate(0, 0, amount)
243 | case Month:
244 | return t.AddDate(0, amount, 0)
245 | case Year:
246 | return t.AddDate(amount, 0, 0)
247 | default:
248 | panic("should never get here")
249 | }
250 | }
251 |
252 | func (spec *RecurringTaskSpec) nextTime(now time.Time) (time.Time, error) {
253 | tm := newTimeStruct(now)
254 | next := timeStruct{0, -1, -1, -1, -1, tm.year, tm.location}
255 |
256 | if !spec.rightAt.IsZero() {
257 | return spec.rightAt, nil
258 | }
259 |
260 | if spec.minute != nil {
261 | if len(spec.minute) == 0 {
262 | return time.Time{}, errors.New("invalid time spec")
263 | }
264 | for _, minute := range spec.minute {
265 | if minute >= tm.minute {
266 | next.minute = minute
267 | break
268 | }
269 | }
270 | if next.minute < 0 {
271 | next.minute = spec.minute[0]
272 | tm.hour += 1 // carry
273 | if tm.hour >= 24 {
274 | tm.hour -= 24
275 | tm.day += 1
276 | }
277 | }
278 | } else {
279 | next.minute = tm.minute
280 | }
281 |
282 | if spec.hour != nil {
283 | if len(spec.hour) == 0 {
284 | return time.Time{}, errors.New("invalid time spec")
285 | }
286 | for _, hour := range spec.hour {
287 | if hour >= tm.hour {
288 | next.hour = hour
289 | break
290 | }
291 | }
292 | if next.hour < 0 {
293 | next.hour = spec.hour[0]
294 | tm.day += 1 // carry
295 | }
296 | } else {
297 | next.hour = tm.hour
298 | }
299 |
300 | daysInMonth := daysIn(tm.month, tm.year)
301 | if tm.day > daysInMonth {
302 | tm.day -= daysInMonth
303 | tm.month += 1
304 | if tm.month > 12 {
305 | tm.month -= 12
306 | tm.year += 1
307 | }
308 | daysInMonth = daysIn(tm.month, tm.year)
309 | }
310 |
311 | // if both dayOfWeek and dayOfMonth are specified, the next execution
312 | // time will be either one that comes first (according to crontab(5))
313 |
314 | nextDayCandidate1 := -1
315 | if spec.dayOfWeek != nil {
316 | if len(spec.dayOfWeek) == 0 {
317 | return time.Time{}, errors.New("invalid time spec")
318 | }
319 | currentDayOfWeek := (&tm).toTime().Weekday()
320 | dayOfWeek := -1
321 | for _, dayOfWeek_ := range spec.dayOfWeek {
322 | 			if dayOfWeek_ >= int(currentDayOfWeek) && dayOfWeek < 0 {
323 | 				dayOfWeek = dayOfWeek_ // the list is sorted, so this takes the earliest matching weekday
324 | 			}
325 | }
326 | if dayOfWeek < 0 {
327 | nextDayCandidate1 = tm.day + spec.dayOfWeek[0] + 7 - int(currentDayOfWeek)
328 | } else {
329 | nextDayCandidate1 = tm.day + dayOfWeek - int(currentDayOfWeek)
330 | }
331 | }
332 |
333 | nextDayCandidate2 := -1
334 | if spec.dayOfMonth != nil {
335 | if len(spec.dayOfMonth) == 0 {
336 | return time.Time{}, errors.New("invalid time spec")
337 | }
338 | for _, dayOfMonth := range spec.dayOfMonth {
339 | if dayOfMonth >= tm.day {
340 | nextDayCandidate2 = dayOfMonth
341 | break
342 | }
343 | }
344 | if nextDayCandidate2 < 0 {
345 | nextDayCandidate2 = daysInMonth + spec.dayOfMonth[0] - 1
346 | }
347 | }
348 |
349 | if nextDayCandidate1 == -1 && nextDayCandidate2 == -1 {
350 | // do nothing
351 | 	} else if nextDayCandidate2 != -1 && (nextDayCandidate1 == -1 || nextDayCandidate1 > nextDayCandidate2) {
352 | tm.day = nextDayCandidate2
353 | } else {
354 | tm.day = nextDayCandidate1
355 | }
356 |
357 | if tm.day > daysInMonth {
358 | tm.day = tm.day - daysInMonth
359 | tm.month += 1
360 | if tm.month > 12 {
361 | tm.month -= 12
362 | tm.year += 1
363 | }
364 | }
365 | next.day = tm.day
366 |
367 | if spec.month != nil {
368 | 		if len(spec.month) == 0 {
369 | return time.Time{}, errors.New("invalid time spec")
370 | }
371 | for _, month := range spec.month {
372 | if month >= int(tm.month) {
373 | next.month = time.Month(month)
374 | break
375 | }
376 | }
377 | if next.month < 0 {
378 | next.month = time.Month(spec.month[0])
379 | tm.year += 1
380 | }
381 | } else {
382 | next.month = tm.month
383 | }
384 | 	next.year = tm.year // pick up any year carried over by the day/month adjustments above
385 | 	return next.toTime(), nil
386 | }
387 |
388 | func (sched *RecurringTaskScheduler) RunNext() (time.Duration, TaskStatus, error) {
389 | now := sched.nowGetter()
390 | resultChan := make(chan RecurringTaskDaemonCommandResult)
391 | sched.daemonChan <- RecurringTaskDaemonCommand{TryPop, nil, now, resultChan}
392 | result := <-resultChan
393 | descr := result.descriptor
394 | remaining := result.diff
395 | if remaining > 0 {
396 | return remaining, nil, nil
397 | }
398 | taskStatus, err := sched.taskRunner.Run(func() (interface{}, error) {
399 | spec := (&descr.spec).copy()
400 | result, err := descr.fn(descr.id, now, &spec)
401 | if err != nil {
402 | return nil, err
403 | }
404 | if spec.isZero() {
405 | sched.daemonChan <- RecurringTaskDaemonCommand{Delete, descr, time.Time{}, nil}
406 | } else {
407 | _now := sched.nowGetter()
408 | res := (&spec).resolution()
409 | var nextTime time.Time
410 | for {
411 | nextTime, err = spec.nextTime(_now)
412 | if err != nil {
413 | return nil, err
414 | }
415 | if nextTime.Sub(descr.nextTime) > 0 {
416 | break
417 | }
418 | _now = incrementByResolution(_now, res, 1)
419 | }
420 | descr.spec = spec // XXX: hope this is safe
421 | sched.daemonChan <- RecurringTaskDaemonCommand{Update, descr, nextTime, nil}
422 | }
423 | return result, nil
424 | })
425 | return time.Duration(0), taskStatus, err
426 | }
427 |
428 | func (sched *RecurringTaskScheduler) NoOp() {
429 | sched.daemonChan <- RecurringTaskDaemonCommand{NoOp, nil, time.Time{}, nil}
430 | }
431 |
432 | func (sched *RecurringTaskScheduler) ProcessEvent() {
433 | cmd := <-sched.daemonChan
434 | switch cmd.command {
435 | case Insert:
436 | (&sched.pQueue).insert(cmd.descriptor)
437 | case Update:
438 | descr := cmd.descriptor
439 | descr.nextTime = cmd.time
440 | descr.status = Stopped
441 | 		// re-position the descriptor now that its nextTime and status have changed
442 | 		(&sched.pQueue).update(descr)
443 | case TryPop:
444 | descr := sched.pQueue[0]
445 | now := cmd.time
446 | diff := descr.nextTime.Sub(now)
447 | if diff <= 0 {
448 | descr.status = Running
449 | (&sched.pQueue).update(descr)
450 | }
451 | cmd.result <- RecurringTaskDaemonCommandResult{descr, diff}
452 | case Delete:
453 | (&sched.pQueue).delete(cmd.descriptor)
454 | case NoOp:
455 | // do nothing
456 | default:
457 | panic("WTF!")
458 | }
459 | }
460 |
461 | func (sched *RecurringTaskScheduler) _nextId() int64 {
462 | return atomic.AddInt64(&sched.nextId, 1)
463 | }
464 |
465 | func (sched *RecurringTaskScheduler) RegisterTask(spec RecurringTaskSpec, task func(int64, time.Time, *RecurringTaskSpec) (interface{}, error)) (int64, error) {
466 | spec = (&spec).copy()
467 | (&spec).ensureSorted()
468 | id := sched._nextId()
469 | descr := &RecurringTaskDescriptor{
470 | id: id,
471 | spec: spec,
472 | nextTime: time.Time{},
473 | status: Stopped,
474 | fn: task,
475 | }
476 | nextTime, err := spec.nextTime(sched.nowGetter())
477 | if err != nil {
478 | return -1, err
479 | }
480 | descr.nextTime = nextTime
481 | sched.daemonChan <- RecurringTaskDaemonCommand{Insert, descr, time.Time{}, nil}
482 | return id, nil
483 | }
484 |
485 | func NewRecurringTaskScheduler(nowGetter func() time.Time, taskRunner TaskRunner) *RecurringTaskScheduler {
486 | return &RecurringTaskScheduler{
487 | pQueue: make(RecurringTaskDescriptorHeap, 0, 16),
488 | nowGetter: nowGetter,
489 | taskRunner: taskRunner,
490 | daemonChan: make(chan RecurringTaskDaemonCommand, 1),
491 | nextId: 0,
492 | }
493 | }
494 |
--------------------------------------------------------------------------------
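RecurringTaskSpec behaves like a crontab entry: a nil field means "every value", a non-empty sorted slice restricts that field, and dayOfWeek/dayOfMonth combine by firing on whichever comes first. The fields are unexported, so specs are built from inside package task, as the test below does. A small worked example:

```go
// Fires at 00:10, 00:20, 01:10 and 01:20 every day; month, dayOfWeek and
// dayOfMonth are nil and therefore unconstrained.
spec := RecurringTaskSpec{
	hour:   []int{0, 1},
	minute: []int{10, 20},
}
```

Starting from 1970-01-01 00:00:00 UTC, successive nextTime computations for this spec yield 00:10, then 00:20, then 01:10, then 01:20, and then 00:10 of the following day.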
/task/recurring_scheduler_test.go:
--------------------------------------------------------------------------------
1 | package task
2 |
3 | import (
4 | "testing"
5 | "time"
6 | )
7 |
8 | type DummyTaskStatus struct {
9 | result interface{}
10 | error error
11 | }
12 |
13 | func (status *DummyTaskStatus) Status() error { return status.error }
14 |
15 | func (status *DummyTaskStatus) Result() interface{} { return status.result }
16 |
17 | func (*DummyTaskStatus) Poll() {}
18 |
19 | type DummyTaskRunner struct {
20 | lastRunTask func() (interface{}, error)
21 | }
22 |
23 | func (runner *DummyTaskRunner) Run(task func() (interface{}, error)) (TaskStatus, error) {
24 | retval, err := task()
25 | return &DummyTaskStatus{retval, err}, nil
26 | }
27 |
28 | type recurringTaskArgs struct {
29 | id int64
30 | firedOn time.Time
31 | spec RecurringTaskSpec
32 | }
33 |
34 | func TestGoal(t *testing.T) {
35 | var now time.Time
36 | runner := &DummyTaskRunner{}
37 | sched := NewRecurringTaskScheduler(func() time.Time { return now }, runner)
38 | results := make([]recurringTaskArgs, 0, 10)
39 |
40 | now = time.Date(1970, 1, 1, 0, 0, 0, 0, time.UTC)
41 | id1, err := sched.RegisterTask(
42 | RecurringTaskSpec{
43 | month: nil,
44 | dayOfWeek: nil,
45 | dayOfMonth: nil,
46 | hour: []int{0, 1},
47 | minute: []int{10, 20},
48 | rightAt: time.Time{},
49 | },
50 | func(id int64, on time.Time, spec *RecurringTaskSpec) (interface{}, error) {
51 | results = append(results, recurringTaskArgs{id, on, *spec})
52 | t.Logf("%d: %v, %v", id, on, spec)
53 | return nil, nil
54 | },
55 | )
56 | if err != nil {
57 | t.Log(err)
58 | t.FailNow()
59 | }
60 | sched.ProcessEvent()
61 |
62 | id2, err := sched.RegisterTask(
63 | RecurringTaskSpec{
64 | month: nil,
65 | dayOfWeek: nil,
66 | dayOfMonth: nil,
67 | hour: []int{0},
68 | minute: []int{0, 10},
69 | rightAt: time.Time{},
70 | },
71 | func(id int64, on time.Time, spec *RecurringTaskSpec) (interface{}, error) {
72 | results = append(results, recurringTaskArgs{id, on, *spec})
73 | t.Logf("%d: %v, %v", id, on, spec)
74 | return nil, nil
75 | },
76 | )
77 | if err != nil {
78 | t.Log(err)
79 | t.FailNow()
80 | }
81 | sched.ProcessEvent()
82 |
83 | if id1 == id2 {
84 | t.Fail()
85 | }
86 |
87 | go sched.ProcessEvent()
88 | diff, _, err := sched.RunNext()
89 | if err != nil {
90 | t.Log(err)
91 | t.FailNow()
92 | }
93 | t.Logf("diff=%d", diff)
94 | if diff != 0 {
95 | t.Fail()
96 | }
97 | go sched.ProcessEvent()
98 |
99 | t.Logf("results=%d", len(results))
100 | if len(results) != 1 {
101 | t.Fail()
102 | }
103 | go sched.ProcessEvent() // for update
104 |
105 | go sched.ProcessEvent()
106 | diff, _, err = sched.RunNext()
107 | if err != nil {
108 | t.Log(err)
109 | t.FailNow()
110 | }
111 | t.Logf("diff=%d", diff)
112 | if diff != 10*60*1000000000 {
113 | t.Fail()
114 | }
115 |
116 | //
117 | now = time.Date(1970, 1, 1, 0, 10, 0, 0, time.UTC)
118 |
119 | go sched.ProcessEvent()
120 | diff, _, err = sched.RunNext()
121 | if err != nil {
122 | t.Log(err)
123 | t.FailNow()
124 | }
125 | t.Logf("diff=%d", diff)
126 | if diff != 0 {
127 | t.Fail()
128 | }
129 |
130 | t.Logf("results=%d", len(results))
131 | if len(results) != 2 {
132 | t.Fail()
133 | }
134 | go sched.ProcessEvent() // for update
135 |
136 | go sched.ProcessEvent()
137 | diff, _, err = sched.RunNext()
138 | if err != nil {
139 | t.Log(err)
140 | t.FailNow()
141 | }
142 | t.Logf("diff=%d", diff)
143 | if diff != 0 {
144 | t.Fail()
145 | }
146 |
147 | t.Logf("results=%d", len(results))
148 | if len(results) != 3 {
149 | t.Fail()
150 | }
151 | go sched.ProcessEvent() // for update
152 |
153 | go sched.ProcessEvent()
154 | diff, _, err = sched.RunNext()
155 | if err != nil {
156 | t.Log(err)
157 | t.FailNow()
158 | }
159 | t.Logf("diff=%d", diff)
160 | if diff != 10*60*1000000000 {
161 | t.Fail()
162 | }
163 |
164 | //
165 | now = time.Date(1970, 1, 1, 0, 20, 0, 0, time.UTC)
166 |
167 | go sched.ProcessEvent()
168 | diff, _, err = sched.RunNext()
169 | if err != nil {
170 | t.Log(err)
171 | t.FailNow()
172 | }
173 | t.Logf("diff=%d", diff)
174 | if diff != 0 {
175 | t.Fail()
176 | }
177 |
178 | t.Logf("results=%d", len(results))
179 | if len(results) != 4 {
180 | t.Fail()
181 | }
182 | go sched.ProcessEvent() // for update
183 |
184 | go sched.ProcessEvent()
185 | diff, _, err = sched.RunNext()
186 | if err != nil {
187 | t.Log(err)
188 | t.FailNow()
189 | }
190 | t.Logf("diff=%d", diff)
191 | if diff != 50*60*1000000000 {
192 | t.Fail()
193 | }
194 |
195 | //
196 | now = time.Date(1970, 1, 1, 1, 10, 0, 0, time.UTC)
197 |
198 | go sched.ProcessEvent()
199 | diff, _, err = sched.RunNext()
200 | if err != nil {
201 | t.Log(err)
202 | t.FailNow()
203 | }
204 | t.Logf("diff=%d", diff)
205 | if diff != 0 {
206 | t.Fail()
207 | }
208 |
209 | t.Logf("results=%d", len(results))
210 | if len(results) != 5 {
211 | t.Fail()
212 | }
213 | go sched.ProcessEvent() // for update
214 |
215 | go sched.ProcessEvent()
216 | diff, _, err = sched.RunNext()
217 | if err != nil {
218 | t.Log(err)
219 | t.FailNow()
220 | }
221 | t.Logf("diff=%d", diff)
222 | if diff != 10*60*1000000000 {
223 | t.Fail()
224 | }
225 |
226 | //
227 | now = time.Date(1970, 1, 1, 1, 20, 0, 0, time.UTC)
228 |
229 | go sched.ProcessEvent()
230 | diff, _, err = sched.RunNext()
231 | if err != nil {
232 | t.Log(err)
233 | t.FailNow()
234 | }
235 | t.Logf("diff=%d", diff)
236 | if diff != 0 {
237 | t.Fail()
238 | }
239 |
240 | t.Logf("results=%d", len(results))
241 | if len(results) != 6 {
242 | t.Fail()
243 | }
244 | go sched.ProcessEvent() // for update
245 |
246 | go sched.ProcessEvent()
247 | diff, _, err = sched.RunNext()
248 | if err != nil {
249 | t.Log(err)
250 | t.FailNow()
251 | }
252 | t.Logf("diff=%d", diff)
253 | // nanoseconds from 1970/1/1 01:20:00 to 1970/1/2 00:00:00
254 | if diff != (22*60+40)*60*1000000000 {
255 | t.Fail()
256 | }
257 | }
258 |
--------------------------------------------------------------------------------
/task/simple.go:
--------------------------------------------------------------------------------
1 | package task
2 |
3 | import (
4 | "sync"
5 | "sync/atomic"
6 | )
7 |
8 | type SimpleTaskRunner struct{}
9 |
10 | type simpleTaskStatus struct {
11 | condMutex sync.Mutex
12 | cond *sync.Cond
13 | completed uintptr
14 | status error
15 | result interface{}
16 | }
17 |
18 | func (status *simpleTaskStatus) Result() interface{} {
19 | return status.result
20 | }
21 |
22 | func (status *simpleTaskStatus) Status() error {
23 | completed := atomic.LoadUintptr(&status.completed)
24 | if completed == 0 {
25 | return NotCompleted
26 | }
27 | return status.status
28 | }
29 |
30 | func (status *simpleTaskStatus) Poll() {
31 | 	status.condMutex.Lock()
32 | 	defer status.condMutex.Unlock()
33 | 	for atomic.LoadUintptr(&status.completed) == 0 {
34 | 		status.cond.Wait() // Wait requires condMutex to be held; it is released while waiting
35 | 	}
36 | }
37 |
38 | func runTask(taskStatus *simpleTaskStatus, task func() (interface{}, error)) {
39 | defer func() {
40 | r := recover()
41 | if r != nil {
42 | taskStatus.status = &PanickedStatus{r}
43 | }
44 | }()
45 | taskStatus.result, taskStatus.status = task()
46 | }
47 |
48 | func (runner *SimpleTaskRunner) Run(task func() (interface{}, error)) (TaskStatus, error) {
49 | taskStatus := &simpleTaskStatus{
50 | condMutex: sync.Mutex{},
51 | completed: 0,
52 | status: NotCompleted,
53 | result: nil,
54 | }
55 | taskStatus.cond = sync.NewCond(&taskStatus.condMutex)
56 |
57 | 	go func() {
58 | 		runTask(taskStatus, task)
59 | 		taskStatus.condMutex.Lock()
60 | 		atomic.StoreUintptr(&taskStatus.completed, 1)
61 | 		taskStatus.cond.Broadcast() // publish completion under the mutex so Poll cannot miss the wakeup
62 | 		taskStatus.condMutex.Unlock()
63 | 	}()
64 |
65 | 	return taskStatus, nil
66 | }
67 |
--------------------------------------------------------------------------------
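SimpleTaskRunner runs the supplied function on its own goroutine; the returned TaskStatus answers NotCompleted until the function finishes, and Poll blocks until then. A minimal, self-contained sketch (the task body is made up, and the import path assumes the repository layout above):

```go
package main

import (
	"fmt"

	"github.com/moriyoshi/ik/task"
)

func main() {
	runner := &task.SimpleTaskRunner{}
	status, err := runner.Run(func() (interface{}, error) {
		return 6 * 7, nil // stand-in for real, long-running work
	})
	if err != nil {
		panic(err)
	}
	status.Poll()                // blocks until the task goroutine finishes
	fmt.Println(status.Status()) // <nil> on success, an error or *PanickedStatus otherwise
	fmt.Println(status.Result()) // 42
}
```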
/task/task.go:
--------------------------------------------------------------------------------
1 | package task
2 |
3 | import (
4 | "fmt"
5 | "reflect"
6 | )
7 |
8 | type NotCompletedStatus struct{}
9 |
10 | var NotCompleted = &NotCompletedStatus{}
11 |
12 | func (_ *NotCompletedStatus) Error() string { return "" }
13 |
14 | type PanickedStatus struct {
15 | panic interface{}
16 | }
17 |
18 | func typeName(type_ reflect.Type) string {
19 | if type_.Kind() == reflect.Ptr {
20 | return "*" + typeName(type_.Elem())
21 | } else {
22 | return type_.Name()
23 | }
24 | }
25 |
26 | func (panicked *PanickedStatus) Error() string {
27 | switch panic_ := panicked.panic.(type) {
28 | case string:
29 | return panic_
30 | case error:
31 | return fmt.Sprintf("(%s) %s", typeName(reflect.TypeOf(panic_)), panic_.Error())
32 | default:
33 | type_ := reflect.TypeOf(panic_)
34 | method, ok := type_.MethodByName("String")
35 | if ok && method.Type.NumIn() == 1 {
36 | result := method.Func.Call([]reflect.Value{reflect.ValueOf(panic_)})
37 | if len(result) == 1 && result[0].Type().Kind() == reflect.String {
38 | return fmt.Sprintf("(%s) %s", typeName(type_), result[0].String())
39 | }
40 | }
41 | return fmt.Sprintf("(%s)", typeName(type_))
42 | }
43 | }
44 |
45 | type TaskStatus interface {
46 | Status() error
47 | Result() interface{}
48 | Poll()
49 | }
50 |
51 | type TaskRunner interface {
52 | Run(func() (interface{}, error)) (TaskStatus, error)
53 | }
54 |
--------------------------------------------------------------------------------
/utils.go:
--------------------------------------------------------------------------------
1 | package ik
2 |
3 | import (
4 | "errors"
5 | "math/rand"
6 | "regexp"
7 | "strconv"
8 | "time"
9 | )
10 |
11 | var capacityRegExp = regexp.MustCompile("^([0-9]+)([kKmMgGtTpPeE])?(i?[bB])?")
12 |
13 | func ParseCapacityString(s string) (int64, error) {
14 | m := capacityRegExp.FindStringSubmatch(s)
15 | if m == nil {
16 | return -1, errors.New("Invalid format: " + s)
17 | }
18 | base := int64(1000)
19 | if len(m[3]) > 0 {
20 | if m[3][0] == 'i' {
21 | base = int64(1024)
22 | }
23 | }
24 | multiply := int64(1)
25 | if len(m[2]) > 0 {
26 | switch m[2][0] {
27 | case 'e', 'E':
28 | multiply *= base
29 | fallthrough
30 | case 'p', 'P':
31 | multiply *= base
32 | fallthrough
33 | case 't', 'T':
34 | multiply *= base
35 | fallthrough
36 | case 'g', 'G':
37 | multiply *= base
38 | fallthrough
39 | case 'm', 'M':
40 | multiply *= base
41 | fallthrough
42 | case 'k', 'K':
43 | multiply *= base
44 | }
45 | }
46 | i, err := strconv.ParseInt(m[1], 10, 64)
47 | if err != nil || multiply*i < i {
48 | return -1, errors.New("Invalid format (out of range): " + s)
49 | }
50 | return multiply * i, nil
51 | }
52 |
53 | func NewRandSourceWithTimestampSeed() rand.Source {
54 | return rand.NewSource(time.Now().UnixNano())
55 | }
56 |
--------------------------------------------------------------------------------
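ParseCapacityString accepts a decimal count with an optional SI or IEC suffix; the binary base (1024) is used only when the `i` infix is present, otherwise multiples of 1000 apply. A few worked values under that rule:

```go
// "8192" -> 8192              (no suffix)
// "8k"   -> 8 * 1000          = 8000
// "8KiB" -> 8 * 1024          = 8192
// "8m"   -> 8 * 1000 * 1000   = 8000000
// "8MiB" -> 8 * 1024 * 1024   = 8388608 (the default buffer_chunk_limit)
limit, err := ik.ParseCapacityString("8MiB") // limit == 8388608, err == nil
```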