├── .gitattributes
├── ui
│   ├── public
│   │   ├── favicon.ico
│   │   ├── manifest.json
│   │   └── index.html
│   ├── src
│   │   ├── index.css
│   │   ├── App.test.js
│   │   ├── index.js
│   │   ├── fakescripts.js
│   │   ├── App.css
│   │   ├── fakerules.js
│   │   ├── logo.svg
│   │   ├── untitled.json
│   │   ├── registerServiceWorker.js
│   │   └── TablePaginated.js
│   ├── .gitignore
│   └── package.json
├── assets
│   ├── cortex_rules.png
│   ├── cortex_scripts.png
│   └── cortex_playground.png
├── .gitignore
├── pkg
│   ├── executions
│   │   ├── execution.go
│   │   ├── execution_gen_test.go
│   │   └── execution_gen.go
│   ├── store
│   │   ├── bucket_storage.go
│   │   ├── messages.go
│   │   ├── command.go
│   │   ├── rule_storage.go
│   │   ├── execution_storage.go
│   │   ├── script_storage.go
│   │   ├── event_storage.go
│   │   ├── fsm_snapshot.go
│   │   ├── command_gen_test.go
│   │   ├── transport.go
│   │   ├── node.go
│   │   ├── raft.go
│   │   ├── fsm.go
│   │   ├── store.go
│   │   ├── command_gen.go
│   │   └── node_test.go
│   ├── config
│   │   ├── config_test.go
│   │   └── config.go
│   ├── js
│   │   ├── js.go
│   │   ├── js_test.go
│   │   ├── js_gen_test.go
│   │   └── js_gen.go
│   ├── events
│   │   ├── sinks
│   │   │   ├── icinga_test.go
│   │   │   ├── icinga.go
│   │   │   ├── site247_test.go
│   │   │   ├── azure_test.go
│   │   │   ├── site247.go
│   │   │   └── azure.go
│   │   ├── event_test.go
│   │   ├── event_gen_test.go
│   │   ├── bucket_gen_test.go
│   │   ├── event.go
│   │   ├── bucket.go
│   │   ├── bucket_gen.go
│   │   └── event_gen.go
│   ├── matcher
│   │   ├── matcher_test.go
│   │   └── match.go
│   ├── util
│   │   └── util.go
│   ├── rules
│   │   ├── rule.go
│   │   └── rule_gen_test.go
│   └── service
│       ├── service.go
│       └── handlers.go
├── release.sh
├── .goreleaser.yml
├── cmd
│   └── main.go
├── README.md
└── LICENSE
/.gitattributes:
--------------------------------------------------------------------------------
1 | * linguist-vendored
2 | *.go linguist-vendored=false
3 |
--------------------------------------------------------------------------------
/ui/public/favicon.ico:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/myntra/cortex/HEAD/ui/public/favicon.ico
--------------------------------------------------------------------------------
/assets/cortex_rules.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/myntra/cortex/HEAD/assets/cortex_rules.png
--------------------------------------------------------------------------------
/assets/cortex_scripts.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/myntra/cortex/HEAD/assets/cortex_scripts.png
--------------------------------------------------------------------------------
/ui/src/index.css:
--------------------------------------------------------------------------------
1 | body {
2 | margin: 0;
3 | padding: 0;
4 | font-family: sans-serif;
5 | }
6 |
--------------------------------------------------------------------------------
/assets/cortex_playground.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/myntra/cortex/HEAD/assets/cortex_playground.png
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | cover.out
2 | .DS_Store
3 | data
4 | cmd/cortex
5 | cmd/cmd
6 | .idea/
7 | cmd/data
8 | cmd/build
9 | rice-box.go
10 | vendor/
11 | dist/
12 | cmd/Dockerfile
13 |
--------------------------------------------------------------------------------
/ui/src/App.test.js:
--------------------------------------------------------------------------------
1 | import React from 'react';
2 | import ReactDOM from 'react-dom';
3 | import App from './App';
4 |
5 | it('renders without crashing', () => {
6 | const div = document.createElement('div');
7 | ReactDOM.render(<App />, div);
8 | ReactDOM.unmountComponentAtNode(div);
9 | });
10 |
--------------------------------------------------------------------------------
/ui/src/index.js:
--------------------------------------------------------------------------------
1 | import React from 'react';
2 | import ReactDOM from 'react-dom';
3 | import './index.css';
4 | import App from './App';
5 | import 'typeface-roboto';
6 | import registerServiceWorker from './registerServiceWorker';
7 |
8 | ReactDOM.render(<App />, document.getElementById('root'));
9 | registerServiceWorker();
10 |
--------------------------------------------------------------------------------
/ui/.gitignore:
--------------------------------------------------------------------------------
1 | # See https://help.github.com/ignore-files/ for more about ignoring files.
2 |
3 | # dependencies
4 | /node_modules
5 |
6 | # testing
7 | /coverage
8 |
9 | # production
10 | /build
11 |
12 | # misc
13 | .DS_Store
14 | .env.local
15 | .env.development.local
16 | .env.test.local
17 | .env.production.local
18 |
19 | npm-debug.log*
20 | yarn-debug.log*
21 | yarn-error.log*
22 |
--------------------------------------------------------------------------------
/ui/public/manifest.json:
--------------------------------------------------------------------------------
1 | {
2 | "short_name": "React App",
3 | "name": "Create React App Sample",
4 | "icons": [
5 | {
6 | "src": "favicon.ico",
7 | "sizes": "64x64 32x32 24x24 16x16",
8 | "type": "image/x-icon"
9 | }
10 | ],
11 | "start_url": "./index.html",
12 | "display": "standalone",
13 | "theme_color": "#000000",
14 | "background_color": "#ffffff"
15 | }
16 |
--------------------------------------------------------------------------------
/pkg/executions/execution.go:
--------------------------------------------------------------------------------
1 | package executions
2 |
3 | import (
4 | "time"
5 |
6 | "github.com/myntra/cortex/pkg/events"
7 | )
8 |
9 | //go:generate msgp
10 |
11 | // Record stores a rules execution state and result
12 | type Record struct {
13 | ID string `json:"id"`
14 | Bucket events.Bucket `json:"bucket"`
15 | ScriptResult interface{} `json:"script_result"`
16 | HookStatusCode int `json:"hook_status_code"`
17 | CreatedAt time.Time `json:"created_at"`
18 | }
19 |
--------------------------------------------------------------------------------
/pkg/store/bucket_storage.go:
--------------------------------------------------------------------------------
1 | package store
2 |
3 | import (
4 | "github.com/golang/glog"
5 | "github.com/myntra/cortex/pkg/events"
6 | "github.com/myntra/cortex/pkg/rules"
7 | )
8 |
9 | type bucketStorage struct {
10 | es *eventStorage
11 | rs *ruleStorage
12 | }
13 |
14 | func (b *bucketStorage) stash(ruleID string, event *events.Event) error {
15 | glog.Info("stash event ==> ", event)
16 | if b.es.bucketExists(ruleID) {
17 | return b.es.stash(rules.Rule{ID: ruleID}, event)
18 | }
19 |
20 | rule := b.rs.getRule(ruleID)
21 | return b.es.stash(*rule, event)
22 | }
23 |
--------------------------------------------------------------------------------
/ui/src/fakescripts.js:
--------------------------------------------------------------------------------
1 | const fakescripts = [
2 | {
3 | "id": "revenue.js",
4 | "Data": [99, 111, 110, 115, 111, 108, 101, 46, 108, 111, 103, 40, 39, 114, 101, 118, 101, 110, 117, 101, 95, 100, 111, 119, 110, 39, 41]
5 | },
6 | {
7 | "id": "cart.js",
8 | "Data": [99, 111, 110, 115, 111, 108, 101, 46, 108, 111, 103, 40, 39, 99, 97, 114, 116, 95, 100, 111, 119, 110, 39, 41]
9 | },
10 | {
11 | "id": "style.js",
12 | "Data": [99, 111, 110, 115, 111, 108, 101, 46, 108, 111, 103, 40, 39, 115, 116, 121, 108, 101, 95, 100, 111, 119, 110, 39, 41]
13 | }
14 | ]
15 |
16 | export default fakescripts;
--------------------------------------------------------------------------------
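Note: the "Data" arrays above are the UTF-8 byte values of each fake script's source; the first one decodes to console.log('revenue_down'), mirroring the Data []byte field of js.Script on the backend. A minimal standalone Go sketch of that decoding (not a file in this repo):

package main

import "fmt"

func main() {
	// Byte values copied from the "revenue.js" entry in fakescripts.js.
	data := []byte{99, 111, 110, 115, 111, 108, 101, 46, 108, 111, 103, 40, 39,
		114, 101, 118, 101, 110, 117, 101, 95, 100, 111, 119, 110, 39, 41}
	fmt.Println(string(data)) // console.log('revenue_down')
}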
/release.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | set -e
3 |
4 | if [ "$1" = "-h" ]; then
5 | echo "use | $ ./release prod | for a production release or just | $ ./release | for a snapshot release"
6 | exit 0
7 | fi
8 |
9 | if brew ls --versions goreleaser > /dev/null; then
10 | # The package is installed
11 | echo "goreleaser already installed"
12 | else
13 | brew install goreleaser
14 | fi
15 |
16 | if [ "$1" = "prod" ]; then
17 | echo "prodction release"
18 | goreleaser --rm-dist
19 | else
20 | goreleaser --rm-dist --snapshot
21 | echo "this is a snapshot release. for production release run: ./release prod"
22 | echo "run: "
23 | echo "./dist/darwin_amd64/cortex -stderrthreshold=INFO"
24 | fi
--------------------------------------------------------------------------------
/ui/src/App.css:
--------------------------------------------------------------------------------
1 | .App {
2 | text-align: center;
3 | }
4 |
5 | .App-logo {
6 | animation: App-logo-spin infinite 20s linear;
7 | height: 80px;
8 | }
9 |
10 | .App-header {
11 | background-color: #222;
12 | height: 150px;
13 | padding: 20px;
14 | color: white;
15 | }
16 |
17 | .App-title {
18 | font-size: 1.5em;
19 | }
20 |
21 | .App-intro {
22 | font-size: large;
23 | }
24 |
25 | @keyframes App-logo-spin {
26 | from { transform: rotate(0deg); }
27 | to { transform: rotate(360deg); }
28 | }
29 |
30 | .App-root-1{
31 | font-size: 12px !important;
32 | }
33 | .MuiTab-label-103 {
34 | font-size: 12px !important;
35 | }
36 | .MuiTablePagination-toolbar-476 {
37 | font-size: 10px !important;
38 | }
39 |
40 |
--------------------------------------------------------------------------------
/pkg/store/messages.go:
--------------------------------------------------------------------------------
1 | package store
2 |
3 | import (
4 | "github.com/myntra/cortex/pkg/executions"
5 | "github.com/myntra/cortex/pkg/js"
6 | "github.com/myntra/cortex/pkg/rules"
7 | )
8 |
9 | // MessageType of the data entry
10 | type MessageType uint8
11 |
12 | const (
13 | // RuleType denotes the rules.Rule type
14 | RuleType MessageType = 0
15 | // ScriptType denotes the script type
16 | ScriptType = 1
17 | // RecordType denotes the executions.Record type
18 | RecordType = 2
19 | )
20 |
21 | // Messages store entries to the underlying storage
22 | type Messages struct {
23 | Rules map[string]*rules.Rule `json:"rules"`
24 | Records map[string]*executions.Record `json:"records"`
25 | Scripts map[string]*js.Script `json:"script"`
26 | }
27 |
--------------------------------------------------------------------------------
/pkg/store/command.go:
--------------------------------------------------------------------------------
1 | package store
2 |
3 | import (
4 | "github.com/myntra/cortex/pkg/events"
5 | "github.com/myntra/cortex/pkg/executions"
6 | "github.com/myntra/cortex/pkg/js"
7 | "github.com/myntra/cortex/pkg/rules"
8 | )
9 |
10 | //go:generate msgp
11 |
12 | // Command is the container for a raft command
13 | type Command struct {
14 | Op string `json:"op"` // stash or evict
15 | Rule *rules.Rule `json:"rule,omitempty"`
16 | RuleID string `json:"ruleID,omitempty"`
17 | Event *events.Event `json:"event,omitempty"`
18 | ScriptID string `json:"script_id,omitempty"`
19 | Script *js.Script `json:"script,omitempty"`
20 | Record *executions.Record `json:"record,omitempty"`
21 | RecordID string `json:"record_id,omitempty"`
22 | }
23 |
--------------------------------------------------------------------------------
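Note: a minimal standalone sketch of wrapping an incoming event in a Command for the "stash" op (not a file in this repo; the rule ID and field values are illustrative):

package main

import (
	"time"

	"github.com/myntra/cortex/pkg/events"
	"github.com/myntra/cortex/pkg/store"
)

func main() {
	// Wrap an incoming event in a raft log command ("stash" or "evict", per the comment above).
	cmd := store.Command{
		Op:     "stash",
		RuleID: "rule-42", // illustrative rule ID
		Event: &events.Event{
			EventType: "acme.prod.search.node1.check_disk",
			Source:    "icinga",
			EventID:   "42",
			EventTime: time.Now(),
		},
	}
	_ = cmd // as the doc comment notes, Command is the container applied through raft
}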
/ui/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "ui",
3 | "version": "0.1.0",
4 | "private": true,
5 | "dependencies": {
6 | "@material-ui/core": "^1.4.1",
7 | "@material-ui/icons": "^2.0.0",
8 | "base-64": "^0.1.0",
9 | "brace": "^0.11.1",
10 | "react": "^16.4.1",
11 | "react-ace": "^6.1.4",
12 | "react-dom": "^16.4.1",
13 | "react-json-view": "^1.19.1",
14 | "react-jsonschema-form": "^1.0.3",
15 | "react-scripts": "1.1.4",
16 | "react-swipeable-views": "^0.12.15",
17 | "typeface-roboto": "^0.0.54",
18 | "uuid": "^3.3.2"
19 | },
20 | "scripts": {
21 | "start": "react-scripts start",
22 | "build": "react-scripts build && mv build ../cmd/",
23 | "test": "react-scripts test --env=jsdom",
24 | "eject": "react-scripts eject"
25 | },
26 | "proxy": "http://localhost:4445"
27 | }
28 |
--------------------------------------------------------------------------------
/pkg/config/config_test.go:
--------------------------------------------------------------------------------
1 | package config
2 |
3 | import (
4 | "net"
5 | "testing"
6 |
7 | "github.com/stretchr/testify/require"
8 | )
9 |
10 | func TestConfig(t *testing.T) {
11 |
12 | raftAddr := ":8878"
13 | httpAddr := ":8879"
14 |
15 | raftListener, err := net.Listen("tcp", raftAddr)
16 | require.NoError(t, err)
17 | httpListener, err := net.Listen("tcp", httpAddr)
18 | require.NoError(t, err)
19 |
20 | cfg := &Config{
21 | NodeID: "node0",
22 | Dir: "./data",
23 | JoinAddr: "",
24 | SnapshotInterval: 30,
25 | DefaultDwell: 3 * 60 * 1000, // 3 minutes
26 | DefaultMaxDwell: 6 * 60 * 1000, // 6 minutes
27 | DefaultDwellDeadline: 2.5 * 60 * 1000, // 2.5 minutes
28 | MaxHistory: 1000,
29 | FlushInterval: 1000,
30 | HTTPAddr: httpAddr,
31 | RaftAddr: raftAddr,
32 | HTTPListener: httpListener,
33 | RaftListener: raftListener,
34 | }
35 |
36 | err = cfg.Validate()
37 | require.NoError(t, err)
38 |
39 | }
40 |
--------------------------------------------------------------------------------
/ui/src/fakerules.js:
--------------------------------------------------------------------------------
1 | const fakerules = [
2 | {
3 | id:'1',
4 | title: "Revenue Down rule",
5 | scriptID: "revenue.js",
6 | hook_endpoint: "http://localhost:4000",
7 | hook_retry: "3",
8 | event_type_patterns: "com.acme.order.node1.check_disk,com.acme.checkout.node1.check_cpu",
9 | dwell: "120",
10 | dwell_deadline: "100",
11 | max_dwell: "240",
12 | },
13 | {
14 | id:'2',
15 | title: "Cart Down rule",
16 | scriptID: "card_down.js",
17 | hook_endpoint: "http://localhost:4000",
18 | hook_retry: "3",
19 | event_type_patterns: "com.acme.cart.node1.check_disk",
20 | dwell: "120",
21 | dwell_deadline: "100",
22 | max_dwell: "240",
23 | },
24 | {
25 | id:'3',
26 | title: "Style Down rule",
27 | scriptID: "style_down.js",
28 | hook_endpoint: "http://localhost:4000",
29 | hook_retry: "3",
30 | event_type_patterns: "com.acme.style.node1.check_node",
31 | dwell: "120",
32 | dwell_deadline: "100",
33 | max_dwell: "240",
34 | }
35 | ];
36 |
37 | export default fakerules;
--------------------------------------------------------------------------------
/pkg/js/js.go:
--------------------------------------------------------------------------------
1 | package js
2 |
3 | import (
4 | "context"
5 |
6 | "github.com/golang/glog"
7 | "github.com/loadimpact/k6/js"
8 | "github.com/loadimpact/k6/lib"
9 | "github.com/loadimpact/k6/stats"
10 | "github.com/spf13/afero"
11 | )
12 |
13 | //go:generate msgp
14 |
15 | // Script contains the javascript code
16 | type Script struct {
17 | ID string `json:"id"`
18 | Data []byte `json:"data"`
19 | }
20 |
21 | // Execute js
22 | func Execute(script *Script, data interface{}) interface{} {
23 | if script == nil || len(script.ID) == 0 {
24 | return nil
25 | }
26 |
27 | r, err := js.New(&lib.SourceData{
28 | Filename: script.ID,
29 | Data: script.Data,
30 | }, afero.NewMemMapFs(), lib.RuntimeOptions{})
31 |
32 | if err != nil {
33 | return err
34 | }
35 | glog.Infof("%v", data)
36 | r.SetSetupData(data)
37 |
38 | vu, err := r.NewVU(make(chan stats.SampleContainer, 100))
39 | if err != nil {
40 | return err
41 | }
42 |
43 | vuc, ok := vu.(*js.VU)
44 |
45 | if !ok {
46 | return err
47 | }
48 |
49 | err = vu.RunOnce(context.Background())
50 | if err != nil {
51 | return err
52 | }
53 |
54 | result := vuc.Runtime.Get("result")
55 |
56 | if result == nil {
57 | return nil
58 | }
59 |
60 | return result.Export()
61 | }
62 |
--------------------------------------------------------------------------------
/pkg/events/sinks/icinga_test.go:
--------------------------------------------------------------------------------
1 | package sinks
2 |
3 | import (
4 | "fmt"
5 | "reflect"
6 | "testing"
7 |
8 | "github.com/fatih/structs"
9 | )
10 |
11 | var icingaAlert = IcingaAlert{
12 | HostAlias: "hostname-alias",
13 | HostAddress: "1.2.3.4",
14 | ServiceState: "CRITICAL",
15 | ServiceOutput: "connect to address 1.2.3.4 and port 5000: Connection refused",
16 | NotificationAuthorName: "",
17 | ServiceDisplayName: "servicename-26378",
18 | ServiceDescription: "servicename-26378",
19 | LongDateTime: "2032-07-30 19:16:14 +0530",
20 | NotificationComment: "",
21 | HostDisplayName: "hostname",
22 | NotificationType: "PROBLEM",
23 | }
24 |
25 | func TestEventFromIcinga(t *testing.T) {
26 | event := EventFromIcinga(icingaAlert)
27 | if event.EventType != fmt.Sprintf("%s.%s.%s", icingaAlert.ServiceDisplayName, icingaAlert.HostDisplayName, icingaAlert.ServiceOutput) {
28 | t.Errorf("Event type not matching. expected : %s, got: %s", fmt.Sprintf("%s.%s.%s", icingaAlert.ServiceDisplayName, icingaAlert.HostDisplayName, icingaAlert.ServiceOutput), event.EventType)
29 | }
30 | if !reflect.DeepEqual(event.Data, structs.New(icingaAlert).Map()) {
31 | t.Errorf("Event data not matching. expected : %v, got: %v", icingaAlert, event.Data)
32 | }
33 | t.Log("TestEventFromIcinga completed")
34 | }
35 |
--------------------------------------------------------------------------------
/pkg/matcher/matcher_test.go:
--------------------------------------------------------------------------------
1 | package matcher
2 |
3 | import (
4 | "fmt"
5 | "testing"
6 |
7 | "github.com/stretchr/testify/require"
8 | )
9 |
10 | var matcherTests = []struct {
11 | pattern string // rule pattern
12 | eventType string // event.EventType
13 | expected bool // expected result
14 | }{
15 | {"acme*", "acme", false},
16 | {"acme*", "acme.prod", true},
17 | {"acme.prod*", "acme.prod.search", true},
18 | {"acme.prod*.checkout", "acme.prod.search", false},
19 | {"acme.prod*.*", "acme.prod.search", false},
20 | {"acme.prod*.*", "acme.prod-1.search", true},
21 | {"acme.prod.*.*.*", "acme.prod.search.node1.check_disk", true},
22 | {"acme.prod.*.*.check_disk", "acme.prod.search.node1.check_disk", true},
23 | {"acme.prod.*.*.check_loadavg", "acme.prod.search.node1.check_disk", false},
24 | {"*.prod.*.*.check_loadavg", "acme.prod.search.node1.check_loadavg", true},
25 | {"acme.prod.*", "acme.prod.search.node1.check_disk", true},
26 | {"acme.prod.search.node*.check_disk", "acme.prod.search.node1.check_disk", true},
27 | {"acme.prod.search.node*.*", "acme.prod.search.node1.check_disk", true},
28 | {"acme.prod.search.dc1-node*.*", "acme.prod.search.node1.check_disk", false},
29 | }
30 |
31 | func TestMatchers(t *testing.T) {
32 | for _, tc := range matcherTests {
33 | t.Run(fmt.Sprintf("Test if(%v==%v)", tc.eventType, tc.pattern), func(t *testing.T) {
34 | m, err := New(tc.pattern)
35 | require.NoError(t, err)
36 | hasMatch := m.HasMatches(tc.eventType)
37 | require.Equal(t, tc.expected, hasMatch)
38 | })
39 | }
40 | }
41 |
--------------------------------------------------------------------------------
/pkg/events/sinks/icinga.go:
--------------------------------------------------------------------------------
1 | package sinks
2 |
3 | import (
4 | "fmt"
5 | "time"
6 |
7 | "github.com/fatih/structs"
8 | "github.com/myntra/cortex/pkg/events"
9 | )
10 |
11 | // IcingaAlert structure for Icinga alert
12 | type IcingaAlert struct {
13 | NotificationType string `json:"notification_type"`
14 | ServiceDescription string `json:"service_description"`
15 | HostAlias string `json:"host_alias"`
16 | HostAddress string `json:"host_address"`
17 | ServiceState string `json:"service_state"`
18 | LongDateTime string `json:"long_date_time"`
19 | ServiceOutput string `json:"service_output"`
20 | NotificationAuthorName string `json:"notification_author_name"`
21 | NotificationComment string `json:"notification_comment"`
22 | HostDisplayName string `json:"host_display_name"`
23 | ServiceDisplayName string `json:"service_display_name"`
24 | }
25 |
26 | // EventFromIcinga converts alerts sent from icinga into cloud events
27 | func EventFromIcinga(alert IcingaAlert) *events.Event {
28 | event := events.Event{
29 | Source: "icinga",
30 | Data: structs.New(alert).Map(),
31 | ContentType: "application/json",
32 | EventTypeVersion: "1.0",
33 | CloudEventsVersion: "0.1",
34 | SchemaURL: "",
35 | EventID: generateUUID().String(),
36 | EventTime: time.Now(),
37 | EventType: fmt.Sprintf("%s.%s.%s", alert.ServiceDisplayName, alert.HostDisplayName, alert.ServiceOutput),
38 | }
39 | return &event
40 | }
41 |
--------------------------------------------------------------------------------
/pkg/events/sinks/site247_test.go:
--------------------------------------------------------------------------------
1 | package sinks
2 |
3 | import (
4 | "fmt"
5 | "reflect"
6 | "testing"
7 |
8 | "github.com/fatih/structs"
9 | )
10 |
11 | var site247Alert = Site247Alert{
12 | MonitorName: "brand_test",
13 | MonitorGroupName: "search",
14 | SearchPollFrequency: 1,
15 | MonitorID: 2136797812307,
16 | FailedLocations: "Delhi,Bangalore",
17 | MonitorURL: "https://localhost:4000/search?query=brand_test",
18 | IncidentTimeISO: "2018-07-24T18:43:08+0530",
19 | MonitorType: "URL",
20 | Status: "DOWN",
21 | Timezone: "Asia/Calcutta",
22 | IncidentTime: "July 24, 2018 6:43 PM IST",
23 | IncidentReason: "Host Unavailable",
24 | OutageTimeUnixFormat: "1532437988741",
25 | RCALink: "https://www.rcalinkdummy.com/somelink",
26 | Tags: []map[string]interface{}{{"tag": "value"}},
27 | }
28 |
29 | func TestEventFromSite247(t *testing.T) {
30 | event := EventFromSite247(site247Alert)
31 | if event.EventType != fmt.Sprintf("site247.%s.%s.%s", site247Alert.MonitorGroupName, site247Alert.MonitorName, site247Alert.Status) {
32 | t.Errorf("Event type not matching. expected : %s, got: %s", fmt.Sprintf("site247.%s.%s.%s", site247Alert.MonitorGroupName, site247Alert.MonitorName, site247Alert.Status), event.EventType)
33 | }
34 | if !reflect.DeepEqual(event.Data, structs.New(site247Alert).Map()) {
35 | t.Errorf("Event data not matching. expected : %v, got: %v", site247Alert, event.Data)
36 | }
37 | t.Log("TestEventFromSite247 completed")
38 | }
39 |
--------------------------------------------------------------------------------
/pkg/js/js_test.go:
--------------------------------------------------------------------------------
1 | package js
2 |
3 | import (
4 | "testing"
5 |
6 | "github.com/stretchr/testify/require"
7 |
8 | "github.com/dop251/goja"
9 | )
10 |
11 | func TestSimple(t *testing.T) {
12 | script := []byte(`
13 | let result = 0;
14 | export default function() { result++; }`)
15 |
16 | result := Execute(&Script{ID: "myscript.js", Data: script}, 0)
17 | require.NotNil(t, result)
18 | require.Equal(t, int64(1), result.(int64))
19 |
20 | }
21 |
22 | func TestSimpleBad(t *testing.T) {
23 | script := []byte(`
24 | let result = 0;
25 | export default function() { result++; `)
26 |
27 | result := Execute(&Script{ID: "myscript.js", Data: script}, 0)
28 | require.NotNil(t, result)
29 | _, ok := result.(*goja.Exception)
30 | require.Equal(t, true, ok)
31 | }
32 |
33 | func TestData(t *testing.T) {
34 | script := []byte(`
35 | let result = 0;
36 | export default function(data) { result = result + data.key;}`)
37 |
38 | result := Execute(&Script{ID: "myscript.js", Data: script}, map[string]interface{}{"key": 5})
39 | require.NotNil(t, result)
40 | require.Equal(t, int64(5), result.(int64))
41 | }
42 |
43 | func TestException(t *testing.T) {
44 | script := []byte(`
45 | import http from "k6/http";
46 | import moment from "cdnjs.com/libraries/moment.js/2.18.1";
47 |
48 | export default function() {
49 | http.get("http://test.loadimpact.com/");
50 | console.log(moment().format());
51 | throw "execption"
52 | }`)
53 |
54 | result := Execute(&Script{ID: "myscript.js", Data: script}, nil)
55 |
56 | _, ok := result.(*goja.Exception)
57 | require.Equal(t, true, ok)
58 | err, ok2 := result.(error)
59 | require.Equal(t, true, ok2)
60 | require.Error(t, err)
61 | }
62 |
--------------------------------------------------------------------------------
/.goreleaser.yml:
--------------------------------------------------------------------------------
1 | project_name: cortex
2 | before:
3 | hooks:
4 | - go get github.com/GeertJohan/go.rice/rice
5 | - go get ./...
6 | - rm -rf $GOPATH/src/github.com/loadimpact/k6/vendor/github.com/dop251/goja
7 | - rm -rf $GOPATH/src/github.com/loadimpact/k6/vendor/github.com/spf13/afero
8 | - bash build_ui.sh
9 | - go test ./...
10 | builds:
11 | -
12 | main: ./cmd/
13 | env:
14 | - CGO_ENABLED=0
15 | binary: cortex
16 | goos:
17 | - linux
18 | - darwin
19 | goarch:
20 | - amd64
21 | archive:
22 | format: tar.gz
23 | name_template: "{{ .Version }}_{{ .Os }}_{{ .Arch }}"
24 | replacements:
25 | linux: Linux
26 | amd64: x86_64
27 | checksum:
28 | name_template: 'checksums.txt'
29 | snapshot:
30 | name_template: "{{ .Tag }}-next"
31 | changelog:
32 | sort: asc
33 | filters:
34 | exclude:
35 | - '^docs:'
36 | - '^test:'
37 | git:
38 | short_hash: true
39 | release:
40 | # You can disable this pipe in order to not upload any artifacts to
41 | # GitHub.
42 | disable: true
43 | puts:
44 | -
45 | # Unique name of your Put instance. Used to identify the instance.
46 | name: up
47 | # Upload mode. Valid options are `binary` and `archive`.
48 | # If mode is `archive`, variables _Os_, _Arch_ and _Arm_ for target name are not supported.
49 | # In that case these variables are empty.
50 | # Default is `archive`.
51 | mode: archive
52 | # URL to be used as target of the HTTP PUT request
53 | target: http://acme.com/upload
54 | # User that will be used for the deployment
55 | username: goreleaser
56 | # Upload checksums (defaults to false)
57 | # checksum: true
58 | # Upload signatures (defaults to false)
59 | # signature: true
--------------------------------------------------------------------------------
/pkg/store/rule_storage.go:
--------------------------------------------------------------------------------
1 | package store
2 |
3 | import (
4 | "fmt"
5 | "sync"
6 |
7 | "github.com/myntra/cortex/pkg/rules"
8 | )
9 |
10 | type ruleStorage struct {
11 | mu sync.RWMutex
12 | m map[string]*rules.Rule // [ruleID]
13 | }
14 |
15 | func (r *ruleStorage) getRule(ruleID string) *rules.Rule {
16 | r.mu.Lock()
17 | defer r.mu.Unlock()
18 | var rule *rules.Rule
19 | var ok bool
20 | if rule, ok = r.m[ruleID]; !ok {
21 | return nil
22 | }
23 | return rule
24 | }
25 |
26 | func (r *ruleStorage) addRule(rule *rules.Rule) error {
27 | r.mu.Lock()
28 | defer r.mu.Unlock()
29 |
30 | if _, ok := r.m[rule.ID]; ok {
31 | return fmt.Errorf("rule id already exists")
32 | }
33 |
34 | r.m[rule.ID] = rule
35 | return nil
36 | }
37 |
38 | func (r *ruleStorage) updateRule(rule *rules.Rule) error {
39 | r.mu.Lock()
40 | defer r.mu.Unlock()
41 |
42 | if _, ok := r.m[rule.ID]; !ok {
43 | return fmt.Errorf("rule id does not exist")
44 | }
45 |
46 | r.m[rule.ID] = rule
47 |
48 | return nil
49 | }
50 |
51 | func (r *ruleStorage) removeRule(ruleID string) error {
52 | r.mu.Lock()
53 | defer r.mu.Unlock()
54 |
55 | if _, ok := r.m[ruleID]; !ok {
56 | return fmt.Errorf("rule id does not exist")
57 | }
58 |
59 | delete(r.m, ruleID)
60 |
61 | return nil
62 | }
63 |
64 | func (r *ruleStorage) getRules() []*rules.Rule {
65 | r.mu.Lock()
66 | defer r.mu.Unlock()
67 | var rules []*rules.Rule
68 | for _, rule := range r.m {
69 | rules = append(rules, rule)
70 | }
71 | return rules
72 | }
73 |
74 | func (r *ruleStorage) clone() map[string]*rules.Rule {
75 | r.mu.Lock()
76 | defer r.mu.Unlock()
77 | clone := make(map[string]*rules.Rule)
78 | for k, v := range r.m {
79 | clone[k] = v
80 | }
81 | return clone
82 | }
83 |
84 | func (r *ruleStorage) restore(m map[string]*rules.Rule) {
85 | r.m = m
86 | }
87 |
--------------------------------------------------------------------------------
/pkg/matcher/match.go:
--------------------------------------------------------------------------------
1 | package matcher
2 |
3 | import (
4 | "fmt"
5 | "regexp"
6 | "strings"
7 | )
8 |
9 | var metricLineRE = regexp.MustCompile(`^(\*\.|[^.]+\.|\.)*(\*|[^.]+)$`)
10 |
11 | // Matcher matches a rule.EventTypePatterns patterns with eventTypePatterns
12 | type Matcher struct {
13 | regex *regexp.Regexp
14 | }
15 |
16 | // New accepts a rulePattern
17 | func New(rulePattern string) (*Matcher, error) {
18 | regex, err := getRegexp(rulePattern)
19 | if err != nil {
20 | return nil, err
21 | }
22 |
23 | m := &Matcher{
24 | regex: regex,
25 | }
26 |
27 | return m, nil
28 | }
29 |
30 | // NewCompile accepts a regex string
31 | func NewCompile(regexStr string) *Matcher {
32 | return &Matcher{
33 | regex: regexp.MustCompile(regexStr),
34 | }
35 | }
36 |
37 | // GetRegexString returns the compiled regex string
38 | func (m *Matcher) GetRegexString() string {
39 | return m.regex.String()
40 | }
41 |
42 | // HasMatches checks if eventType has matches with the supplied regex
43 | func (m *Matcher) HasMatches(eventType string) bool {
44 | matches := m.regex.FindStringSubmatchIndex(eventType)
45 | if len(matches) > 0 {
46 | return true
47 | }
48 | return false
49 | }
50 |
51 | // getRegexp returns a *regexp.Regexp for the pattern
52 | // reference: https://github.com/prometheus/graphite_exporter/blob/master/mapper.go#L65
53 | func getRegexp(rulePattern string) (*regexp.Regexp, error) {
54 | var regex *regexp.Regexp
55 |
56 | if !metricLineRE.MatchString(rulePattern) {
57 | return nil, fmt.Errorf("unexpected pattern %v. must match %v", rulePattern, metricLineRE.String())
58 | }
59 |
60 | rulePatternRe := strings.Replace(rulePattern, ".", "\\.", -1)
61 | rulePatternRe = strings.Replace(rulePatternRe, "*", "([^*]+)", -1)
62 | regex = regexp.MustCompile("^" + rulePatternRe + "$")
63 | return regex, nil
64 | }
65 |
--------------------------------------------------------------------------------
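Note: New anchors the pattern and rewrites it into a regular expression by escaping dots and replacing each "*" with "([^*]+)", so "acme.prod.*" compiles to ^acme\.prod\.([^*]+)$. A minimal standalone usage sketch (not a file in this repo):

package main

import (
	"fmt"

	"github.com/myntra/cortex/pkg/matcher"
)

func main() {
	m, err := matcher.New("acme.prod.*")
	if err != nil {
		panic(err)
	}
	fmt.Println(m.GetRegexString())                                // ^acme\.prod\.([^*]+)$
	fmt.Println(m.HasMatches("acme.prod.search.node1.check_disk")) // true
	fmt.Println(m.HasMatches("acme.staging.search"))               // false
}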
/pkg/store/execution_storage.go:
--------------------------------------------------------------------------------
1 | package store
2 |
3 | import (
4 | "sync"
5 |
6 | "github.com/golang/glog"
7 |
8 | "github.com/myntra/cortex/pkg/executions"
9 | )
10 |
11 | type executionStorage struct {
12 | mu sync.RWMutex
13 | m map[string]*executions.Record
14 | }
15 |
16 | func (e *executionStorage) add(r *executions.Record) error {
17 | e.mu.Lock()
18 | defer e.mu.Unlock()
19 |
20 | e.m[r.ID] = r
21 | return nil
22 | }
23 |
24 | func (e *executionStorage) remove(id string) error {
25 | e.mu.Lock()
26 | defer e.mu.Unlock()
27 |
28 | delete(e.m, id)
29 | return nil
30 | }
31 |
32 | func (e *executionStorage) getRecords(ruleID string) []*executions.Record {
33 | e.mu.Lock()
34 | defer e.mu.Unlock()
35 |
36 | glog.Infof("getRecords %v", ruleID)
37 |
38 | var exs []*executions.Record
39 | for _, record := range e.m {
40 | if record.Bucket.Rule.ID == ruleID {
41 | exs = append(exs, record)
42 | }
43 | }
44 | return exs
45 | }
46 |
47 | func (e *executionStorage) getRecordsCount(ruleID string) int {
48 | e.mu.Lock()
49 | defer e.mu.Unlock()
50 | count := 0
51 | for _, record := range e.m {
52 | if record.Bucket.Rule.ID == ruleID {
53 | count++
54 | }
55 | }
56 |
57 | return count
58 | }
59 |
60 | func (e *executionStorage) getTotalRecordsCount() int {
61 | e.mu.Lock()
62 | defer e.mu.Unlock()
63 | return len(e.m)
64 | }
65 |
66 | func (e *executionStorage) flush(id string) {
67 | e.mu.Lock()
68 | defer e.mu.Unlock()
69 |
70 | delete(e.m, id)
71 | }
72 |
73 | func (e *executionStorage) clone() map[string]*executions.Record {
74 | e.mu.Lock()
75 | defer e.mu.Unlock()
76 | clone := make(map[string]*executions.Record)
77 | for k, v := range e.m {
78 | clone[k] = v
79 | }
80 | return clone
81 | }
82 |
83 | func (e *executionStorage) restore(m map[string]*executions.Record) {
84 | e.m = m
85 | }
86 |
--------------------------------------------------------------------------------
/ui/public/index.html:
--------------------------------------------------------------------------------
1 | <!DOCTYPE html>
2 | <html lang="en">
3 | <head>
4 | <meta charset="utf-8">
5 | <meta name="viewport" content="width=device-width, initial-scale=1, shrink-to-fit=no">
6 | <meta name="theme-color" content="#000000">
7 | <link rel="manifest" href="%PUBLIC_URL%/manifest.json">
8 | <link rel="shortcut icon" href="%PUBLIC_URL%/favicon.ico">
9 | <title>Cortex</title>
10 | </head>
11 | <body>
12 | <noscript>You need to enable JavaScript to run this app.</noscript>
13 | <div id="root"></div>
14 | </body>
15 | </html>
--------------------------------------------------------------------------------
/pkg/store/script_storage.go:
--------------------------------------------------------------------------------
1 | package store
2 |
3 | import (
4 | "fmt"
5 | "sync"
6 |
7 | "github.com/myntra/cortex/pkg/js"
8 | )
9 |
10 | type scriptStorage struct {
11 | mu sync.RWMutex
12 | m map[string]*js.Script
13 | }
14 |
15 | func (s *scriptStorage) addScript(script *js.Script) error {
16 |
17 | s.mu.Lock()
18 | defer s.mu.Unlock()
19 |
20 | if _, ok := s.m[script.ID]; ok {
21 | return fmt.Errorf("script name already exists. script name must be unique")
22 | }
23 |
24 | s.m[script.ID] = script
25 |
26 | return nil
27 | }
28 |
29 | func (s *scriptStorage) updateScript(script *js.Script) error {
30 |
31 | s.mu.Lock()
32 | defer s.mu.Unlock()
33 |
34 | if _, ok := s.m[script.ID]; !ok {
35 | return fmt.Errorf("script name not found. can't update")
36 | }
37 |
38 | s.m[script.ID] = script
39 | return nil
40 | }
41 |
42 | func (s *scriptStorage) removeScript(id string) error {
43 |
44 | s.mu.Lock()
45 | defer s.mu.Unlock()
46 |
47 | if _, ok := s.m[id]; !ok {
48 | return fmt.Errorf("script name not found. can't remove")
49 | }
50 |
51 | delete(s.m, id)
52 |
53 | return nil
54 | }
55 |
56 | func (s *scriptStorage) getScript(id string) *js.Script {
57 |
58 | s.mu.Lock()
59 | defer s.mu.Unlock()
60 |
61 | if _, ok := s.m[id]; !ok {
62 | return nil
63 | }
64 |
65 | return s.m[id]
66 | }
67 |
68 | func (s *scriptStorage) getScripts() []string {
69 |
70 | s.mu.Lock()
71 | defer s.mu.Unlock()
72 |
73 | var ids []string
74 |
75 | for k := range s.m {
76 | ids = append(ids, k)
77 | }
78 |
79 | return ids
80 | }
81 |
82 | func (s *scriptStorage) clone() map[string]*js.Script {
83 | s.mu.Lock()
84 | defer s.mu.Unlock()
85 | scripts := make(map[string]*js.Script)
86 | for k, v := range s.m {
87 | scripts[k] = v
88 | }
89 | return scripts
90 | }
91 |
92 | func (s *scriptStorage) restore(m map[string]*js.Script) {
93 | s.m = m
94 | }
95 |
--------------------------------------------------------------------------------
/pkg/events/event_test.go:
--------------------------------------------------------------------------------
1 | package events
2 |
3 | import (
4 | "bytes"
5 | "testing"
6 | "time"
7 |
8 | "github.com/stretchr/testify/require"
9 | )
10 |
11 | type exampleData struct {
12 | Alpha string `json:"alpha"`
13 | Beta int `json:"beta"`
14 | }
15 |
16 | func TestEventHashMatch(t *testing.T) {
17 | existingEvent := &Event{
18 |
19 | EventType: "com.event.fortytwo",
20 | EventTypeVersion: "1.0",
21 | CloudEventsVersion: "0.1",
22 | Source: "/sink",
23 | EventID: "42",
24 | EventTime: time.Now(),
25 | SchemaURL: "http://www.json.org",
26 | ContentType: "application/json",
27 | Data: &exampleData{Alpha: "julie", Beta: 42},
28 | Extensions: map[string]string{"ext1": "value"},
29 | }
30 |
31 | incomingEventDuplicate := &Event{
32 |
33 | EventType: "com.event.fortytwo",
34 | EventTypeVersion: "1.0",
35 | CloudEventsVersion: "0.1",
36 | Source: "/sink",
37 | EventID: "43",
38 | EventTime: time.Now(),
39 | SchemaURL: "http://www.json.org",
40 | ContentType: "application/json",
41 | Data: &exampleData{Alpha: "julie", Beta: 42},
42 | Extensions: map[string]string{"ext1": "value"},
43 | }
44 |
45 | incomingEventUnique := &Event{
46 | EventType: "com.event.fortytwo",
47 | EventTypeVersion: "1.0",
48 | CloudEventsVersion: "0.1",
49 | Source: "/sink",
50 | EventID: "43",
51 | EventTime: time.Now(),
52 | SchemaURL: "http://www.json.org",
53 | ContentType: "application/json",
54 | Data: &exampleData{Alpha: "bobby", Beta: 100},
55 | Extensions: map[string]string{"ext1": "value"},
56 | }
57 |
58 | hash1 := existingEvent.Hash()
59 | hash2 := incomingEventDuplicate.Hash()
60 | hash3 := incomingEventUnique.Hash()
61 |
62 | require.True(t, bytes.Equal(hash1, hash2))
63 | require.False(t, bytes.Equal(hash2, hash3))
64 |
65 | }
66 |
--------------------------------------------------------------------------------
/pkg/events/sinks/azure_test.go:
--------------------------------------------------------------------------------
1 | package sinks
2 |
3 | import (
4 | "fmt"
5 | "reflect"
6 | "testing"
7 | "github.com/fatih/structs"
8 | )
9 |
10 | var azureAlert = AzureAlert{
11 | SchemaID: "Microsoft.Insights/activityLogs",
12 | Data: AzureData{
13 | Status: "Activated",
14 | Context: AzureContext{
15 | Activity: AzureActivity{
16 | Channels: "Admin, Operation",
17 | CorrelationID: "a1be61fd-37ur-ba05-b827-cb874708babf",
18 | EventSource: "ResourceHealth",
19 | EventTimestamp: "2018-09-04T23:09:03.343+00:00",
20 | Level: "Informational",
21 | OperationName: "Microsoft.Resourcehealth/healthevent/Activated/action",
22 | OperationID: "2b37e2d0-7bda-489f-81c6-1447d02265b2",
23 | Properties: AzureActivityProperty{
24 | Title: "Virtual Machine health status changed to unavailable",
25 | Details: "Virtual machine has experienced an unexpected event",
26 | CurrentHealthStatus: "Unavailable",
27 | PreviousHealthStatus: "Available",
28 | Type: "Downtime",
29 | Cause: "PlatformInitiated",
30 | },
31 | ResourceID: "/subscriptions//resourceGroups//providers/Microsoft.Compute/virtualMachines/",
32 | ResourceGroupName: "",
33 | ResourceProviderName: "Microsoft.Resourcehealth/healthevent/action",
34 | Status: "Active",
35 | SubscriptionID: "",
36 | SubmissionTimestamp: "2018-09-04T23:11:06.1607287+00:00",
37 | ResourceType: "Microsoft.Compute/virtualMachines",
38 | },
39 | },
40 | },
41 | }
42 |
43 | func TestEventFromAzure(t *testing.T) {
44 | event := EventFromAzure(azureAlert)
45 | if event.EventType != fmt.Sprintf("azure.%s", azureAlert.Data.Context.Activity.ResourceID) {
46 | t.Errorf("Event type not matching. expected : %s, got: %s", fmt.Sprintf("azure.%s", azureAlert.Data.Context.Activity.ResourceID), event.EventType)
47 | }
48 | if !reflect.DeepEqual(event.Data, structs.New(azureAlert).Map()) {
49 | t.Errorf("Event data not matching. expected : %v, got: %v", azureAlert, event.Data)
50 | }
51 | t.Log("TestEventFromAzure completed")
52 | }
53 |
--------------------------------------------------------------------------------
/pkg/events/sinks/site247.go:
--------------------------------------------------------------------------------
1 | package sinks
2 |
3 | import (
4 | "fmt"
5 | "time"
6 |
7 | "github.com/fatih/structs"
8 | "github.com/myntra/cortex/pkg/events"
9 | "github.com/satori/go.uuid"
10 | )
11 |
12 | // Site247Alert structure for site24x7 alert
13 | type Site247Alert struct {
14 | MonitorName string `json:"MONITORNAME,omitempty"`
15 | MonitorGroupName string `json:"MONITOR_GROUPNAME,omitempty"`
16 | SearchPollFrequency int `json:"SEARCH POLLFREQUENCY,omitempty"`
17 | MonitorID int `json:"MONITOR_ID,omitempty"`
18 | FailedLocations string `json:"FAILED_LOCATIONS,omitempty"`
19 | MonitorURL string `json:"MONITORURL,omitempty"`
20 | IncidentTimeISO string `json:"INCIDENT_TIME_ISO,omitempty"`
21 | MonitorType string `json:"MONITORTYPE,omitempty"`
22 | Status string `json:"STATUS,omitempty"`
23 | Timezone string `json:"TIMEZONE,omitempty"`
24 | IncidentTime string `json:"INCIDENT_TIME,omitempty"`
25 | IncidentReason string `json:"INCIDENT_REASON,omitempty"`
26 | OutageTimeUnixFormat string `json:"OUTAGE_TIME_UNIX_FORMAT,omitempty"`
27 | RCALink string `json:"RCA_LINK,omitempty"`
28 | Tags []map[string]interface{} `json:"JSON_TAGS,omitempty"`
29 | }
30 |
31 | // EventFromSite247 converts alerts sent from site24x7 into cloud events
32 | func EventFromSite247(alert Site247Alert) *events.Event {
33 | event := events.Event{
34 | Source: "site247",
35 | Data: structs.New(alert).Map(),
36 | ContentType: "application/json",
37 | EventTypeVersion: "1.0",
38 | CloudEventsVersion: "0.1",
39 | SchemaURL: "",
40 | EventID: generateUUID().String(),
41 | EventTime: time.Now(),
42 | EventType: fmt.Sprintf("site247.%s.%s.%s", alert.MonitorGroupName, alert.MonitorName, alert.Status),
43 | }
44 | return &event
45 | }
46 |
47 | func generateUUID() uuid.UUID {
48 | uid := uuid.NewV4()
49 | return uid
50 | }
51 |
--------------------------------------------------------------------------------
/pkg/util/util.go:
--------------------------------------------------------------------------------
1 | package util
2 |
3 | import (
4 | "bytes"
5 | "encoding/json"
6 | "fmt"
7 | "io/ioutil"
8 | "net"
9 | "net/http"
10 | "time"
11 |
12 | "github.com/golang/glog"
13 | "github.com/pkg/errors"
14 | "github.com/sethgrid/pester"
15 | )
16 |
17 | // JoinRequest is the request to join a node
18 | type JoinRequest struct {
19 | NodeID string `json:"nodeID"`
20 | Addr string `json:"addr"`
21 | }
22 |
23 | // Validate validates the request
24 | func (j *JoinRequest) Validate() error {
25 |
26 | if j.NodeID == "" {
27 | return fmt.Errorf("nodeID is empty")
28 | }
29 |
30 | _, err := net.DialTimeout("tcp", j.Addr, time.Second*3)
31 | if err != nil {
32 | return fmt.Errorf("invalid addr %v", err)
33 | }
34 |
35 | return nil
36 | }
37 |
38 | // ErrStatus sends a http error status
39 | func ErrStatus(w http.ResponseWriter, r *http.Request, message string, statusCode int, err error) {
40 | var content []byte
41 | var e error
42 |
43 | content, e = ioutil.ReadAll(r.Body)
44 | if e != nil {
45 | glog.Error("ioutil.ReadAll failed")
46 | }
47 |
48 | glog.Errorf("msg %v, r.Body %v, err: %v", message, string(content), errors.Wrap(err, ""))
49 |
50 | http.Error(w, message, statusCode)
51 | }
52 |
53 | // RetryPost posts the value to a remote endpoint. also retries
54 | func RetryPost(val interface{}, url string, retry int) int {
55 |
56 | b := new(bytes.Buffer)
57 | err := json.NewEncoder(b).Encode(val)
58 | if err != nil {
59 | glog.Errorf("http post bucket encoding failed. %v %v", err, url)
60 | return http.StatusInternalServerError
61 | }
62 | req, err := http.NewRequest("POST", url, b)
63 | if err != nil {
64 | glog.Errorf("http post rule bucket newrequest failed. %v %v", err, url)
65 | return http.StatusInternalServerError
66 | }
67 | req.Header.Add("Content-type", "application/json")
68 |
69 | client := pester.New()
70 | client.MaxRetries = retry
71 | resp, err := client.Do(req)
72 | if err != nil {
73 | glog.Errorf("http post rule bucket client.Do failed %v %v", err, url)
74 | return http.StatusInternalServerError
75 | }
76 |
77 | defer resp.Body.Close()
78 |
79 | if resp.StatusCode != 200 && resp.StatusCode != 202 {
80 | glog.Errorf("http post rule bucket unexpected status code %v %v", err, resp.StatusCode)
81 | }
82 |
83 | return resp.StatusCode
84 | }
85 |
--------------------------------------------------------------------------------
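Note: RetryPost JSON-encodes the value, POSTs it with a pester client whose MaxRetries is set to retry, and returns the final HTTP status code (500 on encoding or request errors). A minimal standalone sketch (the endpoint URL and payload are illustrative):

package main

import (
	"fmt"

	"github.com/myntra/cortex/pkg/util"
)

func main() {
	payload := map[string]string{"status": "ok"} // illustrative payload
	// Hypothetical webhook endpoint; the status code of the last attempt is returned.
	code := util.RetryPost(payload, "http://localhost:4000/hook", 3)
	fmt.Println("hook status:", code)
}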
/pkg/events/sinks/azure.go:
--------------------------------------------------------------------------------
1 | package sinks
2 |
3 | import (
4 | "github.com/myntra/cortex/pkg/events"
5 | "time"
6 | "fmt"
7 | "github.com/fatih/structs"
8 | )
9 |
10 | type AzureAlert struct {
11 | SchemaID string `json:"schemaId"`
12 | Data AzureData `json:"data"`
13 | }
14 |
15 | type AzureData struct {
16 | Status string `json:"activated"`
17 | Context AzureContext `json:"context"`
18 | }
19 |
20 | type AzureContext struct {
21 | Activity AzureActivity `json:"activityLog"`
22 | }
23 |
24 | type AzureActivity struct {
25 | Channels string `json:"channels"`
26 | CorrelationID string `json:"correlationId"`
27 | EventSource string `json:"eventSource"`
28 | EventTimestamp string `json:"eventTimestamp"`
29 | EventDataID string `json:"eventDataId"`
30 | Level string `json:"level"`
31 | OperationName string `json:"operationName"`
32 | OperationID string `json:"operationId"`
33 | Properties AzureActivityProperty `json:"properties"`
34 | ResourceID string `json:"resourceId"`
35 | ResourceGroupName string `json:"resourceGroupName"`
36 | ResourceProviderName string `json:"resourceProviderName"`
37 | Status string `json:"status"`
38 | SubscriptionID string `json:"subscriptionId"`
39 | SubmissionTimestamp string `json:"submissionTimestamp"`
40 | ResourceType string `json:"resourceType"`
41 | }
42 |
43 | type AzureActivityProperty struct {
44 | Title string `json:"title"`
45 | Details string `json:"details"`
46 | CurrentHealthStatus string `json:"currentHealthStatus"`
47 | PreviousHealthStatus string `json:"previousHealthStatus"`
48 | Type string `json:"type"`
49 | Cause string `json:"cause"`
50 | }
51 |
52 | // EventFromAzure converts alerts sent from azure into cloud events
53 | func EventFromAzure(alert AzureAlert) *events.Event {
54 | event := events.Event{
55 | Source: "azure",
56 | Data: structs.New(alert).Map(),
57 | ContentType: "application/json",
58 | EventTypeVersion: "1.0",
59 | CloudEventsVersion: "0.1",
60 | SchemaURL: "",
61 | EventID: generateUUID().String(),
62 | EventTime: time.Now(),
63 | EventType: fmt.Sprintf("azure.%s", alert.Data.Context.Activity.ResourceID),
64 | }
65 | return &event
66 | }
67 |
--------------------------------------------------------------------------------
/ui/src/logo.svg:
--------------------------------------------------------------------------------
1 |
8 |
--------------------------------------------------------------------------------
/pkg/store/event_storage.go:
--------------------------------------------------------------------------------
1 | package store
2 |
3 | import (
4 | "bytes"
5 | "fmt"
6 | "sync"
7 |
8 | "github.com/golang/glog"
9 | "github.com/myntra/cortex/pkg/events"
10 | "github.com/myntra/cortex/pkg/rules"
11 | )
12 |
13 | type eventStorage struct {
14 | mu sync.RWMutex
15 | m map[string]*events.Bucket // [ruleID]
16 | }
17 |
18 | func (e *eventStorage) stash(rule rules.Rule, event *events.Event) error {
19 | e.mu.Lock()
20 | defer e.mu.Unlock()
21 | glog.Infof("stash event ==> %+v", event)
22 | ruleID := rule.ID
23 | if _, ok := e.m[ruleID]; !ok {
24 | bucket := events.NewBucket(rule)
25 | bucket.Events = append(bucket.Events, event)
26 | e.m[ruleID] = bucket
27 | return nil
28 | }
29 |
30 | // dedup, reschedule flusher(sliding wait window), frequency count
31 | dup := false
32 | for _, existingEvent := range e.m[ruleID].Events {
33 | // check if source is equal
34 | if existingEvent.Source == event.Source {
35 | // check if equal hash
36 | if bytes.Equal(existingEvent.Hash(), event.Hash()) {
37 | dup = true
38 | }
39 | }
40 | }
41 | // is a duplicate event, skip appending event to bucket
42 | if dup {
43 | return nil
44 | }
45 | // update event
46 | e.m[ruleID].AddEvent(event)
47 |
48 | return nil
49 | }
50 |
51 | func (e *eventStorage) flushLock(ruleID string) error {
52 | e.mu.Lock()
53 | defer e.mu.Unlock()
54 |
55 | if _, ok := e.m[ruleID]; !ok {
56 | return fmt.Errorf("bucket with id %v not found", ruleID)
57 | }
58 |
59 | // update flush lock
60 | bucket := e.m[ruleID]
61 | bucket.FlushLock = true
62 | e.m[ruleID] = bucket
63 |
64 | return nil
65 | }
66 |
67 | func (e *eventStorage) flushBucket(ruleID string) error {
68 | e.mu.Lock()
69 | defer e.mu.Unlock()
70 |
71 | if _, ok := e.m[ruleID]; !ok {
72 | return fmt.Errorf("bucket with id %v not found", ruleID)
73 | }
74 |
75 | delete(e.m, ruleID)
76 | return nil
77 | }
78 |
79 | func (e *eventStorage) bucketExists(ruleID string) bool {
80 | _, ok := e.m[ruleID]
81 | return ok
82 | }
83 |
84 | func (e *eventStorage) getBucket(ruleID string) *events.Bucket {
85 | e.mu.Lock()
86 | defer e.mu.Unlock()
87 | var rb *events.Bucket
88 | var ok bool
89 | if rb, ok = e.m[ruleID]; !ok {
90 | return nil
91 | }
92 | return rb
93 | }
94 |
95 | func (e *eventStorage) clone() map[string]*events.Bucket {
96 | e.mu.Lock()
97 | defer e.mu.Unlock()
98 | clone := make(map[string]*events.Bucket)
99 | for k, v := range e.m {
100 | clone[k] = v
101 | }
102 | return clone
103 | }
104 |
105 | func (e *eventStorage) restore(m map[string]*events.Bucket) {
106 | e.m = m
107 | }
108 |
--------------------------------------------------------------------------------
/pkg/store/fsm_snapshot.go:
--------------------------------------------------------------------------------
1 | package store
2 |
3 | import (
4 | "github.com/golang/glog"
5 | "github.com/tinylib/msgp/msgp"
6 |
7 | "github.com/hashicorp/raft"
8 | )
9 |
10 | type persister func(*Messages, *msgp.Writer, raft.SnapshotSink) error
11 |
12 | type fsmSnapShot struct {
13 | messages *Messages
14 | persisters []persister
15 | }
16 |
17 | func (f *fsmSnapShot) Release() {
18 | glog.Info("release =>")
19 | }
20 |
21 | func (f *fsmSnapShot) Persist(sink raft.SnapshotSink) error {
22 | glog.Info("persist =>")
23 |
24 | msgpWriter := msgp.NewWriter(sink)
25 |
26 | for _, fn := range f.persisters {
27 | if err := fn(f.messages, msgpWriter, sink); err != nil {
28 | err = sink.Cancel()
29 | glog.Errorf("persist err %v\n", err)
30 | return err
31 | }
32 | }
33 |
34 | return nil
35 | }
36 |
37 | func persistRules(messages *Messages, writer *msgp.Writer, sink raft.SnapshotSink) error {
38 |
39 | for _, rule := range messages.Rules {
40 | if _, err := sink.Write([]byte{byte(RuleType)}); err != nil {
41 | glog.Errorf("persistRules %v", err)
42 | continue
43 | }
44 |
45 | glog.Info("persist rule msg size ", rule.Msgsize())
46 | // Encode message.
47 | err := rule.EncodeMsg(writer)
48 | if err != nil {
49 | glog.Errorf("persistRules %v", err)
50 | continue
51 | }
52 |
53 | err = writer.Flush()
54 | glog.Infof("persistRules %+v %v\n", rule, err)
55 |
56 | }
57 |
58 | return nil
59 | }
60 |
61 | func persistScripts(messages *Messages, writer *msgp.Writer, sink raft.SnapshotSink) error {
62 |
63 | for _, script := range messages.Scripts {
64 | if _, err := sink.Write([]byte{byte(ScriptType)}); err != nil {
65 | glog.Errorf("persistScripts %v", err)
66 | continue
67 | }
68 |
69 | glog.Info("persist script msg size ", script.Msgsize())
70 |
71 | // Encode message.
72 | err := script.EncodeMsg(writer)
73 | if err != nil {
74 | glog.Errorf("persistScripts %v", err)
75 | continue
76 | }
77 |
78 | err = writer.Flush()
79 | glog.Infof("persistScripts %+v %v \n", script, err)
80 | }
81 | return nil
82 | }
83 |
84 | func persistRecords(messages *Messages, writer *msgp.Writer, sink raft.SnapshotSink) error {
85 |
86 | for _, record := range messages.Records {
87 | if _, err := sink.Write([]byte{byte(RecordType)}); err != nil {
88 | glog.Errorf("persistRecords %v", err)
89 | continue
90 | }
91 |
92 | glog.Info("persist record msg size ", record.Msgsize())
93 | // Encode message.
94 | err := record.EncodeMsg(writer)
95 | if err != nil {
96 | glog.Errorf("persistRecords %v", err)
97 | continue
98 | }
99 |
100 | err = writer.Flush()
101 | glog.Infof("persistRecords %+v %v \n", record, err)
102 | }
103 | return nil
104 | }
105 |
--------------------------------------------------------------------------------
/pkg/events/event_gen_test.go:
--------------------------------------------------------------------------------
1 | package events
2 |
3 | // Code generated by github.com/tinylib/msgp DO NOT EDIT.
4 |
5 | import (
6 | "bytes"
7 | "testing"
8 |
9 | "github.com/tinylib/msgp/msgp"
10 | )
11 |
12 | func TestMarshalUnmarshalEvent(t *testing.T) {
13 | v := Event{}
14 | bts, err := v.MarshalMsg(nil)
15 | if err != nil {
16 | t.Fatal(err)
17 | }
18 | left, err := v.UnmarshalMsg(bts)
19 | if err != nil {
20 | t.Fatal(err)
21 | }
22 | if len(left) > 0 {
23 | t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
24 | }
25 |
26 | left, err = msgp.Skip(bts)
27 | if err != nil {
28 | t.Fatal(err)
29 | }
30 | if len(left) > 0 {
31 | t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
32 | }
33 | }
34 |
35 | func BenchmarkMarshalMsgEvent(b *testing.B) {
36 | v := Event{}
37 | b.ReportAllocs()
38 | b.ResetTimer()
39 | for i := 0; i < b.N; i++ {
40 | v.MarshalMsg(nil)
41 | }
42 | }
43 |
44 | func BenchmarkAppendMsgEvent(b *testing.B) {
45 | v := Event{}
46 | bts := make([]byte, 0, v.Msgsize())
47 | bts, _ = v.MarshalMsg(bts[0:0])
48 | b.SetBytes(int64(len(bts)))
49 | b.ReportAllocs()
50 | b.ResetTimer()
51 | for i := 0; i < b.N; i++ {
52 | bts, _ = v.MarshalMsg(bts[0:0])
53 | }
54 | }
55 |
56 | func BenchmarkUnmarshalEvent(b *testing.B) {
57 | v := Event{}
58 | bts, _ := v.MarshalMsg(nil)
59 | b.ReportAllocs()
60 | b.SetBytes(int64(len(bts)))
61 | b.ResetTimer()
62 | for i := 0; i < b.N; i++ {
63 | _, err := v.UnmarshalMsg(bts)
64 | if err != nil {
65 | b.Fatal(err)
66 | }
67 | }
68 | }
69 |
70 | func TestEncodeDecodeEvent(t *testing.T) {
71 | v := Event{}
72 | var buf bytes.Buffer
73 | msgp.Encode(&buf, &v)
74 |
75 | m := v.Msgsize()
76 | if buf.Len() > m {
77 | t.Logf("WARNING: Msgsize() for %v is inaccurate", v)
78 | }
79 |
80 | vn := Event{}
81 | err := msgp.Decode(&buf, &vn)
82 | if err != nil {
83 | t.Error(err)
84 | }
85 |
86 | buf.Reset()
87 | msgp.Encode(&buf, &v)
88 | err = msgp.NewReader(&buf).Skip()
89 | if err != nil {
90 | t.Error(err)
91 | }
92 | }
93 |
94 | func BenchmarkEncodeEvent(b *testing.B) {
95 | v := Event{}
96 | var buf bytes.Buffer
97 | msgp.Encode(&buf, &v)
98 | b.SetBytes(int64(buf.Len()))
99 | en := msgp.NewWriter(msgp.Nowhere)
100 | b.ReportAllocs()
101 | b.ResetTimer()
102 | for i := 0; i < b.N; i++ {
103 | v.EncodeMsg(en)
104 | }
105 | en.Flush()
106 | }
107 |
108 | func BenchmarkDecodeEvent(b *testing.B) {
109 | v := Event{}
110 | var buf bytes.Buffer
111 | msgp.Encode(&buf, &v)
112 | b.SetBytes(int64(buf.Len()))
113 | rd := msgp.NewEndlessReader(buf.Bytes(), b)
114 | dc := msgp.NewReader(rd)
115 | b.ReportAllocs()
116 | b.ResetTimer()
117 | for i := 0; i < b.N; i++ {
118 | err := v.DecodeMsg(dc)
119 | if err != nil {
120 | b.Fatal(err)
121 | }
122 | }
123 | }
124 |
--------------------------------------------------------------------------------
/ui/src/untitled.json:
--------------------------------------------------------------------------------
1 | {
2 | "events":[],
3 | "rules": [
4 | {
5 | "id": "1",
6 | "title": "Revenue Down rule",
7 | "scriptID": "revenue.js",
8 | "hook_endpoint": "http://localhost:4000",
9 | "hook_retry": "3",
10 | "event_types": "com.acme.order.node1.check_disk,com.acme.checkout.node1.check_cpu",
11 | "wait_window": "120",
12 | "wait_window_threshold": "100",
13 | "max_wait_window": "240"
14 | },
15 | {
16 | "id": "2",
17 | "title": "Cart Down rule",
18 | "scriptID": "card_down.js",
19 | "hook_endpoint": "http://localhost:4000",
20 | "hook_retry": "3",
21 | "event_types": "com.acme.cart.node1.check_disk",
22 | "wait_window": "120",
23 | "wait_window_threshold": "100",
24 | "max_wait_window": "240"
25 | },
26 | {
27 | "id": "3",
28 | "title": "Style Down rule",
29 | "scriptID": "style_down.js",
30 | "hook_endpoint": "http://localhost:4000",
31 | "hook_retry": "3",
32 | "event_types": "com.acme.style.node1.check_node",
33 | "wait_window": "120",
34 | "wait_window_threshold": "100",
35 | "max_wait_window": "240"
36 | }
37 | ],
38 | "scripts": [
39 | {
40 | "id": "revenue.js",
41 | "Data": [
42 | 99,
43 | 111,
44 | 110,
45 | 115,
46 | 111,
47 | 108,
48 | 101,
49 | 46,
50 | 108,
51 | 111,
52 | 103,
53 | 40,
54 | 39,
55 | 114,
56 | 101,
57 | 118,
58 | 101,
59 | 110,
60 | 117,
61 | 101,
62 | 95,
63 | 100,
64 | 111,
65 | 119,
66 | 110,
67 | 39,
68 | 41
69 | ]
70 | },
71 | {
72 | "id": "cart.js",
73 | "Data": [
74 | 99,
75 | 111,
76 | 110,
77 | 115,
78 | 111,
79 | 108,
80 | 101,
81 | 46,
82 | 108,
83 | 111,
84 | 103,
85 | 40,
86 | 39,
87 | 99,
88 | 97,
89 | 114,
90 | 116,
91 | 95,
92 | 100,
93 | 111,
94 | 119,
95 | 110,
96 | 39,
97 | 41
98 | ]
99 | },
100 | {
101 | "id": "style.js",
102 | "Data": [
103 | 99,
104 | 111,
105 | 110,
106 | 115,
107 | 111,
108 | 108,
109 | 101,
110 | 46,
111 | 108,
112 | 111,
113 | 103,
114 | 40,
115 | 39,
116 | 115,
117 | 116,
118 | 121,
119 | 108,
120 | 101,
121 | 95,
122 | 100,
123 | 111,
124 | 119,
125 | 110,
126 | 39,
127 | 41
128 | ]
129 | }
130 | ]
131 | }
132 |
--------------------------------------------------------------------------------
/pkg/js/js_gen_test.go:
--------------------------------------------------------------------------------
1 | package js
2 |
3 | // Code generated by github.com/tinylib/msgp DO NOT EDIT.
4 |
5 | import (
6 | "bytes"
7 | "testing"
8 |
9 | "github.com/tinylib/msgp/msgp"
10 | )
11 |
12 | func TestMarshalUnmarshalScript(t *testing.T) {
13 | v := Script{}
14 | bts, err := v.MarshalMsg(nil)
15 | if err != nil {
16 | t.Fatal(err)
17 | }
18 | left, err := v.UnmarshalMsg(bts)
19 | if err != nil {
20 | t.Fatal(err)
21 | }
22 | if len(left) > 0 {
23 | t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
24 | }
25 |
26 | left, err = msgp.Skip(bts)
27 | if err != nil {
28 | t.Fatal(err)
29 | }
30 | if len(left) > 0 {
31 | t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
32 | }
33 | }
34 |
35 | func BenchmarkMarshalMsgScript(b *testing.B) {
36 | v := Script{}
37 | b.ReportAllocs()
38 | b.ResetTimer()
39 | for i := 0; i < b.N; i++ {
40 | v.MarshalMsg(nil)
41 | }
42 | }
43 |
44 | func BenchmarkAppendMsgScript(b *testing.B) {
45 | v := Script{}
46 | bts := make([]byte, 0, v.Msgsize())
47 | bts, _ = v.MarshalMsg(bts[0:0])
48 | b.SetBytes(int64(len(bts)))
49 | b.ReportAllocs()
50 | b.ResetTimer()
51 | for i := 0; i < b.N; i++ {
52 | bts, _ = v.MarshalMsg(bts[0:0])
53 | }
54 | }
55 |
56 | func BenchmarkUnmarshalScript(b *testing.B) {
57 | v := Script{}
58 | bts, _ := v.MarshalMsg(nil)
59 | b.ReportAllocs()
60 | b.SetBytes(int64(len(bts)))
61 | b.ResetTimer()
62 | for i := 0; i < b.N; i++ {
63 | _, err := v.UnmarshalMsg(bts)
64 | if err != nil {
65 | b.Fatal(err)
66 | }
67 | }
68 | }
69 |
70 | func TestEncodeDecodeScript(t *testing.T) {
71 | v := Script{}
72 | var buf bytes.Buffer
73 | msgp.Encode(&buf, &v)
74 |
75 | m := v.Msgsize()
76 | if buf.Len() > m {
77 | t.Logf("WARNING: Msgsize() for %v is inaccurate", v)
78 | }
79 |
80 | vn := Script{}
81 | err := msgp.Decode(&buf, &vn)
82 | if err != nil {
83 | t.Error(err)
84 | }
85 |
86 | buf.Reset()
87 | msgp.Encode(&buf, &v)
88 | err = msgp.NewReader(&buf).Skip()
89 | if err != nil {
90 | t.Error(err)
91 | }
92 | }
93 |
94 | func BenchmarkEncodeScript(b *testing.B) {
95 | v := Script{}
96 | var buf bytes.Buffer
97 | msgp.Encode(&buf, &v)
98 | b.SetBytes(int64(buf.Len()))
99 | en := msgp.NewWriter(msgp.Nowhere)
100 | b.ReportAllocs()
101 | b.ResetTimer()
102 | for i := 0; i < b.N; i++ {
103 | v.EncodeMsg(en)
104 | }
105 | en.Flush()
106 | }
107 |
108 | func BenchmarkDecodeScript(b *testing.B) {
109 | v := Script{}
110 | var buf bytes.Buffer
111 | msgp.Encode(&buf, &v)
112 | b.SetBytes(int64(buf.Len()))
113 | rd := msgp.NewEndlessReader(buf.Bytes(), b)
114 | dc := msgp.NewReader(rd)
115 | b.ReportAllocs()
116 | b.ResetTimer()
117 | for i := 0; i < b.N; i++ {
118 | err := v.DecodeMsg(dc)
119 | if err != nil {
120 | b.Fatal(err)
121 | }
122 | }
123 | }
124 |
--------------------------------------------------------------------------------
/pkg/events/bucket_gen_test.go:
--------------------------------------------------------------------------------
1 | package events
2 |
3 | // Code generated by github.com/tinylib/msgp DO NOT EDIT.
4 |
5 | import (
6 | "bytes"
7 | "testing"
8 |
9 | "github.com/tinylib/msgp/msgp"
10 | )
11 |
12 | func TestMarshalUnmarshalBucket(t *testing.T) {
13 | v := Bucket{}
14 | bts, err := v.MarshalMsg(nil)
15 | if err != nil {
16 | t.Fatal(err)
17 | }
18 | left, err := v.UnmarshalMsg(bts)
19 | if err != nil {
20 | t.Fatal(err)
21 | }
22 | if len(left) > 0 {
23 | t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
24 | }
25 |
26 | left, err = msgp.Skip(bts)
27 | if err != nil {
28 | t.Fatal(err)
29 | }
30 | if len(left) > 0 {
31 | t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
32 | }
33 | }
34 |
35 | func BenchmarkMarshalMsgBucket(b *testing.B) {
36 | v := Bucket{}
37 | b.ReportAllocs()
38 | b.ResetTimer()
39 | for i := 0; i < b.N; i++ {
40 | v.MarshalMsg(nil)
41 | }
42 | }
43 |
44 | func BenchmarkAppendMsgBucket(b *testing.B) {
45 | v := Bucket{}
46 | bts := make([]byte, 0, v.Msgsize())
47 | bts, _ = v.MarshalMsg(bts[0:0])
48 | b.SetBytes(int64(len(bts)))
49 | b.ReportAllocs()
50 | b.ResetTimer()
51 | for i := 0; i < b.N; i++ {
52 | bts, _ = v.MarshalMsg(bts[0:0])
53 | }
54 | }
55 |
56 | func BenchmarkUnmarshalBucket(b *testing.B) {
57 | v := Bucket{}
58 | bts, _ := v.MarshalMsg(nil)
59 | b.ReportAllocs()
60 | b.SetBytes(int64(len(bts)))
61 | b.ResetTimer()
62 | for i := 0; i < b.N; i++ {
63 | _, err := v.UnmarshalMsg(bts)
64 | if err != nil {
65 | b.Fatal(err)
66 | }
67 | }
68 | }
69 |
70 | func TestEncodeDecodeBucket(t *testing.T) {
71 | v := Bucket{}
72 | var buf bytes.Buffer
73 | msgp.Encode(&buf, &v)
74 |
75 | m := v.Msgsize()
76 | if buf.Len() > m {
77 | t.Logf("WARNING: Msgsize() for %v is inaccurate", v)
78 | }
79 |
80 | vn := Bucket{}
81 | err := msgp.Decode(&buf, &vn)
82 | if err != nil {
83 | t.Error(err)
84 | }
85 |
86 | buf.Reset()
87 | msgp.Encode(&buf, &v)
88 | err = msgp.NewReader(&buf).Skip()
89 | if err != nil {
90 | t.Error(err)
91 | }
92 | }
93 |
94 | func BenchmarkEncodeBucket(b *testing.B) {
95 | v := Bucket{}
96 | var buf bytes.Buffer
97 | msgp.Encode(&buf, &v)
98 | b.SetBytes(int64(buf.Len()))
99 | en := msgp.NewWriter(msgp.Nowhere)
100 | b.ReportAllocs()
101 | b.ResetTimer()
102 | for i := 0; i < b.N; i++ {
103 | v.EncodeMsg(en)
104 | }
105 | en.Flush()
106 | }
107 |
108 | func BenchmarkDecodeBucket(b *testing.B) {
109 | v := Bucket{}
110 | var buf bytes.Buffer
111 | msgp.Encode(&buf, &v)
112 | b.SetBytes(int64(buf.Len()))
113 | rd := msgp.NewEndlessReader(buf.Bytes(), b)
114 | dc := msgp.NewReader(rd)
115 | b.ReportAllocs()
116 | b.ResetTimer()
117 | for i := 0; i < b.N; i++ {
118 | err := v.DecodeMsg(dc)
119 | if err != nil {
120 | b.Fatal(err)
121 | }
122 | }
123 | }
124 |
--------------------------------------------------------------------------------
/pkg/executions/execution_gen_test.go:
--------------------------------------------------------------------------------
1 | package executions
2 |
3 | // Code generated by github.com/tinylib/msgp DO NOT EDIT.
4 |
5 | import (
6 | "bytes"
7 | "testing"
8 |
9 | "github.com/tinylib/msgp/msgp"
10 | )
11 |
12 | func TestMarshalUnmarshalRecord(t *testing.T) {
13 | v := Record{}
14 | bts, err := v.MarshalMsg(nil)
15 | if err != nil {
16 | t.Fatal(err)
17 | }
18 | left, err := v.UnmarshalMsg(bts)
19 | if err != nil {
20 | t.Fatal(err)
21 | }
22 | if len(left) > 0 {
23 | t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
24 | }
25 |
26 | left, err = msgp.Skip(bts)
27 | if err != nil {
28 | t.Fatal(err)
29 | }
30 | if len(left) > 0 {
31 | t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
32 | }
33 | }
34 |
35 | func BenchmarkMarshalMsgRecord(b *testing.B) {
36 | v := Record{}
37 | b.ReportAllocs()
38 | b.ResetTimer()
39 | for i := 0; i < b.N; i++ {
40 | v.MarshalMsg(nil)
41 | }
42 | }
43 |
44 | func BenchmarkAppendMsgRecord(b *testing.B) {
45 | v := Record{}
46 | bts := make([]byte, 0, v.Msgsize())
47 | bts, _ = v.MarshalMsg(bts[0:0])
48 | b.SetBytes(int64(len(bts)))
49 | b.ReportAllocs()
50 | b.ResetTimer()
51 | for i := 0; i < b.N; i++ {
52 | bts, _ = v.MarshalMsg(bts[0:0])
53 | }
54 | }
55 |
56 | func BenchmarkUnmarshalRecord(b *testing.B) {
57 | v := Record{}
58 | bts, _ := v.MarshalMsg(nil)
59 | b.ReportAllocs()
60 | b.SetBytes(int64(len(bts)))
61 | b.ResetTimer()
62 | for i := 0; i < b.N; i++ {
63 | _, err := v.UnmarshalMsg(bts)
64 | if err != nil {
65 | b.Fatal(err)
66 | }
67 | }
68 | }
69 |
70 | func TestEncodeDecodeRecord(t *testing.T) {
71 | v := Record{}
72 | var buf bytes.Buffer
73 | msgp.Encode(&buf, &v)
74 |
75 | m := v.Msgsize()
76 | if buf.Len() > m {
77 | t.Logf("WARNING: Msgsize() for %v is inaccurate", v)
78 | }
79 |
80 | vn := Record{}
81 | err := msgp.Decode(&buf, &vn)
82 | if err != nil {
83 | t.Error(err)
84 | }
85 |
86 | buf.Reset()
87 | msgp.Encode(&buf, &v)
88 | err = msgp.NewReader(&buf).Skip()
89 | if err != nil {
90 | t.Error(err)
91 | }
92 | }
93 |
94 | func BenchmarkEncodeRecord(b *testing.B) {
95 | v := Record{}
96 | var buf bytes.Buffer
97 | msgp.Encode(&buf, &v)
98 | b.SetBytes(int64(buf.Len()))
99 | en := msgp.NewWriter(msgp.Nowhere)
100 | b.ReportAllocs()
101 | b.ResetTimer()
102 | for i := 0; i < b.N; i++ {
103 | v.EncodeMsg(en)
104 | }
105 | en.Flush()
106 | }
107 |
108 | func BenchmarkDecodeRecord(b *testing.B) {
109 | v := Record{}
110 | var buf bytes.Buffer
111 | msgp.Encode(&buf, &v)
112 | b.SetBytes(int64(buf.Len()))
113 | rd := msgp.NewEndlessReader(buf.Bytes(), b)
114 | dc := msgp.NewReader(rd)
115 | b.ReportAllocs()
116 | b.ResetTimer()
117 | for i := 0; i < b.N; i++ {
118 | err := v.DecodeMsg(dc)
119 | if err != nil {
120 | b.Fatal(err)
121 | }
122 | }
123 | }
124 |
--------------------------------------------------------------------------------
/pkg/store/command_gen_test.go:
--------------------------------------------------------------------------------
1 | package store
2 |
3 | // Code generated by github.com/tinylib/msgp DO NOT EDIT.
4 |
5 | import (
6 | "bytes"
7 | "testing"
8 |
9 | "github.com/tinylib/msgp/msgp"
10 | )
11 |
12 | func TestMarshalUnmarshalCommand(t *testing.T) {
13 | v := Command{}
14 | bts, err := v.MarshalMsg(nil)
15 | if err != nil {
16 | t.Fatal(err)
17 | }
18 | left, err := v.UnmarshalMsg(bts)
19 | if err != nil {
20 | t.Fatal(err)
21 | }
22 | if len(left) > 0 {
23 | t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
24 | }
25 |
26 | left, err = msgp.Skip(bts)
27 | if err != nil {
28 | t.Fatal(err)
29 | }
30 | if len(left) > 0 {
31 | t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
32 | }
33 | }
34 |
35 | func BenchmarkMarshalMsgCommand(b *testing.B) {
36 | v := Command{}
37 | b.ReportAllocs()
38 | b.ResetTimer()
39 | for i := 0; i < b.N; i++ {
40 | v.MarshalMsg(nil)
41 | }
42 | }
43 |
44 | func BenchmarkAppendMsgCommand(b *testing.B) {
45 | v := Command{}
46 | bts := make([]byte, 0, v.Msgsize())
47 | bts, _ = v.MarshalMsg(bts[0:0])
48 | b.SetBytes(int64(len(bts)))
49 | b.ReportAllocs()
50 | b.ResetTimer()
51 | for i := 0; i < b.N; i++ {
52 | bts, _ = v.MarshalMsg(bts[0:0])
53 | }
54 | }
55 |
56 | func BenchmarkUnmarshalCommand(b *testing.B) {
57 | v := Command{}
58 | bts, _ := v.MarshalMsg(nil)
59 | b.ReportAllocs()
60 | b.SetBytes(int64(len(bts)))
61 | b.ResetTimer()
62 | for i := 0; i < b.N; i++ {
63 | _, err := v.UnmarshalMsg(bts)
64 | if err != nil {
65 | b.Fatal(err)
66 | }
67 | }
68 | }
69 |
70 | func TestEncodeDecodeCommand(t *testing.T) {
71 | v := Command{}
72 | var buf bytes.Buffer
73 | msgp.Encode(&buf, &v)
74 |
75 | m := v.Msgsize()
76 | if buf.Len() > m {
77 | t.Logf("WARNING: Msgsize() for %v is inaccurate", v)
78 | }
79 |
80 | vn := Command{}
81 | err := msgp.Decode(&buf, &vn)
82 | if err != nil {
83 | t.Error(err)
84 | }
85 |
86 | buf.Reset()
87 | msgp.Encode(&buf, &v)
88 | err = msgp.NewReader(&buf).Skip()
89 | if err != nil {
90 | t.Error(err)
91 | }
92 | }
93 |
94 | func BenchmarkEncodeCommand(b *testing.B) {
95 | v := Command{}
96 | var buf bytes.Buffer
97 | msgp.Encode(&buf, &v)
98 | b.SetBytes(int64(buf.Len()))
99 | en := msgp.NewWriter(msgp.Nowhere)
100 | b.ReportAllocs()
101 | b.ResetTimer()
102 | for i := 0; i < b.N; i++ {
103 | v.EncodeMsg(en)
104 | }
105 | en.Flush()
106 | }
107 |
108 | func BenchmarkDecodeCommand(b *testing.B) {
109 | v := Command{}
110 | var buf bytes.Buffer
111 | msgp.Encode(&buf, &v)
112 | b.SetBytes(int64(buf.Len()))
113 | rd := msgp.NewEndlessReader(buf.Bytes(), b)
114 | dc := msgp.NewReader(rd)
115 | b.ReportAllocs()
116 | b.ResetTimer()
117 | for i := 0; i < b.N; i++ {
118 | err := v.DecodeMsg(dc)
119 | if err != nil {
120 | b.Fatal(err)
121 | }
122 | }
123 | }
124 |
--------------------------------------------------------------------------------
/pkg/js/js_gen.go:
--------------------------------------------------------------------------------
1 | package js
2 |
3 | // Code generated by github.com/tinylib/msgp DO NOT EDIT.
4 |
5 | import (
6 | "github.com/tinylib/msgp/msgp"
7 | )
8 |
9 | // DecodeMsg implements msgp.Decodable
10 | func (z *Script) DecodeMsg(dc *msgp.Reader) (err error) {
11 | var field []byte
12 | _ = field
13 | var zb0001 uint32
14 | zb0001, err = dc.ReadMapHeader()
15 | if err != nil {
16 | return
17 | }
18 | for zb0001 > 0 {
19 | zb0001--
20 | field, err = dc.ReadMapKeyPtr()
21 | if err != nil {
22 | return
23 | }
24 | switch msgp.UnsafeString(field) {
25 | case "ID":
26 | z.ID, err = dc.ReadString()
27 | if err != nil {
28 | return
29 | }
30 | case "Data":
31 | z.Data, err = dc.ReadBytes(z.Data)
32 | if err != nil {
33 | return
34 | }
35 | default:
36 | err = dc.Skip()
37 | if err != nil {
38 | return
39 | }
40 | }
41 | }
42 | return
43 | }
44 |
45 | // EncodeMsg implements msgp.Encodable
46 | func (z *Script) EncodeMsg(en *msgp.Writer) (err error) {
47 | // map header, size 2
48 | // write "ID"
49 | err = en.Append(0x82, 0xa2, 0x49, 0x44)
50 | if err != nil {
51 | return
52 | }
53 | err = en.WriteString(z.ID)
54 | if err != nil {
55 | return
56 | }
57 | // write "Data"
58 | err = en.Append(0xa4, 0x44, 0x61, 0x74, 0x61)
59 | if err != nil {
60 | return
61 | }
62 | err = en.WriteBytes(z.Data)
63 | if err != nil {
64 | return
65 | }
66 | return
67 | }
68 |
69 | // MarshalMsg implements msgp.Marshaler
70 | func (z *Script) MarshalMsg(b []byte) (o []byte, err error) {
71 | o = msgp.Require(b, z.Msgsize())
72 | // map header, size 2
73 | // string "ID"
74 | o = append(o, 0x82, 0xa2, 0x49, 0x44)
75 | o = msgp.AppendString(o, z.ID)
76 | // string "Data"
77 | o = append(o, 0xa4, 0x44, 0x61, 0x74, 0x61)
78 | o = msgp.AppendBytes(o, z.Data)
79 | return
80 | }
81 |
82 | // UnmarshalMsg implements msgp.Unmarshaler
83 | func (z *Script) UnmarshalMsg(bts []byte) (o []byte, err error) {
84 | var field []byte
85 | _ = field
86 | var zb0001 uint32
87 | zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
88 | if err != nil {
89 | return
90 | }
91 | for zb0001 > 0 {
92 | zb0001--
93 | field, bts, err = msgp.ReadMapKeyZC(bts)
94 | if err != nil {
95 | return
96 | }
97 | switch msgp.UnsafeString(field) {
98 | case "ID":
99 | z.ID, bts, err = msgp.ReadStringBytes(bts)
100 | if err != nil {
101 | return
102 | }
103 | case "Data":
104 | z.Data, bts, err = msgp.ReadBytesBytes(bts, z.Data)
105 | if err != nil {
106 | return
107 | }
108 | default:
109 | bts, err = msgp.Skip(bts)
110 | if err != nil {
111 | return
112 | }
113 | }
114 | }
115 | o = bts
116 | return
117 | }
118 |
119 | // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
120 | func (z *Script) Msgsize() (s int) {
121 | s = 1 + 3 + msgp.StringPrefixSize + len(z.ID) + 5 + msgp.BytesPrefixSize + len(z.Data)
122 | return
123 | }
124 |
--------------------------------------------------------------------------------
/cmd/main.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "context"
5 | "flag"
6 | "fmt"
7 | "net/http"
8 | "os"
9 |
10 | "crawshaw.io/littleboss"
11 | "github.com/golang/glog"
12 | "github.com/heetch/confita"
13 | "github.com/heetch/confita/backend/flags"
14 |
15 | "github.com/GeertJohan/go.rice"
16 | "github.com/myntra/cortex/pkg/config"
17 | "github.com/myntra/cortex/pkg/service"
18 | )
19 |
20 | var (
21 | //raft
22 | bind string
23 | join string
24 | dir string
25 | id string
26 | defaultDwell uint64
27 | defaultDwellDeadline uint64
28 | defaultMaxDwell uint64
29 |
30 | // build
31 | version = "dev"
32 | commit = "none"
33 | date = "unknown"
34 |
35 | cfg *config.Config
36 | )
37 |
38 | func usage() {
39 | 	fmt.Fprintf(os.Stderr, "usage: \n\n ./cortex -stderrthreshold=INFO -log_dir=$(pwd) -id=node1 -lb=start & \n "+
40 | "./cortex-new-version -stderrthreshold=INFO -log_dir=$(pwd) -id=node1 -lb=reload \n ./cortex -lb=stop \n \n")
41 | flag.PrintDefaults()
42 |
43 | os.Exit(2)
44 | }
45 |
46 | func init() {
47 | flag.Usage = usage
48 | cfg = &config.Config{
49 | NodeID: "",
50 | Dir: "./data",
51 | JoinAddr: "",
52 | DefaultDwell: 3 * 60 * 1000, // 3 minutes
53 | DefaultMaxDwell: 6 * 60 * 1000, // 6 minutes
54 | DefaultDwellDeadline: 2.5 * 60 * 1000, // 2.5 minutes
55 | MaxHistory: 1000,
56 | FlushInterval: 1000,
57 | SnapshotInterval: 30,
58 | }
59 | }
60 |
61 | func main() {
62 |
63 | lb := littleboss.New("cortex")
64 | lb.Command("service", flag.String("service", "start", "littleboss start command"))
65 | flagRaft := lb.Listener("raft", "tcp", ":4444", "-raft :4444")
66 | flagHTTP := lb.Listener("http", "tcp", ":4445", "-http :4445")
67 |
68 | box := rice.MustFindBox("build")
69 |
70 | glog.Infof("Boxing the build folder - %s", box.Name())
71 |
72 | loader := confita.NewLoader(flags.NewBackend())
73 | err := loader.Load(context.Background(), cfg)
74 | if err != nil {
75 | glog.Infof("%v\n", err)
76 | usage()
77 | }
78 |
79 | glog.Infof("raft addr %v, http addr %v\n", flagRaft.String(), flagHTTP.String())
80 |
81 | lb.Run(func(ctx context.Context) {
82 | 		run(ctx, flagRaft, flagHTTP)
83 | })
84 |
85 | glog.Info("cortex exited")
86 | }
87 |
88 | func run(ctx context.Context, flagRaft, flagHTTP *littleboss.ListenerFlag) {
89 |
90 | cfg.HTTPAddr = flagHTTP.String()
91 | cfg.RaftAddr = flagRaft.String()
92 | cfg.HTTPListener = flagHTTP.Listener()
93 | cfg.RaftListener = flagRaft.Listener()
94 | cfg.EnableFileServer = true
95 |
96 | svc, err := service.New(cfg)
97 | if err != nil {
98 | glog.Error(err)
99 | os.Exit(1)
100 | }
101 |
102 | go func() {
103 | if err := svc.Start(); err != nil {
104 | if err == http.ErrServerClosed {
105 | return
106 | }
107 | glog.Fatal(err)
108 | }
109 | }()
110 |
111 | <-ctx.Done()
112 | svc.Shutdown(ctx)
113 |
114 | }
115 |
--------------------------------------------------------------------------------
/pkg/events/event.go:
--------------------------------------------------------------------------------
1 | package events
2 |
3 | import (
4 | "time"
5 |
6 | "github.com/cnf/structhash"
7 | )
8 |
9 | //go:generate msgp
10 |
11 | // Event wraps cloudevent.CloudEvent
12 | type Event struct {
13 | // Type of occurrence which has happened. Often this property is
14 | // used for routing, observability, policy enforcement, etc.
15 | // REQUIRED.
16 | EventType string `json:"eventType"`
17 |
18 | // The version of the eventType. This enables the interpretation of
19 | // data by eventual consumers, requires the consumer to be knowledgeable
20 | // about the producer.
21 | // OPTIONAL.
22 | EventTypeVersion string `json:"eventTypeVersion,omitempty"`
23 |
24 | // The version of the CloudEvents specification which the event
25 | // uses. This enables the interpretation of the context.
26 | // REQUIRED.
27 | CloudEventsVersion string `json:"cloudEventsVersion"`
28 |
29 | // This describes the event producer. Often this will include information
30 | // such as the type of the event source, the organization publishing the
31 | 	// event, and some unique identifiers. The exact syntax and semantics behind
32 | // the data encoded in the URI is event producer defined.
33 | // REQUIRED.
34 | Source string `json:"source"`
35 |
36 | // ID of the event. The semantics of this string are explicitly undefined to
37 | // ease the implementation of producers. Enables deduplication.
38 | // REQUIRED.
39 | EventID string `json:"eventID"`
40 |
41 | // Timestamp of when the event happened. RFC3339.
42 | // OPTIONAL.
43 | EventTime time.Time `json:"eventTime,omitempty"`
44 |
45 | // A link to the schema that the data attribute adheres to. RFC3986.
46 | // OPTIONAL.
47 | SchemaURL string `json:"schemaURL,omitempty"`
48 |
49 | // Describe the data encoding format. RFC2046.
50 | // OPTIONAL.
51 | ContentType string `json:"contentType,omitempty"`
52 |
53 | // This is for additional metadata and this does not have a mandated
54 | // structure. This enables a place for custom fields a producer or middleware
55 | // might want to include and provides a place to test metadata before adding
56 | // them to the CloudEvents specification. See the Extensions document for a
57 | // list of possible properties.
58 | // OPTIONAL. This is a map, but an 'interface{}' for flexibility.
59 | Extensions interface{} `json:"extensions,omitempty"`
60 |
61 | // The event payload. The payload depends on the eventType, schemaURL and
62 | // eventTypeVersion, the payload is encoded into a media format which is
63 | // specified by the contentType attribute (e.g. application/json).
64 | //
65 | // If the contentType value is "application/json", or any media type with a
66 | // structured +json suffix, the implementation MUST translate the data attribute
67 | // value into a JSON value, and set the data member of the envelope JSON object
68 | // to this JSON value.
69 | // OPTIONAL.
70 | Data interface{} `json:"data,omitempty"`
71 | hash []byte
72 | }
73 |
74 | // Hash returns the md5 hash of the event; EventID is excluded so that logical duplicates hash to the same value
75 | func (e *Event) Hash() []byte {
76 | if len(e.hash) == 0 {
77 | data := new(Event)
78 | data.CloudEventsVersion = e.CloudEventsVersion
79 | data.ContentType = e.ContentType
80 | data.Data = e.Data
81 | //data.EventID = e.EventID
82 | data.EventType = e.EventType
83 | data.EventTypeVersion = e.EventTypeVersion
84 | data.Extensions = e.Extensions
85 | data.SchemaURL = e.SchemaURL
86 | data.Source = e.Source
87 |
88 | e.hash = structhash.Md5(data, 1)
89 | }
90 |
91 | return e.hash
92 | }
93 |
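A minimal sketch of producing an Event and posting it to the /event endpoint registered in pkg/service/service.go. The :4445 address and the field values are assumptions taken from the defaults in cmd/main.go and the fixture above; this is an illustration, not repo code.

package main

import (
	"bytes"
	"encoding/json"
	"log"
	"net/http"
	"time"

	"github.com/myntra/cortex/pkg/events"
)

func main() {
	e := &events.Event{
		EventType:          "com.acme.cart.node1.check_disk", // should match a rule's event type pattern
		CloudEventsVersion: "0.1",
		Source:             "/monitoring/node1",
		EventID:            "evt-42",
		EventTime:          time.Now(),
		ContentType:        "application/json",
		Data:               map[string]interface{}{"disk_free_pct": 7},
	}

	body, err := json.Marshal(e)
	if err != nil {
		log.Fatal(err)
	}

	// Assumed default HTTP address from cmd/main.go (-http :4445).
	resp, err := http.Post("http://localhost:4445/event", "application/json", bytes.NewReader(body))
	if err != nil {
		log.Fatal(err)
	}
	resp.Body.Close()
}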
--------------------------------------------------------------------------------
/pkg/store/transport.go:
--------------------------------------------------------------------------------
1 | package store
2 |
3 | import (
4 | "errors"
5 | "io"
6 | "log"
7 | "net"
8 | "time"
9 |
10 | "github.com/hashicorp/raft"
11 | )
12 |
13 | var (
14 | errNotAdvertisable = errors.New("local bind address is not advertisable")
15 | errNotTCP = errors.New("local address is not a TCP address")
16 | )
17 |
18 | // TCPStreamLayer implements StreamLayer interface for plain TCP.
19 | type TCPStreamLayer struct {
20 | advertise net.Addr
21 | listener *net.TCPListener
22 | }
23 |
24 | // NewTCPTransport returns a NetworkTransport that is built on top of
25 | // a TCP streaming transport layer.
26 | func NewTCPTransport(
27 | bindListener net.Listener,
28 | advertise net.Addr,
29 | maxPool int,
30 | timeout time.Duration,
31 | logOutput io.Writer,
32 | ) (*raft.NetworkTransport, error) {
33 | return newTCPTransport(bindListener, advertise, func(stream raft.StreamLayer) *raft.NetworkTransport {
34 | return raft.NewNetworkTransport(stream, maxPool, timeout, logOutput)
35 | })
36 | }
37 |
38 | // NewTCPTransportWithLogger returns a NetworkTransport that is built on top of
39 | // a TCP streaming transport layer, with log output going to the supplied Logger
40 | func NewTCPTransportWithLogger(
41 | bindListener net.Listener,
42 | advertise net.Addr,
43 | maxPool int,
44 | timeout time.Duration,
45 | logger *log.Logger,
46 | ) (*raft.NetworkTransport, error) {
47 | return newTCPTransport(bindListener, advertise, func(stream raft.StreamLayer) *raft.NetworkTransport {
48 | return raft.NewNetworkTransportWithLogger(stream, maxPool, timeout, logger)
49 | })
50 | }
51 |
52 | // NewTCPTransportWithConfig returns a NetworkTransport that is built on top of
53 | // a TCP streaming transport layer, using the given config struct.
54 | func NewTCPTransportWithConfig(
55 | bindListener net.Listener,
56 | advertise net.Addr,
57 | config *raft.NetworkTransportConfig,
58 | ) (*raft.NetworkTransport, error) {
59 | return newTCPTransport(bindListener, advertise, func(stream raft.StreamLayer) *raft.NetworkTransport {
60 | config.Stream = stream
61 | return raft.NewNetworkTransportWithConfig(config)
62 | })
63 | }
64 |
65 | func newTCPTransport(list net.Listener,
66 | advertise net.Addr,
67 | transportCreator func(stream raft.StreamLayer) *raft.NetworkTransport) (*raft.NetworkTransport, error) {
68 |
69 | // Create stream
70 | stream := &TCPStreamLayer{
71 | advertise: advertise,
72 | listener: list.(*net.TCPListener),
73 | }
74 |
75 | // Verify that we have a usable advertise address
76 | addr, ok := stream.Addr().(*net.TCPAddr)
77 | if !ok {
78 | list.Close()
79 | return nil, errNotTCP
80 | }
81 | if addr.IP.IsUnspecified() {
82 | list.Close()
83 | return nil, errNotAdvertisable
84 | }
85 |
86 | // Create the network transport
87 | trans := transportCreator(stream)
88 | return trans, nil
89 | }
90 |
91 | // Dial implements the StreamLayer interface.
92 | func (t *TCPStreamLayer) Dial(address raft.ServerAddress, timeout time.Duration) (net.Conn, error) {
93 | return net.DialTimeout("tcp", string(address), timeout)
94 | }
95 |
96 | // Accept implements the net.Listener interface.
97 | func (t *TCPStreamLayer) Accept() (c net.Conn, err error) {
98 | return t.listener.Accept()
99 | }
100 |
101 | // Close implements the net.Listener interface.
102 | func (t *TCPStreamLayer) Close() (err error) {
103 | return t.listener.Close()
104 | }
105 |
106 | // Addr implements the net.Listener interface.
107 | func (t *TCPStreamLayer) Addr() net.Addr {
108 | // Use an advertise addr if provided
109 | if t.advertise != nil {
110 | return t.advertise
111 | }
112 | return t.listener.Addr()
113 | }
114 |
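A minimal sketch (assuming a locally bound listener and stderr logging) of wrapping an existing TCP listener in a raft network transport via NewTCPTransport; the listener must resolve to a concrete, advertisable TCP address or the constructor returns an error.

package main

import (
	"log"
	"net"
	"os"
	"time"

	"github.com/myntra/cortex/pkg/store"
)

func main() {
	// Bind to a concrete address; an unspecified IP (e.g. ":4444") would fail
	// the advertisability check in newTCPTransport.
	ln, err := net.Listen("tcp", "127.0.0.1:4444")
	if err != nil {
		log.Fatal(err)
	}

	trans, err := store.NewTCPTransport(ln, ln.Addr(), 3, 10*time.Second, os.Stderr)
	if err != nil {
		log.Fatal(err)
	}
	defer trans.Close()
}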
--------------------------------------------------------------------------------
/pkg/events/bucket.go:
--------------------------------------------------------------------------------
1 | package events
2 |
3 | import (
4 | "bytes"
5 | "encoding/json"
6 | "fmt"
7 | "net/http"
8 | "time"
9 |
10 | "github.com/golang/glog"
11 | "github.com/myntra/cortex/pkg/rules"
12 | "github.com/sethgrid/pester"
13 | )
14 |
15 | // NewBucket creates a new Bucket
16 | func NewBucket(rule rules.Rule) *Bucket {
17 | return &Bucket{
18 | flushWait: rule.Dwell,
19 | dwellResetAt: time.Now(),
20 | UpdatedAt: time.Now(),
21 | CreatedAt: time.Now(),
22 | Rule: rule,
23 | }
24 | }
25 |
26 | //go:generate msgp
27 |
28 | // Bucket contains the rule for a collection of events and the events
29 | type Bucket struct {
30 | Rule rules.Rule `json:"rule"`
31 | Events []*Event `json:"events"`
32 | FlushLock bool `json:"flush_lock"`
33 | UpdatedAt time.Time `json:"updated_at"`
34 | CreatedAt time.Time `json:"created_at"`
35 | dwellResetAt time.Time
36 | flushWait uint64
37 | }
38 |
39 | // AddEvent to the bucket
40 | func (rb *Bucket) AddEvent(event *Event) {
41 | glog.Infof("add event %v ==> %+v\n", event.EventID, event)
42 | rb.Events = append(rb.Events, event)
43 | rb.updateDwell()
44 | }
45 |
46 | // Post posts rulebucket to the configured hook endpoint
47 | func (rb *Bucket) Post() error {
48 |
49 | b := new(bytes.Buffer)
50 | err := json.NewEncoder(b).Encode(rb)
51 | if err != nil {
52 | return err
53 | }
54 | req, err := http.NewRequest("POST", rb.Rule.HookEndpoint, b)
55 | if err != nil {
56 | return err
57 | }
58 | req.Header.Add("Content-type", "application/json")
59 |
60 | client := pester.New()
61 | client.MaxRetries = rb.Rule.HookRetry
62 |
63 | resp, err := client.Do(req)
64 | if err != nil {
65 | return err
66 | }
67 |
68 | defer resp.Body.Close()
69 |
70 | if resp.StatusCode != 200 && resp.StatusCode != 202 {
71 | 		return fmt.Errorf("invalid status code %d returned from endpoint %v", resp.StatusCode, rb.Rule.HookEndpoint)
72 | }
73 |
74 | return nil
75 | }
76 |
77 | // getDwellDuration converts dwell(ms) to time.Duration
78 | func (rb *Bucket) getDwellDuration() time.Duration {
79 | return time.Millisecond * time.Duration(rb.Rule.Dwell)
80 | }
81 |
82 | // getDwellDeadlineDuration converts dwell_deadline(ms) to time.Duration
83 | func (rb *Bucket) getDwellDeadlineDuration() time.Duration {
84 | return time.Millisecond * time.Duration(rb.Rule.DwellDeadline)
85 | }
86 |
87 | // getMaxDwell converts max_dwell(ms) to time.Duration
88 | func (rb *Bucket) getMaxDwell() time.Duration {
89 | return time.Millisecond * time.Duration(rb.Rule.MaxDwell)
90 | }
91 |
92 | // CanFlush reports whether the bucket is ready to be flushed (evicted from the store)
93 | func (rb *Bucket) CanFlush() bool {
94 | return time.Since(rb.CreatedAt) >= time.Millisecond*time.Duration(rb.flushWait)
95 | }
96 |
97 | // CanFlushIn returns time left for flush
98 | func (rb *Bucket) CanFlushIn() time.Duration {
99 | return time.Millisecond*time.Duration(rb.flushWait) - time.Since(rb.CreatedAt)
100 | }
101 |
102 | // updateDwell expands the flush wait window when events arrive past the dwell deadline, capped at max_dwell
103 | func (rb *Bucket) updateDwell() {
104 | glog.Infof("updateDwell ")
105 | timeSinceDwellReset := time.Since(rb.dwellResetAt)
106 |
107 | glog.Infof("updateDwell %v %v %v %v", timeSinceDwellReset, rb.getDwellDuration(), rb.getMaxDwell(), rb.getDwellDeadlineDuration())
108 | if (timeSinceDwellReset + rb.getDwellDuration()) >= rb.getMaxDwell() {
109 | rb.UpdatedAt = time.Now()
110 | return
111 | }
112 |
113 | if timeSinceDwellReset >= rb.getDwellDeadlineDuration() {
114 | glog.Info("updateDwell flushwait + dwell")
115 | rb.dwellResetAt = time.Now()
116 | rb.flushWait = rb.flushWait + rb.Rule.Dwell
117 | }
118 |
119 | rb.UpdatedAt = time.Now()
120 | }
121 |
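A minimal sketch (hypothetical rule values mirroring the fixture above) of the dwell window behaviour: events arriving after the dwell deadline extend flushWait by another dwell period, but never past max_dwell.

package main

import (
	"fmt"

	"github.com/myntra/cortex/pkg/events"
	"github.com/myntra/cortex/pkg/rules"
)

func main() {
	rule := rules.Rule{
		ID:            "1",
		Dwell:         120000, // 120s base window
		DwellDeadline: 100000, // arrivals after 100s extend the window
		MaxDwell:      240000, // hard cap on the total wait
	}

	b := events.NewBucket(rule)
	b.AddEvent(&events.Event{EventID: "evt-1", EventType: "com.acme.cart.node1.check_disk"})

	fmt.Println("can flush now:", b.CanFlush())   // false right after creation
	fmt.Println("flushes in:   ", b.CanFlushIn()) // roughly the 120s dwell
}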
--------------------------------------------------------------------------------
/pkg/config/config.go:
--------------------------------------------------------------------------------
1 | package config
2 |
3 | import (
4 | "fmt"
5 | "net"
6 | "os"
7 | "strconv"
8 | "strings"
9 | "time"
10 |
11 | "github.com/golang/glog"
12 | )
13 |
14 | // Config is required for initializing the service
15 | type Config struct {
16 | NodeID string `config:"id"`
17 | Dir string `config:"dir"`
18 | JoinAddr string `config:"join"`
19 | FlushInterval uint64 `config:"flush_interval"`
20 | SnapshotInterval int `config:"snapshot_interval"`
21 | DefaultDwell uint64 `config:"dwell"`
22 | DefaultDwellDeadline uint64 `config:"dwell_deadline"`
23 | DefaultMaxDwell uint64 `config:"max_dwell"`
24 | MaxHistory int `config:"max_history"`
25 | Version string `config:"version"`
26 | Commit string `config:"commit"`
27 | Date string `config:"date"`
28 | EnableFileServer bool
29 |
30 | RaftAddr string
31 | HTTPAddr string
32 | RaftListener net.Listener
33 | HTTPListener net.Listener
34 | }
35 |
36 | // Validate the config
37 | func (c *Config) Validate() error {
38 |
39 | glog.Infof("Validating config %v \n", c)
40 |
41 | if c.RaftAddr == "" {
42 | return fmt.Errorf("missing raft address. eg: -raft :8080")
43 | }
44 |
45 | if c.HTTPAddr == "" {
46 | return fmt.Errorf("missing http address. eg: -http :8081")
47 | }
48 |
49 | rf := strings.SplitAfter(c.RaftAddr, ":")
50 | if len(rf) != 2 || rf[0] != ":" {
51 | return fmt.Errorf("invalid raft address. eg: -raft :8080")
52 | }
53 |
54 | hf := strings.SplitAfter(c.HTTPAddr, ":")
55 | if len(hf) != 2 || hf[0] != ":" {
56 | return fmt.Errorf("invalid http address. eg: -http :8081")
57 | }
58 |
59 | raftPort, err := strconv.Atoi(rf[1])
60 | if err != nil {
61 | return fmt.Errorf("invalid raft address. eg: -raft :8080")
62 | }
63 |
64 | httpPort, err := strconv.Atoi(hf[1])
65 | if err != nil {
66 | return fmt.Errorf("invalid http address. eg: -http :8081")
67 | }
68 |
69 | if httpPort-raftPort != 1 {
70 | 		return fmt.Errorf("invalid raft/http address pair. eg: -raft :8080 " +
71 | 			"-http :8081. the http port must be the raft port plus one")
72 | }
73 |
74 | if c.RaftListener == nil {
75 | return fmt.Errorf("raft listener is nil")
76 | }
77 |
78 | if c.HTTPListener == nil {
79 | return fmt.Errorf("http listener is nil")
80 | }
81 |
82 | err = c.validateDir()
83 | if err != nil {
84 | return err
85 | }
86 |
87 | if c.FlushInterval == 0 {
88 | return fmt.Errorf("flush_interval is not set")
89 | }
90 |
91 | if c.SnapshotInterval == 0 {
92 | return fmt.Errorf("snapshot_interval in minutes is not set")
93 | }
94 |
95 | if c.DefaultDwell == 0 {
96 | return fmt.Errorf("dwell is not set")
97 | }
98 |
99 | if c.DefaultDwellDeadline == 0 {
100 | return fmt.Errorf("dwell_deadline is not set")
101 | }
102 |
103 | if c.DefaultMaxDwell == 0 {
104 | return fmt.Errorf("max_dwell is not set")
105 | }
106 |
107 | return nil
108 |
109 | }
110 |
111 | func (c *Config) validateNodeID() bool {
112 | return c.NodeID != ""
113 | }
114 |
115 | func (c *Config) validateDir() error {
116 | if c.Dir == "" {
117 | return fmt.Errorf("raft dir is not set")
118 | }
119 |
120 | if _, err := os.Stat(c.Dir); os.IsNotExist(err) {
121 | err := os.Mkdir(c.Dir, os.ModePerm)
122 | if err != nil {
123 | return fmt.Errorf("raft dir err %v", err)
124 | }
125 | }
126 |
127 | return nil
128 | }
129 |
130 | func getAddr(addr string) string {
131 | tcpAddr, err := net.ResolveTCPAddr("tcp", addr)
132 | if err != nil {
133 | panic(fmt.Errorf("resolveTCPAddr failed: %v", err))
134 |
135 | }
136 | glog.Infof("getAddr: %v", tcpAddr.String())
137 | return tcpAddr.String()
138 | }
139 |
140 | func checkAddrFree(addr string) bool {
141 | conn, err := net.DialTimeout("tcp", addr, time.Second)
142 | if err != nil {
143 | glog.Errorf("err %v\n", err)
144 | }
145 | if conn != nil {
146 | conn.Close()
147 | 		glog.Errorf("addr %v is not available", addr)
148 | return false
149 | }
150 | return true
151 | }
152 |
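A minimal sketch of a Config that passes Validate. Note the constraint enforced above: the HTTP port must be exactly one above the raft port, and both listeners must already be bound. Values mirror the defaults set in cmd/main.go.

package main

import (
	"fmt"
	"net"

	"github.com/myntra/cortex/pkg/config"
)

func main() {
	// The listeners must be created before Validate is called.
	raftLn, _ := net.Listen("tcp", ":4444")
	httpLn, _ := net.Listen("tcp", ":4445")

	cfg := &config.Config{
		NodeID:               "node1",
		Dir:                  "./data", // created by validateDir if missing
		RaftAddr:             ":4444",
		HTTPAddr:             ":4445", // must be raft port + 1
		RaftListener:         raftLn,
		HTTPListener:         httpLn,
		FlushInterval:        1000,
		SnapshotInterval:     30,
		DefaultDwell:         180000,
		DefaultDwellDeadline: 150000,
		DefaultMaxDwell:      360000,
		MaxHistory:           1000,
	}

	fmt.Println("validate:", cfg.Validate()) // expected: validate: <nil>
}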
--------------------------------------------------------------------------------
/pkg/rules/rule.go:
--------------------------------------------------------------------------------
1 | package rules
2 |
3 | import (
4 | "fmt"
5 |
6 | "github.com/myntra/cortex/pkg/matcher"
7 | )
8 |
9 | //go:generate msgp
10 |
11 | // Rule describes how related service events are grouped, dwelled on and forwarded to a hook endpoint
12 | type Rule struct {
13 | Title string `json:"title"`
14 | ID string `json:"id"`
15 | ScriptID string `json:"script_id"` // javascript script which is called before hookEndPoint is called.
16 | HookEndpoint string `json:"hook_endpoint"` // endpoint which accepts a POST json objects
17 | HookRetry int `json:"hook_retry"` // number of retries while attempting to post
18 | EventTypePatterns []string `json:"event_type_patterns"` // a list of event types to look for. wildcards are allowed.
19 | Dwell uint64 `json:"dwell"` // dwell duration in milliseconds for events to arrive
20 | DwellDeadline uint64 `json:"dwell_deadline"` // dwell duration threshold after which arriving events expand the dwell window
21 | MaxDwell uint64 `json:"max_dwell"` // maximum dwell duration including expansion
22 | Regexes []string `json:"regexes,omitempty"` // generated regex string array from event types
23 | Disabled bool `json:"disabled,omitempty"` // if the rule is disabled
24 | }
25 |
26 | // Validate rule data
27 | func (r *Rule) Validate() error {
28 |
29 | for _, pattern := range r.EventTypePatterns {
30 | m, err := matcher.New(pattern)
31 | if err != nil {
32 | return fmt.Errorf("invalid event type pattern %v, err: %v", pattern, err)
33 | }
34 |
35 | r.Regexes = append(r.Regexes, m.GetRegexString())
36 | }
37 |
38 | return nil
39 | }
40 |
41 | // HasMatching checks whether the rule has a matching event type pattern
42 | func (r *Rule) HasMatching(eventType string) bool {
43 | if r.Disabled {
44 | return false
45 | }
46 | for _, regexStr := range r.Regexes {
47 | m := matcher.NewCompile(regexStr)
48 | if m.HasMatches(eventType) {
49 | return true
50 | }
51 | }
52 | return false
53 | }
54 |
55 | // PublicRule is the external representation of a Rule used in create/update requests and returned in responses
56 | type PublicRule struct {
57 | Title string `json:"title"`
58 | ID string `json:"id"`
59 | ScriptID string `json:"script_id"` // javascript script which is called before hookEndPoint is called.
60 | HookEndpoint string `json:"hook_endpoint"` // endpoint which accepts a POST json objects
61 | HookRetry int `json:"hook_retry"` // number of retries while attempting to post
62 | EventTypePatterns []string `json:"event_type_patterns"` // a list of event types to look for. wildcards are allowed.
63 | Dwell uint64 `json:"dwell"` // dwell duration in milliseconds for events to arrive
64 | DwellDeadline uint64 `json:"dwell_deadline"` // dwell duration threshold after which arriving events expand the dwell window
65 | MaxDwell uint64 `json:"max_dwell"` // maximum dwell duration including expansion
66 | Disabled bool `json:"disabled,omitempty"` // if the rule is disabled
67 | }
68 |
69 | // NewFromPublic creates a rule from a public rule
70 | func NewFromPublic(r *PublicRule) *Rule {
71 | return &Rule{
72 | Title: r.Title,
73 | ID: r.ID,
74 | ScriptID: r.ScriptID,
75 | HookEndpoint: r.HookEndpoint,
76 | HookRetry: r.HookRetry,
77 | EventTypePatterns: r.EventTypePatterns,
78 | Dwell: r.Dwell,
79 | DwellDeadline: r.DwellDeadline,
80 | MaxDwell: r.MaxDwell,
81 | Disabled: r.Disabled,
82 | }
83 | }
84 |
85 | // NewFromPrivate creates public rule from a private rule
86 | func NewFromPrivate(r *Rule) *PublicRule {
87 | return &PublicRule{
88 | Title: r.Title,
89 | ID: r.ID,
90 | ScriptID: r.ScriptID,
91 | HookEndpoint: r.HookEndpoint,
92 | HookRetry: r.HookRetry,
93 | EventTypePatterns: r.EventTypePatterns,
94 | Dwell: r.Dwell,
95 | DwellDeadline: r.DwellDeadline,
96 | MaxDwell: r.MaxDwell,
97 | Disabled: r.Disabled,
98 | }
99 | }
100 |
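A minimal sketch of defining a rule with a wildcard event type pattern and matching an incoming event type against it. The '*' wildcard syntax is an assumption based on the "wildcards are allowed" comment and pkg/matcher; treat it as illustrative.

package main

import (
	"fmt"
	"log"

	"github.com/myntra/cortex/pkg/rules"
)

func main() {
	r := &rules.Rule{
		ID:                "disk-alerts",
		Title:             "Disk alerts on node1",
		ScriptID:          "revenue.js",
		HookEndpoint:      "http://localhost:4000",
		HookRetry:         3,
		EventTypePatterns: []string{"com.acme.*.node1.check_disk"}, // assumed wildcard syntax
		Dwell:             120000,
		DwellDeadline:     100000,
		MaxDwell:          240000,
	}

	// Validate compiles the patterns into r.Regexes.
	if err := r.Validate(); err != nil {
		log.Fatal(err)
	}

	fmt.Println(r.HasMatching("com.acme.cart.node1.check_disk"))
}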
--------------------------------------------------------------------------------
/pkg/service/service.go:
--------------------------------------------------------------------------------
1 | package service
2 |
3 | import (
4 | "context"
5 | "log"
6 | "net"
7 | "net/http"
8 | "time"
9 |
10 | "github.com/go-chi/chi"
11 | "github.com/go-chi/chi/middleware"
12 | "github.com/golang/glog"
13 |
14 | "strings"
15 |
16 | "github.com/GeertJohan/go.rice"
17 | "github.com/myntra/cortex/pkg/config"
18 | "github.com/myntra/cortex/pkg/store"
19 | )
20 |
21 | // Service encapsulates the http server and the raft store
22 | type Service struct {
23 | srv *http.Server
24 | node *store.Node
25 | listener net.Listener
26 | snapshotInterval int
27 | httpAddr string
28 | }
29 |
30 | // Shutdown the service
31 | func (s *Service) Shutdown(ctx context.Context) error {
32 | s.srv.Shutdown(ctx)
33 | if err := s.node.Shutdown(); err != nil {
34 | return err
35 | }
36 | return nil
37 | }
38 |
39 | // Start the service
40 | func (s *Service) Start() error {
41 |
42 | // start the raft node
43 | if err := s.node.Start(); err != nil {
44 | return err
45 | }
46 |
47 | // start the http service
48 | go func() {
49 | if err := s.srv.Serve(s.listener); err != nil {
50 | glog.Infof("server closed %v", err)
51 | }
52 | }()
53 |
54 | go func() {
55 | ticker := time.NewTicker(time.Minute * time.Duration(s.snapshotInterval))
56 | for {
57 | select {
58 | case <-ticker.C:
59 | glog.Infof("take snapshot => %v", s.node.Snapshot())
60 | }
61 | }
62 | }()
63 |
64 | glog.Infof("======> join addr %v%v\n", getOutboundIP(), s.httpAddr)
65 | glog.Infof("======> open ui http://%v%v/ui or http://localhost%v/ui\n", getOutboundIP(), s.httpAddr, s.httpAddr)
66 |
67 | return nil
68 | }
69 |
70 | func getOutboundIP() net.IP {
71 | conn, err := net.Dial("udp", "8.8.8.8:80")
72 | if err != nil {
73 | log.Fatal(err)
74 | }
75 | defer conn.Close()
76 |
77 | localAddr := conn.LocalAddr().(*net.UDPAddr)
78 |
79 | return localAddr.IP
80 | }
81 |
82 | // fileServer registers a static file server on the router at the given path
83 | func fileServer(r chi.Router, path string) {
84 | if strings.ContainsAny(path, "{}*") {
85 | panic("FileServer does not permit URL parameters.")
86 | }
87 |
88 | fs := http.StripPrefix(path, http.FileServer(rice.MustFindBox("build").HTTPBox()))
89 |
90 | path += "*"
91 |
92 | r.Get(path, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
93 | fs.ServeHTTP(w, r)
94 | }))
95 | }
96 |
97 | // New returns the http service wrapper for the store.
98 | func New(cfg *config.Config) (*Service, error) {
99 |
100 | node, err := store.NewNode(cfg)
101 | if err != nil {
102 | return nil, err
103 | }
104 |
105 | svc := &Service{
106 | node: node,
107 | snapshotInterval: cfg.SnapshotInterval,
108 | httpAddr: cfg.HTTPAddr,
109 | }
110 |
111 | router := chi.NewRouter()
112 | router.Use(middleware.Recoverer)
113 | router.Use(func(next http.Handler) http.Handler {
114 | return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
115 | glog.Infof("Received request on %s %s", r.URL.String(), r.RequestURI)
116 | next.ServeHTTP(w, r)
117 | })
118 | })
119 |
120 | if cfg.EnableFileServer {
121 | fileServer(router, "/ui")
122 | }
123 |
124 | router.Post("/event", svc.leaderProxy(svc.eventHandler))
125 | router.Post("/event/sink/site247", svc.leaderProxy(svc.site247AlertHandler))
126 | router.Post("/event/sink/icinga", svc.leaderProxy(svc.icingaAlertHandler))
127 | router.Post("/event/sink/azure", svc.leaderProxy(svc.azureAlertHandler))
128 |
129 | router.Get("/rules", svc.getRulesHandler)
130 | router.Get("/rules/{id}", svc.getRuleHandler)
131 | router.Get("/rules/{id}/executions", svc.getRulesExecutions)
132 | router.Post("/rules", svc.leaderProxy(svc.addRuleHandler))
133 | router.Put("/rules", svc.leaderProxy(svc.updateRuleHandler))
134 | router.Delete("/rules/{id}", svc.leaderProxy(svc.removeRuleHandler))
135 |
136 | router.Get("/scripts", svc.getScriptListHandler)
137 | router.Get("/scripts/{id}", svc.getScriptHandler)
138 | router.Post("/scripts", svc.leaderProxy(svc.addScriptHandler))
139 | router.Put("/scripts", svc.leaderProxy(svc.updateScriptHandler))
140 | router.Delete("/scripts/{id}", svc.leaderProxy(svc.removeScriptHandler))
141 |
142 | router.Get("/leave/{id}", svc.leaveHandler)
143 | router.Post("/join", svc.joinHandler)
144 |
145 | router.NotFound(func(w http.ResponseWriter, r *http.Request) {
146 | http.Redirect(w, r, "/ui/"+r.URL.String(), 302)
147 | })
148 |
149 | srv := &http.Server{
150 | ReadTimeout: 10 * time.Second,
151 | WriteTimeout: 10 * time.Second,
152 | IdleTimeout: 60 * time.Second,
153 | Handler: router,
154 | }
155 |
156 | svc.srv = srv
157 | svc.listener = cfg.HTTPListener
158 |
159 | return svc, nil
160 | }
161 |
--------------------------------------------------------------------------------
/ui/src/registerServiceWorker.js:
--------------------------------------------------------------------------------
1 | // In production, we register a service worker to serve assets from local cache.
2 |
3 | // This lets the app load faster on subsequent visits in production, and gives
4 | // it offline capabilities. However, it also means that developers (and users)
5 | // will only see deployed updates on the "N+1" visit to a page, since previously
6 | // cached resources are updated in the background.
7 |
8 | // To learn more about the benefits of this model, read https://goo.gl/KwvDNy.
9 | // This link also includes instructions on opting out of this behavior.
10 |
11 | const isLocalhost = Boolean(
12 | window.location.hostname === 'localhost' ||
13 | // [::1] is the IPv6 localhost address.
14 | window.location.hostname === '[::1]' ||
15 | // 127.0.0.1/8 is considered localhost for IPv4.
16 | window.location.hostname.match(
17 | /^127(?:\.(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)){3}$/
18 | )
19 | );
20 |
21 | export default function register() {
22 | if (process.env.NODE_ENV === 'production' && 'serviceWorker' in navigator) {
23 | // The URL constructor is available in all browsers that support SW.
24 | const publicUrl = new URL(process.env.PUBLIC_URL, window.location);
25 | if (publicUrl.origin !== window.location.origin) {
26 | // Our service worker won't work if PUBLIC_URL is on a different origin
27 | // from what our page is served on. This might happen if a CDN is used to
28 | // serve assets; see https://github.com/facebookincubator/create-react-app/issues/2374
29 | return;
30 | }
31 |
32 | window.addEventListener('load', () => {
33 | const swUrl = `${process.env.PUBLIC_URL}/service-worker.js`;
34 |
35 | if (isLocalhost) {
36 |         // This is running on localhost. Let's check whether a service worker still exists.
37 | checkValidServiceWorker(swUrl);
38 |
39 | // Add some additional logging to localhost, pointing developers to the
40 | // service worker/PWA documentation.
41 | navigator.serviceWorker.ready.then(() => {
42 | console.log(
43 | 'This web app is being served cache-first by a service ' +
44 | 'worker. To learn more, visit https://goo.gl/SC7cgQ'
45 | );
46 | });
47 | } else {
48 | // Is not local host. Just register service worker
49 | registerValidSW(swUrl);
50 | }
51 | });
52 | }
53 | }
54 |
55 | function registerValidSW(swUrl) {
56 | navigator.serviceWorker
57 | .register(swUrl)
58 | .then(registration => {
59 | registration.onupdatefound = () => {
60 | const installingWorker = registration.installing;
61 | installingWorker.onstatechange = () => {
62 | if (installingWorker.state === 'installed') {
63 | if (navigator.serviceWorker.controller) {
64 | // At this point, the old content will have been purged and
65 | // the fresh content will have been added to the cache.
66 | // It's the perfect time to display a "New content is
67 | // available; please refresh." message in your web app.
68 | console.log('New content is available; please refresh.');
69 | } else {
70 | // At this point, everything has been precached.
71 | // It's the perfect time to display a
72 | // "Content is cached for offline use." message.
73 | console.log('Content is cached for offline use.');
74 | }
75 | }
76 | };
77 | };
78 | })
79 | .catch(error => {
80 | console.error('Error during service worker registration:', error);
81 | });
82 | }
83 |
84 | function checkValidServiceWorker(swUrl) {
85 |   // Check if the service worker can be found. If it can't, reload the page.
86 | fetch(swUrl)
87 | .then(response => {
88 | // Ensure service worker exists, and that we really are getting a JS file.
89 | if (
90 | response.status === 404 ||
91 | response.headers.get('content-type').indexOf('javascript') === -1
92 | ) {
93 | // No service worker found. Probably a different app. Reload the page.
94 | navigator.serviceWorker.ready.then(registration => {
95 | registration.unregister().then(() => {
96 | window.location.reload();
97 | });
98 | });
99 | } else {
100 | // Service worker found. Proceed as normal.
101 | registerValidSW(swUrl);
102 | }
103 | })
104 | .catch(() => {
105 | console.log(
106 | 'No internet connection found. App is running in offline mode.'
107 | );
108 | });
109 | }
110 |
111 | export function unregister() {
112 | if ('serviceWorker' in navigator) {
113 | navigator.serviceWorker.ready.then(registration => {
114 | registration.unregister();
115 | });
116 | }
117 | }
118 |
--------------------------------------------------------------------------------
/pkg/executions/execution_gen.go:
--------------------------------------------------------------------------------
1 | package executions
2 |
3 | // Code generated by github.com/tinylib/msgp DO NOT EDIT.
4 |
5 | import (
6 | "github.com/tinylib/msgp/msgp"
7 | )
8 |
9 | // DecodeMsg implements msgp.Decodable
10 | func (z *Record) DecodeMsg(dc *msgp.Reader) (err error) {
11 | var field []byte
12 | _ = field
13 | var zb0001 uint32
14 | zb0001, err = dc.ReadMapHeader()
15 | if err != nil {
16 | return
17 | }
18 | for zb0001 > 0 {
19 | zb0001--
20 | field, err = dc.ReadMapKeyPtr()
21 | if err != nil {
22 | return
23 | }
24 | switch msgp.UnsafeString(field) {
25 | case "ID":
26 | z.ID, err = dc.ReadString()
27 | if err != nil {
28 | return
29 | }
30 | case "Bucket":
31 | err = z.Bucket.DecodeMsg(dc)
32 | if err != nil {
33 | return
34 | }
35 | case "ScriptResult":
36 | z.ScriptResult, err = dc.ReadIntf()
37 | if err != nil {
38 | return
39 | }
40 | case "HookStatusCode":
41 | z.HookStatusCode, err = dc.ReadInt()
42 | if err != nil {
43 | return
44 | }
45 | case "CreatedAt":
46 | z.CreatedAt, err = dc.ReadTime()
47 | if err != nil {
48 | return
49 | }
50 | default:
51 | err = dc.Skip()
52 | if err != nil {
53 | return
54 | }
55 | }
56 | }
57 | return
58 | }
59 |
60 | // EncodeMsg implements msgp.Encodable
61 | func (z *Record) EncodeMsg(en *msgp.Writer) (err error) {
62 | // map header, size 5
63 | // write "ID"
64 | err = en.Append(0x85, 0xa2, 0x49, 0x44)
65 | if err != nil {
66 | return
67 | }
68 | err = en.WriteString(z.ID)
69 | if err != nil {
70 | return
71 | }
72 | // write "Bucket"
73 | err = en.Append(0xa6, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74)
74 | if err != nil {
75 | return
76 | }
77 | err = z.Bucket.EncodeMsg(en)
78 | if err != nil {
79 | return
80 | }
81 | // write "ScriptResult"
82 | err = en.Append(0xac, 0x53, 0x63, 0x72, 0x69, 0x70, 0x74, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74)
83 | if err != nil {
84 | return
85 | }
86 | err = en.WriteIntf(z.ScriptResult)
87 | if err != nil {
88 | return
89 | }
90 | // write "HookStatusCode"
91 | err = en.Append(0xae, 0x48, 0x6f, 0x6f, 0x6b, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x6f, 0x64, 0x65)
92 | if err != nil {
93 | return
94 | }
95 | err = en.WriteInt(z.HookStatusCode)
96 | if err != nil {
97 | return
98 | }
99 | // write "CreatedAt"
100 | err = en.Append(0xa9, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74)
101 | if err != nil {
102 | return
103 | }
104 | err = en.WriteTime(z.CreatedAt)
105 | if err != nil {
106 | return
107 | }
108 | return
109 | }
110 |
111 | // MarshalMsg implements msgp.Marshaler
112 | func (z *Record) MarshalMsg(b []byte) (o []byte, err error) {
113 | o = msgp.Require(b, z.Msgsize())
114 | // map header, size 5
115 | // string "ID"
116 | o = append(o, 0x85, 0xa2, 0x49, 0x44)
117 | o = msgp.AppendString(o, z.ID)
118 | // string "Bucket"
119 | o = append(o, 0xa6, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74)
120 | o, err = z.Bucket.MarshalMsg(o)
121 | if err != nil {
122 | return
123 | }
124 | // string "ScriptResult"
125 | o = append(o, 0xac, 0x53, 0x63, 0x72, 0x69, 0x70, 0x74, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74)
126 | o, err = msgp.AppendIntf(o, z.ScriptResult)
127 | if err != nil {
128 | return
129 | }
130 | // string "HookStatusCode"
131 | o = append(o, 0xae, 0x48, 0x6f, 0x6f, 0x6b, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x6f, 0x64, 0x65)
132 | o = msgp.AppendInt(o, z.HookStatusCode)
133 | // string "CreatedAt"
134 | o = append(o, 0xa9, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74)
135 | o = msgp.AppendTime(o, z.CreatedAt)
136 | return
137 | }
138 |
139 | // UnmarshalMsg implements msgp.Unmarshaler
140 | func (z *Record) UnmarshalMsg(bts []byte) (o []byte, err error) {
141 | var field []byte
142 | _ = field
143 | var zb0001 uint32
144 | zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
145 | if err != nil {
146 | return
147 | }
148 | for zb0001 > 0 {
149 | zb0001--
150 | field, bts, err = msgp.ReadMapKeyZC(bts)
151 | if err != nil {
152 | return
153 | }
154 | switch msgp.UnsafeString(field) {
155 | case "ID":
156 | z.ID, bts, err = msgp.ReadStringBytes(bts)
157 | if err != nil {
158 | return
159 | }
160 | case "Bucket":
161 | bts, err = z.Bucket.UnmarshalMsg(bts)
162 | if err != nil {
163 | return
164 | }
165 | case "ScriptResult":
166 | z.ScriptResult, bts, err = msgp.ReadIntfBytes(bts)
167 | if err != nil {
168 | return
169 | }
170 | case "HookStatusCode":
171 | z.HookStatusCode, bts, err = msgp.ReadIntBytes(bts)
172 | if err != nil {
173 | return
174 | }
175 | case "CreatedAt":
176 | z.CreatedAt, bts, err = msgp.ReadTimeBytes(bts)
177 | if err != nil {
178 | return
179 | }
180 | default:
181 | bts, err = msgp.Skip(bts)
182 | if err != nil {
183 | return
184 | }
185 | }
186 | }
187 | o = bts
188 | return
189 | }
190 |
191 | // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
192 | func (z *Record) Msgsize() (s int) {
193 | s = 1 + 3 + msgp.StringPrefixSize + len(z.ID) + 7 + z.Bucket.Msgsize() + 13 + msgp.GuessSize(z.ScriptResult) + 15 + msgp.IntSize + 10 + msgp.TimeSize
194 | return
195 | }
196 |
--------------------------------------------------------------------------------
/pkg/rules/rule_gen_test.go:
--------------------------------------------------------------------------------
1 | package rules
2 |
3 | // Code generated by github.com/tinylib/msgp DO NOT EDIT.
4 |
5 | import (
6 | "bytes"
7 | "testing"
8 |
9 | "github.com/tinylib/msgp/msgp"
10 | )
11 |
12 | func TestMarshalUnmarshalPublicRule(t *testing.T) {
13 | v := PublicRule{}
14 | bts, err := v.MarshalMsg(nil)
15 | if err != nil {
16 | t.Fatal(err)
17 | }
18 | left, err := v.UnmarshalMsg(bts)
19 | if err != nil {
20 | t.Fatal(err)
21 | }
22 | if len(left) > 0 {
23 | t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
24 | }
25 |
26 | left, err = msgp.Skip(bts)
27 | if err != nil {
28 | t.Fatal(err)
29 | }
30 | if len(left) > 0 {
31 | t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
32 | }
33 | }
34 |
35 | func BenchmarkMarshalMsgPublicRule(b *testing.B) {
36 | v := PublicRule{}
37 | b.ReportAllocs()
38 | b.ResetTimer()
39 | for i := 0; i < b.N; i++ {
40 | v.MarshalMsg(nil)
41 | }
42 | }
43 |
44 | func BenchmarkAppendMsgPublicRule(b *testing.B) {
45 | v := PublicRule{}
46 | bts := make([]byte, 0, v.Msgsize())
47 | bts, _ = v.MarshalMsg(bts[0:0])
48 | b.SetBytes(int64(len(bts)))
49 | b.ReportAllocs()
50 | b.ResetTimer()
51 | for i := 0; i < b.N; i++ {
52 | bts, _ = v.MarshalMsg(bts[0:0])
53 | }
54 | }
55 |
56 | func BenchmarkUnmarshalPublicRule(b *testing.B) {
57 | v := PublicRule{}
58 | bts, _ := v.MarshalMsg(nil)
59 | b.ReportAllocs()
60 | b.SetBytes(int64(len(bts)))
61 | b.ResetTimer()
62 | for i := 0; i < b.N; i++ {
63 | _, err := v.UnmarshalMsg(bts)
64 | if err != nil {
65 | b.Fatal(err)
66 | }
67 | }
68 | }
69 |
70 | func TestEncodeDecodePublicRule(t *testing.T) {
71 | v := PublicRule{}
72 | var buf bytes.Buffer
73 | msgp.Encode(&buf, &v)
74 |
75 | m := v.Msgsize()
76 | if buf.Len() > m {
77 | t.Logf("WARNING: Msgsize() for %v is inaccurate", v)
78 | }
79 |
80 | vn := PublicRule{}
81 | err := msgp.Decode(&buf, &vn)
82 | if err != nil {
83 | t.Error(err)
84 | }
85 |
86 | buf.Reset()
87 | msgp.Encode(&buf, &v)
88 | err = msgp.NewReader(&buf).Skip()
89 | if err != nil {
90 | t.Error(err)
91 | }
92 | }
93 |
94 | func BenchmarkEncodePublicRule(b *testing.B) {
95 | v := PublicRule{}
96 | var buf bytes.Buffer
97 | msgp.Encode(&buf, &v)
98 | b.SetBytes(int64(buf.Len()))
99 | en := msgp.NewWriter(msgp.Nowhere)
100 | b.ReportAllocs()
101 | b.ResetTimer()
102 | for i := 0; i < b.N; i++ {
103 | v.EncodeMsg(en)
104 | }
105 | en.Flush()
106 | }
107 |
108 | func BenchmarkDecodePublicRule(b *testing.B) {
109 | v := PublicRule{}
110 | var buf bytes.Buffer
111 | msgp.Encode(&buf, &v)
112 | b.SetBytes(int64(buf.Len()))
113 | rd := msgp.NewEndlessReader(buf.Bytes(), b)
114 | dc := msgp.NewReader(rd)
115 | b.ReportAllocs()
116 | b.ResetTimer()
117 | for i := 0; i < b.N; i++ {
118 | err := v.DecodeMsg(dc)
119 | if err != nil {
120 | b.Fatal(err)
121 | }
122 | }
123 | }
124 |
125 | func TestMarshalUnmarshalRule(t *testing.T) {
126 | v := Rule{}
127 | bts, err := v.MarshalMsg(nil)
128 | if err != nil {
129 | t.Fatal(err)
130 | }
131 | left, err := v.UnmarshalMsg(bts)
132 | if err != nil {
133 | t.Fatal(err)
134 | }
135 | if len(left) > 0 {
136 | t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
137 | }
138 |
139 | left, err = msgp.Skip(bts)
140 | if err != nil {
141 | t.Fatal(err)
142 | }
143 | if len(left) > 0 {
144 | t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
145 | }
146 | }
147 |
148 | func BenchmarkMarshalMsgRule(b *testing.B) {
149 | v := Rule{}
150 | b.ReportAllocs()
151 | b.ResetTimer()
152 | for i := 0; i < b.N; i++ {
153 | v.MarshalMsg(nil)
154 | }
155 | }
156 |
157 | func BenchmarkAppendMsgRule(b *testing.B) {
158 | v := Rule{}
159 | bts := make([]byte, 0, v.Msgsize())
160 | bts, _ = v.MarshalMsg(bts[0:0])
161 | b.SetBytes(int64(len(bts)))
162 | b.ReportAllocs()
163 | b.ResetTimer()
164 | for i := 0; i < b.N; i++ {
165 | bts, _ = v.MarshalMsg(bts[0:0])
166 | }
167 | }
168 |
169 | func BenchmarkUnmarshalRule(b *testing.B) {
170 | v := Rule{}
171 | bts, _ := v.MarshalMsg(nil)
172 | b.ReportAllocs()
173 | b.SetBytes(int64(len(bts)))
174 | b.ResetTimer()
175 | for i := 0; i < b.N; i++ {
176 | _, err := v.UnmarshalMsg(bts)
177 | if err != nil {
178 | b.Fatal(err)
179 | }
180 | }
181 | }
182 |
183 | func TestEncodeDecodeRule(t *testing.T) {
184 | v := Rule{}
185 | var buf bytes.Buffer
186 | msgp.Encode(&buf, &v)
187 |
188 | m := v.Msgsize()
189 | if buf.Len() > m {
190 | t.Logf("WARNING: Msgsize() for %v is inaccurate", v)
191 | }
192 |
193 | vn := Rule{}
194 | err := msgp.Decode(&buf, &vn)
195 | if err != nil {
196 | t.Error(err)
197 | }
198 |
199 | buf.Reset()
200 | msgp.Encode(&buf, &v)
201 | err = msgp.NewReader(&buf).Skip()
202 | if err != nil {
203 | t.Error(err)
204 | }
205 | }
206 |
207 | func BenchmarkEncodeRule(b *testing.B) {
208 | v := Rule{}
209 | var buf bytes.Buffer
210 | msgp.Encode(&buf, &v)
211 | b.SetBytes(int64(buf.Len()))
212 | en := msgp.NewWriter(msgp.Nowhere)
213 | b.ReportAllocs()
214 | b.ResetTimer()
215 | for i := 0; i < b.N; i++ {
216 | v.EncodeMsg(en)
217 | }
218 | en.Flush()
219 | }
220 |
221 | func BenchmarkDecodeRule(b *testing.B) {
222 | v := Rule{}
223 | var buf bytes.Buffer
224 | msgp.Encode(&buf, &v)
225 | b.SetBytes(int64(buf.Len()))
226 | rd := msgp.NewEndlessReader(buf.Bytes(), b)
227 | dc := msgp.NewReader(rd)
228 | b.ReportAllocs()
229 | b.ResetTimer()
230 | for i := 0; i < b.N; i++ {
231 | err := v.DecodeMsg(dc)
232 | if err != nil {
233 | b.Fatal(err)
234 | }
235 | }
236 | }
237 |
--------------------------------------------------------------------------------
/pkg/store/node.go:
--------------------------------------------------------------------------------
1 | package store
2 |
3 | import (
4 | "bytes"
5 | "encoding/json"
6 | "fmt"
7 | "net/http"
8 | "strconv"
9 | "strings"
10 | "sync"
11 | "time"
12 |
13 | "github.com/myntra/cortex/pkg/executions"
14 | "github.com/myntra/cortex/pkg/js"
15 |
16 | "github.com/golang/glog"
17 | "github.com/hashicorp/raft"
18 | "github.com/myntra/cortex/pkg/config"
19 | "github.com/myntra/cortex/pkg/events"
20 | "github.com/myntra/cortex/pkg/rules"
21 | "github.com/myntra/cortex/pkg/util"
22 | )
23 |
24 | // Node represents a raft node
25 | type Node struct {
26 | mu sync.RWMutex
27 | store *defaultStore
28 | }
29 |
30 | // NewNode returns a new raft node
31 | func NewNode(cfg *config.Config) (*Node, error) {
32 | glog.Infof("NewNode %v\n", cfg)
33 | if err := cfg.Validate(); err != nil {
34 | return nil, fmt.Errorf("invalid config: %v", err)
35 | }
36 |
37 | store, err := newStore(cfg)
38 | if err != nil {
39 | return nil, err
40 | }
41 |
42 | node := &Node{store: store}
43 |
44 | return node, nil
45 | }
46 |
47 | // Start the node
48 | func (n *Node) Start() error {
49 | return n.store.open()
50 | }
51 |
52 | // Shutdown the node
53 | func (n *Node) Shutdown() error {
54 | n.mu.Lock()
55 | defer n.mu.Unlock()
56 | err := n.store.close()
57 | if err != nil {
58 | glog.Errorf("error shutting down node %v\n", err)
59 | return err
60 | }
61 | time.Sleep(time.Second)
62 | glog.Info("node shut down")
63 | return nil
64 | }
65 |
66 | // LeaderAddr returns the HTTP address of the leader of the cluster. If the returned string is empty, the current node is the leader
67 | func (n *Node) LeaderAddr() string {
68 |
69 | if n.store.raft.State() == raft.Leader {
70 | return ""
71 | }
72 |
73 | raftAddress := string(n.store.raft.Leader())
74 |
75 | fields := strings.Split(raftAddress, ":")
76 |
77 | if len(fields) != 2 {
78 | glog.Errorf("invalid raftAddress %v", raftAddress)
79 | return ""
80 | }
81 |
82 | raftPortStr := fields[1]
83 | raftPort, err := strconv.Atoi(raftPortStr)
84 | if err != nil {
85 | glog.Errorf("invalid port %v %v", raftAddress, raftPortStr)
86 | return ""
87 | }
88 |
89 | tcpPort := raftPort + 1 // the leader's HTTP API listens on its raft port + 1
90 | tcpURL := fields[0]
91 | if tcpURL == "" {
92 | tcpURL = "0.0.0.0"
93 | }
94 |
95 | glog.Info("LeaderAddr ", tcpURL, tcpPort)
96 |
97 | tcpAddr := fmt.Sprintf("%s:%d", tcpURL, tcpPort)
98 |
99 | return tcpAddr
100 | }
101 |
102 | // AddRule adds a rule to the store
103 | func (n *Node) AddRule(rule *rules.Rule) error {
104 | if err := rule.Validate(); err != nil {
105 | return err
106 | }
107 | return n.store.addRule(rule)
108 | }
109 |
110 | // UpdateRule updates a rule in the store
111 | func (n *Node) UpdateRule(rule *rules.Rule) error {
112 | if err := rule.Validate(); err != nil {
113 | return err
114 | }
115 | return n.store.updateRule(rule)
116 | }
117 |
118 | // Stash adds an event to the store
119 | func (n *Node) Stash(event *events.Event) error {
120 | return n.store.matchAndStash(event)
121 | }
122 |
123 | // RemoveRule removes a rule from the store
124 | func (n *Node) RemoveRule(ruleID string) error {
125 | return n.store.removeRule(ruleID)
126 | }
127 |
128 | // GetRule returns the stored rule for the given rule ID
129 | func (n *Node) GetRule(ruleID string) *rules.Rule {
130 | return n.store.getRule(ruleID)
131 | }
132 |
133 | // GetRuleExectutions returns the executions for a rule
134 | func (n *Node) GetRuleExectutions(ruleID string) []*executions.Record {
135 | return n.store.getRecords(ruleID)
136 | }
137 |
138 | // GetRules returns all the stored rules
139 | func (n *Node) GetRules() []*rules.Rule {
140 | return n.store.getRules()
141 | }
142 |
143 | // AddScript adds a script to the db
144 | func (n *Node) AddScript(script *js.Script) error {
145 | return n.store.addScript(script)
146 | }
147 |
148 | // UpdateScript updates an already added script
149 | func (n *Node) UpdateScript(script *js.Script) error {
150 | return n.store.updateScript(script)
151 | }
152 |
153 | // RemoveScript removes a script from the db
154 | func (n *Node) RemoveScript(id string) error {
155 | return n.store.removeScript(id)
156 | }
157 |
158 | // GetScripts returns all script ids
159 | func (n *Node) GetScripts() []string {
160 | return n.store.getScripts()
161 | }
162 |
163 | // GetScript returns the script data
164 | func (n *Node) GetScript(id string) *js.Script {
165 | return n.store.getScript(id)
166 | }
167 |
168 | // Join a remote node at the addr
169 | func (n *Node) Join(nodeID, addr string) error {
170 | return n.store.acceptJoin(nodeID, addr)
171 | }
172 |
173 | // Leave a remote node
174 | func (n *Node) Leave(nodeID string) error {
175 | return n.store.acceptLeave(nodeID)
176 | }
177 |
178 | // Snapshot takes a snapshot of the store
179 | func (n *Node) Snapshot() error {
180 | return n.store.snapshot()
181 | }
182 |
183 | func httpRaftJoin(joinAddr, nodeID, bindAddr string) error {
184 |
185 | jr := &util.JoinRequest{
186 | NodeID: nodeID,
187 | Addr: bindAddr,
188 | }
189 |
190 | err := jr.Validate()
191 | if err != nil {
192 | return err
193 | }
194 |
195 | b := new(bytes.Buffer)
196 | err = json.NewEncoder(b).Encode(jr)
197 | if err != nil {
198 | return err
199 | }
200 |
201 | glog.Infof("joinRequest Body %v", b.String())
202 |
203 | req, err := http.NewRequest("POST", "http://"+joinAddr+"/join", b)
204 | if err != nil {
205 | return err
206 | }
207 |
208 | client := &http.Client{}
209 | resp, err := client.Do(req)
210 | if err != nil {
211 | return err
212 | }
213 |
214 | if resp.StatusCode != 200 {
215 | return fmt.Errorf("join failed, unexpected status code %v", resp.StatusCode)
216 | }
217 |
218 | return nil
219 | }
220 |
--------------------------------------------------------------------------------
/pkg/store/raft.go:
--------------------------------------------------------------------------------
1 | package store
2 |
3 | import (
4 | "crypto/rand"
5 | "encoding/hex"
6 | "fmt"
7 | "io/ioutil"
8 | "net"
9 | "os"
10 | "path/filepath"
11 | "strings"
12 | "time"
13 |
14 | "github.com/golang/glog"
15 | "github.com/hashicorp/raft"
16 | raftboltdb "github.com/hashicorp/raft-boltdb"
17 | )
18 |
19 | func (d *defaultStore) open() error {
20 |
21 | id := d.opt.NodeID
22 |
23 | if id == "" {
24 | data, err := ioutil.ReadFile(filepath.Join(d.opt.Dir, "node.id"))
25 | id = strings.TrimSpace(string(data))
26 | if os.IsNotExist(err) || id == "" {
27 | var data [4]byte
28 | if _, err := rand.Read(data[:]); err != nil {
29 | panic("random error: " + err.Error())
30 | }
31 | id = hex.EncodeToString(data[:])[:7]
32 | err = ioutil.WriteFile(filepath.Join(d.opt.Dir, "node.id"), []byte(id+"\n"), 0600)
33 | if err != nil {
34 | return err
35 | }
36 | } else if err != nil {
37 | return err
38 | }
39 | }
40 |
41 | glog.Info("opening raft store \n")
42 | config := raft.DefaultConfig()
43 | config.LocalID = raft.ServerID(id)
44 |
45 | // Setup Raft communication.
46 | addr, err := net.ResolveTCPAddr("tcp", d.opt.RaftAddr)
47 | if err != nil {
48 | return err
49 | }
50 |
51 | //raft.NewTCPTransportWithConfig
52 | transport, err := NewTCPTransport(d.opt.RaftListener, addr, 3, 10*time.Second, os.Stderr)
53 | if err != nil {
54 | return err
55 | }
56 |
57 | glog.Info("created raft transport \n")
58 | // Create the snapshot store. This allows the Raft to truncate the log.
59 | snapshots, err := raft.NewFileSnapshotStore(d.opt.Dir, retainSnapshotCount, os.Stderr)
60 | if err != nil {
61 | return fmt.Errorf("file snapshot store: %s", err)
62 | }
63 |
64 | glog.Info("created snapshot store \n")
65 |
66 | // Create the log store and stable store.
67 | var logStore raft.LogStore
68 | var stableStore raft.StableStore
69 |
70 | glog.Info("raft.db => ", filepath.Join(d.opt.Dir, "raft.db"))
71 | boltDB, err := raftboltdb.NewBoltStore(filepath.Join(d.opt.Dir, "raft.db"))
72 | if err != nil {
73 | return fmt.Errorf("new bolt store: %s", err)
74 | }
75 | logStore = boltDB
76 | stableStore = boltDB
77 |
78 | glog.Info("created boltdb store \n")
79 | // Instantiate the Raft system.
80 | ra, err := raft.NewRaft(config, (*fsm)(d), logStore, stableStore, snapshots, transport)
81 | if err != nil {
82 | return fmt.Errorf("new raft: %s", err)
83 | }
84 | d.raft = ra
85 | d.boltDB = boltDB
86 |
87 | glog.Info("created raft system \n")
88 |
89 | // bootstrap single node configuration
90 | if d.opt.JoinAddr == "" {
91 | glog.Infof("starting %v in a single node cluster \n", d.opt.NodeID)
92 | configuration := raft.Configuration{
93 | Servers: []raft.Server{
94 | {
95 | ID: config.LocalID,
96 | Address: transport.LocalAddr(),
97 | },
98 | },
99 | }
100 | ra.BootstrapCluster(configuration)
101 |
102 | // since in bootstrap mode, block until leadership is attained.
103 | loop:
104 | for {
105 | select {
106 | case leader := <-d.raft.LeaderCh():
107 | glog.Info("isLeader ", leader)
108 | if leader {
109 | break loop
110 | }
111 | }
112 | }
113 | } else {
114 | // join a remote node
115 | glog.Infof("join a remote node %v\n", d.opt.JoinAddr)
116 | err := httpRaftJoin(d.opt.JoinAddr, d.opt.NodeID, d.opt.RaftAddr)
117 | if err != nil {
118 | return err
119 | }
120 | }
121 |
122 | go d.flusher()
123 |
124 | return nil
125 | }
126 |
127 | func (d *defaultStore) snapshot() error {
128 | f := d.raft.Snapshot()
129 | return f.Error()
130 | }
131 |
132 | func (d *defaultStore) close() error {
133 | d.quitFlusherChan <- struct{}{}
134 | f := d.raft.Shutdown()
135 | if f.Error() != nil {
136 | return f.Error()
137 | }
138 |
139 | // close the raft database
140 | if d.boltDB != nil {
141 | d.boltDB.Close()
142 | }
143 |
144 | glog.Info("raft shut down")
145 | glog.Flush()
146 | return nil
147 | }
148 |
149 | func (d *defaultStore) acceptJoin(nodeID, addr string) error {
150 | glog.Infof("received join request for remote node %s at %s", nodeID, addr)
151 |
152 | configFuture := d.raft.GetConfiguration()
153 | if err := configFuture.Error(); err != nil {
154 | glog.Infof("failed to get raft configuration: %v", err)
155 | return err
156 | }
157 |
158 | for _, srv := range configFuture.Configuration().Servers {
159 | // If a node already exists with either the joining node's ID or address,
160 | // that node may need to be removed from the config first.
161 | if srv.ID == raft.ServerID(nodeID) || srv.Address == raft.ServerAddress(addr) {
162 | // However if *both* the ID and the address are the same, then nothing -- not even
163 | // a join operation -- is needed.
164 | if srv.Address == raft.ServerAddress(addr) && srv.ID == raft.ServerID(nodeID) {
165 | glog.Infof("node %s at %s already member of cluster, ignoring join request", nodeID, addr)
166 | return nil
167 | }
168 |
169 | future := d.raft.RemoveServer(srv.ID, 0, 0)
170 | if err := future.Error(); err != nil {
171 | return fmt.Errorf("error removing existing node %s at %s: %s", nodeID, addr, err)
172 | }
173 | }
174 | }
175 |
176 | f := d.raft.AddVoter(raft.ServerID(nodeID), raft.ServerAddress(addr), 0, 0)
177 | if f.Error() != nil {
178 | return f.Error()
179 | }
180 | glog.Infof("node %s at %s joined successfully", nodeID, addr)
181 | return nil
182 |
183 | }
184 |
185 | func (d *defaultStore) acceptLeave(nodeID string) error {
186 |
187 | glog.Infof("received leave request for remote node %s", nodeID)
188 |
189 | cf := d.raft.GetConfiguration()
190 |
191 | if err := cf.Error(); err != nil {
192 | glog.Infof("failed to get raft configuration")
193 | return err
194 | }
195 |
196 | for _, server := range cf.Configuration().Servers {
197 | if server.ID == raft.ServerID(nodeID) {
198 | f := d.raft.RemoveServer(server.ID, 0, 0)
199 | if err := f.Error(); err != nil {
200 | glog.Infof("failed to remove server %s", nodeID)
201 | return err
202 | }
203 |
204 | glog.Infof("node %s left successfully", nodeID)
205 | return nil
206 | }
207 | }
208 |
209 | glog.Infof("node %s does not exist in raft group", nodeID)
210 |
211 | return nil
212 |
213 | }
214 |
--------------------------------------------------------------------------------
/pkg/store/fsm.go:
--------------------------------------------------------------------------------
1 | package store
2 |
3 | import (
4 | "fmt"
5 | "io"
6 |
7 | "github.com/golang/glog"
8 | "github.com/hashicorp/raft"
9 | "github.com/myntra/cortex/pkg/events"
10 | "github.com/myntra/cortex/pkg/executions"
11 | "github.com/myntra/cortex/pkg/js"
12 | "github.com/myntra/cortex/pkg/rules"
13 | "github.com/tinylib/msgp/msgp"
14 | )
15 |
16 | type fsm defaultStore // fsm exposes defaultStore as a raft.FSM (Apply, Snapshot, Restore)
17 |
18 | func (f *fsm) Apply(l *raft.Log) interface{} {
19 | c := Command{}
20 |
21 | left, err := c.UnmarshalMsg(l.Data)
22 | if err != nil {
23 | return err
24 | }
25 |
26 | if len(left) > 0 {
27 | return fmt.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
28 | }
29 |
30 | left, err = msgp.Skip(l.Data)
31 | if err != nil {
32 | return err
33 | }
34 |
35 | if len(left) > 0 {
36 | return fmt.Errorf("%d bytes left over after Skip(): %q", len(left), left)
37 |
38 | }
39 |
40 | glog.Infof("fsm apply ==> %+v\n", c)
41 | // if err := json.Unmarshal(l.Data, &c); err != nil {
42 | // panic(fmt.Sprintf("failed to unmarshal command: %s", err.Error()))
43 | // }
44 |
45 | switch c.Op {
46 | case "stash":
47 | return f.applyStash(c.RuleID, c.Event)
48 | case "add_rule":
49 | return f.applyAddRule(c.Rule)
50 | case "update_rule":
51 | return f.applyUpdateRule(c.Rule)
52 | case "remove_rule":
53 | return f.applyRemoveRule(c.RuleID)
54 | case "flush_bucket":
55 | return f.applyFlushBucket(c.RuleID)
56 | case "flush_lock":
57 | return f.applyFlushLock(c.RuleID)
58 | case "add_script":
59 | return f.applyAddScript(c.Script)
60 | case "update_script":
61 | return f.applyUpdateScript(c.Script)
62 | case "remove_script":
63 | return f.applyRemoveScript(c.ScriptID)
64 | case "add_record":
65 | return f.applyAddRecord(c.Record)
66 | case "remove_record":
67 | return f.applyRemoveRecord(c.RecordID)
68 | default:
69 | panic(fmt.Sprintf("unrecognized command op: %s", c.Op))
70 | }
71 |
72 | }
73 |
74 | func (f *fsm) applyStash(ruleID string, event *events.Event) interface{} {
75 | return f.bucketStorage.stash(ruleID, event)
76 | }
77 |
78 | func (f *fsm) applyAddRule(rule *rules.Rule) interface{} {
79 | return f.bucketStorage.rs.addRule(rule)
80 | }
81 |
82 | func (f *fsm) applyUpdateRule(rule *rules.Rule) interface{} {
83 | return f.bucketStorage.rs.updateRule(rule)
84 | }
85 |
86 | func (f *fsm) applyRemoveRule(ruleID string) interface{} {
87 | return f.bucketStorage.rs.removeRule(ruleID)
88 | }
89 |
90 | func (f *fsm) applyFlushBucket(ruleID string) interface{} {
91 | return f.bucketStorage.es.flushBucket(ruleID)
92 | }
93 |
94 | func (f *fsm) applyFlushLock(ruleID string) interface{} {
95 | return f.bucketStorage.es.flushLock(ruleID)
96 | }
97 |
98 | func (f *fsm) applyAddScript(script *js.Script) interface{} {
99 | return f.scriptStorage.addScript(script)
100 | }
101 |
102 | func (f *fsm) applyUpdateScript(script *js.Script) interface{} {
103 | return f.scriptStorage.updateScript(script)
104 | }
105 |
106 | func (f *fsm) applyRemoveScript(id string) interface{} {
107 | return f.scriptStorage.removeScript(id)
108 | }
109 |
110 | func (f *fsm) applyAddRecord(r *executions.Record) interface{} {
111 | return f.executionStorage.add(r)
112 | }
113 |
114 | func (f *fsm) applyRemoveRecord(id string) interface{} {
115 | return f.executionStorage.remove(id)
116 | }
117 |
118 | func (f *fsm) Snapshot() (raft.FSMSnapshot, error) {
119 | glog.Info("snapshot =>")
120 |
121 | rules := f.bucketStorage.rs.clone()
122 | scripts := f.scriptStorage.clone()
123 | records := f.executionStorage.clone()
124 |
125 | return &fsmSnapShot{
126 | persisters: f.persisters,
127 | messages: &Messages{
128 | Rules: rules,
129 | Scripts: scripts,
130 | Records: records,
131 | }}, nil
132 | }
133 |
134 | type restorer func(messages *Messages, reader *msgp.Reader) error
135 |
136 | func (f *fsm) Restore(rc io.ReadCloser) error {
137 | glog.Info("restore <=")
138 | defer rc.Close()
139 |
140 | // body, _ := ioutil.ReadAll(rc)
141 | // glog.Infoln(string(body))
142 |
143 | messages := &Messages{
144 | Rules: make(map[string]*rules.Rule),
145 | Scripts: make(map[string]*js.Script),
146 | Records: make(map[string]*executions.Record),
147 | }
148 |
149 | msgpReader := msgp.NewReader(rc)
150 |
151 | msgType := make([]byte, 1)
152 | for {
153 | // Read the message type
154 | _, err := msgpReader.Read(msgType)
155 | if err == io.EOF {
156 | glog.Infof("err => %v", err)
157 | break
158 | } else if err != nil {
159 | glog.Error(err)
160 | return err
161 | }
162 |
163 | // Decode
164 | msg := MessageType(msgType[0])
165 | glog.Infof("restore, messageType %+v\n", msg)
166 | if fn := f.restorers[msg]; fn != nil {
167 | if err := fn(messages, msgpReader); err != nil {
168 | glog.Error(err)
169 | return err
170 | }
171 | } else {
172 | glog.Error(fmt.Errorf("Unrecognized msg type %d", msg))
173 | return fmt.Errorf("Unrecognized msg type %d", msg)
174 | }
175 |
176 | }
177 |
178 | f.bucketStorage.rs.restore(messages.Rules)
179 | f.scriptStorage.restore(messages.Scripts)
180 | f.executionStorage.restore(messages.Records)
181 |
182 | return nil
183 | }
184 |
185 | func restoreRules(messages *Messages, reader *msgp.Reader) error {
186 | var rule rules.Rule
187 | err := rule.DecodeMsg(reader)
188 | if err != nil {
189 | glog.Error(err)
190 | return err
191 | }
192 |
193 | glog.Infof("restoreRules %+v\n", rule)
194 |
195 | if &rule == nil {
196 | return fmt.Errorf("restored rule nil")
197 | }
198 |
199 | rulePtr := &rule
200 | err = rulePtr.Validate()
201 | if err != nil {
202 | return err
203 | }
204 |
205 | messages.Rules[rule.ID] = rulePtr
206 | return nil
207 | }
208 |
209 | func restoreScripts(messages *Messages, reader *msgp.Reader) error {
210 | var script js.Script
211 | err := script.DecodeMsg(reader)
212 | if err != nil {
213 | glog.Error(err)
214 | return err
215 | }
216 |
217 | glog.Infof("restoreScripts %+v\n", script)
218 |
219 | if &script == nil {
220 | return fmt.Errorf("restored script nil")
221 | }
222 |
223 | messages.Scripts[script.ID] = &script
224 | return nil
225 | }
226 |
227 | func restoreRecords(messages *Messages, reader *msgp.Reader) error {
228 | var record executions.Record
229 | err := record.DecodeMsg(reader)
230 | if err != nil {
231 | glog.Error(err)
232 | return err
233 | }
234 |
235 | glog.Infof("restoreRecords %+v\n", record)
236 |
237 | if &record == nil {
238 | return fmt.Errorf("restored record nil")
239 | }
240 |
241 | messages.Records[record.ID] = &record
242 |
243 | return nil
244 | }
245 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 |
2 | **Cortex** is a fault-tolerant event correlation engine. It groups and correlates incoming events for further actions:
3 | creating or resolving incidents/alerts, or performing root cause analysis.
4 |
5 | - Built-in regex matcher for capturing events into groups (*here called buckets*).
6 | - Built-in ES6 JavaScript interpreter (https://docs.k6.io/docs/modules) for executing correlation logic on buckets.
7 | - React UI for creating rules and correlation scripts, browsing rule execution history and simulating correlation executions in a playground.
8 | - REST CRUD API for rules, scripts and execution history.
9 | - CloudEvents input and output (https://cloudevents.io/).
10 | - Fault tolerance built on top of https://github.com/hashicorp/raft and https://github.com/boltdb/bolt .
11 | - Single fat self-supervising binary using https://github.com/crawshaw/littleboss .
12 | - MessagePack encoding/decoding for raft entries using https://github.com/tinylib/msgp .
13 |
14 | The project is **alpha** quality and not yet ready for production.
15 |
16 | ## Summary:
17 |
18 | *Find relationships between N events received at M different points in time using regex matchers and JavaScript*
19 |
20 | To know more about event correlation in general, please read: https://en.wikipedia.org/wiki/Event_correlation
21 |
22 | ### Similar Commercial Products
23 |
24 | 1. https://console.bluemix.net/catalog/services/event-management
25 | 2. https://www.bigpanda.io/blog/algorithmic-alert-correlation/
26 | 3. https://docs.servicenow.com/bundle/kingston-it-operations-management/page/product/event-management/concept/c_EMEventCorrelationRules.html
27 |
28 |
29 | ## Use Cases
30 | - Alerts/Events Correlation
31 | - Event Gateway
32 | - FAAS
33 | - Incidents Management
34 |
35 | ## How it works:
36 |
37 | Cortex runs the following steps to achieve event correlation:
38 |
39 | 1. **Match** : incoming alert --> (convert from Site 24x7/Icinga) --> (match rule) --> **Collect**
40 | 2. **Collect** --> (add to the rule bucket, which *dwells* around until the configured time) --> **Execute**
41 | 3. **Execute** --> (flush after the dwell period) --> (execute the configured script) --> **Post**
42 | 4. **Post** --> (if a result is set by the script, post it to the hookEndpoint; otherwise post the bucket itself)
43 |
44 |
45 | ## Screenshots
46 |
47 | 
48 |
49 | 
50 |
51 | 
52 |
53 | ## Rules
54 |
55 | A rule contains an array of patterns used to capture events into a *bucket*.
56 |
57 | ```json
58 | {
59 | "title": "a test rule",
60 | "id": "test-rule-id-1",
61 | "eventTypePatterns": ["acme.prod.icinga.check_disk", "acme.prod.site247.*"],
62 | "scriptID": "myscript.js",
63 | "dwell": 4000,
64 | "dwellDeadline": 3800,
65 | "maxDwell": 8000,
66 | "hookEndpoint": "http://localhost:3000/testrule",
67 | "hookRetry": 2
68 | }
69 | ```
70 |
71 | where:
72 |
73 | *eventTypePatterns* is the list of patterns used to select events into the rule's bucket.
74 |
75 | *dwell* is the wait duration, counted from the first matched event, before the bucket is flushed and the script is executed.
76 |
77 |
78 | Possible patterns:
79 |
80 | ```
81 | {rule pattern, incoming event type, expected match}
82 | {"acme*", "acme", false},
83 | {"acme*", "acme.prod", true},
84 | {"acme.prod*", "acme.prod.search", true},
85 | {"acme.prod*.checkout", "acme.prod.search", false},
86 | {"acme.prod*.*", "acme.prod.search", false},
87 | {"acme.prod*.*", "acme.prod-1.search", true},
88 | {"acme.prod.*.*.*", "acme.prod.search.node1.check_disk", true},
89 | {"acme.prod.*.*.check_disk", "acme.prod.search.node1.check_disk", true},
90 | {"acme.prod.*.*.check_loadavg", "acme.prod.search.node1.check_disk", false},
91 | {"*.prod.*.*.check_loadavg", "acme.prod.search.node1.check_loadavg", true},
92 | {"acme.prod.*", "acme.prod.search.node1.check_disk", true},
93 | {"acme.prod.search.node*.check_disk", "acme.prod.search.node1.check_disk", true},
94 | {"acme.prod.search.node*.*", "acme.prod.search.node1.check_disk", true},
95 | {"acme.prod.search.dc1-node*.*", "acme.prod.search.node1.check_disk", false},
96 | ```
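
The project's actual matcher lives in `pkg/matcher/match.go`; the standalone sketch below is only an illustration of the semantics shown in the table above, where each `*` must consume at least one character (which is why `"acme*"` does not match `"acme"`):

```go
package main

// Illustrative sketch only -- not the project's matcher. A rule pattern is
// translated into an anchored regular expression where "." is literal and
// each "*" expands to ".+" (one or more characters).

import (
	"fmt"
	"regexp"
	"strings"
)

func match(pattern, eventType string) bool {
	expr := "^" + strings.ReplaceAll(regexp.QuoteMeta(pattern), `\*`, ".+") + "$"
	return regexp.MustCompile(expr).MatchString(eventType)
}

func main() {
	fmt.Println(match("acme*", "acme"))                                    // false
	fmt.Println(match("acme.prod*.*", "acme.prod-1.search"))               // true
	fmt.Println(match("acme.prod.*", "acme.prod.search.node1.check_disk")) // true
}
```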
97 |
98 | ## Events
99 |
100 | Alerts are accepted as CloudEvents (https://github.com/cloudevents/spec/blob/master/json-format.md). Site 24x7 and Icinga integration sinks are also provided.
101 |
102 | The engine collects similar events into a bucket over a time window using a regex matcher and then executes an ES6 JavaScript script. The script contains the correlation logic, which can further create incidents or alerts. The JS environment is intentionally limited; it is provided by embedding the k6.io JavaScript interpreter (https://docs.k6.io/docs/modules), an excellent library built on top of https://github.com/dop251/goja.
103 |
104 |
105 | For the above example rule, incoming events with `eventType` matching one of `eventTypePatterns` will be put in the same bucket:
106 |
107 | ```json
108 | {
109 | "rule": {},
110 | "events": [{
111 | "cloudEventsVersion": "0.1",
112 | "eventType": "acme.prod.site247.search_down",
113 | "source": "site247",
114 | "eventID": "C234-1234-1234",
115 | "eventTime": "2018-04-05T17:31:00Z",
116 | "extensions": {
117 | "comExampleExtension": "value"
118 | },
119 | "contentType": "application/json",
120 | "data": {
121 | "appinfoA": "abc",
122 | "appinfoB": 123,
123 | "appinfoC": true
124 | }
125 | }]
126 | }
127 | ```
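
Assuming events are submitted over the engine's HTTP API, the sketch below posts such an event as JSON. The address `http://localhost:8080` and the `/event` route are placeholder assumptions only; check `pkg/service/handlers.go` for the actual endpoint:

```go
package main

// Minimal sketch of submitting a CloudEvent to the engine over HTTP.
// The host, port and "/event" route are placeholder assumptions; see
// pkg/service/handlers.go for the real endpoint.

import (
	"bytes"
	"fmt"
	"net/http"
)

func main() {
	event := []byte(`{
		"cloudEventsVersion": "0.1",
		"eventType": "acme.prod.site247.search_down",
		"source": "site247",
		"eventID": "C234-1234-1234",
		"eventTime": "2018-04-05T17:31:00Z",
		"contentType": "application/json",
		"data": {"appinfoA": "abc"}
	}`)

	resp, err := http.Post("http://localhost:8080/event", "application/json", bytes.NewReader(event))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status)
}
```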
128 |
129 | ## Scripts
130 |
131 | After the `dwell` period, the configured `myscript.js` will be invoked and the bucket will be passed along:
132 |
133 | ```js
134 | import http from "k6/http";
135 | // result is a special variable
136 | let result = null
137 | // the entry function called by default
138 | export default function(bucket) {
139 | bucket.events.forEach((event) => {
140 | // create an incident or alert, or do nothing
141 | http.Post("http://acme.com/incident")
142 | // if result is set, it will be picked up by the engine and posted to hookEndpoint
143 | })
144 | }
145 | ```
146 |
147 | If `result` is set, it will be posted to the `hookEndpoint`. The `bucket` itself will be reset and evicted from the `collect` loop. The execution `record` will then be stored and can be fetched later.
148 |
149 | A new `bucket` will be created when an event matches the rule again.
150 |
151 | ## Hooks
152 |
153 | Rule results can be posted to a configured HTTP endpoint. The remote endpoint should accept a `POST` request with an `application/json` body.
154 |
155 | ```
156 | "hookEndpoint": "http://localhost:3000/testrule",
157 | "hookRetry": 2
158 | ```
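
A minimal hook receiver can look like the sketch below. It assumes only what is stated above (a `POST` with a JSON body sent to the configured `hookEndpoint`) and listens on the address used in the example rule:

```go
package main

// Minimal sketch of a hook receiver for the example rule's
// "hookEndpoint": "http://localhost:3000/testrule". It simply accepts the
// JSON payload posted by the engine and logs it.

import (
	"encoding/json"
	"log"
	"net/http"
)

func main() {
	http.HandleFunc("/testrule", func(w http.ResponseWriter, r *http.Request) {
		var payload interface{}
		if err := json.NewDecoder(r.Body).Decode(&payload); err != nil {
			http.Error(w, err.Error(), http.StatusBadRequest)
			return
		}
		log.Printf("received hook payload: %+v", payload)
		w.WriteHeader(http.StatusOK) // acknowledge the hook
	})
	log.Fatal(http.ListenAndServe(":3000", nil))
}
```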
159 |
160 |
161 | ## Local Deployment
162 |
163 | 1. git clone https://github.com/myntra/cortex
164 | 2. ./release.sh
165 |
166 | This starts a single-node server.
167 |
168 | ## Production Deployment
169 |
170 | TODO
171 |
172 |
173 |
174 |
--------------------------------------------------------------------------------
/pkg/events/bucket_gen.go:
--------------------------------------------------------------------------------
1 | package events
2 |
3 | // Code generated by github.com/tinylib/msgp DO NOT EDIT.
4 |
5 | import (
6 | "github.com/tinylib/msgp/msgp"
7 | )
8 |
9 | // DecodeMsg implements msgp.Decodable
10 | func (z *Bucket) DecodeMsg(dc *msgp.Reader) (err error) {
11 | var field []byte
12 | _ = field
13 | var zb0001 uint32
14 | zb0001, err = dc.ReadMapHeader()
15 | if err != nil {
16 | return
17 | }
18 | for zb0001 > 0 {
19 | zb0001--
20 | field, err = dc.ReadMapKeyPtr()
21 | if err != nil {
22 | return
23 | }
24 | switch msgp.UnsafeString(field) {
25 | case "Rule":
26 | err = z.Rule.DecodeMsg(dc)
27 | if err != nil {
28 | return
29 | }
30 | case "Events":
31 | var zb0002 uint32
32 | zb0002, err = dc.ReadArrayHeader()
33 | if err != nil {
34 | return
35 | }
36 | if cap(z.Events) >= int(zb0002) {
37 | z.Events = (z.Events)[:zb0002]
38 | } else {
39 | z.Events = make([]*Event, zb0002)
40 | }
41 | for za0001 := range z.Events {
42 | if dc.IsNil() {
43 | err = dc.ReadNil()
44 | if err != nil {
45 | return
46 | }
47 | z.Events[za0001] = nil
48 | } else {
49 | if z.Events[za0001] == nil {
50 | z.Events[za0001] = new(Event)
51 | }
52 | err = z.Events[za0001].DecodeMsg(dc)
53 | if err != nil {
54 | return
55 | }
56 | }
57 | }
58 | case "FlushLock":
59 | z.FlushLock, err = dc.ReadBool()
60 | if err != nil {
61 | return
62 | }
63 | case "UpdatedAt":
64 | z.UpdatedAt, err = dc.ReadTime()
65 | if err != nil {
66 | return
67 | }
68 | case "CreatedAt":
69 | z.CreatedAt, err = dc.ReadTime()
70 | if err != nil {
71 | return
72 | }
73 | default:
74 | err = dc.Skip()
75 | if err != nil {
76 | return
77 | }
78 | }
79 | }
80 | return
81 | }
82 |
83 | // EncodeMsg implements msgp.Encodable
84 | func (z *Bucket) EncodeMsg(en *msgp.Writer) (err error) {
85 | // map header, size 5
86 | // write "Rule"
87 | err = en.Append(0x85, 0xa4, 0x52, 0x75, 0x6c, 0x65)
88 | if err != nil {
89 | return
90 | }
91 | err = z.Rule.EncodeMsg(en)
92 | if err != nil {
93 | return
94 | }
95 | // write "Events"
96 | err = en.Append(0xa6, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x73)
97 | if err != nil {
98 | return
99 | }
100 | err = en.WriteArrayHeader(uint32(len(z.Events)))
101 | if err != nil {
102 | return
103 | }
104 | for za0001 := range z.Events {
105 | if z.Events[za0001] == nil {
106 | err = en.WriteNil()
107 | if err != nil {
108 | return
109 | }
110 | } else {
111 | err = z.Events[za0001].EncodeMsg(en)
112 | if err != nil {
113 | return
114 | }
115 | }
116 | }
117 | // write "FlushLock"
118 | err = en.Append(0xa9, 0x46, 0x6c, 0x75, 0x73, 0x68, 0x4c, 0x6f, 0x63, 0x6b)
119 | if err != nil {
120 | return
121 | }
122 | err = en.WriteBool(z.FlushLock)
123 | if err != nil {
124 | return
125 | }
126 | // write "UpdatedAt"
127 | err = en.Append(0xa9, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74)
128 | if err != nil {
129 | return
130 | }
131 | err = en.WriteTime(z.UpdatedAt)
132 | if err != nil {
133 | return
134 | }
135 | // write "CreatedAt"
136 | err = en.Append(0xa9, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74)
137 | if err != nil {
138 | return
139 | }
140 | err = en.WriteTime(z.CreatedAt)
141 | if err != nil {
142 | return
143 | }
144 | return
145 | }
146 |
147 | // MarshalMsg implements msgp.Marshaler
148 | func (z *Bucket) MarshalMsg(b []byte) (o []byte, err error) {
149 | o = msgp.Require(b, z.Msgsize())
150 | // map header, size 5
151 | // string "Rule"
152 | o = append(o, 0x85, 0xa4, 0x52, 0x75, 0x6c, 0x65)
153 | o, err = z.Rule.MarshalMsg(o)
154 | if err != nil {
155 | return
156 | }
157 | // string "Events"
158 | o = append(o, 0xa6, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x73)
159 | o = msgp.AppendArrayHeader(o, uint32(len(z.Events)))
160 | for za0001 := range z.Events {
161 | if z.Events[za0001] == nil {
162 | o = msgp.AppendNil(o)
163 | } else {
164 | o, err = z.Events[za0001].MarshalMsg(o)
165 | if err != nil {
166 | return
167 | }
168 | }
169 | }
170 | // string "FlushLock"
171 | o = append(o, 0xa9, 0x46, 0x6c, 0x75, 0x73, 0x68, 0x4c, 0x6f, 0x63, 0x6b)
172 | o = msgp.AppendBool(o, z.FlushLock)
173 | // string "UpdatedAt"
174 | o = append(o, 0xa9, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74)
175 | o = msgp.AppendTime(o, z.UpdatedAt)
176 | // string "CreatedAt"
177 | o = append(o, 0xa9, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74)
178 | o = msgp.AppendTime(o, z.CreatedAt)
179 | return
180 | }
181 |
182 | // UnmarshalMsg implements msgp.Unmarshaler
183 | func (z *Bucket) UnmarshalMsg(bts []byte) (o []byte, err error) {
184 | var field []byte
185 | _ = field
186 | var zb0001 uint32
187 | zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
188 | if err != nil {
189 | return
190 | }
191 | for zb0001 > 0 {
192 | zb0001--
193 | field, bts, err = msgp.ReadMapKeyZC(bts)
194 | if err != nil {
195 | return
196 | }
197 | switch msgp.UnsafeString(field) {
198 | case "Rule":
199 | bts, err = z.Rule.UnmarshalMsg(bts)
200 | if err != nil {
201 | return
202 | }
203 | case "Events":
204 | var zb0002 uint32
205 | zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts)
206 | if err != nil {
207 | return
208 | }
209 | if cap(z.Events) >= int(zb0002) {
210 | z.Events = (z.Events)[:zb0002]
211 | } else {
212 | z.Events = make([]*Event, zb0002)
213 | }
214 | for za0001 := range z.Events {
215 | if msgp.IsNil(bts) {
216 | bts, err = msgp.ReadNilBytes(bts)
217 | if err != nil {
218 | return
219 | }
220 | z.Events[za0001] = nil
221 | } else {
222 | if z.Events[za0001] == nil {
223 | z.Events[za0001] = new(Event)
224 | }
225 | bts, err = z.Events[za0001].UnmarshalMsg(bts)
226 | if err != nil {
227 | return
228 | }
229 | }
230 | }
231 | case "FlushLock":
232 | z.FlushLock, bts, err = msgp.ReadBoolBytes(bts)
233 | if err != nil {
234 | return
235 | }
236 | case "UpdatedAt":
237 | z.UpdatedAt, bts, err = msgp.ReadTimeBytes(bts)
238 | if err != nil {
239 | return
240 | }
241 | case "CreatedAt":
242 | z.CreatedAt, bts, err = msgp.ReadTimeBytes(bts)
243 | if err != nil {
244 | return
245 | }
246 | default:
247 | bts, err = msgp.Skip(bts)
248 | if err != nil {
249 | return
250 | }
251 | }
252 | }
253 | o = bts
254 | return
255 | }
256 |
257 | // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
258 | func (z *Bucket) Msgsize() (s int) {
259 | s = 1 + 5 + z.Rule.Msgsize() + 7 + msgp.ArrayHeaderSize
260 | for za0001 := range z.Events {
261 | if z.Events[za0001] == nil {
262 | s += msgp.NilSize
263 | } else {
264 | s += z.Events[za0001].Msgsize()
265 | }
266 | }
267 | s += 10 + msgp.BoolSize + 10 + msgp.TimeSize + 10 + msgp.TimeSize
268 | return
269 | }
270 |
--------------------------------------------------------------------------------
/pkg/events/event_gen.go:
--------------------------------------------------------------------------------
1 | package events
2 |
3 | // Code generated by github.com/tinylib/msgp DO NOT EDIT.
4 |
5 | import (
6 | "github.com/tinylib/msgp/msgp"
7 | )
8 |
9 | // DecodeMsg implements msgp.Decodable
10 | func (z *Event) DecodeMsg(dc *msgp.Reader) (err error) {
11 | var field []byte
12 | _ = field
13 | var zb0001 uint32
14 | zb0001, err = dc.ReadMapHeader()
15 | if err != nil {
16 | return
17 | }
18 | for zb0001 > 0 {
19 | zb0001--
20 | field, err = dc.ReadMapKeyPtr()
21 | if err != nil {
22 | return
23 | }
24 | switch msgp.UnsafeString(field) {
25 | case "EventType":
26 | z.EventType, err = dc.ReadString()
27 | if err != nil {
28 | return
29 | }
30 | case "EventTypeVersion":
31 | z.EventTypeVersion, err = dc.ReadString()
32 | if err != nil {
33 | return
34 | }
35 | case "CloudEventsVersion":
36 | z.CloudEventsVersion, err = dc.ReadString()
37 | if err != nil {
38 | return
39 | }
40 | case "Source":
41 | z.Source, err = dc.ReadString()
42 | if err != nil {
43 | return
44 | }
45 | case "EventID":
46 | z.EventID, err = dc.ReadString()
47 | if err != nil {
48 | return
49 | }
50 | case "EventTime":
51 | z.EventTime, err = dc.ReadTime()
52 | if err != nil {
53 | return
54 | }
55 | case "SchemaURL":
56 | z.SchemaURL, err = dc.ReadString()
57 | if err != nil {
58 | return
59 | }
60 | case "ContentType":
61 | z.ContentType, err = dc.ReadString()
62 | if err != nil {
63 | return
64 | }
65 | case "Extensions":
66 | z.Extensions, err = dc.ReadIntf()
67 | if err != nil {
68 | return
69 | }
70 | case "Data":
71 | z.Data, err = dc.ReadIntf()
72 | if err != nil {
73 | return
74 | }
75 | default:
76 | err = dc.Skip()
77 | if err != nil {
78 | return
79 | }
80 | }
81 | }
82 | return
83 | }
84 |
85 | // EncodeMsg implements msgp.Encodable
86 | func (z *Event) EncodeMsg(en *msgp.Writer) (err error) {
87 | // map header, size 10
88 | // write "EventType"
89 | err = en.Append(0x8a, 0xa9, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65)
90 | if err != nil {
91 | return
92 | }
93 | err = en.WriteString(z.EventType)
94 | if err != nil {
95 | return
96 | }
97 | // write "EventTypeVersion"
98 | err = en.Append(0xb0, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e)
99 | if err != nil {
100 | return
101 | }
102 | err = en.WriteString(z.EventTypeVersion)
103 | if err != nil {
104 | return
105 | }
106 | // write "CloudEventsVersion"
107 | err = en.Append(0xb2, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e)
108 | if err != nil {
109 | return
110 | }
111 | err = en.WriteString(z.CloudEventsVersion)
112 | if err != nil {
113 | return
114 | }
115 | // write "Source"
116 | err = en.Append(0xa6, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65)
117 | if err != nil {
118 | return
119 | }
120 | err = en.WriteString(z.Source)
121 | if err != nil {
122 | return
123 | }
124 | // write "EventID"
125 | err = en.Append(0xa7, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x49, 0x44)
126 | if err != nil {
127 | return
128 | }
129 | err = en.WriteString(z.EventID)
130 | if err != nil {
131 | return
132 | }
133 | // write "EventTime"
134 | err = en.Append(0xa9, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x69, 0x6d, 0x65)
135 | if err != nil {
136 | return
137 | }
138 | err = en.WriteTime(z.EventTime)
139 | if err != nil {
140 | return
141 | }
142 | // write "SchemaURL"
143 | err = en.Append(0xa9, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x55, 0x52, 0x4c)
144 | if err != nil {
145 | return
146 | }
147 | err = en.WriteString(z.SchemaURL)
148 | if err != nil {
149 | return
150 | }
151 | // write "ContentType"
152 | err = en.Append(0xab, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65)
153 | if err != nil {
154 | return
155 | }
156 | err = en.WriteString(z.ContentType)
157 | if err != nil {
158 | return
159 | }
160 | // write "Extensions"
161 | err = en.Append(0xaa, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73)
162 | if err != nil {
163 | return
164 | }
165 | err = en.WriteIntf(z.Extensions)
166 | if err != nil {
167 | return
168 | }
169 | // write "Data"
170 | err = en.Append(0xa4, 0x44, 0x61, 0x74, 0x61)
171 | if err != nil {
172 | return
173 | }
174 | err = en.WriteIntf(z.Data)
175 | if err != nil {
176 | return
177 | }
178 | return
179 | }
180 |
181 | // MarshalMsg implements msgp.Marshaler
182 | func (z *Event) MarshalMsg(b []byte) (o []byte, err error) {
183 | o = msgp.Require(b, z.Msgsize())
184 | // map header, size 10
185 | // string "EventType"
186 | o = append(o, 0x8a, 0xa9, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65)
187 | o = msgp.AppendString(o, z.EventType)
188 | // string "EventTypeVersion"
189 | o = append(o, 0xb0, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e)
190 | o = msgp.AppendString(o, z.EventTypeVersion)
191 | // string "CloudEventsVersion"
192 | o = append(o, 0xb2, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e)
193 | o = msgp.AppendString(o, z.CloudEventsVersion)
194 | // string "Source"
195 | o = append(o, 0xa6, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65)
196 | o = msgp.AppendString(o, z.Source)
197 | // string "EventID"
198 | o = append(o, 0xa7, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x49, 0x44)
199 | o = msgp.AppendString(o, z.EventID)
200 | // string "EventTime"
201 | o = append(o, 0xa9, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x69, 0x6d, 0x65)
202 | o = msgp.AppendTime(o, z.EventTime)
203 | // string "SchemaURL"
204 | o = append(o, 0xa9, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x55, 0x52, 0x4c)
205 | o = msgp.AppendString(o, z.SchemaURL)
206 | // string "ContentType"
207 | o = append(o, 0xab, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65)
208 | o = msgp.AppendString(o, z.ContentType)
209 | // string "Extensions"
210 | o = append(o, 0xaa, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73)
211 | o, err = msgp.AppendIntf(o, z.Extensions)
212 | if err != nil {
213 | return
214 | }
215 | // string "Data"
216 | o = append(o, 0xa4, 0x44, 0x61, 0x74, 0x61)
217 | o, err = msgp.AppendIntf(o, z.Data)
218 | if err != nil {
219 | return
220 | }
221 | return
222 | }
223 |
224 | // UnmarshalMsg implements msgp.Unmarshaler
225 | func (z *Event) UnmarshalMsg(bts []byte) (o []byte, err error) {
226 | var field []byte
227 | _ = field
228 | var zb0001 uint32
229 | zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
230 | if err != nil {
231 | return
232 | }
233 | for zb0001 > 0 {
234 | zb0001--
235 | field, bts, err = msgp.ReadMapKeyZC(bts)
236 | if err != nil {
237 | return
238 | }
239 | switch msgp.UnsafeString(field) {
240 | case "EventType":
241 | z.EventType, bts, err = msgp.ReadStringBytes(bts)
242 | if err != nil {
243 | return
244 | }
245 | case "EventTypeVersion":
246 | z.EventTypeVersion, bts, err = msgp.ReadStringBytes(bts)
247 | if err != nil {
248 | return
249 | }
250 | case "CloudEventsVersion":
251 | z.CloudEventsVersion, bts, err = msgp.ReadStringBytes(bts)
252 | if err != nil {
253 | return
254 | }
255 | case "Source":
256 | z.Source, bts, err = msgp.ReadStringBytes(bts)
257 | if err != nil {
258 | return
259 | }
260 | case "EventID":
261 | z.EventID, bts, err = msgp.ReadStringBytes(bts)
262 | if err != nil {
263 | return
264 | }
265 | case "EventTime":
266 | z.EventTime, bts, err = msgp.ReadTimeBytes(bts)
267 | if err != nil {
268 | return
269 | }
270 | case "SchemaURL":
271 | z.SchemaURL, bts, err = msgp.ReadStringBytes(bts)
272 | if err != nil {
273 | return
274 | }
275 | case "ContentType":
276 | z.ContentType, bts, err = msgp.ReadStringBytes(bts)
277 | if err != nil {
278 | return
279 | }
280 | case "Extensions":
281 | z.Extensions, bts, err = msgp.ReadIntfBytes(bts)
282 | if err != nil {
283 | return
284 | }
285 | case "Data":
286 | z.Data, bts, err = msgp.ReadIntfBytes(bts)
287 | if err != nil {
288 | return
289 | }
290 | default:
291 | bts, err = msgp.Skip(bts)
292 | if err != nil {
293 | return
294 | }
295 | }
296 | }
297 | o = bts
298 | return
299 | }
300 |
301 | // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
302 | func (z *Event) Msgsize() (s int) {
303 | s = 1 + 10 + msgp.StringPrefixSize + len(z.EventType) + 17 + msgp.StringPrefixSize + len(z.EventTypeVersion) + 19 + msgp.StringPrefixSize + len(z.CloudEventsVersion) + 7 + msgp.StringPrefixSize + len(z.Source) + 8 + msgp.StringPrefixSize + len(z.EventID) + 10 + msgp.TimeSize + 10 + msgp.StringPrefixSize + len(z.SchemaURL) + 12 + msgp.StringPrefixSize + len(z.ContentType) + 11 + msgp.GuessSize(z.Extensions) + 5 + msgp.GuessSize(z.Data)
304 | return
305 | }
306 |
--------------------------------------------------------------------------------
/pkg/store/store.go:
--------------------------------------------------------------------------------
1 | package store
2 |
3 | import (
4 | "fmt"
5 | "time"
6 |
7 | "github.com/hashicorp/raft-boltdb"
8 | "github.com/satori/go.uuid"
9 |
10 | "github.com/golang/glog"
11 | "github.com/hashicorp/raft"
12 | "github.com/myntra/cortex/pkg/config"
13 | "github.com/myntra/cortex/pkg/events"
14 | "github.com/myntra/cortex/pkg/executions"
15 | "github.com/myntra/cortex/pkg/rules"
16 |
17 | "net/url"
18 |
19 | "github.com/myntra/cortex/pkg/js"
20 | "github.com/myntra/cortex/pkg/util"
21 | )
22 |
23 | const (
24 | retainSnapshotCount = 2
25 | raftTimeout = 10 * time.Second
26 | )
27 |
28 | type defaultStore struct {
29 | opt *config.Config
30 | boltDB *raftboltdb.BoltStore
31 | raft *raft.Raft
32 | scriptStorage *scriptStorage
33 | bucketStorage *bucketStorage
34 | executionStorage *executionStorage
35 | executionBucketQueue chan *events.Bucket
36 | quitFlusherChan chan struct{}
37 | persisters []persister
38 | restorers map[MessageType]restorer
39 | }
40 |
41 | func newStore(opt *config.Config) (*defaultStore, error) {
42 |
43 | // register persisters
44 | var persisters []persister
45 | persisters = append(persisters, persistRules, persistRecords, persistScripts)
46 |
47 | restorers := make(map[MessageType]restorer)
48 |
49 | restorers[RuleType] = restoreRules
50 | restorers[RecordType] = restoreRecords
51 | restorers[ScriptType] = restoreScripts
52 |
53 | store := &defaultStore{
54 | scriptStorage: &scriptStorage{
55 | m: make(map[string]*js.Script),
56 | },
57 | executionStorage: &executionStorage{
58 | m: make(map[string]*executions.Record),
59 | },
60 | bucketStorage: &bucketStorage{
61 | es: &eventStorage{
62 | m: make(map[string]*events.Bucket),
63 | },
64 | rs: &ruleStorage{
65 | m: make(map[string]*rules.Rule),
66 | },
67 | },
68 | opt: opt,
69 | quitFlusherChan: make(chan struct{}),
70 | executionBucketQueue: make(chan *events.Bucket, 1000),
71 | persisters: persisters,
72 | restorers: restorers,
73 | }
74 |
75 | return store, nil
76 | }
77 |
78 | func (d *defaultStore) executor() {
79 | for {
80 | select {
81 | case rb := <-d.executionBucketQueue:
82 | glog.Infof("received bucket %+v\n", rb)
83 | go func(rb *events.Bucket) {
84 | statusCode := 0
85 | var noScriptResult bool
86 | result := js.Execute(d.getScript(rb.Rule.ScriptID), rb)
87 | glog.Infof("Result of the script execution \n%v", result)
88 | if result == nil {
89 | noScriptResult = true
90 | }
91 | if _, err := url.ParseRequestURI(rb.Rule.HookEndpoint); err != nil {
92 | glog.Infoln("Invalid HookEndpoint. Skipping post request")
93 | } else {
94 | if noScriptResult {
95 | statusCode = util.RetryPost(rb, rb.Rule.HookEndpoint, rb.Rule.HookRetry)
96 | } else {
97 | statusCode = util.RetryPost(result, rb.Rule.HookEndpoint, rb.Rule.HookRetry)
98 | }
99 | }
100 |
101 | id := uuid.NewV4()
102 | record := &executions.Record{
103 | ID: id.String(),
104 | Bucket: *rb,
105 | ScriptResult: result,
106 | HookStatusCode: statusCode,
107 | CreatedAt: time.Now(),
108 | }
109 |
110 | glog.Infof("addRecord %v\n", record)
111 | glog.Infoln("err => ", d.addRecord(record))
112 |
113 | }(rb)
114 | }
115 | }
116 | }
117 |
118 | func (d *defaultStore) flusher() {
119 |
120 | go d.executor()
121 |
122 | ticker := time.NewTicker(time.Millisecond * time.Duration(d.opt.FlushInterval))
123 | loop:
124 | for {
125 | select {
126 | case <-ticker.C:
127 | if d.raft.State() != raft.Leader {
128 | glog.Info("node is not leader, skipping flush")
129 | continue
130 | }
131 |
132 | glog.Infof("rule flusher started ===============================> \n")
133 |
134 | for ruleID, bucket := range d.bucketStorage.es.clone() {
135 | glog.Infof("rule flusher ==> %v with size %v canflush ? %v, can flush in %v, has flush lock ? %v",
136 | ruleID, len(bucket.Events), bucket.CanFlush(), bucket.CanFlushIn(), bucket.FlushLock)
137 |
138 | if bucket.CanFlush() && !bucket.FlushLock {
139 | go func(currRuleID string) {
140 | err := d.flushLock(currRuleID)
141 | if err != nil {
142 | glog.Errorf("error taking flush lock on bucket %v %v", currRuleID, err)
143 | }
144 |
145 | glog.Infof("lock taken %v %v\n", currRuleID, d.bucketStorage.es.clone()[currRuleID].FlushLock)
146 | }(ruleID)
147 | }
148 |
149 | if bucket.FlushLock {
150 | go func(currRuleID string, currBucket *events.Bucket) {
151 | glog.Infof("post bucket to execution %+v\n", currBucket.Rule.ID)
152 | d.executionBucketQueue <- currBucket
153 |
154 | err := d.flushBucket(currRuleID)
155 | if err != nil {
156 | glog.Errorf("error flushing bucket %v %v\n", currRuleID, err)
157 | }
158 | }(ruleID, bucket)
159 | }
160 | }
161 |
162 | glog.Infof("rule flusher done ===============================> \n")
163 |
164 | case <-d.quitFlusherChan:
165 | break loop
166 | }
167 | }
168 |
169 | }
170 |
171 | func (d *defaultStore) expirer() {
172 | ticker := time.NewTicker(time.Hour)
173 |
174 | for {
175 | select {
176 | case <-ticker.C:
177 | if d.raft.State() != raft.Leader {
178 | glog.Info("node is not leader, skipping expire")
179 | continue
180 | }
181 | if d.executionStorage.getTotalRecordsCount() > d.opt.MaxHistory {
182 | // TODO, remove oldest records
183 | }
184 | }
185 |
186 | }
187 | }
188 |
189 | func (d *defaultStore) applyCMD(cmd Command) error {
190 | if d.raft.State() != raft.Leader {
191 | return fmt.Errorf("not leader")
192 | }
193 |
194 | glog.Infof("apply cmd %v\n marshalling", cmd)
195 |
196 | b, err := cmd.MarshalMsg(nil)
197 | if err != nil {
198 | glog.Errorf("stash %v err %v\n", cmd, err)
199 | return err
200 | }
201 |
202 | glog.Infof("==> apply %+v\n", cmd)
203 | f := d.raft.Apply(b, raftTimeout)
204 | return f.Error()
205 | }
206 |
207 | func (d *defaultStore) matchAndStash(event *events.Event) error {
208 | glog.Info("match and stash event ==> ", event)
209 | for _, rule := range d.getRules() {
210 | go d.match(rule, event)
211 | }
212 | return nil
213 | }
214 |
215 | func (d *defaultStore) match(rule *rules.Rule, event *events.Event) error {
216 | glog.Info("match event ==> ", event)
217 | if rule.HasMatching(event.EventType) {
218 | go d.stash(rule.ID, event)
219 | }
220 | return nil
221 |
222 | }
223 |
224 | func (d *defaultStore) stash(ruleID string, event *events.Event) error {
225 | glog.Info("apply stash event ==> ", event)
226 | return d.applyCMD(Command{
227 | Op: "stash",
228 | RuleID: ruleID,
229 | Event: event,
230 | })
231 | }
232 |
233 | func (d *defaultStore) addRule(rule *rules.Rule) error {
234 |
235 | if rule.Dwell == 0 || rule.DwellDeadline == 0 || rule.MaxDwell == 0 {
236 | rule.Dwell = d.opt.DefaultDwell
237 | rule.DwellDeadline = d.opt.DefaultDwellDeadline
238 | rule.MaxDwell = d.opt.DefaultMaxDwell
239 | }
240 |
241 | return d.applyCMD(Command{
242 | Op: "add_rule",
243 | Rule: rule,
244 | })
245 | }
246 |
247 | func (d *defaultStore) updateRule(rule *rules.Rule) error {
248 | return d.applyCMD(Command{
249 | Op: "update_rule",
250 | Rule: rule,
251 | })
252 | }
253 |
254 | func (d *defaultStore) addScript(script *js.Script) error {
255 | return d.applyCMD(Command{
256 | Op: "add_script",
257 | Script: script,
258 | })
259 | }
260 |
261 | func (d *defaultStore) updateScript(script *js.Script) error {
262 | return d.applyCMD(Command{
263 | Op: "update_script",
264 | Script: script,
265 | })
266 | }
267 |
268 | func (d *defaultStore) removeScript(id string) error {
269 | return d.applyCMD(Command{
270 | Op: "remove_script",
271 | ScriptID: id,
272 | })
273 | }
274 |
275 | func (d *defaultStore) removeRule(ruleID string) error {
276 | return d.applyCMD(Command{
277 | Op: "remove_rule",
278 | RuleID: ruleID,
279 | })
280 | }
281 |
282 | func (d *defaultStore) flushBucket(ruleID string) error {
283 | return d.applyCMD(Command{
284 | Op: "flush_bucket",
285 | RuleID: ruleID,
286 | })
287 | }
288 |
289 | func (d *defaultStore) flushLock(ruleID string) error {
290 | return d.applyCMD(Command{
291 | Op: "flush_lock",
292 | RuleID: ruleID,
293 | })
294 | }
295 |
296 | func (d *defaultStore) addRecord(r *executions.Record) error {
297 | return d.applyCMD(Command{
298 | Op: "add_record",
299 | Record: r,
300 | })
301 | }
302 |
303 | func (d *defaultStore) removeRecord(id string) error {
304 | return d.applyCMD(Command{
305 | Op: "remove_record",
306 | RecordID: id,
307 | })
308 | }
309 |
310 | func (d *defaultStore) getScripts() []string {
311 | return d.scriptStorage.getScripts()
312 | }
313 |
314 | func (d *defaultStore) getScript(id string) *js.Script {
315 | return d.scriptStorage.getScript(id)
316 | }
317 |
318 | func (d *defaultStore) getRules() []*rules.Rule {
319 | return d.bucketStorage.rs.getRules()
320 | }
321 |
322 | func (d *defaultStore) getRule(ruleID string) *rules.Rule {
323 | return d.bucketStorage.rs.getRule(ruleID)
324 | }
325 |
326 | func (d *defaultStore) getRecords(ruleID string) []*executions.Record {
327 | return d.executionStorage.getRecords(ruleID)
328 | }
329 |
330 | func (d *defaultStore) getRecordsCount(ruleID string) int {
331 | return d.executionStorage.getRecordsCount(ruleID)
332 | }
333 |
--------------------------------------------------------------------------------
/pkg/store/command_gen.go:
--------------------------------------------------------------------------------
1 | package store
2 |
3 | // Code generated by github.com/tinylib/msgp DO NOT EDIT.
4 |
5 | import (
6 | "github.com/myntra/cortex/pkg/events"
7 | "github.com/myntra/cortex/pkg/executions"
8 | "github.com/myntra/cortex/pkg/js"
9 | "github.com/myntra/cortex/pkg/rules"
10 | "github.com/tinylib/msgp/msgp"
11 | )
12 |
13 | // DecodeMsg implements msgp.Decodable
14 | func (z *Command) DecodeMsg(dc *msgp.Reader) (err error) {
15 | var field []byte
16 | _ = field
17 | var zb0001 uint32
18 | zb0001, err = dc.ReadMapHeader()
19 | if err != nil {
20 | return
21 | }
22 | for zb0001 > 0 {
23 | zb0001--
24 | field, err = dc.ReadMapKeyPtr()
25 | if err != nil {
26 | return
27 | }
28 | switch msgp.UnsafeString(field) {
29 | case "Op":
30 | z.Op, err = dc.ReadString()
31 | if err != nil {
32 | return
33 | }
34 | case "Rule":
35 | if dc.IsNil() {
36 | err = dc.ReadNil()
37 | if err != nil {
38 | return
39 | }
40 | z.Rule = nil
41 | } else {
42 | if z.Rule == nil {
43 | z.Rule = new(rules.Rule)
44 | }
45 | err = z.Rule.DecodeMsg(dc)
46 | if err != nil {
47 | return
48 | }
49 | }
50 | case "RuleID":
51 | z.RuleID, err = dc.ReadString()
52 | if err != nil {
53 | return
54 | }
55 | case "Event":
56 | if dc.IsNil() {
57 | err = dc.ReadNil()
58 | if err != nil {
59 | return
60 | }
61 | z.Event = nil
62 | } else {
63 | if z.Event == nil {
64 | z.Event = new(events.Event)
65 | }
66 | err = z.Event.DecodeMsg(dc)
67 | if err != nil {
68 | return
69 | }
70 | }
71 | case "ScriptID":
72 | z.ScriptID, err = dc.ReadString()
73 | if err != nil {
74 | return
75 | }
76 | case "Script":
77 | if dc.IsNil() {
78 | err = dc.ReadNil()
79 | if err != nil {
80 | return
81 | }
82 | z.Script = nil
83 | } else {
84 | if z.Script == nil {
85 | z.Script = new(js.Script)
86 | }
87 | err = z.Script.DecodeMsg(dc)
88 | if err != nil {
89 | return
90 | }
91 | }
92 | case "Record":
93 | if dc.IsNil() {
94 | err = dc.ReadNil()
95 | if err != nil {
96 | return
97 | }
98 | z.Record = nil
99 | } else {
100 | if z.Record == nil {
101 | z.Record = new(executions.Record)
102 | }
103 | err = z.Record.DecodeMsg(dc)
104 | if err != nil {
105 | return
106 | }
107 | }
108 | case "RecordID":
109 | z.RecordID, err = dc.ReadString()
110 | if err != nil {
111 | return
112 | }
113 | default:
114 | err = dc.Skip()
115 | if err != nil {
116 | return
117 | }
118 | }
119 | }
120 | return
121 | }
122 |
123 | // EncodeMsg implements msgp.Encodable
124 | func (z *Command) EncodeMsg(en *msgp.Writer) (err error) {
125 | // map header, size 8
126 | // write "Op"
127 | err = en.Append(0x88, 0xa2, 0x4f, 0x70)
128 | if err != nil {
129 | return
130 | }
131 | err = en.WriteString(z.Op)
132 | if err != nil {
133 | return
134 | }
135 | // write "Rule"
136 | err = en.Append(0xa4, 0x52, 0x75, 0x6c, 0x65)
137 | if err != nil {
138 | return
139 | }
140 | if z.Rule == nil {
141 | err = en.WriteNil()
142 | if err != nil {
143 | return
144 | }
145 | } else {
146 | err = z.Rule.EncodeMsg(en)
147 | if err != nil {
148 | return
149 | }
150 | }
151 | // write "RuleID"
152 | err = en.Append(0xa6, 0x52, 0x75, 0x6c, 0x65, 0x49, 0x44)
153 | if err != nil {
154 | return
155 | }
156 | err = en.WriteString(z.RuleID)
157 | if err != nil {
158 | return
159 | }
160 | // write "Event"
161 | err = en.Append(0xa5, 0x45, 0x76, 0x65, 0x6e, 0x74)
162 | if err != nil {
163 | return
164 | }
165 | if z.Event == nil {
166 | err = en.WriteNil()
167 | if err != nil {
168 | return
169 | }
170 | } else {
171 | err = z.Event.EncodeMsg(en)
172 | if err != nil {
173 | return
174 | }
175 | }
176 | // write "ScriptID"
177 | err = en.Append(0xa8, 0x53, 0x63, 0x72, 0x69, 0x70, 0x74, 0x49, 0x44)
178 | if err != nil {
179 | return
180 | }
181 | err = en.WriteString(z.ScriptID)
182 | if err != nil {
183 | return
184 | }
185 | // write "Script"
186 | err = en.Append(0xa6, 0x53, 0x63, 0x72, 0x69, 0x70, 0x74)
187 | if err != nil {
188 | return
189 | }
190 | if z.Script == nil {
191 | err = en.WriteNil()
192 | if err != nil {
193 | return
194 | }
195 | } else {
196 | err = z.Script.EncodeMsg(en)
197 | if err != nil {
198 | return
199 | }
200 | }
201 | // write "Record"
202 | err = en.Append(0xa6, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64)
203 | if err != nil {
204 | return
205 | }
206 | if z.Record == nil {
207 | err = en.WriteNil()
208 | if err != nil {
209 | return
210 | }
211 | } else {
212 | err = z.Record.EncodeMsg(en)
213 | if err != nil {
214 | return
215 | }
216 | }
217 | // write "RecordID"
218 | err = en.Append(0xa8, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x49, 0x44)
219 | if err != nil {
220 | return
221 | }
222 | err = en.WriteString(z.RecordID)
223 | if err != nil {
224 | return
225 | }
226 | return
227 | }
228 |
229 | // MarshalMsg implements msgp.Marshaler
230 | func (z *Command) MarshalMsg(b []byte) (o []byte, err error) {
231 | o = msgp.Require(b, z.Msgsize())
232 | // map header, size 8
233 | // string "Op"
234 | o = append(o, 0x88, 0xa2, 0x4f, 0x70)
235 | o = msgp.AppendString(o, z.Op)
236 | // string "Rule"
237 | o = append(o, 0xa4, 0x52, 0x75, 0x6c, 0x65)
238 | if z.Rule == nil {
239 | o = msgp.AppendNil(o)
240 | } else {
241 | o, err = z.Rule.MarshalMsg(o)
242 | if err != nil {
243 | return
244 | }
245 | }
246 | // string "RuleID"
247 | o = append(o, 0xa6, 0x52, 0x75, 0x6c, 0x65, 0x49, 0x44)
248 | o = msgp.AppendString(o, z.RuleID)
249 | // string "Event"
250 | o = append(o, 0xa5, 0x45, 0x76, 0x65, 0x6e, 0x74)
251 | if z.Event == nil {
252 | o = msgp.AppendNil(o)
253 | } else {
254 | o, err = z.Event.MarshalMsg(o)
255 | if err != nil {
256 | return
257 | }
258 | }
259 | // string "ScriptID"
260 | o = append(o, 0xa8, 0x53, 0x63, 0x72, 0x69, 0x70, 0x74, 0x49, 0x44)
261 | o = msgp.AppendString(o, z.ScriptID)
262 | // string "Script"
263 | o = append(o, 0xa6, 0x53, 0x63, 0x72, 0x69, 0x70, 0x74)
264 | if z.Script == nil {
265 | o = msgp.AppendNil(o)
266 | } else {
267 | o, err = z.Script.MarshalMsg(o)
268 | if err != nil {
269 | return
270 | }
271 | }
272 | // string "Record"
273 | o = append(o, 0xa6, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64)
274 | if z.Record == nil {
275 | o = msgp.AppendNil(o)
276 | } else {
277 | o, err = z.Record.MarshalMsg(o)
278 | if err != nil {
279 | return
280 | }
281 | }
282 | // string "RecordID"
283 | o = append(o, 0xa8, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x49, 0x44)
284 | o = msgp.AppendString(o, z.RecordID)
285 | return
286 | }
287 |
288 | // UnmarshalMsg implements msgp.Unmarshaler
289 | func (z *Command) UnmarshalMsg(bts []byte) (o []byte, err error) {
290 | var field []byte
291 | _ = field
292 | var zb0001 uint32
293 | zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
294 | if err != nil {
295 | return
296 | }
297 | for zb0001 > 0 {
298 | zb0001--
299 | field, bts, err = msgp.ReadMapKeyZC(bts)
300 | if err != nil {
301 | return
302 | }
303 | switch msgp.UnsafeString(field) {
304 | case "Op":
305 | z.Op, bts, err = msgp.ReadStringBytes(bts)
306 | if err != nil {
307 | return
308 | }
309 | case "Rule":
310 | if msgp.IsNil(bts) {
311 | bts, err = msgp.ReadNilBytes(bts)
312 | if err != nil {
313 | return
314 | }
315 | z.Rule = nil
316 | } else {
317 | if z.Rule == nil {
318 | z.Rule = new(rules.Rule)
319 | }
320 | bts, err = z.Rule.UnmarshalMsg(bts)
321 | if err != nil {
322 | return
323 | }
324 | }
325 | case "RuleID":
326 | z.RuleID, bts, err = msgp.ReadStringBytes(bts)
327 | if err != nil {
328 | return
329 | }
330 | case "Event":
331 | if msgp.IsNil(bts) {
332 | bts, err = msgp.ReadNilBytes(bts)
333 | if err != nil {
334 | return
335 | }
336 | z.Event = nil
337 | } else {
338 | if z.Event == nil {
339 | z.Event = new(events.Event)
340 | }
341 | bts, err = z.Event.UnmarshalMsg(bts)
342 | if err != nil {
343 | return
344 | }
345 | }
346 | case "ScriptID":
347 | z.ScriptID, bts, err = msgp.ReadStringBytes(bts)
348 | if err != nil {
349 | return
350 | }
351 | case "Script":
352 | if msgp.IsNil(bts) {
353 | bts, err = msgp.ReadNilBytes(bts)
354 | if err != nil {
355 | return
356 | }
357 | z.Script = nil
358 | } else {
359 | if z.Script == nil {
360 | z.Script = new(js.Script)
361 | }
362 | bts, err = z.Script.UnmarshalMsg(bts)
363 | if err != nil {
364 | return
365 | }
366 | }
367 | case "Record":
368 | if msgp.IsNil(bts) {
369 | bts, err = msgp.ReadNilBytes(bts)
370 | if err != nil {
371 | return
372 | }
373 | z.Record = nil
374 | } else {
375 | if z.Record == nil {
376 | z.Record = new(executions.Record)
377 | }
378 | bts, err = z.Record.UnmarshalMsg(bts)
379 | if err != nil {
380 | return
381 | }
382 | }
383 | case "RecordID":
384 | z.RecordID, bts, err = msgp.ReadStringBytes(bts)
385 | if err != nil {
386 | return
387 | }
388 | default:
389 | bts, err = msgp.Skip(bts)
390 | if err != nil {
391 | return
392 | }
393 | }
394 | }
395 | o = bts
396 | return
397 | }
398 |
399 | // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
400 | func (z *Command) Msgsize() (s int) {
401 | s = 1 + 3 + msgp.StringPrefixSize + len(z.Op) + 5
402 | if z.Rule == nil {
403 | s += msgp.NilSize
404 | } else {
405 | s += z.Rule.Msgsize()
406 | }
407 | s += 7 + msgp.StringPrefixSize + len(z.RuleID) + 6
408 | if z.Event == nil {
409 | s += msgp.NilSize
410 | } else {
411 | s += z.Event.Msgsize()
412 | }
413 | s += 9 + msgp.StringPrefixSize + len(z.ScriptID) + 7
414 | if z.Script == nil {
415 | s += msgp.NilSize
416 | } else {
417 | s += z.Script.Msgsize()
418 | }
419 | s += 7
420 | if z.Record == nil {
421 | s += msgp.NilSize
422 | } else {
423 | s += z.Record.Msgsize()
424 | }
425 | s += 9 + msgp.StringPrefixSize + len(z.RecordID)
426 | return
427 | }
428 |
--------------------------------------------------------------------------------
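The generated codecs above give Command a reflection-free MessagePack round trip: MarshalMsg appends the encoded map to a byte slice sized via Msgsize, and UnmarshalMsg rebuilds the struct, allocating the optional Rule/Event/Script/Record pointers only when the wire value is non-nil. A minimal sketch of that round trip from inside the store package; the helper name and field values are illustrative, not part of the repo:

    package store

    import "log"

    // roundTripExample encodes a Command with the generated msgp methods
    // and decodes it back. Illustrative only.
    func roundTripExample() {
        cmd := &Command{Op: "add_rule", RuleID: "test-rule-id-1"} // illustrative values

        buf, err := cmd.MarshalMsg(nil) // appends to the given slice; nil allocates a new one
        if err != nil {
            log.Fatal(err)
        }

        var decoded Command
        if _, err := decoded.UnmarshalMsg(buf); err != nil {
            log.Fatal(err)
        }
        log.Printf("decoded op=%q ruleID=%q", decoded.Op, decoded.RuleID)
    }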
/ui/src/TablePaginated.js:
--------------------------------------------------------------------------------
1 | import React from 'react';
2 | import PropTypes from 'prop-types';
3 | import { withStyles } from '@material-ui/core/styles';
4 | import Table from '@material-ui/core/Table';
5 | import TableBody from '@material-ui/core/TableBody';
6 | import TableCell from '@material-ui/core/TableCell';
7 | import TableHead from '@material-ui/core/TableHead';
8 | import TableFooter from '@material-ui/core/TableFooter';
9 | import TablePagination from '@material-ui/core/TablePagination';
10 | import TableRow from '@material-ui/core/TableRow';
11 | import Paper from '@material-ui/core/Paper';
12 | import IconButton from '@material-ui/core/IconButton';
13 | import FirstPageIcon from '@material-ui/icons/FirstPage';
14 | import KeyboardArrowLeft from '@material-ui/icons/KeyboardArrowLeft';
15 | import KeyboardArrowRight from '@material-ui/icons/KeyboardArrowRight';
16 | import LastPageIcon from '@material-ui/icons/LastPage';
17 | import JSONTree from 'react-json-view';
18 | import Dialog from '@material-ui/core/Dialog';
19 | import DialogActions from '@material-ui/core/DialogActions';
20 | import DialogContent from '@material-ui/core/DialogContent';
21 | import DialogTitle from '@material-ui/core/DialogTitle';
22 | import Button from '@material-ui/core/Button';
23 | import OutlineIcon from '@material-ui/icons/OpenInNew';
24 |
25 | // shopping_basket
26 |
27 | const actionsStyles = theme => ({
28 | root: {
29 | flexShrink: 0,
30 | color: theme.palette.text.secondary,
31 | marginLeft: theme.spacing.unit * 2.5
32 | },
33 | });
34 |
35 | class TablePaginationActions extends React.Component {
36 | handleFirstPageButtonClick = event => {
37 | this.props.onChangePage(event, 0);
38 | };
39 |
40 | handleBackButtonClick = event => {
41 | this.props.onChangePage(event, this.props.page - 1);
42 | };
43 |
44 | handleNextButtonClick = event => {
45 | this.props.onChangePage(event, this.props.page + 1);
46 | };
47 |
48 | handleLastPageButtonClick = event => {
49 | this.props.onChangePage(
50 | event,
51 | Math.max(0, Math.ceil(this.props.count / this.props.rowsPerPage) - 1),
52 | );
53 | };
54 |
55 | render() {
56 | const { classes, count, page, rowsPerPage, theme } = this.props;
57 |
58 | return (
59 |       <div className={classes.root}>
60 |         <IconButton
61 |           onClick={this.handleFirstPageButtonClick}
62 |           disabled={page === 0}
63 |           aria-label="First Page"
64 |         >
65 |           {theme.direction === 'rtl' ? <LastPageIcon /> : <FirstPageIcon />}
66 |         </IconButton>
67 |         <IconButton
68 |           onClick={this.handleBackButtonClick}
69 |           disabled={page === 0}
70 |           aria-label="Previous Page"
71 |         >
72 |           {theme.direction === 'rtl' ? <KeyboardArrowRight /> : <KeyboardArrowLeft />}
73 |         </IconButton>
74 |         <IconButton
75 |           onClick={this.handleNextButtonClick}
76 |           disabled={page >= Math.ceil(count / rowsPerPage) - 1}
77 |           aria-label="Next Page"
78 |         >
79 |           {theme.direction === 'rtl' ? <KeyboardArrowLeft /> : <KeyboardArrowRight />}
80 |         </IconButton>
81 |         <IconButton
82 |           onClick={this.handleLastPageButtonClick}
83 |           disabled={page >= Math.ceil(count / rowsPerPage) - 1}
84 |           aria-label="Last Page"
85 |         >
86 |           {theme.direction === 'rtl' ? <FirstPageIcon /> : <LastPageIcon />}
87 |         </IconButton>
88 |       </div>
89 | );
90 | }
91 | }
92 |
93 | TablePaginationActions.propTypes = {
94 | classes: PropTypes.object.isRequired,
95 | count: PropTypes.number.isRequired,
96 | onChangePage: PropTypes.func.isRequired,
97 | page: PropTypes.number.isRequired,
98 | rowsPerPage: PropTypes.number.isRequired,
99 | theme: PropTypes.object.isRequired,
100 | };
101 |
102 | const TablePaginationActionsWrapped = withStyles(actionsStyles, { withTheme: true })(
103 | TablePaginationActions,
104 | );
105 |
106 | const styles = theme => ({
107 | root: {
108 | width: '100%',
109 | marginTop: theme.spacing.unit * 3,
110 | },
111 | table: {
112 | minWidth: 500
113 | },
114 | tableWrapper: {
115 | overflowX: 'auto',
116 | marginTop:'6px'
117 | },
118 | });
119 |
120 | class TablePaginated extends React.Component {
121 | constructor(props) {
122 | super(props);
123 |
124 | this.state = {
125 | data: props.data,
126 | page: 0,
127 | rowsPerPage: 5,
128 | ruleDialogOpen:false,
129 | selectedHistory:""
130 | };
131 | }
132 |
133 | handleChangePage = (event, page) => {
134 | this.setState({ page });
135 | };
136 |
137 | handleChangeRowsPerPage = event => {
138 | this.setState({ rowsPerPage: event.target.value });
139 | };
140 |
141 | componentWillReceiveProps = (nextProps) => {
142 | if(nextProps.data !== this.props.data){
143 | this.setState({data:nextProps.data});
144 | }
145 | }
146 |
147 | handleScriptResult = (data) => {
148 | let finalString;
149 |
150 | if(typeof data === "string"){
151 | finalString = {
152 | result : data
153 | }
154 | }else if(typeof data === "object" && data !== null){
155 | finalString = data;
156 | }else{
157 | finalString = {
158 | result : "No result data"
159 | }
160 | }
161 | this.setState({selectedHistory:finalString,
162 | ruleDialogOpen:true})
163 | }
164 |
165 | handleBucketResult = (data) => {
166 | this.setState({selectedHistory:data,
167 | ruleDialogOpen:true})
168 | }
169 |
170 | getFormattedDateTime = (utcDateTime) => {
171 | let d = new Date(utcDateTime);
172 | let dformat = [d.getMonth()+1, d.getDate(), d.getFullYear()].join('/') + ' ' +
173 | [d.getHours(), d.getMinutes(), d.getSeconds()].join(':');
174 | return dformat;
175 | }
176 |
177 | render() {
178 | let self = this;
179 | let colCss = {
180 | fontSize:'12px',
181 | fontWeight:300,
182 | textAlign:'center'
183 | }
184 | const { classes } = this.props;
185 | const { data, rowsPerPage, page } = this.state;
186 | const emptyRows = rowsPerPage - Math.min(rowsPerPage, data.length - page * rowsPerPage);
187 |
188 | return (
189 |       <Paper className={classes.root}>
190 |         <Dialog
191 |           open={this.state.ruleDialogOpen}
192 |           onClose={() => this.setState({ ruleDialogOpen: false })}
193 |           maxWidth="md"
194 |           fullWidth
195 |         >
196 |           <DialogTitle>Details</DialogTitle>
197 |           <DialogContent>
198 |             <JSONTree src={this.state.selectedHistory} />
199 |           </DialogContent>
200 |           <DialogActions>
201 |             <Button
202 |               onClick={() => this.setState({ ruleDialogOpen: false })}
203 |               color="primary"
204 |             >
205 |               Close
206 |             </Button>
207 |           </DialogActions>
208 |         </Dialog>
209 |         <div className={classes.tableWrapper}>
210 |           <Table className={classes.table}>
211 |             <TableHead>
212 |               <TableRow>
213 |                 <TableCell
214 |                   style={colCss}
215 |                 >
216 |                   ID
217 |                 </TableCell>
218 |                 <TableCell
219 |                   style={colCss}
220 |                 >
221 |                   Event Bucket
222 |                 </TableCell>
223 |                 <TableCell
224 |                   style={colCss}
225 |                 >
226 |                   Script Result
227 |                 </TableCell>
228 |                 <TableCell
229 |                   style={colCss}
230 |                 >
231 |                   Hook Status
232 |                 </TableCell>
233 |                 <TableCell
234 |                   style={colCss}
235 |
236 |                 >
237 |                   Time
238 |                 </TableCell>
239 |               </TableRow>
240 |             </TableHead>
241 |             <TableBody>
242 |               {data.slice(page * rowsPerPage, page * rowsPerPage + rowsPerPage).map((row,index) => {
243 |                 return (
244 |                   <TableRow key={index}>
245 |                     <TableCell style={colCss}>
246 |                       {index + 1}
247 |                     </TableCell>
248 |                     <TableCell style={colCss} onClick={() => self.handleBucketResult(row.bucket)}>
249 |                       <OutlineIcon />
250 |                     </TableCell>
251 |                     <TableCell style={colCss} onClick={() => self.handleScriptResult(row.script_result)}>
252 |                       <OutlineIcon />
253 |                     </TableCell>
254 |                     <TableCell style={colCss}>
255 |                       {row.hook_status_code}
256 |                     </TableCell>
257 |                     <TableCell style={colCss}>
258 |                       {self.getFormattedDateTime(row.created_at)}
259 |                     </TableCell>
260 |                   </TableRow>
261 |                 );
262 |               })}
263 |               {emptyRows > 0 && (
264 |                 <TableRow style={{ height: 48 * emptyRows }}>
265 |                   <TableCell colSpan={5} />
266 |                 </TableRow>
267 |               )}
268 |             </TableBody>
269 |             <TableFooter>
270 |               <TableRow>
271 |                 <TablePagination
272 |                   colSpan={5}
273 |                   count={data.length}
274 |                   rowsPerPage={rowsPerPage}
275 |                   page={page}
276 |                   onChangePage={this.handleChangePage}
277 |                   onChangeRowsPerPage={this.handleChangeRowsPerPage}
278 |                   ActionsComponent={TablePaginationActionsWrapped}
279 |                 />
280 |               </TableRow>
281 |             </TableFooter>
282 |           </Table>
283 |         </div>
284 |       </Paper>
285 | );
286 | }
287 | }
288 |
289 | TablePaginated.propTypes = {
290 | classes: PropTypes.object.isRequired,
291 | };
292 |
293 | export default withStyles(styles)(TablePaginated);
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 |
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 |
7 | 1. Definitions.
8 |
9 | "License" shall mean the terms and conditions for use, reproduction,
10 | and distribution as defined by Sections 1 through 9 of this document.
11 |
12 | "Licensor" shall mean the copyright owner or entity authorized by
13 | the copyright owner that is granting the License.
14 |
15 | "Legal Entity" shall mean the union of the acting entity and all
16 | other entities that control, are controlled by, or are under common
17 | control with that entity. For the purposes of this definition,
18 | "control" means (i) the power, direct or indirect, to cause the
19 | direction or management of such entity, whether by contract or
20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 | outstanding shares, or (iii) beneficial ownership of such entity.
22 |
23 | "You" (or "Your") shall mean an individual or Legal Entity
24 | exercising permissions granted by this License.
25 |
26 | "Source" form shall mean the preferred form for making modifications,
27 | including but not limited to software source code, documentation
28 | source, and configuration files.
29 |
30 | "Object" form shall mean any form resulting from mechanical
31 | transformation or translation of a Source form, including but
32 | not limited to compiled object code, generated documentation,
33 | and conversions to other media types.
34 |
35 | "Work" shall mean the work of authorship, whether in Source or
36 | Object form, made available under the License, as indicated by a
37 | copyright notice that is included in or attached to the work
38 | (an example is provided in the Appendix below).
39 |
40 | "Derivative Works" shall mean any work, whether in Source or Object
41 | form, that is based on (or derived from) the Work and for which the
42 | editorial revisions, annotations, elaborations, or other modifications
43 | represent, as a whole, an original work of authorship. For the purposes
44 | of this License, Derivative Works shall not include works that remain
45 | separable from, or merely link (or bind by name) to the interfaces of,
46 | the Work and Derivative Works thereof.
47 |
48 | "Contribution" shall mean any work of authorship, including
49 | the original version of the Work and any modifications or additions
50 | to that Work or Derivative Works thereof, that is intentionally
51 | submitted to Licensor for inclusion in the Work by the copyright owner
52 | or by an individual or Legal Entity authorized to submit on behalf of
53 | the copyright owner. For the purposes of this definition, "submitted"
54 | means any form of electronic, verbal, or written communication sent
55 | to the Licensor or its representatives, including but not limited to
56 | communication on electronic mailing lists, source code control systems,
57 | and issue tracking systems that are managed by, or on behalf of, the
58 | Licensor for the purpose of discussing and improving the Work, but
59 | excluding communication that is conspicuously marked or otherwise
60 | designated in writing by the copyright owner as "Not a Contribution."
61 |
62 | "Contributor" shall mean Licensor and any individual or Legal Entity
63 | on behalf of whom a Contribution has been received by Licensor and
64 | subsequently incorporated within the Work.
65 |
66 | 2. Grant of Copyright License. Subject to the terms and conditions of
67 | this License, each Contributor hereby grants to You a perpetual,
68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69 | copyright license to reproduce, prepare Derivative Works of,
70 | publicly display, publicly perform, sublicense, and distribute the
71 | Work and such Derivative Works in Source or Object form.
72 |
73 | 3. Grant of Patent License. Subject to the terms and conditions of
74 | this License, each Contributor hereby grants to You a perpetual,
75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76 | (except as stated in this section) patent license to make, have made,
77 | use, offer to sell, sell, import, and otherwise transfer the Work,
78 | where such license applies only to those patent claims licensable
79 | by such Contributor that are necessarily infringed by their
80 | Contribution(s) alone or by combination of their Contribution(s)
81 | with the Work to which such Contribution(s) was submitted. If You
82 | institute patent litigation against any entity (including a
83 | cross-claim or counterclaim in a lawsuit) alleging that the Work
84 | or a Contribution incorporated within the Work constitutes direct
85 | or contributory patent infringement, then any patent licenses
86 | granted to You under this License for that Work shall terminate
87 | as of the date such litigation is filed.
88 |
89 | 4. Redistribution. You may reproduce and distribute copies of the
90 | Work or Derivative Works thereof in any medium, with or without
91 | modifications, and in Source or Object form, provided that You
92 | meet the following conditions:
93 |
94 | (a) You must give any other recipients of the Work or
95 | Derivative Works a copy of this License; and
96 |
97 | (b) You must cause any modified files to carry prominent notices
98 | stating that You changed the files; and
99 |
100 | (c) You must retain, in the Source form of any Derivative Works
101 | that You distribute, all copyright, patent, trademark, and
102 | attribution notices from the Source form of the Work,
103 | excluding those notices that do not pertain to any part of
104 | the Derivative Works; and
105 |
106 | (d) If the Work includes a "NOTICE" text file as part of its
107 | distribution, then any Derivative Works that You distribute must
108 | include a readable copy of the attribution notices contained
109 | within such NOTICE file, excluding those notices that do not
110 | pertain to any part of the Derivative Works, in at least one
111 | of the following places: within a NOTICE text file distributed
112 | as part of the Derivative Works; within the Source form or
113 | documentation, if provided along with the Derivative Works; or,
114 | within a display generated by the Derivative Works, if and
115 | wherever such third-party notices normally appear. The contents
116 | of the NOTICE file are for informational purposes only and
117 | do not modify the License. You may add Your own attribution
118 | notices within Derivative Works that You distribute, alongside
119 | or as an addendum to the NOTICE text from the Work, provided
120 | that such additional attribution notices cannot be construed
121 | as modifying the License.
122 |
123 | You may add Your own copyright statement to Your modifications and
124 | may provide additional or different license terms and conditions
125 | for use, reproduction, or distribution of Your modifications, or
126 | for any such Derivative Works as a whole, provided Your use,
127 | reproduction, and distribution of the Work otherwise complies with
128 | the conditions stated in this License.
129 |
130 | 5. Submission of Contributions. Unless You explicitly state otherwise,
131 | any Contribution intentionally submitted for inclusion in the Work
132 | by You to the Licensor shall be under the terms and conditions of
133 | this License, without any additional terms or conditions.
134 | Notwithstanding the above, nothing herein shall supersede or modify
135 | the terms of any separate license agreement you may have executed
136 | with Licensor regarding such Contributions.
137 |
138 | 6. Trademarks. This License does not grant permission to use the trade
139 | names, trademarks, service marks, or product names of the Licensor,
140 | except as required for reasonable and customary use in describing the
141 | origin of the Work and reproducing the content of the NOTICE file.
142 |
143 | 7. Disclaimer of Warranty. Unless required by applicable law or
144 | agreed to in writing, Licensor provides the Work (and each
145 | Contributor provides its Contributions) on an "AS IS" BASIS,
146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147 | implied, including, without limitation, any warranties or conditions
148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149 | PARTICULAR PURPOSE. You are solely responsible for determining the
150 | appropriateness of using or redistributing the Work and assume any
151 | risks associated with Your exercise of permissions under this License.
152 |
153 | 8. Limitation of Liability. In no event and under no legal theory,
154 | whether in tort (including negligence), contract, or otherwise,
155 | unless required by applicable law (such as deliberate and grossly
156 | negligent acts) or agreed to in writing, shall any Contributor be
157 | liable to You for damages, including any direct, indirect, special,
158 | incidental, or consequential damages of any character arising as a
159 | result of this License or out of the use or inability to use the
160 | Work (including but not limited to damages for loss of goodwill,
161 | work stoppage, computer failure or malfunction, or any and all
162 | other commercial damages or losses), even if such Contributor
163 | has been advised of the possibility of such damages.
164 |
165 | 9. Accepting Warranty or Additional Liability. While redistributing
166 | the Work or Derivative Works thereof, You may choose to offer,
167 | and charge a fee for, acceptance of support, warranty, indemnity,
168 | or other liability obligations and/or rights consistent with this
169 | License. However, in accepting such obligations, You may act only
170 | on Your own behalf and on Your sole responsibility, not on behalf
171 | of any other Contributor, and only if You agree to indemnify,
172 | defend, and hold each Contributor harmless for any liability
173 | incurred by, or claims asserted against, such Contributor by reason
174 | of your accepting any such warranty or additional liability.
175 |
176 | END OF TERMS AND CONDITIONS
177 |
178 | APPENDIX: How to apply the Apache License to your work.
179 |
180 | To apply the Apache License to your work, attach the following
181 | boilerplate notice, with the fields enclosed by brackets "[]"
182 | replaced with your own identifying information. (Don't include
183 | the brackets!) The text should be enclosed in the appropriate
184 | comment syntax for the file format. We also recommend that a
185 | file or class name and description of purpose be included on the
186 | same "printed page" as the copyright notice for easier
187 | identification within third-party archives.
188 |
189 | Copyright [yyyy] [name of copyright owner]
190 |
191 | Licensed under the Apache License, Version 2.0 (the "License");
192 | you may not use this file except in compliance with the License.
193 | You may obtain a copy of the License at
194 |
195 | http://www.apache.org/licenses/LICENSE-2.0
196 |
197 | Unless required by applicable law or agreed to in writing, software
198 | distributed under the License is distributed on an "AS IS" BASIS,
199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 | See the License for the specific language governing permissions and
201 | limitations under the License.
202 |
--------------------------------------------------------------------------------
/pkg/store/node_test.go:
--------------------------------------------------------------------------------
1 | package store
2 |
3 | import (
4 | "bytes"
5 | "fmt"
6 | "io/ioutil"
7 | "math/rand"
8 | "net"
9 | "os"
10 | "strconv"
11 | "testing"
12 | "time"
13 |
14 | "github.com/myntra/cortex/pkg/executions"
15 |
16 | "github.com/cenkalti/backoff"
17 | "github.com/stretchr/testify/require"
18 |
19 | "github.com/golang/glog"
20 |
21 | "github.com/myntra/cortex/pkg/config"
22 | "github.com/myntra/cortex/pkg/events"
23 | "github.com/myntra/cortex/pkg/js"
24 | "github.com/myntra/cortex/pkg/rules"
25 | )
26 |
27 | var testevent = events.Event{
28 | EventType: "acme.prod.icinga.check_disk",
29 | EventTypeVersion: "1.0",
30 | CloudEventsVersion: "0.1",
31 | Source: "/sink",
32 | EventID: "42",
33 | EventTime: time.Now(),
34 | SchemaURL: "http://www.json.org",
35 | ContentType: "application/json",
36 | Data: map[string]interface{}{"Alpha": "julie", "Beta": 42},
37 | Extensions: map[string]string{"ext1": "value"},
38 | }
39 |
40 | var testRule = rules.Rule{
41 | ID: "test-rule-id-1",
42 | HookEndpoint: "http://localhost:3000/testrule",
43 | HookRetry: 2,
44 | EventTypePatterns: []string{"acme.prod.icinga.check_disk", "acme.prod.site247.cart_down"},
45 | ScriptID: "myscript",
46 | }
47 |
48 | var testRuleUpdated = rules.Rule{
49 | ID: "test-rule-id-1",
50 | HookEndpoint: "http://localhost:3000/testrule",
51 | HookRetry: 2,
52 | EventTypePatterns: []string{"apple.prod.icinga.check_disk", "acme.prod.site247.cart_down"},
53 | ScriptID: "myscript",
54 | }
55 |
56 | func newTestEvent(id, key string) events.Event {
57 | return events.Event{
58 | EventType: key + "acme.prod.icinga.check_disk",
59 | EventTypeVersion: "1.0",
60 | CloudEventsVersion: "0.1",
61 | Source: "/sink",
62 | EventID: id + "42",
63 | EventTime: time.Now(),
64 | SchemaURL: "http://www.json.org",
65 | ContentType: "application/json",
66 | Data: map[string]interface{}{id + "Alpha": "julie", "Beta": 42},
67 | Extensions: map[string]string{"ext1": "value"},
68 | }
69 | }
70 |
71 | func newTestRule(key string) rules.Rule {
72 | return rules.Rule{
73 | ID: key + "test-rule-id-1",
74 | HookEndpoint: "http://localhost:3000/testrule",
75 | HookRetry: 2,
76 | EventTypePatterns: []string{key + "acme.prod.icinga.check_disk", key + "acme.prod.site247.cart_down"},
77 | ScriptID: "myscript",
78 | Dwell: 30 * 1000,
79 | DwellDeadline: 20 * 1000,
80 | MaxDwell: 90 * 1000,
81 | }
82 | }
83 |
84 | func singleNode(t *testing.T, httpAddr, raftAddr string, f func(node *Node)) {
85 |
86 | tmpDir, _ := ioutil.TempDir("", "store_test")
87 | defer os.RemoveAll(tmpDir)
88 |
89 | raftListener, err := net.Listen("tcp", raftAddr)
90 | require.NoError(t, err)
91 |
92 | httpListener, err := net.Listen("tcp", httpAddr)
93 | require.NoError(t, err)
94 |
95 | // open store
96 | cfg := &config.Config{
97 | NodeID: "node0",
98 | Dir: tmpDir,
99 | DefaultDwell: 4000,
100 | DefaultMaxDwell: 8000,
101 | DefaultDwellDeadline: 3800,
102 | MaxHistory: 1000,
103 | FlushInterval: 1000,
104 | SnapshotInterval: 30,
105 | HTTPAddr: httpAddr,
106 | RaftAddr: raftAddr,
107 | HTTPListener: httpListener,
108 | RaftListener: raftListener,
109 | }
110 |
111 | node, err := NewNode(cfg)
112 | require.NoError(t, err)
113 |
114 | err = node.Start()
115 | require.NoError(t, err)
116 |
117 | glog.Infof("node started. 5s")
118 | // run test
119 | time.Sleep(time.Second * 5)
120 | glog.Infof("running test ")
121 | f(node)
122 |
123 | // close node
124 | err = node.Shutdown()
125 | require.NoError(t, err)
126 |
127 | err = httpListener.Close()
128 | require.NoError(t, err)
129 | glog.Info("done test ")
130 | }
131 |
132 | func TestRuleSingleNode(t *testing.T) {
133 | raftAddr := ":11878"
134 | httpAddr := ":11879"
135 | singleNode(t, httpAddr, raftAddr, func(node *Node) {
136 |
137 | err := node.AddRule(&testRule)
138 | require.NoError(t, err)
139 |
140 | rule := node.GetRule(testRule.ID)
141 | require.True(t, rule.ID == testRule.ID)
142 |
143 | err = node.UpdateRule(&testRuleUpdated)
144 | require.NoError(t, err)
145 |
146 | updatedRule := node.GetRule(testRule.ID)
147 | require.True(t, updatedRule.EventTypePatterns[0] == testRuleUpdated.EventTypePatterns[0])
148 |
149 | err = node.RemoveRule(testRule.ID)
150 | require.NoError(t, err)
151 |
152 | rule = node.GetRule(testRule.ID)
153 | require.Nil(t, rule)
154 |
155 | })
156 | }
157 |
158 | func TestScriptSingleNode(t *testing.T) {
159 | raftAddr := ":22878"
160 | httpAddr := ":22879"
161 | singleNode(t, httpAddr, raftAddr, func(node *Node) {
162 | script := []byte(`
163 | let result = 0;
164 | export default function() { result++; }`)
165 |
166 | // add script
167 | err := node.AddScript(&js.Script{ID: "myscript", Data: script})
168 | require.NoError(t, err)
169 |
170 | // get script
171 |
172 | respScript := node.GetScript("myscript")
173 | require.True(t, bytes.Equal(script, respScript.Data))
174 |
175 | // remove script
176 |
177 | err = node.RemoveScript("myscript")
178 | require.NoError(t, err)
179 |
180 | // get script
181 | respScript = node.GetScript("myscript")
182 | require.Nil(t, respScript)
183 |
184 | })
185 | }
186 |
187 | func TestOrphanEventSingleNode(t *testing.T) {
188 | raftAddr := ":35878"
189 | httpAddr := ":35879"
190 | singleNode(t, httpAddr, raftAddr, func(node *Node) {
191 | err := node.Stash(&testevent)
192 | require.NoError(t, err)
193 |
194 | var rb *events.Bucket
195 | loop:
196 | for {
197 | select {
198 | case rb = <-node.store.executionBucketQueue:
199 | fmt.Println("rb=>", rb)
200 |
201 | case <-time.After(time.Millisecond * time.Duration(node.store.opt.DefaultDwell+1000)):
202 | break loop
203 | }
204 |
205 | }
206 |
207 | require.Nil(t, rb)
208 | })
209 | }
210 |
211 | func TestEventSingleNode(t *testing.T) {
212 | raftAddr := ":46878"
213 | httpAddr := ":46879"
214 | singleNode(t, httpAddr, raftAddr, func(node *Node) {
215 |
216 | err := node.AddRule(&testRule)
217 | require.NoError(t, err)
218 | err = node.Stash(&testevent)
219 | require.NoError(t, err)
220 |
221 | var records []*executions.Record
222 | operation := func() error {
223 | records = node.GetRuleExectutions(testRule.ID)
224 |
225 | if len(records) == 0 {
226 | 			return fmt.Errorf("no execution records yet")
227 | }
228 |
229 | return nil // or an error
230 | }
231 |
232 | err = backoff.Retry(operation, backoff.WithMaxRetries(backoff.NewConstantBackOff(time.Second), node.store.opt.DefaultMaxDwell*3))
233 | require.NoError(t, err)
234 | require.True(t, len(records) == 1, fmt.Sprintf("len is %v", len(records)))
235 |
236 | t.Logf("%+v\n", records[0])
237 | })
238 | }
239 |
240 | func TestMultipleEventSingleRule(t *testing.T) {
241 | raftAddr := ":27878"
242 | httpAddr := ":27879"
243 | singleNode(t, httpAddr, raftAddr, func(node *Node) {
244 |
245 | t.Run("Test stash multiple events before dwell time", func(t *testing.T) {
246 | key := "my"
247 | myTestRule := newTestRule("my")
248 | n := 5
249 |
250 | s := rand.NewSource(time.Now().UnixNano())
251 | r := rand.New(s)
252 | intervals := make(map[int]events.Event)
253 |
254 | var firstInterval uint64
255 | for i := 0; i < n; i++ {
256 |
257 | interval := r.Intn(int(myTestRule.DwellDeadline - 100))
258 | if i == 0 {
259 | firstInterval = uint64(interval)
260 | }
261 | intervals[interval] = newTestEvent(strconv.Itoa(i), key)
262 | }
263 |
264 | err := node.AddRule(&myTestRule)
265 | require.NoError(t, err)
266 |
267 | for interval, te := range intervals {
268 | go func(interval int, te events.Event) {
269 | time.Sleep(time.Millisecond * time.Duration(interval))
270 | err = node.Stash(&te)
271 | require.NoError(t, err)
272 | }(interval, te)
273 | }
274 |
275 | glog.Info("sleeping ...")
276 | time.Sleep(time.Millisecond * time.Duration(myTestRule.Dwell+firstInterval+3000))
277 | glog.Info("sleeping done")
278 |
279 | records := node.GetRuleExectutions(myTestRule.ID)
280 | require.True(t, len(records) == 1, fmt.Sprintf("len is %v", len(records)))
281 | require.True(t, len(records[0].Bucket.Events) == n, fmt.Sprintf("len is %v", len(records[0].Bucket.Events)))
282 | })
283 |
284 | t.Run("Test stash multiple events after dwell time", func(t *testing.T) {
285 | key := "hi"
286 | myTestRule := newTestRule("hi")
287 | n := 5
288 |
289 | s := rand.NewSource(time.Now().UnixNano())
290 | r := rand.New(s)
291 | intervals := make(map[int]events.Event)
292 |
293 | 		// events arriving after the dwell deadline
294 | for i := 0; i < n; i++ {
295 | interval := int(myTestRule.DwellDeadline) + 1000*i
296 | intervals[interval] = newTestEvent(strconv.Itoa(i), key)
297 | }
298 |
299 | 		// events arriving before the dwell deadline
300 | for i := 5; i < 10; i++ {
301 | interval := r.Intn(int(myTestRule.Dwell - myTestRule.DwellDeadline))
302 | intervals[interval] = newTestEvent(strconv.Itoa(i), key)
303 | }
304 |
305 | // 5 events will be deduped
306 | for i := 5; i < 10; i++ {
307 | interval := r.Intn(int(myTestRule.Dwell - myTestRule.DwellDeadline))
308 | intervals[interval] = newTestEvent(strconv.Itoa(i), key)
309 | }
310 |
311 | for k := range intervals {
312 | glog.Infof("intervals %v\n", k)
313 | }
314 | err := node.AddRule(&myTestRule)
315 | require.NoError(t, err)
316 |
317 | for interval, te := range intervals {
318 | go func(interval int, te events.Event) {
319 | time.Sleep(time.Millisecond * time.Duration(interval))
320 | glog.Info("send event ", time.Millisecond*time.Duration(interval))
321 | err = node.Stash(&te)
322 | require.NoError(t, err)
323 | }(interval, te)
324 | }
325 |
326 | var records []*executions.Record
327 | operation := func() error {
328 | records = node.GetRuleExectutions(myTestRule.ID)
329 |
330 | if len(records) == 0 {
331 | 				return fmt.Errorf("no execution records yet")
332 | }
333 |
334 | return nil // or an error
335 | }
336 |
337 | err = backoff.Retry(operation, backoff.WithMaxRetries(backoff.NewConstantBackOff(time.Second), myTestRule.MaxDwell*3))
338 | require.NoError(t, err)
339 | require.True(t, len(records[0].Bucket.Events) == 10, fmt.Sprintf("len is %v", len(records[0].Bucket.Events)))
340 | })
341 |
342 | })
343 | }
344 |
345 | func TestNodeSnapshot(t *testing.T) {
346 | tmpDir, _ := ioutil.TempDir("", "store_test")
347 | defer os.RemoveAll(tmpDir)
348 |
349 | raftAddr := ":28878"
350 | httpAddr := ":28879"
351 |
352 | raftListener, err := net.Listen("tcp", raftAddr)
353 | require.NoError(t, err)
354 | httpListener, err := net.Listen("tcp", httpAddr)
355 | require.NoError(t, err)
356 |
357 | // open store
358 | cfg := &config.Config{
359 | NodeID: "node0",
360 | Dir: tmpDir,
361 | DefaultDwell: 4000,
362 | DefaultMaxDwell: 8000,
363 | DefaultDwellDeadline: 3800,
364 | MaxHistory: 1000,
365 | FlushInterval: 1000,
366 | SnapshotInterval: 30,
367 | HTTPAddr: httpAddr,
368 | RaftAddr: raftAddr,
369 | HTTPListener: httpListener,
370 | RaftListener: raftListener,
371 | }
372 |
373 | node, err := NewNode(cfg)
374 | require.NoError(t, err)
375 |
376 | err = node.Start()
377 | require.NoError(t, err)
378 |
379 | glog.Infof("node started. 5s")
380 | // run test
381 | time.Sleep(time.Second * 5)
382 |
383 | script := []byte(`
384 | let result = 0;
385 | export default function() { result++; }`)
386 |
387 | // add script
388 | err = node.AddScript(&js.Script{ID: "myscript", Data: script})
389 | require.NoError(t, err)
390 | err = node.AddRule(&testRule)
391 | require.NoError(t, err)
392 |
393 | rule := node.GetRule(testRule.ID)
394 | require.True(t, testRule.ID == rule.ID)
395 |
396 | err = node.Stash(&testevent)
397 | require.NoError(t, err)
398 |
399 | time.Sleep(time.Millisecond * time.Duration(node.store.opt.DefaultDwell+5000))
400 |
401 | glog.Infof("take a snapshot")
402 |
403 | err = node.Snapshot()
404 | require.NoError(t, err)
405 |
406 | time.Sleep(time.Second * 2)
407 | // close node <===================
408 | err = node.Shutdown()
409 | require.NoError(t, err)
410 |
411 | time.Sleep(time.Second * 2)
412 |
413 | raftListener, err = net.Listen("tcp", raftAddr)
414 | require.NoError(t, err)
415 |
416 | cfg.RaftListener = raftListener
417 |
418 | // start again ==================>
419 | err = node.Start()
420 | require.NoError(t, err)
421 |
422 | glog.Infof("node started. 5s")
423 | // run test
424 | time.Sleep(time.Second * 5)
425 |
426 | rule = node.GetRule(testRule.ID)
427 | require.True(t, testRule.ID == rule.ID)
428 |
429 | respScript := node.GetScript("myscript")
430 | require.NotNil(t, respScript)
431 | require.True(t, bytes.Equal(script, respScript.Data))
432 |
433 | records := node.GetRuleExectutions(testRule.ID)
434 | require.False(t, len(records) == 0)
435 | require.True(t, records[0].Bucket.Rule.ID == testRule.ID)
436 |
437 | // close node
438 | err = node.Shutdown()
439 | require.NoError(t, err)
440 |
441 | err = httpListener.Close()
442 | require.NoError(t, err)
443 | }
444 |
--------------------------------------------------------------------------------
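The dwell-related assertions above hinge on three per-rule windows: Dwell (how long matching events are bucketed before the rule fires), DwellDeadline (the cut-off after which a late event extends the wait), and MaxDwell (the upper bound on that extension). That reading is inferred from the tests rather than stated documentation; a small sketch of a rule carrying those windows, mirroring newTestRule:

    package main

    import (
        "fmt"

        "github.com/myntra/cortex/pkg/rules"
    )

    func main() {
        // All durations are in milliseconds, matching the fixtures above.
        r := rules.Rule{
            ID:                "example-rule-id",            // illustrative
            HookEndpoint:      "http://localhost:3000/hook", // illustrative
            HookRetry:         2,
            EventTypePatterns: []string{"acme.prod.icinga.check_disk"},
            ScriptID:          "myscript",
            Dwell:             30 * 1000, // collect matching events for 30s
            DwellDeadline:     20 * 1000, // events after 20s extend the window
            MaxDwell:          90 * 1000, // never wait longer than 90s in total
        }
        fmt.Printf("%+v\n", r)
    }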
/pkg/service/handlers.go:
--------------------------------------------------------------------------------
1 | package service
2 |
3 | import (
4 | "encoding/json"
5 | "fmt"
6 | "io/ioutil"
7 | "net/http"
8 | "net/http/httputil"
9 |
10 | "github.com/myntra/cortex/pkg/executions"
11 |
12 | "github.com/go-chi/chi"
13 | "github.com/golang/glog"
14 | "github.com/imdario/mergo"
15 | "github.com/myntra/cortex/pkg/events"
16 | "github.com/myntra/cortex/pkg/events/sinks"
17 | "github.com/myntra/cortex/pkg/js"
18 | "github.com/myntra/cortex/pkg/rules"
19 | "github.com/myntra/cortex/pkg/util"
20 | "github.com/satori/go.uuid"
21 | )
22 |
23 | func (s *Service) leaderProxy(h http.HandlerFunc) http.HandlerFunc {
24 | return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
25 |
26 | leaderAddr := s.node.LeaderAddr()
27 | if leaderAddr == "" {
28 | h.ServeHTTP(w, r)
29 | } else {
30 | glog.Infof("proxying request to leader at %v", leaderAddr)
31 | proxy := httputil.ReverseProxy{Director: func(r *http.Request) {
32 | r.URL.Scheme = "http"
33 | r.URL.Host = leaderAddr
34 | r.Host = leaderAddr
35 | }}
36 |
37 | proxy.ServeHTTP(w, r)
38 |
39 | }
40 | })
41 | }
42 |
43 | // eventHandler expects an event in the request body and aggregates it by type
44 | func (s *Service) eventHandler(w http.ResponseWriter, r *http.Request) {
45 |
46 | body, err := ioutil.ReadAll(r.Body)
47 | if err != nil {
48 | util.ErrStatus(w, r, "invalid request body, expected a cloudevents.io event", http.StatusNotAcceptable, err)
49 | return
50 | }
51 |
52 | defer r.Body.Close()
53 |
54 | var event events.Event
55 | err = json.Unmarshal(body, &event)
56 | if err != nil {
57 | util.ErrStatus(w, r, "parsing failed, expected a cloudevents.io event", http.StatusNotAcceptable, err)
58 | return
59 | }
60 |
61 | err = s.node.Stash(&event)
62 | if err != nil {
63 | util.ErrStatus(w, r, "error stashing event", http.StatusInternalServerError, err)
64 | return
65 | }
66 |
67 | w.Header().Set("Content-Type", "application/json")
68 | w.WriteHeader(http.StatusOK)
69 | }
70 |
71 | func (s *Service) addRuleHandler(w http.ResponseWriter, r *http.Request) {
72 | reqBody, err := ioutil.ReadAll(r.Body)
73 | if err != nil {
74 | util.ErrStatus(w, r, "invalid request body, expected a valid rule", http.StatusNotAcceptable, err)
75 | return
76 | }
77 |
78 | defer r.Body.Close()
79 |
80 | var rule rules.PublicRule
81 | err = json.Unmarshal(reqBody, &rule)
82 | if err != nil {
83 | util.ErrStatus(w, r, "rule parsing failed", http.StatusNotAcceptable, err)
84 | return
85 | }
86 |
87 | if rule.ID == "" {
88 | uid := uuid.NewV4()
89 | rule.ID = uid.String()
90 | }
91 |
92 | err = s.node.AddRule(rules.NewFromPublic(&rule))
93 | if err != nil {
94 | util.ErrStatus(w, r, "adding rule failed", http.StatusNotAcceptable, err)
95 | return
96 | }
97 |
98 | b, err := json.Marshal(&rule)
99 | if err != nil {
100 | util.ErrStatus(w, r, "rules parsing failed", http.StatusNotFound, err)
101 | return
102 | }
103 |
104 | w.Header().Set("Content-Type", "application/json")
105 | w.WriteHeader(http.StatusOK)
106 | w.Write(b)
107 | }
108 |
109 | func (s *Service) updateRuleHandler(w http.ResponseWriter, r *http.Request) {
110 | reqBody, err := ioutil.ReadAll(r.Body)
111 | if err != nil {
112 | util.ErrStatus(w, r, "invalid request body, expected a valid rule", http.StatusNotAcceptable, err)
113 | return
114 | }
115 |
116 | defer r.Body.Close()
117 |
118 | var rule rules.PublicRule
119 | err = json.Unmarshal(reqBody, &rule)
120 | if err != nil {
121 | util.ErrStatus(w, r, "rule parsing failed", http.StatusNotAcceptable, err)
122 | return
123 | }
124 |
125 | existingRule := s.node.GetRule(rule.ID)
126 | if existingRule == nil {
127 | 		util.ErrStatus(w, r, "update rule failed, rule not found", http.StatusNotFound, fmt.Errorf("rule is nil"))
128 | 		return
129 | 	}
130 | existingPublicRule := rules.NewFromPrivate(existingRule)
131 |
132 | if err := mergo.Merge(&rule, existingPublicRule); err != nil {
133 | util.ErrStatus(w, r, "updating rule failed", http.StatusInternalServerError, err)
134 | return
135 | }
136 |
137 | err = s.node.UpdateRule(rules.NewFromPublic(&rule))
138 | if err != nil {
139 | util.ErrStatus(w, r, "updating rule failed", http.StatusNotAcceptable, err)
140 | return
141 | }
142 |
143 | b, err := json.Marshal(&rule)
144 | if err != nil {
145 | util.ErrStatus(w, r, "updating rule failed. rules parsing failed", http.StatusNotFound, err)
146 | return
147 | }
148 |
149 | w.Header().Set("Content-Type", "application/json")
150 | w.WriteHeader(http.StatusOK)
151 | w.Write(b)
152 | }
153 |
154 | func (s *Service) removeRuleHandler(w http.ResponseWriter, r *http.Request) {
155 | ruleID := chi.URLParam(r, "id")
156 | err := s.node.RemoveRule(ruleID)
157 | if err != nil {
158 | util.ErrStatus(w, r, "could not remove rule", http.StatusNotFound, err)
159 | return
160 | }
161 | w.WriteHeader(http.StatusOK)
162 | }
163 |
164 | func (s *Service) getRuleHandler(w http.ResponseWriter, r *http.Request) {
165 | ruleID := chi.URLParam(r, "id")
166 |
167 | rule := s.node.GetRule(ruleID)
168 | if rule == nil {
169 | util.ErrStatus(w, r, "rule not found", http.StatusNotFound, fmt.Errorf("rule is nil"))
170 | return
171 | }
172 |
173 | b, err := json.Marshal(rules.NewFromPrivate(rule))
174 | if err != nil {
175 | util.ErrStatus(w, r, "rules parsing failed", http.StatusNotFound, err)
176 | return
177 | }
178 |
179 | w.Header().Set("Content-Type", "application/json")
180 | w.WriteHeader(http.StatusOK)
181 | w.Write(b)
182 |
183 | }
184 |
185 | func (s *Service) getRulesHandler(w http.ResponseWriter, r *http.Request) {
186 | privateRules := s.node.GetRules()
187 |
188 | publicRules := make([]*rules.PublicRule, 0)
189 |
190 | for _, privateRule := range privateRules {
191 | publicRules = append(publicRules, rules.NewFromPrivate(privateRule))
192 | }
193 |
194 | b, err := json.Marshal(&publicRules)
195 | if err != nil {
196 | util.ErrStatus(w, r, "rules parsing failed", http.StatusNotFound, err)
197 | return
198 | }
199 |
200 | w.Header().Set("Content-Type", "application/json")
201 | w.WriteHeader(http.StatusOK)
202 | w.Write(b)
203 |
204 | }
205 |
206 | func (s *Service) getRulesExecutions(w http.ResponseWriter, r *http.Request) {
207 | ruleID := chi.URLParam(r, "id")
208 | records := make([]*executions.Record, 0)
209 | rs := s.node.GetRuleExectutions(ruleID)
210 | records = append(records, rs...)
211 |
212 | b, err := json.Marshal(records)
213 | if err != nil {
214 | util.ErrStatus(w, r, "records marshalling failed", http.StatusNotFound, err)
215 | return
216 | }
217 |
218 | w.Header().Set("Content-Type", "application/json")
219 | w.WriteHeader(http.StatusOK)
220 | w.Write(b)
221 |
222 | }
223 |
224 | // ScriptRequest is the container for add/update script
225 | type ScriptRequest struct {
226 | ID string `json:"id"`
227 | Data []byte `json:"data"`
228 | }
229 |
230 | // Validate validates the ScriptRequest
231 | func (s *ScriptRequest) Validate() error {
232 | if s.ID == "" {
233 | return fmt.Errorf("no id provided")
234 | }
235 |
236 | if len(s.Data) == 0 {
237 | return fmt.Errorf("script data len 0")
238 | }
239 |
240 | // validationBucket := events.Bucket{
241 | // Events: []*events.Event{
242 | // &events.Event{},
243 | // },
244 | // }
245 |
246 | // // result := js.Execute(s.Data, validationBucket)
247 | // // ex, ok := result.(*goja.Exception)
248 | // // if ok {
249 | // // return fmt.Errorf("error executing script %v", ex)
250 | // // }
251 |
252 | return nil
253 | }
254 |
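// Note (illustrative, not generated from the handlers below): because
// ScriptRequest.Data is a []byte, encoding/json expects the "data" field to be
// base64-encoded in the request body, e.g.
//
//	{"id": "myscript", "data": "bGV0IHJlc3VsdCA9IDA7"}
//
// where "bGV0IHJlc3VsdCA9IDA7" is base64("let result = 0;"). The "id" and
// "data" names come from the json tags on ScriptRequest above.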
255 | func (s *Service) addScriptHandler(w http.ResponseWriter, r *http.Request) {
256 |
257 | scriptData, err := ioutil.ReadAll(r.Body)
258 | if err != nil {
259 | util.ErrStatus(w, r, "invalid request body", http.StatusNotAcceptable, err)
260 | return
261 | }
262 |
263 | defer r.Body.Close()
264 | sr := &ScriptRequest{}
265 | err = json.Unmarshal(scriptData, sr)
266 | if err != nil {
267 | util.ErrStatus(w, r, "invalid request body", http.StatusNotAcceptable, err)
268 | return
269 | }
270 |
271 | err = sr.Validate()
272 | if err != nil {
273 | util.ErrStatus(w, r, "invalid request body", http.StatusNotAcceptable, err)
274 | return
275 | }
276 |
277 | script := &js.Script{ID: sr.ID, Data: sr.Data}
278 | err = s.node.AddScript(script)
279 | if err != nil {
280 | util.ErrStatus(w, r, "error adding script", http.StatusNotAcceptable, err)
281 | return
282 | }
283 |
284 | w.Header().Set("Content-Type", "application/json")
285 | w.WriteHeader(http.StatusOK)
286 |
287 | }
288 |
289 | func (s *Service) updateScriptHandler(w http.ResponseWriter, r *http.Request) {
290 |
291 | scriptData, err := ioutil.ReadAll(r.Body)
292 | if err != nil {
293 | util.ErrStatus(w, r, "invalid request body", http.StatusNotAcceptable, err)
294 | return
295 | }
296 |
297 | defer r.Body.Close()
298 | sr := &ScriptRequest{}
299 | err = json.Unmarshal(scriptData, sr)
300 | if err != nil {
301 | util.ErrStatus(w, r, "invalid request body", http.StatusNotAcceptable, err)
302 | return
303 | }
304 |
305 | err = sr.Validate()
306 | if err != nil {
307 | util.ErrStatus(w, r, "invalid request body", http.StatusNotAcceptable, err)
308 | return
309 | }
310 |
311 | script := &js.Script{ID: sr.ID, Data: sr.Data}
312 | err = s.node.UpdateScript(script)
313 | if err != nil {
314 | 		util.ErrStatus(w, r, "error updating script", http.StatusNotAcceptable, err)
315 | return
316 | }
317 |
318 | w.Header().Set("Content-Type", "application/json")
319 | w.WriteHeader(http.StatusOK)
320 |
321 | }
322 |
323 | func (s *Service) removeScriptHandler(w http.ResponseWriter, r *http.Request) {
324 | scriptID := chi.URLParam(r, "id")
325 | err := s.node.RemoveScript(scriptID)
326 | if err != nil {
327 | util.ErrStatus(w, r, "could not remove script", http.StatusNotFound, err)
328 | return
329 | }
330 |
331 | w.Header().Set("Content-Type", "application/json")
332 | w.WriteHeader(http.StatusOK)
333 |
334 | }
335 |
336 | func (s *Service) getScriptHandler(w http.ResponseWriter, r *http.Request) {
337 | scriptID := chi.URLParam(r, "id")
338 | script := s.node.GetScript(scriptID)
339 | if script == nil || len(script.Data) == 0 {
340 | util.ErrStatus(w, r, "script not found", http.StatusNotFound, fmt.Errorf("script data len 0"))
341 | return
342 | }
343 |
344 | b, err := json.Marshal(&script)
345 | if err != nil {
346 | util.ErrStatus(w, r, "error writing script data ", http.StatusNotFound, err)
347 | return
348 | }
349 |
350 | w.Header().Set("Content-Type", "application/json")
351 | w.WriteHeader(http.StatusOK)
352 | w.Write(b)
353 |
354 | }
355 |
356 | func (s *Service) getScriptListHandler(w http.ResponseWriter, r *http.Request) {
357 | scriptIds := make([]string, 0)
358 | sids := s.node.GetScripts()
359 | scriptIds = append(scriptIds, sids...)
360 |
361 | b, err := json.Marshal(&scriptIds)
362 | if err != nil {
363 | util.ErrStatus(w, r, "scripts list parsing failed", http.StatusNotFound, err)
364 | return
365 | }
366 |
367 | w.Header().Set("Content-Type", "application/json")
368 | w.WriteHeader(http.StatusOK)
369 | w.Write(b)
370 | }
371 |
372 | func (s *Service) site247AlertHandler(w http.ResponseWriter, r *http.Request) {
373 |
374 | alertData, err := ioutil.ReadAll(r.Body)
375 | if err != nil {
376 | util.ErrStatus(w, r, "invalid request body", http.StatusNotAcceptable, err)
377 | return
378 | }
379 |
380 | defer r.Body.Close()
381 | alert := &sinks.Site247Alert{}
382 | err = json.Unmarshal(alertData, alert)
383 | if err != nil {
384 | util.ErrStatus(w, r, "invalid request body", http.StatusNotAcceptable, err)
385 | return
386 | }
387 |
388 | event := sinks.EventFromSite247(*alert)
389 |
390 | err = s.node.Stash(event)
391 | if err != nil {
392 | util.ErrStatus(w, r, "error stashing event", http.StatusInternalServerError, err)
393 | return
394 | }
395 |
396 | b, err := json.Marshal(event)
397 | if err != nil {
398 | util.ErrStatus(w, r, "error writing event data", http.StatusNotAcceptable, err)
399 | return
400 | }
401 |
402 | w.Header().Set("Content-Type", "application/json")
403 | w.WriteHeader(http.StatusOK)
404 | w.Write(b)
405 | }
406 |
407 | func (s *Service) icingaAlertHandler(w http.ResponseWriter, r *http.Request) {
408 |
409 | alertData, err := ioutil.ReadAll(r.Body)
410 | if err != nil {
411 | util.ErrStatus(w, r, "invalid request body", http.StatusNotAcceptable, err)
412 | return
413 | }
414 |
415 | defer r.Body.Close()
416 | alert := &sinks.IcingaAlert{}
417 | err = json.Unmarshal(alertData, alert)
418 | if err != nil {
419 | util.ErrStatus(w, r, "invalid request body", http.StatusNotAcceptable, err)
420 | return
421 | }
422 |
423 | event := sinks.EventFromIcinga(*alert)
424 |
425 | err = s.node.Stash(event)
426 | if err != nil {
427 | util.ErrStatus(w, r, "error stashing event", http.StatusInternalServerError, err)
428 | return
429 | }
430 |
431 | b, err := json.Marshal(event)
432 | if err != nil {
433 | util.ErrStatus(w, r, "error writing event data", http.StatusNotAcceptable, err)
434 | return
435 | }
436 |
437 | w.Header().Set("Content-Type", "application/json")
438 | w.WriteHeader(http.StatusOK)
439 | w.Write(b)
440 | }
441 |
442 | func (s *Service) azureAlertHandler(w http.ResponseWriter, r *http.Request) {
443 |
444 | alertData, err := ioutil.ReadAll(r.Body)
445 | if err != nil {
446 | util.ErrStatus(w, r, "invalid request body", http.StatusNotAcceptable, err)
447 | return
448 | }
449 |
450 | defer r.Body.Close()
451 | alert := &sinks.AzureAlert{}
452 | err = json.Unmarshal(alertData, alert)
453 | if err != nil {
454 | util.ErrStatus(w, r, "invalid request body", http.StatusNotAcceptable, err)
455 | return
456 | }
457 |
458 | event := sinks.EventFromAzure(*alert)
459 |
460 | err = s.node.Stash(event)
461 | if err != nil {
462 | util.ErrStatus(w, r, "error stashing event", http.StatusInternalServerError, err)
463 | return
464 | }
465 |
466 | b, err := json.Marshal(event)
467 | if err != nil {
468 | util.ErrStatus(w, r, "error writing event data", http.StatusNotAcceptable, err)
469 | return
470 | }
471 |
472 | w.Header().Set("Content-Type", "application/json")
473 | w.WriteHeader(http.StatusOK)
474 | w.Write(b)
475 | }
476 |
477 | func (s *Service) leaveHandler(w http.ResponseWriter, r *http.Request) {
478 | id := chi.URLParam(r, "id")
479 | err := s.node.Leave(id)
480 | if err != nil {
481 | util.ErrStatus(w, r, "could not leave node ", http.StatusNotFound, err)
482 | return
483 | }
484 | w.Header().Set("Content-Type", "application/json")
485 | w.WriteHeader(http.StatusOK)
486 | }
487 |
488 | func (s *Service) joinHandler(w http.ResponseWriter, r *http.Request) {
489 |
490 | reqBody, err := ioutil.ReadAll(r.Body)
491 | if err != nil {
492 | util.ErrStatus(w, r, "invalid request body, expected a valid joinRequest", http.StatusNotAcceptable, err)
493 | return
494 | }
495 |
496 | defer r.Body.Close()
497 |
498 | joinRequest := &util.JoinRequest{}
499 | err = json.Unmarshal(reqBody, joinRequest)
500 | if err != nil {
501 | util.ErrStatus(w, r, "joinRequest parsing failed", http.StatusNotAcceptable, err)
502 | return
503 | }
504 |
505 | err = joinRequest.Validate()
506 | if err != nil {
507 | util.ErrStatus(w, r, "joinRequest validation failed", http.StatusNotAcceptable, err)
508 | return
509 | }
510 |
511 | err = s.node.Join(joinRequest.NodeID, joinRequest.Addr)
512 | if err != nil {
513 | 		util.ErrStatus(w, r, "joining failed", http.StatusNotAcceptable, err)
514 | return
515 | }
516 | w.Header().Set("Content-Type", "application/json")
517 | w.WriteHeader(http.StatusOK)
518 |
519 | }
520 |
--------------------------------------------------------------------------------
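Taken together, the handlers above form the node's HTTP surface: incoming events and alert-sink payloads are stashed on the raft leader (leaderProxy forwards the request when this node is not the leader), while rules, scripts, and execution records are served as JSON. A minimal client sketch that posts a cloudevents.io-style event; the port is borrowed from the test fixtures and the /event path is an assumption, as the actual route wiring lives in pkg/service/service.go:

    package main

    import (
        "bytes"
        "encoding/json"
        "log"
        "net/http"
        "time"

        "github.com/myntra/cortex/pkg/events"
    )

    func main() {
        // Build the same kind of event the tests stash directly on the node.
        evt := events.Event{
            EventType:          "acme.prod.icinga.check_disk",
            EventTypeVersion:   "1.0",
            CloudEventsVersion: "0.1",
            Source:             "/sink",
            EventID:            "42",
            EventTime:          time.Now(),
            ContentType:        "application/json",
            Data:               map[string]interface{}{"Alpha": "julie"},
        }

        body, err := json.Marshal(&evt)
        if err != nil {
            log.Fatal(err)
        }

        // Assumed endpoint and port; eventHandler unmarshals this body and calls node.Stash.
        resp, err := http.Post("http://localhost:11879/event", "application/json", bytes.NewReader(body))
        if err != nil {
            log.Fatal(err)
        }
        defer resp.Body.Close()
        log.Println("status:", resp.Status)
    }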