├── .gitignore ├── Dockerfile ├── LICENSE ├── Makefile ├── README.md ├── backends ├── README.md └── ledger_writer.go ├── brefactor ├── app │ ├── cmd │ │ ├── common.go │ │ ├── config_manage.go │ │ ├── root.go │ │ └── run.go │ └── main.go ├── core │ ├── blockdb.go │ ├── business_reader.go │ ├── command_parser.go │ ├── command_processor.go │ └── interface.go ├── core_interface │ ├── interface.go │ └── model.go ├── deployment │ └── config.toml ├── go.mod ├── instruction │ ├── audit.go │ ├── db_models.go │ ├── executor.go │ ├── executor_collection.go │ ├── executor_doc.go │ ├── executor_index.go │ ├── executor_permission.go │ ├── history.go │ ├── instruction.go │ ├── instruction_test.go │ ├── instructions.txt │ └── tools.go ├── nodedata │ └── config │ │ └── config.toml ├── plugins │ ├── clients │ │ └── og │ │ │ ├── json_util.go │ │ │ ├── og.go │ │ │ ├── og_test.go │ │ │ ├── toOp.go │ │ │ ├── toOp_test.go │ │ │ ├── ws.go │ │ │ └── ws_test.go │ └── listeners │ │ └── web │ │ ├── http_processor.go │ │ ├── http_processor_test.go │ │ ├── json_util.go │ │ └── json_util_test.go ├── storage │ ├── mongo_client.go │ └── mongo_client_test.go └── syncer │ ├── syncer.go │ └── ws_info_receiver.go ├── cmd ├── root.go └── run.go ├── common └── bytes │ └── bytes.go ├── config.toml ├── deployment ├── config.toml ├── k8s_blockdb.yaml ├── k8s_infra.yaml ├── k8s_mongodb.yaml └── k8s_restheart.yaml ├── dockerfile_gitub ├── docs └── sequence_diagram ├── engine ├── component_interface.go └── engine.go ├── go.mod ├── go.sum ├── listener ├── README.md └── listener.go ├── main.go ├── multiplexer ├── bimap.go ├── connection_builder.go ├── dialog_context.go ├── multiplexer.go ├── multiplexer_test.go └── observer.go ├── mylog └── loggers.go ├── ogws ├── audit_reader.go ├── audit_write_test.go ├── audit_writer.go ├── client.go ├── model.go ├── original_data.go └── timezone_test.go ├── plugins ├── client │ └── og │ │ ├── fetch_data.go │ │ ├── og.go │ │ └── og_test.go └── server │ ├── jsondata │ └── 
json_data_processor.go │ ├── kafka │ ├── listener.go │ └── listener_test.go │ ├── log4j2 │ ├── model.go │ └── processor.go │ ├── mongodb │ ├── extractor.go │ ├── message │ │ ├── message.go │ │ ├── message_test.go │ │ ├── op_command.go │ │ ├── op_command_reply.go │ │ ├── op_delete.go │ │ ├── op_get_more.go │ │ ├── op_insert.go │ │ ├── op_kill_cursors.go │ │ ├── op_msg.go │ │ ├── op_msg_test.go │ │ ├── op_query.go │ │ ├── op_reply.go │ │ ├── op_reserved.go │ │ ├── op_update.go │ │ └── opcode.go │ ├── pool.go │ └── processor.go │ ├── socket │ └── connection_processor.go │ └── web │ ├── api.go │ ├── http_processor.go │ └── model.go ├── processors ├── README.md ├── logevent.go └── processor.go └── scripts ├── consumer.py ├── kafka_simple_producer.py ├── kafka_test.py ├── run_ws_server.py └── websocket_server.py /.gitignore: -------------------------------------------------------------------------------- 1 | # Binaries for programs and plugins 2 | *.exe 3 | *.exe~ 4 | *.dll 5 | *.so 6 | *.dylib 7 | build* 8 | 9 | # Test binary, build with `go test -c` 10 | *.test 11 | .idea* 12 | # Output of the go coverage tool, specifically when used with LiteIDE 13 | *.out 14 | /.project 15 | /datadir/ 16 | BlockDB 17 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM golang:1.13-alpine as builder 2 | 3 | RUN apk add --no-cache make gcc musl-dev linux-headers git 4 | 5 | ENV GOPROXY https://goproxy.cn 6 | ENV GO111MODULE on 7 | 8 | #ADD . /BlockDB 9 | #WORKDIR /BlockDB 10 | #RUN make blockdb 11 | # 12 | # 13 | #FROM alpine:latest 14 | #WORKDIR / 15 | #COPY --from=builder BlockDB/config.toml . 16 | #COPY --from=builder BlockDB/build/blockdb . 17 | 18 | WORKDIR /go/src/github.com/annchain/BlockDB 19 | COPY go.mod . 20 | COPY go.sum . 21 | RUN go mod download 22 | 23 | COPY . . 
24 | RUN make blockdb 25 | 26 | # Copy OG into basic alpine image 27 | FROM alpine:latest 28 | 29 | RUN apk add --no-cache curl iotop busybox-extras tzdata 30 | 31 | WORKDIR / 32 | COPY --from=builder /go/src/github.com/annchain/BlockDB/deployment/config.toml . 33 | COPY --from=builder /go/src/github.com/annchain/BlockDB/blockdb . 34 | 35 | # for a temp running folder. This should be mounted from the outside 36 | RUN mkdir /rw 37 | 38 | EXPOSE 28017 28018 28019 8080 39 | CMD ["./blockdb", "--config", "config.toml", "-n", "run"] -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | .PHONY: blockdb all 2 | all:blockdb 3 | blockdb: 4 | go build -o ./blockdb ./main.go -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # BlockDB 2 | Are you looking for a super easy-to-use blockchain database that can record anything to it, feeling just like MySQL or MongoDB, with the features provided by blockchain such as immutability, distributed ledger and high availability being integrated? You got to the right place. 3 | 4 | BlockDB is an out-of-box database that is built on blockchain, providing the SQL/NoSQL query ability seamlessly. 5 | 6 | 7 | # Features 8 | ## Blockchain (Distributed Ledger Technology) based 9 | As a blockchain(DLT) based database, BlockDB will honestly record all activities performed on the database. Full operation logs and change histories will be recorded in an append-only style to provide immutability, security and auditability. 10 | 11 | We provide a modern DLT implementation: [Annchain.OG](https://github.com/annchain/OG) as the default BlockDB backend. 
Annchain.OG is leveraging DAG (Directed Acyclic Graph), DKG (Distributed Key Generation), BLS threshold signature and so many other modern technologies to support 10k+ TPS (Transactions per second) upon public network, comparing to the traditional chain-based DLT that can only reach 100 or less. 12 | 13 | ## Immutability 14 | BlockDB provides two ways of auditing: operation auditing and data auditing. Both auditing methods are leveraging the capability of immutability provided by DLT. 15 | 16 | Users may configure the BlockDB to either way, according to their purpose. 17 | ### Immutable Data Storage 18 | In this mode, BlockDB acts as an immutable append-only database. Currently the majority of successful applications of blockchain uses blockchain as "immutable append-only distributed data storage". Proofs, certificates, transactions are all those records that will never be changed and will need complete immutability. However the complexity of setting up distributed ledger and adapting records onto it is too high to welcome normal developers. 19 | 20 | **We greatly simplify the process of immutable storage in BlockDB**: 21 | In BlockDB, all processes of DLT are well-encapsulated. Developers with little knowledge of cryptographic, distributed systems, p2p communications could still setup BlockDB in a really short time. Building systems upon BlockDB is as easy as building one upon traditional SQL/NoSQL databases. Developers may focus on the business logic and let BlockDB handle all the rest tricky part of the work. 22 | 23 | ### CRUD Operation Auditing 24 | In this mode, BlockDB is acting like a proxy. It will delegate all CRUD operations to the target database (like MongoDB). During the delegation, these operations will be sent to DLT and recorded. If you have a legacy system using traditional database, feel free to configure BlockDB as a middle proxy to enable full audit capability of every insertion, update,deletion or even query. 
25 | 26 | ## Database Sharing 27 | Since BlockDB is based on DLT, it is really natural to implement trustless high-availability data sharing among multiple parties around the world. There will be no single center for the data storage, thus modifications to data without complete consensus are not possible in BlockDB. 28 | 29 | 30 | ## Interfaces 31 | BlockDB supports multiple ways for you to send data. All ways listed here are configurable to enable/disable. 32 | 33 | ### Kafka 34 | BlockDB can listen to a Kafka MQ and consume the data. 35 | 36 | ### Log4j 37 | BlockDB opens a Log4j appender receiver to allow direct write from log frameworks. 38 | 39 | ### Socket 40 | BlockDB opens a Socket listener to receive JSON stream. 41 | 42 | ### HTTP 43 | BlockDB opens an HTTP listener to receive JSON requests. 44 | 45 | ### Intercept 46 | *For CRUD Operation Auditing only*: change the target database URL from original one to the one provided by BlockDB so that all operations will be fully audited. 47 | 48 | 49 | ## Queries 50 | BlockDB supports MongoDB style query on all data records. There are mainly three parts in the queriable data: 51 | 52 | + *DLT info*: Info of the chain structure. e.g., the height, hash, etc. 53 | + *Audit info*: Info of the audit meta. e.g., operator, timestamp, IP, browser, etc. 54 | + *User defined info*: Customizable info provided by user. Can be anything. 55 | 56 | Note that all data are recorded **directly and completely** onto the ledger. BlockDB stores not only the hash of content (which most of the certificate applications do), but also the **full content** user provides. This enables data flowing among parties. -------------------------------------------------------------------------------- /backends/README.md: -------------------------------------------------------------------------------- 1 | # Backends 2 | 3 | Backends are the set of supported backend databases. 
4 | 5 | In the backend, You should provide the following implementations: 6 | 7 | - Connector: build backend connection according to connection string 8 | - Requester: reuse the Connection and fire request to the target database 9 | 10 | -------------------------------------------------------------------------------- /backends/ledger_writer.go: -------------------------------------------------------------------------------- 1 | package backends 2 | 3 | type LedgerWriter interface { 4 | EnqueueSendToLedger(data interface{}) error 5 | } 6 | -------------------------------------------------------------------------------- /brefactor/app/cmd/common.go: -------------------------------------------------------------------------------- 1 | package cmd 2 | 3 | import ( 4 | "github.com/annchain/commongo/files" 5 | "github.com/sirupsen/logrus" 6 | "github.com/spf13/viper" 7 | "os" 8 | "path" 9 | ) 10 | 11 | type FolderConfig struct { 12 | Root string 13 | Log string 14 | Data string 15 | Config string 16 | Private string 17 | } 18 | 19 | func ensureFolder(folder string, perm os.FileMode) { 20 | err := files.MkDirPermIfNotExists(folder, perm) 21 | if err != nil { 22 | logrus.WithError(err).WithField("path", folder).Fatal("failed to create folder") 23 | } 24 | } 25 | 26 | func defaultPath(givenPath string, defaultRoot string, suffix string) string { 27 | if givenPath == "" { 28 | return path.Join(defaultRoot, suffix) 29 | } 30 | if path.IsAbs(givenPath) { 31 | return givenPath 32 | } 33 | return path.Join(defaultRoot, givenPath) 34 | } 35 | 36 | func ensureFolders() FolderConfig { 37 | config := FolderConfig{ 38 | Root: viper.GetString("dir.root"), 39 | Log: defaultPath(viper.GetString("dir.log"), viper.GetString("dir.root"), "log"), 40 | Data: defaultPath(viper.GetString("dir.data"), viper.GetString("dir.root"), "data"), 41 | Config: defaultPath(viper.GetString("dir.config"), viper.GetString("dir.root"), "config"), 42 | Private: defaultPath(viper.GetString("dir.private"), 
viper.GetString("dir.root"), "private"), 43 | } 44 | ensureFolder(config.Root, 0755) 45 | ensureFolder(config.Log, 0755) 46 | ensureFolder(config.Data, 0755) 47 | ensureFolder(config.Config, 0755) 48 | ensureFolder(config.Private, 0700) 49 | return config 50 | 51 | } 52 | -------------------------------------------------------------------------------- /brefactor/app/cmd/config_manage.go: -------------------------------------------------------------------------------- 1 | package cmd 2 | 3 | import ( 4 | "fmt" 5 | "github.com/annchain/commongo/files" 6 | "github.com/annchain/commongo/format" 7 | "github.com/annchain/commongo/httplib" 8 | "github.com/annchain/commongo/utilfuncs" 9 | log "github.com/sirupsen/logrus" 10 | "github.com/spf13/viper" 11 | "net/url" 12 | "os" 13 | "path" 14 | "path/filepath" 15 | "time" 16 | ) 17 | 18 | // readConfig will respect {configdir}/config.toml first. 19 | // If not found, get config from online source {configurl} 20 | // {configdir}/injected.toml is the config issued by bootstrap server. 21 | // finally merge env config so that any config can be override by env variables. 22 | // Importance order: 23 | // 1, ENV 24 | // 2, injected.toml 25 | // 3, config.toml or online toml if config.toml is not found 26 | func readConfig(configFolder string) { 27 | configPath := path.Join(configFolder, "config.toml") 28 | 29 | if files.FileExists(configPath) { 30 | mergeLocalConfig(configPath) 31 | } else { 32 | if viper.GetString("url.config") == "" { 33 | panic("either local config or configurl should be provided") 34 | } 35 | 36 | mergeOnlineConfig(viper.GetString("url.config")) 37 | } 38 | 39 | // load injected config from ogbootstrap if any 40 | injectedPath := path.Join(configFolder, "injected.toml") 41 | if files.FileExists(injectedPath) { 42 | log.Info("merging local config file") 43 | mergeLocalConfig(injectedPath) 44 | } 45 | 46 | mergeEnvConfig() 47 | // print running config in console. 
48 | b, err := format.PrettyJson(viper.AllSettings()) 49 | utilfuncs.PanicIfError(err, "dump json") 50 | fmt.Println(b) 51 | } 52 | 53 | func mergeEnvConfig() { 54 | // env override 55 | viper.SetEnvPrefix("blockdb") 56 | viper.AutomaticEnv() 57 | } 58 | 59 | //func writeConfig() { 60 | // configPath := files.FixPrefixPath(viper.GetString("rootdir"), path.Join(ConfigDir, "config_dump.toml")) 61 | // err := viper.WriteConfigAs(configPath) 62 | // utilfuncs.PanicIfError(err, "dump config") 63 | //} 64 | 65 | func mergeOnlineConfig(configPath string) { 66 | _, err := url.Parse(configPath) 67 | if err != nil { 68 | utilfuncs.PanicIfError(err, "config is should be valid server url or toml file has suffix .toml") 69 | } 70 | fileName := "og_config_" + time.Now().Format("20060102_150405") + ".toml" 71 | fmt.Println("read from config", configPath) 72 | req := httplib.NewBeegoRequest(configPath, "GET") 73 | req.Debug(true) 74 | req.SetTimeout(60*time.Second, 60*time.Second) 75 | err = req.ToFile(fileName) 76 | if err != nil { 77 | _ = os.Remove(fileName) 78 | fmt.Println(req.String()) 79 | } 80 | utilfuncs.PanicIfError(err, "get config from server error") 81 | 82 | file, err := os.Open(fileName) 83 | if err != nil { 84 | _ = os.Remove(fileName) 85 | } 86 | utilfuncs.PanicIfError(err, fmt.Sprintf("Error on opening config file: %s", fileName)) 87 | defer file.Close() 88 | 89 | viper.SetConfigType("toml") 90 | err = viper.MergeConfig(file) 91 | _ = os.Remove(fileName) 92 | utilfuncs.PanicIfError(err, fmt.Sprintf("Error on reading config file: %s", fileName)) 93 | } 94 | 95 | func mergeLocalConfig(configPath string) { 96 | absPath, err := filepath.Abs(configPath) 97 | utilfuncs.PanicIfError(err, fmt.Sprintf("Error on parsing config file path: %s", absPath)) 98 | 99 | file, err := os.Open(absPath) 100 | utilfuncs.PanicIfError(err, fmt.Sprintf("Error on opening config file: %s", absPath)) 101 | defer file.Close() 102 | 103 | viper.SetConfigType("toml") 104 | err = 
viper.MergeConfig(file) 105 | utilfuncs.PanicIfError(err, fmt.Sprintf("Error on reading config file: %s", absPath)) 106 | return 107 | } 108 | -------------------------------------------------------------------------------- /brefactor/app/cmd/root.go: -------------------------------------------------------------------------------- 1 | // Copyright © 2019 Annchain Authors 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | package cmd 16 | 17 | import ( 18 | "github.com/annchain/commongo/program" 19 | "github.com/spf13/cobra" 20 | "github.com/spf13/viper" 21 | _ "net/http/pprof" 22 | ) 23 | 24 | // Execute adds all child commands to the root command and sets flags appropriately. 25 | // This is called by main.main(). It only needs to happen once to the rootCmd. 26 | func Execute() { 27 | defer program.DumpStack(true) 28 | _ = rootCmd.Execute() 29 | } 30 | 31 | // rootCmd represents the base command when called without any subcommands 32 | var rootCmd = &cobra.Command{ 33 | Use: "BlockDB", 34 | Short: "Undeniable DB", 35 | Long: `BlockDB to da moon`, 36 | } 37 | 38 | func init() { 39 | // Here you will define your flags and configuration settings. 40 | // Cobra supports persistent flags, which, if defined here, 41 | // will be global for your application. 42 | 43 | // NOTE: cobra parameters follow the rule of traditional parameter format. (dir-root) 44 | // viper parameters follow the rule of toml format. 
(dir.root) 45 | // viper will do the transformation. 46 | // Use viper format elsewhere you need a parameter. 47 | 48 | // folders 49 | rootCmd.PersistentFlags().StringP("dir-root", "r", "nodedata", "Folder for all data of one node") 50 | rootCmd.PersistentFlags().String("dir-log", "", "Log folder. Default to {dir.root}/log") 51 | rootCmd.PersistentFlags().String("dir-data", "", "Data folder. Default to {dir.root}/data") 52 | rootCmd.PersistentFlags().String("dir-config", "", "Config folder. Default to {dir.root}/config") 53 | rootCmd.PersistentFlags().String("dir-private", "", "Private folder. Default to {dir.root}/private") 54 | 55 | rootCmd.PersistentFlags().String("url-config", "", "URL for online config") 56 | 57 | // identity generation 58 | rootCmd.PersistentFlags().BoolP("gen-key", "g", false, "Automatically generate a private key if the privkey is missing.") 59 | 60 | rootCmd.PersistentFlags().Bool("log-stdout", true, "Whether the log will be printed to stdout") 61 | rootCmd.PersistentFlags().Bool("log-file", false, "Whether the log will be printed to file") 62 | rootCmd.PersistentFlags().Bool("log-line-number", false, "Whether the log will contain line number") 63 | rootCmd.PersistentFlags().String("log-level", "debug", "Logging verbosity, possible values:[panic, fatal, error, warn, info, debug]") 64 | 65 | rootCmd.PersistentFlags().Bool("multifile-by-level", false, "Output separate log files according to their level") 66 | rootCmd.PersistentFlags().Bool("multifile-by-module", false, "Output separate log files according to their module") 67 | 68 | _ = viper.BindPFlag("dir.root", rootCmd.PersistentFlags().Lookup("dir-root")) 69 | _ = viper.BindPFlag("dir.log", rootCmd.PersistentFlags().Lookup("dir-log")) 70 | _ = viper.BindPFlag("dir.data", rootCmd.PersistentFlags().Lookup("dir-data")) 71 | _ = viper.BindPFlag("dir.config", rootCmd.PersistentFlags().Lookup("dir-config")) 72 | _ = viper.BindPFlag("dir.private", 
rootCmd.PersistentFlags().Lookup("dir-private")) 73 | _ = viper.BindPFlag("url.config", rootCmd.PersistentFlags().Lookup("url-config")) 74 | 75 | _ = viper.BindPFlag("gen.key", rootCmd.PersistentFlags().Lookup("gen-key")) 76 | 77 | _ = viper.BindPFlag("log.stdout", rootCmd.PersistentFlags().Lookup("log-stdout")) 78 | _ = viper.BindPFlag("log.file", rootCmd.PersistentFlags().Lookup("log-file")) 79 | _ = viper.BindPFlag("log.line_number", rootCmd.PersistentFlags().Lookup("log-line-number")) 80 | _ = viper.BindPFlag("log.level", rootCmd.PersistentFlags().Lookup("log-level")) 81 | 82 | _ = viper.BindPFlag("multifile_by_level", rootCmd.PersistentFlags().Lookup("multifile-by-level")) 83 | _ = viper.BindPFlag("multifile_by_module", rootCmd.PersistentFlags().Lookup("multifile-by-module")) 84 | 85 | rootCmd.PersistentFlags().Int("id", 0, "Node Id for debugging") 86 | _ = viper.BindPFlag("id", rootCmd.PersistentFlags().Lookup("id")) 87 | } 88 | -------------------------------------------------------------------------------- /brefactor/app/cmd/run.go: -------------------------------------------------------------------------------- 1 | // Copyright © 2019 Annchain Authors 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 
14 | package cmd 15 | 16 | import ( 17 | "github.com/ZhongAnTech/BlockDB/brefactor/core" 18 | "github.com/annchain/commongo/mylog" 19 | "github.com/sirupsen/logrus" 20 | "github.com/spf13/cobra" 21 | "os" 22 | "os/signal" 23 | "syscall" 24 | ) 25 | 26 | // runCmd represents the run command 27 | var runCmd = &cobra.Command{ 28 | Use: "run", 29 | Short: "Start a BlockDB instance", 30 | Long: `Start a BlockDB instance`, 31 | Run: func(cmd *cobra.Command, args []string) { 32 | logrus.Info("BlockDB Starting") 33 | folderConfigs := ensureFolders() 34 | readConfig(folderConfigs.Config) 35 | mylog.InitLogger(logrus.StandardLogger(), mylog.LogConfig{ 36 | MaxSize: 10, 37 | MaxBackups: 100, 38 | MaxAgeDays: 90, 39 | Compress: true, 40 | LogDir: folderConfigs.Log, 41 | OutputFile: "blockdb", 42 | }) 43 | 44 | // init logs and other facilities before the node starts 45 | 46 | blockdb := core.BlockDB{} 47 | blockdb.InitDefault() 48 | blockdb.Setup() 49 | blockdb.Start() 50 | 51 | // prevent sudden stop. Do your clean up here 52 | var gracefulStop = make(chan os.Signal) 53 | 54 | signal.Notify(gracefulStop, syscall.SIGTERM) 55 | signal.Notify(gracefulStop, syscall.SIGINT) 56 | 57 | func() { 58 | sig := <-gracefulStop 59 | logrus.Infof("caught sig: %+v", sig) 60 | logrus.Info("Exiting... Please do no kill me") 61 | blockdb.Stop() 62 | os.Exit(0) 63 | }() 64 | 65 | }, 66 | } 67 | 68 | func init() { 69 | rootCmd.AddCommand(runCmd) 70 | // ./mongodb run -c config.toml 71 | 72 | // Here you will define your flags and configuration settings. 
73 | 74 | // Cobra supports Persistent Flags which will work for this command 75 | // and all subcommands, e.g.: 76 | // runCmd.PersistentFlags().String("foo", "", "A help for foo") 77 | 78 | // Cobra supports local flags which will only run when this command 79 | // is called directly, e.g.: 80 | // runCmd.Flags().BoolP("toggle", "t", false, "Help message for toggle") 81 | } 82 | -------------------------------------------------------------------------------- /brefactor/app/main.go: -------------------------------------------------------------------------------- 1 | // Copyright © 2019 Annchain Authors 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 
14 | 15 | package main 16 | 17 | import ( 18 | "github.com/ZhongAnTech/BlockDB/brefactor/app/cmd" 19 | "math/rand" 20 | "time" 21 | ) 22 | 23 | func main() { 24 | rand.Seed(time.Now().UnixNano()) 25 | cmd.Execute() 26 | } 27 | -------------------------------------------------------------------------------- /brefactor/core/blockdb.go: -------------------------------------------------------------------------------- 1 | package core 2 | 3 | import ( 4 | "context" 5 | "github.com/ZhongAnTech/BlockDB/brefactor/plugins/clients/og" 6 | "github.com/ZhongAnTech/BlockDB/brefactor/plugins/listeners/web" 7 | "github.com/ZhongAnTech/BlockDB/brefactor/storage" 8 | "github.com/sirupsen/logrus" 9 | "github.com/spf13/viper" 10 | "time" 11 | ) 12 | 13 | type BlockDB struct { 14 | components []Component 15 | } 16 | 17 | func (n *BlockDB) Start() { 18 | for _, component := range n.components { 19 | logrus.Infof("Starting %s", component.Name()) 20 | component.Start() 21 | logrus.Infof("Started: %s", component.Name()) 22 | 23 | } 24 | logrus.Info("BlockDB engine started") 25 | } 26 | 27 | func (n *BlockDB) Stop() { 28 | //status.Stopped = true 29 | for i := len(n.components) - 1; i >= 0; i-- { 30 | component := n.components[i] 31 | logrus.Infof("Stopping %s", component.Name()) 32 | component.Stop() 33 | logrus.Infof("Stopped: %s", component.Name()) 34 | } 35 | logrus.Info("BlockDB engine stopped gracefully") 36 | } 37 | 38 | func (n *BlockDB) Name() string { 39 | return "BlockDB" 40 | } 41 | 42 | func (n *BlockDB) InitDefault() { 43 | n.components = []Component{} 44 | } 45 | 46 | func (n *BlockDB) Setup() { 47 | // init components. 48 | 49 | // External data storage facilities. 
(Dai Yunong) 50 | // StorageExecutor 51 | connectionTimeout := time.Millisecond * time.Duration(viper.GetInt("storage.timeout_connect_ms")) 52 | ctx, _ := context.WithTimeout(context.Background(), connectionTimeout) 53 | storageExecutor := storage.Connect(ctx, 54 | viper.GetString("storage.mongodb.url"), 55 | viper.GetString("storage.mongodb.database"), 56 | viper.GetString("storage.mongodb.auth_method"), 57 | viper.GetString("storage.mongodb.username"), 58 | viper.GetString("storage.mongodb.password")) 59 | 60 | // will inject the storageExecutor to multiple components. 61 | businessReader := NewBusinessReader(storageExecutor) 62 | 63 | // TODO: RPC server to receive http requests. (Wu Jianhang) 64 | if viper.GetBool("listener.http.enabled") { 65 | p := &web.HttpListener{ 66 | JsonCommandParser: &DefaultJsonCommandParser{}, // parse json command 67 | BlockDBCommandProcessor: &DefaultCommandProcessor{}, // send command to ledger 68 | Config: web.HttpListenerConfig{ 69 | Port: viper.GetInt("listener.http.port"), 70 | MaxContentLength: viper.GetInt64("listener.http.max_content_length"), 71 | DBActionTimeoutMs: viper.GetInt("listener.http.timeout_db_ms"), 72 | }, 73 | BusinessReader: businessReader, 74 | } 75 | 76 | p.Setup() 77 | n.components = append(n.components, p) 78 | } 79 | 80 | // TODO: Command Executor (Fang Ning) 81 | // CommandExecutor 82 | 83 | // TODO: Blockchain sender to send new tx consumed from queue. (Ding Qingyun) 84 | client := &og.OgClient{ 85 | Config: og.OgClientConfig{ 86 | LedgerUrl: viper.GetString("blockchain.og.url"), 87 | RetryTimes: viper.GetInt("blockchain.og.retry_times"), 88 | }, 89 | } 90 | client.InitDefault() 91 | n.components = append(n.components, client) 92 | 93 | // TODO: Sync manager to sync from lastHeight to maxHeight. (Wu Jianhang) 94 | // LedgerSyncer 95 | 96 | // TODO: Websocket server to receive new sequencer messages. 
(Ding Qingyun) 97 | // BlockchainListener 98 | 99 | } 100 | -------------------------------------------------------------------------------- /brefactor/core/business_reader.go: -------------------------------------------------------------------------------- 1 | package core 2 | 3 | import ( 4 | "context" 5 | "encoding/json" 6 | "errors" 7 | "github.com/ZhongAnTech/BlockDB/brefactor/core_interface" 8 | "go.mongodb.org/mongo-driver/bson" 9 | ) 10 | 11 | type BusinessReader struct { 12 | storageExecutor core_interface.StorageExecutor 13 | } 14 | 15 | func NewBusinessReader(storageExecutor core_interface.StorageExecutor) *BusinessReader { 16 | s := &BusinessReader{ 17 | storageExecutor: storageExecutor, 18 | } 19 | return s 20 | } 21 | 22 | func (s *BusinessReader) Close(ctx context.Context) error { 23 | return s.storageExecutor.Close(ctx) 24 | } 25 | 26 | func (s *BusinessReader) Info(ctx context.Context, opHash string) ([]byte, error) { 27 | filter := bson.M{"op_hash": opHash} 28 | response, err := s.storageExecutor.Select(ctx, "sample_collection", filter, nil, 0, 0) 29 | if err != nil { 30 | return nil, err 31 | } 32 | if len(response.Content) != 1 { 33 | return nil, errors.New("miss info") 34 | } 35 | return json.Marshal(response.Content[0]) 36 | } 37 | 38 | func (s *BusinessReader) Actions(ctx context.Context, opHash string) ([]byte, error) { 39 | filter := bson.M{"op_hash": opHash} 40 | response, err := s.storageExecutor.Select(ctx, "sample_collection", filter, nil, 0, 0) 41 | if err != nil { 42 | return nil, err 43 | } 44 | return json.Marshal(response.Content) 45 | } 46 | 47 | func (s *BusinessReader) Action(ctx context.Context, opHash string, version int) ([]byte, error) { 48 | filter := bson.M{"op_hash": opHash, "version": version} 49 | response, err := s.storageExecutor.Select(ctx, "sample_collection", filter, nil, 0, 0) 50 | if err != nil { 51 | return nil, err 52 | } 53 | if len(response.Content) != 1 { 54 | return nil, errors.New("miss action") 55 | } 56 
| return json.Marshal(response.Content[0]) 57 | } 58 | 59 | func (s *BusinessReader) Values(ctx context.Context, opHash string) ([]byte, error) { 60 | filter := bson.M{"op_hash": opHash} 61 | response, err := s.storageExecutor.Select(ctx, "sample_collection", filter, nil, 0, 0) 62 | if err != nil { 63 | return nil, err 64 | } 65 | return json.Marshal(response.Content) 66 | } 67 | 68 | func (s *BusinessReader) Value(ctx context.Context, opHash string, version int) ([]byte, error) { 69 | filter := bson.M{"op_hash": opHash, "version": version} 70 | response, err := s.storageExecutor.Select(ctx, "sample_collection", filter, nil, 0, 0) 71 | if err != nil { 72 | return nil, err 73 | } 74 | if len(response.Content) != 1 { 75 | return nil, errors.New("miss value") 76 | } 77 | return json.Marshal(response.Content[0]) 78 | } 79 | 80 | func (s *BusinessReader) CurrentValue(ctx context.Context, opHash string) ([]byte, error) { 81 | filter := bson.M{"op_hash": opHash} 82 | response, err := s.storageExecutor.Select(ctx, "sample_collection", filter, nil, 0, 0) 83 | if err != nil { 84 | return nil, err 85 | } 86 | 87 | if len(response.Content) != 1 { 88 | return nil, errors.New("miss info") 89 | } 90 | 91 | info := response.Content[0] 92 | version := info["latest_version"] 93 | return s.Value(ctx, opHash, version.(int)) 94 | } 95 | -------------------------------------------------------------------------------- /brefactor/core/command_parser.go: -------------------------------------------------------------------------------- 1 | package core 2 | 3 | import ( 4 | "github.com/ZhongAnTech/BlockDB/brefactor/core_interface" 5 | "github.com/sirupsen/logrus" 6 | ) 7 | 8 | type DefaultJsonCommandParser struct { 9 | } 10 | 11 | func (d DefaultJsonCommandParser) FromJson(json string) (core_interface.BlockDBCommand, error) { 12 | logrus.WithField("json", json).Info("TODO: process json command") 13 | return core_interface.DefaultBlockDBCommand{}, nil 14 | } 15 | 
-------------------------------------------------------------------------------- /brefactor/core/command_processor.go: -------------------------------------------------------------------------------- 1 | package core 2 | 3 | import ( 4 | "github.com/ZhongAnTech/BlockDB/brefactor/core_interface" 5 | "github.com/sirupsen/logrus" 6 | ) 7 | 8 | type DefaultCommandProcessor struct { 9 | } 10 | 11 | func (d DefaultCommandProcessor) Process(command core_interface.BlockDBCommand) (core_interface.CommandProcessResult, error) { 12 | logrus.WithField("cmd", command).Info("TODO: process this command") 13 | return core_interface.CommandProcessResult{ 14 | Hash: "0x00", 15 | OK: true, 16 | }, nil 17 | } 18 | -------------------------------------------------------------------------------- /brefactor/core/interface.go: -------------------------------------------------------------------------------- 1 | package core 2 | 3 | type Component interface { 4 | Start() 5 | Stop() 6 | // Get the component name 7 | Name() string 8 | } 9 | -------------------------------------------------------------------------------- /brefactor/core_interface/interface.go: -------------------------------------------------------------------------------- 1 | package core_interface 2 | 3 | import ( 4 | "context" 5 | "go.mongodb.org/mongo-driver/bson" 6 | ) 7 | 8 | // BlockDBCommand is the raw data operation applied on ledger. no additional info 9 | type BlockDBCommand interface { 10 | } 11 | 12 | // BLockDBMessage is the enriched message including BlockDBCommand. 13 | type BlockDBMessage interface { 14 | } 15 | 16 | type BlockDBCommandProcessor interface { 17 | Process(command BlockDBCommand) (CommandProcessResult, error) // better to be implemented in async way. 
// JsonCommandParser turns a raw JSON payload into a BlockDBCommand.
type JsonCommandParser interface {
	FromJson(json string) (BlockDBCommand, error)
}

// BlockchainOperator queues enriched messages for submission to the ledger.
type BlockchainOperator interface {
	EnqueueSendToLedger(command BlockDBMessage) error
}

type CommandExecutor interface{}

// StorageExecutor abstracts the underlying document store (backed by MongoDB
// elsewhere in this repo).
type StorageExecutor interface {
	// Insert stores val (the BSON form of the JSON document) into
	// collectionName; returns the primary-key id of the inserted document.
	Insert(ctx context.Context, collectionName string, val bson.M) (string, error)
	// Delete removes the document whose primary key equals id from the
	// collection; returns the number of deleted documents.
	Delete(ctx context.Context, collectionName string, id string) (int64, error)
	/**
	filter: selection criteria; empty means "match everything"
	sort:   sort specification; empty means "no sorting"
	limit:  max number of documents to fetch; 0 means "fetch all"
	skip:   number of documents to skip; 0 means "start from the first"
	skip+limit: skip `skip` documents, then take `limit` documents
	*/
	Select(ctx context.Context, collectionName string,
		filter bson.M, sort bson.M, limit int64, skip int64) (response SelectResponse, err error)
	// SelectById looks up the document whose primary key equals id.
	SelectById(ctx context.Context, collectionName string, id string) (response SelectResponse, err error)
	// Update applies `update` to documents matching `filter`, using the given
	// update operation (e.g. "set" / "unset").
	Update(ctx context.Context, collectionName string, filter, update bson.M, operation string) (count int64, err error)
	//UpdateById(ctx context.Context, collectionName string, id string, update bson.M, operation string) (count int64, err error)
	// CreateCollection creates the collection; returns nil on success, or the
	// creation error on failure.
	CreateCollection(ctx context.Context, collectionName string) (err error)
	// CreateIndex builds an index on `column`; returns the name of the
	// created index.
	CreateIndex(ctx context.Context, collectionName string, indexName, column string) (createdIndexName string, err error)
	// DropIndex removes the named index from the collection.
	DropIndex(ctx context.Context, collectionName string, indexName string) (err error)
	// CollectionInfo reports the collection's storage size, index size,
	// document count and index count.
	CollectionInfo(ctx context.Context, collection string) (resp CollectionInfoResponse, err error)
	// Close releases the underlying connection.
	Close(ctx context.Context) error
}

type LedgerSyncer interface{}

type BlockchainListener interface{}
// CommandProcessResult is the outcome of submitting a command for processing.
type CommandProcessResult struct {
	Hash string // hash assigned to the processed command
	OK   bool   // whether processing succeeded
}

// DefaultBlockDBCommand is an empty placeholder BlockDBCommand.
type DefaultBlockDBCommand struct {
}

// SelectResponse carries the documents returned by StorageExecutor.Select.
type SelectResponse struct {
	Content []bson.M
}

// CollectionInfoResponse summarizes a collection's storage statistics,
// as reported by StorageExecutor.CollectionInfo.
type CollectionInfoResponse struct {
	StorageSize    int32 // total storage size of the collection
	TotalIndexSize int32 // total size of all indexes
	Count          int32 // number of documents
	NIndexes       int32 // number of indexes
}
-------------------------------------------------------------------------------- /brefactor/instruction/audit.go: -------------------------------------------------------------------------------- 1 | package instruction 2 | 3 | import ( 4 | "github.com/ZhongAnTech/BlockDB/brefactor/storage" 5 | "go.mongodb.org/mongo-driver/bson" 6 | "log" 7 | ) 8 | 9 | func Audit(op string, hash string, coll string, timestamp string, data map[string]interface{}, pk string, sig string) error { 10 | auditdb := storage.InitMongo(url, BlockDataBase, AuditCollection) 11 | audit := bson.M{ 12 | {"op_hash", hash}, 13 | {"collection", coll}, 14 | {"operation", op}, 15 | {"timestamp", timestamp}, 16 | {"data", data}, 17 | {"public_key", pk}, 18 | {"signature", sig}} 19 | _, err := auditdb.Insert(audit) 20 | if err != nil { 21 | log.Fatal("failed to insert data to history.") 22 | return err 23 | } 24 | _ = auditdb.Close() 25 | return nil 26 | } 27 | 28 | //{"op":"create_collection","name":"sample_collection","feature":{"allow_update":false, "allow_update_members": ["0x123456", "0x123456", "0x123456", "0x123456"]},"public_key": "0x769153474351324"} 29 | -------------------------------------------------------------------------------- /brefactor/instruction/db_models.go: -------------------------------------------------------------------------------- 1 | package instruction 2 | 3 | // current data 4 | type MasterDataDoc struct { 5 | OpHash string `json:"op_hash"` 6 | Collection string `json:"collection"` //操作的数据表 7 | Feature CollectionFeature `json:"feature"` 8 | PublicKey string `json:"public_key"` //公钥 9 | Signature string `json:"signature"` //签名 10 | Timestamp int64 `json:"timestamp"` 11 | } 12 | 13 | // history data 14 | type MasterHistoryDoc struct { 15 | OpHash string `json:"op_hash"` 16 | Version int `json:"version"` 17 | TxHash string `json:"tx_hash"` 18 | Collection string `json:"collection"` //操作的数据表 19 | Feature CollectionFeature `json:"feature"` 20 | PublicKey string 
// MasterOpRecordDoc is one operation record for the master collection.
type MasterOpRecordDoc struct {
	OpHash     string            `json:"op_hash"`
	TxHash     string            `json:"tx_hash"`
	Collection string            `json:"collection"` // target collection of the operation
	Feature    CollectionFeature `json:"feature"`
	PublicKey  string            `json:"public_key"` // issuer's public key
	Signature  string            `json:"signature"`  // signature over the operation
	Timestamp  int64             `json:"timestamp"`
}

// info table
type MasterDocInfoDoc struct {
	Collection string `json:"collection"` // target collection of the operation
	Version    int    `json:"version"`
	CreatedAt  int64  `json:"created_at"` // timestamp ms
	CreatedBy  string `json:"created_by"`
	ModifiedAt int64  `json:"modified_at"` // timestamp ms
	ModifiedBy string `json:"modified_by"`
}

// Audit table. merged to oprecord
type AuditModel struct {
	OpHash string `json:"_id"` // hash of the data, used as primary key
	//Collection string `json:"collection"` // target collection of the operation
	Operation string                 `json:"operation"`
	Timestamp string                 `json:"timestamp"`
	Data      map[string]interface{} `json:"data"`       // operation payload
	PublicKey string                 `json:"public_key"` // issuer's public key
	Signature string                 `json:"signature"`  // signature over the operation
}

// OpDoc is the task queue filled by chain sync.
// update OpDoc once the OpDoc is executed.
type OpDoc struct {
	// NOTE(review): the tag "oder" looks like a typo for "order", but the
	// executor sorts on the same "oder" key, so renaming it must be
	// coordinated with that query and with any stored documents.
	Order      int32  `json:"oder"`
	IsExecuted bool   `json:"is_executed"`
	TxHash     string `json:"tx_hash"`
	OpHash     string `json:"op_hash"`
	OpStr      string `json:"op_str"`
	Signature  string `json:"signature"`
	PublicKey  string `json:"public_key"`
}
One for each collection 70 | type OpRecordDoc struct { 71 | DocId string `json:"doc_id"` // 文档Id 72 | OpHash string `json:"op_hash"` //数据的hash 73 | Version int `json:"version"` 74 | //Collection string `json:"collection"` //操作的数据表 75 | Operation string `json:"operation"` 76 | Timestamp string `json:"timestamp"` 77 | Data map[string]interface{} `json:"data"` //操作记录 78 | PublicKey string `json:"public_key"` //公钥 79 | Signature string `json:"signature"` //签名 80 | } 81 | 82 | // history table。 83 | type HistoryDoc struct { 84 | DocId string `json:"doc_id"` // 文档Id 85 | Version int `json:"version"` 86 | //Collection string `json:"collection"` //操作的数据表 87 | Timestamp string `json:"timestamp"` 88 | Data map[string]interface{} `json:"history"` //历史版本 89 | PublicKey string `json:"public_key"` //公钥 90 | Signature string `json:"signature"` //签名 91 | } 92 | 93 | // info table 94 | type DocInfoDoc struct { 95 | DocId string `json:"doc_id"` // 文档Id 96 | Version int `json:"version"` 97 | //Collection string `json:"collection"` //操作的数据表 98 | CreatedAt int64 `json:"created_at"` // timestamp ms 99 | CreatedBy string `json:"created_by"` 100 | ModifiedAt int64 `json:"modified_at"` // timestamp ms 101 | ModifiedBy string `json:"modified_by"` 102 | } 103 | -------------------------------------------------------------------------------- /brefactor/instruction/executor.go: -------------------------------------------------------------------------------- 1 | package instruction 2 | 3 | import ( 4 | "context" 5 | "encoding/json" 6 | "errors" 7 | "fmt" 8 | "github.com/ZhongAnTech/BlockDB/brefactor/core_interface" 9 | "github.com/sirupsen/logrus" 10 | "go.mongodb.org/mongo-driver/bson" 11 | "log" 12 | "time" 13 | ) 14 | 15 | var ( 16 | DataType = "data" // for actual data storage 17 | HistoryType = "history" // data history versions 18 | OpRecordType = "oprecord" // 19 | DocInfoType = "info" // document info 20 | //AuditCollection = "audit" 21 | ) 22 | 23 | var NamePattern = map[string]string{ 
24 | DataType: "%s_" + DataType, 25 | HistoryType: "%s_" + HistoryType, 26 | OpRecordType: "%s_" + OpRecordType, 27 | DocInfoType: "%s_" + DocInfoType, 28 | //AuditCollection: "%s_" + AuditCollection, 29 | } 30 | 31 | var InitCollections = []string{DataType, HistoryType, OpRecordType, DocInfoType} 32 | 33 | var ( 34 | filter = bson.M{"is_executed": "false"} 35 | sort = bson.M{"oder": 1} 36 | ) 37 | 38 | type InstructionExecutorConfig struct { 39 | BatchSize int64 40 | ReadTimeout time.Duration 41 | WriteTimeout time.Duration 42 | ErrorInterval time.Duration 43 | } 44 | 45 | type InstructionExecutor struct { 46 | Config InstructionExecutorConfig 47 | storageExecutor core_interface.StorageExecutor 48 | quit chan bool 49 | jumpTable map[string]func(opContext GeneralCommand) error 50 | } 51 | 52 | func (t *InstructionExecutor) InitDefault() { 53 | t.quit = make(chan bool) 54 | t.jumpTable = map[string]func(opStr GeneralCommand) error{ 55 | CreateCollection: t.createCollection, 56 | UpdateCollection: t.updateColl, 57 | Insert: t.insertDoc, 58 | Update: t.updateDoc, 59 | Delete: t.deleteDoc, 60 | CreateIndex: t.createIndex, 61 | DropIndex: t.dropIndex, 62 | } 63 | } 64 | 65 | func (t *InstructionExecutor) Start() { 66 | go t.runCommand() 67 | } 68 | 69 | func (t *InstructionExecutor) Stop() { 70 | t.quit <- true 71 | } 72 | 73 | func (t *InstructionExecutor) Name() string { 74 | return "InstructionExecutor" 75 | } 76 | 77 | func (t *InstructionExecutor) formatCollectionName(collName string, collType string) string { 78 | return fmt.Sprintf(NamePattern[collType], collName) 79 | } 80 | 81 | // runCommand continuously fetches command from database. 82 | func (t *InstructionExecutor) runCommand() { 83 | 84 | for { 85 | select { 86 | case <-t.quit: 87 | // TODO: do clean work. 
88 | return 89 | default: 90 | didSome := t.doBatchJob() 91 | if didSome { 92 | continue 93 | } 94 | time.Sleep(t.Config.ErrorInterval) 95 | } 96 | } 97 | } 98 | 99 | func (t *InstructionExecutor) doBatchJob() (didSome bool) { 100 | 101 | ctx, _ := context.WithTimeout(context.Background(), t.Config.ReadTimeout) 102 | 103 | resp, err := t.storageExecutor.Select(ctx, CommandCollection, filter, sort, t.Config.BatchSize, 0) 104 | if err != nil { 105 | logrus.WithError(err).Warn("failed to fetch instructions") 106 | return false 107 | } 108 | if len(resp.Content) == 0 { 109 | logrus.Debug("no further instructions to be processed") 110 | return false 111 | } 112 | for _, ins := range resp.Content { 113 | // to json 114 | data, err := bson.Marshal(ins) 115 | if err != nil { 116 | logrus.WithField("value", ins).Warn("failed to marshal command") 117 | continue 118 | } 119 | 120 | // do deserialization and validations 121 | op := OpDoc{} 122 | err = bson.Unmarshal(data, &op) 123 | if err != nil { 124 | logrus.WithField("value", ins).Warn("failed to unmarshal op") 125 | continue 126 | } 127 | 128 | // TODO: signature validation (do not validate inside executor) 129 | // TODO: hash validation 130 | 131 | //opStrObject["op_hash"] = op.OpHash 132 | //opStrObject["signature"] = op.Signature 133 | 134 | err = t.Execute(GeneralCommand{ 135 | TxHash: op.TxHash, 136 | OpHash: op.OpHash, 137 | OpStr: op.OpStr, 138 | Signature: op.Signature, 139 | PublicKey: op.PublicKey, 140 | }) 141 | if err != nil { 142 | logrus.WithError(err).WithField("op", op.OpStr).Error("failed to execute op") 143 | // TODO: retry or mark as failed, according to err type 144 | continue 145 | } 146 | 147 | ctx, _ := context.WithTimeout(context.Background(), t.Config.WriteTimeout) 148 | // update excute state 149 | exeFilter := bson.M{"op_hash": op.OpHash} 150 | update := bson.M{"is_executed": true} 151 | _, err = t.storageExecutor.Update(ctx, CommandCollection, exeFilter, update, "set") 152 | if err != nil { 153 
| log.Println("failed to update execute state.") 154 | continue 155 | } 156 | } 157 | return true 158 | } 159 | 160 | func (t *InstructionExecutor) Execute(command GeneralCommand) (err error) { 161 | opStrObject := make(map[string]interface{}) 162 | err = json.Unmarshal([]byte(command.OpStr), &opStrObject) 163 | if err != nil { 164 | return errors.New("failed to unmarshal opStr") 165 | } 166 | 167 | op := opStrObject["op"].(string) 168 | opFunction, ok := t.jumpTable[op] 169 | 170 | if !ok { 171 | return errors.New("unsupported op: " + op) 172 | } 173 | 174 | err = opFunction(command) 175 | return 176 | } 177 | 178 | //更新Coll 179 | func (t *InstructionExecutor) UpdateCollectionFeatures(collection string, feature map[string]interface{}) (bool, *CollectionCommand) { 180 | flag := false 181 | var curColl *CollectionCommand 182 | for _, curColl = range Colls { 183 | if curColl.Collection == collection { 184 | curColl.Feature = feature 185 | flag = true 186 | //for k:=range feature{ 187 | // curColl.Feature[k]=feature[k] 188 | // flag=true 189 | //} 190 | break 191 | } 192 | } 193 | return flag, curColl 194 | } 195 | 196 | //更新Indexes 197 | //func UpdateCollectionIndex(collection string,index map[string]string)(bool,*IndexCommand){ 198 | // flag:=false 199 | // var curIndex *IndexCommand 200 | // for _,curIndex=range Indexes{ 201 | // if curIndex.Collection == collection{ 202 | // for k:=range index{ 203 | // delete(curIndex.Index,k) 204 | // flag=true 205 | // } 206 | // break 207 | // } 208 | // } 209 | // return flag,curIndex 210 | //} 211 | -------------------------------------------------------------------------------- /brefactor/instruction/executor_doc.go: -------------------------------------------------------------------------------- 1 | package instruction 2 | 3 | import ( 4 | "encoding/json" 5 | "go.mongodb.org/mongo-driver/bson" 6 | ) 7 | 8 | func (t *InstructionExecutor) insertDoc(instruction OpContext) error { 9 | com := &InsertCommand{} 10 | err := 
// insertDoc handles an "insert" command: it records doc info, writes the
// document into the target collection, then appends oprecord, history and
// audit entries.
// NOTE(review): this function references several names not defined in this
// package view (log, mongoutils, timestamp, Check, OpContext, url,
// BlockDataBase) and uses brace-pair bson.M literals, which do not compile
// for a map type — it appears to be mid-refactor; confirm against the
// intended helpers before use.
func (t *InstructionExecutor) insertDoc(instruction OpContext) error {
	com := &InsertCommand{}
	// NOTE(review): []byte(instruction) assumes OpContext is string-like.
	err := json.Unmarshal([]byte(instruction), com)
	if err != nil {
		log.Println("failed to unmarshal insert command.")
		return err
	}
	//TODO: Verification of signature
	// Permission check: may this public key insert into this collection?
	if Check(Insert, com.Collection, com.PublicKey) {
		com.Timestamp = timestamp
		// Create the doc-info entry; returns the initial version number.
		version, err := InsertInfo(com.OpHash, com.Collection, com.PublicKey, com.Timestamp)
		if err != nil {
			log.Println("failed to insert info.")
			return err
		}
		//inset data
		data := bson.M{{"op_hash", com.OpHash}, {"collection", com.Collection}, {"data", com.Data},
			{"public_key", com.PublicKey}, {"signature", com.Signature}, {"timestamp", com.Timestamp}}
		blockdb := mongoutils.InitMgo(url, BlockDataBase, com.Collection)
		_, err = blockdb.Insert(data)
		if err != nil {
			log.Println("failed to insert data, ophash: " + com.OpHash)
			return err
		}
		_ = blockdb.Close()

		// Record the operation, the document version, and the audit trail.
		err = OpRecord(Insert, version, com.OpHash, com.Collection, timestamp, com.Data, com.PublicKey, com.Signature)
		if err != nil {
			return err
		}
		err = HistoryRecord(Insert, com.OpHash, version, com.Collection, timestamp, com.Data, com.PublicKey, com.Signature)
		if err != nil {
			return err
		}
		err = Audit(Insert, com.OpHash, com.Collection, timestamp, com.Data, com.PublicKey, com.Signature)
		if err != nil {
			return err
		}
	} else {
		log.Println("insert permission denied")
	}
	return nil
}
// updateDoc handles an "update" command: bumps the doc-info version, applies
// the $set / $unset changes to the stored document (nested under "data."),
// then appends oprecord, history and audit entries.
// NOTE(review): like insertDoc, this references undefined names (log,
// mongoutils, timestamp, Check) and uses brace-pair bson.M literals plus
// append() on bson.M, which do not compile for a map type — mid-refactor code.
func (t *InstructionExecutor) updateDoc(instruction OpContext) error {
	com := &UpdateCommand{}
	err := json.Unmarshal([]byte(instruction), com)
	if err != nil {
		log.Println("failed to unmarshal update command.")
		return err
	}
	//TODO: Verification of signature
	if Check(Update, com.Collection, com.PublicKey) {
		com.Timestamp = timestamp
		// The target document is addressed by the op_hash in the query.
		hash := com.Query["op_hash"]
		version, err := UpdateInfo(hash)
		if err != nil {
			log.Println("failed to update info.")
			return err
		}
		// Keep the full change set for the oprecord/audit trail.
		data := make(map[string]interface{})
		data["query"] = com.Query
		data["set"] = com.Set
		data["unset"] = com.Unset
		blockdb := mongoutils.InitMgo(url, BlockDataBase, com.Collection)
		filter := bson.M{{"op_hash", hash}}
		if len(com.Set) != 0 {
			// Fields live under the "data" sub-document in storage.
			set_update := bson.M{}
			for k, v := range com.Set {
				set_update = append(set_update, bson.E{"data." + k, v})
			}
			_, err = blockdb.Update(filter, set_update, "set")
			if err != nil {
				log.Println("failed to update data.")
				return err
			}

		}
		if len(com.Unset) != 0 {
			unset_update := bson.M{}
			for _, k := range com.Unset {
				unset_update = append(unset_update, bson.E{"data." + k, ""})
			}
			_, err = blockdb.Update(filter, unset_update, "unset")
			if err != nil {
				log.Println("failed to update data.")
				return err
			}
		}
		_ = blockdb.Close()

		err = OpRecord(Update, version, hash, com.Collection, timestamp, data, com.PublicKey, com.Signature)
		if err != nil {
			return err
		}
		err = HistoryRecord(Update, hash, version, com.Collection, timestamp, data, com.PublicKey, com.Signature)
		if err != nil {
			return err
		}
		err = Audit(Update, hash, com.Collection, timestamp, data, com.PublicKey, com.Signature)
		if err != nil {
			return err
		}
	} else {
		log.Println("update permission denied")
	}
	return nil
}
// deleteDoc handles a "delete" command: removes the document addressed by
// the op_hash in the query, bumps the doc-info version, then appends
// oprecord, history and audit entries.
// NOTE(review): references undefined names (log, mongoutils, timestamp,
// Check) — mid-refactor code. Also note the ordering differs from updateDoc:
// the data is deleted *before* UpdateInfo is called; confirm this is intended.
func (t *InstructionExecutor) deleteDoc(instruction OpContext) error {
	com := &DeleteCommand{}
	err := json.Unmarshal([]byte(instruction), com)
	if err != nil {
		log.Println("failed to unmarshal delete command.")
		return err
	}
	//TODO: Verification of signature
	// Permission check.
	if Check(Delete, com.Collection, com.PublicKey) {
		com.Timestamp = timestamp
		hash := com.Query["op_hash"]
		// Keep the query for the oprecord/audit trail.
		data := make(map[string]interface{})
		data["query"] = com.Query
		blockdb := mongoutils.InitMgo(url, BlockDataBase, com.Collection)
		_, err = blockdb.Delete(hash)
		if err != nil {
			log.Println("failed to delete data.")
			return err
		}
		_ = blockdb.Close()

		version, err := UpdateInfo(hash)
		if err != nil {
			log.Println("failed to update info.")
			return err
		}
		err = OpRecord(Delete, version, hash, com.Collection, timestamp, data, com.PublicKey, com.Signature)
		if err != nil {
			return err
		}
		// History gets a nil payload: the document no longer exists.
		err = HistoryRecord(Delete, hash, version, com.Collection, timestamp, nil, com.PublicKey, com.Signature)
		if err != nil {
			return err
		}
		err = Audit(Delete, hash, com.Collection, timestamp, data, com.PublicKey, com.Signature)
		if err != nil {
			return err
		}
	} else {
		log.Println("delete permission denied")
	}
	return nil
}
// createIndex handles a "hint_create_index" command: builds each requested
// index on the stored "data." sub-fields, then records oprecord and audit
// entries. Note there is no permission check here, unlike insert/update/delete.
// NOTE(review): references undefined names (log, mongoutils, timestamp) —
// mid-refactor code; confirm the intended helpers.
func (t *InstructionExecutor) createIndex(instruction OpContext) error {
	com := &IndexCommand{}
	err := json.Unmarshal([]byte(instruction), com)
	if err != nil {
		log.Println("failed to unmarshal create_index command.")
		return err
	}
	//TODO: Verification of signature
	com.Timestamp = timestamp
	data := make(map[string]interface{})
	data["index"] = com.Index
	version, err := InsertInfo(com.OpHash, com.Collection, com.PublicKey, com.Timestamp)
	if err != nil {
		log.Println("failed to insert info.")
		return err
	}
	//Indexes=append(Indexes,com)
	blockdb := mongoutils.InitMgo(url, BlockDataBase, com.Collection)
	// com.Index maps index name -> column; stored fields live under "data.".
	for k, v := range com.Index {
		_, err = blockdb.CreateIndex(k, "data."+v)
		if err != nil {
			log.Println("failed to create index for: data." + v)
			return err
		}
	}
	_ = blockdb.Close()

	err = OpRecord(CreateIndex, version, com.OpHash, com.Collection, timestamp, data, com.PublicKey, com.Signature)
	if err != nil {
		return err
	}
	//HistoryRecord(com.OpHash,info.Version,com.Collection,timestamp,data,com.PublicKey,com.Signature)
	err = Audit(CreateIndex, com.OpHash, com.Collection, timestamp, data, com.PublicKey, com.Signature)
	if err != nil {
		return err
	}
	return nil
}

// dropIndex handles a "hint_drop_index" command: drops each named index,
// then records oprecord and audit entries.
func (t *InstructionExecutor) dropIndex(instruction OpContext) error {
	// NOTE(review): this statement's result is discarded — dead code,
	// presumably a leftover from a timestamp refactor; safe to delete.
	strconv.FormatInt(time.Now().Unix(), 10)

	com := &IndexCommand{}
	err := json.Unmarshal([]byte(instruction), com)
	if err != nil {
		log.Println("failed to unmarshal drop_index command.")
		return err
	}
	//ok,index:=UpdateCollectionIndex(com.Collection,com.Index)
	com.Timestamp = ts()
	data := make(map[string]interface{})
	data["index"] = com.Index
	version, err := InsertInfo(com.OpHash, com.Collection, com.PublicKey, com.Timestamp)
	if err != nil {
		log.Println("failed to insert info.")
		return err
	}

	blockdb := mongoutils.InitMgo(url, BlockDataBase, com.Collection)
	// Only the index names (map keys) matter when dropping.
	for k := range com.Index {
		err = blockdb.DropIndex(k)
		if err != nil {
			log.Println("failed to drop index: " + k)
			return err
		}
	}
	_ = blockdb.Close()

	err = OpRecord(DropIndex, version, com.OpHash, com.Collection, timestamp, data, com.PublicKey, com.Signature)
	if err != nil {
		return err
	}
	err = Audit(DropIndex, com.OpHash, com.Collection, timestamp, data, com.PublicKey, com.Signature)
	if err != nil {
		return err
	}
	//data["index"]=index.Index
	//HistoryRecord("",com.Collection,timestamp,data,com.PublicKey,com.Signature)
	return nil
}
// PermissionVerify reports whether `publickey` may perform `op` on
// `collection`. Permission checking is currently disabled: every caller is
// allowed, regardless of arguments.
// TODO: do not enable permission verification unless you can load all collections at start up
func (t *InstructionExecutor) PermissionVerify(op string, collection string, publickey string) bool {
	return true
}
// OpRecord appends one operation record to the oprecord collection.
// NOTE(review): references the undefined mongoutils/url/BlockDataBase and
// uses brace-pair bson.M literals, which do not compile for a map type —
// mid-refactor code. Also log.Fatal exits the process, making the following
// `return err` unreachable; it should likely be a non-fatal log.
func OpRecord(op string, version int, hash string, coll string, timestamp string, data map[string]interface{}, pk string, sig string) error {
	//oprecord:=OpRecordDoc{hash,version,coll,op,timestamp,data,pk,sig}
	//TODO: Insert(HistoryDataBase,OpRecordType,oprecord)
	historydb := mongoutils.InitMgo(url, BlockDataBase, OpRecordType)
	record := bson.M{{"op_hash", hash}, {"version", version}, {"collection", coll}, {"operation", op},
		{"timestamp", timestamp}, {"data", data}, {"public_key", pk}, {"signature", sig}}
	_, err := historydb.Insert(record)
	if err != nil {
		log.Fatal("failed to insert data to OpRecord.")
		return err
	}
	_ = historydb.Close()
	return nil
}

// HistoryRecord appends one version snapshot to the history collection.
// For Update operations the *current* stored document is fetched first and
// its payload is recorded instead of the caller-supplied data, so history
// captures the pre-update state.
// NOTE(review): same unresolved names / bson.M literal issues as OpRecord;
// json.Unmarshal([]byte(response.Content[0])) also cannot compile, since
// Content[0] is a bson.M, not a string/byte slice.
func HistoryRecord(op string, hash string, version int, coll string, timestamp string, data map[string]interface{}, pk string, sig string) error {
	historydb := mongoutils.InitMgo(url, BlockDataBase, HistoryType)
	switch op {
	case Update:
		blockdb := mongoutils.InitMgo(url, BlockDataBase, coll)
		filter := bson.M{{"op_hash", hash}}
		response, err := blockdb.Select(filter, bson.M{}, 10, 0)
		if err != nil {
			return err
		}
		if len(response.Content) == 0 {
			return errors.New("not found ophash in history.")
		}
		c := &InsertCommand{}
		err = json.Unmarshal([]byte(response.Content[0]), &c)
		if err != nil {
			return err
		}
		data = c.Data
		_ = blockdb.Close()
	}
	record := bson.M{{"op_hash", hash}, {"version", version}, {"collection", coll}, {"timestamp", timestamp},
		{"data", data}, {"public_key", pk}, {"signature", sig}}
	_, err := historydb.Insert(record)
	if err != nil {
		log.Fatal("failed to insert data to history.")
		return err
	}
	_ = historydb.Close()
	return nil
}
// InsertInfo creates the doc-info entry for a new document (version 0) and
// returns that initial version. Fails if an entry for the op_hash already
// exists.
// NOTE(review): the DocInfoDoc literal and the .CreateTime/.CreateBy/
// .LastModified field accesses do not match the DocInfoDoc struct declared
// in db_models.go (DocId/Version/CreatedAt/...), and brace-pair bson.M
// literals do not compile for a map type — mid-refactor code.
func InsertInfo(hash string, coll string, pubkey string, timestamp string) (int, error) {
	infodb := mongoutils.InitMgo(url, BlockDataBase, DocInfoType)
	filter := bson.M{{"op_hash", hash}}
	response, err := infodb.Select(filter, bson.M{}, 10, 0)
	if err != nil {
		return -1, err
	}
	if len(response.Content) > 0 {
		return -1, errors.New("ophash hash existed.")
	}
	info := &DocInfoDoc{hash, 0, coll, timestamp, pubkey, timestamp}
	info_data := bson.M{{"op_hash", info.OpHash}, {"version", 0}, {"collection", info.Collection},
		{"create_time", info.CreateTime}, {"create_by", info.CreateBy}, {"last_modified", info.LastModified}}
	_, err = infodb.Insert(info_data)
	if err != nil {
		return -1, err
	}
	_ = infodb.Close()
	return 0, nil
}

// UpdateInfo increments the stored version of the doc-info entry addressed
// by op_hash, refreshes last_modified, and returns the new version.
// NOTE(review): the version is read back through a "$numberInt" wrapper,
// which is MongoDB extended-JSON, not plain bson.M decoding — verify against
// the actual driver in use. json.Unmarshal([]byte(response.Content[0]))
// also cannot compile, since Content[0] is a bson.M.
func UpdateInfo(hash string) (int, error) {
	infodb := mongoutils.InitMgo(url, BlockDataBase, DocInfoType)
	filter := bson.M{{"op_hash", hash}}
	response, err := infodb.Select(filter, bson.M{}, 10, 0)
	if err != nil {
		return -1, err
	}
	if len(response.Content) == 0 {
		return -1, errors.New("ophash doesn't exist.")
	}
	c := make(map[string]interface{})
	err = json.Unmarshal([]byte(response.Content[0]), &c)
	if err != nil {
		return -1, err
	}
	version_map := c["version"].(map[string]interface{})
	version, err := strconv.Atoi(version_map["$numberInt"].(string))
	if err != nil {
		return -1, err
	}
	version = version + 1
	lastModified := strconv.FormatInt(time.Now().Unix(), 10)
	update := bson.M{{"version", version}, {"last_modified", lastModified}}
	_, err = infodb.Update(filter, update, "set")
	if err != nil {
		return -1, err
	}
	_ = infodb.Close()
	return version, nil
}

// GetOpRecordsById fetches the operation records for a document. Unimplemented.
func GetOpRecordsById(hash string, coll string) []OpRecordDoc {
	var res []OpRecordDoc
	//TODO:SelectById(hash)
	return res
}
// GetHistoryRecord fetches the historical versions of a document. Unimplemented.
func GetHistoryRecord(hash string, coll string) []HistoryDoc {
	var res []HistoryDoc
	//TODO:SelectById(hash)
	return res
}

package instruction

// Operation names accepted in a command's "op" field, plus the reserved
// system collection names.
const (
	CreateCollection string = "create_collection"
	UpdateCollection string = "update_collection"
	Insert           string = "insert"
	Update           string = "update"
	Delete           string = "delete"
	CreateIndex      string = "hint_create_index"
	DropIndex        string = "hint_drop_index"

	CommandCollection string = "_op"     // command queue collection
	MasterCollection  string = "_master" // master metadata collection
)

//var Colls []*CollectionCommand

//var Indexes []*IndexCommand

// CollectionFeature holds a collection's permission flags and member lists.
type CollectionFeature struct {
	AllowUpdate        bool     `json:"allow_update"`
	AllowDelete        bool     `json:"allow_delete"`
	Cooperate          bool     `json:"cooperate"`
	AllowInsertMembers []string `json:"allow_insert_members"`
	AllowUpdateMembers []string `json:"allow_update_members"`
	AllowDeleteMembers []string `json:"allow_delete_members"`
}

// GeneralCommand is the envelope handed to op handlers: the raw op JSON
// plus its chain/identity metadata.
type GeneralCommand struct {
	TxHash    string `json:"tx_hash"`
	OpHash    string `json:"op_hash"`
	OpStr     string `json:"op_str"`
	Signature string `json:"signature"`
	PublicKey string `json:"public_key"`
}

// CollectionCommand is a create_collection / update_collection payload.
type CollectionCommand struct {
	//OpHash string `json:"op_hash"` // hash of the originating data, used as primary key
	Op         string            `json:"op"`
	Collection string            `json:"collection"` // collection to operate on
	Feature    CollectionFeature `json:"feature"`
	PublicKey  string            `json:"public_key"` // issuer's public key
}

// InsertCommand is an insert payload.
type InsertCommand struct {
	//OpHash string `json:"op_hash"` // hash of the originating data, used as primary key
	Op         string                 `json:"op"`
	Collection string                 `json:"collection"` // collection to operate on
	Data       map[string]interface{} `json:"data"`
	PublicKey  string                 `json:"public_key"` // issuer's public key
}
// UpdateCommand is an update payload: Query selects the document, Set holds
// field assignments, Unset lists fields to remove.
type UpdateCommand struct {
	//OpHash string `json:"op_hash"`
	Op         string                 `json:"op"`
	Collection string                 `json:"collection"` // collection to operate on
	Query      map[string]string      `json:"query"`
	Set        map[string]interface{} `json:"set"`
	Unset      []string               `json:"unset"`
	PublicKey  string                 `json:"public_key"` // issuer's public key
}

// DeleteCommand is a delete payload: Query selects the document to remove.
type DeleteCommand struct {
	//OpHash string `json:"op_hash"`
	Op         string            `json:"op"`
	Collection string            `json:"collection"` // collection to operate on
	Query      map[string]string `json:"query"`
	PublicKey  string            `json:"public_key"` // issuer's public key
}

// IndexCommand is a hint_create_index / hint_drop_index payload:
// Index maps index name -> target column.
type IndexCommand struct {
	//OpHash string `json:"op_hash"`
	Op         string            `json:"op"`
	Collection string            `json:"collection"` // collection to operate on
	Index      map[string]string `json:"index"`
	PublicKey  string            `json:"public_key"` // issuer's public key
}
cmd := &IndexCommand{} 17 | err := json.Unmarshal([]byte(str), cmd) 18 | if err != nil { 19 | fmt.Println(err) 20 | } 21 | fmt.Println(cmd) 22 | //str2,err:=json.Marshal(cmd) 23 | //if err != nil { 24 | // fmt.Println("marshal err") 25 | //} 26 | //fmt.Println(string(str2)) 27 | } 28 | 29 | func TestInstruction(t *testing.T) { 30 | file, err := os.Open("./instructions.txt") 31 | if err != nil { 32 | fmt.Println("文件打开失败 = ", err) 33 | } 34 | defer file.Close() 35 | reader := bufio.NewReader(file) 36 | i := 0 37 | for { 38 | str, err := reader.ReadString('\n') //读到一个换行就结束 39 | if err == io.EOF { //io.EOF 表示文件的末尾 40 | break 41 | } 42 | if len(str) == 0 { 43 | break 44 | } 45 | //fmt.Print(str) 46 | c := make(map[string]interface{}) 47 | err = json.Unmarshal([]byte(str), &c) 48 | if err != nil { 49 | panic(err) 50 | } 51 | op := c["op"].(string) 52 | err = Execute(op, str) 53 | if err != nil { 54 | panic(err) 55 | } 56 | i++ 57 | fmt.Println(i) 58 | 59 | } 60 | fmt.Println("文件读取结束...") 61 | } 62 | -------------------------------------------------------------------------------- /brefactor/instruction/instructions.txt: -------------------------------------------------------------------------------- 1 | {"op": "create_collection","collection": "sample_collection","feature": {"allow_update": true,"allow_delete": true,"cooperate": true,"allow_insert_members": ["0x123456", "0x123456", "0x123456", "0x123456"],"allow_update_members": ["0x123456", "0x123456", "0x123456", "0x123456"],"allow_delete_members": ["0x123456", "0x123456", "0x123456", "0x123456"]},"public_key": "0x769153474351324","signature": "0x169153474351324","op_hash":"0x53452345"} 2 | {"op": "update_collection","collection": "sample_collection","feature": {"allow_update": true,"allow_delete": true,"cooperate": true,"allow_insert_members": ["0x123456"],"allow_update_members": ["0x123456", "0x123456"],"allow_delete_members": ["0x123456", "0x123456", "0x123456"]},"public_key": "0x769153474351324","signature": 
"0x169153474351324","op_hash":"0x53452345"} 3 | {"op": "insert","collection": "sample_collection","data": {"name": "fudan","address": {"city": "Shanghai","road": "xxx"},"logo": {"url": "http://a.png"},"teachers": ["T1", "T2", "T3"]},"public_key": "0x769153474351324","signature": "0x169153474351324","op_hash":"0x739483392203"} 4 | {"op": "update","collection": "sample_collection","query": {"op_hash": "0x739483392203"},"set": {"name": "fudanNew","address.city": "Shanghai North East","logo": {}},"unset": ["teachers"],"public_key": "0x769153474351324","signature": "0x169153474351324","op_hash":"0x739483392203"} 5 | {"op": "delete","collection": "sample_collection","query": {"op_hash": "0x739483392203"},"public_key": "0x769153474351324","signature": "0x169153474351324","op_hash":"0x7394833985960"} 6 | {"op": "hint_create_index","collection": "sample_collection","index": {"idx_address_city": "address.city","idx_name": "name"},"public_key": "0x769153474351324","signature": "0x169153474351324","op_hash":"0x7394838409260"} 7 | {"op": "hint_drop_index","collection": "sample_collection","index": {"idx_address_city": "address.city"},"public_key": "0x769153474351324","signature":"0x169153474351324","op_hash":"0x92234783390304"} 8 | -------------------------------------------------------------------------------- /brefactor/instruction/tools.go: -------------------------------------------------------------------------------- 1 | package instruction 2 | 3 | import ( 4 | "go.mongodb.org/mongo-driver/bson" 5 | "time" 6 | ) 7 | 8 | func toDoc(v interface{}) (doc bson.M, err error) { 9 | data, err := bson.Marshal(v) 10 | if err != nil { 11 | return 12 | } 13 | 14 | err = bson.Unmarshal(data, &doc) 15 | return 16 | } 17 | 18 | func ts() int64 { 19 | return time.Now().UnixNano() / 1_000_000 20 | } 21 | -------------------------------------------------------------------------------- /brefactor/nodedata/config/config.toml: 
package og

import (
	"fmt"
	"github.com/ZhongAnTech/BlockDB/brefactor/core_interface"
	"github.com/sirupsen/logrus"
	"testing"
	"time"
)

// TestNewOgProcessor is an integration smoke test: it starts an OgClient
// against a live ledger endpoint and enqueues a single message. It makes no
// assertions — it only verifies that start/enqueue/stop do not panic.
// NOTE(review): requires network access to nbstock.top; will not exercise the
// send path offline.
func TestNewOgProcessor(t *testing.T) {

	// NOTE(review): the trailing comma after "T3" makes the embedded Data
	// fixture invalid JSON — confirm whether the ledger endpoint tolerates it.
	var blockMess = core_interface.BlockDBMessage{
		OpHash:    "0x3475623705236",
		Signature: "0x169153474351324",
		PublicKey: "0x769153474351324",
		Data:      `{"op":"insert","collection":"sample_collection","op_data":{"name":"fudan","address":{"city":"Shanghai","road":"xxx"},"logo":{"url":"http://a.png"},"teachers":["T1","T2","T3",]}}`,
	}

	logrus.SetLevel(logrus.TraceLevel)
	p := NewOgClient(OgClientConfig{LedgerUrl: "http://nbstock.top:30022/new_archive", RetryTimes: 3})
	p.Start()
	defer p.Stop()
	fmt.Println(blockMess)

	p.EnqueueSendToLedger(&blockMess)

	//data := gettestData()
	//p.EnqueueSendToLedger(&data)
	// give the background sender a moment to flush the queue before Stop
	time.Sleep(time.Second)

}
-------------------------------------------------------------------------------- 1 | package og 2 | 3 | import ( 4 | "encoding/json" 5 | "fmt" 6 | "github.com/ZhongAnTech/BlockDB/brefactor/plugins/serve/mongo" 7 | "go.mongodb.org/mongo-driver/bson" 8 | 9 | "sort" 10 | "strings" 11 | ) 12 | 13 | type allData struct { 14 | Type int `json:"type"` 15 | Transaction string `json:"transaction"` 16 | Sequencer string `json:"sequencer"` 17 | archive interface{} `json:"archive"` 18 | } 19 | 20 | type Archive struct { 21 | Height int `json:"hieght"` 22 | Type int `json:"type"` 23 | TxHash string `json:"tx_hash"` 24 | OpHash string `json:"op_hash"` 25 | PublicKey string `json:"public_key"` 26 | Signature string `json:"signature"` 27 | Parents []string `json:"parents"` 28 | AccountNonce int `json:"account_nonce"` 29 | MindNonce int `json:"mind_nonce"` 30 | Weight int `json:"weight"` 31 | Data string `json:"data"` 32 | } 33 | 34 | type Op struct { 35 | Order int `json:"order"` 36 | Height int `json:"height"` 37 | IsExecuted bool `json:"is_executed"` 38 | TxHash string `json:"tx_hash"` 39 | OpHash string `json:"op_hash"` 40 | PublicKey string `json:"public_key"` 41 | Signature string `json:"signature"` 42 | OpStr string `json:"op_str"` 43 | } 44 | 45 | var order int32 = 0 46 | 47 | type Archives []Archive 48 | type ByHash struct { 49 | Archives 50 | } 51 | 52 | func (s ByHash) Len() int { 53 | panic("implement me") 54 | } 55 | 56 | func (s ByHash) Swap(i, j int) { 57 | panic("implement me") 58 | } 59 | 60 | func (s ByHash) Less(i, j int) bool { 61 | return s.Archives[i].TxHash < s.Archives[j].TxHash 62 | } 63 | 64 | func ToStruct(str string) Archives { 65 | s1 := strings.Split(str, "},") 66 | s2 := strings.Split(s1[0], "\"data\": {") 67 | fmt.Println(s2[1]) 68 | 69 | s3 := strings.Split(s2[1], "\"archive\":") 70 | fmt.Println(s3[1]) 71 | 72 | var archiveMsg Archive 73 | //反序列化 74 | err := json.Unmarshal([]byte(s3[1]), &archiveMsg) 75 | if err != nil { 76 | fmt.Printf("unmarshal 
err = %v\n", err) 77 | } 78 | fmt.Printf("反序列化后 Data = %v\n", archiveMsg) 79 | 80 | //pubKeyBytes,err := hex.DecodeString(archiveMsg.PublicKey) 81 | //if err != nil { 82 | // fmt.Println(err) 83 | //} 84 | // 85 | //pubKey,err := crypto.UnmarshalSecp256k1PublicKey(pubKeyBytes) 86 | //if err != nil { 87 | // fmt.Println(err) 88 | //} 89 | // 90 | //opHash, err := hex.DecodeString(archiveMsg.OpHash) 91 | //if err != nil { 92 | // fmt.Println(err) 93 | //} 94 | // 95 | //signatureBytes, err := hex.DecodeString(archiveMsg.Signature) 96 | //if err != nil { 97 | // fmt.Println(err) 98 | //} 99 | // 100 | //data := Normalize(string(archiveMsg.Data)) 101 | //hash := sha256.Sum256([]byte(data)) 102 | // 103 | //if !bytes.Equal(opHash, hash[:]) { 104 | // fmt.Println("invalid op_hash") 105 | //} 106 | // 107 | //isSuccess, err := pubKey.Verify(hash[:], signatureBytes) 108 | //if err != nil || !isSuccess { 109 | // fmt.Println("invalid signature") 110 | //} 111 | 112 | var archiveMsgs []Archive 113 | archiveMsgs = append(archiveMsgs, archiveMsg) 114 | fmt.Println("archiveMsgs: ", archiveMsgs) 115 | return archiveMsgs 116 | } 117 | 118 | func test(archiveMsgs []Archive) { 119 | sort.Sort(ByHash{archiveMsgs}) 120 | for i, v := range archiveMsgs { 121 | var op = Op{ 122 | Order: i, 123 | Height: v.Height, 124 | IsExecuted: false, 125 | TxHash: v.TxHash, 126 | OpHash: v.OpHash, 127 | PublicKey: v.PublicKey, 128 | Signature: v.Signature, 129 | OpStr: v.Data, 130 | } 131 | 132 | fmt.Println("op: ", op) 133 | mgo := mongo.InitMgo("mongodb://localhost:27017", "test", "op") 134 | mgo2 := mongo.InitMgo("mongodb://localhost:27017", "test", "isOnChain") 135 | 136 | //update := bson.D{{"$set", data}} 137 | id, err := mgo.Insert(bson.D{ 138 | {"is_executed", op.IsExecuted}, 139 | {"tx_hash", op.TxHash}, 140 | {"op_hash", op.OpHash}, 141 | {"public_key", op.PublicKey}, 142 | {"signature", op.Signature}, 143 | {"op_str", op.OpStr}, 144 | }) 145 | fmt.Println(id, err) 146 | 147 | update := 
bson.D{ 148 | {"tx_hash", op.TxHash}, 149 | {"op_hash", op.OpHash}, 150 | {"status", 0}, 151 | } 152 | 153 | update2 := bson.D{ 154 | {"tx_hash", op.TxHash}, 155 | {"op_hash", op.OpHash}, 156 | {"status", 1}, 157 | } 158 | mgo2.Update(update, update2, "unset") 159 | } 160 | 161 | } 162 | -------------------------------------------------------------------------------- /brefactor/plugins/clients/og/toOp_test.go: -------------------------------------------------------------------------------- 1 | package og 2 | 3 | import "testing" 4 | 5 | func TestName(t *testing.T) { 6 | ToStruct("{\n \"data\": {\n \"type\": 4,\n \"transaction\": null,\n \"sequencer\": null,\n \"archive\": {\n \"type\": 4,\n \"tx_hash\": \"0xdce3ce9ea590c038026ed7dafd5942904afd05fa49ab569f85b95e16b48a7559\",\n \"op_hash\":\"0xdce3ce9ea590c038026ed7dafd5942904afd05fa49ab569f85b95e16b48a7559\",\n \"public_key\":\"yI6Iuays+WNl+ecgeafs+W3nuWO\",\n \"signature\":\"yI6Iuays+WNl+ecgeafs+W3nuWO\",\n \"parents\": [\n \"0xe9bb1e421198495ea622e996cf4439ec09e435f0689547a5yI6Iuays+WNl+ecgeafs+W3nuWOa20d97d5e8cab780\",\n \"0xc668349cdea78651c8da8fcf7344dcf49513649de98dd8c35c78d9678ceb8936\"\n ],\n \"account_nonce\": 16,\n \"mind_nonce\": 1,\n \"weight\": 177,\n \"data\": \"eyJpZGVudGl0eSI6IuWImOW5syIsInR5cGUiOiJvZ2Jyb3dzZXJfdGVzdCIsImlwIjoiMTk4LjUxLjExMC4xIiwicHJpbWFyeV9rZXkiOiIiLCJ0aW1lc3RhbXAiOjE1OTI3OTY2NDI1NzksImRhdGEiOnsiYWRkcmVzcyI6Iuays+WNl+ecgeafs+W3nuWOv+a3hOW3neiDoei3r3fluqcgOTY5ODAwIiwiYmxvb2RfZ3JvdXAiOiJCLSIsImNvbXBhbnkiOiLmlbDlrZcxMDDnp5HmioDmnInpmZDlhazlj7giLCJqb2IiOiLmsb3ovaYv5pGp5omY6L2m5bel56iL5biIIiwibWFpbCI6InRhbzk5QHlhaG9vLmNvbSIsIm5hbWUiOiLkuIfmhaciLCJyZXNpZGVuY2UiOiLlub/kuJznnIHlk4jlsJTmu6jluILmtqrln47lvKDot69I5bqnIDMzNjA0NCIsInNleCI6IkYiLCJzc24iOiI1MjI3MjYxOTg2MDQxMjk0NDkiLCJ1c2VybmFtZSI6Imp6aGVuZyIsIndlYnNpdGUiOlsiaHR0cDovLzc5LmNuLyIsImh0dHBzOi8vd3d3LnlhbmcuY24vIl19LCJiZWZvcmUiOiIiLCJhZnRlciI6IiJ9\"\n }\n },\n \"err\": \"\"\n}") 7 | test() 8 | } 9 | 
-------------------------------------------------------------------------------- /brefactor/plugins/clients/og/ws.go: -------------------------------------------------------------------------------- 1 | package og 2 | 3 | import ( 4 | "fmt" 5 | "time" 6 | 7 | "io/ioutil" 8 | "net/http" 9 | "strconv" 10 | "strings" 11 | ) 12 | 13 | //获取最新高度 14 | func Http() int { 15 | response, err := http.Get("http://nbstock.top:30022//v1/sequencer") 16 | if err != nil { 17 | fmt.Println(err) 18 | } 19 | defer response.Body.Close() 20 | body, err := ioutil.ReadAll(response.Body) 21 | str := string(body) 22 | 23 | s1 := strings.Split(str, ",") 24 | s2 := strings.Split(s1[4], ":") 25 | height, err := strconv.Atoi(s2[1]) 26 | if err != nil { 27 | fmt.Println("can't trans string to int") 28 | } 29 | fmt.Println(height) 30 | return height 31 | } 32 | 33 | //获取该高度的交易hash 34 | func GetHashes(url string) []string { 35 | response, err := http.Get(url) 36 | if err != nil { 37 | fmt.Println(err) 38 | } 39 | defer response.Body.Close() 40 | body, err := ioutil.ReadAll(response.Body) 41 | str := string(body) 42 | fmt.Println(str) 43 | if strings.Contains(str, "\"hashes\":null") { 44 | return nil 45 | } 46 | s1 := strings.Split(str, "[") 47 | s2 := strings.Split(s1[1], "]") 48 | s3 := strings.Split(s2[0], ",") 49 | fmt.Println(s3) 50 | return s3 51 | } 52 | 53 | //获取交易内容 54 | func HttpHash(url string) string { 55 | response, err := http.Get(url) 56 | if err != nil { 57 | fmt.Println(err) 58 | } 59 | defer response.Body.Close() 60 | body, err := ioutil.ReadAll(response.Body) 61 | fmt.Println(body) 62 | str := string(body) 63 | fmt.Println(str) 64 | return str 65 | } 66 | 67 | func hashByHeight(height int) { 68 | url1 := "http://nbstock.top:30022/transaction_hashes?height=" + strconv.Itoa(height) 69 | hashes := GetHashes(url1) 70 | 71 | if hashes == nil { 72 | fmt.Println("no tx with type = 4 in height: ", height) 73 | } else { 74 | var txDatas []Archive 75 | for _, v := range hashes { 76 | 
fmt.Println(v) 77 | url2 := "http://nbstock.top:30022/transaction?hash=" + v[1:len(v)-1] 78 | txData := HttpHash(url2) 79 | if strings.Contains(txData, "\"type\":4") == true { 80 | //验签,反序列化放到结构体,存入数据库 81 | 82 | txDatas = ToStruct(txData) 83 | fmt.Println("type=4---------", txData) 84 | 85 | } 86 | 87 | } 88 | //排序 89 | test(txDatas) 90 | } 91 | } 92 | 93 | func DownChain() { 94 | height := 0 95 | preHeight := 0 96 | for { 97 | if height == Http() { 98 | continue 99 | } 100 | time.Sleep(10 * time.Millisecond) 101 | height = Http() 102 | if height-1 > preHeight { 103 | for i := preHeight + 1; i < height; i++ { 104 | hashByHeight(i) 105 | } 106 | } 107 | preHeight = height 108 | hashByHeight(height) 109 | } 110 | 111 | } 112 | -------------------------------------------------------------------------------- /brefactor/plugins/clients/og/ws_test.go: -------------------------------------------------------------------------------- 1 | package og 2 | 3 | import ( 4 | "testing" 5 | "time" 6 | ) 7 | 8 | func TestWs(t *testing.T) { 9 | for { 10 | 11 | time.Sleep(3 * time.Second) 12 | height := Http() 13 | hashByHeight(height) 14 | 15 | } 16 | } 17 | -------------------------------------------------------------------------------- /brefactor/plugins/listeners/web/http_processor_test.go: -------------------------------------------------------------------------------- 1 | package web 2 | 3 | import ( 4 | "bytes" 5 | "crypto/sha256" 6 | "encoding/hex" 7 | "encoding/json" 8 | "fmt" 9 | "github.com/libp2p/go-libp2p-core/crypto" 10 | "net/http" 11 | "net/http/httptest" 12 | "testing" 13 | ) 14 | 15 | func TestHttpListener_Handle(t *testing.T) { 16 | httpListener := HttpListener{ 17 | Config: HttpListenerConfig{MaxContentLength: 1e7}, 18 | } 19 | data := `{ 20 | "op": "insert", 21 | "collection": "sample_collection", 22 | "data": { 23 | "name": "fudan", 24 | "address": { 25 | "city": "Shanghai", 26 | "road": "xxx" 27 | }, 28 | "logo": { 29 | "url": "http://a.png" 30 | }, 31 | 
"teachers": [ 32 | "T1", "T2", "T3" 33 | ] 34 | }, 35 | "public_key": "02c3a28b7e83c83f90c56861210b418dfc7a7302a9449c4c4597eb6e0ce415b944" 36 | }` 37 | 38 | priBytes, _ := hex.DecodeString("42f909a1a4cc546f270306b1b69c45434a1e37cddf2d834ea377cd5e92c5d3d5") 39 | pri, err := crypto.UnmarshalSecp256k1PrivateKey(priBytes) 40 | if err != nil { 41 | fmt.Println(err.Error()) 42 | } 43 | dataBytes := []byte(Normalize(data)) 44 | hash := sha256.Sum256(dataBytes) 45 | signature, _ := pri.Sign(hash[:]) 46 | message := &Message{ 47 | OpStr: dataBytes, 48 | OpHash: hex.EncodeToString(hash[:]), 49 | PublicKey: "02c3a28b7e83c83f90c56861210b418dfc7a7302a9449c4c4597eb6e0ce415b944", 50 | Signature: hex.EncodeToString(signature), 51 | } 52 | msg, _ := json.Marshal(message) 53 | req := httptest.NewRequest(http.MethodPost, "http://url.com", bytes.NewReader(msg)) 54 | w := httptest.NewRecorder() 55 | httpListener.Handle(w, req) 56 | resp := w.Result() 57 | if resp.StatusCode != http.StatusOK { 58 | t.Error("Handle function work incorrectly.") 59 | } 60 | } 61 | -------------------------------------------------------------------------------- /brefactor/plugins/listeners/web/json_util_test.go: -------------------------------------------------------------------------------- 1 | package web 2 | 3 | import ( 4 | "encoding/json" 5 | "fmt" 6 | "strings" 7 | "testing" 8 | ) 9 | 10 | func TestNormalize(t *testing.T) { 11 | const json1 = `{ 12 | "name": {"first": "Tom", "last": "Anderson"}, 13 | "age":37, 14 | "children": ["Sara","Alex","Jack"], 15 | "fav.movie": "Deer Hunter", 16 | "friends": [ 17 | {"age": true, "first": "\"Dale", "last": null, "nets": ["ig", "fb", "tw"]}, 18 | {"first": "Roger", "last": "Craig", "age": 68.11, "nets": ["fb", "tw"]}, 19 | {"first": "Jane", "last": "Murphy", "age": 47, "nets": ["ig", "tw"]} 20 | ] 21 | }` 22 | 23 | const json2 = `{ 24 | "fav.movie": "Deer Hunter", 25 | "friends": [ 26 | {"first": "\"Dale", "last": null, "age": true, "nets": ["ig", "fb", "tw"]}, 
27 | {"first": "Roger", "last": "Craig", "age": 68.11, "nets": ["fb", "tw"]}, 28 | {"first": "Jane", "last": "Murphy", "age": 47, "nets": ["ig", "tw"]} 29 | ], 30 | "age":37, 31 | "name": {"first": "Tom", "last": "Anderson"}, 32 | "children": ["Sara","Alex","Jack"] 33 | }` 34 | 35 | n1 := Normalize(json1) 36 | n2 := Normalize(json2) 37 | fmt.Println(n1) 38 | if !json.Valid([]byte(n1)) { 39 | t.Error("Normalize function work incorrectly.") 40 | } 41 | if strings.Compare(n1, n2) != 0 { 42 | t.Error("Results are not the same.") 43 | } 44 | } 45 | -------------------------------------------------------------------------------- /brefactor/storage/mongo_client.go: -------------------------------------------------------------------------------- 1 | package storage 2 | 3 | import ( 4 | "context" 5 | "github.com/ZhongAnTech/BlockDB/brefactor/core_interface" 6 | "github.com/sirupsen/logrus" 7 | "go.mongodb.org/mongo-driver/bson" 8 | "go.mongodb.org/mongo-driver/bson/primitive" 9 | "go.mongodb.org/mongo-driver/mongo" 10 | "go.mongodb.org/mongo-driver/mongo/options" 11 | "go.mongodb.org/mongo-driver/x/bsonx" 12 | "log" 13 | ) 14 | 15 | type MongoClient struct { 16 | client *mongo.Client 17 | database *mongo.Database 18 | collections map[string]*mongo.Collection // for collection cache 19 | } 20 | 21 | func Connect(ctx context.Context, url string, databaseName string, authMechanism string, username string, password string) *MongoClient { 22 | clientOptions := options.Client().ApplyURI(url) 23 | if authMechanism != "" { 24 | clientOptions.Auth = &options.Credential{ 25 | AuthMechanism: authMechanism, 26 | //AuthMechanismProperties: nil, 27 | //AuthSource: "", 28 | Username: username, 29 | Password: password, 30 | //PasswordSet: false, 31 | } 32 | } 33 | 34 | client, err := mongo.Connect(ctx, clientOptions) 35 | if err != nil { 36 | log.Fatal(err) 37 | } 38 | err = client.Ping(ctx, nil) 39 | if err != nil { 40 | log.Fatal(err) 41 | } 42 | 43 | mClient := &MongoClient{} 44 | 
mClient.client = client 45 | mClient.database = client.Database(databaseName) 46 | mClient.collections = make(map[string]*mongo.Collection) 47 | return mClient 48 | } 49 | 50 | //插入一个文档 51 | func (mc *MongoClient) Insert(ctx context.Context, collectionName string, val bson.M) (string, error) { 52 | collect := mc.ensureColl(collectionName) 53 | id, err := collect.InsertOne(ctx, val) 54 | if err != nil { 55 | logrus.WithError(err).Warn("failed to insert to ") 56 | } 57 | return id.InsertedID.(primitive.ObjectID).Hex(), err 58 | 59 | } 60 | 61 | //根据key value删除集合下所有符合条件的文档 62 | func (mc *MongoClient) Delete(ctx context.Context, collectionName string, id string) (int64, error) { 63 | collect := mc.ensureColl(collectionName) 64 | 65 | filter := bson.M{"_id": id} 66 | count, err := collect.DeleteMany(ctx, filter, nil) 67 | if err != nil { 68 | logrus.WithError(err).Warn("failed to delete") 69 | } 70 | return count.DeletedCount, err 71 | } 72 | 73 | //根据fileter查询文档 74 | func (mc *MongoClient) Select(ctx context.Context, collectionName string, 75 | filter bson.M, sort bson.M, limit int64, skip int64) (response core_interface.SelectResponse, err error) { 76 | 77 | collect := mc.ensureColl(collectionName) 78 | 79 | result, err := collect.Find(ctx, filter, options.Find().SetSort(sort).SetLimit(limit).SetSkip(skip)) 80 | if err != nil { 81 | logrus.WithError(err).Warn("failed to select") 82 | return 83 | } 84 | 85 | for result.Next(ctx) { 86 | var ele bson.M 87 | err := result.Decode(&ele) 88 | if err != nil { 89 | logrus.WithError(err).Warn("failed to select") 90 | return 91 | } 92 | response.Content = append(response.Content, ele) 93 | } 94 | return 95 | } 96 | 97 | //根据主键查数据 98 | func (mc *MongoClient) SelectById(ctx context.Context, collectionName string, id string) (response core_interface.SelectResponse, err error) { 99 | collect := mc.ensureColl(collectionName) 100 | 101 | filter := bson.M{"_id": id} 102 | result, err := collect.Find(ctx, filter) 103 | if err != nil { 
104 | logrus.WithError(err).Warn("failed to select") 105 | return 106 | } 107 | for result.Next(ctx) { 108 | var ele bson.M 109 | err := result.Decode(&ele) 110 | if err != nil { 111 | logrus.WithError(err).Warn("failed to select") 112 | return 113 | } 114 | response.Content = append(response.Content, ele) 115 | } 116 | return 117 | } 118 | 119 | //TODO根据filter更新所有符合条件的文档 120 | func (mc *MongoClient) Update(ctx context.Context, collectionName string, filter, update bson.M, operation string) (count int64, err error) { 121 | collect := mc.ensureColl(collectionName) 122 | 123 | var result *mongo.UpdateResult 124 | switch operation { 125 | case "set": 126 | update1 := bson.M{"$set": update} 127 | result, err = collect.UpdateMany(ctx, filter, update1) 128 | if err != nil { 129 | logrus.WithError(err).Warn("failed to update") 130 | return 131 | } 132 | case "unset": 133 | update1 := bson.M{"$unset": update} 134 | result, err = collect.UpdateMany(ctx, filter, update1) 135 | if err != nil { 136 | logrus.WithError(err).Warn("failed to update") 137 | return 138 | } 139 | } 140 | count = result.UpsertedCount 141 | return 142 | } 143 | func (mc *MongoClient) CreateCollection(ctx context.Context, collectionName string) (err error) { 144 | res := mc.database.RunCommand(ctx, bson.M{"create": collectionName}) 145 | err = res.Err() 146 | if err != nil { 147 | logrus.WithError(err).Warn("failed to create collection") 148 | return 149 | } 150 | return 151 | } 152 | 153 | //创建单个索引 154 | func (mc *MongoClient) CreateIndex(ctx context.Context, collectionName string, indexName, column string) (createdIndexName string, err error) { 155 | collect := mc.ensureColl(collectionName) 156 | 157 | Doc := bsonx.Doc{{column, bsonx.Int32(1)}} 158 | idx := mongo.IndexModel{ 159 | Keys: Doc, 160 | Options: options.Index().SetUnique(false).SetName(indexName), 161 | } 162 | createdIndexName, err = collect.Indexes().CreateOne(ctx, idx) 163 | if err != nil { 164 | logrus.WithError(err).Warn("failed to 
create index") 165 | } 166 | return 167 | } 168 | 169 | //index名字 170 | func (mc *MongoClient) DropIndex(ctx context.Context, collectionName string, indexName string) (err error) { 171 | collect := mc.ensureColl(collectionName) 172 | 173 | _, err = collect.Indexes().DropOne(ctx, indexName) 174 | if err != nil { 175 | logrus.WithError(err).Warn("failed to drop index") 176 | } 177 | return 178 | 179 | } 180 | 181 | //返回数据库大小、索引大小、文档个数、索引个数 182 | func (mc *MongoClient) CollectionInfo(ctx context.Context, collection string) (resp core_interface.CollectionInfoResponse, err error) { 183 | res := mc.database.RunCommand(ctx, bson.M{"collStats": collection}) 184 | var document bson.M 185 | err = res.Decode(&document) 186 | if err != nil { 187 | logrus.WithError(err).Warn("failed to get collection info") 188 | } 189 | 190 | resp = core_interface.CollectionInfoResponse{ 191 | StorageSize: document["storageSize"].(int32), 192 | TotalIndexSize: document["totalIndexSize"].(int32), 193 | Count: document["count"].(int32), 194 | NIndexes: document["nindexes"].(int32), 195 | } 196 | return 197 | } 198 | 199 | //func (m *MongoCollection)CreateAccount()error 200 | func (mc *MongoClient) Close(ctx context.Context) error { 201 | err := mc.database.Client().Disconnect(ctx) 202 | 203 | if err != nil { 204 | logrus.WithError(err).Warn("failed to close") 205 | } 206 | return err 207 | } 208 | 209 | func (mc *MongoClient) ensureColl(name string) *mongo.Collection { 210 | if v, ok := mc.collections[name]; ok { 211 | return v 212 | } else { 213 | v = mc.database.Collection(name) 214 | mc.collections[name] = v 215 | return v 216 | } 217 | } 218 | -------------------------------------------------------------------------------- /brefactor/storage/mongo_client_test.go: -------------------------------------------------------------------------------- 1 | package storage 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "go.mongodb.org/mongo-driver/bson" 7 | "testing" 8 | "time" 9 | ) 10 | 11 | func 
TestMgo(t *testing.T) { 12 | ctx, _ := context.WithTimeout(context.Background(), time.Second*5) 13 | 14 | mgo := Connect(ctx, "mongodb://paichepai.win:27017", "test", "", "", "") 15 | hex1, err := mgo.Insert(ctx, "coll", bson.M{{"a", 1}, {"b", "abc"}}) 16 | if err != nil { 17 | t.Error("fail to insert: ", err) 18 | } 19 | 20 | hex2, err := mgo.Insert(ctx, "coll", bson.M{{"a", 2}, {"b", "efg"}}) 21 | if err != nil { 22 | t.Error("fail to insert: ", err) 23 | } 24 | 25 | _, err = mgo.Update(ctx, "coll", bson.M{{"a", 1}, {"b", "abc"}}, bson.M{{"a", 3}, {"b", "klm"}}, "set") 26 | if err != nil { 27 | t.Error("fail to update: ", err) 28 | } 29 | 30 | response, err := mgo.Select(ctx, "coll", bson.M{{"a", bson.M{{"$ne", nil}}}}, bson.M{{"a", -1}}, 0, 0) 31 | if err != nil { 32 | t.Error("fail to select: ", err) 33 | } 34 | fmt.Println(response) 35 | 36 | ciResp, err := mgo.CollectionInfo(ctx, "coll") 37 | if err != nil { 38 | t.Error(err) 39 | } 40 | fmt.Println(ciResp) 41 | 42 | _, err = mgo.Delete(ctx, "coll", hex1) 43 | if err != nil { 44 | t.Error("fail to delete: ", err) 45 | } 46 | 47 | _, err = mgo.Delete(ctx, "coll", hex2) 48 | if err != nil { 49 | t.Error("fail to delete: ", err) 50 | } 51 | 52 | hex3, err := mgo.Insert(ctx, "coll1", bson.M{{"a", 1}, {"b", "abc"}}) 53 | if err != nil { 54 | t.Error("fail to insert: ", err) 55 | } 56 | 57 | _, err = mgo.Delete(ctx, "coll1", hex3) 58 | if err != nil { 59 | t.Error("fail to delete: ", err) 60 | } 61 | } 62 | -------------------------------------------------------------------------------- /brefactor/syncer/syncer.go: -------------------------------------------------------------------------------- 1 | package syncer 2 | 3 | // ChainOperator is the pulling way to query the latest sequencer and txs. 
// ChainOperator is the pulling way to query the latest sequencer and txs.
type ChainOperator interface {
	QueryHeight() (int64, error)
	// TODO: Qingyun enrich interface and implement plugins/clients/og/OgChainOperator
}

// ChainEventReceiver is the pushing way to receive the latest sequencer
type ChainEventReceiver interface {
	Connect()
	EventChannel() chan int64 // Maybe you will pass a more complicate object in the channel
	// TODO: Qingyun enrich interface and implement plugins/clients/og/OgChainEventReceiver

}

// OgChainSyncer keeps the local op table in sync with the OG chain by
// combining a pull-based ChainOperator with a push-based ChainEventReceiver.
type OgChainSyncer struct {
	// TODO: (priority) pull latest height and sync: startup, every 5 min (in case websocket is down)
	// TODO: receive websocket push (receive latest height) and sync (realtime)

	// table: op
	MaxSyncedHeight int64 // highest chain height already synced locally
	ChainOperator   ChainOperator
	InfoReceiver    ChainEventReceiver

	quit chan bool // signalled to stop loop()
}

// Start launches the background sync loop.
func (o *OgChainSyncer) Start() {
	// load max height from ledger
	go o.loop()
}

// Stop terminates the syncer.
// NOTE(review): unimplemented — panics. It also never signals o.quit, so
// loop() currently has no way to exit.
func (o *OgChainSyncer) Stop() {
	panic("implement me")
}

// Name identifies this component.
func (o *OgChainSyncer) Name() string {
	return "OgChainSyncer"
}

// QueryHeight reports the chain height (unimplemented).
func (o *OgChainSyncer) QueryHeight() (int64, error) {
	panic("implement me")
}

// loop is the syncer's main event loop: it reacts to pushed height events and
// (eventually) periodic pulls.
// NOTE(review): the empty default branch makes this select non-blocking, so
// the goroutine busy-spins at 100% CPU until the TODOs are implemented —
// remove the default (or add a timer case) when filling them in.
func (o *OgChainSyncer) loop() {
	for {
		select {
		case <-o.quit:
			return
		case newHeight := <-o.InfoReceiver.EventChannel():
			// TODO: compare local max height and sync if behind.
			if newHeight > o.MaxSyncedHeight {
				// TODO: sync.
			}

		//case <- timeout (be very careful when you handle the timer reset to prevent blocking.)
		default:

			// TODO: (priority) pull latest height and sync: startup, every 5 min (in case websocket is down)
			// TODO: receive websocket push (receive latest height) and sync (realtime)
		}
	}
}

// --- file: brefactor/syncer/ws_info_receiver.go ---

// WebsocketInfoReceiver is a stub ChainEventReceiver that will be fed by a
// websocket push channel. All methods except Name are unimplemented.
type WebsocketInfoReceiver struct {
}

func (w WebsocketInfoReceiver) Start() {
	panic("implement me")
}

func (w WebsocketInfoReceiver) Stop() {
	panic("implement me")
}

// Name identifies this component.
func (w WebsocketInfoReceiver) Name() string {
	return "WebsocketInfoReceiver"
}

// --- file: cmd/run.go (license header) ---

// Copyright © 2019 Annchain Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
14 | package cmd 15 | 16 | import ( 17 | "fmt" 18 | "github.com/annchain/BlockDB/engine" 19 | log "github.com/sirupsen/logrus" 20 | "github.com/spf13/cobra" 21 | "github.com/spf13/viper" 22 | "os" 23 | "os/signal" 24 | "path/filepath" 25 | "sort" 26 | "syscall" 27 | ) 28 | 29 | // runCmd represents the run command 30 | var runCmd = &cobra.Command{ 31 | Use: "run", 32 | Short: "Start a full node", 33 | Long: `Start a full node`, 34 | Run: func(cmd *cobra.Command, args []string) { 35 | // init logs and other facilities before the node starts 36 | readConfig() 37 | initLogger() 38 | defer DumpStack() 39 | 40 | log.Info("BlockDB Starting") 41 | eng := engine.NewEngine() 42 | eng.Start() 43 | 44 | // prevent sudden stop. Do your clean up here 45 | var gracefulStop = make(chan os.Signal) 46 | 47 | signal.Notify(gracefulStop, syscall.SIGTERM) 48 | signal.Notify(gracefulStop, syscall.SIGINT) 49 | 50 | func() { 51 | sig := <-gracefulStop 52 | log.Infof("caught sig: %+v", sig) 53 | log.Info("Exiting... 
Please do no kill me") 54 | eng.Stop() 55 | os.Exit(0) 56 | }() 57 | 58 | }, 59 | } 60 | 61 | func readConfig() { 62 | configPath := viper.GetString("config") 63 | 64 | absPath, err := filepath.Abs(configPath) 65 | fmt.Println(absPath) 66 | panicIfError(err, fmt.Sprintf("Error on parsing config file path: %s", absPath)) 67 | 68 | file, err := os.Open(absPath) 69 | panicIfError(err, fmt.Sprintf("Error on opening config file: %s", absPath)) 70 | defer file.Close() 71 | 72 | viper.SetConfigType("toml") 73 | err = viper.MergeConfig(file) 74 | panicIfError(err, fmt.Sprintf("Error on reading config file: %s", absPath)) 75 | 76 | viper.SetEnvPrefix("blockdb") 77 | viper.AutomaticEnv() // read in environment variables that match 78 | 79 | keys := viper.AllKeys() 80 | sort.Strings(keys) 81 | for _, key := range keys { 82 | fmt.Printf("%s:%v\n", key, viper.Get(key)) 83 | } 84 | 85 | } 86 | -------------------------------------------------------------------------------- /common/bytes/bytes.go: -------------------------------------------------------------------------------- 1 | package bytes 2 | 3 | // GetUInt32 get an uint32 from byte array with a start position. 4 | // This is for those little-endian bytes. 5 | func GetUInt32(b []byte, pos int) uint32 { 6 | return uint32(b[pos]) | uint32(b[pos+1])<<8 | uint32(b[pos+2])<<16 | uint32(b[pos+3])<<24 7 | } 8 | 9 | // SetUInt32 set an uint32 into byte array at a position. 10 | func SetUInt32(b []byte, pos int, i uint32) { 11 | b[pos] = byte(i) 12 | b[pos+1] = byte(i >> 8) 13 | b[pos+2] = byte(i >> 16) 14 | b[pos+3] = byte(i >> 24) 15 | } 16 | 17 | // GetInt32 get an int32 from byte array with a start position. 18 | func GetInt32(b []byte, pos int) int32 { 19 | return int32(b[pos]) | int32(b[pos+1])<<8 | int32(b[pos+2])<<16 | int32(b[pos+3])<<24 20 | } 21 | 22 | // SetInt32 set an int32 into byte array at a position. 
23 | func SetInt32(b []byte, pos int, i int32) { 24 | b[pos] = byte(i) 25 | b[pos+1] = byte(i >> 8) 26 | b[pos+2] = byte(i >> 16) 27 | b[pos+3] = byte(i >> 24) 28 | } 29 | 30 | // GetInt64 get an int64 from byte array with a start position. 31 | func GetInt64(b []byte, pos int) int64 { 32 | return int64(b[pos]) | int64(b[pos+1])<<8 | int64(b[pos+2])<<16 | int64(b[pos+3])<<24 | 33 | int64(b[pos+4])<<32 | int64(b[pos+5])<<40 | int64(b[pos+6])<<48 | int64(b[pos+7])<<56 34 | } 35 | -------------------------------------------------------------------------------- /config.toml: -------------------------------------------------------------------------------- 1 | [log] 2 | level = "trace" 3 | 4 | [listener] 5 | [listener.mongodb] 6 | enabled = false 7 | incoming_port = 28017 8 | incoming_max_connection = 10 9 | idle_connection_seconds = 30 10 | ignore_meta_query = true 11 | 12 | [listener.log4j2Socket] 13 | enabled = false 14 | incoming_port = 28018 15 | incoming_max_connection = 10 16 | idle_connection_seconds = 30 17 | 18 | [listener.jsonSocket] 19 | enabled = false 20 | incoming_port = 28019 21 | incoming_max_connection = 10 22 | idle_connection_seconds = 30 23 | 24 | [listener.kafka] 25 | enabled = false 26 | address = "172.28.152.102:30092" 27 | topic = "anlink" 28 | group_id = "groupid" 29 | 30 | [listener.http] 31 | enabled = true 32 | port = 8080 33 | enable_audit = true 34 | enable_health = true 35 | max_content_length = 10000000 36 | 37 | 38 | [backend] 39 | [backend.mongodb] 40 | # see http://mongodb.github.io/mongo-java-driver/3.6/javadoc/?com/mongodb/ConnectionString.html for full connection parameters 41 | connection_string = "mongodb://[username:password@]host1[:port1][,host2[:port2],...[,hostN[:portN]]][/[database.collection][?options]]" 42 | url = "172.28.152.101:27017" 43 | 44 | [og] 45 | enabled = true 46 | url="http://127.0.0.1:8000/new_archive" 47 | #url="http://172.28.152.106:30100/new_archive" 48 | idle_connection_seconds = 5 49 | buffer_size = 100 50 
| retry_times = 5 51 | 52 | [og.wsclient] 53 | enabled = false 54 | url="ws://172.28.152.106:30102/ws" 55 | 56 | [audit] 57 | [audit.mongodb] 58 | connection_string = "mongodb://@172.28.152.101:27017" 59 | database = "blockDB" 60 | collection = "queryblockdb" 61 | 62 | [debug] 63 | enabled = false 64 | port = 8091 -------------------------------------------------------------------------------- /deployment/config.toml: -------------------------------------------------------------------------------- 1 | [listener] 2 | [listener.mongodb] 3 | enabled = false 4 | incoming_port = 28017 5 | incoming_max_connection = 10 6 | idle_connection_seconds = 30 7 | ignore_meta_query = true 8 | 9 | [listener.log4j2Socket] 10 | enabled = false 11 | incoming_port = 28018 12 | incoming_max_connection = 10 13 | idle_connection_seconds = 30 14 | 15 | [listener.jsonSocket] 16 | enabled = false 17 | incoming_port = 28019 18 | incoming_max_connection = 10 19 | idle_connection_seconds = 30 20 | 21 | [listener.kafka] 22 | enabled = false 23 | address = "10.253.11.192:9092" 24 | topic = "tech-tech-anlink-web-gateway-201907101551" 25 | 26 | [listener.http] 27 | enabled = true 28 | port = 8080 29 | enable_audit = true 30 | enable_health = true 31 | max_content_length = 10000000 32 | 33 | [backend] 34 | [backend.mongodb] 35 | # see http://mongodb.github.io/mongo-java-driver/3.6/javadoc/?com/mongodb/ConnectionString.html for full connection parameters 36 | connection_string = "mongodb://[username:password@]host1[:port1][,host2[:port2],...[,hostN[:portN]]][/[database.collection][?options]]" 37 | url = "172.28.152.101:27017" 38 | 39 | [og] 40 | enabled = true 41 | # url="http://172.28.152.101:8000/new_archive" 42 | url="http://127.0.0.1:8000/new_archive" 43 | idle_connection_seconds = 5 44 | buffer_size = 100 45 | retry_times = 3 46 | 47 | [og.wsclient] 48 | enabled = false 49 | url="ws://172.28.152.106:30102/ws" 50 | 51 | [audit] 52 | [audit.mongodb] 53 | connection_string = 
"mongodb://@172.28.152.101:30052" 54 | database = "blockDB" 55 | collection = "queryblockdb" 56 | [debug] 57 | enabled = false 58 | port = 8091 59 | 60 | [log] 61 | level = "trace" -------------------------------------------------------------------------------- /deployment/k8s_blockdb.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | kind: Deployment 3 | metadata: 4 | name: blockdb 5 | spec: 6 | replicas: 1 7 | template: 8 | metadata: 9 | labels: 10 | app: blockdb 11 | spec: 12 | containers: 13 | - name: blockdb 14 | image: 172.28.152.101:5000/blockdb 15 | env: 16 | - name: BLOCKDB_LISTENER.KAFKA.ADDRESS 17 | value: "kafka-service.production:9092" 18 | - name: BLOCKDB_LISTENER.KAFKA.TOPIC 19 | value: "anlink" 20 | - name: BLOCKDB_BACKEND.MONGODB.URL 21 | value: "mongodb://mongo-service.production:27017" 22 | - name: BLOCKDB_OG.URL 23 | value: "http://ogblockdb-service.production:8000/new_archive" 24 | - name: BLOCKDB_OG.WSCLIENT.URL 25 | value: "ws://ogblockdb-service.production:8002/ws" 26 | - name: BLOCKDB_AUDIT.MONGODB.CONNECTION_STRING 27 | value: "mongodb://blockdb:wjs123@mongo-service.production:27017" 28 | - name: BLOCKDB_LOG.LOG_DIR 29 | value: "/rw/log/" 30 | - name: TZ 31 | value: "Asia/Shanghai" 32 | ports: 33 | - name: mongodb 34 | containerPort: 28017 35 | - name: log4j2socket 36 | containerPort: 28018 37 | - name: jsonsocket 38 | containerPort: 28019 39 | - name: http 40 | containerPort: 8080 41 | command: 42 | ["./blockdb", "-c", "/opt/config.toml", "-m", "-n", "-l", "/rw/log/", "-d", "/rw/datadir", "run"] 43 | ------- 44 | apiVersion: v1 45 | kind: Service 46 | metadata: 47 | name: blockdb 48 | spec: 49 | selector: 50 | app: blockdb 51 | type: NodePort 52 | ports: 53 | - name: mongodb 54 | port: 28017 55 | nodePort: 30317 56 | - name: log4jsocket 57 | port: 28018 58 | nodePort: 30318 59 | - name: jsonsocket 60 | port: 28019 61 | nodePort: 30319 62 | - name: http 63 | port: 
8080 64 | nodePort: 30320 65 | -------- 66 | apiVersion: v1 67 | kind: Service 68 | metadata: 69 | name: ogblockdb-0 70 | spec: 71 | selector: 72 | statefulset.kubernetes.io/pod-name: ogblockdb-0 73 | ports: 74 | - name: p2p 75 | port: 8001 76 | targetPort: 8001 -------------------------------------------------------------------------------- /deployment/k8s_infra.yaml: -------------------------------------------------------------------------------- 1 | { 2 | "apiVersion": "v1", 3 | "kind": "Namespace", 4 | "metadata": { 5 | "name": "production", 6 | "labels": { 7 | "name": "production" 8 | } 9 | } 10 | } 11 | -----kafka----- 12 | apiVersion: extensions/v1beta1 13 | kind: Deployment 14 | metadata: 15 | labels: 16 | app: kafka 17 | name: kafka 18 | name: kafka-deployment 19 | spec: 20 | replicas: 1 21 | selector: 22 | matchLabels: 23 | name: kafka 24 | template: 25 | metadata: 26 | labels: 27 | app: kafka 28 | name: kafka 29 | spec: 30 | containers: 31 | - env: 32 | - name: KAFKA_ADVERTISED_PORT 33 | value: "30050" 34 | - name: KAFKA_ADVERTISED_HOST_NAME 35 | value: 47.100.222.11 36 | - name: KAFKA_ZOOKEEPER_CONNECT 37 | value: zookeeper-service.production:2181 38 | - name: KAFKA_BROKER_ID 39 | value: "1" 40 | image: wurstmeister/kafka 41 | imagePullPolicy: IfNotPresent 42 | name: kafka 43 | ports: 44 | - containerPort: 9092 45 | protocol: TCP 46 | resources: 47 | limits: 48 | cpu: "1" 49 | terminationGracePeriodSeconds: 30 50 | -----kafka service----- 51 | apiVersion: v1 52 | kind: Service 53 | metadata: 54 | name: kafka-service 55 | spec: 56 | selector: 57 | app: kafka 58 | type: NodePort 59 | ports: 60 | - name: kafka-port 61 | port: 9092 62 | targetPort: 9092 63 | nodePort: 30050 64 | -----zookeeper----- 65 | apiVersion: extensions/v1beta1 66 | kind: Deployment 67 | metadata: 68 | labels: 69 | app: zookeeper 70 | name: zookeeper 71 | spec: 72 | replicas: 1 73 | selector: 74 | matchLabels: 75 | app: zookeeper 76 | template: 77 | metadata: 78 | labels: 79 | app: 
zookeeper 80 | spec: 81 | containers: 82 | - image: wurstmeister/zookeeper 83 | name: zookeeper 84 | ports: 85 | - containerPort: 2181 86 | protocol: TCP 87 | resources: 88 | limits: 89 | cpu: "1" 90 | -----zookeeper service----- 91 | apiVersion: v1 92 | kind: Service 93 | metadata: 94 | name: zookeeper-service 95 | spec: 96 | selector: 97 | app: zookeeper 98 | ports: 99 | - name: zookeeper-port 100 | port: 2181 101 | targetPort: 2181 -------------------------------------------------------------------------------- /deployment/k8s_mongodb.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1beta1 2 | kind: StatefulSet 3 | metadata: 4 | app: mongo 5 | spec: 6 | serviceName: mongo 7 | template: 8 | metadata: 9 | labels: 10 | app: mongo 11 | spec: 12 | containers: 13 | - name: mongo 14 | image: mongo 15 | ports: 16 | - containerPort: 27017 17 | env: 18 | - name: MONGO_INITDB_ROOT_USERNAME 19 | value: root 20 | - name: MONGO_INITDB_ROOT_PASSWORD 21 | value: wjs123 22 | volumeMounts: 23 | - name: mongo-persistent-storage 24 | mountPath: /data/db 25 | volumeClaimTemplates: 26 | - metadata: 27 | name: mongo-persistent-storage 28 | annotations: 29 | volume.beta.kubernetes.io/storage-class: "prod-huge" 30 | spec: 31 | accessModes: [ "ReadWriteOnce" ] 32 | resources: 33 | requests: 34 | storage: 50Gi 35 | --------------- 36 | apiVersion: v1 37 | kind: Service 38 | metadata: 39 | name: mongo-service 40 | spec: 41 | selector: 42 | app: mongo 43 | type: NodePort 44 | ports: 45 | - name: mongo-port 46 | port: 27017 47 | targetPort: 27017 48 | nodePort: 30052 -------------------------------------------------------------------------------- /deployment/k8s_restheart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | kind: Deployment 3 | metadata: 4 | name: restheart 5 | spec: 6 | replicas: 1 7 | template: 8 | metadata: 9 | labels: 10 | app: restheart 11 
| spec: 12 | containers: 13 | - name: restheart 14 | image: softinstigate/restheart 15 | env: 16 | - name: MONGO_URI 17 | value: "mongodb://u:p@mongo-service.production:27017" 18 | ports: 19 | - name: query 20 | containerPort: 8080 21 | ------- 22 | apiVersion: v1 23 | kind: Service 24 | metadata: 25 | name: restheart 26 | spec: 27 | selector: 28 | app: restheart 29 | type: NodePort 30 | ports: 31 | - name: query 32 | port: 8080 33 | nodePort: 30216 -------------------------------------------------------------------------------- /dockerfile_gitub: -------------------------------------------------------------------------------- 1 | # Build OG from alpine based golang environment 2 | FROM golang:1.12-alpine as builder 3 | 4 | RUN apk add --no-cache make gcc musl-dev linux-headers git 5 | 6 | ENV GOPROXY https://goproxy.io 7 | ENV GO111MODULE on 8 | 9 | WORKDIR /go/src/github.com/annchain/BlockDB 10 | COPY go.mod . 11 | COPY go.sum . 12 | RUN go mod download 13 | 14 | COPY . . 15 | RUN make blockdb 16 | 17 | # Copy OG into basic alpine image 18 | FROM alpine:latest 19 | 20 | RUN apk add --no-cache curl iotop busybox-extras tzdata 21 | 22 | COPY --from=builder /go/src/github.com/annchain/BlockDB/deployment/config.toml /opt/config.toml 23 | COPY --from=builder /go/src/github.com/annchain/BlockDB/build/blockdb /opt/ 24 | 25 | # for a temp running folder. 
This should be mounted from the outside 26 | RUN mkdir /rw 27 | 28 | EXPOSE 28017 28018 28019 29 | 30 | WORKDIR /opt 31 | 32 | CMD ["./blockdb", "--config", "/opt/config.toml", "--multifile_by_level", "--log_line_number", "--log_dir", "/rw/log/", "--datadir", "/rw/datadir", "run"] -------------------------------------------------------------------------------- /docs/sequence_diagram: -------------------------------------------------------------------------------- 1 | # https://sequencediagram.org/ 2 | 3 | title BlockDB 4 | 5 | activate Application 6 | Application->BlockDB: JSON string 7 | activate BlockDB 8 | BlockDB->AnnchainOG: Archive data with docID 9 | activate AnnchainOG 10 | BlockDB-->Application: docID 11 | deactivate BlockDB 12 | deactivateafter Application 13 | AnnchainOG->AnnchainOG: Consensus 14 | AnnchainOG->BlockDB: Push event with docIDs 15 | deactivate BlockDB 16 | deactivate AnnchainOG 17 | activate BlockDB 18 | opt push model 19 | BlockDB->Application: Push event with docID 20 | end 21 | opt pull model 22 | Application->BlockDB: Pull status of docID 23 | activate Application 24 | BlockDB ->Application: status of docID 25 | deactivateafter Application 26 | end 27 | 28 | 29 | ==Query by MongoDB Query Language== 30 | activate Application 31 | Application->BlockDB: Docs query request 32 | BlockDB->Application: Docs query response 33 | deactivateafter Application 34 | deactivate BlockDB 35 | 36 | ==Query by docID == 37 | activate Application 38 | Application->BlockDB: docID 39 | BlockDB->Application: Docs query response 40 | deactivateafter Application 41 | deactivate BlockDB 42 | -------------------------------------------------------------------------------- /engine/component_interface.go: -------------------------------------------------------------------------------- 1 | // Copyright © 2019 Annchain Authors 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the 
License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | package engine 15 | 16 | type Component interface { 17 | Start() 18 | Stop() 19 | // Get the component name 20 | Name() string 21 | } 22 | -------------------------------------------------------------------------------- /engine/engine.go: -------------------------------------------------------------------------------- 1 | package engine 2 | 3 | import ( 4 | "fmt" 5 | "net/http" 6 | _ "net/http/pprof" 7 | "time" 8 | 9 | "github.com/annchain/BlockDB/backends" 10 | "github.com/annchain/BlockDB/listener" 11 | "github.com/annchain/BlockDB/multiplexer" 12 | "github.com/annchain/BlockDB/ogws" 13 | "github.com/annchain/BlockDB/plugins/client/og" 14 | "github.com/annchain/BlockDB/plugins/server/jsondata" 15 | "github.com/annchain/BlockDB/plugins/server/kafka" 16 | "github.com/annchain/BlockDB/plugins/server/log4j2" 17 | "github.com/annchain/BlockDB/plugins/server/mongodb" 18 | "github.com/annchain/BlockDB/plugins/server/socket" 19 | "github.com/annchain/BlockDB/plugins/server/web" 20 | "github.com/sirupsen/logrus" 21 | "github.com/spf13/viper" 22 | ) 23 | 24 | type Engine struct { 25 | components []Component 26 | } 27 | 28 | func NewEngine() *Engine { 29 | engine := new(Engine) 30 | engine.components = []Component{} 31 | engine.registerComponents() 32 | return engine 33 | } 34 | 35 | func (n *Engine) Start() { 36 | for _, component := range n.components { 37 | logrus.Infof("Starting %s", component.Name()) 38 | component.Start() 39 | logrus.Infof("Started: %s", component.Name()) 40 | 41 | } 42 | if 
viper.GetBool("debug.enabled") { 43 | port := viper.GetInt("debug.port") 44 | go logrus.Fatal(http.ListenAndServe("localhost:"+fmt.Sprintf("%d", port), nil)) 45 | } 46 | 47 | logrus.Info("BlockDB Engine Started") 48 | } 49 | 50 | func (n *Engine) Stop() { 51 | //status.Stopped = true 52 | for i := len(n.components) - 1; i >= 0; i-- { 53 | comp := n.components[i] 54 | logrus.Infof("Stopping %s", comp.Name()) 55 | comp.Stop() 56 | logrus.Infof("Stopped: %s", comp.Name()) 57 | } 58 | logrus.Info("BlockDB Engine Stopped") 59 | } 60 | 61 | func (n *Engine) registerComponents() { 62 | 63 | var defaultLedgerWriter backends.LedgerWriter 64 | 65 | auditWriter := ogws.NewMongoDBAuditWriter( 66 | viper.GetString("audit.mongodb.connection_string"), 67 | viper.GetString("audit.mongodb.database"), 68 | viper.GetString("audit.mongodb.collection"), 69 | ) 70 | originalDataProcessor := auditWriter.GetOriginalDataProcessor() 71 | 72 | if viper.GetBool("og.enabled") { 73 | url := viper.GetString("og.url") 74 | p := og.NewOgProcessor(og.OgProcessorConfig{LedgerUrl: url, 75 | IdleConnectionTimeout: time.Second * time.Duration(viper.GetInt("og.idle_connection_seconds")), 76 | BufferSize: viper.GetInt("og.buffer_size"), 77 | RetryTimes: viper.GetInt("og.retry_times"), 78 | }, originalDataProcessor) 79 | defaultLedgerWriter = p 80 | n.components = append(n.components, p) 81 | } 82 | 83 | // MongoDB incoming 84 | if viper.GetBool("listener.mongodb.enabled") { 85 | url := viper.GetString("backend.mongodb.url") 86 | if url != "" { 87 | builder := multiplexer.NewDefaultTCPConnectionBuilder(url) 88 | observerFactory := mongodb.NewExtractorFactory(defaultLedgerWriter, &mongodb.ExtractorConfig{ 89 | IgnoreMetaQuery: viper.GetBool("listener.mongodb.ignore_meta_query"), 90 | }) 91 | mp := multiplexer.NewMultiplexer(builder, observerFactory) 92 | l := listener.NewGeneralTCPListener(mp, viper.GetInt("listener.mongodb.incoming_port"), 93 | viper.GetInt("listener.mongodb.incoming_max_connection")) 94 
| 95 | n.components = append(n.components, l) 96 | } 97 | } 98 | 99 | if viper.GetBool("listener.log4j2Socket.enabled") { 100 | // Incoming connection handler 101 | p := log4j2.NewLog4j2SocketProcessor( 102 | log4j2.Log4j2SocketProcessorConfig{ 103 | IdleConnectionTimeout: time.Second * time.Duration(viper.GetInt("listener.log4j2Socket.idle_connection_seconds")), 104 | }, 105 | defaultLedgerWriter, 106 | ) 107 | l := listener.NewGeneralTCPListener(p, viper.GetInt("listener.log4j2Socket.incoming_port"), 108 | viper.GetInt("listener.log4j2Socket.incoming_max_connection")) 109 | n.components = append(n.components, l) 110 | } 111 | 112 | if viper.GetBool("listener.jsonSocket.enabled") { 113 | // Incoming connection handler 114 | p := socket.NewSocketProcessor( 115 | socket.SocketConnectionProcessorConfig{ 116 | IdleConnectionTimeout: time.Second * time.Duration(viper.GetInt("listener.jsonSocket.idle_connection_seconds")), 117 | }, 118 | jsondata.NewJsonDataProcessor(jsondata.JsonDataProcessorConfig{}), 119 | defaultLedgerWriter, 120 | ) 121 | l := listener.NewGeneralTCPListener(p, viper.GetInt("listener.jsonSocket.incoming_port"), 122 | viper.GetInt("listener.jsonSocket.incoming_max_connection")) 123 | n.components = append(n.components, l) 124 | } 125 | 126 | if viper.GetBool("listener.kafka.enabled") { 127 | // Incoming connection handler 128 | p := kafka.NewKafkaListener(kafka.KafkaProcessorConfig{ 129 | Topic: viper.GetString("listener.kafka.topic"), 130 | Address: viper.GetString("listener.kafka.address"), 131 | GroupId: viper.GetString("listener.kafka.group_id"), 132 | }, 133 | jsondata.NewJsonDataProcessor(jsondata.JsonDataProcessorConfig{}), 134 | defaultLedgerWriter, 135 | ) 136 | n.components = append(n.components, p) 137 | } 138 | if viper.GetBool("og.wsclient.enabled") { 139 | w := ogws.NewOGWSClient(viper.GetString("og.wsclient.url"), auditWriter) 140 | n.components = append(n.components, w) 141 | } 142 | if viper.GetBool("listener.http.enabled") { 143 | p 
:= web.NewHttpListener(web.HttpListenerConfig{ 144 | Port: viper.GetInt("listener.http.port"), 145 | EnableAudit: viper.GetBool("listener.http.enable_audit"), 146 | EnableHealth: viper.GetBool("listener.http.enable_health"), 147 | MaxContentLength: viper.GetInt64("listener.http.max_content_length"), 148 | }, 149 | jsondata.NewJsonDataProcessor(jsondata.JsonDataProcessorConfig{}), 150 | defaultLedgerWriter, 151 | auditWriter, 152 | ) 153 | n.components = append(n.components, p) 154 | } 155 | } 156 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/annchain/BlockDB 2 | 3 | go 1.12 4 | 5 | require ( 6 | github.com/annchain/commongo v0.0.0-20200727100046-6b7cd76c21f4 7 | github.com/fastly/go-utils v0.0.0-20180712184237-d95a45783239 // indirect 8 | github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8 9 | github.com/gorilla/mux v1.7.4 10 | github.com/jehiah/go-strftime v0.0.0-20171201141054-1d33003b3869 // indirect 11 | github.com/latifrons/gorews v0.0.0-20190802105101-b718f6088654 12 | github.com/lestrrat/go-envload v0.0.0-20180220120943-6ed08b54a570 // indirect 13 | github.com/lestrrat/go-file-rotatelogs v0.0.0-20180223000712-d3151e2a480f 14 | github.com/lestrrat/go-strftime v0.0.0-20180220042222-ba3bf9c1d042 // indirect 15 | github.com/mitchellh/go-homedir v1.1.0 16 | github.com/rifflock/lfshook v0.0.0-20180920164130-b9218ef580f5 17 | github.com/segmentio/kafka-go v0.4.1 18 | github.com/sirupsen/logrus v1.6.0 19 | github.com/spf13/cobra v1.0.0 20 | github.com/spf13/viper v1.7.1 21 | github.com/stretchr/objx v0.1.1 // indirect 22 | github.com/tebeka/strftime v0.1.5 // indirect 23 | go.mongodb.org/mongo-driver v1.4.0 24 | ) 25 | -------------------------------------------------------------------------------- /listener/README.md: -------------------------------------------------------------------------------- 1 | # 
Listeners 2 | 3 | 4 | -------------------------------------------------------------------------------- /listener/listener.go: -------------------------------------------------------------------------------- 1 | package listener 2 | 3 | import ( 4 | "fmt" 5 | "github.com/annchain/BlockDB/processors" 6 | "github.com/sirupsen/logrus" 7 | "net" 8 | ) 9 | 10 | type ProxyListener interface { 11 | Start() 12 | Stop() 13 | } 14 | 15 | type GeneralTCPProxyListener struct { 16 | processor processors.ConnectionProcessor 17 | port int 18 | ln net.Listener 19 | closed bool 20 | maxConnectionSize int 21 | } 22 | 23 | func (l *GeneralTCPProxyListener) Name() string { 24 | return fmt.Sprintf("GeneralTCPProxyListener listening on %d", l.port) 25 | } 26 | 27 | func NewGeneralTCPListener(p processors.ConnectionProcessor, port int, maxConnectionSize int) *GeneralTCPProxyListener { 28 | return &GeneralTCPProxyListener{ 29 | processor: p, 30 | port: port, 31 | maxConnectionSize: maxConnectionSize, 32 | } 33 | } 34 | 35 | func (l *GeneralTCPProxyListener) Start() { 36 | // start all prerequisites first. This is a block action 37 | // Do not return until ready. After this the listener will start to accept connections. 38 | l.processor.Start() 39 | 40 | go func() { 41 | ln, err := net.Listen("tcp", fmt.Sprintf(":%v", l.port)) 42 | if err != nil { 43 | logrus.WithError(err).WithField("port", l.port).Error("error listening on port") 44 | return 45 | } 46 | logrus.WithField("port", l.port).Info("server running on port") 47 | l.ln = ln 48 | // to limit the total number of accepted connections. 
49 | maxChan := make(chan bool, l.maxConnectionSize) 50 | 51 | for { 52 | maxChan <- true 53 | conn, err := ln.Accept() 54 | if err != nil { 55 | if l.closed { 56 | break 57 | } 58 | logrus.WithError(err).Error("error accepting connection") 59 | continue 60 | } 61 | 62 | logrus.WithField("remote", conn.RemoteAddr()).Trace("accepted connection ") 63 | go func() { 64 | // release limit 65 | defer func(maxChan chan bool) { <-maxChan }(maxChan) 66 | err := l.processor.ProcessConnection(conn) 67 | if err != nil { 68 | logrus.WithField("conn", conn.RemoteAddr()).WithError(err).Warn("error on connection") 69 | } 70 | }() 71 | } 72 | }() 73 | 74 | } 75 | func (l *GeneralTCPProxyListener) Stop() { 76 | l.closed = true 77 | err := l.ln.Close() 78 | if err != nil { 79 | logrus.WithError(err).Error("error closing connection") 80 | } 81 | l.processor.Stop() 82 | 83 | } 84 | -------------------------------------------------------------------------------- /main.go: -------------------------------------------------------------------------------- 1 | // Copyright © 2019 NAME HERE 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 
14 | 15 | package main 16 | 17 | import "github.com/annchain/BlockDB/cmd" 18 | 19 | func main() { 20 | cmd.Execute() 21 | } 22 | -------------------------------------------------------------------------------- /multiplexer/bimap.go: -------------------------------------------------------------------------------- 1 | package multiplexer 2 | 3 | import ( 4 | "errors" 5 | "github.com/sirupsen/logrus" 6 | "net" 7 | "sync" 8 | ) 9 | 10 | type BiMapConn struct { 11 | sourceTargetMap map[net.Conn]net.Conn 12 | targetSourceMap map[net.Conn]net.Conn 13 | mapLock sync.RWMutex 14 | } 15 | 16 | func NewBiMapConn() *BiMapConn { 17 | return &BiMapConn{ 18 | sourceTargetMap: make(map[net.Conn]net.Conn), 19 | targetSourceMap: make(map[net.Conn]net.Conn), 20 | } 21 | } 22 | 23 | func (b *BiMapConn) RegisterPair(source net.Conn, target net.Conn) error { 24 | b.mapLock.Lock() 25 | defer b.mapLock.Unlock() 26 | 27 | if _, ok := b.sourceTargetMap[source]; ok { 28 | return errors.New("duplicate source") 29 | } 30 | if _, ok := b.targetSourceMap[target]; ok { 31 | return errors.New("duplicate target") 32 | } 33 | b.sourceTargetMap[source] = target 34 | b.targetSourceMap[target] = source 35 | return nil 36 | } 37 | 38 | func (b *BiMapConn) UnregisterPair(part net.Conn) (counterPart net.Conn) { 39 | b.mapLock.Lock() 40 | defer b.mapLock.Unlock() 41 | 42 | logrus.WithField("part", part.RemoteAddr().String()).Info("unregistering") 43 | 44 | if v, ok := b.sourceTargetMap[part]; ok { 45 | counterPart = v 46 | delete(b.sourceTargetMap, part) 47 | delete(b.targetSourceMap, v) 48 | return 49 | } 50 | if v, ok := b.targetSourceMap[part]; ok { 51 | counterPart = v 52 | delete(b.targetSourceMap, part) 53 | delete(b.sourceTargetMap, v) 54 | return 55 | } 56 | return nil 57 | } 58 | 59 | func (b *BiMapConn) GetCounterPart(part net.Conn) (counterPart net.Conn) { 60 | if v, ok := b.sourceTargetMap[part]; ok { 61 | counterPart = v 62 | return 63 | } 64 | if v, ok := b.targetSourceMap[part]; ok { 65 | 
counterPart = v 66 | return 67 | } 68 | return nil 69 | } 70 | 71 | func (b *BiMapConn) Size() int { 72 | return len(b.sourceTargetMap) 73 | } 74 | -------------------------------------------------------------------------------- /multiplexer/connection_builder.go: -------------------------------------------------------------------------------- 1 | package multiplexer 2 | 3 | import "net" 4 | 5 | type ConnectionBuilder interface { 6 | BuildConnection() (net.Conn, error) 7 | } 8 | type DefaultTCPConnectionBuilder struct { 9 | Url string 10 | } 11 | 12 | func NewDefaultTCPConnectionBuilder(url string) *DefaultTCPConnectionBuilder { 13 | return &DefaultTCPConnectionBuilder{ 14 | Url: url, 15 | } 16 | } 17 | 18 | func (b *DefaultTCPConnectionBuilder) BuildConnection() (net.Conn, error) { 19 | c, err := net.Dial("tcp", b.Url) 20 | return c, err 21 | } 22 | -------------------------------------------------------------------------------- /multiplexer/dialog_context.go: -------------------------------------------------------------------------------- 1 | package multiplexer 2 | 3 | import "net" 4 | 5 | type DialogContext struct { 6 | User string 7 | Source net.Conn 8 | Target net.Conn 9 | } 10 | -------------------------------------------------------------------------------- /multiplexer/multiplexer.go: -------------------------------------------------------------------------------- 1 | package multiplexer 2 | 3 | import ( 4 | "bufio" 5 | "github.com/sirupsen/logrus" 6 | "io" 7 | "net" 8 | "time" 9 | ) 10 | 11 | type Multiplexer struct { 12 | observerFactory ObserverFactory 13 | targetConnectionBuilder ConnectionBuilder 14 | closed bool 15 | biMapConn *BiMapConn 16 | } 17 | 18 | func NewMultiplexer(targetConnectionBuilder ConnectionBuilder, observerFactory ObserverFactory) *Multiplexer { 19 | return &Multiplexer{ 20 | observerFactory: observerFactory, 21 | targetConnectionBuilder: targetConnectionBuilder, 22 | biMapConn: NewBiMapConn(), 23 | } 24 | } 25 | 26 | func (m 
*Multiplexer) buildConnection() (target net.Conn, err error) { 27 | target, err = m.targetConnectionBuilder.BuildConnection() 28 | return 29 | } 30 | 31 | func (p *Multiplexer) StartBidirectionalForwarding(context DialogContext) { 32 | logrus.WithField("from", context.Source.RemoteAddr().String()). 33 | WithField("to", context.Target.RemoteAddr().String()). 34 | Info("start multiplexer bidirectional forwarding") 35 | 36 | observer := p.observerFactory.GetInstance(context) 37 | 38 | go p.keepForwarding(context.Source, context.Target, []*bufio.Writer{bufio.NewWriter(observer.GetIncomingWriter())}) 39 | go p.keepForwarding(context.Target, context.Source, []*bufio.Writer{bufio.NewWriter(observer.GetOutgoingWriter())}) 40 | } 41 | 42 | func (m *Multiplexer) keepForwarding(source net.Conn, target net.Conn, observerWriter []*bufio.Writer) { 43 | var buffer = make([]byte, 1024) 44 | reader := bufio.NewReader(source) 45 | writer := bufio.NewWriter(target) 46 | 47 | allWriters := []*bufio.Writer{writer} 48 | allWriters = append(allWriters, observerWriter...) 
49 | 50 | for !m.closed { 51 | logrus.Trace("gonna read bytes....") 52 | sizeRead, err := reader.Read(buffer) 53 | logrus.WithField("len", sizeRead).WithError(err).Trace("read bytes") 54 | if err == io.EOF { 55 | logrus.WithField("addr", source.RemoteAddr().String()).Warn("EOF") 56 | _ = m.quitPair(source) 57 | break 58 | } else if err != nil { 59 | logrus.WithField("addr", source.RemoteAddr().String()).WithError(err).Warn("read error") 60 | _ = m.quitPair(source) 61 | break 62 | } 63 | 64 | // forward the message to all writers 65 | // TODO: optimize it so that observer do not block target 66 | for _, writer := range allWriters { 67 | sizeWritten, err := writer.Write(buffer[0:sizeRead]) 68 | _ = writer.Flush() 69 | logrus.WithError(err).WithField("len", sizeWritten).Trace("wrote bytes") 70 | if err != nil { 71 | logrus.WithField("len", sizeWritten).WithError(err).Warn("error on writing") 72 | break 73 | } 74 | } 75 | } 76 | } 77 | 78 | func (m *Multiplexer) ProcessConnection(source net.Conn) (err error) { 79 | // use connection builder to build a target connection 80 | // make pair 81 | // monitor them 82 | logrus.Trace("in") 83 | 84 | // build a writer 85 | target, err := m.buildConnection() 86 | if err != nil { 87 | logrus.WithError(err).Warn("failed to build target connection") 88 | // close the conn 89 | _ = source.Close() 90 | return err 91 | } 92 | // register both connection in the symmetric pool 93 | err = m.biMapConn.RegisterPair(source, target) 94 | if err != nil { 95 | _ = source.Close() 96 | _ = target.Close() 97 | return err 98 | } 99 | // build context to store connection info such as IP, identity, etc 100 | context := DialogContext{ 101 | Source: source, 102 | Target: target, 103 | } 104 | 105 | m.StartBidirectionalForwarding(context) 106 | return nil 107 | } 108 | 109 | func (p *Multiplexer) quitPair(part net.Conn) (err error) { 110 | logrus.Debug("pair quitting") 111 | err = part.Close() 112 | if err != nil { 113 | 
logrus.WithError(err).Warn("error on closing part") 114 | } 115 | 116 | // find the counter part and close it also. 117 | counterPart := p.biMapConn.UnregisterPair(part) 118 | if counterPart == nil { 119 | // already unregistered by others 120 | return 121 | } 122 | err = counterPart.Close() 123 | if err != nil { 124 | logrus.WithError(err).Warn("error on closing counterpart") 125 | } 126 | 127 | return err 128 | } 129 | 130 | func (m *Multiplexer) Start() { 131 | go func() { 132 | for !m.closed { 133 | logrus.WithField("size", m.biMapConn.Size()).Debug("poolsize") 134 | time.Sleep(time.Second * 60) 135 | } 136 | }() 137 | } 138 | 139 | func (m *Multiplexer) Stop() { 140 | m.closed = true 141 | } 142 | -------------------------------------------------------------------------------- /multiplexer/multiplexer_test.go: -------------------------------------------------------------------------------- 1 | package multiplexer 2 | 3 | import ( 4 | "fmt" 5 | "github.com/sirupsen/logrus" 6 | "net" 7 | "testing" 8 | ) 9 | 10 | func TestPool(t *testing.T) { 11 | 12 | ln, err := net.Listen("tcp", fmt.Sprintf(":%v", 27017)) 13 | if err != nil { 14 | logrus.WithError(err).WithField("port", 5656).Error("error listening on port") 15 | return 16 | } 17 | 18 | builder := NewDefaultTCPConnectionBuilder("172.28.152.101:27017") 19 | observer := NewDumper("req", "resp") 20 | 21 | multiplexer := NewMultiplexer(builder, observer) 22 | 23 | for { 24 | conn, err := ln.Accept() 25 | logrus.WithField("conn", conn.RemoteAddr()).Info("Accepted") 26 | if err != nil { 27 | logrus.WithError(err).Error("error accepting connection") 28 | return 29 | } 30 | go func() { 31 | // release limit 32 | err := multiplexer.ProcessConnection(conn) 33 | if err != nil { 34 | logrus.WithField("conn", conn.RemoteAddr()).WithError(err).Warn("error on connection") 35 | } 36 | multiplexer.Start() 37 | }() 38 | } 39 | } 40 | -------------------------------------------------------------------------------- 
/multiplexer/observer.go: -------------------------------------------------------------------------------- 1 | package multiplexer 2 | 3 | import ( 4 | "encoding/hex" 5 | "fmt" 6 | "io" 7 | ) 8 | 9 | type ObserverFactory interface { 10 | GetInstance(context DialogContext) Observer 11 | } 12 | 13 | type Observer interface { 14 | GetIncomingWriter() io.Writer 15 | GetOutgoingWriter() io.Writer 16 | } 17 | 18 | type ByteDumper struct { 19 | Name string 20 | } 21 | 22 | func (b *ByteDumper) Write(p []byte) (n int, err error) { 23 | fmt.Println(b.Name) 24 | fmt.Println(hex.Dump(p)) 25 | return len(p), nil 26 | } 27 | 28 | type Dumper struct { 29 | incoming io.Writer 30 | outgoing io.Writer 31 | } 32 | 33 | func NewDumper(incomingName string, outgoingName string) *Dumper { 34 | return &Dumper{ 35 | incoming: &ByteDumper{incomingName}, 36 | outgoing: &ByteDumper{outgoingName}, 37 | } 38 | } 39 | func (d *Dumper) GetIncomingWriter() io.Writer { 40 | return d.incoming 41 | } 42 | 43 | func (d *Dumper) GetOutgoingWriter() io.Writer { 44 | return d.outgoing 45 | } 46 | -------------------------------------------------------------------------------- /mylog/loggers.go: -------------------------------------------------------------------------------- 1 | // Copyright © 2019 Annchain Authors 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 
14 | package mylog 15 | 16 | import ( 17 | "fmt" 18 | "github.com/lestrrat/go-file-rotatelogs" 19 | "github.com/sirupsen/logrus" 20 | "io" 21 | "os" 22 | "path" 23 | "path/filepath" 24 | "time" 25 | ) 26 | 27 | func panicIfError(err error, message string) { 28 | if err != nil { 29 | fmt.Println(message) 30 | fmt.Println(err.Error()) 31 | os.Exit(1) 32 | } 33 | } 34 | 35 | func RotateLog(abspath string) *rotatelogs.RotateLogs { 36 | logFile, err := rotatelogs.New( 37 | abspath+"%Y%m%d%H%M.log", 38 | rotatelogs.WithLinkName(abspath+".log"), 39 | rotatelogs.WithMaxAge(24*time.Hour*7), 40 | rotatelogs.WithRotationTime(time.Hour*24), 41 | ) 42 | panicIfError(err, "err init log") 43 | return logFile 44 | } 45 | 46 | func InitLogger(logger *logrus.Logger, logdir string, outputFile string) *logrus.Logger { 47 | var writer io.Writer 48 | if logdir != "" { 49 | folderPath, err := filepath.Abs(logdir) 50 | panicIfError(err, fmt.Sprintf("Error on parsing log path: %s", logdir)) 51 | 52 | abspath, err := filepath.Abs(path.Join(logdir, outputFile)) 53 | panicIfError(err, fmt.Sprintf("Error on parsing log file path: %s", logdir)) 54 | 55 | err = os.MkdirAll(folderPath, os.ModePerm) 56 | panicIfError(err, fmt.Sprintf("Error on creating log dir: %s", folderPath)) 57 | 58 | logrus.WithField("path", abspath).Info("Additional logger") 59 | //logFile, err := os.OpenFile(abspath, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) 60 | panicIfError(err, fmt.Sprintf("Error on creating log file: %s", abspath)) 61 | //write a message to just one files 62 | 63 | writer = io.MultiWriter(logger.Out, RotateLog(abspath)) 64 | } else { 65 | writer = logger.Out 66 | } 67 | return &logrus.Logger{ 68 | Level: logger.Level, 69 | Formatter: logger.Formatter, 70 | Out: writer, 71 | Hooks: logger.Hooks, 72 | ReportCaller: logger.ReportCaller, 73 | ExitFunc: logger.ExitFunc, 74 | } 75 | } 76 | -------------------------------------------------------------------------------- /ogws/audit_reader.go: 
-------------------------------------------------------------------------------- 1 | package ogws 2 | 3 | import ( 4 | "context" 5 | "encoding/json" 6 | "time" 7 | 8 | bson2 "github.com/globalsign/mgo/bson" 9 | "github.com/sirupsen/logrus" 10 | "go.mongodb.org/mongo-driver/bson" 11 | "go.mongodb.org/mongo-driver/bson/primitive" 12 | "go.mongodb.org/mongo-driver/mongo/options" 13 | ) 14 | 15 | type RawData struct { 16 | Id primitive.ObjectID `json:"id" bson:"_id,omitempty"` 17 | AuditEvent 18 | } 19 | 20 | func (l *MongoDBAuditWriter) Query(filter bson.M, limit, skip int64) (resp []RawData, count int64, err error) { 21 | ctx, _ := context.WithTimeout(context.Background(), 8*time.Second) 22 | if logrus.GetLevel() > logrus.DebugLevel { 23 | logData, _ := json.Marshal(&filter) 24 | logrus.WithField("filter", string(logData)).Trace("query filter") 25 | } 26 | count, err = l.coll.CountDocuments(ctx, filter) 27 | if err != nil { 28 | logrus.WithError(err).Error("count collection: %s error", l.coll.Name()) 29 | return 30 | } 31 | cur, err := l.coll.Find(ctx, filter, &options.FindOptions{Limit: &limit, Skip: &skip, Sort: bson.M{"_id": -1}}) 32 | if err != nil { 33 | logrus.WithError(err).Errorf("find from collection: %s error", l.coll.Name()) 34 | return 35 | } 36 | defer cur.Close(ctx) 37 | for cur.Next(ctx) { 38 | var o RawData 39 | var event AuditEvent 40 | val := cur.Current.Lookup("_id") 41 | var id primitive.ObjectID 42 | err := val.Unmarshal(&id) 43 | if err != nil { 44 | logrus.WithError(err).WithField("val", val).Error("decode id failed") 45 | continue 46 | } 47 | o.Id = id 48 | err = bson2.Unmarshal(cur.Current, &event) 49 | if err != nil { 50 | logrus.WithError(err).Error("decode failed") 51 | continue 52 | } 53 | o.AuditEvent = event 54 | resp = append(resp, o) 55 | } 56 | err = cur.Err() 57 | if err != nil { 58 | logrus.WithError(err).Error("read err") 59 | return 60 | } 61 | return 62 | } 63 | 
-------------------------------------------------------------------------------- /ogws/audit_write_test.go: -------------------------------------------------------------------------------- 1 | package ogws 2 | 3 | import ( 4 | "encoding/json" 5 | "testing" 6 | "time" 7 | 8 | bson2 "github.com/globalsign/mgo/bson" 9 | "go.mongodb.org/mongo-driver/bson" 10 | ) 11 | 12 | type InnerData struct { 13 | Type string `json:"type"` 14 | Person Person `json:"person"` 15 | Loc string `json:"loc"` 16 | } 17 | 18 | type Person struct { 19 | Age int 20 | Name string 21 | } 22 | 23 | func TestWriter(t *testing.T) { 24 | var o = &AuditEvent{} 25 | o.AccountNonce = 23 26 | o.Hash = "hash" 27 | o.Height = 34 28 | o.MineNonce = 21 29 | o.Data = &AuditEventDetail{ 30 | Ip: "1.3.4.5", 31 | Identity: "23455", 32 | PrimaryKey: "haah", 33 | Timestamp: time.Now().Format(time.RFC3339), 34 | Data: InnerData{Person: Person{ 35 | Age: 0, 36 | Name: "", 37 | }, 38 | Type: "haha", 39 | Loc: "shanghai", 40 | }, 41 | } 42 | bytes, err := bson.Marshal(o) 43 | if err != nil { 44 | t.Fatal(err) 45 | } 46 | t.Log(string(bytes)) 47 | var e = &AuditEvent{} 48 | err = bson.Unmarshal(bytes, e) 49 | out, err := json.MarshalIndent(e, "", "\t") 50 | t.Log(string(out)) 51 | 52 | bytes, err = bson2.Marshal(o) 53 | if err != nil { 54 | t.Fatal(err) 55 | } 56 | t.Log(string(bytes)) 57 | e = &AuditEvent{} 58 | err = bson2.Unmarshal(bytes, e) 59 | out, err = json.MarshalIndent(e, "", "\t") 60 | t.Log(string(out)) 61 | } 62 | -------------------------------------------------------------------------------- /ogws/audit_writer.go: -------------------------------------------------------------------------------- 1 | package ogws 2 | 3 | import ( 4 | "context" 5 | "time" 6 | 7 | "github.com/sirupsen/logrus" 8 | "go.mongodb.org/mongo-driver/bson" 9 | "go.mongodb.org/mongo-driver/mongo" 10 | "go.mongodb.org/mongo-driver/mongo/options" 11 | "go.mongodb.org/mongo-driver/mongo/readpref" 12 | ) 13 | 14 | // AuditWriter is the 
OG 15 | type AuditWriter interface { 16 | // receive OG event and write it to the backend storage 17 | WriteOGMessage(o *AuditEvent) error 18 | GetCollection() *mongo.Collection 19 | GetOriginalDataProcessor() OriginalDataProcessor 20 | Query(f bson.M, limit, offset int64) ([]RawData, int64, error) 21 | } 22 | 23 | type MongoDBAuditWriter struct { 24 | connectionString string 25 | coll *mongo.Collection 26 | originalDataProcessor OriginalDataProcessor 27 | } 28 | 29 | func (m *MongoDBAuditWriter) WriteOGMessage(o *AuditEvent) error { 30 | ctx, _ := context.WithTimeout(context.Background(), 5*time.Second) 31 | bytes, err := bson.Marshal(o) 32 | if err != nil { 33 | return err 34 | } 35 | _, err = m.coll.InsertOne(ctx, bytes) 36 | if err != nil { 37 | return err 38 | } 39 | return nil 40 | } 41 | 42 | func NewMongoDBAuditWriter(connectionString string, database string, collection string) *MongoDBAuditWriter { 43 | m := &MongoDBAuditWriter{connectionString: connectionString} 44 | ctx, _ := context.WithTimeout(context.Background(), 10*time.Second) 45 | client, err := mongo.Connect(ctx, options.Client().ApplyURI(m.connectionString)) 46 | if err != nil { 47 | logrus.WithError(err).Fatal("failed to connect to audit mongo") 48 | } 49 | coll := client.Database(database).Collection(collection) 50 | m.coll = coll 51 | m.originalDataProcessor = &originalDataProcessor{ 52 | coll: client.Database(database).Collection(collection + "_original_data_"), 53 | } 54 | ctx, _ = context.WithTimeout(context.Background(), 5*time.Second) 55 | err = client.Ping(ctx, readpref.Primary()) 56 | if err != nil { 57 | logrus.WithError(err).Error("ping mongo err,will panic") 58 | panic(err) 59 | } 60 | m.createUsersIndex(m.coll) 61 | m.createUsersIndex(m.originalDataProcessor.GetCollection()) 62 | return m 63 | } 64 | 65 | func (m *MongoDBAuditWriter) createUsersIndex(coll *mongo.Collection) { 66 | unique := true 67 | ctx, _ := context.WithTimeout(context.Background(), time.Second*10) 68 | _, err := 
coll.Indexes().CreateMany(ctx, []mongo.IndexModel{ 69 | { 70 | Keys: bson.M{"hash": 1}, 71 | Options: &options.IndexOptions{Unique: &unique}}, 72 | }) 73 | if err != nil { 74 | logrus.WithError(err).Warn("create index error") 75 | } 76 | } 77 | 78 | func (m *MongoDBAuditWriter) GetCollection() *mongo.Collection { 79 | return m.coll 80 | } 81 | 82 | func (m *MongoDBAuditWriter) GetOriginalDataProcessor() OriginalDataProcessor { 83 | return m.originalDataProcessor 84 | } 85 | -------------------------------------------------------------------------------- /ogws/client.go: -------------------------------------------------------------------------------- 1 | package ogws 2 | 3 | import ( 4 | "encoding/base64" 5 | "encoding/json" 6 | "net/http" 7 | "net/url" 8 | "time" 9 | 10 | "github.com/annchain/BlockDB/processors" 11 | "github.com/latifrons/gorews" 12 | "github.com/sirupsen/logrus" 13 | "github.com/spf13/viper" 14 | ) 15 | 16 | type OGWSClient struct { 17 | url *url.URL 18 | auditWriter AuditWriter 19 | client *gorews.GorewsClient 20 | OGHeight int 21 | } 22 | 23 | func (o *OGWSClient) SetHeight(height int) { 24 | o.OGHeight = height 25 | } 26 | 27 | func NewOGWSClient(ustr string, auditWriter AuditWriter) *OGWSClient { 28 | // connect to ws server 29 | u, err := url.Parse(ustr) 30 | if err != nil { 31 | logrus.WithField("url", viper.GetString("og.wsclient.url")).Fatal("cannot parse ogws client") 32 | } 33 | 34 | return &OGWSClient{ 35 | url: u, 36 | auditWriter: auditWriter, 37 | } 38 | } 39 | 40 | func (o *OGWSClient) Start() { 41 | 42 | logrus.WithField("url", o.url).Info("connecting to ws") 43 | 44 | o.client = gorews.NewGorewsClient() 45 | var headers http.Header 46 | err := o.client.Start(o.url.String(), headers, time.Second*5, time.Second*5, time.Second*5) 47 | if err != nil { 48 | logrus.WithError(err).Fatal("init ws client") 49 | } 50 | 51 | logrus.WithField("url", o.url).Info("connected to ws") 52 | 53 | o.client.Outgoing <- []byte("{\"event\":\"new_tx\"}") 
54 | 55 | go func() { 56 | for { 57 | msg := <-o.client.Incoming 58 | _, err := o.handleMessage(msg) 59 | if err != nil { 60 | logrus.WithError(err).Warn("failed to handle message: " + string(msg)) 61 | } 62 | } 63 | }() 64 | } 65 | 66 | func (o *OGWSClient) Stop() { 67 | o.client.Stop() 68 | } 69 | 70 | func (OGWSClient) Name() string { 71 | return "OGWSClient" 72 | } 73 | 74 | const ( 75 | TxTypeSequencer = 1 76 | TxTypeArchive = 4 77 | ) 78 | 79 | func (o *OGWSClient) handleMessage(bytes []byte) (result OGMessageList, err error) { 80 | var ogmss OGMessageList 81 | err = json.Unmarshal(bytes, &ogmss) 82 | if err != nil { 83 | return 84 | } 85 | for _, ogms := range ogmss.Nodes { 86 | switch ogms.Type { 87 | case TxTypeSequencer: 88 | o.SetHeight(ogms.Height) 89 | logrus.Info("height is updated") 90 | case TxTypeArchive: 91 | // base64 decode 92 | dataBytes, err := base64.StdEncoding.DecodeString(ogms.DataBase64) 93 | if err != nil { 94 | logrus.WithError(err).Warn("failed to decode base64 string. Skip this event.") 95 | continue 96 | } 97 | if o.OGHeight < ogms.Height-2 { 98 | o.SetHeight(ogms.Height) 99 | } 100 | 101 | var logEvent processors.LogEvent 102 | err = json.Unmarshal(dataBytes, &logEvent) 103 | if err != nil { 104 | logrus.WithError(err).Warn("failed to decode logEvent. 
Skip this event.") 105 | continue 106 | } 107 | 108 | auditEventDetail := FromLogEvent(&logEvent) 109 | 110 | auditEvent := &AuditEvent{ 111 | Signature: ogms.Signature, 112 | Type: ogms.Type, 113 | PublicKey: ogms.PublicKey, 114 | AccountNonce: ogms.AccountNonce, 115 | Hash: ogms.Hash, 116 | Height: ogms.Height, 117 | MineNonce: ogms.MineNonce, 118 | ParentsHash: ogms.ParentsHash, 119 | Version: ogms.Version, 120 | Weight: ogms.Weight, 121 | Data: &auditEventDetail, 122 | } 123 | err = o.auditWriter.WriteOGMessage(auditEvent) 124 | if err != nil { 125 | logrus.WithError(err).Warn("failed to write ledger.") 126 | continue 127 | } 128 | o.auditWriter.GetOriginalDataProcessor().DeleteOne(ogms.Hash) 129 | logrus.WithField("event timestamp", auditEvent.Height).Debug("wrote audit event") 130 | 131 | default: 132 | continue 133 | } 134 | } 135 | return 136 | } 137 | -------------------------------------------------------------------------------- /ogws/model.go: -------------------------------------------------------------------------------- 1 | package ogws 2 | 3 | import ( 4 | "github.com/annchain/BlockDB/processors" 5 | "time" 6 | ) 7 | 8 | type OGMessageList struct { 9 | Nodes []OGMessage `json:"nodes"` 10 | } 11 | 12 | // received from OG 13 | type OGMessage struct { 14 | Type int `json:"type"` 15 | Hash string `json:"hash"` 16 | ParentsHash []string `json:"parents_hash"` 17 | AccountNonce int `json:"account_nonce"` 18 | Height int `json:"height"` 19 | PublicKey string `json:"public_key"` 20 | Signature string `json:"signature"` 21 | MineNonce int `json:"mine_nonce"` 22 | Weight int `json:"weight"` 23 | Version int `json:"version"` 24 | DataBase64 string `json:"data"` 25 | } 26 | 27 | // push to MongoDB 28 | type AuditEvent struct { 29 | Type int `json:"type"` 30 | Hash string `json:"hash"` 31 | ParentsHash []string `json:"parents_hash"` 32 | AccountNonce int `json:"account_nonce"` 33 | Height int `json:"height"` 34 | PublicKey string `json:"public_key"` 35 | 
Signature string `json:"signature"` 36 | MineNonce int `json:"mine_nonce"` 37 | Weight int `json:"weight"` 38 | Version int `json:"version"` 39 | Data *AuditEventDetail `json:"data"` 40 | } 41 | 42 | type AuditEventDetail struct { 43 | Identity string `json:"identity"` 44 | Type string `json:"type"` 45 | Ip string `json:"ip"` 46 | PrimaryKey string `json:"primary_key"` 47 | Timestamp string `json:"timestamp"` 48 | Data interface{} `json:"data"` 49 | Before string `json:"before"` 50 | After string `json:"after"` 51 | } 52 | 53 | func FromLogEvent(l *processors.LogEvent) (a AuditEventDetail) { 54 | strt := time.Unix(0, l.Timestamp*int64(1000000)) 55 | loc, err := time.LoadLocation("Local") 56 | if err != nil { 57 | panic(err) 58 | } 59 | strt = strt.In(loc) 60 | 61 | a = AuditEventDetail{ 62 | Type: l.Type, 63 | Data: l.Data, 64 | PrimaryKey: l.PrimaryKey, 65 | Ip: l.Ip, 66 | Identity: l.Identity, 67 | After: l.After, 68 | Before: l.Before, 69 | Timestamp: strt.Format("2006-01-02 15:04:05"), 70 | } 71 | return 72 | } 73 | -------------------------------------------------------------------------------- /ogws/original_data.go: -------------------------------------------------------------------------------- 1 | package ogws 2 | 3 | import ( 4 | "context" 5 | "encoding/json" 6 | "time" 7 | 8 | bson2 "github.com/globalsign/mgo/bson" 9 | "github.com/sirupsen/logrus" 10 | "go.mongodb.org/mongo-driver/bson" 11 | "go.mongodb.org/mongo-driver/bson/primitive" 12 | "go.mongodb.org/mongo-driver/mongo" 13 | "go.mongodb.org/mongo-driver/mongo/options" 14 | ) 15 | 16 | const UpdateTimeFormat = "2006-01-02T15:04:05.999Z07:00" 17 | 18 | type OriginalDataProcessor interface { 19 | DeleteOne(hash string) error 20 | DeleteMany(hashes []string) error 21 | InsertOne(hash string, data interface{}) error 22 | UpdateHash(Id primitive.ObjectID, hash string) error 23 | GetExpired(duration time.Duration, limit, offset int64) ([]OriginalData, int64, error) 24 | Query(f bson.M, limit, offset 
int64) ([]OriginalData, int64, error) 25 | GetCollection() *mongo.Collection 26 | } 27 | 28 | type originalDataProcessor struct { 29 | coll *mongo.Collection 30 | } 31 | 32 | type OriginalRawData struct { 33 | Data interface{} `json:"data" bson:"data"` 34 | Hash string `json:"hash" bson:"hash"` 35 | UpdateTime string `json:"update_time" bson:"update_time"` 36 | } 37 | 38 | type OriginalData struct { 39 | Id primitive.ObjectID `json:"id" bson:"_id,omitempty"` 40 | OriginalRawData 41 | } 42 | 43 | func (o *originalDataProcessor) DeleteOne(hash string) error { 44 | ctx, _ := context.WithTimeout(context.Background(), 5*time.Second) 45 | f := bson.M{"hash": hash} 46 | _, err := o.coll.DeleteOne(ctx, f) 47 | if err != nil { 48 | return err 49 | } 50 | return nil 51 | } 52 | 53 | func (o *originalDataProcessor) DeleteMany(hashes []string) error { 54 | ctx, _ := context.WithTimeout(context.Background(), 5*time.Second) 55 | f := bson.M{"hash": bson.M{"$in": hashes}} 56 | _, err := o.coll.DeleteMany(ctx, f) 57 | if err != nil { 58 | return err 59 | } 60 | return nil 61 | } 62 | 63 | func (o *originalDataProcessor) InsertOne(hash string, data interface{}) error { 64 | ctx, _ := context.WithTimeout(context.Background(), 5*time.Second) 65 | od := &OriginalRawData{ 66 | Data: data, 67 | Hash: hash, 68 | UpdateTime: time.Now().Format(UpdateTimeFormat), 69 | } 70 | bytes, err := bson.Marshal(od) 71 | if err != nil { 72 | return err 73 | } 74 | _, err = o.coll.InsertOne(ctx, bytes) 75 | if err != nil { 76 | return err 77 | } 78 | return nil 79 | } 80 | 81 | func (o *originalDataProcessor) UpdateHash(Id primitive.ObjectID, hash string) error { 82 | ctx, _ := context.WithTimeout(context.Background(), 5*time.Second) 83 | f := bson.M{"_id": Id} 84 | _, err := o.coll.UpdateOne(ctx, f, bson.M{"hash": hash}) 85 | if err != nil { 86 | return err 87 | } 88 | return nil 89 | } 90 | 91 | func (o *originalDataProcessor) GetExpired(duration time.Duration, limit, offset int64) (resp 
[]OriginalData, count int64, err error) { 92 | timeFilter := time.Now().Add(-duration).Format(UpdateTimeFormat) 93 | filter := bson.M{"update_time": bson.M{"$lt": timeFilter}} 94 | return o.Query(filter, limit, offset) 95 | } 96 | 97 | func (l *originalDataProcessor) Query(filter bson.M, limit, skip int64) (resp []OriginalData, count int64, err error) { 98 | ctx, _ := context.WithTimeout(context.Background(), 8*time.Second) 99 | if logrus.GetLevel() > logrus.DebugLevel { 100 | logData, _ := json.Marshal(&filter) 101 | logrus.WithField("filter", string(logData)).Trace("query filter") 102 | } 103 | count, err = l.coll.CountDocuments(ctx, filter) 104 | if err != nil { 105 | return 106 | } 107 | cur, err := l.coll.Find(ctx, filter, &options.FindOptions{Limit: &limit, Skip: &skip, Sort: bson.M{"_id": -1}}) 108 | if err != nil { 109 | return 110 | } 111 | defer cur.Close(ctx) 112 | for cur.Next(ctx) { 113 | var o OriginalData 114 | var rawData OriginalRawData 115 | val := cur.Current.Lookup("_id") 116 | var id primitive.ObjectID 117 | err := val.Unmarshal(&id) 118 | if err != nil { 119 | logrus.WithError(err).WithField("val", val).Error("decode id failed") 120 | continue 121 | } 122 | o.Id = id 123 | err = bson2.Unmarshal(cur.Current, &rawData) 124 | if err != nil { 125 | logrus.WithError(err).Error("decode failed") 126 | continue 127 | } 128 | o.OriginalRawData = rawData 129 | resp = append(resp, o) 130 | } 131 | err = cur.Err() 132 | if err != nil { 133 | logrus.WithError(err).Error("read err") 134 | return 135 | } 136 | return 137 | } 138 | 139 | func (m *originalDataProcessor) GetCollection() *mongo.Collection { 140 | return m.coll 141 | } 142 | -------------------------------------------------------------------------------- /ogws/timezone_test.go: -------------------------------------------------------------------------------- 1 | package ogws 2 | 3 | import ( 4 | "fmt" 5 | "testing" 6 | "time" 7 | ) 8 | 9 | func TestTimeZone(t *testing.T) { 10 | l := 
int64(1564453129000) 11 | strt := time.Unix(0, l*int64(1000000)) 12 | loc, err := time.LoadLocation("Local") 13 | if err != nil { 14 | panic(err) 15 | } 16 | strt = strt.In(loc) 17 | fmt.Println(strt.Format("2006-01-02 15:04:05")) 18 | } 19 | -------------------------------------------------------------------------------- /plugins/client/og/fetch_data.go: -------------------------------------------------------------------------------- 1 | package og 2 | 3 | import ( 4 | "time" 5 | 6 | "github.com/sirupsen/logrus" 7 | ) 8 | 9 | const checkInterval = time.Second*5 10 | 11 | func (o *OgProcessor) fetchData() { 12 | 13 | pingTicker := time.NewTicker(checkInterval) 14 | defer pingTicker.Stop() 15 | outside: 16 | for { 17 | select { 18 | case <-pingTicker.C: 19 | limit:= int64(100) 20 | offset:= int64(0) 21 | count :=int64(10) 22 | for offset 0 { 32 | s = fmt.Sprintf("%f + i%f", r, i) 33 | } else { 34 | s = fmt.Sprintf("%f - i%f", real(c), -i) 35 | } 36 | return json.Marshal(&s) 37 | } 38 | 39 | type testObjectData struct { 40 | H []byte 41 | K uint32 42 | L string 43 | } 44 | 45 | func TestNewOgProcessor(t *testing.T) { 46 | logrus.SetLevel(logrus.TraceLevel) 47 | p := NewOgProcessor(OgProcessorConfig{LedgerUrl: "http://172.28.152.101:8040//new_archive", RetryTimes: 3, BufferSize: 15}) 48 | p.Start() 49 | defer p.Stop() 50 | p.EnqueueSendToLedger("this is a message") 51 | data := gettestData() 52 | p.EnqueueSendToLedger(data) 53 | time.Sleep(time.Second) 54 | } 55 | 56 | func gettestData() *testData { 57 | data := testData{ 58 | A: 45566, 59 | B: "what is this ? 
a message ?, test message", 60 | c: 56.78, 61 | D: complex(34.566, 78.9023), 62 | F: testObjectData{ 63 | H: []byte{0x04, 0x05, 0x06, 0x07, 0x08, 0x09}, 64 | K: 67, 65 | L: "this this a string of test message", 66 | }, 67 | } 68 | return &data 69 | } 70 | 71 | func TestBatch(t *testing.T) { 72 | logrus.SetLevel(logrus.WarnLevel) 73 | data := gettestData() 74 | p := NewOgProcessor(OgProcessorConfig{LedgerUrl: "http://172.28.152.101:8000//new_archive", BufferSize: 100, RetryTimes: 3}) 75 | p.Start() 76 | defer p.Stop() 77 | for { 78 | select { 79 | case <-time.After(20 * time.Microsecond): 80 | go p.EnqueueSendToLedger(data) 81 | } 82 | 83 | } 84 | } 85 | -------------------------------------------------------------------------------- /plugins/server/jsondata/json_data_processor.go: -------------------------------------------------------------------------------- 1 | package jsondata 2 | 3 | import ( 4 | "encoding/json" 5 | "github.com/annchain/BlockDB/processors" 6 | "github.com/sirupsen/logrus" 7 | "time" 8 | ) 9 | 10 | type JsonDataProcessorConfig struct { 11 | } 12 | 13 | type JsonDataProcessor struct { 14 | config JsonDataProcessorConfig 15 | } 16 | 17 | func NewJsonDataProcessor(config JsonDataProcessorConfig) *JsonDataProcessor { 18 | return &JsonDataProcessor{ 19 | config: config, 20 | } 21 | } 22 | 23 | func (m *JsonDataProcessor) Start() { 24 | logrus.Info("JsonDataProcessor started") 25 | } 26 | 27 | func (m *JsonDataProcessor) Stop() { 28 | logrus.Info("JsonDataProcessor stopped") 29 | } 30 | 31 | func (m *JsonDataProcessor) ParseCommand(bytes []byte) (events []*processors.LogEvent, err error) { 32 | var c processors.LogEvent 33 | if err := json.Unmarshal(bytes, &c); err != nil { 34 | logrus.WithError(err).Warn("bad format") 35 | return nil, err 36 | } 37 | 38 | if c.Type == "" { 39 | c.Type = "json" 40 | } 41 | if c.Timestamp == 0 { 42 | c.Timestamp = time.Now().UnixNano() / 1e6 43 | } 44 | return []*processors.LogEvent{&c}, nil 45 | 46 | } 47 | 
-------------------------------------------------------------------------------- /plugins/server/kafka/listener.go: -------------------------------------------------------------------------------- 1 | package kafka 2 | 3 | import ( 4 | "context" 5 | "strings" 6 | "sync" 7 | "time" 8 | 9 | "github.com/annchain/BlockDB/backends" 10 | "github.com/annchain/BlockDB/processors" 11 | "github.com/segmentio/kafka-go" 12 | "github.com/sirupsen/logrus" 13 | ) 14 | 15 | type KafkaProcessorConfig struct { 16 | Topic string 17 | Address string 18 | GroupId string 19 | } 20 | 21 | type KafkaListener struct { 22 | config KafkaProcessorConfig 23 | ledgerWriter backends.LedgerWriter 24 | dataProcessor processors.DataProcessor 25 | 26 | wg sync.WaitGroup 27 | stopped bool 28 | } 29 | 30 | func (k *KafkaListener) Name() string { 31 | return "KafkaListener" 32 | } 33 | 34 | func NewKafkaListener(config KafkaProcessorConfig, dataProcessor processors.DataProcessor, ledgerWriter backends.LedgerWriter) *KafkaListener { 35 | return &KafkaListener{ 36 | config: config, 37 | ledgerWriter: ledgerWriter, 38 | dataProcessor: dataProcessor, 39 | } 40 | } 41 | 42 | func (k *KafkaListener) Start() { 43 | if k.config.GroupId == "" { 44 | ps, _ := kafka.LookupPartitions(context.Background(), "tcp", k.config.Address, k.config.Topic) 45 | 46 | //currently we will listen to all partitions 47 | for _, p := range ps { 48 | k.wg.Add(1) 49 | go k.doListen(p.ID) 50 | } 51 | } else { 52 | k.wg.Add(1) 53 | go k.doListen(0) 54 | } 55 | logrus.Info("KafkaListener started") 56 | } 57 | 58 | func (k *KafkaListener) Stop() { 59 | k.stopped = true 60 | k.wg.Wait() 61 | logrus.Info("KafkaListener stopped") 62 | } 63 | 64 | func (k *KafkaListener) doListen(partitionId int) { 65 | brokers := strings.Split(k.config.Address, ";") 66 | r := kafka.NewReader(kafka.ReaderConfig{ 67 | Brokers: brokers, 68 | Topic: k.config.Topic, 69 | Partition: partitionId, 70 | MinBytes: 1, // 1B 71 | MaxBytes: 10e6, // 10MB, 72 | GroupID: 
k.config.GroupId, 73 | }) 74 | defer func() { 75 | _ = r.Close() 76 | k.wg.Done() 77 | }() 78 | if k.config.GroupId == "" { 79 | deadlineContext, _ := context.WithDeadline(context.Background(), time.Now().Add(time.Second*3)) 80 | err := r.SetOffsetAt(deadlineContext, time.Now()) 81 | if err != nil { 82 | logrus.WithError(err).Error("cannot set offset to partition") 83 | return 84 | } 85 | } 86 | logrus.WithField("brokers", brokers).WithField("groupid", k.config.GroupId).WithField("partition", partitionId).WithField("topic", k.config.Topic).Info("kafka consumer started") 87 | 88 | for !k.stopped { 89 | m, err := r.ReadMessage(context.Background()) 90 | if err != nil { 91 | logrus.WithError(err).WithField("partition", partitionId).Error("read msg error") 92 | time.Sleep(time.Second * 1) 93 | continue 94 | } 95 | s := string(m.Value) 96 | logrus.WithFields(logrus.Fields{ 97 | "partition": m.Partition, 98 | "offset": m.Offset, 99 | "msg": s, 100 | }).Info("message") 101 | 102 | events, err := k.dataProcessor.ParseCommand(m.Value) 103 | for _, event := range events { 104 | err = k.ledgerWriter.EnqueueSendToLedger(event) 105 | if err != nil { 106 | logrus.WithError(err).Warn("send to ledger err") 107 | } 108 | } 109 | } 110 | 111 | } 112 | -------------------------------------------------------------------------------- /plugins/server/kafka/listener_test.go: -------------------------------------------------------------------------------- 1 | package kafka 2 | 3 | import ( 4 | "fmt" 5 | "os" 6 | "testing" 7 | "time" 8 | 9 | "github.com/annchain/BlockDB/plugins/server/jsondata" 10 | "github.com/spf13/viper" 11 | ) 12 | 13 | func TestListener(t *testing.T) { 14 | 15 | f, _ := os.Open(`D:\ws\gitpublic\annchain\BlockDB\config.toml`) 16 | defer f.Close() 17 | viper.SetConfigType("toml") 18 | 19 | viper.ReadConfig(f) 20 | viper.Get("listener.kafka.address") 21 | viper.Debug() 22 | 23 | config := KafkaProcessorConfig{ 24 | Topic: viper.GetString("listener.kafka.topic"), 25 | 
Address: viper.GetString("listener.kafka.address"), 26 | GroupId: viper.GetString("listener.kafka.group_id"), 27 | } 28 | fmt.Println(config) 29 | l := NewKafkaListener(config, &jsondata.JsonDataProcessor{}, &ledgerSender{}) 30 | l.Start() 31 | 32 | for true { 33 | time.Sleep(time.Second) 34 | } 35 | } 36 | 37 | type ledgerSender struct { 38 | } 39 | 40 | func (l *ledgerSender) EnqueueSendToLedger(data interface{}) { 41 | fmt.Println(data) 42 | } 43 | -------------------------------------------------------------------------------- /plugins/server/log4j2/model.go: -------------------------------------------------------------------------------- 1 | package log4j2 2 | 3 | type Log4j2SocketEventInstant struct { 4 | Timestamp int64 `json:"epochSecond"` 5 | } 6 | 7 | type Log4j2SocketEvent struct { 8 | LoggerName string `json:"loggerName"` 9 | Message string `json:"message"` 10 | Instant Log4j2SocketEventInstant `json:"instant"` 11 | ContextMap map[string]interface{} `json:"contextMap"` 12 | } 13 | -------------------------------------------------------------------------------- /plugins/server/log4j2/processor.go: -------------------------------------------------------------------------------- 1 | package log4j2 2 | 3 | import ( 4 | "bufio" 5 | "encoding/json" 6 | "github.com/annchain/BlockDB/backends" 7 | "github.com/annchain/BlockDB/processors" 8 | "github.com/sirupsen/logrus" 9 | "io" 10 | "net" 11 | "time" 12 | ) 13 | 14 | type Log4j2SocketProcessorConfig struct { 15 | IdleConnectionTimeout time.Duration 16 | } 17 | 18 | type Log4j2SocketProcessor struct { 19 | config Log4j2SocketProcessorConfig 20 | ledgerWriter backends.LedgerWriter 21 | } 22 | 23 | func NewLog4j2SocketProcessor(config Log4j2SocketProcessorConfig, ledgerWriter backends.LedgerWriter) *Log4j2SocketProcessor { 24 | return &Log4j2SocketProcessor{ 25 | config: config, 26 | ledgerWriter: ledgerWriter, 27 | } 28 | } 29 | 30 | func (m *Log4j2SocketProcessor) Start() { 31 | 
logrus.Info("Log4j2SocketProcessor started") 32 | } 33 | 34 | func (m *Log4j2SocketProcessor) Stop() { 35 | logrus.Info("Log4j2SocketProcessor stopped") 36 | } 37 | 38 | func (m *Log4j2SocketProcessor) ProcessConnection(conn net.Conn) error { 39 | reader := bufio.NewReader(conn) 40 | for { 41 | conn.SetReadDeadline(time.Now().Add(m.config.IdleConnectionTimeout)) 42 | str, err := reader.ReadString(byte(0)) 43 | if err != nil { 44 | if err == io.EOF { 45 | logrus.Info("target closed") 46 | return nil 47 | } else if neterr, ok := err.(net.Error); ok && neterr.Timeout() { 48 | logrus.Info("target timeout") 49 | conn.Close() 50 | return nil 51 | } 52 | return err 53 | } 54 | str = str[:len(str)-1] 55 | // query command 56 | //fmt.Println(str) 57 | //fmt.Println(hex.Dump(bytes)) 58 | event := m.ParseCommand([]byte(str)) 59 | if event == nil { 60 | logrus.WithError(err).Warn("nil command") 61 | continue 62 | } 63 | event.Ip = conn.RemoteAddr().String() 64 | //fmt.Printf("%+v\n", event) 65 | 66 | // store it to blockchain 67 | //bytes, err := json.Marshal(event) 68 | //if err != nil { 69 | // logrus.WithError(err).Warn("cannot marshal event") 70 | //} 71 | //logrus.WithField("data", string(bytes)).Info("Send to OG") 72 | err = m.ledgerWriter.EnqueueSendToLedger(event) 73 | if err != nil { 74 | logrus.WithError(err).Warn("send to ledger err") 75 | } 76 | } 77 | } 78 | 79 | func (m *Log4j2SocketProcessor) ParseCommand(bytes []byte) *processors.LogEvent { 80 | log4j := Log4j2SocketEvent{} 81 | if err := json.Unmarshal(bytes, &log4j); err != nil { 82 | logrus.WithError(err).Warn("bad format") 83 | //fmt.Println(hex.Dump(bytes)) 84 | return nil 85 | } 86 | cmap := log4j.ContextMap 87 | cmap["message"] = log4j.Message 88 | 89 | data, err := json.Marshal(cmap) 90 | if err != nil { 91 | logrus.WithError(err).Warn("bad format") 92 | //fmt.Println(hex.Dump(bytes)) 93 | return nil 94 | } 95 | event := processors.LogEvent{ 96 | Timestamp: log4j.Instant.Timestamp, 97 | Data: 
string(data), 98 | Type: "log4j", 99 | } 100 | return &event 101 | 102 | } 103 | -------------------------------------------------------------------------------- /plugins/server/mongodb/message/message.go: -------------------------------------------------------------------------------- 1 | package message 2 | 3 | import ( 4 | "fmt" 5 | "github.com/annchain/BlockDB/common/bytes" 6 | "github.com/globalsign/mgo/bson" 7 | "strconv" 8 | ) 9 | 10 | const ( 11 | HeaderLen = 16 12 | ) 13 | 14 | type Message struct { 15 | DBUser string `json:"db_user"` 16 | DB string `json:"db"` 17 | Collection string `json:"collection"` 18 | Op string `json:"op"` 19 | DocID string `json:"id"` 20 | MongoMsg MongoMessage `json:"db_log"` 21 | } 22 | 23 | type MongoMessage interface { 24 | ExtractBasic() (user, db, collection, op, docId string) 25 | } 26 | 27 | type MessageHeader struct { 28 | MessageSize uint32 `json:"size"` 29 | RequestID uint32 `json:"req_id"` 30 | ResponseTo uint32 `json:"resp_to"` 31 | OpCode OpCode `json:"opcode"` 32 | } 33 | 34 | func DecodeHeader(b []byte) (*MessageHeader, error) { 35 | if len(b) < HeaderLen { 36 | return nil, fmt.Errorf("not enough length for header decoding, expect %d, get %d", HeaderLen, len(b)) 37 | } 38 | 39 | m := &MessageHeader{ 40 | MessageSize: bytes.GetUInt32(b, 0), 41 | RequestID: bytes.GetUInt32(b, 4), 42 | ResponseTo: bytes.GetUInt32(b, 8), 43 | OpCode: OpCode(bytes.GetUInt32(b, 12)), 44 | } 45 | return m, nil 46 | } 47 | 48 | // readCString read collection full name from byte, starting at pos. 49 | // Return the collection full name in string and the length of full name. 
50 | func readCString(b []byte, pos int) (string, int, error) { 51 | index := -1 52 | for i := pos; i < len(b); i++ { 53 | if b[i] == byte(0) { 54 | index = i 55 | break 56 | } 57 | } 58 | if index < 0 { 59 | return "", 0, fmt.Errorf("cannot read full collection name from bytes: %x", b) 60 | } 61 | 62 | cBytes := b[pos : index+1] 63 | s := "" 64 | for len(cBytes) > 0 { 65 | s = s + string(cBytes[0]) 66 | cBytes = cBytes[1:] 67 | } 68 | 69 | return s, index - pos + 1, nil 70 | } 71 | 72 | // readDocument read a bson.Document from a byte array. The read start from "pos", 73 | // returns a bson.Document and the size of the document in bytes. Or return error 74 | // if the read meets any problems. 75 | func readDocument(b []byte, pos int) (bson.D, int, error) { 76 | if len(b) < pos+4 { 77 | return nil, 0, fmt.Errorf("document too small for single size") 78 | } 79 | size := bytes.GetUInt32(b, pos) 80 | if len(b) < pos+int(size) { 81 | return nil, 0, fmt.Errorf("document too small for doc") 82 | } 83 | docB := b[pos : pos+int(size)] 84 | var doc bson.D 85 | err := bson.Unmarshal(docB, &doc) 86 | if err != nil { 87 | return nil, 0, fmt.Errorf("cannot unmarshal it to bson, err: %v", err) 88 | } 89 | return doc, int(size), nil 90 | } 91 | 92 | // isFlagSetUint32 checks flag status. Return true when it is on, otherwise false. 93 | func isFlagSetUInt32(b []byte, pos int, flagPos int) bool { 94 | // flag must in [0, 31] 95 | if flagPos > 31 || flagPos < 0 { 96 | return false 97 | } 98 | 99 | p := bytes.GetUInt32(b, pos) 100 | if p&FlagUIntSet[flagPos] > 0 { 101 | return true 102 | } 103 | return false 104 | } 105 | 106 | // isFlagInt32Set checks flag status. Return true when it is on, otherwise false. 
107 | func isFlagSetInt32(b []byte, pos int, flagPos int) bool { 108 | // flag must in [0, 31] 109 | if flagPos > 31 || flagPos < 0 { 110 | return false 111 | } 112 | 113 | p := bytes.GetInt32(b, pos) 114 | if p&FlagIntSet[flagPos] > 0 { 115 | return true 116 | } 117 | return false 118 | } 119 | 120 | func init() { 121 | 122 | // init all flag positions 123 | for i := 0; i < len(FlagUIntSet); i++ { 124 | ui32, _ := strconv.ParseUint(flagSetBinary[i], 2, 32) 125 | FlagUIntSet[i] = uint32(ui32) 126 | i32, _ := strconv.ParseInt(flagSetBinary[i], 2, 32) 127 | FlagIntSet[i] = int32(i32) 128 | } 129 | 130 | } 131 | 132 | var FlagUIntSet [32]uint32 133 | var FlagIntSet [32]int32 134 | 135 | var flagSetBinary = [32]string{ 136 | "00000000" + "00000000" + "00000000" + "00000001", 137 | "00000000" + "00000000" + "00000000" + "00000010", 138 | "00000000" + "00000000" + "00000000" + "00000100", 139 | "00000000" + "00000000" + "00000000" + "00001000", 140 | "00000000" + "00000000" + "00000000" + "00010000", 141 | "00000000" + "00000000" + "00000000" + "00100000", 142 | "00000000" + "00000000" + "00000000" + "01000000", 143 | "00000000" + "00000000" + "00000000" + "10000000", 144 | 145 | "00000000" + "00000000" + "00000001" + "00000000", 146 | "00000000" + "00000000" + "00000010" + "00000000", 147 | "00000000" + "00000000" + "00000100" + "00000000", 148 | "00000000" + "00000000" + "00001000" + "00000000", 149 | "00000000" + "00000000" + "00010000" + "00000000", 150 | "00000000" + "00000000" + "00100000" + "00000000", 151 | "00000000" + "00000000" + "01000000" + "00000000", 152 | "00000000" + "00000000" + "10000000" + "00000000", 153 | 154 | "00000000" + "00000001" + "00000000" + "00000000", 155 | "00000000" + "00000010" + "00000000" + "00000000", 156 | "00000000" + "00000100" + "00000000" + "00000000", 157 | "00000000" + "00001000" + "00000000" + "00000000", 158 | "00000000" + "00010000" + "00000000" + "00000000", 159 | "00000000" + "00100000" + "00000000" + "00000000", 160 | 
"00000000" + "01000000" + "00000000" + "00000000", 161 | "00000000" + "10000000" + "00000000" + "00000000", 162 | 163 | "00000001" + "00000000" + "00000000" + "00000000", 164 | "00000010" + "00000000" + "00000000" + "00000000", 165 | "00000100" + "00000000" + "00000000" + "00000000", 166 | "00001000" + "00000000" + "00000000" + "00000000", 167 | "00010000" + "00000000" + "00000000" + "00000000", 168 | "00100000" + "00000000" + "00000000" + "00000000", 169 | "01000000" + "00000000" + "00000000" + "00000000", 170 | "10000000" + "00000000" + "00000000" + "00000000", 171 | } 172 | -------------------------------------------------------------------------------- /plugins/server/mongodb/message/message_test.go: -------------------------------------------------------------------------------- 1 | package message 2 | 3 | import ( 4 | "encoding/hex" 5 | "testing" 6 | ) 7 | 8 | func TestIsFlagSet(t *testing.T) { 9 | 10 | b, _ := hex.DecodeString("38000000") 11 | if isFlagSetUInt32(b, 0, 33) { 12 | t.Fatalf("isFlagSetUInt32 should be false, because flagpos larger than 31") 13 | } 14 | if isFlagSetUInt32(b, 0, -1) { 15 | t.Fatalf("isFlagSetUInt32 should be false, because flagpos smaller than 0") 16 | } 17 | 18 | // matching byte: 00010000. flag pos 4 19 | b, _ = hex.DecodeString("38000000") 20 | if !isFlagSetInt32(b, 0, 4) { 21 | t.Fatalf("isFlagSetInt32 should return true") 22 | } 23 | 24 | // matching byte 00000000 00000000 00010000 00000000. 
flag pos 20 25 | // b 38001111 00111000 00000000 00010001 00010001 26 | // ^ 27 | b, _ = hex.DecodeString("38001111") 28 | if !isFlagSetUInt32(b, 0, 20) { 29 | t.Fatalf("isFlagSetUInt32 should return true") 30 | } 31 | 32 | } 33 | -------------------------------------------------------------------------------- /plugins/server/mongodb/message/op_command.go: -------------------------------------------------------------------------------- 1 | package message 2 | 3 | import ( 4 | "github.com/annchain/BlockDB/processors" 5 | ) 6 | 7 | type CommandMessage struct { 8 | header *MessageHeader 9 | 10 | // TODO body not implemented 11 | } 12 | 13 | func NewCommandMessage(header *MessageHeader, b []byte) (*CommandMessage, error) { 14 | 15 | //fmt.Println("new command data: ", b) 16 | return nil, nil 17 | } 18 | 19 | func (m *CommandMessage) ParseCommand() []*processors.LogEvent { 20 | 21 | return nil 22 | } 23 | -------------------------------------------------------------------------------- /plugins/server/mongodb/message/op_command_reply.go: -------------------------------------------------------------------------------- 1 | package message 2 | 3 | import ( 4 | "github.com/annchain/BlockDB/processors" 5 | ) 6 | 7 | type CommandReplyMessage struct { 8 | header *MessageHeader 9 | 10 | // TODO body not implemented 11 | } 12 | 13 | func NewCommandReplyMessage(header *MessageHeader, b []byte) (*CommandReplyMessage, error) { 14 | 15 | //fmt.Println("new command reply data: ", b) 16 | return nil, nil 17 | } 18 | 19 | func (m *CommandReplyMessage) ParseCommand() []*processors.LogEvent { 20 | 21 | return nil 22 | } 23 | -------------------------------------------------------------------------------- /plugins/server/mongodb/message/op_delete.go: -------------------------------------------------------------------------------- 1 | package message 2 | 3 | import ( 4 | "github.com/annchain/BlockDB/processors" 5 | ) 6 | 7 | type DeleteMessage struct { 8 | header *MessageHeader 9 | 10 | // 
TODO body not implemented 11 | } 12 | 13 | func NewDeleteMessage(header *MessageHeader, b []byte) (*DeleteMessage, error) { 14 | 15 | //fmt.Println("new delete data: ", b) 16 | return nil, nil 17 | } 18 | 19 | func (m *DeleteMessage) ParseCommand() []*processors.LogEvent { 20 | 21 | return nil 22 | } 23 | -------------------------------------------------------------------------------- /plugins/server/mongodb/message/op_get_more.go: -------------------------------------------------------------------------------- 1 | package message 2 | 3 | import ( 4 | "github.com/annchain/BlockDB/processors" 5 | ) 6 | 7 | type GetMoreMessage struct { 8 | header *MessageHeader 9 | 10 | // TODO body not implemented 11 | } 12 | 13 | func NewGetMoreMessage(header *MessageHeader, b []byte) (*GetMoreMessage, error) { 14 | 15 | //fmt.Println("new get more data: ", b) 16 | return nil, nil 17 | } 18 | 19 | func (m *GetMoreMessage) ParseCommand() []*processors.LogEvent { 20 | 21 | return nil 22 | } 23 | -------------------------------------------------------------------------------- /plugins/server/mongodb/message/op_insert.go: -------------------------------------------------------------------------------- 1 | package message 2 | 3 | import ( 4 | "github.com/annchain/BlockDB/processors" 5 | ) 6 | 7 | type InsertMessage struct { 8 | header *MessageHeader 9 | 10 | // TODO body not implemented 11 | } 12 | 13 | func NewInsertMessage(header *MessageHeader, b []byte) (*InsertMessage, error) { 14 | 15 | //fmt.Println("new insert data: ", b) 16 | 17 | return nil, nil 18 | } 19 | 20 | func (m *InsertMessage) ParseCommand() []*processors.LogEvent { 21 | 22 | return nil 23 | } 24 | -------------------------------------------------------------------------------- /plugins/server/mongodb/message/op_kill_cursors.go: -------------------------------------------------------------------------------- 1 | package message 2 | 3 | import ( 4 | "github.com/annchain/BlockDB/processors" 5 | ) 6 | 7 | type 
KillCursorsMessage struct { 8 | header *MessageHeader 9 | 10 | // TODO body not implemented 11 | } 12 | 13 | func NewKillCursorsMessage(header *MessageHeader, b []byte) (*KillCursorsMessage, error) { 14 | 15 | //fmt.Println("new kill cursors data: ", b) 16 | return nil, nil 17 | } 18 | 19 | func (m *KillCursorsMessage) ParseCommand() []*processors.LogEvent { 20 | 21 | return nil 22 | } 23 | -------------------------------------------------------------------------------- /plugins/server/mongodb/message/op_msg.go: -------------------------------------------------------------------------------- 1 | package message 2 | 3 | import ( 4 | "encoding/json" 5 | "fmt" 6 | 7 | "github.com/annchain/BlockDB/common/bytes" 8 | "github.com/globalsign/mgo/bson" 9 | ) 10 | 11 | type MsgMessage struct { 12 | Header *MessageHeader `json:"header"` 13 | Flags msgFlags `json:"flags"` 14 | Sections []section `json:"sections"` 15 | CheckSum uint32 `json:"check_sum"` 16 | } 17 | 18 | func NewMsgMessage(header *MessageHeader, b []byte) (*MsgMessage, error) { 19 | 20 | p := make([]byte, len(b)) 21 | copy(p, b) 22 | 23 | pos := HeaderLen 24 | 25 | // read flags 26 | flags := newMsgFlags(p, pos) 27 | pos += 4 28 | 29 | // read sections 30 | sectionsBytes := int(header.MessageSize) - pos 31 | if flags.CheckSumPresent { 32 | // reduce the length of checkSum 33 | sectionsBytes -= 4 34 | } 35 | var secs []section 36 | for sectionsBytes > 0 { 37 | sec, offset, err := newSection(p, pos) 38 | if err != nil { 39 | return nil, err 40 | } 41 | secs = append(secs, sec) 42 | pos += offset 43 | sectionsBytes -= offset 44 | } 45 | 46 | // read check sum 47 | checkSum := uint32(0) 48 | if flags.CheckSumPresent { 49 | checkSum = bytes.GetUInt32(p, pos) 50 | } 51 | 52 | mm := &MsgMessage{} 53 | mm.Header = header 54 | mm.Flags = flags 55 | mm.Sections = secs 56 | mm.CheckSum = checkSum 57 | 58 | return mm, nil 59 | } 60 | 61 | func (mm *MsgMessage) ExtractBasic() (user, db, collection, op, docId string) { 62 | 
63 | for _, s := range mm.Sections { 64 | switch sec := s.(type) { 65 | case *sectionBody: 66 | user, db, op, collection = mm.extractFromBody(sec) 67 | case *sectionDocumentSequence: 68 | docId = mm.extractFromSeq(sec) 69 | default: 70 | return 71 | } 72 | } 73 | return 74 | } 75 | 76 | func (mm *MsgMessage) extractFromBody(secBody *sectionBody) (user, db, op, collection string) { 77 | 78 | doc := secBody.Document 79 | // user 80 | if v, ok := doc["saslSupportedMechs"]; ok { 81 | if user, ok = v.(string); !ok { 82 | fmt.Println("saslSupportedMechs not string: ", doc) 83 | } 84 | } 85 | // db 86 | if v, ok := doc["$db"]; ok { 87 | if db, ok = v.(string); !ok { 88 | fmt.Println("$db not string: ", doc) 89 | } 90 | } 91 | // op and collection 92 | if v, ok := doc["update"]; ok { 93 | op = "update" 94 | collection, ok = v.(string) 95 | if !ok { 96 | fmt.Println("collection not string: ", doc) 97 | } 98 | } else if v, ok := doc["insert"]; ok { 99 | op = "insert" 100 | collection, ok = v.(string) 101 | if !ok { 102 | fmt.Println("collection not string: ", doc) 103 | } 104 | } else if v, ok := doc["query"]; ok { 105 | op = "query" 106 | collection, ok = v.(string) 107 | if !ok { 108 | fmt.Println("collection not string: ", doc) 109 | } 110 | } else if v, ok := doc["delete"]; ok { 111 | op = "delete" 112 | collection, ok = v.(string) 113 | if !ok { 114 | fmt.Println("collection not string: ", doc) 115 | } 116 | } 117 | 118 | return 119 | } 120 | 121 | func (mm *MsgMessage) extractFromSeq(secSeq *sectionDocumentSequence) (docId string) { 122 | 123 | docs := secSeq.Documents 124 | if len(docs) < 1 { 125 | return 126 | } 127 | 128 | var idI interface{} 129 | var ok bool 130 | for _, doc := range docs { 131 | if idI, ok = doc["_id"]; ok { 132 | break 133 | } 134 | v, ok := doc["q"] 135 | if !ok { 136 | continue 137 | } 138 | vb, ok := v.(bson.D) 139 | if !ok { 140 | continue 141 | } 142 | if idI, ok = (vb.Map())["_id"]; ok { 143 | break 144 | } 145 | } 146 | if idI == nil { 
147 | return 148 | } 149 | 150 | switch id := idI.(type) { 151 | case bson.ObjectId: 152 | return id.Hex() 153 | case string: 154 | return id 155 | case int: 156 | return string(id) 157 | default: 158 | return "" 159 | } 160 | } 161 | 162 | type msgFlags struct { 163 | CheckSumPresent bool `json:"check_sum"` 164 | MoreToCome bool `json:"more_to_come"` 165 | ExhaustAllowed bool `json:"exhaust_allowed"` 166 | } 167 | 168 | func newMsgFlags(b []byte, pos int) msgFlags { 169 | 170 | flag := msgFlags{ 171 | CheckSumPresent: isFlagSetUInt32(b, pos, 0), 172 | MoreToCome: isFlagSetUInt32(b, pos, 1), 173 | ExhaustAllowed: isFlagSetUInt32(b, pos, 16), 174 | } 175 | return flag 176 | } 177 | 178 | func (mf *msgFlags) MarshalJSON() ([]byte, error) { 179 | r := map[string]bool{} 180 | if mf.CheckSumPresent { 181 | r["check_sum"] = true 182 | } 183 | if mf.MoreToCome { 184 | r["more_to_come"] = true 185 | } 186 | if mf.ExhaustAllowed { 187 | r["exhaust_allowed"] = true 188 | } 189 | return json.Marshal(r) 190 | } 191 | 192 | type section interface { 193 | kind() sectionType 194 | } 195 | 196 | func newSection(b []byte, pos int) (section, int, error) { 197 | if len(b) < pos+1 { 198 | return nil, 0, fmt.Errorf("document too small for section type") 199 | } 200 | 201 | sType := sectionType(b[pos]) 202 | pos++ 203 | 204 | switch sType { 205 | case singleDocument: 206 | doc, size, err := readDocument(b, pos) 207 | if err != nil { 208 | return nil, 0, err 209 | } 210 | s := §ionBody{ 211 | PayloadType: singleDocument, 212 | Document: doc.Map(), 213 | } 214 | return s, 1 + int(size), nil 215 | 216 | case documentSequence: 217 | // read doc sequence size 218 | if len(b) < 4 { 219 | return nil, 0, fmt.Errorf("document too small for docSeq size") 220 | } 221 | size := bytes.GetUInt32(b, pos) 222 | if len(b) < pos+int(size) { 223 | return nil, 0, fmt.Errorf("document too small for docSeq") 224 | } 225 | pos += 4 226 | 227 | // read identifier 228 | identifier, idSize, err := readCString(b, 
pos) 229 | if err != nil { 230 | return nil, 0, fmt.Errorf("read cstring error: %v", err) 231 | } 232 | pos += idSize 233 | 234 | // read documents 235 | var docs []bson.M 236 | bytesLeft := int(size) - 4 - idSize 237 | for bytesLeft > 0 { 238 | doc, docSize, err := readDocument(b, pos) 239 | if err != nil { 240 | return nil, 0, fmt.Errorf("read doc error: %v", err) 241 | } 242 | docs = append(docs, doc.Map()) 243 | bytesLeft -= docSize 244 | } 245 | 246 | s := §ionDocumentSequence{ 247 | PayloadType: documentSequence, 248 | Size: size, 249 | Identifier: identifier, 250 | Documents: docs, 251 | } 252 | return s, 1 + int(size), nil 253 | 254 | default: 255 | return nil, 0, fmt.Errorf("unknown section type: %v", sType) 256 | } 257 | 258 | } 259 | 260 | type sectionBody struct { 261 | PayloadType sectionType `json:"type"` 262 | Document bson.M `json:"document"` 263 | } 264 | 265 | func (s *sectionBody) kind() sectionType { 266 | return s.PayloadType 267 | } 268 | 269 | type sectionDocumentSequence struct { 270 | PayloadType sectionType `json:"type"` 271 | Size uint32 `json:"size"` 272 | Identifier string `json:"identifier"` 273 | Documents []bson.M `json:"documents"` 274 | } 275 | 276 | func (s *sectionDocumentSequence) kind() sectionType { 277 | return s.PayloadType 278 | } 279 | 280 | type sectionType byte 281 | 282 | const ( 283 | singleDocument sectionType = iota 284 | documentSequence 285 | ) 286 | 287 | //func (m *MsgMessage) ParseCommand() []*processors.LogEvent { 288 | // 289 | // return nil 290 | //} 291 | -------------------------------------------------------------------------------- /plugins/server/mongodb/message/op_msg_test.go: -------------------------------------------------------------------------------- 1 | package message 2 | 3 | import ( 4 | "encoding/hex" 5 | "encoding/json" 6 | "fmt" 7 | "testing" 8 | ) 9 | 10 | func TestNewMsgMessage(t *testing.T) { 11 | 12 | data1 := 
"82000000d9acb76000000000dd07000000000000006d0000001069736d61737465720001000000027361736c537570706f727465644d65636873000b00000061646d696e2e726f6f740002246462000600000061646d696e00032472656164507265666572656e63650017000000026d6f646500080000007072696d617279000000" 13 | if b := msgTester(t, data1); b != nil { 14 | fmt.Println(string(b)) 15 | } 16 | 17 | data2 := "f8000000fe09e55600000000dd07000000000000007a00000002696e73657274000600000070726f787900086f7264657265640001036c736964001e000000056964001000000004f565892c2495440ab9dc0ea84c98f2b100022464620004000000756e6900032472656164507265666572656e63650017000000026d6f646500080000007072696d6172790000000168000000646f63756d656e7473005a000000075f6964005d11dee343096d19ac7c070c02696e736572745f74696d650014000000323031392d30362d32352031363a34343a313900026461746100010000000002647269766572000800000070796d6f6e676f0000" 18 | if b := msgTester(t, data2); b != nil { 19 | fmt.Println(string(b)) 20 | } 21 | 22 | } 23 | 24 | func msgTester(t *testing.T, dataHex string) []byte { 25 | 26 | dataBytes, _ := hex.DecodeString(dataHex) 27 | 28 | header, err := DecodeHeader(dataBytes) 29 | if err != nil { 30 | t.Fatalf("decode header error: %v", err) 31 | return nil 32 | } 33 | msg, err := NewMsgMessage(header, dataBytes) 34 | if err != nil { 35 | t.Fatalf("create msg message error: %v", err) 36 | return nil 37 | } 38 | 39 | b, err := json.Marshal(msg) 40 | if err != nil { 41 | t.Fatalf("json marshal message error: %v", err) 42 | return nil 43 | } 44 | 45 | return b 46 | } 47 | -------------------------------------------------------------------------------- /plugins/server/mongodb/message/op_query.go: -------------------------------------------------------------------------------- 1 | package message 2 | 3 | import ( 4 | "encoding/json" 5 | "fmt" 6 | "github.com/annchain/BlockDB/common/bytes" 7 | "github.com/globalsign/mgo/bson" 8 | ) 9 | 10 | type QueryMessage struct { 11 | Header *MessageHeader `json:"header"` 12 | Flags queryFlags `json:"flags"` 
13 | Coll string `json:"collection"` 14 | Skip int32 `json:"skip"` 15 | Limit int32 `json:"limit"` 16 | Query bson.M `json:"query"` 17 | Fields bson.M `json:"fields"` 18 | } 19 | 20 | func NewQueryMessage(header *MessageHeader, b []byte) (*QueryMessage, error) { 21 | 22 | //fmt.Println("new query data: ", b) 23 | 24 | p := make([]byte, len(b)) 25 | copy(p, b) 26 | 27 | p = p[HeaderLen:] 28 | 29 | // read flags 30 | flags := newQueryFlags(p, 0) 31 | p = p[4:] 32 | 33 | // read collection full name 34 | coll, collLen, _ := readCString(p, 0) 35 | p = p[collLen:] 36 | 37 | skip := bytes.GetInt32(p, 0) 38 | limit := bytes.GetInt32(p, 4) 39 | p = p[8:] 40 | 41 | // read query document 42 | querySize := bytes.GetUInt32(p, 0) 43 | queryBytes := p[:querySize] 44 | 45 | var queryBson bson.D 46 | err := bson.Unmarshal(queryBytes, &queryBson) 47 | if err != nil { 48 | return nil, fmt.Errorf("read query document error, cannot unmarshal it to bson, err: %v", err) 49 | } 50 | p = p[querySize:] 51 | 52 | // read fields 53 | var fieldsBson bson.D 54 | if len(p) > 0 { 55 | fieldsSize := bytes.GetUInt32(p, 0) 56 | fieldsBytes := p[:fieldsSize] 57 | err = bson.Unmarshal(fieldsBytes, &fieldsBson) 58 | if err != nil { 59 | return nil, fmt.Errorf("read fields document error, cannot unmarshal it to bson, err: %v", err) 60 | } 61 | } 62 | 63 | qm := &QueryMessage{} 64 | qm.Header = header 65 | qm.Flags = flags 66 | qm.Coll = coll 67 | qm.Skip = skip 68 | qm.Limit = limit 69 | qm.Query = queryBson.Map() 70 | if fieldsBson != nil { 71 | qm.Fields = fieldsBson.Map() 72 | } 73 | 74 | return qm, nil 75 | } 76 | 77 | func (qm *QueryMessage) ExtractBasic() (user, db, collection, op, docId string) { 78 | // TODO 79 | 80 | return 81 | } 82 | 83 | type queryFlags struct { 84 | Reserved bool 85 | TailableCursor bool 86 | SlaveOk bool 87 | OplogReplay bool 88 | NoCursorTimeout bool 89 | AwaitData bool 90 | Exhaust bool 91 | Partial bool 92 | } 93 | 94 | func newQueryFlags(b []byte, pos int) queryFlags 
{ 95 | q := queryFlags{ 96 | Reserved: isFlagSetInt32(b, pos, 0), 97 | TailableCursor: isFlagSetInt32(b, pos, 1), 98 | SlaveOk: isFlagSetInt32(b, pos, 2), 99 | OplogReplay: isFlagSetInt32(b, pos, 3), 100 | NoCursorTimeout: isFlagSetInt32(b, pos, 4), 101 | AwaitData: isFlagSetInt32(b, pos, 5), 102 | Exhaust: isFlagSetInt32(b, pos, 6), 103 | Partial: isFlagSetInt32(b, pos, 7), 104 | } 105 | return q 106 | } 107 | 108 | func (qf *queryFlags) MarshalJSON() ([]byte, error) { 109 | r := map[string]bool{} 110 | if qf.Reserved { 111 | r["reserved"] = true 112 | } 113 | if qf.TailableCursor { 114 | r["tailable_cursor"] = true 115 | } 116 | if qf.SlaveOk { 117 | r["slave_ok"] = true 118 | } 119 | if qf.OplogReplay { 120 | r["log_reply"] = true 121 | } 122 | if qf.NoCursorTimeout { 123 | r["no_cursor_timeout"] = true 124 | } 125 | if qf.AwaitData { 126 | r["await_data"] = true 127 | } 128 | if qf.Exhaust { 129 | r["exhaust"] = true 130 | } 131 | if qf.Partial { 132 | r["partial"] = true 133 | } 134 | return json.Marshal(r) 135 | } 136 | -------------------------------------------------------------------------------- /plugins/server/mongodb/message/op_reply.go: -------------------------------------------------------------------------------- 1 | package message 2 | 3 | import ( 4 | "encoding/json" 5 | "fmt" 6 | "github.com/annchain/BlockDB/common/bytes" 7 | "github.com/globalsign/mgo/bson" 8 | ) 9 | 10 | type ReplyMessage struct { 11 | Header *MessageHeader `json:"header"` 12 | Flags replyFlags `json:"flags"` 13 | CursorID int64 `json:"cursor_id"` 14 | StartFrom int32 `json:"start_from"` 15 | Number int32 `json:"number"` 16 | Documents []bson.M `json:"documents"` 17 | } 18 | 19 | func NewReplyMessage(header *MessageHeader, b []byte) (*ReplyMessage, error) { 20 | 21 | //fmt.Println("new reply data: ", b) 22 | 23 | p := make([]byte, len(b)) 24 | copy(p, b) 25 | 26 | pos := HeaderLen 27 | 28 | // read flags 29 | flags := newReplyFlags(p, pos) 30 | pos += 4 31 | // read cursor id 
32 | cursorID := bytes.GetInt64(p, pos) 33 | pos += 8 34 | // read start_from 35 | startFrom := bytes.GetInt32(p, pos) 36 | pos += 4 37 | // read number returned 38 | number := bytes.GetInt32(p, pos) 39 | pos += 4 40 | 41 | // read documents 42 | var docs []bson.M 43 | bytesLeft := int(header.MessageSize) - pos 44 | for bytesLeft > 0 { 45 | doc, docSize, err := readDocument(b, pos) 46 | if err != nil { 47 | return nil, fmt.Errorf("read doc error: %v", err) 48 | } 49 | docs = append(docs, doc.Map()) 50 | bytesLeft -= docSize 51 | } 52 | 53 | rm := &ReplyMessage{} 54 | rm.Header = header 55 | rm.Flags = flags 56 | rm.CursorID = cursorID 57 | rm.StartFrom = startFrom 58 | rm.Number = number 59 | rm.Documents = docs 60 | 61 | return rm, nil 62 | } 63 | 64 | func (rm *ReplyMessage) ExtractBasic() (user, db, collection, op, docId string) { 65 | // TODO 66 | 67 | return 68 | } 69 | 70 | type replyFlags struct { 71 | CursorNotFound bool `json:"cursor_not_found"` 72 | QueryFailure bool `json:"query_failure"` 73 | ShardConfigStale bool `json:"shard_config_stale"` 74 | AwaitCapable bool `json:"await_capable"` 75 | } 76 | 77 | func newReplyFlags(b []byte, pos int) replyFlags { 78 | return replyFlags{ 79 | CursorNotFound: isFlagSetInt32(b, pos, 0), 80 | QueryFailure: isFlagSetInt32(b, pos, 1), 81 | ShardConfigStale: isFlagSetInt32(b, pos, 2), 82 | AwaitCapable: isFlagSetInt32(b, pos, 3), 83 | } 84 | } 85 | 86 | func (rf *replyFlags) MarshalJSON() ([]byte, error) { 87 | r := map[string]bool{} 88 | if rf.CursorNotFound { 89 | r["cursor_not_found"] = true 90 | } 91 | if rf.QueryFailure { 92 | r["query_failure"] = true 93 | } 94 | if rf.ShardConfigStale { 95 | r["shard_config_stale"] = true 96 | } 97 | if rf.AwaitCapable { 98 | r["await_capable"] = true 99 | } 100 | return json.Marshal(r) 101 | } 102 | -------------------------------------------------------------------------------- /plugins/server/mongodb/message/op_reserved.go: 
-------------------------------------------------------------------------------- 1 | package message 2 | 3 | import ( 4 | "github.com/annchain/BlockDB/processors" 5 | ) 6 | 7 | type ReservedMessage struct { 8 | header *MessageHeader 9 | 10 | // TODO body not implemented 11 | } 12 | 13 | func NewReservedMessage(header *MessageHeader, b []byte) (*ReservedMessage, error) { 14 | 15 | //fmt.Println("new reserved data: ", b) 16 | 17 | return nil, nil 18 | } 19 | 20 | func (m *ReservedMessage) ParseCommand() []*processors.LogEvent { 21 | 22 | return nil 23 | } 24 | -------------------------------------------------------------------------------- /plugins/server/mongodb/message/op_update.go: -------------------------------------------------------------------------------- 1 | package message 2 | 3 | import ( 4 | "github.com/annchain/BlockDB/processors" 5 | ) 6 | 7 | type UpdateMessage struct { 8 | header *MessageHeader 9 | collection string 10 | flags string 11 | selector string 12 | update string 13 | } 14 | 15 | func NewUpdateMessage(header *MessageHeader, b []byte) (*UpdateMessage, error) { 16 | 17 | //fmt.Println("new update data: ", b) 18 | 19 | b = b[HeaderLen+4:] 20 | coll, collLen, _ := readCString(b, 0) 21 | b = b[collLen:] 22 | 23 | m := &UpdateMessage{} 24 | m.header = header 25 | m.collection = coll 26 | 27 | // TODO extract selector and update parts 28 | 29 | return nil, nil 30 | } 31 | 32 | func (m *UpdateMessage) ParseCommand() []*processors.LogEvent { 33 | 34 | return nil 35 | } 36 | -------------------------------------------------------------------------------- /plugins/server/mongodb/message/opcode.go: -------------------------------------------------------------------------------- 1 | package message 2 | 3 | import "fmt" 4 | 5 | type OpCode uint32 6 | 7 | const ( 8 | OpReply = OpCode(1) 9 | OpUpdate = OpCode(2001) 10 | OpInsert = OpCode(2002) 11 | Reserved = OpCode(2003) 12 | OpQuery = OpCode(2004) 13 | OpGetMore = OpCode(2005) 14 | OpDelete = 
OpCode(2006) 15 | OpKillCursors = OpCode(2007) 16 | OpCommand = OpCode(2010) 17 | OpCommandReply = OpCode(2011) 18 | OpMsg = OpCode(2013) 19 | ) 20 | 21 | func (op *OpCode) String() string { 22 | switch *op { 23 | case OpReply: 24 | return "OpReply" 25 | case OpUpdate: 26 | return "OpUpdate" 27 | case OpInsert: 28 | return "OpInsert" 29 | case Reserved: 30 | return "Reserved" 31 | case OpQuery: 32 | return "OpQuery" 33 | case OpGetMore: 34 | return "OpGetMore" 35 | case OpDelete: 36 | return "OpDelete" 37 | case OpKillCursors: 38 | return "OpKillCursors" 39 | case OpCommand: 40 | return "OpCommand" 41 | case OpCommandReply: 42 | return "OpCommandReply" 43 | case OpMsg: 44 | return "OpMsg" 45 | default: 46 | return fmt.Sprintf("unknown op %d", *op) 47 | } 48 | } 49 | -------------------------------------------------------------------------------- /plugins/server/mongodb/pool.go: -------------------------------------------------------------------------------- 1 | package mongodb 2 | 3 | import ( 4 | "fmt" 5 | "net" 6 | "sync" 7 | "time" 8 | ) 9 | 10 | type Pool struct { 11 | url string 12 | max int 13 | connChan chan net.Conn 14 | mu sync.RWMutex 15 | } 16 | 17 | func NewPool(url string, maxConn int) *Pool { 18 | p := &Pool{} 19 | p.url = url 20 | p.max = maxConn 21 | return p 22 | } 23 | 24 | func (pool *Pool) newConn(url string) (net.Conn, error) { 25 | retrySleep := 50 * time.Millisecond 26 | for retryCount := 7; retryCount > 0; retryCount-- { 27 | c, err := net.Dial("tcp", url) 28 | if err == nil { 29 | return c, nil 30 | } 31 | // TODO log the error 32 | fmt.Println(fmt.Sprintf("dial error: %v", err)) 33 | 34 | time.Sleep(retrySleep) 35 | retrySleep = retrySleep * 2 36 | } 37 | 38 | return nil, fmt.Errorf("failed to create new connection to url: %s", url) 39 | } 40 | 41 | func (pool *Pool) Release(c net.Conn) error { 42 | pool.mu.Lock() 43 | defer pool.mu.Unlock() 44 | 45 | return pool.release(c) 46 | } 47 | func (pool *Pool) release(c net.Conn) error { 48 | 
pool.connChan <- c 49 | 50 | return nil 51 | } 52 | 53 | func (pool *Pool) Acquire() net.Conn { 54 | pool.mu.Lock() 55 | defer pool.mu.Unlock() 56 | 57 | conn := <-pool.connChan 58 | 59 | return conn 60 | } 61 | -------------------------------------------------------------------------------- /plugins/server/mongodb/processor.go: -------------------------------------------------------------------------------- 1 | package mongodb 2 | 3 | //type MongoProcessor struct { 4 | // config MongoProcessorConfig 5 | // 6 | // readPool *Pool 7 | // writePool *Pool 8 | //} 9 | //type MongoProcessorConfig struct { 10 | // IdleConnectionTimeout time.Duration 11 | //} 12 | // 13 | //func (m *MongoProcessor) Stop() { 14 | // logrus.Info("MongoProcessor stopped") 15 | //} 16 | // 17 | //func (m *MongoProcessor) Start() { 18 | // logrus.Info("MongoProcessor started") 19 | // // start consuming queue 20 | //} 21 | // 22 | //func NewMongoProcessor(config MongoProcessorConfig) *MongoProcessor { 23 | // //TODO move mongo url into config 24 | // url := "172.28.152.101:27017" 25 | // 26 | // return &MongoProcessor{ 27 | // config: config, 28 | // readPool: NewPool(url, 10), 29 | // writePool: NewPool(url, 10), 30 | // } 31 | //} 32 | // 33 | //func (m *MongoProcessor) ProcessConnection(conn net.Conn) error { 34 | // defer conn.Close() 35 | // 36 | // fmt.Println("start process connection") 37 | // 38 | // // http://docs.mongodb.org/manual/faq/diagnostics/#faq-keepalive 39 | // if conn, ok := conn.(*net.TCPConn); ok { 40 | // conn.SetKeepAlivePeriod(2 * time.Minute) 41 | // conn.SetKeepAlive(true) 42 | // } 43 | // 44 | // reader := bufio.NewReader(conn) 45 | // 46 | // backend := m.writePool.Acquire() 47 | // defer m.writePool.Release(backend) 48 | // 49 | // for { 50 | // conn.SetReadDeadline(time.Now().Add(m.config.IdleConnectionTimeout)) 51 | // 52 | // cmdHeader := make([]byte, message.HeaderLen) 53 | // _, err := reader.Read(cmdHeader) 54 | // if err != nil { 55 | // if err == io.EOF { 
56 | // fmt.Println("target closed") 57 | // logrus.Info("target closed") 58 | // return nil 59 | // } else if neterr, ok := err.(net.Error); ok && neterr.Timeout() { 60 | // fmt.Println("target timeout") 61 | // logrus.Info("target timeout") 62 | // conn.Close() 63 | // return nil 64 | // } 65 | // return err 66 | // } 67 | // 68 | // // query command 69 | // msgSize := bytes.GetInt32(cmdHeader, 0) 70 | // fmt.Println("msgsize: ", msgSize) 71 | // 72 | // cmdBody := make([]byte, msgSize-message.HeaderLen) 73 | // _, err = reader.Read(cmdBody) 74 | // if err != nil { 75 | // fmt.Println("read body error: ", err) 76 | // return err 77 | // } 78 | // fmt.Println(fmt.Sprintf("msg header: %x", cmdHeader)) 79 | // fmt.Println(fmt.Sprintf("msg body: %x", cmdBody)) 80 | // 81 | // cmdFull := append(cmdHeader, cmdBody...) 82 | // err = m.messageHandler(cmdFull, conn, backend) 83 | // if err != nil { 84 | // // TODO handle err 85 | // return err 86 | // } 87 | // 88 | // } 89 | // return nil 90 | //} 91 | // 92 | //func (m *MongoProcessor) messageHandler(bytes []byte, client, backend net.Conn) error { 93 | // // 94 | // //var msg message.RequestMessage 95 | // //fmt.Println("Request--->") 96 | // //fmt.Println(hex.Dump(bytes)) 97 | // //err := msg.Decode(bytes) 98 | // //if err != nil { 99 | // // // TODO handle err 100 | // // return err 101 | // //} 102 | // // 103 | // ////var pool *Pool 104 | // ////if msg.ReadOnly() { 105 | // //// pool = m.readPool 106 | // ////} else { 107 | // //// pool = m.writePool 108 | // ////} 109 | // ////backend := pool.Acquire() 110 | // ////defer pool.Release(backend) 111 | // // 112 | // //err = msg.WriteTo(backend) 113 | // //if err != nil { 114 | // // // TODO handle err 115 | // // return err 116 | // //} 117 | // // 118 | // //var msgResp message.ResponseMessage 119 | // //err = msgResp.ReadFromMongo(backend) 120 | // //if err != nil { 121 | // // // TODO handle err 122 | // // return err 123 | // //} 124 | // 
//fmt.Println("<---Response") 125 | // ////fmt.Println(hex.Dump(msgResp.payload)) 126 | // //err = msgResp.WriteTo(client) 127 | // //if err != nil { 128 | // // // TODO handle err 129 | // // return err 130 | // //} 131 | // // 132 | // ////err = m.handleBlockDBEvents(&msgResp) 133 | // ////if err != nil { 134 | // //// // TODO handle err 135 | // //// return err 136 | // ////} 137 | // 138 | // return nil 139 | //} 140 | // 141 | //func (m *MongoProcessor) handleBlockDBEvents(msg message.Message) error { 142 | // // TODO not implemented yet 143 | // 144 | // events := msg.ParseCommand() 145 | // 146 | // fmt.Println("block db events: ", events) 147 | // 148 | // return nil 149 | //} 150 | -------------------------------------------------------------------------------- /plugins/server/socket/connection_processor.go: -------------------------------------------------------------------------------- 1 | package socket 2 | 3 | import ( 4 | "bufio" 5 | "github.com/annchain/BlockDB/backends" 6 | "github.com/annchain/BlockDB/processors" 7 | "github.com/sirupsen/logrus" 8 | "io" 9 | "net" 10 | "time" 11 | ) 12 | 13 | type SocketConnectionProcessorConfig struct { 14 | IdleConnectionTimeout time.Duration 15 | } 16 | 17 | type SocketConnectionProcessor struct { 18 | config SocketConnectionProcessorConfig 19 | dataProcessor processors.DataProcessor 20 | ledgerWriter backends.LedgerWriter 21 | } 22 | 23 | func NewSocketProcessor(config SocketConnectionProcessorConfig, dataProcessor processors.DataProcessor, ledgerWriter backends.LedgerWriter) *SocketConnectionProcessor { 24 | return &SocketConnectionProcessor{ 25 | config: config, 26 | dataProcessor: dataProcessor, 27 | ledgerWriter: ledgerWriter, 28 | } 29 | } 30 | 31 | func (s *SocketConnectionProcessor) ProcessConnection(conn net.Conn) error { 32 | reader := bufio.NewReader(conn) 33 | for { 34 | conn.SetReadDeadline(time.Now().Add(s.config.IdleConnectionTimeout)) 35 | str, err := reader.ReadString(byte(0)) 36 | if err != nil 
{ 37 | if err == io.EOF { 38 | logrus.Info("target closed") 39 | return nil 40 | } else if neterr, ok := err.(net.Error); ok && neterr.Timeout() { 41 | logrus.Info("target timeout") 42 | conn.Close() 43 | return nil 44 | } 45 | return err 46 | } 47 | str = str[:len(str)-1] 48 | // query command 49 | //fmt.Println(str) 50 | //fmt.Println(hex.Dump(bytes)) 51 | events, err := s.dataProcessor.ParseCommand([]byte(str)) 52 | if events == nil || err != nil { 53 | logrus.WithError(err).Warn("nil command") 54 | continue 55 | } 56 | for _, event := range events { 57 | event.Ip = conn.RemoteAddr().String() 58 | err = s.ledgerWriter.EnqueueSendToLedger(event) 59 | if err != nil { 60 | logrus.WithError(err).Warn("send to ledger err") 61 | } 62 | } 63 | } 64 | } 65 | 66 | func (SocketConnectionProcessor) Start() { 67 | logrus.Info("SocketConnectionProcessor started") 68 | } 69 | 70 | func (SocketConnectionProcessor) Stop() { 71 | logrus.Info("SocketConnectionProcessor stopped") 72 | } 73 | -------------------------------------------------------------------------------- /plugins/server/web/api.go: -------------------------------------------------------------------------------- 1 | package web 2 | 3 | import ( 4 | "encoding/json" 5 | "io/ioutil" 6 | "net/http" 7 | 8 | "github.com/sirupsen/logrus" 9 | ) 10 | 11 | func (l *HttpListener) Query(rw http.ResponseWriter, req *http.Request) { 12 | data, err := ioutil.ReadAll(req.Body) 13 | if err != nil || len(data) == 0 { 14 | http.Error(rw, "miss content", http.StatusBadRequest) 15 | return 16 | } 17 | 18 | logrus.Tracef("get query request data: %s", string(data)) 19 | var request AuditDataQueryRequest 20 | err = json.Unmarshal(data, &request) 21 | if err != nil { 22 | http.Error(rw, err.Error(), http.StatusBadRequest) 23 | return 24 | } 25 | filter := request.ToFilter() 26 | if request.PageNum < 1 { 27 | request.PageNum = 1 28 | } 29 | if request.PageSize < 1 { 30 | request.PageNum = 10 31 | } 32 | skip := (request.PageNum - 1) * 
request.PageSize 33 | 34 | logrus.Tracef("start logging a filter: %v", filter) 35 | //for key, value := range filter { 36 | // logrus.Tracef("filter key: %s, value: %v", key, value) 37 | //} 38 | respData, total, err := l.auditWriter.Query(filter, request.PageSize, skip) 39 | if err != nil { 40 | logrus.WithError(err).Error("read failed") 41 | http.Error(rw, err.Error(), http.StatusInternalServerError) 42 | return 43 | } 44 | logrus.Tracef("finished query filter: %v", filter) 45 | var resp AuditDataQueryResponse 46 | resp.Total = total 47 | resp.Data = respData 48 | RespOk(rw, resp) 49 | return 50 | 51 | } 52 | 53 | func (l *HttpListener) QueryOriginal(rw http.ResponseWriter, req *http.Request) { 54 | data, err := ioutil.ReadAll(req.Body) 55 | if err != nil || len(data) == 0 { 56 | http.Error(rw, "miss content", http.StatusBadRequest) 57 | return 58 | } 59 | var request OriginalDataRequest 60 | err = json.Unmarshal(data, &request) 61 | if err != nil { 62 | http.Error(rw, err.Error(), http.StatusBadRequest) 63 | return 64 | } 65 | filter := request.Filter 66 | if request.PageNum < 1 { 67 | request.PageNum = 1 68 | } 69 | if request.PageSize < 1 { 70 | request.PageNum = 10 71 | } 72 | skip := (request.PageNum - 1) * request.PageSize 73 | originalData, total, err := l.auditWriter.GetOriginalDataProcessor().Query(filter, request.PageSize, skip) 74 | if err != nil { 75 | logrus.WithError(err).Error("read failed") 76 | http.Error(rw, err.Error(), http.StatusInternalServerError) 77 | return 78 | } 79 | var resp OriginalDataQueryResponse 80 | resp.Total = total 81 | resp.Data = originalData 82 | RespOk(rw, resp) 83 | return 84 | 85 | } 86 | 87 | func (l *HttpListener) QueryGrammar(rw http.ResponseWriter, req *http.Request) { 88 | data, err := ioutil.ReadAll(req.Body) 89 | if err != nil || len(data) == 0 { 90 | http.Error(rw, "miss content", http.StatusBadRequest) 91 | return 92 | } 93 | var request AuditDataGrammarRequest 94 | err = json.Unmarshal(data, &request) 95 | if 
err != nil { 96 | http.Error(rw, err.Error(), http.StatusBadRequest) 97 | return 98 | } 99 | filter := request.Filter 100 | if request.PageNum < 1 { 101 | request.PageNum = 1 102 | } 103 | if request.PageSize < 1 { 104 | request.PageNum = 10 105 | } 106 | skip := (request.PageNum - 1) * request.PageSize 107 | respData, total, err := l.auditWriter.Query(filter, request.PageSize, skip) 108 | if err != nil { 109 | logrus.WithError(err).Error("read failed") 110 | http.Error(rw, err.Error(), http.StatusInternalServerError) 111 | return 112 | } 113 | var resp AuditDataQueryResponse 114 | resp.Total = total 115 | resp.Data = respData 116 | RespOk(rw, resp) 117 | return 118 | } 119 | 120 | func RespOk(rw http.ResponseWriter, result interface{}) { 121 | rw.Header().Set("Content-Type", "application/json") 122 | rw.WriteHeader(http.StatusOK) 123 | data, err := json.Marshal(result) 124 | if err != nil { 125 | http.Error(rw, err.Error(), http.StatusInternalServerError) 126 | return 127 | } 128 | rw.Write(data) 129 | } 130 | -------------------------------------------------------------------------------- /plugins/server/web/http_processor.go: -------------------------------------------------------------------------------- 1 | package web 2 | 3 | import ( 4 | "fmt" 5 | "io/ioutil" 6 | "net/http" 7 | "sync" 8 | 9 | "github.com/annchain/BlockDB/backends" 10 | "github.com/annchain/BlockDB/ogws" 11 | "github.com/annchain/BlockDB/processors" 12 | "github.com/gorilla/mux" 13 | "github.com/sirupsen/logrus" 14 | ) 15 | 16 | type HttpListenerConfig struct { 17 | Port int 18 | EnableAudit bool 19 | EnableHealth bool 20 | MaxContentLength int64 21 | } 22 | 23 | type HttpListener struct { 24 | config HttpListenerConfig 25 | ledgerWriter backends.LedgerWriter 26 | dataProcessor processors.DataProcessor 27 | 28 | wg sync.WaitGroup 29 | stopped bool 30 | router *mux.Router 31 | auditWriter ogws.AuditWriter 32 | } 33 | 34 | func (l *HttpListener) Name() string { 35 | return "HttpListener" 36 | } 
37 | 38 | func NewHttpListener(config HttpListenerConfig, dataProcessor processors.DataProcessor, ledgerWriter backends.LedgerWriter, auditWriter ogws.AuditWriter) *HttpListener { 39 | if config.MaxContentLength == 0 { 40 | config.MaxContentLength = 1e7 41 | } 42 | l := &HttpListener{ 43 | config: config, 44 | ledgerWriter: ledgerWriter, 45 | dataProcessor: dataProcessor, 46 | router: mux.NewRouter(), 47 | auditWriter: auditWriter, 48 | } 49 | if l.config.EnableAudit { 50 | l.router.Methods("POST").Path("/audit").HandlerFunc(l.Handle) 51 | l.router.Methods("GET", "POST").Path("/query").HandlerFunc(l.Query) 52 | l.router.Methods("GET", "POST").Path("/queryGrammar").HandlerFunc(l.QueryGrammar) 53 | } 54 | l.router.Methods("GET", "POST").Path("/health").HandlerFunc(l.Health) 55 | 56 | return l 57 | } 58 | 59 | func (l *HttpListener) Start() { 60 | go l.doListen() 61 | logrus.Info("HttpListener started") 62 | } 63 | 64 | func (l *HttpListener) Stop() { 65 | l.stopped = true 66 | logrus.Info("HttpListener stopped") 67 | } 68 | 69 | func (l *HttpListener) Handle(rw http.ResponseWriter, req *http.Request) { 70 | if req.ContentLength > l.config.MaxContentLength { 71 | http.Error(rw, http.StatusText(http.StatusRequestEntityTooLarge), http.StatusRequestEntityTooLarge) 72 | return 73 | } 74 | 75 | data, err := ioutil.ReadAll(req.Body) 76 | if err != nil || len(data) == 0 { 77 | http.Error(rw, "miss content", http.StatusBadRequest) 78 | return 79 | } 80 | logrus.Tracef("get audit request data: %s", string(data)) 81 | 82 | events, err := l.dataProcessor.ParseCommand(data) 83 | if err != nil { 84 | http.Error(rw, err.Error(), http.StatusBadRequest) 85 | return 86 | } 87 | for _, event := range events { 88 | logrus.Tracef("write event to ledger: %s", event.String()) 89 | err = l.ledgerWriter.EnqueueSendToLedger(event) 90 | if err != nil { 91 | logrus.WithError(err).Warn("send to ledger err") 92 | http.Error(rw, err.Error(), http.StatusInternalServerError) 93 | } 94 | } 95 | 96 | 
logrus.Tracef("write to ledger ends, data: %s", events[0].PrimaryKey) 97 | rw.Header().Set("Content-Type", "application/json") 98 | rw.WriteHeader(http.StatusOK) 99 | rw.Write([]byte("{}")) 100 | 101 | } 102 | 103 | func (l *HttpListener) Health(rw http.ResponseWriter, req *http.Request) { 104 | rw.WriteHeader(http.StatusOK) 105 | rw.Write([]byte("ok")) 106 | } 107 | 108 | func (l *HttpListener) doListen() { 109 | logrus.Fatal(http.ListenAndServe(":"+fmt.Sprintf("%d", l.config.Port), l.router)) 110 | } 111 | -------------------------------------------------------------------------------- /plugins/server/web/model.go: -------------------------------------------------------------------------------- 1 | package web 2 | 3 | import ( 4 | "strings" 5 | 6 | "github.com/annchain/BlockDB/ogws" 7 | "go.mongodb.org/mongo-driver/bson" 8 | ) 9 | 10 | type AuditDataQueryRequest struct { 11 | Type string `json:"type"` 12 | 13 | Ip string `json:"ip"` 14 | 15 | PrimaryKey string `json:"primary_key"` 16 | 17 | Timestamp string `json:"timestamp"` 18 | Identity string `json:"identity"` 19 | OtherCondition bson.M `json:"other_condition"` 20 | PageNum int64 `json:"page_num"` 21 | PageSize int64 `json:"page_size"` 22 | } 23 | 24 | type AuditDataQueryResponse struct { 25 | Total int64 `json:"total"` 26 | Data []ogws.RawData `json:"data"` 27 | } 28 | 29 | type AuditDataGrammarRequest struct { 30 | Filter bson.M `json:"filter"` 31 | PageNum int64 `json:"page_num"` 32 | PageSize int64 `json:"page_size"` 33 | } 34 | 35 | type OriginalDataQueryResponse struct { 36 | Total int64 `json:"total"` 37 | Data []ogws.OriginalData `json:"data"` 38 | } 39 | 40 | type OriginalDataRequest struct { 41 | Filter bson.M `json:"filter"` 42 | PageNum int64 `json:"page_num"` 43 | PageSize int64 `json:"page_size"` 44 | } 45 | 46 | type CommonResponse struct { 47 | Message string `json:"message"` 48 | Result interface{} `json:"result"` 49 | } 50 | 51 | func (request *AuditDataQueryRequest) ToFilter() bson.M { 52 | 
	userId := request.Identity
	filter := bson.M{}
	if request.Ip != "" {
		filter["data.ip"] = request.Ip
	}

	if request.PrimaryKey != "" {
		filter["data.primarykey"] = request.PrimaryKey
	}

	// "start;end" is treated as a half-open range [start, end); any other
	// non-empty value is matched exactly.
	if str := strings.Split(request.Timestamp, ";"); len(str) == 2 {
		filter["data.timestamp"] = bson.M{
			"$gte": str[0],
			"$lt":  str[1],
		}
	} else if request.Timestamp != "" {
		filter["data.timestamp"] = request.Timestamp
	}
	if request.Type != "" {
		filter["data.type"] = request.Type
	}
	if userId != "" {
		filter["data.identity"] = userId
	}
	// NOTE(review): OtherCondition is OR-ed with the field filter, and is
	// silently DROPPED when no structured field matched (len(filter) == 0).
	// Both behaviors look suspicious ($and and unconditional use would be
	// the obvious intent) — confirm before changing.
	if len(request.OtherCondition) > 0 {
		if len(filter) > 0 {
			var filters []bson.M
			filters = append(filters, filter, request.OtherCondition)
			filter = bson.M{"$or": filters}
		}
	}
	return filter
}
-------------------------------------------------------------------------------- /processors/README.md: --------------------------------------------------------------------------------
# Processors
Processors are responsible for handling incoming connections.

Processors include routers. They proxy requests to backend implementations. They also audit the request in parallel(or in a
synchronized way).
-------------------------------------------------------------------------------- /processors/logevent.go: --------------------------------------------------------------------------------
package processors

import (
	"encoding/json"
)

// LogEvent is the audit record that listeners build and the ledger stores.
type LogEvent struct {
	Identity   string      `json:"identity"`
	Type       string      `json:"type"`
	Ip         string      `json:"ip"`
	PrimaryKey string      `json:"primary_key"`
	Timestamp  int64       `json:"timestamp"`
	Data       interface{} `json:"data"`
	Before     string      `json:"before"`
	After      string      `json:"after"`
}

// String renders the event as JSON; marshal errors are deliberately ignored
// (an empty string is returned in that case).
func (e *LogEvent) String() string {
	b, _ := json.Marshal(e)
	return string(b)
}
-------------------------------------------------------------------------------- /processors/processor.go: --------------------------------------------------------------------------------
package processors

import "net"

// ConnectionProcessor handles one inbound connection end-to-end.
type ConnectionProcessor interface {
	// handleConnection reads the connection and extract the incoming message
	// note that this may be a long connection so take care of the connection reuse.
	ProcessConnection(conn net.Conn) error
	// Do not return until ready
	Start()
	Stop()
}

// DataProcessor turns raw bytes into audit events.
type DataProcessor interface {
	ParseCommand(bytes []byte) ([]*LogEvent, error)
}
-------------------------------------------------------------------------------- /scripts/consumer.py: --------------------------------------------------------------------------------
from kafka import KafkaConsumer

if __name__ == '__main__':
    # Drain the 'anlink' topic and print every record.
    consumer = KafkaConsumer('anlink', bootstrap_servers=['47.100.222.11:30040'])
    for message in consumer:
        # message value and key are raw bytes -- decode if necessary!
7 | # e.g., for unicode: `message.value.decode('utf-8')` 8 | print ("%s:%d:%d: key=%s value=%s" % (message.topic, message.partition, 9 | message.offset, message.key, 10 | message.value)) 11 | -------------------------------------------------------------------------------- /scripts/kafka_simple_producer.py: -------------------------------------------------------------------------------- 1 | import datetime 2 | import json 3 | from kafka import KafkaConsumer, KafkaProducer 4 | 5 | d = { 6 | "private_data": "Your own data here", 7 | "my_array": ["It", "supports", "array"], 8 | "my_inner_object": { 9 | "supported": True, 10 | "when": datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') 11 | } 12 | 13 | } 14 | e = { 15 | "identity": "user_id_234823", 16 | "type": "test", 17 | "ip": "222.333.22.33", 18 | "primary_key": "unique_id_2852", 19 | "timestamp": int(datetime.datetime.now().timestamp()) * 1000, 20 | "data": d, 21 | "before": None, 22 | "after": None, 23 | 24 | } 25 | 26 | if __name__ == '__main__': 27 | producer = KafkaProducer(bootstrap_servers=['47.100.222.11:30050']) 28 | 29 | for i in range(1): 30 | ss = json.dumps(e) 31 | # ss += '\0' 32 | # producer.send('tech-tech-anlink-web-gateway-201907101551', bytes(ss, 'utf-8')) 33 | producer.send('anlink', bytes(ss, 'utf-8')) 34 | producer.flush() 35 | print(ss) 36 | 37 | -------------------------------------------------------------------------------- /scripts/kafka_test.py: -------------------------------------------------------------------------------- 1 | import threading, logging, time 2 | import multiprocessing 3 | import json 4 | 5 | from kafka import KafkaConsumer, KafkaProducer 6 | 7 | 8 | class Producer(threading.Thread): 9 | def __init__(self): 10 | threading.Thread.__init__(self) 11 | self.stop_event = threading.Event() 12 | 13 | def stop(self): 14 | self.stop_event.set() 15 | 16 | def run(self): 17 | producer = KafkaProducer(bootstrap_servers='10.253.11.192:9092') 18 | 19 | while not 
self.stop_event.is_set(): 20 | for i in range(1): 21 | d = { 22 | "thread": "http-nio-8080-exec-5", 23 | "level": "INFO", 24 | "loggerName": "auditing", 25 | "message": "TTT", 26 | "endOfBatch": False, 27 | "loggerFqcn": "org.apache.logging.slf4j.Log4jLogger", 28 | "instant": { 29 | "epochSecond": 1561375556, 30 | "nanoOfSecond": 447000000 31 | }, 32 | "contextMap": { 33 | "id": "122", 34 | "user": "XXX" 35 | }, 36 | "threadId": i, 37 | "threadPriority": 5 38 | } 39 | e = { 40 | "Identity": "hahaha", 41 | "Type": "mongodb", 42 | "Ip": "172.28.152.101", 43 | "PrimaryKey": "nothing", 44 | "TimeStamp": 1561375556, 45 | "Data": d, 46 | "Before": "og", 47 | "After": "nothing", 48 | 49 | } 50 | ss = json.dumps(e) 51 | # ss += '\0' 52 | # producer.send('tech-tech-anlink-web-gateway-201907101551', bytes(ss, 'utf-8')) 53 | producer.send('anlink', bytes(ss, 'utf-8')) 54 | # producer.send('tech-tech-anlink-web-gateway-201907101551', bytes(str(i), 'utf-8')) 55 | 56 | time.sleep(0.1) 57 | print(ss) 58 | print("end") 59 | producer.close() 60 | 61 | 62 | class Consumer(multiprocessing.Process): 63 | def __init__(self): 64 | multiprocessing.Process.__init__(self) 65 | self.stop_event = multiprocessing.Event() 66 | 67 | def stop(self): 68 | self.stop_event.set() 69 | 70 | def run(self): 71 | consumer = KafkaConsumer(bootstrap_servers='localhost:9092', 72 | auto_offset_reset='earliest', 73 | consumer_timeout_ms=1000) 74 | consumer.subscribe(['my-topic']) 75 | 76 | while not self.stop_event.is_set(): 77 | for message in consumer: 78 | print(message) 79 | if self.stop_event.is_set(): 80 | break 81 | 82 | consumer.close() 83 | 84 | 85 | def main(): 86 | tasks = [ 87 | Producer(), 88 | # Consumer() 89 | ] 90 | 91 | for t in tasks: 92 | t.start() 93 | 94 | time.sleep(10) 95 | 96 | for task in tasks: 97 | task.stop() 98 | 99 | for task in tasks: 100 | task.join() 101 | 102 | 103 | if __name__ == "__main__": 104 | logging.basicConfig( 105 | 
format='%(asctime)s.%(msecs)s:%(name)s:%(thread)d:%(levelname)s:%(process)d:%(message)s', 106 | level=logging.INFO 107 | ) 108 | main() 109 | -------------------------------------------------------------------------------- /scripts/run_ws_server.py: -------------------------------------------------------------------------------- 1 | from websocket_server import WebsocketServer 2 | 3 | 4 | # Called for every client connecting (after handshake) 5 | def new_client(client, server): 6 | print("New client connected and was given id %d" % client['id']) 7 | server.send_message_to_all("Hey all, a new client has joined us") 8 | 9 | 10 | # Called for every client disconnecting 11 | def client_left(client, server): 12 | print("Client(%d) disconnected" % client['id']) 13 | 14 | 15 | # Called when a client sends a message 16 | def message_received(client, server, message): 17 | if len(message) > 200: 18 | message = message[:200] + '..' 19 | print("Client(%d) said: %s" % (client['id'], message)) 20 | client['handler'].send_message('I received your ' + message) 21 | 22 | 23 | PORT = 9001 24 | server = WebsocketServer(PORT) 25 | server.set_fn_new_client(new_client) 26 | server.set_fn_client_left(client_left) 27 | server.set_fn_message_received(message_received) 28 | server.run_forever() 29 | --------------------------------------------------------------------------------