├── keys ├── .gitignore ├── rootCa.srl ├── nginx.jks ├── nginx.p12 ├── nats-client.jks ├── nats-client.p12 ├── nats-server.jks ├── nats-server.p12 ├── cassandra-client.jks ├── cassandra-client.p12 ├── cassandra-server.jks ├── cassandra-server.p12 ├── cassandra-truststore.jks ├── pub.key ├── dcrontab1.csr ├── dcrontab2.csr ├── dcrontab3.csr ├── node1.csr ├── dcrontab1.crt ├── dcrontab2.crt ├── dcrontab3.crt ├── node1.key ├── dcrontab2.key ├── dcrontab3.key ├── dcrontab1.key ├── nats-client.csr ├── nginx.crt ├── cassandra-client.csr ├── nats-client.crt ├── nats-server.crt ├── cassandra-client.crt ├── cassandra-server.crt ├── nginx.csr ├── nats-server.csr ├── cassandra-server.csr ├── cert.pem ├── rootCa.crt ├── node1.crt ├── nginx.key ├── nats-server.key ├── nats-client.key ├── cassandra-client.key ├── cassandra-server.key ├── key.pem ├── priv.key ├── rootCa.key ├── ca.crt └── ca.key ├── .gitignore ├── supervisor.conf ├── dockercmd.sh ├── dcrontab.sh ├── dcrontab ├── gorocksdb │ ├── dynflag.go │ ├── staticflag_linux.go │ ├── snapshot.go │ ├── options_env.go │ ├── options_compression.go │ ├── README.md │ ├── ratelimiter.go │ ├── gorocksdb.h │ ├── cache.go │ ├── options_flush.go │ ├── LICENSE │ ├── cf_handle.go │ ├── cow.go │ ├── slice.go │ ├── env.go │ ├── dbpath.go │ ├── options_write.go │ ├── array.go │ ├── comparator.go │ ├── util.go │ ├── checkpoint.go │ ├── sst_file_writer.go │ ├── doc.go │ ├── compaction_filter.go │ ├── slice_transform.go │ ├── gorocksdb.c │ ├── options_transaction.go │ ├── options_ingest.go │ ├── options_transactiondb.go │ ├── filter_policy.go │ ├── iterator.go │ ├── transaction.go │ ├── transactiondb.go │ ├── backup.go │ ├── options_read.go │ ├── merge_operator.go │ ├── options_compaction.go │ ├── options_block_based_table.go │ └── write_batch.go ├── nats.go └── commander.go ├── dcron.supervisor.conf ├── go.mod ├── gencerts.sh ├── config.json ├── wait-for ├── Makefile ├── Dockerfile ├── go.sum ├── README.md └── LICENSE /keys/.gitignore: 
-------------------------------------------------------------------------------- 1 | production/ 2 | -------------------------------------------------------------------------------- /keys/rootCa.srl: -------------------------------------------------------------------------------- 1 | 7DBF7074A5AF926D1065FB68BADF46474E41E5A0 2 | -------------------------------------------------------------------------------- /keys/nginx.jks: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sfproductlabs/dcrontab/HEAD/keys/nginx.jks -------------------------------------------------------------------------------- /keys/nginx.p12: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sfproductlabs/dcrontab/HEAD/keys/nginx.p12 -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | dcrontab-data 2 | keys/ 3 | /dcron 4 | cpphelloworld/cpphelloworld 5 | *.o 6 | *.so 7 | -------------------------------------------------------------------------------- /keys/nats-client.jks: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sfproductlabs/dcrontab/HEAD/keys/nats-client.jks -------------------------------------------------------------------------------- /keys/nats-client.p12: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sfproductlabs/dcrontab/HEAD/keys/nats-client.p12 -------------------------------------------------------------------------------- /keys/nats-server.jks: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sfproductlabs/dcrontab/HEAD/keys/nats-server.jks -------------------------------------------------------------------------------- 
/keys/nats-server.p12: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sfproductlabs/dcrontab/HEAD/keys/nats-server.p12 -------------------------------------------------------------------------------- /keys/cassandra-client.jks: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sfproductlabs/dcrontab/HEAD/keys/cassandra-client.jks -------------------------------------------------------------------------------- /keys/cassandra-client.p12: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sfproductlabs/dcrontab/HEAD/keys/cassandra-client.p12 -------------------------------------------------------------------------------- /keys/cassandra-server.jks: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sfproductlabs/dcrontab/HEAD/keys/cassandra-server.jks -------------------------------------------------------------------------------- /keys/cassandra-server.p12: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sfproductlabs/dcrontab/HEAD/keys/cassandra-server.p12 -------------------------------------------------------------------------------- /supervisor.conf: -------------------------------------------------------------------------------- 1 | [supervisord] 2 | nodaemon=true 3 | 4 | [include] 5 | files = /etc/supervisor/conf.d/*.conf 6 | -------------------------------------------------------------------------------- /keys/cassandra-truststore.jks: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sfproductlabs/dcrontab/HEAD/keys/cassandra-truststore.jks -------------------------------------------------------------------------------- /dockercmd.sh: 
-------------------------------------------------------------------------------- 1 | #/bin/bash 2 | # executed on docker-image (aws task) startup 3 | supervisord -c /etc/supervisor.conf 4 | 5 | -------------------------------------------------------------------------------- /dcrontab.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #rm -rf dcrontab-data/ && ./dcron 3 | #./dcron -addr localhost:6001 -nodeid 1 4 | ./dcron -nodeid 1 5 | -------------------------------------------------------------------------------- /dcrontab/gorocksdb/dynflag.go: -------------------------------------------------------------------------------- 1 | // +build !linux !static 2 | 3 | package gorocksdb 4 | 5 | // #cgo LDFLAGS: -lrocksdb -lstdc++ -lm -lz -lbz2 -lsnappy 6 | import "C" 7 | -------------------------------------------------------------------------------- /dcron.supervisor.conf: -------------------------------------------------------------------------------- 1 | [program:dcron] 2 | command=/bin/bash -c "cd /app/dcrontab && [ -z '$NODEID' ] && nice -n 5 ./dcron -nodeid 1 || nice -n 5 ./dcron -nodeid $NODEID" 3 | redirect_stderr=true 4 | -------------------------------------------------------------------------------- /dcrontab/gorocksdb/staticflag_linux.go: -------------------------------------------------------------------------------- 1 | // +build static 2 | 3 | package gorocksdb 4 | 5 | // #cgo LDFLAGS: -l:librocksdb.a -l:libstdc++.a -l:libz.a -l:libbz2.a -l:libsnappy.a -lm 6 | import "C" 7 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/dioptre/dcrontab/v3 2 | 3 | require ( 4 | github.com/lni/dragonboat v2.1.7+incompatible 5 | github.com/lni/dragonboat/v3 v3.1.0 6 | github.com/lni/goutils v1.0.1 7 | github.com/nats-io/nats.go v1.8.1 8 | golang.org/x/net 
v0.0.0-20190930134127-c5a3c61f89f3 // indirect 9 | google.golang.org/grpc v1.24.0 // indirect 10 | ) 11 | 12 | go 1.13 13 | -------------------------------------------------------------------------------- /dcrontab/gorocksdb/snapshot.go: -------------------------------------------------------------------------------- 1 | package gorocksdb 2 | 3 | // #include "rocksdb/c.h" 4 | import "C" 5 | 6 | // Snapshot provides a consistent view of read operations in a DB. 7 | type Snapshot struct { 8 | c *C.rocksdb_snapshot_t 9 | } 10 | 11 | // NewNativeSnapshot creates a Snapshot object. 12 | func NewNativeSnapshot(c *C.rocksdb_snapshot_t) *Snapshot { 13 | return &Snapshot{c} 14 | } 15 | -------------------------------------------------------------------------------- /keys/pub.key: -------------------------------------------------------------------------------- 1 | -----BEGIN PUBLIC KEY----- 2 | MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA0V7g8EINWlaNY3TxnqzH 3 | GJJyA0+ducA1+0pxy/5n8lR1GUIFwltn5RUEjjOObC/sXd6WQNib4WaVVuWwQtxh 4 | j5ievhbmz2ZMsgrVkQ072ywcyUsN8yBv5OX2/SPmuW2Dme3+4kZDrteTZ17BkcuS 5 | DJcszfNL+KhJoIcgQaAKR0QuL9dcF/JvKgZ6Hfq3XSFgtDIEdzyWsuIyHp20Etti 6 | /l3jp8TM2F/Alx1iqkl4JU24FcgLXtQjtkdwPo52OAP5dp9SWM8w7rSbW2P2B4D1 7 | e5l86j6TaFW8UFHThZEeOxGKLZU3UOisW7HiNjYncu2gXXOKSIirEtkOjsjNSF8y 8 | uQIDAQAB 9 | -----END PUBLIC KEY----- 10 | -------------------------------------------------------------------------------- /keys/dcrontab1.csr: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE REQUEST----- 2 | MIIBUzCBvQIBADAUMRIwEAYDVQQDDAlkY3JvbnRhYjEwgZ8wDQYJKoZIhvcNAQEB 3 | BQADgY0AMIGJAoGBAM5E/CopJTj/kCfg5uBD/fIUAujz2iXkPHEjSE0ncfpk91Mp 4 | iQbka+roLgv7QR9wKp72TfXRntzVyonytom8rNPy4GhKzkzSNH3Ocg+zAOxn1xYH 5 | FzooRg0Pg+LfT708cmIatWhXKOAytiOiCoEGyjfgvx1iQLMdtDyki17UhV3FAgMB 6 | AAGgADANBgkqhkiG9w0BAQsFAAOBgQDJmIKp4L63Q/DwOe7Fzvlx50deFP0AMwaF 7 | UPJkpGAaevgCYtJALo9rD2m9Gg23U+MBsJ7Bgnzt8h5Nczo7J5XT2MQ61lHNw5Zr 8 | 
qZqSVRIeu/pxcA+ViC+TlnUGRCsanT1lnNV3u4IGGQKI7UHgE8kIqlh4zL9Ybnd+ 9 | EMA+Ne20yA== 10 | -----END CERTIFICATE REQUEST----- 11 | -------------------------------------------------------------------------------- /keys/dcrontab2.csr: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE REQUEST----- 2 | MIIBUzCBvQIBADAUMRIwEAYDVQQDDAlkY3JvbnRhYjIwgZ8wDQYJKoZIhvcNAQEB 3 | BQADgY0AMIGJAoGBAMCBiC9B5N88jZfbt8N7B7/nGD0muFzIjDJXd7Oqvuw0Negv 4 | cPej8a/0k+AfKT3AuyR3XaJQINJwmbpodoaZLg5u1pfNLr9xMxd+iePv75DWooLb 5 | drCaaQQM9hjS7w4NDZMnpM35IqrZmE/PRA/qTVgtPBwq/xTLwHzP9LZCAfsvAgMB 6 | AAGgADANBgkqhkiG9w0BAQsFAAOBgQB7DVj99YHWnoxF52lbL6nTstbCBgt0hG4e 7 | rmsGbRaUiPjNHT21YiaVwVKMkomSMWAki1ufrxjethynL+XwAjdpA7QcX6X8/3Ng 8 | Q0tlnvXjP7BKxCPpg2bVSC4V7gRlgxaVv2vmZNchyCsABtJUFYRviqYFrI61Bvsc 9 | GoPtgp0/aw== 10 | -----END CERTIFICATE REQUEST----- 11 | -------------------------------------------------------------------------------- /keys/dcrontab3.csr: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE REQUEST----- 2 | MIIBUzCBvQIBADAUMRIwEAYDVQQDDAlkY3JvbnRhYjMwgZ8wDQYJKoZIhvcNAQEB 3 | BQADgY0AMIGJAoGBALuYBvOTrrgKOkfe9HOSmDtoPOiVUCer/Uck94NDsjfrsD9/ 4 | IwEEr5ng1qhj6++vsUd3ZiUbprRYLaoirSPh4m21fvgfyX+Lx5IremygjU2tYjEE 5 | o4qi4mQUbpPNJwNHEMhvB0Ux+WSYR9N+V73UAJ8dyvDz/hV48C3E7UkpFvMtAgMB 6 | AAGgADANBgkqhkiG9w0BAQsFAAOBgQCsD/qmQrmhmwXt9laiDwUvea/Xffw+FDn4 7 | mIgUepfF1bW9a2JNs3GBCDysDs5LWLiT1ZCZOZVuBcm1RCYNWHTTJ5/frR4ujWTX 8 | ERwhjEYnD7HERclTZqo7zyoA43QmahH92acYchSVmioE6jSvDibTRaN93rbKRASo 9 | YlA2GYG2Tg== 10 | -----END CERTIFICATE REQUEST----- 11 | -------------------------------------------------------------------------------- /gencerts.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | if [ -z "$1" ] 3 | then 4 | echo "No node supplied" 5 | exit 1 6 | fi 7 | 8 | if [ -z "$2" ] 9 | then 10 | echo "Skipping CA Generation" 11 | 
else 12 | openssl genrsa -out rootCa.key 4096 13 | openssl req -new -x509 -days 365 -key keys/rootCa.key -out keys/rootCa.crt -subj "/OU=SFPL" 14 | fi 15 | 16 | openssl genrsa -out keys/dcrontab$1.key 1024 17 | openssl req -new -key keys/dcrontab$1.key -out keys/dcrontab$1.csr -subj "/CN=dcrontab$1" 18 | openssl x509 -req -days 365 -in keys/dcrontab$1.csr -CA keys/rootCa.crt -CAkey keys/rootCa.key -set_serial 01 -out keys/dcrontab$1.crt -------------------------------------------------------------------------------- /keys/node1.csr: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE REQUEST----- 2 | MIIBmTCCAQICAQAwWTELMAkGA1UEBhMCQVUxEzARBgNVBAgMClNvbWUtU3RhdGUx 3 | ITAfBgNVBAoMGEludGVybmV0IFdpZGdpdHMgUHR5IEx0ZDESMBAGA1UEAwwJbG9j 4 | YWxob3N0MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDGiACbds8OvCSFBTja 5 | SzCIFScvCsg7eY1SuWdqTlSann7P83YHWpbLv+tYmOVKEn4DbA4+gqix9BkY1uI8 6 | dtG7Fgxkpfka2gevKGvyZPZ/wwiEj8d76TO7WBkbCJz8wVTIyS4DO6Hwzf/ViZ2l 7 | thRDyRXf5fhKKPkL3Flh86D3iwIDAQABoAAwDQYJKoZIhvcNAQELBQADgYEAcduG 8 | o/GeL2LRx7PAsl9x2UX+QUPfOEGL17E9Bzel8bMCZ8B3Z23zr+IuGGSrPLvYTqbo 9 | N1tNvzB9UREzTHTj4+5KUrgmhWvohWi1vS3KxTjX2Ca/NyQif8yRLMfEadbB1N8L 10 | w9Qv5dd9hONQel0P8cHZ5EiBmaLjhCIozgZRkg0= 11 | -----END CERTIFICATE REQUEST----- 12 | -------------------------------------------------------------------------------- /dcrontab/gorocksdb/options_env.go: -------------------------------------------------------------------------------- 1 | package gorocksdb 2 | 3 | // #include "rocksdb/c.h" 4 | import "C" 5 | 6 | // EnvOptions represents options for env. 7 | type EnvOptions struct { 8 | c *C.rocksdb_envoptions_t 9 | } 10 | 11 | // NewDefaultEnvOptions creates a default EnvOptions object. 12 | func NewDefaultEnvOptions() *EnvOptions { 13 | return NewNativeEnvOptions(C.rocksdb_envoptions_create()) 14 | } 15 | 16 | // NewNativeEnvOptions creates a EnvOptions object. 
17 | func NewNativeEnvOptions(c *C.rocksdb_envoptions_t) *EnvOptions { 18 | return &EnvOptions{c: c} 19 | } 20 | 21 | // Destroy deallocates the EnvOptions object. 22 | func (opts *EnvOptions) Destroy() { 23 | C.rocksdb_envoptions_destroy(opts.c) 24 | opts.c = nil 25 | } 26 | -------------------------------------------------------------------------------- /dcrontab/gorocksdb/options_compression.go: -------------------------------------------------------------------------------- 1 | package gorocksdb 2 | 3 | // CompressionOptions represents options for different compression algorithms like Zlib. 4 | type CompressionOptions struct { 5 | WindowBits int 6 | Level int 7 | Strategy int 8 | MaxDictBytes int 9 | } 10 | 11 | // NewDefaultCompressionOptions creates a default CompressionOptions object. 12 | func NewDefaultCompressionOptions() *CompressionOptions { 13 | return NewCompressionOptions(-14, -1, 0, 0) 14 | } 15 | 16 | // NewCompressionOptions creates a CompressionOptions object. 17 | func NewCompressionOptions(windowBits, level, strategy, maxDictBytes int) *CompressionOptions { 18 | return &CompressionOptions{ 19 | WindowBits: windowBits, 20 | Level: level, 21 | Strategy: strategy, 22 | MaxDictBytes: maxDictBytes, 23 | } 24 | } 25 | -------------------------------------------------------------------------------- /keys/dcrontab1.crt: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIICQzCCASsCAQEwDQYJKoZIhvcNAQELBQAwPzEPMA0GA1UEAwwGcm9vdENhMRAw 3 | DgYDVQQLDAdTVEFHSU5HMQ0wCwYDVQQKDARTRlBMMQswCQYDVQQGEwJVUzAeFw0x 4 | OTExMDUyMzQwMjhaFw0yMDExMDQyMzQwMjhaMBQxEjAQBgNVBAMMCWRjcm9udGFi 5 | MTCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEAzkT8KiklOP+QJ+Dm4EP98hQC 6 | 6PPaJeQ8cSNITSdx+mT3UymJBuRr6uguC/tBH3AqnvZN9dGe3NXKifK2ibys0/Lg 7 | aErOTNI0fc5yD7MA7GfXFgcXOihGDQ+D4t9PvTxyYhq1aFco4DK2I6IKgQbKN+C/ 8 | HWJAsx20PKSLXtSFXcUCAwEAATANBgkqhkiG9w0BAQsFAAOCAQEALUGZFw2ewmR9 9 | 
+t1JHlSeYqY7KRSaD7nUxonk3NzEDd1Auddq0uLtd+ZIV9sJXMo3Jspp2jT7YaRJ 10 | Y4IoFx0I1lGqqxkEJMlgTbrFqEa8gHRIf6TzZBR2vEcFSz2jdzcpjbyO59x8caCl 11 | LO5LDSk/wovjvD/2mSMSGA3sMLjkrdXCvUx5m/uZXslSkCigImPTtwkQIAMGIstG 12 | 2v/+Bo2DSxLaPs6EpRCy/9yecz5TPiEGW2rcc53BWrU/xSXnRUJefrfUsxavsiGF 13 | 0BjM2mOCG+vcS7rL6pEmJjtOzZ3aT+52ElNnFEySBQA8A+nPdrFjJGB7kTK70UcX 14 | mJauK2AbTQ== 15 | -----END CERTIFICATE----- 16 | -------------------------------------------------------------------------------- /keys/dcrontab2.crt: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIICQzCCASsCAQEwDQYJKoZIhvcNAQELBQAwPzEPMA0GA1UEAwwGcm9vdENhMRAw 3 | DgYDVQQLDAdTVEFHSU5HMQ0wCwYDVQQKDARTRlBMMQswCQYDVQQGEwJVUzAeFw0x 4 | OTExMDUyMzQwMzFaFw0yMDExMDQyMzQwMzFaMBQxEjAQBgNVBAMMCWRjcm9udGFi 5 | MjCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEAwIGIL0Hk3zyNl9u3w3sHv+cY 6 | PSa4XMiMMld3s6q+7DQ16C9w96Pxr/ST4B8pPcC7JHddolAg0nCZumh2hpkuDm7W 7 | l80uv3EzF36J4+/vkNaigtt2sJppBAz2GNLvDg0NkyekzfkiqtmYT89ED+pNWC08 8 | HCr/FMvAfM/0tkIB+y8CAwEAATANBgkqhkiG9w0BAQsFAAOCAQEAB9CPjOJ9HEAe 9 | E43qSAbgQOiwq0KhJi8trXnJ8DaX7FXXywarbOGQ6DzfdKmYh80vK7BMzLawaZDR 10 | sABX8DPLu0Ccj+35gM25wHCUoIMR6xTJGu1ZxLdRiMcs0ubAHSB/Udgz5riDQdAY 11 | bE3WXPVxXLTNxiTnW/oXHmp4H36CjNOZaTMpNNGIKFyiJv9lgT0+WR1/Eprlwczn 12 | sZD7XojKGVnme3Jvo5kFImnTGv2Z0C+hcopVZf+h+NcstKh/+6CPcnDutpXOXGCA 13 | 3GSmMPpn3Vzc1vShPLHVcODZcH1ZhfInR6dk0wo28GdmPn4XmT65P3Iw+j+yqNP6 14 | DD7Vyjq7xg== 15 | -----END CERTIFICATE----- 16 | -------------------------------------------------------------------------------- /keys/dcrontab3.crt: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIICQzCCASsCAQEwDQYJKoZIhvcNAQELBQAwPzEPMA0GA1UEAwwGcm9vdENhMRAw 3 | DgYDVQQLDAdTVEFHSU5HMQ0wCwYDVQQKDARTRlBMMQswCQYDVQQGEwJVUzAeFw0x 4 | OTExMDUyMzQwMzNaFw0yMDExMDQyMzQwMzNaMBQxEjAQBgNVBAMMCWRjcm9udGFi 5 | MzCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEAu5gG85OuuAo6R970c5KYO2g8 6 | 
6JVQJ6v9RyT3g0OyN+uwP38jAQSvmeDWqGPr76+xR3dmJRumtFgtqiKtI+HibbV+ 7 | +B/Jf4vHkit6bKCNTa1iMQSjiqLiZBRuk80nA0cQyG8HRTH5ZJhH035XvdQAnx3K 8 | 8PP+FXjwLcTtSSkW8y0CAwEAATANBgkqhkiG9w0BAQsFAAOCAQEACIPczlfX2pC3 9 | nr8srx82QqxMGURILe2y51wD2EHng48qdu/8STLXuDDY8yGUl8ZA6cR8wYm546GA 10 | 8P4yL9RKXcTR1V4szogXwEFFXmjOF/cndR6H2XPDEKEEKsopACFD2LMqbehnGcQp 11 | 9+zJEuMORz0Z5yxysGKyyOaDtkjeowrMY91eDdS28cRSDwyqbzV7mgef/QkCwLk+ 12 | 5/tMeOWVHSnLU1kWuq5l1yWKq6PMGHFmNHlvHjTvn1JNW2CoGjlJCUPbINi448Zy 13 | UzvLUSE6TOfhi8biqId3wVHCdQ6knyFTvu1bWfynX4Fxki2grXmY3WYJ1jqJIibl 14 | Th5FvWi3sA== 15 | -----END CERTIFICATE----- 16 | -------------------------------------------------------------------------------- /keys/node1.key: -------------------------------------------------------------------------------- 1 | -----BEGIN RSA PRIVATE KEY----- 2 | MIICXAIBAAKBgQDGiACbds8OvCSFBTjaSzCIFScvCsg7eY1SuWdqTlSann7P83YH 3 | WpbLv+tYmOVKEn4DbA4+gqix9BkY1uI8dtG7Fgxkpfka2gevKGvyZPZ/wwiEj8d7 4 | 6TO7WBkbCJz8wVTIyS4DO6Hwzf/ViZ2lthRDyRXf5fhKKPkL3Flh86D3iwIDAQAB 5 | AoGBAKTgL/jsi3sGPyIFuzh3Nz/cqWUFBFN4WCi6dysvEE40kMZsv7nNvuxJX899 6 | cva4MtTuuUmqwyS+vtUFKG/kkbnf85mWE5ZmryNJ6YBCMyGftC26CZDf0jPazk5p 7 | wVHKmf8qAzzC0GwWSrvH09/IjF7b6KAPunn7UdPhgtiNWVGRAkEA/HY3MU3ET1ME 8 | Mp9aF1hSXwr/oL41/xev7kBXCcWXYq6DO0LFckGFI4XugKFgaETs0pPcNHjEI8QQ 9 | 1oEmuoKjAwJBAMlQS18v92lKCKeTUuAi4lA2e17S9i7N8/6U4gqcZZTt76FCDRTG 10 | DifxtMoGfkyvX1pM/wVkjgM7fBAljq/I7tkCQFdsADS8fywQzTVSZPoMhYmP33Ek 11 | LMKGy08vTHR/zapIT/0nZAPqmFXyvOIh/6Zbp75LoUcyQok4DbSZyHVxY00CQGZ6 12 | EjQdRbN6NVegfhKMj2nHohuOBjWjL25LuZkvLeKXEGGlLuWBQFmDB8XDcm105HQi 13 | N7s2CbiJgkjx5WWEQlkCQBVIXKM/1avupyGgJoL2YGG0dz1pLF+ylNyelMNQyUqJ 14 | 4WCByjJqMnL2kYqCslwnN7g55JPOXyoWilvzlrx+wpA= 15 | -----END RSA PRIVATE KEY----- 16 | -------------------------------------------------------------------------------- /keys/dcrontab2.key: -------------------------------------------------------------------------------- 1 | -----BEGIN RSA PRIVATE KEY----- 2 | 
MIICXAIBAAKBgQDAgYgvQeTfPI2X27fDewe/5xg9JrhcyIwyV3ezqr7sNDXoL3D3 3 | o/Gv9JPgHyk9wLskd12iUCDScJm6aHaGmS4ObtaXzS6/cTMXfonj7++Q1qKC23aw 4 | mmkEDPYY0u8ODQ2TJ6TN+SKq2ZhPz0QP6k1YLTwcKv8Uy8B8z/S2QgH7LwIDAQAB 5 | AoGAUMuMztjHKHKPsvbCr8bJP9/MKo4/vqFlRhOz57J4I0begS64KRej7oemMOCj 6 | PInRgcKfh+o9fFVKEBSb+m38B9Rgr/sCNwKRxGzEsXqPOV6o3cOknuW1qqkh222y 7 | /SQWi189oJ/DMPY54QjqX1QvR17FWNeT+hwg0jzJGLJ/hvECQQDeIo5cj/pbBHkN 8 | s9QXZJemD81yo46rk4S8WshPog2zEIS2ImZbPyGTf/EywUD8m50uGIM7ix64E2ju 9 | mPqdC1HtAkEA3dqe4H0+4+EoA0NX1Wi+INLGROVZynTsf3QgZkcJeySnV38Eq3XR 10 | XykcTOwpdl9BY987OShCsQzANqeI1OSOCwJBAKQMwTZ63ln5o60K8ceU8qohYYkY 11 | YXU1LQItyWGGPLfRqzEXlqAsr49PHcJuFqB4DEzOrZx70eP/8a7EK9FvWsECQFXj 12 | eXa3hk16sZ7xx2NbdaAHebLah/SVZ6nAL3rpLYZ7Hj4Fcq1eo3wP0mEFXNfy8MHV 13 | fnwWVhttMEz4d0TUZxkCQDt1a9r1/W37EVTbAKpF0enJSXuFn1M37Bh3kMXp8ZWk 14 | LY0LHNF5rAYBMQVLcSuB8EobZCUBEmAq1j4YV8KUa2s= 15 | -----END RSA PRIVATE KEY----- 16 | -------------------------------------------------------------------------------- /keys/dcrontab3.key: -------------------------------------------------------------------------------- 1 | -----BEGIN RSA PRIVATE KEY----- 2 | MIICWwIBAAKBgQC7mAbzk664CjpH3vRzkpg7aDzolVAnq/1HJPeDQ7I367A/fyMB 3 | BK+Z4NaoY+vvr7FHd2YlG6a0WC2qIq0j4eJttX74H8l/i8eSK3psoI1NrWIxBKOK 4 | ouJkFG6TzScDRxDIbwdFMflkmEfTfle91ACfHcrw8/4VePAtxO1JKRbzLQIDAQAB 5 | AoGAKT6WoK0ip/IPuEeLxXFAqScU5SoW8nH9AMxe4uxaXbMjEALa5llqZlGpwGIf 6 | uJYPSyA0RApdT59ngtWW7aG3T3lOl9bTUNFJl+f+Yc7TPDMt3HHeO7oU6FDX8esc 7 | l7hmuc/euoWKISk2DKsytrbfYa1GzmuFjBpziqQo+UB78QECQQDdd0/a280fzk/S 8 | NAyR1SSg3rrVIAKZ3mAxyKvGJHvWHB/YvoDQosr5xvo1GhTicZCIATp+AK+Q2SkX 9 | GDTZfELBAkEA2NiSsW1EnO5rBiO3ScZopW4hbPToYNfE457f/u9Kbjq+iTvhGqss 10 | vSoz7WFfXIMyZBqOdliDIf9wtKldsmtHbQJAXWfqBTu5SxXf8p9DQeqstTDbifIj 11 | RrRUzAp/8GUI2egowmiuudA/XZO/idADSPJu6/3gpq4xXh1JSFVMTQIswQJAHimh 12 | 3gQw8xNG18fICv8KN1imVZ2LZU0eGftTdQab99E8B6LIoQkGZBmmt3I/7YJiVAJN 13 | prCRLP13E+orGlJJ0QJAWE7uB1dJLnigSUU7iOYi1R1iBbuKad1ztgNUo7HAil1z 14 | 
LvjiKdavUD7Jv9nF7DNS7IfTvdJqAmx8SoCYskIbig== 15 | -----END RSA PRIVATE KEY----- 16 | -------------------------------------------------------------------------------- /keys/dcrontab1.key: -------------------------------------------------------------------------------- 1 | -----BEGIN RSA PRIVATE KEY----- 2 | MIICXgIBAAKBgQDORPwqKSU4/5An4ObgQ/3yFALo89ol5DxxI0hNJ3H6ZPdTKYkG 3 | 5Gvq6C4L+0EfcCqe9k310Z7c1cqJ8raJvKzT8uBoSs5M0jR9znIPswDsZ9cWBxc6 4 | KEYND4Pi30+9PHJiGrVoVyjgMrYjogqBBso34L8dYkCzHbQ8pIte1IVdxQIDAQAB 5 | AoGBAM1k/iBRZmJSgU5kciIBMl8atCcZozE2C0Wxl25Uh/eJEIp8ApmdXSTF3AxM 6 | PfW7MIWnx8uu110ZehxRVpjjCsgazQ614tAmEk3OjYOlLjRMKc7CaGeJCO2QJP51 7 | dURJ8SXzRl8b7Y6gN6Ad+TxyS2gzSOqMopsesxn1V6B+d3NlAkEA/hickK+iSUEg 8 | hQnVc06CvVXZ6JSYvWRiJgmVgZWdah1Eyy9bf4RzdbShpnorpMtrhhwMK5EsQuxy 9 | KOi4iTzwAwJBAM/QosffBaKjtTexvZgfPfHUThS64lexcSHix8/DyojXKCDOLYCu 10 | qkPOHQm9UaNpuo+xW9fiy5LUt8/9VpXxRJcCQQC+3SXd2NACTpVq2XQoZPT326EP 11 | f28r9Fd1UwWmjQftSYb/SNdl9cv0wnYaYvKdmt9toWvS3f1tV8Z4zDaxf8CZAkEA 12 | xMexrAMhyeysh01V0zsaI+i9+uEvvIv4LXcZb0HdgZACnnW9HpHGBiig1H0hVdSi 13 | 5Lez2QwXajTkzRz9pUPthwJASbB2s2u0bQ+fea8DbIOGEcZhhz/AOPhtyta94Dzk 14 | NeGTAGGASCnG+Yxinawy0YfXFG7gLw7Ob4NJZ/gSwtRvlA== 15 | -----END RSA PRIVATE KEY----- 16 | -------------------------------------------------------------------------------- /dcrontab/gorocksdb/README.md: -------------------------------------------------------------------------------- 1 | # gorocksdb, a Go wrapper for RocksDB 2 | 3 | [![Build Status](https://travis-ci.org/tecbot/gorocksdb.svg)](https://travis-ci.org/tecbot/gorocksdb) [![GoDoc](https://godoc.org/github.com/tecbot/gorocksdb?status.svg)](http://godoc.org/github.com/tecbot/gorocksdb) 4 | 5 | ## Install 6 | 7 | You'll need to build [RocksDB](https://github.com/facebook/rocksdb) v5.16+ on your machine. 
8 | 9 | After that, you can install gorocksdb using the following command: 10 | 11 | CGO_CFLAGS="-I/path/to/rocksdb/include" \ 12 | CGO_LDFLAGS="-L/path/to/rocksdb -lrocksdb -lstdc++ -lm -lz -lbz2 -lsnappy -llz4 -lzstd" \ 13 | go get github.com/tecbot/gorocksdb 14 | 15 | Please note that this package might upgrade the required RocksDB version at any moment. 16 | Vendoring is thus highly recommended if you require high stability. 17 | 18 | *The [embedded CockroachDB RocksDB](https://github.com/cockroachdb/c-rocksdb) is no longer supported in gorocksdb.* 19 | -------------------------------------------------------------------------------- /dcrontab/gorocksdb/ratelimiter.go: -------------------------------------------------------------------------------- 1 | package gorocksdb 2 | 3 | // #include 4 | // #include "rocksdb/c.h" 5 | import "C" 6 | 7 | // RateLimiter, is used to control write rate of flush and 8 | // compaction. 9 | type RateLimiter struct { 10 | c *C.rocksdb_ratelimiter_t 11 | } 12 | 13 | // NewDefaultRateLimiter creates a default RateLimiter object. 14 | func NewRateLimiter(rate_bytes_per_sec, refill_period_us int64, fairness int32) *RateLimiter { 15 | return NewNativeRateLimiter(C.rocksdb_ratelimiter_create( 16 | C.int64_t(rate_bytes_per_sec), 17 | C.int64_t(refill_period_us), 18 | C.int32_t(fairness), 19 | )) 20 | } 21 | 22 | // NewNativeRateLimiter creates a native RateLimiter object. 23 | func NewNativeRateLimiter(c *C.rocksdb_ratelimiter_t) *RateLimiter { 24 | return &RateLimiter{c} 25 | } 26 | 27 | // Destroy deallocates the RateLimiter object. 
28 | func (self *RateLimiter) Destroy() { 29 | C.rocksdb_ratelimiter_destroy(self.c) 30 | self.c = nil 31 | } 32 | -------------------------------------------------------------------------------- /dcrontab/gorocksdb/gorocksdb.h: -------------------------------------------------------------------------------- 1 | #include 2 | #include "rocksdb/c.h" 3 | 4 | // This API provides convenient C wrapper functions for rocksdb client. 5 | 6 | /* Base */ 7 | 8 | extern void gorocksdb_destruct_handler(void* state); 9 | 10 | /* CompactionFilter */ 11 | 12 | extern rocksdb_compactionfilter_t* gorocksdb_compactionfilter_create(uintptr_t idx); 13 | 14 | /* Comparator */ 15 | 16 | extern rocksdb_comparator_t* gorocksdb_comparator_create(uintptr_t idx); 17 | 18 | /* Filter Policy */ 19 | 20 | extern rocksdb_filterpolicy_t* gorocksdb_filterpolicy_create(uintptr_t idx); 21 | extern void gorocksdb_filterpolicy_delete_filter(void* state, const char* v, size_t s); 22 | 23 | /* Merge Operator */ 24 | 25 | extern rocksdb_mergeoperator_t* gorocksdb_mergeoperator_create(uintptr_t idx); 26 | extern void gorocksdb_mergeoperator_delete_value(void* state, const char* v, size_t s); 27 | 28 | /* Slice Transform */ 29 | 30 | extern rocksdb_slicetransform_t* gorocksdb_slicetransform_create(uintptr_t idx); 31 | -------------------------------------------------------------------------------- /dcrontab/gorocksdb/cache.go: -------------------------------------------------------------------------------- 1 | package gorocksdb 2 | 3 | // #include "rocksdb/c.h" 4 | import "C" 5 | 6 | // Cache is a cache used to store data read from data in memory. 7 | type Cache struct { 8 | c *C.rocksdb_cache_t 9 | } 10 | 11 | // NewLRUCache creates a new LRU Cache object with the capacity given. 12 | func NewLRUCache(capacity int) *Cache { 13 | return NewNativeCache(C.rocksdb_cache_create_lru(C.size_t(capacity))) 14 | } 15 | 16 | // NewNativeCache creates a Cache object. 
17 | func NewNativeCache(c *C.rocksdb_cache_t) *Cache { 18 | return &Cache{c} 19 | } 20 | 21 | // GetUsage returns the Cache memory usage. 22 | func (c *Cache) GetUsage() int { 23 | return int(C.rocksdb_cache_get_usage(c.c)) 24 | } 25 | 26 | // GetPinnedUsage returns the Cache pinned memory usage. 27 | func (c *Cache) GetPinnedUsage() int { 28 | return int(C.rocksdb_cache_get_pinned_usage(c.c)) 29 | } 30 | 31 | // Destroy deallocates the Cache object. 32 | func (c *Cache) Destroy() { 33 | C.rocksdb_cache_destroy(c.c) 34 | c.c = nil 35 | } 36 | -------------------------------------------------------------------------------- /dcrontab/gorocksdb/options_flush.go: -------------------------------------------------------------------------------- 1 | package gorocksdb 2 | 3 | // #include "rocksdb/c.h" 4 | import "C" 5 | 6 | // FlushOptions represent all of the available options when manual flushing the 7 | // database. 8 | type FlushOptions struct { 9 | c *C.rocksdb_flushoptions_t 10 | } 11 | 12 | // NewDefaultFlushOptions creates a default FlushOptions object. 13 | func NewDefaultFlushOptions() *FlushOptions { 14 | return NewNativeFlushOptions(C.rocksdb_flushoptions_create()) 15 | } 16 | 17 | // NewNativeFlushOptions creates a FlushOptions object. 18 | func NewNativeFlushOptions(c *C.rocksdb_flushoptions_t) *FlushOptions { 19 | return &FlushOptions{c} 20 | } 21 | 22 | // SetWait specify if the flush will wait until the flush is done. 23 | // Default: true 24 | func (opts *FlushOptions) SetWait(value bool) { 25 | C.rocksdb_flushoptions_set_wait(opts.c, boolToChar(value)) 26 | } 27 | 28 | // Destroy deallocates the FlushOptions object. 
29 | func (opts *FlushOptions) Destroy() { 30 | C.rocksdb_flushoptions_destroy(opts.c) 31 | opts.c = nil 32 | } 33 | -------------------------------------------------------------------------------- /dcrontab/gorocksdb/LICENSE: -------------------------------------------------------------------------------- 1 | Copyright (C) 2016 Thomas Adam 2 | 3 | Permission is hereby granted, free of charge, to any person obtaining a copy 4 | of this software and associated documentation files (the "Software"), to deal 5 | in the Software without restriction, including without limitation the rights 6 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 7 | copies of the Software, and to permit persons to whom the Software is furnished 8 | to do so, subject to the following conditions: 9 | 10 | The above copyright notice and this permission notice shall be included in all 11 | copies or substantial portions of the Software. 12 | 13 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 16 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 18 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 19 | THE SOFTWARE. 20 | -------------------------------------------------------------------------------- /dcrontab/gorocksdb/cf_handle.go: -------------------------------------------------------------------------------- 1 | package gorocksdb 2 | 3 | // #include 4 | // #include "rocksdb/c.h" 5 | import "C" 6 | import "unsafe" 7 | 8 | // ColumnFamilyHandle represents a handle to a ColumnFamily. 9 | type ColumnFamilyHandle struct { 10 | c *C.rocksdb_column_family_handle_t 11 | } 12 | 13 | // NewNativeColumnFamilyHandle creates a ColumnFamilyHandle object. 
14 | func NewNativeColumnFamilyHandle(c *C.rocksdb_column_family_handle_t) *ColumnFamilyHandle { 15 | return &ColumnFamilyHandle{c} 16 | } 17 | 18 | // UnsafeGetCFHandler returns the underlying c column family handle. 19 | func (h *ColumnFamilyHandle) UnsafeGetCFHandler() unsafe.Pointer { 20 | return unsafe.Pointer(h.c) 21 | } 22 | 23 | // Destroy calls the destructor of the underlying column family handle. 24 | func (h *ColumnFamilyHandle) Destroy() { 25 | C.rocksdb_column_family_handle_destroy(h.c) 26 | } 27 | 28 | type ColumnFamilyHandles []*ColumnFamilyHandle 29 | 30 | func (cfs ColumnFamilyHandles) toCSlice() columnFamilySlice { 31 | cCFs := make(columnFamilySlice, len(cfs)) 32 | for i, cf := range cfs { 33 | cCFs[i] = cf.c 34 | } 35 | return cCFs 36 | } 37 | -------------------------------------------------------------------------------- /keys/nats-client.csr: -------------------------------------------------------------------------------- 1 | -----BEGIN NEW CERTIFICATE REQUEST----- 2 | MIIC4jCCAcoCAQAwRDELMAkGA1UEBhMCVVMxDTALBgNVBAoTBFNGUEwxEDAOBgNV 3 | BAsTB1NUQUdJTkcxFDASBgNVBAMTC05BVFMtQ0xJRU5UMIIBIjANBgkqhkiG9w0B 4 | AQEFAAOCAQ8AMIIBCgKCAQEAuUwmnc/FN2VErYuUWxbbGtlgpBYUc3Y1Cg1dahWI 5 | 6IIv1iw+TqiYDv4cXxRwwvKwpgHDUbTFGz8cmyXo8+NMWvAGeoHGdY/Msz0iIqRp 6 | SNHOow87s7fJjyway7gDafVCG1D6Vkh6gOLrwyxU0IB/GO9uGaAt9Zm6uXb3III/ 7 | ptaTDKF06JR9P7vXj0ceqBMTHDYvtI/dZ0bPHcRmr+7Y1rD1+P0emIulx2/weJvK 8 | AWG14agzm0jUOqHvoTJauflcqXW+GgxjDQPLfeseGuAcZeTQSuPPZNs0C+HgUJFs 9 | 6Pf8YyzKVYuqVWLh3eJCeJct9L8cijitdhyfDqJa+nf+FQIDAQABoFkwVwYJKoZI 10 | hvcNAQkOMUowSDAnBgNVHREEIDAeghxuYXRzLWNsaWVudC5zdGFnaW5nLnNmcGwu 11 | Y29tMB0GA1UdDgQWBBSOCUF9rfLPjotgceySZVkNw+iC/jANBgkqhkiG9w0BAQsF 12 | AAOCAQEAUGoTYGcGpVK4FUL5SREfQUa/Qk/MxBkIWoE5sgJ/MBj+nFnoIN/0SKTY 13 | dGMqf92Pc23j09P0zJ9BIaQXfE+hPUbX9zqucZtDjCy1skF8yZhY4MIyv9mKsQsE 14 | KKmw9lglu3LaXGm8Eho0IpJHBCm+yWevdE/FY4X7HiTW3t03gw9WfhL2oav6wDuJ 15 | 60AsWe7SJbsYQ9/F8E3l5MZ+u1QNPoDCHJxcN7R8q8JtGelY7UN0ohgwuLu7Fss7 16 | 
9tT2XQUoT7xGSNv4OGPELhkG8LOJiJ5Du+HtOhXxCOgpMekv0qn68zI1mGdRc6bw 17 | lsiBaUUEqiHmtyM+vuVuIssLnYX5Sw== 18 | -----END NEW CERTIFICATE REQUEST----- 19 | -------------------------------------------------------------------------------- /keys/nginx.crt: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIIDBjCCAe4CFH2/cHSlr5JtEGX7aLrfRkdOQeWcMA0GCSqGSIb3DQEBCwUAMD8x 3 | DzANBgNVBAMMBnJvb3RDYTEQMA4GA1UECwwHU1RBR0lORzENMAsGA1UECgwEU0ZQ 4 | TDELMAkGA1UEBhMCVVMwIBcNMTkwNjE5MTg0MDQ1WhgPMjEwMTA4MDgxODQwNDVa 5 | MD4xCzAJBgNVBAYTAlVTMQ0wCwYDVQQKEwRTRlBMMRAwDgYDVQQLEwdTVEFHSU5H 6 | MQ4wDAYDVQQDEwVOR0lOWDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB 7 | AIcHaHK4YcfY8zV0+kc6GosGw1HM0hdZmgCiKJnb1ARgsA1DWeiKy5JOAFShzDVN 8 | XCZ1E5YLBaPRYUrTFkn2Vra9tzd80zd/2M6jDM7cuK9E4y3HV7oQ+LZ+Hjt1ieuQ 9 | PyLMjmukv9znITS+8BCwY9SJxfZQWAqYR/51oJmthbeVOdaIuz6I8T8bSB/jdAEe 10 | Kb4mVsZPPhaBZ9n5meKU7C76xjYeaM/2cXzghwDveb+uPYTmO6P6aND+fGT6WZQV 11 | TTvskakOjnMxhTNErcssTKkbd4I4I/1DhlCX+5Ua0rggI2GpfdgNqpY/YqqSv1BA 12 | PtZ6YZRAYA0OoGyvWuCeK+8CAwEAATANBgkqhkiG9w0BAQsFAAOCAQEAcPtd9S1O 13 | 14YGI4Z0jh56SXP/Vxe1V3zCKdEqP1v53g5cOEDWN1G7zegNloZlSXzrI/5n9QC/ 14 | a8V+6Wkk8/d91utnJIB1mvb5Y4VHbcQvq1RQYIICZykKKzw3hRnAk9WTvKB5nVZe 15 | RwAKcHhUVW90x+eR10rH6vo3LZdIKiXrq5UhQsFdh41FlglO+VWqVnYHEtrjNzNG 16 | 5Ize6ROcMe/lpOui/1HTcgbH0Si/UcH2Uwt7F+dKamt1+Y0OO0HlwTIuB/eUjbg+ 17 | DD8MfVVHAP9cA7weWCFFyflC2SkzdDt6PqYjoGV4h767i9GfdyS4eOt+mp/Uqt3m 18 | qfmpg9wy51JTTA== 19 | -----END CERTIFICATE----- 20 | -------------------------------------------------------------------------------- /keys/cassandra-client.csr: -------------------------------------------------------------------------------- 1 | -----BEGIN NEW CERTIFICATE REQUEST----- 2 | MIIC7DCCAdQCAQAwSTELMAkGA1UEBhMCVVMxDTALBgNVBAoTBFNGUEwxEDAOBgNV 3 | BAsTB1NUQUdJTkcxGTAXBgNVBAMTEENBU1NBTkRSQS1DTElFTlQwggEiMA0GCSqG 4 | SIb3DQEBAQUAA4IBDwAwggEKAoIBAQCUBEAUxPN3m1wo3TBu6O79TwVfOTij0kVD 5 | 
1kp3HA4yPJF2g54s1Lufy6lbdtIyYmFfDqN+HY4Pz2FAKkdpdnak8FZ0bjhmlyYA 6 | zMQvNFi9OZBWlOJKGCJaizPuI0ITh72I/qpMWuV/fV3ujk09dosBFHtYA5w6CZVz 7 | QvFbku4Za64P0SRLPKqabP6jNoRRB6ZvoocHx3jFamd9FVY9OFcIm2QjuQTlHGzc 8 | vYSEN3gaa5IU+p4XkiakzDPL83eyP5R9IPzspCQUTBHP+dBJbYxIkHGyzMq0EWC6 9 | bc/0mssY9YOLyWLba1NM7Qp0rt4d313HxR7YPp3dkhazbZrAnMVLAgMBAAGgXjBc 10 | BgkqhkiG9w0BCQ4xTzBNMCwGA1UdEQQlMCOCIWNhc3NhbmRyYS1jbGllbnQuc3Rh 11 | Z2luZy5zZnBsLmNvbTAdBgNVHQ4EFgQUAaNC0QtniIPNZ5gsRQ2G0mgDelYwDQYJ 12 | KoZIhvcNAQELBQADggEBAGP5tO2Lr8ylfVwOaadApwjAX/FTk2ksa4jiL23Rc3CY 13 | mpXQ7Y7ao0UoHbImQzOIGA1ZqDaoV4smhpWqLyTabr61nPyciSB96bG98zmVzG/+ 14 | ITecOxIaY8QAikRLmer5jfaMRP2E9ZzLYVPDGBno+BinEioNTLfD8vVCv9v5ocme 15 | w1WeN3DJknCiesHG9aoxD7Af57gNJH8bZ1eu+eaTOBfvtTcU66+vxJtyVRed04hM 16 | 60JcxnyAKiVT7hINI1Vw5cE5dYaWSt3fe5YtGUPFLoYQ6MFjUl0hjHYbc+JBzEaX 17 | +DJyXYeZIm90e/Mjk3j0tw9mY81yOdPVuMErnn0C8O4= 18 | -----END NEW CERTIFICATE REQUEST----- 19 | -------------------------------------------------------------------------------- /keys/nats-client.crt: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIIDDDCCAfQCFH2/cHSlr5JtEGX7aLrfRkdOQeWgMA0GCSqGSIb3DQEBCwUAMD8x 3 | DzANBgNVBAMMBnJvb3RDYTEQMA4GA1UECwwHU1RBR0lORzENMAsGA1UECgwEU0ZQ 4 | TDELMAkGA1UEBhMCVVMwIBcNMTkwNjE5MTg0MDUyWhgPMjEwMTA4MDgxODQwNTJa 5 | MEQxCzAJBgNVBAYTAlVTMQ0wCwYDVQQKEwRTRlBMMRAwDgYDVQQLEwdTVEFHSU5H 6 | MRQwEgYDVQQDEwtOQVRTLUNMSUVOVDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC 7 | AQoCggEBALlMJp3PxTdlRK2LlFsW2xrZYKQWFHN2NQoNXWoViOiCL9YsPk6omA7+ 8 | HF8UcMLysKYBw1G0xRs/HJsl6PPjTFrwBnqBxnWPzLM9IiKkaUjRzqMPO7O3yY8s 9 | Gsu4A2n1QhtQ+lZIeoDi68MsVNCAfxjvbhmgLfWZurl29yCCP6bWkwyhdOiUfT+7 10 | 149HHqgTExw2L7SP3WdGzx3EZq/u2Naw9fj9HpiLpcdv8HibygFhteGoM5tI1Dqh 11 | 76EyWrn5XKl1vhoMYw0Dy33rHhrgHGXk0Erjz2TbNAvh4FCRbOj3/GMsylWLqlVi 12 | 4d3iQniXLfS/HIo4rXYcnw6iWvp3/hUCAwEAATANBgkqhkiG9w0BAQsFAAOCAQEA 13 | pjEXiB0TAzZ5Pa7hGTGzsWjl7zeFnZjdpjo295byNCt2XaownSLlxtOHoE6x1BiQ 14 | 
QR9Pd/lnJKKQFdLjgC1OVWonmikJe/CgEVYGL1R3Y5Xeic8dW7+Pxu5KWd8SgOis 15 | pKxfJYpeTaw6Ew1LnoW1T0UeFO2Sf45TRYKnSXXxy8oNpdvSZiIXXOnjiqzmKH4k 16 | eNaNTh+uz9FaBcqxLglY/TH+bottMvn0IuRgffwATzILlNmqsbtRxUOmjEl64QV6 17 | rqNatVaYcW68BbEiwkSQEFQu+C2eeFmS9VhxauH46VeHsJ0zUda0lJ50P11lzgNI 18 | miDu/sN228c8p+a5Wz/zIA== 19 | -----END CERTIFICATE----- 20 | -------------------------------------------------------------------------------- /keys/nats-server.crt: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIIDDDCCAfQCFH2/cHSlr5JtEGX7aLrfRkdOQeWfMA0GCSqGSIb3DQEBCwUAMD8x 3 | DzANBgNVBAMMBnJvb3RDYTEQMA4GA1UECwwHU1RBR0lORzENMAsGA1UECgwEU0ZQ 4 | TDELMAkGA1UEBhMCVVMwIBcNMTkwNjE5MTg0MDUwWhgPMjEwMTA4MDgxODQwNTBa 5 | MEQxCzAJBgNVBAYTAlVTMQ0wCwYDVQQKEwRTRlBMMRAwDgYDVQQLEwdTVEFHSU5H 6 | MRQwEgYDVQQDEwtOQVRTLVNFUlZFUjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC 7 | AQoCggEBALjTlhbLD+W37RI4XfxyYErtE+7WG1TFm/CMIybIgzJgtimgWyjpgudl 8 | UBOzFKtaNzjveA/tfU81zgKQiJGj8bSKsDj/uUdsRdZoBZlE8me3EjGkDVjGtd/d 9 | sSEWcrBJEltafy9ssHAHlkF4ZvLoe06H65+bzdNkANdFF98nflt4yI5qip2TDVoc 10 | lajJbeNeKaWHBnpgWD8UuYwbzZiG+B65HEOdn5Leo+/c8RpRuM10z0z0WMx5/mvv 11 | /B3hZAD96YnNnOPI3HFHqoCHnA9syikPDyB0t5/0oetwhNt2i/ns9E5W3etYIoMD 12 | hXrb9Yuph42MeYdof75R6H6/Vaw11OsCAwEAATANBgkqhkiG9w0BAQsFAAOCAQEA 13 | jcv41csbDS9zvoiDzAOQOtbLkSFs9U+95C+miW+wgTpxaBfYskx4RC/DRm5xvr8g 14 | 7Ll27VrMTpPGqxOlADyvWy/bl/j2dGo9CxLaE3usH0hnUfp7WYVU4Lyrq+xiMHo8 15 | P0pdWqsO1Z76WVuij2FoquR8hRyY0X/MPVnY3hRs/mPNTjKpIc+ShO5m9RYZndjy 16 | yrUXN39R2O5vWsBpjVKHJbM3SmGVlMaKI9DwAZBs9rX2QS3KkqQ2H/2vt+AfN9Dk 17 | VfMcFdC0D+zFu70X4MtUz3NdPSTRtpkOyOY5EWS0to4hyZBDOpGGLKKsFPvkg5nl 18 | KDCo1q+KKGy6FEw8rEz4iA== 19 | -----END CERTIFICATE----- 20 | -------------------------------------------------------------------------------- /keys/cassandra-client.crt: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | 
MIIDETCCAfkCFH2/cHSlr5JtEGX7aLrfRkdOQeWeMA0GCSqGSIb3DQEBCwUAMD8x 3 | DzANBgNVBAMMBnJvb3RDYTEQMA4GA1UECwwHU1RBR0lORzENMAsGA1UECgwEU0ZQ 4 | TDELMAkGA1UEBhMCVVMwIBcNMTkwNjE5MTg0MDQ4WhgPMjEwMTA4MDgxODQwNDha 5 | MEkxCzAJBgNVBAYTAlVTMQ0wCwYDVQQKEwRTRlBMMRAwDgYDVQQLEwdTVEFHSU5H 6 | MRkwFwYDVQQDExBDQVNTQU5EUkEtQ0xJRU5UMIIBIjANBgkqhkiG9w0BAQEFAAOC 7 | AQ8AMIIBCgKCAQEAlARAFMTzd5tcKN0wbuju/U8FXzk4o9JFQ9ZKdxwOMjyRdoOe 8 | LNS7n8upW3bSMmJhXw6jfh2OD89hQCpHaXZ2pPBWdG44ZpcmAMzELzRYvTmQVpTi 9 | ShgiWosz7iNCE4e9iP6qTFrlf31d7o5NPXaLARR7WAOcOgmVc0LxW5LuGWuuD9Ek 10 | Szyqmmz+ozaEUQemb6KHB8d4xWpnfRVWPThXCJtkI7kE5Rxs3L2EhDd4GmuSFPqe 11 | F5ImpMwzy/N3sj+UfSD87KQkFEwRz/nQSW2MSJBxsszKtBFgum3P9JrLGPWDi8li 12 | 22tTTO0KdK7eHd9dx8Ue2D6d3ZIWs22awJzFSwIDAQABMA0GCSqGSIb3DQEBCwUA 13 | A4IBAQBJLXIec5leHGQ6KVdBeReMwhKWdnpf8smp7ADOW4qkEwScRqIYixDiEe4t 14 | Y8gwOq6knQcB434ato750hSLpnKvlPhRa2WOOU+5yeEia5S2pbyUb4cgodwJHAy8 15 | sLvEWvIayBO5F2H/bVZcWuOll2pTmLtD6jBnl1kwZaZ0ClI06SzFi8OyGe1hjzaS 16 | x/OVicsHUY0Q2DeUPZv44f6CPFybXMgpgALLJx1yPnLoeYuE/eY/1PWe47CiyUKw 17 | m9P2pnEkHhT8bfDDev8hyfsSXG3OzemAZsxCneZfajyGW/meqK5qW8sgWqjJEV9k 18 | Qi0WBxNwAzII9SoHlJVBDY0qAkvx 19 | -----END CERTIFICATE----- 20 | -------------------------------------------------------------------------------- /keys/cassandra-server.crt: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIIDETCCAfkCFH2/cHSlr5JtEGX7aLrfRkdOQeWdMA0GCSqGSIb3DQEBCwUAMD8x 3 | DzANBgNVBAMMBnJvb3RDYTEQMA4GA1UECwwHU1RBR0lORzENMAsGA1UECgwEU0ZQ 4 | TDELMAkGA1UEBhMCVVMwIBcNMTkwNjE5MTg0MDQ3WhgPMjEwMTA4MDgxODQwNDda 5 | MEkxCzAJBgNVBAYTAlVTMQ0wCwYDVQQKEwRTRlBMMRAwDgYDVQQLEwdTVEFHSU5H 6 | MRkwFwYDVQQDExBDQVNTQU5EUkEtU0VSVkVSMIIBIjANBgkqhkiG9w0BAQEFAAOC 7 | AQ8AMIIBCgKCAQEAhwxwELrrQAoWxFlU35rAoOZP1+oXUeWUbs/yOhMMymsCJQLK 8 | 2G7nJSKWB5bt27y471Ge06b9B19Unixb7+S/D+Xefh4DZvypSbj0U2lExLknkuRs 9 | /ZjIvl96CWBc6wv0WpPomTk5x8RlUIi+f9tF3Y+CPSjA7y8Hz5BzNuttFheVe4Bn 10 | 
bEumSOR9m+s/M4uKremeIQ/P4mL8JWa8aG5iJtYjpg6XVKmTmYDUjndhtyj+OEBM 11 | saez6A1AmHMXWvYGYQ+EQ9eXtgv/f+sbF6G/lELoqNkG44ka15gLQ5ZnKfSzeWmI 12 | Kl0gbYgZG5BW6y52J+EzYp5xCPeMFzTJTK3hQwIDAQABMA0GCSqGSIb3DQEBCwUA 13 | A4IBAQAh722bv6BvhTcUw+iMIIhgMpH9fzQ0/yG56/mZtp1uIlcLXRoSZ9sJUjff 14 | mAc/MskUXmukdHbc1ibloENQ1euodpcZP73pyczC/DQLjSrP4xdGiQnRuUT+dA8M 15 | q+AnbPC/wpunbe+7Mu2/Wix4h/qD+u1iIpLFe+R4fn5sYV0gn3OHcivj67eApCcY 16 | ekVyHj7jnmFdueF2/VpZzgFvS+rFdy4IV9X5SgSEyZ9xQxmKQaxbXuulNnCJYuje 17 | 3aUb48dKP2mzF77d+jJLYOY3LbVZsdmeAINcxUHPxXYZqubGB9M7lvQMLnXA3Cww 18 | Iht4LCETVXJutxROwulRczX75zqt 19 | -----END CERTIFICATE----- 20 | -------------------------------------------------------------------------------- /keys/nginx.csr: -------------------------------------------------------------------------------- 1 | -----BEGIN NEW CERTIFICATE REQUEST----- 2 | MIIDADCCAegCAQAwPjELMAkGA1UEBhMCVVMxDTALBgNVBAoTBFNGUEwxEDAOBgNV 3 | BAsTB1NUQUdJTkcxDjAMBgNVBAMTBU5HSU5YMIIBIjANBgkqhkiG9w0BAQEFAAOC 4 | AQ8AMIIBCgKCAQEAhwdocrhhx9jzNXT6RzoaiwbDUczSF1maAKIomdvUBGCwDUNZ 5 | 6IrLkk4AVKHMNU1cJnUTlgsFo9FhStMWSfZWtr23N3zTN3/YzqMMzty4r0TjLcdX 6 | uhD4tn4eO3WJ65A/IsyOa6S/3OchNL7wELBj1InF9lBYCphH/nWgma2Ft5U51oi7 7 | PojxPxtIH+N0AR4pviZWxk8+FoFn2fmZ4pTsLvrGNh5oz/ZxfOCHAO95v649hOY7 8 | o/po0P58ZPpZlBVNO+yRqQ6OczGFM0StyyxMqRt3gjgj/UOGUJf7lRrSuCAjYal9 9 | 2A2qlj9iqpK/UEA+1nphlEBgDQ6gbK9a4J4r7wIDAQABoH0wewYJKoZIhvcNAQkO 10 | MW4wbDBLBgNVHREERDBCghRhcGkuc3RhZ2luZy5zZnBsLmNvbYIUYnBzLnN0YWdp 11 | bmcuc2ZwbC5jb22CFHNzby5zdGFnaW5nLnNmcGwuY29tMB0GA1UdDgQWBBTt4uxD 12 | gXdVKcpJ3fgaopMaoas+wTANBgkqhkiG9w0BAQsFAAOCAQEAYFq85WxAOdYG3slr 13 | FPVetzkk64xUYfWSuYLsuY3RJxup4FyYznMeYeSax9B3eetyQMKSxrLbtw1MLTGX 14 | lNb5G7P976pYtOi5tvlA+DLkT3PRbyUiW53UT/cHov8b2GWe772QFb6Ble3kl5Io 15 | hF7aGIAqmTDsTiBepMZuobzQ6lodG9a72/IFJRCDXjd2TvncGLL0BJEho1/r5Ce4 16 | 2eFYVtNwiwTZQpN9EBGo3JBtn6uXjMZKMMwba2FJDINQiAD6EA43dRNdFXfDdUlw 17 | uFDlNtNImlD2kCzi56q0J6us9Hp9psNMSNzZKNE1solSF8o6rBsoKeAU3v5H5AsN 18 | +DwW4Q== 19 | -----END NEW 
CERTIFICATE REQUEST----- 20 | -------------------------------------------------------------------------------- /dcrontab/gorocksdb/cow.go: -------------------------------------------------------------------------------- 1 | package gorocksdb 2 | 3 | import ( 4 | "sync" 5 | "sync/atomic" 6 | ) 7 | 8 | // COWList implements a copy-on-write list. It is intended to be used by go 9 | // callback registry for CGO, which is read-heavy with occasional writes. 10 | // Reads do not block; Writes do not block reads (or vice versa), but only 11 | // one write can occur at once; 12 | type COWList struct { 13 | v *atomic.Value 14 | mu *sync.Mutex 15 | } 16 | 17 | // NewCOWList creates a new COWList. 18 | func NewCOWList() *COWList { 19 | var list []interface{} 20 | v := &atomic.Value{} 21 | v.Store(list) 22 | return &COWList{v: v, mu: new(sync.Mutex)} 23 | } 24 | 25 | // Append appends an item to the COWList and returns the index for that item. 26 | func (c *COWList) Append(i interface{}) int { 27 | c.mu.Lock() 28 | defer c.mu.Unlock() 29 | list := c.v.Load().([]interface{}) 30 | newLen := len(list) + 1 31 | newList := make([]interface{}, newLen) 32 | copy(newList, list) 33 | newList[newLen-1] = i 34 | c.v.Store(newList) 35 | return newLen - 1 36 | } 37 | 38 | // Get gets the item at index. 
39 | func (c *COWList) Get(index int) interface{} { 40 | list := c.v.Load().([]interface{}) 41 | return list[index] 42 | } 43 | -------------------------------------------------------------------------------- /dcrontab/gorocksdb/slice.go: -------------------------------------------------------------------------------- 1 | package gorocksdb 2 | 3 | // #include 4 | import "C" 5 | import "unsafe" 6 | 7 | // Slice is used as a wrapper for non-copy values 8 | type Slice struct { 9 | data *C.char 10 | size C.size_t 11 | freed bool 12 | } 13 | 14 | type Slices []*Slice 15 | 16 | func (slices Slices) Destroy() { 17 | for _, s := range slices { 18 | s.Free() 19 | } 20 | } 21 | 22 | // NewSlice returns a slice with the given data. 23 | func NewSlice(data *C.char, size C.size_t) *Slice { 24 | return &Slice{data, size, false} 25 | } 26 | 27 | // StringToSlice is similar to NewSlice, but can be called with 28 | // a Go string type. This exists to make testing integration 29 | // with Gorocksdb easier. 30 | func StringToSlice(data string) *Slice { 31 | return NewSlice(C.CString(data), C.size_t(len(data))) 32 | } 33 | 34 | // Data returns the data of the slice. 35 | func (s *Slice) Data() []byte { 36 | return charToByte(s.data, s.size) 37 | } 38 | 39 | // Size returns the size of the data. 40 | func (s *Slice) Size() int { 41 | return int(s.size) 42 | } 43 | 44 | // Free frees the slice data. 
45 | func (s *Slice) Free() { 46 | if !s.freed { 47 | C.free(unsafe.Pointer(s.data)) 48 | s.freed = true 49 | } 50 | } 51 | -------------------------------------------------------------------------------- /keys/nats-server.csr: -------------------------------------------------------------------------------- 1 | -----BEGIN NEW CERTIFICATE REQUEST----- 2 | MIIDDjCCAfYCAQAwRDELMAkGA1UEBhMCVVMxDTALBgNVBAoTBFNGUEwxEDAOBgNV 3 | BAsTB1NUQUdJTkcxFDASBgNVBAMTC05BVFMtU0VSVkVSMIIBIjANBgkqhkiG9w0B 4 | AQEFAAOCAQ8AMIIBCgKCAQEAuNOWFssP5bftEjhd/HJgSu0T7tYbVMWb8IwjJsiD 5 | MmC2KaBbKOmC52VQE7MUq1o3OO94D+19TzXOApCIkaPxtIqwOP+5R2xF1mgFmUTy 6 | Z7cSMaQNWMa1392xIRZysEkSW1p/L2ywcAeWQXhm8uh7Tofrn5vN02QA10UX3yd+ 7 | W3jIjmqKnZMNWhyVqMlt414ppYcGemBYPxS5jBvNmIb4HrkcQ52fkt6j79zxGlG4 8 | zXTPTPRYzHn+a+/8HeFkAP3pic2c48jccUeqgIecD2zKKQ8PIHS3n/Sh63CE23aL 9 | +ez0Tlbd61gigwOFetv1i6mHjYx5h2h/vlHofr9VrDXU6wIDAQABoIGEMIGBBgkq 10 | hkiG9w0BCQ4xdDByMFEGA1UdEQRKMEiCFm5hdHMxLnN0YWdpbmcuc2ZwbC5jb22C 11 | Fm5hdHMyLnN0YWdpbmcuc2ZwbC5jb22CFm5hdHMzLnN0YWdpbmcuc2ZwbC5jb20w 12 | HQYDVR0OBBYEFPQJFhApWzCTkOGFtCO2duNbiMsBMA0GCSqGSIb3DQEBCwUAA4IB 13 | AQBNl6nV6ZMdNL/M9t4Y7aXMYvZECEVv4Ggw5hWxIEpj+rodT0aU0bex7y6ayJ+C 14 | A39tCAULlmQRpiCNuHFs+XOrZg5fPF68wJPf774MKg5DO2GlgccOvr4B6Sbn1TTO 15 | 03MlNITGLTZRPwTizaC63YVNKiwFndMO/1+EGs7Z3Emh9QQz2jf9vAkpUKzCoyeI 16 | lASBGz4fGzbfFzax0g202jcMf9D2TvKKmtUOu251FiTSTBtMTO8z+A65oLxCmCv+ 17 | XXymVq1BI4W+bkIBhiROhGbAD/E9Uq5h86IXiTH/5iXQCyZn3eW7aFF2d4z2V7tv 18 | tLKL7BDyHJAocIHZAuZaAXXA 19 | -----END NEW CERTIFICATE REQUEST----- 20 | -------------------------------------------------------------------------------- /dcrontab/gorocksdb/env.go: -------------------------------------------------------------------------------- 1 | package gorocksdb 2 | 3 | // #include "rocksdb/c.h" 4 | import "C" 5 | 6 | // Env is a system call environment used by a database. 7 | type Env struct { 8 | c *C.rocksdb_env_t 9 | } 10 | 11 | // NewDefaultEnv creates a default environment. 
12 | func NewDefaultEnv() *Env { 13 | return NewNativeEnv(C.rocksdb_create_default_env()) 14 | } 15 | 16 | // NewNativeEnv creates a Environment object. 17 | func NewNativeEnv(c *C.rocksdb_env_t) *Env { 18 | return &Env{c} 19 | } 20 | 21 | // SetBackgroundThreads sets the number of background worker threads 22 | // of a specific thread pool for this environment. 23 | // 'LOW' is the default pool. 24 | // Default: 1 25 | func (env *Env) SetBackgroundThreads(n int) { 26 | C.rocksdb_env_set_background_threads(env.c, C.int(n)) 27 | } 28 | 29 | // SetHighPriorityBackgroundThreads sets the size of the high priority 30 | // thread pool that can be used to prevent compactions from stalling 31 | // memtable flushes. 32 | func (env *Env) SetHighPriorityBackgroundThreads(n int) { 33 | C.rocksdb_env_set_high_priority_background_threads(env.c, C.int(n)) 34 | } 35 | 36 | // Destroy deallocates the Env object. 37 | func (env *Env) Destroy() { 38 | C.rocksdb_env_destroy(env.c) 39 | env.c = nil 40 | } 41 | -------------------------------------------------------------------------------- /keys/cassandra-server.csr: -------------------------------------------------------------------------------- 1 | -----BEGIN NEW CERTIFICATE REQUEST----- 2 | MIIDJDCCAgwCAQAwSTELMAkGA1UEBhMCVVMxDTALBgNVBAoTBFNGUEwxEDAOBgNV 3 | BAsTB1NUQUdJTkcxGTAXBgNVBAMTEENBU1NBTkRSQS1TRVJWRVIwggEiMA0GCSqG 4 | SIb3DQEBAQUAA4IBDwAwggEKAoIBAQCHDHAQuutAChbEWVTfmsCg5k/X6hdR5ZRu 5 | z/I6EwzKawIlAsrYbuclIpYHlu3bvLjvUZ7Tpv0HX1SeLFvv5L8P5d5+HgNm/KlJ 6 | uPRTaUTEuSeS5Gz9mMi+X3oJYFzrC/Rak+iZOTnHxGVQiL5/20Xdj4I9KMDvLwfP 7 | kHM2620WF5V7gGdsS6ZI5H2b6z8zi4qt6Z4hD8/iYvwlZrxobmIm1iOmDpdUqZOZ 8 | gNSOd2G3KP44QEyxp7PoDUCYcxda9gZhD4RD15e2C/9/6xsXob+UQuio2QbjiRrX 9 | mAtDlmcp9LN5aYgqXSBtiBkbkFbrLnYn4TNinnEI94wXNMlMreFDAgMBAAGggZUw 10 | gZIGCSqGSIb3DQEJDjGBhDCBgTBgBgNVHREEWTBXghtjYXNzYW5kcmExLnN0YWdp 11 | bmcuc2ZwbC5jb22CG2Nhc3NhbmRyYTIuc3RhZ2luZy5zZnBsLmNvbYIbY2Fzc2Fu 12 | 
ZHJhMy5zdGFnaW5nLnNmcGwuY29tMB0GA1UdDgQWBBQj+EDOhCUu/3BEfxE2Qf3s 13 | fIjc/TANBgkqhkiG9w0BAQsFAAOCAQEAN0CmRbIGW9dobnueRdHrV46G1p/sfYkm 14 | soL1b4/czXhKrsm4t4R7WnOrevjU+kg4KJD4Cx+h3Wx1BEeXpEcA6Y2ke1xBPIB1 15 | bCLCB6JHVzj0p+o/ITlVOcjKiQedgEItr/2VSfaTotKWE4yeO8dHXZOYGbvtE08c 16 | X6dtl96YD4I323zBGakAxtZnu8X4IYZ8VNGoQIOBwSw4/QywgLf3bkDsJ3LbSMAY 17 | 2le7lj1YuYNct97wm3lOlpVns+EgZgJ+doQ8dTE40SGDa6vF7sznVwk5gXOYFTox 18 | oCjCQcf/xtOFhIUuJnpMYqyOsFo62eJ2AjxNGCysYSAmPDaOLlxr2Q== 19 | -----END NEW CERTIFICATE REQUEST----- 20 | -------------------------------------------------------------------------------- /keys/cert.pem: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIIDXTCCAkWgAwIBAgIJAKLdQVPy90jjMA0GCSqGSIb3DQEBCwUAMEUxCzAJBgNV 3 | BAYTAkFVMRMwEQYDVQQIDApTb21lLVN0YXRlMSEwHwYDVQQKDBhJbnRlcm5ldCBX 4 | aWRnaXRzIFB0eSBMdGQwHhcNMTkwMjAzMTQ0OTM1WhcNMjAwMjAzMTQ0OTM1WjBF 5 | MQswCQYDVQQGEwJBVTETMBEGA1UECAwKU29tZS1TdGF0ZTEhMB8GA1UECgwYSW50 6 | ZXJuZXQgV2lkZ2l0cyBQdHkgTHRkMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB 7 | CgKCAQEA7i7IIEdICTiSTVx+ma6xHxOtcbd6wGW3nkxlCkJ1UuV8NmY5ovMsGnGD 8 | hJJtUQ2j5ig5BcJUf3tezqCNW4tKnSOgSISfEAKvpn2BPvaFq3yx2Yjz0ruvcGKp 9 | DMZBXmB/AAtGyN/UFXzkrcfppmLHJTaBYGG6KnmU43gPkSDy4iw46CJFUOupc51A 10 | FIz7RsE7mbT1plCM8e75gfqaZSn2k+Wmy+8n1HGyYHhVISRVvPqkS7gVLSVEdTea 11 | UtKP1Vx/818/HDWk3oIvDVWI9CFH73elNxBkMH5zArSNIBTehdnehyAevjY4RaC/ 12 | kK8rslO3e4EtJ9SnA4swOjCiqAIQEwIDAQABo1AwTjAdBgNVHQ4EFgQUv5rc9Smm 13 | 9c4YnNf3hR49t4rH4yswHwYDVR0jBBgwFoAUv5rc9Smm9c4YnNf3hR49t4rH4ysw 14 | DAYDVR0TBAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAQEATcL9CAAXg0u//eYUAlQa 15 | L+l8yKHS1rsq1sdmx7pvsmfZ2g8ONQGfSF3TkzkI2OOnCBokeqAYuyT8awfdNUtE 16 | EHOihv4ZzhK2YZVuy0fHX2d4cCFeQpdxno7aN6B37qtsLIRZxkD8PU60Dfu9ea5F 17 | DDynnD0TUabna6a0iGn77yD8GPhjaJMOz3gMYjQFqsKL252isDVHEDbpVxIzxPmN 18 | w1+WK8zRNdunAcHikeoKCuAPvlZ83gDQHp07dYdbuZvHwGj0nfxBLc9qt90XsBtC 19 | 4IYR7c/bcLMmKXYf0qoQ4OzngsnPI5M+v9QEHvYWaKVwFY4CTcSNJEwfXw+BAeO5 20 | OA== 21 
| -----END CERTIFICATE----- -------------------------------------------------------------------------------- /keys/rootCa.crt: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIIDYTCCAkmgAwIBAgIULXnApfOY4/s+IXpAEXMHorIKLDgwDQYJKoZIhvcNAQEL 3 | BQAwPzEPMA0GA1UEAwwGcm9vdENhMRAwDgYDVQQLDAdTVEFHSU5HMQ0wCwYDVQQK 4 | DARTRlBMMQswCQYDVQQGEwJVUzAgFw0xOTA2MTkxODQwNDRaGA8yMTAxMDgwODE4 5 | NDA0NFowPzEPMA0GA1UEAwwGcm9vdENhMRAwDgYDVQQLDAdTVEFHSU5HMQ0wCwYD 6 | VQQKDARTRlBMMQswCQYDVQQGEwJVUzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC 7 | AQoCggEBAMT87ZxF3AtCdKR6wCUbUwAT14/BZdapIdEC+IEtIdzT1c+RnXjNefFU 8 | q2tAJ9L0KZ0nMXZClp4/9J/0Y9hEx84uXe61riOOIWp/iL35jorZC2jY8gVaVpLM 9 | v1ZvFXQV4coeI+zoP6BY2qzKt9om6ZYzfnTL0kF7k1TR5w67YqTQhyNyavpecrTe 10 | jhZcruuU2ya5ltafmHJhWWpxuw8Crv2pArtRY3LKyEzIBFjSE5XN6M38C5XPC0ZX 11 | +DLGtpDnedQMTP6n90EeeWcFwqe/xJYrpzyxH3qwMu/HttyWrEpJNIuvKyZobMpS 12 | mGks5MEHG/Cy4EXi7Gb7aps8YeqFmxkCAwEAAaNTMFEwHQYDVR0OBBYEFN9+Bc/+ 13 | /mi56ZLImNqyNRt/8CF4MB8GA1UdIwQYMBaAFN9+Bc/+/mi56ZLImNqyNRt/8CF4 14 | MA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAFnGiUZT/sMlcJjn 15 | wdgqq91j0an3Mf29Zt5BnUZjkMUT6hMSMQOEjGTgYuTGC0Iha0V/pXsOdMqpwomK 16 | xxS2oU1a48O0PBflS0AUhwJ8n7jNxbnCw+KgjsZihqXqZXnqB6lSZ5ogzdIbZUXg 17 | VQw6CrLZSX3V6qwjyjY/9XQxFqKUKbaJTmDMrY7RUdVZ5z2pl4XkEltPOl6+SFjF 18 | DWoGaoNDhxVGgqBBZxL02d3nT8r0rE2Xa+uoN7f2YbQVlhsvcKkJbiuTqQ8/ss+f 19 | cYnRLLp5TY5hdGgXQTjMDb8m9OfWzAYzCtvJS90839tFBS80yi/1M24hxUa85Gqk 20 | INXq/II= 21 | -----END CERTIFICATE----- 22 | -------------------------------------------------------------------------------- /keys/node1.crt: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIIDojCCAYoCAQEwDQYJKoZIhvcNAQELBQAwWTELMAkGA1UEBhMCQVUxEzARBgNV 3 | BAgMClNvbWUtU3RhdGUxITAfBgNVBAoMGEludGVybmV0IFdpZGdpdHMgUHR5IEx0 4 | ZDESMBAGA1UEAwwJbG9jYWxob3N0MB4XDTE5MDkzMDIwMjYyNFoXDTIwMDkyOTIw 5 | 
MjYyNFowWTELMAkGA1UEBhMCQVUxEzARBgNVBAgMClNvbWUtU3RhdGUxITAfBgNV 6 | BAoMGEludGVybmV0IFdpZGdpdHMgUHR5IEx0ZDESMBAGA1UEAwwJbG9jYWxob3N0 7 | MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDGiACbds8OvCSFBTjaSzCIFScv 8 | Csg7eY1SuWdqTlSann7P83YHWpbLv+tYmOVKEn4DbA4+gqix9BkY1uI8dtG7Fgxk 9 | pfka2gevKGvyZPZ/wwiEj8d76TO7WBkbCJz8wVTIyS4DO6Hwzf/ViZ2lthRDyRXf 10 | 5fhKKPkL3Flh86D3iwIDAQABMA0GCSqGSIb3DQEBCwUAA4ICAQBeo3YKPYCGo+k7 11 | 2qVf7IVl1asRsOR9fhAkumd1TbWA/N3uZQP34/mYX8EXoaW/haeTMBv1G75uK596 12 | ZSb1gxN4kCpcooN1VCf7FJKUSbZTOGB/Z7l77T/FDfkAkMtWpulIc5lzM+SqubTj 13 | mVKhUsQr/7VvgSTHZNWR0rZTuuW+WhowCHdvCmKZpD1kNIm3Ek3U0cCTy9Q8OdZB 14 | uHXdplXnZSfm0d+g3U/DaVexpY2AxvNpx7aFxuGZ/5EcZCss27n5Ge44+fdjD+eV 15 | 3LfURnefoZtn09fhYWZqKkKGHZun67XXAUny8RzlLBa8rPR5bcTSbZpLb/qxP7RS 16 | MNqpZ2leRwbgmcTG+/ex9Evk73+EdbYICM3dCKRI8ixxyfqw6H54SpjaLrwwynZJ 17 | IHI5bfszvGmwhI9ZSlBLvp8+rzxLasU1JdPUlqQ7f6xlPznwyWBnE8sfJxRxxi5A 18 | putcz4aznzR7B4HUZHmN1pYRo744/4S+3Oma+m8etiRtcrNzNxb8KV03Xi+hEjed 19 | LSK6BXqrf/jQnzTe6/kcRIreRAKKLLkWIxwTLloactsahqsfkHwdUDpoTxva95i1 20 | IPNPfJW4GEO8Bca60PaigKGxa3LNkCVleAUzkDc2CFTtTa8XlEfmkT15XpJF6XUs 21 | gUvLwXQ3GwtWT48gEHH/ulSQzxufZA== 22 | -----END CERTIFICATE----- 23 | -------------------------------------------------------------------------------- /dcrontab/gorocksdb/dbpath.go: -------------------------------------------------------------------------------- 1 | package gorocksdb 2 | 3 | // #include 4 | // #include "rocksdb/c.h" 5 | import "C" 6 | import "unsafe" 7 | 8 | // DBPath represents options for a dbpath. 9 | type DBPath struct { 10 | c *C.rocksdb_dbpath_t 11 | } 12 | 13 | // NewDBPath creates a DBPath object 14 | // with the given path and target_size. 15 | func NewDBPath(path string, target_size uint64) *DBPath { 16 | cpath := C.CString(path) 17 | defer C.free(unsafe.Pointer(cpath)) 18 | return NewNativeDBPath(C.rocksdb_dbpath_create(cpath, C.uint64_t(target_size))) 19 | } 20 | 21 | // NewNativeDBPath creates a DBPath object. 
22 | func NewNativeDBPath(c *C.rocksdb_dbpath_t) *DBPath { 23 | return &DBPath{c} 24 | } 25 | 26 | // Destroy deallocates the DBPath object. 27 | func (dbpath *DBPath) Destroy() { 28 | C.rocksdb_dbpath_destroy(dbpath.c) 29 | } 30 | 31 | // NewDBPathsFromData creates a slice with allocated DBPath objects 32 | // from paths and target_sizes. 33 | func NewDBPathsFromData(paths []string, target_sizes []uint64) []*DBPath { 34 | dbpaths := make([]*DBPath, len(paths)) 35 | for i, path := range paths { 36 | targetSize := target_sizes[i] 37 | dbpaths[i] = NewDBPath(path, targetSize) 38 | } 39 | 40 | return dbpaths 41 | } 42 | 43 | // DestroyDBPaths deallocates all DBPath objects in dbpaths. 44 | func DestroyDBPaths(dbpaths []*DBPath) { 45 | for _, dbpath := range dbpaths { 46 | dbpath.Destroy() 47 | } 48 | } 49 | -------------------------------------------------------------------------------- /dcrontab/gorocksdb/options_write.go: -------------------------------------------------------------------------------- 1 | package gorocksdb 2 | 3 | // #include "rocksdb/c.h" 4 | import "C" 5 | 6 | // WriteOptions represent all of the available options when writing to a 7 | // database. 8 | type WriteOptions struct { 9 | c *C.rocksdb_writeoptions_t 10 | } 11 | 12 | // NewDefaultWriteOptions creates a default WriteOptions object. 13 | func NewDefaultWriteOptions() *WriteOptions { 14 | return NewNativeWriteOptions(C.rocksdb_writeoptions_create()) 15 | } 16 | 17 | // NewNativeWriteOptions creates a WriteOptions object. 18 | func NewNativeWriteOptions(c *C.rocksdb_writeoptions_t) *WriteOptions { 19 | return &WriteOptions{c} 20 | } 21 | 22 | // SetSync sets the sync mode. If true, the write will be flushed 23 | // from the operating system buffer cache before the write is considered complete. 24 | // If this flag is true, writes will be slower. 
25 | // Default: false 26 | func (opts *WriteOptions) SetSync(value bool) { 27 | C.rocksdb_writeoptions_set_sync(opts.c, boolToChar(value)) 28 | } 29 | 30 | // DisableWAL sets whether WAL should be active or not. 31 | // If true, writes will not first go to the write ahead log, 32 | // and the write may got lost after a crash. 33 | // Default: false 34 | func (opts *WriteOptions) DisableWAL(value bool) { 35 | C.rocksdb_writeoptions_disable_WAL(opts.c, C.int(btoi(value))) 36 | } 37 | 38 | // Destroy deallocates the WriteOptions object. 39 | func (opts *WriteOptions) Destroy() { 40 | C.rocksdb_writeoptions_destroy(opts.c) 41 | opts.c = nil 42 | } 43 | -------------------------------------------------------------------------------- /dcrontab/gorocksdb/array.go: -------------------------------------------------------------------------------- 1 | package gorocksdb 2 | 3 | // #include "stdlib.h" 4 | // #include "rocksdb/c.h" 5 | import "C" 6 | import ( 7 | "reflect" 8 | "unsafe" 9 | ) 10 | 11 | type charsSlice []*C.char 12 | type sizeTSlice []C.size_t 13 | type columnFamilySlice []*C.rocksdb_column_family_handle_t 14 | 15 | func (s charsSlice) c() **C.char { 16 | sH := (*reflect.SliceHeader)(unsafe.Pointer(&s)) 17 | return (**C.char)(unsafe.Pointer(sH.Data)) 18 | } 19 | 20 | func (s sizeTSlice) c() *C.size_t { 21 | sH := (*reflect.SliceHeader)(unsafe.Pointer(&s)) 22 | return (*C.size_t)(unsafe.Pointer(sH.Data)) 23 | } 24 | 25 | func (s columnFamilySlice) c() **C.rocksdb_column_family_handle_t { 26 | sH := (*reflect.SliceHeader)(unsafe.Pointer(&s)) 27 | return (**C.rocksdb_column_family_handle_t)(unsafe.Pointer(sH.Data)) 28 | } 29 | 30 | // bytesSliceToCSlices converts a slice of byte slices to two slices with C 31 | // datatypes. One containing pointers to copies of the byte slices and one 32 | // containing their sizes. 33 | // IMPORTANT: All the contents of the charsSlice array are malloced and 34 | // should be freed using the Destroy method of charsSlice. 
35 | func byteSlicesToCSlices(vals [][]byte) (charsSlice, sizeTSlice) { 36 | if len(vals) == 0 { 37 | return nil, nil 38 | } 39 | 40 | chars := make(charsSlice, len(vals)) 41 | sizes := make(sizeTSlice, len(vals)) 42 | for i, val := range vals { 43 | chars[i] = (*C.char)(C.CBytes(val)) 44 | sizes[i] = C.size_t(len(val)) 45 | } 46 | 47 | return chars, sizes 48 | } 49 | 50 | func (s charsSlice) Destroy() { 51 | for _, chars := range s { 52 | C.free(unsafe.Pointer(chars)) 53 | } 54 | } 55 | -------------------------------------------------------------------------------- /config.json: -------------------------------------------------------------------------------- 1 | { 2 | "ClusterID": 128, 3 | "Addresses": [ 4 | "localhost:63001", 5 | "localhost:63002", 6 | "localhost:63003" 7 | ], 8 | "UseHostnameIdentification": false, 9 | "Debug": true, 10 | "UseTLS": true, 11 | "TLSCert" : "./keys/node1.crt", 12 | "TLSKey" : "./keys/node1.key", 13 | "TLSCACert" : "./keys/ca.crt", 14 | "NatsService": { 15 | "Service": "nats", 16 | "Hosts": [ 17 | "tls://localhost:4222" 18 | ], 19 | "Context": "toc.track.notify", 20 | "Secure": true, 21 | "CACert": "./keys/rootCa.crt", 22 | "Cert": "./keys/nats-client.crt", 23 | "Key": "./keys/nats-client.key", 24 | "Format": "json" 25 | }, 26 | "Commands": [ 27 | { 28 | "Type": "shell", 29 | "Exec" : "ls", 30 | "Args" : "-al", 31 | "Timeout" : 300, 32 | "Secure": false, 33 | "Retry": 0, 34 | "Critical" : false, 35 | "Cron": { 36 | "Minute" : "52", 37 | "Hour" : "*", 38 | "Day" : "*", 39 | "Month" : "*", 40 | "Weekday" : "*", 41 | "Once": false 42 | }, 43 | "Comment" : "Demo command runs every 52nd minute" 44 | }, 45 | { 46 | "Type": "nats", 47 | "Exec" : "system.check", 48 | "Args" : "{'ok':true}", 49 | "Timeout" : 3000, 50 | "Secure": false, 51 | "Retry": 3, 52 | "Critical" :true 53 | } 54 | ] 55 | } 56 | -------------------------------------------------------------------------------- /dcrontab/gorocksdb/comparator.go: 
-------------------------------------------------------------------------------- 1 | package gorocksdb 2 | 3 | // #include "rocksdb/c.h" 4 | import "C" 5 | 6 | // A Comparator object provides a total order across slices that are 7 | // used as keys in an sstable or a database. 8 | type Comparator interface { 9 | // Three-way comparison. Returns value: 10 | // < 0 iff "a" < "b", 11 | // == 0 iff "a" == "b", 12 | // > 0 iff "a" > "b" 13 | Compare(a, b []byte) int 14 | 15 | // The name of the comparator. 16 | Name() string 17 | } 18 | 19 | // NewNativeComparator creates a Comparator object. 20 | func NewNativeComparator(c *C.rocksdb_comparator_t) Comparator { 21 | return nativeComparator{c} 22 | } 23 | 24 | type nativeComparator struct { 25 | c *C.rocksdb_comparator_t 26 | } 27 | 28 | func (c nativeComparator) Compare(a, b []byte) int { return 0 } 29 | func (c nativeComparator) Name() string { return "" } 30 | 31 | // Hold references to comperators. 32 | var comperators = NewCOWList() 33 | 34 | type comperatorWrapper struct { 35 | name *C.char 36 | comparator Comparator 37 | } 38 | 39 | func registerComperator(cmp Comparator) int { 40 | return comperators.Append(comperatorWrapper{C.CString(cmp.Name()), cmp}) 41 | } 42 | 43 | //export gorocksdb_comparator_compare 44 | func gorocksdb_comparator_compare(idx int, cKeyA *C.char, cKeyALen C.size_t, cKeyB *C.char, cKeyBLen C.size_t) C.int { 45 | keyA := charToByte(cKeyA, cKeyALen) 46 | keyB := charToByte(cKeyB, cKeyBLen) 47 | return C.int(comperators.Get(idx).(comperatorWrapper).comparator.Compare(keyA, keyB)) 48 | } 49 | 50 | //export gorocksdb_comparator_name 51 | func gorocksdb_comparator_name(idx int) *C.char { 52 | return comperators.Get(idx).(comperatorWrapper).name 53 | } 54 | -------------------------------------------------------------------------------- /wait-for: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | TIMEOUT=15 4 | QUIET=0 5 | 6 | echoerr() { 7 | if 
[ "$QUIET" -ne 1 ]; then printf "%s\n" "$*" 1>&2; fi 8 | } 9 | 10 | usage() { 11 | exitcode="$1" 12 | cat << USAGE >&2 13 | Usage: 14 | $cmdname host:port [-t timeout] [-- command args] 15 | -q | --quiet Do not output any status messages 16 | -t TIMEOUT | --timeout=timeout Timeout in seconds, zero for no timeout 17 | -- COMMAND ARGS Execute command with args after the test finishes 18 | USAGE 19 | exit "$exitcode" 20 | } 21 | 22 | wait_for() { 23 | for i in `seq $TIMEOUT` ; do 24 | nc -z "$HOST" "$PORT" > /dev/null 2>&1 25 | 26 | result=$? 27 | if [ $result -eq 0 ] ; then 28 | if [ $# -gt 0 ] ; then 29 | exec "$@" 30 | fi 31 | exit 0 32 | fi 33 | sleep 1 34 | done 35 | echo "Operation timed out" >&2 36 | exit 1 37 | } 38 | 39 | while [ $# -gt 0 ] 40 | do 41 | case "$1" in 42 | *:* ) 43 | HOST=$(printf "%s\n" "$1"| cut -d : -f 1) 44 | PORT=$(printf "%s\n" "$1"| cut -d : -f 2) 45 | shift 1 46 | ;; 47 | -q | --quiet) 48 | QUIET=1 49 | shift 1 50 | ;; 51 | -t) 52 | TIMEOUT="$2" 53 | if [ "$TIMEOUT" = "" ]; then break; fi 54 | shift 2 55 | ;; 56 | --timeout=*) 57 | TIMEOUT="${1#*=}" 58 | shift 1 59 | ;; 60 | --) 61 | shift 62 | break 63 | ;; 64 | --help) 65 | usage 0 66 | ;; 67 | *) 68 | echoerr "Unknown argument: $1" 69 | usage 1 70 | ;; 71 | esac 72 | done 73 | 74 | if [ "$HOST" = "" -o "$PORT" = "" ]; then 75 | echoerr "Error: you need to provide a host and port to test." 
76 | usage 2 77 | fi 78 | 79 | wait_for "$@" 80 | -------------------------------------------------------------------------------- /keys/nginx.key: -------------------------------------------------------------------------------- 1 | -----BEGIN RSA PRIVATE KEY----- 2 | MIIEogIBAAKCAQEAhwdocrhhx9jzNXT6RzoaiwbDUczSF1maAKIomdvUBGCwDUNZ 3 | 6IrLkk4AVKHMNU1cJnUTlgsFo9FhStMWSfZWtr23N3zTN3/YzqMMzty4r0TjLcdX 4 | uhD4tn4eO3WJ65A/IsyOa6S/3OchNL7wELBj1InF9lBYCphH/nWgma2Ft5U51oi7 5 | PojxPxtIH+N0AR4pviZWxk8+FoFn2fmZ4pTsLvrGNh5oz/ZxfOCHAO95v649hOY7 6 | o/po0P58ZPpZlBVNO+yRqQ6OczGFM0StyyxMqRt3gjgj/UOGUJf7lRrSuCAjYal9 7 | 2A2qlj9iqpK/UEA+1nphlEBgDQ6gbK9a4J4r7wIDAQABAoIBAF5/b7iBk2TSTvsF 8 | X7aDUlojpIPVzN4shduVoMZ0kv9xTyQpE33rzKYaUp7oEMro1enQG6HxGsRX3zgH 9 | XFzsuijvwo5hnEwF/Aw5DdijndiDZF4AWUPNZtIIl03TZ29lJtpKwA1TYjbNxm5s 10 | hgOX/nWa6eEEv2jCw/ewCHIkfyVHkeYCqIwxcO+e+KNhcV3iL8D7/cb7gzqIFObb 11 | luCVo1QnKR2e1syeXLTKphmO8mp3zpkBB8ORNfmjFWTvxq5bF9pZnu4o3O23I4Pv 12 | UwHuNSUDheLXXGFJ7hL6TOF6djfO6sKGdT3aS0a4Ekrg4vPvxWRj4ukGfDVyifqo 13 | wRz5+LkCgYEA1/xIALx60K0ZF49znI8S5pE8vJBreQW7gB6qbctin+n/DcXNGqCK 14 | LChG2dnZBKoZxz8r2CEFR3at08R+ZdYo4BxtHyvugaSsT/TfrlGTSttnEHpYADqv 15 | kn+gLDOHBRfHTvn9IAKfHmXkWlMCY6yA/YxwR1RPyvlORHk9rl2vko0CgYEAoAuJ 16 | BhySFMD2IzjjmYnErfYs2c6wn9pOXJgMLYNQkrAlsU0Uyv+WddbbJ5swtp2TBvQ7 17 | tcaAJ1koy3S5N8Z28v7FMXyYmdQBtbcHztMJOBVVz8os2KrN3uFWNPTRaLWlRxZt 18 | JSPCz/GfQDVjA5XYSWMgj6ubSeNIPxOEc56mV2sCgYBTsUNpXZ+titWlRoYDDYHC 19 | bFb/rmqmy6zbOYbK4rvFwZN+j74pajoAYF8L/ZlVcGBqz7S6nBkClx9GZuafqXvP 20 | U9SI9qoF/Hx0wyuFzcBoiloGjZHaDOOJvgCgABt6wrsNhFssczebPNpekbnkS6z0 21 | Et5TFX2R5kcIy342OcFV5QKBgHqK2ov3bFK/vLcTZBG3zwR0LbtH6y06di2nrgtb 22 | NGJcxT+QiY81S/tGW99jkFUDKgR1FRMFvjJirj26TedK6uD7+fj+Mcn27qg4UJ4o 23 | MDLlAyNq5hOwFJnHZ758harrCnX30fJSA2Q82XbMadfg8A6g+9mFyA03HaZOR2hP 24 | 3YWpAoGAS8T1iH7AccO41abixX8g5HujxHSrQIAQdDUSAPer0U8kK+MKcyFVj7mc 25 | 0mXOHXlm4iOzhJiIHom29lPfGNO3YqIqLfmKSkS3103kSxYwEwqM2Qvpam51gaaX 26 | wvZ6wS/79zql3QkfNitA0A1FDF9NXZkmMVvhr4nXYOzAcdSsQiw= 27 | 
-----END RSA PRIVATE KEY----- 28 | -------------------------------------------------------------------------------- /keys/nats-server.key: -------------------------------------------------------------------------------- 1 | -----BEGIN RSA PRIVATE KEY----- 2 | MIIEogIBAAKCAQEAuNOWFssP5bftEjhd/HJgSu0T7tYbVMWb8IwjJsiDMmC2KaBb 3 | KOmC52VQE7MUq1o3OO94D+19TzXOApCIkaPxtIqwOP+5R2xF1mgFmUTyZ7cSMaQN 4 | WMa1392xIRZysEkSW1p/L2ywcAeWQXhm8uh7Tofrn5vN02QA10UX3yd+W3jIjmqK 5 | nZMNWhyVqMlt414ppYcGemBYPxS5jBvNmIb4HrkcQ52fkt6j79zxGlG4zXTPTPRY 6 | zHn+a+/8HeFkAP3pic2c48jccUeqgIecD2zKKQ8PIHS3n/Sh63CE23aL+ez0Tlbd 7 | 61gigwOFetv1i6mHjYx5h2h/vlHofr9VrDXU6wIDAQABAoIBAFEMn/2g0HjZHgHm 8 | a/ifI3DX4+zRQSz8dvSWT3fbaAT4eglpA3C+baQRPntVBZRavGqgQJMjftb0bkGb 9 | pffP39dfFgX4xgP3C+0IsqPSo0Yt9rGEEkJC6VChQ+KRmHuLn5wl5C4kyH5oJVDS 10 | frq4jhIS8pWMWkb/zIqaBuzZwcRURT/bdgb4kUUD+fSicO/eXFfWyr9LYS8r8S7z 11 | uvGf0DC1VvT4LknguLHBWabq2oCC0OOVViBN5sIaAIZsuyUX7l1jKeqJmMJsWXeB 12 | aq73MoVTPvHssm0MDVPzlU9b58ybKTitmYtMsvXM2BA5WpPGQjNK/NO8HDlaeuv/ 13 | Q1DCAFECgYEA2ZbCZG08rO0D8NSvB1wrNAW1Dg15C1aRTMPVajKyaCaaOHxlmGvM 14 | 9k7Jb73nUXb4HkkoJ8UNv5jvGwPPrEr2BtWPqU7R/V4LE+RjeRliTc1QprtRF/TT 15 | AvOHFoUuUtxcel1HuryOykDA6lwn0XlG7238KpdACsFpXOdb1O2117kCgYEA2XQ8 16 | AEqXmNxKIrdmF/DudPoJaDVDuRjRquEVdXo0W6iB4C+Ev4Za8H7X4c/R7QWultKN 17 | tMlssvs5XeC8knyzJqG8Yq6dNlFJgWtvQQV4YqVcC8MFbFyxvHiKZpZ6eGjRQ5Q7 18 | pcMwA2Pjggj7nA7ixu27ZytwkM1+ukOFQGmCG8MCgYAEcuyumP0wm/gEqJSblerc 19 | OXGmQ+hBrxdxdl+auEalkoR8uy5om2upbZr4ye084S3ZkK1dgs3xKPNang1U/INj 20 | SGKTl6T7hTy8v2K5SyeTZ6tKfXH/B2Z5HPmaIEb+MrtBVT7uNoTE5eYL5K57vfsR 21 | UjJhvI32qlW5LJHYHLQnIQKBgGWth/DK8B+1waSCBObk7VsdHxDu9v+xrzgrjxEj 22 | VyAUpsXg8j1r4VHava8EsbAWnBBB26jRFlaKScSvtPz5+fryXVf3NeOUFr7BMYBB 23 | woy9rVCCnCKTJThvVKerbRCoUlE7Grjjj6vGJTdEZAIQo9FIj/wpv7S1obAOqlv3 24 | +bJpAoGAE3/90jHOk+yn8EcfBayFz1nVBGrEMKedtDrk8eeAhLU1G0oRcpVKjRWK 25 | lUJN2H7wejKGHX9gWAXbvdLnC0y6cdzsjgAXJySX8UwXUuVZ1vt6INKWE8dr+bH1 26 | YW/pBczY58SzxMq/1JkrNCcyC9g8ts3aorJEVEVx4oom9KFNZoQ= 27 | -----END 
RSA PRIVATE KEY----- 28 | -------------------------------------------------------------------------------- /keys/nats-client.key: -------------------------------------------------------------------------------- 1 | -----BEGIN RSA PRIVATE KEY----- 2 | MIIEpQIBAAKCAQEAuUwmnc/FN2VErYuUWxbbGtlgpBYUc3Y1Cg1dahWI6IIv1iw+ 3 | TqiYDv4cXxRwwvKwpgHDUbTFGz8cmyXo8+NMWvAGeoHGdY/Msz0iIqRpSNHOow87 4 | s7fJjyway7gDafVCG1D6Vkh6gOLrwyxU0IB/GO9uGaAt9Zm6uXb3III/ptaTDKF0 5 | 6JR9P7vXj0ceqBMTHDYvtI/dZ0bPHcRmr+7Y1rD1+P0emIulx2/weJvKAWG14agz 6 | m0jUOqHvoTJauflcqXW+GgxjDQPLfeseGuAcZeTQSuPPZNs0C+HgUJFs6Pf8YyzK 7 | VYuqVWLh3eJCeJct9L8cijitdhyfDqJa+nf+FQIDAQABAoIBAB3Ygi/RE3cEvLcn 8 | UFILsGb2tHY5iDnAxHJGDNHVfiPVNFdO8QpCZZukmnVGGZOM+58BkbBLaaQl2jmz 9 | NX5zEg1js+dvl8O/0bzdD+58hOjRwnLJNViRNcn11KsTjJk4g9lahbf4HKM96bOa 10 | JF8hE1e02QsPndd9V3IgE1YNJKt+hhZKXhRu7Co/x26A2U/ZxyiEmcLyQ210gBbW 11 | iW9uwf7ANeKN8IJJTREH3ZmTpE8lUmT/TMtIwKgA5rRWpm+Vrio766hox4ysMiqT 12 | xRdLbG2rrhnrltvzb8j46t6TozYXXsiwr1c1Tq5sLiUy1ASv9ZbdLh1xXGrRc5Hz 13 | /PqDKYECgYEA5Z+ZtH/thxWPoDe19vjj4LivFSwPsjLZ5H9yn2uTQCWblQ8QFToV 14 | 3kEvMrHovdgdJYNne+s+qR04c/aAzi5wSfXXvbmqS/+1+G8/+DckruYkxKIwGjAP 15 | FtR1Hsh/0pmsGz8dMy9YKZmJNifeOKbrQEWoc+YbfDECIzfj4sMsPp0CgYEAzpUV 16 | Ofyo4x+A3K7E3SqZP7hRNbS8KH7Uxd55WbLwwfraF5ta3t/rdsIuJjeVtLDX4R5I 17 | 0pfAJ2kKD2ClEJ0XPff0xr8l2ZyWtFYEsWhT1OGghiOqR4DLcQUokHVh+k3gLltF 18 | O/c7/l0aQ2VIZfylX7HueW6tGttrCcYR0vQRJ9kCgYEAgDuUA9gZVwH0dn4GMYkn 19 | AV1B8WnszhBV3KRL57mUvxMBgsmpqqEIi1kNuZrfJf3lsQhkJzDlil3WI/R6VCpc 20 | 7QDM3ZgtAUopXjcKm1x4MPA3glQs1sMV+iLl0S4ZLAxDr+ecL/R1oK2skcDL+glF 21 | gVwlSawslztrPS2+Junl8IUCgYEAy/wn9pdHCOZNrxuf5qeupV3SSs7JUM9UOIIi 22 | n3DzDQgvnEnaI+NFtYGuYixW1hIdSPxHejNAGWo99cs/krUbgyQ5fpq2vDdEFJQv 23 | lCJpTPbyNCI5fNtvh41JbcOK2MdXC08ygTDy+4thaJU1NQeRIHoo+Z1Qrwlkm0UK 24 | ypwX6JkCgYEAgdvjZuJTrAkJUV3ZlC5WiUuYJSKo+oFrYElIGdkin8oZowW3sYOf 25 | GDxyBLL5gGOyFkiclHRBoFIPzAKWMOfQqpCjD1n/OYDutRPI1LIDbaOlf4CTbSz9 26 | DW5q7a9ssII+XVctuu0LK02CyLSvghu6Bpuwfcyt5ZcHuJtBjA/Lf5k= 27 | -----END RSA 
PRIVATE KEY----- 28 | -------------------------------------------------------------------------------- /keys/cassandra-client.key: -------------------------------------------------------------------------------- 1 | -----BEGIN RSA PRIVATE KEY----- 2 | MIIEpAIBAAKCAQEAlARAFMTzd5tcKN0wbuju/U8FXzk4o9JFQ9ZKdxwOMjyRdoOe 3 | LNS7n8upW3bSMmJhXw6jfh2OD89hQCpHaXZ2pPBWdG44ZpcmAMzELzRYvTmQVpTi 4 | ShgiWosz7iNCE4e9iP6qTFrlf31d7o5NPXaLARR7WAOcOgmVc0LxW5LuGWuuD9Ek 5 | Szyqmmz+ozaEUQemb6KHB8d4xWpnfRVWPThXCJtkI7kE5Rxs3L2EhDd4GmuSFPqe 6 | F5ImpMwzy/N3sj+UfSD87KQkFEwRz/nQSW2MSJBxsszKtBFgum3P9JrLGPWDi8li 7 | 22tTTO0KdK7eHd9dx8Ue2D6d3ZIWs22awJzFSwIDAQABAoIBAFD+umojE+9BTn71 8 | +ojWYeCvGwtubnYGm+xxGLJw4bvPtPGbODTj+/+HHJd2P7NeblXr1r8uQQyZuGe6 9 | OvWU43uCIGz521cYIq2RX1FNXbm+BCO8uM8+wZ22SqZnGMNDJg4O/PVnT+ohmgLu 10 | T4nwkkP3Bz+0SgrgL62w1g38KtoxXxdWULzy9d4KVDq4gkLDVQsVUHWZv+6OkZwP 11 | HTJG5mnR5P9O0Xkgp23DmifY1HeoGS8ey9V7lv2YxnTNnfV5yN4mT9vxPOwEnG1s 12 | 2nhnwPuCSzcVeDZarCBgRjQ8UyJDZRG2GXkKVJVQNbEkMlOa5rFC/L7xAfDpsjRj 13 | 4k87vRECgYEA7OTujKPtHumI4jIWBnCLKdM96YplHEz+YWPWz/LHiFDwHRwrXL77 14 | 7VwnV9GezJUHDWbMm742jcb4D1hV49U6tCmSbjCk44tTcVO8b59PcNUm9c0ryF5Z 15 | nE8/oyndovbvZ21KJjgt5bAO9fKXrmIkwN3fdwkjv921f9JykungGv8CgYEAn/RL 16 | V4MxM4m2tV59f8Cb1x+yEieMuWlBmxagWwWNve/YvmP4KEVK6TEK2K6FOPk+/hGP 17 | zKptKy65Ot6T8xGLuDkQkDtL/AofsyF414LauXHgE9KMz/A+x+xeoHXNMbyyUrwe 18 | 2D+PR9anqdmF/mEauSb9jhjC8YRBIkrqOL5hUbUCgYEAq1/2nzHb0bgauaCIRIJY 19 | epqCOJhRC0c749KpTWiSJrB+9GAzknXrpMw8/g/8dH3/SxeqP6U8rBZb0CYUhKz2 20 | Oe21LBfuwVpCtEJimNHEU5kwBTQ3T03KQAUowW4BE+rWSJlMwhd0RKy4Nf8Y8iO3 21 | +PrJtcCb5yqKKf1hu5yL7psCgYA/HkJgvRc8NBqSBUBWmiWkxRAak9q3C27lCQ+r 22 | +0wMaEnKD17MXVOLI1wZVvyhF9GgpkNtSs8bXCuhrFULdKACyRndIFkCkughYpLx 23 | z+QJi4MVr177at0LPR22CeY3uzNO7IbrZGFgwraUko6Ka70E1Pr3CJRcg4jERHWa 24 | gJ2chQKBgQDq9oQXPLulyMNQLm7ChfA9JlxJ6vHExRc/eVKGbj9mkgBim96Bq1J5 25 | U+blGkbEApqMD7Adr3mZB9M3AYi1hM2HjtVZYHuOeFsJN4rpMlylOB3IPPkjkRcd 26 | arqBrds1lUTJt06IjWl3o1RD7taM/ymE8xdvMEA50Dsk/GfLa+XfIA== 27 | -----END RSA 
PRIVATE KEY----- 28 | -------------------------------------------------------------------------------- /keys/cassandra-server.key: -------------------------------------------------------------------------------- 1 | -----BEGIN RSA PRIVATE KEY----- 2 | MIIEpAIBAAKCAQEAhwxwELrrQAoWxFlU35rAoOZP1+oXUeWUbs/yOhMMymsCJQLK 3 | 2G7nJSKWB5bt27y471Ge06b9B19Unixb7+S/D+Xefh4DZvypSbj0U2lExLknkuRs 4 | /ZjIvl96CWBc6wv0WpPomTk5x8RlUIi+f9tF3Y+CPSjA7y8Hz5BzNuttFheVe4Bn 5 | bEumSOR9m+s/M4uKremeIQ/P4mL8JWa8aG5iJtYjpg6XVKmTmYDUjndhtyj+OEBM 6 | saez6A1AmHMXWvYGYQ+EQ9eXtgv/f+sbF6G/lELoqNkG44ka15gLQ5ZnKfSzeWmI 7 | Kl0gbYgZG5BW6y52J+EzYp5xCPeMFzTJTK3hQwIDAQABAoIBAHK7CVXdRtDgAVmM 8 | PwO8WBwZrglT0b382e6OcU4PMxWgdXD1/1iCV5L/4A/QlIDSwxZl9BYBzwL9ZUj/ 9 | jJewcPVTp4LjgqZyzmtYWxGl5Atw9YLg1wtXSmwZdvcSS75QKKbu83OA32qsmhoi 10 | Ih1DLwa8zbuhrM9G2E7pfEhTGBjkcN5MCoB/hRjv62PlMfCzlBDnzwUjAYJPvbW7 11 | axXjqpQvcMOX5QzIckTFBeiM/9G+8Nvt6pl87jamZ4SvK2fc07ZzTeDXQHIgVLfz 12 | Y3EqdEp7tf6boC9uekLlPSVnMZo4kMnGxaXNijiidsIr+Yy2dXAgZMy8pH1fNHnk 13 | BgwNOUECgYEAzTBGFP0HZme/jSGcNlg5jYLZmlftUVkjnGDxfBvgGB/03Cqbuz7G 14 | 0mddQ7tCeaWLIwKjnWSvaomF2uuIkcI37HPCXLc6pOAemXhmWW8nVb12taJYtVYl 15 | AyNLcPwZ8RC7Y/7wADHV0SSD/3tZyxr+LVizz7grAUwk7LLRSQo5oyECgYEAqH21 16 | iL9q/mOHO5mTodS5c/tJLUTE1MKPUYs1VywkNADdpHm/vmABy5FdnvOusf5Wqdiw 17 | Q9cTdju+GSM5Bt4qe0J5RTI4rgaOZ9ghsks37MxRpLVNn14jsMcJxXJJc594K0JA 18 | xtX/QZwwlC+S4PuD2ffrpe96V2/2/2khp3Fl2+MCgYBoC6J94hBujQ1Es1ZS+Em2 19 | yDMvYIamSV0VXtMU682mbg3r1m9Bc4O+DUvtnHcI2DjFeAEfPn70tud5KxYFU0ao 20 | T8qu2PqTwFHD4JmFoCeCAqC/WaNB0HzUKoGGuU8uPh3Hhu6MKrkRZELKufG+W+Gn 21 | fOMw4WJtNf9DRyl4sAaqAQKBgQCdRtLIdh2ynkcinCXRvxbpq5vQFMxC6eYMMQy2 22 | dW7J2DEMkdUpKMckNWoVsNPWNFrDKpDGkqIJEJVPM7DMt18iNZervshnsZkGWBqE 23 | KnBmqwZe4Bo39BEOt4xocVkdA7ORdcOvlxwxK1GxUYMiXcTjFugxuYwSyGHgYktW 24 | 2KRDDwKBgQCE9O4M2IyxXQi/pa8K0DtcYUvZhoKPmGiCsUojpYPkAO8KClr6iYF8 25 | txu63ErtL7Q5+dcYS4TND25d2YqVj4G3UOrNYwo9nyywzSbBrrk6QsyFfqj+EThK 26 | XMy8O3G8oV6h5KuPIBAVZOhJNvdOPOesq0IN7vmdrUvmUVJzkkyucw== 27 | -----END RSA 
PRIVATE KEY----- 28 | -------------------------------------------------------------------------------- /keys/key.pem: -------------------------------------------------------------------------------- 1 | -----BEGIN PRIVATE KEY----- 2 | MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDuLsggR0gJOJJN 3 | XH6ZrrEfE61xt3rAZbeeTGUKQnVS5Xw2Zjmi8ywacYOEkm1RDaPmKDkFwlR/e17O 4 | oI1bi0qdI6BIhJ8QAq+mfYE+9oWrfLHZiPPSu69wYqkMxkFeYH8AC0bI39QVfOSt 5 | x+mmYsclNoFgYboqeZTjeA+RIPLiLDjoIkVQ66lznUAUjPtGwTuZtPWmUIzx7vmB 6 | +pplKfaT5abL7yfUcbJgeFUhJFW8+qRLuBUtJUR1N5pS0o/VXH/zXz8cNaTegi8N 7 | VYj0IUfvd6U3EGQwfnMCtI0gFN6F2d6HIB6+NjhFoL+QryuyU7d7gS0n1KcDizA6 8 | MKKoAhATAgMBAAECggEAd5g/3o1MK20fcP7PhsVDpHIR9faGCVNJto9vcI5cMMqP 9 | 6xS7PgnSDFkRC6EmiLtLn8Z0k2K3YOeGfEP7lorDZVG9KoyE/doLbpK4MfBAwBG1 10 | j6AHpbmd5tVzQrnNmuDjBBelbDmPWVbD0EqAFI6mphXPMqD/hFJWIz1mu52Kt2s6 11 | ++MkdqLO0ORDNhKmzu6SADQEcJ9Suhcmv8nccMmwCsIQAUrfg3qOyqU4//8QB8ZM 12 | josO3gMUesihVeuF5XpptFjrAliPgw9uIG0aQkhVbf/17qy0XRi8dkqXj3efxEDp 13 | 1LSqZjBFiqJlFchbz19clwavMF/FhxHpKIhhmkkRSQKBgQD9blaWSg/2AGNhRfpX 14 | Yq+6yKUkUD4jL7pmX1BVca6dXqILWtHl2afWeUorgv2QaK1/MJDH9Gz9Gu58hJb3 15 | ymdeAISwPyHp8euyLIfiXSAi+ibKXkxkl1KQSweBM2oucnLsNne6Iv6QmXPpXtro 16 | nTMoGQDS7HVRy1on5NQLMPbUBQKBgQDwmN+um8F3CW6ZV1ZljJm7BFAgNyJ7m/5Q 17 | YUcOO5rFbNsHexStrx/h8jYnpdpIVlxACjh1xIyJ3lOCSAWfBWCS6KpgeO1Y484k 18 | EYhGjoUsKNQia8UWVt+uWnwjVSDhQjy5/pSH9xyFrUfDg8JnSlhsy0oC0C/PBjxn 19 | hxmADSLnNwKBgQD2A51USVMTKC9Q50BsgeU6+bmt9aNMPvHAnPf76d5q78l4IlKt 20 | wMs33QgOExuYirUZSgjRwknmrbUi9QckRbxwOSqVeMOwOWLm1GmYaXRf39u2CTI5 21 | V9gTMHJ5jnKd4gYDnaA99eiOcBhgS+9PbgKSAyuUlWwR2ciL/4uDzaVeDQKBgDym 22 | vRSeTRn99bSQMMZuuD5N6wkD/RxeCbEnpKrw2aZVN63eGCtkj0v9LCu4gptjseOu 23 | 7+a4Qplqw3B/SXN5/otqPbEOKv8Shl/PT6RBv06PiFKZClkEU2T3iH27sws2EGru 24 | w3C3GaiVMxcVewdg1YOvh5vH8ZVlxApxIzuFlDvnAoGAN5w+gukxd5QnP/7hcLDZ 25 | F+vesAykJX71AuqFXB4Wh/qFY92CSm7ImexWA/L9z461+NKeJwb64Nc53z59oA10 26 | /3o2OcIe44kddZXQVP6KTZBd7ySVhbtOiK3/pCy+BQRsrC7d71W914DxNWadwZ+a 27 | 
jtwwKjDzmPwdIXDSQarCx0U= 28 | -----END PRIVATE KEY----- -------------------------------------------------------------------------------- /keys/priv.key: -------------------------------------------------------------------------------- 1 | -----BEGIN PRIVATE KEY----- 2 | MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDRXuDwQg1aVo1j 3 | dPGerMcYknIDT525wDX7SnHL/mfyVHUZQgXCW2flFQSOM45sL+xd3pZA2JvhZpVW 4 | 5bBC3GGPmJ6+FubPZkyyCtWRDTvbLBzJSw3zIG/k5fb9I+a5bYOZ7f7iRkOu15Nn 5 | XsGRy5IMlyzN80v4qEmghyBBoApHRC4v11wX8m8qBnod+rddIWC0MgR3PJay4jIe 6 | nbQS22L+XeOnxMzYX8CXHWKqSXglTbgVyAte1CO2R3A+jnY4A/l2n1JYzzDutJtb 7 | Y/YHgPV7mXzqPpNoVbxQUdOFkR47EYotlTdQ6KxbseI2Nidy7aBdc4pIiKsS2Q6O 8 | yM1IXzK5AgMBAAECggEBAIFK3Q+hSoup/WAYvIjnW3+yF9Qxm041A2DuJP37WgeM 9 | 3k89tT7DoZIW+5ww/FeU9Js3NIRQ/1ofNkPWTZXMcDbz4buzes9C9kPQavhLOtZr 10 | FE++GJw2QMMSvAaw3+9MUMOC9C6Zg3x6Rd3E5zZ4hvVded6oyxaAHg+SwnmkMhS1 11 | SPgzoko+ANoFM7+sd5Nek7XLWlNBcDg1YR9h5yDGi29Jf6IVT2SHnevTMNFXtmnF 12 | zkvjiRPqFN3DDB7olrqSnV000lM9m4CkksZ8Iazl7Mx3gb0vXCSfHWwotbR5CbkP 13 | JP2VCJLGiFnqH5ceVU9tJndFQQOoHU2FYhbylnHx8G0CgYEA6fegfTdChv1gs9Go 14 | 1EVJ1RL00QQPpvZ7wKMkv/BOdrEOAcVzG02Vlmqm2RmifuH4EBwi1mt5FkW6Mk9k 15 | 5sofwmVgpSK96HEbY1RY0iOVi+aAZBuZWcDmknWgqCP6jqArc+tq3qIrlKWZBi3c 16 | 8rMiWWi1IV8AnZMsntl/uTxsMd8CgYEA5RZJafbEhW02NoHbkRPX4VNaQngax3uK 17 | dWyzxXrgMNpKDP0ghBToRXcqq98MHshtrHfdoxGVlXoqxFuqcEtGrBPx1LN1avAT 18 | xeoxhB7GU+19JxK9LYjSy+MQ84g1AFM+gTSpCGtW70nW1j5EhNaM46fIclTcrP4c 19 | KPydMUrhHmcCgYAHijNh1aYPM5sqMFeAf7shYrsBAWB/wPG8A4XrqZLdwFbzN6m9 20 | 94IaltVJqcnSzPVxj3aP8ma0kQqvFF/sEBd17E5xA+2a0ApR6SXzn7HyEDuS/lCy 21 | 08Sac6/5uy8X6ZF86tlG03MUF0IXMOMt7xfsSbOMn8MlUNjfRaMYFRpngQKBgH9i 22 | Zywu9ZQj6FNi3g7L8+zthnETjACEp7cn3Mbgzq0blLWoQ15uqcGGxAmSG5E6bhHL 23 | DUlRy9W0evd46UtL8F5bMMvPhmkSWkxhL2uCGVIt6rvBZcReIqkQ+CoTATXPFCCQ 24 | gbWWyl6Atp+nPD6JdwpYD9PTcGoZWttHrLjuJEPdAoGBAId0T3r02Dt33HzMuv8S 25 | DHC/l+SuzQwk5m3x217/YXh5Oxk1n1dAinPCbLTt0DPyf4FpPBAPz+Gqkc6zgoH/ 26 | 
JFhmfRW9drmxCGP3FSJ/H3NhsXwSkCfsvTFzNoSH3i+K+mkyWGcwgSygHNX/xwJo 27 | 9MVBkpawPJ4OT/5SYN2XykrT 28 | -----END PRIVATE KEY----- 29 | -------------------------------------------------------------------------------- /keys/rootCa.key: -------------------------------------------------------------------------------- 1 | -----BEGIN PRIVATE KEY----- 2 | MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDE/O2cRdwLQnSk 3 | esAlG1MAE9ePwWXWqSHRAviBLSHc09XPkZ14zXnxVKtrQCfS9CmdJzF2QpaeP/Sf 4 | 9GPYRMfOLl3uta4jjiFqf4i9+Y6K2Qto2PIFWlaSzL9WbxV0FeHKHiPs6D+gWNqs 5 | yrfaJumWM350y9JBe5NU0ecOu2Kk0Icjcmr6XnK03o4WXK7rlNsmuZbWn5hyYVlq 6 | cbsPAq79qQK7UWNyyshMyARY0hOVzejN/AuVzwtGV/gyxraQ53nUDEz+p/dBHnln 7 | BcKnv8SWK6c8sR96sDLvx7bclqxKSTSLrysmaGzKUphpLOTBBxvwsuBF4uxm+2qb 8 | PGHqhZsZAgMBAAECggEBAIDnmn224WWk2TuA8BfYJ5pXYao6vm+YQf7KWN//LreZ 9 | D8NC4K9alr+rznjihKzxBgJG0YE31eQ8qdG6VVQCvlVX1jQNQ6UUrx1H062tju+h 10 | VGwJ+2YxVZG+1j6zdnB+mUBwNMYkvO4q9v43sdPZvuWBUPERJ8eeHC1XJkJnsQd0 11 | HosezsBAft43+QwtqUJ4BoxP+RBVg2IkIA+Wy3smFv7+OpHd5ZzxeKc/h66YCJNE 12 | Np0F/YjjbQW0h320DX6zIK0ovZv9kXdtweFdeMRhCQuwJAIYc2sY8El76SjlLNc3 13 | 4GUNR2O/dw87FwbgOvcnulpgmGcgRLWZzKlagjY36NkCgYEA96OTao+cYPawrphP 14 | Kmw3zM3cP2Vq/r4Rwo4v6i9ivN6YLVBGAI9BdsyHEMDNSmdIBkAe/wzWjqXml3sU 15 | S/nCG5eM7i6b4gDbG6DOENQCzvRcvDvQJjvXX1TsnbW26FnWcHa8dsEMUHo9sT+y 16 | Z89c/VIGx1N/I1XFIE+uMBEQXMsCgYEAy6OPPYxkyT9NaEW43TZyVifs0ViAQdaN 17 | xsKGNLd0MJoGvHdBQaiZ0eiYop0FzjxnJMtR5qL3nad3KTCK3lS3Od6ajM7PfFvn 18 | 3GBx0rHt2gOBxbrIzQDuXhco3+Oudd7GXM6hqaITFtEgGZEujCIz7hzPZTNU+Hbg 19 | 3iXuCmZebysCgYBHs/VbRXniGYvPAgrqHauKgZDkSllQqXg6/TCiflX4mfs2I6gh 20 | QVmTLGVvVGQcxwrzxF01PGJjq2W5NsoJtaeelaMY1CYOqnDG7yi7dqfZhKfVWPxM 21 | FcemstBQWe08GccdiUPInt0R8FMsn2To6gHi131e+a42bP9G+8tT35z9QQKBgHUo 22 | h7uobz/dfKJ2IpKaxZNN6xDScw/t6amltuLTfTSNGT0K+29UGhNLV62O24048TqZ 23 | A2jG7+EzPPpkpDCf2r6flJlYTsVEdxQyJKpMlgRtKhdhjC9tzrYxauHMs027OrKg 24 | ayAWUBc7f9VV0srqzqe6yZN2wfclJeCJP/eZx9D7AoGANDttn+JIQIyrL/SREmJG 25 | 
Fph7LZ0JcxLC0oPajR3l4a7c+3iwsfnBAkxExjbrbgP9DpNcfs5V6YbgYIAW19dh 26 | EM5kuwodP+UfNvrOr7l1esrb1XWd0o3vn1bFRbgmhROroYNqDPmU/V2utD4q508u 27 | SrLDnGcz26YXoIqrOMgdfmg= 28 | -----END PRIVATE KEY----- 29 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | # Copyright 2017 Lei Ni (nilei81@gmail.com). 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | OS := $(shell uname) 16 | ROCKSDB_MAJOR_VER=5 17 | ifeq ($(OS),Darwin) 18 | ROCKSDB_SO_FILE=librocksdb.$(ROCKSDB_MAJOR_VER).dylib 19 | else ifeq ($(OS),Linux) 20 | ROCKSDB_SO_FILE=librocksdb.so.$(ROCKSDB_MAJOR_VER) 21 | else 22 | $(error OS type $(OS) not supported) 23 | endif 24 | 25 | ROCKSDB_INC_PATH ?= 26 | ROCKSDB_LIB_PATH ?= 27 | # in /usr/local/lib? 
28 | ifeq ($(ROCKSDB_LIB_PATH),) 29 | ifeq ($(ROCKSDB_INC_PATH),) 30 | ifneq ($(wildcard /usr/local/lib/$(ROCKSDB_SO_FILE)),) 31 | ifneq ($(wildcard /usr/local/include/rocksdb/c.h),) 32 | $(info rocksdb lib found at /usr/local/lib/$(ROCKSDB_SO_FILE)) 33 | ROCKSDB_LIB_PATH=/usr/local/lib 34 | endif 35 | endif 36 | endif 37 | endif 38 | 39 | ifeq ($(ROCKSDB_LIB_PATH),) 40 | CDEPS_LDFLAGS=-lrocksdb 41 | else 42 | CDEPS_LDFLAGS=-L$(ROCKSDB_LIB_PATH) -lrocksdb 43 | endif 44 | ifneq ($(ROCKSDB_INC_PATH),) 45 | CGO_CXXFLAGS=CGO_CFLAGS="-I$(ROCKSDB_INC_PATH)" 46 | endif 47 | CGO_LDFLAGS=CGO_LDFLAGS="$(CDEPS_LDFLAGS)" 48 | GOCMD=$(CGO_LDFLAGS) $(CGO_CXXFLAGS) go build -v 49 | 50 | all: dcrontab 51 | 52 | dcrontab: 53 | $(GOCMD) -o dcron github.com/dioptre/dcrontab/v3/dcrontab 54 | 55 | clean: 56 | @rm -f dcron 57 | 58 | .PHONY: dcrontab clean 59 | -------------------------------------------------------------------------------- /dcrontab/gorocksdb/util.go: -------------------------------------------------------------------------------- 1 | package gorocksdb 2 | // #include 3 | 4 | import "C" 5 | import ( 6 | "reflect" 7 | "unsafe" 8 | ) 9 | 10 | // btoi converts a bool value to int. 11 | func btoi(b bool) int { 12 | if b { 13 | return 1 14 | } 15 | return 0 16 | } 17 | 18 | // boolToChar converts a bool value to C.uchar. 19 | func boolToChar(b bool) C.uchar { 20 | if b { 21 | return 1 22 | } 23 | return 0 24 | } 25 | 26 | // charToByte converts a *C.char to a byte slice. 27 | func charToByte(data *C.char, len C.size_t) []byte { 28 | var value []byte 29 | sH := (*reflect.SliceHeader)(unsafe.Pointer(&value)) 30 | sH.Cap, sH.Len, sH.Data = int(len), int(len), uintptr(unsafe.Pointer(data)) 31 | return value 32 | } 33 | 34 | // byteToChar returns *C.char from byte slice. 
35 | func byteToChar(b []byte) *C.char { 36 | var c *C.char 37 | if len(b) > 0 { 38 | c = (*C.char)(unsafe.Pointer(&b[0])) 39 | } 40 | return c 41 | } 42 | 43 | // Go []byte to C string 44 | // The C string is allocated in the C heap using malloc. 45 | func cByteSlice(b []byte) *C.char { 46 | var c *C.char 47 | if len(b) > 0 { 48 | c = (*C.char)(C.CBytes(b)) 49 | } 50 | return c 51 | } 52 | 53 | // stringToChar returns *C.char from string. 54 | func stringToChar(s string) *C.char { 55 | ptrStr := (*reflect.StringHeader)(unsafe.Pointer(&s)) 56 | return (*C.char)(unsafe.Pointer(ptrStr.Data)) 57 | } 58 | 59 | // charSlice converts a C array of *char to a []*C.char. 60 | func charSlice(data **C.char, len C.int) []*C.char { 61 | var value []*C.char 62 | sH := (*reflect.SliceHeader)(unsafe.Pointer(&value)) 63 | sH.Cap, sH.Len, sH.Data = int(len), int(len), uintptr(unsafe.Pointer(data)) 64 | return value 65 | } 66 | 67 | // sizeSlice converts a C array of size_t to a []C.size_t. 68 | func sizeSlice(data *C.size_t, len C.int) []C.size_t { 69 | var value []C.size_t 70 | sH := (*reflect.SliceHeader)(unsafe.Pointer(&value)) 71 | sH.Cap, sH.Len, sH.Data = int(len), int(len), uintptr(unsafe.Pointer(data)) 72 | return value 73 | } 74 | -------------------------------------------------------------------------------- /dcrontab/gorocksdb/checkpoint.go: -------------------------------------------------------------------------------- 1 | package gorocksdb 2 | 3 | // #include 4 | // #include "rocksdb/c.h" 5 | import "C" 6 | 7 | import ( 8 | "errors" 9 | "unsafe" 10 | ) 11 | 12 | // Checkpoint provides Checkpoint functionality. 13 | // Checkpoints provide persistent snapshots of RocksDB databases. 14 | type Checkpoint struct { 15 | c *C.rocksdb_checkpoint_t 16 | } 17 | 18 | // NewNativeCheckpoint creates a new checkpoint. 
19 | func NewNativeCheckpoint(c *C.rocksdb_checkpoint_t) *Checkpoint { 20 | return &Checkpoint{c} 21 | } 22 | 23 | // CreateCheckpoint builds an openable snapshot of RocksDB on the same disk, which 24 | // accepts an output directory on the same disk, and under the directory 25 | // (1) hard-linked SST files pointing to existing live SST files 26 | // SST files will be copied if output directory is on a different filesystem 27 | // (2) a copied manifest files and other files 28 | // The directory should not already exist and will be created by this API. 29 | // The directory will be an absolute path 30 | // log_size_for_flush: if the total log file size is equal or larger than 31 | // this value, then a flush is triggered for all the column families. The 32 | // default value is 0, which means flush is always triggered. If you move 33 | // away from the default, the checkpoint may not contain up-to-date data 34 | // if WAL writing is not always enabled. 35 | // Flush will always trigger if it is 2PC. 36 | func (checkpoint *Checkpoint) CreateCheckpoint(checkpoint_dir string, log_size_for_flush uint64) error { 37 | var ( 38 | cErr *C.char 39 | ) 40 | 41 | cDir := C.CString(checkpoint_dir) 42 | defer C.free(unsafe.Pointer(cDir)) 43 | 44 | C.rocksdb_checkpoint_create(checkpoint.c, cDir, C.uint64_t(log_size_for_flush), &cErr) 45 | if cErr != nil { 46 | defer C.free(unsafe.Pointer(cErr)) 47 | return errors.New(C.GoString(cErr)) 48 | } 49 | return nil 50 | } 51 | 52 | // Destroy deallocates the Checkpoint object. 
53 | func (checkpoint *Checkpoint) Destroy() { 54 | C.rocksdb_checkpoint_object_destroy(checkpoint.c) 55 | checkpoint.c = nil 56 | } 57 | -------------------------------------------------------------------------------- /dcrontab/gorocksdb/sst_file_writer.go: -------------------------------------------------------------------------------- 1 | package gorocksdb 2 | 3 | // #include 4 | // #include "rocksdb/c.h" 5 | import "C" 6 | 7 | import ( 8 | "errors" 9 | "unsafe" 10 | ) 11 | 12 | // SSTFileWriter is used to create sst files that can be added to database later. 13 | // All keys in files generated by SstFileWriter will have sequence number = 0. 14 | type SSTFileWriter struct { 15 | c *C.rocksdb_sstfilewriter_t 16 | } 17 | 18 | // NewSSTFileWriter creates an SSTFileWriter object. 19 | func NewSSTFileWriter(opts *EnvOptions, dbOpts *Options) *SSTFileWriter { 20 | c := C.rocksdb_sstfilewriter_create(opts.c, dbOpts.c) 21 | return &SSTFileWriter{c: c} 22 | } 23 | 24 | // Open prepares SstFileWriter to write into file located at "path". 25 | func (w *SSTFileWriter) Open(path string) error { 26 | var ( 27 | cErr *C.char 28 | cPath = C.CString(path) 29 | ) 30 | defer C.free(unsafe.Pointer(cPath)) 31 | C.rocksdb_sstfilewriter_open(w.c, cPath, &cErr) 32 | if cErr != nil { 33 | defer C.free(unsafe.Pointer(cErr)) 34 | return errors.New(C.GoString(cErr)) 35 | } 36 | return nil 37 | } 38 | 39 | // Add adds key, value to currently opened file. 40 | // REQUIRES: key is after any previously added key according to comparator. 41 | func (w *SSTFileWriter) Add(key, value []byte) error { 42 | cKey := byteToChar(key) 43 | cValue := byteToChar(value) 44 | var cErr *C.char 45 | C.rocksdb_sstfilewriter_add(w.c, cKey, C.size_t(len(key)), cValue, C.size_t(len(value)), &cErr) 46 | if cErr != nil { 47 | defer C.free(unsafe.Pointer(cErr)) 48 | return errors.New(C.GoString(cErr)) 49 | } 50 | return nil 51 | } 52 | 53 | // Finish finishes writing to sst file and close file. 
54 | func (w *SSTFileWriter) Finish() error { 55 | var cErr *C.char 56 | C.rocksdb_sstfilewriter_finish(w.c, &cErr) 57 | if cErr != nil { 58 | defer C.free(unsafe.Pointer(cErr)) 59 | return errors.New(C.GoString(cErr)) 60 | } 61 | return nil 62 | } 63 | 64 | // Destroy destroys the SSTFileWriter object. 65 | func (w *SSTFileWriter) Destroy() { 66 | C.rocksdb_sstfilewriter_destroy(w.c) 67 | } 68 | -------------------------------------------------------------------------------- /keys/ca.crt: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIIFkzCCA3ugAwIBAgIUWb7puabbtOAsqW7mRA7xUmtVs3MwDQYJKoZIhvcNAQEL 3 | BQAwWTELMAkGA1UEBhMCQVUxEzARBgNVBAgMClNvbWUtU3RhdGUxITAfBgNVBAoM 4 | GEludGVybmV0IFdpZGdpdHMgUHR5IEx0ZDESMBAGA1UEAwwJbG9jYWxob3N0MB4X 5 | DTE5MDkzMDIwMjYxM1oXDTIwMDkyOTIwMjYxM1owWTELMAkGA1UEBhMCQVUxEzAR 6 | BgNVBAgMClNvbWUtU3RhdGUxITAfBgNVBAoMGEludGVybmV0IFdpZGdpdHMgUHR5 7 | IEx0ZDESMBAGA1UEAwwJbG9jYWxob3N0MIICIjANBgkqhkiG9w0BAQEFAAOCAg8A 8 | MIICCgKCAgEAtQ1ckrwKunUDJwdjEyxkFrTY0jRf5g94URSxCmxnK77KLO+jJY+6 9 | VKh7PEQ6gWn4p31qsPldubVfC6z4W0BnH3uKv9FZiMVrUg5ibP4UFiitvYUJddPD 10 | d6TRlWRtdLzsd9bkFc5/YhHZrqZf/faPr536Tk+OPPg1TnBk+UjZV91kfi+mUG3p 11 | +Ae7SrGdgxVpfeJCy/S7eLfCykrluImyezVAHPKnCptU7eKAd8NdSatEjwo39olq 12 | 794BOhsww3iD5IURs8e3d0cpzCUXczxqO4SEU2werw1+Mb7KmFikZit6DoUGCLlw 13 | sgLx3AuyPBxOJkOHhoBxgYfHzf+UKuadJKua+pu10RjNIblOHDBl6ddjpGleT7UR 14 | OpzBq6UUfu8NX17pPiaHwZxY92gpnjbXKK08UzpbGknert+zQPFnYRp4aEaxnkpM 15 | Yywzybvak1ABn7sstz7FfomUca9NNfbsMgOO3hfDdK4omv3ZTJL56v5PswxSMdxk 16 | UghI51DfnX44ePdceGhRuav2dO7SN4HeeNTRzurDelAsACtD7U0Dj5C3PqIoE244 17 | 2v9IlJ86L1zLEmGap5ExL5o+GbvRrdu3kklCLQ3jGLDDXO2bqALdnV9muYGVKWug 18 | cMsJ2H5cNnBny/wl+2cmwRFTgfBzZxO7XotBcPNihFYSkJVLGIKzKfsCAwEAAaNT 19 | MFEwHQYDVR0OBBYEFOZ5eyP5v6J0BOpC0QgyvxFtmbZlMB8GA1UdIwQYMBaAFOZ5 20 | eyP5v6J0BOpC0QgyvxFtmbZlMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEL 21 | BQADggIBAIvHa0+7QOEHPm548GK0XKs8juGDdCxdJQ2jHafBUKsnLMSWsw1/p5dK 
22 | MnfgOlFcSueLAiebtRo2OrNQEkwI6Y3Bi7TMx0l0lpLk25yURXI3wXfdsYG6a0RN 23 | 5BVqQ6GVkz+bamphNCjVOAVYBExt4vKY7ecLcLjgXCFv1Ue/qLjzu/xAAZlS5uVv 24 | Xm2vxST8fyAx74tvS7JktOCDgK1qU99glF4sJq4gMXkpzPTAGBb+T8tJEh1QSu3/ 25 | MQULNaBkTn9vD7dOIZJopDva0UEFoKB4fvLitFiH69Asvxgkhy/sTyA7+e2vSzKn 26 | i5iJh/6Am/SYchNlAH1Jyl3BMCI3VnBP6Z+k22cstm7CY5feR+9TVoGEHwc6WGGr 27 | 3uNhum6k827Wm2xr+63xhrHKLtl/FDHnKpJrGeRo8UEFsdEu+VFYs9oyjypPSl0n 28 | di0W5Rjikgt7vnFIS/WjeVOkQpkMET+93TwQB0NgRndtsAnFqh6fm3JiWjt3+HWK 29 | o+gQp8ERk36lgIAZqrQjx3XGVtxHtgVtjvS5WaVf24Yy0+53Zmgk5IchDgCDyRLJ 30 | GAw0B7n6ynivQwm47lc2ZgrvhTjZddiZ/V+71zWBoOIMn4oGJJpTFM5urM/8XxuZ 31 | yM3x69TgQQBrT2/83eQUdsEwjsfa+sibDL6KpRY/QXKp8e1TmqLd 32 | -----END CERTIFICATE----- 33 | -------------------------------------------------------------------------------- /dcrontab/gorocksdb/doc.go: -------------------------------------------------------------------------------- 1 | /* 2 | Package gorocksdb provides the ability to create and access RocksDB databases. 3 | 4 | gorocksdb.OpenDb opens and creates databases. 5 | 6 | bbto := gorocksdb.NewDefaultBlockBasedTableOptions() 7 | bbto.SetBlockCache(gorocksdb.NewLRUCache(3 << 30)) 8 | opts := gorocksdb.NewDefaultOptions() 9 | opts.SetBlockBasedTableFactory(bbto) 10 | opts.SetCreateIfMissing(true) 11 | db, err := gorocksdb.OpenDb(opts, "/path/to/db") 12 | 13 | The DB struct returned by OpenDb provides DB.Get, DB.Put, DB.Merge and DB.Delete to modify 14 | and query the database. 15 | 16 | ro := gorocksdb.NewDefaultReadOptions() 17 | wo := gorocksdb.NewDefaultWriteOptions() 18 | // if ro and wo are not used again, be sure to Close them. 19 | err = db.Put(wo, []byte("foo"), []byte("bar")) 20 | ... 21 | value, err := db.Get(ro, []byte("foo")) 22 | defer value.Free() 23 | ... 24 | err = db.Delete(wo, []byte("foo")) 25 | 26 | For bulk reads, use an Iterator. 
If you want to avoid disturbing your live 27 | traffic while doing the bulk read, be sure to call SetFillCache(false) on the 28 | ReadOptions you use when creating the Iterator. 29 | 30 | ro := gorocksdb.NewDefaultReadOptions() 31 | ro.SetFillCache(false) 32 | it := db.NewIterator(ro) 33 | defer it.Close() 34 | it.Seek([]byte("foo")) 35 | for it = it; it.Valid(); it.Next() { 36 | key := it.Key() 37 | value := it.Value() 38 | fmt.Printf("Key: %v Value: %v\n", key.Data(), value.Data()) 39 | key.Free() 40 | value.Free() 41 | } 42 | if err := it.Err(); err != nil { 43 | ... 44 | } 45 | 46 | Batched, atomic writes can be performed with a WriteBatch and 47 | DB.Write. 48 | 49 | wb := gorocksdb.NewWriteBatch() 50 | // defer wb.Close or use wb.Clear and reuse. 51 | wb.Delete([]byte("foo")) 52 | wb.Put([]byte("foo"), []byte("bar")) 53 | wb.Put([]byte("bar"), []byte("foo")) 54 | err := db.Write(wo, wb) 55 | 56 | If your working dataset does not fit in memory, you'll want to add a bloom 57 | filter to your database. NewBloomFilter and 58 | BlockBasedTableOptions.SetFilterPolicy is what you want. NewBloomFilter is 59 | amount of bits in the filter to use per key in your database. 60 | 61 | filter := gorocksdb.NewBloomFilter(10) 62 | bbto := gorocksdb.NewDefaultBlockBasedTableOptions() 63 | bbto.SetFilterPolicy(filter) 64 | opts.SetBlockBasedTableFactory(bbto) 65 | db, err := gorocksdb.OpenDb(opts, "/path/to/db") 66 | 67 | If you're using a custom comparator in your code, be aware you may have to 68 | make your own filter policy object. 69 | 70 | This documentation is not a complete discussion of RocksDB. Please read the 71 | RocksDB documentation for information on its 72 | operation. You'll find lots of goodies there. 
73 | */ 74 | package gorocksdb 75 | -------------------------------------------------------------------------------- /dcrontab/gorocksdb/compaction_filter.go: -------------------------------------------------------------------------------- 1 | package gorocksdb 2 | 3 | // #include "rocksdb/c.h" 4 | import "C" 5 | 6 | // A CompactionFilter can be used to filter keys during compaction time. 7 | type CompactionFilter interface { 8 | // If the Filter function returns false, it indicates 9 | // that the kv should be preserved, while a return value of true 10 | // indicates that this key-value should be removed from the 11 | // output of the compaction. The application can inspect 12 | // the existing value of the key and make decision based on it. 13 | // 14 | // When the value is to be preserved, the application has the option 15 | // to modify the existing value and pass it back through a new value. 16 | // To retain the previous value, simply return nil 17 | // 18 | // If multithreaded compaction is being used *and* a single CompactionFilter 19 | // instance was supplied via SetCompactionFilter, this the Filter function may be 20 | // called from different threads concurrently. The application must ensure 21 | // that the call is thread-safe. 22 | Filter(level int, key, val []byte) (remove bool, newVal []byte) 23 | 24 | // The name of the compaction filter, for logging 25 | Name() string 26 | } 27 | 28 | // NewNativeCompactionFilter creates a CompactionFilter object. 29 | func NewNativeCompactionFilter(c *C.rocksdb_compactionfilter_t) CompactionFilter { 30 | return nativeCompactionFilter{c} 31 | } 32 | 33 | type nativeCompactionFilter struct { 34 | c *C.rocksdb_compactionfilter_t 35 | } 36 | 37 | func (c nativeCompactionFilter) Filter(level int, key, val []byte) (remove bool, newVal []byte) { 38 | return false, nil 39 | } 40 | func (c nativeCompactionFilter) Name() string { return "" } 41 | 42 | // Hold references to compaction filters. 
43 | var compactionFilters = NewCOWList() 44 | 45 | type compactionFilterWrapper struct { 46 | name *C.char 47 | filter CompactionFilter 48 | } 49 | 50 | func registerCompactionFilter(filter CompactionFilter) int { 51 | return compactionFilters.Append(compactionFilterWrapper{C.CString(filter.Name()), filter}) 52 | } 53 | 54 | //export gorocksdb_compactionfilter_filter 55 | func gorocksdb_compactionfilter_filter(idx int, cLevel C.int, cKey *C.char, cKeyLen C.size_t, cVal *C.char, cValLen C.size_t, cNewVal **C.char, cNewValLen *C.size_t, cValChanged *C.uchar) C.int { 56 | key := charToByte(cKey, cKeyLen) 57 | val := charToByte(cVal, cValLen) 58 | 59 | remove, newVal := compactionFilters.Get(idx).(compactionFilterWrapper).filter.Filter(int(cLevel), key, val) 60 | if remove { 61 | return C.int(1) 62 | } else if newVal != nil { 63 | *cNewVal = byteToChar(newVal) 64 | *cNewValLen = C.size_t(len(newVal)) 65 | *cValChanged = C.uchar(1) 66 | } 67 | return C.int(0) 68 | } 69 | 70 | //export gorocksdb_compactionfilter_name 71 | func gorocksdb_compactionfilter_name(idx int) *C.char { 72 | return compactionFilters.Get(idx).(compactionFilterWrapper).name 73 | } 74 | -------------------------------------------------------------------------------- /dcrontab/gorocksdb/slice_transform.go: -------------------------------------------------------------------------------- 1 | package gorocksdb 2 | 3 | // #include "rocksdb/c.h" 4 | import "C" 5 | 6 | // A SliceTransform can be used as a prefix extractor. 7 | type SliceTransform interface { 8 | // Transform a src in domain to a dst in the range. 9 | Transform(src []byte) []byte 10 | 11 | // Determine whether this is a valid src upon the function applies. 12 | InDomain(src []byte) bool 13 | 14 | // Determine whether dst=Transform(src) for some src. 15 | InRange(src []byte) bool 16 | 17 | // Return the name of this transformation. 18 | Name() string 19 | } 20 | 21 | // NewFixedPrefixTransform creates a new fixed prefix transform. 
22 | func NewFixedPrefixTransform(prefixLen int) SliceTransform { 23 | return NewNativeSliceTransform(C.rocksdb_slicetransform_create_fixed_prefix(C.size_t(prefixLen))) 24 | } 25 | 26 | // NewNativeSliceTransform creates a SliceTransform object. 27 | func NewNativeSliceTransform(c *C.rocksdb_slicetransform_t) SliceTransform { 28 | return nativeSliceTransform{c} 29 | } 30 | 31 | type nativeSliceTransform struct { 32 | c *C.rocksdb_slicetransform_t 33 | } 34 | 35 | func (st nativeSliceTransform) Transform(src []byte) []byte { return nil } 36 | func (st nativeSliceTransform) InDomain(src []byte) bool { return false } 37 | func (st nativeSliceTransform) InRange(src []byte) bool { return false } 38 | func (st nativeSliceTransform) Name() string { return "" } 39 | 40 | // Hold references to slice transforms. 41 | var sliceTransforms = NewCOWList() 42 | 43 | type sliceTransformWrapper struct { 44 | name *C.char 45 | sliceTransform SliceTransform 46 | } 47 | 48 | func registerSliceTransform(st SliceTransform) int { 49 | return sliceTransforms.Append(sliceTransformWrapper{C.CString(st.Name()), st}) 50 | } 51 | 52 | //export gorocksdb_slicetransform_transform 53 | func gorocksdb_slicetransform_transform(idx int, cKey *C.char, cKeyLen C.size_t, cDstLen *C.size_t) *C.char { 54 | key := charToByte(cKey, cKeyLen) 55 | dst := sliceTransforms.Get(idx).(sliceTransformWrapper).sliceTransform.Transform(key) 56 | *cDstLen = C.size_t(len(dst)) 57 | return cByteSlice(dst) 58 | } 59 | 60 | //export gorocksdb_slicetransform_in_domain 61 | func gorocksdb_slicetransform_in_domain(idx int, cKey *C.char, cKeyLen C.size_t) C.uchar { 62 | key := charToByte(cKey, cKeyLen) 63 | inDomain := sliceTransforms.Get(idx).(sliceTransformWrapper).sliceTransform.InDomain(key) 64 | return boolToChar(inDomain) 65 | } 66 | 67 | //export gorocksdb_slicetransform_in_range 68 | func gorocksdb_slicetransform_in_range(idx int, cKey *C.char, cKeyLen C.size_t) C.uchar { 69 | key := charToByte(cKey, cKeyLen) 70 | 
inRange := sliceTransforms.Get(idx).(sliceTransformWrapper).sliceTransform.InRange(key) 71 | return boolToChar(inRange) 72 | } 73 | 74 | //export gorocksdb_slicetransform_name 75 | func gorocksdb_slicetransform_name(idx int) *C.char { 76 | return sliceTransforms.Get(idx).(sliceTransformWrapper).name 77 | } 78 | -------------------------------------------------------------------------------- /dcrontab/gorocksdb/gorocksdb.c: -------------------------------------------------------------------------------- 1 | #include "gorocksdb.h" 2 | #include "_cgo_export.h" 3 | 4 | /* Base */ 5 | 6 | void gorocksdb_destruct_handler(void* state) { } 7 | 8 | /* Comparator */ 9 | 10 | rocksdb_comparator_t* gorocksdb_comparator_create(uintptr_t idx) { 11 | return rocksdb_comparator_create( 12 | (void*)idx, 13 | gorocksdb_destruct_handler, 14 | (int (*)(void*, const char*, size_t, const char*, size_t))(gorocksdb_comparator_compare), 15 | (const char *(*)(void*))(gorocksdb_comparator_name)); 16 | } 17 | 18 | /* CompactionFilter */ 19 | 20 | rocksdb_compactionfilter_t* gorocksdb_compactionfilter_create(uintptr_t idx) { 21 | return rocksdb_compactionfilter_create( 22 | (void*)idx, 23 | gorocksdb_destruct_handler, 24 | (unsigned char (*)(void*, int, const char*, size_t, const char*, size_t, char**, size_t*, unsigned char*))(gorocksdb_compactionfilter_filter), 25 | (const char *(*)(void*))(gorocksdb_compactionfilter_name)); 26 | } 27 | 28 | /* Filter Policy */ 29 | 30 | rocksdb_filterpolicy_t* gorocksdb_filterpolicy_create(uintptr_t idx) { 31 | return rocksdb_filterpolicy_create( 32 | (void*)idx, 33 | gorocksdb_destruct_handler, 34 | (char* (*)(void*, const char* const*, const size_t*, int, size_t*))(gorocksdb_filterpolicy_create_filter), 35 | (unsigned char (*)(void*, const char*, size_t, const char*, size_t))(gorocksdb_filterpolicy_key_may_match), 36 | gorocksdb_filterpolicy_delete_filter, 37 | (const char *(*)(void*))(gorocksdb_filterpolicy_name)); 38 | } 39 | 40 | void 
gorocksdb_filterpolicy_delete_filter(void* state, const char* v, size_t s) { } 41 | 42 | /* Merge Operator */ 43 | 44 | rocksdb_mergeoperator_t* gorocksdb_mergeoperator_create(uintptr_t idx) { 45 | return rocksdb_mergeoperator_create( 46 | (void*)idx, 47 | gorocksdb_destruct_handler, 48 | (char* (*)(void*, const char*, size_t, const char*, size_t, const char* const*, const size_t*, int, unsigned char*, size_t*))(gorocksdb_mergeoperator_full_merge), 49 | (char* (*)(void*, const char*, size_t, const char* const*, const size_t*, int, unsigned char*, size_t*))(gorocksdb_mergeoperator_partial_merge_multi), 50 | gorocksdb_mergeoperator_delete_value, 51 | (const char* (*)(void*))(gorocksdb_mergeoperator_name)); 52 | } 53 | 54 | void gorocksdb_mergeoperator_delete_value(void* id, const char* v, size_t s) { } 55 | 56 | /* Slice Transform */ 57 | 58 | rocksdb_slicetransform_t* gorocksdb_slicetransform_create(uintptr_t idx) { 59 | return rocksdb_slicetransform_create( 60 | (void*)idx, 61 | gorocksdb_destruct_handler, 62 | (char* (*)(void*, const char*, size_t, size_t*))(gorocksdb_slicetransform_transform), 63 | (unsigned char (*)(void*, const char*, size_t))(gorocksdb_slicetransform_in_domain), 64 | (unsigned char (*)(void*, const char*, size_t))(gorocksdb_slicetransform_in_range), 65 | (const char* (*)(void*))(gorocksdb_slicetransform_name)); 66 | } 67 | -------------------------------------------------------------------------------- /dcrontab/gorocksdb/options_transaction.go: -------------------------------------------------------------------------------- 1 | package gorocksdb 2 | 3 | // #include "rocksdb/c.h" 4 | import "C" 5 | 6 | // TransactionOptions represent all of the available options options for 7 | // a transaction on the database. 8 | type TransactionOptions struct { 9 | c *C.rocksdb_transaction_options_t 10 | } 11 | 12 | // NewDefaultTransactionOptions creates a default TransactionOptions object. 
13 | func NewDefaultTransactionOptions() *TransactionOptions { 14 | return NewNativeTransactionOptions(C.rocksdb_transaction_options_create()) 15 | } 16 | 17 | // NewNativeTransactionOptions creates a TransactionOptions object. 18 | func NewNativeTransactionOptions(c *C.rocksdb_transaction_options_t) *TransactionOptions { 19 | return &TransactionOptions{c} 20 | } 21 | 22 | // SetSetSnapshot to true is the same as calling 23 | // Transaction::SetSnapshot(). 24 | func (opts *TransactionOptions) SetSetSnapshot(value bool) { 25 | C.rocksdb_transaction_options_set_set_snapshot(opts.c, boolToChar(value)) 26 | } 27 | 28 | // SetDeadlockDetect to true means that before acquiring locks, this transaction will 29 | // check if doing so will cause a deadlock. If so, it will return with 30 | // Status::Busy. The user should retry their transaction. 31 | func (opts *TransactionOptions) SetDeadlockDetect(value bool) { 32 | C.rocksdb_transaction_options_set_deadlock_detect(opts.c, boolToChar(value)) 33 | } 34 | 35 | // SetLockTimeout positive, specifies the wait timeout in milliseconds when 36 | // a transaction attempts to lock a key. 37 | // If 0, no waiting is done if a lock cannot instantly be acquired. 38 | // If negative, TransactionDBOptions::transaction_lock_timeout will be used 39 | func (opts *TransactionOptions) SetLockTimeout(lock_timeout int64) { 40 | C.rocksdb_transaction_options_set_lock_timeout(opts.c, C.int64_t(lock_timeout)) 41 | } 42 | 43 | // SetExpiration sets the Expiration duration in milliseconds. 44 | // If non-negative, transactions that last longer than this many milliseconds will fail to commit. 45 | // If not set, a forgotten transaction that is never committed, rolled back, or deleted 46 | // will never relinquish any locks it holds. This could prevent keys from 47 | // being written by other writers. 
48 | func (opts *TransactionOptions) SetExpiration(expiration int64) { 49 | C.rocksdb_transaction_options_set_expiration(opts.c, C.int64_t(expiration)) 50 | } 51 | 52 | // SetDeadlockDetectDepth sets the number of traversals to make during deadlock detection. 53 | func (opts *TransactionOptions) SetDeadlockDetectDepth(depth int64) { 54 | C.rocksdb_transaction_options_set_deadlock_detect_depth(opts.c, C.int64_t(depth)) 55 | } 56 | 57 | // SetMaxWriteBatchSize sets the maximum number of bytes used for the write batch. 0 means no limit. 58 | func (opts *TransactionOptions) SetMaxWriteBatchSize(size uint64) { 59 | C.rocksdb_transaction_options_set_max_write_batch_size(opts.c, C.size_t(size)) 60 | } 61 | 62 | // Destroy deallocates the TransactionOptions object. 63 | func (opts *TransactionOptions) Destroy() { 64 | C.rocksdb_transaction_options_destroy(opts.c) 65 | opts.c = nil 66 | } 67 | -------------------------------------------------------------------------------- /dcrontab/gorocksdb/options_ingest.go: -------------------------------------------------------------------------------- 1 | package gorocksdb 2 | 3 | // #include "rocksdb/c.h" 4 | import "C" 5 | 6 | // IngestExternalFileOptions represents available options when ingesting external files. 7 | type IngestExternalFileOptions struct { 8 | c *C.rocksdb_ingestexternalfileoptions_t 9 | } 10 | 11 | // NewDefaultIngestExternalFileOptions creates a default IngestExternalFileOptions object. 12 | func NewDefaultIngestExternalFileOptions() *IngestExternalFileOptions { 13 | return NewNativeIngestExternalFileOptions(C.rocksdb_ingestexternalfileoptions_create()) 14 | } 15 | 16 | // NewNativeIngestExternalFileOptions creates a IngestExternalFileOptions object. 17 | func NewNativeIngestExternalFileOptions(c *C.rocksdb_ingestexternalfileoptions_t) *IngestExternalFileOptions { 18 | return &IngestExternalFileOptions{c: c} 19 | } 20 | 21 | // SetMoveFiles specifies if it should move the files instead of copying them. 
22 | // Default to false. 23 | func (opts *IngestExternalFileOptions) SetMoveFiles(flag bool) { 24 | C.rocksdb_ingestexternalfileoptions_set_move_files(opts.c, boolToChar(flag)) 25 | } 26 | 27 | // SetSnapshotConsistency if specifies the consistency. 28 | // If set to false, an ingested file key could appear in existing snapshots that were created before the 29 | // file was ingested. 30 | // Default to true. 31 | func (opts *IngestExternalFileOptions) SetSnapshotConsistency(flag bool) { 32 | C.rocksdb_ingestexternalfileoptions_set_snapshot_consistency(opts.c, boolToChar(flag)) 33 | } 34 | 35 | // SetAllowGlobalSeqNo sets allow_global_seqno. If set to false,IngestExternalFile() will fail if the file key 36 | // range overlaps with existing keys or tombstones in the DB. 37 | // Default true. 38 | func (opts *IngestExternalFileOptions) SetAllowGlobalSeqNo(flag bool) { 39 | C.rocksdb_ingestexternalfileoptions_set_allow_global_seqno(opts.c, boolToChar(flag)) 40 | } 41 | 42 | // SetAllowBlockingFlush sets allow_blocking_flush. If set to false and the file key range overlaps with 43 | // the memtable key range (memtable flush required), IngestExternalFile will fail. 44 | // Default to true. 45 | func (opts *IngestExternalFileOptions) SetAllowBlockingFlush(flag bool) { 46 | C.rocksdb_ingestexternalfileoptions_set_allow_blocking_flush(opts.c, boolToChar(flag)) 47 | } 48 | 49 | // SetIngestionBehind sets ingest_behind 50 | // Set to true if you would like duplicate keys in the file being ingested 51 | // to be skipped rather than overwriting existing data under that key. 52 | // Usecase: back-fill of some historical data in the database without 53 | // over-writing existing newer version of data. 54 | // This option could only be used if the DB has been running 55 | // with allow_ingest_behind=true since the dawn of time. 56 | // All files will be ingested at the bottommost level with seqno=0. 
57 | func (opts *IngestExternalFileOptions) SetIngestionBehind(flag bool) { 58 | C.rocksdb_ingestexternalfileoptions_set_ingest_behind(opts.c, boolToChar(flag)) 59 | } 60 | 61 | // Destroy deallocates the IngestExternalFileOptions object. 62 | func (opts *IngestExternalFileOptions) Destroy() { 63 | C.rocksdb_ingestexternalfileoptions_destroy(opts.c) 64 | opts.c = nil 65 | } 66 | -------------------------------------------------------------------------------- /keys/ca.key: -------------------------------------------------------------------------------- 1 | -----BEGIN RSA PRIVATE KEY----- 2 | MIIJKgIBAAKCAgEAtQ1ckrwKunUDJwdjEyxkFrTY0jRf5g94URSxCmxnK77KLO+j 3 | JY+6VKh7PEQ6gWn4p31qsPldubVfC6z4W0BnH3uKv9FZiMVrUg5ibP4UFiitvYUJ 4 | ddPDd6TRlWRtdLzsd9bkFc5/YhHZrqZf/faPr536Tk+OPPg1TnBk+UjZV91kfi+m 5 | UG3p+Ae7SrGdgxVpfeJCy/S7eLfCykrluImyezVAHPKnCptU7eKAd8NdSatEjwo3 6 | 9olq794BOhsww3iD5IURs8e3d0cpzCUXczxqO4SEU2werw1+Mb7KmFikZit6DoUG 7 | CLlwsgLx3AuyPBxOJkOHhoBxgYfHzf+UKuadJKua+pu10RjNIblOHDBl6ddjpGle 8 | T7UROpzBq6UUfu8NX17pPiaHwZxY92gpnjbXKK08UzpbGknert+zQPFnYRp4aEax 9 | nkpMYywzybvak1ABn7sstz7FfomUca9NNfbsMgOO3hfDdK4omv3ZTJL56v5PswxS 10 | MdxkUghI51DfnX44ePdceGhRuav2dO7SN4HeeNTRzurDelAsACtD7U0Dj5C3PqIo 11 | E2442v9IlJ86L1zLEmGap5ExL5o+GbvRrdu3kklCLQ3jGLDDXO2bqALdnV9muYGV 12 | KWugcMsJ2H5cNnBny/wl+2cmwRFTgfBzZxO7XotBcPNihFYSkJVLGIKzKfsCAwEA 13 | AQKCAgEAhsgPply+T9hUJHXnV5qwWAcBclrzGT1jAFk7Att4YqSBtbof5hJ9UBCs 14 | zUytkTHmnilLh7vb/if7PaHisWvcWR+LRwj+ckclgcybkTAEnApkbLXhOQoQdb0J 15 | GQHOSh1qnDwpR+KWCdfoCZBp3ZHznXUppaqr5M+SP3aEdN+6jSKPcTDfyvhIORDo 16 | aM6Yx8Dk64XI3DSwa0nBhP8EzMAVUott0x8BI3BHZWA86utRQ0pwm1usFahvJ2pJ 17 | 14vuXVYWOcSmTQPcnLspboQeVPcgznX0XHYc0Yxl6XpzL76thDHRh4WX3F7QenSe 18 | wIUkhoM72lhjR8nOtfLy13PwRY+wYx3YAZt+kGZCHmxYZl367bgoMTeYzfgjg3J2 19 | SoVCPlrMLXoHenAhgF5BAk3pV+19Kr38VcTo4usVOjdicV937nLAr0Nc4/cLi3oB 20 | 4kLfB6CRdKJuEoISvog3BmlIxgahWqU/HPAJFAHbp9tYpP82WXEZIqFK6wz4W94f 21 | o1KpAylRCDglZOgZiEc26yNgQXyoByhLeEiCbIz9dN8rLeBh2eA0Y0dHvk83iduF 22 | 
Es7YY9O3LtnoSqDTwpwUY/0l3+Pd3Iem2E2mnzP+roBcnA0FhVBgzwqkkuUYbIMF 23 | XWPPupU508RlIs/yXXkhspy51qBRW26UCiR71ZKMELwx5nqMmOECggEBAO9HOi1D 24 | Cqj5fz6iUUnW60S4zgQC2s8Ww4RtoJ4A+b9plXogb+34GRg0rBw2Gaosz5jXdUxz 25 | 3k4c93L92z5H9nQnxgHsg6n0L3QrMd8kpePgso40ZVXQoeyY4Ld6XdI/WGWQC5mU 26 | P6eZs42vlR7QgtUfWiTaO4fBAr4TbAloT/OJKlmGOQMJK9q0jPv4tZ5IFMU6FoZW 27 | q2Bc5eHmY4x9kqqg1/xR4+xYtMKrgYCmx1dXqMva51uObAaoam1eNCHuFbatu9a1 28 | aH9hppNejECdBxKytN2Tex+EFxAItI96PqVEHdieYILF9MVhDSIqT6RAMhsI2NCI 29 | V8wiK4qXsEhENS8CggEBAMG0cydGR44vKcKFlCugNPBJxzjfFwgZLIhWEuaaRSo2 30 | HvMX/mRoMl8m/64ImhGvvamLh43S4Eid5yD/psv80SpsPpe82+QpbQKUGqkCMAA4 31 | PRn7YSBGvFvjaljb5OL2bhP8KouV7QoriMNWvFhKxozfOiSUa9OKA4ykFbPUS2je 32 | 8YvR8H2zmD6ydNrs9Z3FV/2cy+l1aH5gCpVtrEfnAdICpztjAwmfCVx1imYm1qGs 33 | 6H+Rgn8lnGQtI65cJso7qdZgeWtnd0Hz8u5ccUeLMJ68teTHqOpZQGEECN9YWl9s 34 | QRHKGgPwZU6zcwev+1y3n1+Jq4xKGKZkJsmp7VuO/PUCggEBAOTjKriPUAwxllHw 35 | 7XeH2khDy/a0UGEW8nKkd6eNYVqEApPvoYJu+HpaMGVQLx7FfNRFDpTSpc3tEZvz 36 | UB10s0GLpnLCr3QM/LPQegVtxi6FC+jSIDmFuM9dsmBw8nyjUcjdkOsZkAagFc0s 37 | L6ZpaS27rvxfseB7dkcixw78tLZO+K7ooXLQ/B1a5x5FXF6Lryq3FULud0NXuiNs 38 | wr9qg7kq4VEheLjmZgtuChRp9XgMh5Bx1yySwa1gv2XdXlsyp5y29l896zcmzzRb 39 | lihczZ5KSo9Ge9m6/mC9IxO4qWJoFXDYl5OhDzMcUnbjKIoDSqrnvJdGRU4lRadu 40 | 0Rdf/8sCggEAbgoCuLb2A1HLmVOk/rt+F1ryL96cCN+0KqcrCZFt5TLllhd1Trja 41 | lK9k5ArC63U4e05mbSz0eIwrNtXEfXFbqYvgavbAu/bElIhqNMSnXBiWrWHepSVJ 42 | 77FbQqt4dW6kzfmcebtr8zET+lQVntqrvG87anheYMewiH7WTVg2lpbABTv4MJCL 43 | 1mIufAfM6BmpiMtAE5m9CL2qhYTOL/KNWC5lrhjrKtYLzViAjNNXbnT5hy60QWKu 44 | W0JBWNXEu3H1Y3FeyKL4XqEkyj/9ojTyI9r0Qckiri6RogtWtvpMLJUkyeRTrxUr 45 | l+/dUuqoEt3FTdK5f3LAWhXe6wT7b93k2QKCAQEAwMeH7aSJ9lWU09fBVl4Jrakj 46 | UKLchBU6gmnAisfQRemjl5rBe9iHt6xSnDCwhTUsge854zMVddp4m7aDjLs8R+nB 47 | KTpvmSM0mEsXDX1ddwqyK6w5lojRKNBpsQvlVWVFfSUqGdDaXpZudW+2H9B2lenc 48 | +4ygfwHXcDFj9cg5SWgH1iIZnOn5DAUd+2eoTXzAHA9NAR26AF2gcIn8HjWNlGRQ 49 | FZ1qhpg1Jd3LJYOACRqsUStyjw8nZoXENytKfjhcKlHR1AKMfUjqPeRUUWGb3DNm 50 | 
v5Zrk8MMW/ltpbi+0gNN9EY+hA+CXhVjEGI2WZrN27aTGIPkjdBlxA2i9tloWg== 51 | -----END RSA PRIVATE KEY----- 52 | -------------------------------------------------------------------------------- /dcrontab/nats.go: -------------------------------------------------------------------------------- 1 | //===----------- dcrontab - distributed crontab written in go -------------=== 2 | // 3 | // Copyright (c) 2018 Andrew Grosser. All Rights Reserved. 4 | // 5 | // Licensed under the Apache License, Version 2.0 (the "License"); 6 | // you may not use this file except in compliance with the License. 7 | // You may obtain a copy of the License at 8 | // 9 | // http://www.apache.org/licenses/LICENSE-2.0 10 | // 11 | // Unless required by applicable law or agreed to in writing, software 12 | // distributed under the License is distributed on an "AS IS" BASIS, 13 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | // See the License for the specific language governing permissions and 15 | // limitations under the License. 
16 | 17 | package main 18 | 19 | import ( 20 | "crypto/tls" 21 | "crypto/x509" 22 | "fmt" 23 | "io/ioutil" 24 | "log" 25 | "strings" 26 | 27 | "github.com/nats-io/nats.go" 28 | ) 29 | 30 | type NatsService struct { //Implements 'session' 31 | Configuration *Service 32 | nc *nats.Conn 33 | ec *nats.EncodedConn 34 | AppConfig *Configuration 35 | } 36 | 37 | 38 | //////////////////////////////////////// 39 | // Interface Implementations 40 | //////////////////////////////////////// 41 | 42 | //////////////////////////////////////// NATS 43 | // Connect initiates the primary connection to the range of provided URLs 44 | func (i *NatsService) connect() error { 45 | err := fmt.Errorf("Could not connect to NATS") 46 | 47 | certFile := i.Configuration.Cert 48 | keyFile := i.Configuration.Key 49 | cert, err := tls.LoadX509KeyPair(certFile, keyFile) 50 | if err != nil { 51 | log.Fatalf("[ERROR] Parsing X509 certificate/key pair: %v", err) 52 | } 53 | 54 | rootPEM, err := ioutil.ReadFile(i.Configuration.CACert) 55 | 56 | pool := x509.NewCertPool() 57 | ok := pool.AppendCertsFromPEM([]byte(rootPEM)) 58 | if !ok { 59 | log.Fatalln("[ERROR] Failed to parse root certificate.") 60 | } 61 | 62 | config := &tls.Config{ 63 | //ServerName: i.Configuration.Hosts[0], 64 | Certificates: []tls.Certificate{cert}, 65 | RootCAs: pool, 66 | MinVersion: tls.VersionTLS12, 67 | InsecureSkipVerify: i.Configuration.Secure, //TODO: SECURITY THREAT 68 | } 69 | 70 | if i.nc, err = nats.Connect(strings.Join(i.Configuration.Hosts[:], ","), nats.Secure(config)); err != nil { 71 | fmt.Println("[ERROR] Connecting to NATS:", err) 72 | return err 73 | } 74 | if i.ec, err = nats.NewEncodedConn(i.nc, nats.JSON_ENCODER); err != nil { 75 | fmt.Println("[ERROR] Encoding NATS:", err) 76 | return err 77 | } 78 | 79 | return nil 80 | } 81 | 82 | //////////////////////////////////////// NATS 83 | // Close 84 | //will terminate the session to the backend, returning error if an issue arises 85 | func (i *NatsService) 
close() error { 86 | i.ec.Drain() 87 | i.ec.Close() 88 | i.nc.Drain() 89 | i.nc.Close() 90 | return nil 91 | } 92 | 93 | 94 | //////////////////////////////////////// NATS 95 | // Write 96 | func (i *NatsService) publish(channel string, msg string) error { 97 | // sendCh := make(chan *map[string]interface{}) 98 | // i.ec.Publish(i.Configuration.Context, w.Values) 99 | // i.ec.BindSendChan(i.Configuration.Context, sendCh) 100 | // sendCh <- w.Values 101 | return i.nc.Publish(channel, []byte(msg)) 102 | } -------------------------------------------------------------------------------- /dcrontab/gorocksdb/options_transactiondb.go: -------------------------------------------------------------------------------- 1 | package gorocksdb 2 | 3 | // #include "rocksdb/c.h" 4 | import "C" 5 | 6 | // TransactionDBOptions represent all of the available options when opening a transactional database 7 | // with OpenTransactionDb. 8 | type TransactionDBOptions struct { 9 | c *C.rocksdb_transactiondb_options_t 10 | } 11 | 12 | // NewDefaultTransactionDBOptions creates a default TransactionDBOptions object. 13 | func NewDefaultTransactionDBOptions() *TransactionDBOptions { 14 | return NewNativeTransactionDBOptions(C.rocksdb_transactiondb_options_create()) 15 | } 16 | 17 | // NewDefaultTransactionDBOptions creates a TransactionDBOptions object. 18 | func NewNativeTransactionDBOptions(c *C.rocksdb_transactiondb_options_t) *TransactionDBOptions { 19 | return &TransactionDBOptions{c} 20 | } 21 | 22 | // SetMaxNumLocks sets the maximum number of keys that can be locked at the same time 23 | // per column family. 24 | // If the number of locked keys is greater than max_num_locks, transaction 25 | // writes (or GetForUpdate) will return an error. 26 | // If this value is not positive, no limit will be enforced. 
27 | func (opts *TransactionDBOptions) SetMaxNumLocks(max_num_locks int64) { 28 | C.rocksdb_transactiondb_options_set_max_num_locks(opts.c, C.int64_t(max_num_locks)) 29 | } 30 | 31 | // SetNumStripes sets the concurrency level. 32 | // Increasing this value will increase the concurrency by dividing the lock 33 | // table (per column family) into more sub-tables, each with their own 34 | // separate 35 | // mutex. 36 | func (opts *TransactionDBOptions) SetNumStripes(num_stripes uint64) { 37 | C.rocksdb_transactiondb_options_set_num_stripes(opts.c, C.size_t(num_stripes)) 38 | } 39 | 40 | // SetTransactionLockTimeout if positive, specifies the default wait timeout in milliseconds when 41 | // a transaction attempts to lock a key if not specified by 42 | // TransactionOptions::lock_timeout. 43 | // 44 | // If 0, no waiting is done if a lock cannot instantly be acquired. 45 | // If negative, there is no timeout. Not using a timeout is not recommended 46 | // as it can lead to deadlocks. Currently, there is no deadlock-detection to 47 | // recover from a deadlock. 48 | func (opts *TransactionDBOptions) SetTransactionLockTimeout(txn_lock_timeout int64) { 49 | C.rocksdb_transactiondb_options_set_transaction_lock_timeout(opts.c, C.int64_t(txn_lock_timeout)) 50 | } 51 | 52 | // SetDefaultLockTimeout if posititve, specifies the wait timeout in milliseconds when writing a key 53 | // OUTSIDE of a transaction (ie by calling DB::Put(),Merge(),Delete(),Write() 54 | // directly). 55 | // If 0, no waiting is done if a lock cannot instantly be acquired. 56 | // If negative, there is no timeout and will block indefinitely when acquiring 57 | // a lock. 58 | // 59 | // Not using a timeout can lead to deadlocks. Currently, there 60 | // is no deadlock-detection to recover from a deadlock. While DB writes 61 | // cannot deadlock with other DB writes, they can deadlock with a transaction. 
62 | // A negative timeout should only be used if all transactions have a small 63 | // expiration set. 64 | func (opts *TransactionDBOptions) SetDefaultLockTimeout(default_lock_timeout int64) { 65 | C.rocksdb_transactiondb_options_set_default_lock_timeout(opts.c, C.int64_t(default_lock_timeout)) 66 | } 67 | 68 | // Destroy deallocates the TransactionDBOptions object. 69 | func (opts *TransactionDBOptions) Destroy() { 70 | C.rocksdb_transactiondb_options_destroy(opts.c) 71 | opts.c = nil 72 | } 73 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | 2 | FROM golang:1.12 3 | MAINTAINER Andrew Grosser 4 | 5 | 6 | EXPOSE 6001 7 | EXPOSE 6002 8 | EXPOSE 6003 9 | 10 | RUN apt update \ 11 | && apt install -y build-essential cmake libjemalloc-dev libbz2-dev libsnappy-dev zlib1g-dev liblz4-dev libzstd-dev \ 12 | sudo \ 13 | supervisor \ 14 | netcat 15 | 16 | ############ 17 | ## Physical instructions: 18 | ############ 19 | # apt install git 20 | # apt upgrade 21 | 22 | ## Get good go build 23 | ## https://golang.org/dl/ 24 | 25 | # wget https://dl.google.com/go/go1.13.4.linux-amd64.tar.gz 26 | #tar -xvf go1.13.3.linux-amd64.tar.gz 27 | #sudo mv go /usr/local 28 | #mkdir projects 29 | #cd projects/ 30 | #mkdir go 31 | #vi ~/.bashrc 32 | 33 | ## Add to .bashrc 34 | #echo "export GOROOT=/usr/local/go" >> ~/.bashrc 35 | #echo "export GOPATH=$HOME/projects/go" >> ~/.bashrc 36 | #echo "export PATH=$HOME/projects/go/bin:/usr/local/go/bin:$PATH" >> ~/.bashrc 37 | 38 | # cd ~/projects 39 | # git clone https://github.com/lni/dragonboat 40 | # cd dragonboat 41 | # ROCKSDB_VER=5.17.2 make install-rocksdb-ull 42 | 43 | # cd ~/projects 44 | # git clone https://github.com/dioptre/dcrontab 45 | # cd dcrontab/dcrontab 46 | # go get 47 | # cd .. 
48 | # make 49 | 50 | #sudo mkdir /app 51 | #sudo chown admin:admin /app 52 | #ln -s /home/admin/projects/dcrontab /app/dcrontab 53 | 54 | #sudo ln /home/admin/projects/dcrontab/supervisor.conf /etc/supervisor.conf 55 | #sudo ln /home/admin/projects/dcrontab/dcron.supervisor.conf /etc/supervisor/conf.d/dcron.supervisor.conf 56 | 57 | ##UPDATE THE CONFIG FILE 58 | 59 | ## Change hostname on amazon jessie 60 | #sudo hostnamectl set-hostname dcrontab1 61 | #sudo reboot 62 | 63 | COPY supervisor.conf /etc/supervisor.conf 64 | COPY dcron.supervisor.conf /etc/supervisor/conf.d/dcron.supervisor.conf 65 | 66 | # # installing latest gflags 67 | # RUN cd /tmp && \ 68 | # git clone https://github.com/gflags/gflags.git && \ 69 | # cd gflags && \ 70 | # mkdir build && \ 71 | # cd build && \ 72 | # cmake -DBUILD_SHARED_LIBS=1 -DGFLAGS_INSTALL_SHARED_LIBS=1 .. && \ 73 | # make install && \ 74 | # cd /tmp && \ 75 | # rm -R /tmp/gflags/ 76 | 77 | # # Install Rocksdb 78 | # RUN cd /tmp && \ 79 | # git clone https://github.com/facebook/rocksdb.git && \ 80 | # cd rocksdb && \ 81 | # git checkout v6.3.6 && \ 82 | # make shared_lib && \ 83 | # mkdir -p /usr/local/rocksdb/lib && \ 84 | # mkdir /usr/local/rocksdb/include && \ 85 | # cp librocksdb.so* /usr/local/rocksdb/lib && \ 86 | # cp /usr/local/rocksdb/lib/librocksdb.so* /usr/lib/ && \ 87 | # cp -r include /usr/local/rocksdb/ && \ 88 | # cp -r include/* /usr/include/ && \ 89 | # rm -R /tmp/rocksdb/ 90 | 91 | # #Install Gorocksdb 92 | # RUN CGO_CFLAGS="-I/usr/local/rocksdb/include" \ 93 | # CGO_LDFLAGS="-L/usr/local/rocksdb/lib -lrocksdb -lstdc++ -lm -lz -lbz2 -lsnappy -llz4 -lzstd" \ 94 | # go get github.com/tecbot/gorocksdb 95 | 96 | # # RUN go get github.com/tecbot/gorocksdb 97 | ENV ROCKSDB_VER=5.17.2 98 | 99 | WORKDIR $HOME/src 100 | RUN git clone https://github.com/lni/dragonboat 101 | WORKDIR $HOME/src/dragonboat 102 | # RUN sed -i 's/ROCKSDB_MAJOR_VER\=.*$/ROCKSDB_MAJOR_VER\=5/ig' Makefile 103 | # RUN sed -i 
's/ROCKSDB_MINOR_VER\=.*$/ROCKSDB_MINOR_VER\=17/ig' Makefile 104 | # RUN sed -i 's/ROCKSDB_PATCH_VER\=.*$/ROCKSDB_PATCH_VER\=2/ig' Makefile 105 | RUN make install-rocksdb-ull 106 | 107 | WORKDIR /app/dcrontab 108 | ADD . /app/dcrontab 109 | RUN rm -rf dcrontab-data 110 | RUN go get 111 | RUN make 112 | 113 | #sudo docker build -t dcrontab . 114 | CMD bash dockercmd.sh 115 | 116 | -------------------------------------------------------------------------------- /dcrontab/gorocksdb/filter_policy.go: -------------------------------------------------------------------------------- 1 | package gorocksdb 2 | 3 | // #include "rocksdb/c.h" 4 | import "C" 5 | 6 | // FilterPolicy is a factory type that allows the RocksDB database to create a 7 | // filter, such as a bloom filter, which will used to reduce reads. 8 | type FilterPolicy interface { 9 | // keys contains a list of keys (potentially with duplicates) 10 | // that are ordered according to the user supplied comparator. 11 | CreateFilter(keys [][]byte) []byte 12 | 13 | // "filter" contains the data appended by a preceding call to 14 | // CreateFilter(). This method must return true if 15 | // the key was in the list of keys passed to CreateFilter(). 16 | // This method may return true or false if the key was not on the 17 | // list, but it should aim to return false with a high probability. 18 | KeyMayMatch(key []byte, filter []byte) bool 19 | 20 | // Return the name of this policy. 21 | Name() string 22 | } 23 | 24 | // NewNativeFilterPolicy creates a FilterPolicy object. 
25 | func NewNativeFilterPolicy(c *C.rocksdb_filterpolicy_t) FilterPolicy { 26 | return nativeFilterPolicy{c} 27 | } 28 | 29 | type nativeFilterPolicy struct { 30 | c *C.rocksdb_filterpolicy_t 31 | } 32 | 33 | func (fp nativeFilterPolicy) CreateFilter(keys [][]byte) []byte { return nil } 34 | func (fp nativeFilterPolicy) KeyMayMatch(key []byte, filter []byte) bool { return false } 35 | func (fp nativeFilterPolicy) Name() string { return "" } 36 | 37 | // NewBloomFilter returns a new filter policy that uses a bloom filter with approximately 38 | // the specified number of bits per key. A good value for bits_per_key 39 | // is 10, which yields a filter with ~1% false positive rate. 40 | // 41 | // Note: if you are using a custom comparator that ignores some parts 42 | // of the keys being compared, you must not use NewBloomFilterPolicy() 43 | // and must provide your own FilterPolicy that also ignores the 44 | // corresponding parts of the keys. For example, if the comparator 45 | // ignores trailing spaces, it would be incorrect to use a 46 | // FilterPolicy (like NewBloomFilterPolicy) that does not ignore 47 | // trailing spaces in keys. 48 | func NewBloomFilter(bitsPerKey int) FilterPolicy { 49 | return NewNativeFilterPolicy(C.rocksdb_filterpolicy_create_bloom(C.int(bitsPerKey))) 50 | } 51 | 52 | // Hold references to filter policies. 
53 | var filterPolicies = NewCOWList() 54 | 55 | type filterPolicyWrapper struct { 56 | name *C.char 57 | filterPolicy FilterPolicy 58 | } 59 | 60 | func registerFilterPolicy(fp FilterPolicy) int { 61 | return filterPolicies.Append(filterPolicyWrapper{C.CString(fp.Name()), fp}) 62 | } 63 | 64 | //export gorocksdb_filterpolicy_create_filter 65 | func gorocksdb_filterpolicy_create_filter(idx int, cKeys **C.char, cKeysLen *C.size_t, cNumKeys C.int, cDstLen *C.size_t) *C.char { 66 | rawKeys := charSlice(cKeys, cNumKeys) 67 | keysLen := sizeSlice(cKeysLen, cNumKeys) 68 | keys := make([][]byte, int(cNumKeys)) 69 | for i, len := range keysLen { 70 | keys[i] = charToByte(rawKeys[i], len) 71 | } 72 | 73 | dst := filterPolicies.Get(idx).(filterPolicyWrapper).filterPolicy.CreateFilter(keys) 74 | *cDstLen = C.size_t(len(dst)) 75 | return cByteSlice(dst) 76 | } 77 | 78 | //export gorocksdb_filterpolicy_key_may_match 79 | func gorocksdb_filterpolicy_key_may_match(idx int, cKey *C.char, cKeyLen C.size_t, cFilter *C.char, cFilterLen C.size_t) C.uchar { 80 | key := charToByte(cKey, cKeyLen) 81 | filter := charToByte(cFilter, cFilterLen) 82 | return boolToChar(filterPolicies.Get(idx).(filterPolicyWrapper).filterPolicy.KeyMayMatch(key, filter)) 83 | } 84 | 85 | //export gorocksdb_filterpolicy_name 86 | func gorocksdb_filterpolicy_name(idx int) *C.char { 87 | return filterPolicies.Get(idx).(filterPolicyWrapper).name 88 | } 89 | -------------------------------------------------------------------------------- /dcrontab/gorocksdb/iterator.go: -------------------------------------------------------------------------------- 1 | package gorocksdb 2 | 3 | // #include 4 | // #include "rocksdb/c.h" 5 | import "C" 6 | import ( 7 | "bytes" 8 | "errors" 9 | "unsafe" 10 | ) 11 | 12 | // Iterator provides a way to seek to specific keys and iterate through 13 | // the keyspace from that point, as well as access the values of those keys. 
14 | // 15 | // For example: 16 | // 17 | // it := db.NewIterator(readOpts) 18 | // defer it.Close() 19 | // 20 | // it.Seek([]byte("foo")) 21 | // for ; it.Valid(); it.Next() { 22 | // fmt.Printf("Key: %v Value: %v\n", it.Key().Data(), it.Value().Data()) 23 | // } 24 | // 25 | // if err := it.Err(); err != nil { 26 | // return err 27 | // } 28 | // 29 | type Iterator struct { 30 | c *C.rocksdb_iterator_t 31 | } 32 | 33 | // NewNativeIterator creates a Iterator object. 34 | func NewNativeIterator(c unsafe.Pointer) *Iterator { 35 | return &Iterator{(*C.rocksdb_iterator_t)(c)} 36 | } 37 | 38 | // Valid returns false only when an Iterator has iterated past either the 39 | // first or the last key in the database. 40 | func (iter *Iterator) Valid() bool { 41 | return C.rocksdb_iter_valid(iter.c) != 0 42 | } 43 | 44 | // ValidForPrefix returns false only when an Iterator has iterated past the 45 | // first or the last key in the database or the specified prefix. 46 | func (iter *Iterator) ValidForPrefix(prefix []byte) bool { 47 | if C.rocksdb_iter_valid(iter.c) == 0 { 48 | return false 49 | } 50 | 51 | key := iter.Key() 52 | result := bytes.HasPrefix(key.Data(), prefix) 53 | key.Free() 54 | return result 55 | } 56 | 57 | // Key returns the key the iterator currently holds. 58 | func (iter *Iterator) Key() *Slice { 59 | var cLen C.size_t 60 | cKey := C.rocksdb_iter_key(iter.c, &cLen) 61 | if cKey == nil { 62 | return nil 63 | } 64 | return &Slice{cKey, cLen, true} 65 | } 66 | 67 | // Value returns the value in the database the iterator currently holds. 68 | func (iter *Iterator) Value() *Slice { 69 | var cLen C.size_t 70 | cVal := C.rocksdb_iter_value(iter.c, &cLen) 71 | if cVal == nil { 72 | return nil 73 | } 74 | return &Slice{cVal, cLen, true} 75 | } 76 | 77 | // Next moves the iterator to the next sequential key in the database. 
78 | func (iter *Iterator) Next() { 79 | C.rocksdb_iter_next(iter.c) 80 | } 81 | 82 | // Prev moves the iterator to the previous sequential key in the database. 83 | func (iter *Iterator) Prev() { 84 | C.rocksdb_iter_prev(iter.c) 85 | } 86 | 87 | // SeekToFirst moves the iterator to the first key in the database. 88 | func (iter *Iterator) SeekToFirst() { 89 | C.rocksdb_iter_seek_to_first(iter.c) 90 | } 91 | 92 | // SeekToLast moves the iterator to the last key in the database. 93 | func (iter *Iterator) SeekToLast() { 94 | C.rocksdb_iter_seek_to_last(iter.c) 95 | } 96 | 97 | // Seek moves the iterator to the position greater than or equal to the key. 98 | func (iter *Iterator) Seek(key []byte) { 99 | cKey := byteToChar(key) 100 | C.rocksdb_iter_seek(iter.c, cKey, C.size_t(len(key))) 101 | } 102 | 103 | // SeekForPrev moves the iterator to the last key that less than or equal 104 | // to the target key, in contrast with Seek. 105 | func (iter *Iterator) SeekForPrev(key []byte) { 106 | cKey := byteToChar(key) 107 | C.rocksdb_iter_seek_for_prev(iter.c, cKey, C.size_t(len(key))) 108 | } 109 | 110 | // Err returns nil if no errors happened during iteration, or the actual 111 | // error otherwise. 112 | func (iter *Iterator) Err() error { 113 | var cErr *C.char 114 | C.rocksdb_iter_get_error(iter.c, &cErr) 115 | if cErr != nil { 116 | defer C.free(unsafe.Pointer(cErr)) 117 | return errors.New(C.GoString(cErr)) 118 | } 119 | return nil 120 | } 121 | 122 | // Close closes the iterator. 
123 | func (iter *Iterator) Close() { 124 | C.rocksdb_iter_destroy(iter.c) 125 | iter.c = nil 126 | } 127 | -------------------------------------------------------------------------------- /dcrontab/gorocksdb/transaction.go: -------------------------------------------------------------------------------- 1 | package gorocksdb 2 | 3 | // #include 4 | // #include "rocksdb/c.h" 5 | import "C" 6 | 7 | import ( 8 | "errors" 9 | "unsafe" 10 | ) 11 | 12 | // Transaction is used with TransactionDB for transaction support. 13 | type Transaction struct { 14 | c *C.rocksdb_transaction_t 15 | } 16 | 17 | // NewNativeTransaction creates a Transaction object. 18 | func NewNativeTransaction(c *C.rocksdb_transaction_t) *Transaction { 19 | return &Transaction{c} 20 | } 21 | 22 | // Commit commits the transaction to the database. 23 | func (transaction *Transaction) Commit() error { 24 | var ( 25 | cErr *C.char 26 | ) 27 | C.rocksdb_transaction_commit(transaction.c, &cErr) 28 | if cErr != nil { 29 | defer C.free(unsafe.Pointer(cErr)) 30 | return errors.New(C.GoString(cErr)) 31 | } 32 | return nil 33 | } 34 | 35 | // Rollback performs a rollback on the transaction. 36 | func (transaction *Transaction) Rollback() error { 37 | var ( 38 | cErr *C.char 39 | ) 40 | C.rocksdb_transaction_rollback(transaction.c, &cErr) 41 | 42 | if cErr != nil { 43 | defer C.free(unsafe.Pointer(cErr)) 44 | return errors.New(C.GoString(cErr)) 45 | } 46 | return nil 47 | } 48 | 49 | // Get returns the data associated with the key from the database given this transaction. 
50 | func (transaction *Transaction) Get(opts *ReadOptions, key []byte) (*Slice, error) { 51 | var ( 52 | cErr *C.char 53 | cValLen C.size_t 54 | cKey = byteToChar(key) 55 | ) 56 | cValue := C.rocksdb_transaction_get( 57 | transaction.c, opts.c, cKey, C.size_t(len(key)), &cValLen, &cErr, 58 | ) 59 | if cErr != nil { 60 | defer C.free(unsafe.Pointer(cErr)) 61 | return nil, errors.New(C.GoString(cErr)) 62 | } 63 | return NewSlice(cValue, cValLen), nil 64 | } 65 | 66 | // GetForUpdate queries the data associated with the key and puts an exclusive lock on the key from the database given this transaction. 67 | func (transaction *Transaction) GetForUpdate(opts *ReadOptions, key []byte) (*Slice, error) { 68 | var ( 69 | cErr *C.char 70 | cValLen C.size_t 71 | cKey = byteToChar(key) 72 | ) 73 | cValue := C.rocksdb_transaction_get_for_update( 74 | transaction.c, opts.c, cKey, C.size_t(len(key)), &cValLen, C.uchar(byte(1)) /*exclusive*/, &cErr, 75 | ) 76 | if cErr != nil { 77 | defer C.free(unsafe.Pointer(cErr)) 78 | return nil, errors.New(C.GoString(cErr)) 79 | } 80 | return NewSlice(cValue, cValLen), nil 81 | } 82 | 83 | // Put writes data associated with a key to the transaction. 84 | func (transaction *Transaction) Put(key, value []byte) error { 85 | var ( 86 | cErr *C.char 87 | cKey = byteToChar(key) 88 | cValue = byteToChar(value) 89 | ) 90 | C.rocksdb_transaction_put( 91 | transaction.c, cKey, C.size_t(len(key)), cValue, C.size_t(len(value)), &cErr, 92 | ) 93 | if cErr != nil { 94 | defer C.free(unsafe.Pointer(cErr)) 95 | return errors.New(C.GoString(cErr)) 96 | } 97 | return nil 98 | } 99 | 100 | // Delete removes the data associated with the key from the transaction. 
101 | func (transaction *Transaction) Delete(key []byte) error { 102 | var ( 103 | cErr *C.char 104 | cKey = byteToChar(key) 105 | ) 106 | C.rocksdb_transaction_delete(transaction.c, cKey, C.size_t(len(key)), &cErr) 107 | if cErr != nil { 108 | defer C.free(unsafe.Pointer(cErr)) 109 | return errors.New(C.GoString(cErr)) 110 | } 111 | return nil 112 | } 113 | 114 | // NewIterator returns an Iterator over the database that uses the 115 | // ReadOptions given. 116 | func (transaction *Transaction) NewIterator(opts *ReadOptions) *Iterator { 117 | return NewNativeIterator( 118 | unsafe.Pointer(C.rocksdb_transaction_create_iterator(transaction.c, opts.c))) 119 | } 120 | 121 | // Destroy deallocates the transaction object. 122 | func (transaction *Transaction) Destroy() { 123 | C.rocksdb_transaction_destroy(transaction.c) 124 | transaction.c = nil 125 | } 126 | -------------------------------------------------------------------------------- /dcrontab/gorocksdb/transactiondb.go: -------------------------------------------------------------------------------- 1 | package gorocksdb 2 | 3 | // #include 4 | // #include "rocksdb/c.h" 5 | import "C" 6 | import ( 7 | "errors" 8 | "unsafe" 9 | ) 10 | 11 | // TransactionDB is a reusable handle to a RocksDB transactional database on disk, created by OpenTransactionDb. 12 | type TransactionDB struct { 13 | c *C.rocksdb_transactiondb_t 14 | name string 15 | opts *Options 16 | transactionDBOpts *TransactionDBOptions 17 | } 18 | 19 | // OpenTransactionDb opens a database with the specified options. 
20 | func OpenTransactionDb( 21 | opts *Options, 22 | transactionDBOpts *TransactionDBOptions, 23 | name string, 24 | ) (*TransactionDB, error) { 25 | var ( 26 | cErr *C.char 27 | cName = C.CString(name) 28 | ) 29 | defer C.free(unsafe.Pointer(cName)) 30 | db := C.rocksdb_transactiondb_open( 31 | opts.c, transactionDBOpts.c, cName, &cErr) 32 | if cErr != nil { 33 | defer C.free(unsafe.Pointer(cErr)) 34 | return nil, errors.New(C.GoString(cErr)) 35 | } 36 | return &TransactionDB{ 37 | name: name, 38 | c: db, 39 | opts: opts, 40 | transactionDBOpts: transactionDBOpts, 41 | }, nil 42 | } 43 | 44 | // NewSnapshot creates a new snapshot of the database. 45 | func (db *TransactionDB) NewSnapshot() *Snapshot { 46 | return NewNativeSnapshot(C.rocksdb_transactiondb_create_snapshot(db.c)) 47 | } 48 | 49 | // ReleaseSnapshot releases the snapshot and its resources. 50 | func (db *TransactionDB) ReleaseSnapshot(snapshot *Snapshot) { 51 | C.rocksdb_transactiondb_release_snapshot(db.c, snapshot.c) 52 | snapshot.c = nil 53 | } 54 | 55 | // TransactionBegin begins a new transaction 56 | // with the WriteOptions and TransactionOptions given. 57 | func (db *TransactionDB) TransactionBegin( 58 | opts *WriteOptions, 59 | transactionOpts *TransactionOptions, 60 | oldTransaction *Transaction, 61 | ) *Transaction { 62 | if oldTransaction != nil { 63 | return NewNativeTransaction(C.rocksdb_transaction_begin( 64 | db.c, 65 | opts.c, 66 | transactionOpts.c, 67 | oldTransaction.c, 68 | )) 69 | } 70 | 71 | return NewNativeTransaction(C.rocksdb_transaction_begin( 72 | db.c, opts.c, transactionOpts.c, nil)) 73 | } 74 | 75 | // Get returns the data associated with the key from the database. 
76 | func (db *TransactionDB) Get(opts *ReadOptions, key []byte) (*Slice, error) { 77 | var ( 78 | cErr *C.char 79 | cValLen C.size_t 80 | cKey = byteToChar(key) 81 | ) 82 | cValue := C.rocksdb_transactiondb_get( 83 | db.c, opts.c, cKey, C.size_t(len(key)), &cValLen, &cErr, 84 | ) 85 | if cErr != nil { 86 | defer C.free(unsafe.Pointer(cErr)) 87 | return nil, errors.New(C.GoString(cErr)) 88 | } 89 | return NewSlice(cValue, cValLen), nil 90 | } 91 | 92 | // Put writes data associated with a key to the database. 93 | func (db *TransactionDB) Put(opts *WriteOptions, key, value []byte) error { 94 | var ( 95 | cErr *C.char 96 | cKey = byteToChar(key) 97 | cValue = byteToChar(value) 98 | ) 99 | C.rocksdb_transactiondb_put( 100 | db.c, opts.c, cKey, C.size_t(len(key)), cValue, C.size_t(len(value)), &cErr, 101 | ) 102 | if cErr != nil { 103 | defer C.free(unsafe.Pointer(cErr)) 104 | return errors.New(C.GoString(cErr)) 105 | } 106 | return nil 107 | } 108 | 109 | // Delete removes the data associated with the key from the database. 110 | func (db *TransactionDB) Delete(opts *WriteOptions, key []byte) error { 111 | var ( 112 | cErr *C.char 113 | cKey = byteToChar(key) 114 | ) 115 | C.rocksdb_transactiondb_delete(db.c, opts.c, cKey, C.size_t(len(key)), &cErr) 116 | if cErr != nil { 117 | defer C.free(unsafe.Pointer(cErr)) 118 | return errors.New(C.GoString(cErr)) 119 | } 120 | return nil 121 | } 122 | 123 | // NewCheckpoint creates a new Checkpoint for this db. 124 | func (db *TransactionDB) NewCheckpoint() (*Checkpoint, error) { 125 | var ( 126 | cErr *C.char 127 | ) 128 | cCheckpoint := C.rocksdb_transactiondb_checkpoint_object_create( 129 | db.c, &cErr, 130 | ) 131 | if cErr != nil { 132 | defer C.free(unsafe.Pointer(cErr)) 133 | return nil, errors.New(C.GoString(cErr)) 134 | } 135 | 136 | return NewNativeCheckpoint(cCheckpoint), nil 137 | } 138 | 139 | // Close closes the database. 
140 | func (transactionDB *TransactionDB) Close() { 141 | C.rocksdb_transactiondb_close(transactionDB.c) 142 | transactionDB.c = nil 143 | } 144 | -------------------------------------------------------------------------------- /go.sum: -------------------------------------------------------------------------------- 1 | cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= 2 | github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= 3 | github.com/VictoriaMetrics/metrics v1.5.0 h1:WvQqPn+z9pR1U7J58CgaGiWrN8phNGSpr2xUSxJnfpE= 4 | github.com/VictoriaMetrics/metrics v1.5.0/go.mod h1:QZAL5yLaXvhSPeib0ahluGo9VK0HXDZHovKaKlpuWvs= 5 | github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= 6 | github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= 7 | github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= 8 | github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= 9 | github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= 10 | github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= 11 | github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= 12 | github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= 13 | github.com/lni/dragonboat v2.1.7+incompatible h1:U04ZmiQKXcdRWNE6jccSiYJK7k+ECwN7XIw1xpTiJeE= 14 | github.com/lni/dragonboat v2.1.7+incompatible/go.mod h1:eI3naIUzh2+DKqtOFwdZ2t0Z/BjlGEjWZgS07x7x8oU= 15 | github.com/lni/dragonboat/v3 v3.1.0 h1:Hx/i7unEbt0dD1HBINkIsp+3IuFmJ34Ih3VkTxWTMN4= 16 | github.com/lni/dragonboat/v3 v3.1.0/go.mod h1:EQ/59ROPqeDktEFaZcz0lXSSnM74Yj7r92X8oHsZKSQ= 17 | github.com/lni/goutils v1.0.1 h1:b0RTWAg84viMYUy9zJZGATzyXZlCLra3HqMgzydCx8I= 18 | github.com/lni/goutils v1.0.1/go.mod 
h1:f2YgTtVeGFUdJrZJmtf0S05qYvSZXdmBxa5Z8nhBU/Q= 19 | github.com/lni/goutils v1.0.2 h1:XEQvY9GWR/4cUA35l55tIroD9gA2wlByOAu3thdnThA= 20 | github.com/nats-io/nats.go v1.8.1 h1:6lF/f1/NN6kzUDBz6pyvQDEXO39jqXcWRLu/tKjtOUQ= 21 | github.com/nats-io/nats.go v1.8.1/go.mod h1:BrFz9vVn0fU3AcH9Vn4Kd7W0NpJ651tD5omQ3M8LwxM= 22 | github.com/nats-io/nkeys v0.0.2 h1:+qM7QpgXnvDDixitZtQUBDY9w/s9mu1ghS+JIbsrx6M= 23 | github.com/nats-io/nkeys v0.0.2/go.mod h1:dab7URMsZm6Z/jp9Z5UGa87Uutgc2mVpXLC4B7TDb/4= 24 | github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw= 25 | github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= 26 | github.com/valyala/fastrand v1.0.0 h1:LUKT9aKer2dVQNUi3waewTbKV+7H17kvWFNKs2ObdkI= 27 | github.com/valyala/fastrand v1.0.0/go.mod h1:HWqCzkrkg6QXT8V2EXWvXCoow7vLwOFN002oeRzjapQ= 28 | github.com/valyala/histogram v1.0.1 h1:FzA7n2Tz/wKRMejgu3PV1vw3htAklTjjuoI6z3d4KDg= 29 | github.com/valyala/histogram v1.0.1/go.mod h1:lQy0xA4wUz2+IUnf97SivorsJIp8FxsnRd6x25q7Mto= 30 | golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9 h1:mKdxBk7AujPs8kU4m80U72y/zjbZ3UcXC7dClwKbUI0= 31 | golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= 32 | golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M= 33 | golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= 34 | golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= 35 | golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= 36 | golang.org/x/net v0.0.0-20190930134127-c5a3c61f89f3 h1:6KET3Sqa7fkVfD63QnAM81ZeYg5n4HwApOJkufONnHA= 37 | golang.org/x/net v0.0.0-20190930134127-c5a3c61f89f3/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= 38 | golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod 
h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= 39 | golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 40 | golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU= 41 | golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= 42 | golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= 43 | golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= 44 | golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= 45 | golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= 46 | google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= 47 | google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8 h1:Nw54tB0rB7hY/N0NQvRW8DG4Yk3Q6T9cu9RcFQDu1tc= 48 | google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= 49 | google.golang.org/grpc v1.24.0 h1:vb/1TCsVn3DcJlQ0Gs1yB1pKI6Do2/QNwxdKqmc/b0s= 50 | google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= 51 | honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= 52 | -------------------------------------------------------------------------------- /dcrontab/gorocksdb/backup.go: -------------------------------------------------------------------------------- 1 | package gorocksdb 2 | 3 | // #include 4 | // #include "rocksdb/c.h" 5 | import "C" 6 | import ( 7 | "errors" 8 | "unsafe" 9 | ) 10 | 11 | // BackupEngineInfo represents the information about the backups 12 | // in a backup engine instance. Use this to get the state of the 13 | // backup like number of backups and their ids and timestamps etc. 
14 | type BackupEngineInfo struct { 15 | c *C.rocksdb_backup_engine_info_t 16 | } 17 | 18 | // GetCount gets the number backsup available. 19 | func (b *BackupEngineInfo) GetCount() int { 20 | return int(C.rocksdb_backup_engine_info_count(b.c)) 21 | } 22 | 23 | // GetTimestamp gets the timestamp at which the backup index was taken. 24 | func (b *BackupEngineInfo) GetTimestamp(index int) int64 { 25 | return int64(C.rocksdb_backup_engine_info_timestamp(b.c, C.int(index))) 26 | } 27 | 28 | // GetBackupId gets an id that uniquely identifies a backup 29 | // regardless of its position. 30 | func (b *BackupEngineInfo) GetBackupId(index int) int64 { 31 | return int64(C.rocksdb_backup_engine_info_backup_id(b.c, C.int(index))) 32 | } 33 | 34 | // GetSize get the size of the backup in bytes. 35 | func (b *BackupEngineInfo) GetSize(index int) int64 { 36 | return int64(C.rocksdb_backup_engine_info_size(b.c, C.int(index))) 37 | } 38 | 39 | // GetNumFiles gets the number of files in the backup index. 40 | func (b *BackupEngineInfo) GetNumFiles(index int) int32 { 41 | return int32(C.rocksdb_backup_engine_info_number_files(b.c, C.int(index))) 42 | } 43 | 44 | // Destroy destroys the backup engine info instance. 45 | func (b *BackupEngineInfo) Destroy() { 46 | C.rocksdb_backup_engine_info_destroy(b.c) 47 | b.c = nil 48 | } 49 | 50 | // RestoreOptions captures the options to be used during 51 | // restoration of a backup. 52 | type RestoreOptions struct { 53 | c *C.rocksdb_restore_options_t 54 | } 55 | 56 | // NewRestoreOptions creates a RestoreOptions instance. 57 | func NewRestoreOptions() *RestoreOptions { 58 | return &RestoreOptions{ 59 | c: C.rocksdb_restore_options_create(), 60 | } 61 | } 62 | 63 | // SetKeepLogFiles is used to set or unset the keep_log_files option 64 | // If true, restore won't overwrite the existing log files in wal_dir. It will 65 | // also move all log files from archive directory to wal_dir. 66 | // By default, this is false. 
67 | func (ro *RestoreOptions) SetKeepLogFiles(v int) { 68 | C.rocksdb_restore_options_set_keep_log_files(ro.c, C.int(v)) 69 | } 70 | 71 | // Destroy destroys this RestoreOptions instance. 72 | func (ro *RestoreOptions) Destroy() { 73 | C.rocksdb_restore_options_destroy(ro.c) 74 | } 75 | 76 | // BackupEngine is a reusable handle to a RocksDB Backup, created by 77 | // OpenBackupEngine. 78 | type BackupEngine struct { 79 | c *C.rocksdb_backup_engine_t 80 | path string 81 | opts *Options 82 | } 83 | 84 | // OpenBackupEngine opens a backup engine with specified options. 85 | func OpenBackupEngine(opts *Options, path string) (*BackupEngine, error) { 86 | var cErr *C.char 87 | cpath := C.CString(path) 88 | defer C.free(unsafe.Pointer(cpath)) 89 | 90 | be := C.rocksdb_backup_engine_open(opts.c, cpath, &cErr) 91 | if cErr != nil { 92 | defer C.free(unsafe.Pointer(cErr)) 93 | return nil, errors.New(C.GoString(cErr)) 94 | } 95 | return &BackupEngine{ 96 | c: be, 97 | path: path, 98 | opts: opts, 99 | }, nil 100 | } 101 | 102 | // UnsafeGetBackupEngine returns the underlying c backup engine. 103 | func (b *BackupEngine) UnsafeGetBackupEngine() unsafe.Pointer { 104 | return unsafe.Pointer(b.c) 105 | } 106 | 107 | // CreateNewBackup takes a new backup from db. 108 | func (b *BackupEngine) CreateNewBackup(db *DB) error { 109 | var cErr *C.char 110 | 111 | C.rocksdb_backup_engine_create_new_backup(b.c, db.c, &cErr) 112 | if cErr != nil { 113 | defer C.free(unsafe.Pointer(cErr)) 114 | return errors.New(C.GoString(cErr)) 115 | } 116 | 117 | return nil 118 | } 119 | 120 | // GetInfo gets an object that gives information about 121 | // the backups that have already been taken 122 | func (b *BackupEngine) GetInfo() *BackupEngineInfo { 123 | return &BackupEngineInfo{ 124 | c: C.rocksdb_backup_engine_get_backup_info(b.c), 125 | } 126 | } 127 | 128 | // RestoreDBFromLatestBackup restores the latest backup to dbDir. 
walDir 129 | // is where the write ahead logs are restored to and usually the same as dbDir. 130 | func (b *BackupEngine) RestoreDBFromLatestBackup(dbDir, walDir string, ro *RestoreOptions) error { 131 | var cErr *C.char 132 | cDbDir := C.CString(dbDir) 133 | cWalDir := C.CString(walDir) 134 | defer func() { 135 | C.free(unsafe.Pointer(cDbDir)) 136 | C.free(unsafe.Pointer(cWalDir)) 137 | }() 138 | 139 | C.rocksdb_backup_engine_restore_db_from_latest_backup(b.c, cDbDir, cWalDir, ro.c, &cErr) 140 | if cErr != nil { 141 | defer C.free(unsafe.Pointer(cErr)) 142 | return errors.New(C.GoString(cErr)) 143 | } 144 | return nil 145 | } 146 | 147 | // Close close the backup engine and cleans up state 148 | // The backups already taken remain on storage. 149 | func (b *BackupEngine) Close() { 150 | C.rocksdb_backup_engine_close(b.c) 151 | b.c = nil 152 | } 153 | -------------------------------------------------------------------------------- /dcrontab/gorocksdb/options_read.go: -------------------------------------------------------------------------------- 1 | package gorocksdb 2 | 3 | // #include "rocksdb/c.h" 4 | import "C" 5 | import "unsafe" 6 | 7 | // ReadTier controls fetching of data during a read request. 8 | // An application can issue a read request (via Get/Iterators) and specify 9 | // if that read should process data that ALREADY resides on a specified cache 10 | // level. For example, if an application specifies BlockCacheTier then the 11 | // Get call will process data that is already processed in the memtable or 12 | // the block cache. It will not page in data from the OS cache or data that 13 | // resides in storage. 14 | type ReadTier uint 15 | 16 | const ( 17 | // ReadAllTier reads data in memtable, block cache, OS cache or storage. 18 | ReadAllTier = ReadTier(0) 19 | // BlockCacheTier reads data in memtable or block cache. 
20 | BlockCacheTier = ReadTier(1) 21 | ) 22 | 23 | // ReadOptions represent all of the available options when reading from a 24 | // database. 25 | type ReadOptions struct { 26 | c *C.rocksdb_readoptions_t 27 | } 28 | 29 | // NewDefaultReadOptions creates a default ReadOptions object. 30 | func NewDefaultReadOptions() *ReadOptions { 31 | return NewNativeReadOptions(C.rocksdb_readoptions_create()) 32 | } 33 | 34 | // NewNativeReadOptions creates a ReadOptions object. 35 | func NewNativeReadOptions(c *C.rocksdb_readoptions_t) *ReadOptions { 36 | return &ReadOptions{c} 37 | } 38 | 39 | // UnsafeGetReadOptions returns the underlying c read options object. 40 | func (opts *ReadOptions) UnsafeGetReadOptions() unsafe.Pointer { 41 | return unsafe.Pointer(opts.c) 42 | } 43 | 44 | // SetVerifyChecksums speciy if all data read from underlying storage will be 45 | // verified against corresponding checksums. 46 | // Default: false 47 | func (opts *ReadOptions) SetVerifyChecksums(value bool) { 48 | C.rocksdb_readoptions_set_verify_checksums(opts.c, boolToChar(value)) 49 | } 50 | 51 | // SetFillCache specify whether the "data block"/"index block"/"filter block" 52 | // read for this iteration should be cached in memory? 53 | // Callers may wish to set this field to false for bulk scans. 54 | // Default: true 55 | func (opts *ReadOptions) SetFillCache(value bool) { 56 | C.rocksdb_readoptions_set_fill_cache(opts.c, boolToChar(value)) 57 | } 58 | 59 | // SetSnapshot sets the snapshot which should be used for the read. 60 | // The snapshot must belong to the DB that is being read and must 61 | // not have been released. 62 | // Default: nil 63 | func (opts *ReadOptions) SetSnapshot(snap *Snapshot) { 64 | C.rocksdb_readoptions_set_snapshot(opts.c, snap.c) 65 | } 66 | 67 | // SetReadTier specify if this read request should process data that ALREADY 68 | // resides on a particular cache. 
If the required data is not 69 | // found at the specified cache, then Status::Incomplete is returned. 70 | // Default: ReadAllTier 71 | func (opts *ReadOptions) SetReadTier(value ReadTier) { 72 | C.rocksdb_readoptions_set_read_tier(opts.c, C.int(value)) 73 | } 74 | 75 | // SetTailing specify if to create a tailing iterator. 76 | // A special iterator that has a view of the complete database 77 | // (i.e. it can also be used to read newly added data) and 78 | // is optimized for sequential reads. It will return records 79 | // that were inserted into the database after the creation of the iterator. 80 | // Default: false 81 | func (opts *ReadOptions) SetTailing(value bool) { 82 | C.rocksdb_readoptions_set_tailing(opts.c, boolToChar(value)) 83 | } 84 | 85 | // SetIterateUpperBound specifies "iterate_upper_bound", which defines 86 | // the extent upto which the forward iterator can returns entries. 87 | // Once the bound is reached, Valid() will be false. 88 | // "iterate_upper_bound" is exclusive ie the bound value is 89 | // not a valid entry. If iterator_extractor is not null, the Seek target 90 | // and iterator_upper_bound need to have the same prefix. 91 | // This is because ordering is not guaranteed outside of prefix domain. 92 | // There is no lower bound on the iterator. If needed, that can be easily 93 | // implemented. 94 | // Default: nullptr 95 | func (opts *ReadOptions) SetIterateUpperBound(key []byte) { 96 | cKey := byteToChar(key) 97 | cKeyLen := C.size_t(len(key)) 98 | C.rocksdb_readoptions_set_iterate_upper_bound(opts.c, cKey, cKeyLen) 99 | } 100 | 101 | // SetPinData specifies the value of "pin_data". If true, it keeps the blocks 102 | // loaded by the iterator pinned in memory as long as the iterator is not deleted, 103 | // If used when reading from tables created with 104 | // BlockBasedTableOptions::use_delta_encoding = false, 105 | // Iterator's property "rocksdb.iterator.is-key-pinned" is guaranteed to 106 | // return 1. 
107 | // Default: false 108 | func (opts *ReadOptions) SetPinData(value bool) { 109 | C.rocksdb_readoptions_set_pin_data(opts.c, boolToChar(value)) 110 | } 111 | 112 | // SetReadaheadSize specifies the value of "readahead_size". 113 | // If non-zero, NewIterator will create a new table reader which 114 | // performs reads of the given size. Using a large size (> 2MB) can 115 | // improve the performance of forward iteration on spinning disks. 116 | // Default: 0 117 | func (opts *ReadOptions) SetReadaheadSize(value uint64) { 118 | C.rocksdb_readoptions_set_readahead_size(opts.c, C.size_t(value)) 119 | } 120 | 121 | // Destroy deallocates the ReadOptions object. 122 | func (opts *ReadOptions) Destroy() { 123 | C.rocksdb_readoptions_destroy(opts.c) 124 | opts.c = nil 125 | } 126 | -------------------------------------------------------------------------------- /dcrontab/gorocksdb/merge_operator.go: -------------------------------------------------------------------------------- 1 | package gorocksdb 2 | 3 | // #include "rocksdb/c.h" 4 | import "C" 5 | 6 | // A MergeOperator specifies the SEMANTICS of a merge, which only 7 | // client knows. It could be numeric addition, list append, string 8 | // concatenation, edit data structure, ... , anything. 9 | // The library, on the other hand, is concerned with the exercise of this 10 | // interface, at the right time (during get, iteration, compaction...) 11 | // 12 | // Please read the RocksDB documentation for 13 | // more details and example implementations. 14 | type MergeOperator interface { 15 | // Gives the client a way to express the read -> modify -> write semantics 16 | // key: The key that's associated with this merge operation. 17 | // Client could multiplex the merge operator based on it 18 | // if the key space is partitioned and different subspaces 19 | // refer to different types of data which have different 20 | // merge operation semantics. 
21 | // existingValue: null indicates that the key does not exist before this op. 22 | // operands: the sequence of merge operations to apply, front() first. 23 | // 24 | // Return true on success. 25 | // 26 | // All values passed in will be client-specific values. So if this method 27 | // returns false, it is because client specified bad data or there was 28 | // internal corruption. This will be treated as an error by the library. 29 | FullMerge(key, existingValue []byte, operands [][]byte) ([]byte, bool) 30 | 31 | // This function performs merge(left_op, right_op) 32 | // when both the operands are themselves merge operation types 33 | // that you would have passed to a db.Merge() call in the same order 34 | // (i.e.: db.Merge(key,left_op), followed by db.Merge(key,right_op)). 35 | // 36 | // PartialMerge should combine them into a single merge operation. 37 | // The return value should be constructed such that a call to 38 | // db.Merge(key, new_value) would yield the same result as a call 39 | // to db.Merge(key, left_op) followed by db.Merge(key, right_op). 40 | // 41 | // If it is impossible or infeasible to combine the two operations, return false. 42 | // The library will internally keep track of the operations, and apply them in the 43 | // correct order once a base-value (a Put/Delete/End-of-Database) is seen. 44 | PartialMerge(key, leftOperand, rightOperand []byte) ([]byte, bool) 45 | 46 | // The name of the MergeOperator. 47 | Name() string 48 | } 49 | 50 | // NewNativeMergeOperator creates a MergeOperator object. 
51 | func NewNativeMergeOperator(c *C.rocksdb_mergeoperator_t) MergeOperator { 52 | return nativeMergeOperator{c} 53 | } 54 | 55 | type nativeMergeOperator struct { 56 | c *C.rocksdb_mergeoperator_t 57 | } 58 | 59 | func (mo nativeMergeOperator) FullMerge(key, existingValue []byte, operands [][]byte) ([]byte, bool) { 60 | return nil, false 61 | } 62 | func (mo nativeMergeOperator) PartialMerge(key, leftOperand, rightOperand []byte) ([]byte, bool) { 63 | return nil, false 64 | } 65 | func (mo nativeMergeOperator) Name() string { return "" } 66 | 67 | // Hold references to merge operators. 68 | var mergeOperators = NewCOWList() 69 | 70 | type mergeOperatorWrapper struct { 71 | name *C.char 72 | mergeOperator MergeOperator 73 | } 74 | 75 | func registerMergeOperator(merger MergeOperator) int { 76 | return mergeOperators.Append(mergeOperatorWrapper{C.CString(merger.Name()), merger}) 77 | } 78 | 79 | //export gorocksdb_mergeoperator_full_merge 80 | func gorocksdb_mergeoperator_full_merge(idx int, cKey *C.char, cKeyLen C.size_t, cExistingValue *C.char, cExistingValueLen C.size_t, cOperands **C.char, cOperandsLen *C.size_t, cNumOperands C.int, cSuccess *C.uchar, cNewValueLen *C.size_t) *C.char { 81 | key := charToByte(cKey, cKeyLen) 82 | rawOperands := charSlice(cOperands, cNumOperands) 83 | operandsLen := sizeSlice(cOperandsLen, cNumOperands) 84 | existingValue := charToByte(cExistingValue, cExistingValueLen) 85 | operands := make([][]byte, int(cNumOperands)) 86 | for i, len := range operandsLen { 87 | operands[i] = charToByte(rawOperands[i], len) 88 | } 89 | 90 | newValue, success := mergeOperators.Get(idx).(mergeOperatorWrapper).mergeOperator.FullMerge(key, existingValue, operands) 91 | newValueLen := len(newValue) 92 | 93 | *cNewValueLen = C.size_t(newValueLen) 94 | *cSuccess = boolToChar(success) 95 | 96 | return cByteSlice(newValue) 97 | } 98 | 99 | //export gorocksdb_mergeoperator_partial_merge_multi 100 | func gorocksdb_mergeoperator_partial_merge_multi(idx int, 
cKey *C.char, cKeyLen C.size_t, cOperands **C.char, cOperandsLen *C.size_t, cNumOperands C.int, cSuccess *C.uchar, cNewValueLen *C.size_t) *C.char { 101 | key := charToByte(cKey, cKeyLen) 102 | rawOperands := charSlice(cOperands, cNumOperands) 103 | operandsLen := sizeSlice(cOperandsLen, cNumOperands) 104 | operands := make([][]byte, int(cNumOperands)) 105 | for i, len := range operandsLen { 106 | operands[i] = charToByte(rawOperands[i], len) 107 | } 108 | 109 | var newValue []byte 110 | success := true 111 | 112 | merger := mergeOperators.Get(idx).(mergeOperatorWrapper).mergeOperator 113 | leftOperand := operands[0] 114 | for i := 1; i < int(cNumOperands); i++ { 115 | newValue, success = merger.PartialMerge(key, leftOperand, operands[i]) 116 | if !success { 117 | break 118 | } 119 | leftOperand = newValue 120 | } 121 | 122 | newValueLen := len(newValue) 123 | *cNewValueLen = C.size_t(newValueLen) 124 | *cSuccess = boolToChar(success) 125 | 126 | return cByteSlice(newValue) 127 | } 128 | 129 | //export gorocksdb_mergeoperator_name 130 | func gorocksdb_mergeoperator_name(idx int) *C.char { 131 | return mergeOperators.Get(idx).(mergeOperatorWrapper).name 132 | } 133 | -------------------------------------------------------------------------------- /dcrontab/gorocksdb/options_compaction.go: -------------------------------------------------------------------------------- 1 | package gorocksdb 2 | 3 | // #include "rocksdb/c.h" 4 | import "C" 5 | 6 | // UniversalCompactionStopStyle describes a algorithm used to make a 7 | // compaction request stop picking new files into a single compaction run. 8 | type UniversalCompactionStopStyle uint 9 | 10 | // Compaction stop style types. 
11 | const ( 12 | CompactionStopStyleSimilarSize = UniversalCompactionStopStyle(C.rocksdb_similar_size_compaction_stop_style) 13 | CompactionStopStyleTotalSize = UniversalCompactionStopStyle(C.rocksdb_total_size_compaction_stop_style) 14 | ) 15 | 16 | // FIFOCompactionOptions represent all of the available options for 17 | // FIFO compaction. 18 | type FIFOCompactionOptions struct { 19 | c *C.rocksdb_fifo_compaction_options_t 20 | } 21 | 22 | // NewDefaultFIFOCompactionOptions creates a default FIFOCompactionOptions object. 23 | func NewDefaultFIFOCompactionOptions() *FIFOCompactionOptions { 24 | return NewNativeFIFOCompactionOptions(C.rocksdb_fifo_compaction_options_create()) 25 | } 26 | 27 | // NewNativeFIFOCompactionOptions creates a native FIFOCompactionOptions object. 28 | func NewNativeFIFOCompactionOptions(c *C.rocksdb_fifo_compaction_options_t) *FIFOCompactionOptions { 29 | return &FIFOCompactionOptions{c} 30 | } 31 | 32 | // SetMaxTableFilesSize sets the max table file size. 33 | // Once the total sum of table files reaches this, we will delete the oldest 34 | // table file 35 | // Default: 1GB 36 | func (opts *FIFOCompactionOptions) SetMaxTableFilesSize(value uint64) { 37 | C.rocksdb_fifo_compaction_options_set_max_table_files_size(opts.c, C.uint64_t(value)) 38 | } 39 | 40 | // Destroy deallocates the FIFOCompactionOptions object. 41 | func (opts *FIFOCompactionOptions) Destroy() { 42 | C.rocksdb_fifo_compaction_options_destroy(opts.c) 43 | } 44 | 45 | // UniversalCompactionOptions represent all of the available options for 46 | // universal compaction. 47 | type UniversalCompactionOptions struct { 48 | c *C.rocksdb_universal_compaction_options_t 49 | } 50 | 51 | // NewDefaultUniversalCompactionOptions creates a default UniversalCompactionOptions 52 | // object. 
53 | func NewDefaultUniversalCompactionOptions() *UniversalCompactionOptions { 54 | return NewNativeUniversalCompactionOptions(C.rocksdb_universal_compaction_options_create()) 55 | } 56 | 57 | // NewNativeUniversalCompactionOptions creates a UniversalCompactionOptions 58 | // object. 59 | func NewNativeUniversalCompactionOptions(c *C.rocksdb_universal_compaction_options_t) *UniversalCompactionOptions { 60 | return &UniversalCompactionOptions{c} 61 | } 62 | 63 | // SetSizeRatio sets the percentage flexibilty while comparing file size. 64 | // If the candidate file(s) size is 1% smaller than the next file's size, 65 | // then include next file into this candidate set. 66 | // Default: 1 67 | func (opts *UniversalCompactionOptions) SetSizeRatio(value uint) { 68 | C.rocksdb_universal_compaction_options_set_size_ratio(opts.c, C.int(value)) 69 | } 70 | 71 | // SetMinMergeWidth sets the minimum number of files in a single compaction run. 72 | // Default: 2 73 | func (opts *UniversalCompactionOptions) SetMinMergeWidth(value uint) { 74 | C.rocksdb_universal_compaction_options_set_min_merge_width(opts.c, C.int(value)) 75 | } 76 | 77 | // SetMaxMergeWidth sets the maximum number of files in a single compaction run. 78 | // Default: UINT_MAX 79 | func (opts *UniversalCompactionOptions) SetMaxMergeWidth(value uint) { 80 | C.rocksdb_universal_compaction_options_set_max_merge_width(opts.c, C.int(value)) 81 | } 82 | 83 | // SetMaxSizeAmplificationPercent sets the size amplification. 84 | // It is defined as the amount (in percentage) of 85 | // additional storage needed to store a single byte of data in the database. 86 | // For example, a size amplification of 2% means that a database that 87 | // contains 100 bytes of user-data may occupy upto 102 bytes of 88 | // physical storage. By this definition, a fully compacted database has 89 | // a size amplification of 0%. 
Rocksdb uses the following heuristic 90 | // to calculate size amplification: it assumes that all files excluding 91 | // the earliest file contribute to the size amplification. 92 | // Default: 200, which means that a 100 byte database could require upto 93 | // 300 bytes of storage. 94 | func (opts *UniversalCompactionOptions) SetMaxSizeAmplificationPercent(value uint) { 95 | C.rocksdb_universal_compaction_options_set_max_size_amplification_percent(opts.c, C.int(value)) 96 | } 97 | 98 | // SetCompressionSizePercent sets the percentage of compression size. 99 | // 100 | // If this option is set to be -1, all the output files 101 | // will follow compression type specified. 102 | // 103 | // If this option is not negative, we will try to make sure compressed 104 | // size is just above this value. In normal cases, at least this percentage 105 | // of data will be compressed. 106 | // When we are compacting to a new file, here is the criteria whether 107 | // it needs to be compressed: assuming here are the list of files sorted 108 | // by generation time: 109 | // A1...An B1...Bm C1...Ct 110 | // where A1 is the newest and Ct is the oldest, and we are going to compact 111 | // B1...Bm, we calculate the total size of all the files as total_size, as 112 | // well as the total size of C1...Ct as total_C, the compaction output file 113 | // will be compressed iff 114 | // total_C / total_size < this percentage 115 | // Default: -1 116 | func (opts *UniversalCompactionOptions) SetCompressionSizePercent(value int) { 117 | C.rocksdb_universal_compaction_options_set_compression_size_percent(opts.c, C.int(value)) 118 | } 119 | 120 | // SetStopStyle sets the algorithm used to stop picking files into a single compaction run. 
121 | // Default: CompactionStopStyleTotalSize 122 | func (opts *UniversalCompactionOptions) SetStopStyle(value UniversalCompactionStopStyle) { 123 | C.rocksdb_universal_compaction_options_set_stop_style(opts.c, C.int(value)) 124 | } 125 | 126 | // Destroy deallocates the UniversalCompactionOptions object. 127 | func (opts *UniversalCompactionOptions) Destroy() { 128 | C.rocksdb_universal_compaction_options_destroy(opts.c) 129 | opts.c = nil 130 | } 131 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Distributed crontab (dcrontab) 2 | 3 | * Supports Running a single crontab across a cluster of machines with a resolution down to a minute 4 | * Use this for critical service operations like updating auction items at the end of an auction. 5 | 6 | 7 | ## Getting the package from Github docker registry 8 | 9 | https://github.com/dioptre/dcrontab/packages/48583 10 | 11 | ## Running on AWS 12 | * Use the production branch https://github.com/dioptre/dcrontab/tree/production 13 | * You can use docker or build manually if you wish (see the commented lines in ./Dockerfile - assumes a debian/Ubuntu distribution) 14 | * Setup dcrontab1 and more on your private dns (see the config file) names will resolve 15 | * Generate the certs per the ./gencerts.sh (the first time generate a CA Ex. ```./gencerts 1 ca```) 16 | * Update the config file with your key/node (use dcrontab[0-9]*) 17 | * **The example requires a nats service to be setup, but you can disable it if you wish.** 18 | 19 | ## Running on Docker 20 | From the directory: 21 | 22 | ```sudo docker build -t dcrontab .``` 23 | 24 | ### Docker Compose 25 | Add this to your docker-compose.yml (Version 3). You must add >1 machines to cluster. 
26 | 27 | Example: 28 | ``` 29 | version: '3' 30 | services: 31 | nats: 32 | build: nats:latest 33 | ports: 34 | - "4222:4222" 35 | - "6222:6222" 36 | - "8222:8222" 37 | networks: 38 | - default 39 | dcron1: 40 | image: dcrontab:latest 41 | command: sh -c '/app/dcrontab/wait-for localhost:4222 -t 300 -- sleep 3 && /app/dcrontab/dockercmd.sh' 42 | depends_on: 43 | - "nats" 44 | expose: 45 | - "6001" 46 | ports: 47 | - "6001:6001" 48 | network_mode: host 49 | environment: 50 | - "NODEID=1" 51 | dcron2: 52 | image: dcrontab:latest 53 | command: sh -c '/app/dcrontab/wait-for localhost:4222 -t 300 -- sleep 3 && /app/dcrontab/dockercmd.sh' 54 | depends_on: 55 | - "nats" 56 | expose: 57 | - "6002" 58 | ports: 59 | - "6002:6002" 60 | network_mode: host 61 | environment: 62 | - "NODEID=2" 63 | dcron3: 64 | image: dcrontab:latest 65 | command: sh -c '/app/dcrontab/wait-for localhost:4222 -t 300 -- sleep 3 && /app/dcrontab/dockercmd.sh' 66 | depends_on: 67 | - "nats" 68 | expose: 69 | - "6003" 70 | ports: 71 | - "6003:6003" 72 | network_mode: host 73 | environment: 74 | - "NODEID=3" 75 | ``` 76 | 77 | Then run: 78 | ```sudo docker-compose up``` 79 | 80 | ## Running from source 81 | 82 | ### Setup 83 | Follow the instructions for building inside the 84 | ```Dockerfile``` 85 | 86 | See the **config.json** file for all the options. 
87 | 88 | #### Requirements / Dependencies 89 | **Go > Version 1.12** 90 | RocksDB (try something like brew install rocksdb) 91 | 92 | #### Optional Requirements 93 | 94 | NATS - https://nats.io 95 | 96 | #### Building from source 97 | 98 | ``` 99 | sudo apt update \ 100 | && sudo apt install -y build-essential cmake libjemalloc-dev libbz2-dev libsnappy-dev zlib1g-dev liblz4-dev libzstd-dev \ 101 | sudo \ 102 | supervisor \ 103 | netcat 104 | 105 | sudo apt install git 106 | sudo apt upgrade 107 | 108 | wget https://dl.google.com/go/go1.13.4.linux-amd64.tar.gz 109 | tar -xvf go1.13.4.linux-amd64.tar.gz 110 | sudo mv go /usr/local 111 | mkdir projects 112 | cd projects/ 113 | mkdir go 114 | #vi ~/.bashrc 115 | 116 | ## Add to .bashrc 117 | echo "export GOROOT=/usr/local/go" >> ~/.bashrc 118 | echo "export GOPATH=$HOME/projects/go" >> ~/.bashrc 119 | echo "export PATH=$HOME/projects/go/bin:/usr/local/go/bin:$PATH" >> ~/.bashrc 120 | 121 | # installing latest gflags 122 | cd /tmp && \ 123 | git clone https://github.com/gflags/gflags.git && \ 124 | cd gflags && \ 125 | mkdir build && \ 126 | cd build && \ 127 | cmake -DBUILD_SHARED_LIBS=1 -DGFLAGS_INSTALL_SHARED_LIBS=1 .. 
&& \ 128 | sudo make install && \ 129 | cd /tmp && \ 130 | rm -R /tmp/gflags/ 131 | 132 | # Install Rocksdb 133 | cd /tmp && \ 134 | git clone https://github.com/facebook/rocksdb.git && \ 135 | cd rocksdb && \ 136 | git checkout v6.3.6 && \ 137 | make shared_lib && \ 138 | sudo mkdir -p /usr/local/rocksdb/lib && \ 139 | sudo mkdir /usr/local/rocksdb/include && \ 140 | sudo cp librocksdb.so* /usr/local/rocksdb/lib && \ 141 | sudo cp /usr/local/rocksdb/lib/librocksdb.so* /usr/lib/ && \ 142 | sudo cp -r include /usr/local/rocksdb/ && \ 143 | sudo cp -r include/* /usr/include/ && \ 144 | rm -R /tmp/rocksdb/ 145 | 146 | #Install Gorocksdb 147 | CGO_CFLAGS="-I/usr/local/rocksdb/include" \ 148 | CGO_LDFLAGS="-L/usr/local/rocksdb/lib -lrocksdb -lstdc++ -lm -lz -lbz2 -lsnappy -llz4 -lzstd" \ 149 | go get github.com/tecbot/gorocksdb 150 | 151 | 152 | cd ~/projects 153 | git clone https://github.com/dioptre/dcrontab 154 | cd dcrontab 155 | make 156 | 157 | sudo mkdir /app 158 | sudo chown admin:admin /app 159 | ln -s /home/admin/projects/dcrontab /app/dcrontab 160 | 161 | sudo ln /home/admin/projects/dcrontab/supervisor.conf /etc/supervisor.conf 162 | sudo ln /home/admin/projects/dcrontab/dcron.supervisor.conf /etc/supervisor/conf.d/dcron.supervisor.conf 163 | 164 | 165 | sudo systemctl enable supervisor.service 166 | 167 | ##UPDATE THE CONFIG FILE 168 | 169 | ## Change hostname on amazon jessie 170 | #sudo hostnamectl set-hostname dcrontab1 171 | #sudo reboot 172 | 173 | ``` 174 | 175 | then 176 | 177 | ``` 178 | make 179 | ``` 180 | 181 | or on mac I need to 182 | 183 | ``` 184 | IPHONEOS_DEPLOYMENT_TARGET= SDKROOT= make 185 | ``` 186 | 187 | #### Running 188 | 189 | ``` 190 | ./dcrontab -addr localhost:6001 -nodeid 1 191 | #... add more nodes to more machines 192 | ``` 193 | 194 | ### Adding items to crontab 195 | You can type in a cronjob directly into the console - but better to manage the jobs with the config.json. 196 | ``` 197 | put key value 198 | ``` 199 | Ex. 
200 | ``` 201 | put __dcron::99 {"Type":"shell","Exec":"ls"} 202 | ``` 203 | or 204 | ``` 205 | get key 206 | ``` 207 | 208 | ### TODO 209 | 210 | - [ ] Run once (equivalent to @reboot) 211 | - [ ] Resolution down to seconds (minutes atm) 212 | - [ ] DELETE method 213 | - [ ] HTTPS & Auth Support 214 | 215 | 216 | #### Credits 217 | 218 | Andrew Grosser - https://sfpl.io 219 | 220 | Lei Ni - https://github.com/lni/dragonboat 221 | -------------------------------------------------------------------------------- /dcrontab/gorocksdb/options_block_based_table.go: -------------------------------------------------------------------------------- 1 | package gorocksdb 2 | 3 | // #include "rocksdb/c.h" 4 | // #include "gorocksdb.h" 5 | import "C" 6 | 7 | // IndexType specifies the index type that will be used for this table. 8 | type IndexType uint 9 | 10 | const ( 11 | // A space efficient index block that is optimized for 12 | // binary-search-based index. 13 | KBinarySearchIndexType = 0 14 | // The hash index, if enabled, will do the hash lookup when 15 | // `Options.prefix_extractor` is provided. 16 | KHashSearchIndexType = 1 17 | // A two-level index implementation. Both levels are binary search indexes. 18 | KTwoLevelIndexSearchIndexType = 2 19 | ) 20 | 21 | // BlockBasedTableOptions represents block-based table options. 22 | type BlockBasedTableOptions struct { 23 | c *C.rocksdb_block_based_table_options_t 24 | 25 | // Hold references for GC. 26 | cache *Cache 27 | compCache *Cache 28 | 29 | // We keep these so we can free their memory in Destroy. 30 | cFp *C.rocksdb_filterpolicy_t 31 | } 32 | 33 | // NewDefaultBlockBasedTableOptions creates a default BlockBasedTableOptions object. 34 | func NewDefaultBlockBasedTableOptions() *BlockBasedTableOptions { 35 | return NewNativeBlockBasedTableOptions(C.rocksdb_block_based_options_create()) 36 | } 37 | 38 | // NewNativeBlockBasedTableOptions creates a BlockBasedTableOptions object. 
39 | func NewNativeBlockBasedTableOptions(c *C.rocksdb_block_based_table_options_t) *BlockBasedTableOptions { 40 | return &BlockBasedTableOptions{c: c} 41 | } 42 | 43 | // Destroy deallocates the BlockBasedTableOptions object. 44 | func (opts *BlockBasedTableOptions) Destroy() { 45 | C.rocksdb_block_based_options_destroy(opts.c) 46 | opts.c = nil 47 | opts.cache = nil 48 | opts.compCache = nil 49 | } 50 | 51 | // SetCacheIndexAndFilterBlocks is indicating if we'd put index/filter blocks to the block cache. 52 | // If not specified, each "table reader" object will pre-load index/filter 53 | // block during table initialization. 54 | // Default: false 55 | func (opts *BlockBasedTableOptions) SetCacheIndexAndFilterBlocks(value bool) { 56 | C.rocksdb_block_based_options_set_cache_index_and_filter_blocks(opts.c, boolToChar(value)) 57 | } 58 | 59 | // SetPinL0FilterAndIndexBlocksInCache sets cache_index_and_filter_blocks. 60 | // If is true and the below is true (hash_index_allow_collision), then 61 | // filter and index blocks are stored in the cache, but a reference is 62 | // held in the "table reader" object so the blocks are pinned and only 63 | // evicted from cache when the table reader is freed. 64 | func (opts *BlockBasedTableOptions) SetPinL0FilterAndIndexBlocksInCache(value bool) { 65 | C.rocksdb_block_based_options_set_pin_l0_filter_and_index_blocks_in_cache(opts.c, boolToChar(value)) 66 | } 67 | 68 | // SetBlockSize sets the approximate size of user data packed per block. 69 | // Note that the block size specified here corresponds opts uncompressed data. 70 | // The actual size of the unit read from disk may be smaller if 71 | // compression is enabled. This parameter can be changed dynamically. 72 | // Default: 4K 73 | func (opts *BlockBasedTableOptions) SetBlockSize(blockSize int) { 74 | C.rocksdb_block_based_options_set_block_size(opts.c, C.size_t(blockSize)) 75 | } 76 | 77 | // SetBlockSizeDeviation sets the block size deviation. 
// This is used to close a block before it reaches the configured
112 | // Default: false 113 | func (opts *BlockBasedTableOptions) SetNoBlockCache(value bool) { 114 | C.rocksdb_block_based_options_set_no_block_cache(opts.c, boolToChar(value)) 115 | } 116 | 117 | // SetBlockCache sets the control over blocks (user data is stored in a set of blocks, and 118 | // a block is the unit of reading from disk). 119 | // 120 | // If set, use the specified cache for blocks. 121 | // If nil, rocksdb will auoptsmatically create and use an 8MB internal cache. 122 | // Default: nil 123 | func (opts *BlockBasedTableOptions) SetBlockCache(cache *Cache) { 124 | opts.cache = cache 125 | C.rocksdb_block_based_options_set_block_cache(opts.c, cache.c) 126 | } 127 | 128 | // SetBlockCacheCompressed sets the cache for compressed blocks. 129 | // If nil, rocksdb will not use a compressed block cache. 130 | // Default: nil 131 | func (opts *BlockBasedTableOptions) SetBlockCacheCompressed(cache *Cache) { 132 | opts.compCache = cache 133 | C.rocksdb_block_based_options_set_block_cache_compressed(opts.c, cache.c) 134 | } 135 | 136 | // SetWholeKeyFiltering specify if whole keys in the filter (not just prefixes) 137 | // should be placed. 138 | // This must generally be true for gets opts be efficient. 139 | // Default: true 140 | func (opts *BlockBasedTableOptions) SetWholeKeyFiltering(value bool) { 141 | C.rocksdb_block_based_options_set_whole_key_filtering(opts.c, boolToChar(value)) 142 | } 143 | 144 | // SetIndexType sets the index type used for this table. 145 | // kBinarySearch: 146 | // A space efficient index block that is optimized for 147 | // binary-search-based index. 148 | // 149 | // kHashSearch: 150 | // The hash index, if enabled, will do the hash lookup when 151 | // `Options.prefix_extractor` is provided. 152 | // 153 | // kTwoLevelIndexSearch: 154 | // A two-level index implementation. Both levels are binary search indexes. 
155 | // Default: kBinarySearch 156 | func (opts *BlockBasedTableOptions) SetIndexType(value IndexType) { 157 | C.rocksdb_block_based_options_set_index_type(opts.c, C.int(value)) 158 | } 159 | -------------------------------------------------------------------------------- /dcrontab/gorocksdb/write_batch.go: -------------------------------------------------------------------------------- 1 | package gorocksdb 2 | 3 | // #include "rocksdb/c.h" 4 | import "C" 5 | import ( 6 | "errors" 7 | "io" 8 | ) 9 | 10 | // WriteBatch is a batching of Puts, Merges and Deletes. 11 | type WriteBatch struct { 12 | c *C.rocksdb_writebatch_t 13 | } 14 | 15 | // NewWriteBatch create a WriteBatch object. 16 | func NewWriteBatch() *WriteBatch { 17 | return NewNativeWriteBatch(C.rocksdb_writebatch_create()) 18 | } 19 | 20 | // NewNativeWriteBatch create a WriteBatch object. 21 | func NewNativeWriteBatch(c *C.rocksdb_writebatch_t) *WriteBatch { 22 | return &WriteBatch{c} 23 | } 24 | 25 | // WriteBatchFrom creates a write batch from a serialized WriteBatch. 26 | func WriteBatchFrom(data []byte) *WriteBatch { 27 | return NewNativeWriteBatch(C.rocksdb_writebatch_create_from(byteToChar(data), C.size_t(len(data)))) 28 | } 29 | 30 | // Put queues a key-value pair. 31 | func (wb *WriteBatch) Put(key, value []byte) { 32 | cKey := byteToChar(key) 33 | cValue := byteToChar(value) 34 | C.rocksdb_writebatch_put(wb.c, cKey, C.size_t(len(key)), cValue, C.size_t(len(value))) 35 | } 36 | 37 | // PutCF queues a key-value pair in a column family. 38 | func (wb *WriteBatch) PutCF(cf *ColumnFamilyHandle, key, value []byte) { 39 | cKey := byteToChar(key) 40 | cValue := byteToChar(value) 41 | C.rocksdb_writebatch_put_cf(wb.c, cf.c, cKey, C.size_t(len(key)), cValue, C.size_t(len(value))) 42 | } 43 | 44 | // Merge queues a merge of "value" with the existing value of "key". 
45 | func (wb *WriteBatch) Merge(key, value []byte) { 46 | cKey := byteToChar(key) 47 | cValue := byteToChar(value) 48 | C.rocksdb_writebatch_merge(wb.c, cKey, C.size_t(len(key)), cValue, C.size_t(len(value))) 49 | } 50 | 51 | // MergeCF queues a merge of "value" with the existing value of "key" in a 52 | // column family. 53 | func (wb *WriteBatch) MergeCF(cf *ColumnFamilyHandle, key, value []byte) { 54 | cKey := byteToChar(key) 55 | cValue := byteToChar(value) 56 | C.rocksdb_writebatch_merge_cf(wb.c, cf.c, cKey, C.size_t(len(key)), cValue, C.size_t(len(value))) 57 | } 58 | 59 | // Delete queues a deletion of the data at key. 60 | func (wb *WriteBatch) Delete(key []byte) { 61 | cKey := byteToChar(key) 62 | C.rocksdb_writebatch_delete(wb.c, cKey, C.size_t(len(key))) 63 | } 64 | 65 | // DeleteCF queues a deletion of the data at key in a column family. 66 | func (wb *WriteBatch) DeleteCF(cf *ColumnFamilyHandle, key []byte) { 67 | cKey := byteToChar(key) 68 | C.rocksdb_writebatch_delete_cf(wb.c, cf.c, cKey, C.size_t(len(key))) 69 | } 70 | 71 | // Data returns the serialized version of this batch. 72 | func (wb *WriteBatch) Data() []byte { 73 | var cSize C.size_t 74 | cValue := C.rocksdb_writebatch_data(wb.c, &cSize) 75 | return charToByte(cValue, cSize) 76 | } 77 | 78 | // Count returns the number of updates in the batch. 79 | func (wb *WriteBatch) Count() int { 80 | return int(C.rocksdb_writebatch_count(wb.c)) 81 | } 82 | 83 | // NewIterator returns a iterator to iterate over the records in the batch. 84 | func (wb *WriteBatch) NewIterator() *WriteBatchIterator { 85 | data := wb.Data() 86 | if len(data) < 8+4 { 87 | return &WriteBatchIterator{} 88 | } 89 | return &WriteBatchIterator{data: data[12:]} 90 | } 91 | 92 | // Clear removes all the enqueued Put and Deletes. 93 | func (wb *WriteBatch) Clear() { 94 | C.rocksdb_writebatch_clear(wb.c) 95 | } 96 | 97 | // Destroy deallocates the WriteBatch object. 
98 | func (wb *WriteBatch) Destroy() { 99 | C.rocksdb_writebatch_destroy(wb.c) 100 | wb.c = nil 101 | } 102 | 103 | // WriteBatchRecordType describes the type of a batch record. 104 | type WriteBatchRecordType byte 105 | 106 | // Types of batch records. 107 | const ( 108 | WriteBatchDeletionRecord WriteBatchRecordType = 0x0 109 | WriteBatchValueRecord WriteBatchRecordType = 0x1 110 | WriteBatchMergeRecord WriteBatchRecordType = 0x2 111 | WriteBatchLogDataRecord WriteBatchRecordType = 0x3 112 | WriteBatchCFDeletionRecord WriteBatchRecordType = 0x4 113 | WriteBatchCFValueRecord WriteBatchRecordType = 0x5 114 | WriteBatchCFMergeRecord WriteBatchRecordType = 0x6 115 | WriteBatchSingleDeletionRecord WriteBatchRecordType = 0x7 116 | WriteBatchCFSingleDeletionRecord WriteBatchRecordType = 0x8 117 | WriteBatchBeginPrepareXIDRecord WriteBatchRecordType = 0x9 118 | WriteBatchEndPrepareXIDRecord WriteBatchRecordType = 0xA 119 | WriteBatchCommitXIDRecord WriteBatchRecordType = 0xB 120 | WriteBatchRollbackXIDRecord WriteBatchRecordType = 0xC 121 | WriteBatchNoopRecord WriteBatchRecordType = 0xD 122 | WriteBatchRangeDeletion WriteBatchRecordType = 0xF 123 | WriteBatchCFRangeDeletion WriteBatchRecordType = 0xE 124 | WriteBatchCFBlobIndex WriteBatchRecordType = 0x10 125 | WriteBatchBlobIndex WriteBatchRecordType = 0x11 126 | WriteBatchBeginPersistedPrepareXIDRecord WriteBatchRecordType = 0x12 127 | WriteBatchNotUsedRecord WriteBatchRecordType = 0x7F 128 | ) 129 | 130 | // WriteBatchRecord represents a record inside a WriteBatch. 131 | type WriteBatchRecord struct { 132 | CF int 133 | Key []byte 134 | Value []byte 135 | Type WriteBatchRecordType 136 | } 137 | 138 | // WriteBatchIterator represents a iterator to iterator over records. 139 | type WriteBatchIterator struct { 140 | data []byte 141 | record WriteBatchRecord 142 | err error 143 | } 144 | 145 | // Next returns the next record. 146 | // Returns false if no further record exists. 
147 | func (iter *WriteBatchIterator) Next() bool { 148 | if iter.err != nil || len(iter.data) == 0 { 149 | return false 150 | } 151 | // reset the current record 152 | iter.record.CF = 0 153 | iter.record.Key = nil 154 | iter.record.Value = nil 155 | 156 | // parse the record type 157 | iter.record.Type = iter.decodeRecType() 158 | 159 | switch iter.record.Type { 160 | case 161 | WriteBatchDeletionRecord, 162 | WriteBatchSingleDeletionRecord: 163 | iter.record.Key = iter.decodeSlice() 164 | case 165 | WriteBatchCFDeletionRecord, 166 | WriteBatchCFSingleDeletionRecord: 167 | iter.record.CF = int(iter.decodeVarint()) 168 | if iter.err == nil { 169 | iter.record.Key = iter.decodeSlice() 170 | } 171 | case 172 | WriteBatchValueRecord, 173 | WriteBatchMergeRecord, 174 | WriteBatchRangeDeletion, 175 | WriteBatchBlobIndex: 176 | iter.record.Key = iter.decodeSlice() 177 | if iter.err == nil { 178 | iter.record.Value = iter.decodeSlice() 179 | } 180 | case 181 | WriteBatchCFValueRecord, 182 | WriteBatchCFRangeDeletion, 183 | WriteBatchCFMergeRecord, 184 | WriteBatchCFBlobIndex: 185 | iter.record.CF = int(iter.decodeVarint()) 186 | if iter.err == nil { 187 | iter.record.Key = iter.decodeSlice() 188 | } 189 | if iter.err == nil { 190 | iter.record.Value = iter.decodeSlice() 191 | } 192 | case WriteBatchLogDataRecord: 193 | iter.record.Value = iter.decodeSlice() 194 | case 195 | WriteBatchNoopRecord, 196 | WriteBatchBeginPrepareXIDRecord, 197 | WriteBatchBeginPersistedPrepareXIDRecord: 198 | case 199 | WriteBatchEndPrepareXIDRecord, 200 | WriteBatchCommitXIDRecord, 201 | WriteBatchRollbackXIDRecord: 202 | iter.record.Value = iter.decodeSlice() 203 | default: 204 | iter.err = errors.New("unsupported wal record type") 205 | } 206 | 207 | return iter.err == nil 208 | 209 | } 210 | 211 | // Record returns the current record. 
212 | func (iter *WriteBatchIterator) Record() *WriteBatchRecord { 213 | return &iter.record 214 | } 215 | 216 | // Error returns the error if the iteration is failed. 217 | func (iter *WriteBatchIterator) Error() error { 218 | return iter.err 219 | } 220 | 221 | func (iter *WriteBatchIterator) decodeSlice() []byte { 222 | l := int(iter.decodeVarint()) 223 | if l > len(iter.data) { 224 | iter.err = io.ErrShortBuffer 225 | } 226 | if iter.err != nil { 227 | return []byte{} 228 | } 229 | ret := iter.data[:l] 230 | iter.data = iter.data[l:] 231 | return ret 232 | } 233 | 234 | func (iter *WriteBatchIterator) decodeRecType() WriteBatchRecordType { 235 | if len(iter.data) == 0 { 236 | iter.err = io.ErrShortBuffer 237 | return WriteBatchNotUsedRecord 238 | } 239 | t := iter.data[0] 240 | iter.data = iter.data[1:] 241 | return WriteBatchRecordType(t) 242 | } 243 | 244 | func (iter *WriteBatchIterator) decodeVarint() uint64 { 245 | var n int 246 | var x uint64 247 | for shift := uint(0); shift < 64 && n < len(iter.data); shift += 7 { 248 | b := uint64(iter.data[n]) 249 | n++ 250 | x |= (b & 0x7F) << shift 251 | if (b & 0x80) == 0 { 252 | iter.data = iter.data[n:] 253 | return x 254 | } 255 | } 256 | if n == len(iter.data) { 257 | iter.err = io.ErrShortBuffer 258 | } else { 259 | iter.err = errors.New("malformed varint") 260 | } 261 | return 0 262 | } 263 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | 2 | Apache License 3 | Version 2.0, January 2004 4 | http://www.apache.org/licenses/ 5 | 6 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 7 | 8 | 1. Definitions. 9 | 10 | "License" shall mean the terms and conditions for use, reproduction, 11 | and distribution as defined by Sections 1 through 9 of this document. 
12 | 13 | "Licensor" shall mean the copyright owner or entity authorized by 14 | the copyright owner that is granting the License. 15 | 16 | "Legal Entity" shall mean the union of the acting entity and all 17 | other entities that control, are controlled by, or are under common 18 | control with that entity. For the purposes of this definition, 19 | "control" means (i) the power, direct or indirect, to cause the 20 | direction or management of such entity, whether by contract or 21 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 22 | outstanding shares, or (iii) beneficial ownership of such entity. 23 | 24 | "You" (or "Your") shall mean an individual or Legal Entity 25 | exercising permissions granted by this License. 26 | 27 | "Source" form shall mean the preferred form for making modifications, 28 | including but not limited to software source code, documentation 29 | source, and configuration files. 30 | 31 | "Object" form shall mean any form resulting from mechanical 32 | transformation or translation of a Source form, including but 33 | not limited to compiled object code, generated documentation, 34 | and conversions to other media types. 35 | 36 | "Work" shall mean the work of authorship, whether in Source or 37 | Object form, made available under the License, as indicated by a 38 | copyright notice that is included in or attached to the work 39 | (an example is provided in the Appendix below). 40 | 41 | "Derivative Works" shall mean any work, whether in Source or Object 42 | form, that is based on (or derived from) the Work and for which the 43 | editorial revisions, annotations, elaborations, or other modifications 44 | represent, as a whole, an original work of authorship. For the purposes 45 | of this License, Derivative Works shall not include works that remain 46 | separable from, or merely link (or bind by name) to the interfaces of, 47 | the Work and Derivative Works thereof. 
48 | 49 | "Contribution" shall mean any work of authorship, including 50 | the original version of the Work and any modifications or additions 51 | to that Work or Derivative Works thereof, that is intentionally 52 | submitted to Licensor for inclusion in the Work by the copyright owner 53 | or by an individual or Legal Entity authorized to submit on behalf of 54 | the copyright owner. For the purposes of this definition, "submitted" 55 | means any form of electronic, verbal, or written communication sent 56 | to the Licensor or its representatives, including but not limited to 57 | communication on electronic mailing lists, source code control systems, 58 | and issue tracking systems that are managed by, or on behalf of, the 59 | Licensor for the purpose of discussing and improving the Work, but 60 | excluding communication that is conspicuously marked or otherwise 61 | designated in writing by the copyright owner as "Not a Contribution." 62 | 63 | "Contributor" shall mean Licensor and any individual or Legal Entity 64 | on behalf of whom a Contribution has been received by Licensor and 65 | subsequently incorporated within the Work. 66 | 67 | 2. Grant of Copyright License. Subject to the terms and conditions of 68 | this License, each Contributor hereby grants to You a perpetual, 69 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 70 | copyright license to reproduce, prepare Derivative Works of, 71 | publicly display, publicly perform, sublicense, and distribute the 72 | Work and such Derivative Works in Source or Object form. 73 | 74 | 3. Grant of Patent License. 
Subject to the terms and conditions of 75 | this License, each Contributor hereby grants to You a perpetual, 76 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 77 | (except as stated in this section) patent license to make, have made, 78 | use, offer to sell, sell, import, and otherwise transfer the Work, 79 | where such license applies only to those patent claims licensable 80 | by such Contributor that are necessarily infringed by their 81 | Contribution(s) alone or by combination of their Contribution(s) 82 | with the Work to which such Contribution(s) was submitted. If You 83 | institute patent litigation against any entity (including a 84 | cross-claim or counterclaim in a lawsuit) alleging that the Work 85 | or a Contribution incorporated within the Work constitutes direct 86 | or contributory patent infringement, then any patent licenses 87 | granted to You under this License for that Work shall terminate 88 | as of the date such litigation is filed. 89 | 90 | 4. Redistribution. 
You may reproduce and distribute copies of the 91 | Work or Derivative Works thereof in any medium, with or without 92 | modifications, and in Source or Object form, provided that You 93 | meet the following conditions: 94 | 95 | (a) You must give any other recipients of the Work or 96 | Derivative Works a copy of this License; and 97 | 98 | (b) You must cause any modified files to carry prominent notices 99 | stating that You changed the files; and 100 | 101 | (c) You must retain, in the Source form of any Derivative Works 102 | that You distribute, all copyright, patent, trademark, and 103 | attribution notices from the Source form of the Work, 104 | excluding those notices that do not pertain to any part of 105 | the Derivative Works; and 106 | 107 | (d) If the Work includes a "NOTICE" text file as part of its 108 | distribution, then any Derivative Works that You distribute must 109 | include a readable copy of the attribution notices contained 110 | within such NOTICE file, excluding those notices that do not 111 | pertain to any part of the Derivative Works, in at least one 112 | of the following places: within a NOTICE text file distributed 113 | as part of the Derivative Works; within the Source form or 114 | documentation, if provided along with the Derivative Works; or, 115 | within a display generated by the Derivative Works, if and 116 | wherever such third-party notices normally appear. The contents 117 | of the NOTICE file are for informational purposes only and 118 | do not modify the License. You may add Your own attribution 119 | notices within Derivative Works that You distribute, alongside 120 | or as an addendum to the NOTICE text from the Work, provided 121 | that such additional attribution notices cannot be construed 122 | as modifying the License. 
123 | 124 | You may add Your own copyright statement to Your modifications and 125 | may provide additional or different license terms and conditions 126 | for use, reproduction, or distribution of Your modifications, or 127 | for any such Derivative Works as a whole, provided Your use, 128 | reproduction, and distribution of the Work otherwise complies with 129 | the conditions stated in this License. 130 | 131 | 5. Submission of Contributions. Unless You explicitly state otherwise, 132 | any Contribution intentionally submitted for inclusion in the Work 133 | by You to the Licensor shall be under the terms and conditions of 134 | this License, without any additional terms or conditions. 135 | Notwithstanding the above, nothing herein shall supersede or modify 136 | the terms of any separate license agreement you may have executed 137 | with Licensor regarding such Contributions. 138 | 139 | 6. Trademarks. This License does not grant permission to use the trade 140 | names, trademarks, service marks, or product names of the Licensor, 141 | except as required for reasonable and customary use in describing the 142 | origin of the Work and reproducing the content of the NOTICE file. 143 | 144 | 7. Disclaimer of Warranty. Unless required by applicable law or 145 | agreed to in writing, Licensor provides the Work (and each 146 | Contributor provides its Contributions) on an "AS IS" BASIS, 147 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 148 | implied, including, without limitation, any warranties or conditions 149 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 150 | PARTICULAR PURPOSE. You are solely responsible for determining the 151 | appropriateness of using or redistributing the Work and assume any 152 | risks associated with Your exercise of permissions under this License. 153 | 154 | 8. Limitation of Liability. 
In no event and under no legal theory, 155 | whether in tort (including negligence), contract, or otherwise, 156 | unless required by applicable law (such as deliberate and grossly 157 | negligent acts) or agreed to in writing, shall any Contributor be 158 | liable to You for damages, including any direct, indirect, special, 159 | incidental, or consequential damages of any character arising as a 160 | result of this License or out of the use or inability to use the 161 | Work (including but not limited to damages for loss of goodwill, 162 | work stoppage, computer failure or malfunction, or any and all 163 | other commercial damages or losses), even if such Contributor 164 | has been advised of the possibility of such damages. 165 | 166 | 9. Accepting Warranty or Additional Liability. While redistributing 167 | the Work or Derivative Works thereof, You may choose to offer, 168 | and charge a fee for, acceptance of support, warranty, indemnity, 169 | or other liability obligations and/or rights consistent with this 170 | License. However, in accepting such obligations, You may act only 171 | on Your own behalf and on Your sole responsibility, not on behalf 172 | of any other Contributor, and only if You agree to indemnify, 173 | defend, and hold each Contributor harmless for any liability 174 | incurred by, or claims asserted against, such Contributor by reason 175 | of your accepting any such warranty or additional liability. 176 | 177 | END OF TERMS AND CONDITIONS 178 | 179 | APPENDIX: How to apply the Apache License to your work. 180 | 181 | To apply the Apache License to your work, attach the following 182 | boilerplate notice, with the fields enclosed by brackets "[]" 183 | replaced with your own identifying information. (Don't include 184 | the brackets!) The text should be enclosed in the appropriate 185 | comment syntax for the file format. 
We also recommend that a 186 | file or class name and description of purpose be included on the 187 | same "printed page" as the copyright notice for easier 188 | identification within third-party archives. 189 | 190 | Copyright [yyyy] [name of copyright owner] 191 | 192 | Licensed under the Apache License, Version 2.0 (the "License"); 193 | you may not use this file except in compliance with the License. 194 | You may obtain a copy of the License at 195 | 196 | http://www.apache.org/licenses/LICENSE-2.0 197 | 198 | Unless required by applicable law or agreed to in writing, software 199 | distributed under the License is distributed on an "AS IS" BASIS, 200 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 201 | See the License for the specific language governing permissions and 202 | limitations under the License. 203 | -------------------------------------------------------------------------------- /dcrontab/commander.go: -------------------------------------------------------------------------------- 1 | //===----------- dcrontab - distributed crontab written in go -------------=== 2 | // 3 | // Copyright (c) 2018 Andrew Grosser. All Rights Reserved. 4 | // 5 | // Licensed under the Apache License, Version 2.0 (the "License"); 6 | // you may not use this file except in compliance with the License. 7 | // You may obtain a copy of the License at 8 | // 9 | // http://www.apache.org/licenses/LICENSE-2.0 10 | // 11 | // Unless required by applicable law or agreed to in writing, software 12 | // distributed under the License is distributed on an "AS IS" BASIS, 13 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | // See the License for the specific language governing permissions and 15 | // limitations under the License. 
16 | 17 | package main 18 | 19 | import ( 20 | "bytes" 21 | "crypto/md5" 22 | "encoding/binary" 23 | "encoding/json" 24 | "errors" 25 | "fmt" 26 | "io" 27 | "io/ioutil" 28 | "math/rand" 29 | "os" 30 | "path/filepath" 31 | "strconv" 32 | "sync" 33 | "sync/atomic" 34 | "time" 35 | "unsafe" 36 | "strings" 37 | 38 | "github.com/dioptre/dcrontab/v3/dcrontab/gorocksdb" 39 | sm "github.com/lni/dragonboat/v3/statemachine" 40 | "github.com/lni/goutils/fileutil" 41 | ) 42 | 43 | const ( 44 | appliedIndexKey string = "disk_kv_applied_index" 45 | testDBDirName string = "dcrontab-data" 46 | currentDBFilename string = "current" 47 | updatingDBFilename string = "current.updating" 48 | ) 49 | 50 | type Action struct { 51 | Action string 52 | Key string 53 | Val string 54 | } 55 | 56 | // rocksdb is a wrapper to ensure lookup() and close() can be concurrently 57 | // invoked. IOnDiskStateMachine.Update() and close() will never be concurrently 58 | // invoked. 59 | type rocksdb struct { 60 | mu sync.RWMutex 61 | db *gorocksdb.DB 62 | ro *gorocksdb.ReadOptions 63 | wo *gorocksdb.WriteOptions 64 | opts *gorocksdb.Options 65 | closed bool 66 | } 67 | 68 | func (r *rocksdb) lookup(query []byte) ([]byte, error) { 69 | r.mu.RLock() 70 | defer r.mu.RUnlock() 71 | if r.closed { 72 | return nil, errors.New("db already closed") 73 | } 74 | val, err := r.db.Get(r.ro, query) 75 | if err != nil { 76 | return nil, err 77 | } 78 | defer val.Free() 79 | data := val.Data() 80 | if len(data) == 0 { 81 | return []byte(""), nil 82 | } 83 | v := make([]byte, len(data)) 84 | copy(v, data) 85 | return v, nil 86 | } 87 | 88 | func (r *rocksdb) close() { 89 | r.mu.Lock() 90 | defer r.mu.Unlock() 91 | r.closed = true 92 | if r.db != nil { 93 | r.db.Close() 94 | } 95 | if r.opts != nil { 96 | r.opts.Destroy() 97 | } 98 | if r.wo != nil { 99 | r.wo.Destroy() 100 | } 101 | if r.ro != nil { 102 | r.ro.Destroy() 103 | } 104 | r.db = nil 105 | } 106 | 107 | // createDB creates a RocksDB DB at the specified 
directory. 108 | func createDB(dbdir string) (*rocksdb, error) { 109 | opts := gorocksdb.NewDefaultOptions() 110 | opts.SetCreateIfMissing(true) 111 | opts.SetUseFsync(true) 112 | wo := gorocksdb.NewDefaultWriteOptions() 113 | wo.SetSync(true) 114 | ro := gorocksdb.NewDefaultReadOptions() 115 | db, err := gorocksdb.OpenDb(opts, dbdir) 116 | if err != nil { 117 | return nil, err 118 | } 119 | return &rocksdb{ 120 | db: db, 121 | ro: ro, 122 | wo: wo, 123 | opts: opts, 124 | }, nil 125 | } 126 | 127 | // functions below are used to manage the current data directory of RocksDB DB. 128 | func isNewRun(dir string) bool { 129 | fp := filepath.Join(dir, currentDBFilename) 130 | if _, err := os.Stat(fp); os.IsNotExist(err) { 131 | return true 132 | } 133 | return false 134 | } 135 | 136 | func getNodeDBDirName(clusterID uint64, nodeID uint64) string { 137 | part := fmt.Sprintf("%d_%d", clusterID, nodeID) 138 | return filepath.Join(testDBDirName, part) 139 | } 140 | 141 | func getNewRandomDBDirName(dir string) string { 142 | part := "%d_%d" 143 | rn := rand.Uint64() 144 | ct := time.Now().UnixNano() 145 | return filepath.Join(dir, fmt.Sprintf(part, rn, ct)) 146 | } 147 | 148 | func replaceCurrentDBFile(dir string) error { 149 | fp := filepath.Join(dir, currentDBFilename) 150 | tmpFp := filepath.Join(dir, updatingDBFilename) 151 | if err := os.Rename(tmpFp, fp); err != nil { 152 | return err 153 | } 154 | return fileutil.SyncDir(dir) 155 | } 156 | 157 | func saveCurrentDBDirName(dir string, dbdir string) error { 158 | h := md5.New() 159 | if _, err := h.Write([]byte(dbdir)); err != nil { 160 | return err 161 | } 162 | fp := filepath.Join(dir, updatingDBFilename) 163 | f, err := os.Create(fp) 164 | if err != nil { 165 | return err 166 | } 167 | defer func() { 168 | if err := f.Close(); err != nil { 169 | panic(err) 170 | } 171 | if err := fileutil.SyncDir(dir); err != nil { 172 | panic(err) 173 | } 174 | }() 175 | if _, err := f.Write(h.Sum(nil)[:8]); err != nil { 176 | 
return err 177 | } 178 | if _, err := f.Write([]byte(dbdir)); err != nil { 179 | return err 180 | } 181 | if err := f.Sync(); err != nil { 182 | return err 183 | } 184 | return nil 185 | } 186 | 187 | func getCurrentDBDirName(dir string) (string, error) { 188 | fp := filepath.Join(dir, currentDBFilename) 189 | f, err := os.OpenFile(fp, os.O_RDONLY, 0755) 190 | if err != nil { 191 | return "", err 192 | } 193 | defer func() { 194 | if err := f.Close(); err != nil { 195 | panic(err) 196 | } 197 | }() 198 | data, err := ioutil.ReadAll(f) 199 | if err != nil { 200 | return "", err 201 | } 202 | if len(data) <= 8 { 203 | panic("corrupted content") 204 | } 205 | crc := data[:8] 206 | content := data[8:] 207 | h := md5.New() 208 | if _, err := h.Write(content); err != nil { 209 | return "", err 210 | } 211 | if !bytes.Equal(crc, h.Sum(nil)[:8]) { 212 | panic("corrupted content with not matched crc") 213 | } 214 | return string(content), nil 215 | } 216 | 217 | func createNodeDataDir(dir string) error { 218 | return os.MkdirAll(dir, 0755) 219 | } 220 | 221 | func cleanupNodeDataDir(dir string) error { 222 | os.RemoveAll(filepath.Join(dir, updatingDBFilename)) 223 | dbdir, err := getCurrentDBDirName(dir) 224 | if err != nil { 225 | return err 226 | } 227 | files, err := ioutil.ReadDir(dir) 228 | if err != nil { 229 | return err 230 | } 231 | for _, fi := range files { 232 | if !fi.IsDir() { 233 | continue 234 | } 235 | fmt.Printf("dbdir %s, fi.name %s, dir %s\n", dbdir, fi.Name(), dir) 236 | toDelete := filepath.Join(dir, fi.Name()) 237 | if toDelete != dbdir { 238 | fmt.Printf("removing %s\n", toDelete) 239 | if err := os.RemoveAll(toDelete); err != nil { 240 | return err 241 | } 242 | } 243 | } 244 | return nil 245 | } 246 | 247 | // Commander is a state machine that implements the IOnDiskStateMachine interface. 248 | // Commander stores key-value pairs in the underlying RocksDB key-value store. 
As 249 | // it is used as an example, it is implemented using the most basic features 250 | // common in most key-value stores. This is NOT a benchmark program. 251 | type Commander struct { 252 | clusterID uint64 253 | nodeID uint64 254 | lastApplied uint64 255 | db unsafe.Pointer 256 | closed bool 257 | aborted bool 258 | } 259 | 260 | // NewCommander creates a new disk kv test state machine. 261 | func NewCommander(clusterID uint64, nodeID uint64) sm.IOnDiskStateMachine { 262 | d := &Commander{ 263 | clusterID: clusterID, 264 | nodeID: nodeID, 265 | } 266 | return d 267 | } 268 | 269 | func (d *Commander) queryAppliedIndex(db *rocksdb) (uint64, error) { 270 | val, err := db.db.Get(db.ro, []byte(appliedIndexKey)) 271 | if err != nil { 272 | return 0, err 273 | } 274 | defer val.Free() 275 | data := val.Data() 276 | if len(data) == 0 { 277 | return 0, nil 278 | } 279 | return strconv.ParseUint(string(data), 10, 64) 280 | } 281 | 282 | // Open opens the state machine and return the index of the last Raft Log entry 283 | // already updated into the state machine. 
284 | func (d *Commander) Open(stopc <-chan struct{}) (uint64, error) { 285 | dir := getNodeDBDirName(d.clusterID, d.nodeID) 286 | if err := createNodeDataDir(dir); err != nil { 287 | panic(err) 288 | } 289 | var dbdir string 290 | if !isNewRun(dir) { 291 | if err := cleanupNodeDataDir(dir); err != nil { 292 | return 0, err 293 | } 294 | var err error 295 | dbdir, err = getCurrentDBDirName(dir) 296 | if err != nil { 297 | return 0, err 298 | } 299 | if _, err := os.Stat(dbdir); err != nil { 300 | if os.IsNotExist(err) { 301 | panic("db dir unexpectedly deleted") 302 | } 303 | } 304 | } else { 305 | dbdir = getNewRandomDBDirName(dir) 306 | if err := saveCurrentDBDirName(dir, dbdir); err != nil { 307 | return 0, err 308 | } 309 | if err := replaceCurrentDBFile(dir); err != nil { 310 | return 0, err 311 | } 312 | } 313 | db, err := createDB(dbdir) 314 | if err != nil { 315 | return 0, err 316 | } 317 | atomic.SwapPointer(&d.db, unsafe.Pointer(db)) 318 | appliedIndex, err := d.queryAppliedIndex(db) 319 | if err != nil { 320 | panic(err) 321 | } 322 | d.lastApplied = appliedIndex 323 | return appliedIndex, nil 324 | } 325 | 326 | // Lookup queries the state machine. 
327 | func (d *Commander) Lookup(key interface{}) (interface{}, error) { 328 | db := (*rocksdb)(atomic.LoadPointer(&d.db)) 329 | action := &Action{ 330 | Action: "GET", 331 | Key: string(key.([]byte)), 332 | } 333 | if err := json.Unmarshal(key.([]byte), action); err != nil { 334 | 335 | } 336 | if db != nil { 337 | switch action.Action { 338 | case "SCAN": 339 | items := make(map[string]string) 340 | ro := gorocksdb.NewDefaultReadOptions() 341 | ro.SetFillCache(false) 342 | it := db.db.NewIterator(ro) 343 | defer it.Close() 344 | it.Seek([]byte(action.Key)) 345 | for it = it; it.Valid(); it.Next() { 346 | key := it.Key() 347 | value := it.Value() 348 | k := string(key.Data()) 349 | if !strings.HasPrefix(k, action.Key) { 350 | key.Free() 351 | value.Free() 352 | break; 353 | } 354 | items[k] = string(value.Data()) 355 | key.Free() 356 | value.Free() 357 | } 358 | return json.Marshal(items); 359 | //case "GET": 360 | default: 361 | v, err := db.lookup([]byte(action.Key)) 362 | if err == nil && d.closed { 363 | panic("lookup returned valid result when Commander is already closed") 364 | } 365 | return v, err 366 | 367 | } 368 | 369 | } 370 | return nil, errors.New("db closed") 371 | } 372 | 373 | // Update updates the state machine. In this example, all updates are put into 374 | // a RocksDB write batch and then atomically written to the DB together with 375 | // the index of the last Raft Log entry. For simplicity, we always Sync the 376 | // writes (db.wo.Sync=True). To get higher throughput, you can implement the 377 | // Sync() method below and choose not to synchronize for every Update(). Sync() 378 | // will periodically called by Dragonboat to synchronize the state. 
379 | func (d *Commander) Update(ents []sm.Entry) ([]sm.Entry, error) { 380 | if d.aborted { 381 | panic("update() called after abort set to true") 382 | } 383 | if d.closed { 384 | panic("update called after Close()") 385 | } 386 | wb := gorocksdb.NewWriteBatch() 387 | defer wb.Destroy() 388 | db := (*rocksdb)(atomic.LoadPointer(&d.db)) 389 | for idx, e := range ents { 390 | action := &Action{} 391 | if err := json.Unmarshal(e.Cmd, action); err != nil { 392 | panic(err) 393 | } 394 | wb.Put([]byte(action.Key), []byte(action.Val)) 395 | ents[idx].Result = sm.Result{Value: uint64(len(ents[idx].Cmd))} 396 | } 397 | // save the applied index to the DB. 398 | idx := fmt.Sprintf("%d", ents[len(ents)-1].Index) 399 | wb.Put([]byte(appliedIndexKey), []byte(idx)) 400 | if err := db.db.Write(db.wo, wb); err != nil { 401 | return nil, err 402 | } 403 | if d.lastApplied >= ents[len(ents)-1].Index { 404 | panic("lastApplied not moving forward") 405 | } 406 | d.lastApplied = ents[len(ents)-1].Index 407 | return ents, nil 408 | } 409 | 410 | // Sync synchronizes all in-core state of the state machine. Since the Update 411 | // method in this example already does that every time when it is invoked, the 412 | // Sync method here is a NoOP. 413 | func (d *Commander) Sync() error { 414 | return nil 415 | } 416 | 417 | type CommanderCtx struct { 418 | db *rocksdb 419 | snapshot *gorocksdb.Snapshot 420 | } 421 | 422 | // PrepareSnapshot prepares snapshotting. PrepareSnapshot is responsible to 423 | // capture a state identifier that identifies a point in time state of the 424 | // underlying data. In this example, we use RocksDB's snapshot feature to 425 | // achieve that. 
426 | func (d *Commander) PrepareSnapshot() (interface{}, error) { 427 | if d.closed { 428 | panic("prepare snapshot called after Close()") 429 | } 430 | if d.aborted { 431 | panic("prepare snapshot called after abort") 432 | } 433 | db := (*rocksdb)(atomic.LoadPointer(&d.db)) 434 | return &CommanderCtx{ 435 | db: db, 436 | snapshot: db.db.NewSnapshot(), 437 | }, nil 438 | } 439 | 440 | // saveToWriter saves all existing key-value pairs to the provided writer. 441 | // As an example, we use the most straight forward way to implement this. 442 | func (d *Commander) saveToWriter(db *rocksdb, 443 | ss *gorocksdb.Snapshot, w io.Writer) error { 444 | ro := gorocksdb.NewDefaultReadOptions() 445 | ro.SetSnapshot(ss) 446 | iter := db.db.NewIterator(ro) 447 | defer iter.Close() 448 | count := uint64(0) 449 | for iter.SeekToFirst(); iter.Valid(); iter.Next() { 450 | count++ 451 | } 452 | sz := make([]byte, 8) 453 | binary.LittleEndian.PutUint64(sz, count) 454 | if _, err := w.Write(sz); err != nil { 455 | return err 456 | } 457 | for iter.SeekToFirst(); iter.Valid(); iter.Next() { 458 | key := iter.Key() 459 | val := iter.Value() 460 | action := &Action{ 461 | Key: string(key.Data()), 462 | Val: string(val.Data()), 463 | } 464 | data, err := json.Marshal(action) 465 | if err != nil { 466 | panic(err) 467 | } 468 | binary.LittleEndian.PutUint64(sz, uint64(len(data))) 469 | if _, err := w.Write(sz); err != nil { 470 | return err 471 | } 472 | if _, err := w.Write(data); err != nil { 473 | return err 474 | } 475 | } 476 | return nil 477 | } 478 | 479 | // SaveSnapshot saves the state machine state identified by the state 480 | // identifier provided by the input ctx parameter. Note that SaveSnapshot 481 | // is not suppose to save the latest state. 
482 | func (d *Commander) SaveSnapshot(ctx interface{}, 483 | w io.Writer, done <-chan struct{}) error { 484 | if d.closed { 485 | panic("prepare snapshot called after Close()") 486 | } 487 | if d.aborted { 488 | panic("prepare snapshot called after abort") 489 | } 490 | ctxdata := ctx.(*CommanderCtx) 491 | db := ctxdata.db 492 | db.mu.RLock() 493 | defer db.mu.RUnlock() 494 | ss := ctxdata.snapshot 495 | return d.saveToWriter(db, ss, w) 496 | } 497 | 498 | // RecoverFromSnapshot recovers the state machine state from snapshot. The 499 | // snapshot is recovered into a new DB first and then atomically swapped with 500 | // the existing DB to complete the recovery. 501 | func (d *Commander) RecoverFromSnapshot(r io.Reader, 502 | done <-chan struct{}) error { 503 | if d.closed { 504 | panic("recover from snapshot called after Close()") 505 | } 506 | dir := getNodeDBDirName(d.clusterID, d.nodeID) 507 | dbdir := getNewRandomDBDirName(dir) 508 | oldDirName, err := getCurrentDBDirName(dir) 509 | if err != nil { 510 | return err 511 | } 512 | db, err := createDB(dbdir) 513 | if err != nil { 514 | return err 515 | } 516 | sz := make([]byte, 8) 517 | if _, err := io.ReadFull(r, sz); err != nil { 518 | return err 519 | } 520 | total := binary.LittleEndian.Uint64(sz) 521 | wb := gorocksdb.NewWriteBatch() 522 | for i := uint64(0); i < total; i++ { 523 | if _, err := io.ReadFull(r, sz); err != nil { 524 | return err 525 | } 526 | toRead := binary.LittleEndian.Uint64(sz) 527 | data := make([]byte, toRead) 528 | if _, err := io.ReadFull(r, data); err != nil { 529 | return err 530 | } 531 | action := &Action{} 532 | if err := json.Unmarshal(data, action); err != nil { 533 | panic(err) 534 | } 535 | wb.Put([]byte(action.Key), []byte(action.Val)) 536 | } 537 | if err := db.db.Write(db.wo, wb); err != nil { 538 | return err 539 | } 540 | if err := saveCurrentDBDirName(dir, dbdir); err != nil { 541 | return err 542 | } 543 | if err := replaceCurrentDBFile(dir); err != nil { 544 | 
return err 545 | } 546 | newLastApplied, err := d.queryAppliedIndex(db) 547 | if err != nil { 548 | panic(err) 549 | } 550 | if d.lastApplied > newLastApplied { 551 | panic("last applied not moving forward") 552 | } 553 | d.lastApplied = newLastApplied 554 | old := (*rocksdb)(atomic.SwapPointer(&d.db, unsafe.Pointer(db))) 555 | if old != nil { 556 | old.close() 557 | } 558 | return os.RemoveAll(oldDirName) 559 | } 560 | 561 | // Close closes the state machine. 562 | func (d *Commander) Close() error { 563 | db := (*rocksdb)(atomic.SwapPointer(&d.db, unsafe.Pointer(nil))) 564 | if db != nil { 565 | d.closed = true 566 | db.close() 567 | } else { 568 | if d.closed { 569 | panic("close called twice") 570 | } 571 | } 572 | return nil 573 | } 574 | 575 | // GetHash returns a hash value representing the state of the state machine. 576 | func (d *Commander) GetHash() (uint64, error) { 577 | h := md5.New() 578 | db := (*rocksdb)(atomic.LoadPointer(&d.db)) 579 | ss := db.db.NewSnapshot() 580 | db.mu.RLock() 581 | defer db.mu.RUnlock() 582 | if err := d.saveToWriter(db, ss, h); err != nil { 583 | return 0, err 584 | } 585 | md5sum := h.Sum(nil) 586 | return binary.LittleEndian.Uint64(md5sum[:8]), nil 587 | } 588 | --------------------------------------------------------------------------------