├── .gitignore
├── LICENSE
├── README.md
├── config_example.json
├── images
│   ├── gettingstarted.PNG
│   ├── home.PNG
│   ├── poolBlock.PNG
│   ├── poolpayment.PNG
│   └── workerstats.PNG
├── main.go
├── pool
│   └── pool.go
├── rpc
│   └── rpc.go
├── stratum
│   ├── api.go
│   ├── blocks.go
│   ├── charts.go
│   ├── events.go
│   ├── handlers.go
│   ├── miner.go
│   ├── mmap.go
│   ├── payments.go
│   ├── proto.go
│   ├── storage.go
│   ├── stratum.go
│   └── unlocker.go
├── util
│   └── util.go
└── website
    ├── site
    │   ├── config.js
    │   ├── favicon.ico
    │   ├── index.html
    │   ├── pages
    │   │   ├── events.html
    │   │   ├── getting_started.html
    │   │   ├── home.html
    │   │   ├── payments.html
    │   │   ├── poolblocks.html
    │   │   └── workerstats.html
    │   └── themes
    │       └── default.css
    └── website.go
/.gitignore:
--------------------------------------------------------------------------------
1 | main.exe
2 | config.json
3 | /pooldb
4 | /pooldb_bak
5 | *.key
6 | *.cer
7 | *.log
8 | /logs
--------------------------------------------------------------------------------
/config_example.json:
--------------------------------------------------------------------------------
1 | {
2 | 	"poolHost": "127.0.0.1",
3 | 	"blockchainExplorer": "https://explorer.dero.io/block/{id}",
4 | 	"transactionExplorer": "https://explorer.dero.io/tx/{id}",
5 | 	"address": "",
6 | 	"donationAddress": "dERopdjpGmr2DEwQJdRrKc8M6obca9NQu2EaC2fNe3RNHonYcCfqmjGF7NBEHoB8dpLXWhnjdW7dugFTVhofuKTb4sfzmyBSAj",
7 | 	"donationDescription": "Thank you for supporting our mining pool!",
8 | 	"bypassShareValidation": false,
9 | 
10 | 	"threads": 2,
11 | 	"algo": "astrobwt",
12 | 	"coin": "DERO",
13 | 	"coinUnits": 1000000000000,
14 | 	"coinDecimalPlaces": 4,
15 | 	"coinDifficultyTarget": 27,
16 | 
17 | 	"trustedSharesCount": 30,
18 | 	"blockRefreshInterval": "120ms",
19 | 	"hashrateExpiration": "3h",
20 | 	"storeMinerStatsInterval": "5s",
21 | 
22 | 	"gravitonMaxSnapshots": 5000,
23 | 	"gravitonMigrateWait": "100ms",
24 | 
25 | 	"upstreamCheckInterval": "5s",
26 | 
27 | 	"upstream": [
28 | 		{
29 | 			"enabled": true,
30 | 			"name": "Derod",
31 | 			"host": "127.0.0.1",
32 | 			"port": 20206,
33 | 			"timeout": "10s"
34 | 		},
35 | 		{
36 | 			"enabled": false,
37 | 			"name": "Remote Derod",
38 | 			"host": "derodaemon.nelbert442.com",
39 | 			"port": 20206,
40 | 			"timeout": "10s"
41 | 		}
42 | 	],
43 | 
44 | 	"stratum": {
45 | 		"paymentId": {
46 | 			"addressSeparator": "+"
47 | 		},
48 | 		"fixedDiff": {
49 | 			"addressSeparator": "."
50 | }, 51 | "workerID": { 52 | "addressSeparator": "@" 53 | }, 54 | "donatePercent": { 55 | "addressSeparator": "%" 56 | }, 57 | "soloMining": { 58 | "enabled": true, 59 | "addressSeparator": "~" 60 | }, 61 | 62 | "timeout": "15m", 63 | "healthCheck": true, 64 | "maxFails": 100, 65 | 66 | "listen": [ 67 | { 68 | "host": "0.0.0.0", 69 | "port": 1111, 70 | "diff": 1000, 71 | "minDiff": 500, 72 | "maxConn": 32768, 73 | "desc": "Low end hardware" 74 | }, 75 | { 76 | "host": "0.0.0.0", 77 | "port": 3333, 78 | "diff": 2500, 79 | "minDiff": 500, 80 | "maxConn": 32768, 81 | "desc": "Mid range hardware" 82 | }, 83 | { 84 | "host": "0.0.0.0", 85 | "port": 5555, 86 | "diff": 5000, 87 | "minDiff": 500, 88 | "maxConn": 32768, 89 | "desc": "High end hardware" 90 | } 91 | ], 92 | 93 | "varDiff": { 94 | "enabled": true, 95 | "minDiff": 750, 96 | "maxDiff": 1000000, 97 | "targetTime": 20, 98 | "retargetTime": 120, 99 | "variancePercent": 30, 100 | "maxJump": 50 101 | } 102 | }, 103 | 104 | "api": { 105 | "enabled": true, 106 | "listen": "0.0.0.0:8082", 107 | "statsCollectInterval": "5s", 108 | "hashrateWindow": "10m", 109 | "payments": 30, 110 | "blocks": 50, 111 | "ssl": false, 112 | "sslListen": "0.0.0.0:9092", 113 | "certFile": "fullchain.cer", 114 | "keyFile": "cert.key" 115 | }, 116 | 117 | "unlocker": { 118 | "enabled": true, 119 | "poolFee": 0.1, 120 | "depth": 60, 121 | "interval": "5m" 122 | }, 123 | 124 | "payments": { 125 | "enabled": true, 126 | "interval": "5m", 127 | "mixin": 8, 128 | "maxAddresses": 2, 129 | "minPayment": 10000000000, 130 | "walletHost": "127.0.0.1", 131 | "walletPort": "30309" 132 | }, 133 | 134 | "website": { 135 | "enabled": true, 136 | "port": "8080", 137 | "ssl": false, 138 | "sslPort": "9090", 139 | "certFile": "fullchain.cer", 140 | "keyFile": "cert.key" 141 | }, 142 | 143 | "poolcharts": { 144 | "interval": 60, 145 | "hashrate": { 146 | "enabled": true, 147 | "maximumPeriod": 86400 148 | }, 149 | "miners": { 150 | "enabled": true, 151 | "maximumPeriod": 86400 152 | }, 153 | "workers": { 154 | "enabled": true, 155 | "maximumPeriod": 86400 156 | }, 157 | "difficulty": { 158 | "enabled": true, 159 | "maximumPeriod": 86400 160 | } 161 | }, 162 | "solocharts": { 163 | "interval": 60, 164 | "hashrate": { 165 | "enabled": true, 166 | "maximumPeriod": 86400 167 | }, 168 | "miners": { 169 | "enabled": true, 170 | "maximumPeriod": 86400 171 | }, 172 | "workers": { 173 | "enabled": true, 174 | "maximumPeriod": 86400 175 | } 176 | }, 177 | "events": { 178 | "enabled": true, 179 | "randomrewardevent": { 180 | "enabled": false, 181 | "startDay": "2021-05-01", 182 | "endDay": "2021-05-31", 183 | "stepIntervalInSeconds": 30, 184 | "rewardValueInDERO": 1, 185 | "minerPercentCriteria": 0.9, 186 | "bonus1hrDayEventDate": "2021-05-05" 187 | } 188 | } 189 | } 190 | -------------------------------------------------------------------------------- /images/gettingstarted.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Nelbert442/dero-golang-pool/16720d6dff23141be654d41cfd6a9172f48753ab/images/gettingstarted.PNG -------------------------------------------------------------------------------- /images/home.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Nelbert442/dero-golang-pool/16720d6dff23141be654d41cfd6a9172f48753ab/images/home.PNG -------------------------------------------------------------------------------- /images/poolBlock.PNG: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/Nelbert442/dero-golang-pool/16720d6dff23141be654d41cfd6a9172f48753ab/images/poolBlock.PNG -------------------------------------------------------------------------------- /images/poolpayment.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Nelbert442/dero-golang-pool/16720d6dff23141be654d41cfd6a9172f48753ab/images/poolpayment.PNG -------------------------------------------------------------------------------- /images/workerstats.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Nelbert442/dero-golang-pool/16720d6dff23141be654d41cfd6a9172f48753ab/images/workerstats.PNG -------------------------------------------------------------------------------- /main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "encoding/json" 5 | "log" 6 | "math/rand" 7 | "os" 8 | "path/filepath" 9 | "runtime" 10 | "time" 11 | 12 | "github.com/Nelbert442/dero-golang-pool/pool" 13 | "github.com/Nelbert442/dero-golang-pool/stratum" 14 | "github.com/Nelbert442/dero-golang-pool/website" 15 | ) 16 | 17 | var cfg pool.Config 18 | 19 | var MainInfoLogger = logFileOutMain("INFO") 20 | var MainErrorLogger = logFileOutMain("ERROR") 21 | 22 | func startStratum() { 23 | if cfg.Threads > 0 { 24 | runtime.GOMAXPROCS(cfg.Threads) 25 | log.Printf("[Main] Running with %v threads", cfg.Threads) 26 | MainInfoLogger.Printf("[Main] Running with %v threads", cfg.Threads) 27 | } else { 28 | n := runtime.NumCPU() 29 | runtime.GOMAXPROCS(n) 30 | log.Printf("[Main] Running with default %v threads", n) 31 | } 32 | 33 | s := stratum.NewStratum(&cfg) 34 | 35 | // If EventsConfig is enabled, start event configuration service/listeners 36 | if cfg.EventsConfig.Enabled { 37 | events := stratum.NewEventsProcessor(&cfg.EventsConfig, cfg.CoinUnits) 38 | go events.Start() 39 | } 40 | 41 | // If API enabled, start api service/listeners 42 | if cfg.API.Enabled { 43 | a := stratum.NewApiServer(&cfg.API, s, &cfg.EventsConfig) 44 | go a.Start() 45 | 46 | // Start charts, reliant on api (uses data from api to reduce duplicate db calls/query/processing) and no need to run charts if api isn't running too 47 | charts := stratum.NewChartsProcessor(&cfg.PoolCharts, &cfg.SoloCharts, a) 48 | go charts.Start() 49 | } 50 | 51 | // If unlocker enabled, start unlocker processes / go routines 52 | if cfg.UnlockerConfig.Enabled { 53 | unlocker := stratum.NewBlockUnlocker(&cfg.UnlockerConfig, s) 54 | go unlocker.StartBlockUnlocker(s) 55 | } 56 | 57 | // If payments enabled, start payment processes / go routines 58 | if cfg.PaymentsConfig.Enabled { 59 | payments := stratum.NewPayoutsProcessor(&cfg.PaymentsConfig, s) 60 | payments.Start(s) 61 | } 62 | 63 | // If website enabled, start website service/listeners 64 | if cfg.Website.Enabled { 65 | go website.NewWebsite(&cfg.Website) 66 | } 67 | 68 | // Listen on defined stratum ports for incoming miners 69 | s.Listen() 70 | } 71 | 72 | func readConfig(cfg *pool.Config) { 73 | configFileName := "config.json" 74 | if len(os.Args) > 1 { 75 | configFileName = os.Args[1] 76 | } 77 | configFileName, _ = filepath.Abs(configFileName) 78 | log.Printf("[Main] Loading config: %v", configFileName) 79 | MainInfoLogger.Printf("[Main] Loading config: %v", configFileName) 80 | 81 | configFile, err := 
os.Open(configFileName)
82 | 	if err != nil {
83 | 		MainErrorLogger.Printf("[Main] File error: %v", err.Error())
84 | 		log.Fatal("[Main] File error: ", err.Error())
85 | 	}
86 | 	defer configFile.Close()
87 | 	jsonParser := json.NewDecoder(configFile)
88 | 	if err = jsonParser.Decode(&cfg); err != nil {
89 | 		MainErrorLogger.Printf("[Main] Config error: %v", err.Error())
90 | 		log.Fatal("[Main] Config error: ", err.Error())
91 | 	}
92 | }
93 | 
94 | func logFileOutMain(lType string) *log.Logger {
95 | 	var logFileName string
96 | 	if lType == "ERROR" {
97 | 		logFileName = "logs/mainError.log"
98 | 	} else {
99 | 		logFileName = "logs/main.log"
100 | 	}
101 | 	os.Mkdir("logs", 0705)
102 | 	f, err := os.OpenFile(logFileName, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0705)
103 | 	if err != nil {
104 | 		panic(err)
105 | 	}
106 | 
107 | 	logType := lType + ": "
108 | 	l := log.New(f, logType, log.LstdFlags|log.Lmicroseconds)
109 | 	return l
110 | }
111 | 
112 | func main() {
113 | 	log.SetFlags(log.LstdFlags | log.Lmicroseconds)
114 | 	rand.Seed(time.Now().UTC().UnixNano())
115 | 
116 | 	readConfig(&cfg)
117 | 
118 | 	startStratum()
119 | }
120 | 
--------------------------------------------------------------------------------
/pool/pool.go:
--------------------------------------------------------------------------------
1 | package pool
2 | 
3 | type Config struct {
4 | 	PoolHost string `json:"poolHost"`
5 | 	BlockchainExplorer string `json:"blockchainExplorer"`
6 | 	TransactionExplorer string `json:"transactionExplorer"`
7 | 	Address string `json:"address"`
8 | 	DonationAddress string `json:"donationAddress"`
9 | 	DonationDescription string `json:"donationDescription"`
10 | 	BypassShareValidation bool `json:"bypassShareValidation"`
11 | 	Threads int `json:"threads"`
12 | 	Algo string `json:"algo"`
13 | 	Coin string `json:"coin"`
14 | 	CoinUnits int64 `json:"coinUnits"`
15 | 	CoinDecimalPlaces int64 `json:"coinDecimalPlaces"`
16 | 	CoinDifficultyTarget int `json:"coinDifficultyTarget"`
17 | 	TrustedSharesCount int64 `json:"trustedSharesCount"`
18 | 	BlockRefreshInterval string `json:"blockRefreshInterval"`
19 | 	HashrateExpiration string `json:"hashrateExpiration"`
20 | 	StoreMinerStatsInterval string `json:"storeMinerStatsInterval"`
21 | 	GravitonMaxSnapshots uint64 `json:"gravitonMaxSnapshots"`
22 | 	GravitonMigrateWait string `json:"gravitonMigrateWait"`
23 | 	UpstreamCheckInterval string `json:"upstreamCheckInterval"`
24 | 	Upstream []Upstream `json:"upstream"`
25 | 	Stratum Stratum `json:"stratum"`
26 | 	API APIConfig `json:"api"`
27 | 	UnlockerConfig UnlockerConfig `json:"unlocker"`
28 | 	PaymentsConfig PaymentsConfig `json:"payments"`
29 | 	Website Website `json:"website"`
30 | 	PoolCharts PoolChartsConfig `json:"poolcharts"`
31 | 	SoloCharts SoloChartsConfig `json:"solocharts"`
32 | 	EventsConfig EventsConfig `json:"events"`
33 | }
34 | 
35 | type Upstream struct {
36 | 	Name string `json:"name"`
37 | 	Host string `json:"host"`
38 | 	Port int `json:"port"`
39 | 	Timeout string `json:"timeout"`
40 | 	Enabled bool `json:"enabled"`
41 | }
42 | 
43 | type Stratum struct {
44 | 	PaymentID PaymentID `json:"paymentId"`
45 | 	FixedDiff FixedDiff `json:"fixedDiff"`
46 | 	WorkerID WorkerID `json:"workerID"`
47 | 	DonatePercent DonatePercent `json:"donatePercent"`
48 | 	SoloMining SoloMining `json:"soloMining"`
49 | 	Timeout string `json:"timeout"`
50 | 	MaxFails int64 `json:"maxFails"`
51 | 	HealthCheck bool `json:"healthCheck"`
52 | 	Ports []Port `json:"listen"`
53 | 	VarDiff VarDiffConfig `json:"varDiff"`
54 | }
55 | 
56 | type PaymentID struct {
57 | 	AddressSeparator string 
`json:"addressSeparator"` 58 | } 59 | 60 | type FixedDiff struct { 61 | AddressSeparator string `json:"addressSeparator"` 62 | } 63 | 64 | type WorkerID struct { 65 | AddressSeparator string `json:"addressSeparator"` 66 | } 67 | 68 | type DonatePercent struct { 69 | AddressSeparator string `json:"addressSeparator"` 70 | } 71 | 72 | type SoloMining struct { 73 | Enabled bool `json:"enabled"` 74 | AddressSeparator string `json:"addressSeparator"` 75 | } 76 | 77 | type Port struct { 78 | Difficulty int64 `json:"diff"` 79 | MinDiff int64 `json:"minDiff"` 80 | Host string `json:"host"` 81 | Port int `json:"port"` 82 | MaxConn int `json:"maxConn"` 83 | Desc string `json:"desc"` 84 | } 85 | 86 | type VarDiffConfig struct { 87 | Enabled bool `json:"enabled"` 88 | MinDiff int64 `json:"minDiff"` 89 | MaxDiff int64 `json:"maxDiff"` 90 | TargetTime int64 `json:"targetTime"` 91 | RetargetTime int64 `json:"retargetTime"` 92 | VariancePercent float64 `json:"variancePercent"` 93 | MaxJump float64 `json:"maxJump"` 94 | } 95 | 96 | type APIConfig struct { 97 | Enabled bool `json:"enabled"` 98 | Listen string `json:"listen"` 99 | StatsCollectInterval string `json:"statsCollectInterval"` 100 | HashrateWindow string `json:"hashrateWindow"` 101 | Payments int64 `json:"payments"` 102 | Blocks int64 `json:"blocks"` 103 | SSL bool `json:"ssl"` 104 | SSLListen string `json:"sslListen"` 105 | CertFile string `json:"certFile"` 106 | KeyFile string `json:"keyFile"` 107 | } 108 | 109 | type UnlockerConfig struct { 110 | Enabled bool `json:"enabled"` 111 | PoolFee float64 `json:"poolFee"` 112 | Depth int64 `json:"depth"` 113 | Interval string `json:"interval"` 114 | PoolFeeAddress string `json:"poolFeeAddress"` 115 | } 116 | 117 | type PaymentsConfig struct { 118 | Enabled bool `json:"enabled"` 119 | Interval string `json:"interval"` 120 | Mixin uint64 `json:"mixin"` 121 | MaxAddresses uint64 `json:"maxAddresses"` 122 | Threshold uint64 `json:"minPayment"` 123 | WalletHost string `json:"walletHost"` 124 | WalletPort string `json:"walletPort"` 125 | } 126 | 127 | type Website struct { 128 | Enabled bool `json:"enabled"` 129 | Port string `json:"port"` 130 | SSL bool `json:"ssl"` 131 | SSLPort string `json:"sslPort"` 132 | CertFile string `json:"certFile"` 133 | KeyFile string `json:"keyFile"` 134 | } 135 | 136 | type PoolChartsConfig struct { 137 | Interval int64 `json:"interval"` 138 | Hashrate ChartDataConfig `json:"hashrate"` 139 | Miners ChartDataConfig `json:"miners"` 140 | Workers ChartDataConfig `json:"workers"` 141 | Difficulty ChartDataConfig `json:"difficulty"` 142 | } 143 | 144 | type SoloChartsConfig struct { 145 | Interval int64 `json:"interval"` 146 | Hashrate ChartDataConfig `json:"hashrate"` 147 | Miners ChartDataConfig `json:"miners"` 148 | Workers ChartDataConfig `json:"workers"` 149 | } 150 | 151 | type ChartDataConfig struct { 152 | Enabled bool `json:"enabled"` 153 | MaximumPeriod int64 `json:"maximumPeriod"` 154 | } 155 | 156 | type EventsConfig struct { 157 | Enabled bool `json:"enabled"` 158 | RandomRewardEventConfig RandomRewardEventConfig `json:"randomrewardevent"` 159 | } 160 | 161 | type RandomRewardEventConfig struct { 162 | Enabled bool `json:"enabled"` 163 | StartDay string `json:"startDay"` 164 | EndDay string `json:"endDay"` 165 | StepIntervalInSeconds int64 `json:"stepIntervalInSeconds"` 166 | RewardValueInDERO int64 `json:"rewardValueInDERO"` 167 | MinerPercentCriteria float64 `json:"minerPercentCriteria"` 168 | Bonus1hrDayEventDate string `json:"bonus1hrDayEventDate"` 169 | } 170 | 
-------------------------------------------------------------------------------- /rpc/rpc.go: -------------------------------------------------------------------------------- 1 | package rpc 2 | 3 | import ( 4 | "bytes" 5 | "encoding/json" 6 | "errors" 7 | "fmt" 8 | "net/http" 9 | "net/url" 10 | "sync" 11 | "sync/atomic" 12 | "time" 13 | 14 | "github.com/Nelbert442/dero-golang-pool/pool" 15 | ) 16 | 17 | type RPCClient struct { 18 | sync.RWMutex 19 | sickRate int64 20 | successRate int64 21 | Accepts int64 22 | Rejects int64 23 | LastSubmissionAt int64 24 | FailsCount int64 25 | Url *url.URL 26 | login string 27 | password string 28 | Name string 29 | sick bool 30 | client *http.Client 31 | info atomic.Value 32 | } 33 | 34 | type GetBlockTemplateReply struct { 35 | Blocktemplate_blob string `json:"blocktemplate_blob"` 36 | Blockhashing_blob string `json:"blockhashing_blob"` 37 | Expected_reward uint64 `json:"expected_reward"` 38 | Difficulty uint64 `json:"difficulty"` 39 | Height uint64 `json:"height"` 40 | Prev_Hash string `json:"prev_hash"` 41 | Reserved_Offset uint64 `json:"reserved_offset"` 42 | Epoch uint64 `json:"epoch"` // used to expire pool jobs 43 | Status string `json:"status"` 44 | } 45 | 46 | type SubmitBlock_Result struct { 47 | BLID string `json:"blid"` 48 | Status string `json:"status"` 49 | } 50 | 51 | type GetInfoReply struct { 52 | Difficulty int64 `json:"difficulty"` 53 | Stableheight int64 `json:"stableheight"` 54 | Topoheight int64 `json:"topoheight"` 55 | Averageblocktime50 float32 `json:"averageblocktime50"` 56 | Target int64 `json:"target"` 57 | Testnet bool `json:"testnet"` 58 | TopBlockHash string `json:"top_block_hash"` 59 | DynamicFeePerKB int64 `json:"dynamic_fee_per_kb"` 60 | TotalSupply int64 `json:"total_supply"` 61 | MedianBlockSize int64 `json:"median_block_Size"` 62 | Version string `json:"version"` 63 | Height int64 `json:"height"` 64 | TxPoolSize int64 `json:"tx_pool_size"` 65 | Status string `json:"status"` 66 | } 67 | 68 | type GetBlockHashReply struct { 69 | BlockHeader Block_Header `json:"block_header"` 70 | Status string `json:"status"` 71 | } 72 | 73 | type GetBalanceReply struct { 74 | Balance uint64 `json:"balance"` 75 | UnlockedBalance uint64 `json:"unlocked_balance"` 76 | } 77 | 78 | type ( 79 | Destinations struct { 80 | Amount uint64 `json:"amount"` 81 | Address string `json:"address"` 82 | } 83 | 84 | Transfer_Params struct { 85 | Destinations []Destinations `json:"destinations"` 86 | //Fee uint64 `json:"fee"` 87 | Mixin uint64 `json:"mixin"` 88 | Unlock_time uint64 `json:"unlock_time"` 89 | Payment_ID string `json:"payment_id"` 90 | Get_tx_key bool `json:"get_tx_key"` 91 | Priority uint64 `json:"priority"` 92 | Do_not_relay bool `json:"do_not_relay"` 93 | Get_tx_hex bool `json:"get_tx_hex"` 94 | } 95 | ) 96 | 97 | //transfer split 98 | type ( 99 | TransferSplit_Params Transfer_Params 100 | TransferSplit_Result struct { 101 | Fee_list []uint64 `json:"fee_list"` 102 | Amount_list []uint64 `json:"amount_list"` 103 | Tx_key_list []string `json:"tx_key_list"` 104 | Tx_hash_list []string `json:"tx_hash_list"` 105 | Tx_blob_list []string `json:"tx_blob_list"` 106 | } 107 | ) 108 | 109 | type Block_Header struct { 110 | Depth int64 `json:"depth"` 111 | Difficulty string `json:"difficulty"` 112 | Hash string `json:"hash"` 113 | Height int64 `json:"height"` 114 | Topoheight int64 `json:"topoheight"` 115 | MajorVersion uint64 `json:"major_version"` 116 | MinorVersion uint64 `json:"minor_version"` 117 | Nonce uint64 `json:"nonce"` 118 | OrphanStatus 
bool `json:"orphan_status"` 119 | Syncblock bool `json:"syncblock"` 120 | Txcount int64 `json:"txcount"` 121 | Reward uint64 `json:"reward"` 122 | Tips []string `json:"tips"` 123 | Timestamp uint64 `json:"timestamp"` 124 | } 125 | 126 | type JSONRpcResp struct { 127 | Id *json.RawMessage `json:"id"` 128 | Result *json.RawMessage `json:"result"` 129 | Error map[string]interface{} `json:"error"` 130 | } 131 | 132 | func NewRPCClient(cfg *pool.Upstream) (*RPCClient, error) { 133 | rawUrl := fmt.Sprintf("http://%s:%v/json_rpc", cfg.Host, cfg.Port) 134 | url, err := url.Parse(rawUrl) 135 | if err != nil { 136 | return nil, err 137 | } 138 | rpcClient := &RPCClient{Name: cfg.Name, Url: url} 139 | timeout, _ := time.ParseDuration(cfg.Timeout) 140 | rpcClient.client = &http.Client{ 141 | Timeout: timeout, 142 | } 143 | return rpcClient, nil 144 | } 145 | 146 | func (r *RPCClient) GetBlockTemplate(reserveSize int, address string) (*GetBlockTemplateReply, error) { 147 | params := map[string]interface{}{"reserve_size": reserveSize, "wallet_address": address} 148 | rpcResp, err := r.doPost(r.Url.String(), "getblocktemplate", params) 149 | var reply *GetBlockTemplateReply 150 | if err != nil { 151 | return nil, err 152 | } 153 | if rpcResp.Result != nil { 154 | err = json.Unmarshal(*rpcResp.Result, &reply) 155 | } 156 | return reply, err 157 | } 158 | 159 | func (r *RPCClient) GetBlockByHash(hash string) (*GetBlockHashReply, error) { 160 | params := map[string]interface{}{"hash": hash} 161 | rpcResp, err := r.doPost(r.Url.String(), "getblockheaderbyhash", params) 162 | if err != nil { 163 | return nil, err 164 | } 165 | 166 | var reply *GetBlockHashReply 167 | if rpcResp.Result != nil { 168 | err = json.Unmarshal(*rpcResp.Result, &reply) 169 | } 170 | 171 | return reply, err 172 | } 173 | 174 | func (r *RPCClient) GetLastBlockHeader() (*GetBlockHashReply, error) { 175 | rpcResp, err := r.doPostNoParams(r.Url.String(), "getlastblockheader") 176 | if err != nil { 177 | return nil, err 178 | } 179 | 180 | var reply *GetBlockHashReply 181 | if rpcResp.Result != nil { 182 | err = json.Unmarshal(*rpcResp.Result, &reply) 183 | } 184 | 185 | return reply, err 186 | } 187 | 188 | func (r *RPCClient) GetInfo() (*GetInfoReply, error) { 189 | rpcResp, err := r.doPostNoParams(r.Url.String(), "get_info") 190 | var reply *GetInfoReply 191 | if err != nil { 192 | return nil, err 193 | } 194 | if rpcResp.Result != nil { 195 | err = json.Unmarshal(*rpcResp.Result, &reply) 196 | } 197 | return reply, err 198 | } 199 | 200 | func (r *RPCClient) GetBalance(url string) (*GetBalanceReply, error) { 201 | rpcResp, err := r.doPostNoParams(url, "getbalance") 202 | if err != nil { 203 | return nil, err 204 | } 205 | var reply *GetBalanceReply 206 | err = json.Unmarshal(*rpcResp.Result, &reply) 207 | if err != nil { 208 | return nil, err 209 | } 210 | return reply, err 211 | } 212 | 213 | func (r *RPCClient) SendTransaction(url string, transferParams Transfer_Params) (*TransferSplit_Result, error) { 214 | 215 | rpcResp, err := r.doPost(url, "transfer_split", transferParams) 216 | if err != nil { 217 | return nil, err 218 | } 219 | var reply *TransferSplit_Result 220 | err = json.Unmarshal(*rpcResp.Result, &reply) 221 | if err != nil { 222 | return nil, err 223 | } 224 | return reply, err 225 | } 226 | 227 | func (r *RPCClient) SubmitBlock(blocktemplate_blob string, blockhashing_blob string) (*JSONRpcResp, error) { 228 | return r.doPost(r.Url.String(), "submitblock", []string{blocktemplate_blob, blockhashing_blob}) 229 | } 230 | 
231 | func (r *RPCClient) doPost(url, method string, params interface{}) (*JSONRpcResp, error) {
232 | 	jsonReq := map[string]interface{}{"jsonrpc": "2.0", "id": 0, "method": method, "params": params}
233 | 	data, _ := json.Marshal(jsonReq)
234 | 	req, err := http.NewRequest("POST", url, bytes.NewBuffer(data))
235 | 	req.Header.Set("Content-Length", fmt.Sprint(len(data)))
236 | 	req.Header.Set("Content-Type", "application/json")
237 | 	req.Header.Set("Accept", "application/json")
238 | 	req.SetBasicAuth(r.login, r.password)
239 | 	resp, err := r.client.Do(req)
240 | 	if err != nil {
241 | 		r.markSick()
242 | 		return nil, err
243 | 	}
244 | 	defer resp.Body.Close()
245 | 
246 | 	if resp.StatusCode < 200 || resp.StatusCode >= 400 {
247 | 		return nil, errors.New(resp.Status)
248 | 	}
249 | 
250 | 	var rpcResp *JSONRpcResp
251 | 	err = json.NewDecoder(resp.Body).Decode(&rpcResp)
252 | 	if err != nil {
253 | 		r.markSick()
254 | 		return nil, err
255 | 	}
256 | 	if rpcResp.Error != nil {
257 | 		r.markSick()
258 | 		return nil, errors.New(rpcResp.Error["message"].(string))
259 | 	}
260 | 	return rpcResp, err
261 | }
262 | 
263 | func (r *RPCClient) doPostNoParams(url, method string) (*JSONRpcResp, error) {
264 | 	jsonReq := map[string]interface{}{"jsonrpc": "2.0", "id": 0, "method": method}
265 | 	data, _ := json.Marshal(jsonReq)
266 | 	req, err := http.NewRequest("POST", url, bytes.NewBuffer(data))
267 | 	req.Header.Set("Content-Length", fmt.Sprint(len(data)))
268 | 	req.Header.Set("Content-Type", "application/json")
269 | 	req.Header.Set("Accept", "application/json")
270 | 	req.SetBasicAuth(r.login, r.password)
271 | 	resp, err := r.client.Do(req)
272 | 	if err != nil {
273 | 		r.markSick()
274 | 		return nil, err
275 | 	}
276 | 	defer resp.Body.Close()
277 | 
278 | 	if resp.StatusCode < 200 || resp.StatusCode >= 400 {
279 | 		return nil, errors.New(resp.Status)
280 | 	}
281 | 
282 | 	var rpcResp *JSONRpcResp
283 | 	err = json.NewDecoder(resp.Body).Decode(&rpcResp)
284 | 	if err != nil {
285 | 		r.markSick()
286 | 		return nil, err
287 | 	}
288 | 	if rpcResp.Error != nil {
289 | 		r.markSick()
290 | 		return nil, errors.New(rpcResp.Error["message"].(string))
291 | 	}
292 | 	return rpcResp, err
293 | }
294 | 
295 | func (r *RPCClient) Check(reserveSize int, address string) (bool, error) {
296 | 	_, err := r.GetBlockTemplate(reserveSize, address)
297 | 	if err != nil {
298 | 		return false, err
299 | 	}
300 | 	r.markAlive()
301 | 	return !r.Sick(), nil
302 | }
303 | 
304 | func (r *RPCClient) Sick() bool {
305 | 	r.RLock()
306 | 	defer r.RUnlock()
307 | 	return r.sick
308 | }
309 | 
310 | func (r *RPCClient) markSick() {
311 | 	r.Lock()
312 | 	if !r.sick {
313 | 		atomic.AddInt64(&r.FailsCount, 1)
314 | 	}
315 | 	r.sickRate++
316 | 	r.successRate = 0
317 | 	if r.sickRate >= 5 {
318 | 		r.sick = true
319 | 	}
320 | 	r.Unlock()
321 | }
322 | 
323 | func (r *RPCClient) markAlive() {
324 | 	r.Lock()
325 | 	r.successRate++
326 | 	if r.successRate >= 5 {
327 | 		r.sick = false
328 | 		r.sickRate = 0
329 | 		r.successRate = 0
330 | 	}
331 | 	r.Unlock()
332 | }
333 | 
334 | func (r *RPCClient) UpdateInfo() (*GetInfoReply, error) {
335 | 	info, err := r.GetInfo()
336 | 	if err == nil {
337 | 		r.info.Store(info)
338 | 	}
339 | 	return info, err
340 | }
341 | 
342 | func (r *RPCClient) Info() *GetInfoReply {
343 | 	reply, _ := r.info.Load().(*GetInfoReply)
344 | 	return reply
345 | }
346 | 
--------------------------------------------------------------------------------
/stratum/blocks.go:
--------------------------------------------------------------------------------
1 | package stratum
2 | 
3 | import (
4 | 	"bytes"
5 | 	
"encoding/binary" 6 | "encoding/hex" 7 | "log" 8 | "os" 9 | ) 10 | 11 | type BlockTemplate struct { 12 | Blocktemplate_blob string 13 | Blockhashing_blob string 14 | Expected_reward uint64 15 | Difficulty uint64 16 | Height uint64 17 | Prev_Hash string 18 | Reserved_Offset uint64 19 | Epoch uint64 20 | Status string 21 | Buffer []byte 22 | } 23 | 24 | var BlocksInfoLogger = logFileOutBlocks("INFO") 25 | var BlocksErrorLogger = logFileOutBlocks("ERROR") 26 | 27 | func (b *BlockTemplate) nextBlob(extraNonce uint32, instanceID []byte) string { 28 | extraBuff := new(bytes.Buffer) 29 | binary.Write(extraBuff, binary.BigEndian, extraNonce) 30 | 31 | blobBuff := make([]byte, len(b.Buffer)) 32 | copy(blobBuff, b.Buffer) 33 | copy(blobBuff[b.Reserved_Offset+4:b.Reserved_Offset+7], instanceID) 34 | copy(blobBuff[b.Reserved_Offset:], extraBuff.Bytes()) 35 | blob := blobBuff 36 | return hex.EncodeToString(blob) 37 | } 38 | 39 | func (s *StratumServer) fetchBlockTemplate() bool { 40 | r := s.rpc() 41 | reply, err := r.GetBlockTemplate(10, s.config.Address) 42 | if err != nil { 43 | log.Printf("[Blocks] Error while refreshing block template: %s", err) 44 | BlocksErrorLogger.Printf("[Blocks] Error while refreshing block template: %s", err) 45 | return false 46 | } 47 | 48 | t := s.currentBlockTemplate() 49 | 50 | if t != nil && t.Prev_Hash == reply.Prev_Hash { 51 | // Fallback to height comparison 52 | if len(reply.Prev_Hash) == 0 && reply.Height > t.Height { 53 | log.Printf("[Blocks] New block to mine on %s at height %v, diff: %v", r.Name, reply.Height, reply.Difficulty) 54 | BlocksInfoLogger.Printf("[Blocks] New block to mine on %s at height %v, diff: %v", r.Name, reply.Height, reply.Difficulty) 55 | } else { 56 | return false 57 | } 58 | } else { 59 | log.Printf("[Blocks] New block to mine on %s at height %v, diff: %v, prev_hash: %s", r.Name, reply.Height, reply.Difficulty, reply.Prev_Hash) 60 | BlocksInfoLogger.Printf("[Blocks] New block to mine on %s at height %v, diff: %v, prev_hash: %s", r.Name, reply.Height, reply.Difficulty, reply.Prev_Hash) 61 | } 62 | 63 | newTemplate := BlockTemplate{ 64 | Blocktemplate_blob: reply.Blocktemplate_blob, 65 | Blockhashing_blob: reply.Blockhashing_blob, 66 | Expected_reward: reply.Expected_reward, 67 | Difficulty: reply.Difficulty, 68 | Height: reply.Height, 69 | Prev_Hash: reply.Prev_Hash, 70 | Reserved_Offset: reply.Reserved_Offset, 71 | Epoch: reply.Epoch, 72 | Status: reply.Status, 73 | } 74 | newTemplate.Buffer, _ = hex.DecodeString(reply.Blockhashing_blob) 75 | s.blockTemplate.Store(&newTemplate) 76 | return true 77 | } 78 | 79 | func logFileOutBlocks(lType string) *log.Logger { 80 | var logFileName string 81 | if lType == "ERROR" { 82 | logFileName = "logs/blocksError.log" 83 | } else { 84 | logFileName = "logs/blocks.log" 85 | } 86 | os.Mkdir("logs", 0705) 87 | f, err := os.OpenFile(logFileName, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0705) 88 | if err != nil { 89 | panic(err) 90 | } 91 | 92 | logType := lType + ": " 93 | l := log.New(f, logType, log.LstdFlags|log.Lmicroseconds) 94 | return l 95 | } 96 | -------------------------------------------------------------------------------- /stratum/charts.go: -------------------------------------------------------------------------------- 1 | package stratum 2 | 3 | import ( 4 | "log" 5 | "os" 6 | "strconv" 7 | "time" 8 | 9 | "github.com/Nelbert442/dero-golang-pool/pool" 10 | "github.com/Nelbert442/dero-golang-pool/util" 11 | ) 12 | 13 | type Charts struct { 14 | PoolChartsConfig *pool.PoolChartsConfig 15 | 
SoloChartsConfig *pool.SoloChartsConfig 16 | Api *ApiServer 17 | } 18 | 19 | type ChartData struct { 20 | Timestamp int64 21 | Value int64 22 | } 23 | 24 | var ChartsInfoLogger = logFileOutCharts("INFO") 25 | var ChartsErrorLogger = logFileOutCharts("ERROR") 26 | 27 | func NewChartsProcessor(pcfg *pool.PoolChartsConfig, scfg *pool.SoloChartsConfig, a *ApiServer) *Charts { 28 | c := &Charts{PoolChartsConfig: pcfg, SoloChartsConfig: scfg, Api: a} 29 | return c 30 | } 31 | 32 | func (c *Charts) Start() { 33 | log.Printf("[Charts] Starting charts data collection") 34 | ChartsInfoLogger.Printf("[Charts] Starting charts data collection") 35 | writeWait, _ := time.ParseDuration("10ms") 36 | 37 | // Pool Hashrate 38 | if c.PoolChartsConfig.Hashrate.Enabled { 39 | phrIntv := time.Duration(c.PoolChartsConfig.Interval) * time.Second 40 | phrTimer := time.NewTimer(phrIntv) 41 | log.Printf("[Charts] Set pool hashrate chart interval to %v", phrIntv) 42 | ChartsInfoLogger.Printf("[Charts] Set pool hashrate chart interval to %v", phrIntv) 43 | 44 | go func() { 45 | for { 46 | select { 47 | case <-phrTimer.C: 48 | stats := c.Api.getStats() 49 | now := util.MakeTimestamp() / 1000 50 | if stats["poolHashrate"] == nil { 51 | phrTimer.Reset(phrIntv) 52 | } else { 53 | log.Printf("[Charts] Pool Hashrate: %v", stats["poolHashrate"]) 54 | cData := &ChartData{Timestamp: now, Value: stats["poolHashrate"].(int64)} 55 | for Graviton_backend.Writing == 1 { 56 | //log.Printf("[Charts-poolhashrate] GravitonDB is writing... sleeping for %v...", writeWait) 57 | //StorageInfoLogger.Printf("[Charts-poolhashrate] GravitonDB is writing... sleeping for %v...", writeWait) 58 | time.Sleep(writeWait) 59 | } 60 | Graviton_backend.Writing = 1 61 | Graviton_backend.WriteChartsData(cData, "poolhashrate", c.PoolChartsConfig.Interval, c.PoolChartsConfig.Hashrate.MaximumPeriod) 62 | Graviton_backend.Writing = 0 63 | phrTimer.Reset(phrIntv) 64 | } 65 | } 66 | } 67 | }() 68 | } 69 | 70 | // Pool Miners 71 | if c.PoolChartsConfig.Miners.Enabled { 72 | pmIntv := time.Duration(c.PoolChartsConfig.Interval) * time.Second 73 | pmTimer := time.NewTimer(pmIntv) 74 | log.Printf("[Charts] Set pool miners chart interval to %v", pmIntv) 75 | ChartsInfoLogger.Printf("[Charts] Set pool miners chart interval to %v", pmIntv) 76 | 77 | go func() { 78 | for { 79 | select { 80 | case <-pmTimer.C: 81 | stats := c.Api.getStats() 82 | now := util.MakeTimestamp() / 1000 83 | if stats["totalPoolMiners"] == nil { 84 | pmTimer.Reset(pmIntv) 85 | } else { 86 | log.Printf("[Charts] Pool Miners: %v", stats["totalPoolMiners"]) 87 | cData := &ChartData{Timestamp: now, Value: stats["totalPoolMiners"].(int64)} 88 | for Graviton_backend.Writing == 1 { 89 | //log.Printf("[Charts-totalpoolminers] GravitonDB is writing... sleeping for %v...", writeWait) 90 | //StorageInfoLogger.Printf("[Charts-totalpoolminers] GravitonDB is writing... 
sleeping for %v...", writeWait) 91 | time.Sleep(writeWait) 92 | } 93 | Graviton_backend.Writing = 1 94 | Graviton_backend.WriteChartsData(cData, "totalpoolminers", c.PoolChartsConfig.Interval, c.PoolChartsConfig.Miners.MaximumPeriod) 95 | Graviton_backend.Writing = 0 96 | pmTimer.Reset(pmIntv) 97 | } 98 | } 99 | } 100 | }() 101 | } 102 | 103 | // Pool Workers 104 | if c.PoolChartsConfig.Workers.Enabled { 105 | pwIntv := time.Duration(c.PoolChartsConfig.Interval) * time.Second 106 | pwTimer := time.NewTimer(pwIntv) 107 | log.Printf("[Charts] Set pool workers chart interval to %v", pwIntv) 108 | ChartsInfoLogger.Printf("[Charts] Set pool workers chart interval to %v", pwIntv) 109 | 110 | go func() { 111 | for { 112 | select { 113 | case <-pwTimer.C: 114 | stats := c.Api.getStats() 115 | now := util.MakeTimestamp() / 1000 116 | if stats["totalPoolWorkers"] == nil { 117 | pwTimer.Reset(pwIntv) 118 | } else { 119 | log.Printf("[Charts] Pool Workers: %v", stats["totalPoolWorkers"]) 120 | cData := &ChartData{Timestamp: now, Value: stats["totalPoolWorkers"].(int64)} 121 | for Graviton_backend.Writing == 1 { 122 | //log.Printf("[Charts-totalpoolworkers] GravitonDB is writing... sleeping for %v...", writeWait) 123 | //StorageInfoLogger.Printf("[Charts-totalpoolworkers] GravitonDB is writing... sleeping for %v...", writeWait) 124 | time.Sleep(writeWait) 125 | } 126 | Graviton_backend.Writing = 1 127 | Graviton_backend.WriteChartsData(cData, "totalpoolworkers", c.PoolChartsConfig.Interval, c.PoolChartsConfig.Workers.MaximumPeriod) 128 | Graviton_backend.Writing = 0 129 | pwTimer.Reset(pwIntv) 130 | } 131 | } 132 | } 133 | }() 134 | } 135 | 136 | // Solo Hashrate 137 | if c.SoloChartsConfig.Hashrate.Enabled { 138 | shrIntv := time.Duration(c.SoloChartsConfig.Interval) * time.Second 139 | shrTimer := time.NewTimer(shrIntv) 140 | log.Printf("[Charts] Set solo hashrate chart interval to %v", shrIntv) 141 | ChartsInfoLogger.Printf("[Charts] Set solo hashrate chart interval to %v", shrIntv) 142 | 143 | go func() { 144 | for { 145 | select { 146 | case <-shrTimer.C: 147 | stats := c.Api.getStats() 148 | now := util.MakeTimestamp() / 1000 149 | if stats["soloHashrate"] == nil { 150 | shrTimer.Reset(shrIntv) 151 | } else { 152 | log.Printf("[Charts] Solo Hashrate: %v", stats["soloHashrate"]) 153 | cData := &ChartData{Timestamp: now, Value: stats["soloHashrate"].(int64)} 154 | for Graviton_backend.Writing == 1 { 155 | //log.Printf("[Charts-solohashrate] GravitonDB is writing... sleeping for %v...", writeWait) 156 | //StorageInfoLogger.Printf("[Charts-solohashrate] GravitonDB is writing... 
sleeping for %v...", writeWait)
157 | 					time.Sleep(writeWait)
158 | 				}
159 | 				Graviton_backend.Writing = 1
160 | 				Graviton_backend.WriteChartsData(cData, "solohashrate", c.SoloChartsConfig.Interval, c.SoloChartsConfig.Hashrate.MaximumPeriod)
161 | 				Graviton_backend.Writing = 0
162 | 				shrTimer.Reset(shrIntv)
163 | 				}
164 | 			}
165 | 		}
166 | 	}()
167 | }
168 | 
169 | // Solo Miners
170 | if c.SoloChartsConfig.Miners.Enabled {
171 | 	smIntv := time.Duration(c.SoloChartsConfig.Interval) * time.Second
172 | 	smTimer := time.NewTimer(smIntv)
173 | 	log.Printf("[Charts] Set solo miners chart interval to %v", smIntv)
174 | 	ChartsInfoLogger.Printf("[Charts] Set solo miners chart interval to %v", smIntv)
175 | 
176 | 	go func() {
177 | 		for {
178 | 			select {
179 | 			case <-smTimer.C:
180 | 				stats := c.Api.getStats()
181 | 				now := util.MakeTimestamp() / 1000
182 | 				if stats["totalSoloMiners"] == nil {
183 | 					smTimer.Reset(smIntv)
184 | 				} else {
185 | 					log.Printf("[Charts] Solo Miners: %v", stats["totalSoloMiners"])
186 | 					cData := &ChartData{Timestamp: now, Value: stats["totalSoloMiners"].(int64)}
187 | 					for Graviton_backend.Writing == 1 {
188 | 						//log.Printf("[Charts-totalsolominers] GravitonDB is writing... sleeping for %v...", writeWait)
189 | 						//StorageInfoLogger.Printf("[Charts-totalsolominers] GravitonDB is writing... sleeping for %v...", writeWait)
190 | 						time.Sleep(writeWait)
191 | 					}
192 | 					Graviton_backend.Writing = 1
193 | 					Graviton_backend.WriteChartsData(cData, "totalsolominers", c.SoloChartsConfig.Interval, c.SoloChartsConfig.Miners.MaximumPeriod)
194 | 					Graviton_backend.Writing = 0
195 | 					smTimer.Reset(smIntv)
196 | 				}
197 | 			}
198 | 		}
199 | 	}()
200 | }
201 | 
202 | // Solo Workers
203 | if c.SoloChartsConfig.Workers.Enabled {
204 | 	swIntv := time.Duration(c.SoloChartsConfig.Interval) * time.Second
205 | 	swTimer := time.NewTimer(swIntv)
206 | 	log.Printf("[Charts] Set solo workers chart interval to %v", swIntv)
207 | 	ChartsInfoLogger.Printf("[Charts] Set solo workers chart interval to %v", swIntv)
208 | 
209 | 	go func() {
210 | 		for {
211 | 			select {
212 | 			case <-swTimer.C:
213 | 				stats := c.Api.getStats()
214 | 				now := util.MakeTimestamp() / 1000
215 | 				if stats["totalSoloWorkers"] == nil {
216 | 					swTimer.Reset(swIntv)
217 | 				} else {
218 | 					log.Printf("[Charts] Solo Workers: %v", stats["totalSoloWorkers"])
219 | 					cData := &ChartData{Timestamp: now, Value: stats["totalSoloWorkers"].(int64)}
220 | 					for Graviton_backend.Writing == 1 {
221 | 						//log.Printf("[Charts-totalsoloworkers] GravitonDB is writing... sleeping for %v...", writeWait)
222 | 						//StorageInfoLogger.Printf("[Charts-totalsoloworkers] GravitonDB is writing... 
sleeping for %v...", writeWait)
223 | 						time.Sleep(writeWait)
224 | 					}
225 | 					Graviton_backend.Writing = 1
226 | 					Graviton_backend.WriteChartsData(cData, "totalsoloworkers", c.SoloChartsConfig.Interval, c.SoloChartsConfig.Workers.MaximumPeriod)
227 | 					Graviton_backend.Writing = 0
228 | 					swTimer.Reset(swIntv)
229 | 				}
230 | 			}
231 | 		}
232 | 	}()
233 | }
234 | 
235 | // Pool Difficulty
236 | if c.PoolChartsConfig.Difficulty.Enabled {
237 | 	pdIntv := time.Duration(c.PoolChartsConfig.Interval) * time.Second
238 | 	pdTimer := time.NewTimer(pdIntv)
239 | 	log.Printf("[Charts] Set pool difficulty chart interval to %v", pdIntv)
240 | 	ChartsInfoLogger.Printf("[Charts] Set pool difficulty chart interval to %v", pdIntv)
241 | 
242 | 	go func() {
243 | 		for {
244 | 			select {
245 | 			case <-pdTimer.C:
246 | 				// Build lastblock stats
247 | 				var diff int64
248 | 				now := util.MakeTimestamp() / 1000
249 | 				v := c.Api.stratum.rpc()
250 | 				prevBlock, getHashERR := v.GetLastBlockHeader()
251 | 
252 | 				if getHashERR != nil {
253 | 					log.Printf("[Charts] Error while retrieving block from node: %v", getHashERR)
254 | 					ChartsErrorLogger.Printf("[Charts] Error while retrieving block from node: %v", getHashERR)
255 | 				} else {
256 | 					lastBlock := prevBlock.BlockHeader
257 | 					diff, _ = strconv.ParseInt(lastBlock.Difficulty, 10, 64)
258 | 				}
259 | 				log.Printf("[Charts] Pool Difficulty: %v", diff)
260 | 				cData := &ChartData{Timestamp: now, Value: diff}
261 | 				for Graviton_backend.Writing == 1 {
262 | 					//log.Printf("[Charts-pooldifficulty] GravitonDB is writing... sleeping for %v...", writeWait)
263 | 					//StorageInfoLogger.Printf("[Charts-pooldifficulty] GravitonDB is writing... sleeping for %v...", writeWait)
264 | 					time.Sleep(writeWait)
265 | 				}
266 | 				Graviton_backend.Writing = 1
267 | 				Graviton_backend.WriteChartsData(cData, "pooldifficulty", c.PoolChartsConfig.Interval, c.PoolChartsConfig.Difficulty.MaximumPeriod)
268 | 				Graviton_backend.Writing = 0
269 | 				pdTimer.Reset(pdIntv)
270 | 			}
271 | 		}
272 | 	}()
273 | }
274 | }
275 | 
276 | func logFileOutCharts(lType string) *log.Logger {
277 | 	var logFileName string
278 | 	if lType == "ERROR" {
279 | 		logFileName = "logs/chartsError.log"
280 | 	} else {
281 | 		logFileName = "logs/charts.log"
282 | 	}
283 | 	os.Mkdir("logs", 0705)
284 | 	f, err := os.OpenFile(logFileName, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0705)
285 | 	if err != nil {
286 | 		panic(err)
287 | 	}
288 | 
289 | 	logType := lType + ": "
290 | 	l := log.New(f, logType, log.LstdFlags|log.Lmicroseconds)
291 | 	return l
292 | }
293 | 
--------------------------------------------------------------------------------
/stratum/handlers.go:
--------------------------------------------------------------------------------
1 | package stratum
2 | 
3 | import (
4 | 	"encoding/hex"
5 | 	"log"
6 | 	"os"
7 | 	"regexp"
8 | 	"strconv"
9 | 	"strings"
10 | 	"sync/atomic"
11 | 	"time"
12 | 
13 | 	"github.com/Nelbert442/dero-golang-pool/util"
14 | )
15 | 
16 | const (
17 | 	paramAddr = iota
18 | 	paramWID = iota
19 | 	paramPID = iota
20 | 	paramDiff = iota
21 | 	paramDonPerc = iota
22 | )
23 | 
24 | var noncePattern *regexp.Regexp
25 | var HandlersInfoLogger = logFileOutHandlers("INFO")
26 | var HandlersErrorLogger = logFileOutHandlers("ERROR")
27 | 
28 | func init() {
29 | 	noncePattern, _ = regexp.Compile("^[0-9a-f]{8}$")
30 | }
31 | 
32 | func (s *StratumServer) handleLoginRPC(cs *Session, params *LoginParams) (*JobReply, *ErrorReply) {
33 | 
34 | 	var id string
35 | 	// Login validation / splitting optimized by Peppinux (https://github.com/peppinux)
36 | 	address, workID, paymentid, fixDiff, donatePerc, isSolo := 
s.splitLoginString(params.Login) 37 | 38 | // Initially set cs.difficulty. If there's no fixDiff defined, inside of cs.getJob the diff target will be set to cs.endpoint.difficulty, 39 | // otherwise will be set to fixDiff (as long as it's above min diff in config) 40 | if fixDiff != 0 { 41 | // If fixDiff is lower than mindiff, set equal to mindiff 42 | if fixDiff < uint64(cs.endpoint.config.MinDiff) { 43 | fixDiff = uint64(cs.endpoint.config.MinDiff) 44 | } 45 | cs.difficulty = int64(fixDiff) 46 | cs.isFixedDiff = true 47 | } else { 48 | cs.difficulty = cs.endpoint.config.Difficulty 49 | cs.isFixedDiff = false 50 | } 51 | 52 | // Take care of less than 0 or greater than 100 vals for donate percentages 53 | if donatePerc < 0 { 54 | donatePerc = 0 55 | } else if donatePerc > 100 { 56 | donatePerc = 100 57 | } 58 | 59 | // PaymentID Length Validation 60 | if paymentid != "" { 61 | if len(paymentid) == 16 || len(paymentid) == 64 { 62 | _, err := hex.DecodeString(paymentid) 63 | 64 | if err != nil { 65 | log.Printf("[Handlers] Invalid paymentID %s used for login by %s - %s", paymentid, cs.ip, params.Login) 66 | HandlersErrorLogger.Printf("[Handlers] Invalid paymentID %s used for login by %s - %s", paymentid, cs.ip, params.Login) 67 | return nil, &ErrorReply{Code: -1, Message: "Invalid paymentID used for login"} 68 | } 69 | } else { 70 | log.Printf("[Handlers] Invalid paymentID %s used for login by %s - %s", paymentid, cs.ip, params.Login) 71 | HandlersErrorLogger.Printf("[Handlers] Invalid paymentID %s used for login by %s - %s", paymentid, cs.ip, params.Login) 72 | return nil, &ErrorReply{Code: -1, Message: "Invalid paymentID used for login"} 73 | } 74 | 75 | // Adding paymentid onto the worker id because later when payments are processed, it's easily identifiable what is the paymentid to supply for creating tx etc. 76 | id = address + "+" + paymentid 77 | } 78 | 79 | // If solo is used, then add solo: to front of id for logging 80 | if isSolo && s.config.Stratum.SoloMining.Enabled { 81 | if id != "" { 82 | // If id is not "" (default value upon var), then it must have a paymentid 83 | id = "solo" + s.config.Stratum.SoloMining.AddressSeparator + id 84 | } else { 85 | id = "solo" + s.config.Stratum.SoloMining.AddressSeparator + address 86 | } 87 | } 88 | 89 | // If workID is used, then append with work separator, this will be easily deciphered later for payments. In future, could store id and values separately so that address payout is clearer 90 | if workID != address && workID != "" { 91 | if id != "" { 92 | // If id is not "" (default value upon var), then it must have a paymentid or is solo and has been set. So append workID to it 93 | id = id + s.config.Stratum.WorkerID.AddressSeparator + workID 94 | } else { 95 | // If id is "" (default value upon var), then it does not have paymentid and append workID to address normally 96 | id = address + s.config.Stratum.WorkerID.AddressSeparator + workID 97 | } 98 | } else { 99 | if id == "" { 100 | // If id is "" (default value upon var), then it does not have a paymentid and in this else doesn't have workid, so set id to address for default. 
Otherwise, id has already been set 101 | id = address 102 | } 103 | } 104 | 105 | switch s.config.Coin { 106 | case "DERO": 107 | if !util.ValidateAddress(address, s.config.Address) { 108 | log.Printf("[Handlers] Invalid address %s used for login by %s", address, cs.ip) 109 | HandlersErrorLogger.Printf("[Handlers] Invalid address %s used for login by %s", address, cs.ip) 110 | return nil, &ErrorReply{Code: -1, Message: "Invalid address used for login"} 111 | } 112 | default: 113 | if !util.ValidateAddressNonDERO(address, s.config.Address) { 114 | log.Printf("[Handlers] Invalid address %s used for login by %s", address, cs.ip) 115 | HandlersErrorLogger.Printf("[Handlers] Invalid address %s used for login by %s", address, cs.ip) 116 | return nil, &ErrorReply{Code: -1, Message: "Invalid address used for login"} 117 | } 118 | } 119 | 120 | t := s.currentBlockTemplate() 121 | if t == nil { 122 | return nil, &ErrorReply{Code: -1, Message: "Job not ready"} 123 | } 124 | 125 | miner, ok := s.miners.Get(id) 126 | if !ok { 127 | log.Printf("[Handlers] Registering new miner: %s@%s, Address: %s, PaymentID: %s, fixedDiff: %v, donatePercent: %v, isSolo: %v", id, cs.ip, address, paymentid, fixDiff, donatePerc, isSolo) 128 | HandlersInfoLogger.Printf("[Handlers] Registering new miner: %s@%s, Address: %s, PaymentID: %s, fixedDiff: %v, donatePercent: %v, isSolo: %v", id, cs.ip, address, paymentid, fixDiff, donatePerc, isSolo) 129 | miner = NewMiner(id, address, paymentid, fixDiff, workID, donatePerc, isSolo, cs.ip) 130 | s.registerMiner(miner) 131 | 132 | writeWait, _ := time.ParseDuration("10ms") 133 | for Graviton_backend.Writing == 1 { 134 | //log.Printf("[Handlers-handleLoginRPC] GravitonDB is writing... sleeping for %v...", writeWait) 135 | //StorageInfoLogger.Printf("[Handlers-handleLoginRPC] GravitonDB is writing... 
sleeping for %v...", writeWait) 136 | time.Sleep(writeWait) 137 | } 138 | Graviton_backend.Writing = 1 139 | Graviton_backend.WriteMinerIDRegistration(miner) 140 | Graviton_backend.Writing = 0 141 | } else { 142 | now := util.MakeTimestamp() / 1000 143 | miner.StartedAt = now 144 | miner.DonatePercent = donatePerc 145 | miner.PaymentID = paymentid 146 | miner.FixedDiff = fixDiff 147 | miner.IsSolo = isSolo 148 | miner.WorkID = workID 149 | } 150 | 151 | log.Printf("[Handlers] Miner connected %s@%s, Address: %s, PaymentID: %s, fixedDiff: %v, donatePercent: %v, isSolo: %v", id, cs.ip, address, paymentid, fixDiff, donatePerc, isSolo) 152 | HandlersInfoLogger.Printf("[Handlers] Miner connected %s@%s, Address: %s, PaymentID: %s, fixedDiff: %v, donatePercent: %v, isSolo: %v", id, cs.ip, address, paymentid, fixDiff, donatePerc, isSolo) 153 | 154 | s.registerSession(cs) 155 | miner.heartbeat() 156 | 157 | //log.Printf("[handleGetJobRPC] getJob: %v", cs.getJob(t)) 158 | job := cs.getJob(t, s, 0) 159 | return &JobReply{Id: id, Job: job, Status: "OK"}, nil 160 | } 161 | 162 | func (s *StratumServer) handleGetJobRPC(cs *Session, params *GetJobParams) (*JobReplyData, *ErrorReply) { 163 | miner, ok := s.miners.Get(params.Id) 164 | if !ok { 165 | return nil, &ErrorReply{Code: -1, Message: "Unauthenticated"} 166 | } 167 | t := s.currentBlockTemplate() 168 | if t == nil || s.isSick() { 169 | return nil, &ErrorReply{Code: -1, Message: "Job not ready"} 170 | } 171 | miner.heartbeat() 172 | 173 | reply := cs.getJob(t, s, 0) 174 | return reply, nil 175 | } 176 | 177 | func (s *StratumServer) handleSubmitRPC(cs *Session, params *SubmitParams) (*StatusReply, *ErrorReply) { 178 | miner, ok := s.miners.Get(params.Id) 179 | if !ok { 180 | return nil, &ErrorReply{Code: -1, Message: "Unauthenticated"} 181 | } 182 | miner.heartbeat() 183 | 184 | // Upon job submissions, miner(s) will get error message saying to contact pool owner when stratum .isSick() 185 | if s.isSick() { 186 | return nil, &ErrorReply{Code: -1, Message: "Server error. 
Contact pool owner."}
187 | 	}
188 | 
189 | 	job := cs.findJob(params.JobId)
190 | 	if job == nil {
191 | 		return nil, &ErrorReply{Code: -1, Message: "Invalid job id"}
192 | 	}
193 | 
194 | 	if !noncePattern.MatchString(params.Nonce) {
195 | 		return nil, &ErrorReply{Code: -1, Message: "Malformed nonce"}
196 | 	}
197 | 	nonce := strings.ToLower(params.Nonce)
198 | 	exist := job.submit(nonce)
199 | 	if exist {
200 | 		atomic.AddInt64(&miner.InvalidShares, 1)
201 | 		return nil, &ErrorReply{Code: -1, Message: "Duplicate share"}
202 | 	}
203 | 
204 | 	t := s.currentBlockTemplate()
205 | 	if job.height != t.Height {
206 | 		log.Printf("[Handlers] Stale share for height %d from %s@%s", job.height, miner.Id, cs.ip)
207 | 		HandlersErrorLogger.Printf("[Handlers] Stale share for height %d from %s@%s", job.height, miner.Id, cs.ip)
208 | 		atomic.AddInt64(&miner.StaleShares, 1)
209 | 		return nil, &ErrorReply{Code: -1, Message: "Block expired"}
210 | 	}
211 | 
212 | 	validShare, minerOutput := miner.processShare(s, cs, job, t, nonce, params)
213 | 	if !validShare {
214 | 		return nil, &ErrorReply{Code: -1, Message: minerOutput}
215 | 	}
216 | 	return &StatusReply{Status: "OK", Message: minerOutput}, nil
217 | }
218 | 
219 | func (s *StratumServer) handleUnknownRPC(req *JSONRpcReq) *ErrorReply {
220 | 	log.Printf("[Handlers] Unknown RPC method: %v", req)
221 | 	HandlersErrorLogger.Printf("[Handlers] Unknown RPC method: %v", req)
222 | 	return &ErrorReply{Code: -1, Message: "Invalid method"}
223 | }
224 | 
225 | func (s *StratumServer) broadcastNewJobs() {
226 | 	t := s.currentBlockTemplate()
227 | 	if t == nil || s.isSick() {
228 | 		return
229 | 	}
230 | 	s.sessionsMu.RLock()
231 | 	defer s.sessionsMu.RUnlock()
232 | 	count := len(s.sessions)
233 | 	log.Printf("[Handlers] Broadcasting new jobs to %d miners", count)
234 | 	HandlersInfoLogger.Printf("[Handlers] Broadcasting new jobs to %d miners", count)
235 | 	bcast := make(chan int, 1024*16)
236 | 	n := 0
237 | 
238 | 	for m := range s.sessions {
239 | 		n++
240 | 		bcast <- n
241 | 		go func(cs *Session) {
242 | 			reply := cs.getJob(t, s, 0)
243 | 			err := cs.pushMessage("job", &reply)
244 | 
245 | 			<-bcast
246 | 			if err != nil {
247 | 				log.Printf("[Handlers] Job transmit error to %s: %v", cs.ip, err)
248 | 				HandlersErrorLogger.Printf("[Handlers] Job transmit error to %s: %v", cs.ip, err)
249 | 				s.removeSession(cs)
250 | 			} else {
251 | 				s.setDeadline(cs.conn)
252 | 			}
253 | 		}(m)
254 | 	}
255 | }
256 | 
257 | func (s *StratumServer) updateFixedDiffJobs() {
258 | 	t := s.currentBlockTemplate()
259 | 	if t == nil || s.isSick() {
260 | 		return
261 | 	}
262 | 	s.sessionsMu.RLock()
263 | 	defer s.sessionsMu.RUnlock()
264 | 	bcast := make(chan int, 1024*16)
265 | 	n := 0
266 | 
267 | 	for m := range s.sessions {
268 | 		n++
269 | 		bcast <- n
270 | 		go func(cs *Session) {
271 | 			// Fixed-diff sessions are skipped; only vardiff sessions are retargeted
272 | 			if !cs.isFixedDiff {
273 | 				preJob := cs.difficulty
274 | 				newDiff := cs.calcVarDiff(float64(preJob), s)
275 | 				// If job diffs aren't the same, advertise new job
276 | 				if preJob != newDiff {
277 | 					reply := cs.getJob(t, s, newDiff)
278 | 					log.Printf("[Handlers] Retargeting difficulty from %v to %v for %v", preJob, newDiff, cs.ip)
279 | 					HandlersInfoLogger.Printf("[Handlers] Retargeting difficulty from %v to %v for %v", preJob, newDiff, cs.ip)
280 | 					cs.difficulty = newDiff
281 | 					err := cs.pushMessage("job", &reply)
282 | 					<-bcast
283 | 					if err != nil {
284 | 						log.Printf("[Handlers] Job transmit error to %s: %v", cs.ip, err)
285 | 						HandlersErrorLogger.Printf("[Handlers] Job transmit error to %s: %v", cs.ip, err)
286 | 						
s.removeSession(cs)
287 | 					} else {
288 | 						s.setDeadline(cs.conn)
289 | 					}
290 | 				}
291 | 			}
292 | 		}(m)
293 | 	}
294 | }
295 | 
296 | func (s *StratumServer) refreshBlockTemplate(bcast bool) {
297 | 	newBlock := s.fetchBlockTemplate()
298 | 	if newBlock && bcast {
299 | 		s.broadcastNewJobs()
300 | 	}
301 | }
302 | 
303 | // Optimized splitting functions with runes from @Peppinux (https://github.com/peppinux)
304 | func (s *StratumServer) splitLoginString(loginWorkerPair string) (addr, wid, pid string, diff uint64, donperc int64, isSolo bool) {
305 | 	currParam := paramAddr // String always starts with ADDRESS
306 | 	currSubstr := "" // Substring starts empty
307 | 
308 | 	// Check for the "solo"+separator prefix
309 | 	soloPair := "solo" + s.config.Stratum.SoloMining.AddressSeparator
310 | 	if strings.HasPrefix(loginWorkerPair, soloPair) {
311 | 		isSolo = true
312 | 		loginWorkerPair = loginWorkerPair[len(soloPair):] // shave off the "solo"+separator prefix; isSolo stays true so the prefix can be re-applied later [retains addr result properly]
313 | 		//log.Printf("%s", loginWorkerPair)
314 | 	} else {
315 | 		isSolo = false
316 | 	}
317 | 
318 | 	// Since input vals from json are string, need to convert to a rune array, then references just use [0] slice since these are just '@', '+', '.' in config.json
319 | 	widAddrSep := []rune(s.config.Stratum.WorkerID.AddressSeparator)
320 | 	pidAddrSep := []rune(s.config.Stratum.PaymentID.AddressSeparator)
321 | 	fDiffAddrSep := []rune(s.config.Stratum.FixedDiff.AddressSeparator)
322 | 	donPercAddrSep := []rune(s.config.Stratum.DonatePercent.AddressSeparator)
323 | 
324 | 	lastPos := len(loginWorkerPair) - 1
325 | 	for pos, c := range loginWorkerPair {
326 | 		if c != widAddrSep[0] && c != pidAddrSep[0] && c != fDiffAddrSep[0] && c != donPercAddrSep[0] && pos != lastPos {
327 | 			currSubstr += string(c)
328 | 		} else {
329 | 			if pos == lastPos {
330 | 				currSubstr += string(c)
331 | 			}
332 | 
333 | 			// Finalize substring
334 | 			switch currParam {
335 | 			case paramAddr:
336 | 				addr = currSubstr
337 | 			case paramWID:
338 | 				wid = currSubstr
339 | 			case paramPID:
340 | 				pid = currSubstr
341 | 			case paramDonPerc:
342 | 				donperc, _ = strconv.ParseInt(currSubstr, 10, 64)
343 | 			case paramDiff:
344 | 				diff, _ = strconv.ParseUint(currSubstr, 10, 64)
345 | 			}
346 | 
347 | 			// Reset substring and find out next param type
348 | 			currSubstr = ""
349 | 			switch c {
350 | 			case widAddrSep[0]:
351 | 				currParam = paramWID
352 | 			case pidAddrSep[0]:
353 | 				currParam = paramPID
354 | 			case fDiffAddrSep[0]:
355 | 				currParam = paramDiff
356 | 			case donPercAddrSep[0]:
357 | 				currParam = paramDonPerc
358 | 			}
359 | 		}
360 | 	}
361 | 	return
362 | }
363 | 
364 | func logFileOutHandlers(lType string) *log.Logger {
365 | 	var logFileName string
366 | 	if lType == "ERROR" {
367 | 		logFileName = "logs/handlersError.log"
368 | 	} else {
369 | 		logFileName = "logs/handlers.log"
370 | 	}
371 | 	os.Mkdir("logs", 0705)
372 | 	f, err := os.OpenFile(logFileName, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0705)
373 | 	if err != nil {
374 | 		panic(err)
375 | 	}
376 | 
377 | 	logType := lType + ": "
378 | 	l := log.New(f, logType, log.LstdFlags|log.Lmicroseconds)
379 | 	return l
380 | }
381 | 
--------------------------------------------------------------------------------
/stratum/miner.go:
--------------------------------------------------------------------------------
1 | package stratum
2 | 
3 | import (
4 | 	"bytes"
5 | 	"encoding/binary"
6 | 	"encoding/hex"
7 | 	"encoding/json"
8 | 	"fmt"
9 | 	"log"
10 | 	"math/big"
11 | 	"os"
12 | 	"sort"
13 | 	"strconv"
14 | 	"sync"
15 | 	"sync/atomic"
16 | 	
"time" 17 | 18 | "github.com/Nelbert442/dero-golang-pool/rpc" 19 | "github.com/Nelbert442/dero-golang-pool/util" 20 | ) 21 | 22 | type Job struct { 23 | height uint64 24 | sync.RWMutex 25 | id string 26 | extraNonce uint32 27 | submissions map[string]struct{} 28 | } 29 | 30 | type Miner struct { 31 | LastBeat int64 32 | StartedAt int64 33 | EventDataOffset int64 34 | //EventDataTempTime int64 35 | ValidShares int64 36 | InvalidShares int64 37 | StaleShares int64 38 | TrustedShares int64 39 | Accepts int64 40 | Rejects int64 41 | Shares map[int64]int64 42 | LastRoundShares int64 43 | RoundShares int64 44 | RoundHeight int64 45 | Hashrate int64 46 | Offline bool 47 | sync.RWMutex 48 | Id string 49 | Address string 50 | PaymentID string 51 | FixedDiff uint64 52 | IsSolo bool 53 | WorkID string 54 | Ip string 55 | DonatePercent int64 56 | DonationTotal int64 57 | } 58 | 59 | var MinerInfoLogger = logFileOutMiner("INFO") 60 | var MinerErrorLogger = logFileOutMiner("ERROR") 61 | 62 | func (job *Job) submit(nonce string) bool { 63 | job.Lock() 64 | defer job.Unlock() 65 | if _, exist := job.submissions[nonce]; exist { 66 | return true 67 | } 68 | job.submissions[nonce] = struct{}{} 69 | return false 70 | } 71 | 72 | func NewMiner(id string, address string, paymentid string, fixedDiff uint64, workID string, donationPercent int64, isSolo bool, ip string) *Miner { 73 | shares := make(map[int64]int64) 74 | now := util.MakeTimestamp() / 1000 75 | return &Miner{Id: id, Address: address, PaymentID: paymentid, FixedDiff: fixedDiff, IsSolo: isSolo, WorkID: workID, DonatePercent: donationPercent, Ip: ip, Shares: shares, StartedAt: now} 76 | } 77 | 78 | func (cs *Session) calcVarDiff(currDiff float64, s *StratumServer) int64 { 79 | var newDiff float64 80 | timestamp := time.Now().Unix() 81 | 82 | variance := s.config.Stratum.VarDiff.VariancePercent / 100 * float64(s.config.Stratum.VarDiff.TargetTime) 83 | tMin := float64(s.config.Stratum.VarDiff.TargetTime) - variance 84 | tMax := float64(s.config.Stratum.VarDiff.TargetTime) + variance 85 | 86 | // Set last time varDiff config was handled, usually done initially and builds the map for timestamparr 87 | if cs.VarDiff.LastRetargetTimestamp == 0 { 88 | cs.VarDiff.LastRetargetTimestamp = timestamp - s.config.Stratum.VarDiff.RetargetTime/2 89 | cs.VarDiff.LastTimeStamp = timestamp 90 | cs.VarDiff.TimestampArr = make(map[int64]int64) 91 | 92 | return int64(currDiff) 93 | } 94 | 95 | if (timestamp - cs.VarDiff.LastRetargetTimestamp) < s.config.Stratum.VarDiff.RetargetTime { 96 | return int64(currDiff) 97 | } 98 | 99 | if len(cs.VarDiff.TimestampArr) <= 0 { 100 | sinceLast := timestamp - cs.VarDiff.LastTimeStamp 101 | cs.VarDiff.TimestampArr[sinceLast] += sinceLast 102 | } 103 | cs.VarDiff.LastRetargetTimestamp = timestamp 104 | 105 | var avg float64 106 | var sum int64 107 | for _, v := range cs.VarDiff.TimestampArr { 108 | sum = sum + v 109 | } 110 | 111 | avg = float64(sum) / float64(len(cs.VarDiff.TimestampArr)) 112 | 113 | diffCalc := float64(s.config.Stratum.VarDiff.TargetTime) / avg 114 | 115 | if avg > tMax && currDiff >= float64(s.config.Stratum.VarDiff.MinDiff) { 116 | if diffCalc*currDiff < float64(s.config.Stratum.VarDiff.MinDiff) { 117 | diffCalc = float64(s.config.Stratum.VarDiff.MinDiff) / currDiff 118 | } 119 | } else if avg < tMin { 120 | diffMax := float64(s.config.Stratum.VarDiff.MaxDiff) 121 | 122 | if diffCalc*currDiff > diffMax { 123 | diffCalc = diffMax / currDiff 124 | } 125 | } else { 126 | return int64(currDiff) 127 | } 128 | 129 | newDiff 
= currDiff * diffCalc 130 | 131 | if newDiff <= 0 { 132 | newDiff = currDiff 133 | } 134 | 135 | maxJump := s.config.Stratum.VarDiff.MaxJump / 100 * currDiff 136 | 137 | // Prevent diff scale up/down to be more than maxJump %. 138 | if newDiff > currDiff && !(newDiff-maxJump <= currDiff) { 139 | newDiff = currDiff + maxJump 140 | } else if currDiff > newDiff && !(newDiff+(maxJump) >= currDiff) { 141 | newDiff = currDiff - (maxJump) 142 | } 143 | 144 | // Reset timestampArr 145 | cs.VarDiff.TimestampArr = make(map[int64]int64) 146 | 147 | return int64(newDiff) 148 | } 149 | 150 | func (cs *Session) getJob(t *BlockTemplate, s *StratumServer, diff int64) *JobReplyData { 151 | if diff == 0 { 152 | diff = cs.difficulty 153 | } 154 | 155 | lastBlockHeight := cs.lastBlockHeight 156 | if lastBlockHeight == t.Height { 157 | return &JobReplyData{} 158 | } 159 | 160 | // Define difficulty and set targetHex = util.GetTargetHex(cs.difficulty) else targetHex == cs.endpoint.targetHex 161 | var targetHex string 162 | 163 | if diff != 0 && cs.isFixedDiff { // If fixed difficulty is defined 164 | if diff >= cs.endpoint.config.MinDiff { 165 | targetHex = util.GetTargetHex(diff) 166 | } else { 167 | targetHex = util.GetTargetHex(cs.endpoint.config.MinDiff) 168 | } 169 | } else { // If vardiff is enabled, otherwise use the default value of the session 170 | if s.config.Stratum.VarDiff.Enabled { 171 | targetHex = util.GetTargetHex(diff) 172 | } else { // If not fixed diff and vardiff is not enabled, use default config difficulty and targetHex 173 | targetHex = cs.endpoint.targetHex 174 | } 175 | } 176 | 177 | extraNonce := atomic.AddUint32(&cs.endpoint.extraNonce, 1) 178 | blob := t.nextBlob(extraNonce, cs.endpoint.instanceId) 179 | id := atomic.AddUint64(&cs.endpoint.jobSequence, 1) 180 | job := &Job{ 181 | id: strconv.FormatUint(id, 10), 182 | extraNonce: extraNonce, 183 | height: t.Height, 184 | } 185 | job.submissions = make(map[string]struct{}) 186 | cs.pushJob(job) 187 | reply := &JobReplyData{JobId: job.id, Blob: blob, Target: targetHex, Algo: s.config.Algo, Height: t.Height} 188 | return reply 189 | } 190 | 191 | func (cs *Session) pushJob(job *Job) { 192 | cs.Lock() 193 | defer cs.Unlock() 194 | cs.validJobs = append(cs.validJobs, job) 195 | 196 | if len(cs.validJobs) > 4 { 197 | cs.validJobs = cs.validJobs[1:] 198 | } 199 | } 200 | 201 | func (cs *Session) findJob(id string) *Job { 202 | cs.Lock() 203 | defer cs.Unlock() 204 | for _, job := range cs.validJobs { 205 | if job.id == id { 206 | return job 207 | } 208 | } 209 | return nil 210 | } 211 | 212 | func (m *Miner) heartbeat() { 213 | now := util.MakeTimestamp() / 1000 214 | atomic.StoreInt64(&m.LastBeat, now) 215 | } 216 | 217 | func (m *Miner) storeShare(diff, minershares, templateHeight int64, hashrateExpiration time.Duration) { 218 | now := util.MakeTimestamp() / 1000 219 | hashExpiration := int64(hashrateExpiration / time.Second) 220 | 221 | if m.IsSolo { 222 | // If miner is solo, we don't care about updating roundheight/roundshares etc. 
These vals aren't used as upon a solo block being found, the address who finds get all rewards 223 | // Just normal tracking of shares for hashrate purposes 224 | m.Lock() 225 | m.Shares[now] += diff 226 | 227 | for k := range m.Shares { 228 | if k < now-hashExpiration { 229 | delete(m.Shares, k) 230 | } 231 | } 232 | 233 | m.Unlock() 234 | } else { 235 | 236 | blockHeightArr := Graviton_backend.GetBlocksFoundByHeightArr() 237 | 238 | if blockHeightArr != nil { 239 | // Create slice of heights that do not include solo blocks. This will be used to compare the last block found against miner heights below 240 | var heights []int64 241 | for height, isSolo := range blockHeightArr.Heights { 242 | if !isSolo { 243 | heights = append(heights, height) 244 | } 245 | } 246 | // Sort heights so most recent is index 0 [if preferred reverse, just swap > with <] 247 | sort.SliceStable(heights, func(i, j int) bool { 248 | return heights[i] > heights[j] 249 | }) 250 | } 251 | 252 | m.Lock() 253 | // No need to add blank diff shares to m.Shares. Usually only 0 if running NextRound from storage.go 254 | if diff != 0 { 255 | m.Shares[now] += diff 256 | 257 | for k := range m.Shares { 258 | if k < now-hashExpiration { 259 | delete(m.Shares, k) 260 | } 261 | } 262 | } 263 | m.Unlock() 264 | } 265 | } 266 | 267 | func (m *Miner) getHashrate(estimationWindow, hashrateExpiration time.Duration) int64 { 268 | now := util.MakeTimestamp() / 1000 269 | totalShares := int64(0) 270 | // Convert time window (such as 10m) to seconds 271 | window := int64(estimationWindow / time.Second) 272 | boundary := now - m.StartedAt 273 | 274 | if boundary >= window { 275 | boundary = window 276 | } 277 | 278 | m.Lock() 279 | 280 | for k, v := range m.Shares { 281 | if k >= now-boundary { 282 | totalShares += v 283 | } 284 | } 285 | 286 | m.Unlock() 287 | 288 | return int64(float64(totalShares) / float64(boundary)) 289 | } 290 | 291 | func (m *Miner) processShare(s *StratumServer, cs *Session, job *Job, t *BlockTemplate, nonce string, params *SubmitParams) (bool, string) { 292 | 293 | // Var definitions 294 | var extraMinerMessage string 295 | var checkPowHashBig bool 296 | var success bool 297 | var bypassShareValidation bool 298 | var result string = params.Result 299 | var shareType string 300 | var hashBytes []byte 301 | var diff big.Int 302 | var donation float64 303 | diff.SetUint64(t.Difficulty) 304 | var setDiff big.Int 305 | setDiff.SetUint64(uint64(cs.difficulty)) 306 | r := s.rpc() 307 | 308 | shareBuff := make([]byte, len(t.Buffer)) 309 | copy(shareBuff, t.Buffer) 310 | copy(shareBuff[t.Reserved_Offset+4:t.Reserved_Offset+7], cs.endpoint.instanceId) 311 | 312 | extraBuff := new(bytes.Buffer) 313 | binary.Write(extraBuff, binary.BigEndian, job.extraNonce) 314 | copy(shareBuff[t.Reserved_Offset:], extraBuff.Bytes()) 315 | 316 | nonceBuff, _ := hex.DecodeString(nonce) 317 | copy(shareBuff[39:], nonceBuff) 318 | 319 | // After trustedSharesCount is hit (number of accepted shares in a row based on config.json), hash validation will be skipped until an incorrect hash is submitted 320 | if atomic.LoadInt64(&m.TrustedShares) >= s.trustedSharesCount { 321 | shareType = "Trusted" 322 | } else { 323 | shareType = "Valid" 324 | } 325 | 326 | // Append share type, solo or pool for logging assistance 327 | if m.IsSolo { 328 | shareType = shareType + " SOLO" 329 | } else { 330 | shareType = shareType + " POOL" 331 | } 332 | 333 | hashBytes, _ = hex.DecodeString(result) 334 | 335 | if s.config.BypassShareValidation || shareType == 
"Trusted SOLO" || shareType == "Trusted POOL" { 336 | bypassShareValidation = true 337 | } else { 338 | switch s.algo { 339 | case "astrobwt": 340 | checkPowHashBig, success = util.AstroBWTHash(shareBuff[:], diff, setDiff) 341 | 342 | if !success { 343 | minerOutput := "Bad hash. If you see often [> 1/10 shares on avg], check input on miner software." 344 | log.Printf("[Miner] Bad hash, check input on miner software, from miner %v@%v", m.Id, cs.ip) 345 | MinerErrorLogger.Printf("[Miner] Bad hash, check input on miner software, from miner %v@%v", m.Id, cs.ip) 346 | 347 | if shareType == "Trusted" { 348 | log.Printf("[Miner] Miner is no longer submitting trusted shares: %v@%v", m.Id, cs.ip) 349 | MinerErrorLogger.Printf("[Miner] Miner is no longer submitting trusted shares: %v@%v", m.Id, cs.ip) 350 | shareType = "Valid" 351 | } 352 | 353 | atomic.AddInt64(&m.InvalidShares, 1) 354 | atomic.StoreInt64(&m.TrustedShares, 0) 355 | return false, minerOutput 356 | } 357 | 358 | atomic.AddInt64(&m.TrustedShares, 1) 359 | case "cryptonight": 360 | checkPowHashBig = util.CryptonightHash(shareBuff, diff) 361 | 362 | atomic.AddInt64(&m.TrustedShares, 1) 363 | default: 364 | // Handle when no algo is defined or unhandled algo is defined, let miner know issues (properly gets sent back in job detail rejection message) 365 | minerOutput := "Rejected share, no pool algo defined. Contact pool owner." 366 | log.Printf("[Miner] Rejected share, no pool algo defined (%s). Contact pool owner - from %v@%v", s.algo, m.Id, cs.ip) 367 | MinerErrorLogger.Printf("[Miner] Rejected share, no pool algo defined (%s). Contact pool owner - from %v@%v", s.algo, m.Id, cs.ip) 368 | return false, minerOutput 369 | } 370 | } 371 | 372 | hashDiff, ok := util.GetHashDifficulty(hashBytes) 373 | if !ok { 374 | minerOutput := "Bad hash" 375 | log.Printf("[Miner] Bad hash from miner %v@%v . Could not get hash difficulty.", m.Id, cs.ip) 376 | MinerErrorLogger.Printf("[Miner] Bad hash from miner %v@%v . Could not get hash difficulty.", m.Id, cs.ip) 377 | atomic.AddInt64(&m.InvalidShares, 1) 378 | return false, minerOutput 379 | } 380 | 381 | // May be redundant, or use instead of CheckPowHashBig in future. 382 | block := hashDiff.Cmp(&diff) >= 0 383 | 384 | // If bypassing share validation (either with true/false of config or miner is trusted), block should define properly if a block is found and can set checkPowHashBig to true. Perhaps future improvements to be made here 385 | if block && bypassShareValidation { 386 | checkPowHashBig = true 387 | } 388 | 389 | if checkPowHashBig && block { 390 | blockSubmit, err := r.SubmitBlock(t.Blocktemplate_blob, hex.EncodeToString(shareBuff)) 391 | var blockSubmitReply *rpc.SubmitBlock_Result = &rpc.SubmitBlock_Result{} 392 | 393 | if blockSubmit != nil { 394 | if blockSubmit.Result != nil { 395 | err = json.Unmarshal(*blockSubmit.Result, &blockSubmitReply) 396 | } 397 | } 398 | 399 | if err != nil || blockSubmitReply.Status != "OK" { 400 | atomic.AddInt64(&m.Rejects, 1) 401 | atomic.AddInt64(&r.Rejects, 1) 402 | log.Printf("[BLOCK] Block rejected at height %d: %v", t.Height, err) 403 | MinerErrorLogger.Printf("[BLOCK] Block rejected at height %d: %v", t.Height, err) 404 | return false, "Bad hash" 405 | } else { 406 | log.Printf("[BLOCK] Block accepted. Hash: %s, Status: %s", blockSubmitReply.BLID, blockSubmitReply.Status) 407 | MinerInfoLogger.Printf("[BLOCK] Block accepted. 
Hash: %s, Status: %s", blockSubmitReply.BLID, blockSubmitReply.Status) 408 | 409 | now := util.MakeTimestamp() / 1000 410 | 411 | atomic.AddInt64(&m.Accepts, 1) 412 | atomic.AddInt64(&r.Accepts, 1) 413 | atomic.StoreInt64(&r.LastSubmissionAt, now) 414 | 415 | if m.IsSolo { 416 | log.Printf("[BLOCK] SOLO Block found at height %d, diff: %v, blid: %s, by miner: %v@%v", t.Height, t.Difficulty, blockSubmitReply.BLID, m.Id, cs.ip) 417 | MinerInfoLogger.Printf("[BLOCK] SOLO Block found at height %d, diff: %v, blid: %s, by miner: %v@%v", t.Height, t.Difficulty, blockSubmitReply.BLID, m.Id, cs.ip) 418 | } else { 419 | log.Printf("[BLOCK] POOL Block found at height %d, diff: %v, blid: %s, by miner: %v@%v", t.Height, t.Difficulty, blockSubmitReply.BLID, m.Id, cs.ip) 420 | MinerInfoLogger.Printf("[BLOCK] POOL Block found at height %d, diff: %v, blid: %s, by miner: %v@%v", t.Height, t.Difficulty, blockSubmitReply.BLID, m.Id, cs.ip) 421 | } 422 | 423 | // Graviton store of successful block 424 | // This could be 'cleaned' to one-liners etc., but just depends on how you feel. Upon build/testing was simpler to view in-line for spec values 425 | ts := util.MakeTimestamp() / 1000 426 | info := &BlockDataGrav{} 427 | info.Height = int64(t.Height) 428 | info.RoundHeight = int64(t.Height) 429 | info.Hash = blockSubmitReply.BLID 430 | info.Nonce = params.Nonce 431 | info.PowHash = result 432 | info.Timestamp = ts 433 | info.Difficulty = int64(t.Difficulty) 434 | // TotalShares val will be gotten from DB 435 | info.TotalShares = 0 436 | info.Solo = m.IsSolo 437 | info.Address = m.Address 438 | info.BlockState = "candidate" 439 | 440 | if m.DonatePercent > 0 && m.Address != s.donateID { 441 | donation = float64(m.DonatePercent) / 100 * float64(cs.difficulty) 442 | atomic.AddInt64(&m.DonationTotal, int64(donation)) 443 | 444 | donateMiner, ok := s.miners.Get(s.donateID) 445 | if !ok { 446 | log.Printf("[Miner] Miner %v@%v intended to donate %v shares, however donation miner is not setup.", params.Id, cs.ip, int64(donation)) 447 | MinerErrorLogger.Printf("[Miner] Miner %v@%v intended to donate %v shares, however donation miner is not setup.", params.Id, cs.ip, int64(donation)) 448 | } else { 449 | log.Printf("[Miner] Miner %v@%v donated %v shares.", params.Id, cs.ip, int64(donation)) 450 | MinerInfoLogger.Printf("[Miner] Miner %v@%v donated %v shares.", params.Id, cs.ip, int64(donation)) 451 | donateMiner.storeShare(cs.difficulty, int64(donation), int64(t.Height), s.hashrateExpiration) 452 | } 453 | } 454 | 455 | m.Lock() 456 | // No need to add blank diff shares to m.Shares. Usually only 0 if running NextRound from storage.go 457 | if cs.difficulty != 0 { 458 | m.Shares[now] += cs.difficulty 459 | } 460 | m.Unlock() 461 | 462 | // Only update next round miner stats if a pool block is found, so can determine this by the miner who found the block's solo status 463 | if !m.IsSolo { 464 | log.Printf("[Miner] Updating miner stats in DB for current round...") 465 | MinerInfoLogger.Printf("[Miner] Updating miner stats in DB for current round...") 466 | 467 | writeWait, _ := time.ParseDuration("10ms") 468 | for Graviton_backend.Writing == 1 { 469 | //log.Printf("[Miner-processShare] GravitonDB is writing... sleeping for %v...", writeWait) 470 | //StorageInfoLogger.Printf("[Miner-processShare] GravitonDB is writing... 
sleeping for %v...", writeWait) 471 | time.Sleep(writeWait) 472 | } 473 | Graviton_backend.Writing = 1 474 | _ = Graviton_backend.WriteMinerStats(s.miners, s.hashrateExpiration) 475 | 476 | //Graviton_backend.Writing = 1 477 | log.Printf("[Miner] Adding block to graviton...") 478 | MinerInfoLogger.Printf("[Miner] Adding block to graviton...") 479 | infoErr := Graviton_backend.WriteBlocks(info, info.BlockState) 480 | //Graviton_backend.Writing = 0 481 | if infoErr != nil { 482 | log.Printf("[BLOCK] Graviton DB err: %v", infoErr) 483 | MinerErrorLogger.Printf("[BLOCK] Graviton DB err: %v", infoErr) 484 | } 485 | 486 | _ = Graviton_backend.UpdatePoolRoundStats(s.miners, true) 487 | Graviton_backend.Writing = 0 488 | } else { 489 | writeWait, _ := time.ParseDuration("10ms") 490 | for Graviton_backend.Writing == 1 { 491 | //log.Printf("[Miner-processShare] GravitonDB is writing... sleeping for %v...", writeWait) 492 | //StorageInfoLogger.Printf("[Miner-processShare] GravitonDB is writing... sleeping for %v...", writeWait) 493 | time.Sleep(writeWait) 494 | } 495 | Graviton_backend.Writing = 1 496 | infoErr := Graviton_backend.WriteBlocks(info, info.BlockState) 497 | Graviton_backend.Writing = 0 498 | if infoErr != nil { 499 | log.Printf("[BLOCK] Graviton DB err: %v", infoErr) 500 | MinerErrorLogger.Printf("[BLOCK] Graviton DB err: %v", infoErr) 501 | } 502 | } 503 | 504 | // Refresh current BT and send new jobs 505 | s.refreshBlockTemplate(true) 506 | } 507 | } else if hashDiff.Cmp(&setDiff) < 0 { 508 | minerOutput := "Low difficulty share" 509 | log.Printf("[Miner] Rejected low difficulty share of %v / %v from %v@%v", hashDiff, setDiff, m.Id, cs.ip) 510 | MinerErrorLogger.Printf("[Miner] Rejected low difficulty share of %v / %v from %v@%v", hashDiff, setDiff, m.Id, cs.ip) 511 | atomic.AddInt64(&m.InvalidShares, 1) 512 | return false, minerOutput 513 | } 514 | 515 | // Using minermap to store share data rather than direct to DB, future scale might have issues with the large concurrent writes to DB directly 516 | // Minermap allows for concurrent writes easily and quickly, then every x seconds defined in stratum that map gets written/stored to disk DB [5 seconds prob] 517 | 518 | // Store share for current height and current round shares on normal basis. 
If block && checkPowHashBig, miner round share has already been counted, no need to double count here 519 | if !block && !checkPowHashBig { 520 | // If miner is donating, take % out of cs.difficulty (share amount stored) and storeShare to donation addr 521 | if m.DonatePercent > 0 && m.Address != s.donateID { 522 | donation = float64(m.DonatePercent) / 100 * float64(cs.difficulty) 523 | atomic.AddInt64(&m.DonationTotal, int64(donation)) 524 | 525 | donateMiner, ok := s.miners.Get(s.donateID) 526 | if !ok { 527 | log.Printf("[Miner] Miner %v@%v intended to donate %v shares, however donation miner is not setup.", params.Id, cs.ip, int64(donation)) 528 | MinerErrorLogger.Printf("[Miner] Miner %v@%v intended to donate %v shares, however donation miner is not setup.", params.Id, cs.ip, int64(donation)) 529 | } else { 530 | log.Printf("[Miner] Miner %v@%v donated %v shares.", params.Id, cs.ip, int64(donation)) 531 | MinerInfoLogger.Printf("[Miner] Miner %v@%v donated %v shares.", params.Id, cs.ip, int64(donation)) 532 | donateMiner.storeShare(int64(donation), int64(donation), int64(t.Height), s.hashrateExpiration) 533 | } 534 | 535 | minerShare := cs.difficulty - int64(donation) 536 | m.storeShare(cs.difficulty, minerShare, int64(t.Height), s.hashrateExpiration) 537 | } else { 538 | m.storeShare(cs.difficulty, cs.difficulty, int64(t.Height), s.hashrateExpiration) 539 | } 540 | } else { 541 | // Add extra miner message to return back to mining software if a block is found by the miner - only certain miner software will read/use these results 542 | if m.IsSolo { 543 | extraMinerMessage = fmt.Sprintf("SOLO Block found at height %d, diff: %v, by you!", t.Height, t.Difficulty) 544 | } else { 545 | extraMinerMessage = fmt.Sprintf("POOL Block found at height %d, diff: %v, by you!", t.Height, t.Difficulty) 546 | } 547 | } 548 | 549 | atomic.AddInt64(&m.ValidShares, 1) 550 | 551 | log.Printf("[Miner] %s share at difficulty %v/%v from %v@%v", shareType, cs.difficulty, hashDiff, params.Id, cs.ip) 552 | MinerInfoLogger.Printf("[Miner] %s share at difficulty %v/%v from %v@%v", shareType, cs.difficulty, hashDiff, params.Id, cs.ip) 553 | 554 | ts := time.Now().Unix() 555 | // Omit the first round to setup the vars if they aren't setup, otherwise commit to timestamparr 556 | if cs.VarDiff.LastRetargetTimestamp == 0 { 557 | cs.VarDiff.LastRetargetTimestamp = ts - s.config.Stratum.VarDiff.RetargetTime/2 558 | cs.VarDiff.LastTimeStamp = ts 559 | cs.VarDiff.TimestampArr = make(map[int64]int64) 560 | } else { 561 | sinceLast := ts - cs.VarDiff.LastTimeStamp 562 | cs.VarDiff.TimestampArr[sinceLast] += sinceLast 563 | cs.VarDiff.LastTimeStamp = ts 564 | } 565 | 566 | s.miners.Set(m.Id, m) 567 | 568 | if m.DonatePercent > 0 && m.Address != s.donateID { 569 | // Append if there is previous extra miner message being sent - like found block etc. 
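// Worked example of the donation split computed earlier in processShare:
// with DonatePercent = 5 and cs.difficulty = 5000 (values chosen purely for
// illustration),
//
//	donation   = float64(5) / 100 * float64(5000) // 250 shares
//	minerShare = 5000 - int64(donation)           // 4750 shares
//
// so the donateID miner is credited 250 shares and the submitting miner keeps 4750.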
570 | if extraMinerMessage != "" { 571 | extraMinerMessage = fmt.Sprintf("%s -- Donated %v shares - Thank you!", extraMinerMessage, int64(donation)) 572 | } else { 573 | extraMinerMessage = fmt.Sprintf("Donated %v shares - Thank you!", int64(donation)) 574 | } 575 | } 576 | 577 | return true, extraMinerMessage 578 | } 579 | 580 | func logFileOutMiner(lType string) *log.Logger { 581 | var logFileName string 582 | if lType == "ERROR" { 583 | logFileName = "logs/minerError.log" 584 | } else { 585 | logFileName = "logs/miner.log" 586 | } 587 | os.Mkdir("logs", 0705) 588 | f, err := os.OpenFile(logFileName, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0705) 589 | if err != nil { 590 | panic(err) 591 | } 592 | 593 | logType := lType + ": " 594 | l := log.New(f, logType, log.LstdFlags|log.Lmicroseconds) 595 | return l 596 | } 597 | -------------------------------------------------------------------------------- /stratum/mmap.go: -------------------------------------------------------------------------------- 1 | // Generated from https://github.com/streamrail/concurrent-map 2 | package stratum 3 | 4 | import ( 5 | "hash/fnv" 6 | "sync" 7 | ) 8 | 9 | var SHARD_COUNT = 32 10 | 11 | // A "thread" safe map of type string:*Miner. 12 | // To avoid lock bottlenecks this map is divided into several (SHARD_COUNT) map shards. 13 | type MinersMap []*MinersMapShared 14 | type MinersMapShared struct { 15 | Items map[string]*Miner 16 | sync.RWMutex // Read Write mutex, guards access to internal map. 17 | } 18 | 19 | // Creates a new concurrent map. 20 | func NewMinersMap() MinersMap { 21 | m := make(MinersMap, SHARD_COUNT) 22 | for i := 0; i < SHARD_COUNT; i++ { 23 | m[i] = &MinersMapShared{Items: make(map[string]*Miner)} 24 | } 25 | return m 26 | } 27 | 28 | // Returns shard under given key 29 | func (m MinersMap) GetShard(key string) *MinersMapShared { 30 | hasher := fnv.New32() 31 | hasher.Write([]byte(key)) 32 | return m[int(hasher.Sum32())%SHARD_COUNT] 33 | } 34 | 35 | // Sets the given value under the specified key. 36 | func (m MinersMap) Set(key string, value *Miner) { 37 | // Get map shard. 38 | shard := m.GetShard(key) 39 | shard.Lock() 40 | defer shard.Unlock() 41 | shard.Items[key] = value 42 | } 43 | 44 | // Retrieves an element from map under given key. 45 | func (m MinersMap) Get(key string) (*Miner, bool) { 46 | // Get shard 47 | shard := m.GetShard(key) 48 | shard.RLock() 49 | defer shard.RUnlock() 50 | 51 | // Get item from shard. 52 | val, ok := shard.Items[key] 53 | return val, ok 54 | } 55 | 56 | // Removes an element from the map. 57 | func (m MinersMap) Remove(key string) { 58 | // Try to get shard. 59 | shard := m.GetShard(key) 60 | shard.Lock() 61 | defer shard.Unlock() 62 | delete(shard.Items, key) 63 | } 64 | 65 | // Returns the number of elements within the map. 66 | func (m MinersMap) Count() int { 67 | count := 0 68 | for i := 0; i < SHARD_COUNT; i++ { 69 | shard := m[i] 70 | shard.RLock() 71 | count += len(shard.Items) 72 | shard.RUnlock() 73 | } 74 | return count 75 | } 76 | 77 | // Checks if map is empty. 78 | func (m MinersMap) IsEmpty() bool { 79 | return m.Count() == 0 80 | } 81 | 82 | // Looks up an item under specified key 83 | /* 84 | func (m *MinersMap) Has(key string) bool { 85 | // Get shard 86 | shard := m.GetShard(key) 87 | shard.RLock() 88 | defer shard.RUnlock() 89 | 90 | // See if element is within shard.
91 | _, ok := shard.Items[key] 92 | return ok 93 | } 94 | 95 | // Unused/old code functions 96 | // Used by the Iter & IterBuffered functions to wrap two variables together over a channel. 97 | type Tuple struct { 98 | Key string 99 | Val *Miner 100 | } 101 | 102 | // Returns an iterator which could be used in a for range loop. 103 | func (m MinersMap) Iter() <-chan Tuple { 104 | ch := make(chan Tuple) 105 | go func() { 106 | // Foreach shard. 107 | for _, shard := range m { 108 | // Foreach key, value pair. 109 | shard.RLock() 110 | for key, val := range shard.Items { 111 | ch <- Tuple{key, val} 112 | } 113 | shard.RUnlock() 114 | } 115 | close(ch) 116 | }() 117 | return ch 118 | } 119 | 120 | // Returns a buffered iterator which could be used in a for range loop. 121 | func (m MinersMap) IterBuffered() <-chan Tuple { 122 | ch := make(chan Tuple, m.Count()) 123 | go func() { 124 | // Foreach shard. 125 | for _, shard := range m { 126 | // Foreach key, value pair. 127 | shard.RLock() 128 | for key, val := range shard.Items { 129 | ch <- Tuple{key, val} 130 | } 131 | shard.RUnlock() 132 | } 133 | close(ch) 134 | }() 135 | return ch 136 | } 137 | */ 138 | -------------------------------------------------------------------------------- /stratum/payments.go: -------------------------------------------------------------------------------- 1 | // Some payments integration functions and ideas from: https://github.com/JKKGBE/open-zcash-pool which is a fork of https://github.com/sammy007/open-ethereum-pool 2 | package stratum 3 | 4 | import ( 5 | "fmt" 6 | "log" 7 | "math/big" 8 | "os" 9 | "time" 10 | 11 | "github.com/Nelbert442/dero-golang-pool/pool" 12 | "github.com/Nelbert442/dero-golang-pool/rpc" 13 | "github.com/Nelbert442/dero-golang-pool/util" 14 | "github.com/deroproject/derosuite/address" 15 | "github.com/deroproject/derosuite/walletapi" 16 | ) 17 | 18 | type PayoutsProcessor struct { 19 | config *pool.PaymentsConfig 20 | rpc *rpc.RPCClient 21 | halt bool 22 | lastFail error 23 | } 24 | 25 | type PayoutTracker struct { 26 | Destinations []rpc.Destinations 27 | PaymentIDs []string 28 | } 29 | 30 | /* Used when integrating with derosuite functions, currently not being used but in place in case functions are used later 31 | type Transfer struct { 32 | rAddress *address.Address 33 | PaymentID []byte 34 | Amount uint64 35 | Fees uint64 36 | TX *transaction.Transaction 37 | TXID crypto.Hash 38 | Size float32 39 | Status string 40 | Inputs []uint64 41 | InputSum uint64 42 | Change uint64 43 | Relay bool 44 | OfflineTX bool 45 | Filename string 46 | } 47 | */ 48 | 49 | var wallet *walletapi.Wallet 50 | var PaymentsInfoLogger = logFileOutPayments("INFO") 51 | var PaymentsErrorLogger = logFileOutPayments("ERROR") 52 | 53 | func NewPayoutsProcessor(cfg *pool.PaymentsConfig, s *StratumServer) *PayoutsProcessor { 54 | u := &PayoutsProcessor{config: cfg} //backend: s.backend} 55 | u.rpc = s.rpc() 56 | return u 57 | } 58 | 59 | func (u *PayoutsProcessor) Start(s *StratumServer) { 60 | log.Printf("[Payments] Starting payouts") 61 | PaymentsInfoLogger.Printf("[Payments] Starting payouts") 62 | 63 | intv, _ := time.ParseDuration(u.config.Interval) 64 | timer := time.NewTimer(intv) 65 | log.Printf("[Payments] Set payouts interval to %v", intv) 66 | PaymentsInfoLogger.Printf("[Payments] Set payouts interval to %v", intv) 67 | 68 | payments := Graviton_backend.GetPendingPayments() 69 | 70 | if len(payments) > 0 { 71 | // Quick loop through to check if pending payments have reached threshold.
Log to screen any insufficient balances pending as well as to screen/log any failed payments that are above threshold 72 | var checkedPayments []*PaymentPending 73 | var insufficientBalances []*PaymentPending 74 | for _, val := range payments { 75 | amount := val.Amount 76 | 77 | if !u.reachedThreshold(amount) { 78 | insufficientBalances = append(insufficientBalances, val) 79 | continue 80 | } 81 | 82 | checkedPayments = append(checkedPayments, val) 83 | } 84 | 85 | if len(checkedPayments) > 0 { 86 | log.Printf("[Payments] Previous payout failed, trying to resolve it. List of failed payments:\n %v", 87 | formatPendingPayments(checkedPayments)) 88 | PaymentsInfoLogger.Printf("[Payments] Previous payout failed, trying to resolve it. List of failed payments:\n %v", formatPendingPayments(checkedPayments)) 89 | } 90 | 91 | if len(insufficientBalances) > 0 { 92 | log.Printf("[Payments] List of pending payments with insufficient balances (< %v):\n %v", u.config.Threshold, 93 | formatPendingPayments(insufficientBalances)) 94 | } 95 | } 96 | 97 | // Immediately process payouts after start 98 | u.process(s) 99 | timer.Reset(intv) 100 | 101 | go func() { 102 | for { 103 | select { 104 | case <-timer.C: 105 | u.process(s) 106 | timer.Reset(intv) 107 | } 108 | } 109 | }() 110 | } 111 | 112 | func (u *PayoutsProcessor) process(s *StratumServer) { 113 | 114 | maxAddresses := u.config.MaxAddresses 115 | var payoutList []rpc.Destinations 116 | var paymentIDPayeeList []rpc.Destinations 117 | var payIDList []string 118 | var payPending []*PaymentPending 119 | 120 | walletURL := fmt.Sprintf("http://%s:%v/json_rpc", u.config.WalletHost, u.config.WalletPort) 121 | mustPay := 0 122 | minersPaid := 0 123 | totalAmount := big.NewInt(0) 124 | 125 | // Graviton DB Pending Balance 126 | payPending = Graviton_backend.GetPendingPayments() 127 | for _, val := range payPending { 128 | 129 | login := val.Address 130 | amount := val.Amount 131 | 132 | if !u.reachedThreshold(amount) { 133 | continue 134 | } 135 | 136 | // Check if we have enough funds 137 | poolBalanceObj, err := u.rpc.GetBalance(walletURL) 138 | if err != nil { 139 | // TODO: mark sick maybe for tracking and frontend reporting? 140 | log.Printf("[Payments] Error when getting balance from wallet %s. Will try again in %s", walletURL, u.config.Interval) 141 | PaymentsErrorLogger.Printf("[Payments] Error when getting balance from wallet %s. Will try again in %s", walletURL, u.config.Interval) 142 | break 143 | } 144 | poolBalance := poolBalanceObj.UnlockedBalance 145 | // Use <= here as in the event of running at 0 pool fee and no other balance, you will not be able to payout since invalid balance will occur from tx generation 146 | if poolBalance <= amount { 147 | log.Printf("[Payments] Not enough balance for payment, need %v DERO, pool has %v DERO", amount, poolBalance) 148 | PaymentsErrorLogger.Printf("[Payments] Not enough balance for payment, need %v DERO, pool has %v DERO", amount, poolBalance) 149 | break 150 | } 151 | 152 | // Address validations 153 | // We already do this for when the miner connects, we need to get those details/vars or just regen them as well as re-validate JUST TO BE SURE prior to attempting to send 154 | // NOTE: The issue with grabbing from the miners arr (s.miners), is that if they're not actively mining but get rewards from a past round, the query will not return their detail for payout 155 | 156 | addr, _, paymentID, _, _, _ := s.splitLoginString(login) 157 | 158 | log.Printf("[Payments] Split login. 
Address: %v, paymentID: %v", addr, paymentID) 159 | PaymentsInfoLogger.Printf("[Payments] Split login. Address: %v, paymentID: %v", addr, paymentID) 160 | 161 | // Validate Address - DERO will validate against native DERO validation functions, rest will validate against util [against pool address for comparison, similar to login] 162 | switch s.config.Coin { 163 | case "DERO": 164 | _, err := address.NewAddress(addr) 165 | 166 | if err != nil { 167 | log.Printf("[Payments] Invalid address format. Will not process payments - %v - %v", addr, err) 168 | PaymentsErrorLogger.Printf("[Payments] Invalid address format. Will not process payments - %v - %v", addr, err) 169 | continue 170 | } 171 | default: 172 | if !util.ValidateAddressNonDERO(addr, s.config.Address) { 173 | log.Printf("[Payments] Invalid address format. Will not process payments - %v", addr) 174 | PaymentsErrorLogger.Printf("[Payments] Invalid address format. Will not process payments - %v", addr) 175 | continue 176 | } 177 | } 178 | 179 | currAddr := rpc.Destinations{ 180 | Amount: amount, 181 | Address: addr, 182 | } 183 | mustPay++ 184 | 185 | // If paymentID, put in an array that'll be walked through one at a time versus combining addresses/amounts. 186 | if paymentID != "" { 187 | paymentIDPayeeList = append(paymentIDPayeeList, currAddr) 188 | payIDList = append(payIDList, paymentID) 189 | } else { 190 | payoutList = append(payoutList, currAddr) 191 | } 192 | } 193 | 194 | payIDTracker := &PayoutTracker{Destinations: paymentIDPayeeList, PaymentIDs: payIDList} 195 | 196 | // If there are payouts to be processed 197 | if len(payoutList) > 0 || len(payIDList) > 0 { 198 | 199 | // Send DERO - Native, TODO) 200 | // Issues with mutex and also running wallet process locally (with --rpc-server), as it can't get a lock to generate the transaction. 201 | // Problem comes in, then it can't use the daemon/wallet to do the work it needs and errors, since we're not providing authentication in config etc. 202 | // Future may be allow for auth to rpc-server within the config.json or other means as an option to run it that way. Otherwise will continue using the rpc option as the future option. 
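// For reference, the working RPC path further below batches payoutList into
// transactions of at most maxAddresses recipients, flushing a batch early when
// an integrated address is queued (a single tx cannot carry more than one).
// Illustration with maxAddresses = 2 (addresses A, B, C, I are hypothetical):
//
//	payoutList = [A, B, C]               -> tx1: {A, B}, tx2: {C}
//	payoutList = [A, I, C], I integrated -> tx1: {A, I}, tx2: {C}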
203 | /* 204 | // Validate Address 205 | wallet.SetDaemonAddress("http://127.0.0.1:30306") 206 | 207 | transfer.rAddress, err = globals.ParseValidateAddress(login) 208 | if err != nil { 209 | log.Printf("Invalid address format - %v", login) 210 | break 211 | } else { 212 | log.Printf("Valid Address") 213 | } 214 | 215 | transfer.PaymentID = nil 216 | 217 | // This fails out w/ mutex error, not sure TODO 218 | //curBalance, _ := wallet.Get_Balance() 219 | 220 | transfer.Amount = amount 221 | addr_list := []address.Address{*transfer.rAddress} 222 | amount_list := []uint64{transfer.Amount} 223 | fees_per_kb := uint64(0) // fees must be calculated by walletapi 224 | 225 | tx, inputs, input_sum, change, err := wallet.Transfer(addr_list, amount_list, 0, hex.EncodeToString(transfer.PaymentID), fees_per_kb, 0) 226 | _ = inputs 227 | 228 | if err != nil { 229 | log.Printf("Error while building transaction: %s", err) 230 | break 231 | } 232 | 233 | transfer.OfflineTX = false 234 | 235 | transfer.Relay = build_relay_transaction(tx, inputs, input_sum, change, err, transfer.OfflineTX, amount_list) 236 | 237 | if !transfer.Relay { 238 | log.Printf("Error: Unable to build the transfer.") 239 | break 240 | } 241 | 242 | err = wallet.SendTransaction(transfer.TX) // relay tx to daemon/network 243 | 244 | if err == nil { 245 | transfer.Status = "Success" 246 | transfer.TXID = transfer.TX.GetHash() 247 | } else { 248 | transfer.Status = "Failed" 249 | transfer.TXID = transfer.TX.GetHash() 250 | log.Printf("Error relaying transaction: %s", err) 251 | break 252 | } 253 | txHash := transfer.TXID.String() 254 | */ 255 | 256 | // Send DERO - RPC (working) 257 | var currPayout rpc.Transfer_Params 258 | var lastPos int 259 | currPayout.Mixin = u.config.Mixin 260 | currPayout.Unlock_time = 0 261 | currPayout.Get_tx_key = true 262 | currPayout.Do_not_relay = false 263 | currPayout.Get_tx_hex = true 264 | 265 | // Payout paymentID addresses, one at a time since paymentID is used in the tx generation and is a non-array input 266 | for p, payee := range payIDTracker.Destinations { 267 | currPayout.Payment_ID = payIDTracker.PaymentIDs[p] 268 | currPayout.Destinations = append(currPayout.Destinations, payee) 269 | 270 | paymentOutput, err := u.rpc.SendTransaction(walletURL, currPayout) 271 | 272 | if err != nil { 273 | log.Printf("[Payments] Error with transaction: %v", err) 274 | PaymentsErrorLogger.Printf("[Payments] Error with transaction: %v", err) 275 | break 276 | } 277 | log.Printf("[Payments] Success: %v", paymentOutput) 278 | PaymentsInfoLogger.Printf("[Payments] Success: %v", paymentOutput) 279 | // Log transaction hash 280 | txHash := paymentOutput.Tx_hash_list 281 | txFee := paymentOutput.Fee_list 282 | // As pool owner, you probably want to store keys so that you can prove a send if required. 283 | txKey := paymentOutput.Tx_key_list 284 | 285 | if txHash == nil { 286 | log.Printf("[Payments] Failed to generate transaction. It was sent successfully to rpc server, but no reply back.") 287 | PaymentsErrorLogger.Printf("[Payments] Failed to generate transaction. 
It was sent successfully to rpc server, but no reply back.") 288 | 289 | break 290 | } 291 | 292 | // Debit miner's balance and update stats 293 | login := payee.Address + s.config.Stratum.PaymentID.AddressSeparator + currPayout.Payment_ID 294 | amount := payee.Amount 295 | 296 | for j, f := range payPending { 297 | if login == f.Address { 298 | payPending = removePendingPayments(payPending, j) 299 | break 300 | } 301 | } 302 | 303 | prunedPaymentsPending := &PendingPayments{PendingPayout: payPending} 304 | 305 | err = Graviton_backend.OverwritePendingPayments(prunedPaymentsPending) 306 | if err != nil { 307 | log.Printf("[Payments] Error overwriting pending payments. %v", err) 308 | PaymentsErrorLogger.Printf("[Payments] Error overwriting pending payments. %v", err) 309 | break 310 | } 311 | 312 | // Update stats for pool payments (gravitondb) 313 | info := &MinerPayments{} 314 | info.Login = login 315 | info.TxHash = txHash[0] 316 | info.TxKey = txKey[0] 317 | info.TxFee = txFee[0] 318 | info.Mixin = u.config.Mixin 319 | info.Amount = amount 320 | info.Timestamp = util.MakeTimestamp() / 1000 321 | 322 | writeWait, _ := time.ParseDuration("10ms") 323 | for Graviton_backend.Writing == 1 { 324 | //log.Printf("[Payments-writeprocessedpayments] GravitonDB is writing... sleeping for %v...", writeWait) 325 | //StorageInfoLogger.Printf("[Payments-writeprocessedpayments] GravitonDB is writing... sleeping for %v...", writeWait) 326 | time.Sleep(writeWait) 327 | } 328 | Graviton_backend.Writing = 1 329 | infoErr := Graviton_backend.WriteProcessedPayments(info) 330 | Graviton_backend.Writing = 0 331 | if infoErr != nil { 332 | log.Printf("[Payments] Graviton DB err: %v", infoErr) 333 | PaymentsErrorLogger.Printf("[Payments] Graviton DB err: %v", infoErr) 334 | break 335 | } 336 | 337 | minersPaid++ 338 | totalAmount.Add(totalAmount, big.NewInt(int64(amount))) 339 | } 340 | currPayout.Destinations = nil 341 | 342 | var hasIntegratedAddr bool 343 | 344 | // Payout non-paymentID addresses, max at a time according to maxAddresses in config 345 | for i, value := range payoutList { 346 | currPayout.Payment_ID = "" 347 | currPayout.Destinations = append(currPayout.Destinations, value) 348 | 349 | // Check if current list of destinations has an integrated address (we cannot have more than 1). TODO: Can re-do this to get to max addresses and include 1 integrated address rather than immediately sending when integrated is found 350 | for _, v := range currPayout.Destinations { 351 | vAddr, vErr := address.NewAddress(v.Address) 352 | 353 | if vErr == nil && vAddr.IsIntegratedAddress() { 354 | hasIntegratedAddr = true 355 | } 356 | } 357 | 358 | // Send the tx if maxAddresses is reached, the end of the payout list is reached, or there is an integrated address within the list of recipients 359 | if len(currPayout.Destinations) >= int(maxAddresses) || i+1 == len(payoutList) || hasIntegratedAddr { 360 | hasIntegratedAddr = false 361 | paymentOutput, err := u.rpc.SendTransaction(walletURL, currPayout) 362 | 363 | if err != nil { 364 | log.Printf("[Payments] Error with transaction: %v", err) 365 | PaymentsErrorLogger.Printf("[Payments] Error with transaction: %v", err) 366 | break 367 | } 368 | log.Printf("[Payments] Success: %v", paymentOutput) 369 | PaymentsInfoLogger.Printf("[Payments] Success: %v", paymentOutput) 370 | // Log transaction hash 371 | txHash := paymentOutput.Tx_hash_list 372 | txFee := paymentOutput.Fee_list 373 | // As pool owner, you probably want to store keys so that you can prove a send if required.
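// Walk-through of the lastPos/payPos bookkeeping used in the settlement loop
// below, assuming maxAddresses = 2 and len(payoutList) = 3: the tx flushes at
// i = 1 and i = 2. At the first flush lastPos = 0, so payPos = 1 and
// payoutList[0..1] are debited; lastPos then advances to 2, so the second
// flush debits payoutList[2] only.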
374 | txKey := paymentOutput.Tx_key_list 375 | 376 | if txHash == nil { 377 | log.Printf("[Payments] Failed to generate transaction. It was sent successfully to rpc server, but no reply back.") 378 | PaymentsErrorLogger.Printf("[Payments] Failed to generate transaction. It was sent successfully to rpc server, but no reply back.") 379 | 380 | break 381 | } 382 | 383 | if len(payoutList) > 1 && maxAddresses > 1 { 384 | payPos := i - lastPos 385 | for k := 0; k <= payPos; k++ { 386 | // Debit miner's balance and update stats 387 | login := payoutList[lastPos+k].Address 388 | amount := payoutList[lastPos+k].Amount 389 | 390 | for j, f := range payPending { 391 | if login == f.Address { 392 | payPending = removePendingPayments(payPending, j) 393 | break 394 | } 395 | } 396 | 397 | prunedPaymentsPending := &PendingPayments{PendingPayout: payPending} 398 | 399 | err = Graviton_backend.OverwritePendingPayments(prunedPaymentsPending) 400 | if err != nil { 401 | log.Printf("[Payments] Error overwriting pending payments. %v", err) 402 | PaymentsErrorLogger.Printf("[Payments] Error overwriting pending payments. %v", err) 403 | break 404 | } 405 | 406 | // Update stats for pool payments (gravitondb) 407 | info := &MinerPayments{} 408 | info.Login = login 409 | info.TxHash = txHash[0] 410 | info.TxKey = txKey[0] 411 | info.TxFee = txFee[0] 412 | info.Mixin = u.config.Mixin 413 | info.Amount = amount 414 | info.Timestamp = util.MakeTimestamp() / 1000 415 | 416 | writeWait, _ := time.ParseDuration("10ms") 417 | for Graviton_backend.Writing == 1 { 418 | //log.Printf("[Payments-writeprocessedpayments] GravitonDB is writing... sleeping for %v...", writeWait) 419 | //StorageInfoLogger.Printf("[Payments-writeprocessedpayments] GravitonDB is writing... sleeping for %v...", writeWait) 420 | time.Sleep(writeWait) 421 | } 422 | Graviton_backend.Writing = 1 423 | infoErr := Graviton_backend.WriteProcessedPayments(info) 424 | Graviton_backend.Writing = 0 425 | if infoErr != nil { 426 | log.Printf("[Payments] Graviton DB err: %v", infoErr) 427 | PaymentsErrorLogger.Printf("[Payments] Graviton DB err: %v", infoErr) 428 | break 429 | } 430 | 431 | minersPaid++ 432 | totalAmount.Add(totalAmount, big.NewInt(int64(amount))) 433 | } 434 | } else { 435 | log.Printf("[Payments] Processing payoutList[i]: %v", payoutList[i]) 436 | PaymentsInfoLogger.Printf("[Payments] Processing payoutList[i]: %v", payoutList[i]) 437 | // Debit miner's balance and update stats 438 | login := value.Address 439 | amount := value.Amount 440 | 441 | for j, f := range payPending { 442 | if login == f.Address { 443 | //log.Printf("[Payments] Removing payPending: %v", payPending[j]) 444 | payPending = removePendingPayments(payPending, j) 445 | break 446 | } 447 | } 448 | 449 | prunedPaymentsPending := &PendingPayments{PendingPayout: payPending} 450 | 451 | err = Graviton_backend.OverwritePendingPayments(prunedPaymentsPending) 452 | if err != nil { 453 | log.Printf("[Payments] Error overwriting pending payments. %v", err) 454 | PaymentsErrorLogger.Printf("[Payments] Error overwriting pending payments. 
%v", err) 455 | break 456 | } 457 | 458 | // Update stats for pool payments (gravitondb) 459 | info := &MinerPayments{} 460 | info.Login = login 461 | info.TxHash = txHash[0] 462 | info.TxKey = txKey[0] 463 | info.TxFee = txFee[0] 464 | info.Mixin = u.config.Mixin 465 | info.Amount = amount 466 | info.Timestamp = util.MakeTimestamp() / 1000 467 | 468 | writeWait, _ := time.ParseDuration("10ms") 469 | for Graviton_backend.Writing == 1 { 470 | //log.Printf("[Payments-writeprocessedpayments] GravitonDB is writing... sleeping for %v...", writeWait) 471 | //StorageInfoLogger.Printf("[Payments-writeprocessedpayments] GravitonDB is writing... sleeping for %v...", writeWait) 472 | time.Sleep(writeWait) 473 | } 474 | Graviton_backend.Writing = 1 475 | infoErr := Graviton_backend.WriteProcessedPayments(info) 476 | Graviton_backend.Writing = 0 477 | if infoErr != nil { 478 | log.Printf("[Payments] Graviton DB err: %v", infoErr) 479 | PaymentsErrorLogger.Printf("[Payments] Graviton DB err: %v", infoErr) 480 | break 481 | } 482 | 483 | minersPaid++ 484 | totalAmount.Add(totalAmount, big.NewInt(int64(amount))) 485 | } 486 | // Empty currpayout destinations array 487 | currPayout.Destinations = nil 488 | lastPos = i + 1 // Increment lastPos so it'll be equal to i next round in loop (if required) 489 | } 490 | } 491 | } 492 | 493 | if mustPay > 0 { 494 | log.Printf("[Payments] Paid total %v DERO to %v of %v payees", totalAmount, minersPaid, mustPay) 495 | PaymentsInfoLogger.Printf("[Payments] Paid total %v DERO to %v of %v payees", totalAmount, minersPaid, mustPay) 496 | } else { 497 | log.Printf("[Payments] No payees that have reached payout threshold") 498 | } 499 | } 500 | 501 | func removePendingPayments(s []*PaymentPending, i int) []*PaymentPending { 502 | if len(s) == 1 { 503 | return nil 504 | } 505 | s[i] = s[len(s)-1] 506 | return s[:len(s)-1] 507 | } 508 | 509 | func formatPendingPayments(list []*PaymentPending) string { 510 | var s string 511 | for _, v := range list { 512 | s += fmt.Sprintf("\t[Payments] Address: %s, Amount: %v DERO, %v\n", v.Address, v.Amount, time.Unix(v.Timestamp, 0)) 513 | } 514 | return s 515 | } 516 | 517 | func (self PayoutsProcessor) reachedThreshold(amount uint64) bool { 518 | return self.config.Threshold < amount 519 | } 520 | 521 | func logFileOutPayments(lType string) *log.Logger { 522 | var logFileName string 523 | if lType == "ERROR" { 524 | logFileName = "logs/paymentsError.log" 525 | } else { 526 | logFileName = "logs/payments.log" 527 | } 528 | os.Mkdir("logs", 0705) 529 | f, err := os.OpenFile(logFileName, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0705) 530 | if err != nil { 531 | panic(err) 532 | } 533 | 534 | logType := lType + ": " 535 | l := log.New(f, logType, log.LstdFlags|log.Lmicroseconds) 536 | return l 537 | } 538 | -------------------------------------------------------------------------------- /stratum/proto.go: -------------------------------------------------------------------------------- 1 | // Some struct integrations and ideas from: https://github.com/JKKGBE/open-zcash-pool which is a fork of https://github.com/sammy007/open-ethereum-pool 2 | package stratum 3 | 4 | import "encoding/json" 5 | 6 | type JSONRpcReq struct { 7 | Id *json.RawMessage `json:"id"` 8 | Method string `json:"method"` 9 | Params *json.RawMessage `json:"params"` 10 | } 11 | 12 | type JSONRpcResp struct { 13 | Id *json.RawMessage `json:"id"` 14 | Version string `json:"jsonrpc"` 15 | Result interface{} `json:"result"` 16 | Error interface{} `json:"error"` 17 | } 18 | 19 | type 
JSONPushMessage struct { 20 | Version string `json:"jsonrpc"` 21 | Method string `json:"method"` 22 | Params interface{} `json:"params"` 23 | } 24 | 25 | type LoginParams struct { 26 | Login string `json:"login"` 27 | Pass string `json:"pass"` 28 | Agent string `json:"agent"` 29 | } 30 | 31 | type GetJobParams struct { 32 | Id string `json:"id"` 33 | } 34 | 35 | type SubmitParams struct { 36 | Id string `json:"id"` 37 | JobId string `json:"job_id"` 38 | Nonce string `json:"nonce"` 39 | Result string `json:"result"` 40 | } 41 | 42 | type JobReply struct { 43 | Id string `json:"id"` 44 | Job *JobReplyData `json:"job"` 45 | Status string `json:"status"` 46 | } 47 | 48 | type JobReplyData struct { 49 | Blob string `json:"blob"` 50 | JobId string `json:"job_id"` 51 | Target string `json:"target"` 52 | Algo string `json:"algo"` 53 | Height uint64 `json:"height"` 54 | } 55 | 56 | type StatusReply struct { 57 | Status string `json:"status"` 58 | Message string `json:"message"` 59 | } 60 | 61 | type ErrorReply struct { 62 | Code int `json:"code"` 63 | Message string `json:"message"` 64 | } 65 | -------------------------------------------------------------------------------- /stratum/stratum.go: -------------------------------------------------------------------------------- 1 | package stratum 2 | 3 | import ( 4 | "bufio" 5 | "crypto/rand" 6 | "encoding/json" 7 | "fmt" 8 | "io" 9 | "log" 10 | "math/big" 11 | "net" 12 | "os" 13 | "os/signal" 14 | "sync" 15 | "sync/atomic" 16 | "syscall" 17 | "time" 18 | 19 | "github.com/Nelbert442/dero-golang-pool/pool" 20 | "github.com/Nelbert442/dero-golang-pool/rpc" 21 | "github.com/Nelbert442/dero-golang-pool/util" 22 | ) 23 | 24 | type StratumServer struct { 25 | roundShares int64 26 | config *pool.Config 27 | miners MinersMap 28 | blockTemplate atomic.Value 29 | upstream int32 30 | upstreams []*rpc.RPCClient 31 | timeout time.Duration 32 | estimationWindow time.Duration 33 | sessionsMu sync.RWMutex 34 | sessions map[*Session]struct{} 35 | algo string 36 | trustedSharesCount int64 37 | gravitonDB *GravitonStore 38 | hashrateExpiration time.Duration 39 | failsCount int64 40 | donateID string 41 | } 42 | 43 | type Endpoint struct { 44 | jobSequence uint64 45 | config *pool.Port 46 | difficulty *big.Int 47 | instanceId []byte 48 | extraNonce uint32 49 | targetHex string 50 | } 51 | 52 | type VarDiff struct { 53 | Difficulty int64 54 | Average float64 55 | TimestampArr map[int64]int64 56 | LastRetargetTimestamp int64 57 | LastTimeStamp int64 58 | } 59 | 60 | type Session struct { 61 | lastBlockHeight uint64 62 | sync.Mutex 63 | conn *net.TCPConn 64 | enc *json.Encoder 65 | ip string 66 | endpoint *Endpoint 67 | validJobs []*Job 68 | difficulty int64 69 | VarDiff *VarDiff 70 | isFixedDiff bool 71 | } 72 | 73 | const ( 74 | MaxReqSize = 10 * 1024 75 | ) 76 | 77 | var StratumInfoLogger = logFileOutStratum("INFO") 78 | var StratumErrorLogger = logFileOutStratum("ERROR") 79 | 80 | func NewStratum(cfg *pool.Config) *StratumServer { 81 | stratum := &StratumServer{config: cfg} 82 | 83 | // Setup our Ctrl+C handler 84 | stratum.SetupCloseHandler() 85 | 86 | // Startup/create new gravitondb (if it doesn't exist), write the configuration file (config.json) into storage for use / api surfacing later 87 | Graviton_backend.NewGravDB(cfg.PoolHost, "pooldb", cfg.GravitonMigrateWait, cfg.GravitonMaxSnapshots) //stratum.gravitonDB.NewGravDB(cfg.PoolHost, "pooldb") // TODO: Add to params in config.json file 88 | 89 | writeWait, _ := time.ParseDuration("10ms") 90 | for 
Graviton_backend.Writing == 1 { 91 | //log.Printf("[Stratum-writeconfig] GravitonDB is writing... sleeping for %v...", writeWait) 92 | //StorageInfoLogger.Printf("[Stratum-writeconfig] GravitonDB is writing... sleeping for %v...", writeWait) 93 | time.Sleep(writeWait) 94 | } 95 | Graviton_backend.Writing = 1 96 | Graviton_backend.WriteConfig(cfg) 97 | Graviton_backend.Writing = 0 98 | 99 | // Set stratum.upstreams length based on cfg.Upstream only if they are set enabled: true. We use arr to simulate this and filter out cfg.Upstream objects 100 | var arr []pool.Upstream 101 | for _, f := range cfg.Upstream { 102 | if f.Enabled { 103 | arr = append(arr, f) 104 | } 105 | } 106 | 107 | stratum.upstreams = make([]*rpc.RPCClient, len(arr)) 108 | for i, v := range arr { 109 | client, err := rpc.NewRPCClient(&v) 110 | if err != nil { 111 | log.Fatal(err) 112 | } else { 113 | stratum.upstreams[i] = client 114 | log.Printf("[Stratum] Upstream: %s => %s", client.Name, client.Url) 115 | StratumInfoLogger.Printf("[Stratum] Upstream: %s => %s", client.Name, client.Url) 116 | } 117 | } 118 | log.Printf("[Stratum] Default upstream: %s => %s", stratum.rpc().Name, stratum.rpc().Url) 119 | StratumInfoLogger.Printf("[Stratum] Default upstream: %s => %s", stratum.rpc().Name, stratum.rpc().Url) 120 | 121 | stratum.miners = NewMinersMap() 122 | stratum.sessions = make(map[*Session]struct{}) 123 | stratum.algo = cfg.Algo 124 | stratum.trustedSharesCount = cfg.TrustedSharesCount 125 | 126 | timeout, _ := time.ParseDuration(cfg.Stratum.Timeout) 127 | stratum.timeout = timeout 128 | 129 | refreshIntv, _ := time.ParseDuration(cfg.BlockRefreshInterval) 130 | refreshTimer := time.NewTimer(refreshIntv) 131 | log.Printf("[Stratum] Set block refresh every %v", refreshIntv) 132 | StratumInfoLogger.Printf("[Stratum] Set block refresh every %v", refreshIntv) 133 | 134 | hashExpiration, _ := time.ParseDuration(cfg.HashrateExpiration) 135 | stratum.hashrateExpiration = hashExpiration 136 | 137 | hashWindow, _ := time.ParseDuration(cfg.API.HashrateWindow) 138 | stratum.estimationWindow = hashWindow 139 | 140 | checkIntv, _ := time.ParseDuration(cfg.UpstreamCheckInterval) 141 | checkTimer := time.NewTimer(checkIntv) 142 | log.Printf("[Stratum] Set upstream check interval every %v", checkIntv) 143 | StratumInfoLogger.Printf("[Stratum] Set upstream check interval every %v", checkIntv) 144 | 145 | minerStatsIntv, _ := time.ParseDuration(cfg.StoreMinerStatsInterval) 146 | minerStatsTimer := time.NewTimer(minerStatsIntv) 147 | log.Printf("[Stratum] Set miner stats store interval every %v", minerStatsIntv) 148 | StratumInfoLogger.Printf("[Stratum] Set miner stats store interval every %v", minerStatsIntv) 149 | 150 | infoIntv, _ := time.ParseDuration(cfg.UpstreamCheckInterval) 151 | infoTimer := time.NewTimer(infoIntv) 152 | 153 | // Setup donate ID 154 | var dwid, dpid string 155 | var ddiff uint64 156 | var ddonperc int64 157 | var disSolo bool 158 | if stratum.config.DonationAddress != "" { 159 | daddress := stratum.config.DonationAddress 160 | stratum.donateID = daddress 161 | dminer, ok := stratum.miners.Get(daddress) 162 | if !ok { 163 | log.Printf("[Stratum] Registering donation miner: Address: %s", daddress) 164 | StratumInfoLogger.Printf("[Stratum] Registering donation miner: Address: %s", daddress) 165 | dminer = NewMiner(daddress, daddress, dpid, ddiff, dwid, ddonperc, disSolo, "127.0.0.1") 166 | stratum.registerMiner(dminer) 167 | 168 | writeWait, _ := time.ParseDuration("10ms") 169 | for Graviton_backend.Writing == 1 { 
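// Graviton_backend.Writing acts as a hand-rolled write lock around every
// Graviton DB write in this codebase: poll every 10ms until the flag clears,
// set it, write, then clear it. A minimal sketch of the same serialization
// with the standard library, assuming a package-level mutex were introduced
// instead (gravWriteMu is hypothetical, not part of this codebase):
//
//	var gravWriteMu sync.Mutex
//	gravWriteMu.Lock()
//	Graviton_backend.WriteMinerIDRegistration(dminer)
//	gravWriteMu.Unlock()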
170 | //log.Printf("[Handlers-handleLoginRPC] GravitonDB is writing... sleeping for %v...", writeWait) 171 | //StorageInfoLogger.Printf("[Handlers-handleLoginRPC] GravitonDB is writing... sleeping for %v...", writeWait) 172 | time.Sleep(writeWait) 173 | } 174 | Graviton_backend.Writing = 1 175 | Graviton_backend.WriteMinerIDRegistration(dminer) 176 | Graviton_backend.Writing = 0 177 | } 178 | } else { 179 | daddress := stratum.config.Address 180 | stratum.donateID = daddress 181 | dminer, ok := stratum.miners.Get(daddress) 182 | if !ok { 183 | log.Printf("[Stratum] Registering donation miner: Address: %s", daddress) 184 | StratumInfoLogger.Printf("[Stratum] Registering donation miner: Address: %s", daddress) 185 | dminer = NewMiner(daddress, daddress, dpid, ddiff, dwid, ddonperc, disSolo, "127.0.0.1") 186 | stratum.registerMiner(dminer) 187 | 188 | writeWait, _ := time.ParseDuration("10ms") 189 | for Graviton_backend.Writing == 1 { 190 | //log.Printf("[Handlers-handleLoginRPC] GravitonDB is writing... sleeping for %v...", writeWait) 191 | //StorageInfoLogger.Printf("[Handlers-handleLoginRPC] GravitonDB is writing... sleeping for %v...", writeWait) 192 | time.Sleep(writeWait) 193 | } 194 | Graviton_backend.Writing = 1 195 | Graviton_backend.WriteMinerIDRegistration(dminer) 196 | Graviton_backend.Writing = 0 197 | } 198 | } 199 | 200 | // Init block template 201 | go stratum.refreshBlockTemplate(false) 202 | 203 | go func() { 204 | for { 205 | select { 206 | case <-refreshTimer.C: 207 | stratum.refreshBlockTemplate(true) 208 | refreshTimer.Reset(refreshIntv) 209 | } 210 | } 211 | }() 212 | 213 | go func() { 214 | for { 215 | select { 216 | case <-checkTimer.C: 217 | stratum.checkUpstreams() 218 | checkTimer.Reset(checkIntv) 219 | } 220 | } 221 | }() 222 | 223 | go func() { 224 | for { 225 | select { 226 | case <-checkTimer.C: 227 | stratum.updateFixedDiffJobs() 228 | checkTimer.Reset(checkIntv) 229 | } 230 | } 231 | }() 232 | 233 | go func() { 234 | for { 235 | select { 236 | case <-minerStatsTimer.C: 237 | // Write miner stats 238 | 239 | writeWait, _ := time.ParseDuration("10ms") 240 | for Graviton_backend.Writing == 1 { 241 | //log.Printf("[Stratum-writeminerstats] GravitonDB is writing... sleeping for %v...", writeWait) 242 | //StorageInfoLogger.Printf("[Stratum-writeminerstats] GravitonDB is writing... 
sleeping for %v...", writeWait) 243 | time.Sleep(writeWait) 244 | } 245 | Graviton_backend.Writing = 1 246 | err := Graviton_backend.WriteMinerStats(stratum.miners, stratum.hashrateExpiration) 247 | err2 := Graviton_backend.UpdatePoolRoundStats(stratum.miners, false) 248 | Graviton_backend.Writing = 0 249 | if err != nil { 250 | log.Printf("[Stratum] Err storing miner stats: %v", err) 251 | StratumErrorLogger.Printf("[Stratum] Err storing miner stats: %v", err) 252 | } 253 | if err2 != nil { 254 | log.Printf("[Stratum] Err storing miner round stats: %v", err2) 255 | StratumErrorLogger.Printf("[Stratum] Err storing miner round stats: %v", err2) 256 | } 257 | minerStatsTimer.Reset(minerStatsIntv) 258 | } 259 | } 260 | }() 261 | 262 | go func() { 263 | for { 264 | select { 265 | case <-infoTimer.C: 266 | poll := func(v *rpc.RPCClient) { 267 | _, err := v.UpdateInfo() 268 | if err != nil { 269 | log.Printf("[Stratum] Unable to update info on upstream %s: %v", v.Name, err) 270 | StratumErrorLogger.Printf("[Stratum] Unable to update info on upstream %s: %v", v.Name, err) 271 | stratum.markSick() 272 | } else { 273 | stratum.markOk() 274 | } 275 | } 276 | current := stratum.rpc() 277 | poll(current) 278 | 279 | // Async rpc call to not block on rpc timeout, ignoring current 280 | go func() { 281 | for _, v := range stratum.upstreams { 282 | if v != current { 283 | poll(v) 284 | } 285 | } 286 | }() 287 | infoTimer.Reset(infoIntv) 288 | } 289 | } 290 | }() 291 | 292 | return stratum 293 | } 294 | 295 | // Defines parameters for the ports to be listened on, such as default difficulty 296 | func NewEndpoint(cfg *pool.Port) *Endpoint { 297 | e := &Endpoint{config: cfg} 298 | e.instanceId = make([]byte, 4) 299 | _, err := rand.Read(e.instanceId) 300 | if err != nil { 301 | StratumErrorLogger.Printf("[Stratum] Can't seed with random bytes: %v", err) 302 | log.Fatalf("[Stratum] Can't seed with random bytes: %v", err) 303 | } 304 | e.targetHex = util.GetTargetHex(e.config.Difficulty) 305 | e.difficulty = big.NewInt(e.config.Difficulty) 306 | return e 307 | } 308 | 309 | // Sets up stratum to listen on the ports in config.json 310 | func (s *StratumServer) Listen() { 311 | quit := make(chan bool) 312 | for _, port := range s.config.Stratum.Ports { 313 | go func(cfg pool.Port) { 314 | e := NewEndpoint(&cfg) 315 | e.Listen(s) 316 | }(port) 317 | } 318 | <-quit 319 | } 320 | 321 | // Starts listening on the ports defined in s.Listen() 322 | func (e *Endpoint) Listen(s *StratumServer) { 323 | bindAddr := fmt.Sprintf("%s:%d", e.config.Host, e.config.Port) 324 | addr, err := net.ResolveTCPAddr("tcp", bindAddr) 325 | if err != nil { 326 | StratumErrorLogger.Printf("[Stratum] Error: %v", err) 327 | log.Fatalf("[Stratum] Error: %v", err) 328 | } 329 | server, err := net.ListenTCP("tcp", addr) 330 | if err != nil { 331 | StratumErrorLogger.Printf("[Stratum] Error: %v", err) 332 | log.Fatalf("[Stratum] Error: %v", err) 333 | } 334 | defer server.Close() 335 | 336 | log.Printf("[Stratum] Stratum listening on %s", bindAddr) 337 | StratumInfoLogger.Printf("[Stratum] Stratum listening on %s", bindAddr) 338 | accept := make(chan int, e.config.MaxConn) 339 | n := 0 340 | 341 | for { 342 | conn, err := server.AcceptTCP() 343 | if err != nil { 344 | continue 345 | } 346 | conn.SetKeepAlive(true) 347 | ip, _, _ := net.SplitHostPort(conn.RemoteAddr().String()) 348 | 349 | VarDiff := &VarDiff{} 350 | 351 | cs := &Session{conn: conn, ip: ip, enc: json.NewEncoder(conn), endpoint: e, VarDiff: VarDiff} 352 | n++ 353 | 354 | accept 
<- n
355 | 		go func() {
356 | 			s.handleClient(cs, e)
357 | 			<-accept
358 | 		}()
359 | 	}
360 | }
361 | 
362 | // Handles inbound client data, and sends off to handleMessage for processing things like login, submits etc.
363 | func (s *StratumServer) handleClient(cs *Session, e *Endpoint) {
364 | 	connbuff := bufio.NewReaderSize(cs.conn, MaxReqSize)
365 | 	s.setDeadline(cs.conn)
366 | 
367 | 	for {
368 | 		data, isPrefix, err := connbuff.ReadLine()
369 | 		if isPrefix {
370 | 			log.Printf("[Stratum] Socket flood detected from %v", cs.ip)
371 | 			StratumErrorLogger.Printf("[Stratum] Socket flood detected from %v", cs.ip)
372 | 			break
373 | 		} else if err == io.EOF {
374 | 			log.Printf("[Stratum] Client disconnected %v", cs.ip)
375 | 			StratumErrorLogger.Printf("[Stratum] Client disconnected %v", cs.ip)
376 | 			break
377 | 		} else if err != nil {
378 | 			log.Printf("[Stratum] Error reading: %v", err)
379 | 			StratumErrorLogger.Printf("[Stratum] Error reading: %v", err)
380 | 			break
381 | 		}
382 | 
383 | 		// NOTICE: cpuminer-multi sends junk newlines, so we demand at least 1 byte for decode
384 | 		// NOTICE: Ns*CNMiner.exe will send malformed JSON on very low diff, not sure we should handle this
385 | 		if len(data) > 1 {
386 | 			var req JSONRpcReq
387 | 			err = json.Unmarshal(data, &req)
388 | 			if err != nil {
389 | 				log.Printf("[Stratum] Malformed request from %s: %v", cs.ip, err)
390 | 				StratumErrorLogger.Printf("[Stratum] Malformed request from %s: %v", cs.ip, err)
391 | 				break
392 | 			}
393 | 			s.setDeadline(cs.conn)
394 | 			err = cs.handleMessage(s, e, &req)
395 | 			if err != nil {
396 | 				break
397 | 			}
398 | 		}
399 | 	}
400 | 	s.removeSession(cs)
401 | 	cs.conn.Close()
402 | }
403 | 
404 | // Handles a decoded JSON-RPC message; login, getjob, submit and keepalived are the supported methods
405 | func (cs *Session) handleMessage(s *StratumServer, e *Endpoint, req *JSONRpcReq) error {
406 | 	if req.Id == nil {
407 | 		err := fmt.Errorf("[Stratum] Server disconnect request")
408 | 		StratumErrorLogger.Printf("%v", err)
409 | 		log.Printf("%v", err)
410 | 		return err
411 | 	} else if req.Params == nil {
412 | 		err := fmt.Errorf("[Stratum] Server RPC request is missing params")
413 | 		StratumErrorLogger.Printf("%v", err)
414 | 		log.Printf("%v", err)
415 | 		return err
416 | 	}
417 | 
418 | 	// Handle RPC methods
419 | 	switch req.Method {
420 | 
421 | 	case "login":
422 | 		var params LoginParams
423 | 
424 | 		err := json.Unmarshal(*req.Params, &params)
425 | 		if err != nil {
426 | 			log.Printf("[Stratum] Unable to parse params: %v", err)
427 | 			StratumErrorLogger.Printf("[Stratum] Unable to parse params: %v", err)
428 | 			return err
429 | 		}
430 | 		reply, errReply := s.handleLoginRPC(cs, &params)
431 | 		if errReply != nil {
432 | 			return cs.sendError(req.Id, errReply, true)
433 | 		}
434 | 		return cs.sendResult(req.Id, &reply)
435 | 	case "getjob":
436 | 		var params GetJobParams
437 | 		err := json.Unmarshal(*req.Params, &params)
438 | 		if err != nil {
439 | 			log.Printf("[Stratum] Unable to parse params: %v", err)
440 | 			StratumErrorLogger.Printf("[Stratum] Unable to parse params: %v", err)
441 | 			return err
442 | 		}
443 | 		reply, errReply := s.handleGetJobRPC(cs, &params)
444 | 		if errReply != nil {
445 | 			return cs.sendError(req.Id, errReply, true)
446 | 		}
447 | 		return cs.sendResult(req.Id, &reply)
448 | 	case "submit":
449 | 		var params SubmitParams
450 | 		err := json.Unmarshal(*req.Params, &params)
451 | 		if err != nil {
452 | 			log.Printf("[Stratum] Unable to parse params: %v", err)
453 | 			StratumErrorLogger.Printf("[Stratum] Unable to parse params: %v", err)
454 | 			return err
455 | 		}
456 | 		reply, errReply := s.handleSubmitRPC(cs, &params)
457 | 		if errReply != nil {
458 | 			return cs.sendError(req.Id, errReply, false)
459 | 		}
460 | 		return
cs.sendResult(req.Id, &reply) 461 | case "keepalived": 462 | return cs.sendResult(req.Id, &StatusReply{Status: "KEEPALIVED"}) 463 | default: 464 | errReply := s.handleUnknownRPC(req) 465 | return cs.sendError(req.Id, errReply, true) 466 | } 467 | } 468 | 469 | func (cs *Session) sendResult(id *json.RawMessage, result interface{}) error { 470 | cs.Lock() 471 | defer cs.Unlock() 472 | message := JSONRpcResp{Id: id, Version: "2.0", Error: nil, Result: result} 473 | return cs.enc.Encode(&message) 474 | } 475 | 476 | func (cs *Session) pushMessage(method string, params interface{}) error { 477 | cs.Lock() 478 | defer cs.Unlock() 479 | message := JSONPushMessage{Version: "2.0", Method: method, Params: params} 480 | return cs.enc.Encode(&message) 481 | } 482 | 483 | func (cs *Session) sendError(id *json.RawMessage, reply *ErrorReply, drop bool) error { 484 | cs.Lock() 485 | defer cs.Unlock() 486 | message := JSONRpcResp{Id: id, Version: "2.0", Error: reply} 487 | err := cs.enc.Encode(&message) 488 | if err != nil { 489 | return err 490 | } 491 | if drop { 492 | StratumErrorLogger.Printf("[Stratum] Server disconnect request") 493 | return fmt.Errorf("[Stratum] Server disconnect request") 494 | } 495 | return nil 496 | } 497 | 498 | func (s *StratumServer) setDeadline(conn *net.TCPConn) { 499 | conn.SetDeadline(time.Now().Add(s.timeout)) 500 | } 501 | 502 | func (s *StratumServer) registerSession(cs *Session) { 503 | s.sessionsMu.Lock() 504 | defer s.sessionsMu.Unlock() 505 | s.sessions[cs] = struct{}{} 506 | } 507 | 508 | func (s *StratumServer) removeSession(cs *Session) { 509 | s.sessionsMu.Lock() 510 | defer s.sessionsMu.Unlock() 511 | delete(s.sessions, cs) 512 | } 513 | 514 | func (s *StratumServer) registerMiner(miner *Miner) { 515 | s.miners.Set(miner.Id, miner) 516 | } 517 | 518 | func (s *StratumServer) currentBlockTemplate() *BlockTemplate { 519 | if t := s.blockTemplate.Load(); t != nil { 520 | return t.(*BlockTemplate) 521 | } 522 | return nil 523 | } 524 | 525 | func (s *StratumServer) currentWork() *BlockTemplate { 526 | work := s.blockTemplate.Load() 527 | if work != nil { 528 | return work.(*BlockTemplate) 529 | } else { 530 | return nil 531 | } 532 | } 533 | 534 | // Poll upstreams for health status 535 | func (s *StratumServer) checkUpstreams() { 536 | candidate := int32(0) 537 | backup := false 538 | 539 | for i, v := range s.upstreams { 540 | ok, err := v.Check(10, s.config.Address) 541 | if err != nil { 542 | log.Printf("[Stratum] Upstream %v didn't pass check: %v", v.Name, err) 543 | StratumErrorLogger.Printf("[Stratum] Upstream %v didn't pass check: %v", v.Name, err) 544 | } 545 | if ok && !backup { 546 | candidate = int32(i) 547 | backup = true 548 | } 549 | } 550 | 551 | if s.upstream != candidate { 552 | log.Printf("[Stratum] Switching to %v upstream", s.upstreams[candidate].Name) 553 | StratumInfoLogger.Printf("[Stratum] Switching to %v upstream", s.upstreams[candidate].Name) 554 | atomic.StoreInt32(&s.upstream, candidate) 555 | } 556 | } 557 | 558 | // Loads the current active upstream that is used for getting blocks etc. 
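// The index of the active upstream is stored in an atomic int32 so that
// checkUpstreams() can fail over between daemons without taking a lock on
// this hot path. A minimal sketch of the same pattern (illustrative names,
// not part of this codebase):
//
//	var active int32
//
//	func current(upstreams []*rpc.RPCClient) *rpc.RPCClient {
//		return upstreams[atomic.LoadInt32(&active)]
//	}
//
//	func failover(i int32) {
//		atomic.StoreInt32(&active, i)
//	}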
559 | func (s *StratumServer) rpc() *rpc.RPCClient {
560 | 	i := atomic.LoadInt32(&s.upstream)
561 | 	return s.upstreams[i]
562 | }
563 | 
564 | // Marks the stratum server sick after a failed upstream poll by bumping failsCount
565 | func (s *StratumServer) markSick() {
566 | 	atomic.AddInt64(&s.failsCount, 1)
567 | }
568 | 
569 | // Reports the stratum server as sick once healthCheck is enabled and failsCount reaches maxFails from config.json
570 | func (s *StratumServer) isSick() bool {
571 | 	x := atomic.LoadInt64(&s.failsCount)
572 | 	if s.config.Stratum.HealthCheck && x >= s.config.Stratum.MaxFails {
573 | 		return true
574 | 	}
575 | 	return false
576 | }
577 | 
578 | // Resets failsCount to 0 after a successful upstream poll, marking the server OK again
579 | func (s *StratumServer) markOk() {
580 | 	atomic.StoreInt64(&s.failsCount, 0)
581 | }
582 | 
583 | // SetupCloseHandler creates a 'listener' on a new goroutine which will notify the
584 | // program if it receives an interrupt from the OS. We then handle this by calling
585 | // our clean up procedure and exiting the program.
586 | // Reference: https://golangcode.com/handle-ctrl-c-exit-in-terminal/
587 | func (s *StratumServer) SetupCloseHandler() {
588 | 	c := make(chan os.Signal, 1) // buffered so signal.Notify never drops the signal
589 | 	signal.Notify(c, os.Interrupt, syscall.SIGTERM)
590 | 	go func() {
591 | 		<-c
592 | 		log.Printf("\r- Ctrl+C pressed in Terminal")
593 | 		StratumInfoLogger.Printf("\r- Ctrl+C pressed in Terminal")
594 | 		log.Printf("Closing - syncing miner stats...")
595 | 		StratumInfoLogger.Printf("Closing - syncing miner stats...")
596 | 
597 | 		writeWait, _ := time.ParseDuration("10ms")
598 | 		for Graviton_backend.Writing == 1 {
599 | 			//log.Printf("[Stratum-writeminerstats] GravitonDB is writing... sleeping for %v...", writeWait)
600 | 			//StorageInfoLogger.Printf("[Stratum-writeminerstats] GravitonDB is writing...
sleeping for %v...", writeWait) 601 | time.Sleep(writeWait) 602 | } 603 | Graviton_backend.Writing = 1 604 | err := Graviton_backend.WriteMinerStats(s.miners, s.hashrateExpiration) 605 | err2 := Graviton_backend.UpdatePoolRoundStats(s.miners, false) 606 | Graviton_backend.Writing = 0 607 | if err != nil { 608 | log.Printf("[Stratum] Err storing miner stats: %v", err) 609 | StratumErrorLogger.Printf("[Stratum] Err storing miner stats: %v", err) 610 | } 611 | if err2 != nil { 612 | log.Printf("[Stratum] Err storing miner round stats: %v", err2) 613 | StratumErrorLogger.Printf("[Stratum] Err storing miner round stats: %v", err2) 614 | } 615 | // Add 1 second sleep prior to closing to prevent writeminerstats issues 616 | time.Sleep(time.Second) 617 | Graviton_backend.DB.Close() 618 | os.Exit(0) 619 | }() 620 | } 621 | 622 | func logFileOutStratum(lType string) *log.Logger { 623 | var logFileName string 624 | if lType == "ERROR" { 625 | logFileName = "logs/stratumError.log" 626 | } else { 627 | logFileName = "logs/stratum.log" 628 | } 629 | os.Mkdir("logs", 0705) 630 | f, err := os.OpenFile(logFileName, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0705) 631 | if err != nil { 632 | panic(err) 633 | } 634 | 635 | logType := lType + ": " 636 | l := log.New(f, logType, log.LstdFlags|log.Lmicroseconds) 637 | return l 638 | } 639 | -------------------------------------------------------------------------------- /stratum/unlocker.go: -------------------------------------------------------------------------------- 1 | // Many unlocker integration functions and ideas from: https://github.com/JKKGBE/open-zcash-pool which is a fork of https://github.com/sammy007/open-ethereum-pool 2 | package stratum 3 | 4 | import ( 5 | "fmt" 6 | "log" 7 | "math/big" 8 | "os" 9 | "strconv" 10 | "strings" 11 | "time" 12 | 13 | "github.com/Nelbert442/dero-golang-pool/pool" 14 | "github.com/Nelbert442/dero-golang-pool/rpc" 15 | "github.com/Nelbert442/dero-golang-pool/util" 16 | "github.com/deroproject/derosuite/config" 17 | ) 18 | 19 | type BlockUnlocker struct { 20 | config *pool.UnlockerConfig 21 | rpc *rpc.RPCClient 22 | halt bool 23 | lastFail error 24 | } 25 | 26 | type UnlockResultGrav struct { 27 | maturedBlocks []*BlockDataGrav 28 | orphanedBlocks []*BlockDataGrav 29 | orphans int 30 | blocks int 31 | } 32 | 33 | var UnlockerInfoLogger = logFileOutUnlocker("INFO") 34 | var UnlockerErrorLogger = logFileOutUnlocker("ERROR") 35 | 36 | // Get constant blocks required to mature from derosuite 37 | const MINER_TX_AMOUNT_UNLOCK = config.MINER_TX_AMOUNT_UNLOCK 38 | 39 | func NewBlockUnlocker(cfg *pool.UnlockerConfig, s *StratumServer) *BlockUnlocker { 40 | u := &BlockUnlocker{config: cfg} 41 | // Set blockunlocker rpc to stratumserver rpc (defined by current default upstream) 42 | u.rpc = s.rpc() 43 | return u 44 | } 45 | 46 | func (u *BlockUnlocker) StartBlockUnlocker(s *StratumServer) { 47 | log.Printf("[Unlocker] Starting block unlocker") 48 | UnlockerInfoLogger.Printf("[Unlocker] Starting block unlocker") 49 | interval, _ := time.ParseDuration(u.config.Interval) 50 | timer := time.NewTimer(interval) 51 | log.Printf("[Unlocker] Set block unlock interval to %v", interval) 52 | UnlockerInfoLogger.Printf("[Unlocker] Set block unlock interval to %v", interval) 53 | 54 | // Immediately unlock after start 55 | u.unlockPendingBlocks(s) 56 | u.unlockAndCreditMiners(s) 57 | timer.Reset(interval) 58 | 59 | go func() { 60 | for { 61 | select { 62 | case <-timer.C: 63 | u.unlockPendingBlocks(s) 64 | u.unlockAndCreditMiners(s) 65 | 
timer.Reset(interval) 66 | } 67 | } 68 | }() 69 | } 70 | 71 | func (u *BlockUnlocker) unlockPendingBlocks(s *StratumServer) { 72 | // Graviton DB implementation - choose to sort candidate here for faster return within storage.go, could later have "candidate" as an input and sort within GetBlocksFound() func 73 | blocksFound := Graviton_backend.GetBlocksFound("candidate") 74 | 75 | //if len(candidates) == 0 || len(candidateBlocks) == 0 { 76 | if blocksFound == nil { 77 | log.Printf("[Unlocker] No block candidates to unlock") 78 | return 79 | } 80 | 81 | var candidateBlocks []*BlockDataGrav 82 | for _, value := range blocksFound.MinedBlocks { 83 | // This is a double check, may not be necessary but safeguarding to ensure candidate block 84 | if value.BlockState == "candidate" { 85 | candidateBlocks = append(candidateBlocks, value) 86 | } 87 | } 88 | 89 | if len(candidateBlocks) == 0 { 90 | log.Printf("[Unlocker] No block candidates to unlock") 91 | return 92 | } 93 | 94 | // Graviton DB implementation 95 | resultGrav, err := u.unlockCandidatesGrav(candidateBlocks, "candidates") 96 | if err != nil { 97 | log.Printf("[Unlocker] Failed to unlock blocks grav: %v", err) 98 | UnlockerErrorLogger.Printf("[Unlocker] Failed to unlock blocks grav: %v", err) 99 | return 100 | } 101 | 102 | log.Printf("[Unlocker] Immature %v blocks, %v orphans", resultGrav.blocks, resultGrav.orphans) 103 | UnlockerInfoLogger.Printf("[Unlocker] Immature %v blocks, %v orphans", resultGrav.blocks, resultGrav.orphans) 104 | 105 | if len(resultGrav.orphanedBlocks) > 0 { 106 | writeWait, _ := time.ParseDuration("10ms") 107 | for Graviton_backend.Writing == 1 { 108 | //log.Printf("[Unlocker-writeorphanedblocks] GravitonDB is writing... sleeping for %v...", writeWait) 109 | //StorageInfoLogger.Printf("[Unlocker-writeorphanedblocks] GravitonDB is writing... sleeping for %v...", writeWait) 110 | time.Sleep(writeWait) 111 | } 112 | Graviton_backend.Writing = 1 113 | err = Graviton_backend.WriteOrphanedBlocks(resultGrav.orphanedBlocks) 114 | Graviton_backend.Writing = 0 115 | if err != nil { 116 | log.Printf("[Unlocker] Failed to insert orphaned blocks into backend: %v", err) 117 | UnlockerErrorLogger.Printf("[Unlocker] Failed to insert orphaned blocks into backend: %v", err) 118 | return 119 | } else { 120 | log.Printf("[Unlocker] Inserted %v orphaned blocks to backend", resultGrav.orphans) 121 | UnlockerInfoLogger.Printf("[Unlocker] Inserted %v orphaned blocks to backend", resultGrav.orphans) 122 | } 123 | } 124 | 125 | // Graviton DB 126 | for _, block := range resultGrav.maturedBlocks { 127 | writeWait, _ := time.ParseDuration("10ms") 128 | for Graviton_backend.Writing == 1 { 129 | //log.Printf("[Unlocker-writeimmatureblock] GravitonDB is writing... sleeping for %v...", writeWait) 130 | //StorageInfoLogger.Printf("[Unlocker-writeimmatureblock] GravitonDB is writing... 
sleeping for %v...", writeWait) 131 | time.Sleep(writeWait) 132 | } 133 | Graviton_backend.Writing = 1 134 | err = Graviton_backend.WriteImmatureBlock(block) 135 | Graviton_backend.Writing = 0 136 | if err != nil { 137 | log.Printf("[Unlocker] Failed to credit rewards for round %v: %v", block.RoundKey(), err) 138 | UnlockerErrorLogger.Printf("[Unlocker] Failed to credit rewards for round %v: %v", block.RoundKey(), err) 139 | return 140 | } 141 | 142 | log.Printf("[Unlocker] IMMATURE %v", block.RoundKey()) 143 | UnlockerInfoLogger.Printf("[Unlocker] IMMATURE %v", block.RoundKey()) 144 | } 145 | } 146 | 147 | func (u *BlockUnlocker) unlockAndCreditMiners(s *StratumServer) { 148 | miningInfo, err := u.rpc.GetInfo() 149 | if err != nil { 150 | log.Printf("[Unlocker] Unable to get current blockchain height from node: %v", err) 151 | UnlockerErrorLogger.Printf("[Unlocker] Unable to get current blockchain height from node: %v", err) 152 | return 153 | } 154 | currentHeight := miningInfo.Height 155 | 156 | // Graviton DB 157 | immatureBlocksFound := Graviton_backend.GetBlocksFound("immature") 158 | 159 | if immatureBlocksFound == nil { 160 | log.Printf("[Unlocker] No immature blocks to credit miners") 161 | return 162 | } 163 | 164 | immatureBlocks := immatureBlocksFound.MinedBlocks 165 | var immature []*BlockDataGrav 166 | 167 | // Set immature to the blocks that are lower or equal to depth counter 168 | for _, value := range immatureBlocks { 169 | if value.Height <= currentHeight-u.config.Depth { 170 | immature = append(immature, value) 171 | } 172 | } 173 | 174 | if len(immature) == 0 { 175 | log.Printf("[Unlocker] No immature blocks to credit miners") 176 | return 177 | } 178 | 179 | result, err := u.unlockCandidatesGrav(immature, "immature") 180 | if err != nil { 181 | log.Printf("[Unlocker] Failed to unlock blocks: %v", err) 182 | UnlockerErrorLogger.Printf("[Unlocker] Failed to unlock blocks: %v", err) 183 | return 184 | } 185 | log.Printf("[Unlocker] Unlocked %v blocks, %v orphans", result.blocks, result.orphans) 186 | UnlockerInfoLogger.Printf("[Unlocker] Unlocked %v blocks, %v orphans", result.blocks, result.orphans) 187 | 188 | if len(result.orphanedBlocks) > 0 { 189 | writeWait, _ := time.ParseDuration("10ms") 190 | for Graviton_backend.Writing == 1 { 191 | //log.Printf("[Unlocker-writeorphanedblocks] GravitonDB is writing... sleeping for %v...", writeWait) 192 | //StorageInfoLogger.Printf("[Unlocker-writeorphanedblocks] GravitonDB is writing... 
sleeping for %v...", writeWait) 193 | time.Sleep(writeWait) 194 | } 195 | Graviton_backend.Writing = 1 196 | err = Graviton_backend.WriteOrphanedBlocks(result.orphanedBlocks) 197 | Graviton_backend.Writing = 0 198 | if err != nil { 199 | log.Printf("[Unlocker] Failed to insert orphaned blocks into backend: %v", err) 200 | UnlockerErrorLogger.Printf("[Unlocker] Failed to insert orphaned blocks into backend: %v", err) 201 | return 202 | } else { 203 | log.Printf("[Unlocker] Inserted %v orphaned blocks to backend", result.orphans) 204 | UnlockerInfoLogger.Printf("[Unlocker] Inserted %v orphaned blocks to backend", result.orphans) 205 | } 206 | } 207 | 208 | totalRevenue := new(big.Rat) 209 | totalMinersProfit := new(big.Rat) 210 | totalPoolProfit := new(big.Rat) 211 | 212 | for _, block := range result.maturedBlocks { 213 | revenue, minersProfit, poolProfit, roundRewards, err := u.calculateRewardsGrav(s, block) 214 | if err != nil { 215 | log.Printf("[Unlocker] Failed to calculate rewards for round %v: %v", block.RoundKey(), err) 216 | UnlockerErrorLogger.Printf("[Unlocker] Failed to calculate rewards for round %v: %v", block.RoundKey(), err) 217 | return 218 | } 219 | 220 | writeWait, _ := time.ParseDuration("10ms") 221 | for Graviton_backend.Writing == 1 { 222 | //log.Printf("[Unlocker-writematuredblocks] GravitonDB is writing... sleeping for %v...", writeWait) 223 | //StorageInfoLogger.Printf("[Unlocker-writematuredblocks] GravitonDB is writing... sleeping for %v...", writeWait) 224 | time.Sleep(writeWait) 225 | } 226 | Graviton_backend.Writing = 1 227 | err = Graviton_backend.WriteMaturedBlocks(block) 228 | Graviton_backend.Writing = 0 229 | if err != nil { 230 | log.Printf("[Unlocker] Failed to credit rewards for round %v: %v", block.RoundKey(), err) 231 | UnlockerErrorLogger.Printf("[Unlocker] Failed to credit rewards for round %v: %v", block.RoundKey(), err) 232 | return 233 | } 234 | 235 | // Write pending payments to graviton db 236 | total := int64(0) 237 | for login, amount := range roundRewards { 238 | total += amount 239 | 240 | info := &PaymentPending{} 241 | info.Address = login 242 | info.Amount = uint64(amount) 243 | info.Timestamp = util.MakeTimestamp() / 1000 244 | 245 | writeWait, _ := time.ParseDuration("10ms") 246 | for Graviton_backend.Writing == 1 { 247 | //log.Printf("[Unlocker-writependingpayments] GravitonDB is writing... sleeping for %v...", writeWait) 248 | //StorageInfoLogger.Printf("[Unlocker-writependingpayments] GravitonDB is writing... 
sleeping for %v...", writeWait)
249 | 				time.Sleep(writeWait)
250 | 			}
251 | 			Graviton_backend.Writing = 1
252 | 			infoErr := Graviton_backend.WritePendingPayments(info)
253 | 			Graviton_backend.Writing = 0
254 | 			if infoErr != nil {
255 | 				log.Printf("[Unlocker] Graviton DB err: %v", infoErr)
256 | 				UnlockerErrorLogger.Printf("[Unlocker] Graviton DB err: %v", infoErr)
257 | 			}
258 | 		}
259 | 		// To be used later, total taken from db func, will be used for "pool" balance/payment stats
260 | 		_ = total
261 | 
262 | 		totalRevenue.Add(totalRevenue, revenue)
263 | 		totalMinersProfit.Add(totalMinersProfit, minersProfit)
264 | 		totalPoolProfit.Add(totalPoolProfit, poolProfit)
265 | 
266 | 		logEntry := fmt.Sprintf(
267 | 			"[Unlocker] MATURED %v: revenue %v, minersProfit %v, poolProfit %v",
268 | 			block.RoundKey(),
269 | 			revenue.FloatString(8),
270 | 			minersProfit.FloatString(8),
271 | 			poolProfit.FloatString(8),
272 | 		)
273 | 
274 | 		entries := []string{logEntry}
275 | 		for login, reward := range roundRewards {
276 | 			entries = append(entries, fmt.Sprintf("\tREWARD %v: %v: %v", block.RoundKey(), login, reward))
277 | 		}
278 | 		log.Print(strings.Join(entries, "\n"))
279 | 		UnlockerInfoLogger.Print(strings.Join(entries, "\n"))
280 | 	}
281 | 
282 | 	log.Printf(
283 | 		"[Unlocker] MATURE SESSION: totalRevenue %v, totalMinersProfit %v, totalPoolProfit %v",
284 | 		totalRevenue.FloatString(8),
285 | 		totalMinersProfit.FloatString(8),
286 | 		totalPoolProfit.FloatString(8),
287 | 	)
288 | 	UnlockerInfoLogger.Printf("[Unlocker] MATURE SESSION: totalRevenue %v, totalMinersProfit %v, totalPoolProfit %v", totalRevenue.FloatString(8), totalMinersProfit.FloatString(8), totalPoolProfit.FloatString(8))
289 | }
290 | 
291 | func (u *BlockUnlocker) unlockCandidatesGrav(candidates []*BlockDataGrav, blockType string) (*UnlockResultGrav, error) {
292 | 	result := &UnlockResultGrav{}
293 | 
294 | 	for _, candidate := range candidates {
295 | 		orphan := true
296 | 
297 | 		hash := candidate.Hash
298 | 
299 | 		block, err := u.rpc.GetBlockByHash(hash)
300 | 		if err != nil {
301 | 			log.Printf("[Unlocker] Error while retrieving block %s from node: %v", hash, err)
302 | 			UnlockerErrorLogger.Printf("[Unlocker] Error while retrieving block %s from node: %v", hash, err)
303 | 			return nil, err
304 | 		}
305 | 		if block == nil {
306 | 			UnlockerErrorLogger.Printf("[Unlocker] Error while retrieving block %s from node, wrong node hash", hash)
307 | 			return nil, fmt.Errorf("[Unlocker] Error while retrieving block %s from node, wrong node hash", hash)
308 | 		}
309 | 
310 | 		if matchCandidateGrav(block, candidate) {
311 | 			orphan = false
312 | 			result.blocks++
313 | 
314 | 			err = u.handleBlockGrav(block, candidate, blockType)
315 | 			if err != nil {
316 | 				return nil, err
317 | 			}
318 | 			result.maturedBlocks = append(result.maturedBlocks, candidate)
319 | 			log.Printf("[Unlocker] Mature block %v with %v tx, hash: %v", candidate.Height, block.BlockHeader.Txcount, candidate.Hash)
320 | 			UnlockerInfoLogger.Printf("[Unlocker] Mature block %v with %v tx, hash: %v", candidate.Height, block.BlockHeader.Txcount, candidate.Hash)
321 | 			continue // process the remaining candidates rather than stopping at the first match
322 | 		}
323 | 
329 | 		// Block is lost, we didn't find any valid block in a blockchain
330 | 		if orphan {
331 | 			result.orphans++
332 | 			candidate.Orphan = true
333 | 			result.orphanedBlocks = append(result.orphanedBlocks, candidate)
334 | 			log.Printf("[Unlocker] Orphaned block %v:%v", candidate.RoundHeight, candidate.Nonce)
335 | 			UnlockerInfoLogger.Printf("[Unlocker] Orphaned block %v:%v", candidate.RoundHeight,
candidate.Nonce) 336 | } 337 | } 338 | return result, nil 339 | } 340 | 341 | func matchCandidateGrav(block *rpc.GetBlockHashReply, candidate *BlockDataGrav) bool { 342 | return len(candidate.Hash) > 0 && strings.EqualFold(candidate.Hash, block.BlockHeader.Hash) 343 | } 344 | 345 | func (u *BlockUnlocker) handleBlockGrav(block *rpc.GetBlockHashReply, candidate *BlockDataGrav, blockType string) error { 346 | reward := block.BlockHeader.Reward 347 | 348 | candidate.Height = block.BlockHeader.Height 349 | candidate.Orphan = false 350 | candidate.Hash = block.BlockHeader.Hash 351 | candidate.Reward = reward 352 | return nil 353 | } 354 | 355 | func (u *BlockUnlocker) calculateRewardsGrav(s *StratumServer, block *BlockDataGrav) (*big.Rat, *big.Rat, *big.Rat, map[string]int64, error) { 356 | // Write miner stats - force a write to ensure latest stats are in db 357 | log.Printf("[Unlocker] Storing miner stats") 358 | UnlockerInfoLogger.Printf("[Unlocker] Storing miner stats") 359 | 360 | writeWait, _ := time.ParseDuration("10ms") 361 | for Graviton_backend.Writing == 1 { 362 | //log.Printf("[Unlocker-writeminerstats] GravitonDB is writing... sleeping for %v...", writeWait) 363 | //StorageInfoLogger.Printf("[Unlocker-writeminerstats] GravitonDB is writing... sleeping for %v...", writeWait) 364 | time.Sleep(writeWait) 365 | } 366 | Graviton_backend.Writing = 1 367 | err := Graviton_backend.WriteMinerStats(s.miners, s.hashrateExpiration) 368 | err2 := Graviton_backend.UpdatePoolRoundStats(s.miners, false) 369 | Graviton_backend.Writing = 0 370 | if err != nil { 371 | log.Printf("[Unlocker] Err storing miner stats: %v", err) 372 | UnlockerErrorLogger.Printf("[Unlocker] Err storing miner stats: %v", err) 373 | } 374 | if err2 != nil { 375 | log.Printf("[Unlocker] Err storing miner round stats: %v", err2) 376 | UnlockerErrorLogger.Printf("[Unlocker] Err storing miner round stats: %v", err2) 377 | } 378 | revenue := new(big.Rat).SetUint64(block.Reward) 379 | minersProfit, poolProfit := chargeFee(revenue, u.config.PoolFee) 380 | 381 | var shares map[string]int64 382 | var totalroundshares int64 383 | 384 | if block.Solo { 385 | rewards := make(map[string]int64) 386 | rewards[block.Address] += int64(block.Reward) 387 | return revenue, minersProfit, poolProfit, rewards, nil 388 | } else { 389 | shares, totalroundshares, err = Graviton_backend.GetRoundShares(block.RoundHeight) 390 | log.Printf("[Unlocker-calculateRewardsGrav] [round shares] shares: %v, totalroundshares: %v", shares, totalroundshares) 391 | UnlockerInfoLogger.Printf("[Unlocker-calculateRewardsGrav] [round shares] shares: %v, totalroundshares: %v", shares, totalroundshares) 392 | if err != nil { 393 | return nil, nil, nil, nil, err 394 | } 395 | } 396 | 397 | rewards := calculateRewardsForSharesGrav(s, shares, totalroundshares, minersProfit) 398 | 399 | if len(rewards) == 0 { 400 | rewards[block.Address] += int64(block.Reward) 401 | log.Printf("[Unlocker] No shares stored for this round, rewarding block amount (%v) to miner (%v) who found block.", block.Reward, block.Address) 402 | UnlockerInfoLogger.Printf("[Unlocker] No shares stored for this round, rewarding block amount (%v) to miner (%v) who found block.", block.Reward, block.Address) 403 | } 404 | 405 | if block.ExtraReward != nil { 406 | extraReward := new(big.Rat).SetInt(block.ExtraReward) 407 | poolProfit.Add(poolProfit, extraReward) 408 | revenue.Add(revenue, extraReward) 409 | } 410 | 411 | return revenue, minersProfit, poolProfit, rewards, nil 412 | } 413 | 414 | func 
calculateRewardsForSharesGrav(s *StratumServer, shares map[string]int64, total int64, reward *big.Rat) map[string]int64 { 415 | rewards := make(map[string]int64) 416 | 417 | for login, n := range shares { 418 | if n != 0 { 419 | // Split away for workers, paymentIDs etc. just to compound the shares associated with a given address 420 | address, _, paymentID, _, _, _ := s.splitLoginString(login) 421 | 422 | percent := big.NewRat(n, total) 423 | workerReward := new(big.Rat).Mul(reward, percent) 424 | workerRewardInt, _ := strconv.ParseInt(workerReward.FloatString(0), 10, 64) 425 | if paymentID != "" { 426 | combinedAddr := address + s.config.Stratum.PaymentID.AddressSeparator + paymentID 427 | rewards[combinedAddr] += workerRewardInt 428 | } else { 429 | rewards[address] += workerRewardInt 430 | } 431 | } 432 | } 433 | return rewards 434 | } 435 | 436 | // Returns new value after fee deduction and fee value. 437 | func chargeFee(value *big.Rat, fee float64) (*big.Rat, *big.Rat) { 438 | feePercent := new(big.Rat).SetFloat64(fee / 100) 439 | feeValue := new(big.Rat).Mul(value, feePercent) 440 | return new(big.Rat).Sub(value, feeValue), feeValue 441 | } 442 | 443 | func logFileOutUnlocker(lType string) *log.Logger { 444 | var logFileName string 445 | if lType == "ERROR" { 446 | logFileName = "logs/unlockerError.log" 447 | } else { 448 | logFileName = "logs/unlocker.log" 449 | } 450 | os.Mkdir("logs", 0705) 451 | f, err := os.OpenFile(logFileName, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0705) 452 | if err != nil { 453 | panic(err) 454 | } 455 | 456 | logType := lType + ": " 457 | l := log.New(f, logType, log.LstdFlags|log.Lmicroseconds) 458 | return l 459 | } 460 | -------------------------------------------------------------------------------- /util/util.go: -------------------------------------------------------------------------------- 1 | package util 2 | 3 | import ( 4 | "encoding/hex" 5 | "log" 6 | "math/big" 7 | "os" 8 | "strings" 9 | "time" 10 | "unicode/utf8" 11 | 12 | "github.com/deroproject/derosuite/address" 13 | "github.com/deroproject/derosuite/astrobwt" 14 | "github.com/deroproject/derosuite/blockchain" 15 | "github.com/deroproject/derosuite/crypto" 16 | "github.com/deroproject/derosuite/cryptonight" 17 | ) 18 | 19 | var Diff1 = StringToBig("0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF") 20 | 21 | var UtilInfoLogger = logFileOutUtil("INFO") 22 | var UtilErrorLogger = logFileOutUtil("ERROR") 23 | 24 | func StringToBig(h string) *big.Int { 25 | n := new(big.Int) 26 | n.SetString(h, 0) 27 | return n 28 | } 29 | 30 | func MakeTimestamp() int64 { 31 | return time.Now().UnixNano() / int64(time.Millisecond) 32 | } 33 | 34 | func GetTargetHex(diff int64) string { 35 | padded := make([]byte, 32) 36 | 37 | diffBuff := new(big.Int).Div(Diff1, big.NewInt(diff)).Bytes() 38 | copy(padded[32-len(diffBuff):], diffBuff) 39 | buff := padded[0:4] 40 | targetHex := hex.EncodeToString(reverse(buff)) 41 | return targetHex 42 | } 43 | 44 | func GetHashDifficulty(hashBytes []byte) (*big.Int, bool) { 45 | diff := new(big.Int) 46 | diff.SetBytes(reverse(hashBytes)) 47 | 48 | // Check for broken result, empty string or zero hex value 49 | if diff.Cmp(new(big.Int)) == 0 { 50 | return nil, false 51 | } 52 | return diff.Div(Diff1, diff), true 53 | } 54 | 55 | func ValidateAddressNonDERO(addy string, poolAddy string) bool { 56 | prefix, _ := utf8.DecodeRuneInString(addy) 57 | poolPrefix, _ := utf8.DecodeRuneInString(poolAddy) 58 | if prefix != poolPrefix { 59 | log.Printf("[Util] Address 
prefix (%v) and pool address prefix (%v) do not match. Invalid address.", prefix, poolPrefix)
60 | 		UtilErrorLogger.Printf("[Util] Address prefix (%v) and pool address prefix (%v) do not match. Invalid address.", prefix, poolPrefix)
61 | 		return false
62 | 	}
63 | 	addyRune := []rune(addy)
64 | 	poolAddyRune := []rune(poolAddy)
65 | 	// Validating only the first 2 characters, since they usually match in other coins. TODO: handle this properly in the future, or skip this portion altogether
66 | 	poolAddyNetwork := string(poolAddyRune[0:2])
67 | 
68 | 	if string(addyRune[0:2]) != poolAddyNetwork {
69 | 		log.Printf("[Util] Invalid address, pool address and supplied address don't match.")
70 | 		UtilErrorLogger.Printf("[Util] Invalid address, pool address and supplied address don't match.")
71 | 		return false
72 | 	}
73 | 
74 | 	return true
75 | }
76 | 
77 | func ValidateAddress(addy string, poolAddy string) bool {
78 | 	prefix, _ := utf8.DecodeRuneInString(addy)
79 | 	poolPrefix, _ := utf8.DecodeRuneInString(poolAddy)
80 | 	if prefix != poolPrefix {
81 | 		log.Printf("[Util] Address prefix (%v) and pool address prefix (%v) do not match. Invalid address.", prefix, poolPrefix)
82 | 		UtilErrorLogger.Printf("[Util] Address prefix (%v) and pool address prefix (%v) do not match. Invalid address.", prefix, poolPrefix)
83 | 		return false
84 | 	}
85 | 	addyRune := []rune(addy)
86 | 	poolAddyRune := []rune(poolAddy)
87 | 	// Validating only first 3 (dET or dER) since possibly integrated addrs could be dETi or dERi and pool addr could be either dETi, dERi, dETo, dERo [i for integrated]
88 | 	poolAddyNetwork := string(poolAddyRune[0:3])
89 | 
90 | 	if string(addyRune[0:3]) != poolAddyNetwork {
91 | 		log.Printf("[Util] Invalid address, pool address and supplied address don't match testnet(dETo)/mainnet(dERo).")
92 | 		UtilErrorLogger.Printf("[Util] Invalid address, pool address and supplied address don't match testnet(dETo)/mainnet(dERo).")
93 | 		return false
94 | 	}
95 | 
96 | 	// Call NewAddress to confirm address validation from "github.com/deroproject/derosuite/address"
97 | 	_, err := address.NewAddress(strings.TrimSpace(addy))
98 | 	if err != nil {
99 | 		log.Printf("[Util] Address validation failed for '%s': %s", addy, err)
100 | 		UtilErrorLogger.Printf("[Util] Address validation failed for '%s': %s", addy, err)
101 | 		return false
102 | 	}
103 | 
104 | 	return true
105 | }
106 | 
107 | func reverse(src []byte) []byte {
108 | 	dst := make([]byte, len(src))
109 | 	for i := len(src); i > 0; i-- {
110 | 		dst[len(src)-i] = src[i-1]
111 | 	}
112 | 	return dst
113 | }
114 | 
115 | func CryptonightHash(shareBuff []byte, diff big.Int) bool {
116 | 	var powhash crypto.Hash
117 | 
118 | 	hash := cryptonight.SlowHash(shareBuff)
119 | 	copy(powhash[:], hash[:])
120 | 	checkPowHashBig := blockchain.CheckPowHashBig(powhash, &diff)
121 | 
122 | 	return checkPowHashBig
123 | }
124 | 
125 | func AstroBWTHash(shareBuff []byte, diff, setDiff big.Int) (bool, bool) {
126 | 	var powhash crypto.Hash
127 | 	var data astrobwt.Data
128 | 
129 | 	//hash, success := astrobwt.POW_optimized_v2(shareBuff, max_pow_size, &data)
130 | 	hash, _ := astrobwt.POW_optimized_v2(shareBuff[:], astrobwt.MAX_LENGTH, &data)
131 | 	/*
132 | 		if !success || hash[len(hash)-1] != 0 {
133 | 			return false, false
134 | 		}
135 | 	*/
136 | 
137 | 	copy(powhash[:], hash[:])
138 | 
139 | 	success := blockchain.CheckPowHashBig(powhash, &setDiff)
140 | 	checkPowHashBig := blockchain.CheckPowHashBig(powhash, &diff)
141 | 
142 | 	return checkPowHashBig, success
143 | }
144 | 
145 | func logFileOutUtil(lType string) *log.Logger {
146 | 	var
logFileName string
147 | 	if lType == "ERROR" {
148 | 		logFileName = "logs/utilError.log"
149 | 	} else {
150 | 		logFileName = "logs/util.log"
151 | 	}
152 | 	os.Mkdir("logs", 0705)
153 | 	f, err := os.OpenFile(logFileName, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0705)
154 | 	if err != nil {
155 | 		panic(err)
156 | 	}
157 | 
158 | 	logType := lType + ": "
159 | 	l := log.New(f, logType, log.LstdFlags|log.Lmicroseconds)
160 | 	return l
161 | }
--------------------------------------------------------------------------------
/website/site/config.js:
--------------------------------------------------------------------------------
1 | var api = "http://127.0.0.1:8082/api";
2 | var discord = "https://discord.gg/H95TJDp";
--------------------------------------------------------------------------------
/website/site/favicon.ico:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nelbert442/dero-golang-pool/16720d6dff23141be654d41cfd6a9172f48753ab/images/poolBlock.PNG
--------------------------------------------------------------------------------
/website/site/index.html:
--------------------------------------------------------------------------------
Pool.Dero.Network

Stats bar
    Network: N/A
    Difficulty: N/A
    Height: N/A
    Pool: N/A
    Solo: N/A
    Stats Updated
--------------------------------------------------------------------------------
/website/site/pages/events.html:
--------------------------------------------------------------------------------
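The site pages, this one included, are static HTML that page scripts populate from the
pool API whose base URL is set in config.js above. A minimal Go sketch of polling that
API (the /stats endpoint path here is an assumption for illustration, not confirmed by
this section):

    package main

    import (
    	"encoding/json"
    	"fmt"
    	"net/http"
    )

    func main() {
    	// Base URL taken from website/site/config.js; the endpoint path is hypothetical.
    	resp, err := http.Get("http://127.0.0.1:8082/api/stats")
    	if err != nil {
    		panic(err)
    	}
    	defer resp.Body.Close()

    	var stats map[string]interface{}
    	if err := json.NewDecoder(resp.Body).Decode(&stats); err != nil {
    		panic(err)
    	}
    	fmt.Println(stats)
    }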
Reward Event
    Total Payments: 0
    Reward Amount: 0 DERO

Event Dates
    Start: N/A
    End: N/A

Reward Criteria
    Miner Uptime/Day: NaN%

Event payments table: Time Sent | Amount | Winner
--------------------------------------------------------------------------------
/website/site/pages/getting_started.html:
--------------------------------------------------------------------------------
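The page below documents the address separators the stratum server parses out of a
miner's login (the real logic lives in splitLoginString in the stratum package, with
the separator characters configured in config.json). A simplified, self-contained
sketch of that splitting; parsing order and helper names here are illustrative:

    package main

    import (
    	"fmt"
    	"strings"
    )

    // splitLogin strips the solo prefix and the %, @, . and + suffixes, matching
    // the login formats documented on the getting-started page.
    func splitLogin(login string) (addr, paymentID, fixedDiff, worker, donate string, solo bool) {
    	if strings.HasPrefix(login, "solo~") {
    		solo, login = true, strings.TrimPrefix(login, "solo~")
    	}
    	if i := strings.IndexByte(login, '%'); i >= 0 {
    		login, donate = login[:i], login[i+1:]
    	}
    	if i := strings.IndexByte(login, '@'); i >= 0 {
    		login, worker = login[:i], login[i+1:]
    	}
    	if i := strings.IndexByte(login, '.'); i >= 0 {
    		login, fixedDiff = login[:i], login[i+1:]
    	}
    	if i := strings.IndexByte(login, '+'); i >= 0 {
    		login, paymentID = login[:i], login[i+1:]
    	}
    	return login, paymentID, fixedDiff, worker, donate, solo
    }

    func main() {
    	addr, pid, diff, worker, donate, solo := splitLogin("walletaddress+paymentID@worker%1")
    	fmt.Println(addr, pid, diff, worker, donate, solo)
    }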
Connection Details
    Mining Pool Address:
    Algorithm:

Username
    This is your wallet address
    Solo Mining: solo~walletaddress
    Exchange Payment ID: walletaddress+paymentID
    Difficulty Locking: walletaddress.diff
    Worker Defining: walletaddress@worker
    Hash Donation: walletaddress%1

Mining Ports
    Ports table: Port | Starting Difficulty | Description

Donations
    Donation Address:
    Donation Description:

Mining Applications
    Generate your custom configuration to mine on our pool
    Solo Mining (toggle)

    Apps table: App Name | Architecture | Description | Download | Configuration

    DERO Miner | CPU | Official DERO CPU Mining App | Coming Soon | See more

        dero-miner-windows-amd64.exe --wallet-address=YOUR_WALLET_ADDRESS --pool-address=POOL_HOST:PORT

    XMRig | CPU & GPU (NVIDIA) | Easy to use CPU + GPU Mining App | Download | See more

        "pools": [
            {
                "algo": "",
                "coin": "",
                "url": "POOL_HOST:PORT",
                "user": "YOUR_WALLET_ADDRESS",
                "rig_id": null,
                "pass": "",
                "nicehash": false,
                "tls": false, /* Set to true if you are using an SSL port */
                "tls-fingerprint": null
            }
        ],
--------------------------------------------------------------------------------
/website/site/pages/home.html:
--------------------------------------------------------------------------------
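The home page below advertises the 'May Mining Madness' event; per the events page
earlier, rewards go to miners who meet an uptime-per-day criterion. A sketch of that
qualification check (function name and the 0.9 threshold are illustrative, not the
pool's actual event code):

    package main

    import "fmt"

    // qualifies reports whether a miner was seen online for at least the
    // required fraction of the day.
    func qualifies(secondsOnline, secondsInDay int64, minPercent float64) bool {
    	return float64(secondsOnline) >= minPercent*float64(secondsInDay)
    }

    func main() {
    	fmt.Println(qualifies(80000, 86400, 0.9)) // true: roughly 92.6% uptime
    }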
Events!
    Thank you for your support for this mining pool, we are implementing 'May Mining Madness'!
    What do you have to do to participate?

Connected Miners (POOL/SOLO)
    Miners: 0/0
    Workers: 0/0

Payments
    Minimum Payout: 0
    Pool Fee: NaN%

Blocks
    Total Blocks Mined: 0

Last Hash
    N/A (0)

Charts: Hashrate | Difficulty | Miners | Workers
--------------------------------------------------------------------------------
/website/site/pages/payments.html:
--------------------------------------------------------------------------------
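The rows on this page come from payment records; per unlocker.go above, each matured
round first produces one pending payment per miner, with the amount in atomic units
and a timestamp in seconds (MakeTimestamp()/1000). A self-contained sketch of that
record (the local struct just mirrors the PaymentPending fields used in unlocker.go):

    package main

    import (
    	"fmt"
    	"time"
    )

    type pendingPayment struct {
    	Address   string
    	Amount    uint64 // atomic units of DERO
    	Timestamp int64  // seconds since epoch
    }

    func main() {
    	p := pendingPayment{Address: "walletaddress", Amount: 12345, Timestamp: time.Now().Unix()}
    	fmt.Printf("%+v\n", p)
    }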
Payment Stats
    Total Payments: 0
    Total Miners Paid: 0

Payment Settings
    Pool Fee: NaN%
    Minimum Payout: 0

Payout Detail
    Payout Scheme: PROP
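A worked sketch of the PROP scheme reported above, mirroring chargeFee and
calculateRewardsForSharesGrav in unlocker.go: the pool fee is deducted from the block
reward, then each miner receives reward * (their round shares / total round shares).
The rounding here truncates, unlike the FloatString-based rounding in unlocker.go:

    package main

    import (
    	"fmt"
    	"math/big"
    )

    func propSplit(reward uint64, feePercent float64, shares map[string]int64) map[string]int64 {
    	revenue := new(big.Rat).SetUint64(reward)
    	fee := new(big.Rat).SetFloat64(feePercent / 100)
    	minersProfit := new(big.Rat).Sub(revenue, new(big.Rat).Mul(revenue, fee))

    	var total int64
    	for _, n := range shares {
    		total += n
    	}

    	rewards := make(map[string]int64)
    	for login, n := range shares {
    		cut := new(big.Rat).Mul(minersProfit, big.NewRat(n, total))
    		v, _ := new(big.Float).SetRat(cut).Int64()
    		rewards[login] = v
    	}
    	return rewards
    }

    func main() {
    	// 100000 atomic units at a 0.1% pool fee: minerA gets 74925, minerB gets 24975
    	fmt.Println(propSplit(100000, 0.1, map[string]int64{"minerA": 3, "minerB": 1}))
    }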
Payments table: Time Sent | Transaction Hash | Amount | Mixin | Fee | Payees
--------------------------------------------------------------------------------
/website/site/pages/poolblocks.html:
--------------------------------------------------------------------------------
Block Details
    Total Blocks Mined: 0
    Maturity Depth Requirement: 0

Blocks table: Time Found | Reward | Height | Difficulty | Block Hash | Miner | Effort | Status
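The Status column above tracks the maturity rule from unlocker.go: an immature block
is only credited once the chain has grown the configured depth past it, i.e.
blockHeight <= currentHeight - depth. A minimal sketch of that check:

    package main

    import "fmt"

    func isMature(blockHeight, currentHeight, depth uint64) bool {
    	// Guard against unsigned underflow before applying the depth window.
    	return currentHeight >= depth && blockHeight <= currentHeight-depth
    }

    func main() {
    	fmt.Println(isMature(1000, 1059, 60)) // false: only 59 blocks on top
    	fmt.Println(isMature(1000, 1060, 60)) // true: depth reached
    }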
--------------------------------------------------------------------------------
/website/site/pages/workerstats.html:
--------------------------------------------------------------------------------
Your Stats & Payment History

Workers Statistics
    Workers table: Status | Worker Id | Hash Rate | Last Share Submitted | Total Round Hashes Submitted | Estimated Payment (next block) | Total Hashes Donated
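The Estimated Payment column above follows from the PROP scheme: a worker's estimate
is its portion of the current round's hashes applied to the after-fee block reward.
A sketch with illustrative numbers:

    package main

    import "fmt"

    func estimatePayment(yourRoundHashes, totalRoundHashes int64, rewardAfterFee float64) float64 {
    	if totalRoundHashes == 0 {
    		return 0
    	}
    	return rewardAfterFee * float64(yourRoundHashes) / float64(totalRoundHashes)
    }

    func main() {
    	fmt.Println(estimatePayment(2500, 10000, 1.234)) // 0.3085 DERO
    }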
Payments History
    Pending Payment
        Pending: 0
    Payments table: Time Sent | Transaction Hash | Amount | Fee | Mixin
89 | 90 | 91 | 322 | -------------------------------------------------------------------------------- /website/website.go: -------------------------------------------------------------------------------- 1 | package website 2 | 3 | import ( 4 | "log" 5 | "net/http" 6 | "os" 7 | 8 | "github.com/Nelbert442/dero-golang-pool/pool" 9 | ) 10 | 11 | var WebsiteInfoLogger = logFileOutWebsite("INFO") 12 | var WebsiteErrorLogger = logFileOutWebsite("ERROR") 13 | 14 | func NewWebsite(cfg *pool.Website) { 15 | fileServer := http.FileServer(http.Dir("./website/site")) 16 | http.Handle("/", fileServer) 17 | 18 | // If SSL is enabled, configure for SSL and HTTP. Else just run HTTP 19 | if cfg.SSL { 20 | go func() { 21 | log.Printf("[Website] Starting website at port %v\n", cfg.Port) 22 | WebsiteInfoLogger.Printf("[Website] Starting website at port %v\n", cfg.Port) 23 | 24 | addr := ":" + cfg.Port 25 | err := http.ListenAndServe(addr, nil) 26 | if err != nil { 27 | log.Printf("[Website] Error starting http server at %v", addr) 28 | WebsiteErrorLogger.Printf("[Website] Error starting http server at %v", addr) 29 | WebsiteErrorLogger.Printf("%v", err) 30 | log.Fatal(err) 31 | } 32 | }() 33 | 34 | log.Printf("[Website] Starting SSL website at port %v\n", cfg.SSLPort) 35 | WebsiteInfoLogger.Printf("[Website] Starting SSL website at port %v\n", cfg.SSLPort) 36 | 37 | addr := ":" + cfg.SSLPort 38 | err := http.ListenAndServeTLS(addr, cfg.CertFile, cfg.KeyFile, nil) 39 | if err != nil { 40 | log.Printf("[Website] Error starting https server at %v", addr) 41 | WebsiteErrorLogger.Printf("[Website] Error starting https server at %v", addr) 42 | WebsiteErrorLogger.Printf("%v", err) 43 | log.Fatal(err) 44 | } 45 | } else { 46 | log.Printf("[Website] Starting website at port %v\n", cfg.Port) 47 | WebsiteInfoLogger.Printf("[Website] Starting website at port %v\n", cfg.Port) 48 | 49 | addr := ":" + cfg.Port 50 | err := http.ListenAndServe(addr, nil) 51 | if err != nil { 52 | log.Printf("[Website] Error starting http server at %v", addr) 53 | WebsiteErrorLogger.Printf("[Website] Error starting http server at %v", addr) 54 | WebsiteErrorLogger.Printf("%v", err) 55 | log.Fatal(err) 56 | } 57 | } 58 | } 59 | 60 | func logFileOutWebsite(lType string) *log.Logger { 61 | var logFileName string 62 | if lType == "ERROR" { 63 | logFileName = "logs/websiteError.log" 64 | } else { 65 | logFileName = "logs/website.log" 66 | } 67 | os.Mkdir("logs", 0705) 68 | f, err := os.OpenFile(logFileName, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0705) 69 | if err != nil { 70 | panic(err) 71 | } 72 | 73 | logType := lType + ": " 74 | l := log.New(f, logType, log.LstdFlags|log.Lmicroseconds) 75 | return l 76 | } 77 | --------------------------------------------------------------------------------