├── clib └── readme.md ├── server ├── proxy.ico ├── config.json ├── go.mod ├── base58 │ ├── cov_report.sh │ ├── base58bench_test.go │ ├── doc.go │ ├── README.md │ ├── genalphabet.go │ ├── alphabet.go │ ├── example_test.go │ ├── base58check.go │ ├── base58check_test.go │ ├── base58_test.go │ └── base58.go ├── stratum │ ├── errors.go │ ├── server.go │ ├── codec.go │ └── client.go ├── proxy │ ├── detect.go │ ├── uniuri.go │ ├── director.go │ ├── share.go │ ├── mining.go │ ├── job.go │ └── proxy.go ├── xdag │ ├── time.go │ ├── block_test.go │ ├── block.go │ └── connect.go ├── tcp │ ├── server.go │ └── worker.go ├── go.sum ├── logger │ └── logger.go ├── main.go ├── utils │ └── safemap.go └── config │ └── config.go ├── .gitignore └── README.md /clib/readme.md: -------------------------------------------------------------------------------- 1 | ## remove native crypto clib -------------------------------------------------------------------------------- /server/proxy.ico: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XDagger/xmrig2xdag/HEAD/server/proxy.ico -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .idea 2 | .vscode 3 | .DS_Store 4 | cmake-build*/ 5 | build 6 | main 7 | xmrig2xdag 8 | go_build_xmrig2xdag 9 | clib/*.a 10 | *.exe 11 | *.log 12 | *.out 13 | *._* -------------------------------------------------------------------------------- /server/config.json: -------------------------------------------------------------------------------- 1 | { 2 | "strport": 3232, 3 | "url": "pool.xdag.org:13656", 4 | "log": "proxy.log", 5 | "tls": false, 6 | "debug": false, 7 | "testnet": false, 8 | "socks5": "", 9 | "ratelimit": 10, 10 | "try_pool_times": 3, 11 | "try_delay_seconds": 10, 12 | "exit_on_pool_down": false 13 | } -------------------------------------------------------------------------------- /server/go.mod: -------------------------------------------------------------------------------- 1 | module github.com/swordlet/xmrig2xdag 2 | 3 | go 1.17 4 | 5 | require ( 6 | github.com/didip/tollbooth v4.0.2+incompatible 7 | github.com/kelseyhightower/envconfig v1.4.0 8 | github.com/pkg/errors v0.9.1 9 | github.com/powerman/rpc-codec v1.2.2 10 | github.com/sourcegraph/jsonrpc2 v0.1.0 11 | golang.org/x/net v0.0.0-20220225172249-27dd8689420f 12 | ) 13 | 14 | require ( 15 | github.com/patrickmn/go-cache v2.1.0+incompatible // indirect 16 | golang.org/x/time v0.3.0 // indirect 17 | ) 18 | -------------------------------------------------------------------------------- /server/base58/cov_report.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | # This script uses gocov to generate a test coverage report. 4 | # The gocov tool my be obtained with the following command: 5 | # go get github.com/axw/gocov/gocov 6 | # 7 | # It will be installed to $GOPATH/bin, so ensure that location is in your $PATH. 8 | 9 | # Check for gocov. 10 | type gocov >/dev/null 2>&1 11 | if [ $? -ne 0 ]; then 12 | echo >&2 "This script requires the gocov tool." 
13 | echo >&2 "You may obtain it with the following command:" 14 | echo >&2 "go get github.com/axw/gocov/gocov" 15 | exit 1 16 | fi 17 | gocov test | gocov report 18 | -------------------------------------------------------------------------------- /server/stratum/errors.go: -------------------------------------------------------------------------------- 1 | package stratum 2 | 3 | import "github.com/powerman/rpc-codec/jsonrpc2" 4 | 5 | var ( 6 | // Actual returned error may have different message. 7 | errParse = jsonrpc2.NewError(-32700, "parse error") 8 | errRequest = jsonrpc2.NewError(-32600, "invalid request") 9 | errMethod = jsonrpc2.NewError(-32601, "method not found") 10 | errParams = jsonrpc2.NewError(-32602, "invalid params") 11 | errInternal = jsonrpc2.NewError(-32603, "internal error") 12 | errServer = jsonrpc2.NewError(-32000, "server error") 13 | errServerError = jsonrpc2.NewError(-32001, "jsonrpc2.Error: json.Marshal failed") 14 | ) 15 | -------------------------------------------------------------------------------- /server/proxy/detect.go: -------------------------------------------------------------------------------- 1 | package proxy 2 | 3 | import ( 4 | "math" 5 | "os" 6 | "time" 7 | 8 | "github.com/swordlet/xmrig2xdag/config" 9 | "github.com/swordlet/xmrig2xdag/xdag" 10 | ) 11 | 12 | func PoolDetect() { 13 | p := &Proxy{ 14 | ID: 0, 15 | aliveSince: time.Now(), 16 | currentJob: &Job{}, 17 | PrevJobID: NewLen(28), 18 | submissions: make(chan *share), 19 | done: make(chan int), 20 | ready: true, 21 | lastSend: time.Now(), 22 | miniResult: math.MaxUint64, 23 | notify: make(chan []byte, 2), 24 | address: detectAddr, 25 | } 26 | timer := time.NewTicker(10 * time.Minute) 27 | for { 28 | <-timer.C 29 | if poolIsDown.Load() > 0 { 30 | if config.Get().ExitOnPoolDown { 31 | os.Exit(1) 32 | } 33 | p.fieldIn = 0 34 | p.fieldOut = 0 35 | p.recvCount = 0 36 | p.isClosed = false 37 | for len(p.done) > 0 { 38 | <-p.done 39 | } 40 | for len(p.notify) > 0 { 41 | <-p.notify 42 | } 43 | eofCount.Store(0) 44 | xdag.PoolDown.Store(0) 45 | go p.Run(detectProxy) 46 | } 47 | 48 | } 49 | } 50 | -------------------------------------------------------------------------------- /server/xdag/time.go: -------------------------------------------------------------------------------- 1 | package xdag 2 | 3 | import ( 4 | "fmt" 5 | "time" 6 | ) 7 | 8 | const ( 9 | // MainEra is XDAG era start time 10 | MainEra uint64 = 0x16940000000 11 | ) 12 | 13 | // MainTime returns a time period index, where a period is 64 seconds long 14 | func MainTime(t uint64) uint64 { 15 | return t >> 16 16 | } 17 | 18 | // gets XDAG timestamp of current time 19 | func GetXTimestamp() uint64 { 20 | t := time.Now().UTC().UnixNano() 21 | sec := t / 1e9 22 | usec := (t - sec*1e9) / 1e3 23 | xmsec := (usec << 10) / 1e6 24 | return uint64(sec)<<10 | uint64(xmsec) 25 | } 26 | 27 | // CurrentMainTime returns a time period index of current time 28 | func CurrentMainTime() uint64 { 29 | return MainTime(GetXTimestamp()) 30 | } 31 | 32 | // StartMainTime returns the time period index corresponding to the start of the network 33 | func StartMainTime() uint64 { 34 | return MainTime(MainEra) 35 | } 36 | 37 | // Xtime2str converts xtime_t to string representation 38 | func Xtime2str(t uint64) string { 39 | msec := ((t & 0x3ff) * 1e3) >> 10 40 | tm := time.Unix(int64(t>>10), 0) 41 | return tm.Format("2006-01-02 15:04:05") + fmt.Sprintf(".%03d", msec) 42 | } 43 | -------------------------------------------------------------------------------- 
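The helpers in xdag/time.go above encode an XDAG timestamp as the Unix second shifted left by 10 bits, with the low 10 bits carrying the fractional part in 1/1024ths of a second, and MainTime collapses that into a 64-second period index (t >> 16). A minimal usage sketch of these helpers follows; it is a hypothetical standalone program, not a file from this repository:

```go
package main

import (
	"fmt"

	"github.com/swordlet/xmrig2xdag/xdag"
)

func main() {
	ts := xdag.GetXTimestamp() // Unix seconds << 10 | 1024ths of a second
	fmt.Println("xdag timestamp:", ts)
	fmt.Println("as string:     ", xdag.Xtime2str(ts))
	fmt.Println("main time:     ", xdag.MainTime(ts)) // 64-second period index (ts >> 16)
	fmt.Println("periods since the start of the network:",
		xdag.CurrentMainTime()-xdag.StartMainTime())
}
```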
/server/base58/base58bench_test.go: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2013-2014 The btcsuite developers 2 | // Use of this source code is governed by an ISC 3 | // license that can be found in the LICENSE file. 4 | 5 | package base58_test 6 | 7 | import ( 8 | "bytes" 9 | "testing" 10 | 11 | "github.com/swordlet/xmrig2xdag/base58" 12 | ) 13 | 14 | var ( 15 | raw5k = bytes.Repeat([]byte{0xff}, 5000) 16 | raw100k = bytes.Repeat([]byte{0xff}, 100*1000) 17 | encoded5k = base58.Encode(raw5k) 18 | encoded100k = base58.Encode(raw100k) 19 | ) 20 | 21 | func BenchmarkBase58Encode_5K(b *testing.B) { 22 | b.SetBytes(int64(len(raw5k))) 23 | for i := 0; i < b.N; i++ { 24 | base58.Encode(raw5k) 25 | } 26 | } 27 | 28 | func BenchmarkBase58Encode_100K(b *testing.B) { 29 | b.SetBytes(int64(len(raw100k))) 30 | for i := 0; i < b.N; i++ { 31 | base58.Encode(raw100k) 32 | } 33 | } 34 | 35 | func BenchmarkBase58Decode_5K(b *testing.B) { 36 | b.SetBytes(int64(len(encoded5k))) 37 | for i := 0; i < b.N; i++ { 38 | base58.Decode(encoded5k) 39 | } 40 | } 41 | 42 | func BenchmarkBase58Decode_100K(b *testing.B) { 43 | b.SetBytes(int64(len(encoded100k))) 44 | for i := 0; i < b.N; i++ { 45 | base58.Decode(encoded100k) 46 | } 47 | } 48 | -------------------------------------------------------------------------------- /server/base58/doc.go: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2014 The btcsuite developers 2 | // Use of this source code is governed by an ISC 3 | // license that can be found in the LICENSE file. 4 | 5 | /* 6 | Package base58 provides an API for working with modified base58 and Base58Check 7 | encodings. 8 | 9 | Modified Base58 Encoding 10 | 11 | Standard base58 encoding is similar to standard base64 encoding except, as the 12 | name implies, it uses a 58 character alphabet which results in an alphanumeric 13 | string and allows some characters which are problematic for humans to be 14 | excluded. Due to this, there can be various base58 alphabets. 15 | 16 | The modified base58 alphabet used by Bitcoin, and hence this package, omits the 17 | 0, O, I, and l characters that look the same in many fonts and are therefore 18 | hard to humans to distinguish. 19 | 20 | Base58Check Encoding Scheme 21 | 22 | The Base58Check encoding scheme is primarily used for Bitcoin addresses at the 23 | time of this writing, however it can be used to generically encode arbitrary 24 | byte arrays into human-readable strings along with a version byte that can be 25 | used to differentiate the same payload. For Bitcoin addresses, the extra 26 | version is used to differentiate the network of otherwise identical public keys 27 | which helps prevent using an address intended for one network on another. 28 | */ 29 | package base58 30 | -------------------------------------------------------------------------------- /server/base58/README.md: -------------------------------------------------------------------------------- 1 | base58 2 | ========== 3 | 4 | [![Build Status](http://img.shields.io/travis/btcsuite/btcutil.svg)](https://travis-ci.org/btcsuite/btcutil) 5 | [![ISC License](http://img.shields.io/badge/license-ISC-blue.svg)](http://copyfree.org) 6 | [![GoDoc](https://img.shields.io/badge/godoc-reference-blue.svg)](http://godoc.org/github.com/btcsuite/btcd/btcutil/base58) 7 | 8 | Package base58 provides an API for encoding and decoding to and from the 9 | modified base58 encoding. 
It also provides an API to do Base58Check encoding, 10 | as described [here](https://en.bitcoin.it/wiki/Base58Check_encoding). 11 | 12 | A comprehensive suite of tests is provided to ensure proper functionality. 13 | 14 | ## Installation and Updating 15 | 16 | ```bash 17 | $ go get -u github.com/btcsuite/btcd/btcutil/base58 18 | ``` 19 | 20 | ## Examples 21 | 22 | * [Decode Example](http://godoc.org/github.com/btcsuite/btcd/btcutil/base58#example-Decode) 23 | Demonstrates how to decode modified base58 encoded data. 24 | * [Encode Example](http://godoc.org/github.com/btcsuite/btcd/btcutil/base58#example-Encode) 25 | Demonstrates how to encode data using the modified base58 encoding scheme. 26 | * [CheckDecode Example](http://godoc.org/github.com/btcsuite/btcd/btcutil/base58#example-CheckDecode) 27 | Demonstrates how to decode Base58Check encoded data. 28 | * [CheckEncode Example](http://godoc.org/github.com/btcsuite/btcd/btcutil/base58#example-CheckEncode) 29 | Demonstrates how to encode data using the Base58Check encoding scheme. 30 | 31 | ## License 32 | 33 | Package base58 is licensed under the [copyfree](http://copyfree.org) ISC 34 | License. 35 | -------------------------------------------------------------------------------- /server/tcp/server.go: -------------------------------------------------------------------------------- 1 | package tcp 2 | 3 | import ( 4 | "crypto/tls" 5 | "net" 6 | "strconv" 7 | "time" 8 | 9 | "github.com/swordlet/xmrig2xdag/config" 10 | "github.com/swordlet/xmrig2xdag/logger" 11 | ) 12 | 13 | func StartServer() { 14 | tcpPort := config.Get().StratumPort 15 | // TODO expose bind address? 16 | portStr := ":" + strconv.Itoa(tcpPort) 17 | 18 | logger.Get().Debug("Starting TCP listener on port: ", portStr) 19 | var listener net.Listener 20 | var listenErr error 21 | if config.Get().Tls { 22 | cert, err := tls.LoadX509KeyPair(config.Get().CertFile, config.Get().KeyFile) 23 | if err != nil { 24 | logger.Get().Fatal("Unable to open cert file ", config.Get().CertFile, " or key file ", 25 | config.Get().KeyFile) 26 | return 27 | } 28 | tlsConfig := &tls.Config{Certificates: []tls.Certificate{cert}} 29 | listener, listenErr = tls.Listen("tcp", portStr, tlsConfig) 30 | } else { 31 | listener, listenErr = net.Listen("tcp", portStr) 32 | } 33 | 34 | if listenErr != nil { 35 | logger.Get().Fatal("Unable to listen for tcp connections on port ", portStr, 36 | " Listen failed with error: ", listenErr) 37 | return 38 | } 39 | // rl := config.Get().RateLimit 40 | // if rl == 0 { 41 | // rl = 1 42 | // } 43 | // ra := rate.Every(25 * time.Millisecond) 44 | // limit := rate.NewLimiter(ra, rl) 45 | for { 46 | conn, err := listener.Accept() 47 | if err != nil { 48 | logger.Get().Println("Unable to accept connection: ", err) 49 | } 50 | conn.SetReadDeadline(time.Now().Add(90 * time.Second)) 51 | 52 | // if !limit.Allow() { 53 | // logger.Get().Println("Out of rate limit:", rl, "per 25 ms.") 54 | // conn.Close() 55 | // continue 56 | // } 57 | go SpawnWorker(conn) 58 | } 59 | } 60 | -------------------------------------------------------------------------------- /server/base58/genalphabet.go: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2015 The btcsuite developers 2 | // Use of this source code is governed by an ISC 3 | // license that can be found in the LICENSE file. 
4 | 5 | //+build ignore 6 | 7 | package main 8 | 9 | import ( 10 | "bytes" 11 | "io" 12 | "log" 13 | "os" 14 | "strconv" 15 | ) 16 | 17 | var ( 18 | start = []byte(`// Copyright (c) 2015 The btcsuite developers 19 | // Use of this source code is governed by an ISC 20 | // license that can be found in the LICENSE file. 21 | 22 | // AUTOGENERATED by genalphabet.go; do not edit. 23 | 24 | package base58 25 | 26 | const ( 27 | // alphabet is the modified base58 alphabet used by Bitcoin. 28 | alphabet = "123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz" 29 | 30 | alphabetIdx0 = '1' 31 | ) 32 | 33 | var b58 = [256]byte{`) 34 | 35 | end = []byte(`}`) 36 | 37 | alphabet = []byte("123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz") 38 | tab = []byte("\t") 39 | invalid = []byte("255") 40 | comma = []byte(",") 41 | space = []byte(" ") 42 | nl = []byte("\n") 43 | ) 44 | 45 | func write(w io.Writer, b []byte) { 46 | _, err := w.Write(b) 47 | if err != nil { 48 | log.Fatal(err) 49 | } 50 | } 51 | 52 | func main() { 53 | fi, err := os.Create("alphabet.go") 54 | if err != nil { 55 | log.Fatal(err) 56 | } 57 | defer fi.Close() 58 | 59 | write(fi, start) 60 | write(fi, nl) 61 | for i := byte(0); i < 32; i++ { 62 | write(fi, tab) 63 | for j := byte(0); j < 8; j++ { 64 | idx := bytes.IndexByte(alphabet, i*8+j) 65 | if idx == -1 { 66 | write(fi, invalid) 67 | } else { 68 | write(fi, strconv.AppendInt(nil, int64(idx), 10)) 69 | } 70 | write(fi, comma) 71 | if j != 7 { 72 | write(fi, space) 73 | } 74 | } 75 | write(fi, nl) 76 | } 77 | write(fi, end) 78 | write(fi, nl) 79 | } 80 | -------------------------------------------------------------------------------- /server/base58/alphabet.go: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2015 The btcsuite developers 2 | // Use of this source code is governed by an ISC 3 | // license that can be found in the LICENSE file. 4 | 5 | // AUTOGENERATED by genalphabet.go; do not edit. 6 | 7 | package base58 8 | 9 | const ( 10 | // alphabet is the modified base58 alphabet used by Bitcoin. 
11 | alphabet = "123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz" 12 | 13 | alphabetIdx0 = '1' 14 | ) 15 | 16 | var b58 = [256]byte{ 17 | 255, 255, 255, 255, 255, 255, 255, 255, 18 | 255, 255, 255, 255, 255, 255, 255, 255, 19 | 255, 255, 255, 255, 255, 255, 255, 255, 20 | 255, 255, 255, 255, 255, 255, 255, 255, 21 | 255, 255, 255, 255, 255, 255, 255, 255, 22 | 255, 255, 255, 255, 255, 255, 255, 255, 23 | 255, 0, 1, 2, 3, 4, 5, 6, 24 | 7, 8, 255, 255, 255, 255, 255, 255, 25 | 255, 9, 10, 11, 12, 13, 14, 15, 26 | 16, 255, 17, 18, 19, 20, 21, 255, 27 | 22, 23, 24, 25, 26, 27, 28, 29, 28 | 30, 31, 32, 255, 255, 255, 255, 255, 29 | 255, 33, 34, 35, 36, 37, 38, 39, 30 | 40, 41, 42, 43, 255, 44, 45, 46, 31 | 47, 48, 49, 50, 51, 52, 53, 54, 32 | 55, 56, 57, 255, 255, 255, 255, 255, 33 | 255, 255, 255, 255, 255, 255, 255, 255, 34 | 255, 255, 255, 255, 255, 255, 255, 255, 35 | 255, 255, 255, 255, 255, 255, 255, 255, 36 | 255, 255, 255, 255, 255, 255, 255, 255, 37 | 255, 255, 255, 255, 255, 255, 255, 255, 38 | 255, 255, 255, 255, 255, 255, 255, 255, 39 | 255, 255, 255, 255, 255, 255, 255, 255, 40 | 255, 255, 255, 255, 255, 255, 255, 255, 41 | 255, 255, 255, 255, 255, 255, 255, 255, 42 | 255, 255, 255, 255, 255, 255, 255, 255, 43 | 255, 255, 255, 255, 255, 255, 255, 255, 44 | 255, 255, 255, 255, 255, 255, 255, 255, 45 | 255, 255, 255, 255, 255, 255, 255, 255, 46 | 255, 255, 255, 255, 255, 255, 255, 255, 47 | 255, 255, 255, 255, 255, 255, 255, 255, 48 | 255, 255, 255, 255, 255, 255, 255, 255, 49 | } 50 | -------------------------------------------------------------------------------- /server/xdag/block_test.go: -------------------------------------------------------------------------------- 1 | package xdag 2 | 3 | import ( 4 | "encoding/hex" 5 | "fmt" 6 | "math/big" 7 | "testing" 8 | ) 9 | 10 | func TestAddress2Hash1(t *testing.T) { 11 | address := "NrD1QgI1ORuQtjwTniSwr 8pTnS8Rgdu" 12 | hash, err := Address2hash(address) 13 | if err != nil { 14 | t.Error(err.Error()) 15 | } else { 16 | fmt.Println(hex.EncodeToString(hash[:])) 17 | } 18 | } 19 | 20 | func TestAddress2Hash2(t *testing.T) { 21 | address := "YOUR_WALLET_ADDRESS" 22 | hash, err := Address2hash(address) 23 | if err != nil { 24 | t.Error(err.Error()) 25 | } else { 26 | fmt.Println(hex.EncodeToString(hash[:])) 27 | } 28 | } 29 | 30 | func TestAddress2Hash3(t *testing.T) { 31 | address := "Pzza6lHK4GrZoEEBWh9wNbDjaNgJhq6V" 32 | hash, err := Address2hash(address) 33 | if err != nil { 34 | t.Error(err.Error()) 35 | } else { 36 | fmt.Println(hex.EncodeToString(hash[:])) 37 | } 38 | } 39 | 40 | func TestHash2address(t *testing.T) { 41 | 42 | hash, err := hex.DecodeString("3f3cdaea51cae06ad9a041015a1f7035b0e368d80986ae950000000000000000") 43 | if err != nil { 44 | t.Error(err.Error()) 45 | } else { 46 | str := Hash2address(hash[:]) 47 | fmt.Println(str) 48 | } 49 | } 50 | 51 | func TestDifficulty(t *testing.T) { 52 | 53 | hashRate := 20.722698 * 1024 * 1024 // << 20 54 | 55 | h := big.NewInt(int64(hashRate)) 56 | 57 | h.Lsh(h, 6) // h.Lsh(h, 58) , 58-32-20 58 | 59 | //fmt.Println(h) 60 | //fmt.Printf("%032x\n", h) 61 | 62 | max128, _ := new(big.Int).SetString("ffffffffffffffffffffffffffffffff", 16) 63 | 64 | hash128 := new(big.Int).Div(max128, h) 65 | 66 | //s.Lsh(s, 32) // 58-32 67 | 68 | fmt.Printf("%032x\n", hash128) 69 | 70 | hash64 := new(big.Int).Rsh(hash128, 64) 71 | 72 | max64, _ := new(big.Int).SetString("ffffffffffffffff", 16) 73 | 74 | diff := new(big.Int).Div(max64, hash64) 75 | 76 | fmt.Printf("%016x\n", diff) 77 | 
fmt.Println(diff) 78 | } 79 | -------------------------------------------------------------------------------- /server/proxy/uniuri.go: -------------------------------------------------------------------------------- 1 | // Written in 2011-2014 by Dmitry Chestnykh 2 | // 3 | // The author(s) have dedicated all copyright and related and 4 | // neighboring rights to this software to the public domain 5 | // worldwide. Distributed without any warranty. 6 | // http://creativecommons.org/publicdomain/zero/1.0/ 7 | 8 | // https://github.com/dchest/uniuri 9 | 10 | package proxy 11 | 12 | import ( 13 | "crypto/rand" 14 | ) 15 | 16 | const ( 17 | // StdLen is a standard length of uniuri string to achive ~95 bits of entropy. 18 | StdLen = 16 19 | // UUIDLen is a length of uniuri string to achive ~119 bits of entropy, closest 20 | // to what can be losslessly converted to UUIDv4 (122 bits). 21 | UUIDLen = 20 22 | ) 23 | 24 | // StdChars is a set of standard characters allowed in uniuri string. 25 | var StdChars = []byte("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789") 26 | 27 | // NewLen returns a new random string of the provided length, consisting of 28 | // standard characters. 29 | func NewLen(length int) string { 30 | return NewLenChars(length, StdChars) 31 | } 32 | 33 | // NewLenChars returns a new random string of the provided length, consisting 34 | // of the provided byte slice of allowed characters (maximum 256). 35 | func NewLenChars(length int, chars []byte) string { 36 | if length == 0 { 37 | return "" 38 | } 39 | clen := len(chars) 40 | if clen < 2 || clen > 256 { 41 | panic("uniuri: wrong charset length for NewLenChars") 42 | } 43 | maxrb := 255 - (256 % clen) 44 | b := make([]byte, length) 45 | r := make([]byte, length+(length/4)) // storage for random bytes. 46 | i := 0 47 | for { 48 | if _, err := rand.Read(r); err != nil { 49 | panic("uniuri: error reading random bytes: " + err.Error()) 50 | } 51 | for _, rb := range r { 52 | c := int(rb) 53 | if c > maxrb { 54 | // Skip this number to avoid modulo bias. 55 | continue 56 | } 57 | b[i] = chars[c%clen] 58 | i++ 59 | if i == length { 60 | return string(b) 61 | } 62 | } 63 | } 64 | } 65 | -------------------------------------------------------------------------------- /server/base58/example_test.go: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2014 The btcsuite developers 2 | // Use of this source code is governed by an ISC 3 | // license that can be found in the LICENSE file. 4 | 5 | package base58_test 6 | 7 | import ( 8 | "fmt" 9 | 10 | "github.com/swordlet/xmrig2xdag/base58" 11 | ) 12 | 13 | // This example demonstrates how to decode modified base58 encoded data. 14 | func ExampleDecode() { 15 | // Decode example modified base58 encoded data. 16 | encoded := "25JnwSn7XKfNQ" 17 | decoded := base58.Decode(encoded) 18 | 19 | // Show the decoded data. 20 | fmt.Println("Decoded Data:", string(decoded)) 21 | 22 | // Output: 23 | // Decoded Data: Test data 24 | } 25 | 26 | // This example demonstrates how to encode data using the modified base58 27 | // encoding scheme. 28 | func ExampleEncode() { 29 | // Encode example data with the modified base58 encoding scheme. 30 | data := []byte("Test data") 31 | encoded := base58.Encode(data) 32 | 33 | // Show the encoded data. 34 | fmt.Println("Encoded Data:", encoded) 35 | 36 | // Output: 37 | // Encoded Data: 25JnwSn7XKfNQ 38 | } 39 | 40 | // This example demonstrates how to decode Base58Check encoded data. 
41 | func ExampleCheckDecode() { 42 | // Decode an example Base58Check encoded data. 43 | encoded := "1A1zP1eP5QGefi2DMPTfTL5SLmv7DivfNa" 44 | decoded, version, err := base58.CheckDecode(encoded) 45 | if err != nil { 46 | fmt.Println(err) 47 | return 48 | } 49 | 50 | // Show the decoded data. 51 | fmt.Printf("Decoded data: %x\n", decoded) 52 | fmt.Println("Version Byte:", version) 53 | 54 | // Output: 55 | // Decoded data: 62e907b15cbf27d5425399ebf6f0fb50ebb88f18 56 | // Version Byte: 0 57 | } 58 | 59 | // This example demonstrates how to encode data using the Base58Check encoding 60 | // scheme. 61 | func ExampleCheckEncode() { 62 | // Encode example data with the Base58Check encoding scheme. 63 | data := []byte("Test data") 64 | encoded := base58.CheckEncode(data, 0) 65 | 66 | // Show the encoded data. 67 | fmt.Println("Encoded Data:", encoded) 68 | 69 | // Output: 70 | // Encoded Data: 182iP79GRURMp7oMHDU 71 | } 72 | -------------------------------------------------------------------------------- /server/go.sum: -------------------------------------------------------------------------------- 1 | github.com/didip/tollbooth v4.0.2+incompatible h1:fVSa33JzSz0hoh2NxpwZtksAzAgd7zjmGO20HCZtF4M= 2 | github.com/didip/tollbooth v4.0.2+incompatible/go.mod h1:A9b0665CE6l1KmzpDws2++elm/CsuWBMa5Jv4WY0PEY= 3 | github.com/gorilla/websocket v1.4.1 h1:q7AeDBpnBk8AogcD4DSag/Ukw/KV+YhzLj2bP5HvKCM= 4 | github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= 5 | github.com/kelseyhightower/envconfig v1.4.0 h1:Im6hONhd3pLkfDFsbRgu68RDNkGF1r3dvMUtDTo2cv8= 6 | github.com/kelseyhightower/envconfig v1.4.0/go.mod h1:cccZRl6mQpaq41TPp5QxidR+Sa3axMbJDNb//FQX6Gg= 7 | github.com/patrickmn/go-cache v2.1.0+incompatible h1:HRMgzkcYKYpi3C8ajMPV8OFXaaRUnok+kx1WdO15EQc= 8 | github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ= 9 | github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= 10 | github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= 11 | github.com/powerman/rpc-codec v1.2.2 h1:BK0JScZivljhwW/vLLhZLtUgqSxc/CD3sHEs8LiwwKw= 12 | github.com/powerman/rpc-codec v1.2.2/go.mod h1:3Qr/y/+u3CwcSww9tfJMRn/95lB2qUdUeIQe7BYlLDo= 13 | github.com/sourcegraph/jsonrpc2 v0.1.0 h1:ohJHjZ+PcaLxDUjqk2NC3tIGsVa5bXThe1ZheSXOjuk= 14 | github.com/sourcegraph/jsonrpc2 v0.1.0/go.mod h1:ZafdZgk/axhT1cvZAPOhw+95nz2I/Ra5qMlU4gTRwIo= 15 | golang.org/x/net v0.0.0-20220225172249-27dd8689420f h1:oA4XRj0qtSt8Yo1Zms0CUlsT3KG69V2UGQWPBxujDmc= 16 | golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= 17 | golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 18 | golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 19 | golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= 20 | golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= 21 | golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= 22 | golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= 23 | golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= 24 | -------------------------------------------------------------------------------- /server/base58/base58check.go: 
-------------------------------------------------------------------------------- 1 | // Copyright (c) 2013-2014 The btcsuite developers 2 | // Use of this source code is governed by an ISC 3 | // license that can be found in the LICENSE file. 4 | 5 | package base58 6 | 7 | import ( 8 | "crypto/sha256" 9 | "errors" 10 | ) 11 | 12 | // ErrChecksum indicates that the checksum of a check-encoded string does not verify against 13 | // the checksum. 14 | var ErrChecksum = errors.New("checksum error") 15 | 16 | // ErrInvalidFormat indicates that the check-encoded string has an invalid format. 17 | var ErrInvalidFormat = errors.New("invalid format: version and/or checksum bytes missing") 18 | 19 | // checksum: first four bytes of sha256^2 20 | func checksum(input []byte) (cksum [4]byte) { 21 | h := sha256.Sum256(input) 22 | h2 := sha256.Sum256(h[:]) 23 | copy(cksum[:], h2[:4]) 24 | return 25 | } 26 | 27 | // CheckEncode prepends a version byte and appends a four byte checksum. 28 | func CheckEncode(input []byte, version byte) string { 29 | b := make([]byte, 0, 1+len(input)+4) 30 | b = append(b, version) 31 | b = append(b, input...) 32 | cksum := checksum(b) 33 | b = append(b, cksum[:]...) 34 | return Encode(b) 35 | } 36 | 37 | // CheckDecode decodes a string that was encoded with CheckEncode and verifies the checksum. 38 | func CheckDecode(input string) (result []byte, version byte, err error) { 39 | decoded := Decode(input) 40 | if len(decoded) < 5 { 41 | return nil, 0, ErrInvalidFormat 42 | } 43 | version = decoded[0] 44 | var cksum [4]byte 45 | copy(cksum[:], decoded[len(decoded)-4:]) 46 | if checksum(decoded[:len(decoded)-4]) != cksum { 47 | return nil, 0, ErrChecksum 48 | } 49 | payload := decoded[1 : len(decoded)-4] 50 | result = append(result, payload...) 51 | return 52 | } 53 | 54 | // ChkDec decodes a XDAGJ address that was encoded with CheckEncode and verifies the checksum. 55 | func ChkDec(input string) (result []byte, version byte, err error) { 56 | decoded := Decode(input) 57 | if len(decoded) < 5 { 58 | return nil, 0, ErrInvalidFormat 59 | } 60 | version = decoded[0] 61 | var cksum [4]byte 62 | copy(cksum[:], decoded[len(decoded)-4:]) 63 | if checksum(decoded[:len(decoded)-4]) != cksum { 64 | return nil, 0, ErrChecksum 65 | } 66 | payload := decoded[:] 67 | result = append(result, payload...) 68 | return 69 | } 70 | -------------------------------------------------------------------------------- /server/logger/logger.go: -------------------------------------------------------------------------------- 1 | package logger 2 | 3 | import ( 4 | "io" 5 | "log" 6 | "os" 7 | "sync" 8 | ) 9 | 10 | const ( 11 | // Info and Debug are the two possible logging levels 12 | // The only difference is the Debug method does nothing on info level 13 | Info = iota 14 | Debug 15 | ) 16 | 17 | var ( 18 | // defaults 19 | config = &Config{ 20 | os.Stdout, 21 | log.LstdFlags, 22 | Info, 23 | false, 24 | } 25 | 26 | // global logger singleton 27 | instance *Logger 28 | instantiation = sync.Once{} 29 | ) 30 | 31 | // Config allows selection of logger output, content and level (debug!) 
32 | // Setting Discard creates a logger that discards all output 33 | type Config struct { 34 | W io.Writer 35 | Flag, Level int 36 | Discard bool 37 | } 38 | 39 | // Logger wraps the standard logger and adds a debug level 40 | type Logger struct { 41 | *log.Logger 42 | Level int 43 | } 44 | 45 | type discardWriter struct{} 46 | 47 | func (*discardWriter) Write(p []byte) (int, error) { 48 | return len(p), nil 49 | } 50 | 51 | // Configure sets up the global logger. This should be called from the main thread 52 | // before the logger is created with Get 53 | func Configure(c *Config) { 54 | if c.Discard { 55 | config.Discard = c.Discard // which would be true 56 | config.W = &discardWriter{} 57 | } 58 | // The presence of a writer overrules the discard option 59 | if c.W != nil { 60 | config.W = c.W 61 | } 62 | if c.Flag != 0 { 63 | // flags start at 1 64 | config.Flag = c.Flag 65 | } 66 | config.Level = c.Level 67 | } 68 | 69 | // New makes a new logger with config. 70 | func New(c *Config) *Logger { 71 | return &Logger{ 72 | log.New(c.W, "[XDAG_PROXY] ", c.Flag), 73 | c.Level, 74 | } 75 | } 76 | 77 | // Get returns the global singleton logger 78 | func Get() *Logger { 79 | instantiation.Do(func() { 80 | instance = New(config) 81 | }) 82 | 83 | return instance 84 | } 85 | 86 | func (l *Logger) Debug(v ...interface{}) { 87 | if l.Level < Debug { 88 | return 89 | } 90 | l.Logger.Print(v...) 91 | } 92 | 93 | func (l *Logger) Debugf(format string, v ...interface{}) { 94 | if l.Level < Debug { 95 | return 96 | } 97 | l.Logger.Printf(format, v...) 98 | } 99 | 100 | func (l *Logger) Debugln(v ...interface{}) { 101 | if l.Level < Debug { 102 | return 103 | } 104 | 105 | l.Logger.Println(v...) 106 | } 107 | 108 | func CheckFileExist(filename string) bool { 109 | exist := true 110 | if _, err := os.Stat(filename); os.IsNotExist(err) { 111 | exist = false 112 | } 113 | return exist 114 | } 115 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # XMRig2XDAG 2 | 3 | XMRig2XDAG is a stratum proxy for Monero (XMR) miners mining XDAG coins. 4 | 5 | XMRig2XDAG is a translator between the XMR stratum protocol and the XDAG mining protocol, written in Go. 6 | 7 | XMRig can connect to an XDAG mining pool through the XMRig2XDAG proxy. 8 | 9 | XDAG and XMR use the same mining algorithm: RandomX. 10 | 11 | XMRig2XDAG works with the XMRig fork https://github.com/swordlet/xmrig/tree/xdag.
12 | 13 | # Build 14 | ## under the server folder 15 | 16 | $ go mod tidy 17 | $ go build 18 | 19 | # Guide 20 | start-up command lines and configuration files 21 | 22 | ## start up 23 | start the xmrig2xdag proxy and the xmrig miner 24 | 25 | ### proxy 26 | ./xmrig2xdag -c config.json 27 | 28 | ### miner 29 | ./xmrig -c config.json (run as administrator or root) 30 | 31 | ## config file 32 | configuration files for the xmrig2xdag proxy and xmrig 33 | 34 | ### xmrig2xdag config json file 35 | 36 | { 37 | "strport": 3232, // port for xmrig to connect 38 | "url": "equal.xdag.org:13656", // XDAG mining pool address and port 39 | "tls": false, // using SSL 40 | "debug": false, // printing debug info 41 | "testnet": false, // using test net 42 | "socks5": "" // SOCKS5 proxy address:port 43 | } 44 | 45 | ### xmrig config json file 46 | 47 | "pools": [ 48 | { 49 | "algo": "rx/xdag", // mining XDAG 50 | "coin": null, 51 | "url": "stratum+tcp://127.0.0.1:3232", // xmrig2xdag address and port 52 | "user": "YOUR_WALLET_ADDRESS", // your XDAG wallet address 53 | "pass": "your_miner_name", // naming your miner 54 | ..... 55 | } 56 | ] 57 | 58 | ## Arm or Android Mobile 59 | 60 | CPU must be above a Snapdragon 810 for Android 61 | 62 | install termux on Android and install scrcpy on a PC for better interaction 63 | 64 | in an Arm computer or Android mobile command line: 65 | 66 | apt update && apt upgrade && 67 | 68 | apt install -y git wget proot build-essential cmake libmicrohttpd && 69 | 70 | git clone -b xdag https://github.com/swordlet/xmrig --depth 1 && 71 | 72 | mkdir xmrig/build && 73 | 74 | cd xmrig/build && 75 | 76 | cmake -DWITH_HWLOC=OFF .. && 77 | 78 | make -j10 79 | 80 | edit config.json following the guide above 81 | 82 | ./xmrig -c config.json 83 | 84 | connect to the XDAG pool through the xmrig2xdag proxy running in the local network 85 | 86 | ## Acknowledgement 87 | https://github.com/xmrig/xmrig 88 | 89 | https://github.com/xmrig/xmrig-proxy 90 | 91 | https://github.com/trey-jones/stratum 92 | 93 | https://github.com/trey-jones/xmrwasp -------------------------------------------------------------------------------- /server/tcp/worker.go: -------------------------------------------------------------------------------- 1 | package tcp 2 | 3 | import ( 4 | "context" 5 | "net" 6 | "time" 7 | 8 | "github.com/swordlet/xmrig2xdag/logger" 9 | "github.com/swordlet/xmrig2xdag/proxy" 10 | "github.com/swordlet/xmrig2xdag/stratum" 11 | ) 12 | 13 | const ( 14 | workerTimeout = 1 * time.Minute 15 | jobSendTimeout = 30 * time.Second 16 | ) 17 | 18 | // Worker does the work (of mining, well more like accounting) 19 | type Worker struct { 20 | conn net.Conn 21 | id uint64 22 | p *proxy.Proxy 23 | 24 | // codec will be used directly for sending jobs 25 | // this is not ideal, and it would be nice to do this differently 26 | codec *stratum.DefaultServerCodec 27 | 28 | //jobs chan *proxy.Job 29 | } 30 | 31 | // SpawnWorker spawns a new TCP worker and adds it to a proxy 32 | func SpawnWorker(conn net.Conn) { 33 | w := &Worker{ 34 | conn: conn, 35 | //jobs: make(chan *proxy.Job), 36 | } 37 | ctx := context.WithValue(context.Background(), "worker", w) 38 | codec := stratum.NewDefaultServerCodecContext(ctx, w.Conn()) 39 | w.codec = codec.(*stratum.DefaultServerCodec) 40 | 41 | p := proxy.GetDirector().NextProxy() 42 | logger.Get().Debugln("New proxy.", p.ID) 43 | p.Add(w) // set worker's proxy 44 | 45 | // blocks until disconnect 46 | w.Proxy().SS.ServeCodec(codec) 47 | 48 | w.Disconnect() 49 | } 50 | 51 | func (w *Worker) Conn() net.Conn { 52 | return w.conn 53 | } 54 |
55 | func (w *Worker) SetConn(c net.Conn) { 56 | w.conn = c 57 | } 58 | 59 | // Worker interfaces 60 | 61 | func (w *Worker) ID() uint64 { 62 | return w.id 63 | } 64 | 65 | func (w *Worker) SetID(i uint64) { 66 | w.id = i 67 | } 68 | 69 | func (w *Worker) SetProxy(p *proxy.Proxy) { 70 | w.p = p 71 | } 72 | 73 | func (w *Worker) Proxy() *proxy.Proxy { 74 | return w.p 75 | } 76 | 77 | func (w *Worker) Disconnect() { 78 | if w.Conn() != nil { 79 | w.Conn().Close() 80 | } 81 | 82 | if w.p != nil { 83 | w.p.Remove(w) 84 | w.p = nil 85 | } 86 | 87 | } 88 | 89 | func (w *Worker) RemoveProxy() { 90 | if w.p != nil { 91 | logger.Get().Printf("proxy [%d] shutdown by login error\n", w.p.ID) 92 | w.p.Close() 93 | w.p = nil 94 | } 95 | } 96 | 97 | func (w *Worker) Close() { 98 | w.p = nil 99 | w.Conn().Close() 100 | 101 | } 102 | 103 | func (w *Worker) NewJob(j *proxy.Job) { 104 | err := w.codec.Notify("job", j) 105 | if err != nil { 106 | // logger.Get().Debugln("Error sending job to worker: ", err) 107 | w.Disconnect() 108 | } 109 | // other actions? shut down worker? 110 | } 111 | 112 | func (w *Worker) expectedHashes() uint32 { 113 | // this is a complete unknown at this time. 114 | return 0x7a120 115 | } 116 | -------------------------------------------------------------------------------- /server/base58/base58check_test.go: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2013-2014 The btcsuite developers 2 | // Use of this source code is governed by an ISC 3 | // license that can be found in the LICENSE file. 4 | 5 | package base58_test 6 | 7 | import ( 8 | "fmt" 9 | "testing" 10 | 11 | "github.com/swordlet/xmrig2xdag/base58" 12 | ) 13 | 14 | var checkEncodingStringTests = []struct { 15 | version byte 16 | in string 17 | out string 18 | }{ 19 | {20, "", "3MNQE1X"}, 20 | {20, " ", "B2Kr6dBE"}, 21 | {20, "-", "B3jv1Aft"}, 22 | {20, "0", "B482yuaX"}, 23 | {20, "1", "B4CmeGAC"}, 24 | {20, "-1", "mM7eUf6kB"}, 25 | {20, "11", "mP7BMTDVH"}, 26 | {20, "abc", "4QiVtDjUdeq"}, 27 | {20, "1234598760", "ZmNb8uQn5zvnUohNCEPP"}, 28 | {20, "abcdefghijklmnopqrstuvwxyz", "K2RYDcKfupxwXdWhSAxQPCeiULntKm63UXyx5MvEH2"}, 29 | {20, "00000000000000000000000000000000000000000000000000000000000000", "bi1EWXwJay2udZVxLJozuTb8Meg4W9c6xnmJaRDjg6pri5MBAxb9XwrpQXbtnqEoRV5U2pixnFfwyXC8tRAVC8XxnjK"}, 30 | } 31 | 32 | func TestBase58Check(t *testing.T) { 33 | for x, test := range checkEncodingStringTests { 34 | // test encoding 35 | if res := base58.CheckEncode([]byte(test.in), test.version); res != test.out { 36 | t.Errorf("CheckEncode test #%d failed: got %s, want: %s", x, res, test.out) 37 | } 38 | 39 | // test decoding 40 | res, version, err := base58.CheckDecode(test.out) 41 | switch { 42 | case err != nil: 43 | t.Errorf("CheckDecode test #%d failed with err: %v", x, err) 44 | 45 | case version != test.version: 46 | t.Errorf("CheckDecode test #%d failed: got version: %d want: %d", x, version, test.version) 47 | 48 | case string(res) != test.in: 49 | t.Errorf("CheckDecode test #%d failed: got: %s want: %s", x, res, test.in) 50 | } 51 | } 52 | 53 | // test the two decoding failure cases 54 | // case 1: checksum error 55 | _, _, err := base58.CheckDecode("3MNQE1Y") 56 | if err != base58.ErrChecksum { 57 | t.Error("Checkdecode test failed, expected ErrChecksum") 58 | } 59 | // case 2: invalid formats (string lengths below 5 mean the version byte and/or the checksum 60 | // bytes are missing). 
61 | testString := "" 62 | for len := 0; len < 4; len++ { 63 | testString += "x" 64 | _, _, err = base58.CheckDecode(testString) 65 | if err != base58.ErrInvalidFormat { 66 | t.Error("Checkdecode test failed, expected ErrInvalidFormat") 67 | } 68 | } 69 | 70 | } 71 | 72 | func TestChkDec(t *testing.T) { 73 | address := "4smXToYpMy1648T3PXpBRZ8zSey5c6Sy7" 74 | 75 | b, v, e := base58.ChkDec(address) 76 | if e != nil { 77 | t.Error(e) 78 | } 79 | fmt.Printf("%x, %d\n", b, len(b)) 80 | fmt.Printf("%x\n", v) 81 | 82 | b2, v2, e2 := base58.CheckDecode(address) 83 | if e2 != nil { 84 | t.Error(e2) 85 | } 86 | fmt.Printf("%x, %d\n", b2, len(b2)) 87 | fmt.Printf("%x\n", v2) 88 | 89 | } 90 | -------------------------------------------------------------------------------- /server/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "flag" 5 | "fmt" 6 | "io" 7 | "log" 8 | "os" 9 | 10 | // _ "net/http/pprof" 11 | 12 | "github.com/swordlet/xmrig2xdag/config" 13 | "github.com/swordlet/xmrig2xdag/logger" 14 | "github.com/swordlet/xmrig2xdag/proxy" 15 | "github.com/swordlet/xmrig2xdag/tcp" 16 | ) 17 | 18 | // func HelloWorld(w http.ResponseWriter, r *http.Request) { 19 | // fmt.Fprintln(w, "hello world") 20 | // } 21 | 22 | var ( 23 | version = "2.0.7" 24 | 25 | // cmd line options 26 | configFile *string 27 | logFile *os.File 28 | ) 29 | 30 | func printWelcomeMessage() { 31 | logger.Get().Println("************************************************************************") 32 | logger.Get().Printf("* XMR Stratum to XDAG Proxy \t\t\t v%s \t*\n", version) 33 | 34 | port := config.Get().StratumPort 35 | //var tls string 36 | //if config.Get().Tls { 37 | // tls = "tls" 38 | //} 39 | logger.Get().Printf("* Accepting XMRig Connections on port: \t\t %v\t\t*\n", port) 40 | 41 | statInterval := config.Get().StatInterval 42 | logger.Get().Printf("* Printing stats every: \t\t\t\t %v seconds\t*\n", statInterval) 43 | logger.Get().Println("************************************************************************") 44 | } 45 | 46 | func usage() { 47 | fmt.Printf("Usage: %s [-c CONFIG_PATH] \n", os.Args[0]) 48 | flag.PrintDefaults() 49 | } 50 | 51 | func setOptions() { 52 | configFile = flag.String("c", "config.json", "JSON file from which to read configuration values") 53 | flag.Parse() 54 | 55 | config.File = *configFile 56 | } 57 | 58 | func setupLogger() { 59 | lc := &logger.Config{W: nil} 60 | c := config.Get() 61 | if c.Debug { 62 | lc.Level = logger.Debug 63 | } 64 | var err error 65 | if c.LogFile != "" { 66 | if logger.CheckFileExist(c.LogFile) { 67 | logFile, err = os.OpenFile(c.LogFile, os.O_APPEND|os.O_RDWR, 0644) 68 | if err != nil { 69 | log.Fatal("could not open log file for writing: ", err) 70 | } 71 | } else { 72 | logFile, err = os.Create(c.LogFile) 73 | if err != nil { 74 | log.Fatal("could not create log file for writing: ", err) 75 | } 76 | } 77 | lc.W = io.MultiWriter(os.Stdout, logFile) 78 | } 79 | if c.DiscardLog { 80 | lc.Discard = true 81 | } 82 | logger.Configure(lc) 83 | logger.Get().Debugln("logger is configured") 84 | } 85 | 86 | func main() { 87 | setOptions() 88 | setupLogger() 89 | defer func() { 90 | if logFile != nil { 91 | logFile.Close() 92 | } 93 | }() 94 | 95 | flag.Usage = usage 96 | 97 | flag.Parse() 98 | if args := flag.Args(); len(args) > 1 && (args[1] == "help" || args[1] == "-h") { 99 | flag.Usage() 100 | return 101 | } 102 | config.File = *configFile 103 | 104 | holdOpen := make(chan bool, 1) 105 | 106 | 
go tcp.StartServer() 107 | go proxy.PoolDetect() 108 | 109 | printWelcomeMessage() 110 | 111 | <-holdOpen 112 | // http.HandleFunc("/", HelloWorld) 113 | 114 | // err := http.ListenAndServe(":8089", nil) 115 | // if err != nil { 116 | // fmt.Println(err) 117 | // } 118 | } 119 | -------------------------------------------------------------------------------- /server/stratum/server.go: -------------------------------------------------------------------------------- 1 | package stratum 2 | 3 | import ( 4 | "context" 5 | "encoding/json" 6 | "errors" 7 | "io" 8 | "net/http" 9 | "net/rpc" 10 | "sync" 11 | 12 | "github.com/didip/tollbooth" 13 | "github.com/swordlet/xmrig2xdag/config" 14 | ) 15 | 16 | type serverCodec struct { 17 | encmutex sync.Mutex // protects enc 18 | dec *json.Decoder // for reading JSON values 19 | enc *json.Encoder // for writing JSON values 20 | c io.ReadWriteCloser 21 | ctx context.Context 22 | 23 | // temporary work space 24 | req serverRequest 25 | } 26 | 27 | type serverRequest struct { 28 | Version string `json:"jsonrpc"` 29 | Method string `json:"method"` 30 | Params *json.RawMessage `json:"params"` 31 | ID *json.RawMessage `json:"id"` 32 | } 33 | 34 | func (r *serverRequest) reset() { 35 | r.Version = "" 36 | r.Method = "" 37 | r.Params = nil 38 | r.ID = nil 39 | } 40 | 41 | func (r *serverRequest) UnmarshalJSON(raw []byte) error { 42 | r.reset() 43 | type req *serverRequest 44 | if err := json.Unmarshal(raw, req(r)); err != nil { 45 | return errors.New("bad request") 46 | } 47 | 48 | var o = make(map[string]*json.RawMessage) 49 | if err := json.Unmarshal(raw, &o); err != nil { 50 | return errors.New("bad request") 51 | } 52 | // if o["type"] == nil { 53 | 54 | // return errors.New("bad request") 55 | // } 56 | _, okID := o["id"] 57 | _, okParams := o["params"] 58 | // if len(o) == 3 && !(okID || okParams) || len(o) == 4 && !(okID && okParams) || len(o) > 4 { 59 | // return errors.New("bad request") 60 | // } 61 | if okParams { 62 | if r.Params == nil || len(*r.Params) == 0 { 63 | return errors.New("bad request") 64 | } 65 | switch []byte(*r.Params)[0] { 66 | case '[', '{': 67 | default: 68 | return errors.New("bad request") 69 | } 70 | } 71 | if okID && r.ID == nil { 72 | r.ID = &null 73 | } 74 | if okID { 75 | if len(*r.ID) == 0 { 76 | return errors.New("bad request") 77 | } 78 | switch []byte(*r.ID)[0] { 79 | case 't', 'f', '{', '[': 80 | return errors.New("bad request") 81 | } 82 | } 83 | 84 | return nil 85 | } 86 | 87 | type serverResponse struct { 88 | Version string `json:"jsonrpc"` 89 | ID *json.RawMessage `json:"id"` 90 | Result interface{} `json:"result,omitempty"` 91 | Error interface{} `json:"error,omitempty"` 92 | } 93 | 94 | // public API 95 | 96 | type Server struct { 97 | *rpc.Server 98 | } 99 | 100 | func NewServer() *Server { 101 | s := &Server{ 102 | rpc.NewServer(), 103 | } 104 | return s 105 | } 106 | 107 | func (s *Server) ServeCodec(codec rpc.ServerCodec) { 108 | // defer codec.Close() 109 | s.Server.ServeCodec(codec) 110 | } 111 | 112 | func (s *Server) HandleHTTP(rpcPath, debugPath string) { 113 | limit := config.Get().RateLimit 114 | if limit == 0 { 115 | limit = 1 116 | } 117 | http.Handle(rpcPath, tollbooth.LimitFuncHandler(tollbooth.NewLimiter(float64(limit), nil), s.ServeHTTP)) 118 | // http.Handle(debugPath, debugHTTP{server}) 119 | } 120 | 121 | func (s *Server) ServeConn(ctx context.Context, conn io.ReadWriteCloser) { 122 | s.ServeCodec(NewDefaultServerCodecContext(ctx, conn)) 123 | } 124 | 
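Server above embeds *rpc.Server, so a receiver whose exported methods follow the standard net/rpc conventions can be registered on it, and each accepted connection is then handed to ServeConn, which serves JSON-RPC requests over that connection with the package's codec. A minimal wiring sketch follows; the Echo receiver, listener, and port are hypothetical and not taken from this repository:

```go
package main

import (
	"context"
	"net"

	"github.com/swordlet/xmrig2xdag/stratum"
)

// Echo is a placeholder receiver; its exported method follows the net/rpc
// rules (two pointer-compatible arguments, error return).
type Echo struct{}

func (e *Echo) Say(args *string, reply *string) error {
	*reply = *args
	return nil
}

func main() {
	srv := stratum.NewServer()
	if err := srv.Register(&Echo{}); err != nil { // Register is promoted from the embedded *rpc.Server
		panic(err)
	}

	ln, err := net.Listen("tcp", ":3232")
	if err != nil {
		panic(err)
	}
	for {
		conn, err := ln.Accept()
		if err != nil {
			continue
		}
		// One codec per connection; ServeConn blocks until the peer disconnects.
		go srv.ServeConn(context.Background(), conn)
	}
}
```

In this repository the codec is instead driven per connection from tcp.SpawnWorker, which builds a DefaultServerCodec and calls ServeCodec on the proxy's server.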
-------------------------------------------------------------------------------- /server/proxy/director.go: -------------------------------------------------------------------------------- 1 | package proxy 2 | 3 | import ( 4 | "sync" 5 | "sync/atomic" 6 | "time" 7 | 8 | "github.com/swordlet/xmrig2xdag/config" 9 | "github.com/swordlet/xmrig2xdag/logger" 10 | "github.com/swordlet/xmrig2xdag/utils" 11 | ) 12 | 13 | var ( 14 | directorInstance *Director 15 | directorInstantiation = sync.Once{} 16 | ) 17 | 18 | type Director struct { 19 | aliveSince time.Time 20 | statInterval time.Duration 21 | 22 | currentProxyID atomic.Uint64 23 | proxies *utils.SafeMap 24 | 25 | // stat tracking only 26 | lastTotalShares uint64 27 | deleteShares atomic.Uint64 28 | } 29 | 30 | func GetDirector() *Director { 31 | directorInstantiation.Do(func() { 32 | directorInstance = newDirector() 33 | }) 34 | return directorInstance 35 | } 36 | 37 | func newDirector() *Director { 38 | d := &Director{ 39 | aliveSince: time.Now(), 40 | statInterval: time.Duration(config.Get().StatInterval) * time.Second, 41 | proxies: utils.NewSafeMap(), 42 | } 43 | go d.run() 44 | 45 | return d 46 | } 47 | 48 | // Stats is a struct containing information about server uptime and activity, generated on demand 49 | type Stats struct { 50 | Timestamp time.Time 51 | Alive time.Duration 52 | Proxies int 53 | Workers int 54 | Shares uint64 55 | NewShares uint64 56 | } 57 | 58 | func (d *Director) addProxy() *Proxy { 59 | p := NewProxy(d.nextProxyID()) 60 | p.director = d 61 | d.proxies.Set(p.ID, p) 62 | return p 63 | } 64 | 65 | func (d *Director) run() { 66 | statPrinter := time.NewTicker(d.statInterval) 67 | defer statPrinter.Stop() 68 | for { 69 | <-statPrinter.C 70 | d.printStats() 71 | } 72 | } 73 | 74 | func (d *Director) printStats() { 75 | if poolIsDown.Load() > 0 { 76 | return 77 | } 78 | 79 | stats := d.GetStats() 80 | logger.Get().Printf(" uptime:%s \t proxies:%v \t workers:%v \t shares:%v(+%v)\n", 81 | stats.Alive, stats.Proxies, stats.Workers, stats.Shares, stats.NewShares) 82 | } 83 | 84 | func (d *Director) removeProxy(id uint64) { 85 | p, ok := d.proxies.Get(id) 86 | if ok { 87 | d.deleteShares.Add(p.(*Proxy).shares) 88 | } 89 | d.proxies.Del(id) 90 | } 91 | 92 | func (d *Director) nextProxyID() uint64 { 93 | return d.currentProxyID.Add(1) 94 | } 95 | 96 | func (d *Director) NextProxy() *Proxy { 97 | var pr *Proxy = d.addProxy() 98 | return pr 99 | } 100 | 101 | func (d *Director) GetStats() *Stats { 102 | totalProxies := 0 103 | totalWorkers := 0 104 | var totalSharesSubmitted uint64 105 | 106 | d.proxies.Range(func(key interface{}, p interface{}) bool { 107 | totalProxies++ 108 | totalWorkers++ 109 | if p != nil { 110 | totalSharesSubmitted += p.(*Proxy).shares 111 | } 112 | return true 113 | }) 114 | totalSharesSubmitted += d.deleteShares.Load() 115 | 116 | recentShares := totalSharesSubmitted - d.lastTotalShares 117 | d.lastTotalShares = totalSharesSubmitted 118 | duration := time.Since(d.aliveSince).Truncate(1 * time.Second) 119 | 120 | stats := &Stats{ 121 | Timestamp: time.Now(), 122 | Alive: duration, 123 | Proxies: totalProxies, 124 | Workers: totalWorkers, 125 | Shares: totalSharesSubmitted, 126 | NewShares: recentShares, 127 | } 128 | 129 | return stats 130 | } 131 | -------------------------------------------------------------------------------- /server/utils/safemap.go: -------------------------------------------------------------------------------- 1 | package utils 2 | 3 | import "sync" 4 | 5 | // 
https://github.com/zeromicro/go-zero/blob/master/core/collection/safemap.go 6 | const ( 7 | copyThreshold = 1000 8 | maxDeletion = 10000 9 | ) 10 | 11 | // SafeMap provides a map alternative to avoid memory leak. 12 | // This implementation is not needed until issue below fixed. 13 | // https://github.com/golang/go/issues/20135 14 | type SafeMap struct { 15 | lock sync.RWMutex 16 | deletionOld int 17 | deletionNew int 18 | dirtyOld map[uint64]interface{} 19 | dirtyNew map[uint64]interface{} 20 | } 21 | 22 | // NewSafeMap returns a SafeMap. 23 | func NewSafeMap() *SafeMap { 24 | return &SafeMap{ 25 | dirtyOld: make(map[uint64]interface{}), 26 | dirtyNew: make(map[uint64]interface{}), 27 | } 28 | } 29 | 30 | // Del deletes the value with the given key from m. 31 | func (m *SafeMap) Del(key uint64) { 32 | m.lock.Lock() 33 | if _, ok := m.dirtyOld[key]; ok { 34 | m.dirtyOld[key] = nil 35 | delete(m.dirtyOld, key) 36 | m.deletionOld++ 37 | } else if _, ok := m.dirtyNew[key]; ok { 38 | m.dirtyNew[key] = nil 39 | delete(m.dirtyNew, key) 40 | m.deletionNew++ 41 | } 42 | if m.deletionOld >= maxDeletion && len(m.dirtyOld) < copyThreshold { 43 | for k, v := range m.dirtyOld { 44 | m.dirtyNew[k] = v 45 | } 46 | m.dirtyOld = m.dirtyNew 47 | m.deletionOld = m.deletionNew 48 | m.dirtyNew = make(map[uint64]interface{}) 49 | m.deletionNew = 0 50 | } 51 | if m.deletionNew >= maxDeletion && len(m.dirtyNew) < copyThreshold { 52 | for k, v := range m.dirtyNew { 53 | m.dirtyOld[k] = v 54 | } 55 | m.dirtyNew = make(map[uint64]interface{}) 56 | m.deletionNew = 0 57 | } 58 | m.lock.Unlock() 59 | } 60 | 61 | // Get gets the value with the given key from m. 62 | func (m *SafeMap) Get(key uint64) (interface{}, bool) { 63 | m.lock.RLock() 64 | defer m.lock.RUnlock() 65 | 66 | if val, ok := m.dirtyOld[key]; ok { 67 | return val, true 68 | } 69 | 70 | val, ok := m.dirtyNew[key] 71 | return val, ok 72 | } 73 | 74 | // Range calls f sequentially for each key and value present in the map. 75 | // If f returns false, range stops the iteration. 76 | func (m *SafeMap) Range(f func(key, val interface{}) bool) { 77 | m.lock.RLock() 78 | defer m.lock.RUnlock() 79 | 80 | for k, v := range m.dirtyOld { 81 | if !f(k, v) { 82 | return 83 | } 84 | } 85 | for k, v := range m.dirtyNew { 86 | if !f(k, v) { 87 | return 88 | } 89 | } 90 | } 91 | 92 | // Set sets the value into m with the given key. 93 | func (m *SafeMap) Set(key uint64, value interface{}) { 94 | m.lock.Lock() 95 | if m.deletionOld <= maxDeletion { 96 | if _, ok := m.dirtyNew[key]; ok { 97 | m.dirtyNew[key] = nil 98 | delete(m.dirtyNew, key) 99 | m.deletionNew++ 100 | } 101 | m.dirtyOld[key] = value 102 | } else { 103 | if _, ok := m.dirtyOld[key]; ok { 104 | m.dirtyOld[key] = nil 105 | delete(m.dirtyOld, key) 106 | m.deletionOld++ 107 | } 108 | m.dirtyNew[key] = value 109 | } 110 | m.lock.Unlock() 111 | } 112 | 113 | // Size returns the size of m. 114 | func (m *SafeMap) Size() int { 115 | m.lock.RLock() 116 | size := len(m.dirtyOld) + len(m.dirtyNew) 117 | m.lock.RUnlock() 118 | return size 119 | } 120 | -------------------------------------------------------------------------------- /server/base58/base58_test.go: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2013-2017 The btcsuite developers 2 | // Use of this source code is governed by an ISC 3 | // license that can be found in the LICENSE file. 
4 | 5 | package base58_test 6 | 7 | import ( 8 | "bytes" 9 | "encoding/hex" 10 | "testing" 11 | 12 | "github.com/swordlet/xmrig2xdag/base58" 13 | ) 14 | 15 | var stringTests = []struct { 16 | in string 17 | out string 18 | }{ 19 | {"", ""}, 20 | {" ", "Z"}, 21 | {"-", "n"}, 22 | {"0", "q"}, 23 | {"1", "r"}, 24 | {"-1", "4SU"}, 25 | {"11", "4k8"}, 26 | {"abc", "ZiCa"}, 27 | {"1234598760", "3mJr7AoUXx2Wqd"}, 28 | {"abcdefghijklmnopqrstuvwxyz", "3yxU3u1igY8WkgtjK92fbJQCd4BZiiT1v25f"}, 29 | {"00000000000000000000000000000000000000000000000000000000000000", "3sN2THZeE9Eh9eYrwkvZqNstbHGvrxSAM7gXUXvyFQP8XvQLUqNCS27icwUeDT7ckHm4FUHM2mTVh1vbLmk7y"}, 30 | } 31 | 32 | var invalidStringTests = []struct { 33 | in string 34 | out string 35 | }{ 36 | {"0", ""}, 37 | {"O", ""}, 38 | {"I", ""}, 39 | {"l", ""}, 40 | {"3mJr0", ""}, 41 | {"O3yxU", ""}, 42 | {"3sNI", ""}, 43 | {"4kl8", ""}, 44 | {"0OIl", ""}, 45 | {"!@#$%^&*()-_=+~`", ""}, 46 | {"abcd\xd80", ""}, 47 | {"abcd\U000020BF", ""}, 48 | } 49 | 50 | var hexTests = []struct { 51 | in string 52 | out string 53 | }{ 54 | {"", ""}, 55 | {"61", "2g"}, 56 | {"626262", "a3gV"}, 57 | {"636363", "aPEr"}, 58 | {"73696d706c792061206c6f6e6720737472696e67", "2cFupjhnEsSn59qHXstmK2ffpLv2"}, 59 | {"00eb15231dfceb60925886b67d065299925915aeb172c06647", "1NS17iag9jJgTHD1VXjvLCEnZuQ3rJDE9L"}, 60 | {"516b6fcd0f", "ABnLTmg"}, 61 | {"bf4f89001e670274dd", "3SEo3LWLoPntC"}, 62 | {"572e4794", "3EFU7m"}, 63 | {"ecac89cad93923c02321", "EJDM8drfXA6uyA"}, 64 | {"10c8511e", "Rt5zm"}, 65 | {"00000000000000000000", "1111111111"}, 66 | {"000111d38e5fc9071ffcd20b4a763cc9ae4f252bb4e48fd66a835e252ada93ff480d6dd43dc62a641155a5", "123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz"}, 67 | {"000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f202122232425262728292a2b2c2d2e2f303132333435363738393a3b3c3d3e3f404142434445464748494a4b4c4d4e4f505152535455565758595a5b5c5d5e5f606162636465666768696a6b6c6d6e6f707172737475767778797a7b7c7d7e7f808182838485868788898a8b8c8d8e8f909192939495969798999a9b9c9d9e9fa0a1a2a3a4a5a6a7a8a9aaabacadaeafb0b1b2b3b4b5b6b7b8b9babbbcbdbebfc0c1c2c3c4c5c6c7c8c9cacbcccdcecfd0d1d2d3d4d5d6d7d8d9dadbdcdddedfe0e1e2e3e4e5e6e7e8e9eaebecedeeeff0f1f2f3f4f5f6f7f8f9fafbfcfdfeff", "1cWB5HCBdLjAuqGGReWE3R3CguuwSjw6RHn39s2yuDRTS5NsBgNiFpWgAnEx6VQi8csexkgYw3mdYrMHr8x9i7aEwP8kZ7vccXWqKDvGv3u1GxFKPuAkn8JCPPGDMf3vMMnbzm6Nh9zh1gcNsMvH3ZNLmP5fSG6DGbbi2tuwMWPthr4boWwCxf7ewSgNQeacyozhKDDQQ1qL5fQFUW52QKUZDZ5fw3KXNQJMcNTcaB723LchjeKun7MuGW5qyCBZYzA1KjofN1gYBV3NqyhQJ3Ns746GNuf9N2pQPmHz4xpnSrrfCvy6TVVz5d4PdrjeshsWQwpZsZGzvbdAdN8MKV5QsBDY"}, 68 | } 69 | 70 | func TestBase58(t *testing.T) { 71 | // Encode tests 72 | for x, test := range stringTests { 73 | tmp := []byte(test.in) 74 | if res := base58.Encode(tmp); res != test.out { 75 | t.Errorf("Encode test #%d failed: got: %s want: %s", 76 | x, res, test.out) 77 | continue 78 | } 79 | } 80 | 81 | // Decode tests 82 | for x, test := range hexTests { 83 | b, err := hex.DecodeString(test.in) 84 | if err != nil { 85 | t.Errorf("hex.DecodeString failed failed #%d: got: %s", x, test.in) 86 | continue 87 | } 88 | if res := base58.Decode(test.out); !bytes.Equal(res, b) { 89 | t.Errorf("Decode test #%d failed: got: %q want: %q", 90 | x, res, test.in) 91 | continue 92 | } 93 | } 94 | 95 | // Decode with invalid input 96 | for x, test := range invalidStringTests { 97 | if res := base58.Decode(test.in); string(res) != test.out { 98 | t.Errorf("Decode invalidString test #%d failed: got: %q want: %q", 99 | x, res, test.out) 100 | continue 101 | 
} 102 | } 103 | } 104 | -------------------------------------------------------------------------------- /server/base58/base58.go: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2013-2015 The btcsuite developers 2 | // Use of this source code is governed by an ISC 3 | // license that can be found in the LICENSE file. 4 | 5 | package base58 6 | 7 | import ( 8 | "math/big" 9 | ) 10 | 11 | //go:generate go run genalphabet.go 12 | 13 | var bigRadix = [...]*big.Int{ 14 | big.NewInt(0), 15 | big.NewInt(58), 16 | big.NewInt(58 * 58), 17 | big.NewInt(58 * 58 * 58), 18 | big.NewInt(58 * 58 * 58 * 58), 19 | big.NewInt(58 * 58 * 58 * 58 * 58), 20 | big.NewInt(58 * 58 * 58 * 58 * 58 * 58), 21 | big.NewInt(58 * 58 * 58 * 58 * 58 * 58 * 58), 22 | big.NewInt(58 * 58 * 58 * 58 * 58 * 58 * 58 * 58), 23 | big.NewInt(58 * 58 * 58 * 58 * 58 * 58 * 58 * 58 * 58), 24 | bigRadix10, 25 | } 26 | 27 | var bigRadix10 = big.NewInt(58 * 58 * 58 * 58 * 58 * 58 * 58 * 58 * 58 * 58) // 58^10 28 | 29 | // Decode decodes a modified base58 string to a byte slice. 30 | func Decode(b string) []byte { 31 | answer := big.NewInt(0) 32 | scratch := new(big.Int) 33 | 34 | // Calculating with big.Int is slow for each iteration. 35 | // x += b58[b[i]] * j 36 | // j *= 58 37 | // 38 | // Instead we can try to do as much calculations on int64. 39 | // We can represent a 10 digit base58 number using an int64. 40 | // 41 | // Hence we'll try to convert 10, base58 digits at a time. 42 | // The rough idea is to calculate `t`, such that: 43 | // 44 | // t := b58[b[i+9]] * 58^9 ... + b58[b[i+1]] * 58^1 + b58[b[i]] * 58^0 45 | // x *= 58^10 46 | // x += t 47 | // 48 | // Of course, in addition, we'll need to handle boundary condition when `b` is not multiple of 58^10. 49 | // In that case we'll use the bigRadix[n] lookup for the appropriate power. 50 | for t := b; len(t) > 0; { 51 | n := len(t) 52 | if n > 10 { 53 | n = 10 54 | } 55 | 56 | total := uint64(0) 57 | for _, v := range t[:n] { 58 | if v > 255 { 59 | return []byte("") 60 | } 61 | 62 | tmp := b58[v] 63 | if tmp == 255 { 64 | return []byte("") 65 | } 66 | total = total*58 + uint64(tmp) 67 | } 68 | 69 | answer.Mul(answer, bigRadix[n]) 70 | scratch.SetUint64(total) 71 | answer.Add(answer, scratch) 72 | 73 | t = t[n:] 74 | } 75 | 76 | tmpval := answer.Bytes() 77 | 78 | var numZeros int 79 | for numZeros = 0; numZeros < len(b); numZeros++ { 80 | if b[numZeros] != alphabetIdx0 { 81 | break 82 | } 83 | } 84 | flen := numZeros + len(tmpval) 85 | val := make([]byte, flen) 86 | copy(val[numZeros:], tmpval) 87 | 88 | return val 89 | } 90 | 91 | // Encode encodes a byte slice to a modified base58 string. 92 | func Encode(b []byte) string { 93 | x := new(big.Int) 94 | x.SetBytes(b) 95 | 96 | // maximum length of output is log58(2^(8*len(b))) == len(b) * 8 / log(58) 97 | maxlen := int(float64(len(b))*1.365658237309761) + 1 98 | answer := make([]byte, 0, maxlen) 99 | mod := new(big.Int) 100 | for x.Sign() > 0 { 101 | // Calculating with big.Int is slow for each iteration. 102 | // x, mod = x / 58, x % 58 103 | // 104 | // Instead we can try to do as much calculations on int64. 105 | // x, mod = x / 58^10, x % 58^10 106 | // 107 | // Which will give us mod, which is 10 digit base58 number. 108 | // We'll loop that 10 times to convert to the answer. 109 | 110 | x.DivMod(x, bigRadix10, mod) 111 | if x.Sign() == 0 { 112 | // When x = 0, we need to ensure we don't add any extra zeros. 
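// (This branch handles the most significant limb, so digits are appended only while m > 0
// and no leading zero digits ('1' characters) are produced; the else branch below always
// emits exactly 10 digits per limb, which preserves the interior zero digits of lower limbs.)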
113 | m := mod.Int64() 114 | for m > 0 { 115 | answer = append(answer, alphabet[m%58]) 116 | m /= 58 117 | } 118 | } else { 119 | m := mod.Int64() 120 | for i := 0; i < 10; i++ { 121 | answer = append(answer, alphabet[m%58]) 122 | m /= 58 123 | } 124 | } 125 | } 126 | 127 | // leading zero bytes 128 | for _, i := range b { 129 | if i != 0 { 130 | break 131 | } 132 | answer = append(answer, alphabetIdx0) 133 | } 134 | 135 | // reverse 136 | alen := len(answer) 137 | for i := 0; i < alen/2; i++ { 138 | answer[i], answer[alen-1-i] = answer[alen-1-i], answer[i] 139 | } 140 | 141 | return string(answer) 142 | } 143 | -------------------------------------------------------------------------------- /server/proxy/share.go: -------------------------------------------------------------------------------- 1 | package proxy 2 | 3 | import ( 4 | "encoding/binary" 5 | "encoding/hex" 6 | "errors" 7 | 8 | "github.com/swordlet/xmrig2xdag/config" 9 | "github.com/swordlet/xmrig2xdag/logger" 10 | ) 11 | 12 | const ( 13 | _ = iota 14 | // ValidateNormal just checks that there is a valid job ID and the share is 15 | // not a duplicate for this job 16 | ValidateNormal 17 | 18 | // ValidateFormat checks the results and nonce for valid size 19 | ValidateFormat 20 | 21 | // ValidateDiff checks that the result difficulty meets the target 22 | // NOT WORKING - the idea would be to include previous levels also 23 | ValidateDiff 24 | 25 | // ValidateFull TODO checks nonce against blob for result 26 | // maybe not worth it! 27 | ValidateFull 28 | ) 29 | 30 | const ( 31 | // need more information about this uint64 32 | shareValueOffset = 24 33 | shareValueLength = 8 34 | ) 35 | 36 | var ( 37 | ErrMalformedShareResult = errors.New("result is the correct length") 38 | ErrDiffTooLow = errors.New("share difficulty too low") 39 | ) 40 | 41 | type share struct { 42 | AuthID string `json:"id"` 43 | JobID string `json:"job_id"` 44 | Nonce string `json:"nonce"` 45 | Result string `json:"result"` 46 | 47 | Error chan error `json:"-"` 48 | Response chan *StatusReply `json:"-"` 49 | } 50 | 51 | // might return an invalid share, and that's fine - will fail validation 52 | func newShare(params map[string]interface{}) *share { 53 | s := &share{ 54 | Error: make(chan error, 1), 55 | Response: make(chan *StatusReply, 1), 56 | } 57 | 58 | if jobID, ok := params["job_id"]; ok { 59 | s.JobID = jobID.(string) 60 | } 61 | 62 | if nonce, ok := params["nonce"]; ok { 63 | s.Nonce = nonce.(string) 64 | } 65 | 66 | if result, ok := params["result"]; ok { 67 | s.Result = result.(string) 68 | } 69 | 70 | return s 71 | } 72 | 73 | func (s *share) validate(j *Job) error { 74 | // normal validate for no duplicate 75 | //for _, n := range j.submittedNonces { 76 | // if n == s.Nonce { 77 | // return ErrDuplicateShare 78 | // } 79 | //} 80 | 81 | validateLevel := config.Get().ShareValidation 82 | if validateLevel >= ValidateFormat { 83 | if err := s.validateFormat(); err != nil { 84 | return err 85 | } 86 | } 87 | 88 | if validateLevel >= ValidateDiff { 89 | if err := s.validateDifficulty(j); err != nil { 90 | return err 91 | } 92 | } 93 | 94 | if validateLevel >= ValidateFull { 95 | return s.validateResult(j) 96 | } 97 | 98 | return nil 99 | } 100 | 101 | func (s *share) validateFormat() error { 102 | if len(s.Nonce) != 8 || len(s.Result) != 64 { 103 | return ErrMalformedShare 104 | } 105 | _, err := hex.DecodeString(s.Nonce) 106 | if err != nil { 107 | return err 108 | } 109 | 110 | _, err = hex.DecodeString(s.Result) 111 | if err != nil { 112 | return 
err 113 | } 114 | return nil 115 | } 116 | 117 | // Disabled, not working 118 | func (s *share) validateDifficulty(j *Job) error { 119 | return nil 120 | target, err := j.getTargetUint64() 121 | if err != nil { 122 | // don't try to validate, just record so we can fix later 123 | logger.Get().Println("error validating difficulty: ", err) 124 | return nil 125 | } 126 | 127 | result, err := s.getResultUint64() 128 | if err != nil { 129 | logger.Get().Println("error validating difficulty: ", err) 130 | return err 131 | } 132 | 133 | logger.Get().Debugf("comparing result %v < target %v", result, target) 134 | if result < target { 135 | return ErrDiffTooLow 136 | } 137 | 138 | return nil 139 | } 140 | 141 | // not implemented, and no rush to do so 142 | func (s *share) validateResult(j *Job) error { 143 | return nil 144 | } 145 | 146 | func (s *share) getResultUint64() (uint64, error) { 147 | resultBytes, err := hex.DecodeString(s.Result) 148 | if err != nil { 149 | return 0, err 150 | } 151 | 152 | if len(resultBytes) < shareValueOffset+shareValueLength { 153 | return 0, ErrMalformedShareResult 154 | } 155 | 156 | valueBytes := resultBytes[shareValueOffset : shareValueOffset+shareValueLength] 157 | 158 | return binary.LittleEndian.Uint64(valueBytes), nil 159 | } 160 | -------------------------------------------------------------------------------- /server/proxy/mining.go: -------------------------------------------------------------------------------- 1 | package proxy 2 | 3 | import ( 4 | "context" 5 | "errors" 6 | "strconv" 7 | "time" 8 | 9 | "github.com/sourcegraph/jsonrpc2" 10 | "github.com/swordlet/xmrig2xdag/logger" 11 | ) 12 | 13 | // PassThruParams is a generic type for handling RPC requests. It can (should) contain the context 14 | // of the request in order to be handled correctly. Other than the context, everything else should 15 | // be shipped off to the pool as is. If that is not the correct behavior, use another type for params. 
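// For illustration only (editor's sketch; the wallet address, worker name and
// ctx below are placeholders): a login request from an XMRig-style miner
// passes through with its JSON params untouched, plus the per-connection
// context injected by the server codec, e.g.
//
//	p := PassThruParams{"login": "<wallet address>", "pass": "worker1"}
//	p.SetContext(ctx) // ctx is expected to carry the Worker under the "worker" key
//	_ = p.Context()   // hands that same context back to the RPC method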
16 | type PassThruParams map[string]interface{} 17 | 18 | // Context implements jsonrpc2.WithContext 19 | func (p PassThruParams) Context() context.Context { 20 | if ctx, ok := p["ctx"]; ok { 21 | return ctx.(context.Context) 22 | } 23 | logger.Get().Println("Failed to get context on request with params: ", p) 24 | return nil 25 | } 26 | 27 | // SetContext implements jsonrpc2.WithContext 28 | func (p *PassThruParams) SetContext(ctx context.Context) { 29 | if *p == nil { 30 | *p = make(PassThruParams) 31 | } 32 | params := *p 33 | params["ctx"] = ctx 34 | } 35 | 36 | // structures for non-passthru objects, and replies 37 | 38 | type AuthReply struct { 39 | Token string `json:"token"` 40 | Hashes string `json:"hashes"` 41 | } 42 | 43 | type LoginReply struct { 44 | ID string `json:"id"` 45 | Job *Job `json:"job"` 46 | Status string `json:"status"` 47 | Error *jsonrpc2.Error `json:"error,omitempty"` 48 | } 49 | 50 | type StatusReply struct { 51 | Status string `json:"status"` 52 | Error *jsonrpc2.Error `json:"error,omitempty"` 53 | } 54 | 55 | // RPC proxy service 56 | type Mining struct{} 57 | 58 | func (m *Mining) getWorker(ctx context.Context) Worker { 59 | return ctx.Value("worker").(Worker) 60 | } 61 | 62 | func (m *Mining) Login(p PassThruParams, resp *LoginReply) error { 63 | var err error 64 | var minerName string 65 | worker := m.getWorker(p.Context()) 66 | if poolIsDown.Load() > 0 { 67 | resp.ID = strconv.Itoa(int(worker.ID())) 68 | resp.Error = &jsonrpc2.Error{ 69 | Code: -1, 70 | Message: "*** pool is down, please switch to other pool.", 71 | } 72 | worker.RemoveProxy() 73 | return errors.New("Invalid job id *** Pool is down: Please switch to other pool") 74 | } 75 | 76 | if address, ok := p["login"]; ok { 77 | err = worker.Proxy().SetAddress(address.(string)) 78 | } else { 79 | err = errors.New("no login param") 80 | } 81 | if err != nil { 82 | resp.ID = strconv.Itoa(int(worker.ID())) 83 | resp.Error = &jsonrpc2.Error{ 84 | Code: -1, 85 | Message: err.Error(), 86 | } 87 | worker.RemoveProxy() 88 | return err 89 | } 90 | 91 | if name, ok := p["pass"]; ok { 92 | minerName = name.(string) 93 | if minerName == "x" { 94 | minerName = "" 95 | } 96 | } 97 | logger.Get().Debugln("RPC server is listening on proxy ", worker.Proxy().ID) 98 | go worker.Proxy().Run(minerName) 99 | //resp.Job = worker.Proxy().NextJob() 100 | resp.ID = strconv.Itoa(int(worker.ID())) 101 | resp.Status = "OK" 102 | resp.Job = createFakeJob() 103 | return nil 104 | } 105 | 106 | func (m *Mining) Getjob(p PassThruParams, resp *Job) error { 107 | worker := m.getWorker(p.Context()) 108 | *resp = *worker.Proxy().NextJob() 109 | 110 | return nil 111 | } 112 | 113 | // Submit accepts shares from a worker and passes them through to the pool. 114 | // This does NOT currently recognize which worker or even what type of worker 115 | // is doing the submiting, and does not return a Coinhive friendly response. 116 | // But the coinhive miner doesn't care, it just doesn't keep up with submissions. 117 | func (m *Mining) Submit(p PassThruParams, resp *StatusReply) error { 118 | worker := m.getWorker(p.Context()) 119 | worker.Conn().SetReadDeadline(time.Now().Add(45 * time.Second)) 120 | status, err := worker.Proxy().Submit(p) 121 | if err != nil { 122 | return err 123 | } 124 | *resp = *status 125 | 126 | return nil 127 | } 128 | 129 | // Keepalived lets the client tell you they're still there, and you get to say "I'm still here too" 130 | // Right now, we don't keep track of idle connections, so this doesn't really matter. 
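// A typical keepalive exchange might look like this on the wire (editor's
// illustration only; the exact id, params and response framing depend on the
// miner and on the server codec's response type, which is defined elsewhere):
//
//	--> {"jsonrpc":"2.0","id":7,"method":"keepalived","params":{"id":"1"}}
//	<-- {"jsonrpc":"2.0","id":7,"result":{"status":"KEEPALIVED"}}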
131 | func (m *Mining) Keepalived(p PassThruParams, resp *StatusReply) error { 132 | resp.Status = "KEEPALIVED" 133 | return nil 134 | } 135 | -------------------------------------------------------------------------------- /server/xdag/block.go: -------------------------------------------------------------------------------- 1 | package xdag 2 | 3 | import ( 4 | "crypto/sha256" 5 | "encoding/binary" 6 | "errors" 7 | "unsafe" 8 | 9 | "github.com/swordlet/xmrig2xdag/config" 10 | "github.com/swordlet/xmrig2xdag/logger" 11 | ) 12 | 13 | const ( 14 | // HashLength is the expected length of the hash 15 | HashLength = 32 16 | // AddressLength is the expected length of the address 17 | AddressLength = 32 18 | // RawBlockSize is the expected length of the XDAG block 19 | RawBlockSize = 512 20 | FieldSize = 32 21 | 22 | bits2mime = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/" 23 | XDAG_FIELD_HEAD uint64 = 1 24 | XDAG_FIELD_SIGN_IN uint64 = 4 25 | XDAG_FIELD_SIGN_OUT uint64 = 5 26 | XDAG_FIELD_HEAD_TEST uint64 = 8 27 | 28 | BLOCK_HEADER_WORD uint64 = 0x3fca9e2b 29 | WORKERNAME_HEADER_WORD uint32 = 0xf46b9853 30 | XDAG_BLOCK_FIELDS = 16 31 | ) 32 | 33 | var ( 34 | mime2bits = make([]byte, 256) 35 | 36 | // Zero8bytes 8 bytes with zero data 37 | Zero8bytes = make([]byte, 8) 38 | ) 39 | 40 | func init() { 41 | for i := range mime2bits { 42 | mime2bits[i] = 0xFF 43 | } 44 | var i uint8 45 | for i = 0; i < 64; i++ { 46 | mime2bits[bits2mime[i]] = i 47 | } 48 | } 49 | 50 | // Hash2address converts hash to address 51 | func Hash2address(h []byte) string { 52 | address := make([]byte, AddressLength) 53 | var c, d, j uint 54 | // every 3 bytes(24 bits) hashs convert to 4 chars(6 bit each) 55 | // first 24 bytes hash to 32 byte address, ignore last 8 bytes of hash 56 | for i := 0; i < AddressLength; i++ { 57 | if d < 6 { 58 | d += 8 59 | c <<= 8 60 | c |= uint(h[j]) 61 | j++ 62 | } 63 | d -= 6 64 | address[i] = bits2mime[c>>d&0x3F] 65 | } 66 | return bytes2str(address) 67 | } 68 | 69 | // Address2hash converts address to hash 70 | func Address2hash(addr string) ([HashLength]byte, error) { 71 | 72 | var hash [HashLength]byte 73 | var i, e, n, j uint 74 | var c, d uint8 75 | 76 | if len(addr) != 32 { 77 | return hash, errors.New("invalid address") 78 | } 79 | var k = -1 80 | // convert 32 byte address to 24 bytes hash 81 | // each byte (8 bits) address to 6 bits hash 82 | for i = 0; i < AddressLength; i++ { 83 | for { 84 | k += 1 85 | if k == 32 { 86 | return hash, errors.New("Address string error") 87 | } 88 | c = addr[k] 89 | d = mime2bits[c] 90 | if d&0xC0 == 0 { 91 | break 92 | } 93 | } 94 | e <<= 6 95 | e |= uint(d) 96 | n += 6 97 | if n >= 8 { 98 | n -= 8 99 | hash[j] = uint8(e >> n) 100 | j++ 101 | } 102 | } 103 | //copy(hash[24:], Zero8bytes) // set last 8 bytes of hash to 0 104 | return hash, nil 105 | } 106 | 107 | // RawBlock contains raw XDAG block bytes 108 | type RawBlock struct { 109 | Hash [HashLength]byte 110 | Address string 111 | Timestamp uint64 112 | RawBytes []byte 113 | } 114 | 115 | // NewRawBlock builds new raw block from bytes 116 | func NewRawBlock(b []byte) RawBlock { 117 | 118 | header := make([]byte, 8) 119 | copy(header, b[:8]) // backup block transport header 120 | copy(b[:8], Zero8bytes) // clear block transport header 121 | 122 | hash := sha256.Sum256(b) 123 | copy(b[:8], header) // restore block transport header 124 | r := RawBlock{ 125 | Hash: sha256.Sum256(hash[:]), 126 | RawBytes: b, 127 | } 128 | // get time from block header 129 | r.Timestamp = 
binary.LittleEndian.Uint64(b[16:24]) 130 | 131 | r.Address = Hash2address(r.Hash[:]) 132 | return r 133 | } 134 | 135 | func GenerateFakeBlock() [RawBlockSize]byte { 136 | var block [RawBlockSize]byte 137 | var transportHeader uint64 = 1 138 | var amount uint64 = 0 139 | var fieldType uint64 140 | if config.Get().Testnet { 141 | logger.Get().Debugln("XDAG_FIELD_HEAD_TEST") 142 | fieldType = XDAG_FIELD_HEAD_TEST | ((XDAG_FIELD_SIGN_OUT * 0x11) << 4) 143 | } else { 144 | logger.Get().Debugln("XDAG_FIELD_HEAD") 145 | fieldType = XDAG_FIELD_HEAD | ((XDAG_FIELD_SIGN_OUT * 0x11) << 4) 146 | } 147 | binary.LittleEndian.PutUint64(block[0:8], transportHeader) 148 | binary.LittleEndian.PutUint64(block[8:16], fieldType) 149 | binary.LittleEndian.PutUint64(block[16:24], GetXTimestamp()) 150 | binary.LittleEndian.PutUint64(block[24:32], amount) 151 | return block 152 | } 153 | 154 | // unsafe and fast convert string to bytes slice 155 | func str2bytes(s string) []byte { 156 | x := (*[2]uintptr)(unsafe.Pointer(&s)) 157 | h := [3]uintptr{x[0], x[1], x[1]} 158 | return *(*[]byte)(unsafe.Pointer(&h)) 159 | } 160 | 161 | // unsafe and fast convert bytes slice to string 162 | func bytes2str(b []byte) string { 163 | return *(*string)(unsafe.Pointer(&b)) 164 | } 165 | -------------------------------------------------------------------------------- /server/stratum/codec.go: -------------------------------------------------------------------------------- 1 | package stratum 2 | 3 | import ( 4 | "context" 5 | "encoding/json" 6 | "errors" 7 | "io" 8 | "net/rpc" 9 | "strings" 10 | "sync" 11 | 12 | "github.com/powerman/rpc-codec/jsonrpc2" 13 | "github.com/swordlet/xmrig2xdag/utils" 14 | ) 15 | 16 | // DefaultServerCodec handles xmr stratum+tcp requests and is capabable of sending a notification to 17 | // the connection using it. 18 | type DefaultServerCodec struct { 19 | *serverCodec 20 | 21 | // JSON-RPC clients can use arbitrary json values as request IDs. 22 | // Package rpc expects uint64 request IDs. 23 | // We assign uint64 sequence numbers to incoming requests 24 | // but save the original request ID in the pending map. 25 | // When rpc responds, we use the sequence number in 26 | // the response to find the original request ID. 27 | mutex sync.Mutex // protects seq, pending 28 | seq uint64 29 | pending *utils.SafeMap //[uint64]*json.RawMessage 30 | } 31 | 32 | type defaultNotification struct { 33 | Method string `json:"method"` 34 | Params interface{} `json:"params"` 35 | } 36 | 37 | // NewDefaultServerCodec returns a new rpc.ServerCodec for handling from a miner implementing the 38 | // (standard?) xmr stratum+tcp protocol 39 | func NewDefaultServerCodec(conn io.ReadWriteCloser) rpc.ServerCodec { 40 | return &DefaultServerCodec{ 41 | serverCodec: &serverCodec{ 42 | dec: json.NewDecoder(conn), 43 | enc: json.NewEncoder(conn), 44 | c: conn, 45 | ctx: context.Background(), 46 | }, 47 | pending: utils.NewSafeMap(), //(map[uint64]*json.RawMessage), 48 | } 49 | } 50 | 51 | // NewDefaultServerCodecContext is NewDefaultServerCodec with given context provided 52 | // within parameters for compatible RPC methods. 
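// A hedged usage sketch (editor's addition; the actual wiring lives in the
// stratum server type, which is not shown here, and rpcSrv/w are placeholders):
//
//	ctx := context.WithValue(context.Background(), "worker", w) // w: the proxy worker for this connection
//	codec := NewDefaultServerCodecContext(ctx, conn)
//	go rpcSrv.ServeCodec(codec) // rpcSrv: a *rpc.Server with the "mining" service registered
//
// so that RPC methods can recover the worker from the request context.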
53 | func NewDefaultServerCodecContext(ctx context.Context, conn io.ReadWriteCloser) rpc.ServerCodec { 54 | codec := NewDefaultServerCodec(conn) 55 | codec.(*DefaultServerCodec).ctx = ctx 56 | return codec 57 | } 58 | 59 | // ReadRequestHeader implements rpc.ServerCodec 60 | func (c *DefaultServerCodec) ReadRequestHeader(r *rpc.Request) (err error) { 61 | var raw json.RawMessage 62 | if err := c.dec.Decode(&raw); err != nil { 63 | c.encmutex.Lock() 64 | c.enc.Encode(serverResponse{Version: "2.0", ID: &null, Error: errParse}) 65 | c.encmutex.Unlock() 66 | return err 67 | } 68 | 69 | if err := json.Unmarshal(raw, &c.req); err != nil { 70 | if err.Error() == "bad request" { 71 | c.encmutex.Lock() 72 | c.enc.Encode(serverResponse{Version: "2.0", ID: &null, Error: errRequest}) 73 | c.encmutex.Unlock() 74 | } 75 | return err 76 | } 77 | 78 | r.ServiceMethod = strings.Title(c.req.Method) 79 | if !strings.Contains(r.ServiceMethod, "mining") { 80 | r.ServiceMethod = "mining." + r.ServiceMethod 81 | } 82 | 83 | // JSON request id can be any JSON value; 84 | // RPC package expects uint64. Translate to 85 | // internal uint64 and save JSON on the side. 86 | c.mutex.Lock() 87 | c.seq++ 88 | // c.pending[c.seq] = c.req.ID 89 | c.pending.Set(c.seq, c.req.ID) 90 | c.req.ID = nil 91 | r.Seq = c.seq 92 | c.mutex.Unlock() 93 | 94 | return nil 95 | } 96 | 97 | // ReadRequestBody implements rpc.ServerCodec 98 | func (c *DefaultServerCodec) ReadRequestBody(x interface{}) error { 99 | if x == nil { 100 | return nil 101 | } 102 | if x, ok := x.(jsonrpc2.WithContext); ok { 103 | x.SetContext(c.ctx) 104 | } 105 | if c.req.Params == nil { 106 | return nil 107 | } 108 | if err := json.Unmarshal(*c.req.Params, x); err != nil { 109 | return jsonrpc2.NewError(errParams.Code, err.Error()) 110 | } 111 | return nil 112 | } 113 | 114 | // WriteResponse implements rpc.ServerCodec 115 | func (c *DefaultServerCodec) WriteResponse(r *rpc.Response, x interface{}) error { 116 | c.mutex.Lock() 117 | // b, ok := c.pending[r.Seq] 118 | b, ok := c.pending.Get(r.Seq) 119 | if !ok { 120 | c.mutex.Unlock() 121 | return errors.New("invalid sequence number in response") 122 | } 123 | // delete(c.pending, r.Seq) 124 | c.pending.Del(r.Seq) 125 | c.mutex.Unlock() 126 | 127 | if b == nil { 128 | // Notification. Do not respond. 
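// Editor's note: b is the original JSON-RPC id saved by ReadRequestHeader; it
// is nil when the incoming request carried no "id" member, i.e. it was a
// notification, so no response frame is written for it.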
129 | return nil 130 | } 131 | resp := serverResponse{Version: "2.0", ID: b.(*json.RawMessage)} 132 | if r.Error == "" { 133 | if x == nil { 134 | resp.Result = &null 135 | } else { 136 | resp.Result = x 137 | } 138 | } else { 139 | resp.Error = jsonrpc2.NewError(errInternal.Code, r.Error) 140 | } 141 | // c.encmutex.Lock() 142 | // defer c.encmutex.Unlock() 143 | return c.enc.Encode(resp) 144 | } 145 | 146 | // Close implements rpc.ServerCodec 147 | func (c *DefaultServerCodec) Close() error { 148 | return c.c.Close() 149 | } 150 | 151 | func (d *DefaultServerCodec) Notify(method string, args interface{}) error { 152 | payload := defaultNotification{ 153 | Method: method, 154 | Params: args, 155 | } 156 | return d.enc.Encode(payload) 157 | } 158 | -------------------------------------------------------------------------------- /server/xdag/connect.go: -------------------------------------------------------------------------------- 1 | package xdag 2 | 3 | import ( 4 | "context" 5 | "errors" 6 | "io" 7 | "net" 8 | "sync" 9 | "sync/atomic" 10 | "time" 11 | 12 | "github.com/swordlet/xmrig2xdag/logger" 13 | ) 14 | 15 | var PoolDown atomic.Uint64 16 | 17 | // Connection to XDAG pool 18 | type Connection struct { 19 | sync.RWMutex 20 | Conn net.Conn //tcp socket 21 | ConnID uint64 //ID 22 | ctx context.Context //exit channel 23 | cancel context.CancelFunc //cancel channel 24 | msgBuffChan chan []byte //buffered message channel 25 | isClosed bool //closed flag 26 | //isMulti bool // multiple works 27 | //worksCounts int 28 | jobNotify chan []byte 29 | // done chan int 30 | EOFcount atomic.Uint64 31 | } 32 | 33 | func NewConnection(conn net.Conn, connID uint64, notify chan []byte, done chan int) *Connection { 34 | //initialization 35 | c := &Connection{ 36 | Conn: conn, 37 | ConnID: connID, 38 | isClosed: false, 39 | msgBuffChan: make(chan []byte, 8), 40 | jobNotify: notify, 41 | // done: done, 42 | } 43 | 44 | return c 45 | } 46 | 47 | // StartWriter write message Goroutine, send message to XDAG pool 48 | func (c *Connection) StartWriter() { 49 | logger.Get().Debugln("[Writer Goroutine is running]") 50 | defer logger.Get().Debugln(c.RemoteAddr().String(), "[conn Writer exit!]") 51 | defer c.Stop() 52 | 53 | for { 54 | if PoolDown.Load() > 0 { 55 | return 56 | } 57 | select { 58 | case data, ok := <-c.msgBuffChan: 59 | if ok { 60 | if _, err := c.Conn.Write(data); err != nil { 61 | logger.Get().Println("Send Buff Data error:, ", err, " Conn Writer exit") 62 | return 63 | } 64 | } else { 65 | logger.Get().Println("msgBuffChan is Closed") 66 | } 67 | case <-c.ctx.Done(): 68 | return 69 | } 70 | } 71 | } 72 | 73 | // StartReader read message Goroutine, receive message from XDAG pool 74 | func (c *Connection) StartReader() { 75 | logger.Get().Debugln("[Reader Goroutine is running]") 76 | defer logger.Get().Debugln(c.RemoteAddr().String(), "[conn Reader exit!]") 77 | defer c.Stop() 78 | 79 | for { 80 | if PoolDown.Load() > 0 { 81 | return 82 | } 83 | select { 84 | case <-c.ctx.Done(): 85 | return 86 | default: 87 | // 设定连接的等待时长期限 88 | err := c.Conn.SetReadDeadline(time.Now().Add(time.Second * 128)) 89 | if err != nil { 90 | return 91 | } 92 | data := make([]byte, 32) 93 | 94 | if _, err = io.ReadFull(c.Conn, data); err != nil { 95 | logger.Get().Println("read msg head error ", err) 96 | 97 | switch errType := err.(type) { 98 | case net.Error: 99 | if errType.Timeout() { 100 | PoolDown.Add(1) 101 | } 102 | } 103 | 104 | if err == io.EOF { 105 | c.EOFcount.Add(1) 106 | } 107 | return 108 | } 109 | 
//logger.Get().Debugf("%#v\n", data) 110 | c.RLock() 111 | if !c.isClosed { 112 | c.jobNotify <- data 113 | } 114 | c.RUnlock() 115 | } 116 | } 117 | } 118 | 119 | // Start a connection 120 | func (c *Connection) Start() { 121 | c.ctx, c.cancel = context.WithCancel(context.Background()) 122 | //1 start receive Goroutine 123 | go c.StartReader() 124 | //2 start send Goroutine 125 | go c.StartWriter() 126 | } 127 | 128 | // Stop a connection 129 | func (c *Connection) Stop() { 130 | 131 | c.Lock() 132 | defer c.Unlock() 133 | 134 | if c.isClosed { 135 | return 136 | } 137 | 138 | logger.Get().Println("Conn Stoped()...ConnID = ", c.ConnID) 139 | 140 | c.Conn.Close() 141 | //close writer 142 | c.cancel() 143 | 144 | //close channel 145 | close(c.msgBuffChan) 146 | //set flag 147 | c.isClosed = true 148 | // if PoolDown.Load() > 0 { 149 | // c.done <- 2 150 | // } else { 151 | // c.done <- 1 152 | // } 153 | 154 | } 155 | 156 | // worker disconnected 157 | func (c *Connection) Close() { 158 | 159 | c.Lock() 160 | defer c.Unlock() 161 | 162 | if c.isClosed { 163 | return 164 | } 165 | 166 | logger.Get().Println("Conn Closed() ...ConnID = ", c.ConnID) 167 | 168 | c.Conn.Close() 169 | //close writer 170 | c.cancel() 171 | 172 | //close channel 173 | close(c.msgBuffChan) 174 | //set flag 175 | c.isClosed = true 176 | 177 | } 178 | 179 | // GetTCPConnection get socket TCPConn 180 | func (c *Connection) GetTCPConnection() net.Conn { 181 | return c.Conn 182 | } 183 | 184 | // GetConnID get ID 185 | func (c *Connection) GetConnID() uint64 { 186 | return c.ConnID 187 | } 188 | 189 | // RemoteAddr get remote address 190 | func (c *Connection) RemoteAddr() net.Addr { 191 | return c.Conn.RemoteAddr() 192 | } 193 | 194 | // SendBuffMsg send message to XDAG pool through buffered channel 195 | func (c *Connection) SendBuffMsg(data []byte) error { 196 | c.RLock() 197 | defer c.RUnlock() 198 | if c.isClosed { 199 | return errors.New("Connection closed when send buff msg") 200 | } 201 | c.msgBuffChan <- data 202 | 203 | return nil 204 | } 205 | 206 | func (c *Connection) SendLogin() error { 207 | return nil 208 | } 209 | -------------------------------------------------------------------------------- /server/proxy/job.go: -------------------------------------------------------------------------------- 1 | package proxy 2 | 3 | import ( 4 | "crypto/rand" 5 | "encoding/binary" 6 | "encoding/hex" 7 | "errors" 8 | "github.com/swordlet/xmrig2xdag/logger" 9 | "math" 10 | ) 11 | 12 | const ( 13 | initNonceOffset = 56 14 | initNonceLength = 8 15 | 16 | nonceOffset = 60 17 | nonceLength = 4 // bytes 18 | 19 | // TODO - worker could supply expected hashes? 20 | nonceIncrement = 0x02000000 // 32M, not really expected, just plenty of work 21 | maxNonceValue = math.MaxUint32 - nonceIncrement 22 | ) 23 | 24 | var ( 25 | ErrMalformedJob = errors.New("bad job format from pool") 26 | ErrUnknownTargetFormat = errors.New("unrecognized format for job target") 27 | ) 28 | 29 | // Job is a mining job. Break it up and send chunks to workers. 
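// As serialized for an XMRig-style miner it looks roughly like this (editor's
// illustration; the values shown mirror createFakeJob further down and the
// truncations are just for brevity):
//
//	{
//	  "blob": "<64 bytes, hex encoded>",
//	  "job_id": "FFFFFFFFFF...",
//	  "target": "b88d0600",
//	  "seed_hash": "e1364b87...",
//	  "algo": "rx/xdag"
//	}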
30 | type Job struct { 31 | Blob string `json:"blob"` 32 | ID string `json:"job_id"` 33 | Target string `json:"target"` 34 | SeedHash string `json:"seed_hash"` 35 | Algo string `json:"algo"` 36 | 37 | //submittedNonces []string `json:"-"` 38 | initialNonce uint32 `json:"-"` 39 | currentBlob []byte `json:"-"` 40 | currentNonce uint32 `json:"-"` 41 | } 42 | 43 | //// NewJobFromServer creates a Job from a pool notice 44 | //func NewJobFromServer(job map[string]interface{}) (*Job, error) { 45 | // j := &Job{} 46 | // var ok bool 47 | // if j.Blob, ok = job["blob"].(string); !ok { 48 | // return nil, ErrMalformedJob 49 | // } 50 | // if j.ID, ok = job["job_id"].(string); !ok { 51 | // return nil, ErrMalformedJob 52 | // } 53 | // if j.Target, ok = job["target"].(string); !ok { 54 | // return nil, ErrMalformedJob 55 | // } 56 | // if j.SeedHash, ok = job["seed_hash"].(string); !ok { 57 | // return nil, ErrMalformedJob 58 | // } 59 | // 60 | // if err := j.init(); err != nil { 61 | // return nil, err 62 | // } 63 | // 64 | // return j, nil 65 | //} 66 | 67 | //func (j *Job) init() error { 68 | // currentNonce, currentBlob, err := j.Nonce() 69 | // if err != nil { 70 | // return err 71 | // } 72 | // //j.submittedNonces = make([]string, 0) 73 | // j.currentNonce = currentNonce 74 | // j.initialNonce = currentNonce 75 | // j.currentBlob = currentBlob 76 | // 77 | // return nil 78 | //} 79 | 80 | // Next returns the next version of this job for worker distribution 81 | // and increments the nonce 82 | func (j *Job) Next() *Job { 83 | 84 | j.currentNonce += nonceIncrement 85 | if j.currentNonce >= maxNonceValue { 86 | j.currentNonce = 1 87 | } 88 | 89 | nextJob := &Job{ 90 | ID: j.ID, 91 | Target: j.Target, 92 | SeedHash: j.SeedHash, 93 | Algo: xdagAlgo, 94 | initialNonce: j.initialNonce, 95 | currentNonce: j.currentNonce, 96 | currentBlob: make([]byte, 64), 97 | } 98 | 99 | nonceBytes := make([]byte, nonceLength, nonceLength) 100 | binary.BigEndian.PutUint32(nonceBytes, j.currentNonce) 101 | 102 | copy(nextJob.currentBlob[:], j.currentBlob[:]) 103 | copy(nextJob.currentBlob[nonceOffset:nonceOffset+nonceLength], nonceBytes) 104 | nextJob.Blob = hex.EncodeToString(nextJob.currentBlob) 105 | 106 | logger.Get().Println("next, job blob: ", nextJob.Blob, ", nonce: ", hex.EncodeToString(nonceBytes)) 107 | return nextJob 108 | } 109 | 110 | // 111 | //// NewJob builds a job for distribution to a worker 112 | //func NewJob(blobBytes []byte, nonce uint32, id, target string) *Job { 113 | // j := &Job{ 114 | // ID: id, 115 | // Target: target, 116 | // //submittedNonces: make([]string, 0), 117 | // } 118 | // nonceBytes := make([]byte, nonceLength, nonceLength) 119 | // binary.BigEndian.PutUint32(nonceBytes, nonce) 120 | // copy(blobBytes[nonceOffset:nonceOffset+nonceLength], nonceBytes) 121 | // j.Blob = hex.EncodeToString(blobBytes) 122 | // 123 | // return j 124 | //} 125 | 126 | //// Nonce extracts the nonce from the job blob and returns it. 127 | //func (j *Job) Nonce() (nonce uint32, blobBytes []byte, err error) { 128 | // blobBytes, err = hex.DecodeString(j.Blob) 129 | // if err != nil { 130 | // return 131 | // } 132 | // 133 | // nonceBytes := blobBytes[nonceOffset : nonceOffset+nonceLength] 134 | // nonce = binary.BigEndian.Uint32(nonceBytes) 135 | // 136 | // return 137 | //} 138 | 139 | // can we count on uint32 hex targets? 
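// Editor's note: for the compact form used here, the 8 hex chars are a
// little-endian uint32 t with difficulty roughly 2^32 / t. The fake job's
// target "b88d0600" is the bytes b8 8d 06 00, i.e. t = 0x00068db8 = 429496,
// and 2^32 / 429496 is about 10000, which matches the "difficulty = 10000"
// comment in createFakeJob below.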
140 | // NOT WORKING PROPERLY 141 | func (j *Job) getTargetUint64() (uint64, error) { 142 | target := j.Target 143 | if len(target) == 8 { 144 | target = "00000000" + target 145 | } 146 | if len(target) != 16 { 147 | logger.Get().Println("Job target format is : ", target) 148 | return 0, ErrUnknownTargetFormat 149 | } 150 | targetBytes, err := hex.DecodeString(target) 151 | if err != nil { 152 | return 0, err 153 | } 154 | 155 | return binary.LittleEndian.Uint64(targetBytes), nil 156 | } 157 | 158 | func createFakeJob() *Job { 159 | b := make([]byte, 64) 160 | var blob string 161 | if _, err := rand.Read(b); err != nil { 162 | blob = "070780e6b9d60586ba419a0c224e3c6c3e134cc45c4fa04d8ee2d91c2595463c57eef0a4f0796c000000002fcc4d62fa6c77e76c30017c768be5c61d83ec9d3a" 163 | } 164 | blob = hex.EncodeToString(b) 165 | //fmt.Println(blob) 166 | return &Job{ // return a fake job before proxy connect XDAG pool 167 | ID: "FFFFFFFFFF" + NewLen(18), 168 | Target: "b88d0600", //difficulty = 10000 169 | Algo: xdagAlgo, 170 | Blob: blob, 171 | SeedHash: "e1364b8782719d7683e2ccd3d8f724bc59dfa780a9e960e7c0e0046acdb40100", 172 | } 173 | } 174 | -------------------------------------------------------------------------------- /server/config/config.go: -------------------------------------------------------------------------------- 1 | package config 2 | 3 | import ( 4 | "encoding/json" 5 | "fmt" 6 | "io" 7 | "io/ioutil" 8 | "log" 9 | "os" 10 | "reflect" 11 | "strconv" 12 | "strings" 13 | "sync" 14 | 15 | "github.com/kelseyhightower/envconfig" 16 | "github.com/pkg/errors" 17 | ) 18 | 19 | var ( 20 | // File specifies a file from which to read the config 21 | // If empty, config will be read from the environment 22 | File string 23 | 24 | instance *Config 25 | instantiation = sync.Once{} 26 | ) 27 | 28 | // Config holds the global application configuration. 
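// Editor's note: values are loaded either from a JSON file (when config.File
// is set by the caller) or, failing that, from environment variables processed
// with the "xmr2xdag" prefix, so for example (illustrative only; the pool
// address and binary name are placeholders):
//
//	XMR2XDAG_URL=pool.example.org:13656 XMR2XDAG_STRPORT=3232 ./xmrig2xdag
//
// is equivalent to setting "url" and "strport" in the JSON config file.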
29 | type Config struct { 30 | Debug bool `envconfig:"debug" json:"debug"` 31 | 32 | StratumPort int `envconfig:"strport" default:"3232" json:"strport"` 33 | 34 | CertFile string `envconfig:"tlscert" default:"server.pem" json:"tlscert"` 35 | KeyFile string `envconfig:"tlskey" default:"server.key" json:"tlskey"` 36 | // TODO also support TLS for stratum connections 37 | Tls bool `envconfig:"tls" json:"tls"` 38 | 39 | // TODO multiple pools for fallback 40 | PoolAddr string `envconfig:"url" required:"true" json:"url"` 41 | //PoolLogin string `envconfig:"login" required:"true" json:"login"` 42 | //PoolPassword string `envconfig:"password" required:"true" json:"password"` 43 | 44 | StatInterval int `envconfig:"stats" default:"60" json:"stats"` 45 | 46 | ShareValidation int `envconfig:"validateshares" json:"validateshares" default:"2"` 47 | 48 | //DonateLevel int `envconfig:"donate" default:"2" json:"donate"` 49 | 50 | // LogFile and DiscardLog are mutually exclusive - logfile will be used if present 51 | LogFile string `envconfig:"log" json:"log"` 52 | DiscardLog bool `envconfig:"nolog" json:"nolog"` 53 | 54 | // not yet implemented 55 | //Background bool `envconfig:"background" json:"background"` 56 | 57 | Testnet bool `envconfig:"testnet" json:"testnet"` 58 | Socks5 string `envconfig:"socks5" json:"socks5"` 59 | RateLimit int `envconfig:"ratelimit" json:"ratelimit"` 60 | 61 | TryPoolTimes int `envconfig:"try_pool_times" json:"try_pool_times"` 62 | TryDelaySeconds int `envconfig:"try_delay_seconds" json:"try_delay_seconds"` 63 | ExitOnPoolDown bool `envconfig:"exit_on_pool_down" json:"exit_on_pool_down"` 64 | } 65 | 66 | // IsMissingConfig returns true if the the error has to do with missing required configs 67 | func IsMissingConfig(err error) bool { 68 | return strings.Contains(err.Error(), "required key") 69 | } 70 | 71 | // only for config from file 72 | func setDefaults(c *Config) error { 73 | // TODO cleanup? 74 | val := reflect.ValueOf(c) 75 | refType := reflect.TypeOf(c) 76 | for i := 0; i < val.Elem().NumField(); i++ { 77 | field := val.Elem().Field(i) 78 | fieldType := field.Type() 79 | defaultValue := refType.Elem().Field(i).Tag.Get("default") 80 | if defaultValue != "" { 81 | valueType := fieldType.Kind() 82 | switch valueType { 83 | case reflect.String: 84 | if field.String() == "" && field.CanSet() { 85 | field.SetString(defaultValue) 86 | } 87 | case reflect.Int: 88 | intVal, err := strconv.Atoi(defaultValue) 89 | if err != nil { 90 | return fmt.Errorf("unable to convert default value to int: %v - err: %s", defaultValue, err) 91 | } 92 | if field.Int() == 0 && field.CanSet() { 93 | field.SetInt(int64(intVal)) 94 | } 95 | case reflect.Bool: 96 | if field.CanSet() { 97 | v, err := strconv.ParseBool(defaultValue) 98 | if err != nil { 99 | return fmt.Errorf("unable to parse bool value for: %v - err: %s"+defaultValue, err) 100 | } 101 | field.SetBool(v) 102 | } 103 | default: 104 | log.Println("Unexpected type found in config. 
Skipping: ", field) 105 | } 106 | } 107 | } 108 | 109 | return nil 110 | } 111 | 112 | // only for config from file 113 | func validate(c *Config) error { 114 | val := reflect.ValueOf(c) 115 | refType := reflect.TypeOf(c) 116 | for i := 0; i < val.Elem().NumField(); i++ { 117 | field := val.Elem().Field(i) 118 | 119 | // required fields are all strings 120 | if _, ok := refType.Elem().Field(i).Tag.Lookup("required"); ok && field.String() == "" { 121 | return fmt.Errorf("required key %s missing value", refType.Elem().Field(i).Name) 122 | } 123 | } 124 | 125 | return nil 126 | } 127 | 128 | func configFromEnv() error { 129 | cfg := Config{} 130 | err := envconfig.Process("xmr2xdag", &cfg) 131 | if err != nil { 132 | return err 133 | } 134 | instance = &cfg 135 | return nil 136 | } 137 | 138 | func configFromFile(r io.Reader) error { 139 | data, err := ioutil.ReadAll(r) 140 | if err != nil { 141 | return errors.Wrap(err, "Failed to read config file.") 142 | } 143 | 144 | cfg := Config{} 145 | err = setDefaults(&cfg) 146 | if err != nil { 147 | return err 148 | } 149 | err = json.Unmarshal(data, &cfg) 150 | if err != nil { 151 | return errors.Wrap(err, "Failed to parse JSON.") 152 | } 153 | err = validate(&cfg) 154 | if err != nil { 155 | return err 156 | } 157 | 158 | instance = &cfg 159 | return nil 160 | } 161 | 162 | // Get returns the global configuration singleton. 163 | func Get() *Config { 164 | var err error 165 | instantiation.Do(func() { 166 | if File != "" { 167 | var f *os.File 168 | f, err = os.Open(File) 169 | if err != nil { 170 | log.Fatal("open config file failed: ", err) 171 | return 172 | } 173 | defer f.Close() 174 | err = configFromFile(f) 175 | } else { 176 | // try to read config from environment 177 | err = configFromEnv() 178 | } 179 | }) 180 | if err != nil { 181 | log.Fatal("Unable to load config: ", err) 182 | } 183 | return instance 184 | } 185 | -------------------------------------------------------------------------------- /server/stratum/client.go: -------------------------------------------------------------------------------- 1 | package stratum 2 | 3 | import ( 4 | "bytes" 5 | "encoding/json" 6 | "errors" 7 | "io" 8 | "log" 9 | "math" 10 | "net" 11 | "net/rpc" 12 | "os" 13 | "reflect" 14 | "runtime/pprof" 15 | "sync" 16 | "time" 17 | 18 | "github.com/powerman/rpc-codec/jsonrpc2" 19 | ) 20 | 21 | const ( 22 | seqNotify = math.MaxUint64 23 | notificationBufferLength = 10 24 | ) 25 | 26 | var ( 27 | null = json.RawMessage([]byte("null")) 28 | 29 | // CallTimeout is the amount of time we wait for a response before we return an error 30 | CallTimeout = 30 * time.Second 31 | 32 | // ErrCallTimedOut means that call did not succeed within CallTimeout 33 | ErrCallTimedOut = errors.New("rpc call timeout") 34 | ) 35 | 36 | type clientCodec struct { 37 | dec *json.Decoder // for reading JSON values 38 | enc *json.Encoder // for writing JSON values 39 | c io.ReadWriteCloser 40 | 41 | // temporary work space 42 | resp clientResponse 43 | notif Notification 44 | 45 | // JSON-RPC responses include the request id but not the request method. 46 | // Package rpc expects both. 47 | // We save the request method in pending when sending a request 48 | // and then look it up by request ID when filling out the rpc Response. 49 | mutex sync.Mutex // protects pending 50 | pending map[uint64]string // map request id to method name 51 | 52 | notifications chan Notification 53 | } 54 | 55 | // newClientCodec returns a new rpc.ClientCodec using JSON-RPC 2.0 on conn. 
56 | func newClientCodec(conn io.ReadWriteCloser) rpc.ClientCodec { 57 | return &clientCodec{ 58 | dec: json.NewDecoder(conn), 59 | enc: json.NewEncoder(conn), 60 | c: conn, 61 | pending: make(map[uint64]string), 62 | 63 | // if the buffer gets full, we assume that it's not being consumed and error out 64 | notifications: make(chan Notification, notificationBufferLength), 65 | } 66 | } 67 | 68 | type clientRequest struct { 69 | Version string `json:"jsonrpc"` 70 | Method string `json:"method"` 71 | Params interface{} `json:"params,omitempty"` 72 | ID *uint64 `json:"id,omitempty"` 73 | } 74 | 75 | type Notification clientRequest 76 | 77 | func (c *clientCodec) WriteRequest(r *rpc.Request, param interface{}) error { 78 | if r.Seq == 0 { 79 | // seems many stratum pools don't like seq = 0 80 | return errors.New("skipping first request") 81 | } 82 | // If return error: it will be returned as is for this call. 83 | // Allow param to be only Array, Slice, Map or Struct. 84 | // When param is nil or uninitialized Map or Slice - omit "params". 85 | if param != nil { 86 | switch k := reflect.TypeOf(param).Kind(); k { 87 | case reflect.Map: 88 | if reflect.TypeOf(param).Key().Kind() == reflect.String { 89 | if reflect.ValueOf(param).IsNil() { 90 | param = nil 91 | } 92 | } 93 | case reflect.Slice: 94 | if reflect.ValueOf(param).IsNil() { 95 | param = nil 96 | } 97 | case reflect.Array, reflect.Struct: 98 | case reflect.Ptr: 99 | switch k := reflect.TypeOf(param).Elem().Kind(); k { 100 | case reflect.Map: 101 | if reflect.TypeOf(param).Elem().Key().Kind() == reflect.String { 102 | if reflect.ValueOf(param).Elem().IsNil() { 103 | param = nil 104 | } 105 | } 106 | case reflect.Slice: 107 | if reflect.ValueOf(param).Elem().IsNil() { 108 | param = nil 109 | } 110 | case reflect.Array, reflect.Struct: 111 | default: 112 | return jsonrpc2.NewError(errInternal.Code, "unsupported param type: Ptr to "+k.String()) 113 | } 114 | default: 115 | return jsonrpc2.NewError(errInternal.Code, "unsupported param type: "+k.String()) 116 | } 117 | } 118 | 119 | var req clientRequest 120 | if r.Seq != seqNotify { 121 | c.mutex.Lock() 122 | c.pending[r.Seq] = r.ServiceMethod 123 | c.mutex.Unlock() 124 | req.ID = &r.Seq 125 | } 126 | req.Version = "2.0" 127 | req.Method = r.ServiceMethod 128 | req.Params = param 129 | if err := c.enc.Encode(&req); err != nil { 130 | return jsonrpc2.NewError(errInternal.Code, err.Error()) 131 | } 132 | 133 | return nil 134 | } 135 | 136 | type clientResponse struct { 137 | Version string `json:"jsonrpc"` 138 | ID *uint64 `json:"id"` 139 | Result *json.RawMessage `json:"result,omitempty"` 140 | Error *jsonrpc2.Error `json:"error,omitempty"` 141 | } 142 | 143 | func (r *clientResponse) reset() { 144 | r.Version = "" 145 | r.ID = nil 146 | r.Result = nil 147 | r.Error = nil 148 | } 149 | 150 | func (r *clientResponse) UnmarshalJSON(raw []byte) error { 151 | r.reset() 152 | type resp *clientResponse 153 | if err := json.Unmarshal(raw, resp(r)); err != nil { 154 | return errors.New("bad response: " + string(raw)) 155 | } 156 | 157 | var o = make(map[string]*json.RawMessage) 158 | if err := json.Unmarshal(raw, &o); err != nil { 159 | return errors.New("bad response: " + string(raw)) 160 | } 161 | _, okVer := o["jsonrpc"] 162 | _, okID := o["id"] 163 | _, okRes := o["result"] 164 | _, okErr := o["error"] 165 | // this has been updated to allow error and result as part of the response 166 | if !okVer || !okID || !(okRes || okErr) || len(o) > 4 { 167 | return errors.New("bad response: " + 
string(raw)) 168 | } 169 | if r.Version != "2.0" { 170 | return errors.New("bad response: " + string(raw)) 171 | } 172 | if okRes && r.Result == nil { 173 | r.Result = &null 174 | } 175 | if okErr && o["error"] != nil { 176 | oe := make(map[string]*json.RawMessage) 177 | if err := json.Unmarshal(*o["error"], &oe); err != nil { 178 | return errors.New("bad response: " + string(raw)) 179 | } 180 | if oe["code"] == nil || oe["message"] == nil { 181 | return errors.New("bad response: " + string(raw)) 182 | } 183 | if _, ok := oe["data"]; (!ok && len(oe) > 2) || len(oe) > 3 { 184 | return errors.New("bad response: " + string(raw)) 185 | } 186 | } 187 | if o["id"] == nil && !okErr { 188 | return errors.New("bad response: " + string(raw)) 189 | } 190 | 191 | return nil 192 | } 193 | 194 | func (c *clientCodec) handleNotification(r io.Reader) error { 195 | d := json.NewDecoder(r) 196 | err := d.Decode(&c.notif) 197 | // EOF is already handled by ReadResponseHeader 198 | if err == nil { 199 | c.receiveNotification() 200 | } 201 | 202 | return err 203 | } 204 | 205 | func (c *clientCodec) receiveNotification() { 206 | // if we fill the buffer, kill the application 207 | if len(c.notifications) >= notificationBufferLength { 208 | out, _ := os.Create("/tmp/goroutine.pprof") 209 | blockOut, _ := os.Create("/tmp/block.pprof") 210 | defer out.Close() 211 | defer blockOut.Close() 212 | pprof.Lookup("goroutine").WriteTo(out, 2) 213 | pprof.Lookup("block").WriteTo(blockOut, 2) 214 | 215 | log.Fatal("Stratum client notification buffer is full! Process will be killed!" + 216 | " Read from Client.Notifications to fix this error.") 217 | } 218 | 219 | c.notifications <- c.notif 220 | } 221 | 222 | // Because the stratum connection is bidirectional, we are going to modify the behavior of the client to accept 223 | // notifications from the server (including jobs). Adding some server functionality (receive Notifs) to Client 224 | // seems easier than multiplexing every connection. Notifications are NOT handled (eg. by RPC svc) by this codec 225 | // This library throws a fatal error if it detects that notifications are not being consumed. 226 | func (c *clientCodec) ReadResponseHeader(r *rpc.Response) error { 227 | // If return err: 228 | // - io.EOF will became ErrShutdown or io.ErrUnexpectedEOF 229 | // - it will be returned as is for all pending calls 230 | // - client will be shutdown 231 | // So, return io.EOF as is, return *Error for all other errors. 
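// Editor's note on the block below: the decoder reads through a TeeReader so
// every byte consumed is also copied into backup; if the payload fails to
// decode as a clientResponse, those copied bytes are re-parsed by
// handleNotification as a server-initiated Notification (e.g. a job push)
// instead of being lost.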
232 | b := make([]byte, 0) 233 | backup := bytes.NewBuffer(b) 234 | conn := io.TeeReader(c.c, backup) 235 | d := json.NewDecoder(conn) 236 | 237 | if err := d.Decode(&c.resp); err != nil { 238 | if err == io.EOF { 239 | return err 240 | } 241 | return c.handleNotification(backup) 242 | } 243 | if c.resp.Error != nil { 244 | return c.resp.Error 245 | } 246 | 247 | if c.resp.ID == nil { 248 | // TODO - this is probably the wrong error 249 | return errInternal 250 | } 251 | 252 | c.mutex.Lock() 253 | r.ServiceMethod = c.pending[*c.resp.ID] 254 | delete(c.pending, *c.resp.ID) 255 | c.mutex.Unlock() 256 | 257 | r.Error = "" 258 | r.Seq = *c.resp.ID 259 | if c.resp.Error != nil { 260 | r.Error = c.resp.Error.Error() 261 | } 262 | return nil 263 | } 264 | 265 | func (c *clientCodec) ReadResponseBody(x interface{}) error { 266 | // If x!=nil and return error e: 267 | // - this call get e.Error() appended to "reading body " 268 | // - other pending calls get error as is XXX actually other calls 269 | // shouldn't be affected by this error at all, so let's at least 270 | // provide different error message for other calls 271 | if x == nil || c.resp.Result == nil { 272 | return nil 273 | } 274 | if err := json.Unmarshal(*c.resp.Result, x); err != nil { 275 | e := jsonrpc2.NewError(errInternal.Code, err.Error()) 276 | e.Data = jsonrpc2.NewError(errInternal.Code, "some other Call failed to unmarshal Reply") 277 | return e 278 | } 279 | return nil 280 | } 281 | 282 | func (c *clientCodec) Close() error { 283 | return c.c.Close() 284 | } 285 | 286 | type Client struct { 287 | *rpc.Client 288 | codec rpc.ClientCodec 289 | } 290 | 291 | // Call wraps rpc.Call to provide a timeout - otherwise functionality is the same 292 | func (c *Client) Call(serviceMethod string, args interface{}, reply interface{}) error { 293 | call := c.Go(serviceMethod, args, reply, nil) 294 | select { 295 | case <-call.Done: 296 | if call.Error != nil { 297 | return call.Error 298 | } 299 | return nil 300 | case <-time.After(CallTimeout): 301 | return ErrCallTimedOut 302 | } 303 | } 304 | 305 | // Notify tries to invoke the named function. It return error only in case 306 | // it wasn't able to send request. 307 | func (c *Client) Notify(serviceMethod string, args interface{}) error { 308 | req := &rpc.Request{ 309 | ServiceMethod: serviceMethod, 310 | Seq: seqNotify, 311 | } 312 | return c.codec.WriteRequest(req, args) 313 | } 314 | 315 | func (c *Client) Notifications() chan Notification { 316 | return c.codec.(*clientCodec).notifications 317 | } 318 | 319 | // NewClient returns a new Client to handle requests to the 320 | // set of services at the other end of the connection. 321 | func NewClient(conn io.ReadWriteCloser) *Client { 322 | codec := newClientCodec(conn) 323 | client := rpc.NewClientWithCodec(codec) 324 | // this is hack around 325 | _ = client.Go("incrementMySequence", nil, nil, nil) 326 | return &Client{client, codec} 327 | } 328 | 329 | // Dial connects to a JSON-RPC 2.0 server at the specified network address. 
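// A hedged usage sketch (editor's addition; the address, method name and
// params below are placeholders):
//
//	c, err := Dial("tcp", "pool.example.org:3333")
//	if err != nil {
//		// handle the dial error
//	}
//	go func() {
//		for n := range c.Notifications() {
//			_ = n // consume notifications promptly; the codec aborts if this buffer fills up
//		}
//	}()
//	var reply map[string]interface{}
//	err = c.Call("login", map[string]interface{}{"login": "<wallet>"}, &reply)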
330 | func Dial(network, address string) (*Client, error) { 331 | conn, err := net.Dial(network, address) 332 | if err != nil { 333 | return nil, err 334 | } 335 | return NewClient(conn), err 336 | } 337 | 338 | // DialTimeout is Dial, but with the timeout specified 339 | func DialTimeout(network, address string, timeout time.Duration) (*Client, error) { 340 | conn, err := net.DialTimeout(network, address, timeout) 341 | if err != nil { 342 | return nil, err 343 | } 344 | return NewClient(conn), err 345 | } 346 | -------------------------------------------------------------------------------- /server/proxy/proxy.go: -------------------------------------------------------------------------------- 1 | package proxy 2 | 3 | import ( 4 | "encoding/binary" 5 | "encoding/hex" 6 | "errors" 7 | "math" 8 | "math/rand" 9 | "net" 10 | "os" 11 | "strings" 12 | "sync" 13 | "sync/atomic" 14 | "time" 15 | 16 | "github.com/swordlet/xmrig2xdag/base58" 17 | "github.com/swordlet/xmrig2xdag/config" 18 | "github.com/swordlet/xmrig2xdag/logger" 19 | "github.com/swordlet/xmrig2xdag/stratum" 20 | "github.com/swordlet/xmrig2xdag/xdag" 21 | "golang.org/x/net/proxy" 22 | ) 23 | 24 | const ( 25 | // MaxUint protects IDs from overflow if the process runs for thousands of years 26 | MaxUint = ^uint64(0) 27 | 28 | // TODO adjust - lower means more connections to pool, potentially fewer stales if that is a problem 29 | //maxProxyWorkers = 1024 30 | 31 | // retryDelay = 5 * time.Second 32 | 33 | timeout = 10 * time.Second 34 | 35 | xdagAlgo = `rx/xdag` 36 | 37 | initDiffCount = 16 38 | 39 | //refreshDiffCount uint64 = 128 // count of shares to refresh target 40 | refreshDiffInterval = 10 * time.Minute 41 | 42 | submitInterval = 5 43 | 44 | eofLimit = 3 45 | 46 | detectProxy = "XDAG_POOL_RESTART_DETECT_PROXY" 47 | detectAddr = "NO_ADDRESS_PROXY_0" 48 | ) 49 | 50 | var poolIsDown atomic.Uint64 51 | var eofCount atomic.Uint64 52 | 53 | // var crc32table = crc32.MakeTable(0xEDB88320) 54 | 55 | var ( 56 | ErrBadJobID = errors.New("invalid job id") 57 | ErrDuplicateShare = errors.New("duplicate share") 58 | ErrMalformedShare = errors.New("malformed share") 59 | ) 60 | 61 | // Worker does the work for the proxy. It exposes methods that allow interface with the proxy. 62 | type Worker interface { 63 | // ID is used to distinguish this worker from other workers on the proxy. 64 | ID() uint64 65 | // SetID allows proxies to assign this value when a connection is established. 66 | SetID(uint64) 67 | 68 | //SetProxy Workers must implement this method to establish communication with their assigned 69 | // proxy. The proxy connection should be stored in order to 1. Submit Shares and 2. Disconnect Cleanly 70 | SetProxy(*Proxy) 71 | Proxy() *Proxy 72 | 73 | // Disconnect closes the connection to the proxy from the worker. 74 | // Ideally it sets up the worker to try and reconnect to a new proxy through the director. 75 | Disconnect() 76 | Conn() net.Conn 77 | Close() 78 | 79 | NewJob(*Job) 80 | 81 | RemoveProxy() 82 | } 83 | 84 | // Proxy manages a group of workers. 
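// Rough lifecycle, as wired up elsewhere in this package (editor's summary of
// the code below):
//
//	p := NewProxy(id)   // allocates channels and the embedded stratum RPC server
//	p.Add(w)            // binds a stratum worker to this proxy
//	go p.Run(minerName) // dials the XDAG pool, then loops over share submissions,
//	                    // pool notifications and the done signal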
85 | type Proxy struct { 86 | ID uint64 87 | Conn *xdag.Connection 88 | SS *stratum.Server 89 | director *Director 90 | 91 | //authID string // identifies the proxy to the pool 92 | aliveSince time.Time 93 | shares uint64 94 | 95 | //workerCount int 96 | worker Worker 97 | submissions chan *share 98 | notify chan []byte 99 | done chan int 100 | ready bool 101 | currentJob *Job 102 | PrevJobID string 103 | BeforePrevJobID string 104 | BeforeBeforePrevJobID string 105 | jobMu sync.Mutex 106 | connMu sync.RWMutex 107 | isClosed bool 108 | 109 | addressHash [24]byte 110 | address string 111 | 112 | fieldOut uint64 113 | fieldIn uint64 114 | 115 | recvCount int 116 | recvByte [2 * xdag.HashLength]byte 117 | 118 | target string 119 | targetSince time.Time 120 | lastSend time.Time 121 | miniResult uint64 122 | miniNonce uint32 123 | targetShare uint64 124 | minerName string 125 | } 126 | 127 | func init() { 128 | rand.Seed(time.Now().UnixNano()) 129 | } 130 | 131 | // NewProxy creates a new proxy, starts the work thread, and returns a pointer to it. 132 | func NewProxy(id uint64) *Proxy { 133 | p := &Proxy{ 134 | ID: id, 135 | aliveSince: time.Now(), 136 | currentJob: &Job{}, 137 | PrevJobID: NewLen(28), 138 | submissions: make(chan *share), 139 | done: make(chan int), 140 | ready: true, 141 | lastSend: time.Now(), 142 | miniResult: math.MaxUint64, 143 | notify: make(chan []byte, 2), 144 | } 145 | 146 | p.SS = stratum.NewServer() 147 | p.SS.RegisterName("mining", &Mining{}) 148 | return p 149 | } 150 | 151 | func (p *Proxy) Run(minerName string) { 152 | retryTimes := 3 153 | if config.Get().TryPoolTimes > 0 { 154 | retryTimes = config.Get().TryPoolTimes 155 | } 156 | retryDelay := 10 * time.Second 157 | if config.Get().TryDelaySeconds > 1 { 158 | retryDelay = time.Duration(config.Get().TryDelaySeconds) * time.Second 159 | } 160 | p.minerName = minerName 161 | var retryCount = 0 162 | for { 163 | if poolIsDown.Load() > 0 && minerName != detectProxy { 164 | p.shutdown(2) 165 | return 166 | } 167 | err := p.connect(minerName) 168 | if err == nil { 169 | break 170 | } 171 | retryCount += 1 172 | if retryCount > retryTimes { 173 | if minerName != detectProxy { 174 | p.shutdown(2) 175 | } else { 176 | p.shutdown(-1) 177 | } 178 | return 179 | } 180 | logger.Get().Printf("Proxy[%d] Failed to acquire pool connection %d times. 
Retrying in %s.Error: %s\n", 181 | p.ID, retryCount, retryDelay, err) 182 | // TODO allow fallback pools here 183 | <-time.After(retryDelay) 184 | } 185 | 186 | for { 187 | if minerName == detectProxy && poolIsDown.Load() == 0 { // pool restart , quit detect proxy 188 | return 189 | } 190 | select { 191 | // these are from workers 192 | case s := <-p.submissions: 193 | err := p.handleSubmit(s) //, p.SC) 194 | if err != nil { 195 | logger.Get().Println("share submission error: ", err) 196 | } 197 | if err != nil && strings.Contains(strings.ToLower(err.Error()), "banned") { 198 | logger.Get().Println("Banned IP - killing proxy: ", p.ID) 199 | p.shutdown(-1) 200 | return 201 | } 202 | case notif := <-p.notify: 203 | p.handleNotification(notif) 204 | case cl := <-p.done: 205 | p.shutdown(cl) 206 | return 207 | } 208 | } 209 | 210 | } 211 | 212 | func (p *Proxy) handleJob(job *Job) (err error) { 213 | p.jobMu.Lock() 214 | //p.prevJob, p.currentJob = p.currentJob, job 215 | p.BeforeBeforePrevJobID = p.BeforePrevJobID 216 | p.BeforePrevJobID = p.PrevJobID 217 | p.PrevJobID = p.currentJob.ID 218 | p.currentJob = job 219 | 220 | p.miniResult = math.MaxUint64 221 | p.lastSend = time.Now() 222 | 223 | p.jobMu.Unlock() 224 | 225 | //if err != nil { 226 | // // logger.Get().Debugln("Skipping regular job broadcast: ", err) 227 | // return 228 | //} 229 | 230 | // logger.Get().Debugln("Broadcasting new regular job: ", job.ID) 231 | p.broadcastJob() 232 | return 233 | } 234 | 235 | // broadcast a job to all workers 236 | func (p *Proxy) broadcastJob() { 237 | p.connMu.Lock() 238 | defer p.connMu.Unlock() 239 | if p.isClosed { 240 | return 241 | } 242 | 243 | logger.Get().Debugln("Broadcasting new job to connected workers.") 244 | //for _, w := range p.workers { 245 | // go w.NewJob(p.NextJob()) 246 | //} 247 | if p.worker != nil { 248 | p.worker.NewJob(p.currentJob) 249 | } 250 | 251 | } 252 | 253 | func (p *Proxy) handleNotification(notif []byte) { 254 | var data [32]byte 255 | copy(data[:], notif[:]) 256 | p.fieldIn += 1 257 | if xdag.Hash2address(data[:]) == p.address { // ignore 32 bytes: address with balance 258 | p.recvCount = 0 259 | } else if p.recvCount == 0 { // ignore the balance of fake block and the seed ,both ended with 8 bytes of zero 260 | seedZero := binary.BigEndian.Uint64(data[24:]) 261 | if seedZero == 0 { 262 | return 263 | } 264 | p.recvCount++ 265 | copy(p.recvByte[:32], data[:]) 266 | } else if p.recvCount == 1 { 267 | p.recvCount = 0 268 | if p.address == detectProxy { // pool restart, close detect proxy, restore miners connection 269 | poolIsDown.Store(0) 270 | p.shutdown(-1) 271 | return 272 | } 273 | copy(p.recvByte[32:], data[:]) 274 | 275 | if p.shares > initDiffCount+1 && time.Since(p.targetSince) >= refreshDiffInterval { 276 | p.setTarget(p.shares) 277 | } 278 | 279 | job := p.CreateJob(p.recvByte[:]) 280 | if p.currentJob.Blob == "" || p.currentJob.Blob[:32] != job.Blob[:32] { 281 | err := p.handleJob(job) 282 | 283 | if err != nil { 284 | // log and wait for the next job? 
285 |                 logger.Get().Println("error processing job: ", job)
286 |                 logger.Get().Println(err)
287 |             }
288 |         }
289 | 
290 |     }
291 | 
292 | }
293 | 
294 | func (p *Proxy) connect(minerName string) error {
295 |     p.connMu.Lock()
296 |     defer p.connMu.Unlock()
297 | 
298 |     if p.isClosed {
299 |         return errors.New("cannot connect: proxy is closed")
300 |     }
301 | 
302 |     var conn net.Conn
303 |     var socks5Dialer proxy.Dialer
304 |     var err error
305 | 
306 |     if len(config.Get().Socks5) > 11 {
307 |         socks5Dialer, err = proxy.SOCKS5("tcp", config.Get().Socks5, nil, proxy.Direct)
308 |         if err != nil {
309 |             return err
310 |         }
311 |         conn, err = socks5Dialer.Dial("tcp", config.Get().PoolAddr)
312 |     } else {
313 |         conn, err = net.DialTimeout("tcp", config.Get().PoolAddr, timeout)
314 |     }
315 | 
316 |     if err != nil {
317 |         return err
318 |     }
319 | 
320 |     logger.Get().Debugln("Client made pool connection.", p.ID)
321 |     //p.SC = sc
322 | 
323 |     p.Conn = xdag.NewConnection(conn, p.ID, p.notify, p.done)
324 |     p.Conn.Start()
325 | 
326 |     p.fieldOut += 16
327 |     var bytesWithHeader [28]byte
328 |     binary.LittleEndian.PutUint32(bytesWithHeader[0:4], 24)
329 |     copy(bytesWithHeader[4:], p.addressHash[:])
330 |     p.Conn.SendBuffMsg(bytesWithHeader[:])
331 | 
332 |     time.Sleep(2 * time.Second)
333 |     if minerName != "" {
334 |         var field [32]byte
335 |         binary.LittleEndian.PutUint32(field[0:4], xdag.WORKERNAME_HEADER_WORD)
336 |         copy(field[4:32], minerName[:])
337 |         p.fieldOut += 1
338 |         var nameWithHeader [36]byte
339 |         binary.LittleEndian.PutUint32(nameWithHeader[0:4], 32)
340 |         copy(nameWithHeader[4:], field[:])
341 |         p.Conn.SendBuffMsg(nameWithHeader[:])
342 |     }
343 | 
344 |     logger.Get().Debugln(p.address, "--Successfully logged into pool.")
345 | 
346 |     logger.Get().Printf("**** Proxy [%d] Connected to pool server: <%s>, (%s) \n", p.ID, config.Get().PoolAddr, p.minerName)
347 | 
348 |     return nil
349 | }
350 | 
351 | func (p *Proxy) validateShare(s *share) error {
352 |     var job *Job
353 |     switch {
354 |     case s.JobID == p.currentJob.ID:
355 |         job = p.currentJob
356 |     case s.JobID == p.PrevJobID:
357 |         job = p.currentJob
358 |     default:
359 |         return ErrBadJobID
360 |     }
361 |     // for _, n := range job.submittedNonces {
362 |     // if n == s.Nonce {
363 |     // return ErrDuplicateShare
364 |     // }
365 |     // }
366 |     return s.validate(job)
367 | }
368 | 
369 | // shutdown tears the proxy down: cl=0 closed by worker, cl=1 closed by pool, cl=2 pool is down, cl=-1 closed by itself
370 | func (p *Proxy) shutdown(cl int) {
371 |     p.connMu.Lock()
372 |     defer p.connMu.Unlock()
373 | 
374 |     if p.isClosed {
375 |         return
376 |     }
377 | 
378 |     if cl == 0 {
379 |         logger.Get().Printf("proxy [%d] shutdown by worker <%s>\n", p.ID, p.address)
380 |         if p.Conn != nil {
381 |             p.Conn.Close()
382 |         }
383 |     } else if cl == 1 {
384 |         logger.Get().Printf("proxy [%d] shutdown by pool <%s>\n", p.ID, p.address)
385 |         if p.worker != nil {
386 |             p.worker.Close()
387 |         }
388 |         if p.fieldIn == 0 && p.fieldOut < 18 && p.Conn.EOFcount.Load() > 0 {
389 |             eofCount.Add(1)
390 |             if eofCount.Load() > eofLimit { // connection EOF immediately after connect
391 |                 poolIsDown.Add(1)
392 |                 logger.Get().Println("*** Pool is down. Please wait for pool recovery.")
393 |                 if config.Get().ExitOnPoolDown {
394 |                     os.Exit(1)
395 |                 }
396 |             }
397 |         }
398 |     } else if cl == 2 {
399 |         poolIsDown.Add(1)
400 |         logger.Get().Printf("proxy [%d] pool is down <%s>\n", p.ID, p.address)
401 |         logger.Get().Println("*** Pool is down. Please wait for pool recovery.")
402 |         if p.worker != nil {
403 |             p.worker.Close()
404 |         }
405 |         if config.Get().ExitOnPoolDown {
406 |             os.Exit(1)
407 |         }
408 |     } else if cl == -1 {
409 |         logger.Get().Printf("proxy [%d] shutdown, <%s>\n", p.ID, p.address)
410 |         if p.Conn != nil {
411 |             p.Conn.Close()
412 |         }
413 |         if p.worker != nil {
414 |             p.worker.Close()
415 |         }
416 |     }
417 | 
418 |     if p.ID > 0 {
419 |         close(p.done)
420 |         p.director.removeProxy(p.ID)
421 |         p.worker = nil
422 |         p.SS = nil
423 |         p.director = nil
424 |         close(p.notify)
425 |         close(p.submissions)
426 |     }
427 |     p.Conn = nil
428 |     p.isClosed = true
429 | }
430 | 
431 | func (p *Proxy) Close() {
432 |     p.connMu.Lock()
433 |     defer p.connMu.Unlock()
434 | 
435 |     if p.isClosed {
436 |         return
437 |     }
438 | 
439 |     if p.Conn != nil {
440 |         p.Conn.Close()
441 |     }
442 |     if p.ID > 0 {
443 |         close(p.done)
444 |         p.director.removeProxy(p.ID)
445 |         p.worker = nil
446 |         p.SS = nil
447 |         p.director = nil
448 |         close(p.notify)
449 |         close(p.submissions)
450 | 
451 |     }
452 |     p.Conn = nil
453 |     p.isClosed = true
454 | }
455 | 
456 | // func (p *Proxy) Delete() {
457 | // p.connMu.Lock()
458 | // defer p.connMu.Unlock()
459 | 
460 | // if p.isClosed {
461 | // return
462 | // }
463 | 
464 | // if p.Conn != nil && p.Conn.ConnID > 0 {
465 | // return
466 | // }
467 | 
468 | // if p.worker != nil {
469 | // p.worker.Close()
470 | // }
471 | // close(p.done)
472 | // p.director.removeProxy(p.ID)
473 | // p.worker = nil
474 | // p.SS = nil
475 | // p.director = nil
476 | 
477 | // logger.Get().Printf("Proxy[%d] idle deleted", p.ID)
478 | // p.Conn = nil
479 | // p.isClosed = true
480 | // }
481 | 
482 | func (p *Proxy) handleSubmit(s *share) (err error) {
483 |     p.connMu.Lock()
484 |     defer p.connMu.Unlock()
485 |     if p.isClosed {
486 |         return
487 |     }
488 | 
489 |     defer func() {
490 |         close(s.Response)
491 |         close(s.Error)
492 |     }()
493 |     if p.Conn == nil {
494 |         logger.Get().Println("dropping share due to nil client for job: ", s.JobID)
495 |         err = errors.New("no client to handle share")
496 |         s.Error <- err
497 |         return
498 |     }
499 |     reply := StatusReply{}
500 |     if !strings.HasPrefix(s.JobID, "FFFFFFFFFF") && s.JobID == p.currentJob.ID {
501 |         if err = p.validateShare(s); err != nil {
502 |             logger.Get().Debug("share: ", s)
503 |             logger.Get().Println("rejecting share with: ", err)
504 |             s.Error <- err
505 |             return
506 |         }
507 |         p.jobMu.Lock()
508 |         resBytes, _ := hex.DecodeString(s.Result)
509 |         nonceBytes, _ := hex.DecodeString(s.Nonce)
510 |         result := binary.LittleEndian.Uint64(resBytes[len(resBytes)-8:])
511 |         t := time.Now()
512 |         if t.Sub(p.lastSend) >= 4*time.Second {
513 |             var shareBytes []byte
514 |             if result < p.miniResult {
515 |                 shareBytes = p.CreateShare(s)
516 |             } else {
517 |                 shareBytes = p.MakeShare(p.miniResult, p.miniNonce)
518 |             }
519 |             p.fieldOut += 1
520 |             var bytesWithHeader [36]byte
521 |             binary.LittleEndian.PutUint32(bytesWithHeader[0:4], 32)
522 |             copy(bytesWithHeader[4:], shareBytes[:])
523 |             p.Conn.SendBuffMsg(bytesWithHeader[:])
524 | 
525 |             p.miniResult = math.MaxUint64
526 |             p.lastSend = t
527 |         } else {
528 |             if result < p.miniResult {
529 |                 p.miniResult = result
530 |                 p.miniNonce = binary.LittleEndian.Uint32(nonceBytes[:])
531 |             }
532 |         }
533 |         p.jobMu.Unlock()
534 |     }
535 |     reply.Status = "OK"
536 |     p.shares++
537 |     if p.shares == 1 {
538 |         p.targetSince = time.Now()
539 |     } else if p.shares == initDiffCount+1 {
540 |         p.setTarget(p.shares)
541 |     }
542 | 
543 |     //else if p.shares > initDiffCount+1 && time.Now().Sub(p.targetSince) >= refreshDiffInterval {
544 |     // p.setTarget(p.shares)
545 |     //
546 |     //}
547 | 
548 |     // logger.Get().Debugf("proxy %v share submit response: %s", p.ID, reply)
549 |     s.Response <- &reply
550 |     s.Error <- nil
551 |     return
552 | }
553 | 
554 | // Submit sends worker shares to the pool. Safe for concurrent use.
555 | func (p *Proxy) Submit(params map[string]interface{}) (*StatusReply, error) {
556 |     p.connMu.Lock()
557 |     defer p.connMu.Unlock()
558 | 
559 |     if p.isClosed {
560 |         return nil, ErrBadJobID
561 |     }
562 |     s := newShare(params)
563 | 
564 |     if s.JobID == "" {
565 |         return nil, ErrBadJobID
566 |     }
567 |     if s.Nonce == "" {
568 |         return nil, ErrMalformedShare
569 |     }
570 | 
571 |     // If it matters, locking jobMu here should be fine.
572 |     // There might be a race on the job IDs, but it shouldn't matter.
573 |     //if s.JobID == p.currentJob.ID || s.JobID == p.prevJob.ID {
574 |     if s.JobID == p.currentJob.ID || s.JobID == p.PrevJobID || strings.HasPrefix(s.JobID, "FFFFFFFFFF") ||
575 |         s.JobID == p.BeforePrevJobID || s.JobID == p.BeforeBeforePrevJobID {
576 |         p.submissions <- s
577 |         //} else if s.JobID == p.donateJob.ID || s.JobID == p.prevDonateJob.ID {
578 |         // p.donations <- s
579 |     } else {
580 |         return nil, ErrBadJobID
581 |     }
582 | 
583 |     return <-s.Response, <-s.Error
584 | }
585 | 
586 | // NextJob gets the next job (on the current block) and increments the nonce
587 | func (p *Proxy) NextJob() *Job {
588 |     p.jobMu.Lock()
589 |     defer p.jobMu.Unlock()
590 |     p.currentJob = p.currentJob.Next()
591 | 
592 |     return p.currentJob
593 | }
594 | 
595 | // Add a worker to the proxy - safe for concurrent use.
596 | func (p *Proxy) Add(w Worker) {
597 |     w.SetProxy(p)
598 |     w.SetID(p.ID)
599 |     p.worker = w
600 | }
601 | 
602 | // Remove a worker from the proxy - safe for concurrent use.
603 | func (p *Proxy) Remove(w Worker) {
604 |     p.connMu.Lock()
605 |     defer p.connMu.Unlock()
606 | 
607 |     if p.isClosed {
608 |         return
609 |     }
610 | 
611 |     if p.Conn != nil {
612 |         p.Conn.Close()
613 |     }
614 | 
615 |     p.director.removeProxy(p.ID)
616 |     p.worker = nil
617 |     p.SS = nil
618 |     p.director = nil
619 | 
620 |     logger.Get().Printf("Proxy[%d] removed", p.ID)
621 |     p.Conn = nil
622 |     close(p.done)
623 |     close(p.notify)
624 |     close(p.submissions)
625 |     p.isClosed = true
626 | }
627 | 
628 | // CreateJob builds a job for distribution to a worker
629 | func (p *Proxy) CreateJob(blobBytes []byte) *Job {
630 | 
631 |     logger.Get().Debugf("proxy[%d] <%s> (%s) --read: %s", p.ID, p.address, p.minerName, hex.EncodeToString(blobBytes[:]))
632 | 
633 |     nonce := rand.Uint64() // initial random nonce
634 |     j := &Job{
635 |         ID: NewLen(28),
636 |         Target: p.getTarget(),
637 |         SeedHash: hex.EncodeToString(blobBytes[32:64]), // seed
638 |         Algo: xdagAlgo,
639 |         //submittedNonces: make([]string, 0),
640 |         initialNonce: uint32(nonce), // low 32 bits of the random uint64 nonce
641 |         currentNonce: uint32(nonce), // 32-bit nonce the miner iterates
642 |         currentBlob: make([]byte, 64),
643 |     }
644 |     copy(j.currentBlob, blobBytes)
645 |     copy(j.currentBlob[32:56], p.addressHash[:]) // low 24 bytes of the account address
646 |     nonceBytes := make([]byte, initNonceLength)
647 |     binary.BigEndian.PutUint64(nonceBytes, nonce) // last 8 bytes hold the nonce
648 |     copy(j.currentBlob[initNonceOffset:initNonceOffset+initNonceLength], nonceBytes)
649 |     j.Blob = hex.EncodeToString(j.currentBlob)
650 |     logger.Get().Debugf("proxy[%d] job blob:%s ", p.ID, j.Blob)
651 |     logger.Get().Debugf("proxy[%d] seed:%s", p.ID, j.SeedHash)
652 |     return j
653 | }
654 | 
655 | func (p *Proxy) CreateShare(s *share) []byte {
656 |     nonceBytes, _ := hex.DecodeString(s.Nonce)
657 | 
658 |     var result [32]byte
659 |     copy(result[:28], p.currentJob.currentBlob[32:60])
660 |     copy(result[28:32], nonceBytes[:])
661 |     logger.Get().Debugf("proxy[%d] nonce: %s, share result: %s", p.ID, s.Nonce, s.Result)
662 |     return result[:]
663 | }
664 | 
665 | func (p *Proxy) MakeShare(miniResult uint64, miniNonce uint32) []byte {
666 |     var result [32]byte
667 |     copy(result[:28], p.currentJob.currentBlob[32:60])
668 |     binary.LittleEndian.PutUint32(result[28:32], miniNonce)
669 |     logger.Get().Debugf("proxy[%d] nonce: %08x, share result: %016x", p.ID, miniNonce, miniResult)
670 |     return result[:]
671 | }
672 | 
673 | func (p *Proxy) SetAddress(a string) error {
674 |     h, err := fromBase85(a)
675 |     if err != nil {
676 |         return errors.New("invalid wallet address")
677 |     }
678 |     if len(h) != 24 {
679 |         return errors.New("invalid address length")
680 |     }
681 |     copy(p.addressHash[:], h[:])
682 |     p.address = a
683 |     return nil
684 | }
685 | 
686 | func (p *Proxy) getTarget() string {
687 |     if p.target == "" {
688 |         return "b88d0600" // difficulty = 10000
689 |     } else {
690 |         return p.target
691 |     }
692 | 
693 | }
694 | 
695 | func (p *Proxy) setTarget(shareIndex uint64) {
696 |     p.jobMu.Lock()
697 |     defer p.jobMu.Unlock()
698 |     var b [8]byte
699 |     t := time.Now()
700 |     duration := t.Sub(p.targetSince)
701 |     //hashRate := (float64(10000) * initDiffCount) / duration.Seconds()
702 |     //difficulty := hashRate * submitInterval
703 |     //target := uint64(float64(0xFFFFFFFFFFFFFFFF) / difficulty)
704 | 
705 |     //difficulty := (float64(10000) * diffCount * submitInterval) / duration.Seconds()
706 |     //target := uint64(float64(0xFFFFFFFFFFFFFFFF) / difficulty)
707 | 
708 |     var diffCount float64
709 |     if shareIndex == initDiffCount+1 {
710 |         diffCount = float64(initDiffCount)
711 |     } else {
712 |         diffCount = float64(p.shares - p.targetShare) //float64(refreshDiffCount)
713 |     }
714 |     targetStr := "00000000" + p.getTarget()
715 |     targetBytes, err := hex.DecodeString(targetStr)
716 |     if err != nil {
717 |         return
718 |     }
719 |     target := binary.LittleEndian.Uint64(targetBytes)
720 | 
721 |     // difficulty = 1/target = uint64(float64(0xFFFFFFFFFFFFFFFF) / target)
722 |     // target = uint64(float64(0xFFFFFFFFFFFFFFFF) / difficulty)
723 |     // difficulty = hashRate * calculateDuration
724 |     // newTarget : target = duration : (diffCount * submitInterval)
725 | 
726 |     newTarget := uint64(float64(target) * duration.Seconds() / (diffCount * submitInterval))
727 | 
728 |     binary.LittleEndian.PutUint64(b[:], newTarget)
729 |     p.target = hex.EncodeToString(b[4:])
730 |     p.targetSince = t
731 |     p.targetShare = p.shares
732 |     logger.Get().Printf("proxy [%d] new target: %s\n", p.ID, p.target)
733 | }
734 | 
735 | func fromBase85(address string) ([]byte, error) {
736 |     b, _, err := base58.ChkDec(address)
737 |     return b, err
738 | }
739 | 
--------------------------------------------------------------------------------
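The retargeting arithmetic in `setTarget` above keeps the observed share rate near the desired submit interval: the current 4-byte target is widened to 64 bits, scaled by the ratio of the measured time per share to the desired time per share, and the high 4 bytes are kept as the new target. Below is a minimal standalone sketch of that arithmetic; `retarget` is a hypothetical helper name, and the 16-share / 96-second / 4-second numbers are purely illustrative (the real `initDiffCount` and `submitInterval` constants are defined elsewhere in this package).

```go
package main

import (
	"encoding/binary"
	"encoding/hex"
	"fmt"
	"time"
)

// retarget sketches the scaling done in Proxy.setTarget: widen the 4-byte
// target to 8 bytes, scale it by (observed seconds per share) / (desired
// seconds per share), and keep the high 4 bytes as the new target string.
func retarget(cur string, duration time.Duration, shareCount, submitInterval float64) (string, error) {
	raw, err := hex.DecodeString("00000000" + cur) // widen to 8 bytes, low half zero
	if err != nil {
		return "", err
	}
	target := binary.LittleEndian.Uint64(raw)
	newTarget := uint64(float64(target) * duration.Seconds() / (shareCount * submitInterval))
	var b [8]byte
	binary.LittleEndian.PutUint64(b[:], newTarget)
	return hex.EncodeToString(b[4:]), nil // keep the high 4 bytes, as setTarget does
}

func main() {
	// Illustrative numbers only: 16 shares took 96 s against the initial
	// target "b88d0600" with a desired 4 s interval, so the target grows by
	// 96/(16*4) = 1.5x and shares become easier to find.
	t, err := retarget("b88d0600", 96*time.Second, 16, 4)
	fmt.Println(t, err)
}
```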
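Similarly, the share path in `handleSubmit` above batches upstream traffic: a share field is forwarded to the pool at most once every four seconds, and between sends only the numerically smallest 64-bit result seen so far is remembered together with its nonce. The sketch below condenses that policy into a standalone type; `aggregator`, `newAggregator`, and `offer` are illustrative names, not part of the proxy.

```go
package main

import (
	"fmt"
	"math"
	"time"
)

// aggregator keeps the best (smallest) result seen since the last send and
// only emits a share once the minimum interval has elapsed, mirroring the
// batching in Proxy.handleSubmit.
type aggregator struct {
	minResult uint64        // best (smallest) result since the last send
	minNonce  uint32        // nonce belonging to minResult
	lastSend  time.Time     // when a share was last forwarded upstream
	interval  time.Duration // minimum spacing between upstream sends
}

func newAggregator(start time.Time, interval time.Duration) *aggregator {
	return &aggregator{minResult: math.MaxUint64, lastSend: start, interval: interval}
}

// offer records a share and reports whether one should be forwarded now:
// once the interval has elapsed it returns the freshly offered nonce if that
// share is the best so far, otherwise the buffered best nonce.
func (a *aggregator) offer(result uint64, nonce uint32, now time.Time) (uint32, bool) {
	if now.Sub(a.lastSend) >= a.interval {
		best := nonce
		if result >= a.minResult {
			best = a.minNonce
		}
		a.minResult = math.MaxUint64
		a.lastSend = now
		return best, true
	}
	if result < a.minResult {
		a.minResult = result
		a.minNonce = nonce
	}
	return 0, false
}

func main() {
	start := time.Now()
	a := newAggregator(start, 4*time.Second)
	fmt.Println(a.offer(500, 1, start.Add(1*time.Second))) // 0 false: buffered as current best
	fmt.Println(a.offer(900, 2, start.Add(2*time.Second))) // 0 false: worse than buffered best
	fmt.Println(a.offer(700, 3, start.Add(5*time.Second))) // 1 true: interval elapsed, buffered nonce wins
}
```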