├── .npmignore ├── .gitignore ├── lib ├── constants.js ├── config.json ├── blockTemplate.js ├── stats.js ├── init.js ├── util.js ├── messages.js ├── varDiff.js ├── daemon.js ├── httpClient.js ├── jobManager.js ├── pool.js ├── stratum.js ├── shareProcessor.js └── paymentProcessor.js ├── .github └── workflows │ └── test.yml ├── Dockerfile ├── Dockerfile-release ├── test ├── test.js ├── utilTest.js ├── messagesTest.js ├── varDiffTest.js ├── statsTest.js ├── jobManagerTest.js ├── stratumTest.js ├── shareProcessorTest.js └── paymentProcessorTest.js ├── package.json ├── composePoolConfig.json ├── docker-compose.yaml ├── README.md └── LICENSE /.npmignore: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | node_modules/ 2 | logs 3 | 4 | -------------------------------------------------------------------------------- /lib/constants.js: -------------------------------------------------------------------------------- 1 | 2 | module.exports = Object.freeze({ 3 | MiningProtocolVersion: 0x01, 4 | JobsMessageType: 0x00, 5 | SubmitResultMessageType: 0x01, 6 | SubmitBlockMessageType: 0x00, 7 | GroupSize: 4, 8 | NonceLength: 24, 9 | NumZeroAtLeastInHash: 37, 10 | HashLength: 32 11 | }); 12 | -------------------------------------------------------------------------------- /.github/workflows/test.yml: -------------------------------------------------------------------------------- 1 | name: Unit Tests 2 | 3 | on: 4 | push: 5 | branches: [ master ] 6 | pull_request: 7 | 8 | jobs: 9 | test: 10 | runs-on: ubuntu-latest 11 | strategy: 12 | matrix: 13 | node-version: [14.x] 14 | steps: 15 | - uses: actions/checkout@v2 16 | - uses: actions/setup-node@v1 17 | with: 18 | node-version: ${{ matrix.node-version }} 19 | - run: npm ci 20 | - run: npm test 21 | 
-------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM node:16-alpine AS build 2 | RUN apk add python3 3 | RUN ln -sf /usr/bin/python3 /usr/bin/python 4 | RUN apk add build-base 5 | ENV NODE_ENV=production 6 | WORKDIR /home/node/mining-pool 7 | ADD package.json package.json 8 | ADD package-lock.json package-lock.json 9 | RUN npm ci 10 | 11 | FROM node:16-alpine AS run 12 | 13 | USER node 14 | COPY --from=build /home/node/mining-pool/node_modules /home/node/mining-pool/node_modules 15 | 16 | USER root 17 | RUN apk add tini 18 | RUN mkdir /home/node/mining-pool/logs && chown node /home/node/mining-pool/logs 19 | 20 | USER node 21 | WORKDIR /home/node/mining-pool 22 | 23 | ENV NODE_ENV=production 24 | 25 | EXPOSE 20032 26 | VOLUME /home/node/mining-pool/logs 27 | 28 | ADD ./package.json /home/node/mining-pool/package.json 29 | ADD ./lib /home/node/mining-pool/lib 30 | 31 | ENTRYPOINT ["/sbin/tini","--","npm","run","start"] 32 | -------------------------------------------------------------------------------- /Dockerfile-release: -------------------------------------------------------------------------------- 1 | FROM node:16-alpine AS build 2 | RUN apk add python3 3 | RUN ln -sf /usr/bin/python3 /usr/bin/python 4 | RUN apk add build-base 5 | RUN apk add curl 6 | ENV NODE_ENV=production 7 | WORKDIR /home/node/mining-pool 8 | RUN curl -o mining-pool-latest.tar.gz -L https://api.github.com/repos/alephium/mining-pool/tarball 9 | RUN tar -xf mining-pool-latest.tar.gz && rm mining-pool-latest.tar.gz 10 | RUN cd * && mv ./* ../. && cd .. 
11 | RUN npm ci 12 | 13 | FROM node:16-alpine AS run 14 | 15 | USER node 16 | COPY --from=build /home/node/mining-pool/ /home/node/mining-pool/ 17 | 18 | USER root 19 | RUN apk add tini 20 | RUN mkdir /home/node/mining-pool/logs && chown node /home/node/mining-pool/logs 21 | 22 | USER node 23 | WORKDIR /home/node/mining-pool 24 | 25 | ENV NODE_ENV=production 26 | 27 | EXPOSE 20032 28 | VOLUME /home/node/mining-pool/logs 29 | 30 | ENTRYPOINT ["/sbin/tini","--","npm","run","start"] 31 | -------------------------------------------------------------------------------- /test/test.js: -------------------------------------------------------------------------------- 1 | const winston = require('winston'); 2 | 3 | exports.logger = winston.createLogger({ 4 | format: winston.format.combine( 5 | winston.format.timestamp(), 6 | winston.format.printf(i => `${i.timestamp} | ${i.level} | ${i.message}`) 7 | ), 8 | transports: new winston.transports.Console({ 9 | level: 'debug' 10 | }) 11 | }); 12 | 13 | exports.config = { 14 | "daemon": { 15 | "host": "127.0.0.1", 16 | "port": 12973, 17 | "apiKey": "0000000000000000000000000000000000000000000000000000000000000000", 18 | "minerApiPort": 10973 19 | }, 20 | 21 | "redis": { 22 | "host": "127.0.0.1", 23 | "port": 6379, 24 | "db": 0, 25 | }, 26 | 27 | "diff1TargetNumZero": 30, 28 | "withholdPercent": 0, 29 | "rewardInterval": 600, 30 | "confirmationTime": 30600, 31 | 32 | "minPaymentCoins": "3.5", 33 | "paymentInterval": 600, 34 | 35 | "addresses": [], 36 | 37 | "wallet": { 38 | "name": "", 39 | "password": "", 40 | "mnemonicPassphrase": "" 41 | } 42 | }; 43 | -------------------------------------------------------------------------------- /test/utilTest.js: -------------------------------------------------------------------------------- 1 | const { expect } = require('chai'); 2 | const util = require('../lib/util'); 3 | 4 | it('should validate address', function(){ 5 | var invalidAddress1 = 
'114E4tiwXSyfvCqLnARL21Ac2pVS6GvPomw5y6HsLMwuyR'; 6 | var [addressGroup, error] = util.groupOfAddress(invalidAddress1); 7 | expect(addressGroup).equal(null); 8 | expect(error).equal('incorrect P2PKH address size'); 9 | 10 | var invalidAddress2 = 'mJ81KDniPRnFddgY6gUqKP1QXh2j5n37M9JWzuyNYVUQ'; 11 | var [addressGroup, error] = util.groupOfAddress(invalidAddress2); 12 | expect(addressGroup).equal(null); 13 | expect(error).equal('invalid P2PKH address'); 14 | 15 | var invalidAddress3 = ' '; 16 | var [addressGroup, error] = util.groupOfAddress(invalidAddress3); 17 | expect(addressGroup).equal(null); 18 | expect(error).equal('invalid P2PKH address format'); 19 | 20 | var validAddress = '1AqVGKeHWoLJiVU7heL8EvwQN2hk5bMtvP3PsH57qWayr'; 21 | var [okey, error] = util.isValidAddress(validAddress, 2); 22 | expect(okey).equal(true); 23 | expect(error).equal(null); 24 | 25 | var [okey, error] = util.isValidAddress(validAddress, 1); 26 | expect(okey).equal(false); 27 | }) 28 | -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "alephium-mining-pool", 3 | "version": "0.1.1", 4 | "description": "Alephium mining pool in Node.js", 5 | "keywords": [ 6 | "alephium", 7 | "mining", 8 | "pool" 9 | ], 10 | "homepage": "https://github.com/alephium/mining-pool", 11 | "bugs": { 12 | "url": "https://github.com/alephium/mining-pool/issues" 13 | }, 14 | "license": "GPL-2.0", 15 | "author": "Matthew Little", 16 | "contributors": [ 17 | "vekexasia", 18 | "TheSeven" 19 | ], 20 | "main": "./lib/init.js", 21 | "scripts": { 22 | "start": "node ./lib/init.js", 23 | "test": "mocha --bail --exit" 24 | }, 25 | "repository": { 26 | "type": "git", 27 | "url": "https://github.com/alephium/mining-pool.git" 28 | }, 29 | "dependencies": { 30 | "base58-native": "*", 31 | "bignum": "0.13.1", 32 | "binary-parser": "2.0.2", 33 | "blake3": "2.1.7", 34 | "findhit-proxywrap": 
"^0.3.13", 35 | "ioredis": "^4.28.2", 36 | "pg": "^8.7.1", 37 | "winston": "^3.3.3", 38 | "winston-daily-rotate-file": "^4.5.5" 39 | }, 40 | "engines": { 41 | "node": ">=14" 42 | }, 43 | "devDependencies": { 44 | "chai": "^4.3.4", 45 | "ioredis-mock": "^5.8.1", 46 | "mocha": "^9.1.3", 47 | "nock": "^13.2.1" 48 | } 49 | } 50 | -------------------------------------------------------------------------------- /test/messagesTest.js: -------------------------------------------------------------------------------- 1 | const { expect } = require('chai'); 2 | const { randomBytes } = require('crypto'); 3 | const constants = require('../lib/constants'); 4 | const { updateJobTimestamp } = require("../lib/messages") 5 | 6 | it('should update the job timestamp', function(){ 7 | function randomHeaderBlob(ts) { 8 | var encodedTs = Buffer.alloc(8) 9 | encodedTs.writeBigUInt64BE(BigInt(ts)) 10 | return Buffer.concat([ 11 | randomBytes(24), // nonce 12 | randomBytes(1), // version 13 | randomBytes(1 + (2 * constants.GroupSize - 1) * 32), // block deps 14 | randomBytes(32), // state hash 15 | randomBytes(32), // txs hash 16 | encodedTs, 17 | randomBytes(4), // target 18 | ]) 19 | } 20 | 21 | for (var i = 1; i <= 10; i += 1) { 22 | var jobTs = Date.now() + (i * 60 * 1000) 23 | var job = { headerBlob: randomHeaderBlob(jobTs) } 24 | var prevHeaderBlob = Buffer.from(job.headerBlob) 25 | 26 | updateJobTimestamp(job, _ => jobTs) 27 | expect(job.headerBlob).to.deep.equal(prevHeaderBlob) 28 | 29 | var delta = i * 60 * 1000 30 | updateJobTimestamp(job, ts => ts + delta) 31 | expect(job.headerBlob).to.not.deep.equal(prevHeaderBlob) 32 | 33 | var tsOffset = job.headerBlob.length - 12 34 | prevHeaderBlob.writeBigUInt64BE(BigInt(jobTs + delta), tsOffset) 35 | expect(job.headerBlob).to.deep.equal(prevHeaderBlob) 36 | expect(Number(job.headerBlob.readBigUInt64BE(tsOffset))).to.equal(jobTs + delta) 37 | } 38 | }) -------------------------------------------------------------------------------- 
/lib/config.json: -------------------------------------------------------------------------------- 1 | { 2 | "logPath": "./logs/", 3 | 4 | "connectionTimeout": 600, 5 | "maxConnectionsFromSameIP": 10, 6 | "whitelistIps": [], 7 | 8 | "jobExpiryPeriod": 10, 9 | 10 | "banning": { 11 | "enabled": true, 12 | "time": 600, 13 | "invalidPercent": 50, 14 | "checkThreshold": 500, 15 | "purgeInterval": 300 16 | }, 17 | 18 | "statsInterval": 600, 19 | 20 | "diff1TargetNumZero": 30, 21 | "pool": { 22 | "port": 20032, 23 | "proxyProtocol": false, 24 | "diff": 1, 25 | 26 | "varDiff": { 27 | "minDiff": 1, 28 | "maxDiff": 4096, 29 | "targetTime": 2.5, 30 | "retargetTime": 90, 31 | "variancePercent": 30 32 | } 33 | }, 34 | 35 | "daemon": { 36 | "host": "127.0.0.1", 37 | "port": 12973, 38 | "apiKey": "0000000000000000000000000000000000000000000000000000000000000000", 39 | "minerApiPort": 10973 40 | }, 41 | 42 | "redis": { 43 | "host": "127.0.0.1", 44 | "port": 6379, 45 | "db": 0 46 | }, 47 | 48 | "withholdPercent": 0.005, 49 | "rewardEnabled": true, 50 | "rewardInterval": 600, 51 | "confirmationTime": 30600, 52 | 53 | "paymentEnabled": true, 54 | "minPaymentCoins": "0.5", 55 | "paymentInterval": 3600, 56 | "txConfirmations": { 57 | "chainConfirmations": 10, 58 | "fromGroupConfirmations": 5, 59 | "toGroupConfirmations": 5 60 | }, 61 | 62 | "persistence": { 63 | "enabled": false, 64 | "host": "127.0.0.1", 65 | "port": 5432, 66 | "user": "postgres", 67 | "password": "postgres", 68 | "database": "mining-pool" 69 | }, 70 | 71 | "addresses": [], 72 | 73 | "wallet": { 74 | "name": "", 75 | "password": "", 76 | "mnemonicPassphrase": "" 77 | } 78 | } 79 | -------------------------------------------------------------------------------- /lib/blockTemplate.js: -------------------------------------------------------------------------------- 1 | const bignum = require('bignum'); 2 | const blake3 = require('blake3') 3 | const constants = require('./constants'); 4 | 5 | /** 6 | * The BlockTemplate 
class holds a single job. 7 | * and provides several methods to validate and submit it to the daemon coin 8 | **/ 9 | var BlockTemplate = module.exports = function BlockTemplate(job, timestamp){ 10 | 11 | //private members 12 | 13 | var submits = []; 14 | var emptyTxsBlob = ''; 15 | 16 | //public members 17 | 18 | this.jobId = job.jobId; 19 | this.timestamp = timestamp; 20 | this.fromGroup = job.fromGroup; 21 | this.toGroup = job.toGroup; 22 | this.headerBlob = job.headerBlob; 23 | this.txsBlob = job.txsBlob; 24 | this.targetBlob = job.targetBlob; 25 | this.target = bignum.fromBuffer(this.targetBlob); 26 | this.chainIndex = this.fromGroup * constants.GroupSize + this.toGroup; 27 | this.height = job.height 28 | 29 | this.registerSubmit = function(nonce){ 30 | if (submits.indexOf(nonce) === -1){ 31 | submits.push(nonce); 32 | return true; 33 | } 34 | return false; 35 | }; 36 | 37 | this.hash = function(nonce){ 38 | if (nonce.length != constants.NonceLength){ 39 | throw new Error("Invalid nonce, size: " + nonce.length); 40 | } 41 | var header = Buffer.concat([nonce, this.headerBlob]); 42 | return blake3.hash(blake3.hash(header)); 43 | } 44 | 45 | this.getJobParams = function(){ 46 | if (!this.jobParams){ 47 | this.jobParams = { 48 | jobId: this.jobId, 49 | fromGroup: this.fromGroup, 50 | toGroup: this.toGroup, 51 | headerBlob: this.headerBlob.toString('hex'), 52 | txsBlob: emptyTxsBlob, 53 | height: this.height 54 | }; 55 | } 56 | return this.jobParams; 57 | }; 58 | }; 59 | -------------------------------------------------------------------------------- /composePoolConfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "logPath": "./logs/", 3 | 4 | "connectionTimeout": 600, 5 | "maxConnectionsFromSameIP": 10, 6 | "whitelistIps": [], 7 | 8 | "jobExpiryPeriod": 10, 9 | 10 | "banning": { 11 | "enabled": true, 12 | "time": 600, 13 | "invalidPercent": 50, 14 | "checkThreshold": 500, 15 | "purgeInterval": 300 16 | }, 17 | 18 
| "statsInterval": 600, 19 | 20 | "diff1TargetNumZero": 30, 21 | "pool": { 22 | "port": 20032, 23 | "diff": 1, 24 | "proxyProtocol": false, 25 | 26 | "varDiff": { 27 | "minDiff": 1, 28 | "maxDiff": 4096, 29 | "targetTime": 2.5, 30 | "retargetTime": 90, 31 | "variancePercent": 30 32 | } 33 | }, 34 | 35 | "daemon": { 36 | "host": "127.0.0.1", 37 | "port": 12973, 38 | "apiKey": "0000000000000000000000000000000000000000000000000000000000000000", 39 | "minerApiPort": 10973 40 | }, 41 | 42 | "redis": { 43 | "host": "mining-redis", 44 | "port": 6379, 45 | "db": 0 46 | }, 47 | 48 | "withholdPercent": 0.005, 49 | "rewardEnabled": true, 50 | "rewardInterval": 600, 51 | "confirmationTime": 30600, 52 | 53 | "paymentEnabled": true, 54 | "minPaymentCoins": "0.5", 55 | "paymentInterval": 3600, 56 | "txConfirmations": { 57 | "chainConfirmations": 10, 58 | "fromGroupConfirmations": 5, 59 | "toGroupConfirmations": 5 60 | }, 61 | 62 | "persistence": { 63 | "enabled": false, 64 | "host": "mining-postgres", 65 | "port": 5432, 66 | "user": "postgres", 67 | "password": "postgres", 68 | "database": "mining-pool" 69 | }, 70 | 71 | "addresses": [], 72 | 73 | "wallet": { 74 | "name": "", 75 | "password": "", 76 | "mnemonicPassphrase": "" 77 | } 78 | } 79 | -------------------------------------------------------------------------------- /docker-compose.yaml: -------------------------------------------------------------------------------- 1 | version: "3.3" 2 | 3 | volumes: 4 | logs: {} 5 | dbdata: {} 6 | 7 | networks: 8 | backend: 9 | 10 | services: 11 | mining-pool: 12 | # replace with the image name (and version) you have built using the provided Dockerfile 13 | image: mining-pool:latest 14 | # optional automatic build step, if an image was not already built 15 | build: . 
16 | container_name: mining-pool 17 | depends_on: 18 | - redis 19 | - postgres 20 | restart: unless-stopped 21 | ports: 22 | - 20032:20032 23 | volumes: 24 | # Pool logs will be stored on a volume called "logs", and can be mounted to a local path 25 | # Due to the Node user, mounts need to be owned by 1000:1000: 26 | # mkdir ./logs && chown 1000:1000 ./logs && chmod 644 ./logs 27 | # - ./logs:/home/node/mining-pool/logs 28 | - logs:/home/node/mining-pool/logs 29 | # The following line requires your own config file in the current folder 30 | # Please refer to the Readme for the sections "persistence" and "redis" in a Docker setup 31 | - ./composePoolConfig.json:/home/node/mining-pool/lib/config.json 32 | networks: 33 | - backend 34 | 35 | redis: 36 | image: redis:latest 37 | container_name: mining-redis 38 | ports: 39 | # still here only for debugging purposes, not required 40 | - 6379:6379 41 | command: redis-server 42 | restart: unless-stopped 43 | networks: 44 | - backend 45 | 46 | postgres: 47 | image: postgres:latest 48 | container_name: mining-postgres 49 | ports: 50 | # here for exposing the db to an admin tool, while not conflicting with 51 | # the default port of an eventual other PostgreSQL installation on the same host 52 | - 25432:5432 53 | volumes: 54 | - dbdata:/var/lib/postgresql/data 55 | environment: 56 | - POSTGRES_PASSWORD=postgres 57 | restart: unless-stopped 58 | networks: 59 | - backend 60 | -------------------------------------------------------------------------------- /test/varDiffTest.js: -------------------------------------------------------------------------------- 1 | const assert = require('assert'); 2 | const VarDiff = require('../lib/varDiff'); 3 | 4 | var varDiffOptions = { 5 | minDiff: 1, 6 | maxDiff: 4096, 7 | targetTime: 2.5, 8 | retargetTime: 90, 9 | variancePercent: 30 10 | }; 11 | 12 | var varDiff = new VarDiff(varDiffOptions); 13 | 14 | function validateDifficulty(newDifficulty, expected){ 15 | var max = newDifficulty * 
1.0000001; 16 | var min = newDifficulty * 0.9999999; 17 | assert(min <= expected && expected <= max); 18 | } 19 | 20 | function testAdjustDifficulty(prevDiffs, averageTargetTime){ 21 | for (var prevDiff of prevDiffs){ 22 | var [diffUpdated, newDiff] = varDiff.retarget(averageTargetTime, prevDiff); 23 | var expectedDiff = (prevDiff * (varDiffOptions.targetTime / averageTargetTime)); 24 | assert(diffUpdated === true); 25 | validateDifficulty(newDiff, expectedDiff); 26 | } 27 | } 28 | 29 | describe('test var diff', function(){ 30 | it('should decrease difficulty', function(){ 31 | var prevDifficultys = [7, 10, 24.7, 111, 555]; 32 | testAdjustDifficulty(prevDifficultys, 5.7); 33 | }); 34 | 35 | it('should increase difficulty', function(){ 36 | var prevDifficultys = [3.5, 10, 24.7, 111, 555]; 37 | testAdjustDifficulty(prevDifficultys, 0.7); 38 | }) 39 | 40 | it('should set difficulty to maximum', function(){ 41 | var [diffUpdated, newDiff] = varDiff.retarget(1, 2048); 42 | assert(diffUpdated === true); 43 | validateDifficulty(newDiff, varDiffOptions.maxDiff); 44 | }) 45 | 46 | it('should set difficulty to minimum', function(){ 47 | var [diffUpdated, newDiff] = varDiff.retarget(6, 1.8); 48 | assert(diffUpdated === true); 49 | validateDifficulty(newDiff, varDiffOptions.minDiff); 50 | }) 51 | 52 | it('should not adjust difficulty', function(){ 53 | var [diffUpdated, _] = varDiff.retarget(3.1, 2.9); 54 | assert(diffUpdated === false); 55 | }) 56 | }) 57 | -------------------------------------------------------------------------------- /test/statsTest.js: -------------------------------------------------------------------------------- 1 | const RedisMock = require('ioredis-mock'); 2 | const Stats = require('../lib/stats'); 3 | const test = require('./test'); 4 | const { expect, assert } = require('chai'); 5 | 6 | describe('test stats', function(){ 7 | it('should cleanup stale data after stats', function(done){ 8 | var redisClient = new RedisMock(); 9 | var stats = new 
Stats(test.config, test.logger); 10 | stats.redisClient = redisClient; 11 | 12 | var interval = 60; 13 | var currentMs = Date.now(); 14 | var currentTs = Math.floor(Date.now() / 1000); 15 | var expiredTs = currentTs - 80; 16 | var from = currentTs - interval; 17 | var redisTx = redisClient.multi(); 18 | 19 | for (var idx = 0; idx < 60; idx++){ 20 | redisTx.zadd('hashrate', currentTs, [0, 0, 'miner' + idx, 1, currentMs].join(':')); 21 | redisTx.zadd('hashrate', expiredTs, [0, 1, 'miner' + idx, 1, currentMs].join(':')); 22 | } 23 | 24 | var checkState = function(){ 25 | redisClient 26 | .multi() 27 | .zrangebyscore('hashrate', '-inf', '(' + from) 28 | .zrangebyscore('hashrate', from, '+inf') 29 | .exec(function(error, result){ 30 | if (result.error) assert.fail('Test error: ' + error); 31 | expect(result[0][1].length).equal(0); 32 | expect(result[1][1].length).equal(60); 33 | done(); 34 | }); 35 | } 36 | 37 | redisTx.exec(function(error, _){ 38 | if (error) assert.fail('Test error: ' + error); 39 | stats.getStats(interval, function(result){ 40 | if (result.error) assert.fail('Test error: ' + error); 41 | var expectedHashRate = ((60 * 16 * Math.pow(2, test.config.diff1TargetNumZero)) / (interval * 1000 * 1000)).toFixed(2); 42 | expect(result.hashrate).equal(expectedHashRate); 43 | checkState(); 44 | }) 45 | }); 46 | }) 47 | }) 48 | -------------------------------------------------------------------------------- /lib/stats.js: -------------------------------------------------------------------------------- 1 | const Redis = require('ioredis'); 2 | 3 | var Stats = module.exports = function(config, logger){ 4 | var hashrateKey = "hashrate"; 5 | var _this = this; 6 | this.redisClient = new Redis(config.redis.port, config.redis.host, {db: config.redis.db}); 7 | 8 | function calcHashrate(interval, callback){ 9 | var now = Math.floor(Date.now() / 1000); 10 | var from = now - interval; 11 | 12 | _this.redisClient 13 | .multi() 14 | .zrangebyscore(hashrateKey, from, '+inf') 15 
| .zremrangebyscore(hashrateKey, '-inf', '(' + from) 16 | .exec(function(error, results){ 17 | if (error){ 18 | logger.error('Get hashrate data failed, error: ' + error); 19 | callback({error: error}); 20 | return; 21 | } 22 | 23 | var hashrateData = results[0][1]; 24 | var difficultySum = 0; 25 | for (var idx in hashrateData){ 26 | // data format: 'fromGroup:toGroup:worker:difficulty:ms' 27 | var data = hashrateData[idx].split(':'); 28 | difficultySum += parseFloat(data[3]); 29 | } 30 | // multiply 16 because we encoded the chainIndex to blockHash 31 | var hashrate = difficultySum * 16 * Math.pow(2, config.diff1TargetNumZero) / interval; 32 | var hashrateMHs = (hashrate / 1000000).toFixed(2); 33 | _this.redisClient.set('pool-hashrate', hashrateMHs, _ => {}); 34 | callback({hashrate: hashrateMHs}); 35 | }); 36 | } 37 | 38 | this.getStats = function(interval, callback){ 39 | calcHashrate(interval, callback); 40 | } 41 | 42 | this.reportStatsRegularly = function(){ 43 | setInterval(function(){ 44 | _this.getStats(config.statsInterval, function(result){ 45 | if (result.error){ 46 | logger.error('Stats failed, error: ' + result.error); 47 | return; 48 | } 49 | logger.info('Pool hashrate: ' + result.hashrate + ' MH/s'); 50 | }) 51 | }, config.statsInterval * 1000); 52 | } 53 | } 54 | -------------------------------------------------------------------------------- /lib/init.js: -------------------------------------------------------------------------------- 1 | const Pool = require("./pool"); 2 | const winston = require('winston'); 3 | const bignum = require('bignum'); 4 | require('winston-daily-rotate-file'); 5 | const fs = require('fs'); 6 | const path = require('path'); 7 | 8 | const CONFIG_FILE = process.env.CONFIG_FILE || path.resolve(__dirname, 'config.json'); 9 | 10 | if (!fs.existsSync(CONFIG_FILE)){ 11 | console.log(`${CONFIG_FILE} does not exist.`); 12 | process.exit(1); 13 | } 14 | 15 | var config = JSON.parse(fs.readFileSync(CONFIG_FILE, {encoding: 'utf8'})); 
16 | if ((config.withholdPercent < 0) || (config.withholdPercent >= 1)){ 17 | console.log('invalid withhold percent'); 18 | process.exit(1); 19 | } 20 | 21 | if (config.confirmationTime < 30600) { 22 | console.warn("\x1b[31m`confirmationTime` less than 30600, please make sure that the pool wallet has enough balance to pay the rewards\x1b[0m") 23 | } 24 | 25 | if (config.withholdPercent === 0){ 26 | console.warn("\x1b[31m`withholdPercent` is 0, please make sure that the pool wallet has enough balance to pay the transaction fee\x1b[0m") 27 | } 28 | 29 | if (!config.daemon.apiKey || config.daemon.apiKey === '') { 30 | console.warn("\x1b[31m`apiKey` is not configured, please make sure you don't need an apiKey to access your full node\x1b[0m") 31 | } 32 | 33 | global.diff1Target = bignum.pow(2, 256 - config.diff1TargetNumZero).sub(1); 34 | 35 | var logger = winston.createLogger({ 36 | format: winston.format.combine( 37 | winston.format.timestamp(), 38 | winston.format.printf(i => `${i.timestamp} | ${i.level} | ${i.message}`) 39 | ), 40 | transports: [ 41 | new winston.transports.DailyRotateFile({ 42 | filename: config.logPath + 'pool-%DATE%-debug.log', 43 | datePattern: 'YYYY-MM-DD', 44 | maxSize: '100m', 45 | maxFiles: '5d', 46 | level: 'debug' 47 | }), 48 | new winston.transports.DailyRotateFile({ 49 | filename: config.logPath + 'pool-%DATE%-info.log', 50 | datePattern: 'YYYY-MM-DD', 51 | maxSize: '100m', 52 | maxFiles: '5d', 53 | level: 'info' 54 | }), 55 | new winston.transports.DailyRotateFile({ 56 | filename: config.logPath + 'pool-%DATE%-error.log', 57 | datePattern: 'YYYY-MM-DD', 58 | maxSize: '100m', 59 | maxFiles: '5d', 60 | level: 'error' 61 | }), 62 | new winston.transports.Console({ 63 | level: 'info' 64 | }) 65 | ] 66 | }); 67 | 68 | var pool = new Pool(config, logger); 69 | pool.start(); 70 | -------------------------------------------------------------------------------- /lib/util.js: 
-------------------------------------------------------------------------------- 1 | const base58 = require('base58-native'); 2 | const constants = require('./constants'); 3 | 4 | exports.packInt64LE = function(num){ 5 | var buff = Buffer.alloc(8); 6 | buff.writeUInt32LE(num % Math.pow(2, 32), 0); 7 | buff.writeUInt32LE(Math.floor(num / Math.pow(2, 32)), 4); 8 | return buff; 9 | }; 10 | 11 | var magnitude = 1000000000000000000; 12 | var precision = 18; 13 | 14 | exports.toALPH = function(amount){ 15 | return parseFloat((amount / magnitude).toFixed(precision)); 16 | }; 17 | 18 | exports.fromALPH = function(coins){ 19 | return Math.floor(coins * magnitude); 20 | }; 21 | 22 | function djbHash(buffer){ 23 | var hash = 5381; 24 | for (var idx = 0; idx < buffer.length; idx++){ 25 | hash = (((hash << 5) + hash) + (buffer[idx] & 0xff)) & 0xffffffff; 26 | } 27 | return hash; 28 | } 29 | 30 | function xorByte(intValue){ 31 | var byte0 = (intValue >> 24) & 0xff; 32 | var byte1 = (intValue >> 16) & 0xff; 33 | var byte2 = (intValue >> 8) & 0xff; 34 | var byte3 = intValue & 0xff; 35 | return (byte0 ^ byte1 ^ byte2 ^ byte3) & 0xff; 36 | } 37 | 38 | function groupOfAddress(addressStr){ 39 | var decoded = null; 40 | try { 41 | decoded = base58.decode(addressStr); 42 | } catch (error){ 43 | return [null, 'invalid P2PKH address format']; 44 | } 45 | if (decoded.length != 33){ // prefix(1 byte) + public key hash(32 bytes) 46 | return [null, 'incorrect P2PKH address size']; 47 | } 48 | 49 | if (decoded[0] != 0x00){ // prefix for P2PKH 50 | return [null, 'invalid P2PKH address']; 51 | } 52 | 53 | var hint = djbHash(decoded.slice(1)) | 1; 54 | var hash = xorByte(hint); 55 | var group = hash % constants.GroupSize; 56 | return [group, null]; 57 | } 58 | 59 | exports.isValidAddress = function(addressStr, group){ 60 | var [g, error] = groupOfAddress(addressStr); 61 | if (error){ 62 | return [false, error]; 63 | } 64 | return [g == group, null]; 65 | } 66 | 67 | exports.groupOfAddress = 
groupOfAddress; 68 | 69 | function executeForEach(array, func, callback){ 70 | if (array.length === 0){ 71 | callback(); 72 | return; 73 | } 74 | var element = array.shift(); 75 | func(element, function(){ 76 | executeForEach(array, func, callback); 77 | }); 78 | } 79 | 80 | exports.executeForEach = executeForEach; 81 | 82 | function blockChainIndex(hash){ 83 | var beforeLast = hash[hash.length - 2] & 0xff; 84 | var last = hash[hash.length -1] & 0xff; 85 | var bigIndex = beforeLast << 8 | last; 86 | var chainNum = constants.GroupSize * constants.GroupSize; 87 | var index = bigIndex % chainNum; 88 | var fromGroup = Math.floor(index / constants.GroupSize); 89 | var toGroup = index % constants.GroupSize; 90 | return [fromGroup, toGroup]; 91 | } 92 | 93 | exports.blockChainIndex = blockChainIndex; 94 | -------------------------------------------------------------------------------- /lib/messages.js: -------------------------------------------------------------------------------- 1 | const { Parser } = require("binary-parser"); 2 | const constants = require("./constants"); 3 | 4 | var headerSize = 4; // 4 bytes body length 5 | 6 | var jobParser = new Parser() 7 | .endianess("big") 8 | .uint32('fromGroup') 9 | .uint32('toGroup') 10 | .uint32('headerBlobLength') 11 | .buffer('headerBlob', { 12 | 'length': 'headerBlobLength' 13 | }) 14 | .uint32('txsBlobLength') 15 | .buffer('txsBlob', { 16 | 'length': 'txsBlobLength' 17 | }) 18 | .uint32('targetLength') 19 | .buffer('targetBlob', { 20 | 'length': 'targetLength' 21 | }) 22 | .uint32('height') 23 | .saveOffset('dataLength'); 24 | 25 | exports.parseMessage = function(buffer, callback){ 26 | if (buffer.length < headerSize) { 27 | callback(null, 0); 28 | } 29 | else { 30 | var bodyLength = buffer.readUInt32BE(); 31 | if (buffer.length < (headerSize + bodyLength)) { 32 | callback(null, 0); 33 | } 34 | else { 35 | var version = buffer.readUInt8(headerSize); // 1 byte version 36 | if (version !== constants.MiningProtocolVersion) 
{ 37 | throw Error(`Invalid protocol version ${version}, expect ${constants.MiningProtocolVersion}`); 38 | } 39 | var messageType = buffer.readUInt8(headerSize + 1); // 1 byte message type 40 | var startOffset = headerSize + 2; 41 | var endOffset = headerSize + bodyLength; 42 | var message = buffer.slice(startOffset, endOffset); 43 | var payload = parse(messageType, message); 44 | var result = { 45 | type: messageType, 46 | payload: payload 47 | }; 48 | callback(result, endOffset); 49 | } 50 | } 51 | } 52 | 53 | function parse(messageType, buffer){ 54 | if (messageType == constants.JobsMessageType) { 55 | return parseJobs(buffer); 56 | } 57 | else if (messageType == constants.SubmitResultMessageType) { 58 | return parseSubmitResult(buffer); 59 | } 60 | else { 61 | throw Error("Invalid message type"); // TODO: handle error properly 62 | } 63 | } 64 | 65 | function parseJobs(buffer){ 66 | var jobSize = buffer.readUInt32BE(); 67 | var offset = 4; 68 | var jobs = []; 69 | for (var index = 0; index < jobSize; index++){ 70 | var job = jobParser.parse(buffer.slice(offset)); 71 | jobs[index] = job; 72 | offset += job.dataLength; 73 | } 74 | return jobs; 75 | } 76 | 77 | function parseSubmitResult(buffer){ 78 | var fromGroup = buffer.readUInt32BE(); 79 | var toGroup = buffer.readUInt32BE(4); 80 | var blockHash = buffer.slice(8, 40) 81 | var result = buffer.readUInt8(40); 82 | var succeed = result == 1; 83 | return { 84 | fromGroup: fromGroup, 85 | toGroup: toGroup, 86 | blockHash: blockHash, 87 | succeed: succeed 88 | }; 89 | } 90 | 91 | exports.updateJobTimestamp = function(job, updater) { 92 | var headerBlob = job.headerBlob 93 | var encodedTsLength = 8 94 | var encodedTargetLength = 4 95 | var tsOffset = job.headerBlob.length - (encodedTsLength + encodedTargetLength) 96 | var jobTs = Number(headerBlob.readBigUInt64BE(tsOffset)) 97 | var newJobTs = updater(jobTs) 98 | headerBlob.writeBigUInt64BE(BigInt(newJobTs), tsOffset) 99 | } 100 | 
const events = require('events');

/*
 Vardiff ported from stratum-mining share-limiter
 https://github.com/ahmedbodi/stratum-mining/blob/master/mining/basic_share_limiter.py
*/

// Fixed-capacity circular buffer holding the most recent
// share-submission intervals (seconds).
function RingBuffer(maxSize){
    var data = [];
    var cursor = 0;
    var isFull = false;

    // Append a value, overwriting the oldest entry once the buffer is full.
    this.append = function(x){
        if (isFull){
            data[cursor] = x;
            cursor = (cursor + 1) % maxSize;
        }
        else{
            data.push(x);
            cursor++;
            if (data.length === maxSize){
                cursor = 0;
                isFull = true;
            }
        }
    };

    // Average of the stored values. Returns 0 for an empty buffer —
    // the original called reduce() with no initial value, which throws
    // a TypeError on an empty array.
    this.avg = function(){
        var count = isFull ? maxSize : cursor;
        if (count === 0) return 0;
        var sum = data.reduce(function(a, b){ return a + b; }, 0);
        return sum / count;
    };

    this.size = function(){
        return isFull ? maxSize : cursor;
    };

    this.clear = function(){
        data = [];
        cursor = 0;
        isFull = false;
    };
}

// Truncate a number to a fixed amount of decimal places
function toFixed(num, len) {
    return parseFloat(num.toFixed(len));
}

/**
 * Variable-difficulty controller. Watches each stratum client's accepted
 * shares and emits 'newDifficulty'(client, difficulty) when the client's
 * share rate drifts outside the tolerated window.
 *
 * varDiffOptions:
 *   - targetTime:      desired seconds between shares
 *   - retargetTime:    seconds between retarget checks
 *   - variancePercent: tolerated deviation (%) before retargeting
 *   - minDiff/maxDiff: clamp for the resulting difficulty
 *   - x2mode:          if truthy, only halve/double instead of scaling
 *                      proportionally
 */
var varDiff = module.exports = function varDiff(varDiffOptions){
    var _this = this;
    var variance = varDiffOptions.targetTime * (varDiffOptions.variancePercent / 100);

    var bufferSize = varDiffOptions.retargetTime / varDiffOptions.targetTime * 4;
    var tMin = varDiffOptions.targetTime - variance;
    var tMax = varDiffOptions.targetTime + variance;

    /**
     * Compute a new difficulty from the average time between shares.
     * Returns [changed, difficulty].
     */
    this.retarget = function(averageTargetTime, prevDifficulty){
        var ddiff = varDiffOptions.targetTime / averageTargetTime;

        if (averageTargetTime > tMax && prevDifficulty > varDiffOptions.minDiff){
            // Shares arrive too slowly: lower the difficulty (clamped to minDiff).
            if (varDiffOptions.x2mode){
                ddiff = 0.5;
            }
            if (ddiff * prevDifficulty < varDiffOptions.minDiff){
                ddiff = varDiffOptions.minDiff / prevDifficulty;
            }
        } else if (averageTargetTime < tMin){
            // Shares arrive too quickly: raise the difficulty (clamped to maxDiff).
            if (varDiffOptions.x2mode){
                ddiff = 2;
            }
            if (ddiff * prevDifficulty > varDiffOptions.maxDiff){
                ddiff = varDiffOptions.maxDiff / prevDifficulty;
            }
        }
        else {
            // Within tolerance: keep the current difficulty.
            return [false, prevDifficulty];
        }

        var newDifficulty = toFixed(prevDifficulty * ddiff, 8);
        return [true, newDifficulty];
    }

    // Attach the vardiff logic to one stratum client.
    this.manageClient = function(client){
        var options = varDiffOptions;
        var lastTs;
        var lastRtc;
        var timeBuffer;

        client.on('submitAccepted', function(){
            var ts = (Date.now() / 1000) | 0;
            if (!lastRtc){
                // First accepted share: start the clock halfway through a
                // retarget window so the first adjustment happens sooner.
                lastRtc = ts - options.retargetTime / 2;
                lastTs = ts;
                timeBuffer = new RingBuffer(bufferSize);
                return;
            }

            var sinceLast = ts - lastTs;
            timeBuffer.append(sinceLast);
            lastTs = ts;
            // Only retarget once per retargetTime window.
            if ((ts - lastRtc) < options.retargetTime && timeBuffer.size() > 0)
                return;

            lastRtc = ts;
            var [diffUpdated, newDifficulty] = _this.retarget(timeBuffer.avg(), client.difficulty);
            if (diffUpdated){
                timeBuffer.clear();
                _this.emit('newDifficulty', client, newDifficulty);
            }
        });
    };
};
varDiff.prototype.__proto__ = events.EventEmitter.prototype;
const events = require('events');
const net = require('net');
const messages = require('./messages');
const constants = require('./constants');
const HttpClient = require('./httpClient');

/**
 * TCP client for the full node's binary miner API: receives job
 * messages and submits solved blocks.
 */
function MinerClient(instance, logger){
    var _this = this;
    var socket = net.Socket();

    // Connect (or reconnect) to the miner API; `callback` receives every
    // parsed message from the node.
    this.connect = function(callback){
        // Drop handlers from any previous connection attempt so they do
        // not stack up across reconnects.
        ['close', 'error', 'data', 'connect'].forEach(function(eventName){
            socket.removeAllListeners(eventName);
        });

        socket.connect(instance.minerApiPort, instance.host);
        socket.on('connect', function(){
            logger.info('Connected to alephium full node');
        });

        var pending = Buffer.from([]);
        // Repeatedly parse complete messages off the front of `pending`.
        var drainMessages = function(){
            messages.parseMessage(pending, function(message, offset){
                if (message){
                    pending = pending.slice(offset);
                    callback(message);
                    drainMessages();
                }
            });
        };
        socket.on('data', function(chunk){
            pending = Buffer.concat([pending, chunk]);
            drainMessages();
        });

        socket.on('error', function(error){
            logger.error('Full node connection error: ' + error);
        });

        socket.on('close', function(){
            logger.warn('Full node connection closed, trying to reconnect...');
            setTimeout(function(){
                _this.connect(callback);
            }, 8000);
        });
    }

    // Frame and send a solved block.
    // Layout: messageSize(4) | version(1) | messageType(1) | blockSize(4) | block
    this.submit = function(block, callback){
        var blockSize = block.length;
        var msgPrefixSize = 1 + 1 + 4; // version(1 byte) + messageType(1 byte) + encodedBlockSize(4 bytes)
        var msgSize = msgPrefixSize + blockSize;
        var header = Buffer.alloc(10); // encodedMessageSize(4 bytes) + msgPrefixSize
        header.writeUInt32BE(msgSize);
        header.writeUInt8(constants.MiningProtocolVersion, 4);
        header.writeUInt8(constants.SubmitBlockMessageType, 5);
        header.writeUInt32BE(blockSize, 6);
        socket.write(Buffer.concat([header, block]), callback);
    }
}

/**
 * The daemon interface interacts with the coin daemon via its REST API
 * and the miner API. `instance` must provide:
 *   - host: hostname where the coin lives
 *   - port: REST API port
 *   - apiKey: REST API key
 *   - minerApiPort: miner API port
 * Emits: 'online', 'cliqueNotReady'.
 */
function DaemonInterface(instance, logger){

    var _this = this;

    this.httpClient = new HttpClient(instance.host, instance.port, instance.apiKey);

    // Probe the node once; emit 'online' or 'cliqueNotReady'.
    this.init = function(){
        _this.httpClient.selfClique(function(result){
            if (result.selfReady){
                _this.emit('online');
            }
            else {
                _this.emit('cliqueNotReady');
            }
        });
    }

    // callback(true) iff the clique is ready and fully synced.
    this.isSynced = function(callback){
        _this.httpClient.selfClique(function(result){
            callback(result.selfReady && result.synced);
        });
    }

    this.connectToMiningServer = function(callback){
        _this.minerClient = new MinerClient(instance, logger);
        _this.minerClient.connect(callback);
    }

    this.submit = function(block, callback){
        _this.minerClient.submit(block, callback);
    }
}

DaemonInterface.prototype.__proto__ = events.EventEmitter.prototype;

exports.interface = DaemonInterface;
const http = require('http');

/**
 * Minimal HTTP client for the Alephium full node REST API and wallet
 * endpoints. Every callback receives one object: the parsed JSON body,
 * with an `error` property (and `statusCode`) set on failure.
 */
var HttpClient = module.exports = function HttpClient(host, port, apiKey){
    var _this = this;

    // Parse a response body; attach error info for non-200 responses.
    function parseJson(res, data){
        var dataJson = {};
        if (data){
            try{
                dataJson = JSON.parse(data);
            }
            catch(error){
                dataJson.error = error;
            }
        }
        if (res.statusCode !== 200){
            dataJson.statusCode = res.statusCode
            // Prefer the node's own error detail when present.
            dataJson.error = dataJson.detail ? dataJson.detail : 'Request error, status code: ' + res.statusCode;
        }
        return dataJson;
    }

    // Fire one request, accumulate the body, and deliver parsed JSON.
    function httpRequest(method, path, headers, requestData, callback){
        var req = http.request({
            hostname: host,
            port: port,
            path: path,
            method: method,
            headers: headers
        }, function(res){
            var body = '';
            res.setEncoding('utf8');
            res.on('data', function(chunk){
                body += chunk;
            });
            res.on('end', function(){
                callback(parseJson(res, body));
            });
        });

        req.on('error', function(e) {
            callback({error: e});
        });

        if (requestData) req.end(requestData);
        else req.end();
    }

    // Build request headers, including the API key when configured.
    function headersFor(get){
        var headers = get ? {'accept': 'application/json'} : {'Content-Type': 'application/json'};
        if (apiKey) headers['X-API-KEY'] = apiKey;
        return headers;
    }
    var getHeaders = headersFor(true);
    var postHeaders = headersFor(false);

    this.get = function(path, callback){
        httpRequest('GET', path, getHeaders, null, callback);
    }

    this.post = function(path, data, callback){
        httpRequest('POST', path, postHeaders, data, callback);
    }

    // Node/clique status.
    this.selfClique = function(callback){
        this.get('/infos/self-clique', callback);
    }

    this.buildUnsignedTx = function(fromPubKey, destinations, callback){
        var data = JSON.stringify({
            fromPublicKey: fromPubKey,
            destinations: destinations
        });
        this.post('/transactions/build', data, callback);
    }

    this.buildUnsignedTxFromUtxos = function(fromPubKey, destinations, utxos, gas, callback){
        var data = JSON.stringify({
            fromPublicKey: fromPubKey,
            destinations: destinations,
            utxos: utxos,
            gasAmount: gas
        });
        this.post('/transactions/build', data, callback);
    }

    this.unlockWallet = function(walletName, password, mnemonicPassphrase, callback){
        var path = '/wallets/' + walletName + '/unlock';
        var params = {password: password};
        if (mnemonicPassphrase){
            params.mnemonicPassphrase = mnemonicPassphrase;
        }
        this.post(path, JSON.stringify(params), callback);
    }

    this.walletStatus = function(walletName, callback){
        this.get('/wallets/' + walletName, callback);
    }

    this.changeActiveAddress = function(walletName, address, callback){
        var path = '/wallets/' + walletName + '/change-active-address';
        this.post(path, JSON.stringify({address: address}), callback);
    }

    this.signTx = function(walletName, txId, callback){
        var path = '/wallets/' + walletName + '/sign';
        this.post(path, JSON.stringify({data: txId}), callback);
    }

    this.getAddressInfo = function(walletName, address, callback){
        this.get('/wallets/' + walletName + '/addresses/' + address, callback);
    }

    this.sweepActiveAddress = function(walletName, toAddress, callback){
        var path = '/wallets/' + walletName + '/sweep-active-address';
        this.post(path, JSON.stringify({toAddress: toAddress}), callback);
    }

    this.submitTx = function(unsignedTx, signature, callback){
        var data = JSON.stringify({
            unsignedTx: unsignedTx,
            signature: signature
        });
        this.post('/transactions/submit', data, callback);
    }

    this.txStatus = function(txId, callback){
        this.get('/transactions/status?txId=' + txId, callback);
    }

    this.getBlock = function(blockHash, callback){
        this.get('/blockflow/blocks/' + blockHash, callback);
    }

    this.getMainChainBlockByGhostUncle = function(ghostUncleHash, callback) {
        this.get('/blockflow/main-chain-block-by-ghost-uncle/' + ghostUncleHash, callback)
    }

    this.blockHashesAtHeight = function(height, fromGroup, toGroup, callback){
        var path = '/blockflow/hashes?fromGroup=' + fromGroup + '&toGroup=' + toGroup + '&height=' + height;
        this.get(path, callback);
    }

    this.blockInMainChain = function(blockHash, callback){
        this.get('/blockflow/is-block-in-main-chain?blockHash=' + blockHash, callback);
    }

    this.getUtxos = function(address, callback){
        this.get('/addresses/' + address + '/utxos', callback)
    }

    this.listAddresses = function(walletName, callback){
        this.get('/wallets/' + walletName + '/addresses', callback);
    }
}
const events = require('events');
const bignum = require('bignum');
const blockTemplate = require('./blockTemplate.js');
const constants = require('./constants.js');
const util = require('./util');
const crypto = require('crypto');

// Monotonic hex job-id generator; unique job per new block template.
var JobCounter = function(){
    var counter = 0;

    this.next = function(){
        counter++;
        if (counter % 0xffff === 0)
            counter = 1;
        return this.cur();
    };

    this.cur = function () {
        return counter.toString(16);
    };
};

var ErrorCodes = {
    JobNotFound: 20,
    InvalidJobChainIndex: 21,
    InvalidWorker: 22,
    InvalidNonce: 23,
    DuplicatedShare: 24,
    LowDifficulty: 25,
    InvalidBlockChainIndex: 26
};

function isStringType(value){
    return (typeof value === 'string') || (value instanceof String);
}

/**
 * Time-bounded job store. Batches of jobs are kept in insertion order
 * and evicted once older than `expiryDuration` (same unit as the
 * timestamps passed to addJobs — milliseconds in practice).
 */
function MiningJobs(expiryDuration){
    var _this = this;

    this.jobsList = [];  // array of job batches, oldest first
    this.jobMap = {};    // jobId -> job, for O(1) lookup

    this.removeExpiredJobs = function(now){
        // Length guard fixes a latent TypeError: the original dereferenced
        // jobsList[0][0] unconditionally and would throw once the list drained.
        while (_this.jobsList.length > 0 &&
               now - _this.jobsList[0][0].timestamp > expiryDuration){
            var expiredJobs = _this.jobsList.shift();
            expiredJobs.forEach(job => delete _this.jobMap[job.jobId]);
        }
    }

    this.addJobs = function(jobs, now){
        _this.jobsList.push(jobs);
        _this.removeExpiredJobs(now);
        jobs.forEach(job => _this.jobMap[job.jobId] = job);
    }

    this.getJob = function(jobId){
        return _this.jobMap[jobId];
    }
}

/**
 * Emits:
 * - newJobs(jobs) - Use this event to broadcast new jobs
 * - share(shareData) - It will have blockHex if a block was found
 **/
function JobManager(jobExpiryPeriod){

    //private members
    var _this = this;
    var jobCounter = new JobCounter();

    //public members
    this.validJobs = new MiningJobs(jobExpiryPeriod);

    // Pick one job at random from the node's batch, assign it a fresh id,
    // store it, and broadcast it.
    this.processJobs = function(jobs){
        var now = Date.now();
        var jobIndex = crypto.randomInt(0, jobs.length)
        var job = jobs[jobIndex]
        job.jobId = jobCounter.next()
        var miningJobs = [new blockTemplate(job, now)]
        _this.validJobs.addJobs(miningJobs, now);
        _this.emit('newJobs', miningJobs);
    };

    // Returns the nonce as a Buffer, or null when it is not a hex string
    // of exactly NonceLength bytes.
    function validateNonce(nonceHex){
        if (!isStringType(nonceHex)){
            return null;
        }

        var nonce = null;
        try {
            nonce = Buffer.from(nonceHex, 'hex');
        } catch (error) {
            return null;
        }
        if (nonce.length === constants.NonceLength){
            return nonce;
        }
        return null;
    }

    function addressIsValid(addressStr){
        var [_, error] = util.groupOfAddress(addressStr);
        return error == null;
    }

    /**
     * Extract the payout address from a worker string.
     * Accepted forms: "address", "address.workerName", "workerName.address"
     * (worker names are limited to 32 chars). Returns the address, or null.
     */
    this.getWorkerAddress = function(worker){
        if (!isStringType(worker)){
            return null;
        }

        var index = worker.indexOf('.');
        if (index === -1){
            return addressIsValid(worker) ? worker : null;
        }

        // try to decode address from prefix
        var address = worker.slice(0, index);
        if (addressIsValid(address)){
            var workerName = worker.slice(index + 1);
            return workerName.length > 32 ? null : address;
        }

        // try to decode address from postfix
        index = worker.lastIndexOf('.');
        address = worker.slice(index + 1);
        if (addressIsValid(address)){
            var workerName = worker.slice(0, index);
            return workerName.length > 32 ? null : address;
        }
        return null;
    }

    /**
     * Validate a submitted share against its job. Emits 'share' in all
     * cases; on success the event carries blockHash and foundBlock.
     * Returns {result, error, blockHash?}.
     */
    this.processShare = function(params, previousDifficulty, difficulty, remoteAddress, localPort){
        var shareError = function(error){
            _this.emit('share', {
                job: params.jobId,
                ip: remoteAddress,
                worker: params.worker,
                difficulty: difficulty,
                error: error[1]
            });
            return {error: error, result: null};
        };

        var job = _this.validJobs.getJob(params.jobId);
        if (typeof job === 'undefined' || job.jobId != params.jobId ) {
            return shareError([ErrorCodes.JobNotFound, 'job not found, maybe expired']);
        }

        if ((params.fromGroup != job.fromGroup) || (params.toGroup != job.toGroup)){
            return shareError([ErrorCodes.InvalidJobChainIndex, 'invalid job chain index']);
        }

        var address = _this.getWorkerAddress(params.worker);
        if (!address){
            return shareError([ErrorCodes.InvalidWorker, 'invalid worker']);
        }

        var nonce = validateNonce(params.nonce);
        if (!nonce) {
            return shareError([ErrorCodes.InvalidNonce, 'invalid nonce']);
        }

        if (!job.registerSubmit(params.nonce)) {
            return shareError([ErrorCodes.DuplicatedShare, 'duplicate share']);
        }

        // The block hash determines which chain the block belongs to; it
        // must match the job's chain index.
        var hash = job.hash(nonce);
        var [fromGroup, toGroup] = util.blockChainIndex(hash);
        if ((fromGroup != job.fromGroup) || (toGroup != job.toGroup)){
            return shareError([ErrorCodes.InvalidBlockChainIndex, 'invalid block chain index']);
        }

        var hashBigNum = bignum.fromBuffer(hash);

        // *1024/1024.0 keeps 10 fractional bits of precision through the
        // integer bignum division.
        var shareDiff = global.diff1Target.mul(1024).div(hashBigNum).toNumber() / 1024.0;
        var foundBlock = false;

        //Check if share is a block candidate (matched network difficulty)
        if (job.target.ge(hashBigNum)){
            foundBlock = true;
        }
        else {
            //Check if share didn't reached the miner's difficulty)
            if (shareDiff < difficulty){

                //Check if share matched a previous difficulty from before a vardiff retarget
                if (previousDifficulty && shareDiff >= previousDifficulty){
                    difficulty = previousDifficulty;
                }
                else{
                    return shareError([ErrorCodes.LowDifficulty,
                        'low difficulty share of ' + shareDiff +
                        ', current difficulty: ' + difficulty +
                        ', previous difficulty: ' + previousDifficulty]
                    );
                }

            }
        }

        _this.emit('share', {
            job: job,
            nonce: nonce,
            ip: remoteAddress,
            port: localPort,
            worker: params.worker,
            workerAddress: address,
            difficulty: difficulty,
            shareDiff: shareDiff,
            blockHash: hash.toString('hex'),
            foundBlock: foundBlock
        });

        return {result: true, error: null, blockHash: hash};
    };
};
JobManager.prototype.__proto__ = events.EventEmitter.prototype;

exports.JobManager = JobManager;
exports.ErrorCodes = ErrorCodes;
exports.MiningJobs = MiningJobs;
exports.JobCounter = JobCounter;
shareError([ErrorCodes.InvalidBlockChainIndex, 'invalid block chain index']); 174 | } 175 | 176 | var hashBigNum = bignum.fromBuffer(hash); 177 | 178 | var shareDiff = global.diff1Target.mul(1024).div(hashBigNum).toNumber() / 1024.0; 179 | var foundBlock = false; 180 | 181 | //Check if share is a block candidate (matched network difficulty) 182 | if (job.target.ge(hashBigNum)){ 183 | foundBlock = true; 184 | } 185 | else { 186 | //Check if share didn't reached the miner's difficulty) 187 | if (shareDiff < difficulty){ 188 | 189 | //Check if share matched a previous difficulty from before a vardiff retarget 190 | if (previousDifficulty && shareDiff >= previousDifficulty){ 191 | difficulty = previousDifficulty; 192 | } 193 | else{ 194 | return shareError([ErrorCodes.LowDifficulty, 195 | 'low difficulty share of ' + shareDiff + 196 | ', current difficulty: ' + difficulty + 197 | ', previous difficulty: ' + previousDifficulty] 198 | ); 199 | } 200 | 201 | } 202 | } 203 | 204 | _this.emit('share', { 205 | job: job, 206 | nonce: nonce, 207 | ip: remoteAddress, 208 | port: localPort, 209 | worker: params.worker, 210 | workerAddress: address, 211 | difficulty: difficulty, 212 | shareDiff: shareDiff, 213 | blockHash: hash.toString('hex'), 214 | foundBlock: foundBlock 215 | }); 216 | 217 | return {result: true, error: null, blockHash: hash}; 218 | }; 219 | }; 220 | JobManager.prototype.__proto__ = events.EventEmitter.prototype; 221 | 222 | exports.JobManager = JobManager; 223 | exports.ErrorCodes = ErrorCodes; 224 | exports.MiningJobs = MiningJobs; 225 | exports.JobCounter = JobCounter; 226 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | ## Build 2 | 3 | 1. install redis 4 | 2. install alephium full node 5 | 3. install node.js(>=14) and npm(>=8) 6 | 7 | Or: 8 | 9 | 1. 
"variancePercent": 30 // allow time to vary this % from target without retargeting
"rewardEnabled": true, // enabled by default 61 | "rewardInterval": 600, // update miner balances every this many seconds 62 | "confirmationTime": 30600, // 510m by default, you can decrease this if your payment addresses have enough balance 63 | 64 | "paymentEnabled": true, // enabled by default 65 | "minPaymentCoins": "3.5", // minimum number of coins that a miner must earn before sending payment 66 | "paymentInterval": 600, // send payment every this many seconds 67 | "txConfirmations": { // Check tx confirmations to remove/pay back balance after tx 68 | "chainConfirmations": 10, 69 | "fromGroupConfirmations": 5, 70 | "toGroupConfirmations": 5 71 | }, 72 | 73 | "persistence": { // persistent shares and blocks 74 | "enabled": false, // disabled by default 75 | "host": "127.0.0.1", // postgresql ip 76 | "port": 5432, // postgresql port 77 | "user": "postgres", // postgresql user 78 | "password": "postgres", // postgresql password 79 | "database": "mining-pool" // database name 80 | }, 81 | 82 | "addresses": [], // 4 addresses(we have 4 groups) to where block rewards are given 83 | 84 | "wallet": { 85 | "name": "", // wallet name 86 | "password": "", // wallet password 87 | "mnemonicPassphrase": "" // wallet mnemonic passphrase 88 | } 89 | } 90 | ``` 91 | 92 | run: 93 | 94 | ```shell 95 | npm install 96 | npm run start 97 | ``` 98 | 99 | ## Docker: 100 | 101 | The provided docker-compose file allows you to deploy Redis, Postgres and the Alephium mining pool in 2 simple steps: 102 | 103 | 104 | ### Clone the repo and move to its folder: 105 | ```shell 106 | git clone https://github.com/alephium/mining-pool.git && cd mining-pool 107 | docker-compose up -d 108 | ``` 109 | 110 | 111 | ### (optional) Build different Docker image variants: 112 | 113 | 1. From (modified) local sources: 114 | ```shell 115 | docker build -t mining-pool:latest . 116 | ``` 117 | 118 | 2. 
The only prerequisites are a synced full node and Docker, and of course your own config file. For this kind of deployment the following sections of the config file should be left unaltered:
"variancePercent": 30 // allow time to vary this % from target without retargeting
184 | } 185 | ``` 186 | 187 | Generate a basic, self-signed TLS certificate 188 | 189 | ``` 190 | openssl req -newkey rsa:4096 \ 191 | -x509 \ 192 | -sha256 \ 193 | -days 3650 \ 194 | -nodes \ 195 | -out pool.pem \ 196 | -keyout pool.key 197 | ``` 198 | -------------------------------------------------------------------------------- /test/jobManagerTest.js: -------------------------------------------------------------------------------- 1 | const { JobManager, ErrorCodes, MiningJobs, JobCounter } = require('../lib/jobManager'); 2 | const bignum = require('bignum'); 3 | const { expect } = require('chai'); 4 | const blockTemplate = require('../lib/blockTemplate'); 5 | 6 | describe('test job manager', function(){ 7 | global.diff1Target = bignum.pow(2, 228).sub(1); 8 | 9 | var job = { 10 | fromGroup: 1, 11 | toGroup: 2, 12 | headerBlob: Buffer.from('0007790e4ec67704f105b406379c6640a1edd9f4f55d07628fd555d72da436dccd50000000003309007511dbb1976d272fdaec0a5ead079da6e422c3c16faa593a3abddd3b822d4602064477d36478ce5a7370e0220e5daadfd9c4984f140a41537f000000005276b90956fa5c3d5b5cdb4235de493e239bd12523266099f0ae9834fd481a7288ec588c73287d61dc9b7ed326e1bc8980bffa383698fe780c1d00153b2890a5ea1c798f8e552c79e44d31fe1d02e2fbef8763369a774502229dd01600000000f07f98e3ec9b7d634293748bac9720310d681dd5a3e87f2f330d61d7c2f0ce1adccebc35c08b472d5d7f1a008009c8291382b839aa8493b57e58cbb400b9146184a084990fe481afb4fea6f0cdd5b372bce59d6be4b7c3b1a1ac72260000017ddbd15f591cffffff', 'hex'), 13 | txsBlob: Buffer.from('01000a0080004e20bb9aca000001c4261832f67ec288960022daf915864428873f422f045031a889ba81ecbd50db6e967c498b9d8af431e30000017ddbd33419000a01020000017ddbd15f590100000000', 'hex'), 14 | targetBlob: Buffer.from('00ffffff00000000000000000000000000000000000000000000000000000000', 'hex') 15 | }; 16 | 17 | var jobCounter = new JobCounter(); 18 | 19 | function generateMiningJobs(ts){ 20 | var jobs = []; 21 | for (var i = 0; i < 16; i++){ 22 | var job = {jobId: jobCounter.next(), timestamp: ts}; 23 | 
const { JobManager, ErrorCodes, MiningJobs, JobCounter } = require('../lib/jobManager');
const bignum = require('bignum');
const { expect } = require('chai');
const blockTemplate = require('../lib/blockTemplate');

describe('test job manager', function(){
    global.diff1Target = bignum.pow(2, 228).sub(1);

    // Fixed block template captured from a real job (fromGroup 1 -> toGroup 2).
    var job = {
        fromGroup: 1,
        toGroup: 2,
        headerBlob: Buffer.from('0007790e4ec67704f105b406379c6640a1edd9f4f55d07628fd555d72da436dccd50000000003309007511dbb1976d272fdaec0a5ead079da6e422c3c16faa593a3abddd3b822d4602064477d36478ce5a7370e0220e5daadfd9c4984f140a41537f000000005276b90956fa5c3d5b5cdb4235de493e239bd12523266099f0ae9834fd481a7288ec588c73287d61dc9b7ed326e1bc8980bffa383698fe780c1d00153b2890a5ea1c798f8e552c79e44d31fe1d02e2fbef8763369a774502229dd01600000000f07f98e3ec9b7d634293748bac9720310d681dd5a3e87f2f330d61d7c2f0ce1adccebc35c08b472d5d7f1a008009c8291382b839aa8493b57e58cbb400b9146184a084990fe481afb4fea6f0cdd5b372bce59d6be4b7c3b1a1ac72260000017ddbd15f591cffffff', 'hex'),
        txsBlob: Buffer.from('01000a0080004e20bb9aca000001c4261832f67ec288960022daf915864428873f422f045031a889ba81ecbd50db6e967c498b9d8af431e30000017ddbd33419000a01020000017ddbd15f590100000000', 'hex'),
        targetBlob: Buffer.from('00ffffff00000000000000000000000000000000000000000000000000000000', 'hex')
    };

    var jobCounter = new JobCounter();

    // Build a batch of 16 dummy jobs sharing one timestamp.
    function generateMiningJobs(ts){
        var jobs = [];
        for (var i = 0; i < 16; i++){
            var job = {jobId: jobCounter.next(), timestamp: ts};
            jobs.push(job);
        }
        return jobs;
    }

    var nonce = 'f8c8741232a4ebb0aad38ffb8e829b9bf5b00770f1ac31dc';
    var address = '1AqVGKeHWoLJiVU7heL8EvwQN2hk5bMtvP3PsH57qWayr';
    var invalidAddress = '114E4tiwXSyfvCqLnARL21Ac2pVS6GvPomw5y6HsLMwuyR';
    var defaultHost = '127.0.0.1';
    var defaultPort = 11111;

    function processShare(jobManager, params, prevDiff, currDiff){
        return jobManager.processShare(params, prevDiff, currDiff, defaultHost, defaultPort);
    }

    it('should handle jobs properly', function(){
        var miningJobs = new MiningJobs(500);
        var now = Date.now();
        var jobs1 = generateMiningJobs(now - 600);
        var jobs2 = generateMiningJobs(now);

        expect(miningJobs.jobsList.length).equal(0);
        miningJobs.addJobs(jobs1, now - 600);
        expect(miningJobs.jobsList.length).equal(1);

        // Adding a fresh batch evicts the expired one.
        miningJobs.addJobs(jobs2, now);
        expect(miningJobs.jobsList.length).equal(1);
        expect(miningJobs.jobsList[0]).to.deep.equal(jobs2);
        jobs2.forEach(job => expect(miningJobs.getJob(job.jobId)).to.deep.equal(job));
        jobs1.forEach(job => expect(miningJobs.getJob(job.jobId)).equal(undefined));
    })

    function expectError(error, code, msg){
        expect(error[0]).equal(code);
        expect(error[1]).equal(msg);
    }

    // Fresh JobManager pre-loaded with the fixture job.
    function defaultJobManager(timestamp){
        var jobManager = new JobManager();
        job.jobId = jobCounter.next();
        var jobTs = timestamp ? timestamp : Date.now();
        jobManager.validJobs.addJobs([new blockTemplate(job, jobTs)], jobTs);
        return jobManager;
    }

    it('should process share failed if job does not exist', function(){
        var jobManager = new JobManager();
        var params = {jobId: 1, fromGroup: job.fromGroup, toGroup: job.toGroup, nonce: nonce, worker: address};
        var result = processShare(jobManager, params, 2, 2);
        expectError(result.error, ErrorCodes.JobNotFound, 'job not found, maybe expired');
    })

    it('should process share failed if chainIndex is invalid', function(){
        var jobManager = defaultJobManager();
        var params = {jobId: job.jobId, fromGroup: job.fromGroup + 1, toGroup: job.toGroup, nonce: nonce, worker: address};
        var result = processShare(jobManager, params, 2, 2);
        expectError(result.error, ErrorCodes.InvalidJobChainIndex, 'invalid job chain index');
    })

    it('should process share failed if worker is invalid', function(){
        var jobManager = defaultJobManager();
        var workers = [invalidAddress, 123, null, undefined, '.' + address + '.123', 'a'.repeat(33) + '.' + address];
        var params = {jobId: job.jobId, fromGroup: job.fromGroup, toGroup: job.toGroup, nonce: nonce};
        for (var worker of workers){
            params.worker = worker;
            var result = processShare(jobManager, params, 2, 2);
            expectError(result.error, ErrorCodes.InvalidWorker, 'invalid worker');
        }
    })

    // Fixed typo in the description: 'shuold' -> 'should'.
    it('should process share succeed if worker is valid', function(){
        var workers = ['.' + address, address, 'abc.' + address, '....' + address];
        var params = {fromGroup: job.fromGroup, toGroup: job.toGroup, nonce: nonce};
        var ts = Date.now();
        for (var worker of workers){
            var jobManager = new JobManager();
            job.jobId = jobCounter.next();
            jobManager.validJobs.addJobs([new blockTemplate(job, ts)], ts);
            params.jobId = job.jobId;
            params.worker = worker;
            var result = processShare(jobManager, params, 2, 2);
            expect(result.error).equal(null);
        }
    })

    it('should decode address', function(){
        var jobManager = defaultJobManager();
        var valids = [address, 'a..bc....' + address, address + '.ab..cde', address + '.', '.' + address, 'test.' + address, address + '.test'];
        var invalids = [invalidAddress, 'a'.repeat(33) + '.' + address, address + '.' + 'a'.repeat(33), 1234, null, undefined];
        for (var worker of valids){
            expect(jobManager.getWorkerAddress(worker)).equal(address);
        }
        for (var worker of invalids){
            expect(jobManager.getWorkerAddress(worker)).equal(null);
        }
    })

    it('should process share failed if nonce is invalid', function(){
        var jobManager = defaultJobManager();
        var nonces = ['0011', 123, null, undefined];
        var params = {jobId: job.jobId, fromGroup: job.fromGroup, toGroup: job.toGroup, worker: address};
        for (var nonce of nonces){
            params.nonce = nonce;
            var result = processShare(jobManager, params, 2, 2);
            expectError(result.error, ErrorCodes.InvalidNonce, 'invalid nonce');
        }
    })

    it('should process share failed if share is duplicated', function(){
        var jobManager = defaultJobManager();
        var blockTemplate = jobManager.validJobs.jobsList[0][0];
        var result = blockTemplate.registerSubmit(nonce);
        expect(result).equal(true);
        var params = {jobId: job.jobId, fromGroup: job.fromGroup, toGroup: job.toGroup, nonce: nonce, worker: address};
        var result = processShare(jobManager, params, 2, 2);
        expectError(result.error, ErrorCodes.DuplicatedShare, 'duplicate share');
    })

    it('should process share failed if difficulty is low', function(){
        var jobManager = defaultJobManager();
        var lowDiffNonce = '301d0b1f7e61e1d532d37df520f1acc92cfccc48de323e1c';
        var params = {jobId: job.jobId, fromGroup: job.fromGroup, toGroup: job.toGroup, nonce: lowDiffNonce, worker: address};
        var result = processShare(jobManager, params, 2, 2);
        var [errCode, _] = result.error;
        expect(errCode).equal(ErrorCodes.LowDifficulty);
    })

    it('should process share failed if chain index unmatched', function(){
        var jobManager = defaultJobManager();
        var invalidNonce = 'b6414be3a40e1a2852b3171e4462846ea48c777104b03e5e';
        var params = {jobId: job.jobId, fromGroup: job.fromGroup, toGroup: job.toGroup, nonce: invalidNonce, worker: address};
        var result = processShare(jobManager, params, 2, 2);
        // NOTE(review): expect(value, code, msg) treats the extra args as a
        // message and asserts nothing — this line never verifies the error
        // code. It likely should be expectError(result.error,
        // ErrorCodes.InvalidBlockChainIndex, 'invalid block chain index');
        // confirm the expected code before tightening it.
        expect(result.error, ErrorCodes.InvalidBlockChainIndex, 'invalid block chain index');
    })

    it('should accept share if difficulty larger than current difficulty', function(){
        var jobManager = defaultJobManager();
        var params = {jobId: job.jobId, fromGroup: job.fromGroup, toGroup: job.toGroup, nonce: nonce, worker: address};
        var currentDiff = 10;
        var prevDiff = 2;
        var result = processShare(jobManager, params, prevDiff, currentDiff);
        expect(result.error).equal(null);

        // NOTE(review): 'share' is emitted synchronously inside processShare,
        // so a listener registered here never fires and these assertions are
        // dead. Register the listener before calling processShare — and
        // confirm the expected values first, since they have never run.
        jobManager.on('share', function(shareData){
            expect(shareData.difficulty).equal(currentDiff);
            expect(shareData.foundBlock).equal(true);
        });
    })

    it('should accept share if difficulty larger than previous difficulty', function(){
        var jobManager = defaultJobManager();
        var params = {jobId: job.jobId, fromGroup: job.fromGroup, toGroup: job.toGroup, nonce: nonce, worker: address};
        var currentDiff = 20;
        var prevDiff = 10;
        var result = processShare(jobManager, params, prevDiff, currentDiff);
        expect(result.error).equal(null);

        // NOTE(review): same dead-listener issue as above — registered after
        // the synchronous emit, so these assertions never execute.
        jobManager.on('share', function(shareData){
            expect(shareData.difficulty).equal(prevDiff);
            expect(shareData.foundBlock).equal(true);
        })
    })
})
var result = processShare(jobManager, params, prevDiff, currentDiff); 178 | expect(result.error).equal(null); 179 | 180 | jobManager.on('share', function(shareData){ 181 | expect(shareData.difficulty).equal(prevDiff); 182 | expect(shareData.foundBlock).equal(true); 183 | }) 184 | }) 185 | }) 186 | -------------------------------------------------------------------------------- /test/stratumTest.js: -------------------------------------------------------------------------------- 1 | const stratum = require('../lib/stratum'); 2 | const assert = require('assert'); 3 | const net = require('net'); 4 | const { expect } = require('chai'); 5 | 6 | var config = { 7 | banning: { 8 | enabled: true, 9 | time: 1, 10 | invalidPercent: 50, 11 | checkThreshold: 4, 12 | purgeInterval: 2 13 | }, 14 | 15 | pool: { 16 | port: 38888, 17 | diff: 12 18 | }, 19 | 20 | connectionTimeout: 5, 21 | maxConnectionsFromSameIP: 3 22 | }; 23 | 24 | function DummyJob(){ 25 | var target = global.diff1Target.mul(1024).div(Math.ceil(config.pool.diff * 1024)).toBuffer().toString('hex'); 26 | this.getJobParams = function(){ 27 | return { 28 | jobId: 1, 29 | fromGroup: 0, 30 | toGroup: 0, 31 | headerBlob: 'headerBlob', 32 | txsBlob: 'txsBlob', 33 | targetBlob: target 34 | } 35 | } 36 | } 37 | 38 | describe('test stratum server', function(){ 39 | var server; 40 | this.beforeEach(function(){ 41 | server = new stratum.Server(config); 42 | }); 43 | 44 | this.afterEach(function(){ 45 | server.close(); 46 | }); 47 | 48 | var submitMessage = { 49 | id: null, 50 | method: 'mining.submit', 51 | params: 'block' 52 | }; 53 | 54 | var subscribeMessage = { 55 | id: null, 56 | method: 'mining.subscribe', 57 | params: [] 58 | }; 59 | 60 | var authorizeMessage = { 61 | id: null, 62 | method: 'mining.authorize', 63 | params: [] 64 | }; 65 | 66 | function assertBanned(address){ 67 | var [banned, _] = server.isBanned(address); 68 | assert(banned); 69 | } 70 | 71 | function assertNotBanned(address){ 72 | var bannedIps = 
Object.keys(server.bannedIPs); 73 | assert(bannedIps.find(ip => ip === address) === undefined); 74 | } 75 | 76 | function setupClient(client, callback){ 77 | client.setEncoding('utf8'); 78 | client.setNoDelay(true); 79 | client.connect(config.pool.port); 80 | 81 | var buffer = ''; 82 | client.on('data', function(data){ 83 | buffer += data; 84 | if (buffer.indexOf('\n') !== -1){ 85 | var messages = buffer.split('\n'); 86 | var remain = buffer.slice(-1) === '\n' ? '' : messages.pop(); 87 | messages.forEach(message => { 88 | if (message === '') return; 89 | callback(JSON.parse(message)); 90 | }); 91 | buffer = remain; 92 | } 93 | }); 94 | } 95 | 96 | it('should work as expected', function(done){ 97 | var client = net.Socket(); 98 | var jobs = [new DummyJob()]; 99 | var responses = []; 100 | 101 | setupClient(client, function(message){ 102 | responses.push(message); 103 | if (responses.length === 6){ 104 | // mining.set_difficulty 105 | expect(responses[0].params).to.deep.equal([config.pool.diff]); 106 | // mining.notify 107 | expect(responses[1].params).to.deep.equal(jobs.map(job => job.getJobParams())); 108 | // submit result 109 | expect(responses[2].result).equal(true); 110 | // authorize result 111 | expect(responses[3].result).equal(true); 112 | // mining.set_extranonce 113 | expect(responses[4].method).equal('mining.set_extranonce'); 114 | expect(responses[4].params.length).equal(1); 115 | expect(responses[4].params[0].length).equal(4); 116 | // subscribe result 117 | expect(responses[5]).to.deep.equal({id: null, result: 'result'}); 118 | 119 | client.end(); 120 | done(); 121 | } 122 | }); 123 | 124 | client.on('connect', function(){ 125 | var clientIds = Object.keys(server.stratumClients); 126 | var stratumClient = server.stratumClients[clientIds[0]]; 127 | stratumClient.on('submit', function(params, callback){ 128 | expect(params).equal(submitMessage.params); 129 | callback(null, true); 130 | }); 131 | 132 | assert(clientIds.length === 1); 133 | 
server.broadcastMiningJobs(jobs); 134 | client.write(JSON.stringify(submitMessage) + '\n'); 135 | client.write(JSON.stringify(authorizeMessage) + '\n'); 136 | client.write(JSON.stringify(subscribeMessage) + '\n'); 137 | }); 138 | }) 139 | 140 | it('should disconnect if client is banned', function(done){ 141 | var client = net.Socket(); 142 | client.connect(config.pool.port); 143 | client.on('connect', function(){ 144 | var remoteAddress = Object.keys(server.connectionNumFromIP)[0]; 145 | server.addBannedIP(remoteAddress); 146 | assertBanned(remoteAddress); 147 | 148 | client.on('data', _ => {}); 149 | client.end(); 150 | client.on('close', function(){ 151 | client.removeAllListeners('connect'); 152 | client.removeAllListeners('close'); 153 | 154 | server.on('client.disconnected', function(_client){ 155 | expect(remoteAddress).equal(_client.remoteAddress); 156 | expect(Object.keys(server.stratumClients).length).equal(0); 157 | expect(Object.keys(server.connectionNumFromIP).length).equal(0); 158 | done(); 159 | }); 160 | client.connect(config.pool.port); 161 | }); 162 | }); 163 | }) 164 | 165 | it('should ban client when received too much invalid shares', function(done){ 166 | var client = net.Socket(); 167 | client.connect(config.pool.port); 168 | client.on('connect', function(){ 169 | var clientId = Object.keys(server.stratumClients)[0]; 170 | var stratumClient = server.stratumClients[clientId]; 171 | assertNotBanned(client.remoteAddress); 172 | 173 | for (var i = 0; i < config.banning.checkThreshold + 1; i++){ 174 | client.write(JSON.stringify(submitMessage) + '\n'); 175 | } 176 | 177 | stratumClient.on('submit', function(params, callback){ 178 | expect(params).equal(submitMessage.params); 179 | callback(null, false); 180 | }); 181 | }); 182 | 183 | server.on('client.disconnected', function(stratumClient){ 184 | assertBanned(stratumClient.remoteAddress); 185 | assert(Object.keys(server.stratumClients).length === 0); 186 | 
assert(Object.keys(server.connectionNumFromIP).length === 0); 187 | done(); 188 | }) 189 | }) 190 | 191 | it('should reset shares', function(done){ 192 | var client = net.Socket(); 193 | client.connect(config.pool.port); 194 | 195 | var shares = [ 196 | {id: null, method: 'mining.submit', params: {valid: true}}, 197 | {id: null, method: 'mining.submit', params: {valid: true}}, 198 | {id: null, method: 'mining.submit', params: {valid: true}}, 199 | {id: null, method: 'mining.submit', params: {valid: false}}, 200 | ]; 201 | client.on('connect', function(){ 202 | var clientId = Object.keys(server.stratumClients)[0]; 203 | var stratumClient = server.stratumClients[clientId]; 204 | assertNotBanned(client.remoteAddress); 205 | 206 | for (var idx in shares){ 207 | client.write(JSON.stringify(shares[idx]) + '\n'); 208 | } 209 | 210 | var invalids = 0, valids = 0; 211 | stratumClient.on('submit', function(params, callback){ 212 | callback(null, params.valid); 213 | if (params.valid) valids++; 214 | else invalids++; 215 | if ((valids + invalids) === shares.length){ 216 | expect(stratumClient.shares.valid).equal(0); 217 | expect(stratumClient.shares.invalid).equal(0); 218 | done(); 219 | } else { 220 | expect(stratumClient.shares.valid).equal(valids); 221 | expect(stratumClient.shares.invalid).equal(invalids); 222 | } 223 | }); 224 | }); 225 | }) 226 | 227 | it('should unban client when ban time expired', function(done){ 228 | var address = '11.11.11.11'; 229 | server.addBannedIP(address); 230 | assertBanned(address); 231 | 232 | setTimeout(function() { 233 | assertNotBanned(address); 234 | done(); 235 | }, (config.banning.purgeInterval + 1) * 1000); 236 | }).timeout((config.banning.purgeInterval + 2) * 1000); 237 | 238 | it('should not emit `forgaveBannedIP` if client has not been banned before', function(done){ 239 | var client = net.Socket(); 240 | client.connect(config.pool.port); 241 | 242 | client.on('connect', function(){ 243 | var clientId = 
Object.keys(server.stratumClients)[0]; 244 | var stratumClient = server.stratumClients[clientId]; 245 | assertNotBanned(stratumClient.remoteAddress); 246 | 247 | setTimeout(_ => done(), 1000); 248 | stratumClient.on('forgaveBannedIP', function(){ 249 | assert.fail('client has not been banned before'); 250 | }); 251 | }); 252 | }) 253 | 254 | it('should increase/decrease connection num when client connected/disconnected', function(){ 255 | var ipAddress = '11.11.11.11'; 256 | for (var idx = 0; idx < config.maxConnectionsFromSameIP; idx++){ 257 | var okey = server.addConnectionFromIP(ipAddress); 258 | expect(okey).equal(true); 259 | } 260 | var okey = server.addConnectionFromIP(ipAddress); 261 | expect(okey).equal(false); 262 | 263 | server.removeConnectionFromIP(ipAddress); 264 | okey = server.addConnectionFromIP(ipAddress); 265 | expect(okey).equal(true); 266 | }) 267 | 268 | it('should allow more connections to whitelisted ips', function(){ 269 | var ip = '::ffff:11.11.11.11'; 270 | config.whitelistIps = [ip]; 271 | for (var i = 0; i < config.maxConnectionsFromSameIP; i++){ 272 | expect(server.addConnectionFromIP(ip)).equal(true); 273 | } 274 | expect(server.addConnectionFromIP(ip)).equal(true); 275 | }) 276 | 277 | it('should limit the connections from same IP', function(done){ 278 | var clients = []; 279 | function createClient(num, callback){ 280 | var client = net.Socket(); 281 | client.connect(config.pool.port); 282 | clients.push(client); 283 | client.on('connect', _ => { 284 | if (num === 1) callback(); 285 | else createClient(num - 1, callback); 286 | }); 287 | } 288 | 289 | createClient(config.maxConnectionsFromSameIP, function(){ 290 | var ipAddress = Object.keys(server.connectionNumFromIP)[0]; 291 | var connectionNum = server.connectionNumFromIP[ipAddress]; 292 | if (connectionNum && connectionNum == config.maxConnectionsFromSameIP){ 293 | var client = net.Socket(); 294 | client.connect(config.pool.port); 295 | client.on('close', function(){ 296 | 
clients.forEach(c => c.destroy()); 297 | done(); 298 | }); 299 | } 300 | }); 301 | }) 302 | }) 303 | -------------------------------------------------------------------------------- /test/shareProcessorTest.js: -------------------------------------------------------------------------------- 1 | const RedisMock = require('ioredis-mock'); 2 | const { expect, assert } = require('chai'); 3 | const nock = require('nock'); 4 | const ShareProcessor = require('../lib/shareProcessor'); 5 | const util = require('../lib/util'); 6 | const test = require('./test'); 7 | 8 | describe('test share processor', function(){ 9 | var redisClient; 10 | this.beforeEach(function(){ 11 | redisClient = new RedisMock(); 12 | }) 13 | 14 | this.afterEach(function(){ 15 | redisClient.disconnect(); 16 | }) 17 | 18 | it('should allocate reward according shares', function(){ 19 | var shareProcessor = new ShareProcessor(test.config, test.logger); 20 | var workerRewards = {}; 21 | var shares = {miner0: 8, miner1: 4, miner2: 2, miner3: 1, miner4: 1}; 22 | var totalReward = util.fromALPH(16); 23 | shareProcessor.allocateReward(totalReward, workerRewards, shares); 24 | expect(workerRewards).to.deep.equal({ 25 | miner0: 8, miner1: 4, miner2: 2, miner3: 1, miner4: 1 26 | }) 27 | }) 28 | 29 | it('should process shares', function(done){ 30 | var shareProcessor = new ShareProcessor(test.config, test.logger); 31 | shareProcessor.redisClient = redisClient; 32 | 33 | var shareData = { 34 | job: {fromGroup: 0, toGroup: 1}, 35 | worker: 'proxy.1AqVGKeHWoLJiVU7heL8EvwQN2hk5bMtvP3PsH57qWayr', 36 | workerAddress: '1AqVGKeHWoLJiVU7heL8EvwQN2hk5bMtvP3PsH57qWayr', 37 | difficulty: 1.2, 38 | foundBlock: false 39 | }; 40 | 41 | shareProcessor.handleShare(shareData); 42 | var currentRoundKey = shareProcessor.currentRoundKey( 43 | shareData.job.fromGroup, 44 | shareData.job.toGroup 45 | ); 46 | 47 | redisClient.hget(currentRoundKey, shareData.workerAddress, function(error, res){ 48 | if (error) assert.fail('Test failed: ' 
+ error); 49 | expect(parseFloat(res)).equal(shareData.difficulty); 50 | 51 | shareData.foundBlock = true; 52 | var blockHashHex = '0011'; 53 | shareData.blockHash = blockHashHex; 54 | shareProcessor.handleShare(shareData); 55 | 56 | var roundKey = shareProcessor.roundKey( 57 | shareData.job.fromGroup, 58 | shareData.job.toGroup, 59 | blockHashHex 60 | ); 61 | 62 | redisClient 63 | .multi() 64 | .hget(roundKey, shareData.workerAddress) 65 | .smembers('pendingBlocks') 66 | .hget('foundBlocks', blockHashHex) 67 | .exec(function(error, result){ 68 | if (error) assert.fail('Test failed: ' + error); 69 | var difficulty = result[0][1]; 70 | var pendingBlocks = result[1][1]; 71 | var blockMiner = result[2][1]; 72 | 73 | expect(parseFloat(difficulty)).equal(shareData.difficulty * 2); 74 | expect(pendingBlocks.length).equal(1); 75 | expect(pendingBlocks[0].startsWith(blockHashHex)); 76 | expect(blockMiner).equal(shareData.workerAddress); 77 | done(); 78 | }); 79 | }); 80 | }) 81 | 82 | it('should update miner balances and remove shares', function(done){ 83 | var shareProcessor = new ShareProcessor(test.config, test.logger); 84 | shareProcessor.redisClient = redisClient; 85 | 86 | var shares = {'miner0': '4', 'miner1': '2', 'miner2': '2'}; 87 | var block = {pendingBlockValue: '0011' + ':' + '0', hash: '0011', fromGroup: 0, toGroup: 1, height: 1, rewardAmount: '40000000000000000000'}; 88 | 89 | var checkState = function(){ 90 | redisClient 91 | .multi() 92 | .hgetall('balances') 93 | .smembers('pendingBlocks') 94 | .exec(function(error, result){ 95 | if (error) assert.fail('Test failed: ' + error); 96 | var balances = result[0][1]; 97 | var pendingBlocks = result[1][1]; 98 | 99 | expect(balances.miner0).equal('20'); 100 | expect(balances.miner1).equal('10'); 101 | expect(balances.miner2).equal('10'); 102 | expect(pendingBlocks.length).equal(0); 103 | done(); 104 | }); 105 | } 106 | 107 | var roundKey = shareProcessor.roundKey( 108 | block.fromGroup, 109 | block.toGroup, 110 | 
block.hash 111 | ); 112 | 113 | redisClient 114 | .multi() 115 | .sadd('pendingBlocks', block.pendingBlockValue) 116 | .hset(roundKey, 'miner0', shares.miner0) 117 | .hset(roundKey, 'miner1', shares.miner1) 118 | .hset(roundKey, 'miner2', shares.miner2) 119 | .exec(function(error, _){ 120 | if (error) assert.fail('Test failed: ' + error); 121 | shareProcessor.allocateRewards([block], _ => checkState()); 122 | }); 123 | }) 124 | 125 | it('should reward uncle miners with correct reward amount', function(done){ 126 | var config = { ...test.config, confirmationTime: 0 } 127 | var shareProcessor = new ShareProcessor(config, test.logger); 128 | shareProcessor.redisClient = redisClient; 129 | 130 | var currentMs = Date.now(); 131 | var rewardAmount = '4000000000000000000'; 132 | var ghostUncleRewardAmount = '2000000000000000000'; 133 | var ghostUncleCoinbaseTx = [{unsigned:{fixedOutputs:[{attoAlphAmount: rewardAmount}]}}]; 134 | var ghostUncleBlock = {hash: 'block1', height: 1, chainFrom: 0, chainTo: 0, transactions: ghostUncleCoinbaseTx, inMainChain: false, submittedMs: currentMs, ghostUncles: []} 135 | 136 | var mainChainCoinbaseTx = [{unsigned:{fixedOutputs:[{attoAlphAmount: rewardAmount},{attoAlphAmount: ghostUncleRewardAmount}]}}]; 137 | var mainChainBlock = {hash: 'block2', height: 2, chainFrom: 0, chainTo: 0, transactions: mainChainCoinbaseTx, inMainChain: true, submittedMs: currentMs, ghostUncles: [{blockHash:ghostUncleBlock.hash}]} 138 | var blocks = [ghostUncleBlock, mainChainBlock] 139 | 140 | function prepare(blocks, callback){ 141 | var restServer = nock('http://127.0.0.1:12973'); 142 | var redisTx = redisClient.multi(); 143 | restServer.persist().get('/blockflow/main-chain-block-by-ghost-uncle/' + ghostUncleBlock.hash).reply(200, mainChainBlock) 144 | for (var block of blocks){ 145 | restServer.persist().get('/blockflow/blocks/' + block.hash).reply(200, block); 146 | var isInMainChainPath = '/blockflow/is-block-in-main-chain?blockHash=' + block.hash; 147 | 
restServer.persist().get(isInMainChainPath).reply(200, block.inMainChain ? true : false); 148 | 149 | var blockWithTs = block.hash + ':' + block.submittedMs; 150 | redisTx.sadd('pendingBlocks', blockWithTs); 151 | } 152 | 153 | redisTx.exec(function(error, _){ 154 | if (error) assert.fail('Test failed: ' + error); 155 | callback(restServer); 156 | }); 157 | } 158 | 159 | prepare(blocks, _ => { 160 | shareProcessor.getPendingBlocks( 161 | blocks.map(block => block.hash + ':' + block.submittedMs), 162 | function(pendingBlocks){ 163 | expect(pendingBlocks).to.deep.equal([ 164 | { 165 | fromGroup: 0, 166 | hash: "block1", 167 | height: 1, 168 | pendingBlockValue: blocks[0].hash + ':' + blocks[0].submittedMs, 169 | rewardAmount: "2000000000000000000", 170 | toGroup: 0, 171 | }, 172 | { 173 | fromGroup: 0, 174 | hash: "block2", 175 | height: 2, 176 | pendingBlockValue: blocks[1].hash + ':' + blocks[1].submittedMs, 177 | rewardAmount: "4000000000000000000", 178 | toGroup: 0 179 | } 180 | ]); 181 | nock.cleanAll(); 182 | done(); 183 | } 184 | ); 185 | }); 186 | }) 187 | 188 | it('should remove orphan block and shares', function(done){ 189 | var shareProcessor = new ShareProcessor(test.config, test.logger); 190 | shareProcessor.redisClient = redisClient; 191 | 192 | var rewardAmount = '4000000000000000000'; 193 | var transactions = [{unsigned:{fixedOutputs:[{attoAlphAmount: rewardAmount}]}}]; 194 | var currentMs = Date.now(); 195 | var confirmationTime = test.config.confirmationTime * 1000; 196 | var blocks = [ 197 | {hash: 'block1', height: 1, chainFrom: 0, chainTo: 0, transactions: transactions, inMainChain: true, submittedMs: currentMs}, 198 | {hash: 'block2', height: 2, chainFrom: 0, chainTo: 0, transactions: transactions, inMainChain: false, submittedMs: currentMs - confirmationTime}, 199 | {hash: 'block3', height: 3, chainFrom: 0, chainTo: 0, transactions: transactions, inMainChain: true, submittedMs: currentMs - confirmationTime}, 200 | {hash: 'block4', height: 4, 
chainFrom: 0, chainTo: 0, transactions: transactions, inMainChain: true, submittedMs: currentMs - confirmationTime}, 201 | ]; 202 | var orphanBlock = blocks[1]; 203 | 204 | var shares = {}; 205 | for (var block of blocks){ 206 | shares[block.hash] = {address: 'miner', difficulty: 1} 207 | } 208 | 209 | function prepare(blocks, shares, callback){ 210 | var restServer = nock('http://127.0.0.1:12973'); 211 | var redisTx = redisClient.multi(); 212 | restServer.persist() 213 | .get('/blockflow/main-chain-block-by-ghost-uncle/' + orphanBlock.hash) 214 | .reply(404, { detail: `The mainchain block that references the ghost uncle block ${orphanBlock.hash} not found` }); 215 | for (var block of blocks){ 216 | restServer.persist().get('/blockflow/blocks/' + block.hash).reply(200, block); 217 | var path = '/blockflow/is-block-in-main-chain?blockHash=' + block.hash; 218 | restServer.persist().get(path).reply(200, block.inMainChain ? true : false); 219 | 220 | var blockWithTs = block.hash + ':' + block.submittedMs; 221 | redisTx.sadd('pendingBlocks', blockWithTs); 222 | var sharesOfBlock = shares[block.hash]; 223 | var roundKey = shareProcessor.roundKey(block.chainFrom, block.chainTo, block.hash); 224 | for (var address in sharesOfBlock){ 225 | redisTx.hincrbyfloat(roundKey, address, sharesOfBlock[address]); 226 | } 227 | } 228 | 229 | redisTx.exec(function(error, _){ 230 | if (error) assert.fail('Test failed: ' + error); 231 | callback(restServer); 232 | }); 233 | } 234 | 235 | var blockData = function(block){ 236 | return {hash: block.hash, fromGroup: block.chainFrom, toGroup: block.chainTo, height: block.height, rewardAmount: rewardAmount}; 237 | } 238 | 239 | var checkState = function(){ 240 | var orphanBlockWithTs = orphanBlock.hash + ':' + orphanBlock.submittedMs; 241 | var roundKey = shareProcessor.roundKey(orphanBlock.chainFrom, orphanBlock.chainTo, orphanBlock.hash); 242 | 243 | redisClient.multi() 244 | .smembers('pendingBlocks') 245 | .hgetall(roundKey) 246 | 
.exec(function(error, results){ 247 | if (error) assert.fail('Test failed: ' + error); 248 | 249 | var pendingBlocks = results[0][1]; 250 | var orphanBlockShares = results[1][1]; 251 | expect(pendingBlocks.indexOf(orphanBlockWithTs)).equal(-1); 252 | expect(orphanBlockShares).to.deep.equal({}); 253 | done(); 254 | }); 255 | } 256 | 257 | var runTest = function(_restServer){ 258 | var expected = [ 259 | { 260 | pendingBlockValue: blocks[2].hash + ':' + blocks[2].submittedMs, 261 | ...blockData(blocks[2]) 262 | }, 263 | { 264 | pendingBlockValue: blocks[3].hash + ':' + blocks[3].submittedMs, 265 | ...blockData(blocks[3]) 266 | } 267 | ]; 268 | 269 | shareProcessor.getPendingBlocks( 270 | blocks.map(block => block.hash + ':' + block.submittedMs), 271 | function(pendingBlocks){ 272 | expect(pendingBlocks).to.deep.equal(expected); 273 | nock.cleanAll(); 274 | checkState(); 275 | } 276 | ); 277 | } 278 | 279 | prepare(blocks, shares, _restServer => runTest(_restServer)); 280 | }) 281 | }) 282 | -------------------------------------------------------------------------------- /lib/pool.js: -------------------------------------------------------------------------------- 1 | const events = require('events'); 2 | const Redis = require('ioredis'); 3 | const varDiff = require('./varDiff.js'); 4 | const daemon = require('./daemon.js'); 5 | const stratum = require('./stratum.js'); 6 | const { JobManager } = require('./jobManager.js'); 7 | const constants = require('./constants.js'); 8 | const ShareProcessor = require('./shareProcessor.js'); 9 | const PaymentProcessor = require('./paymentProcessor.js'); 10 | const Stats = require('./stats.js'); 11 | 12 | var pool = module.exports = function pool(config, logger){ 13 | var topicNameClient = "workers-events"; 14 | var topicNameShares = "shares-events"; 15 | 16 | this.redisClient = new Redis(config.redis.port, config.redis.host, {db: config.redis.db}); 17 | this.config = config; 18 | var _this = this; 19 | var jobExpiryPeriod = 
config.jobExpiryPeriod * 1000; // ms 20 | 21 | this.start = function(){ 22 | SetupVarDiff(); 23 | SetupDaemonInterface(function(){ 24 | SetupJobManager(); 25 | OnBlockchainSynced(function(){ 26 | StartShareProcessor(); 27 | StartPaymentProcessor(); 28 | StartStatsReport(); 29 | StartStratumServer(); 30 | }); 31 | }); 32 | }; 33 | 34 | function OnBlockchainSynced(syncedCallback){ 35 | 36 | var checkSynced = function(displayNotSynced){ 37 | _this.daemon.isSynced(function(synced){ 38 | if (synced){ 39 | syncedCallback(); 40 | } 41 | else{ 42 | if (displayNotSynced) displayNotSynced(); 43 | setTimeout(checkSynced, 5000); 44 | } 45 | }); 46 | }; 47 | checkSynced(function(){ 48 | //Only let the first fork show synced status or the log wil look flooded with it 49 | if (!process.env.forkId || process.env.forkId === '0') 50 | logger.info('Daemon is still syncing with network (download blockchain) - server will be started once synced'); 51 | }); 52 | } 53 | 54 | function SetupVarDiff(){ 55 | _this.varDiff = new varDiff(config.pool.varDiff); 56 | _this.varDiff.on('newDifficulty', function(client, newDiff) { 57 | 58 | /* We request to set the newDiff @ the next difficulty retarget 59 | (which should happen when a new job comes in - AKA BLOCK) */ 60 | client.enqueueNextDifficulty(newDiff); 61 | }); 62 | } 63 | 64 | function SetupJobManager(){ 65 | 66 | _this.jobManager = new JobManager(jobExpiryPeriod); 67 | 68 | _this.jobManager.on('newJobs', function(templates){ 69 | //Check if stratumServer has been initialized yet 70 | if (_this.stratumServer) { 71 | _this.stratumServer.broadcastMiningJobs(templates); 72 | } 73 | }).on('share', function(shareData){ 74 | if (shareData.error){ 75 | _this.redisClient.publish(topicNameClient,JSON.stringify({ 76 | state: 'invalidShare', 77 | ip: shareData.ip, 78 | port: shareData.port, 79 | error: shareData.error, 80 | errorCode: shareData.errorCode 81 | })); 82 | // we only emit valid shares 83 | logger.error('Invalid share from ' + 
shareData.worker + 84 | ', error: ' + shareData.error + 85 | ', jobId: ' + shareData.job + 86 | ', ip: ' + shareData.ip 87 | ); 88 | return; 89 | } 90 | 91 | var job = shareData.job; 92 | var chainIndex = chainIndexStr(job.fromGroup, job.toGroup); 93 | logger.info('Received share from ' + shareData.worker + 94 | ', jobId: ' + job.jobId + 95 | ', chainIndex: ' + chainIndex + 96 | ', pool difficulty: ' + shareData.difficulty + 97 | ', share difficulty: ' + shareData.shareDiff + 98 | ', ip: ' + shareData.ip 99 | ); 100 | _this.redisClient.publish(topicNameShares,JSON.stringify({ 101 | worker: shareData.worker, 102 | workerAddr: shareData.workerAddress, 103 | jobId: job.jobId, 104 | fromGroup: job.fromGroup, 105 | toGroup: job.toGroup, 106 | pool_difficulty: shareData.difficulty, 107 | share_difficulty: shareData.shareDiff, 108 | block_hash: shareData.blockHash, 109 | found_block: shareData.foundBlock, 110 | ip: shareData.ip 111 | })) 112 | _this.shareProcessor.handleShare(shareData); 113 | if (shareData.foundBlock){ 114 | logger.info('Found block for chainIndex: ' + chainIndex + 115 | ', hash: ' + shareData.blockHash + 116 | ', miner: ' + shareData.worker 117 | ); 118 | 119 | var block = Buffer.concat([shareData.nonce, job.headerBlob, job.txsBlob]); 120 | _this.daemon.submit(block, function(error){ 121 | if (error) { 122 | logger.error('Submit block error: ' + error); 123 | } 124 | }); 125 | } 126 | }) 127 | } 128 | 129 | function chainIndexStr(fromGroup, toGroup){ 130 | return fromGroup + " -> " + toGroup; 131 | } 132 | 133 | function SetupDaemonInterface(finishedCallback){ 134 | 135 | if (!config.daemon) { 136 | logger.error('No daemons have been configured - pool cannot start'); 137 | return; 138 | } 139 | 140 | // TODO: support backup daemons 141 | _this.daemon = new daemon.interface(config.daemon, logger); 142 | 143 | _this.daemon.once('online', function(){ 144 | finishedCallback(); 145 | _this.daemon.connectToMiningServer(messageHandler); 146 | 147 | 
}).on('cliqueNotReady', function(){ 148 | logger.info('Clique is not ready.'); 149 | 150 | }).on('error', function(message){ 151 | logger.error(message); 152 | 153 | }); 154 | 155 | _this.daemon.init(); 156 | } 157 | 158 | function messageHandler(message){ 159 | switch(message.type) { 160 | case constants.JobsMessageType: 161 | _this.jobManager.processJobs(message.payload); 162 | break; 163 | case constants.SubmitResultMessageType: 164 | var result = message.payload; 165 | handleSubmitResult(result); 166 | break; 167 | default: 168 | logger.error('Invalid message type: ' + message.type); 169 | } 170 | } 171 | 172 | function handleSubmitResult(result){ 173 | var chainIndex = chainIndexStr(result.fromGroup, result.toGroup); 174 | var blockHashHex = result.blockHash.toString('hex'); 175 | if (result.succeed){ 176 | logger.info(`Submit block ${blockHashHex} succeed for chainIndex ${chainIndex}`); 177 | } 178 | else { 179 | logger.error(`Submit block ${blockHashHex} failed for chainIndex ${chainIndex}`); 180 | } 181 | } 182 | 183 | function StartShareProcessor(){ 184 | _this.shareProcessor = new ShareProcessor(config, logger); 185 | _this.shareProcessor.start(); 186 | } 187 | 188 | function StartPaymentProcessor(){ 189 | _this.paymentProcessor = new PaymentProcessor(config, logger); 190 | _this.paymentProcessor.start(); 191 | } 192 | 193 | function StartStatsReport(){ 194 | _this.stats = new Stats(config, logger); 195 | _this.stats.reportStatsRegularly(); 196 | } 197 | 198 | function StartStratumServer(){ 199 | _this.stratumServer = new stratum.Server(config); 200 | 201 | _this.stratumServer.on('started', function(){ 202 | _this.stratumServer.broadcastMiningJobs(_this.jobManager.currentJobs); 203 | }).on('tooManyConnectionsFromSameIP', function(ipAddress){ 204 | logger.warn('Too many connections from IP: ' + ipAddress); 205 | 206 | }).on('client.connected', function(client){ 207 | logger.info('New miner connected: ' + client.getLabel()); 208 | 209 | 
_this.redisClient.publish(topicNameClient,JSON.stringify({ 210 | state: 'connected', 211 | ip: client.remoteAddress, 212 | port: client.remotePort 213 | })) 214 | _this.varDiff.manageClient(client); 215 | 216 | client.on('submit', function(params, resultCallback){ 217 | var result =_this.jobManager.processShare( 218 | params, 219 | client.previousDifficulty, 220 | client.difficulty, 221 | client.remoteAddress, 222 | client.socket.localPort 223 | ); 224 | resultCallback(result.error, result.result ? true : null); 225 | 226 | }).on('malformedMessage', function (message) { 227 | logger.warn('Malformed message from ' + client.getLabel() + ': ' + message); 228 | _this.redisClient.publish(topicNameClient,JSON.stringify({ 229 | state: 'malformedMessage', 230 | ip: client.remoteAddress, 231 | port: client.remotePort 232 | })) 233 | 234 | }).on('socketError', function(err) { 235 | logger.warn('Socket error from ' + client.getLabel() + ': ' + JSON.stringify(err)); 236 | _this.redisClient.publish(topicNameClient,JSON.stringify({ 237 | state: 'socketError', 238 | ip: client.remoteAddress, 239 | port: client.remotePort 240 | })) 241 | 242 | }).on('socketTimeout', function(reason){ 243 | logger.warn('Connected timed out for ' + client.getLabel() + ': ' + reason) 244 | _this.redisClient.publish(topicNameClient,JSON.stringify({ 245 | state: 'socketTimeout', 246 | ip: client.remoteAddress, 247 | port: client.remotePort 248 | })) 249 | 250 | }).on('socketDisconnect', function() { 251 | logger.warn('Socket disconnected from ' + client.getLabel()); 252 | _this.redisClient.publish(topicNameClient,JSON.stringify({ 253 | state: 'socketDisconnected', 254 | ip: client.remoteAddress, 255 | port: client.remotePort 256 | })) 257 | 258 | }).on('difficultyChanged', function(difficulty){ 259 | logger.info('Set new difficulty for ' + client.getLabel() + ' to ' + difficulty); 260 | 261 | }).on('kickedBannedIP', function(remainingBanTime){ 262 | logger.info('Rejected incoming connection from ' + 
client.remoteAddress + ' banned for ' + remainingBanTime + ' more seconds'); 263 | _this.redisClient.publish(topicNameClient,JSON.stringify({ 264 | state: 'kickedBannedIP', 265 | ip: client.remoteAddress, 266 | port: client.remotePort 267 | })) 268 | 269 | }).on('forgaveBannedIP', function(){ 270 | logger.info('Forgave banned IP ' + client.remoteAddress); 271 | _this.redisClient.publish(topicNameClient,JSON.stringify({ 272 | state: 'unbanned', 273 | ip: client.remoteAddress, 274 | port: client.remotePort 275 | })) 276 | 277 | }).on('unknownStratumMethod', function(fullMessage) { 278 | logger.error('Unknown stratum method from ' + client.getLabel() + ': ' + fullMessage.method); 279 | _this.redisClient.publish(topicNameClient,JSON.stringify({ 280 | state: 'unknownStratumMethod', 281 | ip: client.remoteAddress, 282 | port: client.remotePort 283 | })) 284 | 285 | }).on('socketFlooded', function() { 286 | logger.warn('Detected socket flooding from ' + client.getLabel()); 287 | const data = { 288 | state: 'socketFlooded', 289 | ip: client.remoteAddress, 290 | port: client.remotePort 291 | } 292 | _this.redisClient.publish(topicNameClient,JSON.stringify(data)) 293 | logger.info("send to channel "+topicNameClient+": "+JSON.stringify(data)) 294 | 295 | }).on('triggerBan', function(reason){ 296 | logger.info('Banned triggered for ' + client.getLabel() + ': ' + reason); 297 | _this.redisClient.publish(topicNameClient,JSON.stringify({ 298 | state: 'triggerBan', 299 | ip: client.remoteAddress, 300 | port: client.remotePort 301 | })) 302 | }); 303 | }).on('client.disconnected', function(client){ 304 | logger.info('Client ' + client.getLabel() + ' disconnected'); 305 | _this.redisClient.publish(topicNameClient,JSON.stringify({ 306 | state: 'disconnected', 307 | ip: client.remoteAddress, 308 | port: client.remotePort 309 | })) 310 | }); 311 | } 312 | }; 313 | pool.prototype.__proto__ = events.EventEmitter.prototype; 314 | 
const net = require('net');
const events = require('events');
const util = require('./util.js');

/**
 * Produces unique subscription ids for connected clients: a constant hex
 * padding followed by a little-endian 64-bit counter encoded as hex.
 * The counter wraps to 0 at Number.MAX_SAFE_INTEGER so it always stays an
 * exactly-representable integer.
 */
var SubscriptionCounter = function(){
    var count = 0;
    var padding = 'deadbeefcafebabe';
    return {
        next: function(){
            count++;
            if (Number.MAX_SAFE_INTEGER === count) count = 0;
            return padding + util.packInt64LE(count).toString('hex');
        }
    };
};


/**
 * Defining each client that connects to the stratum server.
 * Emits:
 *  - subscription(obj, cback(error, extraNonce1, extraNonce2Size))
 *  - submit(data(name, jobID, extraNonce2, ntime, nonce))
 **/
var StratumClient = function(params){
    var pendingDifficulty = null;
    //private members
    this.socket = params.socket;

    this.remoteAddress = params.socket.remoteAddress;
    this.remotePort = params.socket.remotePort;

    var banning = params.banning;

    var _this = this;

    this.lastActivity = Date.now();

    this.shares = {valid: 0, invalid: 0};

    // Tracks the valid/invalid share ratio. Once `checkThreshold` shares have
    // accumulated, either resets the counters (mostly valid) or emits
    // 'triggerBan' and destroys the socket. Returns true when the client was
    // just banned, so callers must not write a response to the dead socket.
    var considerBan = (!banning || !banning.enabled) ?
        function(){ return false } :
        function(shareValid){
            if (shareValid === true) _this.shares.valid++;
            else _this.shares.invalid++;
            var totalShares = _this.shares.valid + _this.shares.invalid;
            if (totalShares >= banning.checkThreshold){
                var percentBad = (_this.shares.invalid / totalShares) * 100;
                if (percentBad < banning.invalidPercent) //reset shares
                    _this.shares = {valid: 0, invalid: 0};
                else {
                    _this.emit('triggerBan', _this.shares.invalid + ' out of the last ' + totalShares + ' shares were invalid');
                    _this.socket.destroy();
                    return true;
                }
            }
            return false;
        };

    this.init = function init(){
        setupSocket();
    };

    // Dispatches a parsed stratum message by method name.
    function handleMessage(message){
        switch(message.method){
            case 'mining.submit':
                _this.lastActivity = Date.now();
                handleSubmit(message);
                break;
            case 'alph_submitHashrate':
                // hashrate reports are accepted silently
                break;
            case 'mining.subscribe':
                sendJson({
                    id: message.id,
                    result: "result"
                });
                break;
            case 'mining.authorize':
                sendJson({
                    id: message.id,
                    result: true
                });
                // Random 2-byte extranonce, hex-encoded and zero-padded to 4 chars.
                sendJson({
                    method: "mining.set_extranonce",
                    params: [(Math.floor(Math.random() * 65535)).toString(16).padStart(4, '0')],
                    id: null
                });
                break;
            default:
                _this.emit('unknownStratumMethod', message);
                break;
        }
    }

    // Forwards a share submission to the pool. Replies with the result unless
    // this submission just got the client banned (socket already destroyed).
    function handleSubmit(message){
        _this.emit('submit',
            message.params,
            function(error, result){
                if (!error && result){
                    _this.emit('submitAccepted');
                }
                if (!considerBan(result)){
                    sendJson({
                        id: message.id,
                        result: result,
                    });
                }
            }
        );
    }

    // Serializes each argument as one newline-terminated JSON message.
    function sendJson(){
        var response = '';
        for (var i = 0; i < arguments.length; i++){
            response += JSON.stringify(arguments[i]) + '\n';
        }
        params.socket.write(response);
    }

    function setupSocket(){
        var socket = params.socket;
        var dataBuffer = '';
        socket.setEncoding('utf8');

        _this.emit('checkBan');
        _this.sendDifficulty(params.initDiff);
        socket.on('data', function(d){
            dataBuffer += d;
            if (Buffer.byteLength(dataBuffer, 'utf8') > 10240){ //10KB
                dataBuffer = '';
                _this.emit('socketFlooded');
                socket.destroy();
                return;
            }
            if (dataBuffer.indexOf('\n') !== -1){
                var messages = dataBuffer.split('\n');
                // Keep a trailing partial message (no '\n' yet) for the next chunk.
                var incomplete = dataBuffer.slice(-1) === '\n' ? '' : messages.pop();
                for (var i = 0; i < messages.length; i++){
                    var message = messages[i];
                    if (message === '') continue;
                    var messageJson;
                    try {
                        messageJson = JSON.parse(message);
                    } catch(e) {
                        _this.emit('malformedMessage', message);
                        socket.destroy();
                        // Fix: abort processing the rest of the buffer. The
                        // previous forEach-based code only skipped the bad
                        // message and kept handling (and replying to) the
                        // remaining ones on a destroyed socket.
                        return;
                    }

                    if (messageJson) {
                        handleMessage(messageJson);
                    }
                }
                dataBuffer = incomplete;
            }
        });
        socket.on('close', function() {
            _this.emit('socketDisconnect');
        });
        socket.on('error', function(err){
            if (err.code !== 'ECONNRESET')
                _this.emit('socketError', err);
        });
    }

    this.getLabel = function(){
        return _this.remoteAddress + ':' + _this.remotePort;
    };

    // The queued difficulty takes effect on the next job broadcast
    // (see sendMiningJob).
    this.enqueueNextDifficulty = function(requestedNewDifficulty) {
        pendingDifficulty = requestedNewDifficulty;
    };

    //public members

    /**
     * IF the given difficulty is valid and new it'll send it to the client.
     * returns boolean
     **/
    this.sendDifficulty = function(difficulty){
        if (difficulty === _this.difficulty)
            return false;

        _this.previousDifficulty = _this.difficulty;
        _this.difficulty = difficulty;
        // diff1Target / difficulty with 1024x fixed-point scaling so the
        // bignum division stays integral (diff1Target is set up globally).
        _this.target = global.diff1Target.mul(1024).div(Math.ceil(_this.difficulty * 1024)).toBuffer().toString('hex');
        sendJson({
            id : null,
            method: "mining.set_difficulty",
            params: [difficulty],
        });
        return true;
    };

    this.sendMiningJob = function(templates){

        // Drop clients that have not submitted within the connection timeout.
        var lastActivityAgo = Date.now() - _this.lastActivity;
        if (lastActivityAgo > params.connectionTimeout * 1000){
            _this.emit('socketTimeout', 'last submitted a share was ' + (lastActivityAgo / 1000 | 0) + ' seconds ago');
            _this.socket.destroy();
            return;
        }

        // Apply any difficulty queued via enqueueNextDifficulty.
        if (pendingDifficulty !== null){
            var result = _this.sendDifficulty(pendingDifficulty);
            pendingDifficulty = null;
            if (result) {
                _this.emit('difficultyChanged', _this.difficulty);
            }
        }

        var jobParams = templates.map(template => {
            var p = template.getJobParams();
            p.targetBlob = _this.target;
            return p;
        });
        sendJson({
            id: null,
            method: "mining.notify",
            params: jobParams
        });
    };
};
StratumClient.prototype.__proto__ = events.EventEmitter.prototype;
/**
 * The actual stratum server.
 * It emits the following Events:
 *   - 'client.connected'(StratumClientInstance) - when a new miner connects
 *   - 'client.disconnected'(StratumClientInstance) - when a miner disconnects. Be aware that the socket cannot be used anymore.
 *   - 'started' - when the server is up and running
 **/
var StratumServer = exports.Server = function StratumServer(config){

    //private members

    // Ban duration is configured in seconds; keep a millisecond copy for all
    // elapsed-time comparisons.
    var bannedMS = config.banning ? config.banning.time * 1000 : null;
    var subscriptionCounter = SubscriptionCounter();

    var _this = this;
    this.connectionNumFromIP = {};
    this.stratumClients = {};
    this.bannedIPs = {};

    // Registers one more connection for `ipAddress`. Returns false when the
    // per-IP limit is exceeded; whitelisted IPs are never limited (and are
    // intentionally not counted).
    this.addConnectionFromIP = function(ipAddress){
        if (config.whitelistIps && config.whitelistIps.includes(ipAddress)){
            return true;
        }

        var connectionNum = _this.connectionNumFromIP[ipAddress];
        if (connectionNum){
            if (connectionNum >= config.maxConnectionsFromSameIP){
                return false;
            }
            _this.connectionNumFromIP[ipAddress] += 1;
            return true;
        }
        _this.connectionNumFromIP[ipAddress] = 1;
        return true;
    }

    // Decrements the connection count for `ipAddress`, deleting the entry
    // when it reaches zero so the map does not grow unboundedly.
    this.removeConnectionFromIP = function(ipAddress){
        var connectionNum = _this.connectionNumFromIP[ipAddress];
        if (connectionNum && connectionNum > 0){
            var num = connectionNum - 1;
            if (num == 0){
                delete _this.connectionNumFromIP[ipAddress];
                return;
            }
            _this.connectionNumFromIP[ipAddress] = num;
        }
    }

    // Returns [banned, remainingBanTimeMs]. remainingBanTimeMs may be <= 0
    // when the ban has expired but has not been purged yet.
    this.isBanned = function(address){
        if (config.banning && config.banning.enabled && address in _this.bannedIPs){
            var bannedTime = _this.bannedIPs[address];
            var bannedTimeAgo = Date.now() - bannedTime;
            var timeLeft = bannedMS - bannedTimeAgo;
            return [true, timeLeft];
        }
        return [false, null];
    }

    // Wraps a raw socket in a StratumClient, wires up ban bookkeeping, and
    // returns the new client's subscription id (or undefined when rejected
    // for too many connections from the same IP).
    this.handleNewClient = function(socket){
        if (!_this.addConnectionFromIP(socket.remoteAddress)){
            _this.emit('tooManyConnectionsFromSameIP', socket.remoteAddress);
            socket.destroy();
            return;
        }

        socket.setNoDelay(true);
        var subscriptionId = subscriptionCounter.next();
        var client = new StratumClient(
            {
                subscriptionId: subscriptionId,
                socket: socket,
                banning: config.banning,
                connectionTimeout: config.connectionTimeout,
                initDiff: config.pool.diff
            }
        );

        _this.stratumClients[subscriptionId] = client;
        _this.emit('client.connected', client);

        client.on('socketDisconnect', function() {
            _this.removeConnectionFromIP(client.remoteAddress);
            _this.removeClient(subscriptionId);
            _this.emit('client.disconnected', client);

        }).on('checkBan', function(){
            var [banned, timeLeft] = _this.isBanned(client.remoteAddress);
            if (!banned){
                return;
            }
            if (timeLeft > 0){
                // Still banned: kick immediately.
                client.socket.destroy();
                client.emit('kickedBannedIP', timeLeft / 1000 | 0);
            }
            else {
                // Ban expired: forgive and let the connection proceed.
                delete _this.bannedIPs[client.remoteAddress];
                client.emit('forgaveBannedIP');
            }

        }).on('triggerBan', function(){
            _this.addBannedIP(client.remoteAddress);

        }).init();

        return subscriptionId;
    };


    this.broadcastMiningJobs = function(jobs){
        for (var clientId in _this.stratumClients) {
            var client = _this.stratumClients[clientId];
            client.sendMiningJob(jobs);
        }
    };

    (function init(){

        //Interval to look through bannedIPs for old bans and remove them in order to prevent a memory leak
        if (config.banning && config.banning.enabled){
            _this.timer = setInterval(function(){
                // Fix: `ip` was an implicit global (missing `var`).
                for (var ip in _this.bannedIPs){
                    var banTime = _this.bannedIPs[ip];
                    // Fix: compare the elapsed milliseconds against bannedMS.
                    // The previous code compared against config.banning.time,
                    // which is in seconds, so bans were purged ~1000x too
                    // early and a "banned" client could reconnect almost
                    // immediately once the purge ran.
                    if (Date.now() - banTime > bannedMS)
                        delete _this.bannedIPs[ip];
                }
            }, 1000 * config.banning.purgeInterval);
        }

        var serverFactory = net.createServer;
        var options = {
            allowHalfOpen: false
        }
        if (config.pool.proxyProtocol) {
            // Unwrap PROXY-protocol headers so remoteAddress reflects the
            // real client behind the load balancer.
            const proxiedNet = require('findhit-proxywrap').proxy(net);
            serverFactory = proxiedNet.createServer;
        } else if (config.pool.tls) {
            var tls = require('tls');
            var fs = require('fs');
            serverFactory = tls.createServer
            options.key = fs.readFileSync(config.pool.privateKeyPath)
            options.cert = fs.readFileSync(config.pool.publicCertPath)
        }

        _this.server = serverFactory(options, function(socket) {
            _this.handleNewClient(socket);
        }).listen(config.pool.port, function() {
            _this.emit('started');
        });
    })();


    this.close = function(){
        clearInterval(_this.timer);
        _this.server.close();
    }

    this.addBannedIP = function(ipAddress){
        _this.bannedIPs[ipAddress] = Date.now();
    };

    this.removeClient = function(subscriptionId) {
        delete _this.stratumClients[subscriptionId];
    };
};
StratumServer.prototype.__proto__ = events.EventEmitter.prototype;
const Redis = require('ioredis');
const HttpClient = require('./httpClient');
const util = require('./util');
const { Pool } = require('pg');

// Not referenced in this module's visible code; presumably the maximum age
// (in blocks) of a ghost uncle -- TODO confirm against the daemon semantics.
const MaxGhostUncleAge = 7;

/**
 * Accumulates accepted shares in redis, optionally persists shares/blocks to
 * PostgreSQL, and periodically turns confirmed blocks (main-chain or ghost
 * uncle) into per-miner balance credits.
 */
var ShareProcessor = module.exports = function ShareProcessor(config, logger){
    var confirmationTime = config.confirmationTime * 1000; // seconds -> ms
    var rewardPercent = 1 - config.withholdPercent;        // pool keeps withholdPercent

    var _this = this;
    this.redisClient = new Redis(config.redis.port, config.redis.host, {db: config.redis.db});
    this.httpClient = new HttpClient(config.daemon.host, config.daemon.port, config.daemon.apiKey);

    // Creates the `shares` and `blocks` tables if missing. Exits the process
    // on failure: persistence was explicitly enabled, so running without it
    // would silently lose data.
    function createTables(db){
        var tables =
            `CREATE TABLE IF NOT EXISTS "shares" (
                "from_group" SMALLINT NOT NULL,
                "to_group" SMALLINT NOT NULL,
                "pool_diff" NUMERIC(13, 8) NOT NULL,
                "share_diff" NUMERIC(13, 8) NOT NULL,
                "worker" VARCHAR(64) NOT NULL,
                "found_block" BOOLEAN NOT NULL,
                "created_date" TIMESTAMP,
                "modified_date" TIMESTAMP,
                "id" SERIAL PRIMARY KEY
            );
            CREATE TABLE IF NOT EXISTS "blocks" (
                "share_id" INTEGER NOT NULL,
                "from_group" SMALLINT NOT NULL,
                "to_group" SMALLINT NOT NULL,
                "block_hash" CHAR(64) NOT NULL,
                "worker" VARCHAR(64) NOT NULL,
                "created_date" TIMESTAMP,
                "modified_date" TIMESTAMP,
                "id" SERIAL PRIMARY KEY
            );`;
        db.query(tables, function(error, _){
            if (error) {
                logger.error('Create table error: ' + error);
                process.exit(1);
            }
        });
    }

    // Inserts one share row; when the share found a block, also inserts a
    // blocks row linked via the returned share id. Errors are logged but not
    // retried (redis remains the source of truth for rewards).
    function persistShare(db, share){
        db.query(
            'INSERT INTO shares(from_group, to_group, pool_diff, share_diff, worker, found_block, created_date, modified_date) VALUES($1, $2, $3, $4, $5, $6, $7, $8) RETURNING id',
            [share.job.fromGroup, share.job.toGroup, share.difficulty, share.shareDiff, share.workerAddress, share.foundBlock, new Date(), new Date()],
            function(error, result){
                if (error) {
                    logger.error('Persist share error: ' + error);
                    return;
                }

                if (share.foundBlock){
                    var shareId = result.rows[0].id;
                    db.query(
                        'INSERT INTO blocks(share_id, from_group, to_group, block_hash, worker, created_date, modified_date) VALUES($1, $2, $3, $4, $5, $6, $7)',
                        [shareId, share.job.fromGroup, share.job.toGroup, share.blockHash, share.workerAddress, new Date(), new Date()],

                        function(error, _){
                            if (error) logger.error('Persist block error: ' + error);
                        }
                    );
                }
            }
        );
    }

    // handleShare either persists to Postgres first (persistence enabled) or
    // goes straight to the redis accounting path.
    if (config.persistence && config.persistence.enabled) {
        _this.db = new Pool(config.persistence);
        createTables(_this.db);
        _this.handleShare = function(share){
            persistShare(_this.db, share);
            _this._handleShare(share);
        }
    }
    else {
        _this.handleShare = share => _this._handleShare(share);
    }

    // Redis hash holding the in-progress round's shares for one chain index.
    this.currentRoundKey = function(fromGroup, toGroup){
        return fromGroup + ':' + toGroup + ':shares:currentRound';
    }

    // Redis hash holding the closed round's shares for a found block.
    this.roundKey = function(fromGroup, toGroup, blockHash){
        return fromGroup + ':' + toGroup + ':shares:' + blockHash;
    }

    var pendingBlocksKey = 'pendingBlocks';
    var foundBlocksKey = 'foundBlocks';
    var hashrateKey = 'hashrate';
    var balancesKey = 'balances';

    // Records one accepted share atomically (redis MULTI): bumps the worker's
    // share count for the current round and appends a hashrate sample. When
    // the share found a block, the current round is renamed to the block's
    // round key -- the hincrbyfloat earlier in the same MULTI guarantees the
    // source key exists, so RENAME cannot fail on a missing key.
    this._handleShare = function(share){
        var redisTx = _this.redisClient.multi();
        var currentMs = Date.now();
        var fromGroup = share.job.fromGroup;
        var toGroup = share.job.toGroup;
        var currentRound = _this.currentRoundKey(fromGroup, toGroup);
        redisTx.hincrbyfloat(currentRound, share.workerAddress, share.difficulty);

        var currentTs = Math.floor(currentMs / 1000);
        // NOTE(review): the hashrate member uses share.worker while the round
        // hash uses share.workerAddress -- looks intentional (worker id vs
        // payout address) but confirm against the pool's share object.
        redisTx.zadd(hashrateKey, currentTs, [fromGroup, toGroup, share.worker, share.difficulty, currentMs].join(':'));

        if (share.foundBlock){
            var blockHash = share.blockHash;
            var newKey = _this.roundKey(fromGroup, toGroup, blockHash);
            var blockWithTs = blockHash + ':' + currentMs.toString();

            redisTx.rename(currentRound, newKey);
            redisTx.sadd(pendingBlocksKey, blockWithTs);
            redisTx.hset(foundBlocksKey, blockHash, share.workerAddress)
        }
        redisTx.exec(function(error, _){
            if (error) logger.error('Handle share failed, error: ' + error);
        });
    }

    // Extracts the reward data for a confirmed main-chain block. The coinbase
    // is the last transaction and its first fixed output carries the miner
    // reward.
    function handleBlock(block, callback){
        var transactions = block.transactions;
        var rewardTx = transactions[transactions.length - 1];
        var rewardOutput = rewardTx.unsigned.fixedOutputs[0];
        var blockData = {
            hash: block.hash,
            fromGroup: block.chainFrom,
            toGroup: block.chainTo,
            height: block.height,
            rewardAmount: rewardOutput.attoAlphAmount // string
        };
        logger.debug('Main chain block: ' + JSON.stringify(blockData));
        callback(blockData);
    }

    // remove block shares and remove blockHash from pendingBlocks
    function removeBlockAndShares(fromGroup, toGroup, blockHash, blockHashWithTs){
        _this.redisClient
            .multi()
            .del(_this.roundKey(fromGroup, toGroup, blockHash))
            .srem(pendingBlocksKey, blockHashWithTs)
            .hdel(foundBlocksKey, blockHash)
            .exec(function(error, _){
                if (error) logger.error('Remove block shares failed, error: ' + error + ', blockHash: ' + blockHash);
            })
    }

    // Resolves the uncle reward for a ghost uncle block by looking up the
    // main-chain block that references it. Calls back with the reward amount
    // string, or null when the block turns out not to be an uncle (its shares
    // are then discarded) or on any other lookup error.
    function getUncleReward(ghostUncleHash, ghostUncleHashWithTs, callback) {
        _this.httpClient.getMainChainBlockByGhostUncle(ghostUncleHash, function (response) {
            if (response.error) {
                var errorMsg = `${response.error}`
                if (response.statusCode === 404 && errorMsg.includes(`The mainchain block that references the ghost uncle block ${ghostUncleHash} not found`)) {
                    // Orphaned for real: drop its shares so they are not kept forever.
                    logger.warn(`Block ${ghostUncleHash} is not a ghost uncle block`);
                    var [fromGroup, toGroup] = util.blockChainIndex(Buffer.from(ghostUncleHash, 'hex'));
                    removeBlockAndShares(fromGroup, toGroup, ghostUncleHash, ghostUncleHashWithTs);
                } else {
                    logger.error('Get main chain block error: ' + response.error + ', ghost uncle hash: ' + ghostUncleHash);
                }
                callback(null);
                return;
            }

            var transactions = response.transactions;
            var coinbaseTx = transactions[transactions.length - 1];
            // Uncle rewards follow the main reward in the coinbase outputs,
            // in the same order as the ghostUncles list (hence index + 1).
            var index = response.ghostUncles.findIndex((u) => u.blockHash === ghostUncleHash)
            var rewardOutput = coinbaseTx.unsigned.fixedOutputs[index + 1];
            var rewardAmount = rewardOutput.attoAlphAmount;
            logger.info('Found main chain block ' + response.hash + ', uncle reward: ' + rewardAmount);
            callback(rewardAmount);
        })
    }

    // Attempts to treat a block that is not in the main chain as a ghost
    // uncle; calls back with block data (including the uncle reward) or null.
    function tryHandleUncleBlock(ghostUncleHash, ghostUncleHashWithTs, callback) {
        logger.info('Try handling uncle block: ' + ghostUncleHash)
        _this.httpClient.getBlock(ghostUncleHash, function(ghostUncleBlock){
            if (ghostUncleBlock.error){
                logger.error('Get uncle block error: ' + ghostUncleBlock.error + ', hash: ' + ghostUncleHash);
                callback(null);
                return;
            }

            getUncleReward(ghostUncleHash, ghostUncleHashWithTs, function (uncleReward) {
                if (uncleReward) {
                    var blockData = {
                        hash: ghostUncleHash,
                        fromGroup: ghostUncleBlock.chainFrom,
                        toGroup: ghostUncleBlock.chainTo,
                        height: ghostUncleBlock.height,
                        rewardAmount: uncleReward // string
                    };
                    logger.debug('Ghost uncle block: ' + JSON.stringify(blockData));
                    callback(blockData);
                    return;
                }
                callback(null);
                return;
            })
        });
    }

    // Walks the pendingBlocks set and collects every block that is old enough
    // (reward unlocked) and confirmed -- either directly in the main chain or
    // as a ghost uncle. Blocks that fail lookups are skipped and retried on
    // the next scan.
    this.getPendingBlocks = function(results, callback){
        var blocksNeedToReward = [];
        util.executeForEach(results, function(blockHashWithTs, callback){
            var array = blockHashWithTs.split(':');
            var blockHash = array[0];
            var blockSubmitTs = parseInt(array[1]);
            var now = Date.now();

            if (now < (blockSubmitTs + confirmationTime)){
                // the block reward might be locked, skip and
                // try to reward in the next loop
                callback();
                return;
            }

            _this.httpClient.blockInMainChain(blockHash, function(result){
                if (result.error){
                    logger.error('Check block in main chain error: ' + result.error);
                    callback();
                    return;
                }

                if (!result){
                    // Not in the main chain: it may still pay out as an uncle.
                    tryHandleUncleBlock(blockHash, blockHashWithTs, function (uncleBlockData) {
                        if (uncleBlockData) {
                            var block = {
                                pendingBlockValue: blockHashWithTs,
                                ...uncleBlockData
                            };
                            blocksNeedToReward.push(block);
                        }
                        callback();
                        return;
                    });
                    return;
                }

                _this.httpClient.getBlock(blockHash, function(result){
                    if (result.error){
                        logger.error('Get block error: ' + result.error + ', hash: ' + blockHash);
                        callback();
                        return;
                    }

                    handleBlock(result, function(blockData){
                        var block = {
                            pendingBlockValue: blockHashWithTs,
                            ...blockData
                        };
                        blocksNeedToReward.push(block);
                        callback();
                    });
                });
            });
        }, _ => callback(blocksNeedToReward));
    }

    // Converts each rewardable block's round shares into worker balance
    // increments, then commits all increments and round cleanups in a single
    // redis MULTI so balances and round removal stay consistent.
    this.allocateRewards = function(blocks, callback){
        var workerRewards = {};
        var redisTx = _this.redisClient.multi();
        util.executeForEach(blocks, function(block, callback){
            allocateRewardForBlock(block, redisTx, workerRewards, callback);
        }, function(_){
            for (var worker in workerRewards){
                redisTx.hincrbyfloat(balancesKey, worker, workerRewards[worker]);
            }
            redisTx.exec(function(error, _){
                if (error) {
                    logger.error('Allocate rewards failed, error: ' + error);
                    callback(error);
                    return;
                }
                logger.debug('Rewards: ' + JSON.stringify(workerRewards));
                callback(null);
            });
        });
    }

    // Loads the round's shares and accumulates each worker's cut into
    // workerRewards; queues the round deletion on the shared MULTI.
    function allocateRewardForBlock(block, redisTx, workerRewards, callback){
        var round = _this.roundKey(block.fromGroup, block.toGroup, block.hash);
        _this.redisClient.hgetall(round, function(error, shares){
            if (error) {
                logger.error('Get shares failed, error: ' + error + ', round: ' + round);
                callback();
                return;
            }

            // FIXME(review): rewardAmount is an attoALPH string (~1e18+),
            // which exceeds Number.MAX_SAFE_INTEGER, so parseInt/Math.floor
            // lose precision here -- confirm whether BigInt should be used.
            var totalReward = Math.floor(parseInt(block.rewardAmount) * rewardPercent);
            logger.info('Reward miners for block: ' + block.hash + ', total reward: ' + totalReward);
            logger.debug('Block hash: ' + block.hash + ', shares: ' + JSON.stringify(shares));
            _this.allocateReward(totalReward, workerRewards, shares);

            redisTx.del(round);
            redisTx.srem(pendingBlocksKey, block.pendingBlockValue);
            logger.info('Remove shares for block: ' + block.hash);
            callback();
        });
    }

    // Splits totalReward (attoALPH) among workers proportionally to their
    // share counts; amounts are converted to ALPH before accumulating.
    this.allocateReward = function(totalReward, workerRewards, shares){
        var totalShare = Object.keys(shares).reduce(function(acc, worker){
            return acc + parseFloat(shares[worker]);
        }, 0);

        for (var worker in shares){
            var percent = parseFloat(shares[worker]) / totalShare;
            var workerReward = util.toALPH(totalReward * percent);
            if (workerRewards[worker]){
                workerRewards[worker] += workerReward;
            }
            else {
                workerRewards[worker] = workerReward;
            }
        }
    }

    // One reward pass: load pending blocks, resolve which are rewardable,
    // and credit balances. Errors are logged; the next interval retries.
    function scanBlocks(){
        _this.redisClient.smembers(pendingBlocksKey, function(err, results){
            if (err){
                logger.error('Get pending blocks failed, error: ' + err);
                return;
            }
            _this.getPendingBlocks(results, function(blocks){
                _this.allocateRewards(blocks, _ => {});
            });
        })
    }

    // Starts the periodic reward loop (no-op unless rewardEnabled).
    this.start = function(){
        if (config.rewardEnabled){
            setInterval(scanBlocks, config.rewardInterval * 1000);
        }
    }
}
const RedisMock = require('ioredis-mock');
const { randomBytes } = require('crypto');
const { expect, assert } = require('chai');
const nock = require('nock');
const PaymentProcessor = require('../lib/paymentProcessor');
const util = require('../lib/util');
const test = require('./test');

// Unit tests for PaymentProcessor: balance grouping by address group,
// transaction preparation (utxo selection, gas estimation, splitting), and
// the redis bookkeeping around submitting/confirming/failing payout txs.
describe('test payment processor', function(){
    var redisClient;
    this.beforeEach(function(){
        redisClient = new RedisMock();
    })

    this.afterEach(function(){
        redisClient.disconnect();
    })

    it('should group miner balances by groupIndex', function(){
        var paymentProcessor = new PaymentProcessor(test.config, test.logger);
        var balances = {
            '1GQoT6oDKfi18m5JyCvKu9EBx4iiy6ie7cHw51NuF3idh': '4', // groupIndex: 0
            '1H59e6Sa2WwfsPqbobmRVGUBHdHAH7ux4c1bDx3LHMFiB': '4', // groupIndex: 0
            '1FTkWfUJmERVYN6iV1jSNUkebQPS2g4xy4e3ETNB9N6Kg': '4', // groupIndex: 1
            '1EK5p5d18z4skYB9VMNiuXYQHzF6MH5QqJ1uqQfJF2TFE': '4', // groupIndex: 3
            '1BncLrXD7fr9acVkETsxoNghyXLAXnxYSmLm8czVpSJ6u': '3'  // groupIndex: 3
        };
        var groupedBalances = paymentProcessor.grouping(balances);

        // NOTE(review): the last group-3 address (amount '3') is absent from
        // the expected output -- presumably filtered by a minimum-payment
        // threshold in test.config; confirm against grouping().
        expect(groupedBalances).to.deep.equal([
            {
                group: '0',
                balances: [
                    {
                        address: '1GQoT6oDKfi18m5JyCvKu9EBx4iiy6ie7cHw51NuF3idh',
                        amount: 4
                    },
                    {
                        address: '1H59e6Sa2WwfsPqbobmRVGUBHdHAH7ux4c1bDx3LHMFiB',
                        amount: 4
                    }
                ]
            },
            {
                group: '1',
                balances: [
                    {
                        address:'1FTkWfUJmERVYN6iV1jSNUkebQPS2g4xy4e3ETNB9N6Kg',
                        amount: 4
                    }
                ]
            },
            {
                group: '3',
                balances: [
                    {
                        address: '1EK5p5d18z4skYB9VMNiuXYQHzF6MH5QqJ1uqQfJF2TFE',
                        amount: 4
                    }
                ]
            }
        ]);
    })

    function randomHex(size){
        return randomBytes(size).toString('hex');
    }

    function randomInt(){
        return Math.floor(Math.random() * Math.pow(2, 32));
    }

    // Builds `num` spendable utxos of `amount` ALPH each; lockTime is set in
    // the past so they are unlocked by default.
    function generateUtxos(num, amount){
        var utxos = [];
        while (utxos.length < num){
            utxos.push({
                ref: {key: randomHex(8), hint: randomInt()},
                amount: util.fromALPH(amount).toString(10),
                lockTime: Date.now() - 1000
            });
        }
        return utxos;
    }

    // Builds a grouped-balances entry with `num` random addresses owing
    // `amount` each; groupIndex defaults to 0.
    function generateBalances(num, amount, groupIndex){
        var balances = [];
        while (balances.length < num){
            balances.push({
                address: randomHex(32),
                amount: amount.toString()
            });
        }
        return {
            group: groupIndex ? groupIndex : 0,
            balances: balances
        };
    }

    // Mirrors PaymentProcessor's expected tx layout: gas formula
    // (base 1000 + 2000/input + 4500/output + 2060 p2pk unlock, min 20000),
    // inputs from the selected utxos, one destination per balance.
    function expectedTxData(fromPublicKey, utxos, balances){
        var inputNum = utxos.length;
        var outputNum = balances.length + 1; // change output
        var expectedGasAmount = 1000 + // txBaseGas
            inputNum * 2000 + // inputs gas
            outputNum * 4500 + // output gas
            2060; // p2pk unlock gas
        var expectedChangedBalances = {};
        for (var idx in balances){
            var balance = balances[idx];
            expectedChangedBalances[balance.address] = balance.amount;
        }
        var expectedInputs = utxos.map(utxo => utxo.ref);
        var expectedDestinations = balances.map(e => ({
            address: e.address,
            attoAlphAmount: util.fromALPH(e.amount).toString()
        }));
        return {
            fromPublicKey: fromPublicKey,
            gasAmount: Math.max(expectedGasAmount, 20000),
            inputs: expectedInputs,
            destinations: expectedDestinations,
            changedBalances: expectedChangedBalances
        }
    }

    var fromPublicKey = randomHex(64);

    it('should prepare transaction succeed', function(){
        var utxos = generateUtxos(40, 3);
        var balances = generateBalances(30, 3.1);
        var payment = new PaymentProcessor(test.config, test.logger);

        var expected = expectedTxData(fromPublicKey, utxos.slice(0, 32), balances.balances);
        var txsDatas = payment.prepareTransactions(fromPublicKey, utxos, [balances]);

        expect(txsDatas.length).equal(1);
        expect(txsDatas[0]).to.deep.equal(expected);
        // prepareTransactions drains the balances it pays out.
        expect(balances.balances.length).equal(0);
    })

    it('should prepare multi transactions if there are too many miners', function(){
        var balances = generateBalances(138, 1);
        var utxos = generateUtxos(3, 150);
        var payment = new PaymentProcessor(test.config, test.logger);

        // 136 destinations is the per-tx output cap here; the remainder goes
        // into a second tx funded by the next utxo.
        var expectedTx1 = expectedTxData(fromPublicKey, utxos.slice(0, 1), balances.balances.slice(0, 136));
        var expectedTx2 = expectedTxData(fromPublicKey, utxos.slice(1, 2), balances.balances.slice(136));
        var txsDatas = payment.prepareTransactions(fromPublicKey, utxos, [balances]);

        expect(txsDatas.length).equal(2);
        expect(expectedTx1).to.deep.equal(txsDatas[0]);
        expect(expectedTx2).to.deep.equal(txsDatas[1]);
        expect(balances.balances.length).equal(0);
    })

    it('should prepare transactions failed if no enough utxos for transfer', function(){
        var balances = generateBalances(1, 10).balances;
        var utxos = generateUtxos(5, 1);
        var payment = new PaymentProcessor(test.config, test.logger);
        var txData = payment.prepareTransaction(fromPublicKey, utxos, balances);

        expect(txData.error).equal('not enough utxos for transfer, will try to transfer later');
        // Failure must leave both inputs untouched for the next attempt.
        expect(utxos.length).equal(5);
        expect(balances.length).equal(1);
    })

    it('should pop destinations if no enough utxos for tx fee', function(){
        var balances = generateBalances(2, 5).balances;
        var utxos = generateUtxos(5, 2);
        var payment = new PaymentProcessor(test.config, test.logger);
        // Only the first destination fits once the fee is accounted for.
        var expectedTx = expectedTxData(fromPublicKey, utxos.slice(), balances.slice(0, 1));
        var txData = payment.prepareTransaction(fromPublicKey, utxos, balances);
        expect(expectedTx).to.deep.equal(txData);
    })

    it('should prepare transactions failed if no enough utxos for tx fee', function(){
        var balances = generateBalances(1, 10).balances;
        var utxos = generateUtxos(5, 2);
        var payment = new PaymentProcessor(test.config, test.logger);
        var txData = payment.prepareTransaction(fromPublicKey, utxos, balances);

        expect(txData.error).equal('not enough utxos for tx fee, will try to transfer later');
        expect(utxos.length).equal(5);
        expect(balances.length).equal(1);
    })

    it('should prepare transactions failed if utxos is still locked', function(){
        var balances = generateBalances(10, 2);
        var utxos = generateUtxos(2, 15);
        // Force every utxo to be time-locked in the future.
        utxos.forEach(utxo => utxo.lockTime = Date.now() + 1000);
        var payment = new PaymentProcessor(test.config, test.logger);

        var remainBalances = balances.balances.slice(0);
        var txsDatas = payment.prepareTransactions(fromPublicKey, utxos, [balances]);

        expect(txsDatas.length).equal(0);
        expect(balances.balances).to.deep.equal(remainBalances);
    })

    it('should prepare transactions for multiple groups', function(){
        var utxos = generateUtxos(10, 2);
        var group0 = generateBalances(1, 21, 0); // cannot be funded (21 > 20 available)
        var group1 = generateBalances(2, 4, 1);
        var payment = new PaymentProcessor(test.config, test.logger);
        var expectedTx = expectedTxData(fromPublicKey, utxos.slice(0, 5), group1.balances);
        var txsDatas = payment.prepareTransactions(fromPublicKey, utxos, [group0, group1]);

        expect(txsDatas.length).equal(1);
        expect(txsDatas[0]).to.deep.equal(expectedTx);
        expect(utxos.length).equal(5);
    })

    it('should estimate gas fee correctly', function(){
        var utxos = generateUtxos(10, 2);
        var group0 = generateBalances(1, 1, 0);
        var group1 = generateBalances(2, 4, 1);
        var payment = new PaymentProcessor(test.config, test.logger);
        var txsDatas = payment.prepareTransactions(fromPublicKey, utxos, [group0, group1]);

        expect(txsDatas.length).equal(2);
        expect(txsDatas[0].gasAmount).equal(20000); // below minimum -> clamped
        expect(txsDatas[1].gasAmount).equal(26560);
    })

    it('should lock rewards before submit tx', function(done){
        var payment = new PaymentProcessor(test.config, test.logger);
        payment.redisClient = redisClient;

        var txId = randomHex(32);
        // Stub the node's submit endpoint so the payment path completes.
        nock('http://127.0.0.1:12973')
            .post('/transactions/submit', body => body.signature && body.unsignedTx)
            .reply(200, {txId: txId});

        // Verifies that the tx id is recorded, paid amounts moved out of
        // 'balances', and the moved amounts locked under txRewardsKey(txId).
        var checkState = function(txId, remainBalances, lockedBalances){
            var lockedRewardsKey = payment.txRewardsKey(txId)
            redisClient
                .multi()
                .smembers('transactions')
                .hgetall('balances')
                .hgetall(lockedRewardsKey)
                .exec(function(error, results){
                    if (error) assert.fail('Test error: ' + error);
                    var txs = results[0][1];
                    var remain = results[1][1];
                    var lockedRewards = results[2][1];
                    Object.keys(lockedRewards).forEach(address => {
                        lockedRewards[address] = parseFloat(lockedRewards[address]);
                    });

                    expect(txs).to.deep.equal([txId]);
                    expect(remain).to.deep.equal(remainBalances);
                    expect(lockedRewards).to.deep.equal(lockedBalances);
                    done();
                });
        }

        // Seeds 10 balances of `amount`, of which `changedAmount` per address
        // will be paid in the tx.
        var prepare = function(amount, changedAmount, callback){
            var initBalances = generateBalances(10, amount).balances;
            var changedBalances = {}, remainBalances = {};
            var redisTx = redisClient.multi();
            for (var idx in initBalances){
                var balance = initBalances[idx];
                redisTx.hincrbyfloat('balances', balance.address, parseFloat(balance.amount));
                changedBalances[balance.address] = changedAmount;
                remainBalances[balance.address] = (amount - changedAmount).toString();
            }

            redisTx.exec(function(error, _){
                if (error) assert.fail('Test error: ' + error);
                callback(changedBalances, remainBalances);
            });
        }

        prepare(12, 2, function(changedBalances, remainBalances){
            var signedTx = {
                txId: txId, unsignedTx: randomHex(12), signature: randomHex(12), changedBalances: changedBalances
            };

            payment.lockRewardsAndSubmitTx(signedTx, function(){
                checkState(txId, remainBalances, changedBalances);
            });
        })
    })

    it('should remove locked rewards when tx confirmed', function(done){
        var payment = new PaymentProcessor(test.config, test.logger);
        payment.redisClient = redisClient;

        var txId = randomHex(32);
        var lockRewardsKey = payment.txRewardsKey(txId);
        // Seeds a pending tx with locked rewards per address.
        var prepare = function(amount, changedAmount, callback){
            var initBalances = generateBalances(10, amount).balances;
            var redisTx = redisClient.multi();
            redisTx.sadd('transactions', txId);
            for (var idx in initBalances){
                var balance = initBalances[idx];
                redisTx.hincrbyfloat('balances', balance.address, parseFloat(balance.amount));
                redisTx.hset(lockRewardsKey, balance.address, changedAmount);
            }

            redisTx.exec(function(error, _){
                if (error) assert.fail('Test error: ' + error);
                callback(initBalances);
            });
        }

        // On confirmation the tx and its locked rewards are discarded;
        // balances are untouched (they were already reduced at lock time).
        var checkState = function(expectedBalances){
            redisClient.multi()
                .smembers('transactions')
                .hgetall(lockRewardsKey)
                .hgetall('balances')
                .exec(function(error, results){
                    if (error) assert.fail('Test error: ' + error);
                    var txs = results[0][1];
                    var lockedRewards = results[1][1];
                    var balances = results[2][1];

                    expect(txs.length).equal(0);
                    expect(lockedRewards).to.deep.equal({});
                    expect(balances).to.deep.equal(expectedBalances);
                    done();
                })
        }

        prepare(12, 2, function(balances){
            var expectedBalances = {};
            for (var idx in balances){
                var balance = balances[idx];
                expectedBalances[balance.address] = balance.amount;
            }
            payment.onTxConfirmed(txId, _ => checkState(expectedBalances));
        });
    })

    it('should unlock rewards when tx failed', function(done){
        var payment = new PaymentProcessor(test.config, test.logger);
        payment.redisClient = redisClient;

        var txId = randomHex(32);
        var lockRewardsKey = payment.txRewardsKey(txId);
        var prepare = function(amount, changedAmount, callback){
            var initBalances = generateBalances(10, amount).balances;
            var unlockedBalances = {};
            var redisTx = redisClient.multi();
            redisTx.sadd('transactions', txId);
            for (var idx in initBalances){
                var balance = initBalances[idx];
                redisTx.hincrbyfloat('balances', balance.address, parseFloat(balance.amount));
                redisTx.hset(lockRewardsKey, balance.address, changedAmount);
                // On failure the locked amount must be credited back.
                unlockedBalances[balance.address] = (amount + changedAmount).toString();
            }

            redisTx.exec(function(error, _){
                if (error) assert.fail('Test error: ' + error);
                callback(unlockedBalances);
            });
        }

        var checkState = function(expectedBalances){
            redisClient.multi()
                .smembers('transactions')
                .hgetall(lockRewardsKey)
                .hgetall('balances')
                .exec(function(error, results){
                    if (error) assert.fail('Test error: ' + error);
                    var txs = results[0][1];
                    var lockedRewards = results[1][1];
                    var balances = results[2][1];

                    expect(txs.length).equal(0);
                    expect(lockedRewards).to.deep.equal({});
                    expect(balances).to.deep.equal(expectedBalances);
                    done();
                })
        }

        prepare(12, 2, function(balances){
            payment.onTxFailed(txId, _ => checkState(balances));
        });
    })
})
redisTx.sadd('transactions', txId); 345 | for (var idx in initBalances){ 346 | var balance = initBalances[idx]; 347 | redisTx.hincrbyfloat('balances', balance.address, parseFloat(balance.amount)); 348 | redisTx.hset(lockRewardsKey, balance.address, changedAmount); 349 | unlockedBalances[balance.address] = (amount + changedAmount).toString(); 350 | } 351 | 352 | redisTx.exec(function(error, _){ 353 | if (error) assert.fail('Test error: ' + error); 354 | callback(unlockedBalances); 355 | }); 356 | } 357 | 358 | var checkState = function(expectedBalances){ 359 | redisClient.multi() 360 | .smembers('transactions') 361 | .hgetall(lockRewardsKey) 362 | .hgetall('balances') 363 | .exec(function(error, results){ 364 | if (error) assert.fail('Test error: ' + error); 365 | var txs = results[0][1]; 366 | var lockedRewards = results[1][1]; 367 | var balances = results[2][1]; 368 | 369 | expect(txs.length).equal(0); 370 | expect(lockedRewards).to.deep.equal({}); 371 | expect(balances).to.deep.equal(expectedBalances); 372 | done(); 373 | }) 374 | } 375 | 376 | prepare(12, 2, function(balances){ 377 | payment.onTxFailed(txId, _ => checkState(balances)); 378 | }); 379 | }) 380 | }) 381 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | GNU GENERAL PUBLIC LICENSE 2 | Version 2, June 1991 3 | 4 | Copyright (C) 1989, 1991 Free Software Foundation, Inc., 5 | 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA 6 | Everyone is permitted to copy and distribute verbatim copies 7 | of this license document, but changing it is not allowed. 8 | 9 | Preamble 10 | 11 | The licenses for most software are designed to take away your 12 | freedom to share and change it. By contrast, the GNU General Public 13 | License is intended to guarantee your freedom to share and change free 14 | software--to make sure the software is free for all its users. 
This 15 | General Public License applies to most of the Free Software 16 | Foundation's software and to any other program whose authors commit to 17 | using it. (Some other Free Software Foundation software is covered by 18 | the GNU Lesser General Public License instead.) You can apply it to 19 | your programs, too. 20 | 21 | When we speak of free software, we are referring to freedom, not 22 | price. Our General Public Licenses are designed to make sure that you 23 | have the freedom to distribute copies of free software (and charge for 24 | this service if you wish), that you receive source code or can get it 25 | if you want it, that you can change the software or use pieces of it 26 | in new free programs; and that you know you can do these things. 27 | 28 | To protect your rights, we need to make restrictions that forbid 29 | anyone to deny you these rights or to ask you to surrender the rights. 30 | These restrictions translate to certain responsibilities for you if you 31 | distribute copies of the software, or if you modify it. 32 | 33 | For example, if you distribute copies of such a program, whether 34 | gratis or for a fee, you must give the recipients all the rights that 35 | you have. You must make sure that they, too, receive or can get the 36 | source code. And you must show them these terms so they know their 37 | rights. 38 | 39 | We protect your rights with two steps: (1) copyright the software, and 40 | (2) offer you this license which gives you legal permission to copy, 41 | distribute and/or modify the software. 42 | 43 | Also, for each author's protection and ours, we want to make certain 44 | that everyone understands that there is no warranty for this free 45 | software. If the software is modified by someone else and passed on, we 46 | want its recipients to know that what they have is not the original, so 47 | that any problems introduced by others will not reflect on the original 48 | authors' reputations. 
49 | 50 | Finally, any free program is threatened constantly by software 51 | patents. We wish to avoid the danger that redistributors of a free 52 | program will individually obtain patent licenses, in effect making the 53 | program proprietary. To prevent this, we have made it clear that any 54 | patent must be licensed for everyone's free use or not licensed at all. 55 | 56 | The precise terms and conditions for copying, distribution and 57 | modification follow. 58 | 59 | GNU GENERAL PUBLIC LICENSE 60 | TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION 61 | 62 | 0. This License applies to any program or other work which contains 63 | a notice placed by the copyright holder saying it may be distributed 64 | under the terms of this General Public License. The "Program", below, 65 | refers to any such program or work, and a "work based on the Program" 66 | means either the Program or any derivative work under copyright law: 67 | that is to say, a work containing the Program or a portion of it, 68 | either verbatim or with modifications and/or translated into another 69 | language. (Hereinafter, translation is included without limitation in 70 | the term "modification".) Each licensee is addressed as "you". 71 | 72 | Activities other than copying, distribution and modification are not 73 | covered by this License; they are outside its scope. The act of 74 | running the Program is not restricted, and the output from the Program 75 | is covered only if its contents constitute a work based on the 76 | Program (independent of having been made by running the Program). 77 | Whether that is true depends on what the Program does. 78 | 79 | 1. 
You may copy and distribute verbatim copies of the Program's 80 | source code as you receive it, in any medium, provided that you 81 | conspicuously and appropriately publish on each copy an appropriate 82 | copyright notice and disclaimer of warranty; keep intact all the 83 | notices that refer to this License and to the absence of any warranty; 84 | and give any other recipients of the Program a copy of this License 85 | along with the Program. 86 | 87 | You may charge a fee for the physical act of transferring a copy, and 88 | you may at your option offer warranty protection in exchange for a fee. 89 | 90 | 2. You may modify your copy or copies of the Program or any portion 91 | of it, thus forming a work based on the Program, and copy and 92 | distribute such modifications or work under the terms of Section 1 93 | above, provided that you also meet all of these conditions: 94 | 95 | a) You must cause the modified files to carry prominent notices 96 | stating that you changed the files and the date of any change. 97 | 98 | b) You must cause any work that you distribute or publish, that in 99 | whole or in part contains or is derived from the Program or any 100 | part thereof, to be licensed as a whole at no charge to all third 101 | parties under the terms of this License. 102 | 103 | c) If the modified program normally reads commands interactively 104 | when run, you must cause it, when started running for such 105 | interactive use in the most ordinary way, to print or display an 106 | announcement including an appropriate copyright notice and a 107 | notice that there is no warranty (or else, saying that you provide 108 | a warranty) and that users may redistribute the program under 109 | these conditions, and telling the user how to view a copy of this 110 | License. (Exception: if the Program itself is interactive but 111 | does not normally print such an announcement, your work based on 112 | the Program is not required to print an announcement.) 
113 | 114 | These requirements apply to the modified work as a whole. If 115 | identifiable sections of that work are not derived from the Program, 116 | and can be reasonably considered independent and separate works in 117 | themselves, then this License, and its terms, do not apply to those 118 | sections when you distribute them as separate works. But when you 119 | distribute the same sections as part of a whole which is a work based 120 | on the Program, the distribution of the whole must be on the terms of 121 | this License, whose permissions for other licensees extend to the 122 | entire whole, and thus to each and every part regardless of who wrote it. 123 | 124 | Thus, it is not the intent of this section to claim rights or contest 125 | your rights to work written entirely by you; rather, the intent is to 126 | exercise the right to control the distribution of derivative or 127 | collective works based on the Program. 128 | 129 | In addition, mere aggregation of another work not based on the Program 130 | with the Program (or with a work based on the Program) on a volume of 131 | a storage or distribution medium does not bring the other work under 132 | the scope of this License. 133 | 134 | 3. 
You may copy and distribute the Program (or a work based on it, 135 | under Section 2) in object code or executable form under the terms of 136 | Sections 1 and 2 above provided that you also do one of the following: 137 | 138 | a) Accompany it with the complete corresponding machine-readable 139 | source code, which must be distributed under the terms of Sections 140 | 1 and 2 above on a medium customarily used for software interchange; or, 141 | 142 | b) Accompany it with a written offer, valid for at least three 143 | years, to give any third party, for a charge no more than your 144 | cost of physically performing source distribution, a complete 145 | machine-readable copy of the corresponding source code, to be 146 | distributed under the terms of Sections 1 and 2 above on a medium 147 | customarily used for software interchange; or, 148 | 149 | c) Accompany it with the information you received as to the offer 150 | to distribute corresponding source code. (This alternative is 151 | allowed only for noncommercial distribution and only if you 152 | received the program in object code or executable form with such 153 | an offer, in accord with Subsection b above.) 154 | 155 | The source code for a work means the preferred form of the work for 156 | making modifications to it. For an executable work, complete source 157 | code means all the source code for all modules it contains, plus any 158 | associated interface definition files, plus the scripts used to 159 | control compilation and installation of the executable. However, as a 160 | special exception, the source code distributed need not include 161 | anything that is normally distributed (in either source or binary 162 | form) with the major components (compiler, kernel, and so on) of the 163 | operating system on which the executable runs, unless that component 164 | itself accompanies the executable. 
165 | 166 | If distribution of executable or object code is made by offering 167 | access to copy from a designated place, then offering equivalent 168 | access to copy the source code from the same place counts as 169 | distribution of the source code, even though third parties are not 170 | compelled to copy the source along with the object code. 171 | 172 | 4. You may not copy, modify, sublicense, or distribute the Program 173 | except as expressly provided under this License. Any attempt 174 | otherwise to copy, modify, sublicense or distribute the Program is 175 | void, and will automatically terminate your rights under this License. 176 | However, parties who have received copies, or rights, from you under 177 | this License will not have their licenses terminated so long as such 178 | parties remain in full compliance. 179 | 180 | 5. You are not required to accept this License, since you have not 181 | signed it. However, nothing else grants you permission to modify or 182 | distribute the Program or its derivative works. These actions are 183 | prohibited by law if you do not accept this License. Therefore, by 184 | modifying or distributing the Program (or any work based on the 185 | Program), you indicate your acceptance of this License to do so, and 186 | all its terms and conditions for copying, distributing or modifying 187 | the Program or works based on it. 188 | 189 | 6. Each time you redistribute the Program (or any work based on the 190 | Program), the recipient automatically receives a license from the 191 | original licensor to copy, distribute or modify the Program subject to 192 | these terms and conditions. You may not impose any further 193 | restrictions on the recipients' exercise of the rights granted herein. 194 | You are not responsible for enforcing compliance by third parties to 195 | this License. 196 | 197 | 7. 
If, as a consequence of a court judgment or allegation of patent 198 | infringement or for any other reason (not limited to patent issues), 199 | conditions are imposed on you (whether by court order, agreement or 200 | otherwise) that contradict the conditions of this License, they do not 201 | excuse you from the conditions of this License. If you cannot 202 | distribute so as to satisfy simultaneously your obligations under this 203 | License and any other pertinent obligations, then as a consequence you 204 | may not distribute the Program at all. For example, if a patent 205 | license would not permit royalty-free redistribution of the Program by 206 | all those who receive copies directly or indirectly through you, then 207 | the only way you could satisfy both it and this License would be to 208 | refrain entirely from distribution of the Program. 209 | 210 | If any portion of this section is held invalid or unenforceable under 211 | any particular circumstance, the balance of the section is intended to 212 | apply and the section as a whole is intended to apply in other 213 | circumstances. 214 | 215 | It is not the purpose of this section to induce you to infringe any 216 | patents or other property right claims or to contest validity of any 217 | such claims; this section has the sole purpose of protecting the 218 | integrity of the free software distribution system, which is 219 | implemented by public license practices. Many people have made 220 | generous contributions to the wide range of software distributed 221 | through that system in reliance on consistent application of that 222 | system; it is up to the author/donor to decide if he or she is willing 223 | to distribute software through any other system and a licensee cannot 224 | impose that choice. 225 | 226 | This section is intended to make thoroughly clear what is believed to 227 | be a consequence of the rest of this License. 228 | 229 | 8. 
If the distribution and/or use of the Program is restricted in 230 | certain countries either by patents or by copyrighted interfaces, the 231 | original copyright holder who places the Program under this License 232 | may add an explicit geographical distribution limitation excluding 233 | those countries, so that distribution is permitted only in or among 234 | countries not thus excluded. In such case, this License incorporates 235 | the limitation as if written in the body of this License. 236 | 237 | 9. The Free Software Foundation may publish revised and/or new versions 238 | of the General Public License from time to time. Such new versions will 239 | be similar in spirit to the present version, but may differ in detail to 240 | address new problems or concerns. 241 | 242 | Each version is given a distinguishing version number. If the Program 243 | specifies a version number of this License which applies to it and "any 244 | later version", you have the option of following the terms and conditions 245 | either of that version or of any later version published by the Free 246 | Software Foundation. If the Program does not specify a version number of 247 | this License, you may choose any version ever published by the Free Software 248 | Foundation. 249 | 250 | 10. If you wish to incorporate parts of the Program into other free 251 | programs whose distribution conditions are different, write to the author 252 | to ask for permission. For software which is copyrighted by the Free 253 | Software Foundation, write to the Free Software Foundation; we sometimes 254 | make exceptions for this. Our decision will be guided by the two goals 255 | of preserving the free status of all derivatives of our free software and 256 | of promoting the sharing and reuse of software generally. 257 | 258 | NO WARRANTY 259 | 260 | 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY 261 | FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. 
EXCEPT WHEN 262 | OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES 263 | PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED 264 | OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF 265 | MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS 266 | TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE 267 | PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, 268 | REPAIR OR CORRECTION. 269 | 270 | 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING 271 | WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR 272 | REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, 273 | INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING 274 | OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED 275 | TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY 276 | YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER 277 | PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE 278 | POSSIBILITY OF SUCH DAMAGES. 279 | 280 | END OF TERMS AND CONDITIONS 281 | 282 | How to Apply These Terms to Your New Programs 283 | 284 | If you develop a new program, and you want it to be of the greatest 285 | possible use to the public, the best way to achieve this is to make it 286 | free software which everyone can redistribute and change under these terms. 287 | 288 | To do so, attach the following notices to the program. It is safest 289 | to attach them to the start of each source file to most effectively 290 | convey the exclusion of warranty; and each file should have at least 291 | the "copyright" line and a pointer to where the full notice is found. 
292 | 293 | {description} 294 | Copyright (C) {year} {fullname} 295 | 296 | This program is free software; you can redistribute it and/or modify 297 | it under the terms of the GNU General Public License as published by 298 | the Free Software Foundation; either version 2 of the License, or 299 | (at your option) any later version. 300 | 301 | This program is distributed in the hope that it will be useful, 302 | but WITHOUT ANY WARRANTY; without even the implied warranty of 303 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 304 | GNU General Public License for more details. 305 | 306 | You should have received a copy of the GNU General Public License along 307 | with this program; if not, write to the Free Software Foundation, Inc., 308 | 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 309 | 310 | Also add information on how to contact you by electronic and paper mail. 311 | 312 | If the program is interactive, make it output a short notice like this 313 | when it starts in an interactive mode: 314 | 315 | Gnomovision version 69, Copyright (C) year name of author 316 | Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. 317 | This is free software, and you are welcome to redistribute it 318 | under certain conditions; type `show c' for details. 319 | 320 | The hypothetical commands `show w' and `show c' should show the appropriate 321 | parts of the General Public License. Of course, the commands you use may 322 | be called something other than `show w' and `show c'; they could even be 323 | mouse-clicks or menu items--whatever suits your program. 324 | 325 | You should also get your employer (if you work as a programmer) or your 326 | school, if any, to sign a "copyright disclaimer" for the program, if 327 | necessary. Here is a sample; alter the names: 328 | 329 | Yoyodyne, Inc., hereby disclaims all copyright interest in the program 330 | `Gnomovision' (which makes passes at compilers) written by James Hacker. 
331 | 332 | {signature of Ty Coon}, 1 April 1989 333 | Ty Coon, President of Vice 334 | 335 | This General Public License does not permit incorporating your program into 336 | proprietary programs. If your program is a subroutine library, you may 337 | consider it more useful to permit linking proprietary applications with the 338 | library. If this is what you want to do, use the GNU Lesser General 339 | Public License instead of this License. 340 | -------------------------------------------------------------------------------- /lib/paymentProcessor.js: -------------------------------------------------------------------------------- 1 | const Redis = require('ioredis'); 2 | const HttpClient = require('./httpClient'); 3 | const util = require('./util'); 4 | const constants = require('./constants'); 5 | 6 | var PaymentProcessor = module.exports = function PaymentProcessor(config, logger){ 7 | var balancesKey = "balances"; 8 | var transactionsKey = "transactions"; 9 | var addressGroupCache = {}; 10 | var minPaymentCoins = parseFloat(config.minPaymentCoins); 11 | 12 | // gas constants 13 | const maxGasPerTx = 625000; 14 | const minimumGas = 20000; 15 | const gasPerInput = 2000; 16 | const gasPerOutput = 4500; 17 | const txBaseGas = 1000; 18 | const p2pkUnlockGas = 2060; 19 | const defaultGasFee = 100000000000; 20 | 21 | var _this = this; 22 | this.addressInfo = []; 23 | this.redisClient = new Redis(config.redis.port, config.redis.host, {db: config.redis.db}); 24 | this.httpClient = new HttpClient(config.daemon.host, config.daemon.port, config.daemon.apiKey); 25 | 26 | function getUtxoForTransfer(utxos, amount, now){ 27 | var sum = 0; 28 | var selected = []; 29 | while (utxos.length > 0){ 30 | if (sum >= amount){ 31 | break; 32 | } 33 | 34 | var utxoData = utxos.shift(); 35 | if (utxoData.lockTime <= now){ 36 | var utxoAmount = parseInt(utxoData.amount); 37 | sum += utxoAmount; 38 | selected.push(utxoData); 39 | } 40 | } 41 | if (sum >= amount){ 42 | return {sum: sum, 
selected: selected}; 43 | } 44 | return {error: "not enough balance", sum: sum, selected: selected}; 45 | } 46 | 47 | this.prepareTransaction = function(fromPublicKey, utxos, balances){ 48 | var txInputs = [], txDestinations = [], changedBalances = {}; 49 | var now = Date.now(), inputSum = 0, outputSum = 0; 50 | var estimatedGas = txBaseGas + p2pkUnlockGas + gasPerOutput; // change output 51 | 52 | var addDestination = function(output){ 53 | var amount = util.fromALPH(output.amount); 54 | outputSum += amount; 55 | txDestinations.push({address: output.address, amount: amount}); 56 | changedBalances[output.address] = output.amount; 57 | estimatedGas += gasPerOutput 58 | } 59 | 60 | var popDestination = function(){ 61 | var destination = txDestinations.pop(); 62 | outputSum -= destination.amount; 63 | var address = destination.address; 64 | balances.unshift({address: address, amount: changedBalances[address]}); 65 | delete changedBalances[address]; 66 | estimatedGas -= gasPerOutput; 67 | } 68 | 69 | var addInputs = function(selected, selectedSum){ 70 | txInputs.push(selected); 71 | estimatedGas += selected.length * gasPerInput; 72 | inputSum += selectedSum; 73 | } 74 | 75 | var popInputs = function(selectedSum){ 76 | var selected = txInputs.pop(); 77 | estimatedGas -= selected.length * gasPerInput; 78 | selected.forEach(output => utxos.push(output)); 79 | inputSum -= selectedSum; 80 | } 81 | 82 | var calcTxFee = function(){ 83 | var txGas = Math.max(minimumGas, estimatedGas); 84 | return txGas * defaultGasFee; 85 | } 86 | 87 | // pay as many miners as possible in one tx 88 | while (balances.length > 0){ 89 | addDestination(balances.shift()); 90 | if (estimatedGas > maxGasPerTx){ 91 | popDestination(); 92 | break; 93 | } 94 | if (outputSum < inputSum){ 95 | continue; 96 | } 97 | 98 | var result = getUtxoForTransfer(utxos, outputSum - inputSum, now); 99 | if (result.error){ 100 | result.selected.forEach(output => utxos.push(output)); 101 | popDestination(); 102 | break; 
103 | } 104 | addInputs(result.selected, result.sum); 105 | if (estimatedGas > maxGasPerTx){ 106 | popInputs(result.sum); 107 | popDestination(); 108 | break; 109 | } 110 | } 111 | 112 | if (txInputs.length === 0){ 113 | return {error: 'not enough utxos for transfer, will try to transfer later'}; 114 | } 115 | 116 | var txFee = calcTxFee(); 117 | var remain = inputSum - outputSum; 118 | var popDestinations = function(){ 119 | while (remain < txFee && txDestinations.length > 0){ 120 | popDestination(); 121 | remain = inputSum - outputSum; 122 | txFee = calcTxFee(); 123 | } 124 | } 125 | 126 | var txData = function(){ 127 | if (txDestinations.length === 0 || txInputs.length === 0){ 128 | txInputs.forEach(selected => selected.forEach(output => utxos.push(output))); 129 | return {error: 'not enough utxos for tx fee, will try to transfer later'}; 130 | } else return { 131 | fromPublicKey: fromPublicKey, 132 | gasAmount: Math.max(minimumGas, estimatedGas), 133 | inputs: txInputs.flat().map(output => output.ref), 134 | destinations: txDestinations.map(e => ({address: e.address, attoAlphAmount: e.amount.toString()})), 135 | changedBalances: changedBalances 136 | } 137 | } 138 | 139 | if (remain >= txFee){ 140 | return txData(); 141 | } 142 | 143 | // try to cover the tx fee 144 | var result = getUtxoForTransfer(utxos, txFee - remain, now); 145 | if (result.error){ 146 | result.selected.forEach(output => utxos.push(output)); 147 | // try to remove destinations if not enough utxos 148 | popDestinations() 149 | return txData(); 150 | } 151 | 152 | addInputs(result.selected, result.sum); 153 | if (estimatedGas > maxGasPerTx){ 154 | popInputs(result.sum); 155 | // try to remove destinations if gas larger than `maxGasPerTx`, this should rarely happen 156 | popDestinations() 157 | return txData(); 158 | } 159 | return txData(); 160 | } 161 | 162 | this.prepareTransactions = function(fromPublicKey, utxos, groupedBalances){ 163 | var txsData = []; 164 | logger.debug('Payment data: 
' + JSON.stringify({utxos: utxos, groupedBalances: groupedBalances})); 165 | for (var idx in groupedBalances){ 166 | var entry = groupedBalances[idx]; 167 | var groupIndex = entry.group; 168 | var balances = entry.balances; 169 | while (balances.length > 0 && utxos.length > 0){ 170 | var result = this.prepareTransaction(fromPublicKey, utxos, balances); 171 | if (result.error){ 172 | logger.error('Prepare transaction error: ' + result.error + 173 | ', group: ' + groupIndex + 174 | ', remain balances: ' + JSON.stringify(balances) 175 | ); 176 | break; 177 | } 178 | txsData.push(result); 179 | } 180 | } 181 | logger.debug('Prepared txs: ' + JSON.stringify(txsData)); 182 | return txsData; 183 | } 184 | 185 | function transfer(addressInfo, groupedBalances, callback){ 186 | var fromAddress = addressInfo.address; 187 | var fromPublicKey = addressInfo.publicKey; 188 | 189 | _this.httpClient.getUtxos(fromAddress, function(result){ 190 | if (result.error){ 191 | logger.error('Get utxos failed, error: ' + result.error + ', fromAddress: ' + fromAddress); 192 | callback(); 193 | return; 194 | } 195 | 196 | var now = Date.now(); 197 | var utxoIsEnough = haveEnoughUtxo(result.utxos, groupedBalances, now); 198 | if (!utxoIsEnough) { 199 | logger.error('Not enough utxo when transfer'); 200 | logger.debug('Timestamp: ' + now + ', payment data: ' + JSON.stringify({utxos: result.utxos, groupedBalances: groupedBalances})); 201 | callback(); 202 | return; 203 | } 204 | var txsData = _this.prepareTransactions(fromPublicKey, result.utxos, groupedBalances); 205 | prepareSendTxs(fromAddress, function(error){ 206 | if (error){ 207 | callback(); 208 | return; 209 | } 210 | util.executeForEach( 211 | txsData, 212 | (txData, callback) => sendTx(fromAddress, txData, callback), 213 | _ => { 214 | var remains = groupedBalances.filter(e => e.balances.length > 0); 215 | if ((remains.length > 0) && utxoIsEnough){ 216 | logger.debug("Transfer remain balances: " + JSON.stringify(remains)); 217 | 
transfer(addressInfo, remains, callback); 218 | return; 219 | } 220 | callback(); 221 | } 222 | ); 223 | }); 224 | }); 225 | } 226 | 227 | function haveEnoughUtxo(utxos, groupedBalances, now){ 228 | var inputSum = 0, outputSum = 0; 229 | for (var utxo of utxos){ 230 | if (utxo.lockTime <= now) { 231 | inputSum += parseInt(utxo.amount); 232 | } 233 | } 234 | for (var entry of groupedBalances){ 235 | outputSum += entry.balances.map(e => e.amount).reduce((a, b) => a + b, 0); 236 | } 237 | return util.toALPH(inputSum) > outputSum; 238 | } 239 | 240 | function prepareSendTxs(fromAddress, callback){ 241 | _this.httpClient.unlockWallet( 242 | config.wallet.name, 243 | config.wallet.password, 244 | config.wallet.mnemonicPassphrase, 245 | function(result){ 246 | if (result.error){ 247 | logger.error('Unlock wallet ' + config.wallet.name + ' failed, error: ' + result.error); 248 | callback(result.error); 249 | return; 250 | } 251 | 252 | _this.httpClient.changeActiveAddress( 253 | config.wallet.name, 254 | fromAddress, 255 | function(result){ 256 | if (result.error){ 257 | logger.error('Change active address failed, error: ' + result.error + ', address: ' + fromAddress); 258 | callback(result.error); 259 | return; 260 | } 261 | callback(null); 262 | } 263 | ) 264 | } 265 | ); 266 | } 267 | 268 | function sendTx(fromAddress, txData, callback){ 269 | var handleUnsignedTx = function(unsignedTx){ 270 | if (unsignedTx.error){ 271 | logger.error('Build unsigned tx failed, error: ' + unsignedTx.error + 272 | ', fromAddress: ' + fromAddress + 273 | ', destinations: ' + JSON.stringify(txData.destinations) + 274 | ', inputs: ' + JSON.stringify(txData.inputs) + 275 | ', gas: ' + JSON.stringify(txData.gasAmount) 276 | ); 277 | callback(); 278 | return; 279 | } 280 | 281 | _this.httpClient.signTx(config.wallet.name, unsignedTx.txId, function(result){ 282 | if (result.error){ 283 | logger.error('Sign tx failed, error: ' + result.error + 284 | ', fromAddress: ' + fromAddress + 285 | ', 
txId: ' + unsignedTx.txId 286 | ); 287 | callback(); 288 | return; 289 | } 290 | 291 | var signedTx = { 292 | txId: unsignedTx.txId, 293 | changedBalances: txData.changedBalances, 294 | signature: result.signature, 295 | unsignedTx: unsignedTx.unsignedTx 296 | }; 297 | _this.lockRewardsAndSubmitTx(signedTx, _ => callback()); 298 | }); 299 | } 300 | 301 | _this.httpClient.buildUnsignedTxFromUtxos( 302 | txData.fromPublicKey, 303 | txData.destinations, 304 | txData.inputs, 305 | txData.gasAmount, 306 | handleUnsignedTx 307 | ); 308 | } 309 | 310 | this.txRewardsKey = function(txId){ 311 | return 'rewards:' + txId; 312 | } 313 | 314 | this.lockRewards = function(txId, changedBalances, callback){ 315 | var redisTx = _this.redisClient.multi(); 316 | var lockedRewardsKey = _this.txRewardsKey(txId); 317 | redisTx.sadd(transactionsKey, txId); 318 | for (var address in changedBalances){ 319 | var amount = changedBalances[address]; 320 | redisTx.hincrbyfloat(balancesKey, address, -amount); 321 | redisTx.hset(lockedRewardsKey, address, amount); 322 | } 323 | 324 | redisTx.exec(function(error, _){ 325 | if (error){ 326 | logger.error('Lock rewards failed, error: ' + error); 327 | callback(error); 328 | return; 329 | } 330 | callback(null); 331 | }); 332 | } 333 | 334 | this.lockRewardsAndSubmitTx = function(signedTx, callback){ 335 | _this.lockRewards(signedTx.txId, signedTx.changedBalances, function(error){ 336 | if (error){ 337 | callback(error); 338 | return; 339 | } 340 | _this.httpClient.submitTx( 341 | signedTx.unsignedTx, 342 | signedTx.signature, 343 | function(result){ 344 | if (result.error){ 345 | logger.error('Submit tx failed, error: ' + result.error + ', txId: ' + signedTx.txId); 346 | callback(result.error); 347 | return; 348 | } 349 | logger.info('Tx ' + result.txId + ' submitted'); 350 | callback(null); 351 | } 352 | ); 353 | }); 354 | } 355 | 356 | this.checkTxConfirmation = function(txId, confirmations, onTxConfirmed, onTxFailed, callback){ 357 | 
_this.httpClient.txStatus(txId, function(result){ 358 | if (result.error){ 359 | logger.error('Check tx status failed, error: ' + error); 360 | callback(); 361 | return; 362 | } 363 | 364 | switch(result.type){ 365 | case 'Confirmed': 366 | if ((result.chainConfirmations >= confirmations.chainConfirmations) && 367 | (result.fromGroupConfirmations >= confirmations.fromGroupConfirmations) && 368 | (result.toGroupConfirmations >= confirmations.toGroupConfirmations) 369 | ){ 370 | logger.info('Tx ' + txId + ' confirmed'); 371 | onTxConfirmed(txId, callback); 372 | return; 373 | } 374 | logger.info('Tx ' + txId + ' confirmations is not enough'); 375 | callback(); 376 | break; 377 | case 'MemPooled': 378 | logger.info('Tx ' + txId + ' is mem-pooled'); 379 | callback(); 380 | break; 381 | case 'TxNotFound': 382 | logger.info('Tx ' + txId + ' not found'); 383 | onTxFailed(txId, callback); 384 | break; 385 | default: 386 | logger.error('Unknown tx status: ' + result.type); 387 | callback(); 388 | break; 389 | } 390 | }); 391 | } 392 | 393 | this.checkTxConfirmations = function(callback){ 394 | _this.redisClient.smembers(transactionsKey, function(error, txIds){ 395 | if (error){ 396 | logger.error('Get transactions failed, error: ' + error); 397 | callback(); 398 | return; 399 | } 400 | util.executeForEach( 401 | txIds, 402 | (txId, callback) => _this.checkTxConfirmation(txId, config.txConfirmations, _this.onTxConfirmed, _this.onTxFailed, callback), 403 | _ => callback() 404 | ); 405 | }); 406 | } 407 | 408 | this.onTxFailed = function(txId, callback){ 409 | var lockedRewardsKey = _this.txRewardsKey(txId); 410 | _this.redisClient.hgetall(lockedRewardsKey, function(error, balances){ 411 | if (error){ 412 | logger.error('Get locked rewards failed, error: ' + error); 413 | callback(); 414 | return; 415 | } 416 | 417 | var redisTx = _this.redisClient.multi(); 418 | redisTx.srem(transactionsKey, txId); 419 | redisTx.del(lockedRewardsKey); 420 | for (var address in balances){ 421 
| var amount = parseFloat(balances[address]); 422 | redisTx.hincrbyfloat(balancesKey, address, amount); 423 | } 424 | 425 | redisTx.exec(function(error, _){ 426 | if (error){ 427 | logger.error('Update state failed when tx failed, error: ' + error); 428 | } 429 | callback(); 430 | }); 431 | }); 432 | } 433 | 434 | this.onTxConfirmed = function(txId, callback){ 435 | var redisTx = _this.redisClient.multi(); 436 | redisTx.del(_this.txRewardsKey(txId)); 437 | redisTx.srem(transactionsKey, txId); 438 | redisTx.exec(function(error, _){ 439 | if (error){ 440 | logger.error('Update state failed when tx confirmed, error: ' + error); 441 | } 442 | callback(); 443 | }); 444 | } 445 | 446 | this.grouping = function(allBalances){ 447 | // we have 4 groups 448 | var groups = [[], [], [], []]; 449 | for (var address in allBalances){ 450 | var balance = parseFloat(allBalances[address]); 451 | if (balance >= minPaymentCoins){ 452 | var groupIndex = addressGroupCache[address]; 453 | if (!groupIndex){ 454 | var [addressGroup, error] = util.groupOfAddress(address); 455 | if (error){ 456 | logger.error('Invalid address: ' + address + ', error: ' + error); 457 | continue; 458 | } 459 | groupIndex = addressGroup; 460 | addressGroupCache[address] = groupIndex; 461 | } 462 | var group = groups[groupIndex]; 463 | group.push({address: address, amount: balance}); 464 | } 465 | } 466 | var groupBalances = []; 467 | for (var idx in groups){ 468 | if (Object.keys(groups[idx]).length > 0){ 469 | groupBalances.push({ 470 | group: idx, 471 | balances: groups[idx] 472 | }); 473 | } 474 | } 475 | return groupBalances; 476 | } 477 | 478 | function sweepToAddress(toAddress, fromAddress, callback){ 479 | prepareSendTxs(fromAddress, function(error){ 480 | if (error){ 481 | callback({error: error}); 482 | return; 483 | } 484 | 485 | _this.httpClient.sweepActiveAddress( 486 | config.wallet.name, 487 | toAddress, 488 | function(result){ 489 | if (result.error){ 490 | logger.error('Sweep failed, error: ' + 
result.error + 491 | ', fromAddress: ' + fromAddress, 492 | ', toAddress: ' + toAddress 493 | ); 494 | callback({error: result.error}); 495 | return; 496 | } 497 | 498 | callback(result); 499 | } 500 | ); 501 | }); 502 | } 503 | 504 | function sweep(fromAddresses, toAddress, callback){ 505 | var txs = []; 506 | util.executeForEach(fromAddresses, 507 | function(fromAddress, callback){ 508 | sweepToAddress(toAddress, fromAddress, function(result){ 509 | if (result.error){ 510 | callback(); 511 | return; 512 | } 513 | 514 | for (var entry of result.results){ 515 | txs.push(entry); 516 | } 517 | callback(); 518 | }); 519 | }, 520 | _ => { 521 | logger.debug('Sweep completed, txs: ' + JSON.stringify(txs)); 522 | _this.waitSweepTxsConfirmed(txs.map(txInfo => txInfo.txId), callback); 523 | } 524 | ); 525 | } 526 | 527 | this.waitSweepTxsConfirmed = function(txs, callback){ 528 | var confirmations = { 529 | chainConfirmations: 1, 530 | fromGroupConfirmations: 1, 531 | toGroupConfirmations: 1 532 | }; 533 | var checkConfirmed = function(txs, callback){ 534 | var unconfirmedTxs = txs.slice(); 535 | util.executeForEach( 536 | txs, 537 | (txId, callback) => _this.checkTxConfirmation( 538 | txId, 539 | confirmations, 540 | (txId, callback) => { 541 | unconfirmedTxs = unconfirmedTxs.filter(id => id !== txId); 542 | callback(); 543 | }, 544 | (txId, _) => { 545 | logger.error('Sweep tx failed, txId: ' + txId); 546 | setTimeout(payment, config.paymentInterval * 1000); 547 | }, 548 | callback 549 | ), 550 | _ => { 551 | if (unconfirmedTxs.length > 0){ 552 | setTimeout(_ => checkConfirmed(unconfirmedTxs, callback), 30 * 1000); 553 | return; 554 | } 555 | logger.debug('Sweep txs are confirmed'); 556 | callback(); 557 | } 558 | ); 559 | }; 560 | 561 | setTimeout(_ => checkConfirmed(txs, callback), 30 * 1000); 562 | } 563 | 564 | function payment(){ 565 | _this.checkTxConfirmations(function(){ 566 | _this.redisClient.hgetall(balancesKey, function(error, result){ 567 | if (error){ 568 | 
logger.error('Get balances error: ' + error); 569 | return; 570 | } 571 | 572 | logger.info('Payment loop started'); 573 | var index = Math.floor(Math.random() * 4); 574 | var toAddress = config.addresses[index]; 575 | var fromAddresses = config.addresses.filter(addr => addr !== toAddress); 576 | var addressInfo = _this.addressInfo[index]; 577 | var groupedBalances = _this.grouping(result); 578 | sweep(fromAddresses, toAddress, function(){ 579 | transfer(addressInfo, groupedBalances, _ => { 580 | logger.info('Payment loop completed'); 581 | setTimeout(payment, config.paymentInterval * 1000); 582 | }); 583 | }); 584 | }); 585 | }); 586 | } 587 | 588 | this.start = function(){ 589 | if (config.paymentEnabled){ 590 | checkAddress(config.addresses); 591 | loadPublicKey(config.wallet, function(){ 592 | setTimeout(payment, config.paymentInterval * 1000); 593 | }); 594 | } 595 | } 596 | 597 | function loadPublicKey(walletConfig, callback){ 598 | var walletName = walletConfig.name; 599 | var password = walletConfig.password; 600 | var mnemonicPassphrase = walletConfig.mnemonicPassphrase; 601 | _this.httpClient.unlockWallet(walletName, password, mnemonicPassphrase, function(result){ 602 | if (result.error){ 603 | logger.error('Load public key, unlock wallet failed, error: ' + result.error); 604 | process.exit(1); 605 | } 606 | 607 | util.executeForEach( 608 | config.addresses.slice(), 609 | function(address, callback){ 610 | _this.httpClient.getAddressInfo(walletName, address, function(result){ 611 | if (result.error){ 612 | logger.error('Load public key, get address info failed, error: ' + result.error); 613 | process.exit(1); 614 | } 615 | 616 | _this.addressInfo.push({ 617 | address: address, 618 | publicKey: result.publicKey 619 | }); 620 | callback(); 621 | }); 622 | }, 623 | _ => callback() 624 | ); 625 | }); 626 | } 627 | 628 | function checkAddress(addresses){ 629 | if (addresses.length != constants.GroupSize){ 630 | logger.error('Expect ' + constants.GroupSize + ' 
miner addresses, but have ' + addresses.length); 631 | process.exit(1); 632 | } 633 | 634 | for (var idx = 0; idx < constants.GroupSize; idx++){ 635 | var [okey, error] = util.isValidAddress(addresses[idx], idx); 636 | if (error || !okey){ 637 | logger.error('Invalid miner address: ' + addresses[idx] + ', error: ' + error); 638 | process.exit(1); 639 | } 640 | } 641 | } 642 | } 643 | --------------------------------------------------------------------------------