├── .gitignore
├── LICENSE
├── README.rst
├── bench
│   └── share_fuzzer.py
├── cgminer.conf.example
├── config.example.yml
├── contrib
│   ├── alert_block.sh
│   ├── check_stratum.py
│   ├── production_example.yml
│   └── upstart.conf
├── defaults.yml
├── dev-requirements.txt
├── powerpool
│   ├── __init__.py
│   ├── agent_server.py
│   ├── entry.py
│   ├── exceptions.py
│   ├── jobmanagers
│   │   ├── __init__.py
│   │   ├── base.py
│   │   ├── monitor_aux_network.py
│   │   └── monitor_network.py
│   ├── lib.py
│   ├── main.py
│   ├── monitor.py
│   ├── reporters
│   │   ├── __init__.py
│   │   ├── base.py
│   │   ├── celery_reporter.py
│   │   ├── double_reporter.py
│   │   └── redis_reporter.py
│   ├── server.py
│   ├── stratum_server.py
│   └── utils.py
├── requirements-test.txt
├── requirements.txt
└── setup.py
/.gitignore:
--------------------------------------------------------------------------------
1 | *.yml
2 |
3 | *.py[cod]
4 | *.swp
5 | *.swo
6 | .idea
7 |
8 | # tmp saves
9 | *~
10 |
11 | # C extensions
12 | *.so
13 |
14 | # Packages
15 | *.egg
16 | *.egg-info
17 | dist
18 | build
19 | eggs
20 | parts
21 | bin
22 | var
23 | sdist
24 | develop-eggs
25 | .installed.cfg
26 |
27 | # Installer logs
28 | pip-log.txt
29 |
30 | # Unit test / coverage reports
31 | .coverage
32 | .tox
33 | nosetests.xml
34 |
35 | # Translations
36 | *.mo
37 |
38 | # Mr Developer
39 | .mr.developer.cfg
40 | .project
41 | .pydevproject
42 | .ropeproject
43 | .sass-cache
44 | tags
45 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Copyright (c) 2013, Isaac Cook
2 | All rights reserved.
3 |
4 | Redistribution and use in source and binary forms, with or without modification,
5 | are permitted provided that the following conditions are met:
6 |
7 | * Redistributions of source code must retain the above copyright notice, this
8 | list of conditions and the following disclaimer.
9 |
10 | * Redistributions in binary form must reproduce the above copyright notice, this
11 | list of conditions and the following disclaimer in the documentation and/or
12 | other materials provided with the distribution.
13 |
14 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
15 | ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
16 | WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
17 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
18 | ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
19 | (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
20 | LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
21 | ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
23 | SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
24 |
--------------------------------------------------------------------------------
/README.rst:
--------------------------------------------------------------------------------
1 | =========
2 | PowerPool
3 | =========
4 |
 5 | A `gevent <http://www.gevent.org/>`_ based Stratum mining pool
 6 | server.
7 |
8 | ========
9 | Features
10 | ========
11 |
12 | * Lightweight, asynchronous, gevent based internals.
13 | * Built in HTTP statistics/monitoring server.
14 | * Flexible statistics collection engine.
15 | * Multiple coinserver (RPC server) support for redundancy, with coinserver prioritization.
16 | * Celery driven share logging allows multiple servers to log shares and
17 | statistics to a central source for easy scaling out.
18 | * SHA256, X11, scrypt, and scrypt-n support
19 | * Support for merge mining multiple auxiliary blockchains
20 | * Modular architecture makes customization simple(r)
21 |
22 | Uses `Celery <http://www.celeryproject.org/>`_ to log shares and statistics
23 | for miners. Work generation and (bit|lite|alt)coin data structure
24 | serialization is performed by Cryptokit, which connects to bitcoind using
25 | GBT (getblocktemplate) for work generation (or getauxblock for merged work).
26 | Currently only Python 2.7 is supported.
27 |
28 | Built to power the SimpleDoge mining pool.
29 |
30 |
31 | ======
32 | Donate
33 | ======
34 |
35 | If you feel so inclined, you can give back to the devs at the below addresses.
36 |
37 | DOGE DAbhwsnEq5TjtBP5j76TinhUqqLTktDAnD
38 |
39 | BTC 185cYTmEaTtKmBZc8aSGCr9v2VCDLqQHgR
40 |
41 | VTC VkbHY8ua2TjxdL7gY2uMfCz3TxMzMPgmRR
42 |
43 | =============
44 | Getting Setup
45 | =============
46 |
47 | The only external service PowerPool relies on is its Celery broker. By
48 | default this will be RabbitMQ on a local connection, so simply having it
49 | installed will work fine.
50 |
51 | .. code-block:: bash
52 |
53 | sudo apt-get install rabbitmq-server
54 |
55 | Set up a virtualenv and install...
56 |
57 | .. code-block:: bash
58 |
59 | mkvirtualenv pp # if you've got virtualenvwrapper...
60 |     # Install all of powerpool's dependencies
61 | pip install -r requirements.txt
62 | # Install powerpool
63 | pip install -e .
64 | # Install the hashing algorithm modules
65 | pip install vtc_scrypt # for scryptn support
66 | pip install drk_hash # for x11 support
67 | pip install ltc_scrypt # for scrypt support
68 | pip install git+https://github.com/BlueDragon747/Blakecoin_Python_POW_Module.git@e3fb2a5d4ea5486f52f9568ffda132bb69ed8772#egg=blake_hash
69 |
70 | Now copy ``config.example.yml`` to ``config.yml``. All the defaults are
71 | commented out and mandatory fields are uncommented. Fill out all required fields
72 | and you should be good to go for testing.
73 |
74 | .. code-block:: bash
75 |
76 | pp config.yml
77 |
78 | And now your stratum server is running. Point a miner at it on
79 | ``localhost:3333`` (or more specifically, ``stratum+tcp://localhost:3333``) and
80 | do some mining. View server health on the monitor port at
81 | ``http://localhost:3855``. Various events will be getting logged into RabbitMQ
82 | to be picked up by a celery worker. See Simple Coin for a reference
83 | implementation of a Celery task handler.
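 84 |
 85 | To take a quick look at what the monitor is reporting you can hit it with
 86 | curl. This is just a sketch: it assumes the default monitor port of 3855, and
 87 | the exact routes and JSON layout depend on which components you have
 88 | configured.
 89 |
 90 | .. code-block:: bash
 91 |
 92 |     # Dump the monitor's JSON health output (assumes default port 3855)
 93 |     curl -s http://localhost:3855/ | python -m json.tool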
85 |
86 | =====================
87 | Architecture Overview
88 | =====================
89 |
90 | **Reporter**
91 | The reporter is responsible for transmitting shares, mining statistics, and new
92 | blocks to some external storage. The reference implementation is the
93 | CeleryReporter which aggregates shares into batches and logs them in a way
94 | designed to interface with SimpleCoin. The reporter is also responsible for
95 | tracking share rates for vardiff. This makes sense if you want vardiff to be
96 | based on the shares per second of an entire address, instead of a single
97 | connection.
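 98 |
 99 | As a rough sketch of the calls a reporter receives, here is a do-nothing
100 | reporter modeled on the ``DummyReporter`` stub in ``bench/share_fuzzer.py``.
101 | The method names and signatures below are taken from that stub and may
102 | differ from the current ``powerpool.reporters`` base class:
103 |
104 | .. code-block:: python
105 |
106 |     class PrintReporter(object):
107 |         """ Hypothetical reporter that just prints what it is handed. """
108 |         def add_shares(self, address, shares):
109 |             print "shares from {}: {}".format(address, shares)
110 |
111 |         def add_one_minute(self, address, acc, stamp, worker, dup, low, stale):
112 |             print "one minute stats for {}.{}".format(address, worker)
113 |
114 |         def agent_send(self, address, worker, typ, data, time):
115 |             print "agent data of type {} for {}.{}".format(typ, address, worker)
116 |
117 |         def transmit_block(self, address, worker, height, total_subsidy,
118 |                            fees, hex_bits, hash, merged):
119 |             print "block at height {} found by {}.{}".format(height, address, worker)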
98 |
99 | **Jobmanager**
100 | This module generates mining jobs and sends them to workers. It must provide
101 | current jobs for the stratum server to be able to push. The reference
102 | implementation monitors an RPC daemon server.
103 |
104 | **Server**
105 | This is a singleton class that holds statistics and references to all other
106 | modules. All components get access to this object, which is largely concerned
107 | with handling startup and shutdown, along with statistics rotation.
108 |
109 | **Stratum Manager**
110 | Handles spawning one or many stratum servers (which bind to a single port
111 | each), as well as spawning corresponding agent servers. It holds data
112 | structures that allow lookup of all StratumClient objects.
113 |
114 | **Stratum Server**
115 | A server that listens to a single port and accepts new stratum clients.
116 |
117 | **Agent Server**
118 | A server that listens to a single port and accepts new ppagent connections.
119 |
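120 | Since the agent protocol is small, here is a hypothetical minimal client for
121 | it. The method names (``hello``, ``worker.authenticate``, ``stats.submit``)
122 | and the newline-delimited JSON framing come from
123 | ``powerpool/agent_server.py``; the port assumes the default agent
124 | ``port_diff`` of 1111 on top of a stratum server at port 3333:
125 |
126 | .. code-block:: python
127 |
128 |     import json
129 |     import socket
130 |     import time
131 |
132 |     sock = socket.create_connection(("localhost", 4444))
133 |     fp = sock.makefile()
134 |
135 |     def send(method, params):
136 |         sock.sendall(json.dumps(dict(method=method, params=params)) + "\n")
137 |
138 |     send("hello", [0.1])                    # hello gets no response
139 |     send("worker.authenticate", ["ADDRESS.worker"])
140 |     print fp.readline()                     # {"result": true, "error": null}
141 |     send("stats.submit", ["ADDRESS.worker", "hashrate", 1000000, time.time()])
142 |     print fp.readline()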
120 |
121 | ==============================================
122 | Setting up push block notifications (optional)
123 | ==============================================
124 |
125 | To check for new blocks PowerPool defaults to polling each of the coinservers
126 | you configure. It simply runs the RPC call ``getblockcount`` 5x/second
127 | (configurable) to see if the block height has changed. If it has, it runs
128 | ``getblocktemplate`` to grab the new info.
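129 |
130 | The polling interval is the jobmanager's ``block_poll`` setting (seconds
131 | between ``getblockcount`` calls; see ``defaults.yml`` and
132 | ``powerpool/jobmanagers/monitor_network.py``), so for example:
133 |
134 | .. code-block:: yaml
135 |
136 |     LTC:
137 |         type: powerpool.jobmanagers.MonitorNetwork
138 |         # seconds between getblockcount polls; 0.2 is the code default
139 |         block_poll: 0.2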
129 |
130 | Since polling creates a 100ms delay (on average) for detecting new blocks, one
131 | optimization is to configure the coinservers to push a notification to
132 | PowerPool when they accept a new block. Since this reduces the delay to <1ms
133 | you'll end up with fewer orphans. The impact of the faster speed is more
134 | pronounced with currencies that have shorter block times.
135 |
136 | Although this is an improvement, it's worth mentioning that it is pretty minor.
137 | We're talking about shaving off ~100ms or so, which should reduce orphan
138 | percentages by ~0.01% - 0.1%, depending on block times. Miners often connect with
139 | far more latency than this.
140 |
141 | How push block works
142 | -----------------------
143 |
144 | Standard Bitcoin/Litecoin based coinservers have a built-in config option to
145 | allow executing a script right after a new block is discovered. We want to run
146 | a script that notifies our PowerPool process to check for a new block.
147 |
148 | To accomplish this PowerPool has built-in support for receiving a UDP datagram
149 | on its monitor port. The basic system flow looks like this:
150 |
151 | Coinserver -> Learns of new block
152 | Coinserver -> Executes blocknotify script (Alertblock)
153 | Alertblock -> Parses the passed in .push file
154 | Alertblock -> Sends a UDP datagram based on that .push file
155 | PowerPool -> Receives UDP datagram
156 | PowerPool -> Runs `getblocktemplate` on the Coinserver
157 |
158 | Note: Using a pushblock script to deliver a UDP datagram to PowerPool can
159 | be accomplished in many different ways. We're going to walk
160 | through how we've set it up on our own servers, but please note that if your
161 | server configuration/architecture differs much from ours you may have to adapt
162 | this guide.
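163 |
164 | For a quick manual test you can also skip Alertblock entirely and fire the
165 | same datagram yourself. This is a sketch assuming the default monitor port of
166 | 6855 and a configured currency key of VTC; the payload is simply everything
167 | after the address and port on a .push line:
168 |
169 | .. code-block:: python
170 |
171 |     import socket
172 |
173 |     # Mimic the Alertblock/nc pipeline: send the payload portion of a
174 |     # .push line as a single UDP datagram to PowerPool's monitor port.
175 |     sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
176 |     sock.sendto("VTC getblocktemplate signal=1 __spawn=1", ("127.0.0.1", 6855))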
163 |
164 | Modify the coinserver's config
165 | ------------------------------
166 |
167 | This is the part that tells the coinserver what script to run when it learns
168 | of a new block.
169 |
170 | .. code-block:: bash
171 |
172 | blocknotify=/usr/bin/alertblock /home/USER/coinserver_push/vertcoin.push
173 |
174 | You'll want something similar to this in each coinserver's config. Make sure to
175 | restart it after.
176 |
177 |
178 | Alertblock script
179 | -----------------
180 |
181 | Now that the coin server is trying to run /usr/bin/alertblock, you'll need to
182 | make that Alertblock script.
183 |
184 | Open your text editor of choice and save this to /usr/bin/alertblock. Make
185 | sure it is executable (``chmod +x /usr/bin/alertblock``).
185 |
186 | .. code-block:: bash
187 |
188 | #!/bin/bash
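189 |     # For each line of the .push file (in parallel): split it into words,
190 |     # then send everything after the first two words as a UDP datagram to
191 |     # the host and port given by the first two words.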
189 | cat $1 | xargs -P 0 -d '\n' -I ARGS bash -c 'a="ARGS"; args=($a); echo "${args[@]:2}" | nc -4u -w0 -q1 ${args[@]:0:2}'
190 | # For testing the command
191 | #cat $1 | xargs -P 0 -td '\n' -I ARGS bash -xc 'a="ARGS"; args=($a); echo "${args[@]:2}" | nc -4u -w0 -q1 ${args[@]:0:2}'
192 |
193 |
194 | Block .push script
195 | ------------------
196 |
197 | Now your Alertblock script will be looking for a
198 | /home/USER/coinserver_push/vertcoin.push file. The data in this file is
199 | interpreted by the Alertblock script. It looks at each line and tries to send
200 | a UDP packet based on the info. The .push file might contain something like
201 | this:
202 |
203 | .. code-block:: bash
204 |
205 | 127.0.0.1 6855 VTC getblocktemplate signal=1 __spawn=1
206 |
207 | Basically, this tells the Alertblock script to send a UDP datagram to 127.0.0.1
208 | on port 6855. PowerPool will parse the datagram and run getblocktemplate
209 | for the currency VTC.
210 |
211 | The port (6855) should be the monitor port for the stratum process you want
212 | to send the notification to. The currency code (VTC) should match one of the
213 | configured currencies in that stratum's config.
214 |
215 | If you need to push to multiple monitor ports just do something like:
216 |
217 | .. code-block:: bash
218 |
219 | 127.0.0.1 6855 VTC getblocktemplate signal=1 __spawn=1
220 | 127.0.0.1 6856 VTC getblocktemplate signal=1 __spawn=1
221 |
222 | For merge mined coins you'll want something slightly different:
223 |
224 | .. code-block:: bash
225 |
226 | 127.0.0.1 6855 DOGE _check_new_jobs signal=1 _single_exec=True __spawn=1
227 |
228 |
229 | Powerpool config
230 | ----------------
231 |
232 | Now we need to update PowerPool's config to stop polling, since polling is no
233 | longer needed and clutters the coinserver's logs. Just set the `poll` key to
234 | False for each currency you have push block set up for.
235 |
236 | .. code-block:: yaml
237 |
238 | VTC:
239 | poll: False
240 | type: powerpool.jobmanagers.MonitorNetwork
241 | algo: scryptn
242 | currency: VTC
243 | etc...
244 |
245 | Confirm it is working
246 | ---------------------
247 |
248 | You'll want to double-check that push block notifications are actually
249 | working as planned. The easiest way is to visit PowerPool's monitoring endpoint
250 | and look for the `last_signal` key. It should be updated each time PowerPool is
251 | notified of a block via push block.
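252 |
253 | For example (a sketch; this assumes the default monitor port and that the
254 | monitor's JSON output includes each jobmanager's status):
255 |
256 | .. code-block:: bash
257 |
258 |     curl -s http://localhost:3855/ | grep last_signal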
252 |
253 | =======
254 | License
255 | =======
256 |
257 | BSD
258 |
--------------------------------------------------------------------------------
/bench/share_fuzzer.py:
--------------------------------------------------------------------------------
1 | import gevent
2 | import random
3 | import string
4 |
5 | from powerpool.clients import StratumClients
6 | import logging
7 | logging.getLogger().addHandler(logging.StreamHandler())
8 |
9 |
10 | SEED_CLIENTS = 1000
11 | client_id = 0
12 |
13 |
14 | def rand_str(N):
15 | return ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(N))
16 |
17 |
18 | class DummyClient(object):
19 | def __init__(self, address, worker, id):
20 | self.address = address
21 | self.worker = worker
22 | self.id = id
23 |
24 |
25 | class DummyReporter(object):
26 | def add_one_minute(self, address, acc, stamp, worker, dup, low, stale):
27 | print "add one minute"
28 |
29 | def add_shares(self, address, shares):
30 | print "add shares"
31 |
32 | def agent_send(self, address, worker, typ, data, time):
33 | print "agent_send"
34 |
35 | def transmit_block(self, address, worker, height, total_subsidy, fees,
36 | hex_bits, hash, merged):
37 | pass
38 |
39 |
40 | class DummyServer(object):
41 | reporter = DummyReporter()
42 | config = dict(share_batch_interval=10)
43 |
44 | server = DummyServer()
45 | clients = StratumClients(server)
46 | clients.start()
47 |
48 |
49 | def client_sim():
50 | global client_id
51 |
52 | print "client {} starting".format(client_id)
53 | if clients.address_lut.keys() and random.randint(1, 3) == 1:
54 | address = random.choice(clients.address_lut.keys())
55 | print "picking address from already connected users"
56 | else:
57 | address = rand_str(34)
58 |
59 | worker = rand_str(10)
60 | client = DummyClient(address, worker, client_id)
61 | clients[client_id] = client
62 | clients.set_user(client)
63 | client_id += 1
64 | try:
65 | while True:
66 |             if 1 == random.randint(1, 100): # disconnect the sim client
67 | break
68 |
69 | if 1 == random.randint(1, 5): # submit a share
70 | clients.add_share(address, worker, 100, 1)
71 |
72 | gevent.sleep(random.uniform(0, 0.3))
73 | #print "iter on client {}".format(client.id)
74 |
75 | finally:
76 | del clients[client.id]
77 | print "client {} closing".format(client.id)
78 |
79 |
80 | def client_maker():
81 | for i in xrange(SEED_CLIENTS):
82 | gevent.spawn(client_sim)
83 |
84 | while True:
85 | gevent.sleep(random.uniform(0.2, 2))
86 | client_sim()
87 |
88 | gevent.joinall([gevent.spawn(client_maker)])
89 |
--------------------------------------------------------------------------------
/cgminer.conf.example:
--------------------------------------------------------------------------------
1 | {
2 | "pools" : [
3 | {
4 | "url" : "stratum+tcp://localhost:8123",
5 | "user" : "DLmW4utjzP7ML8iVyoQQ",
6 | "pass" : "none"
7 | }
8 | ],
9 | "debug" : true,
10 | "intensity" : "1",
11 | "vectors" : "1",
12 | "worksize" : "256",
13 | "kernel" : "scrypt",
14 | "lookup-gap" : "2",
15 | "thread-concurrency" : "8000",
16 | "shaders" : "1792",
17 | "gpu-engine" : "900-1050",
18 | "gpu-fan" : "40-100",
19 | "gpu-memclock" : "1450",
20 | "gpu-powertune" : "20",
21 | "gpu-memdiff" : "0",
22 | "gpu-vddc" : "0.000",
23 | "temp-cutoff" : "90",
24 | "temp-overheat" : "90",
25 | "temp-target" : "75",
26 | "api-mcast-port" : "4028",
27 | "api-port" : "4028",
28 | "expiry" : "120",
29 | "gpu-dyninterval" : "7",
30 | "gpu-platform" : "0",
31 | "gpu-threads" : "2",
32 | "hotplug" : "5",
33 | "log" : "5",
34 | "no-pool-disable" : true,
35 | "queue" : "1",
36 | "scan-time" : "60",
37 | "scrypt" : true,
38 | "temp-hysteresis" : "3",
39 | "shares" : "0",
40 | "kernel-path" : "/usr/local/bin"
41 | }
42 |
--------------------------------------------------------------------------------
/config.example.yml:
--------------------------------------------------------------------------------
1 | # -----------------------------------------
2 | # PowerPool configuration file
3 | # -----------------------------------------
 4 | # Each item in the list is a discrete component in powerpool that will get
 5 | # created. Type specifies the fully qualified python path to the module. A
 6 | # basic powerpool server will contain one of each of the following:
7 | # * Jobmanager: creates jobs to be sent to miners and submits solved blocks
8 | # * StratumServer: Accepts stratum connections and sends jobs to users, accepts
9 | # shares, etc
10 | # * Reporter: records submitted shares and solved blocks
11 | # * Manager: Manages logging, starting and stopping the server, and stats
12 | # collection
13 | #
14 | # Optional modules:
15 | # * ServerMonitor: Exposes an HTTP port to view system health and connected
16 | #   worker information
17 | #
18 | # Each of these modules has default configurations which can be viewed
19 | # and explained in the defaults.yml file
20 | ---
21 | PP:
22 | type: powerpool.main.PowerPool
23 |
24 | RR:
25 | type: powerpool.reporters.RedisReporter
26 | redis:
27 | db: 15
28 | # Configures special users which will get all the pool shares reported to
29 | # them
30 | pool_report_configs:
31 | - worker_format_string: "{chain}"
32 | user: "pool"
33 | - worker_format_string: "{currency}"
34 | user: "pool_currency"
35 | report_merge: True
36 | - worker_format_string: "{algo}"
37 | user: "pool_algo"
38 |
39 | CR:
40 | type: powerpool.reporters.CeleryReporter
41 | DR:
42 | type: powerpool.reporters.DoubleReporter
43 | reporters:
44 | - RR
45 | - CR
46 |
47 | LTC:
48 | type: powerpool.jobmanagers.MonitorNetwork
49 | merged:
50 | - SYS
51 | algo: scrypt
52 | currency: LTC
53 | pool_address: mri1PEngsRuU6aLKQJ5gGePUdEo76C6DeT
54 | coinservs:
55 | - port: 20001
56 | address: 127.0.0.1
57 | username: admin1
58 | password: 123
59 | poll_priority: 100
60 |
61 | SYS:
62 | type: powerpool.jobmanagers.MonitorAuxNetwork
63 | algo: scrypt
64 | signal: 28
65 | currency: SYS
66 | coinservs:
67 | - port: 19001
68 | address: 127.0.0.1
69 | username: admin1
70 | password: 123
71 | poll_priority: 100
72 |
73 | TEST_STRAT:
74 | type: powerpool.stratum_server.StratumServer
75 | algo: scrypt
76 | jobmanager: LTC
77 | reporter: DR
78 | start_difficulty: 0.1
79 |
80 | MON:
81 | type: powerpool.monitor.ServerMonitor
82 |
--------------------------------------------------------------------------------
/contrib/alert_block.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | cat $1 | xargs -P 0 -d '\n' -I ARGS bash -c 'a="ARGS"; args=($a); echo "${args[@]:2}" | nc -4u -w0 -q1 ${args[@]:0:2}'
3 | # For testing the command
4 | #cat $1 | xargs -P 0 -td '\n' -I ARGS bash -xc 'a="ARGS"; args=($a); echo "${args[@]:2}" | nc -4u -w0 -q1 ${args[@]:0:2}'
5 |
6 | # Populate a text file with something like:
7 | # localhost 6855 LTC getblocktemplate signal=1 __spawn=1
8 | # localhost 6856 LTC getblocktemplate signal=1 __spawn=1
9 | # localhost 6857 LTC getblocktemplate signal=1 __spawn=1
10 | # To do push block notification for each of them. File changes take effect instantly
11 |
12 |
--------------------------------------------------------------------------------
/contrib/check_stratum.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/python
2 | import socket
3 | import json
4 | import argparse
5 | import time
6 | import logging
7 |
8 |
9 | def wait_id(fo, target_id):
10 | for i in xrange(10):
11 | ret = json.loads(fo.readline())
12 | if ret['id'] == target_id:
13 | break
14 | else:
15 | raise Exception("No valid return in 10 reads!")
16 | return ret
17 |
18 |
19 | def wait_method(fo, target_method):
20 | for i in xrange(10):
21 | ret = json.loads(fo.readline())
22 | if ret['method'] == target_method:
23 | break
24 | else:
25 | raise Exception("No valid return in 10 reads!")
26 | return ret
27 |
28 |
29 | def main():
30 | start = int(time.time())
31 |
32 | parser = argparse.ArgumentParser(description='Check Stratum')
33 | parser.add_argument('-s', '--server', default='localhost',
34 | help='the remote hostname')
35 | parser.add_argument('-p', '--port', type=int, default=3333,
36 | help='the port to try and connect on')
37 | args = parser.parse_args()
38 |
39 | s = None
40 | for res in socket.getaddrinfo(args.server, args.port, socket.AF_UNSPEC,
41 | socket.SOCK_STREAM):
42 | af, socktype, proto, canonname, sa = res
43 | try:
44 | s = socket.socket(af, socktype, proto)
45 | except socket.error:
46 | s = None
47 | continue
48 |
49 | try:
50 | s.connect(sa)
51 | except socket.error:
52 | s.close()
53 | s = None
54 | continue
55 | break
56 |
57 | if s is None:
58 | print 'could not open socket'
59 | return 2
60 |
61 | f = s.makefile()
62 |
63 | # subscribe
64 | f.write(json.dumps({u'params': [u'stratum_check/0.1'], u'id': 100,
65 | u'method': u'mining.subscribe'}) + "\n")
66 | f.flush()
67 | t = time.time()
68 | ret = wait_id(f, 100)
69 | assert ret['error'] is None
70 | print 'stratum.{}.{}.subscribe {} {}'.format(
71 | args.server, args.port,
72 | (time.time() - t) * 1000,
73 | start)
74 |
75 | # authorize
76 | f.write(json.dumps({u'params': [u'testing', u''], u'id': 200, u'method':
77 | u'mining.authorize'}) + "\n")
78 | f.flush()
79 | t = time.time()
80 | ret = wait_id(f, 200)
81 | assert ret['error'] is None
82 | print 'stratum.{}.{}.authorize {} {}'.format(
83 | args.server, args.port,
84 | (time.time() - t) * 1000,
85 | start)
86 |
87 | ret = wait_method(f, "mining.notify")
88 | print 'stratum.{}.{}.notify {} {}'.format(
89 | args.server, args.port,
90 | (time.time() - t) * 1000,
91 | start)
92 |
93 | # submit a job!
94 | t = time.time()
95 | f.write(json.dumps({u'params': [u'testing', ret['params'][0], u'00000000',
96 | u'545d2122', u'28030000'], u'method':
97 | u'mining.submit', u'id': 300}) + "\n")
98 | f.flush()
99 | ret = wait_id(f, 300)
100 | assert ret['error'][0] is not None
101 | print 'stratum.{}.{}.share_process {} {}'.format(
102 | args.server, args.port,
103 | (time.time() - t) * 1000,
104 | start)
105 |
106 | f.close()
107 | s.close()
108 |
109 |
110 | if __name__ == '__main__':
111 | try:
112 | exit(main())
113 | except Exception:
114 | logging.exception("Unhandled exception!")
115 | exit(2)
116 |
--------------------------------------------------------------------------------
/contrib/production_example.yml:
--------------------------------------------------------------------------------
1 | ---
2 | pp:
3 | type: powerpool.main.PowerPool
4 | procname: powerpool_litecoin
5 | server_number: 31
6 | events:
7 | enabled: True
8 | port: 9050
9 | datagram:
10 | enabled: True
11 |
12 | redis:
13 | type: powerpool.reporters.RedisReporter
14 | pool_report_configs:
15 | - worker_format_string: "{chain}"
16 | user: "pool"
17 | - worker_format_string: "{currency}"
18 | user: "pool_currency"
19 | report_merge: True
20 | attrs:
21 | chain: "LTC"
22 | redis:
23 | port: 6380
24 | chain: 1
25 |
26 | LTC:
27 | poll: False
28 | type: powerpool.jobmanagers.MonitorNetwork
29 | algo: scrypt
30 | currency: LTC
31 | pool_address: LgdMDJhr1A4poib2WBPxdFpmLikcNSihAE
32 | coinservs:
33 | - port: 10080
34 | address: 127.0.0.1
35 | username:
36 | password:
37 | poll_priority: 100
38 | - port: 10081
39 | address: 127.0.0.1
40 | username:
41 | password:
42 | poll_priority: 200
43 | merged:
44 | - DOGE
45 | - SYS
46 | - TCO
47 | - ULTC
48 | - PTC
49 | - GRE
50 |
51 | DOGE:
52 | work_interval: 10
53 | poll: False
54 | type: powerpool.jobmanagers.MonitorAuxNetwork
55 | algo: scrypt
56 | currency: DOGE
57 | flush: True
58 | coinservs:
59 | - port: 10050
60 | address: 127.0.0.1
61 | username:
62 | password:
63 | poll_priority: 100
64 |
65 | GRE:
66 | work_interval: 10
67 | poll: False
68 | type: powerpool.jobmanagers.MonitorAuxNetwork
69 | algo: scrypt
70 | currency: GRE
71 | coinservs:
72 | - port: 10130
73 | address: 127.0.0.1
74 | username:
75 | password:
76 |
77 | SYS:
78 | work_interval: 10
79 | poll: False
80 | type: powerpool.jobmanagers.MonitorAuxNetwork
81 | algo: scrypt
82 | currency: SYS
83 | coinservs:
84 | - port: 10020
85 | address: 127.0.0.1
86 | username: syscoinrpc
87 | password:
88 |
89 | TCO:
90 | work_interval: 10
91 | poll: False
92 | type: powerpool.jobmanagers.MonitorAuxNetwork
93 | algo: scrypt
94 | currency: TCO
95 | coinservs:
96 | - port: 10010
97 | address: 127.0.0.1
98 | username: tacocoinrpc
99 | password:
100 |
101 | ULTC:
102 | work_interval: 10
103 | poll: False
104 | type: powerpool.jobmanagers.MonitorAuxNetwork
105 | algo: scrypt
106 | currency: ULTC
107 | coinservs:
108 | - port: 10040
109 | address: 127.0.0.1
110 | username: umbrella-ltcrpc
111 | password:
112 |
113 | PTC:
114 | work_interval: 10
115 | poll: False
116 | type: powerpool.jobmanagers.MonitorAuxNetwork
117 | algo: scrypt
118 | currency: PTC
119 | coinservs:
120 | - port: 10030
121 | address: 127.0.0.1
122 | username: pesetacoinrpc
123 | password:
124 |
125 | vardiff:
126 | type: powerpool.stratum_server.StratumServer
127 | algo: scrypt
128 | reporter: redis
129 | jobmanager: LTC
130 | start_difficulty: 256
131 | vardiff:
132 | enabled: True
133 | spm_target: 30
134 | tiers:
135 | - 8
136 | - 16
137 | - 32
138 | - 64
139 | - 96
140 | - 128
141 | - 192
142 | - 256
143 | - 512
144 | - 1024
145 | - 2048
146 | - 4096
147 | - 8192
148 | - 16384
149 | - 32768
150 | - 65536
151 | - 131072
152 | agent:
153 | enabled: True
154 |
155 | mon:
156 | type: powerpool.monitor.ServerMonitor
157 |
--------------------------------------------------------------------------------
/contrib/upstart.conf:
--------------------------------------------------------------------------------
1 | start on (filesystem)
2 | stop on runlevel [016]
3 |
4 | instance $TYPE
5 |
6 | respawn
7 | console log
8 | setuid multi
9 | setgid multi
10 | limit nofile 16192 16192
11 |
12 | env PYTHONOPTIMIZE=2
13 |
14 | exec /home/multi/powerpools/$TYPE /home/multi/powerpools/$TYPE.yml
15 |
--------------------------------------------------------------------------------
/defaults.yml:
--------------------------------------------------------------------------------
1 | # ==========================================================================
2 | # DEFAULTS
3 | # Represents default configurations applied to all different component types
 4 | # Can be overridden as shown in the config.example.yml
5 | #
6 | # NOTE: The settings in this file are not used or parsed, simply informative
7 | # ==========================================================================
8 | - &stat_reporter_defaults
9 | type: powerpool.reporters.StatReporter
10 | # Whether or not the reporter will call log_one_minute as user "pool" for all
11 | # shares submitted to allow pool tracking
12 | report_pool_stats: True
13 |   # The worker that pool stats get reported as. Only applies with
14 |   # report_pool_stats = True
15 | pool_worker: ""
16 |
17 | - &redis_reporter_defaults # Also has all the defaults of StatReporter!
18 | type: powerpool.reporters.RedisReporter
19 |
20 | # Shares can be submitted to redis on independent share "chains" to allow
21 | # cooperatively solving blocks between many payout types. Should be different
22 | # for each different payout type...
23 | chain: 1
24 |
25 |   # Used directly to configure the redis-py redis instance. Read the
26 |   # redis-py docs for more information
27 | redis: {}
28 |
29 | # The jobmanager is in charge of building jobs for the stratum workers,
30 | # submitting blocks to rpc servers, and pushing new block notifications to
31 | # clients
32 | - &monitor_network_defaults
33 | type: powerpool.jobmanagers.MonitorNetwork
34 |
35 | # The difficulty to start people out when they connect. Will be the
36 | # fixed difficulty if vardiff is disabled
37 | start_difficulty: 1
38 | # a list of connections to daemons to get block templates from
39 | # and submit complete blocks to. Required
40 | coinservs: []
41 |
42 | # Short currency name used by the reporter as needed to properly report
43 | # block submissions. Required
44 | currency:
45 |
46 | # Should the proof of work algorithm be used to produce the block hash, or
47 | # sha256. Most use sha256, but some like Darkcoin use the POW algo
48 | pow_block_hash: True
49 |
50 | # what algorithm should these jobs be hashed with? Passed to reporter on
51 | # submission for recording and checking compatible StratumServer. Required.
52 | algo:
53 |
54 | # This should contain a list of keys for defined AuxNetworkMonitor Components
55 | merged: []
56 |
57 | # address that all blocks will be paid out to. Make sure this is right! Required.
58 | pool_address:
59 |
60 | # should we poll the RPC server for new blocks? True will force polling,
61 | # null will poll if push notifications are disabled, and False will force
62 | # off
63 | poll:
64 |
65 | # the definition of a target of difficulty 1. 4 zeroes for scrypt, 8 for
66 | # sha...
67 |   diff1: 0x0000FFFF00000000000000000000000000000000000000000000000000000000
68 |
69 | # The number of hashes a single diff1 share takes on average to compute
70 | # 0xFFFFFFFF for sha256 and dark diff, 0xFFFF for scrypt. Used for showing
71 | # hashrate
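72 |   # (so displayed hashrate ~= shares/sec * share difficulty * hashes_per_share)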
72 | hashes_per_share: 0xFFFFFFFF
73 |
74 | # time between checking live rpc for new blocks... lower means less orphan
75 | # blocks... Unused if using push block signals
76 |   block_poll: 0.2
77 | # block polls between generating a new job for workers (running gbt, etc)
78 | job_generate_int: 75
79 | # Time between pinging rpc_servers that are down
80 | rpc_ping_int: 2
81 | # Pay out Darkcoin masternodes if True. Blocks may be rejected if False
82 | payout_drk_mn: True
83 | # A blockheight at which to set profitability to 0. Usually used to indicate
84 | # the height at which a coin is going PoS. Default is None
85 | max_blockheight:
86 |
87 | # The HTTP health monitor. Most configs go straight to Flask configuration
88 | - &server_monitor_defaults
89 | type: powerpool.monitor.ServerMonitor
90 |
91 | # Show tracebacks for erroring views. Allow debug view to display (possibly
92 | # shows PASSWORDS!)
93 | DEBUG: false
94 | # Address to bind for accepting HTTP connections. Localhost by default
95 | address: 127.0.0.1
96 | port: 3855
97 |
98 | # This defines default configurations that will be applied to every
99 | # StratumServer configuration
100 | - &stratum_server_defaults
101 | type: powerpool.stratum_server.StratumServer
102 |
103 | address: 0.0.0.0
104 | port: 3333
105 |
106 | # The difficulty to start people out when they connect. Will be the
107 | # fixed difficulty if vardiff is disabled
108 | start_difficulty: 128
109 |
110 | # what algorithm should these jobs be hashed with? Must be an algo
111 | # listed in the stratum manager dictionary. Required.
112 | algo:
113 |
114 | # Configuration that each vardiff enabled interface will use
115 | vardiff:
116 |     # whether this port will be vardiff enabled
117 | enabled: False
118 |     # the overall shares per minute we're targeting
119 | spm_target: 20
120 | # time between checks triggered from share submission in seconds
121 | interval: 10
122 | # the available difficulty tiers. Will adjust to one of these
123 | tiers:
124 | - 8
125 | - 16
126 | - 32
127 | - 64
128 | - 96
129 | - 128
130 | - 192
131 | - 256
132 | - 512
133 |
134 | # The minimum allowable user-settable difficulty. Vardiff may still
135 | # adjust lower/higher
136 | minimum_manual_diff: 64
137 |
138 | # time between sending latest job to workers when there is no new block
139 | push_job_interval: 30
140 |
141 |   # the agent server allows data collection agents to connect and report
142 |   # stats about stratum clients. disabled by default. If enabled, an agent
143 |   # server will be started to mirror every stratum port at a `port_diff`
144 |   # higher port number (ie stratum port 3333 will create agent port 4444 by
145 |   # default)
146 | agent:
147 | enabled: False
148 | port_diff: 1111
149 | accepted_types:
150 | - temp
151 | - status
152 | - hashrate
153 | - thresholds
154 |
155 |   # aliases allow you to automatically translate a friendly username to a
156 |   # predefined address. the donate address is similar, except that any invalid
157 |   # address is translated to it
158 | aliases: {}
159 |
160 | # General process management configurations
161 | - &powerpool_defaults
162 | type: powerpool.main.PowerPool
163 |
164 | # The name of the powerpool process on the system. Useful for quickly
165 | # identifying pid with grep and ps
166 | procname: powerpool
167 | # Grace period before outright terminating the process after termination is
168 | # requested
169 | term_timeout: 10
170 |
171 | # Configures standard python loggers. type must be a logging handler provided
172 | # by python std lib
173 | loggers:
174 | - type: StreamHandler
175 | level: NOTSET
176 |
177 | # Can be overridden in any specific Component's logger with log_level attribute
178 | default_component_log_level: INFO
179 |
180 | # A list of modules and hashing algorithms that you'd like to attempt to
181 | # load on startup.
182 | algorithms:
183 | x11: drk_hash.getPoWHash
184 | scrypt: ltc_scrypt.getPoWHash
185 | scryptn: vtc_scrypt.getPoWHash
186 | sha256: cryptokit.sha256d
187 | blake256: blake_hash.getPoWHash
188 |
189 |   # server side extranonce size. synonymous with worker id internally, used
190 |   # to give a unique extranonce to each connection
191 | extranonce_serv_size: 8
192 | # size that clients will generate in bytes
193 | extranonce_size: 4
194 |
--------------------------------------------------------------------------------
/dev-requirements.txt:
--------------------------------------------------------------------------------
1 | nose
2 |
--------------------------------------------------------------------------------
/powerpool/__init__.py:
--------------------------------------------------------------------------------
1 | __version__ = "0.6.2"
2 | __version_info__ = (0, 6, 2)
3 |
--------------------------------------------------------------------------------
/powerpool/agent_server.py:
--------------------------------------------------------------------------------
1 | import json
2 | import socket
3 |
4 | from time import time
5 | from gevent.queue import Queue
6 | from gevent.pool import Pool
7 | from gevent.server import StreamServer
8 | from gevent import with_timeout
9 |
10 | from .server import GenericClient
11 | from .lib import Component, loop
12 | from .exceptions import LoopExit
13 |
14 |
15 | class AgentServer(Component, StreamServer):
16 | """ The agent server that pairs with a single port binding of a stratum
17 |     server. Accepts connections from ppagent and reports more detailed
18 | statistics. """
19 |
20 | # Don't spawn a greenlet to handle creation of clients, we start one for
21 | # reading and one for writing in their own class...
22 | _spawn = None
23 |
24 | def __init__(self, stratum_server):
25 | self.server = stratum_server
26 | self.config = stratum_server.config
27 |
28 | def start(self, *args, **kwargs):
29 | self.logger = self.server.logger
30 | self.listener = (self.config['address'],
31 | self.config['port'] +
32 | self.config['agent']['port_diff'] +
33 | self.server.manager.config['server_number'])
34 | StreamServer.__init__(self, self.listener, spawn=Pool())
35 | self.logger.info("Agent server starting up on {}".format(self.listener))
36 | StreamServer.start(self, *args, **kwargs)
37 | Component.start(self)
38 |
39 | def stop(self, *args, **kwargs):
40 | self.logger.info("Agent server {} stopping".format(self.listener))
41 | StreamServer.close(self)
42 | for serv in self.server.agent_clients.values():
43 | serv.stop()
44 | Component.stop(self)
45 | self.logger.info("Exit")
46 |
47 | def handle(self, sock, address):
48 |         self.logger.info("Receiving agent connection from addr {} on sock {}"
49 | .format(address, sock))
50 | self.server.agent_id_count += 1
51 | client = AgentClient(
52 | sock=sock,
53 | address=address,
54 | id=self.server.agent_id_count,
55 | server=self.server,
56 | config=self.config,
57 | logger=self.logger,
58 | reporter=self.server.reporter)
59 | client.start()
60 |
61 |
62 | class AgentClient(GenericClient):
63 | """ Object representation of a single ppagent agent connected to the server
64 | """
65 |
66 | # Our (very stratum like) protocol errors
67 | errors = {
68 | 20: 'Other/Unknown',
69 | 25: 'Not subscribed',
70 |         30: 'Unknown command',
71 | 31: 'Worker not connected',
72 | 32: 'Already associated',
73 | 33: 'No hello exchanged',
74 | 34: 'Worker not authed',
75 | 35: 'Type not accepted',
76 | 36: 'Invalid format for method',
77 | }
78 |
79 | def __init__(self, sock, address, id, server, config, logger, reporter):
80 | self.logger = logger
81 | self.sock = sock
82 | self.server = server
83 | self.config = config
84 | self.reporter = reporter
85 |
86 | # Seconds before sending keepalive probes
87 | sock.setsockopt(socket.SOL_TCP, socket.TCP_KEEPIDLE, 120)
88 | # Interval in seconds between keepalive probes
89 | sock.setsockopt(socket.SOL_TCP, socket.TCP_KEEPINTVL, 1)
90 |         # Failed keepalive probes before declaring other end dead
91 | sock.setsockopt(socket.SOL_TCP, socket.TCP_KEEPCNT, 5)
92 |
93 | self._disconnected = False
94 | self._authenticated = False
95 | self._client_state = None
96 | self._authed = {}
97 | self._client_version = None
98 | self._connection_time = time()
99 | self._id = id
100 |
101 | # where we put all the messages that need to go out
102 | self.write_queue = Queue()
103 | self.fp = None
104 | self._stopped = False
105 |
106 | @property
107 | def summary(self):
108 | return dict(workers=self._authed,
109 | connection_time=self._connection_time_dt)
110 |
111 | def send_error(self, num=20):
112 | """ Utility for transmitting an error to the client """
113 | err = {'result': None, 'error': (num, self.errors[num], None)}
114 | self.logger.debug("error response: {}".format(err))
115 | self.write_queue.put(json.dumps(err, separators=(',', ':')) + "\n")
116 |
117 | def send_success(self):
118 | """ Utility for transmitting success to the client """
119 | succ = {'result': True, 'error': None}
120 | self.logger.debug("success response: {}".format(succ))
121 | self.write_queue.put(json.dumps(succ, separators=(',', ':')) + "\n")
122 |
123 | @loop(fin='stop', exit_exceptions=(socket.error, ))
124 | def read(self):
125 | if self._disconnected:
126 | self.logger.info("Agent client {} write loop exited, exiting read loop"
127 | .format(self._id))
128 | return
129 |
130 | line = with_timeout(self.config['agent']['timeout'],
131 | self.fp.readline,
132 | timeout_value='timeout')
133 |
134 |         # Time out the read if no data arrives within the configured timeout
135 | if line == 'timeout':
136 | raise LoopExit("Agent client timeout")
137 |
138 | line = line.strip()
139 |
140 |         # Reading from a defunct connection yields an EOF character which gets
141 | # stripped off
142 | if not line:
143 | raise LoopExit("Closed file descriptor encountered")
144 |
145 | try:
146 | data = json.loads(line)
147 | except ValueError:
148 | self.logger.info("Data {} not JSON".format(line))
149 | self.send_error()
150 | return
151 |
152 |         self.logger.debug("Data {} received on client {}".format(data, self._id))
153 |
154 | if 'method' not in data:
155 |             self.logger.info("Unknown action for command {}".format(data))
156 |             return self.send_error()
157 |
158 | meth = data['method'].lower()
159 | if meth == 'hello':
160 | if self._client_version is not None:
161 | self.send_error(32)
162 | return
163 | self._client_version = data.get('params', [0.1])[0]
164 | self.logger.info("Agent {} identified as version {}"
165 | .format(self._id, self._client_version))
166 | elif meth == 'worker.authenticate':
167 | if self._client_version is None:
168 | self.send_error(33)
169 | return
170 | username = data.get('params', [""])[0]
171 | user_worker = self.convert_username(username)
172 | # setup lookup table for easier access from other read sources
173 | self.client_state = self.server.address_worker_lut.get(user_worker)
174 | if not self.client_state:
175 | self.send_error(31)
176 | return
177 |
178 | # here's where we do some top security checking...
179 | self._authed[username] = user_worker
180 | self.send_success()
181 | self.logger.info("Agent {} authenticated worker {}"
182 | .format(self._id, username))
183 | elif meth == "stats.submit":
184 | if self._client_version is None:
185 | self.send_error(33)
186 | return
187 |
188 | if data.get('params', [''])[0] not in self._authed:
189 | self.send_error(34)
190 | return
191 |
192 | if 'params' not in data or len(data['params']) != 4:
193 | self.send_error(36)
194 | return
195 |
196 | user_worker, typ, data, stamp = data['params']
197 | # lookup our authed usernames translated creds
198 | address, worker = self._authed[user_worker]
199 | if typ in self.config['agent']['accepted_types']:
200 | self.reporter.agent_send(address, worker, typ, data, stamp)
201 | self.send_success()
202 | self.logger.info("Agent {} transmitted payload for worker "
203 | "{}.{} of type {} and length {}"
204 | .format(self._id, address, worker, typ, len(line)))
205 | else:
206 | self.send_error(35)
207 |
--------------------------------------------------------------------------------
/powerpool/entry.py:
--------------------------------------------------------------------------------
1 | from powerpool.main import main
2 |
3 |
4 | if __name__ == "__main__":
5 | main()
6 |
--------------------------------------------------------------------------------
/powerpool/exceptions.py:
--------------------------------------------------------------------------------
1 | class ConfigurationError(Exception):
2 | pass
3 |
4 |
5 | class LoopExit(BaseException):
6 | pass
7 |
8 |
9 | class RPCException(Exception):
10 | pass
11 |
--------------------------------------------------------------------------------
/powerpool/jobmanagers/__init__.py:
--------------------------------------------------------------------------------
1 | from .base import NodeMonitorMixin, Jobmanager
2 | from .monitor_aux_network import MonitorAuxNetwork
3 | from .monitor_network import MonitorNetwork
4 |
--------------------------------------------------------------------------------
/powerpool/jobmanagers/base.py:
--------------------------------------------------------------------------------
1 | import urllib3
2 | import time
3 |
4 | from gevent.event import Event
5 | from cryptokit.rpc import CoinRPCException, CoinserverRPC
6 | from urllib3.connection import HTTPConnection
7 |
8 | from ..lib import loop, Component
9 | from ..exceptions import RPCException
10 |
11 |
12 | class Jobmanager(Component):
13 | pass
14 |
15 |
16 | class TimedHTTPConnection(HTTPConnection):
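17 |     """ HTTPConnection that records the round trip time of the last
18 |     request and when the connection was established, for display in the
19 |     status monitor. """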
17 | _last_rtt = 0.0
18 | _request_start = 0.0
19 | _connected_since = 0.0
20 |
21 | def connect(self):
22 | self._connected_since = time.time()
23 | return HTTPConnection.connect(self)
24 |
25 | def request(self, *args, **kwargs):
26 | self._request_start = time.time()
27 | return HTTPConnection.request(self, *args, **kwargs)
28 |
29 | def getresponse(self, *args, **kwargs):
30 | ret = HTTPConnection.getresponse(self, *args, **kwargs)
31 | self._last_rtt = time.time() - self._request_start
32 | return ret
33 |
34 | @property
35 | def status(self):
36 | return dict(last_rtt=self._last_rtt,
37 | connected_since=self._connected_since)
38 |
39 |
40 | class WrappedCoinserverRPC(CoinserverRPC):
41 | def __init__(self, *args, **kwargs):
42 | CoinserverRPC.__init__(self, *args, **kwargs)
43 | self._conn.ConnectionCls = TimedHTTPConnection
44 | self.last_getinfo = None
45 | self.name = None
46 |
47 | def status(self):
48 | ret = dict(last_getinfo=self.last_getinfo,
49 | connections=[])
50 | for connection in self._conn.pool.queue:
51 | if connection is None:
52 | continue
53 | ret['connections'].append(connection.status)
54 | return ret
55 |
56 | def getinfo(self, *args, **kwargs):
57 | res = CoinserverRPC.__getattr__(self, "getinfo")
58 | res = res(*args, **kwargs)
59 | self.last_getinfo = res
60 | self.last_getinfo['time'] = time.time()
61 | return res
62 |
63 |
64 | class NodeMonitorMixin(object):
65 | def __init__(self):
66 | self._down_connections = [] # list of RPC conns that are down
67 | self._poll_connection = None # our currently active RPC connection
68 | self._live_connections = [] # list of live RPC connections
69 | self._connected = Event() # An event type status flag
70 |
71 | def _start_monitor_nodes(self):
72 | for serv in self.config['coinservs']:
73 | conn = WrappedCoinserverRPC(
74 | "http://{0}:{1}@{2}:{3}/"
75 | .format(serv['username'],
76 | serv['password'],
77 | serv['address'],
78 | serv['port']),
79 | pool_kwargs=dict(maxsize=serv.get('maxsize', 10)))
80 | conn.config = serv
81 | conn.name = "{}:{}".format(serv['address'], serv['port'])
82 | self._down_connections.append(conn)
83 |
84 | @loop(setup='_start_monitor_nodes', interval='rpc_ping_int')
85 | def _monitor_nodes(self):
86 | remlist = []
87 | for conn in self._down_connections:
88 | try:
89 | conn.getinfo()
90 | except (urllib3.exceptions.HTTPError, CoinRPCException, ValueError):
91 | self.logger.warn("RPC connection {} still down!".format(conn.name))
92 | continue
93 |
94 | self._live_connections.append(conn)
95 | remlist.append(conn)
96 | self.logger.info("Now connected to {} RPC Server {}."
97 | .format(self.config['currency'], conn.name))
98 |
99 | # if this connection has a higher priority than current
100 | if self._poll_connection is not None:
101 | curr_poll = self._poll_connection.config['poll_priority']
102 | if conn.config['poll_priority'] > curr_poll:
103 | self.logger.info("RPC connection {} has higher poll priority than "
104 | "current poll connection, switching..."
105 | .format(conn.name))
106 | self._poll_connection = conn
107 | else:
108 | self._connected.set()
109 | self._poll_connection = conn
110 |                 self.logger.info("RPC connection {} is now the poll connection"
111 | .format(conn.name))
112 |
113 | for conn in remlist:
114 | self._down_connections.remove(conn)
115 |
116 | def down_connection(self, conn):
117 |         """ Called when a connection goes down. Removes it from the list of
118 |         live connections and picks a new poll connection if needed. """
119 | if not conn:
120 | self.logger.warn("Tried to down a NoneType connection")
121 | return
122 |
123 | if conn in self._live_connections:
124 | self._live_connections.remove(conn)
125 |
126 | if self._poll_connection is conn:
127 | # find the next best poll connection
128 | try:
129 | self._poll_connection = min(self._live_connections,
130 | key=lambda x: x.config['poll_priority'])
131 | except ValueError:
132 | self._poll_connection = None
133 | self._connected.clear()
134 | self.logger.error("No RPC connections available for polling!!!")
135 | else:
136 | self.logger.warn("RPC connection {} switching to poll_connection "
137 | "after {} went down!"
138 | .format(self._poll_connection.name, conn.name))
139 |
140 | if conn not in self._down_connections:
141 | self.logger.info("Server at {} now reporting down".format(conn.name))
142 | self._down_connections.append(conn)
143 |
144 | def call_rpc(self, command, *args, **kwargs):
145 | self._connected.wait()
146 | try:
147 | return getattr(self._poll_connection, command)(*args, **kwargs)
148 | except (urllib3.exceptions.HTTPError, CoinRPCException) as e:
149 | self.logger.warn("Unable to perform {} on RPC server. Got: {}"
150 | .format(command, e))
151 | self.down_connection(self._poll_connection)
152 | raise RPCException(e)
153 |
--------------------------------------------------------------------------------
/powerpool/jobmanagers/monitor_aux_network.py:
--------------------------------------------------------------------------------
1 | import gevent
2 | import socket
3 | import time
4 | import datetime
5 |
6 | from binascii import hexlify
7 | from cryptokit.rpc import CoinRPCException
8 | from collections import deque
9 | from cryptokit.util import pack
10 | from cryptokit.bitcoin import data as bitcoin_data
11 | from gevent import sleep
12 | from gevent.event import Event
13 |
14 | from . import NodeMonitorMixin, Jobmanager
15 | from ..exceptions import RPCException
16 | from ..lib import loop, REQUIRED
17 |
18 |
19 | class MonitorAuxNetwork(Jobmanager, NodeMonitorMixin):
20 | gl_methods = ['_monitor_nodes', '_check_new_jobs']
21 | one_min_stats = ['work_restarts', 'new_jobs']
22 |
23 | defaults = dict(enabled=False,
24 | work_interval=1,
25 | signal=None,
26 | rpc_ping_int=2,
27 | algo=REQUIRED,
28 | currency=REQUIRED,
29 | coinservs=REQUIRED,
30 | flush=False,
31 | send=True)
32 |
33 | def __init__(self, config):
34 | self._configure(config)
35 | NodeMonitorMixin.__init__(self)
36 |
37 | self.new_job = Event()
38 | self.last_signal = 0.0
39 | self.last_work = {'hash': None}
40 | self.block_stats = dict(accepts=0,
41 | rejects=0,
42 | stale=0,
43 | solves=0,
44 | last_solve_height=None,
45 | last_solve_time=None,
46 | last_solve_worker=None)
47 | self.current_net = dict(difficulty=None, height=None, last_block=0.0)
48 | self.recent_blocks = deque(maxlen=15)
49 |
50 | def start(self):
51 | super(MonitorAuxNetwork, self).start()
52 | if self.config['signal']:
53 | self.logger.info("Listening for push block notifs on signal {}"
54 | .format(self.config['signal']))
55 | gevent.signal(self.config['signal'],
56 | self._check_new_jobs,
57 | signal=True,
58 | _single_exec=True)
59 |
60 | def found_block(self, address, worker, header, coinbase_raw, job, start):
61 | aux_data = job.merged_data[self.config['currency']]
62 | new_height = aux_data['height'] + 1
63 | self.block_stats['solves'] += 1
64 | stale = new_height <= self.current_net['height']
65 |
66 | self.logger.info("New {} Aux block at height {}"
67 | .format(self.config['currency'], new_height))
68 | aux_block = (
69 | pack.IntType(256, 'big').pack(aux_data['hash']).encode('hex'),
70 | bitcoin_data.aux_pow_type.pack(dict(
71 | merkle_tx=dict(
72 | tx=bitcoin_data.tx_type.unpack(coinbase_raw),
73 | block_hash=bitcoin_data.hash256(header),
74 | merkle_link=job.merkle_link,
75 | ),
76 | merkle_link=bitcoin_data.calculate_merkle_link(aux_data['hashes'],
77 | aux_data['index']),
78 | parent_block_header=bitcoin_data.block_header_type.unpack(header),
79 | )).encode('hex'),
80 | )
81 |
82 | retries = 0
83 | while retries < 5:
84 | retries += 1
85 | res = False
86 | try:
87 | res = self.call_rpc('getauxblock', *aux_block)
88 | except (CoinRPCException, socket.error, ValueError) as e:
89 | self.logger.error("{} Aux block failed to submit to the server!"
90 | .format(self.config['currency']), exc_info=True)
91 |                 self.logger.error(getattr(e, 'error', None))
92 |
93 | if res is True:
94 | # Record it for the stats
95 | self.block_stats['accepts'] += 1
96 | self.recent_blocks.append(
97 | dict(height=new_height, timestamp=int(time.time())))
98 |
99 | # submit it to our reporter if configured to do so
100 | if self.config['send']:
101 | if start:
102 | submission_time = time.time() - start
103 | self.manager.log_event(
104 | "{name}.block_submission_{curr}:{t}|ms"
105 | .format(name=self.manager.config['procname'],
106 | curr=self.config['currency'],
107 | t=submission_time * 1000))
108 | hsh = hexlify(pack.IntType(256, 'big').pack(aux_data['hash']))
109 | self.logger.info(
110 | "{} BLOCK {}:{} accepted after {}"
111 | .format(self.config['currency'], hsh, new_height,
112 | submission_time))
113 |
114 | # A bit of a mess that grabs the required information for
115 | # reporting the new block. Pretty failsafe so at least
116 |                 # partial information will be reported regardless
117 | block = None
118 | amount = 0
119 | try:
120 | block = self.call_rpc('getblock', hsh)
121 | except Exception:
122 | self.logger.info("", exc_info=True)
123 | else:
124 | try:
125 | trans = self.call_rpc('gettxout', block['tx'][0], 0)
126 | amount = trans['value']
127 | except Exception:
128 | self.logger.info("", exc_info=True)
129 |
130 | self.block_stats['last_solve_hash'] = hsh
131 | return dict(address=address,
132 | height=new_height,
133 | total_subsidy=int(amount * 100000000),
134 | fees=None,
135 | hex_bits="%0.6X" % bitcoin_data.FloatingInteger.from_target_upper_bound(aux_data['target']).bits,
136 | hex_hash=hsh,
137 | currency=self.config['currency'],
138 | merged=True,
139 | algo=self.config['algo'],
140 | worker=worker)
141 |
142 | break # break retry loop if success
143 | else:
144 | self.logger.error(
145 | "{} Aux Block failed to submit to the server, "
146 | "server returned {}!".format(self.config['currency'], res),
147 | exc_info=True)
148 | sleep(1)
149 | else:
150 | if stale:
151 | self.block_stats['stale'] += 1
152 | else:
153 | self.block_stats['rejects'] += 1
154 |
155 | self.block_stats['last_solve_height'] = aux_data['height'] + 1
156 | self.block_stats['last_solve_worker'] = "{}.{}".format(address, worker)
157 | self.block_stats['last_solve_time'] = datetime.datetime.utcnow()
158 |
159 | @loop(interval='work_interval')
160 | def _check_new_jobs(self, signal=False):
161 | if signal:
162 | self.last_signal = time.time()
163 |             self.logger.info("Updating {} aux work from a signal received!"
164 | .format(self.config['currency']))
165 |
166 | try:
167 | auxblock = self.call_rpc('getauxblock')
168 | except RPCException:
169 | sleep(2)
170 | return False
171 |
172 | hash = int(auxblock['hash'], 16)
173 | if hash != self.last_work['hash']:
174 | # We fetch the block height so we can see if the hash changed
175 | # because of a new network block, or because new transactions
176 | try:
177 | height = self.call_rpc('getblockcount')
178 | except RPCException:
179 | sleep(2)
180 | return False
181 |
182 | target_int = pack.IntType(256).unpack(auxblock['target'].decode('hex'))
183 | self.last_work.update(dict(
184 | hash=hash,
185 | target=target_int,
186 | type=self.config['currency'],
187 | height=height,
188 | found_block=self.found_block,
189 | chainid=auxblock['chainid']
190 | ))
191 |
192 | # only push the job if there's a new block height discovered.
193 | new_block = False
194 | if self.current_net['height'] != height:
195 | self.current_net['height'] = height
196 | self._incr("work_restarts")
197 | self._incr("new_jobs")
198 | self.new_job.flush = self.config['flush']
199 | new_block = True
200 | else:
201 | self._incr("new_jobs")
202 | self.new_job.flush = False
203 | self.new_job.set()
204 | self.new_job.clear()
205 |
206 | if new_block:
207 | self.current_net['last_block'] = time.time()
208 | self.current_net['difficulty'] = bitcoin_data.target_to_difficulty(target_int)
209 | self.logger.info("New aux block announced! Diff {:,.4f}. Height {:,}"
210 | .format(self.current_net['difficulty'], height))
211 |
212 | return True
213 |
214 | @property
215 | def status(self):
216 | ret = dict(block_stats=self.block_stats,
217 | currency=self.config['currency'],
218 | last_work=self.last_work,
219 | last_signal=self.last_signal,
220 | live_coinservers=len(self._live_connections),
221 | down_coinservers=len(self._down_connections),
222 | coinservers={},
223 | current_net=self.current_net)
224 | for connection in self._live_connections:
225 | st = connection.status()
226 | st['status'] = 'live'
227 | ret['coinservers'][connection.name] = st
228 | for connection in self._down_connections:
229 | st = connection.status()
230 | st['status'] = 'down'
231 | ret['coinservers'][connection.name] = st
232 | return ret
233 |
--------------------------------------------------------------------------------
/powerpool/jobmanagers/monitor_network.py:
--------------------------------------------------------------------------------
1 | import struct
2 | import gevent
3 | import socket
4 | import time
5 | import datetime
6 |
7 | from binascii import unhexlify, hexlify
8 | from collections import deque
9 | from cryptokit import bits_to_difficulty
10 | from cryptokit.rpc import CoinRPCException
11 | from cryptokit.transaction import Transaction, Input, Output
12 | from cryptokit.block import BlockTemplate
13 | from cryptokit.bitcoin import data as bitcoin_data
14 | from cryptokit.base58 import get_bcaddress_version
15 | from gevent import sleep, spawn
16 | from gevent.event import Event
17 |
18 | from . import NodeMonitorMixin, Jobmanager
19 | from ..lib import loop, REQUIRED
20 | from ..exceptions import ConfigurationError, RPCException
21 |
22 |
23 | class MonitorNetwork(Jobmanager, NodeMonitorMixin):
24 | one_min_stats = ['work_restarts', 'new_jobs', 'work_pushes']
25 | defaults = config = dict(coinservs=REQUIRED,
26 | diff1=0x0000FFFF00000000000000000000000000000000000000000000000000000000,
27 | hashes_per_share=0xFFFF,
28 | merged=tuple(),
29 | block_poll=0.2,
30 | job_refresh=15,
31 | rpc_ping_int=2,
32 | pow_block_hash=False,
33 | poll=None,
34 | currency=REQUIRED,
35 | algo=REQUIRED,
36 | pool_address='',
37 | signal=None,
38 | payout_drk_mn=True,
39 | max_blockheight=None)
40 |
41 | def __init__(self, config):
42 | NodeMonitorMixin.__init__(self)
43 | self._configure(config)
44 | if get_bcaddress_version(self.config['pool_address']) is None:
45 | raise ConfigurationError("No valid pool address configured! Exiting.")
46 |
47 | # Since some MonitorNetwork objs are polling and some aren't....
48 | self.gl_methods = ['_monitor_nodes', '_check_new_jobs']
49 |
50 | # Aux network monitors (merged mining)
51 | self.auxmons = []
52 |
53 | # internal vars
54 | self._last_gbt = {}
55 | self._job_counter = 0 # a unique job ID counter
56 |
57 | # Currently active jobs keyed by their unique ID
58 | self.jobs = {}
59 | self.latest_job = None # The last job that was generated
60 | self.new_job = Event()
61 | self.last_signal = 0.0
62 |
63 | # general current network stats
64 | self.current_net = dict(difficulty=None,
65 | height=None,
66 | last_block=0.0,
67 | prev_hash=None,
68 | transactions=None,
69 | subsidy=None)
70 | self.block_stats = dict(accepts=0,
71 | rejects=0,
72 | solves=0,
73 | last_solve_height=None,
74 | last_solve_time=None,
75 | last_solve_worker=None)
76 | self.recent_blocks = deque(maxlen=15)
77 |
78 | # Run the looping height poller if we aren't getting push notifications
79 | if (not self.config['signal'] and self.config['poll'] is None) or self.config['poll']:
80 | self.gl_methods.append('_poll_height')
81 |
82 | @property
83 | def status(self):
84 | """ For display in the http monitor """
85 | ret = dict(net_state=self.current_net,
86 | block_stats=self.block_stats,
87 | last_signal=self.last_signal,
88 | currency=self.config['currency'],
89 | live_coinservers=len(self._live_connections),
90 | down_coinservers=len(self._down_connections),
91 | coinservers={},
92 | job_count=len(self.jobs))
93 | for connection in self._live_connections:
94 | st = connection.status()
95 | st['status'] = 'live'
96 | ret['coinservers'][connection.name] = st
97 | for connection in self._down_connections:
98 | st = connection.status()
99 | st['status'] = 'down'
100 | ret['coinservers'][connection.name] = st
101 | return ret
102 |
103 | def start(self):
104 | Jobmanager.start(self)
105 |
106 | if self.config['signal']:
107 | self.logger.info("Listening for push block notifs on signal {}"
108 | .format(self.config['signal']))
109 | gevent.signal(self.config['signal'], self.getblocktemplate, signal=True)
110 |
111 | # Find desired auxmonitors
112 | self.config['merged'] = set(self.config['merged'])
113 | found_merged = set()
114 |
115 | for mon in self.manager.component_types['Jobmanager']:
116 | if mon.key in self.config['merged']:
117 | self.auxmons.append(mon)
118 | found_merged.add(mon.key)
119 | mon.new_job.rawlink(self.new_merged_work)
120 |
121 | for monitor in self.config['merged'] - found_merged:
122 | self.logger.error("Unable to locate Auxmonitor(s) '{}'".format(monitor))
123 |
124 | def found_block(self, raw_coinbase, address, worker, hash_hex, header, job, start):
125 | """ Submit a valid block (hopefully!) to the RPC servers """
126 | block = hexlify(job.submit_serial(header, raw_coinbase=raw_coinbase))
127 | result = {}
128 |
129 | def record_outcome(success):
130 | # If we've already recorded a result, then return
131 | if result:
132 | return
133 |
134 | if start:
135 | submission_time = time.time() - start
136 | self.logger.info(
137 | "Recording block submission outcome {} after {}"
138 | .format(success, submission_time))
139 | if success:
140 | self.manager.log_event(
141 | "{name}.block_submission_{curr}:{t}|ms"
142 | .format(name=self.manager.config['procname'],
143 | curr=self.config['currency'],
144 | t=submission_time * 1000))
145 |
146 | if success:
147 | self.block_stats['accepts'] += 1
148 | self.recent_blocks.append(
149 | dict(height=job.block_height, timestamp=int(time.time())))
150 | else:
151 | self.block_stats['rejects'] += 1
152 | self.logger.info("{} BLOCK {}:{} REJECTED"
153 | .format(self.config['currency'], hash_hex,
154 | job.block_height))
155 |
156 | result.update(dict(
157 | address=address,
158 | height=job.block_height,
159 | total_subsidy=job.total_value,
160 | fees=job.fee_total,
161 | hex_bits=hexlify(job.bits),
162 | hex_hash=hash_hex,
163 | worker=worker,
164 | algo=job.algo,
165 | merged=False,
166 | success=success,
167 | currency=self.config['currency']
168 | ))
169 |
170 | def submit_block(conn):
171 | retries = 0
172 | while retries < 5:
173 | retries += 1
174 | res = "failed"
175 | try:
176 | res = conn.submitblock(block)
177 | except (CoinRPCException, socket.error, ValueError) as e:
178 | self.logger.info("Block failed to submit to the server {} with submitblock! {}"
179 | .format(conn.name, e))
180 | if getattr(e, 'error', {}).get('code', 0) != -8:
181 |                     self.logger.error(getattr(e, 'error', e), exc_info=True)
182 | try:
183 | res = conn.getblocktemplate({'mode': 'submit', 'data': block})
184 | except (CoinRPCException, socket.error, ValueError) as e:
185 | self.logger.error("Block failed to submit to the server {}!"
186 | .format(conn.name), exc_info=True)
187 |                         self.logger.error(getattr(e, 'error', e))
188 |
189 | if res is None:
190 | self.logger.info("{} BLOCK {}:{} accepted by {}"
191 | .format(self.config['currency'], hash_hex,
192 | job.block_height, conn.name))
193 | record_outcome(True)
194 | break # break retry loop if success
195 | else:
196 | self.logger.error(
197 | "Block failed to submit to the server {}, "
198 | "server returned {}!".format(conn.name, res),
199 | exc_info=True)
200 | sleep(1)
201 | self.logger.info("Retry {} for connection {}".format(retries, conn.name))
202 |
203 | for tries in xrange(200):
204 | if not self._live_connections:
205 | self.logger.error("No live connections to submit new block to!"
206 | " Retry {} / 200.".format(tries))
207 | sleep(0.1)
208 | continue
209 |
210 | gl = []
211 | for conn in self._live_connections:
212 | # spawn a new greenlet for each submission to do them all async.
213 | # lower orphan chance
214 | gl.append(spawn(submit_block, conn))
215 |
216 | gevent.joinall(gl)
217 |             # If none of the submission threads were successful then record a
218 | # failure
219 | if not result:
220 | record_outcome(False)
221 | break
222 |
223 | self.logger.log(35, "Valid network block identified!")
224 | self.logger.info("New block at height {} with hash {} and subsidy {}"
225 | .format(job.block_height,
226 | hash_hex,
227 | job.total_value))
228 |
229 | self.block_stats['solves'] += 1
230 | self.block_stats['last_solve_hash'] = hash_hex
231 | self.block_stats['last_solve_height'] = job.block_height
232 | self.block_stats['last_solve_worker'] = "{}.{}".format(address, worker)
233 | self.block_stats['last_solve_time'] = datetime.datetime.utcnow()
234 |
235 | if __debug__:
236 | self.logger.debug("New block hex dump:\n{}".format(block))
237 | self.logger.debug("Coinbase: {}".format(str(job.coinbase.to_dict())))
238 | for trans in job.transactions:
239 | self.logger.debug(str(trans.to_dict()))
240 |
241 | # Pass back all the results to the reporter who's waiting
242 | return result
243 |
244 | @loop(interval='block_poll')
245 | def _poll_height(self):
246 | try:
247 | height = self.call_rpc('getblockcount')
248 | except RPCException:
249 | return
250 |
251 | if self.current_net['height'] != height:
252 | self.logger.info("New block on main network detected with polling")
253 | self.current_net['height'] = height
254 | self.getblocktemplate(new_block=True)
255 |
256 | @loop(interval='job_refresh')
257 | def _check_new_jobs(self):
258 | self.getblocktemplate()
259 |
260 | def getblocktemplate(self, new_block=False, signal=False):
261 | if signal:
262 | self.last_signal = time.time()
263 | try:
264 | # request local memory pool and load it in
265 | bt = self.call_rpc('getblocktemplate',
266 | {'capabilities': [
267 | 'coinbasevalue',
268 | 'coinbase/append',
269 | 'coinbase',
270 | 'generation',
271 | 'time',
272 | 'transactions/remove',
273 | 'prevblock',
274 | ]})
275 | except RPCException:
276 | return False
277 |
278 | if self._last_gbt.get('height') != bt['height']:
279 | new_block = True
280 |         # If this was from a push signal and the block is new, log it
281 | if signal and new_block:
282 | self.logger.info("Push block signal notified us of a new block!")
283 | elif signal:
284 | self.logger.info("Push block signal notified us of a block we "
285 | "already know about!")
286 | return
287 |
288 | # generate a new job if we got some new work!
289 | dirty = False
290 | if bt != self._last_gbt:
291 | self._last_gbt = bt
292 | self._last_gbt['update_time'] = time.time()
293 | dirty = True
294 |
295 | if new_block or dirty:
296 | # generate a new job and push it if there's a new block on the
297 | # network
298 | self.generate_job(push=new_block, flush=new_block, new_block=new_block)
299 |
300 | def new_merged_work(self, event):
301 | self.generate_job(push=True, flush=event.flush, network='aux')
302 |
303 | def generate_job(self, push=False, flush=False, new_block=False, network='main'):
304 | """ Creates a new job for miners to work on. Push will trigger an
305 | event that sends new work but doesn't force a restart. If flush is
306 | true a job restart will be triggered. """
307 |
308 |         # aux monitors will often call this at startup, before any GBT data exists
309 | if not self._last_gbt:
310 | self.logger.warn("Cannot generate new job, missing last GBT info")
311 | return
312 |
313 | if self.auxmons:
314 | merged_work = {}
315 | auxdata = {}
316 | for auxmon in self.auxmons:
317 | if auxmon.last_work['hash'] is None:
318 | continue
319 | merged_work[auxmon.last_work['chainid']] = dict(
320 | hash=auxmon.last_work['hash'],
321 |                     target=auxmon.last_work['target']
322 | )
323 |
324 | tree, size = bitcoin_data.make_auxpow_tree(merged_work)
325 | mm_hashes = [merged_work.get(tree.get(i), dict(hash=0))['hash']
326 | for i in xrange(size)]
327 | mm_data = '\xfa\xbemm'
328 | mm_data += bitcoin_data.aux_pow_coinbase_type.pack(dict(
329 | merkle_root=bitcoin_data.merkle_hash(mm_hashes),
330 | size=size,
331 | nonce=0,
332 | ))
333 |
334 | for auxmon in self.auxmons:
335 | if auxmon.last_work['hash'] is None:
336 | continue
337 | data = dict(target=auxmon.last_work['target'],
338 | hash=auxmon.last_work['hash'],
339 | height=auxmon.last_work['height'],
340 | found_block=auxmon.found_block,
341 | index=mm_hashes.index(auxmon.last_work['hash']),
342 | type=auxmon.last_work['type'],
343 | hashes=mm_hashes)
344 | auxdata[auxmon.config['currency']] = data
345 | else:
346 | auxdata = {}
347 | mm_data = None
348 |
349 | # here we recalculate the current merkle branch and partial
350 | # coinbases for passing to the mining clients
351 | coinbase = Transaction()
352 | coinbase.version = 2
353 | # create a coinbase input with encoded height and padding for the
354 | # extranonces so script length is accurate
355 | extranonce_length = (self.manager.config['extranonce_size'] +
356 | self.manager.config['extranonce_serv_size'])
357 | coinbase.inputs.append(
358 | Input.coinbase(self._last_gbt['height'],
359 | addtl_push=[mm_data] if mm_data else [],
360 | extra_script_sig=b'\0' * extranonce_length))
361 |
362 | # Payout Darkcoin masternodes
363 | mn_enforcement = self._last_gbt.get('enforce_masternode_payments', True)
364 | if (self.config['payout_drk_mn'] is True or mn_enforcement is True) \
365 | and self._last_gbt.get('payee', '') != '':
366 | # Grab the darkcoin payout amount, default to 20%
367 | payout = self._last_gbt.get('payee_amount', self._last_gbt['coinbasevalue'] / 5)
368 | self._last_gbt['coinbasevalue'] -= payout
369 | coinbase.outputs.append(
370 | Output.to_address(payout, self._last_gbt['payee']))
371 | self.logger.info("Paying out masternode at addr {}. Payout {}. Blockval reduced to {}"
372 | .format(self._last_gbt['payee'], payout, self._last_gbt['coinbasevalue']))
373 |
374 | # simple output to the proper address and value
375 | coinbase.outputs.append(
376 | Output.to_address(self._last_gbt['coinbasevalue'], self.config['pool_address']))
377 |
378 | job_id = hexlify(struct.pack(str("I"), self._job_counter))
379 | bt_obj = BlockTemplate.from_gbt(self._last_gbt,
380 | coinbase,
381 | extranonce_length,
382 | [Transaction(unhexlify(t['data']), fees=t['fee'])
383 | for t in self._last_gbt['transactions']])
384 | # add in our merged mining data
385 | if mm_data:
386 | hashes = [bitcoin_data.hash256(tx.raw) for tx in bt_obj.transactions]
387 | bt_obj.merkle_link = bitcoin_data.calculate_merkle_link([None] + hashes, 0)
388 | bt_obj.merged_data = auxdata
389 | bt_obj.job_id = job_id
390 | bt_obj.diff1 = self.config['diff1']
391 | bt_obj.algo = self.config['algo']
392 | bt_obj.currency = self.config['currency']
393 | bt_obj.pow_block_hash = self.config['pow_block_hash']
394 | bt_obj.block_height = self._last_gbt['height']
395 | bt_obj.acc_shares = set()
396 | bt_obj.flush = flush
397 | bt_obj.found_block = self.found_block
398 |
399 | # Push the fresh job to users after updating details
400 | self._job_counter += 1
401 | if flush:
402 | self.jobs.clear()
403 | self.jobs[job_id] = bt_obj
404 | self.latest_job = bt_obj
405 | if push or flush:
406 | self.new_job.job = bt_obj
407 | self.new_job.set()
408 | self.new_job.clear()
409 |
410 | self.logger.info("{}: New block template with {:,} trans. "
411 | "Diff {:,.4f}. Subsidy {:,.2f}. Height {:,}. "
412 | "Merged: {}"
413 | .format("FLUSH" if flush else "PUSH",
414 | len(self._last_gbt['transactions']),
415 | bits_to_difficulty(self._last_gbt['bits']),
416 | self._last_gbt['coinbasevalue'] / 100000000.0,
417 | self._last_gbt['height'],
418 | ', '.join(auxdata.keys())))
419 |
420 | # Stats and notifications now that it's pushed
421 | if flush:
422 | self._incr('work_restarts')
423 | self._incr('work_pushes')
424 | self.logger.info("New {} network block announced! Wiping previous"
425 | " jobs and pushing".format(network))
426 | elif push:
427 | self.logger.info("New {} network block announced, pushing new job!"
428 | .format(network))
429 | self._incr('work_pushes')
430 |
431 | if new_block:
432 | hex_bits = hexlify(bt_obj.bits)
433 | self.current_net['difficulty'] = bits_to_difficulty(hex_bits)
434 | self.current_net['subsidy'] = bt_obj.total_value
435 | self.current_net['height'] = bt_obj.block_height - 1
436 | self.current_net['last_block'] = time.time()
437 | self.current_net['prev_hash'] = bt_obj.hashprev_be_hex
438 | self.current_net['transactions'] = len(bt_obj.transactions)
439 |
440 | self.manager.log_event(
441 | "{name}.{curr}.difficulty:{diff}|g\n"
442 | "{name}.{curr}.subsidy:{subsidy}|g\n"
443 | "{name}.{curr}.job_generate:{t}|g\n"
444 | "{name}.{curr}.height:{height}|g"
445 | .format(name=self.manager.config['procname'],
446 | curr=self.config['currency'],
447 | diff=self.current_net['difficulty'],
448 | subsidy=bt_obj.total_value,
449 | height=bt_obj.block_height - 1,
450 | t=(time.time() - self._last_gbt['update_time']) * 1000))
451 | self._incr('new_jobs')
452 |
--------------------------------------------------------------------------------
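
The difficulty figures logged above are derived from targets; the real
conversions live in cryptokit (bits_to_difficulty, target_to_difficulty). As a
rough sketch of the relationship, using the scrypt-style diff1 constant from
the MonitorNetwork defaults (bits_to_target here assumes a compact-bits
exponent of at least 3):

DIFF1 = 0x0000FFFF00000000000000000000000000000000000000000000000000000000

def target_to_difficulty(target):
    # difficulty is the ratio of the diff1 target to the current target,
    # so numerically smaller targets mean higher difficulty
    return float(DIFF1) / target

def bits_to_target(bits_hex):
    # compact 'bits' encoding: 1 exponent byte followed by a 3 byte mantissa
    bits = int(bits_hex, 16)
    return (bits & 0xFFFFFF) << (8 * ((bits >> 24) - 3))

assert target_to_difficulty(DIFF1) == 1.0
assert target_to_difficulty(DIFF1 >> 4) == 16.0
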
/powerpool/lib.py:
--------------------------------------------------------------------------------
1 | import time
2 | import logging
3 |
4 | from copy import deepcopy
5 | from collections import deque
6 | from gevent import sleep, spawn
7 | from functools import wraps
8 |
9 | from .utils import recursive_update
10 | from .exceptions import ConfigurationError
11 |
12 |
13 | # A sufficiently random number to not collide with real default requirement values
14 | REQUIRED = 2345987234589723495872345
15 | manager = None
16 |
17 |
18 | def loop(interval=None, precise=False, fin=None, exit_exceptions=None, setup=None, backoff=1):
19 | """ Causes the function to loop infinitely at the specified interval.
20 |
21 | Precise allows timing to follow the desired interval as closely as
22 | possible. For example, we might desire a function to execute as close to
23 | 1 second after the minute. Simply sleeping for 60 seconds every iteration
24 | doesn't take into account the execution time of the loop and inaccuracy of
25 | the sleep function, so we adjust the amount we sleep to meet our desired
26 |     time. Using an example fictitious execution log...
27 | t = 0.0
28 | ... loop operations happen ...
29 | t = 0.3
30 | sleep(10)
31 | t = 10.3
32 | ... loop operations happen ...
33 | t = 10.6
34 | sleep(10)
35 |
36 |     With a precise setting of 10 and an interval of 10.
37 | t = 0.00
38 | ... loop operation happens ...
39 | t = 0.30
40 |     sleep((t // 10) * 10 + 10 - t) # 9.7
41 | t = 10.01
42 | ... loop operation happens ...
43 | t = 10.31
44 |     sleep((t // 10) * 10 + 10 - t) # 9.69
45 |
46 | And our desired interval of every ten seconds is maintained with good accuracy.
47 | Setting interval larger than precise allows us to execute some amount of
48 | time after a certain period, for example with an interval of 61 and a
49 | precise of 60 we will execute as close to 1 second after the minute as
50 |     possible. Precise cannot be larger than interval, otherwise negative sleep
51 |     values will be generated...
52 |
53 | Exit exceptions are exceptions that will cause the loop to end. By default
54 | all exceptions of subclass Exception are absorbed and the loop is continued.
55 |
56 |     `fin` should define either a class method name (as a string) or a
57 |     callable that will be called with an exception instance matching
58 |     `exit_exceptions`, or with None when the loop exits cleanly.
59 |
60 | `setup` should define either a class method name (as a string) or a
61 | callable that will be called on loop entry.
62 | """
63 | def loop_deco(f):
64 | @wraps(f)
65 | def wrapper(self, *args, **kwargs):
66 | if kwargs.pop('_single_exec', False):
67 | return f(self, *args, **kwargs)
68 | if isinstance(interval, basestring):
69 | interval_val = self.config[interval]
70 | else:
71 | interval_val = interval
72 | if precise and not interval:
73 | raise ValueError("Cannot perform precise timing without an interval")
74 | if precise is True:
75 | precise_val = interval_val
76 | else:
77 | precise_val = precise
78 |
79 |             # Resolve string names into properly bound class methods
80 | if isinstance(fin, basestring):
81 | fin_func = getattr(self, fin)
82 | else:
83 | fin_func = fin
84 | if isinstance(setup, basestring):
85 | setup_func = getattr(self, setup)
86 | else:
87 | setup_func = setup
88 |
89 | if setup_func:
90 | setup_func()
91 |
92 | res = None
93 | exit_exc = None
94 | try:
95 | while True:
96 | try:
97 | res = f(self, *args, **kwargs)
98 | except Exception as e:
99 | if exit_exceptions and isinstance(e, exit_exceptions):
100 | exit_exc = e
101 | return
102 | sleep(backoff)
103 | self.logger.error(
104 | "Unhandled error in {}".format(f.__name__),
105 | exc_info=True)
106 | continue
107 |
108 | if res is False:
109 | continue
110 |
111 | if precise:
112 | # Integer computation is about twice as fast as float,
113 | # and we don't need the precision of floating point
114 | # anywhere...
115 | now = int(time.time())
116 | sleep(((now // precise_val) * precise_val) +
117 | interval_val - now)
118 | elif interval:
119 | sleep(interval_val)
120 |
121 |                 # Catch even system exit exceptions so we can pass them to
122 |                 # the fin function.
123 | except BaseException as e:
124 | exit_exc = e
125 | finally:
126 | if fin_func:
127 | return fin_func(exit_exc=exit_exc, caller=f)
128 | elif exit_exc is not None:
129 | raise exit_exc
130 |
131 | return wrapper
132 | return loop_deco
133 |
134 |
135 | class Component(object):
136 | """ Abstract base class documenting the component architecture expectations
137 | """
138 | # Provides default configuration values. To make a configuration key required
139 | # simply make the value = REQUIRED
140 | defaults = dict()
141 | key = None
142 | # A list of class methods that are independent greenlets. These will
143 | # automatically get started and stopped at appropriate times.
144 | gl_methods = []
145 | one_min_stats = []
146 | one_sec_stats = []
147 | dependencies = {}
148 |
149 | @property
150 | def name(self):
151 | return "{}_{}".format(self.__class__.__name__, self.key)
152 |
153 | def _configure(self, config):
154 | """ Applies defaults and checks requirements of component configuration
155 | """
156 | # Apply defaults
157 | self.config = deepcopy(self.defaults)
158 | # Override defaults with provided config information
159 | recursive_update(self.config, config)
160 | for key, value in self.config.iteritems():
161 | if value == REQUIRED:
162 | raise ConfigurationError(
163 | "Key {} is a required configuration value for "
164 | "component {}".format(key, self.__class__.__name__))
165 |
166 | if ('log_level' in self.config and
167 | self.config['log_level'] not in ['DEBUG', 'INFO', 'WARN', 'ERROR']):
168 | raise ConfigurationError("Invalid logging level specified")
169 | self.key = self.config.get('key')
170 |
171 | def __getitem__(self, key):
172 | """ Easy access to configuration values! """
173 | return self.config[key]
174 |
175 | def start(self):
176 | """ Called when the application is starting. """
177 | log_level = self.config.get('log_level')
178 | if log_level:
179 | self.logger.setLevel(getattr(logging, log_level))
180 | self.logger.info("Component {} starting up".format(self.name))
181 | self.greenlets = {}
182 | for method in self.gl_methods:
183 | gl = spawn(getattr(self, method))
184 | self.logger.info("Starting greenlet {}".format(method))
185 | self.greenlets[method] = gl
186 |
187 | def stop(self):
188 | """ Called when the application is trying to exit. Should not block.
189 | """
190 | self.logger.info("Component {} stopping".format(self.name))
191 | for method, gl in self.greenlets.iteritems():
192 | self.logger.info("Stopping greenlet {}".format(method))
193 | gl.kill(block=False)
194 |
195 | @property
196 | def status(self):
197 | """ Should return a json convertable data structure to be shown in the
198 | web interface. """
199 | return dict()
200 |
201 | def update_config(self, updated_config):
202 | """ A call performed when the configuration file gets reloaded at
203 |         runtime. self.raw_config will have been pre-populated by the manager
204 |         before this call is made.
205 |
206 | Since configuration values of certain components can't be reloaded at
207 | runtime it's good practice to log a warning when a change is detected
208 | but can't be implemented. """
209 | pass
210 |
211 | def _incr(self, counter, amount=1):
212 | self.counters[counter].incr(amount)
213 |
214 | def _lookup(self, key):
215 | try:
216 | return self.manager.components[key]
217 | except KeyError:
218 | raise ConfigurationError("Cannot find component {}"
219 | .format(key))
220 |
221 |
222 | class SecondStatManager(object):
223 | """ Monitors the last 60 minutes of a specific number at 1 minute precision
224 | and the last 1 minute of a number at 1 second precision. Essentially a
225 | counter gets incremented and rotated through a circular buffer.
226 | """
227 | def __init__(self):
228 | self._val = 0
229 | self.mins = deque([], 60)
230 | self.seconds = deque([], 60)
231 | self.total = 0
232 |
233 | def incr(self, amount=1):
234 | """ Increments the counter """
235 | self._val += amount
236 |
237 | def tick(self):
238 | """ should be called once every second """
239 | self.seconds.append(self._val)
240 | self.total += self._val
241 | self._val = 0
242 |
243 | def tock(self):
244 | # rotate the total into a minute slot
245 | last_min = sum(self.seconds)
246 | self.mins.append(last_min)
247 | return last_min
248 |
249 | @property
250 | def hour(self):
251 | return sum(self.mins)
252 |
253 | @property
254 | def minute(self):
255 | if len(self.mins):
256 | return self.mins[-1]
257 | return 0
258 |
259 | @property
260 | def second_avg(self):
261 | return sum(self.seconds) / 60.0
262 |
263 | @property
264 | def min_avg(self):
265 | return sum(self.mins) / 60.0
266 |
267 | def summary(self):
268 | return dict(name=self.key,
269 | owner=str(self.owner),
270 | total=self.total,
271 | min_total=self.minute,
272 | hour_total=self.hour,
273 | min_avg=self.min_avg)
274 |
275 |
276 | class MinuteStatManager(SecondStatManager):
277 | """ Monitors the last 60 minutes of a specific number at 1 minute precision
278 | """
279 | def __init__(self):
280 | SecondStatManager.__init__(self)
281 | self._val = 0
282 | self.mins = deque([], 60)
283 | self.total = 0
284 |
285 | def tock(self):
286 | """ should be called once every minute """
287 | self.mins.append(self._val)
288 | self.total += self._val
289 | self._val = 0
290 |
--------------------------------------------------------------------------------
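
To make the Component contract and the loop decorator above concrete, here is
a hypothetical minimal component; HeartbeatComponent and its beat_interval key
are invented for illustration and do not exist in the codebase:

class HeartbeatComponent(Component):
    defaults = dict(beat_interval=5)  # merged with user config by _configure
    gl_methods = ['_beat']            # start() spawns each entry as a greenlet

    def __init__(self, config):
        self._configure(config)       # applies defaults, enforces REQUIRED keys

    @loop(interval='beat_interval')   # string intervals are config key lookups
    def _beat(self):
        # self.logger is attached by the manager before start() is called.
        # Returning False would skip the sleep and re-run immediately.
        self.logger.info("heartbeat")
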
/powerpool/main.py:
--------------------------------------------------------------------------------
1 | import yaml
2 | import socket
3 | import argparse
4 | import datetime
5 | import setproctitle
6 | import gevent
7 | import gevent.hub
8 | import signal
9 | import subprocess
10 | import powerpool
11 | import time
12 | import logging
13 | import sys
14 |
15 | from gevent_helpers import BlockingDetector
16 | from gevent import sleep
17 | from gevent.monkey import patch_all
18 | from gevent.server import DatagramServer
19 | patch_all()
20 |
21 | from .utils import import_helper
22 | from .lib import MinuteStatManager, SecondStatManager, Component
23 | from .jobmanagers import Jobmanager
24 | from .reporters import Reporter
25 | from .stratum_server import StratumServer
26 |
27 |
28 | def main():
29 | parser = argparse.ArgumentParser(description='Run powerpool!')
30 | parser.add_argument('config', type=argparse.FileType('r'),
31 | help='yaml configuration file to run with')
32 | parser.add_argument('-d', '--dump-config', action="store_true",
33 | help='print the result of the YAML configuration file and exit')
34 | parser.add_argument('-s', '--server-number', type=int, default=0,
35 |                         help='increase the configured server_number by this much')
36 | args = parser.parse_args()
37 |
38 | # override those defaults with a loaded yaml config
39 | raw_config = yaml.load(args.config) or {}
40 | if args.dump_config:
41 | import pprint
42 | pprint.pprint(raw_config)
43 | exit(0)
44 | PowerPool.from_raw_config(raw_config, vars(args)).start()
45 |
46 |
47 | class PowerPool(Component, DatagramServer):
48 |     """ This is a singleton class that manages starting/stopping of the server,
49 |     along with all statistical counter rotation schedules. It takes the raw
50 | config and distributes it to each module, as well as loading dynamic modules.
51 |
52 | It also handles logging facilities by being the central logging registry.
53 | Each module can "register" a logger with the main object, which attaches
54 | it to configured handlers.
55 | """
56 | manager = None
57 | gl_methods = ['_tick_stats']
58 | defaults = dict(procname="powerpool",
59 | term_timeout=10,
60 | extranonce_serv_size=4,
61 | extranonce_size=4,
62 | default_component_log_level='INFO',
63 | loggers=[{'type': 'StreamHandler', 'level': 'NOTSET'}],
64 | events=dict(enabled=False, port=8125, host="127.0.0.1"),
65 | datagram=dict(enabled=False, port=6855, host="127.0.0.1"),
66 | server_number=0,
67 | algorithms=dict(
68 | x11={"module": "drk_hash.getPoWHash",
69 | "hashes_per_share": 4294967296},
70 | scrypt={"module": "ltc_scrypt.getPoWHash",
71 | "hashes_per_share": 65536},
72 | scryptn={"module": "vtc_scrypt.getPoWHash",
73 | "hashes_per_share": 65536},
74 | blake256={"module": "blake_hash.getPoWHash",
75 | "hashes_per_share": 65536},
76 | sha256={"module": "cryptokit.sha256d",
77 | "hashes_per_share": 4294967296}
78 | ))
79 |
80 | @classmethod
81 |     def from_raw_config(cls, raw_config, args):
82 | components = {}
83 | types = [PowerPool, Reporter, Jobmanager, StratumServer]
84 | component_types = {cls.__name__: [] for cls in types}
85 | component_types['other'] = []
86 | for key, config in raw_config.iteritems():
87 | typ = import_helper(config['type'])
88 | # Pass the commandline arguments to the manager component
89 | if issubclass(typ, PowerPool):
90 | config['args'] = args
91 |
92 | obj = typ(config)
93 | obj.key = key
94 | for typ in types:
95 | if isinstance(obj, typ):
96 | component_types[typ.__name__].append(obj)
97 | break
98 | else:
99 | component_types['other'].append(obj)
100 | components[key] = obj
101 |
102 | pp = component_types['PowerPool'][0]
103 | assert len(component_types['PowerPool']) == 1
104 | pp.components = components
105 | pp.component_types = component_types
106 | return pp
107 |
108 | def __init__(self, config):
109 | self._configure(config)
110 | self._log_handlers = []
111 | # Parse command line args
112 | self.config['server_number'] += self.config['args']['server_number']
113 | self.config['procname'] += "_{}".format(self.config['server_number'])
114 | # setup all our log handlers
115 | for log_cfg in self.config['loggers']:
116 | if log_cfg['type'] == "StreamHandler":
117 | kwargs = dict(stream=sys.stdout)
118 | else:
119 | kwargs = dict()
120 | handler = getattr(logging, log_cfg['type'])(**kwargs)
121 | log_level = getattr(logging, log_cfg['level'].upper())
122 | handler.setLevel(log_level)
123 | fmt = log_cfg.get('format', '%(asctime)s [%(name)s] [%(levelname)s] %(message)s')
124 | formatter = logging.Formatter(fmt)
125 | handler.setFormatter(formatter)
126 | self._log_handlers.append((log_cfg.get('listen'), handler))
127 | self.logger = self.register_logger(self.__class__.__name__)
128 |
129 | setproctitle.setproctitle(self.config['procname'])
130 | self.version = powerpool.__version__
131 | self.version_info = powerpool.__version_info__
132 | self.sha = getattr(powerpool, '__sha__', "unknown")
133 | self.rev_date = getattr(powerpool, '__rev_date__', "unknown")
134 | if self.sha == "unknown":
135 | # try and fetch the git version information
136 | try:
137 | output = subprocess.check_output("git show -s --format='%ci %h'",
138 | shell=True).strip().rsplit(" ", 1)
139 | self.sha = output[1]
140 | self.rev_date = output[0]
141 | # celery won't work with this, so set some default
142 | except Exception as e:
143 | self.logger.info("Unable to fetch git hash info: {}".format(e))
144 |
145 | self.algos = {}
146 | self.server_start = datetime.datetime.utcnow()
147 | self.logger.info("=" * 80)
148 | self.logger.info("PowerPool stratum server ({}) starting up..."
149 | .format(self.config['procname']))
150 |
151 | if __debug__:
152 | self.logger.info("Python not running in optimized mode. For best "
153 |                              "performance set environment variable PYTHONOPTIMIZE=2")
154 |
155 | gevent.spawn(BlockingDetector(raise_exc=False))
156 |
157 | # Detect and load all the hash functions we can find
158 | for name, algo_data in self.config['algorithms'].iteritems():
159 | self.algos[name] = algo_data.copy()
160 | self.algos[name]['name'] = name
161 | mod = algo_data['module']
162 | try:
163 | self.algos[name]['module'] = import_helper(mod)
164 | except ImportError:
165 | self.algos[name]['module'] = None
166 | else:
167 | self.logger.info("Enabling {} hashing algorithm from module {}"
168 | .format(name, mod))
169 |
170 | self.event_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
171 | self.events_enabled = self.config['events']['enabled']
172 | if self.events_enabled:
173 | self.logger.info("Transmitting statsd formatted stats to {}:{}".format(
174 | self.config['events']['host'], self.config['events']['port']))
175 | self.events_address = (self.config['events']['host'].encode('utf8'),
176 | self.config['events']['port'])
177 |
178 | # Setup all our stat managers
179 | self._min_stat_counters = []
180 | self._sec_stat_counters = []
181 |
182 | if self.config['datagram']['enabled']:
183 | listener = (self.config['datagram']['host'],
184 | self.config['datagram']['port'] +
185 | self.config['server_number'])
186 | self.logger.info("Turning on UDP control server on {}"
187 | .format(listener))
188 | DatagramServer.__init__(self, listener, spawn=None)
189 |
190 | def handle(self, data, address):
191 |         self.logger.info("Received new command {}".format(data))
192 | parts = data.split(" ")
193 | try:
194 | component = self.components[parts[0]]
195 | func = getattr(component, parts[1])
196 | kwargs = {}
197 | args = []
198 | for arg in parts[2:]:
199 | if "=" in arg:
200 | k, v = arg.split("=", 1)
201 | kwargs[k] = v
202 | else:
203 | args.append(arg)
204 | if kwargs.pop('__spawn', False):
205 | gevent.spawn(func, *args, **kwargs)
206 | else:
207 | func(*args, **kwargs)
208 | except AttributeError:
209 | self.logger.warn("Component {} doesn't have a method {}"
210 | .format(*parts))
211 | except KeyError:
212 | self.logger.warn("Component {} doesn't exist".format(*parts))
213 | except Exception:
214 | self.logger.warn("Error in called function {}!".format(data),
215 | exc_info=True)
216 |
217 | def log_event(self, event):
218 | if self.events_enabled:
219 | self.event_socket.sendto(event, self.events_address)
220 |
221 | def start(self):
222 | self.register_logger("gevent_helpers")
223 | for comp in self.components.itervalues():
224 | comp.manager = self
225 | comp.counters = self.register_stat_counters(comp, comp.one_min_stats, comp.one_sec_stats)
226 | if comp is not self:
227 | comp.logger = self.register_logger(comp.name)
228 | comp.start()
229 |
230 | # Starts the greenlet
231 | Component.start(self)
232 | # Start the datagram control server if it's been inited
233 | if self.config['datagram']['enabled']:
234 |             DatagramServer.start(self)
235 |
236 | # This is the main thread of execution, so just continue here waiting
237 | # for exit signals
238 | ######
239 | # Register shutdown signals
240 | gevent.signal(signal.SIGUSR1, self.dump_objgraph)
241 |         gevent.signal(signal.SIGHUP, self.exit, "SIGHUP")
242 |         gevent.signal(signal.SIGINT, self.exit, "SIGINT")
243 |         gevent.signal(signal.SIGTERM, self.exit, "SIGTERM")
244 |
245 | try:
246 | gevent.wait()
247 | # Allow a force exit from multiple exit signals
248 | finally:
249 |             self.logger.info("Exit requested, allowing {} seconds for cleanup."
250 | .format(self.config['term_timeout']))
251 | try:
252 | for comp in self.components.itervalues():
253 | self.logger.debug("Calling stop on component {}".format(comp))
254 | comp.stop()
255 | if gevent.wait(timeout=self.config['term_timeout']):
256 | self.logger.info("All threads exited normally")
257 | else:
258 | self.logger.info("Timeout reached, shutting down forcefully")
259 | except gevent.GreenletExit:
260 | self.logger.info("Shutdown requested again by system, "
261 | "exiting without cleanup")
262 | self.logger.info("Exit")
263 | self.logger.info("=" * 80)
264 |
265 | def dump_objgraph(self):
266 | import gc
267 | gc.collect()
268 | import objgraph
269 | print "Dumping object growth ****"
270 | objgraph.show_growth(limit=100)
271 | print "****"
272 |
273 | def exit(self, signal=None):
274 | """ Handle an exit request """
275 | self.logger.info("{} {}".format(signal, "*" * 80))
276 | # Kill the top level greenlet
277 | gevent.kill(gevent.hub.get_hub().parent)
278 |
279 | @property
280 | def status(self):
281 | """ For display in the http monitor """
282 | return dict(uptime=str(datetime.datetime.utcnow() - self.server_start),
283 | server_start=str(self.server_start),
284 | version=dict(
285 | version=self.version,
286 | version_info=self.version_info,
287 | sha=self.sha,
288 | rev_date=self.rev_date)
289 | )
290 |
291 | def _tick_stats(self):
292 | """ A greenlet that handles rotation of statistics """
293 | last_tick = int(time.time())
294 | last_send = (last_tick // 60) * 60
295 | while True:
296 | now = time.time()
297 | # time to rotate minutes?
298 | if now > (last_send + 60):
299 | for manager in self._min_stat_counters:
300 | manager.tock()
301 | for manager in self._sec_stat_counters:
302 | manager.tock()
303 | last_send += 60
304 |
305 | # time to tick?
306 | if now > (last_tick + 1):
307 | for manager in self._sec_stat_counters:
308 | manager.tick()
309 | last_tick += 1
310 |
311 | sleep(last_tick - time.time() + 1.0)
312 |
313 | def register_logger(self, name):
314 | logger = logging.getLogger(name)
315 | logger.setLevel(getattr(logging, self.config['default_component_log_level']))
316 | for keys, handler in self._log_handlers:
317 | # If the keys are blank then we assume it wants all loggers
318 | # registered
319 | if not keys or name in keys:
320 | logger.addHandler(handler)
321 |
322 | return logger
323 |
324 | def register_stat_counters(self, comp, min_counters, sec_counters=None):
325 | """ Creates and adds the stat counters to internal tracking dictionaries.
326 | These dictionaries are iterated to perform stat rotation, as well
327 | as accessed to perform stat logging """
328 | counters = {}
329 | for key in min_counters:
330 | new = MinuteStatManager()
331 | new.owner = comp
332 | new.key = key
333 | counters[key] = new
334 | self._min_stat_counters.append(new)
335 |
336 | for key in sec_counters or []:
337 | new = SecondStatManager()
338 | new.owner = comp
339 | new.key = key
340 | counters[key] = new
341 | self._sec_stat_counters.append(new)
342 |
343 | return counters
344 |
--------------------------------------------------------------------------------
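
The handle() method above implements a small space-delimited UDP control
protocol: a component key, a method name, then positional and k=v keyword
arguments (values arrive as strings; no type coercion is done). A hypothetical
interaction, assuming the datagram server is enabled on its default port and a
component keyed 'jobmanager' exists:

import socket

# Calls components['jobmanager'].getblocktemplate(signal='True') on the server.
# Note the kwarg value stays the string 'True'.
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.sendto("jobmanager getblocktemplate signal=True", ("127.0.0.1", 6855))
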
/powerpool/monitor.py:
--------------------------------------------------------------------------------
1 | from flask import Flask, jsonify, abort, send_from_directory, url_for
2 | from cryptokit.block import BlockTemplate
3 | from cryptokit.transaction import Transaction
4 | from gevent.wsgi import WSGIServer, WSGIHandler
5 | from collections import deque
6 |
7 | from .utils import time_format
8 | from .lib import Component
9 |
10 | import decimal
11 | import os
12 |
13 |
14 | class Logger(object):
15 | """ A dummy file object to allow using a logger to log requests instead
16 | of sending to stderr like the default WSGI logger """
17 | logger = None
18 |
19 | def write(self, s):
20 | self.logger.info(s.strip())
21 |
22 |
23 | class CustomWSGIHandler(WSGIHandler):
24 |     """ A simple custom handler that lets us provide a more helpful request
25 |     logging format, designed for easy profiling """
26 | def format_request(self):
27 | length = self.response_length or '-'
28 | delta = time_format(self.time_finish - self.time_start)
29 | client_address = self.client_address[0] if isinstance(self.client_address, tuple) else self.client_address
30 | return '%s "%s" %s %s %s' % (
31 | client_address or '-',
32 | getattr(self, 'requestline', ''),
33 | (getattr(self, 'status', None) or '000').split()[0],
34 | length,
35 | delta)
36 |
37 |
38 | class ReverseProxied(object):
39 | '''Wrap the application in this middleware and configure the
40 | front-end server to add these headers, to let you quietly bind
41 | this to a URL other than / and to an HTTP scheme that is
42 | different than what is used locally.
43 |
44 | In nginx:
45 | location /myprefix {
46 | proxy_pass http://192.168.0.1:5001;
47 | proxy_set_header Host $host;
48 | proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
49 | proxy_set_header X-Scheme $scheme;
50 | proxy_set_header X-Script-Name /myprefix;
51 | }
52 |
53 | :param app: the WSGI application
54 | '''
55 | def __init__(self, app):
56 | self.app = app
57 |
58 | def __call__(self, environ, start_response):
59 | script_name = environ.get('HTTP_X_SCRIPT_NAME', '')
60 | if script_name:
61 | environ['SCRIPT_NAME'] = script_name
62 | path_info = environ['PATH_INFO']
63 | if path_info.startswith(script_name):
64 | environ['PATH_INFO'] = path_info[len(script_name):]
65 |
66 | scheme = environ.get('HTTP_X_SCHEME', '')
67 | if scheme:
68 | environ['wsgi.url_scheme'] = scheme
69 | return self.app(environ, start_response)
70 |
71 |
72 | class ServerMonitor(Component, WSGIServer):
73 | """ Provides a few useful json endpoints for viewing server health and
74 | performance. """
75 | # Use our custom wsgi handler
76 | handler_class = CustomWSGIHandler
77 | defaults = dict(address="127.0.0.1",
78 | port=3855,
79 | JSON_SORT_KEYS=False,
80 | JSONIFY_PRETTYPRINT_REGULAR=False,
81 | DEBUG=False)
82 |
83 | def __init__(self, config):
84 | self._configure(config)
85 | app = Flask(__name__)
86 | app.wsgi_app = ReverseProxied(app.wsgi_app)
87 | app.config.update(self.config)
88 | app.add_url_rule('/', 'general', self.general)
89 | app.add_url_rule('/debug/', 'debug', self.debug)
90 | app.add_url_rule('/counters/', 'counters', self.counters)
91 |         app.add_url_rule('/<comp_key>/clients/', 'clients_comp', self.clients_comp)
92 |         app.add_url_rule('/<comp_key>/client/<username>', 'client', self.client)
93 |         app.add_url_rule('/<comp_key>/', 'comp', self.comp)
94 |         app.add_url_rule('/<comp_key>/config', 'comp_config', self.comp_config)
95 | # Legacy
96 | app.add_url_rule('/05/clients/', 'clients', self.clients_0_5)
97 | app.add_url_rule('/05/', 'general_0_5', self.general_0_5)
98 |
99 | self.viewer_dir = os.path.join(os.path.abspath(
100 | os.path.dirname(__file__) + '/../'), 'viewer')
101 | self.app = app
102 |
103 | def start(self, *args, **kwargs):
104 | listener = (self.config['address'],
105 | self.config['port'] +
106 | self.manager.config['server_number'])
107 | WSGIServer.__init__(self, listener, self.app, spawn=100, log=Logger())
108 |
109 | self.logger.info("Monitoring port listening on {}".format(listener))
110 |
111 | # Monkey patch the wsgi logger
112 | Logger.logger = self.logger
113 |
114 | WSGIServer.start(self, *args, **kwargs)
115 | Component.start(self)
116 |
117 | def stop(self, *args, **kwargs):
118 | WSGIServer.stop(self)
119 | Component.stop(self)
120 | self.logger.info("Exit")
121 |
122 | def debug(self):
123 | data = {}
124 | for key, comp in self.manager.components.iteritems():
125 | data[key] = jsonize(comp.__dict__)
126 | return jsonify(data)
127 |
128 | def general(self):
129 | from .stratum_server import StratumServer
130 | data = {}
131 | for key, comp in self.manager.components.iteritems():
132 | dict_key = "{}_{}".format(comp.__class__.__name__, key)
133 | try:
134 | data[dict_key] = comp.status
135 | data[dict_key]['config_view'] = url_for(
136 | 'comp_config', comp_key=key, _external=True)
137 | if isinstance(comp, StratumServer):
138 | data[dict_key]['clients'] = url_for(
139 | 'clients_comp', comp_key=key, _external=True)
140 | except Exception as e:
141 | err = "Component {} status call raised {}".format(key, e)
142 | data[dict_key] = err
143 | self.logger.error(err, exc_info=True)
144 | data['debug_view'] = url_for('debug', _external=True)
145 | data['counter_view'] = url_for('counters', _external=True)
146 | return jsonify(jsonize(data))
147 |
148 | def client(self, comp_key, username):
149 | try:
150 | component = self.manager.components[comp_key]
151 | except KeyError:
152 | abort(404)
153 | return jsonify(username=[client.details for client in
154 | component.address_lut.get(username, [])])
155 |
156 | def comp_config(self, comp_key):
157 | try:
158 | return jsonify(**jsonize(self.manager.components[comp_key].config))
159 | except KeyError:
160 | abort(404)
161 |
162 | def comp(self, comp_key):
163 | try:
164 | return jsonify(**jsonize(self.manager.components[comp_key].status))
165 | except KeyError:
166 | abort(404)
167 |
168 | def clients_comp(self, comp_key):
169 | try:
170 | lut = self.manager.components[comp_key].address_lut
171 | except KeyError:
172 | abort(404)
173 |
174 | clients = {}
175 | for username, client_list in lut.iteritems():
176 | clients[username] = {client._id: client.summary
177 | for client in client_list}
178 | clients[username]['details_view'] = url_for(
179 | 'client', comp_key=comp_key, username=username, _external=True)
180 |
181 | return jsonify(clients=clients)
182 |
183 | def counters(self):
184 | counters = []
185 | counters.extend(c.summary() for c in self.manager._min_stat_counters)
186 | counters.extend(c.summary() for c in self.manager._sec_stat_counters)
187 | return jsonify(counters=counters)
188 |
189 | def clients_0_5(self):
190 | """ Legacy client view emulating version 0.5 support """
191 | lut = self.manager.component_types['StratumServer'][0].address_lut
192 | clients = {key: [item.summary for item in value]
193 | for key, value in lut.iteritems()}
194 |
195 | return jsonify(clients=clients)
196 |
197 | def general_0_5(self):
198 | """ Legacy 0.5 emulating view """
199 | return jsonify(server={},
200 | stratum_manager=self.manager.component_types['StratumServer'][0].status)
201 |
202 |
203 | def jsonize(item):
204 | """ Recursive function that converts a lot of non-serializable content
205 | to something json.dumps will like better """
206 | if isinstance(item, dict):
207 | new = {}
208 | for k, v in item.iteritems():
209 | k = str(k)
210 | if isinstance(v, deque):
211 | new[k] = jsonize(list(v))
212 | else:
213 | new[k] = jsonize(v)
214 | return new
215 | elif isinstance(item, list) or isinstance(item, tuple):
216 | new = []
217 | for part in item:
218 | new.append(jsonize(part))
219 | return new
220 | else:
221 | if isinstance(item, Transaction):
222 | item.disassemble()
223 | return item.to_dict()
224 | elif isinstance(item, str):
225 | return item.encode('string_escape')
226 | elif isinstance(item, set):
227 | return list(item)
228 | elif isinstance(item, decimal.Decimal):
229 | return float(item)
230 | elif isinstance(item, (int, long, bool, float)) or item is None:
231 | return item
232 | elif hasattr(item, "__dict__"):
233 | return {str(k).encode('string_escape'): str(v).encode('string_escape')
234 | for k, v in item.__dict__.iteritems()}
235 | else:
236 | return str(item)
237 |
--------------------------------------------------------------------------------
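
The routes registered above can be exercised with any HTTP client. A minimal
sketch against the component's default bind address and port (assuming
server_number is 0, since the listening port is offset by it):

import json
import urllib2

base = "http://127.0.0.1:3855"
general = json.load(urllib2.urlopen(base + "/"))            # overall status
counters = json.load(urllib2.urlopen(base + "/counters/"))  # stat counters
# per-component views hang off the component key, e.g. base + "/jobmanager/"
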
/powerpool/reporters/__init__.py:
--------------------------------------------------------------------------------
1 | from .base import Reporter, StatReporter, QueueStatReporter
2 |
3 | from .redis_reporter import RedisReporter
4 | from .double_reporter import DoubleReporter
5 | from .celery_reporter import CeleryReporter
6 |
--------------------------------------------------------------------------------
/powerpool/reporters/base.py:
--------------------------------------------------------------------------------
1 | import time
2 | import gevent
3 |
4 | from gevent import sleep
5 | from gevent import spawn, GreenletExit
6 | from gevent.queue import Queue
7 | from hashlib import sha256
8 | from binascii import hexlify
9 |
10 | from ..lib import Component, loop
11 | from ..utils import time_format
12 | from ..stratum_server import StratumClient
13 |
14 |
15 | class Reporter(Component):
16 | """ An abstract base class to document the Reporter interface. """
17 | def agent_send(self, address, worker, typ, data, time):
18 |         """ Called when valid data is received from a PPAgent connection. """
19 | raise NotImplementedError
20 |
21 | def add_block(self, address, height, total_subsidy, fees,
22 | hex_bits, hash, merged, worker, algo):
23 | """ Called when a share is submitted with a hash that is valid for the
24 | network. """
25 | raise NotImplementedError
26 |
27 | def log_share(self, client, diff, typ, params, job=None, header_hash=None,
28 | header=None, start=None, **kwargs):
29 | """ Logs a share to external sources for payout calculation and
30 | statistics """
31 | #if __debug__:
32 | # self.logger.debug(
33 | # "Running log share with args {} kwargs {}"
34 | # .format((client._id, diff, typ, params), dict(
35 | # job=job, header_hash=header_hash, header=hexlify(header))))
36 |
37 | if typ == StratumClient.VALID_SHARE:
38 | self.logger.debug("Valid share accepted from worker {}.{}!"
39 | .format(client.address, client.worker))
40 | # Grab the raw coinbase out of the job object before gevent can
41 | # preempt to another thread and change the value. Very important!
42 | coinbase_raw = job.coinbase.raw
43 |
44 |             # Some coins use the PoW function for the block hash, while
45 |             # others use SHA256. Allow toggling which is used
46 | if job.pow_block_hash:
47 | header_hash_raw = client.algo['module'](header)[::-1]
48 | else:
49 | header_hash_raw = sha256(sha256(header).digest()).digest()[::-1]
50 | hash_hex = hexlify(header_hash_raw)
51 |
52 | submission_threads = []
53 | # valid network hash?
54 | if header_hash <= job.bits_target:
55 | submission_threads.append(spawn(
56 | job.found_block,
57 | coinbase_raw,
58 | client.address,
59 | client.worker,
60 | hash_hex,
61 | header,
62 | job,
63 | start))
64 |
65 | # check each aux chain for validity
66 | for chain_id, data in job.merged_data.iteritems():
67 | if header_hash <= data['target']:
68 | submission_threads.append(spawn(
69 | data['found_block'],
70 | client.address,
71 | client.worker,
72 | header,
73 | coinbase_raw,
74 | job,
75 | start))
76 |
77 | for gl in gevent.iwait(submission_threads):
78 | ret = gl.value
79 | if ret:
80 | spawn(self.add_block, **gl.value)
81 | else:
82 | self.logger.error("Submission gl {} returned nothing!"
83 | .format(gl))
84 |
85 |
86 | class StatReporter(Reporter):
87 | """ The stat reporter groups all shares into one minute chunks and reports
88 | them to allow separation of statistics reporting and payout related
89 | logging. """
90 |
91 | defaults = dict(pool_report_configs={},
92 | chain=1,
93 | attrs={})
94 | gl_methods = ['_report_one_min']
95 |
96 | def __init__(self):
97 | self._minute_slices = {}
98 | self._per_address_slices = {}
99 |
100 | def log_one_minute(self, address, worker, algo, stamp, typ, amount):
101 | """ Called to log a minutes worth of shares that have been submitted
102 | by a unique (address, worker, algo). """
103 |         raise NotImplementedError("If you're not logging the one minute chunks "
104 | "don't use the StatReporter!")
105 |
106 | def log_share(self, client, diff, typ, params, job=None, header_hash=None,
107 | header=None, **kwargs):
108 | super(StatReporter, self).log_share(
109 | client, diff, typ, params, job=job, header_hash=header_hash,
110 | header=header, **kwargs)
111 | address, worker = client.address, client.worker
112 | algo = client.algo['name']
113 | slc_time = (int(time.time()) // 60) * 60
114 | slc = self._minute_slices.setdefault(slc_time, {})
115 | self._aggr_one_min(address, worker, algo, typ, diff, slc)
116 | currency = job.currency if job else "UNKNOWN"
117 | # log the share under user "pool" to allow easy/fast display of pool stats
118 | for cfg in self.config['pool_report_configs']:
119 | user = cfg['user']
120 | pool_worker = cfg['worker_format_string'].format(
121 | algo=algo,
122 | currency=currency,
123 | server_name=self.manager.config['procname'],
124 | **self.config['attrs'])
125 | self._aggr_one_min(user, pool_worker, algo, typ, diff, slc)
126 | if cfg.get('report_merge') and job:
127 | for currency in job.merged_data:
128 | pool_worker = cfg['worker_format_string'].format(
129 | algo=algo,
130 | currency=currency,
131 | server_name=self.manager.config['procname'],
132 | **self.config['attrs'])
133 | self._aggr_one_min(user, pool_worker, algo, typ, diff, slc)
134 |
135 | # reporting for vardiff rates
136 | if typ == StratumClient.VALID_SHARE:
137 | slc = self._per_address_slices.setdefault(slc_time, {})
138 | if address not in slc:
139 | slc[address] = diff
140 | else:
141 | slc[address] += diff
142 |
143 | def _aggr_one_min(self, address, worker, algo, typ, amount, slc):
144 | key = (address, worker, algo, typ)
145 | if key not in slc:
146 | slc[key] = amount
147 | else:
148 | slc[key] += amount
149 |
150 | def _flush_one_min(self, exit_exc=None, caller=None):
151 | self._process_minute_slices(flush=True)
152 | self.logger.info("One minute flush complete, Exit.")
153 |
154 | @loop(interval=61, precise=60, fin="_flush_one_min")
155 | def _report_one_min(self):
156 | self._process_minute_slices()
157 |
158 | def _process_minute_slices(self, flush=False):
159 | """ Goes through our internal aggregated share data structures and
160 | reports them to our external storage. If asked to flush it will report
161 | all one minute shares, otherwise it will only report minutes that have
162 | passed. """
163 | self.logger.info("Reporting one minute shares for address/workers")
164 | t = time.time()
165 | if not flush:
166 | upper = (int(t) // 60) * 60
167 | for stamp, data in self._minute_slices.items():
168 | if flush or stamp < upper:
169 | for (address, worker, algo, typ), amount in data.iteritems():
170 | self.log_one_minute(address, worker, algo, stamp, typ, amount)
171 | # XXX: GreenletExit getting raised here might cause some
172 | # double reporting!
173 | del self._minute_slices[stamp]
174 |
175 | self.logger.info("One minute shares reported in {}"
176 | .format(time_format(time.time() - t)))
177 |
178 | # Clean up old per address slices as well
179 | ten_ago = ((time.time() // 60) * 60) - 600
180 | for stamp in self._per_address_slices.keys():
181 | if stamp < ten_ago:
182 | del self._per_address_slices[stamp]
183 |
184 | def spm(self, address):
185 |         """ Called by the client code to determine how many shares per minute
186 |         are currently being submitted. Slices older than 10 minutes are pruned
187 |         by _process_minute_slices. """
188 | mins = 0
189 | total = 0
190 | for stamp in self._per_address_slices.keys():
191 | val = self._per_address_slices[stamp].get(address)
192 | if val is not None:
193 | total += val
194 | mins += 1
195 |
196 |         return total / (mins or 1)  # or 1 prevents division by zero error
197 |
198 |
199 | class QueueStatReporter(StatReporter):
200 | def _start_queue(self):
201 | self.queue = Queue()
202 |
203 | def _flush_queue(self, exit_exc=None, caller=None):
204 | sleep(1)
205 | self.logger.info("Flushing a queue of size {}"
206 | .format(self.queue.qsize()))
207 | self.queue.put(StopIteration)
208 | for item in self.queue:
209 | self._run_queue_item(item)
210 | self.logger.info("Queue flush complete, Exit.")
211 |
212 | @loop(setup='_start_queue', fin='_flush_queue')
213 | def _queue_proc(self):
214 | item = self.queue.get()
215 | if self._run_queue_item(item) == "retry":
216 | # Put it at the back of the queue for retry
217 | self.queue.put(item)
218 | sleep(1)
219 |
220 | def _run_queue_item(self, item):
221 | name, args, kwargs = item
222 | if __debug__:
223 | self.logger.debug("Queue running {} with args '{}' kwargs '{}'"
224 | .format(name, args, kwargs))
225 | try:
226 | func = getattr(self, name, None)
227 | if func is None:
228 | raise NotImplementedError(
229 | "Item {} has been enqueued that has no valid function!"
230 | .format(name))
231 | func(*args, **kwargs)
232 | except self.queue_exceptions as e:
233 | self.logger.error("Unable to process queue item, retrying! "
234 | "{} Name: {}; Args: {}; Kwargs: {};"
235 | .format(e, name, args, kwargs))
236 | return "retry"
237 | except Exception:
238 | # Log any unexpected problem, but don't retry because we might
239 | # end up endlessly retrying with same failure
240 |             self.logger.error("Unknown error, queue data discarded! "
241 | "Name: {}; Args: {}; Kwargs: {};"
242 | .format(name, args, kwargs), exc_info=True)
243 |
244 | def log_one_minute(self, *args, **kwargs):
245 | self.queue.put(("_queue_log_one_minute", args, kwargs))
246 |
247 | def add_block(self, *args, **kwargs):
248 | self.queue.put(("_queue_add_block", args, kwargs))
249 |
250 | def _queue_add_block(self, address, height, total_subsidy, fees, hex_bits,
251 | hex_hash, currency, algo, merged=False, worker=None,
252 | **kwargs):
253 | raise NotImplementedError
254 |
255 | def _queue_log_one_minute(self, address, worker, algo, stamp, typ, amount):
256 | raise NotImplementedError
257 |
--------------------------------------------------------------------------------
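
A concrete reporter built on QueueStatReporter only needs to supply the two
_queue_* hooks and a queue_exceptions tuple naming retryable failures. A
hypothetical flat-file example (FileReporter and its paths are invented for
illustration):

class FileReporter(QueueStatReporter):
    gl_methods = ['_queue_proc', '_report_one_min']
    queue_exceptions = (IOError,)  # these errors re-enqueue the item for retry

    def __init__(self, config):
        self._configure(config)
        super(FileReporter, self).__init__()

    def _queue_log_one_minute(self, address, worker, algo, stamp, typ, amount):
        with open("/tmp/shares.log", "a") as f:
            f.write("{} {}.{} {} typ={} amount={}\n".format(
                stamp, address, worker, algo, typ, amount))

    def _queue_add_block(self, address, height, total_subsidy, fees, hex_bits,
                         hex_hash, currency, algo, merged=False, worker=None,
                         **kwargs):
        with open("/tmp/blocks.log", "a") as f:
            f.write("{} block {} found by {}\n".format(currency, height, address))
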
/powerpool/reporters/celery_reporter.py:
--------------------------------------------------------------------------------
1 | from gevent.queue import Queue
2 |
3 | from . import StatReporter
4 | from ..lib import loop
5 | from ..stratum_server import StratumClient
6 |
7 |
8 | class CeleryReporter(StatReporter):
9 |     """ A legacy wrapper around the old log reporting system to allow testing
10 | PowerPool 0.6 with SimpleCoin 0.7 """
11 | one_sec_stats = ['queued']
12 | gl_methods = ['_queue_proc', '_report_one_min', '_report_payout_share_aggrs']
13 | defaults = StatReporter.defaults.copy()
14 | defaults.update(dict(celery_task_prefix='simplecoin.tasks',
15 | celery={'CELERY_DEFAULT_QUEUE': 'celery'},
16 | share_batch_interval=60))
17 |
18 | def __init__(self, config):
19 | self._configure(config)
20 | super(CeleryReporter, self).__init__()
21 |
22 | # setup our celery agent and monkey patch
23 | from celery import Celery
24 | self.celery = Celery()
25 | self.celery.conf.update(self.config['celery'])
26 |
27 | self.queue = Queue()
28 | self._aggr_shares = {}
29 |
30 | @property
31 | def status(self):
32 | dct = dict(queue_size=self.queue.qsize(),
33 | unrep_shares=len(self._aggr_shares))
34 | return dct
35 |
36 | def log_one_minute(self, address, worker, algo, stamp, typ, amount):
37 | self._incr('queued')
38 | kwargs = {'user': address, 'worker': worker, 'minute': stamp,
39 | 'valid_shares': 0}
40 | if typ == StratumClient.VALID_SHARE:
41 | kwargs['valid_shares'] = amount
42 | if typ == StratumClient.DUP_SHARE:
43 | kwargs['dup_shares'] = amount
44 | if typ == StratumClient.LOW_DIFF_SHARE:
45 | kwargs['low_diff_shares'] = amount
46 | if typ == StratumClient.STALE_SHARE:
47 | kwargs['stale_shares'] = amount
48 | self.queue.put(("add_one_minute", [], kwargs))
49 |
50 | def log_share(self, client, diff, typ, params, job=None, header_hash=None,
51 | header=None, **kwargs):
52 | super(CeleryReporter, self).log_share(
53 | client, diff, typ, params, job=job, header_hash=header_hash,
54 | header=header, **kwargs)
55 |
56 | # Aggregate valid shares to be reported in batches. SimpleCoin's Celery
57 | # worker can't really handle high load share logging with the way it's
58 | # built
59 | address = client.address
60 | if typ == StratumClient.VALID_SHARE:
61 | if address not in self._aggr_shares:
62 | self._aggr_shares[address] = diff
63 | else:
64 | self._aggr_shares[address] += diff
65 |
66 | def agent_send(self, *args, **kwargs):
67 | self._incr('queued')
68 | self.queue.put(("agent_receive", args, kwargs))
69 |
70 | def add_block(self, address, height, total_subsidy, fees, hex_bits,
71 | hex_hash, currency, algo, merged=False, worker=None, **kwargs):
72 | self._incr('queued')
73 | # user, height, total_value, transaction_fees, bits, hash_hex, merged=None, worker=None
74 | kwargs = dict(user=address,
75 | height=height,
76 | total_value=total_subsidy,
77 | transaction_fees=fees,
78 | bits=hex_bits,
79 | hash_hex=hex_hash,
80 | merged=currency if merged else None,
81 | worker=worker)
82 | self.queue.put(("add_block", [], kwargs))
83 |
84 | @loop()
85 | def _queue_proc(self):
86 | name, args, kwargs = self.queue.peek()
87 | try:
88 | if name != "agent_receive":
89 | self.logger.info("Calling celery task {} with args: {}, kwargs: {}"
90 | .format(name, args, kwargs))
91 | self.celery.send_task(
92 | self.config['celery_task_prefix'] + '.' + name, args, kwargs)
93 | except Exception as e:
94 | self.logger.error("Unable to communicate with celery broker! {}"
95 | .format(e))
96 | else:
97 | self.queue.get()
98 |
99 | @loop(interval='share_batch_interval', precise=True, fin='_report_payout_shares')
100 | def _report_payout_share_aggrs(self):
101 | self._report_payout_shares()
102 |
103 | def _report_payout_shares(self, exit_exc=None, caller=None):
104 |         """ Goes through our internal aggregated share data and adds a celery
105 |         task for each unique address. """
106 | self.logger.info("Reporting shares for {:,} users"
107 | .format(len(self._aggr_shares)))
108 | for address, shares in self._aggr_shares.items():
109 | self.queue.put(("add_share", [address, shares], {}))
110 | del self._aggr_shares[address]
111 |
--------------------------------------------------------------------------------
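Each tuple drained by _queue_proc above turns into a single Celery send_task
call, with the task name built from celery_task_prefix. A standalone sketch of
the equivalent call for one of the ("add_share", [address, shares], {}) items
queued by _report_payout_shares (the address and share total are placeholders):

    from celery import Celery

    celery = Celery()
    celery.conf.update({'CELERY_DEFAULT_QUEUE': 'celery'})
    # Default prefix "simplecoin.tasks" joined with the queue item name
    celery.send_task('simplecoin.tasks.add_share', ['SomeMinerAddress', 512.0], {})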
/powerpool/reporters/double_reporter.py:
--------------------------------------------------------------------------------
1 | import time
2 | import gevent
3 |
4 | from gevent import spawn
5 | from hashlib import sha256
6 | from binascii import hexlify
7 |
8 | from ..stratum_server import StratumClient
9 | from ..lib import loop
10 | from ..exceptions import ConfigurationError
11 | from . import Reporter
12 |
13 |
14 | class DoubleReporter(Reporter):
15 | defaults = dict(reporters=[])
16 | gl_methods = ['_process_minute_slices']
17 |
18 | def __init__(self, config):
19 | self._configure(config)
20 | super(DoubleReporter, self).__init__()
21 | # Terrible, messy hack to get the child reporters to not log shares...
22 | self.child_reporters = []
23 | self._per_address_slices = {}
24 | Reporter.log_share = lambda *args, **kwargs: None
25 |
26 | def start(self):
27 | Reporter.start(self)
28 | for key in self.config['reporters']:
29 | if key in self.manager.components:
30 | self.child_reporters.append(self.manager.components[key])
31 | else:
32 | raise ConfigurationError("Couldn't find {}".format(key))
33 |
34 | if not self.child_reporters:
35 | raise ConfigurationError("Must have at least one reporter!")
36 |
37 | def log_share(self, client, diff, typ, params, job=None, header_hash=None,
38 | header=None, **kwargs):
39 | if typ == StratumClient.VALID_SHARE:
40 | start = time.time()
41 | self.logger.debug("Valid share accepted from worker {}.{}!"
42 | .format(client.address, client.worker))
43 |             # Grab the raw coinbase out of the job object before gevent can
44 |             # preempt to another greenlet and change the value. Very important!
45 | coinbase_raw = job.coinbase.raw
46 |
47 |             # Some coins use the POW function for the block hash, while others
48 |             # use double SHA256. Allow toggling between the two.
49 | if job.pow_block_hash:
50 | header_hash_raw = client.algo['module'](header)[::-1]
51 | else:
52 | header_hash_raw = sha256(sha256(header).digest()).digest()[::-1]
53 | hash_hex = hexlify(header_hash_raw)
54 |
55 | submission_threads = []
56 | # valid network hash?
57 | if header_hash <= job.bits_target:
58 | submission_threads.append(spawn(
59 | job.found_block,
60 | coinbase_raw,
61 | client.address,
62 | client.worker,
63 | hash_hex,
64 | header,
65 | job,
66 | start))
67 |
68 | # check each aux chain for validity
69 | for chain_id, data in job.merged_data.iteritems():
70 | if header_hash <= data['target']:
71 | submission_threads.append(spawn(
72 | data['found_block'],
73 | client.address,
74 | client.worker,
75 | header,
76 | coinbase_raw,
77 | job,
78 | start))
79 |
80 | for gl in gevent.iwait(submission_threads):
81 | ret = gl.value
82 | if ret:
83 | spawn(self.add_block, **ret)
84 | else:
85 | self.logger.error("Submission gl {} returned nothing!"
86 | .format(gl))
87 |
88 | for reporter in self.child_reporters:
89 | reporter.log_share(client, diff, typ, params, job=job,
90 | header_hash=header_hash, header=header, **kwargs)
91 |
92 | # reporting for vardiff rates
93 | slc_time = (int(time.time()) // 60) * 60
94 | address = client.address
95 | if typ == StratumClient.VALID_SHARE:
96 | slc = self._per_address_slices.setdefault(slc_time, {})
97 | if address not in slc:
98 | slc[address] = diff
99 | else:
100 | slc[address] += diff
101 |
102 | @loop(interval=61)
103 | def _process_minute_slices(self):
104 | # Clean up old per address slices as well
105 | self.logger.info("Cleaning up old vardiff trackers")
106 | ten_ago = ((time.time() // 60) * 60) - 600
107 | for stamp in self._per_address_slices.keys():
108 | if stamp < ten_ago:
109 | del self._per_address_slices[stamp]
110 |
111 | def spm(self, address):
112 |         """ Called by the client code to determine how many shares per minute
113 |         are currently being submitted. Slices older than ten minutes are
114 |         cleaned up by the minute processing loop. """
115 | mins = 0
116 | total = 0
117 | for stamp in self._per_address_slices.keys():
118 | val = self._per_address_slices[stamp].get(address)
119 | if val is not None:
120 | total += val
121 | mins += 1
122 |
123 |         return total / (mins or 1)  # "or 1" prevents division by zero error
124 |
125 | def agent_send(self, *args, **kwargs):
126 | for reporter in self.child_reporters:
127 | reporter.agent_send(*args, **kwargs)
128 |
129 | def add_block(self, *args, **kwargs):
130 | for reporter in self.child_reporters:
131 | reporter.add_block(*args, **kwargs)
132 |
--------------------------------------------------------------------------------
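Since spm above simply averages whatever one-minute slices survive the
ten-minute cleanup, its output is easy to verify by hand. A worked example
(timestamps and share totals are illustrative):

    # Three surviving one-minute slices for a single address
    _per_address_slices = {
        1400000000: {'addr': 96.0},
        1400000060: {'addr': 128.0},
        1400000120: {'addr': 160.0},
    }
    # spm('addr') -> (96.0 + 128.0 + 160.0) / 3 == 128.0 shares per minute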
/powerpool/reporters/redis_reporter.py:
--------------------------------------------------------------------------------
1 | import time
2 | import json
3 |
4 | from . import QueueStatReporter
5 | from ..stratum_server import StratumClient
6 |
7 |
8 | # Script arguments (ARGV): the "current block" key name,
9 | #                          the current timestamp,
10 | #                          a new key name for the "current block" (e.g. unproc_block_{block_hash})
11 | solve_rotate_multichain = """
12 | -- Get all the keys so we can find all the sharechains that contributed
13 | local keys = redis.call('HKEYS', ARGV[1])
14 | -- Set the end time of block solve. This also serves to guarantee the key is there...
15 | redis.call('HSET', ARGV[1], 'solve_time', ARGV[2])
16 | -- Rename to new home
17 | redis.call('rename', ARGV[1], ARGV[3])
18 | -- Initialize the new block key with a start time
19 | redis.call('HSET', ARGV[1], 'start_time', ARGV[2])
20 |
21 | -- Parse out and rotate all share chains. I'm sure this is terrible, no LUA skillz
22 | local idx_map = {}
23 | for key, val in pairs(keys) do
24 | local t = {}
25 | local i = 0
26 | for w in string.gmatch(val, "%w+") do
27 | t[i] = w
28 | i = i + 1
29 | end
30 | if t[0] == "chain" and t[2] == "shares" then
31 | local base = "chain_" .. t[1] .. "_slice"
32 | local idx = redis.call('incr', base .. "_index")
33 | redis.pcall('HSET', ARGV[1], "chain_" .. t[1] .. "_start_index", "" .. idx)
34 | redis.pcall('renamenx', base, base .. "_" .. idx)
35 | table.insert(idx_map, t[1] .. ":" .. idx)
36 | end
37 | end
38 | return idx_map
39 | """
40 |
41 |
42 | class RedisReporter(QueueStatReporter):
43 | one_sec_stats = ['queued']
44 | gl_methods = ['_queue_proc', '_report_one_min']
45 | defaults = QueueStatReporter.defaults.copy()
46 | defaults.update(dict(redis={}, chain=1))
47 |
48 | def __init__(self, config):
49 | self._configure(config)
50 | super(RedisReporter, self).__init__()
51 | # Import reporter type specific modules here as to not require them
52 | # for using powerpool with other reporters
53 | import redis
54 | # A list of exceptions that would indicate that retrying a queue item
55 |         # COULD EVENTUALLY work (i.e. a bad connection due to server
56 |         # maintenance). Errors that are likely to occur because of bad
57 | # coding/edge cases should be let through and data discarded after a
58 | # few attempts.
59 |         self.queue_exceptions = (redis.exceptions.ConnectionError,
60 |                                  redis.exceptions.InvalidResponse,
61 |                                  redis.exceptions.TimeoutError)
63 | self.redis = redis.Redis(**self.config['redis'])
64 | self.solve_cmd = self.redis.register_script(solve_rotate_multichain)
65 |
66 | @property
67 | def status(self):
68 | return dict(queue_size=self.queue.qsize())
69 |
70 | def _queue_log_one_minute(self, address, worker, algo, stamp, typ, amount):
71 | # Include worker info if defined
72 | address += "." + worker
73 | self.redis.hincrbyfloat(
74 | "min_{}_{}_{}".format(StratumClient.share_type_strings[typ], algo, stamp),
75 | address, amount)
76 |
77 | def _queue_add_block(self, address, height, total_subsidy, fees, hex_bits,
78 | hex_hash, currency, algo, merged=False, worker=None,
79 | **kwargs):
80 | block_key = 'current_block_{}_{}'.format(currency, algo)
81 | new_block_key = "unproc_block_{}".format(hex_hash)
82 |
83 | chain_indexes_serial = self.solve_cmd(keys=[], args=[block_key, time.time(), new_block_key])
84 |         chain_indexes = {}
85 |         for chain in chain_indexes_serial:
86 |             chain_id, last_index = chain.split(":")
87 |             chain_indexes["chain_{}_solve_index".format(chain_id)] = last_index
88 | self.redis.hmset(new_block_key, dict(address=address,
89 | worker=worker,
90 | height=height,
91 | total_subsidy=total_subsidy,
92 | fees=fees,
93 | hex_bits=hex_bits,
94 | hash=hex_hash,
95 | currency=currency,
96 | algo=algo,
97 | merged=int(bool(merged)),
98 |                                              **chain_indexes))
99 |
100 | def _queue_log_share(self, address, shares, algo, currency, merged=False):
101 | block_key = 'current_block_{}_{}'.format(currency, algo)
102 | chain_key = 'chain_{}_shares'.format(self.config['chain'])
103 | chain_slice = 'chain_{}_slice'.format(self.config['chain'])
104 | user_shares = '{}:{}'.format(address, shares)
105 | self.redis.hincrbyfloat(block_key, chain_key, shares)
106 | self.redis.rpush(chain_slice, user_shares)
107 |
108 | def log_share(self, client, diff, typ, params, job=None, header_hash=None, header=None,
109 | **kwargs):
110 | super(RedisReporter, self).log_share(
111 | client, diff, typ, params, job=job, header_hash=header_hash,
112 | header=header, **kwargs)
113 |
114 | if typ != StratumClient.VALID_SHARE:
115 | return
116 |
117 | for currency in job.merged_data:
118 | self.queue.put(("_queue_log_share", [], dict(address=client.address,
119 | shares=diff,
120 | algo=job.algo,
121 | currency=currency,
122 | merged=True)))
123 | self.queue.put(("_queue_log_share", [], dict(address=client.address,
124 | shares=diff,
125 | algo=job.algo,
126 | currency=job.currency,
127 | merged=False)))
128 |
129 | def _queue_agent_send(self, address, worker, typ, data, stamp):
130 | if typ == "hashrate" or typ == "temp":
131 | stamp = (stamp // 60) * 60
132 | for did, val in enumerate(data):
133 | self.redis.hset("{}_{}".format(typ, stamp),
134 | "{}_{}_{}".format(address, worker, did),
135 | val)
136 | elif typ == "status":
137 | # Set time so we know how fresh the data is
138 | data['time'] = time.time()
139 | # Remove the data in 1 day
140 | self.redis.setex("status_{}_{}".format(address, worker),
141 | json.dumps(data), 86400)
142 | else:
143 |             self.logger.warn("Received unsupported ppagent type {}"
144 | .format(typ))
145 |
146 | def agent_send(self, *args, **kwargs):
147 | self.queue.put(("_queue_agent_send", args, kwargs))
148 |
149 |
150 | #import redis
151 | #redis = redis.Redis()
152 | #solve_cmd = redis.register_script(solve_rotate_multichain)
153 | #redis.hincrbyfloat("current_block_testing", "chain_1_shares", 12.5)
154 | #print solve_cmd(keys=[], args=["current_block_testing", time.time(),
155 | # "unproc_block_testing"])
156 | #exit(0)
157 |
--------------------------------------------------------------------------------
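The key layout RedisReporter writes is flat enough to inspect directly. A
sketch of reading it back, assuming a local redis instance, the default
chain=1, and placeholder currency/algo/timestamp values:

    import redis

    r = redis.Redis()
    # Per-minute share counters: a hash of "address.worker" -> amount
    print r.hgetall("min_acc_scrypt_1400000000")
    # Running per-chain share totals for the block currently being worked on
    print r.hgetall("current_block_Litecoin_scrypt")
    # Individual "address:shares" entries on the active share slice
    print r.lrange("chain_1_slice", 0, -1)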
/powerpool/server.py:
--------------------------------------------------------------------------------
1 | from cryptokit.base58 import get_bcaddress_version
2 | from gevent import spawn
3 |
4 | from .lib import loop
5 |
6 | import socket
7 | import datetime
8 | import re
9 |
10 |
11 | class GenericClient(object):
12 | def convert_username(self, username):
13 | # if the address they passed is a valid address,
14 | # use it. Otherwise use the pool address
15 | bits = username.split('.', 1)
16 | username = bits[0]
17 | worker = ''
18 | if len(bits) > 1:
19 | parsed_w = re.sub(r'[^a-zA-Z0-9\[\]_]+', '-', str(bits[1]))
20 | self.logger.debug("Registering worker name {}".format(parsed_w))
21 | worker = parsed_w[:16]
22 |
23 | try:
24 | version = get_bcaddress_version(username)
25 | except Exception:
26 | version = False
27 |
28 | if self.config['valid_address_versions'] and version not in self.config['valid_address_versions']:
29 | version = False
30 |
31 | if isinstance(version, int) and version is not False:
32 | address = username
33 | else:
34 |             # Strip everything except letters and digits
35 | filtered = re.sub('[\W_]+', '', username).lower()
36 | self.logger.debug(
37 | "Invalid address passed in, checking aliases against {}"
38 | .format(filtered))
39 | if filtered in self.config['aliases']:
40 | address = self.config['aliases'][filtered]
41 | self.logger.debug("Setting address alias to {}".format(address))
42 | else:
43 | address = self.config['donate_key']
44 | self.logger.debug("Falling back to donate key {}".format(address))
45 | return address, worker
46 |
47 | def start(self):
48 | self.server.add_client(self)
49 | try:
50 | self.peer_name = self.sock.getpeername()
51 | except socket.error:
52 | self.logger.warn(
53 |                 "Peer was no longer connected when trying to set up the connection.")
54 | self.fp = self.sock.makefile()
55 |
56 | self._rloop = spawn(self.read)
57 | self._wloop = spawn(self.write)
58 |
59 | def stop(self, exit_exc=None, caller=None):
60 | spawn(self._stop)
61 |
62 | def _stop(self, exit_exc=None, caller=None):
63 | if self._stopped:
64 | return
65 |
66 | self._stopped = True
67 | self._rloop.kill(block=True)
68 | self._wloop.kill(block=True)
69 |
70 | # handle clean disconnection from client
71 | try:
72 | self.sock.shutdown(socket.SHUT_RDWR)
73 | except socket.error:
74 | pass
75 | try:
76 | self.fp.close()
77 | except (socket.error, AttributeError):
78 | pass
79 | try:
80 | self.sock.close()
81 | except (socket.error, AttributeError):
82 | pass
83 |
84 | self.server.remove_client(self)
85 | self.logger.info("Closing connection for client {}".format(self._id))
86 |
87 | @property
88 | def connection_duration(self):
89 | return datetime.datetime.utcnow() - self.connection_time_dt
90 |
91 | @property
92 | def connection_time_dt(self):
93 | return datetime.datetime.utcfromtimestamp(self.connection_time)
94 |
95 | @loop(fin='stop', exit_exceptions=(socket.error, ))
96 | def write(self):
97 | for item in self.write_queue:
98 | self.fp.write(item)
99 | self.fp.flush()
100 |
--------------------------------------------------------------------------------
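The address/worker split performed by convert_username above feeds all of the
per-user tracking in the stratum server. A sketch of the expected outcomes,
assuming a connected client whose config has no aliases (the username is a
placeholder):

    address, worker = client.convert_username("SomeValidAddress.rig/1")
    # -> ("SomeValidAddress", "rig-1") if the address version validates;
    # -> (config['donate_key'], "rig-1") otherwise (and no alias matches).
    # Worker names are sanitized to [a-zA-Z0-9[]_] plus "-" and capped
    # at 16 characters.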
/powerpool/stratum_server.py:
--------------------------------------------------------------------------------
1 | import json
2 | import socket
3 | import datetime
4 | import argparse
5 | import struct
6 | import random
7 | import time
8 | import weakref
9 |
10 | from binascii import hexlify, unhexlify
11 | from cryptokit import target_from_diff, uint256_from_str
12 | from gevent import sleep, with_timeout
13 | from gevent.queue import Queue
14 | from gevent.pool import Pool
15 | from gevent.server import StreamServer
16 | from pprint import pformat
17 |
18 | from .agent_server import AgentServer, AgentClient
19 | from .exceptions import LoopExit
20 | from .server import GenericClient
21 | from .utils import time_format
22 | from .exceptions import ConfigurationError
23 | from .lib import Component, loop, REQUIRED
24 |
25 |
26 | class ArgumentParserError(Exception):
27 | pass
28 |
29 |
30 | class ThrowingArgumentParser(argparse.ArgumentParser):
31 | def error(self, message):
32 | raise ArgumentParserError(message)
33 |
34 |
35 | password_arg_parser = ThrowingArgumentParser()
36 | password_arg_parser.add_argument('-d', '--diff', type=float)
37 |
38 |
39 | class StratumServer(Component, StreamServer):
40 | """ A single port binding of our stratum server. """
41 | one_min_stats = ['stratum_connects', 'stratum_disconnects',
42 | 'agent_connects', 'agent_disconnects',
43 | 'reject_low_share_n1', 'reject_dup_share_n1',
44 | 'reject_stale_share_n1', 'acc_share_n1',
45 | 'reject_low_share_count', 'reject_dup_share_count',
46 | 'reject_stale_share_count', 'acc_share_count',
47 | 'unk_err', 'not_authed_err', 'not_subbed_err']
48 | # enhance readability by reducing magic number use...
49 | defaults = dict(address="0.0.0.0",
50 | port=3333,
51 | start_difficulty=128,
52 | reporter=None,
53 | jobmanager=None,
54 | algo=REQUIRED,
55 | idle_worker_threshold=300,
56 | aliases={},
57 | valid_address_versions=[],
58 | donate_key="donate",
59 | vardiff=dict(enabled=False,
60 | spm_target=20,
61 | interval=30,
62 | tiers=[8, 16, 32, 64, 96, 128, 192, 256, 512]),
63 | minimum_manual_diff=64,
64 | push_job_interval=30,
65 | idle_worker_disconnect_threshold=3600,
66 | agent=dict(enabled=False,
67 | port_diff=1111,
68 | timeout=120,
69 | accepted_types=['temp', 'status', 'hashrate',
70 | 'thresholds']))
71 |     # Don't spawn a greenlet to handle creation of clients; we start one for
72 | # reading and one for writing in their own class...
73 | _spawn = None
74 |
75 | def __init__(self, config):
76 | self._configure(config)
77 | self.agent_servers = []
78 |
79 | # Start a corresponding agent server
80 | if self.config['agent']['enabled']:
81 | serv = AgentServer(self)
82 | self.agent_servers.append(serv)
83 |
84 | # A dictionary of all connected clients indexed by id
85 | self.clients = {}
86 | self.agent_clients = {}
87 | # A dictionary of lists of connected clients indexed by address
88 | self.address_lut = {}
89 | # A dictionary of lists of connected clients indexed by address and
90 | # worker tuple
91 | self.address_worker_lut = {}
92 | # counters that allow quick display of these numbers. stratum only
93 | self.authed_clients = 0
94 | self.idle_clients = 0
95 | # Unique client ID counters for stratum and agents
96 | self.stratum_id_count = 0
97 | self.agent_id_count = 0
98 |
99 | # Track the last job we pushed and when we pushed it
100 | self.last_flush_job = None
101 | self.last_flush_time = None
102 | self.listener = None
103 |
104 | def start(self, *args, **kwargs):
105 | self.listener = (self.config['address'],
106 | self.config['port'] + self.manager.config['server_number'])
107 | StreamServer.__init__(self, self.listener, spawn=Pool())
108 |
109 | self.algo = self.manager.algos[self.config['algo']]
110 | if not self.config['reporter'] and len(self.manager.component_types['Reporter']) == 1:
111 | self.reporter = self.manager.component_types['Reporter'][0]
112 | elif not self.config['reporter']:
113 |             raise ConfigurationError(
114 |                 "There is more than one Reporter component; the target "
115 |                 "reporter must be specified explicitly!")
116 | else:
117 | self.reporter = self._lookup(self.config['reporter'])
118 |
119 | if not self.config['jobmanager'] and len(self.manager.component_types['Jobmanager']) == 1:
120 | self.jobmanager = self.manager.component_types['Jobmanager'][0]
121 | elif not self.config['jobmanager']:
122 |             raise ConfigurationError(
123 |                 "There is more than one Jobmanager component; the target "
124 |                 "jobmanager must be specified explicitly!")
125 | else:
126 | self.jobmanager = self._lookup(self.config['jobmanager'])
127 | self.jobmanager.new_job.rawlink(self.new_job)
128 |
129 | self.logger.info("Stratum server starting up on {}".format(self.listener))
130 | for serv in self.agent_servers:
131 | serv.start()
132 | StreamServer.start(self, *args, **kwargs)
133 | Component.start(self)
134 |
135 | def stop(self, *args, **kwargs):
136 | self.logger.info("Stratum server {} stopping".format(self.listener))
137 | StreamServer.close(self)
138 | for serv in self.agent_servers:
139 | serv.stop()
140 | for client in self.clients.values():
141 | client.stop()
142 | StreamServer.stop(self)
143 | Component.stop(self)
144 | self.logger.info("Exit")
145 |
146 | def handle(self, sock, address):
147 | """ A new connection appears on the server, so setup a new StratumClient
148 | object to manage it. """
149 |         self.logger.info("Receiving stratum connection from addr {} on sock {}"
150 | .format(address, sock))
151 | self.stratum_id_count += 1
152 | client = StratumClient(
153 | sock,
154 | address,
155 | config=self.config,
156 | logger=self.logger,
157 | jobmanager=self.jobmanager,
158 | manager=self.manager,
159 | algo=self.algo,
160 | server=self,
161 | reporter=self.reporter)
162 | client.start()
163 |
164 | def new_job(self, event):
165 | job = event.job
166 | t = time.time()
167 | job.stratum_string()
168 | flush = job.flush
169 | for client in self.clients.itervalues():
170 | if client.authenticated:
171 | client._push(job, flush=flush, block=False)
172 | self.logger.info("New job enqueued for transmission to {} users in {}"
173 | .format(len(self.clients), time_format(time.time() - t)))
174 | self.last_flush_job = job
175 | self.last_flush_time = time.time()
176 |
177 | @property
178 | def status(self):
179 | """ For display in the http monitor """
180 | hps = (self.algo['hashes_per_share'] *
181 | self.counters['acc_share_n1'].minute /
182 | 60.0)
183 | dct = dict(mhps=hps / 1000000.0,
184 | hps=hps,
185 | last_flush_job=None,
186 | agent_client_count=len(self.agent_clients),
187 | client_count=len(self.clients),
188 | address_count=len(self.address_lut),
189 |                    address_worker_count=len(self.address_worker_lut),
190 | client_count_authed=self.authed_clients,
191 | client_count_active=len(self.clients) - self.idle_clients,
192 | client_count_idle=self.idle_clients)
193 | if self.last_flush_job:
194 | j = self.last_flush_job
195 | dct['last_flush_job'] = dict(
196 | algo=j.algo,
197 | pow_block_hash=j.pow_block_hash,
198 | currency=j.currency,
199 | job_id=j.job_id,
200 | merged_networks=j.merged_data.keys(),
201 | pushed_at=self.last_flush_time
202 | )
203 | return dct
204 |
205 | def set_user(self, client):
206 | """ Add the client (or create) appropriate worker and address trackers
207 | """
208 | user_worker = (client.address, client.worker)
209 | self.address_worker_lut.setdefault(user_worker, [])
210 | self.address_worker_lut[user_worker].append(client)
211 | self.authed_clients += 1
212 |
213 | self.address_lut.setdefault(user_worker[0], [])
214 | self.address_lut[user_worker[0]].append(client)
215 |
216 | def add_client(self, client):
217 | if isinstance(client, StratumClient):
218 | self._incr('stratum_connects')
219 | self.clients[client._id] = client
220 | elif isinstance(client, AgentClient):
221 | self._incr('agent_connects')
222 | self.agent_clients[client._id] = client
223 | else:
224 | self.logger.warn("Add client got unknown client of type {}"
225 | .format(type(client)))
226 |
227 | def remove_client(self, client):
228 | """ Manages removing the StratumClient from the luts """
229 | if isinstance(client, StratumClient):
230 | del self.clients[client._id]
231 | address, worker = client.address, client.worker
232 | self._incr('stratum_disconnects')
233 |
234 | if client.authenticated:
235 | self.authed_clients -= 1
236 | if client.idle:
237 | self.idle_clients -= 1
238 |
239 | # it won't appear in the luts if these values were never set
240 | if address is None and worker is None:
241 | return
242 |
243 | # wipe the client from the address tracker
244 | if address in self.address_lut:
245 | # remove from lut for address
246 | self.address_lut[address].remove(client)
247 | # if it's the last client in the object, delete the entry
248 | if not len(self.address_lut[address]):
249 | del self.address_lut[address]
250 |
251 | # wipe the client from the address/worker lut
252 | key = (address, worker)
253 | if key in self.address_worker_lut:
254 | self.address_worker_lut[key].remove(client)
255 | # if it's the last client in the object, delete the entry
256 | if not len(self.address_worker_lut[key]):
257 | del self.address_worker_lut[key]
258 | elif isinstance(client, AgentClient):
259 | self._incr('agent_disconnects')
260 | del self.agent_clients[client._id]
261 | else:
262 | self.logger.warn("Remove client got unknown client of type {}"
263 | .format(type(client)))
264 |
265 |
266 | class StratumClient(GenericClient):
267 | """ Object representation of a single stratum connection to the server. """
268 |
269 | # Stratum error codes
270 | errors = {20: 'Other/Unknown',
271 | 21: 'Job not found (=stale)',
272 | 22: 'Duplicate share',
273 | 23: 'Low difficulty share',
274 | 24: 'Unauthorized worker',
275 | 25: 'Not subscribed'}
276 | error_counter = {20: 'unk_err',
277 | 24: 'not_authed_err',
278 | 25: 'not_subbed_err'}
279 | # enhance readability by reducing magic number use...
280 | STALE_SHARE_ERR = 21
281 | LOW_DIFF_ERR = 23
282 | DUP_SHARE_ERR = 22
283 |
284 |     # constants for share submission outcomes, returned by the share checker
285 | VALID_SHARE = 0
286 | DUP_SHARE = 1
287 | LOW_DIFF_SHARE = 2
288 | STALE_SHARE = 3
289 | share_type_strings = {0: "acc", 1: "dup", 2: "low", 3: "stale"}
290 |
291 | def __init__(self, sock, address, logger, manager, jobmanager, server,
292 | reporter, algo, config):
293 | self.config = config
294 | self.jobmanager = jobmanager
295 | self.manager = manager
296 | self.algo = algo
297 | self.server = server
298 | self.reporter = reporter
299 | self.logger = logger
300 | self.sock = sock
301 | self.address = address
302 |
303 | # Seconds before sending keepalive probes
304 | sock.setsockopt(socket.SOL_TCP, socket.TCP_KEEPIDLE, 120)
305 | # Interval in seconds between keepalive probes
306 | sock.setsockopt(socket.SOL_TCP, socket.TCP_KEEPINTVL, 1)
307 |         # Failed keepalive probes before declaring the other end dead
308 | sock.setsockopt(socket.SOL_TCP, socket.TCP_KEEPCNT, 5)
309 |
310 | self.authenticated = False
311 | self.subscribed = False
312 | # flags for current connection state
313 | self.idle = False
314 | self.address = None
315 | self.worker = None
316 | self.client_type = None
317 | # the worker id. this is also extranonce 1
318 | id = self.server.stratum_id_count
319 | if self.manager.config['extranonce_serv_size'] == 8:
320 | self._id = hexlify(struct.pack('Q', id))
321 | elif self.manager.config['extranonce_serv_size'] == 4:
322 | self._id = hexlify(struct.pack('I', id))
323 | else:
324 | raise Exception("Unsupported extranonce size!")
325 |
326 | t = time.time()
327 | # running total for vardiff
328 | self.accepted_shares = 0
329 | # an index of jobs and their difficulty
330 | self.job_mapper = {}
331 | self.old_job_mapper = {}
332 | self.job_counter = random.randint(0, 100000)
333 | # Allows us to avoid a bunch of clients getting scheduled at the same
334 | # time by offsetting most timing values by this
335 | self.time_seed = random.uniform(0, 10)
336 | # Used to determine if they're idle
337 | self.last_share_submit = t
338 | # Used to determine if we should send another job on read loop timeout
339 | self.last_job_push = t
340 | # Avoids repeat pushing jobs that the client already knows about
341 | self.last_job = None
342 | # Last time vardiff happened
343 | self.last_diff_adj = t - self.time_seed
344 | # Current difficulty setting
345 | self.difficulty = self.config['start_difficulty']
346 | # the next diff to be used by push job
347 | self.next_diff = self.config['start_difficulty']
348 | # What time the user connected...
349 | self.connection_time = int(t)
350 |
351 | # where we put all the messages that need to go out
352 | self.write_queue = Queue()
353 | self.fp = None
354 | self._stopped = False
355 |
356 | def _incr(self, *args):
357 | self.server._incr(*args)
358 |
359 | def send_error(self, num=20, id_val=1):
360 | """ Utility for transmitting an error to the client """
361 | err = {'id': id_val,
362 | 'result': None,
363 | 'error': (num, self.errors[num], None)}
364 |         self.logger.debug("Error number {} to {}".format(num, self.peer_name[0]))
365 | self.write_queue.put(json.dumps(err, separators=(',', ':')) + "\n")
366 |
367 | def send_success(self, id_val=1):
368 | """ Utility for transmitting success to the client """
369 | succ = {'id': id_val, 'result': True, 'error': None}
370 | self.logger.debug("success response: {}".format(pformat(succ)))
371 | self.write_queue.put(json.dumps(succ, separators=(',', ':')) + "\n")
372 |
373 | def push_difficulty(self):
374 | """ Pushes the current difficulty to the client. Currently this
375 |         only happens upon initial connect, but would also be used for vardiff
376 | """
377 | send = {'params': [self.difficulty],
378 | 'id': None,
379 | 'method': 'mining.set_difficulty'}
380 | self.write_queue.put(json.dumps(send, separators=(',', ':')) + "\n")
381 |
382 | def push_job(self, flush=False, timeout=False):
383 |         """ Pushes the latest job down to the client. Flush indicates
384 |         whether the client should discard its previous jobs, which happens
385 |         when a new block is found, since work on the old block is
386 |         invalid."""
387 | job = None
388 | while job is None:
389 | job = self.jobmanager.latest_job
390 | if job is None:
391 | self.logger.warn("No jobs available for worker!")
392 | sleep(0.1)
393 |
394 | if self.last_job == job and not timeout:
395 | self.logger.info("Ignoring non timeout resend of job id {} to worker {}.{}"
396 | .format(job.job_id, self.address, self.worker))
397 | return
398 |
399 | # we push the next difficulty here instead of in the vardiff block to
400 | # prevent a potential mismatch between client and server
401 | if self.next_diff != self.difficulty:
402 | self.logger.info(
403 | "Pushing diff update {} -> {} before job for {}.{}"
404 | .format(self.difficulty, self.next_diff, self.address, self.worker))
405 | self.difficulty = self.next_diff
406 | self.push_difficulty()
407 |
408 | self.logger.debug("Sending job id {} to worker {}.{}{}"
409 | .format(job.job_id, self.address, self.worker,
410 | " after timeout" if timeout else ''))
411 |
412 | self._push(job)
413 |
414 | def _push(self, job, flush=False, block=True):
415 | """ Abbreviated push update that will occur when pushing new block
416 |         notifications. Micro-optimized to try and cut stale share rates as much
417 | as possible. """
418 | self.last_job = job
419 | self.last_job_push = time.time()
420 | # get client local job id to map current difficulty
421 | self.job_counter += 1
422 | if self.job_counter % 10 == 0:
423 |             # Swap with the old mapper (rather than allocating) to avoid GC
424 |             tmp = self.old_job_mapper
425 |             self.old_job_mapper = self.job_mapper
426 | self.job_mapper = tmp
427 | self.job_mapper.clear()
428 | job_id = str(self.job_counter)
429 | self.job_mapper[job_id] = (self.difficulty, weakref.ref(job))
430 | self.write_queue.put(job.stratum_string() % (job_id, "true" if flush else "false"), block=block)
431 |
432 | def submit_job(self, data, t):
433 |         """ Handles receiving a work submission, checking that it is valid,
434 |         whether it meets network diff, etc. Sends a reply to the stratum client. """
435 | params = data['params']
436 | # [worker_name, job_id, extranonce2, ntime, nonce]
437 | # ["slush.miner1", "bf", "00000001", "504e86ed", "b2957c02"]
438 | if __debug__:
439 | self.logger.debug(
440 |                 "Received work submit:\n\tworker_name: {0}\n\t"
441 | "job_id: {1}\n\textranonce2: {2}\n\t"
442 | "ntime: {3}\n\tnonce: {4} ({int_nonce})"
443 | .format(
444 | *params,
445 | int_nonce=struct.unpack(str("= job_target:
500 | self.logger.info("Low diff share rejected from worker {}.{}!"
501 | .format(self.address, self.worker))
502 | self.send_error(self.LOW_DIFF_ERR, id_val=data['id'])
503 | self.reporter.log_share(client=self,
504 | diff=difficulty,
505 | typ=self.LOW_DIFF_SHARE,
506 | params=params,
507 | job=job,
508 | start=t)
509 | return difficulty, self.LOW_DIFF_SHARE
510 |
511 | # we want to send an ack ASAP, so do it here
512 | self.send_success(id_val=data['id'])
513 | # Add the share to the accepted set to check for dups
514 | job.acc_shares.add(share)
515 | self.accepted_shares += difficulty
516 | self.reporter.log_share(client=self,
517 | diff=difficulty,
518 | typ=self.VALID_SHARE,
519 | params=params,
520 | job=job,
521 | header_hash=hash_int,
522 | header=header,
523 | start=t)
524 |
525 | return difficulty, self.VALID_SHARE
526 |
527 | def recalc_vardiff(self):
528 | # ideal difficulty is the n1 shares they solved divided by target
529 | # shares per minute
530 | spm_tar = self.config['vardiff']['spm_target']
531 | ideal_diff = self.reporter.spm(self.address) / spm_tar
532 | self.logger.debug("VARDIFF: Calculated client {} ideal diff {}"
533 | .format(self._id, ideal_diff))
534 | # find the closest tier for them
535 | new_diff = min(self.config['vardiff']['tiers'], key=lambda x: abs(x - ideal_diff))
536 |
537 | if new_diff != self.difficulty:
538 | self.logger.info(
539 | "VARDIFF: Moving to D{} from D{} on {}.{}"
540 | .format(new_diff, self.difficulty, self.address, self.worker))
541 | self.next_diff = new_diff
542 | else:
543 | self.logger.debug("VARDIFF: Not adjusting difficulty, already "
544 | "close enough")
545 |
546 | self.last_diff_adj = time.time()
547 | self.push_job(timeout=True)
548 |
549 | @loop(fin='stop', exit_exceptions=(socket.error, ))
550 | def read(self):
551 | # designed to time out approximately "push_job_interval" after the user
552 |         # last received a job. Some miners will consider the mining server dead
553 |         # if they don't receive something at least once a minute, regardless of
554 | # whether a new job is _needed_. This aims to send a job _only_ as
555 | # often as needed
556 |         line = with_timeout(max(0, self.last_job_push + self.config['push_job_interval'] - self.time_seed - time.time()),
557 | self.fp.readline,
558 | timeout_value='timeout')
559 |
560 | if line == 'timeout':
561 | t = time.time()
562 | if not self.idle and (t - self.last_share_submit) > self.config['idle_worker_threshold']:
563 | self.idle = True
564 | self.server.idle_clients += 1
565 |
566 |             # Disconnect them entirely if they've been idle far too long
567 | if (t - self.last_share_submit) > self.config['idle_worker_disconnect_threshold']:
568 | self.logger.info("Disconnecting worker {}.{} at ip {} for inactivity"
569 | .format(self.address, self.worker, self.peer_name[0]))
570 | self.stop()
571 |
572 | if (self.authenticated is True and # don't send to non-authed
573 | # force send if we need to push a new difficulty
574 | (self.next_diff != self.difficulty or
575 | # send if we're past the push interval
576 | t > (self.last_job_push +
577 | self.config['push_job_interval'] -
578 | self.time_seed))):
579 | if self.config['vardiff']['enabled'] is True:
580 | self.recalc_vardiff()
581 | self.push_job(timeout=True)
582 | return
583 |
584 | line = line.strip()
585 |
586 |         # Reading from a defunct connection yields an EOF character which gets
587 | # stripped off
588 | if not line:
589 | raise LoopExit("Closed file descriptor encountered")
590 |
591 | try:
592 | data = json.loads(line)
593 | except ValueError:
594 | self.logger.warn("Data {}.. not JSON".format(line[:15]))
595 | self.send_error()
596 | self._incr('unk_err')
597 | return
598 |
599 | # handle malformed data
600 | data.setdefault('id', 1)
601 | data.setdefault('params', [])
602 |
603 | if __debug__:
604 | self.logger.debug("Data {} recieved on client {}".format(data, self._id))
605 |
606 | # run a different function depending on the action requested from
607 | # user
608 | if 'method' not in data:
609 |             self.logger.warn("No method specified in JSON from {}".format(self.peer_name[0]))
610 | self._incr('unk_err')
611 | self.send_error(id_val=data['id'])
612 | return
613 |
614 | meth = data['method'].lower()
615 | if meth == 'mining.subscribe':
616 | if self.subscribed is True:
617 | self.send_error(id_val=data['id'])
618 | return
619 |
620 | try:
621 | self.client_type = data['params'][0]
622 | except IndexError:
623 | pass
624 | ret = {
625 | 'result': (
626 | (
627 | # These values aren't used for anything, although
628 | # perhaps they should be
629 | ("mining.set_difficulty", self._id),
630 | ("mining.notify", self._id)
631 | ),
632 | self._id,
633 | self.manager.config['extranonce_size']
634 | ),
635 | 'error': None,
636 | 'id': data['id']
637 | }
638 | self.subscribed = True
639 | self.logger.debug("Sending subscribe response: {}".format(pformat(ret)))
640 | self.write_queue.put(json.dumps(ret) + "\n")
641 |
642 | elif meth == "mining.authorize":
643 | if self.subscribed is False:
644 | self._incr('not_subbed_err')
645 | self.send_error(25, id_val=data['id'])
646 | return
647 |
648 | if self.authenticated is True:
649 | self._incr('not_authed_err')
650 | self.send_error(24, id_val=data['id'])
651 | return
652 |
653 | try:
654 | password = data['params'][1]
655 | username = data['params'][0]
656 | # allow the user to use the password field as an argument field
657 | try:
658 | args = password_arg_parser.parse_args(password.split())
659 | except ArgumentParserError:
660 | # Ignore malformed parser data
661 | pass
662 | else:
663 | if args.diff:
664 | diff = max(self.config['minimum_manual_diff'], args.diff)
665 | self.difficulty = diff
666 | self.next_diff = diff
667 | except IndexError:
668 | password = ""
669 | username = ""
670 |
671 | self.manager.log_event(
672 | "{name}.auth:1|c".format(name=self.manager.config['procname']))
673 |
674 | self.logger.info("Authentication request from {} for username {}"
675 | .format(self.peer_name[0], username))
676 | user_worker = self.convert_username(username)
677 |
678 | # unpack into state dictionary
679 | self.address, self.worker = user_worker
680 | self.authenticated = True
681 | self.server.set_user(self)
682 |
683 |             # notify them of successful auth and send the current difficulty
684 |             # and latest job
685 | self.send_success(data['id'])
686 | self.push_difficulty()
687 | self.push_job()
688 |
689 | elif meth == "mining.submit":
690 | if self.authenticated is False:
691 | self._incr('not_authed_err')
692 | self.send_error(24, id_val=data['id'])
693 | return
694 |
695 | t = time.time()
696 | diff, typ = self.submit_job(data, t)
697 | # Log the share to our stat counters
698 | key = ""
699 | if typ > 0:
700 | key += "reject_"
701 | key += StratumClient.share_type_strings[typ] + "_share"
702 | if typ == 0:
703 | # Increment valid shares to calculate hashrate
704 | self._incr(key + "_n1", diff)
705 | self.manager.log_event(
706 | "{name}.{type}:1|c\n"
707 | "{name}.{type}_n1:{diff}|c\n"
708 | "{name}.submit_time:{t}|ms"
709 | .format(name=self.manager.config['procname'], type=key,
710 | diff=diff, t=(time.time() - t) * 1000))
711 |
712 | # don't recalc their diff more often than interval
713 | if (self.config['vardiff']['enabled'] is True and
714 | (t - self.last_diff_adj) > self.config['vardiff']['interval']):
715 | self.recalc_vardiff()
716 |
717 | elif meth == "mining.get_transactions":
718 | self.send_error(id_val=data['id'])
719 | elif meth == "mining.extranonce.subscribe":
720 | self.send_success(id_val=data['id'])
721 |
722 | else:
723 |             self.logger.info("Unknown action {} from {}"
724 | .format(data['method'][:20], self.peer_name[0]))
725 | self._incr('unk_err')
726 | self.send_error(id_val=data['id'])
727 |
728 | @property
729 | def summary(self):
730 | """ Displayed on the all client view in the http status monitor """
731 | return dict(worker=self.worker, idle=self.idle)
732 |
733 | @property
734 | def last_share_submit_delta(self):
735 | return datetime.datetime.utcnow() - datetime.datetime.utcfromtimestamp(self.last_share_submit)
736 |
737 | @property
738 | def details(self):
739 | """ Displayed on the single client view in the http status monitor """
740 | return dict(alltime_accepted_shares=self.accepted_shares,
741 | difficulty=self.difficulty,
742 | type=self.client_type,
743 | worker=self.worker,
744 | id=self._id,
745 | jobmapper_size=len(self.old_job_mapper) + len(self.job_mapper),
746 | last_share_submit=str(self.last_share_submit_delta),
747 | idle=self.idle,
748 | address=self.address,
749 | ip_address=self.peer_name[0],
750 | connection_time=str(self.connection_duration))
751 |
--------------------------------------------------------------------------------
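recalc_vardiff above maps the reporter's shares-per-minute estimate onto the
configured tier list. A worked example using the default vardiff settings (the
spm value is illustrative):

    tiers = [8, 16, 32, 64, 96, 128, 192, 256, 512]
    spm = 900.0                    # reporter.spm(address)
    ideal_diff = spm / 20          # spm_target=20 -> 45.0
    new_diff = min(tiers, key=lambda x: abs(x - ideal_diff))
    # -> 32, the closest tier; next_diff is set to 32 and pushed with the
    # next job rather than immediately, avoiding a client/server mismatch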
/powerpool/utils.py:
--------------------------------------------------------------------------------
1 | import collections
2 | import time
3 | import importlib
4 | import cProfile
5 |
6 |
7 | class Benchmark(object):
8 | def __init__(self, name):
9 | self.name = name
10 |
11 | def __enter__(self):
12 | self.start = time.time()
13 |
14 | def __exit__(self, ty, val, tb):
15 | end = time.time()
16 | print("BENCHMARK: {} in {}"
17 | .format(self.name, time_format(end - self.start)))
18 | return False
19 |
20 |
21 | def profileit(func):
22 | def wrapper(*args, **kwargs):
23 | datafn = func.__name__ + ".profile" # Name the data file sensibly
24 | prof = cProfile.Profile()
25 | retval = prof.runcall(func, *args, **kwargs)
26 | prof.dump_stats(datafn)
27 | return retval
28 | return wrapper
29 |
30 |
31 | def time_format(seconds):
32 | # microseconds
33 | if seconds <= 1.0e-3:
34 | return "{:,.4f} us".format(seconds * 1000000.0)
35 | if seconds <= 1.0:
36 | return "{:,.4f} ms".format(seconds * 1000.0)
37 | return "{:,.4f} sec".format(seconds)
38 |
39 |
40 | def recursive_update(d, u):
41 | """ Simple recursive dictionary update """
42 | for k, v in u.iteritems():
43 | if isinstance(v, collections.Mapping):
44 | r = recursive_update(d.get(k, {}), v)
45 | d[k] = r
46 | else:
47 | d[k] = u[k]
48 | return d
49 |
50 |
51 | def import_helper(dotted_path):
52 | module, cls = dotted_path.rsplit(".", 1)
53 | module = importlib.import_module(module)
54 | return getattr(module, cls)
55 |
56 |
57 | def timeit(method):
58 |
59 | def timed(*args, **kw):
60 | ts = time.time()
61 | result = method(*args, **kw)
62 | te = time.time()
63 |
64 | print '%r (%r, %r) %s' % \
65 | (method.__name__, args, kw, time_format(te-ts))
66 | return result
67 |
68 | return timed
69 |
--------------------------------------------------------------------------------
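time_format above selects a unit by magnitude, which is easy to sanity-check:

    print time_format(0.0000005)   # "0.5000 us"
    print time_format(0.25)        # "250.0000 ms"
    print time_format(3.2)         # "3.2000 sec"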
/requirements-test.txt:
--------------------------------------------------------------------------------
1 |
2 | mocket==1.1.1
3 |
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | # Top level requirements
2 | git+https://github.com/simplecrypto/cryptokit.git@v0.2.10#egg=cryptokit
3 | gevent==1.0.1
4 | PyYAML==3.10
5 | Flask==0.10.1
6 | setproctitle==1.1.8
7 |
8 | # Dependencies of top level req
9 | Jinja2==2.7.3
10 | MarkupSafe==0.23
11 | Werkzeug==0.9.6
12 | argparse==1.2.1
13 | future==0.11.2
14 | greenlet==0.4.3
15 | itsdangerous==0.24
16 | wsgiref==0.1.2
17 | gevent-helpers==0.1.1
18 |
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 |
3 | from setuptools import setup, find_packages
4 | import powerpool
5 |
6 |
7 | setup(name='powerpool',
8 | version=powerpool.__version__,
9 | description='A pluggable mining pool server implementation',
10 | author='Isaac Cook',
11 | author_email='isaac@simpload.com',
12 | url='http://www.python.org/sigs/distutils-sig/',
13 | packages=find_packages(),
14 | entry_points={
15 | 'console_scripts': [
16 | 'pp = powerpool.main:main'
17 | ]
18 | }
19 | )
20 |
--------------------------------------------------------------------------------