├── .gitignore
├── .travis.yml
├── Dockerfile
├── Gemfile
├── LICENSE
├── README.md
├── Rakefile
├── bin
│   └── meeseeker
├── lib
│   ├── meeseeker.rb
│   └── meeseeker
│       ├── block_follower_job.rb
│       ├── hive_engine.rb
│       ├── steem_engine
│       │   ├── agent.rb
│       │   └── follower_job.rb
│       ├── version.rb
│       └── witness_schedule_job.rb
├── meeseeker.gemspec
└── test
    ├── meeseeker
    │   └── meeseeker_test.rb
    └── test_helper.rb
/.gitignore:
--------------------------------------------------------------------------------
1 | *.gem
2 | *.rbc
3 | /.config
4 | /coverage/
5 | /InstalledFiles
6 | /pkg/
7 | /spec/reports/
8 | /spec/examples.txt
9 | /test/tmp/
10 | /test/version_tmp/
11 | /tmp/
12 |
13 | # Used by dotenv library to load environment variables.
14 | # .env
15 |
16 | ## Specific to RubyMotion:
17 | .dat*
18 | .repl_history
19 | build/
20 | *.bridgesupport
21 | build-iPhoneOS/
22 | build-iPhoneSimulator/
23 |
24 | ## Specific to RubyMotion (use of CocoaPods):
25 | #
26 | # We recommend against adding the Pods directory to your .gitignore. However
27 | # you should judge for yourself, the pros and cons are mentioned at:
28 | # https://guides.cocoapods.org/using/using-cocoapods.html#should-i-check-the-pods-directory-into-source-control
29 | #
30 | # vendor/Pods/
31 |
32 | ## Documentation cache and generated files:
33 | /.yardoc/
34 | /_yardoc/
35 | /doc/
36 | /rdoc/
37 |
38 | ## Environment normalization:
39 | /.bundle/
40 | /vendor/bundle
41 | /lib/bundler/man/
42 |
43 | # for a library or gem, you might want to ignore these files since the code is
44 | # intended to run in multiple environments; otherwise, check them in:
45 | Gemfile.lock
46 | # .ruby-version
47 | # .ruby-gemset
48 |
49 | # unless supporting rvm < 1.11.0 or doing something fancy, ignore this:
50 | .rvmrc
51 |
--------------------------------------------------------------------------------
/.travis.yml:
--------------------------------------------------------------------------------
1 | language: ruby
2 | rvm:
3 | - 2.5
4 |
5 | services:
6 | - redis-server
7 |
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM redis
2 |
3 | ENV APP_ROOT /meeseeker
4 | ENV MEESEEKER_MAX_KEYS 300000
5 | WORKDIR /meeseeker
6 |
7 | # Dependencies
8 | RUN \
9 | apt-get update && \
10 | apt-get install -y \
11 | curl \
12 | bzip2 \
13 | build-essential \
14 | libssl-dev \
15 | libreadline-dev \
16 | zlib1g-dev \
17 | nodejs \
18 | procps && \
19 | apt-get clean && \
20 | command curl -sSL https://rvm.io/mpapis.asc | gpg --import - && \
21 | command curl -sSL https://rvm.io/pkuczynski.asc | gpg --import - && \
22 | curl -sSL https://get.rvm.io | bash -s stable --ruby
23 |
24 | RUN \
25 | /bin/bash -c " \
26 | source /usr/local/rvm/scripts/rvm && \
27 | gem update --system && \
28 | gem install bundler \
29 | "
30 |
31 | # copy in everything from repo
32 | COPY bin bin
33 | COPY lib lib
34 | COPY Gemfile .
35 | COPY meeseeker.gemspec .
36 | COPY Rakefile .
37 | COPY LICENSE .
38 | COPY README.md .
39 |
40 | RUN chmod +x /meeseeker/bin/meeseeker
41 |
42 | RUN \
43 | /bin/bash -c " \
44 | source /usr/local/rvm/scripts/rvm && \
45 | bundle config --global silence_root_warning 1 && \
46 | bundle install \
47 | "
48 |
49 | ENTRYPOINT \
50 | /usr/local/bin/redis-server --daemonize yes && \
51 | /bin/bash -c " \
52 | source /usr/local/rvm/scripts/rvm && \
53 | while :; do bundle exec rake sync; echo Restarting meeseeker; sleep 3; done \
54 | "
55 |
56 | EXPOSE 6379
57 |
--------------------------------------------------------------------------------
/Gemfile:
--------------------------------------------------------------------------------
1 | source 'https://rubygems.org'
2 |
3 | # Specify your gem's dependencies in meeseeker.gemspec
4 | gemspec
5 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | CC0 1.0 Universal (CC0 1.0)
2 | Public Domain Dedication
3 | https://creativecommons.org/publicdomain/zero/1.0/
4 |
5 | This is a human-readable summary of the Legal Code:
6 | https://creativecommons.org/publicdomain/zero/1.0/legalcode
7 |
8 | Disclaimer
9 |
10 | The Commons Deed is not a legal instrument. It is simply a handy reference for
11 | understanding the CC0 Legal Code, a human-readable expression of some of its key
12 | terms. Think of it as the user-friendly interface to the CC0 Legal Code beneath.
13 | This Deed itself has no legal value, and its contents do not appear in CC0.
14 | Creative Commons is not a law firm and does not provide legal services.
15 | Distributing, displaying, or linking to this Commons Deed does not create an
16 | attorney-client relationship.
17 |
18 | Creative Commons has not verified the copyright status of any work to which CC0
19 | has been applied. CC makes no warranties about any work or its copyright status
20 | in any jurisdiction, and disclaims all liability for all uses of any work.
21 |
22 | No Copyright
23 |
24 | The person who associated a work with this deed has dedicated the work to the
25 | public domain by waiving all of his or her rights to the work worldwide under
26 | copyright law, including all related and neighboring rights, to the extent
27 | allowed by law.
28 |
29 | You can copy, modify, distribute and perform the work, even for commercial
30 | purposes, all without asking permission. See Other Information below.
31 |
32 | Other Information
33 |
34 | * In no way are the patent or trademark rights of any person affected by CC0,
35 | nor are the rights that other persons may have in the work or in how the work
36 | is used, such as publicity or privacy rights.
37 | * Unless expressly stated otherwise, the person who associated a work with this
38 | deed makes no warranties about the work, and disclaims liability for all uses
39 | of the work, to the fullest extent permitted by applicable law.
40 | * When using or citing the work, you should not imply endorsement by the author
41 | or the affirmer.
42 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # meeseeker
2 |
3 | A Redis-based block follower that provides an efficient way for multiple apps to stream the Hive blockchain.
4 |
5 | [Build Status](https://travis-ci.org/inertia186/meeseeker)
6 |
7 | If you have multiple applications that need to perform actions as operations occur, `meeseeker` will allow your apps to each perform actions for specific operations without each app having to stream the entire blockchain.
8 |
9 | *In a nutshell:* The overarching intent here is to provide a "live view" of the blockchain, *not* store the entire blockchain. Apps can attach to your redis source and ask, "What *just* happened?"
10 |
11 | ## Purpose
12 |
13 | Although meeseeker tracks all operations, it is only intended to provide other applications with signals that those operations have happened. It is not intended to provide cryptographically verifiable events.
14 |
15 | Possible uses:
16 |
17 | * Notifications of events, suitable for push to mobile devices or web browsers.
18 | * Invoke periodic updates on a threshold.
19 | * Light-weight bots that only care about a limited set of operations, reducing the number of API calls.
20 |
21 | ## Why Redis?
22 |
23 | Redis is a persistent key-value database with a built-in network interface. See: https://redis.io/
24 |
25 | It allows for quick storage and lookup of operations by key as well as the ability to automatically expire keys that are no longer needed.
26 |
27 | ### Installation
28 |
29 | First, install redis:
30 |
31 | On Linux:
32 |
33 | ```bash
34 | sudo apt install redis-server
35 | ```
36 |
37 | On macOS:
38 |
39 | ```bash
40 | brew install redis
41 | ```
42 |
43 | Next, install Ruby. One way to do this is to install [rvm](https://rvm.io/install). Once Ruby is installed, install `meeseeker` with the `gem` command:
44 |
45 | ```bash
46 | gem install meeseeker
47 | ```
48 |
49 | This installs meeseeker as a command available to the OS, e.g.:
50 |
51 | ```bash
52 | meeseeker help
53 | ```
54 |
55 | To do the actual sync to your local redis source (defaults assume `redis://127.0.0.1:6379/0`):
56 |
57 | ```bash
58 | meeseeker sync
59 | ```
60 |
61 | To specify an alternative redis source:
62 |
63 | ```bash
64 | MEESEEKER_REDIS_URL=redis://:p4ssw0rd@10.0.1.1:6380/15 meeseeker sync
65 | ```
66 |
67 | You can also specify an alternative Hive node:
68 |
69 | ```bash
70 | MEESEEKER_NODE_URL=http://anyx.io meeseeker sync
71 | ```
72 |
73 | You can also specify a Steem node instead of Hive (if that's your thing):
74 |
75 | ```bash
76 | MEESEEKER_NODE_URL=https://api.steemit.com meeseeker sync[steem]
77 | ```
78 |
79 | Or, you can have meeseeker automatically use random Hive nodes:
80 |
81 | ```bash
82 | MEESEEKER_NODE_URL=shuffle meeseeker sync
83 | ```
84 |
85 | To sync from the head block instead of the last irreversible block:
86 |
87 | ```bash
88 | MEESEEKER_STREAM_MODE=head meeseeker sync
89 | ```
90 |
91 | To ignore virtual operations (useful if the node doesn't enable `get_ops_in_blocks` or if you want to sync from the head block):
92 |
93 | ```bash
94 | MEESEEKER_INCLUDE_VIRTUAL=false meeseeker sync
95 | ```
96 |
97 | Normally, block headers are added to the `hive:block` channel. This requires one additional API call for each block. If you don't need block headers, you can configure the `hive:block` channel to publish only the `block_num`:
98 |
99 | ```bash
100 | MEESEEKER_INCLUDE_BLOCK_HEADER=false meeseeker sync
101 | ```
102 |
103 | Normally, keys stay on redis for 24 hours. If you want to change this behavior, use `MEESEEKER_EXPIRE_KEYS` and specify the new value in seconds, for example:
104 |
105 | ```bash
106 | MEESEEKER_EXPIRE_KEYS=10 meeseeker sync
107 | ```
108 |
109 | If you never want the keys to expire (not recommended), set
110 | `MEESEEKER_EXPIRE_KEYS` to -1:
111 |
112 | ```bash
113 | MEESEEKER_EXPIRE_KEYS=-1 meeseeker sync
114 | ```
115 |
116 | Normally, sync will create keys until it uses up all available memory. If you would like meeseeker to sync only a certain number of keys, then sleep until those keys expire and pick up where it left off, set `MEESEEKER_MAX_KEYS` to a positive value:
117 |
118 | ```bash
119 | MEESEEKER_MAX_KEYS=99 meeseeker sync
120 | ```
121 |
122 | ### Usage
123 |
124 | When `meeseeker sync` starts for the first time, it initializes from the last irreversible block number. If the sync is interrupted, it will resume from the last block sync'd unless that block is older than `MEESEEKER_EXPIRE_KEYS` in which case it will skip to the last irreversible block number.
125 |
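The resume point is kept in a plain redis key (with the defaults above, `hive:meeseeker:last_block_num`), so you can inspect it yourself. Here's a minimal `ruby` sketch, assuming the default redis source:

```ruby
require 'redis'

redis = Redis.new(url: 'redis://127.0.0.1:6379/0')
last_block_num = redis.get('hive:meeseeker:last_block_num')

if last_block_num.nil?
  puts 'No sync state yet; the next sync starts from the last irreversible block.'
else
  puts "Last block synced: #{last_block_num}"
end
```
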
126 | #### Using `SUBSCRIBE`
127 |
128 | For `redis-cli`, please see: https://redis.io/topics/pubsub
129 |
130 | ##### Sync
131 |
132 | When running `meeseeker sync`, the following channels are available:
133 |
134 | * `hive:block`
135 | * `hive:transaction`
136 | * `hive:op:vote`
137 | * `hive:op:comment`
138 | * `hive:op:comment_options`
139 | * `hive:op:whatever` (replace "whatever" with the op you want)
140 | * `hive:op:custom_json:whatever` (if enabled, replace "whatever" with the `custom_json.id` you want)
141 |
142 | As mentioned in the first `whatever` example, for ops, [all operation types](https://developers.hive.io/apidefinitions/broadcast-ops) can be subscribed to as channels, including virtual operations, if enabled.
143 |
144 | In the second `whatever` example, for `custom_json.id`, if you want to subscribe to the `follow` channel, use `hive:op:custom_json:follow`. Or if you want to subscribe to the `sm_team_reveal` channel, use `hive:op:custom_json:sm_team_reveal`. The `custom_json.id` channels are not enabled by default. To enable them, set `MEESEEKER_PUBLISH_OP_CUSTOM_ID` to `true` (see example below).
145 |
146 | For example, from `redis-cli`, if we wanted to stream block numbers:
147 |
148 | ```bash
149 | $ redis-cli
150 | 127.0.0.1:6379> subscribe hive:block
151 | Reading messages... (press Ctrl-C to quit)
152 | 1) "subscribe"
153 | 2) "hive:block"
154 | 3) (integer) 1
155 | 1) "message"
156 | 2) "hive:block"
157 | 3) "{\"block_num\":29861068,\"previous\":\"01c7a4cb4424b4dc0cb0cc72fd36b1644f8aeba5\",\"timestamp\":\"2019-01-28T20:55:03\",\"witness\":\"ausbitbank\",\"transaction_merkle_root\":\"a318bb82625bd78af8d8b506ccd4f53116372c8e\",\"extensions\":[]}"
158 | 1) "message"
159 | 2) "hive:block"
160 | 3) "{\"block_num\":29861069,\"previous\":\"01c7a4cc1bed060876cab57476846a91568a9f8a\",\"timestamp\":\"2019-01-28T20:55:06\",\"witness\":\"followbtcnews\",\"transaction_merkle_root\":\"834e05d40b9666e5ef50deb9f368c63070c0105b\",\"extensions\":[]}"
161 | 1) "message"
162 | 2) "hive:block"
163 | 3) "{\"block_num\":29861070,\"previous\":\"01c7a4cd3bbf872895654765faa4409a8e770e91\",\"timestamp\":\"2019-01-28T20:55:09\",\"witness\":\"timcliff\",\"transaction_merkle_root\":\"b2366ce9134d627e00423b28d33cc57f1e6e453f\",\"extensions\":[]}"
164 | ```
165 |
166 | In addition to the general op channels, there's an additional channel for each `custom_json.id`. This option must be enabled:
167 |
168 | ```bash
169 | MEESEEKER_PUBLISH_OP_CUSTOM_ID=true meeseeker sync
170 | ```
171 |
172 | Which allows subscription to specific `id` patterns:
173 |
174 | ```
175 | $ redis-cli
176 | 127.0.0.1:6379> subscribe hive:op:custom_json:sm_team_reveal
177 | Reading messages... (press Ctrl-C to quit)
178 | 1) "subscribe"
179 | 2) "hive:op:custom_json:sm_team_reveal"
180 | 3) (integer) 1
181 | 1) "message"
182 | 2) "hive:op:custom_json:sm_team_reveal"
183 | 3) "{\"key\":\"hive:29890790:bcfa68d9be10b3587d81039b85fd0536ddeddffb:0:custom_json\"}"
184 | 1) "message"
185 | 2) "hive:op:custom_json:sm_team_reveal"
186 | 3) "{\"key\":\"hive:29890792:3f3b921ec6706bcd259f5cc6ac922dc59bbe2de5:0:custom_json\"}"
187 | 1) "message"
188 | 2) "hive:op:custom_json:sm_team_reveal"
189 | 3) "{\"key\":\"hive:29890792:4ceca16dd114b1851140086a82a5fb3a6eb6ec42:0:custom_json\"}"
190 | 1) "message"
191 | 2) "hive:op:custom_json:sm_team_reveal"
192 | 3) "{\"key\":\"hive:29890792:00930eff76b3f0af8ed7215e88cf351cc671490b:0:custom_json\"}"
193 | 1) "message"
194 | 2) "hive:op:custom_json:sm_team_reveal"
195 | 3) "{\"key\":\"hive:29890799:01483bd252ccadb05f546051bb20a4ba9afea243:0:custom_json\"}"
196 | ```
197 |
198 | A `ruby` application can subscribe to a channel as well, using the `redis` gem:
199 |
200 | ```ruby
201 | require 'redis'
202 | require 'json'
202 |
203 | url = 'redis://127.0.0.1:6379/0'
204 | ctx = Redis.new(url: url)
205 |
206 | Redis.new(url: url).subscribe('hive:op:comment') do |on|
207 | on.message do |channel, message|
208 | payload = JSON[message]
209 | comment = JSON[ctx.get(payload['key'])]
210 |
211 | puts comment['value']
212 | end
213 | end
214 | ```
215 |
216 | Many other clients are supported: https://redis.io/clients
217 |
218 | ##### Witness Schedule
219 |
220 | When running `meeseeker witness:schedule`, the `hive:witness:schedule` channel is available. This is offered as a separate command because most applications don't need to worry about this level of blockchain logistics.
221 |
222 | For example, from `redis-cli`, if we wanted to subscribe to the witness schedule:
223 |
224 | ```
225 | $ redis-cli
226 | 127.0.0.1:6379> subscribe hive:witness:schedule
227 | Reading messages... (press Ctrl-C to quit)
228 | 1) "subscribe"
229 | 2) "hive:witness:schedule"
230 | 3) (integer) 1
231 | 1) "message"
232 | 2) "hive:witness:schedule"
233 | 3) "{\"id\":0,\"current_virtual_time\":\"415293532210075480213212125\",\"next_shuffle_block_num\":30035208,\"current_shuffled_witnesses\":[\"thecryptodrive\",\"timcliff\",\"utopian-io\",\"themarkymark\",\"aggroed\",\"smooth.witness\",\"someguy123\",\"gtg\",\"followbtcnews\",\"yabapmatt\",\"therealwolf\",\"ausbitbank\",\"curie\",\"clayop\",\"drakos\",\"blocktrades\",\"good-karma\",\"roelandp\",\"lukestokes.mhth\",\"liondani\",\"anyx\"],\"num_scheduled_witnesses\":21,\"elected_weight\":1,\"timeshare_weight\":5,\"miner_weight\":1,\"witness_pay_normalization_factor\":25,\"median_props\":{\"account_creation_fee\":{\"amount\":\"3000\",\"precision\":3,\"nai\":\"@@000000021\"},\"maximum_block_size\":65536,\"sbd_interest_rate\":0,\"account_subsidy_budget\":797,\"account_subsidy_decay\":347321},\"majority_version\":\"0.20.8\",\"max_voted_witnesses\":20,\"max_miner_witnesses\":0,\"max_runner_witnesses\":1,\"hardfork_required_witnesses\":17,\"account_subsidy_rd\":{\"resource_unit\":10000,\"budget_per_time_unit\":797,\"pool_eq\":157691079,\"max_pool_size\":157691079,\"decay_params\":{\"decay_per_time_unit\":347321,\"decay_per_time_unit_denom_shift\":36},\"min_decay\":0},\"account_subsidy_witness_rd\":{\"resource_unit\":10000,\"budget_per_time_unit\":996,\"pool_eq\":9384019,\"max_pool_size\":9384019,\"decay_params\":{\"decay_per_time_unit\":7293741,\"decay_per_time_unit_denom_shift\":36},\"min_decay\":257},\"min_witness_account_subsidy_decay\":0}"
234 | ```
235 |
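The same `redis` gem pattern from [Using `SUBSCRIBE`](#using-subscribe) works here too. Here's a minimal `ruby` sketch that prints each round's shuffled witnesses (field names taken from the payload above):

```ruby
require 'redis'
require 'json'

Redis.new(url: 'redis://127.0.0.1:6379/0').subscribe('hive:witness:schedule') do |on|
  on.message do |_channel, message|
    schedule = JSON[message]

    puts "Next shuffle at block #{schedule['next_shuffle_block_num']}:"
    puts schedule['current_shuffled_witnesses'].join(', ')
  end
end
```
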
236 | #### Using `SCAN`
237 |
238 | From the redis manual:
239 |
240 | > Since these commands allow for incremental iteration, returning only a small number of elements per call, they can be used in production without the downside of commands like KEYS or SMEMBERS that may block the server for a long time (even several seconds) when called against big collections of keys or elements.
241 | >
242 | > However while blocking commands like SMEMBERS are able to provide all the elements that are part of a Set in a given moment, the SCAN family of commands only offer limited guarantees about the returned elements since the collection that we incrementally iterate can change during the iteration process.
243 |
244 | See: https://redis.io/commands/scan
245 |
246 | Keep in mind that `SCAN` requires pagination to get a complete result. Redis implements pagination using a cursor-based iterator.
247 |
248 | See: https://redis.io/commands/scan#scan-basic-usage
249 |
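From `ruby`, the `redis` gem wraps this cursor handling for you; here's a minimal sketch, assuming the default redis source and the same key pattern used below:

```ruby
require 'redis'

redis = Redis.new(url: 'redis://127.0.0.1:6379/0')

# scan_each pages through the SCAN cursor under the hood ...
redis.scan_each(match: 'hive:*:vote') do |key|
  puts key
end

# ... or drive the cursor manually:
cursor = '0'
loop do
  cursor, keys = redis.scan(cursor, match: 'hive:*:vote')
  keys.each { |key| puts key }
  break if cursor == '0'
end
```
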
250 | Once your sync has started, you can begin doing queries against redis, for example, in the `redis-cli`:
251 |
252 | ```bash
253 | redis-cli --scan --pattern 'hive:*:vote'
254 | ```
255 |
256 | This returns the keys, for example:
257 |
258 | ```
259 | hive:29811083:7fd2ea1c73e6cc08ab6e24cf68e67ff19a05896a:0:vote
260 | hive:29811085:091c3df76322ec7f0dc51a6ed526ff9a9f69869e:0:vote
261 | hive:29811085:24bfc199501779b6c2be2370fab1785f58062c5a:0:vote
262 | hive:29811086:36761db678fe89df48d2c5d11a23cdafe57b2476:0:vote
263 | hive:29811085:f904ac2e5e338263b03b640a4d1ff2d5fd01169e:0:vote
264 | hive:29811085:44036fde09f20d91afda8fc2072b383935c0b615:0:vote
265 | hive:29811086:570abf0fbeeeb0bb5c1e26281f0acb1daf175c39:0:vote
266 | hive:29811083:e3ee518c4958a10f0d0c5ed39e3dc736048e8ec7:0:vote
267 | hive:29811083:e06be9ade6758df59e179160b749d1ace3508044:0:vote
268 | ```
269 |
270 | To get the actual vote operation for a particular key, use:
271 |
272 | ```bash
273 | redis-cli get hive:29811085:f904ac2e5e338263b03b640a4d1ff2d5fd01169e:0:vote
274 | ```
275 |
276 | If, on the other hand, you want `custom_json` only:
277 |
278 | ```bash
279 | redis-cli --scan --pattern 'hive:*:custom_json'
280 | ```
281 |
282 | This only returns the related keys, for example:
283 |
284 | ```
285 | hive:29811084:43f1e1a367b97ea4e05fbd3a80a42146d97121a2:0:custom_json
286 | hive:29811085:5795ff73234d64a11c1fb78edcae6f5570409d8e:0:custom_json
287 | hive:29811083:2d6635a093243ef7a779f31a01adafe6db8c53c9:0:custom_json
288 | hive:29811086:31ecb9c85e9eabd7ca2460fdb4f3ce4a7ca6ec32:0:custom_json
289 | hive:29811083:7fbbde120aef339511f5af1a499f62464fbf4118:0:custom_json
290 | hive:29811083:04a6ddc83a63d024b90ca13996101b83519ba8f5:0:custom_json
291 | ```
292 |
293 | To get the actual custom json operation for a particular key, use:
294 |
295 | ```bash
296 | redis-cli get hive:29811083:7fbbde120aef339511f5af1a499f62464fbf4118:0:custom_json
297 | ```
298 |
299 | To get all transactions for a particular block number:
300 |
301 | ```bash
302 | redis-cli --scan --pattern 'hive:29811085:*'
303 | ```
304 |
305 | Or to get all ops for a particular transaction:
306 |
307 | ```bash
308 | redis-cli --scan --pattern 'hive:*:31ecb9c85e9eabd7ca2460fdb4f3ce4a7ca6ec32:*'
309 | ```
310 |
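Since the keys follow the pattern `chain:block_num:transaction_id:op_index:op_type`, you can also pull everything stored for one block from `ruby`; here's a minimal sketch using an example block number from above:

```ruby
require 'redis'
require 'json'

redis = Redis.new(url: 'redis://127.0.0.1:6379/0')
block_num = 29811085 # example block number from above

redis.scan_each(match: "hive:#{block_num}:*") do |key|
  _chain, _block, trx_id, _op_index, op_type = key.split(':')

  next unless (raw = redis.get(key)) # the key may have expired since the scan

  op = JSON[raw]
  puts "#{trx_id} #{op_type}: #{op['value'].inspect}"
end
```
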
311 | ### Hive Engine Support
312 |
313 | As of `v0.0.6`, meeseeker can also follow the Hive Engine side-chain. This is optional and requires a separate process.
314 |
315 | To sync Hive Engine to your local redis source (also defaults to `redis://127.0.0.1:6379/0`):
316 |
317 | ```bash
318 | meeseeker sync hive_engine
319 | ```
320 |
321 | When running `meeseeker sync hive_engine`, the following channels are available:
322 |
323 | * `hive_engine:block`
324 | * `hive_engine:transaction`
325 | * `hive_engine:virtual_transaction`
326 | * `hive_engine:contract`
327 | * `hive_engine:contract:deploy`
328 | * `hive_engine:contract:update`
329 | * `hive_engine:market`
330 | * `hive_engine:market:buy`
331 | * `hive_engine:market:cancel`
332 | * `hive_engine:market:sell`
333 | * `hive_engine:sscstore`
334 | * `hive_engine:sscstore:buy`
335 | * `hive_engine:steempegged`
336 | * `hive_engine:steempegged:buy`
337 | * `hive_engine:steempegged:removeWithdrawal`
338 | * `hive_engine:steempegged:withdraw`
339 | * `hive_engine:tokens`
340 | * `hive_engine:tokens:checkPendingUnstake`
341 | * `hive_engine:tokens:create`
342 | * `hive_engine:tokens:enableStaking`
343 | * `hive_engine:tokens:issue`
344 | * `hive_engine:tokens:transfer`
345 | * `hive_engine:tokens:transferOwnership`
346 | * `hive_engine:tokens:unstake`
347 | * `hive_engine:tokens:updateMetadata`
348 | * `hive_engine:tokens:updateParams`
349 | * `hive_engine:tokens:updateUrl`
350 |
351 | The above "channel/action" patterns are the ones known at the time of writing. In addition, if a new contract is added or updated, meeseeker will automatically publish to the corresponding channels as they appear, without needing to update or even restart meeseeker.
352 |
353 | See main section on [Using `SUBSCRIBE`](#using-subscribe).
354 |
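Subscribing works just like the mainnet channels. Here's a minimal `ruby` sketch that watches token transfers, assuming the published payload carries a `key` field like the op channels shown earlier:

```ruby
require 'redis'
require 'json'

url = 'redis://127.0.0.1:6379/0'
ctx = Redis.new(url: url)

Redis.new(url: url).subscribe('hive_engine:tokens:transfer') do |on|
  on.message do |_channel, message|
    payload = JSON[message] # assumes a "key" field, as with the op channels above
    transfer = JSON[ctx.get(payload['key'])]

    puts transfer.inspect
  end
end
```
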
355 | Once your Hive Engine sync has started, you can begin doing queries against redis, for example, in the `redis-cli`:
356 |
357 | ```bash
358 | redis-cli --scan --pattern 'hive_engine:*:tokens:transfer'
359 | ```
360 |
361 | This returns the keys, for example:
362 |
363 | ```
364 | hive_engine:18000:d414373db84e6a642f289641ea1433fda22b8a4d:0:tokens:transfer
365 | hive_engine:18004:c9e06c8449d2d04b4a0a31ec7b80d2f62009a5f0:0:tokens:transfer
366 | hive_engine:17994:faf097391760ad896b19d5854e2822f62dee284b:0:tokens:transfer
367 | ```
368 |
369 | See main section on [Using `SCAN`](#using-scan).
370 |
371 | ### Docker
372 |
373 | This will launch meeseeker in a docker container, so you can immediately attach to it on port 6380.
374 |
375 | ```bash
376 | docker run -d -p 6380:6379 inertia/meeseeker:latest
377 | redis-cli -p 6380
378 | ```
379 |
380 | You can also pass any of the environment variables meeseeker accepts. For example, this will launch meeseeker with `custom_json.id` channels enabled, but only keeps ops around for 5 minutes:
381 |
382 | ```bash
383 | docker run \
384 | --env MEESEEKER_PUBLISH_OP_CUSTOM_ID=true \
385 | --env MEESEEKER_EXPIRE_KEYS=300 \
386 | -d -p 6380:6379 inertia/meeseeker:latest
387 | ```
388 |
389 | Also see: https://hub.docker.com/r/inertia/meeseeker/
390 |
391 | ---
392 |
393 |
394 |
395 |
396 |
397 | See some of my previous Ruby How To posts in: [#radiator](https://hive.blog/created/radiator) [#ruby](https://hive.blog/created/ruby)
398 |
399 | ## Get in touch!
400 |
401 | If you're using meeseeker, I'd love to hear from you. Drop me a line and tell me what you think! I'm @inertia on Hive.
402 |
403 | ## License
404 |
405 | I don't believe in intellectual "property". If you do, consider meeseeker as licensed under a Creative Commons [CC0 1.0 Universal](http://creativecommons.org/publicdomain/zero/1.0/) License.
406 |
--------------------------------------------------------------------------------
/Rakefile:
--------------------------------------------------------------------------------
1 | require "bundler/gem_tasks"
2 | require "rake/testtask"
3 | require 'meeseeker'
4 |
5 | defined? Thread.report_on_exception and Thread.report_on_exception = true
6 |
7 | Rake::TestTask.new(:test) do |t|
8 | t.libs << 'test'
9 | t.libs << 'lib'
10 | t.test_files = FileList['test/**/*_test.rb']
11 | t.ruby_opts << if ENV['HELL_ENABLED']
12 | '-W2'
13 | else
14 | '-W1'
15 | end
16 | end
17 |
18 | task :default => :test
19 |
20 | task :console do
21 | exec "irb -r meeseeker -I ./lib"
22 | end
23 |
24 | desc 'Build a new version of the meeseeker gem.'
25 | task :build do
26 | exec 'gem build meeseeker.gemspec'
27 | end
28 |
29 | desc 'Publish the current version of the meeseeker gem.'
30 | task :push do
31 | exec "gem push meeseeker-#{Meeseeker::VERSION}.gem"
32 | end
33 |
34 | desc 'Build a new version of the meeseeker docker image.'
35 | task :docker_build do
36 | exec 'docker build -t inertia/meeseeker:latest .'
37 | end
38 |
39 | desc 'Publish the current version of the meeseeker docker image.'
40 | task :docker_push do
41 | exec 'docker push inertia/meeseeker:latest'
42 | end
43 |
44 | task :check_schema do
45 | begin
46 | abort 'Unable to ping redis source.' unless Meeseeker.redis.ping == 'PONG'
47 | rescue Redis::CommandError => e
48 | puts e
49 | rescue Redis::CannotConnectError => e
50 | puts e
51 | end
52 | end
53 |
54 | task(:sync, [:chain, :at_block_num] => [:check_schema]) do |t, args|
55 | chain = args[:chain] if args[:chain]
56 | chain ||= Meeseeker.default_chain_key_prefix
57 |
58 | job = case chain.to_sym
59 | when :steem_engine
60 | Meeseeker::SteemEngine::FollowerJob.new
61 | when :hive_engine
62 | Meeseeker::HiveEngine::FollowerJob.new
63 | else
64 | Meeseeker::BlockFollowerJob.new
65 | end
66 |
67 | job.perform(chain: chain, at_block_num: args[:at_block_num])
68 | end
69 |
70 | namespace :witness do
71 | desc 'Publish the witness schedule every minute or so (e.g.: hive:witness:schedule).'
72 | task :schedule, [:chain] do |t, args|
73 | chain = args[:chain] if args[:chain]
74 | chain ||= Meeseeker.default_chain_key_prefix
75 |
76 | job = Meeseeker::WitnessScheduleJob.new
77 | job.perform(chain: chain)
78 | end
79 | end
80 |
81 | task(:find, [:what, :key, :chain] => [:check_schema]) do |t, args|
82 | chain = args[:chain] if args[:chain]
83 | chain ||= Meeseeker.default_chain_key_prefix
84 | redis = Meeseeker.redis
85 |
86 | match = case args[:what].downcase.to_sym
87 | when :block then "#{chain}:#{args[:key]}:*"
88 | when :trx then "#{chain}:*:#{args[:key]}:*"
89 | else; abort "Unknown lookup using #{args}"
90 | end
91 |
92 | puts "Looking for match on: #{match}"
93 | keys = redis.keys(match)
94 |
95 | keys.each do |key|
96 | puts key
97 | puts redis.get(key)
98 | end
99 | end
100 |
101 | task :reset, [:chain] => [:check_schema] do |t, args|
102 | chain = (args[:chain] || 'all').to_sym
103 | keys = []
104 |
105 | print 'Dropping keys for set: %s ...' % chain.to_s
106 |
107 | case chain
108 | when :steem_engine then keys += Meeseeker.redis.keys('steem_engine:*')
109 | when :hive_engine then keys += Meeseeker.redis.keys('hive_engine:*')
110 | when :all
111 | keys += Meeseeker.redis.keys('steem:*')
112 | keys += Meeseeker.redis.keys('hive:*')
113 | keys += Meeseeker.redis.keys('steem_engine:*')
114 | keys += Meeseeker.redis.keys('hive_engine:*')
115 | else
116 | keys += Meeseeker.redis.keys("#{chain}:*")
117 | end
118 |
119 | if keys.any?
120 | print " found #{keys.size} keys ..."
121 | dropped = Meeseeker.redis.del(*keys)
122 | puts " dropped #{dropped} keys."
123 | else
124 | puts ' nothing to drop.'
125 | end
126 | end
127 |
128 | namespace :verify do
129 | desc 'Verifies transactions land where they should.'
130 | task :block_org, [:chain, :max_blocks] do |t, args|
131 | chain = args[:chain] if args[:chain]
132 | chain ||= Meeseeker.default_chain_key_prefix
133 | chain_key_prefix = chain.to_s
134 | max_blocks = args[:max_blocks]
135 | node_url = Meeseeker.shuffle_node_url(chain)
136 | database_api = Meeseeker.database_api_class(chain).new(url: node_url)
137 | mode = ENV.fetch('MEESEEKER_STREAM_MODE', 'head').to_sym
138 | until_block_num = if !!max_blocks
139 | database_api.get_dynamic_global_properties do |dgpo|
140 | raise 'Got empty dynamic_global_properties result.' if dgpo.nil?
141 |
142 | case mode
143 | when :head then dgpo.head_block_number
144 | when :irreversible then dgpo.last_irreversible_block_num
145 | else; abort "Unknown block mode: #{mode}"
146 | end
147 | end + max_blocks.to_i + 1
148 | end
149 |
150 | Thread.new do
151 | job = Meeseeker::BlockFollowerJob.new
152 |
153 | loop do
154 | begin
155 | job.perform(chain: chain, mode: mode, until_block_num: until_block_num)
156 | rescue => e
157 | puts e.inspect
158 | sleep 5
159 | end
160 |
161 | break # success
162 | end
163 |
164 | puts 'Background sync finished ...'
165 | end
166 |
167 | begin
168 | block_api = Meeseeker.block_api_class(chain).new(url: node_url)
169 | block_channel = "#{chain_key_prefix}:block"
170 | redis_url = ENV.fetch('MEESEEKER_REDIS_URL', 'redis://127.0.0.1:6379/0')
171 | subscription = Redis.new(url: redis_url)
172 | ctx = Redis.new(url: redis_url)
173 | timeout = (max_blocks).to_i * 3
174 |
175 | subscribe_mode, subscribe_args = if timeout > 0
176 | [:subscribe_with_timeout, [timeout, [block_channel]]]
177 | else
178 | [:subscribe, [[block_channel]]]
179 | end
180 |
181 | subscription.send(subscribe_mode, *subscribe_args) do |on|
182 | on.subscribe do |channel, subscriptions|
183 | puts "Subscribed to ##{channel} (subscriptions: #{subscriptions})"
184 | end
185 |
186 | on.message do |channel, message|
187 | payload = JSON[message]
188 | block_num = payload['block_num']
189 | expected_witness = payload['witness']
190 | next_block_num = block_num + 1
191 |
192 | if !!max_blocks
193 | if block_num >= until_block_num
194 | # We're done trailing blocks. Typically, this is used by unit
195 | # tests so the test can halt.
196 |
197 | subscription.unsubscribe
198 | next
199 | end
200 | end
201 |
202 | 5.times do
203 | break unless ctx.keys("#{chain_key_prefix}:#{next_block_num}:*").size == 0
204 |
205 | # This ensures at least the next block has been indexed before
206 | # proceeding.
207 |
208 | puts "Waiting for block (verify:block_org): #{next_block_num} ..."
209 |
210 | sleep 6
211 | end
212 |
213 | if ctx.keys("#{chain_key_prefix}:#{next_block_num}:*").size == 0
214 | puts "Gave up waiting for block (check current_aslot slippage): #{next_block_num}"
215 | end
216 |
217 | database_api.get_dynamic_global_properties do |dgpo|
218 | raise 'Got empty dynamic_global_properties result.' if dgpo.nil?
219 |
220 | (block_num - dgpo.last_irreversible_block_num).tap do |offset|
221 | # This will block all channel callbacks until the first known block
222 | # is irreversible. After that, the offsets should mostly go
223 | # negative.
224 |
225 | sleep offset * 3 if offset > 0
226 | end
227 | end
228 |
229 | # In theory, we should have all the keys using this pattern.
230 | keys = ctx.keys("#{chain_key_prefix}:#{block_num}:*")
231 |
232 | # If we have all the keys, we should also have all transaction ids.
233 | expected_ids = keys.map { |k| k.split(':')[2] }.uniq
234 | expected_ids -= [Meeseeker::VIRTUAL_TRX_ID]
235 |
236 | actual_ids, actual_witness = block_api.get_block(block_num: block_num) do |result|
237 | raise 'Got empty block result.' if result.nil? || result.block.nil?
238 |
239 | block = result.block
240 | [block.transaction_ids, block.witness]
241 | end
242 |
243 | # We do an intersection to make sure there's no difference between
244 | # the two copies, regardless of order, as opposed to just checking that
245 | # the lengths match.
246 |
247 | (actual_ids & expected_ids).tap do |intersection|
248 | all_sizes = [intersection.size, expected_ids.size, actual_ids.size]
249 | puts 'intersection: %d; expected: %d; actual: %d' % all_sizes
250 |
251 | if all_sizes.min != all_sizes.max
252 | puts "Expected witness: #{expected_witness}; actual witness: #{actual_witness}"
253 | puts "Expected transaction ids:"
254 | puts expected_ids
255 | puts "Actual transaction ids:"
256 | puts actual_ids
257 |
258 | puts "actual_ids minus expected:"
259 | puts actual_ids - expected_ids
260 | puts "expected_ids minus actual:"
261 | puts expected_ids - actual_ids
262 |
263 | exit(-1)
264 | end
265 | end
266 | end
267 |
268 | on.unsubscribe do |channel, subscriptions|
269 | puts "Unsubscribed from ##{channel} (subscriptions: #{subscriptions})"
270 | end
271 | end
272 | end
273 | end
274 |
275 | desc 'Verifies Hive Engine transactions land where they should.'
276 | task :hive_engine_block_org, [:max_blocks] do |t, args|
277 | Rake::Task['verify:engine_block_org'].invoke('hive_engine', args[:max_blocks])
278 | end
279 |
280 | desc 'Verifies Steem Engine transactions land where they should.'
281 | task :steem_engine_block_org, [:max_blocks] do |t, args|
282 | Rake::Task['verify:engine_block_org'].invoke('steem_engine', args[:max_blocks])
283 | end
284 |
285 | desc 'Verifies Steem/Hive Engine transactions land where they should.'
286 | task :engine_block_org, [:chain_key_prefix, :max_blocks] do |t, args|
287 | chain_key_prefix = args[:chain_key_prefix]
288 | max_blocks = args[:max_blocks]
289 | case chain_key_prefix.to_sym
290 | when :steem_engine
291 | node_url = ENV.fetch('MEESEEKER_STEEM_ENGINE_NODE_URL', 'https://api.steem-engine.com/rpc')
292 | agent = Meeseeker::SteemEngine::Agent.new(url: node_url)
293 | job = Meeseeker::SteemEngine::FollowerJob.new
294 | when :hive_engine
295 | node_url = ENV.fetch('MEESEEKER_HIVE_ENGINE_NODE_URL', 'https://api.hive-engine.com/rpc')
296 | agent = Meeseeker::HiveEngine::Agent.new(url: node_url)
297 | job = Meeseeker::HiveEngine::FollowerJob.new
298 | end
299 | until_block_num = if !!max_blocks
300 | agent.latest_block_info['blockNumber']
301 | end
302 |
303 | Thread.new do
304 | loop do
305 | begin
306 | at_block_num = agent.latest_block_info["blockNumber"] - max_blocks.to_i
307 | at_block_num = [at_block_num, 1].max
308 | job.perform(at_block_num: at_block_num, until_block_num: until_block_num)
309 | rescue => e
310 | puts e.inspect
311 | sleep 5
312 | end
313 |
314 | break # success
315 | end
316 |
317 | puts 'Background sync finished ...'
318 | end
319 |
320 | begin
321 | block_channel = "#{chain_key_prefix}:block"
322 | redis_url = ENV.fetch('MEESEEKER_REDIS_URL', 'redis://127.0.0.1:6379/0')
323 | subscription = Redis.new(url: redis_url)
324 | ctx = Redis.new(url: redis_url)
325 | timeout = (max_blocks).to_i * 3
326 |
327 | subscribe_mode, subscribe_args = if timeout > 0
328 | [:subscribe_with_timeout, [timeout, [block_channel]]]
329 | else
330 | [:subscribe, [[block_channel]]]
331 | end
332 |
333 | subscription.send(subscribe_mode, *subscribe_args) do |on|
334 | on.subscribe do |channel, subscriptions|
335 | puts "Subscribed to ##{channel} (subscriptions: #{subscriptions})"
336 | end
337 |
338 | on.message do |channel, message|
339 | payload = JSON[message]
340 | block_num = payload['block_num']
341 | next_block_num = block_num + 1
342 |
343 | if !!max_blocks
344 | if block_num >= until_block_num
345 | # We're done trailing blocks. Typically, this is used by unit
346 | # tests so the test can halt.
347 |
348 | subscription.unsubscribe
349 | next
350 | end
351 | end
352 |
353 | while ctx.keys("#{chain_key_prefix}:#{next_block_num}:*").size == 0
354 | # This ensures at least the next block has been indexed before
355 | # proceeding.
356 |
357 | puts "Waiting for block (verify:#{chain_key_prefix}_engine_block_org): #{next_block_num} ..."
358 | sleep 6
359 | end
360 |
361 | # In theory, we should have all the keys using this pattern.
362 | keys = ctx.keys("#{chain_key_prefix}:#{block_num}:*")
363 |
364 | # If we have all the keys, we should also have all transaction ids.
365 | expected_ids = keys.map { |k| k.split(':')[2] }.uniq
366 | expected_ids -= [Meeseeker::VIRTUAL_TRX_ID]
367 | actual_ids = nil
368 |
369 | agent.block(block_num).tap do |block|
370 | raise 'Got empty block result.' if block.nil?
371 |
372 | actual_ids = block['transactions'].map{|trx| trx['transactionId'].to_s.split('-').first}.uniq
373 | end
374 |
375 | # We do an intersection to make sure there's no difference between
376 | # the two copies, regardless of order, as opposed to just checking that
377 | # the lengths match.
378 |
379 | (actual_ids & expected_ids).tap do |intersection|
380 | all_sizes = [intersection.size, expected_ids.size, actual_ids.size]
381 | puts 'intersection: %d; expected: %d; actual: %d' % all_sizes
382 |
383 | if all_sizes.min != all_sizes.max
384 | puts "Expected transaction ids:"
385 | puts expected_ids
386 | puts "Actual transaction ids:"
387 | puts actual_ids
388 |
389 | puts "actual_ids minus expected:"
390 | puts actual_ids - expected_ids
391 | puts "expected_ids minus actual:"
392 | puts expected_ids - actual_ids
393 |
394 | exit(-1)
395 | end
396 | end
397 | end
398 |
399 | on.unsubscribe do |channel, subscriptions|
400 | puts "Unsubscribed from ##{channel} (subscriptions: #{subscriptions})"
401 | end
402 | end
403 | end
404 |
405 | agent.shutdown
406 | end
407 |
408 | desc 'Verifies Hive Engine sidechain against the mainnet.'
409 | task :hive_engine_ref_blocks do |t|
410 | Rake::Task['verify:engine_ref_blocks'].invoke('hive_engine')
411 | end
412 |
413 | desc 'Verifies Steem Engine sidechain against the mainnet.'
414 | task :steem_engine_ref_blocks do |t|
415 | Rake::Task['verify:engine_ref_blocks'].invoke('steem_engine')
416 | end
417 |
418 | desc 'Verifies Steem/Hive Engine sidechain against the mainnet.'
419 | task :engine_ref_blocks, [:chain_key_prefix] do |t, args|
420 | chain_key_prefix = args[:chain_key_prefix]
421 | redis_url = ENV.fetch('MEESEEKER_REDIS_URL', 'redis://127.0.0.1:6379/0')
422 | ctx = Redis.new(url: redis_url)
423 | keys = ctx.keys("#{chain_key_prefix}:*:*")
424 | mainchain, mainchain_url = case chain_key_prefix
425 | when 'steem_engine' then ['steem', 'https://api.steemit.com']
426 | when 'hive_engine' then ['hive', 'https://api.openhive.network']
427 | end
428 | block_api = Meeseeker.block_api_class(mainchain).new(url: mainchain_url)
429 | block_trxs = {}
430 |
431 | puts "Checking #{chain_key_prefix} keys: #{keys.size}"
432 |
433 | keys.each do |key|
434 | transaction = JSON[ctx.get(key)]
435 |
436 | next if transaction.class == Integer
437 |
438 | block_num = case chain_key_prefix
439 | when 'steem_engine' then transaction.fetch('refSteemBlockNumber')
440 | when 'hive_engine' then transaction.fetch('refHiveBlockNumber')
441 | end.to_i
442 |
443 | block_trxs[block_num] ||= []
444 | block_trxs[block_num] << transaction['transactionId'].to_s.split('-').first
445 | end
446 |
447 | puts "Related mainnet blocks: #{block_trxs.keys.size}"
448 |
449 | skipped_blocks = []
450 |
451 | next if block_trxs.empty?
452 |
453 | block_api.get_blocks(block_range: block_trxs.keys) do |block, block_num|
454 | if block.nil? || block[:transaction_ids].nil?
455 | print 'S'
456 | skipped_blocks << block_num
457 |
458 | next
459 | else
460 | print '.'
461 | end
462 |
463 | trx_ids = block_trxs[block_num] - [Meeseeker::VIRTUAL_TRX_ID]
464 |
465 | if trx_ids.any? && (block.transaction_ids & trx_ids).none?
466 | puts "\nNo intersection in #{block_num}!"
467 | puts "Expected the following sidechain trx_ids: #{trx_ids.join(', ')}"
468 | end
469 | end
470 |
471 | puts "\nBlocks to retry: #{skipped_blocks.size}"
472 |
473 | skipped_blocks.each do |block_num|
474 | block_found = false
475 |
476 | block_api.get_block(block_num: block_num) do |result|
477 | break unless !!result.block
478 |
479 | block = result.block
480 | block_found = true
481 | trx_ids = block_trxs[block_num] - [Meeseeker::VIRTUAL_TRX_ID]
482 |
483 | if trx_ids.any? && (block.transaction_ids & trx_ids).none?
484 | puts "No intersection in #{block_num}!"
485 | puts "Expected the following sidechain trx_ids: #{trx_ids.join(', ')}"
486 | end
487 | end
488 |
489 | redo unless block_found
490 | end
491 |
492 | puts "Done."
493 | end
494 |
495 | namespace :witness do
496 | desc 'Verifies witnesses in the schedule produced a block.'
497 | task :schedule, [:chain, :max_blocks] do |t, args|
498 | chain = args[:chain] if !!args[:chain]
499 | chain ||= Meeseeker.default_chain_key_prefix
500 | max_blocks = args[:max_blocks]
501 | node_url = Meeseeker.shuffle_node_url(chain)
502 | database_api = Meeseeker.database_api_class(chain).new(url: node_url)
503 | mode = ENV.fetch('MEESEEKER_STREAM_MODE', 'head').to_sym
504 | until_block_num = if !!max_blocks
505 | database_api.get_dynamic_global_properties do |dgpo|
506 | raise 'Got empty dynamic_global_properties result.' if dgpo.nil?
507 |
508 | case mode
509 | when :head then dgpo.head_block_number
510 | when :irreversible then dgpo.last_irreversible_block_num
511 | else; abort "Unknown block mode: #{mode}"
512 | end
513 | end + max_blocks.to_i
514 | end
515 |
516 | Thread.new do
517 | job = Meeseeker::WitnessScheduleJob.new
518 |
519 | loop do
520 | begin
521 | job.perform(chain: chain, mode: mode, until_block_num: until_block_num)
522 | rescue => e
523 | puts e.inspect
524 | sleep 5
525 | end
526 |
527 | break # success
528 | end
529 |
530 | puts 'Background sync finished ...'
531 | end
532 |
533 | begin
534 | block_api = Meeseeker.block_api_class(chain).new(url: node_url)
535 | chain_key_prefix = chain.to_s if args[:chain]
536 | chain_key_prefix ||= Meeseeker.default_chain_key_prefix
537 | schedule_channel = "#{chain_key_prefix}:witness:schedule"
538 | redis_url = ENV.fetch('MEESEEKER_REDIS_URL', 'redis://127.0.0.1:6379/0')
539 | subscription = Redis.new(url: redis_url)
540 | timeout = (max_blocks).to_i * 3
541 |
542 | subscribe_mode, subscribe_args = if timeout > 0
543 | [:subscribe_with_timeout, [timeout, [schedule_channel]]]
544 | else
545 | [:subscribe, [[schedule_channel]]]
546 | end
547 |
548 | # Check if the redis context is still available right before we
549 | # subscribe.
550 | break unless subscription.ping == 'PONG'
551 |
552 | subscription.send(subscribe_mode, *subscribe_args) do |on|
553 | on.subscribe do |channel, subscriptions|
554 | puts "Subscribed to ##{channel} (subscriptions: #{subscriptions})"
555 | end
556 |
557 | on.message do |channel, message|
558 | payload = JSON[message]
559 | next_shuffle_block_num = payload['next_shuffle_block_num']
560 | current_shuffled_witnesses = payload['current_shuffled_witnesses']
561 | num_witnesses = current_shuffled_witnesses.size
562 | from_block_num = next_shuffle_block_num - num_witnesses + 1
563 | to_block_num = from_block_num + num_witnesses - 1
564 | block_range = from_block_num..to_block_num # typically 21 blocks
565 |
566 | if !!max_blocks
567 | if block_range.include? until_block_num
568 | # We're done trailing blocks. Typically, this is used by unit
569 | # tests so the test can halt.
570 |
571 | subscription.unsubscribe
572 | end
573 | end
574 |
575 | begin
576 | # We write witnesses to this hash until all 21 produce blocks.
577 | actual_witnesses = {}
578 | tries = 0
579 |
580 | while actual_witnesses.size != num_witnesses
581 | # Allow the immediate node to catch up in case it's behind by a
582 | # block.
583 | sleep 3
584 |
585 | # Typically, nodes will allow up to 50 block headers in one
586 | # request, if backed by jussi. We only need 21, so each
587 | # request should only make a single response with the entire
588 | # round. Under normal circumstances, this call happens only
589 | # once. But if there's additional p2p or cache latency,
590 | # it might have missing headers.
591 |
592 | block_api.get_block_headers(block_range: block_range) do |header, block_num|
593 | unless !!header
594 | # Can happen when there's excess p2p latency and/or jussi
595 | # cache is under load.
596 | puts "Waiting for block header (witness:schedule): #{block_num}"
597 |
598 | node_url = Meeseeker.shuffle_node_url(chain)
599 | block_api = Meeseeker.block_api_class(chain).new(url: node_url)
600 |
601 | next
602 | end
603 |
604 | actual_witnesses[header.witness] = block_num
605 | end
606 |
607 | break if (tries += 1) > 5
608 | end
609 |
610 | # If there are multiple tries due to high p2p latency, even though
611 | # we got all 21 block headers, seeing this message could be an
612 | # early-warning of other problems on the blockchain.
613 |
614 | # If there's a missing block header, this will always show 5
615 | # tries.
616 |
617 | puts "Tries: #{tries}" if tries > 1
618 |
619 | missing_witnesses = current_shuffled_witnesses - actual_witnesses.keys
620 | extra_witnesses = actual_witnesses.keys - current_shuffled_witnesses
621 |
622 | if missing_witnesses.any? || extra_witnesses.any?
623 | puts "Expected only these witness to produce a block in #{block_range}."
624 | puts "Missing witnesses: #{missing_witnesses.join(', ')}"
625 | puts "Extra witnesses: #{extra_witnesses.join(', ')}"
626 |
627 | puts "\nWitnesses and block numbers in range:"
628 | actual_witnesses.sort_by{ |k, v| v }.each do |k, v|
629 | puts "#{v}: #{k}"
630 | end
631 | puts "Count: #{actual_witnesses.size}"
632 |
633 | # Non-zero exit to notify the shell caller that there's a
634 | # problem.
635 |
636 | exit(-(missing_witnesses.size + extra_witnesses.size))
637 | end
638 | end
639 |
640 | # Perfect round.
641 |
642 | puts "Found all #{num_witnesses} expected witnesses in block range #{block_range}: √"
643 | end
644 |
645 | on.unsubscribe do |channel, subscriptions|
646 | puts "Unsubscribed from ##{channel} (subscriptions: #{subscriptions})"
647 | end
648 | end
649 | end
650 | end
651 | end
652 | end
653 |
--------------------------------------------------------------------------------
/bin/meeseeker:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env ruby
2 |
3 | require 'rake'
4 | require 'pp'
5 | require 'meeseeker'
6 |
7 | gem_dir = File.expand_path("..", File.dirname(__FILE__))
8 | $LOAD_PATH.unshift gem_dir
9 |
10 | pwd = Dir.pwd
11 | Dir.chdir(gem_dir)
12 | Rake.application.init
13 | Rake.application.load_rakefile
14 | Dir.chdir(pwd)
15 |
16 | puts "meeseeker-#{Meeseeker::VERSION}"
17 | filename = __FILE__.split('/').last
18 |
19 | case ARGV[0]
20 | when 'console' then Rake::Task['console'].invoke
21 | when 'sync', 'witness:schedule'
22 | backoff = 0.01
23 | max_backoff = 30
24 |
25 | loop do; begin
26 | Rake::Task[ARGV[0]].invoke(*ARGV[1..-1])
27 | rescue => e
28 | puts "Error: #{e.inspect}"
29 | backoff = [backoff, max_backoff].min
30 | sleep backoff *= 2
31 | puts "Retrying ..."
32 | Rake::Task[ARGV[0]].reenable
33 | end; end
34 | when 'find' then Rake::Task['find'].invoke(*ARGV[1..-1])
35 | when 'reset' then Rake::Task['reset'].invoke
36 | else
37 | puts "\nBegin/resume sync:"
38 | puts "\t#{filename} sync [chain] [block_num]\n\n"
39 | puts "Publish witness schedule:"
40 | puts "\t#{filename} witness:schedule\n\n"
41 | puts "Start in the ruby console:"
42 | puts "\t#{filename} console\n\n"
43 | puts 'Find block or transaction:'
44 | puts "\t#{filename} find block 3044538"
45 | puts "\t#{filename} find trx 983c5e5c6aef52f1647d952a18771f76b885e6de\n\n"
46 | puts 'Clear all keys:'
47 | puts "\t#{filename} reset\n\n"
48 | puts "Note, using find and reset is not intended for routine operations. It is intended for analysis only."
49 | puts "See: https://github.com/inertia186/meeseeker#usage"
50 | end
51 |
--------------------------------------------------------------------------------
/lib/meeseeker.rb:
--------------------------------------------------------------------------------
1 | require 'redis'
2 | require 'steem'
3 | require 'hive'
4 |
5 | require 'meeseeker/version'
6 | require 'meeseeker/block_follower_job'
7 | require 'meeseeker/witness_schedule_job'
8 | require 'meeseeker/steem_engine/agent'
9 | require 'meeseeker/steem_engine/follower_job'
10 | require 'meeseeker/hive_engine'
11 |
12 | module Meeseeker
13 | STEEM_CHAIN_ID = '0000000000000000000000000000000000000000000000000000000000000000'
14 | HIVE_LEGACY_CHAIN_ID = '0000000000000000000000000000000000000000000000000000000000000000'
15 | HIVE_CHAIN_ID = 'beeab0de00000000000000000000000000000000000000000000000000000000'
16 | STEEM_CHAIN_KEY_PREFIX = 'steem'
17 | HIVE_CHAIN_KEY_PREFIX = 'hive'
18 | STEEM_ENGINE_CHAIN_KEY_PREFIX = 'steem_engine'
19 | HIVE_ENGINE_CHAIN_KEY_PREFIX = 'hive_engine'
20 | LAST_BLOCK_NUM_KEY_SUFFIX = ':meeseeker:last_block_num'
21 | LAST_STEEM_ENGINE_BLOCK_NUM_KEY_SUFFIX = ':meeseeker:last_block_num'
22 | BLOCKS_PER_DAY = 28800
23 | VIRTUAL_TRX_ID = '0000000000000000000000000000000000000000'
24 | BLOCK_INTERVAL = 3
25 | SHUFFLE_URL = 'shuffle'
26 | DEFAULT_STEEM_URL = 'https://api.steemit.com'
27 | DEFAULT_STEEM_FAILOVER_URLS = [
28 | DEFAULT_STEEM_URL,
29 | # 'https://steemd.minnowsupportproject.org',
30 | # 'https://anyx.io',
31 | # 'http://anyx.io',
32 | # 'https://steemd.privex.io',
33 | # 'https://api.steem.house'
34 | ]
35 | DEFAULT_HIVE_URL = 'https://api.openhive.network'
36 | DEFAULT_HIVE_FAILOVER_URLS = [
37 | DEFAULT_HIVE_URL,
38 | 'https://api.hivekings.com',
39 | 'https://anyx.io',
40 | 'http://anyx.io',
41 | 'https://techcoderx.com',
42 | 'https://rpc.esteem.app',
43 | 'https://hived.privex.io',
44 | 'https://api.pharesim.me',
45 | 'https://api.hive.blog',
46 | 'https://rpc.ausbit.dev'
47 | ]
48 |
49 | def default_chain_key_prefix
50 | ENV.fetch('MEESEEKER_CHAIN_KEY_PREFIX', chain_key_prefix)
51 | end
52 |
53 | def self.chain_key_prefix
54 | @chain_key_prefix ||= {}
55 | url = default_url(HIVE_CHAIN_KEY_PREFIX)
56 |
57 | return @chain_key_prefix[url] if !!@chain_key_prefix[url]
58 |
59 | # Just use the Hive API for either chain, until we know which one we're
60 | # using.
61 | api = Hive::DatabaseApi.new(url: url)
62 |
63 | api.get_config do |config|
64 | @chain_key_prefix[url] = if !!config.HIVE_CHAIN_ID && config.HIVE_CHAIN_ID == HIVE_CHAIN_ID
65 | HIVE_CHAIN_KEY_PREFIX
66 | elsif !!config.HIVE_CHAIN_ID && config.HIVE_CHAIN_ID == HIVE_LEGACY_CHAIN_ID
67 | HIVE_CHAIN_KEY_PREFIX
68 | elsif !!config.STEEM_CHAIN_ID && config.STEEM_CHAIN_ID == STEEM_CHAIN_ID
69 | STEEM_CHAIN_KEY_PREFIX
70 | else
71 | config.keys.find{|k| k.end_with? '_CHAIN_ID'}.split('_').first.downcase.tap do |guess|
72 | warn "Guessing chain_key_prefix = '#{guess}' for unknown chain on: #{node_url}"
73 | end
74 | end
75 | end
76 | end
77 |
78 | def self.default_url(chain = default_chain_key_prefix)
79 | ENV.fetch('MEESEEKER_NODE_URL') do
80 | case chain.to_s
81 | when STEEM_CHAIN_KEY_PREFIX then DEFAULT_STEEM_URL
82 | when HIVE_CHAIN_KEY_PREFIX then DEFAULT_HIVE_URL
83 | else
84 | raise "Unknown chain: #{chain}"
85 | end
86 | end
87 | end
88 |
89 | @problem_node_urls = []
90 |
91 | @redis = Redis.new(url: ENV.fetch('MEESEEKER_REDIS_URL', 'redis://127.0.0.1:6379/0'))
92 | @node_url = default_url(ENV.fetch('MEESEEKER_CHAIN_KEY_PREFIX', HIVE_CHAIN_KEY_PREFIX))
93 | @steem_engine_node_url = ENV.fetch('MEESEEKER_STEEM_ENGINE_NODE_URL', 'https://api.steem-engine.com/rpc')
94 | @hive_engine_node_url = ENV.fetch('MEESEEKER_HIVE_ENGINE_NODE_URL', 'https://api.hive-engine.com/rpc')
95 | @stream_mode = ENV.fetch('MEESEEKER_STREAM_MODE', 'head').downcase.to_sym
96 | @include_virtual = ENV.fetch('MEESEEKER_INCLUDE_VIRTUAL', 'true').downcase == 'true'
97 | @include_block_header = ENV.fetch('MEESEEKER_INCLUDE_BLOCK_HEADER', 'true').downcase == 'true'
98 | @publish_op_custom_id = ENV.fetch('MEESEEKER_PUBLISH_OP_CUSTOM_ID', 'false').downcase == 'true'
99 | @expire_keys = ENV.fetch('MEESEEKER_EXPIRE_KEYS', BLOCKS_PER_DAY * BLOCK_INTERVAL).to_i
100 | @max_keys = ENV.fetch('MEESEEKER_MAX_KEYS', '-1').to_i
101 |
102 | extend self
103 |
104 | attr_accessor :redis, :node_url, :steem_engine_node_url,
105 | :hive_engine_node_url, :expire_keys, :max_keys, :stream_mode,
106 | :include_virtual, :include_block_header, :publish_op_custom_id
107 |
108 | def self.shuffle_node_url(chain = ENV.fetch('MEESEEKER_CHAIN_KEY_PREFIX', HIVE_CHAIN_KEY_PREFIX))
109 | chain = chain.to_s
110 | node_url = ENV.fetch('MEESEEKER_NODE_URL', default_url(ENV.fetch('MEESEEKER_CHAIN_KEY_PREFIX', chain)))
111 | return node_url unless node_url == SHUFFLE_URL
112 |
113 | @problem_node_urls = [] if rand(1..1000) == 13
114 | shuffle_node_url!(chain)
115 | end
116 |
117 | def self.api_class(chain = default_chain_key_prefix)
118 | case chain.to_s
119 | when STEEM_CHAIN_KEY_PREFIX then Steem::Api
120 | when HIVE_CHAIN_KEY_PREFIX then Hive::Api
121 | else
122 | raise "Unknown chain: #{chain}"
123 | end
124 | end
125 |
126 | def self.condenser_api_class(chain = default_chain_key_prefix)
127 | case chain.to_s
128 | when STEEM_CHAIN_KEY_PREFIX then Steem::CondenserApi
129 | when HIVE_CHAIN_KEY_PREFIX then Hive::CondenserApi
130 | else
131 | raise "Unknown chain: #{chain}"
132 | end
133 | end
134 |
135 | def self.block_api_class(chain = default_chain_key_prefix)
136 | case chain.to_s
137 | when STEEM_CHAIN_KEY_PREFIX then Steem::BlockApi
138 | when HIVE_CHAIN_KEY_PREFIX then Hive::BlockApi
139 | else
140 | raise "Unknown chain: #{chain}"
141 | end
142 | end
143 |
144 | def self.database_api_class(chain = default_chain_key_prefix)
145 | case chain.to_s
146 | when STEEM_CHAIN_KEY_PREFIX then Steem::DatabaseApi
147 | when HIVE_CHAIN_KEY_PREFIX then Hive::DatabaseApi
148 | else
149 | raise "Unknown chain: #{chain}"
150 | end
151 | end
152 |
153 | def self.stream_class(chain = default_chain_key_prefix)
154 | case chain.to_s
155 | when STEEM_CHAIN_KEY_PREFIX then Steem::Stream
156 | when HIVE_CHAIN_KEY_PREFIX then Hive::Stream
157 | else
158 | raise "Unknown chain: #{chain}"
159 | end
160 | end
161 |
162 | def self.shuffle_node_url!(chain = ENV.fetch('MEESEEKER_CHAIN_KEY_PREFIX', HIVE_CHAIN_KEY_PREFIX))
163 | chain = chain.to_s
164 | failover_urls = case chain
165 | when STEEM_CHAIN_KEY_PREFIX then DEFAULT_STEEM_FAILOVER_URLS - @problem_node_urls
166 | when HIVE_CHAIN_KEY_PREFIX then DEFAULT_HIVE_FAILOVER_URLS - @problem_node_urls
167 | else; []
168 | end
169 | url = failover_urls.sample
170 | api = api_class(chain).new(url: url)
171 |
172 | api.get_accounts(['fullnodeupdate']) do |accounts|
173 | fullnodeupdate = accounts.first
174 | metadata = (JSON[fullnodeupdate.json_metadata] rescue nil) || {}
175 |
176 | nodes = metadata.fetch('report', []).map do |report|
177 | next if chain == HIVE_CHAIN_KEY_PREFIX && !report[HIVE_CHAIN_KEY_PREFIX]
178 | next if chain != HIVE_CHAIN_KEY_PREFIX && !!report[HIVE_CHAIN_KEY_PREFIX]
179 |
180 | report['node']
181 | end.compact.uniq
182 |
183 | nodes -= @problem_node_urls
184 |
185 | if nodes.any?
186 | nodes.sample
187 | else
188 | @node_url = failover_urls.sample
189 | end
190 | end
191 | rescue => e
192 | puts "#{url}: #{e}"
193 |
194 | @problem_node_urls << url
195 | failover_urls -= @problem_node_urls
196 | failover_urls.sample
197 | end
198 |
199 | shuffle_node_url! if @node_url == SHUFFLE_URL
200 | end
201 |
--------------------------------------------------------------------------------
/lib/meeseeker/block_follower_job.rb:
--------------------------------------------------------------------------------
1 | module Meeseeker
2 | class BlockFollowerJob
3 | MAX_VOP_RETRY = 3
4 |
5 | def perform(options = {})
6 | chain = (options[:chain] || 'hive').to_sym
7 | url = Meeseeker.default_url(chain)
8 | block_api = Meeseeker.block_api_class(chain).new(url: url)
9 | redis = Meeseeker.redis
10 | last_key_prefix = nil
11 | trx_index = 0
12 | current_block_num = nil
13 | block_transactions = []
14 | chain_key_prefix = chain.to_s if !!options[:chain]
15 | chain_key_prefix ||= Meeseeker.default_chain_key_prefix
16 |
17 | stream_operations(options) do |op, trx_id, block_num|
18 | begin
19 | current_key_prefix = "#{chain_key_prefix}:#{block_num}:#{trx_id}"
20 |
21 | if current_key_prefix == last_key_prefix
22 | trx_index += 1
23 | else
24 | if !!last_key_prefix
25 | _, b, t = last_key_prefix.split(':')
26 | transaction_payload = {
27 | block_num: b.to_i,
28 | transaction_id: t,
29 | transaction_num: block_transactions.size
30 | }
31 |
32 | block_transactions << trx_id unless trx_id == VIRTUAL_TRX_ID
33 | redis.publish("#{chain_key_prefix}:transaction", transaction_payload.to_json)
34 | end
35 | last_key_prefix = "#{chain_key_prefix}:#{block_num}:#{trx_id}"
36 | trx_index = 0
37 | end
38 |
39 | op_type = if op.type.end_with? '_operation'
40 | op.type.split('_')[0..-2].join('_')
41 | else
42 | op.type
43 | end
44 |
45 | key = "#{current_key_prefix}:#{trx_index}:#{op_type}"
46 | puts key
47 | end
48 |
49 | unless Meeseeker.max_keys == -1
50 | while redis.keys("#{chain_key_prefix}:*").size > Meeseeker.max_keys
51 | sleep Meeseeker::BLOCK_INTERVAL
52 | end
53 | end
54 |
55 | redis.set(key, op.to_json)
56 | redis.expire(key, Meeseeker.expire_keys) unless Meeseeker.expire_keys == -1
57 |
58 | if current_block_num != block_num
59 | block_transactions = []
60 | block_payload = {
61 | block_num: block_num
62 | }
63 |
64 | if Meeseeker.include_block_header
65 | catch :block_header do
66 | block_api.get_block_header(block_num: block_num) do |result|
67 | if result.nil? || result.header.nil?
68 | puts "Node returned empty result for block_header on block_num: #{block_num} (rate limiting?). Retrying ..."
69 | sleep Meeseeker::BLOCK_INTERVAL
70 | throw :block_header
71 | end
72 |
73 | block_payload.merge!(result.header.to_h)
74 | end
75 | end
76 | end
77 |
78 | redis.set(chain_key_prefix + LAST_BLOCK_NUM_KEY_SUFFIX, block_num)
79 | redis.publish("#{chain_key_prefix}:block", block_payload.to_json)
80 | current_block_num = block_num
81 | end
82 |
83 | redis.publish("#{chain_key_prefix}:op:#{op_type}", {key: key}.to_json)
84 |
85 | if Meeseeker.publish_op_custom_id
86 | if %w(custom custom_binary custom_json).include? op_type
87 | id = (op["value"]["id"] rescue nil).to_s
88 |
89 | if id.size > 0
90 | redis.publish("#{chain_key_prefix}:op:#{op_type}:#{id}", {key: key}.to_json)
91 | end
92 | end
93 | end
94 | end
95 | end
96 | private
97 | def stream_operations(options = {}, &block)
98 | chain = (options[:chain] || 'hive').to_sym
99 | redis = Meeseeker.redis
100 | chain_key_prefix = chain.to_s if !!options[:chain]
101 |       chain_key_prefix ||= Meeseeker.default_chain_key_prefix
102 | last_block_num = nil
103 | mode = options.delete(:mode) || Meeseeker.stream_mode
104 | options[:include_virtual] ||= Meeseeker.include_virtual
105 |
106 | if !!options[:at_block_num]
107 | last_block_num = options[:at_block_num].to_i
108 | else
109 | url = Meeseeker.default_url(chain)
110 | database_api = Meeseeker.database_api_class(chain).new(url: url)
111 | last_block_num = redis.get(chain_key_prefix + LAST_BLOCK_NUM_KEY_SUFFIX).to_i + 1
112 |
113 | block_num = catch :dynamic_global_properties do
114 | database_api.get_dynamic_global_properties do |dgpo|
115 | throw :dynamic_global_properties if dgpo.nil?
116 |
117 | case mode
118 | when :head then dgpo.head_block_number
119 | when :irreversible then dgpo.last_irreversible_block_num
120 | else; abort "Unknown stream mode: #{mode}"
121 | end
122 | end
123 | end
124 |
125 | if Meeseeker.expire_keys == -1
126 | last_block_num = [last_block_num, block_num].max
127 |
128 | puts "Sync from: #{last_block_num}"
129 | elsif block_num - last_block_num > Meeseeker.expire_keys / 3
130 | last_block_num = block_num
131 |
132 | puts 'Starting new sync.'
133 | else
134 | behind_sec = block_num - last_block_num
135 | behind_sec *= 3.0
136 |
137 | puts "Resuming from #{behind_sec / 60} minutes ago ..."
138 | end
139 | end
140 |
141 | begin
142 | url = Meeseeker.default_url(chain)
143 | stream_options = {url: url, mode: mode}
144 | options = options.merge(at_block_num: last_block_num)
145 | condenser_api = nil
146 |
147 |         Meeseeker.stream_class(chain).new(stream_options).tap do |stream|
148 | puts "Stream begin: #{stream_options.to_json}; #{options.to_json}"
149 |
150 | # Prior to v0.0.4, we only streamed operations with stream.operations.
151 |
152 | # After v0.0.5, we stream blocks so that we can get block.timestamp,
153 | # to embed it into op values. This should also reduce streaming
154 |           # overhead since we no longer stream block_headers under the hood.
155 |
156 | loop do
157 | begin
158 | stream.blocks(options) do |b, n|
159 | redo if b.nil?
160 |
161 | b.transactions.each_with_index do |transaction, index|
162 | transaction.operations.each do |op|
163 | op = op.merge(timestamp: b.timestamp)
164 |
165 | yield op, b.transaction_ids[index], n
166 | end
167 | end
168 |
169 | next unless !!Meeseeker.include_virtual
170 |
171 | retries = 0
172 |
173 |                 # This is where it gets tricky. Virtual ops sometimes don't show up
174 |                 # right away, especially if we're streaming on head blocks. In that
175 |                 # situation, we might only need to wait about 1 block. This loop
176 |                 # will likely execute only one iteration, but we have fallback logic
177 |                 # in case there are complications.
178 | #
179 | # See: https://developers.steem.io/tutorials-recipes/virtual-operations-when-streaming-blockchain-transactions
180 |
181 | loop do
182 | # TODO (HF23) Switch to account_history_api.enum_virtual_ops if supported.
183 | url = Meeseeker.default_url(chain)
184 | condenser_api ||= Meeseeker.condenser_api_class(chain).new(url: url)
185 | condenser_api.get_ops_in_block(n, true) do |vops|
186 | if vops.nil?
187 | puts "Node returned empty result for get_ops_in_block on block_num: #{n} (rate limiting?). Retrying ..."
188 | vops = []
189 | end
190 |
191 | if vops.empty? && mode != :head
192 | # Usually, we just need to slow down to allow virtual ops to
193 | # show up after a short delay. Adding this delay doesn't
194 | # impact overall performance because steem-ruby will batch
195 | # when block streams fall behind.
196 |
197 | if retries < MAX_VOP_RETRY
198 | retries = retries + 1
199 | condenser_api = nil
200 | sleep Meeseeker::BLOCK_INTERVAL * retries
201 |
202 | redo
203 | end
204 |
205 | puts "Gave up retrying virtual ops lookup on block #{n}"
206 |
207 | break
208 | end
209 |
210 | if retries > 0
211 |                       puts "Found virtual ops for block #{n} after #{retries} retry(s)"
212 | end
213 |
214 | vops.each do |vop|
215 | normalized_op = Hashie::Mash.new(
216 | type: vop.op[0],
217 | value: vop.op[1],
218 | timestamp: vop.timestamp
219 | )
220 |
221 | yield normalized_op, vop.trx_id, vop.block
222 | end
223 | end
224 |
225 | break
226 | end
227 | end
228 |
229 | break
230 | rescue => e
231 | raise e unless e.to_s.include? 'Request Entity Too Large'
232 |
233 | # We need to tell steem-ruby to avoid json-rpc-batch on this
234 | # node.
235 |
236 | Meeseeker.block_api_class(chain).const_set 'MAX_RANGE_SIZE', 1
237 | sleep Meeseeker::BLOCK_INTERVAL
238 | redo
239 | end
240 | end
241 | end
242 | end
243 | end
244 | end
245 | end
246 |
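A hedged consumer sketch for the channels this job publishes, using the redis gem (a declared dependency). The 'hive' prefix and the vote example are illustrative assumptions; substitute whichever chain and op types you follow.

require 'json'
require 'redis'

subscriber = Redis.new # assumes a local redis-server, as in the Dockerfile
reader     = Redis.new # a subscribed connection can't issue GET, so use a second one

# Op channels publish {"key" => "<chain>:<block_num>:<trx_id>:<trx_index>:<op_type>"};
# the op body itself is stored under that key until it expires.
subscriber.subscribe('hive:block', 'hive:op:vote') do |on|
  on.message do |channel, message|
    payload = JSON.parse(message)

    case channel
    when 'hive:block'
      puts "new block: #{payload['block_num']}"
    when 'hive:op:vote'
      op = JSON.parse(reader.get(payload['key']) || '{}')
      puts "vote by #{op.dig('value', 'voter')}" # 'voter' assumed from the vote op structure
    end
  end
end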
--------------------------------------------------------------------------------
/lib/meeseeker/hive_engine.rb:
--------------------------------------------------------------------------------
1 | module Meeseeker::HiveEngine
2 |
3 | class Agent < Meeseeker::SteemEngine::Agent
4 | def initialize(options = {})
5 | super
6 |
7 | self.user_agent = Meeseeker::AGENT_ID
8 | self.max_history = 0
9 | self.default_encoding = 'UTF-8'
10 |
11 | @node_url = options[:url] || Meeseeker::hive_engine_node_url
12 | end
13 | end
14 |
15 | class FollowerJob < Meeseeker::SteemEngine::FollowerJob
16 | def initialize(options = {})
17 | @chain_key_prefix = options[:chain_key_prefix] || Meeseeker::HIVE_ENGINE_CHAIN_KEY_PREFIX
18 | end
19 | end
20 | end
21 |
--------------------------------------------------------------------------------
/lib/meeseeker/steem_engine/agent.rb:
--------------------------------------------------------------------------------
1 | require 'mechanize'
2 |
3 | module Meeseeker::SteemEngine
4 | class Agent < Mechanize
5 | POST_HEADERS = {
6 | 'Content-Type' => 'application/json; charset=utf-8',
7 | 'User-Agent' => Meeseeker::AGENT_ID
8 | }
9 |
10 | def initialize(options = {})
11 | super
12 |
13 | self.user_agent = Meeseeker::AGENT_ID
14 | self.max_history = 0
15 | self.default_encoding = 'UTF-8'
16 |
17 | @node_url = options[:url] || Meeseeker::steem_engine_node_url
18 | end
19 |
20 | def blockchain_uri
21 | @blockchain_uri ||= URI.parse(@node_url + '/blockchain')
22 | end
23 |
24 | def blockchain_http_post
25 | @http_post ||= Net::HTTP::Post.new(blockchain_uri.request_uri, POST_HEADERS)
26 | end
27 |
28 | def latest_block_info
29 | 5.times do
30 | request_body = {
31 | jsonrpc: "2.0",
32 | method: :getLatestBlockInfo,
33 | id: rpc_id
34 | }.to_json
35 |
36 | response = request_with_entity :post, blockchain_uri, request_body, POST_HEADERS
37 | latest_block_info = JSON[response.body]["result"]
38 |
39 | return latest_block_info if !!latest_block_info
40 |
41 | sleep 3
42 | end
43 |
44 | return nil
45 | end
46 |
47 | def block(block_num)
48 | 5.times do
49 | request_body = {
50 | jsonrpc: "2.0",
51 | method: :getBlockInfo,
52 | params: {
53 | blockNumber: block_num.to_i
54 | },
55 | id: rpc_id
56 | }.to_json
57 |
58 | response = request_with_entity :post, blockchain_uri, request_body, POST_HEADERS
59 | block = JSON[response.body]["result"]
60 |
61 | return block if !!block
62 |
63 | sleep 3
64 | end
65 |
66 | return nil
67 | end
68 | private
69 | def rpc_id
70 | @rpc_id ||= 0
71 | @rpc_id = @rpc_id + 1
72 | end
73 | end
74 | end
75 |
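A short usage sketch for the agent above, assuming a reachable Steem Engine node at the default Meeseeker::steem_engine_node_url.

require 'meeseeker'

agent = Meeseeker::SteemEngine::Agent.new

if (info = agent.latest_block_info)
  block = agent.block(info['blockNumber'])

  puts "block #{info['blockNumber']} has #{block['transactions'].size} transaction(s)" if block
end

agent.shutdown # Mechanize agents hold open connections; release them when done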
--------------------------------------------------------------------------------
/lib/meeseeker/steem_engine/follower_job.rb:
--------------------------------------------------------------------------------
1 | module Meeseeker::SteemEngine
2 | MAX_RETRY_INTERVAL = 18.0
3 |
4 | class FollowerJob
5 | def initialize(options = {})
6 | @chain_key_prefix = options[:chain_key_prefix] || Meeseeker::STEEM_ENGINE_CHAIN_KEY_PREFIX
7 | end
8 |
9 | def chain_name
10 | @chain_key_prefix.split('_').map(&:capitalize).join(' ')
11 | end
12 |
13 | def perform(options = {})
14 | redis = Meeseeker.redis
15 | last_key_prefix = nil
16 | trx_index = 0
17 | current_block_num = nil
18 | block_transactions = []
19 |
20 | stream_transactions(options) do |data, block|
21 | transaction = data[:transaction]
22 | virtual = !!data[:virtual]
23 |
24 | begin
25 | trx_id = transaction['transactionId'].to_s.split('-').first
26 | block_num = block['blockNumber']
27 | current_key_prefix = "#{@chain_key_prefix}:#{block_num}:#{trx_id}"
28 | contract = transaction['contract']
29 | action = transaction['action']
30 |
31 | if current_key_prefix == last_key_prefix
32 | trx_index += 1
33 | else
34 | if !!last_key_prefix
35 | _, b, t = last_key_prefix.split(':')
36 | transaction_payload = {
37 | block_num: b.to_i,
38 | transaction_id: t,
39 | transaction_num: block_transactions.size
40 | }
41 |
42 | block_transactions << trx_id
43 |
44 | trx_pub_key = if !!virtual
45 | "#{@chain_key_prefix}:virtual_transaction"
46 | else
47 | "#{@chain_key_prefix}:transaction"
48 | end
49 |
50 | redis.publish(trx_pub_key, transaction_payload.to_json)
51 | end
52 |
53 | last_key_prefix = "#{@chain_key_prefix}:#{block_num}:#{trx_id}"
54 | trx_index = 0
55 | end
56 |
57 | key = "#{current_key_prefix}:#{trx_index}:#{contract}:#{action}"
58 | puts key
59 | end
60 |
61 | unless Meeseeker.max_keys == -1
62 | while redis.keys("#{@chain_key_prefix}:*").size > Meeseeker.max_keys
63 | sleep Meeseeker::BLOCK_INTERVAL
64 | end
65 | end
66 |
67 | redis.set(key, transaction.to_json)
68 | redis.expire(key, Meeseeker.expire_keys) unless Meeseeker.expire_keys == -1
69 |
70 | if current_block_num != block_num
71 | block_transactions = []
72 | block_payload = {
73 | block_num: block_num
74 | }
75 |
76 | redis.set(@chain_key_prefix + Meeseeker::LAST_STEEM_ENGINE_BLOCK_NUM_KEY_SUFFIX, block_num)
77 | redis.publish("#{@chain_key_prefix}:block", block_payload.to_json)
78 | current_block_num = block_num
79 | end
80 |
81 | redis.publish("#{@chain_key_prefix}:#{contract}", {key: key}.to_json)
82 | redis.publish("#{@chain_key_prefix}:#{contract}:#{action}", {key: key}.to_json)
83 | end
84 | end
85 | private
86 | def agent
87 | @agent ||= case @chain_key_prefix
88 | when 'steem_engine' then Agent.new
89 | when 'hive_engine' then Meeseeker::HiveEngine::Agent.new
90 | end
91 | end
92 |
93 | def agent_reset
94 | return if @agent.nil?
95 |
96 | @agent.shutdown
97 | @agent = nil
98 | end
99 |
100 | def retry_interval
101 | @retry_interval ||= 0.1
102 | @retry_interval *= 2
103 |
104 | [@retry_interval, MAX_RETRY_INTERVAL].min
105 | end
106 |
107 | def reset_retry_interval
108 | @retry_interval = nil
109 | end
110 |
111 | def stream_transactions(options = {}, &block)
112 | redis = Meeseeker.redis
113 | last_block_num = nil
114 | until_block_num = options[:until_block_num].to_i
115 |
116 | if !!options[:at_block_num]
117 | last_block_num = options[:at_block_num].to_i
118 | else
119 | new_sync = false
120 | last_block_num = redis.get(@chain_key_prefix + Meeseeker::LAST_STEEM_ENGINE_BLOCK_NUM_KEY_SUFFIX)
121 | block_info = agent.latest_block_info
122 | block_num = block_info['blockNumber']
123 | last_block = agent.block(block_num)
124 | last_block_timestamp = Time.parse(last_block['timestamp'] + 'Z')
125 |
126 | if last_block_num.nil?
127 | new_sync = true
128 | last_block_num = block_num
129 | else
130 | last_block_num = last_block_num.to_i + 1
131 | end
132 |
133 | if Meeseeker.expire_keys == -1
134 | last_block_num = [last_block_num, block_num].max
135 |
136 | puts "Sync #{chain_name} from: #{last_block_num}"
137 | elsif new_sync || (Time.now.utc - last_block_timestamp > Meeseeker.expire_keys)
138 | last_block_num = block_num + 1
139 |
140 | puts "Starting new #{chain_name} sync."
141 | else
142 | puts "Resuming from #{chain_name} block #{last_block_num} ..."
143 | end
144 | end
145 |
146 | block_num = last_block_num
147 |
148 | loop do
149 | begin
150 | block = agent.block(block_num)
151 | reset_retry_interval
152 | rescue Net::HTTP::Persistent::Error => e
153 | puts "Retrying: #{e}"
154 | agent_reset
155 | sleep retry_interval
156 | redo
157 | end
158 |
159 | if block.nil?
160 | sleep Meeseeker::BLOCK_INTERVAL
161 | redo
162 | end
163 |
164 | transactions = block['transactions']
165 |
166 | transactions.each do |transaction|
167 | yield({transaction: transaction.merge(timestamp: block['timestamp'])}, block)
168 | end
169 |
170 | virtual_transactions = block['virtualTransactions']
171 |
172 | virtual_transactions.each do |virtual_transaction|
173 | _, vtrx_in_block = virtual_transaction['transactionId'].split('-')
174 | virtual_transaction = virtual_transaction.merge(
175 | timestamp: block['timestamp'],
176 | 'transactionId' => "#{Meeseeker::VIRTUAL_TRX_ID}-#{vtrx_in_block}"
177 | )
178 |
179 | yield({transaction: virtual_transaction, virtual: true}, block)
180 | end
181 |
182 | break if until_block_num != 0 && block_num > until_block_num
183 |
184 | block_num = block_num + 1
185 | end
186 | end
187 | end
188 | end
189 |
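A consumer sketch for the channels this job publishes, again via the redis gem; 'tokens' is only an example contract name, not something this code guarantees.

require 'json'
require 'redis'

subscriber = Redis.new
reader     = Redis.new # lookups can't happen on the subscribed connection

# Contract channels publish {"key" => "steem_engine:<block_num>:<trx_id>:<index>:<contract>:<action>"};
# the stored value is the full transaction.
subscriber.subscribe('steem_engine:block', 'steem_engine:tokens') do |on|
  on.message do |channel, message|
    payload = JSON.parse(message)

    if channel == 'steem_engine:block'
      puts "engine block: #{payload['block_num']}"
    else
      transaction = JSON.parse(reader.get(payload['key']) || '{}')
      puts "#{transaction['contract']}.#{transaction['action']} in trx #{transaction['transactionId']}"
    end
  end
end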
--------------------------------------------------------------------------------
/lib/meeseeker/version.rb:
--------------------------------------------------------------------------------
1 | module Meeseeker
2 | VERSION = '2.0.0'
3 | AGENT_ID = "meeseeker/#{VERSION}"
4 | end
5 |
--------------------------------------------------------------------------------
/lib/meeseeker/witness_schedule_job.rb:
--------------------------------------------------------------------------------
1 | module Meeseeker
2 | class WitnessScheduleJob
3 | def perform(options = {})
4 | chain = (options[:chain] || 'hive').to_sym
5 | chain_key_prefix = chain.to_s if !!options[:chain]
6 | chain_key_prefix ||= Meeseeker.default_chain_key_prefix
7 | url = Meeseeker.default_url(chain_key_prefix)
8 | database_api = Meeseeker.database_api_class(chain_key_prefix).new(url: url)
9 | redis = Meeseeker.redis
10 | mode = options.delete(:mode) || Meeseeker.stream_mode
11 | schedule = nil
12 | last_shuffle_block_num = nil
13 |
14 | loop do
15 | # Using hammer assignment will ensure we only request a new schedule
16 | # after we've published.
17 |
18 | schedule ||= catch :witness_schedule do
19 | database_api.get_witness_schedule do |result|
20 | throw :witness_schedule if result.nil?
21 |
22 | result
23 | end
24 | end
25 |
26 | next_shuffle_block_num = schedule.next_shuffle_block_num
27 | block_num = catch :dynamic_global_properties do
28 | database_api.get_dynamic_global_properties do |dgpo|
29 | throw :dynamic_global_properties if dgpo.nil?
30 |
31 | case mode
32 | when :head then dgpo.head_block_number
33 | when :irreversible then dgpo.last_irreversible_block_num
34 | else; abort "Unknown stream mode: #{mode}"
35 | end
36 | end
37 | end
38 |
39 | # Find out how far away we are from the next schedule.
40 |
41 | remaining_blocks = [next_shuffle_block_num - block_num - 1.5, 0].max
42 |
43 |         # It's better to publish the schedule a little late than to miss an
44 |         # entire schedule, so we subtract 1.5 blocks from the total.
45 | # Sometimes we check a little early and sometimes we check a little
46 | # late. But it all averages out.
47 |
48 | if remaining_blocks > 0
49 | delay = [remaining_blocks * 3.0, 0.25].max
50 | puts "Sleeping for #{delay} seconds (remaining blocks: #{remaining_blocks})."
51 | sleep delay
52 | next
53 | end
54 |
55 | # Now that we've reached the current schedule, check if we've published
56 | # it already. If not, publish and reset for the next schedule.
57 |
58 | if next_shuffle_block_num != last_shuffle_block_num
59 | puts "next_shuffle_block_num: #{next_shuffle_block_num}; current_shuffled_witnesses: #{schedule.current_shuffled_witnesses.join(', ')}"
60 | redis.publish("#{chain_key_prefix}:witness:schedule", schedule.to_json)
61 | last_shuffle_block_num = next_shuffle_block_num
62 | end
63 |
64 |         schedule = nil # re-enable hammer assignment
65 |
66 | if !!options[:until_block_num]
67 | break if block_num >= options[:until_block_num].to_i
68 | end
69 | end
70 | end
71 | end
72 | end
73 |
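A consumer sketch for the schedule channel above, assuming the 'hive' prefix; the payload is whatever get_witness_schedule returned, serialized to JSON.

require 'json'
require 'redis'

# Published roughly once per witness shuffle (see the loop above).
Redis.new.subscribe('hive:witness:schedule') do |on|
  on.message do |_channel, message|
    schedule = JSON.parse(message)

    puts "shuffle at block #{schedule['next_shuffle_block_num']}: " \
         "#{Array(schedule['current_shuffled_witnesses']).join(', ')}"
  end
end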
--------------------------------------------------------------------------------
/meeseeker.gemspec:
--------------------------------------------------------------------------------
1 | # coding: utf-8
2 | lib = File.expand_path('../lib', __FILE__)
3 | $LOAD_PATH.unshift(lib) unless $LOAD_PATH.include?(lib)
4 | require 'meeseeker/version'
5 |
6 | Gem::Specification.new do |s|
7 | s.name = 'meeseeker'
8 | s.version = Meeseeker::VERSION
9 | s.licenses = 'CC0-1.0'
10 |   s.summary = 'A Redis-based block follower: an efficient way for multiple apps to stream the Steem blockchain.'
11 |   s.description = 'If you have multiple applications that need to perform actions as operations occur, `meeseeker` will allow your apps to each perform actions for specific operations without each app having to stream the entire blockchain.'
12 | s.authors = ['Anthony Martin']
13 |   s.email = ['meeseeker@martin-studio.com']
14 | s.files = Dir['bin/**/*', 'lib/**/*', 'test/**/*', 'Gemfile', 'LICENSE', 'Rakefile', 'README.md', 'meeseeker.gemspec']
15 | s.test_files = Dir['test/**/*']
16 | s.executables = Dir['bin/*'].map{ |f| File.basename(f) }
17 | s.homepage = 'https://rubygems.org/gems/meeseeker'
18 | s.metadata = { 'source_code_uri' => 'https://github.com/inertia186/meeseeker' }
19 | s.bindir = 'bin'
20 | s.executables = 'meeseeker'
21 |
22 | # Ruby Make (interprets the Rakefile DSL).
23 | s.add_development_dependency 'rake', '~> 12.3', '>= 12.3.1'
24 | s.add_development_dependency 'minitest', '~> 5.10', '>= 5.10.3'
25 | s.add_development_dependency 'minitest-line', '~> 0.6', '>= 0.6.4'
26 | s.add_development_dependency 'minitest-proveit', '~> 1.0', '>= 1.0.0'
27 | s.add_development_dependency 'simplecov', '~> 0.15', '>= 0.15.1'
28 | s.add_development_dependency 'pry', '~> 0.11', '>= 0.11.3'
29 | s.add_development_dependency 'irb', '~> 1.0', '>= 1.0.0'
30 | s.add_development_dependency 'mock_redis', '~> 0.22', '>= 0.22.0'
31 |
32 | s.add_dependency 'redis', '~> 4.1', '>= 4.1.0'
33 | s.add_dependency 'steem-ruby', '~> 0.9', '>= 0.9.4'
34 | s.add_dependency 'hive-ruby', '~> 1.0.0', '>= 1.0.0'
35 | s.add_dependency 'mechanize', '~> 2.7', '>= 2.7.6'
36 | s.add_dependency 'rb-readline', '~> 0.5', '>= 0.5.5'
37 | end
38 |
--------------------------------------------------------------------------------
/test/meeseeker/meeseeker_test.rb:
--------------------------------------------------------------------------------
1 | require 'test_helper'
2 | require 'rake'
3 |
4 | module Meeseeker
5 | class MeeseekerTest < Meeseeker::Test
6 | def setup
7 | @max_blocks = 30 # must be at least 15 to get past irreversible
8 | end
9 |
10 | def test_verify_hive_jobs
11 | chain = 'hive'
12 | check_keys(chain)
13 | keys = []
14 |
15 | begin
16 | Rake::Task['verify:block_org'].reenable
17 | assert Rake::Task['verify:block_org'].invoke('hive', @max_blocks)
18 | rescue SystemExit => e
19 | puts 'Exited.'
20 | rescue Redis::TimeoutError => e
21 | skip 'Timed out.'
22 | end
23 |
24 | begin
25 | Rake::Task['verify:witness:schedule'].reenable
26 | assert Rake::Task['verify:witness:schedule'].invoke('hive', @max_blocks)
27 | rescue SystemExit => e
28 | puts 'Exited.'
29 | rescue Redis::TimeoutError => e
30 | skip 'Timed out.'
31 | end
32 |
33 | block_api = Hive::BlockApi.new(url: 'http://anyx.io')
34 | keys = Meeseeker.redis.keys('hive:*')
35 | data = keys.map do |key|
36 | next if key == 'hive:meeseeker:last_block_num'
37 |
38 | n, b, t, i, o = key.split(':')
39 |
40 | assert_equal 'hive', n, "expected hive key, got: #{key}"
41 |
42 | [b, t]
43 | end.compact.sample(10).to_h
44 |
45 | assert data.any?, 'expect hive data'
46 |
47 | data.each do |b, t|
48 | block_api.get_block(block_num: b) do |result|
49 | block = result.block
50 |
51 | refute_nil block, "did not expect nil block (#{b})"
52 |
53 | if !!block.transaction_ids
54 | assert block.transaction_ids.include?(t), "Could not find hive trx_id (#{t}) in block (#{b})."
55 | else
56 | puts "Skipped check for hive trx_id (#{t}) in block (#{b}) because API does not support lookup."
57 | end
58 | end
59 | end
60 |
61 | if keys.any?
62 | dropped = Meeseeker.redis.del(*keys)
63 | puts "Dropped #{dropped} keys."
64 | else
65 | fail 'No keys.'
66 | end
67 | end
68 |
69 | def test_verify_steem_jobs
70 | chain = 'steem'
71 | check_keys(chain)
72 | keys = []
73 |
74 | begin
75 | Rake::Task['verify:block_org'].reenable
76 | assert Rake::Task['verify:block_org'].invoke('steem', @max_blocks)
77 | rescue SystemExit => e
78 | puts 'Exited.'
79 | rescue Redis::TimeoutError => e
80 | skip 'Timed out.'
81 | end
82 |
83 | begin
84 | Rake::Task['verify:witness:schedule'].reenable
85 | assert Rake::Task['verify:witness:schedule'].invoke('steem', @max_blocks)
86 | rescue SystemExit => e
87 | puts 'Exited.'
88 | rescue Redis::TimeoutError => e
89 | skip 'Timed out.'
90 | end
91 |
92 | block_api = Steem::BlockApi.new
93 | keys = Meeseeker.redis.keys('steem:*')
94 | data = keys.map do |key|
95 | next if key == 'steem:meeseeker:last_block_num'
96 |
97 | n, b, t, i, o = key.split(':')
98 |
99 | assert_equal 'steem', n, "expected steem key, got: #{key}"
100 |
101 | [b, t]
102 | end.compact.sample(10).to_h
103 |
104 | assert data.any?, 'expect steem data'
105 |
106 | data.each do |b, t|
107 | block_api.get_block(block_num: b) do |result|
108 | block = result.block
109 |
110 | refute_nil block, "did not expect nil block (#{b})"
111 |
112 | if !!block.transaction_ids
113 | assert block.transaction_ids.include?(t), "Could not find steem trx_id (#{t}) in block (#{b})."
114 | else
115 | puts "Skipped check for steem trx_id (#{t}) in block (#{b}) because API does not support lookup."
116 | end
117 | end
118 | end
119 |
120 | if keys.any?
121 | dropped = Meeseeker.redis.del(*keys)
122 | puts "Dropped #{dropped} keys."
123 | else
124 | fail 'No keys.'
125 | end
126 | end
127 |
128 | def test_verify_steem_engine_jobs
129 | chain = 'steem_engine'
130 | check_keys(chain)
131 | keys = []
132 |
133 | begin
134 | Rake::Task['verify:steem_engine_block_org'].reenable
135 | Rake::Task['verify:engine_block_org'].reenable
136 | assert Rake::Task['verify:steem_engine_block_org'].invoke(@max_blocks)
137 | rescue SystemExit => e
138 | puts 'Exited.'
139 | rescue Redis::TimeoutError => e
140 | skip 'Timed out.'
141 | end
142 |
143 | begin
144 | Rake::Task['verify:steem_engine_ref_blocks'].reenable
145 | Rake::Task['verify:engine_ref_blocks'].reenable
146 | assert Rake::Task['verify:steem_engine_ref_blocks'].invoke(@max_blocks)
147 | rescue SystemExit => e
148 | puts 'Exited.'
149 | rescue Redis::TimeoutError => e
150 | skip 'Timed out.'
151 | end
152 |
153 | agent = Meeseeker::SteemEngine::Agent.new
154 | keys = Meeseeker.redis.keys('steem_engine:*')
155 | data = keys.map do |key|
156 | n, b, t, i, o = key.split(':')
157 |
158 | assert_equal chain, n, "expected steem_engine key, got: #{key}"
159 |
160 | next if t == Meeseeker::VIRTUAL_TRX_ID
161 |
162 | [b, t]
163 | end.compact.sample(10).to_h
164 |
165 | assert data.any?, 'expect steem_engine data'
166 |
167 | data.each do |b, t|
168 | block = agent.block(b)
169 | refute_nil block, "did not expect nil block (#{b})"
170 |
171 | count = block['transactions'].select do |trx|
172 | trx['transactionId'].include? t
173 | end.size
174 |
175 | assert count > 0, "Could not find steem_engine trx_id (#{t}) in block (#{b})."
176 | end
177 |
178 | agent.shutdown
179 |
180 | if keys.any?
181 | dropped = Meeseeker.redis.del(*keys)
182 | puts "Dropped #{dropped} keys."
183 | else
184 | fail 'No keys.'
185 | end
186 | end
187 |
188 | def test_verify_hive_engine_jobs
189 | chain = 'hive_engine'
190 | check_keys(chain)
191 | keys = []
192 |
193 | begin
194 | Rake::Task['verify:hive_engine_block_org'].reenable
195 | Rake::Task['verify:engine_block_org'].reenable
196 | assert Rake::Task['verify:hive_engine_block_org'].invoke(@max_blocks)
197 | rescue SystemExit => e
198 | puts 'Exited.'
199 | rescue Redis::TimeoutError => e
200 | skip 'Timed out.'
201 | end
202 |
203 | begin
204 | Rake::Task['verify:hive_engine_ref_blocks'].reenable
205 | Rake::Task['verify:engine_ref_blocks'].reenable
206 | assert Rake::Task['verify:hive_engine_ref_blocks'].invoke(@max_blocks)
207 | rescue SystemExit => e
208 | puts 'Exited.'
209 | rescue Redis::TimeoutError => e
210 | skip 'Timed out.'
211 | end
212 |
213 | agent = Meeseeker::HiveEngine::Agent.new
214 | keys = Meeseeker.redis.keys('hive_engine:*')
215 | data = keys.map do |key|
216 | n, b, t, i, o = key.split(':')
217 |
218 | assert_equal chain, n, "expected hive_engine key, got: #{key}"
219 |
220 | next if t == Meeseeker::VIRTUAL_TRX_ID
221 |
222 | [b, t]
223 | end.compact.sample(10).to_h
224 |
225 | assert data.any?, 'expect hive_engine data'
226 |
227 | data.each do |b, t|
228 | block = agent.block(b)
229 | refute_nil block, "did not expect nil block (#{b})"
230 |
231 | count = block['transactions'].select do |trx|
232 | trx['transactionId'].include? t
233 | end.size
234 |
235 | assert count > 0, "Could not find hive_engine trx_id (#{t}) in block (#{b})."
236 | end
237 |
238 | agent.shutdown
239 |
240 | if keys.any?
241 | dropped = Meeseeker.redis.del(*keys)
242 | puts "Dropped #{dropped} keys."
243 | else
244 | fail 'No keys.'
245 | end
246 | end
247 | private
248 | def check_keys(chain)
249 | chain = chain_key_prefix = chain.to_s
250 | Meeseeker.node_url = case chain.to_sym
251 | when :hive_engine then Meeseeker.shuffle_node_url('hive')
252 | when :steem_engine then Meeseeker.shuffle_node_url('steem')
253 | else
254 | Meeseeker.shuffle_node_url(chain.to_s)
255 | end
256 |
257 | begin
258 | if !!Meeseeker.redis.get(chain_key_prefix + Meeseeker::LAST_BLOCK_NUM_KEY_SUFFIX)
259 | fail "Found existing keys. Please use 'rake reset' to enable this test."
260 | end
261 | rescue Redis::CannotConnectError => e
262 | warn "Cannot connect to redis, using MockRedis instead."
263 |
264 | Meeseeker.redis = MockRedis.new
265 | end
266 | end
267 | end
268 | end
269 |
--------------------------------------------------------------------------------
/test/test_helper.rb:
--------------------------------------------------------------------------------
1 | $LOAD_PATH.unshift File.expand_path('../../lib', __FILE__)
2 |
3 | require 'simplecov'
4 |
5 | SimpleCov.start
6 | SimpleCov.merge_timeout 3600
7 |
8 | require 'meeseeker'
9 | require 'mock_redis'
10 | require 'minitest/autorun'
11 | require 'minitest/line/describe_track'
12 | require 'minitest/hell'
13 | require 'minitest/proveit'
14 | require 'pry'
15 |
16 | # In order to test Rakefile:
17 | gem_dir = File.expand_path("..", File.dirname(__FILE__))
18 | $LOAD_PATH.unshift gem_dir
19 |
20 | pwd = Dir.pwd
21 | Dir.chdir(gem_dir)
22 | Rake.application.init
23 | Rake.application.load_rakefile
24 | Dir.chdir(pwd)
25 |
26 | class Minitest::Test
27 | parallelize_me!
28 | end
29 |
30 | class Meeseeker::Test < MiniTest::Test
31 | defined? prove_it! and prove_it!
32 | end
33 |
--------------------------------------------------------------------------------