--------------------------------------------------------------------------------
/maintainer/maintainer/header_forwarder/h.py:
--------------------------------------------------------------------------------
1 | import sys
2 | import asyncio
3 | import logging
4 |
5 | from maintainer import base, utils
6 | from maintainer.bitcoin import bcoin_rpc, bsock
7 | from maintainer.ethereum import contract, shared
8 | from maintainer.header_forwarder import pull, push
9 |
10 | from typing import cast
11 | from btcspv.types import RelayHeader
12 |
13 | logger = logging.getLogger('root.header_forwarder')
14 | logging.basicConfig(
15 | format='%(asctime)6s %(name)s: %(levelname)s %(message)s',
16 | level=logging.INFO,
17 | datefmt='%Y-%m-%d %H:%M:%S')
18 |
19 |
20 | async def run() -> None:
21 | header_q: 'asyncio.Queue[RelayHeader]' = asyncio.Queue(maxsize=50)
22 | await shared.init()
23 |
24 | latest_digest = await contract.get_best_block()
25 |
26 | if len(latest_digest) != 64:
27 | raise ValueError(
28 | 'Expected 32 byte digest from contract. '
29 | f'Received {len(latest_digest) // 2} bytes instead. '
30 | 'Hint: is this account authorized?')
31 |
32 | latest_or_none = await bcoin_rpc.get_header_by_hash_le(latest_digest)
33 | if latest_or_none is None:
34 | raise ValueError(
35 | 'Relay\'s latest digest is not known to the Bitcoin node. '
36 | f'Got {latest_digest}. '
37 | 'Hint: is your node on the same Bitcoin network as the relay?')
38 | latest = cast(RelayHeader, latest_or_none)
39 | better_or_same = cast(
40 | RelayHeader,
41 | await bcoin_rpc.get_header_by_height(latest['height']))
42 |
43 | # see if there's a better block at that height
44 | # if so, crawl backwards
45 | while latest != better_or_same:
46 | latest = cast(
47 | RelayHeader,
48 | await bcoin_rpc.get_header_by_hash_le(latest['prevhash']))
49 | better_or_same = cast(
50 | RelayHeader,
51 | await bcoin_rpc.get_header_by_height(latest['height']))
52 |
53 | logger.info(
54 | f'latest is {utils.format_header(latest)}')
55 |
56 | asyncio.create_task(pull.pull_headers(latest, header_q))
57 | asyncio.create_task(push.push_headers(latest, header_q))
58 |
59 |
60 | async def teardown() -> None:
61 | coros = [
62 | # close http session
63 | bcoin_rpc.close_connection(),
64 | # close socketio connection
65 | bsock.close_connection(),
66 | # close infura websocket
67 | shared.close_connection()
68 | ]
69 |
70 | await asyncio.gather(*coros, return_exceptions=True)
71 |
72 |
73 | if __name__ == '__main__':
74 | try:
75 | name = 'header_forwarder'
76 | base.main(run=run, logger=logger, name=name, teardown=teardown)
77 | except Exception:
78 | logger.exception('---- Fatal Exception ----')
79 | sys.exit(1)
80 |
--------------------------------------------------------------------------------
/golang/x/relay/keeper/validator.go:
--------------------------------------------------------------------------------
1 | package keeper
2 |
3 | import (
4 | sdk "github.com/cosmos/cosmos-sdk/types"
5 | "github.com/summa-tx/relays/golang/x/relay/types"
6 | )
7 |
8 | func (k Keeper) emitProofProvided(
9 | ctx sdk.Context,
10 | filled types.FilledRequests,
11 | ) {
12 | filledIDs := []types.RequestID{}
13 | for _, f := range filled.Filled {
14 | filledIDs = append(filledIDs, f.ID)
15 | }
16 | ctx.EventManager().EmitEvent(types.NewProofProvidedEvent(filled.Proof.TxID, filledIDs))
17 | }
18 |
19 | // getConfs returns the number of confirmations of any given header
20 | func (k Keeper) getConfs(ctx sdk.Context, header types.BitcoinHeader) (uint32, sdk.Error) {
21 | bestKnown, err := k.GetBestKnownDigest(ctx)
22 | if err != nil {
23 | return 0, err
24 | }
25 | bestKnownHeader, err := k.GetHeader(ctx, bestKnown)
26 | if err != nil {
27 | return 0, err
28 | }
29 | return bestKnownHeader.Height - header.Height, nil
30 | }
31 |
32 | // validateProof validates an SPV Proof and checks that it is stored correctly
33 | func (k Keeper) validateProof(ctx sdk.Context, proof types.SPVProof) sdk.Error {
34 | // If it is not valid, it will return an error
35 | _, err := proof.Validate()
36 | if err != nil {
37 | return types.FromBTCSPVError(types.DefaultCodespace, err)
38 | }
39 |
40 | lca, lcaErr := k.GetLastReorgLCA(ctx)
41 | if lcaErr != nil {
42 | return lcaErr
43 | }
44 | isAncestor := k.IsAncestor(ctx, proof.ConfirmingHeader.Hash, lca, 240)
45 | if !isAncestor {
46 | return types.ErrNotAncestor(types.DefaultCodespace, proof.ConfirmingHeader.Hash)
47 | }
48 |
49 | return nil
50 | }
51 |
52 | func (k Keeper) checkRequestsFilled(ctx sdk.Context, filledRequests types.FilledRequests) ([]types.ProofRequest, sdk.Error) {
53 | // Validate Proof once
54 | err := k.validateProof(ctx, filledRequests.Proof)
55 | if err != nil {
56 | return nil, err
57 | }
58 |
59 | confs, confsErr := k.getConfs(ctx, filledRequests.Proof.ConfirmingHeader)
60 | if confsErr != nil {
61 | return nil, confsErr
62 | }
63 |
64 | var filled []types.ProofRequest
65 |
66 | for i := range filledRequests.Filled {
67 | // get request
68 | request, getErr := k.getRequest(ctx, filledRequests.Filled[i].ID)
69 | if getErr != nil {
70 | return nil, getErr
71 | }
72 | // check confirmations
73 | if confs < uint32(request.NumConfs) {
74 | return nil, types.ErrNotEnoughConfs(types.DefaultCodespace, filledRequests.Filled[i].ID)
75 | }
76 |
77 | // check request
78 | err := k.checkRequests(
79 | ctx,
80 | filledRequests.Filled[i].InputIndex,
81 | filledRequests.Filled[i].OutputIndex,
82 | filledRequests.Proof.Vin,
83 | filledRequests.Proof.Vout,
84 | filledRequests.Filled[i].ID)
85 | if err != nil {
86 | return nil, err
87 | }
88 |
89 | filled = append(filled, request)
90 | }
91 |
92 | k.emitProofProvided(ctx, filledRequests)
93 | return filled, nil
94 | }
95 |
--------------------------------------------------------------------------------
/golang/dashboard/src/store/external.js:
--------------------------------------------------------------------------------
1 | import axios from 'axios'
2 | import * as types from '@/store/mutation-types'
3 | import { lStorage, convertUnixTimestamp } from '@/utils/utils'
4 |
5 | const state = {
6 | source: 'blockstream.info',
7 |
8 | lastComms: lStorage.get('lastCommsExternal') || undefined, // Date
9 |
10 | currentBlock: lStorage.get('currentBlock') || {
11 | height: 0, // Number - Current block height, from external
12 | hash: '', // String - Current block hash, from external
13 | time: undefined, // Date - Current block timestamp, from external
14 | updatedAt: undefined, // Date - When was this data updated
15 | },
16 |
17 | // Keep track of previous block information
18 | // If incoming block number increments, then move currentBlock info to here
19 | // and incoming block info goes to currentBlock
20 | previousBlocks: lStorage.get('previousBlocks') || []
21 | }
22 |
23 | const mutations = {
24 | [types.SET_LAST_COMMS_EXTERNAL] (state, date) {
25 | state.lastComms = date
26 | lStorage.set('lastCommsExternal', state.lastComms)
27 | },
28 |
29 | [types.SET_CURRENT_BLOCK] (state, block) {
30 | let newBlock = state.currentBlock
31 | Object.keys(block).forEach((prop) => {
32 | newBlock[prop] = block[prop]
33 | })
34 | state.currentBlock = newBlock
35 | lStorage.set('currentBlock', state.currentBlock)
36 | },
37 |
38 | // This is called when current block is updated
39 | // Take all data and put it here
40 | // TODO: Make sure to control and handle duplicates
41 | [types.ADD_PREVIOUS_BLOCK] (state, block) {
42 | state.previousBlocks.push(block)
43 | lStorage.set('previousBlocks', state.previousBlocks)
44 | }
45 | }
46 |
47 | const actions = {
48 | addPreviousBlock ({ commit, state }, newBlock) {
49 | if (newBlock.height > state.currentBlock.height) {
50 | commit(types.ADD_PREVIOUS_BLOCK, state.currentBlock)
51 | }
52 | },
53 |
54 | async updateCurrentBlock ({ dispatch, commit }, newBlock) {
55 | await dispatch('addPreviousBlock', newBlock)
56 | commit(types.SET_CURRENT_BLOCK, newBlock)
57 |
58 | },
59 |
60 | getExternalInfo ({ dispatch, commit, rootState }) {
61 | console.log('Getting external info')
62 | axios.get(`${rootState.blockchainURL}/blocks`).then((res) => {
63 | console.log('EXTERNAL INFO:', res.data[0])
64 | const { height, id: hash, timestamp } = res.data[0]
65 | const time = convertUnixTimestamp(timestamp)
66 |
67 | dispatch('updateCurrentBlock', {
68 | height,
69 | hash,
70 | time,
71 | updatedAt: new Date()
72 | })
73 |
74 | commit(types.SET_LAST_COMMS_EXTERNAL, new Date())
75 | }).catch((err) => {
76 | console.log('blockstream error', err)
77 | })
78 |
79 | }
80 | }
81 |
82 | export default {
83 | namespaced: true,
84 | state,
85 | mutations,
86 | actions
87 | }
88 |
--------------------------------------------------------------------------------
/maintainer/maintainer/config/__init__.py:
--------------------------------------------------------------------------------
1 | import os
2 |
3 | from ether import crypto
4 |
5 | from typing import cast, Tuple, Optional
6 | from maintainer.relay_types import RelayConfig
7 |
8 | CONFIG: RelayConfig
9 |
10 |
11 | def is_infura() -> bool:
12 | pid = get()['PROJECT_ID']
13 | return not pid == ''
14 |
15 |
16 | def _set_keys() -> Tuple[Optional[bytes], Optional[bytes], Optional[str]]:
17 | # Keys
18 | PRIVKEY: Optional[bytes]
19 | PUBKEY: Optional[bytes]
20 | ETH_ADDRESS: Optional[str]
21 |
22 | PRIVKEY_HEX = os.environ.get('SUMMA_RELAY_OPERATOR_KEY', None)
23 | try:
24 | PRIVKEY = bytes.fromhex(cast(str, PRIVKEY_HEX))
25 | except (ValueError, TypeError): # hex errors or is None
26 | PRIVKEY = None
27 |
28 | PUBKEY = crypto.priv_to_pub(PRIVKEY) if PRIVKEY else None
29 |
30 | if PRIVKEY:
31 | ETH_ADDRESS = crypto.priv_to_addr(PRIVKEY)
32 | else:
33 | ETH_ADDRESS = os.environ.get('OPERATOR_ADDRESS', None)
34 |
35 | return PRIVKEY, PUBKEY, ETH_ADDRESS
36 |
37 |
38 | def _set_net() -> Tuple[str, int]:
39 | CHAIN_IDS = {'mainnet': 1, 'ropsten': 3, 'kovan': 42}
40 | NETWORK = os.environ.get('SUMMA_RELAY_ETH_NETWORK', 'ropsten')
41 | if NETWORK in CHAIN_IDS:
42 | CHAIN_ID = CHAIN_IDS[NETWORK]
43 | else:
44 | try:
45 | CHAIN_ID = int(
46 | os.environ.get('SUMMA_RELAY_ETH_CHAIN_ID')) # type: ignore
47 | except (ValueError, TypeError):
48 | CHAIN_ID = 1
49 |
50 | return NETWORK, CHAIN_ID
51 |
52 |
53 | def get() -> RelayConfig:
54 | return CONFIG
55 |
56 |
57 | def set() -> RelayConfig:
58 | BCOIN_HOST = os.environ.get('SUMMA_RELAY_BCOIN_HOST', '127.0.0.1')
59 | API_KEY = os.environ.get('SUMMA_RELAY_BCOIN_API_KEY', '')
60 | BCOIN_PORT = os.environ.get('SUMMA_RELAY_BCOIN_PORT', '8332')
61 |
62 | ETHER_HOST = os.environ.get('SUMMA_RELAY_ETHER_HOST', '127.0.0.1')
63 | ETHER_PORT = os.environ.get('SUMMA_RELAY_ETHER_PORT', '8545')
64 |
65 | GETH_UNLOCK = os.environ.get('SUMMA_RELAY_GETH_UNLOCK', None)
66 |
67 | PRIVKEY, PUBKEY, ETH_ADDRESS = _set_keys()
68 |
69 | NETWORK, CHAIN_ID = _set_net()
70 |
71 | global CONFIG
72 | CONFIG = RelayConfig(
73 | PRIVKEY=PRIVKEY,
74 | PUBKEY=PUBKEY,
75 | ETH_ADDRESS=ETH_ADDRESS,
76 | NETWORK=NETWORK,
77 | CHAIN_ID=CHAIN_ID,
78 | API_KEY=API_KEY,
79 | BCOIN_HOST=BCOIN_HOST,
80 | BCOIN_PORT=BCOIN_PORT,
81 | ETHER_HOST=ETHER_HOST,
82 | ETHER_PORT=ETHER_PORT,
83 | GETH_UNLOCK=GETH_UNLOCK,
84 | ETHER_URL=f'http://{ETHER_HOST}:{ETHER_PORT}',
85 | BCOIN_URL=f'http://x:{API_KEY}@{BCOIN_HOST}:{BCOIN_PORT}',
86 | BCOIN_WS_URL=f'ws://{BCOIN_HOST}:{BCOIN_PORT}',
87 | PROJECT_ID=os.environ.get('SUMMA_RELAY_INFURA_KEY', ''),
88 | CONTRACT=os.environ.get('SUMMA_RELAY_CONTRACT', ''),
89 | )
90 |
91 | return CONFIG
92 |
--------------------------------------------------------------------------------
/golang/x/relay/keeper/handler.go:
--------------------------------------------------------------------------------
1 | package keeper
2 |
3 | import (
4 | "fmt"
5 |
6 | sdk "github.com/cosmos/cosmos-sdk/types"
7 | "github.com/summa-tx/relays/golang/x/relay/types"
8 | )
9 |
10 | // NewHandler returns a handler for relay type messages.
11 | func NewHandler(keeper Keeper) sdk.Handler {
12 | return func(ctx sdk.Context, msg sdk.Msg) sdk.Result {
13 | switch msg := msg.(type) {
14 | case types.MsgIngestHeaderChain:
15 | return handleMsgIngestHeaderChain(ctx, keeper, msg)
16 | case types.MsgIngestDifficultyChange:
17 | return handleMsgIngestDifficultyChange(ctx, keeper, msg)
18 | case types.MsgMarkNewHeaviest:
19 | return handleMsgMarkNewHeaviest(ctx, keeper, msg)
20 | case types.MsgNewRequest:
21 | return handleMsgNewRequest(ctx, keeper, msg)
22 | case types.MsgProvideProof:
23 | return handleMsgProvideProof(ctx, keeper, msg)
24 | default:
25 | errMsg := fmt.Sprintf("Unrecognized relay Msg type: %v", msg.Type())
26 | return sdk.ErrUnknownRequest(errMsg).Result()
27 | }
28 | }
29 | }
30 |
31 | func handleMsgIngestHeaderChain(ctx sdk.Context, keeper Keeper, msg types.MsgIngestHeaderChain) sdk.Result {
32 | err := keeper.IngestHeaderChain(ctx, msg.Headers)
33 | if err != nil {
34 | return err.Result()
35 | }
36 | return sdk.Result{
37 | Events: ctx.EventManager().Events(),
38 | }
39 | }
40 |
41 | func handleMsgIngestDifficultyChange(ctx sdk.Context, keeper Keeper, msg types.MsgIngestDifficultyChange) sdk.Result {
42 | err := keeper.IngestDifficultyChange(ctx, msg.Start, msg.Headers)
43 | if err != nil {
44 | return err.Result()
45 | }
46 | return sdk.Result{
47 | Events: ctx.EventManager().Events(),
48 | }
49 | }
50 |
51 | func handleMsgMarkNewHeaviest(ctx sdk.Context, keeper Keeper, msg types.MsgMarkNewHeaviest) sdk.Result {
52 | err := keeper.MarkNewHeaviest(ctx, msg.Ancestor, msg.CurrentBest, msg.NewBest, msg.Limit)
53 | if err != nil {
54 | return err.Result()
55 | }
56 | return sdk.Result{
57 | Events: ctx.EventManager().Events(),
58 | }
59 | }
60 |
61 | func handleMsgNewRequest(ctx sdk.Context, keeper Keeper, msg types.MsgNewRequest) sdk.Result {
62 | // Validate message
63 | err := msg.ValidateBasic()
64 | if err != nil {
65 | return err.Result()
66 | }
67 |
68 | // TODO: Add more complex permissioning
69 | // Set request
70 | err = keeper.setRequest(ctx, msg.Spends, msg.Pays, msg.PaysValue, msg.NumConfs, msg.Origin, msg.Action)
71 | if err != nil {
72 | return err.Result()
73 | }
74 |
75 | return sdk.Result{
76 | Events: ctx.EventManager().Events(),
77 | }
78 | }
79 |
80 | func handleMsgProvideProof(ctx sdk.Context, keeper Keeper, msg types.MsgProvideProof) sdk.Result {
81 | filled, err := keeper.checkRequestsFilled(ctx, msg.Filled)
82 | if err != nil {
83 | return err.Result()
84 | }
85 |
86 | // Dispatch the proof to the keeper's proof handler
87 | keeper.ProofHandler.HandleValidProof(ctx, msg.Filled, filled)
88 |
89 | return sdk.Result{
90 | Events: ctx.EventManager().Events(),
91 | }
92 | }
93 |
--------------------------------------------------------------------------------
/golang/scripts/json_data/4_ingest_headers.json:
--------------------------------------------------------------------------------
1 | [
2 | {
3 | "raw": "00e0ff2ff8d0a038bfe4027e5de3b6bf07262122636fd2916d75030000000000000000009c66fd29c230fbc348ba962f1c2ca8c6c9bca2cd01fd55006f31d65d0ed139016572425ed41a1217c6c10ed1",
4 | "hash": "5eda4c9ca8947f4f4f9848c55af2ab4ad62758c45d4002000000000000000000",
5 | "height": 616909,
6 | "prevhash": "f8d0a038bfe4027e5de3b6bf07262122636fd2916d7503000000000000000000",
7 | "merkle_root": "9c66fd29c230fbc348ba962f1c2ca8c6c9bca2cd01fd55006f31d65d0ed13901"
8 | },
9 | {
10 | "raw": "000000205eda4c9ca8947f4f4f9848c55af2ab4ad62758c45d40020000000000000000002af75f1c0581dfaf0aa7007deec24ef232fd280d60a16a2ab26ee8dd509c746bcd74425ed41a1217f47350e8",
11 | "hash": "f444030e5d30968f330377646dc657817a35657e6d2907000000000000000000",
12 | "height": 616910,
13 | "prevhash": "5eda4c9ca8947f4f4f9848c55af2ab4ad62758c45d4002000000000000000000",
14 | "merkle_root": "2af75f1c0581dfaf0aa7007deec24ef232fd280d60a16a2ab26ee8dd509c746b"
15 | },
16 | {
17 | "raw": "00e00020f444030e5d30968f330377646dc657817a35657e6d29070000000000000000007ec6b452207ab96612f2b3231325877fa83b814a03e2069bac136f2f0ac42435e176425ed41a12173c2750a5",
18 | "hash": "0efecebfb6c77fa2ee92e5b132b9b5f9653fe7ba64ff07000000000000000000",
19 | "height": 616911,
20 | "prevhash": "f444030e5d30968f330377646dc657817a35657e6d2907000000000000000000",
21 | "merkle_root": "7ec6b452207ab96612f2b3231325877fa83b814a03e2069bac136f2f0ac42435"
22 | },
23 | {
24 | "raw": "00e000200efecebfb6c77fa2ee92e5b132b9b5f9653fe7ba64ff070000000000000000007d1740590d7b907cec752812c3202fed4f1c39ab565d27867981bc6b4d4a20b28578425ed41a12170984291f",
25 | "hash": "f1b67a58e98576479c7ba7202174ade860e3afff6d8110000000000000000000",
26 | "height": 616912,
27 | "prevhash": "0efecebfb6c77fa2ee92e5b132b9b5f9653fe7ba64ff07000000000000000000",
28 | "merkle_root": "7d1740590d7b907cec752812c3202fed4f1c39ab565d27867981bc6b4d4a20b2"
29 | },
30 | {
31 | "raw": "00000020f1b67a58e98576479c7ba7202174ade860e3afff6d81100000000000000000009293b2d0c8f430a39cc8346c5478df790027176b5d26b0893a088ff6179b63108a7a425ed41a12173041448e",
32 | "hash": "51677bf39dd3318f95bc79d821e936013f717528adbe0b000000000000000000",
33 | "height": 616913,
34 | "prevhash": "f1b67a58e98576479c7ba7202174ade860e3afff6d8110000000000000000000",
35 | "merkle_root": "9293b2d0c8f430a39cc8346c5478df790027176b5d26b0893a088ff6179b6310"
36 | },
37 | {
38 | "raw": "0000002051677bf39dd3318f95bc79d821e936013f717528adbe0b0000000000000000008bdd1ff50a88a852d497061c51542db436818bd3b6b2455c0f6333d7533cffd1387c425ed41a12175f902567",
39 | "hash": "e6f0334bf990fe5172f3b15575347f4269f4e420614f10000000000000000000",
40 | "height": 616914,
41 | "prevhash": "51677bf39dd3318f95bc79d821e936013f717528adbe0b000000000000000000",
42 | "merkle_root": "8bdd1ff50a88a852d497061c51542db436818bd3b6b2455c0f6333d7533cffd1"
43 | }
44 | ]
45 |
--------------------------------------------------------------------------------
/golang/cmd/relayd/main.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "encoding/json"
5 | "io"
6 |
7 | "github.com/cosmos/cosmos-sdk/server"
8 | "github.com/cosmos/cosmos-sdk/x/genaccounts"
9 | genaccscli "github.com/cosmos/cosmos-sdk/x/genaccounts/client/cli"
10 | "github.com/cosmos/cosmos-sdk/x/staking"
11 |
12 | "github.com/spf13/cobra"
13 | "github.com/tendermint/tendermint/libs/cli"
14 | "github.com/tendermint/tendermint/libs/log"
15 |
16 | sdk "github.com/cosmos/cosmos-sdk/types"
17 | genutilcli "github.com/cosmos/cosmos-sdk/x/genutil/client/cli"
18 | abci "github.com/tendermint/tendermint/abci/types"
19 | tmtypes "github.com/tendermint/tendermint/types"
20 | dbm "github.com/tendermint/tm-db"
21 |
22 | app "github.com/summa-tx/relays/golang"
23 | )
24 |
25 | func main() {
26 | cobra.EnableCommandSorting = false
27 |
28 | cdc := app.MakeCodec()
29 |
30 | config := sdk.GetConfig()
31 | config.SetBech32PrefixForAccount(sdk.Bech32PrefixAccAddr, sdk.Bech32PrefixAccPub)
32 | config.SetBech32PrefixForValidator(sdk.Bech32PrefixValAddr, sdk.Bech32PrefixValPub)
33 | config.SetBech32PrefixForConsensusNode(sdk.Bech32PrefixConsAddr, sdk.Bech32PrefixConsPub)
34 | config.Seal()
35 |
36 | ctx := server.NewDefaultContext()
37 |
38 | rootCmd := &cobra.Command{
39 | Use: "relayd",
40 | Short: "relay App Daemon (server)",
41 | PersistentPreRunE: server.PersistentPreRunEFn(ctx),
42 | }
43 | // CLI commands to initialize the chain
44 | rootCmd.AddCommand(
45 | genutilcli.InitCmd(ctx, cdc, app.ModuleBasics, app.DefaultNodeHome),
46 | genutilcli.CollectGenTxsCmd(ctx, cdc, genaccounts.AppModuleBasic{}, app.DefaultNodeHome),
47 | genutilcli.GenTxCmd(ctx, cdc, app.ModuleBasics, staking.AppModuleBasic{}, genaccounts.AppModuleBasic{}, app.DefaultNodeHome, app.DefaultCLIHome),
48 | genutilcli.ValidateGenesisCmd(ctx, cdc, app.ModuleBasics),
49 | // AddGenesisAccountCmd allows users to add accounts to the genesis file
50 | genaccscli.AddGenesisAccountCmd(ctx, cdc, app.DefaultNodeHome, app.DefaultCLIHome),
51 | )
52 |
53 | server.AddCommands(ctx, cdc, rootCmd, newApp, exportAppStateAndTMValidators)
54 |
55 | // prepare and add flags
56 | executor := cli.PrepareBaseCmd(rootCmd, "RE", app.DefaultNodeHome)
57 | err := executor.Execute()
58 | if err != nil {
59 | panic(err)
60 | }
61 | }
62 |
63 | func newApp(logger log.Logger, db dbm.DB, traceStore io.Writer) abci.Application {
64 | return app.NewRelayApp(logger, db)
65 | }
66 |
67 | func exportAppStateAndTMValidators(
68 | logger log.Logger, db dbm.DB, traceStore io.Writer, height int64, forZeroHeight bool, jailWhiteList []string,
69 | ) (json.RawMessage, []tmtypes.GenesisValidator, error) {
70 |
71 | if height != -1 {
72 | relayApp := app.NewRelayApp(logger, db)
73 | err := relayApp.LoadHeight(height)
74 | if err != nil {
75 | return nil, nil, err
76 | }
77 | return relayApp.ExportAppStateAndValidators(forZeroHeight, jailWhiteList)
78 | }
79 |
80 | relayApp := app.NewRelayApp(logger, db)
81 |
82 | return relayApp.ExportAppStateAndValidators(forZeroHeight, jailWhiteList)
83 | }
84 |
--------------------------------------------------------------------------------
/maintainer/maintainer/ethereum/contract.py:
--------------------------------------------------------------------------------
1 | import logging
2 | from ether import abi, calldata, events
3 |
4 | from maintainer import config
5 | from maintainer.ethereum import shared
6 | from maintainer.relay_abi import ABI as relay_ABI
7 |
8 | from typing import cast
9 |
10 | CLOSED = events._make_topic0(
11 | abi.find('RequestClosed', relay_ABI)[0])
12 | FILLED = events._make_topic0(
13 | abi.find('RequestFilled', relay_ABI)[0])
14 |
15 | logger = logging.getLogger('root.summa_relay.eth_contract')
16 |
17 |
18 | async def find_height(digest_le: bytes) -> int:
19 | data = calldata.call(
20 | "findHeight",
21 | [digest_le],
22 | relay_ABI)
23 | res = await shared.CONNECTION._RPC(
24 | method='eth_call',
25 | params=[
26 | {
27 | 'from': config.get()['ETH_ADDRESS'],
28 | 'to': config.get()['CONTRACT'],
29 | 'data': f'0x{data.hex()}'
30 | },
31 | 'latest' # block height parameter
32 | ]
33 | )
34 | # if more than 1 ABI slot long, return 0
35 | if len(res) > 36:
36 | logger.debug(f'findHeight for {digest_le.hex()} is unknown')
37 | return 0
38 | logger.debug(f'findHeight for {digest_le.hex()} is {res}')
39 | return int(res, 16)
40 |
41 |
42 | async def has_block(digest_le: bytes) -> bool:
43 | '''Check if the relay knows of a block'''
44 | height = await find_height(digest_le)
45 | logger.debug(f'height is {height}')
46 | return height != 0
47 |
48 |
49 | async def is_ancestor(
50 | ancestor: bytes,
51 | descendant: bytes,
52 | limit: int = 240) -> bool:
53 | '''
54 | Determine if ancestor precedes descendant
55 | ancestor and descendant MUST be LE
56 | '''
57 | data = calldata.call(
58 | "isAncestor",
59 | [ancestor, descendant, limit],
60 | relay_ABI)
61 | res = await shared.CONNECTION._RPC(
62 | method='eth_call',
63 | params=[
64 | {
65 | 'from': config.get()['ETH_ADDRESS'],
66 | 'to': config.get()['CONTRACT'],
67 | 'data': f'0x{data.hex()}'
68 | },
69 | 'latest' # block height parameter
70 | ]
71 | )
72 | # returned as 0x-prepended hex string representing 32 bytes
73 | return bool(int(res, 16))
74 |
75 |
76 | async def get_best_block() -> str:
77 | '''
78 | Get the contract's marked best known digest.
79 | Counterintuitively, the contract may know of a better digest
80 | that hasn't been marked yet
81 |
82 | returns LE digest
83 | '''
84 | f = abi.find('getBestKnownDigest', relay_ABI)[0]
85 | selector = calldata.make_selector(f)
86 | res = await shared.CONNECTION._RPC(
87 | method='eth_call',
88 | params=[
89 | {
90 | 'from': config.get()['ETH_ADDRESS'],
91 | 'to': config.get()['CONTRACT'],
92 | 'data': f'0x{selector.hex()}'
93 | },
94 | 'latest' # block height parameter
95 | ]
96 | )
97 | return cast(str, res[2:]) # block-explorer format
98 |
--------------------------------------------------------------------------------
/golang/cli_test/README.md:
--------------------------------------------------------------------------------
1 | # Relay CLI Integration tests
2 |
3 | The relay cli integration tests live in this folder. You can run the full suite by running:
4 |
5 | ```bash
6 | go test -mod=readonly -p 4 `go list ./cli_test/...`
7 | ```
8 |
9 | To run a single test run:
10 | ```bash
11 | go test -mod=readonly -p 4 `go list ./cli_test/...` -testify.m TestName
12 | ```
13 |
14 | > NOTE: While the full suite runs in parallel, some of the tests can take up to a minute to complete
15 |
16 | ### Test Structure
17 |
18 | This integration suite [uses a thin wrapper](https://godoc.org/github.com/cosmos/cosmos-sdk/tests) over the [`os/exec`](https://golang.org/pkg/os/exec/) package. This allows the integration tests to run against the built binaries (both `relayd` and `relaycli` are used) while being written in golang. Tests can therefore take advantage of the golang code we already have for operations like marshal/unmarshal, crypto, etc...
19 |
20 | > NOTE: The tests will use whatever `relayd` or `relaycli` binaries are available in your `$GOPATH/bin`. You can check which binary will be run by the suite by running `which relayd` or `which relaycli`. If you have your `$GOPATH` properly set up, they should be in `$GOPATH/bin/relay*`. This ensures that your tests use the latest binaries you have built.
21 |
22 | Tests generally follow this structure:
23 |
24 | ```go
25 | func (suite *UtilsSuite) TestMyNewCommand() {
26 | suite.T().Parallel()
27 | f := InitFixtures(suite.T())
28 |
29 | // start relayd server
30 | proc := f.GDStart()
31 | defer proc.Stop(false)
32 |
33 | // Your test code goes here...
34 |
35 | f.Cleanup()
36 | }
37 | ```
38 |
39 | This boilerplate above:
40 |
41 | - Ensures the tests run in parallel. Because the tests are calling out to `os/exec` for many operations these tests can take a long time to run.
42 | - Creates `.relayd` and `.relaycli` folders in a new temp folder.
43 | - Uses `relaycli` to create a test account for use in testing: `foo`
44 | - Creates a genesis file with coins (`1000footoken,1000feetoken,150stake`) controlled by the `foo` key
45 | - Generates an initial bonding transaction (`gentx`) to make the `foo` key a validator at genesis
46 | - Starts `relayd` and stops it once the test exits
47 | - Cleans up test state on a successful run
48 |
49 | ### Notes when adding/running tests
50 |
51 | - Because the tests run against a built binary, you should rebuild every time the code changes before testing again; otherwise you will be testing against an older version. If you are adding new tests, this can easily lead to confusing test results.
52 | - The [`test_helpers.go`](./test_helpers.go) file is organized according to the format of `relaycli` and `relayd` commands. There are comments with section headers describing the different areas. Helper functions to call CLI functionality are generally named after the command (e.g. `relaycli query bestknowndigest` would be `QueryBestKnownDigest`). Try to keep functions grouped by their position in the command tree.
53 | - Test state that is needed by `tx` and `query` commands (`home`, `chain_id`, etc...) is stored on the `Fixtures` object. This makes constructing your new tests almost trivial. Each test needs a unique `Fixtures` object to run in parallel.
54 | - If you exit a test early, there can be still-running `relayd` and `relaycli` processes that will interrupt subsequent runs. Leftover `relayd` processes will block ports and prevent new tests from spinning up. You can ensure new tests spin up clean by running `pkill -9 relayd && pkill -9 relaycli` before each test run.
55 | - Most `query` and `tx` commands take a variadic `flags` argument. This pattern allows for the creation of a general function which is easily modified by adding flags.
56 | - `Tx*` functions follow a general pattern and return `(success bool, stdout string, stderr string)`. This allows for easy testing of multiple different flag configurations (a minimal sketch of this convention follows below).
57 |
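For illustration, here is a minimal, self-contained sketch of that `Tx*` convention. It is not the real `test_helpers.go` code: the helper name, its argument, and the direct use of `os/exec` (rather than the suite's test wrapper) are assumptions made for this example.

```go
package clitest

import (
	"bytes"
	"os/exec"
)

// TxIngestHeaders is a hypothetical helper wrapping `relaycli tx relay ingestheaders`.
// It is named after the command, takes variadic flags, and returns
// (success, stdout, stderr), mirroring the Tx* convention described above.
func TxIngestHeaders(headersJSON string, flags ...string) (bool, string, string) {
	args := append([]string{"tx", "relay", "ingestheaders", headersJSON}, flags...)
	cmd := exec.Command("relaycli", args...)

	var stdout, stderr bytes.Buffer
	cmd.Stdout = &stdout
	cmd.Stderr = &stderr

	err := cmd.Run()
	return err == nil, stdout.String(), stderr.String()
}
```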
--------------------------------------------------------------------------------
/golang/x/relay/module.go:
--------------------------------------------------------------------------------
1 | package relay
2 |
3 | import (
4 | "encoding/json"
5 |
6 | "github.com/gorilla/mux"
7 | "github.com/spf13/cobra"
8 |
9 | "github.com/cosmos/cosmos-sdk/codec"
10 | "github.com/cosmos/cosmos-sdk/types/module"
11 |
12 | "github.com/summa-tx/relays/golang/x/relay/client/cli"
13 | "github.com/summa-tx/relays/golang/x/relay/client/rest"
14 | "github.com/summa-tx/relays/golang/x/relay/keeper"
15 |
16 | "github.com/cosmos/cosmos-sdk/client/context"
17 | sdk "github.com/cosmos/cosmos-sdk/types"
18 | abci "github.com/tendermint/tendermint/abci/types"
19 | )
20 |
21 | // type check to ensure the interface is properly implemented
22 | var (
23 | _ module.AppModule = AppModule{}
24 | _ module.AppModuleBasic = AppModuleBasic{}
25 | )
26 |
27 | // AppModuleBasic is the app module basics object
28 | type AppModuleBasic struct{}
29 |
30 | // Name returns the module's name
31 | func (AppModuleBasic) Name() string {
32 | return ModuleName
33 | }
34 |
35 | // RegisterCodec registers the module's types with the codec
36 | func (AppModuleBasic) RegisterCodec(cdc *codec.Codec) {
37 | RegisterCodec(cdc)
38 | }
39 |
40 | // DefaultGenesis returns the default genesis state as raw JSON
41 | func (AppModuleBasic) DefaultGenesis() json.RawMessage {
42 | return ModuleCdc.MustMarshalJSON(DefaultGenesisState())
43 | }
44 |
45 | // ValidateGenesis performs a validity check on the genesis state
46 | func (AppModuleBasic) ValidateGenesis(bz json.RawMessage) error {
47 | var data GenesisState
48 | err := ModuleCdc.UnmarshalJSON(bz, &data)
49 | if err != nil {
50 | return err
51 | }
52 | // Once the JSON is successfully unmarshalled, it is passed along to genesis.go
53 | return ValidateGenesis(data)
54 | }
55 |
56 | // RegisterRESTRoutes registers rest routes
57 | func (AppModuleBasic) RegisterRESTRoutes(ctx context.CLIContext, rtr *mux.Router) {
58 | rest.RegisterRoutes(ctx, rtr, StoreKey)
59 | }
60 |
61 | // GetQueryCmd returns the root query command of this module
62 | func (AppModuleBasic) GetQueryCmd(cdc *codec.Codec) *cobra.Command {
63 | return cli.GetQueryCmd(StoreKey, cdc)
64 | }
65 |
66 | // GetTxCmd returns the root tx command of this module
67 | func (AppModuleBasic) GetTxCmd(cdc *codec.Codec) *cobra.Command {
68 | return cli.GetTxCmd(StoreKey, cdc)
69 | }
70 |
71 | // AppModule wraps AppModuleBasic with the module's keeper
72 | type AppModule struct {
73 | AppModuleBasic
74 | keeper Keeper
75 | }
76 |
77 | // NewAppModule creates a new AppModule Object
78 | func NewAppModule(k Keeper) AppModule {
79 | return AppModule{
80 | AppModuleBasic: AppModuleBasic{},
81 | keeper: k,
82 | }
83 | }
84 |
85 | // Name returns the module's name
86 | func (AppModule) Name() string {
87 | return ModuleName
88 | }
89 |
90 | // RegisterInvariants registers the module's invariants (none for this module)
91 | func (am AppModule) RegisterInvariants(ir sdk.InvariantRegistry) {}
92 |
93 | // Route returns the module's message routing key
94 | func (am AppModule) Route() string {
95 | return RouterKey
96 | }
97 |
98 | // NewHandler makes a new handler
99 | func (am AppModule) NewHandler() sdk.Handler {
100 | return keeper.NewHandler(am.keeper)
101 | }
102 |
103 | // QuerierRoute returns the module's querier route name
104 | func (am AppModule) QuerierRoute() string {
105 | return ModuleName
106 | }
107 |
108 | // NewQuerierHandler returns the module's querier
109 | func (am AppModule) NewQuerierHandler() sdk.Querier {
110 | return NewQuerier(am.keeper)
111 | }
112 |
113 | // BeginBlock is a no-op for this module
114 | func (am AppModule) BeginBlock(_ sdk.Context, _ abci.RequestBeginBlock) {}
115 |
116 | // EndBlock returns an empty validator update set
117 | func (am AppModule) EndBlock(sdk.Context, abci.RequestEndBlock) []abci.ValidatorUpdate {
118 | return []abci.ValidatorUpdate{}
119 | }
120 |
121 | // InitGenesis initializes the module's state from genesis data and returns validator updates
122 | func (am AppModule) InitGenesis(ctx sdk.Context, data json.RawMessage) []abci.ValidatorUpdate {
123 | var genesisState GenesisState
124 | ModuleCdc.MustUnmarshalJSON(data, &genesisState)
125 | return InitGenesis(ctx, am.keeper, genesisState)
126 | }
127 |
128 | // ExportGenesis exports the module's state as genesis JSON
129 | func (am AppModule) ExportGenesis(ctx sdk.Context) json.RawMessage {
130 | gs := ExportGenesis(ctx, am.keeper)
131 | return ModuleCdc.MustMarshalJSON(gs)
132 | }
133 |
--------------------------------------------------------------------------------
/solidity/contracts/TestnetRelay.sol:
--------------------------------------------------------------------------------
1 | pragma solidity ^0.5.10;
2 |
3 | /** @title TestnetRelay */
4 | /** @author Summa (https://summa.one) */
5 |
6 | import {OnDemandSPV} from "./OnDemandSPV.sol";
7 | import {TypedMemView} from "@summa-tx/bitcoin-spv-sol/contracts/TypedMemView.sol";
8 |
9 | contract TestnetRelay is OnDemandSPV {
10 |
11 | constructor(
12 | bytes memory _genesisHeader,
13 | uint256 _height,
14 | bytes32 _periodStart,
15 | uint256 _firstID
16 | ) OnDemandSPV(
17 | _genesisHeader,
18 | _height,
19 | _periodStart,
20 | _firstID
21 | ) public {return ;}
22 |
23 | function _addHeadersWithRetarget(
24 | bytes memory, // _oldPeriodStartHeader,
25 | bytes memory _oldPeriodEndHeader,
26 | bytes memory _headers
27 | ) internal returns (bool) {
28 | bytes29 _oldEnd = _oldPeriodEndHeader.ref(0).tryAsHeader();
29 | bytes29 _headersView = _headers.ref(0).tryAsHeaderArray();
30 |
31 | require(
32 | _oldEnd.notNull() && _headersView.notNull(),
33 | "Bad args. Check header and array byte lengths."
34 | );
35 | return _addHeaders(_oldEnd, _headersView, true);
36 | }
37 |
38 | /// @notice Adds headers to storage after validating
39 | /// @dev We check integrity and consistency of the header chain
41 | /// @param _anchor The header immediately preceding the new chain
41 | /// @param _headers A tightly-packed list of new 80-byte Bitcoin headers to record
42 | /// @return True if successfully written, error otherwise
43 | function _addHeaders(bytes29 _anchor, bytes29 _headers, bool _internal) internal returns (bool) {
44 | /// Extract basic info
45 | bytes32 _previousDigest = _anchor.hash256();
46 | uint256 _anchorHeight = _findHeight(_previousDigest); /* NB: errors if unknown */
47 | uint256 _target = _headers.indexHeaderArray(0).target();
48 |
49 | require(
50 | _internal || _anchor.target() == _target,
51 | "Unexpected retarget on external call"
52 | );
53 |
54 | /*
55 | NB:
56 | 1. check that the header has sufficient work
57 | 2. check that headers are in a coherent chain (no retargets, hash links good)
58 | 3. Store the block connection
59 | 4. Store the height
60 | */
61 | uint256 _height;
62 | bytes32 _currentDigest;
63 | for (uint256 i = 0; i < _headers.len() / 80; i += 1) {
64 | bytes29 _header = _headers.indexHeaderArray(i);
65 | _height = _anchorHeight.add(i + 1);
66 | _currentDigest = _header.hash256();
67 |
68 | /*
69 | NB:
70 | if the block is already authenticated, we don't need to do a work check
71 | or write anything to state. This saves gas
72 | */
73 | if (previousBlock[_currentDigest] == bytes32(0)) {
74 | require(
75 | TypedMemView.reverseUint256(uint256(_currentDigest)) <= _target,
76 | "Header work is insufficient"
77 | );
78 | previousBlock[_currentDigest] = _previousDigest;
79 | if (_height % HEIGHT_INTERVAL == 0) {
80 | /*
81 | NB: We store the height only every 4th header to save gas
82 | */
83 | blockHeight[_currentDigest] = _height;
84 | }
85 | }
86 |
87 | /* NB: we do still need to make chain level checks tho */
88 | require(_header.target() == _target, "Target changed unexpectedly");
89 | require(_header.checkParent(_previousDigest), "Headers do not form a consistent chain");
90 |
91 | _previousDigest = _currentDigest;
92 | }
93 |
94 | emit Extension(
95 | _anchor.hash256(),
96 | _currentDigest);
97 | return true;
98 | }
99 | }
100 |
--------------------------------------------------------------------------------
/golang/x/relay/keeper/validator_test.go:
--------------------------------------------------------------------------------
1 | package keeper
2 |
3 | import (
4 | sdk "github.com/cosmos/cosmos-sdk/types"
5 | "github.com/summa-tx/relays/golang/x/relay/types"
6 | )
7 |
8 | func (s *KeeperSuite) TestGetConfs() {
9 | header := s.Fixtures.ValidatorTestCases.ValidateProof[0].Proof.ConfirmingHeader
10 | bestKnown := s.Fixtures.ValidatorTestCases.ValidateProof[0].BestKnown
11 |
12 | // errors if Best Known Digest is not found
13 | confs, err := s.Keeper.getConfs(s.Context, header)
14 | s.Equal(sdk.CodeType(types.BadHash256Digest), err.Code())
15 | s.Equal(uint32(0), confs)
16 |
17 | // errors if Best Known Digest header is not found
18 | s.Keeper.setBestKnownDigest(s.Context, bestKnown.Hash)
19 |
20 | confs, err = s.Keeper.getConfs(s.Context, header)
21 | s.Equal(sdk.CodeType(types.UnknownBlock), err.Code())
22 | s.Equal(uint32(0), confs)
23 |
24 | // success
25 | s.Keeper.ingestHeader(s.Context, bestKnown)
26 |
27 | confs, err = s.Keeper.getConfs(s.Context, header)
28 | s.SDKNil(err)
29 | s.Equal(uint32(4), confs)
30 | }
31 |
32 | func (s *KeeperSuite) TestValidateProof() {
33 | proofCases := s.Fixtures.ValidatorTestCases.ValidateProof
34 | proof := proofCases[0].Proof
35 |
36 | // errors if LCA is not found
37 | err := s.Keeper.validateProof(s.Context, proof)
38 | s.Equal(sdk.CodeType(types.BadHash256Digest), err.Code())
39 |
40 | // errors if link is not found
41 | s.Keeper.setLastReorgLCA(s.Context, proofCases[0].LCA)
42 |
43 | err = s.Keeper.validateProof(s.Context, proof)
44 | s.Equal(sdk.CodeType(types.NotAncestor), err.Code())
45 |
46 | for i := range proofCases {
47 | // Store lots of stuff
48 | s.Keeper.setLastReorgLCA(s.Context, proofCases[i].LCA)
49 | s.Keeper.ingestHeader(s.Context, proofCases[i].Proof.ConfirmingHeader)
50 | s.Keeper.setLink(s.Context, proofCases[i].Proof.ConfirmingHeader)
51 |
52 | if proofCases[i].Error != 0 {
53 | err := s.Keeper.validateProof(s.Context, proofCases[i].Proof)
54 | s.Equal(sdk.CodeType(proofCases[i].Error), err.Code())
55 | } else {
56 | err := s.Keeper.validateProof(s.Context, proofCases[i].Proof)
57 | s.Nil(err)
58 | }
59 | }
60 | }
61 |
62 | func (s *KeeperSuite) TestCheckRequestsFilled() {
63 | tc := s.Fixtures.ValidatorTestCases.CheckRequestsFilled
64 | validProof := s.Fixtures.ValidatorTestCases.ValidateProof[0]
65 |
66 | s.Keeper.setLastReorgLCA(s.Context, validProof.LCA)
67 | s.Keeper.ingestHeader(s.Context, validProof.Proof.ConfirmingHeader)
68 | s.Keeper.setLink(s.Context, validProof.Proof.ConfirmingHeader)
69 | s.Keeper.ingestHeader(s.Context, validProof.BestKnown)
70 | requestErr := s.Keeper.setRequest(s.Context, []byte{}, []byte{}, 0, 4, types.Local, nil)
71 | s.Nil(requestErr)
72 |
73 | // errors if getConfs fails
74 | _, err := s.Keeper.checkRequestsFilled(s.Context, tc[0].FilledRequests)
75 | s.Equal(sdk.CodeType(types.BadHash256Digest), err.Code())
76 |
77 | s.Keeper.setBestKnownDigest(s.Context, validProof.BestKnown.Hash)
78 |
79 | // errors if checkRequest errors
80 | // deactivate request
81 | activeErr := s.Keeper.setRequestState(s.Context, types.RequestID{}, false)
82 | s.SDKNil(activeErr)
83 |
84 | _, err = s.Keeper.checkRequestsFilled(s.Context, tc[0].FilledRequests)
85 | s.Equal(sdk.CodeType(types.ClosedRequest), err.Code())
86 |
87 | // reactivate request
88 | activeErr = s.Keeper.setRequestState(s.Context, types.RequestID{}, true)
89 | s.SDKNil(activeErr)
90 |
91 | for i := range tc {
92 | _, err := s.Keeper.checkRequestsFilled(s.Context, tc[i].FilledRequests)
93 | if tc[i].Error != 0 {
94 | s.Equal(sdk.CodeType(tc[i].Error), err.Code())
95 | } else {
96 | s.SDKNil(err)
97 | }
98 | }
99 |
100 | // errors if the proof's confirmations are fewer than the confirmations required by the request
101 | requestErr = s.Keeper.setRequest(s.Context, []byte{0}, []byte{0}, 0, 5, types.Local, nil)
102 | s.Nil(requestErr)
103 |
104 | copiedRequest := tc[0].FilledRequests
105 | copiedRequest.Filled[0].ID = types.RequestID{0, 0, 0, 0, 0, 0, 0, 1}
106 | _, err = s.Keeper.checkRequestsFilled(s.Context, copiedRequest)
107 | s.Equal(sdk.CodeType(types.NotEnoughConfs), err.Code())
108 | }
109 |
--------------------------------------------------------------------------------
/solidity/contracts/test/DummyOnDemandSPV.sol:
--------------------------------------------------------------------------------
1 | pragma solidity ^0.5.10;
2 |
3 | /** @title OnDemandSPV */
4 | /** @author Summa (https://summa.one) */
5 |
6 | import {ISPVConsumer} from "../Interfaces.sol";
7 | import {OnDemandSPV} from "../OnDemandSPV.sol";
8 |
9 | contract DummyConsumer is ISPVConsumer {
10 | event Consumed(bytes32 indexed _txid, uint256 indexed _requestID, uint256 _gasLeft);
11 |
12 | bool broken = false;
13 |
14 | function setBroken(bool _b) external {
15 | broken = _b;
16 | }
17 |
18 | function spv(
19 | bytes32 _txid,
20 | bytes calldata,
21 | bytes calldata,
22 | uint256 _requestID,
23 | uint8,
24 | uint8
25 | ) external {
26 | emit Consumed(_txid, _requestID, gasleft());
27 | if (broken) {
28 | revert("BORKED");
29 | }
30 | }
31 |
32 | function cancel(
33 | uint256 _requestID,
34 | address payable _odspv
35 | ) external returns (bool) {
36 | return OnDemandSPV(_odspv).cancelRequest(_requestID);
37 | }
38 | }
39 |
40 | contract DummyOnDemandSPV is OnDemandSPV {
41 |
42 | constructor(
43 | bytes memory _genesisHeader,
44 | uint256 _height,
45 | bytes32 _periodStart,
46 | uint256 _firstID
47 | ) OnDemandSPV(
48 | _genesisHeader,
49 | _height,
50 | _periodStart,
51 | _firstID
52 | ) public {return ;}
53 |
54 | bool callResult = false;
55 |
56 | function requestTest(
57 | uint256 _requestID,
58 | bytes calldata _spends,
59 | bytes calldata _pays,
60 | uint64 _paysValue,
61 | address _consumer,
62 | uint8 _numConfs,
63 | uint256 _notBefore
64 | ) external returns (uint256) {
65 | nextID = _requestID;
66 | return _request(_spends, _pays, _paysValue, _consumer, _numConfs, _notBefore);
67 | }
68 |
69 | function setCallResult(bool _r) external {
70 | callResult = _r;
71 | }
72 |
73 | function _isAncestor(bytes32, bytes32, uint256) internal view returns (bool) {
74 | return callResult;
75 | }
76 |
77 | function getValidatedTx(bytes32 _txid) public view returns (bool) {
78 | return validatedTxns[_txid];
79 | }
80 |
81 | function setValidatedTx(bytes32 _txid) public {
82 | validatedTxns[_txid] = true;
83 | }
84 |
85 | function unsetValidatedTx(bytes32 _txid) public {
86 | validatedTxns[_txid] = false;
87 | }
88 |
89 | function callCallback(
90 | bytes32 _txid,
91 | uint16 _reqIndices,
92 | bytes calldata _vin,
93 | bytes calldata _vout,
94 | uint256 _requestID
95 | ) external returns (bool) {
96 | return _callCallback(_txid, _reqIndices, _vin, _vout, _requestID);
97 | }
98 |
99 | function checkInclusion(
100 | bytes calldata _header,
101 | bytes calldata _proof,
102 | uint256 _index,
103 | bytes32 _txid,
104 | uint256 _requestID
105 | ) external view returns (bool) {
106 | return _checkInclusion(
107 | _header.ref(0).tryAsHeader().assertValid(),
108 | _proof.ref(0).tryAsMerkleArray().assertValid(),
109 | _index,
110 | _txid,
111 | _requestID
112 | );
113 | }
114 |
115 | function _getConfs(bytes32 _header) internal view returns (uint8){
116 | if (_header == bytes32(0)) {
117 | return OnDemandSPV._getConfs(lastReorgCommonAncestor);
118 | }
119 | return 8;
120 | }
121 |
122 | function getConfsTest() external view returns (uint8) {
123 | return _getConfs(bytes32(0));
124 | }
125 |
126 | function checkRequests(
127 | uint16 _requestIndices,
128 | bytes calldata _vin,
129 | bytes calldata _vout,
130 | uint256 _requestID
131 | ) external view returns (bool) {
132 | return _checkRequests(_requestIndices, _vin, _vout, _requestID);
133 | }
134 |
135 | function whatTimeIsItRightNowDotCom() external view returns (uint256) {
136 | return block.timestamp;
137 | }
138 | }
139 |
--------------------------------------------------------------------------------
/golang/cmd/relaycli/main.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "os"
5 | "path"
6 |
7 | "github.com/cosmos/cosmos-sdk/client"
8 | "github.com/cosmos/cosmos-sdk/client/keys"
9 | "github.com/cosmos/cosmos-sdk/client/lcd"
10 | "github.com/cosmos/cosmos-sdk/client/rpc"
11 | sdk "github.com/cosmos/cosmos-sdk/types"
12 | "github.com/cosmos/cosmos-sdk/version"
13 | authcmd "github.com/cosmos/cosmos-sdk/x/auth/client/cli"
14 | bankcmd "github.com/cosmos/cosmos-sdk/x/bank/client/cli"
15 | "github.com/spf13/cobra"
16 | "github.com/spf13/viper"
17 | app "github.com/summa-tx/relays/golang"
18 | amino "github.com/tendermint/go-amino"
19 | "github.com/tendermint/tendermint/libs/cli"
20 | )
21 |
22 | func main() {
23 | cobra.EnableCommandSorting = false
24 |
25 | cdc := app.MakeCodec()
26 |
27 | // Read in the configuration file for the sdk
28 | config := sdk.GetConfig()
29 | config.SetBech32PrefixForAccount(sdk.Bech32PrefixAccAddr, sdk.Bech32PrefixAccPub)
30 | config.SetBech32PrefixForValidator(sdk.Bech32PrefixValAddr, sdk.Bech32PrefixValPub)
31 | config.SetBech32PrefixForConsensusNode(sdk.Bech32PrefixConsAddr, sdk.Bech32PrefixConsPub)
32 | config.Seal()
33 |
34 | rootCmd := &cobra.Command{
35 | Use: "relaycli",
36 | Short: "relay Client",
37 | }
38 |
39 | // Add --chain-id to persistent flags and mark it required
40 | rootCmd.PersistentFlags().String(client.FlagChainID, "", "Chain ID of tendermint node")
41 | rootCmd.PersistentPreRunE = func(_ *cobra.Command, _ []string) error {
42 | return initConfig(rootCmd)
43 | }
44 |
45 | // Construct Root Command
46 | rootCmd.AddCommand(
47 | rpc.StatusCommand(),
48 | client.ConfigCmd(app.DefaultCLIHome),
49 | queryCmd(cdc),
50 | txCmd(cdc),
51 | client.LineBreak,
52 | lcd.ServeCommand(cdc, registerRoutes),
53 | client.LineBreak,
54 | keys.Commands(),
55 | client.LineBreak,
56 | version.Cmd,
57 | client.NewCompletionCmd(rootCmd, true),
58 | )
59 |
60 | executor := cli.PrepareMainCmd(rootCmd, "NS", app.DefaultCLIHome)
61 | err := executor.Execute()
62 | if err != nil {
63 | panic(err)
64 | }
65 | }
66 |
67 | func registerRoutes(rs *lcd.RestServer) {
68 | client.RegisterRoutes(rs.CliCtx, rs.Mux)
69 | app.ModuleBasics.RegisterRESTRoutes(rs.CliCtx, rs.Mux)
70 | }
71 |
72 | func queryCmd(cdc *amino.Codec) *cobra.Command {
73 | queryCmd := &cobra.Command{
74 | Use: "query",
75 | Aliases: []string{"q"},
76 | Short: "Querying subcommands",
77 | }
78 |
79 | queryCmd.AddCommand(
80 | authcmd.GetAccountCmd(cdc),
81 | client.LineBreak,
82 | rpc.ValidatorCommand(cdc),
83 | rpc.BlockCommand(),
84 | authcmd.QueryTxsByEventsCmd(cdc),
85 | authcmd.QueryTxCmd(cdc),
86 | client.LineBreak,
87 | )
88 |
89 | // add modules' query commands
90 | app.ModuleBasics.AddQueryCommands(queryCmd, cdc)
91 |
92 | return queryCmd
93 | }
94 |
95 | func txCmd(cdc *amino.Codec) *cobra.Command {
96 | txCmd := &cobra.Command{
97 | Use: "tx",
98 | Short: "Transactions subcommands",
99 | }
100 |
101 | txCmd.AddCommand(
102 | bankcmd.SendTxCmd(cdc),
103 | client.LineBreak,
104 | authcmd.GetSignCommand(cdc),
105 | authcmd.GetMultiSignCommand(cdc),
106 | client.LineBreak,
107 | authcmd.GetBroadcastCommand(cdc),
108 | authcmd.GetEncodeCommand(cdc),
109 | client.LineBreak,
110 | )
111 |
112 | // add modules' tx commands
113 | app.ModuleBasics.AddTxCommands(txCmd, cdc)
114 |
115 | return txCmd
116 | }
117 |
118 | func initConfig(cmd *cobra.Command) error {
119 | home, err := cmd.PersistentFlags().GetString(cli.HomeFlag)
120 | if err != nil {
121 | return err
122 | }
123 |
124 | cfgFile := path.Join(home, "config", "config.toml")
125 | if _, err := os.Stat(cfgFile); err == nil {
126 | viper.SetConfigFile(cfgFile)
127 |
128 | if err := viper.ReadInConfig(); err != nil {
129 | return err
130 | }
131 | }
132 | if err := viper.BindPFlag(client.FlagChainID, cmd.PersistentFlags().Lookup(client.FlagChainID)); err != nil {
133 | return err
134 | }
135 | if err := viper.BindPFlag(cli.EncodingFlag, cmd.PersistentFlags().Lookup(cli.EncodingFlag)); err != nil {
136 | return err
137 | }
138 | return viper.BindPFlag(cli.OutputFlag, cmd.PersistentFlags().Lookup(cli.OutputFlag))
139 | }
140 |
--------------------------------------------------------------------------------
/golang/scripts/README.md:
--------------------------------------------------------------------------------
1 | # Build and Run App
2 |
3 | ## Setup
4 | If you have never used `go mod` before, you must add some environment variables to your shell profile.
5 |
6 | ```bash
7 | mkdir -p $HOME/go/bin
8 | echo "export GOBIN=\$GOPATH/bin" >> ~/.bash_profile
9 | echo "export PATH=\$PATH:\$GOBIN" >> ~/.bash_profile
10 | source ~/.bash_profile
11 | ```
12 |
13 | Now, you can install and run the application.
14 |
15 | ```bash
16 | # Clone repository
17 | git clone https://github.com/summa-tx/relays.git
18 | cd relays/golang
19 |
20 | # Install the app into your $GOBIN
21 | make install
22 |
23 | # Now you should be able to run the following commands:
24 | relayd help
25 | relaycli help
26 | ```
27 | ## Running the CLI
28 | To run the CLI for manual testing, you can run `make init` to initialize a new chain.
29 | All chain-related data lives in `scripts/json_data`. Edit `scripts/json_data/genesis.json` to generate a customized genesis state. This JSON must be a list of block headers pertaining to one epoch. The first header must be the first block of the epoch. The remaining headers must be ordered headers beginning at any height in the epoch. A sketch of these constraints follows below.
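For concreteness, here is a standalone sketch (not part of the relay code) that checks these constraints. It assumes `genesis.json` is a plain JSON array using the same header fields as the other files in `scripts/json_data`, and that an epoch is Bitcoin's 2016-block difficulty period; verify both assumptions against the actual file before relying on this.

```go
package main

import (
	"encoding/json"
	"fmt"
	"os"
)

// headerEntry mirrors the header-object fields used by the sample files in
// scripts/json_data (raw, hash, height, prevhash, merkle_root). Whether
// genesis.json uses exactly this shape is an assumption.
type headerEntry struct {
	Raw        string `json:"raw"`
	Hash       string `json:"hash"`
	Height     uint64 `json:"height"`
	PrevHash   string `json:"prevhash"`
	MerkleRoot string `json:"merkle_root"`
}

func main() {
	data, err := os.ReadFile("scripts/json_data/genesis.json")
	if err != nil {
		panic(err)
	}

	var headers []headerEntry
	if err := json.Unmarshal(data, &headers); err != nil {
		panic(err)
	}
	if len(headers) < 2 {
		panic("expected the epoch-start header plus at least one more header")
	}

	// The first header must open the epoch (Bitcoin retargets every 2016 blocks).
	if headers[0].Height%2016 != 0 {
		panic("first header is not the first block of an epoch")
	}

	// The remaining headers may begin at any height, but must stay within that epoch.
	if headers[1].Height < headers[0].Height || headers[1].Height >= headers[0].Height+2016 {
		panic("remaining headers do not start within the epoch")
	}

	// The remaining headers must form an ordered, linked run.
	for i := 2; i < len(headers); i++ {
		prev, cur := headers[i-1], headers[i]
		if cur.Height != prev.Height+1 || cur.PrevHash != prev.Hash {
			panic(fmt.Sprintf("headers %d and %d do not form an ordered chain", i-1, i))
		}
	}

	fmt.Printf("ok: epoch start %d plus %d ordered headers\n", headers[0].Height, len(headers)-1)
}
```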
30 | ```bash
31 | # Set the executable rights if not done already
32 | chmod +x scripts/init_chain.sh
33 |
34 | # initialize chain with data from scripts/json_data/genesis.json
35 | make init
36 | ```
37 | Open up a new terminal tab in the same directory to begin interacting with the chain. As per the setup script, you can now interact using the account `me` with password `12345678`: when submitting transactions with the flag `--from me`, enter `12345678` when prompted for the password.
38 |
39 | ### Query CLI
40 | Querying requires neither the `--from` flag nor a password.
41 | ```bash
42 | # Retrieve the first digest of the relay
43 | relaycli query relay getrelaygenesis
44 |
45 | # Retrieve the last reorg common ancestor (LCA)
46 | relaycli query relay getlastreorglca
47 |
48 | # List other query options
49 | relaycli query relay
50 | ```
51 |
52 | ### Transact with CLI
53 | Transactions require the `--from` flag and a password.
54 | JSON parameters are accepted as either raw JSON or JSON files. Including the `--inputfile` flag makes all JSON parameters be interpreted as file names relative to `scripts/json_data`.
55 | Use the flag `--broadcast-mode block` to surface errors synchronously when submitting transactions; otherwise errors can be swallowed, resulting in false-positive successes.
56 | Here are some transactions and queries you can run upon initializing the chain with the default genesis state:
57 |
58 | ```bash
59 | # Add the following bitcoin headers, which also correspond with a difficulty change in the bitcoin chain
60 | relaycli tx relay ingestdiffchange ef8248820b277b542ac2a726ccd293e8f2a3ea24c1fe04000000000000000000 0_new_difficulty.json --inputfile --from me --broadcast-mode block
61 |
62 | # Submit Proof Request
63 | relaycli tx relay newrequest 0x 0x17a91423737cd98bb6b2da5a11bcd82e5de36591d69f9f87 0 1 --broadcast-mode block --from me
64 |
65 | # Check whether the given proof is valid: it will not be, because the block containing the transaction has not been ingested yet
66 | relaycli query relay checkproof 1_check_proof.json --inputfile
67 |
68 | # Ingest new headers to relay (without any change in difficulty)
69 | relaycli tx relay ingestheaders 2_ingest_headers.json --from me --inputfile --broadcast-mode block
70 |
71 | # Check whether the given proof is valid: it will be valid with the new headers from the previous tx
72 | relaycli query relay checkproof 1_check_proof.json --inputfile
73 |
74 | # Provide valid proof that fulfils a proof request
75 | relaycli tx relay provideproof 1_check_proof.json 3_filled_requests.json --from me --inputfile --broadcast-mode block
76 |
77 | # Ingest remaining headers to relay (without any change in difficulty)
78 | relaycli tx relay ingestheaders 4_ingest_headers.json --from me --inputfile --broadcast-mode block
79 |
80 | # Mark new heaviest to update the best known digest
81 | relaycli tx relay marknewheaviest 0x4c2078d0388e3844fe6241723e9543074bd3a974c16611000000000000000000 0x0000c020954ea1d980abc34fd5c260205e025a405f59cdf510960c000000000000000000ad864d04a6ca14e597da45c4936dd3a07946e7d72aab72a3ed7444f0f6da618dd150425eff3212173f0c982d 0x0000c020bc00d40ffb1b0e8850475b0ff71d990080bb0e8203d1090000000000000000008a317b377cc53010ed4c741bd6bcea5fe6748665a6a9374510ff77e5cdfac7e3b971425ed41a12174334a315 0 --broadcast-mode block --from me
82 |
83 | ```
84 |
--------------------------------------------------------------------------------
/golang/dashboard/src/components/Relay-Info/Info-Main.vue:
--------------------------------------------------------------------------------
[Template, script, and style content not recovered: the component's markup was stripped during extraction. Surviving text fragments: a "Cosmos Relay" heading, a {{ blockDifference }} binding, and a "Bitcoin Explorer {{ source }}" label; the original file also contained <script> (lines 94-124) and <style> (lines 125-141) sections.]
--------------------------------------------------------------------------------
/golang/dashboard/src/store/relay.js:
--------------------------------------------------------------------------------
1 | import axios from 'axios'
2 | import * as types from '@/store/mutation-types'
3 | import {
4 | reverseEndianness,
5 | convertUnixTimestamp,
6 | lStorage
7 | } from '@/utils/utils'
8 | const relayURL = '/relay'
9 |
10 | const state = {
11 | connected: true,
12 |
13 | lastComms: lStorage.get('lastCommsRelay') || undefined,
14 |
15 | // Best Known Digest
16 | bkd: lStorage.get('bkd') || {
17 | height: 0, // Number - height of the BKD
18 | hash: '', // String - BKD hash
19 | time: undefined, // Date - BKD timestamp, from external
20 | updatedAt: undefined // Date - When was the BKD last updated
21 | },
22 |
23 | // last (reorg) common ancestor
24 | lca: lStorage.get('lca') || {
25 | height: 0, // Number - height of the LCA
26 | hash: '', // String - LCA hash
27 | time: undefined, // Date - LCA timestamp, from external
28 | updatedAt: undefined // Date - When was the LCA last updated
29 | }
30 | }
31 |
32 | const mutations = {
33 | [types.SET_CONNECTED] (state, connected) {
34 | state.connected = connected
35 | },
36 |
37 | [types.SET_LAST_COMMS_RELAY] (state, { date }) {
38 | state.lastComms = date
39 | lStorage.set('lastCommsRelay', state.lastComms)
40 | },
41 |
42 | // NB: BKD = best known digest
43 | [types.SET_BKD] (state, payload) {
44 | for (let key in payload) {
45 | state.bkd[key] = payload[key]
46 | }
47 | lStorage.set('bkd', state.bkd)
48 | },
49 |
50 | // NB: LCA = last (reorg) common ancestor
51 | [types.SET_LCA] (state, payload) {
52 | for (let key in payload) {
53 | state.lca[key] = payload[key]
54 | }
55 | lStorage.set('lca', state.lca)
56 | }
57 | }
58 |
59 | const actions = {
60 | getBKD ({ commit, dispatch }) {
61 | axios.get(`${relayURL}/getbestdigest`)
62 | .then((res) => {
63 | commit(types.SET_CONNECTED, true)
64 |
65 | const hashBE = reverseEndianness(res.data.result.result)
66 | console.log('get BKD: ', hashBE)
67 |
68 | dispatch('setBKD', { hash: hashBE })
69 | dispatch('verifyHash', { hash: hashBE, type: 'BKD' })
70 | })
71 | .catch((e) => {
72 | console.error('relay/getBKD:\n', e)
73 | if (
74 | e.message === 'Request failed with status code 500' ||
75 | e.message === 'Network Error'
76 | ) {
77 | commit(types.SET_CONNECTED, false)
78 | }
79 | })
80 | },
81 |
82 | getLCA ({ commit, dispatch }) {
83 | axios.get(`${relayURL}/getlastreorglca`)
84 | .then((res) => {
85 | commit(types.SET_CONNECTED, true)
86 |
87 | const hashBE = reverseEndianness(res.data.result.result)
88 | console.log('get LCA: ', hashBE)
89 |
90 | dispatch('setLCA', { hash: hashBE })
91 | dispatch('verifyHash', { hash: hashBE, type: 'LCA'})
92 | })
93 | .catch((e) => {
94 | console.error('relay/getLCA:\n', e)
95 | if (
96 | e.message === 'Request failed with status code 500' ||
97 | e.message === 'Network Error'
98 | ) {
99 | commit(types.SET_CONNECTED, false)
100 | }
101 | })
102 | },
103 |
104 | verifyHash ({ rootState, dispatch, commit }, data) {
105 | // data.hash, data.type = 'BKD', 'LCA'
106 | console.log({ data })
107 | axios.get(`${rootState.blockchainURL}/block/${data.hash}`)
108 | .then((block) => {
109 | console.log('block', block)
110 | dispatch(
111 | `set${data.type}`,
112 | {
113 | height: block.data.height,
114 | time: convertUnixTimestamp(block.data.timestamp),
115 | updatedAt: new Date()
116 | }
117 | )
118 | commit(types.SET_LAST_COMMS_RELAY, { date: new Date() })
119 | }).catch((e) => {
120 | console.error('relay/verifyHash:\n', e)
121 | })
122 | },
123 |
124 | // payload: { key: '', data: '' }
125 | setBKD ({ commit }, payload) {
126 | commit(types.SET_BKD, payload)
127 | commit(types.SET_LAST_COMMS_RELAY, { date: new Date() })
128 | },
129 |
130 | // payload: { key: '', data: '' }
131 | setLCA ({ commit }, payload) {
132 | commit(types.SET_LCA, payload)
133 | commit(types.SET_LAST_COMMS_RELAY, { date: new Date() })
134 |   }
135 | }
136 |
137 | export default {
138 | namespaced: true,
139 | state,
140 | mutations,
141 | actions
142 | }
143 |
--------------------------------------------------------------------------------
/golang/dashboard/src/utils/utils.js:
--------------------------------------------------------------------------------
1 | /**
2 |  * How many minutes have passed from a past point until now?
3 | * Expected usage: current block verified at, last comms relay, last comms
4 | * external
5 | * @param {Date} date Starting date
6 | * @returns {Number} Minutes that have passed from starting date until now
7 | *
8 | */
9 | export function getMinsAgo (date) {
10 | if (!date) {
11 | return undefined
12 | }
13 | const from = timeInSecs(date)
14 | const now = timeInSecs(new Date())
15 | return Math.round((now - from) / 60)
16 | }
17 |
18 | /**
19 | * Gets time in seconds from a date object
20 | * @param {Date} date if no date is passed in, current date is used
21 | * @returns {Number} Returns time in seconds
22 | */
23 | export function timeInSecs (date) {
24 |   const d = date ? new Date(date) : new Date()
25 | return d.getTime() / 1000
26 | }
27 |
28 | /**
29 | * Convenience class to verify localStorage exists
30 | * TODO: Consider adding polyfill...
31 | *
32 | */
33 | class LStorage {
34 | _verify () {
35 | if (window && window.localStorage) {
36 | return true
37 | }
38 | return false
39 | }
40 |
41 | /**
42 | * Sets item to localStorage
43 | * @param {String} item - name of item to set
44 | * @param {Any} value - value of item to set
45 | */
46 | set (item, value) {
47 | if (this._verify()) {
48 | window.localStorage.setItem(item, JSON.stringify(value))
49 | } else {
50 | console.error('Error saving value to localStorage')
51 | }
52 | }
53 |
54 | /**
55 | * Gets items from localStorage
56 | * @param {String} item - name of item to retrieve
57 | * @returns {String}
58 | */
59 | get (item) {
60 | if (this._verify()) {
61 | const i = window.localStorage.getItem(item)
62 |
63 | let value
64 | try {
65 | value = JSON.parse(i)
66 | } catch (e) {
67 | console.log('storage error', e)
68 | }
69 | return value
70 | } else {
71 | console.error('Error getting value from localStorage')
72 | }
73 | }
74 |
75 | /**
76 | * Removes an item from localStorage
77 | * @param {String} item - name of item to remove
78 | */
79 | remove (item) {
80 | if (this._verify()) {
81 | window.localStorage.removeItem(item)
82 | } else {
83 | console.error('Error removing value from localStorage')
84 | }
85 | }
86 | }
87 |
88 | export const lStorage = new LStorage()
89 |
90 | const assert = require('bsert')
91 |
92 | /**
93 | * Checks if value is of type string
94 | * @param {String} str - string value to check
95 |  * @throws {Error} if value is not a string
96 | */
97 | export function isString (str) {
98 | const isStr = typeof str === 'string'
99 | assert(isStr, `Must pass in string, received ${typeof str}`)
100 | }
101 |
102 | /**
103 | * Checks if string is hex
104 | * @param {String} str - string value to check
105 |  * @throws {Error} if value is not a hex string
106 | */
107 | export function isHex (str) {
108 | isString(str)
109 |
110 | let hexStr = remove0x(str)
111 |
112 | assert(hexStr && /^[0-9a-fA-F]+$/.test(hexStr), 'Must pass in hex string')
113 | }
114 |
115 | /**
116 | * If a hex string is '0x' prepended, it removes it
117 | * @param {String} str - hex string
118 | * @returns {String} hex string without '0x'
119 | */
120 | export function remove0x (str) {
121 | isString(str)
122 |
123 | if (str.slice(0, 2) === '0x') {
124 | return str.slice(2, str.length)
125 | }
126 | return str
127 | }
128 |
129 | /**
130 | * If a hex string is not already '0x' prepended, it adds it
131 | * @param {String} str - hex string
132 | * @returns {String} hex string beginning with '0x'
133 | */
134 | export function add0x (str) {
135 | isString(str)
136 |
137 | if (str.slice(0, 2) === '0x') {
138 | return str
139 | }
140 | return `0x${str}`
141 | }
142 |
143 | /**
144 | * Reverses Endianness of a hex bytes string
145 | * @param {String} str - hex string
146 | * @returns {String} hex string with reverse endianness
147 | */
148 | export function reverseEndianness (str) {
149 | var formatStr = remove0x(str)
150 | return formatStr.match(/../g).reverse().join('')
151 | }
152 |
153 | /**
154 | * Converts a Unix timestamp
155 | * BlockStream returns Unix timestamps that must be converted
156 |  * @param {Number} time - Unix timestamp in seconds
157 | * @returns {Date} time as a JavaScript Date object
158 | */
159 | export function convertUnixTimestamp (time) {
160 | return new Date(time * 1000)
161 | }
162 |
--------------------------------------------------------------------------------
/golang/EXTENDING.md:
--------------------------------------------------------------------------------
1 | ## Adding new functionality
2 |
3 | This is a cosmos-sdk module. It can be extended with new messages and/or
4 | queries. Generally, this module is feature-complete, and should not be
5 | extended. The main exception is the WIP hooks system on proof validation. All
6 | other functionality should likely be put into a separate module.
7 |
8 | ### Integrating with other modules
9 |
10 | The relay keeper keeps a reference to an object that implements the following
11 | interface (found in `x/relay/types/types.go`).
12 |
13 | ```go
14 | type ProofHandler interface {
15 | HandleValidProof(ctx sdk.Context, filled FilledRequests, requests []ProofRequest)
16 | }
17 | ```
18 |
19 | The `FilledRequests` struct contains an `SPVProof` and supporting information
20 | about the transaction that fulfills the request.
21 | It can be found in `x/relay/types/validator.go`. `requests []ProofRequest` is a slice
22 | of `ProofRequest`s that have been filled.
23 |
24 | When the keeper validates a proof, it will call the `HandleValidProof` function
25 | with the valid `FilledRequests` struct and the `ProofRequests` that have been
26 | filled.
27 |
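A minimal handler might look like the following sketch. The `LoggingHandler` name and its logging body are illustrative only and do not exist in this repo; the sketch assumes `sdk` is the cosmos-sdk types package and `types` is this module's types package.

```go
// Illustrative sketch only: a ProofHandler that just logs each valid proof.
type LoggingHandler struct{}

func (h LoggingHandler) HandleValidProof(
	ctx sdk.Context,
	filled types.FilledRequests,
	requests []types.ProofRequest,
) {
	// Your module's business logic goes here: release escrowed funds,
	// credit an account, record the event, etc.
	ctx.Logger().Info("valid SPV proof received", "filled_requests", len(requests))
}
```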
28 | First, instantiate a `handler` that fulfills the `ProofHandler` interface. Then
29 | add an instance of `relay.Keeper` to your app in `app.go`. It can be
30 | instantiated as follows:
31 |
32 | ```go
33 | handler = types.NewNullHandler() // or your preferred handler
34 |
35 | app.relayKeeper = relay.NewKeeper(
36 | keys[relay.StoreKey],
37 | app.cdc,
38 | true,
39 | handler
40 | )
41 | ```
42 |
43 | After that, the relay can be accessed via the Keeper's public interface.
44 |
45 | ### Extending this module
46 |
47 | In order to extend this module, follow these steps:
48 |
49 | #### How to add a view function (queries)
50 | 1. Add necessary getter(s) in `x/relay/keeper/keeper.go`
51 | 1. Add response type to `x/relay/types/querier.go`
52 | 1. Add new string tag for the new query
53 | 1. Response type is a struct with the return values
54 | 1. Implement `String()` for the response type
55 | 1. Add function to querier `x/relay/keeper/querier.go` (see the sketch after this list)
56 | 1. Add new `query___` function
57 | 1. Add new case block to `switch` in `NewQuerier()`
58 | 1. Add to CLI
59 | 1. add to `x/relay/client/cli/query.go`
60 | 1. `func GetCmd______`
61 | 1. returns a `cobra.Command` object
62 | 1. define `Use` `Example` `Short` `Long` `Args` and `RunE`
63 | 1. `RunE` parses args, returns errors, and calls `cliCtx.QueryWithData`
64 | 1. parses the output and returns it with `cliCtx.PrintOutput`
65 | 1. Add to REST
66 | 1. add to `x/relay/client/rest/query.go`
67 | 1. new function `_____Handler`
68 | 1. parse args and build structs
69 | 1. cliCtx.QueryWithData
70 | 1. return errors with `rest.WriteErrorResponse`
71 | 1. return query result with `rest.PostProcessResponse`
72 | 1. add GET route to `x/relay/client/rest/rest.go`
73 | 1. new `s.HandleFunc` with the route and arguments
74 | 1. `.Methods("GET")`
75 | 1. duplicate for optional args (see `isancestor` for example)
76 |
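As a rough sketch of what this usually looks like under the older cosmos-sdk conventions this module is built on (every identifier containing `Example` is a placeholder; `abci` refers to `github.com/tendermint/tendermint/abci/types` and `codec` to `github.com/cosmos/cosmos-sdk/codec`):

```go
// Sketch only; all *Example identifiers are placeholders.
func queryGetExample(ctx sdk.Context, req abci.RequestQuery, keeper Keeper) ([]byte, sdk.Error) {
	var params types.QueryParamsGetExample
	if err := types.ModuleCdc.UnmarshalJSON(req.Data, &params); err != nil {
		return nil, sdk.ErrInternal("could not unmarshal query parameters")
	}

	// Call the getter added to keeper.go and wrap it in the response type.
	result := keeper.GetExample(ctx, params.Digest)
	response := types.QueryResGetExample{Result: result}

	res, marshalErr := codec.MarshalJSONIndent(types.ModuleCdc, response)
	if marshalErr != nil {
		return nil, sdk.ErrInternal("could not marshal response to JSON")
	}
	return res, nil
}

// And the matching case block in NewQuerier()'s switch:
// case types.QueryGetExample:
// 	return queryGetExample(ctx, req, keeper)
```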
77 |
78 | #### How to add a non-view function (messages)
79 | 1. Add necessary getters/setters in `x/relay/keeper/keeper.go`
80 | 1. Add msg type in `x/relay/types/msgs.go`
81 | 1. Message type is a struct with the arguments
82 | 1. Implement `New___()`
83 | 1. Implement `GetSigners()` <--- Ask me about this later
84 | 1. Implement `Type()`
85 | 1. Implement `ValidateBasic()`
86 | 1. Implement `GetSignBytes()`
87 | 1. Implement `Route()`
88 | 1. Add to handler (see the sketch after this list)
89 | 1. Add new `handle____` function
90 | 1. Add new case block to `switch` in `NewHandler()`
91 | 1. Add aliases in `x/relay/alias.go`
92 | 1. Add alias in `var` block
93 | 1. Add alias in `type` block
94 | 1. Add to CLI
95 | 1. add to `x/relay/client/cli/tx.go`
96 | 1. `func GetCmd______`
97 | 1. returns a `cobra.Command` object
98 | 1. define `Use` `Example` `Short` `Long` `Args` and `RunE`
99 | 1. `RunE` parses args, returns errors, and calls `utils.GenerateOrBroadcastMsgs`
100 | 1. Add to REST
101 | 1. add to `x/relay/client/rest/tx.go`
102 | 1. new http request struct `______Req`
103 | 1. `BaseReq` + the struct from `x/relay/types/msgs.go`
104 | 1. new function `_____Handler`
105 | 1. parse args and build structs
106 | 1. return errors with `rest.WriteErrorResponse`
107 | 1. make the tx with `utils.WriteGenerateStdTxResponse`
108 | 1. add POST route to `x/relay/client/rest/rest.go`
109 | 1. new `s.HandleFunc` with the route and arguments
110 | 1. `.Methods("POST")`
111 |
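A rough sketch of the message-handling side is shown below; `MsgExample`, `handleMsgExample`, and `DoExample` are placeholders for your own message type, handler function, and keeper method:

```go
// Sketch only; all *Example identifiers are placeholders.
func handleMsgExample(ctx sdk.Context, keeper Keeper, msg types.MsgExample) sdk.Result {
	if err := keeper.DoExample(ctx, msg.Digest); err != nil {
		return err.Result()
	}
	return sdk.Result{Events: ctx.EventManager().Events()}
}

// And the matching case block in NewHandler()'s switch:
// case types.MsgExample:
// 	return handleMsgExample(ctx, keeper, msg)
```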
--------------------------------------------------------------------------------
/maintainer/maintainer/base.py:
--------------------------------------------------------------------------------
1 | import sys
2 | import signal
3 | import asyncio
4 | import logging
5 | from pathlib import Path
6 | from functools import partial
7 | from dotenv import load_dotenv
8 |
9 | from maintainer import config
10 |
11 | from typing import Awaitable, Callable
12 | from asyncio.events import AbstractEventLoop
13 |
14 | AsyncFunction = Callable[[], Awaitable[None]]
15 |
16 |
17 | def registerFileHandler(name: str, logger: logging.Logger) -> None:
18 | if sys.platform.startswith('win'):
19 | raise NotImplementedError('Windows not supported') # pragma: nocover
20 | logDir = Path.home() / '.summa' / 'relays'
21 | logDir.mkdir(parents=True, exist_ok=True)
22 |
23 | logPath = logDir / name
24 |
25 | formatter = logging.Formatter(
26 | fmt='%(asctime)6s %(name)s: %(levelname)s %(message)s',
27 | datefmt='%Y-%m-%d %H:%M:%S')
28 | fh = logging.FileHandler(logPath)
29 | fh.setFormatter(formatter)
30 | fh.setLevel(logging.DEBUG)
31 | logger.addHandler(fh)
32 |
33 |
34 | def get_env_name(default: str) -> str:
35 |     '''Checks for an argv-passed env name, formats the default otherwise'''
36 | if len(sys.argv) > 1:
37 | return sys.argv[1]
38 | return f'.{default}.env'
39 |
40 |
41 | def set_config(env_name: str) -> None:
42 | '''Load dotfiles and set the config object'''
43 | # Load config from .env file(s)
44 | # Load a base .env first, then override with an app-specified version
45 | path = Path(__file__).parent / 'config'
46 | base_env = path / '.env'
47 | load_dotenv(base_env, override=True)
48 | load_dotenv(path / env_name, override=True)
49 | config.set()
50 |
51 |
52 | def main(
53 | run: AsyncFunction,
54 | teardown: AsyncFunction,
55 | name: str,
56 | logger: logging.Logger) -> None:
57 | '''Template for small, headless, async applications'''
58 | logger.info(f'Setting config {name}')
59 | env_name = get_env_name(default=name)
60 | set_config(env_name=env_name)
61 | registerFileHandler(name=env_name, logger=logger)
62 | logger.info('Starting relay')
63 |
64 | loop = asyncio.get_event_loop()
65 |
66 | # set up graceful exit
67 | signals = (signal.SIGHUP, signal.SIGTERM, signal.SIGINT)
68 | for s in signals:
69 | loop.add_signal_handler(
70 | s, lambda s=s: asyncio.create_task(
71 | shutdown(loop, logger, teardown, signal=s)))
72 |
73 | handler = partial(handle_exception, logger=logger, teardown=teardown)
74 | loop.set_exception_handler(handler)
75 |
76 | asyncio.ensure_future(run())
77 | loop.run_forever()
78 |
79 |
80 | async def shutdown( # type: ignore[no-untyped-def]
81 | loop: AbstractEventLoop,
82 | logger: logging.Logger,
83 | teardown: AsyncFunction,
84 | signal=None # a named enum of ints
85 | ) -> None:
86 | '''Cancel active tasks for shutdown'''
87 | if signal:
88 | logger.info(f'Received exit signal {signal.name}')
89 | else:
90 |         logger.info('Unexpected shutdown initiated')
91 | await asyncio.sleep(5) # stall error loops
92 |
93 | if teardown:
94 | try:
95 | await teardown()
96 | except Exception:
97 | logger.exception('Error during teardown function')
98 | logger.error('Exiting uncleanly')
99 | sys.exit(1)
100 |
101 | tasks = [t for t in asyncio.Task.all_tasks() if t is not
102 | asyncio.current_task()]
103 |
104 | logger.info(f'Cancelling {len(tasks)} tasks')
105 | [task.cancel() for task in tasks]
106 |
107 | try:
108 | await asyncio.gather(*tasks, return_exceptions=True)
109 | except Exception:
110 | logger.exception('Error during loop task cancellation')
111 | logger.error('Exiting uncleanly')
112 | sys.exit(1)
113 |
114 | loop.stop()
115 |
116 |
117 | def handle_exception( # type: ignore[no-untyped-def]
118 | loop: AbstractEventLoop,
119 |         context,  # exception context dict supplied by the event loop
120 | logger: logging.Logger,
121 | teardown: AsyncFunction):
122 | '''Global exception handler. Gets all unhandled exceptions from tasks'''
123 | # context['message'] will always be there; but context['exception'] may not
124 | if 'exception' in context:
125 | # reraise so we can log it
126 | try:
127 | raise context['exception']
128 | except Exception:
129 | logger.exception('Caught exception')
130 | else:
131 | logger.error(f'Caught exception: {context["message"]}')
132 |
133 | logger.info('Shutting down')
134 | asyncio.create_task(shutdown(loop, logger, teardown))
135 |
--------------------------------------------------------------------------------
/golang/x/relay/keeper/headers_test.go:
--------------------------------------------------------------------------------
1 | package keeper
2 |
3 | import (
4 | sdk "github.com/cosmos/cosmos-sdk/types"
5 | "github.com/summa-tx/relays/golang/x/relay/types"
6 | )
7 |
8 | func (s *KeeperSuite) TestGetHeader() {
9 | // errors if header is not found
10 | header := s.Fixtures.HeaderTestCases.ValidateChain[0].Headers[0]
11 | _, err := s.Keeper.GetHeader(s.Context, header.Hash)
12 | s.Equal(sdk.CodeType(types.UnknownBlock), err.Code())
13 | }
14 |
15 | func (s *KeeperSuite) TestEmitExtension() {
16 | // tests extension was emitted successfully
17 | headers := s.Fixtures.HeaderTestCases.ValidateChain[0].Headers
18 | s.Keeper.emitExtension(s.Context, headers[0], headers[1])
19 |
20 | events := s.Context.EventManager().Events()
21 | e := events[0]
22 | s.Equal("extension", e.Type)
23 | }
24 |
25 | func (s *KeeperSuite) TestValidateHeaderChain() {
26 | cases := s.Fixtures.HeaderTestCases.ValidateChain
27 |
28 | for _, tc := range cases {
29 | err := validateHeaderChain(tc.Anchor, tc.Headers, tc.Internal, tc.IsMainnet)
30 | if tc.Output == 0 {
31 | logIfTestCaseError(tc, err)
32 | s.SDKNil(err)
33 | } else {
34 | s.NotNil(err)
35 | s.Equal(tc.Output, err.Code())
36 | }
37 | }
38 | }
39 |
40 | func (s *KeeperSuite) TestIngestHeaders() {
41 | cases := s.Fixtures.HeaderTestCases.ValidateChain
42 |
43 | // errors if anchor is not found
44 | err := s.Keeper.ingestHeaders(s.Context, cases[0].Headers, cases[0].Internal)
45 | s.Equal(sdk.CodeType(types.UnknownBlock), err.Code())
46 |
47 | for _, tc := range cases {
48 | s.InitTestContext(tc.IsMainnet, false)
49 | s.Keeper.ingestHeader(s.Context, tc.Anchor)
50 | err := s.Keeper.ingestHeaders(s.Context, tc.Headers, tc.Internal)
51 | if tc.Output == 0 {
52 | logIfTestCaseError(tc, err)
53 | s.SDKNil(err)
54 | } else {
55 | s.NotNil(err)
56 | s.Equal(tc.Output, err.Code())
57 | }
58 | }
59 | }
60 |
61 | func (s *KeeperSuite) TestIngestHeaderChain() {
62 | cases := s.Fixtures.HeaderTestCases.ValidateChain
63 |
64 | for _, tc := range cases {
65 | if tc.Internal == false {
66 | s.InitTestContext(tc.IsMainnet, false)
67 | s.Keeper.ingestHeader(s.Context, tc.Anchor)
68 | err := s.Keeper.IngestHeaderChain(s.Context, tc.Headers)
69 | if tc.Output == 0 {
70 | logIfTestCaseError(tc, err)
71 | s.SDKNil(err)
72 | } else {
73 | s.NotNil(err)
74 | s.Equal(tc.Output, err.Code())
75 | }
76 | }
77 | }
78 | }
79 |
80 | // TestIngestHeader tests ingestHeader, HasHeader, and GetHeader
81 | func (s *KeeperSuite) TestIngestHeader() {
82 | cases := s.Fixtures.HeaderTestCases.ValidateChain
83 |
84 | for _, tc := range cases {
85 | s.Keeper.ingestHeader(s.Context, tc.Headers[0])
86 | hasHeader := s.Keeper.HasHeader(s.Context, tc.Headers[0].Hash)
87 | s.Equal(true, hasHeader)
88 | header, err := s.Keeper.GetHeader(s.Context, tc.Headers[0].Hash)
89 | s.SDKNil(err)
90 | s.Equal(tc.Headers[0], header)
91 | }
92 | }
93 |
94 | func (s *KeeperSuite) TestValidateDifficultyChange() {
95 | cases := s.Fixtures.HeaderTestCases.ValidateDiffChange
96 |
97 | for _, tc := range cases {
98 | err := validateDifficultyChange(tc.Headers, tc.PrevEpochStart, tc.Anchor)
99 | if tc.Output == 0 {
100 | logIfTestCaseError(tc, err)
101 | s.SDKNil(err)
102 | } else {
103 | s.NotNil(err)
104 | s.Equal(tc.Output, err.Code())
105 | }
106 | }
107 | }
108 |
109 | func (s *KeeperSuite) TestIngestDifficultyChange() {
110 | cases := s.Fixtures.HeaderTestCases.ValidateDiffChange
111 |
112 | // errors if PrevEpochStart is not found
113 | err := s.Keeper.IngestDifficultyChange(s.Context, cases[0].PrevEpochStart.Hash, cases[0].Headers)
114 | s.Equal(sdk.CodeType(types.UnknownBlock), err.Code())
115 |
116 | // errors if anchor is not found
117 | s.Keeper.ingestHeader(s.Context, cases[0].PrevEpochStart)
118 | err = s.Keeper.IngestDifficultyChange(s.Context, cases[0].PrevEpochStart.Hash, cases[0].Headers)
119 | s.Equal(sdk.CodeType(types.UnknownBlock), err.Code())
120 |
121 | for _, tc := range cases {
122 | s.Keeper.ingestHeader(s.Context, tc.PrevEpochStart)
123 | s.Keeper.ingestHeader(s.Context, tc.Anchor)
124 | err := s.Keeper.IngestDifficultyChange(s.Context, tc.PrevEpochStart.Hash, tc.Headers)
125 | if tc.Output == 0 {
126 | logIfTestCaseError(tc, err)
127 | s.SDKNil(err)
128 | } else {
129 | s.NotNil(err)
130 | s.Equal(tc.Output, err.Code())
131 | }
132 | }
133 | }
134 |
135 | func (s *KeeperSuite) TestCompareTargets() {
136 | cases := s.Fixtures.HeaderTestCases.CompareTargets
137 |
138 | for _, tc := range cases {
139 | result := compareTargets(tc.Full, tc.Truncated)
140 | s.Equal(tc.Output, result)
141 | }
142 | }
143 |
144 | func (s *KeeperSuite) TestSetCurrentEpochDiff() {
145 | val := sdk.NewUint(1000)
146 | err := s.Keeper.setCurrentEpochDifficulty(s.Context, val)
147 | s.SDKNil(err)
148 |
149 | d := s.Keeper.getCurrentEpochDifficulty(s.Context)
150 |
151 | s.Equal(d, val)
152 | }
153 |
154 | func (s *KeeperSuite) TestSetPrevEpochDiff() {
155 | val := sdk.NewUint(1000)
156 | err := s.Keeper.setPrevEpochDifficulty(s.Context, val)
157 | s.SDKNil(err)
158 |
159 | d := s.Keeper.getPrevEpochDifficulty(s.Context)
160 |
161 | s.Equal(d, val)
162 | }
163 |
--------------------------------------------------------------------------------
/golang/x/relay/keeper/handler_test.go:
--------------------------------------------------------------------------------
1 | package keeper
2 |
3 | import (
4 | "bytes"
5 |
6 | sdk "github.com/cosmos/cosmos-sdk/types"
7 | "github.com/summa-tx/relays/golang/x/relay/types"
8 | )
9 |
10 | func getAccAddress() sdk.AccAddress {
11 | address, _ := sdk.AccAddressFromBech32("cosmos1ay37rp2pc3kjarg7a322vu3sa8j9puah8msyfw")
12 | return address
13 | }
14 |
15 | // Create a bad sdk.msg to pass into TestNewHandler
16 | type MsgBadMessage struct {
17 | Signer sdk.AccAddress `json:"signer"`
18 | }
19 |
20 | func (msg MsgBadMessage) GetSigners() []sdk.AccAddress {
21 | return []sdk.AccAddress{msg.Signer}
22 | }
23 | func (msg MsgBadMessage) Type() string { return "bad_message" }
24 | func (msg MsgBadMessage) ValidateBasic() sdk.Error { return nil }
25 | func (msg MsgBadMessage) GetSignBytes() []byte {
26 | return sdk.MustSortJSON(types.ModuleCdc.MustMarshalJSON(msg))
27 | }
28 | func (msg MsgBadMessage) Route() string { return types.RouterKey }
29 |
30 | func (s *KeeperSuite) TestNewHandler() {
31 | handler := NewHandler(s.Keeper)
32 |
33 | badMsg := MsgBadMessage{
34 | Signer: getAccAddress(),
35 | }
36 |
37 | res := handler(s.Context, badMsg)
38 | s.Equal("{\"codespace\":\"sdk\",\"code\":6,\"message\":\"Unrecognized relay Msg type: bad_message\"}", res.Log)
39 | }
40 |
41 | func (s *KeeperSuite) TestHandleMsgIngestHeaderChain() {
42 | testCases := s.Fixtures.HeaderTestCases.ValidateChain
43 | handler := NewHandler(s.Keeper)
44 |
45 | newMsg := types.NewMsgIngestHeaderChain(getAccAddress(), testCases[0].Headers)
46 |
47 | res := handler(s.Context, newMsg)
48 | s.Equal(sdk.CodeType(types.UnknownBlock), res.Code)
49 |
50 | s.Keeper.ingestHeader(s.Context, testCases[0].Anchor)
51 | res = handler(s.Context, newMsg)
52 | s.Equal("extension", res.Events[0].Type)
53 | }
54 |
55 | func (s *KeeperSuite) TestHandleMsgIngestDifficultyChange() {
56 | testCases := s.Fixtures.HeaderTestCases.ValidateDiffChange
57 | handler := NewHandler(s.Keeper)
58 |
59 | newMsg := types.NewMsgIngestDifficultyChange(getAccAddress(), testCases[0].PrevEpochStart.Hash, testCases[0].Headers)
60 |
61 | res := handler(s.Context, newMsg)
62 | s.Equal(sdk.CodeType(types.UnknownBlock), res.Code)
63 |
64 | s.Keeper.ingestHeader(s.Context, testCases[0].PrevEpochStart)
65 | s.Keeper.ingestHeader(s.Context, testCases[0].Anchor)
66 | res = handler(s.Context, newMsg)
67 | s.Equal("extension", res.Events[0].Type)
68 | }
69 |
70 | func (s *KeeperSuite) TestHandleMsgMarkNewHeaviest() {
71 | testCases := s.Fixtures.HeaderTestCases.ValidateDiffChange
72 | handler := NewHandler(s.Keeper)
73 |
74 | s.Keeper.ingestHeader(s.Context, testCases[0].PrevEpochStart)
75 | s.Keeper.ingestHeader(s.Context, testCases[0].Anchor)
76 | newMsg := types.NewMsgIngestDifficultyChange(getAccAddress(), testCases[0].PrevEpochStart.Hash, testCases[0].Headers)
77 | res := handler(s.Context, newMsg)
78 | s.Equal("extension", res.Events[0].Type)
79 | }
80 |
81 | func (s *KeeperSuite) TestHandleMarkNewHeaviest() {
82 | tv := s.Fixtures.ChainTestCases.IsMostRecentCA
83 | pre := tv.PreRetargetChain
84 | post := tv.PostRetargetChain
85 | handler := NewHandler(s.Keeper)
86 |
87 | var postWithOrphan []types.BitcoinHeader
88 | postWithOrphan = append(postWithOrphan, post[:len(post)-2]...)
89 | postWithOrphan = append(postWithOrphan, tv.Orphan)
90 |
91 | err := s.Keeper.SetGenesisState(s.Context, tv.Genesis, tv.OldPeriodStart)
92 | s.SDKNil(err)
93 |
94 | err = s.Keeper.IngestHeaderChain(s.Context, pre)
95 | s.SDKNil(err)
96 | err = s.Keeper.IngestDifficultyChange(s.Context, tv.OldPeriodStart.Hash, post)
97 | s.SDKNil(err)
98 | err = s.Keeper.IngestDifficultyChange(s.Context, tv.OldPeriodStart.Hash, postWithOrphan)
99 | s.SDKNil(err)
100 |
101 | // returns correct error
102 | newMsg := types.NewMsgMarkNewHeaviest(getAccAddress(), tv.OldPeriodStart.Hash, tv.OldPeriodStart.Raw, tv.OldPeriodStart.Raw, 10)
103 | res := handler(s.Context, newMsg)
104 | s.Equal(sdk.CodeType(types.NotBestKnown), res.Code)
105 |
106 | // Successfully marks new heaviest
107 | newMsg = types.NewMsgMarkNewHeaviest(getAccAddress(), tv.Genesis.Hash, tv.Genesis.Raw, pre[0].Raw, 10)
108 | res = handler(s.Context, newMsg)
109 | s.Equal("extension", res.Events[0].Type)
110 | }
111 |
112 | func (s *KeeperSuite) TestHandleNewRequest() {
113 | handler := NewHandler(s.Keeper)
114 |
115 | // Success
116 | newRequest := types.NewMsgNewRequest(getAccAddress(), bytes.Repeat([]byte{0}, 36), []byte{0}, 0, 0, types.Local, nil)
117 | res := handler(s.Context, newRequest)
118 | hasRequest := s.Keeper.hasRequest(s.Context, types.RequestID{})
119 | s.Equal(true, hasRequest)
120 | s.Equal("proof_request", res.Events[0].Type)
121 |
122 | // Msg validation failed
123 | newRequest = types.NewMsgNewRequest(getAccAddress(), []byte{0}, []byte{0}, 0, 0, types.Local, nil)
124 | res = handler(s.Context, newRequest)
125 | s.Equal(sdk.CodeType(types.SpendsLength), res.Code)
126 |
127 | // setRequest error
128 | store := s.Keeper.getRequestStore(s.Context)
129 | store.Set([]byte(types.RequestIDTag), []byte("badID"))
130 |
131 | newRequest = types.NewMsgNewRequest(getAccAddress(), bytes.Repeat([]byte{0}, 36), []byte{0}, 0, 0, types.Local, nil)
132 | res = handler(s.Context, newRequest)
133 | s.Equal(sdk.CodeType(types.BadHexLen), res.Code)
134 | }
135 |
--------------------------------------------------------------------------------
/golang/scripts/json_data/2_ingest_headers.json:
--------------------------------------------------------------------------------
1 | [
2 | {
3 | "raw": "00e000200641238051855d1759da9b6603b156684a68a146d36a09000000000000000000f1f5d7f347aea286b805162817fca959edba51b5bf025a3b745d7da5683faba4be58425ed41a1217d632cdcc",
4 | "hash": "7f9923db1d3ad6a08054b4a80a5cd7478b57a9650eaf09000000000000000000",
5 | "height": 616898,
6 | "prevhash": "0641238051855d1759da9b6603b156684a68a146d36a09000000000000000000",
7 | "merkle_root": "f1f5d7f347aea286b805162817fca959edba51b5bf025a3b745d7da5683faba4"
8 | },
9 | {
10 | "raw": "0000ff3f7f9923db1d3ad6a08054b4a80a5cd7478b57a9650eaf090000000000000000000c2aeb41cdea44a03b437fcd294805092c294d613f7f7192e6ffc9c6ec9130cddf58425ed41a121783dd5b80",
11 | "hash": "396b3248c6b8dd11d0f02c6e688dce408dc4fe80b0c702000000000000000000",
12 | "height": 616899,
13 | "prevhash": "7f9923db1d3ad6a08054b4a80a5cd7478b57a9650eaf09000000000000000000",
14 | "merkle_root": "0c2aeb41cdea44a03b437fcd294805092c294d613f7f7192e6ffc9c6ec9130cd"
15 | },
16 | {
17 | "raw": "00e0ff3f396b3248c6b8dd11d0f02c6e688dce408dc4fe80b0c70200000000000000000033b53cacf908eb0763918b833b80936d88c3bfa6999c280450d03855208791691f5a425ed41a1217940afef1",
18 | "hash": "13249a1f8fbfe42e8e8ec1340f4fc01df17762b4dcb80c000000000000000000",
19 | "height": 616900,
20 | "prevhash": "396b3248c6b8dd11d0f02c6e688dce408dc4fe80b0c702000000000000000000",
21 | "merkle_root": "33b53cacf908eb0763918b833b80936d88c3bfa6999c280450d0385520879169"
22 | },
23 | {
24 | "raw": "00e0002013249a1f8fbfe42e8e8ec1340f4fc01df17762b4dcb80c000000000000000000a9a61a4be4289169e82e9ba5ac35b9680cb6f9cb4bea0ae5b1b2ab6cda7eaac6745b425ed41a1217ddcacc90",
25 | "hash": "8888ba6537f453b6ed11c01d1b9670e09271ccdcd65a02000000000000000000",
26 | "height": 616901,
27 | "prevhash": "13249a1f8fbfe42e8e8ec1340f4fc01df17762b4dcb80c000000000000000000",
28 | "merkle_root": "a9a61a4be4289169e82e9ba5ac35b9680cb6f9cb4bea0ae5b1b2ab6cda7eaac6"
29 | },
30 | {
31 | "raw": "00e0ff3f8888ba6537f453b6ed11c01d1b9670e09271ccdcd65a02000000000000000000b2988e5fd8ef3af6aa08e1724147bd371c572bf80d86ad78a2bb3350f675c3a27d62425ed41a1217d68e19fd",
32 | "hash": "ea87aa3b099679015eb8fd21268e9295a9fa0d568edb0c000000000000000000",
33 | "height": 616902,
34 | "prevhash": "8888ba6537f453b6ed11c01d1b9670e09271ccdcd65a02000000000000000000",
35 | "merkle_root": "b2988e5fd8ef3af6aa08e1724147bd371c572bf80d86ad78a2bb3350f675c3a2"
36 | },
37 | {
38 | "raw": "00008020ea87aa3b099679015eb8fd21268e9295a9fa0d568edb0c0000000000000000008ebb3bf274b7ab7f299279f3086dd59f5d45e4dc116db4d470c38dd2ff2b2c8f5b65425ed41a12174bd195f8",
39 | "hash": "4755b42cb9dcc5106d4ec4dcc930d1ec68816e1169830b000000000000000000",
40 | "height": 616903,
41 | "prevhash": "ea87aa3b099679015eb8fd21268e9295a9fa0d568edb0c000000000000000000",
42 | "merkle_root": "8ebb3bf274b7ab7f299279f3086dd59f5d45e4dc116db4d470c38dd2ff2b2c8f"
43 | },
44 | {
45 | "raw": "0000c0204755b42cb9dcc5106d4ec4dcc930d1ec68816e1169830b00000000000000000040fd126a7fac3d39cd65b7aa87df4785fe3e585eac589a56578cb4e6a42824bdae68425ed41a1217155cc584",
46 | "hash": "ab02a0a506ed429e9d40e3a7c58f80dcdf744318c98f0d000000000000000000",
47 | "height": 616904,
48 | "prevhash": "4755b42cb9dcc5106d4ec4dcc930d1ec68816e1169830b000000000000000000",
49 | "merkle_root": "40fd126a7fac3d39cd65b7aa87df4785fe3e585eac589a56578cb4e6a42824bd"
50 | },
51 | {
52 | "raw": "00e0ff7fab02a0a506ed429e9d40e3a7c58f80dcdf744318c98f0d000000000000000000d338bb7242ca738d8cb0ef0f05c4d888c52eb5a075041ffafbabc3268f041544166b425ed41a12178cd64a96",
53 | "hash": "0eb5e17dccf81445c88645f95f7ea9dd9e26d651d56209000000000000000000",
54 | "height": 616905,
55 | "prevhash": "ab02a0a506ed429e9d40e3a7c58f80dcdf744318c98f0d000000000000000000",
56 | "merkle_root": "d338bb7242ca738d8cb0ef0f05c4d888c52eb5a075041ffafbabc3268f041544"
57 | },
58 | {
59 | "raw": "00c0ff3f0eb5e17dccf81445c88645f95f7ea9dd9e26d651d5620900000000000000000064b53b8a3e102a1b7e7cbb245dd1339e87d34abcea730009bd2a2dca93683a71426f425ed41a12177b11d4dd",
60 | "hash": "ddfa3cf805f8de7e520b4ebaa6f18ac58d8d7a462ade11000000000000000000",
61 | "height": 616906,
62 | "prevhash": "0eb5e17dccf81445c88645f95f7ea9dd9e26d651d56209000000000000000000",
63 | "merkle_root": "64b53b8a3e102a1b7e7cbb245dd1339e87d34abcea730009bd2a2dca93683a71"
64 | },
65 | {
66 | "raw": "00e00020ddfa3cf805f8de7e520b4ebaa6f18ac58d8d7a462ade110000000000000000000d3e06e5e25b12623a9d6cd9f2c0b6df31315b64934a1e9b61edcc90e9f4ae275870425ed41a1217a1371395",
67 | "hash": "bc00d40ffb1b0e8850475b0ff71d990080bb0e8203d109000000000000000000",
68 | "height": 616907,
69 | "prevhash": "ddfa3cf805f8de7e520b4ebaa6f18ac58d8d7a462ade11000000000000000000",
70 | "merkle_root": "0d3e06e5e25b12623a9d6cd9f2c0b6df31315b64934a1e9b61edcc90e9f4ae27"
71 | },
72 | {
73 | "raw": "0000c020bc00d40ffb1b0e8850475b0ff71d990080bb0e8203d1090000000000000000008a317b377cc53010ed4c741bd6bcea5fe6748665a6a9374510ff77e5cdfac7e3b971425ed41a12174334a315",
74 | "hash": "f8d0a038bfe4027e5de3b6bf07262122636fd2916d7503000000000000000000",
75 | "height": 616908,
76 | "prevhash": "bc00d40ffb1b0e8850475b0ff71d990080bb0e8203d109000000000000000000",
77 | "merkle_root": "8a317b377cc53010ed4c741bd6bcea5fe6748665a6a9374510ff77e5cdfac7e3"
78 | }
79 | ]
80 |
--------------------------------------------------------------------------------
/golang/x/relay/keeper/chain_test.go:
--------------------------------------------------------------------------------
1 | package keeper
2 |
3 | import (
4 | "bytes"
5 |
6 | sdk "github.com/cosmos/cosmos-sdk/types"
7 | "github.com/summa-tx/relays/golang/x/relay/types"
8 | )
9 |
10 | func (s *KeeperSuite) TestEmitReorg() {
11 | headers := s.Fixtures.HeaderTestCases.ValidateChain[0].Headers
12 | s.Keeper.emitReorg(s.Context, headers[0].Hash, headers[1].Hash, headers[2].Hash)
13 |
14 | events := s.Context.EventManager().Events()
15 | e := events[0]
16 | s.Equal("reorg", e.Type)
17 | }
18 |
19 | func (s *KeeperSuite) TestGetDigestByStoreKey() {
20 | wrongLenDigest := bytes.Repeat([]byte{0}, 31)
21 | key := "bad-digest"
22 |
23 | store := s.Keeper.getChainStore(s.Context)
24 | store.Set([]byte(key), wrongLenDigest)
25 |
26 | _, err := s.Keeper.getDigestByStoreKey(s.Context, key)
27 | s.Equal(sdk.CodeType(types.BadHash256Digest), err.Code())
28 | }
29 |
30 | func (s *KeeperSuite) TestGetBestKnownDigest() {
31 | digest := s.Fixtures.HeaderTestCases.ValidateChain[0].Headers[0].Hash
32 | s.Keeper.setBestKnownDigest(s.Context, digest)
33 | bestKnown, _ := s.Keeper.GetBestKnownDigest(s.Context)
34 | s.Equal(digest, bestKnown)
35 | }
36 |
37 | func (s *KeeperSuite) TestGetLastReorgLCA() {
38 | digest := s.Fixtures.HeaderTestCases.ValidateChain[0].Headers[0].Hash
39 | s.Keeper.setLastReorgLCA(s.Context, digest)
40 | lca, _ := s.Keeper.GetLastReorgLCA(s.Context)
41 | s.Equal(digest, lca)
42 | }
43 |
44 | func (s *KeeperSuite) TestIsMostRecentCommonAncestor() {
45 | tv := s.Fixtures.ChainTestCases.IsMostRecentCA
46 | pre := tv.PreRetargetChain
47 | post := tv.PostRetargetChain
48 |
49 | var postWithOrphan []types.BitcoinHeader
50 | postWithOrphan = append(postWithOrphan, post[:len(post)-2]...)
51 | postWithOrphan = append(postWithOrphan, tv.Orphan)
52 |
53 | err := s.Keeper.SetGenesisState(s.Context, tv.Genesis, tv.OldPeriodStart)
54 | s.SDKNil(err)
55 |
56 | err = s.Keeper.IngestHeaderChain(s.Context, pre)
57 | s.SDKNil(err)
58 | err = s.Keeper.IngestDifficultyChange(s.Context, tv.OldPeriodStart.Hash, post)
59 | s.SDKNil(err)
60 | err = s.Keeper.IngestDifficultyChange(s.Context, tv.OldPeriodStart.Hash, postWithOrphan)
61 | s.SDKNil(err)
62 |
63 | for i := range tv.TestCases {
64 | isMostRecent := s.Keeper.IsMostRecentCommonAncestor(
65 | s.Context,
66 | tv.TestCases[i].Ancestor,
67 | tv.TestCases[i].Left,
68 | tv.TestCases[i].Right,
69 | tv.TestCases[i].Limit)
70 | s.Equal(tv.TestCases[i].Output, isMostRecent)
71 | }
72 | }
73 |
74 | func (s *KeeperSuite) TestHeaviestFromAncestor() {
75 | tv := s.Fixtures.ChainTestCases.HeaviestFromAncestor
76 | headers := tv.Headers[0:8]
77 | headersWithMain := tv.Headers[0:9]
78 |
79 | var headersWithOrphan []types.BitcoinHeader
80 | headersWithOrphan = append(headersWithOrphan, headers...)
81 | headersWithOrphan = append(headersWithOrphan, tv.Orphan)
82 |
83 | s.Keeper.ingestHeader(s.Context, tv.Genesis)
84 | err := s.Keeper.IngestHeaderChain(s.Context, headersWithMain)
85 | s.SDKNil(err)
86 | err = s.Keeper.IngestHeaderChain(s.Context, headersWithOrphan)
87 | s.SDKNil(err)
88 |
89 | for i := range tv.TestCases {
90 | heaviest, err := s.Keeper.HeaviestFromAncestor(
91 | s.Context,
92 | tv.TestCases[i].Ancestor,
93 | tv.TestCases[i].CurrentBest,
94 | tv.TestCases[i].NewBest,
95 | tv.TestCases[i].Limit)
96 | if tv.TestCases[i].Error == 0 {
97 | s.SDKNil(err)
98 | s.Equal(heaviest, tv.TestCases[i].Output)
99 | } else {
100 | s.Equal(sdk.CodeType(tv.TestCases[i].Error), err.Code())
101 | }
102 | }
103 | }
104 |
105 | func (s *KeeperSuite) TestMarkNewHeaviest() {
106 | tv := s.Fixtures.ChainTestCases.IsMostRecentCA
107 | tc := s.Fixtures.ChainTestCases.MarkNewHeaviest
108 | pre := tv.PreRetargetChain
109 | post := tv.PostRetargetChain
110 | var postWithOrphan []types.BitcoinHeader
111 | postWithOrphan = append(postWithOrphan, post[:len(post)-2]...)
112 | postWithOrphan = append(postWithOrphan, tv.Orphan)
113 |
114 | err := s.Keeper.SetGenesisState(s.Context, tv.Genesis, tv.OldPeriodStart)
115 | s.SDKNil(err)
116 |
117 | err = s.Keeper.MarkNewHeaviest(
118 | s.Context,
119 | tv.Genesis.Hash,
120 | pre[0].Raw,
121 | pre[1].Raw,
122 | 10,
123 | )
124 | s.Equal(sdk.CodeType(types.UnknownBlock), err.Code())
125 |
126 | err = s.Keeper.IngestHeaderChain(s.Context, pre)
127 | s.SDKNil(err)
128 | err = s.Keeper.IngestDifficultyChange(s.Context, tv.OldPeriodStart.Hash, post)
129 | s.SDKNil(err)
130 | err = s.Keeper.IngestDifficultyChange(s.Context, tv.OldPeriodStart.Hash, postWithOrphan)
131 | s.SDKNil(err)
132 |
133 | 	// first call succeeds; the repeat errors because the ancestor is no longer the heaviest common ancestor
134 | err = s.Keeper.MarkNewHeaviest(
135 | s.Context,
136 | tv.Genesis.Hash,
137 | tv.Genesis.Raw,
138 | pre[0].Raw,
139 | 10,
140 | )
141 | s.SDKNil(err)
142 | err = s.Keeper.MarkNewHeaviest(
143 | s.Context,
144 | tv.Genesis.Hash,
145 | pre[0].Raw,
146 | pre[1].Raw,
147 | 10,
148 | )
149 | s.Equal(sdk.CodeType(types.NotHeaviestAncestor), err.Code())
150 |
151 | for i := range tc {
152 | s.Keeper.setBestKnownDigest(s.Context, tc[i].BestKnownDigest)
153 | // updates the best known and emits an event
154 | err = s.Keeper.MarkNewHeaviest(
155 | s.Context,
156 | tc[i].Ancestor,
157 | tc[i].CurrentBest,
158 | tc[i].NewBest,
159 | tc[i].Limit,
160 | )
161 |
162 | if tc[i].Error == 0 {
163 | s.SDKNil(err)
164 | events := s.Context.EventManager().Events()
165 | e := events[i]
166 | s.Equal(tc[i].Output, e.Type)
167 | } else {
168 | s.Equal(sdk.CodeType(tc[i].Error), err.Code())
169 | }
170 | }
171 | }
172 |
--------------------------------------------------------------------------------
/golang/dashboard/README.md:
--------------------------------------------------------------------------------
1 | # cosmos-relay-dashboard
2 |
3 | ## Description
4 |
5 | The dashboard displays the Cosmos Relay chain data and verifies it against an external Bitcoin explorer (currently BlockStream).
6 |
7 | --------------------------
8 |
9 | ## Getting Started
10 |
11 | ### Start Cosmos Relay
12 |
13 | The dashboard connects with a locally run `relay`.
14 |
15 | 1. If you don't have Go installed, install Go.
16 | 2. If you haven't used `go mod` before, add this to your environment:
17 |
18 | ```bash
19 | $ mkdir -p $HOME/go/bin
20 | $ echo "export GOBIN=\$GOPATH/bin" >> ~/.bash_profile
21 | $ echo "export PATH=\$PATH:\$GOBIN" >> ~/.bash_profile
22 | $ source ~/.bash_profile
23 | ```
24 |
25 | > *Troubleshooting tip*
26 | >
27 | > If, after following steps 3 and 4 below, you are not able to run `make install` or `make init` successfully, then try replacing the above lines with the following:
28 | >
29 | >```bash
30 | > $ export GOPATH=$HOME/go
31 | > $ export PATH=$GOPATH/bin:$PATH
32 | > ```
33 | >
34 | > Don't forget to run:
35 | > ```bash
36 | > $ source ~/.bash_profile
37 | > ```
38 | >
39 | > You may even need to restart your terminal.
40 |
41 | 3. Make sure you are in the `relays/golang` directory (one level up from here) and install the app into your `$GOBIN`.
42 |
43 | ```bash
44 | $ make install
45 | ```
46 |
47 | 4. Initialize a new chain for testing.
48 |
49 | ```bash
50 | $ make init
51 | ```
52 |
53 | 5. In the same folder, but in another terminal window, start the REST server with `relaycli rest-server`. This makes the relay application's REST routes available at `http://localhost:1317`.
54 |
55 | ```bash
56 | $ relaycli rest-server --chain-id relay
57 | ```
58 |
59 | All routes are at `/relay/${route}`. For a list of available routes, see the golang README located at `relays/golang/README.md`.
60 |
61 | [Relay Chain Instructions](https://github.com/summa-tx/relays/blob/master/golang/scripts/README.md).
62 |
63 | ### Dashboard
64 |
65 | 1. Install dependencies (from `relays/golang/dashboard`).
66 |
67 | ```bash
68 | $ npm install
69 | ```
70 |
71 | 2. Start dashboard.
72 |
73 | ```bash
74 | $ npm run serve
75 | ```
76 |
77 | View at http://localhost:8080 in your browser.
78 |
79 | --------------------------
80 |
81 | ## Development
82 |
83 | ### Set Environment Variables
84 |
85 | If no `.env` file is present, defaults are used. See `/src/config.js`.
86 |
87 | ### Commands
88 |
89 | #### Start dashboard
90 |
91 | Compiles and hot-reloads.
92 |
93 | ```sh
94 | $ npm run serve
95 | ```
96 |
97 | #### Run your tests
98 |
99 | ```sh
100 | $ npm run test
101 | ```
102 |
103 | #### Lints and fixes files
104 |
105 | ```sh
106 | $ npm run lint
107 | ```
108 |
109 | #### Compiles and minifies for production
110 |
111 | ```sh
112 | $ npm run build
113 | ```
114 |
115 | ### Customize configuration
116 |
117 | See [Configuration Reference](https://cli.vuejs.org/config/).
118 |
119 | --------------------------
120 |
121 | ## Dashboard Overview: How Things Work
122 |
123 | The dashboard uses two sources: the relay and an external source. The Best Known Digest and the Last Reorg Common Ancestor are polled from the relay every 2 minutes. Information from the external source is polled every 3 minutes.
124 |
125 | ### Current Block
126 |
127 | The user wants to know about new headers. In order to do that, we:
128 |
129 | 1. Get the best tip (most recent block height) from an external source.
130 | 2. Display the height, hash, and timestamp.
131 |
132 | ### Best Known Digest
133 |
134 | This is the most current and best block the relay knows about. It is updated approximately every 5 blocks, so it will usually be behind the newest header.
135 |
136 | 1. Poll `/relay/getbestdigest`.
137 | 2. Store digest and display the height, hash, and timestamp.
138 |
139 | This is conceptually similar to GitHub tags.
140 |
141 | ### Last (Reorg) Common Ancestor (LCA)
142 |
143 | This is the latest block that is in the history of both the current best known digest and the previous best known digest.
144 |
145 | 1. Poll `/relay/getlastreorglca`.
146 | 2. Store LCA and display the height, hash, and timestamp.
147 |
148 | ### Health Checks and Verification
149 |
150 | The dashboard keeps track of and displays the following:
151 |
152 | * **lastComms**: When was the last successful communication made?
153 | * **lastComms.relay** - Last successful communication from the relay.
154 | * **lastComms.external** - Last successful communication from the external source.
155 |
156 | * **blockDifference**: What is the difference in blocks between the relay and the external source?
157 |
158 | Health pulses are displayed as `TIME in MINUTES ago`.
159 |
160 | ### Network Names
161 |
162 | Displays the network for the relay and for the external source. For these purposes, the relay will always be a locally run relay, and the external source shows mainnet blocks.
163 |
164 | --------------------------
165 |
166 | ## Relay
167 |
168 | The following is mainly for informational purposes, rather than development.
169 |
170 | ### Relay updates
171 |
172 | The relay is updated ~every 5 blocks.
173 |
174 | **Advance chaining:**
175 | Suppose this happens:
176 |
177 | ```
178 | BEST
179 | V
180 | 500 <- 501 <- 502 <- 503a <- 504 <- 505
181 | ^
182 | | --- 503b <- 504b <- 505b <- 506b <- 507b
183 | ```
184 |
185 | we would update to this:
186 | ```
187 | LCA
188 | V
189 | 500 <- 501 <- 502 <- 503a <- 504 <- 505
190 | ^
191 | | --- 503b <- 504b <- 505b <- 506b <- 507b
192 | ^
193 | BEST
194 | ```
195 |
196 | --------------------------
197 |
--------------------------------------------------------------------------------
/maintainer/maintainer/relay_abi.py:
--------------------------------------------------------------------------------
1 | # flake8: noqa
2 |
3 | true = True
4 | false = False
5 | null = None
6 |
7 |
8 | ABI = \
9 | [{"constant":true,"inputs":[],"name":"getCurrentEpochDifficulty","outputs":[{"name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"getBestKnownDigest","outputs":[{"name":"","type":"bytes32"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"nextID","outputs":[{"name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"latestValidatedTx","outputs":[{"name":"","type":"bytes32"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"getPrevEpochDifficulty","outputs":[{"name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[{"name":"_digest","type":"bytes32"},{"name":"_offset","type":"uint256"}],"name":"findAncestor","outputs":[{"name":"","type":"bytes32"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[{"name":"_digest","type":"bytes32"}],"name":"findHeight","outputs":[{"name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":false,"inputs":[{"name":"_anchor","type":"bytes"},{"name":"_headers","type":"bytes"}],"name":"addHeaders","outputs":[{"name":"","type":"bool"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":true,"inputs":[],"name":"HEIGHT_INTERVAL","outputs":[{"name":"","type":"uint32"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":false,"inputs":[{"name":"_ancestor","type":"bytes32"},{"name":"_currentBest","type":"bytes"},{"name":"_newBest","type":"bytes"},{"name":"_limit","type":"uint256"}],"name":"markNewHeaviest","outputs":[{"name":"","type":"bool"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"_oldPeriodStartHeader","type":"bytes"},{"name":"_oldPeriodEndHeader","type":"bytes"},{"name":"_headers","type":"bytes"}],"name":"addHeadersWithRetarget","outputs":[{"name":"","type":"bool"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":true,"inputs":[],"name":"BASE_COST","outputs":[{"name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"remoteGasAllowance","outputs":[{"name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[{"name":"_ancestor","type":"bytes32"},{"name":"_descendant","type":"bytes32"},{"name":"_limit","type":"uint256"}],"name":"isAncestor","outputs":[{"name":"","type":"bool"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"getLastReorgCommonAncestor","outputs":[{"name":"","type":"bytes32"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"getRelayGenesis","outputs":[{"name":"","type":"bytes32"}],"payable":false,"stateMutability":"view","type":"function"},{"inputs":[{"name":"_genesisHeader","type":"bytes"},{"name":"_height","type":"uint256"},{"name":"_periodStart","type":"bytes32"},{"name":"_firstID","type":"uint256"}],"payable":false,"stateMutability":"nonpayable","type":"constructor"},{"anonymous":false,"inputs":[{"indexed":true,"name":"_first","type":"bytes32"},{"indexed":true,"name":"_last","type":"bytes32"}],"name":"Extension","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"name":"_from","type":"bytes32"},{"indexed":tru
e,"name":"_to","type":"bytes32"},{"indexed":true,"name":"_gcd","type":"bytes32"}],"name":"NewTip","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"name":"_requester","type":"address"},{"indexed":true,"name":"_requestID","type":"uint256"},{"indexed":false,"name":"_paysValue","type":"uint64"},{"indexed":false,"name":"_spends","type":"bytes"},{"indexed":false,"name":"_pays","type":"bytes"}],"name":"NewProofRequest","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"name":"_requestID","type":"uint256"}],"name":"RequestClosed","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"name":"_txid","type":"bytes32"},{"indexed":true,"name":"_requestID","type":"uint256"}],"name":"RequestFilled","type":"event"},{"constant":false,"inputs":[{"name":"_requestID","type":"uint256"}],"name":"cancelRequest","outputs":[{"name":"","type":"bool"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":true,"inputs":[],"name":"getLatestValidatedTx","outputs":[{"name":"","type":"bytes32"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[{"name":"_requestID","type":"uint256"}],"name":"getRequest","outputs":[{"name":"spends","type":"bytes32"},{"name":"pays","type":"bytes32"},{"name":"paysValue","type":"uint64"},{"name":"state","type":"uint8"},{"name":"consumer","type":"address"},{"name":"owner","type":"address"},{"name":"numConfs","type":"uint8"},{"name":"notBefore","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":false,"inputs":[{"name":"_spends","type":"bytes"},{"name":"_pays","type":"bytes"},{"name":"_paysValue","type":"uint64"},{"name":"_consumer","type":"address"},{"name":"_numConfs","type":"uint8"},{"name":"_notBefore","type":"uint256"}],"name":"request","outputs":[{"name":"","type":"uint256"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"_header","type":"bytes"},{"name":"_proof","type":"bytes"},{"name":"_version","type":"bytes4"},{"name":"_locktime","type":"bytes4"},{"name":"_index","type":"uint256"},{"name":"_reqIndices","type":"uint16"},{"name":"_vin","type":"bytes"},{"name":"_vout","type":"bytes"},{"name":"_requestID","type":"uint256"}],"name":"provideProof","outputs":[{"name":"","type":"bool"}],"payable":false,"stateMutability":"nonpayable","type":"function"}]
10 |
--------------------------------------------------------------------------------
/maintainer/maintainer/ethereum/shared.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 | import logging
3 |
4 | from ether import calldata, ethrpc
5 |
6 | from maintainer import config
7 |
8 | from ether.ether_types import Receipt
9 | from ether.transactions import UnsignedEthTx
10 | from typing import Any, cast, Dict, Iterator, List, Optional
11 |
12 | logger = logging.getLogger('root.summa_relay.shared_eth')
13 |
14 |
15 | GWEI = 1000000000
16 | DEFAULT_GAS = 500_000
17 | DEFAULT_GAS_PRICE = 2 * GWEI
18 |
19 | CONNECTION: ethrpc.BaseRPC
20 | NONCE: Iterator[int] # yields ints, takes no sends
21 |
22 |
23 | def _nonce(i: int) -> Iterator[int]:
24 | '''Infinite generator for nonces'''
25 | index = i
26 | while 1:
27 | yield index
28 | index += 1
29 |
30 |
31 | async def init() -> None:
32 | '''Set up a connection to the interwebs'''
33 | global CONNECTION
34 |
35 | c = config.get()
36 | network = c['NETWORK']
37 | project_id = c['PROJECT_ID']
38 | uri = c['ETHER_URL']
39 | force_https = project_id != ''
40 |
41 | logger.info(f'contract is {c["CONTRACT"]}')
42 |
43 | CONNECTION = ethrpc.get_client(
44 | network=network,
45 | infura_key=project_id,
46 | uri=uri,
47 | logger=logger.getChild('ethrpc'),
48 | force_https=force_https)
49 |
50 | await CONNECTION.open()
51 |
52 | if c['PRIVKEY'] is None and c['GETH_UNLOCK'] is None:
53 | logger.warn(
54 | 'No ethereum privkey found in env config. Txns will error')
55 | else:
56 | global NONCE
57 | address = cast(str, c['ETH_ADDRESS'])
58 | n = await CONNECTION.get_nonce(address)
59 | NONCE = _nonce(n)
60 | logger.info(f'nonce is {n}')
61 |
62 |
63 | async def close_connection() -> None:
64 | try:
65 | global CONNECTION
66 | await CONNECTION.close()
67 | except NameError:
68 | pass
69 |
70 |
71 | async def sign_and_broadcast(
72 | tx: UnsignedEthTx,
73 | ignore_result: bool = False) -> None:
74 | '''Sign an ethereum transaction and broadcast it to the network'''
75 | c = config.get()
76 | privkey = c['PRIVKEY']
77 | address = c['ETH_ADDRESS']
78 | unlock_code = c['GETH_UNLOCK']
79 |
80 | if privkey is None and unlock_code is None:
81 | raise RuntimeError('Attempted to sign tx without access to key')
82 |
83 | if privkey is None:
84 | logger.debug('signing with ether node')
85 | await CONNECTION._RPC(
86 | 'personal_unlockAccount',
87 | [address, unlock_code])
88 | tx_id = await CONNECTION.send_transaction(cast(str, address), tx)
89 | else:
90 | logger.debug('signing with local key')
91 | signed = tx.sign(cast(bytes, privkey))
92 | serialized = signed.serialize_hex()
93 | tx_id = await CONNECTION.broadcast(serialized)
94 |
95 | logger.info(f'dispatched transaction {tx_id}')
96 | if not ignore_result:
97 | asyncio.ensure_future(_track_tx_result(tx_id))
98 |
99 |
100 | def make_call_tx(
101 | contract: str,
102 | abi: List[Dict[str, Any]],
103 | method: str,
104 | args: List[Any],
105 | nonce: int,
106 | value: int = 0,
107 | gas: int = DEFAULT_GAS,
108 | gas_price: int = DEFAULT_GAS_PRICE) -> UnsignedEthTx:
109 | '''
110 |     Builds an unsigned transaction that calls a contract method
111 | Args:
112 | contract (str): address of contract being called
113 | abi (dict): contract ABI
114 | method (str): the name of the method to call
115 | args (list): the arguments to the method call
116 | nonce (int): the account nonce for the txn
117 | value (int): ether in wei
118 | gas_price (int): the price of gas in wei or gwei
119 | Returns:
120 | (UnsignedEthTx): the unsigned tx object
121 | '''
122 | logger.debug(f'making tx call {method} on {contract} '
123 | f'with value {value} and {len(args)} args')
124 |
125 | gas_price = _adjust_gas_price(gas_price)
126 | chainId = config.get()['CHAIN_ID']
127 |
128 | data = calldata.call(
129 | method,
130 | args,
131 | abi)
132 |
133 | txn = UnsignedEthTx(
134 | to=contract,
135 | value=value,
136 | gas=gas,
137 | gasPrice=gas_price,
138 | nonce=nonce,
139 | data=data,
140 | chainId=chainId)
141 |
142 | return txn
143 |
144 |
145 | def _adjust_gas_price(gas_price: int) -> int:
146 | '''
147 | We accept gas price in GWEI or in WEI.
148 | This adjusts, and ensures we error if it's high.
149 | Args:
150 | gas_price (int): the user-provided gas price
151 | Returns:
152 | (int): the adjusted price
153 | '''
154 | if gas_price < GWEI:
155 | gas_price = gas_price * GWEI
156 | if gas_price > 1000 * GWEI:
157 | logger.error('rejecting high gas price')
158 | raise ValueError(
159 | 'very high gas price detected: {} gwei'.format(gas_price / GWEI))
160 | return gas_price
161 |
162 |
163 | async def _track_tx_result(tx_id: str) -> None:
164 |     '''Keep track of the result of a transaction by polling every 30 seconds'''
165 | receipt_or_none: Optional[Receipt] = None
166 |
167 | for _ in range(20):
168 | await asyncio.sleep(30)
169 | receipt_or_none = await CONNECTION.get_tx_receipt(tx_id)
170 | if receipt_or_none is not None:
171 | break
172 |
173 | if receipt_or_none is None:
174 | raise RuntimeError(f'No receipt after 10 minutes: {tx_id}')
175 |
176 | receipt = cast(Receipt, receipt_or_none)
177 | logger.info(f'Receipt for {tx_id} status is {receipt["status"]}')
178 |
179 | if receipt['status'] != '0x1':
180 | raise RuntimeError(f'Failed tx: {receipt["transactionHash"]}')
181 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | ### Summa Relay
2 |
3 | This is a Bitcoin Relay. It uses 1 + 1/n slots per header relayed (n is
4 | currently 4), and 2 slots to externalize useful information (best chain tip and
5 | best shared ancestor of latest reorg).
6 |
7 | Implementations are available in Solidity (for EVM chains) and Golang using the
8 | cosmos-sdk framework.
9 |
10 | ### How does it work?
11 |
12 | The core idea behind the relay is to minimize storage costs by increasing
13 | calldata costs. Rather than storing headers, the relay stores the
14 | `hashPrevBlock` field of each header and the height of every nth header. Should
15 | the relay need to reference information in old headers (like the difficulty),
16 | the header data is passed to the relay again, and validated against known
17 | `hashPrevBlock` links. This allows the relay to check that newly submitted
18 | blocks are valid extensions of existing blocks, without storing all past header
19 | information.
20 |
21 | As opposed to other relays, we separate the function of the relay into two
22 | categories: "learning about new blocks" and "following the best chain tip."
23 | Users may add new blocks in groups of at least 5 by calling `addHeaders`. If the
24 | block slice includes a difficulty retarget, users are required to call
25 | `addHeadersWithRetarget`, which performs additional validation. The relay does
26 | not update its tip unless it is specifically requested to do so by a user. The
27 | user must call `markNewHeaviest` with the new heaviest, the old heaviest
28 | header, and the digest of their most recent common ancestor (which may be the
29 | old heaviest header).
30 |
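In the Golang implementation, for example, the corresponding keeper call takes the ancestor digest, the raw old-best header, the raw new-best header, and a traversal limit. The argument order below follows the keeper tests in `golang/x/relay/keeper/chain_test.go`; the variable names are illustrative.

```go
// ancestorDigest: most recent common ancestor (may be the old best's digest)
// currentBestRaw / newBestRaw: raw 80-byte headers of the old and new tips
// 10: traversal limit for the ancestry checks
if err := keeper.MarkNewHeaviest(ctx, ancestorDigest, currentBestRaw, newBestRaw, 10); err != nil {
	// Rejected: e.g. ancestorDigest is not the most recent common ancestor,
	// or the proposed tip is not actually heavier than the current best.
	return err
}
```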
31 | As part of the process, the relay externalizes the most recent common ancestor,
32 | which is to say, the heaviest header that both old and new heaviest tip
33 | confirm. This is a metric of "subjective finality" for that block. During
34 | normal operation without reorgs it lags behind the tip by 5 blocks. During
35 | reorgs, it is the shared base of the competing branches (and as such may move
36 | backwards!). This indicates that competing sets of miners both viewed it as
37 | subjectively finalized. As such, it is a reasonable source of finalization
38 | information for relay-consuming smart contracts.
39 |
40 | This model provides large gas savings compared to previous relay designs (TODO:
41 | benchmarking). It also becomes especially attractive if EIP2028 activates,
42 | since that proposal reduces calldata gas costs.
43 |
44 | ### A Note on Endianness
45 |
46 | Bitcoin internally uses little-endian representations of integers and digests.
47 | Block explorers and most user-facing applications use the more common
48 | big-endian representation. To minimize order swaps and prevent confusion, all
49 | our tooling uses the LE representation exclusively. If using the JS, Rust,
50 | Golang, or Python tooling in [bitcoin-spv](http://bitcoin-spv.com), everything
51 | will Just Work. If writing custom software using data from block explorers,
52 | full nodes, or other data sources, make sure digests are LE before submitting
53 | to the relay.
54 |
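For example, a digest copied from a block explorer can be byte-reversed before submission. The following standalone Go snippet is a hypothetical helper, not part of this repo's tooling; the example value is the big-endian rendering of the first header in the golang test fixtures.

```go
package main

import (
	"encoding/hex"
	"fmt"
)

// reverse returns a byte-reversed copy of b.
func reverse(b []byte) []byte {
	out := make([]byte, len(b))
	for i := range b {
		out[len(b)-1-i] = b[i]
	}
	return out
}

func main() {
	// Big-endian digest as a block explorer would display it
	// (block 616898 from golang/scripts/json_data/2_ingest_headers.json).
	be := "00000000000000000009af0e65a9578b47d75c0aa8b45480a0d63a1ddb23997f"
	raw, err := hex.DecodeString(be)
	if err != nil {
		panic(err)
	}
	// Little-endian form expected by the relay.
	fmt.Println(hex.EncodeToString(reverse(raw)))
}
```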
55 | ### Requests and Proofs
56 |
57 | The Relay implementations here have an SPV request system built in. This allows
58 | for abstraction of the off-chain proving software. Requesters don't need to
59 | write a custom Bitcoin indexer, and existing Bitcoin indexers can work with any
60 | requester, whether it's a module, a smart contract, or a user.
61 |
62 | The relay coordinates an interaction between 3 roles:
63 | 1. Requester: creates a new SPV Proof request and designates a Handler
64 | 2. Handler: handles incoming SPV Proofs on the Requester's behalf
65 | 3. Indexer: watches requests, indexes Bitcoin, and provides SPV Proofs
66 |
67 | While implementation details differ, the architecture is simple:
68 |
69 | 1. Requesters register a request for SPV Proofs.
70 | 1. The request specifies a transaction filter and a proof handler.
71 | 1. golang: submit a `MsgRequestProof`.
72 | 1. golang CLI: `relaycli tx relay newrequest`.
73 |   1. solidity: `OnDemandSPV.request()`.
74 | 1. An event with request details is logged.
75 | 1. golang: watch for `proof_request` events.
76 | 1. solidity: subscribe to `NewProofRequest` events.
77 | 1. Indexers watch the Bitcoin chain for transactions that satisfy Requests.
78 | 1. [Example](https://github.com/summa-tx/bcoin-relaylib).
79 | 1. Indexers create an SPV Proof and submit it to the relay.
80 | 1. golang: submit a `MsgProvideProof`.
81 | 1. golang CLI: `relaycli tx relay provideproof`.
82 | 1. solidity: call `OnDemandSPV.provideProof()`.
83 | 1. The relay validates this proof.
84 | 1. If valid, the relay dispatches the tx info to the proof Handler.
85 |   1. golang: the module's `ProofHandler` routes info to the Handler.
86 |   1. solidity: the relay calls `spv()` on the handling contract.
87 |
88 | Essentially the requester is subscribing to a feed of Bitcoin transactions
89 | matching a specific filter. This filter can specify which UTXO is being spent,
90 | and/or an address that receives funds. The handler expects to receive
91 | a stream of transactions that meet the filter's specifications.
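
A rough Python sketch of that filter check, mirroring the shape of the golang
keeper's `checkRequests` (names are illustrative; filters are stored as
hash256 digests of the output script and the spent outpoint):

```python
import hashlib

EMPTY_DIGEST = bytes(32)  # an unset filter is stored as the zero digest


def hash256(data: bytes) -> bytes:
    '''Bitcoin's double-SHA256.'''
    return hashlib.sha256(hashlib.sha256(data).digest()).digest()


def satisfies_request(output_script: bytes, output_value: int,
                      spent_outpoint: bytes, request: dict) -> bool:
    '''Check one (output, input) pair of a Bitcoin tx against a request.'''
    if request['pays'] != EMPTY_DIGEST:
        if hash256(output_script) != request['pays']:
            return False
        if request['pays_value'] and output_value < request['pays_value']:
            return False
    if request['spends'] != EMPTY_DIGEST:
        if hash256(spent_outpoint) != request['spends']:
            return False
    return True
```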
92 |
93 | **Note**: Due to Solidity constraints, this filter system is unrelated to
94 | existing Bitcoin filtering systems (e.g. BIP37 & BIP157). In the future,
95 | the filter system may be upgraded to support more complex transaction
96 | descriptions.
97 |
98 | **Important**: All requests may be filled more than once. Setting a `spends`
99 | filter is NOT sufficient to prevent this, as long reorgs may cause a UTXO to be
100 | spent multiple times. There is NO WAY to ensure that only a single proof is
101 | provided, so the handler should deal with multiple proofs gracefully.
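
One graceful strategy (illustrative only) is to make the handler idempotent:
act on the first accepted proof for a request and treat later fills as no-ops
rather than errors.

```python
seen_fills: dict = {}  # request_id -> txid of the first proof acted upon


def handle_fill(request_id: int, txid: bytes) -> bool:
    '''Return True only the first time a given request is filled.'''
    if request_id in seen_fills:
        return False  # repeat fill (e.g. after a long reorg); ignore it
    seen_fills[request_id] = txid
    return True
```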
102 |
103 | ### Misc Project Notes
104 |
105 | Complete relays are available in Solidity (for EVM-based chains like Ethereum)
106 | and in Golang using the cosmos-sdk framework.
107 |
108 | The Python relay maintainer in `./maintainer/` is not thoroughly tested and does
109 | not yet support the cosmos-sdk relay.
110 |
--------------------------------------------------------------------------------
/golang/x/relay/keeper/requests.go:
--------------------------------------------------------------------------------
1 | package keeper
2 |
3 | import (
4 | "bytes"
5 | "encoding/binary"
6 | "encoding/json"
7 |
8 | btcspv "github.com/summa-tx/bitcoin-spv/golang/btcspv"
9 | "github.com/summa-tx/relays/golang/x/relay/types"
10 |
11 | sdk "github.com/cosmos/cosmos-sdk/types"
12 | )
13 |
14 | func (k Keeper) emitProofRequest(ctx sdk.Context, pays, spends []byte, paysValue uint64, id types.RequestID, origin types.Origin) {
15 | ctx.EventManager().EmitEvent(types.NewProofRequestEvent(pays, spends, paysValue, id, origin))
16 | }
17 |
18 | func (k Keeper) getRequestStore(ctx sdk.Context) sdk.KVStore {
19 | return k.getPrefixStore(ctx, types.RequestStorePrefix)
20 | }
21 |
22 | func (k Keeper) hasRequest(ctx sdk.Context, id types.RequestID) bool {
23 | store := k.getRequestStore(ctx)
24 | return store.Has(id[:])
25 | }
26 |
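// setRequest hashes the spends and pays filters, stores a new active
// ProofRequest under the next available ID, and emits a proof_request event.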
27 | func (k Keeper) setRequest(ctx sdk.Context, spends []byte, pays []byte, paysValue uint64, numConfs uint8, origin types.Origin, action types.HexBytes) sdk.Error {
28 | store := k.getRequestStore(ctx)
29 |
30 | var spendsDigest types.Hash256Digest
31 | if len(spends) == 0 {
32 | spendsDigest = types.Hash256Digest{}
33 | } else {
34 | spendsDigest = btcspv.Hash256(spends)
35 | }
36 |
37 | var paysDigest types.Hash256Digest
38 | if len(pays) == 0 {
39 | paysDigest = types.Hash256Digest{}
40 | } else {
41 | paysDigest = btcspv.Hash256(pays)
42 | }
43 |
44 | request := types.ProofRequest{
45 | Spends: spendsDigest,
46 | Pays: paysDigest,
47 | PaysValue: paysValue,
48 | ActiveState: true,
49 | NumConfs: numConfs,
50 | Origin: origin,
51 | Action: action,
52 | }
53 |
54 | // When a new request comes in, get the id and use it to store request
55 | id, err := k.getNextID(ctx)
56 | if err != nil {
57 | return err
58 | }
59 |
60 | buf, marshalErr := json.Marshal(request)
61 | if marshalErr != nil {
62 | return types.ErrMarshalJSON(types.DefaultCodespace)
63 | }
64 | store.Set(id[:], buf)
65 |
66 | // Increment the ID
67 | incrementErr := k.incrementID(ctx)
68 | if incrementErr != nil {
69 | return incrementErr
70 | }
71 |
72 | // Emit Proof Request event
73 | k.emitProofRequest(ctx, pays, spends, request.PaysValue, id, origin)
74 | return nil
75 | }
76 |
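// setRequestState sets a stored request's ActiveState flag and re-stores it.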
77 | func (k Keeper) setRequestState(ctx sdk.Context, requestID types.RequestID, active bool) sdk.Error {
78 | store := k.getRequestStore(ctx)
79 | request, err := k.getRequest(ctx, requestID)
80 | if err != nil {
81 | return err
82 | }
83 |
84 | request.ActiveState = active
85 |
86 | buf, marshalErr := json.Marshal(request)
87 | if marshalErr != nil {
88 | return types.ErrMarshalJSON(types.DefaultCodespace)
89 | }
90 | store.Set(requestID[:], buf)
91 | return nil
92 | }
93 |
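// getRequest retrieves a stored ProofRequest by ID,
// returning an unknown-request error if none exists.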
94 | func (k Keeper) getRequest(ctx sdk.Context, id types.RequestID) (types.ProofRequest, sdk.Error) {
95 | store := k.getRequestStore(ctx)
96 |
97 | hasRequest := k.hasRequest(ctx, id)
98 | if !hasRequest {
99 | return types.ProofRequest{}, types.ErrUnknownRequest(types.DefaultCodespace)
100 | }
101 |
102 | buf := store.Get(id[:])
103 |
104 | var request types.ProofRequest
105 | jsonErr := json.Unmarshal(buf, &request)
106 | if jsonErr != nil {
107 | return types.ProofRequest{}, types.ErrExternal(types.DefaultCodespace, jsonErr)
108 | }
109 | return request, nil
110 | }
111 |
112 | // incrementID increments the ID under which the next request will be stored.
113 | // The ID is kept in the store as 8 big-endian bytes.
114 | func (k Keeper) incrementID(ctx sdk.Context) sdk.Error {
115 | store := k.getRequestStore(ctx)
116 | // get id
117 | id, err := k.getNextID(ctx)
118 | if err != nil {
119 | return err
120 | }
121 | // convert id to uint64 and add 1
122 | newID := binary.BigEndian.Uint64(id[:]) + 1
123 | // convert back to bytes and store
124 | b := make([]byte, 8)
125 | binary.BigEndian.PutUint64(b, newID)
126 | store.Set([]byte(types.RequestIDTag), b)
127 | // if no errors, return nil
128 | return nil
129 | }
130 |
131 | // getNextID retrieves the ID. The ID is incremented after storing a request,
132 | // so this returns the next ID to be used.
133 | func (k Keeper) getNextID(ctx sdk.Context) (types.RequestID, sdk.Error) {
134 | store := k.getRequestStore(ctx)
135 | idTag := []byte(types.RequestIDTag)
136 | if !store.Has(idTag) {
137 | store.Set(idTag, bytes.Repeat([]byte{0}, 8))
138 | }
139 | id := store.Get(idTag)
140 | newID, err := types.NewRequestID(id)
141 | if err != nil {
142 | return types.RequestID{}, err
143 | }
144 | return newID, nil
145 | }
146 |
147 | // checkRequests checks that the indexed output and input of a tx's vout/vin satisfy the stored request
148 | func (k Keeper) checkRequests(ctx sdk.Context, inputIndex, outputIndex uint32, vin []byte, vout []byte, requestID types.RequestID) sdk.Error {
149 | if !btcspv.ValidateVin(vin) {
150 | return types.ErrInvalidVin(types.DefaultCodespace)
151 | }
152 | if !btcspv.ValidateVout(vout) {
153 | return types.ErrInvalidVout(types.DefaultCodespace)
154 | }
155 |
156 | req, reqErr := k.getRequest(ctx, requestID)
157 | if reqErr != nil {
158 | return reqErr
159 | }
160 | if !req.ActiveState {
161 | return types.ErrClosedRequest(types.DefaultCodespace)
162 | }
163 |
164 | hasPays := req.Pays != btcspv.Hash256Digest{}
165 | if hasPays {
166 | // We can ignore this error because we know that ValidateVout passed
167 | out, _ := btcspv.ExtractOutputAtIndex(vout, uint(outputIndex))
168 | // hash the output script (out[8:])
169 | outDigest := btcspv.Hash256(out[8:])
170 | if outDigest != req.Pays {
171 | return types.ErrRequestPays(types.DefaultCodespace, requestID)
172 | }
173 | paysValue := req.PaysValue
174 | if paysValue != 0 && uint64(btcspv.ExtractValue(out)) < paysValue {
175 | return types.ErrRequestValue(types.DefaultCodespace, requestID)
176 | }
177 | }
178 |
179 | hasSpends := req.Spends != btcspv.Hash256Digest{}
180 | if hasSpends {
181 | in, err := btcspv.ExtractInputAtIndex(vin, uint(inputIndex))
182 | if err != nil {
183 | return types.FromBTCSPVError(types.DefaultCodespace, err)
184 | }
185 | outpoint := btcspv.ExtractOutpoint(in)
186 | inDigest := btcspv.Hash256(outpoint)
187 | 		if inDigest != req.Spends {
188 | return types.ErrRequestSpends(types.DefaultCodespace, requestID)
189 | }
190 | }
191 | return nil
192 | }
193 |
--------------------------------------------------------------------------------
/golang/x/relay/keeper/request_test.go:
--------------------------------------------------------------------------------
1 | package keeper
2 |
3 | import (
4 | "bytes"
5 |
6 | sdk "github.com/cosmos/cosmos-sdk/types"
7 | "github.com/summa-tx/bitcoin-spv/golang/btcspv"
8 | "github.com/summa-tx/relays/golang/x/relay/types"
9 | )
10 |
11 | func (s *KeeperSuite) TestEmitProofRequest() {
12 | s.Keeper.emitProofRequest(s.Context, []byte{0}, []byte{0}, 0, types.RequestID{}, types.Local)
13 |
14 | events := s.Context.EventManager().Events()
15 | e := events[0]
16 | s.Equal("proof_request", e.Type)
17 | }
18 |
19 | // tests getNextID and incrementID
20 | func (s *KeeperSuite) TestIncrementID() {
21 | id, err := s.Keeper.getNextID(s.Context)
22 | s.SDKNil(err)
23 | s.Equal(types.RequestID{}, id)
24 |
25 | err = s.Keeper.incrementID(s.Context)
26 | s.SDKNil(err)
27 |
28 | id, err = s.Keeper.getNextID(s.Context)
29 | s.SDKNil(err)
30 | s.Equal(types.RequestID{0, 0, 0, 0, 0, 0, 0, 1}, id)
31 |
32 | // errors if it cannot get next ID
33 | store := s.Keeper.getRequestStore(s.Context)
34 | idTag := []byte(types.RequestIDTag)
35 | store.Set(idTag, bytes.Repeat([]byte{9}, 9))
36 |
37 | err = s.Keeper.incrementID(s.Context)
38 | s.Equal(sdk.CodeType(107), err.Code())
39 | }
40 |
41 | func (s *KeeperSuite) TestHasRequest() {
42 | hasRequest := s.Keeper.hasRequest(s.Context, types.RequestID{})
43 | s.Equal(false, hasRequest)
44 | requestErr := s.Keeper.setRequest(s.Context, []byte{0}, []byte{0}, 0, 4, types.Local, nil)
45 | s.Nil(requestErr)
46 | hasRequest = s.Keeper.hasRequest(s.Context, types.RequestID{})
47 | s.Equal(true, hasRequest)
48 | }
49 |
50 | func (s *KeeperSuite) TestSetRequest() {
51 | store := s.Keeper.getRequestStore(s.Context)
52 | idTag := []byte(types.RequestIDTag)
53 | store.Set(idTag, bytes.Repeat([]byte{9}, 9))
54 |
55 | err := s.Keeper.setRequest(s.Context, []byte{0}, []byte{0}, 0, 0, types.Local, nil)
56 | s.Equal(sdk.CodeType(107), err.Code())
57 | }
58 |
59 | func (s *KeeperSuite) TestSetRequestState() {
60 | // errors if request is not found
61 | activeErr := s.Keeper.setRequestState(s.Context, types.RequestID{}, false)
62 | s.Equal(sdk.CodeType(601), activeErr.Code())
63 |
64 | // set request
65 | requestErr := s.Keeper.setRequest(s.Context, []byte{1}, []byte{1}, 0, 0, types.Local, nil)
66 | s.Nil(requestErr)
67 | // change active state to false
68 | activeErr = s.Keeper.setRequestState(s.Context, types.RequestID{}, false)
69 | s.Nil(activeErr)
70 |
71 | deactivatedRequest, deactivatedRequestErr := s.Keeper.getRequest(s.Context, types.RequestID{})
72 | s.Nil(deactivatedRequestErr)
73 | s.Equal(false, deactivatedRequest.ActiveState)
74 | }
75 |
76 | func (s *KeeperSuite) TestGetRequest() {
77 | requestRes := s.Fixtures.RequestTestCases.EmptyRequest
78 | request, err := s.Keeper.getRequest(s.Context, types.RequestID{})
79 | s.Equal(sdk.CodeType(601), err.Code())
80 | s.Equal(types.ProofRequest{}, request)
81 |
82 | requestErr := s.Keeper.setRequest(s.Context, []byte{0}, []byte{0}, 0, 0, types.Local, nil)
83 | s.Nil(requestErr)
84 |
85 | request, err = s.Keeper.getRequest(s.Context, types.RequestID{})
86 | s.Nil(err)
87 | s.Equal(requestRes, request)
88 | }
89 |
90 | func (s *KeeperSuite) TestCheckRequests() {
91 | tc := s.Fixtures.RequestTestCases.CheckRequests
92 | v := tc[0]
93 |
94 | // Errors if request is not found
95 | err := s.Keeper.checkRequests(
96 | s.Context,
97 | v.InputIdx,
98 | v.OutputIdx,
99 | v.Vin,
100 | v.Vout,
101 | v.RequestID)
102 | s.Equal(sdk.CodeType(601), err.Code())
103 |
104 | // set request
105 | requestErr := s.Keeper.setRequest(s.Context, []byte{1}, []byte{1}, 0, 0, types.Local, nil)
106 | s.Nil(requestErr)
107 | // change active state to false
108 | activeErr := s.Keeper.setRequestState(s.Context, types.RequestID{}, false)
109 | s.Nil(activeErr)
110 | // errors if request is not active
111 | err = s.Keeper.checkRequests(
112 | s.Context,
113 | v.InputIdx,
114 | v.OutputIdx,
115 | v.Vin,
116 | v.Vout,
117 | v.RequestID)
118 | s.Equal(sdk.CodeType(606), err.Code())
119 |
120 | 	// change active state back to true
121 | activeErr = s.Keeper.setRequestState(s.Context, types.RequestID{}, true)
122 | s.Nil(activeErr)
123 | 	// errors if the output script does not match the request's pays filter
124 | err = s.Keeper.checkRequests(
125 | s.Context,
126 | v.InputIdx,
127 | v.OutputIdx,
128 | v.Vin,
129 | v.Vout,
130 | v.RequestID)
131 | s.Equal(sdk.CodeType(607), err.Code())
132 |
133 | // Errors if output value is less than pays value
134 | out, outErr := btcspv.ExtractOutputAtIndex(v.Vout, uint(v.OutputIdx))
135 | s.Nil(outErr)
136 | // out[8:] extracts the output script which we use to set the request
137 | requestErr = s.Keeper.setRequest(s.Context, []byte{0}, out[8:], 1000, 0, types.Local, nil)
138 | s.SDKNil(requestErr)
139 | err = s.Keeper.checkRequests(
140 | s.Context,
141 | v.InputIdx,
142 | v.OutputIdx,
143 | v.Vin,
144 | v.Vout,
145 | types.RequestID{0, 0, 0, 0, 0, 0, 0, 1})
146 | s.Equal(sdk.CodeType(608), err.Code())
147 |
148 | 	// Errors if the input's outpoint does not match the spends filter
149 | requestErr = s.Keeper.setRequest(s.Context, []byte{1}, []byte{}, 0, 255, types.Local, nil)
150 | s.SDKNil(requestErr)
151 | err = s.Keeper.checkRequests(
152 | s.Context,
153 | v.InputIdx,
154 | v.OutputIdx,
155 | v.Vin,
156 | v.Vout,
157 | types.RequestID{0, 0, 0, 0, 0, 0, 0, 2})
158 | s.Equal(sdk.CodeType(609), err.Code())
159 |
160 | // Success
161 | in, extractErr := btcspv.ExtractInputAtIndex(v.Vin, uint(v.InputIdx))
162 | s.Nil(extractErr)
163 | outpoint := btcspv.ExtractOutpoint(in)
164 | // out[8:] extracts the output script which we use to set the request
165 | requestErr = s.Keeper.setRequest(s.Context, outpoint, out[8:], 10, 255, types.Local, nil)
166 | s.SDKNil(requestErr)
167 | err = s.Keeper.checkRequests(
168 | s.Context,
169 | v.InputIdx,
170 | v.OutputIdx,
171 | v.Vin,
172 | v.Vout,
173 | types.RequestID{0, 0, 0, 0, 0, 0, 0, 3})
174 | s.SDKNil(err)
175 |
176 | for i := 1; i < len(tc); i++ {
177 | err := s.Keeper.checkRequests(
178 | s.Context,
179 | tc[i].InputIdx,
180 | tc[i].OutputIdx,
181 | tc[i].Vin,
182 | tc[i].Vout,
183 | tc[i].RequestID)
184 | if tc[i].Error == 0 {
185 | s.SDKNil(err)
186 | } else {
187 | s.Equal(sdk.CodeType(tc[i].Error), err.Code())
188 | }
189 | }
190 | }
191 |
--------------------------------------------------------------------------------
/maintainer/maintainer/bitcoin/bcoin_rpc.py:
--------------------------------------------------------------------------------
1 | import aiohttp
2 | import logging
3 |
4 | from maintainer import config
5 |
6 | from maintainer.relay_types import BCoinTx
7 | from btcspv.types import RelayHeader
8 | from typing import Any, cast, Dict, List, Optional, Tuple, Union
9 | S = aiohttp.ClientSession
10 |
11 | SESSION = aiohttp.ClientSession(
12 | headers={"Connection": "close"} # close the connection after each request
13 | )
14 |
15 | logger = logging.getLogger('root.summa_relay.bcoin_rpc')
16 |
17 |
18 | async def close_connection() -> None:
19 | logger.info('closing http session')
20 | await SESSION.close()
21 |
22 |
23 | async def unwrap_json(resp: aiohttp.ClientResponse) -> Dict[str, Any]:
24 | try:
25 | return cast(Dict[str, Any], await resp.json())
26 | except aiohttp.client_exceptions.ContentTypeError as e:
27 | logger.error('Failed to unwrap json from response. '
28 | 'Hint: is your bcoin api key correct?')
29 | raise e
30 |
31 |
32 | async def _GET(route: str, session: S = SESSION) -> Tuple[int, Any]:
33 | '''Dispatch a GET request'''
34 | URL = config.get()['BCOIN_URL']
35 |
36 |     logger.debug(f'get request {route}')
37 | full_route = f'{URL}/{route}'
38 | resp = await session.get(full_route)
39 |
40 | return resp.status, await resp.json()
41 |
42 |
43 | async def _POST(
44 | route: str = '',
45 | payload: Dict[str, Any] = {},
46 | session: S = SESSION) -> Tuple[int, Any]:
47 | '''Dispatch a POST request'''
48 | URL = config.get()['BCOIN_URL']
49 |
50 | logger.debug(f'sending bcoin post request {payload["method"]}')
51 | resp = await session.post(f'{URL}/{route}', json=payload)
52 | status = resp.status
53 | resp_json = await unwrap_json(resp)
54 |
55 | result = None
56 | if resp_json is not None:
57 | logger.debug(f'got response {len(resp_json)}')
58 | result = resp_json['result'] if 'result' in resp_json else resp_json
59 |
60 | if status != 200:
61 | r = await resp.read()
62 | logger.error(f'Unexpected status {status} body {r!r}')
63 | return resp.status, result
64 |
65 |
66 | async def _PUT(
67 | route: str,
68 | payload: Dict[str, Any],
69 | session: S = SESSION) -> Tuple[int, Any]:
70 |     '''Dispatch a PUT request'''
71 | URL = config.get()['BCOIN_URL']
72 |
73 | logger.debug(f'sending bcoin put request {payload["method"]}')
74 |
75 | resp = await session.put(f'{URL}/{route}', json=payload)
76 | status = resp.status
77 | resp_json = await unwrap_json(resp)
78 |
79 | result = None
80 | if resp_json is not None:
81 | logger.debug(f'got response {len(resp_json)}')
82 | result = resp_json['result'] if 'result' in resp_json else resp_json
83 |
84 | if status != 200:
85 | r = await resp.read()
86 | logger.error(f'Unexpected status {status} body {r!r}')
87 |
88 | return status, result
89 |
90 |
91 | async def get_header_by_hash_le(
92 | hash: Union[str, bytes],
93 | session: S = SESSION) -> Optional[RelayHeader]:
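    '''Gets a header by its LE (internal byte order) hash'''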
94 | hash_hex: str
95 | try:
96 | hash_hex = cast(bytes, hash)[::-1].hex()
97 | except AttributeError:
98 | hash_hex = bytes.fromhex(cast(str, hash))[::-1].hex()
99 | return await get_header_by_hash_be(hash_hex)
100 |
101 |
102 | async def get_header_by_hash_be(
103 | hash: Union[str, bytes],
104 | session: S = SESSION) -> Optional[RelayHeader]:
105 |     '''Gets a header by its BE hash'''
106 | hash_hex: str
107 |
108 | try:
109 | hash_hex = cast(bytes, hash).hex()
110 | except AttributeError:
111 | hash_hex = cast(str, hash)
112 |
113 | logger.debug(f'retrieving info on {hash_hex}')
114 | payload = {
115 | 'method': 'getblockheader',
116 | 'params': [hash_hex, True] # verbose
117 | }
118 | status, block_info_or_none = await _POST(payload=payload, session=session)
119 | if status != 200 or block_info_or_none is None:
120 | return None
121 |
122 | block_info = cast(dict, block_info_or_none)
123 |
124 | raw_payload = {
125 | 'method': 'getblockheader',
126 | 'params': [hash_hex, False] # not verbose
127 | }
128 | status, raw = await _POST(payload=raw_payload, session=session)
129 | if status != 200:
130 | return None
131 |
132 | digest = bytes.fromhex(block_info['hash'])
133 | merkle_root = bytes.fromhex(block_info['merkleroot'])
134 | prevhash = bytes.fromhex(block_info['previousblockhash'])
135 |
136 | return RelayHeader(
137 | raw=bytes.fromhex(raw)[:80],
138 | hash=digest[::-1],
139 | height=block_info['height'],
140 | merkle_root=merkle_root[::-1],
141 | prevhash=prevhash[::-1])
142 |
143 |
144 | async def _get_header_by_height(
145 | height: int,
146 | session: S = SESSION) -> Optional[Dict]:
147 | payload = {
148 | 'method': 'getblockbyheight',
149 | 'params': [height, True, False] # verbose, no txns
150 | }
151 | status, block_info_or_none = await _POST(payload=payload, session=session)
152 | if status != 200 or block_info_or_none is None:
153 | return None
154 | return cast(dict, block_info_or_none)
155 |
156 |
157 | async def get_header_by_height(
158 | height: int,
159 | session: S = SESSION) -> Optional[RelayHeader]:
160 | '''Gets useful information about a header'''
161 | logger.debug(f'retrieving info on block at height {height}')
162 | block_info_or_none = await _get_header_by_height(height, session)
163 | if block_info_or_none is None:
164 | return None
165 |
166 | block_info = cast(dict, block_info_or_none)
167 | return await get_header_by_hash_be(block_info['hash'])
168 |
169 |
170 | async def get_chain_tips(session: S = SESSION) -> List[str]:
171 |     logger.debug('retrieving chain tip info')
172 |
173 | payload = {
174 | 'method': 'getchaintips'
175 | }
176 | status, res = await _POST(payload=payload, session=session)
177 | if status != 200:
178 | raise RuntimeError(f'Unexpected status in get_chain_tips: {status}')
179 | return [a['hash'] for a in res]
180 |
181 |
182 | async def get_tx(tx_id: bytes, session: S = SESSION) -> Optional[BCoinTx]:
183 | route = f'tx/{tx_id[::-1].hex()}' # make BE
184 | status, res = await _GET(route, session)
185 | if status != 200:
186 | return None
187 | return cast(BCoinTx, res)
188 |
--------------------------------------------------------------------------------