├── .dockerignore
├── .gitignore
├── .style.yapf
├── Dockerfile
├── Dockerfile.base
├── LICENSE
├── README.md
├── __init__.py
├── assets
├── Lightning.png
├── NN.jpeg
├── brain.png
├── brain_engineering_diagram.png
├── knowm.png
└── mycellium.jpeg
├── bittensor.sh
├── bittensor
├── __init__.py
├── proto
│ ├── __init__.py
│ ├── bittensor.proto
│ ├── bittensor_pb2.py
│ └── bittensor_pb2_grpc.py
└── tools
│ ├── __init__.py
│ ├── tblogger.py
│ └── tools.py
├── contract
└── bittensor
│ ├── bittensor.cpp
│ └── bittensor.hpp
├── eos_config
├── config.ini
├── logging.json
└── protocol_features
│ ├── BUILTIN-DISALLOW_EMPTY_PRODUCER_SCHEDULE.json
│ ├── BUILTIN-FIX_LINKAUTH_RESTRICTION.json
│ ├── BUILTIN-FORWARD_SETCODE.json
│ ├── BUILTIN-GET_SENDER.json
│ ├── BUILTIN-NO_DUPLICATE_DEFERRED_ID.json
│ ├── BUILTIN-ONLY_BILL_FIRST_AUTHORIZER.json
│ ├── BUILTIN-ONLY_LINK_TO_EXISTING_PERMISSION.json
│ ├── BUILTIN-PREACTIVATE_FEATURE.json
│ ├── BUILTIN-RAM_RESTRICTIONS.json
│ ├── BUILTIN-REPLACE_DEFERRED.json
│ ├── BUILTIN-RESTRICT_ACTION_TO_SELF.json
│ ├── BUILTIN-WEBAUTHN_KEY.json
│ └── BUILTIN-WTMSIG_BLOCK_SIGNATURES.json
├── neurons
├── CoLA
│ ├── Dockerfile
│ ├── __init__.py
│ ├── config.py
│ ├── data.zip
│ ├── dendrite.py
│ ├── main.py
│ └── metagraph.py
├── GoogleUSE
│ ├── Dockerfile
│ ├── __init__.py
│ ├── config.py
│ └── main.py
├── Mach
│ ├── Dockerfile
│ ├── __init__.py
│ ├── data
│ │ └── text8.zip
│ ├── main.py
│ ├── metagraph.py
│ ├── neuron.py
│ └── nucleus.py
├── Neumann
│ └── model_fn.py
├── Null
│ ├── Dockerfile
│ ├── __init__.py
│ ├── main.py
│ └── metagraph.py
├── __init_.py
├── boltzmann
│ ├── Dockerfile
│ ├── __init__.py
│ ├── config.py
│ ├── data
│ │ └── text8.zip
│ ├── dendrite.py
│ ├── main.py
│ ├── metagraph.py
│ ├── neuron.py
│ └── nucleus.py
├── elmo
│ ├── Dockerfile
│ ├── __init__.py
│ ├── config.py
│ └── main.py
└── feynman
│ ├── Dockerfile
│ ├── __init__.py
│ ├── config.py
│ ├── data
│ └── text8.zip
│ ├── dendrite.py
│ ├── main.py
│ ├── metagraph.py
│ ├── nucleus.py
│ ├── synapse.py
│ └── visualization.py
├── requirements.txt
├── scripts
├── bittensor.sh
├── build_protos.sh
├── check_requirements.sh
├── constant.sh
├── deploy_contract.sh
├── init_eos.sh
├── push_image.sh
├── todo.sh
└── upnpc.py
├── setup.py
├── start_eos.sh
├── start_visualizer.sh
├── testing
├── Emission testing.ipynb
├── GradientTests.ipynb
├── Market Testing.ipynb
└── Visualization Testing.ipynb
└── visualizer
├── Dockerfile
├── TBLogger.py
├── __init__.py
├── config.py
├── config.yaml
├── main.py
├── proto
├── __init__.py
├── visualizer.proto
├── visualizer_pb2.py
└── visualizer_pb2_grpc.py
└── visualizer.sh
/.dockerignore:
--------------------------------------------------------------------------------
1 | .git
2 | checkpoints/
3 | data/*
4 | __pycache__
5 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 |
2 | # State folder for all neurons.
3 | # TODO(const) This folder should be set with a var.
4 | **/data/*
5 | !data/.gitkeep
6 | !**/data/text8.zip
7 |
8 | **/__pycache__/
9 | **/.ipynb_checkpoints/
10 | *.pyc
11 |
12 | # Tests
13 | testing/
14 |
15 | #compiled c++ files
16 | **/*.o
17 |
18 | #compiled contract files
19 | **/*.abi
20 | **/*.wasm
21 | **/*.wast
22 |
23 | # misc
24 | .DS_Store
25 | .env.local
26 | .env.development.local
27 | .env.test.local
28 | .env.production.local
29 |
30 | # PIPY Stuff
31 | bittensor.egg-info
32 | bittensor*.egg
33 | bdist.*
34 |
35 | npm-debug.log*
36 | yarn-debug.log*
37 | yarn-error.log*
38 |
39 | **/build/*
40 | **/dist/*
41 |
--------------------------------------------------------------------------------
/.style.yapf:
--------------------------------------------------------------------------------
1 | [style]
2 | # YAPF uses the chromium style
3 | based_on_style = chromium
4 |
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM unconst/bittensor
2 |
--------------------------------------------------------------------------------
/Dockerfile.base:
--------------------------------------------------------------------------------
1 | FROM ubuntu:18.04
2 |
3 | RUN apt-get update && apt-get install -y wget sudo curl vim
4 | RUN wget https://github.com/EOSIO/eosio.cdt/releases/download/v1.6.1/eosio.cdt_1.6.1-1_amd64.deb
5 | RUN apt-get update && sudo apt install -y ./eosio.cdt_1.6.1-1_amd64.deb
6 | RUN wget https://github.com/eosio/eos/releases/download/v2.0.2/eosio_2.0.2-1-ubuntu-18.04_amd64.deb
7 | RUN apt-get update && sudo apt install -y ./eosio_2.0.2-1-ubuntu-18.04_amd64.deb
8 |
9 | RUN apt-get update \
10 | && apt-get install -y python3-pip python3-dev \
11 | && cd /usr/local/bin \
12 | && ln -s /usr/bin/python3 python \
13 | && pip3 install --upgrade pip
14 |
15 | RUN apt-get update && apt-get install -y --no-install-recommends \
16 | git \
17 | build-essential \
18 | curl \
19 | python \
20 | python3-dev \
21 | python3-setuptools \
22 | python3-pip \
23 | && \
24 | apt-get clean && \
25 | rm -rf /var/lib/apt/lists/*
26 |
27 | RUN pip3 install --upgrade pip && \
28 | pip3 install argparse && \
29 | pip3 install grpcio && \
30 | pip3 install grpcio-tools && \
31 | pip3 install libeospy && \
32 | pip3 install loguru && \
33 | pip3 install matplotlib && \
34 | pip3 install "numpy<1.17" && \
35 | pip3 install networkx && \
36 | pip3 install pebble && \
37 | pip3 install pickle-mixin && \
38 | pip3 install pycrypto && \
39 | pip3 install --upgrade setuptools && \
40 | pip3 install timeloop && \
41 | pip3 install zipfile36
42 |
43 | # Copy BitTensor source to this image.
44 | RUN mkdir bittensor
45 | COPY . bittensor/
46 | WORKDIR /bittensor
47 |
48 | # Install Bittensor
49 | RUN pip3 install -e .
50 |
51 | RUN pip3 uninstall -y tensorboard tensorflow tensorflow-estimator
52 | RUN pip3 install "tensorflow==1.15.2"
53 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | The MIT License
2 |
3 | Copyright (c) 2010-2018 Google, Inc. http://angularjs.org
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in
13 | all copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
21 | THE SOFTWARE.
22 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | ## BitTensor Network Daemon
2 |
3 |
4 |
5 | ---
6 |
7 | ## Overview
8 |
9 | This software is designed to run an intermodel machine intelligence benchmark which measures the performance of other learners via informational significance.
10 |
11 | The code, when run, generates an asynchronous, decentralized, mixture of experts model which trains across computers in a peer-to-peer fashion.
12 |
13 | In absence of centralized control, the network ranks using collaborative filtering where each participant is running a pruning method to measure the informational significance of their peers. To computers with large rank the network mints digital tokens which provide power over the network.
14 |
15 | This repo contains an implementation of a peer in this network. It trains a self-supervised language representation using a dummy corpus of text by taking as input the output of its peers in the network. In doing so, it mines the network native digital token.
16 |
17 | ---
18 | ## Run Locally
19 | 1. Install [python3](https://realpython.com/installing-python/)
20 | 1. Install [Docker](https://docs.docker.com/install/)
21 |
22 | ```
23 | $ git clone https://github.com/unconst/BitTensor && cd BitTensor
24 |
25 | # Start EOS chain.
26 | $ ./start_eos.sh
27 |
28 | # Start node 1.
29 | $ ./bittensor.sh
30 |
31 | # Start node 2.
32 | $ ./bittensor.sh
33 | ```
34 | ---
35 |
36 | ## Learn More
37 |
38 | Join our [slack](https://bittensor.slack.com/) and say hello :)
39 |
40 | ---
41 |
42 | ## Pull Requests
43 |
44 | This is alpha software, so in the interest of speed, just directly commit to the repo and use [Yapf](https://github.com/google/yapf) for code formatting.
45 | ```
46 | $ pip install yapf
47 | $ yapf --style google -r -vv -i .
48 | ```
49 |
50 | ---
51 |
52 | ## License
53 |
54 | MIT
55 |
--------------------------------------------------------------------------------
/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/unconst/BitTensor/d1af6993c1d6bca273a0c8d147132ee9867f5543/__init__.py
--------------------------------------------------------------------------------
/assets/Lightning.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/unconst/BitTensor/d1af6993c1d6bca273a0c8d147132ee9867f5543/assets/Lightning.png
--------------------------------------------------------------------------------
/assets/NN.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/unconst/BitTensor/d1af6993c1d6bca273a0c8d147132ee9867f5543/assets/NN.jpeg
--------------------------------------------------------------------------------
/assets/brain.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/unconst/BitTensor/d1af6993c1d6bca273a0c8d147132ee9867f5543/assets/brain.png
--------------------------------------------------------------------------------
/assets/brain_engineering_diagram.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/unconst/BitTensor/d1af6993c1d6bca273a0c8d147132ee9867f5543/assets/brain_engineering_diagram.png
--------------------------------------------------------------------------------
/assets/knowm.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/unconst/BitTensor/d1af6993c1d6bca273a0c8d147132ee9867f5543/assets/knowm.png
--------------------------------------------------------------------------------
/assets/mycellium.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/unconst/BitTensor/d1af6993c1d6bca273a0c8d147132ee9867f5543/assets/mycellium.jpeg
--------------------------------------------------------------------------------
/bittensor/__init__.py:
--------------------------------------------------------------------------------
1 | name = "bittensor"
2 | from bittensor.proto import bittensor_pb2
3 | from bittensor.proto import bittensor_pb2_grpc
4 |
--------------------------------------------------------------------------------
/bittensor/proto/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/unconst/BitTensor/d1af6993c1d6bca273a0c8d147132ee9867f5543/bittensor/proto/__init__.py
--------------------------------------------------------------------------------
/bittensor/proto/bittensor.proto:
--------------------------------------------------------------------------------
1 | syntax = "proto3";
2 |
3 | // NOTE(const): Not used.
4 | // TODO(const): Switch to Bittensor protocol.
5 |
6 | service Bittensor {
7 | // Query remote component with text-features, responses are var-length vector
8 | // representations of the text.
9 | rpc Spike(SpikeRequest) returns (SpikeResponse) {}
10 |
11 | // Query a remote component with gradients. Responses are boolean affirmatives.
12 | rpc Grade(GradeRequest) returns (GradeResponse) {}
13 | }
14 |
15 | // Forward query to a peer, carries text and expects feature representations
16 | message SpikeRequest {
17 | // Protocol version 1.0
18 | float version = 1;
19 |
20 | // Source identifier. Where the message originated. Format is an EOS account
21 | // address.
22 | string source_id = 2;
23 |
24 | // Source proof. A signature linking the message id to the source identity.
25 | // RSA( Message_hash, Source_Private_Key.)
26 | // bytes source_proof = 3;
27 |
28 | // Parent identifier. The last hop this message took. Format is an EOS account
29 | // address.
30 | string parent_id = 4;
31 |
32 | // Parent proof. A signature linking the message id to the parent identity.
33 | // RSA( Message_hash, Parent_Private_Key.)
34 | // bytes parent_proof = 5;
35 |
36 | // Message Nounce. A random or incrementing digit.
37 | // bytes nounce = 6;
38 |
39 | // Message identifier. A hash of the message payload, nounce, and source id.
40 | // Uniquely identifies this message with the triple(contents, nounce, source).
41 | bytes message_id = 7;
42 |
43 | // Message Payload. Zero or more var length bytes strings.
44 | // Interpretable as UTF-8 encoded strings. After deserialization these are
45 | // variable length unicode strings.
46 | // Message length must be checked at components.
47 | bytes payload = 8;
48 | }
49 |
50 | // Reverse response from a peer, carries vectors and expects no response.
51 | message SpikeResponse {
52 | // Protocol version 1.0
53 | float version = 1;
54 |
55 | // Source identifier. Where the message originated. Format is an EOS account
56 | // address.
57 | string source_id = 2;
58 |
59 | // Source proof. A signature linking the message id to the source identity.
60 | // RSA( Message_hash, Source_Private_Key.)
61 | // bytes source_proof = 3;
62 |
63 | // Child identifier. The last hop this message took. Format is an EOS account
64 | // address.
65 | string child_id = 4;
66 |
67 | // Child proof. A signature linking the message id to the child identity.
68 | // RSA( Message_hash, Child_Private_Key.)
69 | // bytes child_proof = 5;
70 |
71 | // Message Nounce. A random or incrementing digit.
72 | // bytes nounce = 6;
73 |
74 | // Message identifier. A hash of the message payload, nounce, and source id.
75 | // Uniquely identifies this message with the triple(contents, nounce, source).
76 | bytes message_id = 7;
77 |
78 | // Message Payload. Zero or more fixed length bytes strings.
79 | // Interpretable as 1028 dimensional vector representations of tf.float32s.
80 | // These are spikes or activation values.
81 | bytes payload = 8;
82 | }
83 |
84 | // Forward gradient to peer. Expects peer to train over gradients. Boolean response.
85 | message GradeRequest {
86 | // Protocol version 1.0
87 | float version = 1;
88 |
89 | // Source identifier. Where the message originated. Format is an EOS account
90 | // address.
91 | string source_id = 2;
92 |
93 | // Source proof. A signature linking the message id to the source identity.
94 | // RSA( Message_hash, Source_Private_Key.)
95 | // bytes source_proof = 3;
96 |
97 | // Parent identifier. The last hop this message took. Format is an EOS account
98 | // address.
99 | string parent_id = 4;
100 |
101 | // Parent proof. A signature linking the message id to the parent identity.
102 | // RSA( Message_hash, Parent_Private_Key.)
103 | // bytes parent_proof = 5;
104 |
105 | // Message Nounce. A random or incrementing digit.
106 | // bytes nounce = 6;
107 |
108 | // Message identifier. A hash of the message payload, nounce, and source id.
109 | // Uniquely identifies this message with the triple(contents, nounce, source).
110 | bytes message_id = 7;
111 |
112 | // Message Payload. Zero or more fixed length bytes strings.
113 | // Interpretable as 1028 dimensional vector representations of tf.float32s.
114 | // These are gradient values.
115 | bytes payload = 8;
116 | }
117 |
118 | // Reverse gradient call.
119 | message GradeResponse {
120 | // Protocol version 1.0
121 | float version = 1;
122 |
123 | // Boolean, Gradient accepted message.
124 | bool accept = 2;
125 | }
126 |
--------------------------------------------------------------------------------
/bittensor/proto/bittensor_pb2_grpc.py:
--------------------------------------------------------------------------------
1 | # Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
2 | import grpc
3 |
4 | from bittensor.proto import bittensor_pb2 as bittensor_dot_proto_dot_bittensor__pb2
5 |
6 |
7 | class BittensorStub(object):
8 | """NOTE(const): Not used.
9 | TODO(const): Switch to Bittensor protocol.
10 |
11 | """
12 |
13 | def __init__(self, channel):
14 | """Constructor.
15 |
16 | Args:
17 | channel: A grpc.Channel.
18 | """
19 | self.Spike = channel.unary_unary(
20 | '/Bittensor/Spike',
21 | request_serializer=bittensor_dot_proto_dot_bittensor__pb2.SpikeRequest.SerializeToString,
22 | response_deserializer=bittensor_dot_proto_dot_bittensor__pb2.SpikeResponse.FromString,
23 | )
24 | self.Grade = channel.unary_unary(
25 | '/Bittensor/Grade',
26 | request_serializer=bittensor_dot_proto_dot_bittensor__pb2.GradeRequest.SerializeToString,
27 | response_deserializer=bittensor_dot_proto_dot_bittensor__pb2.GradeResponse.FromString,
28 | )
29 |
30 |
31 | class BittensorServicer(object):
32 | """NOTE(const): Not used.
33 | TODO(const): Switch to Bittensor protocol.
34 |
35 | """
36 |
37 | def Spike(self, request, context):
38 | """Query remote component with text-features, responses are var-length vector
39 | representations of the text.
40 | """
41 | context.set_code(grpc.StatusCode.UNIMPLEMENTED)
42 | context.set_details('Method not implemented!')
43 | raise NotImplementedError('Method not implemented!')
44 |
45 | def Grade(self, request, context):
46 | """Query a remote component with gradients. Responses are boolean affirmatives.
47 | """
48 | context.set_code(grpc.StatusCode.UNIMPLEMENTED)
49 | context.set_details('Method not implemented!')
50 | raise NotImplementedError('Method not implemented!')
51 |
52 |
53 | def add_BittensorServicer_to_server(servicer, server):
54 | rpc_method_handlers = {
55 | 'Spike': grpc.unary_unary_rpc_method_handler(
56 | servicer.Spike,
57 | request_deserializer=bittensor_dot_proto_dot_bittensor__pb2.SpikeRequest.FromString,
58 | response_serializer=bittensor_dot_proto_dot_bittensor__pb2.SpikeResponse.SerializeToString,
59 | ),
60 | 'Grade': grpc.unary_unary_rpc_method_handler(
61 | servicer.Grade,
62 | request_deserializer=bittensor_dot_proto_dot_bittensor__pb2.GradeRequest.FromString,
63 | response_serializer=bittensor_dot_proto_dot_bittensor__pb2.GradeResponse.SerializeToString,
64 | ),
65 | }
66 | generic_handler = grpc.method_handlers_generic_handler(
67 | 'Bittensor', rpc_method_handlers)
68 | server.add_generic_rpc_handlers((generic_handler,))
69 |
--------------------------------------------------------------------------------
/bittensor/tools/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/unconst/BitTensor/d1af6993c1d6bca273a0c8d147132ee9867f5543/bittensor/tools/__init__.py
--------------------------------------------------------------------------------
/bittensor/tools/tblogger.py:
--------------------------------------------------------------------------------
1 | """Simple example on how to log scalars and images to tensorboard without tensor ops.
2 | License: BSD License 2.0
3 | """
4 | __author__ = "Michael Gygli"
5 |
6 | import tensorflow as tf
7 | from StringIO import StringIO
8 | import matplotlib.pyplot as plt
9 | import numpy as np
10 |
11 | class Logger(object):
12 | """Logging in tensorboard without tensorflow ops."""
13 |
14 | def __init__(self, log_dir):
15 | """Creates a summary writer logging to log_dir."""
16 | self.writer = tf.summary.FileWriter(log_dir)
17 |
18 | def log_scalar(self, tag, value, step):
19 | """Log a scalar variable.
20 | Parameter
21 | ----------
22 | tag : basestring
23 | Name of the scalar
24 | value
25 | step : int
26 | training iteration
27 | """
28 | summary = tf.Summary(value=[tf.Summary.Value(tag=tag,
29 | simple_value=value)])
30 | self.writer.add_summary(summary, step)
31 |
32 | def log_images(self, tag, images, step):
33 | """Logs a list of images."""
34 |
35 | im_summaries = []
36 | for nr, img in enumerate(images):
37 | # Write the image to a string
38 | s = StringIO()
39 | plt.imsave(s, img, format='png')
40 |
41 | # Create an Image object
42 | img_sum = tf.Summary.Image(encoded_image_string=s.getvalue(),
43 | height=img.shape[0],
44 | width=img.shape[1])
45 | # Create a Summary value
46 | im_summaries.append(tf.Summary.Value(tag='%s/%d' % (tag, nr),
47 | image=img_sum))
48 |
49 | # Create and write Summary
50 | summary = tf.Summary(value=im_summaries)
51 | self.writer.add_summary(summary, step)
52 |
53 |
54 | def log_histogram(self, tag, values, step, bins=1000):
55 | """Logs the histogram of a list/vector of values."""
56 | # Convert to a numpy array
57 | values = np.array(values)
58 |
59 | # Create histogram using numpy
60 | counts, bin_edges = np.histogram(values, bins=bins)
61 |
62 | # Fill fields of histogram proto
63 | hist = tf.HistogramProto()
64 | hist.min = float(np.min(values))
65 | hist.max = float(np.max(values))
66 | hist.num = int(np.prod(values.shape))
67 | hist.sum = float(np.sum(values))
68 | hist.sum_squares = float(np.sum(values**2))
69 |
70 | # Requires equal number as bins, where the first goes from -DBL_MAX to bin_edges[1]
71 | # See https://github.com/tensorflow/tensorflow/blob/master/tensorflow/core/framework/summary.proto#L30
72 | # Thus, we drop the start of the first bin
73 | bin_edges = bin_edges[1:]
74 |
75 | # Add bin edges and counts
76 | for edge in bin_edges:
77 | hist.bucket_limit.append(edge)
78 | for c in counts:
79 | hist.bucket.append(c)
80 |
81 | # Create and write Summary
82 | summary = tf.Summary(value=[tf.Summary.Value(tag=tag, histo=hist)])
83 | self.writer.add_summary(summary, step)
84 | self.writer.flush()
85 |
--------------------------------------------------------------------------------
/bittensor/tools/tools.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | from loguru import logger
3 | from eospy.cleos import Cleos
4 | import networkx as nx
5 | import matplotlib.pyplot as plt
6 | import numpy
7 |
8 | parser = argparse.ArgumentParser(description='TF graph client args.')
9 | parser.add_argument('--command', default="info")
10 | parser.add_argument('--eosurl', default="http://host.docker.internal:8888")
11 |
12 |
13 | class Node():
14 |
15 | def __init__(self, entry):
16 | # EOS account name.
17 | self.identity = entry['identity']
18 | # Network Stake.
19 | self.stake = entry['stake']
20 | # Last emit.
21 | self.last_emit = entry['last_emit']
22 | # IP address.
23 | self.address = entry['address']
24 | # Port number.
25 | self.port = entry['port']
26 | # List of tuples (edge name, edge weight)
27 | self.edges = entry['edges']
28 |
29 | def __repr__(self):
30 | return self.__str__()
31 |
32 | def __str__(self):
33 | edge_str = []
34 | for el in self.edges:
35 | edge_str.append((el['first'], "%.4f" % float(el['second'])))
36 | edge_str = str(edge_str)
37 | return "( " + self.identity + " | " + str(self.stake) + " | " + str(
38 | self.last_emit) + " | " + self.address + ":" + str(
39 | self.port) + ' | ' + edge_str + " )"
40 |
41 | def __eq__(self, other):
42 | if not other:
43 | return False
44 | return (self.identity == other.identity)
45 |
46 | def __ne__(self, other):
47 | return not self.__eq__(other)
48 |
49 | def __hash__(self):
50 | return hash(str(self.identity))
51 |
52 |
53 | def _make_plot_table(b_nodes):
54 | logger.info(b_nodes)
55 | G = nx.DiGraph()
56 |
57 | total_stake = sum([node.stake for node in b_nodes])
58 |
59 | # Build node sizes in proportion to stake held within the graph.
60 | node_sizes = []
61 | node_labels = {}
62 | for node in b_nodes:
63 | G.add_node(node.identity)
64 | node_sizes.append(25 + 500 * (node.stake / total_stake))
65 | node_labels[node.identity] = str(node.identity)
66 |
67 |     # Edge colors (alphas and weight) reflect attribution weights of each
68 | # connection.
69 | edge_colors = {}
70 | edge_labels = {}
71 | for node in b_nodes:
72 | for edge in node.edges:
73 | if (node.identity, edge['first']) not in edge_labels:
74 | G.add_edge(node.identity, edge['first'])
75 | edge_colors[(node.identity,
76 | edge['first'])] = float(edge['second'])
77 | if node.identity != edge['first']:
78 | edge_labels[(
79 | node.identity,
80 | edge['first'])] = "%.3f" % float(edge['second'])
81 | else:
82 | edge_labels[(node.identity, edge['first'])] = ""
83 |
84 | # Set edge weights.
85 | for u, v, d in G.edges(data=True):
86 | d['weight'] = edge_colors[(u, v)]
87 | edges, weights = zip(*nx.get_edge_attributes(G, 'weight').items())
88 |
89 | # Clear Matplot lib buffer and create new figure.
90 | plt.cla()
91 | plt.clf()
92 | figure = plt.figure(figsize=(15, 8))
93 |
94 | pos = nx.layout.circular_layout(G)
95 | nodes = nx.draw_networkx_nodes(G,
96 | pos,
97 | node_size=node_sizes,
98 | node_color='blue')
99 | edges = nx.draw_networkx_edges(G,
100 | pos,
101 | arrowstyle='->',
102 | arrowsize=15,
103 | edge_color=weights,
104 | edge_cmap=plt.cm.Blues,
105 | width=5)
106 |
107 | edge_labels = nx.draw_networkx_edge_labels(G,
108 | pos,
109 | edge_labels=edge_labels,
110 | with_labels=True,
111 | label_pos=0.3)
112 |
113 | for node in b_nodes:
114 | pos[node.identity] = pos[node.identity] + numpy.array([0, 0.1])
115 | labels = nx.draw_networkx_labels(G, pos, node_labels)
116 |
117 | plt.show(figure)
118 |
119 | def tonodes(table):
120 | nodes = {}
121 | for entry in table['rows']:
122 | next_node = Node(entry)
123 | nodes[entry['identity']] = next_node
124 | if len(nodes) == 0:
125 | logger.info('table is empty, check your eosurl is correct.')
126 | return
127 | return nodes
128 |
129 | if __name__ == "__main__":
130 | args = parser.parse_args()
131 | cleos = Cleos(url=args.eosurl)
132 | if args.command == "info":
133 | logger.info(cleos.get_info())
134 | elif args.command == "print":
135 | cleos.get_info()
136 | table = cleos.get_table('bittensoracc', 'bittensoracc', 'metagraph')
137 | logger.info(tonodes(table))
138 | elif args.command == "table":
139 | cleos.get_info()
140 | table = cleos.get_table('bittensoracc', 'bittensoracc', 'metagraph')
141 | nodes = tonodes(table)
142 | _make_plot_table(nodes)
143 | else:
144 | logger.info('Command not found.')
145 |
--------------------------------------------------------------------------------
/contract/bittensor/bittensor.hpp:
--------------------------------------------------------------------------------
1 | /**
2 | * @file
3 | * @copyright defined in eos/LICENSE.txt
4 | */
5 | #pragma once
6 |
7 | #include
8 | #include
9 | #include
10 |
11 | #include
12 |
13 | namespace eosiosystem {
14 | class system_contract;
15 | }
16 |
17 | namespace eosio {
18 |
19 | using std::string;
20 |
21 | class [[eosio::contract("bittensor")]] bittensor : public contract {
22 | public:
23 | using contract::contract;
24 |
25 | bittensor(name receiver, name code, datastream ds):contract(receiver, code, ds), global_state(_self, _self.value) {}
26 |
27 | // -- BitTensor-- //
28 | // Resets the token contract. Clears nodes and resets stake.
29 |   // The calling user gives the table scope.
30 | [[eosio::action]]
31 | void reset( name this_user );
32 |
33 | // Subscribes a new neuron to the Metagraph, publishes a new endpoint.
34 | [[eosio::action]]
35 | void subscribe( const name user,
36 | const string address,
37 | const string port );
38 |
39 | // Unsubscribes a neuron to the Metagraph removing an endpoint.
40 | [[eosio::action]]
41 | void unsubscribe( const name user );
42 |
43 | // Emits pending stake release to this node AND updates edge set.
44 | // NOTE(const): The release is applied assuming the previous edge
45 | // set was in place up until this block.
46 | [[eosio::action]]
47 | void emit( const name this_user,
48 | const std::vector > this_edges);
49 |
50 | // Metagraph functions.
51 | using subscribe_action = eosio::action_wrapper<"subscribe"_n, &bittensor::subscribe>;
52 | using unsubscribe_action = eosio::action_wrapper<"unsubscribe"_n, &bittensor::unsubscribe>;
53 | using emit_action = eosio::action_wrapper<"emit"_n, &bittensor::emit>;
54 |
55 | // -- BitTensor-- //
56 |
57 |
58 | // -- EOS Token-- //
59 | [[eosio::action]]
60 | void create( name issuer,
61 | asset maximum_supply );
62 |
63 | [[eosio::action]]
64 | void issue( name to, asset quantity, string memo );
65 |
66 | [[eosio::action]]
67 | void retire( asset quantity, string memo );
68 |
69 | [[eosio::action]]
70 | void transfer( name from,
71 | name to,
72 | asset quantity,
73 | string memo );
74 |
75 | [[eosio::action]]
76 | void open( name owner, const symbol& symbol, name ram_payer );
77 |
78 | [[eosio::action]]
79 | void close( name owner, const symbol& symbol );
80 |
81 | static asset get_supply( name token_contract_account, symbol_code sym_code )
82 | {
83 | stats statstable( token_contract_account, sym_code.raw() );
84 | const auto& st = statstable.get( sym_code.raw() );
85 | return st.supply;
86 | }
87 |
88 | static asset get_balance( name token_contract_account, name owner, symbol_code sym_code )
89 | {
90 | accounts accountstable( token_contract_account, owner.value );
91 | const auto& ac = accountstable.get( sym_code.raw() );
92 | return ac.balance;
93 | }
94 |
95 | // EOS token functions.
96 | using create_action = eosio::action_wrapper<"create"_n, &bittensor::create>;
97 | using issue_action = eosio::action_wrapper<"issue"_n, &bittensor::issue>;
98 | using retire_action = eosio::action_wrapper<"retire"_n, &bittensor::retire>;
99 | using transfer_action = eosio::action_wrapper<"transfer"_n, &bittensor::transfer>;
100 | using open_action = eosio::action_wrapper<"open"_n, &bittensor::open>;
101 | using close_action = eosio::action_wrapper<"close"_n, &bittensor::close>;
102 |
103 | // -- EOS Token-- //
104 |
105 |
106 | private:
107 |
108 | // -- BitTensor-- //
109 |
110 | struct [[eosio::table]] neuron {
111 | name identity;
112 | uint64_t stake;
113 | uint64_t last_emit;
114 | std::vector > edges;
115 | std::string address;
116 | std::string port;
117 | uint64_t primary_key()const {return identity.value;}
118 | };
119 | typedef eosio::multi_index< "metagraph"_n, neuron> metagraph;
120 |
121 |
122 | struct [[eosio::table]] globaluint {
123 | uint64_t total_stake = 0;
124 | };
125 |
126 | typedef eosio::singleton< "globaluint"_n, globaluint> globaluintt;
127 |
128 | // This next typedef is only here because of this bug: https://github.com/EOSIO/eosio.cdt/issues/280
129 | // Once that's fixed this can be removed.
130 | typedef eosio::multi_index<"globaluint"_n, globaluint> globaluintt_for_abi;
131 |
132 | globaluintt global_state;
133 |
134 |
135 | uint64_t _get_emission(const name this_user,
136 | const uint64_t this_last_emit,
137 | const uint64_t this_stake);
138 |
139 | void _do_emit(const name this_user,
140 | const uint64_t this_emission);
141 |
142 | // -- BitTensor-- //
143 |
144 | // -- EOS Token-- //
145 |
// One row per token symbol held by an account (table scoped by owner).
struct [[eosio::table]] account {
   asset balance;
   uint64_t primary_key()const { return balance.symbol.code().raw(); }
};

// Per-symbol issuance statistics (table scoped by symbol code).
struct [[eosio::table]] currency_stats {
   asset supply;     // Currently circulating supply.
   asset max_supply; // Maximum issuable supply.
   name issuer;      // Issuing account.

   uint64_t primary_key()const { return supply.symbol.code().raw(); }
};

typedef eosio::multi_index< "accounts"_n, account > accounts;
typedef eosio::multi_index< "stat"_n, currency_stats > stats;

// Balance mutation helpers (defined in bittensor.cpp).
void sub_balance( name owner, asset value );
void add_balance( name owner, asset value, name ram_payer );
164 |
165 | // -- EOS Token-- //
166 | };
167 |
168 | } /// namespace eosio
169 |
--------------------------------------------------------------------------------
/eos_config/logging.json:
--------------------------------------------------------------------------------
1 | {
2 | "includes": [],
3 | "appenders": [{
4 | "name": "consoleout",
5 | "type": "console",
6 | "args": {
7 | "stream": "std_out",
8 | "level_colors": [{
9 | "level": "debug",
10 | "color": "green"
11 | },{
12 | "level": "warn",
13 | "color": "brown"
14 | },{
15 | "level": "error",
16 | "color": "red"
17 | }
18 | ]
19 | },
20 | "enabled": true
21 | },{
22 | "name": "net",
23 | "type": "gelf",
24 | "args": {
25 | "endpoint": "10.10.10.10",
26 | "host": "test"
27 | },
28 | "enabled": true
29 | }
30 | ],
31 | "loggers": [{
32 | "name": "default",
33 | "level": "all",
34 | "enabled": true,
35 | "additivity": false,
36 | "appenders": [
37 | "consoleout",
38 | "net"
39 | ]
40 | },{
41 | "name": "net_plugin_impl",
42 | "level": "all",
43 | "enabled": true,
44 | "additivity": false,
45 | "appenders": [
46 | "net"
47 | ]
48 | }
49 | ]
50 | }
51 |
--------------------------------------------------------------------------------
/eos_config/protocol_features/BUILTIN-DISALLOW_EMPTY_PRODUCER_SCHEDULE.json:
--------------------------------------------------------------------------------
1 | {
2 | "protocol_feature_type": "builtin",
3 | "dependencies": [],
4 | "description_digest": "2853617cec3eabd41881eb48882e6fc5e81a0db917d375057864b3befbe29acd",
5 | "subjective_restrictions": {
6 | "earliest_allowed_activation_time": "1970-01-01T00:00:00.000",
7 | "preactivation_required": true,
8 | "enabled": true
9 | },
10 | "builtin_feature_codename": "DISALLOW_EMPTY_PRODUCER_SCHEDULE"
11 | }
--------------------------------------------------------------------------------
/eos_config/protocol_features/BUILTIN-FIX_LINKAUTH_RESTRICTION.json:
--------------------------------------------------------------------------------
1 | {
2 | "protocol_feature_type": "builtin",
3 | "dependencies": [],
4 | "description_digest": "a98241c83511dc86c857221b9372b4aa7cea3aaebc567a48604e1d3db3557050",
5 | "subjective_restrictions": {
6 | "earliest_allowed_activation_time": "1970-01-01T00:00:00.000",
7 | "preactivation_required": true,
8 | "enabled": true
9 | },
10 | "builtin_feature_codename": "FIX_LINKAUTH_RESTRICTION"
11 | }
--------------------------------------------------------------------------------
/eos_config/protocol_features/BUILTIN-FORWARD_SETCODE.json:
--------------------------------------------------------------------------------
1 | {
2 | "protocol_feature_type": "builtin",
3 | "dependencies": [],
4 | "description_digest": "898082c59f921d0042e581f00a59d5ceb8be6f1d9c7a45b6f07c0e26eaee0222",
5 | "subjective_restrictions": {
6 | "earliest_allowed_activation_time": "1970-01-01T00:00:00.000",
7 | "preactivation_required": true,
8 | "enabled": true
9 | },
10 | "builtin_feature_codename": "FORWARD_SETCODE"
11 | }
--------------------------------------------------------------------------------
/eos_config/protocol_features/BUILTIN-GET_SENDER.json:
--------------------------------------------------------------------------------
1 | {
2 | "protocol_feature_type": "builtin",
3 | "dependencies": [],
4 | "description_digest": "1eab748b95a2e6f4d7cb42065bdee5566af8efddf01a55a0a8d831b823f8828a",
5 | "subjective_restrictions": {
6 | "earliest_allowed_activation_time": "1970-01-01T00:00:00.000",
7 | "preactivation_required": true,
8 | "enabled": true
9 | },
10 | "builtin_feature_codename": "GET_SENDER"
11 | }
--------------------------------------------------------------------------------
/eos_config/protocol_features/BUILTIN-NO_DUPLICATE_DEFERRED_ID.json:
--------------------------------------------------------------------------------
1 | {
2 | "protocol_feature_type": "builtin",
3 | "dependencies": [
4 | "ef43112c6543b88db2283a2e077278c315ae2c84719a8b25f25cc88565fbea99"
5 | ],
6 | "description_digest": "45967387ee92da70171efd9fefd1ca8061b5efe6f124d269cd2468b47f1575a0",
7 | "subjective_restrictions": {
8 | "earliest_allowed_activation_time": "1970-01-01T00:00:00.000",
9 | "preactivation_required": true,
10 | "enabled": true
11 | },
12 | "builtin_feature_codename": "NO_DUPLICATE_DEFERRED_ID"
13 | }
--------------------------------------------------------------------------------
/eos_config/protocol_features/BUILTIN-ONLY_BILL_FIRST_AUTHORIZER.json:
--------------------------------------------------------------------------------
1 | {
2 | "protocol_feature_type": "builtin",
3 | "dependencies": [],
4 | "description_digest": "2f1f13e291c79da5a2bbad259ed7c1f2d34f697ea460b14b565ac33b063b73e2",
5 | "subjective_restrictions": {
6 | "earliest_allowed_activation_time": "1970-01-01T00:00:00.000",
7 | "preactivation_required": true,
8 | "enabled": true
9 | },
10 | "builtin_feature_codename": "ONLY_BILL_FIRST_AUTHORIZER"
11 | }
--------------------------------------------------------------------------------
/eos_config/protocol_features/BUILTIN-ONLY_LINK_TO_EXISTING_PERMISSION.json:
--------------------------------------------------------------------------------
1 | {
2 | "protocol_feature_type": "builtin",
3 | "dependencies": [],
4 | "description_digest": "f3c3d91c4603cde2397268bfed4e662465293aab10cd9416db0d442b8cec2949",
5 | "subjective_restrictions": {
6 | "earliest_allowed_activation_time": "1970-01-01T00:00:00.000",
7 | "preactivation_required": true,
8 | "enabled": true
9 | },
10 | "builtin_feature_codename": "ONLY_LINK_TO_EXISTING_PERMISSION"
11 | }
--------------------------------------------------------------------------------
/eos_config/protocol_features/BUILTIN-PREACTIVATE_FEATURE.json:
--------------------------------------------------------------------------------
1 | {
2 | "protocol_feature_type": "builtin",
3 | "dependencies": [],
4 | "description_digest": "64fe7df32e9b86be2b296b3f81dfd527f84e82b98e363bc97e40bc7a83733310",
5 | "subjective_restrictions": {
6 | "earliest_allowed_activation_time": "1970-01-01T00:00:00.000",
7 | "preactivation_required": false,
8 | "enabled": true
9 | },
10 | "builtin_feature_codename": "PREACTIVATE_FEATURE"
11 | }
--------------------------------------------------------------------------------
/eos_config/protocol_features/BUILTIN-RAM_RESTRICTIONS.json:
--------------------------------------------------------------------------------
1 | {
2 | "protocol_feature_type": "builtin",
3 | "dependencies": [],
4 | "description_digest": "1812fdb5096fd854a4958eb9d53b43219d114de0e858ce00255bd46569ad2c68",
5 | "subjective_restrictions": {
6 | "earliest_allowed_activation_time": "1970-01-01T00:00:00.000",
7 | "preactivation_required": true,
8 | "enabled": true
9 | },
10 | "builtin_feature_codename": "RAM_RESTRICTIONS"
11 | }
--------------------------------------------------------------------------------
/eos_config/protocol_features/BUILTIN-REPLACE_DEFERRED.json:
--------------------------------------------------------------------------------
1 | {
2 | "protocol_feature_type": "builtin",
3 | "dependencies": [],
4 | "description_digest": "9908b3f8413c8474ab2a6be149d3f4f6d0421d37886033f27d4759c47a26d944",
5 | "subjective_restrictions": {
6 | "earliest_allowed_activation_time": "1970-01-01T00:00:00.000",
7 | "preactivation_required": true,
8 | "enabled": true
9 | },
10 | "builtin_feature_codename": "REPLACE_DEFERRED"
11 | }
--------------------------------------------------------------------------------
/eos_config/protocol_features/BUILTIN-RESTRICT_ACTION_TO_SELF.json:
--------------------------------------------------------------------------------
1 | {
2 | "protocol_feature_type": "builtin",
3 | "dependencies": [],
4 | "description_digest": "e71b6712188391994c78d8c722c1d42c477cf091e5601b5cf1befd05721a57f3",
5 | "subjective_restrictions": {
6 | "earliest_allowed_activation_time": "1970-01-01T00:00:00.000",
7 | "preactivation_required": true,
8 | "enabled": true
9 | },
10 | "builtin_feature_codename": "RESTRICT_ACTION_TO_SELF"
11 | }
--------------------------------------------------------------------------------
/eos_config/protocol_features/BUILTIN-WEBAUTHN_KEY.json:
--------------------------------------------------------------------------------
1 | {
2 | "protocol_feature_type": "builtin",
3 | "dependencies": [],
4 | "description_digest": "927fdf78c51e77a899f2db938249fb1f8bb38f4e43d9c1f75b190492080cbc34",
5 | "subjective_restrictions": {
6 | "earliest_allowed_activation_time": "1970-01-01T00:00:00.000",
7 | "preactivation_required": true,
8 | "enabled": true
9 | },
10 | "builtin_feature_codename": "WEBAUTHN_KEY"
11 | }
--------------------------------------------------------------------------------
/eos_config/protocol_features/BUILTIN-WTMSIG_BLOCK_SIGNATURES.json:
--------------------------------------------------------------------------------
1 | {
2 | "protocol_feature_type": "builtin",
3 | "dependencies": [],
4 | "description_digest": "ab76031cad7a457f4fd5f5fca97a3f03b8a635278e0416f77dcc91eb99a48e10",
5 | "subjective_restrictions": {
6 | "earliest_allowed_activation_time": "1970-01-01T00:00:00.000",
7 | "preactivation_required": true,
8 | "enabled": true
9 | },
10 | "builtin_feature_codename": "WTMSIG_BLOCK_SIGNATURES"
11 | }
--------------------------------------------------------------------------------
/neurons/CoLA/Dockerfile:
--------------------------------------------------------------------------------
FROM unconst/bittensor:latest

# Copy across the local files.
COPY neurons/CoLA bittensor/neurons/CoLA

# Single RUN keeps the image to one extra layer; gast is pinned for
# tensorflow / tensor2tensor compatibility.
RUN pip install tensor2tensor && \
    pip3 install 'gast==0.2.2'
8 |
--------------------------------------------------------------------------------
/neurons/CoLA/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/unconst/BitTensor/d1af6993c1d6bca273a0c8d147132ee9867f5543/neurons/CoLA/__init__.py
--------------------------------------------------------------------------------
/neurons/CoLA/config.py:
--------------------------------------------------------------------------------
import os
import sys
import tensorflow as tf

# Tensorflow flags.
flags = tf.app.flags
FLAGS = flags.FLAGS

# TODO (const): This needs to be global bittensor.config
# BUG FIX: corrected user-facing help typos ("Nueron" -> "Neuron",
# "logginf" -> "logging").
flags.DEFINE_string("identity", "xxxxxxx", "Neuron Identity")
flags.DEFINE_string("serve_address", "0.0.0.0", "Address serve synapse.")
flags.DEFINE_string("bind_address", "0.0.0.0", "Address bind synapse.")
flags.DEFINE_string("port", "9090", "Port to serve on.")
flags.DEFINE_string("eosurl", "http://0.0.0.0:8888", "EOS Url.")
flags.DEFINE_string("logdir", "/tmp/", "logging directory.")
flags.DEFINE_integer("k", 3, "Out edge degree.")
flags.DEFINE_float("alpha", 0.01, "Learning rate.")
flags.DEFINE_integer("batch_size", 10, "batch_size")
19 |
20 |
class Config():
    """Runtime configuration snapshot of the tensorflow FLAGS defined above."""

    def __init__(self):
        self.identity = FLAGS.identity
        self.serve_address = FLAGS.serve_address
        self.bind_address = FLAGS.bind_address
        self.port = FLAGS.port
        self.eosurl = FLAGS.eosurl
        self.logdir = FLAGS.logdir
        self.k = FLAGS.k
        self.alpha = FLAGS.alpha
        self.batch_size = FLAGS.batch_size

    def __repr__(self):
        return self.__str__()

    def __str__(self):
        # BUG FIX: the original had 'self.eosurl ++ " \n"' (unary '+' applied
        # to a str -> TypeError at runtime) and printed self.time_till_expire,
        # an attribute that is never assigned (AttributeError). It is replaced
        # by batch_size, which is set but was not printed.
        return ("\nconfig = {\n"
                "\tidentity: " + self.identity + " \n"
                "\tserve_address: " + self.serve_address + " \n"
                "\tbind_address: " + self.bind_address + " \n"
                "\teosurl: " + self.eosurl + " \n"
                "\tport: " + self.port + " \n"
                "\tk: " + str(self.k) + " \n"
                "\talpha: " + str(self.alpha) + " \n"
                "\tbatch_size: " + str(self.batch_size) + " \n}.")
46 |
--------------------------------------------------------------------------------
/neurons/CoLA/data.zip:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/unconst/BitTensor/d1af6993c1d6bca273a0c8d147132ee9867f5543/neurons/CoLA/data.zip
--------------------------------------------------------------------------------
/neurons/CoLA/dendrite.py:
--------------------------------------------------------------------------------
1 | import bittensor
2 |
3 | from Crypto.Hash import SHA256
4 | import grpc
5 | from loguru import logger
6 | import numpy as np
7 | import pickle
8 | import tensorflow as tf
9 |
# Width of the embedding vectors exchanged between neurons.
EMBEDDING_SIZE = 128


def _delete_callback(future):
    # Fire-and-forget: drop the completed gRPC future so no result is retained.
    del future
15 |
16 |
class Dendrite():
    """Forwards spikes and gradients over gRPC to the k downstream peers
    selected from the metagraph."""

    def __init__(self, config, metagraph):
        self.config = config
        self.metagraph = metagraph
        # One channel / node slot per out-edge; filled lazily below.
        self.channels = [None for _ in range(self.config.k)]
        self.channel_nodes = [None for _ in range(self.config.k)]
        self.select_channels()

    def select_channels(self):
        """Fill empty channel slots with connections to distinct metagraph
        peers, never connecting to our own identity."""
        nodes = self.metagraph.nodes
        for i in range(self.config.k):
            if self.channels[i] is not None:
                continue

            selected_node = None
            for node in nodes.values():
                if node not in self.channel_nodes and node.identity != self.config.identity:
                    selected_node = node
                    break

            if selected_node:
                address = selected_node.address + ':' + selected_node.port
                self.channels[i] = grpc.insecure_channel(address)
                self.channel_nodes[i] = selected_node

    def grad(self, nounce, spikes, grads):
        """Send one gradient message per open channel, fire-and-forget.

        Args:
            nounce: unique message string mixed into the message hash.
            spikes: numpy array of inputs the gradients correspond to.
            grads: list of numpy arrays, one per downstream channel
                (assumed non-empty — the type check below indexes grads[0]).
        """
        # Type checks.
        assert (type(nounce) == str)
        assert (type(spikes) == type(np.array([])))
        assert (type(grads) == list)
        assert (type(grads[0]) == type(np.array([])))

        # Encode nounce and source.
        nounce_bytes = bytes(nounce, 'utf-8')
        source_bytes = bytes(self.config.identity, 'utf-8')
        spikes_bytes = pickle.dumps(spikes, protocol=0)

        # Create message hash over (nounce, source, spikes).
        # Renamed from 'hash' to avoid shadowing the builtin.
        hasher = SHA256.new()
        hasher.update(nounce_bytes)
        hasher.update(source_bytes)
        hasher.update(spikes_bytes)
        message_hash = hasher.digest()

        #logger.info('nounce {} hash {}', nounce, message_hash)

        # Query downstream.
        for (i, channel) in enumerate(self.channels):
            # Check channel exists.
            if not channel:
                continue

            # Encode gradient for this channel.
            grad_bytes = pickle.dumps(grads[i], protocol=0)

            # Create request proto.
            request = bittensor.proto.bittensor_pb2.GradeRequest(
                version=1.0,
                source_id=self.config.identity,
                parent_id=self.config.identity,
                message_id=message_hash,
                payload=grad_bytes)

            try:
                # Build stub.
                stub = bittensor.proto.bittensor_pb2_grpc.BittensorStub(channel)

                # Send non-waiting Grade request.
                future = stub.Grade.future(request)
                future.add_done_callback(_delete_callback)

            # BUG FIX: was a bare 'except:' that also swallowed
            # KeyboardInterrupt/SystemExit. Delivery stays best-effort.
            except Exception:
                pass

        return

    def spike(self, nounce, spikes):
        """Broadcast a spike request to all open channels.

        Returns a list aligned with self.channels: a grpc future per live
        channel, None where no channel exists or the call failed to start.
        """
        # Type checks.
        assert (type(nounce) == str)
        assert (type(spikes) == type(np.array([])))

        # Encode nounce and source.
        nounce_bytes = bytes(nounce, 'utf-8')
        source_bytes = bytes(self.config.identity, 'utf-8')
        payload_bytes = pickle.dumps(spikes, protocol=0)

        # Create message hash over (nounce, source, payload).
        hasher = SHA256.new()
        hasher.update(nounce_bytes)
        hasher.update(source_bytes)
        hasher.update(payload_bytes)
        message_hash = hasher.digest()

        #logger.info('nounce {} hash {}', nounce, message_hash)

        # Build request proto.
        request = bittensor.proto.bittensor_pb2.SpikeRequest(
            version=1.0,
            source_id=self.config.identity,
            parent_id=self.config.identity,
            message_id=message_hash,
            payload=payload_bytes)

        # Query downstream.
        futures = []
        for channel in self.channels:
            # Check channel exists.
            if channel is None:
                futures.append(None)
                continue

            try:
                # Build channel
                # TODO(const): having prebuilt stubs would be better.
                stub = bittensor.proto.bittensor_pb2_grpc.BittensorStub(channel)

                # Send non-waiting spike request.
                futures.append(stub.Spike.future(request))
            except Exception:
                # Best-effort: a failed call start leaves no future for this
                # channel. (Dropped the unused 'as e' binding.)
                pass

        return futures
140 |
--------------------------------------------------------------------------------
/neurons/CoLA/metagraph.py:
--------------------------------------------------------------------------------
1 | import ast
2 | from loguru import logger
3 | import sys
4 |
5 | import eospy.keys
6 | from eospy.cleos import Cleos
7 |
8 |
class Node():
    """A single metagraph entry parsed from an EOS table row."""

    def __init__(self, entry):
        self.identity = entry['identity']    # EOS account name (equality key).
        self.stake = entry['stake']          # Network stake.
        self.last_emit = entry['last_emit']  # Last emission marker.
        self.address = entry['address']      # IP address.
        self.port = entry['port']            # Port number.
        self.edges = entry['edges']          # [{'first': name, 'second': weight}, ...]

    def __repr__(self):
        return self.__str__()

    def __str__(self):
        formatted_edges = str([(edge['first'], "%.4f" % float(edge['second']))
                               for edge in self.edges])
        return ("( " + self.identity + " | " + str(self.stake) + " | "
                + str(self.last_emit) + " | " + self.address + ":"
                + str(self.port) + " | " + formatted_edges + " )")

    def __eq__(self, other):
        # Nodes compare purely by identity; any falsy 'other' is unequal.
        return bool(other) and self.identity == other.identity

    def __ne__(self, other):
        return not self.__eq__(other)

    def __hash__(self):
        return hash(str(self.identity))
47 |
48 |
49 | # TODO (const): Pull Token supply.
50 | # TODO (const): Call Token Emit.
51 | # TODO (const): Calculate attribution derivatives for synapse prunning.
52 |
53 |
class Metagraph():
    """Client-side view of the on-chain metagraph table, plus publishing of
    our attribution scores back to the chain via cleos."""

    def __init__(self, config):
        self.config = config
        self.cleos = Cleos(url=config.eosurl)
        self.nodes = {}
        self.pull_metagraph()
        # Attribution starts fully assigned to ourselves.
        self.attributions = [(config.identity, 1.0)]
        # SECURITY: hardcoded private key committed to source.
        # TODO(const) this should be our own key, loaded from env/config,
        # NOT EOSMAIN.
        self.eoskey = "5KQwrPbwdL6PhXujxW37FSSQZ1JiwsST4cqQzDeyXtP79zkvFD3"

    def get_my_stake(self):
        """Stake recorded for our own identity (int)."""
        return int(self.nodes[self.config.identity].stake)

    # TODO(const): pull this from the eos chain under the var 'total stake'
    # instead of doing a sum.
    def get_total_stake(self):
        """Sum of stake over all currently known nodes (int)."""
        return int(sum([node.stake for node in self.nodes.values()]))

    def pull_metagraph(self):
        """Refresh self.nodes from the chain's 'metagraph' table."""
        table = self.cleos.get_table('bittensoracc', 'bittensoracc',
                                     'metagraph')
        for entry in table['rows']:
            next_node = Node(entry)
            self.nodes[entry['identity']] = next_node
        logger.debug(self.__str__())

    # Push attribution scores.
    def publish_attributions(self):
        """Best-effort push of our current attributions to the chain."""
        logger.debug('Publish attributions: ' + str(self.attributions))
        transaction = self.publish_attributions_trx()
        try:
            # TODO (const) Rewrite the cleos library for our selves.
            resp = self.cleos.push_transaction(transaction,
                                               self.eoskey,
                                               broadcast=True)
        # BUG FIX: was a bare 'except:' that also swallowed
        # KeyboardInterrupt/SystemExit; retry with an explicit key object.
        except Exception:
            try:
                eoskey = eospy.keys.EOSKey(self.eoskey)
                resp = self.cleos.push_transaction(transaction,
                                                   eoskey,
                                                   broadcast=True)
            except Exception as e:
                # BUG FIX: loguru needs a '{}' placeholder for the extra arg,
                # otherwise the exception was silently dropped from the log.
                logger.error('Failed to publish transaction: {}', e)

    def publish_attributions_trx(self):
        """Build (but do not send) the 'emit' action transaction carrying
        our attribution edges."""
        arguments = {
            "this_user":
                self.config.identity,
            "this_edges": [
                (attr[0], float(attr[1])) for attr in self.attributions
            ],
        }
        payload = {
            "account":
                "bittensoracc",
            "name":
                "emit",
            "authorization": [{
                "actor": self.config.identity,
                "permission": "active",
            }],
        }
        # Converting payload to binary
        data = self.cleos.abi_json_to_bin(payload['account'], payload['name'],
                                          arguments)
        # Inserting payload binary form as "data" field in original payload
        payload['data'] = data['binargs']
        # final transaction formed
        trx = {"actions": [payload]}
        return trx

    def __repr__(self):
        return self.__str__()

    def __str__(self):
        str_rep = "\nmetagraph = {\n"
        for node in self.nodes.values():
            str_rep += ("\t" + str(node) + "\n")
        str_rep += "}."
        return str_rep
135 |
--------------------------------------------------------------------------------
/neurons/GoogleUSE/Dockerfile:
--------------------------------------------------------------------------------
FROM unconst/bittensor

# Copy across the local files.
COPY neurons/GoogleUSE bittensor/neurons/GoogleUSE

# Cache downloaded TF-Hub modules inside the image/container.
ENV TFHUB_CACHE_DIR /bittensor/cache

# Dependencies for the universal-sentence-encoder hub module.
RUN pip install --upgrade pip && \
    pip install sentencepiece && \
    pip install tensorflow_hub==0.4.0 && \
    pip install tf-sentencepiece
12 |
--------------------------------------------------------------------------------
/neurons/GoogleUSE/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/unconst/BitTensor/d1af6993c1d6bca273a0c8d147132ee9867f5543/neurons/GoogleUSE/__init__.py
--------------------------------------------------------------------------------
/neurons/GoogleUSE/config.py:
--------------------------------------------------------------------------------
import os
import sys
import tensorflow as tf

# Tensorflow flags.
flags = tf.app.flags
FLAGS = flags.FLAGS

# TODO (const): This needs to be global bittensor.config
# BUG FIX: corrected user-facing help typos ("Nueron" -> "Neuron",
# "logginf" -> "logging").
flags.DEFINE_string("identity", "xxxxxxx", "Neuron Identity")
flags.DEFINE_string("serve_address", "0.0.0.0", "Address serve synapse.")
flags.DEFINE_string("bind_address", "0.0.0.0", "Address bind synapse.")
flags.DEFINE_string("port", "9090", "Port to serve on.")
flags.DEFINE_string("eosurl", "http://0.0.0.0:8888", "EOS Url.")
flags.DEFINE_string("logdir", "/tmp/", "logging directory.")
flags.DEFINE_integer("k", 3, "Out edge degree.")
flags.DEFINE_float("alpha", 0.01, "Learning rate.")
18 |
19 |
class Config():
    """Runtime configuration snapshot of the tensorflow FLAGS defined above."""

    def __init__(self):
        self.identity = FLAGS.identity
        self.serve_address = FLAGS.serve_address
        self.bind_address = FLAGS.bind_address
        self.port = FLAGS.port
        self.eosurl = FLAGS.eosurl
        self.logdir = FLAGS.logdir
        self.k = FLAGS.k
        self.alpha = FLAGS.alpha

    def __repr__(self):
        return self.__str__()

    def __str__(self):
        # BUG FIX: the original had 'self.eosurl ++ " \n"' (unary '+' applied
        # to a str -> TypeError at runtime) and printed self.time_till_expire,
        # an attribute that is never assigned in this file (AttributeError);
        # that line is removed.
        return ("\nconfig = {\n"
                "\tidentity: " + self.identity + " \n"
                "\tserve_address: " + self.serve_address + " \n"
                "\tbind_address: " + self.bind_address + " \n"
                "\teosurl: " + self.eosurl + " \n"
                "\tport: " + self.port + " \n"
                "\tk: " + str(self.k) + " \n"
                "\talpha: " + str(self.alpha) + " \n}.")
44 |
--------------------------------------------------------------------------------
/neurons/GoogleUSE/main.py:
--------------------------------------------------------------------------------
1 | import bittensor
2 |
3 | from config import Config
4 |
5 | from concurrent import futures
6 | import grpc
7 | from loguru import logger
8 | import numpy
9 | import pickle
10 |
11 | import time
12 | import tensorflow as tf
13 | import tensorflow_hub as hub
14 | import tf_sentencepiece
15 |
# Width of the projected embedding this neuron returns per input.
EMBEDDING_SIZE = 128
17 |
18 |
class Neuron(bittensor.proto.bittensor_pb2_grpc.BittensorServicer):
    """gRPC servicer answering Spike requests with Google USE question
    embeddings projected down to EMBEDDING_SIZE. Grade requests are
    rejected (this neuron does not train)."""

    def __init__(self, config):
        self.config = config

        # Build the TF1-style inference graph once; it is finalized below so
        # no further ops can be added.
        self.graph = tf.Graph()
        with self.graph.as_default():
            logger.info('Loading tensorflow hub module.')
            module_url = "https://tfhub.dev/google/universal-sentence-encoder-multilingual-qa/1"
            module = hub.Module(module_url, trainable=False)

            self.text_placeholder = tf.compat.v1.placeholder(
                tf.string, shape=[None], name="text_placeholder")
            question_embeddings = module(dict(input=self.text_placeholder),
                                         signature="question_encoder",
                                         as_dict=True)

            # Projection onto EMBEDDING_SIZE
            # NOTE(review): w1 is randomly initialized and never trained, so
            # the projection is a fixed random map — confirm this is intended.
            w1 = tf.Variable(tf.random.uniform([512, EMBEDDING_SIZE], -1.0,
                                               1.0))
            b1 = tf.Variable(tf.zeros([EMBEDDING_SIZE]))
            self.output = tf.sigmoid(
                tf.matmul(question_embeddings["outputs"], w1) + b1)

            init_op = tf.group(
                [tf.global_variables_initializer(),
                 tf.tables_initializer()])
            self.graph.finalize()

        # Initialize session.
        self.session = tf.Session(graph=self.graph)
        self.session.run(init_op)

        # Init server: bind a gRPC server for this servicer on config's
        # address:port (insecure channel).
        self.server_address = self.config.bind_address + ":" + self.config.port
        self.server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
        bittensor.proto.bittensor_pb2_grpc.add_BittensorServicer_to_server(
            self, self.server)
        self.server.add_insecure_port(self.server_address)

    def __del__(self):
        # Stop serving immediately when the object is collected.
        self.server.stop(0)
        logger.debug('Stopped Serving Neuron at: {}.', self.server_address)

    def serve(self):
        # Non-blocking: gRPC serves on its own thread pool.
        self.server.start()
        logger.debug('Started Serving Neuron at: {}.', self.server_address)

    def Spike(self, request, context):
        """Run the embedding graph on the pickled inputs in the request and
        return a SpikeResponse with the pickled representations."""
        # Unpack message.
        parent_id = request.parent_id
        message_id = request.message_id
        inputs = pickle.loads(request.payload)
        logger.info('. {}', parent_id)

        # Inference through Google USE.
        numpy_inputs = inputs.flatten()  # [batch_size, var length]
        represenations = self.session.run(self.output,
                                          {self.text_placeholder: numpy_inputs})
        represenations = represenations.reshape(EMBEDDING_SIZE, -1)

        # Pack response.
        response_payload = pickle.dumps(represenations, protocol=0)
        response = bittensor.proto.bittensor_pb2.SpikeResponse(
            child_id=self.config.identity,
            message_id=message_id,
            payload=response_payload)

        return response

    def Grade(self, request, context):
        # Pass: gradients are not applied here; always refuse.
        return bittensor.proto.bittensor_pb2.GradeResponse(accept=False)
92 |
93 |
def main():
    """Build a Neuron from the FLAGS-derived Config, serve it, and idle
    until a keyboard interrupt or error tears it down."""

    def tear_down(_config, _neuron):
        logger.debug('tear down.')
        del _neuron
        del _config

    config = Config()
    neuron = Neuron(config)
    neuron.serve()

    try:
        logger.info('Begin wait on main...')
        while True:
            logger.debug('heartbeat')
            time.sleep(100)
    except KeyboardInterrupt:
        logger.debug('Neuron stopped with keyboard interrupt.')
        tear_down(config, neuron)
    except Exception as e:
        logger.error('Neuron stopped with interrupt on error: ' + str(e))
        tear_down(config, neuron)
119 |
120 |
if __name__ == '__main__':
    # Entry point: flags are parsed by tf.app.flags on first FLAGS access.
    logger.debug("started neuron.")
    main()
124 |
--------------------------------------------------------------------------------
/neurons/Mach/Dockerfile:
--------------------------------------------------------------------------------
FROM unconst/bittensor

# Copy across the local files.
COPY neurons/Mach bittensor/neurons/Mach
4 |
--------------------------------------------------------------------------------
/neurons/Mach/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/unconst/BitTensor/d1af6993c1d6bca273a0c8d147132ee9867f5543/neurons/Mach/__init__.py
--------------------------------------------------------------------------------
/neurons/Mach/data/text8.zip:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/unconst/BitTensor/d1af6993c1d6bca273a0c8d147132ee9867f5543/neurons/Mach/data/text8.zip
--------------------------------------------------------------------------------
/neurons/Mach/main.py:
--------------------------------------------------------------------------------
1 | import bittensor
2 |
3 | from metagraph import Metagraph
4 | from nucleus import Nucleus
5 | from neuron import Neuron
6 |
7 | import argparse
8 | from datetime import timedelta
9 | import grpc
10 | from loguru import logger
11 | import random
12 | import time
13 | from timeloop import Timeloop
14 |
def set_timed_loops(tl, config, neuron, metagraph):
    """Register the neuron's periodic background jobs on the Timeloop `tl`.

    Jobs begin running once the caller invokes tl.start().
    NOTE(review): `config` is currently unused here.
    """

    # Pull the updated graph state (Vertices, Edges, Weights)
    @tl.job(interval=timedelta(seconds=7))
    def pull_metagraph():
        metagraph.pull_metagraph()

    # Publish attributions (Edges, Weights.)
    @tl.job(interval=timedelta(seconds=3))
    def publish_attributions():
        metagraph.publish_attributions()

    # Reselect channels.
    @tl.job(interval=timedelta(seconds=10))
    def connect():
        neuron.connect()
31 |
def main(hparams):
    """Wire up the metagraph, nucleus and neuron, start the timed jobs,
    then idle until a keyboard interrupt or error tears everything down."""

    def tear_down(_hparams, _neuron, _nucleus, _metagraph):
        logger.debug('tear down.')
        del _neuron
        del _nucleus
        del _metagraph
        del _hparams

    metagraph = Metagraph(hparams)
    nucleus = Nucleus(hparams)
    neuron = Neuron(hparams, nucleus, metagraph)
    neuron.serve()

    # Start timed calls.
    tl = Timeloop()
    set_timed_loops(tl, hparams, neuron, metagraph)
    tl.start(block=False)
    logger.info('Started Timers.')

    try:
        logger.info('Begin wait on main...')
        while True:
            logger.debug('heartbeat')
            time.sleep(100)
    except KeyboardInterrupt:
        logger.debug('Neuron stopped with keyboard interrupt.')
        tear_down(hparams, neuron, nucleus, metagraph)
    except Exception as e:
        logger.error('Neuron stopped with interrupt on error: ' + str(e))
        tear_down(hparams, neuron, nucleus, metagraph)
68 |
69 |
if __name__ == '__main__':
    logger.debug("started neuron.")
    parser = argparse.ArgumentParser()

    # Server parameters.
    parser.add_argument(
        '--identity',
        default='abcd',
        type=str,
        help="network identity. Default identity=abcd")
    parser.add_argument(
        '--serve_address',
        default='0.0.0.0',
        type=str,
        help="Address to server neuron. Default serve_address=0.0.0.0")
    parser.add_argument(
        '--bind_address',
        default='0.0.0.0',
        type=str,
        help="Address to bind neuron. Default bind_address=0.0.0.0")
    parser.add_argument(
        '--port',
        default='9090',
        type=str,
        help="Port to serve neuron on. Default port=9090")
    parser.add_argument(
        '--eosurl',
        default='http://0.0.0.0:8888',
        type=str,
        help="Address to eos chain. Default eosurl=http://0.0.0.0:8888")
    parser.add_argument(
        '--logdir',
        default="/tmp/",
        type=str,
        help="logging output directory. Default logdir=/tmp/")

    # Word embedding parameters.
    # BUG FIX: several help strings below contradicted the actual flag names
    # and defaults (stale copy-paste: "fof", vocabulary_size/num_sampled
    # aliases, batch_size=128, n_children=2, "layer 1" for n_hidden2,
    # n_shidden wording on the joiner flags). Corrected to match the code.
    parser.add_argument(
        '--corpus_path',
        default='neurons/Mach/data/text8.zip',
        type=str,
        help='Path to corpus of text. Default corpus_path=neurons/Mach/data/text8.zip')
    parser.add_argument(
        '--n_vocabulary',
        default=50000,
        type=int,
        help='Size of corpus vocabulary. Default n_vocabulary=50000')
    parser.add_argument(
        '--n_sampled',
        default=64,
        type=int,
        help='Number of negative examples to sample during training. Default n_sampled=64')

    # Scoring parameters.
    parser.add_argument(
        '--score_ema',
        default=0.05,
        type=float,
        help='Moving average param for score calc. Default score_ema=0.05')

    # Training params.
    parser.add_argument(
        '--batch_size',
        default=50,
        type=int,
        help='The number of examples per batch. Default batch_size=50')
    parser.add_argument(
        '--learning_rate',
        default=1e-4,
        type=float,
        help='Component learning rate. Default learning_rate=1e-4')
    parser.add_argument(
        '--n_targets',
        default=1,
        type=int,
        help='Number of targets to sample. Default n_targets=1')
    parser.add_argument(
        '--n_embedding',
        default=128,
        type=int,
        help='Size of embedding between components. Default n_embedding=128')
    parser.add_argument(
        '--n_children',
        default=5,
        type=int,
        help='The number of graph neighbors. Default n_children=5')
    parser.add_argument('--n_hidden1',
                        default=512,
                        type=int,
                        help='Size of layer 1. Default n_hidden1=512')
    parser.add_argument('--n_hidden2',
                        default=512,
                        type=int,
                        help='Size of layer 2. Default n_hidden2=512')
    parser.add_argument(
        '--n_shidden1',
        default=512,
        type=int,
        help='Size of synthetic model hidden layer 1. Default n_shidden1=512')
    parser.add_argument(
        '--n_shidden2',
        default=512,
        type=int,
        help='Size of synthetic model hidden layer 2. Default n_shidden2=512')
    # NOTE(review): type=bool with argparse parses any non-empty string as
    # True; left unchanged to preserve the existing CLI behavior.
    parser.add_argument(
        '--use_joiner_network',
        default=False,
        type=bool,
        help=
        'Do we combine downstream spikes using a trainable network. Default use_joiner_network=False'
    )
    parser.add_argument(
        '--n_jhidden1',
        default=512,
        type=int,
        help='Size of joiner model hidden layer 1. Default n_jhidden1=512')
    parser.add_argument(
        '--n_jhidden2',
        default=512,
        type=int,
        help='Size of joiner model hidden layer 2. Default n_jhidden2=512')

    hparams = parser.parse_args()

    main(hparams)
196 |
--------------------------------------------------------------------------------
/neurons/Mach/metagraph.py:
--------------------------------------------------------------------------------
1 | import ast
2 | from loguru import logger
3 | import sys
4 |
5 | import eospy.keys
6 | from eospy.cleos import Cleos
7 |
8 |
class Node():
    """One peer record read from the chain's metagraph table."""

    def __init__(self, entry):
        # EOS account name — the node's primary key.
        self.identity = entry['identity']
        # Stake, last emission, and network location.
        self.stake = entry['stake']
        self.last_emit = entry['last_emit']
        self.address = entry['address']
        self.port = entry['port']
        # Outgoing edges: [{'first': peer identity, 'second': weight}, ...].
        self.edges = entry['edges']

    def __repr__(self):
        return self.__str__()

    def __str__(self):
        # Render edges as (peer, weight) pairs with 4-decimal weights.
        pairs = [(edge['first'], "%.4f" % float(edge['second']))
                 for edge in self.edges]
        return "( {} | {} | {} | {}:{} | {} )".format(
            self.identity, self.stake, self.last_emit, self.address,
            self.port, pairs)

    def __eq__(self, other):
        # Equality is keyed purely on identity; falsy `other` never matches.
        return bool(other) and self.identity == other.identity

    def __ne__(self, other):
        return not self.__eq__(other)

    def __hash__(self):
        return hash(str(self.identity))
47 |
48 |
49 | # TODO (const): Pull Token supply.
50 | # TODO (const): Call Token Emit.
51 | # TODO (const): Calculate attribution derivatives for synapse prunning.
52 |
53 |
class Metagraph():
    """Client-side view of the chain-hosted metagraph table.

    Pulls the peer table from the EOS chain and publishes this node's
    attribution (edge weight) scores back to it.
    """

    def __init__(self, config):
        self.config = config
        self.cleos = Cleos(url=config.eosurl)
        # identity -> Node, populated by pull_metagraph().
        self.nodes = {}
        self.pull_metagraph()
        # Attribution scores over peers; starts fully self-attributed.
        self.attributions = [(config.identity, 1.0)]
        # TODO(const) this should be our own key. NOT EOSMAIN.
        # SECURITY: hard-coded private key checked into source — load from
        # config/environment instead.
        self.eoskey = "5KQwrPbwdL6PhXujxW37FSSQZ1JiwsST4cqQzDeyXtP79zkvFD3"

    def get_my_stake(self):
        """Return this node's stake as an int."""
        return int(self.nodes[self.config.identity].stake)

    # TODO(const): pull this from the eos chain under the var 'total stake'
    # instead of doing a sum.
    def get_total_stake(self):
        """Return the summed stake over every known node as an int."""
        return int(sum([node.stake for node in self.nodes.values()]))

    def pull_metagraph(self):
        """Refresh self.nodes from the chain's 'metagraph' table."""
        table = self.cleos.get_table('bittensoracc', 'bittensoracc',
                                     'metagraph')
        for entry in table['rows']:
            next_node = Node(entry)
            self.nodes[entry['identity']] = next_node
        logger.debug(self.__str__())

    # Push attribution scores.
    def publish_attributions(self):
        """Publish attribution scores to the chain.

        Tries the raw key string first, then falls back to a wrapped
        EOSKey (cleos accepts either, depending on version).
        """
        logger.debug('Publish attributions: ' + str(self.attributions))
        transaction = self.publish_attributions_trx()
        try:
            # TODO (const) Rewrite the cleos library for our selves.
            resp = self.cleos.push_transaction(transaction,
                                               self.eoskey,
                                               broadcast=True)
        except Exception:
            # Was a bare `except:`, which also swallowed SystemExit and
            # KeyboardInterrupt.
            try:
                eoskey = eospy.keys.EOSKey(self.eoskey)
                resp = self.cleos.push_transaction(transaction,
                                                   eoskey,
                                                   broadcast=True)
            except Exception as e:
                # loguru needs a `{}` placeholder for the extra argument;
                # without it the exception detail was silently dropped.
                logger.error('Failed to publish transaction: {}', e)

    def publish_attributions_trx(self):
        """Build the unsigned 'emit' action transaction dict."""
        arguments = {
            "this_user": self.config.identity,
            "this_edges": [
                (attr[0], float(attr[1])) for attr in self.attributions
            ],
        }
        payload = {
            "account": "bittensoracc",
            "name": "emit",
            "authorization": [{
                "actor": self.config.identity,
                "permission": "active",
            }],
        }
        # Converting payload to binary.
        data = self.cleos.abi_json_to_bin(payload['account'], payload['name'],
                                          arguments)
        # Inserting payload binary form as "data" field in original payload.
        payload['data'] = data['binargs']
        # Final transaction formed.
        trx = {"actions": [payload]}
        return trx

    def __repr__(self):
        return self.__str__()

    def __str__(self):
        str_rep = "\nmetagraph = {\n"
        for node in self.nodes.values():
            str_rep += ("\t" + str(node) + "\n")
        str_rep += "}."
        return str_rep
135 |
--------------------------------------------------------------------------------
/neurons/Neumann/model_fn.py:
--------------------------------------------------------------------------------
1 |
class Modelfn():
    """Abstract TensorFlow graph builder for a component neuron.

    Subclasses implement the ``_*_network`` / ``_*_loss`` / ``_optimizer``
    hooks; ``_model_fn`` wires them into a graph with three gradient paths:
    synthetic (mimic children), parent (upstream error), and target
    (supervised loss).

    NOTE(review): this file references ``tf`` but contains no TensorFlow
    import — confirm the intended import before running.
    """

    def __init__(self, hparams):
        # Hyperparameters; _model_fn reads n_embedding and n_targets.
        self._hparams = hparams

    def _gate_dispatch(self, spikes):
        raise NotImplementedError

    def _gate_combine(self, spikes):
        raise NotImplementedError

    def _tokenizer_network(self, spikes):
        raise NotImplementedError

    def _synthetic_network(self, tokenized_spikes):
        raise NotImplementedError

    def _embedding_network(self, tokenized_spikes, downstream_spikes):
        raise NotImplementedError

    def _target_network(self, embedding_spikes):
        raise NotImplementedError

    # Hook signatures below now match their call sites in _model_fn.
    def _target_loss(self, logits, targets):
        raise NotImplementedError

    def _synthetic_loss(self, synthetic_spikes, child_spikes):
        raise NotImplementedError

    def _optimizer(self):
        # Hook for the optimizer used by _model_fn (was called but never
        # declared).
        raise NotImplementedError

    def _model_fn(self):
        """Build placeholders, sub-networks, losses and train steps."""

        # Spikes: inputs from the dataset of arbitrary batch_size.
        self.spikes = tf.compat.v1.placeholder(tf.string, [None, 1], name='spikes')

        # Parent gradients: gradients passed by this component's parent.
        self.parent_error = tf.compat.v1.placeholder(tf.float32, [None, self._hparams.n_embedding], name='parent_grads')

        # Targets: supervised signals used during training and testing.
        self.targets = tf.compat.v1.placeholder(tf.float32, [None, self._hparams.n_targets], name='targets')

        # Use Synthetic: flag, use synthetic inputs when running graph.
        self.use_synthetic = tf.compat.v1.placeholder(tf.bool, shape=[], name='use_synthetic')

        # Gating network.
        with tf.compat.v1.variable_scope("gating_network"):
            gated_spikes = self._gate_dispatch(self.spikes)
            child_inputs = []
            # Fixed: the loop variable previously shadowed the iterable
            # (`for i, gated_spikes in enumerate(gated_spikes)`).
            for gated in gated_spikes:
                # NOTE(review): _child_input_for_gated_spikes is not defined
                # in this file — confirm where it is expected to come from.
                child_inputs.append(_child_input_for_gated_spikes(gated))
            child_spikes = self._gate_combine(child_inputs)
            gating_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope="gating_network")

        # Tokenizer network. (Was `self._tokenizer`, which does not exist.)
        with tf.compat.v1.variable_scope("tokenizer_network"):
            tokenized_spikes = self._tokenizer_network(self.spikes)
            tokenizer_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope="tokenizer_network")

        # Synthetic network: learns to imitate the children's outputs.
        with tf.compat.v1.variable_scope("synthetic_network"):
            synthetic_spikes = self._synthetic_network(tokenized_spikes)
            # Was `self.child_spikes` — child_spikes is a local, not an
            # attribute.
            synthetic_loss = self._synthetic_loss(synthetic_spikes, child_spikes)
            synthetic_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope="synthetic_network")

        # Downstream switch: choose synthetic or real child inputs.
        downstream_spikes = tf.cond(
            tf.equal(self.use_synthetic, tf.constant(True)),
            true_fn=lambda: synthetic_spikes,
            false_fn=lambda: child_spikes)

        # Embedding network.
        with tf.compat.v1.variable_scope("embedding_network"):
            self.embedding = self._embedding_network(tokenized_spikes, downstream_spikes)
            embedding_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope="embedding_network")

        # Target network. (Was `embedding_spikes`, an undefined name.)
        with tf.compat.v1.variable_scope("target_network"):
            logits = self._target_network(self.embedding)
            target_loss = self._target_loss(logits, self.targets)
            target_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope="target_network")

        # Optimizer.
        optimizer = self._optimizer()

        # Synthetic grads: train the synthetic model against child outputs.
        synthetic_grads = optimizer.compute_gradients(
            loss=synthetic_loss, var_list=synthetic_vars)

        # Parent grads: error signal injected by this component's parent.
        parent_grads = optimizer.compute_gradients(
            loss=self.embedding,
            var_list=embedding_vars,
            grad_loss=self.parent_error)

        # Target grads. (Was `gate_vars`, an undefined name.)
        target_grads = optimizer.compute_gradients(
            loss=target_loss,
            var_list=target_vars + embedding_vars + gating_vars)

        # Child grads: gradients w.r.t. child inputs, to be sent downstream.
        child_grads = optimizer.compute_gradients(
            loss=target_loss, var_list=child_inputs)

        # Synthetic step.
        synthetic_step = optimizer.apply_gradients(synthetic_grads)

        # Parent step.
        parent_step = optimizer.apply_gradients(parent_grads)

        # Target step. (Was applying parent_grads — copy/paste bug.)
        target_step = optimizer.apply_gradients(target_grads)
--------------------------------------------------------------------------------
/neurons/Null/Dockerfile:
--------------------------------------------------------------------------------
# Null neuron image: extends the shared bittensor base image.
FROM unconst/bittensor

# Copy across the local files.
COPY neurons/Null/ bittensor/neurons/Null
5 |
--------------------------------------------------------------------------------
/neurons/Null/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/unconst/BitTensor/d1af6993c1d6bca273a0c8d147132ee9867f5543/neurons/Null/__init__.py
--------------------------------------------------------------------------------
/neurons/Null/main.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import bittensor
3 | from Crypto.Hash import SHA256
4 | from concurrent import futures
5 | from datetime import timedelta
6 | from metagraph import Metagraph
7 | from loguru import logger
8 | import pickle
9 | import random
10 | import time
11 | import numpy
12 | import grpc
13 | from timeloop import Timeloop
14 |
class Neuron(bittensor.proto.bittensor_pb2_grpc.BittensorServicer):
    """Null gRPC neuron.

    Answers Spike/Grade requests with zero embeddings and periodically
    probes every peer, tracking a per-channel reliability EMA.
    """

    def __init__(self, hparams, metagraph):
        self._hparams = hparams
        self._metagraph = metagraph
        # Parallel lists: channel object, peer identity, reliability EMA.
        # (Was initialised twice — a dict immediately clobbered by a list.)
        self._channels = []
        self._channel_ids = []
        self._channel_reliability = []
        self.connect()

    def connect(self):
        """Open a channel to every known peer not yet connected."""
        for node in self._metagraph.nodes.values():
            if node.identity == self._hparams.identity:
                continue
            # Was `node.identity not in self._channels`: that list holds
            # Channel objects, so the test was always true and every call
            # appended duplicate channels. Check the identity list instead.
            if node.identity not in self._channel_ids:
                address = node.address + ':' + node.port
                self._channels.append(grpc.insecure_channel(address))
                self._channel_ids.append(node.identity)
                self._channel_reliability.append(0.5)

    def _wait_futures(self, pending, timeout=3):
        """Poll gRPC futures until all finish or `timeout` seconds elapse.

        Returns parallel lists (result, returned, timed, exception), one
        entry per future.
        """
        start = time.time()
        exception = [False for _ in pending]
        result = [False for _ in pending]
        returned = [False for _ in pending]
        timed = [0 for _ in pending]
        while True:
            for i, future in enumerate(pending):
                if future.done():
                    returned[i] = True
                    timed[i] = time.time() - start
                    try:
                        if future.exception():
                            exception[i] = True
                    except Exception:
                        pass
                    try:
                        future.result()
                        result[i] = True
                    except Exception:
                        pass
            if time.time() - start > timeout:
                break
            if sum(returned) == len(pending):
                break
        return result, returned, timed, exception

    def query(self):
        """Send a test Spike then a zero Grade to every peer; update
        per-channel reliability from the Spike responses."""
        logger.info('query')
        # 1. Create nounce.
        nounce = str(random.randint(0, 1000000000))

        # 2. Encode nounce and source.
        source_id = self._hparams.identity
        nounce_bytes = bytes(nounce, 'utf-8')
        source_bytes = bytes(source_id, 'utf-8')
        spikes = numpy.array(['this is a test'])
        payload_bytes = pickle.dumps(spikes, protocol=0)

        # 3. Create unique message hash.
        hash = SHA256.new()
        hash.update(nounce_bytes)
        hash.update(source_bytes)
        hash.update(payload_bytes)
        message_id = hash.digest()

        # 4. Create spike futures.
        spike_futures = []
        for channel in self._channels:
            try:
                stub = bittensor.proto.bittensor_pb2_grpc.BittensorStub(channel)
                request = bittensor.proto.bittensor_pb2.SpikeRequest(
                    version=1.0,
                    source_id=self._hparams.identity,
                    parent_id=self._hparams.identity,
                    message_id=message_id,
                    payload=payload_bytes)
                spike_futures.append(stub.Spike.future(request))
            except Exception as e:
                logger.error(str(e))

        # 5. Catch future responses. (The old inline loop also wrote to an
        # undefined `failing_channels` name, raising a silently-swallowed
        # NameError on every errored future.)
        result, returned, timed, exception = self._wait_futures(spike_futures)

        # Exponential moving average of per-channel response reliability.
        for i in range(len(returned)):
            r1 = self._channel_reliability[i]
            if returned[i]:
                self._channel_reliability[i] = (r1 * 0.95) + (0.05 * 1)
            else:
                self._channel_reliability[i] = (r1 * 0.95) + (0.05 * 0)

        # 6. Create grad futures.
        grad_futures = []
        for channel in self._channels:
            try:
                zeros_payload = pickle.dumps(numpy.zeros((1, self._hparams.n_embedding)), protocol=0)
                stub = bittensor.proto.bittensor_pb2_grpc.BittensorStub(channel)
                request = bittensor.proto.bittensor_pb2.GradeRequest(
                    version=1.0,
                    source_id=self._hparams.identity,
                    parent_id=self._hparams.identity,
                    message_id=message_id,
                    payload=zeros_payload)
                grad_futures.append(stub.Grade.future(request))
            except Exception as e:
                logger.error(str(e))

        # 7. Catch grad future responses.
        result, returned, timed, exception = self._wait_futures(grad_futures)

        logger.info('C: {}', list(zip(result, timed, self._channel_reliability)))

    def Spike(self, request, context):
        """Answer a Spike RPC with a zero embedding of matching batch size.

        SECURITY NOTE: pickle.loads on a network payload executes arbitrary
        code if the sender is malicious — flagged for replacement.
        """
        logger.info('{} --> S', request.source_id)
        inputs = numpy.asarray(pickle.loads(request.payload))
        zeros_payload = pickle.dumps(numpy.zeros((len(inputs), self._hparams.n_embedding)), protocol=0)
        response = bittensor.proto.bittensor_pb2.SpikeResponse(
            version=1.0,
            source_id=request.source_id,
            child_id=self._hparams.identity,
            message_id=request.message_id,
            payload=zeros_payload)
        return response

    def Grade(self, request, context):
        """Accept every Grade RPC; incoming gradients are discarded."""
        logger.info('{} --> G', request.source_id)
        return bittensor.proto.bittensor_pb2.GradeResponse(accept=True)
167 |
def set_timed_loops(tl, hparams, neuron, metagraph):
    """Register the neuron's periodic jobs on the Timeloop scheduler.

    NOTE(review): `hparams` is unused here — presumably kept for interface
    parity with the other neurons; confirm before removing.
    """

    # Pull the updated graph state (Vertices, Edges, Weights)
    @tl.job(interval=timedelta(seconds=5))
    def pull_metagraph():
        metagraph.pull_metagraph()

    # Reselect channels: connect to any newly discovered peers.
    @tl.job(interval=timedelta(seconds=2))
    def connect():
        neuron.connect()

    # Probe all peers with a test spike/grade round.
    @tl.job(interval=timedelta(seconds=1))
    def query():
        neuron.query()
184 |
def main(hparams):
    """Boot the Null neuron: pull the metagraph, serve gRPC, run timers,
    then block until interrupted.
    """

    metagraph = Metagraph(hparams)
    neuron = Neuron(hparams, metagraph)

    server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
    bittensor.proto.bittensor_pb2_grpc.add_BittensorServicer_to_server(neuron, server)
    server.add_insecure_port(hparams.bind_address + ":" + hparams.port)
    server.start()

    tl = Timeloop()
    set_timed_loops(tl, hparams, neuron, metagraph)
    tl.start(block=False)
    logger.info('Started Timers.')

    def tear_down():
        # grpc.Server.stop requires a grace period argument; the previous
        # bare `server.stop()` raised TypeError during shutdown.
        # grace=0 cancels outstanding RPCs immediately.
        server.stop(0)

    try:
        logger.info('Begin wait on main...')
        while True:
            logger.debug('heartbeat')
            time.sleep(100)

    except KeyboardInterrupt:
        logger.debug('Neuron stopped with keyboard interrupt.')
        tear_down()

    except Exception as e:
        logger.error('Neuron stopped with interrupt on error: ' + str(e))
        tear_down()
217 |
if __name__ == '__main__':
    logger.debug("started neuron.")
    parser = argparse.ArgumentParser()

    # (flag, default, type, help) — registered in order below; identical
    # to spelling out each parser.add_argument call.
    arg_specs = [
        ('--identity', 'abcd', str,
         "network identity. Default identity=abcd"),
        ('--serve_address', '0.0.0.0', str,
         "Address to server neuron. Default serve_address=0.0.0.0"),
        ('--bind_address', '0.0.0.0', str,
         "Address to bind neuron. Default bind_address=0.0.0.0"),
        ('--port', '9090', str,
         "Port to serve neuron on. Default port=9090"),
        ('--eosurl', 'http://0.0.0.0:8888', str,
         "Address to eos chain. Default eosurl=http://0.0.0.0:8888"),
        ('--logdir', "/tmp/", str,
         "logging output directory. Default logdir=/tmp/"),
        ('--n_embedding', 128, int,
         'Size of embedding between components. Default n_embedding=128'),
    ]
    for flag, default_value, value_type, help_text in arg_specs:
        parser.add_argument(
            flag, default=default_value, type=value_type, help=help_text)

    hparams = parser.parse_args()

    main(hparams)
262 |
--------------------------------------------------------------------------------
/neurons/Null/metagraph.py:
--------------------------------------------------------------------------------
1 | import ast
2 | from loguru import logger
3 | import sys
4 |
5 | import eospy.keys
6 | from eospy.cleos import Cleos
7 |
8 |
class Node():
    """A single peer entry read from the on-chain metagraph table."""

    def __init__(self, entry):
        # EOS account name.
        self.identity = entry['identity']
        # Network Stake.
        self.stake = entry['stake']
        # Last emit.
        self.last_emit = entry['last_emit']
        # IP address.
        self.address = entry['address']
        # Port number.
        self.port = entry['port']
        # List of edge dicts: {'first': peer identity, 'second': weight}.
        self.edges = entry['edges']

    def __repr__(self):
        return self.__str__()

    def __str__(self):
        # Render edges as (name, weight) pairs with 4-decimal weights.
        edge_str = []
        for el in self.edges:
            edge_str.append((el['first'], "%.4f" % float(el['second'])))
        edge_str = str(edge_str)
        return "( " + self.identity + " | " + str(self.stake) + " | " + str(
            self.last_emit) + " | " + self.address + ":" + str(
                self.port) + ' | ' + edge_str + " )"

    def __eq__(self, other):
        # Identity (the EOS account name) is the node's primary key.
        if not other:
            return False
        return (self.identity == other.identity)

    def __ne__(self, other):
        return not self.__eq__(other)

    def __hash__(self):
        return hash(str(self.identity))
47 |
48 |
49 | # TODO (const): Pull Token supply.
50 | # TODO (const): Call Token Emit.
51 | # TODO (const): Calculate attribution derivatives for synapse prunning.
52 |
53 |
class Metagraph():
    """Client-side view of the chain-hosted metagraph table.

    Pulls the peer table from the EOS chain and publishes this node's
    attribution (edge weight) scores back to it.
    """

    def __init__(self, config):
        self.config = config
        self.cleos = Cleos(url=config.eosurl)
        # identity -> Node, populated by pull_metagraph().
        self.nodes = {}
        self.pull_metagraph()
        # Attribution scores over peers; starts fully self-attributed.
        self.attributions = [(config.identity, 1.0)]
        # TODO(const) this should be our own key. NOT EOSMAIN.
        # SECURITY: hard-coded private key checked into source — load from
        # config/environment instead.
        self.eoskey = "5KQwrPbwdL6PhXujxW37FSSQZ1JiwsST4cqQzDeyXtP79zkvFD3"

    def get_my_stake(self):
        """Return this node's stake as an int."""
        return int(self.nodes[self.config.identity].stake)

    # TODO(const): pull this from the eos chain under the var 'total stake'
    # instead of doing a sum.
    def get_total_stake(self):
        """Return the summed stake over every known node as an int."""
        return int(sum([node.stake for node in self.nodes.values()]))

    def pull_metagraph(self):
        """Refresh self.nodes from the chain's 'metagraph' table."""
        table = self.cleos.get_table('bittensoracc', 'bittensoracc',
                                     'metagraph')
        for entry in table['rows']:
            next_node = Node(entry)
            self.nodes[entry['identity']] = next_node
        logger.debug(self.__str__())

    # Push attribution scores.
    def publish_attributions(self):
        """Publish attribution scores to the chain.

        Tries the raw key string first, then falls back to a wrapped
        EOSKey (cleos accepts either, depending on version).
        """
        transaction = self.publish_attributions_trx()
        try:
            # TODO (const) Rewrite the cleos library for our selves.
            resp = self.cleos.push_transaction(transaction,
                                               self.eoskey,
                                               broadcast=True)
        except Exception:
            # Was a bare `except:`, which also swallowed SystemExit and
            # KeyboardInterrupt.
            try:
                eoskey = eospy.keys.EOSKey(self.eoskey)
                resp = self.cleos.push_transaction(transaction,
                                                   eoskey,
                                                   broadcast=True)
            except Exception as e:
                # loguru needs a `{}` placeholder for the extra argument;
                # without it the exception detail was silently dropped.
                logger.error('Failed to publish transaction: {}', e)

    def publish_attributions_trx(self):
        """Build the unsigned 'emit' action transaction dict."""
        arguments = {
            "this_user": self.config.identity,
            "this_edges": [
                (attr[0], float(attr[1])) for attr in self.attributions
            ],
        }
        payload = {
            "account": "bittensoracc",
            "name": "emit",
            "authorization": [{
                "actor": self.config.identity,
                "permission": "active",
            }],
        }
        # Converting payload to binary.
        data = self.cleos.abi_json_to_bin(payload['account'], payload['name'],
                                          arguments)
        # Inserting payload binary form as "data" field in original payload.
        payload['data'] = data['binargs']
        # Final transaction formed.
        trx = {"actions": [payload]}
        return trx

    def __repr__(self):
        return self.__str__()

    def __str__(self):
        str_rep = "\nmetagraph = {\n"
        for node in self.nodes.values():
            str_rep += ("\t" + str(node) + "\n")
        str_rep += "}."
        return str_rep
134 |
--------------------------------------------------------------------------------
/neurons/__init_.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/unconst/BitTensor/d1af6993c1d6bca273a0c8d147132ee9867f5543/neurons/__init_.py
--------------------------------------------------------------------------------
/neurons/boltzmann/Dockerfile:
--------------------------------------------------------------------------------
FROM unconst/bittensor:latest

# Copy across the local files.
# Fixed case: the repository directory is lower-case 'boltzmann'; the
# previous 'Boltzmann' spelling fails on case-sensitive build contexts.
COPY neurons/boltzmann/ bittensor/neurons/boltzmann
5 |
--------------------------------------------------------------------------------
/neurons/boltzmann/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/unconst/BitTensor/d1af6993c1d6bca273a0c8d147132ee9867f5543/neurons/boltzmann/__init__.py
--------------------------------------------------------------------------------
/neurons/boltzmann/config.py:
--------------------------------------------------------------------------------
import os
import sys
import tensorflow as tf

# Tensorflow flags.
# NOTE(review): tf.app.flags is TensorFlow 1.x API (removed in TF 2) —
# confirm the pinned TF version before upgrading.
flags = tf.app.flags
FLAGS = flags.FLAGS

# TODO (const): This needs to be global bittensor.config
flags.DEFINE_string("identity", "xxxxxxx", "Nueron Identity")
flags.DEFINE_string("serve_address", "0.0.0.0", "Address serve synapse.")
flags.DEFINE_string("bind_address", "0.0.0.0", "Address bind synapse.")
flags.DEFINE_string("port", "9090", "Port to serve on.")
flags.DEFINE_string("eosurl", "http://0.0.0.0:8888", "EOS Url.")
flags.DEFINE_string("logdir", "/tmp/", "logginf directory.")
flags.DEFINE_integer("k", 3, "Out edge degree.")
flags.DEFINE_float("alpha", 0.01, "Learning rate.")
flags.DEFINE_integer("time_till_expire", 5, "time till query expire")
20 |
class Config():
    """Snapshot of the command-line FLAGS for the boltzmann neuron."""

    def __init__(self):
        self.identity = FLAGS.identity
        self.serve_address = FLAGS.serve_address
        self.bind_address = FLAGS.bind_address
        self.port = FLAGS.port
        self.eosurl = FLAGS.eosurl
        self.logdir = FLAGS.logdir
        self.k = FLAGS.k
        self.alpha = FLAGS.alpha
        self.time_till_expire = FLAGS.time_till_expire

    def __repr__(self):
        return self.__str__()

    def __str__(self):
        # BUG FIX: `self.eosurl ++ " \n"` parsed as `eosurl + (+" \n")` and
        # raised TypeError (unary plus on str) whenever the config printed.
        return "\nconfig = {\n\tidentity: " + self.identity + " \n" +\
            "\tserve_address: " + self.serve_address + " \n" +\
            "\tbind_address: " + self.bind_address + " \n" +\
            "\teosurl: " + self.eosurl + " \n" +\
            "\tport: " + self.port + " \n" +\
            "\tk: " + str(self.k) + " \n" + \
            "\talpha: " + str(self.alpha) + " \n" +\
            "\ttime_till_expire: " + str(self.time_till_expire) + " \n}."
46 |
--------------------------------------------------------------------------------
/neurons/boltzmann/data/text8.zip:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/unconst/BitTensor/d1af6993c1d6bca273a0c8d147132ee9867f5543/neurons/boltzmann/data/text8.zip
--------------------------------------------------------------------------------
/neurons/boltzmann/dendrite.py:
--------------------------------------------------------------------------------
1 | import bittensor
2 |
3 | import numpy as np
4 | import grpc
5 |
6 |
class Dendrite:
    """Stub dendrite for the boltzmann neuron.

    The RPC implementation is disabled (see the commented block below);
    construction is intentionally a no-op.
    """

    def __init__(self, config, metagraph):
        pass
11 |
12 | # self.config = config
13 | # self.metagraph = metagraph
14 | # self.channels = [None for _ in range(self.config.k)]
15 | # self.channel_ids = [None for _ in range(self.config.k)]
16 | # self.connect()
17 | #
18 | # def connect(self):
19 | # for i in range(self.config.k):
20 | # if self.channels[i] == None:
21 | # self._set_channel(i)
22 | #
23 | # def _set_channel(self, i):
24 | # for node in self.metagraph.nodes.values():
25 | # if node.identity in self.channel_ids:
26 | # continue
27 | # if node.identity == self.config.identity:
28 | # continue
29 | # else:
30 | # address = node.address + ':' + node.port
31 | # self.channels[i] = grpc.insecure_channel(address)
32 | # self.channel_ids[i] = node.identity
33 | # break
34 | #
35 | # def spike(self, message_id, spikes):
36 | # dspikes = []
37 | # for channel in self.channels:
38 | # dspikes.append(self._spikerpc(channel, message_id, spikes))
39 | # return dspikes
40 | #
41 | # def grade(self, message_id, dgrades):
42 | # for channel, grad in zip(self.channels, dgrades):
43 | # self._gradrpc(channel, message_id, grad)
44 | # return
45 | #
46 | # def _spikerpc(self, channel, message_id, spikes):
47 | #
48 | # # If channel is empty. Return Zeros.
49 | # if channel is None:
50 | # return np.zeros((len(spikes), 128))
51 | #
52 | # try:
53 | # # Build Stub and request proto.
54 | # stub = bittensor.proto.bittensor_pb2_grpc.BittensorStub(channel)
55 | #
56 | # # Build payload.
57 | # # TODO(const) This is a waste, deserialize to serialize again.
58 | # spikes_bytes = pickle.dumps(spikes, protocol=0)
59 | #
60 | # # Create spike request proto.
61 | # request = bittensor.proto.bolt_pb2.SpikeRequest(
62 | # parent_id=self.config.identity,
63 | # message_id=message_id,
64 | # payload=spikes_bytes)
65 | #
66 | # # Send TCP spike request.
67 | # response = stub.Spike(request)
68 | #
69 | # # Deserialize response.
70 | # return pickle.loads(response.payload).reshape(128, -1)
71 | #
72 | # except Exception as error:
73 | # #logger.info('failed call {}', error)
74 | # return np.zeros((len(spikes), 128))
75 | #
76 | # def _gradrpc(self, channel, message_id, grad):
77 | #
78 | # # If channel is empty return
79 | # if channel is None:
80 | # return
81 | #
82 | # try:
83 | # # Build Stub and request proto.
84 | # stub = bittensor.proto.bittensor_pb2_grpc.BittensorStub(channel)
85 | #
86 | # # Build Grade Request proto.
87 | # request = bittensor.proto.bittensor_pb2.GradeRequest(
88 | # parent_id=self.config.identity,
89 | # message_id=message_id,
90 | # grad_payload=pickle.dumps(grad, protocol=0))
91 | #
92 | # # Send grade request.
93 | # stub.Grade(request)
94 | #
95 | # # Pass.
96 | #
97 | # except Exception as error:
98 | # return
99 |
--------------------------------------------------------------------------------
/neurons/boltzmann/main.py:
--------------------------------------------------------------------------------
1 | import bittensor
2 |
3 | from config import Config
4 | from metagraph import Metagraph
5 | from dendrite import Dendrite
6 | from nucleus import Nucleus
7 | from neuron import Neuron
8 |
9 | from Crypto.Hash import SHA256
10 | from datetime import timedelta
11 | import grpc
12 | from loguru import logger
13 | import pickle
14 | import numpy as np
15 | import random
16 | import time
17 | from timeloop import Timeloop
18 |
19 |
def set_timed_loops(tl, config, neuron, metagraph):
    """Register periodic jobs: metagraph refresh, peer reconnect, learning.

    The large commented block below is a disabled self-test loop that
    spikes/grades this neuron over its own gRPC endpoint.
    """

    # Test self.
    # @tl.job(interval=timedelta(seconds=1))
    # def test():
    #     channel = grpc.insecure_channel(config.serve_address + ":" + config.port)
    #
    #     for _ in range(100):
    #         # Inc message id.
    #         message_id = random.randint(0, 1000000)
    #
    #         # Make request.
    #         spikes = np.array([['apples']])
    #         stub = bittensor.proto.bittensor_pb2_grpc.BittensorStub(channel)
    #
    #         time_str = str(time.time())
    #         # Build hash.
    #         hash = SHA256.new()
    #         hash.update(config.identity.encode())
    #         hash.update(spikes.tobytes())
    #         hash.update(time_str.encode())
    #         message_hash = hash.digest()
    #
    #         # Build request.
    #         request = bittensor.proto.bittensor_pb2.SpikeRequest()
    #         request.parent_id = config.identity
    #         request.message_id = message_hash
    #         request.payload = pickle.dumps(spikes, protocol=0)
    #
    #         # Send Spike.
    #         try:
    #             response = stub.Spike(request)
    #             response = pickle.loads(response.payload).reshape(1, 128)
    #
    #         except Exception as e:
    #             logger.error(str(e))
    #
    #         # Make grad request.
    #         grad = np.zeros((1, 128))
    #         stub = bittensor.proto.bittensor_pb2_grpc.BittensorStub(channel)
    #
    #         # Build hash.
    #         hash = SHA256.new()
    #         hash.update(config.identity.encode())
    #         hash.update(spikes.tobytes())
    #         hash.update(time_str.encode())
    #         message_hash = hash.digest()
    #
    #         request = bittensor.proto.bittensor_pb2.GradeRequest()
    #         request.parent_id = config.identity
    #         request.message_id = message_hash
    #         request.payload = pickle.dumps(grad, protocol=0)
    #
    #         # Send grade request.
    #         try:
    #             stub.Grade(request)
    #         except Exception as e:
    #             logger.error(str(e))

    # Pull the updated graph state (Vertices, Edges, Weights)
    @tl.job(interval=timedelta(seconds=7))
    def pull_metagraph():
        metagraph.pull_metagraph()

    # Reselect channels.
    @tl.job(interval=timedelta(seconds=10))
    def connect():
        neuron.connect()

    # Apply a gradient step.
    # NOTE(review): verify the Neuron class exposes a capitalised Learn()
    # method — cannot confirm from this file.
    @tl.job(interval=timedelta(seconds=3))
    def learn():
        neuron.Learn()
93 |
94 |
def main():
    """Assemble the neuron from its components, serve it, and idle until
    the process is interrupted."""

    config = Config()
    metagraph = Metagraph(config)
    dendrite = Dendrite(config, metagraph)
    nucleus = Nucleus(config)
    neuron = Neuron(config, dendrite, nucleus, metagraph)

    # Expose the neuron over the network.
    neuron.serve()

    # Start timed calls.
    tl = Timeloop()
    set_timed_loops(tl, config, neuron, metagraph)
    tl.start(block=False)
    logger.info('Started Timers.')

    def tear_down(_config, _neuron, _dendrite, _nucleus, _metagraph):
        # Drop every component reference so destructors can run.
        logger.debug('tear down.')
        del _neuron
        del _dendrite
        del _nucleus
        del _metagraph
        del _config

    try:
        logger.info('Begin wait on main...')
        while True:
            logger.debug('heartbeat')
            time.sleep(100)

    except KeyboardInterrupt:
        logger.debug('Neuron stopped with keyboard interrupt.')
        tear_down(config, neuron, dendrite, nucleus, metagraph)

    except Exception as e:
        logger.error('Neuron stopped with interrupt on error: ' + str(e))
        tear_down(config, neuron, dendrite, nucleus, metagraph)
136 |
137 |
# Script entry point: log a startup breadcrumb, then run the neuron loop.
if __name__ == '__main__':
    logger.debug("started neuron.")
    main()
141 |
--------------------------------------------------------------------------------
/neurons/boltzmann/metagraph.py:
--------------------------------------------------------------------------------
1 | import ast
2 | from loguru import logger
3 | import sys
4 |
5 | import eospy.keys
6 | from eospy.cleos import Cleos
7 |
8 |
class Node():
    """A single peer in the metagraph, built from one row of the EOS table."""

    def __init__(self, entry):
        # EOS account name.
        self.identity = entry['identity']
        # Network Stake.
        self.stake = entry['stake']
        # Last emission marker for this peer.
        self.last_emit = entry['last_emit']
        # IP address.
        self.address = entry['address']
        # Port number.
        self.port = entry['port']
        # List of (edge name, edge weight) pairs.
        self.edges = entry['edges']

    def __repr__(self):
        return self.__str__()

    def __str__(self):
        # Render each edge as (name, weight formatted to 4 decimal places).
        pairs = [(edge['first'], format(float(edge['second']), '.4f'))
                 for edge in self.edges]
        return ("( " + self.identity + " | " + str(self.stake) + " | " +
                str(self.last_emit) + " | " + self.address + ":" +
                str(self.port) + ' | ' + str(pairs) + " )")

    def __eq__(self, other):
        # Identity alone determines equality; anything falsey is unequal.
        return bool(other) and self.identity == other.identity

    def __ne__(self, other):
        return not self.__eq__(other)

    def __hash__(self):
        return hash(str(self.identity))
47 |
48 |
# TODO (const): Pull Token supply.
# TODO (const): Call Token Emit.
# TODO (const): Calculate attribution derivatives for synapse pruning.
52 |
53 |
class Metagraph():
    """Local view of the global bittensor network state stored on an EOS chain.

    Reads the `metagraph` table from the `bittensoracc` contract, caching each
    row as a Node keyed by identity. Also publishes this neuron's attribution
    (edge weight) scores back to the chain via the contract's `emit` action.
    """

    def __init__(self, config):
        self.config = config
        # RPC client bound to the EOS endpoint named in the config.
        self.cleos = Cleos(url=config.eosurl)
        # identity -> Node, refreshed by pull_metagraph().
        self.nodes = {}
        self.pull_metagraph()
        # Attribution scores published to the chain; starts as full
        # self-attribution.
        self.attributions = [(config.identity, 1.0)]
        # TODO(const) this should be our own key. NOT EOSMAIN.
        # SECURITY(review): hard-coded private key — this is the well-known
        # EOS development key and must never be used outside a local test
        # chain; load a per-node key from config/env instead.
        self.eoskey = "5KQwrPbwdL6PhXujxW37FSSQZ1JiwsST4cqQzDeyXtP79zkvFD3"

    def get_my_stake(self):
        # Stake held by this neuron's own identity; raises KeyError if we are
        # not yet present in the pulled table.
        return int(self.nodes[self.config.identity].stake)

    # TODO(const): pull this from the eos chain under the var 'total stake'
    # instead of doing a sum.
    def get_total_stake(self):
        return int(sum(node.stake for node in self.nodes.values()))

    def pull_metagraph(self):
        """Refresh self.nodes from the on-chain metagraph table."""
        table = self.cleos.get_table('bittensoracc', 'bittensoracc',
                                     'metagraph')
        for entry in table['rows']:
            self.nodes[entry['identity']] = Node(entry)
        logger.debug(self.__str__())

    # Push attribution scores.
    def publish_attributions(self):
        logger.debug('Publish attributions: ' + str(self.attributions))
        transaction = self.publish_attributions_trx()
        try:
            # TODO (const) Rewrite the cleos library for our selves.
            resp = self.cleos.push_transaction(transaction,
                                               self.eoskey,
                                               broadcast=True)
        except Exception:
            # Some cleos versions require a wrapped key object; retry once.
            try:
                eoskey = eospy.keys.EOSKey(self.eoskey)
                resp = self.cleos.push_transaction(transaction,
                                                   eoskey,
                                                   broadcast=True)
            except Exception as e:
                # Use a loguru placeholder so the exception detail is logged.
                logger.error('Failed to publish transaction: {}', e)

    def publish_attributions_trx(self):
        """Build (but do not send) the `emit` transaction for our edges."""
        arguments = {
            "this_user": self.config.identity,
            "this_edges": [(attr[0], float(attr[1]))
                           for attr in self.attributions],
        }
        payload = {
            "account": "bittensoracc",
            "name": "emit",
            "authorization": [{
                "actor": self.config.identity,
                "permission": "active",
            }],
        }
        # Convert the JSON arguments to the chain's binary representation and
        # attach them as the action's data field.
        data = self.cleos.abi_json_to_bin(payload['account'], payload['name'],
                                          arguments)
        payload['data'] = data['binargs']
        # Final transaction formed.
        trx = {"actions": [payload]}
        return trx

    def __repr__(self):
        return self.__str__()

    def __str__(self):
        str_rep = "\nmetagraph = {\n"
        for node in self.nodes.values():
            str_rep += ("\t" + str(node) + "\n")
        str_rep += "}."
        return str_rep
135 |
--------------------------------------------------------------------------------
/neurons/boltzmann/nucleus.py:
--------------------------------------------------------------------------------
1 | import collections
2 | from loguru import logger
3 | import tensorflow as tf
4 | import zipfile
5 |
6 |
class Nucleus():
    """TF1-style graph holding this neuron's word-embedding model.

    The graph tokenizes input strings, embeds the tokens, concatenates the
    local embedding with `k` downstream spike tensors, and produces a single
    sigmoid layer of `embedding_size` outputs. Separate session fetches
    implement the forward pass (spike), backward pass (grade) and the
    gradient-application step (learn).
    """

    def __init__(self, config):
        self.config = config

        # Dataset zip file.
        self.filename = 'neurons/boltzmann/data/text8.zip'
        # Size of vocab embedding.
        self.vocabulary_size = 50000
        # Size of training batch.
        # NOTE(review): batch_size and num_sampled are defined but unused by
        # the visible graph construction below.
        self.batch_size = 128
        # Dimension of the embedding vector.
        self.embedding_size = 128
        # Number of negative examples to sample.
        self.num_sampled = 64

        # Build Tokenizer vocabulary.
        self.build_vocabulary()

        # Construct the model graph, pinned to CPU.
        self.graph = tf.Graph()
        with self.graph.as_default(), tf.device('/cpu:0'):
            self.build_graph()

        # Create TF Session.
        self.session = tf.compat.v1.Session(graph=self.graph)

        # Init tables and vars.
        self.session.run(self.var_init)

        self.session.run(self.table_init)

    def spike(self, uspikes, dspikes):
        """Forward pass: embed `uspikes` (string array) joined with the `k`
        downstream spike arrays and return the output activations."""

        # Build Feeds dictionary.
        feeds = {self.text_placeholder: uspikes}
        for i in range(self.config.k):
            feeds[self.dspikes[i]] = dspikes[i]

        # Build Fetches dictionary.
        fetches = {'output': self.output}

        # Run graph.
        run_output = self.session.run(fetches, feeds)

        # Return spikes.
        return run_output['output']

    def grade(self, ugrades, uspikes, dspikes):
        """Backward pass: given upstream gradients `ugrades` for the inputs
        that produced them, return (downstream gradients, local gradients)."""

        # Build Feeds dictionary.
        feeds = {}
        feeds[self.text_placeholder] = uspikes
        feeds[self.output_grad] = ugrades
        for i in range(self.config.k):
            feeds[self.dspikes[i]] = dspikes[i]

        # Fetch local variable gradients plus one gradient tensor per
        # downstream peer.
        fetches = {}
        fetches['lgrads'] = self.gradient_values
        for i in range(self.config.k):
            fetches["dgrads" + str(i)] = self.downstream_grads[i]

        # Run graph.
        run_output = self.session.run(fetches, feeds)

        # Return spikes.
        return [run_output["dgrads" + str(i)] for i in range(self.config.k)
                ], run_output['lgrads']

    def learn(self, gradients):
        """Apply a batch of pre-computed gradients to the trainable vars."""

        # Build Feeds dictionary.
        # Feed batch of gradients.
        feeds = {}
        for i, grad_var in enumerate(gradients):
            feeds[self.placeholder_gradients[i][0]] = gradients[i]

        # Fetches. Call apply gradients.
        fetches = {}
        fetches['step'] = self.step

        # Run graph. No output.
        self.session.run(fetches, feeds)

    def build_graph(self):
        """Construct placeholders, embedding lookup, the dense layer, the
        gradient tensors and the optimizer step. Must be called inside
        `self.graph.as_default()` (see __init__)."""

        # Text input placeholder.
        self.text_placeholder = tf.compat.v1.placeholder(
            tf.string, shape=[None, 1], name="text_placeholder")
        input_text = tf.reshape(self.text_placeholder, [-1])

        # Tokenization.
        # NOTE(review): tf.contrib ties this to TF 1.x even though other
        # calls use the tf.compat.v1 spelling — confirm target TF version.
        vocabulary_table = tf.contrib.lookup.index_table_from_tensor(
            mapping=tf.constant(self.string_map),
            num_oov_buckets=1,
            default_value=0)
        input_tokens = vocabulary_table.lookup(input_text)

        # Token spikes: look up and flatten to [-1, embedding_size].
        embedding_matrix = tf.Variable(
            tf.random.uniform([self.vocabulary_size, self.embedding_size], -1.0,
                              1.0))
        self.token_spikes = tf.nn.embedding_lookup(embedding_matrix,
                                                   input_tokens)
        self.token_spikes = tf.reshape(self.token_spikes,
                                       [-1, self.embedding_size])

        # Placeholders for downstream spikes.
        self.dspikes = []
        for i in range(self.config.k):
            downstream_spikes = tf.compat.v1.placeholder(
                tf.float32,
                shape=[None, self.embedding_size],
                name="dspikes_placeholder" + str(i))
            self.dspikes.append(downstream_spikes)

        # activation_spikes = [None, embedding_size * (self.config.k + 1)]
        self.activation_size = self.embedding_size * (self.config.k + 1)
        self.activation_spikes = tf.concat([self.token_spikes] + self.dspikes,
                                           axis=1)

        # Layer 1.
        w1 = tf.Variable(
            tf.random.uniform([self.activation_size, self.embedding_size], -1.0,
                              1.0))
        b1 = tf.Variable(tf.zeros([self.embedding_size]))
        local_spikes = tf.sigmoid(tf.matmul(self.activation_spikes, w1) + b1)

        # Representation. Output Spikes,
        self.output = tf.identity(local_spikes, name="output")

        # Upstream gradient placeholder.
        # NOTE(review): legacy tf.placeholder here vs tf.compat.v1 above —
        # confirm both spellings exist in the targeted TF version.
        self.output_grad = tf.placeholder(tf.float32,
                                          [None, self.embedding_size])

        # Build downstream grad tensors.
        self.downstream_grads = []
        for i in range(self.config.k):
            dspikes_grad = tf.gradients(xs=[self.dspikes[i]],
                                        ys=self.output,
                                        grad_ys=self.output_grad,
                                        name="dgrads" + str(i))
            self.downstream_grads.append(dspikes_grad)

        # Build optimizer.
        self.optimizer = tf.train.GradientDescentOptimizer(self.config.alpha)
        gradients = self.optimizer.compute_gradients(loss=self.output,
                                                     grad_loss=self.output_grad)

        # Build gradient placeholders for the Learn step: one placeholder per
        # (gradient, variable) pair, fed later by learn().
        self.gradient_values = []
        self.placeholder_gradients = []
        for gradient_variable in gradients:
            grad_placeholder = tf.placeholder(
                'float', shape=gradient_variable[1].get_shape())
            self.gradient_values.append(gradient_variable[1])
            self.placeholder_gradients.append(
                (grad_placeholder, gradient_variable[1]))

        self.step = self.optimizer.apply_gradients(self.placeholder_gradients)

        # Init vars.
        self.var_init = tf.compat.v1.global_variables_initializer()
        self.table_init = tf.compat.v1.tables_initializer(
            name='init_all_tables')

        logger.debug('Built Nucleus graph.')

    def build_vocabulary(self):
        """ Parses the dummy corpus into a single sequential array.
        Each sentence is appended to each other. Also produces count dictionary
        for each word in the corpus.
        """

        # Read textfile.
        f = zipfile.ZipFile(self.filename)
        for name in f.namelist():
            self.words = tf.compat.as_str(f.read(name)).split()
        f.close()

        # Index 0 is reserved for out-of-vocabulary ('UNK') tokens.
        counts = [('UNK', -1)]
        counts.extend(
            collections.Counter(self.words).most_common(self.vocabulary_size -
                                                        2))
        self.string_map = [c[0] for c in counts]

        logger.debug('Built Nucleus vocabulary.')
193 |
--------------------------------------------------------------------------------
/neurons/elmo/Dockerfile:
--------------------------------------------------------------------------------
FROM unconst/bittensor

# Copy across the local files.
# NOTE: COPY source paths are case-sensitive on Linux; the directory in this
# repository is `neurons/elmo`, not `neurons/ELMO`.
COPY neurons/elmo bittensor/neurons/elmo

# Cache downloaded tensorflow-hub modules between container runs.
ENV TFHUB_CACHE_DIR /bittensor/cache

RUN pip install --upgrade pip && \
    pip install tensorflow_hub==0.4.0
11 |
--------------------------------------------------------------------------------
/neurons/elmo/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/unconst/BitTensor/d1af6993c1d6bca273a0c8d147132ee9867f5543/neurons/elmo/__init__.py
--------------------------------------------------------------------------------
/neurons/elmo/config.py:
--------------------------------------------------------------------------------
1 | import os
2 | import sys
3 | import tensorflow as tf
4 |
# Tensorflow flags.
flags = tf.app.flags
FLAGS = flags.FLAGS

# TODO (const): This needs to be global bittensor.config
# Fixed typos in user-facing help strings ("Nueron", "logginf").
flags.DEFINE_string("identity", "xxxxxxx", "Neuron identity.")
flags.DEFINE_string("serve_address", "0.0.0.0", "Address serve synapse.")
flags.DEFINE_string("bind_address", "0.0.0.0", "Address bind synapse.")
flags.DEFINE_string("port", "9090", "Port to serve on.")
flags.DEFINE_string("eosurl", "http://0.0.0.0:8888", "EOS Url.")
flags.DEFINE_string("logdir", "/tmp/", "Logging directory.")
flags.DEFINE_integer("k", 3, "Out edge degree.")
flags.DEFINE_float("alpha", 0.01, "Learning rate.")
18 |
19 |
class Config():
    """Snapshot of the command-line flag values used by this neuron."""

    def __init__(self):
        self.identity = FLAGS.identity
        self.serve_address = FLAGS.serve_address
        self.bind_address = FLAGS.bind_address
        self.port = FLAGS.port
        self.eosurl = FLAGS.eosurl
        self.logdir = FLAGS.logdir
        self.k = FLAGS.k
        self.alpha = FLAGS.alpha

    def __repr__(self):
        return self.__str__()

    def __str__(self):
        # BUG FIX: the original used `self.eosurl ++ " \n"` (parses as
        # `+ (+str)` -> TypeError) and printed `self.time_till_expire`,
        # which __init__ never sets (AttributeError). Both removed so that
        # __str__ cannot crash.
        return "\nconfig = {\n\tidentity: " + self.identity + " \n" +\
            "\tserve_address: " + self.serve_address + " \n" +\
            "\tbind_address: " + self.bind_address + " \n" +\
            "\teosurl: " + self.eosurl + " \n" +\
            "\tport: " + self.port + " \n" +\
            "\tk: " + str(self.k) + " \n" + \
            "\talpha: " + str(self.alpha) + " \n}."
44 |
--------------------------------------------------------------------------------
/neurons/elmo/main.py:
--------------------------------------------------------------------------------
1 | import bittensor
2 |
3 | from config import Config
4 |
5 | from concurrent import futures
6 | import grpc
7 | from loguru import logger
8 | import numpy
9 | import pickle
10 |
11 | import time
12 | import tensorflow as tf
13 | import tensorflow_hub as hub
14 |
15 | EMBEDDING_SIZE = 128
16 |
17 |
class Neuron(bittensor.proto.bittensor_pb2_grpc.BittensorServicer):
    """gRPC servicer answering Spike/Grade requests with a fixed, pretrained
    text-embedding model loaded from tensorflow-hub. The model is not
    trained, so Grade requests are acknowledged and discarded."""

    def __init__(self, config):
        self.config = config

        # Load ELMO model.
        logger.info('Loading tensorflow hub module.')
        logger.info('https://tfhub.dev/google/tf2-preview/nnlm-en-dim128/1')
        tf.compat.v1.enable_eager_execution()
        # NOTE(review): despite the ELMO naming, this URL is the
        # nnlm-en-dim128 module — confirm which embedding model is intended.
        module_url = "https://tfhub.dev/google/tf2-preview/nnlm-en-dim128/1"
        self.embed = hub.KerasLayer(module_url)
        logger.info('done.')

        # Init server: bind an insecure gRPC server on the configured address.
        self.server_address = self.config.bind_address + ":" + self.config.port
        self.server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
        bittensor.proto.bittensor_pb2_grpc.add_BittensorServicer_to_server(
            self, self.server)
        self.server.add_insecure_port(self.server_address)

    def __del__(self):
        # Stop serving when this object is garbage collected.
        self.server.stop(0)
        logger.debug('Stopped Serving Neuron at: {}.', self.server_address)

    def serve(self):
        # Start the gRPC server (non-blocking; worker threads handle calls).
        self.server.start()
        logger.debug('Started Serving Neuron at: {}.', self.server_address)

    def Spike(self, request, context):
        """Forward-pass RPC: embed the pickled text payload and return it."""
        # Unpack message.
        version = request.version
        source_id = request.source_id
        parent_id = request.parent_id
        message_id = request.message_id
        # SECURITY(review): pickle.loads on a network payload executes
        # arbitrary code from untrusted peers.
        inputs = numpy.asarray(pickle.loads(request.payload))
        logger.info('s {}', parent_id)

        # Inference through EMLO.
        # Assumes the module's embedding width equals EMBEDDING_SIZE (128) —
        # TODO confirm the reshape orientation matches what callers expect.
        embeddings = numpy.array(self.embed(inputs.flatten())).reshape(
            EMBEDDING_SIZE, -1)

        # Pack response.
        response_payload = pickle.dumps(embeddings, protocol=0)
        response = bittensor.proto.bittensor_pb2.SpikeResponse(
            child_id=self.config.identity,
            message_id=message_id,
            payload=response_payload)

        return response

    def Grade(self, request, context):
        """Backward-pass RPC: this model is fixed, so gradients are ignored."""
        parent_id = request.parent_id
        logger.info('g {}', parent_id)
        # Pass.
        return bittensor.proto.bittensor_pb2.GradeResponse(accept=True)
73 |
74 |
def main():
    """Build an ELMO-serving neuron and keep the process alive until killed."""
    config = Config()
    neuron = Neuron(config)
    neuron.serve()

    def tear_down(cfg, srv):
        # Dropping the references triggers Neuron.__del__, stopping the server.
        logger.debug('tear down.')
        del srv
        del cfg

    try:
        logger.info('Begin wait on main...')
        while True:
            logger.debug('heartbeat')
            time.sleep(100)

    except KeyboardInterrupt:
        logger.debug('Neuron stopped with keyboard interrupt.')
        tear_down(config, neuron)

    except Exception as e:
        logger.error('Neuron stopped with interrupt on error: ' + str(e))
        tear_down(config, neuron)
100 |
101 |
# Script entry point.
if __name__ == '__main__':
    logger.debug("started neuron.")
    main()
105 |
--------------------------------------------------------------------------------
/neurons/feynman/Dockerfile:
--------------------------------------------------------------------------------
FROM unconst/bittensor

# Copy across the local files.
# NOTE: COPY source paths are case-sensitive on Linux; the directory in this
# repository is `neurons/feynman`, not `neurons/Feynman`.
COPY neurons/feynman bittensor/neurons/feynman
5 |
--------------------------------------------------------------------------------
/neurons/feynman/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/unconst/BitTensor/d1af6993c1d6bca273a0c8d147132ee9867f5543/neurons/feynman/__init__.py
--------------------------------------------------------------------------------
/neurons/feynman/config.py:
--------------------------------------------------------------------------------
1 | import os
2 | import sys
3 | import tensorflow as tf
4 |
# Tensorflow flags.
flags = tf.app.flags
FLAGS = flags.FLAGS

# TODO (const): This needs to be global bittensor.config
# Fixed typos in user-facing help strings ("Nueron", "logginf").
flags.DEFINE_string("identity", "xxxxxxx", "Neuron identity.")
flags.DEFINE_string("serve_address", "0.0.0.0", "Address serve synapse.")
flags.DEFINE_string("bind_address", "0.0.0.0", "Address bind synapse.")
flags.DEFINE_string("port", "9090", "Port to serve on.")
flags.DEFINE_string("eosurl", "http://0.0.0.0:8888", "EOS Url.")
flags.DEFINE_string("logdir", "/tmp/", "Logging directory.")
flags.DEFINE_integer("k", 3, "Out edge degree.")
flags.DEFINE_float("alpha", 0.01, "Learning rate.")
18 |
19 |
class Config():
    """Snapshot of the command-line flag values used by this neuron."""

    def __init__(self):
        self.identity = FLAGS.identity
        self.serve_address = FLAGS.serve_address
        self.bind_address = FLAGS.bind_address
        self.port = FLAGS.port
        self.eosurl = FLAGS.eosurl
        self.logdir = FLAGS.logdir
        self.k = FLAGS.k
        self.alpha = FLAGS.alpha

    def __repr__(self):
        return self.__str__()

    def __str__(self):
        # BUG FIX: the original used `self.eosurl ++ " \n"` (parses as
        # `+ (+str)` -> TypeError) and printed `self.time_till_expire`,
        # which __init__ never sets (AttributeError). Both removed so that
        # __str__ cannot crash.
        return "\nconfig = {\n\tidentity: " + self.identity + " \n" +\
            "\tserve_address: " + self.serve_address + " \n" +\
            "\tbind_address: " + self.bind_address + " \n" +\
            "\teosurl: " + self.eosurl + " \n" +\
            "\tport: " + self.port + " \n" +\
            "\tk: " + str(self.k) + " \n" + \
            "\talpha: " + str(self.alpha) + " \n}."
44 |
--------------------------------------------------------------------------------
/neurons/feynman/data/text8.zip:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/unconst/BitTensor/d1af6993c1d6bca273a0c8d147132ee9867f5543/neurons/feynman/data/text8.zip
--------------------------------------------------------------------------------
/neurons/feynman/dendrite.py:
--------------------------------------------------------------------------------
1 | import bittensor
2 |
3 | from Crypto.Hash import SHA256
4 | import grpc
5 | from loguru import logger
6 | import pickle
7 | import numpy as np
8 | import random
9 | import struct
10 | import tensorflow as tf
11 | from tensorflow.python.framework import ops
12 | import time
13 |
14 | # TODO (const): Negotiate channels with upstream nodes.
15 |
16 | EMBEDDING_SIZE = 128
17 |
18 |
class Dendrite():
    """Manages this neuron's client connections to `k` downstream peers.

    Holds one gRPC channel per downstream slot and exposes tf.py_function
    wrappers (`spike`, `grade`) so the TF graph can call remote peers during
    the forward and backward passes. All RPCs are best-effort: failures are
    swallowed and replaced with zeros / no-ops.
    """

    def __init__(self, config, metagraph):
        self.config = config
        self.metagraph = metagraph
        # One channel / node per downstream slot; None means unfilled.
        self.channels = [None for _ in range(self.config.k)]
        self.channel_nodes = [None for _ in range(self.config.k)]
        self.reselect_channels()

    def reselect_channels(self):
        """Fill any empty downstream slots with peers from the metagraph."""
        nodes = self.metagraph.nodes
        for i in range(self.config.k):
            # Keep already-established channels. (Was `!= None`; identity
            # comparison with None is the correct idiom.)
            if self.channels[i] is not None:
                continue

            # Pick the first known peer that is neither already connected nor
            # ourselves.
            selected_node = None
            for node in nodes.values():
                if node not in self.channel_nodes and node.identity != self.config.identity:
                    selected_node = node
                    break

            if selected_node:
                address = selected_node.address + ':' + selected_node.port
                self.channels[i] = grpc.insecure_channel(address)
                self.channel_nodes[i] = selected_node

        logger.debug(self.__str__())

    def __repr__(self):
        return self.__str__()

    def __str__(self):
        str_rep = "\ndendrite = {\n"
        i = 0
        for node in self.channel_nodes:
            if node:
                str_rep += ('\t\t[' + str(i) + ']:' + str(node.identity) + "\n")
            else:
                str_rep += ('\t\t[' + str(i) + ']:' + "None" + "\n")
            i += 1
        str_rep += "}."
        return str_rep

    def _gradrpc(self, channel, spikes, grad):
        """Send one Grade (gradient) request over `channel`; best-effort."""
        if channel is None:
            return

        try:
            # Build Stub.
            stub = bittensor.proto.bittensor_pb2_grpc.BittensorStub(channel)

            # Serialize identity, gradient and spikes.
            identity_bytes = self.config.identity.encode()
            grad_bytes = pickle.dumps(grad.numpy(), protocol=0)
            spike_bytes = pickle.dumps(spikes.numpy(), protocol=0)

            # Create hash from self.id and spikes.
            hash = SHA256.new()
            hash.update(identity_bytes)
            hash.update(spike_bytes)
            message_hash = hash.digest()

            # Create request proto.
            request = bittensor.proto.bittensor_pb2.GradeRequest(
                parent_id=self.config.identity,
                message_id=message_hash,
                payload=grad_bytes)

            # Send Grade request.
            stub.Grade(request)

        except Exception as error:
            # Deliberate best-effort: losing a gradient message is acceptable.
            #logger.info('failed call {}', error)
            pass

    def _spikerpc(self, channel, spikes):
        """Send one Spike request over `channel`; return the peer's payload
        reshaped to (EMBEDDING_SIZE, -1), or None on any failure."""
        #logger.info('dendrite._spikerpc')
        if channel is None:
            return None

        try:
            # Build Stub and request proto.
            stub = bittensor.proto.bittensor_pb2_grpc.BittensorStub(channel)

            # Build message hash
            identity_bytes = self.config.identity.encode()
            spike_bytes = pickle.dumps(spikes.numpy(), protocol=0)

            # Create hash from self.identity and spikes.
            hash = SHA256.new()
            hash.update(identity_bytes)
            hash.update(spike_bytes)
            message_hash = hash.digest()

            # Build request proto.
            request = bittensor.proto.bittensor_pb2.SpikeRequest(
                parent_id=self.config.identity,
                message_id=message_hash,
                payload=spike_bytes)

            # Send spike request.
            response = stub.Spike(request)

            # Deserialize response as numpy.
            # SECURITY(review): pickle.loads on a network payload executes
            # arbitrary code from the remote peer.
            return pickle.loads(response.payload).reshape(EMBEDDING_SIZE, -1)

        except Exception as error:
            # Best-effort: a failed peer simply yields no spikes.
            #logger.info('failed call {}', error)
            return None

    def _grad(self, spikes, *grads):
        """Fan the i-th gradient out to the i-th downstream channel."""
        for i in range(self.config.k):
            channel = self.channels[i]
            grad_i = grads[i]
            if channel:
                self._gradrpc(channel, spikes, grad_i)

    def _spike(self, spikes):
        """Query all k downstream peers; zeros stand in for failed calls."""
        #logger.info('dendrite._spikecast')
        # TODO(const) Currently this function is syncronous. Calls to the
        # dendrite nodes should be async to save on time.
        result = []
        for i in range(self.config.k):
            res = self._spikerpc(self.channels[i], spikes)
            if res is None:
                result.append(
                    np.zeros((len(spikes), EMBEDDING_SIZE), dtype=np.float32))
            else:
                result.append(res)
        return result

    def grade(self, spikes, grads):
        """tf.py_function wrapper sending gradients downstream (no outputs)."""
        inputs = [spikes] + grads
        return tf.py_function(self._grad, inputs, [])

    def spike(self, words_tensor):
        """tf.py_function wrapper returning k float32 downstream spike tensors."""
        #logger.info('dendrite.spike')
        rtypes = [tf.float32 for _ in range(self.config.k)]
        inputs = [words_tensor]
        return tf.py_function(self._spike, inputs, rtypes)
161 |
--------------------------------------------------------------------------------
/neurons/feynman/main.py:
--------------------------------------------------------------------------------
1 | import bittensor
2 |
3 | from config import Config
4 | from metagraph import Metagraph
5 | from dendrite import Dendrite
6 | from nucleus import Nucleus
7 | from synapse import BoltServicer
8 | import visualization
9 |
10 | from concurrent import futures
11 | import grpc
12 | from loguru import logger
13 | import sys
14 | import time
15 | from timeloop import Timeloop
16 | from datetime import timedelta
17 |
18 |
def set_timed_loops(tl, metagraph, nucleus, synapse, dendrite):
    """Register this neuron's periodic maintenance jobs on the Timeloop `tl`.

    NOTE(review): `nucleus` is accepted but no registered job uses it.
    """

    # Pull the updated graph state (Vertices, Edges, Weights)
    @tl.job(interval=timedelta(seconds=7))
    def pull_metagraph():
        metagraph.pull_metagraph()

    # Publish attributions (Edges, Weights.)
    @tl.job(interval=timedelta(seconds=2))
    def publish_attributions():
        metagraph.publish_attributions()

    # Load an updated inference nn-tensorflow model.
    @tl.job(interval=timedelta(seconds=15))
    def load_graph():
        synapse.load_graph()

    # Reselect downstream nodes.
    # TODO(const) perhaps this should be removed. Instead downstream state is
    # fixed at the start.
    @tl.job(interval=timedelta(seconds=13))
    def reselect_channels():
        dendrite.reselect_channels()
42 |
43 |
def serve():
    """Construct all neuron components, serve the synapse over gRPC, and
    block in the main thread rendering metagraph visualizations."""

    # TODO(const) Use Hparams and FLAGS like in ARC nets.
    config = Config()
    logger.debug(config)

    # The metagraph manages the global network state.
    metagraph = Metagraph(config)

    # The dendrite manages our connections to 'upstream' nodes.
    dendrite = Dendrite(config, metagraph)

    # The nucleus trains the NN object.
    nucleus = Nucleus(config, metagraph, dendrite)

    # The synapse manages our connection to downstream nodes.
    synapse = BoltServicer(config, metagraph)
    logger.info('Started Synapse.')

    # Start the Nucleus.
    nucleus.start()
    logger.info('Started Nucleus.')

    # Start timed calls.
    tl = Timeloop()
    set_timed_loops(tl, metagraph, nucleus, synapse, dendrite)
    tl.start(block=False)
    logger.info('Started Timers.')

    # Serve the synapse on a grpc server.
    server_address = config.bind_address + ":" + config.port
    grpc_server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
    bittensor.proto.bittensor_pb2_grpc.add_BittensorServicer_to_server(
        synapse, grpc_server)
    grpc_server.add_insecure_port(server_address)
    logger.debug('Served synapse on: {}.', server_address)
    grpc_server.start()

    def tear_down(_server, _nucleus, _metagraph, _dendrite, _synapse):
        # Stop network serving and training before dropping references.
        _server.stop(0)
        _nucleus.stop()
        del _metagraph
        del _dendrite
        del _nucleus
        del _synapse

    try:
        logger.info('Begin wait on main...')
        while True:

            # NOTE(const): Matplotlib must run in the main thread.
            image_buffer = visualization.generate_edge_weight_buffer(
                metagraph.nodes)
            nucleus.update_metagraph_summary(image_buffer)
            logger.info('Updated metagraph image.')
            time.sleep(30)

    except KeyboardInterrupt:
        logger.debug('keyboard interrupt.')
        tear_down(grpc_server, nucleus, metagraph, dendrite, synapse)

    except Exception as e:
        # BUG FIX: was a bare `except:` that also swallowed SystemExit and
        # hid the failure cause; log the exception before tearing down.
        logger.error('unknown interrupt: {}', e)
        tear_down(grpc_server, nucleus, metagraph, dendrite, synapse)
108 |
109 |
# Script entry point.
if __name__ == '__main__':
    logger.debug("started neuron.")
    serve()
113 |
--------------------------------------------------------------------------------
/neurons/feynman/metagraph.py:
--------------------------------------------------------------------------------
1 | import ast
2 | from loguru import logger
3 | import sys
4 |
5 | import eospy.keys
6 | from eospy.cleos import Cleos
7 |
8 |
class Node():
    """A single peer in the metagraph, built from one row of the EOS table."""

    def __init__(self, entry):
        # EOS account name.
        self.identity = entry['identity']
        # Network Stake.
        self.stake = entry['stake']
        # Last emission marker for this peer.
        self.last_emit = entry['last_emit']
        # IP address.
        self.address = entry['address']
        # Port number.
        self.port = entry['port']
        # List of (edge name, edge weight) pairs.
        self.edges = entry['edges']

    def __repr__(self):
        return self.__str__()

    def __str__(self):
        # Render each edge as (name, weight formatted to 4 decimal places).
        pairs = [(edge['first'], format(float(edge['second']), '.4f'))
                 for edge in self.edges]
        return ("( " + self.identity + " | " + str(self.stake) + " | " +
                str(self.last_emit) + " | " + self.address + ":" +
                str(self.port) + ' | ' + str(pairs) + " )")

    def __eq__(self, other):
        # Identity alone determines equality; anything falsey is unequal.
        return bool(other) and self.identity == other.identity

    def __ne__(self, other):
        return not self.__eq__(other)

    def __hash__(self):
        return hash(str(self.identity))
47 |
48 |
# TODO (const): Pull Token supply.
# TODO (const): Call Token Emit.
# TODO (const): Calculate attribution derivatives for synapse pruning.
52 |
53 |
class Metagraph():
    """Local view of the global bittensor network state stored on an EOS chain.

    Reads the `metagraph` table from the `bittensoracc` contract, caching each
    row as a Node keyed by identity. Also publishes this neuron's attribution
    (edge weight) scores back to the chain via the contract's `emit` action.
    """

    def __init__(self, config):
        self.config = config
        # RPC client bound to the EOS endpoint named in the config.
        self.cleos = Cleos(url=config.eosurl)
        # identity -> Node, refreshed by pull_metagraph().
        self.nodes = {}
        self.pull_metagraph()
        # Attribution scores published to the chain; starts as full
        # self-attribution.
        self.attributions = [(config.identity, 1.0)]
        # TODO(const) this should be our own key. NOT EOSMAIN.
        # SECURITY(review): hard-coded private key — this is the well-known
        # EOS development key and must never be used outside a local test
        # chain; load a per-node key from config/env instead.
        self.eoskey = "5KQwrPbwdL6PhXujxW37FSSQZ1JiwsST4cqQzDeyXtP79zkvFD3"

    def get_my_stake(self):
        # Stake held by this neuron's own identity; raises KeyError if we are
        # not yet present in the pulled table.
        return int(self.nodes[self.config.identity].stake)

    # TODO(const): pull this from the eos chain under the var 'total stake'
    # instead of doing a sum.
    def get_total_stake(self):
        return int(sum(node.stake for node in self.nodes.values()))

    def pull_metagraph(self):
        """Refresh self.nodes from the on-chain metagraph table."""
        table = self.cleos.get_table('bittensoracc', 'bittensoracc',
                                     'metagraph')
        for entry in table['rows']:
            self.nodes[entry['identity']] = Node(entry)
        logger.debug(self.__str__())

    # Push attribution scores.
    def publish_attributions(self):
        logger.debug('Publish attributions: ' + str(self.attributions))
        transaction = self.publish_attributions_trx()
        try:
            # TODO (const) Rewrite the cleos library for our selves.
            resp = self.cleos.push_transaction(transaction,
                                               self.eoskey,
                                               broadcast=True)
        except Exception:
            # Some cleos versions require a wrapped key object; retry once.
            try:
                eoskey = eospy.keys.EOSKey(self.eoskey)
                resp = self.cleos.push_transaction(transaction,
                                                   eoskey,
                                                   broadcast=True)
            except Exception as e:
                # Use a loguru placeholder so the exception detail is logged.
                logger.error('Failed to publish transaction: {}', e)

    def publish_attributions_trx(self):
        """Build (but do not send) the `emit` transaction for our edges."""
        arguments = {
            "this_user": self.config.identity,
            "this_edges": [(attr[0], float(attr[1]))
                           for attr in self.attributions],
        }
        payload = {
            "account": "bittensoracc",
            "name": "emit",
            "authorization": [{
                "actor": self.config.identity,
                "permission": "active",
            }],
        }
        # Convert the JSON arguments to the chain's binary representation and
        # attach them as the action's data field.
        data = self.cleos.abi_json_to_bin(payload['account'], payload['name'],
                                          arguments)
        payload['data'] = data['binargs']
        # Final transaction formed.
        trx = {"actions": [payload]}
        return trx

    def __repr__(self):
        return self.__str__()

    def __str__(self):
        str_rep = "\nmetagraph = {\n"
        for node in self.nodes.values():
            str_rep += ("\t" + str(node) + "\n")
        str_rep += "}."
        return str_rep
135 |
--------------------------------------------------------------------------------
/neurons/feynman/synapse.py:
--------------------------------------------------------------------------------
1 | import bittensor
2 |
3 | from loguru import logger
4 | import numpy as np
5 | import pickle
6 | import sys
7 | import time
8 | import tensorflow as tf
9 |
10 |
11 | # TODO (const): Rate limit and block ip.
class BoltServicer(bittensor.proto.bittensor_pb2_grpc.BittensorServicer):
    """GRPC servicer that serves the latest trained inference graph."""

    def __init__(self, config, metagraph):
        """ Serves the inference graph for use by the network.
        Graphs being produced in training are served by the Synapse object.
        The Synapse is responsible for upstream connections, for rate limiting,
        and through this, negotiating for higher attribution within the Metagraph.

        Since the Synapse object is merely serving the inference graph,
        it is detached from the Nucleus and Dendrite during training,
        only communicating with these objects by pulling the latest and
        best inference graph from the storage directory.

        config:
            A Bittensor config object; .identity is this node's network
            identity (tied to an EOS account).
        metagraph:
            A Metagraph object which maintains state about the bittensor network.
        """
        logger.debug('Init Synapse.')
        self.identity = config.identity
        self.config = config
        # NOTE(fix): expose the metagraph under the correctly spelled name;
        # the misspelled attribute is kept as an alias for any external code
        # that still reads it.
        self.metagraph = metagraph
        self.metegraph = metagraph
        self.load_graph()

    def load_graph(self):
        """ Loads the latest checkpoint from the neuron root dir.

        Restores data/<identity>/model.meta into a fresh CPU-only session,
        runs a warm-up inference, and only then swaps the serving session.
        Raises the original exception if the graph cannot be restored.
        """
        logger.debug('Trying to serve graph on Synapse ...')
        try:
            graph = tf.Graph()
            with graph.as_default(), tf.device('/cpu:0'):
                saver = tf.compat.v1.train.import_meta_graph('data/' +
                                                             self.identity +
                                                             '/model.meta')
                next_session = tf.compat.v1.Session()
                saver.restore(
                    next_session,
                    tf.compat.v1.train.latest_checkpoint('data/' +
                                                         self.identity))
                next_session.run('init_all_tables')
                next_session.run(tf.compat.v1.local_variables_initializer())
                # Warm-up run: proves the restored graph is servable before
                # we swap it in below.
                next_session.run(
                    "embedding_output:0",
                    feed_dict={
                        "inference_batch_words:0": [['UNK']],  # Inference.
                        'is_training:0': False
                    })
        except Exception as e:
            logger.error('Failed to server new graph. Exception {}', e)
            # Re-raise the original exception so the traceback is preserved
            # (the old 'raise Exception(e)' discarded it).
            raise

        logger.debug('Served graph on Synapse.')
        # Swap only after the new graph passed the warm-up run.
        self.session = next_session

    def Spike(self, request, context):
        """ GRPC request handler for message Spike; Runs tensor request through the graph.
        Args:
            request: A Spike proto containing the incoming words as a pickled
                unidimensional array of strings to be embedded.
            context: A GRPC message context.
        Returns:
            response: A SpikeResponse proto containing identity,
                message identifier, and the embedded words (payload)
                as outputted by running the session graph.
        """
        # TODO (const) The synapse should be competitively selecting which nodes
        # are allowed to query us based on the Metagraph information.
        # SECURITY: pickle.loads on a network payload can execute arbitrary
        # code; this should be replaced with a safe serialization format.
        batch_words = pickle.loads(request.payload)
        embeddings = self.session.run(
            "embedding_output:0",
            feed_dict={
                "inference_batch_words:0": batch_words.tolist(),  # Inference.
                'is_training:0': False
            })
        payload = pickle.dumps(embeddings, protocol=0)
        # NOTE(fix): the proto module is bittensor_pb2 (there is no bolt_pb2
        # under bittensor/proto), matching Grade() below.
        response = bittensor.proto.bittensor_pb2.SpikeResponse(
            child_id=self.config.identity,
            message_id=request.message_id,
            payload=payload)
        return response

    def Grade(self, request, context):
        """ GRPC request handler for message Grade; Accepts a gradient message.
        Args:
            request: A grade message proto containing the request identity,
                message identifier, and a payload interpreted as gradients
                w.r.t. the input payload.
            context: A GRPC message context.
        Returns:
            response: A GradeResponse proto containing an accepted message.
        """
        # TODO(const) this should append gradient messages to a training queue.
        return bittensor.proto.bittensor_pb2.GradeResponse(accept=True)
122 |
--------------------------------------------------------------------------------
/neurons/feynman/visualization.py:
--------------------------------------------------------------------------------
1 | from __future__ import division
2 | import io
3 | import matplotlib as mpl
4 | import matplotlib.pyplot as plt
5 | import networkx as nx
6 | import numpy
7 | import os
8 | import tensorflow as tf
9 |
10 |
def figure_to_buff(figure):
    """Converts the matplotlib plot specified by 'figure' to a PNG image and
    returns it as an io.BytesIO positioned at offset 0. The supplied figure is
    closed and inaccessible after this call."""
    # Save the plot to a PNG in memory.
    buf = io.BytesIO()
    # NOTE(fix): save the *given* figure — plt.savefig() writes whatever
    # figure happens to be current, which may not be 'figure'.
    figure.savefig(buf, format='png')
    # Closing the figure prevents it from being displayed directly inside
    # the notebook.
    plt.close(figure)
    buf.seek(0)
    return buf
22 |
23 |
def generate_edge_weight_buffer(nodes):
    """Render the metagraph as a directed graph and return a PNG buffer.

    Node size is proportional to each node's share of total stake; edge color
    and label reflect the attribution weight of each connection.

    Args:
        nodes: dict of identity -> node, where each node exposes .identity,
               .stake and .edges (dicts with 'first' = target identity and
               'second' = attribution weight) — assumed from usage; confirm
               against the Node class.
    Returns:
        io.BytesIO positioned at 0 containing the rendered PNG.
    """
    b_nodes = list(nodes.values())
    G = nx.DiGraph()

    # Guard against division by zero before any stake has been placed.
    total_stake = sum(node.stake for node in b_nodes) or 1.0

    # Build node sizes in proportion to stake held within the graph.
    node_sizes = []
    node_labels = {}
    for node in b_nodes:
        G.add_node(node.identity)
        node_sizes.append(25 + 500 * (node.stake / total_stake))
        node_labels[node.identity] = str(node.identity)

    # Edge colors (alphas and weight) reflect attribution weights of each
    # connection. Self-loops get an empty label to reduce clutter.
    edge_colors = {}
    edge_labels = {}
    for node in b_nodes:
        for edge in node.edges:
            key = (node.identity, edge['first'])
            if key not in edge_labels:
                G.add_edge(node.identity, edge['first'])
                edge_colors[key] = float(edge['second'])
                if node.identity != edge['first']:
                    edge_labels[key] = "%.3f" % float(edge['second'])
                else:
                    edge_labels[key] = ""

    # Set edge weights.
    for u, v, d in G.edges(data=True):
        d['weight'] = edge_colors[(u, v)]
    edges, weights = zip(*nx.get_edge_attributes(G, 'weight').items())

    # Clear Matplotlib buffer and create new figure.
    plt.cla()
    plt.clf()
    figure = plt.figure(figsize=(20, 15))

    pos = nx.layout.circular_layout(G)
    nx.draw_networkx_nodes(G,
                           pos,
                           node_size=node_sizes,
                           node_color='blue')
    nx.draw_networkx_edges(G,
                           pos,
                           arrowstyle='->',
                           arrowsize=15,
                           edge_color=weights,
                           edge_cmap=plt.cm.Blues,
                           width=5)
    nx.draw_networkx_edge_labels(G,
                                 pos,
                                 edge_labels=edge_labels,
                                 with_labels=True,
                                 label_pos=0.3)

    # Nudge node labels upward so they do not overlap the markers.
    for node in b_nodes:
        pos[node.identity] = pos[node.identity] + numpy.array([0, 0.1])
    nx.draw_networkx_labels(G, pos, node_labels)

    # Save the plot to a PNG in memory.
    buf = io.BytesIO()
    plt.savefig(buf, format='png')
    # Closing the figure prevents it from being displayed directly inside
    # the notebook.
    plt.close(figure)
    buf.seek(0)

    return buf
98 |
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | argparse
2 | grpcio
3 | grpcio-tools
4 | libeospy
5 | loguru
6 | matplotlib
7 | miniupnpc
8 | networkx
9 | numpy
10 | pebble
11 | pickle-mixin
12 | pycrypto
13 | tensorflow==1.15.2
14 | tensorflow_hub==0.4.0
15 | timeloop
16 | zipfile36
17 |
--------------------------------------------------------------------------------
/scripts/bittensor.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | source ./scripts/constant.sh
3 |
4 | # Arguments to this script.
5 |
6 | # Network identity
7 | IDENTITY=$1
8 | # Address to post to the EOS chain. (our API endpoint)
9 | SERVE_ADDRESS=$2
10 | # Local address to bind our server to.
11 | BIND_ADDRESS=$3
12 | # Port to bind endpoint on.
13 | PORT=$4
14 | # URL of EOS endpoint.
15 | EOSURL=$5
16 | # Directory to save checkpoints and logs.
17 | LOGDIR=$6
18 | # Python client to run.
19 | NEURON=$7
20 |
21 | # Creates the system eoisio wallet. This is used to build our unique account.
22 | # In the future the eosio account will be replaced with your own.
function create_eosio(){
    # Create the system eosio wallet used to build our unique account.
    # In the future the eosio account will be replaced with your own.
    trace "cleos -u $EOSURL wallet create -n eosio --to-console"
    # Branch on the command itself rather than the '[ $? -eq 0 ]' anti-pattern.
    if cleos -u "$EOSURL" wallet create -n eosio --to-console >> "data/$IDENTITY/bittensor_logs.out" 2>&1; then
        success "created wallet: eosio."
    else
        failure 'failed to create eosio wallet.'
        cat "data/$IDENTITY/bittensor_logs.out" 2>&1
    fi
}
33 |
34 | # Imports the eosio private key into the eosio wallet.
function import_eosio() {
    # Import the eosio private key into the eosio wallet.
    trace "cleos -u $EOSURL wallet import -n eosio --private-key $EOSIO_PRIVATE_KEY"
    # Branch on the command itself rather than the '[ $? -eq 0 ]' anti-pattern.
    if cleos -u "$EOSURL" wallet import -n eosio --private-key "$EOSIO_PRIVATE_KEY" >> "data/$IDENTITY/bittensor_logs.out" 2>&1; then
        success "imported eosio key."
    else
        failure 'failed to import eosio key.'
        cat "data/$IDENTITY/bittensor_logs.out" 2>&1
        exit 1
    fi
}
46 |
47 | # Unlocks the eosio wallet using the eosio wallet password.
48 | # In the future this will us your wallet own password.
function unlock_eosio() {
    # Unlock the eosio wallet using the eosio wallet password.
    # Failure ('already unlocked' / 'does not exist') is deliberately silent:
    # the original reported the identical success message in both branches,
    # so the branch is collapsed — behavior is unchanged.
    trace "cleos -u $EOSURL wallet unlock -n eosio --password $EOSIO_PASSWORD"
    cleos -u "$EOSURL" wallet unlock -n eosio --password "$EOSIO_PASSWORD" >> "data/$IDENTITY/bittensor_logs.out" 2>&1
    success "unlocked eosio."
}
58 |
59 | # Creates an account on the eos blockchain and assigns the eosio pub key as
60 | # owner and active key giving us permission to tranfer it's funds and make
61 | # contract transactions at a later time.
function create_account() {
    # Create an account on the eos blockchain and assign the eosio pub key as
    # owner and active key, giving us permission to transfer its funds and
    # make contract transactions later.
    trace "cleos -u $EOSURL create account eosio $IDENTITY $EOSIO_PUBLIC_KEY $EOSIO_PUBLIC_KEY"
    if cleos -u "$EOSURL" create account eosio "$IDENTITY" "$EOSIO_PUBLIC_KEY" "$EOSIO_PUBLIC_KEY" >> "data/$IDENTITY/bittensor_logs.out" 2>&1; then
        success "created account: $IDENTITY."
    else
        # Non-fatal: the account may already exist from a previous run.
        failure "failed to created account: $IDENTITY. With error above."
        cat "data/$IDENTITY/bittensor_logs.out" 2>&1
        #exit 1
    fi
}
73 |
74 | # Publish our newly formed account into the bittensoracc metagraph. We publish
75 | # our id, address, and port allowing other nodes to communicate with us.
function subscribe_account() {
    # Publish our newly formed account into the bittensoracc metagraph: our
    # id, address, and port, allowing other nodes to communicate with us.
    trace "cleos -u $EOSURL push action bittensoracc subscribe [\"$IDENTITY\", \"$SERVE_ADDRESS\", \"$PORT\"] -p $IDENTITY@active"
    # NOTE(fix): the action data must be valid JSON. The original quoting
    # produced [id, addr, port] with the inner quotes stripped.
    if cleos -u "$EOSURL" push action bittensoracc subscribe "[\"$IDENTITY\", \"$SERVE_ADDRESS\", \"$PORT\"]" -p "$IDENTITY@active" >> "data/$IDENTITY/bittensor_logs.out" 2>&1; then
        success "subscribe account: $IDENTITY."
    else
        failure "failed to subscribe account: $IDENTITY. Check your EOSURL connection."
        cat "data/$IDENTITY/bittensor_logs.out" 2>&1
        exit 1
    fi
}
87 |
88 | # Unpublish our account in the bittensoracc contract. This signals our leaving
89 | # the network also, it uncluters the network.
function unsubscribe_account() {
    # Unpublish our account from the bittensoracc contract. This signals our
    # leaving the network and also unclutters the metagraph.
    trace "cleos -u $EOSURL push action bittensoracc unsubscribe [\"$IDENTITY\"] -p $IDENTITY@active"
    # NOTE(fix): the action data must be valid JSON (see subscribe_account).
    if cleos -u "$EOSURL" push action bittensoracc unsubscribe "[\"$IDENTITY\"]" -p "$IDENTITY@active" >> "data/$IDENTITY/bittensor_logs.out" 2>&1; then
        success "unsubscribe account: $IDENTITY."
    else
        failure "failed to unsubscribe account: $IDENTITY. Check your EOSURL connection."
        cat "data/$IDENTITY/bittensor_logs.out" 2>&1
        exit 1
    fi
}
101 |
102 | # Prints the metagraph state to terminal.
function print_metagraph() {
    # Print the on-chain metagraph table to the terminal.
    # NOTE(fix): the trace line now matches the actual command (it previously
    # omitted '-u $EOSURL').
    trace "cleos -u $EOSURL get table bittensoracc bittensoracc metagraph"
    log "Metagraph:"
    cleos -u "$EOSURL" get table bittensoracc bittensoracc metagraph
}
108 |
function start_neuron() {
    # The main command.
    # Start our Neuron object training, server graph, open dendrite etc.
    # Runs in the background; the PID is recorded in NueronPID.
    # NOTE(review): 'NueronPID' is misspelled but intentionally preserved —
    # the teardown handler reads this exact variable name.
    log ""
    log "=== start Neuron ==="
    COMMAND="python3 neurons/$NEURON/main.py --identity $IDENTITY --serve_address $SERVE_ADDRESS --bind_address $BIND_ADDRESS --port $PORT --eosurl $EOSURL --logdir $LOGDIR"
    log "$COMMAND"
    eval $COMMAND &
    NueronPID=$!
}
119 |
120 | function main() {
121 | # Create the state directory for logs and model checkpoints.
122 | # TODO(const) In the future this could be preset and contain our conf file.
123 | mkdir -p data/$IDENTITY
124 | touch data/$IDENTITY/bittensor_logs.out
125 |
126 | # Intro logs.
127 | log "=== BitTensor ==="
128 | log "Args {"
129 | log " EOSURL: $EOSURL"
130 | log " IDENTITY: $IDENTITY"
131 | log " SERVE_ADDRESS: $SERVE_ADDRESS"
132 | log " BIND_ADDRESS: $BIND_ADDRESS"
133 | log " PORT: $PORT"
134 | log " LOGDIR: $LOGDIR"
135 | log " NEURON: neurons/$NEURON/main.py"
136 | log "}"
137 | log ""
138 | log "=== setup accounts ==="
139 |
140 |
141 | # TODO(const) These are currently hard coded to eosio main. In prodution this
142 | # should absolutely change.
143 | # Check to see if eosio wallet exists.
144 | # If not, create eosio account and pull private keys to this wallet.
145 | echo "EOSIO_PRIVATE_KEY=5KQwrPbwdL6PhXujxW37FSSQZ1JiwsST4cqQzDeyXtP79zkvFD3"
146 | echo "EOSIO_PUBLIC_KEY=EOS6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV"
147 | echo "EOSIO_PASSWORD=PW5JgJBjC1QXf8XoYPFY56qF5SJLLJNfjHbCai3DyC6We1FeBRL8q"
148 | EOSIO_PRIVATE_KEY=5KQwrPbwdL6PhXujxW37FSSQZ1JiwsST4cqQzDeyXtP79zkvFD3
149 | EOSIO_PUBLIC_KEY=EOS6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV
150 | EOSIO_PASSWORD=PW5JgJBjC1QXf8XoYPFY56qF5SJLLJNfjHbCai3DyC6We1FeBRL8q
151 |
152 |
153 | # Unlock eosio wallet. Silent failure on 'already unlocked error'.
154 | # or silent failure on does not exist.
155 | unlock_eosio
156 |
157 | # Pull the eosio pub key.
158 | PUBLIC_KEY=$(cleos -u $EOSURL wallet keys | tail -2 | head -n 1 | tr -d '"' | tr -d ' ')
159 |
160 | # Create eosio wallet if it does not exist.
161 | if [[ $EOSIO_PUBLIC_KEY != $PUBLIC_KEY ]]; then
162 | create_eosio
163 | import_eosio
164 | fi
165 |
166 | # Create out Identity account on the EOS blockchain. Set ownership to eosio.
167 | create_account
168 |
169 | # Publish our newly formed account to the eos blockchain.
170 | subscribe_account
171 |
172 | # Print metagraph.
173 | print_metagraph
174 |
175 | # Build protos
176 | ./scripts/build_protos.sh
177 |
178 | # Start the Neuron object.
179 | start_neuron
180 |
181 | # Trap control C (for clean docker container tear down.)
182 | function teardown() {
183 |
184 | kill -9 $NueronPID
185 | log "=== stopped Nucleus ==="
186 |
187 | unsubscribe_account
188 | log "=== unsubscribed from Metagraph. ==="
189 |
190 | exit 0
191 | }
192 | # NOTE(const) SIGKILL cannot be caught because it goes directly to the kernal.
193 | trap teardown INT SIGHUP SIGINT SIGTERM
194 |
195 | # idle waiting for abort from user
196 | read -r -d '' _ /dev/null; then
11 | echo "You need to install loguru"
12 | pip3 install loguru
13 | fi
14 |
# Test for miniupnpc: install it when the python module is missing.
# NOTE(fix): the original 'if -x python3 -c ...' ran '-x' as a command
# (always false), so the check never fired; test the import itself.
if ! python3 -c "import miniupnpc" &> /dev/null; then
    echo "You need to install miniupnpc"
    pip3 install miniupnpc
fi


# Test for docker: warn and bail out when the docker CLI is not installed.
# NOTE(fix): '[ -x "$(docker -v)" ]' was false whether or not docker existed;
# use 'command -v' to actually detect the binary.
if ! command -v docker &> /dev/null; then
    echo "You need to install docker: https://docs.docker.com/install/"
    exit 0
fi
27 |
--------------------------------------------------------------------------------
/scripts/constant.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | DOCKER_IMAGE_NAME="bittensor"
4 | DOCKER_IMAGE_TAG="latest"
5 |
function trace() {
    # Write a debug entry to the identity's logfile; console output suppressed.
    # NOTE(fix): the message is passed via argv instead of being interpolated
    # into the python source, so messages containing quotes no longer break
    # the -c snippet.
    python3 -c "import sys; from loguru import logger; logger.add(\"data/$IDENTITY/bittensor_logs.out\"); logger.debug(sys.argv[1])" "$1" > /dev/null 2>&1
}
9 |
function log() {
    # Write a debug entry to the identity's logfile and the console.
    # Message passed via argv so embedded quotes cannot break the -c snippet.
    python3 -c "import sys; from loguru import logger; logger.add(\"data/$IDENTITY/bittensor_logs.out\"); logger.debug(sys.argv[1])" "$1"
}
13 |
function success() {
    # Write a success-level entry to the identity's logfile and the console.
    # Message passed via argv so embedded quotes cannot break the -c snippet.
    python3 -c "import sys; from loguru import logger; logger.add(\"data/$IDENTITY/bittensor_logs.out\"); logger.success(sys.argv[1])" "$1"
}
17 |
function failure() {
    # Write an error-level entry to the identity's logfile and the console.
    # Message passed via argv so embedded quotes cannot break the -c snippet.
    python3 -c "import sys; from loguru import logger; logger.add(\"data/$IDENTITY/bittensor_logs.out\"); logger.error(sys.argv[1])" "$1"
}
21 |
function whichmachine() {
    # Echo a short OS name (Linux/Mac/Cygwin/MinGw/UNKNOWN:<uname>).
    # Variables are local so callers' environments are not polluted.
    local unameOut machine
    unameOut="$(uname -s)"
    case "${unameOut}" in
        Linux*)     machine=Linux;;
        Darwin*)    machine=Mac;;
        CYGWIN*)    machine=Cygwin;;
        MINGW*)     machine=MinGw;;
        *)          machine="UNKNOWN:${unameOut}"
    esac
    echo "${machine}"
}
33 |
34 |
35 | # Parse config YAML.
36 | parse_yaml() {
37 | local prefix=$2
38 | local s='[[:space:]]*' w='[a-zA-Z0-9_]*' fs=$(echo @|tr @ '\034')
39 | sed -ne "s|^\($s\)\($w\)$s:$s\"\(.*\)\"$s\$|\1$fs\2$fs\3|p" \
40 | -e "s|^\($s\)\($w\)$s:$s\(.*\)$s\$|\1$fs\2$fs\3|p" $1 |
41 | awk -F$fs '{
42 | indent = length($1)/2;
43 | vname[indent] = $2;
44 | for (i in vname) {if (i > indent) {delete vname[i]}}
45 | if (length($3) > 0) {
46 | vn=""; for (i=0; i eosiomain_wallet_password.txt
38 | cleos wallet import -n eosiomain --private-key 5KQwrPbwdL6PhXujxW37FSSQZ1JiwsST4cqQzDeyXtP79zkvFD3
39 |
40 | log "=== setup wallet: bittensorwal ==="
41 | # key for eosio account and export the generated password to a file for unlocking wallet later
42 | cleos wallet create -n bittensorwal --to-console | tail -1 | sed -e 's/^"//' -e 's/"$//' > bittensor_wallet_password.txt
43 | # Owner key for bittensorwal wallet
44 | cleos wallet import -n bittensorwal --private-key 5JpWT4ehouB2FF9aCfdfnZ5AwbQbTtHBAwebRXt94FmjyhXwL4K
45 | # Active key for bittensorwal wallet
46 | cleos wallet import -n bittensorwal --private-key 5JD9AGTuTeD5BXZwGQ5AtwBqHK21aHmYnTetHgk1B3pjj7krT8N
47 |
48 | # * Replace "bittensorwal" by your own wallet name when you start your own project
49 |
50 | # create account for bittensoracc with above wallet's public keys
51 | cleos create account eosio bittensoracc EOS6PUh9rs7eddJNzqgqDx1QrspSHLRxLMcRdwHZZRL4tpbtvia5B EOS8BCgapgYA2L4LJfCzekzeSr3rzgSTUXRXwNi8bNRoz31D14en9
52 |
53 | # TODO(const) "bittensoracc" by a new account in production.
54 |
55 | log "=== deploy smart contract ==="
56 | # $1 smart contract name
57 | # $2 account holder name of the smart contract
58 | # $3 wallet for unlock the account
59 | # $4 password for unlocking the wallet
60 | ./scripts/deploy_contract.sh bittensor bittensoracc bittensorwal $(cat bittensor_wallet_password.txt)
61 |
62 | log "=== end of setup blockchain accounts and smart contract ==="
63 | # create a file to indicate the blockchain has been initialized
64 | touch "/mnt/dev/data/initialized"
65 |
66 | # put the background nodeos job to foreground for docker run
67 | fg %1
68 |
--------------------------------------------------------------------------------
/scripts/push_image.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash
# Build the base bittensor image and push it to Docker Hub.
# NOTE(fix): fail fast — previously the push ran even if proto generation
# or the docker build failed.
set -euo pipefail

./scripts/build_protos.sh
docker build -t unconst/bittensor:latest -f Dockerfile.base .
docker push unconst/bittensor:latest
4 |
--------------------------------------------------------------------------------
/scripts/todo.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash
# Regenerate TODO.txt with every TODO mention in the repository.
rm -f TODO.txt
# Exclude the output file itself (the redirection creates it before grep
# scans the tree, so it could match its own contents) and the .git directory.
# '|| true' keeps a clean exit when no TODOs exist (grep returns 1).
grep -rnw --exclude=TODO.txt --exclude-dir=.git . -e 'TODO' > TODO.txt || true
3 |
--------------------------------------------------------------------------------
/scripts/upnpc.py:
--------------------------------------------------------------------------------
1 | # A Tool for punching a hole in UPNPC enabled routers.
2 |
3 | import argparse
4 | import miniupnpc
5 | from loguru import logger
6 |
def delete_port_map(args):
    """Remove the TCP port mapping for args.port from the UPnP gateway.

    Exits the process with status 1 on any UPnP failure.
    """
    try:
        logger.info('Using UPnP for port mapping...')
        u = miniupnpc.UPnP()
        u.discoverdelay = 200
        logger.info('Discovering... delay=%ums' % u.discoverdelay)
        ndevices = u.discover()
        logger.info(str(ndevices) + ' device(s) detected')
        u.selectigd()
        logger.info('Deleting mapped port=%u' % args.port)
        u.deleteportmapping(args.port, 'TCP')
    except Exception as e:
        # loguru formats with braces; the previous positional-arg call
        # silently dropped the exception from the log line.
        logger.error('Exception in UPnP : {}', e)
        exit(1)
21 |
def create_port_map(args):
    """Map a TCP port on the UPnP gateway to the same local port.

    Discovers the gateway, finds a free external port at or above args.port,
    and installs the redirection. On success prints
    'success:<external_ip>:<external_port>' to stdout; exits the process with
    status 1 on any UPnP failure.
    """
    try:
        u = miniupnpc.UPnP()
        u.discoverdelay = 200
        logger.info('Using UPnP for port mapping...')
        logger.info('Discovering... delay=%ums' % u.discoverdelay)
        ndevices = u.discover()
        logger.info(str(ndevices) + ' device(s) detected')

        u.selectigd()
        local_ip = u.lanaddr
        external_ip = u.externalipaddress()
        local_port = int(args.port)
        external_port = local_port

        logger.info('local ip address :' + str(local_ip))
        logger.info('external ip address :' + str(external_ip))
        logger.info(str(u.statusinfo()) + " " + str(u.connectiontype()))

        # find a free port for the redirection
        rc = u.getspecificportmapping(external_port, 'TCP')
        while rc is not None and external_port < 65536:
            external_port += 1
            rc = u.getspecificportmapping(external_port, 'TCP')
        if rc is not None:
            logger.error('Exception in UPnP : ' + str(rc))

        logger.info('trying to redirect %s port %u TCP => %s port %u TCP' %
                    (external_ip, external_port, local_ip, local_port))
        u.addportmapping(external_port, 'TCP', local_ip, local_port,
                         'UPnP IGD Tester port %u' % external_port, '')

    except Exception as e:
        # loguru formats with braces; the previous positional-arg call
        # silently dropped the exception from the log line.
        logger.error('Exception in UPnP : {}', e)
        exit(1)

    print('success' + ':' + str(external_ip) + ':' + str(external_port))
59 |
def main(args):
    """Dispatch to deleting or creating a port mapping based on args.delete."""
    # Truthiness test instead of the '== True' comparison.
    if args.delete:
        delete_port_map(args)
    else:
        create_port_map(args)
65 |
66 | if __name__ == '__main__':
67 | parser = argparse.ArgumentParser(description='UPnP Tool.')
68 | parser.add_argument(
69 | '--port',
70 | default=9090,
71 | type=int,
72 | help='The port to try porting or deleting Default port=9090')
73 | parser.add_argument(
74 | '--delete',
75 | default=False,
76 | type=bool,
77 | help='Delete port or create port. Default delete=False')
78 | args = parser.parse_args()
79 | main(args)
80 |
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | """A setuptools based setup module.
2 | See:
3 | https://packaging.python.org/en/latest/distributing.html
4 | https://github.com/pypa/sampleproject
5 | """
6 |
7 | # Always prefer setuptools over distutils
8 | from setuptools import setup, find_packages
9 | from os import path
10 | # io.open is needed for projects that support Python 2.7
11 | # It ensures open() defaults to text mode with universal newlines,
12 | # and accepts an argument to specify the text encoding
13 | # Python 3 only projects can skip this import
14 | from io import open
15 |
16 | here = path.abspath(path.dirname(__file__))
17 |
18 | # Get the long description from the README file
19 | with open(path.join(here, 'README.md'), encoding='utf-8') as f:
20 | long_description = f.read()
21 |
22 | # Arguments marked as "Required" below must be included for upload to PyPI.
23 | # Fields marked as "Optional" may be commented out.
24 |
25 | setup(
26 | name='bittensor', # Required
27 | version='0.0.1', # Required
28 | description='Decentralized Machine Intelligence Deamon', # Optional
29 | long_description=long_description, # Optional
30 | long_description_content_type='text/markdown', # Optional (see note above)
31 | url='https://github.com/unconst/bittensor', # Optional
32 | author='Jacob R. Steeves and Ala Shaabana', # Optional
33 | author_email='jake@bittensor.com', # Optional
34 | classifiers=[ # Optional
35 | 'Development Status :: 3 - Alpha',
36 | 'Intended Audience :: Developers',
37 | 'Topic :: Software Development :: Build Tools',
38 |
39 | # Pick your license as you wish
40 | 'License :: OSI Approved :: MIT License',
41 | 'Programming Language :: Python :: 3.5',
42 | 'Programming Language :: Python :: 3.6',
43 | 'Programming Language :: Python :: 3.7',
44 | ],
45 | packages=find_packages(
46 | exclude=['data', 'contract', 'assets', 'scripts', 'docs']), # Required
47 | python_requires='>=3.5',
48 | install_requires=[
49 | 'argparse', 'grpcio', 'grpcio-tools', 'libeospy', 'loguru',
50 | 'matplotlib', 'miniupnpc', 'networkx', 'numpy', 'pebble',
51 | 'pickle-mixin', 'pycrypto', 'sklearn', 'tensorflow==1.15.2',
52 | 'tensorflow_hub==0.4.0', 'timeloop', 'zipfile36'
53 | ], # Optional
54 | )
55 |
--------------------------------------------------------------------------------
/start_eos.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | set -o errexit
3 |
4 | # change to script's directory
5 | cd "$(dirname "$0")"
6 |
7 | # Load constants.
8 | source scripts/constant.sh
9 | source scripts/check_requirements.sh
10 |
# Default args.
remote="false"
token="none"

# Commandline args.
# NOTE(fix): 'test 3 -gt 0' was an accidental infinite condition that relied
# on 'break'; iterate while arguments remain, and have -t consume its value
# with an explicit second shift.
while [ $# -gt 0 ]; do
    case "$1" in
        -h|--help)
            echo "Script for starting an Bittensor-EOS chain instance."
            echo "Usage ./start_eos.sh [OPTIONS]"
            echo ""
            echo "Options:"
            echo " -h, --help Print these comments and exit."
            echo " -r, --remote Run instance on a remote digital ocean instance."
            echo " -t, --token If -r is set: Use this token to create instance."
            exit 0
            ;;
        -r|--remote)
            remote="true"
            shift
            ;;
        -t|--token)
            token=$2
            shift
            shift
            ;;
        *)
            break
            ;;
    esac
done
41 | #
42 |
function run_local() {
    # Run the EOS chain through a local docker instance.

    # Build image if not existent.
    if [[ "$(docker images -q $DOCKER_IMAGE_NAME:$DOCKER_IMAGE_TAG 2> /dev/null)" == "" ]]; then
        log "=== building eos container ==="
        docker build -t $DOCKER_IMAGE_NAME:$DOCKER_IMAGE_TAG .
    fi

    # Kill already running instance.
    if [[ "$(docker ps -a | grep eos_container)" ]]; then
        log "=== stopping eos ==="
        docker kill eos_container || true
        docker rm eos_container || true
    fi

    # Run the local container with start script.
    # Mounts expose contract/scripts/data/eos_config to the container so the
    # init script can deploy the contract and persist chain state on the host.
    log "=== run docker container from the $DOCKER_IMAGE_NAME:$DOCKER_IMAGE_TAG image ==="
    script="./scripts/init_eos.sh"
    docker run --rm --name eos_container -d \
    -p 8888:8888 -p 9876:9876 \
    --mount type=bind,src="$(pwd)"/contract,dst=/opt/eosio/bin/contract \
    --mount type=bind,src="$(pwd)"/scripts,dst=/opt/eosio/bin/scripts \
    --mount type=bind,src="$(pwd)"/data,dst=/mnt/dev/data \
    --mount type=bind,src="$(pwd)"/eos_config,dst=/mnt/dev/config \
    -w "/opt/eosio/bin/" $DOCKER_IMAGE_NAME:$DOCKER_IMAGE_TAG /bin/bash -c "$script"

    # Follow logs from the container.
    log "=== follow eos_container logs ==="
    docker logs eos_container --follow
}
74 |
function run_remote() {
    # Run the EOS chain on a remote digital ocean instance.

    # Create a digital ocean instance if non-existent.
    # This uses the $token arg passed on the command line.
    if [[ "$(docker-machine ls | grep eosremote)" ]]; then
        log "eos_remote machine already exists"
    else
        log "Creating Droplet:eosremote"
        DM_CREATE="docker-machine create --driver digitalocean --digitalocean-size s-2vcpu-2gb --digitalocean-access-token ${token} eosremote"
        log "$DM_CREATE"
        eval $DM_CREATE
    fi

    # Print instance IP and port for connection.
    # NOTE(fix): the 'eval' wrapper around this substitution was a no-op.
    EXTERN_IP=$(docker-machine ip eosremote)
    log "eosurl: $EXTERN_IP:8888"

    # Set docker machine env to the created host.
    eval $(docker-machine env eosremote)

    # Build the container image.
    # NOTE(fix): both branches of the original if/else ran the identical
    # 'docker build'; build unconditionally — the docker cache makes this
    # cheap when the image already exists.
    log "=== building eos container ==="
    docker build -t $DOCKER_IMAGE_NAME:$DOCKER_IMAGE_TAG .

    # Stop running container if existent.
    if [[ "$(docker ps -a | grep eos_container)" ]]; then
        log "=== stopping eos ==="
        docker kill eos_container || true
        docker rm eos_container || true
    fi

    # Run start up script through docker on host.
    log "=== run docker container from the $DOCKER_IMAGE_NAME:$DOCKER_IMAGE_TAG image ==="
    script="./scripts/init_eos.sh"
    docker run --rm --name eos_container -d \
    -p 8888:8888 -p 9876:9876 \
    $DOCKER_IMAGE_NAME:$DOCKER_IMAGE_TAG /bin/bash -c "$script"

    # Follow eos instance logs.
    log "=== follow eos_container logs ==="
    docker logs eos_container --follow

    # Clean destroy instance.
    docker stop eos_container
    docker rm eos_container
    docker-machine kill eosremote
    docker-machine rm eosremote --force

    # Unset docker-machine environment.
    eval "$(docker-machine env -u)"
}
133 |
function main() {
    # Dispatch to a local or remote EOS deployment based on $remote.
    case "$remote" in
        false)
            log "Running eos Local."
            run_local
            ;;
        *)
            log "Running eos Remote."
            run_remote
            ;;
    esac
}
144 |
145 | main
146 |
--------------------------------------------------------------------------------
/start_visualizer.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | set -o errexit
3 |
4 | # change to script's directory
5 | cd "$(dirname "$0")"
6 | source ./scripts/constant.sh
7 |
8 | # Check script check_requirements
9 | source scripts/check_requirements.sh
10 |
function print_help () {
    # Print CLI usage for start_visualizer.sh.
    echo "Script for starting Visualization instance."
    echo "Usage ./start_visualizer.sh [OPTIONS]"
    echo ""
    echo "Options:"
    echo " -h, --help Print this help message and exit"
    echo " -c, --config_path Path to config yaml."
    echo " -e, --eos_url Path to EOS chain."
    # NOTE(fix): -r runs the instance remotely; the old text said "locally".
    echo " -r, --remote Run instance on a remote digital ocean host."
    echo " -t, --token Digital ocean API token."
}
22 |
23 | config_path='visualizer/config.yaml'
24 |
25 | # bind and advertise this port
26 | port=$(( ( RANDOM % 60000 ) + 5000 ))
27 |
28 | # Is this service running on digital ocean.
29 | remote="false"
30 | # Digital ocean API token for creating remote instances.
31 | token="none"
32 |
# Read command line args.
# Fixes: the loop condition was `test 5 -gt 0` (a constant) instead of
# testing the remaining argument count, and -r/--remote performed TWO
# shifts despite taking no argument — which silently consumed the next
# flag (e.g. `./start_visualizer.sh -r -t TOKEN` dropped the token).
while test $# -gt 0; do
  case "$1" in
    -h|--help)
      print_help
      exit 0
      ;;
    -c|--config_path)
      config_path="$2"
      shift
      shift
      ;;
    -e|--eos_url)
      eos_url="$2"
      shift
      shift
      ;;
    -r|--remote)
      # Boolean flag: takes no value, so shift exactly once.
      remote="true"
      shift
      ;;
    -t|--token)
      token="$2"
      shift
      shift
      ;;
    *)
      break
      ;;
  esac
done

# Read the yaml config file, exposing keys as config_* shell variables
# (parse_yaml comes from scripts/constant.sh sourced above).
eval $(parse_yaml "$config_path" "config_")
68 |
#######################################
# Provision a Digital Ocean droplet (if needed), build the visualizer
# image on it, and run the visualizer container there, following its logs.
# Globals:  token, port, config_script, config_path, config_tbport,
#           eos_url, DOCKER_IMAGE_NAME, DOCKER_IMAGE_TAG (read)
#######################################
function start_remote_service() {
  log "=== run remote. ==="

  # Trap for clean docker-machine teardown on any exit path.
  function teardown() {
    log "=== tear down. ==="
    eval $(docker-machine env -u)
    echo "To tear down this host run:"
    echo "    docker-machine stop visualizer-container & docker-machine rm visualizer-container --force "
    exit 0
  }
  # NOTE(const) SIGKILL cannot be caught because it goes directly to the kernel.
  trap teardown INT SIGHUP SIGINT SIGTERM ERR EXIT

  # Initialize the host: create the droplet only if it does not exist yet.
  log "=== initializing remote host. ==="
  if [[ "$(docker-machine ls | grep visualizer-container)" ]]; then
    # Host already exists.
    log "visualizer-container droplet already exists."
  else
    log "Creating Droplet: visualizer-container"
    DROPLET_CREATE="docker-machine create --driver digitalocean --digitalocean-size s-4vcpu-8gb --digitalocean-access-token ${token} visualizer-container"
    log "Create command: $DROPLET_CREATE"
    eval $DROPLET_CREATE
  fi

  # Point the local docker client at the droplet.
  log "=== switching droplet context. ==="
  eval $(docker-machine env visualizer-container)

  # Build the image. The previous if/else built in BOTH branches, so the
  # existence check was dead code; build unconditionally.
  log "=== building visualizer image. ==="
  docker build -t "$DOCKER_IMAGE_NAME:$DOCKER_IMAGE_TAG" -f ./visualizer/Dockerfile .

  # Stop the container if it is already running.
  if [[ "$(docker ps -a | grep visualizer-container)" ]]; then
    log "=== stopping visualizer-container ==="
    docker stop visualizer-container || true
    docker rm visualizer-container || true
  fi

  # Find the external ip address for this droplet.
  serve_address=$(docker-machine ip visualizer-container)
  log "serve_address: $serve_address:$port"

  # Build start command. (Removed the stray `script=$config_script$`
  # assignment: it appended a literal '$' and the variable was unused.)
  COMMAND="$config_script --config_path $config_path --eos_url $eos_url"
  log "Command: $COMMAND"
  log "Image: $DOCKER_IMAGE_NAME:$DOCKER_IMAGE_TAG"

  # Run docker service.
  log "=== run the docker container on remote host. ==="
  log "=== container image: $DOCKER_IMAGE_NAME:$DOCKER_IMAGE_TAG ==="
  docker run --rm --name visualizer-container -d -t \
    -p "$port:$port" \
    -p "$config_tbport:$config_tbport" \
    "$DOCKER_IMAGE_NAME:$DOCKER_IMAGE_TAG" /bin/bash -c "$COMMAND"

  log "=== follow ==="
  docker logs visualizer-container --follow
}
139 |
#######################################
# Build the visualizer image locally and run the container with the
# project directories bind-mounted, following its logs.
# Globals:  config_script, config_path, config_port, config_tbport,
#           eos_url, DOCKER_IMAGE_NAME, DOCKER_IMAGE_TAG (read)
#######################################
function start_local_service() {
  log "=== run locally. ==="
  log ""

  # Build the image. The previous if/else built in BOTH branches, so the
  # existence check was dead code; build unconditionally.
  log "=== build image. ==="
  log ""
  docker build -t "$DOCKER_IMAGE_NAME:$DOCKER_IMAGE_TAG" -f ./visualizer/Dockerfile .

  # Stop the container if it is already running.
  if [[ "$(docker ps -a | grep visualizer-container)" ]]; then
    log "=== stop visualizer-container ==="
    docker stop visualizer-container || true
    docker rm visualizer-container || true
    log ""
  fi

  # Trap control C (for clean docker container tear down.)
  function teardown() {
    log "=== stop visualizer-container ==="
    docker stop visualizer-container
    log ""
    exit 0
  }

  # NOTE(const) SIGKILL cannot be caught because it goes directly to the kernel.
  trap teardown INT SIGHUP SIGINT SIGTERM ERR EXIT

  echo "Monitoring chain at $eos_url"

  # Build start command. (Removed the stray `script=$config_script$` line;
  # it was unused and appended a literal '$'.)
  log "=== run container === "
  COMMAND="$config_script --config_path $config_path --eos_url $eos_url"
  log "Command: $COMMAND"
  log "Image: $DOCKER_IMAGE_NAME:$DOCKER_IMAGE_TAG"

  docker run --rm --name visualizer-container -d -t \
    -p "$config_port:$config_port" \
    -p "$config_tbport:$config_tbport" \
    --mount type=bind,src="$(pwd)"/scripts,dst=/bittensor/scripts \
    --mount type=bind,src="$(pwd)"/data/visualizer/logs,dst=/bittensor/data/visualizer/logs \
    --mount type=bind,src="$(pwd)"/neurons,dst=/bittensor/neurons \
    --mount type=bind,src="$(pwd)"/visualizer,dst=/bittensor/visualizer \
    "$DOCKER_IMAGE_NAME:$DOCKER_IMAGE_TAG" /bin/bash -c "$COMMAND"
  log ""

  docker logs visualizer-container --follow
}
198 |
#######################################
# Entry point: print banner, validate remote-mode preconditions, then
# dispatch to the local or remote service starter.
# Fix: validation failures previously used `exit 0`, reporting success to
# the caller; they now exit non-zero. The three separate remote==true
# checks are merged into one block (identical evaluation order).
#######################################
function main() {
  log "%%%%%%%%.%%%%%%.%.....%..%%%%%..%%%%%%%.%%%%%%..%%%%%%..%%%%%%%....%....%%%%%%..%%%%%%.."
  log "...%....%.......%%....%.%.....%.%.....%.%.....%.%.....%.%.....%...%.%...%.....%.%.....%."
  log "...%....%.......%.%...%.%.......%.....%.%.....%.%.....%.%.....%..%...%..%.....%.%.....%."
  log "...%....%%%%%...%..%..%..%%%%%..%.....%.%%%%%%..%%%%%%..%.....%.%.....%.%%%%%%..%.....%."
  log "...%....%.......%...%.%.......%.%.....%.%...%...%.....%.%.....%.%%%%%%%.%...%...%.....%."
  log "...%....%.......%....%%.%.....%.%.....%.%....%..%.....%.%.....%.%.....%.%....%..%.....%."
  log "...%....%%%%%%%.%.....%..%%%%%..%%%%%%%.%.....%.%%%%%%..%%%%%%%.%.....%.%.....%.%%%%%%.."
  log "........................................................................................"

  log "remote: $remote"
  log "eosurl: $eos_url"
  log "logdir: $logdir"
  log "token: $token"

  if [ "$remote" == "true" ]; then
    # A Digital Ocean API token is mandatory for remote runs.
    if [ "$token" == "none" ]; then
      failure "Error: token is none but requesting remote host."
      failure "Visit: https://cloud.digitalocean.com/account/api/tokens"
      exit 1
    fi
    # NOTE(review): $upnpc is never set in this script — presumably a flag
    # shared with sibling scripts; confirm before removing this guard.
    if [ "$upnpc" == "true" ]; then
      failure "Error: cannot port map on remote hosts"
      exit 1
    fi
    start_remote_service
  else
    start_local_service
  fi
}
236 |
237 | main
238 |
--------------------------------------------------------------------------------
/testing/GradientTests.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": 7,
6 | "metadata": {},
7 | "outputs": [
8 | {
9 | "name": "stderr",
10 | "output_type": "stream",
11 | "text": [
12 | "2019-08-29 16:06:57.408 | INFO | __main__:spikerpc:21 - spikerpc [1.]\n"
13 | ]
14 | },
15 | {
16 | "name": "stdout",
17 | "output_type": "stream",
18 | "text": [
19 | "[1.]\n",
20 | "[array([1.], dtype=float32)]\n"
21 | ]
22 | }
23 | ],
24 | "source": [
25 | "import numpy as np\n",
26 | "import tensorflow as tf\n",
27 | "from loguru import logger\n",
28 | "from tensorflow.python.framework import ops\n",
29 | "import numpy as np\n",
30 | "\n",
31 | "\n",
32 | "class Dendrite:\n",
33 | " def __init__(self):\n",
34 | " pass\n",
35 | " \n",
36 | " def gradrpc(self, inputs, grad):\n",
37 | " logger.info('gradrpc {} {}', inputs, grad)\n",
38 | " return inputs\n",
39 | "\n",
40 | " def spike_grad(self, op, grad):\n",
41 | " logger.info('spike_grad')\n",
42 | " return tf.py_function(self.gradrpc, [op.inputs[0], grad], [tf.float32])\n",
43 | "\n",
44 | " def spikerpc(self, tensor):\n",
45 | " logger.info('spikerpc {}', tensor)\n",
46 | " return tensor\n",
47 | "\n",
48 | " def spike(self, inp, Tout, stateful=True, name=None, grad=None):\n",
49 | " rnd_name = 'PyFuncGrad' + str(np.random.randint(0, 1E+8))\n",
50 | " tf.RegisterGradient(rnd_name)(self.spike_grad) \n",
51 | " g = tf.get_default_graph()\n",
52 | " with g.gradient_override_map({\"PyFunc\": rnd_name}):\n",
53 | " return tf.py_function(self.spikerpc, inp, Tout, name=name)\n",
54 | "\n",
55 | "def pyfunc_test():\n",
56 | " \n",
57 | " g = tf.Graph()\n",
58 | " s = tf.Session()\n",
59 | " dendrite = Dendrite()\n",
60 | "\n",
61 | " x = tf.compat.v1.placeholder(tf.float32, shape=[1])\n",
62 | " \n",
63 | " w = tf.Variable(tf.ones_like(x))\n",
64 | " h = tf.multiply(x, w)\n",
65 | " \n",
66 | " y = dendrite.spike([h], [tf.float32])[0]\n",
67 | " \n",
68 | " d = tf.gradients(y, w)\n",
69 | "\n",
70 | " init = tf.compat.v1.global_variables_initializer()\n",
71 | " s.run(init)\n",
72 | " for out in s.run([y, d], feed_dict={x:[1]}):\n",
73 | " print (out)\n",
74 | "\n",
75 | "\n",
76 | "# # create data\n",
77 | "# x_data = tf.placeholder(dtype=tf.float32, shape=[None])\n",
78 | "# y_data = tf.placeholder(dtype=tf.float32, shape=[None])\n",
79 | "\n",
80 | "# w = tf.Variable(tf.constant([0.5]))\n",
81 | "# b = tf.Variable(tf.zeros([1]))\n",
82 | "\n",
83 | "# y1 = tf.multiply(w, x_data, name='y1')\n",
84 | "# y2 = spike([y1], [tf.float32], grad=spike_grad)[0]\n",
85 | "# y = tf.add(y2, b)\n",
86 | "\n",
87 | "# loss = tf.reduce_mean(tf.square(y - y_data))\n",
88 | "# optimizer = tf.train.GradientDescentOptimizer(0.01)\n",
89 | "# train = optimizer.minimize(loss)\n",
90 | "\n",
91 | "# print(\"Pyfunc grad\", ops.get_gradient_function(y2.op))\n",
92 | "# with tf.Session() as sess:\n",
93 | "# sess.run(tf.global_variables_initializer())\n",
94 | "\n",
95 | "# for step in range(1):\n",
96 | "# # ran = np.random.rand(115).astype(np.float32)\n",
97 | "# ran = np.ones((115)).astype(np.float32)\n",
98 | "# ans = ran * 1.5 + 3\n",
99 | "# dic = {x_data: ran, y_data: ans}\n",
100 | "# tt, yy, yy1= sess.run([train, y1, y2], feed_dict=dic)\n",
101 | "# if step % 1 == 0:\n",
102 | "# print('step {}'.format(step))\n",
103 | "# print('{}, {}'.format(w.eval(), b.eval()))\n",
104 | "\n",
105 | "# test = sess.run(y, feed_dict={x_data:[1]})\n",
106 | "# print('test = {}'.format(test))\n",
107 | "\n",
108 | "\n",
109 | "if __name__ == '__main__':\n",
110 | " pyfunc_test()"
111 | ]
112 | },
113 | {
114 | "cell_type": "code",
115 | "execution_count": null,
116 | "metadata": {},
117 | "outputs": [],
118 | "source": []
119 | }
120 | ],
121 | "metadata": {
122 | "kernelspec": {
123 | "display_name": "Python 3",
124 | "language": "python",
125 | "name": "python3"
126 | },
127 | "language_info": {
128 | "codemirror_mode": {
129 | "name": "ipython",
130 | "version": 3
131 | },
132 | "file_extension": ".py",
133 | "mimetype": "text/x-python",
134 | "name": "python",
135 | "nbconvert_exporter": "python",
136 | "pygments_lexer": "ipython3",
137 | "version": "3.7.4"
138 | }
139 | },
140 | "nbformat": 4,
141 | "nbformat_minor": 2
142 | }
143 |
--------------------------------------------------------------------------------
/testing/Market Testing.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": 1,
6 | "metadata": {},
7 | "outputs": [],
8 | "source": [
9 | "import copy\n",
10 | "class Bittensor:\n",
11 | " # NOTE(const): This is a python template for the Bittensor EOS contract. It is written to approximate the\n",
12 | " # Matrix multiplication emission system from above but using serial transactions which distribute the execution \n",
13 | " # cost across all the network nodes.\n",
14 | " \n",
15 | " def __init__(self): \n",
16 | " self.edges = {} # id --> edges.\n",
17 | " self.emit_height = {} # id --> last emit block.\n",
18 | " self.block_height = 0 # chain height.\n",
19 | " \n",
20 | " self.stake = {} # id --> stake\n",
21 | " self.next_stake = {} # temporary version of self.stake (only for testing.)\n",
22 | " self.total_stake = 0 # total supply of staked token.\n",
23 | " self.next_total_stake = 0 # temporary version of self.total_supply (only for testing.)\n",
24 | " \n",
25 | " \n",
26 | " def subscribe(self, this_identity):\n",
27 | " # NOTE(const): Subscribing a neuron to the metagraph will involve locking token funds in the main EOS contract\n",
28 | " # and shifting them into the metagraph to act as stake. The token contract is thus two tokens, one which acts \n",
29 | " # like a normal EOS token, it can be transfered easily, and tokens which act as stake within the metagraph.\n",
30 | " # When a node \"unsubscribes\" the stake that has been gained through metagraph emission is transfered back to the\n",
31 | " # participants account on EOS.\n",
32 | " \n",
33 | " # TODO(const): It is not yet obvious if tokens that are held out of the metagraph should be attaining emission.\n",
34 | " # If they are not, then it seems like people are likely to hold their tokens in the metagraph without running a \n",
35 | " # neuron. This will clutter the graph. It would be better if only participants were offered the opportunity to attain\n",
36 | " # mining emissions.\n",
37 | " self.next_total_stake += 1 \n",
38 | " self.next_stake[this_identity] = 1\n",
39 | " \n",
40 | " \n",
41 | " self.edges[this_identity] = [(this_identity, 1.0)]\n",
42 | " self.emit_height[this_identity] = self.block_height\n",
43 | " \n",
44 | " def inc_height(self):\n",
45 | " # Note(const): This function will not be included in the EOS contract. This is used to keep all the nodes in lockstep,\n",
46 | " # a.k.a at the same height and using the same denominator in ( self.stake[id] / self.total_stake).\n",
47 | " # In reality these will differ and it will give nodes an advantage if they are able to call the emit function\n",
48 | " # before the total_stake term is incremented by their competitors. This should give the network incentive \n",
49 | " # to continually emit tokens (remember that this is costly for the calling node.)\n",
50 | " self.stake = copy.deepcopy(self.next_stake)\n",
51 | " self.total_stake = self.next_total_stake\n",
52 | " \n",
53 | " self.block_height += 1\n",
54 | " \n",
55 | " def emit(self, this_identity, edges):\n",
56 | " # NOTE(const): We are wrapping the emission function with the update attribution function. \n",
57 | " # This can be done because we require an emission call BEFORE a node changes his edge set in the graph.\n",
58 | " # hence they are essentially the same call.\n",
59 | " \n",
60 | " # Edge set checks.\n",
61 | " assert(edges[0][0] == this_identity)\n",
62 | " assert(abs(sum([w for i, w in edges]) - 1.0) < 0.001 )\n",
63 | "\n",
64 | " # Get emission and apply it.\n",
65 | " this_emission = self._get_emission(this_identity)\n",
66 | " self._do_emit(this_identity, this_emission)\n",
67 | "\n",
68 | " # Update to the new edge set.\n",
69 | " self.edges[this_identity] = edges\n",
70 | " self.emit_height[this_identity] = self.block_height\n",
71 | " \n",
72 | "\n",
73 | " # Bitcoin (esque) emission.\n",
74 | " def _get_emission(self, this_identity):\n",
75 | " \n",
76 | " # NOTE(const): Emit rate governs the average number of tokens to emit after each EOS block.\n",
77 | " # In the future, this amount should change with block height to model an emission decay\n",
78 | " # as is the case with Bitcoin.\n",
79 | " EMIT_RATE = 50 \n",
80 | " \n",
81 | " # The time since my last token emit. \n",
82 | " # Note: if the emit rate changes in time then this calculation will need to be changed.\n",
83 | " delta_blocks = self.block_height - self.emit_height[this_identity] \n",
84 | " \n",
85 | " # NOTE(const): The reinforcement term governs the effect of stake on the output. \n",
86 | " # Removing this term makes all nodes equal. Dampening it with a logarithm will\n",
87 | " # decrease the effect of having a higher ratio of the total stake.\n",
88 | " stake_reinforcement = (self.stake[this_identity] / self.total_stake) \n",
89 | " \n",
90 | " # NOTE(const): The emission term here is built to distribute rewards evenly across nodes\n",
91 | " # in proportion to their attribution in the graph (as well as a reflection of their stake holdings)\n",
92 | " this_emission = EMIT_RATE * delta_blocks * stake_reinforcement\n",
93 | " \n",
94 | " return this_emission\n",
95 | " \n",
96 | " def _do_emit(self, this_identity, this_emission):\n",
97 | " # NOTE(const): The _do_emit function is the most computationally expensive call within the contract. \n",
98 | " # It involves a depth first recursion which terminates when the emission total has been exhausted. \n",
99 | " # In the worst case this function's runtime is O(DELTA_BLOCKS * EMIT_RATE * (1 / MIN_INCREMENT))\n",
100 | " # The function is paid by the calling node which will require that node holds enough EOS to make the call.\n",
101 | " # TODO(const): I am uncertain of the EOS token requirements this function requires, or what happens when a \n",
102 | " # a callee cannot afford the transaction.\n",
103 | " MIN_INCREMENT = 0.00001\n",
104 | " \n",
105 | " # Recurse the emmision through the tree.\n",
106 | " emission_queue = [(this_identity, this_emission)]\n",
107 | " while len(emission_queue) > 0:\n",
108 | " \n",
109 | " # Next.\n",
110 | " current_identity, current_emission = emission_queue.pop()\n",
111 | " current_inedge = self.edges[current_identity][0][1]\n",
112 | " \n",
113 | " # NOTE(const): Update global vars for next step. In reality these will be updated immediately.\n",
114 | " # For testing purposes we hold them locked until every node has updated.\n",
115 | " self.next_stake[current_identity] += (current_emission * current_inedge) \n",
116 | " self.next_total_stake += (current_emission * current_inedge)\n",
117 | " \n",
118 | " # Recurse.\n",
119 | " for next_identity, next_weight in self.edges[current_identity][1:]:\n",
120 | " next_emission = current_emission * next_weight \n",
121 | " if next_emission > MIN_INCREMENT:\n",
122 | " emission_queue.append((next_identity, current_emission * next_weight)) \n",
123 | " \n",
124 | " \n",
125 | " def __str__(self):\n",
126 | " strng = \"\" \n",
127 | " strng += \"S: \" + str_arr([self.stake[identity] for identity in self.stake.keys()]) + \"\\n\" \n",
128 | " return strng"
129 | ]
130 | },
131 | {
132 | "cell_type": "code",
133 | "execution_count": null,
134 | "metadata": {},
135 | "outputs": [],
136 | "source": []
137 | }
138 | ],
139 | "metadata": {
140 | "kernelspec": {
141 | "display_name": "Python 3",
142 | "language": "python",
143 | "name": "python3"
144 | },
145 | "language_info": {
146 | "codemirror_mode": {
147 | "name": "ipython",
148 | "version": 3
149 | },
150 | "file_extension": ".py",
151 | "mimetype": "text/x-python",
152 | "name": "python",
153 | "nbconvert_exporter": "python",
154 | "pygments_lexer": "ipython3",
155 | "version": "3.7.4"
156 | }
157 | },
158 | "nbformat": 4,
159 | "nbformat_minor": 2
160 | }
161 |
--------------------------------------------------------------------------------
/visualizer/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM unconst/bittensor
2 |
3 | # Copy across the local files.
4 | COPY visualizer/ bittensor/visualizer
5 |
--------------------------------------------------------------------------------
/visualizer/TBLogger.py:
--------------------------------------------------------------------------------
from io import BytesIO

import numpy as np
import tensorflow as tf
2 |
class TBLogger(object):
    """Logging in tensorboard without tensorflow ops.

    Writes scalar, image and histogram summaries directly through a
    ``tf.compat.v1`` FileWriter, so callers do not need a graph/session.
    Fixes: ``np`` was used without an import, and PNG image bytes were
    written through a text ``StringIO`` (binary data requires ``BytesIO``).
    """

    def __init__(self, log_dir):
        """Creates a summary writer logging to log_dir."""
        self.writer = tf.compat.v1.summary.FileWriter(log_dir)

    def log_scalar(self, tag, value, step):
        """Log a scalar variable.

        Parameters
        ----------
        tag : str
            Name of the scalar.
        value : float
            Value to record.
        step : int
            Training iteration.
        """
        summary = tf.compat.v1.Summary(
            value=[tf.compat.v1.Summary.Value(tag=tag, simple_value=value)])
        self.writer.add_summary(summary, step)

    def log_images(self, tag, images, step):
        """Logs a list of images (each an HxW or HxWxC array)."""
        # NOTE(review): this method depends on matplotlib (`plt`), which is
        # not imported anywhere in this module — confirm the intended
        # dependency before calling log_images.
        im_summaries = []
        for nr, img in enumerate(images):
            # Encode the image as PNG. A binary buffer is required here:
            # PNG output is bytes, so a text StringIO would fail on Python 3.
            buf = BytesIO()
            plt.imsave(buf, img, format='png')

            # Create an Image proto from the encoded bytes.
            img_sum = tf.compat.v1.Summary.Image(
                encoded_image_string=buf.getvalue(),
                height=img.shape[0],
                width=img.shape[1])
            # One Summary value per image, tagged "<tag>/<index>".
            im_summaries.append(
                tf.compat.v1.Summary.Value(tag='%s/%d' % (tag, nr), image=img_sum))

        # Create and write Summary
        summary = tf.compat.v1.Summary(value=im_summaries)
        self.writer.add_summary(summary, step)

    def log_histogram(self, tag, values, step, bins=1000):
        """Logs the histogram of a list/vector of values."""
        # Convert to a numpy array
        values = np.array(values)

        # Create histogram using numpy
        counts, bin_edges = np.histogram(values, bins=bins)

        # Fill fields of histogram proto
        hist = tf.HistogramProto()
        hist.min = float(np.min(values))
        hist.max = float(np.max(values))
        hist.num = int(np.prod(values.shape))
        hist.sum = float(np.sum(values))
        hist.sum_squares = float(np.sum(values**2))

        # TF requires bucket_limit to have the same length as bucket, where
        # the first bucket spans (-DBL_MAX, bin_edges[1]].
        # See https://github.com/tensorflow/tensorflow/blob/master/tensorflow/core/framework/summary.proto#L30
        # Thus, we drop the start of the first bin
        bin_edges = bin_edges[1:]

        # Add bin edges and counts
        for edge in bin_edges:
            hist.bucket_limit.append(edge)
        for c in counts:
            hist.bucket.append(c)

        # Create and write Summary
        summary = tf.compat.v1.Summary(
            value=[tf.compat.v1.Summary.Value(tag=tag, histo=hist)])
        self.writer.add_summary(summary, step)
        self.writer.flush()
77 |
--------------------------------------------------------------------------------
/visualizer/__init__.py:
--------------------------------------------------------------------------------
1 | name = "visualizer"
2 | from visualizer.proto import visualizer_pb2
3 | from visualizer.proto import visualizer_pb2_grpc
--------------------------------------------------------------------------------
/visualizer/config.py:
--------------------------------------------------------------------------------
1 | import yaml
2 | import json
3 | import argparse
4 |
class obj(object):
    """Recursive attribute-access wrapper around a plain dict.

    Dict values become nested ``obj`` instances; list/tuple values become
    lists whose dict elements are wrapped (other elements pass through).
    """

    def __init__(self, d):
        for key, value in d.items():
            setattr(self, key, self._wrap(value))

    @staticmethod
    def _wrap(value):
        # Wrap one value: dict -> obj, sequence -> list of wrapped items.
        if isinstance(value, (list, tuple)):
            return [obj(item) if isinstance(item, dict) else item for item in value]
        return obj(value) if isinstance(value, dict) else value
12 |
class Config:
    """Loads a configuration object from a YAML or JSON file.

    Both loaders return an object supporting attribute access
    (``cfg.section.key``) rather than a raw dict.
    """

    @staticmethod
    def get_config_from_yaml(path):
        """Parse a YAML file at `path` into an attribute-access object."""
        with open(path, 'r') as ymlfile:
            cfg = yaml.safe_load(ymlfile)
        return obj(cfg)

    @staticmethod
    def get_config_from_json(path):
        """Parse a JSON file at `path` into nested SimpleNamespace objects.

        Fix: the original passed the undefined name `dict_to_sns` as the
        object_hook, so any call raised NameError.
        """
        from types import SimpleNamespace
        with open(path) as fd:
            return json.load(fd, object_hook=lambda d: SimpleNamespace(**d))
--------------------------------------------------------------------------------
/visualizer/config.yaml:
--------------------------------------------------------------------------------
1 |
2 | # (str) Visualizer script.
3 | script: ./visualizer/visualizer.sh
4 |
5 | # (int) Seconds between refresh.
6 | heartbeat: 5
7 |
8 | # (str) Visualization mode
9 | visualization_mode: tensorboard
10 |
11 | # (str) The address to bind the visualizer server.
12 | bind_address: 0.0.0.0
13 |
# (int) The visualizer serving port.
port: 14142

# (int) The tensorboard serving port.
tbport: 14143
19 |
20 | # (str) Visualizer logging directory.
21 | logdir: data/visualizer/logs
22 |
23 | # (str) BitTensor contract account on EOS
24 | eos:
25 | account: bittensoracc
26 |
27 | # (str) Contract scope.
28 | scope: bittensoracc
29 |
30 | # (str) EOS contract code name.
31 | code: bittensoracc
32 |
33 | # (str) EOS table which stores metagraph information.
34 | table: metagraph
35 |
36 | # (str) EOS chain key type.
37 | key_type: i64
38 |
39 | # (str) URL of EOS chain.
40 | url: http://host.docker.internal:8888
41 |
42 | # (str) EOS endpoint command.
43 | get_table_command: /v1/chain/get_table_rows
44 |
--------------------------------------------------------------------------------
/visualizer/main.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python3
2 | import visualizer
3 |
4 | import argparse
5 | import grpc
6 | import json
7 | import pickle
8 | import requests
9 | import subprocess
10 | import time
11 | import yaml
12 |
13 | from config import Config
14 | from concurrent import futures
15 | from http import HTTPStatus
16 | from loguru import logger
17 | from TBLogger import TBLogger
18 |
class NodeStatsListener():
    """Polls the EOS chain for active BitTensor nodes and republishes each
    node's training statistics to a per-node TensorBoard log directory.

    Usage: construct once, then call refresh() periodically. Each call
    re-reads the node set from the chain, reconciles gRPC channels and
    TensorBoard loggers against it, then queries every node for metrics.
    """

    def __init__(self, config, eos_url):
        """
        Args:
            config: parsed config object; reads .eos.*, .visualization_mode
                and .logdir (see visualizer/config.yaml).
            eos_url: base URL of the EOS chain HTTP endpoint.
        """
        # Init configurations and arguments
        self._config = config

        # Map from node_id to node attr dict (keys: identity, url, port).
        self._nodes = {}

        # Map of node_id to open gRPC channels.
        self._channels = {}

        # Map from node_id to TensorBoard logger.
        self._tbloggers = {}

        # Global step counter, incremented once per refresh pass.
        self._global_step = 0

        # EOS url
        self._eos_url = eos_url

    def refresh(self):
        """One polling pass: sync node set, channels and loggers, then query
        every node and write its reported stats to TensorBoard.

        Each stage is wrapped in its own try/except so a single failing node
        (or a chain outage) does not abort the whole pass.
        """
        logger.info('Refresh')

        # Refresh state.
        try:
            self._retrieve_all_nodes()
        except Exception as e:
            logger.exception("Failed to retrieve all node. Likely an issue connecting to the EOS chain", e)

        try:
            self._refresh_all_channels()
        except Exception as e:
            logger.exception("Failed refresh channels with exception: {}", e)

        try:
            self._refresh_all_tbloggers()
        except Exception as e:
            logger.exception("Failed refresh loggers with exception: {}", e)

        # Make queries.
        if self._config.visualization_mode != "tensorboard":
            logger.info("Unexpected visualization_mode: {}", self._config.visualization_mode)
        else:

            # Update the visualizer step.
            self._global_step += 1
            for node_id in self._nodes.keys():
                tblogger = self._tbloggers[node_id]
                channel = self._channels[node_id]

                # Try to query node.
                try:
                    response = self._query_node(node_id, channel)
                except Exception as e:
                    logger.info("Failed to query node: {}", node_id)
                    continue

                # Try to log response.
                try:
                    self._log_response(node_id, response, tblogger)
                except Exception as e:
                    logger.info("Failed to log response: {}, node: {}, error: {}", response, node_id, e)
                    continue

    def _retrieve_all_nodes(self):
        """Rebuild self._nodes from the chain's metagraph table.

        POSTs a get_table_rows query to the EOS HTTP API and records one
        entry per returned row, keyed by the node's identity. On a non-200
        response the node map is left empty and an error is logged.
        """
        # Refresh list of nodes in the network
        self._nodes = {}

        # Query the chain and retrieve all nodes that are "mining"
        payload = dict(
            code = self._config.eos.code,
            table = self._config.eos.table,
            scope = self._config.eos.scope,
            key_type = self._config.eos.key_type,
            json = "true"
        )

        payload_json = json.dumps(payload)
        request_url = self._eos_url + self._config.eos.get_table_command
        response = requests.post(url=request_url, data=payload_json)
        if response.status_code == HTTPStatus.OK:
            response_json = response.json()
            rows = response_json['rows']
            for row in rows:
                node_dict = dict(identity=row['identity'], url=row['address'], port=row['port'])
                self._nodes[ row['identity'] ] = node_dict
        else:
            logger.error("Error: Could not retrieve the nodes connected to the chain.")


    def _refresh_all_channels(self):
        """Close channels for nodes that vanished; open channels for new ones."""
        # Drop channels whose node is no longer on the chain.
        for node_id in list(self._channels.keys()):
            if node_id not in self._nodes:
                self._channels[node_id].close()
                del self._channels[node_id]

        # Add new node channels.
        for node in self._nodes.values():
            if node['identity'] not in self._channels:
                node_url = "{}:{}".format(node['url'], node['port'])
                new_channel = grpc.insecure_channel(node_url)
                self._channels[node['identity']] = new_channel

    def _refresh_all_tbloggers(self):
        """Drop loggers for vanished nodes; create loggers for new ones.

        Each node logs to its own subdirectory of config.logdir.
        """
        for node_id in list(self._tbloggers.keys()):
            if node_id not in self._nodes:
                del self._tbloggers[node_id]

        # Add new node loggers.
        for node_id in self._nodes.keys():
            if node_id not in self._tbloggers:
                log_dir = self._config.logdir + "/" + node_id
                self._tbloggers[node_id] = TBLogger(log_dir)

    def _query_node(self, node_id, channel):
        """Ask one node for its TB metrics over gRPC; return the unpickled payload.

        NOTE(review): pickle over the wire is only safe between trusted
        BitTensor processes — do not point this at untrusted peers.
        """
        stub = visualizer.visualizer_pb2_grpc.VisualizerStub(channel)
        request_payload_bytes = pickle.dumps("tb_metrics", protocol=0)
        response = stub.Report(
            visualizer.visualizer_pb2.ReportRequest(
                version = 1.0,
                source_id = '2',
                payload = request_payload_bytes
            )
        )
        # Let's process the incoming data
        response = pickle.loads(response.payload)
        return response

    def _log_response(self, node_id, response, tblogger):
        """Write one node's reported stats to its TensorBoard logger.

        Assumes `response` is a dict with keys 'step', 'gs', 'mem', 'loss',
        'metrics' (dict) and 'scores' (iterable of (id, score) pairs) —
        TODO(review): confirm against the node-side Report implementation.
        """
        tblogger.log_scalar("step", response['step'], self._global_step)
        tblogger.log_scalar("gs", response['gs'], self._global_step)
        tblogger.log_scalar("mem", response['mem'], self._global_step)
        tblogger.log_scalar("loss", response['loss'], self._global_step)

        # Metrics
        metrics = response['metrics']
        for idn in metrics.keys():
            tblogger.log_scalar(idn, metrics[idn], self._global_step)

        # Post scores to tb.
        scores = response['scores']
        if scores:
            for idn, score in scores:
                tblogger.log_scalar(idn, score, self._global_step)

        logger.info('Logging: node {}: step {} gs {} mem {} loss {} scores {}'.format(node_id, response['step'], response['gs'], response['mem'], response['loss'], scores))
166 |
167 |
def main(config, eos_url):
    """Run the stats listener loop, refreshing every `config.heartbeat`
    seconds, until interrupted with Ctrl-C."""
    stats_listener = NodeStatsListener(config, eos_url)
    try:
        logger.info('Started listener ...')
        while True:
            stats_listener.refresh()
            time.sleep(config.heartbeat)
    except KeyboardInterrupt:
        logger.info('Stopping listener with keyboard interrupt.')
178 |
179 | if __name__ == "__main__":
180 | parser = argparse.ArgumentParser()
181 | parser.add_argument(
182 | '--config_path',
183 | default='./visualizer/config.yaml',
184 | type=str,
185 | help='Path to config file.'
186 | )
187 | parser.add_argument(
188 | '--eos_url',
189 | default='http://host.docker.internal',
190 | type=str,
191 | help='EOS chain URL.'
192 | )
193 | args = parser.parse_args()
194 | config = Config.get_config_from_yaml(args.config_path)
195 | main(config, args.eos_url)
196 |
--------------------------------------------------------------------------------
/visualizer/proto/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/unconst/BitTensor/d1af6993c1d6bca273a0c8d147132ee9867f5543/visualizer/proto/__init__.py
--------------------------------------------------------------------------------
/visualizer/proto/visualizer.proto:
--------------------------------------------------------------------------------
1 |
2 | syntax = "proto3";
3 |
4 |
5 | // Visualizer protocol to define messages passed
6 | // between nodes and visualizer server.
7 |
// RPC surface exposed by the visualizer server.
service Visualizer {

  // Query remote node for a report on its findings thus far, response is an object containing
  // loss, gs, step, mem, etc.
  rpc Report(ReportRequest) returns (ReportResponse) {}
}
14 |
message ReportRequest {
  // Protocol version 1.0
  float version = 1;

  // Source ID. Where the message originated. Format should be known IP of visualizer
  // server
  string source_id = 2;

  // Message payload. Zero or more var length byte strings.
  // Payload should just contain the fields we need a report back on.
  // i.e. 'mem', 'loss', etc.
  // NOTE(review): the checked-in generated bindings (visualizer_pb2.py)
  // encode payload as field number 8, not 3 — this .proto has drifted
  // from the generated code. Regenerating from this file would change
  // the wire tag; confirm which side is authoritative first.
  bytes payload = 3;
}
28 |
29 | // Reverse response from remote node, carries stats data and expects no tertiary response.
// Reverse response from remote node, carries stats data and expects no tertiary response.
message ReportResponse {
  // Protocol version 1.0
  float version = 1;

  // Source ID. Where this message originated. Format is the IP of the remote node.
  string source_id = 2;

  // Message payload. Zero or more fixed length byte strings.
  // Will contain a vector representation of tf.float32 of the requested
  // statistics
  // NOTE(review): generated bindings encode this as field number 8, not
  // 3 — same drift as ReportRequest; verify before regenerating.
  bytes payload = 3;
}
42 |
43 |
--------------------------------------------------------------------------------
/visualizer/proto/visualizer_pb2.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # Generated by the protocol buffer compiler. DO NOT EDIT!
3 | # source: visualizer.proto
4 |
5 | import sys
6 | _b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
7 | from google.protobuf import descriptor as _descriptor
8 | from google.protobuf import message as _message
9 | from google.protobuf import reflection as _reflection
10 | from google.protobuf import symbol_database as _symbol_database
11 | # @@protoc_insertion_point(imports)
12 |
13 | _sym_db = _symbol_database.Default()
14 |
15 |
16 |
17 |
# File descriptor for visualizer.proto (default package, proto3 syntax).
# NOTE(review): the serialized descriptor encodes 'payload' with wire tag
# 8 ('\x18\x08'), while the checked-in visualizer.proto declares it as
# field 3 — the .proto and this generated module have drifted.
DESCRIPTOR = _descriptor.FileDescriptor(
  name='visualizer.proto',
  package='',
  syntax='proto3',
  serialized_options=None,
  serialized_pb=_b('\n\x10visualizer.proto\"D\n\rReportRequest\x12\x0f\n\x07version\x18\x01 \x01(\x02\x12\x11\n\tsource_id\x18\x02 \x01(\t\x12\x0f\n\x07payload\x18\x08 \x01(\x0c\"E\n\x0eReportResponse\x12\x0f\n\x07version\x18\x01 \x01(\x02\x12\x11\n\tsource_id\x18\x02 \x01(\t\x12\x0f\n\x07payload\x18\x08 \x01(\x0c\x32\x39\n\nVisualizer\x12+\n\x06Report\x12\x0e.ReportRequest\x1a\x0f.ReportResponse\"\x00\x62\x06proto3')
)
25 |
26 |
27 |
28 |
# Descriptor for ReportRequest: version (float, field 1),
# source_id (string, field 2), payload (bytes, field 8).
_REPORTREQUEST = _descriptor.Descriptor(
  name='ReportRequest',
  full_name='ReportRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='version', full_name='ReportRequest.version', index=0,
      number=1, type=2, cpp_type=6, label=1,
      has_default_value=False, default_value=float(0),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='source_id', full_name='ReportRequest.source_id', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    # NOTE(review): field number 8 here vs '= 3' in visualizer.proto.
    _descriptor.FieldDescriptor(
      name='payload', full_name='ReportRequest.payload', index=2,
      number=8, type=12, cpp_type=9, label=1,
      has_default_value=False, default_value=_b(""),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=20,
  serialized_end=88,
)
72 |
73 |
# Descriptor for ReportResponse: same field layout as ReportRequest
# (version=1, source_id=2, payload=8).
_REPORTRESPONSE = _descriptor.Descriptor(
  name='ReportResponse',
  full_name='ReportResponse',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='version', full_name='ReportResponse.version', index=0,
      number=1, type=2, cpp_type=6, label=1,
      has_default_value=False, default_value=float(0),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='source_id', full_name='ReportResponse.source_id', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    # NOTE(review): field number 8 here vs '= 3' in visualizer.proto.
    _descriptor.FieldDescriptor(
      name='payload', full_name='ReportResponse.payload', index=2,
      number=8, type=12, cpp_type=9, label=1,
      has_default_value=False, default_value=_b(""),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=90,
  serialized_end=159,
)
117 |
# Register the message descriptors with the file descriptor, then build
# the concrete message classes via the reflection machinery.
DESCRIPTOR.message_types_by_name['ReportRequest'] = _REPORTREQUEST
DESCRIPTOR.message_types_by_name['ReportResponse'] = _REPORTRESPONSE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)

ReportRequest = _reflection.GeneratedProtocolMessageType('ReportRequest', (_message.Message,), {
  'DESCRIPTOR' : _REPORTREQUEST,
  '__module__' : 'visualizer_pb2'
  # @@protoc_insertion_point(class_scope:ReportRequest)
  })
_sym_db.RegisterMessage(ReportRequest)

ReportResponse = _reflection.GeneratedProtocolMessageType('ReportResponse', (_message.Message,), {
  'DESCRIPTOR' : _REPORTRESPONSE,
  '__module__' : 'visualizer_pb2'
  # @@protoc_insertion_point(class_scope:ReportResponse)
  })
_sym_db.RegisterMessage(ReportResponse)
135 |
136 |
137 |
# Service descriptor for Visualizer with its single unary Report method
# (ReportRequest in, ReportResponse out).
_VISUALIZER = _descriptor.ServiceDescriptor(
  name='Visualizer',
  full_name='Visualizer',
  file=DESCRIPTOR,
  index=0,
  serialized_options=None,
  serialized_start=161,
  serialized_end=218,
  methods=[
    _descriptor.MethodDescriptor(
      name='Report',
      full_name='Visualizer.Report',
      index=0,
      containing_service=None,
      input_type=_REPORTREQUEST,
      output_type=_REPORTRESPONSE,
      serialized_options=None,
    ),
  ])
_sym_db.RegisterServiceDescriptor(_VISUALIZER)

DESCRIPTOR.services_by_name['Visualizer'] = _VISUALIZER
160 |
161 | # @@protoc_insertion_point(module_scope)
162 |
--------------------------------------------------------------------------------
/visualizer/proto/visualizer_pb2_grpc.py:
--------------------------------------------------------------------------------
1 | # Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
2 | import grpc
3 |
4 | from visualizer.proto import visualizer_pb2 as visualizer__pb2
5 |
6 |
class VisualizerStub(object):
  """Visualizer protocol to define messages passed
  between nodes and visualizer server.

  """

  def __init__(self, channel):
    """Constructor.

    Args:
      channel: A grpc.Channel.
    """
    # Unary-unary RPC: one ReportRequest in, one ReportResponse back.
    self.Report = channel.unary_unary(
        '/Visualizer/Report',
        request_serializer=visualizer__pb2.ReportRequest.SerializeToString,
        response_deserializer=visualizer__pb2.ReportResponse.FromString,
        )
24 |
25 |
class VisualizerServicer(object):
  """Visualizer protocol to define messages passed
  between nodes and visualizer server.

  """

  def Report(self, request, context):
    """Query remote node for a report on its findings thus far, response is an object containing
    loss, gs, step, mem, etc.
    """
    # Generated placeholder: a real server subclasses this and overrides
    # Report; this default rejects the call as UNIMPLEMENTED.
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')
39 |
40 |
def add_VisualizerServicer_to_server(servicer, server):
  # Register the servicer's Report handler on the given grpc server
  # under the 'Visualizer' service name.
  rpc_method_handlers = {
      'Report': grpc.unary_unary_rpc_method_handler(
          servicer.Report,
          request_deserializer=visualizer__pb2.ReportRequest.FromString,
          response_serializer=visualizer__pb2.ReportResponse.SerializeToString,
      ),
  }
  generic_handler = grpc.method_handlers_generic_handler(
      'Visualizer', rpc_method_handlers)
  server.add_generic_rpc_handlers((generic_handler,))
52 |
--------------------------------------------------------------------------------
/visualizer/visualizer.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | source ./scripts/constant.sh
3 |
4 | function print_help () {
5 | echo "Script for starting Visualization instance."
6 | echo "Usage ./visualizer.sh [OPTIONS]"
7 | echo ""
8 | echo "Options:"
9 | echo " -h, --help Print this help message and exit"
10 | echo " -c, --config_path Path to config yaml."
11 | echo " -e, --eos_url EOS chain path"
12 | }
13 |
# Defaults; overridden by the -c / -e flags parsed below.
config_path='visualizer/config.yaml'
# NOTE(review): was 'docker.host.internal' — Docker's host alias is
# 'host.docker.internal'; this now matches the default in
# visualizer/main.py. Confirm no caller relied on the old value.
eos_url='http://host.docker.internal'

# Parse command line args into the globals above.
# BUG FIX: the loop condition was 'test 4 -gt 0' (always true, relying on
# the catch-all break), and the -e branch shifted only once, leaving its
# value on the argument list and aborting further option parsing.
parse_args() {
  while (( $# > 0 )); do
    case "$1" in
      -h|--help)
        print_help
        exit 0
        ;;
      -c|--config_path)
        config_path="$2"
        shift 2
        ;;
      -e|--eos_url)
        eos_url="$2"
        shift 2
        ;;
      *)
        break
        ;;
    esac
  done
}
parse_args "$@"
38 |
39 | # read yaml file
40 | eval $(parse_yaml "$config_path" "config_")
41 |
42 |
# Launch TensorBoard in the background against the configured logdir and
# record its PID in TensorboardPID for teardown.
function start_tensorboard() {
  log "=== start Tensorboard ==="
  log "Command: tensorboard --logdir=$config_logdir --port=$config_tbport --host=$config_bind_address"
  log "Endpoint: http://$config_bind_address:$config_tbport"
  log ""
  # Quote expansions so empty or space-containing config values do not
  # word-split into stray arguments.
  tensorboard --logdir="$config_logdir" --port="$config_tbport" --host="$config_bind_address" &
  TensorboardPID=$!
}
51 |
# Start the Python visualizer listener in the background; its PID is kept
# in LISTENERPID for teardown.
function start_node_listener() {

  log "=== start Visualizer ==="
  # Build argv as an array and execute it directly instead of eval'ing a
  # flat string: values containing spaces survive intact and nothing in
  # the config can be interpreted as shell syntax.
  local -a listener_cmd=(python3 visualizer/main.py --config_path="$config_path" --eos_url="$eos_url")
  log "Command: ${listener_cmd[*]}"
  "${listener_cmd[@]}" &
  LISTENERPID=$!
  log ""

}
62 |
63 | # Clean docker tear down.
# Clean docker tear down.
function teardown() {
  # Perform program exit & housekeeping.
  # Use SIGTERM (default) rather than SIGKILL so the children get a
  # chance to clean up; guard against a PID that was never set because
  # the corresponding start_* function did not run.
  [[ -n "${TensorboardPID:-}" ]] && kill "$TensorboardPID" 2>/dev/null
  log "=== stopped Tensorboard ==="

  [[ -n "${LISTENERPID:-}" ]] && kill "$LISTENERPID" 2>/dev/null
  log "=== stopped node listener ==="

  exit 0
}
74 |
75 | function main() {
76 |
77 | # Build protos
78 | ./scripts/build_protos.sh
79 |
80 | # Start Tensorboard.
81 | start_tensorboard
82 |
83 | # start listening to incoming data from running nodes
84 | start_node_listener
85 |
86 | # NOTE(const) SIGKILL cannot be caught because it goes directly to the kernal.
87 | trap teardown INT SIGHUP SIGINT SIGTERM
88 |
89 | # idle waiting for abort from user
90 | read -r -d '' _