├── bigchaindb ├── web │ ├── __init__.py │ ├── views │ │ ├── __init__.py │ │ ├── base.py │ │ ├── info.py │ │ └── transactions.py │ └── server.py ├── commands │ ├── __init__.py │ ├── utils.py │ └── bigchain.py ├── pipelines │ ├── __init__.py │ ├── election.py │ ├── utils.py │ ├── stale.py │ ├── vote.py │ └── block.py ├── version.py ├── db │ ├── __init__.py │ └── utils.py ├── crypto.py ├── monitor.py ├── __init__.py ├── processes.py ├── exceptions.py ├── client.py ├── consensus.py ├── config_utils.py ├── util.py └── core.py ├── setup.cfg ├── pytest.ini ├── ntools └── one-m │ ├── ansible │ ├── roles │ │ ├── bigchaindb │ │ │ ├── files │ │ │ │ └── start_bigchaindb.sh │ │ │ └── tasks │ │ │ │ └── main.yml │ │ ├── ntp │ │ │ ├── handlers │ │ │ │ └── main.yml │ │ │ └── tasks │ │ │ │ └── main.yml │ │ ├── rethinkdb │ │ │ ├── templates │ │ │ │ └── rethinkdb.conf.j2 │ │ │ └── tasks │ │ │ │ └── main.yml │ │ └── db_storage │ │ │ └── tasks │ │ │ └── main.yml │ ├── group_vars │ │ └── all │ └── one-m-node.yml │ └── aws │ ├── providers.tf │ ├── outputs.tf │ ├── variables.tf │ ├── amis.tf │ ├── resources.tf │ └── security_group.tf ├── .dockerignore ├── Dockerfile-dev ├── Dockerfile ├── docker-compose-monitor.yml ├── docker-compose.yml └── setup.py /bigchaindb/web/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /bigchaindb/commands/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /bigchaindb/pipelines/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /bigchaindb/web/views/__init__.py: -------------------------------------------------------------------------------- 1 | 
-------------------------------------------------------------------------------- /bigchaindb/version.py: -------------------------------------------------------------------------------- 1 | __version__ = '0.6.0' 2 | __short_version__ = '0.6' 3 | -------------------------------------------------------------------------------- /setup.cfg: -------------------------------------------------------------------------------- 1 | [aliases] 2 | test=pytest 3 | 4 | [coverage:run] 5 | source = . 6 | omit = *test* 7 | -------------------------------------------------------------------------------- /bigchaindb/db/__init__.py: -------------------------------------------------------------------------------- 1 | # TODO can we use explicit imports? 2 | from bigchaindb.db.utils import * 3 | -------------------------------------------------------------------------------- /pytest.ini: -------------------------------------------------------------------------------- 1 | [pytest] 2 | testpaths = tests 3 | norecursedirs = .* *.egg *.egg-info env* devenv* docs 4 | -------------------------------------------------------------------------------- /ntools/one-m/ansible/roles/bigchaindb/files/start_bigchaindb.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | bigchaindb -y start & 3 | disown 4 | -------------------------------------------------------------------------------- /.dockerignore: -------------------------------------------------------------------------------- 1 | .cache/ 2 | .coverage 3 | .eggs/ 4 | .git/ 5 | .gitignore 6 | .ropeproject/ 7 | .travis.yml 8 | BigchainDB.egg-info/ 9 | dist/ 10 | -------------------------------------------------------------------------------- /ntools/one-m/ansible/group_vars/all: -------------------------------------------------------------------------------- 1 | --- 2 | # ansible/group_vars/all 3 | # Variables in this file are for *all* host groups (i.e. 
all hosts) 4 | 5 | example_var: 23 -------------------------------------------------------------------------------- /ntools/one-m/ansible/roles/ntp/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # ansible/roles/ntp/handlers/main.yml 3 | 4 | - name: restart ntp 5 | service: name=ntp state=restarted 6 | -------------------------------------------------------------------------------- /Dockerfile-dev: -------------------------------------------------------------------------------- 1 | FROM python:3.5 2 | 3 | RUN apt-get update 4 | 5 | RUN mkdir -p /usr/src/app 6 | WORKDIR /usr/src/app 7 | 8 | RUN pip install --upgrade pip 9 | 10 | COPY . /usr/src/app/ 11 | 12 | RUN pip install --no-cache-dir -e .[dev] 13 | -------------------------------------------------------------------------------- /ntools/one-m/aws/providers.tf: -------------------------------------------------------------------------------- 1 | provider "aws" { 2 | # An AWS access_key and secret_key are needed; Terraform looks 3 | # for an AWS credentials file in the default location. 4 | # See https://tinyurl.com/pu8gd9h 5 | region = "${var.aws_region}" 6 | } 7 | -------------------------------------------------------------------------------- /ntools/one-m/aws/outputs.tf: -------------------------------------------------------------------------------- 1 | # You can get the value of "ip_address" after running terraform apply using: 2 | # $ terraform output ip_address 3 | # You could use that in a script, for example 4 | output "ip_address" { 5 | value = "${aws_eip.ip.public_ip}" 6 | } 7 | -------------------------------------------------------------------------------- /ntools/one-m/ansible/one-m-node.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # This playbook deploys a BigchainDB node in one machine (one-m). 
3 | 4 | - name: Ensure a one-machine BigchainDB node is configured properly 5 | hosts: all 6 | remote_user: ubuntu 7 | 8 | roles: 9 | - ntp 10 | - db_storage 11 | - rethinkdb 12 | - bigchaindb 13 | -------------------------------------------------------------------------------- /ntools/one-m/ansible/roles/rethinkdb/templates/rethinkdb.conf.j2: -------------------------------------------------------------------------------- 1 | directory=/data 2 | runuser=rethinkdb 3 | rungroup=rethinkdb 4 | bind=all 5 | # The IP address of localhost is 127.0.0.1 6 | #bind-http=127.0.0.1 7 | # direct-io 8 | # List of *other* hostnames in the cluster: 9 | # join=hostname1:29015 10 | # join=hostname2:29015 11 | # etc. 12 | -------------------------------------------------------------------------------- /ntools/one-m/aws/variables.tf: -------------------------------------------------------------------------------- 1 | variable "aws_region" { 2 | default = "eu-central-1" 3 | } 4 | 5 | variable "aws_instance_type" { 6 | default = "m4.large" 7 | } 8 | 9 | variable "root_storage_in_GiB" { 10 | default = 10 11 | } 12 | 13 | variable "DB_storage_in_GiB" { 14 | default = 30 15 | } 16 | 17 | variable "ssh_key_name" { 18 | # No default. Ask as needed. 
# Separate all crypto code so that we can easily test several implementations

import sha3
from cryptoconditions import crypto


def hash_data(data):
    """Hash the provided data using SHA3-256.

    Args:
        data (str): text to hash; it is UTF-8 encoded before hashing.

    Returns:
        str: hex digest of the hash.
    """
    # NOTE(review): ``sha3`` here is the third-party pysha3 package. Depending
    # on the installed version this may compute pre-standard Keccak rather than
    # FIPS-202 SHA3-256 — confirm before replacing with hashlib.sha3_256,
    # because hashes already stored in the database must remain stable.
    return sha3.sha3_256(data.encode()).hexdigest()


def generate_key_pair():
    """Generate a new Ed25519 (signing, verifying) key pair.

    Returns:
        tuple: ``(private_key, public_key)`` as ``str`` — decoded from the
        bytes returned by ``cryptoconditions``.
    """
    sk, pk = crypto.ed25519_generate_key_pair()
    return sk.decode(), pk.decode()

# Re-export the Ed25519 key classes so the rest of the codebase depends on
# this module instead of on ``cryptoconditions`` directly.
SigningKey = crypto.Ed25519SigningKey
VerifyingKey = crypto.Ed25519VerifyingKey
15 | 16 | WORKDIR /data 17 | 18 | ENV BIGCHAINDB_CONFIG_PATH /data/.bigchaindb 19 | ENV BIGCHAINDB_SERVER_BIND 0.0.0.0:9984 20 | ENV BIGCHAINDB_API_ENDPOINT http://bigchaindb:9984/api/v1 21 | 22 | ENTRYPOINT ["bigchaindb", "--experimental-start-rethinkdb"] 23 | 24 | CMD ["start"] 25 | 26 | EXPOSE 8080 9984 28015 29015 27 | -------------------------------------------------------------------------------- /docker-compose-monitor.yml: -------------------------------------------------------------------------------- 1 | version: '2' 2 | services: 3 | influxdb: 4 | image: tutum/influxdb 5 | ports: 6 | - "8083:8083" 7 | - "8086:8086" 8 | - "8090" 9 | - "8099" 10 | environment: 11 | PRE_CREATE_DB: "telegraf" 12 | volumes: 13 | - $INFLUXDB_DATA:/data 14 | 15 | grafana: 16 | image: bigchaindb/grafana-bigchaindb-docker 17 | tty: true 18 | ports: 19 | - "3000:3000" 20 | environment: 21 | INFLUXDB_HOST: "influxdb" 22 | 23 | statsd: 24 | image: bigchaindb/docker-telegraf-statsd 25 | ports: 26 | - "8125:8125/udp" 27 | environment: 28 | INFLUXDB_HOST: "influxdb" -------------------------------------------------------------------------------- /ntools/one-m/aws/amis.tf: -------------------------------------------------------------------------------- 1 | # Each AWS region has a different AMI name 2 | # even though the contents are the same. 3 | # This file has the mapping from region --> AMI name. 
4 | # 5 | # These are all Ubuntu 14.04 LTS AMIs 6 | # with Arch = amd64, Instance Type = hvm:ebs-ssd 7 | # from https://cloud-images.ubuntu.com/locator/ec2/ 8 | variable "amis" { 9 | type = "map" 10 | default = { 11 | eu-west-1 = "ami-55452e26" 12 | eu-central-1 = "ami-b1cf39de" 13 | us-east-1 = "ami-8e0b9499" 14 | us-west-2 = "ami-547b3834" 15 | ap-northeast-1 = "ami-49d31328" 16 | ap-southeast-1 = "ami-5e429c3d" 17 | ap-southeast-2 = "ami-25f3c746" 18 | sa-east-1 = "ami-97980efb" 19 | } 20 | } 21 | -------------------------------------------------------------------------------- /bigchaindb/web/views/info.py: -------------------------------------------------------------------------------- 1 | """This module provides the blueprint for some basic API endpoints. 2 | 3 | For more information please refer to the documentation on ReadTheDocs: 4 | - https://bigchaindb.readthedocs.io/en/latest/drivers-clients/http-client-server-api.html 5 | """ 6 | 7 | import flask 8 | from flask import Blueprint 9 | 10 | import bigchaindb 11 | from bigchaindb import version 12 | 13 | 14 | info_views = Blueprint('info_views', __name__) 15 | 16 | 17 | @info_views.route('/') 18 | def home(): 19 | return flask.jsonify({ 20 | 'software': 'BigchainDB', 21 | 'version': version.__version__, 22 | 'public_key': bigchaindb.config['keypair']['public'], 23 | 'keyring': bigchaindb.config['keyring'], 24 | 'api_endpoint': bigchaindb.config['api_endpoint'] 25 | }) 26 | 27 | -------------------------------------------------------------------------------- /docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '2' 2 | 3 | services: 4 | rdb: 5 | image: rethinkdb 6 | ports: 7 | - "58080:8080" 8 | - "28015" 9 | volumes_from: 10 | - rdb-data 11 | 12 | rdb-data: 13 | image: rethinkdb 14 | volumes: 15 | - /data 16 | command: "true" 17 | 18 | bdb: 19 | build: 20 | context: . 
21 | dockerfile: Dockerfile-dev 22 | container_name: docker-bigchaindb 23 | volumes: 24 | - ./bigchaindb:/usr/src/app/bigchaindb 25 | - ./tests:/usr/src/app/tests 26 | - ./docs:/usr/src/app/docs 27 | - ./setup.py:/usr/src/app/setup.py 28 | - ./setup.cfg:/usr/src/app/setup.cfg 29 | - ./pytest.ini:/usr/src/app/pytest.ini 30 | - ~/.bigchaindb_docker:/root/.bigchaindb_docker 31 | environment: 32 | BIGCHAINDB_DATABASE_HOST: rdb 33 | BIGCHAINDB_CONFIG_PATH: /root/.bigchaindb_docker/config 34 | command: bigchaindb start 35 | -------------------------------------------------------------------------------- /ntools/one-m/ansible/roles/ntp/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # ansible/roles/ntp/tasks/main.yml 3 | 4 | - name: Ensure ntpdate is not installed (and uninstall it if necessary) 5 | apt: name=ntpdate state=absent 6 | become: true 7 | 8 | - name: Ensure the latest ntp is installed 9 | apt: name=ntp state=latest update_cache=yes 10 | become: true 11 | 12 | - name: Retrieve facts about the file /etc/ntp.conf 13 | stat: path=/etc/ntp.conf 14 | register: ntp_conf_file 15 | 16 | - debug: var=ntp_conf_file.stat.exists 17 | 18 | - name: Fail when /etc/ntp.conf doesn't exist 19 | fail: msg="The NTP config file /etc/ntp.conf doesn't exist'" 20 | when: ntp_conf_file.stat.exists == False 21 | 22 | # For now, we assume the default /etc/ntp.conf file is okay 23 | 24 | - name: Ensure the ntp service is now started and should start on boot (enabled=yes) 25 | service: name=ntp state=started enabled=yes 26 | become: true 27 | 28 | # All notified handlers are executed now (only once each, and only if notified) 29 | -------------------------------------------------------------------------------- /bigchaindb/monitor.py: -------------------------------------------------------------------------------- 1 | from platform import node 2 | 3 | import statsd 4 | 5 | import bigchaindb 6 | from bigchaindb import config_utils 
class Monitor(statsd.StatsClient):
    """A ``statsd.StatsClient`` preconfigured from BigchainDB settings.

    Metric names are prefixed with this machine's hostname, and the statsd
    host/port default to the values found in the BigchainDB configuration.
    """

    def __init__(self, *args, **kwargs):
        """Load the BigchainDB configuration and delegate to StatsClient.

        Args:
            *args: positional arguments (identical to StatsClient).
            **kwargs: keyword arguments (identical to StatsClient);
                ``prefix``, ``host`` and ``port`` get configuration-based
                defaults when not supplied by the caller.
        """
        config_utils.autoconfigure()

        # ``kwargs`` is always a dict here, so the original
        # ``if not kwargs: kwargs = {}`` guard was dead code and is removed;
        # ``setdefault`` replaces the three explicit membership checks.
        kwargs.setdefault('prefix', '{hostname}.'.format(hostname=node()))
        kwargs.setdefault('host', bigchaindb.config['statsd']['host'])
        kwargs.setdefault('port', bigchaindb.config['statsd']['port'])
        super().__init__(*args, **kwargs)
27 | # Must do this in /sys/block/xvdp/queue/scheduler 28 | # and also with grub (so the I/O scheduler stays changed on reboot) 29 | # Example: https://gist.github.com/keithchambers/80b60559ad83cebf1672 30 | -------------------------------------------------------------------------------- /bigchaindb/__init__.py: -------------------------------------------------------------------------------- 1 | import copy 2 | import os 3 | 4 | # from functools import reduce 5 | # PORT_NUMBER = reduce(lambda x, y: x * y, map(ord, 'BigchainDB')) % 2**16 6 | # basically, the port number is 9984 7 | 8 | 9 | config = { 10 | 'server': { 11 | # Note: this section supports all the Gunicorn settings: 12 | # - http://docs.gunicorn.org/en/stable/settings.html 13 | 'bind': 'localhost:9984', 14 | 'workers': None, # if none, the value will be cpu_count * 2 + 1 15 | 'threads': None, # if none, the value will be cpu_count * 2 + 1 16 | }, 17 | 'database': { 18 | 'host': os.environ.get('BIGCHAINDB_DATABASE_HOST', 'localhost'), 19 | 'port': 28015, 20 | 'name': 'bigchain', 21 | }, 22 | 'keypair': { 23 | 'public': None, 24 | 'private': None, 25 | }, 26 | 'keyring': [], 27 | 'statsd': { 28 | 'host': 'localhost', 29 | 'port': 8125, 30 | 'rate': 0.01, 31 | }, 32 | 'api_endpoint': 'http://localhost:9984/api/v1', 33 | 'consensus_plugin': 'default', 34 | 'backlog_reassign_delay': 30 35 | } 36 | 37 | # We need to maintain a backup copy of the original config dict in case 38 | # the user wants to reconfigure the node. Check ``bigchaindb.config_utils`` 39 | # for more info. 
40 | _config = copy.deepcopy(config) 41 | from bigchaindb.core import Bigchain # noqa 42 | from bigchaindb.version import __version__ # noqa 43 | -------------------------------------------------------------------------------- /bigchaindb/processes.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import multiprocessing as mp 3 | 4 | import bigchaindb 5 | from bigchaindb.pipelines import vote, block, election, stale 6 | from bigchaindb.web import server 7 | 8 | 9 | logger = logging.getLogger(__name__) 10 | 11 | BANNER = """ 12 | **************************************************************************** 13 | * * 14 | * Initialization complete. BigchainDB is ready and waiting for events. * 15 | * You can send events through the API documented at: * 16 | * - http://docs.bigchaindb.apiary.io/ * 17 | * * 18 | * Listening to client connections on: {:<15} * 19 | * * 20 | **************************************************************************** 21 | """ 22 | 23 | 24 | def start(): 25 | logger.info('Initializing BigchainDB...') 26 | 27 | # start the processes 28 | logger.info('Starting block') 29 | block.start() 30 | 31 | logger.info('Starting voter') 32 | vote.start() 33 | 34 | logger.info('Starting stale transaction monitor') 35 | stale.start() 36 | 37 | logger.info('Starting election') 38 | election.start() 39 | 40 | # start the web api 41 | app_server = server.create_server(bigchaindb.config['server']) 42 | p_webapi = mp.Process(name='webapi', target=app_server.run) 43 | p_webapi.start() 44 | 45 | # start message 46 | logger.info(BANNER.format(bigchaindb.config['server']['bind'])) 47 | -------------------------------------------------------------------------------- /ntools/one-m/aws/resources.tf: -------------------------------------------------------------------------------- 1 | # One instance (virtual machine) on AWS: 2 | # https://www.terraform.io/docs/providers/aws/r/instance.html 3 | resource 
"aws_instance" "instance" { 4 | ami = "${lookup(var.amis, var.aws_region)}" 5 | instance_type = "${var.aws_instance_type}" 6 | tags { 7 | Name = "BigchainDB_node" 8 | } 9 | ebs_optimized = true 10 | key_name = "${var.ssh_key_name}" 11 | vpc_security_group_ids = ["${aws_security_group.node_sg1.id}"] 12 | root_block_device = { 13 | volume_type = "gp2" 14 | volume_size = "${var.root_storage_in_GiB}" 15 | delete_on_termination = true 16 | } 17 | # Enable EC2 Instance Termination Protection 18 | disable_api_termination = true 19 | } 20 | 21 | # This EBS volume will be used for database storage (not for root). 22 | # https://www.terraform.io/docs/providers/aws/r/ebs_volume.html 23 | resource "aws_ebs_volume" "db_storage" { 24 | type = "gp2" 25 | availability_zone = "${aws_instance.instance.availability_zone}" 26 | # Size in GiB (not GB!) 27 | size = "${var.DB_storage_in_GiB}" 28 | tags { 29 | Name = "BigchainDB_db_storage" 30 | } 31 | } 32 | 33 | # This allocates a new elastic IP address, if necessary 34 | # and then associates it with the above aws_instance 35 | resource "aws_eip" "ip" { 36 | instance = "${aws_instance.instance.id}" 37 | vpc = true 38 | } 39 | 40 | # This attaches the instance to the EBS volume for RethinkDB storage 41 | # https://www.terraform.io/docs/providers/aws/r/volume_attachment.html 42 | resource "aws_volume_attachment" "ebs_att" { 43 | # Why /dev/sdp? See https://tinyurl.com/z2zqm6n 44 | device_name = "/dev/sdp" 45 | volume_id = "${aws_ebs_volume.db_storage.id}" 46 | instance_id = "${aws_instance.instance.id}" 47 | } 48 | -------------------------------------------------------------------------------- /bigchaindb/exceptions.py: -------------------------------------------------------------------------------- 1 | """Custom exceptions used in the `bigchaindb` package. 
2 | """ 3 | 4 | class ConfigurationError(Exception): 5 | """Raised when there is a problem with server configuration""" 6 | 7 | 8 | class OperationError(Exception): 9 | """Raised when an operation cannot go through""" 10 | 11 | 12 | class TransactionDoesNotExist(Exception): 13 | """Raised if the transaction is not in the database""" 14 | 15 | 16 | class TransactionOwnerError(Exception): 17 | """Raised if a user tries to transfer a transaction they don't own""" 18 | 19 | 20 | class DoubleSpend(Exception): 21 | """Raised if a double spend is found""" 22 | 23 | 24 | class InvalidHash(Exception): 25 | """Raised if there was an error checking the hash for a particular operation""" 26 | 27 | 28 | class InvalidSignature(Exception): 29 | """Raised if there was an error checking the signature for a particular operation""" 30 | 31 | 32 | class DatabaseAlreadyExists(Exception): 33 | """Raised when trying to create the database but the db is already there""" 34 | 35 | 36 | class DatabaseDoesNotExist(Exception): 37 | """Raised when trying to delete the database but the db is not there""" 38 | 39 | 40 | class KeypairNotFoundException(Exception): 41 | """Raised if operation cannot proceed because the keypair was not given""" 42 | 43 | 44 | class KeypairMismatchException(Exception): 45 | """Raised if the private key(s) provided for signing don't match any of the curret owner(s)""" 46 | 47 | 48 | class StartupError(Exception): 49 | """Raised when there is an error starting up the system""" 50 | 51 | 52 | class ImproperVoteError(Exception): 53 | """Raised if a vote is not constructed correctly, or signed incorrectly""" 54 | 55 | 56 | class MultipleVotesError(Exception): 57 | """Raised if a voter has voted more than once""" 58 | 59 | 60 | class GenesisBlockAlreadyExistsError(Exception): 61 | """Raised when trying to create the already existing genesis block""" 62 | 63 | 64 | class CyclicBlockchainError(Exception): 65 | """Raised when there is a cycle in the blockchain""" 66 | 
-------------------------------------------------------------------------------- /bigchaindb/pipelines/election.py: -------------------------------------------------------------------------------- 1 | """This module takes care of all the logic related to block status. 2 | 3 | Specifically, what happens when a block becomes invalid. The logic is 4 | encapsulated in the ``Election`` class, while the sequence of actions 5 | is specified in ``create_pipeline``. 6 | """ 7 | import logging 8 | 9 | import rethinkdb as r 10 | from multipipes import Pipeline, Node 11 | 12 | from bigchaindb.pipelines.utils import ChangeFeed 13 | from bigchaindb import Bigchain 14 | 15 | 16 | logger = logging.getLogger(__name__) 17 | 18 | 19 | class Election: 20 | 21 | def __init__(self): 22 | self.bigchain = Bigchain() 23 | 24 | def check_for_quorum(self, next_vote): 25 | """ 26 | Checks if block has enough invalid votes to make a decision 27 | """ 28 | next_block = r.table('bigchain')\ 29 | .get(next_vote['vote']['voting_for_block'])\ 30 | .run(self.bigchain.conn) 31 | if self.bigchain.block_election_status(next_block) == self.bigchain.BLOCK_INVALID: 32 | return next_block 33 | 34 | def requeue_transactions(self, invalid_block): 35 | """ 36 | Liquidates transactions from invalid blocks so they can be processed again 37 | """ 38 | logger.info('Rewriting %s transactions from invalid block %s', 39 | len(invalid_block['block']['transactions']), 40 | invalid_block['id']) 41 | for tx in invalid_block['block']['transactions']: 42 | self.bigchain.write_transaction(tx) 43 | return invalid_block 44 | 45 | 46 | def get_changefeed(): 47 | return ChangeFeed(table='votes', operation=ChangeFeed.INSERT) 48 | 49 | 50 | def create_pipeline(): 51 | election = Election() 52 | 53 | election_pipeline = Pipeline([ 54 | Node(election.check_for_quorum), 55 | Node(election.requeue_transactions) 56 | ]) 57 | 58 | return election_pipeline 59 | 60 | 61 | def start(): 62 | pipeline = create_pipeline() 63 | 
"""Utility classes and functions to work with the pipelines."""


import rethinkdb as r
from multipipes import Node

from bigchaindb import Bigchain


class ChangeFeed(Node):
    """This class wraps a RethinkDB changefeed adding a `prefeed`.

    It extends the ``multipipes::Node`` class to make it pluggable in
    other Pipelines instances, and it makes usage of ``self.outqueue``
    to output the data.

    A changefeed is a real time feed on inserts, updates, and deletes, and
    it's volatile. This class is a helper to create changefeeds. Moreover
    it provides a way to specify a `prefeed`, that is a set of data (iterable)
    to output before the actual changefeed.
    """

    # Bit flags describing which operations to forward; combine with ``|``.
    INSERT = 1
    DELETE = 2
    UPDATE = 4

    def __init__(self, table, operation, prefeed=None):
        """Create a new RethinkDB ChangeFeed.

        Args:
            table (str): name of the table to listen to for changes.
            operation (int): can be ChangeFeed.INSERT, ChangeFeed.DELETE, or
                ChangeFeed.UPDATE. Combining multiple operation is possible using
                the bitwise ``|`` operator
                (e.g. ``ChangeFeed.INSERT | ChangeFeed.UPDATE``)
            prefeed (iterable): whatever set of data you want to be published
                first.
        """

        super().__init__(name='changefeed')
        # Normalize None to [] so ``run_forever`` can iterate unconditionally.
        self.prefeed = prefeed if prefeed else []
        self.table = table
        self.operation = operation
        self.bigchain = Bigchain()

    def run_forever(self):
        # Emit the static prefeed before subscribing to live changes.
        for element in self.prefeed:
            self.outqueue.put(element)

        # ``changes()`` blocks indefinitely, yielding one dict per change.
        for change in r.table(self.table).changes().run(self.bigchain.conn):

            # RethinkDB encodes the kind of operation in which of
            # old_val / new_val is present in the change document.
            is_insert = change['old_val'] is None
            is_delete = change['new_val'] is None
            is_update = not is_insert and not is_delete

            # Only forward changes whose operation bit was requested.
            if is_insert and (self.operation & ChangeFeed.INSERT):
                self.outqueue.put(change['new_val'])
            elif is_delete and (self.operation & ChangeFeed.DELETE):
                self.outqueue.put(change['old_val'])
            elif is_update and (self.operation & ChangeFeed.UPDATE):
                self.outqueue.put(change['new_val'])
31 | """ 32 | self.bigchain = Bigchain(backlog_reassign_delay=backlog_reassign_delay) 33 | self.timeout = timeout 34 | 35 | def check_transactions(self): 36 | """Poll backlog for stale transactions 37 | 38 | Returns: 39 | txs (list): txs to be re assigned 40 | """ 41 | sleep(self.timeout) 42 | for tx in self.bigchain.get_stale_transactions(): 43 | yield tx 44 | 45 | def reassign_transactions(self, tx): 46 | """Put tx back in backlog with new assignee 47 | 48 | Returns: 49 | transaction 50 | """ 51 | self.bigchain.reassign_transaction(tx) 52 | return tx 53 | 54 | 55 | def create_pipeline(timeout=5, backlog_reassign_delay=5): 56 | """Create and return the pipeline of operations to be distributed 57 | on different processes.""" 58 | 59 | stm = StaleTransactionMonitor(timeout=timeout, 60 | backlog_reassign_delay=backlog_reassign_delay) 61 | 62 | monitor_pipeline = Pipeline([ 63 | Node(stm.check_transactions), 64 | Node(stm.reassign_transactions) 65 | ]) 66 | 67 | return monitor_pipeline 68 | 69 | 70 | def start(timeout=5, backlog_reassign_delay=5): 71 | """Create, start, and return the block pipeline.""" 72 | pipeline = create_pipeline(timeout=timeout, 73 | backlog_reassign_delay=backlog_reassign_delay) 74 | pipeline.setup() 75 | pipeline.start() 76 | return pipeline 77 | -------------------------------------------------------------------------------- /ntools/one-m/aws/security_group.tf: -------------------------------------------------------------------------------- 1 | # It might be better to: 2 | # 1. start by only allowing SSH on port 22 (in the security group) 3 | # 2. use SSH to set up a proper firewall on the (virtual) machine 4 | # 3. 
add a second security group with more ports open 5 | 6 | resource "aws_security_group" "node_sg1" { 7 | name_prefix = "BigchainDB_" 8 | description = "Single-machine BigchainDB node security group" 9 | tags = { 10 | Name = "BigchainDB_one-m" 11 | } 12 | 13 | # Allow all outbound traffic 14 | egress { 15 | from_port = 0 16 | to_port = 0 17 | protocol = "-1" 18 | cidr_blocks = ["0.0.0.0/0"] 19 | } 20 | 21 | # SSH 22 | ingress { 23 | from_port = 22 24 | to_port = 22 25 | protocol = "tcp" 26 | cidr_blocks = ["0.0.0.0/0"] 27 | } 28 | 29 | # DNS 30 | ingress { 31 | from_port = 53 32 | to_port = 53 33 | protocol = "udp" 34 | cidr_blocks = ["0.0.0.0/0"] 35 | } 36 | 37 | # HTTP is used by some package managers 38 | ingress { 39 | from_port = 80 40 | to_port = 80 41 | protocol = "tcp" 42 | cidr_blocks = ["0.0.0.0/0"] 43 | } 44 | 45 | # NTP daemons use port 123 but the request will 46 | # come from inside the firewall so a response is expected 47 | 48 | # SNMP (e.g. for server monitoring) 49 | ingress { 50 | from_port = 161 51 | to_port = 161 52 | protocol = "udp" 53 | cidr_blocks = ["0.0.0.0/0"] 54 | } 55 | 56 | # HTTPS is used when installing RethinkDB 57 | # and by some package managers 58 | ingress { 59 | from_port = 443 60 | to_port = 443 61 | protocol = "tcp" 62 | cidr_blocks = ["0.0.0.0/0"] 63 | } 64 | 65 | # StatsD 66 | ingress { 67 | from_port = 8125 68 | to_port = 8125 69 | protocol = "udp" 70 | cidr_blocks = ["0.0.0.0/0"] 71 | } 72 | 73 | # Future: Don't allow port 8080 for the RethinkDB web interface. 74 | # Use a SOCKS proxy or reverse proxy instead. 
75 | 76 | ingress { 77 | from_port = 8080 78 | to_port = 8080 79 | protocol = "tcp" 80 | cidr_blocks = ["0.0.0.0/0"] 81 | } 82 | 83 | # BigchainDB Client-Server REST API 84 | ingress { 85 | from_port = 9984 86 | to_port = 9984 87 | protocol = "tcp" 88 | cidr_blocks = ["0.0.0.0/0"] 89 | } 90 | 91 | # Port 28015 doesn't have to be open to the outside 92 | # since the RethinkDB client and server are on localhost 93 | 94 | # RethinkDB intracluster communications use port 29015 95 | ingress { 96 | from_port = 29015 97 | to_port = 29015 98 | protocol = "tcp" 99 | cidr_blocks = ["0.0.0.0/0"] 100 | } 101 | } 102 | -------------------------------------------------------------------------------- /ntools/one-m/ansible/roles/rethinkdb/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # ansible/roles/rethinkdb/tasks/main.yml 3 | 4 | # Note: the .list extension will be added to the rethinkdb filename automatically 5 | - name: > 6 | Ensure RethinkDB's APT repository for Ubuntu trusty is present 7 | in /etc/apt/sources.list.d/rethinkdb.list 8 | apt_repository: 9 | repo='deb http://download.rethinkdb.com/apt trusty main' 10 | filename=rethinkdb 11 | state=present 12 | become: true 13 | 14 | - name: Ensure RethinkDB's public GPG key is in the set of APT keys 15 | apt_key: url=http://download.rethinkdb.com/apt/pubkey.gpg state=present 16 | become: true 17 | 18 | - name: Ensure the Ubuntu package rethinkdb 2.3.4~0trusty is installed 19 | apt: name=rethinkdb=2.3.4~0trusty state=present update_cache=yes 20 | become: true 21 | 22 | - name: Ensure the /data directory's owner and group are both 'rethinkdb' 23 | file: path=/data state=directory owner=rethinkdb group=rethinkdb 24 | become: true 25 | 26 | - name: Gather facts about the file /tmp/created_on_run1 27 | stat: path=/tmp/created_on_run1 28 | register: file_created_on_run1 29 | 30 | - name: if the file /tmp/created_on_run1 doesn't exist then create /data/delete_me 31 | file: 
path=/data/delete_me state=touch owner=rethinkdb group=rethinkdb 32 | become: true 33 | when: not file_created_on_run1.stat.exists 34 | 35 | - name: if the file /tmp/created_on_run1 doesn't exist then do sudo rm -rf /data/* 36 | shell: rm -rf /data/* 37 | become: true 38 | when: not file_created_on_run1.stat.exists 39 | 40 | - name: if the file /tmp/created_on_run1 doesn't exist then create it 41 | file: path=/tmp/created_on_run1 state=touch 42 | become: true 43 | when: not file_created_on_run1.stat.exists 44 | 45 | # To enable starting RethinkDB on boot (on init.d systems), 46 | # the config file must be put in /etc/rethinkdb/instances.d/ 47 | # See https://www.rethinkdb.com/docs/start-on-startup/ 48 | # Note: This task does NOT have a notify: rethinkdb restart 49 | # A task to ensure RethinkDB is started comes later. 50 | - name: > 51 | Generate a RethinkDB config file from rethinkdb.conf.j2 and put it in 52 | /etc/rethinkdb/instances.d/instance1.conf 53 | template: 54 | src=rethinkdb.conf.j2 55 | dest=/etc/rethinkdb/instances.d/instance1.conf 56 | owner=root 57 | group=root 58 | mode=0664 59 | become: true 60 | register: config_file 61 | 62 | - name: Ensure rethinkdb is now started 63 | service: name=rethinkdb state=started 64 | become: true 65 | register: rethinkdb_started 66 | 67 | - debug: var=rethinkdb_started 68 | 69 | - debug: msg="The RethinkDB config file changed while RethinkDB was already running. 70 | RethinkDB was not stopped and restarted. You must do that manually." 71 | when: config_file.changed and (not rethinkdb_started.changed) 72 | -------------------------------------------------------------------------------- /bigchaindb/web/server.py: -------------------------------------------------------------------------------- 1 | """This module contains basic functions to instantiate the BigchainDB API. 2 | 3 | The application is implemented in Flask and runs using Gunicorn. 
4 | """ 5 | 6 | import copy 7 | import multiprocessing 8 | 9 | from flask import Flask 10 | import gunicorn.app.base 11 | 12 | from bigchaindb import util 13 | from bigchaindb import Bigchain 14 | from bigchaindb.web.views.info import info_views 15 | from bigchaindb.web.views.transactions import transaction_views 16 | 17 | from bigchaindb.monitor import Monitor 18 | 19 | 20 | class StandaloneApplication(gunicorn.app.base.BaseApplication): 21 | """Run a **wsgi** app wrapping it in a Gunicorn Base Application. 22 | 23 | Adapted from: 24 | - http://docs.gunicorn.org/en/latest/custom.html 25 | """ 26 | 27 | def __init__(self, app, options=None): 28 | '''Initialize a new standalone application. 29 | 30 | Args: 31 | app: A wsgi Python application. 32 | options (dict): the configuration. 33 | 34 | ''' 35 | self.options = options or {} 36 | self.application = app 37 | super(StandaloneApplication, self).__init__() 38 | 39 | def load_config(self): 40 | config = dict((key, value) for key, value in self.options.items() 41 | if key in self.cfg.settings and value is not None) 42 | 43 | for key, value in config.items(): 44 | # not sure if we need the `key.lower` here, will just keep 45 | # keep it for now. 46 | self.cfg.set(key.lower(), value) 47 | 48 | def load(self): 49 | return self.application 50 | 51 | 52 | def create_app(settings): 53 | """Return an instance of the Flask application. 54 | 55 | Args: 56 | debug (bool): a flag to activate the debug mode for the app 57 | (default: False). 58 | """ 59 | 60 | app = Flask(__name__) 61 | 62 | app.debug = settings.get('debug', False) 63 | 64 | app.config['bigchain_pool'] = util.pool(Bigchain, size=settings.get('threads', 4)) 65 | app.config['monitor'] = Monitor() 66 | 67 | app.register_blueprint(info_views, url_prefix='/') 68 | app.register_blueprint(transaction_views, url_prefix='/api/v1') 69 | return app 70 | 71 | 72 | def create_server(settings): 73 | """Wrap and return an application ready to be run. 
74 | 75 | Args: 76 | settings (dict): a dictionary containing the settings, more info 77 | here http://docs.gunicorn.org/en/latest/settings.html 78 | 79 | Return: 80 | an initialized instance of the application. 81 | """ 82 | 83 | settings = copy.deepcopy(settings) 84 | 85 | if not settings.get('workers'): 86 | settings['workers'] = (multiprocessing.cpu_count() * 2) + 1 87 | 88 | if not settings.get('threads'): 89 | settings['threads'] = (multiprocessing.cpu_count() * 2) + 1 90 | 91 | app = create_app(settings) 92 | standalone = StandaloneApplication(app, settings) 93 | return standalone 94 | 95 | -------------------------------------------------------------------------------- /ntools/one-m/ansible/roles/bigchaindb/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # ansible/roles/bigchaindb/tasks/main.yml 3 | 4 | # Note: "become: true" basically means "become root user for this task" i.e. sudo 5 | # See https://docs.ansible.com/ansible/become.html 6 | 7 | # Note: update_cache=yes means it will do the equivalent of 8 | # sudo apt-get update before the operation. 9 | - name: Ensure the latest BigchainDB-required Ubuntu packages are installed 10 | apt: name={{item}} state=latest update_cache=yes 11 | become: true 12 | with_items: 13 | - git 14 | - g++ 15 | - python3-dev 16 | - python3-setuptools # mainly for easy_install3, which is used to get latest pip3 17 | 18 | # This should make both pip and pip3 be pip version >=8.1.2 (python 3.4). 19 | # See the comments about this below. 
20 | - name: Ensure the latest pip/pip3 is installed, using easy_install3 21 | easy_install: executable=easy_install3 name=pip state=latest 22 | become: true 23 | 24 | - name: Ensure the latest setuptools (Python package) is installed 25 | pip: executable=pip3 name=setuptools state=latest 26 | become: true 27 | 28 | - name: Install BigchainDB from PyPI using sudo pip3 install bigchaindb 29 | pip: executable=pip3 name=bigchaindb state=latest 30 | become: true 31 | 32 | - name: Gather facts about the file ~/.bigchaindb 33 | stat: path={{ ansible_env.HOME }}/.bigchaindb 34 | register: home_bigchaindb_config_file 35 | 36 | - name: If ~/.bigchaindb doesn't exist, generate a default BigchainDB config file there 37 | shell: bigchaindb -y configure 38 | when: not home_bigchaindb_config_file.stat.exists 39 | 40 | - name: Look up all processes with 'bigchaindb' in their name 41 | shell: pgrep bigchaindb 42 | ignore_errors: true 43 | changed_when: false 44 | register: pgrep_bigchaindb 45 | 46 | # pgrep_bigchaindb.rc (return code) should be 0 if a bigchaindb process is already running 47 | 48 | - name: Ensure a copy of start_bigchaindb.sh is on the remote host 49 | copy: src=start_bigchaindb.sh dest=/tmp/start_bigchaindb.sh mode=0775 50 | become: true 51 | 52 | # Running BigchainDB in the background from Ansible is tricky, see: 53 | # https://superuser.com/questions/870871/run-a-remote-script-application-in-detached-mode-in-ansible 54 | - name: If BigchainDB isn't running then run it 55 | command: /tmp/start_bigchaindb.sh 56 | async: 45 57 | poll: 0 58 | when: pgrep_bigchaindb.rc != 0 59 | 60 | 61 | # Notes about getting the latest version of pip3: 62 | # 63 | # The first way I tried to get the latest pip3 (commented-out below) didn't work. 64 | # The first task works, but then the attempt to do 65 | # the equivalent of "pip install -U pip" fails. 
"Found existing installation" 66 | # and it didn't want to uninstall it 67 | 68 | # Installing the python3-pip package installs a Python 3 version of pip named pip3 69 | #- name: Ensure the latest python-pip and python3-pip Ubuntu packages are installed 70 | # apt: name={{item}} state=latest update_cache=yes 71 | # become: true 72 | # with_items: 73 | # - python-pip 74 | # - python3-pip 75 | # 76 | #- name: Ensure pip is the latest version 77 | # pip: executable=pip name=pip state=latest 78 | # 79 | #- name: Ensure pip3 is the latest version 80 | # pip: executable=pip3 name=pip state=latest -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | """ 2 | BigchainDB: A Scalable Blockchain Database 3 | 4 | For full docs visit https://bigchaindb.readthedocs.org 5 | 6 | """ 7 | from setuptools import setup, find_packages 8 | 9 | 10 | # get the version 11 | version = {} 12 | with open('bigchaindb/version.py') as fp: 13 | exec(fp.read(), version) 14 | 15 | 16 | # check if setuptools is up to date 17 | def check_setuptools_features(): 18 | import pkg_resources 19 | try: 20 | list(pkg_resources.parse_requirements('foo~=1.0')) 21 | except ValueError: 22 | exit('Your Python distribution comes with an incompatible version ' 23 | 'of `setuptools`. 
Please run:\n' 24 | ' $ pip3 install --upgrade setuptools\n' 25 | 'and then run this command again') 26 | 27 | 28 | check_setuptools_features() 29 | 30 | 31 | tests_require = [ 32 | 'coverage', 33 | 'pep8', 34 | 'pyflakes', 35 | 'pylint', 36 | 'pytest', 37 | 'pytest-cov==2.2.1', 38 | 'pytest-xdist', 39 | 'pytest-flask', 40 | ] 41 | 42 | dev_require = [ 43 | 'ipdb', 44 | 'ipython', 45 | ] 46 | 47 | docs_require = [ 48 | 'Sphinx>=1.3.5', 49 | 'recommonmark>=0.4.0', 50 | 'sphinx-rtd-theme>=0.1.9', 51 | 'sphinxcontrib-napoleon>=0.4.4', 52 | 'sphinxcontrib-httpdomain>=1.5.0', 53 | ] 54 | 55 | benchmarks_require = [ 56 | 'line-profiler==1.0', 57 | ] 58 | 59 | setup( 60 | name='BigchainDB', 61 | version=version['__version__'], 62 | description='BigchainDB: A Scalable Blockchain Database', 63 | long_description=__doc__, 64 | url='https://github.com/BigchainDB/bigchaindb/', 65 | author='BigchainDB Contributors', 66 | author_email='dev@bigchaindb.com', 67 | license='AGPLv3', 68 | zip_safe=False, 69 | 70 | classifiers=[ 71 | 'Development Status :: 3 - Alpha', 72 | 'Intended Audience :: Developers', 73 | 'Topic :: Database', 74 | 'Topic :: Database :: Database Engines/Servers', 75 | 'Topic :: Software Development', 76 | 'Natural Language :: English', 77 | 'License :: OSI Approved :: GNU Affero General Public License v3', 78 | 'Programming Language :: Python :: 3', 79 | 'Programming Language :: Python :: 3.4', 80 | 'Programming Language :: Python :: 3.5', 81 | 'Operating System :: MacOS :: MacOS X', 82 | 'Operating System :: POSIX :: Linux', 83 | ], 84 | 85 | packages=find_packages(exclude=['tests*']), 86 | 87 | entry_points={ 88 | 'console_scripts': [ 89 | 'bigchaindb=bigchaindb.commands.bigchain:main' 90 | ], 91 | 'bigchaindb.consensus': [ 92 | 'default=bigchaindb.consensus:BaseConsensusRules' 93 | ] 94 | }, 95 | install_requires=[ 96 | 'rethinkdb~=2.3', 97 | 'pysha3==0.3', 98 | 'pytz==2015.7', 99 | 'cryptoconditions==0.4.1', 100 | 'statsd==3.2.1', 101 | 
'python-rapidjson==0.0.6', 102 | 'logstats==0.2.1', 103 | 'base58==0.2.2', 104 | 'flask==0.10.1', 105 | 'flask-restful~=0.3.0', 106 | 'requests~=2.9', 107 | 'gunicorn~=19.0', 108 | 'multipipes~=0.1.0', 109 | ], 110 | setup_requires=['pytest-runner'], 111 | tests_require=tests_require, 112 | extras_require={ 113 | 'test': tests_require, 114 | 'dev': dev_require + tests_require + docs_require + benchmarks_require, 115 | 'docs': docs_require, 116 | }, 117 | ) 118 | -------------------------------------------------------------------------------- /bigchaindb/commands/utils.py: -------------------------------------------------------------------------------- 1 | """Utility functions and basic common arguments 2 | for ``argparse.ArgumentParser``. 3 | """ 4 | 5 | import argparse 6 | import multiprocessing as mp 7 | import subprocess 8 | 9 | import rethinkdb as r 10 | 11 | import bigchaindb 12 | from bigchaindb.exceptions import StartupError 13 | from bigchaindb import db 14 | from bigchaindb.version import __version__ 15 | 16 | 17 | def start_rethinkdb(): 18 | """Start RethinkDB as a child process and wait for it to be 19 | available. 20 | 21 | Raises: 22 | ``bigchaindb.exceptions.StartupError`` if RethinkDB cannot 23 | be started. 24 | """ 25 | 26 | proc = subprocess.Popen(['rethinkdb', '--bind', 'all'], 27 | stdout=subprocess.PIPE, 28 | stderr=subprocess.STDOUT, 29 | universal_newlines=True) 30 | 31 | dbname = bigchaindb.config['database']['name'] 32 | line = '' 33 | 34 | for line in proc.stdout: 35 | if line.startswith('Server ready'): 36 | # FIXME: seems like tables are not ready when the server is ready, 37 | # that's why we need to query RethinkDB to know the state 38 | # of the database. This code assumes the tables are ready 39 | # when the database is ready. This seems a valid assumption. 
40 | 41 | try: 42 | conn = db.get_conn() 43 | # Before checking if the db is ready, we need to query 44 | # the server to check if it contains that db 45 | if r.db_list().contains(dbname).run(conn): 46 | r.db(dbname).wait().run(conn) 47 | except (r.ReqlOpFailedError, r.ReqlDriverError) as exc: 48 | raise StartupError('Error waiting for the database `{}` ' 49 | 'to be ready'.format(dbname)) from exc 50 | 51 | return proc 52 | 53 | # We are here when we exhaust the stdout of the process. 54 | # The last `line` contains info about the error. 55 | raise StartupError(line) 56 | 57 | 58 | def start(parser, scope): 59 | """Utility function to execute a subcommand. 60 | 61 | The function will look up in the ``scope`` 62 | if there is a function called ``run_<command>`` 63 | and will run it using ``parser.args`` as first positional argument. 64 | 65 | Args: 66 | parser: an ArgumentParser instance. 67 | scope (dict): map containing (eventually) the functions to be called. 68 | 69 | Raises: 70 | NotImplementedError: if ``scope`` doesn't contain a function called 71 | ``run_<command>``. 72 | """ 73 | args = parser.parse_args() 74 | 75 | if not args.command: 76 | parser.print_help() 77 | return 78 | 79 | # look up in the current scope for a function called 'run_<command>' 80 | # replacing all the dashes '-' with the lowercase character '_' 81 | func = scope.get('run_' + args.command.replace('-', '_')) 82 | 83 | # if no command has been found, raise a `NotImplementedError` 84 | if not func: 85 | raise NotImplementedError('Command `{}` not yet implemented'.
86 | format(args.command)) 87 | 88 | args.multiprocess = getattr(args, 'multiprocess', False) 89 | 90 | if args.multiprocess is False: 91 | args.multiprocess = 1 92 | elif args.multiprocess is None: 93 | args.multiprocess = mp.cpu_count() 94 | 95 | func(args) 96 | 97 | 98 | base_parser = argparse.ArgumentParser(add_help=False, prog='bigchaindb') 99 | 100 | base_parser.add_argument('-c', '--config', 101 | help='Specify the location of the configuration file ' 102 | '(use "-" for stdout)') 103 | 104 | base_parser.add_argument('-y', '--yes', '--yes-please', 105 | action='store_true', 106 | help='Assume "yes" as answer to all prompts and run ' 107 | 'non-interactively') 108 | 109 | base_parser.add_argument('-v', '--version', 110 | action='version', 111 | version='%(prog)s {}'.format(__version__)) 112 | -------------------------------------------------------------------------------- /bigchaindb/web/views/transactions.py: -------------------------------------------------------------------------------- 1 | """This module provides the blueprint for some basic API endpoints. 2 | 3 | For more information please refer to the documentation on ReadTheDocs: 4 | - https://bigchaindb.readthedocs.io/en/latest/drivers-clients/http-client-server-api.html 5 | """ 6 | from flask import current_app, request, Blueprint 7 | from flask_restful import Resource, Api 8 | 9 | import bigchaindb 10 | from bigchaindb import util 11 | from bigchaindb.web.views.base import make_error 12 | 13 | 14 | transaction_views = Blueprint('transaction_views', __name__) 15 | transaction_api = Api(transaction_views) 16 | 17 | 18 | # Unfortunately I cannot find a reference to this decorator. 
19 | # This answer on SO is quite useful tho: 20 | # - http://stackoverflow.com/a/13432373/597097 21 | @transaction_views.record 22 | def record(state): 23 | """This function checks if the blueprint can be initialized 24 | with the provided state.""" 25 | 26 | bigchain_pool = state.app.config.get('bigchain_pool') 27 | monitor = state.app.config.get('monitor') 28 | 29 | if bigchain_pool is None: 30 | raise Exception('This blueprint expects you to provide ' 31 | 'a pool of Bigchain instances called `bigchain_pool`') 32 | 33 | if monitor is None: 34 | raise ValueError('This blueprint expects you to provide ' 35 | 'a monitor instance to record system ' 36 | 'performance.') 37 | 38 | 39 | class TransactionApi(Resource): 40 | def get(self, tx_id): 41 | """API endpoint to get details about a transaction. 42 | 43 | Args: 44 | tx_id (str): the id of the transaction. 45 | 46 | Return: 47 | A JSON string containing the data about the transaction. 48 | """ 49 | pool = current_app.config['bigchain_pool'] 50 | 51 | with pool() as bigchain: 52 | tx = bigchain.get_transaction(tx_id) 53 | 54 | if not tx: 55 | return make_error(404) 56 | 57 | return tx 58 | 59 | 60 | class TransactionStatusApi(Resource): 61 | def get(self, tx_id): 62 | """API endpoint to get details about the status of a transaction. 63 | 64 | Args: 65 | tx_id (str): the id of the transaction. 66 | 67 | Return: 68 | A ``dict`` in the format ``{'status': }``, where ```` 69 | is one of "valid", "invalid", "undecided", "backlog". 70 | """ 71 | 72 | pool = current_app.config['bigchain_pool'] 73 | 74 | with pool() as bigchain: 75 | status = bigchain.get_status(tx_id) 76 | 77 | if not status: 78 | return make_error(404) 79 | 80 | return {'status': status} 81 | 82 | 83 | class TransactionListApi(Resource): 84 | def post(self): 85 | """API endpoint to push transactions to the Federation. 86 | 87 | Return: 88 | A ``dict`` containing the data about the transaction. 
89 | """ 90 | pool = current_app.config['bigchain_pool'] 91 | monitor = current_app.config['monitor'] 92 | 93 | # `force` will try to format the body of the POST request even if the `content-type` header is not 94 | # set to `application/json` 95 | tx = request.get_json(force=True) 96 | 97 | with pool() as bigchain: 98 | if tx['transaction']['operation'] == 'CREATE': 99 | tx = util.transform_create(tx) 100 | tx = bigchain.consensus.sign_transaction(tx, private_key=bigchain.me_private) 101 | 102 | if not bigchain.is_valid_transaction(tx): 103 | return make_error(400, 'Invalid transaction') 104 | 105 | with monitor.timer('write_transaction', rate=bigchaindb.config['statsd']['rate']): 106 | bigchain.write_transaction(tx) 107 | 108 | return tx 109 | 110 | transaction_api.add_resource(TransactionApi, 111 | '/transactions/', 112 | strict_slashes=False) 113 | transaction_api.add_resource(TransactionStatusApi, 114 | '/transactions//status', 115 | strict_slashes=False) 116 | transaction_api.add_resource(TransactionListApi, 117 | '/transactions', 118 | strict_slashes=False) 119 | -------------------------------------------------------------------------------- /bigchaindb/client.py: -------------------------------------------------------------------------------- 1 | import requests 2 | 3 | import bigchaindb 4 | from bigchaindb import config_utils 5 | from bigchaindb import exceptions 6 | from bigchaindb import crypto 7 | 8 | 9 | class Client: 10 | """Client for BigchainDB. 11 | 12 | A Client is initialized with a keypair and is able to create, sign, and submit transactions to a Node 13 | in the Federation. At the moment, a Client instance is bounded to a specific ``host`` in the Federation. 14 | In the future, a Client might connect to >1 hosts. 
15 | """ 16 | 17 | def __init__(self, public_key=None, private_key=None, api_endpoint=None, 18 | consensus_plugin=None): 19 | """Initialize the Client instance 20 | 21 | There are three ways in which the Client instance can get its parameters. 22 | The order by which the parameters are chosen are: 23 | 24 | 1. Setting them by passing them to the `__init__` method itself. 25 | 2. Setting them as environment variables 26 | 3. Reading them from the `config.json` file. 27 | 28 | Args: 29 | public_key (str): the base58 encoded public key for the ED25519 curve. 30 | private_key (str): the base58 encoded private key for the ED25519 curve. 31 | api_endpoint (str): a URL where rethinkdb is running. 32 | format: scheme://hostname:port 33 | consensus_plugin (str): the registered name of your installed 34 | consensus plugin. The `core` plugin is built into BigchainDB; 35 | others must be installed via pip. 36 | """ 37 | 38 | config_utils.autoconfigure() 39 | 40 | self.public_key = public_key or bigchaindb.config['keypair']['public'] 41 | self.private_key = private_key or bigchaindb.config['keypair']['private'] 42 | self.api_endpoint = api_endpoint or bigchaindb.config['api_endpoint'] 43 | self.consensus = config_utils.load_consensus_plugin(consensus_plugin) 44 | 45 | if not self.public_key or not self.private_key: 46 | raise exceptions.KeypairNotFoundException() 47 | 48 | def create(self, payload=None): 49 | """Issue a transaction to create an asset. 50 | 51 | Args: 52 | payload (dict): the payload for the transaction. 53 | 54 | Return: 55 | The transaction pushed to the Federation. 
56 | """ 57 | 58 | tx = self.consensus.create_transaction( 59 | owner_before=self.public_key, 60 | owner_after=self.public_key, 61 | tx_input=None, 62 | operation='CREATE', 63 | payload=payload) 64 | 65 | signed_tx = self.consensus.sign_transaction( 66 | tx, private_key=self.private_key) 67 | return self._push(signed_tx) 68 | 69 | def transfer(self, owner_after, tx_input, payload=None): 70 | """Issue a transaction to transfer an asset. 71 | 72 | Args: 73 | owner_after (str): the public key of the new owner 74 | tx_input (str): the id of the transaction to use as input 75 | payload (dict, optional): the payload for the transaction. 76 | 77 | Return: 78 | The transaction pushed to the Federation. 79 | """ 80 | 81 | tx = self.consensus.create_transaction( 82 | owner_before=self.public_key, 83 | owner_after=owner_after, 84 | tx_input=tx_input, 85 | operation='TRANSFER', 86 | payload=payload) 87 | 88 | signed_tx = self.consensus.sign_transaction( 89 | tx, private_key=self.private_key) 90 | return self._push(signed_tx) 91 | 92 | def _push(self, tx): 93 | """Submit a transaction to the Federation. 94 | 95 | Args: 96 | tx (dict): the transaction to be pushed to the Federation. 97 | 98 | Return: 99 | The transaction pushed to the Federation. 100 | """ 101 | 102 | res = requests.post(self.api_endpoint + '/transactions/', json=tx) 103 | return res.json() 104 | 105 | 106 | def temp_client(): 107 | """Create a new temporary client. 108 | 109 | Return: 110 | A client initialized with a keypair generated on the fly. 
111 | """ 112 | 113 | private_key, public_key = crypto.generate_key_pair() 114 | return Client(private_key=private_key, public_key=public_key, api_endpoint=bigchaindb.config['api_endpoint']) 115 | 116 | -------------------------------------------------------------------------------- /bigchaindb/db/utils.py: -------------------------------------------------------------------------------- 1 | """Utils to initialize and drop the database.""" 2 | 3 | import logging 4 | 5 | import rethinkdb as r 6 | 7 | import bigchaindb 8 | from bigchaindb import exceptions 9 | 10 | 11 | logger = logging.getLogger(__name__) 12 | 13 | 14 | def get_conn(): 15 | '''Get the connection to the database.''' 16 | 17 | return r.connect(bigchaindb.config['database']['host'], 18 | bigchaindb.config['database']['port']) 19 | 20 | 21 | def get_database_name(): 22 | return bigchaindb.config['database']['name'] 23 | 24 | 25 | def create_database(conn, dbname): 26 | if r.db_list().contains(dbname).run(conn): 27 | raise exceptions.DatabaseAlreadyExists('Database `{}` already exists'.format(dbname)) 28 | 29 | logger.info('Create database `%s`.', dbname) 30 | r.db_create(dbname).run(conn) 31 | 32 | 33 | def create_table(conn, dbname, table_name): 34 | logger.info('Create `%s` table.', table_name) 35 | # create the table 36 | r.db(dbname).table_create(table_name).run(conn) 37 | 38 | 39 | def create_bigchain_secondary_index(conn, dbname): 40 | logger.info('Create `bigchain` secondary index.') 41 | # to order blocks by timestamp 42 | r.db(dbname).table('bigchain')\ 43 | .index_create('block_timestamp', r.row['block']['timestamp'])\ 44 | .run(conn) 45 | # to query the bigchain for a transaction id 46 | r.db(dbname).table('bigchain')\ 47 | .index_create('transaction_id', 48 | r.row['block']['transactions']['id'], multi=True)\ 49 | .run(conn) 50 | # secondary index for payload data by UUID 51 | r.db(dbname).table('bigchain')\ 52 | .index_create('payload_uuid', 53 | 
r.row['block']['transactions']['transaction']['data']['uuid'], multi=True)\ 54 | .run(conn) 55 | 56 | # wait for rethinkdb to finish creating secondary indexes 57 | r.db(dbname).table('bigchain').index_wait().run(conn) 58 | 59 | 60 | def create_backlog_secondary_index(conn, dbname): 61 | logger.info('Create `backlog` secondary index.') 62 | # to order transactions by timestamp 63 | r.db(dbname).table('backlog')\ 64 | .index_create('transaction_timestamp', 65 | r.row['transaction']['timestamp'])\ 66 | .run(conn) 67 | # compound index to read transactions from the backlog per assignee 68 | r.db(dbname).table('backlog')\ 69 | .index_create('assignee__transaction_timestamp', 70 | [r.row['assignee'], r.row['transaction']['timestamp']])\ 71 | .run(conn) 72 | 73 | # wait for rethinkdb to finish creating secondary indexes 74 | r.db(dbname).table('backlog').index_wait().run(conn) 75 | 76 | 77 | def create_votes_secondary_index(conn, dbname): 78 | logger.info('Create `votes` secondary index.') 79 | # compound index to order votes by block id and node 80 | r.db(dbname).table('votes')\ 81 | .index_create('block_and_voter', 82 | [r.row['vote']['voting_for_block'], 83 | r.row['node_pubkey']])\ 84 | .run(conn) 85 | 86 | # wait for rethinkdb to finish creating secondary indexes 87 | r.db(dbname).table('votes').index_wait().run(conn) 88 | 89 | 90 | def init(): 91 | # Try to access the keypair, throws an exception if it does not exist 92 | b = bigchaindb.Bigchain() 93 | 94 | conn = get_conn() 95 | dbname = get_database_name() 96 | create_database(conn, dbname) 97 | 98 | table_names = ['bigchain', 'backlog', 'votes'] 99 | for table_name in table_names: 100 | create_table(conn, dbname, table_name) 101 | create_bigchain_secondary_index(conn, dbname) 102 | create_backlog_secondary_index(conn, dbname) 103 | create_votes_secondary_index(conn, dbname) 104 | 105 | logger.info('Create genesis block.') 106 | b.create_genesis_block() 107 | logger.info('Done, have fun!') 108 | 109 | 110 | def 
drop(assume_yes=False): 111 | conn = get_conn() 112 | dbname = bigchaindb.config['database']['name'] 113 | 114 | if assume_yes: 115 | response = 'y' 116 | else: 117 | response = input('Do you want to drop `{}` database? [y/n]: '.format(dbname)) 118 | 119 | if response == 'y': 120 | try: 121 | logger.info('Drop database `%s`', dbname) 122 | r.db_drop(dbname).run(conn) 123 | logger.info('Done.') 124 | except r.ReqlOpFailedError: 125 | raise exceptions.DatabaseDoesNotExist('Database `{}` does not exist'.format(dbname)) 126 | else: 127 | logger.info('Drop aborted') 128 | -------------------------------------------------------------------------------- /bigchaindb/pipelines/vote.py: -------------------------------------------------------------------------------- 1 | """This module takes care of all the logic related to block voting. 2 | 3 | The logic is encapsulated in the ``Vote`` class, while the sequence 4 | of actions to do on transactions is specified in the ``create_pipeline`` 5 | function. 6 | """ 7 | 8 | from collections import Counter 9 | 10 | from multipipes import Pipeline, Node 11 | 12 | from bigchaindb import config_utils, exceptions 13 | from bigchaindb.pipelines.utils import ChangeFeed 14 | from bigchaindb import Bigchain 15 | 16 | 17 | def create_invalid_tx(): 18 | """Create and return an invalid transaction. 19 | 20 | The transaction is invalid because it's missing the signature.""" 21 | 22 | b = Bigchain() 23 | tx = b.create_transaction(b.me, b.me, None, 'CREATE') 24 | return tx 25 | 26 | 27 | class Vote: 28 | """This class encapsulates the logic to vote on blocks. 29 | 30 | Note: 31 | Methods of this class will be executed in different processes. 
32 | """ 33 | 34 | def __init__(self): 35 | """Initialize the Block voter.""" 36 | 37 | # Since cannot share a connection to RethinkDB using multiprocessing, 38 | # we need to create a temporary instance of BigchainDB that we use 39 | # only to query RethinkDB 40 | last_voted = Bigchain().get_last_voted_block() 41 | self.consensus = config_utils.load_consensus_plugin() 42 | 43 | # This is the Bigchain instance that will be "shared" (aka: copied) 44 | # by all the subprocesses 45 | self.bigchain = Bigchain() 46 | self.last_voted_id = last_voted['id'] 47 | 48 | self.counters = Counter() 49 | self.validity = {} 50 | 51 | self.invalid_dummy_tx = create_invalid_tx() 52 | 53 | def validate_block(self, block): 54 | if not self.bigchain.has_previous_vote(block): 55 | try: 56 | self.consensus.validate_block(self.bigchain, block) 57 | valid = True 58 | except (exceptions.InvalidHash, 59 | exceptions.OperationError, 60 | exceptions.InvalidSignature) as e: 61 | valid = False 62 | return block, valid 63 | 64 | def ungroup(self, block, valid): 65 | """Given a block, ungroup the transactions in it. 66 | 67 | Args: 68 | block (dict): the block to process 69 | 70 | Returns: 71 | ``None`` if the block has been already voted, an iterator that 72 | yields a transaction, block id, and the total number of 73 | transactions contained in the block otherwise. 74 | """ 75 | 76 | # XXX: if a block is invalid we should skip the `validate_tx` step, 77 | # but since we are in a pipeline we cannot just jump to another 78 | # function. Hackish solution: generate an invalid transaction 79 | # and propagate it to the next steps of the pipeline 80 | if valid: 81 | num_tx = len(block['block']['transactions']) 82 | for tx in block['block']['transactions']: 83 | yield tx, block['id'], num_tx 84 | else: 85 | yield self.invalid_dummy_tx, block['id'], 1 86 | 87 | def validate_tx(self, tx, block_id, num_tx): 88 | """Validate a transaction. 
89 | 90 | Args: 91 | tx (dict): the transaction to validate 92 | block_id (str): the id of block containing the transaction 93 | num_tx (int): the total number of transactions to process 94 | 95 | Returns: 96 | Three values are returned, the validity of the transaction, 97 | ``block_id``, ``num_tx``. 98 | """ 99 | return bool(self.bigchain.is_valid_transaction(tx)), block_id, num_tx 100 | 101 | def vote(self, tx_validity, block_id, num_tx): 102 | """Collect the validity of transactions and cast a vote when ready. 103 | 104 | Args: 105 | tx_validity (bool): the validity of the transaction 106 | block_id (str): the id of block containing the transaction 107 | num_tx (int): the total number of transactions to process 108 | 109 | Returns: 110 | None, or a vote if a decision has been reached. 111 | """ 112 | 113 | self.counters[block_id] += 1 114 | self.validity[block_id] = tx_validity and self.validity.get(block_id, 115 | True) 116 | 117 | if self.counters[block_id] == num_tx: 118 | vote = self.bigchain.vote(block_id, 119 | self.last_voted_id, 120 | self.validity[block_id]) 121 | self.last_voted_id = block_id 122 | del self.counters[block_id] 123 | del self.validity[block_id] 124 | return vote 125 | 126 | def write_vote(self, vote): 127 | """Write vote to the database. 128 | 129 | Args: 130 | vote: the vote to write. 
def initial():
    """Return the blocks this node has not yet voted on.

    Used as the prefeed of the changefeed, so blocks written while the
    voter was down are still processed on startup.
    """
    b = Bigchain()
    rs = b.get_unvoted_blocks()
    return rs


def get_changefeed():
    """Create and return the changefeed for the bigchain table."""

    return ChangeFeed('bigchain', operation=ChangeFeed.INSERT, prefeed=initial())


def create_pipeline():
    """Create and return the pipeline of operations to be distributed
    on different processes."""

    voter = Vote()

    vote_pipeline = Pipeline([
        Node(voter.validate_block),
        Node(voter.ungroup),
        # fraction_of_cores=1: let multipipes scale this step to the
        # available cores — confirm exact semantics against multipipes docs.
        Node(voter.validate_tx, fraction_of_cores=1),
        Node(voter.vote),
        Node(voter.write_vote)
    ])

    return vote_pipeline


def start():
    """Create, start, and return the vote pipeline."""

    pipeline = create_pipeline()
    pipeline.setup(indata=get_changefeed())
    pipeline.start()
    return pipeline
25 | """ 26 | 27 | def __init__(self): 28 | """Initialize the Block creator""" 29 | self.bigchain = Bigchain() 30 | self.txs = [] 31 | 32 | def filter_tx(self, tx): 33 | """Filter a transaction. 34 | 35 | Args: 36 | tx (dict): the transaction to process. 37 | 38 | Returns: 39 | The transaction if assigned to the current node, 40 | ``None`` otherwise. 41 | """ 42 | 43 | if tx['assignee'] == self.bigchain.me: 44 | tx.pop('assignee') 45 | tx.pop('assignment_timestamp') 46 | return tx 47 | 48 | def validate_tx(self, tx): 49 | """Validate a transaction. 50 | 51 | Also checks if the transaction already exists in the blockchain. If it 52 | does, or it's invalid, it's deleted from the backlog immediately. 53 | 54 | Args: 55 | tx (dict): the transaction to validate. 56 | 57 | Returns: 58 | The transaction if valid, ``None`` otherwise. 59 | """ 60 | if self.bigchain.transaction_exists(tx['id']): 61 | # if the transaction already exists, we must check whether 62 | # it's in a valid or undecided block 63 | tx, status = self.bigchain.get_transaction(tx['id'], 64 | include_status=True) 65 | if status == self.bigchain.TX_VALID \ 66 | or status == self.bigchain.TX_UNDECIDED: 67 | # if the tx is already in a valid or undecided block, 68 | # then it no longer should be in the backlog, or added 69 | # to a new block. We can delete and drop it. 70 | r.table('backlog').get(tx['id']) \ 71 | .delete(durability='hard') \ 72 | .run(self.bigchain.conn) 73 | return None 74 | 75 | tx_validated = self.bigchain.is_valid_transaction(tx) 76 | if tx_validated: 77 | return tx 78 | else: 79 | # if the transaction is not valid, remove it from the 80 | # backlog 81 | r.table('backlog').get(tx['id']) \ 82 | .delete(durability='hard') \ 83 | .run(self.bigchain.conn) 84 | return None 85 | 86 | def create(self, tx, timeout=False): 87 | """Create a block. 
88 | 89 | This method accumulates transactions to put in a block and outputs 90 | a block when one of the following conditions is true: 91 | - the size limit of the block has been reached, or 92 | - a timeout happened. 93 | 94 | Args: 95 | tx (dict): the transaction to validate, might be None if 96 | a timeout happens. 97 | timeout (bool): ``True`` if a timeout happened 98 | (Default: ``False``). 99 | 100 | Returns: 101 | The block, if a block is ready, or ``None``. 102 | """ 103 | if tx: 104 | self.txs.append(tx) 105 | if len(self.txs) == 1000 or (timeout and self.txs): 106 | block = self.bigchain.create_block(self.txs) 107 | self.txs = [] 108 | return block 109 | 110 | def write(self, block): 111 | """Write the block to the Database. 112 | 113 | Args: 114 | block (dict): the block of transactions to write to the database. 115 | 116 | Returns: 117 | The block. 118 | """ 119 | logger.info('Write new block %s with %s transactions', 120 | block['id'], 121 | len(block['block']['transactions'])) 122 | self.bigchain.write_block(block) 123 | return block 124 | 125 | def delete_tx(self, block): 126 | """Delete transactions. 127 | 128 | Args: 129 | block (dict): the block containg the transactions to delete. 130 | 131 | Returns: 132 | The block. 
def initial():
    """Return old transactions from the backlog.

    Only transactions assigned to this node (``b.me``) are returned,
    ordered by assignment timestamp, so a restarted node picks up
    pending work.
    """

    b = Bigchain()

    rs = r.table('backlog')\
        .between([b.me, r.minval],
                 [b.me, r.maxval],
                 index='assignee__transaction_timestamp')\
        .order_by(index=r.asc('assignee__transaction_timestamp'))\
        .run(b.conn)
    return rs


def get_changefeed():
    """Create and return the changefeed for the backlog.

    Listens for both inserts and updates, prefed with the transactions
    already sitting in the backlog (see ``initial``).
    """

    return ChangeFeed('backlog', ChangeFeed.INSERT | ChangeFeed.UPDATE,
                      prefeed=initial())


def create_pipeline():
    """Create and return the pipeline of operations to be distributed
    on different processes."""

    block = Block()

    block_pipeline = Pipeline([
        Node(block.filter_tx),
        # fraction_of_cores=1: let multipipes scale this step to the
        # available cores — confirm exact semantics against multipipes docs.
        Node(block.validate_tx, fraction_of_cores=1),
        # timeout triggers a flush of a partially filled block (see create).
        Node(block.create, timeout=1),
        Node(block.write),
        Node(block.delete_tx),
    ])

    return block_pipeline


def start():
    """Create, start, and return the block pipeline."""

    pipeline = create_pipeline()
    pipeline.setup(indata=get_changefeed())
    pipeline.start()
    return pipeline
    @staticmethod
    @abstractmethod
    def validate_transaction(bigchain, transaction):
        """Validate a transaction.

        Args:
            bigchain (Bigchain): an instantiated ``bigchaindb.Bigchain`` object.
            transaction (dict): transaction to validate.

        Returns:
            The transaction if the transaction is valid else it raises an
            exception describing the reason why the transaction is invalid.

        Raises:
            Descriptive exceptions indicating the reason the transaction failed.
            See the `exceptions` module for bigchain-native error classes.
        """

    @staticmethod
    @abstractmethod
    def validate_block(bigchain, block):
        """Validate a block.

        Args:
            bigchain (Bigchain): an instantiated ``bigchaindb.Bigchain`` object.
            block (dict): block to validate.

        Returns:
            The block if the block is valid else it raises an exception
            describing the reason why the block is invalid.

        Raises:
            Descriptive exceptions indicating the reason the block failed.
            See the `exceptions` module for bigchain-native error classes.
        """

    @staticmethod
    @abstractmethod
    def create_transaction(*args, **kwargs):
        """Create a new transaction.

        The base implementation (``BaseConsensusRules``) delegates to
        ``bigchaindb.util.create_tx``.

        Args:
            The signature of this method is left to plugin authors to decide.

        Returns:
            dict: newly constructed transaction.
        """

    @staticmethod
    @abstractmethod
    def sign_transaction(transaction, *args, **kwargs):
        """Sign a transaction.

        The base implementation (``BaseConsensusRules``) delegates to
        ``bigchaindb.util.sign_tx``.

        Args:
            transaction (dict): transaction to sign.
            any other arguments are left to plugin authors to decide.

        Returns:
            dict: transaction with any signatures applied.
        """

    @staticmethod
    @abstractmethod
    def validate_fulfillments(signed_transaction):
        """Validate the fulfillments of a transaction.

        Args:
            signed_transaction (dict): signed transaction to verify

        Returns:
            bool: True if the transaction's required fulfillments are present
            and correct, False otherwise.
        """
80 | 81 | Args: 82 | signed_transaction (dict): signed transaction to verify 83 | 84 | Returns: 85 | bool: True if the transaction's required fulfillments are present 86 | and correct, False otherwise. 87 | """ 88 | 89 | @abstractmethod 90 | def verify_vote_signature(block, signed_vote): 91 | """Verify a cast vote. 92 | 93 | Args: 94 | block (dict): block under election 95 | signed_vote (dict): signed vote to verify 96 | 97 | Returns: 98 | bool: True if the votes's required signature data is present 99 | and correct, False otherwise. 100 | """ 101 | 102 | 103 | class BaseConsensusRules(AbstractConsensusRules): 104 | """Base consensus rules for Bigchain. 105 | 106 | This class can be copied or overridden to write your own consensus rules! 107 | """ 108 | 109 | @staticmethod 110 | def validate_transaction(bigchain, transaction): 111 | """Validate a transaction. 112 | 113 | Args: 114 | bigchain (Bigchain): an instantiated bigchaindb.Bigchain object. 115 | transaction (dict): transaction to validate. 116 | 117 | Returns: 118 | The transaction if the transaction is valid else it raises an 119 | exception describing the reason why the transaction is invalid. 
    @staticmethod
    def validate_transaction(bigchain, transaction):
        """Validate a transaction.

        Args:
            bigchain (Bigchain): an instantiated bigchaindb.Bigchain object.
            transaction (dict): transaction to validate.

        Returns:
            The transaction if the transaction is valid else it raises an
            exception describing the reason why the transaction is invalid.

        Raises:
            OperationError: if the transaction operation is not supported
            TransactionDoesNotExist: if the input of the transaction is not found
            TransactionOwnerError: if the new transaction is using an input it doesn't own
            DoubleSpend: if the transaction is a double spend
            InvalidHash: if the hash of the transaction is wrong
            InvalidSignature: if the signature of the transaction is wrong
        """

        # If the operation is CREATE the transaction should have no inputs and
        # should be signed by a federation node. GENESIS transactions follow
        # the same rules as CREATE here.
        if transaction['transaction']['operation'] in ('CREATE', 'GENESIS'):
            # TODO: for now lets assume a CREATE transaction only has one fulfillment
            if transaction['transaction']['fulfillments'][0]['input']:
                raise ValueError('A CREATE operation has no inputs')
            # TODO: for now lets assume a CREATE transaction only has one owner_before
            if transaction['transaction']['fulfillments'][0]['owners_before'][0] not in (
                    bigchain.nodes_except_me + [bigchain.me]):
                raise exceptions.OperationError(
                    'Only federation nodes can use the operation `CREATE`')

        else:
            # check if the input exists, is owned by the owner_before
            if not transaction['transaction']['fulfillments']:
                raise ValueError('Transaction contains no fulfillments')

            # check inputs
            for fulfillment in transaction['transaction']['fulfillments']:
                if not fulfillment['input']:
                    raise ValueError('Only `CREATE` transactions can have null inputs')
                tx_input = bigchain.get_transaction(fulfillment['input']['txid'])

                if not tx_input:
                    raise exceptions.TransactionDoesNotExist(
                        'input `{}` does not exist in the bigchain'.format(
                            fulfillment['input']['txid']))
                # TODO: check if current owners own tx_input (maybe checked by InvalidSignature)
                # check if the input was already spent by a transaction other than
                # this one. Spending the same input twice within the same
                # transaction id is tolerated (idempotent re-validation).
                spent = bigchain.get_spent(fulfillment['input'])
                if spent and spent['id'] != transaction['id']:
                    raise exceptions.DoubleSpend(
                        'input `{}` was already spent'.format(fulfillment['input']))

        # Check hash of the transaction
        calculated_hash = util.get_hash_data(transaction)
        if calculated_hash != transaction['id']:
            raise exceptions.InvalidHash()

        # Check fulfillments
        if not util.validate_fulfillments(transaction):
            raise exceptions.InvalidSignature()

        return transaction

    @staticmethod
    def validate_block(bigchain, block):
        """Validate a block.

        Args:
            bigchain (Bigchain): an instantiated bigchaindb.Bigchain object.
            block (dict): block to validate.

        Returns:
            The block if the block is valid else it raises an exception
            describing the reason why the block is invalid.

        Raises:
            InvalidHash: if the hash of the block is wrong.
            OperationError: if the block creator is not a federation node.
            InvalidSignature: if the block signature does not verify.
        """

        # Check if current hash is correct
        calculated_hash = crypto.hash_data(util.serialize(block['block']))
        if calculated_hash != block['id']:
            raise exceptions.InvalidHash()

        # Check if the block was created by a federation node
        if block['block']['node_pubkey'] not in (bigchain.nodes_except_me + [bigchain.me]):
            raise exceptions.OperationError('Only federation nodes can create blocks')

        # Check if block signature is valid
        verifying_key = crypto.VerifyingKey(block['block']['node_pubkey'])
        if not verifying_key.verify(util.serialize(block['block']), block['signature']):
            raise exceptions.InvalidSignature('Invalid block signature')

        return block
    @staticmethod
    def create_transaction(owner_before, owner_after, tx_input, operation,
                           payload=None):
        """Create a new transaction

        Refer to the documentation of ``bigchaindb.util.create_tx``
        """

        return util.create_tx(owner_before, owner_after, tx_input, operation,
                              payload)

    @staticmethod
    def sign_transaction(transaction, private_key, bigchain=None):
        """Sign a transaction

        Refer to the documentation of ``bigchaindb.util.sign_tx``
        """

        return util.sign_tx(transaction, private_key, bigchain=bigchain)

    @staticmethod
    def validate_fulfillments(signed_transaction):
        """Validate the fulfillments of a transaction.

        Refer to the documentation of ``bigchaindb.util.validate_fulfillments``
        """

        return util.validate_fulfillments(signed_transaction)

    @staticmethod
    def verify_vote_signature(block, signed_vote):
        """Verify the signature of a vote.

        Refer to the documentation of ``bigchaindb.util.verify_vote_signature``
        """

        return util.verify_vote_signature(block, signed_vote)
def map_leafs(func, mapping):
    """Return a copy of ``mapping`` with ``func`` applied to every leaf.

    ``func`` is called as ``func(value, path=path)`` where ``path`` is the
    list of keys leading to the leaf. The input mapping is not modified.

    Args:
        func (callable): function applied to each non-mapping value.
        mapping (dict): (possibly nested) mapping to transform.

    Returns:
        dict: a deep copy of ``mapping`` with transformed leaf values.
    """
    # `collections.Mapping` is a deprecated alias removed in Python 3.10;
    # use the abc module explicitly.
    from collections.abc import Mapping

    def _inner(node, path):
        for key, val in node.items():
            if isinstance(val, Mapping):
                _inner(val, path + [key])
            else:
                node[key] = func(val, path=path + [key])
        return node

    return _inner(copy.deepcopy(mapping), [])


# Thanks Alex <3
# http://stackoverflow.com/a/3233356/597097
def update(d, u):
    """Recursively update a mapping (i.e. a dict, list, set, or tuple).

    Conceptually, d and u are two sets trees (with nodes and edges).
    This function goes through all the nodes of u. For each node in u,
    if d doesn't have that node yet, then this function adds the node from u,
    otherwise this function overwrites the node already in d with u's node.

    Args:
        d (mapping): The mapping to overwrite and add to.
        u (mapping): The mapping to read for changes.

    Returns:
        mapping: An updated version of d (updated by u).
    """
    # `collections.Mapping` is a deprecated alias removed in Python 3.10;
    # use the abc module explicitly.
    from collections.abc import Mapping

    for k, v in u.items():
        if isinstance(v, Mapping):
            d[k] = update(d.get(k, {}), v)
        else:
            d[k] = v
    return d
def update_types(config, reference, list_sep=':'):
    """Return a new configuration where all the values types
    are aligned with the ones in the default configuration.

    Args:
        config (dict): configuration whose (typically string) values need
            to be coerced.
        reference (dict): configuration providing the target types,
            usually the default configuration.
        list_sep (str): separator used to split a string value into a list
            when the reference value is a list. Defaults to ``':'``.

    Returns:
        dict: a copy of ``config`` with coerced leaf values.
    """

    def _coerce(current, value):
        # Coerce a value to the `current` type.
        try:
            # First we try to apply current to the value, since it
            # might be a function
            return current(value)
        except TypeError:
            # Then we check if current is a list AND if the value
            # is a string.
            if isinstance(current, list) and isinstance(value, str):
                # If so, we use the colon as the separator
                return value.split(list_sep)

            try:
                # If we are here, we should try to apply the type
                # of `current` to the value
                return type(current)(value)
            except TypeError:
                # Worst case scenario we return the value itself.
                return value

    def _update_type(value, path):
        # Walk `reference` down the same key path; unknown keys pass
        # through untouched.
        current = reference

        for elem in path:
            try:
                current = current[elem]
            except KeyError:
                return value

        return _coerce(current, value)

    return map_leafs(_update_type, config)
def write_config(config, filename=None):
    """Persist a configuration mapping to disk as JSON.

    Args:
        config (dict): a dictionary with the configuration to load.
        filename (str): the name of the file that will store the new
            configuration. Defaults to ``None``. If ``None``, the HOME of
            the current user and the string ``.bigchaindb`` will be used.
    """
    target = filename or CONFIG_DEFAULT_PATH

    with open(target, 'w') as handle:
        json.dump(config, handle, indent=4)
@lru_cache()
def load_consensus_plugin(name=None):
    """Find and load the chosen consensus plugin.

    Args:
        name (string): the name of the entry_point, as advertised in the
            setup.py of the providing package. Falls back to the
            ``consensus_plugin`` config value, then to ``'default'``.

    Returns:
        an uninstantiated subclass of ``bigchaindb.consensus.AbstractConsensusRules``

    Raises:
        ResolutionError: if no plugin is registered under the
            ``bigchaindb.consensus`` entry-point group with that name.
        TypeError: if the loaded plugin is not a subclass of
            ``AbstractConsensusRules``.
    """
    if not name:
        name = bigchaindb.config.get('consensus_plugin', 'default')

    # TODO: This will return the first plugin with group `bigchaindb.consensus`
    # and name `name` in the active WorkingSet.
    # We should probably support Requirements specs in the config, e.g.
    # consensus_plugin: 'my-plugin-package==0.0.1;default'
    plugin = None
    for entry_point in iter_entry_points('bigchaindb.consensus', name):
        plugin = entry_point.load()

    # No matching entry_point found
    if not plugin:
        raise ResolutionError(
            'No plugin found in group `bigchaindb.consensus` with name `{}`'.
            format(name))

    # Is this strictness desireable?
    # It will probably reduce developer headaches in the wild.
    if not issubclass(plugin, (AbstractConsensusRules)):
        raise TypeError("object of type '{}' does not implement `bigchaindb."
                        "consensus.AbstractConsensusRules`".format(type(plugin)))

    return plugin
# We need this because `input` always prints on stdout, while it should print
# to stderr. It's a very old bug, check it out here:
# - https://bugs.python.org/issue1927
def input(prompt):
    """Prompt on stderr instead of stdout (works around bpo-1927)."""
    print(prompt, end='', file=sys.stderr)
    return builtins.input()


def run_show_config(args):
    """Show the current configuration"""
    # TODO Proposal: remove the "hidden" configuration. Only show config. If
    # the system needs to be configured, then display information on how to
    # configure the system.
    bigchaindb.config_utils.autoconfigure(filename=args.config, force=True)
    config = copy.deepcopy(bigchaindb.config)
    del config['CONFIGURED']
    # Never echo the private key; print a fixed-length placeholder instead.
    private_key = config['keypair']['private']
    config['keypair']['private'] = 'x' * 45 if private_key else None
    print(json.dumps(config, indent=4, sort_keys=True))


def run_configure(args, skip_if_exists=False):
    """Run a script to configure the current node.

    Args:
        args: parsed CLI arguments; ``args.config`` may point at the
            config file ('-' means stdout) and ``args.yes`` skips the
            interactive prompts.
        skip_if_exists (bool): skip the function if a config file already exists
    """
    config_path = args.config or bigchaindb.config_utils.CONFIG_DEFAULT_PATH

    config_file_exists = False
    # if the config path is `-` then it's stdout
    if config_path != '-':
        config_file_exists = os.path.exists(config_path)

    if config_file_exists and skip_if_exists:
        return

    if config_file_exists and not args.yes:
        want = input('Config file `{}` exists, do you want to override it? '
                     '(cannot be undone) [y/n]: '.format(config_path))
        if want != 'y':
            return

    conf = copy.deepcopy(bigchaindb.config)

    # Patch the default configuration with the new values
    conf = bigchaindb.config_utils.update(
        conf,
        bigchaindb.config_utils.env_config(bigchaindb.config))

    print('Generating keypair', file=sys.stderr)
    conf['keypair']['private'], conf['keypair']['public'] = \
        crypto.generate_key_pair()

    if not args.yes:
        # Interactively confirm (or override) each setting; an empty answer
        # keeps the current value.
        for key in ('bind', ):
            val = conf['server'][key]
            conf['server'][key] = \
                input('API Server {}? (default `{}`): '.format(key, val)) \
                or val

        for key in ('host', 'port', 'name'):
            val = conf['database'][key]
            conf['database'][key] = \
                input('Database {}? (default `{}`): '.format(key, val)) \
                or val

        for key in ('host', 'port', 'rate'):
            val = conf['statsd'][key]
            conf['statsd'][key] = \
                input('Statsd {}? (default `{}`): '.format(key, val)) \
                or val

        val = conf['backlog_reassign_delay']
        conf['backlog_reassign_delay'] = \
            input('Stale transaction reassignment delay (in seconds)? (default `{}`): '.format(val)) \
            or val

    if config_path != '-':
        bigchaindb.config_utils.write_config(conf, config_path)
    else:
        print(json.dumps(conf, indent=4, sort_keys=True))
    print('Configuration written to {}'.format(config_path), file=sys.stderr)
    print('Ready to go!', file=sys.stderr)


def run_export_my_pubkey(args):
    """Export this node's public key to standard output
    """
    logger.debug('bigchaindb args = {}'.format(args))
    bigchaindb.config_utils.autoconfigure(filename=args.config, force=True)
    pubkey = bigchaindb.config['keypair']['public']
    if pubkey is not None:
        print(pubkey)
    else:
        sys.exit("This node's public key wasn't set anywhere "
                 "so it can't be exported")
        # raises SystemExit exception
        # message is sent to stderr
        # exits with exit code 1 (signals that an error happened)
def run_init(args):
    """Initialize the database"""
    bigchaindb.config_utils.autoconfigure(filename=args.config, force=True)
    # TODO Provide mechanism to:
    # 1. prompt the user to inquire whether they wish to drop the db
    # 2. force the init, (e.g., via -f flag)
    try:
        db.init()
    except DatabaseAlreadyExists:
        # Initialization is not destructive: refuse to touch an existing DB.
        print('The database already exists.', file=sys.stderr)
        print('If you wish to re-initialize it, first drop it.', file=sys.stderr)


def run_drop(args):
    """Drop the database"""
    bigchaindb.config_utils.autoconfigure(filename=args.config, force=True)
    # `--yes` (args.yes) suppresses the interactive confirmation prompt.
    db.drop(assume_yes=args.yes)
def run_set_shards(args):
    """Reconfigure the number of shards for every BigchainDB table.

    The current replication factor of each table is preserved; only the
    shard count is changed to ``args.num_shards``.

    Args:
        args: parsed CLI arguments; ``args.num_shards`` (int) is the
            desired number of shards per table.
    """
    b = bigchaindb.Bigchain()
    for table in ['bigchain', 'backlog', 'votes']:
        # See https://www.rethinkdb.com/api/python/config/
        table_config = r.table(table).config().run(b.conn)
        # Keep the existing replication factor for this table.
        num_replicas = len(table_config['shards'][0]['replicas'])
        try:
            r.table(table).reconfigure(shards=args.num_shards, replicas=num_replicas).run(b.conn)
        except r.ReqlOpFailedError as e:
            # ``Logger.warn`` is a deprecated alias of ``Logger.warning``.
            logger.warning(e)
parents=[utils.base_parser]) 240 | 241 | parser.add_argument('--experimental-start-rethinkdb', 242 | dest='start_rethinkdb', 243 | action='store_true', 244 | help='Run RethinkDB on start') 245 | 246 | # all the commands are contained in the subparsers object, 247 | # the command selected by the user will be stored in `args.command` 248 | # that is used by the `main` function to select which other 249 | # function to call. 250 | subparsers = parser.add_subparsers(title='Commands', 251 | dest='command') 252 | 253 | # parser for writing a config file 254 | subparsers.add_parser('configure', 255 | help='Prepare the config file ' 256 | 'and create the node keypair') 257 | 258 | # parsers for showing/exporting config values 259 | subparsers.add_parser('show-config', 260 | help='Show the current configuration') 261 | 262 | subparsers.add_parser('export-my-pubkey', 263 | help="Export this node's public key") 264 | 265 | # parser for database-level commands 266 | subparsers.add_parser('init', 267 | help='Init the database') 268 | 269 | subparsers.add_parser('drop', 270 | help='Drop the database') 271 | 272 | # parser for starting BigchainDB 273 | subparsers.add_parser('start', 274 | help='Start BigchainDB') 275 | 276 | # parser for configuring the number of shards 277 | sharding_parser = subparsers.add_parser('set-shards', 278 | help='Configure number of shards') 279 | 280 | sharding_parser.add_argument('num_shards', metavar='num_shards', 281 | type=int, default=1, 282 | help='Number of shards') 283 | 284 | # parser for configuring the number of replicas 285 | replicas_parser = subparsers.add_parser('set-replicas', 286 | help='Configure number of replicas') 287 | 288 | replicas_parser.add_argument('num_replicas', metavar='num_replicas', 289 | type=int, default=1, 290 | help='Number of replicas (i.e. 
class ProcessGroup(object):
    """A convenience wrapper that spawns several identical worker processes.

    A call to :meth:`start` launches ``concurrency`` instances of
    ``multiprocessing.Process``, all configured with the same target,
    arguments, and daemon flag.
    """

    def __init__(self, concurrency=None, group=None, target=None, name=None,
                 args=None, kwargs=None, daemon=None):
        # Default to one worker per CPU core when no concurrency is given.
        self.concurrency = concurrency or mp.cpu_count()
        self.group = group
        self.target = target
        self.name = name
        # Fall back to fresh empty containers (avoids shared mutable state).
        self.args = args or ()
        self.kwargs = kwargs or {}
        self.daemon = daemon
        # Populated by start(); one Process per worker, in launch order.
        self.processes = []

    def start(self):
        """Spawn and start all worker processes, recording each one."""
        for _ in range(self.concurrency):
            worker = mp.Process(group=self.group, target=self.target,
                                name=self.name, args=self.args,
                                kwargs=self.kwargs, daemon=self.daemon)
            worker.start()
            self.processes.append(worker)
# Inspired by:
# - http://stackoverflow.com/a/24741694/597097
def pool(builder, size, timeout=None):
    """Create a pool that imposes a limit on the number of stored
    instances.

    Args:
        builder: a function to build an instance.
        size: the size of the pool.
        timeout(Optional[float]): the seconds to wait before raising
            a ``queue.Empty`` exception if no instances are available
            within that time.
    Raises:
        If ``timeout`` is defined but the request is taking longer
        than the specified time, the context manager will raise
        a ``queue.Empty`` exception.

    Returns:
        A context manager that can be used with the ``with``
        statement.

    """

    lock = threading.Lock()
    local_pool = queue.Queue()
    current_size = 0

    @contextlib.contextmanager
    def pooled():
        nonlocal current_size
        instance = None

        # If we still have free slots, then we have room to create new
        # instances.
        if current_size < size:
            with lock:
                # We need to check again if we have slots available, since
                # the situation might be different after acquiring the lock
                if current_size < size:
                    current_size += 1
                    try:
                        instance = builder()
                    except Exception:
                        # The builder failed, so the slot was never actually
                        # used: release it before propagating, otherwise the
                        # pool's capacity shrinks on every failed build.
                        current_size -= 1
                        raise

        # Watch out: current_size can be equal to size if the previous part
        # of the function has been executed, that's why we need to check if
        # the instance is None.
        if instance is None:
            instance = local_pool.get(timeout=timeout)

        try:
            yield instance
        finally:
            # Always return the instance to the pool, even when the body of
            # the ``with`` block raised. Without this ``finally`` the pool
            # would leak one slot per exception and eventually dry up.
            local_pool.put(instance)

    return pooled
def timestamp():
    """Return the current Unix time, rounded to the nearest second.

    See https://en.wikipedia.org/wiki/Unix_time

    Returns:
        str: the Unix time as a string
    """
    seconds_since_epoch = time.time()
    return str(round(seconds_since_epoch))
157 | owners_after (list): base58 encoded public key of the new owners of the digital asset. 158 | inputs (list): id of the transaction to use as input. 159 | operation (str): Either `CREATE` or `TRANSFER` operation. 160 | payload (Optional[dict]): dictionary with information about asset. 161 | 162 | Returns: 163 | dict: unsigned transaction. 164 | 165 | 166 | Raises: 167 | TypeError: if the optional ``payload`` argument is not a ``dict``. 168 | 169 | Reference: 170 | { 171 | "id": "", 172 | "transaction": { 173 | "version": "transaction version number", 174 | "fulfillments": [ 175 | { 176 | "owners_before": ["list of "], 177 | "input": { 178 | "txid": "", 179 | "cid": "condition index" 180 | }, 181 | "fulfillment": "fulfillement of condition cid", 182 | "fid": "fulfillment index" 183 | } 184 | ], 185 | "conditions": [ 186 | { 187 | "owners_after": ["list of "], 188 | "condition": "condition to be met", 189 | "cid": "condition index (1-to-1 mapping with fid)" 190 | } 191 | ], 192 | "operation": "", 193 | "timestamp": "", 194 | "data": { 195 | "hash": "", 196 | "payload": { 197 | "title": "The Winds of Plast", 198 | "creator": "Johnathan Plunkett", 199 | "IPFS_key": "QmfQ5QAjvg4GtA3wg3adpnDJug8ktA1BxurVqBD8rtgVjP" 200 | } 201 | } 202 | }, 203 | } 204 | """ 205 | # validate arguments (owners and inputs should be lists or None) 206 | 207 | # The None case appears on fulfilling a hashlock 208 | if owners_before is None: 209 | owners_before = [] 210 | if not isinstance(owners_before, list): 211 | owners_before = [owners_before] 212 | 213 | # The None case appears on assigning a hashlock 214 | if owners_after is None: 215 | owners_after = [] 216 | if not isinstance(owners_after, list): 217 | owners_after = [owners_after] 218 | 219 | if not isinstance(inputs, list): 220 | inputs = [inputs] 221 | 222 | # handle payload 223 | if payload is not None and not isinstance(payload, dict): 224 | raise TypeError('`payload` must be an dict instance or None') 225 | 226 | data = { 227 
def sign_tx(transaction, signing_keys, bigchain=None):
    """Sign a transaction

    A transaction signed with the `owner_before` corresponding private key.

    Args:
        transaction (dict): transaction to sign.
        signing_keys (list): list of base58 encoded private keys to create the fulfillments of the transaction.
        bigchain (obj): bigchain instance used to get the details of the previous transaction outputs. Useful
            if the `Bigchain` instance was instantiated with parameters that override the config file.

    Returns:
        dict: transaction with the `fulfillment` fields populated.

    """
    # validate sk: accept a single key by normalizing it to a one-element list
    if not isinstance(signing_keys, list):
        signing_keys = [signing_keys]

    # create a mapping between sk and vk so that we can match the private key to the owners_before
    key_pairs = {}
    for sk in signing_keys:
        signing_key = crypto.SigningKey(sk)
        vk = signing_key.get_verifying_key().to_ascii().decode()
        key_pairs[vk] = signing_key

    # Work on a deep copy so the caller's transaction dict is left untouched;
    # the signed fulfillments are written into this copy only.
    tx = copy.deepcopy(transaction)

    bigchain = bigchain if bigchain is not None else bigchaindb.Bigchain()

    for fulfillment in tx['transaction']['fulfillments']:
        # NOTE: the message is built from the original `transaction`, not the
        # copy being mutated, so earlier signatures cannot affect later ones.
        fulfillment_message = get_fulfillment_message(transaction, fulfillment)
        # TODO: avoid instantiation, pass as argument!
        input_condition = get_input_condition(bigchain, fulfillment)
        parsed_fulfillment = cc.Fulfillment.from_dict(input_condition['condition']['details'])
        # for the case in which the type of fulfillment is not covered by this method
        parsed_fulfillment_signed = parsed_fulfillment

        # single current owner
        if isinstance(parsed_fulfillment, cc.Ed25519Fulfillment):
            parsed_fulfillment_signed = fulfill_simple_signature_fulfillment(fulfillment,
                                                                             parsed_fulfillment,
                                                                             fulfillment_message,
                                                                             key_pairs)
        # multiple current owners
        elif isinstance(parsed_fulfillment, cc.ThresholdSha256Fulfillment):
            parsed_fulfillment_signed = fulfill_threshold_signature_fulfillment(fulfillment,
                                                                                parsed_fulfillment,
                                                                                fulfillment_message,
                                                                                key_pairs)

        # Store the signed fulfillment in its serialized URI form.
        signed_fulfillment = parsed_fulfillment_signed.serialize_uri()
        fulfillment.update({'fulfillment': signed_fulfillment})

    return tx
def fulfill_threshold_signature_fulfillment(fulfillment, parsed_fulfillment, fulfillment_message, key_pairs):
    """Fulfill a cryptoconditions.ThresholdSha256Fulfillment

    Args:
        fulfillment (dict): BigchainDB fulfillment to fulfill.
        parsed_fulfillment (cryptoconditions.ThresholdSha256Fulfillment): fulfillment instance to sign.
        fulfillment_message (dict): message to sign.
        key_pairs (dict): dictionary of (public_key, private_key) pairs.

    Returns:
        object: fulfilled cryptoconditions.ThresholdSha256Fulfillment

    Raises:
        KeypairMismatchException: when an owner's public key has no matching
            subcondition, or no matching private key is available.
    """
    # Keep a pristine copy to look subconditions up in, then rebuild the
    # subcondition list of ``parsed_fulfillment`` with signed entries only.
    pristine = copy.deepcopy(parsed_fulfillment)
    parsed_fulfillment.subconditions = []

    message = serialize(fulfillment_message)
    for owner_before in fulfillment['owners_before']:
        try:
            subfulfillment = pristine.get_subcondition_from_vk(owner_before)[0]
        except IndexError:
            raise exceptions.KeypairMismatchException(
                'Public key {} cannot be found in the fulfillment'.format(owner_before))
        try:
            private_key = key_pairs[owner_before]
        except KeyError:
            raise exceptions.KeypairMismatchException(
                'Public key {} is not a pair to any of the private keys'.format(owner_before))

        subfulfillment.sign(message, private_key)
        parsed_fulfillment.add_subfulfillment(subfulfillment)

    return parsed_fulfillment
def check_hash_and_signature(transaction):
    """Raise if the transaction's id or fulfillments are invalid.

    Args:
        transaction (dict): the transaction to check.

    Raises:
        InvalidHash: the `id` field does not match the hash recomputed
            over the transaction body.
        InvalidSignature: one of the fulfillments fails validation.
    """
    # The `id` must equal the hash recomputed over the transaction body.
    if get_hash_data(transaction) != transaction['id']:
        raise exceptions.InvalidHash()

    # Every fulfillment (signature) must validate.
    if not validate_fulfillments(transaction):
        raise exceptions.InvalidSignature()
def get_fulfillment_message(transaction, fulfillment, serialized=False):
    """Build the message that is signed for one fulfillment of a transaction.

    Args:
        transaction (dict): a transaction
        fulfillment (dict): a specific fulfillment (for a condition index)
            within the transaction
        serialized (Optional[bool]): False returns a dict, True returns a
            serialized string

    Returns:
        str|dict: fulfillment message
    """
    tx_body = transaction['transaction']
    # The message contains the common transaction data plus the
    # fulfillment/condition pair under consideration. The fulfillment is
    # deep-copied because it is mutated below and the caller's dict must
    # stay untouched. For a `TRANSFER` the condition would come from the
    # output of a previous transaction; here it is read from this one.
    message = {
        'operation': tx_body['operation'],
        'timestamp': tx_body['timestamp'],
        'data': tx_body['data'],
        'version': tx_body['version'],
        'id': transaction['id'],
        'fulfillment': copy.deepcopy(fulfillment),
        'condition': tx_body['conditions'][fulfillment['fid']],
    }

    # A fulfillment cannot sign itself, so blank out any existing signature.
    message['fulfillment']['fulfillment'] = None

    return serialize(message) if serialized else message
def condition_details_has_owner(condition_details, owner):
    """Check whether the public key of *owner* appears in the condition
    details as an Ed25519Fulfillment.public_key.

    Args:
        condition_details (dict): dict with condition details
        owner (str): base58 public key of owner

    Returns:
        bool: True if the public key is found in the condition details,
            False otherwise
    """
    if 'subfulfillments' in condition_details:
        # Threshold condition: recurse into the nested subfulfillments.
        if condition_details_has_owner(condition_details['subfulfillments'], owner):
            return True
    elif isinstance(condition_details, list):
        # A list of subconditions: any single match is enough.
        return any(condition_details_has_owner(sub, owner)
                   for sub in condition_details)
    else:
        # Leaf condition: compare the public key directly.
        if 'public_key' in condition_details \
                and owner == condition_details['public_key']:
            return True
    return False
def is_genesis_block(block):
    """Check if the block is the genesis block.

    Args:
        block (dict): the block to check

    Returns:
        bool: True if the block is the genesis block, False otherwise.
    """
    # Blocks are never empty, so the first transaction always exists; the
    # genesis block is identified by its GENESIS operation.
    first_tx = block['block']['transactions'][0]
    return first_tx['transaction']['operation'] == 'GENESIS'
43 | 44 | Args: 45 | host (str): hostname where RethinkDB is running. 46 | port (int): port in which RethinkDB is running (usually 28015). 47 | dbname (str): the name of the database to connect to (usually bigchain). 48 | public_key (str): the base58 encoded public key for the ED25519 curve. 49 | private_key (str): the base58 encoded private key for the ED25519 curve. 50 | keyring (list[str]): list of base58 encoded public keys of the federation nodes. 51 | """ 52 | 53 | config_utils.autoconfigure() 54 | self.host = host or bigchaindb.config['database']['host'] 55 | self.port = port or bigchaindb.config['database']['port'] 56 | self.dbname = dbname or bigchaindb.config['database']['name'] 57 | self.me = public_key or bigchaindb.config['keypair']['public'] 58 | self.me_private = private_key or bigchaindb.config['keypair']['private'] 59 | self.nodes_except_me = keyring or bigchaindb.config['keyring'] 60 | self.backlog_reassign_delay = backlog_reassign_delay or bigchaindb.config['backlog_reassign_delay'] 61 | self.consensus = config_utils.load_consensus_plugin(consensus_plugin) 62 | # change RethinkDB read mode to majority. This ensures consistency in query results 63 | self.read_mode = 'majority' 64 | 65 | if not self.me or not self.me_private: 66 | raise exceptions.KeypairNotFoundException() 67 | 68 | self._conn = None 69 | 70 | @property 71 | def conn(self): 72 | if not self._conn: 73 | self._conn = self.reconnect() 74 | return self._conn 75 | 76 | def reconnect(self): 77 | return r.connect(host=self.host, port=self.port, db=self.dbname) 78 | 79 | def create_transaction(self, *args, **kwargs): 80 | """Create a new transaction 81 | 82 | Refer to the documentation of your consensus plugin. 83 | 84 | Returns: 85 | dict: newly constructed transaction. 
86 | """ 87 | 88 | return self.consensus.create_transaction(*args, **kwargs) 89 | 90 | def sign_transaction(self, transaction, *args, **kwargs): 91 | """Sign a transaction 92 | 93 | Refer to the documentation of your consensus plugin. 94 | 95 | Returns: 96 | dict: transaction with any signatures applied. 97 | """ 98 | 99 | return self.consensus.sign_transaction(transaction, *args, bigchain=self, **kwargs) 100 | 101 | def validate_fulfillments(self, signed_transaction, *args, **kwargs): 102 | """Validate the fulfillment(s) of a transaction. 103 | 104 | Refer to the documentation of your consensus plugin. 105 | 106 | Returns: 107 | bool: True if the transaction's required fulfillments are present 108 | and correct, False otherwise. 109 | """ 110 | 111 | return self.consensus.validate_fulfillments( 112 | signed_transaction, *args, **kwargs) 113 | 114 | def write_transaction(self, signed_transaction, durability='soft'): 115 | """Write the transaction to bigchain. 116 | 117 | When first writing a transaction to the bigchain the transaction will be kept in a backlog until 118 | it has been validated by the nodes of the federation. 119 | 120 | Args: 121 | signed_transaction (dict): transaction with the `signature` included. 122 | 123 | Returns: 124 | dict: database response 125 | """ 126 | 127 | # we will assign this transaction to `one` node. This way we make sure that there are no duplicate 128 | # transactions on the bigchain 129 | 130 | if self.nodes_except_me: 131 | assignee = random.choice(self.nodes_except_me) 132 | else: 133 | # I am the only node 134 | assignee = self.me 135 | 136 | # We copy the transaction here to not add `assignee` to the transaction 137 | # dictionary passed to this method (as it would update by reference). 
138 | signed_transaction = deepcopy(signed_transaction) 139 | # update the transaction 140 | signed_transaction.update({'assignee': assignee}) 141 | signed_transaction.update({'assignment_timestamp': time()}) 142 | 143 | # write to the backlog 144 | response = r.table('backlog').insert(signed_transaction, durability=durability).run(self.conn) 145 | return response 146 | 147 | def reassign_transaction(self, transaction, durability='hard'): 148 | """Assign a transaction to a new node 149 | 150 | Args: 151 | transaction (dict): assigned transaction 152 | 153 | Returns: 154 | dict: database response or None if no reassignment is possible 155 | """ 156 | 157 | if self.nodes_except_me: 158 | try: 159 | federation_nodes = self.nodes_except_me + [self.me] 160 | index_current_assignee = federation_nodes.index(transaction['assignee']) 161 | new_assignee = random.choice(federation_nodes[:index_current_assignee] + 162 | federation_nodes[index_current_assignee + 1:]) 163 | except ValueError: 164 | # current assignee not in federation 165 | new_assignee = random.choice(self.nodes_except_me) 166 | 167 | else: 168 | # There is no other node to assign to 169 | new_assignee = self.me 170 | 171 | response = r.table('backlog')\ 172 | .get(transaction['id'])\ 173 | .update({'assignee': new_assignee, 174 | 'assignment_timestamp': time()}, 175 | durability=durability).run(self.conn) 176 | return response 177 | 178 | def get_stale_transactions(self): 179 | """Get a RethinkDB cursor of stale transactions 180 | 181 | Transactions are considered stale if they have been assigned a node, but are still in the 182 | backlog after some amount of time specified in the configuration 183 | """ 184 | 185 | return r.table('backlog')\ 186 | .filter(lambda tx: time() - tx['assignment_timestamp'] > 187 | self.backlog_reassign_delay).run(self.conn) 188 | 189 | def get_transaction(self, txid, include_status=False): 190 | """Retrieve a transaction with `txid` from bigchain. 
191 | 192 | Queries the bigchain for a transaction, if it's in a valid or invalid 193 | block. 194 | 195 | Args: 196 | txid (str): transaction id of the transaction to query 197 | include_status (bool): also return the status of the transaction 198 | the return value is then a tuple: (tx, status) 199 | 200 | Returns: 201 | A dict with the transaction details if the transaction was found. 202 | Will add the transaction status to payload ('valid', 'undecided', 203 | or 'backlog'). If no transaction with that `txid` was found it 204 | returns `None` 205 | """ 206 | 207 | response, tx_status = None, None 208 | 209 | validity = self.get_blocks_status_containing_tx(txid) 210 | 211 | if validity: 212 | # Disregard invalid blocks, and return if there are no valid or undecided blocks 213 | validity = {_id: status for _id, status in validity.items() 214 | if status != Bigchain.BLOCK_INVALID} 215 | if validity: 216 | 217 | tx_status = self.TX_UNDECIDED 218 | # If the transaction is in a valid or any undecided block, return it. Does not check 219 | # if transactions in undecided blocks are consistent, but selects the valid block before 220 | # undecided ones 221 | for target_block_id in validity: 222 | if validity[target_block_id] == Bigchain.BLOCK_VALID: 223 | tx_status = self.TX_VALID 224 | break 225 | 226 | # Query the transaction in the target block and return 227 | response = r.table('bigchain', read_mode=self.read_mode).get(target_block_id)\ 228 | .get_field('block').get_field('transactions')\ 229 | .filter(lambda tx: tx['id'] == txid).run(self.conn)[0] 230 | 231 | else: 232 | # Otherwise, check the backlog 233 | response = r.table('backlog').get(txid).run(self.conn) 234 | if response: 235 | tx_status = self.TX_IN_BACKLOG 236 | 237 | if include_status: 238 | return response, tx_status 239 | else: 240 | return response 241 | 242 | def get_status(self, txid): 243 | """Retrieve the status of a transaction with `txid` from bigchain. 
244 | 245 | Args: 246 | txid (str): transaction id of the transaction to query 247 | 248 | Returns: 249 | (string): transaction status ('valid', 'undecided', 250 | or 'backlog'). If no transaction with that `txid` was found it 251 | returns `None` 252 | """ 253 | _, status = self.get_transaction(txid, include_status=True) 254 | return status 255 | 256 | def search_block_election_on_index(self, value, index): 257 | """Retrieve block election information given a secondary index and value 258 | 259 | Args: 260 | value: a value to search (e.g. transaction id string, payload hash string) 261 | index (str): name of a secondary index, e.g. 'transaction_id' 262 | 263 | Returns: 264 | A list of blocks with with only election information 265 | """ 266 | # First, get information on all blocks which contain this transaction 267 | response = r.table('bigchain', read_mode=self.read_mode).get_all(value, index=index)\ 268 | .pluck('votes', 'id', {'block': ['voters']}).run(self.conn) 269 | 270 | return list(response) 271 | 272 | def get_blocks_status_containing_tx(self, txid): 273 | """Retrieve block ids and statuses related to a transaction 274 | 275 | Transactions may occur in multiple blocks, but no more than one valid block. 276 | 277 | Args: 278 | txid (str): transaction id of the transaction to query 279 | 280 | Returns: 281 | A dict of blocks containing the transaction, 282 | e.g. 
{block_id_1: 'valid', block_id_2: 'invalid' ...}, or None 283 | """ 284 | 285 | # First, get information on all blocks which contain this transaction 286 | blocks = self.search_block_election_on_index(txid, 'transaction_id') 287 | 288 | if blocks: 289 | # Determine the election status of each block 290 | validity = {block['id']: self.block_election_status(block) for block in blocks} 291 | 292 | # If there are multiple valid blocks with this transaction, something has gone wrong 293 | if list(validity.values()).count(Bigchain.BLOCK_VALID) > 1: 294 | block_ids = str([block for block in validity 295 | if validity[block] == Bigchain.BLOCK_VALID]) 296 | raise Exception('Transaction {tx} is present in multiple valid blocks: {block_ids}' 297 | .format(tx=txid, block_ids=block_ids)) 298 | 299 | return validity 300 | 301 | else: 302 | return None 303 | 304 | def get_tx_by_payload_uuid(self, payload_uuid): 305 | """Retrieves transactions related to a digital asset. 306 | 307 | When creating a transaction one of the optional arguments is the `payload`. The payload is a generic 308 | dict that contains information about the digital asset. 309 | 310 | To make it easy to query the bigchain for that digital asset we create a UUID for the payload and 311 | store it with the transaction. This makes it easy for developers to keep track of their digital 312 | assets in bigchain. 313 | 314 | Args: 315 | payload_uuid (str): the UUID for this particular payload. 316 | 317 | Returns: 318 | A list of transactions containing that payload. 
If no transaction exists with that payload it 319 | returns an empty list `[]` 320 | """ 321 | cursor = r.table('bigchain', read_mode=self.read_mode) \ 322 | .get_all(payload_uuid, index='payload_uuid') \ 323 | .concat_map(lambda block: block['block']['transactions']) \ 324 | .filter(lambda transaction: transaction['transaction']['data']['uuid'] == payload_uuid) \ 325 | .run(self.conn) 326 | 327 | transactions = list(cursor) 328 | return transactions 329 | 330 | def get_spent(self, tx_input): 331 | """Check if a `txid` was already used as an input. 332 | 333 | A transaction can be used as an input for another transaction. Bigchain needs to make sure that a 334 | given `txid` is only used once. 335 | 336 | Args: 337 | tx_input (dict): Input of a transaction in the form `{'txid': 'transaction id', 'cid': 'condition id'}` 338 | 339 | Returns: 340 | The transaction that used the `txid` as an input if it exists else it returns `None` 341 | """ 342 | # checks if an input was already spent 343 | # checks if the bigchain has any transaction with input {'txid': ..., 'cid': ...} 344 | response = r.table('bigchain', read_mode=self.read_mode)\ 345 | .concat_map(lambda doc: doc['block']['transactions'])\ 346 | .filter(lambda transaction: transaction['transaction']['fulfillments'] 347 | .contains(lambda fulfillment: fulfillment['input'] == tx_input))\ 348 | .run(self.conn) 349 | 350 | transactions = list(response) 351 | 352 | # a transaction_id should have been spent at most one time 353 | if transactions: 354 | # determine if these valid transactions appear in more than one valid block 355 | num_valid_transactions = 0 356 | for transaction in transactions: 357 | # ignore invalid blocks 358 | if self.get_transaction(transaction['id']): 359 | num_valid_transactions += 1 360 | if num_valid_transactions > 1: 361 | raise exceptions.DoubleSpend('`{}` was spent more then once. 
There is a problem with the chain'.format( 362 | tx_input['txid'])) 363 | 364 | if num_valid_transactions: 365 | return transactions[0] 366 | else: 367 | # all queried transactions were invalid 368 | return None 369 | else: 370 | return None 371 | 372 | def get_owned_ids(self, owner): 373 | """Retrieve a list of `txids` that can we used has inputs. 374 | 375 | Args: 376 | owner (str): base58 encoded public key. 377 | 378 | Returns: 379 | list: list of `txids` currently owned by `owner` 380 | """ 381 | 382 | # get all transactions in which owner is in the `owners_after` list 383 | response = r.table('bigchain', read_mode=self.read_mode) \ 384 | .concat_map(lambda doc: doc['block']['transactions']) \ 385 | .filter(lambda tx: tx['transaction']['conditions'] 386 | .contains(lambda c: c['owners_after'] 387 | .contains(owner))) \ 388 | .run(self.conn) 389 | owned = [] 390 | 391 | for tx in response: 392 | # disregard transactions from invalid blocks 393 | validity = self.get_blocks_status_containing_tx(tx['id']) 394 | if Bigchain.BLOCK_VALID not in validity.values(): 395 | if Bigchain.BLOCK_UNDECIDED not in validity.values(): 396 | continue 397 | 398 | # a transaction can contain multiple outputs (conditions) so we need to iterate over all of them 399 | # to get a list of outputs available to spend 400 | for condition in tx['transaction']['conditions']: 401 | # for simple signature conditions there are no subfulfillments 402 | # check if the owner is in the condition `owners_after` 403 | if len(condition['owners_after']) == 1: 404 | if condition['condition']['details']['public_key'] == owner: 405 | tx_input = {'txid': tx['id'], 'cid': condition['cid']} 406 | else: 407 | # for transactions with multiple `owners_after` there will be several subfulfillments nested 408 | # in the condition. 
We need to iterate the subfulfillments to make sure there is a 409 | # subfulfillment for `owner` 410 | if util.condition_details_has_owner(condition['condition']['details'], owner): 411 | tx_input = {'txid': tx['id'], 'cid': condition['cid']} 412 | # check if input was already spent 413 | if not self.get_spent(tx_input): 414 | owned.append(tx_input) 415 | 416 | return owned 417 | 418 | def validate_transaction(self, transaction): 419 | """Validate a transaction. 420 | 421 | Args: 422 | transaction (dict): transaction to validate. 423 | 424 | Returns: 425 | The transaction if the transaction is valid else it raises an 426 | exception describing the reason why the transaction is invalid. 427 | """ 428 | 429 | return self.consensus.validate_transaction(self, transaction) 430 | 431 | def is_valid_transaction(self, transaction): 432 | """Check whether a transacion is valid or invalid. 433 | 434 | Similar to `validate_transaction` but never raises an exception. 435 | It returns `False` if the transaction is invalid. 436 | 437 | Args: 438 | transaction (dict): transaction to check. 439 | 440 | Returns: 441 | `transaction` if the transaction is valid, `False` otherwise 442 | """ 443 | 444 | try: 445 | self.validate_transaction(transaction) 446 | return transaction 447 | except (ValueError, exceptions.OperationError, exceptions.TransactionDoesNotExist, 448 | exceptions.TransactionOwnerError, exceptions.DoubleSpend, 449 | exceptions.InvalidHash, exceptions.InvalidSignature): 450 | return False 451 | 452 | def create_block(self, validated_transactions): 453 | """Creates a block given a list of `validated_transactions`. 454 | 455 | Note that this method does not validate the transactions. Transactions should be validated before 456 | calling create_block. 457 | 458 | Args: 459 | validated_transactions (list): list of validated transactions. 460 | 461 | Returns: 462 | dict: created block. 
463 | """ 464 | 465 | # Prevent the creation of empty blocks 466 | if len(validated_transactions) == 0: 467 | raise exceptions.OperationError('Empty block creation is not allowed') 468 | 469 | # Create the new block 470 | block = { 471 | 'timestamp': util.timestamp(), 472 | 'transactions': validated_transactions, 473 | 'node_pubkey': self.me, 474 | 'voters': self.nodes_except_me + [self.me] 475 | } 476 | 477 | # Calculate the hash of the new block 478 | block_data = util.serialize(block) 479 | block_hash = crypto.hash_data(block_data) 480 | block_signature = crypto.SigningKey(self.me_private).sign(block_data) 481 | 482 | block = { 483 | 'id': block_hash, 484 | 'block': block, 485 | 'signature': block_signature, 486 | } 487 | 488 | return block 489 | 490 | # TODO: check that the votings structure is correctly constructed 491 | def validate_block(self, block): 492 | """Validate a block. 493 | 494 | Args: 495 | block (dict): block to validate. 496 | 497 | Returns: 498 | The block if the block is valid else it raises and exception 499 | describing the reason why the block is invalid. 500 | """ 501 | # First, make sure this node hasn't already voted on this block 502 | if self.has_previous_vote(block): 503 | return block 504 | 505 | # Run the plugin block validation logic 506 | self.consensus.validate_block(self, block) 507 | 508 | # Finally: Tentative assumption that every blockchain will want to 509 | # validate all transactions in each block 510 | for transaction in block['block']['transactions']: 511 | if not self.is_valid_transaction(transaction): 512 | # this will raise the exception 513 | self.validate_transaction(transaction) 514 | 515 | return block 516 | 517 | def has_previous_vote(self, block): 518 | """Check for previous votes from this node 519 | 520 | Args: 521 | block (dict): block to check. 522 | 523 | Returns: 524 | bool: :const:`True` if this block already has a 525 | valid vote from this node, :const:`False` otherwise. 
526 | 527 | Raises: 528 | ImproperVoteError: If there is already a vote, 529 | but the vote is invalid. 530 | 531 | """ 532 | votes = list(r.table('votes', read_mode=self.read_mode)\ 533 | .get_all([block['id'], self.me], index='block_and_voter').run(self.conn)) 534 | 535 | if len(votes) > 1: 536 | raise exceptions.MultipleVotesError('Block {block_id} has {n_votes} votes from public key {me}' 537 | .format(block_id=block['id'], n_votes=str(len(votes)), me=self.me)) 538 | has_previous_vote = False 539 | if votes: 540 | if util.verify_vote_signature(block, votes[0]): 541 | has_previous_vote = True 542 | else: 543 | raise exceptions.ImproperVoteError('Block {block_id} already has an incorrectly signed vote ' 544 | 'from public key {me}'.format(block_id=block['id'], me=self.me)) 545 | 546 | return has_previous_vote 547 | 548 | def is_valid_block(self, block): 549 | """Check whether a block is valid or invalid. 550 | 551 | Similar to `validate_block` but does not raise an exception if the block is invalid. 552 | 553 | Args: 554 | block (dict): block to check. 555 | 556 | Returns: 557 | bool: `True` if the block is valid, `False` otherwise. 558 | """ 559 | 560 | try: 561 | self.validate_block(block) 562 | return True 563 | except Exception: 564 | return False 565 | 566 | def write_block(self, block, durability='soft'): 567 | """Write a block to bigchain. 568 | 569 | Args: 570 | block (dict): block to write to bigchain. 
571 | """ 572 | 573 | block_serialized = rapidjson.dumps(block) 574 | r.table('bigchain').insert(r.json(block_serialized), durability=durability).run(self.conn) 575 | 576 | def transaction_exists(self, transaction_id): 577 | response = r.table('bigchain', read_mode=self.read_mode)\ 578 | .get_all(transaction_id, index='transaction_id').run(self.conn) 579 | return len(response.items) > 0 580 | 581 | def prepare_genesis_block(self): 582 | """Prepare a genesis block.""" 583 | 584 | payload = {'message': 'Hello World from the BigchainDB'} 585 | transaction = self.create_transaction([self.me], [self.me], None, 'GENESIS', payload=payload) 586 | transaction_signed = self.sign_transaction(transaction, self.me_private) 587 | 588 | # create the block 589 | return self.create_block([transaction_signed]) 590 | 591 | # TODO: Unless we prescribe the signature of create_transaction, this will 592 | # also need to be moved into the plugin API. 593 | def create_genesis_block(self): 594 | """Create the genesis block 595 | 596 | Block created when bigchain is first initialized. This method is not atomic, there might be concurrency 597 | problems if multiple instances try to write the genesis block when the BigchainDB Federation is started, 598 | but it's a highly unlikely scenario. 599 | """ 600 | 601 | # 1. create one transaction 602 | # 2. create the block with one transaction 603 | # 3. 
write the block to the bigchain 604 | 605 | blocks_count = r.table('bigchain', read_mode=self.read_mode).count().run(self.conn) 606 | 607 | if blocks_count: 608 | raise exceptions.GenesisBlockAlreadyExistsError('Cannot create the Genesis block') 609 | 610 | block = self.prepare_genesis_block() 611 | self.write_block(block, durability='hard') 612 | 613 | return block 614 | 615 | def vote(self, block_id, previous_block_id, decision, invalid_reason=None): 616 | """Cast your vote on the block given the previous_block_hash and the decision (valid/invalid) 617 | return the block to the updated in the database. 618 | 619 | Args: 620 | block_id (str): The id of the block to vote. 621 | previous_block_id (str): The id of the previous block. 622 | decision (bool): Whether the block is valid or invalid. 623 | invalid_reason (Optional[str]): Reason the block is invalid 624 | """ 625 | 626 | if block_id == previous_block_id: 627 | raise exceptions.CyclicBlockchainError() 628 | 629 | vote = { 630 | 'voting_for_block': block_id, 631 | 'previous_block': previous_block_id, 632 | 'is_block_valid': decision, 633 | 'invalid_reason': invalid_reason, 634 | 'timestamp': util.timestamp() 635 | } 636 | 637 | vote_data = util.serialize(vote) 638 | signature = crypto.SigningKey(self.me_private).sign(vote_data) 639 | 640 | vote_signed = { 641 | 'node_pubkey': self.me, 642 | 'signature': signature, 643 | 'vote': vote 644 | } 645 | 646 | return vote_signed 647 | 648 | def write_vote(self, vote): 649 | """Write the vote to the database.""" 650 | 651 | r.table('votes') \ 652 | .insert(vote) \ 653 | .run(self.conn) 654 | 655 | def get_last_voted_block(self): 656 | """Returns the last block that this node voted on.""" 657 | 658 | try: 659 | # get the latest value for the vote timestamp (over all votes) 660 | max_timestamp = r.table('votes', read_mode=self.read_mode) \ 661 | .filter(r.row['node_pubkey'] == self.me) \ 662 | .max(r.row['vote']['timestamp']) \ 663 | .run(self.conn)['vote']['timestamp'] 
664 | 665 | last_voted = list(r.table('votes', read_mode=self.read_mode) \ 666 | .filter(r.row['vote']['timestamp'] == max_timestamp) \ 667 | .filter(r.row['node_pubkey'] == self.me) \ 668 | .run(self.conn)) 669 | 670 | except r.ReqlNonExistenceError: 671 | # return last vote if last vote exists else return Genesis block 672 | return list(r.table('bigchain', read_mode=self.read_mode) 673 | .filter(util.is_genesis_block) 674 | .run(self.conn))[0] 675 | 676 | # Now the fun starts. Since the resolution of timestamp is a second, 677 | # we might have more than one vote per timestamp. If this is the case 678 | # then we need to rebuild the chain for the blocks that have been retrieved 679 | # to get the last one. 680 | 681 | # Given a block_id, mapping returns the id of the block pointing at it. 682 | mapping = {v['vote']['previous_block']: v['vote']['voting_for_block'] 683 | for v in last_voted} 684 | 685 | # Since we follow the chain backwards, we can start from a random 686 | # point of the chain and "move up" from it. 687 | last_block_id = list(mapping.values())[0] 688 | 689 | # We must be sure to break the infinite loop. This happens when: 690 | # - the block we are currenty iterating is the one we are looking for. 691 | # This will trigger a KeyError, breaking the loop 692 | # - we are visiting again a node we already explored, hence there is 693 | # a loop. 
This might happen if a vote points both `previous_block` 694 | # and `voting_for_block` to the same `block_id` 695 | explored = set() 696 | 697 | while True: 698 | try: 699 | if last_block_id in explored: 700 | raise exceptions.CyclicBlockchainError() 701 | explored.add(last_block_id) 702 | last_block_id = mapping[last_block_id] 703 | except KeyError: 704 | break 705 | 706 | res = r.table('bigchain', read_mode=self.read_mode).get(last_block_id).run(self.conn) 707 | 708 | return res 709 | 710 | def get_unvoted_blocks(self): 711 | """Return all the blocks that has not been voted by this node.""" 712 | 713 | unvoted = r.table('bigchain', read_mode=self.read_mode) \ 714 | .filter(lambda block: r.table('votes', read_mode=self.read_mode) 715 | .get_all([block['id'], self.me], index='block_and_voter') 716 | .is_empty()) \ 717 | .order_by(r.asc(r.row['block']['timestamp'])) \ 718 | .run(self.conn) 719 | 720 | # FIXME: I (@vrde) don't like this solution. Filtering should be done at a 721 | # database level. 
Solving issue #444 can help untangling the situation 722 | unvoted = filter(lambda block: not util.is_genesis_block(block), unvoted) 723 | 724 | return list(unvoted) 725 | 726 | def block_election_status(self, block): 727 | """Tally the votes on a block, and return the status: valid, invalid, or undecided.""" 728 | 729 | votes = r.table('votes', read_mode=self.read_mode) \ 730 | .between([block['id'], r.minval], [block['id'], r.maxval], index='block_and_voter') \ 731 | .run(self.conn) 732 | 733 | votes = list(votes) 734 | 735 | n_voters = len(block['block']['voters']) 736 | 737 | voter_counts = collections.Counter([vote['node_pubkey'] for vote in votes]) 738 | for node in voter_counts: 739 | if voter_counts[node] > 1: 740 | raise exceptions.MultipleVotesError('Block {block_id} has multiple votes ({n_votes}) from voting node {node_id}' 741 | .format(block_id=block['id'], n_votes=str(voter_counts[node]), node_id=node)) 742 | 743 | if len(votes) > n_voters: 744 | raise exceptions.MultipleVotesError('Block {block_id} has {n_votes} votes cast, but only {n_voters} voters' 745 | .format(block_id=block['id'], n_votes=str(len(votes)), n_voters=str(n_voters))) 746 | 747 | # vote_cast is the list of votes e.g. [True, True, False] 748 | vote_cast = [vote['vote']['is_block_valid'] for vote in votes] 749 | # prev_block are the ids of the nominal prev blocks e.g. 750 | # ['block1_id', 'block1_id', 'block2_id'] 751 | prev_block = [vote['vote']['previous_block'] for vote in votes] 752 | # vote_validity checks whether a vote is valid 753 | # or invalid, e.g. [False, True, True] 754 | vote_validity = [self.consensus.verify_vote_signature(block, vote) for vote in votes] 755 | 756 | # element-wise product of stated vote and validity of vote 757 | # vote_cast = [True, True, False] and 758 | # vote_validity = [False, True, True] gives 759 | # [True, False] 760 | # Only the correctly signed votes are tallied. 
761 | vote_list = list(compress(vote_cast, vote_validity)) 762 | 763 | # Total the votes. Here, valid and invalid refer 764 | # to the vote cast, not whether the vote itself 765 | # is valid or invalid. 766 | n_valid_votes = sum(vote_list) 767 | n_invalid_votes = len(vote_cast) - n_valid_votes 768 | 769 | # The use of ceiling and floor is to account for the case of an 770 | # even number of voters where half the voters have voted 'invalid' 771 | # and half 'valid'. In this case, the block should be marked invalid 772 | # to avoid a tie. In the case of an odd number of voters this is not 773 | # relevant, since one side must be a majority. 774 | if n_invalid_votes >= math.ceil(n_voters / 2): 775 | return Bigchain.BLOCK_INVALID 776 | elif n_valid_votes > math.floor(n_voters / 2): 777 | # The block could be valid, but we still need to check if votes 778 | # agree on the previous block. 779 | # 780 | # First, only consider blocks with legitimate votes 781 | prev_block_list = list(compress(prev_block, vote_validity)) 782 | # Next, only consider the blocks with 'yes' votes 783 | prev_block_valid_list = list(compress(prev_block_list, vote_list)) 784 | counts = collections.Counter(prev_block_valid_list) 785 | # Make sure the majority vote agrees on previous node. 786 | # The majority vote must be the most common, by definition. 787 | # If it's not, there is no majority agreement on the previous 788 | # block. 789 | if counts.most_common()[0][1] > math.floor(n_voters / 2): 790 | return Bigchain.BLOCK_VALID 791 | else: 792 | return Bigchain.BLOCK_INVALID 793 | else: 794 | return Bigchain.BLOCK_UNDECIDED 795 | --------------------------------------------------------------------------------