├── .env.example
├── .gitignore
├── Dockerfile
├── LICENSE
├── Makefile
├── README.md
├── bitdata
│   ├── __init__.py
│   ├── analysis
│   │   ├── addresses.py
│   │   ├── coinbase.py
│   │   ├── mining.py
│   │   └── taproot.py
│   ├── core
│   │   ├── __init__.py
│   │   └── config.py
│   ├── notifiers
│   │   ├── discord.py
│   │   └── telegram.py
│   └── provider
│       ├── __init__.py
│       ├── bitcoin_rpc.py
│       ├── blockstream.py
│       ├── mempool.py
│       └── quiknode.py
├── dashboard
│   ├── On-chain.py
│   ├── bitpolito_logo.png
│   ├── lightning.gif
│   └── pages
│       └── Lightning_Network.py
├── grafana
│   ├── docker-compose.yml
│   ├── exporter
│   │   ├── Dockerfile
│   │   ├── btc_conf.py
│   │   └── client.py
│   ├── grafana
│   │   ├── Dockerfile
│   │   └── config
│   │       └── grafana.ini
│   ├── prometheus
│   │   ├── Dockerfile
│   │   └── config
│   │       └── prometheus.yml
│   └── readme.md
├── pyproject.toml
├── requirements.txt
├── setup.py
└── tests
    ├── conftest.py
    ├── test_blockstream.py
    └── test_btc_rpc.py
/.env.example:
--------------------------------------------------------------------------------
1 | RPC_USER='bitpolito'
2 | RPC_PASSWORD='bitpolito'
3 | RPC_HOST='localhost'
4 | RPC_PORT=8332
5 |
6 | BOT_TOKEN="XXX"
7 | CHAT_ID="XXX"
8 | DISCORD_WEBHOOK_URL="XXX"
9 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # C extensions
7 | *.so
8 |
9 | # Distribution / packaging
10 | .Python
11 | build/
12 | develop-eggs/
13 | dist/
14 | downloads/
15 | eggs/
16 | .eggs/
17 | lib/
18 | lib64/
19 | parts/
20 | sdist/
21 | var/
22 | wheels/
23 | pip-wheel-metadata/
24 | share/python-wheels/
25 | *.egg-info/
26 | .installed.cfg
27 | *.egg
28 | MANIFEST
29 |
30 | # PyInstaller
31 | # Usually these files are written by a python script from a template
32 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
33 | *.manifest
34 | *.spec
35 |
36 | # Installer logs
37 | pip-log.txt
38 | pip-delete-this-directory.txt
39 |
40 | # Unit test / coverage reports
41 | htmlcov/
42 | .tox/
43 | .nox/
44 | .coverage
45 | .coverage.*
46 | .cache
47 | nosetests.xml
48 | coverage.xml
49 | *.cover
50 | *.py,cover
51 | .hypothesis/
52 | .pytest_cache/
53 |
54 | # Translations
55 | *.mo
56 | *.pot
57 |
58 | # Django stuff:
59 | *.log
60 | local_settings.py
61 | db.sqlite3
62 | db.sqlite3-journal
63 |
64 | # Flask stuff:
65 | instance/
66 | .webassets-cache
67 |
68 | # Scrapy stuff:
69 | .scrapy
70 |
71 | # Sphinx documentation
72 | docs/_build/
73 |
74 | # PyBuilder
75 | target/
76 |
77 | # Jupyter Notebook
78 | .ipynb_checkpoints
79 |
80 | # IPython
81 | profile_default/
82 | ipython_config.py
83 |
84 | # pyenv
85 | .python-version
86 |
87 | # pipenv
88 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
89 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
90 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
91 | # install all needed dependencies.
92 | #Pipfile.lock
93 |
94 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow
95 | __pypackages__/
96 |
97 | # Celery stuff
98 | celerybeat-schedule
99 | celerybeat.pid
100 |
101 | # SageMath parsed files
102 | *.sage.py
103 |
104 | # Environments
105 | .env
106 | .venv
107 | env/
108 | venv/
109 | ENV/
110 | env.bak/
111 | venv.bak/
112 |
113 | # Spyder project settings
114 | .spyderproject
115 | .spyproject
116 |
117 | # Rope project settings
118 | .ropeproject
119 |
120 | # mkdocs documentation
121 | /site
122 |
123 | # mypy
124 | .mypy_cache/
125 | .dmypy.json
126 | dmypy.json
127 |
128 | # Pyre type checker
129 | .pyre/
130 |
131 | poetry.lock
132 |
133 |
134 |
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM python:3.10-slim
2 | RUN apt-get update
3 | RUN apt-get install -y curl python3-dev autoconf
4 | RUN curl -sSL https://install.python-poetry.org | python3 -
5 | ENV PATH="/root/.local/bin:$PATH"
6 | WORKDIR /app
7 | COPY pyproject.toml poetry.lock ./
8 | RUN poetry config virtualenvs.create false
9 | RUN poetry install --only main --no-root
10 |
11 | COPY . .
12 | EXPOSE 8501
13 |
14 | CMD ["poetry", "run", "streamlit", "run", "dashboard/On-chain.py"]
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2023 BitPolito
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
1 | frontend:
2 | poetry run streamlit run dashboard/On-chain.py
3 |
4 | test:
5 | pytest tests/
6 |
7 | isort:
8 | poetry run isort --profile black .
9 |
10 | black:
11 | poetry run black .
12 |
13 | format:
14 | make isort
15 | make black
16 | make mypy
17 |
18 | mypy:
19 | poetry run mypy bitdata --ignore-missing-imports
20 |
21 | clean:
22 | rm -r bitdata.egg-info/ || true
23 | find . -name ".DS_Store" -exec rm -f {} \; || true
24 | rm -rf dist || true
25 | rm -rf build || true
26 |
27 | package:
28 | poetry export -f requirements.txt --without-hashes --output requirements.txt
29 | make clean
30 | python setup.py sdist bdist_wheel
31 |
35 | install:
36 | make clean
37 | python setup.py sdist bdist_wheel
38 | pip install --upgrade dist/*
39 |
40 | upload:
41 | make clean
42 | python setup.py sdist bdist_wheel
43 | twine upload --repository pypi dist/*
44 |
45 | docker-build:
46 | docker build -t bitdata .
47 |
48 | docker-run:
49 | docker run --name bitdata -p 8501:8501 -v $(PWD)/dashboard:/app/dashboard bitdata
50 |
51 | docker-stop:
52 | -docker stop bitdata
53 | -docker rm bitdata
54 |
55 | docker:
56 | -make docker-stop
57 | -make docker-build
58 | -make docker-run
59 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Bitcoin Data Analysis
2 |
3 | [](https://github.com/BitPolito/bitcoin-data-analysis/blob/main/LICENSE)
4 |
5 | ## Overview
6 |
7 | Bitcoin Data Analysis is a Python library designed to facilitate the analysis of Bitcoin on-chain data and Lightning Network data. It provides various functionalities and data providers to retrieve, process, and analyze Bitcoin-related information.
8 |
9 | The library consists of the following components:
10 |
11 | - **bitdata**: This module contains the data providers used to fetch Bitcoin data, plus helper functions for analysis.
12 | - **dashboard**: This folder contains a Streamlit web page for visualizing and interacting with the analyzed data.
13 |
14 |
15 | ### Installation
16 |
17 | There are different ways to install the library.
18 |
19 |
26 |
27 |
28 | Clone the repository:
29 |
30 | ```bash
31 | git clone https://github.com/BitPolito/bitcoin-data-analysis
32 | cd bitcoin-data-analysis
33 | ```
34 |
35 | #### Docker
36 | If you don't have Docker installed, you can follow the instructions [here](https://docs.docker.com/get-docker/).
37 |
38 | Build and run the docker image with:
39 |
40 | ```bash
41 | make docker
42 | ```
43 | Access the [streamlit](https://streamlit.io/) web page in your browser at http://localhost:8501.
44 |
45 | #### Poetry
46 | If you don't have poetry installed, follow the instructions in the [official Poetry documentation](https://python-poetry.org/docs/#installation) to install Poetry for your operating system.
47 |
48 |
49 | Install the Python libraries:
50 | ```
51 | poetry install
52 | ```
53 | ### Config
54 | Add your own configuration file in the root folder of the project.
55 | You can use the .env.example file as a template.
56 |
57 | ```bash
58 | cp .env.example .env
59 | # edit .env file with your configuration
60 | nano .env
61 | ```
62 | ## Data Dashboard
63 |
64 | A web page built with Streamlit that displays live statistics about the Bitcoin network. Try it [here](https://bumblebee00-data-analysis-on-chain-kk5uep.streamlit.app/)
65 |
66 | ### Run the dashboard
67 | ```
68 | poetry run streamlit run dashboard/On-chain.py
69 | ```
70 |
71 | Access the [streamlit](https://streamlit.io/) web page in your browser at http://localhost:8501.
72 |
73 |
74 | ## BitData - Analysis
75 |
76 | Example tools and scripts for analyzing Bitcoin data.
77 |
78 | ### Coinbase String Monitor
79 | This script analyzes the coinbase transaction of the last 10 testnet blocks; if it finds the target string in a coinbase transaction, it sends a message to a Telegram channel.
80 | It then keeps checking new blocks every 30 seconds.
81 |
82 | - Change BOT_TOKEN and CHAT_ID in the .env file to enable the Telegram bot
83 | - The bot must be added to the channel as an administrator. The CHAT_ID identifies the chat between the bot and the channel.
84 | - Change DISCORD_WEBHOOK_URL in the .env file to enable the Discord bot
85 | - The bot should be created as explained [here](https://discord.com/developers/docs/getting-started) and added to the server with the right privileges. The webhook URL can then be extracted from the sub-channel where you want the bot to post notifications.
86 |
87 | ```
88 | poetry run python -m bitdata.analysis.coinbase -n testnet -t "Stratum v2" -p 10
89 | ```
90 |
91 |
92 | ### Mining pool distribution
93 |
94 | ```bash
95 | poetry run python -m bitdata.analysis.mining
96 | ```
97 |
98 | ### Transactions and address types per block
99 | ```bash
100 | poetry run python -m bitdata.analysis.addresses
101 | ```
102 |
103 | ### Taproot transaction count
104 | ```bash
105 | poetry run python -m bitdata.analysis.taproot
106 | ```
107 |
108 | ## Contributing
109 |
110 | Contributions to the Bitcoin Data Analysis Library are welcome! If you encounter any issues, have feature suggestions, or would like to contribute code, feel free to open an issue or submit a pull request.
111 |
112 |
113 |
114 | ## License
115 |
116 | The Bitcoin Data Analysis Library is open source and released under the [MIT License](https://github.com/BitPolito/bitcoin-data-analysis/blob/main/LICENSE).
117 |
118 |
119 | ## Acknowledgements
120 | We would like to acknowledge the following resources and libraries that have contributed to the development of this project:
121 |
122 | [bitnodes.io](https://bitnodes.io/)
123 | [blockchain.info](https://www.blockchain.info)
124 | [blockstream.info](https://blockstream.info)
125 |
126 |
127 |
--------------------------------------------------------------------------------
/bitdata/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/BitPolito/bitcoin-data-analysis/404e705fe2bc8e6c900e75a3ac28bd27588b2bc4/bitdata/__init__.py
--------------------------------------------------------------------------------
/bitdata/analysis/addresses.py:
--------------------------------------------------------------------------------
1 | from ..provider.bitcoin_rpc import BitcoinRPC
2 |
3 |
4 | def last_block_analytics(rpc_manager: BitcoinRPC):
5 | current_height = rpc_manager.get_last_block_height()
6 | last_block = rpc_manager.get_block_by_height(current_height)
7 | tx_count = len(last_block["tx"])
8 | # Print the results
9 | print(
10 | "Number of Transactions in Block " + str(current_height) + ": " + str(tx_count)
11 | )
12 | address_result = rpc_manager.address_block_analytics(last_block)
13 | address_types_count = address_result["address_types_count"]
14 | address_types_amount = address_result["address_types_amount"]
15 |
16 | print("\nNumber of UTXOs by address type:")
17 | for address_type, count in address_types_count.items():
18 | print(f"{address_type}: {count}")
19 |
20 | print("\nAmount of UTXOs by address type:")
21 | for address_type, amount in address_types_amount.items():
22 | print(f"{address_type}: {amount}")
23 |
24 |
25 | if __name__ == "__main__":
26 | from ..core.config import BitConfig
27 |
28 | cfg = BitConfig()
29 | rpc_manager = BitcoinRPC(cfg)
30 | last_block_analytics(rpc_manager)
31 |
--------------------------------------------------------------------------------
/bitdata/analysis/coinbase.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import asyncio
3 | import time
4 |
5 | from bitdata.notifiers.telegram import TelegramWriter
6 | from bitdata.notifiers.discord import DiscordWriter
7 | from bitdata.provider.mempool import MempoolProvider
8 |
9 | # This script listens for new blocks and checks whether the coinbase transaction contains the target string (e.g., "Stratum v2")
10 | # If it does, it sends a message to the configured Telegram channel and Discord webhook
11 | # The raw transaction, decoded as text, also contains the miner's tag
12 |
13 | SLEEP_TIME = 30 # Time to wait in seconds between checks
14 |
15 | class CoinbaseAnalyzer:
16 | def __init__(
17 | self,
18 | provider,
19 | telegram_writer,
20 | discord_writer,
21 | target_string,
22 | network="testnet4",
23 | n_previous_blocks=0,
24 | ):
25 | self.provider = provider
26 | self.telegram_writer = telegram_writer
27 | self.discord_writer = discord_writer
28 | self.target_string = target_string
29 | self.last_hash = None
30 | self.network = network
31 | self.n_previous_blocks = n_previous_blocks
32 |
33 | def get_miner_text_input(self, raw_transaction):
34 | try:
35 | inputs = raw_transaction["result"]["vin"]
36 | script = inputs[0]["coinbase"]
37 | ascii_string = ""
38 | for i in range(0, len(script), 2):
39 | ascii_string += chr(int(script[i : i + 2], 16))
40 | return ascii_string.lower()
41 | except Exception:
42 | # Decoding failed: fall back to the raw transaction string, which may still contain the miner tag
43 | return raw_transaction.lower()
44 |
45 | async def notify_message(self, block_height, block_hash):
46 | url = (
47 | "https://mempool.space/it/testnet4/block/"
48 | if self.network == "testnet4"
49 | else "https://mempool.space/it/block/"
50 | )
51 | message = f"""Found a new block from SRI Pool : **{self.target_string}** in {self.network} block: [@{block_height}]({url}{block_hash})"""
52 | await self.telegram_writer.send_telegram_message(message)
53 | await self.discord_writer.send_discord_message(message)
54 |
55 | async def check_new_block(self):
56 | last_hash = self.provider.last_hash()
57 | if last_hash == self.last_hash:
58 | return
59 | self.last_hash = last_hash
60 | # New block found
61 | last_height = self.provider.get_last_height()
62 | coinbase_raw_transaction = self.provider.get_raw_coinbase_transaction(last_hash)
63 | miner_input = self.get_miner_text_input(coinbase_raw_transaction)
64 | if self.target_string.lower() in miner_input:
65 | await self.notify_message(last_height, last_hash)
66 | else:
67 | print(f"New block: {last_height} - {self.last_hash}")
68 |
69 | async def check_from_previous_n_blocks(self):
70 | if self.n_previous_blocks <= 0:
71 | return
72 | list_of_blocks = self.provider.get_last_n_blocks(self.n_previous_blocks)
73 | for block in list_of_blocks[: self.n_previous_blocks]:
74 | #print(block)
75 | block_hash = block["id"]
76 | block_height = block["height"]
77 | coinbase_raw_transaction = self.provider.get_raw_coinbase_transaction(
78 | block_hash
79 | )
80 | miner_input = self.get_miner_text_input(coinbase_raw_transaction)
81 | if self.target_string.lower() in miner_input:
82 | await self.notify_message(block_height, block_hash)
83 |
84 | async def run(self):
85 | await self.check_from_previous_n_blocks()
86 | while True:
87 | await self.check_new_block()
88 | await asyncio.sleep(SLEEP_TIME)  # Wait SLEEP_TIME seconds before checking again
89 |
90 | if __name__ == "__main__":
91 | parser = argparse.ArgumentParser(description="Block Analyzer Script")
92 | parser.add_argument(
93 | "--network",
94 | "-n",
95 | type=str,
96 | default="testnet4",
97 | help="Network (e.g., testnet4 or mainnet)",
98 | )
99 | parser.add_argument(
100 | "--target",
101 | "-t",
102 | type=str,
103 | default="Stratum V2",
104 | help="Target string to search in miner input",
105 | )
106 | parser.add_argument(
107 | "--previous",
108 | "-p",
109 | type=int,
110 | default=0,
111 | help="Number of previous blocks to check from",
112 | )
113 | args = parser.parse_args()
114 |
115 | network = args.network
116 | target_string = args.target
117 | n_previous_blocks = args.previous
118 | provider = MempoolProvider(network=network)
119 | telegram_writer = TelegramWriter()
120 | discord_writer = DiscordWriter()
121 | coinbase_analyzer = CoinbaseAnalyzer(
122 | provider, telegram_writer, discord_writer, target_string, network, n_previous_blocks
123 | )
124 | asyncio.run(coinbase_analyzer.run())
125 |
--------------------------------------------------------------------------------
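
The heart of the monitor is `get_miner_text_input`, which decodes the coinbase scriptSig hex into ASCII before the case-insensitive substring match. A standalone sketch of that decoding step (the hex below is a toy value, not a real coinbase script):

```python
# Toy sketch of the hex-to-ASCII decoding used in get_miner_text_input.
# "5374726174756d207632" is simply "Stratum v2" hex-encoded, not a real scriptSig.
script = "5374726174756d207632"

# Decode two hex digits at a time into one ASCII character.
ascii_string = "".join(chr(int(script[i:i + 2], 16)) for i in range(0, len(script), 2))

print(ascii_string)                          # -> Stratum v2
print("stratum v2" in ascii_string.lower())  # -> True (match is case-insensitive)
```
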
/bitdata/analysis/mining.py:
--------------------------------------------------------------------------------
1 | import base64
2 |
3 | import pandas as pd
4 |
5 | from ..provider.bitcoin_rpc import BitcoinRPC
6 |
7 |
8 | # Iterate over the last past_blocks blocks (default 10) and extract the required data.
9 | def mining_analytics(rpc_manager: BitcoinRPC, past_blocks: int = 10):
10 | block_data = []
11 | current_height = rpc_manager.get_last_block_height()
12 | assert current_height is not None, "Unable to get last block height"
13 | assert (
14 | past_blocks <= current_height
15 | ), "Past blocks must be less than or equal to the current height"
16 |
17 | for height in range(current_height, current_height - past_blocks, -1):
18 | # Get the block hash and information for the current height.
19 | block = rpc_manager.get_block_by_height(height)
20 | if block is None:
21 | print(f"Block at height {height} not found")
22 | continue
23 | # Get the coinbase transaction for the block.
24 | tx0 = block["tx"][0]
25 | coinbase_tx = rpc_manager.get_transaction(tx0)
26 |
27 | # Extract the value of the OP_RETURN output from the coinbase transaction for the block.
28 | op_return_value = None
29 |
30 | for output in coinbase_tx["vout"]:
31 | if output["scriptPubKey"]["type"] == "nulldata":
32 | op_return_value = output["scriptPubKey"]["asm"].split(" ")[1]
33 | break
34 |
35 | # Add the block data to the block data list.
36 | block_data.append(
37 | {
38 | "Height": height,
39 | "Timestamp": block["time"],
40 | "Transaction Count": len(block["tx"]),
41 | "BTC Fees": block.get("fee", 0),
42 | "Size (MB)": block["size"] / 1000000,
43 | "Branch ID": "Orphan" if block["confirmations"] == 0 else "Main",
44 | "Coinbase Transaction": coinbase_tx,
45 | "OP_RETURN": op_return_value,
46 | }
47 | )
48 |
49 | # Map the OP_RETURN value to the mining operation and add it to the pandas dataframe.
50 | mining_ops = {
51 | "54686520496e7465726e65746f662042697420426f6e6473": "Unknown",
52 | "5765622050726f766f736b79": "Web Provosky",
53 | "416c6978612054726164696e67205465726d": "Alexa Trading",
54 | "4d696e656420426974636f696e": "Mined Bitcoin",
55 | "4e6f746172696f757320434f564944": "Notarious COVID",
56 | "496e66696e69747950726f6d6f74696f6e": "InfinityPromotion",
57 | "466f726d756c61205465726d": "FormulaTerm",
58 | "4269746d696e657273206f662046756c6c466f726365": "FullForce",
59 | "44696769626f7920426974636f696e": "Digibyte Pool",
60 | "426974486f6c6520486f6c64696e67": "BitHole Holding",
61 | "4c696768746e696e6720526f636b73": "Lightning Rocks",
62 | "52696768674d696e656420416c6c69616e6365": "RightMining Alliance",
63 | # '50696f6e657820576f726b73': 'Pionex',
64 | # '4269747374616d70': 'Bitstamp',
65 | "536c75736820506f6f6c": "Slush Pool",
66 | "4632506f6f6c": "F2Pool",
67 | "416e74706f6f6c": "Antpool",
68 | "566961425463": "ViaBTC",
69 | "4254632e636f6d": "BTC.com",
70 | "506f6f6c696e": "Poolin",
71 | "47656e65736973204d696e696e67": "Genesis Mining",
72 | "42697466757279": "Bitfury",
73 | "42696e616e636520506f6f6c": "Binance Pool",
74 | "4b616e6f20506f6f6c": "Kano Pool",
75 | "636f696e62617365": "Coinbase",
76 | "4254432d474c": "BTCC Pool",
77 | "456c6967697573": "Eligius",
78 | "4b616e6f": "KanoPool",
79 | "5761746572746578": "Waterhole",
80 | }
81 |
82 | for block in block_data:
83 | op_return_value = block["OP_RETURN"]
84 | if op_return_value in mining_ops:
85 | block["Mining Operation"] = mining_ops[op_return_value]
86 | else:
87 | block["Mining Operation"] = "Unknown"
88 |
89 | # Create a pandas dataframe with the block data.
90 | block_df = pd.DataFrame(block_data)
91 |
92 | return block_df
93 |
94 |
95 | if __name__ == "__main__":
96 | from ..core.config import BitConfig
97 |
98 | cfg = BitConfig()
99 | rpc_manager = BitcoinRPC(cfg)
100 | df = mining_analytics(rpc_manager, 10)
101 | # Print the resulting dataframe.
102 | print(df)
103 |
--------------------------------------------------------------------------------
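
The keys of `mining_ops` are hex-encoded coinbase tags. Decoding one makes the mapping concrete:

```python
# mining_ops keys are hex-encoded pool tags; decoding one shows the correspondence.
tag_hex = "536c75736820506f6f6c"
print(bytes.fromhex(tag_hex).decode("ascii"))  # -> Slush Pool
```
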
/bitdata/analysis/taproot.py:
--------------------------------------------------------------------------------
1 | import time
2 |
3 | import matplotlib.pyplot as plt
4 |
5 | from ..provider.bitcoin_rpc import BitcoinRPC
6 |
7 | # Define the taproot activation height
8 | TAPROOT_ACTIVATION_HEIGHT = 709632
9 |
10 | """
11 | This script analyzes taproot transactions on the Bitcoin blockchain.
12 | It uses the bitcoinrpc library to connect to a local Bitcoin node via RPC credentials, then retrieves and analyzes block data to count taproot transactions.
13 | The code runs continuously, checking for new blocks and updating the plot accordingly.
14 | To use it, you need a locally running Bitcoin node with RPC credentials set up, plus the bitcoinrpc and matplotlib libraries installed.
15 | The plot shows the number of taproot transactions on the y-axis and the block height on the x-axis, updating in real time as new blocks are added and showing the trend in taproot transactions over time.
16 | Overall, this code provides a useful tool for analyzing the adoption and usage of taproot transactions on the Bitcoin blockchain.
17 | """
18 |
19 |
20 | def taproot_counter(rpc_manager: BitcoinRPC):
21 | # Initialize the plot
22 | fig, ax = plt.subplots()
23 | ax.set_xlabel("Block Height")
24 | ax.set_ylabel("Taproot Transactions")
25 | ax.set_title("Taproot Transactions Since Activation")
26 |
27 | # Start analyzing blocks between current_height and taproot_activation_height
28 | current_height = rpc_manager.get_last_block_height()
29 | for i in range(current_height, TAPROOT_ACTIVATION_HEIGHT, -1):
30 | block = rpc_manager.get_block_by_height(i)
31 | tx_count = len(block["tx"])
32 | taproot_tx_count = 0
33 | for txid in block["tx"]:
34 | tx = rpc_manager.get_transaction(txid)
35 | for output in tx["vout"]:
36 | if "taproot" in output["scriptPubKey"]["type"]:
37 | taproot_tx_count += 1
38 | break
39 | print(
40 | f"Block Height: {i}, Transactions: {tx_count}, Taproot Transactions: {taproot_tx_count}"
41 | )
42 | ax.plot(i, taproot_tx_count, "bo")
43 | plt.draw()
44 |
45 | while True:
46 | # Check if new blocks have been added to the blockchain
47 | new_height = rpc_manager.get_last_block_height()
48 | if new_height > current_height:
49 | # Analyze the new blocks
50 | for i in range(current_height + 1, new_height + 1):
51 | block = rpc_manager.get_block_by_height(i)
52 | tx_count = len(block["tx"])
53 | taproot_tx_count = 0
54 | for txid in block["tx"]:
55 | tx = rpc_manager.get_transaction(txid)
56 | for output in tx["vout"]:
57 | if "taproot" in output["scriptPubKey"]["type"]:
58 | taproot_tx_count += 1
59 | break
60 | print(
61 | f"Block Height: {i}, Transactions: {tx_count}, Taproot Transactions: {taproot_tx_count}"
62 | )
63 | ax.plot(i, taproot_tx_count, "bo")
64 | plt.draw()
65 | plt.pause(0.001)
66 | current_height = new_height
67 | time.sleep(1)
68 |
69 |
70 | if __name__ == "__main__":
71 | from ..core.config import BitConfig
72 |
73 | cfg = BitConfig()
74 |
75 | rpc_manager = BitcoinRPC(cfg)
76 | taproot_counter(rpc_manager)
77 |
--------------------------------------------------------------------------------
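
The per-transaction test relies on Bitcoin Core labelling taproot outputs with the scriptPubKey type `witness_v1_taproot`, so the `"taproot" in type` substring check matches them. A self-contained sketch with a fabricated transaction dict:

```python
# Fabricated tx dict, shaped like Bitcoin Core's verbose getrawtransaction output.
tx = {
    "vout": [
        {"scriptPubKey": {"type": "witness_v1_taproot"}},  # taproot output
        {"scriptPubKey": {"type": "pubkeyhash"}},          # legacy P2PKH output
    ]
}

# Same check as taproot_counter: the tx counts once if any output type mentions "taproot".
is_taproot_tx = any("taproot" in out["scriptPubKey"]["type"] for out in tx["vout"])
print(is_taproot_tx)  # -> True
```
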
/bitdata/core/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/BitPolito/bitcoin-data-analysis/404e705fe2bc8e6c900e75a3ac28bd27588b2bc4/bitdata/core/__init__.py
--------------------------------------------------------------------------------
/bitdata/core/config.py:
--------------------------------------------------------------------------------
1 | from pydantic import BaseSettings
2 |
3 |
4 | class BitConfig(BaseSettings):
5 | RPC_USER: str
6 | RPC_PASSWORD: str
7 | RPC_HOST: str
8 | RPC_PORT: int
9 |
10 | class Config:
11 | env_file = "./.env"
12 |
13 |
14 | cfg = BitConfig()
15 |
--------------------------------------------------------------------------------
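
A minimal usage sketch, assuming a `.env` created from `.env.example` is present in the working directory:

```python
from bitdata.core.config import BitConfig

# BaseSettings pulls RPC_USER, RPC_PASSWORD, RPC_HOST and RPC_PORT
# from the environment or from ./.env.
cfg = BitConfig()
print(cfg.RPC_HOST, cfg.RPC_PORT)  # e.g. localhost 8332
```
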
/bitdata/notifiers/discord.py:
--------------------------------------------------------------------------------
1 | from discordwebhook import Discord
2 | import os
3 | from dotenv import load_dotenv
4 |
5 | load_dotenv() # take environment variables from .env.
6 |
7 | class DiscordWriter:
8 | def __init__(self):
9 | self.webhookurl = os.getenv("DISCORD_WEBHOOK_URL")
10 | self.discord = Discord(url = self.webhookurl)
11 |
12 | async def send_discord_message(self, message):
13 | self.discord.post(content=message)
--------------------------------------------------------------------------------
/bitdata/notifiers/telegram.py:
--------------------------------------------------------------------------------
1 | import os
2 |
3 | from dotenv import load_dotenv
4 | from telegram import Bot
5 | from telegram.constants import ParseMode
6 |
7 | load_dotenv() # take environment variables from .env.
8 |
9 |
10 | class TelegramWriter:
11 | def __init__(
12 | self,
13 | ):
14 | self._token = os.getenv("BOT_TOKEN")
15 | self.chat_id = os.getenv("CHAT_ID")
16 | self.bot = Bot(token=self._token)
17 |
18 | async def send_telegram_message(self, message):
19 | if self.bot:
20 | await self.bot.send_message(
21 | chat_id=self.chat_id, text=message, parse_mode=ParseMode.MARKDOWN_V2
22 | )
23 |
--------------------------------------------------------------------------------
/bitdata/provider/__init__.py:
--------------------------------------------------------------------------------
1 | from .bitcoin_rpc import BitcoinRPC
2 | from .blockstream import BlockstreamProvider
3 |
--------------------------------------------------------------------------------
/bitdata/provider/bitcoin_rpc.py:
--------------------------------------------------------------------------------
1 | from time import sleep
2 |
3 | from bitcoinrpc.authproxy import AuthServiceProxy, JSONRPCException
4 | from loguru import logger
5 | from tqdm import tqdm
6 |
7 | from ..core.config import BitConfig
8 |
9 |
10 | class BitcoinRPC:
11 | def __init__(self, cfg: BitConfig):
12 | self.cfg = cfg
13 | self.url = f"http://{cfg.RPC_USER}:{cfg.RPC_PASSWORD}@{cfg.RPC_HOST}:{str(cfg.RPC_PORT)}"
14 | self.rpc_conn = AuthServiceProxy(self.url, timeout=120)
15 |
16 | def get_block_by_height(self, height: int):
17 | # Get the block hash from the height
18 | try:
19 | block_hash = self.rpc_conn.getblockhash(height)
20 | logger.info(f"Getting block {block_hash} at height {height}")
21 | except JSONRPCException:
22 | logger.error(f"Block at height {height} not found")
23 | return None
24 |
25 | return self.rpc_conn.getblock(block_hash)
26 |
27 | def get_last_block_height(self):
28 | try:
29 | block_height = self.rpc_conn.getblockcount()
30 | except JSONRPCException:
31 | logger.error("Unable to get block height")
32 | return None
33 |
34 | logger.info(f"Getting last block height {block_height}")
35 | return block_height
36 |
37 | def get_new_address(self):
38 | try:
39 | address = self.rpc_conn.getnewaddress()
40 | except JSONRPCException:
41 | logger.error("Unable to get new address")
42 | return None
43 |
44 | logger.info(f"Getting new address {address}")
45 | return address
46 |
47 | def get_transaction(self, txid: str):
48 | return self.rpc_conn.getrawtransaction(txid, True)
49 |
50 | def address_block_analytics(self, block: dict):
51 | # Create dictionaries to store the number and the amounts associated with each address type
52 | address_types_count = {}
53 | address_types_amount = {}
54 |
55 | for txid in tqdm(block["tx"]):
56 | tx = self.get_transaction(txid)
57 | for output in tx["vout"]:
58 | address_type = output["scriptPubKey"]["type"]
59 | address_types_count[address_type] = (
60 | address_types_count.get(address_type, 0) + 1
61 | )
62 | address_types_amount[address_type] = (
63 | address_types_amount.get(address_type, 0) + output["value"]
64 | )
65 |
66 | return {
67 | "address_types_count": address_types_count,
68 | "address_types_amount": address_types_amount,
69 | }
70 |
--------------------------------------------------------------------------------
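
A short usage sketch, assuming a local Bitcoin Core node reachable with the credentials from `.env`:

```python
from bitdata.core.config import BitConfig
from bitdata.provider.bitcoin_rpc import BitcoinRPC

cfg = BitConfig()
rpc = BitcoinRPC(cfg)

height = rpc.get_last_block_height()
if height is not None:
    block = rpc.get_block_by_height(height)
    if block:
        print(f"Block {height} contains {len(block['tx'])} transactions")
```
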
/bitdata/provider/blockstream.py:
--------------------------------------------------------------------------------
1 | import requests
2 |
3 |
4 | # Class that uses the Blockstream (Esplora) API to fetch blockchain data for analysis
5 | class BlockstreamProvider:
6 | def __init__(self, network="mainnet"):
7 | self.network = network
8 | if network == "testnet":
9 | self.base_url = "https://blockstream.info/testnet/api"
10 | else:
11 | self.base_url = "https://blockstream.info/api"
12 |
13 | def get_block(self, block_height):
14 | return requests.get(f"{self.base_url}/block-height/{block_height}").text  # note: Esplora returns the block *hash* at this height
15 |
16 | def get_last_height(self):
17 | return requests.get(f"{self.base_url}/blocks/tip/height").text
18 |
19 | def last_hash(self):
20 | return requests.get(f"{self.base_url}/blocks/tip/hash").text
21 |
22 | def get_last_n_blocks(self, n=10):
23 | # Esplora's /blocks endpoint returns the 10 most recent blocks (":start_height" in the docs is a placeholder, not literal URL syntax)
24 | blocks = self.parse_result(requests.get(f"{self.base_url}/blocks"))
25 | return blocks[:n] if blocks else None
26 |
27 | def get_raw_coinbase_transaction(self, block_hash: str = ""):
28 | coinbase_transaction_hash = requests.get(
29 | f"{self.base_url}/block/{block_hash}/txid/0"
30 | ).text
31 | coinbase_transaction_raw = requests.get(
32 | f"{self.base_url}/tx/{coinbase_transaction_hash}/raw"
33 | ).text
34 | return coinbase_transaction_raw
35 |
36 | def parse_result(self, result):
37 | if result.status_code == 200:
38 | return result.json()
39 | else:
40 | return None
41 |
42 |
43 | if __name__ == "__main__":
44 | print("BlockstreamProvider")
45 | # bp = BlockstreamProvider()
46 |
47 | # block = bp.get_block(0)
48 | # print(block)
49 |
50 | # blockchain_info = bp.get_blockchain_info()
51 | # print(blockchain_info)
52 |
--------------------------------------------------------------------------------
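
A usage sketch against the public Esplora endpoints (needs network access; output depends on the current tip):

```python
from bitdata.provider.blockstream import BlockstreamProvider

bp = BlockstreamProvider(network="mainnet")
print(bp.get_last_height())  # tip height, returned as text
print(bp.last_hash())        # tip block hash

blocks = bp.get_last_n_blocks(3)
if blocks:
    for b in blocks:
        print(b["height"], b["id"])  # Esplora block objects carry "height" and "id"
```
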
/bitdata/provider/mempool.py:
--------------------------------------------------------------------------------
1 | import time
2 | import requests
3 | from pydantic import BaseModel
4 |
5 |
6 | class LightningStats(BaseModel):
7 | id: int
8 | added: str
9 | channel_count: int
10 | node_count: int
11 | total_capacity: int
12 | tor_nodes: int
13 | clearnet_nodes: int
14 | unannounced_nodes: int
15 | avg_capacity: int
16 | avg_fee_rate: int
17 | avg_base_fee_mtokens: int
18 | med_capacity: int
19 | med_fee_rate: int
20 | med_base_fee_mtokens: int
21 | clearnet_tor_nodes: int
22 |
23 |
24 | class MempoolProvider:
25 | def __init__(self, network="mainnet") -> None:
26 | self.network = network
27 | if network == "testnet4":
28 | self.base_url = "https://mempool.space/testnet4/api"
29 | else:
30 | self.base_url = "https://mempool.space/api"
31 |
32 | def get_block_hash(self, block_height):
33 | """Fetches block hash by height."""
34 | response = requests.get(f"{self.base_url}/block-height/{block_height}")
35 | return response.text.strip()
36 |
37 | def get_block(self, block_hash):
38 | """Fetches block data by hash and returns it as a dictionary."""
39 | response = requests.get(f"{self.base_url}/block/{block_hash}")
40 | return self.parse_result(response)
41 |
42 | def get_last_height(self):
43 | """Fetches the height of the latest block."""
44 | response = requests.get(f"{self.base_url}/blocks/tip/height")
45 | return int(response.text.strip())
46 |
47 | def last_hash(self):
48 | """Fetches the hash of the latest block."""
49 | response = requests.get(f"{self.base_url}/blocks/tip/hash")
50 | return response.text.strip()
51 |
52 | def get_last_n_blocks(self, n=10):
53 | """Fetches the last n blocks."""
54 | latest_height = self.get_last_height()
55 | start_height = max(0, latest_height - n + 1)
56 | blocks = []
57 | for height in range(start_height, latest_height + 1):
58 | block_hash = self.get_block_hash(height)
59 | print(block_hash)
60 | time.sleep(1)
61 | block = self.get_block(block_hash)
62 | if block:
63 | blocks.append(block)
64 | return blocks
65 |
66 | def get_raw_coinbase_transaction(self, block_hash: str = ""):
67 | """Fetches the raw coinbase transaction from a given block hash."""
68 | coinbase_transaction_hash = requests.get(
69 | f"{self.base_url}/block/{block_hash}/txid/0"
70 | ).text
71 | coinbase_transaction_raw = requests.get(
72 | f"{self.base_url}/tx/{coinbase_transaction_hash}/raw"
73 | ).text
74 | return coinbase_transaction_raw
75 |
76 | def get_block_by_hash(self, block_hash):
77 | """Alias of get_block: fetches block data by hash."""
78 | return self.get_block(block_hash)
80 |
81 | def get_lightning_stats(self):
82 | """Returns network-wide stats such as total number of channels and nodes, total capacity, and average/median fee figures."""
83 | url = f"{self.base_url}/v1/lightning/statistics/latest"
84 | response = requests.get(url)
85 | result = self.parse_result(response)
86 | if not result:
87 | return None
88 |
89 | stats = result.get("latest", None)
90 | if not stats:
91 | return None
92 |
93 | try:
94 | return LightningStats(**stats)
95 | except Exception as e:
96 | print(e)
97 | return stats
98 |
99 | def parse_result(self, response):
100 | """Parses HTTP response into JSON if status code is 200."""
101 | if response.status_code == 200:
102 | return response.json()
103 | else:
104 | return None
105 |
106 |
107 | if __name__ == "__main__":
108 | mempool = MempoolProvider()
109 |
110 | # Example usage of the new methods
111 | block_height = 0
112 | print("Block at height", block_height, ":", mempool.get_block(mempool.get_block_hash(block_height)))
113 | print("Latest block height:", mempool.get_last_height())
114 | print("Latest block hash:", mempool.last_hash())
115 | print("Last 10 blocks:", mempool.get_last_n_blocks(10))
116 |
117 | block_hash = mempool.last_hash()
118 | print("Raw coinbase transaction from block hash", block_hash, ":", mempool.get_raw_coinbase_transaction(block_hash))
119 |
120 | stats = mempool.get_lightning_stats()
121 | print("Lightning network stats:", stats)
122 |
--------------------------------------------------------------------------------
/bitdata/provider/quiknode.py:
--------------------------------------------------------------------------------
1 | import requests
2 |
3 |
4 | class QuickNode:
5 | def __init__(self, api_url="https://docs-demo.btc.quiknode.pro/"):
6 | self.api_url = api_url
7 |
8 | def decode_raw_transaction(self, raw_transaction_hex):
9 | headers = {"Content-Type": "application/json"}
10 |
11 | # JSON data to be sent in the POST request
12 | data = {"method": "decoderawtransaction", "params": [raw_transaction_hex]}
13 |
14 | try:
15 | response = requests.post(self.api_url, json=data, headers=headers)
16 | if response.status_code == 200:
17 | decoded_transaction = response.json()
18 | return decoded_transaction
19 | else:
20 | print(f"Error: {response.status_code} - {response.text}")
21 | return None
22 | except Exception as e:
23 | print("Error:", e)
24 | return None
25 |
--------------------------------------------------------------------------------
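
A usage sketch for the demo endpoint; the raw hex is a placeholder that must be replaced with a full serialized transaction before the call succeeds:

```python
from bitdata.provider.quiknode import QuickNode

qn = QuickNode()  # defaults to the public docs-demo endpoint

raw_hex = "replace_with_full_raw_transaction_hex"  # placeholder, not a real tx
decoded = qn.decode_raw_transaction(raw_hex)
if decoded:
    print(decoded.get("result", decoded))
```
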
/dashboard/On-chain.py:
--------------------------------------------------------------------------------
1 | from datetime import datetime, timedelta
2 |
3 | import matplotlib.pyplot as plt
4 | import pandas as pd
5 | import plotly.express as px
6 | import requests
7 | import streamlit as st
8 | from PIL import Image
9 |
10 | # ------------- Title of the page -------------
11 | st.set_page_config(
12 | page_title="Bitcoin Blockchain live analysis", page_icon="₿", layout="wide"
13 | )
14 | # Title and bitcoin logos. a lot of them.
15 | st.title("Analisi in diretta di Bitcoin - BitPolito")
16 | bitcoin_logo = "https://upload.wikimedia.org/wikipedia/commons/thumb/4/46/Bitcoin.svg/1200px-Bitcoin.svg.png"
17 | bitpolito_logo = Image.open("dashboard/bitpolito_logo.png")
18 | col = st.columns(12)
19 | logos = [bitcoin_logo, bitpolito_logo] * 6
20 | for i in range(12):
21 | col[i].image(logos[i], width=50)
22 |
23 | TODO = """ TODO: personalize this
24 | # Configure CSS styles
25 | st.markdown('''
26 | ''', unsafe_allow_html=True)"""
61 |
62 | # ------------- Bitcoin Nodes -------------
63 | # create two columns
64 | col1, col2 = st.columns(2)
65 | # ----- on the first column put a map of the world with all the bitcoin nodes
66 | map_data = requests.get(
67 | "https://bitnodes.io/api/v1/snapshots/latest/?field=coordinates"
68 | )
69 | col1.header("Nodi Bitcoin nel mondo")
70 | map_data = pd.DataFrame(map_data.json()["coordinates"], columns=["lat", "lon"])
71 | col1.map(map_data, zoom=1, use_container_width=True)
72 | st.write("Fonte: https://bitnodes.io/")
73 |
74 | # ----- on the second column put some statistics about the nodes
75 | col2.header("Statistiche sui nodi")
76 | nodes_data = requests.get("https://bitnodes.io/api/v1/snapshots/latest/")
77 | nodes_data = nodes_data.json()
78 | # number of nodes
79 | col2.write(f"Nodi totali: **{nodes_data['total_nodes']}**")
80 | # top cities
81 | cities = {}
82 | for node in nodes_data["nodes"].values():
83 | if node[-3] not in cities:
84 | cities[node[-3]] = 1
85 | else:
86 | cities[node[-3]] += 1
87 | # sort cities by number of nodes
88 | cities = {
89 | k: v for k, v in sorted(cities.items(), key=lambda item: item[1], reverse=True)
90 | }
91 | cities.pop(None, None)  # some nodes carry no timezone info; drop that bucket safely
92 | # display top 10 cities in a bullet list
93 | col2.write("Top 10 città per numero di nodi:")
94 | for i, info in enumerate(list(cities)[:10]):
95 | city = info.split("/")[1].replace("_", " ")
96 | continent = info.split("/")[0]
97 | col2.write(f"{i+1}) {city} ({continent}): **{cities[info]} nodi**")
98 |
99 |
100 | # ------------- Date sidebar (for network data) -------------
101 | st.header("Statistiche sulla rete Bitcoin")
102 | # Define date range dropdown options
103 | date_ranges = {
104 | "All": 365 * 20,
105 | "Last 7 Days": 7,
106 | "Last 30 Days": 30,
107 | "Last 90 Days": 90,
108 | "Last Year": 365,
109 | "Last 5 Years": 365 * 5,
110 | }
111 | # Create a selectbox panel for date filters
112 | date_range = st.selectbox("Date Range", options=list(date_ranges.keys()))
113 | end_date = datetime.now().replace(hour=0, minute=0, second=0, microsecond=0)
114 | start_date = end_date - timedelta(days=date_ranges[date_range])
115 |
116 | # ------------- Load network data -------------
117 | def get_blockchaincom_data(url, col):
118 | data = requests.get(url).json()
119 | print(data.keys())
120 | df = pd.DataFrame(data["values"]).rename(columns={"x": "Date", "y": col})
121 | df["Date"] = pd.to_datetime(df["Date"], unit="s")
122 | df = df.sort_values(by="Date", ascending=False)
123 | return df
124 |
125 |
126 | def load_heavy_data():
127 | # Get historical BTC address data from Blockchain.com
128 | addr_url = (
129 | "https://api.blockchain.info/charts/n-unique-addresses?timespan=all&format=json"
130 | )
131 | addr_df = get_blockchaincom_data(addr_url, "Addresses")
132 |
133 | # Get historical BTC transaction data from Blockchain.com
134 | tx_url = (
135 | "https://api.blockchain.info/charts/n-transactions?timespan=all&format=json"
136 | )
137 | tx_df = get_blockchaincom_data(tx_url, "Transactions")
138 |
139 | # Get historical BTC hash rate data from Blockchain.com
140 | hs_url = "https://api.blockchain.info/charts/hash-rate?timespan=all&format=json"
141 | hs_df = get_blockchaincom_data(hs_url, "Hash")
142 |
143 | # Get latest and second to last block data from Blockchain.com
144 | lastblock = requests.get("https://blockchain.info/latestblock").json()
145 | second_to_last_block = requests.get(
146 | f'https://blockchain.info/block-height/{lastblock["height"]-1}?format=json'
147 | ).json()
148 |
149 | return addr_df, tx_df, hs_df, lastblock, second_to_last_block
150 |
151 |
152 | addr_df, tx_df, hash_df, lastblock, second_to_last_block = load_heavy_data()
153 | addr_df = addr_df.loc[
154 | (addr_df["Date"] >= pd.Timestamp(start_date))
155 | & (addr_df["Date"] <= pd.Timestamp(end_date))
156 | ]
157 | tx_df = tx_df.loc[
158 | (tx_df["Date"] >= pd.Timestamp(start_date))
159 | & (tx_df["Date"] <= pd.Timestamp(end_date))
160 | ]
161 | hash_df = hash_df.loc[
162 | (hash_df["Date"] >= pd.Timestamp(start_date))
163 | & (hash_df["Date"] <= pd.Timestamp(end_date))
164 | ]
165 |
166 |
167 | # ------------- Display network data in charts and metrics -------------
168 | col1, col2 = st.columns(2)
169 | # Create a line chart of hash rate
170 | with col1:
171 | chart_hash = px.line(
172 | hash_df,
173 | x="Date",
174 | y="Hash",
175 | title="Hash rate totale",
176 | color_discrete_sequence=["#071CD8"],
177 | )
178 | chart_hash.update_layout(yaxis_title="Hash rate Hash/s")
179 | st.plotly_chart(chart_hash, use_container_width=True)
180 | # Create some other values
181 | with col2:
182 | # metric for current hashrate
183 | current_hash = round(hash_df.iloc[0]["Hash"] / 10**9, 2)
184 | delta = round((hash_df.iloc[0]["Hash"] - hash_df.iloc[1]["Hash"]) / 10**9, 2)
185 | col2.metric(
186 | label="Hash rate attuale",
187 | value=f"{current_hash} TH/s",
188 | delta=f"{delta} TH/s rispetto a 3 giorni fa",
189 | )
190 | st.divider()
191 | # metric for current fees
192 | st.write("Commissioni (in sat/vB) per includere una transazione in ...")
193 | fees = requests.get("https://blockstream.info/api/fee-estimates").json()
194 | col2_1, col2_2, col2_3 = st.columns(3)
195 | col2_1.metric("1 blocco", f"{fees['1']:0.1f}")
196 | col2_2.metric("6 blocchi", f"{fees['6']:0.1f}")
197 | col2_3.metric("18 blocchi", f"{fees['18']:0.1f}")
198 | st.divider()
199 | # metric for latest block time
200 | time_since_last_block = datetime.now() - datetime.fromtimestamp(lastblock["time"])
201 | last_block_mining_time = datetime.fromtimestamp(
202 | lastblock["time"]
203 | ) - datetime.fromtimestamp(second_to_last_block["blocks"][0]["time"])
204 | m = "-" if last_block_mining_time.seconds > 10 * 60 else ""
205 |
206 | col2.metric(
207 | "Ultimo blocco minato ",
208 | f"{time_since_last_block.seconds//60} minuti e {time_since_last_block.seconds%60} secondi fa",
209 | f"{m}in {last_block_mining_time.seconds//60} minuti e {last_block_mining_time.seconds%60} secondi",
210 | )
211 | st.divider()
212 |
213 | # ------------- Display pools data in charts -------------
214 | pools = requests.get("https://api.blockchain.info/pools?timespan=7days").json()
215 | # sort json based on values
216 | pools = {k: v for k, v in sorted(pools.items(), key=lambda item: item[1], reverse=True)}
217 | # Extract the top 9 keys and values, and group all the others in a single key
218 | sizes = list(pools.values())[:9]
219 | labels = list(pools.keys())[:9]
220 | sizes.append(sum(list(pools.values())[9:]))
221 | labels.append("Others")
222 |
223 | explode = [0.2 if k == "Unknown" else 0 for k in labels]
224 | colors = [
225 | "#FFC300",
226 | "#0080FF",
227 | "#FF0000",
228 | "#00BFFF",
229 | "#FF4D4D",
230 | "#0052CC",
231 | "#800000",
232 | "#FF9500",
233 | "#FFEA00",
234 | "#4B0082",
235 | ]
236 | hatches = ["oo", "o", ".", "OO", "xx", "-", "..", "x", "O"]
237 |
238 | fig1, ax1 = plt.subplots(figsize=(2, 2))
239 | ax1.pie(
240 | sizes,
241 | autopct="%1.1f%%",
242 | pctdistance=1.25,
243 | explode=explode,
244 | colors=colors,
245 | hatch=hatches,
246 | textprops={"fontsize": 6},
247 | )
248 | ax1.legend(labels, loc="center left", bbox_to_anchor=(1.25, 0.5), fontsize=6)
249 | st.pyplot(fig1, use_container_width=False)
250 |
251 | # ------------- Display address and transaction data in graphs -------------
252 | col1, col2 = st.columns(2)
253 | # Create a line chart of daily addresses
254 | with col1:
255 | chart_txn = px.line(
256 | tx_df,
257 | x="Date",
258 | y="Transactions",
259 | title="Transazioni giornaliere",
260 | color_discrete_sequence=["#F7931A"],
261 | )
262 | chart_txn.update_layout(yaxis_title="Transactions")
263 | st.plotly_chart(chart_txn, use_container_width=True)
264 | # Create a line chart of daily transactions
265 | with col2:
266 | chart_addr = px.line(
267 | addr_df,
268 | x="Date",
269 | y="Addresses",
270 | title="Indirizzi attivi giornalieri",
271 | color_discrete_sequence=["#F7931A"],
272 | )
273 | chart_addr.update_layout(yaxis_title="Active Addresses")
274 | st.plotly_chart(chart_addr, use_container_width=True)
275 |
276 | st.write("Fonte: https://www.blockchain.info")
277 | st.write("Fonte: https://blockstream.info")
278 |
279 | preference = st.sidebar.radio("Cosa preferisci?", ("-seleziona-", "Bitcoin", "Fiat"))
280 |
281 | if preference == "Bitcoin":
282 | st.balloons()
283 | elif preference == "Fiat":
284 | st.snow()
285 |
--------------------------------------------------------------------------------
/dashboard/bitpolito_logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/BitPolito/bitcoin-data-analysis/404e705fe2bc8e6c900e75a3ac28bd27588b2bc4/dashboard/bitpolito_logo.png
--------------------------------------------------------------------------------
/dashboard/lightning.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/BitPolito/bitcoin-data-analysis/404e705fe2bc8e6c900e75a3ac28bd27588b2bc4/dashboard/lightning.gif
--------------------------------------------------------------------------------
/dashboard/pages/Lightning_Network.py:
--------------------------------------------------------------------------------
1 | import base64
2 |
3 | import requests
4 | import streamlit as st
5 | from PIL import Image
6 |
7 | from bitdata.provider.mempool import MempoolProvider
8 |
9 | # ------------- Title of the page -------------
10 | st.set_page_config(
11 | page_title="Bitcoin Blockchain live analysis", page_icon="₿", layout="wide"
12 | )
13 | # Title and bitcoin logos. a lot of them.
14 | st.title("Analisi in diretta di Lightning Network - BitPolito")
15 | bitcoin_logo = "https://upload.wikimedia.org/wikipedia/commons/thumb/4/46/Bitcoin.svg/1200px-Bitcoin.svg.png"
16 | bitpolito_logo = Image.open("dashboard/bitpolito_logo.png")
17 | col = st.columns(12)
18 | logos = [bitcoin_logo, bitpolito_logo] * 6
19 | for i in range(12):
20 | col[i].image(logos[i], width=50)
21 |
22 |
23 | # Lightning Network Stats
24 | ln_stats = MempoolProvider().get_lightning_stats()
25 | # ln_sats: id=37293 added='2023-06-02T00:00:00.000Z' channel_count=70378 node_count=15700 total_capacity=536810389159 tor_nodes=11095 clearnet_nodes=2167 unannounced_nodes=1000 avg_capacity=7627531 avg_fee_rate=547 avg_base_fee_mtokens=850 med_capacity=2000000 med_fee_rate=40 med_base_fee_mtokens=125 clearnet_tor_nodes=1438
26 |
27 | st.metric(label="Total number of nodes", value=ln_stats.node_count)
28 | st.metric(label="Total number of channels", value=ln_stats.channel_count)
29 | st.metric(label="Total capacity", value=ln_stats.total_capacity)
30 | st.metric(label="Tor nodes", value=ln_stats.tor_nodes)
31 | st.metric(label="Clearnet nodes", value=ln_stats.clearnet_nodes)
32 | st.metric(label="Unannounced nodes", value=ln_stats.unannounced_nodes)
33 | st.metric(label="Average capacity", value=ln_stats.avg_capacity)
34 | st.metric(label="Average fee rate", value=ln_stats.avg_fee_rate)
35 |
36 |
37 | st.header("Numero totale di nodi e canali")
38 | st.subheader("Numero totale di nodi")
39 | st.write(f"Il numero totale di nodi è pari a: {ln_stats.node_count}")
40 | st.write(f"Il numero totale di canali è pari a: {ln_stats.channel_count}")
41 |
42 | # Lightning Graph
43 |
44 | st.header("Lightning Network Graph")
45 | # Add description
46 | st.expander(
47 | """
48 | Li
49 | """
50 | )
51 |
52 | # Add iframe graph
53 | width = 800
54 | height = 600
55 | lngraph = f''
56 |
57 | with st.expander("Lightning Network Graph"):
58 | st.markdown(lngraph, unsafe_allow_html=True)
59 |
--------------------------------------------------------------------------------
/grafana/docker-compose.yml:
--------------------------------------------------------------------------------
1 | services:
2 | exporter:
3 | build: exporter/.
4 | ports:
5 | - "9000:9000"
6 | networks:
7 | - umbrel_main_network
8 | restart: on-failure
9 |
10 | prometheus:
11 | build: prometheus/.
12 | ports:
13 | - "9090:9090"
14 | networks:
15 | - umbrel_main_network
16 | restart: on-failure
17 |
18 | grafana:
19 | build: grafana/.
20 | ports:
21 | - "11000:11000"
22 | volumes:
23 | - grafana_data:/var/lib/grafana
24 | - ./grafana/provisioning/dashboards:/etc/grafana/provisioning/dashboards
25 | - ./grafana/provisioning/datasources:/etc/grafana/provisioning/datasources
26 | networks:
27 | - umbrel_main_network
28 | restart: on-failure
27 |
28 | networks:
29 | umbrel_main_network:
30 | name: umbrel_main_network
31 | external: true
32 |
33 | volumes:
34 | grafana_data:
--------------------------------------------------------------------------------
/grafana/exporter/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM python:3.9
2 |
3 | ADD client.py .
4 | ADD btc_conf.py .
5 |
6 | RUN pip install prometheus_client python-bitcoinlib requests
7 |
8 | CMD ["python", "./client.py"]
--------------------------------------------------------------------------------
/grafana/exporter/btc_conf.py:
--------------------------------------------------------------------------------
1 | import os
2 | RPC_SCHEME = "http"
3 | RPC_HOST = "localhost"
4 | RPC_PORT = "8332"
5 | RPC_USER = "Put_here_your_btc_server_username"
6 | RPC_PASSWORD = "put_here_your_btc_server_password"
7 | CONF_PATH = os.environ.get("BITCOIN_CONF_PATH")
8 |
9 | TIMEOUT = 30
--------------------------------------------------------------------------------
/grafana/exporter/client.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/python3
2 |
3 | # TODO
4 | # Implement subprocess (Priority: High) --> not needed if BITCOIN_CLI = 0 is used; fast enough
5 | # Implement a signal handler (e.g., ctrl+c to exit the process gracefully) (Priority: Low)
6 |
7 | from prometheus_client import start_http_server
8 | import prometheus_client as prom
9 | import time
10 | import json
11 |
12 | from bitcoin.rpc import Proxy
13 | from typing import Union
14 | from typing import Dict
15 | from typing import Any
16 | from typing import List
17 | from btc_conf import *
18 |
19 |
20 | import socket
21 | import requests
22 |
23 | import os
24 |
25 | PORT = 9000
26 | DEBUG = 0
27 | BITCOIN_CLI = 0
28 |
29 |
30 | BITCOIN_INTERFACE = "~/umbrel/scripts/app compose bitcoin exec bitcoind bitcoin-cli"
31 |
32 | # Utilities
33 | NUMBER_OF_REQUESTS = prom.Counter('Requests', 'Number of requests made to BTC container')
34 | WARNINGS = prom.Counter("bitcoin_warnings", "Number of warning generated from btc node")
35 | # BTC server info
36 | SERVER_VERSION = prom.Gauge('bitcoin_server_info', 'Version of Bitcoin server')
37 | PROTOCOL_VERSION = prom.Gauge('bitcoin_protocol_version', 'Bitcoin protocol number')
38 | SIZE = prom.Gauge('size_on_disk', 'Blockchain size on disk')
39 | UP_TIME = prom.Gauge('up_time', 'Server uptime value')
40 | VERIFICATION_PROGRESS = prom.Gauge("verification_progress", "transaction's verification progress")
41 |
42 | #Metrics
43 | BLOCKS = prom.Gauge('bitcoin_blocks', 'Bitcoin blocks count')
44 | PEERS = prom.Gauge('node_peers', 'Number of peers connected to the btc node')
45 | DIFFICULTY = prom.Gauge('blockchain_stats_difficulty', 'Bitcoin blockchain current difficulty')
46 | CONNECTION_IN = prom.Gauge('connections_in', 'connections in to node')
47 | CONNECTION_OUT = prom.Gauge('connections_out', 'connections outgoing from node')
48 | BAN_CREATED = prom.Gauge("ban_created", "Time the ban was created", labelnames=["address", "reason"])
49 | BANNED_UNTIL = prom.Gauge("banned_until", "Time the ban expires", labelnames=["address", "reason"])
50 | TXCOUNT = prom.Gauge('tx_count', 'total tx')
51 | NUM_CHAINTIPS = prom.Gauge('num_chains', 'number of chains on the node')  # differs per node: orphan chains are not synced, so it depends on sync time
52 | MEMINFO_USED = prom.Gauge("meminfo_used", "Number of bytes used")
53 | MEMINFO_FREE = prom.Gauge("meminfo_free", "Number of bytes available")
54 | MEMINFO_TOTAL = prom.Gauge("meminfo_total", "Number of bytes managed")
55 | MEMINFO_LOCKED = prom.Gauge("meminfo_locked", "Number of bytes locked")
56 | MEMINFO_CHUNKS_USED = prom.Gauge("meminfo_chunks_used", "Number of allocated chunks")
57 | MEMINFO_CHUNKS_FREE = prom.Gauge("meminfo_chunks_free", "Number of unused chunks")
58 |
59 | MEMPOOL_BYTES = prom.Gauge("mempool_bytes", "Size of mempool in bytes")
60 | MEMPOOL_SIZE = prom.Gauge("mempool_size", "Number of unconfirmed transactions in mempool")
61 | MEMPOOL_USAGE = prom.Gauge("mempool_usage", "Total memory usage for the mempool")
62 | MEMPOOL_UNBROADCAST = prom.Gauge("mempool_unbroadcast", "Number of transactions waiting for acknowledgment")
63 |
64 | TOTAL_BYTES_RECV = prom.Gauge("total_bytes_recv", "Total bytes received")
65 | TOTAL_BYTES_SENT = prom.Gauge("total_bytes_sent", "Total bytes sent")
66 |
67 |
68 | def rpc_client_slave():
69 | use_conf = ((CONF_PATH is not None) or (RPC_USER is None) or (RPC_PASSWORD is None))
70 | if use_conf:
71 | return lambda: Proxy(btc_conf_file=CONF_PATH, timeout=TIMEOUT)
72 | else:
73 | host = RPC_HOST
74 | host = "{}:{}@{}".format(RPC_USER, RPC_PASSWORD, host)
75 | if RPC_PORT:
76 | host = "{}:{}".format(host, RPC_PORT)
77 | service_url = "{}://{}".format(RPC_SCHEME, host)
78 | return lambda: Proxy(service_url=service_url, timeout=TIMEOUT)
79 |
80 |
81 | def rpc_client_master():
82 | return rpc_client_slave()()
83 |
84 |
85 | def bitcoinrpc(*args):
86 | result = rpc_client_master().call(*args)
87 | return result
88 |
89 |
90 |
91 | def request(command):
92 | NUMBER_OF_REQUESTS.inc()
93 | if BITCOIN_CLI:
94 | request = BITCOIN_INTERFACE + " " + command
95 | run = os.popen(request)
96 | response = run.read()
97 | response = json.loads(response)
98 | else:
99 | response = bitcoinrpc(command)
100 | return response
101 |
102 |
103 |
104 |
105 | def tryPort(port):
106 | sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
107 | result = False
108 | try:
109 | sock.bind((RPC_HOST, port))
110 | result = True
111 | except:
112 | print("Port is in use")
113 | sock.close()
114 | return result
115 |
116 |
117 | def wait_for_successful_connection(host,port):
118 | while True:
119 | try:
120 | r = requests.get("http://" + host + ":" + port)
121 | print("Connected")
122 | return
123 | except requests.exceptions.RequestException:
124 | time.sleep(1)  # back off briefly instead of busy-looping until the node answers
125 |
126 |
127 |
128 | if __name__ == '__main__':
129 |     #time.sleep(240)
130 |     start_http_server(PORT)
131 |     if DEBUG:
132 |         print("server started on port: " + str(PORT))
133 |         print(tryPort(8332))
134 |     wait_for_successful_connection(RPC_HOST, RPC_PORT)
135 | 
136 | 
137 |     while True:
138 |         #Poll the node and refresh every gauge
139 |         uptime = request("uptime")
140 |         if DEBUG:
141 |             print("uptime: ", uptime)
142 |             print("\n")
143 |         meminfo = request("getmemoryinfo")
144 |         meminfo = meminfo["locked"]  #getmemoryinfo nests its counters under "locked"
145 |         if DEBUG:
146 |             print("meminfo: ", meminfo)
147 |             print("\n")
148 |         blockchaininfo = request("getblockchaininfo")
149 |         if DEBUG:
150 |             print("blockchaininfo: ", blockchaininfo)
151 |             print("\n")
152 |         networkinfo = request("getnetworkinfo")
153 |         if DEBUG:
154 |             print("networkinfo: ", networkinfo)
155 |             print("\n")
156 |         chaintips = request("getchaintips")
157 |         if DEBUG:
158 |             print("chaintips: ", chaintips)
159 |             print("\n")
160 |         mempool = request("getmempoolinfo")
161 |         if DEBUG:
162 |             print("mempool: ", mempool)
163 |             print("\n")
164 |         nettotals = request("getnettotals")
165 |         if DEBUG:
166 |             print("nettotals: ", nettotals)
167 |             print("\n")
168 |         txstats = request("getchaintxstats")
169 |         if DEBUG:
170 |             print("txstats: ", txstats)
171 |             print("\n")
172 |         banned = request("listbanned")  #Should be empty now :)
173 |         if DEBUG:
174 |             print("Banned: ", banned)
175 |             print("\n")
176 | 
177 |         #Extract sub-objects into the Prometheus gauges
178 |         UP_TIME.set(int(uptime))  #seconds since bitcoind started
179 |         BLOCKS.set(blockchaininfo["blocks"])
180 |         PEERS.set(networkinfo["connections"])
181 |         if "connections_in" in networkinfo:
182 |             CONNECTION_IN.set(networkinfo['connections_in'])
183 |         if "connections_out" in networkinfo:
184 |             CONNECTION_OUT.set(networkinfo['connections_out'])
185 |         DIFFICULTY.set(blockchaininfo['difficulty'])
186 | 
187 |         SERVER_VERSION.set(networkinfo['version'])
188 |         PROTOCOL_VERSION.set(networkinfo['protocolversion'])
189 |         SIZE.set(blockchaininfo['size_on_disk'])
190 |         VERIFICATION_PROGRESS.set(blockchaininfo['verificationprogress'])
191 | 
192 |         for Addban in banned:
193 |             BAN_CREATED.labels(address=Addban["address"], reason=Addban.get("ban_reason", "manually added")).set(Addban["ban_created"])
194 |             BANNED_UNTIL.labels(address=Addban["address"], reason=Addban.get("ban_reason", "manually added")).set(Addban["banned_until"])
195 | 
196 |         if networkinfo["warnings"]:
197 |             WARNINGS.inc()
198 | 
199 |         TXCOUNT.set(txstats["txcount"])
200 | 
201 |         NUM_CHAINTIPS.set(len(chaintips))
202 | 
203 |         MEMINFO_USED.set(meminfo["used"])
204 |         MEMINFO_FREE.set(meminfo["free"])
205 |         MEMINFO_TOTAL.set(meminfo["total"])
206 |         MEMINFO_LOCKED.set(meminfo["locked"])
207 |         MEMINFO_CHUNKS_USED.set(meminfo["chunks_used"])
208 |         MEMINFO_CHUNKS_FREE.set(meminfo["chunks_free"])
209 | 
210 |         MEMPOOL_BYTES.set(mempool["bytes"])
211 |         MEMPOOL_SIZE.set(mempool["size"])
212 |         MEMPOOL_USAGE.set(mempool["usage"])
213 |         if "unbroadcastcount" in mempool:
214 |             MEMPOOL_UNBROADCAST.set(mempool["unbroadcastcount"])
215 | 
216 |         TOTAL_BYTES_RECV.set(nettotals["totalbytesrecv"])
217 |         TOTAL_BYTES_SENT.set(nettotals["totalbytessent"])
218 | 
219 |         time.sleep(300)
--------------------------------------------------------------------------------
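
A quick way to sanity-check the exporter above is to scrape its /metrics endpoint the same way Prometheus does and confirm the gauges it defines are present. A minimal sketch, assuming the exporter is reachable on localhost:8335 (a hypothetical port; substitute the PORT configured in your .env) and that the requests and prometheus_client packages are installed:

import requests
from prometheus_client.parser import text_string_to_metric_families

EXPORTER_URL = "http://localhost:8335/metrics"  #hypothetical port; use your configured PORT

#a few gauges defined in client.py that should always be exported
EXPECTED = {"mempool_size", "tx_count", "total_bytes_recv", "meminfo_used"}

def check_exporter(url=EXPORTER_URL):
    #fetch the raw Prometheus exposition text
    text = requests.get(url, timeout=10).text
    #parse it into metric families and collect their names
    found = {family.name for family in text_string_to_metric_families(text)}
    missing = EXPECTED - found
    if missing:
        raise RuntimeError("exporter is up but missing metrics: {}".format(sorted(missing)))
    print("all expected metrics present")

if __name__ == "__main__":
    check_exporter()
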
/grafana/grafana/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM grafana/grafana
2 |
3 | COPY config/grafana.ini /etc/grafana/
--------------------------------------------------------------------------------
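
Once this image is built and started, Grafana should answer on the http_port set in the bundled grafana.ini (11000 below). A minimal readiness probe as a sketch, assuming the container's port is published on localhost; /api/health is Grafana's standard unauthenticated health endpoint:

import time
import requests

GRAFANA_HEALTH = "http://localhost:11000/api/health"  #http_port from config/grafana.ini

def wait_for_grafana(url=GRAFANA_HEALTH, attempts=30, delay=2):
    #poll until Grafana reports a healthy database, or give up
    for _ in range(attempts):
        try:
            r = requests.get(url, timeout=5)
            if r.ok and r.json().get("database") == "ok":
                print("grafana is healthy:", r.json())
                return True
        except requests.exceptions.RequestException:
            pass  #container may still be starting
        time.sleep(delay)
    return False

if __name__ == "__main__":
    raise SystemExit(0 if wait_for_grafana() else 1)
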
/grafana/grafana/config/grafana.ini:
--------------------------------------------------------------------------------
1 | ##################### Grafana Configuration Defaults #####################
2 | #
3 | # Do not modify this file in grafana installs
4 | #
5 |
6 | # possible values : production, development
7 | app_mode = production
8 |
9 | # instance name, defaults to HOSTNAME environment variable value or hostname if HOSTNAME var is empty
10 | instance_name = ${HOSTNAME}
11 |
12 | # force migration will run migrations that might cause data loss
13 | force_migration = false
14 |
15 | #################################### Paths ###############################
16 | [paths]
17 | # Path to where grafana can store temp files, sessions, and the sqlite3 db (if that is used)
18 | data = data
19 |
20 | # Temporary files in `data` directory older than given duration will be removed
21 | temp_data_lifetime = 24h
22 |
23 | # Directory where grafana can store logs
24 | logs = data/log
25 |
26 | # Directory where grafana will automatically scan and look for plugins
27 | plugins = data/plugins
28 |
29 | # folder that contains provisioning config files that grafana will apply on startup and while running.
30 | provisioning = conf/provisioning
31 |
32 | #################################### Server ##############################
33 | [server]
34 | # Protocol (http, https, h2, socket)
35 | protocol = http
36 |
37 | # Minimum TLS version allowed. By default, this value is empty. Accepted values are: TLS1.2, TLS1.3. If nothing is set, TLS1.2 is used.
38 | min_tls_version = ""
39 |
40 | # The ip address to bind to, empty will bind to all interfaces
41 | http_addr =
42 |
43 | # The http port to use
44 | http_port = 11000
45 |
46 | # The public facing domain name used to access grafana from a browser
47 | domain = localhost
48 |
49 | # Redirect to correct domain if host header does not match domain
50 | # Prevents DNS rebinding attacks
51 | enforce_domain = false
52 |
53 | # The full public facing url
54 | root_url = %(protocol)s://%(domain)s:%(http_port)s/
55 |
56 | # Serve Grafana from subpath specified in `root_url` setting. By default it is set to `false` for compatibility reasons.
57 | serve_from_sub_path = false
58 |
59 | # Log web requests
60 | router_logging = false
61 |
62 | # path to static files, relative to the working directory
63 | static_root_path = public
64 |
65 | # enable gzip
66 | enable_gzip = false
67 |
68 | # https certs & key file
69 | cert_file =
70 | cert_key =
71 |
72 | # Unix socket gid
73 | # Changing the gid of a file without privileges requires that the target group is in the group of the process and that the process is the file owner
74 | # It is recommended to set the gid as http server user gid
75 | # Not set when the value is -1
76 | socket_gid = -1
77 |
78 | # Unix socket mode
79 | socket_mode = 0660
80 |
81 | # Unix socket path
82 | socket = /tmp/grafana.sock
83 |
84 | # CDN Url
85 | cdn_url =
86 |
87 | # Sets the maximum time in minutes before timing out read of an incoming request and closing idle connections.
88 | # `0` means there is no timeout for reading the request.
89 | read_timeout = 0
90 |
91 | # This setting enables you to specify additional headers that the server adds to HTTP(S) responses.
92 | [server.custom_response_headers]
93 | #exampleHeader1 = exampleValue1
94 | #exampleHeader2 = exampleValue2
95 |
96 | #################################### GRPC Server #########################
97 | [grpc_server]
98 | network = "tcp"
99 | address = "127.0.0.1:10000"
100 | use_tls = false
101 | cert_file =
102 | key_file =
103 |
104 | #################################### Database ############################
105 | [database]
106 | # You can configure the database connection by specifying type, host, name, user and password
107 | # as separate properties or as one string using the url property.
108 |
109 | # Either "mysql", "postgres" or "sqlite3", it's your choice
110 | type = sqlite3
111 | host = 127.0.0.1:3306
112 | name = grafana
113 | user = root
114 | # If the password contains # or ; you have to wrap it with triple quotes. Ex """#password;"""
115 | password =
116 | # Use either URL or the previous fields to configure the database
117 | # Example: mysql://user:secret@host:port/database
118 | url =
119 |
120 | # Max idle conn setting default is 2
121 | max_idle_conn = 2
122 |
123 | # Max conn setting default is 0 (means not set)
124 | max_open_conn =
125 |
126 | # Connection Max Lifetime default is 14400 (means 14400 seconds or 4 hours)
127 | conn_max_lifetime = 14400
128 |
129 | # Set to true to log the sql calls and execution times.
130 | log_queries =
131 |
132 | # For "postgres", use either "disable", "require" or "verify-full"
133 | # For "mysql", use either "true", "false", or "skip-verify".
134 | ssl_mode = disable
135 |
136 | # Database drivers may support different transaction isolation levels.
137 | # Currently, only "mysql" driver supports isolation levels.
138 | # If the value is empty - driver's default isolation level is applied.
139 | # For "mysql" use "READ-UNCOMMITTED", "READ-COMMITTED", "REPEATABLE-READ" or "SERIALIZABLE".
140 | isolation_level =
141 |
142 | ca_cert_path =
143 | client_key_path =
144 | client_cert_path =
145 | server_cert_name =
146 |
147 | # For "sqlite3" only, path relative to data_path setting
148 | path = grafana.db
149 |
150 | # For "sqlite3" only. cache mode setting used for connecting to the database
151 | cache_mode = private
152 |
153 | # For "sqlite3" only. Enable/disable Write-Ahead Logging, https://sqlite.org/wal.html. Default is false.
154 | wal = false
155 |
156 | # For "mysql" only if migrationLocking feature toggle is set. How many seconds to wait before failing to lock the database for the migrations, default is 0.
157 | locking_attempt_timeout_sec = 0
158 |
159 | # For "sqlite" only. How many times to retry query in case of database is locked failures. Default is 0 (disabled).
160 | query_retries = 0
161 |
162 | # For "sqlite" only. How many times to retry transaction in case of database is locked failures. Default is 5.
163 | transaction_retries = 5
164 |
165 | # Set to true to add metrics and tracing for database queries.
166 | instrument_queries = false
167 |
168 | #################################### Cache server #############################
169 | [remote_cache]
170 | # Either "redis", "memcached" or "database"; default is "database"
171 | type = database
172 |
173 | # cache connectionstring options
174 | # database: will use Grafana primary database.
175 | # redis: config like redis server e.g. `addr=127.0.0.1:6379,pool_size=100,db=0,ssl=false`. Only addr is required. ssl may be 'true', 'false', or 'insecure'.
176 | # memcache: 127.0.0.1:11211
177 | connstr =
178 |
179 | # prefix prepended to all the keys in the remote cache
180 | prefix =
181 |
182 | # This enables encryption of values stored in the remote cache
183 | encryption =
184 |
185 | #################################### Data proxy ###########################
186 | [dataproxy]
187 |
188 | # This enables data proxy logging, default is false
189 | logging = false
190 |
191 | # How long the data proxy waits to read the headers of the response before timing out, default is 30 seconds.
192 | # This setting also applies to core backend HTTP data sources where query requests use an HTTP client with timeout set.
193 | timeout = 30
194 |
195 | # How long the data proxy waits to establish a TCP connection before timing out, default is 10 seconds.
196 | dialTimeout = 10
197 |
198 | # How many seconds the data proxy waits before sending a keepalive request.
199 | keep_alive_seconds = 30
200 |
201 | # How many seconds the data proxy waits for a successful TLS Handshake before timing out.
202 | tls_handshake_timeout_seconds = 10
203 |
204 | # How many seconds the data proxy will wait for a server's first response headers after
205 | # fully writing the request headers if the request has an "Expect: 100-continue"
206 | # header. A value of 0 will result in the body being sent immediately, without
207 | # waiting for the server to approve.
208 | expect_continue_timeout_seconds = 1
209 |
210 | # Optionally limits the total number of connections per host, including connections in the dialing,
211 | # active, and idle states. On limit violation, dials will block.
212 | # A value of zero (0) means no limit.
213 | max_conns_per_host = 0
214 |
215 | # The maximum number of idle connections that Grafana will keep alive.
216 | max_idle_connections = 100
217 |
218 | # How many seconds the data proxy keeps an idle connection open before timing out.
219 | idle_conn_timeout_seconds = 90
220 |
221 | # If enabled and user is not anonymous, data proxy will add X-Grafana-User header with username into the request.
222 | send_user_header = false
223 |
224 | # Limit the amount of bytes that will be read/accepted from responses of outgoing HTTP requests.
225 | response_limit = 0
226 |
227 | # Limits the number of rows that Grafana will process from SQL data sources.
228 | row_limit = 1000000
229 |
230 | # Sets a custom value for the `User-Agent` header for outgoing data proxy requests. If empty, the default value is `Grafana/` (for example `Grafana/9.0.0`).
231 | user_agent =
232 |
233 | #################################### Analytics ###########################
234 | [analytics]
235 | # Server reporting, sends usage counters to stats.grafana.org every 24 hours.
236 | # No ip addresses are being tracked, only simple counters to track
237 | # running instances, dashboard and error counts. It is very helpful to us.
238 | # Change this option to false to disable reporting.
239 | reporting_enabled = true
240 |
241 | # The name of the distributor of the Grafana instance. Ex hosted-grafana, grafana-labs
242 | reporting_distributor = grafana-labs
243 |
244 | # Set to false to disable all checks to https://grafana.com
245 | # for new versions of grafana. The check is used
246 | # in some UI views to notify that a grafana update exists.
247 | # This option does not cause any auto updates, nor does it send any information;
248 | # it only makes a GET request to https://raw.githubusercontent.com/grafana/grafana/main/latest.json to get the latest version.
249 | check_for_updates = true
250 |
251 | # Set to false to disable all checks to https://grafana.com
252 | # for new versions of plugins. The check is used
253 | # in some UI views to notify that a plugin update exists.
254 | # This option does not cause any auto updates, nor does it send any information;
255 | # it only makes a GET request to https://grafana.com to get the latest versions.
256 | check_for_plugin_updates = true
257 |
258 | # Google Analytics universal tracking code, only enabled if you specify an id here
259 | google_analytics_ua_id =
260 |
261 | # Google Analytics 4 tracking code, only enabled if you specify an id here
262 | google_analytics_4_id =
263 |
264 | # When Google Analytics 4 Enhanced event measurement is enabled, we will try to avoid sending duplicate events and let Google Analytics 4 detect navigation changes, etc.
265 | google_analytics_4_send_manual_page_views = false
266 |
267 | # Google Tag Manager ID, only enabled if you specify an id here
268 | google_tag_manager_id =
269 |
270 | # Rudderstack write key, enabled only if rudderstack_data_plane_url is also set
271 | rudderstack_write_key =
272 |
273 | # Rudderstack data plane url, enabled only if rudderstack_write_key is also set
274 | rudderstack_data_plane_url =
275 |
276 | # Rudderstack SDK url, optional, only valid if rudderstack_write_key and rudderstack_data_plane_url is also set
277 | rudderstack_sdk_url =
278 |
279 | # Rudderstack Config url, optional, used by Rudderstack SDK to fetch source config
280 | rudderstack_config_url =
281 |
282 | # Intercom secret, optional, used to hash user_id before passing to Intercom via Rudderstack
283 | intercom_secret =
284 |
285 | # Application Insights connection string. Specify a URL string to enable this feature.
286 | application_insights_connection_string =
287 |
288 | # Optional. Specifies an Application Insights endpoint URL where the endpoint string is wrapped in backticks ``.
289 | application_insights_endpoint_url =
290 |
291 | # Controls if the UI contains any links to user feedback forms
292 | feedback_links_enabled = true
293 |
294 | #################################### Security ############################
295 | [security]
296 | # disable creation of admin user on first start of grafana
297 | disable_initial_admin_creation = false
298 |
299 | # default admin user, created on startup
300 | admin_user = admin
301 |
302 | # default admin password, can be changed before first start of grafana, or in profile settings
303 | admin_password = admin
304 |
305 | # default admin email, created on startup
306 | admin_email = admin@localhost
307 |
308 | # used for signing
309 | secret_key = SW2YcwTIb9zpOOhoPsMm
310 |
311 | # current key provider used for envelope encryption, default to static value specified by secret_key
312 | encryption_provider = secretKey.v1
313 |
314 | # list of configured key providers, space separated (Enterprise only): e.g., awskms.v1 azurekv.v1
315 | available_encryption_providers =
316 |
317 | # disable gravatar profile images
318 | disable_gravatar = false
319 |
320 | # data source proxy whitelist (ip_or_domain:port separated by spaces)
321 | data_source_proxy_whitelist =
322 |
323 | # disable protection against brute force login attempts
324 | disable_brute_force_login_protection = false
325 |
326 | # set to true if you host Grafana behind HTTPS. default is false.
327 | cookie_secure = false
328 |
329 | # set cookie SameSite attribute. defaults to `lax`. can be set to "lax", "strict", "none" and "disabled"
330 | cookie_samesite = lax
331 |
332 | # set to true if you want to allow browsers to render Grafana in a <frame>, <iframe>, <embed> or <object>. default is false.