├── backend
├── src
│ ├── __init__.py
│ ├── misc
│ │ ├── __init__.py
│ │ ├── airflow_utils.py
│ │ ├── jinja_helper.py
│ │ ├── agora.py
│ │ └── octant_lib
│ │ │ └── codegen
│ │ │ └── gql
│ │ │ └── README.md
│ ├── adapters
│ │ ├── __init__.py
│ │ ├── clients
│ │ │ ├── __init__.py
│ │ │ └── bigquery.py
│ │ ├── rpc_funcs
│ │ │ └── __init__.py
│ │ └── abstract_adapters.py
│ ├── queries
│ │ ├── __init__.py
│ │ └── postgres
│ │ │ ├── oli
│ │ │ ├── analytics_count_chain_tag_id.sql.j2
│ │ │ ├── analytics_count_by_attester.sql.j2
│ │ │ ├── analytics_all_attesters.sql.j2
│ │ │ ├── analytics_latest_by_attester.sql.j2
│ │ │ ├── analytics_totals.j2
│ │ │ └── extract_labels_for_review.sql.j2
│ │ │ ├── api
│ │ │ ├── quick_bites
│ │ │ │ ├── select_ethereum_blob_count_per_block.sql.j2
│ │ │ │ ├── hyperliquid_usdc_arb.sql.j2
│ │ │ │ ├── linea_profit_calculation.sql.j2
│ │ │ │ └── linea_burn.sql.j2
│ │ │ ├── select_fact_kpis_latest.sql.j2
│ │ │ ├── select_count_apps.sql.j2
│ │ │ ├── select_total_aa.sql.j2
│ │ │ ├── select_fact_kpis.sql.j2
│ │ │ ├── select_top_apps.sql.j2
│ │ │ ├── select_fact_kpis_multi_oks.sql.j2
│ │ │ ├── select_fact_kpis_achievements.sql.j2
│ │ │ ├── select_top_main_categories.sql.j2
│ │ │ ├── select_blockspace_main_categories.sql.j2
│ │ │ ├── select_tps_historical.sql.j2
│ │ │ ├── select_sum_metric_l2s.sql.j2
│ │ │ ├── select_fact_kpis_rolling.sql.j2
│ │ │ ├── select_sum_metric_l2s_rolling.sql.j2
│ │ │ ├── select_streak_today.sql.j2
│ │ │ ├── select_top_da_consumers_list.sql.j2
│ │ │ ├── select_tps_projected.sql.j2
│ │ │ ├── select_new_user_contracts.sql.j2
│ │ │ ├── select_rankings.sql.j2
│ │ │ ├── select_l2count_over_time.sql.j2
│ │ │ ├── select_top_da_consumers.sql.j2
│ │ │ └── select_highlights.sql.j2
│ │ │ ├── chain_metrics
│ │ │ ├── select_gas_per_second.sql.j2
│ │ │ ├── custom
│ │ │ │ ├── orbit_select_gas_per_second.sql.j2
│ │ │ │ └── mantle_select_gas_per_second.sql.j2
│ │ │ ├── select_fees_paid.sql.j2
│ │ │ ├── select_txcount_plain.sql.j2
│ │ │ ├── select_txcount_type4.sql.j2
│ │ │ ├── select_fees_paid_combined.sql.j2
│ │ │ ├── select_daa.sql.j2
│ │ │ ├── select_maa.sql.j2
│ │ │ ├── select_txcount.sql.j2
│ │ │ ├── select_waa.sql.j2
│ │ │ ├── select_qaa.sql.j2
│ │ │ ├── select_txcosts_median.sql.j2
│ │ │ ├── select_user_base_weekly.sql.j2
│ │ │ ├── upsert_fact_kpis_agg_ecosystem.sql.j2
│ │ │ ├── select_aa_lastXXd.sql.j2
│ │ │ ├── select_cca_last7d.sql.j2
│ │ │ ├── select_cca_weekly.sql.j2
│ │ │ ├── upsert_cca_weekly_multiple_l2s.sql.j2
│ │ │ ├── select_fees_paid_custom_gas.sql.j2
│ │ │ ├── select_total_stable_supply.sql.j2
│ │ │ └── select_txcosts_median_custom_gas.sql.j2
│ │ │ └── da_metrics
│ │ │ ├── celestia_da_blob_count.sql.j2
│ │ │ ├── celestia_da_data_posted_bytes.sql.j2
│ │ │ ├── celestia_da_fees_eth.sql.j2
│ │ │ ├── celestia_da_unique_blob_producers.sql.j2
│ │ │ ├── upsert_fact_kpis_celestia_chain_metrics.sql.j2
│ │ │ ├── upsert_fact_da_consumers_celestia_blob_count.sql.j2
│ │ │ └── upsert_fact_da_consumers_celestia_blob_size.sql.j2
│ ├── realtime
│ │ ├── __init__.py
│ │ └── sse_app_run
│ │ │ ├── requirements.txt
│ │ │ ├── Dockerfile
│ │ │ └── redis_keys.py
│ ├── api
│ │ └── og_resources
│ │ │ ├── gtp_logo.png
│ │ │ ├── og_backdrop.png
│ │ │ ├── quick-bites
│ │ │ ├── eth-supply.webp
│ │ │ ├── linea-burn.webp
│ │ │ ├── base-commerce.webp
│ │ │ ├── pectra-upgrade.webp
│ │ │ ├── ethereum-scaling.webp
│ │ │ ├── robinhood-stock.webp
│ │ │ ├── anniversary-report.webp
│ │ │ ├── arbitrum-timeboost.webp
│ │ │ └── arbitrum-hyperliquid-bridge.webp
│ │ │ └── icons
│ │ │ └── small
│ │ │ ├── x.svg
│ │ │ ├── gtp-arrowdown.svg
│ │ │ ├── gtp-arrowup.svg
│ │ │ ├── glo-dollar.svg
│ │ │ ├── gtp-ethereumlogo.svg
│ │ │ ├── gtp-checkmark-unchecked.svg
│ │ │ ├── gtp-chevrondown.svg
│ │ │ ├── gtp-chevronup.svg
│ │ │ ├── gtp-plus.svg
│ │ │ ├── gtp-table.svg
│ │ │ ├── farcaster.svg
│ │ │ ├── gtp-chevronright.svg
│ │ │ ├── gtp-chevronleft.svg
│ │ │ ├── gtp-calendar-clean.svg
│ │ │ ├── gtp-checkmark-single-select.svg
│ │ │ ├── gtp-tokeneth.svg
│ │ │ ├── gtp-close.svg
│ │ │ ├── gtp-layer2-maturity-early-phase.svg
│ │ │ ├── gtp-piechart.svg
│ │ │ ├── gtp-risk.svg
│ │ │ ├── gtp-compare.svg
│ │ │ ├── octant.svg
│ │ │ ├── gtp-metrics-activity.svg
│ │ │ ├── gtp-metrics-valuelocked.svg
│ │ │ ├── gtp-compass.svg
│ │ │ ├── gtp-blog.svg
│ │ │ ├── gtp-layer2-maturity-emerging.svg
│ │ │ ├── github.svg
│ │ │ ├── gtp-house.svg
│ │ │ ├── gtp-rank.svg
│ │ │ ├── gtp-email.svg
│ │ │ ├── gtp-metrics-economics.svg
│ │ │ ├── gtp-burger-menu.svg
│ │ │ ├── gtp-lock.svg
│ │ │ ├── gtp-filter.svg
│ │ │ ├── gtp-ethereum-weekly.svg
│ │ │ ├── gtp-download.svg
│ │ │ ├── gtp-blobs.svg
│ │ │ ├── gtp-night.svg
│ │ │ ├── giveth.svg
│ │ │ ├── gtp-wallet.svg
│ │ │ ├── gtp-info.svg
│ │ │ ├── gtp-metrics-total-value-locked.svg
│ │ │ ├── gtp-metrics-total-value-secured.svg
│ │ │ ├── gtp-metrics-totalvaluelocked.svg
│ │ │ ├── gtp-metrics-totalvaluesecured.svg
│ │ │ ├── gtp-notification.svg
│ │ │ ├── gtp-book-open.svg
│ │ │ └── gtp-layer2-maturity-developing.svg
│ └── oli
│ │ └── api
│ │ ├── requirements.txt
│ │ └── Dockerfile
├── tests
│ ├── __init__.py
│ ├── test_celo_handler.py
│ ├── test_main_config.py
│ ├── test_chain.py
│ ├── test_env_vars.py
│ ├── test_ethereum.py
│ ├── test_da_config.py
│ └── check_tx_types.py
├── airflow
│ └── dags
│ │ ├── utility
│ │ ├── utility_rpc_sync_check.py
│ │ ├── utility_dummy.py
│ │ ├── utility_healthcheck.py
│ │ ├── utility_cross_check.py
│ │ ├── utility_4byte.py
│ │ └── utility_db_backup.py
│ │ ├── api
│ │ ├── api_og_images.py
│ │ └── api_json_gen_sub_daily.py
│ │ ├── metrics
│ │ ├── metrics_defillama.py
│ │ ├── metrics_starknet_proof.py
│ │ ├── metrics_total_supply.py
│ │ ├── metrics_sql_blockspace.py
│ │ ├── metrics_coingecko.py
│ │ └── metrics_eigenDA.py
│ │ ├── oli
│ │ └── oli_mev_contract.py
│ │ ├── raw
│ │ ├── raw_imx.py
│ │ ├── raw_starknet.py
│ │ ├── raw_base.py
│ │ ├── raw_ink.py
│ │ ├── raw_celo.py
│ │ ├── raw_lisk.py
│ │ ├── raw_mode.py
│ │ ├── raw_plume.py
│ │ ├── raw_zora.py
│ │ ├── raw_blast.py
│ │ ├── raw_gravity.py
│ │ ├── raw_linea.py
│ │ └── raw_manta.py
│ │ └── other
│ │ └── other_octant.py
└── .env.example
├── FUNDING.json
├── requirements_test.in
├── LICENSE
└── .gitignore
/backend/src/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/backend/src/misc/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/backend/src/adapters/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/backend/src/queries/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/backend/src/realtime/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/backend/src/adapters/clients/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/backend/src/adapters/rpc_funcs/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/backend/src/realtime/sse_app_run/requirements.txt:
--------------------------------------------------------------------------------
1 | aiohttp>=3.8.0
2 | aiohttp-cors>=0.7.0
3 | redis>=4.5.0
4 | requests>=2.28.2
--------------------------------------------------------------------------------
/backend/src/api/og_resources/gtp_logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/growthepie/gtp-backend/HEAD/backend/src/api/og_resources/gtp_logo.png
--------------------------------------------------------------------------------
/backend/src/api/og_resources/og_backdrop.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/growthepie/gtp-backend/HEAD/backend/src/api/og_resources/og_backdrop.png
--------------------------------------------------------------------------------
/backend/src/api/og_resources/quick-bites/eth-supply.webp:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/growthepie/gtp-backend/HEAD/backend/src/api/og_resources/quick-bites/eth-supply.webp
--------------------------------------------------------------------------------
/backend/src/api/og_resources/quick-bites/linea-burn.webp:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/growthepie/gtp-backend/HEAD/backend/src/api/og_resources/quick-bites/linea-burn.webp
--------------------------------------------------------------------------------
/backend/src/api/og_resources/quick-bites/base-commerce.webp:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/growthepie/gtp-backend/HEAD/backend/src/api/og_resources/quick-bites/base-commerce.webp
--------------------------------------------------------------------------------
/backend/src/api/og_resources/quick-bites/pectra-upgrade.webp:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/growthepie/gtp-backend/HEAD/backend/src/api/og_resources/quick-bites/pectra-upgrade.webp
--------------------------------------------------------------------------------
/backend/src/api/og_resources/quick-bites/ethereum-scaling.webp:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/growthepie/gtp-backend/HEAD/backend/src/api/og_resources/quick-bites/ethereum-scaling.webp
--------------------------------------------------------------------------------
/backend/src/api/og_resources/quick-bites/robinhood-stock.webp:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/growthepie/gtp-backend/HEAD/backend/src/api/og_resources/quick-bites/robinhood-stock.webp
--------------------------------------------------------------------------------
/backend/src/api/og_resources/quick-bites/anniversary-report.webp:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/growthepie/gtp-backend/HEAD/backend/src/api/og_resources/quick-bites/anniversary-report.webp
--------------------------------------------------------------------------------
/backend/src/api/og_resources/quick-bites/arbitrum-timeboost.webp:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/growthepie/gtp-backend/HEAD/backend/src/api/og_resources/quick-bites/arbitrum-timeboost.webp
--------------------------------------------------------------------------------
/backend/src/oli/api/requirements.txt:
--------------------------------------------------------------------------------
1 | fastapi==0.120.1
2 | uvicorn==0.38.0
3 | asyncpg==0.30.0
4 | pydantic==2.11.3
5 | eth-account
6 | eth-abi
7 | eth-utils
8 | python-dotenv
9 | oli-python==2.0.3
--------------------------------------------------------------------------------
/backend/src/api/og_resources/quick-bites/arbitrum-hyperliquid-bridge.webp:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/growthepie/gtp-backend/HEAD/backend/src/api/og_resources/quick-bites/arbitrum-hyperliquid-bridge.webp
--------------------------------------------------------------------------------
/backend/src/queries/postgres/oli/analytics_count_chain_tag_id.sql.j2:
--------------------------------------------------------------------------------
1 | SELECT
2 | chain_id,
3 | tag_id,
4 | COUNT(*) as row_count
5 | FROM public.labels
6 | GROUP BY chain_id, tag_id
7 | ORDER BY chain_id, tag_id;
--------------------------------------------------------------------------------
/backend/src/queries/postgres/oli/analytics_count_by_attester.sql.j2:
--------------------------------------------------------------------------------
1 | SELECT
2 | chain_id,
3 | tag_id,
4 | COUNT(*) as row_count
5 | FROM public.labels
6 | WHERE attester = decode('{{ attester }}', 'hex')
7 | GROUP BY chain_id, tag_id
8 | ORDER BY chain_id ASC, tag_id DESC;
--------------------------------------------------------------------------------
/backend/src/queries/postgres/oli/analytics_all_attesters.sql.j2:
--------------------------------------------------------------------------------
1 | SELECT
2 | encode(attester, 'hex') AS attester,
3 | MAX("time") AS last_time_created,
4 | MAX(revocation_time) AS last_time_revoked
5 | FROM public.attestations
6 | WHERE
7 | revocation_time >= '2000-01-01' or revocation_time is null
8 | GROUP BY attester
9 | ORDER BY last_time_created DESC;
--------------------------------------------------------------------------------
/FUNDING.json:
--------------------------------------------------------------------------------
1 | {
2 | "opRetro": {
3 | "projectId": "0xa38f3efb4fb8f6fcefb80f0262645ac05d5548cad0308ee49520c48c4e8cbd1f"
4 | },
5 | "drips": {
6 | "ethereum": {
7 | "ownedBy": "0x9438b8B447179740cD97869997a2FCc9b4AA63a2"
8 | },
9 | "metis": {
10 | "ownedBy": "0x9438b8B447179740cD97869997a2FCc9b4AA63a2"
11 | }
12 | }
13 | }
14 |
--------------------------------------------------------------------------------
/backend/src/realtime/sse_app_run/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM python:3.11-slim
2 |
3 | # Set working directory
4 | WORKDIR /app
5 |
6 | # Install dependencies
7 | COPY requirements.txt .
8 | RUN pip install --no-cache-dir -r requirements.txt
9 |
10 | # Copy application code
11 | COPY server.py .
12 | COPY history_utils.py redis_keys.py /app/
13 |
14 | # Expose port
15 | EXPOSE 8080
16 |
17 | # Run the application
18 | CMD ["python", "server.py"]
--------------------------------------------------------------------------------
/backend/src/queries/postgres/api/quick_bites/select_ethereum_blob_count_per_block.sql.j2:
--------------------------------------------------------------------------------
1 | select
2 | date,
3 | value / (5*60*24) as blob_count,
4 | case
5 | when date >= '2026-01-07' then 14
6 | when date >= '2025-12-09' then 10
7 | when date >= '2025-05-07' then 6
8 | else 3
9 | end as blob_target
10 | from fact_kpis
11 | where metric_key = 'da_blob_count' and origin_key = 'da_ethereum_blobs'
12 | and date >= '2024-06-01'
13 | and date < current_date
14 | order by date desc
--------------------------------------------------------------------------------
/backend/src/queries/postgres/api/select_fact_kpis_latest.sql.j2:
--------------------------------------------------------------------------------
1 | {#
2 | A query to get the latest data from the fact_kpis table
3 |
4 | Parameters:
5 | - metric_key: The metric key to consider for data extraction.
6 | - origin_key: The origin key to consider for data extraction.
7 | #}
8 |
9 | SELECT
10 | fk."date",
11 | fk.value
12 | FROM public.fact_kpis fk
13 | WHERE fk.metric_key = '{{ metric_key }}'
14 | AND fk.origin_key = '{{ origin_key }}'
15 | AND fk."date" = current_date - INTERVAL '1 days'
16 | ORDER BY fk."date" DESC
--------------------------------------------------------------------------------
/backend/src/queries/postgres/api/select_count_apps.sql.j2:
--------------------------------------------------------------------------------
1 | {#
2 | A query to get the count of active apps on a chain
3 |
4 | Parameters:
5 | - days: default = 30; The time interval (in days) to consider for data extraction.
6 | - origin_keys: The origin_keys to filter the data.
7 | #}
8 |
9 | {% set days = days | default(30) %}
10 |
11 |
12 | SELECT count(distinct owner_project) as active_apps
13 | FROM public.vw_apps_contract_level_materialized
14 | where origin_key IN ( '{{ origin_keys | join("', '") }}' )
15 | and date > current_date - interval '{{days}} days'
--------------------------------------------------------------------------------
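Usage note (sketch, not a file in the repository): origin_keys is expected as a Python list; the Jinja join filter above expands it into the quoted IN list when the template is rendered. A minimal rendering example, with hypothetical chain keys:

    from jinja2 import Environment, FileSystemLoader, StrictUndefined

    env = Environment(loader=FileSystemLoader('src/queries/postgres'), undefined=StrictUndefined)
    sql = env.get_template('api/select_count_apps.sql.j2').render(
        origin_keys=['base', 'optimism'],  # hypothetical chain keys
        days=30,
    )
    # the filter renders as: origin_key IN ( 'base', 'optimism' )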
/backend/src/api/og_resources/icons/small/x.svg:
--------------------------------------------------------------------------------
[SVG markup omitted]
--------------------------------------------------------------------------------
/backend/src/queries/postgres/chain_metrics/select_gas_per_second.sql.j2:
--------------------------------------------------------------------------------
1 | {#
2 | A query to find the average gas used per second over the last 'days' days.
3 |
4 | Parameters:
5 | - origin_key: The name of the chain to identify the table.
6 | - days: default = 7; The time interval (in days) to consider for the data extraction.
7 | #}
8 |
9 | {% set days = days | default(7) %}
10 |
11 | SELECT
12 | block_date AS day,
13 | SUM(gas_used) / (24 * 60 * 60) AS value
14 | FROM {{ origin_key }}_tx
15 | WHERE block_date >= current_date - interval '{{ days }} days'
16 | AND block_date < current_date
17 | GROUP BY 1;
--------------------------------------------------------------------------------
/backend/src/queries/postgres/chain_metrics/custom/orbit_select_gas_per_second.sql.j2:
--------------------------------------------------------------------------------
1 | {#
2 | A query to find the average gas used per second over the last 'days' days.
3 |
4 | Parameters:
5 | - origin_key: The name of the chain to identify the table.
6 | - days: default = 7; The time interval (in days) to consider for the data extraction.
7 | #}
8 |
9 | {% set days = days | default(7) %}
10 |
11 | SELECT
12 | block_date AS day,
13 | SUM(gas_used - l1_gas_used) / (24*60*60) AS value
14 | FROM {{ origin_key }}_tx
15 | WHERE block_date >= current_date - interval '{{ days }} days'
16 | AND block_date < current_date
17 | GROUP BY 1;
--------------------------------------------------------------------------------
/backend/src/queries/postgres/oli/analytics_latest_by_attester.sql.j2:
--------------------------------------------------------------------------------
1 | SELECT
2 | '0x' || encode(uid, 'hex') as id,
3 | '0x' || encode(attester, 'hex') as attester,
4 | '0x' || encode(recipient, 'hex') as recipient,
5 | is_offchain,
6 | revoked,
7 | ipfs_hash,
8 | CASE
9 | WHEN tx_hash IS NULL OR tx_hash = '\x'::bytea OR length(tx_hash) = 0
10 | THEN NULL
11 | ELSE '0x' || encode(tx_hash, 'hex')
12 | END as tx_id,
13 | chain_id,
14 | tags_json,
15 | "time" AS time_created
16 | FROM public.attestations
17 | WHERE attester = decode('{{ attester }}', 'hex')
18 | ORDER BY time_created DESC
19 | LIMIT {{ take }};
--------------------------------------------------------------------------------
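Usage note (sketch, not a file in the repository): decode('{{ attester }}', 'hex') expects a bare hex string, so a 0x-prefixed attester address should have the prefix stripped before the template is rendered (the query re-adds the 0x prefix in its output columns). The address below is a hypothetical placeholder:

    attester_address = "0x1234567890abcdef1234567890abcdef12345678"  # hypothetical attester
    params = {
        "attester": attester_address.lower().removeprefix("0x"),  # bare hex, as decode(..., 'hex') expects
        "take": 100,
    }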
/backend/src/queries/postgres/oli/analytics_totals.j2:
--------------------------------------------------------------------------------
1 | SELECT
2 | (SELECT COUNT(*) FROM public.labels) AS count_tags,
3 | (SELECT COUNT(*) FROM public.attestations) AS count_attestations,
4 | (SELECT COUNT(*) FROM public.attestations WHERE revoked = true) AS revoked_count_attestations,
5 | (SELECT COUNT(*) FROM public.labels WHERE is_offchain = true) AS offchain_count_tags,
6 | (SELECT COUNT(*) FROM public.attestations WHERE is_offchain = true) AS offchain_count_attestations,
7 | (SELECT COUNT(*) FROM public.labels WHERE is_offchain = false) AS onchain_count_tags,
8 | (SELECT COUNT(*) FROM public.attestations WHERE is_offchain = false) AS onchain_count_attestations;
--------------------------------------------------------------------------------
/backend/tests/__init__.py:
--------------------------------------------------------------------------------
1 | # This file makes the tests directory a proper Python package
2 | import sys
3 | import os
4 |
5 | def setup_test_imports():
6 |     """
7 |     Set up the Python path for test imports.
8 |     Call this function at the beginning of each test file.
9 |     """
10 |     # Get the absolute path to the backend directory (parent of tests)
11 |     backend_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
12 |     # Add it to the Python path
13 |     if backend_dir not in sys.path:
14 |         sys.path.insert(0, backend_dir)
15 |
16 | # For backward compatibility, call the function when importing __init__
17 | setup_test_imports()
18 |
--------------------------------------------------------------------------------
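Example usage (sketch, not a file in the repository): how an individual test file might call the helper so that src.* imports resolve regardless of the working directory the tests are started from:

    from tests import setup_test_imports
    setup_test_imports()

    # backend/ is now on sys.path, so project imports work
    from src.misc.jinja_helper import execute_jinja_query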
/backend/src/queries/postgres/chain_metrics/select_fees_paid.sql.j2:
--------------------------------------------------------------------------------
1 | {#
2 | A query to sum up the transaction fees (tx_fee) that users paid to use the chain over the last 'days' days.
3 |
4 | metric_key = 'fees_paid_eth'
5 |
6 | Parameters:
7 | - origin_key: The name of the chain to identify the table.
8 | - days: default = 7; The time interval (in days) to consider for the data extraction.
9 | #}
10 |
11 | {% set days = days | default(7) %}
12 |
13 | SELECT
14 | block_date AS day,
15 | SUM(tx_fee) AS value
16 | FROM {{ origin_key }}_tx
17 | WHERE block_date >= current_date - interval '{{ days }} days'
18 | AND block_date < current_date
19 | GROUP BY 1;
--------------------------------------------------------------------------------
/backend/src/queries/postgres/da_metrics/celestia_da_blob_count.sql.j2:
--------------------------------------------------------------------------------
1 | {#
2 | A query to count the number of blobs posted on Celestia over the last 'days' days.
3 |
4 | metric: da_blob_count
5 |
6 | Parameters:
7 | - days: default = 7; The time interval (in days) to consider for data extraction.
8 | #}
9 |
10 | {% set days = days | default(7) %}
11 |
12 | SELECT
13 | "date" AS day,
14 | SUM(value) AS value -- number blobs
15 | FROM public.fact_da_consumers
16 | WHERE
17 | "date" >= current_date - interval '{{ days }} days'
18 | AND "date" < current_date
19 | AND metric_key = 'blob_count'
20 | AND da_layer = 'da_celestia'
21 | GROUP BY 1
22 | ORDER BY 1 DESC;
--------------------------------------------------------------------------------
/backend/src/queries/postgres/da_metrics/celestia_da_data_posted_bytes.sql.j2:
--------------------------------------------------------------------------------
1 | {#
2 | A query to sum all Celestia data posted bytes (blob sizes) over the last 'days' days.
3 |
4 | metric: da_data_posted_bytes
5 |
6 | Parameters:
7 | - days: default = 7; The time interval (in days) to consider for data extraction.
8 | #}
9 |
10 | {% set days = days | default(7) %}
11 |
12 | SELECT
13 | "date" AS day,
14 | SUM(value) AS value -- in bytes
15 | FROM public.fact_da_consumers
16 | WHERE
17 | "date" >= current_date - interval '{{ days }} days'
18 | AND "date" < current_date
19 | AND metric_key = 'blob_size_bytes'
20 | AND da_layer = 'da_celestia'
21 | GROUP BY 1
22 | ORDER BY 1 DESC;
--------------------------------------------------------------------------------
/backend/src/api/og_resources/icons/small/gtp-arrowdown.svg:
--------------------------------------------------------------------------------
[SVG markup omitted]
--------------------------------------------------------------------------------
/backend/src/queries/postgres/api/select_total_aa.sql.j2:
--------------------------------------------------------------------------------
1 | {#
2 | A query to get the total number of active addresses from the fact_active_addresses_hll table.
3 |
4 | Parameters:
5 | - origin_key: The origin key to consider for data extraction.
6 | - days (optional): Number of days to look back from the current date. If not provided, all historical data is considered.
7 | #}
8 |
9 | SELECT
10 | hll_cardinality(hll_union_agg(hll_addresses))::int AS value
11 | FROM fact_active_addresses_hll
12 | where origin_key = '{{ origin_key }}'
13 | {% if days is defined %}
14 | and date < current_date - interval '{{ days }} days'
15 | {% else %}
16 | and date < current_date
17 | {% endif %}
18 |
--------------------------------------------------------------------------------
/backend/src/api/og_resources/icons/small/gtp-arrowup.svg:
--------------------------------------------------------------------------------
[SVG markup omitted]
--------------------------------------------------------------------------------
/backend/src/queries/postgres/api/select_fact_kpis.sql.j2:
--------------------------------------------------------------------------------
1 | {#
2 | A query to get the data from the fact_kpis table
3 |
4 | Parameters:
5 | - days: default = 30; The time interval (in days) to consider for data extraction.
6 | - metric_key: The metric key to consider for data extraction.
7 | - origin_key: The origin key to consider for data extraction.
8 | #}
9 |
10 | {% set days = days | default(30) %}
11 |
12 | SELECT
13 | fk."date",
14 | fk.value
15 | FROM public.fact_kpis fk
16 | WHERE fk.metric_key = '{{ metric_key }}'
17 | AND fk.origin_key = '{{ origin_key }}'
18 | AND fk."date" >= current_date - INTERVAL '{{ days }} days'
19 | AND fk."date" < current_date
20 | ORDER BY fk."date" DESC
--------------------------------------------------------------------------------
/backend/tests/test_celo_handler.py:
--------------------------------------------------------------------------------
1 | try:
2 |     from src.misc.celo_handler import (
3 |         CeloWeb3Provider,
4 |         print_fee_currencies_and_rates
5 |     )
6 |
7 |     print("\nTesting CeloWeb3Provider singleton...")
8 |     try:
9 |         web3 = CeloWeb3Provider.get_instance()
10 |         print(f"Connected to Celo network: {web3.is_connected()}")
11 |     except Exception as e:
12 |         print(f"Error connecting to Celo network: {e}")
13 |
14 |     print("\nTesting print_fee_currencies_and_rates...")
15 |     try:
16 |         print_fee_currencies_and_rates()
17 |     except Exception as e:
18 |         print(f"Error printing fee currencies: {e}")
19 | except ImportError as e:
20 |     print(f"Import error: {e}")
21 | except Exception as e:
22 |     print(f"Unexpected error: {e}")
--------------------------------------------------------------------------------
/backend/src/queries/postgres/api/select_top_apps.sql.j2:
--------------------------------------------------------------------------------
1 | {#
2 | A query to get the top apps by txcount
3 |
4 | Parameters:
5 | - days: default = 30; The time interval (in days) to consider for data extraction.
6 | - origin_keys: A list of origin keys to consider for data extraction.
7 | - limit: The maximum number of apps to return (default is 2).
8 | #}
9 |
10 | {% set days = days | default(30) %}
11 | {% set limit = limit | default(2) %}
12 |
13 | SELECT owner_project, sum(txcount) as txcount
14 | FROM public.vw_apps_contract_level_materialized
15 | where origin_key IN ( '{{ origin_keys | join("', '") }}' )
16 | and date > current_date - interval '{{days}} days'
17 | group by 1
18 | order by 2 desc
19 | limit {{ limit }}
20 |
--------------------------------------------------------------------------------
/backend/src/queries/postgres/api/select_fact_kpis_multi_oks.sql.j2:
--------------------------------------------------------------------------------
1 | {#
2 | A query to get the data from the fact_kpis table
3 |
4 | Parameters:
5 | - days: default = 30; The time interval (in days) to consider for data extraction.
6 | - metric_key: The metric key to consider for data extraction.
7 | - origin_keys: A list of origin keys to consider for data extraction.
8 | #}
9 |
10 | {% set days = days | default(30) %}
11 |
12 | SELECT
13 | fk."date",
14 | fk.value
15 | FROM public.fact_kpis fk
16 | WHERE fk.metric_key = '{{ metric_key }}'
17 | AND fk.origin_key IN ( '{{ origin_keys | join("', '") }}' )
18 | AND fk."date" >= current_date - INTERVAL '{{ days }} days'
19 | AND fk."date" < current_date
20 | ORDER BY fk."date" DESC
--------------------------------------------------------------------------------
/backend/src/api/og_resources/icons/small/glo-dollar.svg:
--------------------------------------------------------------------------------
[SVG markup omitted]
--------------------------------------------------------------------------------
/backend/src/queries/postgres/chain_metrics/select_txcount_plain.sql.j2:
--------------------------------------------------------------------------------
1 | {#
2 | A query to aggregate the number of transactions for each day in the last 'days' days.
3 | This number is used to compare our DB values against other block explorers.
4 |
5 | metric_key = 'txcount_comparison', 'txcount'
6 |
7 | Parameters:
8 | - origin_key: The name of the chain to identify the table.
9 | - days: default = 7; The time interval (in days) to consider for the data extraction.
10 | #}
11 |
12 | {% set days = days | default(7) %}
13 |
14 | SELECT
15 | block_date AS day,
16 | COUNT(*) AS value
17 | FROM {{ origin_key }}_tx
18 | WHERE block_date >= current_date - interval '{{ days }} days'
19 | AND block_date < current_date
20 | GROUP BY 1;
--------------------------------------------------------------------------------
/backend/src/queries/postgres/chain_metrics/select_txcount_type4.sql.j2:
--------------------------------------------------------------------------------
1 | {#
2 | A query to aggregate the number of type 4 transactions for each day in the last 'days' days.
3 |
4 | metric_key = 'txcount_type4'
5 |
6 | Parameters:
7 | - origin_key: The name of the chain to identify the table.
8 | - days: default = 7; The time interval (in days) to consider for the data extraction.
9 | #}
10 |
11 | {% set days = days | default(7) %}
12 |
13 | SELECT
14 | date_trunc('day', block_timestamp) AS day,
15 | COUNT(*) AS value
16 | FROM {{ origin_key }}_tx
17 | WHERE
18 | block_date >= '2025-05-07'
19 | AND tx_type = '4'
20 | AND block_date >= current_date - interval '{{ days }} days'
21 | AND block_date < current_date
22 | GROUP BY 1;
--------------------------------------------------------------------------------
/backend/src/queries/postgres/chain_metrics/select_fees_paid_combined.sql.j2:
--------------------------------------------------------------------------------
1 | {#
2 | A query to sum the base and priority fees (tx_fee) that users paid to use the chain over the last 'days' days.
3 |
4 | metric_key = 'fees_paid_eth'
5 |
6 | Parameters:
7 | - origin_key: The name of the chain to identify the table.
8 | - days: default = 7; The time interval (in days) to consider for the data extraction.
9 | #}
10 |
11 | {% set days = days | default(7) %}
12 |
13 | SELECT
14 | date AS day,
15 | SUM(value) AS value
16 | FROM fact_kpis
17 | WHERE date >= current_date - interval '{{ days }} days'
18 | AND date < current_date
19 | AND metric_key in ('fees_paid_base_eth', 'fees_paid_priority_eth')
20 | AND origin_key = '{{ origin_key }}'
21 | GROUP BY 1;
--------------------------------------------------------------------------------
/backend/src/queries/postgres/chain_metrics/select_daa.sql.j2:
--------------------------------------------------------------------------------
1 | {#
2 | A query to aggregate the count of unique active addresses per day over a specified time window.
3 |
4 | metric_key = 'daa'
5 |
6 | Parameters:
7 | - origin_key: The key of the chain to filter the data by.
8 | - days: default = 7; The length of the time window (in days) to look back from the current date.
9 | #}
10 |
11 | {% set days = days | default(7) %}
12 |
13 | SELECT
14 | date_trunc('day', date) AS day,
15 | hll_cardinality(hll_union_agg(hll_addresses))::int AS value
16 | FROM fact_active_addresses_hll
17 | WHERE
18 | origin_key = '{{ origin_key }}'
19 | AND date < date_trunc('day', current_date)
20 | AND date >= date_trunc('day', current_date - interval '{{ days }} days')
21 | GROUP BY 1;
--------------------------------------------------------------------------------
/backend/src/queries/postgres/da_metrics/celestia_da_fees_eth.sql.j2:
--------------------------------------------------------------------------------
1 | {#
2 | A query to calculate the total Celestia DA fees (in ETH equivalent) over the last 'days' days.
3 | It uses the hourly Celestia price in ETH from `fact_kpis_granular` to convert fees into ETH.
4 |
5 | metric: da_fees_eth
6 |
7 | Parameters:
8 | - days: default = 7; The time interval (in days) to consider for the data extraction.
9 | #}
10 |
11 | {% set days = days | default(7) %}
12 |
13 | SELECT
14 | "date" AS day,
15 | SUM(value) AS value -- in eth
16 | FROM public.fact_da_consumers
17 | WHERE
18 | "date" >= current_date - interval '{{ days }} days'
19 | AND "date" < current_date
20 | AND metric_key = 'blob_fee_eth'
21 | AND da_layer = 'da_celestia'
22 | GROUP BY 1
23 | ORDER BY 1 DESC;
--------------------------------------------------------------------------------
/backend/src/queries/postgres/api/select_fact_kpis_achievements.sql.j2:
--------------------------------------------------------------------------------
1 | {#
2 | A query to sum up data for certain origin_keys in the fact_kpis table
3 |
4 | Parameters:
5 | - metric_keys: List of metric keys to consider for data extraction.
6 | - origin_key: The origin key to consider for data extraction.
7 | - days (optional): Number of days to look back from the current date. If not provided, all historical data is considered.
8 | #}
9 |
10 | select
11 | metric_key,
12 | sum(value) as total_value
13 | from fact_kpis
14 | where metric_key in ( '{{ metric_keys | join("', '") }}' )
15 | and origin_key = '{{ origin_key }}'
16 | {% if days is defined %}
17 | and date < current_date - interval '{{ days }} days'
18 | {% else %}
19 | and date < current_date
20 | {% endif %}
21 | group by 1
--------------------------------------------------------------------------------
/backend/src/queries/postgres/chain_metrics/select_maa.sql.j2:
--------------------------------------------------------------------------------
1 | {#
2 | A query to aggregate the count of unique active addresses per month over a specified time window.
3 |
4 | metric_key = 'maa'
5 |
6 | Parameters:
7 | - origin_key: The key of the chain to filter the data by.
8 | - days: default = 35; The length of the time window (in days) to look back from the current date. Default includes at least 2 months.
9 | #}
10 |
11 | {% set days = days | default(35) %}
12 |
13 | SELECT
14 | date_trunc('month', date) AS day,
15 | hll_cardinality(hll_union_agg(hll_addresses))::int AS value
16 | FROM fact_active_addresses_hll
17 | WHERE
18 | origin_key = '{{ origin_key }}'
19 | AND date < date_trunc('day', current_date)
20 | AND date >= date_trunc('month', current_date - interval '{{ days }} days')
21 | GROUP BY 1;
--------------------------------------------------------------------------------
/backend/src/queries/postgres/da_metrics/celestia_da_unique_blob_producers.sql.j2:
--------------------------------------------------------------------------------
1 | {#
2 | A query to count the number of unique blob producers (identified by namespaces) on Celestia
3 | over the last 'days' days.
4 |
5 | metric: da_unique_blob_producers
6 |
7 | Parameters:
8 | - days: default = 7; The time interval (in days) to consider for the data extraction.
9 | #}
10 |
11 | {% set days = days | default(7) %}
12 |
13 | SELECT
14 | date_trunc('day', block_timestamp) AS day,
15 | COUNT(DISTINCT namespaces) AS value
16 | FROM (
17 | SELECT
18 | block_timestamp,
19 | jsonb_array_elements(namespaces::jsonb)::text AS namespaces
20 | FROM celestia_tx
21 | WHERE block_timestamp >= current_date - interval '{{ days }} days'
22 | AND block_timestamp < current_date
23 | ) a
24 | GROUP BY 1;
--------------------------------------------------------------------------------
/backend/src/queries/postgres/chain_metrics/select_txcount.sql.j2:
--------------------------------------------------------------------------------
1 | {#
2 | A query to aggregate the number of transactions for each day in the last 'days' days.
3 | This number is used to compare our DB values against other block explorers.
4 |
5 | metric_key = 'txcount_comparison', 'txcount'
6 |
7 | Parameters:
8 | - origin_key: The name of the chain to identify the table.
9 | - filter_col: The column to filter the data by.
10 | - days: default = 7; The time interval (in days) to consider for the data extraction.
11 | #}
12 |
13 | {% set days = days | default(7) %}
14 |
15 | SELECT
16 | block_date AS day,
17 | COUNT(*) AS value
18 | FROM {{ origin_key }}_tx
19 | WHERE {{ filter_col }} > 0
20 | AND block_date >= current_date - interval '{{ days }} days'
21 | AND block_date < current_date
22 | GROUP BY 1;
--------------------------------------------------------------------------------
/backend/src/queries/postgres/chain_metrics/select_waa.sql.j2:
--------------------------------------------------------------------------------
1 | {#
2 | A query to aggregate the count of unique active addresses per week over a specified time window.
3 |
4 | metric_key = 'waa', 'user_base_weekly' (i.e. for Starknet)
5 |
6 | Parameters:
7 | - origin_key: The key of the chain to filter the data by.
8 | - days: default = 8; The length of the time window (in days) to look back from the current date. Default includes at least 2 weeks.
9 | #}
10 |
11 | {% set days = days | default(8) %}
12 |
13 | SELECT
14 | date_trunc('week', date) AS day,
15 | hll_cardinality(hll_union_agg(hll_addresses))::int AS value
16 | FROM fact_active_addresses_hll
17 | WHERE
18 | origin_key = '{{ origin_key }}'
19 | AND date < date_trunc('day', current_date)
20 | AND date >= date_trunc('week', current_date - interval '{{ days }} days')
21 | GROUP BY 1;
--------------------------------------------------------------------------------
/backend/src/api/og_resources/icons/small/gtp-ethereumlogo.svg:
--------------------------------------------------------------------------------
[SVG markup omitted]
--------------------------------------------------------------------------------
/backend/src/queries/postgres/api/select_top_main_categories.sql.j2:
--------------------------------------------------------------------------------
1 | {#
2 | A query to get the top main categories from the blockspace_fact_category_level table by txcount.
3 |
4 | Parameters:
5 | - days: default = 30; The time interval (in days) to consider for data extraction.
6 | - origin_key: The origin_key to filter the data.
7 | - limit: The maximum number of categories to return (default is 2).
8 | #}
9 |
10 | {% set days = days | default(30) %}
11 | {% set limit = limit | default(2) %}
12 |
13 | SELECT main_category_id
14 | FROM public.blockspace_fact_category_level
15 | left join oli_categories using (category_id)
16 | where origin_key = '{{origin_key}}'
17 | and date > current_date - interval '{{ days }} days'
18 | and category_id not in ('total_usage', 'unlabeled', 'native_transfer')
19 | group by 1
20 | order by sum(txcount) desc
21 | limit {{ limit }}
--------------------------------------------------------------------------------
/backend/src/queries/postgres/chain_metrics/select_qaa.sql.j2:
--------------------------------------------------------------------------------
1 | {#
2 | A query to aggregate the count of unique active addresses per quarter over a specified time window.
3 |
4 | metric_key = 'qaa', 'user_base_weekly' (i.e. for Starknet)
5 |
6 | Parameters:
7 | - origin_key: The key of the chain to filter the data by.
8 | - days: default = 100; The length of the time window (in days) to look back from the current date. Default includes at least 2 quarters.
9 | #}
10 |
11 | {% set days = days | default(100) %}
12 |
13 | SELECT
14 | date_trunc('quarter', date) AS day,
15 | hll_cardinality(hll_union_agg(hll_addresses))::int AS value
16 | FROM fact_active_addresses_hll
17 | WHERE
18 | origin_key = '{{ origin_key }}'
19 | AND date < date_trunc('day', current_date)
20 | AND date >= date_trunc('quarter', current_date - interval '{{ days }} days')
21 | GROUP BY 1;
--------------------------------------------------------------------------------
/backend/src/queries/postgres/chain_metrics/select_txcosts_median.sql.j2:
--------------------------------------------------------------------------------
1 | {#
2 | A query to calculate the median transaction fee over the last 'days' days.
3 | If the origin_key is not 'starknet', we also filter out transactions with a gas_price of zero.
4 |
5 | metric: txcosts_median_eth
6 |
7 | Parameters:
8 | - origin_key: The name of the chain to identify the table.
9 | - days: default = 7; The time interval (in days) to consider for the data extraction.
10 | #}
11 |
12 | {% set days = days | default(7) %}
13 |
14 | SELECT
15 | block_date AS day,
16 | PERCENTILE_CONT(0.5) WITHIN GROUP (ORDER BY tx_fee) AS value
17 | FROM {{ origin_key }}_tx
18 | WHERE 1=1
19 | {% if origin_key != 'starknet' %}
20 | AND tx_fee > 0
21 | {% endif %}
22 | AND block_date >= current_date - interval '{{ days }} days'
23 | AND block_date < current_date
24 | GROUP BY 1;
--------------------------------------------------------------------------------
/backend/src/queries/postgres/api/select_blockspace_main_categories.sql.j2:
--------------------------------------------------------------------------------
1 | {#
2 | A query to get a chain's blockspace usage (in txcount) broken down by main category
3 |
4 | Parameters:
5 | - days: default = 30; The time interval (in days) to consider for data extraction.
6 | - origin_key: The origin_key to filter the data.
7 |
8 | Question: filter out cefi here? since it's for apps page
9 | #}
10 |
11 | {% set days = days | default(30) %}
12 |
13 | SELECT
14 | main_category_id,
15 | sum(txcount) as txcount,
16 | ROUND(
17 | SUM(txcount) * 100.0 / SUM(SUM(txcount)) OVER (),
18 | 2
19 | ) AS pct_share
20 | FROM blockspace_fact_category_level
21 | left join oli_categories using (category_id)
22 | where origin_key = '{{ origin_key }}'
23 | and date > current_date - interval '{{ days }} days'
24 | and category_id not in ('total_usage')
25 | group by 1
26 | order by 2 desc
--------------------------------------------------------------------------------
/backend/src/queries/postgres/api/select_tps_historical.sql.j2:
--------------------------------------------------------------------------------
1 | {#
2 | A query to get historical TPS data from the fact_kpis table.
3 |
4 | Parameters:
5 | - origin_key: The origin key to consider for data extraction.
6 | #}
7 |
8 | SELECT
9 | date_trunc('month', date) AS month,
10 | AVG(value) FILTER (WHERE metric_key = 'gas_limit') AS gas_limit,
11 | AVG(value) FILTER (WHERE metric_key = 'block_time_seconds') AS block_time_seconds,
12 | (AVG(value) FILTER (WHERE metric_key = 'gas_limit')) / NULLIF(AVG(value) FILTER (WHERE metric_key = 'block_time_seconds'), 0) AS gas_per_second_limit,
13 | (AVG(value) FILTER (WHERE metric_key = 'gas_limit')) / NULLIF(AVG(value) FILTER (WHERE metric_key = 'block_time_seconds'), 0) / 100000 / 2 AS tps
14 | FROM public.fact_kpis
15 | WHERE metric_key IN ('gas_limit', 'block_time_seconds')
16 | AND origin_key = '{{ origin_key }}'
17 | GROUP BY 1
18 | ORDER BY 1;
--------------------------------------------------------------------------------
/backend/src/api/og_resources/icons/small/gtp-checkmark-unchecked.svg:
--------------------------------------------------------------------------------
[SVG markup omitted]
--------------------------------------------------------------------------------
/backend/src/api/og_resources/icons/small/gtp-chevrondown.svg:
--------------------------------------------------------------------------------
[SVG markup omitted]
--------------------------------------------------------------------------------
/backend/src/queries/postgres/api/select_sum_metric_l2s.sql.j2:
--------------------------------------------------------------------------------
1 | {#
2 | A query to get the daily sum of a given metric across all L2s from the fact_kpis table.
3 |
4 | Parameters:
5 | - days: default = 30; The time interval (in days) to consider for data extraction.
6 | - metric_key: The metric key to filter the data.
7 | #}
8 |
9 | {% set days = days | default(30) %}
10 |
11 | SELECT
12 | fk."date",
13 | sum(fk.value) as value
14 | FROM fact_kpis fk
15 | WHERE fk.metric_key = '{{ metric_key }}'
16 | AND fk.origin_key not in (
17 | 'all',
18 | 'glo-dollar',
19 | 'celestia',
20 | 'da_celestia',
21 | 'da_ethereum_blobs',
22 | 'da_ethereum_calldata',
23 | 'ethereum',
24 | 'ethereum_ecosystem'
25 | )
26 | AND fk."date" >= current_date - INTERVAL '{{ days }} days'
27 | AND fk."date" < current_date
28 | GROUP BY 1
29 | ORDER BY 1 DESC
--------------------------------------------------------------------------------
/backend/src/adapters/clients/bigquery.py:
--------------------------------------------------------------------------------
1 | from google.cloud import bigquery
2 | from google.oauth2 import service_account
3 | import pandas as pd
4 | import json
5 |
6 | class BigQuery():
7 |     def __init__(self, credentials_json):
8 |         # Set up the credentials
9 |         credentials_info = json.loads(credentials_json)
10 |         credentials = service_account.Credentials.from_service_account_info(credentials_info)
11 |
12 |         # Create a BigQuery client
13 |         self.client = bigquery.Client(credentials=credentials, project=credentials.project_id)
14 |
15 |     def execute_bigquery(self, query):
16 |         # Execute the query
17 |         query_job = self.client.query(query)
18 |         # Wait for the query to complete and get the results
19 |         results = query_job.result()
20 |         # Convert the results to a pandas DataFrame
21 |         df = results.to_dataframe()
22 |         return df
--------------------------------------------------------------------------------
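Example usage (sketch, not a file in the repository): constructing the client from a service-account JSON string and running a query; the environment variable name is a hypothetical placeholder:

    import os
    from src.adapters.clients.bigquery import BigQuery

    bq = BigQuery(os.environ["BIGQUERY_CREDENTIALS_JSON"])  # hypothetical env var holding the service-account JSON
    df = bq.execute_bigquery("SELECT 1 AS one")
    print(df)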
/backend/src/api/og_resources/icons/small/gtp-chevronup.svg:
--------------------------------------------------------------------------------
[SVG markup omitted]
--------------------------------------------------------------------------------
/backend/airflow/dags/utility/utility_rpc_sync_check.py:
--------------------------------------------------------------------------------
1 | from airflow.decorators import dag, task
2 | from datetime import datetime, timedelta
3 | from src.misc.airflow_utils import alert_via_webhook
4 |
5 |
6 | @dag(
7 |     default_args={
8 |         'owner': 'nader',
9 |         'retries': 2,
10 |         'retry_delay': timedelta(minutes=1),
11 |         'email_on_failure': False,
12 |         'on_failure_callback': alert_via_webhook
13 |     },
14 |     dag_id='utility_rpc_sync_check',
15 |     description='DAG to check if chain nodes are synchronized',
16 |     tags=['utility', 'hourly',],
17 |     start_date=datetime(2023, 12, 1),
18 |     schedule='35 * * * *'
19 | )
20 |
21 | def blockchain_sync_dag():
22 |     @task
23 |     def sync_checker():
24 |         from src.adapters.rpc_funcs.rpc_sync_checker import sync_check
25 |         sync_check()
26 |
27 |     sync_checker()
28 |
29 | sync_dag_instance = blockchain_sync_dag()
--------------------------------------------------------------------------------
/backend/.env.example:
--------------------------------------------------------------------------------
1 | # Environment Configuration
2 | # Copy this file to .env and fill in the actual values
3 | # Never commit the actual .env file to version control
4 |
5 | ## API keys & others
6 | # GitHub personal access token for API access
7 | GITHUB_TOKEN=your_github_token_here
8 |
9 | # CoinGecko API key for price data
10 | COINGECKO_API=your_coingecko_api_key
11 |
12 | # OpenAI API key for AI services
13 | OPENAI_API_KEY=your_openai_api_key
14 |
15 | ## Database (Postgres) connection - make sure user has write access
16 | DB_HOST=localhost
17 | DB_USERNAME=your_db_username
18 | DB_PASSWORD=your_db_password
19 | DB_DATABASE=db_name
20 | DB_PORT=port
21 |
22 | ## AWS Configuration for S3 and other services
23 | AWS_ACCESS_KEY_ID=your_aws_access_key
24 | AWS_SECRET_ACCESS_KEY=your_aws_secret_key
25 | S3_CF_BUCKET=your-cloudfront-bucket
26 | S3_LONG_TERM_BUCKET=your-longterm-bucket
27 | CF_DISTRIBUTION_ID=your_cloudfront_distribution_id
28 |
--------------------------------------------------------------------------------
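Example (sketch, not a file in the repository): loading these values from a local .env file, e.g. with python-dotenv (pinned in backend/src/oli/api/requirements.txt); how the rest of the backend reads them is an assumption here:

    import os
    from dotenv import load_dotenv

    load_dotenv()  # reads .env from the current working directory
    db_url = (
        f"postgresql://{os.getenv('DB_USERNAME')}:{os.getenv('DB_PASSWORD')}"
        f"@{os.getenv('DB_HOST')}:{os.getenv('DB_PORT')}/{os.getenv('DB_DATABASE')}"
    )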
/requirements_test.in:
--------------------------------------------------------------------------------
1 | # --- Airflow core + providers ---
2 | apache-airflow==3.1.3
3 | apache-airflow-providers-fab==1.5.3
4 | apache-airflow-providers-http==5.3.0
5 | apache-airflow-providers-imap==3.9.0
6 | apache-airflow-providers-postgres==6.2.0
7 |
8 | # --- Cloud / storage ---
9 | google-cloud-bigquery==3.25.0
10 | google-cloud-bigquery-storage==2.25.0
11 | gcsfs==2025.3.2
12 | boto3==1.26.118
13 | s3fs==0.4.2
14 |
15 | # --- Data stack ---
16 | web3==7.10.0
17 | oli-python==1.2.1
18 | dune-client==1.2.1
19 | pandas==2.0.0
20 | numpy<2
21 | polars==1.1.0
22 | pangres==4.1.4
23 | pyairtable==2.3.0.post1
24 |
25 | # --- API / utils ---
26 | openai==1.42.0
27 | orjson==3.10.7
28 | redis==5.2.1
29 | nextcord==2.6.0
30 | PyGithub==2.8.1
31 | sgqlc==16.4
32 | simplejson==3.19.1
33 | yfinance==0.2.65
34 | playwright==1.54.0
35 | selenium==4.18.1
36 |
37 | # --- extras ---
38 | pipdeptree==2.30.0
39 | wheel==0.45.1
40 | unicodecsv==0.14.1
41 | tinycss2==1.4.0
--------------------------------------------------------------------------------
/backend/src/misc/airflow_utils.py:
--------------------------------------------------------------------------------
1 | import os
2 | from src.misc.helper_functions import send_discord_message
3 | from airflow.models import Variable
4 |
5 | def alert_via_webhook(context, user='mseidl'):
6 |     dag_run = context.get('dag_run')
7 |     task_instance = context.get('task_instance')
8 |     exception = context.get('exception')
9 |     webhook_url = Variable.get("DISCORD_ALERTS")
10 |
11 |     if user == 'mseidl':
12 |         user_id = '693484083895992393'
13 |     elif user == 'lorenz':
14 |         user_id = '790276642660548619'
15 |     elif user == 'nader':
16 |         user_id = '326358477335298050'
17 |     elif user == 'mike':
18 |         user_id = '253618927572221962'
19 |     elif user == 'ahoura':
20 |         user_id = '874921624720257037'
21 |     else:
22 |         user_id = '693484083895992393'  # unknown user: fall back to the default owner so the alert still sends
23 |
24 |     message = f"<@{user_id}> -- A failure occurred in {dag_run.dag_id} on task {task_instance.task_id}. Might just be a transient issue -- Exception: {exception}"
25 |     send_discord_message(message[:499], webhook_url)
/backend/src/api/og_resources/icons/small/gtp-plus.svg:
--------------------------------------------------------------------------------
[SVG markup omitted]
--------------------------------------------------------------------------------
/backend/src/queries/postgres/chain_metrics/select_user_base_weekly.sql.j2:
--------------------------------------------------------------------------------
1 | {#
2 | Determining the number of unique users across layer 2s.
3 |
4 | metric_key = 'user_base_weekly'
5 |
6 | Parameters:
7 | - days: default = 9; The time interval (in days) to consider for data extraction.
8 | #}
9 |
10 | {% set days = days | default(9) %}
11 |
12 | WITH chain_info AS (
13 | SELECT
14 | DATE_TRUNC('week', date) AS day,
15 | address,
16 | CASE
17 | WHEN COUNT(DISTINCT origin_key) > 1 THEN 'multiple'
18 | ELSE MAX(origin_key)
19 | END AS origin_key
20 | FROM fact_active_addresses
21 | WHERE
22 | date < DATE_TRUNC('week', NOW())
23 | AND date >= DATE_TRUNC('week', NOW() - INTERVAL '{{ days }} days')
24 | AND origin_key NOT IN ('starknet', 'ethereum')
25 | GROUP BY 1, 2
26 | )
27 | SELECT
28 | day,
29 | origin_key,
30 | COUNT(DISTINCT address) AS val
31 | FROM chain_info
32 | GROUP BY 1, 2;
--------------------------------------------------------------------------------
/backend/src/queries/postgres/api/select_fact_kpis_rolling.sql.j2:
--------------------------------------------------------------------------------
1 | {#
2 | A query to get the data from the fact_kpis table with a rolling average.
3 |
4 | Parameters:
5 | - days: default = 30; The time interval (in days) to consider for data extraction.
6 | - rolling: default = 7; The number of days for the rolling average.
7 | - metric_key: The metric key to consider for data extraction.
8 | - origin_key: The origin key to consider for data extraction.
9 | #}
10 |
11 | {% set days = days | default(30) %}
12 | {% set rolling = rolling | default(7) %}
13 |
14 |
15 | SELECT
16 | fk."date",
17 | AVG(fk.value) OVER (
18 | ORDER BY fk."date"
19 | ROWS BETWEEN {{rolling}} PRECEDING AND CURRENT ROW
20 | ) AS value
21 | FROM public.fact_kpis fk
22 | WHERE fk.metric_key = '{{ metric_key }}'
23 | AND fk.origin_key = '{{ origin_key }}'
24 | AND fk."date" >= current_date - INTERVAL '{{ days }} days'
25 | AND fk."date" < current_date
26 | ORDER BY fk."date" DESC
--------------------------------------------------------------------------------
/backend/src/realtime/sse_app_run/redis_keys.py:
--------------------------------------------------------------------------------
1 | class RedisKeys:
2 |     """Redis key helpers shared across realtime services."""
3 |
4 |     # Global keys
5 |     GLOBAL_TPS_ATH = "global:tps:ath"
6 |     GLOBAL_TPS_24H = "global:tps:24h_high"
7 |     TPS_HISTORY_24H = "global:tps:history_24h"
8 |     ATH_HISTORY = "global:tps:ath_history"
9 |
10 |     @staticmethod
11 |     def chain_stream(chain_name: str) -> str:
12 |         return f"chain:{chain_name}"
13 |
14 |     @staticmethod
15 |     def chain_tps_ath(chain_name: str) -> str:
16 |         return f"chain:{chain_name}:tps:ath"
17 |
18 |     @staticmethod
19 |     def chain_tps_24h(chain_name: str) -> str:
20 |         return f"chain:{chain_name}:tps:24h_high"
21 |
22 |     @staticmethod
23 |     def chain_tps_history_24h(chain_name: str) -> str:
24 |         return f"chain:{chain_name}:tps:history_24h"
25 |
26 |     @staticmethod
27 |     def chain_ath_history(chain_name: str) -> str:
28 |         return f"chain:{chain_name}:tps:ath_history"
29 |
30 |
31 | __all__ = ["RedisKeys"]
32 |
--------------------------------------------------------------------------------
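Example usage (sketch, not a file in the repository): reading one of the keys built by RedisKeys with redis-py (pinned in requirements.txt); the value type stored under each key is an assumption here:

    import redis
    from redis_keys import RedisKeys

    r = redis.Redis(host="localhost", port=6379, decode_responses=True)
    ath = r.get(RedisKeys.chain_tps_ath("base"))  # assumed to be stored as a plain numeric string
    print(f"base TPS all-time high: {ath}")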
/backend/airflow/dags/api/api_og_images.py:
--------------------------------------------------------------------------------
1 | import getpass
2 | sys_user = getpass.getuser()
3 |
4 | from datetime import datetime,timedelta
5 | from airflow.decorators import dag, task
6 | from src.misc.airflow_utils import alert_via_webhook
7 |
8 | @dag(
9 |     default_args={
10 |         'owner' : 'mseidl',
11 |         'retries' : 2,
12 |         'email_on_failure': False,
13 |         'retry_delay' : timedelta(minutes=5),
14 |         'on_failure_callback': lambda context: alert_via_webhook(context, user='mike')
15 |     },
16 |     dag_id='api_og_images',
17 |     description='Create and store og images',
18 |     tags=['api', 'daily'],
19 |     start_date = datetime(2023,4,24),
20 |     schedule='0 7 * * SUN'
21 | )
22 |
23 | def etl():
24 |     @task()
25 |     def run_og_images():
26 |         import os
27 |         from src.api.screenshots_to_s3 import run_template_generation
28 |
29 |         run_template_generation(os.getenv("S3_CF_BUCKET"), os.getenv("CF_DISTRIBUTION_ID"), 'v1', sys_user)
30 |
31 |     run_og_images()
32 | etl()
--------------------------------------------------------------------------------
/backend/airflow/dags/utility/utility_dummy.py:
--------------------------------------------------------------------------------
1 | from datetime import datetime,timedelta
2 | from airflow.decorators import dag, task
3 | from src.misc.airflow_utils import alert_via_webhook
4 |
5 | @dag(
6 |     default_args={
7 |         'owner' : 'mseidl',
8 |         'retries' : 1,
9 |         'email_on_failure': False,
10 |         'retry_delay' : timedelta(seconds=5),
11 |         'on_failure_callback': lambda context: alert_via_webhook(context, user='lorenz')
12 |     },
13 |     dag_id='utility_dummy',
14 |     description='This is a dummy DAG that is supposed to fail.',
15 |     tags=['utility'],
16 |     start_date=datetime(2023,4,24),
17 |     schedule='*/15 * * * *'
18 | )
19 |
20 | def etl():
21 |     @task()
22 |     def run_dummy_task():
23 |         import getpass
24 |         import os
25 |
26 |         print("User:", getpass.getuser())
27 |         print("UID:", os.getuid())
28 |         print("CWD:", os.getcwd())
29 |         raise Exception("This is a dummy task that is supposed to fail.")
30 |
31 |     run_dummy_task()
32 | etl()
--------------------------------------------------------------------------------
/backend/src/api/og_resources/icons/small/gtp-table.svg:
--------------------------------------------------------------------------------
[SVG markup omitted]
--------------------------------------------------------------------------------
/backend/src/oli/api/Dockerfile:
--------------------------------------------------------------------------------
1 | # 1. Build stage (optional but nice for speed); we’ll keep it single-stage for simplicity first.
2 | FROM python:3.13-slim
3 |
4 | # 2. Set workdir
5 | WORKDIR /app
6 |
7 | # 3. System deps (asyncpg needs libpq headers/runtime;
8 | # also build tools for any wheels that need compiling)
9 | RUN apt-get update && apt-get install -y --no-install-recommends \
10 | build-essential \
11 | libpq-dev \
12 | && rm -rf /var/lib/apt/lists/*
13 |
14 | # 4. Copy requirements first (better layer caching)
15 | COPY requirements.txt .
16 |
17 | # 5. Install deps
18 | RUN pip install --no-cache-dir -r requirements.txt
19 |
20 | # 6. Copy the actual app code
21 | # Assuming api.py is in this directory.
22 | # If your code lives in backend/src/oli/api.py, adjust COPY and WORKDIR accordingly.
23 | COPY . .
24 |
26 | # 7. Default port via env var (an explicit EXPOSE isn't strictly required, but setting PORT keeps things clear)
26 | ENV PORT=8080
27 |
28 | # 8. Set the start command
29 | # api.py is expected to start uvicorn itself, serving the FastAPI app `app` in module `api`
30 | CMD ["python", "api.py"]
--------------------------------------------------------------------------------
/backend/src/api/og_resources/icons/small/farcaster.svg:
--------------------------------------------------------------------------------
1 |
11 |
--------------------------------------------------------------------------------
/backend/src/api/og_resources/icons/small/gtp-chevronright.svg:
--------------------------------------------------------------------------------
1 |
15 |
--------------------------------------------------------------------------------
/backend/src/api/og_resources/icons/small/gtp-chevronleft.svg:
--------------------------------------------------------------------------------
1 |
15 |
--------------------------------------------------------------------------------
/backend/src/misc/jinja_helper.py:
--------------------------------------------------------------------------------
1 |
2 | import getpass
3 | from sqlalchemy import text
4 | sys_user = getpass.getuser()
5 | from jinja2 import Environment, FileSystemLoader, StrictUndefined
6 | import pandas as pd
7 |
8 | def execute_jinja_query(db_connector, jinja_query_path, query_parameters, return_df=False):
9 | if sys_user == 'ubuntu':
10 | env = Environment(loader=FileSystemLoader(f'/home/{sys_user}/gtp/backend/src/queries/postgres'), undefined=StrictUndefined)
11 | else:
12 | env = Environment(loader=FileSystemLoader('src/queries/postgres'), undefined=StrictUndefined)
13 |
14 | template = env.get_template(jinja_query_path)
15 | rendered_sql = template.render(query_parameters)
16 | print(f"...executing jinja query: {jinja_query_path} with params: {query_parameters}")
17 | #print(rendered_sql)
18 | if return_df:
19 | df = pd.read_sql(text(rendered_sql), db_connector.engine)
20 | return df
21 | else:
22 | with db_connector.engine.begin() as connection:
23 | connection.execute(text(rendered_sql))
24 | return None
25 |
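
Usage sketch for execute_jinja_query, using one of the templates included in this repo. Assumptions: DbConnector comes from src.db_connector (as in the DAGs elsewhere in this repo) and database credentials are provided via the environment; the parameter values are illustrative.

from src.db_connector import DbConnector
from src.misc.jinja_helper import execute_jinja_query

db_connector = DbConnector()

# SELECT template: render with parameters and return the result as a DataFrame
df = execute_jinja_query(
    db_connector,
    'api/select_sum_metric_l2s_rolling.sql.j2',
    {'metric_key': 'txcount', 'days': 30, 'rolling': 7},
    return_df=True,
)
print(df.head())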
--------------------------------------------------------------------------------
/backend/src/queries/postgres/api/quick_bites/hyperliquid_usdc_arb.sql.j2:
--------------------------------------------------------------------------------
1 | WITH arbitrum_stables AS (
2 | SELECT
3 | "date",
4 | value AS stables_on_arb_with_hl
5 | FROM public.fact_kpis
6 | WHERE origin_key = 'arbitrum'
7 | AND metric_key = 'stables_mcap'
8 | AND "date" >= '2024-01-01'
9 | AND "date" < CURRENT_DATE
10 | ),
11 | hyperliquid_bridge AS (
12 | SELECT
13 | "date",
14 | value AS hyperliquid_bridge_usdc
15 | FROM public.fact_kpis
16 | WHERE metric_key = 'qb_hyperliquid_bridge_usdc'
17 | AND "date" >= '2024-01-01'
18 | AND "date" < CURRENT_DATE
19 | )
20 | SELECT
21 | COALESCE(a."date", h."date") AS date,
22 | ROUND(COALESCE(a.stables_on_arb_with_hl, 0)::NUMERIC, 2) AS stables_on_arb_with_hl,
23 | ROUND((COALESCE(a.stables_on_arb_with_hl, 0) - COALESCE(h.hyperliquid_bridge_usdc, 0))::NUMERIC, 2) AS stables_on_arb_without_hl,
24 | ROUND(COALESCE(h.hyperliquid_bridge_usdc, 0)::NUMERIC, 2) AS hyperliquid_usdc
25 | FROM arbitrum_stables a
26 | FULL OUTER JOIN hyperliquid_bridge h ON a."date" = h."date"
27 | ORDER BY date DESC;
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2023 growthepie
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/backend/src/api/og_resources/icons/small/gtp-calendar-clean.svg:
--------------------------------------------------------------------------------
1 |
20 |
--------------------------------------------------------------------------------
/backend/src/queries/postgres/api/select_sum_metric_l2s_rolling.sql.j2:
--------------------------------------------------------------------------------
1 | {#
2 | A query to get the rolling average of the daily sum across L2s for a given metric (e.g. txcount) from the fact_kpis table.
3 |
4 | Parameters:
5 | - days: default = 30; The time interval (in days) to consider for data extraction.
6 | - rolling: default = 7; The number of preceding days to include in the rolling average.
7 | - metric_key: The metric key to filter the data.
8 | #}
9 |
10 | {% set days = days | default(30) %}
11 | {% set rolling = rolling | default(7) %}
12 |
13 | SELECT
14 | fk."date",
15 | AVG(SUM(fk.value)) OVER (
16 | ORDER BY fk."date"
17 | ROWS BETWEEN {{rolling}} PRECEDING AND CURRENT ROW
18 | ) AS value
19 | FROM fact_kpis fk
20 | WHERE fk.metric_key = '{{ metric_key }}'
21 | AND fk.origin_key NOT IN (
22 | 'all',
23 | 'glo-dollar',
24 | 'celestia',
25 | 'da_celestia',
26 | 'da_ethereum_blobs',
27 | 'da_ethereum_calldata',
28 | 'ethereum',
29 | 'ethereum_ecosystem'
30 | )
31 | AND fk."date" >= current_date - INTERVAL '{{ days }} days'
32 | AND fk."date" < current_date
33 | GROUP BY fk."date"
34 | ORDER BY fk."date" DESC
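
One detail worth noting: ROWS BETWEEN {{ rolling }} PRECEDING AND CURRENT ROW spans rolling + 1 rows, so with the default of 7 each value averages up to 8 daily sums. A small pandas sketch of the equivalent rolling mean, with made-up daily sums:

import pandas as pd

daily_sums = pd.Series([100, 110, 120, 130, 140, 150, 160, 170, 180])
rolling = 7
# min_periods=1 mirrors the SQL window, which simply has fewer preceding rows at the start of the series
print(daily_sums.rolling(window=rolling + 1, min_periods=1).mean())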
--------------------------------------------------------------------------------
/backend/tests/test_main_config.py:
--------------------------------------------------------------------------------
1 | from tests import setup_test_imports
2 | # Set up imports
3 | setup_test_imports()
4 |
5 | from src.main_config import get_main_config
6 | from src.db_connector import DbConnector
7 |
8 | def main():
9 | print("Testing MainConfig loading...")
10 |
11 | # Create DB connector
12 | db_connector = DbConnector()
13 |
14 | # Load from GitHub first to ensure we have a reference point
15 | print("1. Loading from GitHub source...")
16 | main_conf_github = get_main_config(db_connector=db_connector, source='github', api_version="dev")
17 | print(f" Loaded {len(main_conf_github)} chains")
18 |
19 | # Now load from S3 - this should silently fall back to GitHub
20 | print("\n2. Loading from S3 source...")
21 | main_conf_s3 = get_main_config(db_connector=db_connector, api_version="dev")
22 | print(f" Loaded {len(main_conf_s3)} chains")
23 |
24 | # Verify they're the same
25 | are_equal = main_conf_github == main_conf_s3
26 | print(f"\n3. Configs are equal: {are_equal}")
27 |
28 | print("\nTest completed successfully!")
29 |
30 | if __name__ == "__main__":
31 | main()
--------------------------------------------------------------------------------
/backend/src/api/og_resources/icons/small/gtp-checkmark-single-select.svg:
--------------------------------------------------------------------------------
1 |
20 |
--------------------------------------------------------------------------------
/backend/airflow/dags/metrics/metrics_defillama.py:
--------------------------------------------------------------------------------
1 | from datetime import datetime,timedelta
2 | from airflow.decorators import dag, task
3 | from src.misc.airflow_utils import alert_via_webhook
4 |
5 | @dag(
6 | default_args={
7 | 'owner' : 'mseidl',
8 | 'retries' : 2,
9 | 'email_on_failure': False,
10 | 'retry_delay' : timedelta(minutes=5),
11 | 'on_failure_callback': alert_via_webhook
12 | },
13 | dag_id='metrics_defillama',
14 | description='Load App fees',
15 | tags=['metrics', 'daily'],
16 | start_date=datetime(2023,4,24),
17 | schedule='10 03 * * *'
18 | )
19 |
20 | def etl():
21 | @task()
22 | def run_app_fees():
23 | from src.db_connector import DbConnector
24 | from src.adapters.adapter_defillama import AdapterDefillama
25 |
26 | adapter_params = {}
27 | load_params = {
28 | 'origin_keys' : None,
29 | }
30 |
31 | # initialize adapter
32 | db_connector = DbConnector()
33 | ad = AdapterDefillama(adapter_params, db_connector)
34 | # extract
35 | df = ad.extract(load_params)
36 | # load
37 | ad.load(df)
38 |
39 | run_app_fees()
40 | etl()
--------------------------------------------------------------------------------
/backend/src/queries/postgres/chain_metrics/upsert_fact_kpis_agg_ecosystem.sql.j2:
--------------------------------------------------------------------------------
1 | {#
2 | This query aggregates key metrics across multiple chains to form an "ecosystem" view.
3 |
4 | Parameters:
5 | - days: int (optional) number of days to look back for aggregation
6 | #}
7 |
8 | INSERT INTO fact_kpis (metric_key, origin_key, date, value)
9 |
10 | select
11 | metric_key,
12 | 'ethereum_ecosystem' as origin_key,
13 | date,
14 | SUM(value) as value
15 | from fact_kpis
16 | where origin_key in (
17 | select origin_key
18 | from sys_main_conf
19 | where chain_type in ('L1', 'L2')
20 | and api_in_main
21 | and api_deployment_flag = 'PROD'
22 | )
23 | -- txcosts and daa missing
24 | and metric_key in (
25 | 'tvl', 'tvl_eth', 'txcount', 'stables_mcap', 'stables_mcap_eth', 'fees_paid_usd', 'fees_paid_eth',
26 | 'rent_paid_usd', 'rent_paid_eth', 'profit_usd', 'profit_eth', 'fdv_usd', 'fdv_eth',
27 | 'market_cap_usd', 'market_cap_eth', 'gas_per_second', 'app_fees_usd', 'app_fees_eth'
28 | )
29 | and date >= current_date - interval '{{ days | default(9999) }} days'
30 | group by 1,2,3
31 |
32 | ON CONFLICT (metric_key, origin_key, date) DO UPDATE SET
33 | value = EXCLUDED.value;
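
Upsert templates like this one are executed without returning a DataFrame; a minimal sketch using the execute_jinja_query helper shown earlier (DbConnector setup is assumed to come from the environment, and 30 is an illustrative lookback):

from src.db_connector import DbConnector
from src.misc.jinja_helper import execute_jinja_query

db_connector = DbConnector()

# Runs the INSERT ... ON CONFLICT statement; omitting 'days' would fall back to the template default of 9999
execute_jinja_query(
    db_connector,
    'chain_metrics/upsert_fact_kpis_agg_ecosystem.sql.j2',
    {'days': 30},
)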
--------------------------------------------------------------------------------
/backend/tests/test_chain.py:
--------------------------------------------------------------------------------
1 | from src.adapters.adapter_raw_rpc import NodeAdapter
2 | from src.adapters.rpc_funcs.utils import MaxWaitTimeExceededException, get_chain_config
3 | from src.db_connector import DbConnector
4 |
5 | def run_celo():
6 | # Initialize DbConnector
7 | db_connector = DbConnector()
8 |
9 | chain_name = 'celo'
10 |
11 | active_rpc_configs, batch_size = get_chain_config(db_connector, chain_name)
12 | print(f"CELO_CONFIG={active_rpc_configs}")
13 |
14 | adapter_params = {
15 | 'rpc': 'local_node',
16 | 'chain': chain_name,
17 | 'rpc_configs': active_rpc_configs,
18 | }
19 |
20 | # Initialize NodeAdapter
21 | adapter = NodeAdapter(adapter_params, db_connector)
22 |
23 | # Initial load parameters
24 | load_params = {
25 | 'block_start': 'auto',
26 | 'batch_size': batch_size,
27 | }
28 |
29 | try:
30 | adapter.extract_raw(load_params)
31 | except MaxWaitTimeExceededException as e:
32 | print(f"Extraction stopped due to maximum wait time being exceeded: {e}")
33 | raise e
34 | finally:
35 | adapter.log_stats()
36 |
37 |
38 | if __name__ == "__main__":
39 | run_celo()
--------------------------------------------------------------------------------
/backend/src/api/og_resources/icons/small/gtp-tokeneth.svg:
--------------------------------------------------------------------------------
1 |
10 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | .env
2 | .vscode
3 | **/__pycache__
4 | *.pyc
5 | my_env/
6 | myenv/
7 | myenv312/
8 | local_env/
9 | test_env/
10 | venv/
11 | .venv/
12 |
13 | *.ipynb
14 | *.html
15 | *.csv
16 | *.parquet
17 | *.pkl
18 | backend/output/
19 | backend/local/
20 | screenshots/
21 | backend/generated_images/
22 |
23 | # Airflow stuff
24 | airflow.cfg
25 | airflow/git_version
26 | airflow/logs/
27 | airflow/www/static/coverage/
28 | airflow/www/*.log
29 | backend/airflow/logs/
30 | airflow-webserver.pid
31 | standalone_admin_password.txt
32 | warnings.txt
33 |
34 |
35 | backend/adapter_starknet_test.py
36 | backend/adapter_gtp_test.py
37 | backend/adapter_celestia_test.py
38 | backend/adapter_rhino_test.py
39 | backend/starknet_backfill.py
40 | backend/94df57da-0ad4-43cd-9558-cf3e41547324.json
41 |
42 | .DS_Store
43 | backend/mode_rpc_config.json
44 | backend/growthepie-73a5bd385526.json
45 | backend/redstone_rpc_config.json
46 | backend/growthepie-17b4adf5ad94.json
47 | backend/fundamentals_full.json
48 | backend/src/misc/discord_config.json
49 | backend/main_conf.pkl
50 | backend/da_conf.pkl
51 |
52 | node_modules
53 | backend/add_new_date_indx.py
54 | backend/add_new_date.py
55 | backend/test_screenshots.py
56 |
57 | backend/src/adapters/*.txt
--------------------------------------------------------------------------------
/backend/airflow/dags/api/api_json_gen_sub_daily.py:
--------------------------------------------------------------------------------
1 | from datetime import datetime,timedelta
2 | from airflow.decorators import dag, task
3 | from src.misc.airflow_utils import alert_via_webhook
4 |
5 | api_version = "v1"
6 |
7 | @dag(
8 | default_args={
9 | 'owner' : 'mseidl',
10 | 'retries' : 2,
11 | 'email_on_failure': False,
12 | 'retry_delay' : timedelta(minutes=1),
13 | 'on_failure_callback': alert_via_webhook
14 | },
15 | dag_id='api_json_gen_sub_daily',
16 | description='DAG to create JSON files multiple times a day for our frontend.',
17 | tags=['api', 'daily'],
18 | start_date=datetime(2025,8,28),
19 | schedule='*/15 * * * *' ## run every 15min
20 | )
21 |
22 | def run():
23 | @task()
24 | def run_create_streaks_today_json():
25 | import os
26 | from src.api.json_gen import JsonGen
27 | from src.db_connector import DbConnector
28 |
29 | db_connector = DbConnector()
30 |
31 | json_gen = JsonGen(os.getenv("S3_CF_BUCKET"), os.getenv("CF_DISTRIBUTION_ID"), db_connector, api_version)
32 | json_gen.create_streaks_today_json()
33 |
34 |
35 | run_create_streaks_today_json()
36 |
37 | run()
--------------------------------------------------------------------------------
/backend/src/queries/postgres/da_metrics/upsert_fact_kpis_celestia_chain_metrics.sql.j2:
--------------------------------------------------------------------------------
1 | {#
2 | Insert or update records into the 'fact_kpis' table. This query computes the sum of DA metrics on chain level based on the economics mapping and the 'fact_da_consumers' table.
3 |
4 | Parameters:
5 | - days: default = 7; The time interval (in days) to consider for the data extraction. This will be dynamically injected into the query via Jinja2 templating.
6 | #}
7 |
8 | {% set days = days | default(7) %}
9 |
10 | INSERT INTO fact_kpis (metric_key, origin_key, date, value)
11 |
12 | SELECT
13 | REPLACE(REPLACE(REPLACE(metric_key , 'blob_size_bytes', 'celestia_blob_size_bytes'), 'blob_fee_eth', 'celestia_blobs_eth'), 'blob_count', 'celestia_blob_count') AS metric_key,
14 | eco.origin_key,
15 | "date",
16 | sum(value) as value
17 | FROM public.fact_da_consumers f
18 | inner join sys_economics_mapping eco using ("namespace")
19 | where f.da_layer = 'da_celestia'
20 | and date > current_date - interval '{{ days }} days' -- Dynamic time interval
21 | and date < current_date
22 | group by 1,2,3
23 |
24 | ON CONFLICT (metric_key, origin_key, date)
25 | DO UPDATE SET
26 | value = EXCLUDED.value;
--------------------------------------------------------------------------------
/backend/src/queries/postgres/api/select_streak_today.sql.j2:
--------------------------------------------------------------------------------
1 | {#
2 | A query to get today's values for specific metrics from the {{ origin_key }}_tx table.
3 |
4 | Parameters:
5 | - origin_key: the specific origin key to filter the results.
6 | - custom_gas: bool (optional) e.g., true/false
7 | #}
8 |
9 |
10 |
11 |
12 | with price_eth as (
13 | select value
14 | FROM public.fact_kpis_granular
15 | where origin_key = 'ethereum' and metric_key = 'price_usd'
16 | order by "timestamp" desc
17 | limit 1
18 | ),
19 | custom_price AS (
20 | SELECT value as price_eth
21 | FROM public.fact_kpis
22 | WHERE origin_key = '{{ origin_key }}'
23 | AND metric_key = 'price_eth'
24 | ORDER BY "date" DESC
25 | LIMIT 1
26 | )
27 |
28 | select
29 | count(*) as txcount,
30 | CASE WHEN {{ custom_gas | default(false) }}
31 | THEN SUM(tx_fee) * (SELECT price_eth FROM custom_price)
32 | ELSE SUM(tx_fee)
33 | END fees_paid_eth,
34 |
35 | CASE WHEN {{ custom_gas | default(false) }}
36 | THEN SUM(tx_fee) * (SELECT price_eth FROM custom_price) * (SELECT value FROM price_eth)
37 | ELSE SUM(tx_fee) * (SELECT value FROM price_eth)
38 | END fees_paid_usd
39 | from {{ origin_key }}_tx
40 | where block_date = current_date
--------------------------------------------------------------------------------
/backend/airflow/dags/metrics/metrics_starknet_proof.py:
--------------------------------------------------------------------------------
1 | from datetime import datetime,timedelta
2 | from airflow.decorators import dag, task
3 | from src.misc.airflow_utils import alert_via_webhook
4 |
5 | @dag(
6 | default_args={
7 | 'owner' : 'mseidl',
8 | 'retries' : 2,
9 | 'email_on_failure': False,
10 | 'retry_delay' : timedelta(minutes=5),
11 | 'on_failure_callback': alert_via_webhook
12 | },
13 | dag_id='metrics_starknet_proof',
14 | description='Load Starknet proof costs.',
15 | tags=['metrics', 'daily'],
16 | start_date=datetime(2024,4,21),
17 | schedule='30 03 * * *'
18 | )
19 |
20 | def etl():
21 | @task()
22 | def run_tvl():
23 | from src.db_connector import DbConnector
24 | from src.adapters.adapter_starknet_proof import AdapterStarknetProof
25 |
26 | adapter_params = {}
27 | load_params = {
28 | 'days' : 7,
29 | }
30 |
31 | # initialize adapter
32 | db_connector = DbConnector()
33 | ad = AdapterStarknetProof(adapter_params, db_connector)
34 | # extract
35 | df = ad.extract(load_params)
36 | # load
37 | ad.load(df)
38 |
39 | run_tvl()
40 | etl()
--------------------------------------------------------------------------------
/backend/src/queries/postgres/chain_metrics/select_aa_lastXXd.sql.j2:
--------------------------------------------------------------------------------
1 | {#
2 | A query to aggregate the number of active addresses for a specified chain ('origin_key') over a sliding window of 'timerange' days,
3 | and then select the last 'days' days from that rolling aggregate.
4 |
5 | metric_key = 'aa_last7d', 'aa_last30d'
6 |
7 | Parameters:
8 | - origin_key: The key of the chain to filter the data by.
9 | - days: default = 7; The number of recent days to include in the final output.
10 | - timerange: The length of the rolling window (in days) over which we aggregate addresses. Either 7 or 30.
11 | #}
12 |
13 | {% set days = days | default(7) %}
14 |
15 | WITH tmp AS (
16 | SELECT
17 | date AS day,
18 | #hll_union_agg(hll_addresses) OVER window_range AS value
19 | FROM fact_active_addresses_hll
20 | WHERE origin_key = '{{ origin_key }}'
21 | AND date > current_date - interval '{{ days }} days' - interval '{{ timerange }} days'
22 | AND date < current_date
23 | WINDOW window_range AS (ORDER BY date ASC ROWS {{ timerange }} - 1 PRECEDING)
24 | )
25 | SELECT
26 | day,
27 | value::int AS value
28 | FROM tmp
29 | WHERE day >= current_date - interval '{{ days }} days';
--------------------------------------------------------------------------------
/backend/src/queries/postgres/api/select_top_da_consumers_list.sql.j2:
--------------------------------------------------------------------------------
1 | {#
2 | A query to get the list of top DA consumers that have a name assigned (i.e. appear in our economics mapping) and posted over 100MB of data during their lifetime
3 |
4 | Parameters:
5 | - da_layer: The DA layer to consider for data extraction.
6 | #}
7 |
8 | {% if da_layer == "da_ethereum_blobs" %}
9 | {% set metric_key = "ethereum_blob_size_bytes" %}
10 | {% elif da_layer == "da_celestia" %}
11 | {% set metric_key = "celestia_blob_size_bytes" %}
12 | {% elif da_layer == "da_eigenda" %}
13 | {% set metric_key = "eigenda_blob_size_bytes" %}
14 | {% endif %}
15 |
16 | with econ_names as (
17 | SELECT
18 | origin_key,
19 | max("name") as econ_name
20 | FROM sys_economics_mapping
21 | group by 1
22 | )
23 |
24 | select
25 | fk.origin_key as da_consumer_key,
26 | eco.econ_name as name,
27 | c.origin_key as gtp_origin_key,
28 | sum(value) as data_posted
29 | from fact_kpis fk
30 | left join econ_names eco using (origin_key)
31 | left join sys_main_conf c using (origin_key)
32 | where metric_key = '{{ metric_key }}'
33 | group by 1,2,3
34 | having sum(value) > 0.1 * 1024 * 1024 * 1024 --at least 100MB of data posted
35 | order by 4 desc
36 |
--------------------------------------------------------------------------------
/backend/airflow/dags/oli/oli_mev_contract.py:
--------------------------------------------------------------------------------
1 | from datetime import datetime, timedelta
2 | from airflow.decorators import dag, task
3 | from src.misc.airflow_utils import alert_via_webhook
4 |
5 |
6 | @dag(
7 | default_args={
8 | 'owner': 'lorenz',
9 | 'retries': 2,
10 | 'email_on_failure': False,
11 | 'retry_delay': timedelta(minutes=5),
12 | 'on_failure_callback': lambda context: alert_via_webhook(context, user='ahoura')
13 | },
14 | dag_id='oli_mev_contract_airtable',
15 | description='Update Airtable with MEV contract data',
16 | tags=['oli', 'daily', 'mev'],
17 | start_date=datetime(2025, 5, 26),
18 | schedule='00 09 * * *'
19 | )
20 | def mev_contract_etl():
21 |
22 | @task()
23 | def run_mev_script():
24 | from src.misc.oli_mev_contract_airtable import main_async, Config
25 | import argparse
26 | import asyncio
27 |
28 | # Create args object with default values
29 | class Args:
30 | batch_size = 50
31 | concurrency = 10
32 | max_records = None
33 |
34 | args = Args()
35 |
36 | # Run the async main function
37 | asyncio.run(main_async(args))
38 |
39 | run_mev_script()
40 |
41 | mev_contract_etl()
--------------------------------------------------------------------------------
/backend/src/queries/postgres/api/select_tps_projected.sql.j2:
--------------------------------------------------------------------------------
1 | {#
2 | A query to get projected TPS data based on a compound growth formula.
3 |
4 | Parameters:
5 | - start_day: The starting date for the projection
6 | - months_total: The total number of months to project
7 | - annual_factor: The annual growth factor
8 | - starting_tps: The starting TPS value
9 | #}
10 |
11 | WITH params AS (
12 | SELECT
13 | TIMESTAMP '{{ start_day }}' AS start_date,
14 | {{ months_total }} AS months_total,
15 | {{ starting_tps }}::double precision AS start_tps,
16 | {{ annual_factor }}::double precision AS annual_factor
17 | ),
18 | expanded AS (
19 | SELECT
20 | gs AS month_idx,
21 | (SELECT start_date FROM params) + (gs || ' months')::interval AS month_date
22 | FROM generate_series(0, (SELECT months_total FROM params) - 1) AS t(gs)
23 | )
24 | SELECT
25 | date_trunc('month', month_date) AS month,
26 | 'projection' AS data_type,
27 | (
28 | (SELECT start_tps FROM params)
29 | * power((SELECT annual_factor FROM params), month_idx / 12.0)
30 | ) ::double precision AS tps,
31 | 10000 as target_tps
32 | FROM expanded
33 |
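
To make the projection formula concrete: each month's TPS is starting_tps * annual_factor ** (month_idx / 12), i.e. the annual factor applied fractionally per month. A tiny Python sketch with illustrative values (not taken from the repo):

starting_tps, annual_factor = 15.0, 2.0  # illustrative inputs
for month_idx in range(0, 13, 3):
    tps = starting_tps * annual_factor ** (month_idx / 12.0)
    print(month_idx, round(tps, 2))
# with annual_factor = 2.0, month 12 gives exactly 2x the starting TPS (30.0)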
--------------------------------------------------------------------------------
/backend/src/api/og_resources/icons/small/gtp-close.svg:
--------------------------------------------------------------------------------
1 |
15 |
--------------------------------------------------------------------------------
/backend/src/queries/postgres/chain_metrics/select_cca_last7d.sql.j2:
--------------------------------------------------------------------------------
1 | {#
2 | This query calculates the unique active addresses on a specified chain ('origin_key') over the last 7 days.
3 | The metric (cca_last7d_exclusive) is computed as (unique addresses on the chain MINUS intersecting unique addresses).
4 |
5 | metric_key: cca_last7d_exclusive
6 |
7 | Parameters:
8 | - origin_key: The key identifying the target chain.
9 | - days_interval: default = 7; The time interval (in days) over which to consider active addresses (the query below currently hardcodes a 7-day window).
10 | #}
11 |
12 | WITH step_1 AS (
13 | SELECT
14 | #hll_union_agg(hll_addresses) AS unioned,
15 | #hll_union_agg(CASE WHEN origin_key = '{{ origin_key }}' THEN hll_addresses END) AS chain_a,
16 | #hll_union_agg(CASE WHEN origin_key <> '{{ origin_key }}' THEN hll_addresses END) AS other_chains
17 | FROM fact_active_addresses_hll
18 | WHERE
19 | --origin_key NOT IN ('ethereum', 'starknet')
20 | "date" BETWEEN current_date - INTERVAL '7 days' AND current_date
21 | ORDER BY 1 DESC
22 | )
23 | SELECT
24 | (current_date - INTERVAL '1 days')::DATE AS day,
25 | 'cca_last7d_exclusive' AS metric_key,
26 | (chain_a - (chain_a + other_chains - unioned))::int AS value -- chain_a - intersecting
27 | FROM step_1;
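
The exclusive-address arithmetic in the final SELECT is plain inclusion-exclusion: intersecting = chain_a + other_chains - unioned, hence exclusive = chain_a - intersecting. A quick numeric sketch with made-up cardinalities:

chain_a, other_chains, unioned = 1_000, 5_000, 5_600  # illustrative HLL cardinality estimates
intersecting = chain_a + other_chains - unioned       # 400 addresses seen on both sides
exclusive = chain_a - intersecting                    # 600 addresses active only on this chain
print(exclusive)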
--------------------------------------------------------------------------------
/backend/airflow/dags/raw/raw_imx.py:
--------------------------------------------------------------------------------
1 | from datetime import datetime,timedelta
2 | from airflow.decorators import dag, task
3 | from src.misc.airflow_utils import alert_via_webhook
4 |
5 | @dag(
6 | default_args={
7 | 'owner' : 'mseidl',
8 | 'retries' : 2,
9 | 'email_on_failure': False,
10 | 'retry_delay' : timedelta(minutes=5),
11 | 'on_failure_callback': alert_via_webhook
12 | },
13 | dag_id='raw_imx',
14 | description='Load raw data on withdrawals, deposits, trades, orders_filled, transfers, mints.',
15 | tags=['raw', 'near-real-time'],
16 | start_date=datetime(2023,4,24),
17 | schedule='3/10 * * * *'
18 | )
19 |
20 | def etl():
21 | @task(execution_timeout=timedelta(minutes=45))
22 | def run_imx():
23 | from src.db_connector import DbConnector
24 | from src.adapters.adapter_raw_imx import AdapterRawImx
25 |
26 | adapter_params = {
27 | 'load_types' : ['withdrawals', 'deposits', 'transfers', 'mints'],
28 | 'forced_refresh' : 'no',
29 | }
30 | # initialize adapter
31 | db_connector = DbConnector()
32 | ad = AdapterRawImx(adapter_params, db_connector)
33 | # extract & load incrementally
34 | ad.extract_raw()
35 |
36 | run_imx()
37 | etl()
--------------------------------------------------------------------------------
/backend/src/queries/postgres/api/select_new_user_contracts.sql.j2:
--------------------------------------------------------------------------------
1 | {#
2 | A query to get contracts that new users have interacted with
3 |
4 | Parameters:
5 | - days: default = 1; The time interval (in days) to consider for data extraction.
6 | - origin_key: The origin_key to filter the data.
7 | - limit: default = 30; The maximum number of results to return.
8 | #}
9 |
10 | with eth_price as (
11 | SELECT "date", value
12 | FROM fact_kpis
13 | WHERE metric_key = 'price_usd' and origin_key = 'ethereum'
14 | )
15 |
16 | SELECT
17 | to_address as address,
18 | l.contract_name,
19 | l.owner_project,
20 | l.usage_category as sub_category_key,
21 | o.main_category_id as main_category_key,
22 | count(*) as txcount,
23 | sum(tx_fee) as gas_fees_eth,
24 | sum(tx_fee * e.value) as gas_fees_usd
25 | FROM public.{{ origin_key }}_tx tx
26 | left join vw_oli_label_pool_gold_pivoted_v2 l on tx.to_address = l.address
27 | left join eth_price e on e.date = tx.block_date
28 | left join oli_categories o on l.usage_category = o.category_id
29 | where tx.block_date >= date_trunc('day',now()) - interval '{{ days }} days'
30 | and tx.block_date < current_date
31 | and tx.nonce = 0
32 | and l.origin_key = '{{ origin_key }}'
33 | group by 1,2,3,4,5
34 | order by 6 desc
35 | limit {{ limit }}
--------------------------------------------------------------------------------
/backend/src/queries/postgres/api/select_rankings.sql.j2:
--------------------------------------------------------------------------------
1 | {#
2 | A query to get chain rankings from the fact_kpis table.
3 | We rank the chains based on their metrics on the last day.
4 |
5 | Parameters:
6 | - metric_key: the specific metric key to consider for data extraction.
7 | - comparison_oks: A list of origin keys to consider for data extraction.
8 | - origin_key: the specific origin key to filter the results.
9 | #}
10 |
11 |
12 | WITH y AS (
13 | SELECT
14 | metric_key,
15 | origin_key,
16 | value,
17 | RANK() OVER (
18 | PARTITION BY metric_key
19 | ORDER BY
20 | /* for tx_costs lower is better */
21 | CASE WHEN metric_key = 'txcosts_median_usd' THEN value END ASC NULLS LAST,
22 | /* for all other metrics higher is better */
23 | CASE WHEN metric_key <> 'txcosts_median_usd' THEN value END DESC NULLS LAST
24 | ) AS rank,
25 | COUNT(*) OVER (PARTITION BY metric_key) AS out_of
26 | FROM fact_kpis
27 | WHERE "date" = CURRENT_DATE - INTERVAL '1 day'
28 | AND metric_key = '{{ metric_key }}'
29 | AND origin_key IN ( '{{ comparison_oks | join("', '") }}' )
30 | )
31 | SELECT
32 | metric_key,
33 | origin_key,
34 | value,
35 | rank,
36 | out_of
37 | FROM y
38 | WHERE origin_key = '{{ origin_key }}'
39 | ORDER BY metric_key;
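
A note on the comparison_oks | join("', '") expression: wrapped in the surrounding single quotes it renders a SQL IN list. A minimal Jinja sketch with illustrative keys:

from jinja2 import Template

snippet = """origin_key IN ( '{{ comparison_oks | join("', '") }}' )"""
print(Template(snippet).render(comparison_oks=['arbitrum', 'base', 'optimism']))
# -> origin_key IN ( 'arbitrum', 'base', 'optimism' )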
--------------------------------------------------------------------------------
/backend/src/queries/postgres/chain_metrics/select_cca_weekly.sql.j2:
--------------------------------------------------------------------------------
1 | {#
2 | This query calculates the unique active addresses on a specified chain ('origin_key') in a given week.
3 | The metric (cca_weekly_exclusive) is computed as (unique addresses on the chain MINUS intersecting unique addresses).
4 |
5 | metric_key: cca_weekly_exclusive
6 |
7 | Parameters:
8 | - origin_key: The key identifying the target chain.
9 | - days: default = 7; The time interval (in days) over which to consider active addresses.
10 | #}
11 |
12 | {% set days = days | default(8) %}
13 |
14 | WITH step_1 AS (
15 | SELECT
16 | DATE_TRUNC('week', date) AS "day",
17 | #hll_union_agg(hll_addresses) AS unioned,
18 | #hll_union_agg(CASE WHEN origin_key = '{{ origin_key }}' THEN hll_addresses END) AS chain_a,
19 | #hll_union_agg(CASE WHEN origin_key <> '{{ origin_key }}' THEN hll_addresses END) AS other_chains
20 | FROM fact_active_addresses_hll
21 | WHERE
22 | date < DATE_TRUNC('week', current_date)
23 | AND date >= DATE_TRUNC('week', current_date - INTERVAL '{{ days }} days')
24 | GROUP BY 1
25 | )
26 | SELECT
27 | "day",
28 | '{{ origin_key }}' AS origin_key,
29 | 'cca_weekly_exclusive' AS metric_key,
30 | (chain_a - (chain_a + other_chains - unioned))::int AS value -- chain_a - intersecting
31 | FROM step_1;
--------------------------------------------------------------------------------
/backend/airflow/dags/metrics/metrics_total_supply.py:
--------------------------------------------------------------------------------
1 | from datetime import datetime,timedelta
2 | from airflow.decorators import dag, task
3 | from src.misc.airflow_utils import alert_via_webhook
4 |
5 | @dag(
6 | default_args={
7 | 'owner' : 'lorenz',
8 | 'retries' : 2,
9 | 'email_on_failure': False,
10 | 'retry_delay' : timedelta(minutes=15),
11 | 'on_failure_callback': alert_via_webhook
12 | },
13 | dag_id='metrics_total_supply',
14 | description='Get KPI totalSupply for tokens of L2 chains',
15 | tags=['metrics', 'daily'],
16 | start_date=datetime(2024,2,20),
17 | schedule='10 02 * * *'
18 | )
19 |
20 | def etl():
21 | @task()
22 | def load_data():
23 | import os
24 | from src.db_connector import DbConnector
25 | from src.adapters.adapter_total_supply import AdapterTotalSupply
26 |
27 | adapter_params = {}
28 | load_params = {
29 | 'days' : 'auto', ## days as int or 'auto'
30 | 'origin_keys' : None, ## origin_keys as list or None
31 | }
32 |
33 | # initialize adapter
34 | db_connector = DbConnector()
35 | ad = AdapterTotalSupply(adapter_params, db_connector)
36 | # extract
37 | df = ad.extract(load_params)
38 | # load
39 | ad.load(df)
40 |
41 | load_data()
42 | etl()
--------------------------------------------------------------------------------
/backend/src/api/og_resources/icons/small/gtp-layer2-maturity-early-phase.svg:
--------------------------------------------------------------------------------
1 |
20 |
--------------------------------------------------------------------------------
/backend/src/api/og_resources/icons/small/gtp-piechart.svg:
--------------------------------------------------------------------------------
1 |
20 |
--------------------------------------------------------------------------------
/backend/src/queries/postgres/api/select_l2count_over_time.sql.j2:
--------------------------------------------------------------------------------
1 | WITH date_series AS (
2 | SELECT generate_series(
3 | date '2020-03-01', -- start at the first of a month
4 | current_date,
5 | interval '1 month'
6 | )::date AS date
7 | ),
8 | l2beat_origin_key AS (
9 | SELECT l2.date_100k, l2.name, l2.archived_on, mc.origin_key
10 | FROM sys_l2beat l2
11 | LEFT JOIN sys_main_conf mc ON l2.index = mc.aliases_l2beat
12 | )
13 | SELECT
14 | ds.date,
15 | COUNT(DISTINCT s.index) AS value,
16 | jsonb_agg(DISTINCT jsonb_build_object(
17 | 'l2beat_name', s2.name,
18 | 'origin_key', s2.origin_key
19 | )) FILTER (WHERE s2.name IS NOT NULL) AS l2s_launched,
20 | jsonb_agg(DISTINCT jsonb_build_object(
21 | 'l2beat_name', a2.name,
22 | 'origin_key', a2.origin_key
23 | )) FILTER (WHERE a2.name IS NOT NULL) AS l2s_archived
24 | FROM date_series ds
25 | LEFT JOIN sys_l2beat s
26 | ON s.date_100k <= ds.date + interval '31 days'
27 | AND (s.archived_on IS NULL OR s.archived_on > ds.date) -- active at ds.date
28 | LEFT JOIN l2beat_origin_key s2
29 | ON date_trunc('month', s2.date_100k)::date = ds.date -- launched this month
30 | LEFT JOIN l2beat_origin_key a2
31 | ON a2.archived_on IS NOT NULL
32 | AND date_trunc('month', a2.archived_on)::date = ds.date -- archived this month
33 | GROUP BY 1
34 | ORDER BY 1 ASC;
--------------------------------------------------------------------------------
/backend/src/queries/postgres/chain_metrics/upsert_cca_weekly_multiple_l2s.sql.j2:
--------------------------------------------------------------------------------
1 | {#
2 | This query calculates + upserts the unique active addresses on multiple L2s that weren't active on Ethereum in a given week.
3 |
4 | metric_key: cca_weekly_multiple_l2s
5 |
6 | Parameters:
7 | - days: default = 8; The time interval (in days) over which to consider active addresses.
8 | #}
9 |
10 | {% set days = days | default(8) %}
11 |
12 | INSERT INTO fact_kpis (date, origin_key, metric_key, value)
13 |
14 | WITH chain_info AS (
15 | SELECT
16 | DATE_TRUNC('week', date) AS day,
17 | address,
18 | count(distinct origin_key) as counter,
19 | array_agg(distinct origin_key) as ok_array
20 | FROM fact_active_addresses
21 | WHERE
22 | date < DATE_TRUNC('week', NOW())
23 | AND date >= DATE_TRUNC('week', NOW() - INTERVAL '{{ days }} days')
24 | AND origin_key <> 'starknet'
25 | GROUP BY 1, 2
26 | )
27 |
28 | SELECT
29 | day as date,
30 | 'all' as origin_key,
31 | 'cca_weekly_multiple_l2s' AS metric_key,
32 | COUNT(*) AS value
33 | FROM chain_info
34 | WHERE counter > 1 AND NOT ('ethereum' = ANY(ok_array))
35 | GROUP BY 1
36 |
37 | ON CONFLICT (date, origin_key, metric_key)
38 | DO UPDATE SET
39 | value = EXCLUDED.value;
--------------------------------------------------------------------------------
/backend/airflow/dags/utility/utility_healthcheck.py:
--------------------------------------------------------------------------------
1 | from datetime import datetime,timedelta
2 | from airflow.decorators import dag, task
3 | from src.misc.airflow_utils import alert_via_webhook
4 |
5 | @dag(
6 | default_args={
7 | 'owner' : 'mseidl',
8 | 'retries' : 1,
9 | 'email_on_failure': False,
10 | 'retry_delay' : timedelta(seconds=5),
11 | 'on_failure_callback': lambda context: alert_via_webhook(context, user='lorenz')
12 | },
13 | dag_id='utility_healthcheck',
14 | description='This DAG sends a heartbeat to healthchecks.io every 5 minutes',
15 | tags=['utility'],
16 | start_date=datetime(2023,4,24),
17 | schedule='*/5 * * * *'
18 | )
19 |
20 | def healthcheck():
21 | @task()
22 | def run_healthcheck_ping():
23 | try:
24 | import os
25 | import requests
26 | import dotenv
27 | dotenv.load_dotenv()
28 |
29 | # Healthchecks.io URL
30 | HEALTHCHECKS_URL = os.getenv("HEALTHCHECKS_URL")
31 | response = requests.get(HEALTHCHECKS_URL, timeout=5)
32 | response.raise_for_status() # Raises an error if the request fails
33 | print("✅ Heartbeat sent successfully!")
34 | except requests.RequestException as e:
35 | print(f"❌ Failed to send heartbeat: {e}")
36 |
37 | run_healthcheck_ping()
38 | healthcheck()
--------------------------------------------------------------------------------
/backend/src/api/og_resources/icons/small/gtp-risk.svg:
--------------------------------------------------------------------------------
1 |
20 |
--------------------------------------------------------------------------------
/backend/airflow/dags/metrics/metrics_sql_blockspace.py:
--------------------------------------------------------------------------------
1 | from datetime import datetime,timedelta
2 | from airflow.decorators import dag, task
3 | from src.misc.airflow_utils import alert_via_webhook
4 |
5 | @dag(
6 | default_args={
7 | 'owner' : 'mseidl',
8 | 'retries' : 1,
9 | 'email_on_failure': False,
10 | 'retry_delay' : timedelta(minutes=5),
11 | 'on_failure_callback': alert_via_webhook
12 | },
13 | dag_id='metrics_sql_blockspace',
14 | description='Run blockspace sql aggregations on database.',
15 | tags=['metrics', 'daily'],
16 | start_date=datetime(2023,4,24),
17 | schedule='30 01 * * *' ## after coingecko, before sql materialize
18 | )
19 |
20 | def etl():
21 | @task()
22 | def run_blockspace():
23 | from src.db_connector import DbConnector
24 | from src.adapters.adapter_sql import AdapterSQL
25 |
26 | db_connector = DbConnector()
27 |
28 | adapter_params = {
29 | }
30 |
31 | load_params = {
32 | 'load_type' : 'blockspace', ## usd_to_eth or metrics or blockspace
33 | 'days' : 'auto', ## days as int or 'auto'
34 | 'origin_keys' : None, ## origin_keys as list or None
35 | }
36 |
37 | # initialize adapter
38 | ad = AdapterSQL(adapter_params, db_connector)
39 |
40 | # extract
41 | ad.extract(load_params)
42 |
43 | run_blockspace()
44 | etl()
--------------------------------------------------------------------------------
/backend/src/api/og_resources/icons/small/gtp-compare.svg:
--------------------------------------------------------------------------------
1 |
20 |
--------------------------------------------------------------------------------
/backend/airflow/dags/other/other_octant.py:
--------------------------------------------------------------------------------
1 | from datetime import datetime,timedelta
2 | from airflow.decorators import dag, task
3 | from src.misc.airflow_utils import alert_via_webhook
4 |
5 | @dag(
6 | default_args={
7 | 'owner' : 'mseidl',
8 | 'retries' : 1,
9 | 'email_on_failure': False,
10 | 'retry_delay' : timedelta(minutes=2),
11 | 'on_failure_callback': alert_via_webhook
12 | },
13 | dag_id='other_octant',
14 | description='Load data from Octant API for tracker.',
15 | tags=['other', 'near-real-time'],
16 | start_date=datetime(2024,7,22),
17 | schedule='*/05 * * * *'
18 | )
19 |
20 | def run_dag():
21 | @task()
22 | def run_octant_v2():
23 | import os
24 | import dotenv
25 | from src.db_connector import DbConnector
26 | from src.misc.octant_v2 import OctantV2
27 |
28 | dotenv.load_dotenv()
29 | api_version = "v1"
30 | db_connector = DbConnector(db_name="fun")
31 |
32 | octantv2 = OctantV2(os.getenv("S3_CF_BUCKET"), os.getenv("CF_DISTRIBUTION_ID"), db_connector, api_version)
33 | #octantv2.run_load_octant_data_for_all_epochs()
34 | print('### LOAD DATA FOR LATEST EPOCH ###')
35 | octantv2.run_load_epoch_data(9)
36 |
37 | print('### CREATE ALL OCTANT JSONS ###')
38 | octantv2.run_create_all_octant_jsons()
39 |
40 |
41 | run_octant_v2()
42 | run_dag()
--------------------------------------------------------------------------------
/backend/src/queries/postgres/da_metrics/upsert_fact_da_consumers_celestia_blob_count.sql.j2:
--------------------------------------------------------------------------------
1 | {#
2 | Insert or update records into the 'fact_da_consumers' table. This query computes the sum of 'blob_count' from the 'celestia_tx' table.
3 | It counts the blobs submitted per namespace.
4 |
5 | Parameters:
6 | - days: default = 7; The time interval (in days) to consider for the data extraction. This will be dynamically injected into the query via Jinja2 templating.
7 | #}
8 |
9 | {% set days = days | default(7) %}
10 |
11 | INSERT INTO fact_da_consumers (date, da_layer, namespace, metric_key, value)
12 |
13 | SELECT
14 | date_trunc('day', block_timestamp) AS day,
15 | 'da_celestia' as da_layer,
16 | namespace,
17 | 'blob_count' as metric_key,
18 | COUNT(*) AS value -- number of blobs
19 | FROM (
20 | SELECT
21 | block_timestamp,
22 | jsonb_array_elements(blob_sizes::jsonb)::numeric AS blob_sizes,
23 | trim('"' FROM jsonb_array_elements(namespaces::jsonb)::text) AS namespace
24 | FROM celestia_tx
25 | WHERE
26 | block_timestamp > current_date - interval '{{ days }} days'
27 | AND block_timestamp < current_date
28 | AND blob_sizes IS NOT NULL
29 | ) AS subquery
30 | GROUP BY 1,2,3,4
31 |
32 | ON CONFLICT (date, da_layer, namespace, metric_key)
33 | DO UPDATE SET
34 | value = EXCLUDED.value;
--------------------------------------------------------------------------------
/backend/src/queries/postgres/api/quick_bites/linea_profit_calculation.sql.j2:
--------------------------------------------------------------------------------
1 | SELECT
2 | base.date,
3 | base.value AS gas_fee_income,
4 | op.value AS operating_costs,
5 | l1.value AS operating_costs_L1,
6 | op.value - l1.value AS operating_costs_infrastructure,
7 | base.value - op.value AS amount_for_burn,
8 | -- USD conversions
9 | base.value * price.value AS gas_fee_income_usd,
10 | op.value * price.value AS operating_costs_usd,
11 | l1.value * price.value AS operating_costs_L1_usd,
12 | (op.value - l1.value) * price.value AS operating_costs_infrastructure_usd,
13 | (base.value - op.value) * price.value AS amount_for_burn_usd
14 | FROM (
15 | SELECT "date", value
16 | FROM public.fact_kpis
17 | WHERE origin_key = 'linea'
18 | AND metric_key = 'profit_eth'
19 | AND "date" > '2025-09-10'
20 | ) base
21 | LEFT JOIN (
22 | SELECT "date", value
23 | FROM public.fact_kpis
24 | WHERE origin_key = 'linea'
25 | AND metric_key = 'qb_amountRequested_eth'
26 | ) op ON base.date = op.date
27 | LEFT JOIN (
28 | SELECT "date", value
29 | FROM public.fact_kpis
30 | WHERE origin_key = 'linea'
31 | AND metric_key = 'costs_total_eth'
32 | ) l1 ON base.date = l1.date
33 | LEFT JOIN (
34 | SELECT "date", value
35 | FROM public.fact_kpis
36 | WHERE metric_key = 'price_usd'
37 | AND origin_key = 'ethereum'
38 | ) price ON base.date = price.date
39 | ORDER BY base.date DESC
--------------------------------------------------------------------------------
/backend/src/api/og_resources/icons/small/octant.svg:
--------------------------------------------------------------------------------
1 |
5 |
--------------------------------------------------------------------------------
/backend/src/api/og_resources/icons/small/gtp-metrics-activity.svg:
--------------------------------------------------------------------------------
1 |
20 |
--------------------------------------------------------------------------------
/backend/src/api/og_resources/icons/small/gtp-metrics-valuelocked.svg:
--------------------------------------------------------------------------------
1 |
10 |
--------------------------------------------------------------------------------
/backend/src/queries/postgres/da_metrics/upsert_fact_da_consumers_celestia_blob_size.sql.j2:
--------------------------------------------------------------------------------
1 | {#
2 | Insert or update records into the 'fact_da_consumers' table. This query computes the sum of 'blob_sizes' from the 'celestia_tx' table.
3 | It extracts the blob sizes from the 'blob_sizes' json column and the 'namespace' from the 'namespaces' json column.
4 |
5 | Parameters:
6 | - days: default = 7; The time interval (in days) to consider for the data extraction. This will be dynamically injected into the query via Jinja2 templating.
7 | #}
8 |
9 | {% set days = days | default(7) %}
10 |
11 | INSERT INTO fact_da_consumers (date, da_layer, namespace, metric_key, value)
12 |
13 | SELECT
14 | date_trunc('day', block_timestamp) AS date,
15 | 'da_celestia' as da_layer,
16 | namespace,
17 | 'blob_size_bytes' as metric_key,
18 | sum(blob_sizes) AS value
19 | FROM (
20 | SELECT
21 | block_timestamp,
22 | jsonb_array_elements(blob_sizes::jsonb)::numeric AS blob_sizes,
23 | trim('"' FROM jsonb_array_elements(namespaces::jsonb)::text) AS namespace
24 | FROM celestia_tx
25 | WHERE
26 | block_timestamp > current_date - interval '{{ days }} days' -- Dynamic time interval
27 | AND block_timestamp < current_date
28 | AND "action" = 'celestia.blob.v1.MsgPayForBlobs'
29 | ) AS subquery
30 | GROUP BY 1,2,3,4
31 |
32 | ON CONFLICT (date, da_layer, namespace, metric_key)
33 | DO UPDATE SET
34 | value = EXCLUDED.value;
--------------------------------------------------------------------------------
/backend/airflow/dags/utility/utility_cross_check.py:
--------------------------------------------------------------------------------
1 | from datetime import datetime,timedelta
2 | from airflow.decorators import dag, task
3 | from src.misc.airflow_utils import alert_via_webhook
4 |
5 | @dag(
6 | default_args={
7 | 'owner' : 'mseidl',
8 | 'retries' : 2,
9 | 'email_on_failure': False,
10 | 'retry_delay' : timedelta(minutes=5),
11 | 'on_failure_callback': alert_via_webhook
12 | },
13 | dag_id='utility_cross_check',
14 | description='Load txcount data from explorers and check against our database. Send discord message if there is a discrepancy.',
15 | tags=['utility', 'daily'],
16 | start_date=datetime(2023,12,9),
17 | schedule='30 06 * * *'
18 | )
19 |
20 | def etl():
21 | @task()
22 | def run_explorers():
23 | from src.db_connector import DbConnector
24 | from src.adapters.adapter_cross_check import AdapterCrossCheck
25 |
26 | adapter_params = {}
27 |
28 | load_params = {
29 | 'origin_keys' : None,
30 | }
31 |
32 | # initialize adapter
33 | db_connector = DbConnector()
34 | ad = AdapterCrossCheck(adapter_params, db_connector)
35 | # extract
36 | df = ad.extract(load_params)
37 | # load
38 | ad.load(df)
39 |
40 | ## cross-check and send Discord message
41 | ad.cross_check()
42 |
43 | ## cross-check Celestia
44 | ad.cross_check_celestia()
45 |
46 | run_explorers()
47 | etl()
--------------------------------------------------------------------------------
/backend/src/api/og_resources/icons/small/gtp-compass.svg:
--------------------------------------------------------------------------------
1 |
15 |
--------------------------------------------------------------------------------
/backend/tests/test_env_vars.py:
--------------------------------------------------------------------------------
1 | import os
2 | import sys
3 | import json
4 | from dotenv import load_dotenv
5 |
6 | load_dotenv()
7 | # Add the parent directory to sys.path to import modules from src
8 | sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
9 |
10 | def check_env_vars():
11 | print("\n--- Checking Environment Variables ---")
12 |
13 | # Check GCS_BUCKET_NAME
14 | bucket_name = os.getenv("GCS_BUCKET_NAME")
15 | print(f"GCS_BUCKET_NAME: {'✓ Set' if bucket_name else '✗ Not set'}")
16 | if bucket_name:
17 | print(f" Value: {bucket_name}")
18 |
19 | # Check GOOGLE_CREDENTIALS
20 | google_credentials = os.getenv("GOOGLE_CREDENTIALS")
21 | print(f"GOOGLE_CREDENTIALS: {'✓ Set' if google_credentials else '✗ Not set'}")
22 |
23 | if google_credentials:
24 | # Debug: Print the raw value to inspect it
25 | print(f" Raw value: {repr(google_credentials)}")
26 |
27 | try:
28 | # Attempt to parse the JSON string
29 | json_google_credentials = json.loads(google_credentials)
30 | print(f" Parsed JSON: {json_google_credentials}")
31 | except json.JSONDecodeError as e:
32 | print(f" Error parsing GOOGLE_CREDENTIALS: {e}")
33 | print(" Ensure the GOOGLE_CREDENTIALS environment variable contains valid JSON.")
34 | else:
35 | print(" GOOGLE_CREDENTIALS is not set.")
36 |
37 | if __name__ == "__main__":
38 | check_env_vars()
--------------------------------------------------------------------------------
/backend/src/api/og_resources/icons/small/gtp-blog.svg:
--------------------------------------------------------------------------------
1 |
20 |
--------------------------------------------------------------------------------
/backend/src/api/og_resources/icons/small/gtp-layer2-maturity-emerging.svg:
--------------------------------------------------------------------------------
1 |
20 |
--------------------------------------------------------------------------------
/backend/tests/test_ethereum.py:
--------------------------------------------------------------------------------
1 | from src.adapters.adapter_raw_rpc import NodeAdapter
2 | from src.adapters.rpc_funcs.utils import MaxWaitTimeExceededException, get_chain_config
3 | from src.db_connector import DbConnector
4 |
5 | def run_ethereum():
6 | """
7 | Test script for Ethereum data loading.
8 | Starts from block 22265969 and uses batch receipt functionality.
9 | """
10 | # Initialize DbConnector
11 | db_connector = DbConnector()
12 |
13 | chain_name = 'ethereum'
14 |
15 | active_rpc_configs, batch_size = get_chain_config(db_connector, chain_name)
16 | print(f"ETH_CONFIG={active_rpc_configs}")
17 |
18 | # Adjust workers to 1 for testing to avoid overwhelming RPCs
19 | for config in active_rpc_configs:
20 | config['workers'] = 1
21 |
22 | adapter_params = {
23 | 'rpc': 'local_node',
24 | 'chain': chain_name,
25 | 'rpc_configs': active_rpc_configs,
26 | }
27 |
28 | # Initialize NodeAdapter
29 | adapter = NodeAdapter(adapter_params, db_connector)
30 |
31 | # Initial load parameters
32 | load_params = {
33 | 'block_start': 22265969,
34 | 'batch_size': 10,
35 | }
36 |
37 | try:
38 | adapter.extract_raw(load_params)
39 | except MaxWaitTimeExceededException as e:
40 | print(f"Extraction stopped due to maximum wait time being exceeded: {e}")
41 | raise e
42 | finally:
43 | adapter.log_stats()
44 |
45 |
46 | if __name__ == "__main__":
47 | run_ethereum()
--------------------------------------------------------------------------------
/backend/src/api/og_resources/icons/small/github.svg:
--------------------------------------------------------------------------------
1 |
11 |
--------------------------------------------------------------------------------
/backend/src/queries/postgres/chain_metrics/select_fees_paid_custom_gas.sql.j2:
--------------------------------------------------------------------------------
1 | {#
2 | A query to calculate the fees paid (in ETH) on a chain with a custom gas token.
3 |
4 |
5 | metric_key = 'fees_paid_eth'
6 |
7 | Parameters:
8 | - origin_key: The name of the chain to identify the custom gas token price feed.
9 | - days: default = 7; The time interval (in days) to consider for the data extraction.
10 | #}
11 |
12 | {% set days = days | default(7) %}
13 |
14 | WITH token_price AS (
15 | SELECT "date", value AS price_usd
16 | FROM fact_kpis
17 | WHERE origin_key = '{{ origin_key }}'
18 | AND metric_key = 'price_usd'
19 | AND "date" >= current_date - interval '{{ days }} days'
20 | AND "date" < current_date
21 | ),
22 | eth_price AS (
23 | SELECT "date", value AS price_usd
24 | FROM fact_kpis
25 | WHERE origin_key = 'ethereum'
26 | AND metric_key = 'price_usd'
27 | AND "date" >= current_date - interval '{{ days }} days'
28 | AND "date" < current_date
29 | ),
30 | tx_filtered AS (
31 | SELECT
32 | block_date AS day,
33 | SUM(tx_fee) AS total_tx_fee
34 | FROM {{ origin_key }}_tx
35 | WHERE block_date >= current_date - interval '{{ days }} days'
36 | AND block_date < current_date
37 | GROUP BY 1
38 | )
39 |
40 | SELECT
41 | tx.day,
42 | tx.total_tx_fee * e.price_usd / eth.price_usd AS value
43 | FROM tx_filtered tx
44 | LEFT JOIN token_price e ON tx.day = e."date"
45 | LEFT JOIN eth_price eth ON tx.day = eth."date";
--------------------------------------------------------------------------------
/backend/src/queries/postgres/chain_metrics/custom/mantle_select_gas_per_second.sql.j2:
--------------------------------------------------------------------------------
1 | {#
2 | A query to calculate the ETH-equivalent gas used per second on Mantle over the last 'days' days.
3 | This involves adjusting Mantle gas usage with a token ratio (Mantle to Ethereum),
4 | computed hourly from price data, and then normalized to a daily rate.
5 |
6 | Parameters:
7 | - days: default = 7; The time interval (in days) to consider for the data extraction.
8 | #}
9 |
10 | {% set days = days | default(7) %}
11 |
12 | WITH ratio AS (
13 | SELECT
14 | "timestamp",
15 | AVG(CASE WHEN origin_key = 'mantle' THEN value END) / AVG(CASE WHEN origin_key = 'ethereum' THEN value END) AS token_ratio
16 | FROM public.fact_kpis_granular
17 | WHERE
18 | metric_key = 'price_usd'
19 | AND granularity = 'hourly'
20 | AND "timestamp" > '2024-03-14'
21 | GROUP BY 1
22 | ),
23 | tmp AS (
24 | SELECT
25 | date_trunc('hour', block_timestamp) AS hour,
26 | SUM(gas_used * token_ratio) - SUM(l1_gas_used) AS l2_gas_used
27 | FROM mantle_tx
28 | LEFT JOIN ratio r ON r."timestamp" = date_trunc('hour', block_timestamp)
29 | WHERE
30 | block_timestamp > date_trunc('day', now()) - INTERVAL '{{ days }} days'
31 | AND block_timestamp < date_trunc('day', now())
32 | AND block_timestamp > '2024-03-14'
33 | GROUP BY 1
34 | )
35 | SELECT
36 | date_trunc('day', "hour") AS day,
37 | SUM(l2_gas_used) / (24*60*60) AS value
38 | FROM tmp t
39 | GROUP BY 1;
--------------------------------------------------------------------------------
/backend/src/api/og_resources/icons/small/gtp-house.svg:
--------------------------------------------------------------------------------
1 |
15 |
--------------------------------------------------------------------------------
/backend/src/queries/postgres/chain_metrics/select_total_stable_supply.sql.j2:
--------------------------------------------------------------------------------
1 | {#
2 | A query to calculate total stablecoin supply in USD, adjusted for bridged L2 supplies.
3 |
4 | Parameters:
5 | - origin_keys: The list of chain keys to filter the data by.
6 | - days: The time interval (in days) to consider for the data extraction (no default is set in this template).
7 | #}
8 |
9 |
10 | WITH stables_usd as (
11 | select *
12 | from vw_fact_stables_usd
13 | where date < current_date
14 | AND date >= current_date - interval '{{ days }} days'
15 | and origin_key IN ( '{{ origin_keys | join("', '") }}')
16 | ),
17 |
18 | bridged_l2s AS (
19 | SELECT
20 | date,
21 | SUM(value_usd) AS value_l2s_usd
22 | FROM stables_usd
23 | WHERE metric_key IN ('supply_bridged', 'supply_bridged_exceptions')
24 | AND origin_key <> 'ethereum'
25 | GROUP BY date
26 | ),
27 |
28 | totals_raw AS (
29 | SELECT
30 | origin_key,
31 | date,
32 | SUM (
33 | CASE WHEN metric_key = 'supply_bridged_exceptions' THEN 0 ELSE value_usd END
34 | ) AS value_usd
35 | FROM stables_usd
36 | GROUP BY origin_key, date
37 | ),
38 |
39 | totals_adjusted AS (
40 | SELECT
41 | 'stables_mcap' AS metric_key,
42 | t.origin_key,
43 | t.date,
44 | CASE
45 | WHEN t.origin_key = 'ethereum'
46 | THEN t.value_usd - COALESCE(b.value_l2s_usd, 0)
47 | ELSE t.value_usd
48 | END AS value
49 | FROM totals_raw t
50 | LEFT JOIN bridged_l2s b USING (date)
51 | )
52 |
53 | SELECT *
54 | FROM totals_adjusted
55 | ORDER BY date, origin_key;
--------------------------------------------------------------------------------
/backend/airflow/dags/utility/utility_4byte.py:
--------------------------------------------------------------------------------
1 | from datetime import datetime,timedelta
2 | from airflow.decorators import dag, task
3 |
4 | from src.misc.airflow_utils import alert_via_webhook
5 |
6 | @dag(
7 | default_args={
8 | 'owner' : 'lorenz',
9 | 'retries' : 5,
10 | 'email_on_failure': False,
11 | 'retry_delay' : timedelta(seconds=5),
12 | 'on_failure_callback': lambda context: alert_via_webhook(context, user='lorenz')
13 | },
14 | dag_id='utility_4byte',
15 | description='This DAG creates a 4byte parquet export of relevant smart contract function signatures for our UserOp script.',
16 | tags=['utility'],
17 | start_date=datetime(2025,9,16),
18 | schedule='11 01 * * 0' # Run weekly on Sunday at 01:11 AM
19 | )
20 |
21 | def etl():
22 | @task()
23 | def run_4byte_export():
24 | from src.adapters.adapter_4bytes import Adapter4Bytes
25 | import os
26 |
27 | adapter = Adapter4Bytes()
28 |
29 | adapter.extract({
30 | 'save_path': 'backend/', # save path of four_byte_lookup.pkl & 4bytes.parquet file inside ec2 instance
31 | 'provider': 'sourcify' # options: "sourcify" or "verifieralliance"
32 | })
33 |
34 | adapter.load({
35 | 's3_path_parquet': 'v1/export/4bytes.parquet', # save path inside S3 bucket
36 | 's3_path_lookup': 'v1/export/four_byte_lookup.pkl', # save path inside S3 bucket
37 | 'S3_CF_BUCKET': os.getenv("S3_CF_BUCKET"),
38 | 'CF_DISTRIBUTION_ID': os.getenv("CF_DISTRIBUTION_ID")
39 | })
40 |
41 | run_4byte_export()
42 | etl()
--------------------------------------------------------------------------------
/backend/src/queries/postgres/chain_metrics/select_txcosts_median_custom_gas.sql.j2:
--------------------------------------------------------------------------------
1 | {#
2 | A query to calculate the median transaction cost (in ETH-equivalent terms) on chains with custom gas tokens.
3 |
4 | metric_key = 'txcosts_median_eth'
5 |
6 | Parameters:
7 | - origin_key: The name of the chain to identify the custom gas token price feed.
8 | - days: default = 7; The time interval (in days) to consider for the data extraction.
9 | #}
10 |
11 | {% set days = days | default(7) %}
12 |
13 | WITH token_price AS (
14 | SELECT "date", value AS price_usd
15 | FROM fact_kpis
16 | WHERE origin_key = '{{ origin_key }}'
17 | AND metric_key = 'price_usd'
18 | AND "date" >= current_date - interval '{{ days }} days'
19 | AND "date" < current_date
20 | ),
21 | eth_price AS (
22 | SELECT "date", value AS price_usd
23 | FROM fact_kpis
24 | WHERE origin_key = 'ethereum'
25 | AND metric_key = 'price_usd'
26 | AND "date" >= current_date - interval '{{ days }} days'
27 | AND "date" < current_date
28 | ),
29 | tx_median AS (
30 | SELECT
31 | block_date AS day,
32 | PERCENTILE_CONT(0.5) WITHIN GROUP (ORDER BY tx_fee) AS median_tx_fee
33 | FROM {{ origin_key }}_tx
34 | WHERE tx_fee > 0
35 | AND block_date >= current_date - interval '{{ days }} days'
36 | AND block_date < current_date
37 | GROUP BY 1
38 | )
39 |
40 | SELECT
41 | tx.day,
42 | tx.median_tx_fee * e.price_usd / eth.price_usd AS value
43 | FROM tx_median tx
44 | LEFT JOIN token_price e ON tx.day = e."date"
45 | LEFT JOIN eth_price eth ON tx.day = eth."date";
--------------------------------------------------------------------------------
/backend/src/misc/agora.py:
--------------------------------------------------------------------------------
1 | import requests
2 | import pandas as pd
3 |
4 | class AgoraAPI:
5 | def __init__(self, api_key: str):
6 | self.api_key = api_key
7 | self.headers = {
8 | 'Authorization': f"Bearer {self.api_key}",
9 | 'accept': 'application/json'
10 | }
11 | # base_url mapping
12 | self.base_url = {
13 | "Optimism": "https://vote.optimism.io/api/v1/",
14 | "Scroll": "https://gov.scroll.io/api/v1/"
15 | }
16 |
17 | def get_proposals(self, url, limit: int = 10, offset: int = 0) -> dict:
18 | """
19 | Fetch proposals from the Agora API at the given base URL (see self.base_url).
20 | Args:
21 | limit: Number of proposals to return (default: 10)
22 | offset: Number of proposals to skip (default: 0)
23 | Returns:
24 | DataFrame containing the proposals returned by the API.
25 | """
26 | try:
27 | endpoint = "proposals"
28 | params = {
29 | 'limit': limit,
30 | 'offset': offset
31 | }
32 |
33 | # Make the GET request
34 | response = requests.get(
35 | url + endpoint,
36 | headers=self.headers,
37 | params=params
38 | )
39 | response.raise_for_status() # Raise an exception for HTTP errors
40 |
41 | df = pd.DataFrame(response.json()['data'])
42 |
43 | # Return the proposals as a DataFrame
44 | return df
45 |
46 | except requests.exceptions.RequestException as e:
47 | raise Exception(f"Failed to fetch AgoraAPI proposals: {str(e)}")
--------------------------------------------------------------------------------
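A short usage sketch for `AgoraAPI` above; the environment variable name and the limit are illustrative only.

```python
# Hypothetical usage of AgoraAPI (env var name is an assumption)
import os

from src.misc.agora import AgoraAPI

api = AgoraAPI(api_key=os.getenv("AGORA_API_KEY", ""))

# Fetch the 5 most recent Optimism proposals as a pandas DataFrame
df_proposals = api.get_proposals(api.base_url["Optimism"], limit=5, offset=0)
print(df_proposals.head())
```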
/backend/src/queries/postgres/oli/extract_labels_for_review.sql.j2:
--------------------------------------------------------------------------------
1 | {#
2 | Get all attestations from external attesters that assign a 'contract_name', 'owner_project' or 'usage_category' tag to a label. Labels are reviewed and re-attested via Airtable.
3 | - Filters out tag_ids that are not contract_name, owner_project, usage_category
4 | - Filters out attestations made by growthepie.attest or growthepie.automatic
5 |
6 | Parameters:
7 | - date: Only get attestations after this date (default: '2020-01-01')
8 | #}
9 |
10 | {% set date = date | default('2020-01-01') %}
11 |
12 | WITH filtered_labels AS (
13 | SELECT
14 | uid, chain_id, address, tag_id, tag_value, attester, "time"
15 | FROM
16 | public.labels
17 | WHERE
18 | attester != decode('A725646C05E6BB813D98C5ABB4E72DF4BCF00B56', 'hex') -- growthepie.attest
19 | AND attester != decode('C139d50144Ee873c8577d682628E045dECe6040E', 'hex') -- growthepie.automatic
20 | AND chain_id IN (SELECT caip2 FROM sys_main_conf)
21 | AND tag_id IN ('contract_name', 'owner_project', 'usage_category')
22 | AND "time" > '{{ date }}'::timestamp
23 | ),
24 |
25 | pivoted_data AS (
26 | SELECT
27 | (array_agg(address))[1] AS address,
28 | (array_agg(attester))[1] AS attester,
29 | MAX(chain_id) AS chain_id,
30 | MAX(CASE WHEN tag_id = 'contract_name' THEN tag_value END) AS contract_name,
31 | MAX(CASE WHEN tag_id = 'owner_project' THEN tag_value END) AS owner_project,
32 | MAX(CASE WHEN tag_id = 'usage_category' THEN tag_value END) AS usage_category
33 | FROM
34 | filtered_labels
35 | GROUP BY
36 | uid
37 | )
38 |
39 | SELECT * FROM pivoted_data
40 | WHERE owner_project IS NOT NULL;
--------------------------------------------------------------------------------
/backend/airflow/dags/metrics/metrics_coingecko.py:
--------------------------------------------------------------------------------
1 | from datetime import datetime,timedelta
2 | from airflow.decorators import dag, task
3 | from src.misc.airflow_utils import alert_via_webhook
4 |
5 | @dag(
6 | default_args={
7 | 'owner' : 'mseidl',
8 | 'retries' : 5,
9 | 'email_on_failure': False,
10 | 'retry_delay' : timedelta(minutes=10),
11 | 'on_failure_callback': alert_via_webhook
12 | },
13 | dag_id='metrics_coingecko',
14 | description='Load price, volume, and market_cap from coingecko API for all tracked tokens.',
15 | tags=['metrics', 'daily'],
16 | start_date=datetime(2023,4,24),
17 | schedule='30 00 * * *' ## data should be available by 0:10 utc according to https://docs.coingecko.com/v3.0.1/reference/coins-id-market-chart
18 | )
19 |
20 | def etl():
21 | @task()
22 | def run_direct():
23 | from src.db_connector import DbConnector
24 | from src.adapters.adapter_coingecko import AdapterCoingecko
25 | import os
26 | adapter_params = {
27 | 'api_key' : os.getenv("COINGECKO_API")
28 | }
29 |
30 | load_params = {
31 | 'load_type' : 'direct',
32 | 'metric_keys' : ['price', 'volume', 'market_cap'],
33 | 'coingecko_ids' : ['glo-dollar'],
34 | 'days' : 'auto', # auto, max, or a number (as string)
35 | 'vs_currencies' : ['usd', 'eth']
36 | }
37 |
38 | # initialize adapter
39 | db_connector = DbConnector()
40 | ad = AdapterCoingecko(adapter_params, db_connector)
41 | # extract
42 | df = ad.extract(load_params)
43 | # load
44 | ad.load(df)
45 |
46 | run_direct()
47 | etl()
--------------------------------------------------------------------------------
/backend/src/api/og_resources/icons/small/gtp-rank.svg:
--------------------------------------------------------------------------------
1 |
25 |
--------------------------------------------------------------------------------
/backend/src/queries/postgres/api/select_top_da_consumers.sql.j2:
--------------------------------------------------------------------------------
1 | {#
2 | A query to sum the data posted (blob_size_bytes) by DA consumers over a certain time interval.
3 |
4 | Parameters:
5 | - days: default = 7; The time interval (in days) to consider for data extraction.
6 | - da_layer: The DA layer to consider for data extraction. Can also be 'all' to consider all DA layers.
7 | - limit: default = 10; The maximum number of rows to return.
8 | #}
9 |
10 | {% set days = days | default(7) %}
11 | {% set da_layer = da_layer | default('all') %}
12 | {% set limit = limit | default(10) %}
13 |
14 |
15 | {% if da_layer == "da_ethereum_blobs" %}
16 | {% set metric_key = "= 'ethereum_blob_size_bytes'" %}
17 | {% elif da_layer == "da_celestia" %}
18 | {% set metric_key = "= 'celestia_blob_size_bytes'" %}
19 | {% elif da_layer == "da_eigenda" %}
20 | {% set metric_key = "= 'eigenda_blob_size_bytes'" %}
21 | {% else %}
22 | {% set metric_key = "in ('celestia_blob_size_bytes', 'ethereum_blob_size_bytes', 'eigenda_blob_size_bytes')" %}
23 | {% endif %}
24 |
25 | with econ_names as (
26 | SELECT
27 | origin_key,
28 | max("name") as econ_name
29 | FROM sys_economics_mapping
30 | group by 1
31 | )
32 |
33 | select
34 | origin_key as da_consumer_key,
35 | eco.econ_name as name,
36 | 'NA' as da_layer,
37 | c.origin_key as gtp_origin_key,
38 | sum(value) as data_posted
39 | FROM public.fact_kpis
40 | left join econ_names eco using (origin_key)
41 | left join sys_main_conf c using (origin_key)
42 | where metric_key {{ metric_key }}
43 | AND "date" >= current_date - INTERVAL '{{ days }} days'
44 | AND "date" < current_date
45 | group by 1,2,3,4
46 | order by 5 desc
47 | LIMIT {{ limit | default(10) }};
--------------------------------------------------------------------------------
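To make the branching above concrete, a small sketch (plain `jinja2`, standalone hypothetical snippet) showing which `metric_key` filter the template produces for a single DA layer versus the `'all'` fallback:

```python
# Sketch: reproduce the template's da_layer branching to see the rendered metric_key filter.
from jinja2 import Template

snippet = Template(
    "{% if da_layer == 'da_ethereum_blobs' %}= 'ethereum_blob_size_bytes'"
    "{% elif da_layer == 'da_celestia' %}= 'celestia_blob_size_bytes'"
    "{% elif da_layer == 'da_eigenda' %}= 'eigenda_blob_size_bytes'"
    "{% else %}in ('celestia_blob_size_bytes', 'ethereum_blob_size_bytes', 'eigenda_blob_size_bytes')"
    "{% endif %}"
)

print(snippet.render(da_layer="da_celestia"))  # -> = 'celestia_blob_size_bytes'
print(snippet.render(da_layer="all"))          # -> in ('celestia_blob_size_bytes', ...)
```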
/backend/src/api/og_resources/icons/small/gtp-email.svg:
--------------------------------------------------------------------------------
1 |
20 |
--------------------------------------------------------------------------------
/backend/src/api/og_resources/icons/small/gtp-metrics-economics.svg:
--------------------------------------------------------------------------------
1 |
25 |
--------------------------------------------------------------------------------
/backend/src/api/og_resources/icons/small/gtp-burger-menu.svg:
--------------------------------------------------------------------------------
1 |
25 |
--------------------------------------------------------------------------------
/backend/src/api/og_resources/icons/small/gtp-lock.svg:
--------------------------------------------------------------------------------
1 |
20 |
--------------------------------------------------------------------------------
/backend/airflow/dags/metrics/metrics_eigenDA.py:
--------------------------------------------------------------------------------
1 | from datetime import datetime, timedelta
2 | from airflow.decorators import dag, task
3 | from src.misc.airflow_utils import alert_via_webhook
4 |
5 | @dag(
6 | default_args={
7 | 'owner': 'lorenz',
8 | 'retries': 1,
9 | 'email_on_failure': False,
10 | 'retry_delay': timedelta(minutes=5),
11 | 'on_failure_callback': lambda context: alert_via_webhook(context, user='lorenz')
12 | },
13 | dag_id='metrics_eigenda',
14 | description='Load data from EigenDA API.',
15 | tags=['EigenDA', 'fact_kpi'],
16 | start_date=datetime(2024, 7, 22),
17 | schedule='30 3 * * *' # Run daily at 3:30 AM. Needs to be before metrics_sql dag
18 | )
19 | def run_dag():
20 | @task()
21 | def run_eigendata_extract_load():
22 | from src.adapters.adapter_eigenDA import AdapterEigenDA
23 | from src.db_connector import DbConnector
24 |
25 | # Initialize the adapter
26 | db_connector = DbConnector()
27 | adapter_params = {}
28 | eigen = AdapterEigenDA(adapter_params, db_connector)
29 |
30 | load_params = {
31 | 'days': 7, # Look back 7 days
32 | 'endpoint': 'https://eigenda-mainnet-ethereum-blobmetadata-usage.s3.us-east-2.amazonaws.com/v2/stats',
33 | 'table': 'fact_kpis' # Example table name
34 | }
35 |
36 | df = eigen.extract(load_params)
37 | df = df.set_index(['date', 'origin_key', 'metric_key'])
38 | eigen.load(df)
39 |
40 | # How to find out new namespaces
41 | # df = eigen.call_api_endpoint()
42 | # df = df.groupby(['account_name', 'customer_id', 'version']).sum().reset_index()
43 | # df = df.drop(columns=['datetime'])
44 | # df
45 |
46 | run_eigendata_extract_load()
47 |
48 | run_dag()
--------------------------------------------------------------------------------
/backend/src/adapters/abstract_adapters.py:
--------------------------------------------------------------------------------
1 | from abc import ABC, abstractmethod
2 | import pandas as pd
3 | from src.db_connector import DbConnector
4 |
5 | class AbstractAdapter(ABC):
6 |
7 | @abstractmethod
8 | def __init__(self, name, adapter_params:dict, db_connector:DbConnector):
9 | """
10 | - param adapter_params: some useful adapter config stuff such as API keys, URLs, etc
11 | - param db_connector: database connection
12 | """
13 | self.adapter_params = adapter_params
14 | self.name = name
15 | self.db_connector = db_connector
16 |
17 | class AbstractAdapterRaw(AbstractAdapter):
18 |
19 | def __init__(self, name, adapter_params:dict, db_connector):
20 | super().__init__(name, adapter_params, db_connector)
21 | self.orchestration = False
22 |
23 | @abstractmethod
24 | def extract_raw(self, load_params:dict) -> pd.DataFrame:
25 | """
26 | This function should be used to request the most granular transaction (raw) data from a datasource (e.g. API).
27 | - param load_params: some useful load config
28 | """
29 | raise NotImplementedError
30 |
31 | def load_raw(self, df_transformed) -> None:
32 | """
33 | This function should be used to persist our data to our database
34 | - param df_transformed: the transformed dataframe that should be used for the upload
35 | """
36 | raise NotImplementedError
37 |
38 | def orchestratation_raw(self):
39 | """
40 | This function calls the extract_raw script and, whenever the df hits 10k rows, loads it into the db and starts over.
41 | It is useful for endpoints that return a lot of data, where we want to make sure that we don't run into memory issues or similar.
42 | """
43 | raise NotImplementedError
--------------------------------------------------------------------------------
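A minimal sketch of a concrete adapter built on `AbstractAdapterRaw`; the endpoint, payload shape, and target table are assumptions for illustration only (the `db_connector.engine` attribute is taken from its use elsewhere in this repo, e.g. `tests/check_tx_types.py`).

```python
# Hypothetical concrete raw adapter; endpoint and table name are illustrative.
import pandas as pd
import requests

from src.adapters.abstract_adapters import AbstractAdapterRaw


class DummyRawAdapter(AbstractAdapterRaw):
    def __init__(self, adapter_params: dict, db_connector):
        super().__init__("Dummy", adapter_params, db_connector)

    def extract_raw(self, load_params: dict) -> pd.DataFrame:
        # Pull raw rows from an assumed JSON endpoint passed in via load_params
        resp = requests.get(load_params["endpoint"], timeout=30)
        resp.raise_for_status()
        return pd.DataFrame(resp.json())

    def load_raw(self, df_transformed) -> None:
        # Persist via the connector's SQLAlchemy engine (table name assumed)
        df_transformed.to_sql("dummy_tx", self.db_connector.engine, if_exists="append", index=False)
```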
/backend/src/api/og_resources/icons/small/gtp-filter.svg:
--------------------------------------------------------------------------------
1 |
20 |
--------------------------------------------------------------------------------
/backend/airflow/dags/utility/utility_db_backup.py:
--------------------------------------------------------------------------------
1 | from datetime import datetime,timedelta
2 | from airflow.decorators import dag, task
3 | from src.misc.airflow_utils import alert_via_webhook
4 |
5 | @dag(
6 | default_args={
7 | 'owner' : 'mseidl',
8 | 'retries' : 2,
9 | 'email_on_failure': False,
10 | 'retry_delay' : timedelta(minutes=5),
11 | 'on_failure_callback': alert_via_webhook
12 | },
13 | dag_id='utility_db_backup',
14 | description='Backup crucial data tables from db.',
15 | tags=['utility', 'daily'],
16 | start_date=datetime(2023,7,4),
17 | schedule='30 04 * * *'
18 | )
19 |
20 | def backup():
21 | @task()
22 | def run_backup_tables():
23 | import os
24 | import polars as pl
25 | from src.misc.helper_functions import upload_polars_df_to_s3
26 | from src.db_connector import DbConnector
27 |
28 | db_connector = DbConnector()
29 | tables = ['fact_kpis', 'sys_main_conf', 'sys_rpc_config', 'oli_oss_directory', 'sys_l2beat']
30 |
31 | time_str = datetime.now().isoformat()[:10]
32 | bucket_name = os.getenv("S3_LONG_TERM_BUCKET")
33 |
34 | for table_name in tables:
35 | print(f'...loading {table_name}')
36 | exec_string = f'select * from {table_name}'
37 |
38 | df = pl.read_database_uri(query=exec_string, uri=db_connector.uri)
39 |
40 | print(f"...loaded {df.shape[0]} rows.")
41 |
42 | filename = f"{table_name}_{time_str}.parquet"
43 | file_key = f"backup_db/{table_name}/{filename}"
44 |
45 | upload_polars_df_to_s3(df, filename, bucket_name, file_key)
46 |
47 | print(f'...finished backing up {table_name}')
48 |
49 | run_backup_tables()
50 | backup()
51 |
52 |
53 |
54 |
55 |
56 |
--------------------------------------------------------------------------------
/backend/tests/test_da_config.py:
--------------------------------------------------------------------------------
1 | from tests import setup_test_imports
2 | # Set up imports
3 | setup_test_imports()
4 |
5 | from src.da_config import get_da_config
6 |
7 | def main():
8 | print("Testing DAConfig loading...")
9 |
10 | # Load from GitHub
11 | print("1. Loading from GitHub source...")
12 | da_conf_github = get_da_config(source='github')
13 | print(f" Loaded {len(da_conf_github)} DA layers")
14 |
15 | # Test consistency
16 | print("\n2. Testing consistency with another GitHub load:")
17 | da_conf_github2 = get_da_config(source='github')
18 | print(f" Configs are equal: {da_conf_github == da_conf_github2}")
19 |
20 | # Additional validation test: Test logo defaulting
21 | print("\n3. Testing that a DA layer with null logo gets the default logo:")
22 | # Manually create a test config dict with null logo
23 | test_conf_dict = [{
24 | "da_layer": "test_layer",
25 | "name": "Test Layer",
26 | "name_short": "TL",
27 | "block_explorers": {},
28 | "colors": {"light": ["#123456"], "dark": ["#654321"]},
29 | "logo": None, # This should get the default logo
30 | "incl_in_da_overview": True,
31 | "parameters": {}
32 | }]
33 |
34 | # Process with validator
35 | da_conf_test = get_da_config(da_config_dict=test_conf_dict, source='github')
36 | default_logo = {
37 | 'body': "",
38 | 'width': 15,
39 | 'height': 15
40 | }
41 | print(f" Has default logo: {da_conf_test[0].logo == default_logo}")
42 |
43 | print("\nTest completed successfully!")
44 |
45 | if __name__ == "__main__":
46 | main()
--------------------------------------------------------------------------------
/backend/src/api/og_resources/icons/small/gtp-ethereum-weekly.svg:
--------------------------------------------------------------------------------
1 |
30 |
--------------------------------------------------------------------------------
/backend/src/api/og_resources/icons/small/gtp-download.svg:
--------------------------------------------------------------------------------
1 |
20 |
--------------------------------------------------------------------------------
/backend/src/api/og_resources/icons/small/gtp-blobs.svg:
--------------------------------------------------------------------------------
1 |
15 |
--------------------------------------------------------------------------------
/backend/src/api/og_resources/icons/small/gtp-night.svg:
--------------------------------------------------------------------------------
1 |
15 |
--------------------------------------------------------------------------------
/backend/src/api/og_resources/icons/small/giveth.svg:
--------------------------------------------------------------------------------
1 |
6 |
--------------------------------------------------------------------------------
/backend/src/misc/octant_lib/codegen/gql/README.md:
--------------------------------------------------------------------------------
1 | # Generating schema.py
2 |
3 | ## 1. Generate schema.json by running the following command:
4 |
5 | ```bash
6 | curl -X POST -H "Content-Type: application/json" --data '{"query": "{ __schema { queryType { name } mutationType { name } subscriptionType { name } types { ...FullType } directives { name description locations args { ...InputValue } } } } fragment FullType on __Type { kind name description fields(includeDeprecated: true) { name description args { ...InputValue } type { ...TypeRef } isDeprecated deprecationReason } inputFields { ...InputValue } interfaces { ...TypeRef } enumValues(includeDeprecated: true) { name description isDeprecated deprecationReason } possibleTypes { ...TypeRef } } fragment InputValue on __InputValue { name description type { ...TypeRef } defaultValue } fragment TypeRef on __Type { kind name ofType { kind name ofType { kind name ofType { kind name ofType { kind name } } } } }"}' https://graph.mainnet.octant.app/subgraphs/name/octant > schema.json
7 | ```
8 |
9 | ## 2. Install sgqlc library by running the following command:
10 |
11 | ```bash
12 | pip install sgqlc
13 | ```
14 |
15 | ## 3. Generate schema.py by running the following command:
16 |
17 | ```bash
18 | sgqlc-codegen.exe schema --docstrings schema.json schema.py
19 | ```
20 |
21 | ## 4. Use schema.py to query the subgraph
22 |
23 | ```python
24 | from sgqlc.operation import Operation
25 | from sgqlc.endpoint.requests import RequestsEndpoint
26 |
27 | from schema import schema
28 |
29 | # Create an endpoint
30 | endpoint = RequestsEndpoint('https://graph.mainnet.octant.app/subgraphs/name/octant')
31 |
32 | # Create an operation
33 | op = Operation(schema.Query)
34 |
35 | # Query the subgraph
36 | # Select fields on the operation: the sgqlc equivalent of the
37 | # GraphQL selection "{ allAccounts { id balance } }"
38 | # (generated field names are snake_case, so allAccounts becomes all_accounts)
39 | accounts = op.all_accounts()
40 | accounts.id()
41 | accounts.balance()
42 |
43 | # Execute the operation
44 | data = endpoint(op)
45 |
46 | # Print the response
47 | print(data)
48 | ```
49 |
--------------------------------------------------------------------------------
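As a small follow-up to step 4 of the README above: sgqlc can also interpret the raw JSON response back into typed objects by adding it to the operation; the `all_accounts` field name is assumed from the example query.

```python
# Interpret the raw response into typed sgqlc objects (field name assumed from the example query)
result = op + data
for account in result.all_accounts:
    print(account.id, account.balance)
```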
/backend/src/api/og_resources/icons/small/gtp-wallet.svg:
--------------------------------------------------------------------------------
1 |
20 |
--------------------------------------------------------------------------------
/backend/src/api/og_resources/icons/small/gtp-info.svg:
--------------------------------------------------------------------------------
1 |
25 |
--------------------------------------------------------------------------------
/backend/src/queries/postgres/api/quick_bites/linea_burn.sql.j2:
--------------------------------------------------------------------------------
1 | WITH date_spine AS (
2 | SELECT generate_series(
3 | '2025-09-11'::date,
4 | CURRENT_DATE,
5 | '1 day'::interval
6 | )::date AS "date"
7 | ),
8 | metrics AS (
9 | SELECT
10 | "date",
11 | origin_key,
12 | MAX(CASE WHEN metric_key = 'qb_lineaTokensBridged_linea' THEN value END) AS lineaTokensBridged_linea,
13 | MAX(CASE WHEN metric_key = 'qb_ethBurnt_eth' THEN value END) AS ethBurnt_eth
14 | FROM public.fact_kpis
15 | WHERE
16 | (metric_key = 'qb_lineaTokensBridged_linea' OR metric_key = 'qb_ethBurnt_eth')
17 | AND origin_key = 'linea'
18 | AND "date" >= '2025-09-11'
19 | GROUP BY "date", origin_key
20 | ),
21 | linea_price AS (
22 | SELECT "date", value
23 | FROM public.fact_kpis
24 | WHERE metric_key = 'price_usd'
25 | AND origin_key = 'linea'
26 | AND "date" >= '2025-09-11'
27 | ),
28 | eth_price AS (
29 | SELECT "date", value
30 | FROM public.fact_kpis
31 | WHERE metric_key = 'price_usd'
32 | AND origin_key = 'ethereum'
33 | AND "date" >= '2025-09-11'
34 | )
35 | SELECT
36 | ds."date",
37 | m.lineaTokensBridged_linea,
38 | m.ethBurnt_eth,
39 | m.lineaTokensBridged_linea * lp.value AS lineaTokensBridged_usd,
40 | m.ethBurnt_eth * ep.value AS ethBurnt_usd,
41 | SUM(COALESCE(m.lineaTokensBridged_linea, 0)) OVER (ORDER BY ds."date") AS cum_lineaTokensBridged_linea,
42 | SUM(COALESCE(m.ethBurnt_eth, 0)) OVER (ORDER BY ds."date") AS cum_ethBurnt_eth,
43 | SUM(COALESCE(m.lineaTokensBridged_linea * lp.value, 0)) OVER (ORDER BY ds."date") AS cum_lineaTokensBridged_usd,
44 | SUM(COALESCE(m.ethBurnt_eth * ep.value, 0)) OVER (ORDER BY ds."date") AS cum_ethBurnt_usd
45 | FROM date_spine ds
46 | LEFT JOIN metrics m ON ds.date = m.date
47 | LEFT JOIN linea_price lp ON ds.date = lp.date
48 | LEFT JOIN eth_price ep ON ds.date = ep.date
49 | ORDER BY ds.date ASC
--------------------------------------------------------------------------------
/backend/airflow/dags/raw/raw_starknet.py:
--------------------------------------------------------------------------------
1 | from datetime import datetime, timedelta
2 | from airflow.decorators import dag, task
3 | from src.misc.airflow_utils import alert_via_webhook
4 |
5 | @dag(
6 | default_args={
7 | 'owner': 'nader',
8 | 'retries': 2,
9 | 'email_on_failure': False,
10 | 'retry_delay': timedelta(minutes=1),
11 | 'on_failure_callback': lambda context: alert_via_webhook(context, user='nader')
12 | },
13 | dag_id='raw_starknet',
14 | description='Load raw tx data from StarkNet',
15 | tags=['raw', 'near-real-time', 'rpc'],
16 | start_date=datetime(2023, 9, 1),
17 | schedule='8/10 * * * *'
18 | )
19 | def adapter_rpc():
20 | @task(execution_timeout=timedelta(minutes=45))
21 | def run_starknet():
22 | from src.adapters.adapter_raw_starknet import AdapterStarknet
23 | from src.adapters.rpc_funcs.utils import MaxWaitTimeExceededException, get_chain_config
24 | from src.db_connector import DbConnector
25 |
26 | # Initialize DbConnector
27 | db_connector = DbConnector()
28 |
29 | chain_name = 'starknet'
30 |
31 | active_rpc_configs, batch_size = get_chain_config(db_connector, chain_name)
32 | print(f"STARKNET_CONFIG={active_rpc_configs}")
33 |
34 | adapter_params = {
35 | 'chain': chain_name,
36 | 'rpc_configs': active_rpc_configs,
37 | }
38 |
39 | # Initialize AdapterStarknet
40 | adapter = AdapterStarknet(adapter_params, db_connector)
41 |
42 | # Initial load parameters
43 | load_params = {
44 | 'block_start': 'auto',
45 | 'batch_size': batch_size,
46 | }
47 |
48 | try:
49 | adapter.extract_raw(load_params)
50 | except MaxWaitTimeExceededException as e:
51 | print(f"Extraction stopped due to maximum wait time being exceeded: {e}")
52 | raise e
53 |
54 | run_starknet()
55 | adapter_rpc()
--------------------------------------------------------------------------------
/backend/tests/check_tx_types.py:
--------------------------------------------------------------------------------
1 | from sqlalchemy import text
2 | from src.db_connector import DbConnector
3 |
4 | # Initialize the database connector
5 | db = DbConnector()
6 |
7 | # Connect to the database
8 | with db.engine.connect() as conn:
9 | # Set the appropriate role
10 | conn.execute(text("SET ROLE data_team_write;"))
11 |
12 | # Query transactions from the block we loaded
13 | query = text("""
14 | SELECT
15 | block_number,
16 | tx_hash::text,
17 | tx_type,
18 | from_address::text,
19 | to_address::text
20 | FROM lisk_tx
21 | WHERE block_number >= 15590154
22 | AND block_number < 15590164
23 | ORDER BY block_number, tx_type
24 | """)
25 |
26 | result = conn.execute(query)
27 |
28 | # Print the headers
29 | print("\nBLOCK | TX_TYPE | FROM (truncated) | TO (truncated)")
30 | print("-" * 90)
31 |
32 | # Print each row
33 | for row in result:
34 | # Truncate the long values for display
35 | tx_hash = row.tx_hash[:20] + "..." if row.tx_hash else None
36 | from_addr = row.from_address[:20] + "..." if row.from_address else None
37 | to_addr = row.to_address[:20] + "..." if row.to_address else None
38 |
39 | print(f"{row.block_number} | {row.tx_type:7} | {from_addr or 'None':25} | {to_addr or 'None'}")
40 |
41 | # Get statistics
42 | stats_query = text("""
43 | SELECT
44 | tx_type,
45 | COUNT(*) as count
46 | FROM lisk_tx
47 | WHERE block_number >= 15590154
48 | AND block_number < 15590164
49 | GROUP BY tx_type
50 | ORDER BY tx_type
51 | """)
52 |
53 | stats = conn.execute(stats_query).fetchall()
54 |
55 | # Print the statistics
56 | print("\n--- Transaction Type Distribution ---")
57 | for tx_type, count in stats:
58 | print(f"Type {tx_type}: {count} transactions")
--------------------------------------------------------------------------------
/backend/src/api/og_resources/icons/small/gtp-metrics-total-value-locked.svg:
--------------------------------------------------------------------------------
1 |
20 |
--------------------------------------------------------------------------------
/backend/src/api/og_resources/icons/small/gtp-metrics-total-value-secured.svg:
--------------------------------------------------------------------------------
1 |
20 |
--------------------------------------------------------------------------------
/backend/src/api/og_resources/icons/small/gtp-metrics-totalvaluelocked.svg:
--------------------------------------------------------------------------------
1 |
20 |
--------------------------------------------------------------------------------
/backend/src/api/og_resources/icons/small/gtp-metrics-totalvaluesecured.svg:
--------------------------------------------------------------------------------
1 |
20 |
--------------------------------------------------------------------------------
/backend/src/api/og_resources/icons/small/gtp-notification.svg:
--------------------------------------------------------------------------------
1 |
15 |
--------------------------------------------------------------------------------
/backend/src/api/og_resources/icons/small/gtp-book-open.svg:
--------------------------------------------------------------------------------
1 |
15 |
--------------------------------------------------------------------------------
/backend/src/api/og_resources/icons/small/gtp-layer2-maturity-developing.svg:
--------------------------------------------------------------------------------
1 |
25 |
--------------------------------------------------------------------------------
/backend/airflow/dags/raw/raw_base.py:
--------------------------------------------------------------------------------
1 | from datetime import datetime, timedelta
2 | from airflow.decorators import dag, task
3 | from src.misc.airflow_utils import alert_via_webhook
4 |
5 | @dag(
6 | default_args={
7 | 'owner': 'nader',
8 | 'retries': 2,
9 | 'email_on_failure': False,
10 | 'retry_delay': timedelta(minutes=1),
11 | 'on_failure_callback': lambda context: alert_via_webhook(context, user='nader')
12 | },
13 | dag_id='raw_base',
14 | description='Load raw tx data from Base',
15 | tags=['raw', 'near-real-time', 'rpc'],
16 | start_date=datetime(2023, 9, 1),
17 | schedule='0/10 * * * *'
18 | )
19 |
20 | def adapter_rpc():
21 | @task(execution_timeout=timedelta(minutes=45))
22 | def run_base():
23 | from src.adapters.adapter_raw_rpc import NodeAdapter
24 | from src.adapters.rpc_funcs.utils import MaxWaitTimeExceededException, get_chain_config
25 | from src.db_connector import DbConnector
26 |
27 | # Initialize DbConnector
28 | db_connector = DbConnector()
29 |
30 | chain_name = 'base'
31 |
32 | active_rpc_configs, batch_size = get_chain_config(db_connector, chain_name)
33 | print(f"BASE_CONFIG={active_rpc_configs}")
34 |
35 | adapter_params = {
36 | 'rpc': 'local_node',
37 | 'chain': chain_name,
38 | 'rpc_configs': active_rpc_configs,
39 | }
40 |
41 | # Initialize NodeAdapter
42 | adapter = NodeAdapter(adapter_params, db_connector)
43 |
44 | # Initial load parameters
45 | load_params = {
46 | 'block_start': 'auto',
47 | 'batch_size': batch_size,
48 | }
49 |
50 | try:
51 | adapter.extract_raw(load_params)
52 | except MaxWaitTimeExceededException as e:
53 | print(f"Extraction stopped due to maximum wait time being exceeded: {e}")
54 | raise e
55 | finally:
56 | adapter.log_stats()
57 |
58 | run_base()
59 | adapter_rpc()
--------------------------------------------------------------------------------
/backend/airflow/dags/raw/raw_ink.py:
--------------------------------------------------------------------------------
1 | from datetime import datetime, timedelta
2 | from airflow.decorators import dag, task
3 | from src.misc.airflow_utils import alert_via_webhook
4 |
5 | @dag(
6 | default_args={
7 | 'owner': 'nader',
8 | 'retries': 2,
9 | 'email_on_failure': False,
10 | 'retry_delay': timedelta(minutes=1),
11 | 'on_failure_callback': lambda context: alert_via_webhook(context, user='nader')
12 | },
13 | dag_id='raw_ink',
14 | description='Load raw tx data from Ink',
15 | tags=['raw', 'near-real-time', 'rpc'],
16 | start_date=datetime(2023, 9, 1),
17 | schedule='3/10 * * * *'
18 | )
19 |
20 | def adapter_rpc():
21 | @task(execution_timeout=timedelta(minutes=45))
22 | def run_ink():
23 | from src.adapters.adapter_raw_rpc import NodeAdapter
24 | from src.adapters.rpc_funcs.utils import MaxWaitTimeExceededException, get_chain_config
25 | from src.db_connector import DbConnector
26 |
27 | # Initialize DbConnector
28 | db_connector = DbConnector()
29 |
30 | chain_name = 'ink'
31 |
32 | active_rpc_configs, batch_size = get_chain_config(db_connector, chain_name)
33 | print(f"INK_CONFIG={active_rpc_configs}")
34 |
35 | adapter_params = {
36 | 'rpc': 'local_node',
37 | 'chain': chain_name,
38 | 'rpc_configs': active_rpc_configs,
39 | }
40 |
41 | # Initialize NodeAdapter
42 | adapter = NodeAdapter(adapter_params, db_connector)
43 |
44 | # Initial load parameters
45 | load_params = {
46 | 'block_start': 'auto',
47 | 'batch_size': batch_size,
48 | }
49 |
50 | try:
51 | adapter.extract_raw(load_params)
52 | except MaxWaitTimeExceededException as e:
53 | print(f"Extraction stopped due to maximum wait time being exceeded: {e}")
54 | raise e
55 | finally:
56 | adapter.log_stats()
57 |
58 | run_ink()
59 | adapter_rpc()
60 |
--------------------------------------------------------------------------------
/backend/airflow/dags/raw/raw_celo.py:
--------------------------------------------------------------------------------
1 | from datetime import datetime, timedelta
2 | from airflow.decorators import dag, task
3 | from src.misc.airflow_utils import alert_via_webhook
4 |
5 | @dag(
6 | default_args={
7 | 'owner': 'nader',
8 | 'retries': 2,
9 | 'email_on_failure': False,
10 | 'retry_delay': timedelta(minutes=1),
11 | 'on_failure_callback': lambda context: alert_via_webhook(context, user='nader')
12 | },
13 | dag_id='raw_celo',
14 | description='Load raw tx data from Celo',
15 | tags=['raw', 'near-real-time', 'rpc'],
16 | start_date=datetime(2023, 9, 1),
17 | schedule='1/10 * * * *'
18 | )
19 |
20 | def adapter_rpc():
21 | @task(execution_timeout=timedelta(minutes=45))
22 | def run_celo():
23 | from src.adapters.adapter_raw_rpc import NodeAdapter
24 | from src.adapters.rpc_funcs.utils import MaxWaitTimeExceededException, get_chain_config
25 | from src.db_connector import DbConnector
26 |
27 | # Initialize DbConnector
28 | db_connector = DbConnector()
29 |
30 | chain_name = 'celo'
31 |
32 | active_rpc_configs, batch_size = get_chain_config(db_connector, chain_name)
33 | print(f"CELO_CONFIG={active_rpc_configs}")
34 |
35 | adapter_params = {
36 | 'rpc': 'local_node',
37 | 'chain': chain_name,
38 | 'rpc_configs': active_rpc_configs,
39 | }
40 |
41 | # Initialize NodeAdapter
42 | adapter = NodeAdapter(adapter_params, db_connector)
43 |
44 | # Initial load parameters
45 | load_params = {
46 | 'block_start': "auto",
47 | 'batch_size': batch_size,
48 | }
49 |
50 | try:
51 | adapter.extract_raw(load_params)
52 | except MaxWaitTimeExceededException as e:
53 | print(f"Extraction stopped due to maximum wait time being exceeded: {e}")
54 | raise e
55 | finally:
56 | adapter.log_stats()
57 |
58 | run_celo()
59 | adapter_rpc()
60 |
--------------------------------------------------------------------------------
/backend/airflow/dags/raw/raw_lisk.py:
--------------------------------------------------------------------------------
1 | from datetime import datetime, timedelta
2 | from airflow.decorators import dag, task
3 | from src.misc.airflow_utils import alert_via_webhook
4 |
5 | @dag(
6 | default_args={
7 | 'owner': 'nader',
8 | 'retries': 2,
9 | 'email_on_failure': False,
10 | 'retry_delay': timedelta(minutes=1),
11 | 'on_failure_callback': lambda context: alert_via_webhook(context, user='nader')
12 | },
13 | dag_id='raw_lisk',
14 | description='Load raw tx data from Lisk',
15 | tags=['raw', 'near-real-time', 'rpc'],
16 | start_date=datetime(2023, 9, 1),
17 | schedule='4/10 * * * *'
18 | )
19 |
20 | def adapter_rpc():
21 | @task(execution_timeout=timedelta(minutes=45))
22 | def run_lisk():
23 | from src.adapters.adapter_raw_rpc import NodeAdapter
24 | from src.adapters.rpc_funcs.utils import MaxWaitTimeExceededException, get_chain_config
25 | from src.db_connector import DbConnector
26 |
27 | # Initialize DbConnector
28 | db_connector = DbConnector()
29 |
30 | chain_name = 'lisk'
31 |
32 | active_rpc_configs, batch_size = get_chain_config(db_connector, chain_name)
33 | print(f"LISK_CONFIG={active_rpc_configs}")
34 |
35 | adapter_params = {
36 | 'rpc': 'local_node',
37 | 'chain': chain_name,
38 | 'rpc_configs': active_rpc_configs,
39 | }
40 |
41 | # Initialize NodeAdapter
42 | adapter = NodeAdapter(adapter_params, db_connector)
43 |
44 | # Initial load parameters
45 | load_params = {
46 | 'block_start': 'auto',
47 | 'batch_size': batch_size,
48 | }
49 |
50 | try:
51 | adapter.extract_raw(load_params)
52 | except MaxWaitTimeExceededException as e:
53 | print(f"Extraction stopped due to maximum wait time being exceeded: {e}")
54 | raise e
55 | finally:
56 | adapter.log_stats()
57 |
58 | run_lisk()
59 | adapter_rpc()
60 |
--------------------------------------------------------------------------------
/backend/airflow/dags/raw/raw_mode.py:
--------------------------------------------------------------------------------
1 | from datetime import datetime, timedelta
2 | from airflow.decorators import dag, task
3 | from src.misc.airflow_utils import alert_via_webhook
4 |
5 | @dag(
6 | default_args={
7 | 'owner': 'nader',
8 | 'retries': 2,
9 | 'email_on_failure': False,
10 | 'retry_delay': timedelta(minutes=5),
11 | 'on_failure_callback': lambda context: alert_via_webhook(context, user='nader')
12 | },
13 | dag_id='raw_mode',
14 | description='Load raw tx data from Mode',
15 | tags=['raw', 'near-real-time', 'rpc'],
16 | start_date=datetime(2023, 9, 1),
17 | schedule='5/10 * * * *'
18 | )
19 |
20 | def adapter_rpc():
21 | @task(execution_timeout=timedelta(minutes=45))
22 | def run_mode():
23 | from src.adapters.adapter_raw_rpc import NodeAdapter
24 | from src.adapters.rpc_funcs.utils import MaxWaitTimeExceededException, get_chain_config
25 | from src.db_connector import DbConnector
26 |
27 | # Initialize DbConnector
28 | db_connector = DbConnector()
29 |
30 | chain_name = 'mode'
31 |
32 | active_rpc_configs, batch_size = get_chain_config(db_connector, chain_name)
33 | print(f"MODE_CONFIG={active_rpc_configs}")
34 |
35 | adapter_params = {
36 | 'rpc': 'local_node',
37 | 'chain': chain_name,
38 | 'rpc_configs': active_rpc_configs,
39 | }
40 |
41 | # Initialize NodeAdapter
42 | adapter = NodeAdapter(adapter_params, db_connector)
43 |
44 | # Initial load parameters
45 | load_params = {
46 | 'block_start': 'auto',
47 | 'batch_size': batch_size,
48 | }
49 |
50 | try:
51 | adapter.extract_raw(load_params)
52 | except MaxWaitTimeExceededException as e:
53 | print(f"Extraction stopped due to maximum wait time being exceeded: {e}")
54 | raise e
55 | finally:
56 | adapter.log_stats()
57 |
58 | run_mode()
59 | adapter_rpc()
60 |
--------------------------------------------------------------------------------
/backend/airflow/dags/raw/raw_plume.py:
--------------------------------------------------------------------------------
1 | from datetime import datetime, timedelta
2 | from airflow.decorators import dag, task
3 | from src.misc.airflow_utils import alert_via_webhook
4 |
5 | @dag(
6 | default_args={
7 | 'owner': 'nader',
8 | 'retries': 2,
9 | 'email_on_failure': False,
10 | 'retry_delay': timedelta(minutes=1),
11 | 'on_failure_callback': lambda context: alert_via_webhook(context, user='nader')
12 | },
13 | dag_id='raw_plume',
14 | description='Load raw tx data from Plume',
15 | tags=['raw', 'near-real-time', 'rpc'],
16 | start_date=datetime(2023, 9, 1),
17 | schedule='6/10 * * * *'
18 | )
19 |
20 | def adapter_rpc():
21 | @task(execution_timeout=timedelta(minutes=45))
22 | def run_plume():
23 | from src.adapters.adapter_raw_rpc import NodeAdapter
24 | from src.adapters.rpc_funcs.utils import MaxWaitTimeExceededException, get_chain_config
25 | from src.db_connector import DbConnector
26 |
27 | # Initialize DbConnector
28 | db_connector = DbConnector()
29 |
30 | chain_name = 'plume'
31 |
32 | active_rpc_configs, batch_size = get_chain_config(db_connector, chain_name)
33 | print(f"PLUME_CONFIG={active_rpc_configs}")
34 |
35 | adapter_params = {
36 | 'rpc': 'local_node',
37 | 'chain': chain_name,
38 | 'rpc_configs': active_rpc_configs,
39 | }
40 |
41 | # Initialize NodeAdapter
42 | adapter = NodeAdapter(adapter_params, db_connector)
43 |
44 | # Initial load parameters
45 | load_params = {
46 | 'block_start': 'auto',
47 | 'batch_size': batch_size,
48 | }
49 |
50 | try:
51 | adapter.extract_raw(load_params)
52 | except MaxWaitTimeExceededException as e:
53 | print(f"Extraction stopped due to maximum wait time being exceeded: {e}")
54 | raise e
55 | finally:
56 | adapter.log_stats()
57 |
58 | run_plume()
59 | adapter_rpc()
60 |
--------------------------------------------------------------------------------
/backend/airflow/dags/raw/raw_zora.py:
--------------------------------------------------------------------------------
1 | from datetime import datetime, timedelta
2 | from airflow.decorators import dag, task
3 | from src.misc.airflow_utils import alert_via_webhook
4 |
5 | @dag(
6 | default_args={
7 | 'owner': 'nader',
8 | 'retries': 2,
9 | 'email_on_failure': False,
10 | 'retry_delay': timedelta(minutes=5),
11 | 'on_failure_callback': lambda context: alert_via_webhook(context, user='nader')
12 | },
13 | dag_id='raw_zora',
14 | description='Load raw tx data from Zora',
15 | tags=['raw', 'near-real-time', 'rpc'],
16 | start_date=datetime(2023, 9, 1),
17 | schedule='9/10 * * * *'
18 | )
19 |
20 | def adapter_rpc():
21 | @task(execution_timeout=timedelta(minutes=45))
22 | def run_zora():
23 | from src.adapters.adapter_raw_rpc import NodeAdapter
24 | from src.adapters.rpc_funcs.utils import MaxWaitTimeExceededException, get_chain_config
25 | from src.db_connector import DbConnector
26 |
27 | # Initialize DbConnector
28 | db_connector = DbConnector()
29 |
30 | chain_name = 'zora'
31 |
32 | active_rpc_configs, batch_size = get_chain_config(db_connector, chain_name)
33 | print(f"ZORA_CONFIG={active_rpc_configs}")
34 |
35 | adapter_params = {
36 | 'rpc': 'local_node',
37 | 'chain': chain_name,
38 | 'rpc_configs': active_rpc_configs,
39 | }
40 |
41 | # Initialize NodeAdapter
42 | adapter = NodeAdapter(adapter_params, db_connector)
43 |
44 | # Initial load parameters
45 | load_params = {
46 | 'block_start': 'auto',
47 | 'batch_size': batch_size,
48 | }
49 |
50 | try:
51 | adapter.extract_raw(load_params)
52 | except MaxWaitTimeExceededException as e:
53 | print(f"Extraction stopped due to maximum wait time being exceeded: {e}")
54 | raise e
55 | finally:
56 | adapter.log_stats()
57 |
58 | run_zora()
59 | adapter_rpc()
60 |
--------------------------------------------------------------------------------
/backend/src/queries/postgres/api/select_highlights.sql.j2:
--------------------------------------------------------------------------------
1 | {#
2 | A query to get the top recent highlights from the highlights table.
3 |
4 | Parameters:
5 | - origin_key: The origin key to consider for data extraction.
6 | - days: The time interval (in days) to consider for recent highlights (default is 7 days).
7 | - limit: The maximum number of results to return (default is 5).
8 | #}
9 |
10 |
11 | WITH ath_multiple AS (
12 | SELECT *
13 | FROM public.highlights
14 | WHERE date > current_date - interval '{{ days }} days'
15 | AND origin_key = '{{ origin_key }}'
16 | AND type = 'ath_multiple'
17 | ),
18 | ath_regular AS (
19 | SELECT *
20 | FROM public.highlights
21 | WHERE date > current_date - interval '{{ days }} days'
22 | AND origin_key = '{{ origin_key }}'
23 | AND type = 'ath_regular'
24 | ),
25 | growth AS (
26 | SELECT *
27 | FROM public.highlights
28 | WHERE date > current_date - interval '{{ days }} days'
29 | AND origin_key = '{{ origin_key }}'
30 | AND type ILIKE 'growth_%%'
31 | AND value > 0
32 | ORDER BY growth_pct_growth DESC
33 | LIMIT 2
34 | ),
35 | lifetime AS (
36 | SELECT *
37 | FROM public.highlights
38 | WHERE date > current_date - interval '{{ days }} days'
39 | AND origin_key = '{{ origin_key }}'
40 | AND type ILIKE 'lifetime_%%'
41 | AND value > 0
42 | ),
43 | combined AS (
44 | SELECT 1 AS prio, * FROM ath_multiple
45 | UNION ALL
46 | SELECT 2 AS prio, * FROM lifetime
47 | UNION ALL
48 | SELECT 3 AS prio, * FROM ath_regular
49 | UNION ALL
50 | SELECT 4 AS prio, * FROM growth
51 |
52 | ),
53 | ranked AS (
54 | SELECT
55 | c.*,
56 | ROW_NUMBER() OVER (
57 | PARTITION BY origin_key, metric_key
58 | ORDER BY prio asc, date desc, growth_pct_growth desc
59 | ) AS rn
60 | FROM combined c
61 | )
62 |
63 | SELECT date, metric_key, type, value, ath_prior_max, ath_next_threshold, growth_prior_value, growth_pct_growth
64 | FROM ranked
65 | WHERE rn = 1
66 | ORDER BY date DESC, prio ASC
67 | LIMIT {{ limit | default(5) }}
--------------------------------------------------------------------------------
/backend/airflow/dags/raw/raw_blast.py:
--------------------------------------------------------------------------------
1 | from datetime import datetime, timedelta
2 | from airflow.decorators import dag, task
3 | from src.misc.airflow_utils import alert_via_webhook
4 |
5 | @dag(
6 | default_args={
7 | 'owner': 'nader',
8 | 'retries': 2,
9 | 'email_on_failure': False,
10 | 'retry_delay': timedelta(minutes=1),
11 | 'on_failure_callback': lambda context: alert_via_webhook(context, user='nader')
12 | },
13 | dag_id='raw_blast',
14 | description='Load raw tx data from Blast',
15 | tags=['raw', 'near-real-time', 'rpc'],
16 | start_date=datetime(2023, 9, 1),
17 | schedule='1/10 * * * *'
18 | )
19 |
20 | def adapter_rpc():
21 | @task(execution_timeout=timedelta(minutes=45))
22 | def run_blast():
23 | from src.adapters.adapter_raw_rpc import NodeAdapter
24 | from src.adapters.rpc_funcs.utils import MaxWaitTimeExceededException, get_chain_config
25 | from src.db_connector import DbConnector
26 |
27 | # Initialize DbConnector
28 | db_connector = DbConnector()
29 |
30 | chain_name = 'blast'
31 |
32 | active_rpc_configs, batch_size = get_chain_config(db_connector, chain_name)
33 | print(f"BLAST_CONFIG={active_rpc_configs}")
34 |
35 | adapter_params = {
36 | 'rpc': 'local_node',
37 | 'chain': chain_name,
38 | 'rpc_configs': active_rpc_configs,
39 | }
40 |
41 | # Initialize NodeAdapter
42 | adapter = NodeAdapter(adapter_params, db_connector)
43 |
44 | # Initial load parameters
45 | load_params = {
46 | 'block_start': 'auto',
47 | 'batch_size': batch_size,
48 | }
49 |
50 | try:
51 | adapter.extract_raw(load_params)
52 | except MaxWaitTimeExceededException as e:
53 | print(f"Extraction stopped due to maximum wait time being exceeded: {e}")
54 | raise e
55 | finally:
56 | adapter.log_stats()
57 |
58 | run_blast()
59 | adapter_rpc()
--------------------------------------------------------------------------------
/backend/airflow/dags/raw/raw_gravity.py:
--------------------------------------------------------------------------------
1 | from datetime import datetime, timedelta
2 | from airflow.decorators import dag, task
3 | from src.misc.airflow_utils import alert_via_webhook
4 |
5 | @dag(
6 | default_args={
7 | 'owner': 'nader',
8 | 'retries': 2,
9 | 'email_on_failure': False,
10 | 'retry_delay': timedelta(minutes=1),
11 | 'on_failure_callback': lambda context: alert_via_webhook(context, user='nader')
12 | },
13 | dag_id='raw_gravity',
14 | description='Load raw tx data from Gravity',
15 | tags=['raw', 'near-real-time', 'rpc'],
16 | start_date=datetime(2024, 10, 15),
17 | schedule='2/10 * * * *'
18 | )
19 |
20 | def adapter_rpc():
21 | @task(execution_timeout=timedelta(minutes=45))
22 | def run_gravity():
23 | from src.adapters.adapter_raw_rpc import NodeAdapter
24 | from src.adapters.rpc_funcs.utils import MaxWaitTimeExceededException, get_chain_config
25 | from src.db_connector import DbConnector
26 |
27 | # Initialize DbConnector
28 | db_connector = DbConnector()
29 |
30 | chain_name = 'gravity'
31 |
32 | active_rpc_configs, batch_size = get_chain_config(db_connector, chain_name)
33 | print(f"CONFIG={active_rpc_configs}")
34 |
35 | adapter_params = {
36 | 'rpc': 'local_node',
37 | 'chain': chain_name,
38 | 'rpc_configs': active_rpc_configs,
39 | }
40 |
41 | # Initialize NodeAdapter
42 | adapter = NodeAdapter(adapter_params, db_connector)
43 |
44 | # Initial load parameters
45 | load_params = {
46 | 'block_start': 'auto',
47 | 'batch_size': batch_size,
48 | }
49 |
50 | try:
51 | adapter.extract_raw(load_params)
52 | except MaxWaitTimeExceededException as e:
53 | print(f"Extraction stopped due to maximum wait time being exceeded: {e}")
54 | raise e
55 | finally:
56 | adapter.log_stats()
57 |
58 | run_gravity()
59 | adapter_rpc()
--------------------------------------------------------------------------------
/backend/airflow/dags/raw/raw_linea.py:
--------------------------------------------------------------------------------
1 | from datetime import datetime, timedelta
2 | from airflow.decorators import dag, task
3 | from src.misc.airflow_utils import alert_via_webhook
4 |
5 | @dag(
6 | default_args={
7 | 'owner': 'nader',
8 | 'retries': 2,
9 | 'email_on_failure': False,
10 | 'retry_delay': timedelta(minutes=5),
11 | 'on_failure_callback': lambda context: alert_via_webhook(context, user='nader')
12 | },
13 | dag_id='raw_linea',
14 | description='Load raw tx data from Linea',
15 | tags=['raw', 'near-real-time', 'rpc'],
16 | start_date=datetime(2023, 9, 1),
17 | schedule='4/10 * * * *'
18 | )
19 |
20 | def adapter_rpc():
21 | @task(execution_timeout=timedelta(minutes=45))
22 | def run_linea():
23 | from src.adapters.adapter_raw_rpc import NodeAdapter
24 | from src.adapters.rpc_funcs.utils import MaxWaitTimeExceededException, get_chain_config
25 | from src.db_connector import DbConnector
26 |
27 | # Initialize DbConnector
28 | db_connector = DbConnector()
29 |
30 | chain_name = 'linea'
31 |
32 | active_rpc_configs, batch_size = get_chain_config(db_connector, chain_name)
33 | print(f"LINEA_CONFIG={active_rpc_configs}")
34 |
35 | adapter_params = {
36 | 'rpc': 'local_node',
37 | 'chain': chain_name,
38 | 'rpc_configs': active_rpc_configs,
39 | }
40 |
41 | # Initialize NodeAdapter
42 | adapter = NodeAdapter(adapter_params, db_connector)
43 |
44 | # Initial load parameters
45 | load_params = {
46 | 'block_start': 'auto',
47 | 'batch_size': batch_size,
48 | }
49 |
50 | try:
51 | adapter.extract_raw(load_params)
52 | except MaxWaitTimeExceededException as e:
53 | print(f"Extraction stopped due to maximum wait time being exceeded: {e}")
54 | raise e
55 | finally:
56 | adapter.log_stats()
57 |
58 | run_linea()
59 | adapter_rpc()
60 |
--------------------------------------------------------------------------------
/backend/airflow/dags/raw/raw_manta.py:
--------------------------------------------------------------------------------
1 | from datetime import datetime, timedelta
2 | from airflow.decorators import dag, task
3 | from src.misc.airflow_utils import alert_via_webhook
4 |
5 | @dag(
6 | default_args={
7 | 'owner': 'nader',
8 | 'retries': 2,
9 | 'email_on_failure': False,
10 | 'retry_delay': timedelta(minutes=5),
11 | 'on_failure_callback': lambda context: alert_via_webhook(context, user='nader')
12 | },
13 | dag_id='raw_manta',
14 | description='Load raw tx data from Manta',
15 | tags=['raw', 'near-real-time', 'rpc'],
16 | start_date=datetime(2023, 9, 1),
17 | schedule='4/10 * * * *'
18 | )
19 |
20 | def adapter_rpc():
21 | @task(execution_timeout=timedelta(minutes=45))
22 | def run_manta():
23 | from src.adapters.adapter_raw_rpc import NodeAdapter
24 | from src.adapters.rpc_funcs.utils import MaxWaitTimeExceededException, get_chain_config
25 | from src.db_connector import DbConnector
26 |
27 | # Initialize DbConnector
28 | db_connector = DbConnector()
29 |
30 | chain_name = 'manta'
31 |
32 | active_rpc_configs, batch_size = get_chain_config(db_connector, chain_name)
33 | print(f"MANTA_CONFIG={active_rpc_configs}")
34 |
35 | adapter_params = {
36 | 'rpc': 'local_node',
37 | 'chain': chain_name,
38 | 'rpc_configs': active_rpc_configs,
39 | }
40 |
41 | # Initialize NodeAdapter
42 | adapter = NodeAdapter(adapter_params, db_connector)
43 |
44 | # Initial load parameters
45 | load_params = {
46 | 'block_start': 'auto',
47 | 'batch_size': batch_size,
48 | }
49 |
50 | try:
51 | adapter.extract_raw(load_params)
52 | except MaxWaitTimeExceededException as e:
53 | print(f"Extraction stopped due to maximum wait time being exceeded: {e}")
54 | raise e
55 | finally:
56 | adapter.log_stats()
57 |
58 | run_manta()
59 | adapter_rpc()
60 |
--------------------------------------------------------------------------------