├── .gitignore ├── migrations ├── 2025-08-28-080719_added_func__get_decimals_or_warn │ ├── down.sql │ └── up.sql ├── 00000000000000_diesel_initial_setup │ ├── down.sql │ └── up.sql └── 2022-04-27-111623_initial │ ├── down.sql │ └── up.sql ├── src ├── lib │ ├── consumer │ │ ├── models │ │ │ ├── mod.rs │ │ │ ├── waves_data.rs │ │ │ ├── block_microblock.rs │ │ │ ├── asset_tickers.rs │ │ │ ├── assets.rs │ │ │ ├── candles.rs │ │ │ └── txs │ │ │ │ ├── mod.rs │ │ │ │ └── convert.rs │ │ ├── repo │ │ │ ├── mod.rs │ │ │ └── pg.rs │ │ ├── updates.rs │ │ └── mod.rs │ ├── lib.rs │ ├── config │ │ ├── mod.rs │ │ ├── postgres.rs │ │ └── consumer.rs │ ├── utils.rs │ ├── tuple_len.rs │ ├── error.rs │ ├── db.rs │ ├── waves.rs │ ├── models.rs │ └── schema.rs └── bin │ ├── consumer.rs │ └── migration.rs ├── diesel.toml ├── Dockerfile └── Cargo.toml /.gitignore: -------------------------------------------------------------------------------- 1 | target/ 2 | .idea/ 3 | *.env 4 | *.yml 5 | -------------------------------------------------------------------------------- /migrations/2025-08-28-080719_added_func__get_decimals_or_warn/down.sql: -------------------------------------------------------------------------------- 1 | DROP FUNCTION IF EXISTS get_decimals_or_exception(text); -------------------------------------------------------------------------------- /src/lib/consumer/models/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod asset_tickers; 2 | pub mod assets; 3 | pub mod block_microblock; 4 | pub mod candles; 5 | pub mod txs; 6 | pub mod waves_data; 7 | -------------------------------------------------------------------------------- /diesel.toml: -------------------------------------------------------------------------------- 1 | # For documentation on how to configure this file, 2 | # see diesel.rs/guides/configuring-diesel-cli 3 | 4 | [print_schema] 5 | file = "src/lib/schema.rs" 6 | import_types = ["diesel::sql_types::*"] 
-------------------------------------------------------------------------------- /src/lib/lib.rs: -------------------------------------------------------------------------------- 1 | #[macro_use] 2 | extern crate diesel; 3 | 4 | pub mod config; 5 | pub mod consumer; 6 | pub mod db; 7 | pub mod error; 8 | pub mod models; 9 | pub mod schema; 10 | mod tuple_len; 11 | mod utils; 12 | pub mod waves; 13 | -------------------------------------------------------------------------------- /src/lib/consumer/models/waves_data.rs: -------------------------------------------------------------------------------- 1 | use crate::schema::waves_data; 2 | use bigdecimal::BigDecimal; 3 | use diesel::Insertable; 4 | 5 | #[derive(Debug, Clone, Insertable)] 6 | #[diesel(table_name = waves_data)] 7 | pub struct WavesData { 8 | pub height: i32, 9 | pub quantity: BigDecimal, 10 | } 11 | -------------------------------------------------------------------------------- /migrations/00000000000000_diesel_initial_setup/down.sql: -------------------------------------------------------------------------------- 1 | -- This file was automatically created by Diesel to setup helper functions 2 | -- and other internal bookkeeping. This file is safe to edit, any future 3 | -- changes will be added to existing projects as new migrations. 4 | 5 | DROP FUNCTION IF EXISTS diesel_manage_updated_at(_tbl regclass); 6 | DROP FUNCTION IF EXISTS diesel_set_updated_at(); 7 | -------------------------------------------------------------------------------- /migrations/2025-08-28-080719_added_func__get_decimals_or_warn/up.sql: -------------------------------------------------------------------------------- 1 | CREATE OR REPLACE FUNCTION get_decimals_or_exception(id text) 2 | RETURNS integer AS $$ 3 | DECLARE 4 | dec integer; 5 | BEGIN 6 | SELECT decimals INTO dec 7 | FROM decimals 8 | WHERE asset_id = id; 9 | 10 | IF dec IS NULL THEN 11 | RAISE EXCEPTION 'Missing decimals for asset_id=%. 
Cannot calculate candle price.', id;
12 |     END IF;
13 | 
14 |     RETURN dec;
15 | END;
16 | $$ LANGUAGE plpgsql;
--------------------------------------------------------------------------------
/src/lib/consumer/models/block_microblock.rs:
--------------------------------------------------------------------------------
1 | use crate::consumer::BlockMicroblockAppend;
2 | use crate::schema::*;
3 | use chrono::NaiveDateTime;
4 | use diesel::Insertable;
5 | 
6 | // Insertable/queryable row of the `blocks_microblocks` table.
6 | #[derive(Clone, Debug, Insertable, QueryableByName)]
7 | #[diesel(table_name = blocks_microblocks)]
8 | pub struct BlockMicroblock {
9 |     pub id: String,
10 |     // NOTE(review): generic argument restored — extraction had stripped `<NaiveDateTime>`;
10 |     // grounded by the `chrono::NaiveDateTime` import and the `bma.time_stamp` move below.
10 |     pub time_stamp: Option<NaiveDateTime>,
11 |     pub height: i32,
12 | }
13 | 
14 | // NOTE(review): source type restored — extraction had stripped `<BlockMicroblockAppend>`.
14 | impl From<BlockMicroblockAppend> for BlockMicroblock {
15 |     fn from(bma: BlockMicroblockAppend) -> Self {
16 |         Self {
17 |             id: bma.id,
18 |             time_stamp: bma.time_stamp,
19 |             height: bma.height,
20 |         }
21 |     }
22 | }
23 | 
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM rust:1.89 AS builder
2 | WORKDIR /app
3 | 
4 | RUN rustup component add rustfmt
5 | RUN apt-get update && apt-get install -y protobuf-compiler
6 | 
7 | COPY Cargo.* ./
8 | COPY ./src ./src
9 | COPY ./migrations ./migrations
10 | 
11 | RUN cargo build --release
12 | 
13 | 
14 | # Keyword casing unified with the first stage (`AS builder`); avoids
14 | # BuildKit's FromAsCasing warning.
14 | FROM debian:12 AS runtime
15 | WORKDIR /app
16 | 
17 | RUN apt-get update && apt-get install -y curl openssl libssl-dev libpq-dev postgresql-client
18 | RUN /usr/sbin/update-ca-certificates
19 | 
20 | COPY --from=builder /app/target/release/consumer ./consumer
21 | COPY --from=builder /app/target/release/migration ./migration
22 | COPY --from=builder /app/migrations ./migrations/
23 | 
24 | 
25 | CMD ["./consumer"]
26 | 
--------------------------------------------------------------------------------
/src/lib/config/mod.rs:
--------------------------------------------------------------------------------
1 | pub mod consumer;
2 | pub mod postgres;
3 | 
4 | use
crate::error::Error;
5 | 
6 | // Full consumer configuration: database + consumer settings.
6 | #[derive(Debug, Clone)]
7 | pub struct Config {
8 |     pub postgres: postgres::Config,
9 |     pub consumer: consumer::Config,
10 | }
11 | 
12 | // Configuration for the standalone `migration` binary (database only).
12 | #[derive(Debug, Clone)]
13 | pub struct MigrationConfig {
14 |     pub postgres: postgres::Config,
15 | }
16 | 
17 | // NOTE(review): return generics restored — extraction had stripped `<Config, Error>`;
17 | // grounded by the `Ok(Config { .. })` body and the `?` on the sub-loaders.
17 | pub fn load_consumer_config() -> Result<Config, Error> {
18 |     Ok(Config {
19 |         postgres: postgres::load()?,
20 |         consumer: consumer::load()?,
21 |     })
22 | }
23 | 
24 | pub fn load_migration_config() -> Result<MigrationConfig, Error> {
25 |     Ok(MigrationConfig {
26 |         postgres: postgres::load()?,
27 |     })
28 | }
29 | 
--------------------------------------------------------------------------------
/src/lib/utils.rs:
--------------------------------------------------------------------------------
1 | use chrono::{DateTime, NaiveDateTime};
2 | 
3 | /// Encodes arbitrary bytes as a base58 string.
3 | pub fn into_base58(b: impl AsRef<[u8]>) -> String {
4 |     bs58::encode(b.as_ref()).into_string()
5 | }
6 | 
7 | /// Encodes bytes as `"base64:<payload>"`; empty input yields an empty string.
7 | pub fn into_prefixed_base64(b: impl AsRef<[u8]>) -> String {
8 |     let b = b.as_ref();
9 |     #[allow(deprecated)] // for base64::encode()
10 |     if !b.is_empty() {
11 |         String::from("base64:") + &base64::encode(b)
12 |     } else {
13 |         String::new()
14 |     }
15 | }
16 | 
17 | /// Converts a millisecond Unix epoch into a `NaiveDateTime`.
17 | ///
17 | /// Uses euclidean div/rem so that negative (pre-1970) timestamps produce a
17 | /// valid non-negative nanosecond component instead of panicking; `/` and `%`
17 | /// would yield a negative remainder that overflows the `as u32` cast.
17 | ///
17 | /// # Panics
17 | /// Panics if `ts` is outside chrono's representable range.
17 | pub fn epoch_ms_to_naivedatetime(ts: i64) -> NaiveDateTime {
18 |     // DateTime::from_timestamp replaces the deprecated NaiveDateTime::from_timestamp_opt.
18 |     DateTime::from_timestamp(ts.div_euclid(1000), ts.rem_euclid(1000) as u32 * 1_000_000)
19 |         .unwrap_or_else(|| panic!("invalid timestamp {ts}"))
20 |         .naive_utc()
21 | }
22 | 
23 | /// Escapes NUL characters so the string is safe for Postgres text columns.
23 | /// NOTE(review): `<str>` restored — extraction had stripped the `AsRef` argument;
23 | /// grounded by the `.replace` call on the borrowed value.
23 | pub fn escape_unicode_null(s: impl AsRef<str>) -> String {
24 |     s.as_ref().replace("\0", "\\0")
25 | }
26 | 
--------------------------------------------------------------------------------
/src/lib/tuple_len.rs:
--------------------------------------------------------------------------------
1 | pub trait TupleLen {
2 |     fn len(&self) -> usize;
3 | }
4 | 
5 | macro_rules! count {
6 |     () => (0);
7 |     ( $x:tt $($xs:tt)* ) => (1 + count!($($xs)*));
8 | }
9 | 
10 | macro_rules!
tuple_len_impls { 11 | ( $T:ident, $($rem:ident),+ ) => { 12 | impl<$T, $($rem),+> TupleLen for ($T, $($rem),+) { 13 | #[inline] 14 | fn len(&self) -> usize { 15 | count!($T $($rem)+) 16 | } 17 | } 18 | 19 | tuple_len_impls!($($rem),+); 20 | }; 21 | ( $T:ident ) => { 22 | impl<$T> TupleLen for ($T,) { 23 | #[inline] 24 | fn len(&self) -> usize { 25 | 1 26 | } 27 | } 28 | }; 29 | } 30 | 31 | // this macro makes TupleLen impls for (A, ..., Z), (B, ..., Z), ..., (Y, Z), (Z,) 32 | tuple_len_impls! { 33 | A, B, C, D, E, F, G, H, I, J, K, L, M, N, O, P, Q, R, S, T, U, V, W, X, Y, Z 34 | } 35 | 36 | #[cfg(test)] 37 | mod tests { 38 | use super::TupleLen; 39 | 40 | #[test] 41 | fn tuple_len() { 42 | assert_eq!((1,).len(), 1); 43 | assert_eq!((1, 2,).len(), 2); 44 | assert_eq!((1, 2, 3,).len(), 3); 45 | assert_eq!((1, 2, 3, 4,).len(), 4); 46 | } 47 | } 48 | -------------------------------------------------------------------------------- /migrations/00000000000000_diesel_initial_setup/up.sql: -------------------------------------------------------------------------------- 1 | -- This file was automatically created by Diesel to setup helper functions 2 | -- and other internal bookkeeping. This file is safe to edit, any future 3 | -- changes will be added to existing projects as new migrations. 
4 | 5 | 6 | 7 | 8 | -- Sets up a trigger for the given table to automatically set a column called 9 | -- `updated_at` whenever the row is modified (unless `updated_at` was included 10 | -- in the modified columns) 11 | -- 12 | -- # Example 13 | -- 14 | -- ```sql 15 | -- CREATE TABLE users (id SERIAL PRIMARY KEY, updated_at TIMESTAMP NOT NULL DEFAULT NOW()); 16 | -- 17 | -- SELECT diesel_manage_updated_at('users'); 18 | -- ``` 19 | CREATE OR REPLACE FUNCTION diesel_manage_updated_at(_tbl regclass) RETURNS VOID AS $$ 20 | BEGIN 21 | EXECUTE format('CREATE TRIGGER set_updated_at BEFORE UPDATE ON %s 22 | FOR EACH ROW EXECUTE PROCEDURE diesel_set_updated_at()', _tbl); 23 | END; 24 | $$ LANGUAGE plpgsql; 25 | 26 | CREATE OR REPLACE FUNCTION diesel_set_updated_at() RETURNS trigger AS $$ 27 | BEGIN 28 | IF ( 29 | NEW IS DISTINCT FROM OLD AND 30 | NEW.updated_at IS NOT DISTINCT FROM OLD.updated_at 31 | ) THEN 32 | NEW.updated_at := current_timestamp; 33 | END IF; 34 | RETURN NEW; 35 | END; 36 | $$ LANGUAGE plpgsql; 37 | -------------------------------------------------------------------------------- /src/lib/config/postgres.rs: -------------------------------------------------------------------------------- 1 | use crate::error::Error; 2 | use serde::Deserialize; 3 | 4 | fn default_port() -> u16 { 5 | 5432 6 | } 7 | 8 | fn default_poolsize() -> u32 { 9 | 1 10 | } 11 | 12 | #[derive(Deserialize)] 13 | struct ConfigFlat { 14 | host: String, 15 | #[serde(default = "default_port")] 16 | port: u16, 17 | database: String, 18 | user: String, 19 | password: String, 20 | #[serde(default = "default_poolsize")] 21 | poolsize: u32, 22 | } 23 | 24 | #[derive(Debug, Clone)] 25 | pub struct Config { 26 | pub host: String, 27 | pub port: u16, 28 | pub database: String, 29 | pub user: String, 30 | pub password: String, 31 | pub poolsize: u32, 32 | } 33 | 34 | impl Config { 35 | pub fn database_url(&self) -> String { 36 | format!( 37 | "postgres://{}:{}@{}:{}/{}", 38 | self.user, 
self.password, self.host, self.port, self.database
39 |         )
40 |     }
41 | }
42 | 
43 | /// Loads Postgres settings from `POSTGRES__*` environment variables.
43 | /// NOTE(review): generics restored — extraction had stripped `<Config, Error>` and the
43 | /// `::<ConfigFlat>` turbofish; grounded by the `Ok(Config { .. })` body and the
43 | /// `config_flat.*` field accesses matching `ConfigFlat`.
43 | pub fn load() -> Result<Config, Error> {
44 |     let config_flat = envy::prefixed("POSTGRES__").from_env::<ConfigFlat>()?;
45 | 
46 |     Ok(Config {
47 |         host: config_flat.host,
48 |         port: config_flat.port,
49 |         database: config_flat.database,
50 |         user: config_flat.user,
51 |         password: config_flat.password,
52 |         poolsize: config_flat.poolsize,
53 |     })
54 | }
55 | 
--------------------------------------------------------------------------------
/src/lib/error.rs:
--------------------------------------------------------------------------------
1 | // Application-wide error enum; most variants wrap a library error via #[from].
1 | #[derive(Debug, thiserror::Error)]
2 | pub enum Error {
3 |     #[error("LoadConfigFailed: {0}")]
4 |     LoadConfigFailed(#[from] envy::Error),
5 | 
6 |     #[error("InvalidMessage: {0}")]
7 |     InvalidMessage(String),
8 | 
9 |     #[error("DbDieselError: {0}")]
10 |     DbDieselError(#[from] diesel::result::Error),
11 | 
12 |     #[error("DeadpoolError: {0}")]
13 |     DeadpoolError(String),
14 | 
15 |     #[error("ConnectionPoolError: {0}")]
16 |     ConnectionPoolError(#[from] r2d2::Error),
17 | 
18 |     #[error("ConnectionError: {0}")]
19 |     ConnectionError(#[from] diesel::ConnectionError),
20 | 
21 |     #[error("StreamClosed: {0}")]
22 |     StreamClosed(String),
23 | 
24 |     #[error("StreamError: {0}")]
25 |     StreamError(String),
26 | 
27 |     #[error("SerializationError: {0}")]
28 |     SerializationError(#[from] serde_json::Error),
29 | 
30 |     #[error("CursorDecodeError: {0}")]
31 |     CursorDecodeError(#[from] base64::DecodeError),
32 | 
33 |     #[error("JoinError: {0}")]
34 |     JoinError(#[from] tokio::task::JoinError),
35 | 
36 |     #[error("InconsistDataError: {0}")]
37 |     InconsistDataError(String),
38 | }
39 | 
40 | // impl done manually because InteractError is not Sync
40 | // NOTE(review): source type restored — extraction had stripped
40 | // `<deadpool_diesel::InteractError>`; grounded by the `fn from` parameter type.
41 | impl From<deadpool_diesel::InteractError> for Error {
42 |     fn from(err: deadpool_diesel::InteractError) -> Self {
43 |         Error::DeadpoolError(err.to_string())
44 |     }
45 | }
46 | 
--------------------------------------------------------------------------------
/src/lib/consumer/models/asset_tickers.rs:
-------------------------------------------------------------------------------- 1 | use std::hash::{Hash, Hasher}; 2 | 3 | use crate::schema::asset_tickers; 4 | use diesel::Insertable; 5 | 6 | #[derive(Clone, Debug, Insertable)] 7 | #[diesel(table_name = asset_tickers)] 8 | pub struct InsertableAssetTicker { 9 | pub uid: i64, 10 | pub superseded_by: i64, 11 | pub block_uid: i64, 12 | pub asset_id: String, 13 | pub ticker: String, 14 | } 15 | 16 | impl PartialEq for InsertableAssetTicker { 17 | fn eq(&self, other: &InsertableAssetTicker) -> bool { 18 | (&self.asset_id) == (&other.asset_id) 19 | } 20 | } 21 | 22 | impl Eq for InsertableAssetTicker {} 23 | 24 | impl Hash for InsertableAssetTicker { 25 | fn hash(&self, state: &mut H) { 26 | self.asset_id.hash(state); 27 | } 28 | } 29 | 30 | #[derive(Clone, Debug)] 31 | pub struct AssetTickerOverride { 32 | pub superseded_by: i64, 33 | pub asset_id: String, 34 | } 35 | 36 | #[derive(Clone, Debug)] 37 | pub struct DeletedAssetTicker { 38 | pub uid: i64, 39 | pub asset_id: String, 40 | } 41 | 42 | impl PartialEq for DeletedAssetTicker { 43 | fn eq(&self, other: &Self) -> bool { 44 | (&self.asset_id) == (&other.asset_id) 45 | } 46 | } 47 | 48 | impl Eq for DeletedAssetTicker {} 49 | 50 | impl Hash for DeletedAssetTicker { 51 | fn hash(&self, state: &mut H) { 52 | self.asset_id.hash(state); 53 | } 54 | } 55 | -------------------------------------------------------------------------------- /migrations/2022-04-27-111623_initial/down.sql: -------------------------------------------------------------------------------- 1 | DROP VIEW IF EXISTS decimals; 2 | 3 | DROP VIEW IF EXISTS assets; 4 | DROP VIEW IF EXISTS tickers; 5 | DROP TABLE IF EXISTS asset_origins; 6 | DROP TABLE IF EXISTS asset_updates; 7 | DROP TABLE IF EXISTS asset_tickers; 8 | DROP TABLE IF EXISTS assets_names_map; 9 | DROP TABLE IF EXISTS assets_metadata; 10 | DROP TABLE IF EXISTS candles; 11 | DROP TABLE IF EXISTS pairs; 12 | DROP TABLE IF EXISTS waves_data; 
13 | DROP TABLE IF EXISTS txs_1; 14 | DROP TABLE IF EXISTS txs_2; 15 | DROP TABLE IF EXISTS txs_3; 16 | DROP TABLE IF EXISTS txs_4; 17 | DROP TABLE IF EXISTS txs_5; 18 | DROP TABLE IF EXISTS txs_6; 19 | DROP TABLE IF EXISTS txs_7; 20 | DROP TABLE IF EXISTS txs_8; 21 | DROP TABLE IF EXISTS txs_9; 22 | DROP TABLE IF EXISTS txs_10; 23 | DROP TABLE IF EXISTS txs_11_transfers; 24 | DROP TABLE IF EXISTS txs_11; 25 | DROP TABLE IF EXISTS txs_12_data; 26 | DROP TABLE IF EXISTS txs_12; 27 | DROP TABLE IF EXISTS txs_13; 28 | DROP TABLE IF EXISTS txs_14; 29 | DROP TABLE IF EXISTS txs_15; 30 | DROP TABLE IF EXISTS txs_16_args; 31 | DROP TABLE IF EXISTS txs_16_payment; 32 | DROP TABLE IF EXISTS txs_16; 33 | DROP TABLE IF EXISTS txs_17; 34 | DROP TABLE IF EXISTS txs_18_args; 35 | DROP TABLE IF EXISTS txs_18_payment; 36 | DROP TABLE IF EXISTS txs_18; 37 | DROP TABLE IF EXISTS txs; 38 | DROP TABLE IF EXISTS blocks_microblocks; 39 | DROP FUNCTION IF EXISTS public.text_timestamp_cast; 40 | DROP FUNCTION IF EXISTS calc_and_insert_candles_since_timestamp; 41 | DROP FUNCTION IF EXISTS _to_raw_timestamp; 42 | DROP FUNCTION IF EXISTS _trunc_ts_by_secs; 43 | 44 | DROP EXTENSION IF EXISTS btree_gin; 45 | DROP EXTENSION IF EXISTS btree_gist; -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "data-service-consumer" 3 | version = "1.0.2" 4 | edition = "2021" 5 | 6 | [dependencies] 7 | anyhow = "1" 8 | async-trait = "0.1" 9 | base64 = "0.21" 10 | bigdecimal = { version = "0.4", features = ["serde"] } 11 | blake2 = "0.10" 12 | bs58 = "0.5" 13 | bytes = "1.1" 14 | chrono = { version = "^0.4.27", features = ["serde"] } 15 | deadpool-diesel = "0.5" 16 | diesel = { version = "^2.1", default-features = false, features = ["chrono", "postgres", "r2d2", "32-column-tables", "serde_json", "numeric"] } 17 | diesel_migrations = { version = "2", features = 
["postgres"] } 18 | envy = "0.4" 19 | fragstrings = { git = "https://github.com/waves-exchange/fragstrings", tag = "v0.2.0", default-features = false, features = ["parse"] } 20 | hex = "0.4.3" 21 | itertools = "0.12" 22 | lazy_static = "1.4" 23 | percent-encoding = "2.1" 24 | r2d2 = "0.8" 25 | regex = "1" 26 | serde = { version = "1.0", features = ["derive"] } 27 | serde_json = "1.0.81" 28 | sha3 = "0.10" 29 | thiserror = "1.0" 30 | tokio = { version = "1.12", features = ["macros", "rt-multi-thread"] } 31 | wavesexchange_log = { git = "https://github.com/waves-exchange/wavesexchange-rs", tag = "wavesexchange_log/0.5.1" } 32 | waves-protobuf-schemas = { git = "https://github.com/wavesplatform/protobuf-schemas", tag = "rust_v1.5.2" } 33 | wavesexchange_liveness = { git = "https://github.com/waves-exchange/wavesexchange-rs", tag = "wavesexchange_liveness/0.3.1"} 34 | wavesexchange_warp = { git = "https://github.com/waves-exchange/wavesexchange-rs", tag = "wavesexchange_warp/0.14.10" } 35 | 36 | 37 | [lib] 38 | name = "app_lib" 39 | path = "src/lib/lib.rs" 40 | 41 | [[bin]] 42 | name = "consumer" 43 | path = "src/bin/consumer.rs" 44 | 45 | [[bin]] 46 | name = "migration" 47 | path = "src/bin/migration.rs" 48 | -------------------------------------------------------------------------------- /src/lib/db.rs: -------------------------------------------------------------------------------- 1 | use anyhow::{Error, Result}; 2 | use deadpool_diesel::{Manager as DManager, Pool as DPool, Runtime}; 3 | use diesel::pg::PgConnection; 4 | use diesel::r2d2::{ConnectionManager, Pool}; 5 | use diesel::Connection; 6 | use std::time::Duration; 7 | 8 | use crate::config::postgres::Config; 9 | use crate::error::Error as AppError; 10 | 11 | pub type PgPool = Pool>; 12 | pub type PgAsyncPool = DPool>; 13 | 14 | pub fn generate_postgres_url(config: &Config) -> String { 15 | format!( 16 | "postgres://{}:{}@{}:{}/{}", 17 | config.user, config.password, config.host, config.port, config.database 
18 | ) 19 | } 20 | 21 | pub async fn async_pool(config: &Config) -> Result { 22 | let db_url = generate_postgres_url(config); 23 | 24 | let manager = DManager::new(db_url, Runtime::Tokio1); 25 | let pool = DPool::builder(manager) 26 | .max_size(config.poolsize as usize) 27 | .wait_timeout(Some(Duration::from_secs(10 * 60))) 28 | .runtime(Runtime::Tokio1) 29 | .build()?; 30 | Ok(pool) 31 | } 32 | 33 | pub fn pool(config: &Config) -> Result { 34 | let db_url = generate_postgres_url(config); 35 | 36 | let manager = ConnectionManager::::new(db_url); 37 | Ok(Pool::builder() 38 | .min_idle(Some(1)) 39 | .max_size(config.poolsize as u32) 40 | .idle_timeout(Some(Duration::from_secs(10 * 60))) 41 | .connection_timeout(Duration::from_secs(5)) 42 | .build(manager)?) 43 | } 44 | 45 | pub fn unpooled(config: &Config) -> Result { 46 | let db_url = generate_postgres_url(config); 47 | 48 | PgConnection::establish(&db_url).map_err(|err| Error::new(AppError::ConnectionError(err))) 49 | } 50 | -------------------------------------------------------------------------------- /src/lib/consumer/models/assets.rs: -------------------------------------------------------------------------------- 1 | use crate::schema::*; 2 | use chrono::NaiveDateTime; 3 | use diesel::{Insertable, Queryable}; 4 | use std::hash::{Hash, Hasher}; 5 | 6 | pub type BlockUid = i64; 7 | pub type UpdateUid = i64; 8 | 9 | #[derive(Clone, Debug, Insertable, Queryable)] 10 | pub struct AssetUpdate { 11 | pub block_uid: i64, 12 | pub uid: i64, 13 | pub superseded_by: i64, 14 | pub asset_id: String, 15 | pub decimals: i16, 16 | pub name: String, 17 | pub description: String, 18 | pub reissuable: bool, 19 | pub volume: i64, 20 | pub script: Option, 21 | pub sponsorship: Option, 22 | pub nft: bool, 23 | } 24 | 25 | impl PartialEq for AssetUpdate { 26 | fn eq(&self, other: &AssetUpdate) -> bool { 27 | self.asset_id == other.asset_id 28 | } 29 | } 30 | 31 | impl Eq for AssetUpdate {} 32 | 33 | impl Hash for AssetUpdate { 34 | 
fn hash(&self, state: &mut H) { 35 | self.asset_id.hash(state); 36 | } 37 | } 38 | 39 | #[derive(Clone, Debug)] 40 | pub struct AssetOverride { 41 | pub superseded_by: i64, 42 | pub id: String, 43 | } 44 | 45 | #[derive(Clone, Debug)] 46 | pub struct DeletedAsset { 47 | pub uid: i64, 48 | pub id: String, 49 | } 50 | 51 | impl PartialEq for DeletedAsset { 52 | fn eq(&self, other: &Self) -> bool { 53 | self.id == other.id 54 | } 55 | } 56 | 57 | impl Eq for DeletedAsset {} 58 | 59 | impl Hash for DeletedAsset { 60 | fn hash(&self, state: &mut H) { 61 | self.id.hash(state); 62 | } 63 | } 64 | 65 | #[derive(Clone, Debug, Insertable, Queryable)] 66 | pub struct AssetOrigin { 67 | pub asset_id: String, 68 | pub first_asset_update_uid: i64, 69 | pub origin_transaction_id: String, 70 | pub issuer: String, 71 | pub issue_height: i32, 72 | pub issue_time_stamp: NaiveDateTime, 73 | } 74 | -------------------------------------------------------------------------------- /src/bin/consumer.rs: -------------------------------------------------------------------------------- 1 | use anyhow::{Context, Result}; 2 | use app_lib::{config, consumer, db}; 3 | use std::time::Duration; 4 | use tokio::select; 5 | use wavesexchange_liveness::channel; 6 | use wavesexchange_log::{error, info}; 7 | use wavesexchange_warp::MetricsWarpBuilder; 8 | 9 | const LAST_TIMESTAMP_QUERY: &str = "SELECT (EXTRACT(EPOCH FROM time_stamp) * 1000)::BIGINT as time_stamp FROM blocks_microblocks WHERE time_stamp IS NOT NULL ORDER BY uid DESC LIMIT 1"; 10 | const POLL_INTERVAL_SECS: u64 = 60; 11 | const MAX_BLOCK_AGE: Duration = Duration::from_secs(300); 12 | 13 | #[tokio::main] 14 | async fn main() -> Result<()> { 15 | let config = config::load_consumer_config()?; 16 | 17 | info!( 18 | "Starting data-service consumer with config: {:?}", 19 | config.consumer 20 | ); 21 | 22 | let conn = db::async_pool(&config.postgres) 23 | .await 24 | .context("DB connection failed")?; 25 | 26 | let updates_src = 
consumer::updates::new(&config.consumer.blockchain_updates_url) 27 | .await 28 | .context("Blockchain connection failed")?; 29 | 30 | let pg_repo = consumer::repo::pg::new(conn); 31 | 32 | let db_url = config.postgres.database_url(); 33 | let readiness_channel = channel( 34 | db_url, 35 | POLL_INTERVAL_SECS, 36 | MAX_BLOCK_AGE, 37 | Some(LAST_TIMESTAMP_QUERY.to_string()), 38 | ); 39 | 40 | let metrics = tokio::spawn(async move { 41 | MetricsWarpBuilder::new() 42 | .with_metrics_port(config.consumer.metrics_port) 43 | .with_readiness_channel(readiness_channel) 44 | .run_async() 45 | .await 46 | }); 47 | 48 | let consumer = consumer::start(updates_src, pg_repo, config.consumer); 49 | 50 | select! { 51 | Err(err) = consumer => { 52 | error!("{}", err); 53 | return Err(err); 54 | }, 55 | result = metrics => { 56 | if let Err(err) = result { 57 | error!("Metrics failed: {:?}", err); 58 | } else { 59 | error!("Metrics stopped"); 60 | } 61 | } 62 | }; 63 | Ok(()) 64 | } 65 | -------------------------------------------------------------------------------- /src/lib/consumer/models/candles.rs: -------------------------------------------------------------------------------- 1 | use crate::schema::candles; 2 | use bigdecimal::BigDecimal; 3 | use chrono::NaiveDateTime; 4 | use diesel::Insertable; 5 | 6 | #[derive(Debug, Insertable)] 7 | pub struct Candle { 8 | time_start: NaiveDateTime, 9 | amount_asset_id: String, 10 | price_asset_id: String, 11 | low: BigDecimal, 12 | high: BigDecimal, 13 | volume: BigDecimal, 14 | quote_volume: BigDecimal, 15 | max_height: i32, 16 | txs_count: i32, 17 | weighted_average_price: BigDecimal, 18 | open: BigDecimal, 19 | close: BigDecimal, 20 | interval: String, 21 | matcher_address: String, 22 | } 23 | 24 | pub mod intervals { 25 | pub const MIN1: &str = "1m"; 26 | pub const MIN5: &str = "5m"; 27 | pub const MIN15: &str = "15m"; 28 | pub const MIN30: &str = "30m"; 29 | pub const HOUR1: &str = "1h"; 30 | pub const HOUR2: &str = "2h"; 31 | pub 
const HOUR3: &str = "3h"; 32 | pub const HOUR4: &str = "4h"; 33 | pub const HOUR6: &str = "6h"; 34 | pub const HOUR12: &str = "12h"; 35 | pub const DAY1: &str = "1d"; 36 | pub const WEEK1: &str = "1w"; 37 | pub const MONTH1: &str = "1M"; 38 | 39 | pub const CANDLE_INTERVALS: &[[&str; 2]] = &[ 40 | [MIN1, MIN5], 41 | [MIN5, MIN15], 42 | [MIN15, MIN30], 43 | [MIN30, HOUR1], 44 | [HOUR1, HOUR2], 45 | [HOUR1, HOUR3], 46 | [HOUR2, HOUR4], 47 | [HOUR3, HOUR6], 48 | [HOUR6, HOUR12], 49 | [HOUR12, DAY1], 50 | [DAY1, WEEK1], 51 | [DAY1, MONTH1], 52 | ]; 53 | } 54 | 55 | pub fn interval_in_seconds(interval: &str) -> Option { 56 | match interval { 57 | intervals::MIN1 => Some(60), 58 | intervals::MIN5 => Some(60 * 5), 59 | intervals::MIN15 => Some(60 * 15), 60 | intervals::MIN30 => Some(60 * 30), 61 | intervals::HOUR1 => Some(60 * 60), 62 | intervals::HOUR2 => Some(60 * 60 * 2), 63 | intervals::HOUR3 => Some(60 * 60 * 3), 64 | intervals::HOUR4 => Some(60 * 60 * 4), 65 | intervals::HOUR6 => Some(60 * 60 * 6), 66 | intervals::HOUR12 => Some(60 * 60 * 12), 67 | intervals::DAY1 => Some(60 * 60 * 24), 68 | _ => None, 69 | } 70 | } 71 | -------------------------------------------------------------------------------- /src/bin/migration.rs: -------------------------------------------------------------------------------- 1 | use diesel::migration::Migration; 2 | use diesel::{migration, pg::PgConnection, Connection}; 3 | use diesel_migrations::{embed_migrations, EmbeddedMigrations, MigrationHarness}; 4 | 5 | use app_lib::{config, db::generate_postgres_url}; 6 | 7 | const MIGRATIONS: EmbeddedMigrations = embed_migrations!(); 8 | 9 | fn main() -> anyhow::Result<()> { 10 | let action = action::parse_command_line()?; 11 | let dbconfig = config::postgres::load()?; 12 | let conn = PgConnection::establish(&generate_postgres_url(&dbconfig))?; 13 | run(action, conn).map_err(|e| anyhow::anyhow!(e)) 14 | } 15 | 16 | fn run(action: action::Action, mut conn: PgConnection) -> migration::Result<()> { 
17 | use action::Action::*; 18 | match action { 19 | ListPending => { 20 | let list = conn.pending_migrations(MIGRATIONS)?; 21 | if list.is_empty() { 22 | println!("No pending migrations."); 23 | } 24 | for mig in list { 25 | println!("Pending migration: {}", mig.name()); 26 | } 27 | } 28 | MigrateUp => { 29 | let list = conn.run_pending_migrations(MIGRATIONS)?; 30 | if list.is_empty() { 31 | println!("No pending migrations."); 32 | } 33 | for mig in list { 34 | println!("Applied migration: {}", mig); 35 | } 36 | } 37 | MigrateDown => { 38 | let mig = conn.revert_last_migration(MIGRATIONS)?; 39 | println!("Reverted migration: {}", mig); 40 | } 41 | } 42 | Ok(()) 43 | } 44 | 45 | mod action { 46 | pub enum Action { 47 | ListPending, 48 | MigrateUp, 49 | MigrateDown, 50 | } 51 | 52 | impl TryFrom<&str> for Action { 53 | type Error = (); 54 | 55 | fn try_from(value: &str) -> Result { 56 | match value { 57 | "" | "list" => Ok(Action::ListPending), 58 | "up" => Ok(Action::MigrateUp), 59 | "down" => Ok(Action::MigrateDown), 60 | _ => Err(()), 61 | } 62 | } 63 | } 64 | 65 | pub fn parse_command_line() -> Result { 66 | let action_str = std::env::args().nth(1).unwrap_or_default(); 67 | let action = action_str.as_str().try_into().map_err(|()| { 68 | anyhow::anyhow!( 69 | "unrecognized command line argument: {} (either 'up' or 'down' expected)", 70 | action_str 71 | ) 72 | })?; 73 | Ok(action) 74 | } 75 | } 76 | -------------------------------------------------------------------------------- /src/lib/config/consumer.rs: -------------------------------------------------------------------------------- 1 | use crate::error::Error; 2 | use chrono::Duration; 3 | use serde::Deserialize; 4 | use std::num::NonZeroU32; 5 | 6 | fn default_assets_only() -> bool { 7 | false 8 | } 9 | 10 | fn default_updates_per_request() -> usize { 11 | 256 12 | } 13 | 14 | fn default_max_wait_time_in_msecs() -> u64 { 15 | 5000 16 | } 17 | 18 | fn default_start_rollback_depth() -> u32 { 19 | 1 20 | } 21 
| 22 | fn default_rollback_step() -> u32 { 23 | 500 24 | } 25 | 26 | fn default_metrics_port() -> u16 { 27 | 9090 28 | } 29 | 30 | #[derive(Deserialize)] 31 | struct ConfigFlat { 32 | asset_storage_address: Option, 33 | #[serde(default = "default_assets_only")] 34 | assets_only: bool, 35 | blockchain_updates_url: String, 36 | chain_id: u8, 37 | #[serde(default = "default_max_wait_time_in_msecs")] 38 | max_wait_time_in_msecs: u64, 39 | starting_height: u32, 40 | #[serde(default = "default_updates_per_request")] 41 | updates_per_request: usize, 42 | #[serde(default = "default_start_rollback_depth")] 43 | start_rollback_depth: u32, 44 | #[serde(default = "default_rollback_step")] 45 | rollback_step: u32, 46 | #[serde(default = "default_metrics_port")] 47 | metrics_port: u16, 48 | } 49 | 50 | #[derive(Debug, Clone)] 51 | pub struct Config { 52 | pub asset_storage_address: Option, 53 | pub assets_only: bool, 54 | pub blockchain_updates_url: String, 55 | pub chain_id: u8, 56 | pub max_wait_time: Duration, 57 | pub starting_height: u32, 58 | pub updates_per_request: usize, 59 | pub start_rollback_depth: NonZeroU32, 60 | pub rollback_step: NonZeroU32, 61 | pub metrics_port: u16, 62 | } 63 | 64 | pub fn load() -> Result { 65 | let config_flat = envy::from_env::()?; 66 | let nonzero_err = 67 | |msg| Error::LoadConfigFailed(envy::Error::Custom(format!("{msg} must be > 0"))); 68 | 69 | Ok(Config { 70 | asset_storage_address: config_flat.asset_storage_address, 71 | assets_only: config_flat.assets_only, 72 | blockchain_updates_url: config_flat.blockchain_updates_url, 73 | chain_id: config_flat.chain_id, 74 | max_wait_time: Duration::milliseconds(config_flat.max_wait_time_in_msecs as i64), 75 | starting_height: config_flat.starting_height, 76 | updates_per_request: config_flat.updates_per_request, 77 | start_rollback_depth: NonZeroU32::new(config_flat.start_rollback_depth) 78 | .ok_or_else(|| nonzero_err("start_rollback_depth"))?, 79 | rollback_step: 
NonZeroU32::new(config_flat.rollback_step)
            .ok_or_else(|| nonzero_err("rollback_step"))?,
        metrics_port: config_flat.metrics_port,
    })
}

// ---------------------------------------------------------------------------
// /src/lib/waves.rs
// ---------------------------------------------------------------------------

use crate::utils::into_base58;
use bytes::{BufMut, BytesMut};
use lazy_static::lazy_static;
use regex::Regex;

lazy_static! {
    /// Matches oracle data-entry keys of the form `<prefix>_<base58 asset id>`.
    pub static ref ASSET_ORACLE_DATA_ENTRY_KEY_REGEX: Regex =
        Regex::new(r"^(.*)_<([a-zA-Z\d]+)>$").unwrap();
}

pub type ChainId = u8;

pub const WAVES_ID: &str = "WAVES";

/// Keccak-256 digest of `message`.
pub fn keccak256(message: &[u8]) -> [u8; 32] {
    use sha3::{Digest, Keccak256};

    let mut hasher = Keccak256::new();
    hasher.update(message);
    hasher.finalize().into()
}

/// BLAKE2b-256 digest of `message`.
pub fn blake2b256(message: &[u8]) -> [u8; 32] {
    use blake2::{digest::consts::U32, Blake2b, Digest};

    let mut hasher = Blake2b::<U32>::new();
    hasher.update(message);
    let res = hasher.finalize();
    res.into()
}

pub struct Address(String);
pub struct PublicKeyHash<'b>(pub &'b [u8]);

impl From<(&[u8], ChainId)> for Address {
    /// Derives a Waves address from a public key:
    /// `0x01 | chain_id | keccak(blake2b(pk))[..20] | checksum[..4]`, base58-encoded.
    fn from((pk, chain_id): (&[u8], ChainId)) -> Self {
        let pkh = keccak256(&blake2b256(pk));

        let mut addr = BytesMut::with_capacity(26); // VERSION + CHAIN_ID + PKH + checksum

        addr.put_u8(1); // address version is always 1
        addr.put_u8(chain_id);
        addr.put_slice(&pkh[..20]);

        // Checksum is the first 4 bytes of keccak(blake2b(version|chain|pkh)).
        let chks = &keccak256(&blake2b256(&addr[..22]))[..4];

        addr.put_slice(chks);

        Address(into_base58(addr))
    }
}

impl From<(PublicKeyHash<'_>, ChainId)> for Address {
    /// Same layout as above, but starts from an already-computed 20-byte
    /// public-key hash instead of the raw public key.
    fn from((PublicKeyHash(hash), chain_id): (PublicKeyHash, ChainId)) -> Self {
        let mut addr = BytesMut::with_capacity(26);

        addr.put_u8(1);
        addr.put_u8(chain_id);
        addr.put_slice(hash);

        let chks = &keccak256(&blake2b256(&addr[..22]))[..4];

        addr.put_slice(chks);

        Address(into_base58(addr))
    }
}

impl From<Address> for String {
    fn from(v: Address) -> Self {
        v.0
    }
}

pub fn is_valid_base58(src: &str) -> bool {
    bs58::decode(src).into_vec().is_ok()
}

/// Empty asset ids denote WAVES; anything else is base58-encoded.
pub fn extract_asset_id(asset_id: impl AsRef<[u8]>) -> String {
    if asset_id.as_ref().is_empty() {
        WAVES_ID.to_string()
    } else {
        into_base58(asset_id)
    }
}

pub fn is_waves_asset_id(input: impl AsRef<[u8]>) -> bool {
    extract_asset_id(input) == WAVES_ID
}

#[cfg(test)]
mod tests {
    use super::is_valid_base58;

    #[test]
    fn should_validate_base58_string() {
        let test_cases = vec![
            ("3PC9BfRwJWWiw9AREE2B3eWzCks3CYtg4yo", true),
            ("not-valid-string", false),
        ];

        test_cases.into_iter().for_each(|(key, expected)| {
            // `key` is already a `&str`; the original's `&key` was a needless borrow.
            let actual = is_valid_base58(key);
            assert_eq!(actual, expected);
        });
    }
}

// ---------------------------------------------------------------------------
// /src/lib/consumer/repo/mod.rs
// ---------------------------------------------------------------------------

pub mod pg;

use std::num::NonZeroU32;

use anyhow::Result;
use async_trait::async_trait;
use chrono::NaiveDateTime;

use super::models::{
    asset_tickers::{AssetTickerOverride, DeletedAssetTicker, InsertableAssetTicker},
    assets::{AssetOrigin, AssetOverride, AssetUpdate, DeletedAsset},
    block_microblock::BlockMicroblock,
    txs::*,
    waves_data::WavesData,
};
use super::UidHeight;

/// A transactional repository: `transaction` runs the closure against a
/// connection-scoped `RepoOperations` and commits/rolls back atomically.
#[async_trait]
pub trait Repo {
    type Operations<'c>: RepoOperations + 'c;

    // NOTE(review): generic parameters were stripped in the dump; the
    // `<F, R>` pair below is reconstructed — TODO confirm.
    async fn transaction<F, R>(&self, f: F) -> Result<R>
    where
        F: for<'conn> FnOnce(&mut Self::Operations<'conn>) -> Result<R>,
        F: Send + 'static,
        R: Send + 'static;
}

pub trait RepoOperations {
    //
    // COMMON
    //

    fn get_current_height(&mut self) -> Result<i32>;

    fn get_blocks_rollback_to(
&mut self, 38 | depth: NonZeroU32, 39 | rollback_step: NonZeroU32, 40 | ) -> Result>>; 41 | 42 | fn get_block_uid_height(&mut self, block_id: &str) -> Result; 43 | 44 | fn get_key_block_uid(&mut self) -> Result; 45 | 46 | fn get_total_block_id(&mut self) -> Result>; 47 | 48 | fn insert_blocks_or_microblocks(&mut self, blocks: &Vec) -> Result>; 49 | 50 | fn change_block_id(&mut self, block_uid: i64, new_block_id: &str) -> Result<()>; 51 | 52 | fn delete_microblocks(&mut self) -> Result<()>; 53 | 54 | fn rollback_blocks_microblocks(&mut self, block_uid: i64) -> Result<()>; 55 | 56 | fn insert_waves_data(&mut self, waves_data: &Vec) -> Result<()>; 57 | 58 | // 59 | // ASSETS 60 | // 61 | 62 | fn get_next_assets_uid(&mut self) -> Result; 63 | 64 | fn insert_asset_updates(&mut self, updates: &Vec) -> Result<()>; 65 | 66 | fn insert_asset_origins(&mut self, origins: &Vec) -> Result<()>; 67 | 68 | fn update_assets_block_references(&mut self, block_uid: i64) -> Result<()>; 69 | 70 | fn close_assets_superseded_by(&mut self, updates: &Vec) -> Result<()>; 71 | 72 | fn reopen_assets_superseded_by(&mut self, current_superseded_by: &Vec) -> Result<()>; 73 | 74 | fn set_assets_next_update_uid(&mut self, new_uid: i64) -> Result<()>; 75 | 76 | fn rollback_assets(&mut self, block_uid: i64) -> Result>; 77 | 78 | fn assets_gt_block_uid(&mut self, block_uid: i64) -> Result>; 79 | 80 | fn insert_asset_tickers(&mut self, tickers: &Vec) -> Result<()>; 81 | 82 | fn rollback_asset_tickers(&mut self, block_uid: &i64) -> Result>; 83 | 84 | fn update_asset_tickers_block_references(&mut self, block_uid: i64) -> Result<()>; 85 | 86 | fn reopen_asset_tickers_superseded_by( 87 | &mut self, 88 | current_superseded_by: &Vec, 89 | ) -> Result<()>; 90 | 91 | fn close_asset_tickers_superseded_by( 92 | &mut self, 93 | updates: &Vec, 94 | ) -> Result<()>; 95 | 96 | fn set_asset_tickers_next_update_uid(&mut self, new_uid: i64) -> Result<()>; 97 | 98 | fn get_next_asset_tickers_uid(&mut self) -> Result; 99 
| 100 | // 101 | // TRANSACTIONS 102 | // 103 | 104 | fn update_transactions_references(&mut self, block_uid: i64) -> Result<()>; 105 | 106 | fn rollback_transactions(&mut self, block_uid: i64) -> Result<()>; 107 | 108 | fn insert_txs_1(&mut self, txs: Vec) -> Result<()>; 109 | 110 | fn insert_txs_2(&mut self, txs: Vec) -> Result<()>; 111 | 112 | fn insert_txs_3(&mut self, txs: Vec) -> Result<()>; 113 | 114 | fn insert_txs_4(&mut self, txs: Vec) -> Result<()>; 115 | 116 | fn insert_txs_5(&mut self, txs: Vec) -> Result<()>; 117 | 118 | fn insert_txs_6(&mut self, txs: Vec) -> Result<()>; 119 | 120 | fn insert_txs_7(&mut self, txs: Vec) -> Result<()>; 121 | 122 | fn insert_txs_8(&mut self, txs: Vec) -> Result<()>; 123 | 124 | fn insert_txs_9(&mut self, txs: Vec) -> Result<()>; 125 | 126 | fn insert_txs_10(&mut self, txs: Vec) -> Result<()>; 127 | 128 | fn insert_txs_11(&mut self, txs: Vec) -> Result<()>; 129 | 130 | fn insert_txs_12(&mut self, txs: Vec) -> Result<()>; 131 | 132 | fn insert_txs_13(&mut self, txs: Vec) -> Result<()>; 133 | 134 | fn insert_txs_14(&mut self, txs: Vec) -> Result<()>; 135 | 136 | fn insert_txs_15(&mut self, txs: Vec) -> Result<()>; 137 | 138 | fn insert_txs_16(&mut self, txs: Vec) -> Result<()>; 139 | 140 | fn insert_txs_17(&mut self, txs: Vec) -> Result<()>; 141 | 142 | fn insert_txs_18(&mut self, txs: Vec) -> Result<()>; 143 | 144 | // 145 | // CANDLES 146 | // 147 | 148 | fn calculate_candles_since_block_uid(&mut self, block_uid: i64) -> Result<()>; 149 | 150 | fn calculate_minute_candles(&mut self, ts: NaiveDateTime) -> Result<()>; 151 | 152 | fn calculate_non_minute_candles(&mut self, ts: NaiveDateTime) -> Result<()>; 153 | 154 | fn rollback_candles(&mut self, block_uid: i64) -> Result<()>; 155 | } 156 | -------------------------------------------------------------------------------- /src/lib/models.rs: -------------------------------------------------------------------------------- 1 | use crate::utils::{escape_unicode_null, 
into_base58};
use chrono::{DateTime, Utc};
use serde::ser::{SerializeStruct, Serializer};
use serde::Serialize;
use serde_json::{json, Value};
use waves_protobuf_schemas::waves::{
    invoke_script_result::call::argument::{List as ListPb, Value as InvokeScriptArgValue},
    order::Sender as SenderPb,
    Order as OrderPb,
};

/// Asset state extracted from a blockchain update.
/// NOTE(review): generic parameters below were stripped by the dump and are
/// reconstructed — TODO confirm field types against the original file.
#[derive(Clone, Debug)]
pub struct BaseAssetInfoUpdate {
    pub id: String,
    pub issuer: String,
    pub precision: i32,
    pub nft: bool,
    pub updated_at: DateTime<Utc>,
    pub update_height: i32,
    pub name: String,
    pub description: String,
    pub script: Option<Vec<u8>>,
    pub quantity: i64,
    pub reissuable: bool,
    pub min_sponsored_fee: Option<i64>,
    pub tx_id: String,
}

/// Typed data-entry value serialized as `{"type": ..., "value": ...}`.
#[derive(Debug, Serialize)]
#[serde(rename_all = "lowercase")]
#[serde(tag = "type", content = "value")]
pub enum DataEntryTypeValue {
    Binary(String),
    Boolean(bool),
    Integer(i64),
    String(String),
    List(Value),
}

impl From<&InvokeScriptArgValue> for DataEntryTypeValue {
    fn from(val: &InvokeScriptArgValue) -> Self {
        match val {
            InvokeScriptArgValue::IntegerValue(v) => DataEntryTypeValue::Integer(*v),
            InvokeScriptArgValue::BinaryValue(v) => {
                #[allow(deprecated)] // for base64::encode()
                DataEntryTypeValue::Binary(format!("base64:{}", base64::encode(v)))
            }
            InvokeScriptArgValue::StringValue(v) => {
                DataEntryTypeValue::String(escape_unicode_null(v))
            }
            InvokeScriptArgValue::BooleanValue(v) => DataEntryTypeValue::Boolean(*v),
            // deep conversion of List
            InvokeScriptArgValue::List(v) => DataEntryTypeValue::List(json!(ArgList::from(v))),
            InvokeScriptArgValue::CaseObj(_) => todo!(),
        }
    }
}

#[derive(Debug, Serialize)]
pub struct ArgList(pub Vec<DataEntryTypeValue>);

impl From<&ListPb> for ArgList {
    fn from(list: &ListPb) -> Self {
        ArgList(
            list.items
                .iter()
                .filter_map(|i| i.value.as_ref().map(DataEntryTypeValue::from))
                .collect(),
        )
    }
}

/// Order plus the out-of-band metadata needed to build a full [`Order`].
pub struct OrderMeta<'o> {
    pub order: &'o OrderPb,
    pub id: &'o [u8],
    pub sender_address: &'o [u8],
    pub sender_public_key: &'o [u8],
}

#[derive(Debug)]
pub struct Order {
    pub id: String,
    pub version: i32,
    pub sender: String,
    pub sender_public_key: String,
    pub matcher_public_key: String,
    pub asset_pair: AssetPair,
    pub order_type: OrderType,
    pub amount: i64,
    pub price: i64,
    pub timestamp: i64,
    pub expiration: i64,
    pub matcher_fee: i64,
    pub matcher_fee_asset_id: Option<String>,
    pub proofs: Vec<String>,
    pub signature: String,
    pub eip712_signature: Option<String>,
    pub price_mode: Option<String>,
}

impl Serialize for Order {
    /// Serializes the order with a version-dependent field set:
    /// 14 base fields, `matcherFeeAssetId` from v3, plus
    /// `eip712Signature`/`priceMode` from v4.
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        // Fixed off-by-one: 14 fields are serialized for v1-2 (the original
        // claimed 15). The count is only a size hint for serializers, but it
        // should match what is actually emitted: 14 / 15 / 17.
        let fields_count = match self.version {
            1..=2 => 14,
            3 => 15,  // + matcher_fee_asset_id
            4.. => 17, // + eip712_signature, price_mode
            v => unreachable!("unknown order version {v}"),
        };
        let mut state = serializer.serialize_struct("Order", fields_count)?;
        state.serialize_field("id", &self.id)?;
        state.serialize_field("version", &self.version)?;
        state.serialize_field("sender", &self.sender)?;
        state.serialize_field("senderPublicKey", &self.sender_public_key)?;
        state.serialize_field("matcherPublicKey", &self.matcher_public_key)?;
        state.serialize_field("assetPair", &self.asset_pair)?;
        state.serialize_field("orderType", &self.order_type)?;
        state.serialize_field("amount", &self.amount)?;
        state.serialize_field("price", &self.price)?;
        state.serialize_field("timestamp", &self.timestamp)?;
        state.serialize_field("expiration", &self.expiration)?;
        state.serialize_field("matcherFee", &self.matcher_fee)?;
        state.serialize_field("proofs", &self.proofs)?;
        state.serialize_field("signature", &self.signature)?;

        if self.version >= 3 {
            state.serialize_field("matcherFeeAssetId", &self.matcher_fee_asset_id)?;
        }

        if self.version >= 4 {
            state.serialize_field("eip712Signature", &self.eip712_signature)?;
            state.serialize_field("priceMode", &self.price_mode)?;
        }
        state.end()
    }
}

impl From<OrderMeta<'_>> for Order {
    fn from(o: OrderMeta) -> Self {
        let OrderMeta {
            order,
            id,
            sender_address,
            sender_public_key,
        } = o;
        let proofs: Vec<String> = order.proofs.iter().map(into_base58).collect();
        // The signature is by convention the first proof (empty when absent).
        let signature = proofs.first().cloned().unwrap_or_default();
        Self {
            matcher_public_key: into_base58(&order.matcher_public_key),
            asset_pair: AssetPair {
                // Empty asset id bytes mean WAVES and map to `None`.
                amount_asset_id: order
                    .asset_pair
                    .as_ref()
                    .map(|p| &p.amount_asset_id)
                    .and_then(|asset| (!asset.is_empty()).then(|| into_base58(asset))),
                price_asset_id: order
                    .asset_pair
                    .as_ref()
                    .map(|p| &p.price_asset_id)
                    .and_then(|asset| (!asset.is_empty()).then(|| into_base58(asset))),
            },
            order_type: OrderType::from(order.order_side),
            amount: order.amount,
            price: order.price,
            timestamp: order.timestamp,
            expiration: order.expiration,
            matcher_fee: order.matcher_fee.as_ref().map(|f| f.amount).unwrap_or(0),
            matcher_fee_asset_id: order
                .matcher_fee
                .as_ref()
                .map(|f| &f.asset_id)
                .and_then(|asset| (!asset.is_empty()).then(|| into_base58(asset))),
            version: order.version,
            proofs,
            sender: into_base58(sender_address),
            id: into_base58(id),
            sender_public_key: into_base58(sender_public_key),
            signature,
            eip712_signature: match order.sender {
                Some(SenderPb::Eip712Signature(ref sig)) if order.version >= 4 => {
                    Some(format!("0x{}", hex::encode(sig)))
                }
                _ => None,
            },
            price_mode: match order.price_mode {
                0 => None,
                1 => Some("fixedDecimals".to_string()),
                2 => Some("assetDecimals".to_string()),
                m => unreachable!("unknown order price_mode {m}"),
            },
        }
    }
}

#[derive(Serialize, Debug)]
pub struct AssetPair {
    #[serde(rename = "amountAsset")]
    pub amount_asset_id: Option<String>,
    #[serde(rename = "priceAsset")]
    pub price_asset_id: Option<String>,
}

#[derive(Debug, Serialize)]
#[serde(rename_all = "camelCase")]
pub enum OrderType {
    Buy = 0,
    Sell = 1,
}

impl From<i32> for OrderType {
    fn from(n: i32) -> Self {
        match n {
            0 => OrderType::Buy,
            1 => OrderType::Sell,
            r => panic!("unknown OrderType {r}"),
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use waves_protobuf_schemas::waves::invoke_script_result::call::Argument;

    #[test]
    fn serialize_arg_list() {
        let src = InvokeScriptArgValue::List(ListPb {
            items: vec![
                Argument {
                    value: Some(InvokeScriptArgValue::IntegerValue(5)),
                },
                Argument {
                    value: Some(InvokeScriptArgValue::BinaryValue(b"\x00\x01".to_vec())),
                },
            ],
        });
        let data_value = DataEntryTypeValue::from(&src);
        if matches!(data_value, DataEntryTypeValue::List(_)) {
            let json = json!(data_value);
            let serialized = serde_json::to_string(&json["value"]).unwrap();
            let expected = json!([
                {"type": "integer", "value": 5},
                {"type": "binary", "value": "base64:AAE="},
            ]);
            assert_eq!(serialized, serde_json::to_string(&expected).unwrap());
        } else {
            panic!("Wrong variant: {:?}", src);
        }
    }
}

// ---------------------------------------------------------------------------
// /src/lib/consumer/updates.rs
// ---------------------------------------------------------------------------

use anyhow::Result;
use async_trait::async_trait;
use bs58;
use chrono::Duration;
use std::str;
use std::time::{Duration as StdDuration, Instant};
use tokio::sync::mpsc::{channel, Receiver, Sender};
use tokio::time;
use waves_protobuf_schemas::tonic;
use waves_protobuf_schemas::waves::{
    block::Header as HeaderPB,
    events::{
        blockchain_updated::append::{
            BlockAppend as BlockAppendPB, Body as BodyPB, MicroBlockAppend as MicroBlockAppendPB,
        },
        blockchain_updated::Append as AppendPB,
        blockchain_updated::Update as UpdatePB,
        grpc::{
            blockchain_updates_api_client::BlockchainUpdatesApiClient,
            SubscribeEvent as SubscribeEventPB, SubscribeRequest as SubscribeRequestPB,
        },
        BlockchainUpdated as BlockchainUpdatedPB,
    },
    Block as BlockPB, SignedMicroBlock as SignedMicroBlockPB,
    SignedTransaction as SignedTransactionPB,
};
use wavesexchange_log::{debug, error};

use super::{
epoch_ms_to_naivedatetime, BlockMicroblockAppend, BlockchainUpdate,
    BlockchainUpdatesWithLastHeight, Tx, UpdatesSource,
};
use crate::error::Error as AppError;

/// gRPC source of blockchain updates.
/// NOTE(review): generic parameters were stripped by the dump; the
/// `<tonic::transport::Channel>` / message-type params are reconstructed —
/// TODO confirm against the original file.
#[derive(Clone)]
pub struct UpdatesSourceImpl {
    grpc_client: BlockchainUpdatesApiClient<tonic::transport::Channel>,
}

/// Connects to the blockchain-updates gRPC endpoint.
pub async fn new(blockchain_updates_url: &str) -> Result<UpdatesSourceImpl> {
    Ok(UpdatesSourceImpl {
        grpc_client: {
            const MAX_MSG_SIZE: usize = 8 * 1024 * 1024; // 8 MB instead of the default 4 MB
            BlockchainUpdatesApiClient::connect(blockchain_updates_url.to_owned())
                .await?
                .max_decoding_message_size(MAX_MSG_SIZE)
        },
    })
}

#[async_trait]
impl UpdatesSource for UpdatesSourceImpl {
    /// Subscribes from `from_height` and forwards batched updates through a
    /// bounded (capacity 1) channel; the batching loop runs on a spawned task.
    async fn stream(
        self,
        from_height: u32,
        batch_max_size: usize,
        batch_max_wait_time: Duration,
    ) -> Result<Receiver<BlockchainUpdatesWithLastHeight>, AppError> {
        let request = tonic::Request::new(SubscribeRequestPB {
            from_height: from_height as i32,
            to_height: 0,
        });

        let stream: tonic::Streaming<SubscribeEventPB> = self
            .grpc_client
            .clone()
            .subscribe(request)
            .await
            .map_err(|e| AppError::StreamError(format!("Subscribe Stream error: {}", e)))?
            .into_inner();

        let (tx, rx) = channel::<BlockchainUpdatesWithLastHeight>(1);

        tokio::spawn(async move {
            let r = self
                .run(stream, tx, from_height, batch_max_size, batch_max_wait_time)
                .await;
            if let Err(e) = r {
                error!("updates source stopped with error: {:?}", e);
            } else {
                error!("updates source stopped without an error")
            }
        });

        Ok(rx)
    }
}

impl UpdatesSourceImpl {
    /// Batching loop: accumulates updates until `batch_max_size` is reached or
    /// `batch_max_wait_time` elapses (microblocks/rollbacks flush immediately),
    /// then sends the batch downstream.
    async fn run(
        &self,
        mut stream: tonic::Streaming<SubscribeEventPB>,
        tx: Sender<BlockchainUpdatesWithLastHeight>,
        from_height: u32,
        batch_max_size: usize,
        batch_max_wait_time: Duration,
    ) -> Result<(), AppError> {
        let mut result = vec![];
        let mut last_height = from_height;

        let mut start = Instant::now();
        let mut should_receive_more = true;

        let batch_max_wait_time = batch_max_wait_time.to_std().unwrap();

        loop {
            if let Some(SubscribeEventPB {
                update: Some(update),
            }) = stream
                .message()
                .await
                .map_err(|s| AppError::StreamError(format!("Updates stream error: {}", s)))?
            {
                last_height = update.height as u32;
                match BlockchainUpdate::try_from(update) {
                    Ok(upd) => {
                        let current_batch_size = result.len() + 1;
                        match &upd {
                            BlockchainUpdate::Block(_) => {
                                if current_batch_size >= batch_max_size
                                    || start.elapsed().ge(&batch_max_wait_time)
                                {
                                    should_receive_more = false;
                                }
                            }
                            // Microblocks and rollbacks must be applied promptly.
                            BlockchainUpdate::Microblock(_) | BlockchainUpdate::Rollback(_) => {
                                should_receive_more = false
                            }
                        }
                        result.push(upd);
                        Ok(())
                    }
                    Err(err) => Err(err),
                }?;
            }

            if !should_receive_more {
                debug!("updating to height {}", last_height);
                tx.send(BlockchainUpdatesWithLastHeight {
                    last_height,
                    updates: result.drain(..).collect(),
                })
                .await
                .map_err(|e| AppError::StreamError(format!("Channel error: {}", e)))?;
                should_receive_more = true;
                start = Instant::now();
            }

            // NOTE(review): 1 ms pause per received message caps throughput at
            // ~1000 msg/s — presumably an intentional throttle; confirm.
            time::sleep(StdDuration::from_micros(1000)).await;
        }
    }
}

impl TryFrom<BlockchainUpdatedPB> for BlockchainUpdate {
    type Error = AppError;

    fn try_from(mut value: BlockchainUpdatedPB) -> Result<Self, Self::Error> {
        use BlockchainUpdate::{Block, Microblock, Rollback};

        match value.update {
            Some(UpdatePB::Append(AppendPB {
                ref mut body,
                transaction_ids,
                transactions_metadata,
                transaction_state_updates,
                ..
            })) => {
                let height = value.height;

                // Drain the transactions (and, for blocks, the timestamp) out
                // of the protobuf body.
                let txs: Option<(Vec<SignedTransactionPB>, Option<i64>)> = match body {
                    Some(BodyPB::Block(BlockAppendPB { ref mut block, .. })) => {
                        Ok(block.as_mut().map(|it| {
                            (
                                it.transactions.drain(..).collect(),
                                it.header.as_ref().map(|it| it.timestamp),
                            )
                        }))
                    }
                    Some(BodyPB::MicroBlock(MicroBlockAppendPB {
                        ref mut micro_block,
                        ..
                    })) => Ok(micro_block.as_mut().and_then(|it| {
                        it.micro_block
                            .as_mut()
                            .map(|it| (it.transactions.drain(..).collect(), None))
                    })),
                    _ => Err(AppError::InvalidMessage(
                        "Append body is empty.".to_string(),
                    )),
                }?;

                // NOTE(review): the indexed `.unwrap()`s assume the node sends
                // ids/metadata/state-updates arrays parallel to `transactions`;
                // a malformed message would panic here — confirm acceptable.
                let txs = match txs {
                    Some((txs, ..)) => txs
                        .into_iter()
                        .enumerate()
                        .map(|(idx, tx)| {
                            let id = transaction_ids.get(idx).unwrap().clone();
                            Tx {
                                id: bs58::encode(id).into_string(),
                                data: tx,
                                meta: transactions_metadata.get(idx).unwrap().clone(),
                                state_update: transaction_state_updates.get(idx).unwrap().clone(),
                            }
                        })
                        .collect(),
                    None => vec![],
                };

                match body {
                    Some(BodyPB::Block(BlockAppendPB {
                        block:
                            Some(BlockPB {
                                header: Some(HeaderPB { timestamp, .. }),
                                ..
                            }),
                        updated_waves_amount,
                        ..
                    })) => Ok(Block(BlockMicroblockAppend {
                        id: bs58::encode(&value.id).into_string(),
                        time_stamp: Some(epoch_ms_to_naivedatetime(*timestamp)),
                        height,
                        updated_waves_amount: if *updated_waves_amount > 0 {
                            Some(*updated_waves_amount)
                        } else {
                            None
                        },
                        txs,
                    })),
                    Some(BodyPB::MicroBlock(MicroBlockAppendPB {
                        micro_block: Some(SignedMicroBlockPB { total_block_id, .. }),
                        ..
                    })) => Ok(Microblock(BlockMicroblockAppend {
                        id: bs58::encode(&total_block_id).into_string(),
                        time_stamp: None,
                        height,
                        updated_waves_amount: None,
                        txs,
                    })),
                    _ => Err(AppError::InvalidMessage(
                        "Append body is empty.".to_string(),
                    )),
                }
            }
            Some(UpdatePB::Rollback(_)) => Ok(Rollback(bs58::encode(&value.id).into_string())),
            _ => Err(AppError::InvalidMessage(
                "Unknown blockchain update.".to_string(),
            )),
        }
    }
}

// ---------------------------------------------------------------------------
// /src/lib/consumer/models/txs/mod.rs
// ---------------------------------------------------------------------------

pub mod convert;

use crate::schema::*;
use chrono::NaiveDateTime;
use diesel::Insertable;
use serde_json::Value;

// Field-type aliases shared by all transaction models.
// NOTE(review): the `Option<…>` payloads were stripped in the dump and are
// reconstructed from the column semantics — TODO confirm vs schema.rs.
type TxUid = i64;
type TxHeight = i32;
type TxType = i16;
type TxId = String;
type TxTimeStamp = NaiveDateTime;
type TxSignature = Option<String>;
type TxFee = i64;
type TxProofs = Option<Vec<String>>;
type TxVersion = Option<i16>;
type TxSender = String;
type TxSenderPubKey = String;
type TxStatus = String;
type TxBlockUid = i64;

/// Genesis transaction
#[derive(Clone, Debug, Insertable)]
#[diesel(table_name = txs_1)]
pub struct Tx1 {
    pub uid: TxUid,
    pub height: TxHeight,
    pub tx_type: TxType,
    pub id: TxId,
    pub time_stamp: TxTimeStamp,
    pub signature: TxSignature,
    pub fee: TxFee,
    pub proofs: TxProofs,
    pub tx_version: TxVersion,
    pub block_uid: TxBlockUid,
    pub sender: Option<TxSender>,
    pub sender_public_key: Option<TxSenderPubKey>,
    pub status: TxStatus,
    pub recipient_address: String,
    pub recipient_alias: Option<String>,
    pub amount: i64,
}

/// Payment transaction
#[derive(Clone, Debug, Insertable)]
#[diesel(table_name = txs_2)]
pub struct Tx2 {
    pub uid: TxUid,
    pub
height: TxHeight,
    pub tx_type: TxType,
    pub id: TxId,
    pub time_stamp: TxTimeStamp,
    pub signature: TxSignature,
    pub fee: TxFee,
    pub proofs: TxProofs,
    pub tx_version: TxVersion,
    pub block_uid: TxBlockUid,
    pub sender: TxSender,
    pub sender_public_key: TxSenderPubKey,
    pub status: TxStatus,
    pub recipient_address: String,
    pub recipient_alias: Option<String>,
    pub amount: i64,
}

/// Issue transaction
// NOTE(review): `Option<…>` payloads in this file were stripped by the dump
// and are reconstructed — TODO confirm each against schema.rs.
#[derive(Clone, Debug, Insertable)]
#[diesel(table_name = txs_3)]
pub struct Tx3 {
    pub uid: TxUid,
    pub height: TxHeight,
    pub tx_type: TxType,
    pub id: TxId,
    pub time_stamp: TxTimeStamp,
    pub signature: TxSignature,
    pub fee: TxFee,
    pub proofs: TxProofs,
    pub tx_version: TxVersion,
    pub block_uid: TxBlockUid,
    pub sender: TxSender,
    pub sender_public_key: TxSenderPubKey,
    pub status: TxStatus,
    pub asset_id: String,
    pub asset_name: String,
    pub description: String,
    pub quantity: i64,
    pub decimals: i16,
    pub reissuable: bool,
    pub script: Option<String>,
}

/// Transfer transaction
#[derive(Clone, Debug, Insertable)]
#[diesel(table_name = txs_4)]
pub struct Tx4 {
    pub uid: TxUid,
    pub height: TxHeight,
    pub tx_type: TxType,
    pub id: TxId,
    pub time_stamp: TxTimeStamp,
    pub signature: TxSignature,
    pub fee: TxFee,
    pub proofs: TxProofs,
    pub tx_version: TxVersion,
    pub block_uid: TxBlockUid,
    pub sender: TxSender,
    pub sender_public_key: TxSenderPubKey,
    pub status: TxStatus,
    pub amount: i64,
    pub asset_id: String,
    pub recipient_address: String,
    pub recipient_alias: Option<String>,
    pub fee_asset_id: String,
    pub attachment: String,
}

/// Reissue transaction
#[derive(Clone, Debug, Insertable)]
#[diesel(table_name = txs_5)]
pub struct Tx5 {
    pub uid: TxUid,
    pub height: TxHeight,
    pub tx_type: TxType,
    pub id: TxId,
    pub time_stamp: TxTimeStamp,
    pub signature: TxSignature,
    pub fee: TxFee,
    pub proofs: TxProofs,
    pub tx_version: TxVersion,
    pub block_uid: TxBlockUid,
    pub sender: TxSender,
    pub sender_public_key: TxSenderPubKey,
    pub status: TxStatus,
    pub asset_id: String,
    pub quantity: i64,
    pub reissuable: bool,
}

/// Burn transaction
#[derive(Clone, Debug, Insertable)]
#[diesel(table_name = txs_6)]
pub struct Tx6 {
    pub uid: TxUid,
    pub height: TxHeight,
    pub tx_type: TxType,
    pub id: TxId,
    pub time_stamp: TxTimeStamp,
    pub signature: TxSignature,
    pub fee: TxFee,
    pub proofs: TxProofs,
    pub tx_version: TxVersion,
    pub block_uid: TxBlockUid,
    pub sender: TxSender,
    pub sender_public_key: TxSenderPubKey,
    pub status: TxStatus,
    pub asset_id: String,
    pub amount: i64,
}

/// Exchange transaction
#[derive(Clone, Debug, Insertable)]
#[diesel(table_name = txs_7)]
pub struct Tx7 {
    pub uid: TxUid,
    pub height: TxHeight,
    pub tx_type: TxType,
    pub id: TxId,
    pub time_stamp: TxTimeStamp,
    pub signature: TxSignature,
    pub fee: TxFee,
    pub proofs: TxProofs,
    pub tx_version: TxVersion,
    pub block_uid: TxBlockUid,
    pub sender: TxSender,
    pub sender_public_key: TxSenderPubKey,
    pub status: TxStatus,
    pub order1: Value,
    pub order2: Value,
    pub amount_asset_id: String,
    pub price_asset_id: String,
    pub amount: i64,
    pub price: i64,
    pub buy_matcher_fee: i64,
    pub sell_matcher_fee: i64,
    pub fee_asset_id: String,
}

/// Lease transaction
#[derive(Clone, Debug, Insertable)]
#[diesel(table_name = txs_8)]
pub struct Tx8 {
    pub uid: TxUid,
    pub height: TxHeight,
    pub tx_type: TxType,
    pub id: TxId,
    pub time_stamp: TxTimeStamp,
    pub signature: TxSignature,
    pub fee: TxFee,
    pub proofs: TxProofs,
    pub tx_version: TxVersion,
    pub block_uid: TxBlockUid,
    pub sender: TxSender,
    pub sender_public_key: TxSenderPubKey,
    pub status: TxStatus,
    pub recipient_address: String,
    pub recipient_alias: Option<String>,
    pub amount: i64,
}

/// LeaseCancel partial transaction.
///
/// `lease_id` field is used further to create an actual transaction
#[derive(Clone, Debug)]
pub struct Tx9Partial {
    pub uid: TxUid,
    pub height: TxHeight,
    pub tx_type: TxType,
    pub id: TxId,
    pub time_stamp: TxTimeStamp,
    pub signature: TxSignature,
    pub fee: TxFee,
    pub proofs: TxProofs,
    pub tx_version: TxVersion,
    pub block_uid: TxBlockUid,
    pub sender: TxSender,
    pub sender_public_key: TxSenderPubKey,
    pub status: TxStatus,
    pub lease_id: Option<String>,
}

/// LeaseCancel transaction
#[derive(Clone, Debug, Insertable)]
#[diesel(table_name = txs_9)]
pub struct Tx9 {
    pub uid: TxUid,
    pub height: TxHeight,
    pub tx_type: TxType,
    pub id: TxId,
    pub time_stamp: TxTimeStamp,
    pub signature: TxSignature,
    pub fee: TxFee,
    pub proofs: TxProofs,
    pub tx_version: TxVersion,
    pub block_uid: TxBlockUid,
    pub sender: TxSender,
    pub sender_public_key: TxSenderPubKey,
    pub status: TxStatus,
    pub lease_tx_uid: Option<TxUid>,
}

impl From<(&Tx9Partial, Option<TxUid>)> for Tx9 {
    /// Resolves the partial's textual `lease_id` into the lease tx uid looked
    /// up by the repo; the uid is attached only if a `lease_id` was present.
    fn from((tx, lease_tx_uid): (&Tx9Partial, Option<TxUid>)) -> Self {
        let tx = tx.clone();
        Self {
            uid: tx.uid,
            height: tx.height,
            tx_type: tx.tx_type,
            id: tx.id,
            time_stamp: tx.time_stamp,
            signature: tx.signature,
            fee: tx.fee,
            proofs: tx.proofs,
            tx_version: tx.tx_version,
            sender: tx.sender,
            sender_public_key: tx.sender_public_key,
            status: tx.status,
            lease_tx_uid: tx.lease_id.and(lease_tx_uid),
            block_uid: tx.block_uid,
        }
    }
}

/// CreateAlias transaction
#[derive(Clone, Debug, Insertable)]
#[diesel(table_name = txs_10)]
pub struct Tx10 {
    pub uid: TxUid,
    pub height: TxHeight,
    pub tx_type: TxType,
    pub id: TxId,
    pub time_stamp: TxTimeStamp,
    pub signature: TxSignature,
    pub fee: TxFee,
    pub proofs: TxProofs,
    pub tx_version: TxVersion,
    pub block_uid: TxBlockUid,
    pub sender: TxSender,
    pub sender_public_key: TxSenderPubKey,
    pub status: TxStatus,
    pub alias: String,
}

/// MassTransfer transaction
#[derive(Clone, Debug, Insertable)]
#[diesel(table_name = txs_11)]
pub struct Tx11 {
    pub uid: TxUid,
    pub height: TxHeight,
    pub tx_type: TxType,
    pub id: TxId,
    pub time_stamp: TxTimeStamp,
    pub signature: TxSignature,
    pub fee: TxFee,
    pub proofs: TxProofs,
    pub tx_version: TxVersion,
    pub block_uid: TxBlockUid,
    pub sender: TxSender,
    pub sender_public_key: TxSenderPubKey,
    pub status: TxStatus,
    pub asset_id: String,
    pub attachment: String,
}

/// MassTransfer transaction
#[derive(Clone, Debug, Insertable)]
#[diesel(table_name = txs_11_transfers)]
pub struct Tx11Transfers {
    pub tx_uid: TxUid,
    pub recipient_address: String,
    pub recipient_alias: Option<String>,
    pub amount: i64,
    pub position_in_tx: i16,
    pub height: TxHeight,
}

/// MassTransfer transaction
#[derive(Clone, Debug)]
pub struct Tx11Combined {
    pub tx: Tx11,
    pub transfers: Vec<Tx11Transfers>,
}

/// DataTransaction transaction
#[derive(Clone, Debug, Insertable)]
#[diesel(table_name = txs_12)]
pub struct Tx12 {
    pub uid: TxUid,
    pub height: TxHeight,
    pub tx_type: TxType,
    pub id: TxId,
    pub time_stamp: TxTimeStamp,
    pub signature: TxSignature,
    pub fee: TxFee,
    pub proofs: TxProofs,
    pub tx_version: TxVersion,
    pub block_uid: TxBlockUid,
    pub sender: TxSender,
    pub sender_public_key: TxSenderPubKey,
    pub status: TxStatus,
}

/// DataTransaction transaction
#[derive(Clone, Debug, Insertable)]
#[diesel(table_name = txs_12_data)]
pub struct Tx12Data {
    pub tx_uid: TxUid,
    pub data_key: String,
    pub data_type: Option<String>,
    pub data_value_integer: Option<i64>,
    pub data_value_boolean: Option<bool>,
    pub data_value_binary: Option<String>,
    pub data_value_string: Option<String>,
    pub position_in_tx: i16,
    pub height: TxHeight,
}

/// DataTransaction transaction
#[derive(Clone, Debug)]
pub struct Tx12Combined {
    pub tx: Tx12,
    pub data: Vec<Tx12Data>,
}

/// SetScript transaction
#[derive(Clone, Debug, Insertable)]
#[diesel(table_name = txs_13)]
pub struct Tx13 {
    pub uid: TxUid,
    pub height: TxHeight,
    pub tx_type: TxType,
    pub id: TxId,
    pub time_stamp: TxTimeStamp,
    pub signature: TxSignature,
    pub fee: TxFee,
    pub proofs: TxProofs,
    pub tx_version: TxVersion,
    pub block_uid: TxBlockUid,
    pub sender: TxSender,
    pub sender_public_key: TxSenderPubKey,
    pub status: TxStatus,
    pub script: Option<String>,
}

/// SponsorFee transaction
#[derive(Clone, Debug, Insertable)]
#[diesel(table_name = txs_14)]
pub struct Tx14 {
    pub uid: TxUid,
    pub height: TxHeight,
    pub tx_type: TxType,
    pub id: TxId,
    pub time_stamp: TxTimeStamp,
    pub signature: TxSignature,
    pub fee: TxFee,
    pub proofs: TxProofs,
    pub tx_version: TxVersion,
    pub block_uid: TxBlockUid,
    pub sender: TxSender,
    pub sender_public_key: TxSenderPubKey,
    pub status: TxStatus,
    pub asset_id: String,
    pub min_sponsored_asset_fee: Option<i64>,
}

/// SetAssetScript transaction
#[derive(Clone, Debug, Insertable)]
#[diesel(table_name = txs_15)]
pub struct Tx15 {
    pub uid: TxUid,
    pub height: TxHeight,
    pub tx_type: TxType,
    pub id: TxId,
    pub time_stamp: TxTimeStamp,
    pub signature: TxSignature,
    pub fee: TxFee,
    pub proofs: TxProofs,
    pub tx_version: TxVersion,
    pub block_uid: TxBlockUid,
    pub sender: TxSender,
    pub sender_public_key: TxSenderPubKey,
    pub status: TxStatus,
    pub asset_id: String,
    pub script: Option<String>,
}

/// InvokeScript transaction
#[derive(Clone, Debug, Insertable)]
#[diesel(table_name = txs_16)]
pub struct Tx16 {
    pub uid: TxUid,
    pub height: TxHeight,
    pub tx_type: TxType,
    pub id: TxId,
    pub time_stamp: TxTimeStamp,
    pub signature: TxSignature,
    pub fee: TxFee,
    pub proofs: TxProofs,
    pub tx_version: TxVersion,
    pub block_uid: TxBlockUid,
    pub sender: TxSender,
    pub sender_public_key: TxSenderPubKey,
    pub status: TxStatus,
    pub dapp_address: String,
    pub dapp_alias: Option<String>,
    pub function_name: Option<String>,
    pub fee_asset_id: String,
}

/// InvokeScript transaction
#[derive(Clone, Debug, Insertable)]
#[diesel(table_name = txs_16_args)]
pub struct Tx16Args {
    pub tx_uid: TxUid,
    pub arg_type: String,
    pub arg_value_integer: Option<i64>,
    pub arg_value_boolean: Option<bool>,
    pub arg_value_binary: Option<String>,
    pub arg_value_string: Option<String>,
    pub arg_value_list: Option<Value>,
    pub position_in_args: i16,
    pub height: TxHeight,
}

/// InvokeScript transaction
#[derive(Clone, Debug, Insertable)]
#[diesel(table_name = txs_16_payment)]
pub struct Tx16Payment {
    pub tx_uid: TxUid,
    pub amount: i64,
    pub position_in_payment: i16,
    pub height: TxHeight,
    pub asset_id: String,
}

/// InvokeScript transaction
#[derive(Clone, Debug)]
pub struct Tx16Combined {
    pub tx: Tx16,
    pub args: Vec<Tx16Args>,
    pub payments: Vec<Tx16Payment>,
}

/// UpdateAssetInfo transaction
#[derive(Clone, Debug, Insertable)]
#[diesel(table_name = txs_17)]
pub struct Tx17 {
    pub uid: TxUid,
    pub height: TxHeight,
    pub tx_type: TxType,
    pub id: TxId,
    pub time_stamp: TxTimeStamp,
    pub signature: TxSignature,
    pub fee: TxFee,
    pub proofs: TxProofs,
    pub tx_version: TxVersion,
    pub block_uid: TxBlockUid,
    pub sender: TxSender,
    pub sender_public_key: TxSenderPubKey,
    pub status: TxStatus,
    pub asset_id: String,
    pub asset_name: String,
    pub description: String,
}

/// Ethereum transaction
#[derive(Clone, Debug, Insertable)]
#[diesel(table_name = txs_18)]
pub struct Tx18 {
    pub uid: TxUid,
    pub height: TxHeight,
    pub tx_type: TxType,
    pub id: TxId,
    pub time_stamp: TxTimeStamp,
    pub signature: TxSignature,
    pub fee: TxFee,
    pub proofs: TxProofs,
    pub tx_version: TxVersion,
    pub block_uid: TxBlockUid,
    pub sender: TxSender,
    pub sender_public_key: TxSenderPubKey,
    pub status: TxStatus,
    pub bytes: Vec<u8>,
    pub function_name: Option<String>,
}

/// Ethereum InvokeScript transaction
#[derive(Clone, Debug, Insertable)]
#[diesel(table_name = txs_18_args)]
pub struct Tx18Args {
    pub tx_uid: TxUid,
    pub arg_type: String,
    pub arg_value_integer: Option<i64>,
    pub arg_value_boolean: Option<bool>,
    pub arg_value_binary: Option<String>,
    pub arg_value_string: Option<String>,
    pub arg_value_list:
Option<serde_json::Value>,
    pub position_in_args: i16,
    pub height: TxHeight,
}

/// Single attached payment of an Ethereum InvokeScript transaction.
#[derive(Clone, Debug, Insertable)]
#[diesel(table_name = txs_18_payment)]
pub struct Tx18Payment {
    pub tx_uid: TxUid,
    pub amount: i64,
    pub position_in_payment: i16,
    pub height: TxHeight,
    pub asset_id: String,
}

/// Ethereum transaction header together with its decoded invoke arguments
/// and payments (both empty for plain transfers).
#[derive(Clone, Debug)]
pub struct Tx18Combined {
    pub tx: Tx18,
    pub args: Vec<Tx18Args>,
    pub payments: Vec<Tx18Payment>,
}
-------------------------------------------------------------------------------- /src/lib/schema.rs: --------------------------------------------------------------------------------
// @generated automatically by Diesel CLI.
//
// NOTE(review): the inner SQL types of `Nullable<...>` columns were stripped
// by an extraction pass and have been reconstructed from the matching
// Insertable structs — regenerate with `diesel print-schema` to confirm.

diesel::table! {
    use diesel::sql_types::*;

    asset_origins (asset_id) {
        asset_id -> Varchar,
        first_asset_update_uid -> Int8,
        origin_transaction_id -> Varchar,
        issuer -> Varchar,
        issue_height -> Int4,
        issue_time_stamp -> Timestamptz,
    }
}

diesel::table! {
    use diesel::sql_types::*;

    asset_tickers (superseded_by, asset_id) {
        uid -> Int8,
        superseded_by -> Int8,
        block_uid -> Int8,
        asset_id -> Text,
        ticker -> Text,
    }
}

diesel::table! {
    use diesel::sql_types::*;

    asset_updates (superseded_by, asset_id) {
        block_uid -> Int8,
        uid -> Int8,
        superseded_by -> Int8,
        asset_id -> Varchar,
        decimals -> Int2,
        name -> Varchar,
        description -> Varchar,
        reissuable -> Bool,
        volume -> Int8,
        script -> Nullable<Varchar>,
        sponsorship -> Nullable<Int8>,
        nft -> Bool,
    }
}

diesel::table! {
    use diesel::sql_types::*;

    assets_metadata (asset_id) {
        asset_id -> Varchar,
        asset_name -> Nullable<Varchar>,
        ticker -> Nullable<Varchar>,
        height -> Nullable<Int4>,
    }
}

diesel::table!
{
    use diesel::sql_types::*;

    blocks_microblocks (id) {
        uid -> Int8,
        id -> Varchar,
        height -> Int4,
        time_stamp -> Nullable<Timestamptz>,
    }
}

// NOTE(review): as above, `Nullable<...>` / `Array<...>` inner types in this
// @generated file were reconstructed from the Insertable structs in
// `consumer/models/txs` — verify with `diesel print-schema`.

diesel::table! {
    use diesel::sql_types::*;

    candles (interval, time_start, amount_asset_id, price_asset_id, matcher_address) {
        time_start -> Timestamp,
        #[max_length = 255]
        amount_asset_id -> Varchar,
        #[max_length = 255]
        price_asset_id -> Varchar,
        low -> Numeric,
        high -> Numeric,
        volume -> Numeric,
        quote_volume -> Numeric,
        max_height -> Int4,
        txs_count -> Int4,
        weighted_average_price -> Numeric,
        open -> Numeric,
        close -> Numeric,
        interval -> Varchar,
        matcher_address -> Varchar,
    }
}

diesel::table! {
    use diesel::sql_types::*;

    pairs (amount_asset_id, price_asset_id, matcher_address) {
        #[max_length = 255]
        amount_asset_id -> Varchar,
        #[max_length = 255]
        price_asset_id -> Varchar,
        first_price -> Numeric,
        last_price -> Numeric,
        volume -> Numeric,
        volume_waves -> Nullable<Numeric>,
        quote_volume -> Numeric,
        high -> Numeric,
        low -> Numeric,
        weighted_average_price -> Numeric,
        txs_count -> Int4,
        #[max_length = 255]
        matcher_address -> Varchar,
    }
}

diesel::table! {
    use diesel::sql_types::*;

    txs (uid, id, time_stamp) {
        uid -> Int8,
        tx_type -> Int2,
        sender -> Nullable<Varchar>,
        sender_public_key -> Nullable<Varchar>,
        time_stamp -> Timestamptz,
        height -> Int4,
        id -> Varchar,
        signature -> Nullable<Varchar>,
        proofs -> Nullable<Array<Nullable<Text>>>,
        tx_version -> Nullable<Int2>,
        fee -> Int8,
        status -> Varchar,
        block_uid -> Int8,
    }
}

diesel::table! {
    use diesel::sql_types::*;

    txs_1 (uid) {
        uid -> Int8,
        tx_type -> Int2,
        sender -> Nullable<Varchar>,
        sender_public_key -> Nullable<Varchar>,
        time_stamp -> Timestamptz,
        height -> Int4,
        id -> Varchar,
        signature -> Nullable<Varchar>,
        proofs -> Nullable<Array<Nullable<Text>>>,
        tx_version -> Nullable<Int2>,
        fee -> Int8,
        status -> Varchar,
        block_uid -> Int8,
        recipient_address -> Varchar,
        recipient_alias -> Nullable<Varchar>,
        amount -> Int8,
    }
}

diesel::table! {
    use diesel::sql_types::*;

    txs_10 (uid) {
        uid -> Int8,
        tx_type -> Int2,
        sender -> Varchar,
        sender_public_key -> Varchar,
        time_stamp -> Timestamptz,
        height -> Int4,
        id -> Varchar,
        signature -> Nullable<Varchar>,
        proofs -> Nullable<Array<Nullable<Text>>>,
        tx_version -> Nullable<Int2>,
        fee -> Int8,
        status -> Varchar,
        block_uid -> Int8,
        alias -> Varchar,
    }
}

diesel::table! {
    use diesel::sql_types::*;

    txs_11 (uid) {
        uid -> Int8,
        tx_type -> Int2,
        sender -> Varchar,
        sender_public_key -> Varchar,
        time_stamp -> Timestamptz,
        height -> Int4,
        id -> Varchar,
        signature -> Nullable<Varchar>,
        proofs -> Nullable<Array<Nullable<Text>>>,
        tx_version -> Nullable<Int2>,
        fee -> Int8,
        status -> Varchar,
        block_uid -> Int8,
        asset_id -> Varchar,
        attachment -> Varchar,
    }
}

diesel::table! {
    use diesel::sql_types::*;

    txs_11_transfers (tx_uid, position_in_tx) {
        tx_uid -> Int8,
        recipient_address -> Varchar,
        recipient_alias -> Nullable<Varchar>,
        amount -> Int8,
        position_in_tx -> Int2,
        height -> Int4,
    }
}

diesel::table! {
    use diesel::sql_types::*;

    txs_12 (uid) {
        uid -> Int8,
        tx_type -> Int2,
        sender -> Varchar,
        sender_public_key -> Varchar,
        time_stamp -> Timestamptz,
        height -> Int4,
        id -> Varchar,
        signature -> Nullable<Varchar>,
        proofs -> Nullable<Array<Nullable<Text>>>,
        tx_version -> Nullable<Int2>,
        fee -> Int8,
        status -> Varchar,
        block_uid -> Int8,
    }
}

diesel::table! {
    use diesel::sql_types::*;

    txs_12_data (tx_uid, position_in_tx) {
        tx_uid -> Int8,
        data_key -> Text,
        data_type -> Nullable<Text>,
        data_value_integer -> Nullable<Int8>,
        data_value_boolean -> Nullable<Bool>,
        data_value_binary -> Nullable<Text>,
        data_value_string -> Nullable<Text>,
        position_in_tx -> Int2,
        height -> Int4,
    }
}

diesel::table! {
    use diesel::sql_types::*;

    txs_13 (uid) {
        uid -> Int8,
        tx_type -> Int2,
        sender -> Varchar,
        sender_public_key -> Varchar,
        time_stamp -> Timestamptz,
        height -> Int4,
        id -> Varchar,
        signature -> Nullable<Varchar>,
        proofs -> Nullable<Array<Nullable<Text>>>,
        tx_version -> Nullable<Int2>,
        fee -> Int8,
        status -> Varchar,
        block_uid -> Int8,
        script -> Nullable<Varchar>,
    }
}

diesel::table! {
    use diesel::sql_types::*;

    txs_14 (uid) {
        uid -> Int8,
        tx_type -> Int2,
        sender -> Varchar,
        sender_public_key -> Varchar,
        time_stamp -> Timestamptz,
        height -> Int4,
        id -> Varchar,
        signature -> Nullable<Varchar>,
        proofs -> Nullable<Array<Nullable<Text>>>,
        tx_version -> Nullable<Int2>,
        fee -> Int8,
        status -> Varchar,
        block_uid -> Int8,
        asset_id -> Varchar,
        min_sponsored_asset_fee -> Nullable<Int8>,
    }
}

diesel::table! {
    use diesel::sql_types::*;

    txs_15 (uid) {
        uid -> Int8,
        tx_type -> Int2,
        sender -> Varchar,
        sender_public_key -> Varchar,
        time_stamp -> Timestamptz,
        height -> Int4,
        id -> Varchar,
        signature -> Nullable<Varchar>,
        proofs -> Nullable<Array<Nullable<Text>>>,
        tx_version -> Nullable<Int2>,
        fee -> Int8,
        status -> Varchar,
        block_uid -> Int8,
        asset_id -> Varchar,
        script -> Nullable<Varchar>,
    }
}

diesel::table! {
    use diesel::sql_types::*;

    txs_16 (uid) {
        uid -> Int8,
        tx_type -> Int2,
        sender -> Varchar,
        sender_public_key -> Varchar,
        time_stamp -> Timestamptz,
        height -> Int4,
        id -> Varchar,
        signature -> Nullable<Varchar>,
        proofs -> Nullable<Array<Nullable<Text>>>,
        tx_version -> Nullable<Int2>,
        fee -> Int8,
        status -> Varchar,
        block_uid -> Int8,
        dapp_address -> Varchar,
        dapp_alias -> Nullable<Varchar>,
        function_name -> Nullable<Varchar>,
        fee_asset_id -> Varchar,
    }
}

diesel::table! {
    use diesel::sql_types::*;

    txs_16_args (tx_uid, position_in_args) {
        arg_type -> Text,
        arg_value_integer -> Nullable<Int8>,
        arg_value_boolean -> Nullable<Bool>,
        arg_value_binary -> Nullable<Text>,
        arg_value_string -> Nullable<Text>,
        arg_value_list -> Nullable<Jsonb>,
        position_in_args -> Int2,
        tx_uid -> Int8,
        height -> Nullable<Int4>,
    }
}

diesel::table! {
    use diesel::sql_types::*;

    txs_16_payment (tx_uid, position_in_payment) {
        tx_uid -> Int8,
        amount -> Int8,
        position_in_payment -> Int2,
        height -> Nullable<Int4>,
        asset_id -> Varchar,
    }
}

diesel::table! {
    use diesel::sql_types::*;

    txs_17 (uid) {
        uid -> Int8,
        tx_type -> Int2,
        sender -> Varchar,
        sender_public_key -> Varchar,
        time_stamp -> Timestamptz,
        height -> Int4,
        id -> Varchar,
        signature -> Nullable<Varchar>,
        proofs -> Nullable<Array<Nullable<Text>>>,
        tx_version -> Nullable<Int2>,
        fee -> Int8,
        status -> Varchar,
        block_uid -> Int8,
        asset_id -> Varchar,
        asset_name -> Varchar,
        description -> Varchar,
    }
}

diesel::table! {
    use diesel::sql_types::*;

    txs_18 (uid) {
        uid -> Int8,
        tx_type -> Int2,
        sender -> Nullable<Varchar>,
        sender_public_key -> Nullable<Varchar>,
        time_stamp -> Timestamptz,
        height -> Int4,
        id -> Varchar,
        signature -> Nullable<Varchar>,
        proofs -> Nullable<Array<Nullable<Text>>>,
        tx_version -> Nullable<Int2>,
        fee -> Int8,
        status -> Varchar,
        block_uid -> Int8,
        bytes -> Bytea,
        function_name -> Nullable<Varchar>,
    }
}

diesel::table! {
    use diesel::sql_types::*;

    txs_18_args (tx_uid, position_in_args) {
        arg_type -> Text,
        arg_value_integer -> Nullable<Int8>,
        arg_value_boolean -> Nullable<Bool>,
        arg_value_binary -> Nullable<Text>,
        arg_value_string -> Nullable<Text>,
        arg_value_list -> Nullable<Jsonb>,
        position_in_args -> Int2,
        tx_uid -> Int8,
        height -> Nullable<Int4>,
    }
}

diesel::table! {
    use diesel::sql_types::*;

    txs_18_payment (tx_uid, position_in_payment) {
        tx_uid -> Int8,
        amount -> Int8,
        position_in_payment -> Int2,
        height -> Nullable<Int4>,
        asset_id -> Varchar,
    }
}

diesel::table! {
    use diesel::sql_types::*;

    txs_2 (uid) {
        uid -> Int8,
        tx_type -> Int2,
        sender -> Varchar,
        sender_public_key -> Varchar,
        time_stamp -> Timestamptz,
        height -> Int4,
        id -> Varchar,
        signature -> Nullable<Varchar>,
        proofs -> Nullable<Array<Nullable<Text>>>,
        tx_version -> Nullable<Int2>,
        fee -> Int8,
        status -> Varchar,
        block_uid -> Int8,
        recipient_address -> Varchar,
        recipient_alias -> Nullable<Varchar>,
        amount -> Int8,
    }
}

diesel::table! {
    use diesel::sql_types::*;

    txs_3 (uid) {
        uid -> Int8,
        tx_type -> Int2,
        sender -> Varchar,
        sender_public_key -> Varchar,
        time_stamp -> Timestamptz,
        height -> Int4,
        id -> Varchar,
        signature -> Nullable<Varchar>,
        proofs -> Nullable<Array<Nullable<Text>>>,
        tx_version -> Nullable<Int2>,
        fee -> Int8,
        status -> Varchar,
        block_uid -> Int8,
        asset_id -> Varchar,
        asset_name -> Varchar,
        description -> Varchar,
        quantity -> Int8,
        decimals -> Int2,
        reissuable -> Bool,
        script -> Nullable<Varchar>,
    }
}

diesel::table! {
    use diesel::sql_types::*;

    txs_4 (uid) {
        uid -> Int8,
        tx_type -> Int2,
        sender -> Varchar,
        sender_public_key -> Varchar,
        time_stamp -> Timestamptz,
        height -> Int4,
        id -> Varchar,
        signature -> Nullable<Varchar>,
        proofs -> Nullable<Array<Nullable<Text>>>,
        tx_version -> Nullable<Int2>,
        fee -> Int8,
        status -> Varchar,
        block_uid -> Int8,
        asset_id -> Varchar,
        amount -> Int8,
        recipient_address -> Varchar,
        recipient_alias -> Nullable<Varchar>,
        fee_asset_id -> Varchar,
        attachment -> Varchar,
    }
}

diesel::table! {
    use diesel::sql_types::*;

    txs_5 (uid) {
        uid -> Int8,
        tx_type -> Int2,
        sender -> Varchar,
        sender_public_key -> Varchar,
        time_stamp -> Timestamptz,
        height -> Int4,
        id -> Varchar,
        signature -> Nullable<Varchar>,
        proofs -> Nullable<Array<Nullable<Text>>>,
        tx_version -> Nullable<Int2>,
        fee -> Int8,
        status -> Varchar,
        block_uid -> Int8,
        asset_id -> Varchar,
        quantity -> Int8,
        reissuable -> Bool,
    }
}

diesel::table! {
    use diesel::sql_types::*;

    txs_6 (uid) {
        uid -> Int8,
        tx_type -> Int2,
        sender -> Varchar,
        sender_public_key -> Varchar,
        time_stamp -> Timestamptz,
        height -> Int4,
        id -> Varchar,
        signature -> Nullable<Varchar>,
        proofs -> Nullable<Array<Nullable<Text>>>,
        tx_version -> Nullable<Int2>,
        fee -> Int8,
        status -> Varchar,
        block_uid -> Int8,
        asset_id -> Varchar,
        amount -> Int8,
    }
}

diesel::table! {
    use diesel::sql_types::*;

    txs_7 (uid) {
        uid -> Int8,
        tx_type -> Int2,
        sender -> Varchar,
        sender_public_key -> Varchar,
        time_stamp -> Timestamptz,
        height -> Int4,
        id -> Varchar,
        signature -> Nullable<Varchar>,
        proofs -> Nullable<Array<Nullable<Text>>>,
        tx_version -> Nullable<Int2>,
        fee -> Int8,
        status -> Varchar,
        block_uid -> Int8,
        order1 -> Jsonb,
        order2 -> Jsonb,
        amount -> Int8,
        price -> Int8,
        amount_asset_id -> Varchar,
        price_asset_id -> Varchar,
        buy_matcher_fee -> Int8,
        sell_matcher_fee -> Int8,
        fee_asset_id -> Varchar,
    }
}

diesel::table! {
    use diesel::sql_types::*;

    txs_8 (uid) {
        uid -> Int8,
        tx_type -> Int2,
        sender -> Varchar,
        sender_public_key -> Varchar,
        time_stamp -> Timestamptz,
        height -> Int4,
        id -> Varchar,
        signature -> Nullable<Varchar>,
        proofs -> Nullable<Array<Nullable<Text>>>,
        tx_version -> Nullable<Int2>,
        fee -> Int8,
        status -> Varchar,
        block_uid -> Int8,
        recipient_address -> Varchar,
        recipient_alias -> Nullable<Varchar>,
        amount -> Int8,
    }
}

diesel::table! {
    use diesel::sql_types::*;

    txs_9 (uid) {
        uid -> Int8,
        tx_type -> Int2,
        sender -> Varchar,
        sender_public_key -> Varchar,
        time_stamp -> Timestamptz,
        height -> Int4,
        id -> Varchar,
        signature -> Nullable<Varchar>,
        proofs -> Nullable<Array<Nullable<Text>>>,
        tx_version -> Nullable<Int2>,
        fee -> Int8,
        status -> Varchar,
        block_uid -> Int8,
        lease_tx_uid -> Nullable<Int8>,
    }
}

diesel::table! {
    use diesel::sql_types::*;

    waves_data (quantity) {
        height -> Nullable<Int4>,
        quantity -> Numeric,
    }
}

diesel::joinable!(txs_11_transfers -> txs_11 (tx_uid));
diesel::joinable!(txs_12_data -> txs_12 (tx_uid));
diesel::joinable!(txs_16_args -> txs_16 (tx_uid));
diesel::joinable!(txs_16_payment -> txs_16 (tx_uid));
diesel::joinable!(txs_18_args -> txs_18 (tx_uid));
diesel::joinable!(txs_18_payment -> txs_18 (tx_uid));

diesel::allow_tables_to_appear_in_same_query!(
    asset_origins,
    asset_tickers,
    asset_updates,
    assets_metadata,
    blocks_microblocks,
    candles,
    pairs,
    txs,
    txs_1,
    txs_10,
    txs_11,
    txs_11_transfers,
    txs_12,
    txs_12_data,
    txs_13,
    txs_14,
    txs_15,
    txs_16,
    txs_16_args,
    txs_16_payment,
    txs_17,
    txs_18,
    txs_18_args,
    txs_18_payment,
    txs_2,
    txs_3,
txs_4,
    txs_5,
    txs_6,
    txs_7,
    txs_8,
    txs_9,
    waves_data,
);
-------------------------------------------------------------------------------- /src/lib/consumer/models/txs/convert.rs: --------------------------------------------------------------------------------
use super::*;
use crate::error::Error;
use crate::models::{DataEntryTypeValue, Order, OrderMeta};
use crate::utils::{
    epoch_ms_to_naivedatetime, escape_unicode_null, into_base58, into_prefixed_base64,
};
use crate::waves::{extract_asset_id, Address, ChainId, PublicKeyHash, WAVES_ID};
use serde_json::json;
use waves_protobuf_schemas::waves::{
    data_entry::Value as DataValue,
    events::{
        transaction_metadata::{ethereum_metadata::Action as EthAction, *},
        TransactionMetadata,
    },
    invoke_script_result::call::argument::Value as InvokeScriptArgValue,
    recipient::Recipient as InnerRecipient,
    signed_transaction::Transaction,
    transaction::Data,
    Amount, Recipient, SignedTransaction,
};

// Panic message used where transaction metadata does not match the tx variant.
const WRONG_META_VAR: &str = "wrong meta variant";

/// A converted transaction ready for insertion, one variant per
/// Waves transaction type (1–18).
pub enum Tx {
    Genesis(Tx1),
    Payment(Tx2),
    Issue(Tx3),
    Transfer(Tx4),
    Reissue(Tx5),
    Burn(Tx6),
    Exchange(Tx7),
    Lease(Tx8),
    LeaseCancel(Tx9Partial),
    CreateAlias(Tx10),
    MassTransfer(Tx11Combined),
    DataTransaction(Tx12Combined),
    SetScript(Tx13),
    SponsorFee(Tx14),
    SetAssetScript(Tx15),
    InvokeScript(Tx16Combined),
    UpdateAssetInfo(Tx17),
    Ethereum(Tx18Combined),
}

/// Generates transaction uids of the form `height * multiplier + index`,
/// where `index` is a per-height counter reset on every height change.
pub struct TxUidGenerator {
    // Scale factor applied to the height component of a uid.
    multiplier: i64,
    // Height most recently observed via `maybe_update_height`.
    last_height: TxHeight,
    // Next per-height sequence number to hand out.
    last_id: TxUid,
}

impl TxUidGenerator {
    /// Creates a generator with the given height multiplier, starting at
    /// height 0 with an empty sequence.
    pub const fn new(multiplier: i64) -> Self {
        Self {
            multiplier,
            last_height: 0,
            last_id: 0,
        }
    }

    /// Advances the generator to `height`, resetting the per-height counter.
    /// Heights lower than or equal to the current one are ignored.
    pub fn maybe_update_height(&mut self, height: TxHeight) {
        if self.last_height < height {
self.last_height = height; 63 | self.last_id = 0; 64 | } 65 | } 66 | 67 | pub fn next(&mut self) -> TxUid { 68 | let result = self.last_height as i64 * self.multiplier + self.last_id; 69 | self.last_id += 1; 70 | result 71 | } 72 | } 73 | 74 | impl 75 | TryFrom<( 76 | &SignedTransaction, 77 | &TxId, 78 | TxHeight, 79 | &TransactionMetadata, 80 | TxUid, 81 | TxBlockUid, 82 | ChainId, 83 | )> for Tx 84 | { 85 | type Error = Error; 86 | 87 | fn try_from( 88 | (tx, id, height, meta, tx_uid, block_uid, chain_id): ( 89 | &SignedTransaction, 90 | &TxId, 91 | TxHeight, 92 | &TransactionMetadata, 93 | TxUid, 94 | TxBlockUid, 95 | ChainId, 96 | ), 97 | ) -> Result { 98 | let SignedTransaction { 99 | transaction: Some(tx), 100 | proofs, 101 | } = tx else { 102 | return Err(Error::InconsistDataError(format!( 103 | "No transaction data in id={id}, height={height}", 104 | ))) 105 | }; 106 | let uid = tx_uid; 107 | let id = id.to_owned(); 108 | let proofs = proofs.iter().map(into_base58).collect::>(); 109 | let signature = proofs 110 | .get(0) 111 | .and_then(|p| (p.len() > 0).then_some(p.to_owned())); 112 | let proofs = Some(proofs); 113 | let mut status = String::from("succeeded"); 114 | 115 | if let Some( 116 | Metadata::Ethereum(EthereumMetadata { 117 | action: Some(EthAction::Invoke(ref m)), 118 | .. 
119 | }) 120 | | Metadata::InvokeScript(ref m), 121 | ) = meta.metadata 122 | { 123 | if let Some(ref result) = m.result { 124 | if let Some(_) = result.error_message { 125 | status = String::from("script_execution_failed"); 126 | } 127 | } 128 | } 129 | 130 | let sender = into_base58(&meta.sender_address); 131 | 132 | let tx = match tx { 133 | Transaction::WavesTransaction(tx) => tx, 134 | Transaction::EthereumTransaction(tx) => { 135 | let Some(Metadata::Ethereum(meta)) = &meta.metadata else { 136 | unreachable!("{WRONG_META_VAR}") 137 | }; 138 | let mut eth_tx = Tx18 { 139 | uid, 140 | height, 141 | tx_type: 18, 142 | id, 143 | time_stamp: epoch_ms_to_naivedatetime(meta.timestamp), 144 | signature, 145 | fee: meta.fee, 146 | proofs, 147 | tx_version: Some(1), 148 | sender, 149 | sender_public_key: into_base58(&meta.sender_public_key), 150 | status, 151 | bytes: tx.clone(), 152 | block_uid, 153 | function_name: None, 154 | }; 155 | let result_tx = match meta.action.as_ref().unwrap() { 156 | EthAction::Transfer(_) => Tx18Combined { 157 | tx: eth_tx, 158 | args: vec![], 159 | payments: vec![], 160 | }, 161 | EthAction::Invoke(imeta) => { 162 | eth_tx.function_name = Some(imeta.function_name.clone()); 163 | Tx18Combined { 164 | tx: eth_tx, 165 | args: imeta 166 | .arguments 167 | .iter() 168 | .filter_map(|arg| arg.value.as_ref()) 169 | .enumerate() 170 | .map(|(i, arg)| { 171 | let (v_type, v_int, v_bool, v_bin, v_str, v_list) = match &arg { 172 | InvokeScriptArgValue::IntegerValue(v) => { 173 | ("integer", Some(v.to_owned()), None, None, None, None) 174 | } 175 | InvokeScriptArgValue::BooleanValue(v) => { 176 | ("boolean", None, Some(v.to_owned()), None, None, None) 177 | } 178 | InvokeScriptArgValue::BinaryValue(v) => { 179 | ("binary", None, None, Some(v.to_owned()), None, None) 180 | } 181 | InvokeScriptArgValue::StringValue(v) => { 182 | ("string", None, None, None, Some(v.to_owned()), None) 183 | } 184 | InvokeScriptArgValue::List(_) => ( 185 | "list", 186 | 
None, 187 | None, 188 | None, 189 | None, 190 | Some( 191 | json!(DataEntryTypeValue::from(arg))["value"] 192 | .clone(), 193 | ), 194 | ), 195 | InvokeScriptArgValue::CaseObj(_) => { 196 | ("case", None, None, None, None, None) 197 | } 198 | }; 199 | Tx18Args { 200 | tx_uid, 201 | arg_type: v_type.to_string(), 202 | arg_value_integer: v_int, 203 | arg_value_boolean: v_bool, 204 | arg_value_binary: v_bin.map(into_prefixed_base64), 205 | arg_value_string: v_str.map(escape_unicode_null), 206 | arg_value_list: v_list, 207 | position_in_args: i as i16, 208 | height, 209 | } 210 | }) 211 | .collect(), 212 | payments: imeta 213 | .payments 214 | .iter() 215 | .enumerate() 216 | .map(|(i, p)| Tx18Payment { 217 | tx_uid, 218 | amount: p.amount, 219 | position_in_payment: i as i16, 220 | height, 221 | asset_id: extract_asset_id(&p.asset_id), 222 | }) 223 | .collect(), 224 | } 225 | } 226 | }; 227 | return Ok(Tx::Ethereum(result_tx)); 228 | } 229 | }; 230 | let tx_data = tx.data.as_ref().ok_or_else(|| { 231 | Error::InconsistDataError(format!( 232 | "No inner transaction data in id={id}, height={height}", 233 | )) 234 | })?; 235 | let time_stamp = epoch_ms_to_naivedatetime(tx.timestamp); 236 | let (fee, fee_asset_id) = tx 237 | .fee 238 | .as_ref() 239 | .map(|f| (f.amount, extract_asset_id(&f.asset_id))) 240 | .unwrap_or((0, WAVES_ID.to_string())); 241 | let tx_version = Some(tx.version as i16); 242 | let sender_public_key = into_base58(&tx.sender_public_key); 243 | 244 | Ok(match tx_data { 245 | Data::Genesis(t) => Tx::Genesis(Tx1 { 246 | uid, 247 | height, 248 | tx_type: 1, 249 | id, 250 | time_stamp, 251 | signature, 252 | fee, 253 | proofs, 254 | tx_version: None, 255 | sender: (sender.len() > 0).then_some(sender), 256 | sender_public_key: (sender_public_key.len() > 0).then_some(sender_public_key), 257 | status, 258 | recipient_address: Address::from(( 259 | PublicKeyHash(t.recipient_address.as_ref()), 260 | chain_id, 261 | )) 262 | .into(), 263 | recipient_alias: None, 
264 | amount: t.amount, 265 | block_uid, 266 | }), 267 | Data::Payment(t) => Tx::Payment(Tx2 { 268 | uid, 269 | height, 270 | tx_type: 2, 271 | id, 272 | time_stamp, 273 | signature, 274 | fee, 275 | proofs, 276 | tx_version: tx_version.and_then(|v| (v != 1).then_some(v)), 277 | sender, 278 | sender_public_key, 279 | status, 280 | recipient_address: Address::from(( 281 | PublicKeyHash(t.recipient_address.as_ref()), 282 | chain_id, 283 | )) 284 | .into(), 285 | recipient_alias: None, 286 | amount: t.amount, 287 | block_uid, 288 | }), 289 | Data::Issue(t) => Tx::Issue(Tx3 { 290 | uid, 291 | height, 292 | tx_type: 3, 293 | id: id.clone(), 294 | time_stamp, 295 | signature, 296 | fee, 297 | proofs, 298 | tx_version, 299 | sender, 300 | sender_public_key, 301 | status, 302 | asset_id: if id.is_empty() { 303 | WAVES_ID.to_string() 304 | } else { 305 | id 306 | }, 307 | asset_name: escape_unicode_null(&t.name), 308 | description: escape_unicode_null(&t.description), 309 | quantity: t.amount, 310 | decimals: t.decimals as i16, 311 | reissuable: t.reissuable, 312 | script: extract_script(&t.script), 313 | block_uid, 314 | }), 315 | Data::Transfer(t) => { 316 | let Some(Metadata::Transfer(meta)) = &meta.metadata else { 317 | unreachable!("{WRONG_META_VAR}") 318 | }; 319 | let Amount { asset_id, amount } = t.amount.as_ref().unwrap(); 320 | Tx::Transfer(Tx4 { 321 | uid, 322 | height, 323 | tx_type: 4, 324 | id, 325 | time_stamp, 326 | signature, 327 | fee, 328 | proofs, 329 | tx_version, 330 | sender, 331 | sender_public_key, 332 | status, 333 | asset_id: extract_asset_id(asset_id), 334 | fee_asset_id, 335 | amount: *amount, 336 | attachment: into_base58(&t.attachment), 337 | recipient_address: into_base58(&meta.recipient_address), 338 | recipient_alias: extract_recipient_alias(&t.recipient), 339 | block_uid, 340 | }) 341 | } 342 | Data::Reissue(t) => { 343 | let Amount { asset_id, amount } = t.asset_amount.as_ref().unwrap(); 344 | Tx::Reissue(Tx5 { 345 | uid, 346 | height, 
347 | tx_type: 5, 348 | id, 349 | time_stamp, 350 | signature, 351 | fee, 352 | proofs, 353 | tx_version, 354 | sender, 355 | sender_public_key, 356 | status, 357 | asset_id: extract_asset_id(asset_id), 358 | quantity: *amount, 359 | reissuable: t.reissuable, 360 | block_uid, 361 | }) 362 | } 363 | Data::Burn(t) => { 364 | let Amount { asset_id, amount } = t.asset_amount.as_ref().unwrap(); 365 | Tx::Burn(Tx6 { 366 | uid, 367 | height, 368 | tx_type: 6, 369 | id, 370 | time_stamp, 371 | signature, 372 | fee, 373 | proofs, 374 | tx_version, 375 | sender, 376 | sender_public_key, 377 | status, 378 | asset_id: extract_asset_id(asset_id), 379 | amount: *amount, 380 | block_uid, 381 | }) 382 | } 383 | Data::Exchange(t) => { 384 | let order_to_val = |o| serde_json::to_value(Order::from(o)).unwrap(); 385 | let Some(Metadata::Exchange(meta)) = &meta.metadata else { 386 | unreachable!("{WRONG_META_VAR}") 387 | }; 388 | let order_1 = OrderMeta { 389 | order: &t.orders[0], 390 | id: &meta.order_ids[0], 391 | sender_address: &meta.order_sender_addresses[0], 392 | sender_public_key: &meta.order_sender_public_keys[0], 393 | }; 394 | let order_2 = OrderMeta { 395 | order: &t.orders[1], 396 | id: &meta.order_ids[1], 397 | sender_address: &meta.order_sender_addresses[1], 398 | sender_public_key: &meta.order_sender_public_keys[1], 399 | }; 400 | let first_order_asset_pair = t.orders[0].asset_pair.as_ref().unwrap(); 401 | Tx::Exchange(Tx7 { 402 | uid, 403 | height, 404 | tx_type: 7, 405 | id, 406 | time_stamp, 407 | signature, 408 | fee, 409 | proofs, 410 | tx_version, 411 | sender, 412 | sender_public_key, 413 | status, 414 | order1: order_to_val(order_1), 415 | order2: order_to_val(order_2), 416 | amount_asset_id: extract_asset_id(&first_order_asset_pair.amount_asset_id), 417 | price_asset_id: extract_asset_id(&first_order_asset_pair.price_asset_id), 418 | amount: t.amount, 419 | price: t.price, 420 | buy_matcher_fee: t.buy_matcher_fee, 421 | sell_matcher_fee: t.sell_matcher_fee, 
422 | fee_asset_id, 423 | block_uid, 424 | }) 425 | } 426 | Data::Lease(t) => { 427 | let Some(Metadata::Lease(meta)) = &meta.metadata else { 428 | unreachable!("{WRONG_META_VAR}") 429 | }; 430 | Tx::Lease(Tx8 { 431 | uid, 432 | height, 433 | tx_type: 8, 434 | id, 435 | time_stamp, 436 | signature, 437 | fee, 438 | proofs, 439 | tx_version, 440 | sender, 441 | sender_public_key, 442 | status, 443 | amount: t.amount, 444 | recipient_address: into_base58(&meta.recipient_address), 445 | recipient_alias: extract_recipient_alias(&t.recipient), 446 | block_uid, 447 | }) 448 | } 449 | Data::LeaseCancel(t) => Tx::LeaseCancel(Tx9Partial { 450 | uid, 451 | height, 452 | tx_type: 9, 453 | id, 454 | time_stamp, 455 | signature, 456 | fee, 457 | proofs, 458 | tx_version, 459 | sender, 460 | sender_public_key, 461 | status, 462 | lease_id: if !t.lease_id.is_empty() { 463 | Some(into_base58(&t.lease_id)) 464 | } else { 465 | None 466 | }, 467 | block_uid, 468 | }), 469 | Data::CreateAlias(t) => Tx::CreateAlias(Tx10 { 470 | uid, 471 | height, 472 | tx_type: 10, 473 | id, 474 | time_stamp, 475 | signature, 476 | fee, 477 | proofs, 478 | tx_version, 479 | sender, 480 | sender_public_key, 481 | status, 482 | alias: t.alias.clone(), 483 | block_uid, 484 | }), 485 | Data::MassTransfer(t) => { 486 | let Some(Metadata::MassTransfer(meta)) = &meta.metadata else { 487 | unreachable!("{WRONG_META_VAR}") 488 | }; 489 | Tx::MassTransfer(Tx11Combined { 490 | tx: Tx11 { 491 | uid, 492 | height, 493 | tx_type: 11, 494 | id, 495 | time_stamp, 496 | signature, 497 | fee, 498 | proofs, 499 | tx_version, 500 | sender, 501 | sender_public_key, 502 | status, 503 | asset_id: extract_asset_id(&t.asset_id), 504 | attachment: into_base58(&t.attachment), 505 | block_uid, 506 | }, 507 | transfers: t 508 | .transfers 509 | .iter() 510 | .zip(&meta.recipients_addresses) 511 | .enumerate() 512 | .map(|(i, (t, rcpt_addr))| Tx11Transfers { 513 | tx_uid, 514 | recipient_address: into_base58(rcpt_addr), 515 | 
recipient_alias: extract_recipient_alias(&t.recipient), 516 | amount: t.amount, 517 | position_in_tx: i as i16, 518 | height, 519 | }) 520 | .collect(), 521 | }) 522 | } 523 | Data::DataTransaction(t) => Tx::DataTransaction(Tx12Combined { 524 | tx: Tx12 { 525 | uid, 526 | height, 527 | tx_type: 12, 528 | id, 529 | time_stamp, 530 | signature, 531 | fee, 532 | proofs, 533 | tx_version, 534 | sender, 535 | sender_public_key, 536 | status, 537 | block_uid, 538 | }, 539 | data: t 540 | .data 541 | .iter() 542 | .enumerate() 543 | .map(|(i, d)| { 544 | let (v_type, v_int, v_bool, v_bin, v_str) = match &d.value { 545 | Some(DataValue::IntValue(v)) => { 546 | (Some("integer"), Some(v.to_owned()), None, None, None) 547 | } 548 | Some(DataValue::BoolValue(v)) => { 549 | (Some("boolean"), None, Some(v.to_owned()), None, None) 550 | } 551 | Some(DataValue::BinaryValue(v)) => { 552 | (Some("binary"), None, None, Some(v.to_owned()), None) 553 | } 554 | Some(DataValue::StringValue(v)) => { 555 | (Some("string"), None, None, None, Some(v.to_owned())) 556 | } 557 | _ => (None, None, None, None, None), 558 | }; 559 | Tx12Data { 560 | tx_uid, 561 | data_key: escape_unicode_null(&d.key), 562 | data_type: v_type.map(String::from), 563 | data_value_integer: v_int, 564 | data_value_boolean: v_bool, 565 | data_value_binary: v_bin.map(into_prefixed_base64), 566 | data_value_string: v_str.map(escape_unicode_null), 567 | position_in_tx: i as i16, 568 | height, 569 | } 570 | }) 571 | .collect(), 572 | }), 573 | Data::SetScript(t) => Tx::SetScript(Tx13 { 574 | uid, 575 | height, 576 | tx_type: 13, 577 | id, 578 | time_stamp, 579 | signature, 580 | fee, 581 | proofs, 582 | tx_version, 583 | sender, 584 | sender_public_key, 585 | status, 586 | script: extract_script(&t.script), 587 | block_uid, 588 | }), 589 | Data::SponsorFee(t) => Tx::SponsorFee(Tx14 { 590 | uid, 591 | height, 592 | tx_type: 14, 593 | id, 594 | time_stamp, 595 | signature, 596 | fee, 597 | proofs, 598 | tx_version, 599 | 
sender, 600 | sender_public_key, 601 | status, 602 | asset_id: extract_asset_id(&t.min_fee.as_ref().unwrap().asset_id), 603 | min_sponsored_asset_fee: t 604 | .min_fee 605 | .as_ref() 606 | .and_then(|f| (f.amount != 0).then_some(f.amount)), 607 | block_uid, 608 | }), 609 | Data::SetAssetScript(t) => Tx::SetAssetScript(Tx15 { 610 | uid, 611 | height, 612 | tx_type: 15, 613 | id, 614 | time_stamp, 615 | signature, 616 | fee, 617 | proofs, 618 | tx_version, 619 | sender, 620 | sender_public_key, 621 | status, 622 | asset_id: extract_asset_id(&t.asset_id), 623 | script: extract_script(&t.script), 624 | block_uid, 625 | }), 626 | Data::InvokeScript(t) => { 627 | let Some(Metadata::InvokeScript(meta)) = &meta.metadata else { 628 | unreachable!("{WRONG_META_VAR}") 629 | }; 630 | Tx::InvokeScript(Tx16Combined { 631 | tx: Tx16 { 632 | uid, 633 | height, 634 | tx_type: 16, 635 | id, 636 | time_stamp, 637 | signature, 638 | fee, 639 | proofs, 640 | tx_version, 641 | sender, 642 | sender_public_key, 643 | status, 644 | function_name: Some(meta.function_name.clone()), 645 | fee_asset_id: extract_asset_id(&tx.fee.as_ref().unwrap().asset_id), 646 | dapp_address: into_base58(&meta.d_app_address), 647 | dapp_alias: extract_recipient_alias(&t.d_app), 648 | block_uid, 649 | }, 650 | args: meta 651 | .arguments 652 | .iter() 653 | .filter_map(|arg| arg.value.as_ref()) 654 | .enumerate() 655 | .map(|(i, arg)| { 656 | let (v_type, v_int, v_bool, v_bin, v_str, v_list) = match &arg { 657 | InvokeScriptArgValue::IntegerValue(v) => { 658 | ("integer", Some(v.to_owned()), None, None, None, None) 659 | } 660 | InvokeScriptArgValue::BooleanValue(v) => { 661 | ("boolean", None, Some(v.to_owned()), None, None, None) 662 | } 663 | InvokeScriptArgValue::BinaryValue(v) => { 664 | ("binary", None, None, Some(v.to_owned()), None, None) 665 | } 666 | InvokeScriptArgValue::StringValue(v) => { 667 | ("string", None, None, None, Some(v.to_owned()), None) 668 | } 669 | InvokeScriptArgValue::List(_) => ( 
670 | "list", 671 | None, 672 | None, 673 | None, 674 | None, 675 | Some(json!(DataEntryTypeValue::from(arg))["value"].clone()), 676 | ), 677 | InvokeScriptArgValue::CaseObj(_) => { 678 | ("case", None, None, None, None, None) 679 | } 680 | }; 681 | Tx16Args { 682 | tx_uid, 683 | arg_type: v_type.to_string(), 684 | arg_value_integer: v_int, 685 | arg_value_boolean: v_bool, 686 | arg_value_binary: v_bin.map(into_prefixed_base64), 687 | arg_value_string: v_str.map(escape_unicode_null), 688 | arg_value_list: v_list, 689 | position_in_args: i as i16, 690 | height, 691 | } 692 | }) 693 | .collect(), 694 | payments: t 695 | .payments 696 | .iter() 697 | .enumerate() 698 | .map(|(i, p)| Tx16Payment { 699 | tx_uid, 700 | amount: p.amount, 701 | position_in_payment: i as i16, 702 | height, 703 | asset_id: extract_asset_id(&p.asset_id), 704 | }) 705 | .collect(), 706 | }) 707 | } 708 | Data::UpdateAssetInfo(t) => Tx::UpdateAssetInfo(Tx17 { 709 | uid, 710 | height, 711 | tx_type: 17, 712 | id, 713 | time_stamp, 714 | signature, 715 | fee, 716 | proofs, 717 | tx_version, 718 | sender, 719 | sender_public_key, 720 | status, 721 | asset_id: extract_asset_id(&t.asset_id), 722 | asset_name: escape_unicode_null(&t.name), 723 | description: escape_unicode_null(&t.description), 724 | block_uid, 725 | }), 726 | Data::InvokeExpression(_t) => unimplemented!(), 727 | }) 728 | } 729 | } 730 | 731 | fn extract_recipient_alias(rcpt: &Option) -> Option { 732 | rcpt.as_ref() 733 | .map(|r| r.recipient.as_ref()) 734 | .flatten() 735 | .and_then(|r| match r { 736 | InnerRecipient::Alias(alias) if !alias.is_empty() => Some(alias.clone()), 737 | _ => None, 738 | }) 739 | } 740 | 741 | fn extract_script(script: &Vec) -> Option { 742 | if !script.is_empty() { 743 | Some(into_prefixed_base64(script)) 744 | } else { 745 | None 746 | } 747 | } 748 | -------------------------------------------------------------------------------- /src/lib/consumer/mod.rs: 
-------------------------------------------------------------------------------- 1 | pub mod models; 2 | pub mod repo; 3 | pub mod updates; 4 | 5 | use anyhow::{Error, Result}; 6 | use bigdecimal::BigDecimal; 7 | use chrono::{DateTime, Duration, NaiveDateTime, Utc}; 8 | use itertools::Itertools; 9 | use std::collections::HashMap; 10 | use std::sync::Mutex; 11 | use std::time::Instant; 12 | use tokio::sync::mpsc::Receiver; 13 | use waves_protobuf_schemas::waves::{ 14 | data_entry::Value, 15 | events::{transaction_metadata::Metadata, StateUpdate, TransactionMetadata}, 16 | signed_transaction::Transaction, 17 | SignedTransaction, Transaction as WavesTx, 18 | }; 19 | use wavesexchange_log::{debug, info, timer}; 20 | 21 | use self::models::{asset_tickers::InsertableAssetTicker, block_microblock::BlockMicroblock}; 22 | use self::models::{ 23 | asset_tickers::{AssetTickerOverride, DeletedAssetTicker}, 24 | assets::{AssetOrigin, AssetOverride, AssetUpdate, DeletedAsset}, 25 | }; 26 | use self::repo::RepoOperations; 27 | use crate::error::Error as AppError; 28 | use crate::models::BaseAssetInfoUpdate; 29 | use crate::waves::{extract_asset_id, Address}; 30 | use crate::{config::consumer::Config, utils::into_base58}; 31 | use crate::{ 32 | consumer::models::{ 33 | txs::convert::{Tx as ConvertedTx, TxUidGenerator}, 34 | waves_data::WavesData, 35 | }, 36 | utils::{epoch_ms_to_naivedatetime, escape_unicode_null}, 37 | waves::WAVES_ID, 38 | }; 39 | use fragstrings::frag_parse; 40 | 41 | static UID_GENERATOR: Mutex = Mutex::new(TxUidGenerator::new(100000)); 42 | 43 | #[derive(Clone, Debug)] 44 | pub enum BlockchainUpdate { 45 | Block(BlockMicroblockAppend), 46 | Microblock(BlockMicroblockAppend), 47 | Rollback(String), 48 | } 49 | 50 | #[derive(Clone, Debug)] 51 | pub struct BlockMicroblockAppend { 52 | id: String, 53 | time_stamp: Option, 54 | height: i32, 55 | updated_waves_amount: Option, 56 | txs: Vec, 57 | } 58 | 59 | #[derive(Clone, Debug)] 60 | pub struct Tx { 61 | pub id: 
String, 62 | pub data: SignedTransaction, 63 | pub meta: TransactionMetadata, 64 | pub state_update: StateUpdate, 65 | } 66 | 67 | #[derive(Debug)] 68 | pub struct BlockchainUpdatesWithLastHeight { 69 | pub last_height: u32, 70 | pub updates: Vec, 71 | } 72 | 73 | #[derive(Debug, Queryable, Clone, Copy)] 74 | pub struct UidHeight { 75 | pub uid: i64, 76 | pub height: i32, 77 | } 78 | 79 | #[derive(Debug)] 80 | enum UpdatesItem { 81 | Blocks(Vec), 82 | Microblock(BlockMicroblockAppend), 83 | Rollback(String), 84 | } 85 | 86 | #[derive(Debug)] 87 | pub struct AssetTickerUpdate { 88 | pub asset_id: String, 89 | pub ticker: String, 90 | } 91 | 92 | #[async_trait::async_trait] 93 | pub trait UpdatesSource { 94 | async fn stream( 95 | self, 96 | from_height: u32, 97 | batch_max_size: usize, 98 | batch_max_time: Duration, 99 | ) -> Result, AppError>; 100 | } 101 | 102 | // TODO: handle shutdown signals -> rollback current transaction 103 | pub async fn start(updates_src: T, repo: R, config: Config) -> Result<()> 104 | where 105 | T: UpdatesSource + Send + 'static, 106 | R: repo::Repo + Clone + Send + 'static, 107 | { 108 | let Config { 109 | assets_only, 110 | chain_id, 111 | max_wait_time, 112 | starting_height, 113 | updates_per_request, 114 | asset_storage_address, 115 | start_rollback_depth, 116 | rollback_step, 117 | .. 118 | } = config; 119 | 120 | let asset_storage_address: Option<&'static str> = 121 | asset_storage_address.map(|a| &*Box::leak(a.into_boxed_str())); 122 | let starting_from_height = { 123 | repo.transaction(move |ops| { 124 | match ops.get_blocks_rollback_to(start_rollback_depth, rollback_step) { 125 | Ok(Some(rollback_blocks)) => { 126 | rollback(ops, &rollback_blocks, assets_only)?; 127 | Ok(rollback_blocks 128 | .last() 129 | .map(|height| height.height as u32 + 1) 130 | .unwrap_or(starting_height)) 131 | } 132 | Ok(None) => Ok(starting_height), 133 | Err(e) => Err(e), 134 | } 135 | }) 136 | .await? 
137 | }; 138 | 139 | info!( 140 | "Start fetching updates from height {} (by {} block(s) back)", 141 | starting_from_height, start_rollback_depth 142 | ); 143 | 144 | let mut rx = updates_src 145 | .stream(starting_from_height, updates_per_request, max_wait_time) 146 | .await?; 147 | 148 | loop { 149 | let mut start = Instant::now(); 150 | 151 | let updates_with_height = rx.recv().await.ok_or_else(|| { 152 | Error::new(AppError::StreamClosed( 153 | "GRPC Stream was closed by the server".to_string(), 154 | )) 155 | })?; 156 | 157 | let updates_count = updates_with_height.updates.len(); 158 | info!( 159 | "{} updates were received in {:?}", 160 | updates_count, 161 | start.elapsed() 162 | ); 163 | 164 | let last_height = updates_with_height.last_height; 165 | 166 | start = Instant::now(); 167 | 168 | repo.transaction(move |ops| { 169 | handle_updates( 170 | updates_with_height, 171 | ops, 172 | chain_id, 173 | assets_only, 174 | asset_storage_address, 175 | )?; 176 | 177 | info!( 178 | "{} updates were saved to database in {:?}. 
Last height is {}.", 179 | updates_count, 180 | start.elapsed(), 181 | last_height, 182 | ); 183 | 184 | Ok(()) 185 | }) 186 | .await?; 187 | } 188 | } 189 | 190 | fn handle_updates( 191 | updates_with_height: BlockchainUpdatesWithLastHeight, 192 | repo: &mut R, 193 | chain_id: u8, 194 | assets_only: bool, 195 | asset_storage_address: Option<&str>, 196 | ) -> Result<()> { 197 | updates_with_height 198 | .updates 199 | .into_iter() 200 | .fold(&mut Vec::::new(), |acc, cur| match cur { 201 | BlockchainUpdate::Block(b) => { 202 | info!("Handle block {}, height = {}", b.id, b.height); 203 | let len = acc.len(); 204 | if len > 0 { 205 | match acc.get_mut(len as usize - 1).unwrap() { 206 | UpdatesItem::Blocks(v) => { 207 | v.push(b); 208 | acc 209 | } 210 | UpdatesItem::Microblock(_) | UpdatesItem::Rollback(_) => { 211 | acc.push(UpdatesItem::Blocks(vec![b])); 212 | acc 213 | } 214 | } 215 | } else { 216 | acc.push(UpdatesItem::Blocks(vec![b])); 217 | acc 218 | } 219 | } 220 | BlockchainUpdate::Microblock(mba) => { 221 | info!("Handle microblock {}, height = {}", mba.id, mba.height); 222 | acc.push(UpdatesItem::Microblock(mba)); 223 | acc 224 | } 225 | BlockchainUpdate::Rollback(sig) => { 226 | info!("Handle rollback to {}", sig); 227 | acc.push(UpdatesItem::Rollback(sig)); 228 | acc 229 | } 230 | }) 231 | .into_iter() 232 | .try_fold((), |_, update_item| match update_item { 233 | UpdatesItem::Blocks(ba) => { 234 | squash_microblocks(repo, assets_only)?; 235 | handle_appends(repo, chain_id, ba, assets_only, asset_storage_address) 236 | } 237 | UpdatesItem::Microblock(mba) => handle_appends( 238 | repo, 239 | chain_id, 240 | &vec![mba.to_owned()], 241 | assets_only, 242 | asset_storage_address, 243 | ), 244 | UpdatesItem::Rollback(sig) => { 245 | let block = repo.get_block_uid_height(sig)?; 246 | rollback(repo, &[block], assets_only) 247 | } 248 | })?; 249 | 250 | Ok(()) 251 | } 252 | 253 | fn handle_appends( 254 | repo: &mut R, 255 | chain_id: u8, 256 | appends: &Vec, 
257 | assets_only: bool, 258 | asset_storage_address: Option<&str>, 259 | ) -> Result<()> 260 | where 261 | R: RepoOperations, 262 | { 263 | let block_uids = repo.insert_blocks_or_microblocks( 264 | &appends 265 | .into_iter() 266 | .map(|append| BlockMicroblock { 267 | id: append.id.clone(), 268 | height: append.height as i32, 269 | time_stamp: append.time_stamp, 270 | }) 271 | .collect_vec(), 272 | )?; 273 | 274 | let block_uids_with_appends = block_uids.into_iter().zip(appends).collect_vec(); 275 | 276 | timer!("blockchain updates handling"); 277 | 278 | let base_asset_info_updates_with_block_uids: Vec<(i64, BaseAssetInfoUpdate)> = 279 | block_uids_with_appends 280 | .iter() 281 | .flat_map(|(block_uid, append)| { 282 | extract_base_asset_info_updates(chain_id, append) 283 | .into_iter() 284 | .map(|au| (*block_uid, au)) 285 | .collect_vec() 286 | }) 287 | .collect(); 288 | 289 | let inserted_uids = 290 | handle_base_asset_info_updates(repo, &base_asset_info_updates_with_block_uids)?; 291 | 292 | let updates_amount = base_asset_info_updates_with_block_uids.len(); 293 | 294 | if let Some(uids) = inserted_uids { 295 | assert_eq!(uids.len(), base_asset_info_updates_with_block_uids.len()); 296 | let asset_origins = uids 297 | .into_iter() 298 | .zip(base_asset_info_updates_with_block_uids) 299 | .map(|(uid, (_, au))| AssetOrigin { 300 | asset_id: au.id, 301 | first_asset_update_uid: uid, 302 | origin_transaction_id: au.tx_id, 303 | issuer: au.issuer, 304 | issue_height: au.update_height, 305 | issue_time_stamp: au.updated_at.naive_utc(), 306 | }) 307 | .collect_vec(); 308 | 309 | assert_eq!(asset_origins.len(), updates_amount); 310 | repo.insert_asset_origins(&asset_origins)?; 311 | } 312 | 313 | info!("handled {} assets updates", updates_amount); 314 | 315 | if !assets_only { 316 | handle_txs(repo, &block_uids_with_appends, chain_id)?; 317 | 318 | let waves_data = appends 319 | .into_iter() 320 | .filter_map(|append| { 321 | append.updated_waves_amount.map(|reward| 
WavesData { 322 | height: append.height, 323 | quantity: BigDecimal::from(reward), 324 | }) 325 | }) 326 | .collect_vec(); 327 | 328 | if waves_data.len() > 0 { 329 | repo.insert_waves_data(&waves_data)?; 330 | } 331 | } 332 | 333 | if let Some(storage_addr) = asset_storage_address { 334 | timer!("handling asset tickers updates"); 335 | let asset_tickers_updates_with_block_uids: Vec<(&i64, AssetTickerUpdate)> = 336 | block_uids_with_appends 337 | .iter() 338 | .flat_map(|(block_uid, append)| { 339 | append 340 | .txs 341 | .iter() 342 | .flat_map(|tx| extract_asset_tickers_updates(tx, storage_addr)) 343 | .map(|u| (block_uid, u)) 344 | .collect_vec() 345 | }) 346 | .collect(); 347 | 348 | handle_asset_tickers_updates(repo, &asset_tickers_updates_with_block_uids)?; 349 | 350 | info!( 351 | "handled {} asset tickers updates", 352 | asset_tickers_updates_with_block_uids.len() 353 | ); 354 | } 355 | 356 | Ok(()) 357 | } 358 | 359 | fn handle_txs( 360 | repo: &mut R, 361 | block_uid_data: &Vec<(i64, &BlockMicroblockAppend)>, 362 | chain_id: u8, 363 | ) -> Result<(), Error> { 364 | let mut txs_1 = vec![]; 365 | let mut txs_2 = vec![]; 366 | let mut txs_3 = vec![]; 367 | let mut txs_4 = vec![]; 368 | let mut txs_5 = vec![]; 369 | let mut txs_6 = vec![]; 370 | let mut txs_7 = vec![]; 371 | let mut txs_8 = vec![]; 372 | let mut txs_9 = vec![]; 373 | let mut txs_10 = vec![]; 374 | let mut txs_11 = vec![]; 375 | let mut txs_12 = vec![]; 376 | let mut txs_13 = vec![]; 377 | let mut txs_14 = vec![]; 378 | let mut txs_15 = vec![]; 379 | let mut txs_16 = vec![]; 380 | let mut txs_17 = vec![]; 381 | let mut txs_18 = vec![]; 382 | 383 | let txs_count = block_uid_data 384 | .iter() 385 | .fold(0usize, |txs, (_, block)| txs + block.txs.len()); 386 | info!("handling {} transactions", txs_count); 387 | 388 | let mut first_block_with_tx7_uid = None::; 389 | 390 | let mut ugen = UID_GENERATOR.lock().unwrap(); 391 | for &(block_uid, bm) in block_uid_data { 392 | 
ugen.maybe_update_height(bm.height); 393 | 394 | for tx in &bm.txs { 395 | let tx_uid = ugen.next(); 396 | let result_tx = ConvertedTx::try_from(( 397 | &tx.data, &tx.id, bm.height, &tx.meta, tx_uid, block_uid, chain_id, 398 | ))?; 399 | match result_tx { 400 | ConvertedTx::Genesis(t) => txs_1.push(t), 401 | ConvertedTx::Payment(t) => txs_2.push(t), 402 | ConvertedTx::Issue(t) => txs_3.push(t), 403 | ConvertedTx::Transfer(t) => txs_4.push(t), 404 | ConvertedTx::Reissue(t) => txs_5.push(t), 405 | ConvertedTx::Burn(t) => txs_6.push(t), 406 | ConvertedTx::Exchange(t) => { 407 | if first_block_with_tx7_uid.is_none() { 408 | first_block_with_tx7_uid = Some(block_uid); 409 | } 410 | txs_7.push(t); 411 | } 412 | ConvertedTx::Lease(t) => txs_8.push(t), 413 | ConvertedTx::LeaseCancel(t) => txs_9.push(t), 414 | ConvertedTx::CreateAlias(t) => txs_10.push(t), 415 | ConvertedTx::MassTransfer(t) => txs_11.push(t), 416 | ConvertedTx::DataTransaction(t) => txs_12.push(t), 417 | ConvertedTx::SetScript(t) => txs_13.push(t), 418 | ConvertedTx::SponsorFee(t) => txs_14.push(t), 419 | ConvertedTx::SetAssetScript(t) => txs_15.push(t), 420 | ConvertedTx::InvokeScript(t) => txs_16.push(t), 421 | ConvertedTx::UpdateAssetInfo(t) => txs_17.push(t), 422 | ConvertedTx::Ethereum(t) => txs_18.push(t), 423 | } 424 | } 425 | } 426 | 427 | #[inline] 428 | fn insert_txs(txs: Vec, mut inserter: F) -> Result<()> 429 | where 430 | T: 'static, 431 | F: FnMut(Vec) -> Result<()>, 432 | { 433 | if !txs.is_empty() { 434 | inserter(txs)?; 435 | } 436 | Ok(()) 437 | } 438 | 439 | insert_txs(txs_1, |txs| repo.insert_txs_1(txs))?; 440 | insert_txs(txs_2, |txs| repo.insert_txs_2(txs))?; 441 | insert_txs(txs_3, |txs| repo.insert_txs_3(txs))?; 442 | insert_txs(txs_4, |txs| repo.insert_txs_4(txs))?; 443 | insert_txs(txs_5, |txs| repo.insert_txs_5(txs))?; 444 | insert_txs(txs_6, |txs| repo.insert_txs_6(txs))?; 445 | insert_txs(txs_7, |txs| repo.insert_txs_7(txs))?; 446 | insert_txs(txs_8, |txs| 
repo.insert_txs_8(txs))?; 447 | insert_txs(txs_9, |txs| repo.insert_txs_9(txs))?; 448 | insert_txs(txs_10, |txs| repo.insert_txs_10(txs))?; 449 | insert_txs(txs_11, |txs| repo.insert_txs_11(txs))?; 450 | insert_txs(txs_12, |txs| repo.insert_txs_12(txs))?; 451 | insert_txs(txs_13, |txs| repo.insert_txs_13(txs))?; 452 | insert_txs(txs_14, |txs| repo.insert_txs_14(txs))?; 453 | insert_txs(txs_15, |txs| repo.insert_txs_15(txs))?; 454 | insert_txs(txs_16, |txs| repo.insert_txs_16(txs))?; 455 | insert_txs(txs_17, |txs| repo.insert_txs_17(txs))?; 456 | insert_txs(txs_18, |txs| repo.insert_txs_18(txs))?; 457 | 458 | info!("{} transactions handled", txs_count); 459 | 460 | if let Some(block_uid) = first_block_with_tx7_uid { 461 | timer!("calculating candles"); 462 | 463 | repo.calculate_candles_since_block_uid(block_uid)?; 464 | } 465 | 466 | Ok(()) 467 | } 468 | 469 | fn extract_base_asset_info_updates( 470 | chain_id: u8, 471 | append: &BlockMicroblockAppend, 472 | ) -> Vec { 473 | let mut asset_updates = vec![]; 474 | 475 | let mut updates_from_txs = append 476 | .txs 477 | .iter() 478 | .flat_map(|tx: &Tx| { 479 | tx.state_update 480 | .assets 481 | .iter() 482 | .filter_map(|asset_update| { 483 | if let Some(asset_details) = &asset_update.after { 484 | let asset_id = extract_asset_id(&asset_details.asset_id); 485 | 486 | if asset_id == WAVES_ID { 487 | return None; 488 | } 489 | 490 | let time_stamp = match tx.data.transaction.as_ref() { 491 | Some(stx) => match stx { 492 | Transaction::WavesTransaction(WavesTx { timestamp, .. 
}) => { 493 | let dt = epoch_ms_to_naivedatetime(*timestamp); 494 | DateTime::from_naive_utc_and_offset(dt, Utc) 495 | } 496 | Transaction::EthereumTransaction(_) => { 497 | if let Some(Metadata::Ethereum(meta)) = &tx.meta.metadata { 498 | let dt = epoch_ms_to_naivedatetime(meta.timestamp); 499 | DateTime::from_naive_utc_and_offset(dt, Utc) 500 | } else { 501 | unreachable!("wrong meta variant") 502 | } 503 | } 504 | }, 505 | _ => Utc::now(), 506 | }; 507 | 508 | let issuer = 509 | Address::from((asset_details.issuer.as_slice(), chain_id)).into(); 510 | Some(BaseAssetInfoUpdate { 511 | update_height: append.height as i32, 512 | updated_at: time_stamp, 513 | id: asset_id, 514 | name: escape_unicode_null(&asset_details.name), 515 | description: escape_unicode_null(&asset_details.description), 516 | issuer, 517 | precision: asset_details.decimals, 518 | script: asset_details.script_info.clone().map(|s| s.script), 519 | nft: asset_details.nft, 520 | reissuable: asset_details.reissuable, 521 | min_sponsored_fee: if asset_details.sponsorship > 0 { 522 | Some(asset_details.sponsorship) 523 | } else { 524 | None 525 | }, 526 | quantity: asset_details.volume.to_owned(), 527 | tx_id: tx.id.clone(), 528 | }) 529 | } else { 530 | None 531 | } 532 | }) 533 | .collect_vec() 534 | }) 535 | .collect_vec(); 536 | 537 | asset_updates.append(&mut updates_from_txs); 538 | asset_updates 539 | } 540 | 541 | fn extract_asset_tickers_updates(tx: &Tx, asset_storage_address: &str) -> Vec { 542 | tx.state_update 543 | .data_entries 544 | .iter() 545 | .filter_map(|data_entry_update| { 546 | data_entry_update.data_entry.as_ref().and_then(|de| { 547 | if asset_storage_address == into_base58(&data_entry_update.address) 548 | && de.key.starts_with("%s%s__assetId2ticker__") 549 | { 550 | match de.value.as_ref() { 551 | Some(value) => match value { 552 | Value::StringValue(value) => { 553 | frag_parse!("%s%s", de.key).map(|(_, asset_id)| AssetTickerUpdate { 554 | asset_id: asset_id, 555 | ticker: 
value.clone(), 556 | }) 557 | } 558 | _ => None, 559 | }, 560 | // key was deleted -> drop asset ticker 561 | None => { 562 | frag_parse!("%s%s", de.key).map(|(_, asset_id)| AssetTickerUpdate { 563 | asset_id, 564 | ticker: "".into(), 565 | }) 566 | } 567 | } 568 | } else { 569 | None 570 | } 571 | }) 572 | }) 573 | .collect_vec() 574 | } 575 | 576 | fn handle_base_asset_info_updates( 577 | repo: &mut R, 578 | updates: &[(i64, BaseAssetInfoUpdate)], 579 | ) -> Result>> { 580 | if updates.is_empty() { 581 | return Ok(None); 582 | } 583 | 584 | let updates_count = updates.len(); 585 | let assets_next_uid = repo.get_next_assets_uid()?; 586 | 587 | #[allow(deprecated)] // for base64::encode() 588 | let asset_updates = updates 589 | .iter() 590 | .enumerate() 591 | .map(|(update_idx, (block_uid, update))| AssetUpdate { 592 | uid: assets_next_uid + update_idx as i64, 593 | superseded_by: -1, 594 | block_uid: *block_uid, 595 | asset_id: update.id.clone(), 596 | name: update.name.clone(), 597 | description: update.description.clone(), 598 | nft: update.nft, 599 | reissuable: update.reissuable, 600 | decimals: update.precision as i16, 601 | script: update.script.clone().map(base64::encode), 602 | sponsorship: update.min_sponsored_fee, 603 | volume: update.quantity, 604 | }) 605 | .collect_vec(); 606 | 607 | let mut assets_grouped: HashMap> = HashMap::new(); 608 | 609 | asset_updates.into_iter().for_each(|update| { 610 | let group = assets_grouped.entry(update.clone()).or_insert(vec![]); 611 | group.push(update); 612 | }); 613 | 614 | let assets_grouped = assets_grouped.into_iter().collect_vec(); 615 | 616 | let assets_grouped_with_uids_superseded_by = assets_grouped 617 | .into_iter() 618 | .map(|(group_key, group)| { 619 | let mut updates = group 620 | .into_iter() 621 | .sorted_by_key(|item| item.uid) 622 | .collect::>(); 623 | 624 | let mut last_uid = std::i64::MAX - 1; 625 | ( 626 | group_key, 627 | updates 628 | .as_mut_slice() 629 | .iter_mut() 630 | .rev() 631 | 
.map(|cur| { 632 | cur.superseded_by = last_uid; 633 | last_uid = cur.uid; 634 | cur.to_owned() 635 | }) 636 | .sorted_by_key(|item| item.uid) 637 | .collect(), 638 | ) 639 | }) 640 | .collect::)>>(); 641 | 642 | let assets_first_uids: Vec = assets_grouped_with_uids_superseded_by 643 | .iter() 644 | .map(|(_, group)| { 645 | let first = group.iter().next().unwrap().clone(); 646 | AssetOverride { 647 | superseded_by: first.uid, 648 | id: first.asset_id, 649 | } 650 | }) 651 | .collect(); 652 | 653 | repo.close_assets_superseded_by(&assets_first_uids)?; 654 | 655 | let assets_with_uids_superseded_by = &assets_grouped_with_uids_superseded_by 656 | .into_iter() 657 | .flat_map(|(_, v)| v) 658 | .sorted_by_key(|asset| asset.uid) 659 | .collect_vec(); 660 | 661 | repo.insert_asset_updates(assets_with_uids_superseded_by)?; 662 | repo.set_assets_next_update_uid(assets_next_uid + updates_count as i64)?; 663 | 664 | Ok(Some( 665 | assets_with_uids_superseded_by 666 | .into_iter() 667 | .map(|a| a.uid) 668 | .collect_vec(), 669 | )) 670 | } 671 | 672 | fn handle_asset_tickers_updates( 673 | repo: &mut R, 674 | updates: &[(&i64, AssetTickerUpdate)], 675 | ) -> Result<()> { 676 | if updates.is_empty() { 677 | return Ok(()); 678 | } 679 | 680 | let updates_count = updates.len(); 681 | 682 | let asset_tickers_next_uid = repo.get_next_asset_tickers_uid()?; 683 | 684 | let asset_tickers_updates = updates 685 | .iter() 686 | .enumerate() 687 | .map( 688 | |(update_idx, (block_uid, tickers_update))| InsertableAssetTicker { 689 | uid: asset_tickers_next_uid + update_idx as i64, 690 | superseded_by: -1, 691 | block_uid: **block_uid, 692 | asset_id: tickers_update.asset_id.clone(), 693 | ticker: tickers_update.ticker.clone(), 694 | }, 695 | ) 696 | .collect_vec(); 697 | 698 | let mut asset_tickers_grouped: HashMap> = 699 | HashMap::new(); 700 | 701 | asset_tickers_updates.into_iter().for_each(|update| { 702 | let group = asset_tickers_grouped 703 | .entry(update.clone()) 704 | 
.or_insert(vec![]); 705 | group.push(update); 706 | }); 707 | 708 | let asset_tickers_grouped = asset_tickers_grouped.into_iter().collect_vec(); 709 | 710 | let asset_tickers_grouped_with_uids_superseded_by = asset_tickers_grouped 711 | .into_iter() 712 | .map(|(group_key, group)| { 713 | let mut updates = group 714 | .into_iter() 715 | .sorted_by_key(|item| item.uid) 716 | .collect::>(); 717 | 718 | let mut last_uid = std::i64::MAX - 1; 719 | ( 720 | group_key, 721 | updates 722 | .as_mut_slice() 723 | .iter_mut() 724 | .rev() 725 | .map(|cur| { 726 | cur.superseded_by = last_uid; 727 | last_uid = cur.uid; 728 | cur.to_owned() 729 | }) 730 | .sorted_by_key(|item| item.uid) 731 | .collect(), 732 | ) 733 | }) 734 | .collect::)>>(); 735 | 736 | let asset_tickers_first_uids: Vec = 737 | asset_tickers_grouped_with_uids_superseded_by 738 | .iter() 739 | .map(|(_, group)| { 740 | let first = group.iter().next().unwrap().clone(); 741 | AssetTickerOverride { 742 | superseded_by: first.uid, 743 | asset_id: first.asset_id, 744 | } 745 | }) 746 | .collect(); 747 | 748 | repo.close_asset_tickers_superseded_by(&asset_tickers_first_uids)?; 749 | 750 | let asset_tickers_with_uids_superseded_by = &asset_tickers_grouped_with_uids_superseded_by 751 | .clone() 752 | .into_iter() 753 | .flat_map(|(_, v)| v) 754 | .sorted_by_key(|asset_tickers| asset_tickers.uid) 755 | .collect_vec(); 756 | 757 | repo.insert_asset_tickers(asset_tickers_with_uids_superseded_by)?; 758 | 759 | repo.set_asset_tickers_next_update_uid(asset_tickers_next_uid + updates_count as i64) 760 | } 761 | 762 | fn squash_microblocks(repo: &mut R, assets_only: bool) -> Result<()> { 763 | let last_microblock_id = repo.get_total_block_id()?; 764 | 765 | if let Some(lmid) = last_microblock_id { 766 | let last_block_uid = repo.get_key_block_uid()?; 767 | 768 | debug!( 769 | "squashing into block_uid = {}, new block_id = {}", 770 | last_block_uid, lmid 771 | ); 772 | 773 | 
repo.update_assets_block_references(last_block_uid)?; 774 | repo.update_asset_tickers_block_references(last_block_uid)?; 775 | 776 | if !assets_only { 777 | repo.update_transactions_references(last_block_uid)?; 778 | } 779 | 780 | repo.delete_microblocks()?; 781 | repo.change_block_id(last_block_uid, &lmid)?; 782 | } 783 | 784 | Ok(()) 785 | } 786 | 787 | pub fn rollback( 788 | repo: &mut R, 789 | blocks: &[UidHeight], 790 | assets_only: bool, 791 | ) -> Result<()> { 792 | if let Some(b) = blocks.last() { 793 | debug!( 794 | "initiating sequenced rollback to block_uid = {}, height = {}", 795 | b.uid, b.height 796 | ); 797 | } 798 | 799 | for &block in blocks { 800 | let UidHeight { uid, height } = block; 801 | 802 | debug!("rolling back to block_uid = {}, height = {}", uid, height); 803 | 804 | rollback_assets(repo, uid)?; 805 | rollback_asset_tickers(repo, uid)?; 806 | 807 | if !assets_only { 808 | repo.rollback_transactions(uid)?; 809 | rollback_candles(repo, uid)?; 810 | } 811 | 812 | repo.rollback_blocks_microblocks(uid)?; 813 | } 814 | Ok(()) 815 | } 816 | 817 | fn rollback_assets(repo: &mut R, block_uid: i64) -> Result<()> { 818 | let deleted = repo.rollback_assets(block_uid)?; 819 | 820 | let mut grouped_deleted: HashMap> = HashMap::new(); 821 | 822 | deleted.into_iter().for_each(|item| { 823 | let group = grouped_deleted.entry(item.clone()).or_insert(vec![]); 824 | group.push(item); 825 | }); 826 | 827 | let lowest_deleted_uids: Vec = grouped_deleted 828 | .into_iter() 829 | .filter_map(|(_, group)| group.into_iter().min_by_key(|i| i.uid).map(|i| i.uid)) 830 | .collect(); 831 | 832 | repo.reopen_assets_superseded_by(&lowest_deleted_uids) 833 | } 834 | 835 | fn rollback_asset_tickers(repo: &mut R, block_uid: i64) -> Result<()> { 836 | let deleted = repo.rollback_asset_tickers(&block_uid)?; 837 | 838 | let mut grouped_deleted: HashMap> = HashMap::new(); 839 | 840 | deleted.into_iter().for_each(|item| { 841 | let group = 
grouped_deleted.entry(item.clone()).or_insert(vec![]); 842 | group.push(item); 843 | }); 844 | 845 | let lowest_deleted_uids: Vec = grouped_deleted 846 | .into_iter() 847 | .filter_map(|(_, group)| group.into_iter().min_by_key(|i| i.uid).map(|i| i.uid)) 848 | .collect(); 849 | 850 | repo.reopen_asset_tickers_superseded_by(&lowest_deleted_uids) 851 | } 852 | 853 | fn rollback_candles(repo: &mut R, block_uid: i64) -> Result<()> { 854 | repo.rollback_candles(block_uid)?; 855 | repo.calculate_candles_since_block_uid(block_uid) 856 | } 857 | -------------------------------------------------------------------------------- /migrations/2022-04-27-111623_initial/up.sql: -------------------------------------------------------------------------------- 1 | SET client_encoding = 'UTF8'; 2 | SET standard_conforming_strings = on; 3 | SET xmloption = content; 4 | SET client_min_messages = warning; 5 | SET row_security = off; 6 | 7 | CREATE EXTENSION IF NOT EXISTS btree_gin WITH SCHEMA public; 8 | COMMENT ON EXTENSION btree_gin IS 'support for indexing common datatypes in GIN'; 9 | 10 | CREATE EXTENSION IF NOT EXISTS btree_gist; 11 | 12 | CREATE TABLE IF NOT EXISTS blocks_microblocks ( 13 | uid BIGINT UNIQUE GENERATED BY DEFAULT AS IDENTITY NOT NULL, 14 | id VARCHAR NOT NULL PRIMARY KEY, 15 | height INTEGER NOT NULL, 16 | time_stamp TIMESTAMPTZ 17 | ); 18 | 19 | CREATE TABLE IF NOT EXISTS asset_updates ( 20 | block_uid BIGINT NOT NULL REFERENCES blocks_microblocks(uid), 21 | uid BIGINT UNIQUE GENERATED BY DEFAULT AS IDENTITY NOT NULL, 22 | superseded_by BIGINT NOT NULL, 23 | asset_id VARCHAR NOT NULL, 24 | decimals SMALLINT NOT NULL, 25 | name VARCHAR NOT NULL, 26 | description VARCHAR NOT NULL, 27 | reissuable bool NOT NULL, 28 | volume BIGINT NOT NULL, 29 | script VARCHAR, 30 | sponsorship int8, 31 | nft bool NOT NULL, 32 | 33 | PRIMARY KEY (superseded_by, asset_id) 34 | ); 35 | 36 | CREATE TABLE IF NOT EXISTS asset_origins ( 37 | asset_id VARCHAR NOT NULL PRIMARY KEY, 38 | 
first_asset_update_uid BIGINT NOT NULL REFERENCES asset_updates(uid) ON DELETE CASCADE, 39 | origin_transaction_id VARCHAR NOT NULL, 40 | issuer VARCHAR NOT NULL, 41 | issue_height INTEGER NOT NULL, 42 | issue_time_stamp TIMESTAMPTZ NOT NULL 43 | ); 44 | 45 | CREATE TABLE IF NOT EXISTS txs ( 46 | uid BIGINT NOT NULL, 47 | tx_type SMALLINT NOT NULL, 48 | sender VARCHAR, 49 | sender_public_key VARCHAR, 50 | time_stamp TIMESTAMP WITH TIME ZONE NOT NULL, 51 | height INTEGER NOT NULL, 52 | id VARCHAR NOT NULL, 53 | signature VARCHAR, 54 | proofs TEXT[], 55 | tx_version SMALLINT, 56 | fee BIGINT NOT NULL, 57 | status VARCHAR DEFAULT 'succeeded' NOT NULL, 58 | block_uid BIGINT NOT NULL, 59 | 60 | CONSTRAINT txs_pk_uid_id_time_stamp PRIMARY KEY (uid, id, time_stamp), 61 | CONSTRAINT fk_blocks_uid FOREIGN KEY (block_uid) REFERENCES blocks_microblocks(uid) 62 | ); 63 | 64 | CREATE TABLE IF NOT EXISTS txs_1 ( 65 | recipient_address VARCHAR NOT NULL, 66 | recipient_alias VARCHAR, 67 | amount BIGINT NOT NULL, 68 | 69 | CONSTRAINT txs_1_pk_uid PRIMARY KEY (uid), 70 | CONSTRAINT fk_blocks_uid FOREIGN KEY (block_uid) REFERENCES blocks_microblocks(uid) 71 | ) 72 | INHERITS (txs); 73 | 74 | CREATE TABLE IF NOT EXISTS txs_2 ( 75 | sender VARCHAR NOT NULL, 76 | sender_public_key VARCHAR NOT NULL, 77 | recipient_address VARCHAR NOT NULL, 78 | recipient_alias VARCHAR, 79 | amount BIGINT NOT NULL, 80 | 81 | CONSTRAINT txs_2_pk_uid PRIMARY KEY (uid), 82 | CONSTRAINT fk_blocks_uid FOREIGN KEY (block_uid) REFERENCES blocks_microblocks(uid) 83 | ) 84 | INHERITS (txs); 85 | 86 | CREATE TABLE IF NOT EXISTS txs_3 ( 87 | sender VARCHAR NOT NULL, 88 | sender_public_key VARCHAR NOT NULL, 89 | asset_id VARCHAR NOT NULL, 90 | asset_name VARCHAR NOT NULL, 91 | description VARCHAR NOT NULL, 92 | quantity BIGINT NOT NULL, 93 | decimals SMALLINT NOT NULL, 94 | reissuable BOOLEAN NOT NULL, 95 | script VARCHAR, 96 | 97 | CONSTRAINT txs_3_pk_uid PRIMARY KEY (uid), 98 | CONSTRAINT fk_blocks_uid FOREIGN KEY 
(block_uid) REFERENCES blocks_microblocks(uid) 99 | ) 100 | INHERITS (txs); 101 | 102 | CREATE TABLE IF NOT EXISTS txs_4 ( 103 | sender VARCHAR NOT NULL, 104 | sender_public_key VARCHAR NOT NULL, 105 | asset_id VARCHAR NOT NULL, 106 | amount BIGINT NOT NULL, 107 | recipient_address VARCHAR NOT NULL, 108 | recipient_alias VARCHAR, 109 | fee_asset_id VARCHAR NOT NULL, 110 | attachment VARCHAR NOT NULL, 111 | 112 | CONSTRAINT txs_4_pk_uid PRIMARY KEY (uid), 113 | CONSTRAINT fk_blocks_uid FOREIGN KEY (block_uid) REFERENCES blocks_microblocks(uid) 114 | ) 115 | INHERITS (txs); 116 | ALTER TABLE ONLY txs_4 ALTER COLUMN sender SET STATISTICS 1000; 117 | 118 | CREATE TABLE IF NOT EXISTS txs_5 ( 119 | sender VARCHAR NOT NULL, 120 | sender_public_key VARCHAR NOT NULL, 121 | asset_id VARCHAR NOT NULL, 122 | quantity BIGINT NOT NULL, 123 | reissuable BOOLEAN NOT NULL, 124 | 125 | CONSTRAINT txs_5_pk_uid PRIMARY KEY (uid), 126 | CONSTRAINT fk_blocks_uid FOREIGN KEY (block_uid) REFERENCES blocks_microblocks(uid) 127 | ) 128 | INHERITS (txs); 129 | 130 | CREATE TABLE IF NOT EXISTS txs_6 ( 131 | sender VARCHAR NOT NULL, 132 | sender_public_key VARCHAR NOT NULL, 133 | asset_id VARCHAR NOT NULL, 134 | amount BIGINT NOT NULL, 135 | 136 | CONSTRAINT txs_6_pk_uid PRIMARY KEY (uid), 137 | CONSTRAINT fk_blocks_uid FOREIGN KEY (block_uid) REFERENCES blocks_microblocks(uid) 138 | ) 139 | INHERITS (txs); 140 | 141 | CREATE TABLE IF NOT EXISTS txs_7 ( 142 | sender VARCHAR NOT NULL, 143 | sender_public_key VARCHAR NOT NULL, 144 | order1 jsonb NOT NULL, 145 | order2 jsonb NOT NULL, 146 | amount BIGINT NOT NULL, 147 | price BIGINT NOT NULL, 148 | amount_asset_id VARCHAR NOT NULL, 149 | price_asset_id VARCHAR NOT NULL, 150 | buy_matcher_fee BIGINT NOT NULL, 151 | sell_matcher_fee BIGINT NOT NULL, 152 | fee_asset_id VARCHAR NOT NULL, 153 | 154 | CONSTRAINT txs_7_pk_uid PRIMARY KEY (uid), 155 | CONSTRAINT fk_blocks_uid FOREIGN KEY (block_uid) REFERENCES blocks_microblocks(uid) 156 | ) 157 | 
INHERITS (txs); 158 | 159 | CREATE TABLE IF NOT EXISTS txs_8 ( 160 | sender VARCHAR NOT NULL, 161 | sender_public_key VARCHAR NOT NULL, 162 | recipient_address VARCHAR NOT NULL, 163 | recipient_alias VARCHAR, 164 | amount BIGINT NOT NULL, 165 | 166 | CONSTRAINT txs_8_pk_uid PRIMARY KEY (uid), 167 | CONSTRAINT fk_blocks_uid FOREIGN KEY (block_uid) REFERENCES blocks_microblocks(uid) 168 | ) 169 | INHERITS (txs); 170 | 171 | CREATE TABLE IF NOT EXISTS txs_9 ( 172 | sender VARCHAR NOT NULL, 173 | sender_public_key VARCHAR NOT NULL, 174 | lease_tx_uid BIGINT, 175 | 176 | CONSTRAINT txs_9_pk_uid PRIMARY KEY (uid), 177 | CONSTRAINT txs_9_un UNIQUE (uid, lease_tx_uid), 178 | CONSTRAINT fk_blocks_uid FOREIGN KEY (block_uid) REFERENCES blocks_microblocks(uid) 179 | ) 180 | INHERITS (txs); 181 | 182 | CREATE TABLE IF NOT EXISTS txs_10 ( 183 | sender VARCHAR NOT NULL, 184 | sender_public_key VARCHAR NOT NULL, 185 | alias VARCHAR NOT NULL, 186 | 187 | CONSTRAINT txs_10_pk_uid PRIMARY KEY (uid), 188 | CONSTRAINT fk_blocks_uid FOREIGN KEY (block_uid) REFERENCES blocks_microblocks(uid) 189 | ) 190 | INHERITS (txs); 191 | 192 | CREATE TABLE IF NOT EXISTS txs_11 ( 193 | sender VARCHAR NOT NULL, 194 | sender_public_key VARCHAR NOT NULL, 195 | asset_id VARCHAR NOT NULL, 196 | attachment VARCHAR NOT NULL, 197 | 198 | CONSTRAINT txs_11_pk_uid PRIMARY KEY (uid), 199 | CONSTRAINT fk_blocks_uid FOREIGN KEY (block_uid) REFERENCES blocks_microblocks(uid) 200 | ) 201 | INHERITS (txs); 202 | 203 | CREATE TABLE IF NOT EXISTS txs_11_transfers ( 204 | tx_uid BIGINT NOT NULL, 205 | recipient_address VARCHAR NOT NULL, 206 | recipient_alias VARCHAR, 207 | amount bigint NOT NULL, 208 | position_in_tx smallint NOT NULL, 209 | height integer NOT NULL, 210 | 211 | PRIMARY KEY (tx_uid, position_in_tx), 212 | CONSTRAINT fk_tx_uid FOREIGN KEY (tx_uid) REFERENCES txs_11(uid) ON DELETE CASCADE 213 | ); 214 | 215 | CREATE TABLE IF NOT EXISTS txs_12 ( 216 | sender VARCHAR NOT NULL, 217 | sender_public_key 
VARCHAR NOT NULL, 218 | 219 | CONSTRAINT txs_12_pk_uid PRIMARY KEY (uid), 220 | CONSTRAINT fk_blocks_uid FOREIGN KEY (block_uid) REFERENCES blocks_microblocks(uid) 221 | ) 222 | INHERITS (txs); 223 | 224 | CREATE TABLE IF NOT EXISTS txs_12_data ( 225 | tx_uid BIGINT NOT NULL, 226 | data_key TEXT NOT NULL, 227 | data_type TEXT, 228 | data_value_integer BIGINT, 229 | data_value_boolean BOOLEAN, 230 | data_value_binary TEXT, 231 | data_value_string TEXT, 232 | position_in_tx SMALLINT NOT NULL, 233 | height INTEGER NOT NULL, 234 | 235 | PRIMARY KEY (tx_uid, position_in_tx), 236 | CONSTRAINT fk_tx_uid FOREIGN KEY (tx_uid) REFERENCES txs_12(uid) ON DELETE CASCADE 237 | ); 238 | 239 | CREATE TABLE IF NOT EXISTS txs_13 ( 240 | sender VARCHAR NOT NULL, 241 | sender_public_key VARCHAR NOT NULL, 242 | script VARCHAR, 243 | 244 | CONSTRAINT txs_13_pk_uid PRIMARY KEY (uid), 245 | CONSTRAINT fk_blocks_uid FOREIGN KEY (block_uid) REFERENCES blocks_microblocks(uid) 246 | ) 247 | INHERITS (txs); 248 | 249 | CREATE TABLE IF NOT EXISTS txs_14 ( 250 | sender VARCHAR NOT NULL, 251 | sender_public_key VARCHAR NOT NULL, 252 | asset_id VARCHAR NOT NULL, 253 | min_sponsored_asset_fee BIGINT, 254 | 255 | CONSTRAINT txs_14_pk_uid PRIMARY KEY (uid), 256 | CONSTRAINT fk_blocks_uid FOREIGN KEY (block_uid) REFERENCES blocks_microblocks(uid) 257 | ) 258 | INHERITS (txs); 259 | 260 | CREATE TABLE IF NOT EXISTS txs_15 ( 261 | sender VARCHAR NOT NULL, 262 | sender_public_key VARCHAR NOT NULL, 263 | asset_id VARCHAR NOT NULL, 264 | script VARCHAR, 265 | 266 | CONSTRAINT txs_15_pk_uid PRIMARY KEY (uid), 267 | CONSTRAINT fk_blocks_uid FOREIGN KEY (block_uid) REFERENCES blocks_microblocks(uid) 268 | ) 269 | INHERITS (txs); 270 | 271 | CREATE TABLE IF NOT EXISTS txs_16 ( 272 | sender VARCHAR NOT NULL, 273 | sender_public_key VARCHAR NOT NULL, 274 | dapp_address VARCHAR NOT NULL, 275 | dapp_alias VARCHAR, 276 | function_name VARCHAR, 277 | fee_asset_id VARCHAR NOT NULL, 278 | 279 | CONSTRAINT 
txs_16_pk_uid PRIMARY KEY (uid), 280 | CONSTRAINT fk_blocks_uid FOREIGN KEY (block_uid) REFERENCES blocks_microblocks(uid) 281 | ) 282 | INHERITS (txs); 283 | 284 | CREATE TABLE IF NOT EXISTS txs_16_args ( 285 | arg_type TEXT NOT NULL, 286 | arg_value_integer BIGINT, 287 | arg_value_boolean BOOLEAN, 288 | arg_value_binary TEXT, 289 | arg_value_string TEXT, 290 | arg_value_list jsonb DEFAULT NULL, 291 | position_in_args SMALLINT NOT NULL, 292 | tx_uid BIGINT NOT NULL, 293 | height INTEGER, 294 | 295 | PRIMARY KEY (tx_uid, position_in_args), 296 | CONSTRAINT fk_tx_uid FOREIGN KEY (tx_uid) REFERENCES txs_16(uid) ON DELETE CASCADE 297 | ); 298 | 299 | CREATE TABLE IF NOT EXISTS txs_16_payment ( 300 | tx_uid BIGINT NOT NULL, 301 | amount BIGINT NOT NULL, 302 | position_in_payment SMALLINT NOT NULL, 303 | height INTEGER, 304 | asset_id VARCHAR NOT NULL, 305 | 306 | PRIMARY KEY (tx_uid, position_in_payment), 307 | CONSTRAINT fk_tx_uid FOREIGN KEY (tx_uid) REFERENCES txs_16(uid) ON DELETE CASCADE 308 | ); 309 | 310 | CREATE TABLE IF NOT EXISTS txs_17 311 | ( 312 | sender VARCHAR NOT NULL, 313 | sender_public_key VARCHAR NOT NULL, 314 | asset_id VARCHAR NOT NULL, 315 | asset_name VARCHAR NOT NULL, 316 | description VARCHAR NOT NULL, 317 | 318 | CONSTRAINT txs_17_pk_uid PRIMARY KEY (uid), 319 | CONSTRAINT fk_blocks_uid FOREIGN KEY (block_uid) REFERENCES blocks_microblocks(uid) 320 | ) 321 | INHERITS (txs); 322 | 323 | CREATE TABLE IF NOT EXISTS txs_18 324 | ( 325 | bytes BYTEA NOT NULL, 326 | function_name VARCHAR, -- null - transfer, not null - invoke 327 | 328 | CONSTRAINT txs_18_pk_uid PRIMARY KEY (uid), 329 | CONSTRAINT fk_blocks_uid FOREIGN KEY (block_uid) REFERENCES blocks_microblocks(uid) 330 | ) 331 | INHERITS (txs); 332 | 333 | CREATE TABLE IF NOT EXISTS txs_18_args ( 334 | arg_type TEXT NOT NULL, 335 | arg_value_integer BIGINT, 336 | arg_value_boolean BOOLEAN, 337 | arg_value_binary TEXT, 338 | arg_value_string TEXT, 339 | arg_value_list jsonb DEFAULT NULL, 340 | 
position_in_args SMALLINT NOT NULL, 341 | tx_uid BIGINT NOT NULL, 342 | height INTEGER, 343 | 344 | PRIMARY KEY (tx_uid, position_in_args), 345 | CONSTRAINT fk_tx_uid FOREIGN KEY (tx_uid) REFERENCES txs_18(uid) ON DELETE CASCADE 346 | ); 347 | 348 | CREATE TABLE IF NOT EXISTS txs_18_payment ( 349 | tx_uid BIGINT NOT NULL, 350 | amount BIGINT NOT NULL, 351 | position_in_payment SMALLINT NOT NULL, 352 | height INTEGER, 353 | asset_id VARCHAR NOT NULL, 354 | 355 | PRIMARY KEY (tx_uid, position_in_payment), 356 | CONSTRAINT fk_tx_uid FOREIGN KEY (tx_uid) REFERENCES txs_18(uid) ON DELETE CASCADE 357 | ); 358 | 359 | CREATE TABLE IF NOT EXISTS assets_metadata ( 360 | asset_id VARCHAR, 361 | asset_name VARCHAR, 362 | ticker VARCHAR, 363 | height INTEGER, 364 | 365 | CONSTRAINT asset_meta_pk PRIMARY KEY (asset_id) 366 | ); 367 | 368 | CREATE TABLE IF NOT EXISTS candles ( 369 | time_start timestamp without time zone NOT NULL, 370 | amount_asset_id character varying(255) NOT NULL, 371 | price_asset_id character varying(255) NOT NULL, 372 | low numeric NOT NULL, 373 | high numeric NOT NULL, 374 | volume numeric NOT NULL, 375 | quote_volume numeric NOT NULL, 376 | max_height integer NOT NULL, 377 | txs_count integer NOT NULL, 378 | weighted_average_price numeric NOT NULL, 379 | open numeric NOT NULL, 380 | close numeric NOT NULL, 381 | interval varchar NOT NULL, 382 | matcher_address varchar NOT NULL, 383 | 384 | PRIMARY KEY (interval, time_start, amount_asset_id, price_asset_id, matcher_address) 385 | ); 386 | 387 | CREATE TABLE IF NOT EXISTS pairs ( 388 | amount_asset_id character varying(255) NOT NULL, 389 | price_asset_id character varying(255) NOT NULL, 390 | first_price numeric NOT NULL, 391 | last_price numeric NOT NULL, 392 | volume numeric NOT NULL, 393 | volume_waves numeric, 394 | quote_volume numeric NOT NULL, 395 | high numeric NOT NULL, 396 | low numeric NOT NULL, 397 | weighted_average_price numeric NOT NULL, 398 | txs_count integer NOT NULL, 399 | 
matcher_address character varying(255) NOT NULL, 400 | 401 | PRIMARY KEY (amount_asset_id, price_asset_id, matcher_address) 402 | ); 403 | 404 | CREATE TABLE IF NOT EXISTS waves_data ( 405 | height int4 NULL, 406 | -- quantity никогда не может быть одинаковым у двух записей 407 | quantity numeric NOT NULL PRIMARY KEY 408 | ); 409 | 410 | CREATE TABLE IF NOT EXISTS asset_tickers ( 411 | uid BIGINT GENERATED BY DEFAULT AS IDENTITY, 412 | superseded_by BIGINT DEFAULT 9223372036854775806 NOT NULL, 413 | block_uid BIGINT NOT NULL CONSTRAINT data_entries_block_uid_fkey REFERENCES blocks_microblocks (uid) ON DELETE CASCADE, 414 | asset_id TEXT NOT NULL, 415 | ticker TEXT NOT NULL, 416 | 417 | PRIMARY KEY (superseded_by, asset_id) 418 | ); 419 | 420 | CREATE OR REPLACE VIEW decimals ( 421 | asset_id, 422 | decimals 423 | ) AS 424 | SELECT asset_id, decimals 425 | FROM asset_updates 426 | WHERE superseded_by = '9223372036854775806'::bigint 427 | UNION ALL 428 | SELECT 429 | 'WAVES'::character varying AS asset_id, 430 | 8 AS decimals; 431 | 432 | CREATE OR REPLACE VIEW tickers( 433 | asset_id, 434 | ticker 435 | ) AS SELECT DISTINCT ON (asset_id) asset_id, ticker FROM asset_tickers ORDER BY asset_id, uid DESC; 436 | 437 | CREATE OR REPLACE VIEW assets( 438 | asset_id, 439 | ticker, 440 | asset_name, 441 | description, 442 | sender, 443 | issue_height, 444 | issue_timestamp, 445 | total_quantity, 446 | decimals, 447 | reissuable, 448 | has_script, 449 | min_sponsored_asset_fee 450 | ) AS 451 | SELECT au.asset_id, 452 | t.ticker, 453 | au.name AS asset_name, 454 | au.description, 455 | ao.issuer AS sender, 456 | ao.issue_height, 457 | ao.issue_time_stamp AS issue_timestamp, 458 | au.volume AS total_quantity, 459 | au.decimals, 460 | au.reissuable, 461 | CASE 462 | WHEN au.script IS NOT NULL THEN true 463 | ELSE false 464 | END AS has_script, 465 | au.sponsorship AS min_sponsored_asset_fee 466 | FROM asset_updates au 467 | LEFT JOIN (SELECT tickers.asset_id, tickers.ticker FROM 
tickers) t ON au.asset_id::text = t.asset_id 468 | LEFT JOIN asset_origins ao ON au.asset_id::text = ao.asset_id::text 469 | WHERE au.superseded_by = '9223372036854775806'::bigint 470 | UNION ALL 471 | SELECT 'WAVES'::character varying AS asset_id, 472 | 'WAVES'::text AS ticker, 473 | 'Waves'::character varying AS asset_name, 474 | ''::character varying AS description, 475 | ''::character varying AS sender, 476 | 0 AS issue_height, 477 | '2016-04-11 21:00:00+00'::timestamp with time zone AS issue_timestamp, 478 | ((SELECT waves_data.quantity 479 | FROM waves_data 480 | ORDER BY waves_data.height DESC NULLS LAST 481 | LIMIT 1))::bigint::numeric AS total_quantity, 482 | 8 AS decimals, 483 | false AS reissuable, 484 | false AS has_script, 485 | NULL::bigint AS min_sponsored_asset_fee; 486 | 487 | CREATE OR REPLACE FUNCTION public.text_timestamp_cast(text) RETURNS timestamp without time zone 488 | LANGUAGE plpgsql 489 | AS $_$ 490 | begin 491 | -- raise notice $1; 492 | return to_timestamp($1 :: DOUBLE PRECISION / 1000); 493 | END 494 | $_$; 495 | 496 | CREATE OR REPLACE FUNCTION _to_raw_timestamp(ts TIMESTAMP WITHOUT TIME ZONE, ivl TEXT) 497 | RETURNS TIMESTAMP 498 | LANGUAGE plpgsql 499 | AS $$ 500 | BEGIN 501 | CASE 502 | WHEN ivl = '1m' THEN RETURN _trunc_ts_by_secs(ts, 60); 503 | WHEN ivl = '5m' THEN RETURN _trunc_ts_by_secs(ts, 300); 504 | WHEN ivl = '15m' THEN RETURN _trunc_ts_by_secs(ts, 900); 505 | WHEN ivl = '30m' THEN RETURN _trunc_ts_by_secs(ts, 1800); 506 | WHEN ivl = '1h' THEN RETURN _trunc_ts_by_secs(ts, 3600); 507 | WHEN ivl = '2h' THEN RETURN _trunc_ts_by_secs(ts, 7200); 508 | WHEN ivl = '3h' THEN RETURN _trunc_ts_by_secs(ts, 10800); 509 | WHEN ivl = '4h' THEN RETURN _trunc_ts_by_secs(ts, 14400); 510 | WHEN ivl = '6h' THEN RETURN _trunc_ts_by_secs(ts, 21600); 511 | WHEN ivl = '12h' THEN RETURN _trunc_ts_by_secs(ts, 43200); 512 | WHEN ivl = '1d' THEN RETURN date_trunc('day', ts); 513 | WHEN ivl = '1w' THEN RETURN date_trunc('week', ts); 514 | WHEN ivl = 
'1M' THEN RETURN date_trunc('month', ts); 515 | ELSE 516 | RETURN to_timestamp(0); 517 | END CASE; 518 | END 519 | $$; 520 | 521 | CREATE OR REPLACE FUNCTION _trunc_ts_by_secs(ts TIMESTAMP WITHOUT TIME ZONE, secs INTEGER) 522 | RETURNS TIMESTAMP 523 | LANGUAGE plpgsql 524 | AS $$ 525 | BEGIN 526 | RETURN to_timestamp(floor(extract('epoch' from ts) / secs) * secs); 527 | END; 528 | $$; 529 | 530 | CREATE UNIQUE INDEX IF NOT EXISTS txs_1_uid_time_stamp_unique_idx ON txs_1 (uid, time_stamp); 531 | CREATE UNIQUE INDEX IF NOT EXISTS txs_2_uid_time_stamp_unique_idx ON txs_2 (uid, time_stamp); 532 | CREATE UNIQUE INDEX IF NOT EXISTS txs_3_uid_time_stamp_unique_idx ON txs_3 (uid, time_stamp); 533 | CREATE UNIQUE INDEX IF NOT EXISTS txs_4_uid_time_stamp_unique_idx ON txs_4 (uid, time_stamp); 534 | CREATE UNIQUE INDEX IF NOT EXISTS txs_5_uid_time_stamp_unique_idx ON txs_5 (uid, time_stamp); 535 | CREATE UNIQUE INDEX IF NOT EXISTS txs_6_uid_time_stamp_unique_idx ON txs_6 (uid, time_stamp); 536 | CREATE UNIQUE INDEX IF NOT EXISTS txs_7_uid_time_stamp_unique_idx ON txs_7 (uid, time_stamp); 537 | CREATE UNIQUE INDEX IF NOT EXISTS txs_8_uid_time_stamp_unique_idx ON txs_8 (uid, time_stamp); 538 | CREATE UNIQUE INDEX IF NOT EXISTS txs_9_uid_time_stamp_unique_idx ON txs_9 (uid, time_stamp); 539 | CREATE UNIQUE INDEX IF NOT EXISTS txs_10_uid_time_stamp_unique_idx ON txs_10 (uid, time_stamp); 540 | CREATE UNIQUE INDEX IF NOT EXISTS txs_11_uid_time_stamp_unique_idx ON txs_11 (uid, time_stamp); 541 | CREATE UNIQUE INDEX IF NOT EXISTS txs_12_uid_time_stamp_unique_idx ON txs_12 (uid, time_stamp); 542 | CREATE UNIQUE INDEX IF NOT EXISTS txs_13_uid_time_stamp_unique_idx ON txs_13 (uid, time_stamp); 543 | CREATE UNIQUE INDEX IF NOT EXISTS txs_14_uid_time_stamp_unique_idx ON txs_14 (uid, time_stamp); 544 | CREATE UNIQUE INDEX IF NOT EXISTS txs_15_uid_time_stamp_unique_idx ON txs_15 (uid, time_stamp); 545 | CREATE UNIQUE INDEX IF NOT EXISTS txs_16_uid_time_stamp_unique_idx ON txs_16 (uid, 
time_stamp); 546 | CREATE UNIQUE INDEX IF NOT EXISTS txs_17_uid_time_stamp_unique_idx ON txs_17 (uid, time_stamp); 547 | CREATE UNIQUE INDEX IF NOT EXISTS txs_18_uid_time_stamp_unique_idx ON txs_18 (uid, time_stamp); 548 | 549 | CREATE INDEX IF NOT EXISTS txs_height_idx ON txs USING btree (height); 550 | CREATE INDEX IF NOT EXISTS txs_1_height_idx ON txs_1 USING btree (height); 551 | CREATE INDEX IF NOT EXISTS txs_2_height_idx ON txs_2 USING btree (height); 552 | CREATE INDEX IF NOT EXISTS txs_3_height_idx ON txs_3 USING btree (height); 553 | CREATE INDEX IF NOT EXISTS txs_5_height_idx ON txs_5 USING btree (height); 554 | CREATE INDEX IF NOT EXISTS txs_6_height_idx ON txs_6 USING btree (height); 555 | CREATE INDEX IF NOT EXISTS txs_7_height_idx ON txs_7 USING btree (height); 556 | CREATE INDEX IF NOT EXISTS txs_8_height_idx ON txs_8 USING btree (height); 557 | CREATE INDEX IF NOT EXISTS txs_9_height_idx ON txs_9 USING btree (height); 558 | CREATE INDEX IF NOT EXISTS txs_10_height_idx ON txs_10 USING btree (height); 559 | CREATE INDEX IF NOT EXISTS txs_11_height_idx ON txs_11 USING btree (height); 560 | CREATE INDEX IF NOT EXISTS txs_11_transfers_height_idx ON txs_11_transfers USING btree (height); 561 | CREATE INDEX IF NOT EXISTS txs_12_height_idx ON txs_12 USING btree (height); 562 | CREATE INDEX IF NOT EXISTS txs_12_data_height_idx ON txs_12_data USING btree (height); 563 | CREATE INDEX IF NOT EXISTS txs_13_height_idx ON txs_13 USING btree (height); 564 | CREATE INDEX IF NOT EXISTS txs_14_height_idx ON txs_14 USING btree (height); 565 | CREATE INDEX IF NOT EXISTS txs_15_height_idx ON txs_15 USING btree (height); 566 | CREATE INDEX IF NOT EXISTS txs_16_height_idx ON txs_16 USING btree (height); 567 | CREATE INDEX IF NOT EXISTS txs_16_args_height_idx ON txs_16_args USING btree (height); 568 | CREATE INDEX IF NOT EXISTS txs_16_payment_height_idx ON txs_16_payment USING btree (height); 569 | CREATE INDEX IF NOT EXISTS txs_17_height_idx ON txs_17 USING btree (height); 
570 | CREATE INDEX IF NOT EXISTS txs_18_height_idx ON txs_18 USING btree (height); 571 | CREATE INDEX IF NOT EXISTS txs_18_args_height_idx ON txs_18_args USING btree (height); 572 | CREATE INDEX IF NOT EXISTS txs_18_payment_height_idx ON txs_18_payment USING btree (height); 573 | 574 | CREATE INDEX IF NOT EXISTS txs_sender_uid_idx ON txs USING btree (sender, uid); 575 | CREATE INDEX IF NOT EXISTS txs_1_sender_uid_idx ON txs_1 USING btree (sender, uid); 576 | CREATE INDEX IF NOT EXISTS txs_2_sender_uid_idx ON txs_2 USING btree (sender, uid); 577 | CREATE INDEX IF NOT EXISTS txs_3_sender_uid_idx ON txs_3 USING btree (sender, uid); 578 | CREATE INDEX IF NOT EXISTS txs_5_sender_uid_idx ON txs_5 USING btree (sender, uid); 579 | CREATE INDEX IF NOT EXISTS txs_6_sender_uid_idx ON txs_6 USING btree (sender, uid); 580 | CREATE INDEX IF NOT EXISTS txs_7_sender_uid_idx ON txs_7 USING btree (sender, uid); 581 | CREATE INDEX IF NOT EXISTS txs_8_sender_uid_idx ON txs_8 USING btree (sender, uid); 582 | CREATE INDEX IF NOT EXISTS txs_9_sender_uid_idx ON txs_9 USING btree (sender, uid); 583 | CREATE INDEX IF NOT EXISTS txs_10_sender_uid_idx ON txs_10 USING btree (sender, uid); 584 | CREATE INDEX IF NOT EXISTS txs_11_sender_uid_idx ON txs_11 USING btree (sender, uid); 585 | CREATE INDEX IF NOT EXISTS txs_12_sender_uid_idx ON txs_12 USING btree (sender, uid); 586 | CREATE INDEX IF NOT EXISTS txs_13_sender_uid_idx ON txs_13 USING btree (sender, uid); 587 | CREATE INDEX IF NOT EXISTS txs_14_sender_uid_idx ON txs_14 USING btree (sender, uid); 588 | CREATE INDEX IF NOT EXISTS txs_15_sender_uid_idx ON txs_15 USING btree (sender, uid); 589 | CREATE INDEX IF NOT EXISTS txs_16_sender_uid_idx ON txs_16 USING btree (sender, uid); 590 | CREATE INDEX IF NOT EXISTS txs_17_sender_uid_idx ON txs_17 USING btree (sender, uid); 591 | CREATE INDEX IF NOT EXISTS txs_18_sender_uid_idx ON txs_18 USING btree (sender, uid); 592 | 593 | CREATE INDEX IF NOT EXISTS txs_id_idx ON txs USING hash (id); 594 | 
CREATE INDEX IF NOT EXISTS txs_1_id_idx ON txs_1 USING hash (id); 595 | CREATE INDEX IF NOT EXISTS txs_2_id_idx ON txs_2 USING hash (id); 596 | CREATE INDEX IF NOT EXISTS txs_3_id_idx ON txs_3 USING hash (id); 597 | CREATE INDEX IF NOT EXISTS txs_4_id_idx ON txs_4 USING hash (id); 598 | CREATE INDEX IF NOT EXISTS txs_5_id_idx ON txs_5 USING hash (id); 599 | CREATE INDEX IF NOT EXISTS txs_6_id_idx ON txs_6 USING hash (id); 600 | CREATE INDEX IF NOT EXISTS txs_7_id_idx ON txs_7 USING hash (id); 601 | CREATE INDEX IF NOT EXISTS txs_8_id_idx ON txs_8 USING hash (id); 602 | CREATE INDEX IF NOT EXISTS txs_9_id_idx ON txs_9 USING hash (id); 603 | CREATE INDEX IF NOT EXISTS txs_10_id_idx ON txs_10 USING hash (id); 604 | CREATE INDEX IF NOT EXISTS txs_11_id_idx ON txs_11 USING hash (id); 605 | CREATE INDEX IF NOT EXISTS txs_12_id_idx ON txs_12 USING hash (id); 606 | CREATE INDEX IF NOT EXISTS txs_13_id_idx ON txs_13 USING hash (id); 607 | CREATE INDEX IF NOT EXISTS txs_14_id_idx ON txs_14 USING hash (id); 608 | CREATE INDEX IF NOT EXISTS txs_15_id_idx ON txs_15 USING hash (id); 609 | CREATE INDEX IF NOT EXISTS txs_16_id_idx ON txs_16 USING hash (id); 610 | CREATE INDEX IF NOT EXISTS txs_17_id_idx ON txs_17 USING hash (id); 611 | CREATE INDEX IF NOT EXISTS txs_18_id_idx ON txs_18 USING hash (id); 612 | 613 | CREATE INDEX IF NOT EXISTS txs_time_stamp_uid_gist_idx ON txs USING gist (time_stamp, uid); 614 | CREATE INDEX IF NOT EXISTS txs_1_time_stamp_uid_gist_idx ON txs_1 USING gist (time_stamp, uid); 615 | CREATE INDEX IF NOT EXISTS txs_2_time_stamp_uid_gist_idx ON txs_2 USING gist (time_stamp, uid); 616 | CREATE INDEX IF NOT EXISTS txs_3_time_stamp_uid_gist_idx ON txs_3 USING gist (time_stamp, uid); 617 | CREATE INDEX IF NOT EXISTS txs_4_time_stamp_uid_gist_idx ON txs_4 USING gist (time_stamp, uid); 618 | CREATE INDEX IF NOT EXISTS txs_5_time_stamp_uid_gist_idx ON txs_5 USING gist (time_stamp, uid); 619 | CREATE INDEX IF NOT EXISTS txs_6_time_stamp_uid_gist_idx ON txs_6 USING 
gist (time_stamp, uid); 620 | CREATE INDEX IF NOT EXISTS txs_7_time_stamp_uid_gist_idx ON txs_7 USING gist (time_stamp, uid); 621 | CREATE INDEX IF NOT EXISTS txs_8_time_stamp_uid_gist_idx ON txs_8 USING gist (time_stamp, uid); 622 | CREATE INDEX IF NOT EXISTS txs_9_time_stamp_uid_gist_idx ON txs_9 USING gist (time_stamp, uid); 623 | CREATE INDEX IF NOT EXISTS txs_10_time_stamp_uid_gist_idx ON txs_10 USING gist (time_stamp, uid); 624 | CREATE INDEX IF NOT EXISTS txs_11_time_stamp_uid_gist_idx ON txs_11 USING gist (time_stamp, uid); 625 | CREATE INDEX IF NOT EXISTS txs_12_time_stamp_uid_gist_idx ON txs_12 USING gist (time_stamp, uid); 626 | CREATE INDEX IF NOT EXISTS txs_13_time_stamp_uid_gist_idx ON txs_13 USING gist (time_stamp, uid); 627 | CREATE INDEX IF NOT EXISTS txs_14_time_stamp_uid_gist_idx ON txs_14 USING gist (time_stamp, uid); 628 | CREATE INDEX IF NOT EXISTS txs_15_time_stamp_uid_gist_idx ON txs_15 USING gist (time_stamp, uid); 629 | CREATE INDEX IF NOT EXISTS txs_16_time_stamp_uid_gist_idx ON txs_16 USING gist (time_stamp, uid); 630 | CREATE INDEX IF NOT EXISTS txs_17_time_stamp_uid_gist_idx ON txs_17 USING gist (time_stamp, uid); 631 | CREATE INDEX IF NOT EXISTS txs_18_time_stamp_uid_gist_idx ON txs_18 USING gist (time_stamp, uid); 632 | 633 | CREATE INDEX IF NOT EXISTS txs_1_block_uid_idx ON txs_1 (block_uid); 634 | CREATE INDEX IF NOT EXISTS txs_2_block_uid_idx ON txs_2 (block_uid); 635 | CREATE INDEX IF NOT EXISTS txs_3_block_uid_idx ON txs_3 (block_uid); 636 | CREATE INDEX IF NOT EXISTS txs_4_block_uid_idx ON txs_4 (block_uid); 637 | CREATE INDEX IF NOT EXISTS txs_5_block_uid_idx ON txs_5 (block_uid); 638 | CREATE INDEX IF NOT EXISTS txs_6_block_uid_idx ON txs_6 (block_uid); 639 | CREATE INDEX IF NOT EXISTS txs_7_block_uid_idx ON txs_7 (block_uid); 640 | CREATE INDEX IF NOT EXISTS txs_8_block_uid_idx ON txs_8 (block_uid); 641 | CREATE INDEX IF NOT EXISTS txs_9_block_uid_idx ON txs_9 (block_uid); 642 | CREATE INDEX IF NOT EXISTS 
txs_10_block_uid_idx ON txs_10 (block_uid); 643 | CREATE INDEX IF NOT EXISTS txs_11_block_uid_idx ON txs_11 (block_uid); 644 | CREATE INDEX IF NOT EXISTS txs_12_block_uid_idx ON txs_12 (block_uid); 645 | CREATE INDEX IF NOT EXISTS txs_13_block_uid_idx ON txs_13 (block_uid); 646 | CREATE INDEX IF NOT EXISTS txs_14_block_uid_idx ON txs_14 (block_uid); 647 | CREATE INDEX IF NOT EXISTS txs_15_block_uid_idx ON txs_15 (block_uid); 648 | CREATE INDEX IF NOT EXISTS txs_16_block_uid_idx ON txs_16 (block_uid); 649 | CREATE INDEX IF NOT EXISTS txs_17_block_uid_idx ON txs_17 (block_uid); 650 | CREATE INDEX IF NOT EXISTS txs_18_block_uid_idx ON txs_18 (block_uid); 651 | 652 | CREATE INDEX IF NOT EXISTS txs_3_asset_id_uid_idx ON txs_3 USING btree (asset_id, uid); 653 | CREATE INDEX IF NOT EXISTS txs_4_asset_id_uid_idx ON txs_4 USING btree (asset_id, uid); 654 | CREATE INDEX IF NOT EXISTS txs_5_asset_id_uid_idx ON txs_5 USING btree (asset_id, uid); 655 | CREATE INDEX IF NOT EXISTS txs_6_asset_id_uid_idx ON txs_6 USING btree (asset_id, uid); 656 | CREATE INDEX IF NOT EXISTS txs_11_asset_id_uid_idx ON txs_11 USING btree (asset_id, uid); 657 | 658 | CREATE INDEX IF NOT EXISTS txs_3_md5_script_idx ON txs_3 USING btree (md5((script)::text)); 659 | CREATE INDEX IF NOT EXISTS txs_13_md5_script_idx ON txs_13 USING btree (md5((script)::text)); 660 | CREATE INDEX IF NOT EXISTS txs_15_md5_script_idx ON txs_15 USING btree (md5((script)::text)); 661 | 662 | CREATE INDEX IF NOT EXISTS txs_8_recipient_idx ON txs_8 USING btree (recipient_address); 663 | CREATE INDEX IF NOT EXISTS txs_11_transfers_recipient_address_idx ON txs_11_transfers USING btree (recipient_address); 664 | 665 | CREATE INDEX IF NOT EXISTS txs_tx_type_idx ON txs USING btree (tx_type); 666 | CREATE INDEX IF NOT EXISTS txs_time_stamp_uid_idx ON txs USING btree (time_stamp, uid); 667 | CREATE INDEX IF NOT EXISTS txs_4_height_uid_idx ON txs_4 USING btree (height, uid); 668 | CREATE INDEX IF NOT EXISTS 
txs_4_recipient_address_uid_idx ON txs_4 (recipient_address, uid); 669 | CREATE INDEX IF NOT EXISTS txs_4_sender_uid_idx ON txs_4 (sender, uid); 670 | CREATE INDEX IF NOT EXISTS txs_7_order_ids_uid_idx ON txs_7 USING gin ((ARRAY[order1->>'id', order2->>'id']), uid); 671 | CREATE INDEX IF NOT EXISTS txs_7_order_senders_uid_idx ON txs_7 USING gin ((ARRAY[order1->>'sender', order2->>'sender']), uid); 672 | CREATE INDEX IF NOT EXISTS txs_7_price_asset_id_uid_idx ON txs_7 (price_asset_id, uid); 673 | CREATE INDEX IF NOT EXISTS txs_7_time_stamp_gist_idx ON txs_7 USING gist (time_stamp); 674 | CREATE INDEX IF NOT EXISTS txs_7_amount_asset_id_uid_idx ON txs_7 (amount_asset_id, uid); 675 | CREATE INDEX IF NOT EXISTS txs_7_order_sender_1_uid_desc_idx ON txs_7 ((order1 ->> 'sender'::text) asc, uid desc); 676 | CREATE INDEX IF NOT EXISTS txs_7_order_sender_2_uid_desc_idx ON txs_7 ((order2 ->> 'sender'::text) asc, uid desc); 677 | CREATE INDEX IF NOT EXISTS txs_7_uid_height_time_stamp_idx ON txs_7 (uid, height, time_stamp); 678 | CREATE INDEX IF NOT EXISTS txs_7_amount_asset_id_price_asset_id_uid_idx ON txs_7 (amount_asset_id, price_asset_id, uid); 679 | CREATE INDEX IF NOT EXISTS txs_8_recipient_address_uid_idx ON txs_8 USING btree (recipient_address, uid); 680 | CREATE INDEX IF NOT EXISTS txs_10_alias_sender_idx ON txs_10 USING btree (alias, sender); 681 | CREATE INDEX IF NOT EXISTS txs_10_alias_uid_idx ON txs_10 USING btree (alias, uid); 682 | CREATE INDEX IF NOT EXISTS txs_12_data_data_value_binary_tx_uid_partial_idx 683 | ON txs_12_data USING hash (data_value_binary) WHERE (data_type = 'binary'::text); 684 | CREATE INDEX IF NOT EXISTS txs_12_data_data_value_boolean_tx_uid_partial_idx 685 | ON txs_12_data USING btree (data_value_boolean, tx_uid) WHERE (data_type = 'boolean'::text); 686 | CREATE INDEX IF NOT EXISTS txs_12_data_data_value_integer_tx_uid_partial_idx 687 | ON txs_12_data USING btree (data_value_integer, tx_uid) WHERE (data_type = 'integer'::text); 688 | CREATE 
INDEX IF NOT EXISTS txs_12_data_data_value_string_tx_uid_partial_idx 689 | ON txs_12_data USING hash (data_value_string) WHERE (data_type = 'string'::text); 690 | CREATE INDEX IF NOT EXISTS txs_12_data_tx_uid_idx ON txs_12_data USING btree (tx_uid); 691 | CREATE INDEX IF NOT EXISTS txs_12_data_data_key_tx_uid_idx ON txs_12_data USING btree (data_key, tx_uid); 692 | CREATE INDEX IF NOT EXISTS txs_12_data_data_type_tx_uid_idx ON txs_12_data USING btree (data_type, tx_uid); 693 | CREATE INDEX IF NOT EXISTS txs_16_dapp_address_uid_idx ON txs_16 USING btree (dapp_address, uid); 694 | CREATE INDEX IF NOT EXISTS txs_16_function_name_uid_idx ON txs_16 (function_name, uid); 695 | CREATE INDEX IF NOT EXISTS txs_16_payment_asset_id_idx ON txs_16_payment USING btree (asset_id); 696 | CREATE INDEX IF NOT EXISTS txs_16_sender_time_stamp_uid_idx ON txs_16 (sender, time_stamp, uid); 697 | CREATE INDEX IF NOT EXISTS txs_16_dapp_address_function_name_uid_idx ON txs_16 (dapp_address, function_name, uid); 698 | CREATE INDEX IF NOT EXISTS txs_17_sender_time_stamp_id_idx ON txs_17 (sender, time_stamp, uid); 699 | CREATE INDEX IF NOT EXISTS txs_17_asset_id_uid_idx ON txs_17 (asset_id, uid); 700 | CREATE INDEX IF NOT EXISTS txs_18_function_name_uid_idx ON txs_18 (function_name, uid); 701 | CREATE INDEX IF NOT EXISTS txs_18_payment_asset_id_idx ON txs_18_payment USING btree (asset_id); 702 | 703 | CREATE INDEX IF NOT EXISTS asset_updates_to_tsvector_idx 704 | ON asset_updates USING gin (to_tsvector('simple'::regconfig, name::TEXT)) 705 | WHERE (superseded_by = '9223372036854775806'::BIGINT); 706 | CREATE INDEX IF NOT EXISTS asset_updates_block_uid_idx ON asset_updates (block_uid); 707 | CREATE INDEX IF NOT EXISTS asset_tickers_block_uid_idx ON asset_tickers (block_uid); 708 | CREATE INDEX IF NOT EXISTS blocks_microblocks_time_stamp_uid_idx ON blocks_microblocks (time_stamp DESC, uid DESC); 709 | CREATE INDEX IF NOT EXISTS blocks_microblocks_id_idx ON blocks_microblocks (id); 710 | CREATE 
INDEX IF NOT EXISTS candles_max_height_index ON candles USING btree (max_height); 711 | CREATE INDEX IF NOT EXISTS candles_amount_price_ids_matcher_time_start_partial_1m_idx 712 | ON candles (amount_asset_id, price_asset_id, matcher_address, time_start) WHERE (("interval")::text = '1m'::text); 713 | CREATE INDEX IF NOT EXISTS candles_assets_id_idx 714 | ON candles USING btree (amount_asset_id, price_asset_id) 715 | WHERE ((("interval")::text = '1d'::text) AND ((matcher_address)::text = '3PEjHv3JGjcWNpYEEkif2w8NXV4kbhnoGgu'::text)); 716 | CREATE INDEX IF NOT EXISTS candles_interval_time_start ON candles (interval, time_start); 717 | CREATE INDEX IF NOT EXISTS waves_data_height_desc_quantity_idx ON waves_data (height DESC NULLS LAST, quantity); 718 | CREATE INDEX IF NOT EXISTS asset_tickers_ticker_idx ON asset_tickers (ticker); 719 | CREATE INDEX IF NOT EXISTS asset_tickers_asset_id_uid_idx ON asset_tickers (asset_id, uid) INCLUDE (ticker); -------------------------------------------------------------------------------- /src/lib/consumer/repo/pg.rs: -------------------------------------------------------------------------------- 1 | use anyhow::{bail, Error, Result}; 2 | use async_trait::async_trait; 3 | use chrono::{Datelike, Duration, NaiveDateTime, Timelike as _}; 4 | use diesel::{ 5 | dsl::sql, 6 | pg::PgConnection, 7 | prelude::*, 8 | result::Error as DslError, 9 | sql_query, 10 | sql_types::{Array, BigInt, Int8, Timestamp, VarChar}, 11 | Table, 12 | }; 13 | use std::mem::drop; 14 | use std::{collections::HashMap, num::NonZeroU32}; 15 | use super::super::UidHeight; 16 | use super::{Repo, RepoOperations}; 17 | use crate::consumer::models::candles::interval_in_seconds; 18 | use crate::consumer::models::{ 19 | asset_tickers::{AssetTickerOverride, DeletedAssetTicker, InsertableAssetTicker}, 20 | assets::{AssetOrigin, AssetOverride, AssetUpdate, DeletedAsset}, 21 | block_microblock::BlockMicroblock, 22 | candles::intervals::{self, CANDLE_INTERVALS}, 23 | txs::*, 24 | 
    waves_data::WavesData,
};
use crate::db::PgAsyncPool;
use crate::error::Error as AppError;
use crate::schema::*;
use crate::tuple_len::TupleLen;

// "Open" (not yet superseded) rows carry superseded_by == MAX_UID.
// i64::MAX itself is reserved so MAX_UID stays a valid sentinel.
const MAX_UID: i64 = std::i64::MAX - 1;
// Postgres allows at most 65535 bind parameters per statement;
// bulk inserts are chunked so rows * columns stays under this limit.
const PG_MAX_INSERT_FIELDS_COUNT: usize = 65535;

/// Postgres-backed [`Repo`] implementation over an async connection pool.
#[derive(Clone)]
pub struct PgRepo {
    pool: PgAsyncPool,
}

/// Builds a `PgRepo` from the given async Postgres pool.
pub fn new(pool: PgAsyncPool) -> PgRepo {
    PgRepo { pool }
}

/// Borrow of a live connection; all repo operations run on it.
pub struct PgRepoOperations<'c> {
    pub conn: &'c mut PgConnection,
}

#[async_trait]
impl Repo for PgRepo {
    type Operations<'c> = PgRepoOperations<'c>;

    /// Runs `f` inside one DB transaction on a pooled connection.
    /// An `Err` from `f` rolls the whole transaction back.
    async fn transaction<F, R>(&self, f: F) -> Result<R>
    where
        F: for<'conn> FnOnce(&mut Self::Operations<'conn>) -> Result<R>,
        F: Send + 'static,
        R: Send + 'static,
    {
        let connection = self.pool.get().await?;
        connection
            .interact(|conn| conn.transaction(|conn| f(&mut PgRepoOperations { conn })))
            .await
            .map_err(AppError::from)?
    }
}

impl RepoOperations for PgRepoOperations<'_> {
    //
    // COMMON
    //

    /// Height of the newest block/microblock row, or 0 for an empty DB.
    fn get_current_height(&mut self) -> Result<i32> {
        blocks_microblocks::table
            .select(blocks_microblocks::height)
            .order(blocks_microblocks::height.desc())
            .first(self.conn)
            .optional()
            .map_err(build_err_fn(format!("Cannot get current height")))
            .map(|height| height.unwrap_or(0))
    }

    /// Builds a descending ladder of candidate heights between
    /// `current - rollback_step` and `current - depth` and resolves them to
    /// `(uid, height)` pairs, newest uid first. `None` when nothing matches.
    fn get_blocks_rollback_to(
        &mut self,
        depth: NonZeroU32,
        seq_step: NonZeroU32,
    ) -> Result<Option<Vec<UidHeight>>> {
        let depth = depth.into();
        let current_height = self.get_current_height()? as u32;
        // Step can never exceed the total rollback depth.
        let rollback_step = u32::min(seq_step.into(), depth);
        let starting_height = current_height.saturating_sub(rollback_step);
        let final_height = current_height.saturating_sub(depth);

        // intentionally made up this interval because starting_height >= final_height
        // (final_height + 1) is needed to not accidentally include final_height twice
        let mut heights_rollback_to = ((final_height + 1)..=starting_height)
            .rev()
            .step_by(rollback_step as usize)
            .map(|h| h as i32)
            .collect::<Vec<_>>();

        heights_rollback_to.push(final_height as i32);

        chunked_with_result(blocks_microblocks::table, &heights_rollback_to, |heights| {
            blocks_microblocks::table
                .select((blocks_microblocks::uid, blocks_microblocks::height))
                .filter(blocks_microblocks::height.eq_any(heights))
                .order(blocks_microblocks::uid.desc())
                .get_results(self.conn)
        })
        .optional()
        .map_err(build_err_fn(format!(
            "Cannot get prev handled_height with depth {depth}"
        )))
    }

    /// Resolves a block id to its `(uid, height)` pair.
    fn get_block_uid_height(&mut self, block_id: &str) -> Result<UidHeight> {
        blocks_microblocks::table
            .select((blocks_microblocks::uid, blocks_microblocks::height))
            .filter(blocks_microblocks::id.eq(block_id))
            .get_result(self.conn)
            .map_err(build_err_fn(format!(
                "Cannot get block_uid by block id {}",
                block_id
            )))
    }

    /// Uid of the latest key block (rows with a non-null timestamp).
    fn get_key_block_uid(&mut self) -> Result<i64> {
        blocks_microblocks::table
            .select(sql::<Int8>("max(uid)"))
            .filter(blocks_microblocks::time_stamp.is_not_null())
            .get_result(self.conn)
            .map_err(build_err_fn("Cannot get key block uid"))
    }

    /// Id of the newest microblock (null timestamp), if any exists.
    fn get_total_block_id(&mut self) -> Result<Option<String>> {
        blocks_microblocks::table
            .select(blocks_microblocks::id)
            .filter(blocks_microblocks::time_stamp.is_null())
            .order(blocks_microblocks::uid.desc())
            .first(self.conn)
            .optional()
            .map_err(build_err_fn("Cannot get total block id"))
    }

    /// Inserts blocks/microblocks and returns their generated uids, in order.
    fn insert_blocks_or_microblocks(&mut self, blocks: &Vec<BlockMicroblock>) -> Result<Vec<i64>> {
        diesel::insert_into(blocks_microblocks::table)
            .values(blocks)
            .returning(blocks_microblocks::uid)
            .get_results(self.conn)
            .map_err(build_err_fn("Cannot insert blocks/microblocks"))
    }

    /// Rewrites the id of the row with `block_uid` (microblock promotion).
    fn change_block_id(&mut self, block_uid: i64, new_block_id: &str) -> Result<()> {
        diesel::update(blocks_microblocks::table)
            .set(blocks_microblocks::id.eq(new_block_id))
            .filter(blocks_microblocks::uid.eq(block_uid))
            .execute(self.conn)
            .map(drop)
            .map_err(build_err_fn("Cannot change block id"))
    }

    /// Deletes every microblock (rows with a null timestamp).
    fn delete_microblocks(&mut self) -> Result<()> {
        diesel::delete(blocks_microblocks::table)
            .filter(blocks_microblocks::time_stamp.is_null())
            .execute(self.conn)
            .map(drop)
            .map_err(build_err_fn("Cannot delete microblocks"))
    }

    /// Removes all blocks/microblocks with uid above `block_uid`.
    fn rollback_blocks_microblocks(&mut self, block_uid: i64) -> Result<()> {
        diesel::delete(blocks_microblocks::table)
            .filter(blocks_microblocks::uid.gt(block_uid))
            .execute(self.conn)
            .map(drop)
            .map_err(build_err_fn("Cannot rollback blocks/microblocks"))
    }

    /// Inserts WAVES total-quantity snapshots; duplicate quantities are skipped.
    fn insert_waves_data(&mut self, waves_data: &Vec<WavesData>) -> Result<()> {
        diesel::insert_into(waves_data::table)
            .values(waves_data)
            .on_conflict(waves_data::quantity)
            .do_nothing() // its ok to skip same quantity on historical sync
            .execute(self.conn)
            .map(drop)
            .map_err(build_err_fn("Cannot insert waves data"))
    }

    //
    // ASSETS
    //

    /// Next value of the `asset_updates` uid sequence.
    fn get_next_assets_uid(&mut self) -> Result<i64> {
        diesel::select(sql::<BigInt>("nextval('asset_updates_uid_seq')"))
            .get_result(self.conn)
            .map_err(build_err_fn("Cannot get next assets update uid"))
    }

    /// Bulk-inserts asset updates, chunked to respect the bind-param limit.
    fn insert_asset_updates(&mut self, updates: &Vec<AssetUpdate>) -> Result<()> {
        chunked(asset_updates::table, updates, |chunk| {
            diesel::insert_into(asset_updates::table)
                .values(chunk)
                .execute(self.conn)
        })
        .map_err(build_err_fn("Cannot insert new asset updates"))
    }

    /// Bulk-inserts asset origins; an already-known asset_id is left untouched.
    fn insert_asset_origins(&mut self, origins: &Vec<AssetOrigin>) -> Result<()> {
        chunked(asset_origins::table, origins, |chunk| {
            diesel::insert_into(asset_origins::table)
                .values(chunk)
                .on_conflict(asset_origins::asset_id)
                .do_nothing()
                .execute(self.conn)
        })
        .map_err(build_err_fn("Cannot insert new assets"))
    }

    /// Re-points asset updates above `block_uid` at `block_uid` itself
    /// (used when microblocks are squashed into a key block).
    fn update_assets_block_references(&mut self, block_uid: i64) -> Result<()> {
        diesel::update(asset_updates::table)
            .set((asset_updates::block_uid.eq(block_uid),))
            .filter(asset_updates::block_uid.gt(block_uid))
            .execute(self.conn)
            .map(drop)
            .map_err(build_err_fn("Cannot update assets block references"))
    }

    /// Marks currently-open asset rows (superseded_by == MAX_UID) as
    /// superseded by the uid of their newer update.
    fn close_assets_superseded_by(&mut self, updates: &Vec<AssetOverride>) -> Result<()> {
        let (ids, superseded_by_uids): (Vec<&String>, Vec<i64>) =
            updates.iter().map(|u| (&u.id, u.superseded_by)).unzip();

        let q = sql_query(
            "UPDATE asset_updates
            SET superseded_by = updates.superseded_by
            FROM (SELECT UNNEST($1::text[]) as id, UNNEST($2::int8[]) as superseded_by) AS updates
            WHERE asset_updates.asset_id = updates.id AND asset_updates.superseded_by = $3;",
        )
        .bind::<Array<VarChar>, _>(ids)
        .bind::<Array<BigInt>, _>(superseded_by_uids)
        .bind::<BigInt, _>(MAX_UID);

        q.execute(self.conn)
            .map(drop)
            .map_err(build_err_fn("Cannot close assets superseded_by"))
    }

    /// Inverse of `close_assets_superseded_by`: re-opens rows on rollback.
    fn reopen_assets_superseded_by(&mut self, current_superseded_by: &Vec<i64>) -> Result<()> {
        sql_query(
            "UPDATE asset_updates
            SET superseded_by = $1
            FROM (SELECT UNNEST($2) AS superseded_by) AS current
            WHERE asset_updates.superseded_by = current.superseded_by;",
        )
        .bind::<BigInt, _>(MAX_UID)
        .bind::<Array<BigInt>, _>(current_superseded_by)
        .execute(self.conn)
        .map(drop)
        .map_err(build_err_fn("Cannot reopen assets superseded_by"))
    }

    /// Resets the `asset_updates` uid sequence to `new_uid`.
    fn set_assets_next_update_uid(&mut self, new_uid: i64) -> Result<()> {
        // 3rd param - is called; in case of true, value'll be incremented before returning
        sql_query(format!(
            "select setval('asset_updates_uid_seq', {}, false);",
            new_uid
        ))
        .execute(self.conn)
        .map(drop)
        .map_err(build_err_fn("Cannot set assets next update uid"))
    }

    /// Deletes asset updates above `block_uid`, returning what was removed.
    fn rollback_assets(&mut self, block_uid: i64) -> Result<Vec<DeletedAsset>> {
        diesel::delete(asset_updates::table)
            .filter(asset_updates::block_uid.gt(block_uid))
            .returning((asset_updates::uid, asset_updates::asset_id))
            .get_results(self.conn)
            .map(|bs| {
                bs.into_iter()
                    .map(|(uid, id)| DeletedAsset { uid, id })
                    .collect()
            })
            .map_err(build_err_fn("Cannot rollback assets"))
    }

    /// Uids of asset updates referencing blocks above `block_uid`.
    fn assets_gt_block_uid(&mut self, block_uid: i64) -> Result<Vec<i64>> {
        asset_updates::table
            .select(asset_updates::uid)
            .filter(asset_updates::block_uid.gt(block_uid))
            .get_results(self.conn)
            .map_err(build_err_fn(format!(
                "Cannot get assets greater then block_uid {}",
                block_uid
            )))
    }

    /// Bulk-inserts asset ticker rows, chunked.
    fn insert_asset_tickers(&mut self, tickers: &Vec<InsertableAssetTicker>) -> Result<()> {
        chunked(asset_tickers::table, tickers, |chunk| {
            diesel::insert_into(asset_tickers::table)
                .values(chunk)
                .execute(self.conn)
        })
        .map_err(build_err_fn("Cannot insert new asset tickers"))
    }

    /// Deletes tickers above `block_uid`, returning what was removed.
    fn rollback_asset_tickers(&mut self, block_uid: &i64) -> Result<Vec<DeletedAssetTicker>> {
        diesel::delete(asset_tickers::table)
            .filter(asset_tickers::block_uid.gt(block_uid))
            .returning((asset_tickers::uid, asset_tickers::asset_id))
            .get_results(self.conn)
            .map(|bs| {
                bs.into_iter()
                    .map(|(uid, asset_id)| DeletedAssetTicker { uid, asset_id })
                    .collect()
            })
            .map_err(build_err_fn("Cannot rollback asset_tickers"))
    }

    /// Re-points tickers above `block_uid` at `block_uid` itself.
    fn update_asset_tickers_block_references(&mut self, block_uid: i64) -> Result<()> {
        diesel::update(asset_tickers::table)
            .set((asset_tickers::block_uid.eq(block_uid),))
            .filter(asset_tickers::block_uid.gt(block_uid))
            .execute(self.conn)
            .map(drop)
            .map_err(build_err_fn("Cannot update asset tickers block references"))
    }

    /// Inverse of `close_asset_tickers_superseded_by`, used on rollback.
    fn reopen_asset_tickers_superseded_by(
        &mut self,
        current_superseded_by: &Vec<i64>,
    ) -> Result<()> {
        sql_query(
            "UPDATE asset_tickers SET superseded_by = $1 FROM (SELECT UNNEST($2) AS superseded_by) AS current
            WHERE asset_tickers.superseded_by = current.superseded_by;")
        .bind::<BigInt, _>(MAX_UID)
        .bind::<Array<BigInt>, _>(current_superseded_by)
        .execute(self.conn)
        .map(drop)
        .map_err(build_err_fn("Cannot reopen asset_tickers superseded_by"))
    }

    /// Marks currently-open ticker rows as superseded by their newer update.
    fn close_asset_tickers_superseded_by(
        &mut self,
        updates: &Vec<AssetTickerOverride>,
    ) -> Result<()> {
        let (ids, superseded_by_uids): (Vec<&String>, Vec<i64>) = updates
            .iter()
            .map(|u| (&u.asset_id, u.superseded_by))
            .unzip();

        let q = sql_query(
            "UPDATE asset_tickers
            SET superseded_by = updates.superseded_by
            FROM (SELECT UNNEST($1::text[]) as id, UNNEST($2::int8[]) as superseded_by) AS updates
            WHERE asset_tickers.asset_id = updates.id AND asset_tickers.superseded_by = $3;",
        )
        .bind::<Array<VarChar>, _>(ids)
        .bind::<Array<BigInt>, _>(superseded_by_uids)
        .bind::<BigInt, _>(MAX_UID);

        q.execute(self.conn)
            .map(drop)
            .map_err(build_err_fn("Cannot close asset_tickers superseded_by"))
    }

    /// Resets the `asset_tickers` uid sequence to `new_uid`.
    fn set_asset_tickers_next_update_uid(&mut self, new_uid: i64) -> Result<()> {
        // 3rd param - is called; in case of true, value'll be incremented before returning
        sql_query(format!(
            "select setval('asset_tickers_uid_seq', {}, false);",
            new_uid
        ))
        .execute(self.conn)
        .map(drop)
        .map_err(build_err_fn("Cannot set asset_tickers next update uid"))
    }

    /// Next value of the `asset_tickers` uid sequence.
    fn get_next_asset_tickers_uid(&mut self) -> Result<i64> {
        diesel::select(sql::<BigInt>("nextval('asset_tickers_uid_seq')"))
            .get_result(self.conn)
            .map_err(build_err_fn("Cannot get next asset tickers update uid"))
    }

    //
    // TRANSACTIONS
    //

    /// Re-points transactions above `block_uid` at `block_uid` itself.
    fn update_transactions_references(&mut self, block_uid: i64) -> Result<()> {
        diesel::update(txs::table)
            .set((txs::block_uid.eq(block_uid),))
            .filter(txs::block_uid.gt(block_uid))
            .execute(self.conn)
            .map(drop)
            .map_err(build_err_fn("Cannot update transactions references"))
    }

    /// Deletes transactions above `block_uid`.
    fn rollback_transactions(&mut self, block_uid: i64) -> Result<()> {
        diesel::delete(txs::table)
            .filter(txs::block_uid.gt(block_uid))
            .execute(self.conn)
            .map(drop)
            .map_err(build_err_fn("Cannot rollback transactions"))
    }

    /// Inserts Genesis (type 1) transactions, chunked.
    fn insert_txs_1(&mut self, txs: Vec<Tx1>) -> Result<()> {
        chunked(txs_1::table, &txs, |chunk| {
            diesel::insert_into(txs_1::table)
                .values(chunk)
                .execute(self.conn)
        })
        .map_err(build_err_fn("Cannot insert Genesis transactions"))
    }

    /// Inserts Payment (type 2) transactions, chunked.
    fn insert_txs_2(&mut self, txs: Vec<Tx2>) -> Result<()> {
        chunked(txs_2::table, &txs, |chunk| {
            diesel::insert_into(txs_2::table)
                .values(chunk)
                .execute(self.conn)
        })
        .map_err(build_err_fn("Cannot insert Payment transactions"))
    }

    /// Inserts Issue (type 3) transactions, chunked.
    fn insert_txs_3(&mut self, txs: Vec<Tx3>) -> Result<()> {
        chunked(txs_3::table, &txs, |chunk| {
            diesel::insert_into(txs_3::table)
                .values(chunk)
                .execute(self.conn)
        })
        .map_err(build_err_fn("Cannot insert Issue transactions"))
    }

    /// Inserts Transfer (type 4) transactions, chunked.
    fn insert_txs_4(&mut self, txs: Vec<Tx4>) -> Result<()> {
        chunked(txs_4::table, &txs, |chunk| {
            diesel::insert_into(txs_4::table)
                .values(chunk)
                .execute(self.conn)
        })
        .map_err(build_err_fn("Cannot insert Transfer transactions"))
    }

    /// Inserts Reissue (type 5) transactions, chunked.
    fn insert_txs_5(&mut self, txs: Vec<Tx5>) -> Result<()> {
        chunked(txs_5::table, &txs, |chunk| {
            diesel::insert_into(txs_5::table)
                .values(chunk)
                .execute(self.conn)
        })
        .map_err(build_err_fn("Cannot insert Reissue transactions"))
    }

    /// Inserts Burn (type 6) transactions, chunked.
    fn insert_txs_6(&mut self, txs: Vec<Tx6>) -> Result<()> {
        chunked(txs_6::table, &txs, |chunk| {
            diesel::insert_into(txs_6::table)
                .values(chunk)
                .execute(self.conn)
        })
        .map_err(build_err_fn("Cannot insert Burn transactions"))
    }

    /// Inserts Exchange (type 7) transactions, chunked.
    fn insert_txs_7(&mut self, txs: Vec<Tx7>) -> Result<()> {
        chunked(txs_7::table, &txs, |chunk| {
            diesel::insert_into(txs_7::table)
                .values(chunk)
                .execute(self.conn)
        })
        .map_err(build_err_fn("Cannot insert Exchange transactions"))
    }

    /// Inserts Lease (type 8) transactions, chunked.
    fn insert_txs_8(&mut self, txs: Vec<Tx8>) -> Result<()> {
        chunked(txs_8::table, &txs, |chunk| {
            diesel::insert_into(txs_8::table)
                .values(chunk)
                .execute(self.conn)
        })
        .map_err(build_err_fn("Cannot insert Lease transactions"))
    }

    /// Inserts LeaseCancel (type 9) transactions. Each tx references the lease
    /// it cancels by tx id, so lease ids are first resolved to tx uids and the
    /// resolved uid (if found) is joined into the row before insertion.
    fn insert_txs_9(&mut self, txs: Vec<Tx9Partial>) -> Result<()> {
        let lease_ids = txs
            .iter()
            .filter_map(|tx| tx.lease_id.as_ref())
            .collect::<Vec<_>>();
        let tx_id_uid = chunked_with_result(txs::table, &lease_ids, |ids| {
            txs::table
                .select((txs::id, txs::uid))
                .filter(txs::id.eq_any(ids))
                .get_results(self.conn)
        })
        .map_err(build_err_fn("Cannot find uids for lease_ids"))?;

        let tx_id_uid_map = HashMap::<String, i64>::from_iter(tx_id_uid);
        let txs9 = txs
            .into_iter()
            .map(|tx| {
                Tx9::from((
                    &tx,
                    // None when the referenced lease tx is unknown to the DB.
                    tx.lease_id
                        .as_ref()
                        .and_then(|lease_id| tx_id_uid_map.get(lease_id))
                        .cloned(),
                ))
            })
            .collect::<Vec<_>>();

        chunked(txs_9::table, &txs9, |chunk| {
            diesel::insert_into(txs_9::table)
                .values(chunk)
                .execute(self.conn)
        })
        .map_err(build_err_fn("Cannot insert LeaseCancel transactions"))
    }

    /// Inserts CreateAlias (type 10) transactions, chunked.
    fn insert_txs_10(&mut self, txs: Vec<Tx10>) -> Result<()> {
        chunked(txs_10::table, &txs, |chunk| {
            diesel::insert_into(txs_10::table)
                .values(chunk)
                .execute(self.conn)
        })
        .map_err(build_err_fn("Cannot insert CreateAlias transactions"))
    }

    /// Inserts MassTransfer (type 11) transactions and their per-recipient
    /// transfer rows into the two related tables.
    fn insert_txs_11(&mut self, txs: Vec<Tx11Combined>) -> Result<()> {
        let (txs11, transfers): (Vec<Tx11>, Vec<Vec<Tx11Transfers>>) =
            txs.into_iter().map(|t| (t.tx, t.transfers)).unzip();
        let transfers = transfers.into_iter().flatten().collect::<Vec<_>>();

        chunked(txs_11::table, &txs11, |chunk| {
            diesel::insert_into(txs_11::table)
                .values(chunk)
                .execute(self.conn)
        })
        .map_err(build_err_fn("Cannot insert MassTransfer transactions"))?;

        chunked(txs_11_transfers::table, &transfers, |chunk| {
            diesel::insert_into(txs_11_transfers::table)
                .values(chunk)
                .execute(self.conn)
        })
        .map_err(build_err_fn("Cannot insert MassTransfer transfers"))
    }

    /// Inserts DataTransaction (type 12) transactions and their data entries.
    fn insert_txs_12(&mut self, txs: Vec<Tx12Combined>) -> Result<()> {
        let (txs12, data): (Vec<Tx12>, Vec<Vec<Tx12Data>>) =
            txs.into_iter().map(|t| (t.tx, t.data)).unzip();
        let data = data.into_iter().flatten().collect::<Vec<_>>();

        chunked(txs_12::table, &txs12, |chunk| {
            diesel::insert_into(txs_12::table)
                .values(chunk)
                .execute(self.conn)
        })
        .map_err(build_err_fn("Cannot insert DataTransaction transaction"))?;

        chunked(txs_12_data::table, &data, |chunk| {
            diesel::insert_into(txs_12_data::table)
                .values(chunk)
                .execute(self.conn)
        })
        .map_err(build_err_fn("Cannot insert DataTransaction data"))
    }

    /// Inserts SetScript (type 13) transactions, chunked.
    fn insert_txs_13(&mut self, txs: Vec<Tx13>) -> Result<()> {
        chunked(txs_13::table, &txs, |chunk| {
            diesel::insert_into(txs_13::table)
                .values(chunk)
                .execute(self.conn)
        })
        .map_err(build_err_fn("Cannot insert SetScript transactions"))
    }

    /// Inserts SponsorFee (type 14) transactions, chunked.
    fn insert_txs_14(&mut self, txs: Vec<Tx14>) -> Result<()> {
        chunked(txs_14::table, &txs, |chunk| {
            diesel::insert_into(txs_14::table)
                .values(chunk)
                .execute(self.conn)
        })
        .map_err(build_err_fn("Cannot insert SponsorFee transactions"))
    }

    /// Inserts SetAssetScript (type 15) transactions, chunked.
    fn insert_txs_15(&mut self, txs: Vec<Tx15>) -> Result<()> {
        chunked(txs_15::table, &txs, |chunk| {
            diesel::insert_into(txs_15::table)
                .values(chunk)
                .execute(self.conn)
        })
        .map_err(build_err_fn("Cannot insert SetAssetScript transactions"))
    }

    /// Inserts InvokeScript (type 16) transactions plus their call arguments
    /// and attached payments into the three related tables.
    fn insert_txs_16(&mut self, txs: Vec<Tx16Combined>) -> Result<()> {
        let (txs16, data): (Vec<Tx16>, Vec<(Vec<Tx16Args>, Vec<Tx16Payment>)>) = txs
            .into_iter()
            .map(|t| (t.tx, (t.args, t.payments)))
            .unzip();
        let (args, payments): (Vec<Vec<Tx16Args>>, Vec<Vec<Tx16Payment>>) =
            data.into_iter().unzip();
        let args = args.into_iter().flatten().collect::<Vec<_>>();
        let payments = payments.into_iter().flatten().collect::<Vec<_>>();

        chunked(txs_16::table, &txs16, |chunk| {
            diesel::insert_into(txs_16::table)
                .values(chunk)
                .execute(self.conn)
        })
        .map_err(build_err_fn("Cannot insert InvokeScript transactions"))?;

        chunked(txs_16_args::table, &args, |chunk| {
            diesel::insert_into(txs_16_args::table)
                .values(chunk)
                .execute(self.conn)
        })
        .map_err(build_err_fn("Cannot insert InvokeScript args"))?;

        chunked(txs_16_payment::table, &payments, |chunk| {
            diesel::insert_into(txs_16_payment::table)
                .values(chunk)
                .execute(self.conn)
        })
        .map_err(build_err_fn("Cannot insert InvokeScript payments"))
    }

    /// Inserts UpdateAssetInfo (type 17) transactions, chunked.
    fn insert_txs_17(&mut self, txs: Vec<Tx17>) -> Result<()> {
        chunked(txs_17::table, &txs, |chunk| {
            diesel::insert_into(txs_17::table)
                .values(chunk)
                .execute(self.conn)
        })
        .map_err(build_err_fn("Cannot insert UpdateAssetInfo transactions"))
    }

    /// Inserts Ethereum (type 18) transactions plus their invoke arguments
    /// and payments into the three related tables.
    fn insert_txs_18(&mut self, txs: Vec<Tx18Combined>) -> Result<()> {
        let (txs18, data): (Vec<Tx18>, Vec<(Vec<Tx18Args>, Vec<Tx18Payment>)>) = txs
            .into_iter()
            .map(|t| (t.tx, (t.args, t.payments)))
            .unzip();
        let (args, payments): (Vec<Vec<Tx18Args>>, Vec<Vec<Tx18Payment>>) =
            data.into_iter().unzip();
        let args = args.into_iter().flatten().collect::<Vec<_>>();
        let payments = payments.into_iter().flatten().collect::<Vec<_>>();

        chunked(txs_18::table, &txs18, |chunk| {
            diesel::insert_into(txs_18::table)
                .values(chunk)
                .execute(self.conn)
        })
        .map_err(build_err_fn("Cannot insert Ethereum transactions"))?;

        chunked(txs_18_args::table, &args, |chunk| {
            diesel::insert_into(txs_18_args::table)
                .values(chunk)
                .execute(self.conn)
        })
        .map_err(build_err_fn("Cannot insert Ethereum InvokeScript args"))?;

        chunked(txs_18_payment::table, &payments, |chunk| {
            diesel::insert_into(txs_18_payment::table)
                .values(chunk)
                .execute(self.conn)
        })
        .map_err(build_err_fn("Cannot insert Ethereum InvokeScript payments"))
    }

    //
    // CANDLES
    //

    /// Recomputes candles affected by the block `block_uid`, starting from the
    /// minute of its earliest exchange (type 7) tx; no-op if the block has none.
    fn calculate_candles_since_block_uid(&mut self, block_uid: i64) -> Result<()> {
        let first_tx7_in_block_ts = match txs_7::table
            .select(txs_7::time_stamp)
            .filter(txs_7::block_uid.eq(block_uid))
            .order(txs_7::time_stamp.asc())
            .first::<NaiveDateTime>(self.conn)
            .optional()
            .map_err(build_err_fn("Cannot find exchange txs"))?
        {
            // Truncate to the start of the minute — candle granularity.
            Some(ts) => ts
                .with_second(0)
                .and_then(|ts| ts.with_nanosecond(0))
                .unwrap(),
            None => return Ok(()),
        };

        self.calculate_minute_candles(first_tx7_in_block_ts)?;
        self.calculate_non_minute_candles(first_tx7_in_block_ts)
    }

    /// Upserts 1-minute candles from exchange txs at/after `since_timestamp`.
    /// For tx_version > 2 the price is rescaled by the asset decimals via the
    /// DB function `get_decimals_or_exception` (raises when decimals are missing).
    fn calculate_minute_candles(&mut self, since_timestamp: NaiveDateTime) -> Result<()> {
        let insert_candles_query = r#"
        INSERT INTO candles
        SELECT
            e.candle_time,
            amount_asset_id,
            price_asset_id,
            min(e.price) AS low,
            max(e.price) AS high,
            sum(e.amount) AS volume,
            sum((e.amount)::numeric * (e.price)::numeric) AS quote_volume,
            max(height) AS max_height,
            count(e.price) AS txs_count,
            floor(sum((e.amount)::numeric * (e.price)::numeric) / sum((e.amount)::numeric))::numeric
                AS weighted_average_price,
            (array_agg(e.price ORDER BY e.uid)::numeric[])[1] AS open,
            (array_agg(e.price ORDER BY e.uid DESC)::numeric[])[1] AS close,
            '1m' AS interval,
            e.sender AS matcher_address
        FROM
            (SELECT
                date_trunc('minute', time_stamp) AS candle_time,
                uid,
                amount_asset_id,
                price_asset_id,
                sender,
                height,
                amount,
                CASE WHEN tx_version > 2
                    THEN price::numeric
                        * 10^get_decimals_or_exception(price_asset_id)
                        * 10^(-get_decimals_or_exception(amount_asset_id))
                    ELSE price::numeric
                END price
            FROM txs_7
            WHERE time_stamp >= $1 ORDER BY uid, time_stamp <-> $1) AS e
        GROUP BY
            e.candle_time,
            e.amount_asset_id,
            e.price_asset_id,
            e.sender
        ON CONFLICT (time_start, amount_asset_id, price_asset_id, matcher_address, interval) DO UPDATE
        SET open = excluded.open,
            close = excluded.close,
            low = excluded.low,
            high = excluded.high,
            max_height = excluded.max_height,
            quote_volume = excluded.quote_volume,
            txs_count = excluded.txs_count,
            volume = excluded.volume,
            weighted_average_price = excluded.weighted_average_price;
        "#;

        sql_query(insert_candles_query)
            .bind::<Timestamp, _>(since_timestamp)
            .execute(self.conn)
            .map(drop)
            .map_err(build_err_fn("Cannot calculate minute candles"))
    }

    /// Rolls candles up from finer to coarser intervals: for every
    /// `[source, target]` pair in CANDLE_INTERVALS, aggregates source-interval
    /// candles since the target-aligned start of `since_timestamp` and upserts
    /// the target-interval rows.
    fn calculate_non_minute_candles(&mut self, since_timestamp: NaiveDateTime) -> Result<()> {
        let insert_candles_query = r#"
        INSERT INTO candles
        SELECT
            _to_raw_timestamp(time_start, $2) AS candle_time,
            amount_asset_id,
            price_asset_id,
            min(low) AS low,
            max(high) AS high,
            sum(volume) AS volume,
            sum(quote_volume) AS quote_volume,
            max(max_height) AS max_height,
            sum(txs_count) as txs_count,
            floor(sum((weighted_average_price * volume)::numeric)::numeric / sum(volume)::numeric)::numeric
                AS weighted_average_price,
            (array_agg(open ORDER BY time_start)::numeric[])[1] AS open,
            (array_agg(close ORDER BY time_start DESC)::numeric[])[1] AS close,
            $2 AS interval,
            matcher_address
        FROM candles
        WHERE interval = $1
            AND time_start >= $3
        GROUP BY candle_time, amount_asset_id, price_asset_id, matcher_address

        ON CONFLICT (time_start, amount_asset_id, price_asset_id, matcher_address, interval) DO UPDATE
        SET open = excluded.open,
            close = excluded.close,
            low = excluded.low,
            high = excluded.high,
            max_height = excluded.max_height,
            quote_volume = excluded.quote_volume,
            txs_count = excluded.txs_count,
            volume = excluded.volume,
            weighted_average_price = excluded.weighted_average_price;
        "#;

        for interval in CANDLE_INTERVALS {
            let [interval_start, interval_end] = interval;

            // Align the recalculation window to the target interval's start:
            // fixed-length intervals truncate the epoch-seconds; week/month
            // intervals need calendar-aware alignment.
            let interval_start_time_stamp =
                if let Some(interval_secs) = interval_in_seconds(&interval_end) {
                    NaiveDateTime::from_timestamp_opt(
                        (since_timestamp.timestamp() / interval_secs) * interval_secs,
                        0,
                    )
                    .unwrap()
                } else {
                    match *interval_end {
                        intervals::WEEK1 => {
                            // Back to Monday 00:00 of the same week.
                            let weekday = since_timestamp.weekday().num_days_from_monday() as i64;
                            (since_timestamp - Duration::days(weekday))
                                .date()
                                .and_hms_opt(0, 0, 0)
                                .unwrap()
                        }
                        intervals::MONTH1 => since_timestamp
                            .with_day(1)
                            .unwrap()
                            .date()
                            .and_hms_opt(0, 0, 0)
                            .unwrap(),
                        _ => bail!("unknown interval {interval_end}"),
                    }
                };

            sql_query(insert_candles_query)
                .bind::<VarChar, _>(interval_start)
                .bind::<VarChar, _>(interval_end)
                .bind::<Timestamp, _>(interval_start_time_stamp)
                .execute(self.conn)
                .map_err(build_err_fn(format!(
                    "Cannot insert candles with [{interval_start}; {interval_end}] interval"
                )))?;
        }
        Ok(())
    }

    /// Deletes candles newer than the minute of the earliest exchange tx in
    /// the block being rolled back; no-op if that block has no exchange txs.
    fn rollback_candles(&mut self, block_uid: i64) -> Result<()> {
        let first_tx7_in_block_ts = match txs_7::table
            .select(txs_7::time_stamp)
            .filter(txs_7::block_uid.eq(block_uid))
            .order(txs_7::time_stamp.asc())
            .first::<NaiveDateTime>(self.conn)
            .optional()
            .map_err(build_err_fn("Cannot find exchange txs in rollback"))?
        {
            Some(ts) => ts
                .with_second(0)
                .and_then(|ts| ts.with_nanosecond(0))
                .unwrap(),
            None => return Ok(()),
        };

        diesel::delete(candles::table)
            .filter(candles::time_start.gt(first_tx7_in_block_ts))
            .execute(self.conn)
            .map(drop)
            .map_err(build_err_fn("Cannot rollback candles"))
    }
}

/// Runs `query_fn` over `values` in chunks sized so that
/// (rows * table columns) stays below the Postgres bind-parameter limit,
/// concatenating the per-chunk results. The table argument is only used to
/// count columns at compile time.
fn chunked_with_result<T, V, R, F>(
    _: T,
    values: &Vec<V>,
    mut query_fn: F,
) -> Result<Vec<R>, DslError>
where
    T: Table,
    T::AllColumns: TupleLen,
    F: FnMut(&[V]) -> Result<Vec<R>, DslError>,
{
    let columns_count = T::all_columns().len();
    // Rounded down to a multiple of 10 for a small safety margin.
    let chunk_size = (PG_MAX_INSERT_FIELDS_COUNT / columns_count) / 10 * 10;
    let mut result = vec![];
    values
        .chunks(chunk_size)
        .into_iter()
        .try_fold((), |_, chunk| {
            result.extend(query_fn(chunk)?);
            Ok::<_, DslError>(())
        })?;
    Ok(result)
}

/// Result-discarding variant of `chunked_with_result` for plain inserts.
#[inline]
fn chunked<T, V, F>(table: T, values: &Vec<V>, mut query_fn: F) -> Result<(), DslError>
where
    T: Table,
    T::AllColumns: TupleLen,
    F: FnMut(&[V]) -> Result<usize, DslError>, //allows only dsl_query.execute()
{
    chunked_with_result(table, values, |v| query_fn(v).map(|_| Vec::<()>::new())).map(drop)
}

/// Builds a closure that wraps a diesel error into the app error type with
/// `msg` attached as anyhow context.
fn build_err_fn(msg: impl AsRef<str>) -> impl Fn(DslError) -> Error {
    move |err| {
        let ctx = format!("{}", msg.as_ref());
        Error::new(AppError::DbDieselError(err)).context(ctx)
    }
}