├── config
│   ├── pgpass
│   ├── pgpass-test
│   ├── mainnet-config.yaml
│   ├── allegra-config.yaml
│   ├── testnet-config.yaml
│   ├── launchpad-config.yaml
│   └── shelley-qa-config.yaml
├── nix
│   ├── nixos
│   │   ├── module-list.nix
│   │   ├── default.nix
│   │   ├── tests
│   │   │   ├── default.nix
│   │   │   └── smash-test.nix
│   │   └── smash-service.nix
│   ├── util.nix
│   ├── stack-shell.nix
│   ├── regenerate.sh
│   ├── pkgs.nix
│   ├── scripts.nix
│   ├── default.nix
│   ├── sources.json
│   ├── haskell.nix
│   └── sources.nix
├── smash
│   ├── Setup.hs
│   ├── test
│   │   ├── Spec.hs
│   │   ├── DBSpec.hs
│   │   └── MigrationSpec.hs
│   ├── src
│   │   └── Cardano
│   │       └── SMASH
│   │           ├── DBSync
│   │           │   ├── Db
│   │           │   │   ├── Delete.hs
│   │           │   │   ├── Migration
│   │           │   │   │   ├── Version.hs
│   │           │   │   │   └── Haskell.hs
│   │           │   │   ├── PGConfig.hs
│   │           │   │   ├── Insert.hs
│   │           │   │   ├── Schema.hs
│   │           │   │   ├── Run.hs
│   │           │   │   ├── Database.hs
│   │           │   │   └── Migration.hs
│   │           │   └── Metrics.hs
│   │           ├── FetchQueue.hs
│   │           └── HttpClient.hs
│   ├── smash.cabal
│   └── LICENSE
├── smash-servant-types
│   ├── Setup.hs
│   ├── smash-servant-types.cabal
│   ├── src
│   │   └── Cardano
│   │       └── SMASH
│   │           ├── DBSync
│   │           │   └── Db
│   │           │       ├── Error.hs
│   │           │       └── Types.hs
│   │           └── API.hs
│   └── LICENSE
├── cabal.project.local
├── .gitignore
├── doc
│   ├── .sphinx
│   │   ├── cardano-logo.png
│   │   ├── _templates
│   │   │   └── layout.html
│   │   └── requirements.txt
│   ├── index.rst
│   ├── conf.py
│   └── getting-started
│       └── how-to-run-smash.md
├── test_pool.json
├── scripts
│   ├── nix
│   │   └── stack-shell.nix
│   ├── buildkite
│   │   ├── default.nix
│   │   ├── stack-cabal-sync.sh
│   │   └── rebuild.hs
│   └── postgresql-setup.sh
├── bors.toml
├── schema
│   ├── force-resync.sql
│   ├── migration-2-0007-20210114.sql
│   ├── migration-2-0008-20210215.sql
│   ├── migration-1-0000-20200610.sql
│   ├── migration-2-0004-20201006.sql
│   ├── migration-2-0006-20210108.sql
│   ├── migration-2-0005-20201203.sql
│   ├── migration-1-0001-20200611.sql
│   ├── migration-2-0003-20201001.sql
│   ├── migration-2-0002-20200904.sql
│   ├── migration-2-0009-20210818.sql
│   └── migration-2-0001-20200810.sql
├── .readthedocs.yml
├── .buildkite
│   └── pipeline.yml
├── README.rst
├── shell.nix
├── default.nix
├── release.nix
├── ChangeLog.md
├── cabal.project
└── stack.yaml
/config/pgpass:
--------------------------------------------------------------------------------
1 | /var/run/postgresql:5432:smash:*:*
2 |
--------------------------------------------------------------------------------
/config/pgpass-test:
--------------------------------------------------------------------------------
1 | /var/run/postgresql:5432:smash-test:*:*
2 |
--------------------------------------------------------------------------------
/nix/nixos/module-list.nix:
--------------------------------------------------------------------------------
1 | [
2 | ./smash-service.nix
3 | ]
4 |
--------------------------------------------------------------------------------
/smash/Setup.hs:
--------------------------------------------------------------------------------
1 | import Distribution.Simple
2 | main = defaultMain
3 |
--------------------------------------------------------------------------------
/nix/nixos/default.nix:
--------------------------------------------------------------------------------
1 |
2 | {
3 | imports = import ./module-list.nix;
4 | }
5 |
--------------------------------------------------------------------------------
/smash-servant-types/Setup.hs:
--------------------------------------------------------------------------------
1 | import Distribution.Simple
2 | main = defaultMain
3 |
--------------------------------------------------------------------------------
/cabal.project.local:
--------------------------------------------------------------------------------
1 | package cardano-crypto-praos
2 | flags: -external-libsodium-vrf
3 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | .stack-work/
2 | dist-newstyle/
3 | *~
4 | tags
5 | stack.yaml.lock
6 | result*
7 |
--------------------------------------------------------------------------------
/doc/.sphinx/cardano-logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/input-output-hk/smash/HEAD/doc/.sphinx/cardano-logo.png
--------------------------------------------------------------------------------
/doc/.sphinx/_templates/layout.html:
--------------------------------------------------------------------------------
1 | {% extends "!layout.html" %}
2 |
3 | {% block footer %}
4 | {{ super() }}
5 |
7 | {% endblock %}
--------------------------------------------------------------------------------
/test_pool.json:
--------------------------------------------------------------------------------
1 | {"name": "test", "description": "This is a test pool", "ticker": "testy", "homepage": "https://github.com/input-output-hk/cardano-db-sync/tree/master/cardano-db/src/Cardano/Db"}
2 |
--------------------------------------------------------------------------------
/scripts/nix/stack-shell.nix:
--------------------------------------------------------------------------------
1 |
2 | with import ./. {};
3 |
4 | haskell.lib.buildStackProject {
5 | name = "stack-env";
6 | buildInputs = with pkgs; [ zlib openssl git ];
7 | ghc = (import ../shell.nix {inherit pkgs;}).ghc;
8 | }
9 |
--------------------------------------------------------------------------------
/nix/util.nix:
--------------------------------------------------------------------------------
1 | { haskell-nix }:
2 |
3 | with haskell-nix.haskellLib;
4 | {
5 | inherit
6 | selectProjectPackages
7 | collectComponents';
8 |
9 | inherit (extra)
10 | recRecurseIntoAttrs
11 | collectChecks;
12 | }
13 |
--------------------------------------------------------------------------------
/bors.toml:
--------------------------------------------------------------------------------
1 | status = [
2 | "buildkite/smash",
3 | "ci/hydra-eval",
4 | "ci/hydra-build:required",
5 | ]
6 | timeout_sec = 7200
7 | required_approvals = 1
8 | block_labels = [ "WIP", "DO NOT MERGE" ]
9 | delete_merged_branches = true
10 |
--------------------------------------------------------------------------------
/nix/stack-shell.nix:
--------------------------------------------------------------------------------
1 | with import ./. {};
2 |
3 | haskell.lib.buildStackProject {
4 | name = "stack-env";
5 | buildInputs = with pkgs; [ zlib openssl gmp libffi git systemd haskellPackages.happy perl ];
6 | ghc = (import ../shell.nix {inherit pkgs;}).ghc;
7 | }
8 |
--------------------------------------------------------------------------------
/schema/force-resync.sql:
--------------------------------------------------------------------------------
1 | -- Clear block information, force re-sync.
2 | TRUNCATE pool_metadata;
3 | TRUNCATE pool_metadata_reference CASCADE;
4 | TRUNCATE pool;
5 | TRUNCATE retired_pool;
6 | TRUNCATE pool_metadata_fetch_error CASCADE;
7 | TRUNCATE block;
8 | TRUNCATE meta;
9 |
--------------------------------------------------------------------------------
/.readthedocs.yml:
--------------------------------------------------------------------------------
1 |
2 | version: 2
3 |
4 | sphinx:
5 | configuration: doc/conf.py
6 |
7 | # Optionally set the version of Python and requirements required to build your docs
8 | python:
9 | version: 3.7
10 | install:
11 | - requirements: doc/.sphinx/requirements.txt
--------------------------------------------------------------------------------
/smash/test/Spec.hs:
--------------------------------------------------------------------------------
1 | module Main where
2 |
3 | import Cardano.Prelude
4 |
5 | import Test.Hspec (hspec)
6 |
7 | -- | Entry point for tests.
8 | main :: IO ()
9 | main = hspec $ pure ()
10 | -- describe "SMASH tests" smashSpec
11 | -- describe "SMASH state machine tests" smashSpecSM
12 |
13 |
--------------------------------------------------------------------------------
/doc/index.rst:
--------------------------------------------------------------------------------
1 | #####
2 | SMASH
3 | #####
4 |
5 | .. include:: ../README.rst
6 |
7 | .. toctree::
8 | :titlesonly:
9 | :hidden:
10 |
11 | Go Back to Cardano Documentation
12 |
13 | .. toctree::
14 | :maxdepth: 1
15 | :caption: Getting Started
16 | :titlesonly:
17 | :hidden:
18 |
19 | getting-started/how-to-install-smash
20 | getting-started/how-to-run-smash
21 |
--------------------------------------------------------------------------------
/.buildkite/pipeline.yml:
--------------------------------------------------------------------------------
1 | steps:
2 |
3 | - label: 'stack-cabal-sync'
4 | command: 'nix-shell ./nix -A iohkNix.stack-cabal-sync-shell --run scripts/buildkite/stack-cabal-sync.sh'
5 | agents:
6 | system: x86_64-linux
7 |
8 | - label: 'check-cabal-project'
9 | command: 'nix-build ./nix -A iohkNix.checkCabalProject -o check-cabal-project.sh && ./check-cabal-project.sh'
10 | agents:
11 | system: x86_64-linux
12 |
--------------------------------------------------------------------------------
/nix/regenerate.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | set -euo pipefail
3 | cd $(git rev-parse --show-toplevel)
4 |
5 | exec $(nix-build `dirname $0`/. -A iohkNix.cabalProjectRegenerate --no-out-link --option substituters "https://hydra.iohk.io https://cache.nixos.org" --option trusted-substituters "" --option trusted-public-keys "hydra.iohk.io:f/Ea+s+dFdN+3Y/G+FDgSq+a5NEWhJGzdjvKNGv0/EQ= cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY=")/bin/cabal-project-regenerate
6 |
--------------------------------------------------------------------------------
/nix/nixos/tests/default.nix:
--------------------------------------------------------------------------------
1 | { pkgs
2 | , supportedSystems ? [ "x86_64-linux" ]
3 | }:
4 |
5 | with pkgs;
6 | with pkgs.commonLib;
7 |
8 | let
9 | forAllSystems = genAttrs supportedSystems;
10 | importTest = fn: args: system: let
11 | imported = import fn;
12 | test = import (pkgs.path + "/nixos/tests/make-test-python.nix") imported;
13 | in test ({
14 | inherit pkgs system config;
15 | } // args);
16 | callTest = fn: args: forAllSystems (system: hydraJob (importTest fn args system));
17 | in rec {
18 |
19 | smashTest = callTest ./smash-test.nix {};
20 | }
21 |
--------------------------------------------------------------------------------
/schema/migration-2-0007-20210114.sql:
--------------------------------------------------------------------------------
1 | -- Persistent generated migration.
2 |
3 | CREATE FUNCTION migrate() RETURNS void AS $$
4 | DECLARE
5 | next_version int ;
6 | BEGIN
7 | SELECT stage_two + 1 INTO next_version FROM schema_version ;
8 | IF next_version = 7 THEN
9 | ALTER TABLE "retired_pool" ADD COLUMN "block_no" uinteger NOT NULL;
10 | -- Hand written SQL statements can be added here.
11 | UPDATE schema_version SET stage_two = 7 ;
12 | RAISE NOTICE 'DB has been migrated to stage_two version %', next_version ;
13 | END IF ;
14 | END ;
15 | $$ LANGUAGE plpgsql ;
16 |
17 | SELECT migrate() ;
18 |
19 | DROP FUNCTION migrate() ;
20 |
--------------------------------------------------------------------------------
/schema/migration-2-0008-20210215.sql:
--------------------------------------------------------------------------------
1 | -- Persistent generated migration.
2 |
3 | CREATE FUNCTION migrate() RETURNS void AS $$
4 | DECLARE
5 | next_version int ;
6 | BEGIN
7 | SELECT stage_two + 1 INTO next_version FROM schema_version ;
8 | IF next_version = 8 THEN
9 | ALTER TABLE "meta" DROP COLUMN "protocol_const";
10 | ALTER TABLE "meta" DROP COLUMN "slot_duration";
11 | ALTER TABLE "meta" DROP COLUMN "slots_per_epoch";
12 | -- Hand written SQL statements can be added here.
13 | UPDATE schema_version SET stage_two = 8 ;
14 | RAISE NOTICE 'DB has been migrated to stage_two version %', next_version ;
15 | END IF ;
16 | END ;
17 | $$ LANGUAGE plpgsql ;
18 |
19 | SELECT migrate() ;
20 |
21 | DROP FUNCTION migrate() ;
22 |
--------------------------------------------------------------------------------
/schema/migration-1-0000-20200610.sql:
--------------------------------------------------------------------------------
1 | -- Hand written migration that creates a 'schema_version' table and initializes it.
2 |
3 | CREATE FUNCTION init() RETURNS void AS $$
4 |
5 | DECLARE
6 | emptyDB boolean;
7 |
8 | BEGIN
9 | SELECT NOT EXISTS (SELECT 1 FROM information_schema.tables WHERE table_name='schema_version') INTO emptyDB;
10 | IF emptyDB THEN
11 | CREATE TABLE "schema_version" (id SERIAL PRIMARY KEY UNIQUE, stage_one INT8 NOT NULL, stage_two INT8 NOT NULL, stage_three INT8 NOT NULL);
12 | INSERT INTO "schema_version" (stage_one, stage_two, stage_three) VALUES (0, 0, 0);
13 |
14 | RAISE NOTICE 'DB has been initialized';
15 | END IF;
16 | END;
17 |
18 | $$ LANGUAGE plpgsql;
19 |
20 | SELECT init();
21 |
22 | DROP FUNCTION init();
23 |
--------------------------------------------------------------------------------
/schema/migration-2-0004-20201006.sql:
--------------------------------------------------------------------------------
1 | -- Persistent generated migration.
2 |
3 | CREATE FUNCTION migrate() RETURNS void AS $$
4 | DECLARE
5 | next_version int ;
6 | BEGIN
7 | SELECT stage_two + 1 INTO next_version FROM schema_version ;
8 | IF next_version = 4 THEN
9 | CREATe TABLE "retired_pool"("id" SERIAL8 PRIMARY KEY UNIQUE,"pool_id" text NOT NULL);
10 | ALTER TABLE "retired_pool" ADD CONSTRAINT "unique_retired_pool_id" UNIQUE("pool_id");
11 | -- Hand written SQL statements can be added here.
12 | UPDATE schema_version SET stage_two = 4 ;
13 | RAISE NOTICE 'DB has been migrated to stage_two version %', next_version ;
14 | END IF ;
15 | END ;
16 | $$ LANGUAGE plpgsql ;
17 |
18 | SELECT migrate() ;
19 |
20 | DROP FUNCTION migrate() ;
21 |
--------------------------------------------------------------------------------
/scripts/buildkite/default.nix:
--------------------------------------------------------------------------------
1 | { system ? builtins.currentSystem
2 | , config ? {}
3 | , pkgs ? import ../../nix { inherit system config; }
4 | , buildTools ? with pkgs; [ git nix gnumake ]
5 | }:
6 |
7 | with pkgs.lib;
8 | with pkgs;
9 |
10 | let
11 | stack-hpc-coveralls = iohkNix.stack-hpc-coveralls;
12 | stackRebuild = runCommand "stack-rebuild" {} ''
13 | ${haskellPackages.ghcWithPackages (ps: [ps.turtle ps.safe ps.transformers])}/bin/ghc -o $out ${./rebuild.hs}
14 | '';
15 |
16 | buildTools =
17 | [ git gzip nix gnumake stack gnused gnutar coreutils stack-hpc-coveralls systemd ];
18 |
19 | in
20 | writeScript "stack-rebuild-wrapped" ''
21 | #!${stdenv.shell}
22 | export PATH=${lib.makeBinPath buildTools}
23 | exec ${stackRebuild} "$@"
24 | ''
25 |
--------------------------------------------------------------------------------
/smash/test/DBSpec.hs:
--------------------------------------------------------------------------------
1 | module Main where
2 |
3 | import Cardano.Prelude
4 |
5 | import System.Directory (getCurrentDirectory)
6 | import System.Environment (lookupEnv, setEnv)
7 | import System.FilePath ((</>))
8 |
9 | import Test.Hspec (describe, hspec)
10 |
11 | import MigrationSpec (migrationSpec)
12 |
13 | -- | Entry point for tests.
14 | main :: IO ()
15 | main = do
16 |
17 | -- If the env is not set, set it to default.
18 | mPgPassFile <- lookupEnv "SMASHPGPASSFILE"
19 | when (isNothing mPgPassFile) $ do
20 | currentDir <- getCurrentDirectory
21 | setEnv "SMASHPGPASSFILE" (currentDir </> "../config/pgpass-test")
22 |
23 | hspec $ do
24 | describe "Migration tests" migrationSpec
25 |
26 |
--------------------------------------------------------------------------------
/schema/migration-2-0006-20210108.sql:
--------------------------------------------------------------------------------
1 | -- Persistent generated migration.
2 |
3 | CREATE FUNCTION migrate() RETURNS void AS $$
4 | DECLARE
5 | next_version int ;
6 | BEGIN
7 | SELECT stage_two + 1 INTO next_version FROM schema_version ;
8 | IF next_version = 6 THEN
9 | -- Fix the pool table.
10 | DROP TABLE "pool";
11 | CREATe TABLE "pool"("id" SERIAL8 PRIMARY KEY UNIQUE,"pool_id" text NOT NULL);
12 | ALTER TABLE "pool" ADD CONSTRAINT "unique_pool_id" UNIQUE("pool_id");
13 | -- Hand written SQL statements can be added here.
14 | UPDATE schema_version SET stage_two = 6 ;
15 | RAISE NOTICE 'DB has been migrated to stage_two version %', next_version ;
16 | END IF ;
17 | END ;
18 | $$ LANGUAGE plpgsql ;
19 |
20 | SELECT migrate() ;
21 |
22 | DROP FUNCTION migrate() ;
23 |
--------------------------------------------------------------------------------
/doc/.sphinx/requirements.txt:
--------------------------------------------------------------------------------
1 | Sphinx==3.1.1
2 | sphinx-intl==2.0.1
3 | transifex-client==0.13.10
4 | testresources==2.0.1
5 | -e git+https://github.com/input-output-hk/sphinx_rtd_theme.git#egg=sphinx_rtd_theme
6 | recommonmark==0.6
7 | ## The following requirements were added by pip freeze:
8 | alabaster==0.7.12
9 | Babel==2.8.0
10 | certifi==2020.4.5.2
11 | chardet==3.0.4
12 | click==7.1.2
13 | sphinxcontrib-mermaid==0.4.0
14 | sphinxemoji==0.1.6
15 | sphinx_markdown_tables==0.0.15
16 | CommonMark==0.9.1
17 | docutils==0.16
18 | future==0.18.2
19 | idna==2.9
20 | imagesize==1.2.0
21 | Jinja2==2.11.3
22 | jsonpointer==2.0
23 | jsonref==0.2
24 | MarkupSafe==1.1.1
25 | Pygments==2.7.4
26 | pytz==2020.1
27 | requests==2.24.0
28 | six==1.15.0
29 | snowballstemmer==2.0.0
30 | sphinxcontrib-websupport==1.2.2
31 | urllib3==1.26.5
32 |
--------------------------------------------------------------------------------
/schema/migration-2-0005-20201203.sql:
--------------------------------------------------------------------------------
1 | -- Persistent generated migration.
2 |
3 | CREATE FUNCTION migrate() RETURNS void AS $$
4 | DECLARE
5 | next_version int ;
6 | BEGIN
7 | SELECT stage_two + 1 INTO next_version FROM schema_version ;
8 | IF next_version = 5 THEN
9 | ALTER TABLE "pool_metadata_fetch_error" DROP CONSTRAINT "unique_pool_metadata_fetch_error";
10 | ALTER TABLE "pool_metadata_fetch_error" ADD CONSTRAINT "unique_pool_metadata_fetch_error" UNIQUE("fetch_time","pool_id","pool_hash","retry_count");
11 | -- Hand written SQL statements can be added here.
12 | UPDATE schema_version SET stage_two = 5 ;
13 | RAISE NOTICE 'DB has been migrated to stage_two version %', next_version ;
14 | END IF ;
15 | END ;
16 | $$ LANGUAGE plpgsql ;
17 |
18 | SELECT migrate() ;
19 |
20 | DROP FUNCTION migrate() ;
21 |
--------------------------------------------------------------------------------
/scripts/buildkite/stack-cabal-sync.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | set -eu
3 |
4 | # This script checks that the `stack.yaml` and `cabal.project` files have
5 | # consistent git hashes for the packages they depend on. We use
6 | # `cardano-repo-tool`'s `update-cabal-project` command which modifies
7 | # `cabal.project` to be consistent with `stack.yaml`'s versions. If the
8 | # diff is non-empty, we know they're out of sync.
9 |
10 | # Check that the required tools are available.
11 | HELP_TEXT="cardano-repo-tool not found."
12 | type cardano-repo-tool > /dev/null 2>&1 || { echo "${HELP_TEXT}"; exit 1; }
13 | HELP_TEXT="git not found."
14 | type git > /dev/null 2>&1 || { echo "${HELP_TEXT}"; exit 1; }
15 |
16 | # Update `cabal.project` from the `stack.yaml` file.
17 | cardano-repo-tool update-cabal-project
18 |
19 | git diff cabal.project | tee stack-cabal.patch
20 |
21 | if test "$(wc -l < stack-cabal.patch)" -gt 0 ; then
22 | buildkite-agent artifact upload stack-cabal.patch --job "$BUILDKITE_JOB_ID"
23 | exit 1
24 | fi
25 |
26 | exit 0
27 |
--------------------------------------------------------------------------------
/nix/pkgs.nix:
--------------------------------------------------------------------------------
1 | # our packages overlay
2 | pkgs: _: with pkgs;
3 | let
4 | compiler = config.haskellNix.compiler or "ghc8105";
5 | src = haskell-nix.haskellLib.cleanGit {
6 | name = "smash-src";
7 | src = ../.;
8 | };
9 | projectPackagesNames = lib.attrNames (haskell-nix.haskellLib.selectProjectPackages
10 | (haskell-nix.cabalProject { inherit src; compiler-nix-name = compiler; }));
11 | in {
12 |
13 | inherit projectPackagesNames;
14 |
15 |
16 | smashHaskellPackages = callPackage ./haskell.nix {
17 | inherit compiler src projectPackagesNames;
18 | };
19 |
20 | smashTestingHaskellPackages = callPackage ./haskell.nix {
21 | inherit compiler src projectPackagesNames;
22 | flags = [ "testing-mode" ];
23 | };
24 |
25 | # Grab the executable component of our package.
26 | inherit (smashHaskellPackages.smash.components.exes)
27 | smash-exe;
28 |
29 | inherit (smashHaskellPackages.cardano-node.components.exes)
30 | cardano-node;
31 |
32 | smash-exe-testing = smashTestingHaskellPackages.smash.components.exes.smash-exe;
33 |
34 | }
35 |
--------------------------------------------------------------------------------
/nix/scripts.nix:
--------------------------------------------------------------------------------
1 | { pkgs, lib, iohkNix, customConfig }:
2 | let
3 | blacklistedEnvs = [ "selfnode" "shelley_selfnode" "latency-tests" "mainnet-ci" ];
4 | environments = lib.filterAttrs (k: v: (!builtins.elem k blacklistedEnvs)) iohkNix.cardanoLib.environments;
5 | mkStartScripts = envConfig: let
6 | systemdCompat.options = {
7 | systemd.services = lib.mkOption {};
8 | services.postgresql = lib.mkOption {};
9 | users = lib.mkOption {};
10 | environment = lib.mkOption {};
11 | };
12 | eval = let
13 | extra = {
14 | internal.smashPackages = pkgs;
15 | services.smash = {
16 | enable = true;
17 | environment = envConfig;
18 | environmentName = envConfig.name;
19 | };
20 | };
21 | in lib.evalModules {
22 | prefix = [];
23 | modules = import nixos/module-list.nix ++ [ systemdCompat customConfig extra ];
24 | args = { inherit pkgs; };
25 | };
26 | in {
27 | smash = eval.config.services.smash.script;
28 | };
29 | in iohkNix.cardanoLib.forEnvironmentsCustom mkStartScripts environments // { inherit environments; }
30 |
--------------------------------------------------------------------------------
/schema/migration-1-0001-20200611.sql:
--------------------------------------------------------------------------------
1 | -- Hand written migration to create the custom types with 'DOMAIN' statements.
2 |
3 | CREATE FUNCTION migrate() RETURNS void AS $$
4 |
5 | DECLARE
6 | next_version int;
7 |
8 | BEGIN
9 | SELECT stage_one + 1 INTO next_version FROM "schema_version";
10 | IF next_version = 1 THEN
11 | CREATE DOMAIN lovelace AS bigint CHECK (VALUE >= 0 AND VALUE <= 45000000000000000);
12 | CREATE DOMAIN txindex AS smallint CHECK (VALUE >= 0 AND VALUE < 1024);
13 | CREATE DOMAIN uinteger AS integer CHECK (VALUE >= 0);
14 |
15 | -- Base16 encoded values use a 64 byte hash.
16 | CREATE DOMAIN base16type AS bytea CHECK (octet_length (VALUE) = 64);
17 |
18 | -- Blocks, transactions and merkel roots use a 32 byte hash.
19 | CREATE DOMAIN hash32type AS bytea CHECK (octet_length (VALUE) = 32);
20 |
21 | -- Addresses use a 28 byte hash (as do StakeholdIds).
22 | CREATE DOMAIN hash28type AS bytea CHECK (octet_length (VALUE) = 28);
23 |
24 | UPDATE "schema_version" SET stage_one = 1;
25 | RAISE NOTICE 'DB has been migrated to stage_one version %', next_version;
26 | END IF;
27 | END;
28 |
29 | $$ LANGUAGE plpgsql;
30 |
31 | SELECT migrate();
32 |
33 | DROP FUNCTION migrate();
34 |
--------------------------------------------------------------------------------
/README.rst:
--------------------------------------------------------------------------------
1 | .. raw:: html
2 |
3 |
4 |
5 |
6 |
7 |
8 | Deprecation notice:
9 |
10 | ⚠️ This project is deprecated and only supports cardano-node up to version 1.30.1. For newer versions of cardano-node, use the SMASH server in https://github.com/input-output-hk/cardano-db-sync. Do not use this repository anymore; it is kept for historical purposes.
11 |
12 | *************************
13 | ``smash`` Overview
14 | *************************
15 |
16 | This repository contains the source code for the Cardano Stakepool Metadata Aggregation Server (SMASH).
17 | The purpose of SMASH is to aggregate common metadata about stakepools that are registered
18 | on the Cardano blockchain, including the name of the stakepool, its "ticker" name etc.
19 | This metadata can be curated and provided as a service to delegators, stake pool operators,
20 | exchanges etc., enabling independent validation and/or disambiguation of stakepool "ticker" names, for example.
21 |
22 |
--------------------------------------------------------------------------------
/nix/default.nix:
--------------------------------------------------------------------------------
1 | { system ? builtins.currentSystem
2 | , crossSystem ? null
3 | , config ? {}
4 | , sourcesOverride ? {}
5 | }:
6 | let
7 | sources = import ./sources.nix { inherit pkgs; }
8 | // sourcesOverride;
9 | iohkNix = import sources.iohk-nix {};
10 | haskellNix = import sources."haskell.nix" {};
11 | nixpkgs = haskellNix.sources.nixpkgs-2105;
12 |
13 | # for inclusion in pkgs:
14 | overlays =
15 | # Haskell.nix (https://github.com/input-output-hk/haskell.nix)
16 | haskellNix.overlays
17 | # haskell-nix.haskellLib.extra: some useful extra utility functions for haskell.nix
18 | ++ iohkNix.overlays.haskell-nix-extra
19 | # add libsodium:
20 | ++ iohkNix.overlays.crypto
21 | # iohkNix: nix utilities and niv:
22 | ++ iohkNix.overlays.iohkNix
23 | # our own overlays:
24 | ++ [
25 | (pkgs: _: with pkgs; {
26 |
27 | # commonLib: mix pkgs.lib with iohk-nix utils and our own:
28 | commonLib = lib // iohkNix
29 | // import ./util.nix { inherit haskell-nix; }
30 | # also expose our sources and overlays
31 | // { inherit overlays sources; };
32 | })
33 | # And, of course, our haskell-nix-ified cabal project:
34 | (import ./pkgs.nix)
35 | ];
36 |
37 | pkgs = import nixpkgs {
38 | inherit system crossSystem overlays;
39 | config = haskellNix.config // config;
40 | };
41 |
42 | in pkgs
43 |
--------------------------------------------------------------------------------
/shell.nix:
--------------------------------------------------------------------------------
1 | # This file is used by nix-shell.
2 | { config ? {}
3 | , sourcesOverride ? {}
4 | , withHoogle ? true
5 | , pkgs ? import ./nix {
6 | inherit config sourcesOverride;
7 | }
8 | }:
9 | with pkgs;
10 | let
11 | # This provides a development environment that can be used with nix-shell or
12 | # lorri. See https://input-output-hk.github.io/haskell.nix/user-guide/development/
13 | shell = smashHaskellPackages.shellFor {
14 | name = "cabal-dev-shell";
15 |
16 | packages = ps: lib.attrValues (lib.getAttrs projectPackagesNames ps);
17 |
18 | # These programs will be available inside the nix-shell.
19 | buildInputs = with haskellPackages; [
20 | cabal-install
21 | ghcid
22 | hlint
23 | weeder
24 | nix
25 | niv
26 | pkgconfig
27 | sqlite-interactive
28 | tmux
29 | pkgs.git
30 | ];
31 |
32 | # Prevents cabal from choosing alternate plans, so that
33 | # *all* dependencies are provided by Nix.
34 | exactDeps = true;
35 |
36 | inherit withHoogle;
37 | };
38 |
39 | devops = pkgs.stdenv.mkDerivation {
40 | name = "devops-shell";
41 | buildInputs = [
42 | niv
43 | ];
44 | shellHook = ''
45 | echo "DevOps Tools" \
46 | | ${figlet}/bin/figlet -f banner -c \
47 | | ${lolcat}/bin/lolcat
48 |
49 | echo "NOTE: you may need to export GITHUB_TOKEN if you hit rate limits with niv"
50 | echo "Commands:
51 | * niv update - update package
52 |
53 | "
54 | '';
55 | };
56 |
57 | in
58 |
59 | shell // { inherit devops; }
60 |
--------------------------------------------------------------------------------
/schema/migration-2-0003-20201001.sql:
--------------------------------------------------------------------------------
1 | -- Persistent generated migration.
2 |
3 | CREATE FUNCTION migrate() RETURNS void AS $$
4 | DECLARE
5 | next_version int ;
6 | BEGIN
7 | SELECT stage_two + 1 INTO next_version FROM schema_version ;
8 | IF next_version = 3 THEN
9 | ALTER TABLE "pool_metadata_reference" ALTER COLUMN "pool_id" TYPE text;
10 | ALTER TABLE "pool_metadata_reference" ALTER COLUMN "url" TYPE text;
11 | ALTER TABLE "pool_metadata_reference" ALTER COLUMN "hash" TYPE text;
12 | ALTER TABLE "pool_metadata" ALTER COLUMN "pool_id" TYPE text;
13 | ALTER TABLE "pool_metadata" ALTER COLUMN "ticker_name" TYPE text;
14 | ALTER TABLE "pool_metadata" ALTER COLUMN "hash" TYPE text;
15 | ALTER TABLE "pool_metadata" ALTER COLUMN "metadata" TYPE text;
16 | ALTER TABLE "pool_metadata_fetch_error" ALTER COLUMN "pool_id" TYPE text;
17 | ALTER TABLE "pool_metadata_fetch_error" ALTER COLUMN "pool_hash" TYPE text;
18 | ALTER TABLE "delisted_pool" ALTER COLUMN "pool_id" TYPE text;
19 | ALTER TABLE "delisted_pool" ADD CONSTRAINT "unique_delisted_pool" UNIQUE("pool_id");
20 | ALTER TABLE "delisted_pool" DROP CONSTRAINT "unique_blacklisted_pool";
21 | ALTER TABLE "reserved_ticker" ALTER COLUMN "name" TYPE text;
22 | ALTER TABLE "reserved_ticker" ALTER COLUMN "pool_hash" TYPE text;
23 | -- Hand written SQL statements can be added here.
24 | UPDATE schema_version SET stage_two = 3 ;
25 | RAISE NOTICE 'DB has been migrated to stage_two version %', next_version ;
26 | END IF ;
27 | END ;
28 | $$ LANGUAGE plpgsql ;
29 |
30 | SELECT migrate() ;
31 |
32 | DROP FUNCTION migrate() ;
33 |
--------------------------------------------------------------------------------
/schema/migration-2-0002-20200904.sql:
--------------------------------------------------------------------------------
1 | -- Persistent generated migration.
2 |
3 | CREATE FUNCTION migrate() RETURNS void AS $$
4 | DECLARE
5 | next_version int ;
6 | BEGIN
7 | SELECT stage_two + 1 INTO next_version FROM schema_version ;
8 | IF next_version = 2 THEN
9 | ALTER TABLE "pool_metadata_reference" ALTER COLUMN "pool_id" TYPE text;
10 | ALTER TABLE "pool_metadata_reference" ALTER COLUMN "url" TYPE text;
11 | ALTER TABLE "pool_metadata_reference" ALTER COLUMN "hash" TYPE text;
12 | ALTER TABLE "pool_metadata" ALTER COLUMN "pool_id" TYPE text;
13 | ALTER TABLE "pool_metadata" ALTER COLUMN "ticker_name" TYPE text;
14 | ALTER TABLE "pool_metadata" ALTER COLUMN "hash" TYPE text;
15 | ALTER TABLE "pool_metadata" ALTER COLUMN "metadata" TYPE text;
16 | CREATe TABLE "pool_metadata_fetch_error"("id" SERIAL8 PRIMARY KEY UNIQUE,"fetch_time" timestamp NOT NULL,"pool_id" text NOT NULL,"pool_hash" text NOT NULL,"pmr_id" INT8 NOT NULL,"fetch_error" VARCHAR NOT NULL,"retry_count" uinteger NOT NULL);
17 | ALTER TABLE "pool_metadata_fetch_error" ADD CONSTRAINT "unique_pool_metadata_fetch_error" UNIQUE("fetch_time","pool_id");
18 | ALTER TABLE "pool_metadata_fetch_error" ADD CONSTRAINT "pool_metadata_fetch_error_pmr_id_fkey" FOREIGN KEY("pmr_id") REFERENCES "pool_metadata_reference"("id");
19 | ALTER TABLE "delisted_pool" ALTER COLUMN "pool_id" TYPE text;
20 | ALTER TABLE "reserved_ticker" ALTER COLUMN "name" TYPE text;
21 | ALTER TABLE "reserved_ticker" ALTER COLUMN "pool_hash" TYPE text;
22 | -- Hand written SQL statements can be added here.
23 | UPDATE schema_version SET stage_two = 2 ;
24 | RAISE NOTICE 'DB has been migrated to stage_two version %', next_version ;
25 | END IF ;
26 | END ;
27 | $$ LANGUAGE plpgsql ;
28 |
29 | SELECT migrate() ;
30 |
31 | DROP FUNCTION migrate() ;
32 |
--------------------------------------------------------------------------------
/smash/src/Cardano/SMASH/DBSync/Db/Delete.hs:
--------------------------------------------------------------------------------
1 | {-# LANGUAGE FlexibleContexts #-}
2 | {-# LANGUAGE TypeFamilies #-}
3 |
4 | module Cardano.SMASH.DBSync.Db.Delete
5 | ( deleteDelistedPool
6 | , deleteRetiredPool
7 | , deleteAdminUser
8 | , deleteCascadeSlotNo
9 | ) where
10 |
11 | import Cardano.Prelude hiding (Meta)
12 |
13 | import Database.Persist.Sql (SqlBackend, delete,
14 | selectKeysList, (==.))
15 |
16 | import Cardano.SMASH.DBSync.Db.Schema
17 | import qualified Cardano.SMASH.DBSync.Db.Types as Types
18 |
19 | -- | Delete a delisted pool if it exists. Returns 'True' if it did exist and has been
20 | -- deleted and 'False' if it did not exist.
21 | deleteDelistedPool :: MonadIO m => Types.PoolId -> ReaderT SqlBackend m Bool
22 | deleteDelistedPool poolId = do
23 | keys <- selectKeysList [ DelistedPoolPoolId ==. poolId ] []
24 | mapM_ delete keys
25 | pure $ not (null keys)
26 |
27 | -- | Delete a retired pool if it exists. Returns 'True' if it did exist and has been
28 | -- deleted and 'False' if it did not exist.
29 | deleteRetiredPool :: MonadIO m => Types.PoolId -> ReaderT SqlBackend m Bool
30 | deleteRetiredPool poolId = do
31 | keys <- selectKeysList [ RetiredPoolPoolId ==. poolId ] []
32 | mapM_ delete keys
33 | pure $ not (null keys)
34 |
35 | deleteAdminUser :: MonadIO m => AdminUser -> ReaderT SqlBackend m Bool
36 | deleteAdminUser adminUser = do
37 | keys <- selectKeysList [ AdminUserUsername ==. adminUserUsername adminUser, AdminUserPassword ==. adminUserPassword adminUser ] []
38 | mapM_ delete keys
39 | pure $ not (null keys)
40 |
41 | deleteCascadeSlotNo :: MonadIO m => Word64 -> ReaderT SqlBackend m Bool
42 | deleteCascadeSlotNo slotNo = do
43 | keys <- selectKeysList [ BlockSlotNo ==. Just slotNo ] []
44 | mapM_ delete keys
45 | pure $ not (null keys)
--------------------------------------------------------------------------------
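A minimal usage sketch for the module above (not part of the repository), assuming a persistent-postgresql connection pool and assuming 'PoolId' is the Text newtype exported from Cardano.SMASH.DBSync.Db.Types:

    {-# LANGUAGE OverloadedStrings #-}

    import Control.Monad.IO.Class (liftIO)
    import Control.Monad.Logger (runNoLoggingT)
    import Database.Persist.Postgresql (runSqlPool, withPostgresqlPool)

    import Cardano.SMASH.DBSync.Db.Delete (deleteDelistedPool)
    import qualified Cardano.SMASH.DBSync.Db.Types as Types

    main :: IO ()
    main = runNoLoggingT $
      withPostgresqlPool "host=/var/run/postgresql dbname=smash" 1 $ \pool -> do
        -- True if the pool id existed in 'delisted_pool' and was deleted.
        existed <- runSqlPool (deleteDelistedPool (Types.PoolId "pool1...")) pool
        liftIO (print existed)
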
/smash-servant-types/smash-servant-types.cabal:
--------------------------------------------------------------------------------
1 | cabal-version: 1.12
2 | name: smash-servant-types
3 | version: 1.4.0
4 | description:
5 | Shared servant API types for SMASH
6 |
7 | homepage: https://github.com/input-output-hk/smash#readme
8 | bug-reports: https://github.com/input-output-hk/smash/issues
9 | author: IOHK
10 | maintainer: operations@iohk.io
11 | license: Apache-2.0
12 | license-file: LICENSE
13 | build-type: Simple
14 |
15 | source-repository head
16 | type: git
17 | location: https://github.com/input-output-hk/smash
18 |
19 | flag disable-basic-auth
20 | description: Disable basic authentication scheme for other authentication mechanisms.
21 | default: False
22 |
23 | flag testing-mode
24 | description: A flag for allowing operations that promote easy testing.
25 | default: False
26 |
27 | library
28 | if flag(disable-basic-auth)
29 | cpp-options: -DDISABLE_BASIC_AUTH
30 |
31 | if flag(testing-mode)
32 | cpp-options: -DTESTING_MODE
33 |
34 | exposed-modules:
35 | Cardano.SMASH.API
36 | Cardano.SMASH.Types
37 | Cardano.SMASH.DBSync.Db.Error
38 | Cardano.SMASH.DBSync.Db.Types
39 |
40 | hs-source-dirs: src
41 | build-depends:
42 | aeson
43 | , base >=4.7 && <5
44 | , bytestring
45 | , cardano-prelude
46 | , cardano-api
47 | , base16-bytestring
48 | , persistent
49 | , network-uri
50 | , servant
51 | , servant-server
52 | , servant-swagger
53 | , swagger2
54 | , text
55 | , time
56 | , quiet
57 | , wai
58 |
59 | default-language: Haskell2010
60 | default-extensions:
61 | NoImplicitPrelude
62 | OverloadedStrings
63 |
64 | ghc-options:
65 | -Wall -Wcompat -Wincomplete-record-updates
66 | -Wincomplete-uni-patterns -Wredundant-constraints -Wpartial-fields
67 | -fno-warn-orphans
68 |
69 |
--------------------------------------------------------------------------------
/nix/sources.json:
--------------------------------------------------------------------------------
1 | {
2 | "cardano-node": {
3 | "branch": "master",
4 | "description": "The core component that is used to participate in a Cardano decentralised blockchain.",
5 | "homepage": "https://cardano.org",
6 | "owner": "input-output-hk",
7 | "repo": "cardano-node",
8 | "rev": "07c59920321dcada28a38dffcb36aafe4625824f",
9 | "sha256": "1p3mmmw3lw6d740833m1i4fysbw59d8v7pg4a4nriclip2plp0f3",
10 | "type": "tarball",
11 | "url": "https://github.com/input-output-hk/cardano-node/archive/07c59920321dcada28a38dffcb36aafe4625824f.tar.gz",
12 | "url_template": "https://github.com///archive/.tar.gz"
13 | },
14 | "haskell.nix": {
15 | "branch": "master",
16 | "description": "Alternative Haskell Infrastructure for Nixpkgs",
17 | "homepage": "https://input-output-hk.github.io/haskell.nix",
18 | "owner": "input-output-hk",
19 | "repo": "haskell.nix",
20 | "rev": "8407c0fcdb7591d1a323f5ca39abc607f226d0dc",
21 | "sha256": "0jg1kv4adayg4irza1mjgxbhawk6ss7nzs0yl2p7c1knf5zzgifa",
22 | "type": "tarball",
23 | "url": "https://github.com/input-output-hk/haskell.nix/archive/8407c0fcdb7591d1a323f5ca39abc607f226d0dc.tar.gz",
24 | "url_template": "https://github.com///archive/.tar.gz"
25 | },
26 | "iohk-nix": {
27 | "branch": "master",
28 | "description": "nix scripts shared across projects",
29 | "homepage": null,
30 | "owner": "input-output-hk",
31 | "repo": "iohk-nix",
32 | "rev": "baf39a5a5e782c934eab58337284d4c59c2c57c8",
33 | "sha256": "1pf0nrrxf0kqdlghikgz6pas78sbcxzkvmma6rbg0df0hvyll5wn",
34 | "type": "tarball",
35 | "url": "https://github.com/input-output-hk/iohk-nix/archive/baf39a5a5e782c934eab58337284d4c59c2c57c8.tar.gz",
36 | "url_template": "https://github.com///archive/.tar.gz"
37 | }
38 | }
39 |
--------------------------------------------------------------------------------
/scripts/buildkite/rebuild.hs:
--------------------------------------------------------------------------------
1 | {-# LANGUAGE LambdaCase #-}
2 | {-# LANGUAGE OverloadedStrings #-}
3 | {-# LANGUAGE ScopedTypeVariables #-}
4 |
5 | import Control.Exception
6 | import Control.Monad.Trans.Maybe
7 | import qualified Data.Text as T
8 | import Safe
9 | import System.Exit (exitWith)
10 | import Turtle
11 |
12 |
13 | -- | Run build and upload coverage information when successful
14 | main :: IO ()
15 | main = do
16 | buildResult <- buildStep
17 |
18 | when (buildResult == ExitSuccess) coverageUploadStep
19 |
20 | exitWith buildResult
21 |
22 |
23 | -- | Build and test all packages using stack
24 | buildStep :: IO ExitCode
25 | buildStep = do
26 | echo "+++ Build and test"
27 | run "stack" $ cfg ++ ["build", "--fast"] ++ buildArgs
28 | where
29 | cfg = ["--dump-logs", "--color", "always"]
30 | buildArgs =
31 | [ "--bench"
32 | , "--no-run-benchmarks"
33 | , "--test"
34 | , "--coverage"
35 | ]
36 |
37 | -- | Upload coverage information to coveralls
38 | coverageUploadStep :: IO ()
39 | coverageUploadStep = do
40 | echo "--- Uploading Coverage Information"
41 | need "SMASH_COVERALLS_REPO_TOKEN" >>= \case
42 | Nothing -> printf
43 | "Missing coverall repo token. Not uploading coverage information.\n"
44 | Just repoToken -> do
45 | result <- proc
46 | "shc"
47 | ["--repo-token", repoToken, "smash", "tests"]
48 | empty
49 | case result of
50 | ExitSuccess -> printf "Coverage information upload successful.\n"
51 | ExitFailure _ -> printf "Coverage information upload failed.\n"
52 |
53 |
54 | run :: Text -> [Text] -> IO ExitCode
55 | run cmd args = do
56 | printf (s % " " % s % "\n") cmd (T.unwords args)
57 | res <- proc cmd args empty
58 | case res of
59 | ExitSuccess -> pure ()
60 | ExitFailure code -> eprintf
61 | ("error: Command exited with code " % d % "!\nContinuing...\n")
62 | code
63 | pure res
64 |
--------------------------------------------------------------------------------
/default.nix:
--------------------------------------------------------------------------------
1 | { system ? builtins.currentSystem
2 | , crossSystem ? null
3 | # allows customizing haskellNix (ghc and profiling, see ./nix/haskell.nix)
4 | , config ? {}
5 | # override scripts with custom configuration
6 | , customConfig ? {}
7 | # allows overriding dependencies of the project without modifications,
8 | # eg. to test build against local checkout of iohk-nix:
9 | # nix build -f default.nix cardano-node --arg sourcesOverride '{
10 | # iohk-nix = ../iohk-nix;
11 | # }'
12 | , sourcesOverride ? {}
13 | # pinned version of nixpkgs augmented with overlays (iohk-nix and our packages).
14 | , pkgs ? import ./nix { inherit system crossSystem config sourcesOverride; }
15 | , gitrev ? pkgs.iohkNix.commitIdFromGitRepoOrZero ./.git
16 | }:
17 | with pkgs; with commonLib;
18 | let
19 | customConfig' = if customConfig ? services then customConfig else {
20 | services.smash = customConfig;
21 | };
22 |
23 | haskellPackages = recRecurseIntoAttrs
24 | # we are only interested in listing the project packages:
25 | (selectProjectPackages smashHaskellPackages);
26 | scripts = callPackage ./nix/scripts.nix {
27 | customConfig = customConfig';
28 | };
29 |
30 | packages = {
31 | inherit haskellPackages scripts smash-exe smash-exe-testing cardano-node;
32 | inherit (haskellPackages.smash.identifier) version;
33 |
34 | # `tests` are the test suites which have been built.
35 | tests = collectComponents' "tests" haskellPackages;
36 |
37 | libs = collectComponents' "library" haskellPackages;
38 |
39 | exes = lib.recursiveUpdate (collectComponents' "exes" haskellPackages) {
40 | smash = { inherit smash-exe-testing; };
41 | };
42 |
43 | checks = recurseIntoAttrs {
44 | # `checks.tests` collect results of executing the tests:
45 | tests = collectChecks haskellPackages;
46 | };
47 |
48 | nixosTests = import ./nix/nixos/tests {
49 | inherit pkgs;
50 | };
51 |
52 | shell = import ./shell.nix {
53 | inherit pkgs;
54 | withHoogle = true;
55 | };
56 | };
57 | in packages
58 |
--------------------------------------------------------------------------------
/smash/src/Cardano/SMASH/DBSync/Db/Migration/Version.hs:
--------------------------------------------------------------------------------
1 | {-# LANGUAGE OverloadedStrings #-}
2 |
3 | module Cardano.SMASH.DBSync.Db.Migration.Version
4 | ( MigrationVersion (..)
5 | , parseMigrationVersionFromFile
6 | , nextMigrationVersion
7 | , renderMigrationVersion
8 | , renderMigrationVersionFile
9 | ) where
10 |
11 | import Cardano.Prelude
12 |
13 | import qualified Data.List as List
14 | import qualified Data.List.Extra as List
15 | import qualified Data.Time.Calendar as Time
16 | import qualified Data.Time.Clock as Time
17 |
18 | import Text.Printf (printf)
19 |
20 |
21 | data MigrationVersion = MigrationVersion
22 | { mvStage :: Int
23 | , mvVersion :: Int
24 | , mvDate :: Int
25 | } deriving (Eq, Ord, Show)
26 |
27 |
28 | parseMigrationVersionFromFile :: Text -> Maybe MigrationVersion
29 | parseMigrationVersionFromFile str =
30 | case List.splitOn "-" (List.takeWhile (/= '.') (toS str)) of
31 | [_, stage, ver, date] ->
32 | case (readMaybe stage, readMaybe ver, readMaybe date) of
33 | (Just s, Just v, Just d) -> Just $ MigrationVersion s v d
34 | _ -> Nothing
35 | _ -> Nothing
36 |
37 | nextMigrationVersion :: MigrationVersion -> IO MigrationVersion
38 | nextMigrationVersion (MigrationVersion _stage ver _date) = do
39 | -- We can ignore the provided 'stage' and 'date' fields, but we do bump the version number.
40 | -- All new versions have 'stage == 2' because the stage 2 migrations are the Persistent
41 | -- generated ones. For the date we use today's date.
42 | (y, m, d) <- Time.toGregorian . Time.utctDay <$> Time.getCurrentTime
43 | pure $ MigrationVersion 2 (ver + 1) (fromIntegral y * 10000 + m * 100 + d)
44 |
45 | renderMigrationVersion :: MigrationVersion -> Text
46 | renderMigrationVersion mv =
47 | toS $ List.intercalate "-"
48 | [ printf "%d" (mvStage mv)
49 | , printf "%04d" (mvVersion mv)
50 | , show (mvDate mv)
51 | ]
52 |
53 | renderMigrationVersionFile :: MigrationVersion -> Text
54 | renderMigrationVersionFile mv =
55 | toS $ List.concat
56 | [ "migration-"
57 | , toS $ renderMigrationVersion mv
58 | , ".sql"
59 | ]
60 |
61 |
--------------------------------------------------------------------------------
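A small sketch (not part of the repository) showing how the module above round-trips the migration file names used in the schema/ directory; the results follow directly from the splitOn/readMaybe logic in 'parseMigrationVersionFromFile' and the printf formatting in 'renderMigrationVersion':

    {-# LANGUAGE OverloadedStrings #-}

    import Data.Text (Text)

    import Cardano.SMASH.DBSync.Db.Migration.Version

    parsed :: Maybe MigrationVersion
    parsed = parseMigrationVersionFromFile "migration-2-0008-20210215.sql"
    -- Just (MigrationVersion {mvStage = 2, mvVersion = 8, mvDate = 20210215})

    rendered :: Text
    rendered = renderMigrationVersionFile (MigrationVersion 2 8 20210215)
    -- "migration-2-0008-20210215.sql"
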
/schema/migration-2-0009-20210818.sql:
--------------------------------------------------------------------------------
1 | -- Persistent generated migration.
2 |
3 | CREATE FUNCTION migrate() RETURNS void AS $$
4 | DECLARE
5 | next_version int ;
6 | BEGIN
7 | SELECT stage_two + 1 INTO next_version FROM schema_version ;
8 | IF next_version = 9 THEN
9 | ALTER TABLE "schema_version" ALTER COLUMN "id" TYPE INT8;
10 | ALTER TABLE "pool_metadata_reference" ALTER COLUMN "pool_id" TYPE text;
11 | ALTER TABLE "pool_metadata_reference" ALTER COLUMN "url" TYPE text;
12 | ALTER TABLE "pool_metadata_reference" ALTER COLUMN "hash" TYPE text;
13 | ALTER TABLE "pool_metadata" ALTER COLUMN "pool_id" TYPE text;
14 | ALTER TABLE "pool_metadata" ALTER COLUMN "ticker_name" TYPE text;
15 | ALTER TABLE "pool_metadata" ALTER COLUMN "hash" TYPE text;
16 | ALTER TABLE "pool_metadata" ALTER COLUMN "metadata" TYPE text;
17 | ALTER TABLE "pool_metadata" DROP CONSTRAINT "pool_metadata_pmr_id_fkey";
18 | ALTER TABLE "pool_metadata" ADD CONSTRAINT "pool_metadata_pmr_id_fkey" FOREIGN KEY("pmr_id") REFERENCES "pool_metadata_reference"("id") ON DELETE CASCADE ON UPDATE RESTRICT;
19 | ALTER TABLE "pool" ALTER COLUMN "pool_id" TYPE text;
20 | ALTER TABLE "retired_pool" ALTER COLUMN "pool_id" TYPE text;
21 | ALTER TABLE "pool_metadata_fetch_error" ALTER COLUMN "pool_id" TYPE text;
22 | ALTER TABLE "pool_metadata_fetch_error" ALTER COLUMN "pool_hash" TYPE text;
23 | ALTER TABLE "pool_metadata_fetch_error" DROP CONSTRAINT "pool_metadata_fetch_error_pmr_id_fkey";
24 | ALTER TABLE "pool_metadata_fetch_error" ADD CONSTRAINT "pool_metadata_fetch_error_pmr_id_fkey" FOREIGN KEY("pmr_id") REFERENCES "pool_metadata_reference"("id") ON DELETE CASCADE ON UPDATE RESTRICT;
25 | ALTER TABLE "delisted_pool" ALTER COLUMN "pool_id" TYPE text;
26 | ALTER TABLE "reserved_ticker" ALTER COLUMN "name" TYPE text;
27 | ALTER TABLE "reserved_ticker" ALTER COLUMN "pool_hash" TYPE text;
28 | -- Hand written SQL statements can be added here.
29 | UPDATE schema_version SET stage_two = 9 ;
30 | RAISE NOTICE 'DB has been migrated to stage_two version %', next_version ;
31 | END IF ;
32 | END ;
33 | $$ LANGUAGE plpgsql ;
34 |
35 | SELECT migrate() ;
36 |
37 | DROP FUNCTION migrate() ;
38 |
--------------------------------------------------------------------------------
/smash/src/Cardano/SMASH/FetchQueue.hs:
--------------------------------------------------------------------------------
1 | {-# LANGUAGE DeriveGeneric #-}
2 |
3 | module Cardano.SMASH.FetchQueue
4 | ( PoolFetchRetry (..)
5 | , Retry (..)
6 | , newRetry
7 | , retryAgain
8 | , showRetryTimes
9 | ) where
10 |
11 |
12 | import Cardano.Prelude
13 |
14 | import Data.Time.Clock.POSIX (POSIXTime)
15 |
16 | import Cardano.SMASH.DBSync.Db.Schema (PoolMetadataReferenceId)
17 | import Cardano.SMASH.DBSync.Db.Types (PoolId, PoolMetadataHash,
18 | PoolUrl)
19 | import Cardano.SMASH.Types (formatTimeToNormal)
20 |
21 | data PoolFetchRetry = PoolFetchRetry
22 | { pfrReferenceId :: !PoolMetadataReferenceId
23 | , pfrPoolIdWtf :: !PoolId
24 | , pfrPoolUrl :: !PoolUrl
25 | , pfrPoolMDHash :: !PoolMetadataHash
26 | , pfrRetry :: !Retry
27 | } deriving (Show)
28 |
29 | data Retry = Retry
30 | { fetchTime :: !POSIXTime
31 | , retryTime :: !POSIXTime
32 | , retryCount :: !Word
33 | } deriving (Eq, Show, Generic)
34 |
35 | newRetry :: POSIXTime -> Retry
36 | newRetry now =
37 | Retry
38 | { fetchTime = now
39 | , retryTime = now + 60 -- 60 seconds from now
40 | , retryCount = 0
41 | }
42 |
43 | retryAgain :: POSIXTime -> Word -> Retry
44 | retryAgain fetchTimePOSIX existingRetryCount =
45 | -- When to retry. Maximum of a day for a retry.
46 | -- We are basically using a series to predict the next retry time.
47 | let calculateNewDiff currRetryCount = min (24 * 60 * 60) ((3 ^ currRetryCount) * 60)
48 | newRetryDiff = sum $ map calculateNewDiff [0..existingRetryCount]
49 | in
50 | Retry
51 | { fetchTime = fetchTimePOSIX
52 | , retryTime = fetchTimePOSIX + newRetryDiff
53 | , retryCount = existingRetryCount
54 | }
55 |
56 | -- A nice pretty printer for the retry.
57 | showRetryTimes :: Retry -> Text
58 | showRetryTimes retry' =
59 | mconcat
60 | [ "Fetch time: '"
61 | , formatTimeToNormal $ fetchTime retry'
62 | , "', retry time: '"
63 | , formatTimeToNormal $ retryTime retry'
64 | , "', retry count: '"
65 | , show $ retryCount retry'
66 | , "'."
67 | ]
68 |
69 |
--------------------------------------------------------------------------------
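For intuition, a standalone sketch (not in the repository) of the backoff series used by 'retryAgain' above: retry i contributes min(1 day, 3^i * 60 seconds), so after successive failures the next fetch is scheduled 1, 4, 13, 40, ... minutes out, with each term capped at a day:

    -- Total delay (in seconds) before the next retry, for a given retry count.
    retryDelaySeconds :: Word -> Integer
    retryDelaySeconds existingRetryCount =
      sum [ min (24 * 60 * 60) (3 ^ i * 60)
          | i <- [0 .. toInteger existingRetryCount] ]

    main :: IO ()
    main = mapM_ (print . retryDelaySeconds) [0 .. 4]
    -- 60, 240, 780, 2400, 7260
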
/nix/nixos/tests/smash-test.nix:
--------------------------------------------------------------------------------
1 | { pkgs, ... }:
2 | with pkgs; with commonLib;
3 | {
4 | name = "smash-test";
5 | nodes = {
6 | machine = { config, ... }: {
7 | nixpkgs.pkgs = pkgs;
8 | environment = {
9 | systemPackages = with pkgs; [ curl jq ];
10 | variables = {
11 | SMASHPGPASSFILE = config.services.smash.postgres.pgpass;
12 | };
13 | };
14 | imports = [
15 | ../smash-service.nix
16 | (sources.cardano-node + "/nix/nixos")
17 | ];
18 | services.smash = {
19 | enable = true;
20 | environmentName = "mainnet";
21 | smashPkgs = pkgs;
22 | inherit (config.services.cardano-node) socketPath;
23 | };
24 | systemd.services.smash.serviceConfig = {
25 | # Put smash in the "cardano-node" group so that it can access the node socket file:
26 | SupplementaryGroups = "cardano-node";
27 | };
28 | services.cardano-node = {
29 | enable = true;
30 | environment = "mainnet";
31 | package = smashHaskellPackages.cardano-node.components.exes.cardano-node;
32 | topology = cardanoLib.mkEdgeTopology {
33 | port = 3001;
34 | edgeNodes = [ "127.0.0.1" ];
35 | };
36 | };
37 | systemd.services.cardano-node.serviceConfig.Restart = lib.mkForce "no";
38 | services.postgresql = {
39 | enable = true;
40 | package = postgresql_12;
41 | enableTCPIP = false;
42 | ensureDatabases = [ "${config.services.smash.postgres.database}" ];
43 | ensureUsers = [
44 | {
45 | name = "${config.services.smash.postgres.user}";
46 | ensurePermissions = {
47 | "DATABASE ${config.services.smash.postgres.database}" = "ALL PRIVILEGES";
48 | };
49 | }
50 | ];
51 | identMap = ''
52 | smash-users root ${config.services.smash.postgres.user}
53 | smash-users ${config.services.smash.user} ${config.services.smash.postgres.user}
54 | smash-users postgres postgres
55 | '';
56 | authentication = ''
57 | local all all ident map=smash-users
58 | '';
59 | };
60 | };
61 | };
62 | testScript = ''
63 | start_all()
64 | machine.wait_for_unit("postgresql.service")
65 | machine.wait_for_unit("cardano-node.service")
66 | machine.wait_for_open_port(3001)
67 | machine.wait_for_unit("smash.service")
68 | machine.wait_for_open_port(3100)
69 | '';
70 |
71 | }
72 |
--------------------------------------------------------------------------------
/schema/migration-2-0001-20200810.sql:
--------------------------------------------------------------------------------
1 | -- Persistent generated migration.
2 |
3 | CREATE FUNCTION migrate() RETURNS void AS $$
4 | DECLARE
5 | next_version int ;
6 | BEGIN
7 | SELECT stage_two + 1 INTO next_version FROM schema_version ;
8 | IF next_version = 1 THEN
9 | CREATe TABLE "pool_metadata_reference"("id" SERIAL8 PRIMARY KEY UNIQUE,"pool_id" text NOT NULL,"url" text NOT NULL,"hash" base16type NOT NULL);
10 | ALTER TABLE "pool_metadata_reference" ADD CONSTRAINT "unique_pool_metadata_reference" UNIQUE("pool_id","hash");
11 | CREATe TABLE "pool_metadata"("id" SERIAL8 PRIMARY KEY UNIQUE,"pool_id" text NOT NULL,"ticker_name" text NOT NULL,"hash" base16type NOT NULL,"metadata" text NOT NULL,"pmr_id" INT8 NULL);
12 | ALTER TABLE "pool_metadata" ADD CONSTRAINT "unique_pool_metadata" UNIQUE("pool_id","hash");
13 | ALTER TABLE "pool_metadata" ADD CONSTRAINT "pool_metadata_pmr_id_fkey" FOREIGN KEY("pmr_id") REFERENCES "pool_metadata_reference"("id");
14 | CREATe TABLE "pool"("id" SERIAL8 PRIMARY KEY UNIQUE,"pool_id" INT8 NOT NULL);
15 | ALTER TABLE "pool" ADD CONSTRAINT "unique_pool_id" UNIQUE("pool_id");
16 | ALTER TABLE "pool" ADD CONSTRAINT "pool_pool_id_fkey" FOREIGN KEY("pool_id") REFERENCES "pool"("id");
17 | CREATe TABLE "block"("id" SERIAL8 PRIMARY KEY UNIQUE,"hash" hash32type NOT NULL,"epoch_no" uinteger NULL,"slot_no" uinteger NULL,"block_no" uinteger NULL);
18 | ALTER TABLE "block" ADD CONSTRAINT "unique_block" UNIQUE("hash");
19 | CREATe TABLE "meta"("id" SERIAL8 PRIMARY KEY UNIQUE,"protocol_const" INT8 NOT NULL,"slot_duration" INT8 NOT NULL,"start_time" timestamp NOT NULL,"slots_per_epoch" INT8 NOT NULL,"network_name" VARCHAR NULL);
20 | ALTER TABLE "meta" ADD CONSTRAINT "unique_meta" UNIQUE("start_time");
21 | CREATe TABLE "delisted_pool"("id" SERIAL8 PRIMARY KEY UNIQUE,"pool_id" text NOT NULL);
22 | ALTER TABLE "delisted_pool" ADD CONSTRAINT "unique_blacklisted_pool" UNIQUE("pool_id");
23 | CREATe TABLE "reserved_ticker"("id" SERIAL8 PRIMARY KEY UNIQUE,"name" text NOT NULL,"pool_hash" base16type NOT NULL);
24 | ALTER TABLE "reserved_ticker" ADD CONSTRAINT "unique_reserved_ticker" UNIQUE("name");
25 | CREATe TABLE "admin_user"("id" SERIAL8 PRIMARY KEY UNIQUE,"username" VARCHAR NOT NULL,"password" VARCHAR NOT NULL);
26 | ALTER TABLE "admin_user" ADD CONSTRAINT "unique_admin_user" UNIQUE("username");
27 | -- Hand written SQL statements can be added here.
28 | UPDATE schema_version SET stage_two = 1 ;
29 | RAISE NOTICE 'DB has been migrated to stage_two version %', next_version ;
30 | END IF ;
31 | END ;
32 | $$ LANGUAGE plpgsql ;
33 |
34 | SELECT migrate() ;
35 |
36 | DROP FUNCTION migrate() ;
37 |
--------------------------------------------------------------------------------
/smash/src/Cardano/SMASH/DBSync/Db/Migration/Haskell.hs:
--------------------------------------------------------------------------------
1 | {-# LANGUAGE ConstraintKinds #-}
2 | {-# LANGUAGE OverloadedStrings #-}
3 |
4 | module Cardano.SMASH.DBSync.Db.Migration.Haskell
5 | ( runHaskellMigration
6 | ) where
7 |
8 | import Cardano.Prelude
9 |
10 | import Control.Monad.Logger (MonadLogger)
11 |
12 | import qualified Data.Map.Strict as Map
13 |
14 | import Database.Persist.Sql (SqlBackend)
15 |
16 | import Cardano.SMASH.DBSync.Db.Migration.Version
17 | import Cardano.SMASH.DBSync.Db.Run
18 |
19 | import System.IO (hClose, hFlush)
20 |
21 | -- | Run a migration written in Haskell (e.g. one that cannot easily be done in SQL).
22 | -- The Haskell migration is paired with an SQL migration and uses the same MigrationVersion
23 | -- numbering system. For example when 'migration-2-0008-20190731.sql' is applied this
24 | -- function will be called and if a Haskell migration with that version number exists
25 | -- in the 'migrationMap' it will be run.
26 | --
27 | -- An example of how this may be used is:
28 | -- 1. 'migration-2-0008-20190731.sql' adds a new NULL-able column.
29 | -- 2. Haskell migration 'MigrationVersion 2 8 20190731' populates the new column from data already
30 | -- in the database.
31 | -- 3. 'migration-2-0009-20190731.sql' makes the new column NOT NULL.
32 |
33 | runHaskellMigration :: Handle -> MigrationVersion -> IO ()
34 | runHaskellMigration logHandle mversion =
35 | case Map.lookup mversion migrationMap of
36 | Nothing -> pure ()
37 | Just action -> do
38 | let migrationVersion = toS $ renderMigrationVersion mversion
39 | hPutStrLn logHandle $ "Running : migration-" ++ migrationVersion ++ ".hs"
40 | putStr $ " migration-" ++ migrationVersion ++ ".hs ... "
41 | hFlush stdout
42 | handle handler $ runDbHandleLogger logHandle action
43 | putTextLn "ok"
44 | where
45 | handler :: SomeException -> IO a
46 | handler e = do
47 | putStrLn $ "runHaskellMigration: " ++ show e
48 | hPutStrLn logHandle $ "runHaskellMigration: " ++ show e
49 | hClose logHandle
50 | exitFailure
51 |
52 | --------------------------------------------------------------------------------
53 |
54 | migrationMap :: MonadLogger m => Map MigrationVersion (ReaderT SqlBackend m ())
55 | migrationMap =
56 | Map.fromList
57 | [ ( MigrationVersion 2 1 20190731, migration0001 )
58 | ]
59 |
60 | --------------------------------------------------------------------------------
61 |
62 | migration0001 :: MonadLogger m => ReaderT SqlBackend m ()
63 | migration0001 =
64 | -- Place holder.
65 | pure ()
66 |
67 | --------------------------------------------------------------------------------
68 |
69 |
--------------------------------------------------------------------------------
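Following the numbered example in the module comment above, a hypothetical sketch of what a second entry in 'migrationMap' would look like (the version triple and the backfill body are invented for illustration):

    migrationMapExample :: MonadLogger m => Map MigrationVersion (ReaderT SqlBackend m ())
    migrationMapExample =
      Map.fromList
        [ ( MigrationVersion 2 1 20190731, migration0001 )
          -- Paired with a hypothetical 'migration-2-0010-20210901.sql' that adds a
          -- NULL-able column; this Haskell step would backfill it before a follow-up
          -- SQL migration tightens the column to NOT NULL.
        , ( MigrationVersion 2 10 20210901, backfillNewColumn )
        ]
      where
        backfillNewColumn = pure ()  -- placeholder for the real backfill query
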
/smash/src/Cardano/SMASH/DBSync/Metrics.hs:
--------------------------------------------------------------------------------
1 | {-# LANGUAGE NoImplicitPrelude #-}
2 | {-# LANGUAGE OverloadedStrings #-}
3 |
4 | module Cardano.SMASH.DBSync.Metrics
5 | ( Metrics (..)
6 | , makeMetrics
7 | , withMetricSetters
8 | , withMetricsServer
9 | ) where
10 |
11 | import Cardano.Prelude
12 |
13 | import Cardano.Slotting.Slot (SlotNo (..))
14 |
15 | import Cardano.Sync.Types (MetricSetters (..))
16 |
17 | import Ouroboros.Network.Block (BlockNo (..))
18 |
19 | import System.Metrics.Prometheus.Concurrent.RegistryT (RegistryT (..), registerGauge,
20 | runRegistryT, unRegistryT)
21 | import System.Metrics.Prometheus.Http.Scrape (serveMetricsT)
22 | import System.Metrics.Prometheus.Metric.Gauge (Gauge)
23 | import qualified System.Metrics.Prometheus.Metric.Gauge as Gauge
24 |
25 | data Metrics = Metrics
26 | { mNodeBlockHeight :: !Gauge
27 | -- ^ The block tip number of the remote node.
28 | , mDbQueueLength :: !Gauge
29 | -- ^ The number of @DbAction@ remaining for the database.
30 | , mDbBlockHeight :: !Gauge
31 | -- ^ The block tip number in the database.
32 | , mDbSlotHeight :: !Gauge
33 | -- ^ The slot tip number in the database.
34 | }
35 |
36 | -- This enables us to be much more flexible with what we actually measure.
37 | withMetricSetters :: Int -> (MetricSetters -> IO a) -> IO a
38 | withMetricSetters prometheusPort action =
39 | withMetricsServer prometheusPort $ \metrics -> do
40 | action $
41 | MetricSetters
42 | { metricsSetNodeBlockHeight = \ (BlockNo nodeHeight) ->
43 | Gauge.set (fromIntegral nodeHeight) $ mNodeBlockHeight metrics
44 | , metricsSetDbQueueLength = \ queuePostWrite ->
45 | Gauge.set (fromIntegral queuePostWrite) $ mDbQueueLength metrics
46 | , metricsSetDbBlockHeight = \ (BlockNo blockNo) ->
47 | Gauge.set (fromIntegral blockNo) $ mDbBlockHeight metrics
48 | , metricsSetDbSlotHeight = \ (SlotNo slotNo) ->
49 | Gauge.set (fromIntegral slotNo) $ mDbSlotHeight metrics
50 | }
51 |
52 | withMetricsServer :: Int -> (Metrics -> IO a) -> IO a
53 | withMetricsServer port action = do
54 | -- Using both `RegistryT` and `bracket` here is overkill. Unfortunately the
55 | -- Prometheus API requires the use of a `Registry` and this seems to be the
56 | -- least sucky way of doing it.
57 | (metrics, registry) <- runRegistryT $ (,) <$> makeMetrics <*> RegistryT ask
58 | bracket
59 | (async $ runReaderT (unRegistryT $ serveMetricsT port []) registry)
60 | cancel
61 | (const $ action metrics)
62 |
63 | makeMetrics :: RegistryT IO Metrics
64 | makeMetrics =
65 | Metrics
66 | <$> registerGauge "cardano_db_sync_node_block_height" mempty
67 | <*> registerGauge "cardano_db_sync_db_queue_length" mempty
68 | <*> registerGauge "cardano_db_sync_db_block_height" mempty
69 | <*> registerGauge "cardano_db_sync_db_slot_height" mempty
70 |
--------------------------------------------------------------------------------
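A minimal sketch of a hypothetical caller of `withMetricSetters`, feeding the four gauges defined above. The port and values are illustrative, and the queue-length setter is assumed to accept a plain numeric literal since it is applied through `fromIntegral`:

```haskell
import Cardano.Slotting.Slot (SlotNo (..))
import Cardano.Sync.Types (MetricSetters (..))
import Ouroboros.Network.Block (BlockNo (..))

import Cardano.SMASH.DBSync.Metrics (withMetricSetters)

-- Serve the Prometheus scrape endpoint on port 8080 and update the
-- gauges; a real caller would invoke the setters from the sync loop.
main :: IO ()
main =
  withMetricSetters 8080 $ \setters -> do
    metricsSetNodeBlockHeight setters (BlockNo 5000000)
    metricsSetDbBlockHeight setters (BlockNo 4999990)
    metricsSetDbSlotHeight setters (SlotNo 40000000)
    metricsSetDbQueueLength setters 3
```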
/release.nix:
--------------------------------------------------------------------------------
1 | ############################################################################
2 | #
3 | # Hydra release jobset.
4 | #
5 | # The purpose of this file is to select jobs defined in default.nix and map
6 | # them to all supported build platforms.
7 | #
8 | ############################################################################
9 |
10 | # The project sources
11 | { smash ? { outPath = ./.; rev = "abcdef"; }
12 |
13 | # Function arguments to pass to the project
14 | , projectArgs ? {
15 | inherit sourcesOverride;
16 | config = { allowUnfree = false; inHydra = true; };
17 | gitrev = smash.rev;
18 | }
19 |
20 | # The systems that the jobset will be built for.
21 | , supportedSystems ? [ "x86_64-linux" "x86_64-darwin" ]
22 |
23 | # The systems used for cross-compiling (default: linux)
24 | , supportedCrossSystems ? [ (builtins.head supportedSystems) ]
25 |
26 | # A Hydra option
27 | , scrubJobs ? true
28 |
29 | # Dependencies overrides
30 | , sourcesOverride ? {}
31 |
32 | # Import pkgs, including IOHK common nix lib
33 | , pkgs ? import ./nix { inherit sourcesOverride; }
34 |
35 | }:
36 |
37 | with (import pkgs.iohkNix.release-lib) {
38 | inherit pkgs;
39 | inherit supportedSystems supportedCrossSystems scrubJobs projectArgs;
40 | packageSet = import smash;
41 | gitrev = smash.rev;
42 | };
43 |
44 | with pkgs.lib;
45 |
46 | let
47 | # restrict supported systems to a subset where tests (if exist) are required to pass:
48 | testsSupportedSystems = intersectLists supportedSystems [ "x86_64-linux" "x86_64-darwin" ];
49 | # Recurse through an attrset, returning all derivations in a list matching test supported systems.
50 | collectJobs' = ds: filter (d: elem d.system testsSupportedSystems) (collect isDerivation ds);
51 | # Adds the package name to the derivations for windows-testing-bundle.nix
52 | # (passthru.identifier.name does not survive mapTestOn)
53 | collectJobs = ds: concatLists (
54 | mapAttrsToList (packageName: package:
55 | map (drv: drv // { inherit packageName; }) (collectJobs' package)
56 | ) ds);
57 |
58 | nonDefaultBuildSystems = tail supportedSystems;
59 | # Paths or prefixes of paths of derivations to build only on the default system (ie. linux on hydra):
60 | onlyBuildOnDefaultSystem = [
61 | ["dockerImage"]
62 | ["checks" "tests" "smash" "db-spec-test"] ["haskellPackages" "smash" "checks" "db-spec-test"]
63 | ];
64 |
65 | jobs = {
66 | native =
67 | let filteredBuilds = mapAttrsRecursiveCond (a: !(isList a)) (path: value:
68 | if (any (p: take (length p) path == p) onlyBuildOnDefaultSystem) then filter (s: !(elem s nonDefaultBuildSystems)) value else value)
69 | (packagePlatforms project);
70 | in (mapTestOn (__trace (__toJSON filteredBuilds) filteredBuilds));
71 | # only build nixos tests on first supported system (linux)
72 | inherit (pkgsFor (builtins.head supportedSystems)) nixosTests;
73 | } // (mkRequiredJob (concatLists [
74 | (collectJobs jobs.native.checks)
75 | (collectJobs jobs.native.libs)
76 | (collectJobs jobs.native.exes)
77 | [ jobs.nixosTests.smashTest.x86_64-linux
78 | jobs.native.cardano-node.x86_64-linux
79 | ]
80 | ]));
81 |
82 | in jobs
83 |
--------------------------------------------------------------------------------
/nix/haskell.nix:
--------------------------------------------------------------------------------
1 | ############################################################################
2 | # Builds Haskell packages with Haskell.nix
3 | ############################################################################
4 | { lib
5 | , stdenv
6 | , haskell-nix
7 | , buildPackages
8 | , config ? {}
9 | # GHC attribute name
10 | , compiler
11 | # Source root directory
12 | , src
13 | # Enable profiling
14 | , profiling ? config.haskellNix.profiling or false
15 | , projectPackagesNames
16 | , postgresql
17 | # Disable basic auth by default:
18 | , flags ? [ "disable-basic-auth" ]
19 | }:
20 | let
21 | preCheck = ''
22 | echo pre-check
23 | initdb --encoding=UTF8 --locale=en_US.UTF-8 --username=postgres $NIX_BUILD_TOP/db-dir
24 | postgres -D $NIX_BUILD_TOP/db-dir -k /tmp &
25 | PSQL_PID=$!
26 | sleep 10
27 | if (echo '\q' | psql -h /tmp postgres postgres); then
28 | echo "PostgreSQL server is verified to be started."
29 | else
30 | echo "Failed to connect to local PostgreSQL server."
31 | exit 2
32 | fi
33 | ls -ltrh $NIX_BUILD_TOP
34 | DBUSER=nixbld
35 | DBNAME=nixbld
36 | export SMASHPGPASSFILE=$NIX_BUILD_TOP/pgpass-test
37 | echo "/tmp:5432:$DBUSER:$DBUSER:*" > $SMASHPGPASSFILE
38 | cp -vir ${../schema} ../schema
39 | chmod 600 $SMASHPGPASSFILE
40 | psql -h /tmp postgres postgres <
--------------------------------------------------------------------------------
/smash/src/Cardano/SMASH/DBSync/Db/PGConfig.hs:
--------------------------------------------------------------------------------
37 | toConnectionString :: PGConfig -> ConnectionString
38 | toConnectionString pgc =
39 | BS.concat
40 | [ "host=", pgcHost pgc, " "
41 | , "port=", pgcPort pgc, " "
42 | , "user=", pgcUser pgc, " "
43 | , "dbname=", pgcDbname pgc, " "
44 | , "password=", pgcPassword pgc
45 | ]
46 |
47 | -- | Read the PostgreSQL configuration from the file at the location specified by the
48 | -- '$SMASHPGPASSFILE' environment variable.
49 | readPGPassFileEnv :: IO PGConfig
50 | readPGPassFileEnv = do
51 | mpath <- lookupEnv "SMASHPGPASSFILE"
52 | case mpath of
53 | Just fp -> readPGPassFileExit (PGPassFile fp)
54 | Nothing -> panic $ "Environment variable 'SMASHPGPASSFILE' not set."
55 |
56 | -- | Read the PostgreSQL configuration from the specified file.
57 | readPGPassFile :: PGPassFile -> IO (Maybe PGConfig)
58 | readPGPassFile (PGPassFile fpath) = do
59 | ebs <- Exception.try $ BS.readFile fpath
60 | case ebs of
61 | Left e -> pure $ handler e
62 | Right bs -> extract bs
63 | where
64 | handler :: IOException -> Maybe a
65 | handler = const Nothing
66 |
67 | extract :: ByteString -> IO (Maybe PGConfig)
68 | extract bs =
69 | case BS.lines bs of
70 | (b:_) -> parseConfig b
71 | _ -> pure Nothing
72 |
73 | parseConfig :: ByteString -> IO (Maybe PGConfig)
74 | parseConfig bs =
75 | case BS.split ':' bs of
76 | [h, pt, d, u, pwd] -> Just <$> replaceUser (PGConfig h pt d u pwd)
77 | _ -> pure Nothing
78 |
79 | replaceUser :: PGConfig -> IO PGConfig
80 | replaceUser pgc
81 | | pgcUser pgc /= "*" = pure pgc
82 | | otherwise = do
83 | euser <- Exception.try getEffectiveUserName
84 | case euser of
85 | Left (_ :: IOException) ->
86 | panic "readPGPassFile: User in pgpass file was specified as '*' but getEffectiveUserName failed."
87 | Right user ->
88 | pure $ pgc { pgcUser = BS.pack user }
89 |
90 |
91 | -- | Read 'PGPassFile' into 'PGConfig'.
92 | -- If it fails it will raise an error.
93 | -- If it succeeds, it will set the 'PGPASSFILE' environment variable.
94 | readPGPassFileExit :: PGPassFile -> IO PGConfig
95 | readPGPassFileExit pgpassfile@(PGPassFile fpath) = do
96 | mc <- readPGPassFile pgpassfile
97 | case mc of
98 | Nothing -> panic $ toS $ "Not able to read PGPassFile at " ++ show fpath ++ "."
99 | Just pgc -> do
100 | setEnv "SMASHPGPASSFILE" fpath
101 | pure pgc
102 |
--------------------------------------------------------------------------------
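A short usage sketch for the functions above, assuming `ConnectionString` is the `ByteString` alias from persistent-postgresql; the pgpass path is illustrative:

```haskell
import qualified Data.ByteString.Char8 as BS
import System.Environment (setEnv)

import Cardano.SMASH.DBSync.Db.PGConfig

-- Point SMASHPGPASSFILE at a pgpass file, read it back through
-- readPGPassFileEnv, and print the libpq connection string it yields.
main :: IO ()
main = do
  setEnv "SMASHPGPASSFILE" "config/pgpass"
  pgc <- readPGPassFileEnv
  BS.putStrLn (toConnectionString pgc)
```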
/config/mainnet-config.yaml:
--------------------------------------------------------------------------------
1 | # Explorer DB Node configuration
2 |
3 | NetworkName: mainnet
4 |
5 | EnableLogMetrics: False
6 | EnableLogging: True
7 |
8 | # The config file for the node we are connecting to. If this is not the correct
9 | # config, it will likely lead to db-sync throwing up weird error messages from
10 | # the consensus layer.
11 | # The path to the node config file is relative to this config file.
12 | NodeConfigFile: ../../cardano-node/configuration/cardano/mainnet-config.json
13 |
14 | # ------------------------------------------------------------------------------
15 | # Logging configuration follows.
16 |
17 | # global filter; messages must have at least this severity to pass:
18 | minSeverity: Info
19 |
20 | # global file rotation settings:
21 | rotation:
22 | rpLogLimitBytes: 5000000
23 | rpKeepFilesNum: 10
24 | rpMaxAgeHours: 24
25 |
26 | # these backends are initialized:
27 | setupBackends:
28 | - AggregationBK
29 | - KatipBK
30 | # - EditorBK
31 | # - EKGViewBK
32 |
33 | # if not indicated otherwise, then messages are passed to these backends:
34 | defaultBackends:
35 | - KatipBK
36 |
37 | # if wanted, the GUI is listening on this port:
38 | # hasGUI: 12787
39 |
40 | # if wanted, the EKG interface is listening on this port:
41 | # hasEKG: 12788
42 |
43 | # here we set up outputs of logging in 'katip':
44 | setupScribes:
45 | - scKind: StdoutSK
46 | scName: stdout
47 | scFormat: ScText
48 | scRotation: null
49 |
50 | # if not indicated otherwise, then log output is directed to this:
51 | defaultScribes:
52 | - - StdoutSK
53 | - stdout
54 |
55 | # more options which can be passed as key-value pairs:
56 | options:
57 | cfokey:
58 | value: "Release-1.0.0"
59 | mapSubtrace:
60 | benchmark:
61 | contents:
62 | - GhcRtsStats
63 | - MonotonicClock
64 | subtrace: ObservableTrace
65 | '#ekgview':
66 | contents:
67 | - - tag: Contains
68 | contents: 'cardano.epoch-validation.benchmark'
69 | - - tag: Contains
70 | contents: .monoclock.basic.
71 | - - tag: Contains
72 | contents: 'cardano.epoch-validation.benchmark'
73 | - - tag: Contains
74 | contents: diff.RTS.cpuNs.timed.
75 | - - tag: StartsWith
76 | contents: '#ekgview.#aggregation.cardano.epoch-validation.benchmark'
77 | - - tag: Contains
78 | contents: diff.RTS.gcNum.timed.
79 | subtrace: FilterTrace
80 | 'cardano.epoch-validation.utxo-stats':
81 | # Change the `subtrace` value to `Neutral` in order to log
82 | # `UTxO`-related messages during epoch validation.
83 | subtrace: NoTrace
84 | '#messagecounters.aggregation':
85 | subtrace: NoTrace
86 | '#messagecounters.ekgview':
87 | subtrace: NoTrace
88 | '#messagecounters.switchboard':
89 | subtrace: NoTrace
90 | '#messagecounters.katip':
91 | subtrace: NoTrace
92 | '#messagecounters.monitoring':
93 | subtrace: NoTrace
94 | 'cardano.#messagecounters.aggregation':
95 | subtrace: NoTrace
96 | 'cardano.#messagecounters.ekgview':
97 | subtrace: NoTrace
98 | 'cardano.#messagecounters.switchboard':
99 | subtrace: NoTrace
100 | 'cardano.#messagecounters.katip':
101 | subtrace: NoTrace
102 | 'cardano.#messagecounters.monitoring':
103 | subtrace: NoTrace
104 | mapBackends:
105 | cardano.epoch-validation.benchmark:
106 | - AggregationBK
107 | '#aggregation.cardano.epoch-validation.benchmark':
108 | - EKGViewBK
109 | mapSeverity:
110 | smash-node.Subscription: Error
111 | smash-node.Mux: Error
112 | smash-node: Info
113 |
--------------------------------------------------------------------------------
/config/allegra-config.yaml:
--------------------------------------------------------------------------------
1 | # Explorer DB Node configuration
2 | NetworkName: allegra
3 |
4 | EnableLogMetrics: False
5 | EnableLogging: True
6 |
7 | Protocol: Cardano
8 |
9 | # The config file for the node we are connecting to. If this is not the correct
10 | # config, it will likely lead to db-sync throwing up weird error messages from
11 | # the consensus layer.
12 | # The path to the node config file is relative to this config file.
13 |
14 | NodeConfigFile: ../../cardano-node/allegra/allegra-config.json
15 |
16 | # ------------------------------------------------------------------------------
17 | # Logging configuration follows.
18 |
19 | # global filter; messages must have at least this severity to pass:
20 | minSeverity: Info
21 |
22 | # global file rotation settings:
23 | rotation:
24 | rpLogLimitBytes: 5000000
25 | rpKeepFilesNum: 10
26 | rpMaxAgeHours: 24
27 |
28 | # these backends are initialized:
29 | setupBackends:
30 | - AggregationBK
31 | - KatipBK
32 | # - EditorBK
33 | # - EKGViewBK
34 |
35 | # if not indicated otherwise, then messages are passed to these backends:
36 | defaultBackends:
37 | - KatipBK
38 |
39 | # if wanted, the GUI is listening on this port:
40 | # hasGUI: 12787
41 |
42 | # if wanted, the EKG interface is listening on this port:
43 | # hasEKG: 12788
44 |
45 | # here we set up outputs of logging in 'katip':
46 | setupScribes:
47 | - scKind: StdoutSK
48 | scName: stdout
49 | scFormat: ScText
50 | scRotation: null
51 |
52 | # if not indicated otherwise, then log output is directed to this:
53 | defaultScribes:
54 | - - StdoutSK
55 | - stdout
56 |
57 | # more options which can be passed as key-value pairs:
58 | options:
59 | cfokey:
60 | value: "Release-1.0.0"
61 | mapSubtrace:
62 | benchmark:
63 | contents:
64 | - GhcRtsStats
65 | - MonotonicClock
66 | subtrace: ObservableTrace
67 | '#ekgview':
68 | contents:
69 | - - tag: Contains
70 | contents: 'cardano.epoch-validation.benchmark'
71 | - - tag: Contains
72 | contents: .monoclock.basic.
73 | - - tag: Contains
74 | contents: 'cardano.epoch-validation.benchmark'
75 | - - tag: Contains
76 | contents: diff.RTS.cpuNs.timed.
77 | - - tag: StartsWith
78 | contents: '#ekgview.#aggregation.cardano.epoch-validation.benchmark'
79 | - - tag: Contains
80 | contents: diff.RTS.gcNum.timed.
81 | subtrace: FilterTrace
82 | 'cardano.epoch-validation.utxo-stats':
83 | # Change the `subtrace` value to `Neutral` in order to log
84 | # `UTxO`-related messages during epoch validation.
85 | subtrace: NoTrace
86 | '#messagecounters.aggregation':
87 | subtrace: NoTrace
88 | '#messagecounters.ekgview':
89 | subtrace: NoTrace
90 | '#messagecounters.switchboard':
91 | subtrace: NoTrace
92 | '#messagecounters.katip':
93 | subtrace: NoTrace
94 | '#messagecounters.monitoring':
95 | subtrace: NoTrace
96 | 'cardano.#messagecounters.aggregation':
97 | subtrace: NoTrace
98 | 'cardano.#messagecounters.ekgview':
99 | subtrace: NoTrace
100 | 'cardano.#messagecounters.switchboard':
101 | subtrace: NoTrace
102 | 'cardano.#messagecounters.katip':
103 | subtrace: NoTrace
104 | 'cardano.#messagecounters.monitoring':
105 | subtrace: NoTrace
106 | mapBackends:
107 | cardano.epoch-validation.benchmark:
108 | - AggregationBK
109 | '#aggregation.cardano.epoch-validation.benchmark':
110 | - EKGViewBK
111 | mapSeverity:
112 | smash-node.Subscription: Error
113 | smash-node.Mux: Error
114 | smash-node: Info
115 |
--------------------------------------------------------------------------------
/config/testnet-config.yaml:
--------------------------------------------------------------------------------
1 | # Explorer DB Node configuration
2 |
3 | NetworkName: testnet
4 |
5 | EnableLogMetrics: False
6 | EnableLogging: True
7 |
8 | Protocol: Byron
9 |
10 | RequiresNetworkMagic: RequiresMagic
11 |
12 | ByronGenesisFile: ../../cardano-node/configuration/defaults/byron-testnet/genesis.json
13 | ByronGenesisHash: 96fceff972c2c06bd3bb5243c39215333be6d56aaf4823073dca31afe5038471
14 |
15 | ShelleyGenesisFile: not/applicable
16 | ShelleyGenesisHash: not/applicable
17 |
18 | # ------------------------------------------------------------------------------
19 | # Logging configuration follows.
20 |
21 | # global filter; messages must have at least this severity to pass:
22 | minSeverity: Info
23 |
24 | # global file rotation settings:
25 | rotation:
26 | rpLogLimitBytes: 5000000
27 | rpKeepFilesNum: 10
28 | rpMaxAgeHours: 24
29 |
30 | # these backends are initialized:
31 | setupBackends:
32 | - AggregationBK
33 | - KatipBK
34 | # - EditorBK
35 | # - EKGViewBK
36 |
37 | # if not indicated otherwise, then messages are passed to these backends:
38 | defaultBackends:
39 | - KatipBK
40 |
41 | # if wanted, the GUI is listening on this port:
42 | # hasGUI: 12787
43 |
44 | # if wanted, the EKG interface is listening on this port:
45 | # hasEKG: 12788
46 |
47 | # here we set up outputs of logging in 'katip':
48 | setupScribes:
49 | - scKind: StdoutSK
50 | scName: stdout
51 | scFormat: ScText
52 | scRotation: null
53 |
54 | # if not indicated otherwise, then log output is directed to this:
55 | defaultScribes:
56 | - - StdoutSK
57 | - stdout
58 |
59 | # more options which can be passed as key-value pairs:
60 | options:
61 | cfokey:
62 | value: "Release-1.0.0"
63 | mapSubtrace:
64 | benchmark:
65 | contents:
66 | - GhcRtsStats
67 | - MonotonicClock
68 | subtrace: ObservableTrace
69 | '#ekgview':
70 | contents:
71 | - - tag: Contains
72 | contents: 'cardano.epoch-validation.benchmark'
73 | - - tag: Contains
74 | contents: .monoclock.basic.
75 | - - tag: Contains
76 | contents: 'cardano.epoch-validation.benchmark'
77 | - - tag: Contains
78 | contents: diff.RTS.cpuNs.timed.
79 | - - tag: StartsWith
80 | contents: '#ekgview.#aggregation.cardano.epoch-validation.benchmark'
81 | - - tag: Contains
82 | contents: diff.RTS.gcNum.timed.
83 | subtrace: FilterTrace
84 | 'cardano.epoch-validation.utxo-stats':
85 | # Change the `subtrace` value to `Neutral` in order to log
86 | # `UTxO`-related messages during epoch validation.
87 | subtrace: NoTrace
88 | '#messagecounters.aggregation':
89 | subtrace: NoTrace
90 | '#messagecounters.ekgview':
91 | subtrace: NoTrace
92 | '#messagecounters.switchboard':
93 | subtrace: NoTrace
94 | '#messagecounters.katip':
95 | subtrace: NoTrace
96 | '#messagecounters.monitoring':
97 | subtrace: NoTrace
98 | 'cardano.#messagecounters.aggregation':
99 | subtrace: NoTrace
100 | 'cardano.#messagecounters.ekgview':
101 | subtrace: NoTrace
102 | 'cardano.#messagecounters.switchboard':
103 | subtrace: NoTrace
104 | 'cardano.#messagecounters.katip':
105 | subtrace: NoTrace
106 | 'cardano.#messagecounters.monitoring':
107 | subtrace: NoTrace
108 | mapBackends:
109 | cardano.epoch-validation.benchmark:
110 | - AggregationBK
111 | '#aggregation.cardano.epoch-validation.benchmark':
112 | - EKGViewBK
113 | mapSeverity:
114 | smash-node.Subscription: Error
115 | smash-node.Mux: Error
116 | smash-node: Info
117 |
--------------------------------------------------------------------------------
/config/launchpad-config.yaml:
--------------------------------------------------------------------------------
1 | # Explorer DB Node configuration
2 | NetworkName: launchpad
3 |
4 | EnableLogMetrics: False
5 | EnableLogging: True
6 |
7 | Protocol: Cardano
8 |
9 | # The config file for the node we are connecting to. If this is not the correct
10 | # config, it will likely lead to db-sync throwing up weird error messages from
11 | # the consensus layer.
12 | # The path to the node config file is relative to this config file.
13 |
14 | NodeConfigFile: /nix/store/s2z0clsfwci7z2g4167nlvf3i0m2wb6s-config-0.json
15 |
16 | # ------------------------------------------------------------------------------
17 | # Logging configuration follows.
18 |
19 | # global filter; messages must have at least this severity to pass:
20 | minSeverity: Info
21 |
22 | # global file rotation settings:
23 | rotation:
24 | rpLogLimitBytes: 5000000
25 | rpKeepFilesNum: 10
26 | rpMaxAgeHours: 24
27 |
28 | # these backends are initialized:
29 | setupBackends:
30 | - AggregationBK
31 | - KatipBK
32 | # - EditorBK
33 | # - EKGViewBK
34 |
35 | # if not indicated otherwise, then messages are passed to these backends:
36 | defaultBackends:
37 | - KatipBK
38 |
39 | # if wanted, the GUI is listening on this port:
40 | # hasGUI: 12787
41 |
42 | # if wanted, the EKG interface is listening on this port:
43 | # hasEKG: 12788
44 |
45 | # here we set up outputs of logging in 'katip':
46 | setupScribes:
47 | - scKind: StdoutSK
48 | scName: stdout
49 | scFormat: ScText
50 | scRotation: null
51 |
52 | # if not indicated otherwise, then log output is directed to this:
53 | defaultScribes:
54 | - - StdoutSK
55 | - stdout
56 |
57 | # more options which can be passed as key-value pairs:
58 | options:
59 | cfokey:
60 | value: "Release-1.0.0"
61 | mapSubtrace:
62 | benchmark:
63 | contents:
64 | - GhcRtsStats
65 | - MonotonicClock
66 | subtrace: ObservableTrace
67 | '#ekgview':
68 | contents:
69 | - - tag: Contains
70 | contents: 'cardano.epoch-validation.benchmark'
71 | - - tag: Contains
72 | contents: .monoclock.basic.
73 | - - tag: Contains
74 | contents: 'cardano.epoch-validation.benchmark'
75 | - - tag: Contains
76 | contents: diff.RTS.cpuNs.timed.
77 | - - tag: StartsWith
78 | contents: '#ekgview.#aggregation.cardano.epoch-validation.benchmark'
79 | - - tag: Contains
80 | contents: diff.RTS.gcNum.timed.
81 | subtrace: FilterTrace
82 | 'cardano.epoch-validation.utxo-stats':
83 | # Change the `subtrace` value to `Neutral` in order to log
84 | # `UTxO`-related messages during epoch validation.
85 | subtrace: NoTrace
86 | '#messagecounters.aggregation':
87 | subtrace: NoTrace
88 | '#messagecounters.ekgview':
89 | subtrace: NoTrace
90 | '#messagecounters.switchboard':
91 | subtrace: NoTrace
92 | '#messagecounters.katip':
93 | subtrace: NoTrace
94 | '#messagecounters.monitoring':
95 | subtrace: NoTrace
96 | 'cardano.#messagecounters.aggregation':
97 | subtrace: NoTrace
98 | 'cardano.#messagecounters.ekgview':
99 | subtrace: NoTrace
100 | 'cardano.#messagecounters.switchboard':
101 | subtrace: NoTrace
102 | 'cardano.#messagecounters.katip':
103 | subtrace: NoTrace
104 | 'cardano.#messagecounters.monitoring':
105 | subtrace: NoTrace
106 | mapBackends:
107 | cardano.epoch-validation.benchmark:
108 | - AggregationBK
109 | '#aggregation.cardano.epoch-validation.benchmark':
110 | - EKGViewBK
111 | mapSeverity:
112 | smash-node.Subscription: Error
113 | smash-node.Mux: Error
114 | smash-node: Info
115 |
--------------------------------------------------------------------------------
/config/shelley-qa-config.yaml:
--------------------------------------------------------------------------------
1 | # Explorer DB Node configuration
2 |
3 | NetworkName: shelley-qa
4 |
5 | EnableLogMetrics: False
6 | EnableLogging: True
7 |
8 | Protocol: Cardano
9 |
10 | # The config file for the node we are connecting to. If this is not the correct
11 | # config, it will likely lead to db-sync throwing up weird error messages from
12 | # the consensus layer.
13 | # The path to the node config file is relative to this config file.
14 | NodeConfigFile: ../../cardano-node/configuration/cardano/shelley_qa-config.json
15 |
16 | # ------------------------------------------------------------------------------
17 | # Logging configuration follows.
18 |
19 | # global filter; messages must have at least this severity to pass:
20 | minSeverity: Info
21 |
22 | # global file rotation settings:
23 | rotation:
24 | rpLogLimitBytes: 5000000
25 | rpKeepFilesNum: 10
26 | rpMaxAgeHours: 24
27 |
28 | # these backends are initialized:
29 | setupBackends:
30 | - AggregationBK
31 | - KatipBK
32 | # - EditorBK
33 | # - EKGViewBK
34 |
35 | # if not indicated otherwise, then messages are passed to these backends:
36 | defaultBackends:
37 | - KatipBK
38 |
39 | # if wanted, the GUI is listening on this port:
40 | # hasGUI: 12787
41 |
42 | # if wanted, the EKG interface is listening on this port:
43 | # hasEKG: 12788
44 |
45 | # here we set up outputs of logging in 'katip':
46 | setupScribes:
47 | - scKind: StdoutSK
48 | scName: stdout
49 | scFormat: ScText
50 | scRotation: null
51 |
52 | # if not indicated otherwise, then log output is directed to this:
53 | defaultScribes:
54 | - - StdoutSK
55 | - stdout
56 |
57 | # more options which can be passed as key-value pairs:
58 | options:
59 | cfokey:
60 | value: "Release-1.0.0"
61 | mapSubtrace:
62 | benchmark:
63 | contents:
64 | - GhcRtsStats
65 | - MonotonicClock
66 | subtrace: ObservableTrace
67 | '#ekgview':
68 | contents:
69 | - - tag: Contains
70 | contents: 'cardano.epoch-validation.benchmark'
71 | - - tag: Contains
72 | contents: .monoclock.basic.
73 | - - tag: Contains
74 | contents: 'cardano.epoch-validation.benchmark'
75 | - - tag: Contains
76 | contents: diff.RTS.cpuNs.timed.
77 | - - tag: StartsWith
78 | contents: '#ekgview.#aggregation.cardano.epoch-validation.benchmark'
79 | - - tag: Contains
80 | contents: diff.RTS.gcNum.timed.
81 | subtrace: FilterTrace
82 | 'cardano.epoch-validation.utxo-stats':
83 | # Change the `subtrace` value to `Neutral` in order to log
84 | # `UTxO`-related messages during epoch validation.
85 | subtrace: NoTrace
86 | '#messagecounters.aggregation':
87 | subtrace: NoTrace
88 | '#messagecounters.ekgview':
89 | subtrace: NoTrace
90 | '#messagecounters.switchboard':
91 | subtrace: NoTrace
92 | '#messagecounters.katip':
93 | subtrace: NoTrace
94 | '#messagecounters.monitoring':
95 | subtrace: NoTrace
96 | 'cardano.#messagecounters.aggregation':
97 | subtrace: NoTrace
98 | 'cardano.#messagecounters.ekgview':
99 | subtrace: NoTrace
100 | 'cardano.#messagecounters.switchboard':
101 | subtrace: NoTrace
102 | 'cardano.#messagecounters.katip':
103 | subtrace: NoTrace
104 | 'cardano.#messagecounters.monitoring':
105 | subtrace: NoTrace
106 | mapBackends:
107 | cardano.epoch-validation.benchmark:
108 | - AggregationBK
109 | '#aggregation.cardano.epoch-validation.benchmark':
110 | - EKGViewBK
111 | mapSeverity:
112 | smash-node.Subscription: Error
113 | smash-node.Mux: Error
114 | smash-node: Info
115 |
--------------------------------------------------------------------------------
/doc/conf.py:
--------------------------------------------------------------------------------
1 |
2 |
3 | import sys
4 | import os
5 | import sphinx_rtd_theme
6 | import recommonmark
7 |
8 | from recommonmark.transform import AutoStructify
9 | from os.path import abspath, join, dirname
10 |
11 | sys.path.insert(0, abspath(join(dirname(__file__))))
12 |
13 | # -- RTD configuration ------------------------------------------------
14 |
15 | on_rtd = os.environ.get("READTHEDOCS", None) == "True"
16 |
17 | # This is used for linking and such so we link to the thing we're building
18 | rtd_version = os.environ.get("READTHEDOCS_VERSION", "latest")
19 | if rtd_version not in ["stable", "latest"]:
20 | rtd_version = "stable"
21 |
22 | # -- Project information -----------------------------------------------------
23 |
24 | project = 'SMASH Documentation'
25 | copyright = '2020, IOHK'
26 | author = 'IOHK'
27 |
28 | # The full version, including alpha/beta/rc tags
29 | release = '1.0.0'
30 |
31 |
32 | # -- General configuration ---------------------------------------------------
33 | master_doc = 'index'
34 | # Add any Sphinx extension module names here, as strings. They can be
35 | # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
36 | # ones.
37 |
38 | extensions = [
39 | "sphinx_rtd_theme",
40 | 'recommonmark',
41 | 'sphinx_markdown_tables',
42 | 'sphinxemoji.sphinxemoji',
43 | "sphinx.ext.autodoc",
44 | "sphinx.ext.autosummary",
45 | "sphinx.ext.intersphinx",
46 | "sphinx.ext.viewcode",
47 | ]
48 |
49 | # Add any paths that contain templates here, relative to this directory.
50 | templates_path = ['.sphinx/_templates']
51 | html_static_path = ['.sphinx/_static']
52 |
53 | source_suffix = {
54 | '.rst': 'restructuredtext',
55 | '.md': 'markdown',
56 | }
57 |
58 | intersphinx_mapping = {
59 | "commandsv1": (
60 | "https://robotpy.readthedocs.io/projects/commands-v1/en/%s/"
61 | % rtd_version,
62 | None,
63 | ),
64 | }
65 |
66 | # List of patterns, relative to source directory, that match files and
67 | # directories to ignore when looking for source files.
68 | # This pattern also affects html_static_path and html_extra_path.
69 | exclude_patterns = []
70 |
71 |
72 | # -- Options for HTML output -------------------------------------------------
73 |
74 | # The theme to use for HTML and HTML Help pages. See the documentation for
75 | # a list of builtin themes.
76 | #
77 | html_theme = "sphinx_rtd_theme"
78 |
79 | html_theme_options = {
80 | 'logo_only': False,
81 | 'display_version': False,
82 | 'prev_next_buttons_location': 'bottom',
83 | 'style_external_links': False,
84 | 'style_nav_header_background': '#0635a7',
85 | # Toc options
86 | 'collapse_navigation': True,
87 | 'sticky_navigation': True,
88 | 'navigation_depth': 4,
89 | 'includehidden': True,
90 | 'titles_only': False
91 | }
92 |
93 | # Add any paths that contain custom static files (such as style sheets) here,
94 | # relative to this directory. They are copied after the builtin static files,
95 | # so a file named "default.css" will overwrite the builtin "default.css".
96 |
97 | html_logo = ".sphinx/cardano-logo.png"
98 |
99 | html_context = {
100 | "display_github": True, # Add 'Edit on Github' link instead of 'View page source'
101 | "github_user": "input-output-hk",
102 | "github_repo": "cardano-node",
103 | "github_version": "master",
104 | "conf_py_path": "/",
105 | "source_suffix": source_suffix,
106 | }
107 |
108 | # -- Custom Document processing ----------------------------------------------
109 |
110 | def setup(app):
111 | app.add_config_value('recommonmark_config', {
112 | 'enable_auto_doc_ref': False,
113 | 'enable_auto_toc_tree': False,
114 | }, True)
115 | app.add_transform(AutoStructify)
--------------------------------------------------------------------------------
/ChangeLog.md:
--------------------------------------------------------------------------------
1 | # Changelog for smash
2 |
3 | ## 1.4.0
4 |
5 | ### Story
6 |
7 | - [CAD-1357] - Remove any traces of cardano-db-sync
8 | - [CAD-2180] - Documentation improvements
9 | - [CAD-2184] - Fix Swagger documentation to be consistent, add more info
10 | - [CAD-2449] - Add API endpoint for checking valid pool id
11 | - [CAD-2450] - Bump up to Mary (Native tokens) support
12 |
13 | ### Bug
14 |
15 | - [CAD-2408] - Create directory for --state-dir automatically if it does not exist
16 | - [CAD-2416] - Database connection inconsistency
17 | - [CAD-2476] - SMASH not returning active pools that were previously retired
18 |
19 | ## 1.3.0
20 |
21 | ### Story
22 |
23 | - [CAD-2169] - Expose API types in a separate package
24 | - [CAD-2177] - smash should work also with pool_ids in Bech32 format
25 | - [CAD-2182] - Pool insertion and ticker insertion should be added into API
26 | - [CAD-2183] - Add/remove admin user via CLI
27 | - [CAD-2323] - Bump up to Allegra Hard Fork
28 |
29 | ### Bug
30 |
31 | - [CAD-2176] - errors endpoint doesn't validate poolId properly
32 | - [CAD-2178] - The retryCount from the /errors endpoint is not correctly incremented
33 | - [CAD-2179] - pool_id delist endpoint is returning 200 for any string (not only for valid pool_ids)
34 | - [CAD-2181] - All queries that don't return anything should return 404
35 |
36 | ## 1.2.0
37 |
38 | ### Story
39 |
40 | - [CAD-1358] - Return the caching headers for the HTTP server to work with that
41 | - [CAD-1823] - Stake pools with issues list
42 | - [CAD-1824] - List of delisted Stake pools
43 | - [CAD-1838] - Add whitelisting (listing) to return delisted
44 | - [CAD-1926] - Retired pools should be ignored
45 | - [CAD-2061] - Logs improvement, add information about why an error occurred
46 | - [CAD-2074] - Health check endpoint
47 | - [CAD-2085] - Create migration scripts for SMASH
48 | - [CAD-2088] - Resolve paths relative to the config file, not the executable
49 | - [CAD-2093] - Use qualified module names
50 |
51 | ## 1.1.0
52 |
53 | ### Story
54 |
55 | - [CAD-1744] - Easily query reason for pool metadata lookup failure
56 |
57 | ### Bug
58 |
59 | - [CAD-1791] - smash on shelley-qa is failing when decoding address
60 | - [CAD-1753] - TLS version 1.3 not working correctly
61 |
62 |
63 | ## 1.0.1
64 |
65 | ### Bug
66 |
67 | - [CAD-1471] - Query pool id along with hash when looking for pool info
68 |
69 |
70 | ## 1.0.0
71 |
72 | ### Story
73 |
74 | - [CAD-1397] - ITN Ticker protection for mainnet and SMASH
75 | - [CAD-1399] - Change wording in documentation and codebase to delist/list
76 | - [CAD-1409] - Will not retry fetching metadata if hash mismatches or URL is not available
77 | - [CAD-1428] - Change the primary database key to be "poolid", not ticker name
78 | - [CAD-1446] - Support delayed registration of metadata
79 | - [CAD-1449] - Exponential back-off to pull Stake Pools metadata in case there’s a timeout or an HTTP error returned by the pool metadata server
80 | - [CAD-1456] - Implement blacklisting for pool ids
81 | - [CAD-1462] - Clarify insert ticker command name and report error if ticker already exists
82 |
83 | ### Bug
84 |
85 | - [CAD-1390] - Fix Nix issue
86 |
87 |
88 | ## 0.1.0
89 |
90 | ### Story
91 |
92 | - [CAD-762] - Initial API design
93 | - [CAD-770] - Add a simple in-memory database
94 | - [CAD-778] - Add block syncing mechanism
95 | - [CAD-779] - Add a database backend
96 | - [CAD-1266] - Adapt cardano-db-sync for SMASH
97 | - [CAD-1330] - Add http client for fetching JSON metadata
98 | - [CAD-1348] - Add block insertion so we don't sync from scratch
99 | - [CAD-1353] - Add JSON size check for offline metadata
100 | - [CAD-1361] - Servers return `200 OK` when metadata isn't found
101 | - [CAD-1354] - Add deny list functionality
102 | - [CAD-1355] - Add a flag for switching Basic Auth off/on
103 | - [CAD-1360] - Add documentation for third party clients
104 | - [CAD-1371] - Integrate SMASH against HFC
105 |
106 | ### Bug
107 |
108 | - [CAD-1361] - Servers return `200 OK` when metadata isn't found
109 |
110 |
--------------------------------------------------------------------------------
/smash-servant-types/src/Cardano/SMASH/DBSync/Db/Error.hs:
--------------------------------------------------------------------------------
1 | {-# LANGUAGE OverloadedStrings #-}
2 | {-# LANGUAGE DeriveGeneric #-}
3 |
4 | module Cardano.SMASH.DBSync.Db.Error
5 | ( DBFail (..)
6 | , renderLookupFail
7 | ) where
8 |
9 | import Cardano.Prelude
10 |
11 | import Data.Aeson (ToJSON (..), (.=), object, Value (..))
12 |
13 | import Cardano.SMASH.DBSync.Db.Types
14 |
15 |
16 | -- | Errors, not exceptions.
17 | data DBFail
18 | = DbLookupBlockHash !ByteString
19 | | DbLookupPoolMetadataHash !PoolId !PoolMetadataHash
20 | | DbMetaEmpty
21 | | DbMetaMultipleRows
22 | | PoolMetadataHashMismatch
23 | | PoolDelisted
24 | | UnableToEncodePoolMetadataToJSON !Text
25 | | UnknownError !Text
26 | | ReservedTickerAlreadyInserted !Text
27 | | RecordDoesNotExist
28 | | DbInsertError !Text
29 | deriving (Eq, Show, Generic)
30 |
31 | {-
32 |
33 | The example we agreed would be:
34 | ```
35 | {
36 | "code": "ERR_4214",
37 | "description": "You did something wrong."
38 | }
39 | ```
40 |
41 | -}
42 |
43 | instance ToJSON DBFail where
44 | toJSON failure@(DbLookupBlockHash _hash) =
45 | object
46 | [ "code" .= String "DbLookupBlockHash"
47 | , "description" .= String (renderLookupFail failure)
48 | ]
49 | toJSON failure@(DbLookupPoolMetadataHash _poolId _poolMDHash) =
50 | object
51 | [ "code" .= String "DbLookupPoolMetadataHash"
52 | , "description" .= String (renderLookupFail failure)
53 | ]
54 | toJSON failure@DbMetaEmpty =
55 | object
56 | [ "code" .= String "DbMetaEmpty"
57 | , "description" .= String (renderLookupFail failure)
58 | ]
59 | toJSON failure@DbMetaMultipleRows =
60 | object
61 | [ "code" .= String "DbMetaMultipleRows"
62 | , "description" .= String (renderLookupFail failure)
63 | ]
64 | toJSON failure@(PoolMetadataHashMismatch) =
65 | object
66 | [ "code" .= String "PoolMetadataHashMismatch"
67 | , "description" .= String (renderLookupFail failure)
68 | ]
69 | toJSON failure@(PoolDelisted) =
70 | object
71 | [ "code" .= String "PoolDelisted"
72 | , "description" .= String (renderLookupFail failure)
73 | ]
74 | toJSON failure@(UnableToEncodePoolMetadataToJSON _err) =
75 | object
76 | [ "code" .= String "UnableToEncodePoolMetadataToJSON"
77 | , "description" .= String (renderLookupFail failure)
78 | ]
79 | toJSON failure@(UnknownError _err) =
80 | object
81 | [ "code" .= String "UnknownError"
82 | , "description" .= String (renderLookupFail failure)
83 | ]
84 | toJSON failure@(ReservedTickerAlreadyInserted _tickerName) =
85 | object
86 | [ "code" .= String "ReservedTickerAlreadyInserted"
87 | , "description" .= String (renderLookupFail failure)
88 | ]
89 | toJSON failure@(RecordDoesNotExist) =
90 | object
91 | [ "code" .= String "RecordDoesNotExist"
92 | , "description" .= String (renderLookupFail failure)
93 | ]
94 | toJSON failure@(DbInsertError _err) =
95 | object
96 | [ "code" .= String "DbInsertError"
97 | , "description" .= String (renderLookupFail failure)
98 | ]
99 |
100 | renderLookupFail :: DBFail -> Text
101 | renderLookupFail lf =
102 | case lf of
103 | DbLookupBlockHash hash -> "The block hash " <> decodeUtf8 hash <> " is missing from the DB."
104 | DbLookupPoolMetadataHash poolId poolMDHash -> "The metadata with hash " <> show poolMDHash <> " for pool " <> show poolId <> " is missing from the DB."
105 | DbMetaEmpty -> "The metadata table is empty!"
106 | DbMetaMultipleRows -> "The metadata table contains multiple rows. Error."
107 | PoolMetadataHashMismatch -> "The pool metadata does not match!"
108 | PoolDelisted -> "The pool has been delisted!"
109 | UnableToEncodePoolMetadataToJSON err -> "Unable to encode the content to JSON. " <> err
110 | UnknownError text -> "Unknown error. Context: " <> text
111 | ReservedTickerAlreadyInserted tickerName -> "Ticker '" <> tickerName <> "' has already been inserted."
112 | RecordDoesNotExist -> "The requested record does not exist."
113 | DbInsertError text -> "The database got an error while trying to insert a record. Error: " <> text
114 |
115 |
--------------------------------------------------------------------------------
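For reference, a small sketch showing that encoding any `DBFail` produces the `{code, description}` envelope agreed in the comment above (field order may vary with the aeson version):

```haskell
import qualified Data.Aeson as Aeson
import qualified Data.ByteString.Lazy.Char8 as LBS

import Cardano.SMASH.DBSync.Db.Error (DBFail (..))

-- Prints, e.g.:
-- {"code":"RecordDoesNotExist","description":"The requested record does not exist."}
-- {"code":"UnknownError","description":"Unknown error. Context: something went wrong"}
main :: IO ()
main = do
  LBS.putStrLn (Aeson.encode RecordDoesNotExist)
  LBS.putStrLn (Aeson.encode (UnknownError "something went wrong"))
```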
/smash/src/Cardano/SMASH/HttpClient.hs:
--------------------------------------------------------------------------------
1 | {-# LANGUAGE LambdaCase #-}
2 | {-# LANGUAGE ScopedTypeVariables #-}
3 |
4 | module Cardano.SMASH.HttpClient
5 | ( httpClientFetchPolicies
6 | , httpApiCall
7 | , renderHttpClientError
8 | ) where
9 |
10 | import Cardano.Prelude
11 |
12 | import Control.Monad.Trans.Except.Extra
13 |
14 | import Cardano.SMASH.DBSync.Db.Types (PoolId)
15 |
16 | import Cardano.SMASH.Types (HealthStatus,
17 | PolicyResult (..),
18 | SmashURL (..))
19 | import Data.Aeson (FromJSON, parseJSON)
20 | import Data.Aeson.Types (parseEither)
21 |
22 | import Network.HTTP.Simple (Request, getResponseBody,
23 | getResponseStatusCode,
24 | httpJSONEither,
25 | parseRequestThrow)
26 |
27 | -- |The possible errors for the http client.
28 | data HttpClientError
29 | = HttpClientCannotParseEndpoint !Text
30 | | HttpClientInvalidClientBody
31 | | HttpClientCannotParseJSON !Text
32 | | HttpClientStatusNotOk
33 |
34 | -- |Render the http client error.
35 | renderHttpClientError :: HttpClientError -> Text
36 | renderHttpClientError = \case
37 | HttpClientCannotParseEndpoint endpoint ->
38 | mconcat
39 | [ "Http client cannot parse the '"
40 | , endpoint
41 | , "' endpoint"
42 | ]
43 | HttpClientInvalidClientBody ->
44 | "Http client invalid response body."
45 | HttpClientCannotParseJSON reason ->
46 | mconcat
47 | [ "Http client cannot parse the response JSON - '"
48 | , reason
49 | , "'."
50 | ]
51 | HttpClientStatusNotOk ->
52 | "Http client returned status not ok. Status should be 200."
53 |
54 | -- |Fetch the remote SMASH server policies.
55 | httpClientFetchPolicies :: SmashURL -> IO (Either HttpClientError PolicyResult)
56 | httpClientFetchPolicies smashURL = runExceptT $ do
57 |
58 | -- https://smash.cardano-mainnet.iohk.io
59 | let baseSmashURL = show $ getSmashURL smashURL
60 |
61 | -- TODO(KS): This would be nice.
62 | --let delistedEndpoint = symbolVal (Proxy :: Proxy DelistedPoolsAPI)
63 | --let smashDelistedEndpoint = baseSmashURL <> delistedEndpoint
64 |
65 | let statusEndpoint = baseSmashURL <> "/api/v1/status"
66 | let delistedEndpoint = baseSmashURL <> "/api/v1/delisted"
67 | let reservedTickersEndpoint = baseSmashURL <> "/api/v1/tickers"
68 |
69 | statusRequest <- parseRequestEither statusEndpoint
70 | delistedRequest <- parseRequestEither delistedEndpoint
71 | _reservedTickersRequest <- parseRequestEither reservedTickersEndpoint
72 |
73 | healthStatus :: HealthStatus <- httpApiCall statusRequest
74 | delistedPools :: [PoolId] <- httpApiCall delistedRequest
75 |
76 | -- TODO(KS): Current version doesn't have exposed the tickers endpoint and would fail!
77 | -- uniqueTickers :: [UniqueTicker] <- httpApiCall reservedTickersRequest
78 | uniqueTickers <- pure []
79 |
80 | let policyResult =
81 | PolicyResult
82 | { prSmashURL = smashURL
83 | , prHealthStatus = healthStatus
84 | , prDelistedPools = delistedPools
85 | , prUniqueTickers = uniqueTickers
86 | }
87 |
88 | return policyResult
89 |
90 | -- |A simple HTTP call for remote server.
91 | httpApiCall :: forall a. (FromJSON a) => Request -> ExceptT HttpClientError IO a
92 | httpApiCall request = do
93 |
94 | httpResult <- httpJSONEither request
95 | let httpResponseBody = getResponseBody httpResult
96 |
97 | httpResponse <- firstExceptT (\_ -> HttpClientInvalidClientBody) $ hoistEither httpResponseBody
98 |
99 | let httpStatusCode = getResponseStatusCode httpResult
100 |
101 | when (httpStatusCode /= 200) $
102 | left HttpClientStatusNotOk
103 |
104 | case parseEither parseJSON httpResponse of
105 | Left reason -> left $ HttpClientCannotParseJSON (toS reason)
106 | Right value -> right value
107 |
108 | -- |When any exception occurs, we simply map it to an http client error.
109 | parseRequestEither :: Text -> ExceptT HttpClientError IO Request
110 | parseRequestEither requestEndpoint = do
111 | let parsedRequest = newExceptT (try $ parseRequestThrow (toS requestEndpoint) :: IO (Either SomeException Request))
112 | firstExceptT (\_ -> HttpClientCannotParseEndpoint requestEndpoint) parsedRequest
113 |
114 |
--------------------------------------------------------------------------------
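A sketch of a standalone caller of `httpApiCall` against the status endpoint mentioned in the comments above; the target type is fixed by the pattern annotation on the `Right` case:

```haskell
{-# LANGUAGE ScopedTypeVariables #-}

import Control.Monad.Trans.Except (runExceptT)
import qualified Data.Text.IO as Text
import Network.HTTP.Simple (parseRequestThrow)

import Cardano.SMASH.HttpClient (httpApiCall, renderHttpClientError)
import Cardano.SMASH.Types (HealthStatus)

-- Fetch and decode the /status endpoint of a remote SMASH server.
main :: IO ()
main = do
  request <- parseRequestThrow "https://smash.cardano-mainnet.iohk.io/api/v1/status"
  result <- runExceptT (httpApiCall request)
  case result of
    Left err -> Text.putStrLn (renderHttpClientError err)
    Right (_status :: HealthStatus) -> putStrLn "SMASH server is healthy."
```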
/smash/test/MigrationSpec.hs:
--------------------------------------------------------------------------------
1 | {-# LANGUAGE LambdaCase #-}
2 | {-# LANGUAGE ScopedTypeVariables #-}
3 |
4 | module MigrationSpec
5 | ( migrationSpec
6 | ) where
7 |
8 | import Cardano.Prelude
9 |
10 | import Control.Monad.Trans.Except.Extra (left)
11 | import Data.Time.Clock.POSIX (getPOSIXTime)
12 |
13 | import Test.Hspec (Spec, describe)
14 | import Test.Hspec.QuickCheck (modifyMaxSuccess, prop)
15 | import Test.QuickCheck.Monadic (assert, monadicIO, run)
16 |
17 | import qualified Cardano.BM.Trace as Logging
18 |
19 | import Cardano.SMASH.FetchQueue
20 | import Cardano.SMASH.Offline
21 | import Cardano.SMASH.Types
22 |
23 | import Cardano.SMASH.DB
24 | import Cardano.SMASH.DBSync.Db.Insert (insertPoolMetadataReference)
25 | import Cardano.SMASH.DBSync.Db.Schema (SchemaVersion (..))
26 |
27 | -- | Test spec for smash
28 | -- SMASHPGPASSFILE=config/pgpass-test ./scripts/postgresql-setup.sh --createdb
29 | migrationSpec :: Spec
30 | migrationSpec = do
31 | describe "MigrationSpec" $ do
32 | modifyMaxSuccess (const 10) $ prop "migrations should be run without any issues" $ monadicIO $ do
33 | _ <- run migrationTest
34 | assert True
35 |
36 | describe "FetchQueueSpec" $ do
37 | describe "Retry count" $
38 | prop "retrying again increases the retry count" $ \(initialCount :: Word) -> monadicIO $ do
39 |
40 | -- Probably switch to @DataLayer@
41 | poolMetadataRefIdE <- run $ runDbNoLogging $ insertPoolMetadataReference $ PoolMetadataReference (PoolId "1") (PoolUrl "http://test.com") (PoolMetadataHash "hash")
42 |
43 | poolMetadataRefId <- case poolMetadataRefIdE of
44 | Left err -> panic $ show err
45 | Right id -> return id
46 |
47 | timeNow <- run $ getPOSIXTime --secondsToNominalDiffTime timeNowInt
48 |
49 | let retry' =
50 | Retry
51 | { fetchTime = timeNow
52 | , retryTime = timeNow + 60
53 | , retryCount = initialCount
54 | }
55 |
56 | let poolFetchRetry =
57 | PoolFetchRetry
58 | { pfrReferenceId = poolMetadataRefId
59 | , pfrPoolIdWtf = PoolId "1"
60 | , pfrPoolUrl = PoolUrl "http://test.com"
61 | , pfrPoolMDHash = PoolMetadataHash "hash"
62 | , pfrRetry = retry'
63 | }
64 |
65 | let dataLayer = postgresqlDataLayer Nothing
66 |
67 | let fetchInsert = \_ _ _ _ -> left $ FEIOException "Dunno"
68 |
69 | --print $ showRetryTimes (pfrRetry poolFetchRetry)
70 |
71 | poolFetchRetry1 <- run $ fetchInsertNewPoolMetadataOld dataLayer Logging.nullTracer fetchInsert poolFetchRetry
72 | --print $ showRetryTimes (pfrRetry poolFetchRetry1)
73 |
74 | poolFetchRetry2 <- run $ fetchInsertNewPoolMetadataOld dataLayer Logging.nullTracer fetchInsert poolFetchRetry1
75 | --print $ showRetryTimes (pfrRetry poolFetchRetry2)
76 |
77 | poolFetchRetry3 <- run $ fetchInsertNewPoolMetadataOld dataLayer Logging.nullTracer fetchInsert poolFetchRetry2
78 | --print $ showRetryTimes (pfrRetry poolFetchRetry3)
79 |
80 | let newRetryCount = retryCount (pfrRetry poolFetchRetry3)
81 |
82 | assert $ newRetryCount == initialCount + 3
83 |
84 |
85 | -- Really just make sure that the migrations do actually run correctly.
86 | -- If they fail the file path of the log file (in /tmp) will be printed.
87 | migrationTest :: IO ()
88 | migrationTest = do
89 | let schemaDir = SmashMigrationDir "../schema"
90 | runMigrations Logging.nullTracer (\x -> x) schemaDir (Just $ SmashLogFileDir "/tmp")
91 |
92 | -- TODO(KS): This version HAS to be changed manually so we don't mess up the
93 | -- migration.
94 | let expected = SchemaVersion 1 9 0
95 | actual <- getDbSchemaVersion
96 | unless (expected == actual) $
97 | panic $ mconcat
98 | [ "Schema version mismatch. Expected "
99 | , showSchemaVersion expected
100 | , " but got "
101 | , showSchemaVersion actual
102 | , "."
103 | ]
104 |
105 | getDbSchemaVersion :: IO SchemaVersion
106 | getDbSchemaVersion =
107 | runDbNoLogging $
108 | fromMaybe (panic "getDbSchemaVersion: Nothing") <$> querySchemaVersion
109 |
110 | showSchemaVersion :: SchemaVersion -> Text
111 | showSchemaVersion (SchemaVersion a b c) =
112 | toS $ intercalate "." [show a, show b, show c]
113 |
114 |
--------------------------------------------------------------------------------
/smash/src/Cardano/SMASH/DBSync/Db/Insert.hs:
--------------------------------------------------------------------------------
1 | {-# LANGUAGE FlexibleContexts #-}
2 | {-# LANGUAGE TypeFamilies #-}
3 |
4 | module Cardano.SMASH.DBSync.Db.Insert
5 | ( insertBlock
6 | , insertMeta
7 | , insertPool
8 | , insertPoolMetadata
9 | , insertPoolMetadataReference
10 | , insertReservedTicker
11 | , insertDelistedPool
12 | , insertRetiredPool
13 | , insertAdminUser
14 | , insertPoolMetadataFetchError
15 |
16 | -- Export mainly for testing.
17 | , insertByReturnKey
18 | ) where
19 |
20 | import Cardano.Prelude hiding (Meta, replace)
21 |
22 | import Control.Monad.Trans.Reader (mapReaderT)
23 |
24 | import Database.Persist.Class (AtLeastOneUniqueKey,
25 | PersistEntityBackend,
26 | checkUnique, getByValue,
27 | insert)
28 | import Database.Persist.Sql (SqlBackend)
29 | import Database.Persist.Types (entityKey)
30 | import Database.PostgreSQL.Simple (SqlError)
31 |
32 | import Cardano.SMASH.DBSync.Db.Error
33 | import Cardano.SMASH.DBSync.Db.Schema
34 |
35 | insertBlock :: (MonadIO m) => Block -> ReaderT SqlBackend m (Either DBFail BlockId)
36 | insertBlock = insertByReturnKey
37 |
38 | insertMeta :: (MonadIO m) => Meta -> ReaderT SqlBackend m (Either DBFail MetaId)
39 | insertMeta meta = insertByReturnKey meta
40 |
41 | insertPool :: (MonadIO m) => Pool -> ReaderT SqlBackend m (Either DBFail PoolId)
42 | insertPool pool = insertByReturnKey pool
43 |
44 | insertPoolMetadata :: (MonadIO m) => PoolMetadata -> ReaderT SqlBackend m (Either DBFail PoolMetadataId)
45 | insertPoolMetadata = insertByReturnKey
46 |
47 | insertPoolMetadataReference
48 | :: MonadIO m
49 | => PoolMetadataReference
50 | -> ReaderT SqlBackend m (Either DBFail PoolMetadataReferenceId)
51 | insertPoolMetadataReference = insertByReturnKey
52 |
53 | insertReservedTicker :: (MonadIO m) => ReservedTicker -> ReaderT SqlBackend m (Either DBFail ReservedTickerId)
54 | insertReservedTicker reservedTicker = do
55 | isUnique <- checkUnique reservedTicker
56 | -- If there is no unique constraint violated, insert, otherwise return error.
57 | case isUnique of
58 | Nothing -> insertByReturnKey reservedTicker
59 | Just _key -> return . Left . ReservedTickerAlreadyInserted $ show reservedTicker
60 |
61 | insertDelistedPool :: (MonadIO m) => DelistedPool -> ReaderT SqlBackend m (Either DBFail DelistedPoolId)
62 | insertDelistedPool delistedPool = do
63 | isUnique <- checkUnique delistedPool
64 | -- If there is no unique constraint violated, insert, otherwise return error.
65 | case isUnique of
66 | Nothing -> insertByReturnKey delistedPool
67 | Just _key -> return . Left . DbInsertError $ "Delisted pool already exists!"
68 |
69 | insertRetiredPool :: (MonadIO m) => RetiredPool -> ReaderT SqlBackend m (Either DBFail RetiredPoolId)
70 | insertRetiredPool = insertByReturnKey
71 |
72 | insertAdminUser :: (MonadIO m) => AdminUser -> ReaderT SqlBackend m (Either DBFail AdminUserId)
73 | insertAdminUser adminUser = do
74 | isUnique <- checkUnique adminUser
75 | -- If there is no unique constraint violated, insert, otherwise return error.
76 | case isUnique of
77 | Nothing -> insertByReturnKey adminUser
78 | Just _key -> return . Left . DbInsertError $ "Admin user already exists!"
79 |
80 | insertPoolMetadataFetchError
81 | :: (MonadIO m)
82 | => PoolMetadataFetchError
83 | -> ReaderT SqlBackend m (Either DBFail PoolMetadataFetchErrorId)
84 | insertPoolMetadataFetchError pmfe = do
85 | isUnique <- checkUnique pmfe
86 | -- If there is no unique constraint violated, insert, otherwise return error.
87 | case isUnique of
88 | Nothing -> insertByReturnKey pmfe
89 | Just _key -> return . Left . DbInsertError $ "Pool metadata fetch error already exists!"
90 |
91 | -------------------------------------------------------------------------------
92 |
93 | -- | Insert a record (with a Unique constraint) and return 'Right key'; if the
94 | -- record already exists, its key is returned. A failed insert gives 'Left DBFail'.
95 | -- TODO(KS): This needs to be tested, not sure if it's actually working.
96 | insertByReturnKey
97 | :: ( AtLeastOneUniqueKey record
98 | , MonadIO m
99 | , PersistEntityBackend record ~ SqlBackend
100 | )
101 | => record -> ReaderT SqlBackend m (Either DBFail (Key record))
102 | insertByReturnKey value = do
103 | res <- getByValue value
104 | case res of
105 | -- handle :: Exception e => (e -> IO a) -> IO a -> IO a
106 | Nothing -> mapReaderT (\insertedValue -> liftIO $ handle exceptionHandler insertedValue) (Right <$> insert value)
107 | Just r -> pure . Right $ entityKey r
108 | where
109 | exceptionHandler :: MonadIO m => SqlError -> m (Either DBFail a)
110 | exceptionHandler e =
111 | liftIO . pure . Left . DbInsertError . show $ e
112 |
113 |
--------------------------------------------------------------------------------
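A usage sketch mirroring the construction in `MigrationSpec`: insert a metadata reference through `runDbNoLogging` and inspect the result. It assumes `SMASHPGPASSFILE` points at a reachable database and that the wrapper types live in the modules named below:

```haskell
import Cardano.SMASH.DBSync.Db.Insert (insertPoolMetadataReference)
import Cardano.SMASH.DBSync.Db.Run (runDbNoLogging)
import Cardano.SMASH.DBSync.Db.Schema (PoolMetadataReference (..))
import Cardano.SMASH.DBSync.Db.Types (PoolId (..), PoolMetadataHash (..), PoolUrl (..))

-- Inserting the same record a second time returns the existing key
-- (see insertByReturnKey above) rather than an error.
main :: IO ()
main = do
  result <- runDbNoLogging $
    insertPoolMetadataReference $
      PoolMetadataReference
        (PoolId "1")
        (PoolUrl "http://test.com")
        (PoolMetadataHash "hash")
  case result of
    Left err -> print err
    Right key -> print key
```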
/scripts/postgresql-setup.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | # Unofficial bash strict mode.
4 | # See: http://redsymbol.net/articles/unofficial-bash-strict-mode/
5 | set -u
6 | set -o pipefail
7 | IFS=$'\n\t'
8 |
9 | progname="$0"
10 |
11 |
12 | function die {
13 | echo "$1"
14 | exit 1
15 | }
16 |
17 | function check_pgpass_file {
18 | if test -z ${SMASHPGPASSFILE+x} ; then
19 | echo "Error: The SMASHPGPASSFILE env var should be set to the location of the pgpass file."
20 | echo
21 | echo "Eg for mainnet:"
22 | echo "export SMASHPGPASSFILE=$(pwd)/config/pgpass"
23 | echo
24 | exit 1
25 | fi
26 |
27 | if test ! -f "${SMASHPGPASSFILE}" ; then
28 | echo "Error: PostgeSQL password file ${SMASHPGPASSFILE} does not exist."
29 | exit 1
30 | fi
31 |
32 | databasename=$(cut -d ":" -f 3 "${SMASHPGPASSFILE}")
33 | export databasename
34 | }
35 |
36 | function check_for_psql {
37 | # Make sure we have the psql executable.
38 | psql -V > /dev/null 2>&1 || die "Error : Missing 'psql' executable!"
39 | }
40 |
41 | function check_psql_superuser {
42 | user="$(whoami)"
43 | set +e
44 | psql -l > /dev/null 2>&1
45 | if test $? -ne 0 ; then
46 | echo
47 | echo "Error : User '$user' can't access postgres."
48 | echo
49 | echo "To fix this, log into the postgres account and run:"
50 | echo " createuser --createdb --superuser $user"
51 | echo
52 | exit 1
53 | fi
54 | set -e
55 | }
56 |
57 | function check_connect_as_user {
58 | psql "${databasename}" --no-password --command='\dt' > /dev/null
59 | if test $? -ne 0 ; then
60 | echo
61 | echo "Error : Not able to connect as '$(whoami)' user."
62 | echo
63 | exit 1
64 | fi
65 | }
66 |
67 | function check_db_exists {
68 | set +e
69 | count=$(psql -l | grep -c "${databasename} ")
70 | if test "${count}" -lt 1 ; then
71 | echo
72 | echo "Error : No '${databasename}' database."
73 | echo
74 | echo "To create one run:"
75 | echo " $progname --createdb"
76 | echo
77 | exit 1
78 | fi
79 | count=$(psql -l | grep "${databasename} " | cut -d \| -f 3 | grep -c UTF8)
80 | if test "${count}" -ne 1 ; then
81 | echo
82 | echo "Error : '${databasename}' database exists, but is not UTF8."
83 | echo
84 | echo "To fix this you should drop the current one and create a new one using:"
85 | echo " $progname --dropdb"
86 | echo " $progname --createdb"
87 | echo
88 | exit 1
89 | fi
90 | set -e
91 | }
92 |
93 | function create_db {
94 | createdb -T template0 --owner="$(whoami)" --encoding=UTF8 "${databasename}"
95 | }
96 |
97 | function drop_db {
98 | dropdb --if-exists "${databasename}"
99 | }
100 |
101 | function list_views {
102 | psql "${databasename}" \
103 | --command="select table_name from information_schema.views where table_catalog = '${databasename}' and table_schema = 'public' ;"
104 | }
105 |
106 | function create_migration {
107 | echo "To create a migration:"
108 | echo "cabal run create-migration --mdir schema/"
109 | exit 0
110 | }
111 |
112 | function run_migrations {
113 | echo "To run migrations:"
114 | echo "cabal run cardano-db-tool run-migrations --mdir schema/ --ldir ."
115 | echo "You probably do not need to do this."
116 | exit 0
117 | }
118 |
119 | function dump_schema {
120 | pg_dump -s "${databasename}"
121 | }
122 |
123 | function usage_exit {
124 | echo
125 | echo "Usage:"
126 | echo " $progname --check - Check database exists and is set up correctly."
127 | echo " $progname --createdb - Create database."
128 | echo " $progname --dropdb - Drop database."
129 | echo " $progname --list-views - List the currently definied views."
130 | echo " $progname --recreatedb - Drop and recreate database."
131 | echo " $progname --create-user - Create database user (from config/pgass file)."
132 | echo " $progname --create-migration - Create a migration (if one is needed)."
133 | echo " $progname --run-migrations - Run all migrations applying as needed."
134 | echo " $progname --dump-schema - Dump the schema of the database."
135 | echo
136 | exit 0
137 | }
138 |
139 | # postgresql_version=$(psql -V | head -1 | sed -e "s/.* //;s/\.[0-9]*$//")
140 |
141 | set -e
142 |
143 | case "${1:-""}" in
144 | --check)
145 | check_pgpass_file
146 | check_for_psql
147 | check_psql_superuser
148 | check_db_exists
149 | check_connect_as_user
150 | ;;
151 | --createdb)
152 | check_pgpass_file
153 | check_for_psql
154 | check_psql_superuser
155 | create_db
156 | ;;
157 | --dropdb)
158 | check_pgpass_file
159 | check_for_psql
160 | check_psql_superuser
161 | drop_db
162 | ;;
163 | --list-views)
164 | check_pgpass_file
165 | check_for_psql
166 | check_psql_superuser
167 | check_db_exists
168 | check_connect_as_user
169 | list_views
170 | ;;
171 | --recreatedb)
172 | check_pgpass_file
173 | check_for_psql
174 | check_psql_superuser
175 | check_db_exists
176 | check_connect_as_user
177 | drop_db
178 | create_db
179 | echo "The database ${databasename} has been dropped and recreated."
180 | echo "The tables will be recreated when the application is run."
181 | exit 0
182 | ;;
183 | --create-user)
184 | check_pgpass_file
185 | check_for_psql
186 | check_psql_superuser
187 | create_user
188 | ;;
189 | --create-migration)
190 | create_migration
191 | ;;
192 | --run-migrations)
193 | run_migrations
194 | ;;
195 | --dump-schema)
196 | check_pgpass_file
197 | check_db_exists
198 | dump_schema
199 | ;;
200 | *)
201 | usage_exit
202 | ;;
203 | esac
204 |
205 | echo "All good!"
206 | exit 0
207 |
--------------------------------------------------------------------------------
/nix/sources.nix:
--------------------------------------------------------------------------------
1 | # This file has been generated by Niv.
2 |
3 | let
4 |
5 | #
6 | # The fetchers. fetch_<type> fetches specs of type <type>.
7 | #
8 |
9 | fetch_file = pkgs: spec:
10 | if spec.builtin or true then
11 | builtins_fetchurl { inherit (spec) url sha256; }
12 | else
13 | pkgs.fetchurl { inherit (spec) url sha256; };
14 |
15 | fetch_tarball = pkgs: name: spec:
16 | let
17 | ok = str: ! builtins.isNull (builtins.match "[a-zA-Z0-9+-._?=]" str);
18 | # sanitize the name, though nix will still fail if name starts with period
19 | name' = stringAsChars (x: if ! ok x then "-" else x) "${name}-src";
20 | in
21 | if spec.builtin or true then
22 | builtins_fetchTarball { name = name'; inherit (spec) url sha256; }
23 | else
24 | pkgs.fetchzip { name = name'; inherit (spec) url sha256; };
25 |
26 | fetch_git = spec:
27 | builtins.fetchGit { url = spec.repo; inherit (spec) rev ref; };
28 |
29 | fetch_local = spec: spec.path;
30 |
31 | fetch_builtin-tarball = name: throw
32 | ''[${name}] The niv type "builtin-tarball" is deprecated. You should instead use `builtin = true`.
33 | $ niv modify ${name} -a type=tarball -a builtin=true'';
34 |
35 | fetch_builtin-url = name: throw
36 | ''[${name}] The niv type "builtin-url" will soon be deprecated. You should instead use `builtin = true`.
37 | $ niv modify ${name} -a type=file -a builtin=true'';
38 |
39 | #
40 | # Various helpers
41 | #
42 |
43 | # The set of packages used when specs are fetched using non-builtins.
44 | mkPkgs = sources:
45 | let
46 | sourcesNixpkgs =
47 | import (builtins_fetchTarball { inherit (sources.nixpkgs) url sha256; }) {};
48 | hasNixpkgsPath = builtins.any (x: x.prefix == "nixpkgs") builtins.nixPath;
49 | hasThisAsNixpkgsPath = <nixpkgs> == ./.;
50 | in
51 | if builtins.hasAttr "nixpkgs" sources
52 | then sourcesNixpkgs
53 | else if hasNixpkgsPath && ! hasThisAsNixpkgsPath then
54 | import <nixpkgs> {}
55 | else
56 | abort
57 | ''
58 | Please specify either <nixpkgs> (through -I or NIX_PATH=nixpkgs=...) or
59 | add a package called "nixpkgs" to your sources.json.
60 | '';
61 |
62 | # The actual fetching function.
63 | fetch = pkgs: name: spec:
64 |
65 | if ! builtins.hasAttr "type" spec then
66 | abort "ERROR: niv spec ${name} does not have a 'type' attribute"
67 | else if spec.type == "file" then fetch_file pkgs spec
68 | else if spec.type == "tarball" then fetch_tarball pkgs name spec
69 | else if spec.type == "git" then fetch_git spec
70 | else if spec.type == "local" then fetch_local spec
71 | else if spec.type == "builtin-tarball" then fetch_builtin-tarball name
72 | else if spec.type == "builtin-url" then fetch_builtin-url name
73 | else
74 | abort "ERROR: niv spec ${name} has unknown type ${builtins.toJSON spec.type}";
75 |
76 | # Ports of functions for older nix versions
77 |
78 | # a Nix version of mapAttrs if the built-in doesn't exist
79 | mapAttrs = builtins.mapAttrs or (
80 | f: set: with builtins;
81 | listToAttrs (map (attr: { name = attr; value = f attr set.${attr}; }) (attrNames set))
82 | );
83 |
84 | # https://github.com/NixOS/nixpkgs/blob/0258808f5744ca980b9a1f24fe0b1e6f0fecee9c/lib/lists.nix#L295
85 | range = first: last: if first > last then [] else builtins.genList (n: first + n) (last - first + 1);
86 |
87 | # https://github.com/NixOS/nixpkgs/blob/0258808f5744ca980b9a1f24fe0b1e6f0fecee9c/lib/strings.nix#L257
88 | stringToCharacters = s: map (p: builtins.substring p 1 s) (range 0 (builtins.stringLength s - 1));
89 |
90 | # https://github.com/NixOS/nixpkgs/blob/0258808f5744ca980b9a1f24fe0b1e6f0fecee9c/lib/strings.nix#L269
91 | stringAsChars = f: s: concatStrings (map f (stringToCharacters s));
92 | concatStrings = builtins.concatStringsSep "";
93 |
94 | # fetchTarball version that is compatible between all the versions of Nix
95 | builtins_fetchTarball = { url, name, sha256 }@attrs:
96 | let
97 | inherit (builtins) lessThan nixVersion fetchTarball;
98 | in
99 | if lessThan nixVersion "1.12" then
100 | fetchTarball { inherit name url; }
101 | else
102 | fetchTarball attrs;
103 |
104 | # fetchurl version that is compatible between all the versions of Nix
105 | builtins_fetchurl = { url, sha256 }@attrs:
106 | let
107 | inherit (builtins) lessThan nixVersion fetchurl;
108 | in
109 | if lessThan nixVersion "1.12" then
110 | fetchurl { inherit url; }
111 | else
112 | fetchurl attrs;
113 |
114 | # Create the final "sources" from the config
115 | mkSources = config:
116 | mapAttrs (
117 | name: spec:
118 | if builtins.hasAttr "outPath" spec
119 | then abort
120 | "The values in sources.json should not have an 'outPath' attribute"
121 | else
122 | spec // { outPath = fetch config.pkgs name spec; }
123 | ) config.sources;
124 |
125 | # The "config" used by the fetchers
126 | mkConfig =
127 | { sourcesFile ? ./sources.json
128 | , sources ? builtins.fromJSON (builtins.readFile sourcesFile)
129 | , pkgs ? mkPkgs sources
130 | }: rec {
131 | # The sources, i.e. the attribute set of spec name to spec
132 | inherit sources;
133 |
134 | # The "pkgs" (evaluated nixpkgs) to use for e.g. non-builtin fetchers
135 | inherit pkgs;
136 | };
137 | in
138 | mkSources (mkConfig {}) // { __functor = _: settings: mkSources (mkConfig settings); }
139 |
--------------------------------------------------------------------------------
/nix/nixos/smash-service.nix:
--------------------------------------------------------------------------------
1 | { config, lib, pkgs, ... }:
2 |
3 | let
4 | cfg = config.services.smash;
5 | inherit (cfg.smashPkgs) smashHaskellPackages smashTestingHaskellPackages iohkNix;
6 | smashConfig = cfg.explorerConfig // {
7 | inherit (cfg.nodeConfig) ByronGenesisFile ShelleyGenesisFile ByronGenesisHash ShelleyGenesisHash Protocol RequiresNetworkMagic;
8 | };
9 | configFile = __toFile "config.json" (__toJSON (smashConfig // cfg.logConfig));
10 | in {
11 |
12 | options = {
13 | services.smash = {
14 | enable = lib.mkEnableOption "enable the smash server";
15 | script = lib.mkOption {
16 | internal = true;
17 | type = lib.types.package;
18 | };
19 | smashPkgs = lib.mkOption {
20 | type = lib.types.attrs;
21 | default = import ../. {};
22 | defaultText = "smash pkgs";
23 | description = ''
24 | The smash packages and library that should be used.
25 | '';
26 | internal = true;
27 | };
28 | testing-mode = lib.mkOption {
29 | type = lib.types.bool;
30 | default = false;
31 | description = "enable testing APIs";
32 | };
33 | package = lib.mkOption {
34 | type = lib.types.package;
35 | default = if cfg.testing-mode
36 | then smashTestingHaskellPackages.smash.components.exes.smash-exe
37 | else smashHaskellPackages.smash.components.exes.smash-exe;
38 | };
39 | explorerConfig = lib.mkOption {
40 | type = lib.types.attrs;
41 | default = cfg.environment.explorerConfig;
42 | };
43 | nodeConfig = lib.mkOption {
44 | type = lib.types.attrs;
45 | default = cfg.environment.nodeConfig;
46 | };
47 | environment = lib.mkOption {
48 | type = lib.types.nullOr lib.types.attrs;
49 | default = iohkNix.cardanoLib.environments.${cfg.environmentName};
50 | };
51 | logConfig = lib.mkOption {
52 | type = lib.types.attrs;
53 | default = iohkNix.cardanoLib.defaultExplorerLogConfig;
54 | };
55 | environmentName = lib.mkOption {
56 | type = lib.types.str;
57 | description = "environment name";
58 | };
59 | socketPath = lib.mkOption {
60 | type = lib.types.nullOr lib.types.path;
61 | default = null;
62 | };
63 | user = lib.mkOption {
64 | type = lib.types.str;
65 | default = "smash";
66 | description = "the user to run as";
67 | };
68 | postgres = {
69 | generatePGPASS = lib.mkOption {
70 | type = lib.types.bool;
71 | default = true;
72 | description = "generate pgpass";
73 | };
74 |
75 | pgpass = lib.mkOption {
76 | type = lib.types.path;
77 | default = builtins.toFile "pgpass" "${cfg.postgres.socketdir}:${toString cfg.postgres.port}:${cfg.postgres.database}:${cfg.postgres.user}:*";
78 | };
79 |
80 | socketdir = lib.mkOption {
81 | type = lib.types.str;
82 | default = "/run/postgresql";
83 | description = "the path to the postgresql socket";
84 | };
85 | port = lib.mkOption {
86 | type = lib.types.int;
87 | default = 5432;
88 | description = "the postgresql port";
89 | };
90 | database = lib.mkOption {
91 | type = lib.types.str;
92 | default = cfg.postgres.user;
93 | description = "the postgresql database to use";
94 | };
95 | user = lib.mkOption {
96 | type = lib.types.str;
97 | default = cfg.user;
98 | description = "the postgresql user to use";
99 | };
100 | };
101 | };
102 | };
103 | config = lib.mkIf cfg.enable {
104 | services.smash.script =
105 | pkgs.writeShellScript "smash" ''
106 | set -euo pipefail
107 |
108 | RUNTIME_DIRECTORY=''${RUNTIME_DIRECTORY:-$(pwd)}
109 | ${if (cfg.socketPath == null) then ''if [ -z ''${CARDANO_NODE_SOCKET_PATH:-} ]
110 | then
111 | echo "You must set \$CARDANO_NODE_SOCKET_PATH"
112 | exit 1
113 | fi'' else "export CARDANO_NODE_SOCKET_PATH=\"${cfg.socketPath}\""}
114 |
115 | ${lib.optionalString cfg.postgres.generatePGPASS ''
116 | cp ${cfg.postgres.pgpass} /$RUNTIME_DIRECTORY/pgpass
117 | chmod 0600 $RUNTIME_DIRECTORY/pgpass
118 | export SMASHPGPASSFILE=/$RUNTIME_DIRECTORY/pgpass
119 | ''}
120 |
121 | if [ -f $STATE_DIRECTORY/force-resync ]; then
122 | echo "Preparing DB for full-resync"
123 | ${cfg.package}/bin/smash-exe force-resync --config ${configFile} --mdir ${../../schema}
124 | rm $STATE_DIRECTORY/force-resync
125 | fi
126 |
127 | ${cfg.package}/bin/smash-exe run-migrations --config ${configFile} --mdir ${../../schema}
128 | exec ${cfg.package}/bin/smash-exe run-app-with-db-sync \
129 | --config ${configFile} \
130 | --socket-path "$CARDANO_NODE_SOCKET_PATH" \
131 | --schema-dir ${../../schema} \
132 | --state-dir $STATE_DIRECTORY
133 | '';
134 | environment.systemPackages = [ cfg.package config.services.postgresql.package ];
135 | systemd.services.smash = {
136 | path = [ cfg.package pkgs.netcat pkgs.postgresql ];
137 | preStart = ''
138 | for x in {1..60}; do
139 | nc -z localhost ${toString config.services.smash.postgres.port} && break
140 | echo loop $x: waiting for postgresql 2 sec...
141 | sleep 2
142 | done
143 | sleep 1
144 | '';
145 | serviceConfig = {
146 | ExecStart = config.services.smash.script;
147 | DynamicUser = true;
148 | RuntimeDirectory = "smash";
149 | StateDirectory = "smash";
150 | };
151 |
152 | wantedBy = [ "multi-user.target" ];
153 | after = [ "postgres.service" "cardano-node.service" ];
154 | requires = [ "postgresql.service" ];
155 | };
156 | };
157 | }
158 |
--------------------------------------------------------------------------------
/smash/src/Cardano/SMASH/DBSync/Db/Schema.hs:
--------------------------------------------------------------------------------
1 | {-# LANGUAGE ConstraintKinds #-}
2 | {-# LANGUAGE DataKinds #-}
3 | {-# LANGUAGE DeriveDataTypeable #-}
4 | {-# LANGUAGE DeriveGeneric #-}
5 | {-# LANGUAGE DerivingStrategies #-}
6 | {-# LANGUAGE FlexibleContexts #-}
7 | {-# LANGUAGE FlexibleInstances #-}
8 | {-# LANGUAGE GADTs #-}
9 | {-# LANGUAGE GeneralizedNewtypeDeriving #-}
10 | {-# LANGUAGE MultiParamTypeClasses #-}
11 | {-# LANGUAGE OverloadedStrings #-}
12 | {-# LANGUAGE QuasiQuotes #-}
13 | {-# LANGUAGE StandaloneDeriving #-}
14 | {-# LANGUAGE TemplateHaskell #-}
15 | {-# LANGUAGE TypeFamilies #-}
16 | {-# LANGUAGE UndecidableInstances #-}
17 |
18 | module Cardano.SMASH.DBSync.Db.Schema where
19 |
20 | import Cardano.Prelude hiding (Meta)
21 |
22 | import Data.Time.Clock (UTCTime)
23 |
24 | -- Do not use explicit imports from this module as the imports can change
25 | -- from version to version due to changes to the TH code in Persistent.
26 | import Database.Persist.TH
27 |
28 | import qualified Cardano.SMASH.DBSync.Db.Types as Types
29 |
30 |
31 | -- In the schema definition we need to match Haskell types with the
32 | -- custom types defined in PostgreSQL (via 'DOMAIN' statements). For the
33 | -- time being the Haskell types will be simple Haskell types like
34 | -- 'ByteString' and 'Word64'.
35 |
36 | -- We use camelCase here in the Haskell schema definition and 'persistLowerCase'
37 | -- specifies that all the table and column names are converted to lower snake case.
38 |
39 | share
40 | [ mkPersist sqlSettings
41 | , mkMigrate "migrateCardanoDb"
42 | ]
43 | [persistLowerCase|
44 |
45 | -- Schema versioning has three stages to best allow handling of schema migrations.
46 | -- Stage 1: Set up PostgreSQL data types (using SQL 'DOMAIN' statements).
47 | -- Stage 2: Persistent generated migrations.
48 | -- Stage 3: Set up 'VIEW' tables (for use by other languages and applications).
49 | -- This table should have a single row.
50 | SchemaVersion
51 | stageOne Int
52 | stageTwo Int
53 | stageThree Int
54 | deriving Eq
55 |
56 | -- The table containing pools' on-chain reference to its off-chain metadata.
57 |
58 | PoolMetadataReference
59 | poolId Types.PoolId sqltype=text
60 | url Types.PoolUrl sqltype=text
61 | hash Types.PoolMetadataHash sqltype=text
62 | UniquePoolMetadataReference poolId hash
63 |
64 | -- The table containing the metadata.
65 |
66 | PoolMetadata
67 | poolId Types.PoolId sqltype=text
68 | tickerName Types.TickerName sqltype=text
69 | hash Types.PoolMetadataHash sqltype=text
70 | metadata Types.PoolMetadataRaw sqltype=text
71 | pmrId PoolMetadataReferenceId Maybe OnDeleteCascade
72 | UniquePoolMetadata poolId hash
73 |
74 | -- The pools themselves (identified by the owner vkey hash)
75 |
76 | Pool
77 | poolId Types.PoolId sqltype=text
78 | UniquePoolId poolId
79 |
80 | -- The retired pools.
81 |
82 | RetiredPool
83 | poolId Types.PoolId sqltype=text
84 | blockNo Word64 sqltype=uinteger -- When the pool was retired.
85 | UniqueRetiredPoolId poolId
86 |
87 | -- The pool metadata fetch error. We duplicate the poolId for easy access.
88 | -- TODO(KS): Debatable whether we need to persist this between migrations!
89 |
90 | PoolMetadataFetchError
91 | fetchTime UTCTime sqltype=timestamp
92 | poolId Types.PoolId sqltype=text
93 | poolHash Types.PoolMetadataHash sqltype=text
94 | pmrId PoolMetadataReferenceId OnDeleteCascade
95 | fetchError Text
96 | retryCount Word sqltype=uinteger
97 | UniquePoolMetadataFetchError fetchTime poolId poolHash retryCount
98 |
99 | -- We actually need the block table to be able to persist sync data
100 |
101 | Block
102 | hash ByteString sqltype=hash32type
103 | epochNo Word64 Maybe sqltype=uinteger
104 | slotNo Word64 Maybe sqltype=uinteger
105 | blockNo Word64 Maybe sqltype=uinteger
106 | UniqueBlock hash
107 | deriving Show
108 |
109 | -- A table containing metadata about the chain. There will probably only ever be one
110 | -- row in this table.
111 | -- TODO(KS): This can be left alone when migration occurs since it should be the same!
112 | Meta
113 | startTime UTCTime sqltype=timestamp
114 | networkName Text Maybe
115 | UniqueMeta startTime
116 | deriving Show
117 |
118 | --------------------------------------------------------------------------
119 | -- Tables below should be preserved when migration occurs!
120 | --------------------------------------------------------------------------
121 |
122 | -- A table containing a list of delisted pools.
123 | DelistedPool
124 | poolId Types.PoolId sqltype=text
125 | UniqueDelistedPool poolId
126 |
127 | -- A table containing a managed list of reserved ticker names.
128 | -- For now they are grouped under the specific hash of the pool.
129 | ReservedTicker
130 | name Types.TickerName sqltype=text
131 | poolHash Types.PoolMetadataHash sqltype=text
132 | UniqueReservedTicker name
133 | deriving Show
134 |
135 | -- A table containing a list of administrator users that can be used to access the secure API endpoints.
136 | -- Yes, we don't have any hash check mechanisms here, if they get to the database, game over anyway.
137 | AdminUser
138 | username Text
139 | password Text
140 | UniqueAdminUser username
141 | deriving Show
142 |
143 | |]
144 |
145 |
--------------------------------------------------------------------------------
/smash-servant-types/src/Cardano/SMASH/DBSync/Db/Types.hs:
--------------------------------------------------------------------------------
1 | {-# LANGUAGE DeriveGeneric #-}
2 | {-# LANGUAGE DerivingStrategies #-}
3 | {-# LANGUAGE DerivingVia #-}
4 | {-# LANGUAGE GeneralisedNewtypeDeriving #-}
5 | {-# LANGUAGE LambdaCase #-}
6 |
7 | module Cardano.SMASH.DBSync.Db.Types where
8 |
9 | import Cardano.Prelude
10 |
11 | import Control.Monad.Fail (fail)
12 |
13 | import Data.Aeson (FromJSON (..), ToJSON (..), object,
14 | withObject, (.:), (.=))
15 | import Database.Persist.Class
16 |
17 | import Cardano.Api (AsType (..), Hash,
18 | deserialiseFromBech32,
19 | deserialiseFromRawBytesHex,
20 | serialiseToRawBytes)
21 | import Cardano.Api.Shelley (StakePoolKey)
22 | import qualified Data.ByteString.Base16 as B16
23 | import qualified Data.ByteString.Char8 as BSC
24 |
25 | import Quiet (Quiet (..))
26 |
27 | -- | The stake pool identifier. It is the hash of the stake pool operator's
28 | -- vkey.
29 | --
30 | -- It may be rendered as hex or as bech32 using the @pool@ prefix.
31 | --
32 | newtype PoolId = PoolId { getPoolId :: Text }
33 | deriving stock (Eq, Ord, Generic)
34 | deriving Show via (Quiet PoolId)
35 | deriving newtype PersistField
36 |
37 | instance ToJSON PoolId where
38 | toJSON (PoolId poolId) =
39 | object
40 | [ "poolId" .= poolId
41 | ]
42 |
43 | instance FromJSON PoolId where
44 | parseJSON = withObject "PoolId" $ \o -> do
45 | poolId <- o .: "poolId"
46 | case parsePoolId poolId of
47 | Left err -> fail $ toS err
48 | Right poolId' -> return poolId'
49 |
50 | -- Currently deserializing from safe types, unwrapping and wrapping it up again.
51 | -- The underlying DB representation is HEX.
52 | --
53 | -- Pool ids are key hashes and so use the "address hash" size, which is 28 bytes; hence the hex encoding is 2*28 = 56 characters.
54 | parsePoolId :: Text -> Either Text PoolId
55 | parsePoolId poolId =
56 | case pBech32OrHexStakePoolId poolId of
57 | Nothing -> Left "Unable to parse pool id. Wrong format."
58 | Just poolId' -> Right . PoolId . decodeUtf8 . B16.encode . serialiseToRawBytes $ poolId'
59 |
60 | where
61 | -- bech32 pool <<< e5cb8a89cabad2cb22ea85423bcbbe270f292be3dbe838948456d3ae
62 | -- bech32 <<< pool1uh9c4zw2htfvkgh2s4prhja7yu8jj2lrm05r39yy2mf6uqqegn6
63 | pBech32OrHexStakePoolId :: Text -> Maybe (Hash StakePoolKey)
64 | pBech32OrHexStakePoolId str = pBech32StakePoolId str <|> pHexStakePoolId str
65 |
66 | -- e5cb8a89cabad2cb22ea85423bcbbe270f292be3dbe838948456d3ae
67 | pHexStakePoolId :: Text -> Maybe (Hash StakePoolKey)
68 | pHexStakePoolId =
69 | deserialiseFromRawBytesHex (AsHash AsStakePoolKey) . BSC.pack . toS
70 |
71 | -- pool1uh9c4zw2htfvkgh2s4prhja7yu8jj2lrm05r39yy2mf6uqqegn6
72 | pBech32StakePoolId :: Text -> Maybe (Hash StakePoolKey)
73 | pBech32StakePoolId =
74 | either (const Nothing) Just
75 | . deserialiseFromBech32 (AsHash AsStakePoolKey)
76 |
77 | -- | The hash of a stake pool's metadata.
78 | --
79 | -- It may be rendered as hex.
80 | --
81 | newtype PoolMetadataHash = PoolMetadataHash { getPoolMetadataHash :: Text }
82 | deriving stock (Eq, Ord, Generic)
83 | deriving Show via (Quiet PoolMetadataHash)
84 | deriving newtype PersistField
85 |
86 | instance ToJSON PoolMetadataHash where
87 | toJSON (PoolMetadataHash poolHash) =
88 | object
89 | [ "poolHash" .= poolHash
90 | ]
91 |
92 | -- The validation of @PoolMetadataHash@ is a bit more involved and would require
93 | -- an analysis with some bounds on the size.
94 | instance FromJSON PoolMetadataHash where
95 | parseJSON = withObject "PoolMetadataHash" $ \o -> do
96 | poolHash <- o .: "poolHash"
97 | return $ PoolMetadataHash poolHash
98 |
99 | -- Converting the basic type to a strong one.
100 | -- Presumes the user knows what he is doing, NOT TYPE SAFE!
101 | bytestringToPoolMetaHash :: ByteString -> PoolMetadataHash
102 | bytestringToPoolMetaHash bs = PoolMetadataHash . decodeUtf8 . B16.encode $ bs
103 |
104 | -- | The stake pool metadata. It is JSON format. This type represents it in
105 | -- its raw original form. The hash of this content is the 'PoolMetadataHash'.
106 | newtype PoolMetadataRaw = PoolMetadataRaw { getPoolMetadata :: Text }
107 | deriving stock (Eq, Show, Ord, Generic)
108 | deriving newtype PersistField
109 |
110 | -- | The pool url wrapper so we have some additional safety.
111 | newtype PoolUrl = PoolUrl { getPoolUrl :: Text }
112 | deriving stock (Eq, Ord, Generic)
113 | deriving Show via (Quiet PoolUrl)
114 | deriving newtype PersistField
115 |
116 | -- | The ticker name wrapper so we have some additional safety.
117 | newtype TickerName = TickerName { getTickerName :: Text }
118 | deriving stock (Eq, Ord, Generic)
119 | deriving Show via (Quiet TickerName)
120 | deriving newtype PersistField
121 |
122 | instance ToJSON TickerName where
123 | toJSON (TickerName name) =
124 | object
125 | [ "name" .= name
126 | ]
127 |
128 | instance FromJSON TickerName where
129 | parseJSON = withObject "TickerName" $ \o -> do
130 | name <- o .: "name"
131 |
132 | eitherToMonadFail $ validateTickerName name
133 |
134 | -- |Util.
135 | eitherToMonadFail :: MonadFail m => Either Text a -> m a
136 | eitherToMonadFail (Left err) = fail $ toS err
137 | eitherToMonadFail (Right val) = return val
138 |
139 | -- |The validation for the ticker name we can reuse.
140 | validateTickerName :: Text -> Either Text TickerName
141 | validateTickerName name = do
142 | let tickerLen = length name
143 | if tickerLen >= 3 && tickerLen <= 5
144 | then Right $ TickerName name
145 | else Left $
146 | "\"ticker\" must have at least 3 and at most 5 "
147 | <> "characters, but it has "
148 | <> show tickerLen
149 | <> " characters."
150 |
151 |
--------------------------------------------------------------------------------
/cabal.project:
--------------------------------------------------------------------------------
1 | index-state: 2021-07-02T00:00:00Z
2 |
3 | packages:
4 | ./smash
5 | ./smash-servant-types
6 |
7 | constraints:
8 | libsystemd-journal >= 1.4.4
9 | , systemd >= 2.3.0
10 | -- systemd-2.3.0 requires at least network 3.1.1.0 but it doesn't declare
11 | -- that dependency
12 | , network >= 3.1.1.0
13 | , persistent-postgresql >= 2.11.0.1
14 |
15 | package smash
16 | ghc-options: -Wall -Werror -Wredundant-constraints -Wincomplete-uni-patterns -Wincomplete-record-updates -Wpartial-fields -Wunused-imports
17 |
18 | package postgresql-libpq
19 | flags: +use-pkg-config
20 |
21 | ------------------------------------------------------------------------------
22 |
23 | -- Disable all tests by default
24 |
25 | tests: False
26 |
27 | test-show-details: direct
28 |
29 | -- Then enable specific tests in this repo
30 |
31 | package smash
32 | tests: True
33 |
34 | package smash-servant-types
35 | tests: True
36 |
37 | -- These are needed because Nix is doing something crazy.
38 | package cardano-api
39 | tests: False
40 |
41 | package cardano-cli
42 | tests: False
43 |
44 | package cardano-node
45 | tests: False
46 |
47 | package ouroboros-consensus-cardano
48 | tests: False
49 |
50 | ------------------------------------------------------------------------------
51 |
52 | source-repository-package
53 | type: git
54 | location: https://github.com/input-output-hk/Win32-network
55 | tag: 3825d3abf75f83f406c1f7161883c438dac7277d
56 | --sha256: 19wahfv726fa3mqajpqdqhnl9ica3xmf68i254q45iyjcpj1psqx
57 |
58 |
59 | source-repository-package
60 | type: git
61 | location: https://github.com/input-output-hk/cardano-db-sync
62 | tag: 7ab7a9a2863cb1cdd344fccafef163b2a8372dc5
63 | --sha256: 02k5p17cq14xb0lgvzp1qg6df50yyl40s4acjqpwapn3mnf5ja97
64 | subdir:
65 | cardano-sync
66 | cardano-db
67 | cardano-db-sync
68 |
69 | source-repository-package
70 | type: git
71 | location: https://github.com/input-output-hk/cardano-base
72 | tag: cb0f19c85e5bb5299839ad4ed66af6fa61322cc4
73 | --sha256: 0dnkfqcvbifbk3m5pg8kyjqjy0zj1l4vd23p39n6ym4q0bnib1cq
74 | subdir:
75 | base-deriving-via
76 | binary
77 | binary/test
78 | cardano-crypto-class
79 | cardano-crypto-praos
80 | measures
81 | orphans-deriving-via
82 | slotting
83 | strict-containers
84 |
85 | source-repository-package
86 | type: git
87 | location: https://github.com/input-output-hk/cardano-crypto
88 | tag: 07397f0e50da97eaa0575d93bee7ac4b2b2576ec
89 | --sha256: 06sdx5ndn2g722jhpicmg96vsrys89fl81k8290b3lr6b1b0w4m3
90 |
91 | source-repository-package
92 | type: git
93 | location: https://github.com/input-output-hk/cardano-ledger-specs
94 | tag: d5b184a820853c7ba202efd615b8fadca1acb52c
95 | --sha256: 04k5p6qwmfdza65gl5319r1ahdfwjnyqgzpfxdx0x2g5jcbimar4
96 | subdir:
97 | alonzo/impl
98 | byron/crypto
99 | byron/crypto/test
100 | byron/chain/executable-spec
101 | byron/ledger/executable-spec
102 | byron/ledger/impl
103 | byron/ledger/impl/test
104 | cardano-ledger-core
105 | semantics/executable-spec
106 | semantics/small-steps-test
107 | shelley/chain-and-ledger/dependencies/non-integer
108 | shelley/chain-and-ledger/executable-spec
109 | shelley/chain-and-ledger/shelley-spec-ledger-test
110 | shelley-ma/impl
111 | shelley-ma/shelley-ma-test
112 |
113 | source-repository-package
114 | type: git
115 | location: https://github.com/input-output-hk/cardano-node
116 | tag: 9a6a6c81e3aebfaf757b562c823146c7da601e1c
117 | --sha256: 1xiqrx3hf2s7j62clzzmlim81g7v2dvmirv78zf9gp9m1lqxzan6
118 | subdir:
119 | cardano-api
120 | cardano-config
121 | cardano-node
122 |
123 | source-repository-package
124 | type: git
125 | location: https://github.com/input-output-hk/cardano-prelude
126 | tag: fd773f7a58412131512b9f694ab95653ac430852
127 | --sha256: 02jddik1yw0222wd6q0vv10f7y8rdgrlqaiy83ph002f9kjx7mh6
128 | subdir:
129 | cardano-prelude
130 | cardano-prelude-test
131 |
132 | source-repository-package
133 | type: git
134 | location: https://github.com/input-output-hk/goblins
135 | tag: cde90a2b27f79187ca8310b6549331e59595e7ba
136 | --sha256: 17c88rbva3iw82yg9srlxjv2ia5wjb9cyqw44hik565f5v9svnyg
137 |
138 | source-repository-package
139 | type: git
140 | location: https://github.com/input-output-hk/iohk-monitoring-framework
141 | tag: 808724ff8a19a33d0ed06f9ef59fbd900b08553c
142 | --sha256: 0298dpl29gxzs9as9ha6y0w18hqwc00ipa3hzkxv7nlfrjjz8hmz
143 | subdir:
144 | contra-tracer
145 | iohk-monitoring
146 | plugins/backend-aggregation
147 | plugins/backend-ekg
148 | plugins/backend-monitoring
149 | plugins/backend-trace-forwarder
150 | plugins/scribe-systemd
151 | tracer-transformers
152 |
153 | -- Using a fork until our patches can be merged upstream
154 | source-repository-package
155 | type: git
156 | location: https://github.com/input-output-hk/optparse-applicative
157 | tag: 84bcc6f18992a441886589a117249bfface8630e
158 | --sha256: 09pr7m9gjsk8889m9d3mj75p69viv1acnrq63dgb11jl5gnnsblr
159 |
160 | source-repository-package
161 | type: git
162 | location: https://github.com/input-output-hk/ouroboros-network
163 | tag: d070bad7ce389a4b2ff7fb4fcb7937fdeca80f3a
164 | --sha256: 0jzdwjgqcj06b0rvwyh61cgf23dlh62lcn8z7dbm7wxwjjgpkjb1
165 | subdir:
166 | cardano-client
167 | io-sim
168 | io-classes
169 | monoidal-synchronisation
170 | ouroboros-consensus
171 | ouroboros-consensus-byron
172 | ouroboros-consensus-cardano
173 | ouroboros-consensus-shelley
174 | ouroboros-network
175 | ouroboros-network-framework
176 | ouroboros-network-testing
177 | typed-protocols
178 | typed-protocols-examples
179 | network-mux
180 |
181 | source-repository-package
182 | type: git
183 | location: https://github.com/input-output-hk/plutus
184 | tag: 8c83c4abe211b4bbcaca3cdf1b2c0e38d0eb683f
185 | --sha256: 1643s1g3jlm9pgalpc3vpij1zqb1n8yv8irq6qc43gs9bvl0wc3l
186 | subdir:
187 | plutus-core
188 | plutus-ledger-api
189 | plutus-tx
190 | prettyprinter-configurable
191 | word-array
192 |
193 | allow-newer:
194 | monoidal-containers:aeson,
195 | size-based:template-haskell
196 |
197 | -- Something in plutus-core requires this.
198 | source-repository-package
199 | type: git
200 | location: https://github.com/michaelpj/flat.git
201 | tag: ee59880f47ab835dbd73bea0847dab7869fc20d8
202 | --sha256: 1lrzknw765pz2j97nvv9ip3l1mcpf2zr4n56hwlz0rk7wq7ls4cm
203 |
--------------------------------------------------------------------------------
/doc/getting-started/how-to-run-smash.md:
--------------------------------------------------------------------------------
1 | # Running the SMASH server
2 |
3 | The SMASH service has to be started in a specific order.
4 | The node must run first, since SMASH fetches blocks and on-chain info from the blockchain
5 | via the node.
6 | It also requires a running PostgreSQL database.
7 | Once the PostgreSQL database is installed, this guide can be used to run the SMASH service.
8 |
9 | ## Running the node
10 |
11 | We clone the node repository and use Nix to build it.
12 | If you want a specific version of the node, you can download it from the release page,
13 | found here: https://github.com/input-output-hk/cardano-node/releases.
14 | Alternatively, clone the repository and check out a specific release tag (for example, `1.14.2`):
15 | ```
16 | git clone git@github.com:input-output-hk/cardano-node.git
17 |
18 | git checkout 1.14.2 -b tag-1.14.2
19 | ```
20 |
21 | ### Testnet
22 |
23 | Once you have the version you require, build the node for the testnet using Nix:
24 | ```
25 | nix-build -A scripts.shelley_testnet.node -o shelley-testnet-node
26 | ```
27 |
28 | After that you can run the node by simply running:
29 | ```
30 | ./shelley-testnet-node
31 | ```
32 |
33 | ### Mainnet
34 |
35 | Likewise, once you have the version you require, build the node for mainnet using Nix:
36 | ```
37 | nix-build -A scripts.mainnet.node -o mainnet-node-local
38 | ```
39 |
40 | After that you can run the node by simply running:
41 | ```
42 | ./mainnet-node-local
43 | ```
44 |
45 | ## Building SMASH
46 |
47 | You can download a release from https://github.com/input-output-hk/smash/releases.
48 |
49 | After that, you can build the project using Stack, Cabal or Nix; for example, with Nix:
50 | ```
51 | nix-build -o smash-local
52 | ```
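
If you prefer Stack or Cabal over Nix, the equivalent builds are roughly as follows (a sketch; exact targets and flags depend on your environment):
```
stack build
# or
cabal build all
```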
53 |
54 | And now we can set up the DB schema.
55 |
56 | ## DB setup using SMASH
57 |
58 | Create a database of your choosing and use SMASH to populate it.
59 | The first thing we need is a connection string.
60 |
61 | For example, this is the content for `config/pgpass` which we use to connect to the database:
62 | ```
63 | /var/run/postgresql:5432:smash:*:*
64 | ```
65 |
66 | The fields follow PostgreSQL's standard pgpass layout: host (or socket directory), port, database, user
67 | and password, with `*` acting as a wildcard. Point the database name and port number at the correct values
68 | and store that information in a file; we will later use the location of that file to connect to and populate the database.
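
For instance, if your PostgreSQL instance listened on TCP port 5433 with a database named `smash_custom` (hypothetical values, purely for illustration), the file would read:
```
localhost:5433:smash_custom:*:*
```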
69 |
70 | We pass the file location via the `SMASHPGPASSFILE` environment variable, like this:
71 | ```
72 | SMASHPGPASSFILE=config/pgpass ./scripts/postgresql-setup.sh --createdb
73 | ```
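
The same script also supports `--dropdb` and `--recreatedb`, should you ever need to start over:
```
SMASHPGPASSFILE=config/pgpass ./scripts/postgresql-setup.sh --recreatedb
```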
74 |
75 | After this we need to run the migrations required for SMASH to work. Again, we use the database config file:
76 | ```
77 | SMASHPGPASSFILE=config/pgpass ./smash-local run-migrations --mdir ./schema
78 | ```
79 |
80 | After that is completed, we should have a valid schema and should be able to run SMASH!
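
To sanity-check the database setup before moving on, the same helper script offers a `--check` option:
```
SMASHPGPASSFILE=config/pgpass ./scripts/postgresql-setup.sh --check
```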
81 |
82 | ## Basic Auth and DB
83 |
84 | Basic Auth is active unless SMASH was built with the `disable-basic-auth` flag, so make sure that flag is not set. After you run the migration scripts (see the examples in this guide), you can insert a user with a password into the DB via the command line interface (CLI). This will create a new admin user:
85 | ```
86 | SMASHPGPASSFILE=config/pgpass cabal run smash-exe -- create-admin-user --username ksaric --password cirask
87 | ```
88 |
89 | This CLI command will delete the admin user (both the username and password must match):
90 | ```
91 | SMASHPGPASSFILE=config/pgpass cabal run smash-exe -- delete-admin-user --username ksaric --password cirask
92 | ```
93 |
94 | Now you will be able to run your SMASH server with user authentication from the DB. If you change users or passwords, restart the application, since users are only reloaded on a full restart. _Any change to the users table requires a restart of the application._
95 |
96 | # Running SMASH
97 |
98 | Finally, one thing is left to do.
99 | We first run the node, as described above, and in another terminal session (or service) we run SMASH.
100 |
101 | SMASH must be run with the appropriate parameters, since it needs to stay in sync with the node.
102 | The socket path points to the socket used for communication with the node.
103 | For example:
104 | ```
105 | SMASHPGPASSFILE=config/pgpass ./smash-local run-app-with-db-sync --config config/testnet-config.yaml --socket-path ../cardano-node/state-node-shelley_testnet/node.socket --schema-dir schema/ --state-dir ledger-state/shelley-testnet
106 | ```
107 |
108 | After this, the SMASH application should start syncing blocks and picking up pools.
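
To confirm the server is up, you can query the health status endpoint (assuming the default port 3100 used in the examples below):
```
curl http://localhost:3100/api/v1/status
```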
109 |
110 | ## Running tests
111 |
112 | You can run tests using Stack:
113 | ```
114 | stack test --fast -j`nproc` --flag 'smash:testing-mode' --flag 'smash-servant-types:testing-mode'
115 | ```
116 |
117 | Or Cabal:
118 | ```
119 | cabal test all -f testing-mode
120 | ```
121 |
122 | ## Checking if it works
123 |
124 | For example, after seeing that a pool has been registered, you can try to fetch its info using its pool id and metadata hash (the hash in this example is `93b13334b5edf623fd4c7a716f3cf47be5baf7fb3a431c16ee07aab8ff074873`):
125 | ```
126 | curl -X GET -v http://localhost:3100/api/v1/metadata/062693863e0bcf9f619238f020741381d4d3748aae6faf1c012e80e7/93b13334b5edf623fd4c7a716f3cf47be5baf7fb3a431c16ee07aab8ff074873
127 | ```
128 |
129 | You can test delisting by sending a PATCH request to the delist endpoint (using the pool id from the example, `062693863e0bcf9f619238f020741381d4d3748aae6faf1c012e80e7`).
130 | ```
131 | curl -X PATCH -v http://localhost:3100/api/v1/delist -H 'content-type: application/json' -d '{"poolId": "062693863e0bcf9f619238f020741381d4d3748aae6faf1c012e80e7"}'
132 | ```
133 |
134 | Or, if you have Basic Auth enabled (replace with the username/password you have in your DB):
135 | ```
136 | curl -u ksaric:cirask -X PATCH -v http://localhost:3100/api/v1/delist -H 'content-type: application/json' -d '{"poolId": "062693863e0bcf9f619238f020741381d4d3748aae6faf1c012e80e7"}'
137 | ```
138 |
139 | ## Running stub server for local testing purposes
140 |
141 | Make sure to build SMASH in testing mode:
142 |
143 | ```
144 | stack install --flag 'smash:testing-mode' --flag 'smash-servant-types:testing-mode' --flag 'smash:disable-basic-auth'
145 |
146 | smash-exe run-stub-app
147 |
148 | curl -X POST -v -H 'content-type: application/octet-stream' --data-binary @test_pool.json \
149 | http://localhost:3100/api/v1/metadata/5ee7591bf30eaa4f5dce70b4a676eb02d5be8012d188f04fe3beffb0/cc019105f084aef2a956b2f7f2c0bf4e747bf7696705312c244620089429df6f
150 |
151 | curl -X GET -v \
152 | http://localhost:3100/api/v1/metadata/5ee7591bf30eaa4f5dce70b4a676eb02d5be8012d188f04fe3beffb0/cc019105f084aef2a956b2f7f2c0bf4e747bf7696705312c244620089429df6f
153 | ```
154 |
--------------------------------------------------------------------------------
/smash/smash.cabal:
--------------------------------------------------------------------------------
1 | cabal-version: 1.12
2 | name: smash
3 | version: 1.4.0
4 | description:
5 | Please see the README on GitHub at <https://github.com/input-output-hk/smash#readme>
6 |
7 | homepage: https://github.com/input-output-hk/smash#readme
8 | bug-reports: https://github.com/input-output-hk/smash/issues
9 | author: IOHK
10 | maintainer: operations@iohk.io
11 | license: Apache-2.0
12 | license-file: LICENSE
13 | build-type: Simple
14 |
15 | source-repository head
16 | type: git
17 | location: https://github.com/input-output-hk/smash
18 |
19 | flag disable-basic-auth
20 | description: Disable the basic authentication scheme in favour of other authentication mechanisms.
21 | default: False
22 |
23 | flag testing-mode
24 | description: A flag for allowing operations that promote easy testing.
25 | default: False
26 |
27 | library
28 | if flag(disable-basic-auth)
29 | cpp-options: -DDISABLE_BASIC_AUTH
30 |
31 | if flag(testing-mode)
32 | cpp-options: -DTESTING_MODE
33 |
34 | exposed-modules:
35 | Cardano.SMASH.DB
36 | Cardano.SMASH.DBSync.Db.Delete
37 | Cardano.SMASH.DBSync.Db.Insert
38 | Cardano.SMASH.DBSync.Db.Query
39 | Cardano.SMASH.DBSync.Db.Schema
40 |
41 | Cardano.SMASH.DBSync.Db.Migration
42 | Cardano.SMASH.DBSync.Db.Migration.Haskell
43 | Cardano.SMASH.DBSync.Db.Migration.Version
44 | Cardano.SMASH.DBSync.Db.PGConfig
45 | Cardano.SMASH.DBSync.Db.Run
46 |
47 | Cardano.SMASH.DBSync.Metrics
48 | Cardano.SMASH.DBSyncPlugin
49 | Cardano.SMASH.DBSyncRun
50 | Cardano.SMASH.FetchQueue
51 | Cardano.SMASH.Lib
52 | Cardano.SMASH.Offline
53 | Cardano.SMASH.HttpClient
54 |
55 | other-modules: Paths_smash
56 | hs-source-dirs: src
57 | build-depends:
58 | aeson
59 | , base >=4.7 && <5
60 | , base16-bytestring
61 | , bytestring
62 | , cardano-crypto-class
63 | , cardano-sync
64 | , cardano-ledger-core
65 | , cardano-prelude
66 | , cardano-slotting
67 | , cardano-db-sync
68 | , cardano-ledger-byron
69 | , cborg
70 | , conduit-extra
71 | , containers
72 | , contra-tracer
73 | , directory
74 | , esqueleto
75 | , extra
76 | , fast-logger
77 | , filepath
78 | , http-client
79 | , http-client-tls
80 | , http-types
81 | , http-conduit
82 | , io-classes
83 | , iohk-monitoring
84 | , monad-logger
85 | , ouroboros-consensus-byron
86 | , ouroboros-consensus-shelley
87 | , ouroboros-consensus-cardano
88 | , ouroboros-network
89 | , persistent
90 | , persistent-postgresql
91 | , persistent-template >=2.9.1.0
92 | , postgresql-simple
93 | , prometheus
94 | , resourcet
95 | , servant
96 | , servant-server
97 | , servant-swagger
98 | , shelley-spec-ledger
99 | , smash-servant-types
100 | , swagger2
101 | , template-haskell
102 | , text
103 | , time
104 | , transformers
105 | , transformers-except
106 | , typed-protocols
107 | , unix
108 | , wai
109 | , warp
110 |
111 | default-language: Haskell2010
112 | default-extensions:
113 | NoImplicitPrelude
114 | OverloadedStrings
115 |
116 | ghc-options:
117 | -Wall -Wcompat -Wincomplete-record-updates
118 | -Wincomplete-uni-patterns -Wredundant-constraints -Wpartial-fields
119 |
120 | executable smash-exe
121 |
122 | if flag(testing-mode)
123 | cpp-options: -DTESTING_MODE
124 |
125 | main-is: Main.hs
126 | other-modules:
127 | Paths_smash
128 | hs-source-dirs: app
129 | ghc-options: -threaded -rtsopts -with-rtsopts=-N
130 | build-depends:
131 | base >=4.7 && <5
132 | , cardano-prelude
133 | , cardano-sync
134 | , cardano-slotting
135 | , optparse-applicative
136 | , iohk-monitoring
137 | , smash
138 | , smash-servant-types
139 | , esqueleto
140 | , transformers
141 | , prometheus
142 | , filepath
143 | , bytestring
144 | , transformers-except
145 | , cardano-api
146 | , time
147 | , persistent-postgresql
148 | , persistent
149 | , ouroboros-consensus-cardano
150 | , ouroboros-consensus-shelley
151 | , ouroboros-network
152 | , shelley-spec-ledger
153 |
154 | default-language: Haskell2010
155 | default-extensions:
156 | NoImplicitPrelude
157 | OverloadedStrings
158 |
159 | ghc-options:
160 | -Wall -Wcompat -Wincomplete-record-updates
161 | -Wincomplete-uni-patterns -Wredundant-constraints -Wpartial-fields
162 |
163 | test-suite smash-test
164 |
165 | if flag(testing-mode)
166 | cpp-options: -DTESTING_MODE
167 |
168 | type: exitcode-stdio-1.0
169 | main-is: Spec.hs
170 | other-modules:
171 | Paths_smash
172 |
173 | hs-source-dirs: test
174 | ghc-options: -threaded -rtsopts -with-rtsopts=-N
175 | build-depends:
176 | base >=4.7 && <5
177 | , cardano-prelude
178 | , iohk-monitoring
179 | , containers
180 | , ed25519
181 | , hspec
182 | , QuickCheck
183 | , quickcheck-state-machine >=0.6
184 | , smash
185 | , smash-servant-types
186 | , tree-diff
187 | , transformers-except
188 |
189 | default-language: Haskell2010
190 | default-extensions:
191 | NoImplicitPrelude
192 | OverloadedStrings
193 |
194 | ghc-options:
195 | -Wall -Wcompat -Wincomplete-record-updates
196 | -Wincomplete-uni-patterns -Wredundant-constraints -Wpartial-fields
197 |
198 |
199 | test-suite db-spec-test
200 |
201 | if flag(testing-mode)
202 | cpp-options: -DTESTING_MODE
203 |
204 | type: exitcode-stdio-1.0
205 | main-is: DBSpec.hs
206 | other-modules:
207 | Paths_smash
208 | MigrationSpec
209 |
210 | hs-source-dirs: test
211 | ghc-options: -threaded -rtsopts -with-rtsopts=-N
212 | build-depends:
213 | base >=4.7 && <5
214 | , cardano-prelude
215 | , filepath
216 | , directory
217 | , containers
218 | , ed25519
219 | , hspec
220 | , QuickCheck
221 | , quickcheck-state-machine >=0.6
222 | , smash
223 | , smash-servant-types
224 | , iohk-monitoring
225 | , tree-diff
226 | , time
227 | , transformers-except
228 |
229 | default-language: Haskell2010
230 | default-extensions:
231 | NoImplicitPrelude
232 | OverloadedStrings
233 |
234 | ghc-options:
235 | -Wall -Wcompat -Wincomplete-record-updates
236 | -Wincomplete-uni-patterns -Wredundant-constraints -Wpartial-fields
237 |
238 |
239 |
--------------------------------------------------------------------------------
/stack.yaml:
--------------------------------------------------------------------------------
1 | resolver: lts-17.4
2 |
3 | allow-newer: true
4 |
5 | packages:
6 | - smash
7 | - smash-servant-types
8 |
9 | flags:
10 | # Bundle VRF crypto in libsodium and do not rely on an external fork to have it.
11 | # This still requires the host system to have the 'standard' libsodium installed.
12 | cardano-crypto-praos:
13 | external-libsodium-vrf: false
14 |
15 | ghc-options:
16 | smash: -Wall -Werror -fno-warn-redundant-constraints
17 | #smash-servant-types: -Wall -Werror -fno-warn-redundant-constraints
18 |
19 | # Generate files required by Weeder.
20 | # See https://github.com/ndmitchell/weeder/issues/53
21 | ghc-options:
22 | "$locals": -fwrite-ide-info -ddump-to-file -ddump-hi
23 |
24 | extra-deps:
25 | - Cabal-3.4.0.0
26 | - async-timer-0.2.0.0
27 | - parsec-3.1.14.0
28 | - base16-0.1.2.1
29 | - base16-bytestring-1.0.1.0
30 | - base58-bytestring-0.1.0
31 | - base64-0.4.2
32 | - bech32-1.1.0
33 | - bech32-th-1.0.2
34 | - binary-0.8.7.0
35 | - bimap-0.4.0
36 | - canonical-json-0.6.0.0
37 | - cborg-0.2.4.0
38 | - clock-0.8
39 | - config-ini-0.2.4.0
40 | - connection-0.3.1
41 | - containers-0.5.11.0
42 | - data-clist-0.1.2.2
43 | - dns-3.0.4
44 | - generic-monoid-0.1.0.0
45 | - generics-sop-0.5.1.0
46 | - ghc-byteorder-4.11.0.0.10
47 | - gray-code-0.3.1
48 | - hedgehog-1.0.5
49 | - hedgehog-corpus-0.2.0
50 | - hedgehog-quickcheck-0.1.1
51 | - hspec-2.7.0
52 | - hspec-core-2.7.0
53 | - hspec-discover-2.7.0
54 | - io-streams-1.5.1.0
55 | - io-streams-haproxy-1.0.1.0
56 | - katip-0.8.4.0
57 | - libsystemd-journal-1.4.4
58 | - micro-recursion-schemes-5.0.2.2
59 | - moo-1.2
60 | - network-3.1.2.1
61 | - partial-order-0.2.0.0
62 | - prettyprinter-1.7.0
63 | - primitive-0.7.1.0
64 | - protolude-0.3.0
65 | - quiet-0.2
66 | - semialign-1.1.0.1
67 | - snap-core-1.0.4.1
68 | - snap-server-1.1.1.1
69 | - sop-core-0.5.0.1
70 | - statistics-linreg-0.3
71 | - streaming-binary-0.2.2.0
72 | - streaming-bytestring-0.2.0
73 | - systemd-2.3.0
74 | - tasty-hedgehog-1.0.0.2
75 | - text-1.2.4.0
76 | - text-ansi-0.1.0
77 | - text-conversions-0.3.1
78 | - text-zipper-0.10.1
79 | - th-lift-instances-0.1.14
80 | - these-1.1.1.1
81 | - time-units-1.0.0
82 | - transformers-except-0.1.1
83 | - unordered-containers-0.2.12.0
84 | - Unique-0.4.7.6
85 | - word-wrap-0.4.1
86 | - websockets-0.12.6.1
87 | - Win32-2.6.2.0
88 | - nothunks-0.1.2
89 |
90 | # db-sync dependency
91 | - git: https://github.com/input-output-hk/cardano-db-sync
92 | commit: 7ab7a9a2863cb1cdd344fccafef163b2a8372dc5
93 | subdirs:
94 | - cardano-sync
95 | - cardano-db
96 | - cardano-db-sync
97 |
98 | - git: https://github.com/input-output-hk/cardano-base
99 | commit: cb0f19c85e5bb5299839ad4ed66af6fa61322cc4
100 | subdirs:
101 | - binary
102 | - binary/test
103 | - cardano-crypto-class
104 | - cardano-crypto-tests
105 | - cardano-crypto-praos
106 | - slotting
107 |
108 | - git: https://github.com/input-output-hk/cardano-crypto
109 | commit: 07397f0e50da97eaa0575d93bee7ac4b2b2576ec
110 |
111 | - git: https://github.com/input-output-hk/cardano-ledger-specs
112 | commit: d5b184a820853c7ba202efd615b8fadca1acb52c
113 | subdirs:
114 | - alonzo/impl
115 | - byron/crypto
116 | - byron/crypto/test
117 | - byron/chain/executable-spec
118 | - byron/ledger/executable-spec
119 | - byron/ledger/impl
120 | - byron/ledger/impl/test
121 | - semantics/executable-spec
122 | - semantics/small-steps-test
123 | - shelley/chain-and-ledger/dependencies/non-integer
124 | - shelley/chain-and-ledger/executable-spec
125 | - shelley/chain-and-ledger/shelley-spec-ledger-test
126 | - shelley-ma/impl
127 | - shelley-ma/shelley-ma-test
128 |
129 | - git: https://github.com/input-output-hk/cardano-node
130 | commit: 9a6a6c81e3aebfaf757b562c823146c7da601e1c
131 | subdirs:
132 | - cardano-api
133 | - cardano-api/test
134 | - cardano-cli
135 | - cardano-config
136 | - cardano-node
137 | - cardano-node-chairman
138 | - hedgehog-extras
139 |
140 | - git: https://github.com/input-output-hk/cardano-prelude
141 | commit: fd773f7a58412131512b9f694ab95653ac430852
142 | subdirs:
143 | - cardano-prelude
144 | - cardano-prelude-test
145 |
146 | - git: https://github.com/input-output-hk/cardano-sl-x509
147 | commit: 43a036c5bbe68ca2e9cbe611eab7982e2348fe49
148 |
149 | - git: https://github.com/input-output-hk/goblins
150 | commit: cde90a2b27f79187ca8310b6549331e59595e7ba
151 |
152 | - git: https://github.com/input-output-hk/iohk-monitoring-framework
153 | commit: 808724ff8a19a33d0ed06f9ef59fbd900b08553c
154 | subdirs:
155 | - contra-tracer
156 | - iohk-monitoring
157 | - plugins/backend-aggregation
158 | - plugins/backend-ekg
159 | - plugins/backend-monitoring
160 | - plugins/backend-trace-forwarder
161 | - plugins/scribe-systemd
162 | - tracer-transformers
163 |
164 | - git: https://github.com/input-output-hk/ouroboros-network
165 | commit: d070bad7ce389a4b2ff7fb4fcb7937fdeca80f3a
166 | subdirs:
167 | - io-sim
168 | - io-sim-classes
169 | - monoidal-synchronisation
170 | - network-mux
171 | - ouroboros-consensus
172 | - ouroboros-consensus-byron
173 | - ouroboros-consensus-cardano
174 | - ouroboros-consensus-shelley
175 | - ouroboros-network
176 | - ouroboros-network-framework
177 | - typed-protocols
178 | - typed-protocols-examples
179 | # Extra packages not used by cardano-node
180 | - cardano-client
181 | - ntp-client
182 | - ouroboros-consensus-mock
183 |
184 | - git: https://github.com/input-output-hk/Win32-network
185 | commit: 3825d3abf75f83f406c1f7161883c438dac7277d
186 |
187 | - git: https://github.com/input-output-hk/plutus
188 | commit: 8c83c4abe211b4bbcaca3cdf1b2c0e38d0eb683f
189 | subdirs:
190 | - plutus-core
191 | - plutus-ledger-api
192 | - plutus-tx
193 | - prettyprinter-configurable
194 |
195 | - git: https://github.com/snoyberg/http-client.git
196 | commit: 1a75bdfca014723dd5d40760fad854b3f0f37156
197 | subdirs:
198 | - http-client
199 |
200 | # Plutus deps
201 | - Stream-0.4.7.2
202 | - composition-prelude-3.0.0.2
203 | - dependent-map-0.4.0.0
204 | - dependent-sum-0.6.2.0
205 | - dependent-sum-template-0.1.0.3
206 | - lazy-search-0.1.2.1
207 | - monoidal-containers-0.6.0.1
208 | - size-based-0.1.2.0
209 | - witherable-0.4.1
210 | - constraints-extras-0.3.0.2
211 | - indexed-traversable-instances-0.1
212 | - lazysmallcheck-0.6
213 |
214 | # Additional deps
215 | - persistent-2.11.0.1
216 | - persistent-postgresql-2.11.0.0
217 | - persistent-template-2.9.1.0
218 |
219 | - esqueleto-3.4.0.1
220 |
221 | # Compiler error 'System.Metrics.Prometheus.Http.Scrape (serveMetricsT)'
222 | - prometheus-2.2.2
223 |
224 | # Testing
225 | - quickcheck-state-machine-0.7.0
226 | - markov-chain-usage-model-0.0.0
227 |
228 | nix:
229 | shell-file: nix/stack-shell.nix
230 |
--------------------------------------------------------------------------------
/smash/src/Cardano/SMASH/DBSync/Db/Run.hs:
--------------------------------------------------------------------------------
1 | {-# LANGUAGE ConstraintKinds #-}
2 | {-# LANGUAGE LambdaCase #-}
3 | {-# LANGUAGE OverloadedStrings #-}
4 |
5 | module Cardano.SMASH.DBSync.Db.Run
6 | ( getBackendGhci
7 | , ghciDebugQuery
8 | , runDbAction
9 | , runDbHandleLogger
10 | , runDbIohkLogging
11 | , runDbNoLogging
12 | , runDbStdoutLogging
13 | ) where
14 |
15 | import Cardano.BM.Data.LogItem (LOContent (..),
16 | LogObject (..),
17 | PrivacyAnnotation (..),
18 | mkLOMeta)
19 | import Cardano.BM.Data.Severity (Severity (..))
20 | import Cardano.BM.Trace (Trace)
21 |
22 | import Control.Tracer (traceWith)
23 |
24 | import Cardano.Prelude
25 |
26 | import Control.Monad.Logger (LogLevel (..), LogSource,
27 | LoggingT, NoLoggingT,
28 | defaultLogStr,
29 | runLoggingT,
30 | runNoLoggingT,
31 | runStdoutLoggingT)
32 |
33 | import qualified Data.ByteString.Char8 as BS
34 | import qualified Data.Text.Encoding as T
35 | import qualified Data.Text.Lazy.Builder as LT
36 | import qualified Data.Text.Lazy.IO as LT
37 |
38 | import Database.Persist.Postgresql (openSimpleConn,
39 | withPostgresqlConn)
40 | import Database.Persist.Sql (IsolationLevel (..),
41 | runSqlConnWithIsolation)
42 | import Database.PostgreSQL.Simple (connectPostgreSQL)
43 |
44 | import Database.Esqueleto.Legacy
45 | import Database.Esqueleto.Internal.Internal (Mode (..), SqlSelect,
46 | initialIdentState,
47 | toRawSql)
48 |
49 | import Cardano.SMASH.DBSync.Db.PGConfig
50 |
51 | import Language.Haskell.TH.Syntax (Loc)
52 |
53 | import System.Log.FastLogger (LogStr, fromLogStr)
54 |
55 | -- | Run a DB action logging via the provided Handle.
56 | runDbHandleLogger :: Handle -> ReaderT SqlBackend (LoggingT IO) a -> IO a
57 | runDbHandleLogger logHandle dbAction = do
58 | pgconf <- readPGPassFileEnv
59 | runHandleLoggerT .
60 | withPostgresqlConn (toConnectionString pgconf) $ \backend ->
61 | -- The 'runSqlConnWithIsolation' function starts a transaction, runs the 'dbAction'
62 | -- and then commits the transaction.
63 | runSqlConnWithIsolation dbAction backend Serializable
64 | where
65 | runHandleLoggerT :: LoggingT m a -> m a
66 | runHandleLoggerT action =
67 | runLoggingT action logOut
68 |
69 | logOut :: Loc -> LogSource -> LogLevel -> LogStr -> IO ()
70 | logOut loc src level msg =
71 | BS.hPutStrLn logHandle . fromLogStr $ defaultLogStr loc src level msg
72 |
73 | runDbAction :: Maybe (Trace IO Text) -> ReaderT SqlBackend (LoggingT IO) a -> IO a
74 | runDbAction mLogging dbAction = do
75 | pgconf <- readPGPassFileEnv
76 | case mLogging of
77 | Nothing ->
78 | runSilentLoggingT .
79 | withPostgresqlConn (toConnectionString pgconf) $ \backend ->
80 | runSqlConnWithIsolation dbAction backend Serializable
81 | Just tracer ->
82 | runIohkLogging tracer .
83 | withPostgresqlConn (toConnectionString pgconf) $ \backend ->
84 | runSqlConnWithIsolation dbAction backend Serializable
85 | where
86 | runSilentLoggingT :: LoggingT m a -> m a
87 | runSilentLoggingT action = runLoggingT action silentLog
88 |
89 | silentLog :: Monad m => Loc -> LogSource -> LogLevel -> LogStr -> m ()
90 | silentLog _loc _src _level _msg = pure ()
91 |
92 | -- | Run a DB action logging via iohk-monitoring-framework.
93 | runDbIohkLogging :: Trace IO Text -> ReaderT SqlBackend (LoggingT IO) b -> IO b
94 | runDbIohkLogging tracer dbAction = do
95 | pgconf <- readPGPassFileEnv
96 | runIohkLogging tracer .
97 | withPostgresqlConn (toConnectionString pgconf) $ \backend ->
98 | runSqlConnWithIsolation dbAction backend Serializable
99 |
100 | runIohkLogging :: Trace IO Text -> LoggingT m a -> m a
101 | runIohkLogging tracer action =
102 | runLoggingT action toIohkLog
103 | where
104 | toIohkLog :: Loc -> LogSource -> LogLevel -> LogStr -> IO ()
105 | toIohkLog _loc _src level msg = do
106 | meta <- mkLOMeta (toIohkSeverity level) Public
107 | traceWith tracer $
108 | (name, LogObject name meta (LogMessage . T.decodeLatin1 $ fromLogStr msg))
109 |
110 | name :: Text
111 | name = "db-sync"
112 |
113 | toIohkSeverity :: LogLevel -> Severity
114 | toIohkSeverity =
115 | \case
116 | LevelDebug -> Debug
117 | LevelInfo -> Info
118 | LevelWarn -> Warning
119 | LevelError -> Error
120 | LevelOther _ -> Error
121 |
122 | -- | Run a DB action without any logging. Mainly for tests.
123 | runDbNoLogging :: ReaderT SqlBackend (NoLoggingT IO) a -> IO a
124 | runDbNoLogging action = do
125 | pgconfig <- readPGPassFileEnv
126 | runNoLoggingT .
127 | withPostgresqlConn (toConnectionString pgconfig) $ \backend ->
128 | runSqlConnWithIsolation action backend Serializable
129 |
130 | -- | Run a DB action with stdout logging. Mainly for debugging.
131 | runDbStdoutLogging :: ReaderT SqlBackend (LoggingT IO) b -> IO b
132 | runDbStdoutLogging action = do
133 | pgconfig <- readPGPassFileEnv
134 | runStdoutLoggingT .
135 | withPostgresqlConn (toConnectionString pgconfig) $ \backend ->
136 | runSqlConnWithIsolation action backend Serializable
137 |
138 | -- from Control.Monad.Logger, wasn't exported
139 | defaultOutput :: Handle
140 | -> Loc
141 | -> LogSource
142 | -> LogLevel
143 | -> LogStr
144 | -> IO ()
145 | defaultOutput h loc src level msg =
146 | BS.hPutStr h ls
147 | where
148 | ls = defaultLogStrBS loc src level msg
149 |
150 | defaultLogStrBS :: Loc
151 | -> LogSource
152 | -> LogLevel
153 | -> LogStr
154 | -> BS.ByteString
155 | defaultLogStrBS a b c d =
156 | toBS $ defaultLogStr a b c d
157 | where
158 | toBS = fromLogStr
159 |
160 | getBackendGhci :: IO SqlBackend
161 | getBackendGhci = do
162 | pgconfig <- readPGPassFileEnv
163 | connection <- connectPostgreSQL (toConnectionString pgconfig)
164 | openSimpleConn (\loc source level str -> defaultOutput stdout loc source level str) connection
165 |
166 | ghciDebugQuery :: SqlSelect a r => SqlQuery a -> IO ()
167 | ghciDebugQuery query = do
168 | pgconfig <- readPGPassFileEnv
169 | runStdoutLoggingT . withPostgresqlConn (toConnectionString pgconfig) $ \backend -> do
170 | let
171 | (sql,params) = toRawSql SELECT (backend, initialIdentState) query
172 | liftIO $ do
173 | LT.putStr $ LT.toLazyText sql
174 | print params
175 |
--------------------------------------------------------------------------------
/smash-servant-types/src/Cardano/SMASH/API.hs:
--------------------------------------------------------------------------------
1 | {-# LANGUAGE CPP #-}
2 | {-# LANGUAGE DataKinds #-}
3 | {-# LANGUAGE DeriveGeneric #-}
4 | {-# LANGUAGE FlexibleInstances #-}
5 | {-# LANGUAGE MultiParamTypeClasses #-}
6 | {-# LANGUAGE ScopedTypeVariables #-}
7 | {-# LANGUAGE TypeApplications #-}
8 | {-# LANGUAGE TypeFamilies #-}
9 | {-# LANGUAGE TypeOperators #-}
10 |
11 | module Cardano.SMASH.API
12 | ( API
13 | , DelistedPoolsAPI
14 | , fullAPI
15 | , smashApi
16 | ) where
17 |
18 | import Cardano.Prelude
19 | import Prelude (String)
20 |
21 | import Data.Aeson (FromJSON, ToJSON (..),
22 | eitherDecode, encode, object,
23 | (.=))
24 | import Data.Swagger (Swagger (..))
25 |
26 | import Network.Wai (Request, lazyRequestBody)
27 | import Servant (BasicAuth, Capture, Get,
28 | HasServer (..), Header, Headers,
29 | JSON, OctetStream, Patch, Post,
30 | QueryParam, ReqBody,
31 | (:<|>) (..), (:>))
32 | import Servant.Server (err400)
33 | import Servant.Server.Internal (DelayedIO, addBodyCheck,
34 | delayedFailFatal, errBody,
35 | withRequest)
36 |
37 | import Servant.Swagger (HasSwagger (..))
38 |
39 | import Cardano.SMASH.DBSync.Db.Error (DBFail (..))
40 | import Cardano.SMASH.Types (ApiResult, HealthStatus,
41 | PolicyResult, PoolFetchError,
42 | PoolId (..),
43 | PoolIdBlockNumber (..),
44 | PoolMetadataHash,
45 | PoolMetadataRaw, SmashURL,
46 | TickerName, TimeStringFormat,
47 | UniqueTicker, User)
48 |
49 |
50 | -- Showing errors as JSON. To be reused when we need more general error handling.
51 |
52 | data Body a
53 |
54 | instance (FromJSON a, HasServer api context) => HasServer (Body a :> api) context where
55 | type ServerT (Body a :> api) m = a -> ServerT api m
56 |
57 | route Proxy context subserver =
58 | route (Proxy :: Proxy api) context (addBodyCheck subserver ctCheck bodyCheckRequest)
59 | where
60 | -- Don't check the content type specifically.
61 | ctCheck :: DelayedIO Request
62 | ctCheck = withRequest $ \req -> pure req
63 |
64 | bodyCheckRequest :: Request -> DelayedIO a
65 | bodyCheckRequest request = do
66 | body <- liftIO (lazyRequestBody request)
67 | case eitherDecode body of
68 | Left dbFail ->
69 | delayedFailFatal err400 { errBody = encode dbFail }
70 | Right v ->
71 | return v
72 |
73 | newtype BodyError = BodyError String
74 | instance ToJSON BodyError where
75 | toJSON (BodyError b) = object ["error" .= b]
76 |
77 | -- | For API versioning.
78 | type APIVersion = "v1"
79 |
80 | -- | Shortcut for common api result types.
81 | type ApiRes verb a = verb '[JSON] (ApiResult DBFail a)
82 |
83 | -- The basic auth wrapper for the protected endpoints below.
84 | type BasicAuthURL = BasicAuth "smash" User
85 |
86 | -- GET api/v1/metadata/{id}/{hash}
87 | type OfflineMetadataAPI = "api" :> APIVersion :> "metadata" :> Capture "id" PoolId :> Capture "hash" PoolMetadataHash :> Get '[JSON] (Headers '[Header "Cache-Control" Text] (ApiResult DBFail PoolMetadataRaw))
88 |
89 | -- GET api/v1/status
90 | type HealthStatusAPI = "api" :> APIVersion :> "status" :> ApiRes Get HealthStatus
91 |
92 | -- GET api/v1/tickers
93 | type ReservedTickersAPI = "api" :> APIVersion :> "tickers" :> ApiRes Get [UniqueTicker]
94 |
95 | -- GET api/v1/delisted
96 | type DelistedPoolsAPI = "api" :> APIVersion :> "delisted" :> ApiRes Get [PoolId]
97 |
98 | -- GET api/v1/errors/{poolId}
99 | type FetchPoolErrorAPI = "api" :> APIVersion :> "errors" :> Capture "poolId" PoolId :> QueryParam "fromDate" TimeStringFormat :> ApiRes Get [PoolFetchError]
100 |
101 | #ifdef DISABLE_BASIC_AUTH
102 | -- PATCH api/v1/delist
103 | type DelistPoolAPI = "api" :> APIVersion :> "delist" :> ReqBody '[JSON] PoolId :> ApiRes Patch PoolId
104 |
105 | type EnlistPoolAPI = "api" :> APIVersion :> "enlist" :> ReqBody '[JSON] PoolId :> ApiRes Patch PoolId
106 |
107 | type AddTickerAPI = "api" :> APIVersion :> "tickers" :> Capture "name" TickerName :> ReqBody '[JSON] PoolMetadataHash :> ApiRes Post TickerName
108 |
109 | -- Enables the SMASH server to fetch policies (e.g. delisting, unique ticker names) from a remote SMASH server.
110 | type FetchPoliciesAPI = "api" :> APIVersion :> "policies" :> ReqBody '[JSON] SmashURL :> ApiRes Post PolicyResult
111 | #else
112 | type DelistPoolAPI = BasicAuthURL :> "api" :> APIVersion :> "delist" :> ReqBody '[JSON] PoolId :> ApiRes Patch PoolId
113 |
114 | type EnlistPoolAPI = BasicAuthURL :> "api" :> APIVersion :> "enlist" :> ReqBody '[JSON] PoolId :> ApiRes Patch PoolId
115 |
116 | type AddTickerAPI = "api" :> APIVersion :> "tickers" :> Capture "name" TickerName :> ReqBody '[JSON] PoolMetadataHash :> ApiRes Post TickerName
117 |
118 | -- Enables the SMASH server to fetch policies (e.g. delisting, unique ticker names) from a remote SMASH server.
119 | type FetchPoliciesAPI = BasicAuthURL :> "api" :> APIVersion :> "policies" :> ReqBody '[JSON] SmashURL :> ApiRes Post PolicyResult
120 | #endif
121 |
122 | type RetiredPoolsAPI = "api" :> APIVersion :> "retired" :> ApiRes Get [PoolId]
123 |
124 | type CheckPoolAPI = "api" :> APIVersion :> "exists" :> Capture "poolId" PoolId :> ApiRes Get PoolId
125 |
126 | -- The full API.
127 | type SmashAPI = OfflineMetadataAPI
128 | :<|> HealthStatusAPI
129 | :<|> ReservedTickersAPI
130 | :<|> DelistedPoolsAPI
131 | :<|> DelistPoolAPI
132 | :<|> EnlistPoolAPI
133 | :<|> FetchPoolErrorAPI
134 | :<|> RetiredPoolsAPI
135 | :<|> CheckPoolAPI
136 | :<|> AddTickerAPI
137 | :<|> FetchPoliciesAPI
138 | #ifdef TESTING_MODE
139 | :<|> RetirePoolAPI
140 | :<|> AddPoolAPI
141 |
142 | type RetirePoolAPI = "api" :> APIVersion :> "retired" :> ReqBody '[JSON] PoolIdBlockNumber :> ApiRes Patch PoolId
143 | type AddPoolAPI = "api" :> APIVersion :> "metadata" :> Capture "id" PoolId :> Capture "hash" PoolMetadataHash :> ReqBody '[OctetStream] PoolMetadataRaw :> ApiRes Post PoolId
144 |
145 | #endif
146 |
147 | -- | API for serving @swagger.json@.
148 | type SwaggerAPI = "swagger.json" :> Get '[JSON] Swagger
149 |
150 | -- | Combined API of the SMASH service with Swagger documentation.
151 | type API = SwaggerAPI :<|> SmashAPI
152 |
153 | fullAPI :: Proxy API
154 | fullAPI = Proxy
155 |
156 | -- | Just the @Proxy@ for the API type.
157 | smashApi :: Proxy SmashAPI
158 | smashApi = Proxy
159 |
160 | -- For now, we just ignore the @Body@ definition.
161 | instance (HasSwagger api) => HasSwagger (Body name :> api) where
162 | toSwagger _ = toSwagger (Proxy :: Proxy api)
163 |
164 | -- For now, we just ignore the @BasicAuth@ definition.
165 | instance (HasSwagger api) => HasSwagger (BasicAuth name typo :> api) where
166 | toSwagger _ = toSwagger (Proxy :: Proxy api)
167 |
168 |
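A sketch of how 'fullAPI' might be wired to a WAI server. With basic auth
compiled in, the server needs a 'BasicAuthCheck User' in its context; the
check below rejects every request and the handlers are stubbed out, since the
real implementations live in the smash package (the port and the stub are
illustrative, and the sketch assumes this module's imports and extensions):

    import Network.Wai.Handler.Warp (run)
    import Servant (Context (..), serveWithContext)
    import Servant.Server (BasicAuthCheck (..), BasicAuthResult (..))

    main :: IO ()
    main = run 3100 $ serveWithContext fullAPI ctx handlers
      where
        -- Reject all credentials; a real check would look the user up.
        ctx :: Context '[BasicAuthCheck User]
        ctx = BasicAuthCheck (\_ -> pure Unauthorized) :. EmptyContext

        handlers = undefined -- real handlers are provided by the smash package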
--------------------------------------------------------------------------------
/smash/src/Cardano/SMASH/DBSync/Db/Database.hs:
--------------------------------------------------------------------------------
1 | {-# LANGUAGE FlexibleInstances #-}
2 | {-# LANGUAGE NoImplicitPrelude #-}
3 | {-# LANGUAGE OverloadedStrings #-}
4 | {-# LANGUAGE TypeSynonymInstances #-}
5 |
6 | module Cardano.SMASH.DBSync.Db.Database
7 | ( DbAction (..)
8 | , DbActionQueue (..)
9 | , lengthDbActionQueue
10 | , newDbActionQueue
11 | , runDbStartup
12 | , runDbThread
13 | , writeDbActionQueue
14 | ) where
15 |
16 | import Cardano.BM.Trace (Trace, logDebug,
17 | logError, logInfo)
18 | import qualified Cardano.Chain.Block as Ledger
19 | import Cardano.Prelude
20 |
21 | import Control.Monad.Logger (LoggingT)
22 | import Control.Monad.Trans.Except.Extra (left, newExceptT,
23 | runExceptT)
24 |
25 | import Cardano.Slotting.Slot (SlotNo)
26 |
27 | import qualified Cardano.SMASH.DB as DB
28 |
29 | import qualified Cardano.DbSync.Era.Byron.Util as Byron
30 | import Cardano.DbSync.Config
31 | import Cardano.DbSync.DbAction
32 | import Cardano.DbSync.Error
33 | import Cardano.DbSync.LedgerState
34 | import Cardano.DbSync.Plugin
35 | import Cardano.DbSync.Types
36 | import Cardano.DbSync.Util
37 |
38 | import Database.Persist.Sql (SqlBackend)
39 |
40 | import Ouroboros.Consensus.Byron.Ledger (ByronBlock (..))
41 | import Ouroboros.Consensus.Cardano.Block (HardForkBlock (..))
42 |
43 | -- TODO(KS): This whole module is a candidate for deletion. It is not clear why
44 | -- so many different things live in one module.
45 |
46 | data NextState
47 | = Continue
48 | | Done
49 | deriving Eq
50 |
51 | -- TODO(KS): Do we even need this? What is this?
52 | runDbStartup :: DbSyncNodePlugin -> Trace IO Text -> IO ()
53 | runDbStartup plugin trce =
54 | DB.runDbAction (Just trce) $
55 | mapM_ (\action -> action trce) $ plugOnStartup plugin
56 |
57 | -- TODO(KS): Needs a @DataLayer@.
58 | -- TODO(KS): Metrics layer!
59 | runDbThread
60 | :: HasCallStack
61 | => Trace IO Text
62 | -> DbSyncEnv
63 | -> DbSyncNodePlugin
64 | -> DbActionQueue
65 | -> LedgerStateVar
66 | -> IO ()
67 | runDbThread trce env plugin queue ledgerStateVar = do
68 | logInfo trce "Running DB thread"
69 | logException trce "runDBThread: " loop
70 | logInfo trce "Shutting down DB thread"
71 | where
72 | loop = do
73 | xs <- blockingFlushDbActionQueue queue
74 |
75 | when (length xs > 1) $ do
76 | logDebug trce $ "runDbThread: " <> textShow (length xs) <> " blocks"
77 |
78 | eNextState <- runExceptT $ runActions trce env plugin ledgerStateVar xs
79 |
80 | case eNextState of
81 | Left err -> logError trce $ renderDbSyncNodeError err
82 | Right Continue -> loop
83 | Right Done -> pure ()
84 |
85 | -- | Run the list of 'DbAction's. Blocks are applied in a single set (as a transaction)
86 | -- and other operations are applied one-by-one.
87 | runActions
88 | :: Trace IO Text
89 | -> DbSyncEnv
90 | -> DbSyncNodePlugin
91 | -> LedgerStateVar
92 | -> [DbAction]
93 | -> ExceptT DbSyncNodeError IO NextState
94 | runActions trce env plugin ledgerState actions = do
95 | nextState <- checkDbState trce actions
96 | if nextState /= Done
97 | then dbAction Continue actions
98 | else pure Continue
99 | where
100 | dbAction :: NextState -> [DbAction] -> ExceptT DbSyncNodeError IO NextState
101 | dbAction next [] = pure next
102 | dbAction Done _ = pure Done
103 | dbAction Continue xs =
104 | case spanDbApply xs of
105 | ([], DbFinish:_) -> do
106 | pure Done
107 | ([], DbRollBackToPoint sn:ys) -> do
108 | runRollbacks trce plugin sn
109 | liftIO $ loadLedgerState (envLedgerStateDir env) ledgerState sn
110 | dbAction Continue ys
111 | (ys, zs) -> do
112 | insertBlockList trce env ledgerState plugin ys
113 | if null zs
114 | then pure Continue
115 | else dbAction Continue zs
116 |
117 | -- TODO(KS): This seems wrong, why do we validate something here?
118 | checkDbState :: Trace IO Text -> [DbAction] -> ExceptT DbSyncNodeError IO NextState
119 | checkDbState trce xs =
120 | case filter isMainBlockApply (reverse xs) of
121 | [] -> pure Continue
122 | (DbApplyBlock blktip : _) -> validateBlock blktip
123 | _ -> pure Continue
124 | where
125 | -- We need to separate base types from new types so we achieve separation.
126 | validateBlock :: BlockDetails -> ExceptT DbSyncNodeError IO NextState
127 | validateBlock (BlockDetails cblk _) = do
128 | case cblk of
129 | BlockByron bblk ->
130 | case byronBlockRaw bblk of
131 | Ledger.ABOBBoundary _ -> left $ NEError "checkDbState got a boundary block"
132 | Ledger.ABOBBlock chBlk -> do
133 | mDbBlk <- liftIO $ DB.runDbAction (Just trce) $ DB.queryBlockNo (Byron.blockNumber chBlk)
134 | case mDbBlk of
135 | Nothing -> pure Continue
136 | Just dbBlk -> do
137 | when (DB.blockHash dbBlk /= Byron.blockHash chBlk) $ do
138 | liftIO $ logInfo trce (textShow chBlk)
139 | left $ NEBlockMismatch (Byron.blockNumber chBlk) (DB.blockHash dbBlk) (Byron.blockHash chBlk)
140 |
141 | liftIO . logInfo trce $
142 | mconcat [ "checkDbState: Block no ", textShow (Byron.blockNumber chBlk), " present" ]
143 | pure Done -- Block already exists, so we are done.
144 |
145 | BlockShelley {} ->
146 | panic "checkDbState for ShelleyBlock not yet implemented"
147 | BlockAllegra {} ->
148 | panic "checkDbState for AllegraBlock not yet implemented"
149 | BlockMary {} ->
150 | panic "checkDbState for MaryBlock not yet implemented"
151 |
152 |
153 | isMainBlockApply :: DbAction -> Bool
154 | isMainBlockApply dba =
155 | case dba of
156 | DbApplyBlock (BlockDetails cblk _details) ->
157 | case cblk of
158 | BlockByron bblk ->
159 | case byronBlockRaw bblk of
160 | Ledger.ABOBBlock _ -> True
161 | Ledger.ABOBBoundary _ -> False
162 | BlockShelley {} -> False
163 | BlockAllegra {} -> False
164 | BlockMary {} -> False
165 | DbRollBackToPoint {} -> False
166 | DbFinish -> False
167 |
168 | runRollbacks
169 | :: Trace IO Text
170 | -> DbSyncNodePlugin
171 | -> SlotNo
172 | -> ExceptT DbSyncNodeError IO ()
173 | runRollbacks trce plugin point =
174 | newExceptT
175 | . traverseMEither (\ f -> f trce point)
176 | $ plugRollbackBlock plugin
177 |
178 | insertBlockList
179 | :: Trace IO Text
180 | -> DbSyncEnv
181 | -> LedgerStateVar
182 | -> DbSyncNodePlugin
183 | -> [BlockDetails]
184 | -> ExceptT DbSyncNodeError IO ()
185 | insertBlockList trce env ledgerState plugin blks =
186 | -- Logging every 'Persistent' operation here is great for debugging,
187 | -- but otherwise it is *way* too chatty.
188 | newExceptT
189 | . DB.runDbAction (Just trce)
190 | $ traverseMEither insertBlock blks
191 | where
192 | insertBlock
193 | :: BlockDetails
194 | -> ReaderT SqlBackend (LoggingT IO) (Either DbSyncNodeError ())
195 | insertBlock blkTip =
196 | traverseMEither (\ f -> f trce env ledgerState blkTip) $ plugInsertBlock plugin
197 |
198 | -- | Split the DbAction list into a prefix containing blocks to apply and a postfix.
199 | spanDbApply :: [DbAction] -> ([BlockDetails], [DbAction])
200 | spanDbApply lst =
201 | case lst of
202 | (DbApplyBlock bt:xs) -> let (ys, zs) = spanDbApply xs in (bt:ys, zs)
203 | xs -> ([], xs)
204 |
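To make the draining loop concrete: spanDbApply peels off the longest leading
run of DbApplyBlock actions so those blocks go into a single transaction,
leaving rollbacks and DbFinish to be handled one at a time. A sketch of that
invariant (it would have to live in this module, since spanDbApply is not
exported; bd1 and bd2 are placeholders for arbitrary BlockDetails values):

    bd1, bd2 :: BlockDetails
    (bd1, bd2) = undefined  -- placeholders; any two BlockDetails values

    -- The prefix holds both block applications and the suffix is exactly
    -- [DbFinish], so runActions inserts the two blocks in one transaction,
    -- then sees DbFinish and stops.
    splitExample :: ([BlockDetails], [DbAction])
    splitExample = spanDbApply [DbApplyBlock bd1, DbApplyBlock bd2, DbFinish]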
--------------------------------------------------------------------------------
/smash/src/Cardano/SMASH/DBSync/Db/Migration.hs:
--------------------------------------------------------------------------------
1 | {-# LANGUAGE OverloadedStrings #-}
2 |
3 | module Cardano.SMASH.DBSync.Db.Migration
4 | ( SmashMigrationDir (..)
5 | , SmashLogFileDir (..)
6 | , createMigration
7 | , applyMigration
8 | , runMigrations
9 | , runSingleScript
10 | ) where
11 |
12 | import Cardano.Prelude
13 |
14 | import Control.Monad.Logger (NoLoggingT)
15 | import Control.Monad.Trans.Resource (runResourceT)
16 |
17 | import Cardano.BM.Trace (Trace, logInfo)
18 |
19 | import qualified Data.ByteString.Char8 as BS
20 | import Data.Conduit.Binary (sinkHandle)
21 | import Data.Conduit.Process (sourceCmdWithConsumer,
22 | system)
23 | import qualified Data.List as List
24 | import qualified Data.Text as Text
25 | import qualified Data.Text.IO as Text
26 | import Data.Time.Clock (getCurrentTime)
27 | import Data.Time.Format (defaultTimeLocale,
28 | formatTime,
29 | iso8601DateFormat)
30 |
31 | import Database.Persist.Sql (SqlBackend,
32 | SqlPersistT,
33 | entityVal,
34 | getMigration,
35 | selectFirst)
36 |
37 | import Cardano.SMASH.DBSync.Db.Migration.Haskell
38 | import Cardano.SMASH.DBSync.Db.Migration.Version
39 | import Cardano.SMASH.DBSync.Db.PGConfig
40 | import Cardano.SMASH.DBSync.Db.Run
41 | import Cardano.SMASH.DBSync.Db.Schema
42 |
43 | import System.Directory (listDirectory)
44 | import System.FilePath (takeFileName, (</>))
45 | import System.IO (hClose, hFlush,
46 | hPrint)
47 |
48 |
49 |
50 | newtype SmashMigrationDir
51 | = SmashMigrationDir { getSmashMigrationDir :: FilePath }
52 | deriving (Show)
53 |
54 | newtype SmashLogFileDir
55 | = SmashLogFileDir FilePath
56 | deriving (Show)
57 |
58 | -- | Run the migrations in the provided 'SmashMigrationDir' and write a date-stamped
59 | -- log file to 'SmashLogFileDir'.
60 | runMigrations :: Trace IO Text -> (PGConfig -> PGConfig) -> SmashMigrationDir -> Maybe SmashLogFileDir -> IO ()
61 | runMigrations tracer cfgOverride migrationDir mLogfiledir = do
62 | pgconfig <- cfgOverride <$> readPGPassFileEnv
63 | scripts <- getMigrationScripts migrationDir
64 |
65 | case mLogfiledir of
66 | Nothing -> do
67 | logInfo tracer "Running."
68 | forM_ scripts $ applyMigration tracer pgconfig Nothing stdout
69 | logInfo tracer "Success!"
70 |
71 | Just logfiledir -> do
72 | logInfo tracer $ "Running with logfile directory: " <> show logfiledir
73 | logFilename <- genLogFilename logfiledir
74 | bracket (openFile logFilename AppendMode) hClose $ \logHandle -> do
75 | logInfo tracer "Running."
76 | forM_ scripts $ applyMigration tracer pgconfig (Just logFilename) logHandle
77 | logInfo tracer "Success!"
78 | where
79 | genLogFilename :: SmashLogFileDir -> IO FilePath
80 | genLogFilename (SmashLogFileDir logdir) =
81 | (logdir </>)
82 | . formatTime defaultTimeLocale ("migrate-" ++ iso8601DateFormat (Just "%H%M%S") ++ ".log")
83 | <$> getCurrentTime
84 |
85 | -- A simple way to run a single script
86 | runSingleScript :: Trace IO Text -> PGConfig -> FilePath -> IO ()
87 | runSingleScript tracer pgConfig script = do
88 | -- This assumes that the credentials for 'psql' are already sorted out.
89 | -- One way to achieve this is via a 'PGPASSFILE' environment variable
90 | -- as per the PostgreSQL documentation.
91 | let command =
92 | List.intercalate " "
93 | [ "psql"
94 | , BS.unpack (pgcDbname pgConfig)
95 | , "--no-password"
96 | , "--quiet"
97 | , "--username=" <> BS.unpack (pgcUser pgConfig)
98 | , "--host=" <> BS.unpack (pgcHost pgConfig)
99 | , "--port=" <> BS.unpack (pgcPort pgConfig)
100 | , "--no-psqlrc" -- Ignore the ~/.psqlrc file.
101 | , "--single-transaction" -- Run the file as a transaction.
102 | , "--set ON_ERROR_STOP=on" -- Exit with non-zero on error.
103 | , "--file='" ++ script ++ "'"
104 | , "2>&1" -- Pipe stderr to stdout.
105 | ]
106 |
107 | logInfo tracer $ toS $ "Running: " ++ takeFileName script
108 |
109 | hFlush stdout
110 | exitCode <- system command
111 |
112 | case exitCode of
113 | ExitSuccess ->
114 | logInfo tracer "ExitSuccess."
115 | ExitFailure _ -> do
116 | print exitCode
117 | exitFailure
118 |
119 | applyMigration :: Trace IO Text -> PGConfig -> Maybe FilePath -> Handle -> (MigrationVersion, FilePath) -> IO ()
120 | applyMigration tracer pgconfig mLogFilename logHandle (version, script) = do
121 | -- This assumes that the credentials for 'psql' are already sorted out.
122 | -- One way to achieve this is via a 'PGPASSFILE' environment variable
123 | -- as per the PostgreSQL documentation.
124 | let command =
125 | List.intercalate " "
126 | [ "psql"
127 | , BS.unpack (pgcDbname pgconfig)
128 | , "--no-password"
129 | , "--quiet"
130 | , "--username=" <> BS.unpack (pgcUser pgconfig)
131 | , "--host=" <> BS.unpack (pgcHost pgconfig)
132 | , "--port=" <> BS.unpack (pgcPort pgconfig)
133 | , "--no-psqlrc" -- Ignore the ~/.psqlrc file.
134 | , "--single-transaction" -- Run the file as a transaction.
135 | , "--set ON_ERROR_STOP=on" -- Exit with non-zero on error.
136 | , "--file='" ++ script ++ "'"
137 | , "2>&1" -- Pipe stderr to stdout.
138 | ]
139 | logInfo tracer $ toS $ "Running: " ++ takeFileName script
140 |
141 | hFlush stdout
142 | exitCode <- fst <$> handle (errorExit :: SomeException -> IO a)
143 | (runResourceT $ sourceCmdWithConsumer command (sinkHandle logHandle))
144 | case exitCode of
145 | ExitSuccess -> do
146 | logInfo tracer "ExitSuccess."
147 | runHaskellMigration logHandle version
148 | ExitFailure _ -> errorExit exitCode
149 | where
150 | errorExit :: Show e => e -> IO a
151 | errorExit e = do
152 | print e
153 | hPrint logHandle e
154 | case mLogFilename of
155 | Nothing -> pure ()
156 | Just logFilename -> putStrLn $ "\nErrors in file: " ++ logFilename ++ "\n"
157 | exitFailure
158 |
159 | -- | Create a database migration (using functionality built into Persistent). If no
160 | -- migration is needed return 'Nothing', otherwise write the script and return its file name.
161 | createMigration :: SmashMigrationDir -> IO (Maybe FilePath)
162 | createMigration (SmashMigrationDir migdir) = do
163 | mt <- runDbNoLogging create
164 | case mt of
165 | Nothing -> pure Nothing
166 | Just (ver, mig) -> do
167 | let fname = toS $ renderMigrationVersionFile ver
168 | Text.writeFile (migdir </> fname) mig
169 | pure $ Just fname
170 | where
171 | create :: ReaderT SqlBackend (NoLoggingT IO) (Maybe (MigrationVersion, Text))
172 | create = do
173 | ver <- getSchemaVersion
174 | statements <- getMigration migrateCardanoDb
175 | if null statements
176 | then pure Nothing
177 | else do
178 | nextVer <- liftIO $ nextMigrationVersion ver
179 | pure $ Just (nextVer, genScript statements (mvVersion nextVer))
180 |
181 | genScript :: [Text] -> Int -> Text
182 | genScript statements next_version =
183 | Text.concat $
184 | [ "-- Persistent generated migration.\n\n"
185 | , "CREATE FUNCTION migrate() RETURNS void AS $$\n"
186 | , "DECLARE\n"
187 | , " next_version int ;\n"
188 | , "BEGIN\n"
189 | , " SELECT stage_two + 1 INTO next_version FROM schema_version ;\n"
190 | , " IF next_version = " <> textShow next_version <> " THEN\n"
191 | ]
192 | ++ concatMap buildStatement statements ++
193 | [ " -- Hand written SQL statements can be added here.\n"
194 | , " UPDATE schema_version SET stage_two = ", textShow next_version, " ;\n"
195 | , " RAISE NOTICE 'DB has been migrated to stage_two version %', next_version ;\n"
196 | , " END IF ;\n"
197 | , "END ;\n"
198 | , "$$ LANGUAGE plpgsql ;\n\n"
199 | , "SELECT migrate() ;\n\n"
200 | , "DROP FUNCTION migrate() ;\n"
201 | ]
202 |
203 | buildStatement :: Text -> [Text]
204 | buildStatement sql = [" ", sql, ";\n"]
205 |
206 | getSchemaVersion :: SqlPersistT (NoLoggingT IO) MigrationVersion
207 | getSchemaVersion = do
208 | res <- selectFirst [] []
209 | case res of
210 | Nothing -> panic "getSchemaVersion failed!"
211 | Just x -> do
212 | -- Only interested in the stage2 version because that is the only stage for
213 | -- which Persistent migrations are generated.
214 | let (SchemaVersion _ stage2 _) = entityVal x
215 | pure $ MigrationVersion 2 stage2 0
216 |
217 | --------------------------------------------------------------------------------
218 |
219 | getMigrationScripts :: SmashMigrationDir -> IO [(MigrationVersion, FilePath)]
220 | getMigrationScripts (SmashMigrationDir location) = do
221 | files <- listDirectory location
222 | let xs = map addVersionString (List.sort $ List.filter isMigrationScript files)
223 | case partitionEithers xs of
224 | ([], rs) -> pure rs
225 | (ls, _) -> panic (toS $ "getMigrationScripts: Unable to parse " ++ show ls)
226 | where
227 | isMigrationScript :: FilePath -> Bool
228 | isMigrationScript fp =
229 | List.isPrefixOf "migration-" fp && List.isSuffixOf ".sql" fp
230 |
231 | addVersionString :: FilePath -> Either FilePath (MigrationVersion, FilePath)
232 | addVersionString fp =
233 | maybe (Left fp) (\mv -> Right (mv, location </> fp)) (parseMigrationVersionFromFile $ toS fp)
234 |
235 | textShow :: Show a => a -> Text
236 | textShow = Text.pack . show
237 |
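The scripts under the schema directory follow the naming scheme
migration-<stage>-<version>-<yyyymmdd>.sql (e.g. migration-2-0007-20210114.sql),
which is what parseMigrationVersionFromFile from the Migration.Version module
decodes. A self-contained sketch of that convention, assuming the three numeric
fields and this module's OverloadedStrings pragma:

    import qualified Data.Text as Text
    import Text.Read (readMaybe)

    -- "migration-2-0007-20210114.sql" ==> Just (2, 7, 20210114)
    parseVersionSketch :: FilePath -> Maybe (Int, Int, Int)
    parseVersionSketch fp =
      case Text.splitOn "-" (Text.pack (takeWhile (/= '.') fp)) of
        ["migration", stage, ver, day] ->
          (,,) <$> readInt stage <*> readInt ver <*> readInt day
        _ -> Nothing
      where
        readInt = readMaybe . Text.unpack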
--------------------------------------------------------------------------------
/smash/LICENSE:
--------------------------------------------------------------------------------
1 |
2 | Apache License
3 | Version 2.0, January 2004
4 | http://www.apache.org/licenses/
5 |
6 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
7 |
8 | 1. Definitions.
9 |
10 | "License" shall mean the terms and conditions for use, reproduction,
11 | and distribution as defined by Sections 1 through 9 of this document.
12 |
13 | "Licensor" shall mean the copyright owner or entity authorized by
14 | the copyright owner that is granting the License.
15 |
16 | "Legal Entity" shall mean the union of the acting entity and all
17 | other entities that control, are controlled by, or are under common
18 | control with that entity. For the purposes of this definition,
19 | "control" means (i) the power, direct or indirect, to cause the
20 | direction or management of such entity, whether by contract or
21 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
22 | outstanding shares, or (iii) beneficial ownership of such entity.
23 |
24 | "You" (or "Your") shall mean an individual or Legal Entity
25 | exercising permissions granted by this License.
26 |
27 | "Source" form shall mean the preferred form for making modifications,
28 | including but not limited to software source code, documentation
29 | source, and configuration files.
30 |
31 | "Object" form shall mean any form resulting from mechanical
32 | transformation or translation of a Source form, including but
33 | not limited to compiled object code, generated documentation,
34 | and conversions to other media types.
35 |
36 | "Work" shall mean the work of authorship, whether in Source or
37 | Object form, made available under the License, as indicated by a
38 | copyright notice that is included in or attached to the work
39 | (an example is provided in the Appendix below).
40 |
41 | "Derivative Works" shall mean any work, whether in Source or Object
42 | form, that is based on (or derived from) the Work and for which the
43 | editorial revisions, annotations, elaborations, or other modifications
44 | represent, as a whole, an original work of authorship. For the purposes
45 | of this License, Derivative Works shall not include works that remain
46 | separable from, or merely link (or bind by name) to the interfaces of,
47 | the Work and Derivative Works thereof.
48 |
49 | "Contribution" shall mean any work of authorship, including
50 | the original version of the Work and any modifications or additions
51 | to that Work or Derivative Works thereof, that is intentionally
52 | submitted to Licensor for inclusion in the Work by the copyright owner
53 | or by an individual or Legal Entity authorized to submit on behalf of
54 | the copyright owner. For the purposes of this definition, "submitted"
55 | means any form of electronic, verbal, or written communication sent
56 | to the Licensor or its representatives, including but not limited to
57 | communication on electronic mailing lists, source code control systems,
58 | and issue tracking systems that are managed by, or on behalf of, the
59 | Licensor for the purpose of discussing and improving the Work, but
60 | excluding communication that is conspicuously marked or otherwise
61 | designated in writing by the copyright owner as "Not a Contribution."
62 |
63 | "Contributor" shall mean Licensor and any individual or Legal Entity
64 | on behalf of whom a Contribution has been received by Licensor and
65 | subsequently incorporated within the Work.
66 |
67 | 2. Grant of Copyright License. Subject to the terms and conditions of
68 | this License, each Contributor hereby grants to You a perpetual,
69 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
70 | copyright license to reproduce, prepare Derivative Works of,
71 | publicly display, publicly perform, sublicense, and distribute the
72 | Work and such Derivative Works in Source or Object form.
73 |
74 | 3. Grant of Patent License. Subject to the terms and conditions of
75 | this License, each Contributor hereby grants to You a perpetual,
76 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
77 | (except as stated in this section) patent license to make, have made,
78 | use, offer to sell, sell, import, and otherwise transfer the Work,
79 | where such license applies only to those patent claims licensable
80 | by such Contributor that are necessarily infringed by their
81 | Contribution(s) alone or by combination of their Contribution(s)
82 | with the Work to which such Contribution(s) was submitted. If You
83 | institute patent litigation against any entity (including a
84 | cross-claim or counterclaim in a lawsuit) alleging that the Work
85 | or a Contribution incorporated within the Work constitutes direct
86 | or contributory patent infringement, then any patent licenses
87 | granted to You under this License for that Work shall terminate
88 | as of the date such litigation is filed.
89 |
90 | 4. Redistribution. You may reproduce and distribute copies of the
91 | Work or Derivative Works thereof in any medium, with or without
92 | modifications, and in Source or Object form, provided that You
93 | meet the following conditions:
94 |
95 | (a) You must give any other recipients of the Work or
96 | Derivative Works a copy of this License; and
97 |
98 | (b) You must cause any modified files to carry prominent notices
99 | stating that You changed the files; and
100 |
101 | (c) You must retain, in the Source form of any Derivative Works
102 | that You distribute, all copyright, patent, trademark, and
103 | attribution notices from the Source form of the Work,
104 | excluding those notices that do not pertain to any part of
105 | the Derivative Works; and
106 |
107 | (d) If the Work includes a "NOTICE" text file as part of its
108 | distribution, then any Derivative Works that You distribute must
109 | include a readable copy of the attribution notices contained
110 | within such NOTICE file, excluding those notices that do not
111 | pertain to any part of the Derivative Works, in at least one
112 | of the following places: within a NOTICE text file distributed
113 | as part of the Derivative Works; within the Source form or
114 | documentation, if provided along with the Derivative Works; or,
115 | within a display generated by the Derivative Works, if and
116 | wherever such third-party notices normally appear. The contents
117 | of the NOTICE file are for informational purposes only and
118 | do not modify the License. You may add Your own attribution
119 | notices within Derivative Works that You distribute, alongside
120 | or as an addendum to the NOTICE text from the Work, provided
121 | that such additional attribution notices cannot be construed
122 | as modifying the License.
123 |
124 | You may add Your own copyright statement to Your modifications and
125 | may provide additional or different license terms and conditions
126 | for use, reproduction, or distribution of Your modifications, or
127 | for any such Derivative Works as a whole, provided Your use,
128 | reproduction, and distribution of the Work otherwise complies with
129 | the conditions stated in this License.
130 |
131 | 5. Submission of Contributions. Unless You explicitly state otherwise,
132 | any Contribution intentionally submitted for inclusion in the Work
133 | by You to the Licensor shall be under the terms and conditions of
134 | this License, without any additional terms or conditions.
135 | Notwithstanding the above, nothing herein shall supersede or modify
136 | the terms of any separate license agreement you may have executed
137 | with Licensor regarding such Contributions.
138 |
139 | 6. Trademarks. This License does not grant permission to use the trade
140 | names, trademarks, service marks, or product names of the Licensor,
141 | except as required for reasonable and customary use in describing the
142 | origin of the Work and reproducing the content of the NOTICE file.
143 |
144 | 7. Disclaimer of Warranty. Unless required by applicable law or
145 | agreed to in writing, Licensor provides the Work (and each
146 | Contributor provides its Contributions) on an "AS IS" BASIS,
147 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
148 | implied, including, without limitation, any warranties or conditions
149 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
150 | PARTICULAR PURPOSE. You are solely responsible for determining the
151 | appropriateness of using or redistributing the Work and assume any
152 | risks associated with Your exercise of permissions under this License.
153 |
154 | 8. Limitation of Liability. In no event and under no legal theory,
155 | whether in tort (including negligence), contract, or otherwise,
156 | unless required by applicable law (such as deliberate and grossly
157 | negligent acts) or agreed to in writing, shall any Contributor be
158 | liable to You for damages, including any direct, indirect, special,
159 | incidental, or consequential damages of any character arising as a
160 | result of this License or out of the use or inability to use the
161 | Work (including but not limited to damages for loss of goodwill,
162 | work stoppage, computer failure or malfunction, or any and all
163 | other commercial damages or losses), even if such Contributor
164 | has been advised of the possibility of such damages.
165 |
166 | 9. Accepting Warranty or Additional Liability. While redistributing
167 | the Work or Derivative Works thereof, You may choose to offer,
168 | and charge a fee for, acceptance of support, warranty, indemnity,
169 | or other liability obligations and/or rights consistent with this
170 | License. However, in accepting such obligations, You may act only
171 | on Your own behalf and on Your sole responsibility, not on behalf
172 | of any other Contributor, and only if You agree to indemnify,
173 | defend, and hold each Contributor harmless for any liability
174 | incurred by, or claims asserted against, such Contributor by reason
175 | of your accepting any such warranty or additional liability.
176 |
177 | END OF TERMS AND CONDITIONS
178 |
179 | APPENDIX: How to apply the Apache License to your work.
180 |
181 | To apply the Apache License to your work, attach the following
182 | boilerplate notice, with the fields enclosed by brackets "[]"
183 | replaced with your own identifying information. (Don't include
184 | the brackets!) The text should be enclosed in the appropriate
185 | comment syntax for the file format. We also recommend that a
186 | file or class name and description of purpose be included on the
187 | same "printed page" as the copyright notice for easier
188 | identification within third-party archives.
189 |
190 | Copyright [yyyy] [name of copyright owner]
191 |
192 | Licensed under the Apache License, Version 2.0 (the "License");
193 | you may not use this file except in compliance with the License.
194 | You may obtain a copy of the License at
195 |
196 | http://www.apache.org/licenses/LICENSE-2.0
197 |
198 | Unless required by applicable law or agreed to in writing, software
199 | distributed under the License is distributed on an "AS IS" BASIS,
200 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
201 | See the License for the specific language governing permissions and
202 | limitations under the License.
203 |
--------------------------------------------------------------------------------
/smash-servant-types/LICENSE:
--------------------------------------------------------------------------------
(Apache License, Version 2.0; contents identical to /smash/LICENSE above.)
--------------------------------------------------------------------------------