├── assets
│   └── logo.png
├── checkpoints
│   ├── cmamba_v.ckpt
│   └── cmamba_nv.ckpt
├── requirements.txt
├── configs
│   ├── models
│   │   ├── GRU
│   │   │   ├── gru_nv.yaml
│   │   │   └── gru_v.yaml
│   │   ├── LSTM
│   │   │   ├── lstm_bi_nv.yaml
│   │   │   ├── lstm_bi_v.yaml
│   │   │   ├── lstm_nv.yaml
│   │   │   └── lstm_v.yaml
│   │   ├── CryptoMamba
│   │   │   ├── v1.yaml
│   │   │   └── v2.yaml
│   │   ├── SMamba
│   │   │   ├── smamba_nv.yaml
│   │   │   └── smamba_v.yaml
│   │   ├── iTransformer
│   │   │   ├── itransformer_v.yaml
│   │   │   └── itransformer_nv.yaml
│   │   └── archs.yaml
│   ├── training
│   │   ├── gru_v.yaml
│   │   ├── gru_nv.yaml
│   │   ├── lstm_nv.yaml
│   │   ├── lstm_v.yaml
│   │   ├── lstm_bi_v.yaml
│   │   ├── smamba_v.yaml
│   │   ├── lstm_bi_nv.yaml
│   │   ├── smamba_nv.yaml
│   │   ├── itransformer_nv.yaml
│   │   ├── itransformer_v.yaml
│   │   ├── cmamba_v.yaml
│   │   └── cmamba_nv.yaml
│   └── data_configs
│       └── mode_1.yaml
├── data
│   ├── 2018-09-17_2024-09-16_86400
│   │   ├── config.pkl
│   │   ├── test.csv
│   │   └── val.csv
│   └── one_day_pred.csv
├── models
│   ├── gru.py
│   ├── lstm.py
│   ├── layers
│   │   ├── Mamba_EncDec.py
│   │   ├── Transformer_EncDec.py
│   │   ├── Embed.py
│   │   └── SelfAttention_Family.py
│   ├── iTransformer.py
│   ├── smamba.py
│   └── cmamba.py
├── LICENSE
├── data_utils
│   ├── data_transforms.py
│   └── dataset.py
├── pl_modules
│   ├── gru_module.py
│   ├── lstm_module.py
│   ├── smamba_module.py
│   ├── itransformer_module.py
│   ├── cmamba_module.py
│   ├── hcmamba_module.py
│   ├── data_module.py
│   └── base_module.py
├── utils
│   ├── io_tools.py
│   └── trade.py
├── example.ipynb
├── README.md
└── scripts
    ├── training.py
    ├── one_day_pred.py
    ├── evaluation.py
    └── simulate_trade.py
/assets/logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MShahabSepehri/CryptoMamba/HEAD/assets/logo.png
--------------------------------------------------------------------------------
/checkpoints/cmamba_v.ckpt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MShahabSepehri/CryptoMamba/HEAD/checkpoints/cmamba_v.ckpt
--------------------------------------------------------------------------------
/checkpoints/cmamba_nv.ckpt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MShahabSepehri/CryptoMamba/HEAD/checkpoints/cmamba_nv.ckpt
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | torch
2 | torchvision
3 | torchaudio
4 | setuptools
5 | pyaml
6 | lightning
7 | matplotlib
8 | seaborn
9 | pandas
10 | einops
11 | tensorboard
12 | mamba-ssm[causal-conv1d]
13 |
--------------------------------------------------------------------------------
/configs/models/GRU/gru_nv.yaml:
--------------------------------------------------------------------------------
1 | target: pl_modules.gru_module.GRUModule
2 | params:
3 | num_features: 5
4 | hidden_size: 100
5 | num_layers: 3
6 | window_size: 14
7 | loss: 'mse'
8 | normalize: True
--------------------------------------------------------------------------------
/configs/models/GRU/gru_v.yaml:
--------------------------------------------------------------------------------
1 | target: pl_modules.gru_module.GRUModule
2 | params:
3 | num_features: 6
4 | hidden_size: 100
5 | num_layers: 3
6 | window_size: 14
7 | loss: 'mse'
8 | normalize: True
--------------------------------------------------------------------------------
/configs/models/LSTM/lstm_bi_nv.yaml:
--------------------------------------------------------------------------------
1 | target: pl_modules.lstm_module.LSTMModule
2 | params:
3 | num_features: 5
4 | hidden_size: 100
5 | num_layers: 3
6 | bidirectional: True
7 | window_size: 14
8 | normalize: True
--------------------------------------------------------------------------------
/configs/models/CryptoMamba/v1.yaml:
--------------------------------------------------------------------------------
1 | target: pl_modules.cmamba_module.CryptoMambaModule
2 | params:
3 | num_features: 5
4 | hidden_dims: [14, 16, 32, 1]
5 | d_states: 64
6 | layer_density: 4
7 | loss: 'rmse'
8 | normalize: False
--------------------------------------------------------------------------------
/configs/models/CryptoMamba/v2.yaml:
--------------------------------------------------------------------------------
1 | target: pl_modules.cmamba_module.CryptoMambaModule
2 | params:
3 | num_features: 6
4 | hidden_dims: [14, 16, 32, 1]
5 | d_states: 64
6 | layer_density: 4
7 | loss: 'rmse'
8 | normalize: False
--------------------------------------------------------------------------------
/configs/models/LSTM/lstm_bi_v.yaml:
--------------------------------------------------------------------------------
1 | target: pl_modules.lstm_module.LSTMModule
2 | params:
3 | num_features: 6
4 | hidden_size: 100
5 | num_layers: 3
6 | bidirectional: True
7 | window_size: 14
8 | loss: 'mse'
9 | normalize: True
--------------------------------------------------------------------------------
/configs/models/LSTM/lstm_nv.yaml:
--------------------------------------------------------------------------------
1 | target: pl_modules.lstm_module.LSTMModule
2 | params:
3 | num_features: 5
4 | hidden_size: 100
5 | num_layers: 3
6 | bidirectional: False
7 | window_size: 14
8 | loss: 'mse'
9 | normalize: True
--------------------------------------------------------------------------------
/configs/models/LSTM/lstm_v.yaml:
--------------------------------------------------------------------------------
1 | target: pl_modules.lstm_module.LSTMModule
2 | params:
3 | num_features: 6
4 | hidden_size: 100
5 | num_layers: 3
6 | bidirectional: False
7 | window_size: 14
8 | loss: 'mse'
9 | normalize: True
--------------------------------------------------------------------------------
/configs/training/gru_v.yaml:
--------------------------------------------------------------------------------
1 | data_config: 'mode_1'
2 | model: 'GRU_v'
3 |
4 | name: 'gru'
5 | max_epochs: 1000
6 | use_volume: True
7 |
8 | hyperparams:
9 | optimizer: 'adam'
10 | lr: 0.005
11 | lr_step_size: 100
12 | lr_gamma: 0.5
13 | weight_decay: 0.00001
--------------------------------------------------------------------------------
/configs/training/gru_nv.yaml:
--------------------------------------------------------------------------------
1 | data_config: 'mode_1'
2 | model: 'GRU_nv'
3 |
4 | name: 'gru'
5 | max_epochs: 700
6 | use_volume: False
7 |
8 | hyperparams:
9 | optimizer: 'adam'
10 | lr: 0.005
11 | lr_step_size: 100
12 | lr_gamma: 0.5
13 | weight_decay: 0.00001
--------------------------------------------------------------------------------
/configs/training/lstm_nv.yaml:
--------------------------------------------------------------------------------
1 | data_config: 'mode_1'
2 | model: 'LSTM_nv'
3 |
4 | name: 'lstm'
5 | max_epochs: 700
6 | use_volume: False
7 |
8 | hyperparams:
9 | optimizer: 'adam'
10 | lr: 0.001
11 | lr_step_size: 100
12 | lr_gamma: 0.5
13 | weight_decay: 0.000005
--------------------------------------------------------------------------------
/configs/training/lstm_v.yaml:
--------------------------------------------------------------------------------
1 | data_config: 'mode_1'
2 | model: 'LSTM_v'
3 |
4 | name: 'lstm'
5 | max_epochs: 700
6 | use_volume: True
7 |
8 | hyperparams:
9 | optimizer: 'adam'
10 | lr: 0.001
11 | lr_step_size: 100
12 | lr_gamma: 0.5
13 | weight_decay: 0.000003
--------------------------------------------------------------------------------
/configs/training/lstm_bi_v.yaml:
--------------------------------------------------------------------------------
1 | data_config: 'mode_1'
2 | model: 'LSTM_bi_v'
3 |
4 | name: 'lstm'
5 | max_epochs: 700
6 | use_volume: True
7 |
8 | hyperparams:
9 | optimizer: 'adam'
10 | lr: 0.001
11 | lr_step_size: 100
12 | lr_gamma: 0.1
13 | weight_decay: 0.000001
--------------------------------------------------------------------------------
/configs/training/smamba_v.yaml:
--------------------------------------------------------------------------------
1 | data_config: 'mode_1'
2 | model: 'SMamba_v'
3 |
4 | name: 'smamba'
5 | max_epochs: 1100
6 | use_volume: True
7 |
8 | hyperparams:
9 | optimizer: 'adam'
10 | lr: 0.001
11 | lr_step_size: 100
12 | lr_gamma: 0.5
13 | weight_decay: 0.000003
--------------------------------------------------------------------------------
/configs/training/lstm_bi_nv.yaml:
--------------------------------------------------------------------------------
1 | data_config: 'mode_1'
2 | model: 'LSTM_bi_nv'
3 |
4 | name: 'lstm'
5 | max_epochs: 700
6 | use_volume: False
7 |
8 | hyperparams:
9 | optimizer: 'adam'
10 | lr: 0.001
11 | lr_step_size: 100
12 | lr_gamma: 0.5
13 | weight_decay: 0.00001
--------------------------------------------------------------------------------
/configs/training/smamba_nv.yaml:
--------------------------------------------------------------------------------
1 | data_config: 'mode_1'
2 | model: 'SMamba_nv'
3 |
4 | name: 'smamba'
5 | max_epochs: 1000
6 | use_volume: False
7 |
8 | hyperparams:
9 | optimizer: 'adam'
10 | lr: 0.001
11 | lr_step_size: 100
12 | lr_gamma: 0.5
13 | weight_decay: 0.000003
--------------------------------------------------------------------------------
/configs/training/itransformer_nv.yaml:
--------------------------------------------------------------------------------
1 | data_config: 'mode_1'
2 | model: 'iTransformer_nv'
3 |
4 | name: 'itransformer'
5 | max_epochs: 1000
6 | use_volume: False
7 |
8 | hyperparams:
9 | optimizer: 'adam'
10 | lr: 0.002
11 | lr_step_size: 100
12 | lr_gamma: 0.5
13 | weight_decay: 0.00001
--------------------------------------------------------------------------------
/configs/training/itransformer_v.yaml:
--------------------------------------------------------------------------------
1 | data_config: 'mode_1'
2 | model: 'iTransformer_v'
3 |
4 | name: 'itransformer'
5 | max_epochs: 1000
6 | use_volume: True
7 |
8 | hyperparams:
9 | optimizer: 'adam'
10 | lr: 0.002
11 | lr_step_size: 100
12 | lr_gamma: 0.5
13 | weight_decay: 0.00001
--------------------------------------------------------------------------------
/configs/training/cmamba_v.yaml:
--------------------------------------------------------------------------------
1 | data_config: 'mode_1'
2 | model: 'CMamba_v2'
3 |
4 | name: 'CMamba'
5 | max_epochs: 1000
6 | use_volume: True
7 | additional_features: []
8 |
9 | hyperparams:
10 | optimizer: 'adam'
11 | lr: 0.01
12 | lr_step_size: 100
13 | lr_gamma: 0.5
14 | weight_decay: 0.001
--------------------------------------------------------------------------------
/configs/training/cmamba_nv.yaml:
--------------------------------------------------------------------------------
1 | data_config: 'mode_1'
2 | model: 'CMamba_v1'
3 |
4 | name: 'CMamba'
5 | max_epochs: 1100
6 | use_volume: False
7 | additional_features: []
8 |
9 | hyperparams:
10 | optimizer: 'adam'
11 | lr: 0.01
12 | lr_step_size: 100
13 | lr_gamma: 0.5
14 | weight_decay: 0.0005
--------------------------------------------------------------------------------
/configs/models/SMamba/smamba_nv.yaml:
--------------------------------------------------------------------------------
1 | target: pl_modules.smamba_module.SMambaModule
2 | params:
3 | num_features: 5
4 | seq_len: 14
5 | use_norm: True
6 | d_model: 128
7 | d_state: 32
8 | d_ff: 128
9 | dropout: 0.1
10 | activation: 'gelu'
11 | e_layers: 2
12 | loss: 'mse'
13 | normalize: False
--------------------------------------------------------------------------------
/configs/models/SMamba/smamba_v.yaml:
--------------------------------------------------------------------------------
1 | target: pl_modules.smamba_module.SMambaModule
2 | params:
3 | num_features: 6
4 | seq_len: 14
5 | use_norm: True
6 | d_model: 128
7 | d_state: 32
8 | d_ff: 128
9 | dropout: 0.1
10 | activation: 'gelu'
11 | e_layers: 2
12 | loss: 'mse'
13 | normalize: False
--------------------------------------------------------------------------------
/configs/data_configs/mode_1.yaml:
--------------------------------------------------------------------------------
1 | jumps: 86400
2 | start_date: "2018-09-17"
3 | end_date: "2024-09-16"
4 | root: "./data"
5 | date_format: "%Y-%m-%d"
6 | data_path: "./data/one_day_pred.csv"
7 | train_interval: ["2018-09-17", "2022-09-17"]
8 | val_interval: ["2022-09-17", "2023-09-17"]
9 | test_interval: ["2023-09-17", "2024-09-17"]
10 | additional_features: []
--------------------------------------------------------------------------------
/data/2018-09-17_2024-09-16_86400/config.pkl:
--------------------------------------------------------------------------------
1 | data_path: ./Dataset_daily.csv
2 | end_date: '2024-16-09'
3 | jumps: 1440
4 | root: ./data
5 | start_date: '2018-17-09'
6 | test_interval:
7 | - '2023-17-09'
8 | - '2024-17-09'
9 | train_interval:
10 | - '2018-17-09'
11 | - '2022-17-09'
12 | val_interval:
13 | - '2022-17-09'
14 | - '2023-17-09'
15 | window_size: 14
16 |
--------------------------------------------------------------------------------
/configs/models/iTransformer/itransformer_v.yaml:
--------------------------------------------------------------------------------
1 | target: pl_modules.itransformer_module.iTransformerModule
2 | params:
3 | num_features: 6
4 | seq_len: 14
5 | use_norm: True
6 | d_model: 128
7 | d_ff: 128
8 | dropout: 0
9 | activation: 'gelu'
10 | e_layers: 2
11 | output_attention: True
12 | factor: 1
13 | n_heads: 16
14 | loss: 'mse'
15 | normalize: False
--------------------------------------------------------------------------------
/configs/models/iTransformer/itransformer_nv.yaml:
--------------------------------------------------------------------------------
1 | target: pl_modules.itransformer_module.iTransformerModule
2 | params:
3 | num_features: 5
4 | seq_len: 14
5 | use_norm: True
6 | d_model: 128
7 | d_ff: 128
8 | dropout: 0
9 | activation: 'gelu'
10 | e_layers: 2
11 | output_attention: True
12 | factor: 1
13 | n_heads: 16
14 | loss: 'mse'
15 | normalize: False
--------------------------------------------------------------------------------
/models/gru.py:
--------------------------------------------------------------------------------
1 | from torch import nn
2 |
3 | class GRUModel(nn.Module):
4 | def __init__(self, num_features=5, hidden_size=50, num_layers=2):
5 | super(GRUModel, self).__init__()
6 | self.gru = nn.GRU(num_features, hidden_size, num_layers, batch_first=True)
7 | self.fc = nn.Linear(hidden_size, 1)
8 |
9 | def forward(self, x):
10 | x = x.permute(0, 2, 1)
11 | output, _ = self.gru(x)
12 | output = self.fc(output[:, -1, :])
13 | return output
--------------------------------------------------------------------------------
/configs/models/archs.yaml:
--------------------------------------------------------------------------------
1 | CMamba_v1: 'CryptoMamba/v1.yaml'
2 | CMamba_v2: 'CryptoMamba/v2.yaml'
3 | CMamba_v3: 'CryptoMamba/v3.yaml'
4 | CMamba_v4: 'CryptoMamba/v4.yaml'
5 | CMamba_v5: 'CryptoMamba/v5.yaml'
6 | CMamba_test: 'CryptoMamba/test.yaml'
7 | LSTM_bi_nv: 'LSTM/lstm_bi_nv.yaml'
8 | LSTM_bi_v: 'LSTM/lstm_bi_v.yaml'
9 | LSTM_nv: 'LSTM/lstm_nv.yaml'
10 | LSTM_v: 'LSTM/lstm_v.yaml'
11 | GRU_nv: 'GRU/gru_nv.yaml'
12 | GRU_v: 'GRU/gru_v.yaml'
13 | SMamba_nv: 'SMamba/smamba_nv.yaml'
14 | SMamba_v: 'SMamba/smamba_v.yaml'
15 | iTransformer_v: 'iTransformer/itransformer_v.yaml'
16 | iTransformer_nv: 'iTransformer/itransformer_nv.yaml'
--------------------------------------------------------------------------------
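The mapping above is what lets a training config refer to a model by name (for example `model: 'GRU_v'` in `configs/training/gru_v.yaml`). The following is a minimal sketch of how that name could resolve to a model YAML and then to a LightningModule, using the helpers defined in `utils/io_tools.py`; `scripts/training.py` is not included in this dump, so the exact wiring is an assumption.

```python
# Hedged sketch: resolve a training config's `model` name through archs.yaml
# to a model YAML, then build the module with instantiate_from_config.
# (scripts/training.py is not shown here, so this wiring is an assumption.)
from utils.io_tools import load_config_from_yaml, instantiate_from_config

train_cfg = load_config_from_yaml('configs/training/gru_v.yaml')    # model: 'GRU_v'
archs = load_config_from_yaml('configs/models/archs.yaml')          # 'GRU_v' -> 'GRU/gru_v.yaml'
model_cfg = load_config_from_yaml(f"configs/models/{archs[train_cfg['model']]}")

model = instantiate_from_config(model_cfg)  # -> pl_modules.gru_module.GRUModule
```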
/models/lstm.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from torch import nn
3 |
4 | class LSTM(nn.Module):
5 | def __init__(
6 | self,
7 | num_features=5,
8 | hidden_size=64,
9 | num_layers=1,
10 | bidirectional=False,
11 | ):
12 | super().__init__()
13 | # self.norm = norm_layer((num_features, window_size))
14 | self.lstm = nn.LSTM(input_size=num_features,
15 | hidden_size=hidden_size,
16 | num_layers=num_layers,
17 | batch_first=True,
18 | bidirectional=bidirectional)
19 | d = 2 if bidirectional else 1
20 | self.linear = nn.Linear(d * hidden_size, 1)
21 | # self.linear2 = nn.Linear(window_size, 1)
22 |
23 |
24 | def forward(self, x):
25 | # x = self.norm(x)
26 | x = x.permute(0, 2, 1)
27 | x, _ = self.lstm(x)
28 | x = self.linear(x[:, -1, :])
29 | # x = x.permute(0, 2, 1)
30 | # x = self.linear2(x)
31 | return nn.functional.tanh(x)
32 |
--------------------------------------------------------------------------------
/data/one_day_pred.csv:
--------------------------------------------------------------------------------
1 | Date,Open,High,Low,Close,Adj Close,Volume
2 | 2024-12-15,101373.53,105047.54,101227.03,104298.70,104298.70,51145914137
3 | 2024-12-14,101451.44,102618.88,100634.05,101372.97,101372.97,40422968793
4 | 2024-12-13,100046.65,101888.80,99233.28,101459.26,101459.26,56894751583
5 | 2024-12-12,101167.80,102524.91,99339.95,100043.00,100043.00,72073983533
6 | 2024-12-11,96656.06,101913.36,95747.23,101173.03,101173.03,85391409936
7 | 2024-12-10,97441.23,98270.16,94321.26,96675.43,96675.43,104823780634
8 | 2024-12-09,101237.06,101272.51,94355.91,97432.72,97432.72,110676473908
9 | 2024-12-08,99921.91,101399.99,98771.52,101236.02,101236.02,44125751925
10 | 2024-12-07,99916.71,100563.38,99030.88,99923.34,99923.34,44177510897
11 | 2024-12-06,97074.23,102039.88,96514.88,99920.71,99920.71,94534772658
12 | 2024-12-05,98741.54,103900.47,91998.78,96593.57,96593.57,149218945580
13 | 2024-12-04,95988.53,99207.33,94660.52,98768.53,98768.53,77199817112
14 | 2024-12-03,95854.59,96297.20,93629.56,96002.16,96002.16,67067810961
15 | 2024-12-02,97276.01,98152.60,94482.87,95865.30,95865.30,72680784305
16 | 2024-12-01,96461.34,97888.13,95770.19,97279.79,97279.79,36590695296
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2024 Mohammad Shahab Sepehri
4 |
5 | Copyright (c) 2024 Asal Mehradfar
6 |
7 | Permission is hereby granted, free of charge, to any person obtaining a copy
8 | of this software and associated documentation files (the "Software"), to deal
9 | in the Software without restriction, including without limitation the rights
10 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
11 | copies of the Software, and to permit persons to whom the Software is
12 | furnished to do so, subject to the following conditions:
13 |
14 | The above copyright notice and this permission notice shall be included in all
15 | copies or substantial portions of the Software.
16 |
17 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
20 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
23 | SOFTWARE.
24 |
--------------------------------------------------------------------------------
/data_utils/data_transforms.py:
--------------------------------------------------------------------------------
1 | import torch
2 |
3 |
4 | class DataTransform:
5 | def __init__(self, is_train, use_volume=False, additional_features=[]):
6 | self.is_train = is_train
7 | self.keys = ['Timestamp', 'Open', 'High', 'Low', 'Close']
8 | if use_volume:
9 | self.keys.append('Volume')
10 | self.keys += additional_features
11 | print(self.keys)
12 |
13 |
14 | def __call__(self, window):
15 | data_list = []
16 | output = {}
17 | if 'Timestamp_orig' in window.keys():
18 | self.keys.append('Timestamp_orig')
19 | for key in self.keys:
20 | data = torch.tensor(window.get(key).tolist())
21 | if key == 'Volume':
22 | data /= 1e9
23 | output[key] = data[-1]
24 | output[f'{key}_old'] = data[-2]
25 | if key == 'Timestamp_orig':
26 | continue
27 | data_list.append(data[:-1].reshape(1, -1))
28 | features = torch.cat(data_list, 0)
29 | output['features'] = features
30 | # raise ValueError(output)
31 | return output
32 |
33 | def set_initial_seed(self, seed):
34 | self.rng.seed(seed)
--------------------------------------------------------------------------------
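For reference, a small hypothetical window illustrates the structure `DataTransform.__call__` returns: one row of `features` per key (all but the last timestep), plus the last and second-to-last value of each key. Real windows presumably have `window_size + 1` rows; the 3-row frame and its values below are made up.

```python
# Hypothetical 3-row window (made-up values) to show what DataTransform returns.
import pandas as pd
from data_utils.data_transforms import DataTransform

window = pd.DataFrame({
    'Timestamp': [1.0, 2.0, 3.0],
    'Open':  [10.0, 11.0, 12.0],
    'High':  [11.0, 12.0, 13.0],
    'Low':   [ 9.0, 10.0, 11.0],
    'Close': [10.5, 11.5, 12.5],
})

sample = DataTransform(is_train=True)(window)
sample['features'].shape              # torch.Size([5, 2]): one row per key, all but the last timestep
sample['Close'], sample['Close_old']  # tensor(12.5000), tensor(11.5000): target and previous Close
```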
/pl_modules/gru_module.py:
--------------------------------------------------------------------------------
1 | import torch.nn as nn
2 | from models.gru import GRUModel
3 | from .base_module import BaseModule
4 |
5 |
6 | class GRUModule(BaseModule):
7 |
8 | def __init__(
9 | self,
10 | num_features=5,
11 | hidden_size=64,
12 | window_size=14,
13 | num_layers=1,
14 | lr=0.0002,
15 | lr_step_size=50,
16 | lr_gamma=0.1,
17 | weight_decay=0.0,
18 | logger_type=None,
19 | y_key='Close',
20 | optimizer='adam',
21 | mode='default',
22 | loss='rmse',
23 | **kwargs
24 | ):
25 | super().__init__(lr=lr,
26 | lr_step_size=lr_step_size,
27 | lr_gamma=lr_gamma,
28 | weight_decay=weight_decay,
29 | logger_type=logger_type,
30 | y_key=y_key,
31 | optimizer=optimizer,
32 | mode=mode,
33 | window_size=window_size,
34 | loss=loss,
35 | )
36 |
37 | self.model = GRUModel(
38 | num_features=num_features,
39 | hidden_size=hidden_size,
40 | num_layers=num_layers,
41 | )
--------------------------------------------------------------------------------
/pl_modules/lstm_module.py:
--------------------------------------------------------------------------------
1 | import torch.nn as nn
2 | from models.lstm import LSTM
3 | from .base_module import BaseModule
4 |
5 |
6 | class LSTMModule(BaseModule):
7 |
8 | def __init__(
9 | self,
10 | num_features=5,
11 | hidden_size=64,
12 | window_size=14,
13 | num_layers=1,
14 | bidirectional=False,
15 | lr=0.0002,
16 | lr_step_size=50,
17 | lr_gamma=0.1,
18 | weight_decay=0.0,
19 | logger_type=None,
20 | y_key='Close',
21 | optimizer='adam',
22 | mode='default',
23 | loss='rmse',
24 | **kwargs
25 | ):
26 | super().__init__(lr=lr,
27 | lr_step_size=lr_step_size,
28 | lr_gamma=lr_gamma,
29 | weight_decay=weight_decay,
30 | logger_type=logger_type,
31 | y_key=y_key,
32 | optimizer=optimizer,
33 | mode=mode,
34 | window_size=window_size,
35 | loss=loss,
36 | )
37 |
38 | self.model = LSTM(
39 | num_features=num_features,
40 | hidden_size=hidden_size,
41 | num_layers=num_layers,
42 | bidirectional=bidirectional,
43 | )
--------------------------------------------------------------------------------
/pl_modules/smamba_module.py:
--------------------------------------------------------------------------------
1 | import torch.nn as nn
2 | from models.smamba import SMamba
3 | from .base_module import BaseModule
4 |
5 |
6 | class SMambaModule(BaseModule):
7 |
8 | def __init__(
9 | self,
10 | num_features,
11 | seq_len,
12 | use_norm,
13 | d_model,
14 | d_state,
15 | d_ff,
16 | dropout,
17 | activation,
18 | e_layers,
19 | lr=0.0002,
20 | lr_step_size=50,
21 | lr_gamma=0.1,
22 | weight_decay=0.0,
23 | logger_type=None,
24 | window_size=14,
25 | y_key='Close',
26 | optimizer='adam',
27 | mode='default',
28 | loss='rmse',
29 | **kwargs
30 | ):
31 | super().__init__(lr=lr,
32 | lr_step_size=lr_step_size,
33 | lr_gamma=lr_gamma,
34 | weight_decay=weight_decay,
35 | logger_type=logger_type,
36 | y_key=y_key,
37 | optimizer=optimizer,
38 | mode=mode,
39 | window_size=window_size,
40 | loss=loss,
41 | )
42 |
43 | self.model = SMamba(
44 | num_features=num_features,
45 | seq_len=seq_len,
46 | use_norm=use_norm,
47 | d_model=d_model,
48 | d_state=d_state,
49 | d_ff=d_ff,
50 | dropout=dropout,
51 | activation=activation,
52 | e_layers=e_layers,
53 | **kwargs
54 | )
--------------------------------------------------------------------------------
/utils/io_tools.py:
--------------------------------------------------------------------------------
1 | import yaml
2 | import pickle
3 | import pathlib
4 | import importlib
5 |
6 |
7 | def get_obj_from_str(string, reload=False):
8 | module, cls = string.rsplit(".", 1)
9 | if reload:
10 | module_imp = importlib.import_module(module)
11 | importlib.reload(module_imp)
12 | return getattr(importlib.import_module(module, package=None), cls)
13 |
14 |
15 | def instantiate_from_config(config):
16 | if not "target" in config:
17 | if config == '__is_first_stage__':
18 | return None
19 | elif config == "__is_unconditional__":
20 | return None
21 | raise KeyError("Expected key `target` to instantiate.")
22 | return get_obj_from_str(config["target"])(**config.get("params", dict()))
23 |
24 |
25 | def get_root(file, num_returns=1):
26 | tmp = pathlib.Path(file)
27 | for _ in range(num_returns):
28 | tmp = tmp.parent.resolve()
29 | return tmp
30 |
31 | def load_config_from_yaml(path):
32 | config_file = pathlib.Path(path)
33 | if config_file.exists():
34 | with config_file.open('r') as f:
35 | d = yaml.safe_load(f)
36 | return d
37 | else:
38 | raise ValueError(f'Config file ({path}) does not exist.')
39 |
40 |
41 | def save_yaml(data, path):
42 | with open(path, 'w') as file:
43 | yaml.dump(data, file, default_flow_style=False)
44 |
45 |
46 | def save_pickle(data, path: str) -> None:
47 | with open(path, 'wb') as f:
48 | pickle.dump(data, f)
49 |
50 |
51 | def load_pickle(path: str):
52 | with open(path, 'rb') as f:
53 | data = pickle.load(f)
54 | return data
--------------------------------------------------------------------------------
/pl_modules/itransformer_module.py:
--------------------------------------------------------------------------------
1 | import torch.nn as nn
2 | from models.iTransformer import iTransformer
3 | from .base_module import BaseModule
4 |
5 |
6 | class iTransformerModule(BaseModule):
7 |
8 | def __init__(
9 | self,
10 | num_features,
11 | seq_len,
12 | use_norm,
13 | d_model,
14 | d_ff,
15 | dropout,
16 | activation,
17 | e_layers,
18 | output_attention,
19 | factor,
20 | n_heads,
21 | lr=0.0002,
22 | lr_step_size=50,
23 | lr_gamma=0.1,
24 | weight_decay=0.0,
25 | logger_type=None,
26 | window_size=14,
27 | y_key='Close',
28 | optimizer='adam',
29 | mode='default',
30 | loss='rmse',
31 | **kwargs
32 | ):
33 | super().__init__(lr=lr,
34 | lr_step_size=lr_step_size,
35 | lr_gamma=lr_gamma,
36 | weight_decay=weight_decay,
37 | logger_type=logger_type,
38 | y_key=y_key,
39 | optimizer=optimizer,
40 | mode=mode,
41 | window_size=window_size,
42 | loss=loss,
43 | )
44 |
45 | self.model = iTransformer(
46 | num_features=num_features,
47 | seq_len=seq_len,
48 | pred_len=1,
49 | output_attention=output_attention,
50 | use_norm=use_norm,
51 | d_model=d_model,
52 | d_ff=d_ff,
53 | dropout=dropout,
54 | factor=factor,
55 | n_heads=n_heads,
56 | activation=activation,
57 | e_layers=e_layers,
58 | **kwargs
59 | )
--------------------------------------------------------------------------------
/pl_modules/cmamba_module.py:
--------------------------------------------------------------------------------
1 | import torch.nn as nn
2 | from models.cmamba import CMamba
3 | from .base_module import BaseModule
4 |
5 |
6 | class CryptoMambaModule(BaseModule):
7 |
8 | def __init__(
9 | self,
10 | num_features=5,
11 | hidden_dims=[14, 1],
12 | norm_layer=nn.LayerNorm,
13 | d_conv=4,
14 | layer_density=1,
15 | expand=2,
16 | mlp_ratio=0,
17 | drop=0.0,
18 | num_classes=None,
19 | d_states=16,
20 | use_checkpoint=False,
21 | lr=0.0002,
22 | lr_step_size=50,
23 | lr_gamma=0.1,
24 | weight_decay=0.0,
25 | logger_type=None,
26 | window_size=14,
27 | y_key='Close',
28 | optimizer='adam',
29 | mode='default',
30 | loss='rmse',
31 | **kwargs
32 | ):
33 | super().__init__(lr=lr,
34 | lr_step_size=lr_step_size,
35 | lr_gamma=lr_gamma,
36 | weight_decay=weight_decay,
37 | logger_type=logger_type,
38 | y_key=y_key,
39 | optimizer=optimizer,
40 | mode=mode,
41 | window_size=window_size,
42 | loss=loss,
43 | )
44 | assert window_size == hidden_dims[0]
45 |
46 | self.model = CMamba(
47 | num_features=num_features,
48 | hidden_dims=hidden_dims,
49 | norm_layer=norm_layer,
50 | d_conv=d_conv,
51 | layer_density=layer_density,
52 | expand=expand,
53 | mlp_ratio=mlp_ratio,
54 | drop=drop,
55 | num_classes=num_classes,
56 | d_states=d_states,
57 | use_checkpoint=use_checkpoint,
58 | **kwargs
59 | )
60 |
--------------------------------------------------------------------------------
/pl_modules/hcmamba_module.py:
--------------------------------------------------------------------------------
1 | import torch.nn as nn
2 | from models.hcmamba import HyperCMamba
3 | from .base_module import BaseModule
4 |
5 |
6 | class HyperCryptoMambaModule(BaseModule):
7 |
8 | def __init__(
9 | self,
10 | num_features=5,
11 | hidden_dims=[14, 1],
12 | norm_layer=nn.LayerNorm,
13 | d_conv=4,
14 | layer_density=1,
15 | expand=2,
16 | mlp_ratio=0,
17 | drop=0.0,
18 | num_classes=None,
19 | d_states=16,
20 | use_checkpoint=False,
21 | lr=0.0002,
22 | lr_step_size=50,
23 | lr_gamma=0.1,
24 | weight_decay=0.0,
25 | logger_type=None,
26 | y_key='Close',
27 | optimizer='adam',
28 | use_acc_in_loss=False,
29 | **kwargs
30 | ):
31 | super().__init__(lr=lr,
32 | lr_step_size=lr_step_size,
33 | lr_gamma=lr_gamma,
34 | weight_decay=weight_decay,
35 | logger_type=logger_type,
36 | y_key=y_key,
37 | optimizer=optimizer,
38 | use_acc_in_loss=use_acc_in_loss,
39 | )
40 |
41 | self.model = HyperCMamba(
42 | num_features=num_features,
43 | hidden_dims=hidden_dims,
44 | norm_layer=norm_layer,
45 | d_conv=d_conv,
46 | layer_density=layer_density,
47 | expand=expand,
48 | mlp_ratio=mlp_ratio,
49 | drop=drop,
50 | num_classes=num_classes,
51 | d_states=d_states,
52 | use_checkpoint=use_checkpoint,
53 | **kwargs
54 | )
55 |
56 | def forward(self, x, y_old=None):
57 | label, val = self.model(x)
58 | return (2 * label.reshape(-1) - 1) * val.reshape(-1) + y_old
--------------------------------------------------------------------------------
/utils/trade.py:
--------------------------------------------------------------------------------
1 |
2 | def buy_sell_smart(today, pred, balance, shares, risk=5):
3 | diff = pred * risk / 100
4 | if today > pred + diff:
5 | balance += shares * today
6 | shares = 0
7 | elif today > pred:
8 | factor = (today - pred) / diff
9 | balance += shares * factor * today
10 | shares *= (1 - factor)
11 | elif today > pred - diff:
12 | factor = (pred - today) / diff
13 | shares += balance * factor / today
14 | balance *= (1 - factor)
15 | else:
16 | shares += balance / today
17 | balance = 0
18 | return balance, shares
19 |
20 | def buy_sell_smart_w_short(today, pred, balance, shares, risk=5, max_n_btc=0.002):
21 | diff = pred * risk / 100
22 | if today < pred - diff:
23 | shares += balance / today
24 | balance = 0
25 | elif today < pred:
26 | factor = (pred - today) / diff
27 | shares += balance * factor / today
28 | balance *= (1 - factor)
29 | elif today < pred + diff:
30 | if shares > 0:
31 | factor = (today - pred) / diff
32 | balance += shares * factor * today
33 | shares *= (1 - factor)
34 | else:
35 | balance += (shares + max_n_btc) * today
36 | shares = -max_n_btc
37 | return balance, shares
38 |
39 | def buy_sell_vanilla(today, pred, balance, shares, tr=0.01):
40 | tmp = abs((pred - today) / today)
41 | if tmp < tr:
42 | return balance, shares
43 | if pred > today:
44 | shares += balance / today
45 | balance = 0
46 | else:
47 | balance += shares * today
48 | shares = 0
49 | return balance, shares
50 |
51 |
52 | def trade(data, time_key, timestamps, targets, preds, balance=100, mode='smart_v2', risk=5, y_key='Close'):
53 | balance_in_time = [balance]
54 | shares = 0
55 |
56 |     for ts, target, pred in zip(timestamps, targets, preds):
57 | today = data[data[time_key] == int(ts - 24 * 60 * 60)].iloc[0][y_key]
58 | assert round(target, 2) == round(data[data[time_key] == int(ts)].iloc[0][y_key], 2)
59 | if mode == 'smart':
60 | balance, shares = buy_sell_smart(today, pred, balance, shares, risk=risk)
61 | if mode == 'smart_w_short':
62 | balance, shares = buy_sell_smart_w_short(today, pred, balance, shares, risk=risk, max_n_btc=0.002)
63 | elif mode == 'vanilla':
64 | balance, shares = buy_sell_vanilla(today, pred, balance, shares)
65 | elif mode == 'no_strategy':
66 | shares += balance / today
67 | balance = 0
68 | balance_in_time.append(shares * today + balance)
69 |
70 | balance += shares * targets[-1]
71 | return balance, balance_in_time
--------------------------------------------------------------------------------
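Two short worked examples illustrate how the rules above size positions. The prices, balances, and the 5% risk value are hypothetical; only the functions come from `utils/trade.py`.

```python
# Made-up prices and balances; only the functions come from utils/trade.py.
from utils.trade import buy_sell_vanilla, buy_sell_smart

# Vanilla: act only when the predicted move exceeds the threshold (tr=0.01 by default).
balance, shares = buy_sell_vanilla(today=100.0, pred=103.0, balance=100.0, shares=0.0)
# |103 - 100| / 100 = 3% > 1% and pred > today, so the full balance is converted:
# balance == 0.0, shares == 1.0

# Smart: scale the trade with how far today's price sits from the prediction (risk = 5%).
balance, shares = buy_sell_smart(today=100.0, pred=102.0, balance=100.0, shares=0.0, risk=5)
# diff = 102 * 0.05 = 5.1; pred - diff < today < pred, so factor = (102 - 100) / 5.1 ≈ 0.39,
# i.e. about 39% of the balance is spent: balance ≈ 60.8, shares ≈ 0.39
```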
/models/layers/Mamba_EncDec.py:
--------------------------------------------------------------------------------
1 | import concurrent.futures
2 | import threading
3 |
4 | import torch.nn as nn
5 | import torch.nn.functional as F
6 | import torch
7 | from mamba_ssm import Mamba
8 | from models.layers.SelfAttention_Family import FullAttention, AttentionLayer
9 |
10 | class EncoderLayer(nn.Module):
11 | def __init__(self, attention, attention_r, d_model, d_ff=None, dropout=0.1, activation="relu"):
12 | super(EncoderLayer, self).__init__()
13 | d_ff = d_ff or 4 * d_model
14 | self.attention = attention
15 | self.attention_r = attention_r
16 | self.conv1 = nn.Conv1d(in_channels=d_model, out_channels=d_ff, kernel_size=1)
17 | self.conv2 = nn.Conv1d(in_channels=d_ff, out_channels=d_model, kernel_size=1)
18 | self.norm1 = nn.LayerNorm(d_model)
19 | self.norm2 = nn.LayerNorm(d_model)
20 | self.dropout = nn.Dropout(dropout)
21 | self.activation = F.relu if activation == "relu" else F.gelu
22 | self.man = Mamba(
23 | d_model=11, # Model dimension d_model
24 | d_state=16, # SSM state expansion factor
25 | d_conv=2, # Local convolution width
26 | expand=1, # Block expansion factor)
27 | )
28 | self.man2 = Mamba(
29 | d_model=11, # Model dimension d_model
30 | d_state=16, # SSM state expansion factor
31 | d_conv=2, # Local convolution width
32 | expand=1, # Block expansion factor)
33 | )
34 | self.a = AttentionLayer(
35 | FullAttention(False, 2, attention_dropout=0.1,
36 | output_attention=True), 11,1)
37 | def forward(self, x, attn_mask=None, tau=None, delta=None):
38 | new_x = self.attention(x) + self.attention_r(x.flip(dims=[1])).flip(dims=[1])
39 | attn = 1
40 |
41 | x = x + new_x
42 | y = x = self.norm1(x)
43 | y = self.dropout(self.activation(self.conv1(y.transpose(-1, 1))))
44 | y = self.dropout(self.conv2(y).transpose(-1, 1))
45 |
46 | return self.norm2(x + y), attn
47 |
48 |
49 | class Encoder(nn.Module):
50 | def __init__(self, attn_layers, conv_layers=None, norm_layer=None):
51 | super(Encoder, self).__init__()
52 | self.attn_layers = nn.ModuleList(attn_layers)
53 | self.conv_layers = nn.ModuleList(conv_layers) if conv_layers is not None else None
54 | self.norm = norm_layer
55 |
56 | def forward(self, x, attn_mask=None, tau=None, delta=None):
57 | # x [B, L, D]
58 | attns = []
59 | if self.conv_layers is not None:
60 | for i, (attn_layer, conv_layer) in enumerate(zip(self.attn_layers, self.conv_layers)):
61 | delta = delta if i == 0 else None
62 | x, attn = attn_layer(x, attn_mask=attn_mask, tau=tau, delta=delta)
63 | x = conv_layer(x)
64 | attns.append(attn)
65 | x, attn = self.attn_layers[-1](x, tau=tau, delta=None)
66 | attns.append(attn)
67 | else:
68 | for attn_layer in self.attn_layers:
69 | x, attn = attn_layer(x, attn_mask=attn_mask, tau=tau, delta=delta)
70 | attns.append(attn)
71 |
72 | if self.norm is not None:
73 | x = self.norm(x)
74 |
75 | return x, attns
76 |
--------------------------------------------------------------------------------
/example.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "nbformat": 4,
3 | "nbformat_minor": 0,
4 | "metadata": {
5 | "colab": {
6 | "provenance": [],
7 | "gpuType": "T4",
8 | "include_colab_link": true
9 | },
10 | "kernelspec": {
11 | "name": "python3",
12 | "display_name": "Python 3"
13 | },
14 | "language_info": {
15 | "name": "python"
16 | },
17 | "accelerator": "GPU"
18 | },
19 | "cells": [
20 | {
21 | "cell_type": "markdown",
22 | "metadata": {
23 | "id": "view-in-github",
24 | "colab_type": "text"
25 | },
26 | "source": [
27 | ""
28 | ]
29 | },
30 | {
31 | "cell_type": "code",
32 | "execution_count": 1,
33 | "metadata": {
34 | "colab": {
35 | "base_uri": "https://localhost:8080/"
36 | },
37 | "cellView": "form",
38 | "id": "WCqI8zrlp99U",
39 | "outputId": "9d14221b-d515-497a-9faf-d1d8dd2e81d7"
40 | },
41 | "outputs": [
42 | {
43 | "output_type": "stream",
44 | "name": "stdout",
45 | "text": [
46 | "fatal: destination path 'CryptoMamba' already exists and is not an empty directory.\n",
47 | "/content/CryptoMamba\n"
48 | ]
49 | }
50 | ],
51 | "source": [
52 | "# @title Cloning\n",
53 | "!git clone https://github.com/MShahabSepehri/CryptoMamba.git\n",
54 | "%cd CryptoMamba/"
55 | ]
56 | },
57 | {
58 | "cell_type": "code",
59 | "source": [
60 | "# @title Installing requirements\n",
61 | "\n",
62 | "%%capture\n",
63 | "!pip install mamba-ssm[causal-conv1d] --no-build-isolation\n",
64 | "!pip install -r requirements.txt"
65 | ],
66 | "metadata": {
67 | "cellView": "form",
68 | "id": "QoSiNMCtqGj5"
69 | },
70 | "execution_count": 2,
71 | "outputs": []
72 | },
73 | {
74 | "cell_type": "code",
75 | "source": [
76 | "# @title Prediction\n",
77 | "!python scripts/one_day_pred.py --config cmamba_v --ckpt_path ./checkpoints/cmamba_v.ckpt --date 2024-12-15"
78 | ],
79 | "metadata": {
80 | "colab": {
81 | "base_uri": "https://localhost:8080/"
82 | },
83 | "id": "lAU_41CEeho4",
84 | "outputId": "11f32408-e116-4e99-c454-19252a8a7787"
85 | },
86 | "execution_count": 3,
87 | "outputs": [
88 | {
89 | "output_type": "stream",
90 | "name": "stdout",
91 | "text": [
92 | "['Timestamp', 'Open', 'High', 'Low', 'Close', 'Volume']\n",
93 | "['Timestamp', 'Open', 'High', 'Low', 'Close', 'Volume']\n",
94 | "['Timestamp', 'Open', 'High', 'Low', 'Close', 'Volume']\n",
95 | "\n",
96 | "Prediction date: 2024-12-15\n",
97 | "Prediction: 102174.26\n",
98 | "Today value: 101372.97\n",
99 | "Smart trade: 39.21% buy\n",
100 | "Vanilla trade: -\n"
101 | ]
102 | }
103 | ]
104 | },
105 | {
106 | "cell_type": "code",
107 | "source": [
108 | "# @title Training\n",
109 | "!python scripts/training.py --config cmamba_v"
110 | ],
111 | "metadata": {
112 | "id": "fVSOA2b_ssQq"
113 | },
114 | "execution_count": null,
115 | "outputs": []
116 | }
117 | ]
118 | }
--------------------------------------------------------------------------------
/models/iTransformer.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 | import torch.nn.functional as F
4 | from models.layers.Transformer_EncDec import Encoder, EncoderLayer
5 | from models.layers.SelfAttention_Family import FullAttention, AttentionLayer
6 | from models.layers.Embed import DataEmbedding_inverted
7 | import numpy as np
8 |
9 |
10 | class iTransformer(nn.Module):
11 | """
12 | Paper link: https://arxiv.org/abs/2310.06625
13 | """
14 |
15 | def __init__(self,
16 | num_features,
17 | seq_len,
18 | pred_len,
19 | output_attention,
20 | use_norm,
21 | d_model,
22 | d_ff,
23 | dropout,
24 | factor,
25 | n_heads,
26 | activation,
27 | e_layers
28 | ):
29 | super(iTransformer, self).__init__()
30 | self.num_features = num_features
31 | self.seq_len = seq_len
32 | self.pred_len = pred_len
33 | self.output_attention = output_attention
34 | self.use_norm = use_norm
35 |
36 | # raise ValueError(self.num_features, self.seq_len, self.pred_len, self.output_attention, self.use_norm)
37 | # Embedding
38 | self.enc_embedding = DataEmbedding_inverted(seq_len, d_model, dropout)
39 | # Encoder-only architecture
40 | self.encoder = Encoder(
41 | [
42 | EncoderLayer(
43 | AttentionLayer(
44 | FullAttention(False, factor, attention_dropout=dropout,
45 | output_attention=output_attention), d_model, n_heads),
46 | d_model,
47 | d_ff,
48 | dropout=dropout,
49 | activation=activation
50 | ) for l in range(e_layers)
51 | ],
52 | norm_layer=torch.nn.LayerNorm(d_model)
53 | )
54 | self.projector = nn.Linear(d_model, pred_len, bias=True)
55 | self.projector_features = nn.Linear(self.num_features, 1, bias=True)
56 |
57 | def forecast(self, x_enc):
58 | if self.use_norm:
59 | # Normalization from Non-stationary Transformer
60 | means = x_enc.mean(1, keepdim=True).detach()
61 | x_enc = x_enc - means
62 | stdev = torch.sqrt(torch.var(x_enc, dim=1, keepdim=True, unbiased=False) + 1e-5)
63 | x_enc /= stdev
64 |
65 | _, _, N = x_enc.shape # B L N
66 | # B: batch_size; E: d_model;
67 | # L: seq_len; S: pred_len;
68 | # N: number of variate (tokens), can also includes covariates
69 |
70 | # Embedding
71 | # B L N -> B N E (B L N -> B L E in the vanilla Transformer)
72 | enc_out = self.enc_embedding(x_enc, None) # covariates (e.g timestamp) can be also embedded as tokens
73 |
74 | # B N E -> B N E (B L E -> B L E in the vanilla Transformer)
75 | # the dimensions of embedded time series has been inverted, and then processed by native attn, layernorm and ffn modules
76 | enc_out, attns = self.encoder(enc_out, attn_mask=None)
77 |
78 | # B N E -> B N S -> B S N
79 | dec_out = self.projector(enc_out).permute(0, 2, 1)[:, :, :N] # filter the covariates
80 |
81 | if self.use_norm:
82 | # De-Normalization from Non-stationary Transformer
83 | dec_out = dec_out * (stdev[:, 0, :].unsqueeze(1).repeat(1, self.pred_len, 1))
84 | dec_out = dec_out + (means[:, 0, :].unsqueeze(1).repeat(1, self.pred_len, 1))
85 |
86 | output = self.projector_features(dec_out)
87 | return output
88 |
89 |
90 | def forward(self, x_enc):
91 | return self.forecast(x_enc.permute(0, 2, 1))
--------------------------------------------------------------------------------
/models/smamba.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 | from models.layers.Mamba_EncDec import Encoder, EncoderLayer
4 | from models.layers.Embed import DataEmbedding_inverted
5 |
6 | from mamba_ssm import Mamba
7 |
8 |
9 | class SMamba(nn.Module):
10 | """
11 | Paper link: https://arxiv.org/abs/2310.06625
12 | """
13 |
14 | def __init__(self,
15 | num_features,
16 | seq_len,
17 | use_norm,
18 | d_model,
19 | d_state,
20 | d_ff,
21 | dropout,
22 | activation,
23 | e_layers
24 | ):
25 | super(SMamba, self).__init__()
26 | self.num_features = num_features
27 | self.seq_len = seq_len
28 | self.use_norm = use_norm
29 | self.d_model = d_model
30 | self.d_state = d_state
31 | self.d_ff = d_ff
32 | self.dropout = dropout
33 | self.activation = activation
34 | self.e_layers = e_layers
35 | # Embedding
36 | self.enc_embedding = DataEmbedding_inverted(self.seq_len, self.d_model, self.dropout)
37 | # Encoder-only architecture
38 | self.encoder = Encoder(
39 | [
40 | EncoderLayer(
41 | Mamba(
42 | d_model=self.d_model, # Model dimension d_model
43 | d_state=self.d_state, # SSM state expansion factor
44 | d_conv=2, # Local convolution width
45 | expand=1, # Block expansion factor)
46 | ),
47 | Mamba(
48 | d_model=self.d_model, # Model dimension d_model
49 | d_state=self.d_state, # SSM state expansion factor
50 | d_conv=2, # Local convolution width
51 | expand=1, # Block expansion factor)
52 | ),
53 | self.d_model,
54 | self.d_ff,
55 | dropout=self.dropout,
56 | activation=self.activation
57 | ) for l in range(self.e_layers)
58 | ],
59 | norm_layer=torch.nn.LayerNorm(self.d_model)
60 | )
61 | self.projector = nn.Linear(self.d_model, 1, bias=True)
62 | self.projector_features = nn.Linear(self.num_features, 1, bias=True)
63 |
64 | def forecast(self, x_enc):
65 | if self.use_norm:
66 | # Normalization from Non-stationary Transformer
67 | means = x_enc.mean(1, keepdim=True).detach()
68 | x_enc = x_enc - means
69 | stdev = torch.sqrt(torch.var(x_enc, dim=1, keepdim=True, unbiased=False) + 1e-5)
70 | x_enc /= stdev
71 |
72 | _, _, N = x_enc.shape # B L N
73 | # B: batch_size; E: d_model;
74 | # L: seq_len; S: pred_len;
75 | # N: number of variate (tokens), can also includes covariates
76 |
77 | # Embedding
78 | # B L N -> B N E (B L N -> B L E in the vanilla Transformer)
79 | enc_out = self.enc_embedding(x_enc, None) # covariates (e.g timestamp) can be also embedded as tokens
80 |
81 | # B N E -> B N E (B L E -> B L E in the vanilla Transformer)
82 | # the dimensions of embedded time series has been inverted, and then processed by native attn, layernorm and ffn modules
83 | enc_out, attns = self.encoder(enc_out, attn_mask=None)
84 | # B N E -> B N S -> B S N
85 | dec_out = self.projector(enc_out).permute(0, 2, 1)[:, :, :N] # filter the covariates
86 |
87 | if self.use_norm:
88 | # De-Normalization from Non-stationary Transformer
89 | dec_out = dec_out * (stdev[:, 0, :].unsqueeze(1).repeat(1, 1, 1))
90 | dec_out = dec_out + (means[:, 0, :].unsqueeze(1).repeat(1, 1, 1))
91 |
92 | output = self.projector_features(dec_out)
93 |
94 | return output
95 |
96 | def forward(self, x_enc):
97 | return self.forecast(x_enc.permute(0, 2, 1))
98 |
--------------------------------------------------------------------------------
/pl_modules/data_module.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import numpy as np
3 | from copy import copy
4 | from pathlib import Path
5 | import pytorch_lightning as pl
6 | from argparse import ArgumentParser
7 | from data_utils.dataset import CMambaDataset, DataConverter
8 |
9 | def worker_init_fn(worker_id):
10 | """
11 | Handle random seeding.
12 | """
13 | worker_info = torch.utils.data.get_worker_info()
14 | data = worker_info.dataset # pylint: disable=no-member
15 |
16 | # Check if we are using DDP
17 | is_ddp = False
18 | if torch.distributed.is_available():
19 | if torch.distributed.is_initialized():
20 | is_ddp = True
21 |
22 | # for NumPy random seed we need it to be in this range
23 | base_seed = worker_info.seed # pylint: disable=no-member
24 |
25 | if is_ddp: # DDP training: unique seed is determined by worker and device
26 | seed = base_seed + torch.distributed.get_rank() * worker_info.num_workers
27 | else:
28 | seed = base_seed
29 |
30 | class CMambaDataModule(pl.LightningDataModule):
31 |
32 | def __init__(
33 | self,
34 | data_config,
35 | train_transform,
36 | val_transform,
37 | test_transform,
38 | batch_size,
39 | distributed_sampler,
40 | num_workers=4,
41 | normalize=False,
42 | window_size=14,
43 | ):
44 |
45 | super().__init__()
46 |
47 | self.data_config = data_config
48 | self.train_transform = train_transform
49 | self.val_transform = val_transform
50 | self.test_transform = test_transform
51 | self.batch_size = batch_size
52 | self.num_workers = num_workers
53 | self.distributed_sampler = distributed_sampler
54 | self.window_size = window_size
55 | self.factors = None
56 |
57 | self.converter = DataConverter(data_config)
58 | train, val, test = self.converter.get_data()
59 | self.data_dict = {
60 | 'train': train,
61 | 'val': val,
62 | 'test': test,
63 | }
64 |
65 | if normalize:
66 | self.normalize()
67 |
68 |
69 | def normalize(self):
70 | tmp = {}
71 | for split in self.data_dict.keys():
72 | data = self.data_dict.get(split)
73 | for key in data.keys():
74 | max_val = max(data.get(key))
75 | min_val = min(data.get(key))
76 | if not key in tmp.keys():
77 | tmp[key] = {
78 | 'min': min_val,
79 | 'max': max_val
80 | }
81 | else:
82 | tmp.get(key)['max'] = max(max_val, tmp.get(key).get('max'))
83 | tmp.get(key)['min'] = min(min_val, tmp.get(key).get('min'))
84 | for data in self.data_dict.values():
85 | for key in data.keys():
86 | if key == 'Timestamp':
87 | data['Timestamp_orig'] = data.get(key)
88 | data[key] = (data.get(key) - tmp.get(key).get('min')) / (tmp.get(key).get('max') - tmp.get(key).get('min'))
89 | self.factors = tmp
90 |
91 | def _create_data_loader(
92 | self,
93 | data_split,
94 | data_transform,
95 | batch_size=None
96 | ) :
97 | dataset = CMambaDataset(
98 | data=self.data_dict.get(data_split),
99 | split=data_split,
100 | window_size=self.window_size,
101 | transform=data_transform,
102 | )
103 |
104 | batch_size = self.batch_size if batch_size is None else batch_size
105 | sampler = torch.utils.data.DistributedSampler(dataset) if self.distributed_sampler else None
106 |
107 | dataloader = torch.utils.data.DataLoader(
108 | dataset=dataset,
109 | batch_size=batch_size,
110 | num_workers=self.num_workers,
111 | worker_init_fn=worker_init_fn,
112 | sampler=sampler,
113 | drop_last=False
114 | )
115 | return dataloader
116 |
117 | def train_dataloader(self):
118 | return self._create_data_loader(data_split='train', data_transform=self.train_transform)
119 |
120 | def val_dataloader(self):
121 | return self._create_data_loader(data_split='val', data_transform=self.val_transform)
122 |
123 | def test_dataloader(self):
124 | return self._create_data_loader(data_split='test', data_transform=self.test_transform)
125 |
--------------------------------------------------------------------------------
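A hedged construction sketch of the data module follows, wiring `configs/data_configs/mode_1.yaml` together with `DataTransform` and `CMambaDataModule`. `data_utils/dataset.py` (which defines `DataConverter` and `CMambaDataset`) is not included in this dump, so the exact form `data_config` must take is assumed to be the parsed YAML dict, and the batch size is arbitrary.

```python
# Hedged sketch; DataConverter lives in data_utils/dataset.py (not shown), so the
# data_config it expects is assumed to be the dict parsed from mode_1.yaml,
# and batch_size=32 is a made-up value.
from utils.io_tools import load_config_from_yaml
from data_utils.data_transforms import DataTransform
from pl_modules.data_module import CMambaDataModule

data_config = load_config_from_yaml('configs/data_configs/mode_1.yaml')
dm = CMambaDataModule(
    data_config=data_config,
    train_transform=DataTransform(is_train=True, use_volume=True),
    val_transform=DataTransform(is_train=False, use_volume=True),
    test_transform=DataTransform(is_train=False, use_volume=True),
    batch_size=32,
    distributed_sampler=False,
    normalize=True,   # min-max scale each column with the global min/max across splits
    window_size=14,
)
train_loader = dm.train_dataloader()
```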
/models/layers/Transformer_EncDec.py:
--------------------------------------------------------------------------------
1 | import torch.nn as nn
2 | import torch.nn.functional as F
3 |
4 |
5 | class ConvLayer(nn.Module):
6 | def __init__(self, c_in):
7 | super(ConvLayer, self).__init__()
8 | self.downConv = nn.Conv1d(in_channels=c_in,
9 | out_channels=c_in,
10 | kernel_size=3,
11 | padding=2,
12 | padding_mode='circular')
13 | self.norm = nn.BatchNorm1d(c_in)
14 | self.activation = nn.ELU()
15 | self.maxPool = nn.MaxPool1d(kernel_size=3, stride=2, padding=1)
16 |
17 | def forward(self, x):
18 | x = self.downConv(x.permute(0, 2, 1))
19 | x = self.norm(x)
20 | x = self.activation(x)
21 | x = self.maxPool(x)
22 | x = x.transpose(1, 2)
23 | return x
24 |
25 |
26 | class EncoderLayer(nn.Module):
27 | def __init__(self, attention, d_model, d_ff=None, dropout=0.1, activation="relu"):
28 | super(EncoderLayer, self).__init__()
29 | d_ff = d_ff or 4 * d_model
30 | self.attention = attention
31 | self.conv1 = nn.Conv1d(in_channels=d_model, out_channels=d_ff, kernel_size=1)
32 | self.conv2 = nn.Conv1d(in_channels=d_ff, out_channels=d_model, kernel_size=1)
33 | self.norm1 = nn.LayerNorm(d_model)
34 | self.norm2 = nn.LayerNorm(d_model)
35 | self.dropout = nn.Dropout(dropout)
36 | self.activation = F.relu if activation == "relu" else F.gelu
37 |
38 | def forward(self, x, attn_mask=None, tau=None, delta=None):
39 | new_x, attn = self.attention(
40 | x, x, x,
41 | attn_mask=attn_mask,
42 | tau=tau, delta=delta
43 | )
44 | x = x + self.dropout(new_x)
45 |
46 | y = x = self.norm1(x)
47 | y = self.dropout(self.activation(self.conv1(y.transpose(-1, 1))))
48 | y = self.dropout(self.conv2(y).transpose(-1, 1))
49 |
50 | return self.norm2(x + y), attn
51 |
52 |
53 | class Encoder(nn.Module):
54 | def __init__(self, attn_layers, conv_layers=None, norm_layer=None):
55 | super(Encoder, self).__init__()
56 | self.attn_layers = nn.ModuleList(attn_layers)
57 | self.conv_layers = nn.ModuleList(conv_layers) if conv_layers is not None else None
58 | self.norm = norm_layer
59 |
60 | def forward(self, x, attn_mask=None, tau=None, delta=None):
61 | # x [B, L, D]
62 | attns = []
63 | if self.conv_layers is not None:
64 | for i, (attn_layer, conv_layer) in enumerate(zip(self.attn_layers, self.conv_layers)):
65 | delta = delta if i == 0 else None
66 | x, attn = attn_layer(x, attn_mask=attn_mask, tau=tau, delta=delta)
67 | x = conv_layer(x)
68 | attns.append(attn)
69 | x, attn = self.attn_layers[-1](x, tau=tau, delta=None)
70 | attns.append(attn)
71 | else:
72 | for attn_layer in self.attn_layers:
73 | x, attn = attn_layer(x, attn_mask=attn_mask, tau=tau, delta=delta)
74 | attns.append(attn)
75 |
76 | if self.norm is not None:
77 | x = self.norm(x)
78 |
79 | return x, attns
80 |
81 |
82 | class DecoderLayer(nn.Module):
83 | def __init__(self, self_attention, cross_attention, d_model, d_ff=None,
84 | dropout=0.1, activation="relu"):
85 | super(DecoderLayer, self).__init__()
86 | d_ff = d_ff or 4 * d_model
87 | self.self_attention = self_attention
88 | self.cross_attention = cross_attention
89 | self.conv1 = nn.Conv1d(in_channels=d_model, out_channels=d_ff, kernel_size=1)
90 | self.conv2 = nn.Conv1d(in_channels=d_ff, out_channels=d_model, kernel_size=1)
91 | self.norm1 = nn.LayerNorm(d_model)
92 | self.norm2 = nn.LayerNorm(d_model)
93 | self.norm3 = nn.LayerNorm(d_model)
94 | self.dropout = nn.Dropout(dropout)
95 | self.activation = F.relu if activation == "relu" else F.gelu
96 |
97 | def forward(self, x, cross, x_mask=None, cross_mask=None, tau=None, delta=None):
98 | x = x + self.dropout(self.self_attention(
99 | x, x, x,
100 | attn_mask=x_mask,
101 | tau=tau, delta=None
102 | )[0])
103 | x = self.norm1(x)
104 |
105 | x = x + self.dropout(self.cross_attention(
106 | x, cross, cross,
107 | attn_mask=cross_mask,
108 | tau=tau, delta=delta
109 | )[0])
110 |
111 | y = x = self.norm2(x)
112 | y = self.dropout(self.activation(self.conv1(y.transpose(-1, 1))))
113 | y = self.dropout(self.conv2(y).transpose(-1, 1))
114 |
115 | return self.norm3(x + y)
116 |
117 |
118 | class Decoder(nn.Module):
119 | def __init__(self, layers, norm_layer=None, projection=None):
120 | super(Decoder, self).__init__()
121 | self.layers = nn.ModuleList(layers)
122 | self.norm = norm_layer
123 | self.projection = projection
124 |
125 | def forward(self, x, cross, x_mask=None, cross_mask=None, tau=None, delta=None):
126 | for layer in self.layers:
127 | x = layer(x, cross, x_mask=x_mask, cross_mask=cross_mask, tau=tau, delta=delta)
128 |
129 | if self.norm is not None:
130 | x = self.norm(x)
131 |
132 | if self.projection is not None:
133 | x = self.projection(x)
134 | return x
--------------------------------------------------------------------------------
/pl_modules/base_module.py:
--------------------------------------------------------------------------------
1 | import copy
2 | import torch
3 | import torch.nn as nn
4 | import pytorch_lightning as pl
5 | from models.cmamba import CMamba
6 | from torchmetrics.regression import MeanAbsolutePercentageError as MAPE
7 |
8 |
9 | class BaseModule(pl.LightningModule):
10 |
11 | def __init__(
12 | self,
13 | lr=0.0002,
14 | lr_step_size=50,
15 | lr_gamma=0.1,
16 | weight_decay=0.0,
17 | logger_type=None,
18 | window_size=14,
19 | y_key='Close',
20 | optimizer='adam',
21 | mode='default',
22 | loss='rmse',
23 | ):
24 | super().__init__()
25 |
26 | self.lr = lr
27 | self.lr_step_size = lr_step_size
28 | self.lr_gamma = lr_gamma
29 | self.weight_decay = weight_decay
30 | self.logger_type = logger_type
31 | self.y_key = y_key
32 | self.optimizer = optimizer
33 | self.batch_size = None
34 | self.mode = mode
35 | self.window_size = window_size
36 | self.loss = loss
37 |
38 | # self.loss = lambda x, y: torch.sqrt(tmp(x, y))
39 | self.mse = nn.MSELoss()
40 | self.l1 = nn.L1Loss()
41 | self.mape = MAPE()
42 | self.normalization_coeffs = None
43 |
44 | def forward(self, x, y_old=None):
45 | if self.mode == 'default':
46 | return self.model(x).reshape(-1)
47 | elif self.mode == 'diff':
48 | return self.model(x).reshape(-1) + y_old
49 |
50 | def set_normalization_coeffs(self, factors):
51 | if factors is None:
52 | return
53 | scale = factors.get(self.y_key).get('max') - factors.get(self.y_key).get('min')
54 | shift = factors.get(self.y_key).get('min')
55 | self.normalization_coeffs = (scale, shift)
56 |
57 | def denormalize(self, y, y_hat):
58 | if self.normalization_coeffs is not None:
59 | scale, shift = self.normalization_coeffs
60 | y = y * scale + shift
61 | y_hat = y_hat * scale + shift
62 | return y, y_hat
63 |
64 | def training_step(self, batch, batch_idx):
65 | x = batch['features']
66 | y = batch[self.y_key]
67 | y_old = batch[f'{self.y_key}_old']
68 | if self.batch_size is None:
69 | self.batch_size = x.shape[0]
70 | y_hat = self.forward(x, y_old).reshape(-1)
71 | y, y_hat = self.denormalize(y, y_hat)
72 | mse = self.mse(y_hat, y)
73 | rmse = torch.sqrt(mse)
74 | mape = self.mape(y_hat, y)
75 | l1 = self.l1(y_hat, y)
76 |
77 | self.log("train/mse", mse.detach(), batch_size=self.batch_size, sync_dist=True, prog_bar=False)
78 | self.log("train/rmse", rmse.detach(), batch_size=self.batch_size, sync_dist=True, prog_bar=True)
79 | self.log("train/mape", mape.detach(), batch_size=self.batch_size, sync_dist=True, prog_bar=True)
80 | self.log("train/mae", l1.detach(), batch_size=self.batch_size, sync_dist=True, prog_bar=False)
81 |
82 | if self.loss == 'mse':
83 | return mse
84 | elif self.loss == 'rmse':
85 | return rmse
86 | elif self.loss == 'mae':
87 | return l1
88 | elif self.loss == 'mape':
89 | return mape
90 |
91 |
92 | def validation_step(self, batch, batch_idx):
93 | x = batch['features']
94 | y = batch[self.y_key]
95 | y_old = batch[f'{self.y_key}_old']
96 | if self.batch_size is None:
97 | self.batch_size = x.shape[0]
98 | y_hat = self.forward(x, y_old).reshape(-1)
99 | y, y_hat = self.denormalize(y, y_hat)
100 | mse = self.mse(y_hat, y)
101 | rmse = torch.sqrt(mse)
102 | mape = self.mape(y_hat, y)
103 | l1 = self.l1(y_hat, y)
104 |
105 | self.log("val/mse", mse.detach(), sync_dist=True, batch_size=self.batch_size, prog_bar=False)
106 | self.log("val/rmse", rmse.detach(), batch_size=self.batch_size, sync_dist=True, prog_bar=True)
107 | self.log("val/mape", mape.detach(), batch_size=self.batch_size, sync_dist=True, prog_bar=True)
108 | self.log("val/mae", l1.detach(), batch_size=self.batch_size, sync_dist=True, prog_bar=False)
109 | return {
110 | "val_loss": mse,
111 | }
112 |
113 | def test_step(self, batch, batch_idx):
114 | x = batch['features']
115 | y = batch[self.y_key]
116 | y_old = batch[f'{self.y_key}_old']
117 | if self.batch_size is None:
118 | self.batch_size = x.shape[0]
119 | y_hat = self.forward(x, y_old).reshape(-1)
120 | y, y_hat = self.denormalize(y, y_hat)
121 | mse = self.mse(y_hat, y)
122 | rmse = torch.sqrt(mse)
123 | mape = self.mape(y_hat, y)
124 | l1 = self.l1(y_hat, y)
125 |
126 | self.log("test/mse", mse.detach(), sync_dist=True, batch_size=self.batch_size, prog_bar=False)
127 | self.log("test/rmse", rmse.detach(), batch_size=self.batch_size, sync_dist=True, prog_bar=True)
128 | self.log("test/mape", mape.detach(), batch_size=self.batch_size, sync_dist=True, prog_bar=True)
129 | self.log("test/mae", l1.detach(), batch_size=self.batch_size, sync_dist=True, prog_bar=False)
130 | return {
131 | "test_loss": mse,
132 | }
133 |
134 | def configure_optimizers(self):
135 | if self.optimizer == 'adam':
136 | optim = torch.optim.Adam(
137 | self.parameters(), lr=self.lr, weight_decay=self.weight_decay
138 | )
139 | elif self.optimizer == 'sgd':
140 | optim = torch.optim.SGD(
141 | self.parameters(), lr=self.lr, weight_decay=self.weight_decay
142 | )
143 | else:
144 | raise ValueError(f'Unimplemented optimizer {self.optimizer}')
145 | scheduler = torch.optim.lr_scheduler.StepLR(optim,
146 | self.lr_step_size,
147 | self.lr_gamma
148 | )
149 | return [optim], [scheduler]
150 |
151 | def lr_scheduler_step(self, scheduler, *args, **kwargs):
152 | scheduler.step()
153 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 |
2 |
3 |
17 | CryptoMamba is a novel Mamba-based architecture designed for accurate and efficient time-series forecasting, with a focus on cryptocurrency price prediction. Leveraging the capabilities of SSMs, CryptoMamba excels at capturing long-range dependencies and adapting to highly volatile market conditions.
18 |
19 |
20 | Repository Includes:
21 | - **Implementation** of CryptoMamba and baseline models (LSTM, Bi-LSTM, GRU, and S-Mamba).
22 | - **Two Trading Algorithms**: Vanilla and Smart, for evaluating real-world performance.
23 | - **Code** for data preprocessing, model training, evaluation metrics, and trading simulations.
24 |
25 |
26 | CryptoMamba’s robust performance and generalizability make it a promising solution for various sequential forecasting tasks, including financial markets, commodities, and other time-series applications.
27 |
28 |
29 | ## 📖 Table of Contents
30 |
31 | * [Requirements](#-requirements)
32 | * [Usage](#-usage)
33 |   * [Data](#data)
34 |   * [Config](#config)
35 |   * [Train New Model](#train-new-model)
36 |   * [Evaluate Model](#evaluate-model)
37 |   * [Predict Next Day Price](#predict-next-day-price)
38 | * [Results](#-results)
39 | * [Citation](#-citation)
40 | * [Where to Ask for Help](#-where-to-ask-for-help)
41 |
42 | ## 🔧 Requirements
43 |
44 | To install the requirements, you can use:
45 |
46 | ```
47 | pip install -r requirements.txt
48 | ```
49 |
50 |
51 | If you have difficulties installing mamba_ssm, please follow the instructions in its GitHub repository.
52 |
59 | You can find the processed data that we use here. If you want to use another data configuration, you should change the settings in the data config file. Note that `data_path` should point to the raw data file with a similar format to this.
60 |
65 | If you want to use additional features other than Open, Close, High, Low, Timestamp, and Volume, you should specify a list called `additional_features` in your data and training configuration files. Note that your raw data file must contain these features.
66 |
67 | If you want to change the time resolution, you should change the `date_format` in your data configuration file and also set `jumps` to your desired resolution in seconds. Note that your raw data dates, as well as the start and end dates in the data configuration, should follow the new date format.
68 |
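For orientation, a minimal data configuration might look like the sketch below. The field names follow what `data_utils/dataset.py` reads from the config; all values, including the file paths and split dates, are illustrative and should be adapted to your data:

```
root: data
data_path: data/btc_raw.csv      # illustrative path to your raw OHLCV file
date_format: "%Y-%m-%d"
jumps: 86400                     # time resolution in seconds (daily)
start_date: "2018-09-17"
end_date: "2024-09-16"
additional_features: []          # e.g. extra columns present in the raw file
train_interval: ["2018-09-17", "2022-09-16"]
val_interval: ["2022-09-16", "2023-09-16"]
test_interval: ["2023-09-16", "2024-09-16"]
```

Alternatively, `train_ratio`/`test_ratio` can be used instead of explicit intervals to split the data randomly.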
78 | Here, `CONFIG_NAME` is the name of a config file in the training config folder, without its extension. For example, to train CryptoMamba with volume you can run the following command:
79 |
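A representative invocation, using the flags defined in `scripts/training.py` (adjust the config name and flags to your setup):

```
python scripts/training.py --config cmamba_v --save_checkpoints
```

Note that the scripts call `model.cuda()` and default to `--accelerator gpu`, so a CUDA device is expected. Evaluation follows the same pattern with `scripts/evaluation.py`, which additionally requires `--ckpt_path` pointing to a trained checkpoint.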
102 | We also have a script that predicts the next day's price and suggests a trade, given the prices of the previous days. You have to create a csv file similar to [this](data/one_day_pred.csv) and use this command:
103 |
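For example, assuming a checkpoint at `checkpoints/cmamba_v.ckpt` (a representative call built from the arguments of `scripts/one_day_pred.py`; adjust paths to your setup):

```
python scripts/one_day_pred.py --config cmamba_v --ckpt_path checkpoints/cmamba_v.ckpt --data_path data/one_day_pred.csv
```

Passing `--date YYYY-MM-DD` predicts for a specific day instead of the day after the last row in the CSV, and `--risk` controls the risk parameter of the Smart trade suggestion.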
155 | If you have any questions, feel free to open a Discussion. You can also email sepehri@usc.edu (Mohammad Shahab Sepehri) or mehradfa@usc.edu (Asal Mehradfar).
156 |
157 | -------------------------------------------------------------------------------- /scripts/training.py: -------------------------------------------------------------------------------- 1 | import os, sys, pathlib 2 | sys.path.insert(0, os.path.dirname(pathlib.Path(__file__).parent.absolute())) 3 | 4 | import yaml 5 | from utils import io_tools 6 | import pytorch_lightning as pl 7 | from argparse import ArgumentParser 8 | from pl_modules.data_module import CMambaDataModule 9 | from data_utils.data_transforms import DataTransform 10 | from pytorch_lightning.strategies.ddp import DDPStrategy 11 | from pytorch_lightning.loggers import TensorBoardLogger 12 | import warnings 13 | 14 | warnings.simplefilter(action='ignore', category=FutureWarning) 15 | 16 | 17 | ROOT = io_tools.get_root(__file__, num_returns=2) 18 | 19 | def get_args(): 20 | parser = ArgumentParser() 21 | parser.add_argument( 22 | "--logdir", 23 | type=str, 24 | help="Logging directory.", 25 | ) 26 | parser.add_argument( 27 | "--accelerator", 28 | type=str, 29 | default='gpu', 30 | help="The type of accelerator.", 31 | ) 32 | parser.add_argument( 33 | "--devices", 34 | type=int, 35 | default=1, 36 | help="Number of computing devices.", 37 | ) 38 | parser.add_argument( 39 | "--seed", 40 | type=int, 41 | default=23, 42 | help="Logging directory.", 43 | ) 44 | parser.add_argument( 45 | "--expname", 46 | type=str, 47 | default='Cmamba', 48 | help="Experiment name. Reconstructions will be saved under this folder.", 49 | ) 50 | parser.add_argument( 51 | "--config", 52 | type=str, 53 | default='cmamba_nv', 54 | help="Path to config file.", 55 | ) 56 | parser.add_argument( 57 | "--logger_type", 58 | default='tb', 59 | type=str, 60 | help="Path to config file.", 61 | ) 62 | parser.add_argument( 63 | "--num_workers", 64 | type=int, 65 | default=4, 66 | help="Number of parallel workers.", 67 | ) 68 | parser.add_argument( 69 | "--batch_size", 70 | type=int, 71 | default=32, 72 | help="batch_size", 73 | ) 74 | parser.add_argument( 75 | '--save_checkpoints', 76 | default=False, 77 | action='store_true', 78 | ) 79 | parser.add_argument( 80 | '--use_volume', 81 | default=False, 82 | action='store_true', 83 | ) 84 | 85 | parser.add_argument( 86 | '--resume_from_checkpoint', 87 | default=None, 88 | ) 89 | 90 | parser.add_argument( 91 | '--max_epochs', 92 | type=int, 93 | default=200, 94 | ) 95 | 96 | args = parser.parse_args() 97 | return args 98 | 99 | 100 | def save_all_hparams(log_dir, args): 101 | if not os.path.exists(log_dir): 102 | os.makedirs(log_dir) 103 | save_dict = vars(args) 104 | path = log_dir + '/hparams.yaml' 105 | if os.path.exists(path): 106 | return 107 | with open(path, 'w') as f: 108 | yaml.dump(save_dict, f) 109 | 110 | 111 | def load_model(config, logger_type): 112 | arch_config = io_tools.load_config_from_yaml('configs/models/archs.yaml') 113 | model_arch = config.get('model') 114 | model_config_path = f'{ROOT}/configs/models/{arch_config.get(model_arch)}' 115 | model_config = io_tools.load_config_from_yaml(model_config_path) 116 | 117 | normalize = model_config.get('normalize', False) 118 | hyperparams = config.get('hyperparams') 119 | if hyperparams is not None: 120 | for key in hyperparams.keys(): 121 | model_config.get('params')[key] = hyperparams.get(key) 122 | 123 | model_config.get('params')['logger_type'] = logger_type 124 | model = io_tools.instantiate_from_config(model_config) 125 | model.cuda() 126 | model.train() 127 | return model, normalize 128 | 129 | 130 | if __name__ == "__main__": 131 | 132 | args = 
get_args() 133 | pl.seed_everything(args.seed) 134 | logdir = args.logdir 135 | 136 | config = io_tools.load_config_from_yaml(f'{ROOT}/configs/training/{args.config}.yaml') 137 | 138 | data_config = io_tools.load_config_from_yaml(f"{ROOT}/configs/data_configs/{config.get('data_config')}.yaml") 139 | use_volume = args.use_volume 140 | 141 | if not use_volume: 142 | use_volume = config.get('use_volume') 143 | train_transform = DataTransform(is_train=True, use_volume=use_volume, additional_features=config.get('additional_features', [])) 144 | val_transform = DataTransform(is_train=False, use_volume=use_volume, additional_features=config.get('additional_features', [])) 145 | test_transform = DataTransform(is_train=False, use_volume=use_volume, additional_features=config.get('additional_features', [])) 146 | 147 | model, normalize = load_model(config, args.logger_type) 148 | 149 | tmp = vars(args) 150 | tmp.update(config) 151 | 152 | name = config.get('name', args.expname) 153 | if args.logger_type == 'tb': 154 | logger = TensorBoardLogger("logs", name=name) 155 | logger.log_hyperparams(args) 156 | elif args.logger_type == 'wandb': 157 | logger = pl.loggers.WandbLogger(project=args.expname, config=tmp) 158 | else: 159 | raise ValueError('Unknown logger type.') 160 | 161 | data_module = CMambaDataModule(data_config, 162 | train_transform=train_transform, 163 | val_transform=val_transform, 164 | test_transform=test_transform, 165 | batch_size=args.batch_size, 166 | distributed_sampler=True, 167 | num_workers=args.num_workers, 168 | normalize=normalize, 169 | window_size=model.window_size, 170 | ) 171 | 172 | callbacks = [] 173 | if args.save_checkpoints: 174 | checkpoint_callback = pl.callbacks.ModelCheckpoint( 175 | save_top_k=1, 176 | verbose=True, 177 | monitor="val/rmse", 178 | mode="min", 179 | filename='epoch{epoch}-val-rmse{val/rmse:.4f}', 180 | auto_insert_metric_name=False, 181 | save_last=True 182 | ) 183 | callbacks.append(checkpoint_callback) 184 | 185 | max_epochs = config.get('max_epochs', args.max_epochs) 186 | model.set_normalization_coeffs(data_module.factors) 187 | 188 | trainer = pl.Trainer(accelerator=args.accelerator, 189 | devices=args.devices, 190 | max_epochs=max_epochs, 191 | enable_checkpointing=args.save_checkpoints, 192 | log_every_n_steps=10, 193 | logger=logger, 194 | callbacks=callbacks, 195 | strategy = DDPStrategy(find_unused_parameters=False), 196 | ) 197 | 198 | trainer.fit(model, datamodule=data_module) 199 | if args.save_checkpoints: 200 | trainer.test(model, datamodule=data_module, ckpt_path=checkpoint_callback.best_model_path) 201 | -------------------------------------------------------------------------------- /models/layers/Embed.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | import math 4 | 5 | from mamba_ssm import Mamba 6 | class PositionalEmbedding(nn.Module): 7 | def __init__(self, d_model, max_len=5000): 8 | super(PositionalEmbedding, self).__init__() 9 | # Compute the positional encodings once in log space. 
10 | pe = torch.zeros(max_len, d_model).float() 11 | pe.require_grad = False 12 | 13 | position = torch.arange(0, max_len).float().unsqueeze(1) 14 | div_term = (torch.arange(0, d_model, 2).float() 15 | * -(math.log(10000.0) / d_model)).exp() 16 | 17 | pe[:, 0::2] = torch.sin(position * div_term) 18 | pe[:, 1::2] = torch.cos(position * div_term) 19 | 20 | pe = pe.unsqueeze(0) 21 | self.register_buffer('pe', pe) 22 | 23 | def forward(self, x): 24 | return self.pe[:, :x.size(1)] 25 | 26 | 27 | class TokenEmbedding(nn.Module): 28 | def __init__(self, c_in, d_model): 29 | super(TokenEmbedding, self).__init__() 30 | padding = 1 if torch.__version__ >= '1.5.0' else 2 31 | self.tokenConv = nn.Conv1d(in_channels=c_in, out_channels=d_model, 32 | kernel_size=3, padding=padding, padding_mode='circular', bias=False) 33 | for m in self.modules(): 34 | if isinstance(m, nn.Conv1d): 35 | nn.init.kaiming_normal_( 36 | m.weight, mode='fan_in', nonlinearity='leaky_relu') 37 | 38 | def forward(self, x): 39 | x = self.tokenConv(x.permute(0, 2, 1)).transpose(1, 2) 40 | return x 41 | 42 | 43 | class FixedEmbedding(nn.Module): 44 | def __init__(self, c_in, d_model): 45 | super(FixedEmbedding, self).__init__() 46 | 47 | w = torch.zeros(c_in, d_model).float() 48 | w.require_grad = False 49 | 50 | position = torch.arange(0, c_in).float().unsqueeze(1) 51 | div_term = (torch.arange(0, d_model, 2).float() 52 | * -(math.log(10000.0) / d_model)).exp() 53 | 54 | w[:, 0::2] = torch.sin(position * div_term) 55 | w[:, 1::2] = torch.cos(position * div_term) 56 | 57 | self.emb = nn.Embedding(c_in, d_model) 58 | self.emb.weight = nn.Parameter(w, requires_grad=False) 59 | 60 | def forward(self, x): 61 | return self.emb(x).detach() 62 | 63 | 64 | class TemporalEmbedding(nn.Module): 65 | def __init__(self, d_model, embed_type='fixed', freq='h'): 66 | super(TemporalEmbedding, self).__init__() 67 | 68 | minute_size = 4 69 | hour_size = 24 70 | weekday_size = 7 71 | day_size = 32 72 | month_size = 13 73 | 74 | Embed = FixedEmbedding if embed_type == 'fixed' else nn.Embedding 75 | if freq == 't': 76 | self.minute_embed = Embed(minute_size, d_model) 77 | self.hour_embed = Embed(hour_size, d_model) 78 | self.weekday_embed = Embed(weekday_size, d_model) 79 | self.day_embed = Embed(day_size, d_model) 80 | self.month_embed = Embed(month_size, d_model) 81 | 82 | def forward(self, x): 83 | x = x.long() 84 | minute_x = self.minute_embed(x[:, :, 4]) if hasattr( 85 | self, 'minute_embed') else 0. 
86 | hour_x = self.hour_embed(x[:, :, 3]) 87 | weekday_x = self.weekday_embed(x[:, :, 2]) 88 | day_x = self.day_embed(x[:, :, 1]) 89 | month_x = self.month_embed(x[:, :, 0]) 90 | 91 | return hour_x + weekday_x + day_x + month_x + minute_x 92 | 93 | 94 | class TimeFeatureEmbedding(nn.Module): 95 | def __init__(self, d_model, embed_type='timeF', freq='h'): 96 | super(TimeFeatureEmbedding, self).__init__() 97 | 98 | freq_map = {'h': 4, 't': 5, 's': 6, 99 | 'm': 1, 'a': 1, 'w': 2, 'd': 3, 'b': 3} 100 | d_inp = freq_map[freq] 101 | self.embed = nn.Linear(d_inp, d_model, bias=False) 102 | 103 | def forward(self, x): 104 | return self.embed(x) 105 | 106 | 107 | class DataEmbedding(nn.Module): 108 | def __init__(self, c_in, d_model, embed_type='fixed', freq='h', dropout=0.1): 109 | super(DataEmbedding, self).__init__() 110 | 111 | self.value_embedding = TokenEmbedding(c_in=c_in, d_model=d_model) 112 | self.position_embedding = PositionalEmbedding(d_model=d_model) 113 | self.temporal_embedding = TemporalEmbedding(d_model=d_model, embed_type=embed_type, 114 | freq=freq) if embed_type != 'timeF' else TimeFeatureEmbedding( 115 | d_model=d_model, embed_type=embed_type, freq=freq) 116 | self.dropout = nn.Dropout(p=dropout) 117 | 118 | def forward(self, x, x_mark): 119 | if x_mark is None: 120 | x = self.value_embedding(x) + self.position_embedding(x) 121 | else: 122 | x = self.value_embedding( 123 | x) + self.temporal_embedding(x_mark) + self.position_embedding(x) 124 | return self.dropout(x) 125 | 126 | class moving_avg(nn.Module): 127 | """ 128 | Moving average block to highlight the trend of time series 129 | """ 130 | def __init__(self, kernel_size, stride): 131 | super(moving_avg, self).__init__() 132 | self.kernel_size = kernel_size 133 | self.avg = nn.AvgPool1d(kernel_size=kernel_size, stride=stride, padding=0) 134 | 135 | def forward(self, x): 136 | # padding on the both ends of time series 137 | front = x[:, 0:1, :].repeat(1, (self.kernel_size - 1) // 2, 1) 138 | end = x[:, -1:, :].repeat(1, (self.kernel_size - 1) // 2, 1) 139 | x = torch.cat([front, x, end], dim=1) 140 | x = self.avg(x.permute(0, 2, 1)) 141 | x = x.permute(0, 2, 1) 142 | return x 143 | 144 | 145 | class series_decomp(nn.Module): 146 | """ 147 | Series decomposition block 148 | """ 149 | def __init__(self, kernel_size): 150 | super(series_decomp, self).__init__() 151 | self.moving_avg = moving_avg(kernel_size, stride=1) 152 | 153 | def forward(self, x): 154 | moving_mean = self.moving_avg(x) 155 | res = x - moving_mean 156 | return res, moving_mean 157 | class DataEmbedding_wo_pos(nn.Module): 158 | def __init__(self, c_in, d_model, embed_type='fixed', freq='h', dropout=0.1): 159 | super(DataEmbedding_wo_pos, self).__init__() 160 | 161 | self.value_embedding = TokenEmbedding(c_in=c_in, d_model=d_model) 162 | self.position_embedding = PositionalEmbedding(d_model=d_model) 163 | self.temporal_embedding = TemporalEmbedding(d_model=d_model, embed_type=embed_type, 164 | freq=freq) if embed_type != 'timeF' else TimeFeatureEmbedding( 165 | d_model=d_model, embed_type=embed_type, freq=freq) 166 | self.dropout = nn.Dropout(p=dropout) 167 | 168 | def forward(self, x, x_mark): 169 | if x_mark == None: 170 | x = self.value_embedding(x) 171 | else: 172 | x = self.value_embedding(x) + self.temporal_embedding(x_mark) 173 | return self.dropout(x) 174 | 175 | class DataEmbedding_inverted(nn.Module): 176 | def __init__(self, c_in, d_model, dropout=0.1): 177 | super(DataEmbedding_inverted, self).__init__() 178 | self.value_embedding = 
nn.Linear(c_in, d_model) 179 | self.dropout = nn.Dropout(p=dropout) 180 | 181 | def forward(self, x, x_mark): 182 | x = x.permute(0, 2, 1) 183 | 184 | # x: [Batch Variate Time] 185 | if x_mark is None: 186 | 187 | x = self.value_embedding(x) 188 | else: 189 | x = self.value_embedding(torch.cat([x, x_mark.permute(0, 2, 1)], 1)) 190 | return self.dropout(x) 191 | -------------------------------------------------------------------------------- /data_utils/dataset.py: -------------------------------------------------------------------------------- 1 | import os 2 | import torch 3 | import time 4 | import random 5 | import pandas as pd 6 | from tqdm import tqdm 7 | from pathlib import Path 8 | from utils import io_tools 9 | from datetime import datetime 10 | from utils.io_tools import load_config_from_yaml 11 | 12 | 13 | class CMambaDataset(torch.utils.data.Dataset): 14 | 15 | def __init__( 16 | self, 17 | data, 18 | split, 19 | window_size, 20 | transform, 21 | ): 22 | 23 | self.data = data 24 | self.transform = transform 25 | self.window_size = window_size 26 | 27 | print('{} data points loaded as {} split.'.format(len(self), split)) 28 | 29 | def __len__(self): 30 | return max(0, len(self.data) - self.window_size - 1) 31 | 32 | def __getitem__(self, i: int): 33 | sample = self.data.iloc[i: i + self.window_size + 1] 34 | sample = self.transform(sample) 35 | return sample 36 | 37 | class DataConverter: 38 | def __init__(self, config) -> None: 39 | self.config = config 40 | self.root = config.get('root') 41 | self.jumps = config.get('jumps') 42 | self.date_format = config.get('date_format', "%Y-%m-%d") 43 | self.additional_features = config.get('additional_features', []) 44 | self.end_date = config.get('end_date') 45 | self.data_path = config.get('data_path') 46 | self.start_date = config.get('start_date') 47 | self.folder_name = f'{self.start_date}_{self.end_date}_{self.jumps}' 48 | self.file_path = f'{self.root}/{self.folder_name}' 49 | 50 | 51 | def process_data(self): 52 | data, start, stop = self.load_data() 53 | new_df = {} 54 | for key in ['Timestamp', 'High', 'Low', 'Open', 'Close', 'Volume']: 55 | new_df[key] = [] 56 | for key in self.additional_features: 57 | new_df[key] = [] 58 | for i in tqdm(range(start, stop - self.jumps // 60 + 1, self.jumps)): 59 | high, low, open, close, vol = self.merge_data(data, i, self.jumps) 60 | additional_features = self.merge_additional(data, i, self.jumps) 61 | if high is None: 62 | continue 63 | new_df.get('Timestamp').append(i) 64 | new_df.get('High').append(high) 65 | new_df.get('Low').append(low) 66 | new_df.get('Open').append(open) 67 | new_df.get('Close').append(close) 68 | new_df.get('Volume').append(vol) 69 | for key in additional_features.keys(): 70 | new_df.get(key).append(additional_features.get(key)) 71 | 72 | df = pd.DataFrame(new_df) 73 | 74 | return df 75 | 76 | def get_data(self): 77 | tmp = '----' 78 | data_path = f'{self.file_path}/{tmp}.csv' 79 | yaml_path = f'{self.file_path}/config.pkl' 80 | if os.path.isfile(data_path.replace(tmp, 'train')): 81 | train = pd.read_csv(data_path.replace(tmp, 'train'), index_col=0) 82 | val = pd.read_csv(data_path.replace(tmp, 'val'), index_col=0) 83 | test = pd.read_csv(data_path.replace(tmp, 'test'), index_col=0) 84 | return train, val, test 85 | 86 | df = self.process_data() 87 | 88 | train, val, test = self.split(df) 89 | 90 | if not os.path.isdir(self.file_path): 91 | os.mkdir(self.file_path) 92 | 93 | train.to_csv(data_path.replace('----', 'train')) 94 | val.to_csv(data_path.replace('----', 
'val')) 95 | test.to_csv(data_path.replace('----', 'test')) 96 | io_tools.save_yaml(self.config, yaml_path) 97 | return train, val, test 98 | 99 | 100 | def split(self, data): 101 | if self.config.get('train_ratio') is not None: 102 | total = len(data) 103 | n_train = int(self.train_ratio * total) 104 | n_test = int(self.test_ratio * total) 105 | total = list(range(total)) 106 | random.shuffle(total) 107 | 108 | train = sorted(total[: n_train]) 109 | val = sorted(total[n_train: -n_test]) 110 | test = sorted(total[-n_test: ]) 111 | train = data.iloc[train] 112 | val = data.iloc[val] 113 | test = data.iloc[test] 114 | else: 115 | tmp_dict = {} 116 | for key in ['train', 'val', 'test']: 117 | start, stop = self.config.get(f'{key}_interval') 118 | start = self.generate_timestamp(start) 119 | stop = self.generate_timestamp(stop) 120 | tmp = data[data['Timestamp'] >= start].reset_index(drop=True) 121 | tmp = tmp[tmp['Timestamp'] < stop].reset_index(drop=True) 122 | tmp_dict[key] = tmp 123 | train = tmp_dict.get('train') 124 | val = tmp_dict.get('val') 125 | test = tmp_dict.get('test') 126 | return train, val, test 127 | 128 | 129 | def load_data(self): 130 | df = pd.read_csv(self.data_path) 131 | if 'Timestamp' not in df.keys(): 132 | dates = df.get('Date').to_list() 133 | df['Timestamp'] = [self.generate_timestamp(x) for x in dates] 134 | if self.start_date is None: 135 | self.start_date = self.convert_timestamp(min(list(df.get('Timestamp')))).strftime(self.date_format) 136 | # raise ValueError(self.start_date) 137 | if self.end_date is None: 138 | self.end_date = self.convert_timestamp(max(list(df.get('Timestamp'))) + self.jumps).strftime(self.date_format) 139 | start = self.generate_timestamp(self.start_date) 140 | stop = self.generate_timestamp(self.end_date) 141 | df = df[df['Timestamp'] >= start].reset_index(drop=True) 142 | df = df[df['Timestamp'] < stop].reset_index(drop=True) 143 | final_day = self.generate_timestamp(self.end_date) 144 | return df, start, final_day 145 | 146 | def merge_additional(self, data, start, jump): 147 | tmp = data[data['Timestamp'] >= start].reset_index(drop=True) 148 | tmp = tmp[tmp['Timestamp'] < start + jump].reset_index(drop=True) 149 | if len(tmp) == 0: 150 | return None, None, None, None, None 151 | row = tmp.iloc[-1] 152 | results = {} 153 | for key in self.additional_features: 154 | results[key] = float(row.get(key)) 155 | return results 156 | 157 | def generate_timestamp(self, date): 158 | return int(time.mktime(datetime.strptime(date, self.date_format).timetuple())) 159 | 160 | @staticmethod 161 | def merge_data(data, start, jump): 162 | tmp = data[data['Timestamp'] >= start].reset_index(drop=True) 163 | tmp = tmp[tmp['Timestamp'] < start + jump].reset_index(drop=True) 164 | if len(tmp) == 0: 165 | return None, None, None, None, None 166 | _, high, low, open, close, _ = DataConverter.get_row_values(tmp.iloc[0]) 167 | vol = 0 168 | for row in tmp.iterrows(): 169 | _, h, l, _, close, v = DataConverter.get_row_values(row[1]) 170 | high = max(high, h) 171 | low = min(low, l) 172 | vol += v 173 | return high, low, open, close, vol 174 | 175 | @staticmethod 176 | def get_row_values(row): 177 | ts = int(row.get('Timestamp')) 178 | high = float(row.get('High')) 179 | low = float(row.get('Low')) 180 | open = float(row.get('Open')) 181 | close = float(row.get('Close')) 182 | vol = float(row.get('Volume')) 183 | return ts, high, low, open, close, vol 184 | 185 | @staticmethod 186 | def convert_timestamp(timestamp): 187 | return datetime.fromtimestamp(timestamp) 
-------------------------------------------------------------------------------- /scripts/one_day_pred.py: -------------------------------------------------------------------------------- 1 | import os, sys, pathlib 2 | sys.path.insert(0, os.path.dirname(pathlib.Path(__file__).parent.absolute())) 3 | 4 | import time 5 | import yaml 6 | import torch 7 | import numpy as np 8 | import pandas as pd 9 | from utils import io_tools 10 | from datetime import datetime 11 | from argparse import ArgumentParser 12 | from pl_modules.data_module import CMambaDataModule 13 | from data_utils.data_transforms import DataTransform 14 | from utils.trade import buy_sell_vanilla, buy_sell_smart 15 | import warnings 16 | 17 | warnings.simplefilter(action='ignore', category=FutureWarning) 18 | 19 | 20 | ROOT = io_tools.get_root(__file__, num_returns=2) 21 | 22 | def get_args(): 23 | parser = ArgumentParser() 24 | parser.add_argument( 25 | "--accelerator", 26 | type=str, 27 | default='gpu', 28 | help="The type of accelerator.", 29 | ) 30 | parser.add_argument( 31 | "--date", 32 | type=str, 33 | default=None, 34 | ) 35 | parser.add_argument( 36 | "--devices", 37 | type=int, 38 | default=1, 39 | help="Number of computing devices.", 40 | ) 41 | parser.add_argument( 42 | "--seed", 43 | type=int, 44 | default=23, 45 | help="Logging directory.", 46 | ) 47 | parser.add_argument( 48 | "--config", 49 | type=str, 50 | default='cmamba_v', 51 | help="Path to config file.", 52 | ) 53 | parser.add_argument( 54 | '--use_volume', 55 | default=False, 56 | action='store_true', 57 | ) 58 | parser.add_argument( 59 | "--data_path", 60 | default='data/one_day_pred.csv', 61 | type=str, 62 | help="Path to config file.", 63 | ) 64 | parser.add_argument( 65 | "--ckpt_path", 66 | required=True, 67 | type=str, 68 | ) 69 | parser.add_argument( 70 | "--risk", 71 | default=2, 72 | type=int, 73 | ) 74 | 75 | args = parser.parse_args() 76 | return args 77 | 78 | def print_and_write(file, txt, add_new_line=True): 79 | print(txt) 80 | if add_new_line: 81 | file.write(f'{txt}\n') 82 | else: 83 | file.write(txt) 84 | 85 | def init_dirs(args, date): 86 | path = f'{ROOT}/Predictions/{args.config}/' 87 | if not os.path.isdir(path): 88 | os.makedirs(path) 89 | txt_file = open(f'{path}/{date}.txt', 'w') 90 | return txt_file 91 | 92 | def save_all_hparams(log_dir, args): 93 | if not os.path.exists(log_dir): 94 | os.makedirs(log_dir) 95 | save_dict = vars(args) 96 | save_dict.pop('checkpoint_callback') 97 | with open(log_dir + '/hparams.yaml', 'w') as f: 98 | yaml.dump(save_dict, f) 99 | 100 | def load_model(config, ckpt_path): 101 | arch_config = io_tools.load_config_from_yaml('configs/models/archs.yaml') 102 | model_arch = config.get('model') 103 | model_config_path = f'{ROOT}/configs/models/{arch_config.get(model_arch)}' 104 | model_config = io_tools.load_config_from_yaml(model_config_path) 105 | normalize = model_config.get('normalize', False) 106 | model_class = io_tools.get_obj_from_str(model_config.get('target')) 107 | model = model_class.load_from_checkpoint(ckpt_path, **model_config.get('params')) 108 | model.cuda() 109 | return model, normalize 110 | 111 | @torch.no_grad() 112 | def run_model(model, dataloader): 113 | target_list = [] 114 | preds_list = [] 115 | timetamps = [] 116 | with torch.no_grad(): 117 | for batch in dataloader: 118 | ts = batch.get('Timestamp').numpy().reshape(-1) 119 | target = batch.get('Close').numpy().reshape(-1) 120 | features = batch.get('features').to(model.device) 121 | preds = 
model(features).cpu().numpy().reshape(-1) 122 | target_list += [float(x) for x in list(target)] 123 | preds_list += [float(x) for x in list(preds)] 124 | timetamps += [float(x) for x in list(ts)] 125 | targets = np.asarray(target_list) 126 | preds = np.asarray(preds_list) 127 | targets_tensor = torch.tensor(target_list) 128 | preds_tensor = torch.tensor(preds_list) 129 | timetamps = [datetime.fromtimestamp(int(x)) for x in timetamps] 130 | loss = float(model.loss(preds_tensor, targets_tensor)) 131 | mape = float(model.mape(preds_tensor, targets_tensor)) 132 | return timetamps, targets, preds, loss, mape 133 | 134 | 135 | 136 | if __name__ == "__main__": 137 | 138 | args = get_args() 139 | 140 | config = io_tools.load_config_from_yaml(f'{ROOT}/configs/training/{args.config}.yaml') 141 | 142 | data_config = io_tools.load_config_from_yaml(f"{ROOT}/configs/data_configs/{config.get('data_config')}.yaml") 143 | 144 | use_volume = config.get('use_volume', args.use_volume) 145 | model, normalize = load_model(config, args.ckpt_path) 146 | 147 | data = pd.read_csv(args.data_path) 148 | if 'Date' in data.keys(): 149 | data['Timestamp'] = [float(time.mktime(datetime.strptime(x, "%Y-%m-%d").timetuple())) for x in data['Date']] 150 | data = data.sort_values(by='Timestamp').reset_index() 151 | 152 | train_transform = DataTransform(is_train=True, use_volume=use_volume, additional_features=config.get('additional_features', [])) 153 | val_transform = DataTransform(is_train=False, use_volume=use_volume, additional_features=config.get('additional_features', [])) 154 | test_transform = DataTransform(is_train=False, use_volume=use_volume, additional_features=config.get('additional_features', [])) 155 | data_module = CMambaDataModule(data_config, 156 | train_transform=train_transform, 157 | val_transform=val_transform, 158 | test_transform=test_transform, 159 | batch_size=1, 160 | distributed_sampler=False, 161 | num_workers=1, 162 | normalize=normalize, 163 | ) 164 | 165 | # end_date = "2024-27-10" 166 | if args.date is None: 167 | end_ts = max(data['Timestamp']) + 24 * 60 * 60 168 | else: 169 | end_ts = int(time.mktime(datetime.strptime(args.date, "%Y-%m-%d").timetuple())) 170 | start_ts = end_ts - 14 * 24 * 60 * 60 - 60 * 60 171 | pred_date = datetime.fromtimestamp(end_ts).strftime("%Y-%m-%d") 172 | data = data[data['Timestamp'] < end_ts] 173 | data = data[data['Timestamp'] >= start_ts - 60 * 60] 174 | 175 | txt_file = init_dirs(args, pred_date) 176 | 177 | 178 | features = {} 179 | key_list = ['Timestamp', 'Open', 'High', 'Low', 'Close'] 180 | if use_volume: 181 | key_list.append('Volume') 182 | 183 | for key in key_list: 184 | tmp = list(data.get(key)) 185 | if normalize: 186 | scale = data_module.factors.get(key).get('max') - data_module.factors.get(key).get('min') 187 | shift = data_module.factors.get(key).get('min') 188 | else: 189 | scale = 1 190 | shift = 0 191 | if key == 'Volume': 192 | tmp = [x / 1e9 for x in tmp] 193 | tmp = [(x - shift) / scale for x in tmp] 194 | features[key] = torch.tensor(tmp).reshape(1, -1) 195 | if key == 'Timestamp': 196 | t_scale = scale 197 | t_shift = shift 198 | if key == model.y_key: 199 | scale_pred = scale 200 | shift_pred = shift 201 | 202 | x = torch.cat([features.get(x) for x in features.keys()], dim=0) 203 | 204 | close_idx = -2 if use_volume else -1 205 | today = float(x[close_idx, -1]) 206 | 207 | with torch.no_grad(): 208 | pred = float(model(x[None, ...].cuda()).cpu()) * scale_pred + shift_pred 209 | 210 | print('') 211 | print_and_write(txt_file, 
f'Prediction date: {pred_date}\nPrediction: {round(pred, 2)}\nToday value: {round(today, 2)}') 212 | 213 | b, s = buy_sell_smart(today, pred, 100, 100, risk=args.risk) 214 | if b < 100: 215 | tmp = round((100 - b), 2) 216 | print_and_write(txt_file, f'Smart trade: {tmp}% buy') 217 | if s < 100: 218 | tmp = round((100 - s), 2) 219 | print_and_write(txt_file, f'Smart trade: {tmp}% sell') 220 | 221 | b, s = buy_sell_vanilla(today, pred, 100, 100) 222 | if b < 100: 223 | assert b == 0 224 | print_and_write(txt_file, f'Vanilla trade: buy') 225 | elif s < 100: 226 | assert s == 0 227 | print_and_write(txt_file, f'Vanilla trade: sell') 228 | else: 229 | print_and_write(txt_file, f'Vanilla trade: -') 230 | 231 | 232 | 233 | 234 | 235 | 236 | -------------------------------------------------------------------------------- /scripts/evaluation.py: -------------------------------------------------------------------------------- 1 | import os, sys, pathlib 2 | sys.path.insert(0, os.path.dirname(pathlib.Path(__file__).parent.absolute())) 3 | 4 | import yaml 5 | import torch 6 | import matplotlib 7 | import numpy as np 8 | from utils import io_tools 9 | from datetime import datetime 10 | import pytorch_lightning as pl 11 | import matplotlib.ticker as ticker 12 | from argparse import ArgumentParser 13 | from pl_modules.data_module import CMambaDataModule 14 | from data_utils.data_transforms import DataTransform 15 | 16 | import matplotlib.pyplot as plt 17 | import matplotlib.dates as mdates 18 | import warnings 19 | 20 | warnings.simplefilter(action='ignore', category=FutureWarning) 21 | 22 | import seaborn as sns 23 | sns.set_theme(style='whitegrid', context='paper', font_scale=3) 24 | palette = sns.color_palette('muted') 25 | 26 | 27 | 28 | ROOT = io_tools.get_root(__file__, num_returns=2) 29 | 30 | def get_args(): 31 | parser = ArgumentParser() 32 | parser.add_argument( 33 | "--logdir", 34 | type=str, 35 | help="Logging directory.", 36 | ) 37 | parser.add_argument( 38 | "--accelerator", 39 | type=str, 40 | default='gpu', 41 | help="The type of accelerator.", 42 | ) 43 | parser.add_argument( 44 | "--devices", 45 | type=int, 46 | default=1, 47 | help="Number of computing devices.", 48 | ) 49 | parser.add_argument( 50 | "--seed", 51 | type=int, 52 | default=23, 53 | help="Logging directory.", 54 | ) 55 | parser.add_argument( 56 | "--expname", 57 | type=str, 58 | default='Cmamba', 59 | help="Experiment name. 
Reconstructions will be saved under this folder.", 60 | ) 61 | parser.add_argument( 62 | "--config", 63 | type=str, 64 | default='cmamba_nv', 65 | help="Path to config file.", 66 | ) 67 | parser.add_argument( 68 | "--logger_type", 69 | default='tb', 70 | type=str, 71 | help="Path to config file.", 72 | ) 73 | parser.add_argument( 74 | '--use_volume', 75 | default=False, 76 | action='store_true', 77 | ) 78 | parser.add_argument( 79 | "--ckpt_path", 80 | required=True, 81 | type=str, 82 | help="Path to config file.", 83 | ) 84 | parser.add_argument( 85 | "--num_workers", 86 | type=int, 87 | default=4, 88 | help="Number of parallel workers.", 89 | ) 90 | parser.add_argument( 91 | "--batch_size", 92 | type=int, 93 | default=32, 94 | help="batch_size", 95 | ) 96 | 97 | args = parser.parse_args() 98 | return args 99 | 100 | def print_and_write(file, txt, add_new_line=True): 101 | print(txt) 102 | if add_new_line: 103 | file.write(f'{txt}\n') 104 | else: 105 | file.write(txt) 106 | 107 | def save_all_hparams(log_dir, args): 108 | if not os.path.exists(log_dir): 109 | os.makedirs(log_dir) 110 | save_dict = vars(args) 111 | save_dict.pop('checkpoint_callback') 112 | with open(log_dir + '/hparams.yaml', 'w') as f: 113 | yaml.dump(save_dict, f) 114 | 115 | def init_dirs(args, name): 116 | path = f'{ROOT}/Results/{name}/{args.config}' 117 | if not os.path.isdir(path): 118 | os.makedirs(path) 119 | txt_file = open(f'{path}/metrics.txt', 'w') 120 | plot_path = f'{path}/pred.jpg' 121 | return txt_file, plot_path 122 | 123 | def load_model(config, ckpt_path): 124 | arch_config = io_tools.load_config_from_yaml('configs/models/archs.yaml') 125 | model_arch = config.get('model') 126 | model_config_path = f'{ROOT}/configs/models/{arch_config.get(model_arch)}' 127 | model_config = io_tools.load_config_from_yaml(model_config_path) 128 | normalize = model_config.get('normalize', False) 129 | model_class = io_tools.get_obj_from_str(model_config.get('target')) 130 | model = model_class.load_from_checkpoint(ckpt_path, **model_config.get('params')) 131 | model.cuda() 132 | model.eval() 133 | return model, normalize 134 | 135 | @torch.no_grad() 136 | def run_model(model, dataloader, factors=None): 137 | target_list = [] 138 | preds_list = [] 139 | timetamps = [] 140 | with torch.no_grad(): 141 | for batch in dataloader: 142 | ts = batch.get('Timestamp').numpy().reshape(-1) 143 | target = batch.get(model.y_key).numpy().reshape(-1) 144 | features = batch.get('features').to(model.device) 145 | preds = model(features).cpu().numpy().reshape(-1) 146 | target_list += [float(x) for x in list(target)] 147 | preds_list += [float(x) for x in list(preds)] 148 | timetamps += [float(x) for x in list(ts)] 149 | 150 | if factors is not None: 151 | scale = factors.get(model.y_key).get('max') - factors.get(model.y_key).get('min') 152 | shift = factors.get(model.y_key).get('min') 153 | target_list = [x * scale + shift for x in target_list] 154 | preds_list = [x * scale + shift for x in preds_list] 155 | scale = factors.get('Timestamp').get('max') - factors.get('Timestamp').get('min') 156 | shift = factors.get('Timestamp').get('min') 157 | timetamps = [x * scale + shift for x in timetamps] 158 | targets = np.asarray(target_list) 159 | preds = np.asarray(preds_list) 160 | targets_tensor = torch.tensor(target_list) 161 | preds_tensor = torch.tensor(preds_list) 162 | timetamps = [datetime.fromtimestamp(int(x)) for x in timetamps] 163 | mse = float(model.mse(preds_tensor, targets_tensor)) 164 | mape = float(model.mape(preds_tensor, 
targets_tensor)) 165 | l1 = float(model.l1(preds_tensor, targets_tensor)) 166 | return timetamps, targets, preds, mse, mape, l1 167 | 168 | 169 | 170 | if __name__ == "__main__": 171 | 172 | args = get_args() 173 | pl.seed_everything(args.seed) 174 | logdir = args.logdir 175 | 176 | config = io_tools.load_config_from_yaml(f'{ROOT}/configs/training/{args.config}.yaml') 177 | name = config.get('name', args.expname) 178 | 179 | data_config = io_tools.load_config_from_yaml(f"{ROOT}/configs/data_configs/{config.get('data_config')}.yaml") 180 | 181 | use_volume = args.use_volume 182 | if not use_volume: 183 | use_volume = config.get('use_volume') 184 | train_transform = DataTransform(is_train=True, use_volume=use_volume, additional_features=config.get('additional_features', [])) 185 | val_transform = DataTransform(is_train=False, use_volume=use_volume, additional_features=config.get('additional_features', [])) 186 | test_transform = DataTransform(is_train=False, use_volume=use_volume, additional_features=config.get('additional_features', [])) 187 | 188 | model, normalize = load_model(config, args.ckpt_path) 189 | data_module = CMambaDataModule(data_config, 190 | train_transform=train_transform, 191 | val_transform=val_transform, 192 | test_transform=test_transform, 193 | batch_size=args.batch_size, 194 | distributed_sampler=False, 195 | num_workers=args.num_workers, 196 | normalize=normalize, 197 | ) 198 | 199 | train_loader = data_module.train_dataloader() 200 | val_loader = data_module.val_dataloader() 201 | test_loader = data_module.test_dataloader() 202 | dataloader_list = [train_loader, val_loader, test_loader] 203 | titles = ['Train', 'Val', 'Test'] 204 | colors = ['red', 'green', 'magenta'] 205 | 206 | factors = None 207 | if normalize: 208 | factors = data_module.factors 209 | all_targets = [] 210 | all_timestamps = [] 211 | 212 | 213 | f, plot_path = init_dirs(args, name) 214 | 215 | plt.figure(figsize=(20, 10)) 216 | print_format = '{:^7} {:^15} {:^10} {:^7} {:^10}' 217 | txt = print_format.format('Split', 'MSE', 'RMSE', 'MAPE', 'MAE') 218 | print_and_write(f, txt) 219 | for key, dataloader, c in zip(titles, dataloader_list, colors): 220 | timstamps, targets, preds, mse, mape, l1 = run_model(model, dataloader, factors) 221 | all_timestamps += timstamps 222 | all_targets += list(targets) 223 | txt = print_format.format(key, round(mse, 3), round(np.sqrt(mse), 3), round(mape, 5), round(l1, 3)) 224 | print_and_write(f, txt) 225 | # plt.plot(timstamps, preds, color=c) 226 | sns.lineplot(x=timstamps, y=preds, color=c, linewidth=2.5, label=key) 227 | 228 | sns.lineplot(x=all_timestamps, y=all_targets, color='blue', zorder=0, linewidth=2.5, label='Target') 229 | plt.legend() 230 | plt.ylabel('Price ($)') 231 | plt.xlim([all_timestamps[0], all_timestamps[-1]]) 232 | plt.xticks(rotation=30) 233 | ax = plt.gca() 234 | ax.yaxis.set_major_formatter(ticker.FuncFormatter(lambda x, pos: '{:,.0f}K'.format(x/1000))) 235 | plt.savefig(plot_path, dpi=300, bbox_inches='tight') 236 | f.close() 237 | 238 | -------------------------------------------------------------------------------- /scripts/simulate_trade.py: -------------------------------------------------------------------------------- 1 | import os, sys, pathlib 2 | sys.path.insert(0, os.path.dirname(pathlib.Path(__file__).parent.absolute())) 3 | 4 | import torch 5 | import numpy as np 6 | import seaborn as sns 7 | from utils import io_tools 8 | from utils.trade import trade 9 | from datetime import datetime 10 | import matplotlib.pyplot as plt 11 | 
from argparse import ArgumentParser 12 | from pl_modules.data_module import CMambaDataModule 13 | from data_utils.data_transforms import DataTransform 14 | import warnings 15 | 16 | warnings.simplefilter(action='ignore', category=FutureWarning) 17 | 18 | sns.set_theme(style='whitegrid', context='paper', font_scale=2) 19 | palette = sns.color_palette('muted') 20 | 21 | ROOT = io_tools.get_root(__file__, num_returns=2) 22 | 23 | LABEL_DICT = { 24 | 'cmamba': 'CryptoMamba', 25 | 'lstm': 'LSTM', 26 | 'lstm_bi': 'Bi-LSTM', 27 | 'gru': 'GRU', 28 | 'smamba': 'S-Mamba', 29 | 'itransformer': 'iTransformer', 30 | } 31 | 32 | def get_args(): 33 | parser = ArgumentParser() 34 | parser.add_argument( 35 | "--accelerator", 36 | type=str, 37 | default='gpu', 38 | help="The type of accelerator.", 39 | ) 40 | parser.add_argument( 41 | "--devices", 42 | type=int, 43 | default=1, 44 | help="Number of computing devices.", 45 | ) 46 | parser.add_argument( 47 | "--seed", 48 | type=int, 49 | default=23, 50 | help="Logging directory.", 51 | ) 52 | parser.add_argument( 53 | "--expname", 54 | type=str, 55 | default='Cmamba', 56 | help="Experiment name. Reconstructions will be saved under this folder.", 57 | ) 58 | parser.add_argument( 59 | "--config", 60 | type=str, 61 | required=True, 62 | help="Path to config file.", 63 | ) 64 | parser.add_argument( 65 | "--logger_type", 66 | default='tb', 67 | type=str, 68 | help="Path to config file.", 69 | ) 70 | parser.add_argument( 71 | "--ckpt_path", 72 | default=None, 73 | type=str, 74 | help="Path to config file.", 75 | ) 76 | parser.add_argument( 77 | "--num_workers", 78 | type=int, 79 | default=4, 80 | help="Number of parallel workers.", 81 | ) 82 | parser.add_argument( 83 | "--batch_size", 84 | type=int, 85 | default=32, 86 | help="batch_size", 87 | ) 88 | 89 | parser.add_argument( 90 | "--balance", 91 | type=float, 92 | default=100, 93 | help="initial money", 94 | ) 95 | 96 | parser.add_argument( 97 | "--risk", 98 | type=float, 99 | default=2, 100 | ) 101 | 102 | parser.add_argument( 103 | "--split", 104 | type=str, 105 | default='test', 106 | choices={'test', 'val', 'train'}, 107 | ) 108 | 109 | parser.add_argument( 110 | "--trade_mode", 111 | type=str, 112 | default='smart', 113 | choices={'smart', 'smart_w_short', 'vanilla', 'no_strategy'}, 114 | ) 115 | 116 | args = parser.parse_args() 117 | return args 118 | 119 | def load_model(config, ckpt_path, config_name=None): 120 | if ckpt_path is None: 121 | ckpt_path = f'{ROOT}/checkpoints/{config_name}.ckpt' 122 | arch_config = io_tools.load_config_from_yaml('configs/models/archs.yaml') 123 | model_arch = config.get('model') 124 | model_config_path = f'{ROOT}/configs/models/{arch_config.get(model_arch)}' 125 | model_config = io_tools.load_config_from_yaml(model_config_path) 126 | normalize = model_config.get('normalize', False) 127 | model_class = io_tools.get_obj_from_str(model_config.get('target')) 128 | model = model_class.load_from_checkpoint(ckpt_path, **model_config.get('params')) 129 | model.cuda() 130 | model.eval() 131 | return model, normalize 132 | 133 | 134 | def init_dirs(args, name): 135 | path = f'{ROOT}/Results/{name}/{args.config}' 136 | if name == 'all': 137 | path = f'{ROOT}/Results/all/' 138 | if not os.path.isdir(path): 139 | os.makedirs(path) 140 | 141 | def max_drawdown(prices): 142 | prices = np.array(prices) 143 | peak = np.maximum.accumulate(prices) 144 | drawdown = (prices - peak) / peak 145 | mdd = drawdown.min() 146 | return -mdd 147 | 148 | 149 | @torch.no_grad() 150 | def run_model(model, 
dataloader, factors=None): 151 | target_list = [] 152 | preds_list = [] 153 | timetamps = [] 154 | with torch.no_grad(): 155 | for batch in dataloader: 156 | ts = batch.get('Timestamp').numpy().reshape(-1) 157 | target = batch.get(model.y_key).numpy().reshape(-1) 158 | features = batch.get('features').to(model.device) 159 | preds = model(features).cpu().numpy().reshape(-1) 160 | target_list += [float(x) for x in list(target)] 161 | preds_list += [float(x) for x in list(preds)] 162 | if factors is not None: 163 | timetamps += [float(x) for x in list(batch.get('Timestamp_orig').numpy().reshape(-1))] 164 | else: 165 | timetamps += [float(x) for x in list(ts)] 166 | 167 | if factors is not None: 168 | scale = factors.get(model.y_key).get('max') - factors.get(model.y_key).get('min') 169 | shift = factors.get(model.y_key).get('min') 170 | target_list = [x * scale + shift for x in target_list] 171 | preds_list = [x * scale + shift for x in preds_list] 172 | 173 | targets = np.asarray(target_list) 174 | preds = np.asarray(preds_list) 175 | 176 | return timetamps, targets, preds 177 | 178 | 179 | if __name__ == '__main__': 180 | args = get_args() 181 | init_dir_flag = False 182 | colors = ['darkblue', 'yellowgreen', 'crimson', 'darkviolet', 'orange', 'magenta'] 183 | if args.config == 'all': 184 | config_list = [x.replace('.ckpt', '') for x in os.listdir(f'{ROOT}/checkpoints/') if '_nv.ckpt' in x] 185 | elif args.config == 'all_v': 186 | config_list = [x.replace('.ckpt', '') for x in os.listdir(f'{ROOT}/checkpoints/') if '_v.ckpt' in x] 187 | init_dirs(args, 'all') 188 | else: 189 | config_list = [args.config] 190 | colors = ['darkblue'] 191 | init_dir_flag = True 192 | 193 | plt.figure(figsize=(15, 10)) 194 | for conf, c in zip(config_list, colors): 195 | config = io_tools.load_config_from_yaml(f'{ROOT}/configs/training/{conf}.yaml') 196 | if init_dir_flag: 197 | init_dir_flag = False 198 | init_dirs(args, config.get('name', args.expname)) 199 | data_config = io_tools.load_config_from_yaml(f"{ROOT}/configs/data_configs/{config.get('data_config')}.yaml") 200 | 201 | model, normalize = load_model(config, args.ckpt_path, config_name=conf) 202 | 203 | use_volume = config.get('use_volume', False) 204 | test_transform = DataTransform(is_train=False, use_volume=use_volume, additional_features=config.get('additional_features', [])) 205 | data_module = CMambaDataModule(data_config, 206 | train_transform=test_transform, 207 | val_transform=test_transform, 208 | test_transform=test_transform, 209 | batch_size=args.batch_size, 210 | distributed_sampler=False, 211 | num_workers=args.num_workers, 212 | normalize=normalize, 213 | ) 214 | 215 | if args.split == 'test': 216 | test_loader = data_module.test_dataloader() 217 | if args.split == 'val': 218 | test_loader = data_module.val_dataloader() 219 | if args.split == 'train': 220 | test_loader = data_module.train_dataloader() 221 | 222 | factors = None 223 | if normalize: 224 | factors = data_module.factors 225 | timstamps, targets, preds = run_model(model, test_loader, factors) 226 | 227 | data = test_loader.dataset.data 228 | tmp = data.get('Close') 229 | time_key = 'Timestamp' 230 | if normalize: 231 | time_key = 'Timestamp_orig' 232 | scale = factors.get(model.y_key).get('max') - factors.get(model.y_key).get('min') 233 | shift = factors.get(model.y_key).get('min') 234 | data[model.y_key] = data[model.y_key] * scale + shift 235 | 236 | balance, balance_in_time = trade(data, time_key, timstamps, targets, preds, 237 | balance=args.balance, mode=args.trade_mode, 
238 | risk=args.risk, y_key=model.y_key) 239 | 240 | print(f'{conf} -- Final balance: {round(balance, 2)}') 241 | print(f'{conf} -- Maximum Draw Down : {round(max_drawdown(balance_in_time) * 100, 2)}') 242 | 243 | label = conf.replace("_nv", "").replace("_v", "") 244 | label = LABEL_DICT.get(label) 245 | tmp = [timstamps[0] - 24 * 60 * 60] + timstamps 246 | tmp = [datetime.fromtimestamp(int(x)) for x in tmp] 247 | sns.lineplot(x=tmp, 248 | y=balance_in_time, 249 | color=c, 250 | zorder=0, 251 | linewidth=2.5, 252 | label=label) 253 | 254 | name = config.get('name', args.expname) 255 | if args.trade_mode == 'no_strategy': 256 | plot_path = f'./balance_{args.split}.jpg' 257 | else: 258 | if len(config_list) == 1: 259 | plot_path = f'{ROOT}/Results/{name}/{args.config}/balance_{args.split}_{args.trade_mode}.jpg' 260 | else: 261 | plot_path = f'{ROOT}/Results/all/balance_{args.config}_{args.split}_{args.trade_mode}.jpg' 262 | plt.xticks(rotation=30) 263 | plt.axhline(y=100, color='r', linestyle='--') 264 | 265 | if len(config_list) == 1: 266 | ax = plt.gca() 267 | ax.get_legend().remove() 268 | plt.title(f'Balance in time (final: {round(balance, 2)})') 269 | else: 270 | plt.title(f'Net Worth in Time') 271 | 272 | # matplotlib.rcParams.update({'font.size': 100}) 273 | plt.xlim([tmp[0], tmp[-1]]) 274 | plt.ylabel('Balance ($)') 275 | plt.xlabel('Date') 276 | plt.legend(loc='upper left') 277 | plt.savefig(plot_path, dpi=300, bbox_inches='tight') -------------------------------------------------------------------------------- /models/layers/SelfAttention_Family.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | import numpy as np 4 | from math import sqrt 5 | from reformer_pytorch import LSHSelfAttention 6 | from einops import rearrange 7 | 8 | 9 | class TriangularCausalMask(): 10 | def __init__(self, B, L, device="cpu"): 11 | mask_shape = [B, 1, L, L] 12 | with torch.no_grad(): 13 | self._mask = torch.triu(torch.ones(mask_shape, dtype=torch.bool), diagonal=1).to(device) 14 | 15 | @property 16 | def mask(self): 17 | return self._mask 18 | 19 | 20 | class ProbMask(): 21 | def __init__(self, B, H, L, index, scores, device="cpu"): 22 | _mask = torch.ones(L, scores.shape[-1], dtype=torch.bool).to(device).triu(1) 23 | _mask_ex = _mask[None, None, :].expand(B, H, L, scores.shape[-1]) 24 | indicator = _mask_ex[torch.arange(B)[:, None, None], 25 | torch.arange(H)[None, :, None], 26 | index, :].to(device) 27 | self._mask = indicator.view(scores.shape).to(device) 28 | 29 | @property 30 | def mask(self): 31 | return self._mask 32 | 33 | # Code implementation from https://github.com/thuml/Flowformer 34 | class FlowAttention(nn.Module): 35 | def __init__(self, attention_dropout=0.1): 36 | super(FlowAttention, self).__init__() 37 | self.dropout = nn.Dropout(attention_dropout) 38 | 39 | def kernel_method(self, x): 40 | return torch.sigmoid(x) 41 | 42 | def forward(self, queries, keys, values, attn_mask, tau=None, delta=None): 43 | queries = queries.transpose(1, 2) 44 | keys = keys.transpose(1, 2) 45 | values = values.transpose(1, 2) 46 | # kernel 47 | queries = self.kernel_method(queries) 48 | keys = self.kernel_method(keys) 49 | # incoming and outgoing 50 | normalizer_row = 1.0 / (torch.einsum("nhld,nhd->nhl", queries + 1e-6, keys.sum(dim=2) + 1e-6)) 51 | normalizer_col = 1.0 / (torch.einsum("nhsd,nhd->nhs", keys + 1e-6, queries.sum(dim=2) + 1e-6)) 52 | # reweighting 53 | normalizer_row_refine = ( 54 | torch.einsum("nhld,nhd->nhl", 
queries + 1e-6, (keys * normalizer_col[:, :, :, None]).sum(dim=2) + 1e-6)) 55 | normalizer_col_refine = ( 56 | torch.einsum("nhsd,nhd->nhs", keys + 1e-6, (queries * normalizer_row[:, :, :, None]).sum(dim=2) + 1e-6)) 57 | # competition and allocation 58 | normalizer_row_refine = torch.sigmoid( 59 | normalizer_row_refine * (float(queries.shape[2]) / float(keys.shape[2]))) 60 | normalizer_col_refine = torch.softmax(normalizer_col_refine, dim=-1) * keys.shape[2] # B h L vis 61 | # multiply 62 | kv = keys.transpose(-2, -1) @ (values * normalizer_col_refine[:, :, :, None]) 63 | x = (((queries @ kv) * normalizer_row[:, :, :, None]) * normalizer_row_refine[:, :, :, None]).transpose(1, 64 | 2).contiguous() 65 | return x, None 66 | 67 | 68 | # Code implementation from https://github.com/shreyansh26/FlashAttention-PyTorch 69 | class FlashAttention(nn.Module): 70 | def __init__(self, mask_flag=True, factor=5, scale=None, attention_dropout=0.1, output_attention=False): 71 | super(FlashAttention, self).__init__() 72 | self.scale = scale 73 | self.mask_flag = mask_flag 74 | self.output_attention = output_attention 75 | self.dropout = nn.Dropout(attention_dropout) 76 | 77 | def flash_attention_forward(self, Q, K, V, mask=None): 78 | BLOCK_SIZE = 32 79 | NEG_INF = -1e10 # -infinity 80 | EPSILON = 1e-10 81 | # mask = torch.randint(0, 2, (128, 8)).to(device='cuda') 82 | O = torch.zeros_like(Q, requires_grad=True) 83 | l = torch.zeros(Q.shape[:-1])[..., None] 84 | m = torch.ones(Q.shape[:-1])[..., None] * NEG_INF 85 | 86 | O = O.to(device='cuda') 87 | l = l.to(device='cuda') 88 | m = m.to(device='cuda') 89 | 90 | Q_BLOCK_SIZE = min(BLOCK_SIZE, Q.shape[-1]) 91 | KV_BLOCK_SIZE = BLOCK_SIZE 92 | 93 | Q_BLOCKS = torch.split(Q, Q_BLOCK_SIZE, dim=2) 94 | K_BLOCKS = torch.split(K, KV_BLOCK_SIZE, dim=2) 95 | V_BLOCKS = torch.split(V, KV_BLOCK_SIZE, dim=2) 96 | if mask is not None: 97 | mask_BLOCKS = list(torch.split(mask, KV_BLOCK_SIZE, dim=1)) 98 | 99 | Tr = len(Q_BLOCKS) 100 | Tc = len(K_BLOCKS) 101 | 102 | O_BLOCKS = list(torch.split(O, Q_BLOCK_SIZE, dim=2)) 103 | l_BLOCKS = list(torch.split(l, Q_BLOCK_SIZE, dim=2)) 104 | m_BLOCKS = list(torch.split(m, Q_BLOCK_SIZE, dim=2)) 105 | 106 | for j in range(Tc): 107 | Kj = K_BLOCKS[j] 108 | Vj = V_BLOCKS[j] 109 | if mask is not None: 110 | maskj = mask_BLOCKS[j] 111 | 112 | for i in range(Tr): 113 | Qi = Q_BLOCKS[i] 114 | Oi = O_BLOCKS[i] 115 | li = l_BLOCKS[i] 116 | mi = m_BLOCKS[i] 117 | 118 | scale = 1 / np.sqrt(Q.shape[-1]) 119 | Qi_scaled = Qi * scale 120 | 121 | S_ij = torch.einsum('... i d, ... j d -> ... i j', Qi_scaled, Kj) 122 | if mask is not None: 123 | # Masking 124 | maskj_temp = rearrange(maskj, 'b j -> b 1 1 j') 125 | S_ij = torch.where(maskj_temp > 0, S_ij, NEG_INF) 126 | 127 | m_block_ij, _ = torch.max(S_ij, dim=-1, keepdims=True) 128 | P_ij = torch.exp(S_ij - m_block_ij) 129 | if mask is not None: 130 | # Masking 131 | P_ij = torch.where(maskj_temp > 0, P_ij, 0.) 132 | 133 | l_block_ij = torch.sum(P_ij, dim=-1, keepdims=True) + EPSILON 134 | 135 | P_ij_Vj = torch.einsum('... i j, ... j d -> ... 
i d', P_ij, Vj) 136 | 137 | mi_new = torch.maximum(m_block_ij, mi) 138 | li_new = torch.exp(mi - mi_new) * li + torch.exp(m_block_ij - mi_new) * l_block_ij 139 | 140 | O_BLOCKS[i] = (li / li_new) * torch.exp(mi - mi_new) * Oi + ( 141 | torch.exp(m_block_ij - mi_new) / li_new) * P_ij_Vj 142 | l_BLOCKS[i] = li_new 143 | m_BLOCKS[i] = mi_new 144 | 145 | O = torch.cat(O_BLOCKS, dim=2) 146 | l = torch.cat(l_BLOCKS, dim=2) 147 | m = torch.cat(m_BLOCKS, dim=2) 148 | return O, l, m 149 | 150 | def forward(self, queries, keys, values, attn_mask, tau=None, delta=None): 151 | res = \ 152 | self.flash_attention_forward(queries.permute(0, 2, 1, 3), keys.permute(0, 2, 1, 3), values.permute(0, 2, 1, 3), 153 | attn_mask)[0] 154 | return res.permute(0, 2, 1, 3).contiguous(), None 155 | 156 | 157 | class FullAttention(nn.Module): 158 | def __init__(self, mask_flag=True, factor=5, scale=None, attention_dropout=0.1, output_attention=False): 159 | super(FullAttention, self).__init__() 160 | self.scale = scale 161 | self.mask_flag = mask_flag 162 | self.output_attention = output_attention 163 | self.dropout = nn.Dropout(attention_dropout) 164 | 165 | def forward(self, queries, keys, values, attn_mask, tau=None, delta=None): 166 | B, L, H, E = queries.shape 167 | _, S, _, D = values.shape 168 | scale = self.scale or 1. / sqrt(E) 169 | 170 | scores = torch.einsum("blhe,bshe->bhls", queries, keys) 171 | 172 | if self.mask_flag: 173 | if attn_mask is None: 174 | attn_mask = TriangularCausalMask(B, L, device=queries.device) 175 | 176 | scores.masked_fill_(attn_mask.mask, -np.inf) 177 | 178 | A = self.dropout(torch.softmax(scale * scores, dim=-1)) 179 | V = torch.einsum("bhls,bshd->blhd", A, values) 180 | 181 | if self.output_attention: 182 | return (V.contiguous(), A) 183 | else: 184 | return (V.contiguous(), None) 185 | 186 | 187 | # Code implementation from https://github.com/zhouhaoyi/Informer2020 188 | class ProbAttention(nn.Module): 189 | def __init__(self, mask_flag=True, factor=5, scale=None, attention_dropout=0.1, output_attention=False): 190 | super(ProbAttention, self).__init__() 191 | self.factor = factor 192 | self.scale = scale 193 | self.mask_flag = mask_flag 194 | self.output_attention = output_attention 195 | self.dropout = nn.Dropout(attention_dropout) 196 | 197 | def _prob_QK(self, Q, K, sample_k, n_top): # n_top: c*ln(L_q) 198 | # Q [B, H, L, D] 199 | B, H, L_K, E = K.shape 200 | _, _, L_Q, _ = Q.shape 201 | 202 | # calculate the sampled Q_K 203 | K_expand = K.unsqueeze(-3).expand(B, H, L_Q, L_K, E) 204 | # real U = U_part(factor*ln(L_k))*L_q 205 | index_sample = torch.randint(L_K, (L_Q, sample_k)) 206 | K_sample = K_expand[:, :, torch.arange( 207 | L_Q).unsqueeze(1), index_sample, :] 208 | Q_K_sample = torch.matmul( 209 | Q.unsqueeze(-2), K_sample.transpose(-2, -1)).squeeze() 210 | 211 | # find the Top_k query with sparisty measurement 212 | M = Q_K_sample.max(-1)[0] - torch.div(Q_K_sample.sum(-1), L_K) 213 | M_top = M.topk(n_top, sorted=False)[1] 214 | 215 | # use the reduced Q to calculate Q_K 216 | Q_reduce = Q[torch.arange(B)[:, None, None], 217 | torch.arange(H)[None, :, None], 218 | M_top, :] # factor*ln(L_q) 219 | Q_K = torch.matmul(Q_reduce, K.transpose(-2, -1)) # factor*ln(L_q)*L_k 220 | 221 | return Q_K, M_top 222 | 223 | def _get_initial_context(self, V, L_Q): 224 | B, H, L_V, D = V.shape 225 | if not self.mask_flag: 226 | # V_sum = V.sum(dim=-2) 227 | V_sum = V.mean(dim=-2) 228 | contex = V_sum.unsqueeze(-2).expand(B, H, 229 | L_Q, V_sum.shape[-1]).clone() 230 | else: # use mask 231 | 
# requires that L_Q == L_V, i.e. for self-attention only 232 | assert (L_Q == L_V) 233 | contex = V.cumsum(dim=-2) 234 | return contex 235 | 236 | def _update_context(self, context_in, V, scores, index, L_Q, attn_mask): 237 | B, H, L_V, D = V.shape 238 | 239 | if self.mask_flag: 240 | attn_mask = ProbMask(B, H, L_Q, index, scores, device=V.device) 241 | scores.masked_fill_(attn_mask.mask, -np.inf) 242 | 243 | attn = torch.softmax(scores, dim=-1) # nn.Softmax(dim=-1)(scores) 244 | 245 | context_in[torch.arange(B)[:, None, None], 246 | torch.arange(H)[None, :, None], 247 | index, :] = torch.matmul(attn, V).type_as(context_in) 248 | if self.output_attention: 249 | attns = (torch.ones([B, H, L_V, L_V]) / 250 | L_V).type_as(attn).to(attn.device) 251 | attns[torch.arange(B)[:, None, None], torch.arange(H)[ 252 | None, :, None], index, :] = attn 253 | return (context_in, attns) 254 | else: 255 | return (context_in, None) 256 | 257 | def forward(self, queries, keys, values, attn_mask, tau=None, delta=None): 258 | B, L_Q, H, D = queries.shape 259 | _, L_K, _, _ = keys.shape 260 | 261 | queries = queries.transpose(2, 1) 262 | keys = keys.transpose(2, 1) 263 | values = values.transpose(2, 1) 264 | 265 | U_part = self.factor * \ 266 | np.ceil(np.log(L_K)).astype('int').item() # c*ln(L_k) 267 | u = self.factor * \ 268 | np.ceil(np.log(L_Q)).astype('int').item() # c*ln(L_q) 269 | 270 | U_part = U_part if U_part < L_K else L_K 271 | u = u if u < L_Q else L_Q 272 | 273 | scores_top, index = self._prob_QK( 274 | queries, keys, sample_k=U_part, n_top=u) 275 | 276 | # add scale factor 277 | scale = self.scale or 1. / sqrt(D) 278 | if scale is not None: 279 | scores_top = scores_top * scale 280 | # get the context 281 | context = self._get_initial_context(values, L_Q) 282 | # update the context with selected top_k queries 283 | context, attn = self._update_context( 284 | context, values, scores_top, index, L_Q, attn_mask) 285 | 286 | return context.contiguous(), attn 287 | 288 | 289 | class AttentionLayer(nn.Module): 290 | def __init__(self, attention, d_model, n_heads, d_keys=None, 291 | d_values=None): 292 | super(AttentionLayer, self).__init__() 293 | 294 | d_keys = d_keys or (d_model // n_heads) 295 | d_values = d_values or (d_model // n_heads) 296 | 297 | self.inner_attention = attention 298 | self.query_projection = nn.Linear(d_model, d_keys * n_heads) 299 | self.key_projection = nn.Linear(d_model, d_keys * n_heads) 300 | self.value_projection = nn.Linear(d_model, d_values * n_heads) 301 | self.out_projection = nn.Linear(d_values * n_heads, d_model) 302 | self.n_heads = n_heads 303 | 304 | def forward(self, queries, keys, values, attn_mask, tau=None, delta=None): 305 | B, L, _ = queries.shape 306 | _, S, _ = keys.shape 307 | H = self.n_heads 308 | 309 | queries = self.query_projection(queries).view(B, L, H, -1) 310 | keys = self.key_projection(keys).view(B, S, H, -1) 311 | values = self.value_projection(values).view(B, S, H, -1) 312 | 313 | out, attn = self.inner_attention( 314 | queries, 315 | keys, 316 | values, 317 | attn_mask, 318 | tau=tau, 319 | delta=delta 320 | ) 321 | out = out.view(B, L, -1) 322 | 323 | return self.out_projection(out), attn 324 | 325 | 326 | class ReformerLayer(nn.Module): 327 | def __init__(self, attention, d_model, n_heads, d_keys=None, 328 | d_values=None, causal=False, bucket_size=4, n_hashes=4): 329 | super().__init__() 330 | self.bucket_size = bucket_size 331 | self.attn = LSHSelfAttention( 332 | dim=d_model, 333 | heads=n_heads, 334 | bucket_size=bucket_size, 335 | 
n_hashes=n_hashes, 336 | causal=causal 337 | ) 338 | 339 | def fit_length(self, queries): 340 | # inside reformer: assert N % (bucket_size * 2) == 0 341 | B, N, C = queries.shape 342 | if N % (self.bucket_size * 2) == 0: 343 | return queries 344 | else: 345 | # fill the time series 346 | fill_len = (self.bucket_size * 2) - (N % (self.bucket_size * 2)) 347 | return torch.cat([queries, torch.zeros([B, fill_len, C]).to(queries.device)], dim=1) 348 | 349 | def forward(self, queries, keys, values, attn_mask, tau, delta): 350 | # in Reformer: defalut queries=keys 351 | B, N, C = queries.shape 352 | queries = self.attn(self.fit_length(queries))[:, :N, :] 353 | return queries, None 354 | -------------------------------------------------------------------------------- /models/cmamba.py: -------------------------------------------------------------------------------- 1 | import math 2 | from functools import partial 3 | from typing import Callable, Any 4 | 5 | import torch 6 | import torch.nn as nn 7 | import torch.nn.functional as F 8 | from einops import rearrange, repeat 9 | import torch.utils.checkpoint as checkpoint 10 | from causal_conv1d import causal_conv1d_fn, causal_conv1d_update 11 | from mamba_ssm.ops.triton.selective_state_update import selective_state_update 12 | from mamba_ssm.ops.selective_scan_interface import selective_scan_fn, mamba_inner_fn 13 | 14 | 15 | class Mamba(nn.Module): 16 | def __init__( 17 | self, 18 | d_model, 19 | d_state=16, 20 | d_conv=4, 21 | expand=2, 22 | dt_rank="auto", 23 | dt_min=0.001, 24 | dt_max=0.1, 25 | dt_init="random", 26 | dt_scale=1.0, 27 | dt_init_floor=1e-4, 28 | conv_bias=True, 29 | bias=False, 30 | use_fast_path=True, # Fused kernel options 31 | layer_idx=None, 32 | device=None, 33 | dtype=None, 34 | ): 35 | factory_kwargs = {"device": device, "dtype": dtype} 36 | super().__init__() 37 | self.d_model = d_model 38 | self.d_state = d_state 39 | self.d_conv = d_conv 40 | self.expand = expand 41 | self.d_inner = int(self.expand * self.d_model) 42 | self.dt_rank = math.ceil(self.d_model / 16) if dt_rank == "auto" else dt_rank 43 | self.use_fast_path = use_fast_path 44 | self.layer_idx = layer_idx 45 | 46 | self.in_proj = nn.Linear(self.d_model, self.d_inner * 2, bias=bias, **factory_kwargs) 47 | 48 | self.conv1d = nn.Conv1d( 49 | in_channels=self.d_inner, 50 | out_channels=self.d_inner, 51 | bias=conv_bias, 52 | kernel_size=d_conv, 53 | groups=self.d_inner, 54 | padding=d_conv - 1, 55 | **factory_kwargs, 56 | ) 57 | 58 | self.activation = "silu" 59 | self.act = nn.SiLU() 60 | 61 | self.x_proj = nn.Linear( 62 | self.d_inner, self.dt_rank + self.d_state * 2, bias=False, **factory_kwargs 63 | ) 64 | self.dt_proj = nn.Linear(self.dt_rank, self.d_inner, bias=True, **factory_kwargs) 65 | 66 | # Initialize special dt projection to preserve variance at initialization 67 | dt_init_std = self.dt_rank**-0.5 * dt_scale 68 | if dt_init == "constant": 69 | nn.init.constant_(self.dt_proj.weight, dt_init_std) 70 | elif dt_init == "random": 71 | nn.init.uniform_(self.dt_proj.weight, -dt_init_std, dt_init_std) 72 | else: 73 | raise NotImplementedError 74 | 75 | # Initialize dt bias so that F.softplus(dt_bias) is between dt_min and dt_max 76 | dt = torch.exp( 77 | torch.rand(self.d_inner, **factory_kwargs) * (math.log(dt_max) - math.log(dt_min)) 78 | + math.log(dt_min) 79 | ).clamp(min=dt_init_floor) 80 | # Inverse of softplus: https://github.com/pytorch/pytorch/issues/72759 81 | inv_dt = dt + torch.log(-torch.expm1(-dt)) 82 | with torch.no_grad(): 83 | 
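# Note on the inv_dt computed above (a short derivation, not part of the source file):
# softplus^{-1}(y) = log(exp(y) - 1) = y + log(1 - exp(-y)) = y + log(-expm1(-y)),
# which is numerically stable for small dt; copying it into dt_proj.bias under
# no_grad makes F.softplus(dt_proj.bias) reproduce the sampled dt at initialization.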
self.dt_proj.bias.copy_(inv_dt) 84 | # Our initialization would set all Linear.bias to zero, need to mark this one as _no_reinit 85 | self.dt_proj.bias._no_reinit = True 86 | 87 | # S4D real initialization 88 | A = repeat( 89 | torch.arange(1, self.d_state + 1, dtype=torch.float32, device=device), 90 | "n -> d n", 91 | d=self.d_inner, 92 | ).contiguous() 93 | A_log = torch.log(A) # Keep A_log in fp32 94 | self.A_log = nn.Parameter(A_log) 95 | self.A_log._no_weight_decay = True 96 | 97 | # D "skip" parameter 98 | self.D = nn.Parameter(torch.ones(self.d_inner, device=device)) # Keep in fp32 99 | self.D._no_weight_decay = True 100 | 101 | self.out_proj = nn.Linear(self.d_inner, self.d_model, bias=bias, **factory_kwargs) 102 | 103 | def forward(self, hidden_states, inference_params=None): 104 | """ 105 | hidden_states: (B, L, D) 106 | Returns: same shape as hidden_states 107 | """ 108 | batch, seqlen, dim = hidden_states.shape 109 | 110 | conv_state, ssm_state = None, None 111 | if inference_params is not None: 112 | conv_state, ssm_state = self._get_states_from_cache(inference_params, batch) 113 | if inference_params.seqlen_offset > 0: 114 | # The states are updated inplace 115 | out, _, _ = self.step(hidden_states, conv_state, ssm_state) 116 | return out 117 | 118 | # We do matmul and transpose BLH -> HBL at the same time 119 | xz = rearrange( 120 | self.in_proj.weight @ rearrange(hidden_states, "b l d -> d (b l)"), 121 | "d (b l) -> b d l", 122 | l=seqlen, 123 | ) 124 | if self.in_proj.bias is not None: 125 | xz = xz + rearrange(self.in_proj.bias.to(dtype=xz.dtype), "d -> d 1") 126 | 127 | A = -torch.exp(self.A_log.float()) # (d_inner, d_state) 128 | # In the backward pass we write dx and dz next to each other to avoid torch.cat 129 | if self.use_fast_path and causal_conv1d_fn is not None and inference_params is None: # Doesn't support outputting the states 130 | out = mamba_inner_fn( 131 | xz, 132 | self.conv1d.weight, 133 | self.conv1d.bias, 134 | self.x_proj.weight, 135 | self.dt_proj.weight, 136 | self.out_proj.weight, 137 | self.out_proj.bias, 138 | A, 139 | None, # input-dependent B 140 | None, # input-dependent C 141 | self.D.float(), 142 | delta_bias=self.dt_proj.bias.float(), 143 | delta_softplus=True, 144 | ) 145 | else: 146 | x, z = xz.chunk(2, dim=1) 147 | # Compute short convolution 148 | if conv_state is not None: 149 | # If we just take x[:, :, -self.d_conv :], it will error if seqlen < self.d_conv 150 | # Instead F.pad will pad with zeros if seqlen < self.d_conv, and truncate otherwise. 151 | conv_state.copy_(F.pad(x, (self.d_conv - x.shape[-1], 0))) # Update state (B D W) 152 | if causal_conv1d_fn is None: 153 | x = self.act(self.conv1d(x)[..., :seqlen]) 154 | else: 155 | assert self.activation in ["silu", "swish"] 156 | x = causal_conv1d_fn( 157 | x=x, 158 | weight=rearrange(self.conv1d.weight, "d 1 w -> d w"), 159 | bias=self.conv1d.bias, 160 | activation=self.activation, 161 | ) 162 | 163 | # We're careful here about the layout, to avoid extra transposes. 164 | # We want dt to have d as the slowest moving dimension 165 | # and L as the fastest moving dimension, since those are what the ssm_scan kernel expects. 
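# Selective-scan parameterization used below (roughly, per time step t):
#   x_proj(x_t) splits into [dt_t (dt_rank), B_t (d_state), C_t (d_state)];
#   dt_t is projected up to d_inner by dt_proj, with the bias added inside the
#   kernel (delta_bias) and passed through softplus (delta_softplus=True).
#   With A = -exp(A_log), selective_scan_fn computes approximately
#       h_t = exp(dt_t * A) * h_{t-1} + dt_t * B_t * x_t
#       y_t = C_t . h_t + D * x_t,   gated by silu(z)
#   mirroring the explicit recurrence written out in step() further down.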
166 | x_dbl = self.x_proj(rearrange(x, "b d l -> (b l) d")) # (bl d) 167 | dt, B, C = torch.split(x_dbl, [self.dt_rank, self.d_state, self.d_state], dim=-1) 168 | dt = self.dt_proj.weight @ dt.t() 169 | dt = rearrange(dt, "d (b l) -> b d l", l=seqlen) 170 | B = rearrange(B, "(b l) dstate -> b dstate l", l=seqlen).contiguous() 171 | C = rearrange(C, "(b l) dstate -> b dstate l", l=seqlen).contiguous() 172 | assert self.activation in ["silu", "swish"] 173 | y = selective_scan_fn( 174 | x, 175 | dt, 176 | A, 177 | B, 178 | C, 179 | self.D.float(), 180 | z=z, 181 | delta_bias=self.dt_proj.bias.float(), 182 | delta_softplus=True, 183 | return_last_state=ssm_state is not None, 184 | ) 185 | if ssm_state is not None: 186 | y, last_state = y 187 | ssm_state.copy_(last_state) 188 | y = rearrange(y, "b d l -> b l d") 189 | out = self.out_proj(y) 190 | return out 191 | 192 | def step(self, hidden_states, conv_state, ssm_state): 193 | dtype = hidden_states.dtype 194 | assert hidden_states.shape[1] == 1, "Only support decoding with 1 token at a time for now" 195 | xz = self.in_proj(hidden_states.squeeze(1)) # (B 2D) 196 | x, z = xz.chunk(2, dim=-1) # (B D) 197 | 198 | # Conv step 199 | if causal_conv1d_update is None: 200 | conv_state.copy_(torch.roll(conv_state, shifts=-1, dims=-1)) # Update state (B D W) 201 | conv_state[:, :, -1] = x 202 | x = torch.sum(conv_state * rearrange(self.conv1d.weight, "d 1 w -> d w"), dim=-1) # (B D) 203 | if self.conv1d.bias is not None: 204 | x = x + self.conv1d.bias 205 | x = self.act(x).to(dtype=dtype) 206 | else: 207 | x = causal_conv1d_update( 208 | x, 209 | conv_state, 210 | rearrange(self.conv1d.weight, "d 1 w -> d w"), 211 | self.conv1d.bias, 212 | self.activation, 213 | ) 214 | 215 | x_db = self.x_proj(x) # (B dt_rank+2*d_state) 216 | dt, B, C = torch.split(x_db, [self.dt_rank, self.d_state, self.d_state], dim=-1) 217 | # Don't add dt_bias here 218 | dt = F.linear(dt, self.dt_proj.weight) # (B d_inner) 219 | A = -torch.exp(self.A_log.float()) # (d_inner, d_state) 220 | 221 | # SSM step 222 | if selective_state_update is None: 223 | # Discretize A and B 224 | dt = F.softplus(dt + self.dt_proj.bias.to(dtype=dt.dtype)) 225 | dA = torch.exp(torch.einsum("bd,dn->bdn", dt, A)) 226 | dB = torch.einsum("bd,bn->bdn", dt, B) 227 | ssm_state.copy_(ssm_state * dA + rearrange(x, "b d -> b d 1") * dB) 228 | y = torch.einsum("bdn,bn->bd", ssm_state.to(dtype), C) 229 | y = y + self.D.to(dtype) * x 230 | y = y * self.act(z) # (B D) 231 | else: 232 | y = selective_state_update( 233 | ssm_state, x, dt, A, B, C, self.D, z=z, dt_bias=self.dt_proj.bias, dt_softplus=True 234 | ) 235 | 236 | out = self.out_proj(y) 237 | return out.unsqueeze(1), conv_state, ssm_state 238 | 239 | def allocate_inference_cache(self, batch_size, max_seqlen, dtype=None, **kwargs): 240 | device = self.out_proj.weight.device 241 | conv_dtype = self.conv1d.weight.dtype if dtype is None else dtype 242 | conv_state = torch.zeros( 243 | batch_size, self.d_model * self.expand, self.d_conv, device=device, dtype=conv_dtype 244 | ) 245 | ssm_dtype = self.dt_proj.weight.dtype if dtype is None else dtype 246 | # ssm_dtype = torch.float32 247 | ssm_state = torch.zeros( 248 | batch_size, self.d_model * self.expand, self.d_state, device=device, dtype=ssm_dtype 249 | ) 250 | return conv_state, ssm_state 251 | 252 | def _get_states_from_cache(self, inference_params, batch_size, initialize_states=False): 253 | assert self.layer_idx is not None 254 | if self.layer_idx not in inference_params.key_value_memory_dict: 255 | 
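# Cache miss below: the layer lazily allocates zeroed per-layer states and registers
# them under its layer_idx. conv_state holds the last d_conv inputs of the depthwise
# convolution, shape (batch, d_inner, d_conv); ssm_state holds the recurrent SSM
# state, shape (batch, d_inner, d_state). step() updates both in place during
# single-token decoding.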
batch_shape = (batch_size,) 256 | conv_state = torch.zeros( 257 | batch_size, 258 | self.d_model * self.expand, 259 | self.d_conv, 260 | device=self.conv1d.weight.device, 261 | dtype=self.conv1d.weight.dtype, 262 | ) 263 | ssm_state = torch.zeros( 264 | batch_size, 265 | self.d_model * self.expand, 266 | self.d_state, 267 | device=self.dt_proj.weight.device, 268 | dtype=self.dt_proj.weight.dtype, 269 | # dtype=torch.float32, 270 | ) 271 | inference_params.key_value_memory_dict[self.layer_idx] = (conv_state, ssm_state) 272 | else: 273 | conv_state, ssm_state = inference_params.key_value_memory_dict[self.layer_idx] 274 | # TODO: What if batch size changes between generation, and we reuse the same states? 275 | if initialize_states: 276 | conv_state.zero_() 277 | ssm_state.zero_() 278 | return conv_state, ssm_state 279 | 280 | 281 | class Permute(nn.Module): 282 | def __init__(self, *args): 283 | super().__init__() 284 | self.args = args 285 | 286 | def forward(self, x: torch.Tensor): 287 | return x.permute(*self.args) 288 | 289 | 290 | class Mlp(nn.Module): 291 | def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.,channels_first=False): 292 | super().__init__() 293 | out_features = out_features or in_features 294 | hidden_features = hidden_features or in_features 295 | 296 | Linear = partial(nn.Conv2d, kernel_size=1, padding=0) if channels_first else nn.Linear 297 | self.fc1 = Linear(in_features, hidden_features) 298 | self.act = act_layer() 299 | self.fc2 = Linear(hidden_features, out_features) 300 | self.drop = nn.Dropout(drop) 301 | 302 | def forward(self, x): 303 | x = self.fc1(x) 304 | x = self.act(x) 305 | x = self.drop(x) 306 | x = self.fc2(x) 307 | x = self.drop(x) 308 | return x 309 | 310 | 311 | class CMBlock(nn.Module): 312 | 313 | def __init__( 314 | self, 315 | hidden_dim: int, 316 | norm_layer: Callable[..., torch.nn.Module] = partial(nn.LayerNorm, eps=1e-6), 317 | d_state: int = 16, 318 | dt_rank: Any = "auto", 319 | d_conv=4, 320 | expand=2, 321 | use_checkpoint: bool = False, 322 | mlp_ratio=2, 323 | act_layer=nn.ReLU, 324 | drop: float = 0.0, 325 | **kwargs, 326 | ): 327 | super().__init__() 328 | self.use_checkpoint = use_checkpoint 329 | self.norm = norm_layer(hidden_dim) 330 | 331 | self.op = Mamba(d_model=hidden_dim, 332 | d_state=d_state, 333 | d_conv=d_conv, 334 | expand=expand, 335 | dt_rank=dt_rank, 336 | **kwargs 337 | ) 338 | 339 | self.mlp_branch = mlp_ratio > 0 340 | if self.mlp_branch: 341 | self.norm2 = norm_layer(hidden_dim) 342 | mlp_hidden_dim = int(hidden_dim * mlp_ratio) 343 | self.mlp = Mlp(in_features=hidden_dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop, channels_first=False) 344 | 345 | def _forward(self, x): 346 | h = self.op(self.norm(x)) 347 | # h = self.op(x) 348 | h += x 349 | if self.mlp_branch: 350 | h = h + self.drop_path(self.mlp(self.norm2(h))) 351 | return h 352 | 353 | def forward(self, x): 354 | if self.use_checkpoint: 355 | return checkpoint.checkpoint(self._forward, (x)) 356 | else: 357 | return self._forward(x) 358 | 359 | 360 | class CMamba(nn.Module): 361 | 362 | def __init__( 363 | self, 364 | num_features=5, 365 | hidden_dims=[14, 1], 366 | norm_layer=nn.LayerNorm, 367 | d_conv=4, 368 | layer_density=1, 369 | expand=2, 370 | mlp_ratio=0, 371 | drop=0.0, 372 | num_classes=None, 373 | d_states=16, 374 | use_checkpoint=False, 375 | cls=False, 376 | **kwargs 377 | ): 378 | super().__init__() 379 | 380 | self.hidden_dims = hidden_dims 381 | self.expand = expand 382 | 
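# Shape convention (inferred from the defaults, e.g. hidden_dims=[14, 1] with
# window_size 14 in the configs): the input appears to be (batch, num_features,
# window_size). Each stage runs layer_density[i] CMBlocks at width hidden_dims[i]
# followed by a Linear mapping hidden_dims[i] -> hidden_dims[i+1]; post_process then
# permutes and applies Linear(num_features, 1), yielding a single predicted value per
# sample (passed through tanh when cls=True).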
self.mlp_ratio = mlp_ratio 383 | self.drop = drop 384 | self.num_features = num_features 385 | self.d_conv = d_conv 386 | self.layer_density = None 387 | self.num_classes = num_classes 388 | self.norm_layer = norm_layer 389 | self.d_states = None 390 | self.use_checkpoint = use_checkpoint 391 | self._set_d_states(d_states) 392 | self._create_layer_density(layer_density) 393 | self.args = kwargs 394 | self.act = nn.ReLU 395 | self.cls = cls 396 | 397 | self.post_process = nn.Sequential( 398 | Permute(0, 2, 1), 399 | nn.Linear(num_features, 1), 400 | ) 401 | self.tanh = nn.Tanh() 402 | 403 | d = len(hidden_dims) 404 | self.blocks = nn.ModuleList( 405 | self._get_block(hidden_dims[i], hidden_dims[i + 1], self.layer_density[i], self.d_states[i]) 406 | for i in range(d - 1) 407 | ) 408 | 409 | # self.norm = norm_layer((num_features, hidden_dims[0])) 410 | self.activation = self.act() 411 | 412 | 413 | def _set_d_states(self, d_states): 414 | n = len(self.hidden_dims) 415 | # if d_states == None: 416 | # self.d_states = ['auto' for _ in range(n)] 417 | if isinstance(d_states, list): 418 | self.d_states = d_states 419 | else: 420 | self.d_states = [d_states for _ in range(n)] 421 | 422 | def _init_model(self): 423 | device=next(self.parameters()).device 424 | self.cuda() 425 | input = torch.randn((1, 3, self.img_size, self.img_size), device=next(self.parameters()).device) 426 | _ = self(input) 427 | self.to(device=device) 428 | 429 | def _create_layer_density(self, layer_density): 430 | n = len(self.hidden_dims) 431 | if not isinstance(layer_density, list): 432 | self.layer_density = [layer_density for _ in range(n)] 433 | else: 434 | self.layer_density = layer_density 435 | 436 | def _get_block(self, hidden_dim, hidden_dim_next, n, d_state): 437 | # print(f'ds - {hidden_dim} - {n}') 438 | modules = [CMBlock(hidden_dim=hidden_dim, 439 | norm_layer=self.norm_layer, 440 | d_state=d_state, 441 | d_conv=self.d_conv, 442 | expand=self.expand, 443 | use_checkpoint=self.use_checkpoint, 444 | mlp_ratio=self.mlp_ratio, 445 | act_layer=self.act, 446 | drop=self.drop, 447 | **self.args 448 | ) 449 | for _ in range(n)] 450 | modules.append(nn.Linear(in_features=hidden_dim, out_features=hidden_dim_next)) 451 | # modules.append(self.norm_layer(hidden_dim_next)) 452 | return nn.Sequential(*modules) 453 | 454 | 455 | def forward(self, x): 456 | # x = self.norm(x) 457 | for layer in self.blocks: 458 | x = layer(x) 459 | 460 | x = self.post_process(x) 461 | if self.cls: 462 | x = self.tanh(x) 463 | return x -------------------------------------------------------------------------------- /data/2018-09-17_2024-09-16_86400/test.csv: -------------------------------------------------------------------------------- 1 | ,Open,High,Low,Close,Volume,Timestamp 2 | 0,26567.93,26618.0,26445.07,26534.19,6774210670.0,1694934000 3 | 1,26532.99,27414.73,26415.52,26754.28,15615339655.0,1695020400 4 | 2,26760.85,27488.76,26681.61,27211.12,13807690550.0,1695106800 5 | 3,27210.23,27379.51,26864.08,27132.01,13281116604.0,1695193200 6 | 4,27129.84,27152.94,26389.3,26567.63,13371443708.0,1695279600 7 | 5,26564.06,26726.08,26495.53,26579.57,10578746709.0,1695366000 8 | 6,26578.56,26634.19,26520.52,26579.39,7404700301.0,1695452400 9 | 7,26579.37,26716.06,26221.05,26256.83,8192867686.0,1695538800 10 | 8,26253.78,26421.51,26011.47,26298.48,11997833257.0,1695625200 11 | 9,26294.76,26389.88,26090.71,26217.25,9985498161.0,1695711600 12 | 10,26209.5,26817.84,26111.46,26352.72,11718380997.0,1695798000 13 | 
11,26355.81,27259.5,26327.32,27021.55,14079002707.0,1695884400 14 | 12,27024.84,27225.94,26721.76,26911.72,10396435377.0,1695970800 15 | 13,26911.69,27091.79,26888.97,26967.92,5331172801.0,1696057200 16 | 14,26967.4,28047.24,26965.09,27983.75,9503917434.0,1696143600 17 | 15,27976.8,28494.46,27347.79,27530.79,19793041322.0,1696230000 18 | 16,27508.25,27667.19,27216.0,27429.98,11407814187.0,1696316400 19 | 17,27429.07,27826.66,27248.11,27799.39,11143355314.0,1696402800 20 | 18,27798.65,28091.86,27375.6,27415.91,11877253670.0,1696489200 21 | 19,27412.12,28252.54,27215.55,27946.6,13492391599.0,1696575600 22 | 20,27946.78,28028.09,27870.42,27968.84,6553044316.0,1696662000 23 | 21,27971.68,28102.17,27740.66,27935.09,7916875290.0,1696748400 24 | 22,27934.47,27989.47,27302.56,27583.68,12007668568.0,1696834800 25 | 23,27589.2,27715.85,27301.65,27391.02,9973350678.0,1696921200 26 | 24,27392.08,27474.12,26561.1,26873.32,13648094333.0,1697007600 27 | 25,26873.29,26921.44,26558.32,26756.8,9392909295.0,1697094000 28 | 26,26752.88,27092.7,26686.32,26862.38,15165312851.0,1697180400 29 | 27,26866.2,26969.0,26814.59,26861.71,5388116782.0,1697266800 30 | 28,26858.01,27289.17,26817.89,27159.65,7098201980.0,1697353200 31 | 29,27162.63,29448.14,27130.47,28519.47,27833876539.0,1697439600 32 | 30,28522.1,28618.75,28110.19,28415.75,14872527508.0,1697526000 33 | 31,28413.53,28889.01,28174.25,28328.34,12724128586.0,1697612400 34 | 32,28332.42,28892.47,28177.99,28719.81,14448058195.0,1697698800 35 | 33,28732.81,30104.09,28601.67,29682.95,21536125230.0,1697785200 36 | 34,29683.38,30287.48,29481.75,29918.41,11541146996.0,1697871600 37 | 35,29918.65,30199.43,29720.31,29993.9,10446520040.0,1697958000 38 | 36,30140.69,34370.44,30097.83,33086.23,38363572311.0,1698044400 39 | 37,33077.3,35150.43,32880.76,33901.53,44934999645.0,1698130800 40 | 38,33916.04,35133.76,33709.11,34502.82,25254318008.0,1698217200 41 | 39,34504.29,34832.91,33762.32,34156.65,19427195376.0,1698303600 42 | 40,34156.5,34238.21,33416.89,33909.8,16418032871.0,1698390000 43 | 41,33907.72,34399.39,33874.8,34089.57,10160330825.0,1698476400 44 | 42,34089.37,34743.26,33947.57,34538.48,11160323986.0,1698562800 45 | 43,34531.74,34843.93,34110.97,34502.36,17184860315.0,1698649200 46 | 44,34500.08,34719.25,34083.31,34667.78,15758270810.0,1698735600 47 | 45,34657.27,35527.93,34170.69,35437.25,22446272005.0,1698822000 48 | 46,35441.58,35919.84,34401.57,34938.24,20998158544.0,1698908400 49 | 47,34942.47,34942.47,34133.44,34732.32,17158456701.0,1698994800 50 | 48,34736.32,35256.03,34616.69,35082.2,9561294264.0,1699081200 51 | 49,35090.01,35340.34,34594.24,35049.36,12412743996.0,1699167600 52 | 50,35044.79,35286.03,34765.36,35037.37,12693436420.0,1699254000 53 | 51,35047.79,35892.42,34545.82,35443.56,18834737789.0,1699340400 54 | 52,35419.48,35994.42,35147.8,35655.28,17295394918.0,1699426800 55 | 53,35633.63,37926.26,35592.1,36693.13,37762672382.0,1699513200 56 | 54,36702.25,37493.8,36362.75,37313.97,22711265155.0,1699599600 57 | 55,37310.07,37407.09,36773.67,37138.05,13924272142.0,1699686000 58 | 56,37133.99,37227.69,36779.12,37054.52,11545715999.0,1699772400 59 | 57,37070.3,37405.12,36399.61,36502.36,19057712790.0,1699858800 60 | 58,36491.79,36753.35,34948.5,35537.64,23857403554.0,1699945200 61 | 59,35548.11,37964.89,35383.78,37880.58,27365821679.0,1700031600 62 | 60,37879.98,37934.63,35545.47,36154.77,26007385366.0,1700118000 63 | 61,36164.82,36704.48,35901.23,36596.68,22445028430.0,1700204400 64 | 62,36625.37,36839.28,36233.31,36585.7,11886022717.0,1700290800 65 | 
63,36585.77,37509.36,36414.6,37386.55,12915986553.0,1700377200 66 | 64,37374.07,37756.82,36882.53,37476.96,20888209068.0,1700463600 67 | 65,37469.16,37631.14,35813.81,35813.81,25172163756.0,1700550000 68 | 66,35756.55,37856.98,35670.97,37432.34,24397247860.0,1700636400 69 | 67,37420.43,37643.92,36923.86,37289.62,14214948217.0,1700722800 70 | 68,37296.32,38415.34,37261.61,37720.28,22922957823.0,1700809200 71 | 69,37721.41,37892.43,37617.42,37796.79,9099571165.0,1700895600 72 | 70,37796.83,37820.3,37162.75,37479.12,13744796068.0,1700982000 73 | 71,37454.19,37559.36,36750.13,37254.17,19002925720.0,1701068400 74 | 72,37247.99,38368.48,36891.09,37831.09,21696137014.0,1701154800 75 | 73,37826.11,38366.11,37612.63,37858.49,20728546658.0,1701241200 76 | 74,37861.12,38141.75,37531.14,37712.75,18115982627.0,1701327600 77 | 75,37718.01,38954.11,37629.36,38688.75,23512784002.0,1701414000 78 | 76,38689.28,39678.94,38652.59,39476.33,15534035612.0,1701500400 79 | 77,39472.21,40135.61,39298.16,39978.39,15769696322.0,1701586800 80 | 78,39978.63,42371.75,39978.63,41980.1,39856129827.0,1701673200 81 | 79,41986.27,44408.66,41421.15,44080.65,36312154535.0,1701759600 82 | 80,44080.02,44265.77,43478.08,43746.45,29909761586.0,1701846000 83 | 81,43769.13,44042.59,42880.65,43292.66,27635760671.0,1701932400 84 | 82,43293.14,44705.52,43125.3,44166.6,24421116687.0,1702018800 85 | 83,44180.02,44361.26,43627.6,43725.98,17368210171.0,1702105200 86 | 84,43728.38,44034.63,43593.29,43779.7,13000481418.0,1702191600 87 | 85,43792.02,43808.38,40234.58,41243.83,40632672038.0,1702278000 88 | 86,41238.73,42048.3,40667.56,41450.22,24779520132.0,1702364400 89 | 87,41468.46,43429.78,40676.87,42890.74,26797884674.0,1702450800 90 | 88,42884.26,43390.86,41767.09,43023.97,25578530178.0,1702537200 91 | 89,43028.25,43087.82,41692.97,41929.76,19639442462.0,1702623600 92 | 90,41937.74,42664.95,41723.11,42240.12,14386729590.0,1702710000 93 | 91,42236.11,42359.5,41274.54,41364.66,16678702876.0,1702796400 94 | 92,41348.2,42720.3,40530.26,42623.54,25224642008.0,1702882800 95 | 93,42641.51,43354.3,41826.34,42270.53,23171001281.0,1702969200 96 | 94,42261.3,44275.59,42223.82,43652.25,27868908174.0,1703055600 97 | 95,43648.13,44240.67,43330.05,43869.15,22452766169.0,1703142000 98 | 96,43868.99,44367.96,43441.97,43997.9,21028503216.0,1703228400 99 | 97,44012.2,44015.7,43351.36,43739.54,13507796558.0,1703314800 100 | 98,43728.37,43945.52,42786.92,43016.12,18830554085.0,1703401200 101 | 99,43010.57,43765.09,42765.77,43613.14,21115795370.0,1703487600 102 | 100,43599.85,43603.18,41676.49,42520.4,30026850982.0,1703574000 103 | 101,42518.47,43683.16,42167.58,43442.86,25260941032.0,1703660400 104 | 102,43468.2,43804.78,42318.55,42627.86,22992093014.0,1703746800 105 | 103,42614.64,43124.32,41424.06,42099.4,26000021055.0,1703833200 106 | 104,42091.75,42584.13,41556.23,42156.9,16013925945.0,1703919600 107 | 105,42152.1,42860.94,41998.25,42265.19,16397498810.0,1704006000 108 | 106,42280.23,44175.44,42214.98,44167.33,18426978443.0,1704092400 109 | 107,44187.14,45899.71,44176.95,44957.97,39335274536.0,1704178800 110 | 108,44961.6,45503.24,40813.54,42848.18,46342323118.0,1704265200 111 | 109,42855.82,44770.02,42675.18,44179.92,30448091210.0,1704351600 112 | 110,44192.98,44353.29,42784.72,44162.69,32336029347.0,1704438000 113 | 111,44178.95,44227.63,43475.16,43989.2,16092503468.0,1704524400 114 | 112,43998.46,44495.57,43662.23,43943.1,19330573863.0,1704610800 115 | 113,43948.71,47218.0,43244.08,46970.5,42746192015.0,1704697200 116 | 
114,46987.64,47893.7,45244.71,46139.73,39821290992.0,1704783600 117 | 115,46121.54,47647.22,44483.15,46627.78,50114613298.0,1704870000 118 | 116,46656.07,48969.37,45678.64,46368.59,45833734549.0,1704956400 119 | 117,46354.79,46498.14,41903.77,42853.17,43332698900.0,1705042800 120 | 118,42799.45,43234.66,42464.14,42842.38,20601860469.0,1705129200 121 | 119,42842.26,43065.6,41724.61,41796.27,17521429522.0,1705215600 122 | 120,41715.07,43319.72,41705.42,42511.97,22320220558.0,1705302000 123 | 121,42499.34,43566.27,42086.0,43154.95,24062872740.0,1705388400 124 | 122,43132.1,43189.89,42189.31,42742.65,20851232595.0,1705474800 125 | 123,42742.31,42876.35,40631.17,41262.06,25218357242.0,1705561200 126 | 124,41278.46,42134.16,40297.46,41618.41,25752407154.0,1705647600 127 | 125,41624.59,41877.89,41446.82,41665.59,11586690904.0,1705734000 128 | 126,41671.49,41855.37,41497.01,41545.79,9344043642.0,1705820400 129 | 127,41553.65,41651.21,39450.12,39507.37,31338708143.0,1705906800 130 | 128,39518.71,40127.35,38521.89,39845.55,29244553045.0,1705993200 131 | 129,39877.59,40483.79,39508.8,40077.07,22359526178.0,1706079600 132 | 130,40075.55,40254.48,39545.66,39933.81,18491782013.0,1706166000 133 | 131,39936.82,42209.39,39825.69,41816.87,25598119893.0,1706252400 134 | 132,41815.63,42195.63,41431.28,42120.05,11422941934.0,1706338800 135 | 133,42126.13,42797.18,41696.91,42035.59,16858971687.0,1706425200 136 | 134,42030.91,43305.87,41818.33,43288.25,20668476578.0,1706511600 137 | 135,43300.23,43838.95,42711.37,42952.61,23842814518.0,1706598000 138 | 136,42946.25,43717.41,42298.95,42582.61,24673628793.0,1706684400 139 | 137,42569.76,43243.17,41879.19,43075.77,21423953779.0,1706770800 140 | 138,43077.64,43422.49,42584.34,43185.86,18603843039.0,1706857200 141 | 139,43184.96,43359.94,42890.81,42992.25,11169245236.0,1706943600 142 | 140,42994.94,43097.64,42374.83,42583.58,14802225490.0,1707030000 143 | 141,42577.62,43494.25,42264.82,42658.67,18715487317.0,1707116400 144 | 142,42657.39,43344.15,42529.02,43084.67,16798476726.0,1707202800 145 | 143,43090.02,44341.95,42775.96,44318.22,21126587775.0,1707289200 146 | 144,44332.13,45575.84,44332.13,45301.57,26154524080.0,1707375600 147 | 145,45297.38,48152.5,45260.82,47147.2,39316770844.0,1707462000 148 | 146,47153.53,48146.17,46905.32,47771.28,16398681570.0,1707548400 149 | 147,47768.97,48535.94,47617.41,48293.92,19315867136.0,1707634800 150 | 148,48296.39,50280.48,47745.76,49958.22,34511985805.0,1707721200 151 | 149,49941.36,50358.39,48406.5,49742.44,35593051468.0,1707807600 152 | 150,49733.45,52021.37,49296.83,51826.7,39105608050.0,1707894000 153 | 151,51836.79,52820.07,51371.63,51938.55,38564360533.0,1707980400 154 | 152,51937.73,52537.97,51641.37,52160.2,28180567298.0,1708066800 155 | 153,52161.68,52191.91,50669.67,51663.0,20009091006.0,1708153200 156 | 154,51661.97,52356.96,51233.71,52122.55,17595377311.0,1708239600 157 | 155,52134.81,52483.32,51711.82,51779.14,21362184346.0,1708326000 158 | 156,51777.73,52945.05,50792.31,52284.88,33353758256.0,1708412400 159 | 157,52273.54,52368.82,50671.76,51839.18,28624907020.0,1708498800 160 | 158,51854.64,52009.61,50926.29,51304.97,25413900611.0,1708585200 161 | 159,51283.91,51497.93,50561.78,50731.95,21427078270.0,1708671600 162 | 160,50736.37,51684.2,50585.45,51571.1,15174077879.0,1708758000 163 | 161,51565.21,51950.03,51306.17,51733.24,15413239245.0,1708844400 164 | 162,51730.54,54938.18,50931.03,54522.4,34074411896.0,1708930800 165 | 163,54519.36,57537.84,54484.2,57085.37,49756832031.0,1709017200 166 | 
164,57071.1,63913.13,56738.43,62504.79,83239156760.0,1709103600 167 | 165,62499.18,63585.64,60498.73,61198.38,65496611844.0,1709190000 168 | 166,61168.06,63155.1,60802.53,62440.63,40186368423.0,1709276400 169 | 167,62431.65,62458.7,61657.29,62029.85,23888473685.0,1709362800 170 | 168,62031.58,63230.21,61435.02,63167.37,26253811450.0,1709449200 171 | 169,63137.0,68537.03,62386.52,68330.41,70670471105.0,1709535600 172 | 170,68341.05,69170.63,59323.91,63801.2,102802940877.0,1709622000 173 | 171,63776.05,67637.93,62848.67,66106.8,68750229073.0,1709708400 174 | 172,66099.74,68029.92,65655.53,66925.48,46989543159.0,1709794800 175 | 173,66938.09,70083.05,66230.45,68300.09,59202881172.0,1709881200 176 | 174,68299.26,68673.05,68053.13,68498.88,21609650379.0,1709967600 177 | 175,68500.26,70005.2,68239.98,69019.79,35683977532.0,1710054000 178 | 176,69020.55,72850.71,67194.88,72123.91,65716656765.0,1710140400 179 | 177,72125.13,72825.66,68728.85,71481.29,62554434520.0,1710226800 180 | 178,71482.12,73637.48,71334.09,73083.5,48212536929.0,1710313200 181 | 179,73079.38,73750.07,68563.02,71396.59,59594605698.0,1710399600 182 | 180,71387.88,72357.13,65630.7,69403.77,78320453976.0,1710486000 183 | 181,69392.48,70046.27,64801.39,65315.12,46842198371.0,1710572400 184 | 182,65316.34,68845.72,64545.32,68390.63,44716864318.0,1710658800 185 | 183,68371.3,68897.13,66594.23,67548.59,49261579492.0,1710745200 186 | 184,67556.13,68106.93,61536.18,61912.77,74215844794.0,1710831600 187 | 185,61930.16,68115.26,60807.79,67913.67,66792634382.0,1710918000 188 | 186,67911.59,68199.99,64580.92,65491.39,44480350565.0,1711004400 189 | 187,65489.93,66623.75,62355.37,63778.76,41401116964.0,1711090800 190 | 188,63802.72,65976.4,63038.49,64062.2,24738964812.0,1711177200 191 | 189,64070.75,67622.76,63825.85,67234.17,27206630673.0,1711263600 192 | 190,67234.09,71162.59,66414.84,69958.81,42700139523.0,1711350000 193 | 191,69931.33,71535.74,69335.61,69987.84,36010437368.0,1711436400 194 | 192,69991.9,71727.69,68381.93,69455.34,40827113309.0,1711522800 195 | 193,69452.77,71546.02,68895.51,70744.95,34374900617.0,1711609200 196 | 194,70744.8,70913.09,69076.66,69892.83,25230851763.0,1711695600 197 | 195,69893.45,70355.49,69601.06,69645.3,17130241883.0,1711782000 198 | 196,69647.78,71377.78,69624.87,71333.65,20050941373.0,1711868400 199 | 197,71333.48,71342.09,68110.7,69702.15,34873527352.0,1711954800 200 | 198,69705.02,69708.38,64586.59,65446.97,50705240709.0,1712041200 201 | 199,65446.67,66914.32,64559.9,65980.81,34488018367.0,1712127600 202 | 200,65975.7,69291.26,65113.8,68508.84,34439527442.0,1712214000 203 | 201,68515.76,68725.76,66011.48,67837.64,33748230056.0,1712300400 204 | 202,67840.57,69629.6,67491.72,68896.11,19967785809.0,1712386800 205 | 203,68897.11,70284.43,68851.63,69362.55,21204930369.0,1712473200 206 | 204,69362.55,72715.36,69064.24,71631.36,37261432669.0,1712559600 207 | 205,71632.5,71742.51,68212.92,69139.02,36426900409.0,1712646000 208 | 206,69140.24,71093.43,67503.56,70587.88,38318601774.0,1712732400 209 | 207,70575.73,71256.23,69571.81,70060.61,30153382941.0,1712818800 210 | 208,70061.38,71222.74,65254.84,67195.87,44129299406.0,1712905200 211 | 209,67188.38,67931.43,60919.11,63821.47,52869738185.0,1712991600 212 | 210,63836.23,65824.43,62205.85,65738.73,49084320047.0,1713078000 213 | 211,65739.65,66878.65,62332.07,63426.21,43595917654.0,1713164400 214 | 212,63419.3,64355.67,61716.4,63811.86,42847528078.0,1713250800 215 | 213,63831.85,64486.36,59768.59,61276.69,41915247049.0,1713337200 216 | 
214,61275.32,64125.69,60833.48,63512.75,36006307335.0,1713423600 217 | 215,63510.75,65481.6,59651.39,63843.57,49920425401.0,1713510000 218 | 216,63851.1,65442.46,63172.4,64994.44,23097485495.0,1713596400 219 | 217,64992.82,65723.24,64277.72,64926.64,20506644853.0,1713682800 220 | 218,64935.63,67233.96,64548.18,66837.68,28282686673.0,1713769200 221 | 219,66839.89,67199.24,65864.87,66407.27,24310975583.0,1713855600 222 | 220,66408.72,67075.37,63589.87,64276.9,30276655120.0,1713942000 223 | 221,64275.02,65275.21,62783.63,64481.71,32155786816.0,1714028400 224 | 222,64485.37,64789.66,63322.4,63755.32,24139372950.0,1714114800 225 | 223,63750.99,63898.36,62424.72,63419.14,19530783039.0,1714201200 226 | 224,63423.52,64321.48,62793.6,63113.23,17334827993.0,1714287600 227 | 225,63106.36,64174.88,61795.46,63841.12,26635912073.0,1714374000 228 | 226,63839.42,64703.33,59120.07,60636.86,37840840057.0,1714460400 229 | 227,60609.5,60780.5,56555.29,58254.01,48439780271.0,1714546800 230 | 228,58253.7,59602.3,56937.2,59123.43,32711813559.0,1714633200 231 | 229,59122.3,63320.5,58848.31,62889.84,33172023048.0,1714719600 232 | 230,62891.03,64494.96,62599.35,63891.47,20620477992.0,1714806000 233 | 231,63892.45,64610.89,62955.3,64031.13,18296164805.0,1714892400 234 | 232,64038.31,65494.9,62746.24,63161.95,28697928697.0,1714978800 235 | 233,63162.76,64390.46,62285.98,62334.82,25930730982.0,1715065200 236 | 234,62332.64,62986.09,60877.13,61187.94,26088172222.0,1715151600 237 | 235,61191.2,63404.91,60648.07,63049.96,25453338161.0,1715238000 238 | 236,63055.19,63446.74,60208.78,60792.78,27804954694.0,1715324400 239 | 237,60793.36,61451.15,60492.63,60793.71,13842272968.0,1715410800 240 | 238,60793.5,61818.16,60632.6,61448.39,13800459405.0,1715497200 241 | 239,61451.22,63422.66,60769.84,62901.45,27889181179.0,1715583600 242 | 240,62900.77,63092.13,61123.77,61552.79,28186271527.0,1715670000 243 | 241,61553.99,66454.45,61330.41,66267.49,39815167074.0,1715756400 244 | 242,66256.11,66712.43,64613.05,65231.58,31573077994.0,1715842800 245 | 243,65231.3,67459.46,65119.32,67051.88,28031279310.0,1715929200 246 | 244,67066.21,67387.33,66663.5,66940.8,16712277406.0,1716015600 247 | 245,66937.93,67694.3,65937.18,66278.37,19249094538.0,1716102000 248 | 246,66278.74,71483.56,66086.17,71448.2,43850655717.0,1716188400 249 | 247,71443.06,71946.46,69191.13,70136.53,46932005990.0,1716274800 250 | 248,70135.32,70623.7,68977.7,69122.34,32802561717.0,1716361200 251 | 249,69121.3,70041.27,66356.95,67929.56,41895680979.0,1716447600 252 | 250,67928.13,69220.3,66622.67,68526.1,29197308153.0,1716534000 253 | 251,68526.92,69579.32,68515.82,69265.95,15473071741.0,1716620400 254 | 252,69264.29,69506.23,68183.89,68518.09,15628433737.0,1716706800 255 | 253,68512.18,70597.88,68232.5,69394.55,25870990717.0,1716793200 256 | 254,69392.2,69514.64,67227.16,68296.22,32722265965.0,1716879600 257 | 255,68296.35,68852.46,67101.49,67578.09,26707072906.0,1716966000 258 | 256,67576.09,69500.54,67118.08,68364.99,29509712534.0,1717052400 259 | 257,68362.52,68999.56,66633.42,67491.41,27387283769.0,1717138800 260 | 258,67489.61,67839.77,67386.2,67706.94,11641495604.0,1717225200 261 | 259,67710.27,68409.16,67315.52,67751.6,17110588415.0,1717311600 262 | 260,67753.9,70230.82,67589.84,68804.78,32401285324.0,1717398000 263 | 261,68804.57,71047.41,68564.64,70567.77,33149696545.0,1717484400 264 | 262,70568.35,71735.41,70390.71,71082.82,32810771409.0,1717570800 265 | 263,71082.84,71625.73,70119.13,70757.16,25223152007.0,1717657200 266 | 
264,70759.19,71907.85,68507.26,69342.59,36188381096.0,1717743600 267 | 265,69324.18,69533.32,69210.74,69305.77,14262185861.0,1717830000 268 | 266,69297.49,69817.52,69160.84,69647.99,13534028500.0,1717916400 269 | 267,69644.31,70146.07,69232.42,69512.28,20597699541.0,1718002800 270 | 268,69508.08,69549.41,66123.6,67332.03,37116136345.0,1718089200 271 | 269,67321.38,69977.89,66902.45,68241.19,34497940694.0,1718175600 272 | 270,68243.1,68365.78,66304.56,66756.4,28955204146.0,1718262000 273 | 271,66747.57,67294.65,65056.89,66011.09,27403884779.0,1718348400 274 | 272,66006.74,66402.19,65871.77,66191.0,14121265576.0,1718434800 275 | 273,66189.36,66894.84,66018.25,66639.05,13281140541.0,1718521200 276 | 274,66636.52,67188.32,65094.96,66490.3,30006354476.0,1718607600 277 | 275,66490.98,66556.7,64066.96,65140.75,39481285950.0,1718694000 278 | 276,65146.66,65695.35,64693.3,64960.3,21103423504.0,1718780400 279 | 277,64960.3,66438.96,64547.85,64828.66,25641109124.0,1718866800 280 | 278,64837.99,65007.55,63378.89,64096.2,26188171739.0,1718953200 281 | 279,64113.86,64475.47,63929.76,64252.58,9858198793.0,1719039600 282 | 280,64248.96,64491.7,63180.8,63180.8,11170471802.0,1719126000 283 | 281,63173.35,63292.53,58601.7,60277.41,43152133651.0,1719212400 284 | 282,60266.28,62258.26,60239.75,61804.64,29201215431.0,1719298800 285 | 283,61789.68,62434.14,60695.19,60811.28,22506003064.0,1719385200 286 | 284,60811.23,62293.86,60585.33,61604.8,21231745045.0,1719471600 287 | 285,61612.8,62126.1,59985.4,60320.14,24952866877.0,1719558000 288 | 286,60319.88,61097.62,60300.96,60887.38,12652903396.0,1719644400 289 | 287,60888.45,62892.83,60632.95,62678.29,17333226409.0,1719730800 290 | 288,62673.61,63777.23,62495.51,62851.98,25468379421.0,1719817200 291 | 289,62844.41,63203.36,61752.75,62029.02,20151616992.0,1719903600 292 | 290,62034.33,62187.7,59419.39,60173.92,29756701685.0,1719990000 293 | 291,60147.14,60399.68,56777.8,56977.7,41149609230.0,1720076400 294 | 292,57022.81,57497.15,53717.38,56662.38,55417544033.0,1720162800 295 | 293,56659.07,58472.55,56038.96,58303.54,20610320577.0,1720249200 296 | 294,58239.43,58371.12,55793.32,55849.11,20553359505.0,1720335600 297 | 295,55849.57,58131.34,54321.02,56705.1,39766159899.0,1720422000 298 | 296,56704.6,58239.2,56316.88,58009.23,27849512607.0,1720508400 299 | 297,58033.88,59359.43,57178.41,57742.5,26175260526.0,1720594800 300 | 298,57729.89,59299.43,57120.38,57344.91,28707803842.0,1720681200 301 | 299,57341.2,58532.55,56590.18,57899.46,25604805221.0,1720767600 302 | 300,57908.74,59787.08,57796.44,59231.95,17080061806.0,1720854000 303 | 301,59225.25,61329.53,59225.25,60787.79,22223416061.0,1720940400 304 | 302,60815.46,64870.15,60704.93,64870.15,38094526099.0,1721026800 305 | 303,64784.42,65354.34,62487.97,65097.15,41617346768.0,1721113200 306 | 304,65091.83,66066.73,63896.09,64118.79,32525071311.0,1721199600 307 | 305,64104.74,65104.66,63246.16,63974.07,27239305337.0,1721286000 308 | 306,63972.32,67442.64,63329.34,66710.16,37003855410.0,1721372400 309 | 307,66709.92,67610.73,66299.62,67163.65,19029581250.0,1721458800 310 | 308,67164.91,68372.91,65842.3,68154.52,26652190004.0,1721545200 311 | 309,68152.98,68480.06,66611.3,67585.25,42649109453.0,1721631600 312 | 310,67584.8,67779.02,65484.46,65927.67,35605668666.0,1721718000 313 | 311,65927.86,67113.98,65147.0,65372.13,27470942309.0,1721804400 314 | 312,65375.88,66112.42,63473.47,65777.23,38315761670.0,1721890800 315 | 313,65771.81,68207.6,65743.77,67912.06,30488630457.0,1721977200 316 | 
314,67911.81,69398.51,66705.22,67813.34,34691905492.0,1722063600 317 | 315,67808.66,68301.85,67085.83,68255.87,18043166945.0,1722150000 318 | 316,68259.05,69987.54,66532.59,66819.91,40780682628.0,1722236400 319 | 317,66819.05,66987.67,65323.19,66201.02,31380492109.0,1722322800 320 | 318,66201.27,66810.21,64532.05,64619.25,31292785994.0,1722409200 321 | 319,64625.84,65593.24,62248.94,65357.5,40975554494.0,1722495600 322 | 320,65353.5,65523.22,61184.89,61415.07,43060875727.0,1722582000 323 | 321,61414.81,62148.37,59836.53,60680.09,31753030589.0,1722668400 324 | 322,60676.09,61062.99,57210.8,58116.98,31758917219.0,1722754800 325 | 323,58110.3,58268.83,49121.24,53991.46,108991085584.0,1722841200 326 | 324,53991.35,57059.92,53973.27,56034.32,49300484106.0,1722927600 327 | 325,56040.63,57726.88,54620.51,55027.46,41637562185.0,1723014000 328 | 326,55030.03,62673.77,54766.73,61710.14,45298472567.0,1723100400 329 | 327,61728.21,61751.86,59587.86,60880.11,33425553115.0,1723186800 330 | 328,60881.23,61464.51,60287.57,60945.81,15745822278.0,1723273200 331 | 329,60944.89,61778.66,58348.82,58719.48,22759754812.0,1723359600 332 | 330,58719.39,60680.33,57688.9,59354.52,37078637820.0,1723446000 333 | 331,59356.21,61572.4,58506.25,60609.57,30327698167.0,1723532400 334 | 332,60611.05,61687.76,58472.88,58737.27,29961696180.0,1723618800 335 | 333,58733.26,59838.65,56161.59,57560.1,35682112440.0,1723705200 336 | 334,57560.27,59847.36,57110.02,58894.11,29350938673.0,1723791600 337 | 335,58893.53,59694.67,58814.83,59478.97,13589684021.0,1723878000 338 | 336,59468.13,60262.72,58445.4,58483.96,17740625837.0,1723964400 339 | 337,58480.71,59612.66,57864.71,59493.45,25911207712.0,1724050800 340 | 338,59493.45,61396.33,58610.88,59012.79,31613400008.0,1724137200 341 | 339,59014.99,61834.35,58823.45,61175.19,32731154072.0,1724223600 342 | 340,61168.32,61408.11,59815.25,60381.91,27625734377.0,1724310000 343 | 341,60380.95,64947.06,60372.05,64094.36,42530509233.0,1724396400 344 | 342,64103.87,64513.79,63619.92,64178.99,21430585163.0,1724482800 345 | 343,64176.37,64996.42,63833.52,64333.54,18827683555.0,1724569200 346 | 344,64342.23,64489.71,62849.56,62880.66,27682040631.0,1724655600 347 | 345,62879.71,63210.8,58116.75,59504.13,39103882198.0,1724742000 348 | 346,59507.93,60236.45,57890.68,59027.63,40289564698.0,1724828400 349 | 347,59027.47,61184.08,58786.23,59388.18,32224990582.0,1724914800 350 | 348,59388.6,59896.89,57768.53,59119.48,32292756405.0,1725001200 351 | 349,59117.48,59432.59,58768.79,58969.9,12403470760.0,1725087600 352 | 350,58969.8,59062.07,57217.82,57325.49,24592449997.0,1725174000 353 | 351,57326.97,59403.07,57136.03,59112.48,27036454524.0,1725260400 354 | 352,59106.19,59815.06,57425.17,57431.02,26666961053.0,1725346800 355 | 353,57430.35,58511.57,55673.16,57971.54,35627680312.0,1725433200 356 | 354,57971.7,58300.58,55712.45,56160.49,31030280656.0,1725519600 357 | 355,56160.19,56976.11,52598.7,53948.75,49361693566.0,1725606000 358 | 356,53949.09,54838.14,53740.07,54139.69,19061486526.0,1725692400 359 | 357,54147.93,55300.86,53653.76,54841.57,18268287531.0,1725778800 360 | 358,54851.89,58041.13,54598.43,57019.54,34618096173.0,1725865200 361 | 359,57020.1,58029.98,56419.41,57648.71,28857630507.0,1725951600 362 | 360,57650.29,57991.32,55567.34,57343.17,37049062672.0,1726038000 363 | 361,57343.17,58534.36,57330.1,58127.01,33835707949.0,1726124400 364 | 362,58130.32,60648.02,57650.11,60571.3,32490528356.0,1726210800 365 | 363,60569.12,60656.72,59517.88,60005.12,16428405496.0,1726297200 366 | 
364,60000.73,60381.92,58696.31,59182.84,18120960867.0,1726383600 367 | -------------------------------------------------------------------------------- /data/2018-09-17_2024-09-16_86400/val.csv: -------------------------------------------------------------------------------- 1 | ,Open,High,Low,Close,Volume,Timestamp 2 | 0,19777.03,20162.53,19777.03,20127.58,24957448100.0,1663398000 3 | 1,20127.23,20127.23,19387.49,19419.51,31254779144.0,1663484400 4 | 2,19418.57,19639.48,18390.32,19544.13,40177002624.0,1663570800 5 | 3,19545.59,19602.46,18813.46,18890.79,36791346508.0,1663657200 6 | 4,18891.28,19674.63,18290.31,18547.4,46363793975.0,1663743600 7 | 5,18534.65,19456.91,18415.59,19413.55,41135767926.0,1663830000 8 | 6,19412.4,19464.67,18617.55,19297.64,38896078052.0,1663916400 9 | 7,19296.99,19310.2,18861.97,18937.01,26149643168.0,1664002800 10 | 8,18936.31,19134.73,18696.47,18802.1,23359966112.0,1664089200 11 | 9,18803.9,19274.87,18721.29,19222.67,44148798321.0,1664175600 12 | 10,19221.84,20338.46,18915.67,19110.55,58571439619.0,1664262000 13 | 11,19104.62,19688.34,18553.3,19426.72,53071298734.0,1664348400 14 | 12,19427.78,19589.27,18924.35,19573.05,41037843771.0,1664434800 15 | 13,19573.43,20109.85,19265.66,19431.79,43975248085.0,1664521200 16 | 14,19431.11,19471.15,19231.08,19312.1,18719537670.0,1664607600 17 | 15,19311.85,19370.31,18970.62,19044.11,20765955327.0,1664694000 18 | 16,19044.07,19653.54,19025.23,19623.58,30484729489.0,1664780400 19 | 17,19623.58,20380.34,19523.84,20336.84,35887278685.0,1664866800 20 | 18,20335.9,20343.75,19801.8,20160.72,33223790572.0,1664953200 21 | 19,20161.04,20408.39,19900.09,19955.44,34711412966.0,1665039600 22 | 20,19957.56,20041.09,19395.79,19546.85,29227315390.0,1665126000 23 | 21,19546.33,19601.7,19299.41,19416.57,16437423167.0,1665212400 24 | 22,19417.48,19542.54,19349.26,19446.43,16837262532.0,1665298800 25 | 23,19446.42,19515.47,19102.98,19141.48,27425022774.0,1665385200 26 | 24,19139.0,19241.96,18925.6,19051.42,28711532910.0,1665471600 27 | 25,19052.65,19203.2,19029.76,19157.45,24950173846.0,1665558000 28 | 26,19156.97,19453.33,18319.82,19382.9,44219840004.0,1665644400 29 | 27,19382.53,19889.15,19115.41,19185.66,38452356727.0,1665730800 30 | 28,19185.44,19212.54,19019.25,19067.63,16192235532.0,1665817200 31 | 29,19068.91,19389.6,19068.91,19268.09,17988916650.0,1665903600 32 | 30,19268.56,19635.8,19173.33,19550.76,27472552998.0,1665990000 33 | 31,19550.47,19666.99,19144.77,19334.42,30580012344.0,1666076400 34 | 32,19335.03,19348.42,19127.69,19139.54,22425387184.0,1666162800 35 | 33,19138.09,19315.2,18971.46,19053.74,24493974420.0,1666249200 36 | 34,19053.2,19237.38,18770.97,19172.47,32459287866.0,1666335600 37 | 35,19172.38,19248.07,19132.24,19208.19,16104440957.0,1666422000 38 | 36,19207.73,19646.65,19124.2,19567.01,22128794335.0,1666508400 39 | 37,19567.77,19589.13,19206.32,19345.57,30202235805.0,1666594800 40 | 38,19344.96,20348.41,19261.45,20095.86,47761524910.0,1666681200 41 | 39,20092.24,20938.13,20076.12,20770.44,58895950537.0,1666767600 42 | 40,20772.8,20854.04,20255.37,20285.84,49625110402.0,1666854000 43 | 41,20287.96,20724.98,20086.07,20595.35,43994715910.0,1666940400 44 | 42,20595.1,20988.39,20566.48,20818.48,40369840645.0,1667026800 45 | 43,20817.98,20917.01,20547.46,20635.6,31486345556.0,1667113200 46 | 44,20633.7,20795.32,20287.46,20495.77,45668466815.0,1667199600 47 | 45,20494.9,20647.29,20359.85,20485.27,39819303159.0,1667286000 48 | 46,20482.96,20742.81,20087.13,20159.5,55552169483.0,1667372400 49 | 
47,20162.69,20382.1,20086.24,20209.99,43228750179.0,1667458800 50 | 48,20208.77,21209.56,20188.02,21147.23,64072727950.0,1667545200 51 | 49,21144.83,21446.89,21097.63,21282.69,37846047609.0,1667631600 52 | 50,21285.06,21345.38,20920.19,20926.49,35082693210.0,1667718000 53 | 51,20924.62,21053.25,20489.97,20602.82,53510852236.0,1667804400 54 | 52,20600.67,20664.61,17603.54,18541.27,118992465607.0,1667890800 55 | 53,18543.76,18590.46,15682.69,15880.78,102905151606.0,1667977200 56 | 54,15883.16,18054.31,15834.02,17586.77,83202283721.0,1668063600 57 | 55,17583.25,17650.94,16543.48,17034.29,55871616488.0,1668150000 58 | 56,17036.88,17066.68,16651.78,16799.19,29717699419.0,1668236400 59 | 57,16799.72,16920.77,16320.63,16353.37,27209183682.0,1668322800 60 | 58,16352.03,17109.32,15872.94,16618.2,49630243054.0,1668409200 61 | 59,16617.48,17051.96,16542.55,16884.61,36599436183.0,1668495600 62 | 60,16884.34,16960.29,16430.11,16669.44,33925512989.0,1668582000 63 | 61,16670.43,16726.44,16460.68,16687.52,27868914022.0,1668668400 64 | 62,16687.91,16947.06,16564.61,16697.78,26862218609.0,1668754800 65 | 63,16696.22,16797.88,16570.41,16711.55,16106223492.0,1668841200 66 | 64,16712.92,16746.78,16248.69,16291.83,21313378652.0,1668927600 67 | 65,16291.22,16291.22,15599.05,15787.28,37429485518.0,1669014000 68 | 66,15782.3,16253.05,15656.61,16189.77,30726828760.0,1669100400 69 | 67,16195.59,16638.19,16170.5,16610.71,32958875628.0,1669186800 70 | 68,16611.64,16771.47,16501.77,16604.46,26129037414.0,1669273200 71 | 69,16602.27,16603.32,16388.4,16521.84,18678255976.0,1669359600 72 | 70,16521.58,16666.86,16416.23,16464.28,18000008764.0,1669446000 73 | 71,16463.88,16594.41,16437.03,16444.63,20443898509.0,1669532400 74 | 72,16440.22,16482.93,16054.53,16217.32,27743025156.0,1669618800 75 | 73,16217.64,16522.26,16139.4,16444.98,23581685468.0,1669705200 76 | 74,16445.48,17190.94,16445.48,17168.57,29523576583.0,1669791600 77 | 75,17168.0,17197.5,16888.39,16967.13,22895392882.0,1669878000 78 | 76,16968.68,17088.66,16877.88,17088.66,19539705127.0,1669964400 79 | 77,17090.1,17116.04,16888.14,16908.24,16217776704.0,1670050800 80 | 78,16908.17,17157.77,16903.44,17130.49,16824520830.0,1670137200 81 | 79,17128.89,17378.15,16922.43,16974.83,22209086834.0,1670223600 82 | 80,16975.24,17091.86,16939.92,17089.5,19889922369.0,1670310000 83 | 81,17089.51,17109.38,16750.56,16848.13,19675404389.0,1670396400 84 | 82,16847.35,17267.92,16788.78,17233.47,20496603770.0,1670482800 85 | 83,17232.15,17280.55,17100.84,17133.15,20328426366.0,1670569200 86 | 84,17134.22,17216.83,17120.68,17128.72,12706781969.0,1670655600 87 | 85,17129.71,17245.63,17091.82,17104.19,14122486832.0,1670742000 88 | 86,17102.5,17212.56,16899.39,17206.44,19617581341.0,1670828400 89 | 87,17206.44,17930.09,17111.76,17781.32,26634741631.0,1670914800 90 | 88,17782.07,18318.53,17739.51,17815.65,25534481470.0,1671001200 91 | 89,17813.64,17846.74,17322.59,17364.87,20964448341.0,1671087600 92 | 90,17364.55,17505.53,16584.7,16647.48,24031608960.0,1671174000 93 | 91,16646.98,16800.59,16614.03,16795.09,14463581825.0,1671260400 94 | 92,16795.61,16815.39,16697.82,16757.98,10924354698.0,1671346800 95 | 93,16759.04,16807.53,16398.14,16439.68,17221074814.0,1671433200 96 | 94,16441.79,17012.98,16427.87,16906.3,22722096615.0,1671519600 97 | 95,16904.53,16916.8,16755.91,16817.54,14882945045.0,1671606000 98 | 96,16818.38,16866.67,16592.41,16830.34,16441573050.0,1671692400 99 | 97,16829.64,16905.22,16794.46,16796.95,15329265213.0,1671778800 100 | 
98,16796.98,16864.7,16793.53,16847.76,9744636213.0,1671865200 101 | 99,16847.51,16860.55,16755.25,16841.99,11656379938.0,1671951600 102 | 100,16842.25,16920.12,16812.37,16919.8,11886957804.0,1672038000 103 | 101,16919.29,16959.85,16642.07,16717.17,15748580239.0,1672124400 104 | 102,16716.4,16768.17,16497.56,16552.57,17005713920.0,1672210800 105 | 103,16552.32,16651.76,16508.68,16642.34,14472237479.0,1672297200 106 | 104,16641.33,16643.43,16408.47,16602.59,15929162910.0,1672383600 107 | 105,16603.67,16628.99,16517.52,16547.5,11239186456.0,1672470000 108 | 106,16547.91,16630.44,16521.23,16625.08,9244361700.0,1672556400 109 | 107,16625.51,16759.34,16572.23,16688.47,12097775227.0,1672642800 110 | 108,16688.85,16760.45,16622.37,16679.86,13903079207.0,1672729200 111 | 109,16680.21,16964.59,16667.76,16863.24,18421743322.0,1672815600 112 | 110,16863.47,16884.02,16790.28,16836.74,13692758566.0,1672902000 113 | 111,16836.47,16991.99,16716.42,16951.97,14413662913.0,1672988400 114 | 112,16952.12,16975.02,16914.19,16955.08,7714767174.0,1673074800 115 | 113,16954.15,17091.14,16924.05,17091.14,9768827914.0,1673161200 116 | 114,17093.99,17389.96,17093.99,17196.55,18624736866.0,1673247600 117 | 115,17192.95,17484.72,17162.99,17446.29,15808338949.0,1673334000 118 | 116,17446.36,17934.9,17337.99,17934.9,18372283782.0,1673420400 119 | 117,18117.59,19030.09,17995.2,18869.59,34971338710.0,1673506800 120 | 118,18868.91,19964.32,18753.16,19909.57,29225029694.0,1673593200 121 | 119,19910.54,21075.14,19907.83,20976.3,38967784639.0,1673679600 122 | 120,20977.48,20993.75,20606.99,20880.8,19298407543.0,1673766000 123 | 121,20882.22,21360.88,20715.75,21169.63,26792494050.0,1673852400 124 | 122,21175.83,21438.66,20978.53,21161.52,24999983362.0,1673938800 125 | 123,21161.05,21564.5,20541.54,20688.78,30005625418.0,1674025200 126 | 124,20686.75,21163.01,20685.38,21086.79,21152848261.0,1674111600 127 | 125,21085.37,22692.36,20919.13,22676.55,28799154319.0,1674198000 128 | 126,22677.43,23282.35,22511.83,22777.63,32442278429.0,1674284400 129 | 127,22777.99,23056.73,22387.9,22720.42,24746386230.0,1674370800 130 | 128,22721.09,23126.49,22654.3,22934.43,26518700512.0,1674457200 131 | 129,22929.63,23134.01,22549.74,22636.47,26405069715.0,1674543600 132 | 130,22639.27,23722.1,22406.08,23117.86,30685366709.0,1674630000 133 | 131,23108.96,23237.08,22911.37,23032.78,26357839322.0,1674716400 134 | 132,23030.72,23417.72,22654.59,23078.73,25383335641.0,1674802800 135 | 133,23079.96,23165.9,22908.85,23031.09,14712928379.0,1674889200 136 | 134,23031.45,23919.89,22985.07,23774.57,27423687259.0,1674975600 137 | 135,23774.65,23789.35,22657.58,22840.14,27205595568.0,1675062000 138 | 136,22840.8,23225.02,22765.57,23139.28,22837828665.0,1675148400 139 | 137,23137.84,23764.54,22877.75,23723.77,26683255504.0,1675234800 140 | 138,23720.82,24167.21,23468.6,23471.87,32066936882.0,1675321200 141 | 139,23469.41,23678.1,23279.96,23449.32,27083066007.0,1675407600 142 | 140,23446.32,23556.95,23291.79,23331.85,15639298538.0,1675494000 143 | 141,23332.25,23423.44,22841.76,22955.67,19564262605.0,1675580400 144 | 142,22954.02,23119.28,22692.03,22760.11,23825006542.0,1675666800 145 | 143,22757.27,23310.97,22756.26,23264.29,27187964471.0,1675753200 146 | 144,23263.42,23367.96,22731.1,22939.4,25371367758.0,1675839600 147 | 145,22946.57,22996.44,21773.97,21819.04,32572572185.0,1675926000 148 | 146,21819.01,21941.19,21539.39,21651.18,27078406594.0,1676012400 149 | 147,21651.84,21891.41,21618.45,21870.88,16356226232.0,1676098800 150 | 
148,21870.9,22060.99,21682.83,21788.2,17821046406.0,1676185200 151 | 149,21787.0,21898.41,21460.09,21808.1,23918742607.0,1676271600 152 | 150,21801.82,22293.14,21632.39,22220.8,26792596581.0,1676358000 153 | 151,22220.59,24307.84,22082.77,24307.84,32483312909.0,1676444400 154 | 152,24307.35,25134.12,23602.52,23623.47,39316664596.0,1676530800 155 | 153,23621.28,24924.04,23460.76,24565.6,41358451255.0,1676617200 156 | 154,24565.3,24798.84,24468.37,24641.28,19625427158.0,1676703600 157 | 155,24640.03,25093.05,24327.64,24327.64,25555105670.0,1676790000 158 | 156,24336.62,25020.46,23927.91,24829.15,28987376573.0,1676876400 159 | 157,24833.05,25126.85,24200.36,24436.35,31252098714.0,1676962800 160 | 158,24437.42,24472.34,23644.32,24188.84,30199996781.0,1677049200 161 | 159,24190.72,24572.09,23693.92,23947.49,30476264066.0,1677135600 162 | 160,23946.01,24103.71,23007.07,23198.13,26811744928.0,1677222000 163 | 161,23200.13,23210.21,22861.56,23175.38,16100721565.0,1677308400 164 | 162,23174.15,23654.37,23084.22,23561.21,16644534842.0,1677394800 165 | 163,23561.45,23857.89,23205.88,23522.87,22660763494.0,1677481200 166 | 164,23521.84,23585.38,23077.65,23147.35,20535363434.0,1677567600 167 | 165,23150.93,23880.63,23088.63,23646.55,24662841200.0,1677654000 168 | 166,23647.02,23739.14,23245.02,23475.47,20386398516.0,1677740400 169 | 167,23476.63,23479.35,22213.24,22362.68,26062404610.0,1677826800 170 | 168,22362.92,22405.18,22198.98,22353.35,11166012913.0,1677913200 171 | 169,22354.14,22613.69,22307.14,22435.51,13317001733.0,1677999600 172 | 170,22436.82,22584.29,22331.31,22429.76,17353192895.0,1678086000 173 | 171,22428.32,22527.42,22011.26,22219.77,22765452204.0,1678172400 174 | 172,22216.44,22268.9,21708.05,21718.08,22536575684.0,1678258800 175 | 173,21720.08,21802.72,20210.31,20363.02,30364664171.0,1678345200 176 | 174,20367.0,20370.6,19628.25,20187.24,39578257695.0,1678431600 177 | 175,20187.88,20792.53,20068.66,20632.41,30180288176.0,1678518000 178 | 176,20628.03,22185.03,20448.81,22163.95,29279035521.0,1678604400 179 | 177,22156.41,24550.84,21918.2,24197.53,49466362688.0,1678690800 180 | 178,24201.77,26514.72,24081.18,24746.07,54622230164.0,1678777200 181 | 179,24770.93,25240.62,23964.91,24375.96,43655701450.0,1678863600 182 | 180,24373.46,25190.33,24225.11,25052.79,33866061747.0,1678950000 183 | 181,25055.12,27787.81,24955.17,27423.93,50730261335.0,1679036400 184 | 182,27448.12,27725.95,26636.26,26965.88,35723036817.0,1679122800 185 | 183,26969.5,28440.56,26907.72,28038.68,37769448859.0,1679209200 186 | 184,28041.6,28527.72,27242.88,27767.24,44774027664.0,1679295600 187 | 185,27768.39,28439.56,27439.65,28175.82,36102192830.0,1679382000 188 | 186,28158.72,28803.34,26760.0,27307.44,33382021890.0,1679468400 189 | 187,27301.96,28729.84,27183.36,28333.97,24220433689.0,1679554800 190 | 188,28324.11,28388.44,27039.27,27493.29,25980310960.0,1679641200 191 | 189,27487.34,27791.83,27196.23,27494.71,13383005987.0,1679727600 192 | 190,27495.52,28178.14,27445.05,27994.33,13878363192.0,1679814000 193 | 191,27994.07,28037.93,26606.69,27139.89,18188895178.0,1679900400 194 | 192,27132.89,27460.72,26677.82,27268.13,17783600385.0,1679986800 195 | 193,27267.03,28619.54,27259.66,28348.44,20684945906.0,1680073200 196 | 194,28350.14,29159.9,27720.16,28033.56,22435349951.0,1680159600 197 | 195,28032.26,28639.9,27583.71,28478.48,19001327598.0,1680246000 198 | 196,28473.33,28802.46,28297.17,28411.04,10876469901.0,1680332400 199 | 197,28462.85,28518.96,27884.09,28199.31,12284641999.0,1680418800 200 | 
200 | 198,28183.08,28475.62,27276.72,27790.22,19556501327.0,1680505200
201 | 199,27795.27,28433.74,27681.3,28168.09,15284538859.0,1680591600
202 | 200,28169.73,28739.24,27843.76,28177.98,17052315986.0,1680678000
203 | 201,28175.23,28178.38,27738.76,28044.14,13837809380.0,1680764400
204 | 202,28038.97,28111.59,27794.03,27925.86,10861680497.0,1680850800
205 | 203,27920.51,28159.86,27883.39,27947.79,9373255556.0,1680937200
206 | 204,27952.37,28532.83,27828.48,28333.05,12175322951.0,1681023600
207 | 205,28336.03,29771.46,28189.27,29652.98,19282400094.0,1681110000
208 | 206,29653.68,30509.08,29609.3,30235.06,20121259843.0,1681196400
209 | 207,30231.58,30462.48,29725.57,30139.05,18651929926.0,1681282800
210 | 208,29892.74,30539.85,29878.62,30399.07,17487721001.0,1681369200
211 | 209,30409.56,31005.61,30044.5,30485.7,22659995079.0,1681455600
212 | 210,30490.75,30601.74,30245.88,30318.5,11940685378.0,1681542000
213 | 211,30315.98,30555.54,30157.83,30315.36,12854816417.0,1681628400
214 | 212,30317.15,30319.2,29275.37,29445.04,17872186762.0,1681714800
215 | 213,29449.09,30470.3,29154.85,30397.55,19480529496.0,1681801200
216 | 214,30394.19,30411.05,28669.9,28822.68,24571565421.0,1681887600
217 | 215,28823.68,29076.4,28037.26,28245.99,21340360360.0,1681974000
218 | 216,28249.23,28349.97,27177.37,27276.91,20759504330.0,1682060400
219 | 217,27265.89,27872.14,27169.57,27817.5,13125734602.0,1682146800
220 | 218,27816.14,27820.24,27400.31,27591.38,12785446832.0,1682233200
221 | 219,27591.73,27979.98,27070.85,27525.34,17703288330.0,1682319600
222 | 220,27514.87,28371.08,27207.93,28307.6,17733373139.0,1682406000
223 | 221,28300.06,29995.84,27324.55,28422.7,31854242019.0,1682492400
224 | 222,28428.46,29871.55,28402.89,29473.79,27153445027.0,1682578800
225 | 223,29481.01,29572.79,28929.61,29340.26,17544464887.0,1682665200
226 | 224,29336.57,29452.46,29088.04,29248.49,10662634333.0,1682751600
227 | 225,29245.52,29952.03,29114.02,29268.81,14652199272.0,1682838000
228 | 226,29227.1,29329.94,27680.79,28091.57,18655599976.0,1682924400
229 | 227,28087.18,28881.3,27924.12,28680.54,16432924527.0,1683010800
230 | 228,28680.49,29259.53,28178.39,29006.31,19122972518.0,1683097200
231 | 229,29031.3,29353.19,28694.04,28847.71,15548678514.0,1683183600
232 | 230,28851.48,29668.91,28845.51,29534.38,17936566518.0,1683270000
233 | 231,29538.86,29820.13,28468.97,28904.62,15913866714.0,1683356400
234 | 232,28901.62,29157.52,28441.37,28454.98,11301355486.0,1683442800
235 | 233,28450.46,28663.27,27310.13,27694.27,19122903752.0,1683529200
236 | 234,27695.07,27821.4,27375.6,27658.78,14128593256.0,1683615600
237 | 235,27654.64,28322.69,26883.67,27621.76,20656025026.0,1683702000
238 | 236,27621.09,27621.94,26781.83,27000.79,16724343943.0,1683788400
239 | 237,26987.66,27055.65,25878.43,26804.99,19313599897.0,1683874800
240 | 238,26807.77,27030.48,26710.87,26784.08,9999171605.0,1683961200
241 | 239,26788.97,27150.98,26661.36,26930.64,10014858959.0,1684047600
242 | 240,26931.38,27646.35,26766.1,27192.69,14413231792.0,1684134000
243 | 241,27171.51,27299.3,26878.95,27036.65,12732238816.0,1684220400
244 | 242,27035.47,27465.93,26600.14,27398.8,15140006925.0,1684306800
245 | 243,27401.65,27466.53,26415.1,26832.21,15222938600.0,1684393200
246 | 244,26826.75,27128.62,26700.21,26890.13,11258983301.0,1684479600
247 | 245,26888.84,27155.16,26843.28,27129.59,7044911360.0,1684566000
248 | 246,27118.42,27265.92,26706.92,26753.83,8647416921.0,1684652400
249 | 247,26749.89,27045.73,26549.73,26851.28,11056770492.0,1684738800
250 | 248,26855.96,27434.68,26816.18,27225.73,13697203143.0,1684825200
251 | 249,27224.6,27224.6,26106.58,26334.82,16299104428.0,1684911600
252 | 250,26329.46,26591.52,25890.59,26476.21,13851122697.0,1684998000
253 | 251,26474.18,26916.67,26343.95,26719.29,12711619225.0,1685084400
254 | 252,26720.18,26888.88,26621.14,26868.35,7892015141.0,1685170800
255 | 253,26871.16,28193.45,26802.75,28085.65,14545229578.0,1685257200
256 | 254,28075.59,28432.04,27563.88,27745.88,15181308984.0,1685343600
257 | 255,27745.12,28044.76,27588.5,27702.35,13251081851.0,1685430000
258 | 256,27700.53,27831.68,26866.45,27219.66,15656371534.0,1685516400
259 | 257,27218.41,27346.11,26671.72,26819.97,14678970415.0,1685602800
260 | 258,26824.56,27303.86,26574.64,27249.59,14837415000.0,1685689200
261 | 259,27252.32,27317.05,26958.0,27075.13,8385597470.0,1685775600
262 | 260,27075.12,27407.02,26968.22,27119.07,9360912318.0,1685862000
263 | 261,27123.11,27129.98,25445.17,25760.1,21513292646.0,1685948400
264 | 262,25732.11,27313.82,25434.87,27238.78,21929670693.0,1686034800
265 | 263,27235.65,27332.18,26146.99,26346.0,19530045082.0,1686121200
266 | 264,26347.65,26797.51,26246.66,26508.22,11904824295.0,1686207600
267 | 265,26505.92,26770.29,26339.31,26480.38,11015551640.0,1686294000
268 | 266,26481.76,26531.04,25501.84,25851.24,19872933189.0,1686380400
269 | 267,25854.03,26203.44,25668.99,25940.17,10732609603.0,1686466800
270 | 268,25934.29,26087.92,25675.2,25902.5,11677889997.0,1686553200
271 | 269,25902.94,26376.35,25728.37,25918.73,14143474486.0,1686639600
272 | 270,25920.26,26041.8,24902.15,25124.68,14265717766.0,1686726000
273 | 271,25121.67,25735.31,24797.17,25576.39,15837384409.0,1686812400
274 | 272,25575.28,26463.17,25245.36,26327.46,16324646965.0,1686898800
275 | 273,26328.68,26769.39,26174.49,26510.68,11090276850.0,1686985200
276 | 274,26510.46,26675.93,26325.89,26336.21,9565695129.0,1687071600
277 | 275,26335.44,26984.61,26312.83,26851.03,12826986222.0,1687158000
278 | 276,26841.66,28388.97,26668.79,28327.49,22211859147.0,1687244400
279 | 277,28311.31,30737.33,28283.41,30027.3,33346760979.0,1687330800
280 | 278,29995.94,30496.0,29679.16,29912.28,20653160491.0,1687417200
281 | 279,29896.38,31389.54,29845.21,30695.47,24115570085.0,1687503600
282 | 280,30708.74,30804.15,30290.15,30548.7,12147822496.0,1687590000
283 | 281,30545.15,31041.27,30327.94,30480.26,12703464114.0,1687676400
284 | 282,30480.52,30636.03,29955.74,30271.13,16493186997.0,1687762800
285 | 283,30274.32,31006.79,30236.65,30688.16,16428827944.0,1687849200
286 | 284,30696.56,30703.28,29921.82,30086.25,14571500779.0,1687935600
287 | 285,30086.19,30796.25,30057.2,30445.35,13180860821.0,1688022000
288 | 286,30441.35,31256.86,29600.28,30477.25,26387306197.0,1688108400
289 | 287,30471.85,30641.29,30328.87,30590.08,9086606733.0,1688194800
290 | 288,30587.27,30766.14,30264.02,30620.77,10533418042.0,1688281200
291 | 289,30624.52,31375.61,30586.51,31156.44,15271884873.0,1688367600
292 | 290,31156.87,31325.2,30659.36,30777.58,12810828427.0,1688454000
293 | 291,30778.72,30877.33,30225.61,30514.17,12481622280.0,1688540400
294 | 292,30507.15,31460.05,29892.23,29909.34,21129219509.0,1688626800
295 | 293,29908.0,30434.64,29777.29,30342.27,13384770155.0,1688713200
296 | 294,30346.92,30374.44,30080.16,30292.54,7509378699.0,1688799600
297 | 295,30291.61,30427.59,30085.59,30171.23,7903327692.0,1688886000
298 | 296,30172.42,31026.08,29985.39,30414.47,14828209155.0,1688972400
299 | 297,30417.63,30788.31,30358.1,30620.95,12151839152.0,1689058800
300 | 298,30622.25,30959.96,30228.84,30391.65,14805659717.0,1689145200
301 | 299,30387.49,31814.52,30268.35,31476.05,23686079548.0,1689231600
302 | 300,31474.72,31582.25,29966.39,30334.07,20917902660.0,1689318000
303 | 301,30331.78,30407.78,30263.46,30295.81,8011667756.0,1689404400
304 | 302,30297.47,30437.56,30089.67,30249.13,8516564470.0,1689490800
305 | 303,30249.63,30336.4,29685.78,30145.89,13240156074.0,1689577200
306 | 304,30147.07,30233.66,29556.43,29856.56,13138897269.0,1689663600
307 | 305,29862.05,30184.18,29794.27,29913.92,12128602812.0,1689750000
308 | 306,29915.25,30195.53,29638.1,29792.02,14655207121.0,1689836400
309 | 307,29805.11,30046.0,29733.85,29908.74,10972789818.0,1689922800
310 | 308,29908.7,29991.62,29664.12,29771.8,7873300598.0,1690009200
311 | 309,29790.11,30330.64,29741.53,30084.54,9220145050.0,1690095600
312 | 310,30081.66,30093.39,28934.29,29176.92,15395817395.0,1690182000
313 | 311,29178.97,29353.16,29062.43,29227.39,10266772793.0,1690268400
314 | 312,29225.76,29675.55,29113.91,29354.97,13497554655.0,1690354800
315 | 313,29353.8,29560.97,29099.35,29210.69,10770779217.0,1690441200
316 | 314,29212.16,29521.51,29125.85,29319.25,11218474952.0,1690527600
317 | 315,29319.45,29396.84,29264.17,29356.92,6481775959.0,1690614000
318 | 316,29357.09,29443.17,29059.5,29275.31,8678454527.0,1690700400
319 | 317,29278.31,29489.87,29131.58,29230.11,11656781982.0,1690786800
320 | 318,29230.87,29675.73,28657.02,29675.73,18272392391.0,1690873200
321 | 319,29704.15,29988.0,28946.51,29151.96,19212655598.0,1690959600
322 | 320,29161.81,29375.71,28959.49,29178.68,12780357746.0,1691046000
323 | 321,29174.38,29302.08,28885.34,29074.09,12036639988.0,1691132400
324 | 322,29075.39,29102.46,28957.8,29042.13,6598366353.0,1691218800
325 | 323,29043.7,29160.82,28963.83,29041.86,7269806994.0,1691305200
326 | 324,29038.51,29244.28,28724.14,29180.58,13618163710.0,1691391600
327 | 325,29180.02,30176.8,29113.81,29765.49,17570561357.0,1691478000
328 | 326,29766.7,30093.44,29376.8,29561.49,18379521213.0,1691564400
329 | 327,29563.97,29688.56,29354.45,29429.59,11865344789.0,1691650800
330 | 328,29424.9,29517.77,29253.52,29397.71,10195168197.0,1691737200
331 | 329,29399.79,29465.11,29357.59,29415.96,6194358008.0,1691823600
332 | 330,29416.59,29441.43,29265.81,29282.91,7329897180.0,1691910000
333 | 331,29283.26,29660.25,29124.11,29408.44,14013695304.0,1691996400
334 | 332,29408.05,29439.12,29088.85,29170.35,12640195779.0,1692082800
335 | 333,29169.07,29221.98,28701.78,28701.78,14949271904.0,1692169200
336 | 334,28699.8,28745.95,25409.11,26664.55,31120851211.0,1692255600
337 | 335,26636.08,26808.2,25668.92,26049.56,24026236529.0,1692342000
338 | 336,26047.83,26249.45,25802.41,26096.21,10631443812.0,1692428400
339 | 337,26096.86,26260.68,26004.31,26189.58,9036580420.0,1692514800
340 | 338,26188.69,26220.2,25846.09,26124.14,13371557893.0,1692601200
341 | 339,26130.75,26135.51,25520.73,26031.66,14503820706.0,1692687600
342 | 340,26040.47,26786.9,25805.0,26431.64,16985265785.0,1692774000
343 | 341,26431.52,26554.91,25914.93,26162.37,12871532023.0,1692860400
344 | 342,26163.68,26248.1,25786.81,26047.67,12406045118.0,1692946800
345 | 343,26047.23,26107.38,25983.88,26008.46,6034817316.0,1693033200
346 | 344,26008.24,26165.37,25965.1,26089.69,6913768611.0,1693119600
347 | 345,26089.62,26198.58,25880.6,26106.15,11002805166.0,1693206000
348 | 346,26102.49,28089.34,25912.63,27727.39,29368391712.0,1693292400
349 | 347,27726.08,27760.16,27069.21,27297.27,16343655235.0,1693378800
350 | 348,27301.93,27456.08,25752.93,25931.47,20181001451.0,1693465200
351 | 349,25934.02,26125.87,25362.61,25800.72,17202862221.0,1693551600
352 | 350,25800.91,25970.29,25753.09,25868.8,10100387473.0,1693638000
353 | 351,25869.47,26087.15,25817.03,25969.57,8962524523.0,1693724400
354 | 352,25968.17,26081.53,25657.03,25812.42,10680635106.0,1693810800
355 | 353,25814.96,25858.38,25589.99,25779.98,11094740040.0,1693897200
356 | 354,25783.93,25953.02,25404.36,25753.24,12752705327.0,1693983600
357 | 355,25748.31,26409.3,25608.2,26240.2,11088307100.0,1694070000
358 | 356,26245.21,26414.01,25677.48,25905.65,10817356400.0,1694156400
359 | 357,25905.43,25921.98,25810.49,25895.68,5481314132.0,1694242800
360 | 358,25895.21,25978.13,25640.26,25832.23,7899553047.0,1694329200
361 | 359,25831.71,25883.95,24930.3,25162.65,14600006467.0,1694415600
362 | 360,25160.66,26451.94,25133.08,25833.34,18657279324.0,1694502000
363 | 361,25837.55,26376.11,25781.12,26228.32,13072077070.0,1694588400
364 | 362,26228.28,26774.62,26171.45,26539.67,13811359124.0,1694674800
365 | 363,26533.82,26840.5,26240.7,26608.69,11479735788.0,1694761200
366 | 364,26606.2,26754.77,26473.89,26568.28,7402031417.0,1694847600
367 | 
--------------------------------------------------------------------------------