├── shimacos
│   ├── .gitkeep
│   ├── bin
│   │   ├── .gitkeep
│   │   ├── stacking_exp061_030_truncate_cat.sh
│   │   ├── stacking_exp059_030_truncate_lgbm.sh
│   │   └── stacking_exp060_030_truncate_small.sh
│   ├── input
│   ├── components
│   │   ├── __init__.py
│   │   ├── stacking
│   │   │   ├── sync_batchnorm
│   │   │   │   ├── __init__.py
│   │   │   │   ├── unittest.py
│   │   │   │   ├── batchnorm_reimpl.py
│   │   │   │   ├── replicate.py
│   │   │   │   └── comm.py
│   │   │   ├── io.py
│   │   │   └── main_nn.py
│   │   ├── factories
│   │   │   ├── detector_factory.py
│   │   │   ├── __init__.py
│   │   │   ├── collate_factory.py
│   │   │   ├── sampler_factory.py
│   │   │   ├── callback_factory.py
│   │   │   ├── scheduler_factory.py
│   │   │   └── optimizer_factory.py
│   │   └── preprocess
│   │       └── make_fold_v2.py
│   ├── yamls
│   │   ├── feature
│   │   │   ├── stacking_030_truncate_small.yaml
│   │   │   └── stacking_009.yaml
│   │   └── stacking.yaml
│   └── .gitignore
├── kami
│   ├── run
│   │   ├── conf
│   │   │   ├── cat_features
│   │   │   │   ├── base.yaml
│   │   │   │   ├── 002.yaml
│   │   │   │   ├── 001.yaml
│   │   │   │   ├── 003.yaml
│   │   │   │   ├── 006.yaml
│   │   │   │   ├── 004.yaml
│   │   │   │   ├── 005.yaml
│   │   │   │   └── 007.yaml
│   │   │   ├── model
│   │   │   │   ├── Spec1D.yaml
│   │   │   │   ├── Spec2DCNN.yaml
│   │   │   │   ├── Spec2DCNNAffine.yaml
│   │   │   │   ├── Spec2DCNNOverlap.yaml
│   │   │   │   ├── Spec2DCNN2Day.yaml
│   │   │   │   ├── Spec2DCNN2DayV2.yaml
│   │   │   │   ├── Spec2DCNNMinMax.yaml
│   │   │   │   ├── Spec2DCNNSplit.yaml
│   │   │   │   ├── Spec2DCNNSplitCat.yaml
│   │   │   │   ├── SpecWeightAvg.yaml
│   │   │   │   └── CenterNet.yaml
│   │   │   ├── decoder
│   │   │   │   ├── MLPDecoder.yaml
│   │   │   │   ├── LSTMDecoder.yaml
│   │   │   │   ├── TransformerDecoder.yaml
│   │   │   │   └── UNet1DDecoder.yaml
│   │   │   ├── loss
│   │   │   │   ├── bce.yaml
│   │   │   │   ├── focal.yaml
│   │   │   │   ├── bi_tempered.yaml
│   │   │   │   ├── tolerance.yaml
│   │   │   │   ├── tolerance_mse.yaml
│   │   │   │   ├── tolerance_nonzero.yaml
│   │   │   │   └── focal_bce.yaml
│   │   │   ├── feature_extractor
│   │   │   │   ├── LSTMFeatureExtractor.yaml
│   │   │   │   ├── SpecFeatureExtractor.yaml
│   │   │   │   ├── CNNSpectrogram.yaml
│   │   │   │   └── PANNsFeatureExtractor.yaml
│   │   │   ├── features
│   │   │   │   ├── 001.yaml
│   │   │   │   ├── base.yaml
│   │   │   │   ├── 002.yaml
│   │   │   │   ├── 003.yaml
│   │   │   │   ├── 005.yaml
│   │   │   │   ├── 004.yaml
│   │   │   │   ├── 006.yaml
│   │   │   │   ├── 007.yaml
│   │   │   │   ├── 009.yaml
│   │   │   │   ├── 010.yaml
│   │   │   │   ├── 011.yaml
│   │   │   │   ├── 008.yaml
│   │   │   │   ├── 012.yaml
│   │   │   │   ├── 013.yaml
│   │   │   │   ├── 014.yaml
│   │   │   │   ├── 015.yaml
│   │   │   │   ├── 016.yaml
│   │   │   │   ├── 017.yaml
│   │   │   │   ├── 021.yaml
│   │   │   │   ├── 025.yaml
│   │   │   │   ├── 018.yaml
│   │   │   │   ├── 024.yaml
│   │   │   │   ├── 031.yaml
│   │   │   │   ├── 022.yaml
│   │   │   │   ├── 023.yaml
│   │   │   │   ├── 027.yaml
│   │   │   │   ├── 028.yaml
│   │   │   │   ├── 029.yaml
│   │   │   │   ├── 030.yaml
│   │   │   │   ├── 032.yaml
│   │   │   │   ├── 019.yaml
│   │   │   │   ├── 020.yaml
│   │   │   │   └── 026.yaml
│   │   │   ├── post_process
│   │   │   │   ├── train.yaml
│   │   │   │   └── score.yaml
│   │   │   ├── ignore
│   │   │   │   ├── 001.yaml
│   │   │   │   ├── 002.yaml
│   │   │   │   └── zero.yaml
│   │   │   ├── dir
│   │   │   │   └── local.yaml
│   │   │   ├── prepare_data.yaml
│   │   │   ├── split_folds.yaml
│   │   │   ├── cv_inference.yaml
│   │   │   ├── cv_train.yaml
│   │   │   └── split
│   │   │       ├── stratify_fold_0.yaml
│   │   │       └── stratify_fold_1.yaml
│   │   └── split_folds.py
│   ├── src
│   │   ├── models
│   │   │   ├── loss
│   │   │   │   ├── bce.py
│   │   │   │   ├── focal.py
│   │   │   │   ├── focal_bce.py
│   │   │   │   ├── tolerance.py
│   │   │   │   ├── tolerance_mse.py
│   │   │   │   └── tolerance_nonzero.py
│   │   │   ├── decoder
│   │   │   │   ├── mlpdecoder.py
│   │   │   │   ├── lstmdecoder.py
│   │   │   │   └── transformerdecoder.py
│   │   │   ├── feature_extractor
│   │   │   │   ├── spectrogram.py
│   │   │   │   ├── lstm.py
│   │   │   │   ├── panns.py
│   │   │   │   └── cnn.py
│   │   │   ├── spec1D.py
│   │   │   ├── spec2Dcnn.py
│   │   │   ├── spec2DcnnOverlap.py
│   │   │   ├── spec2Dcnn2DayV2.py
│   │   │   ├── spec2DcnnAffine.py
│   │   │   ├── spec2Dcnn2Day.py
│   │   │   └── spec2DcnnMinMax.py
│   │   ├── augmentation
│   │   │   ├── mixup.py
│   │   │   └── cutmix.py
│   │   └── utils
│   │       ├── common.py
│   │       └── score.py
│   └── .gitignore
├── .gitignore
├── sakami
│   ├── config
│   │   └── meta.yaml
│   └── src
│       ├── __init__.py
│       ├── config.py
│       └── utils.py
├── compose.yaml
├── README.md
├── Dockerfile
├── pyproject.toml
├── requirements.lock
├── requirements-dev.lock
└── input
    └── folds
        ├── stratify_fold_0.yaml
        ├── stratify_fold_1.yaml
        ├── stratify_fold_2.yaml
        ├── stratify_fold_3.yaml
        └── stratify_fold_4.yaml
/shimacos/.gitkeep:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/shimacos/bin/.gitkeep:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/shimacos/input:
--------------------------------------------------------------------------------
1 | ../input
--------------------------------------------------------------------------------
/shimacos/components/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/kami/run/conf/cat_features/base.yaml:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/kami/run/conf/model/Spec1D.yaml:
--------------------------------------------------------------------------------
1 | name: Spec1D
--------------------------------------------------------------------------------
/kami/run/conf/decoder/MLPDecoder.yaml:
--------------------------------------------------------------------------------
1 | name: MLPDecoder
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | input
2 | !input/folds/*
3 | output
4 | __pycache__
5 |
--------------------------------------------------------------------------------
/sakami/config/meta.yaml:
--------------------------------------------------------------------------------
1 | # competition
2 | competition_name: .
3 |
--------------------------------------------------------------------------------
/kami/run/conf/cat_features/002.yaml:
--------------------------------------------------------------------------------
1 | hour:
2 | dim: 10
3 | num_category: 24
4 |
--------------------------------------------------------------------------------
/kami/run/conf/loss/bce.yaml:
--------------------------------------------------------------------------------
1 | name: bce
2 | bce_weight: 1.0
3 | tolerance_weight: 1.0
--------------------------------------------------------------------------------
/kami/run/conf/loss/focal.yaml:
--------------------------------------------------------------------------------
1 | name: focal
2 | alpha: 0.5
3 | gamma: 2.0
--------------------------------------------------------------------------------
/kami/run/conf/cat_features/001.yaml:
--------------------------------------------------------------------------------
1 | periodicity:
2 | dim: 4
3 | num_category: 2
4 |
--------------------------------------------------------------------------------
/kami/run/conf/cat_features/003.yaml:
--------------------------------------------------------------------------------
1 | minute_15:
2 | dim: 4
3 | num_category: 15
4 |
--------------------------------------------------------------------------------
/kami/run/conf/loss/bi_tempered.yaml:
--------------------------------------------------------------------------------
1 | name: bi_tempered
2 | t1: 1.0
3 | t2: 1.0
4 | label_smoothing: 0.0
--------------------------------------------------------------------------------
/kami/run/conf/loss/tolerance.yaml:
--------------------------------------------------------------------------------
1 | name: tolerance
2 | loss_weight:
3 | - 0.5 # bce
4 | - 0.5 # tolerance
--------------------------------------------------------------------------------
/kami/run/conf/model/Spec2DCNN.yaml:
--------------------------------------------------------------------------------
1 | name: Spec2DCNN
2 | encoder_name: resnet34
3 | encoder_weights: imagenet
--------------------------------------------------------------------------------
/sakami/src/__init__.py:
--------------------------------------------------------------------------------
1 | from .config import Config
2 |
3 | meta_config = Config.load("config/meta.yaml")
4 |
--------------------------------------------------------------------------------
/kami/run/conf/model/Spec2DCNNAffine.yaml:
--------------------------------------------------------------------------------
1 | name: Spec2DCNN
2 | encoder_name: resnet34
3 | encoder_weights: imagenet
--------------------------------------------------------------------------------
/kami/run/conf/model/Spec2DCNNOverlap.yaml:
--------------------------------------------------------------------------------
1 | name: Spec2DCNN
2 | encoder_name: resnet34
3 | encoder_weights: imagenet
--------------------------------------------------------------------------------
/kami/run/conf/loss/tolerance_mse.yaml:
--------------------------------------------------------------------------------
1 | name: tolerance_mse
2 | loss_weight:
3 | - 0.5 # bce
4 | - 0.5 # tolerance
--------------------------------------------------------------------------------
/kami/run/conf/model/Spec2DCNN2Day.yaml:
--------------------------------------------------------------------------------
1 | name: Spec2DCNN2Day
2 | encoder_name: resnet34
3 | encoder_weights: imagenet
--------------------------------------------------------------------------------
/kami/run/conf/model/Spec2DCNN2DayV2.yaml:
--------------------------------------------------------------------------------
1 | name: Spec2DCNN2DayV2
2 | encoder_name: resnet34
3 | encoder_weights: imagenet
--------------------------------------------------------------------------------
/kami/run/conf/model/Spec2DCNNMinMax.yaml:
--------------------------------------------------------------------------------
1 | name: Spec2DCNNMinMax
2 | encoder_name: resnet34
3 | encoder_weights: imagenet
--------------------------------------------------------------------------------
/kami/run/conf/loss/tolerance_nonzero.yaml:
--------------------------------------------------------------------------------
1 | name: tolerance_nonzero
2 | loss_weight:
3 | - 0.5 # bce
4 | - 0.5 # tolerance
--------------------------------------------------------------------------------
/kami/run/conf/cat_features/006.yaml:
--------------------------------------------------------------------------------
1 | minute_15:
2 | dim: 4
3 | num_category: 15
4 | hour:
5 | dim: 10
6 | num_category: 24
7 |
--------------------------------------------------------------------------------
/kami/run/conf/model/Spec2DCNNSplit.yaml:
--------------------------------------------------------------------------------
1 | name: Spec2DCNNSplit
2 | encoder_name: resnet34
3 | encoder_weights: imagenet
4 | n_split: 3
--------------------------------------------------------------------------------
/kami/run/conf/cat_features/004.yaml:
--------------------------------------------------------------------------------
1 | periodicity:
2 | dim: 4
3 | num_category: 2
4 | hour:
5 | dim: 10
6 | num_category: 24
7 |
--------------------------------------------------------------------------------
/kami/run/conf/cat_features/005.yaml:
--------------------------------------------------------------------------------
1 | periodicity:
2 | dim: 4
3 | num_category: 2
4 | minute_15:
5 | dim: 4
6 | num_category: 15
7 |
--------------------------------------------------------------------------------
/kami/run/conf/decoder/LSTMDecoder.yaml:
--------------------------------------------------------------------------------
1 | name: LSTMDecoder
2 | hidden_size: 128
3 | num_layers: 2
4 | dropout: 0.2
5 | bidirectional: true
--------------------------------------------------------------------------------
/kami/run/conf/decoder/TransformerDecoder.yaml:
--------------------------------------------------------------------------------
1 | name: TransformerDecoder
2 | hidden_size: 256
3 | num_layers: 4
4 | nhead: 4
5 | dropout: 0.2
--------------------------------------------------------------------------------
/kami/run/conf/loss/focal_bce.yaml:
--------------------------------------------------------------------------------
1 | name: focal_bce
2 | alpha: 0.5
3 | gamma: 2.0
4 |
5 | weight:
6 | - 1.0 # bce
7 | - 10.0 # focal
--------------------------------------------------------------------------------
/kami/run/conf/model/Spec2DCNNSplitCat.yaml:
--------------------------------------------------------------------------------
1 | name: Spec2DCNNSplitCat
2 | encoder_name: resnet34
3 | encoder_weights: imagenet
4 | n_split: 3
--------------------------------------------------------------------------------
/kami/run/conf/model/SpecWeightAvg.yaml:
--------------------------------------------------------------------------------
1 | name: SpecWeightAvg
2 | encoder_name: resnet34
3 | encoder_weights: imagenet
4 | filter_size: 12
--------------------------------------------------------------------------------
/kami/run/conf/decoder/UNet1DDecoder.yaml:
--------------------------------------------------------------------------------
1 | name: UNet1DDecoder
2 | bilinear: false
3 | se: false
4 | res: false
5 | scale_factor: 2
6 | dropout: 0.2
--------------------------------------------------------------------------------
/kami/run/conf/feature_extractor/LSTMFeatureExtractor.yaml:
--------------------------------------------------------------------------------
1 | name: LSTMFeatureExtractor
2 | hidden_size: 64
3 | num_layers: 2
4 | bidirectional: true
5 |
--------------------------------------------------------------------------------
/kami/run/conf/feature_extractor/SpecFeatureExtractor.yaml:
--------------------------------------------------------------------------------
1 | name: SpecFeatureExtractor
2 | height: 64
3 | hop_length: ${downsample_rate}
4 | win_length:
--------------------------------------------------------------------------------
/kami/run/conf/features/001.yaml:
--------------------------------------------------------------------------------
1 | - "anglez"
2 | - "enmo"
3 | # - "month_sin"
4 | # - "month_cos"
5 | - "hour_sin"
6 | - "hour_cos"
7 | - "minute_sin"
8 | - "minute_cos"
--------------------------------------------------------------------------------
/kami/run/conf/features/base.yaml:
--------------------------------------------------------------------------------
1 | - "anglez"
2 | - "enmo"
3 | # - "month_sin"
4 | # - "month_cos"
5 | - "hour_sin"
6 | - "hour_cos"
7 | # - "minute_sin"
8 | # - "minute_cos"
--------------------------------------------------------------------------------
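
The features/*.yaml files above are plain YAML sequences of input column names. As a minimal sketch (the dataframe below is an illustrative stand-in, not the repo's actual loader), such a list could be loaded and turned into a model input array like this:

```python
import numpy as np
import polars as pl
import yaml

# A features/*.yaml file is just a YAML sequence of column names.
with open("kami/run/conf/features/base.yaml") as f:
    feature_names = yaml.safe_load(f)  # ["anglez", "enmo", "hour_sin", "hour_cos"]

# Illustrative stand-in for a preprocessed per-series dataframe.
df = pl.DataFrame(
    {
        "anglez": [0.1, 0.2],
        "enmo": [0.0, 0.05],
        "hour_sin": [0.5, 0.6],
        "hour_cos": [0.8, 0.7],
    }
)

# Select the configured columns and stack them into (n_timesteps, n_features).
features = df.select(feature_names).to_numpy().astype(np.float32)
print(features.shape)  # (2, 4)
```
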
/kami/run/conf/cat_features/007.yaml:
--------------------------------------------------------------------------------
1 | minute_15:
2 | dim: 4
3 | num_category: 15
4 | hour:
5 | dim: 10
6 | num_category: 24
7 | periodicity:
8 | dim: 4
9 | num_category: 2
--------------------------------------------------------------------------------
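
Each cat_features entry pairs a categorical column with an embedding width (`dim`) and a cardinality (`num_category`). A minimal sketch, assuming the YAML is loaded as a plain dict, of how entries like those in 007.yaml could back per-feature embedding layers (the module wiring here is hypothetical, not the repo's model code):

```python
import torch
import torch.nn as nn

# As loaded from cat_features/007.yaml: {column: {dim, num_category}}.
cat_features = {
    "minute_15": {"dim": 4, "num_category": 15},
    "hour": {"dim": 10, "num_category": 24},
    "periodicity": {"dim": 4, "num_category": 2},
}

# One nn.Embedding per categorical column; outputs concatenated channel-wise.
embeddings = nn.ModuleDict(
    {name: nn.Embedding(c["num_category"], c["dim"]) for name, c in cat_features.items()}
)

batch = {
    name: torch.randint(0, c["num_category"], (8, 100))  # (batch, seq) integer codes
    for name, c in cat_features.items()
}
embedded = torch.cat([embeddings[name](batch[name]) for name in cat_features], dim=-1)
print(embedded.shape)  # torch.Size([8, 100, 18]) -> 4 + 10 + 4 embedding channels
```
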
/shimacos/bin/stacking_exp061_030_truncate_cat.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | python -m components.stacking.main_cat \
3 | feature=stacking_030_truncate \
4 | store.model_name=$(basename $0 .sh)
5 |
6 |
--------------------------------------------------------------------------------
/kami/run/conf/model/CenterNet.yaml:
--------------------------------------------------------------------------------
1 | name: CenterNet
2 | params:
3 | encoder_name: resnet34
4 | encoder_weights: imagenet
5 | keypoint_weight: 1.0
6 | offset_weight: 1.0
7 | bbox_size_weight: 1.0
--------------------------------------------------------------------------------
/kami/run/conf/feature_extractor/CNNSpectrogram.yaml:
--------------------------------------------------------------------------------
1 | name: CNNSpectrogram
2 | base_filters: 64
3 | kernel_sizes:
4 | - 32
5 | - 16
6 | - 2
7 | stride: ${downsample_rate}
8 | sigmoid: true
9 | reinit: true
--------------------------------------------------------------------------------
/kami/run/conf/features/002.yaml:
--------------------------------------------------------------------------------
1 | - "anglez"
2 | - "enmo"
3 | # - "month_sin"
4 | # - "month_cos"
5 | - "hour_sin"
6 | - "hour_cos"
7 | #- "minute_sin"
8 | #- "minute_cos"
9 | - "weekday_sin"
10 | - "weekday_cos"
--------------------------------------------------------------------------------
/kami/run/conf/post_process/train.yaml:
--------------------------------------------------------------------------------
1 | score_th: 0.02
2 | distance: 80
3 | remove_periodicity: false
4 | periodicity:
5 | filter_size: 10
6 | #downsample_rate: 15
7 | #split_hour: 8 # with 4, the periodicity of some series could not be detected
8 | #th: 0.99
--------------------------------------------------------------------------------
/kami/run/conf/post_process/score.yaml:
--------------------------------------------------------------------------------
1 | score_th: 0.0038536
2 | distance: 80
3 | remove_periodicity: false
4 | periodicity:
5 | filter_size: 10
6 | #downsample_rate: 15
7 | #split_hour: 8 # with 4, the periodicity of some series could not be detected
8 | #th: 0.99
--------------------------------------------------------------------------------
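
`score_th` and `distance` read like standard peak-picking parameters over a per-step event probability curve. A minimal sketch of how such a config could drive candidate extraction; the use of `scipy.signal.find_peaks` is an assumption for illustration, not necessarily the repo's actual post-process implementation:

```python
import numpy as np
from scipy.signal import find_peaks

score_th = 0.0038536  # from post_process/score.yaml
distance = 80         # minimum step gap between detected events

# Illustrative per-step event probabilities for one series.
rng = np.random.default_rng(0)
preds = rng.random(17280) * 0.01

# Keep local maxima above the threshold, at least `distance` steps apart.
steps, props = find_peaks(preds, height=score_th, distance=distance)
scores = props["peak_heights"]
print(steps[:5], scores[:5])
```
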
/kami/run/conf/features/003.yaml:
--------------------------------------------------------------------------------
1 | - "anglez"
2 | - "enmo"
3 | # - "month_sin"
4 | # - "month_cos"
5 | - "hour_sin"
6 | - "hour_cos"
7 | #- "minute_sin"
8 | #- "minute_cos"
9 | # - "weekday_sin"
10 | # - "weekday_cos"
11 | - "lids"
--------------------------------------------------------------------------------
/kami/run/conf/features/005.yaml:
--------------------------------------------------------------------------------
1 | - "anglez"
2 | - "enmo"
3 | # - "month_sin"
4 | # - "month_cos"
5 | - "hour_sin"
6 | - "hour_cos"
7 | #- "minute_sin"
8 | #- "minute_cos"
9 | - "weekday_sin"
10 | - "weekday_cos"
11 | - "periodicity"
--------------------------------------------------------------------------------
/kami/run/conf/features/004.yaml:
--------------------------------------------------------------------------------
1 | - "anglez_series_norm" #" anglez"
2 | - "enmo_series_norm" # "enmo"
3 | # - "month_sin"
4 | # - "month_cos"
5 | - "hour_sin"
6 | - "hour_cos"
7 | #- "minute_sin"
8 | #- "minute_cos"
9 | - "weekday_sin"
10 | - "weekday_cos"
--------------------------------------------------------------------------------
/kami/run/conf/features/006.yaml:
--------------------------------------------------------------------------------
1 | - "anglez"
2 | - "enmo"
3 | # - "month_sin"
4 | # - "month_cos"
5 | - "hour_sin"
6 | - "hour_cos"
7 | #- "minute_sin"
8 | #- "minute_cos"
9 | - "minute15_sin"
10 | - "minute15_cos"
11 | - "weekday_sin"
12 | - "weekday_cos"
--------------------------------------------------------------------------------
/kami/run/conf/features/007.yaml:
--------------------------------------------------------------------------------
1 | - "anglez"
2 | - "enmo"
3 | # - "month_sin"
4 | # - "month_cos"
5 | - "hour_sin"
6 | - "hour_cos"
7 | - "minute_sin"
8 | - "minute_cos"
9 | #- "minute15_sin"
10 | #- "minute15_cos"
11 | - "weekday_sin"
12 | - "weekday_cos"
--------------------------------------------------------------------------------
/kami/run/conf/feature_extractor/PANNsFeatureExtractor.yaml:
--------------------------------------------------------------------------------
1 | name: PANNsFeatureExtractor
2 | base_filters: 64
3 | kernel_sizes:
4 | - 32
5 | - 16
6 | - ${downsample_rate}
7 | stride: ${downsample_rate}
8 | sigmoid: true
9 | reinit: true
10 | win_length:
--------------------------------------------------------------------------------
/kami/run/conf/ignore/001.yaml:
--------------------------------------------------------------------------------
1 | train:
2 | - no
3 |
4 | negative: # skipped only when negative sampling is used
5 | - 05e1944c3818
6 | - 13b4d6a01d27
7 | - 31011ade7c0a
8 | - a596ad0b82aa
9 | # 20-30% missing
10 | - 60d31b0bec3b
11 | - 60e51cad2ffb
12 | - 10469f6765bf
13 | - 44a41bba1ee7
--------------------------------------------------------------------------------
/kami/run/conf/features/009.yaml:
--------------------------------------------------------------------------------
1 | - "anglez"
2 | - "enmo"
3 | - "anglez_diff"
4 | - "enmo_diff"
5 | # - "month_sin"
6 | # - "month_cos"
7 | - "hour_sin"
8 | - "hour_cos"
9 | - "minute_sin"
10 | - "minute_cos"
11 | #- "minute15_sin"
12 | #- "minute15_cos"
13 | - "weekday_sin"
14 | - "weekday_cos"
--------------------------------------------------------------------------------
/kami/run/conf/dir/local.yaml:
--------------------------------------------------------------------------------
1 | data_dir: /root/app/input/
2 | processed_dir: /root/app/kami/processed
3 | output_dir: /root/app/kami/output
4 | model_dir: /root/app/kami/output/train
5 | cv_model_dir: /root/app/kami/output/cv_train
6 | kami_dir: /root/app/kami
7 | input_dir: /root/app/input
8 | sub_dir: ./
9 |
--------------------------------------------------------------------------------
/kami/run/conf/ignore/002.yaml:
--------------------------------------------------------------------------------
1 | train:
2 | # more than 50% missing
3 | - 05e1944c3818
4 | - 13b4d6a01d27
5 | - 31011ade7c0a
6 | - a596ad0b82aa
7 | # 20-30% missing
8 | # - 60d31b0bec3b
9 | # - 60e51cad2ffb
10 | # - 10469f6765bf
11 | # - 44a41bba1ee7
12 |
13 |
14 | negative: # skipped only when negative sampling is used
15 | - "no"
--------------------------------------------------------------------------------
/kami/run/conf/features/010.yaml:
--------------------------------------------------------------------------------
1 | - "anglez"
2 | - "enmo"
3 | # - "month_sin"
4 | # - "month_cos"
5 | - "hour_sin"
6 | - "hour_cos"
7 | - "minute_sin"
8 | - "minute_cos"
9 | #- "minute15_sin"
10 | #- "minute15_cos"
11 | - "weekday_sin"
12 | - "weekday_cos"
13 | - "anglez_diff_nonzero_5"
14 | - "anglez_diff_nonzero_60"
--------------------------------------------------------------------------------
/kami/run/conf/ignore/zero.yaml:
--------------------------------------------------------------------------------
1 | train: # removed from the training data
2 | - "zero"
3 | # more than 50% missing
4 | # - 05e1944c3818
5 | # - 13b4d6a01d27
6 | # - 31011ade7c0a
7 | # - a596ad0b82aa
8 | # 20-30% missing
9 | # - 60d31b0bec3b
10 | # - 60e51cad2ffb
11 | # - 10469f6765bf
12 | # - 44a41bba1ee7
13 |
14 | negative: # skipped only when negative sampling is used
15 | - "zero"
--------------------------------------------------------------------------------
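
The ignore configs name series to drop, with the sentinels "no" and "zero" presumably switching the behavior off or selecting zero-filled spans. A minimal sketch of applying the explicit `train` ids from 002.yaml when building the training set (the dataframe is illustrative):

```python
import polars as pl

# Explicit ids from ignore/002.yaml (series with more than 50% missing labels).
ignore_train = ["05e1944c3818", "13b4d6a01d27", "31011ade7c0a", "a596ad0b82aa"]

# Illustrative training frame; the second id is a placeholder.
df = pl.DataFrame({"series_id": ["05e1944c3818", "fedcba987654"], "step": [0, 0]})
train_df = df.filter(~pl.col("series_id").is_in(ignore_train))
print(train_df["series_id"].to_list())  # ["fedcba987654"]
```
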
/shimacos/bin/stacking_exp059_030_truncate_lgbm.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | python -m components.stacking.main_lgbm \
3 | feature=stacking_030_truncate \
4 | lgbm.params.objective=binary \
5 | lgbm.params.metric=binary_logloss \
6 | store.model_name=$(basename $0 .sh) \
7 | lgbm.params.scale_pos_weight=1 \
8 | lgbm.params.is_unbalance=False
9 |
10 |
--------------------------------------------------------------------------------
/kami/run/conf/features/011.yaml:
--------------------------------------------------------------------------------
1 | - "anglez"
2 | - "enmo"
3 | # - "month_sin"
4 | # - "month_cos"
5 | - "hour_sin"
6 | - "hour_cos"
7 | - "minute_sin"
8 | - "minute_cos"
9 | #- "minute15_sin"
10 | #- "minute15_cos"
11 | - "weekday_sin"
12 | - "weekday_cos"
13 | - "anglez_diff_nonzero_5"
14 | - "anglez_diff_nonzero_60"
15 | - "anglez_abs_diff_mean_24h"
16 | - "anglez_diff_nonzero_5_mean_24h"
17 | - "anglez_diff_nonzero_60_mean_24h"
18 | - "lids_mean_24h"
--------------------------------------------------------------------------------
/kami/run/conf/features/008.yaml:
--------------------------------------------------------------------------------
1 | - "anglez"
2 | - "enmo"
3 | # - "month_sin"
4 | # - "month_cos"
5 | - "hour_sin"
6 | - "hour_cos"
7 | - "minute_sin"
8 | - "minute_cos"
9 | #- "minute15_sin"
10 | #- "minute15_cos"
11 | - "weekday_sin"
12 | - "weekday_cos"
13 | - "enmo_std_diff12"
14 | - "enmo_std_diff60"
15 | - "enmo_std_diff120"
16 | - "enmo_std_diff360"
17 | - "anglez_std_diff12"
18 | - "anglez_std_diff60"
19 | - "anglez_std_diff120"
20 | - "anglez_std_diff360"
--------------------------------------------------------------------------------
/kami/run/conf/features/012.yaml:
--------------------------------------------------------------------------------
1 | - "anglez"
2 | - "enmo"
3 | # - "month_sin"
4 | # - "month_cos"
5 | - "hour_sin"
6 | - "hour_cos"
7 | - "minute_sin"
8 | - "minute_cos"
9 | #- "minute15_sin"
10 | #- "minute15_cos"
11 | - "weekday_sin"
12 | - "weekday_cos"
13 | - "anglez_diff_nonzero_5"
14 | - "anglez_diff_nonzero_60"
15 | - "anglez_abs_diff_mean_24h"
16 | - "anglez_diff_nonzero_5_mean_24h"
17 | - "anglez_diff_nonzero_60_mean_24h"
18 | - "lids_mean_24h"
19 | - "periodicity"
20 | - "non_periodicity"
--------------------------------------------------------------------------------
/kami/run/conf/prepare_data.yaml:
--------------------------------------------------------------------------------
1 | # ---------- Overriding hydra default configs ----------
2 | hydra:
3 | job:
4 | name: prepare_data
5 | chdir: true
6 | run:
7 | dir: ${dir.output_dir}/${hydra.job.name}/${hydra.job.override_dirname}
8 | sweep:
9 | dir: ${dir.output_dir}/${hydra.job.name}/
10 | subdir: ${hydra.job.override_dirname}
11 |
12 | defaults:
13 | - _self_
14 | - dir: local
15 |
16 | phase: train
17 |
18 |
19 | periodicity:
20 | downsample_rate: 12
21 | stride_min: 3
22 | split_min: 120
--------------------------------------------------------------------------------
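
prepare_data.yaml and its siblings are Hydra configs: the `defaults` list composes the `dir: local` group, and values like `${dir.output_dir}` interpolate across groups. A minimal sketch of the entrypoint shape such a config implies (`version_base=None` assumes Hydra 1.2+; the function body is illustrative):

```python
import hydra
from omegaconf import DictConfig, OmegaConf


@hydra.main(config_path="conf", config_name="prepare_data", version_base=None)
def main(cfg: DictConfig) -> None:
    # cfg.dir.* comes from conf/dir/local.yaml via the defaults list;
    # cfg.periodicity.* comes from prepare_data.yaml itself.
    print(OmegaConf.to_yaml(cfg))
    print(cfg.dir.data_dir, cfg.periodicity.downsample_rate)


if __name__ == "__main__":
    main()
```
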
/kami/run/conf/split_folds.yaml:
--------------------------------------------------------------------------------
1 | # ---------- Overriding hydra default configs ----------
2 | hydra:
3 | job:
4 |     name: split_folds
5 | chdir: true
6 | run:
7 | dir: ${dir.output_dir}/${hydra.job.name}/${hydra.job.override_dirname}
8 | sweep:
9 | dir: ${dir.output_dir}/${hydra.job.name}/
10 | subdir: ${hydra.job.override_dirname}
11 |
12 | defaults:
13 | - _self_
14 | - dir: local
15 |
16 | phase: train
17 |
18 |
19 | periodicity:
20 | downsample_rate: 12
21 | stride_min: 3
22 | split_min: 120
--------------------------------------------------------------------------------
/kami/src/models/loss/bce.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 |
4 |
5 | class BCEWithLogitsLoss(nn.Module):
6 | def __init__(self, weight: torch.Tensor = None, pos_weight: torch.Tensor = None):
7 | super(BCEWithLogitsLoss, self).__init__()
8 | self.weight = weight
9 | self.pos_weight = pos_weight
10 | self.loss_fn = nn.BCEWithLogitsLoss(weight=self.weight, pos_weight=self.pos_weight)
11 |
12 | def forward(self, logits, labels, masks):
13 | loss = self.loss_fn(logits, labels)
14 | return loss
15 |
--------------------------------------------------------------------------------
/shimacos/components/stacking/sync_batchnorm/__init__.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # File : __init__.py
3 | # Author : Jiayuan Mao
4 | # Email : maojiayuan@gmail.com
5 | # Date : 27/01/2018
6 | #
7 | # This file is part of Synchronized-BatchNorm-PyTorch.
8 | # https://github.com/vacancy/Synchronized-BatchNorm-PyTorch
9 | # Distributed under MIT License.
10 |
11 | from .batchnorm import SynchronizedBatchNorm1d, SynchronizedBatchNorm2d, SynchronizedBatchNorm3d
12 | from .batchnorm import patch_sync_batchnorm, convert_model
13 | from .replicate import DataParallelWithCallback, patch_replication_callback
14 |
--------------------------------------------------------------------------------
/shimacos/components/factories/detector_factory.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 | import torch.nn.functional as F
4 | from omegaconf import DictConfig
5 | from mmcv import Config
6 | from mmdet.models import build_detector
7 | 
8 | 
9 | class Detector(nn.Module):
10 |     def __init__(self, config: DictConfig) -> None:
11 |         super().__init__()  # nn.Module requires this before assigning submodules
12 |         detection_config = Config.fromfile(config.detection_config_path)
13 |         self.model = build_detector(
14 |             detection_config.model,
15 |             train_cfg=detection_config.train_cfg,
16 |             test_cfg=detection_config.test_cfg,
17 |         )
18 | 
--------------------------------------------------------------------------------
/kami/run/conf/features/013.yaml:
--------------------------------------------------------------------------------
1 | - "anglez"
2 | - "enmo"
3 | # - "month_sin"
4 | # - "month_cos"
5 | - "hour_sin"
6 | - "hour_cos"
7 | - "minute_sin"
8 | - "minute_cos"
9 | #- "minute15_sin"
10 | #- "minute15_cos"
11 | - "weekday_sin"
12 | - "weekday_cos"
13 | - "anglez_diff_nonzero_5"
14 | - "anglez_diff_nonzero_60"
15 | - "anglez_abs_diff_mean_24h"
16 | - "anglez_diff_nonzero_5_mean_24h"
17 | - "anglez_diff_nonzero_60_mean_24h"
18 | - "lids_mean_24h"
19 | - "periodicity"
20 | - "non_periodicity"
21 | - "anglez_abs_diff_var_24h"
22 | - "anglez_diff_nonzero_5_var_24h"
23 | - "anglez_diff_nonzero_60_var_24h"
24 | - "lids_var_24h"
--------------------------------------------------------------------------------
/shimacos/components/factories/__init__.py:
--------------------------------------------------------------------------------
1 | import warnings
2 |
3 | warnings.simplefilter("ignore")
4 |
5 | from .callback_factory import MyCallback, MyProgressBar
6 | from .collate_factory import text_collate
7 | from .dataset_factory import get_dataset
8 | from .loss_factory import get_loss
9 | from .model_factory import get_model
10 | from .optimizer_factory import get_optimizer
11 | from .sampler_factory import get_sampler
12 | from .scheduler_factory import get_scheduler
13 |
14 | __all__ = ["get_model", "get_dataset", "get_loss", "get_scheduler", "get_optimizer", "get_sampler", "MyProgressBar", "MyCallback", "text_collate"]
15 |
--------------------------------------------------------------------------------
/compose.yaml:
--------------------------------------------------------------------------------
1 | version: "3"
2 | services:
3 | kaggle:
4 | build:
5 | context: .
6 | dockerfile: Dockerfile
7 | volumes:
8 | - $PWD/input:/root/app/input
9 | - $PWD/sakami:/root/app/sakami
10 | - $PWD/kami:/root/app/kami
11 | - $PWD/shimacos:/root/app/shimacos
12 | - $PWD/tasks.py:/root/app/tasks.py
13 | working_dir: /root/app
14 | command: bash -c "echo start && /bin/bash"
15 | tty: true
16 | deploy:
17 | resources:
18 | reservations:
19 | devices:
20 | - driver: nvidia
21 | count: 1
22 | capabilities: [gpu]
23 | shm_size: "2gb"
24 |
--------------------------------------------------------------------------------
/kami/run/conf/features/014.yaml:
--------------------------------------------------------------------------------
1 | - "anglez"
2 | #- "enmo"
3 | # - "month_sin"
4 | # - "month_cos"
5 | - "hour_sin"
6 | - "hour_cos"
7 | - "minute_sin"
8 | - "minute_cos"
9 | #- "minute15_sin"
10 | #- "minute15_cos"
11 | - "weekday_sin"
12 | - "weekday_cos"
13 | - "anglez_diff_nonzero_5"
14 | - "anglez_diff_nonzero_60"
15 | - "anglez_abs_diff_mean_24h"
16 | - "anglez_diff_nonzero_5_mean_24h"
17 | - "anglez_diff_nonzero_60_mean_24h"
18 | - "lids_mean_24h"
19 | - "periodicity"
20 | - "non_periodicity"
21 | - "enmo_12_rolling_mean"
22 | - "anglez_12_rolling_std"
23 | - "enmo_12_rolling_std"
24 | - "enmo_12_rolling_max"
25 | - "anglez_diff_5min_median"
26 | #- "enmo_clip"
27 | - "enmo_log"
--------------------------------------------------------------------------------
/kami/run/conf/features/015.yaml:
--------------------------------------------------------------------------------
1 | - "anglez"
2 | #- "enmo"
3 | # - "month_sin"
4 | # - "month_cos"
5 | - "hour_sin"
6 | - "hour_cos"
7 | - "minute_sin"
8 | - "minute_cos"
9 | #- "minute15_sin"
10 | #- "minute15_cos"
11 | - "weekday_sin"
12 | - "weekday_cos"
13 | - "anglez_diff_nonzero_5"
14 | - "anglez_diff_nonzero_60"
15 | - "anglez_abs_diff_mean_24h"
16 | - "anglez_diff_nonzero_5_mean_24h"
17 | - "anglez_diff_nonzero_60_mean_24h"
18 | - "lids_mean_24h"
19 | - "periodicity"
20 | - "non_periodicity"
21 | - "enmo_12_rolling_mean"
22 | - "anglez_12_rolling_std"
23 | - "enmo_12_rolling_std"
24 | - "enmo_12_rolling_max"
25 | - "anglez_diff_5min_median"
26 | - "enmo_clip"
27 | #- "enmo_log"
--------------------------------------------------------------------------------
/kami/run/conf/features/016.yaml:
--------------------------------------------------------------------------------
1 | - "anglez"
2 | #- "enmo"
3 | # - "month_sin"
4 | # - "month_cos"
5 | - "hour_sin"
6 | - "hour_cos"
7 | - "minute_sin"
8 | - "minute_cos"
9 | #- "minute15_sin"
10 | #- "minute15_cos"
11 | - "weekday_sin"
12 | - "weekday_cos"
13 | - "anglez_diff_nonzero_5"
14 | - "anglez_diff_nonzero_60"
15 | - "anglez_abs_diff_mean_24h"
16 | - "anglez_diff_nonzero_5_mean_24h"
17 | - "anglez_diff_nonzero_60_mean_24h"
18 | - "lids_mean_24h"
19 | #- "periodicity"
20 | #- "non_periodicity"
21 | - "enmo_12_rolling_mean"
22 | - "anglez_12_rolling_std"
23 | - "enmo_12_rolling_std"
24 | - "enmo_12_rolling_max"
25 | - "anglez_diff_5min_median"
26 | #- "enmo_clip"
27 | - "enmo_log"
--------------------------------------------------------------------------------
/kami/run/conf/features/017.yaml:
--------------------------------------------------------------------------------
1 | - "anglez"
2 | #- "enmo"
3 | # - "month_sin"
4 | # - "month_cos"
5 | - "hour_sin"
6 | - "hour_cos"
7 | - "minute_sin"
8 | - "minute_cos"
9 | #- "minute15_sin"
10 | #- "minute15_cos"
11 | - "weekday_sin"
12 | - "weekday_cos"
13 | - "anglez_diff_nonzero_5"
14 | - "anglez_diff_nonzero_60"
15 | - "anglez_abs_diff_mean_24h"
16 | #- "anglez_diff_nonzero_5_mean_24h"
17 | #- "anglez_diff_nonzero_60_mean_24h"
18 | - "lids_mean_24h"
19 | - "periodicity"
20 | - "non_periodicity"
21 | - "enmo_12_rolling_mean"
22 | - "anglez_12_rolling_std"
23 | - "enmo_12_rolling_std"
24 | - "enmo_12_rolling_max"
25 | - "anglez_diff_5min_median"
26 | #- "enmo_clip"
27 | - "enmo_log"
--------------------------------------------------------------------------------
/kami/run/conf/features/021.yaml:
--------------------------------------------------------------------------------
1 | - "anglez"
2 | #- "enmo"
3 | # - "month_sin"
4 | # - "month_cos"
5 | - "hour_sin"
6 | - "hour_cos"
7 | - "minute_sin"
8 | - "minute_cos"
9 | #- "minute15_sin"
10 | #- "minute15_cos"
11 | - "weekday_sin"
12 | - "weekday_cos"
13 | #- "anglez_diff_nonzero_5"
14 | #- "anglez_diff_nonzero_60"
15 | - "anglez_abs_diff_mean_24h"
16 | - "anglez_diff_nonzero_5_mean_24h"
17 | - "anglez_diff_nonzero_60_mean_24h"
18 | - "lids_mean_24h"
19 | - "periodicity"
20 | - "non_periodicity"
21 | - "enmo_12_rolling_mean"
22 | - "anglez_12_rolling_std"
23 | - "enmo_12_rolling_std"
24 | - "enmo_12_rolling_max"
25 | - "anglez_diff_5min_median"
26 | #- "enmo_clip"
27 | - "enmo_log"
--------------------------------------------------------------------------------
/kami/run/conf/features/025.yaml:
--------------------------------------------------------------------------------
1 | - "anglez"
2 | #- "enmo"
3 | # - "month_sin"
4 | # - "month_cos"
5 | - "hour_sin"
6 | - "hour_cos"
7 | - "minute_sin"
8 | - "minute_cos"
9 | #- "minute15_sin"
10 | #- "minute15_cos"
11 | - "weekday_sin"
12 | - "weekday_cos"
13 | - "anglez_diff_nonzero_5"
14 | #- "anglez_diff_nonzero_60"
15 | - "anglez_abs_diff_mean_24h"
16 | - "anglez_diff_nonzero_5_mean_24h"
17 | - "anglez_diff_nonzero_60_mean_24h"
18 | - "lids_mean_24h"
19 | - "periodicity"
20 | - "non_periodicity"
21 | - "enmo_12_rolling_mean"
22 | - "anglez_12_rolling_std"
23 | - "enmo_12_rolling_std"
24 | - "enmo_12_rolling_max"
25 | - "anglez_diff_5min_median"
26 | #- "enmo_clip"
27 | - "enmo_log"
--------------------------------------------------------------------------------
/kami/run/conf/features/018.yaml:
--------------------------------------------------------------------------------
1 | - "anglez"
2 | #- "enmo"
3 | # - "month_sin"
4 | # - "month_cos"
5 | - "hour_sin"
6 | - "hour_cos"
7 | - "minute_sin"
8 | - "minute_cos"
9 | #- "minute15_sin"
10 | #- "minute15_cos"
11 | - "weekday_sin"
12 | - "weekday_cos"
13 | - "anglez_diff_nonzero_5"
14 | - "anglez_diff_nonzero_60"
15 | - "anglez_abs_diff_mean_24h"
16 | - "anglez_diff_nonzero_5_mean_24h"
17 | - "anglez_diff_nonzero_60_mean_24h"
18 | - "lids_mean_24h"
19 | - "periodicity"
20 | - "non_periodicity"
21 | - "enmo_log_12_rolling_mean"
22 | - "anglez_12_rolling_std"
23 | - "enmo_log_12_rolling_std"
24 | - "enmo_log_12_rolling_max"
25 | - "anglez_diff_5min_median"
26 | #- "enmo_clip"
27 | - "enmo_log"
--------------------------------------------------------------------------------
/kami/run/conf/features/024.yaml:
--------------------------------------------------------------------------------
1 | - "anglez"
2 | #- "enmo"
3 | # - "month_sin"
4 | # - "month_cos"
5 | - "hour_sin"
6 | - "hour_cos"
7 | - "minute_sin"
8 | - "minute_cos"
9 | #- "minute15_sin"
10 | #- "minute15_cos"
11 | - "weekday_sin"
12 | - "weekday_cos"
13 | - "anglez_diff_nonzero_1"
14 | - "anglez_diff_nonzero_5"
15 | - "anglez_diff_nonzero_60"
16 | - "anglez_abs_diff_mean_24h"
17 | - "anglez_diff_nonzero_5_mean_24h"
18 | - "anglez_diff_nonzero_60_mean_24h"
19 | - "lids_mean_24h"
20 | - "periodicity"
21 | - "non_periodicity"
22 | - "enmo_12_rolling_mean"
23 | - "anglez_12_rolling_std"
24 | - "enmo_12_rolling_std"
25 | - "enmo_12_rolling_max"
26 | - "anglez_diff_5min_median"
27 | #- "enmo_clip"
28 | - "enmo_log"
--------------------------------------------------------------------------------
/kami/run/conf/features/031.yaml:
--------------------------------------------------------------------------------
1 | - "anglez"
2 | #- "enmo"
3 | # - "month_sin"
4 | # - "month_cos"
5 | - "hour_sin"
6 | - "hour_cos"
7 | - "minute_sin"
8 | - "minute_cos"
9 | #- "minute15_sin"
10 | #- "minute15_cos"
11 | - "weekday_sin"
12 | - "weekday_cos"
13 | - "anglez_diff_nonzero_5"
14 | - "anglez_diff_nonzero_60"
15 | - "anglez_diff_nonzero_5_mean_24h"
16 | - "anglez_diff_nonzero_60_mean_24h"
17 | - "lids_mean_24h"
18 | - "periodicity"
19 | - "non_periodicity"
20 | - "enmo_12_rolling_mean"
21 | - "anglez_12_rolling_std"
22 | - "enmo_12_rolling_std"
23 | - "enmo_12_rolling_max"
24 | - "anglez_diff_5min_median"
25 | #- "enmo_clip"
26 | - "enmo_log"
27 | - "anglez_abs_diff_mean_24h"
28 | - "anglez_abs_diff_60_median"
--------------------------------------------------------------------------------
/kami/src/models/loss/focal.py:
--------------------------------------------------------------------------------
1 | from torchvision.ops.focal_loss import sigmoid_focal_loss
2 | import torch
3 | import torch.nn as nn
4 |
5 | # https://pytorch.org/vision/main/_modules/torchvision/ops/focal_loss.html
6 |
7 |
8 | class FocalLoss(nn.Module):
9 | def __init__(
10 | self,
11 | alpha: float,
12 | gamma: float,
13 | ):
14 | super(FocalLoss, self).__init__()
15 | self.alpha = alpha
16 | self.gamma = gamma
17 |
18 | def forward(self, logits, labels, masks):
19 | loss = sigmoid_focal_loss(
20 | logits[:, :, [1, 2]], labels[:, :, [1, 2]], alpha=self.alpha, gamma=self.gamma, reduction="mean"
21 | )
22 | return loss
23 |
--------------------------------------------------------------------------------
/kami/run/conf/features/022.yaml:
--------------------------------------------------------------------------------
1 | - "anglez"
2 | #- "enmo"
3 | # - "month_sin"
4 | # - "month_cos"
5 | - "hour_sin"
6 | - "hour_cos"
7 | - "minute_sin"
8 | - "minute_cos"
9 | #- "minute15_sin"
10 | #- "minute15_cos"
11 | - "weekday_sin"
12 | - "weekday_cos"
13 | - "anglez_diff_nonzero_5"
14 | - "anglez_diff_nonzero_60"
15 | - "anglez_diff_nonzero_5_max"
16 | - "anglez_diff_nonzero_60_max"
17 | - "anglez_abs_diff_mean_24h"
18 | - "anglez_diff_nonzero_5_mean_24h"
19 | - "anglez_diff_nonzero_60_mean_24h"
20 | - "lids_mean_24h"
21 | - "periodicity"
22 | - "non_periodicity"
23 | - "enmo_12_rolling_mean"
24 | - "anglez_12_rolling_std"
25 | - "enmo_12_rolling_std"
26 | - "enmo_12_rolling_max"
27 | - "anglez_diff_5min_median"
28 | #- "enmo_clip"
29 | - "enmo_log"
--------------------------------------------------------------------------------
/kami/run/conf/features/023.yaml:
--------------------------------------------------------------------------------
1 | - "anglez"
2 | #- "enmo"
3 | # - "month_sin"
4 | # - "month_cos"
5 | - "hour_sin"
6 | - "hour_cos"
7 | - "minute_sin"
8 | - "minute_cos"
9 | #- "minute15_sin"
10 | #- "minute15_cos"
11 | - "weekday_sin"
12 | - "weekday_cos"
13 | - "anglez_diff_nonzero_5"
14 | - "anglez_diff_nonzero_60"
15 | - "anglez_diff_nonzero_5_std"
16 | - "anglez_diff_nonzero_60_std"
17 | - "anglez_abs_diff_mean_24h"
18 | - "anglez_diff_nonzero_5_mean_24h"
19 | - "anglez_diff_nonzero_60_mean_24h"
20 | - "lids_mean_24h"
21 | - "periodicity"
22 | - "non_periodicity"
23 | - "enmo_12_rolling_mean"
24 | - "anglez_12_rolling_std"
25 | - "enmo_12_rolling_std"
26 | - "enmo_12_rolling_max"
27 | - "anglez_diff_5min_median"
28 | #- "enmo_clip"
29 | - "enmo_log"
--------------------------------------------------------------------------------
/kami/run/conf/features/027.yaml:
--------------------------------------------------------------------------------
1 | - "anglez"
2 | #- "enmo"
3 | # - "month_sin"
4 | # - "month_cos"
5 | - "hour_sin"
6 | - "hour_cos"
7 | - "minute_sin"
8 | - "minute_cos"
9 | #- "minute15_sin"
10 | #- "minute15_cos"
11 | - "weekday_sin"
12 | - "weekday_cos"
13 | - "anglez_diff_nonzero_5"
14 | - "anglez_diff_nonzero_60"
15 | - "anglez_abs_diff_mean_24h"
16 | - "anglez_diff_nonzero_5_mean_24h"
17 | - "anglez_diff_nonzero_60_mean_24h"
18 | - "lids_mean_24h"
19 | - "periodicity"
20 | - "non_periodicity"
21 | - "enmo_12_rolling_mean"
22 | - "anglez_12_rolling_std"
23 | - "enmo_12_rolling_std"
24 | - "enmo_12_rolling_max"
25 | - "anglez_diff_5min_median"
26 | #- "enmo_clip"
27 | - "enmo_log"
28 | - "pca_0"
29 | - "pca_1"
30 | - "pca_2"
31 | - "pca_3"
32 | - "pca_4"
--------------------------------------------------------------------------------
/kami/run/conf/features/028.yaml:
--------------------------------------------------------------------------------
1 | - "anglez"
2 | #- "enmo"
3 | # - "month_sin"
4 | # - "month_cos"
5 | - "hour_sin"
6 | - "hour_cos"
7 | - "minute_sin"
8 | - "minute_cos"
9 | #- "minute15_sin"
10 | #- "minute15_cos"
11 | - "weekday_sin"
12 | - "weekday_cos"
13 | - "anglez_diff_nonzero_5"
14 | - "anglez_diff_nonzero_60"
15 | - "anglez_diff_nonzero_5_mean_24h"
16 | - "anglez_diff_nonzero_60_mean_24h"
17 | - "lids_mean_24h"
18 | - "periodicity"
19 | - "non_periodicity"
20 | - "enmo_12_rolling_mean"
21 | - "anglez_12_rolling_std"
22 | - "enmo_12_rolling_std"
23 | - "enmo_12_rolling_max"
24 | - "anglez_diff_5min_median"
25 | #- "enmo_clip"
26 | - "enmo_log"
27 | - "anglez_abs_diff_mean_24h"
28 | - "anglez_abs_diff_1_median"
29 | - "anglez_abs_diff_1_std"
30 | - "anglez_abs_diff_1_max"
--------------------------------------------------------------------------------
/kami/run/conf/features/029.yaml:
--------------------------------------------------------------------------------
1 | - "anglez"
2 | #- "enmo"
3 | # - "month_sin"
4 | # - "month_cos"
5 | - "hour_sin"
6 | - "hour_cos"
7 | - "minute_sin"
8 | - "minute_cos"
9 | #- "minute15_sin"
10 | #- "minute15_cos"
11 | - "weekday_sin"
12 | - "weekday_cos"
13 | - "anglez_diff_nonzero_5"
14 | - "anglez_diff_nonzero_60"
15 | - "anglez_diff_nonzero_5_mean_24h"
16 | - "anglez_diff_nonzero_60_mean_24h"
17 | - "lids_mean_24h"
18 | - "periodicity"
19 | - "non_periodicity"
20 | - "enmo_12_rolling_mean"
21 | - "anglez_12_rolling_std"
22 | - "enmo_12_rolling_std"
23 | - "enmo_12_rolling_max"
24 | - "anglez_diff_5min_median"
25 | #- "enmo_clip"
26 | - "enmo_log"
27 | - "anglez_abs_diff_mean_24h"
28 | - "anglez_abs_diff_5_median"
29 | - "anglez_abs_diff_5_std"
30 | - "anglez_abs_diff_5_max"
--------------------------------------------------------------------------------
/kami/run/conf/features/030.yaml:
--------------------------------------------------------------------------------
1 | - "anglez"
2 | #- "enmo"
3 | # - "month_sin"
4 | # - "month_cos"
5 | - "hour_sin"
6 | - "hour_cos"
7 | - "minute_sin"
8 | - "minute_cos"
9 | #- "minute15_sin"
10 | #- "minute15_cos"
11 | - "weekday_sin"
12 | - "weekday_cos"
13 | - "anglez_diff_nonzero_5"
14 | - "anglez_diff_nonzero_60"
15 | - "anglez_diff_nonzero_5_mean_24h"
16 | - "anglez_diff_nonzero_60_mean_24h"
17 | - "lids_mean_24h"
18 | - "periodicity"
19 | - "non_periodicity"
20 | - "enmo_12_rolling_mean"
21 | - "anglez_12_rolling_std"
22 | - "enmo_12_rolling_std"
23 | - "enmo_12_rolling_max"
24 | - "anglez_diff_5min_median"
25 | #- "enmo_clip"
26 | - "enmo_log"
27 | - "anglez_abs_diff_mean_24h"
28 | - "anglez_abs_diff_60_median"
29 | - "anglez_abs_diff_60_std"
30 | - "anglez_abs_diff_60_max"
--------------------------------------------------------------------------------
/kami/run/conf/features/032.yaml:
--------------------------------------------------------------------------------
1 | - "anglez"
2 | #- "enmo"
3 | # - "month_sin"
4 | # - "month_cos"
5 | - "hour_sin"
6 | - "hour_cos"
7 | - "minute_sin"
8 | - "minute_cos"
9 | #- "minute15_sin"
10 | #- "minute15_cos"
11 | - "weekday_sin"
12 | - "weekday_cos"
13 | - "anglez_diff_nonzero_5"
14 | - "anglez_diff_nonzero_60"
15 | - "anglez_diff_nonzero_5_mean_24h"
16 | - "anglez_diff_nonzero_60_mean_24h"
17 | - "lids_mean_24h"
18 | - "periodicity"
19 | - "non_periodicity"
20 | - "enmo_12_rolling_mean"
21 | - "anglez_12_rolling_std"
22 | - "enmo_12_rolling_std"
23 | - "enmo_12_rolling_max"
24 | - "anglez_diff_5min_median"
25 | #- "enmo_clip"
26 | - "enmo_log"
27 | - "anglez_abs_diff_mean_24h"
28 | #- "anglez_abs_diff_60_median"
29 | - "anglez_abs_diff_60_std"
30 | #- "anglez_abs_diff_60_max"
--------------------------------------------------------------------------------
/kami/run/conf/features/019.yaml:
--------------------------------------------------------------------------------
1 | - "anglez"
2 | #- "enmo"
3 | # - "month_sin"
4 | # - "month_cos"
5 | - "hour_sin"
6 | - "hour_cos"
7 | - "minute_sin"
8 | - "minute_cos"
9 | #- "minute15_sin"
10 | #- "minute15_cos"
11 | - "weekday_sin"
12 | - "weekday_cos"
13 | - "anglez_diff_nonzero_5"
14 | - "anglez_diff_nonzero_60"
15 | - "anglez_abs_diff_mean_24h"
16 | - "anglez_diff_nonzero_5_mean_24h"
17 | - "anglez_diff_nonzero_60_mean_24h"
18 | - "lids_mean_24h"
19 | - "periodicity"
20 | - "non_periodicity"
21 | - "enmo_12_rolling_mean"
22 | - "anglez_12_rolling_std"
23 | - "enmo_12_rolling_std"
24 | - "enmo_12_rolling_max"
25 | - "enmo_60_rolling_mean"
26 | - "anglez_60_rolling_std"
27 | - "enmo_60_rolling_std"
28 | - "enmo_60_rolling_max"
29 | - "anglez_diff_5min_median"
30 | #- "enmo_clip"
31 | - "enmo_log"
--------------------------------------------------------------------------------
/kami/run/conf/features/020.yaml:
--------------------------------------------------------------------------------
1 | - "anglez"
2 | #- "enmo"
3 | # - "month_sin"
4 | # - "month_cos"
5 | - "hour_sin"
6 | - "hour_cos"
7 | - "minute_sin"
8 | - "minute_cos"
9 | #- "minute15_sin"
10 | #- "minute15_cos"
11 | - "weekday_sin"
12 | - "weekday_cos"
13 | - "anglez_diff_nonzero_5"
14 | - "anglez_diff_nonzero_60"
15 | - "anglez_abs_diff_mean_24h"
16 | - "anglez_diff_nonzero_5_mean_24h"
17 | - "anglez_diff_nonzero_60_mean_24h"
18 | - "lids_mean_24h"
19 | - "periodicity"
20 | - "non_periodicity"
21 | - "enmo_12_rolling_mean"
22 | - "anglez_12_rolling_std"
23 | - "enmo_12_rolling_std"
24 | - "enmo_12_rolling_max"
25 | - "enmo_360_rolling_mean"
26 | - "anglez_360_rolling_std"
27 | - "enmo_360_rolling_std"
28 | - "enmo_360_rolling_max"
29 | - "anglez_diff_5min_median"
30 | #- "enmo_clip"
31 | - "enmo_log"
--------------------------------------------------------------------------------
/kami/run/conf/features/026.yaml:
--------------------------------------------------------------------------------
1 | - "anglez"
2 | #- "enmo"
3 | # - "month_sin"
4 | # - "month_cos"
5 | - "hour_sin"
6 | - "hour_cos"
7 | - "minute_sin"
8 | - "minute_cos"
9 | #- "minute15_sin"
10 | #- "minute15_cos"
11 | - "weekday_sin"
12 | - "weekday_cos"
13 | - "anglez_diff_nonzero_5"
14 | - "anglez_diff_nonzero_60"
15 | - "anglez_abs_diff_mean_24h"
16 | - "anglez_diff_nonzero_5_mean_24h"
17 | - "anglez_diff_nonzero_60_mean_24h"
18 | - "lids_mean_24h"
19 | - "periodicity"
20 | - "non_periodicity"
21 | - "enmo_12_rolling_mean"
22 | - "anglez_12_rolling_std"
23 | - "enmo_12_rolling_std"
24 | - "enmo_12_rolling_max"
25 | - "anglez_diff_5min_median"
26 | #- "enmo_clip"
27 | - "enmo_log"
28 | - "pca_0"
29 | - "pca_1"
30 | - "pca_2"
31 | - "pca_3"
32 | - "pca_4"
33 | - "pca_5"
34 | - "pca_6"
35 | - "pca_7"
36 | - "pca_8"
37 | - "pca_9"
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | ## Prepare dataset
2 |
3 | - Download the dataset from Kaggle via the Kaggle API:
4 | ```bash
5 | kaggle competitions download -c child-mind-institute-detect-sleep-states -p ./input/
6 | unzip ./input/child-mind-institute-detect-sleep-states.zip -d ./input
7 | ```
8 |
9 | ## Training
10 |
11 | - Set up the environment using Docker:
12 |
13 | ```sh
14 | docker compose up -d
15 | docker compose exec kaggle /bin/bash
16 | ```
17 |
18 | - Run all training:
19 | ```sh
20 | inv run-all
21 | ```
22 | - (Optional) If you want to re-split the folds, add the `--overwrite-folds` option.
23 |   - Fold splitting is not reproducible, so the resulting folds change every time it is run.
24 |   - The default folds we used are provided in `input/folds`.
25 | ```sh
26 | inv run-all --overwrite-folds
27 | ```
28 |
29 | ## Inference
30 |
31 | - Use [this notebook](https://www.kaggle.com/shimacos/kaggle-smi-submission-final)
32 |
--------------------------------------------------------------------------------
/kami/src/models/loss/focal_bce.py:
--------------------------------------------------------------------------------
1 | from torchvision.ops.focal_loss import sigmoid_focal_loss
2 | import torch
3 | import torch.nn as nn
4 |
5 | # https://pytorch.org/vision/main/_modules/torchvision/ops/focal_loss.html
6 |
7 |
8 | class FocalBCELoss(nn.Module):
9 | def __init__(self, alpha: float, gamma: float, weight: torch.Tensor = torch.tensor([1.0, 1.0])):
10 | super(FocalBCELoss, self).__init__()
11 | self.alpha = alpha
12 | self.gamma = gamma
13 | self.weight = weight
14 | self.bce_fn = nn.BCEWithLogitsLoss()
15 |
16 | def forward(self, logits, labels, masks):
17 | loss = self.weight[0] * self.bce_fn(logits[:, :, 0], labels[:, :, 0]) + self.weight[1] * sigmoid_focal_loss(
18 | logits[:, :, [1, 2]], labels[:, :, [1, 2]], alpha=self.alpha, gamma=self.gamma, reduction="mean"
19 | )
20 | return loss
21 |
--------------------------------------------------------------------------------
/shimacos/components/stacking/sync_batchnorm/unittest.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # File : unittest.py
3 | # Author : Jiayuan Mao
4 | # Email : maojiayuan@gmail.com
5 | # Date : 27/01/2018
6 | #
7 | # This file is part of Synchronized-BatchNorm-PyTorch.
8 | # https://github.com/vacancy/Synchronized-BatchNorm-PyTorch
9 | # Distributed under MIT License.
10 |
11 | import unittest
12 | import torch
13 |
14 |
15 | class TorchTestCase(unittest.TestCase):
16 | def assertTensorClose(self, x, y):
17 | adiff = float((x - y).abs().max())
18 | if (y == 0).all():
19 | rdiff = 'NaN'
20 | else:
21 | rdiff = float((adiff / y).abs().max())
22 |
23 | message = (
24 | 'Tensor close check failed\n'
25 | 'adiff={}\n'
26 | 'rdiff={}\n'
27 | ).format(adiff, rdiff)
28 | self.assertTrue(torch.allclose(x, y), message)
29 |
30 |
--------------------------------------------------------------------------------
/shimacos/components/factories/collate_factory.py:
--------------------------------------------------------------------------------
1 | import torch
2 |
3 |
4 | def text_collate(batch):
5 | output = {}
6 | max_length = max([sample["raw_length"] for sample in batch])
7 | output["id"] = [sample["id"] for sample in batch]
8 | output["input_ids"] = torch.cat(
9 | [torch.as_tensor(sample["input_ids"][:, :max_length]) for sample in batch],
10 | dim=0,
11 | )
12 | output["attention_mask"] = torch.cat(
13 | [torch.as_tensor(sample["attention_mask"][:, :max_length]) for sample in batch],
14 | dim=0,
15 | )
16 | output["extension"] = torch.cat(
17 | [torch.as_tensor(sample["extension"]) for sample in batch],
18 | dim=0,
19 | )
20 | if "label" in batch[0]:
21 | output["label"] = torch.cat(
22 | [torch.as_tensor(sample["label"]) for sample in batch],
23 | dim=0,
24 | )
25 | return output
26 |
--------------------------------------------------------------------------------
/kami/src/models/decoder/mlpdecoder.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 | import torch.nn.functional as F
4 |
5 |
6 | class MLPDecoder(nn.Module):
7 | def __init__(self, n_channels: int, n_classes: int):
8 | super(MLPDecoder, self).__init__()
9 | self.fc1 = nn.Linear(n_channels, 64)
10 | self.fc2 = nn.Linear(64, 64)
11 | self.fc3 = nn.Linear(64, n_classes)
12 |
13 | def forward(self, x: torch.Tensor) -> torch.Tensor:
14 | """Forward pass of the model.
15 |
16 | Args:
17 | x (torch.Tensor): (batch_size, n_channels, n_timesteps)
18 |
19 | Returns:
20 | torch.Tensor: (batch_size, n_timesteps, n_classes)
21 | """
22 | x = x.transpose(1, 2)
23 | x = F.relu(self.fc1(x))
24 |         x = F.dropout(x, p=0.5, training=self.training)  # F.dropout defaults to training=True, even at eval
25 |         x = F.relu(self.fc2(x))
26 |         x = F.dropout(x, p=0.5, training=self.training)
27 | x = self.fc3(x)
28 | return x
29 |
--------------------------------------------------------------------------------
/kami/src/augmentation/mixup.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import torch
3 |
4 |
5 | class Mixup:
6 | def __init__(self, alpha: float = 0.4):
7 | self.alpha = alpha
8 |
9 | def __call__(
10 | self, imgs: torch.Tensor, labels: torch.Tensor
11 | ) -> tuple[torch.Tensor, torch.Tensor]:
12 | """Mixup augmentation.
13 |
14 | Args:
15 | imgs (torch.Tensor): (batch_size, n_channels, n_timesteps)
16 | labels (torch.Tensor): (batch_size, n_timesteps, n_classes)
17 |
18 | Returns:
19 | tuple[torch.Tensor]: mixed_imgs (batch_size, n_channels, n_timesteps)
20 | mixed_labels (batch_size, n_timesteps, n_classes)
21 | """
22 | batch_size = imgs.size(0)
23 | idx = torch.randperm(batch_size)
24 | lam = np.random.beta(self.alpha, self.alpha)
25 |
26 | mixed_imgs: torch.Tensor = lam * imgs + (1 - lam) * imgs[idx]
27 | mixed_labels: torch.Tensor = lam * labels + (1 - lam) * labels[idx]
28 |
29 | return mixed_imgs, mixed_labels
30 |
--------------------------------------------------------------------------------
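
A short usage sketch for the Mixup class above; the import path assumes `kami/src` is on `PYTHONPATH`, and the shapes follow the docstring:

```python
import torch

from src.augmentation.mixup import Mixup  # path assumes kami/src is on PYTHONPATH

mixup = Mixup(alpha=0.4)
imgs = torch.randn(8, 4, 5760)   # (batch_size, n_channels, n_timesteps)
labels = torch.rand(8, 5760, 3)  # (batch_size, n_timesteps, n_classes)
mixed_imgs, mixed_labels = mixup(imgs, labels)
print(mixed_imgs.shape, mixed_labels.shape)
```
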
/shimacos/components/preprocess/make_fold_v2.py:
--------------------------------------------------------------------------------
1 | import os
2 |
3 | import numpy as np
4 | import polars as pl
5 | import yaml
6 |
7 |
8 | def load_data() -> pl.DataFrame:
9 | event_df = pl.read_csv("./input/train_events.csv")
10 | event_df = event_df.with_columns(pl.col("timestamp").str.to_datetime())
11 | event_df = event_df.with_columns(pl.col("step").cast(pl.UInt32))
12 | return event_df
13 |
14 |
15 | def main():
16 | os.makedirs("./input/preprocess", exist_ok=True)
17 | event_df = load_data()
18 | event_df = event_df[["series_id"]].unique()
19 | event_df = event_df.with_columns(pl.Series("row_id", np.arange(len(event_df))))
20 |
21 | folds = np.zeros(len(event_df))
22 | for i in range(5):
23 | with open(f"./input/folds/stratify_fold_{i}.yaml", "r") as f:
24 | stratify_fold = yaml.safe_load(f)
25 | valid_ids = stratify_fold["valid_series_ids"]
26 | idx = event_df.filter(pl.col("series_id").is_in(valid_ids))["row_id"].to_numpy()
27 | folds[idx] = i
28 |
29 | event_df = event_df.with_columns(pl.Series("fold", folds))
30 | event_df[["series_id", "fold"]].write_parquet("./input/preprocess/fold_v2.parquet")
31 |
32 |
33 | if __name__ == "__main__":
34 | main()
35 |
--------------------------------------------------------------------------------
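
make_fold_v2.py implies that each `stratify_fold_{i}.yaml` carries a `valid_series_ids` list. A minimal sketch of reading one fold file under that assumption:

```python
import yaml

# Assumed shape of input/folds/stratify_fold_0.yaml (ids are placeholders):
#   valid_series_ids:
#     - 05e1944c3818
#     - 13b4d6a01d27
with open("./input/folds/stratify_fold_0.yaml") as f:
    fold = yaml.safe_load(f)
print(len(fold["valid_series_ids"]))
```
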
/kami/src/models/decoder/lstmdecoder.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 |
4 |
5 | class LSTMDecoder(nn.Module):
6 | def __init__(
7 | self,
8 | input_size: int,
9 | hidden_size: int,
10 | num_layers: int,
11 | dropout: float,
12 | bidirectional: bool,
13 | n_classes: int,
14 | ):
15 | super().__init__()
16 | self.lstm = nn.LSTM(
17 | input_size=input_size,
18 | hidden_size=hidden_size,
19 | num_layers=num_layers,
20 | dropout=dropout,
21 | bidirectional=bidirectional,
22 | batch_first=True,
23 | )
24 | hidden_size = hidden_size * 2 if bidirectional else hidden_size
25 | self.linear = nn.Linear(hidden_size, n_classes)
26 |
27 | def forward(self, x: torch.Tensor) -> torch.Tensor:
28 | """Forward pass of the model.
29 |
30 | Args:
31 | x (torch.Tensor): (batch_size, n_channels, n_timesteps)
32 |
33 | Returns:
34 | torch.Tensor: (batch_size, n_timesteps, n_classes)
35 | """
36 | x = x.transpose(1, 2) # (batch_size, n_timesteps, n_channels)
37 | x, _ = self.lstm(x)
38 | x = self.linear(x)
39 | return x
40 |
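
A quick shape check (illustrative values): with `bidirectional=True` the LSTM output width is `2 * hidden_size`, which the constructor folds into the final linear layer.

```python
import torch

from src.models.decoder.lstmdecoder import LSTMDecoder

decoder = LSTMDecoder(
    input_size=64, hidden_size=32, num_layers=2,
    dropout=0.1, bidirectional=True, n_classes=3,
)
x = torch.randn(2, 64, 100)  # (batch_size, n_channels, n_timesteps)
print(decoder(x).shape)      # torch.Size([2, 100, 3])
```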
--------------------------------------------------------------------------------
/kami/src/models/loss/tolerance.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 |
4 |
5 | class ToleranceLoss(nn.Module):
6 | def __init__(
7 | self,
8 | loss_weight: torch.Tensor = torch.Tensor([0.5, 0.5]),
9 |         label_weight: torch.Tensor | None = None,
10 |         pos_weight: torch.Tensor | None = None,
11 | ):
12 | super(ToleranceLoss, self).__init__()
13 |
14 | self.loss_weight = loss_weight
15 | self.label_weight = label_weight
16 | self.pos_weight = pos_weight
17 |
18 | self.loss_fn = nn.BCEWithLogitsLoss(weight=self.label_weight, pos_weight=self.pos_weight)
19 |
20 | def forward(self, logits, labels, masks):
21 | # logits: shape (batch, seq, class)
22 | # masks: shape (batch, tolerance, seq, class)
23 |
24 | logits_event = logits[:, :, [1, 2]]
25 |
26 | # Step 1: Find the maximum value of logits in the range of each mask
27 | masked_logits_max, _ = torch.max(logits_event.unsqueeze(1) * masks, dim=2) # (batch, tolerance, class)
28 |
29 |         # Step 2: Calculate max(0, logits - x) and take the mean over all elements
30 | loss = torch.relu(logits_event.unsqueeze(1) - masked_logits_max.unsqueeze(2)) # (batch, tolerance, seq, class)
31 | loss = torch.mean(loss)
32 |
33 | loss = self.loss_weight[0] * self.loss_fn(logits, labels) + self.loss_weight[1] * loss
34 | return loss
35 |
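
A minimal input sketch with dummy tensors (not from the repo). Per the training configs the class order is [awake, event_onset, event_wakeup], so `logits[:, :, [1, 2]]` picks the two event channels; `masks` flags the tolerance window around each event.

```python
import torch

from src.models.loss.tolerance import ToleranceLoss

batch, tolerance, seq = 2, 3, 16
logits = torch.randn(batch, seq, 3)  # (batch, seq, class)
labels = torch.rand(batch, seq, 3)   # soft targets in [0, 1]
masks = torch.randint(0, 2, (batch, tolerance, seq, 2)).float()

loss = ToleranceLoss()(logits, labels, masks)
print(loss)  # scalar: 0.5 * BCE + 0.5 * tolerance penalty
```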
--------------------------------------------------------------------------------
/kami/src/models/decoder/transformerdecoder.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 |
4 |
5 | class TransformerDecoder(nn.Module):
6 | def __init__(
7 | self,
8 | input_size: int,
9 | hidden_size: int,
10 | num_layers: int,
11 | dropout: float,
12 | nhead: int,
13 | n_classes: int,
14 | ):
15 | super().__init__()
16 | self.conv = nn.Conv1d(input_size, hidden_size, 1)
17 | transformer_encoder_layer = nn.TransformerEncoderLayer(
18 | d_model=hidden_size, nhead=nhead, dropout=dropout, batch_first=True
19 | )
20 | self.transformer_encoder = nn.TransformerEncoder(
21 | transformer_encoder_layer, num_layers=num_layers
22 | )
23 | self.linear = nn.Linear(hidden_size, n_classes)
24 |
25 | def forward(self, x: torch.Tensor) -> torch.Tensor:
26 | """Forward pass of the model.
27 |
28 | Args:
29 | x (torch.Tensor): (batch_size, n_channels, n_timesteps)
30 |
31 | Returns:
32 | torch.Tensor: (batch_size, n_timesteps, n_classes)
33 | """
34 | x = self.conv(x) # (batch_size, n_channels, n_timesteps)
35 | x = x.transpose(1, 2) # (batch_size, n_timesteps, n_channels)
36 | x = self.transformer_encoder(x)
37 | x = self.linear(x) # (batch_size, n_timesteps, n_classes)
38 |
39 | return x
40 |
--------------------------------------------------------------------------------
/kami/src/models/loss/tolerance_mse.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 |
4 |
5 | class ToleranceMSELoss(nn.Module):
6 | def __init__(
7 | self,
8 | loss_weight: torch.Tensor = torch.Tensor([0.5, 0.5]),
9 |         label_weight: torch.Tensor | None = None,
10 |         pos_weight: torch.Tensor | None = None,
11 | ):
12 | super(ToleranceMSELoss, self).__init__()
13 | self.loss_weight = loss_weight
14 | self.label_weight = label_weight
15 | self.pos_weight = pos_weight
16 |
17 | self.loss_fn = nn.BCEWithLogitsLoss(weight=self.label_weight, pos_weight=self.pos_weight)
18 |
19 | def forward(self, logits, labels, masks):
20 | # logits: shape (batch, seq, class)
21 | # masks: shape (batch, tolerance, seq, class)
22 |
23 | logits_event = logits[:, :, [1, 2]]
24 |
25 | # Step 1: Find the maximum value of logits in the range of each mask
26 | masked_logits_max, _ = torch.max(logits_event.unsqueeze(1) * masks, dim=2) # (batch, tolerance, class)
27 |
28 |         # Step 2: Calculate max(0, logits - x), square it, and take the mean
29 | loss = torch.square(
30 | torch.relu(logits_event.unsqueeze(1) - masked_logits_max.unsqueeze(2))
31 | ) # (batch, tolerance, seq, class)
32 | loss = torch.mean(loss)
33 |         # take the square root to undo the squared scale
34 | loss = torch.sqrt(loss)
35 |
36 | loss = self.loss_weight[0] * self.loss_fn(logits, labels) + self.loss_weight[1] * loss
37 | return loss
38 |
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM nvidia/cuda:11.8.0-cudnn8-devel-ubuntu20.04
2 |
3 | ENV DEBIAN_FRONTEND=noninteractive
4 |
5 | RUN apt-get update \
6 | && apt-get install --yes software-properties-common \
7 | && add-apt-repository ppa:neovim-ppa/stable \
8 | && apt-get update \
9 | && apt-get install --yes --no-install-recommends \
10 | build-essential \
11 | make \
12 | curl \
13 | cmake \
14 | git \
15 | libssl-dev \
16 | libbz2-dev \
17 | libreadline-dev \
18 | libsqlite3-dev \
19 | llvm \
20 | libncursesw5-dev \
21 | libxml2-dev \
22 | libxmlsec1-dev \
23 | libffi-dev \
24 | liblzma-dev \
25 | libgl1-mesa-dev \
26 | libopencv-dev \
27 | libtesseract-dev \
28 | libleptonica-dev \
29 | tesseract-ocr \
30 | tesseract-ocr-jpn \
31 | tesseract-ocr-script-jpan \
32 | tesseract-ocr-script-jpan-vert \
33 | tk-dev \
34 | xz-utils \
35 | wget \
36 | zip \
37 | zlib1g-dev \
38 | && apt-get clean \
39 | && rm -rf /var/lib/apt/lists/*
40 |
41 | # python
42 | RUN git clone https://github.com/pyenv/pyenv.git ~/.pyenv \
43 | && ~/.pyenv/plugins/python-build/bin/python-build 3.10.11 /usr/local/python3.10.11 \
44 | && rm -r ~/.pyenv
45 | ENV PATH=/usr/local/python3.10.11/bin:$PATH
46 |
47 | # Rye
48 | ENV RYE_HOME="/opt/rye"
49 | ENV PATH="$RYE_HOME/shims:$PATH"
50 | RUN curl -sSf https://rye-up.com/get | RYE_NO_AUTO_INSTALL=1 RYE_INSTALL_OPTION="--yes" bash
51 |
52 | WORKDIR /root/app
53 | COPY README.md pyproject.toml requirements-dev.lock requirements.lock /root/app/
54 |
55 | RUN rye sync --no-lock
56 |
57 | ENTRYPOINT ["rye" , "run"]
58 |
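
Because the entrypoint is `rye run`, the container forwards its arguments to the environment created by `rye sync`; an illustrative invocation (tag name arbitrary) is `docker build -t kaggle-cmi .` followed by `docker run --gpus all kaggle-cmi python -V`, which executes `rye run python -V` inside the image.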
--------------------------------------------------------------------------------
/kami/src/models/feature_extractor/spectrogram.py:
--------------------------------------------------------------------------------
1 | from typing import Optional
2 |
3 | import torch
4 | import torch.nn as nn
5 | import torchaudio.transforms as T
6 |
7 |
8 | class SpecNormalize(nn.Module):
9 | def __init__(self, eps: float = 1e-8):
10 | super().__init__()
11 | self.eps = eps
12 |
13 | def forward(self, x):
14 |         # normalize per batch and channel
15 | # x: (batch, channel, freq, time)
16 | min_ = x.min(dim=-1, keepdim=True)[0].min(dim=-2, keepdim=True)[0]
17 | max_ = x.max(dim=-1, keepdim=True)[0].max(dim=-2, keepdim=True)[0]
18 |
19 | return (x - min_) / (max_ - min_ + self.eps)
20 |
21 |
22 | class SpecFeatureExtractor(nn.Module):
23 | def __init__(
24 | self,
25 | in_channels: int,
26 | height: int,
27 | hop_length: int,
28 | win_length: Optional[int] = None,
29 | out_size: Optional[int] = None,
30 | ):
31 | super().__init__()
32 | self.height = height
33 | self.out_chans = in_channels
34 | n_fft = height * 2 - 1
35 | self.feature_extractor = nn.Sequential(
36 | T.Spectrogram(n_fft=n_fft, hop_length=hop_length, win_length=win_length),
37 | T.AmplitudeToDB(top_db=80),
38 | SpecNormalize(),
39 | )
40 | self.out_size = out_size
41 |
42 | if self.out_size is not None:
43 | self.pool = nn.AdaptiveAvgPool2d((None, self.out_size))
44 |
45 | def forward(self, x: torch.Tensor) -> torch.Tensor:
46 | img = self.feature_extractor(x)
47 | if self.out_size is not None:
48 | img = self.pool(img)
49 |
50 | return img
51 |
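
A shape sketch (illustrative values): because `n_fft = height * 2 - 1`, the spectrogram has exactly `height` frequency bins, so each input channel becomes one `height x n_frames` image.

```python
import torch

from src.models.feature_extractor.spectrogram import SpecFeatureExtractor

fe = SpecFeatureExtractor(in_channels=3, height=64, hop_length=4)
x = torch.randn(2, 3, 5760)
print(fe(x).shape)  # torch.Size([2, 3, 64, 1441]); n_frames = 5760 // 4 + 1
```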
--------------------------------------------------------------------------------
/kami/run/conf/cv_inference.yaml:
--------------------------------------------------------------------------------
1 | # ---------- Overriding hydra default configs ----------
2 | hydra:
3 | job:
4 | name: cv_inference
5 | chdir: true
6 | run:
7 | dir: ${dir.output_dir}/${hydra.job.name}/${exp_name}/single
8 | sweep:
9 | dir: ${dir.output_dir}/${hydra.job.name}/${exp_name}
10 | subdir: run${hydra.job.num}
11 |
12 | defaults:
13 | - _self_
14 | - dir: local
15 | - model: Spec2DCNN
16 | - feature_extractor: CNNSpectrogram
17 | - decoder: UNet1DDecoder
18 | - features: base
19 | - post_process: score
20 |   # used when phase: train
21 | - split@fold_0: stratify_fold_0
22 | - split@fold_1: stratify_fold_1
23 | - split@fold_2: stratify_fold_2
24 | - split@fold_3: stratify_fold_3
25 | - split@fold_4: stratify_fold_4
26 | - loss: bce
27 |
28 | datamodule:
29 | how: random # random, stride, overlap
30 | train_stride: 3600 # 3h=2160, 5h=3600, 8h=5760
31 | overlap: 0
32 | zero_periodicity: false
33 |
34 | debug: false
35 |
36 | num_fold: 5
37 | num_tta: 2
38 |
39 | exp_name: dummy
40 | phase: test
41 | weight:
42 | exp_name: ${exp_name}
43 | run_name: cv
44 |
45 |
46 | how_post_process: 'peaks' # group_by_day
47 |
48 | seed: 42
49 | duration: 5760 # duration must be a multiple of 32
50 | downsample_rate: 2 # num_frames = duration / downsample_rate
51 | upsample_rate: 1
52 | batch_size: 32
53 | num_workers: 2
54 | use_amp: true
55 |
56 | # augmentation
57 | augmentation:
58 | mixup_prob: 0.0
59 | mixup_alpha: 0.4
60 | cutmix_prob: 0.0
61 | cutmix_alpha: 0.4
62 |
63 |
64 | # label
65 | labels:
66 | - "awake"
67 | - "event_onset"
68 | - "event_wakeup"
69 | label_weight:
70 | - 1.0
71 | - 1.0
72 | - 1.0
73 | pos_weight:
74 | - 1.0
75 | - 1.0
76 | - 1.0
77 |
78 |
--------------------------------------------------------------------------------
/kami/src/models/loss/tolerance_nonzero.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 |
4 |
5 | class ToleranceNonZeroLoss(nn.Module):
6 | def __init__(
7 | self,
8 | loss_weight: torch.Tensor = torch.Tensor([0.5, 0.5]),
9 |         label_weight: torch.Tensor | None = None,
10 |         pos_weight: torch.Tensor | None = None,
11 | ):
12 | super(ToleranceNonZeroLoss, self).__init__()
13 |
14 | self.loss_weight = loss_weight
15 | self.label_weight = label_weight
16 | self.pos_weight = pos_weight
17 |
18 | self.loss_fn = nn.BCEWithLogitsLoss(weight=self.label_weight, pos_weight=self.pos_weight)
19 |
20 | def forward(self, logits, labels, masks):
21 | # logits: shape (batch, seq, class)
22 | # masks: shape (batch, tolerance, seq, class)
23 |
24 | logits_event = logits[:, :, [1, 2]]
25 |
26 | # Step 1: Find the maximum value of logits in the range of each mask
27 | masked_logits_max, _ = torch.max(logits_event.unsqueeze(1) * masks, dim=2) # (batch, tolerance, class)
28 |
29 | # Step 2: Calculate max(0, logits - x) and take the average over seq
30 | loss = torch.relu(logits_event.unsqueeze(1) - masked_logits_max.unsqueeze(2)) # (batch, tolerance, seq, class)
31 |
32 | # Count the number of positive elements for each sequence
33 | num_positive_elements_seq = torch.sum(loss > 0, dim=2) # (batch, tolerance, class)
34 | # Compute the average loss for each sequence, avoiding division by zero
35 | loss_avg_seq = torch.sum(loss, dim=2) / (
36 | num_positive_elements_seq + (num_positive_elements_seq == 0)
37 |         )  # (batch, tolerance, class)
38 | loss = torch.mean(loss_avg_seq)
39 |
40 | loss = self.loss_weight[0] * self.loss_fn(logits, labels) + self.loss_weight[1] * loss
41 | return loss
42 |
--------------------------------------------------------------------------------
/shimacos/bin/stacking_exp060_030_truncate_small.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | for n_fold in 0 1 2 3 4; do
3 | python -m components.stacking.main_nn \
4 | base.use_transformer_parameter=False \
5 | data.n_fold=$n_fold \
6 | data.is_train=True \
7 | data.normalize=False \
8 | data.use_multi_label=True \
9 | model.model_class=CNNRNNWoDownSample \
10 | model.num_label=2 \
11 | base.loss_class=nn.BCEWithLogitsLoss \
12 | train.warm_start=False \
13 | store.model_name=$(basename $0 .sh) \
14 | feature=stacking_030_truncate_small \
15 | data.dataset_class=StackingDataset \
16 | train.loss_weight=1 \
17 | train.batch_size=32 \
18 | train.epoch=10 \
19 | train.warmup_multiplier=100 \
20 | train.learning_rate=0.000001 \
21 | trainer.precision=16 \
22 | data.chunk_size=120 \
23 | data.window_size=60 \
24 | model.num_layers=1 \
25 | model.hidden_size=96 \
26 | model.text.backbone=microsoft/deberta-v3-small \
27 | model.text.pretrained=False \
28 | data.use_only_positive_chunk=False \
29 | data.downsample_feature=False \
30 | data.is_stacking=True \
31 | trainer.gradient_clip_val=0.5 \
32 | base.opt_class=Adam \
33 | base.distance=8 \
34 | base.height=0.001 \
35 | train.refine_target=False \
36 | train.use_gaussian_target=False \
37 | trainer.num_sanity_val_steps=5 \
38 | model.encoder=mean \
39 | model.decoder=kernel \
40 | model.kernel_sizes=['3']
41 | done
42 |
--------------------------------------------------------------------------------
/shimacos/yamls/feature/stacking_030_truncate_small.yaml:
--------------------------------------------------------------------------------
1 | cat_cols:
2 | - time_idx
3 | - hour
4 | - minute
5 | - minute_mod15
6 | - weekday
7 | label_col: label
8 | label_cols:
9 | - label_onset
10 | - label_wakeup
11 | numerical_cols:
12 | - pred_onset_148_gru_scale_factor
13 | - pred_onset_156_gru_transformer_residual
14 | - pred_onset_163_gru_sleep_target
15 | - pred_onset_179_gru_minute_embedding_sync
16 | - pred_onset_exp068_transformer
17 | - pred_onset_exp078_lstm
18 | - pred_onset_exp081_mixup_short_feat14
19 | - pred_wakeup_148_gru_scale_factor
20 | - pred_wakeup_156_gru_transformer_residual
21 | - pred_wakeup_163_gru_sleep_target
22 | - pred_wakeup_179_gru_minute_embedding_sync
23 | - pred_wakeup_exp068_transformer
24 | - pred_wakeup_exp078_lstm
25 | - pred_wakeup_exp081_mixup_short_feat14
26 | - pred_sleep_exp068_transformer
27 | - pred_sleep_exp078_lstm
28 | - pred_sleep_exp081_mixup_short_feat14
29 | - pred_sleep_163_gru_sleep_target
30 | - pred_onset_148_gru_scale_factor_raw
31 | - pred_onset_156_gru_transformer_residual_raw
32 | - pred_onset_163_gru_sleep_target_raw
33 | - pred_onset_179_gru_minute_embedding_sync_raw
34 | - pred_onset_exp068_transformer_raw
35 | - pred_onset_exp078_lstm_raw
36 | - pred_onset_exp081_mixup_short_feat14_raw
37 | - pred_wakeup_148_gru_scale_factor_raw
38 | - pred_wakeup_156_gru_transformer_residual_raw
39 | - pred_wakeup_163_gru_sleep_target_raw
40 | - pred_wakeup_179_gru_minute_embedding_sync_raw
41 | - pred_wakeup_exp068_transformer_raw
42 | - pred_wakeup_exp078_lstm_raw
43 | - pred_wakeup_exp081_mixup_short_feat14_raw
44 | - prediction_onset
45 | - prediction_onset_min
46 | - prediction_onset_max
47 | - prediction_wakeup
48 | - prediction_wakeup_min
49 | - prediction_wakeup_max
50 | - prediction_onset_var
51 | - prediction_wakeup_var
52 | pred_col: label_pred
53 | pred_cols:
54 | - label_onset_pred
55 | - label_wakeup_pred
56 | version: 030_truncate
57 |
--------------------------------------------------------------------------------
/kami/src/models/feature_extractor/lstm.py:
--------------------------------------------------------------------------------
1 | from typing import Optional
2 |
3 | import torch
4 | import torch.nn as nn
5 |
6 |
7 | class LSTMFeatureExtractor(nn.Module):
8 | def __init__(
9 | self,
10 | in_channels,
11 | hidden_size,
12 | num_layers,
13 | bidirectional,
14 | out_size: Optional[int] = None,
15 | ):
16 | super().__init__()
17 | self.fc = nn.Linear(in_channels, hidden_size)
18 | self.height = hidden_size * (2 if bidirectional else 1)
19 | self.lstm = nn.LSTM(
20 | input_size=hidden_size,
21 | hidden_size=hidden_size,
22 | num_layers=num_layers,
23 | bidirectional=bidirectional,
24 | batch_first=True,
25 | )
26 | self.out_chans = 1
27 | self.out_size = out_size
28 | if self.out_size is not None:
29 | self.pool = nn.AdaptiveAvgPool2d((None, self.out_size))
30 |
31 | def forward(self, x: torch.Tensor) -> torch.Tensor:
32 | """Forward pass
33 |
34 | Args:
35 | x (torch.Tensor): (batch_size, in_channels, time_steps)
36 |
37 | Returns:
38 | torch.Tensor: (batch_size, out_chans, height, time_steps)
39 | """
40 | # x: (batch_size, in_channels, time_steps)
41 | if self.out_size is not None:
42 | x = x.unsqueeze(1) # x: (batch_size, 1, in_channels, time_steps)
43 | x = self.pool(x) # x: (batch_size, 1, in_channels, output_size)
44 | x = x.squeeze(1) # x: (batch_size, in_channels, output_size)
45 | x = x.transpose(1, 2) # x: (batch_size, output_size, in_channels)
46 | x = self.fc(x) # x: (batch_size, output_size, hidden_size)
47 | x, _ = self.lstm(x) # x: (batch_size, output_size, hidden_size * num_directions)
48 | x = x.transpose(1, 2) # x: (batch_size, hidden_size * num_directions, output_size)
49 | x = x.unsqueeze(1) # x: (batch_size, out_chans, hidden_size * num_directions, time_steps)
50 | return x
51 |
--------------------------------------------------------------------------------
/shimacos/components/stacking/io.py:
--------------------------------------------------------------------------------
1 | import polars as pl
2 |
3 |
4 | def load_sakami_data(exp_names: list[str]) -> pl.DataFrame:
5 | dfs = []
6 | for exp_name in exp_names:
7 | df = pl.read_parquet(f"../sakami/output/{exp_name}/valid_preds.parquet").sort("row_id").drop("row_id")
8 | if "sleep" in df.columns:
9 | df = df.rename(
10 | {
11 | "prediction_wakeup": f"pred_wakeup_{exp_name}",
12 | "prediction_onset": f"pred_onset_{exp_name}",
13 | "sleep": f"pred_sleep_{exp_name}",
14 | }
15 | )
16 | else:
17 | df = df.rename(
18 | {
19 | "prediction_wakeup": f"pred_wakeup_{exp_name}",
20 | "prediction_onset": f"pred_onset_{exp_name}",
21 | }
22 | )
23 | dfs.append(df)
24 | df = pl.concat(dfs, how="horizontal")
25 | return df
26 |
27 |
28 | def load_kami_data(exp_names: list[str]) -> pl.DataFrame:
29 | df = pl.concat(
30 | [
31 | pl.read_parquet(f"../kami/output/cv_inference/{exp_name}/single/train_pred.parquet").rename(
32 | {
33 | "pred_sleep": f"pred_sleep_{exp_name}",
34 | "pred_wakeup": f"pred_wakeup_{exp_name}",
35 | "pred_onset": f"pred_onset_{exp_name}",
36 | }
37 | )
38 | for exp_name in exp_names
39 | ],
40 | how="horizontal",
41 | )
42 | return df
43 |
44 |
45 | def load_shimacos_data(exp_names: list[str]) -> pl.DataFrame:
46 | df = pl.concat(
47 | [
48 | pl.concat([pl.read_parquet(f"./output/{exp_name}/fold{i}/result/valid.parquet") for i in range(5)])
49 | .rename(
50 | {
51 | "label_onset_pred": f"pred_onset_{exp_name}",
52 | "label_wakeup_pred": f"pred_wakeup_{exp_name}",
53 | }
54 | )
55 | .drop(["label_onset", "label_wakeup"])
56 | for exp_name in exp_names
57 | ],
58 | how="align",
59 | )
60 | return df
61 |
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
1 | [project]
2 | name = "kaggle-cmi"
3 | version = "0.1.0"
4 | description = "Add your description here"
5 | dependencies = [
6 | "numpy>=1.25.2",
7 | "pandas>=2.0.3",
8 | "polars>=0.19",
9 | "jupyter>=1.0.0",
10 | "matplotlib>=3.7.2",
11 | "seaborn>=0.12.2",
12 | "transformers>=4.31.0",
13 | "pillow==9.5.0",
14 | "torch==2.0.1+cu118",
15 | "torchvision==0.15.2+cu118",
16 | "torchaudio==2.0.2+cu118",
17 | "lightgbm>=4.0.0",
18 | "catboost>=1.2",
19 | "xgboost>=1.7.6",
20 | "hydra-core>=1.3.2",
21 | "hydra_colorlog>=1.2.0",
22 | "google-cloud-storage>=2.10.0",
23 | "deepspeed>=0.10.0",
24 | "seqeval>=1.2.2",
25 | "openmim>=0.3.9",
26 | "fairscale>=0.4.13",
27 | "wandb>=0.15.8",
28 | "pytorch-lightning==2.0.6",
29 | "timm==0.9.2",
30 | "pyarrow>=14.0.1",
31 | "matplotlib-venn>=0.11.9",
32 | "scipy>=1.11.3",
33 | "numba>=0.58.1",
34 | "loguru>=0.7.2",
35 | "segmentation_models_pytorch>=0.3.3",
36 | "invoke>=2.2.0",
37 | "pathlib>=1.0.1",
38 | ]
39 | readme = "README.md"
40 | requires-python = ">= 3.10"
41 |
42 | [build-system]
43 | requires = ["hatchling"]
44 | build-backend = "hatchling.build"
45 |
46 | [tool.hatch.build.targets.wheel]
47 | packages = ["*"]
48 |
49 | [tool.rye]
50 | managed = true
51 | dev-dependencies = [
52 | "black>=23.7.0",
53 | "mypy>=1.5.0",
54 | "flake8>=6.1.0",
55 | "ruff>=0.0.284",
56 | "kaggle>=1.5.16",
57 | ]
58 |
59 | [tool.hatch.metadata]
60 | allow-direct-references = true
61 |
62 | [[tool.rye.sources]]
63 | name = "torch"
64 | url = "https://download.pytorch.org/whl/cu118"
65 | type = "index"
66 |
67 | [[tool.rye.sources]]
68 | name = "torchvision"
69 | url = "https://download.pytorch.org/whl/cu118"
70 | type = "index"
71 |
72 | [[tool.rye.sources]]
73 | name = "cuml-cu11"
74 | url = "https://pypi.nvidia.com"
75 | type = "index"
76 |
77 |
78 | [tool.ruff]
79 | target-version = "py311"
80 | line-length = 100
81 | ignore = [
82 | "E402", # Module level import not at top of file
83 | "E501", # Line too long
84 | ]
85 | select = [
86 | "F",
87 | "E",
88 | "W",
89 | "I",
90 | "B",
91 | ]
92 |
--------------------------------------------------------------------------------
/kami/src/augmentation/cutmix.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import torch
3 |
4 |
5 | def get_rand_1dbbox(n_timesteps: int, lam: float) -> tuple[int, int]:
6 | """Get random 1D bounding box.
7 |
8 | Args:
9 | n_timesteps (int): Number of timesteps.
10 | lam (float): Lambda value.
11 |
12 | Returns:
13 | tuple[int, int]: (start, end) of the bounding box.
14 | """
15 | cut_rat = np.sqrt(1.0 - lam)
16 | cut_len = int(n_timesteps * cut_rat)
17 |
18 |     start = np.random.randint(0, n_timesteps - cut_len + 1)  # +1 keeps the range non-empty when cut_len == n_timesteps
19 | end = start + cut_len
20 |
21 | return start, end
22 |
23 |
24 | class Cutmix:
25 | def __init__(self, alpha: float = 0.4):
26 | self.alpha = alpha
27 |
28 | def __call__(
29 | self, imgs: torch.Tensor, labels: torch.Tensor
30 | ) -> tuple[torch.Tensor, torch.Tensor]:
31 | """Cutmix augmentation.
32 |
33 | Args:
34 | imgs (torch.Tensor): (batch_size, n_channels, n_timesteps)
35 | labels (torch.Tensor): (batch_size, n_timesteps, n_classes)
36 |
37 | Returns:
38 |             tuple[torch.Tensor, torch.Tensor]: mixed_imgs (batch_size, n_channels, n_timesteps)
39 |                 mixed_labels (batch_size, n_timesteps, n_classes)
40 | """
41 | batch_size = imgs.size(0)
42 | idx = torch.randperm(batch_size)
43 |
44 | shuffled_imgs = imgs[idx]
45 | shuffled_labels = labels[idx]
46 |
47 | lam = np.random.beta(self.alpha, self.alpha)
48 | start, end = get_rand_1dbbox(imgs.size(2), lam)
49 |
50 | mixed_imgs = torch.concatenate(
51 | [imgs[:, :, :start], shuffled_imgs[:, :, start:end], imgs[:, :, end:]], dim=2
52 | )
53 | mixed_labels = torch.concatenate(
54 | [labels[:, :start, :], shuffled_labels[:, start:end, :], labels[:, end:, :]], dim=1
55 | )
56 |
57 | return mixed_imgs, mixed_labels
58 |
59 |
60 | if __name__ == "__main__":
61 | imgs = torch.randn(2, 3, 100)
62 | labels = torch.randn(2, 100, 5)
63 | cutmix = Cutmix()
64 |
65 | mixed_imgs, mixed_labels = cutmix(imgs, labels)
66 |
67 | print(mixed_imgs.shape)
68 | print(mixed_labels.shape)
69 |
--------------------------------------------------------------------------------
/kami/src/models/feature_extractor/panns.py:
--------------------------------------------------------------------------------
1 | from typing import Callable, Optional
2 |
3 | import torch
4 | import torch.nn as nn
5 |
6 | from src.models.feature_extractor.cnn import CNNSpectrogram
7 | from src.models.feature_extractor.spectrogram import SpecFeatureExtractor
8 |
9 |
10 | class PANNsFeatureExtractor(nn.Module):
11 | def __init__(
12 | self,
13 | in_channels: int = 3,
14 | base_filters: int | tuple = 128,
15 | kernel_sizes: tuple = (32, 16, 4, 2),
16 | stride: int = 4,
17 | sigmoid: bool = False,
18 | output_size: Optional[int] = None,
19 | conv: Callable = nn.Conv1d,
20 | reinit: bool = True,
21 | win_length: Optional[int] = None,
22 | ):
23 | super().__init__()
24 | self.cnn_feature_extractor = CNNSpectrogram(
25 | in_channels=in_channels,
26 | base_filters=base_filters,
27 | kernel_sizes=kernel_sizes,
28 | stride=stride,
29 | sigmoid=sigmoid,
30 | output_size=output_size,
31 | conv=conv,
32 | reinit=reinit,
33 | )
34 | self.spec_feature_extractor = SpecFeatureExtractor(
35 | in_channels=in_channels,
36 | height=self.cnn_feature_extractor.height,
37 | hop_length=stride,
38 | win_length=win_length,
39 | out_size=output_size,
40 | )
41 | self.height = self.cnn_feature_extractor.height
42 | self.out_chans = self.cnn_feature_extractor.out_chans + in_channels
43 |
44 | def forward(self, x: torch.Tensor) -> torch.Tensor:
45 | """Forward pass of the model.
46 |
47 | Args:
48 | x (torch.Tensor): (batch_size, in_channels, time_steps)
49 |
50 | Returns:
51 | torch.Tensor : (batch_size, out_chans, height, time_steps)
52 | """
53 |
54 | cnn_img = self.cnn_feature_extractor(x) # (batch_size, cnn_chans, height, time_steps)
55 | spec_img = self.spec_feature_extractor(x) # (batch_size, in_channels, height, time_steps)
56 |
57 | img = torch.cat([cnn_img, spec_img], dim=1) # (batch_size, out_chans, height, time_steps)
58 |
59 | return img
60 |
--------------------------------------------------------------------------------
/kami/run/conf/cv_train.yaml:
--------------------------------------------------------------------------------
1 | # ---------- Overriding hydra default configs ----------
2 | hydra:
3 | job:
4 | name: cv_train
5 | chdir: true
6 | run:
7 | dir: ${dir.output_dir}/${hydra.job.name}/${exp_name}/cv
8 | sweep:
9 | dir: ${dir.output_dir}/${hydra.job.name}/${exp_name}
10 | subdir: run${hydra.job.num}
11 |
12 | defaults:
13 | - _self_
14 | - dir: local
15 | - model: Spec2DCNN # Spec2DCNN2day
16 | - feature_extractor: CNNSpectrogram
17 | - decoder: UNet1DDecoder
18 | - split@fold_0: stratify_fold_0
19 | - split@fold_1: stratify_fold_1
20 | - split@fold_2: stratify_fold_2
21 | - split@fold_3: stratify_fold_3
22 | - split@fold_4: stratify_fold_4
23 | - features: base
24 | - post_process: train
25 | - ignore: zero
26 | - loss: bce
27 | - cat_features: base
28 |
29 |
30 | datamodule:
31 | how: random # random, stride, overlap
32 | train_stride: 3600 # 3h=2160, 5h=3600, 8h=5760
33 | overlap: 0
34 | zero_periodicity: false
35 | max_label_smoothing: 1.0
36 |
37 | label_correct:
38 | use: false
39 | save_epoch: 0
40 | pred_threshold: 0.5
41 | pred_rate: 1.0
42 |
43 | num_fold: 5
44 |
45 | seed: 42
46 | exp_name: dummy
47 | duration: 5760 # 1 step = 5 s, so duration * 5 = seconds; e.g. 5760 * 5 = 28800 s = 8 h
48 | downsample_rate: 2
49 | upsample_rate: 1
50 |
51 | # training
52 | epoch: 50
53 | batch_size: 32
54 | num_workers: 8
55 | accelerator: auto
56 | use_amp: true
57 | debug: false
58 | gradient_clip_val: 1.0
59 | accumulate_grad_batches: 1
60 | monitor: val_loss
61 | monitor_mode: min
62 | check_val_every_n_epoch: 1
63 |
64 | # Dataset
65 | offset: 10
66 | sigma: 10
67 | bg_sampling_rate: 0.5
68 | sigma_decay: # 0.95
69 | sleep_decay: # 0.90
70 |
71 | # augmentation
72 | augmentation:
73 | mixup_prob: 0.0
74 | mixup_alpha: 0.4
75 | cutmix_prob: 0.0
76 | cutmix_alpha: 0.4
77 |
78 | averaged_model:
79 | how: None # ema, avg
80 | ema_decay: 0.9999
81 |
82 |
83 | # label
84 | labels:
85 | - "awake"
86 | - "event_onset"
87 | - "event_wakeup"
88 | label_weight:
89 | - 1.0
90 | - 1.0
91 | - 1.0
92 | pos_weight:
93 | - 1.0
94 | - 1.0
95 | - 1.0
96 |
97 |
98 | # optimizer
99 | optimizer:
100 | lr: 0.0005
101 |
102 | # scheduler
103 | scheduler:
104 | use_warmup: False
105 |
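
Every key above can be overridden from the Hydra command line in the usual `key=value` form when launching the training entry point (the script itself is not shown in this dump), e.g. `exp_name=exp001 model=Spec2DCNNSplit features=012 augmentation.mixup_prob=0.5`.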
--------------------------------------------------------------------------------
/shimacos/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # C extensions
7 | *.so
8 |
9 | # Distribution / packaging
10 | .Python
11 | build/
12 | develop-eggs/
13 | dist/
14 | downloads/
15 | eggs/
16 | .eggs/
17 | lib/
18 | lib64/
19 | parts/
20 | sdist/
21 | var/
22 | wheels/
23 | pip-wheel-metadata/
24 | share/python-wheels/
25 | *.egg-info/
26 | .installed.cfg
27 | *.egg
28 | MANIFEST
29 |
30 | # PyInstaller
31 | # Usually these files are written by a python script from a template
32 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
33 | *.manifest
34 | *.spec
35 |
36 | # Installer logs
37 | pip-log.txt
38 | pip-delete-this-directory.txt
39 |
40 | # Unit test / coverage reports
41 | htmlcov/
42 | .tox/
43 | .nox/
44 | .coverage
45 | .coverage.*
46 | .cache
47 | nosetests.xml
48 | coverage.xml
49 | *.cover
50 | *.py,cover
51 | .hypothesis/
52 | .pytest_cache/
53 |
54 | # Translations
55 | *.mo
56 | *.pot
57 |
58 | # Django stuff:
59 | *.log
60 | local_settings.py
61 | db.sqlite3
62 | db.sqlite3-journal
63 |
64 | # Flask stuff:
65 | instance/
66 | .webassets-cache
67 |
68 | # Scrapy stuff:
69 | .scrapy
70 |
71 | # Sphinx documentation
72 | docs/_build/
73 |
74 | # PyBuilder
75 | target/
76 |
77 | # Jupyter Notebook
78 | .ipynb_checkpoints
79 |
80 | # IPython
81 | profile_default/
82 | ipython_config.py
83 |
84 | # pyenv
85 | .python-version
86 |
87 | # pipenv
88 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
89 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
90 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
91 | # install all needed dependencies.
92 | #Pipfile.lock
93 |
94 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow
95 | __pypackages__/
96 |
97 | # Celery stuff
98 | celerybeat-schedule
99 | celerybeat.pid
100 |
101 | # SageMath parsed files
102 | *.sage.py
103 |
104 | # Environments
105 | .env
106 | .venv
107 | env/
108 | venv/
109 | ENV/
110 | env.bak/
111 | venv.bak/
112 |
113 | # Spyder project settings
114 | .spyderproject
115 | .spyproject
116 |
117 | # Rope project settings
118 | .ropeproject
119 |
120 | # mkdocs documentation
121 | /site
122 |
123 | # mypy
124 | .mypy_cache/
125 | .dmypy.json
126 | dmypy.json
127 |
128 | # Pyre type checker
129 | .pyre/
130 |
131 | output/*
132 | input/*
--------------------------------------------------------------------------------
/kami/run/split_folds.py:
--------------------------------------------------------------------------------
1 | import os
2 | from pathlib import Path
3 |
4 | import hydra
5 | import polars as pl
6 | import yaml
7 | from omegaconf import DictConfig
8 | from sklearn.model_selection import StratifiedKFold
9 |
10 |
11 | def make_fold(cfg: DictConfig, n_folds: int = 5):
12 | series_ids = [
13 | str(path).split("/")[-1]
14 | for path in (Path(cfg.dir.processed_dir) / "train").glob("*")
15 | ]
16 |
17 | event_df = pl.read_csv(Path(cfg.dir.data_dir) / "train_events.csv").drop_nulls()
18 |     q_cut_num = 10  # no_event is excluded here, so 11 classes in total
19 | event_count_df = event_df.group_by("series_id").count()
20 | event_count_df = event_count_df.select(
21 | pl.col("series_id"),
22 | pl.col("count"),
23 | pl.col("count")
24 | .qcut(q_cut_num, labels=[str(i) for i in range(1, q_cut_num + 1)])
25 | .alias("class")
26 | .cast(pl.Utf8),
27 | )
28 |
29 | no_event = [
30 | si
31 | for si in series_ids
32 | if si not in list(event_count_df.get_column("series_id"))
33 | ]
34 | no_event_df = pl.DataFrame(
35 | {
36 | "series_id": no_event,
37 | "count": [0 for _ in range(len(no_event))],
38 | "class": [str(0) for _ in range(len(no_event))],
39 | }
40 | )
41 | no_event_df = no_event_df.select(
42 | pl.col("series_id"),
43 | pl.col("count").cast(pl.UInt32),
44 | pl.col("class").cast(pl.Utf8),
45 | )
46 |
47 |     # concatenate the two frames
48 | all_df = pl.concat([event_count_df, no_event_df]).sort(by="series_id")
49 |
50 | X = all_df.drop("class")
51 | y = all_df.get_column("class")
52 |
53 | skf = StratifiedKFold(n_splits=n_folds)
54 |
55 | os.makedirs(Path(cfg.dir.input_dir) / "folds", exist_ok=True)
56 | for i, (train_index, test_index) in enumerate(skf.split(X, y)):
57 | fold_dict = {
58 | "train_series_ids": list(all_df.get_column("series_id").take(train_index)),
59 | "valid_series_ids": list(all_df.get_column("series_id").take(test_index)),
60 | }
61 |
62 | with open(Path(cfg.dir.input_dir) / f"folds/stratify_fold_{i}.yaml", "w") as wf:
63 | yaml.dump(fold_dict, wf)
64 |
65 | with open(
66 | Path(cfg.dir.kami_dir) / f"run/conf/split/stratify_fold_{i}.yaml", "w"
67 | ) as wf:
68 | yaml.dump(fold_dict, wf)
69 |
70 |
71 | @hydra.main(config_path="conf", config_name="split_folds", version_base="1.2")
72 | def main(cfg: DictConfig):
73 | make_fold(cfg)
74 |
75 |
76 | if __name__ == "__main__":
77 | main()
78 |
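
A minimal sketch (hypothetical session) of consuming one emitted fold file: each YAML holds two lists, `train_series_ids` and `valid_series_ids`, and the same files are mirrored into `run/conf/split/` so Hydra can compose them.

```python
import yaml

with open("input/folds/stratify_fold_0.yaml") as f:
    fold = yaml.safe_load(f)

print(len(fold["train_series_ids"]), len(fold["valid_series_ids"]))
```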
--------------------------------------------------------------------------------
/shimacos/components/stacking/main_nn.py:
--------------------------------------------------------------------------------
1 | import os
2 | import random
3 | import re
4 | import shutil
5 | from glob import glob
6 |
7 | import components.stacking.runner as runner
8 | import hydra
9 | import numpy as np
10 | import torch
11 | from hydra.utils import instantiate
12 | from omegaconf import DictConfig
13 |
14 |
15 | def set_seed(seed: int):
16 | os.environ["PYTHONHASHSEED"] = str(seed)
17 | random.seed(seed)
18 | np.random.seed(seed)
19 | torch.manual_seed(seed)
20 | torch.cuda.manual_seed(seed)
21 | torch.backends.cudnn.benchmark = True
22 | # torch.backends.cudnn.deterministic = True
23 | # torch.use_deterministic_algorithms(True, warn_only=True)
24 |
25 |
26 | def prepare_dir(config: DictConfig):
27 |     """
28 |     Create the directories where logs are saved.
29 |     """
30 | for path in [
31 | config.store.result_path,
32 | config.store.log_path,
33 | config.store.model_path,
34 | config.store.feature_path,
35 | ]:
36 | if os.path.exists(path) and config.train.warm_start is False and config.data.is_train:
37 | shutil.rmtree(path)
38 | os.makedirs(path, exist_ok=True)
39 |
40 |
41 | def set_up(config: DictConfig):
42 | # Setup
43 |     prepare_dir(config)
44 | set_seed(config.train.seed)
45 | os.environ["CUDA_VISIBLE_DEVICES"] = ",".join(config.base.gpu_id)
46 |
47 |
48 | @hydra.main(config_path="../../yamls", config_name="nn.yaml")
49 | def main(config: DictConfig):
50 | os.chdir(config.store.workdir)
51 | set_up(config)
52 | if config.train.warm_start:
53 | checkpoint_path = sorted(
54 | glob(config.store.model_path + "/*epoch*"),
55 | key=lambda path: int(re.split("[=.]", path)[-2]),
56 | )[-1]
57 | print(checkpoint_path)
58 | else:
59 | checkpoint_path = None
60 |
61 | model = getattr(runner, config.runner)(config)
62 |
63 | if config.data.is_train:
64 | trainer = instantiate(config.trainer)
65 | trainer.fit(model)
66 | else:
67 | state_dict = torch.load(checkpoint_path)["state_dict"]
68 | model.load_state_dict(state_dict)
69 | config.trainer.update(
70 | {
71 | "devices": 1,
72 | "logger": None,
73 | "limit_train_batches": 0.0,
74 | "limit_val_batches": 0.0,
75 | "limit_test_batches": 1.0,
76 | "accelerator": None,
77 | }
78 | )
79 | trainer = instantiate(config.trainer)
80 | trainer.test(model, model.test_dataloader())
81 |
82 |
83 | if __name__ == "__main__":
84 | main()
85 |
--------------------------------------------------------------------------------
/shimacos/components/stacking/sync_batchnorm/batchnorm_reimpl.py:
--------------------------------------------------------------------------------
1 | #! /usr/bin/env python3
2 | # -*- coding: utf-8 -*-
3 | # File : batchnorm_reimpl.py
4 | # Author : acgtyrant
5 | # Date : 11/01/2018
6 | #
7 | # This file is part of Synchronized-BatchNorm-PyTorch.
8 | # https://github.com/vacancy/Synchronized-BatchNorm-PyTorch
9 | # Distributed under MIT License.
10 |
11 | import torch
12 | import torch.nn as nn
13 | import torch.nn.init as init
14 |
15 | __all__ = ["BatchNorm2dReimpl"]
16 |
17 |
18 | class BatchNorm2dReimpl(nn.Module):
19 | """
20 | A re-implementation of batch normalization, used for testing the numerical
21 | stability.
22 |
23 | Author: acgtyrant
24 | See also:
25 | https://github.com/vacancy/Synchronized-BatchNorm-PyTorch/issues/14
26 | """
27 |
28 | def __init__(self, num_features, eps=1e-5, momentum=0.1):
29 | super().__init__()
30 |
31 | self.num_features = num_features
32 | self.eps = eps
33 | self.momentum = momentum
34 | self.weight = nn.Parameter(torch.empty(num_features))
35 | self.bias = nn.Parameter(torch.empty(num_features))
36 | self.register_buffer("running_mean", torch.zeros(num_features))
37 | self.register_buffer("running_var", torch.ones(num_features))
38 | self.reset_parameters()
39 |
40 | def reset_running_stats(self):
41 | self.running_mean.zero_()
42 | self.running_var.fill_(1)
43 |
44 | def reset_parameters(self):
45 | self.reset_running_stats()
46 | init.uniform_(self.weight)
47 | init.zeros_(self.bias)
48 |
49 | def forward(self, input_):
50 | batchsize, channels, height, width = input_.size()
51 | numel = batchsize * height * width
52 | input_ = input_.permute(1, 0, 2, 3).contiguous().view(channels, numel)
53 | sum_ = input_.sum(1)
54 | sum_of_square = input_.pow(2).sum(1)
55 | mean = sum_ / numel
56 | sumvar = sum_of_square - sum_ * mean
57 |
58 | self.running_mean = (
59 | 1 - self.momentum
60 | ) * self.running_mean + self.momentum * mean.detach()
61 | unbias_var = sumvar / (numel - 1)
62 | self.running_var = (
63 | 1 - self.momentum
64 | ) * self.running_var + self.momentum * unbias_var.detach()
65 |
66 | bias_var = sumvar / numel
67 | inv_std = 1 / (bias_var + self.eps).pow(0.5)
68 | output = (input_ - mean.unsqueeze(1)) * inv_std.unsqueeze(
69 | 1
70 | ) * self.weight.unsqueeze(1) + self.bias.unsqueeze(1)
71 |
72 | return (
73 | output.view(channels, batchsize, height, width)
74 | .permute(1, 0, 2, 3)
75 | .contiguous()
76 | )
77 |
78 |
--------------------------------------------------------------------------------
/sakami/src/config.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | import copy
4 | import sys
5 | from pathlib import Path
6 | from typing import Any
7 |
8 | import yaml
9 |
10 |
11 | class Config(dict):
12 | """A dictionary class that allows for attribute-style access of values."""
13 |
14 | __setattr__ = dict.__setitem__
15 |
16 | def init(self) -> None:
17 | # update meta_config
18 | exec_file_name = sys.argv[0]
19 |
20 | self.home_dir = Path("./")
21 | self.input_dir = Path("../input")
22 | self.data_dir = self.input_dir / self.competition_name
23 | self.cache_dir = self.input_dir / "cache"
24 | self.output_dir = Path("./output")
25 | self.wandb_dir = Path("./wandb")
26 |
27 | self.run_name = Path(exec_file_name).stem
28 | self.save_dir = Path(self.output_dir) / self.run_name
29 | self.checkpoint_dir = self.save_dir / "checkpoint/"
30 |
31 | # make directories
32 | self.input_dir.mkdir(exist_ok=True)
33 | self.data_dir.mkdir(exist_ok=True)
34 | self.cache_dir.mkdir(exist_ok=True)
35 | self.output_dir.mkdir(exist_ok=True)
36 | self.wandb_dir.mkdir(exist_ok=True)
37 | self.save_dir.mkdir(exist_ok=True)
38 | self.checkpoint_dir.mkdir(exist_ok=True)
39 |
40 | def check_columns(self) -> None:
41 | required_columns = [
42 | "competition_name",
43 | ]
44 | for required_column in required_columns:
45 | if not hasattr(self, required_column):
46 | raise KeyError(f"Meta config {required_column} must be specified.")
47 |
48 | def __getattr__(self, key: Any) -> Any:
49 | value = super().get(key)
50 | if isinstance(value, dict):
51 | return Config(value)
52 | return value
53 |
54 | def __deepcopy__(self, memo: dict[int, int | list[int]] | None = None) -> Config:
55 | """Prevent errors in the `copy.deepcopy` method.
56 |
57 | References
58 | ----------
59 | - https://stackoverflow.com/questions/49901590/python-using-copy-deepcopy-on-dotdict
60 | """
61 | return Config(copy.deepcopy(dict(self), memo=memo))
62 |
63 | @classmethod
64 | def load(cls, config_path: str) -> Config:
65 | """Load a config file.
66 |
67 | Parameters
68 | ----------
69 | config_path : str
70 | Path to config file.
71 |
72 | Returns
73 | -------
74 | Config
75 | Configuration parameters.
76 | """
77 | if not Path(config_path).exists():
78 | raise ValueError(f"Configuration file {config_path} does not exist.")
79 |
80 | with open(config_path) as f:
81 | config = cls(yaml.safe_load(f))
82 |
83 | config.check_columns()
84 | config.init()
85 | return config
86 |
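
A usage sketch (paths illustrative): `load` checks that `competition_name` is present, then `init` derives and creates the directory layout, after which plain attribute access works.

```python
from src.config import Config

# meta.yaml must define competition_name (see check_columns);
# note that init() creates the input/output/cache directories as a side effect.
config = Config.load("config/meta.yaml")
print(config.competition_name, config.data_dir)
```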
--------------------------------------------------------------------------------
/kami/src/utils/common.py:
--------------------------------------------------------------------------------
1 | import math
2 | import os
3 | import random
4 | import sys
5 | import time
6 | from contextlib import contextmanager
7 |
8 | import numpy as np
9 | import pandas as pd
10 | import psutil
11 |
12 |
13 | @contextmanager
14 | def trace(title):
15 | t0 = time.time()
16 | p = psutil.Process(os.getpid())
17 | m0 = p.memory_info().rss / 2.0**30
18 | yield
19 | m1 = p.memory_info().rss / 2.0**30
20 | delta = m1 - m0
21 | sign = "+" if delta >= 0 else "-"
22 | delta = math.fabs(delta)
23 | print(f"[{m1:.1f}GB({sign}{delta:.1f}GB):{time.time() - t0:.1f}sec] {title} ", file=sys.stderr)
24 |
25 |
26 | def pad_if_needed(x: np.ndarray, max_len: int, pad_value: float = 0.0) -> np.ndarray:
27 | if len(x) == max_len:
28 | return x
29 | num_pad = max_len - len(x)
30 | n_dim = len(x.shape)
31 | pad_widths = [(0, num_pad)] + [(0, 0) for _ in range(n_dim - 1)]
32 | return np.pad(x, pad_width=pad_widths, mode="constant", constant_values=pad_value)
33 |
34 |
35 | def nearest_valid_size(input_size: int, downsample_rate: int) -> int:
36 |     """
37 |     Returns the smallest x >= input_size that satisfies
38 |     (x // downsample_rate) % 32 == 0.
39 |     """
40 |
41 | while (input_size // downsample_rate) % 32 != 0:
42 | input_size += 1
43 | assert (input_size // downsample_rate) % 32 == 0
44 |
45 | return input_size
46 |
47 |
48 | def random_crop(pos: int, duration: int, max_end: int) -> tuple[int, int]:
49 | """Randomly crops with duration length including pos.
50 | However, 0<=start, end<=max_end
51 | """
52 | start = random.randint(max(0, pos - duration), min(pos, max_end - duration))
53 | end = start + duration
54 | return start, end
55 |
56 |
57 | def negative_sampling(this_event_df: pd.DataFrame, num_steps: int) -> int:
58 | """negative sampling
59 |
60 | Args:
61 | this_event_df (pd.DataFrame): event df
62 | num_steps (int): number of steps in this series
63 |
64 | Returns:
65 | int: negative sample position
66 | """
67 |     # sample randomly from the range excluding the onset and wakeup positions
68 | positive_positions = set(this_event_df[["onset", "wakeup"]].to_numpy().flatten().tolist())
69 | negative_positions = list(set(range(num_steps)) - positive_positions)
70 | return random.sample(negative_positions, 1)[0]
71 |
72 |
73 | # ref: https://www.kaggle.com/competitions/dfl-bundesliga-data-shootout/discussion/360236#2004730
74 | def gaussian_kernel(length: int, sigma: int = 3) -> np.ndarray:
75 | x = np.ogrid[-length : length + 1]
76 | h = np.exp(-(x**2) / (2 * sigma * sigma)) # type: ignore
77 | h[h < np.finfo(h.dtype).eps * h.max()] = 0
78 | return h
79 |
80 |
81 | def gaussian_label(label: np.ndarray, offset: int, sigma: int) -> np.ndarray:
82 | num_events = label.shape[1]
83 | for i in range(num_events):
84 | label[:, i] = np.convolve(label[:, i], gaussian_kernel(offset, sigma), mode="same")
85 |
86 | return label
87 |
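
Two quick checks of the helpers above (values illustrative): `nearest_valid_size` rounds up until the downsampled length is divisible by 32, and `gaussian_label` smears a hard event label into a soft bump.

```python
import numpy as np

from src.utils.common import gaussian_label, nearest_valid_size, trace

assert nearest_valid_size(5700, downsample_rate=2) == 5760  # 5760 // 2 = 2880 = 32 * 90

with trace("gaussian labels"):
    label = np.zeros((100, 2))
    label[50, 0] = 1.0
    soft = gaussian_label(label, offset=10, sigma=10)
print(soft[45:56, 0])  # a bump centered on step 50
```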
--------------------------------------------------------------------------------
/shimacos/components/factories/sampler_factory.py:
--------------------------------------------------------------------------------
1 | import math
2 | import torch
3 | from torch.utils.data.distributed import DistributedSampler
4 |
5 |
6 | class WeightedDistributedSampler(DistributedSampler):
7 | def __init__(self, dataset, num_replicas=None, rank=None, shuffle=True):
8 | super().__init__(dataset, num_replicas, rank, shuffle)
9 | weights_dict = {0: 1, 1: 1}
10 | weights = (
11 | dataset.sample_df["pe_present_on_image"]
12 | .apply(lambda x: weights_dict[x])
13 | .values
14 | )
15 | self.weights = torch.as_tensor(weights, dtype=torch.double)
16 |
17 | def __iter__(self):
18 | indices = list(range(len(self.dataset)))
19 | # add extra samples to make it evenly divisible
20 | indices += indices[: (self.total_size - len(indices))]
21 | assert len(indices) == self.total_size
22 |
23 | # subsample
24 | indices = indices[self.rank : self.total_size : self.num_replicas]
25 | weights = self.weights[indices]
26 | assert len(indices) == self.num_samples
27 | return (
28 | indices[i]
29 | for i in torch.multinomial(weights, self.num_samples, replacement=True)
30 | )
31 |
32 | def __len__(self):
33 | return self.num_samples
34 |
35 | def set_epoch(self, epoch):
36 | self.epoch = epoch
37 |
38 |
39 | class BalancedDistributedSampler(DistributedSampler):
40 | def __init__(self, dataset, num_replicas=None, rank=None, shuffle=True):
41 | self.dataset = dataset
42 | self.num_replicas = num_replicas
43 | self.rank = rank
44 | self.epoch = 0
45 | self.num_samples = int(math.ceil(len(self.dataset) * 1.0 / self.num_replicas))
46 | self.total_size = self.num_samples * self.num_replicas
47 | self.shuffle = shuffle
48 |
49 | def __iter__(self):
50 | # deterministically shuffle based on epoch
51 | g = torch.Generator()
52 | g.manual_seed(self.epoch)
53 | if self.shuffle:
54 | indices = torch.randperm(len(self.dataset), generator=g).tolist()
55 | else:
56 | indices = list(range(len(self.dataset)))
57 |
58 | # add extra samples to make it evenly divisible
59 | indices += indices[: (self.total_size - len(indices))]
60 | assert len(indices) == self.total_size
61 |
62 | # subsample
63 | indices = indices[self.rank : self.total_size : self.num_replicas]
64 | assert len(indices) == self.num_samples
65 |
66 | return iter(indices)
67 |
68 | def __len__(self):
69 | return self.num_samples
70 |
71 | def set_epoch(self, epoch):
72 | self.epoch = epoch
73 |
74 |
75 | def get_weighted_sampler(dataset):
76 | sampler = WeightedDistributedSampler(dataset=dataset)
77 | return sampler
78 |
79 |
80 | def get_sampler(sampler_class, **params):
81 | print("sampler class:", sampler_class)
82 | f = globals().get(sampler_class)
83 | return f(**params)
84 |
--------------------------------------------------------------------------------
/shimacos/components/factories/callback_factory.py:
--------------------------------------------------------------------------------
1 | import os
2 | from glob import glob
3 |
4 | from google.cloud import storage
5 |
6 | from pytorch_lightning.callbacks import ModelCheckpoint, ProgressBar
7 |
8 |
9 | class MyCallback(ModelCheckpoint):
10 | def __init__(
11 | self,
12 | store_config,
13 | monitor="val_loss",
14 | verbose=0,
15 | save_top_k=1,
16 | save_weights_only=False,
17 | mode="auto",
18 | period=1,
19 | ):
20 | super(MyCallback, self).__init__(
21 | store_config.model_path,
22 | monitor,
23 | verbose,
24 | save_top_k,
25 | save_weights_only,
26 | mode,
27 | period,
28 | store_config.model_class,
29 | )
30 | self.store_config = store_config
31 |
32 | def _save_model(self, filepath):
33 | dirpath = os.path.dirname(filepath)
34 | # make paths
35 | os.makedirs(dirpath, exist_ok=True)
36 |
37 | # delegate the saving to the model
38 | self.save_function(filepath)
39 | if self.store_config.gcs_project is not None:
40 | self.upload_directory()
41 |
42 | def upload_directory(self):
43 | storage_client = storage.Client(self.store_config.gcs_project)
44 | bucket = storage_client.get_bucket(self.store_config.bucket_name)
45 | filenames = glob(
46 | os.path.join(self.store_config.save_path, "**"), recursive=True
47 | )
48 | for filename in filenames:
49 | if os.path.isdir(filename):
50 | continue
51 | destination_blob_name = os.path.join(
52 | self.store_config.gcs_path,
53 | filename.split(self.store_config.save_path)[-1][1:],
54 | )
55 | blob = bucket.blob(destination_blob_name)
56 | blob.upload_from_filename(filename)
57 |
58 |
59 | class MyProgressBar(ProgressBar):
60 | def format_num(self, n):
61 | f = "{0:2.3g}".format(n)
62 | f = f.replace("+0", "+")
63 | f = f.replace("-0", "-")
64 | n = str(n)
65 | return f if len(f) < len(n) else n
66 |
67 | def on_batch_end(self, trainer, pl_module):
68 | # super().on_batch_end(trainer, pl_module)
69 | if self.is_enabled and self.train_batch_idx % self.refresh_rate == 0:
70 | self.main_progress_bar.update(self.refresh_rate)
71 | for key, val in trainer.progress_bar_dict.items():
72 | if not isinstance(val, str):
73 | trainer.progress_bar_dict[key] = self.format_num(val)
74 | self.main_progress_bar.set_postfix(trainer.progress_bar_dict)
75 |
76 | def on_validation_end(self, trainer, pl_module):
77 | super().on_validation_end(trainer, pl_module)
78 | for key, val in trainer.progress_bar_dict.items():
79 | if not isinstance(val, str):
80 | trainer.progress_bar_dict[key] = self.format_num(val)
81 | self.main_progress_bar.set_postfix(trainer.progress_bar_dict)
82 | self.val_progress_bar.close()
83 |
--------------------------------------------------------------------------------
/kami/src/models/feature_extractor/cnn.py:
--------------------------------------------------------------------------------
1 | from typing import Callable, Optional
2 |
3 | import torch
4 | import torch.nn as nn
5 |
6 |
7 | # ref: https://github.com/analokmaus/kaggle-g2net-public/tree/main/models1d_pytorch
8 | class CNNSpectrogram(nn.Module):
9 | def __init__(
10 | self,
11 | in_channels: int = 3,
12 | base_filters: int | tuple = 128,
13 | kernel_sizes: tuple = (32, 16, 4, 2),
14 | stride: int = 4,
15 | sigmoid: bool = False,
16 | output_size: Optional[int] = None,
17 | conv: Callable = nn.Conv1d,
18 | reinit: bool = True,
19 | ):
20 | super().__init__()
21 | self.out_chans = len(kernel_sizes)
22 | self.out_size = output_size
23 | self.sigmoid = sigmoid
24 | if isinstance(base_filters, int):
25 | base_filters = tuple([base_filters])
26 | self.height = base_filters[-1]
27 | self.spec_conv = nn.ModuleList()
28 | for i in range(self.out_chans):
29 | tmp_block = [
30 | conv(
31 | in_channels,
32 | base_filters[0],
33 | kernel_size=kernel_sizes[i],
34 | stride=stride,
35 | padding=(kernel_sizes[i] - 1) // 2,
36 | )
37 | ]
38 | if len(base_filters) > 1:
39 | for j in range(len(base_filters) - 1):
40 | tmp_block = tmp_block + [
41 | nn.BatchNorm1d(base_filters[j]),
42 | nn.ReLU(inplace=True),
43 | conv(
44 | base_filters[j],
45 | base_filters[j + 1],
46 | kernel_size=kernel_sizes[i],
47 | stride=stride,
48 | padding=(kernel_sizes[i] - 1) // 2,
49 | ),
50 | ]
51 | self.spec_conv.append(nn.Sequential(*tmp_block))
52 | else:
53 | self.spec_conv.append(tmp_block[0])
54 |
55 | if self.out_size is not None:
56 | self.pool = nn.AdaptiveAvgPool2d((None, self.out_size))
57 |
58 | if reinit:
59 | for m in self.modules():
60 | if isinstance(m, nn.Conv1d):
61 | nn.init.kaiming_normal_(m.weight)
62 | elif isinstance(m, nn.BatchNorm1d):
63 | nn.init.constant_(m.weight, 1)
64 | nn.init.constant_(m.bias, 0)
65 |
66 | def forward(self, x: torch.Tensor) -> torch.Tensor:
67 | """Forward pass of the model.
68 |
69 | Args:
70 |             x (torch.Tensor): (batch_size, in_channels, time_steps)
71 |
72 | Returns:
73 |             torch.Tensor: (batch_size, out_chans, height, time_steps)
74 | """
75 | # x: (batch_size, in_channels, time_steps)
76 | out: list[torch.Tensor] = []
77 | for i in range(self.out_chans):
78 | out.append(self.spec_conv[i](x))
79 | img = torch.stack(out, dim=1) # (batch_size, out_chans, height, time_steps)
80 | if self.out_size is not None:
81 | img = self.pool(img) # (batch_size, out_chans, height, out_size)
82 | if self.sigmoid:
83 | img = img.sigmoid()
84 | return img
85 |
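
A shape sanity check with the defaults defined above: four kernel sizes give `out_chans=4`, a scalar `base_filters` of 128 gives `height=128`, and `stride=4` divides the time axis by 4.

```python
import torch

from src.models.feature_extractor.cnn import CNNSpectrogram

fe = CNNSpectrogram()  # in_channels=3, kernel_sizes=(32, 16, 4, 2), stride=4
x = torch.randn(2, 3, 128)
print(fe(x).shape)     # torch.Size([2, 4, 128, 32])
```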
--------------------------------------------------------------------------------
/shimacos/yamls/stacking.yaml:
--------------------------------------------------------------------------------
1 | defaults:
2 | - feature: "stacking_001"
3 | # - override hydra/job_logging: colorlog
4 | # - override hydra/hydra_logging: colorlog
5 |
6 | workdir: ${store.workdir}
7 | seed: 777
8 |
9 | feature:
10 | sakami_model_names: ["148_gru_scale_factor", "156_gru_transformer_residual"]
11 | kami_model_names: ["exp068_transformer", "exp078_lstm"]
12 | label_col: label
13 | pred_col: ${feature.label_col}_pred
14 |
15 | lgbm:
16 | n_fold: 5
17 | numerical_cols: ${feature.numerical_cols}
18 | cat_cols: ${feature.cat_cols}
19 | label_col: ${feature.label_col}
20 | pred_col: ${feature.pred_col}
21 | early_stopping_rounds: 200
22 | verbose_eval: 100
23 | negative_sampling: False
24 | batch_fit: False
25 | batch_iter: 500
26 | params:
27 | lambda_l1: 0.1
28 | lambda_l2: 0.1
29 | num_leaves: 32
30 | label_gain:
31 | lambdarank_truncation_level:
32 | feature_fraction: 0.8
33 | bagging_fraction: 0.8
34 | bagging_freq: 1
35 | min_child_samples: 10
36 | task: train
37 | boosting_type: gbdt
38 | objective: binary
39 | metric: binary_logloss
40 | max_depth: 8
41 | learning_rate: 0.01
42 | num_thread: -1
43 | max_bin: 256
44 | verbose: -1
45 | device: cpu
46 | scale_pos_weight: 1
47 | seed: ${seed}
48 | num_class: 1
49 | is_unbalance: False
50 |
51 | xgb:
52 |   feature_cols: [""] # overwritten inside the function
53 | cat_cols:
54 | label_col: "label"
55 | early_stopping_rounds: 200
56 | verbose_eval: 100
57 | params:
58 | alpha: 0.1
59 | reg_lambda: 0.1
60 | max_leaves: 16
61 | colsample_bytree: 1.0
62 | subsample: 0.8
63 | min_child_weight: 10
64 | booster: gbtree
65 | objective: binary:logistic
66 | eval_metric: auc
67 | max_depth: 6
68 | learning_rate: 0.01
69 | nthread: -1
70 | max_bin: 256
71 | tree_method: gpu_hist
72 | scale_pos_weight: 1
73 | seed: ${seed}
74 |
75 | catboost:
76 | n_fold: 5
77 | numerical_cols: ${feature.numerical_cols}
78 | cat_cols: ${feature.cat_cols}
79 | label_col: ${feature.label_col}
80 | pred_col: ${feature.pred_col}
81 | early_stopping_rounds: 200
82 | verbose_eval: 100
83 | categorical_features_indices:
84 | negative_sampling: False
85 | params:
86 | task_type: GPU
87 | iterations: 100000
88 | loss_function: Logloss
89 | eval_metric: Logloss
90 | custom_metric: Logloss
91 | bootstrap_type: Bernoulli
92 | subsample: 0.8
93 | max_depth: 8
94 | max_ctr_complexity: 4
95 | learning_rate: 0.05
96 | max_bin: 254
97 | verbose: 100
98 | devices: "0"
99 | use_best_model: True
100 | od_type: Iter
101 | od_wait: 100
102 | random_seed: ${seed}
103 | gpu_ram_part: 0.95
104 | allow_writing_files: False
105 |
106 | store:
107 | workdir: ${oc.env:PWD}
108 | model_name: stacking_${feature.version}
109 | root_path: ${store.workdir}/output/${store.model_name}
110 | save_path: ${store.workdir}/output/${store.model_name}
111 | model_path: ${store.workdir}/output/${store.model_name}/model
112 | log_path: ${store.workdir}/output/${store.model_name}/logs
113 | result_path: ${store.workdir}/output/${store.model_name}/result
114 | gcs_path: shimacos/${store.model_name}
115 | save_feature: False
116 | wandb_project:
117 | gcs_project:
118 | bucket_name:
119 |
120 | hydra:
121 | run:
122 | dir: ${store.save_path}/logs
123 | sweep:
124 | dir: ${store.save_path}
125 | subdir: log
126 | job:
127 | chdir: False
128 |
--------------------------------------------------------------------------------
/kami/src/models/spec1D.py:
--------------------------------------------------------------------------------
1 | from typing import Optional
2 | from omegaconf import DictConfig
3 |
4 | import torch
5 | import torch.nn as nn
6 |
7 | from src.augmentation.cutmix import Cutmix
8 | from src.augmentation.mixup import Mixup
9 |
10 | from src.models.loss.tolerance import ToleranceLoss
11 | from src.models.loss.tolerance_mse import ToleranceMSELoss
12 | from src.models.loss.bce import BCEWithLogitsLoss
13 | from src.models.loss.tolerance_nonzero import ToleranceNonZeroLoss
14 |
15 |
16 | class Spec1D(nn.Module):
17 | def __init__(
18 | self,
19 | cfg: DictConfig,
20 | feature_extractor: nn.Module,
21 | decoder: nn.Module,
22 | mixup_alpha: float = 0.5,
23 | cutmix_alpha: float = 0.5,
24 | ):
25 | super().__init__()
26 | self.feature_extractor = feature_extractor
27 | self.decoder = decoder
28 | self.channels_fc = nn.Linear(feature_extractor.out_chans, 1)
29 | self.mixup = Mixup(mixup_alpha)
30 | self.cutmix = Cutmix(cutmix_alpha)
31 | self.loss_weight = torch.tensor(cfg.loss.loss_weight) if "loss_weight" in cfg.loss else None
32 | self.label_weight = torch.tensor(cfg.label_weight) if "label_weight" in cfg else None
33 | self.pos_weight = torch.tensor(cfg.pos_weight) if "pos_weight" in cfg else None
34 |
35 | if cfg.loss.name == "tolerance":
36 | self.loss_fn = ToleranceLoss(
37 | loss_weight=self.loss_weight, label_weight=self.label_weight, pos_weight=self.pos_weight
38 | )
39 | elif cfg.loss.name == "tolerance_mse":
40 | self.loss_fn = ToleranceMSELoss(
41 | loss_weight=self.loss_weight, label_weight=self.label_weight, pos_weight=self.pos_weight
42 | )
43 | elif cfg.loss.name == "tolerance_nonzero":
44 | self.loss_fn = ToleranceNonZeroLoss(
45 | loss_weight=self.loss_weight, label_weight=self.label_weight, pos_weight=self.pos_weight
46 | )
47 | else:
48 | self.loss_fn = BCEWithLogitsLoss(weight=self.label_weight, pos_weight=self.pos_weight)
49 |
50 | def forward(
51 | self,
52 | x: torch.Tensor,
53 | labels: Optional[torch.Tensor] = None,
54 | masks: Optional[torch.Tensor] = None,
55 | do_mixup: bool = False,
56 | do_cutmix: bool = False,
57 | ) -> dict[str, torch.Tensor]:
58 | """Forward pass of the model.
59 |
60 | Args:
61 | x (torch.Tensor): (batch_size, n_channels, n_timesteps)
62 | labels (Optional[torch.Tensor], optional): (batch_size, n_timesteps, n_classes)
63 | Returns:
64 | dict[str, torch.Tensor]: logits (batch_size, n_timesteps, n_classes)
65 | """
66 | x = self.feature_extractor(x) # (batch_size, n_channels, height, n_timesteps)
67 |
68 | if do_mixup and labels is not None:
69 | x, labels = self.mixup(x, labels)
70 | if do_cutmix and labels is not None:
71 | x, labels = self.cutmix(x, labels)
72 |
73 | # pool over n_channels dimension
74 | x = x.transpose(1, 3) # (batch_size, n_timesteps, height, n_channels)
75 | x = self.channels_fc(x) # (batch_size, n_timesteps, height, 1)
76 | x = x.squeeze(-1).transpose(1, 2) # (batch_size, height, n_timesteps)
77 | logits = self.decoder(x) # (batch_size, n_classes, n_timesteps)
78 |
79 | output = {"logits": logits}
80 | if labels is not None:
81 | loss = self.loss_fn(logits, labels, masks)
82 | output["loss"] = loss
83 |
84 | return output
85 |
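Note: the transpose/linear/squeeze sequence in `forward` is a learned pooling over the channel axis. A standalone shape sketch (dummy sizes, no repo code required):

    import torch
    import torch.nn as nn

    batch_size, n_channels, height, n_timesteps = 2, 4, 8, 16
    x = torch.randn(batch_size, n_channels, height, n_timesteps)

    channels_fc = nn.Linear(n_channels, 1)
    x = x.transpose(1, 3)              # (batch_size, n_timesteps, height, n_channels)
    x = channels_fc(x)                 # (batch_size, n_timesteps, height, 1)
    x = x.squeeze(-1).transpose(1, 2)  # (batch_size, height, n_timesteps)
    assert x.shape == (batch_size, height, n_timesteps)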
--------------------------------------------------------------------------------
/shimacos/components/stacking/sync_batchnorm/replicate.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # File : replicate.py
3 | # Author : Jiayuan Mao
4 | # Email : maojiayuan@gmail.com
5 | # Date : 27/01/2018
6 | #
7 | # This file is part of Synchronized-BatchNorm-PyTorch.
8 | # https://github.com/vacancy/Synchronized-BatchNorm-PyTorch
9 | # Distributed under MIT License.
10 |
11 | import functools
12 |
13 | from torch.nn.parallel.data_parallel import DataParallel
14 |
15 | __all__ = [
16 | 'CallbackContext',
17 | 'execute_replication_callbacks',
18 | 'DataParallelWithCallback',
19 | 'patch_replication_callback'
20 | ]
21 |
22 |
23 | class CallbackContext(object):
24 | pass
25 |
26 |
27 | def execute_replication_callbacks(modules):
28 | """
29 |     Execute a replication callback `__data_parallel_replicate__` on each module created by the original replication.
30 | 
31 |     The callback will be invoked with arguments `__data_parallel_replicate__(ctx, copy_id)`.
32 | 
33 |     Note that, as all replicas are isomorphic, we assign each sub-module a context
34 |     (shared among the copies of that module on different devices).
35 |     Through this context, different copies can share information.
36 | 
37 |     We guarantee that the callback on the master copy (the first copy) is called before the callback
38 |     of any slave copies.
39 | """
40 | master_copy = modules[0]
41 | nr_modules = len(list(master_copy.modules()))
42 | ctxs = [CallbackContext() for _ in range(nr_modules)]
43 |
44 | for i, module in enumerate(modules):
45 | for j, m in enumerate(module.modules()):
46 | if hasattr(m, '__data_parallel_replicate__'):
47 | m.__data_parallel_replicate__(ctxs[j], i)
48 |
49 |
50 | class DataParallelWithCallback(DataParallel):
51 | """
52 | Data Parallel with a replication callback.
53 |
54 |     A replication callback `__data_parallel_replicate__` of each module will be invoked after the replicas are
55 |     created by the original `replicate` function.
56 |     The callback will be invoked with arguments `__data_parallel_replicate__(ctx, copy_id)`.
57 |
58 | Examples:
59 | > sync_bn = SynchronizedBatchNorm1d(10, eps=1e-5, affine=False)
60 | > sync_bn = DataParallelWithCallback(sync_bn, device_ids=[0, 1])
61 | # sync_bn.__data_parallel_replicate__ will be invoked.
62 | """
63 |
64 | def replicate(self, module, device_ids):
65 | modules = super(DataParallelWithCallback, self).replicate(module, device_ids)
66 | execute_replication_callbacks(modules)
67 | return modules
68 |
69 |
70 | def patch_replication_callback(data_parallel):
71 | """
72 |     Monkey-patch an existing `DataParallel` object to add the replication callback.
73 |     Useful when you have a customized `DataParallel` implementation.
74 |
75 | Examples:
76 | > sync_bn = SynchronizedBatchNorm1d(10, eps=1e-5, affine=False)
77 | > sync_bn = DataParallel(sync_bn, device_ids=[0, 1])
78 | > patch_replication_callback(sync_bn)
79 | # this is equivalent to
80 | > sync_bn = SynchronizedBatchNorm1d(10, eps=1e-5, affine=False)
81 | > sync_bn = DataParallelWithCallback(sync_bn, device_ids=[0, 1])
82 | """
83 |
84 | assert isinstance(data_parallel, DataParallel)
85 |
86 | old_replicate = data_parallel.replicate
87 |
88 | @functools.wraps(old_replicate)
89 | def new_replicate(module, device_ids):
90 | modules = old_replicate(module, device_ids)
91 | execute_replication_callbacks(modules)
92 | return modules
93 |
94 | data_parallel.replicate = new_replicate
95 |
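Note: `execute_replication_callbacks` walks `module.modules()` in the same order on every replica, so `ctxs[j]` pairs up the j-th sub-module across devices. A hypothetical module illustrating the callback contract (`WithHook` is an invented name for illustration):

    import torch.nn as nn

    class WithHook(nn.Module):
        def __data_parallel_replicate__(self, ctx, copy_id):
            # ctx is shared by all replicas of this sub-module; copy_id is the replica index.
            # The master replica (copy_id == 0) is guaranteed to run first.
            if copy_id == 0:
                ctx.master = self
            else:
                self.master = ctx.master  # slave replicas can reach the master via ctx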
--------------------------------------------------------------------------------
/sakami/src/utils.py:
--------------------------------------------------------------------------------
1 | import functools
2 | import gc
3 | import os
4 | import random
5 | import time
6 | from contextlib import ContextDecorator
7 | from typing import Any, Callable, Type
8 |
9 | import numpy as np
10 | import torch
11 | from loguru import logger
12 |
13 |
14 | def set_seed(seed: int = 0) -> None:
15 | random.seed(seed)
16 | os.environ["PYTHONHASHSEED"] = str(seed)
17 | np.random.seed(seed)
18 | torch.manual_seed(seed)
19 | torch.cuda.manual_seed(seed)
20 | torch.backends.cudnn.deterministic = True
21 | torch.backends.cudnn.benchmark = False
22 |
23 |
24 | def freeze(cls) -> Type:
25 | """Decorator function for fixing class variables.
26 |
27 | Examples
28 | --------
29 |
30 | >>> @freeze
31 | >>> class config:
32 | >>> x = 10
33 | >>> y = 20
34 |
35 | >>> config.x = 30
36 | ValueError: Cannot overwrite config.
37 | """
38 |
39 | class _Const(type):
40 | """Metaclass of the configuration class.
41 |
42 | Examples
43 | --------
44 |
45 | >>> class config(metaclass=_Const):
46 | >>> x = 10
47 | >>> y = 20
48 |
49 | >>> config.x = 30
50 | ValueError: Cannot overwrite config.
51 |
52 | References
53 | ----------
54 | - https://cream-worker.blog.jp/archives/1077207909.html
55 | """
56 |
57 | def __setattr__(self, name, value):
58 | raise ValueError("Cannot overwrite config.")
59 |
60 | class frozen_cls(cls, metaclass=_Const):
61 | pass
62 |
63 | return frozen_cls
64 |
65 |
66 | class timer(ContextDecorator):
67 | """Context-manager that logs elapsed time of a process.
68 |     Also functions as a decorator. (Make sure to instantiate with parentheses.)
69 | 
70 |     Parameters
71 |     ----------
72 | message : str
73 | The displayed message.
74 |
75 | Examples
76 | --------
77 | - Usage as a context-manager
78 |
79 | >>> with timer('read csv'):
80 | >>> train_df = pd.read_csv(TRAIN_PATH)
81 | [read csv] start.
82 | [read csv] done in 0.1 min.
83 |
84 | - Usage as a decorator
85 |
86 | >>> @timer()
87 | >>> def read_csv():
88 | >>> train_df = pd.read_csv(TRAIN_PATH)
89 | >>> return train_df
90 | >>>
91 | >>> train_df = read_csv()
92 | [read_csv] start.
93 | [read_csv] done in 0.1 min.
94 | """
95 |
96 | def __init__(self, message: str | None = None) -> None:
97 | self.message = message
98 |
99 | def __call__(self, function: Callable) -> Callable:
100 | if self.message is None:
101 | self.message = function.__name__
102 | return super().__call__(function)
103 |
104 | def __enter__(self) -> None:
105 | self.start_time = time.time()
106 | logger.opt(colors=True).info(f"[{self.message}] start.")
107 |
108 | def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None:
109 | if exc_type is None:
110 | elapsed_time = time.time() - self.start_time
111 | logger.opt(colors=True).info(f"[{self.message}] done in {elapsed_time / 60:.1f} min.")
112 |
113 |
114 | def clear_memory(function: Callable) -> Callable:
115 | """Decorator function for clearing memory cache."""
116 |
117 | @functools.wraps(function)
118 | def _clear_memory(*args, **kwargs):
119 |         result = function(*args, **kwargs)
120 |         gc.collect()
121 |         torch.cuda.empty_cache()
122 |         return result
123 | 
124 |     return _clear_memory
125 | 
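A hedged usage sketch of the helpers above (the function body is a stand-in for a GPU-heavy step; `clear_memory` passes the wrapped function's return value through):

    @clear_memory
    @timer("heavy step")
    def heavy_step(n: int) -> int:
        return sum(range(n))  # stand-in for a GPU-heavy computation

    total = heavy_step(10_000)  # logs "[heavy step] start." then "[heavy step] done in 0.0 min."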
--------------------------------------------------------------------------------
/shimacos/components/factories/scheduler_factory.py:
--------------------------------------------------------------------------------
1 | from torch.optim.lr_scheduler import _LRScheduler
2 | from torch.optim.lr_scheduler import ReduceLROnPlateau
3 |
4 |
5 | class GradualWarmupScheduler(_LRScheduler):
6 | """Gradually warm-up(increasing) learning rate in optimizer.
7 | Proposed in 'Accurate, Large Minibatch SGD: Training ImageNet in 1 Hour'.
8 | Args:
9 | optimizer (Optimizer): Wrapped optimizer.
10 | multiplier: target learning rate = base lr * multiplier
11 | total_epoch: target learning rate is reached at total_epoch, gradually
12 | after_scheduler: after target_epoch, use this scheduler(eg. ReduceLROnPlateau)
13 | """
14 |
15 | def __init__(self, optimizer, multiplier, total_epoch, after_scheduler=None):
16 | self.multiplier = multiplier
17 | if self.multiplier <= 1.0:
18 | raise ValueError("multiplier should be greater than 1.")
19 | self.total_epoch = total_epoch
20 | self.after_scheduler = after_scheduler
21 | self.finished = False
22 | super().__init__(optimizer)
23 |
24 | def get_lr(self):
25 | if self.last_epoch > self.total_epoch:
26 | if self.after_scheduler:
27 | if not self.finished:
28 | self.after_scheduler.base_lrs = [
29 | base_lr * self.multiplier for base_lr in self.base_lrs
30 | ]
31 | self.finished = True
32 | return self.after_scheduler.get_lr()
33 | return [base_lr * self.multiplier for base_lr in self.base_lrs]
34 |
35 | return [
36 | base_lr
37 | * ((self.multiplier - 1.0) * self.last_epoch / self.total_epoch + 1.0)
38 | for base_lr in self.base_lrs
39 | ]
40 |
41 | def step_ReduceLROnPlateau(self, metrics, epoch=None):
42 | if epoch is None:
43 | epoch = self.last_epoch + 1
44 | self.last_epoch = (
45 | epoch if epoch != 0 else 1
46 | ) # ReduceLROnPlateau is called at the end of epoch, whereas others are called at beginning
47 | if self.last_epoch <= self.total_epoch:
48 | warmup_lr = [
49 | base_lr
50 | * ((self.multiplier - 1.0) * self.last_epoch / self.total_epoch + 1.0)
51 | for base_lr in self.base_lrs
52 | ]
53 | for param_group, lr in zip(self.optimizer.param_groups, warmup_lr):
54 | param_group["lr"] = lr
55 | else:
56 | if epoch is None:
57 | self.after_scheduler.step(metrics, None)
58 | else:
59 | self.after_scheduler.step(metrics, epoch - self.total_epoch)
60 |
61 | def step(self, epoch=None, metrics=None):
62 | if type(self.after_scheduler) != ReduceLROnPlateau:
63 | if self.finished and self.after_scheduler:
64 | if epoch is None:
65 | self.after_scheduler.step(None)
66 | else:
67 | self.after_scheduler.step(epoch - self.total_epoch)
68 | else:
69 | return super(GradualWarmupScheduler, self).step(epoch)
70 | else:
71 | self.step_ReduceLROnPlateau(metrics, epoch)
72 |
73 |
74 | def get_warmup_scheduler(optimizer, multiplier, total_epoch, after_scheduler):
75 | scheduler = GradualWarmupScheduler(
76 | optimizer, multiplier, total_epoch, after_scheduler
77 | )
78 | return scheduler
79 |
80 |
81 | def get_plateau_scheduler(optimizer):
82 | scheduler = ReduceLROnPlateau(optimizer, mode="max", factor=0.5, patience=2)
83 | return scheduler
84 |
85 |
86 | def get_scheduler(scheduler_class, **params):
87 | print("scheduler class:", scheduler_class)
88 | f = globals().get(scheduler_class)
89 | return f(**params)
90 |
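A minimal sketch of wiring the factory together (assumed usage; the cosine schedule and all sizes here are illustrative, not a config from the repo):

    import torch
    from torch.optim.lr_scheduler import CosineAnnealingLR

    model = torch.nn.Linear(10, 1)
    optimizer = torch.optim.SGD(model.parameters(), lr=1e-3)

    # LR ramps from 1e-3 toward 1e-2 over 5 epochs, then cosine-anneals
    cosine = CosineAnnealingLR(optimizer, T_max=30)
    scheduler = get_scheduler(
        "get_warmup_scheduler",
        optimizer=optimizer,
        multiplier=10,
        total_epoch=5,
        after_scheduler=cosine,
    )

    for epoch in range(35):
        optimizer.step()
        scheduler.step()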
--------------------------------------------------------------------------------
/kami/src/utils/score.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import polars as pl
3 | from tqdm.auto import tqdm
4 |
5 | from src.utils.metrics import event_detection_ap
6 | from src.utils.post_process import post_process_for_seg, post_process_for_seg_group_by_day
7 |
8 |
9 | def score_group_by_day(val_event_df: pl.DataFrame, keys: list[str], preds: np.ndarray, val_df: pl.DataFrame) -> float:
10 | """
11 |     Detect the maximum-score event for each day and score the result as a submission.
12 | 
13 |     Args:
14 |         val_event_df (pl.DataFrame): Ground truth
15 |         keys (list[str]): keys of the predicted chunks ({series_id}_{chunk_id})
16 | preds (np.ndarray): (chunk_num, duration, 2)
17 | val_df (pl.DataFrame): sequence
18 | """
19 | submission_df = post_process_for_seg_group_by_day(keys, preds, val_df)
20 | score = event_detection_ap(
21 | val_event_df.to_pandas(),
22 | submission_df.to_pandas(),
23 | )
24 | return score
25 |
26 |
27 | def score_ternary_search_distance(
28 | val_event_df: pl.DataFrame, keys: list[str], preds: np.ndarray, score_th: float = 0.005
29 | ) -> tuple[float, float]:
30 | """
31 |     Search the `distance` parameter of post_process_for_seg by ternary search.
32 | """
33 | l = 5
34 | r = 100
35 |
36 | cnt = 0
37 | best_score = 0.0
38 | best_distance = 0
39 |
40 | for cnt in tqdm(range(30)):
41 | if r - l < 1:
42 | break
43 | m1 = int(l + (r - l) / 3)
44 | m2 = int(r - (r - l) / 3)
45 | score1 = event_detection_ap(
46 | val_event_df.to_pandas(),
47 | post_process_for_seg(
48 | keys=keys,
49 | preds=preds,
50 | score_th=score_th,
51 | distance=m1,
52 | ).to_pandas(),
53 | )
54 | score2 = event_detection_ap(
55 | val_event_df.to_pandas(),
56 | post_process_for_seg(
57 | keys=keys,
58 | preds=preds,
59 | score_th=score_th,
60 | distance=m2,
61 | ).to_pandas(),
62 | )
63 |
64 | if score1 >= score2:
65 | r = m2
66 | best_score = score1
67 | best_distance = m1
68 |
69 | else:
70 | l = m1
71 | best_score = score2
72 | best_distance = m2
73 |
74 | tqdm.write(f"score1(m1): {score1:.5f}({m1:.5f}), score2(m2): {score2:.5f}({m2:.5f}), l: {l:.5f}, r: {r:.5f}")
75 |
76 | if abs(m2 - m1) <= 2:
77 | break
78 |
79 | return best_score, best_distance
80 |
81 |
82 | def score_ternary_search_th(
83 | val_event_df: pl.DataFrame, keys: list[str], preds: np.ndarray, distance: int = 5000
84 | ) -> tuple[float, float]:
85 | """
86 |     Search the `score_th` parameter of post_process_for_seg by ternary search.
87 | """
88 | l = 0.0
89 | r = 1.0
90 |
91 | cnt = 0
92 | best_score = 0.0
93 | best_th = 0.0
94 |
95 | for cnt in tqdm(range(30)):
96 | if r - l < 0.01:
97 | break
98 | m1 = l + (r - l) / 3
99 | m2 = r - (r - l) / 3
100 | score1 = event_detection_ap(
101 | val_event_df.to_pandas(),
102 | post_process_for_seg(
103 | keys=keys,
104 | preds=preds,
105 | score_th=m1,
106 | distance=distance,
107 | ).to_pandas(),
108 | )
109 | score2 = event_detection_ap(
110 | val_event_df.to_pandas(),
111 | post_process_for_seg(
112 | keys=keys,
113 | preds=preds,
114 | score_th=m2,
115 | distance=distance,
116 | ).to_pandas(),
117 | )
118 | if score1 >= score2:
119 | r = m2
120 | best_score = score1
121 | best_th = m1
122 | else:
123 | l = m1
124 | best_score = score2
125 | best_th = m2
126 |
127 | tqdm.write(f"score1(m1): {score1:.5f}({m1:.5f}), score2(m2): {score2:.5f}({m2:.5f}), l: {l:.5f}, r: {r:.5f}")
128 |
129 | return best_score, best_th
130 |
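Both searches above are instances of the same pattern: ternary search over a score curve assumed to be unimodal in the parameter. A generic sketch, with `f` standing in for the AP evaluation:

    def ternary_search(f, lo: float, hi: float, iters: int = 30, eps: float = 0.01) -> tuple[float, float]:
        """Maximize a unimodal function f on [lo, hi]; returns (best_score, best_x)."""
        best_score, best_x = float("-inf"), lo
        for _ in range(iters):
            if hi - lo < eps:
                break
            m1 = lo + (hi - lo) / 3
            m2 = hi - (hi - lo) / 3
            s1, s2 = f(m1), f(m2)
            if s1 >= s2:
                hi = m2  # the maximum cannot lie in (m2, hi]
                if s1 > best_score:
                    best_score, best_x = s1, m1
            else:
                lo = m1  # the maximum cannot lie in [lo, m1)
                if s2 > best_score:
                    best_score, best_x = s2, m2
        return best_score, best_x

    best_score, best_th = ternary_search(lambda th: -(th - 0.3) ** 2, 0.0, 1.0)  # peak near 0.3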
--------------------------------------------------------------------------------
/kami/.gitignore:
--------------------------------------------------------------------------------
1 | /input
2 | /output
3 | /processed
4 | /wandb
5 | /.keras
6 | /.config
7 | /.py3
8 | /.nv
9 | /.local
10 | /.ipython
11 | /.virtual_documents
12 | /.jupyter
13 | /.cupy
14 | .netrc
15 | /notebook/wandb
16 | /data
17 | /db
18 | .bash_history
19 | *.parquet
20 | # Byte-compiled / optimized / DLL files
21 | __pycache__/
22 | *.py[cod]
23 | *$py.class
24 |
25 | # C extensions
26 | *.so
27 |
28 | # Distribution / packaging
29 | .Python
30 | build/
31 | develop-eggs/
32 | dist/
33 | downloads/
34 | eggs/
35 | .eggs/
36 | lib/
37 | lib64/
38 | parts/
39 | sdist/
40 | var/
41 | wheels/
42 | share/python-wheels/
43 | *.egg-info/
44 | .installed.cfg
45 | *.egg
46 | MANIFEST
47 |
48 | # PyInstaller
49 | # Usually these files are written by a python script from a template
50 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
51 | *.manifest
52 | *.spec
53 |
54 | # Installer logs
55 | pip-log.txt
56 | pip-delete-this-directory.txt
57 |
58 | # Unit test / coverage reports
59 | htmlcov/
60 | .tox/
61 | .nox/
62 | .coverage
63 | .coverage.*
64 | .cache
65 | nosetests.xml
66 | coverage.xml
67 | *.cover
68 | *.py,cover
69 | .hypothesis/
70 | .pytest_cache/
71 | cover/
72 |
73 | # Translations
74 | *.mo
75 | *.pot
76 |
77 | # Django stuff:
78 | *.log
79 | local_settings.py
80 | db.sqlite3
81 | db.sqlite3-journal
82 |
83 | # Flask stuff:
84 | instance/
85 | .webassets-cache
86 |
87 | # Scrapy stuff:
88 | .scrapy
89 |
90 | # Sphinx documentation
91 | docs/_build/
92 |
93 | # PyBuilder
94 | .pybuilder/
95 | target/
96 |
97 | # Jupyter Notebook
98 | .ipynb_checkpoints
99 |
100 | # IPython
101 | profile_default/
102 | ipython_config.py
103 |
104 | # pyenv
105 | # For a library or package, you might want to ignore these files since the code is
106 | # intended to run in multiple environments; otherwise, check them in:
107 | # .python-version
108 |
109 | # pipenv
110 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
111 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
112 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
113 | # install all needed dependencies.
114 | #Pipfile.lock
115 |
116 | # poetry
117 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
118 | # This is especially recommended for binary packages to ensure reproducibility, and is more
119 | # commonly ignored for libraries.
120 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
121 | #poetry.lock
122 |
123 | # pdm
124 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
125 | #pdm.lock
126 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
127 | # in version control.
128 | # https://pdm.fming.dev/#use-with-ide
129 | .pdm.toml
130 |
131 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
132 | __pypackages__/
133 |
134 | # Celery stuff
135 | celerybeat-schedule
136 | celerybeat.pid
137 |
138 | # SageMath parsed files
139 | *.sage.py
140 |
141 | # Environments
142 | .env
143 | .venv
144 | env/
145 | venv/
146 | ENV/
147 | env.bak/
148 | venv.bak/
149 |
150 | # Spyder project settings
151 | .spyderproject
152 | .spyproject
153 |
154 | # Rope project settings
155 | .ropeproject
156 |
157 | # mkdocs documentation
158 | /site
159 |
160 | # mypy
161 | .mypy_cache/
162 | .dmypy.json
163 | dmypy.json
164 |
165 | # Pyre type checker
166 | .pyre/
167 |
168 | # pytype static type analyzer
169 | .pytype/
170 |
171 | # Cython debug symbols
172 | cython_debug/
173 |
174 | # PyCharm
175 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
176 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
177 | # and can be added to the global gitignore or merged into this file. For a more nuclear
178 | # option (not recommended) you can uncomment the following to ignore the entire idea folder.
179 | #.idea/
180 |
--------------------------------------------------------------------------------
/shimacos/yamls/feature/stacking_009.yaml:
--------------------------------------------------------------------------------
1 | cat_cols:
2 | - time_idx
3 | - hour
4 | - minute
5 | - minute_mod15
6 | - weekday
7 | label_col: label
8 | numerical_cols:
9 | - pred_onset_148_gru_scale_factor
10 | - pred_onset_156_gru_transformer_residual
11 | - pred_onset_163_gru_sleep_target
12 | - pred_onset_exp068_transformer
13 | - pred_onset_exp078_lstm
14 | - pred_wakeup_148_gru_scale_factor
15 | - pred_wakeup_156_gru_transformer_residual
16 | - pred_wakeup_163_gru_sleep_target
17 | - pred_wakeup_exp068_transformer
18 | - pred_wakeup_exp078_lstm
19 | - pred_sleep_exp068_transformer
20 | - pred_sleep_exp078_lstm
21 | - pred_sleep_163_gru_sleep_target
22 | - anglez
23 | - enmo
24 | - prediction_onset
25 | - prediction_onset_min
26 | - prediction_onset_max
27 | - prediction_wakeup
28 | - prediction_wakeup_min
29 | - prediction_wakeup_max
30 | - periodicity
31 | - activity_count
32 | - lid
33 | - enmo_3_rolling_mean
34 | - enmo_3_rolling_std
35 | - enmo_3_rolling_max
36 | - anglez_3_rolling_mean
37 | - anglez_3_rolling_std
38 | - anglez_3_rolling_max
39 | - lid_3_rolling_mean
40 | - lid_3_rolling_std
41 | - lid_3_rolling_max
42 | - enmo_diff_prev3_rolling_mean
43 | - anglez_diff_prev3_rolling_mean
44 | - lid_diff_prev3_rolling_mean
45 | - enmo_diff_lead3_rolling_mean
46 | - anglez_diff_lead3_rolling_mean
47 | - lid_diff_lead3_rolling_mean
48 | - enmo_diff_lag
49 | - anglez_diff_lag
50 | - lid_diff_lag
51 | - pred_onset_148_gru_scale_factor_diff_prev3_rolling_mean
52 | - pred_onset_156_gru_transformer_residual_diff_prev3_rolling_mean
53 | - pred_onset_163_gru_sleep_target_diff_prev3_rolling_mean
54 | - pred_onset_exp068_transformer_diff_prev3_rolling_mean
55 | - pred_onset_exp078_lstm_diff_prev3_rolling_mean
56 | - pred_wakeup_148_gru_scale_factor_diff_prev3_rolling_mean
57 | - pred_wakeup_156_gru_transformer_residual_diff_prev3_rolling_mean
58 | - pred_wakeup_163_gru_sleep_target_diff_prev3_rolling_mean
59 | - pred_wakeup_exp068_transformer_diff_prev3_rolling_mean
60 | - pred_wakeup_exp078_lstm_diff_prev3_rolling_mean
61 | - pred_onset_148_gru_scale_factor_diff_prev5_rolling_mean
62 | - pred_onset_156_gru_transformer_residual_diff_prev5_rolling_mean
63 | - pred_onset_163_gru_sleep_target_diff_prev5_rolling_mean
64 | - pred_onset_exp068_transformer_diff_prev5_rolling_mean
65 | - pred_onset_exp078_lstm_diff_prev5_rolling_mean
66 | - pred_wakeup_148_gru_scale_factor_diff_prev5_rolling_mean
67 | - pred_wakeup_156_gru_transformer_residual_diff_prev5_rolling_mean
68 | - pred_wakeup_163_gru_sleep_target_diff_prev5_rolling_mean
69 | - pred_wakeup_exp068_transformer_diff_prev5_rolling_mean
70 | - pred_wakeup_exp078_lstm_diff_prev5_rolling_mean
71 | - pred_onset_148_gru_scale_factor_diff_lead3_rolling_mean
72 | - pred_onset_156_gru_transformer_residual_diff_lead3_rolling_mean
73 | - pred_onset_163_gru_sleep_target_diff_lead3_rolling_mean
74 | - pred_onset_exp068_transformer_diff_lead3_rolling_mean
75 | - pred_onset_exp078_lstm_diff_lead3_rolling_mean
76 | - pred_wakeup_148_gru_scale_factor_diff_lead3_rolling_mean
77 | - pred_wakeup_156_gru_transformer_residual_diff_lead3_rolling_mean
78 | - pred_wakeup_163_gru_sleep_target_diff_lead3_rolling_mean
79 | - pred_wakeup_exp068_transformer_diff_lead3_rolling_mean
80 | - pred_wakeup_exp078_lstm_diff_lead3_rolling_mean
81 | - pred_onset_148_gru_scale_factor_diff_lead5_rolling_mean
82 | - pred_onset_156_gru_transformer_residual_diff_lead5_rolling_mean
83 | - pred_onset_163_gru_sleep_target_diff_lead5_rolling_mean
84 | - pred_onset_exp068_transformer_diff_lead5_rolling_mean
85 | - pred_onset_exp078_lstm_diff_lead5_rolling_mean
86 | - pred_wakeup_148_gru_scale_factor_diff_lead5_rolling_mean
87 | - pred_wakeup_156_gru_transformer_residual_diff_lead5_rolling_mean
88 | - pred_wakeup_163_gru_sleep_target_diff_lead5_rolling_mean
89 | - pred_wakeup_exp068_transformer_diff_lead5_rolling_mean
90 | - pred_wakeup_exp078_lstm_diff_lead5_rolling_mean
91 | - pred_onset_148_gru_scale_factor_diff_lag
92 | - pred_onset_156_gru_transformer_residual_diff_lag
93 | - pred_onset_163_gru_sleep_target_diff_lag
94 | - pred_onset_exp068_transformer_diff_lag
95 | - pred_onset_exp078_lstm_diff_lag
96 | - pred_wakeup_148_gru_scale_factor_diff_lag
97 | - pred_wakeup_156_gru_transformer_residual_diff_lag
98 | - pred_wakeup_163_gru_sleep_target_diff_lag
99 | - pred_wakeup_exp068_transformer_diff_lag
100 | - pred_wakeup_exp078_lstm_diff_lag
101 | pred_col: label_pred
102 | version: 009
103 |
--------------------------------------------------------------------------------
/shimacos/components/factories/optimizer_factory.py:
--------------------------------------------------------------------------------
1 | import math
2 | import torch
3 | from torch.optim.optimizer import Optimizer
4 | from torch.optim import Adam, SGD, AdamW
5 |
6 |
7 | class RAdam(Optimizer):
8 | def __init__(
9 | self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=1e-4
10 | ):
11 | defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay)
12 | self.buffer = [[None, None, None] for ind in range(10)]
13 | super(RAdam, self).__init__(params, defaults)
14 |
15 | def __setstate__(self, state):
16 | super(RAdam, self).__setstate__(state)
17 |
18 | def step(self, closure=None):
19 |
20 | loss = None
21 | if closure is not None:
22 | loss = closure()
23 |
24 | for group in self.param_groups:
25 |
26 | for p in group["params"]:
27 | if p.grad is None:
28 | continue
29 | grad = p.grad.data.float()
30 | if grad.is_sparse:
31 | raise RuntimeError("RAdam does not support sparse gradients")
32 |
33 | p_data_fp32 = p.data.float()
34 |
35 | state = self.state[p]
36 |
37 | if len(state) == 0:
38 | state["step"] = 0
39 | state["exp_avg"] = torch.zeros_like(p_data_fp32)
40 | state["exp_avg_sq"] = torch.zeros_like(p_data_fp32)
41 | else:
42 | state["exp_avg"] = state["exp_avg"].type_as(p_data_fp32)
43 | state["exp_avg_sq"] = state["exp_avg_sq"].type_as(p_data_fp32)
44 |
45 | exp_avg, exp_avg_sq = state["exp_avg"], state["exp_avg_sq"]
46 | beta1, beta2 = group["betas"]
47 |
48 |                 exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
49 |                 exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
50 |
51 | state["step"] += 1
52 | buffered = self.buffer[int(state["step"] % 10)]
53 | if state["step"] == buffered[0]:
54 | N_sma, step_size = buffered[1], buffered[2]
55 | else:
56 | buffered[0] = state["step"]
57 | beta2_t = beta2 ** state["step"]
58 | N_sma_max = 2 / (1 - beta2) - 1
59 | N_sma = N_sma_max - 2 * state["step"] * beta2_t / (1 - beta2_t)
60 | buffered[1] = N_sma
61 |
62 | # more conservative since it's an approximated value
63 | if N_sma >= 5:
64 | step_size = math.sqrt(
65 | (1 - beta2_t)
66 | * (N_sma - 4)
67 | / (N_sma_max - 4)
68 | * (N_sma - 2)
69 | / N_sma
70 | * N_sma_max
71 | / (N_sma_max - 2)
72 | ) / (1 - beta1 ** state["step"])
73 | else:
74 | step_size = 1.0 / (1 - beta1 ** state["step"])
75 | buffered[2] = step_size
76 |
77 | if group["weight_decay"] != 0:
78 |                     p_data_fp32.add_(p_data_fp32, alpha=-group["weight_decay"] * group["lr"])
79 |
80 | # more conservative since it's an approximated value
81 |                 if N_sma >= 5:
82 |                     denom = exp_avg_sq.sqrt().add_(group["eps"])
83 |                     p_data_fp32.addcdiv_(exp_avg, denom, value=-step_size * group["lr"])
84 |                 else:
85 |                     p_data_fp32.add_(exp_avg, alpha=-step_size * group["lr"])
86 |
87 | p.data.copy_(p_data_fp32)
88 |
89 | return loss
90 |
91 |
92 | def get_adam(params, lr):
93 | optimizer = Adam(params=params, lr=lr)
94 | return optimizer
95 |
96 |
97 | def get_adamw(params, lr):
98 | optimizer = AdamW(params=params, lr=lr)
99 | return optimizer
100 |
101 |
102 | def get_sgd(params, lr):
103 | optimizer = SGD(params=params, lr=lr, momentum=0.9, weight_decay=1e-4)
104 | return optimizer
105 |
106 |
107 | def get_radam(params, lr):
108 | optimizer = RAdam(params=params, lr=lr)
109 | return optimizer
110 |
111 |
112 | def get_optimizer(opt_class, **params):
113 | print("optimizer class:", opt_class)
114 | f = globals().get(opt_class)
115 | return f(**params)
116 |
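Assumed usage of the factory: the `opt_class` string must match one of the `get_*` function names defined in this module, and the remaining keyword arguments are forwarded to it.

    import torch

    model = torch.nn.Linear(8, 1)
    optimizer = get_optimizer("get_radam", params=model.parameters(), lr=1e-3)

    loss = model(torch.randn(4, 8)).pow(2).mean()
    loss.backward()
    optimizer.step()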
--------------------------------------------------------------------------------
/kami/src/models/spec2Dcnn.py:
--------------------------------------------------------------------------------
1 | from typing import Optional
2 | from omegaconf import DictConfig
3 |
4 | import segmentation_models_pytorch as smp
5 | import torch
6 | import torch.nn as nn
7 |
8 | from src.augmentation.cutmix import Cutmix
9 | from src.augmentation.mixup import Mixup
10 |
11 | from src.models.loss.tolerance import ToleranceLoss
12 | from src.models.loss.tolerance_mse import ToleranceMSELoss
13 | from src.models.loss.bce import BCEWithLogitsLoss
14 | from src.models.loss.tolerance_nonzero import ToleranceNonZeroLoss
15 | from src.models.loss.focal import FocalLoss
16 | from src.models.loss.focal_bce import FocalBCELoss
17 |
18 |
19 | class Spec2DCNN(nn.Module):
20 | def __init__(
21 | self,
22 | cfg: DictConfig,
23 | feature_extractor: nn.Module,
24 | decoder: nn.Module,
25 | encoder_name: str,
26 | in_channels: int,
27 | encoder_weights: Optional[str] = None,
28 | mixup_alpha: float = 0.5,
29 | cutmix_alpha: float = 0.5,
30 | ):
31 | super().__init__()
32 | self.cfg = cfg
33 | self.feature_extractor = feature_extractor
34 | self.encoder = smp.Unet(
35 | encoder_name=encoder_name,
36 | encoder_weights=encoder_weights,
37 | in_channels=in_channels,
38 | classes=1,
39 | )
40 | self.decoder = decoder
41 | self.mixup = Mixup(mixup_alpha)
42 | self.cutmix = Cutmix(cutmix_alpha)
43 | self.loss_weight = torch.tensor(cfg.loss.loss_weight) if "loss_weight" in cfg.loss else None
44 | self.label_weight = torch.tensor(cfg.label_weight) if "label_weight" in cfg else None
45 | self.pos_weight = torch.tensor(cfg.pos_weight) if "pos_weight" in cfg else None
46 | self.loss_fn = None
47 | self.update_loss_fn()
48 |
49 | def update_loss_fn(self, sleep_decay: float = 1.0) -> None:
50 | self.label_weight[0] = self.label_weight[0] * sleep_decay
51 | if self.cfg.loss.name == "tolerance":
52 | self.loss_fn = ToleranceLoss(
53 | loss_weight=self.loss_weight, label_weight=self.label_weight, pos_weight=self.pos_weight
54 | )
55 | elif self.cfg.loss.name == "tolerance_mse":
56 | self.loss_fn = ToleranceMSELoss(
57 | loss_weight=self.loss_weight, label_weight=self.label_weight, pos_weight=self.pos_weight
58 | )
59 | elif self.cfg.loss.name == "tolerance_nonzero":
60 | self.loss_fn = ToleranceNonZeroLoss(
61 | loss_weight=self.loss_weight, label_weight=self.label_weight, pos_weight=self.pos_weight
62 | )
63 | elif self.cfg.loss.name == "focal":
64 | self.loss_fn = FocalLoss(
65 | alpha=self.cfg.loss.alpha,
66 | gamma=self.cfg.loss.gamma,
67 | )
68 | elif self.cfg.loss.name == "focal_bce":
69 | self.loss_fn = FocalBCELoss(
70 | alpha=self.cfg.loss.alpha,
71 | gamma=self.cfg.loss.gamma,
72 | weight=torch.tensor(self.cfg.loss.weight),
73 | )
74 | else:
75 | self.loss_fn = BCEWithLogitsLoss(weight=self.label_weight, pos_weight=self.pos_weight)
76 |
77 | self.loss_fn = self.loss_fn.cuda()
78 |
79 | def forward(
80 | self,
81 | x: torch.Tensor,
82 | labels: Optional[torch.Tensor] = None,
83 | masks: Optional[torch.Tensor] = None,
84 | do_mixup: bool = False,
85 | do_cutmix: bool = False,
86 | ) -> dict[str, torch.Tensor]:
87 | """Forward pass of the model.
88 |
89 | Args:
90 | x (torch.Tensor): (batch_size, n_channels, n_timesteps)
91 | labels (Optional[torch.Tensor], optional): (batch_size, n_timesteps, n_classes)
92 | Returns:
93 | dict[str, torch.Tensor]: logits (batch_size, n_timesteps, n_classes)
94 | """
95 | x = self.feature_extractor(x) # (batch_size, n_channels, height, n_timesteps)
96 |
97 | if do_mixup and labels is not None:
98 | x, labels = self.mixup(x, labels)
99 | if do_cutmix and labels is not None:
100 | x, labels = self.cutmix(x, labels)
101 |
102 | x = self.encoder(x).squeeze(1) # (batch_size, height, n_timesteps)
103 | logits = self.decoder(x) # (batch_size, n_timesteps, n_classes)
104 |
105 | output = {"logits": logits}
106 | if labels is not None:
107 | loss = self.loss_fn(logits, labels, masks)
108 | output["loss"] = loss
109 |
110 | return output
111 |
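Note: the encoder treats the (channels, height, timesteps) spectrogram as an image, and `classes=1` collapses it back to a single plane. A standalone shape sketch (encoder name and sizes are illustrative; `encoder_weights=None` avoids a download; spatial dims must be divisible by 32 for the 5-stage encoder):

    import segmentation_models_pytorch as smp
    import torch

    unet = smp.Unet(encoder_name="resnet18", encoder_weights=None, in_channels=4, classes=1)
    x = torch.randn(2, 4, 64, 96)   # (batch_size, n_channels, height, n_timesteps)
    y = unet(x).squeeze(1)          # (batch_size, height, n_timesteps)
    assert y.shape == (2, 64, 96)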
--------------------------------------------------------------------------------
/kami/src/models/spec2DcnnOverlap.py:
--------------------------------------------------------------------------------
1 | from typing import Optional
2 | from omegaconf import DictConfig
3 |
4 | import segmentation_models_pytorch as smp
5 | import torch
6 | import torch.nn as nn
7 |
8 | from src.augmentation.cutmix import Cutmix
9 | from src.augmentation.mixup import Mixup
10 |
11 | from src.models.loss.tolerance import ToleranceLoss
12 | from src.models.loss.tolerance_mse import ToleranceMSELoss
13 | from src.models.loss.bce import BCEWithLogitsLoss
14 | from src.models.loss.tolerance_nonzero import ToleranceNonZeroLoss
15 | from src.models.loss.focal import FocalLoss
16 | from src.models.loss.focal_bce import FocalBCELoss
17 |
18 |
19 | class Spec2DCNNOverlap(nn.Module):
20 | def __init__(
21 | self,
22 | cfg: DictConfig,
23 | feature_extractor: nn.Module,
24 | decoder: nn.Module,
25 | encoder_name: str,
26 | in_channels: int,
27 | encoder_weights: Optional[str] = None,
28 | mixup_alpha: float = 0.5,
29 | cutmix_alpha: float = 0.5,
30 | ):
31 | super().__init__()
32 | self.cfg = cfg
33 | self.overlap = cfg.datamodule.overlap
34 | self.feature_extractor = feature_extractor
35 | self.encoder = smp.Unet(
36 | encoder_name=encoder_name,
37 | encoder_weights=encoder_weights,
38 | in_channels=in_channels,
39 | classes=1,
40 | )
41 | self.decoder = decoder
42 | self.mixup = Mixup(mixup_alpha)
43 | self.cutmix = Cutmix(cutmix_alpha)
44 | self.loss_weight = torch.tensor(cfg.loss.loss_weight) if "loss_weight" in cfg.loss else None
45 | self.label_weight = torch.tensor(cfg.label_weight) if "label_weight" in cfg else None
46 | self.pos_weight = torch.tensor(cfg.pos_weight) if "pos_weight" in cfg else None
47 | self.loss_fn = None
48 | self.update_loss_fn()
49 |
50 | def update_loss_fn(self, sleep_decay: float = 1.0) -> None:
51 | self.label_weight[0] = self.label_weight[0] * sleep_decay
52 | if self.cfg.loss.name == "tolerance":
53 | self.loss_fn = ToleranceLoss(
54 | loss_weight=self.loss_weight, label_weight=self.label_weight, pos_weight=self.pos_weight
55 | )
56 | elif self.cfg.loss.name == "tolerance_mse":
57 | self.loss_fn = ToleranceMSELoss(
58 | loss_weight=self.loss_weight, label_weight=self.label_weight, pos_weight=self.pos_weight
59 | )
60 | elif self.cfg.loss.name == "tolerance_nonzero":
61 | self.loss_fn = ToleranceNonZeroLoss(
62 | loss_weight=self.loss_weight, label_weight=self.label_weight, pos_weight=self.pos_weight
63 | )
64 | elif self.cfg.loss.name == "focal":
65 | self.loss_fn = FocalLoss(
66 | alpha=self.cfg.loss.alpha,
67 | gamma=self.cfg.loss.gamma,
68 | )
69 | elif self.cfg.loss.name == "focal_bce":
70 | self.loss_fn = FocalBCELoss(
71 | alpha=self.cfg.loss.alpha,
72 | gamma=self.cfg.loss.gamma,
73 | weight=torch.tensor(self.cfg.loss.weight),
74 | )
75 | else:
76 | self.loss_fn = BCEWithLogitsLoss(weight=self.label_weight, pos_weight=self.pos_weight)
77 |
78 | self.loss_fn = self.loss_fn.cuda()
79 |
80 | def forward(
81 | self,
82 | x: torch.Tensor,
83 | labels: Optional[torch.Tensor] = None,
84 | masks: Optional[torch.Tensor] = None,
85 | do_mixup: bool = False,
86 | do_cutmix: bool = False,
87 | ) -> dict[str, torch.Tensor]:
88 | """Forward pass of the model.
89 |
90 | Args:
91 | x (torch.Tensor): (batch_size, n_channels, n_timesteps)
92 | labels (Optional[torch.Tensor], optional): (batch_size, n_timesteps, n_classes)
93 | Returns:
94 | dict[str, torch.Tensor]: logits (batch_size, n_timesteps, n_classes)
95 | """
96 | x = self.feature_extractor(x) # (batch_size, n_channels, height, n_timesteps)
97 |
98 | if do_mixup and labels is not None:
99 | x, labels = self.mixup(x, labels)
100 | if do_cutmix and labels is not None:
101 | x, labels = self.cutmix(x, labels)
102 |
103 | x = self.encoder(x).squeeze(1) # (batch_size, height, n_timesteps)
104 | logits = self.decoder(x) # (batch_size, n_timesteps, n_classes)
105 |
106 | output = {"logits": logits}
107 | if labels is not None:
108 | l = self.overlap if self.overlap > 0 else None
109 | r = -self.overlap if self.overlap > 0 else None
110 | loss = self.loss_fn(logits[:, l:r, :], labels[:, l:r, :], masks[:, l:r, :])
111 | output["loss"] = loss
112 |
113 | return output
114 |
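Note: the loss is computed only on the interior of each chunk; `overlap` steps are trimmed from both ends of the time axis before calling `loss_fn`. The trim is plain slicing:

    import torch

    overlap = 2
    logits = torch.arange(10).view(1, 10, 1)   # (batch, n_timesteps, n_classes)
    trimmed = logits[:, overlap:-overlap, :]   # keeps timesteps 2..7
    assert trimmed.shape == (1, 6, 1)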
--------------------------------------------------------------------------------
/kami/src/models/spec2Dcnn2DayV2.py:
--------------------------------------------------------------------------------
1 | from typing import Optional
2 | from omegaconf import DictConfig
3 |
4 | import segmentation_models_pytorch as smp
5 | import torch
6 | import torch.nn as nn
7 |
8 | from src.augmentation.cutmix import Cutmix
9 | from src.augmentation.mixup import Mixup
10 |
11 | from src.models.loss.tolerance import ToleranceLoss
12 | from src.models.loss.tolerance_mse import ToleranceMSELoss
13 | from src.models.loss.bce import BCEWithLogitsLoss
14 | from src.models.loss.tolerance_nonzero import ToleranceNonZeroLoss
15 | from src.models.loss.focal import FocalLoss
16 | from src.models.loss.focal_bce import FocalBCELoss
17 |
18 |
19 | class Spec2DCNN2DayV2(nn.Module):
20 | def __init__(
21 | self,
22 | cfg: DictConfig,
23 | feature_extractor: nn.Module,
24 | decoder: nn.Module,
25 | encoder_name: str,
26 | in_channels: int,
27 | encoder_weights: Optional[str] = None,
28 | mixup_alpha: float = 0.5,
29 | cutmix_alpha: float = 0.5,
30 | ):
31 | super().__init__()
32 | self.cfg = cfg
33 | self.feature_extractor = feature_extractor
34 | self.encoder = smp.Unet(
35 | encoder_name=encoder_name,
36 | encoder_weights=encoder_weights,
37 | in_channels=in_channels,
38 | classes=1,
39 | )
40 | self.decoder = decoder
41 | self.mixup = Mixup(mixup_alpha)
42 | self.cutmix = Cutmix(cutmix_alpha)
43 | self.loss_weight = torch.tensor(cfg.loss.loss_weight) if "loss_weight" in cfg.loss else None
44 | self.label_weight = torch.tensor(cfg.label_weight) if "label_weight" in cfg else None
45 | self.pos_weight = torch.tensor(cfg.pos_weight) if "pos_weight" in cfg else None
46 | self.loss_fn = None
47 | self.update_loss_fn()
48 |
49 | def update_loss_fn(self, sleep_decay: float = 1.0) -> None:
50 | self.label_weight[0] = self.label_weight[0] * sleep_decay
51 | if self.cfg.loss.name == "tolerance":
52 | self.loss_fn = ToleranceLoss(
53 | loss_weight=self.loss_weight, label_weight=self.label_weight, pos_weight=self.pos_weight
54 | )
55 | elif self.cfg.loss.name == "tolerance_mse":
56 | self.loss_fn = ToleranceMSELoss(
57 | loss_weight=self.loss_weight, label_weight=self.label_weight, pos_weight=self.pos_weight
58 | )
59 | elif self.cfg.loss.name == "tolerance_nonzero":
60 | self.loss_fn = ToleranceNonZeroLoss(
61 | loss_weight=self.loss_weight, label_weight=self.label_weight, pos_weight=self.pos_weight
62 | )
63 | elif self.cfg.loss.name == "focal":
64 | self.loss_fn = FocalLoss(
65 | alpha=self.cfg.loss.alpha,
66 | gamma=self.cfg.loss.gamma,
67 | )
68 | elif self.cfg.loss.name == "focal_bce":
69 | self.loss_fn = FocalBCELoss(
70 | alpha=self.cfg.loss.alpha,
71 | gamma=self.cfg.loss.gamma,
72 | weight=torch.tensor(self.cfg.loss.weight),
73 | )
74 | else:
75 | self.loss_fn = BCEWithLogitsLoss(weight=self.label_weight, pos_weight=self.pos_weight)
76 |
77 | self.loss_fn = self.loss_fn.cuda()
78 |
79 | def forward(
80 | self,
81 | x: torch.Tensor,
82 | labels: Optional[torch.Tensor] = None,
83 | masks: Optional[torch.Tensor] = None,
84 | do_mixup: bool = False,
85 | do_cutmix: bool = False,
86 | ) -> dict[str, torch.Tensor]:
87 | """Forward pass of the model.
88 | Args:
89 | x (torch.Tensor): (batch_size, n_channels, duration)
90 | labels (Optional[torch.Tensor], optional): (batch_size, n_timesteps, n_classes)
91 | Returns:
92 | dict[str, torch.Tensor]: logits (batch_size, n_timesteps, n_classes)
93 | """
94 | x1, x2 = torch.split(x, x.shape[2] // 2, dim=2)
95 |
96 |         # Split the two days of data and feed them through in parallel
97 | x1 = self.feature_extractor(x1) # (batch_size, n_channels, height, n_timesteps//2)
98 | x2 = self.feature_extractor(x2) # (batch_size, n_channels, height, n_timesteps//2)
99 |
100 | x1 = self.encoder(x1).squeeze(1) # (batch_size, height, n_timesteps//2)
101 | x2 = self.encoder(x2).squeeze(1) # (batch_size, height, n_timesteps//2)
102 |
103 |         # Concatenate the residual and feed it to the decoder
104 |         x = torch.cat([x1, x1 - x2], dim=1)  # (batch_size, 2 * height, n_timesteps//2)
105 | logits1 = self.decoder(x) # (batch_size, n_timesteps//2, n_classes)
106 |
107 | x = torch.cat([x2, x2 - x1], dim=1)
108 | logits2 = self.decoder(x)
109 |
110 | logits = torch.cat([logits1, logits2], dim=1) # (batch_size, n_timesteps, n_classes)
111 | output = {"logits": logits}
112 | if labels is not None:
113 | loss = self.loss_fn(logits, labels, masks)
114 | output["loss"] = loss
115 |
116 | return output
117 |
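A shape sketch of the residual pairing above (encoder outputs stubbed with random tensors; sizes illustrative):

    import torch

    batch_size, height, half_t = 2, 64, 720
    h1 = torch.randn(batch_size, height, half_t)   # encoded day 1
    h2 = torch.randn(batch_size, height, half_t)   # encoded day 2

    day1_in = torch.cat([h1, h1 - h2], dim=1)      # (batch_size, 2 * height, half_t)
    day2_in = torch.cat([h2, h2 - h1], dim=1)      # the same decoder sees day 2 plus its residual
    assert day1_in.shape == (batch_size, 2 * height, half_t)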
--------------------------------------------------------------------------------
/shimacos/components/stacking/sync_batchnorm/comm.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # File : comm.py
3 | # Author : Jiayuan Mao
4 | # Email : maojiayuan@gmail.com
5 | # Date : 27/01/2018
6 | #
7 | # This file is part of Synchronized-BatchNorm-PyTorch.
8 | # https://github.com/vacancy/Synchronized-BatchNorm-PyTorch
9 | # Distributed under MIT License.
10 |
11 | import queue
12 | import collections
13 | import threading
14 |
15 | __all__ = ['FutureResult', 'SlavePipe', 'SyncMaster']
16 |
17 |
18 | class FutureResult(object):
19 | """A thread-safe future implementation. Used only as one-to-one pipe."""
20 |
21 | def __init__(self):
22 | self._result = None
23 | self._lock = threading.Lock()
24 | self._cond = threading.Condition(self._lock)
25 |
26 | def put(self, result):
27 | with self._lock:
28 |             assert self._result is None, 'Previous result hasn\'t been fetched.'
29 | self._result = result
30 | self._cond.notify()
31 |
32 | def get(self):
33 | with self._lock:
34 | if self._result is None:
35 | self._cond.wait()
36 |
37 | res = self._result
38 | self._result = None
39 | return res
40 |
41 |
42 | _MasterRegistry = collections.namedtuple('MasterRegistry', ['result'])
43 | _SlavePipeBase = collections.namedtuple('_SlavePipeBase', ['identifier', 'queue', 'result'])
44 |
45 |
46 | class SlavePipe(_SlavePipeBase):
47 | """Pipe for master-slave communication."""
48 |
49 | def run_slave(self, msg):
50 | self.queue.put((self.identifier, msg))
51 | ret = self.result.get()
52 | self.queue.put(True)
53 | return ret
54 |
55 |
56 | class SyncMaster(object):
57 | """An abstract `SyncMaster` object.
58 |
59 |     - During the replication, as the data parallel will trigger a callback on each module, all slave devices should
60 |     call `register(id)` and obtain a `SlavePipe` to communicate with the master.
61 |     - During the forward pass, the master device invokes `run_master`; all messages from the slave devices will be
62 |     collected and passed to a registered callback.
63 |     - After receiving the messages, the master device should gather the information and determine the message to be
64 |     passed back to each slave device.
65 | """
66 |
67 | def __init__(self, master_callback):
68 | """
69 |
70 | Args:
71 | master_callback: a callback to be invoked after having collected messages from slave devices.
72 | """
73 | self._master_callback = master_callback
74 | self._queue = queue.Queue()
75 | self._registry = collections.OrderedDict()
76 | self._activated = False
77 |
78 | def __getstate__(self):
79 | return {'master_callback': self._master_callback}
80 |
81 | def __setstate__(self, state):
82 | self.__init__(state['master_callback'])
83 |
84 | def register_slave(self, identifier):
85 | """
86 |         Register a slave device.
87 | 
88 |         Args:
89 |             identifier: an identifier, usually the device id.
90 |
91 | Returns: a `SlavePipe` object which can be used to communicate with the master device.
92 |
93 | """
94 | if self._activated:
95 | assert self._queue.empty(), 'Queue is not clean before next initialization.'
96 | self._activated = False
97 | self._registry.clear()
98 | future = FutureResult()
99 | self._registry[identifier] = _MasterRegistry(future)
100 | return SlavePipe(identifier, self._queue, future)
101 |
102 | def run_master(self, master_msg):
103 | """
104 | Main entry for the master device in each forward pass.
105 |         The messages are first collected from each device (including the master device), and then
106 |         a callback is invoked to compute the message to be sent back to each device
107 | (including the master device).
108 |
109 | Args:
110 |             master_msg: the message that the master wants to send to itself. This will be placed as the first
111 | message when calling `master_callback`. For detailed usage, see `_SynchronizedBatchNorm` for an example.
112 |
113 | Returns: the message to be sent back to the master device.
114 |
115 | """
116 | self._activated = True
117 |
118 | intermediates = [(0, master_msg)]
119 | for i in range(self.nr_slaves):
120 | intermediates.append(self._queue.get())
121 |
122 | results = self._master_callback(intermediates)
123 |         assert results[0][0] == 0, 'The first result should belong to the master.'
124 |
125 | for i, res in results:
126 | if i == 0:
127 | continue
128 | self._registry[i].result.put(res)
129 |
130 | for i in range(self.nr_slaves):
131 | assert self._queue.get() is True
132 |
133 | return results[0][1]
134 |
135 | @property
136 | def nr_slaves(self):
137 | return len(self._registry)
138 |
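A self-contained round trip through `SyncMaster` on two threads (single process; the doubling callback is purely illustrative):

    import threading

    master = SyncMaster(lambda msgs: [(i, m * 2) for i, m in msgs])
    pipe = master.register_slave(identifier=1)

    def slave():
        print(pipe.run_slave(21))   # prints 42 once the master callback has run

    t = threading.Thread(target=slave)
    t.start()
    print(master.run_master(10))    # collects (0, 10) and (1, 21), prints 20
    t.join()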
--------------------------------------------------------------------------------
/kami/src/models/spec2DcnnAffine.py:
--------------------------------------------------------------------------------
1 | from typing import Optional
2 | from omegaconf import DictConfig
3 |
4 | import segmentation_models_pytorch as smp
5 | import torch
6 | import torch.nn as nn
7 |
8 | from src.augmentation.cutmix import Cutmix
9 | from src.augmentation.mixup import Mixup
10 |
11 | from src.models.loss.tolerance import ToleranceLoss
12 | from src.models.loss.tolerance_mse import ToleranceMSELoss
13 | from src.models.loss.bce import BCEWithLogitsLoss
14 | from src.models.loss.tolerance_nonzero import ToleranceNonZeroLoss
15 | from src.models.loss.focal import FocalLoss
16 | from src.models.loss.focal_bce import FocalBCELoss
17 |
18 |
19 | class Spec2DCNNAffine(nn.Module):
20 | def __init__(
21 | self,
22 | cfg: DictConfig,
23 | feature_extractor: nn.Module,
24 | decoder: nn.Module,
25 | encoder_name: str,
26 | in_channels: int,
27 | height: int,
28 | encoder_weights: Optional[str] = None,
29 | mixup_alpha: float = 0.5,
30 | cutmix_alpha: float = 0.5,
31 | ):
32 | super().__init__()
33 | self.cfg = cfg
34 | self.feature_extractor = feature_extractor
35 | aux_params = dict(
36 | pooling="avg", # one of 'avg', 'max'
37 | dropout=0.1, # dropout ratio, default is None
38 | activation=None, # activation function, default is None
39 | classes=2 * height, # define number of output labels
40 | )
41 |
42 | self.encoder = smp.Unet(
43 | encoder_name=encoder_name,
44 | encoder_weights=encoder_weights,
45 | in_channels=in_channels,
46 | classes=1,
47 | aux_params=aux_params,
48 | )
49 | self.decoder = decoder
50 | self.mixup = Mixup(mixup_alpha)
51 | self.cutmix = Cutmix(cutmix_alpha)
52 | self.loss_weight = torch.tensor(cfg.loss.loss_weight) if "loss_weight" in cfg.loss else None
53 | self.label_weight = torch.tensor(cfg.label_weight) if "label_weight" in cfg else None
54 | self.pos_weight = torch.tensor(cfg.pos_weight) if "pos_weight" in cfg else None
55 | self.loss_fn = None
56 | self.update_loss_fn()
57 |
58 | def update_loss_fn(self, sleep_decay: float = 1.0) -> None:
59 | self.label_weight[0] = self.label_weight[0] * sleep_decay
60 | if self.cfg.loss.name == "tolerance":
61 | self.loss_fn = ToleranceLoss(
62 | loss_weight=self.loss_weight, label_weight=self.label_weight, pos_weight=self.pos_weight
63 | )
64 | elif self.cfg.loss.name == "tolerance_mse":
65 | self.loss_fn = ToleranceMSELoss(
66 | loss_weight=self.loss_weight, label_weight=self.label_weight, pos_weight=self.pos_weight
67 | )
68 | elif self.cfg.loss.name == "tolerance_nonzero":
69 | self.loss_fn = ToleranceNonZeroLoss(
70 | loss_weight=self.loss_weight, label_weight=self.label_weight, pos_weight=self.pos_weight
71 | )
72 | elif self.cfg.loss.name == "focal":
73 | self.loss_fn = FocalLoss(
74 | alpha=self.cfg.loss.alpha,
75 | gamma=self.cfg.loss.gamma,
76 | )
77 | elif self.cfg.loss.name == "focal_bce":
78 | self.loss_fn = FocalBCELoss(
79 | alpha=self.cfg.loss.alpha,
80 | gamma=self.cfg.loss.gamma,
81 | weight=torch.tensor(self.cfg.loss.weight),
82 | )
83 | else:
84 | self.loss_fn = BCEWithLogitsLoss(weight=self.label_weight, pos_weight=self.pos_weight)
85 |
86 | self.loss_fn = self.loss_fn.cuda()
87 |
88 | def forward(
89 | self,
90 | x: torch.Tensor,
91 | labels: Optional[torch.Tensor] = None,
92 | masks: Optional[torch.Tensor] = None,
93 | do_mixup: bool = False,
94 | do_cutmix: bool = False,
95 | ) -> dict[str, torch.Tensor]:
96 | """Forward pass of the model.
97 |
98 | Args:
99 | x (torch.Tensor): (batch_size, n_channels, n_timesteps)
100 | labels (Optional[torch.Tensor], optional): (batch_size, n_timesteps, n_classes)
101 | Returns:
102 | dict[str, torch.Tensor]: logits (batch_size, n_timesteps, n_classes)
103 | """
104 | x = self.feature_extractor(x) # (batch_size, n_channels, height, n_timesteps)
105 |
106 | if do_mixup and labels is not None:
107 | x, labels = self.mixup(x, labels)
108 | if do_cutmix and labels is not None:
109 | x, labels = self.cutmix(x, labels)
110 |
111 | x, mid = self.encoder(x) # (batch_size, 1, height, n_timesteps), (batch_size, 2 * height)
112 | a, b = torch.split(mid, mid.shape[1] // 2, dim=1)
113 | x = x.squeeze(1) # (batch_size, height, n_timesteps)
114 |         x = a.unsqueeze(-1) * x + b.unsqueeze(-1)  # broadcast (batch_size, height, 1) over n_timesteps
115 | logits = self.decoder(x) # (batch_size, n_classes, n_timesteps)
116 |
117 | output = {"logits": logits}
118 | if labels is not None:
119 | loss = self.loss_fn(logits, labels, masks)
120 | output["loss"] = loss
121 |
122 | return output
123 |
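Note: the aux head yields a per-sample scale `a` and shift `b` of length `height`, so the modulation is FiLM-style, broadcast over the time axis. A sketch with stub tensors (sizes illustrative, matching the broadcast fix above):

    import torch

    batch_size, height, n_timesteps = 2, 64, 96
    x = torch.randn(batch_size, height, n_timesteps)
    mid = torch.randn(batch_size, 2 * height)     # aux classification-head output

    a, b = torch.split(mid, height, dim=1)        # each (batch_size, height)
    y = a.unsqueeze(-1) * x + b.unsqueeze(-1)     # broadcast over n_timesteps
    assert y.shape == x.shape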
--------------------------------------------------------------------------------
/requirements.lock:
--------------------------------------------------------------------------------
1 | # generated by rye
2 | # use `rye lock` or `rye sync` to update this lockfile
3 | #
4 | # last locked with the following flags:
5 | # pre: false
6 | # features: []
7 | # all-features: false
8 |
9 | -e file:.
10 | aiohttp==3.8.5
11 | aiosignal==1.3.1
12 | aliyun-python-sdk-core==2.13.36
13 | aliyun-python-sdk-kms==2.16.1
14 | antlr4-python3-runtime==4.9.3
15 | anyio==3.7.1
16 | appdirs==1.4.4
17 | argon2-cffi==23.1.0
18 | argon2-cffi-bindings==21.2.0
19 | arrow==1.2.3
20 | asttokens==2.2.1
21 | async-lru==2.0.4
22 | async-timeout==4.0.3
23 | attrs==23.1.0
24 | babel==2.12.1
25 | backcall==0.2.0
26 | beautifulsoup4==4.12.2
27 | bleach==6.0.0
28 | cachetools==5.3.1
29 | catboost==1.2
30 | certifi==2023.7.22
31 | cffi==1.15.1
32 | charset-normalizer==3.2.0
33 | click==8.1.6
34 | cmake==3.27.2
35 | colorama==0.4.6
36 | colorlog==6.7.0
37 | comm==0.1.4
38 | contourpy==1.1.0
39 | crcmod==1.7
40 | cryptography==41.0.3
41 | cycler==0.11.0
42 | debugpy==1.6.7.post1
43 | decorator==5.1.1
44 | deepspeed==0.10.0
45 | defusedxml==0.7.1
46 | docker-pycreds==0.4.0
47 | efficientnet-pytorch==0.7.1
48 | exceptiongroup==1.1.3
49 | executing==1.2.0
50 | fairscale==0.4.13
51 | fastjsonschema==2.18.0
52 | filelock==3.12.2
53 | fonttools==4.42.0
54 | fqdn==1.5.1
55 | frozenlist==1.4.0
56 | fsspec==2023.6.0
57 | gitdb==4.0.10
58 | gitpython==3.1.32
59 | google-api-core==2.11.1
60 | google-auth==2.22.0
61 | google-cloud-core==2.3.3
62 | google-cloud-storage==2.10.0
63 | google-crc32c==1.5.0
64 | google-resumable-media==2.5.0
65 | googleapis-common-protos==1.60.0
66 | graphviz==0.20.1
67 | hjson==3.1.0
68 | huggingface-hub==0.16.4
69 | hydra-colorlog==1.2.0
70 | hydra-core==1.3.2
71 | idna==3.4
72 | invoke==2.2.0
73 | ipykernel==6.25.1
74 | ipython==8.14.0
75 | ipython-genutils==0.2.0
76 | ipywidgets==8.1.0
77 | isoduration==20.11.0
78 | jedi==0.19.0
79 | jinja2==3.1.2
80 | jmespath==0.10.0
81 | joblib==1.3.2
82 | json5==0.9.14
83 | jsonpointer==2.4
84 | jsonschema==4.19.0
85 | jsonschema-specifications==2023.7.1
86 | jupyter==1.0.0
87 | jupyter-client==8.3.0
88 | jupyter-console==6.6.3
89 | jupyter-core==5.3.1
90 | jupyter-events==0.7.0
91 | jupyter-lsp==2.2.0
92 | jupyter-server==2.7.1
93 | jupyter-server-terminals==0.4.4
94 | jupyterlab==4.0.5
95 | jupyterlab-pygments==0.2.2
96 | jupyterlab-server==2.24.0
97 | jupyterlab-widgets==3.0.8
98 | kiwisolver==1.4.4
99 | lightgbm==4.0.0
100 | lightning-utilities==0.9.0
101 | lit==16.0.6
102 | llvmlite==0.41.1
103 | loguru==0.7.2
104 | markdown==3.4.4
105 | markdown-it-py==3.0.0
106 | markupsafe==2.1.3
107 | matplotlib==3.7.2
108 | matplotlib-inline==0.1.6
109 | matplotlib-venn==0.11.9
110 | mdurl==0.1.2
111 | mistune==3.0.1
112 | model-index==0.1.11
113 | mpmath==1.3.0
114 | multidict==6.0.4
115 | munch==4.0.0
116 | nbclient==0.8.0
117 | nbconvert==7.7.3
118 | nbformat==5.9.2
119 | nest-asyncio==1.5.7
120 | networkx==3.1
121 | ninja==1.11.1
122 | notebook==7.0.2
123 | notebook-shim==0.2.3
124 | numba==0.58.1
125 | numpy==1.25.2
126 | omegaconf==2.3.0
127 | opendatalab==0.0.10
128 | openmim==0.3.9
129 | openxlab==0.0.17
130 | ordered-set==4.1.0
131 | oss2==2.17.0
132 | overrides==7.4.0
133 | packaging==23.1
134 | pandas==2.0.3
135 | pandocfilters==1.5.0
136 | parso==0.8.3
137 | pathlib==1.0.1
138 | pathtools==0.1.2
139 | pexpect==4.8.0
140 | pickleshare==0.7.5
141 | pillow==9.5.0
142 | platformdirs==3.10.0
143 | plotly==5.16.0
144 | polars==0.19.13
145 | pretrainedmodels==0.7.4
146 | prometheus-client==0.17.1
147 | prompt-toolkit==3.0.39
148 | protobuf==4.24.0
149 | psutil==5.9.5
150 | ptyprocess==0.7.0
151 | pure-eval==0.2.2
152 | py-cpuinfo==9.0.0
153 | pyarrow==14.0.1
154 | pyasn1==0.5.0
155 | pyasn1-modules==0.3.0
156 | pycparser==2.21
157 | pycryptodome==3.18.0
158 | pydantic==1.10.12
159 | pygments==2.16.1
160 | pyparsing==3.0.9
161 | python-dateutil==2.8.2
162 | python-json-logger==2.0.7
163 | pytorch-lightning==2.0.6
164 | pytz==2023.3
165 | pyyaml==6.0.1
166 | pyzmq==25.1.1
167 | qtconsole==5.4.3
168 | qtpy==2.3.1
169 | referencing==0.30.2
170 | regex==2023.8.8
171 | requests==2.28.2
172 | rfc3339-validator==0.1.4
173 | rfc3986-validator==0.1.1
174 | rich==13.4.2
175 | rpds-py==0.9.2
176 | rsa==4.9
177 | safetensors==0.3.2
178 | scikit-learn==1.3.0
179 | scipy==1.11.4
180 | seaborn==0.12.2
181 | segmentation-models-pytorch==0.3.3
182 | send2trash==1.8.2
183 | sentry-sdk==1.29.2
184 | seqeval==1.2.2
185 | setproctitle==1.3.2
186 | six==1.16.0
187 | smmap==5.0.0
188 | sniffio==1.3.0
189 | soupsieve==2.4.1
190 | stack-data==0.6.2
191 | sympy==1.12
192 | tabulate==0.9.0
193 | tenacity==8.2.3
194 | terminado==0.17.1
195 | threadpoolctl==3.2.0
196 | timm==0.9.2
197 | tinycss2==1.2.1
198 | tokenizers==0.13.3
199 | tomli==2.0.1
200 | torch==2.0.1+cu118
201 | torchaudio==2.0.2+cu118
202 | torchmetrics==1.0.3
203 | torchvision==0.15.2+cu118
204 | tornado==6.3.3
205 | tqdm==4.65.2
206 | traitlets==5.9.0
207 | transformers==4.31.0
208 | triton==2.0.0
209 | typing-extensions==4.7.1
210 | tzdata==2023.3
211 | uri-template==1.3.0
212 | urllib3==1.26.16
213 | wandb==0.15.8
214 | wcwidth==0.2.6
215 | webcolors==1.13
216 | webencodings==0.5.1
217 | websocket-client==1.6.1
218 | widgetsnbextension==4.0.8
219 | xgboost==1.7.6
220 | yarl==1.9.2
221 | # The following packages are considered to be unsafe in a requirements file:
222 | pip==23.2.1
223 | setuptools==60.2.0
224 |
--------------------------------------------------------------------------------
/kami/src/models/spec2Dcnn2Day.py:
--------------------------------------------------------------------------------
1 | from typing import Optional
2 | from omegaconf import DictConfig
3 |
4 | import segmentation_models_pytorch as smp
5 | import torch
6 | import torch.nn as nn
7 |
8 | from src.augmentation.cutmix import Cutmix
9 | from src.augmentation.mixup import Mixup
10 |
11 | from src.models.loss.tolerance import ToleranceLoss
12 | from src.models.loss.tolerance_mse import ToleranceMSELoss
13 | from src.models.loss.bce import BCEWithLogitsLoss
14 | from src.models.loss.tolerance_nonzero import ToleranceNonZeroLoss
15 | from src.models.loss.focal import FocalLoss
16 | from src.models.loss.focal_bce import FocalBCELoss
17 |
18 |
19 | class Spec2DCNN2Day(nn.Module):
20 | def __init__(
21 | self,
22 | cfg: DictConfig,
23 | feature_extractor: nn.Module,
24 | decoder: nn.Module,
25 | encoder_name: str,
26 | in_channels: int,
27 | encoder_weights: Optional[str] = None,
28 | mixup_alpha: float = 0.5,
29 | cutmix_alpha: float = 0.5,
30 | ):
31 | super().__init__()
32 | self.cfg = cfg
33 | self.feature_extractor = feature_extractor
34 | self.encoder = smp.Unet(
35 | encoder_name=encoder_name,
36 | encoder_weights=encoder_weights,
37 | in_channels=in_channels,
38 | classes=1,
39 | )
40 | self.decoder = decoder
41 | self.mixup = Mixup(mixup_alpha)
42 | self.cutmix = Cutmix(cutmix_alpha)
43 | self.loss_weight = torch.tensor(cfg.loss.loss_weight) if "loss_weight" in cfg.loss else None
44 | self.label_weight = torch.tensor(cfg.label_weight) if "label_weight" in cfg else None
45 | self.pos_weight = torch.tensor(cfg.pos_weight) if "pos_weight" in cfg else None
46 | self.loss_fn = None
47 | self.update_loss_fn()
48 |
49 | def update_loss_fn(self, sleep_decay: float = 1.0) -> None:
 50 |         if self.label_weight is not None: self.label_weight[0] = self.label_weight[0] * sleep_decay  # guard: label_weight is optional in cfg
51 | if self.cfg.loss.name == "tolerance":
52 | self.loss_fn = ToleranceLoss(
53 | loss_weight=self.loss_weight, label_weight=self.label_weight, pos_weight=self.pos_weight
54 | )
55 | elif self.cfg.loss.name == "tolerance_mse":
56 | self.loss_fn = ToleranceMSELoss(
57 | loss_weight=self.loss_weight, label_weight=self.label_weight, pos_weight=self.pos_weight
58 | )
59 | elif self.cfg.loss.name == "tolerance_nonzero":
60 | self.loss_fn = ToleranceNonZeroLoss(
61 | loss_weight=self.loss_weight, label_weight=self.label_weight, pos_weight=self.pos_weight
62 | )
63 | elif self.cfg.loss.name == "focal":
64 | self.loss_fn = FocalLoss(
65 | alpha=self.cfg.loss.alpha,
66 | gamma=self.cfg.loss.gamma,
67 | )
68 | elif self.cfg.loss.name == "focal_bce":
69 | self.loss_fn = FocalBCELoss(
70 | alpha=self.cfg.loss.alpha,
71 | gamma=self.cfg.loss.gamma,
72 | weight=torch.tensor(self.cfg.loss.weight),
73 | )
74 | else:
75 | self.loss_fn = BCEWithLogitsLoss(weight=self.label_weight, pos_weight=self.pos_weight)
76 |
77 | self.loss_fn = self.loss_fn.cuda()
78 |
79 | def forward(
80 | self,
81 | x: torch.Tensor,
82 | labels: Optional[torch.Tensor] = None,
83 | masks: Optional[torch.Tensor] = None,
84 | do_mixup: bool = False,
85 | do_cutmix: bool = False,
86 | ) -> dict[str, torch.Tensor]:
87 | """Forward pass of the model.
88 |
89 | Args:
90 | x (torch.Tensor): (batch_size, n_channels, n_timesteps)
91 | labels (Optional[torch.Tensor], optional): (batch_size, n_timesteps, n_classes)
92 | Returns:
93 | dict[str, torch.Tensor]: logits (batch_size, n_timesteps, n_classes)
94 | """
95 | x1, x2 = torch.split(x, x.shape[2] // 2, dim=2)
96 |
 97 |         # Split the two-day window and run each day through the extractor/encoder in parallel
98 | x1 = self.feature_extractor(x1) # (batch_size, n_channels, height, n_timesteps)
99 | x2 = self.feature_extractor(x2) # (batch_size, n_channels, height, n_timesteps)
100 |
101 | x1 = self.encoder(x1).squeeze(1) # (batch_size, height, n_timesteps)
102 | x2 = self.encoder(x2).squeeze(1) # (batch_size, height, n_timesteps)
103 |
104 |         # Concatenate the two days' features along the channel axis
105 |         x = torch.cat([x1, x2], dim=1)  # (batch_size, 2 * height, n_timesteps)
106 | logits1 = self.decoder(x) # (batch_size, n_timesteps, 2 * n_classes)
107 | l1, l2 = torch.split(logits1, logits1.shape[2] // 2, dim=2)
108 | logits1 = torch.concat([l1, l2], dim=1) # (batch_size, 2*n_timesteps, n_classes)
109 |
110 | x = torch.cat([x2, x1], dim=1)
111 | logits2 = self.decoder(x)
112 | l2, l1 = torch.split(logits2, logits2.shape[2] // 2, dim=2)
113 | logits2 = torch.concat([l1, l2], dim=1)
114 |
115 | logits = (logits1 + logits2) / 2
116 | output = {"logits": logits}
117 | if labels is not None:
118 | loss1 = self.loss_fn(logits1, labels, masks)
119 | loss2 = self.loss_fn(logits2, labels, masks)
120 | output["loss"] = (loss1 + loss2) / 2
121 |
122 | return output
123 |
--------------------------------------------------------------------------------
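Note (annotation, not a dumped file): Spec2DCNN2Day above encodes each day of a
two-day window independently, decodes the concatenated features twice, once per
day ordering, re-stitches each result back into time order, and averages, so
the prediction does not depend on which day comes first. The do_mixup /
do_cutmix flags are accepted but never applied in this variant. Below is a
minimal shape-flow sketch of the split / swap / average step; encode and decode
are random stand-ins for the configured extractor + U-Net and decoder, not the
repo's modules.

import torch

B, H, T, n_classes = 2, 8, 64, 3               # batch, encoder height, window length, classes

def encode(day):                                # stand-in for feature_extractor + encoder
    return torch.randn(B, H, day.shape[2])      # (B, height, T/2)

def decode(feats):                              # stand-in for the configured decoder
    return torch.randn(B, feats.shape[2], 2 * n_classes)  # (B, T/2, 2 * n_classes)

x = torch.randn(B, 4, T)                        # (B, n_channels, T), two days back to back
x1, x2 = torch.split(x, T // 2, dim=2)
h1, h2 = encode(x1), encode(x2)

out = decode(torch.cat([h1, h2], dim=1))        # ordering (day1, day2)
a1, a2 = torch.split(out, out.shape[2] // 2, dim=2)
logits1 = torch.cat([a1, a2], dim=1)            # (B, T, n_classes), back in time order

out = decode(torch.cat([h2, h1], dim=1))        # ordering (day2, day1)
b2, b1 = torch.split(out, out.shape[2] // 2, dim=2)
logits2 = torch.cat([b1, b2], dim=1)            # realigned to (day1, day2)

logits = (logits1 + logits2) / 2                # order-symmetrized prediction
assert logits.shape == (B, T, n_classes)

--------------------------------------------------------------------------------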
/kami/src/models/spec2DcnnMinMax.py:
--------------------------------------------------------------------------------
1 | from typing import Optional
2 | from omegaconf import DictConfig
3 |
4 | import segmentation_models_pytorch as smp
5 | import torch
6 | import torch.nn as nn
7 |
8 | from src.augmentation.cutmix import Cutmix
9 | from src.augmentation.mixup import Mixup
10 |
11 | from src.models.loss.tolerance import ToleranceLoss
12 | from src.models.loss.tolerance_mse import ToleranceMSELoss
13 | from src.models.loss.bce import BCEWithLogitsLoss
14 | from src.models.loss.tolerance_nonzero import ToleranceNonZeroLoss
15 | from src.models.loss.focal import FocalLoss
16 | from src.models.loss.focal_bce import FocalBCELoss
17 |
18 |
19 | class Spec2DCNNMinMax(nn.Module):
20 | def __init__(
21 | self,
22 | cfg: DictConfig,
23 | feature_extractor: nn.Module,
24 | decoder: nn.Module,
25 | encoder_name: str,
26 | in_channels: int,
27 | height: int,
28 | encoder_weights: Optional[str] = None,
29 | mixup_alpha: float = 0.5,
30 | cutmix_alpha: float = 0.5,
31 | ):
32 | super().__init__()
33 | self.cfg = cfg
34 | self.feature_extractor = feature_extractor
35 | aux_params = dict(
36 | pooling="avg", # one of 'avg', 'max'
37 | dropout=0.1, # dropout ratio, default is None
38 | activation=None, # activation function, default is None
39 | classes=2 * height, # define number of output labels
40 | )
41 |
42 | self.encoder = smp.Unet(
43 | encoder_name=encoder_name,
44 | encoder_weights=encoder_weights,
45 | in_channels=in_channels,
46 | classes=1,
47 | aux_params=aux_params,
48 | )
49 | self.decoder = decoder
50 | self.mixup = Mixup(mixup_alpha)
51 | self.cutmix = Cutmix(cutmix_alpha)
52 | self.loss_weight = torch.tensor(cfg.loss.loss_weight) if "loss_weight" in cfg.loss else None
53 | self.label_weight = torch.tensor(cfg.label_weight) if "label_weight" in cfg else None
54 | self.pos_weight = torch.tensor(cfg.pos_weight) if "pos_weight" in cfg else None
55 | self.loss_fn = None
56 | self.update_loss_fn()
57 |
58 | def update_loss_fn(self, sleep_decay: float = 1.0) -> None:
 59 |         if self.label_weight is not None: self.label_weight[0] = self.label_weight[0] * sleep_decay  # guard: label_weight is optional in cfg
60 | if self.cfg.loss.name == "tolerance":
61 | self.loss_fn = ToleranceLoss(
62 | loss_weight=self.loss_weight, label_weight=self.label_weight, pos_weight=self.pos_weight
63 | )
64 | elif self.cfg.loss.name == "tolerance_mse":
65 | self.loss_fn = ToleranceMSELoss(
66 | loss_weight=self.loss_weight, label_weight=self.label_weight, pos_weight=self.pos_weight
67 | )
68 | elif self.cfg.loss.name == "tolerance_nonzero":
69 | self.loss_fn = ToleranceNonZeroLoss(
70 | loss_weight=self.loss_weight, label_weight=self.label_weight, pos_weight=self.pos_weight
71 | )
72 | elif self.cfg.loss.name == "focal":
73 | self.loss_fn = FocalLoss(
74 | alpha=self.cfg.loss.alpha,
75 | gamma=self.cfg.loss.gamma,
76 | )
77 | elif self.cfg.loss.name == "focal_bce":
78 | self.loss_fn = FocalBCELoss(
79 | alpha=self.cfg.loss.alpha,
80 | gamma=self.cfg.loss.gamma,
81 | weight=torch.tensor(self.cfg.loss.weight),
82 | )
83 | else:
84 | self.loss_fn = BCEWithLogitsLoss(weight=self.label_weight, pos_weight=self.pos_weight)
85 |
86 | self.loss_fn = self.loss_fn.cuda()
87 |
88 | def forward(
89 | self,
90 | x: torch.Tensor,
91 | labels: Optional[torch.Tensor] = None,
92 | masks: Optional[torch.Tensor] = None,
93 | do_mixup: bool = False,
94 | do_cutmix: bool = False,
95 | ) -> dict[str, torch.Tensor]:
96 | """Forward pass of the model.
97 |
98 | Args:
99 | x (torch.Tensor): (batch_size, n_channels, n_timesteps)
100 | labels (Optional[torch.Tensor], optional): (batch_size, n_timesteps, n_classes)
101 | Returns:
102 | dict[str, torch.Tensor]: logits (batch_size, n_timesteps, n_classes)
103 | """
104 | x = self.feature_extractor(x) # (batch_size, n_channels, height, n_timesteps)
105 |
106 | if do_mixup and labels is not None:
107 | x, labels = self.mixup(x, labels)
108 | if do_cutmix and labels is not None:
109 | x, labels = self.cutmix(x, labels)
110 |
111 | x, mid = self.encoder(x) # (batch_size, 1, height, n_timesteps), (batch_size, 2 * height)
112 | x_min, x_max = torch.split(mid, mid.shape[1] // 2, dim=1)
113 | x = x.squeeze(1) # (batch_size, height, n_timesteps)
114 | x = torch.cat(
115 | [x, x - x_min.unsqueeze(2), x_max.unsqueeze(2) - x], dim=1
116 | ) # (batch_size, 3 * height, n_timesteps)
117 |         logits = self.decoder(x)  # (batch_size, n_timesteps, n_classes)
118 |
119 | output = {"logits": logits}
120 | if labels is not None:
121 | loss = self.loss_fn(logits, labels, masks)
122 | output["loss"] = loss
123 |
124 | return output
125 |
--------------------------------------------------------------------------------
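Note (annotation, not a dumped file): because the U-Net in Spec2DCNNMinMax
above is built with segmentation_models_pytorch aux_params, self.encoder(x)
returns a (mask, label) pair; the 2 * height label vector is split into
per-row min and max estimates, and two offset channels (x - min, max - x) are
appended before decoding. A minimal sketch of that channel expansion, with
random tensors standing in for the real activations:

import torch

B, H, T = 2, 8, 32
x = torch.randn(B, H, T)                   # squeezed U-Net mask: (B, height, T)
mid = torch.randn(B, 2 * H)                # aux head output: per-row min/max estimates

x_min, x_max = torch.split(mid, H, dim=1)  # (B, height) each
feats = torch.cat(
    [x, x - x_min.unsqueeze(2), x_max.unsqueeze(2) - x], dim=1
)                                          # (B, 3 * height, T): value plus offsets
assert feats.shape == (B, 3 * H, T)

--------------------------------------------------------------------------------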
/requirements-dev.lock:
--------------------------------------------------------------------------------
1 | # generated by rye
2 | # use `rye lock` or `rye sync` to update this lockfile
3 | #
4 | # last locked with the following flags:
5 | # pre: false
6 | # features: []
7 | # all-features: false
8 |
9 | -e file:.
10 | aiohttp==3.8.5
11 | aiosignal==1.3.1
12 | aliyun-python-sdk-core==2.13.36
13 | aliyun-python-sdk-kms==2.16.1
14 | antlr4-python3-runtime==4.9.3
15 | anyio==3.7.1
16 | appdirs==1.4.4
17 | argon2-cffi==23.1.0
18 | argon2-cffi-bindings==21.2.0
19 | arrow==1.2.3
20 | asttokens==2.2.1
21 | async-lru==2.0.4
22 | async-timeout==4.0.3
23 | attrs==23.1.0
24 | babel==2.12.1
25 | backcall==0.2.0
26 | beautifulsoup4==4.12.2
27 | black==23.7.0
28 | bleach==6.0.0
29 | cachetools==5.3.1
30 | catboost==1.2
31 | certifi==2023.7.22
32 | cffi==1.15.1
33 | charset-normalizer==3.2.0
34 | click==8.1.6
35 | cmake==3.27.2
36 | colorama==0.4.6
37 | colorlog==6.7.0
38 | comm==0.1.4
39 | contourpy==1.1.0
40 | crcmod==1.7
41 | cryptography==41.0.3
42 | cycler==0.11.0
43 | debugpy==1.6.7.post1
44 | decorator==5.1.1
45 | deepspeed==0.10.0
46 | defusedxml==0.7.1
47 | docker-pycreds==0.4.0
48 | efficientnet-pytorch==0.7.1
49 | exceptiongroup==1.1.3
50 | executing==1.2.0
51 | fairscale==0.4.13
52 | fastjsonschema==2.18.0
53 | filelock==3.12.2
54 | flake8==6.1.0
55 | fonttools==4.42.0
56 | fqdn==1.5.1
57 | frozenlist==1.4.0
58 | fsspec==2023.6.0
59 | gitdb==4.0.10
60 | gitpython==3.1.32
61 | google-api-core==2.11.1
62 | google-auth==2.22.0
63 | google-cloud-core==2.3.3
64 | google-cloud-storage==2.10.0
65 | google-crc32c==1.5.0
66 | google-resumable-media==2.5.0
67 | googleapis-common-protos==1.60.0
68 | graphviz==0.20.1
69 | hjson==3.1.0
70 | huggingface-hub==0.16.4
71 | hydra-colorlog==1.2.0
72 | hydra-core==1.3.2
73 | idna==3.4
74 | invoke==2.2.0
75 | ipykernel==6.25.1
76 | ipython==8.14.0
77 | ipython-genutils==0.2.0
78 | ipywidgets==8.1.0
79 | isoduration==20.11.0
80 | jedi==0.19.0
81 | jinja2==3.1.2
82 | jmespath==0.10.0
83 | joblib==1.3.2
84 | json5==0.9.14
85 | jsonpointer==2.4
86 | jsonschema==4.19.0
87 | jsonschema-specifications==2023.7.1
88 | jupyter==1.0.0
89 | jupyter-client==8.3.0
90 | jupyter-console==6.6.3
91 | jupyter-core==5.3.1
92 | jupyter-events==0.7.0
93 | jupyter-lsp==2.2.0
94 | jupyter-server==2.7.1
95 | jupyter-server-terminals==0.4.4
96 | jupyterlab==4.0.5
97 | jupyterlab-pygments==0.2.2
98 | jupyterlab-server==2.24.0
99 | jupyterlab-widgets==3.0.8
100 | kaggle==1.5.16
101 | kiwisolver==1.4.4
102 | lightgbm==4.0.0
103 | lightning-utilities==0.9.0
104 | lit==16.0.6
105 | llvmlite==0.41.1
106 | loguru==0.7.2
107 | markdown==3.4.4
108 | markdown-it-py==3.0.0
109 | markupsafe==2.1.3
110 | matplotlib==3.7.2
111 | matplotlib-inline==0.1.6
112 | matplotlib-venn==0.11.9
113 | mccabe==0.7.0
114 | mdurl==0.1.2
115 | mistune==3.0.1
116 | model-index==0.1.11
117 | mpmath==1.3.0
118 | multidict==6.0.4
119 | munch==4.0.0
120 | mypy==1.5.0
121 | mypy-extensions==1.0.0
122 | nbclient==0.8.0
123 | nbconvert==7.7.3
124 | nbformat==5.9.2
125 | nest-asyncio==1.5.7
126 | networkx==3.1
127 | ninja==1.11.1
128 | notebook==7.0.2
129 | notebook-shim==0.2.3
130 | numba==0.58.1
131 | numpy==1.25.2
132 | omegaconf==2.3.0
133 | opendatalab==0.0.10
134 | openmim==0.3.9
135 | openxlab==0.0.17
136 | ordered-set==4.1.0
137 | oss2==2.17.0
138 | overrides==7.4.0
139 | packaging==23.1
140 | pandas==2.0.3
141 | pandocfilters==1.5.0
142 | parso==0.8.3
143 | pathlib==1.0.1
144 | pathspec==0.11.2
145 | pathtools==0.1.2
146 | pexpect==4.8.0
147 | pickleshare==0.7.5
148 | pillow==9.5.0
149 | platformdirs==3.10.0
150 | plotly==5.16.0
151 | polars==0.19.13
152 | pretrainedmodels==0.7.4
153 | prometheus-client==0.17.1
154 | prompt-toolkit==3.0.39
155 | protobuf==4.24.0
156 | psutil==5.9.5
157 | ptyprocess==0.7.0
158 | pure-eval==0.2.2
159 | py-cpuinfo==9.0.0
160 | pyarrow==14.0.1
161 | pyasn1==0.5.0
162 | pyasn1-modules==0.3.0
163 | pycodestyle==2.11.0
164 | pycparser==2.21
165 | pycryptodome==3.18.0
166 | pydantic==1.10.12
167 | pyflakes==3.1.0
168 | pygments==2.16.1
169 | pyparsing==3.0.9
170 | python-dateutil==2.8.2
171 | python-json-logger==2.0.7
172 | python-slugify==8.0.1
173 | pytorch-lightning==2.0.6
174 | pytz==2023.3
175 | pyyaml==6.0.1
176 | pyzmq==25.1.1
177 | qtconsole==5.4.3
178 | qtpy==2.3.1
179 | referencing==0.30.2
180 | regex==2023.8.8
181 | requests==2.28.2
182 | rfc3339-validator==0.1.4
183 | rfc3986-validator==0.1.1
184 | rich==13.4.2
185 | rpds-py==0.9.2
186 | rsa==4.9
187 | ruff==0.0.284
188 | safetensors==0.3.2
189 | scikit-learn==1.3.0
190 | scipy==1.11.4
191 | seaborn==0.12.2
192 | segmentation-models-pytorch==0.3.3
193 | send2trash==1.8.2
194 | sentry-sdk==1.29.2
195 | seqeval==1.2.2
196 | setproctitle==1.3.2
197 | six==1.16.0
198 | smmap==5.0.0
199 | sniffio==1.3.0
200 | soupsieve==2.4.1
201 | stack-data==0.6.2
202 | sympy==1.12
203 | tabulate==0.9.0
204 | tenacity==8.2.3
205 | terminado==0.17.1
206 | text-unidecode==1.3
207 | threadpoolctl==3.2.0
208 | timm==0.9.2
209 | tinycss2==1.2.1
210 | tokenizers==0.13.3
211 | tomli==2.0.1
212 | torch==2.0.1+cu118
213 | torchaudio==2.0.2+cu118
214 | torchmetrics==1.0.3
215 | torchvision==0.15.2+cu118
216 | tornado==6.3.3
217 | tqdm==4.65.2
218 | traitlets==5.9.0
219 | transformers==4.31.0
220 | triton==2.0.0
221 | typing-extensions==4.7.1
222 | tzdata==2023.3
223 | uri-template==1.3.0
224 | urllib3==1.26.16
225 | wandb==0.15.8
226 | wcwidth==0.2.6
227 | webcolors==1.13
228 | webencodings==0.5.1
229 | websocket-client==1.6.1
230 | widgetsnbextension==4.0.8
231 | xgboost==1.7.6
232 | yarl==1.9.2
233 | # The following packages are considered to be unsafe in a requirements file:
234 | pip==23.2.1
235 | setuptools==60.2.0
236 |
--------------------------------------------------------------------------------
/input/folds/stratify_fold_0.yaml:
--------------------------------------------------------------------------------
1 | train_series_ids:
2 | - 72bbd1ac3edf
3 | - d93b0c7de16b
4 | - 76237b9406d5
5 | - 2b0a1fa8eba8
6 | - 1087d7b0ff2e
7 | - 8b159a98f485
8 | - bfe41e96d12f
9 | - 8a22387617c3
10 | - 77ca4db83644
11 | - 7476c0bd18d2
12 | - 1d4569cbac0f
13 | - a9e5f5314bcb
14 | - f981a0805fd0
15 | - de6fedfb6139
16 | - 7df249527c63
17 | - d2d6b9af0553
18 | - 694faf956ebf
19 | - b364205aba43
20 | - fcca183903b7
21 | - 3d53bfea61d6
22 | - 55a47ff9dc8a
23 | - 35826366dfc7
24 | - ca732a3c37f7
25 | - fe90110788d2
26 | - d2fef7e4defd
27 | - 72ba4a8afff4
28 | - 4743bdde25df
29 | - ece2561f07e9
30 | - 90eac42a9ec9
31 | - c107b5789660
32 | - 0dee4fda51c3
33 | - 5acc9d63b5fd
34 | - 99b829cbad2d
35 | - db5e0ee1c0ab
36 | - 4b45c36f8f5a
37 | - e867b5133665
38 | - f6d2cc003183
39 | - 752900afe3a6
40 | - eef041dd50aa
41 | - 349c5562ee2c
42 | - 5ffd5e1e81ac
43 | - 29c75c018220
44 | - e2a849d283c0
45 | - dfc3ccebfdc9
46 | - d9e887091a5c
47 | - 062dbd4c95e6
48 | - e0d7b0dcf9f3
49 | - 3be2f86c3e45
50 | - c68260cc9e8f
51 | - ebb6fae8ed43
52 | - cf13ed7e457a
53 | - 03d92c9f6f8a
54 | - 6d6b9d22d48a
55 | - ca730dbf521d
56 | - 7504165f497d
57 | - b750c8c1556c
58 | - 038441c925bb
59 | - 44a41bba1ee7
60 | - a88088855de5
61 | - 6ca4f4fca6a2
62 | - 40dce6018935
63 | - 8f6f15b9f598
64 | - 0cfc06c129cc
65 | - 29d3469bd15d
66 | - 1e6717d93c1d
67 | - c289c8a823e0
68 | - 51b23d177971
69 | - dff367373725
70 | - a9a2f7fac455
71 | - 67f5fc60e494
72 | - 3318a0e3ed6f
73 | - dacc6d652e35
74 | - 207eded97727
75 | - f564985ab692
76 | - 55b7f5c99930
77 | - fa149c3c4bde
78 | - 9ddd40f2cb36
79 | - 2fbbee1a38e3
80 | - bf00506437aa
81 | - 12d01911d509
82 | - c38707ef76df
83 | - 33ceeba8918a
84 | - bb5612895813
85 | - 4ac356361be9
86 | - e0686434d029
87 | - 6ee4ade1f2bd
88 | - ccdee561ee5d
89 | - 72d2234e84e4
90 | - b737f8c78ec5
91 | - b7188813d58a
92 | - ebd76e93ec7d
93 | - a2b0a64ec9cf
94 | - 60e51cad2ffb
95 | - d3dddd3c0e00
96 | - bccf2f2819f8
97 | - ce9164297046
98 | - 1b92be89db4c
99 | - c7d693f24684
100 | - d5e47b94477e
101 | - 405df1b41f9f
102 | - 939932f1822d
103 | - 0a96f4993bd7
104 | - c8053490cec2
105 | - e1f5abb82285
106 | - 8b8b9e29171c
107 | - eec197a4bdca
108 | - 10469f6765bf
109 | - 3664fe9233f9
110 | - 1716cd4163b2
111 | - e4500e7e19e1
112 | - e30cb792a2bc
113 | - 60d31b0bec3b
114 | - d150801f3145
115 | - 8877a6586606
116 | - 844f54dcab89
117 | - 0f572d690310
118 | - 9277be28a1cf
119 | - 27f09a6a858f
120 | - e1f2a4f991cb
121 | - 655f19eabf1e
122 | - 7fd4284b7ee8
123 | - 612aa8ba44e2
124 | - 0cd1e3d0ed95
125 | - aa81faa78747
126 | - 25e2b3dd9c3b
127 | - 5aad18e7ce64
128 | - 0d0ad1e77851
129 | - 5c088d7e916c
130 | - d043c0ca71cd
131 | - ba8083a2c3b8
132 | - e6ddbaaf0639
133 | - 137b99e936ab
134 | - d8de352c2657
135 | - 653622ac8363
136 | - fbf33b1a2c10
137 | - fb223ed2278c
138 | - dc80ca623d71
139 | - 5e816f11f5c3
140 | - 5f94bb3e1bed
141 | - 483d6545417f
142 | - 3a9a9dc2cbd9
143 | - ad425f3ee76d
144 | - a81f4472c637
145 | - 3df0da2e5966
146 | - a261bc4b7470
147 | - c908a0ad3e31
148 | - 416354edd92a
149 | - 78569a801a38
150 | - 062cae666e2a
151 | - b84960841a75
152 | - d25e479ecbb7
153 | - 10f8bc1f7b07
154 | - 154fe824ed87
155 | - 99237ce045e4
156 | - 6a4cd123bd69
157 | - 1762ab70ec76
158 | - 559ffb7c166a
159 | - 599ca4ed791b
160 | - 971207c6a525
161 | - f0482490923c
162 | - 3be1545083b7
163 | - a167532acca2
164 | - 2654a87be968
165 | - f88e18cb4100
166 | - 9fbdeffbe2ba
167 | - 16fe2798ed0f
168 | - 4ab54be1a403
169 | - c6788e579967
170 | - 137771d19ca2
171 | - 91127c2b0e60
172 | - d5be621fd9aa
173 | - efbfc4526d58
174 | - 2cd2340ca14d
175 | - ee4e0e3afd3d
176 | - 5c55a5e717d6
177 | - 449766346eb1
178 | - f2c2436cf7b7
179 | - af91d9a50547
180 | - f56824b503a0
181 | - 31011ade7c0a
182 | - 6bf95a3cf91c
183 | - 188d4b7cd28b
184 | - e586cbfa7762
185 | - 4a31811f3558
186 | - 927dd0c35dfd
187 | - c5365a55ebb7
188 | - 08db4255286f
189 | - 9b9cd7b7af8c
190 | - a681f9b04b21
191 | - 361366da569e
192 | - 0ec9fc461819
193 | - 44d8c02b369e
194 | - 83fa182bec3a
195 | - 804594bb1f06
196 | - 8becc76ea607
197 | - 519ae2d858b0
198 | - ea0770830757
199 | - e8d0a37c3eba
200 | - 2b8d87addea9
201 | - f7eb179216c2
202 | - 8e32047cbc1f
203 | - c75b4b207bea
204 | - 51fdcc8d9fe7
205 | - 1c7c0bad1263
206 | - 9aed9ee12ae2
207 | - df33ae359fb5
208 | - 87a6cbb7c4ed
209 | - bdfce9ce62b9
210 | - 18a0ca03431d
211 | - 703b5efa9bc1
212 | - 0ce74d6d2106
213 | - 8fb18e36697d
214 | - 148471991ffb
215 | - e69aff66e0cb
216 | - c3072a759efb
217 | - c5d08fc3e040
218 | - c7b1283bb7eb
219 | - 0f9e60a8e56d
220 | - 390b487231ce
221 | - e11b9d69f856
222 | - 89c7daa72eee
223 | valid_series_ids:
224 | - bfa54bd26187
225 | - d515236bdeec
226 | - 04f547b8017d
227 | - 51c49c540b4e
228 | - 8898e6db816d
229 | - 18b61dd5aae8
230 | - 1319a1935f48
231 | - 1955d568d987
232 | - e34b496b84ce
233 | - 0ef7d94fde99
234 | - 280e08693c6d
235 | - 2e9ced2c7976
236 | - 292a75c0b94e
237 | - f8a8da8bdd00
238 | - 3c336d6ba566
239 | - 89bd631d1769
240 | - 3452b878e596
241 | - b4b75225b224
242 | - 9ee455e4770d
243 | - a4e48102f402
244 | - 3aceb17ef7bd
245 | - e2b60820c325
246 | - b1831c4979da
247 | - 2f7504d0f426
248 | - c7b2155a4a47
249 | - 785c9ca4eff7
250 | - 854206f602d0
251 | - 13b4d6a01d27
252 | - 05e1944c3818
253 | - aed3850f65f0
254 | - 0402a003dae9
255 | - d0f613c700f7
256 | - b7fc34995d0f
257 | - 601559e1777d
258 | - 702bb5387b1e
259 | - cca14d1966c1
260 | - a596ad0b82aa
261 | - 8a306e0890c0
262 | - 91cb6c98201f
263 | - 9a340507e36a
264 | - cfeb11428dd7
265 | - 7822ee8fe3ec
266 | - ce85771a714c
267 | - db75092f0530
268 | - 5f76965e10cf
269 | - 73fb772e50fb
270 | - def21f50dd3c
271 | - 9c91c546e095
272 | - c535634d7dcd
273 | - 4feda0596965
274 | - 808652a666c6
275 | - 5f40907ec171
276 | - 1f96b9668bdf
277 | - 3665c86afaf5
278 | - 2fc653ca75c7
279 | - a3e59c2ce3f6
280 |
--------------------------------------------------------------------------------
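Note (annotation, not a dumped file): each fold file in this directory pairs a
train_series_ids list with a disjoint valid_series_ids list, and a fold's
validation series reappear in the training lists of the other folds. A
hypothetical loading sketch (the parquet path and column name are assumptions
for illustration, not taken from the repo's scripts):

import yaml
import pandas as pd

with open("input/folds/stratify_fold_0.yaml") as f:
    fold = yaml.safe_load(f)

series = pd.read_parquet("input/train_series.parquet")   # assumed input path
train = series[series["series_id"].isin(fold["train_series_ids"])]
valid = series[series["series_id"].isin(fold["valid_series_ids"])]

--------------------------------------------------------------------------------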
/input/folds/stratify_fold_1.yaml:
--------------------------------------------------------------------------------
1 | train_series_ids:
2 | - bfa54bd26187
3 | - d515236bdeec
4 | - 04f547b8017d
5 | - 51c49c540b4e
6 | - 8898e6db816d
7 | - 18b61dd5aae8
8 | - 1319a1935f48
9 | - 1955d568d987
10 | - e34b496b84ce
11 | - 0ef7d94fde99
12 | - 280e08693c6d
13 | - 2e9ced2c7976
14 | - 292a75c0b94e
15 | - f8a8da8bdd00
16 | - 3c336d6ba566
17 | - 89bd631d1769
18 | - 3452b878e596
19 | - b4b75225b224
20 | - 9ee455e4770d
21 | - a4e48102f402
22 | - 3aceb17ef7bd
23 | - e2b60820c325
24 | - b1831c4979da
25 | - 2f7504d0f426
26 | - c7b2155a4a47
27 | - 785c9ca4eff7
28 | - 854206f602d0
29 | - 13b4d6a01d27
30 | - 05e1944c3818
31 | - aed3850f65f0
32 | - 0402a003dae9
33 | - d0f613c700f7
34 | - b7fc34995d0f
35 | - 601559e1777d
36 | - 702bb5387b1e
37 | - cca14d1966c1
38 | - a596ad0b82aa
39 | - 8a306e0890c0
40 | - 91cb6c98201f
41 | - 9a340507e36a
42 | - cfeb11428dd7
43 | - 7822ee8fe3ec
44 | - ce85771a714c
45 | - db75092f0530
46 | - 5f76965e10cf
47 | - 73fb772e50fb
48 | - bfe41e96d12f
49 | - def21f50dd3c
50 | - 9c91c546e095
51 | - c535634d7dcd
52 | - 4feda0596965
53 | - 808652a666c6
54 | - 5f40907ec171
55 | - 1f96b9668bdf
56 | - 29c75c018220
57 | - e2a849d283c0
58 | - 3665c86afaf5
59 | - e0d7b0dcf9f3
60 | - 7504165f497d
61 | - b750c8c1556c
62 | - a88088855de5
63 | - 6ca4f4fca6a2
64 | - 40dce6018935
65 | - 8f6f15b9f598
66 | - 0cfc06c129cc
67 | - 29d3469bd15d
68 | - 1e6717d93c1d
69 | - c289c8a823e0
70 | - 51b23d177971
71 | - dff367373725
72 | - a9a2f7fac455
73 | - 67f5fc60e494
74 | - 3318a0e3ed6f
75 | - dacc6d652e35
76 | - f564985ab692
77 | - 55b7f5c99930
78 | - fa149c3c4bde
79 | - 9ddd40f2cb36
80 | - 2fbbee1a38e3
81 | - bf00506437aa
82 | - 12d01911d509
83 | - c38707ef76df
84 | - 33ceeba8918a
85 | - bb5612895813
86 | - 4ac356361be9
87 | - e0686434d029
88 | - ccdee561ee5d
89 | - 72d2234e84e4
90 | - b737f8c78ec5
91 | - b7188813d58a
92 | - ebd76e93ec7d
93 | - a2b0a64ec9cf
94 | - 60e51cad2ffb
95 | - d3dddd3c0e00
96 | - bccf2f2819f8
97 | - ce9164297046
98 | - 1b92be89db4c
99 | - c7d693f24684
100 | - d5e47b94477e
101 | - 405df1b41f9f
102 | - 939932f1822d
103 | - 0a96f4993bd7
104 | - c8053490cec2
105 | - e1f5abb82285
106 | - 8b8b9e29171c
107 | - eec197a4bdca
108 | - 10469f6765bf
109 | - 3664fe9233f9
110 | - 1716cd4163b2
111 | - e4500e7e19e1
112 | - e30cb792a2bc
113 | - 60d31b0bec3b
114 | - d150801f3145
115 | - 8877a6586606
116 | - 844f54dcab89
117 | - 0f572d690310
118 | - 9277be28a1cf
119 | - 27f09a6a858f
120 | - e1f2a4f991cb
121 | - 655f19eabf1e
122 | - 7fd4284b7ee8
123 | - 612aa8ba44e2
124 | - 0cd1e3d0ed95
125 | - aa81faa78747
126 | - 25e2b3dd9c3b
127 | - 5aad18e7ce64
128 | - 0d0ad1e77851
129 | - 5c088d7e916c
130 | - d043c0ca71cd
131 | - ba8083a2c3b8
132 | - e6ddbaaf0639
133 | - 137b99e936ab
134 | - d8de352c2657
135 | - 653622ac8363
136 | - fbf33b1a2c10
137 | - fb223ed2278c
138 | - dc80ca623d71
139 | - 5e816f11f5c3
140 | - 5f94bb3e1bed
141 | - 483d6545417f
142 | - 3a9a9dc2cbd9
143 | - ad425f3ee76d
144 | - a81f4472c637
145 | - 3df0da2e5966
146 | - a261bc4b7470
147 | - c908a0ad3e31
148 | - 416354edd92a
149 | - 78569a801a38
150 | - 062cae666e2a
151 | - b84960841a75
152 | - d25e479ecbb7
153 | - 10f8bc1f7b07
154 | - 154fe824ed87
155 | - 99237ce045e4
156 | - 6a4cd123bd69
157 | - 1762ab70ec76
158 | - 559ffb7c166a
159 | - 599ca4ed791b
160 | - 971207c6a525
161 | - f0482490923c
162 | - 3be1545083b7
163 | - a167532acca2
164 | - 2654a87be968
165 | - f88e18cb4100
166 | - 9fbdeffbe2ba
167 | - 16fe2798ed0f
168 | - 4ab54be1a403
169 | - c6788e579967
170 | - 137771d19ca2
171 | - 91127c2b0e60
172 | - d5be621fd9aa
173 | - efbfc4526d58
174 | - 2cd2340ca14d
175 | - ee4e0e3afd3d
176 | - 5c55a5e717d6
177 | - 449766346eb1
178 | - f2c2436cf7b7
179 | - af91d9a50547
180 | - f56824b503a0
181 | - 31011ade7c0a
182 | - 6bf95a3cf91c
183 | - 188d4b7cd28b
184 | - e586cbfa7762
185 | - 4a31811f3558
186 | - 927dd0c35dfd
187 | - c5365a55ebb7
188 | - 08db4255286f
189 | - 9b9cd7b7af8c
190 | - a681f9b04b21
191 | - 361366da569e
192 | - 0ec9fc461819
193 | - 44d8c02b369e
194 | - 83fa182bec3a
195 | - 804594bb1f06
196 | - 8becc76ea607
197 | - 519ae2d858b0
198 | - ea0770830757
199 | - e8d0a37c3eba
200 | - 2b8d87addea9
201 | - f7eb179216c2
202 | - 8e32047cbc1f
203 | - c75b4b207bea
204 | - 51fdcc8d9fe7
205 | - 1c7c0bad1263
206 | - 9aed9ee12ae2
207 | - df33ae359fb5
208 | - 87a6cbb7c4ed
209 | - bdfce9ce62b9
210 | - 18a0ca03431d
211 | - 703b5efa9bc1
212 | - 0ce74d6d2106
213 | - 8fb18e36697d
214 | - 148471991ffb
215 | - e69aff66e0cb
216 | - c3072a759efb
217 | - 2fc653ca75c7
218 | - a3e59c2ce3f6
219 | - 0f9e60a8e56d
220 | - 390b487231ce
221 | - e11b9d69f856
222 | - 89c7daa72eee
223 | valid_series_ids:
224 | - 72bbd1ac3edf
225 | - d93b0c7de16b
226 | - 76237b9406d5
227 | - 2b0a1fa8eba8
228 | - 1087d7b0ff2e
229 | - 8b159a98f485
230 | - 8a22387617c3
231 | - 77ca4db83644
232 | - 7476c0bd18d2
233 | - 1d4569cbac0f
234 | - a9e5f5314bcb
235 | - f981a0805fd0
236 | - de6fedfb6139
237 | - 7df249527c63
238 | - d2d6b9af0553
239 | - 694faf956ebf
240 | - b364205aba43
241 | - fcca183903b7
242 | - 3d53bfea61d6
243 | - 55a47ff9dc8a
244 | - 35826366dfc7
245 | - ca732a3c37f7
246 | - fe90110788d2
247 | - d2fef7e4defd
248 | - 72ba4a8afff4
249 | - 4743bdde25df
250 | - ece2561f07e9
251 | - 90eac42a9ec9
252 | - c107b5789660
253 | - 0dee4fda51c3
254 | - 5acc9d63b5fd
255 | - 99b829cbad2d
256 | - db5e0ee1c0ab
257 | - 4b45c36f8f5a
258 | - e867b5133665
259 | - f6d2cc003183
260 | - 752900afe3a6
261 | - eef041dd50aa
262 | - 349c5562ee2c
263 | - 5ffd5e1e81ac
264 | - dfc3ccebfdc9
265 | - d9e887091a5c
266 | - 062dbd4c95e6
267 | - 3be2f86c3e45
268 | - c68260cc9e8f
269 | - ebb6fae8ed43
270 | - cf13ed7e457a
271 | - 03d92c9f6f8a
272 | - 6d6b9d22d48a
273 | - ca730dbf521d
274 | - 038441c925bb
275 | - 44a41bba1ee7
276 | - 207eded97727
277 | - 6ee4ade1f2bd
278 | - c5d08fc3e040
279 | - c7b1283bb7eb
280 |
--------------------------------------------------------------------------------
/input/folds/stratify_fold_2.yaml:
--------------------------------------------------------------------------------
1 | train_series_ids:
2 | - bfa54bd26187
3 | - d515236bdeec
4 | - 04f547b8017d
5 | - 51c49c540b4e
6 | - 8898e6db816d
7 | - 18b61dd5aae8
8 | - 1319a1935f48
9 | - 1955d568d987
10 | - e34b496b84ce
11 | - 0ef7d94fde99
12 | - 280e08693c6d
13 | - 2e9ced2c7976
14 | - 292a75c0b94e
15 | - f8a8da8bdd00
16 | - 3c336d6ba566
17 | - 89bd631d1769
18 | - 3452b878e596
19 | - b4b75225b224
20 | - 9ee455e4770d
21 | - a4e48102f402
22 | - 3aceb17ef7bd
23 | - e2b60820c325
24 | - b1831c4979da
25 | - 2f7504d0f426
26 | - c7b2155a4a47
27 | - 785c9ca4eff7
28 | - 854206f602d0
29 | - 13b4d6a01d27
30 | - 72bbd1ac3edf
31 | - 05e1944c3818
32 | - aed3850f65f0
33 | - 0402a003dae9
34 | - d0f613c700f7
35 | - b7fc34995d0f
36 | - d93b0c7de16b
37 | - 601559e1777d
38 | - 702bb5387b1e
39 | - cca14d1966c1
40 | - a596ad0b82aa
41 | - 8a306e0890c0
42 | - 91cb6c98201f
43 | - 9a340507e36a
44 | - cfeb11428dd7
45 | - 7822ee8fe3ec
46 | - 76237b9406d5
47 | - ce85771a714c
48 | - db75092f0530
49 | - 2b0a1fa8eba8
50 | - 5f76965e10cf
51 | - 1087d7b0ff2e
52 | - 8b159a98f485
53 | - 73fb772e50fb
54 | - 8a22387617c3
55 | - def21f50dd3c
56 | - 9c91c546e095
57 | - c535634d7dcd
58 | - 77ca4db83644
59 | - 7476c0bd18d2
60 | - 1d4569cbac0f
61 | - 4feda0596965
62 | - 808652a666c6
63 | - a9e5f5314bcb
64 | - 5f40907ec171
65 | - 1f96b9668bdf
66 | - f981a0805fd0
67 | - de6fedfb6139
68 | - 7df249527c63
69 | - d2d6b9af0553
70 | - 694faf956ebf
71 | - b364205aba43
72 | - fcca183903b7
73 | - 3d53bfea61d6
74 | - 55a47ff9dc8a
75 | - 35826366dfc7
76 | - ca732a3c37f7
77 | - fe90110788d2
78 | - d2fef7e4defd
79 | - 72ba4a8afff4
80 | - 4743bdde25df
81 | - ece2561f07e9
82 | - 90eac42a9ec9
83 | - c107b5789660
84 | - 0dee4fda51c3
85 | - 5acc9d63b5fd
86 | - 99b829cbad2d
87 | - db5e0ee1c0ab
88 | - 4b45c36f8f5a
89 | - e867b5133665
90 | - f6d2cc003183
91 | - 752900afe3a6
92 | - eef041dd50aa
93 | - 349c5562ee2c
94 | - 5ffd5e1e81ac
95 | - 3665c86afaf5
96 | - dfc3ccebfdc9
97 | - d9e887091a5c
98 | - 062dbd4c95e6
99 | - 3be2f86c3e45
100 | - c68260cc9e8f
101 | - ebb6fae8ed43
102 | - cf13ed7e457a
103 | - 03d92c9f6f8a
104 | - 6d6b9d22d48a
105 | - ca730dbf521d
106 | - 038441c925bb
107 | - 44a41bba1ee7
108 | - 207eded97727
109 | - 6ee4ade1f2bd
110 | - b737f8c78ec5
111 | - d3dddd3c0e00
112 | - ce9164297046
113 | - 939932f1822d
114 | - 0a96f4993bd7
115 | - e1f5abb82285
116 | - eec197a4bdca
117 | - 3664fe9233f9
118 | - 1716cd4163b2
119 | - e4500e7e19e1
120 | - 8877a6586606
121 | - 844f54dcab89
122 | - 0f572d690310
123 | - 9277be28a1cf
124 | - 27f09a6a858f
125 | - 655f19eabf1e
126 | - 612aa8ba44e2
127 | - aa81faa78747
128 | - 25e2b3dd9c3b
129 | - 5aad18e7ce64
130 | - 0d0ad1e77851
131 | - 5c088d7e916c
132 | - d043c0ca71cd
133 | - ba8083a2c3b8
134 | - e6ddbaaf0639
135 | - d8de352c2657
136 | - 653622ac8363
137 | - fbf33b1a2c10
138 | - fb223ed2278c
139 | - dc80ca623d71
140 | - 5e816f11f5c3
141 | - 483d6545417f
142 | - 3a9a9dc2cbd9
143 | - ad425f3ee76d
144 | - a81f4472c637
145 | - 3df0da2e5966
146 | - a261bc4b7470
147 | - c908a0ad3e31
148 | - 416354edd92a
149 | - 78569a801a38
150 | - 062cae666e2a
151 | - b84960841a75
152 | - d25e479ecbb7
153 | - 10f8bc1f7b07
154 | - 154fe824ed87
155 | - 99237ce045e4
156 | - 6a4cd123bd69
157 | - 1762ab70ec76
158 | - 559ffb7c166a
159 | - 599ca4ed791b
160 | - 971207c6a525
161 | - f0482490923c
162 | - 3be1545083b7
163 | - a167532acca2
164 | - 2654a87be968
165 | - f88e18cb4100
166 | - 9fbdeffbe2ba
167 | - 16fe2798ed0f
168 | - 4ab54be1a403
169 | - c6788e579967
170 | - 137771d19ca2
171 | - 91127c2b0e60
172 | - d5be621fd9aa
173 | - efbfc4526d58
174 | - 2cd2340ca14d
175 | - ee4e0e3afd3d
176 | - 5c55a5e717d6
177 | - 449766346eb1
178 | - f2c2436cf7b7
179 | - af91d9a50547
180 | - f56824b503a0
181 | - 31011ade7c0a
182 | - 6bf95a3cf91c
183 | - 188d4b7cd28b
184 | - e586cbfa7762
185 | - 4a31811f3558
186 | - 927dd0c35dfd
187 | - c5365a55ebb7
188 | - 08db4255286f
189 | - 9b9cd7b7af8c
190 | - a681f9b04b21
191 | - 361366da569e
192 | - 0ec9fc461819
193 | - 44d8c02b369e
194 | - 83fa182bec3a
195 | - 804594bb1f06
196 | - 8becc76ea607
197 | - 519ae2d858b0
198 | - ea0770830757
199 | - e8d0a37c3eba
200 | - 2b8d87addea9
201 | - f7eb179216c2
202 | - 8e32047cbc1f
203 | - c75b4b207bea
204 | - 51fdcc8d9fe7
205 | - 1c7c0bad1263
206 | - 9aed9ee12ae2
207 | - df33ae359fb5
208 | - 87a6cbb7c4ed
209 | - bdfce9ce62b9
210 | - 18a0ca03431d
211 | - 703b5efa9bc1
212 | - 0ce74d6d2106
213 | - 8fb18e36697d
214 | - 148471991ffb
215 | - e69aff66e0cb
216 | - c3072a759efb
217 | - 2fc653ca75c7
218 | - a3e59c2ce3f6
219 | - c5d08fc3e040
220 | - c7b1283bb7eb
221 | - 390b487231ce
222 | - e11b9d69f856
223 | - 89c7daa72eee
224 | valid_series_ids:
225 | - bfe41e96d12f
226 | - 29c75c018220
227 | - e2a849d283c0
228 | - e0d7b0dcf9f3
229 | - 7504165f497d
230 | - b750c8c1556c
231 | - a88088855de5
232 | - 6ca4f4fca6a2
233 | - 40dce6018935
234 | - 8f6f15b9f598
235 | - 0cfc06c129cc
236 | - 29d3469bd15d
237 | - 1e6717d93c1d
238 | - c289c8a823e0
239 | - 51b23d177971
240 | - dff367373725
241 | - a9a2f7fac455
242 | - 67f5fc60e494
243 | - 3318a0e3ed6f
244 | - dacc6d652e35
245 | - f564985ab692
246 | - 55b7f5c99930
247 | - fa149c3c4bde
248 | - 9ddd40f2cb36
249 | - 2fbbee1a38e3
250 | - bf00506437aa
251 | - 12d01911d509
252 | - c38707ef76df
253 | - 33ceeba8918a
254 | - bb5612895813
255 | - 4ac356361be9
256 | - e0686434d029
257 | - ccdee561ee5d
258 | - 72d2234e84e4
259 | - b7188813d58a
260 | - ebd76e93ec7d
261 | - a2b0a64ec9cf
262 | - 60e51cad2ffb
263 | - bccf2f2819f8
264 | - 1b92be89db4c
265 | - c7d693f24684
266 | - d5e47b94477e
267 | - 405df1b41f9f
268 | - c8053490cec2
269 | - 8b8b9e29171c
270 | - 10469f6765bf
271 | - e30cb792a2bc
272 | - 60d31b0bec3b
273 | - d150801f3145
274 | - e1f2a4f991cb
275 | - 7fd4284b7ee8
276 | - 0cd1e3d0ed95
277 | - 137b99e936ab
278 | - 5f94bb3e1bed
279 | - 0f9e60a8e56d
280 |
--------------------------------------------------------------------------------
/input/folds/stratify_fold_3.yaml:
--------------------------------------------------------------------------------
1 | train_series_ids:
2 | - bfa54bd26187
3 | - d515236bdeec
4 | - 04f547b8017d
5 | - 51c49c540b4e
6 | - 8898e6db816d
7 | - 18b61dd5aae8
8 | - 1319a1935f48
9 | - 1955d568d987
10 | - e34b496b84ce
11 | - 0ef7d94fde99
12 | - 280e08693c6d
13 | - 2e9ced2c7976
14 | - 292a75c0b94e
15 | - f8a8da8bdd00
16 | - 3c336d6ba566
17 | - 89bd631d1769
18 | - 3452b878e596
19 | - b4b75225b224
20 | - 9ee455e4770d
21 | - a4e48102f402
22 | - 3aceb17ef7bd
23 | - e2b60820c325
24 | - b1831c4979da
25 | - 2f7504d0f426
26 | - c7b2155a4a47
27 | - 785c9ca4eff7
28 | - 854206f602d0
29 | - 13b4d6a01d27
30 | - 72bbd1ac3edf
31 | - 05e1944c3818
32 | - aed3850f65f0
33 | - 0402a003dae9
34 | - d0f613c700f7
35 | - b7fc34995d0f
36 | - d93b0c7de16b
37 | - 601559e1777d
38 | - 702bb5387b1e
39 | - cca14d1966c1
40 | - a596ad0b82aa
41 | - 8a306e0890c0
42 | - 91cb6c98201f
43 | - 9a340507e36a
44 | - cfeb11428dd7
45 | - 7822ee8fe3ec
46 | - 76237b9406d5
47 | - ce85771a714c
48 | - db75092f0530
49 | - 2b0a1fa8eba8
50 | - 5f76965e10cf
51 | - 1087d7b0ff2e
52 | - 8b159a98f485
53 | - 73fb772e50fb
54 | - bfe41e96d12f
55 | - 8a22387617c3
56 | - def21f50dd3c
57 | - 9c91c546e095
58 | - c535634d7dcd
59 | - 77ca4db83644
60 | - 7476c0bd18d2
61 | - 1d4569cbac0f
62 | - 4feda0596965
63 | - 808652a666c6
64 | - a9e5f5314bcb
65 | - 5f40907ec171
66 | - 1f96b9668bdf
67 | - f981a0805fd0
68 | - de6fedfb6139
69 | - 7df249527c63
70 | - d2d6b9af0553
71 | - 694faf956ebf
72 | - b364205aba43
73 | - fcca183903b7
74 | - 3d53bfea61d6
75 | - 55a47ff9dc8a
76 | - 35826366dfc7
77 | - ca732a3c37f7
78 | - fe90110788d2
79 | - d2fef7e4defd
80 | - 72ba4a8afff4
81 | - 4743bdde25df
82 | - ece2561f07e9
83 | - 90eac42a9ec9
84 | - c107b5789660
85 | - 0dee4fda51c3
86 | - 5acc9d63b5fd
87 | - 99b829cbad2d
88 | - db5e0ee1c0ab
89 | - 4b45c36f8f5a
90 | - e867b5133665
91 | - f6d2cc003183
92 | - 752900afe3a6
93 | - eef041dd50aa
94 | - 349c5562ee2c
95 | - 5ffd5e1e81ac
96 | - 29c75c018220
97 | - e2a849d283c0
98 | - 3665c86afaf5
99 | - dfc3ccebfdc9
100 | - d9e887091a5c
101 | - 062dbd4c95e6
102 | - e0d7b0dcf9f3
103 | - 3be2f86c3e45
104 | - c68260cc9e8f
105 | - ebb6fae8ed43
106 | - cf13ed7e457a
107 | - 03d92c9f6f8a
108 | - 6d6b9d22d48a
109 | - ca730dbf521d
110 | - 7504165f497d
111 | - b750c8c1556c
112 | - 038441c925bb
113 | - 44a41bba1ee7
114 | - a88088855de5
115 | - 6ca4f4fca6a2
116 | - 40dce6018935
117 | - 8f6f15b9f598
118 | - 0cfc06c129cc
119 | - 29d3469bd15d
120 | - 1e6717d93c1d
121 | - c289c8a823e0
122 | - 51b23d177971
123 | - dff367373725
124 | - a9a2f7fac455
125 | - 67f5fc60e494
126 | - 3318a0e3ed6f
127 | - dacc6d652e35
128 | - 207eded97727
129 | - f564985ab692
130 | - 55b7f5c99930
131 | - fa149c3c4bde
132 | - 9ddd40f2cb36
133 | - 2fbbee1a38e3
134 | - bf00506437aa
135 | - 12d01911d509
136 | - c38707ef76df
137 | - 33ceeba8918a
138 | - bb5612895813
139 | - 4ac356361be9
140 | - e0686434d029
141 | - 6ee4ade1f2bd
142 | - ccdee561ee5d
143 | - 72d2234e84e4
144 | - b7188813d58a
145 | - ebd76e93ec7d
146 | - a2b0a64ec9cf
147 | - 60e51cad2ffb
148 | - bccf2f2819f8
149 | - 1b92be89db4c
150 | - c7d693f24684
151 | - d5e47b94477e
152 | - 405df1b41f9f
153 | - c8053490cec2
154 | - 8b8b9e29171c
155 | - 10469f6765bf
156 | - e30cb792a2bc
157 | - 60d31b0bec3b
158 | - d150801f3145
159 | - e1f2a4f991cb
160 | - 7fd4284b7ee8
161 | - 0cd1e3d0ed95
162 | - 137b99e936ab
163 | - 5f94bb3e1bed
164 | - 416354edd92a
165 | - 062cae666e2a
166 | - 6a4cd123bd69
167 | - 559ffb7c166a
168 | - 971207c6a525
169 | - 3be1545083b7
170 | - f88e18cb4100
171 | - 137771d19ca2
172 | - d5be621fd9aa
173 | - efbfc4526d58
174 | - 2cd2340ca14d
175 | - ee4e0e3afd3d
176 | - 5c55a5e717d6
177 | - 449766346eb1
178 | - f2c2436cf7b7
179 | - af91d9a50547
180 | - f56824b503a0
181 | - 31011ade7c0a
182 | - 6bf95a3cf91c
183 | - 188d4b7cd28b
184 | - e586cbfa7762
185 | - 4a31811f3558
186 | - 927dd0c35dfd
187 | - c5365a55ebb7
188 | - 08db4255286f
189 | - 9b9cd7b7af8c
190 | - a681f9b04b21
191 | - 361366da569e
192 | - 0ec9fc461819
193 | - 44d8c02b369e
194 | - 83fa182bec3a
195 | - 804594bb1f06
196 | - 8becc76ea607
197 | - 519ae2d858b0
198 | - ea0770830757
199 | - e8d0a37c3eba
200 | - 2b8d87addea9
201 | - f7eb179216c2
202 | - 8e32047cbc1f
203 | - c75b4b207bea
204 | - 51fdcc8d9fe7
205 | - 1c7c0bad1263
206 | - 9aed9ee12ae2
207 | - df33ae359fb5
208 | - 87a6cbb7c4ed
209 | - bdfce9ce62b9
210 | - 18a0ca03431d
211 | - 703b5efa9bc1
212 | - 0ce74d6d2106
213 | - 8fb18e36697d
214 | - 148471991ffb
215 | - e69aff66e0cb
216 | - c3072a759efb
217 | - 2fc653ca75c7
218 | - a3e59c2ce3f6
219 | - c5d08fc3e040
220 | - c7b1283bb7eb
221 | - 0f9e60a8e56d
222 | - e11b9d69f856
223 | - 89c7daa72eee
224 | valid_series_ids:
225 | - b737f8c78ec5
226 | - d3dddd3c0e00
227 | - ce9164297046
228 | - 939932f1822d
229 | - 0a96f4993bd7
230 | - e1f5abb82285
231 | - eec197a4bdca
232 | - 3664fe9233f9
233 | - 1716cd4163b2
234 | - e4500e7e19e1
235 | - 8877a6586606
236 | - 844f54dcab89
237 | - 0f572d690310
238 | - 9277be28a1cf
239 | - 27f09a6a858f
240 | - 655f19eabf1e
241 | - 612aa8ba44e2
242 | - aa81faa78747
243 | - 25e2b3dd9c3b
244 | - 5aad18e7ce64
245 | - 0d0ad1e77851
246 | - 5c088d7e916c
247 | - d043c0ca71cd
248 | - ba8083a2c3b8
249 | - e6ddbaaf0639
250 | - d8de352c2657
251 | - 653622ac8363
252 | - fbf33b1a2c10
253 | - fb223ed2278c
254 | - dc80ca623d71
255 | - 5e816f11f5c3
256 | - 483d6545417f
257 | - 3a9a9dc2cbd9
258 | - ad425f3ee76d
259 | - a81f4472c637
260 | - 3df0da2e5966
261 | - a261bc4b7470
262 | - c908a0ad3e31
263 | - 78569a801a38
264 | - b84960841a75
265 | - d25e479ecbb7
266 | - 10f8bc1f7b07
267 | - 154fe824ed87
268 | - 99237ce045e4
269 | - 1762ab70ec76
270 | - 599ca4ed791b
271 | - f0482490923c
272 | - a167532acca2
273 | - 2654a87be968
274 | - 9fbdeffbe2ba
275 | - 16fe2798ed0f
276 | - 4ab54be1a403
277 | - c6788e579967
278 | - 91127c2b0e60
279 | - 390b487231ce
280 |
--------------------------------------------------------------------------------
/input/folds/stratify_fold_4.yaml:
--------------------------------------------------------------------------------
1 | train_series_ids:
2 | - bfa54bd26187
3 | - d515236bdeec
4 | - 04f547b8017d
5 | - 51c49c540b4e
6 | - 8898e6db816d
7 | - 18b61dd5aae8
8 | - 1319a1935f48
9 | - 1955d568d987
10 | - e34b496b84ce
11 | - 0ef7d94fde99
12 | - 280e08693c6d
13 | - 2e9ced2c7976
14 | - 292a75c0b94e
15 | - f8a8da8bdd00
16 | - 3c336d6ba566
17 | - 89bd631d1769
18 | - 3452b878e596
19 | - b4b75225b224
20 | - 9ee455e4770d
21 | - a4e48102f402
22 | - 3aceb17ef7bd
23 | - e2b60820c325
24 | - b1831c4979da
25 | - 2f7504d0f426
26 | - c7b2155a4a47
27 | - 785c9ca4eff7
28 | - 854206f602d0
29 | - 13b4d6a01d27
30 | - 72bbd1ac3edf
31 | - 05e1944c3818
32 | - aed3850f65f0
33 | - 0402a003dae9
34 | - d0f613c700f7
35 | - b7fc34995d0f
36 | - d93b0c7de16b
37 | - 601559e1777d
38 | - 702bb5387b1e
39 | - cca14d1966c1
40 | - a596ad0b82aa
41 | - 8a306e0890c0
42 | - 91cb6c98201f
43 | - 9a340507e36a
44 | - cfeb11428dd7
45 | - 7822ee8fe3ec
46 | - 76237b9406d5
47 | - ce85771a714c
48 | - db75092f0530
49 | - 2b0a1fa8eba8
50 | - 5f76965e10cf
51 | - 1087d7b0ff2e
52 | - 8b159a98f485
53 | - 73fb772e50fb
54 | - bfe41e96d12f
55 | - 8a22387617c3
56 | - def21f50dd3c
57 | - 9c91c546e095
58 | - c535634d7dcd
59 | - 77ca4db83644
60 | - 7476c0bd18d2
61 | - 1d4569cbac0f
62 | - 4feda0596965
63 | - 808652a666c6
64 | - a9e5f5314bcb
65 | - 5f40907ec171
66 | - 1f96b9668bdf
67 | - f981a0805fd0
68 | - de6fedfb6139
69 | - 7df249527c63
70 | - d2d6b9af0553
71 | - 694faf956ebf
72 | - b364205aba43
73 | - fcca183903b7
74 | - 3d53bfea61d6
75 | - 55a47ff9dc8a
76 | - 35826366dfc7
77 | - ca732a3c37f7
78 | - fe90110788d2
79 | - d2fef7e4defd
80 | - 72ba4a8afff4
81 | - 4743bdde25df
82 | - ece2561f07e9
83 | - 90eac42a9ec9
84 | - c107b5789660
85 | - 0dee4fda51c3
86 | - 5acc9d63b5fd
87 | - 99b829cbad2d
88 | - db5e0ee1c0ab
89 | - 4b45c36f8f5a
90 | - e867b5133665
91 | - f6d2cc003183
92 | - 752900afe3a6
93 | - eef041dd50aa
94 | - 349c5562ee2c
95 | - 5ffd5e1e81ac
96 | - 29c75c018220
97 | - e2a849d283c0
98 | - 3665c86afaf5
99 | - dfc3ccebfdc9
100 | - d9e887091a5c
101 | - 062dbd4c95e6
102 | - e0d7b0dcf9f3
103 | - 3be2f86c3e45
104 | - c68260cc9e8f
105 | - ebb6fae8ed43
106 | - cf13ed7e457a
107 | - 03d92c9f6f8a
108 | - 6d6b9d22d48a
109 | - ca730dbf521d
110 | - 7504165f497d
111 | - b750c8c1556c
112 | - 038441c925bb
113 | - 44a41bba1ee7
114 | - a88088855de5
115 | - 6ca4f4fca6a2
116 | - 40dce6018935
117 | - 8f6f15b9f598
118 | - 0cfc06c129cc
119 | - 29d3469bd15d
120 | - 1e6717d93c1d
121 | - c289c8a823e0
122 | - 51b23d177971
123 | - dff367373725
124 | - a9a2f7fac455
125 | - 67f5fc60e494
126 | - 3318a0e3ed6f
127 | - dacc6d652e35
128 | - 207eded97727
129 | - f564985ab692
130 | - 55b7f5c99930
131 | - fa149c3c4bde
132 | - 9ddd40f2cb36
133 | - 2fbbee1a38e3
134 | - bf00506437aa
135 | - 12d01911d509
136 | - c38707ef76df
137 | - 33ceeba8918a
138 | - bb5612895813
139 | - 4ac356361be9
140 | - e0686434d029
141 | - 6ee4ade1f2bd
142 | - ccdee561ee5d
143 | - 72d2234e84e4
144 | - b737f8c78ec5
145 | - b7188813d58a
146 | - ebd76e93ec7d
147 | - a2b0a64ec9cf
148 | - 60e51cad2ffb
149 | - d3dddd3c0e00
150 | - bccf2f2819f8
151 | - ce9164297046
152 | - 1b92be89db4c
153 | - c7d693f24684
154 | - d5e47b94477e
155 | - 405df1b41f9f
156 | - 939932f1822d
157 | - 0a96f4993bd7
158 | - c8053490cec2
159 | - e1f5abb82285
160 | - 8b8b9e29171c
161 | - eec197a4bdca
162 | - 10469f6765bf
163 | - 3664fe9233f9
164 | - 1716cd4163b2
165 | - e4500e7e19e1
166 | - e30cb792a2bc
167 | - 60d31b0bec3b
168 | - d150801f3145
169 | - 8877a6586606
170 | - 844f54dcab89
171 | - 0f572d690310
172 | - 9277be28a1cf
173 | - 27f09a6a858f
174 | - e1f2a4f991cb
175 | - 655f19eabf1e
176 | - 7fd4284b7ee8
177 | - 612aa8ba44e2
178 | - 0cd1e3d0ed95
179 | - aa81faa78747
180 | - 25e2b3dd9c3b
181 | - 5aad18e7ce64
182 | - 0d0ad1e77851
183 | - 5c088d7e916c
184 | - d043c0ca71cd
185 | - ba8083a2c3b8
186 | - e6ddbaaf0639
187 | - 137b99e936ab
188 | - d8de352c2657
189 | - 653622ac8363
190 | - fbf33b1a2c10
191 | - fb223ed2278c
192 | - dc80ca623d71
193 | - 5e816f11f5c3
194 | - 5f94bb3e1bed
195 | - 483d6545417f
196 | - 3a9a9dc2cbd9
197 | - ad425f3ee76d
198 | - a81f4472c637
199 | - 3df0da2e5966
200 | - a261bc4b7470
201 | - c908a0ad3e31
202 | - 78569a801a38
203 | - b84960841a75
204 | - d25e479ecbb7
205 | - 10f8bc1f7b07
206 | - 154fe824ed87
207 | - 99237ce045e4
208 | - 1762ab70ec76
209 | - 599ca4ed791b
210 | - f0482490923c
211 | - a167532acca2
212 | - 2654a87be968
213 | - 9fbdeffbe2ba
214 | - 16fe2798ed0f
215 | - 4ab54be1a403
216 | - c6788e579967
217 | - 91127c2b0e60
218 | - 2fc653ca75c7
219 | - a3e59c2ce3f6
220 | - c5d08fc3e040
221 | - c7b1283bb7eb
222 | - 0f9e60a8e56d
223 | - 390b487231ce
224 | valid_series_ids:
225 | - 416354edd92a
226 | - 062cae666e2a
227 | - 6a4cd123bd69
228 | - 559ffb7c166a
229 | - 971207c6a525
230 | - 3be1545083b7
231 | - f88e18cb4100
232 | - 137771d19ca2
233 | - d5be621fd9aa
234 | - efbfc4526d58
235 | - 2cd2340ca14d
236 | - ee4e0e3afd3d
237 | - 5c55a5e717d6
238 | - 449766346eb1
239 | - f2c2436cf7b7
240 | - af91d9a50547
241 | - f56824b503a0
242 | - 31011ade7c0a
243 | - 6bf95a3cf91c
244 | - 188d4b7cd28b
245 | - e586cbfa7762
246 | - 4a31811f3558
247 | - 927dd0c35dfd
248 | - c5365a55ebb7
249 | - 08db4255286f
250 | - 9b9cd7b7af8c
251 | - a681f9b04b21
252 | - 361366da569e
253 | - 0ec9fc461819
254 | - 44d8c02b369e
255 | - 83fa182bec3a
256 | - 804594bb1f06
257 | - 8becc76ea607
258 | - 519ae2d858b0
259 | - ea0770830757
260 | - e8d0a37c3eba
261 | - 2b8d87addea9
262 | - f7eb179216c2
263 | - 8e32047cbc1f
264 | - c75b4b207bea
265 | - 51fdcc8d9fe7
266 | - 1c7c0bad1263
267 | - 9aed9ee12ae2
268 | - df33ae359fb5
269 | - 87a6cbb7c4ed
270 | - bdfce9ce62b9
271 | - 18a0ca03431d
272 | - 703b5efa9bc1
273 | - 0ce74d6d2106
274 | - 8fb18e36697d
275 | - 148471991ffb
276 | - e69aff66e0cb
277 | - c3072a759efb
278 | - e11b9d69f856
279 | - 89c7daa72eee
280 |
--------------------------------------------------------------------------------
/kami/run/conf/split/stratify_fold_0.yaml:
--------------------------------------------------------------------------------
1 | train_series_ids:
2 | - 72bbd1ac3edf
3 | - d93b0c7de16b
4 | - 76237b9406d5
5 | - 2b0a1fa8eba8
6 | - 1087d7b0ff2e
7 | - 8b159a98f485
8 | - bfe41e96d12f
9 | - 8a22387617c3
10 | - 77ca4db83644
11 | - 7476c0bd18d2
12 | - 1d4569cbac0f
13 | - a9e5f5314bcb
14 | - f981a0805fd0
15 | - de6fedfb6139
16 | - 7df249527c63
17 | - d2d6b9af0553
18 | - 694faf956ebf
19 | - b364205aba43
20 | - fcca183903b7
21 | - 3d53bfea61d6
22 | - 55a47ff9dc8a
23 | - 35826366dfc7
24 | - ca732a3c37f7
25 | - fe90110788d2
26 | - d2fef7e4defd
27 | - 72ba4a8afff4
28 | - 4743bdde25df
29 | - ece2561f07e9
30 | - 90eac42a9ec9
31 | - c107b5789660
32 | - 0dee4fda51c3
33 | - 5acc9d63b5fd
34 | - 99b829cbad2d
35 | - db5e0ee1c0ab
36 | - 4b45c36f8f5a
37 | - e867b5133665
38 | - f6d2cc003183
39 | - 752900afe3a6
40 | - eef041dd50aa
41 | - 349c5562ee2c
42 | - 5ffd5e1e81ac
43 | - 29c75c018220
44 | - e2a849d283c0
45 | - dfc3ccebfdc9
46 | - d9e887091a5c
47 | - 062dbd4c95e6
48 | - e0d7b0dcf9f3
49 | - 3be2f86c3e45
50 | - c68260cc9e8f
51 | - ebb6fae8ed43
52 | - cf13ed7e457a
53 | - 03d92c9f6f8a
54 | - 6d6b9d22d48a
55 | - ca730dbf521d
56 | - 7504165f497d
57 | - b750c8c1556c
58 | - 038441c925bb
59 | - 44a41bba1ee7
60 | - a88088855de5
61 | - 6ca4f4fca6a2
62 | - 40dce6018935
63 | - 8f6f15b9f598
64 | - 0cfc06c129cc
65 | - 29d3469bd15d
66 | - 1e6717d93c1d
67 | - c289c8a823e0
68 | - 51b23d177971
69 | - dff367373725
70 | - a9a2f7fac455
71 | - 67f5fc60e494
72 | - 3318a0e3ed6f
73 | - dacc6d652e35
74 | - 207eded97727
75 | - f564985ab692
76 | - 55b7f5c99930
77 | - fa149c3c4bde
78 | - 9ddd40f2cb36
79 | - 2fbbee1a38e3
80 | - bf00506437aa
81 | - 12d01911d509
82 | - c38707ef76df
83 | - 33ceeba8918a
84 | - bb5612895813
85 | - 4ac356361be9
86 | - e0686434d029
87 | - 6ee4ade1f2bd
88 | - ccdee561ee5d
89 | - 72d2234e84e4
90 | - b737f8c78ec5
91 | - b7188813d58a
92 | - ebd76e93ec7d
93 | - a2b0a64ec9cf
94 | - 60e51cad2ffb
95 | - d3dddd3c0e00
96 | - bccf2f2819f8
97 | - ce9164297046
98 | - 1b92be89db4c
99 | - c7d693f24684
100 | - d5e47b94477e
101 | - 405df1b41f9f
102 | - 939932f1822d
103 | - 0a96f4993bd7
104 | - c8053490cec2
105 | - e1f5abb82285
106 | - 8b8b9e29171c
107 | - eec197a4bdca
108 | - 10469f6765bf
109 | - 3664fe9233f9
110 | - 1716cd4163b2
111 | - e4500e7e19e1
112 | - e30cb792a2bc
113 | - 60d31b0bec3b
114 | - d150801f3145
115 | - 8877a6586606
116 | - 844f54dcab89
117 | - 0f572d690310
118 | - 9277be28a1cf
119 | - 27f09a6a858f
120 | - e1f2a4f991cb
121 | - 655f19eabf1e
122 | - 7fd4284b7ee8
123 | - 612aa8ba44e2
124 | - 0cd1e3d0ed95
125 | - aa81faa78747
126 | - 25e2b3dd9c3b
127 | - 5aad18e7ce64
128 | - 0d0ad1e77851
129 | - 5c088d7e916c
130 | - d043c0ca71cd
131 | - ba8083a2c3b8
132 | - e6ddbaaf0639
133 | - 137b99e936ab
134 | - d8de352c2657
135 | - 653622ac8363
136 | - fbf33b1a2c10
137 | - fb223ed2278c
138 | - dc80ca623d71
139 | - 5e816f11f5c3
140 | - 5f94bb3e1bed
141 | - 483d6545417f
142 | - 3a9a9dc2cbd9
143 | - ad425f3ee76d
144 | - a81f4472c637
145 | - 3df0da2e5966
146 | - a261bc4b7470
147 | - c908a0ad3e31
148 | - 416354edd92a
149 | - 78569a801a38
150 | - 062cae666e2a
151 | - b84960841a75
152 | - d25e479ecbb7
153 | - 10f8bc1f7b07
154 | - 154fe824ed87
155 | - 99237ce045e4
156 | - 6a4cd123bd69
157 | - 1762ab70ec76
158 | - 559ffb7c166a
159 | - 599ca4ed791b
160 | - 971207c6a525
161 | - f0482490923c
162 | - 3be1545083b7
163 | - a167532acca2
164 | - 2654a87be968
165 | - f88e18cb4100
166 | - 9fbdeffbe2ba
167 | - 16fe2798ed0f
168 | - 4ab54be1a403
169 | - c6788e579967
170 | - 137771d19ca2
171 | - 91127c2b0e60
172 | - d5be621fd9aa
173 | - efbfc4526d58
174 | - 2cd2340ca14d
175 | - ee4e0e3afd3d
176 | - 5c55a5e717d6
177 | - 449766346eb1
178 | - f2c2436cf7b7
179 | - af91d9a50547
180 | - f56824b503a0
181 | - 31011ade7c0a
182 | - 6bf95a3cf91c
183 | - 188d4b7cd28b
184 | - e586cbfa7762
185 | - 4a31811f3558
186 | - 927dd0c35dfd
187 | - c5365a55ebb7
188 | - 08db4255286f
189 | - 9b9cd7b7af8c
190 | - a681f9b04b21
191 | - 361366da569e
192 | - 0ec9fc461819
193 | - 44d8c02b369e
194 | - 83fa182bec3a
195 | - 804594bb1f06
196 | - 8becc76ea607
197 | - 519ae2d858b0
198 | - ea0770830757
199 | - e8d0a37c3eba
200 | - 2b8d87addea9
201 | - f7eb179216c2
202 | - 8e32047cbc1f
203 | - c75b4b207bea
204 | - 51fdcc8d9fe7
205 | - 1c7c0bad1263
206 | - 9aed9ee12ae2
207 | - df33ae359fb5
208 | - 87a6cbb7c4ed
209 | - bdfce9ce62b9
210 | - 18a0ca03431d
211 | - 703b5efa9bc1
212 | - 0ce74d6d2106
213 | - 8fb18e36697d
214 | - 148471991ffb
215 | - e69aff66e0cb
216 | - c3072a759efb
217 | - c5d08fc3e040
218 | - c7b1283bb7eb
219 | - 0f9e60a8e56d
220 | - 390b487231ce
221 | - e11b9d69f856
222 | - 89c7daa72eee
223 | valid_series_ids:
224 | - bfa54bd26187
225 | - d515236bdeec
226 | - 04f547b8017d
227 | - 51c49c540b4e
228 | - 8898e6db816d
229 | - 18b61dd5aae8
230 | - 1319a1935f48
231 | - 1955d568d987
232 | - e34b496b84ce
233 | - 0ef7d94fde99
234 | - 280e08693c6d
235 | - 2e9ced2c7976
236 | - 292a75c0b94e
237 | - f8a8da8bdd00
238 | - 3c336d6ba566
239 | - 89bd631d1769
240 | - 3452b878e596
241 | - b4b75225b224
242 | - 9ee455e4770d
243 | - a4e48102f402
244 | - 3aceb17ef7bd
245 | - e2b60820c325
246 | - b1831c4979da
247 | - 2f7504d0f426
248 | - c7b2155a4a47
249 | - 785c9ca4eff7
250 | - 854206f602d0
251 | - 13b4d6a01d27
252 | - 05e1944c3818
253 | - aed3850f65f0
254 | - 0402a003dae9
255 | - d0f613c700f7
256 | - b7fc34995d0f
257 | - 601559e1777d
258 | - 702bb5387b1e
259 | - cca14d1966c1
260 | - a596ad0b82aa
261 | - 8a306e0890c0
262 | - 91cb6c98201f
263 | - 9a340507e36a
264 | - cfeb11428dd7
265 | - 7822ee8fe3ec
266 | - ce85771a714c
267 | - db75092f0530
268 | - 5f76965e10cf
269 | - 73fb772e50fb
270 | - def21f50dd3c
271 | - 9c91c546e095
272 | - c535634d7dcd
273 | - 4feda0596965
274 | - 808652a666c6
275 | - 5f40907ec171
276 | - 1f96b9668bdf
277 | - 3665c86afaf5
278 | - 2fc653ca75c7
279 | - a3e59c2ce3f6
280 |
--------------------------------------------------------------------------------
/kami/run/conf/split/stratify_fold_1.yaml:
--------------------------------------------------------------------------------
1 | train_series_ids:
2 | - bfa54bd26187
3 | - d515236bdeec
4 | - 04f547b8017d
5 | - 51c49c540b4e
6 | - 8898e6db816d
7 | - 18b61dd5aae8
8 | - 1319a1935f48
9 | - 1955d568d987
10 | - e34b496b84ce
11 | - 0ef7d94fde99
12 | - 280e08693c6d
13 | - 2e9ced2c7976
14 | - 292a75c0b94e
15 | - f8a8da8bdd00
16 | - 3c336d6ba566
17 | - 89bd631d1769
18 | - 3452b878e596
19 | - b4b75225b224
20 | - 9ee455e4770d
21 | - a4e48102f402
22 | - 3aceb17ef7bd
23 | - e2b60820c325
24 | - b1831c4979da
25 | - 2f7504d0f426
26 | - c7b2155a4a47
27 | - 785c9ca4eff7
28 | - 854206f602d0
29 | - 13b4d6a01d27
30 | - 05e1944c3818
31 | - aed3850f65f0
32 | - 0402a003dae9
33 | - d0f613c700f7
34 | - b7fc34995d0f
35 | - 601559e1777d
36 | - 702bb5387b1e
37 | - cca14d1966c1
38 | - a596ad0b82aa
39 | - 8a306e0890c0
40 | - 91cb6c98201f
41 | - 9a340507e36a
42 | - cfeb11428dd7
43 | - 7822ee8fe3ec
44 | - ce85771a714c
45 | - db75092f0530
46 | - 5f76965e10cf
47 | - 73fb772e50fb
48 | - bfe41e96d12f
49 | - def21f50dd3c
50 | - 9c91c546e095
51 | - c535634d7dcd
52 | - 4feda0596965
53 | - 808652a666c6
54 | - 5f40907ec171
55 | - 1f96b9668bdf
56 | - 29c75c018220
57 | - e2a849d283c0
58 | - 3665c86afaf5
59 | - e0d7b0dcf9f3
60 | - 7504165f497d
61 | - b750c8c1556c
62 | - a88088855de5
63 | - 6ca4f4fca6a2
64 | - 40dce6018935
65 | - 8f6f15b9f598
66 | - 0cfc06c129cc
67 | - 29d3469bd15d
68 | - 1e6717d93c1d
69 | - c289c8a823e0
70 | - 51b23d177971
71 | - dff367373725
72 | - a9a2f7fac455
73 | - 67f5fc60e494
74 | - 3318a0e3ed6f
75 | - dacc6d652e35
76 | - f564985ab692
77 | - 55b7f5c99930
78 | - fa149c3c4bde
79 | - 9ddd40f2cb36
80 | - 2fbbee1a38e3
81 | - bf00506437aa
82 | - 12d01911d509
83 | - c38707ef76df
84 | - 33ceeba8918a
85 | - bb5612895813
86 | - 4ac356361be9
87 | - e0686434d029
88 | - ccdee561ee5d
89 | - 72d2234e84e4
90 | - b737f8c78ec5
91 | - b7188813d58a
92 | - ebd76e93ec7d
93 | - a2b0a64ec9cf
94 | - 60e51cad2ffb
95 | - d3dddd3c0e00
96 | - bccf2f2819f8
97 | - ce9164297046
98 | - 1b92be89db4c
99 | - c7d693f24684
100 | - d5e47b94477e
101 | - 405df1b41f9f
102 | - 939932f1822d
103 | - 0a96f4993bd7
104 | - c8053490cec2
105 | - e1f5abb82285
106 | - 8b8b9e29171c
107 | - eec197a4bdca
108 | - 10469f6765bf
109 | - 3664fe9233f9
110 | - 1716cd4163b2
111 | - e4500e7e19e1
112 | - e30cb792a2bc
113 | - 60d31b0bec3b
114 | - d150801f3145
115 | - 8877a6586606
116 | - 844f54dcab89
117 | - 0f572d690310
118 | - 9277be28a1cf
119 | - 27f09a6a858f
120 | - e1f2a4f991cb
121 | - 655f19eabf1e
122 | - 7fd4284b7ee8
123 | - 612aa8ba44e2
124 | - 0cd1e3d0ed95
125 | - aa81faa78747
126 | - 25e2b3dd9c3b
127 | - 5aad18e7ce64
128 | - 0d0ad1e77851
129 | - 5c088d7e916c
130 | - d043c0ca71cd
131 | - ba8083a2c3b8
132 | - e6ddbaaf0639
133 | - 137b99e936ab
134 | - d8de352c2657
135 | - 653622ac8363
136 | - fbf33b1a2c10
137 | - fb223ed2278c
138 | - dc80ca623d71
139 | - 5e816f11f5c3
140 | - 5f94bb3e1bed
141 | - 483d6545417f
142 | - 3a9a9dc2cbd9
143 | - ad425f3ee76d
144 | - a81f4472c637
145 | - 3df0da2e5966
146 | - a261bc4b7470
147 | - c908a0ad3e31
148 | - 416354edd92a
149 | - 78569a801a38
150 | - 062cae666e2a
151 | - b84960841a75
152 | - d25e479ecbb7
153 | - 10f8bc1f7b07
154 | - 154fe824ed87
155 | - 99237ce045e4
156 | - 6a4cd123bd69
157 | - 1762ab70ec76
158 | - 559ffb7c166a
159 | - 599ca4ed791b
160 | - 971207c6a525
161 | - f0482490923c
162 | - 3be1545083b7
163 | - a167532acca2
164 | - 2654a87be968
165 | - f88e18cb4100
166 | - 9fbdeffbe2ba
167 | - 16fe2798ed0f
168 | - 4ab54be1a403
169 | - c6788e579967
170 | - 137771d19ca2
171 | - 91127c2b0e60
172 | - d5be621fd9aa
173 | - efbfc4526d58
174 | - 2cd2340ca14d
175 | - ee4e0e3afd3d
176 | - 5c55a5e717d6
177 | - 449766346eb1
178 | - f2c2436cf7b7
179 | - af91d9a50547
180 | - f56824b503a0
181 | - 31011ade7c0a
182 | - 6bf95a3cf91c
183 | - 188d4b7cd28b
184 | - e586cbfa7762
185 | - 4a31811f3558
186 | - 927dd0c35dfd
187 | - c5365a55ebb7
188 | - 08db4255286f
189 | - 9b9cd7b7af8c
190 | - a681f9b04b21
191 | - 361366da569e
192 | - 0ec9fc461819
193 | - 44d8c02b369e
194 | - 83fa182bec3a
195 | - 804594bb1f06
196 | - 8becc76ea607
197 | - 519ae2d858b0
198 | - ea0770830757
199 | - e8d0a37c3eba
200 | - 2b8d87addea9
201 | - f7eb179216c2
202 | - 8e32047cbc1f
203 | - c75b4b207bea
204 | - 51fdcc8d9fe7
205 | - 1c7c0bad1263
206 | - 9aed9ee12ae2
207 | - df33ae359fb5
208 | - 87a6cbb7c4ed
209 | - bdfce9ce62b9
210 | - 18a0ca03431d
211 | - 703b5efa9bc1
212 | - 0ce74d6d2106
213 | - 8fb18e36697d
214 | - 148471991ffb
215 | - e69aff66e0cb
216 | - c3072a759efb
217 | - 2fc653ca75c7
218 | - a3e59c2ce3f6
219 | - 0f9e60a8e56d
220 | - 390b487231ce
221 | - e11b9d69f856
222 | - 89c7daa72eee
223 | valid_series_ids:
224 | - 72bbd1ac3edf
225 | - d93b0c7de16b
226 | - 76237b9406d5
227 | - 2b0a1fa8eba8
228 | - 1087d7b0ff2e
229 | - 8b159a98f485
230 | - 8a22387617c3
231 | - 77ca4db83644
232 | - 7476c0bd18d2
233 | - 1d4569cbac0f
234 | - a9e5f5314bcb
235 | - f981a0805fd0
236 | - de6fedfb6139
237 | - 7df249527c63
238 | - d2d6b9af0553
239 | - 694faf956ebf
240 | - b364205aba43
241 | - fcca183903b7
242 | - 3d53bfea61d6
243 | - 55a47ff9dc8a
244 | - 35826366dfc7
245 | - ca732a3c37f7
246 | - fe90110788d2
247 | - d2fef7e4defd
248 | - 72ba4a8afff4
249 | - 4743bdde25df
250 | - ece2561f07e9
251 | - 90eac42a9ec9
252 | - c107b5789660
253 | - 0dee4fda51c3
254 | - 5acc9d63b5fd
255 | - 99b829cbad2d
256 | - db5e0ee1c0ab
257 | - 4b45c36f8f5a
258 | - e867b5133665
259 | - f6d2cc003183
260 | - 752900afe3a6
261 | - eef041dd50aa
262 | - 349c5562ee2c
263 | - 5ffd5e1e81ac
264 | - dfc3ccebfdc9
265 | - d9e887091a5c
266 | - 062dbd4c95e6
267 | - 3be2f86c3e45
268 | - c68260cc9e8f
269 | - ebb6fae8ed43
270 | - cf13ed7e457a
271 | - 03d92c9f6f8a
272 | - 6d6b9d22d48a
273 | - ca730dbf521d
274 | - 038441c925bb
275 | - 44a41bba1ee7
276 | - 207eded97727
277 | - 6ee4ade1f2bd
278 | - c5d08fc3e040
279 | - c7b1283bb7eb
280 |
--------------------------------------------------------------------------------